author    Chris Wilson <chris@chris-wilson.co.uk>    2016-08-02 21:50:24 (GMT)
committer Chris Wilson <chris@chris-wilson.co.uk>    2016-08-02 21:58:19 (GMT)
commit    c7fe7d25ed6036ff16b1c112463baff21c3b205d (patch)
tree      7a57dbe985bbdf4834511d7fc78aa7abbaf7b890 /drivers/gpu/drm/i915/intel_lrc.c
parent    aad29fbbb86dbac69e25433b14c8a718fb53115e (diff)
drm/i915: Remove obsolete engine->gpu_caches_dirty
Space for flushing the GPU cache prior to completing the request is
preallocated and so cannot fail - the GPU caches will always be flushed
along with the completed request. This means we no longer have to track
whether the GPU cache is dirty between batches like we had to with the
outstanding_lazy_seqno.

With the removal of the duplication in the per-backend entry points for
emitting the obsolete lazy flush, we can then further unify the
engine->emit_flush.

v2: Expand a bit on the legacy of gpu_caches_dirty

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Link: http://patchwork.freedesktop.org/patch/msgid/1469432687-22756-18-git-send-email-chris@chris-wilson.co.uk
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Link: http://patchwork.freedesktop.org/patch/msgid/1470174640-18242-7-git-send-email-chris@chris-wilson.co.uk
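For context, a minimal sketch of the calling pattern this patch converges on
(illustrative only, not buildable driver code; the argument order follows the
removed helpers in the hunks below, where emit_flush() takes the request plus
the invalidate and flush domain masks):

	/* Before a batch: unconditionally invalidate the GPU caches
	 * (execlists_move_to_gpu in the diff below). */
	ret = req->engine->emit_flush(req, I915_GEM_GPU_DOMAINS, 0);

	/* Around the workaround batch: invalidate and flush everything
	 * (intel_logical_ring_workarounds_emit below). */
	ret = req->engine->emit_flush(req,
				      I915_GEM_GPU_DOMAINS,
				      I915_GEM_GPU_DOMAINS);

Because ring space for these flushes is preallocated with each request, the
emission cannot fail for lack of space, so the engine->gpu_caches_dirty flag
and the logical_ring_{invalidate,flush}_all_caches() wrappers it guarded are
no longer needed.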
Diffstat (limited to 'drivers/gpu/drm/i915/intel_lrc.c')
-rw-r--r--  drivers/gpu/drm/i915/intel_lrc.c  47
1 file changed, 7 insertions(+), 40 deletions(-)
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 86b8f41..e8d971e 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -642,24 +642,6 @@ static void execlists_context_queue(struct drm_i915_gem_request *request)
spin_unlock_bh(&engine->execlist_lock);
}
-static int logical_ring_invalidate_all_caches(struct drm_i915_gem_request *req)
-{
- struct intel_engine_cs *engine = req->engine;
- uint32_t flush_domains;
- int ret;
-
- flush_domains = 0;
- if (engine->gpu_caches_dirty)
- flush_domains = I915_GEM_GPU_DOMAINS;
-
- ret = engine->emit_flush(req, I915_GEM_GPU_DOMAINS, flush_domains);
- if (ret)
- return ret;
-
- engine->gpu_caches_dirty = false;
- return 0;
-}
-
static int execlists_move_to_gpu(struct drm_i915_gem_request *req,
struct list_head *vmas)
{
@@ -690,7 +672,7 @@ static int execlists_move_to_gpu(struct drm_i915_gem_request *req,
/* Unconditionally invalidate gpu caches and ensure that we do flush
* any residual writes from the previous batch.
*/
- return logical_ring_invalidate_all_caches(req);
+ return req->engine->emit_flush(req, I915_GEM_GPU_DOMAINS, 0);
}
int intel_logical_ring_alloc_request_extras(struct drm_i915_gem_request *request)
@@ -930,22 +912,6 @@ void intel_logical_ring_stop(struct intel_engine_cs *engine)
I915_WRITE_MODE(engine, _MASKED_BIT_DISABLE(STOP_RING));
}
-int logical_ring_flush_all_caches(struct drm_i915_gem_request *req)
-{
- struct intel_engine_cs *engine = req->engine;
- int ret;
-
- if (!engine->gpu_caches_dirty)
- return 0;
-
- ret = engine->emit_flush(req, 0, I915_GEM_GPU_DOMAINS);
- if (ret)
- return ret;
-
- engine->gpu_caches_dirty = false;
- return 0;
-}
-
static int intel_lr_context_pin(struct i915_gem_context *ctx,
struct intel_engine_cs *engine)
{
@@ -1026,15 +992,15 @@ void intel_lr_context_unpin(struct i915_gem_context *ctx,
static int intel_logical_ring_workarounds_emit(struct drm_i915_gem_request *req)
{
int ret, i;
- struct intel_engine_cs *engine = req->engine;
struct intel_ring *ring = req->ring;
struct i915_workarounds *w = &req->i915->workarounds;
if (w->count == 0)
return 0;
- engine->gpu_caches_dirty = true;
- ret = logical_ring_flush_all_caches(req);
+ ret = req->engine->emit_flush(req,
+ I915_GEM_GPU_DOMAINS,
+ I915_GEM_GPU_DOMAINS);
if (ret)
return ret;
@@ -1051,8 +1017,9 @@ static int intel_logical_ring_workarounds_emit(struct drm_i915_gem_request *req)
intel_ring_advance(ring);
- engine->gpu_caches_dirty = true;
- ret = logical_ring_flush_all_caches(req);
+ ret = req->engine->emit_flush(req,
+ I915_GEM_GPU_DOMAINS,
+ I915_GEM_GPU_DOMAINS);
if (ret)
return ret;