-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h                |  3
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c                | 18
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_context.c        |  9
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_execbuffer.c     | 65
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_render_state.c   |  2
5 files changed, 51 insertions(+), 46 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 6b57d8f..a1c4c76 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -3153,7 +3153,8 @@ int __must_check i915_mutex_lock_interruptible(struct drm_device *dev);
 int i915_gem_object_sync(struct drm_i915_gem_object *obj,
                          struct drm_i915_gem_request *to);
 void i915_vma_move_to_active(struct i915_vma *vma,
-                             struct drm_i915_gem_request *req);
+                             struct drm_i915_gem_request *req,
+                             unsigned int flags);
 int i915_gem_dumb_create(struct drm_file *file_priv,
                          struct drm_device *dev,
                          struct drm_mode_create_dumb *args);
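
The prototype change above means callers now pass the object's execbuffer flags alongside the request (0 when there is nothing special to record). A minimal call-site sketch of the new three-argument form, assuming a bound vma and an allocated request already exist, might look like the following; it is illustrative only and not part of the patch:

	/* Read-only users (e.g. the golden render state) pass 0. */
	i915_vma_move_to_active(vma, req, 0);

	/* Writers forward EXEC_OBJECT_WRITE so the helper records the write,
	 * invalidates frontbuffer tracking and clears the GPU write domains
	 * in one place.
	 */
	i915_vma_move_to_active(vma, req, EXEC_OBJECT_WRITE);
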
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 2afb435..ce57c50 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2330,24 +2330,6 @@ void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj)
 	return obj->mapping;
 }
 
-void i915_vma_move_to_active(struct i915_vma *vma,
-                             struct drm_i915_gem_request *req)
-{
-	struct drm_i915_gem_object *obj = vma->obj;
-	struct intel_engine_cs *engine;
-
-	engine = i915_gem_request_get_engine(req);
-
-	/* Add a reference if we're newly entering the active list. */
-	if (obj->active == 0)
-		i915_gem_object_get(obj);
-	obj->active |= intel_engine_flag(engine);
-
-	i915_gem_active_set(&obj->last_read[engine->id], req);
-
-	list_move_tail(&vma->vm_link, &vma->vm->active_list);
-}
-
 static void
 i915_gem_object_retire__write(struct i915_gem_active *active,
                               struct drm_i915_gem_request *request)
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index 24383f0..823e74c 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -816,8 +816,8 @@ static int do_rcs_switch(struct drm_i915_gem_request *req)
 	 * MI_SET_CONTEXT instead of when the next seqno has completed.
 	 */
 	if (from != NULL) {
-		from->engine[RCS].state->base.read_domains = I915_GEM_DOMAIN_INSTRUCTION;
-		i915_vma_move_to_active(i915_gem_obj_to_ggtt(from->engine[RCS].state), req);
+		struct drm_i915_gem_object *obj = from->engine[RCS].state;
+
 		/* As long as MI_SET_CONTEXT is serializing, ie. it flushes the
 		 * whole damn pipeline, we don't need to explicitly mark the
 		 * object dirty. The only exception is that the context must be
@@ -825,10 +825,11 @@ static int do_rcs_switch(struct drm_i915_gem_request *req)
 		 * able to defer doing this until we know the object would be
 		 * swapped, but there is no way to do that yet.
 		 */
-		from->engine[RCS].state->dirty = 1;
+		obj->base.read_domains = I915_GEM_DOMAIN_INSTRUCTION;
+		i915_vma_move_to_active(i915_gem_obj_to_ggtt(obj), req, 0);
 
 		/* obj is kept alive until the next request by its active ref */
-		i915_gem_object_ggtt_unpin(from->engine[RCS].state);
+		i915_gem_object_ggtt_unpin(obj);
 		i915_gem_context_put(from);
 	}
 	engine->last_context = i915_gem_context_get(to);
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 2f7173d..8bf20f5 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -1143,43 +1143,64 @@ i915_gem_validate_context(struct drm_device *dev, struct drm_file *file,
 	return ctx;
 }
 
+void i915_vma_move_to_active(struct i915_vma *vma,
+                             struct drm_i915_gem_request *req,
+                             unsigned int flags)
+{
+	struct drm_i915_gem_object *obj = vma->obj;
+	const unsigned int idx = req->engine->id;
+
+	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
+
+	obj->dirty = 1; /* be paranoid */
+
+	/* Add a reference if we're newly entering the active list. */
+	if (obj->active == 0)
+		i915_gem_object_get(obj);
+	obj->active |= 1 << idx;
+	i915_gem_active_set(&obj->last_read[idx], req);
+
+	if (flags & EXEC_OBJECT_WRITE) {
+		i915_gem_active_set(&obj->last_write, req);
+
+		intel_fb_obj_invalidate(obj, ORIGIN_CS);
+
+		/* update for the implicit flush after a batch */
+		obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS;
+	}
+
+	if (flags & EXEC_OBJECT_NEEDS_FENCE) {
+		i915_gem_active_set(&obj->last_fence, req);
+		if (flags & __EXEC_OBJECT_HAS_FENCE) {
+			struct drm_i915_private *dev_priv = req->i915;
+
+			list_move_tail(&dev_priv->fence_regs[obj->fence_reg].lru_list,
+			               &dev_priv->mm.fence_list);
+		}
+	}
+
+	list_move_tail(&vma->vm_link, &vma->vm->active_list);
+}
+
 static void
 i915_gem_execbuffer_move_to_active(struct list_head *vmas,
                                    struct drm_i915_gem_request *req)
 {
-	struct intel_engine_cs *engine = i915_gem_request_get_engine(req);
 	struct i915_vma *vma;
 
 	list_for_each_entry(vma, vmas, exec_list) {
-		struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
 		struct drm_i915_gem_object *obj = vma->obj;
 		u32 old_read = obj->base.read_domains;
 		u32 old_write = obj->base.write_domain;
 
-		obj->dirty = 1; /* be paranoid */
 		obj->base.write_domain = obj->base.pending_write_domain;
-		if (obj->base.write_domain == 0)
+		if (obj->base.write_domain)
+			vma->exec_entry->flags |= EXEC_OBJECT_WRITE;
+		else
 			obj->base.pending_read_domains |= obj->base.read_domains;
 		obj->base.read_domains = obj->base.pending_read_domains;
 
-		i915_vma_move_to_active(vma, req);
-		if (obj->base.write_domain) {
-			i915_gem_active_set(&obj->last_write, req);
-
-			intel_fb_obj_invalidate(obj, ORIGIN_CS);
-
-			/* update for the implicit flush after a batch */
-			obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS;
-		}
-		if (entry->flags & EXEC_OBJECT_NEEDS_FENCE) {
-			i915_gem_active_set(&obj->last_fence, req);
-			if (entry->flags & __EXEC_OBJECT_HAS_FENCE) {
-				struct drm_i915_private *dev_priv = engine->i915;
-				list_move_tail(&dev_priv->fence_regs[obj->fence_reg].lru_list,
-				               &dev_priv->mm.fence_list);
-			}
-		}
-
+		i915_vma_move_to_active(vma, req, vma->exec_entry->flags);
 		trace_i915_gem_object_change_domain(obj, old_read, old_write);
 	}
 }
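
The consolidated helper above keeps all per-request bookkeeping next to the active-list update: the object takes a reference the first time it becomes active, each engine then sets its bit in obj->active, and the write/fence handling is driven entirely by the flags the caller computed. A standalone user-space sketch of that first-activation/bitmask pattern (hypothetical types, compiles on its own, deliberately not i915 code) is:

	#include <stdio.h>

	/* Hypothetical stand-ins for the kernel structures; illustrative only. */
	struct fake_obj {
		unsigned int active;    /* one bit per engine, like obj->active */
		unsigned int refcount;
	};

	/* Mirrors the pattern in i915_vma_move_to_active(): take a reference
	 * only when the object first becomes active, then record this engine.
	 */
	static void move_to_active(struct fake_obj *obj, unsigned int engine_id)
	{
		if (obj->active == 0)
			obj->refcount++;                /* i915_gem_object_get() */
		obj->active |= 1u << engine_id;         /* obj->active |= 1 << idx */
	}

	/* Retiring clears the engine's bit; the reference is dropped only when
	 * the last engine retires, matching how the driver releases its active
	 * reference (the retire path is not part of this diff).
	 */
	static void retire(struct fake_obj *obj, unsigned int engine_id)
	{
		obj->active &= ~(1u << engine_id);
		if (obj->active == 0)
			obj->refcount--;                /* i915_gem_object_put() */
	}

	int main(void)
	{
		struct fake_obj obj = { 0, 1 };         /* one base reference */

		move_to_active(&obj, 0);                /* e.g. RCS */
		move_to_active(&obj, 2);                /* e.g. BCS */
		retire(&obj, 0);
		retire(&obj, 2);

		printf("active=%#x refcount=%u\n", obj.active, obj.refcount);
		return 0;
	}
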
diff --git a/drivers/gpu/drm/i915/i915_gem_render_state.c b/drivers/gpu/drm/i915/i915_gem_render_state.c
index f85c550..9023667 100644
--- a/drivers/gpu/drm/i915/i915_gem_render_state.c
+++ b/drivers/gpu/drm/i915/i915_gem_render_state.c
@@ -217,7 +217,7 @@ int i915_gem_render_state_init(struct drm_i915_gem_request *req)
 			goto err_unpin;
 	}
 
-	i915_vma_move_to_active(i915_gem_obj_to_ggtt(so.obj), req);
+	i915_vma_move_to_active(i915_gem_obj_to_ggtt(so.obj), req, 0);
 err_unpin:
 	i915_gem_object_ggtt_unpin(so.obj);
 err_obj: