author		Daniel Vetter <daniel.vetter@ffwll.ch>		2010-10-01 20:05:20 (GMT)
committer	Chris Wilson <chris@chris-wilson.co.uk>		2010-10-27 22:31:08 (GMT)
commit		fb7d516af11837126eb1e4a44ab0653bf9b57702
tree		f74c587c80c53f6f30fdab9d80d41a40bbd1bc5e
parent		ec57d2602a985b54b53e53098f74fdefc24723af
download	linux-fb7d516af11837126eb1e4a44ab0653bf9b57702.tar.xz
drm/i915: add accounting for mappable objects in gtt v2
More precisely: For those that _need_ to be mappable. Also add two
BUG_ONs in fault and pin to check the consistency of the mappable
flag.
Changes in v2:
- Add tracking of gtt mappable space (to notice mappable/unmappable
balancing issues).
- Improve the mappable working set tracking by tracking fault and pin
separately.
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
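
[Editor's sketch] The bookkeeping added here reduces to a combined pin/fault state per object: an object counts toward the mappable working set while either flag is set, and the counters move only on the first set and the last clear. A minimal standalone model of that rule, using stand-in types rather than the driver's structures (the real logic is i915_gem_info_update_mappable() in the diff below):

/* Simplified model of the mappable working-set accounting added by this
 * patch.  The struct names are stand-ins for illustration only; the real
 * code operates on drm_i915_private / drm_i915_gem_object.
 */
#include <stdbool.h>
#include <stddef.h>

struct obj_state {
	bool pin_mappable;
	bool fault_mappable;
	size_t size;
};

struct gtt_stats {
	unsigned int gtt_mappable_count;
	size_t gtt_mappable_memory;
};

/* Call only after one of pin_mappable/fault_mappable has just been flipped;
 * @mappable is the new value of the flag that changed. */
static void update_mappable(struct gtt_stats *stats,
			    const struct obj_state *obj, bool mappable)
{
	if (mappable) {
		/* The other flag was already set: object already counted. */
		if (obj->pin_mappable && obj->fault_mappable)
			return;
		stats->gtt_mappable_count++;
		stats->gtt_mappable_memory += obj->size;
	} else {
		/* The other flag is still set: object stays counted. */
		if (obj->pin_mappable || obj->fault_mappable)
			return;
		stats->gtt_mappable_count--;
		stats->gtt_mappable_memory -= obj->size;
	}
}

Pinning with mappable == true sets pin_mappable and calls this with true; releasing the CPU mapping clears fault_mappable and calls it with false, mirroring the call sites in i915_gem.c below.
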
 drivers/gpu/drm/i915/i915_debugfs.c |  6
 drivers/gpu/drm/i915/i915_drv.h     | 12
 drivers/gpu/drm/i915/i915_gem.c     | 93
 3 files changed, 98 insertions(+), 13 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index beb3de7..2e59611 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -131,6 +131,8 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
 		seq_printf(m, " (fence: %d)", obj->fence_reg);
 	if (obj->gtt_space != NULL)
 		seq_printf(m, " (gtt_offset: %08x)", obj->gtt_offset);
+	if (obj->pin_mappable || obj->fault_mappable)
+		seq_printf(m, " (mappable)");
 	if (obj->ring != NULL)
 		seq_printf(m, " (%s)", obj->ring->name);
 }
@@ -207,6 +209,10 @@ static int i915_gem_object_info(struct seq_file *m, void* data)
 	seq_printf(m, "%zu object bytes\n", dev_priv->mm.object_memory);
 	seq_printf(m, "%u pinned\n", dev_priv->mm.pin_count);
 	seq_printf(m, "%zu pin bytes\n", dev_priv->mm.pin_memory);
+	seq_printf(m, "%u mappable objects in gtt\n", dev_priv->mm.gtt_mappable_count);
+	seq_printf(m, "%zu mappable gtt bytes\n", dev_priv->mm.gtt_mappable_memory);
+	seq_printf(m, "%zu mappable gtt used bytes\n", dev_priv->mm.mappable_gtt_used);
+	seq_printf(m, "%zu mappable gtt total\n", dev_priv->mm.mappable_gtt_total);
 	seq_printf(m, "%u objects in gtt\n", dev_priv->mm.gtt_count);
 	seq_printf(m, "%zu gtt bytes\n", dev_priv->mm.gtt_memory);
 	seq_printf(m, "%zu gtt total\n", dev_priv->mm.gtt_total);
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 296ed38..cdae5d1 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -643,9 +643,13 @@ typedef struct drm_i915_private {
 		size_t object_memory;
 		size_t pin_memory;
 		size_t gtt_memory;
+		size_t gtt_mappable_memory;
+		size_t mappable_gtt_used;
+		size_t mappable_gtt_total;
 		size_t gtt_total;
 		u32 object_count;
 		u32 pin_count;
+		u32 gtt_mappable_count;
 		u32 gtt_count;
 	} mm;
 	struct sdvo_device_mapping sdvo_mappings[2];
@@ -775,6 +779,14 @@ struct drm_i915_gem_object {
 	unsigned int pin_count : 4;
 #define DRM_I915_GEM_OBJECT_MAX_PIN_COUNT 0xf
 
+	/**
+	 * Whether the current gtt mapping needs to be mappable (and isn't just
+	 * mappable by accident). Track pin and fault separately for a more
+	 * accurate mappable working set.
+	 */
+	unsigned int fault_mappable : 1;
+	unsigned int pin_mappable : 1;
+
 	/** AGP memory structure for our GTT binding. */
 	DRM_AGP_MEM *agp_mem;
 
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index bb5435b..2eceb24 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -84,31 +84,83 @@ static void i915_gem_info_remove_obj(struct drm_i915_private *dev_priv,
 }
 
 static void i915_gem_info_add_gtt(struct drm_i915_private *dev_priv,
-				  size_t size)
+				  struct drm_gem_object *obj)
 {
+	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
 	dev_priv->mm.gtt_count++;
-	dev_priv->mm.gtt_memory += size;
+	dev_priv->mm.gtt_memory += obj->size;
+	if (obj_priv->gtt_offset < dev_priv->mm.gtt_mappable_end) {
+		dev_priv->mm.mappable_gtt_used +=
+			min_t(size_t, obj->size,
+			      dev_priv->mm.gtt_mappable_end
+					- obj_priv->gtt_offset);
+	}
 }
 
 static void i915_gem_info_remove_gtt(struct drm_i915_private *dev_priv,
-				     size_t size)
+				     struct drm_gem_object *obj)
 {
+	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
 	dev_priv->mm.gtt_count--;
-	dev_priv->mm.gtt_memory -= size;
+	dev_priv->mm.gtt_memory -= obj->size;
+	if (obj_priv->gtt_offset < dev_priv->mm.gtt_mappable_end) {
+		dev_priv->mm.mappable_gtt_used -=
+			min_t(size_t, obj->size,
+			      dev_priv->mm.gtt_mappable_end
+					- obj_priv->gtt_offset);
+	}
+}
+
+/**
+ * Update the mappable working set counters. Call _only_ when there is a change
+ * in one of (pin|fault)_mappable and update *_mappable _before_ calling.
+ * @mappable: new state of the changed mappable flag (either pin_ or fault_).
+ */
+static void
+i915_gem_info_update_mappable(struct drm_i915_private *dev_priv,
+			      struct drm_gem_object *obj,
+			      bool mappable)
+{
+	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
+
+	if (mappable) {
+		if (obj_priv->pin_mappable && obj_priv->fault_mappable)
+			/* Combined state was already mappable. */
+			return;
+		dev_priv->mm.gtt_mappable_count++;
+		dev_priv->mm.gtt_mappable_memory += obj->size;
+	} else {
+		if (obj_priv->pin_mappable || obj_priv->fault_mappable)
+			/* Combined state still mappable. */
+			return;
+		dev_priv->mm.gtt_mappable_count--;
+		dev_priv->mm.gtt_mappable_memory -= obj->size;
+	}
 }
 
 static void i915_gem_info_add_pin(struct drm_i915_private *dev_priv,
-				  size_t size)
+				  struct drm_gem_object *obj,
+				  bool mappable)
 {
+	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
 	dev_priv->mm.pin_count++;
-	dev_priv->mm.pin_memory += size;
+	dev_priv->mm.pin_memory += obj->size;
+	if (mappable) {
+		obj_priv->pin_mappable = true;
+		i915_gem_info_update_mappable(dev_priv, obj, true);
+	}
 }
 
 static void i915_gem_info_remove_pin(struct drm_i915_private *dev_priv,
-				     size_t size)
+				     struct drm_gem_object *obj)
 {
+	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
 	dev_priv->mm.pin_count--;
-	dev_priv->mm.pin_memory -= size;
+	dev_priv->mm.pin_memory -= obj->size;
+	if (obj_priv->pin_mappable) {
+		obj_priv->pin_mappable = false;
+		i915_gem_info_update_mappable(dev_priv, obj, false);
+	}
 }
 
 int
@@ -188,6 +240,7 @@ int i915_gem_do_init(struct drm_device *dev,
 		    end - start);
 
 	dev_priv->mm.gtt_total = end - start;
+	dev_priv->mm.mappable_gtt_total = min(end, mappable_end) - start;
 	dev_priv->mm.gtt_mappable_end = mappable_end;
 
 	return 0;
@@ -1266,6 +1319,7 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 
 	/* Now bind it into the GTT if needed */
 	mutex_lock(&dev->struct_mutex);
+	BUG_ON(obj_priv->pin_count && !obj_priv->pin_mappable);
 	if (!i915_gem_object_cpu_accessible(obj_priv))
 		i915_gem_object_unbind(obj);
 
@@ -1279,6 +1333,11 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 			goto unlock;
 	}
 
+	if (!obj_priv->fault_mappable) {
+		obj_priv->fault_mappable = true;
+		i915_gem_info_update_mappable(dev_priv, obj, true);
+	}
+
 	/* Need a new fence register? */
 	if (obj_priv->tiling_mode != I915_TILING_NONE) {
 		ret = i915_gem_object_get_fence_reg(obj, true);
@@ -1396,11 +1455,17 @@ void
 i915_gem_release_mmap(struct drm_gem_object *obj)
 {
 	struct drm_device *dev = obj->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
 
 	if (dev->dev_mapping)
 		unmap_mapping_range(dev->dev_mapping,
 				    obj_priv->mmap_offset, obj->size, 1);
+
+	if (obj_priv->fault_mappable) {
+		obj_priv->fault_mappable = false;
+		i915_gem_info_update_mappable(dev_priv, obj, false);
+	}
 }
 
 static void
@@ -2177,7 +2242,7 @@ i915_gem_object_unbind(struct drm_gem_object *obj)
 	i915_gem_object_put_pages(obj);
 	BUG_ON(obj_priv->pages_refcount);
 
-	i915_gem_info_remove_gtt(dev_priv, obj->size);
+	i915_gem_info_remove_gtt(dev_priv, obj);
 	list_del_init(&obj_priv->mm_list);
 
 	drm_mm_put_block(obj_priv->gtt_space);
@@ -2763,9 +2828,11 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj,
 		goto search_free;
 	}
 
+	obj_priv->gtt_offset = obj_priv->gtt_space->start;
+
 	/* keep track of bounds object by adding it to the inactive list */
 	list_add_tail(&obj_priv->mm_list, &dev_priv->mm.inactive_list);
-	i915_gem_info_add_gtt(dev_priv, obj->size);
+	i915_gem_info_add_gtt(dev_priv, obj);
 
 	/* Assert that the object is not currently in any GPU domain. As it
 	 * wasn't in the GTT, there shouldn't be any way it could have been in
@@ -2774,7 +2841,6 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj,
 	BUG_ON(obj->read_domains & I915_GEM_GPU_DOMAINS);
 	BUG_ON(obj->write_domain & I915_GEM_GPU_DOMAINS);
 
-	obj_priv->gtt_offset = obj_priv->gtt_space->start;
 	trace_i915_gem_object_bind(obj, obj_priv->gtt_offset, mappable);
 
 	return 0;
@@ -4107,11 +4173,12 @@ i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment,
 	 * remove it from the inactive list
 	 */
 	if (obj_priv->pin_count == 1) {
-		i915_gem_info_add_pin(dev_priv, obj->size);
+		i915_gem_info_add_pin(dev_priv, obj, mappable);
 		if (!obj_priv->active)
 			list_move_tail(&obj_priv->mm_list,
 				       &dev_priv->mm.pinned_list);
 	}
+	BUG_ON(!obj_priv->pin_mappable && mappable);
 
 	WARN_ON(i915_verify_lists(dev));
 	return 0;
@@ -4137,7 +4204,7 @@ i915_gem_object_unpin(struct drm_gem_object *obj)
 		if (!obj_priv->active)
 			list_move_tail(&obj_priv->mm_list,
 				       &dev_priv->mm.inactive_list);
-		i915_gem_info_remove_pin(dev_priv, obj->size);
+		i915_gem_info_remove_pin(dev_priv, obj);
 	}
 	WARN_ON(i915_verify_lists(dev));
 }
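
[Editor's sketch] Note on the min_t() clamp in i915_gem_info_add_gtt()/i915_gem_info_remove_gtt(): an object that straddles gtt_mappable_end only charges the portion below the boundary to mappable_gtt_used. A tiny standalone illustration of that arithmetic (the sizes and offsets are made up for the example, not taken from the driver):

#include <stddef.h>
#include <stdio.h>

/* Portion of an object that lies below the mappable boundary; mirrors
 * min_t(size_t, obj->size, gtt_mappable_end - gtt_offset) guarded by the
 * gtt_offset < gtt_mappable_end check in the patch. */
static size_t mappable_portion(size_t gtt_offset, size_t obj_size,
			       size_t gtt_mappable_end)
{
	if (gtt_offset >= gtt_mappable_end)
		return 0;	/* entirely above the mappable aperture */
	size_t room = gtt_mappable_end - gtt_offset;
	return obj_size < room ? obj_size : room;
}

int main(void)
{
	/* Hypothetical 256 MiB mappable aperture; a 16 MiB object bound at
	 * 248 MiB contributes only the 8 MiB below the boundary. */
	printf("%zu\n", mappable_portion(248u << 20, 16u << 20, 256u << 20));
	return 0;
}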