From e6994aeedcee4f71998d89d2c10c5baa419ebeac Mon Sep 17 00:00:00 2001
From: Chris Wilson
Date: Tue, 10 Jul 2012 10:27:08 +0100
Subject: drm/i915: Export ability of changing cache levels to userspace

By selecting the cache level (essentially whether or not the CPU snoops
any updates to the bo, and on more recent machines whether it resides
inside the CPU's last-level-cache) a userspace driver is able to then
manage all of its memory within buffer objects, if it so desires. This
enables the userspace driver to accelerate uploads and, more importantly,
downloads from the GPU, and to be able to mix CPU and GPU
rendering/activity efficiently.

Signed-off-by: Chris Wilson
[danvet: Added code comment about where we plan to stuff platform
specific cacheing control bits in the ioctl struct.]
Signed-off-by: Daniel Vetter

diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 733744f..71672ce 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -1835,6 +1835,8 @@ struct drm_ioctl_desc i915_ioctls[] = {
 	DRM_IOCTL_DEF_DRV(I915_GEM_PIN, i915_gem_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED),
 	DRM_IOCTL_DEF_DRV(I915_GEM_UNPIN, i915_gem_unpin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED),
 	DRM_IOCTL_DEF_DRV(I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_AUTH|DRM_UNLOCKED),
+	DRM_IOCTL_DEF_DRV(I915_GEM_SET_CACHEING, i915_gem_set_cacheing_ioctl, DRM_UNLOCKED),
+	DRM_IOCTL_DEF_DRV(I915_GEM_GET_CACHEING, i915_gem_get_cacheing_ioctl, DRM_UNLOCKED),
 	DRM_IOCTL_DEF_DRV(I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_AUTH|DRM_UNLOCKED),
 	DRM_IOCTL_DEF_DRV(I915_GEM_ENTERVT, i915_gem_entervt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
 	DRM_IOCTL_DEF_DRV(I915_GEM_LEAVEVT, i915_gem_leavevt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 270b31c..0dc89a4 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -848,9 +848,9 @@ enum hdmi_force_audio {
 };
 
 enum i915_cache_level {
-	I915_CACHE_NONE,
+	I915_CACHE_NONE = 0,
 	I915_CACHE_LLC,
-	I915_CACHE_LLC_MLC, /* gen6+ */
+	I915_CACHE_LLC_MLC, /* gen6+, in docs at least! */
 };
 
 struct drm_i915_gem_object {
@@ -1238,6 +1238,10 @@ int i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
 			 struct drm_file *file_priv);
 int i915_gem_busy_ioctl(struct drm_device *dev, void *data,
 			struct drm_file *file_priv);
+int i915_gem_get_cacheing_ioctl(struct drm_device *dev, void *data,
+				struct drm_file *file);
+int i915_gem_set_cacheing_ioctl(struct drm_device *dev, void *data,
+				struct drm_file *file);
 int i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
 			    struct drm_file *file_priv);
 int i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 19bdc24..c540321 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -3015,6 +3015,68 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
 	return 0;
 }
 
+int i915_gem_get_cacheing_ioctl(struct drm_device *dev, void *data,
+				struct drm_file *file)
+{
+	struct drm_i915_gem_cacheing *args = data;
+	struct drm_i915_gem_object *obj;
+	int ret;
+
+	ret = i915_mutex_lock_interruptible(dev);
+	if (ret)
+		return ret;
+
+	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
+	if (&obj->base == NULL) {
+		ret = -ENOENT;
+		goto unlock;
+	}
+
+	args->cacheing = obj->cache_level != I915_CACHE_NONE;
+
+	drm_gem_object_unreference(&obj->base);
+unlock:
+	mutex_unlock(&dev->struct_mutex);
+	return ret;
+}
+
+int i915_gem_set_cacheing_ioctl(struct drm_device *dev, void *data,
+				struct drm_file *file)
+{
+	struct drm_i915_gem_cacheing *args = data;
+	struct drm_i915_gem_object *obj;
+	enum i915_cache_level level;
+	int ret;
+
+	switch (args->cacheing) {
+	case I915_CACHEING_NONE:
+		level = I915_CACHE_NONE;
+		break;
+	case I915_CACHEING_CACHED:
+		level = I915_CACHE_LLC;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	ret = i915_mutex_lock_interruptible(dev);
+	if (ret)
+		return ret;
+
+	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
+	if (&obj->base == NULL) {
+		ret = -ENOENT;
+		goto unlock;
+	}
+
+	ret = i915_gem_object_set_cache_level(obj, level);
+
+	drm_gem_object_unreference(&obj->base);
+unlock:
+	mutex_unlock(&dev->struct_mutex);
+	return ret;
+}
+
 /*
  * Prepare buffer for display plane (scanout, cursors, etc).
  * Can be called from an uninterruptible phase (modesetting) and allows
diff --git a/include/drm/i915_drm.h b/include/drm/i915_drm.h
index 0f149fe..772b0d6 100644
--- a/include/drm/i915_drm.h
+++ b/include/drm/i915_drm.h
@@ -716,10 +716,16 @@ struct drm_i915_gem_busy {
 #define I915_CACHEING_CACHED 1
 
 struct drm_i915_gem_cacheing {
-	/** Handle of the buffer to set/get the cacheing level of */
+	/**
+	 * Handle of the buffer to set/get the cacheing level of. */
	__u32 handle;
 
-	/** Cacheing level to apply or return value */
+	/**
+	 * Cacheing level to apply or return value
+	 *
+	 * bits0-15 are for generic cacheing control (i.e. the above defined
+	 * values). bits16-31 are reserved for platform-specific variations
+	 * (e.g. l3$ caching on gen7). */
 	__u32 cacheing;
 };
 
--
cgit v0.10.2
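
For reference, a minimal userspace sketch of driving the new interface
follows. It is not part of the patch: the helper names are made up for
illustration, and it assumes the series also provides the
DRM_IOCTL_I915_GEM_SET_CACHEING / DRM_IOCTL_I915_GEM_GET_CACHEING request
macros alongside the I915_CACHEING_* values in i915_drm.h (those hunks are
not shown above), with the header reachable as <drm/i915_drm.h> on the
build host.

/* Hypothetical userspace helpers, not part of the patch. */
#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>

#include <drm/i915_drm.h>	/* struct drm_i915_gem_cacheing, I915_CACHEING_* */

/* Ask the kernel to change a bo's cacheing level; bits 16-31 stay zero
 * here since only the generic control values are used. */
static int gem_set_cacheing(int fd, uint32_t handle, uint32_t cacheing)
{
	struct drm_i915_gem_cacheing arg;
	int ret;

	memset(&arg, 0, sizeof(arg));
	arg.handle = handle;
	arg.cacheing = cacheing;

	/* Restart on EINTR/EAGAIN, mirroring libdrm's drmIoctl(), since
	 * i915_mutex_lock_interruptible() may bail out on signals. */
	do {
		ret = ioctl(fd, DRM_IOCTL_I915_GEM_SET_CACHEING, &arg);
	} while (ret == -1 && (errno == EINTR || errno == EAGAIN));

	return ret == -1 ? -errno : 0;
}

/* Read back whether a bo is currently snooped/cached (non-zero) or
 * uncached (zero). */
static int gem_get_cacheing(int fd, uint32_t handle, uint32_t *cacheing)
{
	struct drm_i915_gem_cacheing arg;
	int ret;

	memset(&arg, 0, sizeof(arg));
	arg.handle = handle;

	do {
		ret = ioctl(fd, DRM_IOCTL_I915_GEM_GET_CACHEING, &arg);
	} while (ret == -1 && (errno == EINTR || errno == EAGAIN));

	if (ret == -1)
		return -errno;

	*cacheing = arg.cacheing;
	return 0;
}

A driver would typically flip a bo to I915_CACHEING_CACHED before reading
render results back with the CPU, e.g.
gem_set_cacheing(fd, handle, I915_CACHEING_CACHED), and keep scanout or
write-once upload buffers at I915_CACHEING_NONE.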