author		Linus Torvalds <torvalds@linux-foundation.org>	2012-03-22 20:08:22 (GMT)
committer	Linus Torvalds <torvalds@linux-foundation.org>	2012-03-22 20:08:22 (GMT)
commit		be53bfdb8088e9d1924199cc1a96e113756b1075 (patch)
tree		8c65eb9d82ca4c0f11c17cfdc44d5263820b415b /drivers/gpu/drm/vmwgfx
parent		b2094ef840697bc8ca5d17a83b7e30fad5f1e9fa (diff)
parent		5466c7b1683a23dbbcfb7ee4a71c4f23886001c7 (diff)
download	linux-fsl-qoriq-be53bfdb8088e9d1924199cc1a96e113756b1075.tar.xz
Merge branch 'drm-next' of git://people.freedesktop.org/~airlied/linux

Pull drm main changes from Dave Airlie:
 "This is the main drm pull request, I'm probably going to send two more
  smaller ones, will explain below.

  This contains a patch that is also in the fbdev tree, but it should be
  the same patch, it added an API for hot unplugging framebuffer
  devices, and I need that API for a new driver.

  It also contains some changes to the i2c tree which Jean has acked,
  and one change to moorestown platform stuff in x86.

  Highlights:

   - new drivers: UDL driver for USB displaylink devices, kms only,
     should support correct hotplug operations.

   - core: i2c speedups + better hotplug support, EDID overriding via
     firmware interface - allows user to load a firmware for a broken
     monitor/kvm from userspace, it even has documentation for it.

   - exynos: new HDMI audio + hdmi 1.4 + virtual output driver

   - gma500: code cleanup

   - radeon: cleanups, CS optimisations, streamout support and pageflip
     fix

   - nouveau: NVD9 displayport support + more reclocking work

   - i915: re-enabling GMBUS, finish gpu patch (might help hibernation
     who knows), missed irq fixes, stencil tiling fixes, interlaced
     support, aliased PPGTT support for SNB/IVB, swizzling for SNB/IVB,
     semaphore fixes

  As well as the usual bunch of cleanups and fixes all over the place.

  I've got two things I'd like to merge a bit later:

  a) AMD support for all their new radeonhd 7000 series GPUs and APUs.
     AMD dropped this a bit late due to insane internal review
     processes, (please AMD just follow Intel and let open source guys
     ship stuff early) however I don't want to penalise people who own
     this hardware (since it's been on sale for 3-4 months and GPU hw
     doesn't exactly have a lifetime in years) and consign them to
     using closed drivers for longer than necessary.

     The changes are well contained and just plug into the driver new
     gpu functionality so they should be fairly regression proof.  I
     just want to give them a bit of a run on the hw AMD kindly sent me.

  b) drm prime/dma-buf interface code.  This is just infrastructure
     code to expose the dma-buf stuff to drm drivers and to userspace.
     I'm not planning on pushing any driver support in this cycle
     (except maybe exynos), but I'd like to get the infrastructure code
     in so for the next cycle I can start getting the driver support
     into the individual drivers.

     We have started driver support for i915, nouveau and udl along
     with I think exynos and omap in staging.  However this code relies
     on the dma-buf tree being pulled into your tree first since it
     needs the latest interfaces from that tree.  I'll push to get that
     tree sent asap.

  (oh and any warnings you see in i915 are gcc's fault from what anyone
  can see)."

Fix up trivial conflicts in arch/x86/platform/mrst/mrst.c due to the new
msic_thermal_platform_data() thermal function being added next to the
tc35876x_platform_data() i2c device function.

* 'drm-next' of git://people.freedesktop.org/~airlied/linux: (326 commits)
  drm/i915: use DDC_ADDR instead of hard-coding it
  drm/radeon: use DDC_ADDR instead of hard-coding it
  drm: remove unneeded redefinition of DDC_ADDR
  drm/exynos: added virtual display driver.
  drm: allow loading an EDID as firmware to override broken monitor
  drm/exynos: enable hdmi audio feature
  drm/exynos: add default pixel format for plane
  drm/exynos: cleanup exynos_hdmi.h
  drm/exynos: add is_local member in exynos_drm_subdrv struct
  drm/exynos: add subdrv open/close functions
  drm/exynos: remove module of exynos drm subdrv
  drm/exynos: release pending pageflip events when closed
  drm/exynos: added new funtion to get/put dma address.
  drm/exynos: update gem and buffer framework.
  drm/exynos: added mode_fixup feature and code clean.
  drm/exynos: add HDMI version 1.4 support
  drm/exynos: remove exynos_mixer.h
  gma500: Fix mmap frambuffer
  drm/radeon: Drop radeon_gem_object_(un)pin.
  drm/radeon: Restrict offset for legacy display engine.
  ...

Diffstat (limited to 'drivers/gpu/drm/vmwgfx')
-rw-r--r--	drivers/gpu/drm/vmwgfx/vmwgfx_drv.c	 55
-rw-r--r--	drivers/gpu/drm/vmwgfx/vmwgfx_drv.h	 10
-rw-r--r--	drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c	 15
-rw-r--r--	drivers/gpu/drm/vmwgfx/vmwgfx_fb.c	 22
-rw-r--r--	drivers/gpu/drm/vmwgfx/vmwgfx_fence.c	252
-rw-r--r--	drivers/gpu/drm/vmwgfx/vmwgfx_fence.h	  9
-rw-r--r--	drivers/gpu/drm/vmwgfx/vmwgfx_kms.c	 91
-rw-r--r--	drivers/gpu/drm/vmwgfx/vmwgfx_kms.h	  8
-rw-r--r--	drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c	  4
-rw-r--r--	drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c	 38
10 files changed, 353 insertions, 151 deletions
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index 2d6f573..ee24d21 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -38,6 +38,10 @@
#define VMWGFX_CHIP_SVGAII 0
#define VMW_FB_RESERVATION 0
+#define VMW_MIN_INITIAL_WIDTH 800
+#define VMW_MIN_INITIAL_HEIGHT 600
+
+
/**
* Fully encoded drm commands. Might move to vmw_drm.h
*/
@@ -387,6 +391,41 @@ void vmw_3d_resource_dec(struct vmw_private *dev_priv,
BUG_ON(n3d < 0);
}
+/**
+ * Sets the initial_[width|height] fields on the given vmw_private.
+ *
+ * It does so by reading SVGA_REG_[WIDTH|HEIGHT] regs and then
+ * clamping the value to fb_max_[width|height] fields and the
+ * VMW_MIN_INITIAL_[WIDTH|HEIGHT].
+ * If the values appear to be invalid, set them to
+ * VMW_MIN_INITIAL_[WIDTH|HEIGHT].
+ */
+static void vmw_get_initial_size(struct vmw_private *dev_priv)
+{
+ uint32_t width;
+ uint32_t height;
+
+ width = vmw_read(dev_priv, SVGA_REG_WIDTH);
+ height = vmw_read(dev_priv, SVGA_REG_HEIGHT);
+
+ width = max_t(uint32_t, width, VMW_MIN_INITIAL_WIDTH);
+ height = max_t(uint32_t, height, VMW_MIN_INITIAL_HEIGHT);
+
+ if (width > dev_priv->fb_max_width ||
+ height > dev_priv->fb_max_height) {
+
+ /*
+ * This is a host error and shouldn't occur.
+ */
+
+ width = VMW_MIN_INITIAL_WIDTH;
+ height = VMW_MIN_INITIAL_HEIGHT;
+ }
+
+ dev_priv->initial_width = width;
+ dev_priv->initial_height = height;
+}
+
static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
{
struct vmw_private *dev_priv;
@@ -400,6 +439,8 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
}
memset(dev_priv, 0, sizeof(*dev_priv));
+ pci_set_master(dev->pdev);
+
dev_priv->dev = dev;
dev_priv->vmw_chipset = chipset;
dev_priv->last_read_seqno = (uint32_t) -100;
@@ -441,6 +482,9 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
dev_priv->mmio_size = vmw_read(dev_priv, SVGA_REG_MEM_SIZE);
dev_priv->fb_max_width = vmw_read(dev_priv, SVGA_REG_MAX_WIDTH);
dev_priv->fb_max_height = vmw_read(dev_priv, SVGA_REG_MAX_HEIGHT);
+
+ vmw_get_initial_size(dev_priv);
+
if (dev_priv->capabilities & SVGA_CAP_GMR) {
dev_priv->max_gmr_descriptors =
vmw_read(dev_priv,
@@ -688,6 +732,15 @@ static int vmw_driver_unload(struct drm_device *dev)
return 0;
}
+static void vmw_preclose(struct drm_device *dev,
+ struct drm_file *file_priv)
+{
+ struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
+ struct vmw_private *dev_priv = vmw_priv(dev);
+
+ vmw_event_fence_fpriv_gone(dev_priv->fman, &vmw_fp->fence_events);
+}
+
static void vmw_postclose(struct drm_device *dev,
struct drm_file *file_priv)
{
@@ -710,6 +763,7 @@ static int vmw_driver_open(struct drm_device *dev, struct drm_file *file_priv)
if (unlikely(vmw_fp == NULL))
return ret;
+ INIT_LIST_HEAD(&vmw_fp->fence_events);
vmw_fp->tfile = ttm_object_file_init(dev_priv->tdev, 10);
if (unlikely(vmw_fp->tfile == NULL))
goto out_no_tfile;
@@ -1102,6 +1156,7 @@ static struct drm_driver driver = {
.master_set = vmw_master_set,
.master_drop = vmw_master_drop,
.open = vmw_driver_open,
+ .preclose = vmw_preclose,
.postclose = vmw_postclose,
.fops = &vmwgfx_driver_fops,
.name = VMWGFX_DRIVER_NAME,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index dc27970..d0f2c07 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -40,9 +40,9 @@
#include "ttm/ttm_module.h"
#include "vmwgfx_fence.h"
-#define VMWGFX_DRIVER_DATE "20111025"
+#define VMWGFX_DRIVER_DATE "20120209"
#define VMWGFX_DRIVER_MAJOR 2
-#define VMWGFX_DRIVER_MINOR 3
+#define VMWGFX_DRIVER_MINOR 4
#define VMWGFX_DRIVER_PATCHLEVEL 0
#define VMWGFX_FILE_PAGE_OFFSET 0x00100000
#define VMWGFX_FIFO_STATIC_SIZE (1024*1024)
@@ -62,6 +62,7 @@
struct vmw_fpriv {
struct drm_master *locked_master;
struct ttm_object_file *tfile;
+ struct list_head fence_events;
};
struct vmw_dma_buffer {
@@ -202,6 +203,8 @@ struct vmw_private {
uint32_t mmio_size;
uint32_t fb_max_width;
uint32_t fb_max_height;
+ uint32_t initial_width;
+ uint32_t initial_height;
__le32 __iomem *mmio_virt;
int mmio_mtrr;
uint32_t capabilities;
@@ -533,7 +536,8 @@ extern int vmw_execbuf_process(struct drm_file *file_priv,
uint32_t command_size,
uint64_t throttle_us,
struct drm_vmw_fence_rep __user
- *user_fence_rep);
+ *user_fence_rep,
+ struct vmw_fence_obj **out_fence);
extern void
vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index 40932fb..4acced4 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -1109,10 +1109,11 @@ int vmw_execbuf_process(struct drm_file *file_priv,
void *kernel_commands,
uint32_t command_size,
uint64_t throttle_us,
- struct drm_vmw_fence_rep __user *user_fence_rep)
+ struct drm_vmw_fence_rep __user *user_fence_rep,
+ struct vmw_fence_obj **out_fence)
{
struct vmw_sw_context *sw_context = &dev_priv->ctx;
- struct vmw_fence_obj *fence;
+ struct vmw_fence_obj *fence = NULL;
uint32_t handle;
void *cmd;
int ret;
@@ -1208,8 +1209,13 @@ int vmw_execbuf_process(struct drm_file *file_priv,
vmw_execbuf_copy_fence_user(dev_priv, vmw_fpriv(file_priv), ret,
user_fence_rep, fence, handle);
- if (likely(fence != NULL))
+ /* Don't unreference when handing fence out */
+ if (unlikely(out_fence != NULL)) {
+ *out_fence = fence;
+ fence = NULL;
+ } else if (likely(fence != NULL)) {
vmw_fence_obj_unreference(&fence);
+ }
mutex_unlock(&dev_priv->cmdbuf_mutex);
return 0;
@@ -1362,7 +1368,8 @@ int vmw_execbuf_ioctl(struct drm_device *dev, void *data,
ret = vmw_execbuf_process(file_priv, dev_priv,
(void __user *)(unsigned long)arg->commands,
NULL, arg->command_size, arg->throttle_us,
- (void __user *)(unsigned long)arg->fence_rep);
+ (void __user *)(unsigned long)arg->fence_rep,
+ NULL);
if (unlikely(ret != 0))
goto out_unlock;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
index 34e51a1..3c447bf 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
@@ -414,10 +414,6 @@ int vmw_fb_init(struct vmw_private *vmw_priv)
unsigned fb_bpp, fb_depth, fb_offset, fb_pitch, fb_size;
int ret;
- /* XXX These shouldn't be hardcoded. */
- initial_width = 800;
- initial_height = 600;
-
fb_bpp = 32;
fb_depth = 24;
@@ -425,8 +421,8 @@ int vmw_fb_init(struct vmw_private *vmw_priv)
fb_width = min(vmw_priv->fb_max_width, (unsigned)2048);
fb_height = min(vmw_priv->fb_max_height, (unsigned)2048);
- initial_width = min(fb_width, initial_width);
- initial_height = min(fb_height, initial_height);
+ initial_width = min(vmw_priv->initial_width, fb_width);
+ initial_height = min(vmw_priv->initial_height, fb_height);
fb_pitch = fb_width * fb_bpp / 8;
fb_size = fb_pitch * fb_height;
@@ -515,19 +511,7 @@ int vmw_fb_init(struct vmw_private *vmw_priv)
info->var.xres = initial_width;
info->var.yres = initial_height;
-#if 0
- info->pixmap.size = 64*1024;
- info->pixmap.buf_align = 8;
- info->pixmap.access_align = 32;
- info->pixmap.flags = FB_PIXMAP_SYSTEM;
- info->pixmap.scan_align = 1;
-#else
- info->pixmap.size = 0;
- info->pixmap.buf_align = 8;
- info->pixmap.access_align = 32;
- info->pixmap.flags = FB_PIXMAP_SYSTEM;
- info->pixmap.scan_align = 1;
-#endif
+ /* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */
info->apertures = alloc_apertures(1);
if (!info->apertures) {
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
index 15fb260..f2fb8f1 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
@@ -69,12 +69,13 @@ struct vmw_user_fence {
* be assigned the current time tv_usec val when the fence signals.
*/
struct vmw_event_fence_action {
- struct drm_pending_event e;
struct vmw_fence_action action;
+ struct list_head fpriv_head;
+
+ struct drm_pending_event *event;
struct vmw_fence_obj *fence;
struct drm_device *dev;
- struct kref kref;
- uint32_t size;
+
uint32_t *tv_sec;
uint32_t *tv_usec;
};
@@ -784,46 +785,40 @@ int vmw_fence_obj_unref_ioctl(struct drm_device *dev, void *data,
}
/**
- * vmw_event_fence_action_destroy
- *
- * @kref: The struct kref embedded in a struct vmw_event_fence_action.
- *
- * The vmw_event_fence_action destructor that may be called either after
- * the fence action cleanup, or when the event is delivered.
- * It frees both the vmw_event_fence_action struct and the actual
- * event structure copied to user-space.
- */
-static void vmw_event_fence_action_destroy(struct kref *kref)
-{
- struct vmw_event_fence_action *eaction =
- container_of(kref, struct vmw_event_fence_action, kref);
- struct ttm_mem_global *mem_glob =
- vmw_mem_glob(vmw_priv(eaction->dev));
- uint32_t size = eaction->size;
-
- kfree(eaction->e.event);
- kfree(eaction);
- ttm_mem_global_free(mem_glob, size);
-}
-
-
-/**
- * vmw_event_fence_action_delivered
+ * vmw_event_fence_fpriv_gone - Remove references to struct drm_file objects
*
- * @e: The struct drm_pending_event embedded in a struct
- * vmw_event_fence_action.
+ * @fman: Pointer to a struct vmw_fence_manager
+ * @event_list: Pointer to linked list of struct vmw_event_fence_action objects
+ * with pointers to a struct drm_file object about to be closed.
*
- * The struct drm_pending_event destructor that is called by drm
- * once the event is delivered. Since we don't know whether this function
- * will be called before or after the fence action destructor, we
- * free a refcount and destroy if it becomes zero.
+ * This function removes all pending fence events with references to a
+ * specific struct drm_file object about to be closed. The caller is required
+ * to pass a list of all struct vmw_event_fence_action objects with such
+ * events attached. This function is typically called before the
+ * struct drm_file object's event management is taken down.
*/
-static void vmw_event_fence_action_delivered(struct drm_pending_event *e)
+void vmw_event_fence_fpriv_gone(struct vmw_fence_manager *fman,
+ struct list_head *event_list)
{
- struct vmw_event_fence_action *eaction =
- container_of(e, struct vmw_event_fence_action, e);
+ struct vmw_event_fence_action *eaction;
+ struct drm_pending_event *event;
+ unsigned long irq_flags;
- kref_put(&eaction->kref, vmw_event_fence_action_destroy);
+ while (1) {
+ spin_lock_irqsave(&fman->lock, irq_flags);
+ if (list_empty(event_list))
+ goto out_unlock;
+ eaction = list_first_entry(event_list,
+ struct vmw_event_fence_action,
+ fpriv_head);
+ list_del_init(&eaction->fpriv_head);
+ event = eaction->event;
+ eaction->event = NULL;
+ spin_unlock_irqrestore(&fman->lock, irq_flags);
+ event->destroy(event);
+ }
+out_unlock:
+ spin_unlock_irqrestore(&fman->lock, irq_flags);
}
@@ -836,18 +831,21 @@ static void vmw_event_fence_action_delivered(struct drm_pending_event *e)
* This function is called when the seqno of the fence where @action is
* attached has passed. It queues the event on the submitter's event list.
* This function is always called from atomic context, and may be called
- * from irq context. It ups a refcount reflecting that we now have two
- * destructors.
+ * from irq context.
*/
static void vmw_event_fence_action_seq_passed(struct vmw_fence_action *action)
{
struct vmw_event_fence_action *eaction =
container_of(action, struct vmw_event_fence_action, action);
struct drm_device *dev = eaction->dev;
- struct drm_file *file_priv = eaction->e.file_priv;
+ struct drm_pending_event *event = eaction->event;
+ struct drm_file *file_priv;
unsigned long irq_flags;
- kref_get(&eaction->kref);
+ if (unlikely(event == NULL))
+ return;
+
+ file_priv = event->file_priv;
spin_lock_irqsave(&dev->event_lock, irq_flags);
if (likely(eaction->tv_sec != NULL)) {
@@ -858,7 +856,9 @@ static void vmw_event_fence_action_seq_passed(struct vmw_fence_action *action)
*eaction->tv_usec = tv.tv_usec;
}
- list_add_tail(&eaction->e.link, &file_priv->event_list);
+ list_del_init(&eaction->fpriv_head);
+ list_add_tail(&eaction->event->link, &file_priv->event_list);
+ eaction->event = NULL;
wake_up_all(&file_priv->event_wait);
spin_unlock_irqrestore(&dev->event_lock, irq_flags);
}
@@ -876,9 +876,15 @@ static void vmw_event_fence_action_cleanup(struct vmw_fence_action *action)
{
struct vmw_event_fence_action *eaction =
container_of(action, struct vmw_event_fence_action, action);
+ struct vmw_fence_manager *fman = eaction->fence->fman;
+ unsigned long irq_flags;
+
+ spin_lock_irqsave(&fman->lock, irq_flags);
+ list_del(&eaction->fpriv_head);
+ spin_unlock_irqrestore(&fman->lock, irq_flags);
vmw_fence_obj_unreference(&eaction->fence);
- kref_put(&eaction->kref, vmw_event_fence_action_destroy);
+ kfree(eaction);
}
@@ -946,39 +952,23 @@ void vmw_fence_obj_add_action(struct vmw_fence_obj *fence,
* an error code, the caller needs to free that object.
*/
-int vmw_event_fence_action_create(struct drm_file *file_priv,
- struct vmw_fence_obj *fence,
- struct drm_event *event,
- uint32_t *tv_sec,
- uint32_t *tv_usec,
- bool interruptible)
+int vmw_event_fence_action_queue(struct drm_file *file_priv,
+ struct vmw_fence_obj *fence,
+ struct drm_pending_event *event,
+ uint32_t *tv_sec,
+ uint32_t *tv_usec,
+ bool interruptible)
{
struct vmw_event_fence_action *eaction;
- struct ttm_mem_global *mem_glob =
- vmw_mem_glob(fence->fman->dev_priv);
struct vmw_fence_manager *fman = fence->fman;
- uint32_t size = fman->event_fence_action_size +
- ttm_round_pot(event->length);
- int ret;
-
- /*
- * Account for internal structure size as well as the
- * event size itself.
- */
-
- ret = ttm_mem_global_alloc(mem_glob, size, false, interruptible);
- if (unlikely(ret != 0))
- return ret;
+ struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
+ unsigned long irq_flags;
eaction = kzalloc(sizeof(*eaction), GFP_KERNEL);
- if (unlikely(eaction == NULL)) {
- ttm_mem_global_free(mem_glob, size);
+ if (unlikely(eaction == NULL))
return -ENOMEM;
- }
- eaction->e.event = event;
- eaction->e.file_priv = file_priv;
- eaction->e.destroy = vmw_event_fence_action_delivered;
+ eaction->event = event;
eaction->action.seq_passed = vmw_event_fence_action_seq_passed;
eaction->action.cleanup = vmw_event_fence_action_cleanup;
@@ -986,16 +976,89 @@ int vmw_event_fence_action_create(struct drm_file *file_priv,
eaction->fence = vmw_fence_obj_reference(fence);
eaction->dev = fman->dev_priv->dev;
- eaction->size = size;
eaction->tv_sec = tv_sec;
eaction->tv_usec = tv_usec;
- kref_init(&eaction->kref);
+ spin_lock_irqsave(&fman->lock, irq_flags);
+ list_add_tail(&eaction->fpriv_head, &vmw_fp->fence_events);
+ spin_unlock_irqrestore(&fman->lock, irq_flags);
+
vmw_fence_obj_add_action(fence, &eaction->action);
return 0;
}
+struct vmw_event_fence_pending {
+ struct drm_pending_event base;
+ struct drm_vmw_event_fence event;
+};
+
+int vmw_event_fence_action_create(struct drm_file *file_priv,
+ struct vmw_fence_obj *fence,
+ uint32_t flags,
+ uint64_t user_data,
+ bool interruptible)
+{
+ struct vmw_event_fence_pending *event;
+ struct drm_device *dev = fence->fman->dev_priv->dev;
+ unsigned long irq_flags;
+ int ret;
+
+ spin_lock_irqsave(&dev->event_lock, irq_flags);
+
+ ret = (file_priv->event_space < sizeof(event->event)) ? -EBUSY : 0;
+ if (likely(ret == 0))
+ file_priv->event_space -= sizeof(event->event);
+
+ spin_unlock_irqrestore(&dev->event_lock, irq_flags);
+
+ if (unlikely(ret != 0)) {
+ DRM_ERROR("Failed to allocate event space for this file.\n");
+ goto out_no_space;
+ }
+
+
+ event = kzalloc(sizeof(event->event), GFP_KERNEL);
+ if (unlikely(event == NULL)) {
+ DRM_ERROR("Failed to allocate an event.\n");
+ ret = -ENOMEM;
+ goto out_no_event;
+ }
+
+ event->event.base.type = DRM_VMW_EVENT_FENCE_SIGNALED;
+ event->event.base.length = sizeof(*event);
+ event->event.user_data = user_data;
+
+ event->base.event = &event->event.base;
+ event->base.file_priv = file_priv;
+ event->base.destroy = (void (*) (struct drm_pending_event *)) kfree;
+
+
+ if (flags & DRM_VMW_FE_FLAG_REQ_TIME)
+ ret = vmw_event_fence_action_queue(file_priv, fence,
+ &event->base,
+ &event->event.tv_sec,
+ &event->event.tv_usec,
+ interruptible);
+ else
+ ret = vmw_event_fence_action_queue(file_priv, fence,
+ &event->base,
+ NULL,
+ NULL,
+ interruptible);
+ if (ret != 0)
+ goto out_no_queue;
+
+out_no_queue:
+ event->base.destroy(&event->base);
+out_no_event:
+ spin_lock_irqsave(&dev->event_lock, irq_flags);
+ file_priv->event_space += sizeof(*event);
+ spin_unlock_irqrestore(&dev->event_lock, irq_flags);
+out_no_space:
+ return ret;
+}
+
int vmw_fence_event_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
@@ -1008,8 +1071,6 @@ int vmw_fence_event_ioctl(struct drm_device *dev, void *data,
(struct drm_vmw_fence_rep __user *)(unsigned long)
arg->fence_rep;
uint32_t handle;
- unsigned long irq_flags;
- struct drm_vmw_event_fence *event;
int ret;
/*
@@ -1062,59 +1123,28 @@ int vmw_fence_event_ioctl(struct drm_device *dev, void *data,
BUG_ON(fence == NULL);
- spin_lock_irqsave(&dev->event_lock, irq_flags);
-
- ret = (file_priv->event_space < sizeof(*event)) ? -EBUSY : 0;
- if (likely(ret == 0))
- file_priv->event_space -= sizeof(*event);
-
- spin_unlock_irqrestore(&dev->event_lock, irq_flags);
-
- if (unlikely(ret != 0)) {
- DRM_ERROR("Failed to allocate event space for this file.\n");
- goto out_no_event_space;
- }
-
- event = kzalloc(sizeof(*event), GFP_KERNEL);
- if (unlikely(event == NULL)) {
- DRM_ERROR("Failed to allocate an event.\n");
- goto out_no_event;
- }
-
- event->base.type = DRM_VMW_EVENT_FENCE_SIGNALED;
- event->base.length = sizeof(*event);
- event->user_data = arg->user_data;
-
if (arg->flags & DRM_VMW_FE_FLAG_REQ_TIME)
ret = vmw_event_fence_action_create(file_priv, fence,
- &event->base,
- &event->tv_sec,
- &event->tv_usec,
+ arg->flags,
+ arg->user_data,
true);
else
ret = vmw_event_fence_action_create(file_priv, fence,
- &event->base,
- NULL,
- NULL,
+ arg->flags,
+ arg->user_data,
true);
if (unlikely(ret != 0)) {
if (ret != -ERESTARTSYS)
DRM_ERROR("Failed to attach event to fence.\n");
- goto out_no_attach;
+ goto out_no_create;
}
vmw_execbuf_copy_fence_user(dev_priv, vmw_fp, 0, user_fence_rep, fence,
handle);
vmw_fence_obj_unreference(&fence);
return 0;
-out_no_attach:
- kfree(event);
-out_no_event:
- spin_lock_irqsave(&dev->event_lock, irq_flags);
- file_priv->event_space += sizeof(*event);
- spin_unlock_irqrestore(&dev->event_lock, irq_flags);
-out_no_event_space:
+out_no_create:
if (user_fence_rep != NULL)
ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
handle, TTM_REF_USAGE);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.h b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.h
index 0854a20..faf2e78 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.h
@@ -109,5 +109,12 @@ extern int vmw_fence_obj_unref_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern int vmw_fence_event_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
-
+extern void vmw_event_fence_fpriv_gone(struct vmw_fence_manager *fman,
+ struct list_head *event_list);
+extern int vmw_event_fence_action_queue(struct drm_file *filee_priv,
+ struct vmw_fence_obj *fence,
+ struct drm_pending_event *event,
+ uint32_t *tv_sec,
+ uint32_t *tv_usec,
+ bool interruptible);
#endif /* _VMWGFX_FENCE_H_ */
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index b66ef0e..2286d47 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -422,7 +422,8 @@ static int do_surface_dirty_sou(struct vmw_private *dev_priv,
struct vmw_framebuffer *framebuffer,
unsigned flags, unsigned color,
struct drm_clip_rect *clips,
- unsigned num_clips, int inc)
+ unsigned num_clips, int inc,
+ struct vmw_fence_obj **out_fence)
{
struct vmw_display_unit *units[VMWGFX_NUM_DISPLAY_UNITS];
struct drm_clip_rect *clips_ptr;
@@ -542,12 +543,15 @@ static int do_surface_dirty_sou(struct vmw_private *dev_priv,
if (num == 0)
continue;
+ /* only return the last fence */
+ if (out_fence && *out_fence)
+ vmw_fence_obj_unreference(out_fence);
/* recalculate package length */
fifo_size = sizeof(*cmd) + sizeof(SVGASignedRect) * num;
cmd->header.size = cpu_to_le32(fifo_size - sizeof(cmd->header));
ret = vmw_execbuf_process(file_priv, dev_priv, NULL, cmd,
- fifo_size, 0, NULL);
+ fifo_size, 0, NULL, out_fence);
if (unlikely(ret != 0))
break;
@@ -598,7 +602,7 @@ int vmw_framebuffer_surface_dirty(struct drm_framebuffer *framebuffer,
ret = do_surface_dirty_sou(dev_priv, file_priv, &vfbs->base,
flags, color,
- clips, num_clips, inc);
+ clips, num_clips, inc, NULL);
ttm_read_unlock(&vmaster->lock);
return 0;
@@ -809,7 +813,7 @@ static int do_dmabuf_define_gmrfb(struct drm_file *file_priv,
cmd->body.ptr.offset = 0;
ret = vmw_execbuf_process(file_priv, dev_priv, NULL, cmd,
- fifo_size, 0, NULL);
+ fifo_size, 0, NULL, NULL);
kfree(cmd);
@@ -821,7 +825,8 @@ static int do_dmabuf_dirty_sou(struct drm_file *file_priv,
struct vmw_framebuffer *framebuffer,
unsigned flags, unsigned color,
struct drm_clip_rect *clips,
- unsigned num_clips, int increment)
+ unsigned num_clips, int increment,
+ struct vmw_fence_obj **out_fence)
{
struct vmw_display_unit *units[VMWGFX_NUM_DISPLAY_UNITS];
struct drm_clip_rect *clips_ptr;
@@ -894,9 +899,13 @@ static int do_dmabuf_dirty_sou(struct drm_file *file_priv,
if (hit_num == 0)
continue;
+ /* only return the last fence */
+ if (out_fence && *out_fence)
+ vmw_fence_obj_unreference(out_fence);
+
fifo_size = sizeof(*blits) * hit_num;
ret = vmw_execbuf_process(file_priv, dev_priv, NULL, blits,
- fifo_size, 0, NULL);
+ fifo_size, 0, NULL, out_fence);
if (unlikely(ret != 0))
break;
@@ -942,7 +951,7 @@ int vmw_framebuffer_dmabuf_dirty(struct drm_framebuffer *framebuffer,
} else {
ret = do_dmabuf_dirty_sou(file_priv, dev_priv, &vfbd->base,
flags, color,
- clips, num_clips, increment);
+ clips, num_clips, increment, NULL);
}
ttm_read_unlock(&vmaster->lock);
@@ -1296,7 +1305,7 @@ int vmw_kms_present(struct vmw_private *dev_priv,
fifo_size = sizeof(*cmd) + sizeof(SVGASignedRect) * num;
cmd->header.size = cpu_to_le32(fifo_size - sizeof(cmd->header));
ret = vmw_execbuf_process(file_priv, dev_priv, NULL, cmd,
- fifo_size, 0, NULL);
+ fifo_size, 0, NULL, NULL);
if (unlikely(ret != 0))
break;
@@ -1409,7 +1418,7 @@ int vmw_kms_readback(struct vmw_private *dev_priv,
fifo_size = sizeof(*cmd) + sizeof(*blits) * blits_pos;
ret = vmw_execbuf_process(file_priv, dev_priv, NULL, cmd, fifo_size,
- 0, user_fence_rep);
+ 0, user_fence_rep, NULL);
kfree(cmd);
@@ -1672,6 +1681,70 @@ int vmw_du_update_layout(struct vmw_private *dev_priv, unsigned num,
return 0;
}
+int vmw_du_page_flip(struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ struct drm_pending_vblank_event *event)
+{
+ struct vmw_private *dev_priv = vmw_priv(crtc->dev);
+ struct drm_framebuffer *old_fb = crtc->fb;
+ struct vmw_framebuffer *vfb = vmw_framebuffer_to_vfb(fb);
+ struct drm_file *file_priv = event->base.file_priv;
+ struct vmw_fence_obj *fence = NULL;
+ struct drm_clip_rect clips;
+ int ret;
+
+ /* require ScreenObject support for page flipping */
+ if (!dev_priv->sou_priv)
+ return -ENOSYS;
+
+ if (!vmw_kms_screen_object_flippable(dev_priv, crtc))
+ return -EINVAL;
+
+ crtc->fb = fb;
+
+ /* do a full screen dirty update */
+ clips.x1 = clips.y1 = 0;
+ clips.x2 = fb->width;
+ clips.y2 = fb->height;
+
+ if (vfb->dmabuf)
+ ret = do_dmabuf_dirty_sou(file_priv, dev_priv, vfb,
+ 0, 0, &clips, 1, 1, &fence);
+ else
+ ret = do_surface_dirty_sou(dev_priv, file_priv, vfb,
+ 0, 0, &clips, 1, 1, &fence);
+
+
+ if (ret != 0)
+ goto out_no_fence;
+ if (!fence) {
+ ret = -EINVAL;
+ goto out_no_fence;
+ }
+
+ ret = vmw_event_fence_action_queue(file_priv, fence,
+ &event->base,
+ &event->event.tv_sec,
+ &event->event.tv_usec,
+ true);
+
+ /*
+ * No need to hold on to this now. The only cleanup
+ * we need to do if we fail is unref the fence.
+ */
+ vmw_fence_obj_unreference(&fence);
+
+ if (vmw_crtc_to_du(crtc)->is_implicit)
+ vmw_kms_screen_object_update_implicit_fb(dev_priv, crtc);
+
+ return ret;
+
+out_no_fence:
+ crtc->fb = old_fb;
+ return ret;
+}
+
+
void vmw_du_crtc_save(struct drm_crtc *crtc)
{
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
index a4f7f03..8184bc5 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
@@ -121,6 +121,9 @@ struct vmw_display_unit {
* Shared display unit functions - vmwgfx_kms.c
*/
void vmw_display_unit_cleanup(struct vmw_display_unit *du);
+int vmw_du_page_flip(struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ struct drm_pending_vblank_event *event);
void vmw_du_crtc_save(struct drm_crtc *crtc);
void vmw_du_crtc_restore(struct drm_crtc *crtc);
void vmw_du_crtc_gamma_set(struct drm_crtc *crtc,
@@ -154,5 +157,10 @@ int vmw_kms_init_screen_object_display(struct vmw_private *dev_priv);
int vmw_kms_close_screen_object_display(struct vmw_private *dev_priv);
int vmw_kms_sou_update_layout(struct vmw_private *dev_priv, unsigned num,
struct drm_vmw_rect *rects);
+bool vmw_kms_screen_object_flippable(struct vmw_private *dev_priv,
+ struct drm_crtc *crtc);
+void vmw_kms_screen_object_update_implicit_fb(struct vmw_private *dev_priv,
+ struct drm_crtc *crtc);
+
#endif
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
index f77b184..070fb23 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
@@ -354,8 +354,8 @@ static int vmw_ldu_init(struct vmw_private *dev_priv, unsigned unit)
INIT_LIST_HEAD(&ldu->active);
ldu->base.pref_active = (unit == 0);
- ldu->base.pref_width = 800;
- ldu->base.pref_height = 600;
+ ldu->base.pref_width = dev_priv->initial_width;
+ ldu->base.pref_height = dev_priv->initial_height;
ldu->base.pref_mode = NULL;
ldu->base.is_implicit = true;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
index 4defdcf..6deaf2f 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
@@ -394,6 +394,7 @@ static struct drm_crtc_funcs vmw_screen_object_crtc_funcs = {
.gamma_set = vmw_du_crtc_gamma_set,
.destroy = vmw_sou_crtc_destroy,
.set_config = vmw_sou_crtc_set_config,
+ .page_flip = vmw_du_page_flip,
};
/*
@@ -448,8 +449,8 @@ static int vmw_sou_init(struct vmw_private *dev_priv, unsigned unit)
sou->active_implicit = false;
sou->base.pref_active = (unit == 0);
- sou->base.pref_width = 800;
- sou->base.pref_height = 600;
+ sou->base.pref_width = dev_priv->initial_width;
+ sou->base.pref_height = dev_priv->initial_height;
sou->base.pref_mode = NULL;
sou->base.is_implicit = true;
@@ -535,3 +536,36 @@ int vmw_kms_close_screen_object_display(struct vmw_private *dev_priv)
return 0;
}
+
+/**
+ * Returns if this unit can be page flipped.
+ * Must be called with the mode_config mutex held.
+ */
+bool vmw_kms_screen_object_flippable(struct vmw_private *dev_priv,
+ struct drm_crtc *crtc)
+{
+ struct vmw_screen_object_unit *sou = vmw_crtc_to_sou(crtc);
+
+ if (!sou->base.is_implicit)
+ return true;
+
+ if (dev_priv->sou_priv->num_implicit != 1)
+ return false;
+
+ return true;
+}
+
+/**
+ * Update the implicit fb to the current fb of this crtc.
+ * Must be called with the mode_config mutex held.
+ */
+void vmw_kms_screen_object_update_implicit_fb(struct vmw_private *dev_priv,
+ struct drm_crtc *crtc)
+{
+ struct vmw_screen_object_unit *sou = vmw_crtc_to_sou(crtc);
+
+ BUG_ON(!sou->base.is_implicit);
+
+ dev_priv->sou_priv->implicit_fb =
+ vmw_framebuffer_to_vfb(sou->base.crtc.fb);
+}