author      Linus Torvalds <torvalds@woody.linux-foundation.org>    2007-05-07 19:24:07 (GMT)
committer   Linus Torvalds <torvalds@woody.linux-foundation.org>    2007-05-07 19:24:07 (GMT)
commit      5f757f91e70a97eda8f0cc13bddc853209b2d173 (patch)
tree        e2e97796f5145b5d6eb3fee6ec93050d90f2bd7d
parent      9fa0853a85a3a4067e4ad0aaa5d90984c2dd21b5 (diff)
parent      ce7dd06372058f9e3e57ee4c0aeba694a43a80ad (diff)
download    linux-5f757f91e70a97eda8f0cc13bddc853209b2d173.tar.xz
Merge branch 'drm-patches' of master.kernel.org:/pub/scm/linux/kernel/git/airlied/drm-2.6
* 'drm-patches' of master.kernel.org:/pub/scm/linux/kernel/git/airlied/drm-2.6:
drm/i915: Add 965GM pci id update
drm: just use io_remap_pfn_range on all archs..
drm: fix DRM_CONSISTENT mapping
drm: fix up mmap locking in preparation for ttm changes
drm: fix driver deadlock with AIGLX and reclaim_buffers_locked
drm: fix warning in drm_fops.c
drm: allow for more generic drm ioctls
drm: fix alpha domain handling
via: fix CX700 pci id
drm: make drm_io_prot static.
drm: remove via_mm.h
drm: add missing NULL assignment
drm/radeon: Fix u32 overflows when determining AGP base address in card space.
drm: port over use_vmalloc code from git hashtab
drm: fix crash with fops lock and fixup sarea/page size locking
drm: bring bufs code from git tree.
drm: move protection stuff into separate function
drm: Use ARRAY_SIZE macro when appropriate
drm: update README.drm (bugzilla #7933)
drm: remove unused exports
-rw-r--r--   drivers/char/drm/README.drm    |  16
-rw-r--r--   drivers/char/drm/drm.h         |   4
-rw-r--r--   drivers/char/drm/drmP.h        |  23
-rw-r--r--   drivers/char/drm/drm_bufs.c    |  75
-rw-r--r--   drivers/char/drm/drm_drv.c     |   9
-rw-r--r--   drivers/char/drm/drm_fops.c    |  96
-rw-r--r--   drivers/char/drm/drm_hashtab.c |  17
-rw-r--r--   drivers/char/drm/drm_hashtab.h |   1
-rw-r--r--   drivers/char/drm/drm_irq.c     |   4
-rw-r--r--   drivers/char/drm/drm_lock.c    | 134
-rw-r--r--   drivers/char/drm/drm_mm.c      |   2
-rw-r--r--   drivers/char/drm/drm_pciids.h  |   3
-rw-r--r--   drivers/char/drm/drm_proc.c    |   2
-rw-r--r--   drivers/char/drm/drm_stub.c    |   1
-rw-r--r--   drivers/char/drm/drm_vm.c      | 102
-rw-r--r--   drivers/char/drm/i915_dma.c    |   3
-rw-r--r--   drivers/char/drm/radeon_cp.c   |   8
-rw-r--r--   drivers/char/drm/sis_drv.c     |   2
-rw-r--r--   drivers/char/drm/via_drv.c     |   3
-rw-r--r--   drivers/char/drm/via_mm.h      |  40
20 files changed, 349 insertions, 196 deletions
diff --git a/drivers/char/drm/README.drm b/drivers/char/drm/README.drm
index 6441e01..af74cd7 100644
--- a/drivers/char/drm/README.drm
+++ b/drivers/char/drm/README.drm
@@ -1,6 +1,6 @@
 ************************************************************
 * For the very latest on DRI development, please see:      *
-* http://dri.sourceforge.net/                               *
+* http://dri.freedesktop.org/                               *
 ************************************************************
 
 The Direct Rendering Manager (drm) is a device-independent kernel-level
@@ -26,21 +26,19 @@ ways:
 
 Documentation on the DRI is available from:
-    http://precisioninsight.com/piinsights.html
+    http://dri.freedesktop.org/wiki/Documentation
+    http://sourceforge.net/project/showfiles.php?group_id=387
+    http://dri.sourceforge.net/doc/
 
 For specific information about kernel-level support, see:
 
     The Direct Rendering Manager, Kernel Support for the Direct
     Rendering Infrastructure
-    http://precisioninsight.com/dr/drm.html
+    http://dri.sourceforge.net/doc/drm_low_level.html
 
     Hardware Locking for the Direct Rendering Infrastructure
-    http://precisioninsight.com/dr/locking.html
+    http://dri.sourceforge.net/doc/hardware_locking_low_level.html
 
     A Security Analysis of the Direct Rendering Infrastructure
-    http://precisioninsight.com/dr/security.html
+    http://dri.sourceforge.net/doc/security_low_level.html
 
-************************************************************
-* For the very latest on DRI development, please see:      *
-* http://dri.sourceforge.net/                               *
-************************************************************
 
diff --git a/drivers/char/drm/drm.h b/drivers/char/drm/drm.h
index 8db9041..0891984 100644
--- a/drivers/char/drm/drm.h
+++ b/drivers/char/drm/drm.h
@@ -654,11 +654,13 @@ typedef struct drm_set_version {
 
 /**
  * Device specific ioctls should only be in their respective headers
- * The device specific ioctl range is from 0x40 to 0x79.
+ * The device specific ioctl range is from 0x40 to 0x99.
+ * Generic IOCTLS restart at 0xA0.
  *
  * \sa drmCommandNone(), drmCommandRead(), drmCommandWrite(), and
  * drmCommandReadWrite().
  */
 #define DRM_COMMAND_BASE                0x40
+#define DRM_COMMAND_END                 0xA0
 
 #endif
diff --git a/drivers/char/drm/drmP.h b/drivers/char/drm/drmP.h
index 85d99e2..80041d5 100644
--- a/drivers/char/drm/drmP.h
+++ b/drivers/char/drm/drmP.h
@@ -414,6 +414,10 @@ typedef struct drm_lock_data {
         struct file *filp;              /**< File descr of lock holder (0=kernel) */
         wait_queue_head_t lock_queue;   /**< Queue of blocked processes */
         unsigned long lock_time;        /**< Time of last lock in jiffies */
+        spinlock_t spinlock;
+        uint32_t kernel_waiters;
+        uint32_t user_waiters;
+        int idle_has_lock;
 } drm_lock_data_t;
 
 /**
@@ -590,6 +594,8 @@ struct drm_driver {
         void (*reclaim_buffers) (struct drm_device * dev, struct file * filp);
         void (*reclaim_buffers_locked) (struct drm_device *dev,
                                         struct file *filp);
+        void (*reclaim_buffers_idlelocked) (struct drm_device *dev,
+                                            struct file * filp);
         unsigned long (*get_map_ofs) (drm_map_t * map);
         unsigned long (*get_reg_ofs) (struct drm_device * dev);
         void (*set_version) (struct drm_device * dev, drm_set_version_t * sv);
@@ -764,7 +770,7 @@ static __inline__ int drm_core_check_feature(struct drm_device *dev,
 }
 
 #ifdef __alpha__
-#define drm_get_pci_domain(dev) dev->hose->bus->number
+#define drm_get_pci_domain(dev) dev->hose->index
 #else
 #define drm_get_pci_domain(dev) 0
 #endif
@@ -915,9 +921,18 @@ extern int drm_lock(struct inode *inode, struct file *filp,
                     unsigned int cmd, unsigned long arg);
 extern int drm_unlock(struct inode *inode, struct file *filp,
                       unsigned int cmd, unsigned long arg);
-extern int drm_lock_take(__volatile__ unsigned int *lock, unsigned int context);
-extern int drm_lock_free(drm_device_t * dev,
-                         __volatile__ unsigned int *lock, unsigned int context);
+extern int drm_lock_take(drm_lock_data_t *lock_data, unsigned int context);
+extern int drm_lock_free(drm_lock_data_t *lock_data, unsigned int context);
+extern void drm_idlelock_take(drm_lock_data_t *lock_data);
+extern void drm_idlelock_release(drm_lock_data_t *lock_data);
+
+/*
+ * These are exported to drivers so that they can implement fencing using
+ * DMA quiscent + idle. DMA quiescent usually requires the hardware lock.
+ */
+
+extern int drm_i_have_hw_lock(struct file *filp);
+extern int drm_kernel_take_hw_lock(struct file *filp);
 
                                 /* Buffer management support (drm_bufs.h) */
 extern int drm_addbufs_agp(drm_device_t * dev, drm_buf_desc_t * request);
diff --git a/drivers/char/drm/drm_bufs.c b/drivers/char/drm/drm_bufs.c
index a6828cc..c113458 100644
--- a/drivers/char/drm/drm_bufs.c
+++ b/drivers/char/drm/drm_bufs.c
@@ -57,7 +57,8 @@ static drm_map_list_t *drm_find_matching_map(drm_device_t *dev,
         list_for_each(list, &dev->maplist->head) {
                 drm_map_list_t *entry = list_entry(list, drm_map_list_t, head);
                 if (entry->map && map->type == entry->map->type &&
-                    entry->map->offset == map->offset) {
+                    ((entry->map->offset == map->offset) ||
+                     (map->type == _DRM_SHM && map->flags==_DRM_CONTAINS_LOCK))) {
                         return entry;
                 }
         }
@@ -180,8 +181,20 @@ static int drm_addmap_core(drm_device_t * dev, unsigned int offset,
                 if (map->type == _DRM_REGISTERS)
                         map->handle = ioremap(map->offset, map->size);
                 break;
-
         case _DRM_SHM:
+                list = drm_find_matching_map(dev, map);
+                if (list != NULL) {
+                        if(list->map->size != map->size) {
+                                DRM_DEBUG("Matching maps of type %d with "
+                                          "mismatched sizes, (%ld vs %ld)\n",
+                                          map->type, map->size, list->map->size);
+                                list->map->size = map->size;
+                        }
+
+                        drm_free(map, sizeof(*map), DRM_MEM_MAPS);
+                        *maplist = list;
+                        return 0;
+                }
                 map->handle = vmalloc_user(map->size);
                 DRM_DEBUG("%lu %d %p\n",
                           map->size, drm_order(map->size), map->handle);
@@ -200,15 +213,45 @@ static int drm_addmap_core(drm_device_t * dev, unsigned int offset,
                         dev->sigdata.lock = dev->lock.hw_lock = map->handle;    /* Pointer to lock */
                 }
                 break;
-        case _DRM_AGP:
-                if (drm_core_has_AGP(dev)) {
+        case _DRM_AGP: {
+                drm_agp_mem_t *entry;
+                int valid = 0;
+
+                if (!drm_core_has_AGP(dev)) {
+                        drm_free(map, sizeof(*map), DRM_MEM_MAPS);
+                        return -EINVAL;
+                }
 #ifdef __alpha__
-                        map->offset += dev->hose->mem_space->start;
+                map->offset += dev->hose->mem_space->start;
 #endif
-                        map->offset += dev->agp->base;
-                        map->mtrr = dev->agp->agp_mtrr; /* for getmap */
+                /* Note: dev->agp->base may actually be 0 when the DRM
+                 * is not in control of AGP space. But if user space is
+                 * it should already have added the AGP base itself.
+                 */
+                map->offset += dev->agp->base;
+                map->mtrr = dev->agp->agp_mtrr; /* for getmap */
+
+                /* This assumes the DRM is in total control of AGP space.
+                 * It's not always the case as AGP can be in the control
+                 * of user space (i.e. i810 driver). So this loop will get
+                 * skipped and we double check that dev->agp->memory is
+                 * actually set as well as being invalid before EPERM'ing
+                 */
+                for (entry = dev->agp->memory; entry; entry = entry->next) {
+                        if ((map->offset >= entry->bound) &&
+                            (map->offset + map->size <= entry->bound + entry->pages * PAGE_SIZE)) {
+                                valid = 1;
+                                break;
+                        }
                 }
+                if (dev->agp->memory && !valid) {
+                        drm_free(map, sizeof(*map), DRM_MEM_MAPS);
+                        return -EPERM;
+                }
+                DRM_DEBUG("AGP offset = 0x%08lx, size = 0x%08lx\n", map->offset, map->size);
+
                 break;
+        }
         case _DRM_SCATTER_GATHER:
                 if (!dev->sg) {
                         drm_free(map, sizeof(*map), DRM_MEM_MAPS);
@@ -267,7 +310,7 @@ static int drm_addmap_core(drm_device_t * dev, unsigned int offset,
 
         *maplist = list;
         return 0;
-}
+ }
 
 int drm_addmap(drm_device_t * dev, unsigned int offset,
                unsigned int size, drm_map_type_t type,
@@ -519,6 +562,7 @@ int drm_addbufs_agp(drm_device_t * dev, drm_buf_desc_t * request)
 {
         drm_device_dma_t *dma = dev->dma;
         drm_buf_entry_t *entry;
+        drm_agp_mem_t *agp_entry;
         drm_buf_t *buf;
         unsigned long offset;
         unsigned long agp_offset;
@@ -529,7 +573,7 @@ int drm_addbufs_agp(drm_device_t * dev, drm_buf_desc_t * request)
         int page_order;
         int total;
         int byte_count;
-        int i;
+        int i, valid;
         drm_buf_t **temp_buflist;
 
         if (!dma)
@@ -560,6 +604,19 @@ int drm_addbufs_agp(drm_device_t * dev, drm_buf_desc_t * request)
         if (dev->queue_count)
                 return -EBUSY;  /* Not while in use */
 
+        /* Make sure buffers are located in AGP memory that we own */
+        valid = 0;
+        for (agp_entry = dev->agp->memory; agp_entry; agp_entry = agp_entry->next) {
+                if ((agp_offset >= agp_entry->bound) &&
+                    (agp_offset + total * count <= agp_entry->bound + agp_entry->pages * PAGE_SIZE)) {
+                        valid = 1;
+                        break;
+                }
+        }
+        if (dev->agp->memory && !valid) {
+                DRM_DEBUG("zone invalid\n");
+                return -EINVAL;
+        }
         spin_lock(&dev->count_lock);
         if (dev->buf_use) {
                 spin_unlock(&dev->count_lock);
diff --git a/drivers/char/drm/drm_drv.c b/drivers/char/drm/drm_drv.c
index f5b9b24..26bec30 100644
--- a/drivers/char/drm/drm_drv.c
+++ b/drivers/char/drm/drm_drv.c
@@ -496,11 +496,14 @@ int drm_ioctl(struct inode *inode, struct file *filp,
                   (long)old_encode_dev(priv->head->device),
                   priv->authenticated);
 
-        if (nr < DRIVER_IOCTL_COUNT)
-                ioctl = &drm_ioctls[nr];
-        else if ((nr >= DRM_COMMAND_BASE)
+        if ((nr >= DRIVER_IOCTL_COUNT) &&
+            ((nr < DRM_COMMAND_BASE) || (nr >= DRM_COMMAND_END)))
+                goto err_i1;
+        if ((nr >= DRM_COMMAND_BASE) && (nr < DRM_COMMAND_END)
                  && (nr < DRM_COMMAND_BASE + dev->driver->num_ioctls))
                 ioctl = &dev->driver->ioctls[nr - DRM_COMMAND_BASE];
+        else if ((nr >= DRM_COMMAND_END) || (nr < DRM_COMMAND_BASE))
+                ioctl = &drm_ioctls[nr];
         else
                 goto err_i1;
 
diff --git a/drivers/char/drm/drm_fops.c b/drivers/char/drm/drm_fops.c
index 898f47d..3b159ca 100644
--- a/drivers/char/drm/drm_fops.c
+++ b/drivers/char/drm/drm_fops.c
@@ -46,6 +46,7 @@ static int drm_setup(drm_device_t * dev)
         drm_local_map_t *map;
         int i;
         int ret;
+        u32 sareapage;
 
         if (dev->driver->firstopen) {
                 ret = dev->driver->firstopen(dev);
@@ -56,7 +57,8 @@ static int drm_setup(drm_device_t * dev)
         dev->magicfree.next = NULL;
 
         /* prebuild the SAREA */
-        i = drm_addmap(dev, 0, SAREA_MAX, _DRM_SHM, _DRM_CONTAINS_LOCK, &map);
+        sareapage = max_t(unsigned, SAREA_MAX, PAGE_SIZE);
+        i = drm_addmap(dev, 0, sareapage, _DRM_SHM, _DRM_CONTAINS_LOCK, &map);
         if (i != 0)
                 return i;
 
@@ -84,7 +86,7 @@ static int drm_setup(drm_device_t * dev)
         INIT_LIST_HEAD(&dev->ctxlist->head);
 
         dev->vmalist = NULL;
-        dev->sigdata.lock = dev->lock.hw_lock = NULL;
+        dev->sigdata.lock = NULL;
         init_waitqueue_head(&dev->lock.lock_queue);
         dev->queue_count = 0;
         dev->queue_reserved = 0;
@@ -354,58 +356,56 @@ int drm_release(struct inode *inode, struct file *filp)
                   current->pid, (long)old_encode_dev(priv->head->device),
                   dev->open_count);
 
-        if (priv->lock_count && dev->lock.hw_lock &&
-            _DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock) &&
-            dev->lock.filp == filp) {
-                DRM_DEBUG("File %p released, freeing lock for context %d\n",
-                          filp, _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock));
-
-                if (dev->driver->reclaim_buffers_locked)
+        if (dev->driver->reclaim_buffers_locked && dev->lock.hw_lock) {
+                if (drm_i_have_hw_lock(filp)) {
                         dev->driver->reclaim_buffers_locked(dev, filp);
-
-                drm_lock_free(dev, &dev->lock.hw_lock->lock,
-                              _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock));
-
-                /* FIXME: may require heavy-handed reset of
-                   hardware at this point, possibly
-                   processed via a callback to the X
-                   server. */
-        } else if (dev->driver->reclaim_buffers_locked && priv->lock_count
-                   && dev->lock.hw_lock) {
-                /* The lock is required to reclaim buffers */
-                DECLARE_WAITQUEUE(entry, current);
-
-                add_wait_queue(&dev->lock.lock_queue, &entry);
-                for (;;) {
-                        __set_current_state(TASK_INTERRUPTIBLE);
-                        if (!dev->lock.hw_lock) {
-                                /* Device has been unregistered */
-                                retcode = -EINTR;
-                                break;
+                } else {
+                        unsigned long _end=jiffies + 3*DRM_HZ;
+                        int locked = 0;
+
+                        drm_idlelock_take(&dev->lock);
+
+                        /*
+                         * Wait for a while.
+                         */
+
+                        do{
+                                spin_lock(&dev->lock.spinlock);
+                                locked = dev->lock.idle_has_lock;
+                                spin_unlock(&dev->lock.spinlock);
+                                if (locked)
+                                        break;
+                                schedule();
+                        } while (!time_after_eq(jiffies, _end));
+
+                        if (!locked) {
+                                DRM_ERROR("reclaim_buffers_locked() deadlock. Please rework this\n"
+                                          "\tdriver to use reclaim_buffers_idlelocked() instead.\n"
+                                          "\tI will go on reclaiming the buffers anyway.\n");
                         }
-                        if (drm_lock_take(&dev->lock.hw_lock->lock,
-                                          DRM_KERNEL_CONTEXT)) {
-                                dev->lock.filp = filp;
-                                dev->lock.lock_time = jiffies;
-                                atomic_inc(&dev->counts[_DRM_STAT_LOCKS]);
-                                break;  /* Got lock */
-                        }
-                        /* Contention */
-                        schedule();
-                        if (signal_pending(current)) {
-                                retcode = -ERESTARTSYS;
-                                break;
-                        }
-                }
-                __set_current_state(TASK_RUNNING);
-                remove_wait_queue(&dev->lock.lock_queue, &entry);
-                if (!retcode) {
+
                         dev->driver->reclaim_buffers_locked(dev, filp);
-                        drm_lock_free(dev, &dev->lock.hw_lock->lock,
-                                      DRM_KERNEL_CONTEXT);
+                        drm_idlelock_release(&dev->lock);
                 }
         }
 
+        if (dev->driver->reclaim_buffers_idlelocked && dev->lock.hw_lock) {
+
+                drm_idlelock_take(&dev->lock);
+                dev->driver->reclaim_buffers_idlelocked(dev, filp);
+                drm_idlelock_release(&dev->lock);
+
+        }
+
+        if (drm_i_have_hw_lock(filp)) {
+                DRM_DEBUG("File %p released, freeing lock for context %d\n",
+                          filp, _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock));
+
+                drm_lock_free(&dev->lock,
+                              _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock));
+        }
+
+
         if (drm_core_check_feature(dev, DRIVER_HAVE_DMA) &&
             !dev->driver->reclaim_buffers_locked) {
                 dev->driver->reclaim_buffers(dev, filp);
diff --git a/drivers/char/drm/drm_hashtab.c b/drivers/char/drm/drm_hashtab.c
index a0b2d68..31acb62 100644
--- a/drivers/char/drm/drm_hashtab.c
+++ b/drivers/char/drm/drm_hashtab.c
@@ -43,7 +43,16 @@ int drm_ht_create(drm_open_hash_t *ht, unsigned int order)
         ht->size = 1 << order;
         ht->order = order;
         ht->fill = 0;
-        ht->table = vmalloc(ht->size*sizeof(*ht->table));
+        ht->table = NULL;
+        ht->use_vmalloc = ((ht->size * sizeof(*ht->table)) > PAGE_SIZE);
+        if (!ht->use_vmalloc) {
+                ht->table = drm_calloc(ht->size, sizeof(*ht->table),
+                                       DRM_MEM_HASHTAB);
+        }
+        if (!ht->table) {
+                ht->use_vmalloc = 1;
+                ht->table = vmalloc(ht->size*sizeof(*ht->table));
+        }
         if (!ht->table) {
                 DRM_ERROR("Out of memory for hash table\n");
                 return -ENOMEM;
@@ -183,7 +192,11 @@ int drm_ht_remove_item(drm_open_hash_t *ht, drm_hash_item_t *item)
 void drm_ht_remove(drm_open_hash_t *ht)
 {
         if (ht->table) {
-                vfree(ht->table);
+                if (ht->use_vmalloc)
+                        vfree(ht->table);
+                else
+                        drm_free(ht->table, ht->size * sizeof(*ht->table),
+                                 DRM_MEM_HASHTAB);
                 ht->table = NULL;
         }
 }
diff --git a/drivers/char/drm/drm_hashtab.h b/drivers/char/drm/drm_hashtab.h
index 40afec0..613091c 100644
--- a/drivers/char/drm/drm_hashtab.h
+++ b/drivers/char/drm/drm_hashtab.h
@@ -47,6 +47,7 @@ typedef struct drm_open_hash{
         unsigned int order;
         unsigned int fill;
         struct hlist_head *table;
+        int use_vmalloc;
 } drm_open_hash_t;
 
 
diff --git a/drivers/char/drm/drm_irq.c b/drivers/char/drm/drm_irq.c
index 9d00c51..2e75331 100644
--- a/drivers/char/drm/drm_irq.c
+++ b/drivers/char/drm/drm_irq.c
@@ -424,7 +424,7 @@ static void drm_locked_tasklet_func(unsigned long data)
         spin_lock_irqsave(&dev->tasklet_lock, irqflags);
 
         if (!dev->locked_tasklet_func ||
-            !drm_lock_take(&dev->lock.hw_lock->lock,
+            !drm_lock_take(&dev->lock,
                            DRM_KERNEL_CONTEXT)) {
                 spin_unlock_irqrestore(&dev->tasklet_lock, irqflags);
                 return;
@@ -435,7 +435,7 @@ static void drm_locked_tasklet_func(unsigned long data)
 
         dev->locked_tasklet_func(dev);
 
-        drm_lock_free(dev, &dev->lock.hw_lock->lock,
+        drm_lock_free(&dev->lock,
                       DRM_KERNEL_CONTEXT);
 
         dev->locked_tasklet_func = NULL;
diff --git a/drivers/char/drm/drm_lock.c b/drivers/char/drm/drm_lock.c
index e9993ba..befd1af 100644
--- a/drivers/char/drm/drm_lock.c
+++ b/drivers/char/drm/drm_lock.c
@@ -35,9 +35,6 @@
 
 #include "drmP.h"
 
-static int drm_lock_transfer(drm_device_t * dev,
-                             __volatile__ unsigned int *lock,
-                             unsigned int context);
 static int drm_notifier(void *priv);
 
 /**
@@ -80,6 +77,9 @@ int drm_lock(struct inode *inode, struct file *filp,
                 return -EINVAL;
 
         add_wait_queue(&dev->lock.lock_queue, &entry);
+        spin_lock(&dev->lock.spinlock);
+        dev->lock.user_waiters++;
+        spin_unlock(&dev->lock.spinlock);
         for (;;) {
                 __set_current_state(TASK_INTERRUPTIBLE);
                 if (!dev->lock.hw_lock) {
@@ -87,7 +87,7 @@ int drm_lock(struct inode *inode, struct file *filp,
                         ret = -EINTR;
                         break;
                 }
-                if (drm_lock_take(&dev->lock.hw_lock->lock, lock.context)) {
+                if (drm_lock_take(&dev->lock, lock.context)) {
                         dev->lock.filp = filp;
                         dev->lock.lock_time = jiffies;
                         atomic_inc(&dev->counts[_DRM_STAT_LOCKS]);
@@ -101,12 +101,14 @@ int drm_lock(struct inode *inode, struct file *filp,
                         break;
                 }
         }
+        spin_lock(&dev->lock.spinlock);
+        dev->lock.user_waiters--;
+        spin_unlock(&dev->lock.spinlock);
         __set_current_state(TASK_RUNNING);
         remove_wait_queue(&dev->lock.lock_queue, &entry);
 
-        DRM_DEBUG("%d %s\n", lock.context, ret ? "interrupted" : "has lock");
-        if (ret)
-                return ret;
+        DRM_DEBUG( "%d %s\n", lock.context, ret ? "interrupted" : "has lock" );
+        if (ret) return ret;
 
         sigemptyset(&dev->sigmask);
         sigaddset(&dev->sigmask, SIGSTOP);
@@ -127,14 +129,12 @@ int drm_lock(struct inode *inode, struct file *filp,
                 }
         }
 
-        /* dev->driver->kernel_context_switch isn't used by any of the x86
-         * drivers but is used by the Sparc driver.
-         */
         if (dev->driver->kernel_context_switch &&
             dev->last_context != lock.context) {
                 dev->driver->kernel_context_switch(dev, dev->last_context,
                                                    lock.context);
         }
+
         return 0;
 }
 
@@ -184,12 +184,8 @@ int drm_unlock(struct inode *inode, struct file *filp,
         if (dev->driver->kernel_context_switch_unlock)
                 dev->driver->kernel_context_switch_unlock(dev);
         else {
-                drm_lock_transfer(dev, &dev->lock.hw_lock->lock,
-                                  DRM_KERNEL_CONTEXT);
-
-                if (drm_lock_free(dev, &dev->lock.hw_lock->lock,
-                                  DRM_KERNEL_CONTEXT)) {
-                        DRM_ERROR("\n");
+                if (drm_lock_free(&dev->lock,lock.context)) {
+                        /* FIXME: Should really bail out here. */
                 }
         }
 
@@ -206,18 +202,26 @@ int drm_unlock(struct inode *inode, struct file *filp,
  *
  * Attempt to mark the lock as held by the given context, via the \p cmpxchg instruction.
  */
-int drm_lock_take(__volatile__ unsigned int *lock, unsigned int context)
+int drm_lock_take(drm_lock_data_t *lock_data,
+                  unsigned int context)
 {
         unsigned int old, new, prev;
+        volatile unsigned int *lock = &lock_data->hw_lock->lock;
 
+        spin_lock(&lock_data->spinlock);
         do {
                 old = *lock;
                 if (old & _DRM_LOCK_HELD)
                         new = old | _DRM_LOCK_CONT;
-                else
-                        new = context | _DRM_LOCK_HELD;
+                else {
+                        new = context | _DRM_LOCK_HELD |
+                                ((lock_data->user_waiters + lock_data->kernel_waiters > 1) ?
+                                 _DRM_LOCK_CONT : 0);
+                }
                 prev = cmpxchg(lock, old, new);
         } while (prev != old);
+        spin_unlock(&lock_data->spinlock);
+
         if (_DRM_LOCKING_CONTEXT(old) == context) {
                 if (old & _DRM_LOCK_HELD) {
                         if (context != DRM_KERNEL_CONTEXT) {
@@ -227,7 +231,8 @@ int drm_lock_take(__volatile__ unsigned int *lock, unsigned int context)
                         return 0;
                 }
         }
-        if (new == (context | _DRM_LOCK_HELD)) {
+
+        if ((_DRM_LOCKING_CONTEXT(new)) == context && (new & _DRM_LOCK_HELD)) {
                 /* Have lock */
                 return 1;
         }
@@ -246,13 +251,13 @@ int drm_lock_take(__volatile__ unsigned int *lock, unsigned int context)
  * Resets the lock file pointer.
  * Marks the lock as held by the given context, via the \p cmpxchg instruction.
  */
-static int drm_lock_transfer(drm_device_t * dev,
-                             __volatile__ unsigned int *lock,
+static int drm_lock_transfer(drm_lock_data_t *lock_data,
                              unsigned int context)
 {
         unsigned int old, new, prev;
+        volatile unsigned int *lock = &lock_data->hw_lock->lock;
 
-        dev->lock.filp = NULL;
+        lock_data->filp = NULL;
         do {
                 old = *lock;
                 new = context | _DRM_LOCK_HELD;
@@ -272,23 +277,32 @@ static int drm_lock_transfer(drm_device_t * dev,
  * Marks the lock as not held, via the \p cmpxchg instruction. Wakes any task
  * waiting on the lock queue.
  */
-int drm_lock_free(drm_device_t * dev,
-                  __volatile__ unsigned int *lock, unsigned int context)
+int drm_lock_free(drm_lock_data_t *lock_data, unsigned int context)
 {
         unsigned int old, new, prev;
+        volatile unsigned int *lock = &lock_data->hw_lock->lock;
+
+        spin_lock(&lock_data->spinlock);
+        if (lock_data->kernel_waiters != 0) {
+                drm_lock_transfer(lock_data, 0);
+                lock_data->idle_has_lock = 1;
+                spin_unlock(&lock_data->spinlock);
+                return 1;
+        }
+        spin_unlock(&lock_data->spinlock);
 
-        dev->lock.filp = NULL;
         do {
                 old = *lock;
-                new = 0;
+                new = _DRM_LOCKING_CONTEXT(old);
                 prev = cmpxchg(lock, old, new);
         } while (prev != old);
+
         if (_DRM_LOCK_IS_HELD(old) && _DRM_LOCKING_CONTEXT(old) != context) {
                 DRM_ERROR("%d freed heavyweight lock held by %d\n",
                           context, _DRM_LOCKING_CONTEXT(old));
                 return 1;
         }
-        wake_up_interruptible(&dev->lock.lock_queue);
+        wake_up_interruptible(&lock_data->lock_queue);
         return 0;
 }
 
@@ -322,3 +336,67 @@ static int drm_notifier(void *priv)
         } while (prev != old);
         return 0;
 }
+
+/**
+ * This function returns immediately and takes the hw lock
+ * with the kernel context if it is free, otherwise it gets the highest priority when and if
+ * it is eventually released.
+ *
+ * This guarantees that the kernel will _eventually_ have the lock _unless_ it is held
+ * by a blocked process. (In the latter case an explicit wait for the hardware lock would cause
+ * a deadlock, which is why the "idlelock" was invented).
+ *
+ * This should be sufficient to wait for GPU idle without
+ * having to worry about starvation.
+ */
+
+void drm_idlelock_take(drm_lock_data_t *lock_data)
+{
+        int ret = 0;
+
+        spin_lock(&lock_data->spinlock);
+        lock_data->kernel_waiters++;
+        if (!lock_data->idle_has_lock) {
+
+                spin_unlock(&lock_data->spinlock);
+                ret = drm_lock_take(lock_data, DRM_KERNEL_CONTEXT);
+                spin_lock(&lock_data->spinlock);
+
+                if (ret == 1)
+                        lock_data->idle_has_lock = 1;
+        }
+        spin_unlock(&lock_data->spinlock);
+}
+EXPORT_SYMBOL(drm_idlelock_take);
+
+void drm_idlelock_release(drm_lock_data_t *lock_data)
+{
+        unsigned int old, prev;
+        volatile unsigned int *lock = &lock_data->hw_lock->lock;
+
+        spin_lock(&lock_data->spinlock);
+        if (--lock_data->kernel_waiters == 0) {
+                if (lock_data->idle_has_lock) {
+                        do {
+                                old = *lock;
+                                prev = cmpxchg(lock, old, DRM_KERNEL_CONTEXT);
+                        } while (prev != old);
+                        wake_up_interruptible(&lock_data->lock_queue);
+                        lock_data->idle_has_lock = 0;
+                }
+        }
+        spin_unlock(&lock_data->spinlock);
+}
+EXPORT_SYMBOL(drm_idlelock_release);
+
+
+int drm_i_have_hw_lock(struct file *filp)
+{
+        DRM_DEVICE;
+
+        return (priv->lock_count && dev->lock.hw_lock &&
+                _DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock) &&
+                dev->lock.filp == filp);
+}
+
+EXPORT_SYMBOL(drm_i_have_hw_lock);
diff --git a/drivers/char/drm/drm_mm.c b/drivers/char/drm/drm_mm.c
index 9b46b85..2ec1d9f 100644
--- a/drivers/char/drm/drm_mm.c
+++ b/drivers/char/drm/drm_mm.c
@@ -274,7 +274,6 @@ int drm_mm_init(drm_mm_t * mm, unsigned long start, unsigned long size)
 
         return drm_mm_create_tail_node(mm, start, size);
 }
-EXPORT_SYMBOL(drm_mm_init);
 
 void drm_mm_takedown(drm_mm_t * mm)
 {
@@ -295,4 +294,3 @@ void drm_mm_takedown(drm_mm_t * mm)
 
         drm_free(entry, sizeof(*entry), DRM_MEM_MM);
 }
-EXPORT_SYMBOL(drm_mm_takedown);
diff --git a/drivers/char/drm/drm_pciids.h b/drivers/char/drm/drm_pciids.h
index ad54b84..01cf482 100644
--- a/drivers/char/drm/drm_pciids.h
+++ b/drivers/char/drm/drm_pciids.h
@@ -230,10 +230,10 @@
         {0x1106, 0x7205, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
         {0x1106, 0x3108, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
         {0x1106, 0x3304, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
-        {0x1106, 0x3157, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
         {0x1106, 0x3344, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
         {0x1106, 0x3343, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
         {0x1106, 0x3230, PCI_ANY_ID, PCI_ANY_ID, 0, 0, VIA_DX9_0}, \
+        {0x1106, 0x3157, PCI_ANY_ID, PCI_ANY_ID, 0, 0, VIA_PRO_GROUP_A}, \
         {0, 0, 0}
 
 #define i810_PCI_IDS \
@@ -296,5 +296,6 @@
         {0x8086, 0x2982, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
         {0x8086, 0x2992, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
         {0x8086, 0x29a2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+        {0x8086, 0x2a02, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
         {0, 0, 0}
 
diff --git a/drivers/char/drm/drm_proc.c b/drivers/char/drm/drm_proc.c
index 7fd0da7..b204498 100644
--- a/drivers/char/drm/drm_proc.c
+++ b/drivers/char/drm/drm_proc.c
@@ -72,7 +72,7 @@ static struct drm_proc_list {
 #endif
 };
 
-#define DRM_PROC_ENTRIES (sizeof(drm_proc_list)/sizeof(drm_proc_list[0]))
+#define DRM_PROC_ENTRIES ARRAY_SIZE(drm_proc_list)
 
 /**
  * Initialize the DRI proc filesystem for a device.
diff --git a/drivers/char/drm/drm_stub.c b/drivers/char/drm/drm_stub.c
index 120d102..19408ad 100644
--- a/drivers/char/drm/drm_stub.c
+++ b/drivers/char/drm/drm_stub.c
@@ -62,6 +62,7 @@ static int drm_fill_in_dev(drm_device_t * dev, struct pci_dev *pdev,
         spin_lock_init(&dev->count_lock);
         spin_lock_init(&dev->drw_lock);
         spin_lock_init(&dev->tasklet_lock);
+        spin_lock_init(&dev->lock.spinlock);
         init_timer(&dev->timer);
         mutex_init(&dev->struct_mutex);
         mutex_init(&dev->ctxlist_mutex);
diff --git a/drivers/char/drm/drm_vm.c b/drivers/char/drm/drm_vm.c
index 54a6328..35540cf 100644
--- a/drivers/char/drm/drm_vm.c
+++ b/drivers/char/drm/drm_vm.c
@@ -41,6 +41,30 @@
 static void drm_vm_open(struct vm_area_struct *vma);
 static void drm_vm_close(struct vm_area_struct *vma);
 
+static pgprot_t drm_io_prot(uint32_t map_type, struct vm_area_struct *vma)
+{
+        pgprot_t tmp = vm_get_page_prot(vma->vm_flags);
+
+#if defined(__i386__) || defined(__x86_64__)
+        if (boot_cpu_data.x86 > 3 && map_type != _DRM_AGP) {
+                pgprot_val(tmp) |= _PAGE_PCD;
+                pgprot_val(tmp) &= ~_PAGE_PWT;
+        }
+#elif defined(__powerpc__)
+        pgprot_val(tmp) |= _PAGE_NO_CACHE;
+        if (map_type == _DRM_REGISTERS)
+                pgprot_val(tmp) |= _PAGE_GUARDED;
+#endif
+#if defined(__ia64__)
+        if (efi_range_is_wc(vma->vm_start, vma->vm_end -
+                            vma->vm_start))
+                tmp = pgprot_writecombine(tmp);
+        else
+                tmp = pgprot_noncached(tmp);
+#endif
+        return tmp;
+}
+
 /**
  * \c nopage method for AGP virtual memory.
  *
@@ -151,8 +175,7 @@ static __inline__ struct page *drm_do_vm_shm_nopage(struct vm_area_struct *vma,
 
         offset = address - vma->vm_start;
         i = (unsigned long)map->handle + offset;
-        page = (map->type == _DRM_CONSISTENT) ?
-            virt_to_page((void *)i) : vmalloc_to_page((void *)i);
+        page = vmalloc_to_page((void *)i);
         if (!page)
                 return NOPAGE_SIGBUS;
         get_page(page);
@@ -389,7 +412,7 @@ static struct vm_operations_struct drm_vm_sg_ops = {
  * Create a new drm_vma_entry structure as the \p vma private data entry and
  * add it to drm_device::vmalist.
  */
-static void drm_vm_open(struct vm_area_struct *vma)
+static void drm_vm_open_locked(struct vm_area_struct *vma)
 {
         drm_file_t *priv = vma->vm_file->private_data;
         drm_device_t *dev = priv->head->dev;
@@ -401,15 +424,23 @@ static void drm_vm_open(struct vm_area_struct *vma)
 
         vma_entry = drm_alloc(sizeof(*vma_entry), DRM_MEM_VMAS);
         if (vma_entry) {
-                mutex_lock(&dev->struct_mutex);
                 vma_entry->vma = vma;
                 vma_entry->next = dev->vmalist;
                 vma_entry->pid = current->pid;
                 dev->vmalist = vma_entry;
-                mutex_unlock(&dev->struct_mutex);
         }
 }
 
+static void drm_vm_open(struct vm_area_struct *vma)
+{
+        drm_file_t *priv = vma->vm_file->private_data;
+        drm_device_t *dev = priv->head->dev;
+
+        mutex_lock(&dev->struct_mutex);
+        drm_vm_open_locked(vma);
+        mutex_unlock(&dev->struct_mutex);
+}
+
 /**
  * \c close method for all virtual memory types.
  *
@@ -460,7 +491,6 @@ static int drm_mmap_dma(struct file *filp, struct vm_area_struct *vma)
         drm_device_dma_t *dma;
         unsigned long length = vma->vm_end - vma->vm_start;
 
-        lock_kernel();
         dev = priv->head->dev;
         dma = dev->dma;
         DRM_DEBUG("start = 0x%lx, end = 0x%lx, page offset = 0x%lx\n",
@@ -468,10 +498,8 @@ static int drm_mmap_dma(struct file *filp, struct vm_area_struct *vma)
 
         /* Length must match exact page count */
         if (!dma || (length >> PAGE_SHIFT) != dma->page_count) {
-                unlock_kernel();
                 return -EINVAL;
         }
-        unlock_kernel();
 
         if (!capable(CAP_SYS_ADMIN) &&
             (dma->flags & _DRM_DMA_USE_PCI_RO)) {
@@ -494,7 +522,7 @@ static int drm_mmap_dma(struct file *filp, struct vm_area_struct *vma)
         vma->vm_flags |= VM_RESERVED;   /* Don't swap */
 
         vma->vm_file = filp;    /* Needed for drm_vm_open() */
-        drm_vm_open(vma);
+        drm_vm_open_locked(vma);
         return 0;
 }
 
@@ -529,7 +557,7 @@ EXPORT_SYMBOL(drm_core_get_reg_ofs);
  * according to the mapping type and remaps the pages. Finally sets the file
  * pointer and calls vm_open().
  */
-int drm_mmap(struct file *filp, struct vm_area_struct *vma)
+static int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma)
 {
         drm_file_t *priv = filp->private_data;
         drm_device_t *dev = priv->head->dev;
@@ -565,7 +593,7 @@ int drm_mmap(struct file *filp, struct vm_area_struct *vma)
                 return -EPERM;
 
         /* Check for valid size. */
-        if (map->size != vma->vm_end - vma->vm_start)
+        if (map->size < vma->vm_end - vma->vm_start)
                 return -EINVAL;
 
         if (!capable(CAP_SYS_ADMIN) && (map->flags & _DRM_READ_ONLY)) {
@@ -600,37 +628,16 @@ int drm_mmap(struct file *filp, struct vm_area_struct *vma)
                 /* fall through to _DRM_FRAME_BUFFER... */
         case _DRM_FRAME_BUFFER:
         case _DRM_REGISTERS:
-#if defined(__i386__) || defined(__x86_64__)
-                if (boot_cpu_data.x86 > 3 && map->type != _DRM_AGP) {
-                        pgprot_val(vma->vm_page_prot) |= _PAGE_PCD;
-                        pgprot_val(vma->vm_page_prot) &= ~_PAGE_PWT;
-                }
-#elif defined(__powerpc__)
-                pgprot_val(vma->vm_page_prot) |= _PAGE_NO_CACHE;
-                if (map->type == _DRM_REGISTERS)
-                        pgprot_val(vma->vm_page_prot) |= _PAGE_GUARDED;
-#endif
-                vma->vm_flags |= VM_IO; /* not in core dump */
-#if defined(__ia64__)
-                if (efi_range_is_wc(vma->vm_start, vma->vm_end - vma->vm_start))
-                        vma->vm_page_prot =
-                            pgprot_writecombine(vma->vm_page_prot);
-                else
-                        vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
-#endif
                 offset = dev->driver->get_reg_ofs(dev);
+                vma->vm_flags |= VM_IO; /* not in core dump */
+                vma->vm_page_prot = drm_io_prot(map->type, vma);
 #ifdef __sparc__
                 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+#endif
                 if (io_remap_pfn_range(vma, vma->vm_start,
                                        (map->offset + offset) >> PAGE_SHIFT,
                                        vma->vm_end - vma->vm_start,
                                        vma->vm_page_prot))
-#else
-                if (io_remap_pfn_range(vma, vma->vm_start,
-                                       (map->offset + offset) >> PAGE_SHIFT,
-                                       vma->vm_end - vma->vm_start,
-                                       vma->vm_page_prot))
-#endif
                         return -EAGAIN;
                 DRM_DEBUG("   Type = %d; start = 0x%lx, end = 0x%lx,"
                           " offset = 0x%lx\n",
@@ -638,10 +645,15 @@ int drm_mmap(struct file *filp, struct vm_area_struct *vma)
                        vma->vm_start, vma->vm_end, map->offset + offset);
                 vma->vm_ops = &drm_vm_ops;
                 break;
-        case _DRM_SHM:
         case _DRM_CONSISTENT:
-                /* Consistent memory is really like shared memory. It's only
-                 * allocate in a different way */
+                /* Consistent memory is really like shared memory. But
+                 * it's allocated in a different way, so avoid nopage */
+                if (remap_pfn_range(vma, vma->vm_start,
+                    page_to_pfn(virt_to_page(map->handle)),
+                    vma->vm_end - vma->vm_start, vma->vm_page_prot))
+                        return -EAGAIN;
+        /* fall through to _DRM_SHM */
+        case _DRM_SHM:
                 vma->vm_ops = &drm_vm_shm_ops;
                 vma->vm_private_data = (void *)map;
                 /* Don't let this area swap.  Change when
@@ -659,8 +671,20 @@ int drm_mmap(struct file *filp, struct vm_area_struct *vma)
         vma->vm_flags |= VM_RESERVED;   /* Don't swap */
 
         vma->vm_file = filp;    /* Needed for drm_vm_open() */
-        drm_vm_open(vma);
+        drm_vm_open_locked(vma);
         return 0;
 }
 
+int drm_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+        drm_file_t *priv = filp->private_data;
+        drm_device_t *dev = priv->head->dev;
+        int ret;
+
+        mutex_lock(&dev->struct_mutex);
+        ret = drm_mmap_locked(filp, vma);
+        mutex_unlock(&dev->struct_mutex);
+
+        return ret;
+}
 EXPORT_SYMBOL(drm_mmap);
diff --git a/drivers/char/drm/i915_dma.c b/drivers/char/drm/i915_dma.c
index 9354ce3..1ba15d9 100644
--- a/drivers/char/drm/i915_dma.c
+++ b/drivers/char/drm/i915_dma.c
@@ -34,7 +34,8 @@
 #define IS_I965G(dev) (dev->pci_device == 0x2972 || \
                        dev->pci_device == 0x2982 || \
                        dev->pci_device == 0x2992 || \
-                       dev->pci_device == 0x29A2)
+                       dev->pci_device == 0x29A2 || \
+                       dev->pci_device == 0x2A02)
 
 /* Really want an OS-independent resettable timer.  Would like to have
  * this loop run for (eg) 3 sec, but have the timer reset every time
diff --git a/drivers/char/drm/radeon_cp.c b/drivers/char/drm/radeon_cp.c
index 5ed9656..c1850ec 100644
--- a/drivers/char/drm/radeon_cp.c
+++ b/drivers/char/drm/radeon_cp.c
@@ -1560,8 +1560,8 @@ static int radeon_do_init_cp(drm_device_t * dev, drm_radeon_init_t * init)
         if (dev_priv->flags & RADEON_IS_AGP) {
                 base = dev->agp->base;
                 /* Check if valid */
-                if ((base + dev_priv->gart_size) > dev_priv->fb_location &&
-                    base < (dev_priv->fb_location + dev_priv->fb_size)) {
+                if ((base + dev_priv->gart_size - 1) >= dev_priv->fb_location &&
+                    base < (dev_priv->fb_location + dev_priv->fb_size - 1)) {
                         DRM_INFO("Can't use AGP base @0x%08lx, won't fit\n",
                                  dev->agp->base);
                         base = 0;
@@ -1571,8 +1571,8 @@ static int radeon_do_init_cp(drm_device_t * dev, drm_radeon_init_t * init)
                 /* If not or if AGP is at 0 (Macs), try to put it elsewhere */
                 if (base == 0) {
                         base = dev_priv->fb_location + dev_priv->fb_size;
-                        if (((base + dev_priv->gart_size) & 0xfffffffful)
-                            < base)
+                        if (base < dev_priv->fb_location ||
+                            ((base + dev_priv->gart_size) & 0xfffffffful) < base)
                                 base = dev_priv->fb_location - dev_priv->gart_size;
                 }
 
diff --git a/drivers/char/drm/sis_drv.c b/drivers/char/drm/sis_drv.c
index 3d5b321..690e0af 100644
--- a/drivers/char/drm/sis_drv.c
+++ b/drivers/char/drm/sis_drv.c
@@ -71,7 +71,7 @@ static struct drm_driver driver = {
         .context_dtor = NULL,
         .dma_quiescent = sis_idle,
         .reclaim_buffers = NULL,
-        .reclaim_buffers_locked = sis_reclaim_buffers_locked,
+        .reclaim_buffers_idlelocked = sis_reclaim_buffers_locked,
         .lastclose = sis_lastclose,
         .get_map_ofs = drm_core_get_map_ofs,
         .get_reg_ofs = drm_core_get_reg_ofs,
diff --git a/drivers/char/drm/via_drv.c b/drivers/char/drm/via_drv.c
index bb9dde8..2d4957a 100644
--- a/drivers/char/drm/via_drv.c
+++ b/drivers/char/drm/via_drv.c
@@ -52,7 +52,8 @@ static struct drm_driver driver = {
         .dma_quiescent = via_driver_dma_quiescent,
         .dri_library_name = dri_library_name,
         .reclaim_buffers = drm_core_reclaim_buffers,
-        .reclaim_buffers_locked = via_reclaim_buffers_locked,
+        .reclaim_buffers_locked = NULL,
+        .reclaim_buffers_idlelocked = via_reclaim_buffers_locked,
         .lastclose = via_lastclose,
         .get_map_ofs = drm_core_get_map_ofs,
         .get_reg_ofs = drm_core_get_reg_ofs,
diff --git a/drivers/char/drm/via_mm.h b/drivers/char/drm/via_mm.h
deleted file mode 100644
index d57efda..0000000
--- a/drivers/char/drm/via_mm.h
+++ /dev/null
@@ -1,40 +0,0 @@
-/*
- * Copyright 1998-2003 VIA Technologies, Inc. All Rights Reserved.
- * Copyright 2001-2003 S3 Graphics, Inc. All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sub license,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.  IN NO EVENT SHALL
- * VIA, S3 GRAPHICS, AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-#ifndef _via_drm_mm_h_
-#define _via_drm_mm_h_
-
-typedef struct {
-        unsigned int context;
-        unsigned int size;
-        unsigned long offset;
-        unsigned long free;
-} drm_via_mm_t;
-
-typedef struct {
-        unsigned int size;
-        unsigned long handle;
-        void *virtual;
-} drm_via_dma_t;
-
-#endif
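
Editorial note (not part of the commit): the sketch below illustrates the usage pattern the new idlelock API is designed for — drm_idlelock_take()/drm_idlelock_release() added to drm_lock.c above, together with the .reclaim_buffers_idlelocked hook that sis_drv.c and via_drv.c now set. It mirrors the drm_release() path in drm_fops.c from this merge; the wrapper name reclaim_with_idlelock() is hypothetical and only the called functions come from the patch.

/* Illustrative fragment, assuming the DRM core headers from this tree. */
#include "drmP.h"

static void reclaim_with_idlelock(drm_device_t *dev, struct file *filp)
{
        /* Returns immediately: if the heavyweight lock is free it is taken
         * with the kernel context; otherwise drm_lock_free() hands it to
         * the kernel as soon as the current holder releases it. */
        drm_idlelock_take(&dev->lock);

        /* The driver hook runs knowing user space cannot reacquire the
         * hardware lock until the idlelock is released. */
        dev->driver->reclaim_buffers_idlelocked(dev, filp);

        drm_idlelock_release(&dev->lock);
}

Compared with the old approach of blocking on the hardware lock in drm_release(), this never waits indefinitely on a lock held by a blocked process, which is the deadlock addressed by "drm: fix driver deadlock with AIGLX and reclaim_buffers_locked" in the shortlog above.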