author    Linus Torvalds <torvalds@linux-foundation.org>  2008-07-14 20:32:24 (GMT)
committer Linus Torvalds <torvalds@linux-foundation.org>  2008-07-14 20:32:24 (GMT)
commit    c142bda458a9c81097238800e1bd8eeeea09913d
tree      a412321f62cbb90cae831d7f2dea026cbd3c958c /include
parent    b5cf43c47b05c8deb10f9674d541dddbdec0e341
parent    c0e09200dc0813972442e550a5905a132768e56c
download  linux-fsl-qoriq-c142bda458a9c81097238800e1bd8eeeea09913d.tar.xz
Merge branch 'drm-reorg' of git://git.kernel.org/pub/scm/linux/kernel/git/airlied/drm-2.6
* 'drm-reorg' of git://git.kernel.org/pub/scm/linux/kernel/git/airlied/drm-2.6:
  drm: reorganise drm tree to be more future proof.
Diffstat (limited to 'include')
-rw-r--r--  include/Kbuild                     1
-rw-r--r--  include/drm/Kbuild                10
-rw-r--r--  include/drm/drm.h                694
-rw-r--r--  include/drm/drmP.h              1153
-rw-r--r--  include/drm/drm_core.h            34
-rw-r--r--  include/drm/drm_hashtab.h         67
-rw-r--r--  include/drm/drm_memory.h          61
-rw-r--r--  include/drm/drm_memory_debug.h   309
-rw-r--r--  include/drm/drm_os_linux.h       108
-rw-r--r--  include/drm/drm_pciids.h         415
-rw-r--r--  include/drm/drm_sarea.h           84
-rw-r--r--  include/drm/drm_sman.h           176
-rw-r--r--  include/drm/i810_drm.h           281
-rw-r--r--  include/drm/i830_drm.h           342
-rw-r--r--  include/drm/i915_drm.h           270
-rw-r--r--  include/drm/mga_drm.h            417
-rw-r--r--  include/drm/r128_drm.h           326
-rw-r--r--  include/drm/radeon_drm.h         749
-rw-r--r--  include/drm/savage_drm.h         210
-rw-r--r--  include/drm/sis_drm.h             67
-rw-r--r--  include/drm/via_drm.h            275
21 files changed, 6049 insertions, 0 deletions
diff --git a/include/Kbuild b/include/Kbuild
index b522887..bdca155 100644
--- a/include/Kbuild
+++ b/include/Kbuild
@@ -4,5 +4,6 @@ header-y += sound/
header-y += mtd/
header-y += rdma/
header-y += video/
+header-y += drm/
header-y += asm-$(ARCH)/
diff --git a/include/drm/Kbuild b/include/drm/Kbuild
new file mode 100644
index 0000000..82b6983
--- /dev/null
+++ b/include/drm/Kbuild
@@ -0,0 +1,10 @@
+unifdef-y += drm.h drm_sarea.h
+unifdef-y += i810_drm.h
+unifdef-y += i830_drm.h
+unifdef-y += i915_drm.h
+unifdef-y += mga_drm.h
+unifdef-y += r128_drm.h
+unifdef-y += radeon_drm.h
+unifdef-y += sis_drm.h
+unifdef-y += savage_drm.h
+unifdef-y += via_drm.h
diff --git a/include/drm/drm.h b/include/drm/drm.h
new file mode 100644
index 0000000..38d3c6b
--- /dev/null
+++ b/include/drm/drm.h
@@ -0,0 +1,694 @@
+/**
+ * \file drm.h
+ * Header for the Direct Rendering Manager
+ *
+ * \author Rickard E. (Rik) Faith <faith@valinux.com>
+ *
+ * \par Acknowledgments:
+ * Dec 1999, Richard Henderson <rth@twiddle.net>, move to generic \c cmpxchg.
+ */
+
+/*
+ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _DRM_H_
+#define _DRM_H_
+
+#if defined(__linux__)
+#if defined(__KERNEL__)
+#endif
+#include <asm/ioctl.h> /* For _IO* macros */
+#define DRM_IOCTL_NR(n) _IOC_NR(n)
+#define DRM_IOC_VOID _IOC_NONE
+#define DRM_IOC_READ _IOC_READ
+#define DRM_IOC_WRITE _IOC_WRITE
+#define DRM_IOC_READWRITE _IOC_READ|_IOC_WRITE
+#define DRM_IOC(dir, group, nr, size) _IOC(dir, group, nr, size)
+#elif defined(__FreeBSD__) || defined(__NetBSD__) || defined(__OpenBSD__)
+#if defined(__FreeBSD__) && defined(IN_MODULE)
+/* Prevent name collision when including sys/ioccom.h */
+#undef ioctl
+#include <sys/ioccom.h>
+#define ioctl(a,b,c) xf86ioctl(a,b,c)
+#else
+#include <sys/ioccom.h>
+#endif /* __FreeBSD__ && xf86ioctl */
+#define DRM_IOCTL_NR(n) ((n) & 0xff)
+#define DRM_IOC_VOID IOC_VOID
+#define DRM_IOC_READ IOC_OUT
+#define DRM_IOC_WRITE IOC_IN
+#define DRM_IOC_READWRITE IOC_INOUT
+#define DRM_IOC(dir, group, nr, size) _IOC(dir, group, nr, size)
+#endif
+
+#define DRM_MAJOR 226
+#define DRM_MAX_MINOR 15
+
+#define DRM_NAME "drm" /**< Name in kernel, /dev, and /proc */
+#define DRM_MIN_ORDER 5 /**< At least 2^5 bytes = 32 bytes */
+#define DRM_MAX_ORDER 22 /**< Up to 2^22 bytes = 4MB */
+#define DRM_RAM_PERCENT 10 /**< How much system ram can we lock? */
+
+#define _DRM_LOCK_HELD 0x80000000U /**< Hardware lock is held */
+#define _DRM_LOCK_CONT 0x40000000U /**< Hardware lock is contended */
+#define _DRM_LOCK_IS_HELD(lock) ((lock) & _DRM_LOCK_HELD)
+#define _DRM_LOCK_IS_CONT(lock) ((lock) & _DRM_LOCK_CONT)
+#define _DRM_LOCKING_CONTEXT(lock) ((lock) & ~(_DRM_LOCK_HELD|_DRM_LOCK_CONT))
+
+typedef unsigned int drm_handle_t;
+typedef unsigned int drm_context_t;
+typedef unsigned int drm_drawable_t;
+typedef unsigned int drm_magic_t;
+
+/**
+ * Cliprect.
+ *
+ * \warning: If you change this structure, make sure you change
+ * XF86DRIClipRectRec in the server as well
+ *
+ * \note KW: Actually it's illegal to change either for
+ * backwards-compatibility reasons.
+ */
+struct drm_clip_rect {
+ unsigned short x1;
+ unsigned short y1;
+ unsigned short x2;
+ unsigned short y2;
+};
+
+/**
+ * Drawable information.
+ */
+struct drm_drawable_info {
+ unsigned int num_rects;
+ struct drm_clip_rect *rects;
+};
+
+/**
+ * Texture region.
+ */
+struct drm_tex_region {
+ unsigned char next;
+ unsigned char prev;
+ unsigned char in_use;
+ unsigned char padding;
+ unsigned int age;
+};
+
+/**
+ * Hardware lock.
+ *
+ * The lock structure is a simple cache-line aligned integer. To avoid
+ * processor bus contention on a multiprocessor system, there should not be any
+ * other data stored in the same cache line.
+ */
+struct drm_hw_lock {
+ __volatile__ unsigned int lock; /**< lock variable */
+ char padding[60]; /**< Pad to cache line */
+};
+
+/**
+ * DRM_IOCTL_VERSION ioctl argument type.
+ *
+ * \sa drmGetVersion().
+ */
+struct drm_version {
+ int version_major; /**< Major version */
+ int version_minor; /**< Minor version */
+ int version_patchlevel; /**< Patch level */
+ size_t name_len; /**< Length of name buffer */
+ char __user *name; /**< Name of driver */
+ size_t date_len; /**< Length of date buffer */
+ char __user *date; /**< User-space buffer to hold date */
+ size_t desc_len; /**< Length of desc buffer */
+ char __user *desc; /**< User-space buffer to hold desc */
+};
+
+/**
+ * DRM_IOCTL_GET_UNIQUE ioctl argument type.
+ *
+ * \sa drmGetBusid() and drmSetBusId().
+ */
+struct drm_unique {
+ size_t unique_len; /**< Length of unique */
+ char __user *unique; /**< Unique name for driver instantiation */
+};
+
+struct drm_list {
+ int count; /**< Length of user-space structures */
+ struct drm_version __user *version;
+};
+
+struct drm_block {
+ int unused;
+};
+
+/**
+ * DRM_IOCTL_CONTROL ioctl argument type.
+ *
+ * \sa drmCtlInstHandler() and drmCtlUninstHandler().
+ */
+struct drm_control {
+ enum {
+ DRM_ADD_COMMAND,
+ DRM_RM_COMMAND,
+ DRM_INST_HANDLER,
+ DRM_UNINST_HANDLER
+ } func;
+ int irq;
+};
+
+/**
+ * Type of memory to map.
+ */
+enum drm_map_type {
+ _DRM_FRAME_BUFFER = 0, /**< WC (no caching), no core dump */
+ _DRM_REGISTERS = 1, /**< no caching, no core dump */
+ _DRM_SHM = 2, /**< shared, cached */
+ _DRM_AGP = 3, /**< AGP/GART */
+ _DRM_SCATTER_GATHER = 4, /**< Scatter/gather memory for PCI DMA */
+ _DRM_CONSISTENT = 5, /**< Consistent memory for PCI DMA */
+};
+
+/**
+ * Memory mapping flags.
+ */
+enum drm_map_flags {
+ _DRM_RESTRICTED = 0x01, /**< Cannot be mapped to user-virtual */
+ _DRM_READ_ONLY = 0x02,
+ _DRM_LOCKED = 0x04, /**< shared, cached, locked */
+ _DRM_KERNEL = 0x08, /**< kernel requires access */
+ _DRM_WRITE_COMBINING = 0x10, /**< use write-combining if available */
+ _DRM_CONTAINS_LOCK = 0x20, /**< SHM page that contains lock */
+ _DRM_REMOVABLE = 0x40, /**< Removable mapping */
+ _DRM_DRIVER = 0x80 /**< Managed by driver */
+};
+
+struct drm_ctx_priv_map {
+ unsigned int ctx_id; /**< Context requesting private mapping */
+ void *handle; /**< Handle of map */
+};
+
+/**
+ * DRM_IOCTL_GET_MAP, DRM_IOCTL_ADD_MAP and DRM_IOCTL_RM_MAP ioctls
+ * argument type.
+ *
+ * \sa drmAddMap().
+ */
+struct drm_map {
+ unsigned long offset; /**< Requested physical address (0 for SAREA)*/
+ unsigned long size; /**< Requested physical size (bytes) */
+ enum drm_map_type type; /**< Type of memory to map */
+ enum drm_map_flags flags; /**< Flags */
+ void *handle; /**< User-space: "Handle" to pass to mmap() */
+ /**< Kernel-space: kernel-virtual address */
+ int mtrr; /**< MTRR slot used */
+ /* Private data */
+};
+
+/**
+ * DRM_IOCTL_GET_CLIENT ioctl argument type.
+ */
+struct drm_client {
+ int idx; /**< Which client desired? */
+ int auth; /**< Is client authenticated? */
+ unsigned long pid; /**< Process ID */
+ unsigned long uid; /**< User ID */
+ unsigned long magic; /**< Magic */
+ unsigned long iocs; /**< Ioctl count */
+};
+
+enum drm_stat_type {
+ _DRM_STAT_LOCK,
+ _DRM_STAT_OPENS,
+ _DRM_STAT_CLOSES,
+ _DRM_STAT_IOCTLS,
+ _DRM_STAT_LOCKS,
+ _DRM_STAT_UNLOCKS,
+ _DRM_STAT_VALUE, /**< Generic value */
+ _DRM_STAT_BYTE, /**< Generic byte counter (1024bytes/K) */
+ _DRM_STAT_COUNT, /**< Generic non-byte counter (1000/k) */
+
+ _DRM_STAT_IRQ, /**< IRQ */
+ _DRM_STAT_PRIMARY, /**< Primary DMA bytes */
+ _DRM_STAT_SECONDARY, /**< Secondary DMA bytes */
+ _DRM_STAT_DMA, /**< DMA */
+ _DRM_STAT_SPECIAL, /**< Special DMA (e.g., priority or polled) */
+ _DRM_STAT_MISSED /**< Missed DMA opportunity */
+ /* Add to the *END* of the list */
+};
+
+/**
+ * DRM_IOCTL_GET_STATS ioctl argument type.
+ */
+struct drm_stats {
+ unsigned long count;
+ struct {
+ unsigned long value;
+ enum drm_stat_type type;
+ } data[15];
+};
+
+/**
+ * Hardware locking flags.
+ */
+enum drm_lock_flags {
+ _DRM_LOCK_READY = 0x01, /**< Wait until hardware is ready for DMA */
+ _DRM_LOCK_QUIESCENT = 0x02, /**< Wait until hardware quiescent */
+ _DRM_LOCK_FLUSH = 0x04, /**< Flush this context's DMA queue first */
+ _DRM_LOCK_FLUSH_ALL = 0x08, /**< Flush all DMA queues first */
+ /* These *HALT* flags aren't supported yet
+ -- they will be used to support the
+ full-screen DGA-like mode. */
+ _DRM_HALT_ALL_QUEUES = 0x10, /**< Halt all current and future queues */
+ _DRM_HALT_CUR_QUEUES = 0x20 /**< Halt all current queues */
+};
+
+/**
+ * DRM_IOCTL_LOCK, DRM_IOCTL_UNLOCK and DRM_IOCTL_FINISH ioctl argument type.
+ *
+ * \sa drmGetLock() and drmUnlock().
+ */
+struct drm_lock {
+ int context;
+ enum drm_lock_flags flags;
+};
+
+/**
+ * DMA flags
+ *
+ * \warning
+ * These values \e must match xf86drm.h.
+ *
+ * \sa drm_dma.
+ */
+enum drm_dma_flags {
+ /* Flags for DMA buffer dispatch */
+ _DRM_DMA_BLOCK = 0x01, /**<
+ * Block until buffer dispatched.
+ *
+ * \note The buffer may not yet have
+ * been processed by the hardware --
+ * getting a hardware lock with the
+ * hardware quiescent will ensure
+ * that the buffer has been
+ * processed.
+ */
+ _DRM_DMA_WHILE_LOCKED = 0x02, /**< Dispatch while lock held */
+ _DRM_DMA_PRIORITY = 0x04, /**< High priority dispatch */
+
+ /* Flags for DMA buffer request */
+ _DRM_DMA_WAIT = 0x10, /**< Wait for free buffers */
+ _DRM_DMA_SMALLER_OK = 0x20, /**< Smaller-than-requested buffers OK */
+ _DRM_DMA_LARGER_OK = 0x40 /**< Larger-than-requested buffers OK */
+};
+
+/**
+ * DRM_IOCTL_ADD_BUFS and DRM_IOCTL_MARK_BUFS ioctl argument type.
+ *
+ * \sa drmAddBufs().
+ */
+struct drm_buf_desc {
+ int count; /**< Number of buffers of this size */
+ int size; /**< Size in bytes */
+ int low_mark; /**< Low water mark */
+ int high_mark; /**< High water mark */
+ enum {
+ _DRM_PAGE_ALIGN = 0x01, /**< Align on page boundaries for DMA */
+ _DRM_AGP_BUFFER = 0x02, /**< Buffer is in AGP space */
+ _DRM_SG_BUFFER = 0x04, /**< Scatter/gather memory buffer */
+ _DRM_FB_BUFFER = 0x08, /**< Buffer is in frame buffer */
+ _DRM_PCI_BUFFER_RO = 0x10 /**< Map PCI DMA buffer read-only */
+ } flags;
+ unsigned long agp_start; /**<
+ * Start address of where the AGP buffers are
+ * in the AGP aperture
+ */
+};
+
+/**
+ * DRM_IOCTL_INFO_BUFS ioctl argument type.
+ */
+struct drm_buf_info {
+ int count; /**< Entries in list */
+ struct drm_buf_desc __user *list;
+};
+
+/**
+ * DRM_IOCTL_FREE_BUFS ioctl argument type.
+ */
+struct drm_buf_free {
+ int count;
+ int __user *list;
+};
+
+/**
+ * Buffer information
+ *
+ * \sa drm_buf_map.
+ */
+struct drm_buf_pub {
+ int idx; /**< Index into the master buffer list */
+ int total; /**< Buffer size */
+ int used; /**< Amount of buffer in use (for DMA) */
+ void __user *address; /**< Address of buffer */
+};
+
+/**
+ * DRM_IOCTL_MAP_BUFS ioctl argument type.
+ */
+struct drm_buf_map {
+ int count; /**< Length of the buffer list */
+ void __user *virtual; /**< Mmap'd area in user-virtual */
+ struct drm_buf_pub __user *list; /**< Buffer information */
+};
+
+/**
+ * DRM_IOCTL_DMA ioctl argument type.
+ *
+ * Indices here refer to the offset into the buffer list in drm_buf_get.
+ *
+ * \sa drmDMA().
+ */
+struct drm_dma {
+ int context; /**< Context handle */
+ int send_count; /**< Number of buffers to send */
+ int __user *send_indices; /**< List of handles to buffers */
+ int __user *send_sizes; /**< Lengths of data to send */
+ enum drm_dma_flags flags; /**< Flags */
+ int request_count; /**< Number of buffers requested */
+ int request_size; /**< Desired size for buffers */
+ int __user *request_indices; /**< Buffer information */
+ int __user *request_sizes;
+ int granted_count; /**< Number of buffers granted */
+};
+
+enum drm_ctx_flags {
+ _DRM_CONTEXT_PRESERVED = 0x01,
+ _DRM_CONTEXT_2DONLY = 0x02
+};
+
+/**
+ * DRM_IOCTL_ADD_CTX ioctl argument type.
+ *
+ * \sa drmCreateContext() and drmDestroyContext().
+ */
+struct drm_ctx {
+ drm_context_t handle;
+ enum drm_ctx_flags flags;
+};
+
+/**
+ * DRM_IOCTL_RES_CTX ioctl argument type.
+ */
+struct drm_ctx_res {
+ int count;
+ struct drm_ctx __user *contexts;
+};
+
+/**
+ * DRM_IOCTL_ADD_DRAW and DRM_IOCTL_RM_DRAW ioctl argument type.
+ */
+struct drm_draw {
+ drm_drawable_t handle;
+};
+
+/**
+ * DRM_IOCTL_UPDATE_DRAW ioctl argument type.
+ */
+typedef enum {
+ DRM_DRAWABLE_CLIPRECTS,
+} drm_drawable_info_type_t;
+
+struct drm_update_draw {
+ drm_drawable_t handle;
+ unsigned int type;
+ unsigned int num;
+ unsigned long long data;
+};
+
+/**
+ * DRM_IOCTL_GET_MAGIC and DRM_IOCTL_AUTH_MAGIC ioctl argument type.
+ */
+struct drm_auth {
+ drm_magic_t magic;
+};
+
+/**
+ * DRM_IOCTL_IRQ_BUSID ioctl argument type.
+ *
+ * \sa drmGetInterruptFromBusID().
+ */
+struct drm_irq_busid {
+ int irq; /**< IRQ number */
+ int busnum; /**< bus number */
+ int devnum; /**< device number */
+ int funcnum; /**< function number */
+};
+
+enum drm_vblank_seq_type {
+ _DRM_VBLANK_ABSOLUTE = 0x0, /**< Wait for specific vblank sequence number */
+ _DRM_VBLANK_RELATIVE = 0x1, /**< Wait for given number of vblanks */
+ _DRM_VBLANK_NEXTONMISS = 0x10000000, /**< If missed, wait for next vblank */
+ _DRM_VBLANK_SECONDARY = 0x20000000, /**< Secondary display controller */
+ _DRM_VBLANK_SIGNAL = 0x40000000 /**< Send signal instead of blocking */
+};
+
+#define _DRM_VBLANK_TYPES_MASK (_DRM_VBLANK_ABSOLUTE | _DRM_VBLANK_RELATIVE)
+#define _DRM_VBLANK_FLAGS_MASK (_DRM_VBLANK_SIGNAL | _DRM_VBLANK_SECONDARY | \
+ _DRM_VBLANK_NEXTONMISS)
+
+struct drm_wait_vblank_request {
+ enum drm_vblank_seq_type type;
+ unsigned int sequence;
+ unsigned long signal;
+};
+
+struct drm_wait_vblank_reply {
+ enum drm_vblank_seq_type type;
+ unsigned int sequence;
+ long tval_sec;
+ long tval_usec;
+};
+
+/**
+ * DRM_IOCTL_WAIT_VBLANK ioctl argument type.
+ *
+ * \sa drmWaitVBlank().
+ */
+union drm_wait_vblank {
+ struct drm_wait_vblank_request request;
+ struct drm_wait_vblank_reply reply;
+};
+
+/**
+ * DRM_IOCTL_AGP_ENABLE ioctl argument type.
+ *
+ * \sa drmAgpEnable().
+ */
+struct drm_agp_mode {
+ unsigned long mode; /**< AGP mode */
+};
+
+/**
+ * DRM_IOCTL_AGP_ALLOC and DRM_IOCTL_AGP_FREE ioctls argument type.
+ *
+ * \sa drmAgpAlloc() and drmAgpFree().
+ */
+struct drm_agp_buffer {
+ unsigned long size; /**< In bytes -- will round to page boundary */
+ unsigned long handle; /**< Used for binding / unbinding */
+ unsigned long type; /**< Type of memory to allocate */
+ unsigned long physical; /**< Physical used by i810 */
+};
+
+/**
+ * DRM_IOCTL_AGP_BIND and DRM_IOCTL_AGP_UNBIND ioctls argument type.
+ *
+ * \sa drmAgpBind() and drmAgpUnbind().
+ */
+struct drm_agp_binding {
+ unsigned long handle; /**< From drm_agp_buffer */
+ unsigned long offset; /**< In bytes -- will round to page boundary */
+};
+
+/**
+ * DRM_IOCTL_AGP_INFO ioctl argument type.
+ *
+ * \sa drmAgpVersionMajor(), drmAgpVersionMinor(), drmAgpGetMode(),
+ * drmAgpBase(), drmAgpSize(), drmAgpMemoryUsed(), drmAgpMemoryAvail(),
+ * drmAgpVendorId() and drmAgpDeviceId().
+ */
+struct drm_agp_info {
+ int agp_version_major;
+ int agp_version_minor;
+ unsigned long mode;
+ unsigned long aperture_base; /* physical address */
+ unsigned long aperture_size; /* bytes */
+ unsigned long memory_allowed; /* bytes */
+ unsigned long memory_used;
+
+ /* PCI information */
+ unsigned short id_vendor;
+ unsigned short id_device;
+};
+
+/**
+ * DRM_IOCTL_SG_ALLOC ioctl argument type.
+ */
+struct drm_scatter_gather {
+ unsigned long size; /**< In bytes -- will round to page boundary */
+ unsigned long handle; /**< Used for mapping / unmapping */
+};
+
+/**
+ * DRM_IOCTL_SET_VERSION ioctl argument type.
+ */
+struct drm_set_version {
+ int drm_di_major;
+ int drm_di_minor;
+ int drm_dd_major;
+ int drm_dd_minor;
+};
+
+#define DRM_IOCTL_BASE 'd'
+#define DRM_IO(nr) _IO(DRM_IOCTL_BASE,nr)
+#define DRM_IOR(nr,type) _IOR(DRM_IOCTL_BASE,nr,type)
+#define DRM_IOW(nr,type) _IOW(DRM_IOCTL_BASE,nr,type)
+#define DRM_IOWR(nr,type) _IOWR(DRM_IOCTL_BASE,nr,type)
+
+#define DRM_IOCTL_VERSION DRM_IOWR(0x00, struct drm_version)
+#define DRM_IOCTL_GET_UNIQUE DRM_IOWR(0x01, struct drm_unique)
+#define DRM_IOCTL_GET_MAGIC DRM_IOR( 0x02, struct drm_auth)
+#define DRM_IOCTL_IRQ_BUSID DRM_IOWR(0x03, struct drm_irq_busid)
+#define DRM_IOCTL_GET_MAP DRM_IOWR(0x04, struct drm_map)
+#define DRM_IOCTL_GET_CLIENT DRM_IOWR(0x05, struct drm_client)
+#define DRM_IOCTL_GET_STATS DRM_IOR( 0x06, struct drm_stats)
+#define DRM_IOCTL_SET_VERSION DRM_IOWR(0x07, struct drm_set_version)
+
+#define DRM_IOCTL_SET_UNIQUE DRM_IOW( 0x10, struct drm_unique)
+#define DRM_IOCTL_AUTH_MAGIC DRM_IOW( 0x11, struct drm_auth)
+#define DRM_IOCTL_BLOCK DRM_IOWR(0x12, struct drm_block)
+#define DRM_IOCTL_UNBLOCK DRM_IOWR(0x13, struct drm_block)
+#define DRM_IOCTL_CONTROL DRM_IOW( 0x14, struct drm_control)
+#define DRM_IOCTL_ADD_MAP DRM_IOWR(0x15, struct drm_map)
+#define DRM_IOCTL_ADD_BUFS DRM_IOWR(0x16, struct drm_buf_desc)
+#define DRM_IOCTL_MARK_BUFS DRM_IOW( 0x17, struct drm_buf_desc)
+#define DRM_IOCTL_INFO_BUFS DRM_IOWR(0x18, struct drm_buf_info)
+#define DRM_IOCTL_MAP_BUFS DRM_IOWR(0x19, struct drm_buf_map)
+#define DRM_IOCTL_FREE_BUFS DRM_IOW( 0x1a, struct drm_buf_free)
+
+#define DRM_IOCTL_RM_MAP DRM_IOW( 0x1b, struct drm_map)
+
+#define DRM_IOCTL_SET_SAREA_CTX DRM_IOW( 0x1c, struct drm_ctx_priv_map)
+#define DRM_IOCTL_GET_SAREA_CTX DRM_IOWR(0x1d, struct drm_ctx_priv_map)
+
+#define DRM_IOCTL_ADD_CTX DRM_IOWR(0x20, struct drm_ctx)
+#define DRM_IOCTL_RM_CTX DRM_IOWR(0x21, struct drm_ctx)
+#define DRM_IOCTL_MOD_CTX DRM_IOW( 0x22, struct drm_ctx)
+#define DRM_IOCTL_GET_CTX DRM_IOWR(0x23, struct drm_ctx)
+#define DRM_IOCTL_SWITCH_CTX DRM_IOW( 0x24, struct drm_ctx)
+#define DRM_IOCTL_NEW_CTX DRM_IOW( 0x25, struct drm_ctx)
+#define DRM_IOCTL_RES_CTX DRM_IOWR(0x26, struct drm_ctx_res)
+#define DRM_IOCTL_ADD_DRAW DRM_IOWR(0x27, struct drm_draw)
+#define DRM_IOCTL_RM_DRAW DRM_IOWR(0x28, struct drm_draw)
+#define DRM_IOCTL_DMA DRM_IOWR(0x29, struct drm_dma)
+#define DRM_IOCTL_LOCK DRM_IOW( 0x2a, struct drm_lock)
+#define DRM_IOCTL_UNLOCK DRM_IOW( 0x2b, struct drm_lock)
+#define DRM_IOCTL_FINISH DRM_IOW( 0x2c, struct drm_lock)
+
+#define DRM_IOCTL_AGP_ACQUIRE DRM_IO( 0x30)
+#define DRM_IOCTL_AGP_RELEASE DRM_IO( 0x31)
+#define DRM_IOCTL_AGP_ENABLE DRM_IOW( 0x32, struct drm_agp_mode)
+#define DRM_IOCTL_AGP_INFO DRM_IOR( 0x33, struct drm_agp_info)
+#define DRM_IOCTL_AGP_ALLOC DRM_IOWR(0x34, struct drm_agp_buffer)
+#define DRM_IOCTL_AGP_FREE DRM_IOW( 0x35, struct drm_agp_buffer)
+#define DRM_IOCTL_AGP_BIND DRM_IOW( 0x36, struct drm_agp_binding)
+#define DRM_IOCTL_AGP_UNBIND DRM_IOW( 0x37, struct drm_agp_binding)
+
+#define DRM_IOCTL_SG_ALLOC DRM_IOWR(0x38, struct drm_scatter_gather)
+#define DRM_IOCTL_SG_FREE DRM_IOW( 0x39, struct drm_scatter_gather)
+
+#define DRM_IOCTL_WAIT_VBLANK DRM_IOWR(0x3a, union drm_wait_vblank)
+
+#define DRM_IOCTL_UPDATE_DRAW DRM_IOW(0x3f, struct drm_update_draw)
+
+/**
+ * Device specific ioctls should only be in their respective headers
+ * The device specific ioctl range is from 0x40 to 0x99.
+ * Generic IOCTLS restart at 0xA0.
+ *
+ * \sa drmCommandNone(), drmCommandRead(), drmCommandWrite(), and
+ * drmCommandReadWrite().
+ */
+#define DRM_COMMAND_BASE 0x40
+#define DRM_COMMAND_END 0xA0
+
+/* typedef area */
+#ifndef __KERNEL__
+typedef struct drm_clip_rect drm_clip_rect_t;
+typedef struct drm_drawable_info drm_drawable_info_t;
+typedef struct drm_tex_region drm_tex_region_t;
+typedef struct drm_hw_lock drm_hw_lock_t;
+typedef struct drm_version drm_version_t;
+typedef struct drm_unique drm_unique_t;
+typedef struct drm_list drm_list_t;
+typedef struct drm_block drm_block_t;
+typedef struct drm_control drm_control_t;
+typedef enum drm_map_type drm_map_type_t;
+typedef enum drm_map_flags drm_map_flags_t;
+typedef struct drm_ctx_priv_map drm_ctx_priv_map_t;
+typedef struct drm_map drm_map_t;
+typedef struct drm_client drm_client_t;
+typedef enum drm_stat_type drm_stat_type_t;
+typedef struct drm_stats drm_stats_t;
+typedef enum drm_lock_flags drm_lock_flags_t;
+typedef struct drm_lock drm_lock_t;
+typedef enum drm_dma_flags drm_dma_flags_t;
+typedef struct drm_buf_desc drm_buf_desc_t;
+typedef struct drm_buf_info drm_buf_info_t;
+typedef struct drm_buf_free drm_buf_free_t;
+typedef struct drm_buf_pub drm_buf_pub_t;
+typedef struct drm_buf_map drm_buf_map_t;
+typedef struct drm_dma drm_dma_t;
+typedef union drm_wait_vblank drm_wait_vblank_t;
+typedef struct drm_agp_mode drm_agp_mode_t;
+typedef enum drm_ctx_flags drm_ctx_flags_t;
+typedef struct drm_ctx drm_ctx_t;
+typedef struct drm_ctx_res drm_ctx_res_t;
+typedef struct drm_draw drm_draw_t;
+typedef struct drm_update_draw drm_update_draw_t;
+typedef struct drm_auth drm_auth_t;
+typedef struct drm_irq_busid drm_irq_busid_t;
+typedef enum drm_vblank_seq_type drm_vblank_seq_type_t;
+
+typedef struct drm_agp_buffer drm_agp_buffer_t;
+typedef struct drm_agp_binding drm_agp_binding_t;
+typedef struct drm_agp_info drm_agp_info_t;
+typedef struct drm_scatter_gather drm_scatter_gather_t;
+typedef struct drm_set_version drm_set_version_t;
+#endif
+
+#endif
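(Editorial note, not part of the commit.) The header above is exported to user space through the new include/drm/Kbuild entry, and its version query is what libdrm's drmGetVersion() wraps (see the \sa reference on struct drm_version). Below is a minimal user-space sketch of the usual two-call pattern against DRM_IOCTL_VERSION; the device node path /dev/dri/card0 and the installed header name <drm/drm.h> are illustrative assumptions, not something this commit establishes.

/* Sketch: query the DRM driver version via DRM_IOCTL_VERSION.
 * Assumes the exported header is visible as <drm/drm.h> and that the
 * first card is /dev/dri/card0 -- both are assumptions for illustration. */
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <drm/drm.h>

int main(void)
{
	struct drm_version v;
	int fd = open("/dev/dri/card0", O_RDWR);

	if (fd < 0)
		return 1;

	/* First call with zeroed buffers: the kernel reports the string
	 * lengths in name_len/date_len/desc_len. */
	memset(&v, 0, sizeof(v));
	if (ioctl(fd, DRM_IOCTL_VERSION, &v)) {
		close(fd);
		return 1;
	}

	/* Second call with user-space buffers of the reported sizes. */
	v.name = calloc(v.name_len + 1, 1);
	v.date = calloc(v.date_len + 1, 1);
	v.desc = calloc(v.desc_len + 1, 1);
	if (ioctl(fd, DRM_IOCTL_VERSION, &v) == 0)
		printf("%s %d.%d.%d (%s)\n", v.name, v.version_major,
		       v.version_minor, v.version_patchlevel, v.desc);

	free(v.name);
	free(v.date);
	free(v.desc);
	close(fd);
	return 0;
}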
diff --git a/include/drm/drmP.h b/include/drm/drmP.h
new file mode 100644
index 0000000..0764b66
--- /dev/null
+++ b/include/drm/drmP.h
@@ -0,0 +1,1153 @@
+/**
+ * \file drmP.h
+ * Private header for Direct Rendering Manager
+ *
+ * \author Rickard E. (Rik) Faith <faith@valinux.com>
+ * \author Gareth Hughes <gareth@valinux.com>
+ */
+
+/*
+ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _DRM_P_H_
+#define _DRM_P_H_
+
+/* If you want the memory alloc debug functionality, change define below */
+/* #define DEBUG_MEMORY */
+
+#ifdef __KERNEL__
+#ifdef __alpha__
+/* add include of current.h so that "current" is defined
+ * before static inline funcs in wait.h. Doing this so we
+ * can build the DRM (part of PI DRI). 4/21/2000 S + B */
+#include <asm/current.h>
+#endif /* __alpha__ */
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/miscdevice.h>
+#include <linux/fs.h>
+#include <linux/proc_fs.h>
+#include <linux/init.h>
+#include <linux/file.h>
+#include <linux/pci.h>
+#include <linux/jiffies.h>
+#include <linux/smp_lock.h> /* For (un)lock_kernel */
+#include <linux/dma-mapping.h>
+#include <linux/mm.h>
+#include <linux/cdev.h>
+#include <linux/mutex.h>
+#if defined(__alpha__) || defined(__powerpc__)
+#include <asm/pgtable.h> /* For pte_wrprotect */
+#endif
+#include <asm/io.h>
+#include <asm/mman.h>
+#include <asm/uaccess.h>
+#ifdef CONFIG_MTRR
+#include <asm/mtrr.h>
+#endif
+#if defined(CONFIG_AGP) || defined(CONFIG_AGP_MODULE)
+#include <linux/types.h>
+#include <linux/agp_backend.h>
+#endif
+#include <linux/workqueue.h>
+#include <linux/poll.h>
+#include <asm/pgalloc.h>
+#include "drm.h"
+
+#include <linux/idr.h>
+
+#define __OS_HAS_AGP (defined(CONFIG_AGP) || (defined(CONFIG_AGP_MODULE) && defined(MODULE)))
+#define __OS_HAS_MTRR (defined(CONFIG_MTRR))
+
+struct drm_file;
+struct drm_device;
+
+#include "drm_os_linux.h"
+#include "drm_hashtab.h"
+
+/***********************************************************************/
+/** \name DRM template customization defaults */
+/*@{*/
+
+/* driver capabilities and requirements mask */
+#define DRIVER_USE_AGP 0x1
+#define DRIVER_REQUIRE_AGP 0x2
+#define DRIVER_USE_MTRR 0x4
+#define DRIVER_PCI_DMA 0x8
+#define DRIVER_SG 0x10
+#define DRIVER_HAVE_DMA 0x20
+#define DRIVER_HAVE_IRQ 0x40
+#define DRIVER_IRQ_SHARED 0x80
+#define DRIVER_IRQ_VBL 0x100
+#define DRIVER_DMA_QUEUE 0x200
+#define DRIVER_FB_DMA 0x400
+#define DRIVER_IRQ_VBL2 0x800
+
+/***********************************************************************/
+/** \name Begin the DRM... */
+/*@{*/
+
+#define DRM_DEBUG_CODE 2 /**< Include debugging code if > 1, then
+ also include looping detection. */
+
+#define DRM_MAGIC_HASH_ORDER 4 /**< Size of key hash table. Must be power of 2. */
+#define DRM_KERNEL_CONTEXT 0 /**< Change drm_resctx if changed */
+#define DRM_RESERVED_CONTEXTS 1 /**< Change drm_resctx if changed */
+#define DRM_LOOPING_LIMIT 5000000
+#define DRM_TIME_SLICE (HZ/20) /**< Time slice for GLXContexts */
+#define DRM_LOCK_SLICE 1 /**< Time slice for lock, in jiffies */
+
+#define DRM_FLAG_DEBUG 0x01
+
+#define DRM_MEM_DMA 0
+#define DRM_MEM_SAREA 1
+#define DRM_MEM_DRIVER 2
+#define DRM_MEM_MAGIC 3
+#define DRM_MEM_IOCTLS 4
+#define DRM_MEM_MAPS 5
+#define DRM_MEM_VMAS 6
+#define DRM_MEM_BUFS 7
+#define DRM_MEM_SEGS 8
+#define DRM_MEM_PAGES 9
+#define DRM_MEM_FILES 10
+#define DRM_MEM_QUEUES 11
+#define DRM_MEM_CMDS 12
+#define DRM_MEM_MAPPINGS 13
+#define DRM_MEM_BUFLISTS 14
+#define DRM_MEM_AGPLISTS 15
+#define DRM_MEM_TOTALAGP 16
+#define DRM_MEM_BOUNDAGP 17
+#define DRM_MEM_CTXBITMAP 18
+#define DRM_MEM_STUB 19
+#define DRM_MEM_SGLISTS 20
+#define DRM_MEM_CTXLIST 21
+#define DRM_MEM_MM 22
+#define DRM_MEM_HASHTAB 23
+
+#define DRM_MAX_CTXBITMAP (PAGE_SIZE * 8)
+#define DRM_MAP_HASH_OFFSET 0x10000000
+
+/*@}*/
+
+/***********************************************************************/
+/** \name Macros to make printk easier */
+/*@{*/
+
+/**
+ * Error output.
+ *
+ * \param fmt printf() like format string.
+ * \param arg arguments
+ */
+#define DRM_ERROR(fmt, arg...) \
+ printk(KERN_ERR "[" DRM_NAME ":%s] *ERROR* " fmt , __func__ , ##arg)
+
+/**
+ * Memory error output.
+ *
+ * \param area memory area where the error occurred.
+ * \param fmt printf() like format string.
+ * \param arg arguments
+ */
+#define DRM_MEM_ERROR(area, fmt, arg...) \
+ printk(KERN_ERR "[" DRM_NAME ":%s:%s] *ERROR* " fmt , __func__, \
+ drm_mem_stats[area].name , ##arg)
+
+#define DRM_INFO(fmt, arg...) printk(KERN_INFO "[" DRM_NAME "] " fmt , ##arg)
+
+/**
+ * Debug output.
+ *
+ * \param fmt printf() like format string.
+ * \param arg arguments
+ */
+#if DRM_DEBUG_CODE
+#define DRM_DEBUG(fmt, arg...) \
+ do { \
+ if ( drm_debug ) \
+ printk(KERN_DEBUG \
+ "[" DRM_NAME ":%s] " fmt , \
+ __func__ , ##arg); \
+ } while (0)
+#else
+#define DRM_DEBUG(fmt, arg...) do { } while (0)
+#endif
+
+#define DRM_PROC_LIMIT (PAGE_SIZE-80)
+
+#define DRM_PROC_PRINT(fmt, arg...) \
+ len += sprintf(&buf[len], fmt , ##arg); \
+ if (len > DRM_PROC_LIMIT) { *eof = 1; return len - offset; }
+
+#define DRM_PROC_PRINT_RET(ret, fmt, arg...) \
+ len += sprintf(&buf[len], fmt , ##arg); \
+ if (len > DRM_PROC_LIMIT) { ret; *eof = 1; return len - offset; }
+
+/*@}*/
+
+/***********************************************************************/
+/** \name Internal types and structures */
+/*@{*/
+
+#define DRM_ARRAY_SIZE(x) ARRAY_SIZE(x)
+
+#define DRM_LEFTCOUNT(x) (((x)->rp + (x)->count - (x)->wp) % ((x)->count + 1))
+#define DRM_BUFCOUNT(x) ((x)->count - DRM_LEFTCOUNT(x))
+#define DRM_WAITCOUNT(dev,idx) DRM_BUFCOUNT(&dev->queuelist[idx]->waitlist)
+
+#define DRM_IF_VERSION(maj, min) (maj << 16 | min)
+/**
+ * Get the private SAREA mapping.
+ *
+ * \param _dev DRM device.
+ * \param _ctx context number.
+ * \param _map output mapping.
+ */
+#define DRM_GET_PRIV_SAREA(_dev, _ctx, _map) do { \
+ (_map) = (_dev)->context_sareas[_ctx]; \
+} while(0)
+
+/**
+ * Test that the hardware lock is held by the caller, returning otherwise.
+ *
+ * \param dev DRM device.
+ * \param filp file pointer of the caller.
+ */
+#define LOCK_TEST_WITH_RETURN( dev, file_priv ) \
+do { \
+ if ( !_DRM_LOCK_IS_HELD( dev->lock.hw_lock->lock ) || \
+ dev->lock.file_priv != file_priv ) { \
+ DRM_ERROR( "%s called without lock held, held %d owner %p %p\n",\
+ __func__, _DRM_LOCK_IS_HELD( dev->lock.hw_lock->lock ),\
+ dev->lock.file_priv, file_priv ); \
+ return -EINVAL; \
+ } \
+} while (0)
+
+/**
+ * Copy an IOCTL return string to user space
+ */
+#define DRM_COPY( name, value ) \
+ len = strlen( value ); \
+ if ( len > name##_len ) len = name##_len; \
+ name##_len = strlen( value ); \
+ if ( len && name ) { \
+ if ( copy_to_user( name, value, len ) ) \
+ return -EFAULT; \
+ }
+
+/**
+ * Ioctl function type.
+ *
+ * \param inode device inode.
+ * \param file_priv DRM file private pointer.
+ * \param cmd command.
+ * \param arg argument.
+ */
+typedef int drm_ioctl_t(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+
+typedef int drm_ioctl_compat_t(struct file *filp, unsigned int cmd,
+ unsigned long arg);
+
+#define DRM_AUTH 0x1
+#define DRM_MASTER 0x2
+#define DRM_ROOT_ONLY 0x4
+
+struct drm_ioctl_desc {
+ unsigned int cmd;
+ drm_ioctl_t *func;
+ int flags;
+};
+
+/**
+ * Creates a driver or general drm_ioctl_desc array entry for the given
+ * ioctl, for use by drm_ioctl().
+ */
+#define DRM_IOCTL_DEF(ioctl, func, flags) \
+ [DRM_IOCTL_NR(ioctl)] = {ioctl, func, flags}
+
+struct drm_magic_entry {
+ struct list_head head;
+ struct drm_hash_item hash_item;
+ struct drm_file *priv;
+};
+
+struct drm_vma_entry {
+ struct list_head head;
+ struct vm_area_struct *vma;
+ pid_t pid;
+};
+
+/**
+ * DMA buffer.
+ */
+struct drm_buf {
+ int idx; /**< Index into master buflist */
+ int total; /**< Buffer size */
+ int order; /**< log-base-2(total) */
+ int used; /**< Amount of buffer in use (for DMA) */
+ unsigned long offset; /**< Byte offset (used internally) */
+ void *address; /**< Address of buffer */
+ unsigned long bus_address; /**< Bus address of buffer */
+ struct drm_buf *next; /**< Kernel-only: used for free list */
+ __volatile__ int waiting; /**< On kernel DMA queue */
+ __volatile__ int pending; /**< On hardware DMA queue */
+ wait_queue_head_t dma_wait; /**< Processes waiting */
+ struct drm_file *file_priv; /**< Private of holding file descr */
+ int context; /**< Kernel queue for this buffer */
+ int while_locked; /**< Dispatch this buffer while locked */
+ enum {
+ DRM_LIST_NONE = 0,
+ DRM_LIST_FREE = 1,
+ DRM_LIST_WAIT = 2,
+ DRM_LIST_PEND = 3,
+ DRM_LIST_PRIO = 4,
+ DRM_LIST_RECLAIM = 5
+ } list; /**< Which list we're on */
+
+ int dev_priv_size; /**< Size of buffer private storage */
+ void *dev_private; /**< Per-buffer private storage */
+};
+
+/** bufs is one longer than it has to be */
+struct drm_waitlist {
+ int count; /**< Number of possible buffers */
+ struct drm_buf **bufs; /**< List of pointers to buffers */
+ struct drm_buf **rp; /**< Read pointer */
+ struct drm_buf **wp; /**< Write pointer */
+ struct drm_buf **end; /**< End pointer */
+ spinlock_t read_lock;
+ spinlock_t write_lock;
+};
+
+struct drm_freelist {
+ int initialized; /**< Freelist in use */
+ atomic_t count; /**< Number of free buffers */
+ struct drm_buf *next; /**< End pointer */
+
+ wait_queue_head_t waiting; /**< Processes waiting on free bufs */
+ int low_mark; /**< Low water mark */
+ int high_mark; /**< High water mark */
+ atomic_t wfh; /**< If waiting for high mark */
+ spinlock_t lock;
+};
+
+typedef struct drm_dma_handle {
+ dma_addr_t busaddr;
+ void *vaddr;
+ size_t size;
+} drm_dma_handle_t;
+
+/**
+ * Buffer entry. There is one of these for each buffer size order.
+ */
+struct drm_buf_entry {
+ int buf_size; /**< size */
+ int buf_count; /**< number of buffers */
+ struct drm_buf *buflist; /**< buffer list */
+ int seg_count;
+ int page_order;
+ struct drm_dma_handle **seglist;
+
+ struct drm_freelist freelist;
+};
+
+/** File private data */
+struct drm_file {
+ int authenticated;
+ int master;
+ pid_t pid;
+ uid_t uid;
+ drm_magic_t magic;
+ unsigned long ioctl_count;
+ struct list_head lhead;
+ struct drm_minor *minor;
+ int remove_auth_on_close;
+ unsigned long lock_count;
+ struct file *filp;
+ void *driver_priv;
+};
+
+/** Wait queue */
+struct drm_queue {
+ atomic_t use_count; /**< Outstanding uses (+1) */
+ atomic_t finalization; /**< Finalization in progress */
+ atomic_t block_count; /**< Count of processes waiting */
+ atomic_t block_read; /**< Queue blocked for reads */
+ wait_queue_head_t read_queue; /**< Processes waiting on block_read */
+ atomic_t block_write; /**< Queue blocked for writes */
+ wait_queue_head_t write_queue; /**< Processes waiting on block_write */
+ atomic_t total_queued; /**< Total queued statistic */
+ atomic_t total_flushed; /**< Total flushes statistic */
+ atomic_t total_locks; /**< Total locks statistics */
+ enum drm_ctx_flags flags; /**< Context preserving and 2D-only */
+ struct drm_waitlist waitlist; /**< Pending buffers */
+ wait_queue_head_t flush_queue; /**< Processes waiting until flush */
+};
+
+/**
+ * Lock data.
+ */
+struct drm_lock_data {
+ struct drm_hw_lock *hw_lock; /**< Hardware lock */
+ /** Private of lock holder's file (NULL=kernel) */
+ struct drm_file *file_priv;
+ wait_queue_head_t lock_queue; /**< Queue of blocked processes */
+ unsigned long lock_time; /**< Time of last lock in jiffies */
+ spinlock_t spinlock;
+ uint32_t kernel_waiters;
+ uint32_t user_waiters;
+ int idle_has_lock;
+};
+
+/**
+ * DMA data.
+ */
+struct drm_device_dma {
+
+ struct drm_buf_entry bufs[DRM_MAX_ORDER + 1]; /**< buffers, grouped by their size order */
+ int buf_count; /**< total number of buffers */
+ struct drm_buf **buflist; /**< Vector of pointers into drm_device_dma::bufs */
+ int seg_count;
+ int page_count; /**< number of pages */
+ unsigned long *pagelist; /**< page list */
+ unsigned long byte_count;
+ enum {
+ _DRM_DMA_USE_AGP = 0x01,
+ _DRM_DMA_USE_SG = 0x02,
+ _DRM_DMA_USE_FB = 0x04,
+ _DRM_DMA_USE_PCI_RO = 0x08
+ } flags;
+
+};
+
+/**
+ * AGP memory entry. Stored as a doubly linked list.
+ */
+struct drm_agp_mem {
+ unsigned long handle; /**< handle */
+ DRM_AGP_MEM *memory;
+ unsigned long bound; /**< address */
+ int pages;
+ struct list_head head;
+};
+
+/**
+ * AGP data.
+ *
+ * \sa drm_agp_init() and drm_device::agp.
+ */
+struct drm_agp_head {
+ DRM_AGP_KERN agp_info; /**< AGP device information */
+ struct list_head memory;
+ unsigned long mode; /**< AGP mode */
+ struct agp_bridge_data *bridge;
+ int enabled; /**< whether the AGP bus has been enabled */
+ int acquired; /**< whether the AGP device has been acquired */
+ unsigned long base;
+ int agp_mtrr;
+ int cant_use_aperture;
+ unsigned long page_mask;
+};
+
+/**
+ * Scatter-gather memory.
+ */
+struct drm_sg_mem {
+ unsigned long handle;
+ void *virtual;
+ int pages;
+ struct page **pagelist;
+ dma_addr_t *busaddr;
+};
+
+struct drm_sigdata {
+ int context;
+ struct drm_hw_lock *lock;
+};
+
+
+/*
+ * Generic memory manager structs
+ */
+
+struct drm_mm_node {
+ struct list_head fl_entry;
+ struct list_head ml_entry;
+ int free;
+ unsigned long start;
+ unsigned long size;
+ struct drm_mm *mm;
+ void *private;
+};
+
+struct drm_mm {
+ struct list_head fl_entry;
+ struct list_head ml_entry;
+};
+
+
+/**
+ * Mappings list
+ */
+struct drm_map_list {
+ struct list_head head; /**< list head */
+ struct drm_hash_item hash;
+ struct drm_map *map; /**< mapping */
+ uint64_t user_token;
+};
+
+typedef struct drm_map drm_local_map_t;
+
+/**
+ * Context handle list
+ */
+struct drm_ctx_list {
+ struct list_head head; /**< list head */
+ drm_context_t handle; /**< context handle */
+ struct drm_file *tag; /**< associated fd private data */
+};
+
+struct drm_vbl_sig {
+ struct list_head head;
+ unsigned int sequence;
+ struct siginfo info;
+ struct task_struct *task;
+};
+
+/* location of GART table */
+#define DRM_ATI_GART_MAIN 1
+#define DRM_ATI_GART_FB 2
+
+#define DRM_ATI_GART_PCI 1
+#define DRM_ATI_GART_PCIE 2
+#define DRM_ATI_GART_IGP 3
+
+struct drm_ati_pcigart_info {
+ int gart_table_location;
+ int gart_reg_if;
+ void *addr;
+ dma_addr_t bus_addr;
+ dma_addr_t table_mask;
+ struct drm_dma_handle *table_handle;
+ drm_local_map_t mapping;
+ int table_size;
+};
+
+/**
+ * DRM driver structure. This structure represents the common code for
+ * a family of cards. There will be one drm_device for each card present
+ * in this family.
+ */
+struct drm_driver {
+ int (*load) (struct drm_device *, unsigned long flags);
+ int (*firstopen) (struct drm_device *);
+ int (*open) (struct drm_device *, struct drm_file *);
+ void (*preclose) (struct drm_device *, struct drm_file *file_priv);
+ void (*postclose) (struct drm_device *, struct drm_file *);
+ void (*lastclose) (struct drm_device *);
+ int (*unload) (struct drm_device *);
+ int (*suspend) (struct drm_device *, pm_message_t state);
+ int (*resume) (struct drm_device *);
+ int (*dma_ioctl) (struct drm_device *dev, void *data, struct drm_file *file_priv);
+ void (*dma_ready) (struct drm_device *);
+ int (*dma_quiescent) (struct drm_device *);
+ int (*context_ctor) (struct drm_device *dev, int context);
+ int (*context_dtor) (struct drm_device *dev, int context);
+ int (*kernel_context_switch) (struct drm_device *dev, int old,
+ int new);
+ void (*kernel_context_switch_unlock) (struct drm_device *dev);
+ int (*vblank_wait) (struct drm_device *dev, unsigned int *sequence);
+ int (*vblank_wait2) (struct drm_device *dev, unsigned int *sequence);
+ int (*dri_library_name) (struct drm_device *dev, char *buf);
+
+ /**
+ * Called by \c drm_device_is_agp. Typically used to determine if a
+ * card is really attached to AGP or not.
+ *
+ * \param dev DRM device handle
+ *
+ * \returns
+ * One of three values is returned depending on whether or not the
+ * card is absolutely \b not AGP (return of 0), absolutely \b is AGP
+ * (return of 1), or may or may not be AGP (return of 2).
+ */
+ int (*device_is_agp) (struct drm_device *dev);
+
+ /* these have to be filled in */
+
+ irqreturn_t(*irq_handler) (DRM_IRQ_ARGS);
+ void (*irq_preinstall) (struct drm_device *dev);
+ void (*irq_postinstall) (struct drm_device *dev);
+ void (*irq_uninstall) (struct drm_device *dev);
+ void (*reclaim_buffers) (struct drm_device *dev,
+ struct drm_file * file_priv);
+ void (*reclaim_buffers_locked) (struct drm_device *dev,
+ struct drm_file *file_priv);
+ void (*reclaim_buffers_idlelocked) (struct drm_device *dev,
+ struct drm_file *file_priv);
+ unsigned long (*get_map_ofs) (struct drm_map * map);
+ unsigned long (*get_reg_ofs) (struct drm_device *dev);
+ void (*set_version) (struct drm_device *dev,
+ struct drm_set_version *sv);
+
+ int major;
+ int minor;
+ int patchlevel;
+ char *name;
+ char *desc;
+ char *date;
+
+ u32 driver_features;
+ int dev_priv_size;
+ struct drm_ioctl_desc *ioctls;
+ int num_ioctls;
+ struct file_operations fops;
+ struct pci_driver pci_driver;
+};
+
+#define DRM_MINOR_UNASSIGNED 0
+#define DRM_MINOR_LEGACY 1
+
+/**
+ * DRM minor structure. This structure represents a drm minor number.
+ */
+struct drm_minor {
+ int index; /**< Minor device number */
+ int type; /**< Control or render */
+ dev_t device; /**< Device number for mknod */
+ struct device kdev; /**< Linux device */
+ struct drm_device *dev;
+ struct proc_dir_entry *dev_root; /**< proc directory entry */
+};
+
+/**
+ * DRM device structure. This structure represents a complete card that
+ * may contain multiple heads.
+ */
+struct drm_device {
+ char *unique; /**< Unique identifier: e.g., busid */
+ int unique_len; /**< Length of unique field */
+ char *devname; /**< For /proc/interrupts */
+ int if_version; /**< Highest interface version set */
+
+ int blocked; /**< Blocked due to VC switch? */
+
+ /** \name Locks */
+ /*@{ */
+ spinlock_t count_lock; /**< For inuse, drm_device::open_count, drm_device::buf_use */
+ struct mutex struct_mutex; /**< For others */
+ /*@} */
+
+ /** \name Usage Counters */
+ /*@{ */
+ int open_count; /**< Outstanding files open */
+ atomic_t ioctl_count; /**< Outstanding IOCTLs pending */
+ atomic_t vma_count; /**< Outstanding vma areas open */
+ int buf_use; /**< Buffers in use -- cannot alloc */
+ atomic_t buf_alloc; /**< Buffer allocation in progress */
+ /*@} */
+
+ /** \name Performance counters */
+ /*@{ */
+ unsigned long counters;
+ enum drm_stat_type types[15];
+ atomic_t counts[15];
+ /*@} */
+
+ /** \name Authentication */
+ /*@{ */
+ struct list_head filelist;
+ struct drm_open_hash magiclist; /**< magic hash table */
+ struct list_head magicfree;
+ /*@} */
+
+ /** \name Memory management */
+ /*@{ */
+ struct list_head maplist; /**< Linked list of regions */
+ int map_count; /**< Number of mappable regions */
+ struct drm_open_hash map_hash; /**< User token hash table for maps */
+
+ /** \name Context handle management */
+ /*@{ */
+ struct list_head ctxlist; /**< Linked list of context handles */
+ int ctx_count; /**< Number of context handles */
+ struct mutex ctxlist_mutex; /**< For ctxlist */
+
+ struct idr ctx_idr;
+
+ struct list_head vmalist; /**< List of vmas (for debugging) */
+ struct drm_lock_data lock; /**< Information on hardware lock */
+ /*@} */
+
+ /** \name DMA queues (contexts) */
+ /*@{ */
+ int queue_count; /**< Number of active DMA queues */
+ int queue_reserved; /**< Number of reserved DMA queues */
+ int queue_slots; /**< Actual length of queuelist */
+ struct drm_queue **queuelist; /**< Vector of pointers to DMA queues */
+ struct drm_device_dma *dma; /**< Optional pointer for DMA support */
+ /*@} */
+
+ /** \name Context support */
+ /*@{ */
+ int irq; /**< Interrupt used by board */
+ int irq_enabled; /**< True if irq handler is enabled */
+ __volatile__ long context_flag; /**< Context swapping flag */
+ __volatile__ long interrupt_flag; /**< Interruption handler flag */
+ __volatile__ long dma_flag; /**< DMA dispatch flag */
+ struct timer_list timer; /**< Timer for delaying ctx switch */
+ wait_queue_head_t context_wait; /**< Processes waiting on ctx switch */
+ int last_checked; /**< Last context checked for DMA */
+ int last_context; /**< Last current context */
+ unsigned long last_switch; /**< jiffies at last context switch */
+ /*@} */
+
+ struct work_struct work;
+ /** \name VBLANK IRQ support */
+ /*@{ */
+
+ wait_queue_head_t vbl_queue; /**< VBLANK wait queue */
+ atomic_t vbl_received;
+ atomic_t vbl_received2; /**< number of secondary VBLANK interrupts */
+ spinlock_t vbl_lock;
+ struct list_head vbl_sigs; /**< signal list to send on VBLANK */
+ struct list_head vbl_sigs2; /**< signals to send on secondary VBLANK */
+ unsigned int vbl_pending;
+ spinlock_t tasklet_lock; /**< For drm_locked_tasklet */
+ void (*locked_tasklet_func)(struct drm_device *dev);
+
+ /*@} */
+ cycles_t ctx_start;
+ cycles_t lck_start;
+
+ struct fasync_struct *buf_async;/**< Processes waiting for SIGIO */
+ wait_queue_head_t buf_readers; /**< Processes waiting to read */
+ wait_queue_head_t buf_writers; /**< Processes waiting to ctx switch */
+
+ struct drm_agp_head *agp; /**< AGP data */
+
+ struct pci_dev *pdev; /**< PCI device structure */
+ int pci_vendor; /**< PCI vendor id */
+ int pci_device; /**< PCI device id */
+#ifdef __alpha__
+ struct pci_controller *hose;
+#endif
+ struct drm_sg_mem *sg; /**< Scatter gather memory */
+ void *dev_private; /**< device private data */
+ struct drm_sigdata sigdata; /**< For block_all_signals */
+ sigset_t sigmask;
+
+ struct drm_driver *driver;
+ drm_local_map_t *agp_buffer_map;
+ unsigned int agp_buffer_token;
+ struct drm_minor *primary; /**< render type primary screen head */
+
+ /** \name Drawable information */
+ /*@{ */
+ spinlock_t drw_lock;
+ struct idr drw_idr;
+ /*@} */
+};
+
+static __inline__ int drm_core_check_feature(struct drm_device *dev,
+ int feature)
+{
+ return ((dev->driver->driver_features & feature) ? 1 : 0);
+}
+
+#ifdef __alpha__
+#define drm_get_pci_domain(dev) dev->hose->index
+#else
+#define drm_get_pci_domain(dev) 0
+#endif
+
+#if __OS_HAS_AGP
+static inline int drm_core_has_AGP(struct drm_device *dev)
+{
+ return drm_core_check_feature(dev, DRIVER_USE_AGP);
+}
+#else
+#define drm_core_has_AGP(dev) (0)
+#endif
+
+#if __OS_HAS_MTRR
+static inline int drm_core_has_MTRR(struct drm_device *dev)
+{
+ return drm_core_check_feature(dev, DRIVER_USE_MTRR);
+}
+
+#define DRM_MTRR_WC MTRR_TYPE_WRCOMB
+
+static inline int drm_mtrr_add(unsigned long offset, unsigned long size,
+ unsigned int flags)
+{
+ return mtrr_add(offset, size, flags, 1);
+}
+
+static inline int drm_mtrr_del(int handle, unsigned long offset,
+ unsigned long size, unsigned int flags)
+{
+ return mtrr_del(handle, offset, size);
+}
+
+#else
+#define drm_core_has_MTRR(dev) (0)
+
+#define DRM_MTRR_WC 0
+
+static inline int drm_mtrr_add(unsigned long offset, unsigned long size,
+ unsigned int flags)
+{
+ return 0;
+}
+
+static inline int drm_mtrr_del(int handle, unsigned long offset,
+ unsigned long size, unsigned int flags)
+{
+ return 0;
+}
+#endif
+
+/******************************************************************/
+/** \name Internal function definitions */
+/*@{*/
+
+ /* Driver support (drm_drv.h) */
+extern int drm_init(struct drm_driver *driver);
+extern void drm_exit(struct drm_driver *driver);
+extern int drm_ioctl(struct inode *inode, struct file *filp,
+ unsigned int cmd, unsigned long arg);
+extern long drm_compat_ioctl(struct file *filp,
+ unsigned int cmd, unsigned long arg);
+extern int drm_lastclose(struct drm_device *dev);
+
+ /* Device support (drm_fops.h) */
+extern int drm_open(struct inode *inode, struct file *filp);
+extern int drm_stub_open(struct inode *inode, struct file *filp);
+extern int drm_fasync(int fd, struct file *filp, int on);
+extern int drm_release(struct inode *inode, struct file *filp);
+
+ /* Mapping support (drm_vm.h) */
+extern int drm_mmap(struct file *filp, struct vm_area_struct *vma);
+extern unsigned long drm_core_get_map_ofs(struct drm_map * map);
+extern unsigned long drm_core_get_reg_ofs(struct drm_device *dev);
+extern unsigned int drm_poll(struct file *filp, struct poll_table_struct *wait);
+
+ /* Memory management support (drm_memory.h) */
+#include "drm_memory.h"
+extern void drm_mem_init(void);
+extern int drm_mem_info(char *buf, char **start, off_t offset,
+ int request, int *eof, void *data);
+extern void *drm_realloc(void *oldpt, size_t oldsize, size_t size, int area);
+
+extern DRM_AGP_MEM *drm_alloc_agp(struct drm_device *dev, int pages, u32 type);
+extern int drm_free_agp(DRM_AGP_MEM * handle, int pages);
+extern int drm_bind_agp(DRM_AGP_MEM * handle, unsigned int start);
+extern int drm_unbind_agp(DRM_AGP_MEM * handle);
+
+ /* Misc. IOCTL support (drm_ioctl.h) */
+extern int drm_irq_by_busid(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+extern int drm_getunique(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+extern int drm_setunique(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+extern int drm_getmap(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+extern int drm_getclient(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+extern int drm_getstats(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+extern int drm_setversion(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+extern int drm_noop(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+
+ /* Context IOCTL support (drm_context.h) */
+extern int drm_resctx(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+extern int drm_addctx(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+extern int drm_modctx(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+extern int drm_getctx(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+extern int drm_switchctx(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+extern int drm_newctx(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+extern int drm_rmctx(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+
+extern int drm_ctxbitmap_init(struct drm_device *dev);
+extern void drm_ctxbitmap_cleanup(struct drm_device *dev);
+extern void drm_ctxbitmap_free(struct drm_device *dev, int ctx_handle);
+
+extern int drm_setsareactx(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+extern int drm_getsareactx(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+
+ /* Drawable IOCTL support (drm_drawable.h) */
+extern int drm_adddraw(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+extern int drm_rmdraw(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+extern int drm_update_drawable_info(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+extern struct drm_drawable_info *drm_get_drawable_info(struct drm_device *dev,
+ drm_drawable_t id);
+extern void drm_drawable_free_all(struct drm_device *dev);
+
+ /* Authentication IOCTL support (drm_auth.h) */
+extern int drm_getmagic(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+extern int drm_authmagic(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+
+ /* Locking IOCTL support (drm_lock.h) */
+extern int drm_lock(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+extern int drm_unlock(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+extern int drm_lock_take(struct drm_lock_data *lock_data, unsigned int context);
+extern int drm_lock_free(struct drm_lock_data *lock_data, unsigned int context);
+extern void drm_idlelock_take(struct drm_lock_data *lock_data);
+extern void drm_idlelock_release(struct drm_lock_data *lock_data);
+
+/*
+ * These are exported to drivers so that they can implement fencing using
+ * DMA quiescent + idle. DMA quiescent usually requires the hardware lock.
+ */
+
+extern int drm_i_have_hw_lock(struct drm_device *dev, struct drm_file *file_priv);
+
+ /* Buffer management support (drm_bufs.h) */
+extern int drm_addbufs_agp(struct drm_device *dev, struct drm_buf_desc * request);
+extern int drm_addbufs_pci(struct drm_device *dev, struct drm_buf_desc * request);
+extern int drm_addmap(struct drm_device *dev, unsigned int offset,
+ unsigned int size, enum drm_map_type type,
+ enum drm_map_flags flags, drm_local_map_t ** map_ptr);
+extern int drm_addmap_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+extern int drm_rmmap(struct drm_device *dev, drm_local_map_t *map);
+extern int drm_rmmap_locked(struct drm_device *dev, drm_local_map_t *map);
+extern int drm_rmmap_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+extern int drm_addbufs(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+extern int drm_infobufs(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+extern int drm_markbufs(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+extern int drm_freebufs(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+extern int drm_mapbufs(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+extern int drm_order(unsigned long size);
+extern unsigned long drm_get_resource_start(struct drm_device *dev,
+ unsigned int resource);
+extern unsigned long drm_get_resource_len(struct drm_device *dev,
+ unsigned int resource);
+
+ /* DMA support (drm_dma.h) */
+extern int drm_dma_setup(struct drm_device *dev);
+extern void drm_dma_takedown(struct drm_device *dev);
+extern void drm_free_buffer(struct drm_device *dev, struct drm_buf * buf);
+extern void drm_core_reclaim_buffers(struct drm_device *dev,
+ struct drm_file *filp);
+
+ /* IRQ support (drm_irq.h) */
+extern int drm_control(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+extern irqreturn_t drm_irq_handler(DRM_IRQ_ARGS);
+extern int drm_irq_uninstall(struct drm_device *dev);
+extern void drm_driver_irq_preinstall(struct drm_device *dev);
+extern void drm_driver_irq_postinstall(struct drm_device *dev);
+extern void drm_driver_irq_uninstall(struct drm_device *dev);
+
+extern int drm_wait_vblank(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+extern int drm_vblank_wait(struct drm_device *dev, unsigned int *vbl_seq);
+extern void drm_vbl_send_signals(struct drm_device *dev);
+extern void drm_locked_tasklet(struct drm_device *dev, void(*func)(struct drm_device*));
+
+ /* AGP/GART support (drm_agpsupport.h) */
+extern struct drm_agp_head *drm_agp_init(struct drm_device *dev);
+extern int drm_agp_acquire(struct drm_device *dev);
+extern int drm_agp_acquire_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+extern int drm_agp_release(struct drm_device *dev);
+extern int drm_agp_release_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+extern int drm_agp_enable(struct drm_device *dev, struct drm_agp_mode mode);
+extern int drm_agp_enable_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+extern int drm_agp_info(struct drm_device *dev, struct drm_agp_info *info);
+extern int drm_agp_info_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+extern int drm_agp_alloc(struct drm_device *dev, struct drm_agp_buffer *request);
+extern int drm_agp_alloc_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+extern int drm_agp_free(struct drm_device *dev, struct drm_agp_buffer *request);
+extern int drm_agp_free_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+extern int drm_agp_unbind(struct drm_device *dev, struct drm_agp_binding *request);
+extern int drm_agp_unbind_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+extern int drm_agp_bind(struct drm_device *dev, struct drm_agp_binding *request);
+extern int drm_agp_bind_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+extern DRM_AGP_MEM *drm_agp_allocate_memory(struct agp_bridge_data *bridge, size_t pages, u32 type);
+extern int drm_agp_free_memory(DRM_AGP_MEM * handle);
+extern int drm_agp_bind_memory(DRM_AGP_MEM * handle, off_t start);
+extern int drm_agp_unbind_memory(DRM_AGP_MEM * handle);
+
+ /* Stub support (drm_stub.h) */
+extern int drm_get_dev(struct pci_dev *pdev, const struct pci_device_id *ent,
+ struct drm_driver *driver);
+extern int drm_put_dev(struct drm_device *dev);
+extern int drm_put_minor(struct drm_minor **minor);
+extern unsigned int drm_debug;
+
+extern struct class *drm_class;
+extern struct proc_dir_entry *drm_proc_root;
+
+extern struct idr drm_minors_idr;
+
+extern drm_local_map_t *drm_getsarea(struct drm_device *dev);
+
+ /* Proc support (drm_proc.h) */
+extern int drm_proc_init(struct drm_minor *minor, int minor_id,
+ struct proc_dir_entry *root);
+extern int drm_proc_cleanup(struct drm_minor *minor, struct proc_dir_entry *root);
+
+ /* Scatter Gather Support (drm_scatter.h) */
+extern void drm_sg_cleanup(struct drm_sg_mem * entry);
+extern int drm_sg_alloc_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+extern int drm_sg_alloc(struct drm_device *dev, struct drm_scatter_gather * request);
+extern int drm_sg_free(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+
+ /* ATI PCIGART support (ati_pcigart.h) */
+extern int drm_ati_pcigart_init(struct drm_device *dev,
+ struct drm_ati_pcigart_info * gart_info);
+extern int drm_ati_pcigart_cleanup(struct drm_device *dev,
+ struct drm_ati_pcigart_info * gart_info);
+
+extern drm_dma_handle_t *drm_pci_alloc(struct drm_device *dev, size_t size,
+ size_t align, dma_addr_t maxaddr);
+extern void __drm_pci_free(struct drm_device *dev, drm_dma_handle_t * dmah);
+extern void drm_pci_free(struct drm_device *dev, drm_dma_handle_t * dmah);
+
+ /* sysfs support (drm_sysfs.c) */
+struct drm_sysfs_class;
+extern struct class *drm_sysfs_create(struct module *owner, char *name);
+extern void drm_sysfs_destroy(void);
+extern int drm_sysfs_device_add(struct drm_minor *minor);
+extern void drm_sysfs_device_remove(struct drm_minor *minor);
+
+/*
+ * Basic memory manager support (drm_mm.c)
+ */
+extern struct drm_mm_node *drm_mm_get_block(struct drm_mm_node * parent,
+ unsigned long size,
+ unsigned alignment);
+extern void drm_mm_put_block(struct drm_mm_node * cur);
+extern struct drm_mm_node *drm_mm_search_free(const struct drm_mm *mm, unsigned long size,
+ unsigned alignment, int best_match);
+extern int drm_mm_init(struct drm_mm *mm, unsigned long start, unsigned long size);
+extern void drm_mm_takedown(struct drm_mm *mm);
+extern int drm_mm_clean(struct drm_mm *mm);
+extern unsigned long drm_mm_tail_space(struct drm_mm *mm);
+extern int drm_mm_remove_space_from_tail(struct drm_mm *mm, unsigned long size);
+extern int drm_mm_add_space_to_tail(struct drm_mm *mm, unsigned long size);
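The drm_mm calls above form a small range allocator. A minimal usage sketch follows; the helper name, the zero alignment, and the non-best-match search are illustrative choices, not part of this patch:

static struct drm_mm_node *example_carve(struct drm_mm *mm, unsigned long size)
{
	struct drm_mm_node *free_node;

	/* Find a free hole large enough, then split a block out of it. */
	free_node = drm_mm_search_free(mm, size, 0 /* alignment */, 0 /* best_match */);
	if (!free_node)
		return NULL;
	return drm_mm_get_block(free_node, size, 0);
}

The block is handed back to the manager with drm_mm_put_block() once it is no longer needed, and the range itself is set up beforehand with drm_mm_init().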
+
+extern void drm_core_ioremap(struct drm_map *map, struct drm_device *dev);
+extern void drm_core_ioremapfree(struct drm_map *map, struct drm_device *dev);
+
+static __inline__ struct drm_map *drm_core_findmap(struct drm_device *dev,
+ unsigned int token)
+{
+ struct drm_map_list *_entry;
+ list_for_each_entry(_entry, &dev->maplist, head)
+ if (_entry->user_token == token)
+ return _entry->map;
+ return NULL;
+}
+
+static __inline__ int drm_device_is_agp(struct drm_device *dev)
+{
+ if (dev->driver->device_is_agp != NULL) {
+ int err = (*dev->driver->device_is_agp) (dev);
+
+ if (err != 2) {
+ return err;
+ }
+ }
+
+ return pci_find_capability(dev->pdev, PCI_CAP_ID_AGP);
+}
+
+static __inline__ int drm_device_is_pcie(struct drm_device *dev)
+{
+ return pci_find_capability(dev->pdev, PCI_CAP_ID_EXP);
+}
+
+static __inline__ void drm_core_dropmap(struct drm_map *map)
+{
+}
+
+#ifndef DEBUG_MEMORY
+/** Wrapper around kmalloc() */
+static __inline__ void *drm_alloc(size_t size, int area)
+{
+ return kmalloc(size, GFP_KERNEL);
+}
+
+/** Wrapper around kfree() */
+static __inline__ void drm_free(void *pt, size_t size, int area)
+{
+ kfree(pt);
+}
+
+/** Wrapper around kcalloc() */
+static __inline__ void *drm_calloc(size_t nmemb, size_t size, int area)
+{
+ return kcalloc(nmemb, size, GFP_KERNEL);
+}
+#else
+extern void *drm_alloc(size_t size, int area);
+extern void drm_free(void *pt, size_t size, int area);
+extern void *drm_calloc(size_t nmemb, size_t size, int area);
+#endif
+
+/*@}*/
+
+#endif /* __KERNEL__ */
+#endif
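For context, a hedged sketch of how a driver typically uses the drm_calloc()/drm_free() wrappers declared above with the standard DRM_MEM_DRIVER accounting area; the struct and function names are made up for illustration:

struct example_private {
	void *scratch;
};

static int example_attach(struct drm_device *dev)
{
	struct example_private *priv;

	priv = drm_calloc(1, sizeof(*priv), DRM_MEM_DRIVER);
	if (!priv)
		return -ENOMEM;
	dev->dev_private = priv;
	return 0;
}

static void example_detach(struct drm_device *dev)
{
	drm_free(dev->dev_private, sizeof(struct example_private), DRM_MEM_DRIVER);
	dev->dev_private = NULL;
}

The size argument to drm_free() only matters when DEBUG_MEMORY is enabled, in which case the drm_memory_debug.h wrappers (added later in this patch) use it for per-area accounting.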
diff --git a/include/drm/drm_core.h b/include/drm/drm_core.h
new file mode 100644
index 0000000..3167390
--- /dev/null
+++ b/include/drm/drm_core.h
@@ -0,0 +1,34 @@
+/*
+ * Copyright 2004 Jon Smirl <jonsmirl@gmail.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sub license,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * VIA, S3 GRAPHICS, AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+#define CORE_AUTHOR "Gareth Hughes, Leif Delgass, José Fonseca, Jon Smirl"
+
+#define CORE_NAME "drm"
+#define CORE_DESC "DRM shared core routines"
+#define CORE_DATE "20060810"
+
+#define DRM_IF_MAJOR 1
+#define DRM_IF_MINOR 3
+
+#define CORE_MAJOR 1
+#define CORE_MINOR 1
+#define CORE_PATCHLEVEL 0
diff --git a/include/drm/drm_hashtab.h b/include/drm/drm_hashtab.h
new file mode 100644
index 0000000..cd2b189
--- /dev/null
+++ b/include/drm/drm_hashtab.h
@@ -0,0 +1,67 @@
+/**************************************************************************
+ *
+ * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND. USA.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ *
+ **************************************************************************/
+/*
+ * Simple open hash table implementation.
+ *
+ * Authors:
+ * Thomas Hellström <thomas-at-tungstengraphics-dot-com>
+ */
+
+#ifndef DRM_HASHTAB_H
+#define DRM_HASHTAB_H
+
+#define drm_hash_entry(_ptr, _type, _member) container_of(_ptr, _type, _member)
+
+struct drm_hash_item {
+ struct hlist_node head;
+ unsigned long key;
+};
+
+struct drm_open_hash {
+ unsigned int size;
+ unsigned int order;
+ unsigned int fill;
+ struct hlist_head *table;
+ int use_vmalloc;
+};
+
+
+extern int drm_ht_create(struct drm_open_hash *ht, unsigned int order);
+extern int drm_ht_insert_item(struct drm_open_hash *ht, struct drm_hash_item *item);
+extern int drm_ht_just_insert_please(struct drm_open_hash *ht, struct drm_hash_item *item,
+ unsigned long seed, int bits, int shift,
+ unsigned long add);
+extern int drm_ht_find_item(struct drm_open_hash *ht, unsigned long key, struct drm_hash_item **item);
+
+extern void drm_ht_verbose_list(struct drm_open_hash *ht, unsigned long key);
+extern int drm_ht_remove_key(struct drm_open_hash *ht, unsigned long key);
+extern int drm_ht_remove_item(struct drm_open_hash *ht, struct drm_hash_item *item);
+extern void drm_ht_remove(struct drm_open_hash *ht);
+
+
+#endif
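A short usage sketch of the open hash table API above: the table is created with drm_ht_create(&ht, order), giving 1 << order buckets, and callers embed struct drm_hash_item in their own objects. The embedding struct and helper functions below are hypothetical:

struct example_object {
	struct drm_hash_item hash;   /* the key lives in hash.key */
	int payload;
};

static int example_insert(struct drm_open_hash *ht,
			  struct example_object *obj, unsigned long key)
{
	obj->hash.key = key;
	/* Fails if an item with this key is already present. */
	return drm_ht_insert_item(ht, &obj->hash);
}

static struct example_object *example_lookup(struct drm_open_hash *ht,
					     unsigned long key)
{
	struct drm_hash_item *item;

	if (drm_ht_find_item(ht, key, &item))
		return NULL;
	return drm_hash_entry(item, struct example_object, hash);
}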
diff --git a/include/drm/drm_memory.h b/include/drm/drm_memory.h
new file mode 100644
index 0000000..63e425b
--- /dev/null
+++ b/include/drm/drm_memory.h
@@ -0,0 +1,61 @@
+/**
+ * \file drm_memory.h
+ * Memory management wrappers for DRM
+ *
+ * \author Rickard E. (Rik) Faith <faith@valinux.com>
+ * \author Gareth Hughes <gareth@valinux.com>
+ */
+
+/*
+ * Created: Thu Feb 4 14:00:34 1999 by faith@valinux.com
+ *
+ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <linux/highmem.h>
+#include <linux/vmalloc.h>
+#include "drmP.h"
+
+/**
+ * Cut down version of drm_memory_debug.h, which used to be called
+ * drm_memory.h.
+ */
+
+#if __OS_HAS_AGP
+
+#include <linux/vmalloc.h>
+
+#ifdef HAVE_PAGE_AGP
+#include <asm/agp.h>
+#else
+# ifdef __powerpc__
+# define PAGE_AGP __pgprot(_PAGE_KERNEL | _PAGE_NO_CACHE)
+# else
+# define PAGE_AGP PAGE_KERNEL
+# endif
+#endif
+
+#else /* __OS_HAS_AGP */
+
+#endif
diff --git a/include/drm/drm_memory_debug.h b/include/drm/drm_memory_debug.h
new file mode 100644
index 0000000..6463271
--- /dev/null
+++ b/include/drm/drm_memory_debug.h
@@ -0,0 +1,309 @@
+/**
+ * \file drm_memory_debug.h
+ * Memory management wrappers for DRM.
+ *
+ * \author Rickard E. (Rik) Faith <faith@valinux.com>
+ * \author Gareth Hughes <gareth@valinux.com>
+ */
+
+/*
+ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include "drmP.h"
+
+typedef struct drm_mem_stats {
+ const char *name;
+ int succeed_count;
+ int free_count;
+ int fail_count;
+ unsigned long bytes_allocated;
+ unsigned long bytes_freed;
+} drm_mem_stats_t;
+
+static DEFINE_SPINLOCK(drm_mem_lock);
+static unsigned long drm_ram_available = 0; /* In pages */
+static unsigned long drm_ram_used = 0;
+static drm_mem_stats_t drm_mem_stats[] =
+{
+ [DRM_MEM_DMA] = {"dmabufs"},
+ [DRM_MEM_SAREA] = {"sareas"},
+ [DRM_MEM_DRIVER] = {"driver"},
+ [DRM_MEM_MAGIC] = {"magic"},
+ [DRM_MEM_IOCTLS] = {"ioctltab"},
+ [DRM_MEM_MAPS] = {"maplist"},
+ [DRM_MEM_VMAS] = {"vmalist"},
+ [DRM_MEM_BUFS] = {"buflist"},
+ [DRM_MEM_SEGS] = {"seglist"},
+ [DRM_MEM_PAGES] = {"pagelist"},
+ [DRM_MEM_FILES] = {"files"},
+ [DRM_MEM_QUEUES] = {"queues"},
+ [DRM_MEM_CMDS] = {"commands"},
+ [DRM_MEM_MAPPINGS] = {"mappings"},
+ [DRM_MEM_BUFLISTS] = {"buflists"},
+ [DRM_MEM_AGPLISTS] = {"agplist"},
+ [DRM_MEM_SGLISTS] = {"sglist"},
+ [DRM_MEM_TOTALAGP] = {"totalagp"},
+ [DRM_MEM_BOUNDAGP] = {"boundagp"},
+ [DRM_MEM_CTXBITMAP] = {"ctxbitmap"},
+ [DRM_MEM_CTXLIST] = {"ctxlist"},
+ [DRM_MEM_STUB] = {"stub"},
+ {NULL, 0,} /* Last entry must be null */
+};
+
+void drm_mem_init (void) {
+ drm_mem_stats_t *mem;
+ struct sysinfo si;
+
+ for (mem = drm_mem_stats; mem->name; ++mem) {
+ mem->succeed_count = 0;
+ mem->free_count = 0;
+ mem->fail_count = 0;
+ mem->bytes_allocated = 0;
+ mem->bytes_freed = 0;
+ }
+
+ si_meminfo(&si);
+ drm_ram_available = si.totalram;
+ drm_ram_used = 0;
+}
+
+/* drm_mem_info is called whenever a process reads /dev/drm/mem. */
+
+static int drm__mem_info (char *buf, char **start, off_t offset,
+ int request, int *eof, void *data) {
+ drm_mem_stats_t *pt;
+ int len = 0;
+
+ if (offset > DRM_PROC_LIMIT) {
+ *eof = 1;
+ return 0;
+ }
+
+ *eof = 0;
+ *start = &buf[offset];
+
+ DRM_PROC_PRINT(" total counts "
+ " | outstanding \n");
+ DRM_PROC_PRINT("type alloc freed fail bytes freed"
+ " | allocs bytes\n\n");
+ DRM_PROC_PRINT("%-9.9s %5d %5d %4d %10lu kB |\n",
+ "system", 0, 0, 0,
+ drm_ram_available << (PAGE_SHIFT - 10));
+ DRM_PROC_PRINT("%-9.9s %5d %5d %4d %10lu kB |\n",
+ "locked", 0, 0, 0, drm_ram_used >> 10);
+ DRM_PROC_PRINT("\n");
+ for (pt = drm_mem_stats; pt->name; pt++) {
+ DRM_PROC_PRINT("%-9.9s %5d %5d %4d %10lu %10lu | %6d %10ld\n",
+ pt->name,
+ pt->succeed_count,
+ pt->free_count,
+ pt->fail_count,
+ pt->bytes_allocated,
+ pt->bytes_freed,
+ pt->succeed_count - pt->free_count,
+ (long)pt->bytes_allocated
+ - (long)pt->bytes_freed);
+ }
+
+ if (len > request + offset)
+ return request;
+ *eof = 1;
+ return len - offset;
+}
+
+int drm_mem_info (char *buf, char **start, off_t offset,
+ int len, int *eof, void *data) {
+ int ret;
+
+ spin_lock(&drm_mem_lock);
+ ret = drm__mem_info (buf, start, offset, len, eof, data);
+ spin_unlock(&drm_mem_lock);
+ return ret;
+}
+
+void *drm_alloc (size_t size, int area) {
+ void *pt;
+
+ if (!size) {
+ DRM_MEM_ERROR(area, "Allocating 0 bytes\n");
+ return NULL;
+ }
+
+ if (!(pt = kmalloc(size, GFP_KERNEL))) {
+ spin_lock(&drm_mem_lock);
+ ++drm_mem_stats[area].fail_count;
+ spin_unlock(&drm_mem_lock);
+ return NULL;
+ }
+ spin_lock(&drm_mem_lock);
+ ++drm_mem_stats[area].succeed_count;
+ drm_mem_stats[area].bytes_allocated += size;
+ spin_unlock(&drm_mem_lock);
+ return pt;
+}
+
+void *drm_calloc (size_t nmemb, size_t size, int area) {
+ void *addr;
+
+ addr = drm_alloc (nmemb * size, area);
+ if (addr != NULL)
+ memset((void *)addr, 0, size * nmemb);
+
+ return addr;
+}
+
+void *drm_realloc (void *oldpt, size_t oldsize, size_t size, int area) {
+ void *pt;
+
+ if (!(pt = drm_alloc (size, area)))
+ return NULL;
+ if (oldpt && oldsize) {
+ memcpy(pt, oldpt, oldsize);
+ drm_free (oldpt, oldsize, area);
+ }
+ return pt;
+}
+
+void drm_free (void *pt, size_t size, int area) {
+ int alloc_count;
+ int free_count;
+
+ if (!pt)
+ DRM_MEM_ERROR(area, "Attempt to free NULL pointer\n");
+ else
+ kfree(pt);
+ spin_lock(&drm_mem_lock);
+ drm_mem_stats[area].bytes_freed += size;
+ free_count = ++drm_mem_stats[area].free_count;
+ alloc_count = drm_mem_stats[area].succeed_count;
+ spin_unlock(&drm_mem_lock);
+ if (free_count > alloc_count) {
+ DRM_MEM_ERROR(area, "Excess frees: %d frees, %d allocs\n",
+ free_count, alloc_count);
+ }
+}
+
+#if __OS_HAS_AGP
+
+DRM_AGP_MEM *drm_alloc_agp (drm_device_t *dev, int pages, u32 type) {
+ DRM_AGP_MEM *handle;
+
+ if (!pages) {
+ DRM_MEM_ERROR(DRM_MEM_TOTALAGP, "Allocating 0 pages\n");
+ return NULL;
+ }
+
+ if ((handle = drm_agp_allocate_memory (pages, type))) {
+ spin_lock(&drm_mem_lock);
+ ++drm_mem_stats[DRM_MEM_TOTALAGP].succeed_count;
+ drm_mem_stats[DRM_MEM_TOTALAGP].bytes_allocated
+ += pages << PAGE_SHIFT;
+ spin_unlock(&drm_mem_lock);
+ return handle;
+ }
+ spin_lock(&drm_mem_lock);
+ ++drm_mem_stats[DRM_MEM_TOTALAGP].fail_count;
+ spin_unlock(&drm_mem_lock);
+ return NULL;
+}
+
+int drm_free_agp (DRM_AGP_MEM * handle, int pages) {
+ int alloc_count;
+ int free_count;
+ int retval = -EINVAL;
+
+ if (!handle) {
+ DRM_MEM_ERROR(DRM_MEM_TOTALAGP,
+ "Attempt to free NULL AGP handle\n");
+ return retval;
+ }
+
+ if (drm_agp_free_memory (handle)) {
+ spin_lock(&drm_mem_lock);
+ free_count = ++drm_mem_stats[DRM_MEM_TOTALAGP].free_count;
+ alloc_count = drm_mem_stats[DRM_MEM_TOTALAGP].succeed_count;
+ drm_mem_stats[DRM_MEM_TOTALAGP].bytes_freed
+ += pages << PAGE_SHIFT;
+ spin_unlock(&drm_mem_lock);
+ if (free_count > alloc_count) {
+ DRM_MEM_ERROR(DRM_MEM_TOTALAGP,
+ "Excess frees: %d frees, %d allocs\n",
+ free_count, alloc_count);
+ }
+ return 0;
+ }
+ return retval;
+}
+
+int drm_bind_agp (DRM_AGP_MEM * handle, unsigned int start) {
+ int retcode = -EINVAL;
+
+ if (!handle) {
+ DRM_MEM_ERROR(DRM_MEM_BOUNDAGP,
+ "Attempt to bind NULL AGP handle\n");
+ return retcode;
+ }
+
+ if (!(retcode = drm_agp_bind_memory (handle, start))) {
+ spin_lock(&drm_mem_lock);
+ ++drm_mem_stats[DRM_MEM_BOUNDAGP].succeed_count;
+ drm_mem_stats[DRM_MEM_BOUNDAGP].bytes_allocated
+ += handle->page_count << PAGE_SHIFT;
+ spin_unlock(&drm_mem_lock);
+ return retcode;
+ }
+ spin_lock(&drm_mem_lock);
+ ++drm_mem_stats[DRM_MEM_BOUNDAGP].fail_count;
+ spin_unlock(&drm_mem_lock);
+ return retcode;
+}
+
+int drm_unbind_agp (DRM_AGP_MEM * handle) {
+ int alloc_count;
+ int free_count;
+ int retcode = -EINVAL;
+
+ if (!handle) {
+ DRM_MEM_ERROR(DRM_MEM_BOUNDAGP,
+ "Attempt to unbind NULL AGP handle\n");
+ return retcode;
+ }
+
+ if ((retcode = drm_agp_unbind_memory (handle)))
+ return retcode;
+ spin_lock(&drm_mem_lock);
+ free_count = ++drm_mem_stats[DRM_MEM_BOUNDAGP].free_count;
+ alloc_count = drm_mem_stats[DRM_MEM_BOUNDAGP].succeed_count;
+ drm_mem_stats[DRM_MEM_BOUNDAGP].bytes_freed
+ += handle->page_count << PAGE_SHIFT;
+ spin_unlock(&drm_mem_lock);
+ if (free_count > alloc_count) {
+ DRM_MEM_ERROR(DRM_MEM_BOUNDAGP,
+ "Excess frees: %d frees, %d allocs\n",
+ free_count, alloc_count);
+ }
+ return retcode;
+}
+#endif
diff --git a/include/drm/drm_os_linux.h b/include/drm/drm_os_linux.h
new file mode 100644
index 0000000..8dbd257
--- /dev/null
+++ b/include/drm/drm_os_linux.h
@@ -0,0 +1,108 @@
+/**
+ * \file drm_os_linux.h
+ * OS abstraction macros.
+ */
+
+#include <linux/interrupt.h> /* For task queue support */
+#include <linux/delay.h>
+
+/** Current process ID */
+#define DRM_CURRENTPID task_pid_nr(current)
+#define DRM_SUSER(p) capable(CAP_SYS_ADMIN)
+#define DRM_UDELAY(d) udelay(d)
+/** Read a byte from a MMIO region */
+#define DRM_READ8(map, offset) readb(((void __iomem *)(map)->handle) + (offset))
+/** Read a word from a MMIO region */
+#define DRM_READ16(map, offset) readw(((void __iomem *)(map)->handle) + (offset))
+/** Read a dword from a MMIO region */
+#define DRM_READ32(map, offset) readl(((void __iomem *)(map)->handle) + (offset))
+/** Write a byte into a MMIO region */
+#define DRM_WRITE8(map, offset, val) writeb(val, ((void __iomem *)(map)->handle) + (offset))
+/** Write a word into a MMIO region */
+#define DRM_WRITE16(map, offset, val) writew(val, ((void __iomem *)(map)->handle) + (offset))
+/** Write a dword into a MMIO region */
+#define DRM_WRITE32(map, offset, val) writel(val, ((void __iomem *)(map)->handle) + (offset))
+/** Read memory barrier */
+#define DRM_READMEMORYBARRIER() rmb()
+/** Write memory barrier */
+#define DRM_WRITEMEMORYBARRIER() wmb()
+/** Read/write memory barrier */
+#define DRM_MEMORYBARRIER() mb()
+
+/** IRQ handler arguments and return type and values */
+#define DRM_IRQ_ARGS int irq, void *arg
+
+/** AGP types */
+#if __OS_HAS_AGP
+#define DRM_AGP_MEM struct agp_memory
+#define DRM_AGP_KERN struct agp_kern_info
+#else
+/* define some dummy types for non AGP supporting kernels */
+struct no_agp_kern {
+ unsigned long aper_base;
+ unsigned long aper_size;
+};
+#define DRM_AGP_MEM int
+#define DRM_AGP_KERN struct no_agp_kern
+#endif
+
+#if !(__OS_HAS_MTRR)
+static __inline__ int mtrr_add(unsigned long base, unsigned long size,
+ unsigned int type, char increment)
+{
+ return -ENODEV;
+}
+
+static __inline__ int mtrr_del(int reg, unsigned long base, unsigned long size)
+{
+ return -ENODEV;
+}
+
+#define MTRR_TYPE_WRCOMB 1
+
+#endif
+
+/** Other copying of data to kernel space */
+#define DRM_COPY_FROM_USER(arg1, arg2, arg3) \
+ copy_from_user(arg1, arg2, arg3)
+/** Other copying of data from kernel space */
+#define DRM_COPY_TO_USER(arg1, arg2, arg3) \
+ copy_to_user(arg1, arg2, arg3)
+/* Macros for copying from user space, checking readability only once */
+#define DRM_VERIFYAREA_READ( uaddr, size ) \
+ (access_ok( VERIFY_READ, uaddr, size ) ? 0 : -EFAULT)
+#define DRM_COPY_FROM_USER_UNCHECKED(arg1, arg2, arg3) \
+ __copy_from_user(arg1, arg2, arg3)
+#define DRM_COPY_TO_USER_UNCHECKED(arg1, arg2, arg3) \
+ __copy_to_user(arg1, arg2, arg3)
+#define DRM_GET_USER_UNCHECKED(val, uaddr) \
+ __get_user(val, uaddr)
+
+#define DRM_HZ HZ
+
+#define DRM_WAIT_ON( ret, queue, timeout, condition ) \
+do { \
+ DECLARE_WAITQUEUE(entry, current); \
+ unsigned long end = jiffies + (timeout); \
+ add_wait_queue(&(queue), &entry); \
+ \
+ for (;;) { \
+ __set_current_state(TASK_INTERRUPTIBLE); \
+ if (condition) \
+ break; \
+ if (time_after_eq(jiffies, end)) { \
+ ret = -EBUSY; \
+ break; \
+ } \
+ schedule_timeout((HZ/100 > 1) ? HZ/100 : 1); \
+ if (signal_pending(current)) { \
+ ret = -EINTR; \
+ break; \
+ } \
+ } \
+ __set_current_state(TASK_RUNNING); \
+ remove_wait_queue(&(queue), &entry); \
+} while (0)
+
+#define DRM_WAKEUP( queue ) wake_up_interruptible( queue )
+#define DRM_INIT_WAITQUEUE( queue ) init_waitqueue_head( queue )
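A hedged sketch of how a driver combines the MMIO and wait helpers above; the register offset, struct fields, and function are invented for illustration:

struct example_private {
	drm_local_map_t *mmio;        /* MMIO map set up via drm_addmap() */
	wait_queue_head_t irq_queue;  /* initialized with DRM_INIT_WAITQUEUE() */
};

#define EXAMPLE_STATUS_REG 0x04       /* made-up register offset */

static int example_wait_idle(struct example_private *dev_priv)
{
	int ret = 0;

	/* Sleep on irq_queue for up to 3 seconds until the busy bit clears;
	 * DRM_WAIT_ON() sets ret to -EBUSY on timeout or -EINTR on a signal. */
	DRM_WAIT_ON(ret, dev_priv->irq_queue, 3 * DRM_HZ,
		    (DRM_READ32(dev_priv->mmio, EXAMPLE_STATUS_REG) & 0x1) == 0);
	return ret;
}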
diff --git a/include/drm/drm_pciids.h b/include/drm/drm_pciids.h
new file mode 100644
index 0000000..135bd19
--- /dev/null
+++ b/include/drm/drm_pciids.h
@@ -0,0 +1,415 @@
+/*
+ This file is auto-generated from the drm_pciids.txt in the DRM CVS
+ Please contact dri-devel@lists.sf.net to add new cards to this list
+*/
+#define radeon_PCI_IDS \
+ {0x1002, 0x3150, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY}, \
+ {0x1002, 0x3152, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x3154, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x3E50, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x3E54, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x4136, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS100|RADEON_IS_IGP}, \
+ {0x1002, 0x4137, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS200|RADEON_IS_IGP}, \
+ {0x1002, 0x4144, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R300}, \
+ {0x1002, 0x4145, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R300}, \
+ {0x1002, 0x4146, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R300}, \
+ {0x1002, 0x4147, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R300}, \
+ {0x1002, 0x4148, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R350}, \
+ {0x1002, 0x4149, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R350}, \
+ {0x1002, 0x414A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R350}, \
+ {0x1002, 0x414B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R350}, \
+ {0x1002, 0x4150, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350}, \
+ {0x1002, 0x4151, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350}, \
+ {0x1002, 0x4152, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350}, \
+ {0x1002, 0x4153, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350}, \
+ {0x1002, 0x4154, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350}, \
+ {0x1002, 0x4155, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350}, \
+ {0x1002, 0x4156, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350}, \
+ {0x1002, 0x4237, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS200|RADEON_IS_IGP}, \
+ {0x1002, 0x4242, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R200}, \
+ {0x1002, 0x4243, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R200}, \
+ {0x1002, 0x4336, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS100|RADEON_IS_IGP|RADEON_IS_MOBILITY}, \
+ {0x1002, 0x4337, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS200|RADEON_IS_IGP|RADEON_IS_MOBILITY}, \
+ {0x1002, 0x4437, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS200|RADEON_IS_IGP|RADEON_IS_MOBILITY}, \
+ {0x1002, 0x4966, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV250}, \
+ {0x1002, 0x4967, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV250}, \
+ {0x1002, 0x4A48, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x4A49, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x4A4A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x4A4B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x4A4C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x4A4D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x4A4E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x4A4F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x4A50, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x4A54, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x4B49, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x4B4A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x4B4B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x4B4C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x4C57, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV200|RADEON_IS_MOBILITY}, \
+ {0x1002, 0x4C58, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV200|RADEON_IS_MOBILITY}, \
+ {0x1002, 0x4C59, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV100|RADEON_IS_MOBILITY}, \
+ {0x1002, 0x4C5A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV100|RADEON_IS_MOBILITY}, \
+ {0x1002, 0x4C64, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV250|RADEON_IS_MOBILITY}, \
+ {0x1002, 0x4C66, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV250|RADEON_IS_MOBILITY}, \
+ {0x1002, 0x4C67, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV250|RADEON_IS_MOBILITY}, \
+ {0x1002, 0x4E44, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R300}, \
+ {0x1002, 0x4E45, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R300}, \
+ {0x1002, 0x4E46, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R300}, \
+ {0x1002, 0x4E47, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R300}, \
+ {0x1002, 0x4E48, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R350}, \
+ {0x1002, 0x4E49, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R350}, \
+ {0x1002, 0x4E4A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R350}, \
+ {0x1002, 0x4E4B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R350}, \
+ {0x1002, 0x4E50, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350|RADEON_IS_MOBILITY}, \
+ {0x1002, 0x4E51, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350|RADEON_IS_MOBILITY}, \
+ {0x1002, 0x4E52, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350|RADEON_IS_MOBILITY}, \
+ {0x1002, 0x4E53, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350|RADEON_IS_MOBILITY}, \
+ {0x1002, 0x4E54, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350|RADEON_IS_MOBILITY}, \
+ {0x1002, 0x4E56, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350|RADEON_IS_MOBILITY}, \
+ {0x1002, 0x5144, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R100|RADEON_SINGLE_CRTC}, \
+ {0x1002, 0x5145, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R100|RADEON_SINGLE_CRTC}, \
+ {0x1002, 0x5146, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R100|RADEON_SINGLE_CRTC}, \
+ {0x1002, 0x5147, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R100|RADEON_SINGLE_CRTC}, \
+ {0x1002, 0x5148, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R200}, \
+ {0x1002, 0x514C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R200}, \
+ {0x1002, 0x514D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R200}, \
+ {0x1002, 0x5157, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV200}, \
+ {0x1002, 0x5158, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV200}, \
+ {0x1002, 0x5159, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV100}, \
+ {0x1002, 0x515A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV100}, \
+ {0x1002, 0x515E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV100}, \
+ {0x1002, 0x5460, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY}, \
+ {0x1002, 0x5462, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY}, \
+ {0x1002, 0x5464, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY}, \
+ {0x1002, 0x5657, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x5548, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x5549, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x554A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x554B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x554C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x554D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x554E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x554F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x5550, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x5551, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x5552, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x5554, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x564A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x564B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x564F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x5652, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x5653, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x5834, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS300|RADEON_IS_IGP}, \
+ {0x1002, 0x5835, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS300|RADEON_IS_IGP|RADEON_IS_MOBILITY}, \
+ {0x1002, 0x5954, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS480|RADEON_IS_IGP|RADEON_IS_MOBILITY|RADEON_IS_IGPGART}, \
+ {0x1002, 0x5955, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS480|RADEON_IS_IGP|RADEON_IS_MOBILITY|RADEON_IS_IGPGART}, \
+ {0x1002, 0x5974, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS480|RADEON_IS_IGP|RADEON_IS_MOBILITY|RADEON_IS_IGPGART}, \
+ {0x1002, 0x5975, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS480|RADEON_IS_IGP|RADEON_IS_MOBILITY|RADEON_IS_IGPGART}, \
+ {0x1002, 0x5960, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV280}, \
+ {0x1002, 0x5961, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV280}, \
+ {0x1002, 0x5962, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV280}, \
+ {0x1002, 0x5964, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV280}, \
+ {0x1002, 0x5965, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV280}, \
+ {0x1002, 0x5969, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV100}, \
+ {0x1002, 0x5a61, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS480|RADEON_IS_IGP|RADEON_IS_MOBILITY|RADEON_IS_IGPGART}, \
+ {0x1002, 0x5a62, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS480|RADEON_IS_IGP|RADEON_IS_MOBILITY|RADEON_IS_IGPGART}, \
+ {0x1002, 0x5b60, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x5b62, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x5b63, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x5b64, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x5b65, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x5c61, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV280|RADEON_IS_MOBILITY}, \
+ {0x1002, 0x5c63, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV280|RADEON_IS_MOBILITY}, \
+ {0x1002, 0x5d48, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x5d49, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x5d4a, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x5d4c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x5d4d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x5d4e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x5d4f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x5d50, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x5d52, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x5d57, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x5e48, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x5e4a, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x5e4b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x5e4c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x5e4d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x5e4f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x7100, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R520|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x7101, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R520|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x7102, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R520|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x7103, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R520|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x7104, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R520|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x7105, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R520|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x7106, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R520|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x7108, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R520|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x7109, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R520|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x710A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R520|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x710B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R520|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x710C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R520|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x710E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R520|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x710F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R520|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x7140, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x7141, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x7142, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x7143, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x7144, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x7145, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x7146, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x7147, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x7149, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x714A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x714B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x714C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x714D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x714E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x714F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x7151, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x7152, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x7153, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x715E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x715F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x7180, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x7181, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x7183, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x7186, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x7187, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x7188, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x718A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x718B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x718C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x718D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x718F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x7193, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x7196, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x719B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x719F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x71C0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV530|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x71C1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV530|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x71C2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV530|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x71C3, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV530|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x71C4, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV530|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x71C5, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV530|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x71C6, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV530|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x71C7, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV530|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x71CD, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV530|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x71CE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV530|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x71D2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV530|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x71D4, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV530|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x71D5, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV530|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x71D6, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV530|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x71DA, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV530|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x71DE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV530|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x7200, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x7210, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x7211, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x7240, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R580|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x7243, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R580|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x7244, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R580|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x7245, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R580|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x7246, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R580|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x7247, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R580|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x7248, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R580|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x7249, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R580|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x724A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R580|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x724B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R580|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x724C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R580|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x724D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R580|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x724E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R580|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x724F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R580|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x7280, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV570|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x7281, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV560|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x7283, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV560|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x7284, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R580|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x7287, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV560|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x7288, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV570|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x7289, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV570|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x728B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV570|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x728C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV570|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x7290, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV560|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x7291, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV560|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x7293, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV560|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x7297, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV560|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x7834, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS300|RADEON_IS_IGP|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x7835, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS300|RADEON_IS_IGP|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x791e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS690|RADEON_IS_IGP|RADEON_NEW_MEMMAP|RADEON_IS_IGPGART}, \
+ {0x1002, 0x791f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS690|RADEON_IS_IGP|RADEON_NEW_MEMMAP|RADEON_IS_IGPGART}, \
+ {0, 0, 0}
+
+#define r128_PCI_IDS \
+ {0x1002, 0x4c45, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+ {0x1002, 0x4c46, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+ {0x1002, 0x4d46, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+ {0x1002, 0x4d4c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+ {0x1002, 0x5041, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+ {0x1002, 0x5042, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+ {0x1002, 0x5043, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+ {0x1002, 0x5044, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+ {0x1002, 0x5045, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+ {0x1002, 0x5046, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+ {0x1002, 0x5047, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+ {0x1002, 0x5048, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+ {0x1002, 0x5049, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+ {0x1002, 0x504A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+ {0x1002, 0x504B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+ {0x1002, 0x504C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+ {0x1002, 0x504D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+ {0x1002, 0x504E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+ {0x1002, 0x504F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+ {0x1002, 0x5050, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+ {0x1002, 0x5051, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+ {0x1002, 0x5052, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+ {0x1002, 0x5053, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+ {0x1002, 0x5054, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+ {0x1002, 0x5055, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+ {0x1002, 0x5056, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+ {0x1002, 0x5057, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+ {0x1002, 0x5058, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+ {0x1002, 0x5245, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+ {0x1002, 0x5246, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+ {0x1002, 0x5247, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+ {0x1002, 0x524b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+ {0x1002, 0x524c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+ {0x1002, 0x534d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+ {0x1002, 0x5446, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+ {0x1002, 0x544C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+ {0x1002, 0x5452, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+ {0, 0, 0}
+
+#define mga_PCI_IDS \
+ {0x102b, 0x0520, PCI_ANY_ID, PCI_ANY_ID, 0, 0, MGA_CARD_TYPE_G200}, \
+ {0x102b, 0x0521, PCI_ANY_ID, PCI_ANY_ID, 0, 0, MGA_CARD_TYPE_G200}, \
+ {0x102b, 0x0525, PCI_ANY_ID, PCI_ANY_ID, 0, 0, MGA_CARD_TYPE_G400}, \
+ {0x102b, 0x2527, PCI_ANY_ID, PCI_ANY_ID, 0, 0, MGA_CARD_TYPE_G550}, \
+ {0, 0, 0}
+
+#define mach64_PCI_IDS \
+ {0x1002, 0x4749, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+ {0x1002, 0x4750, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+ {0x1002, 0x4751, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+ {0x1002, 0x4742, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+ {0x1002, 0x4744, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+ {0x1002, 0x4c49, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+ {0x1002, 0x4c50, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+ {0x1002, 0x4c51, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+ {0x1002, 0x4c42, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+ {0x1002, 0x4c44, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+ {0x1002, 0x474c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+ {0x1002, 0x474f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+ {0x1002, 0x4752, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+ {0x1002, 0x4753, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+ {0x1002, 0x474d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+ {0x1002, 0x474e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+ {0x1002, 0x4c52, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+ {0x1002, 0x4c53, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+ {0x1002, 0x4c4d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+ {0x1002, 0x4c4e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+ {0, 0, 0}
+
+#define sisdrv_PCI_IDS \
+ {0x1039, 0x0300, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+ {0x1039, 0x5300, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+ {0x1039, 0x6300, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+ {0x1039, 0x6330, PCI_ANY_ID, PCI_ANY_ID, 0, 0, SIS_CHIP_315}, \
+ {0x1039, 0x6351, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+ {0x1039, 0x7300, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+ {0x18CA, 0x0040, PCI_ANY_ID, PCI_ANY_ID, 0, 0, SIS_CHIP_315}, \
+ {0x18CA, 0x0042, PCI_ANY_ID, PCI_ANY_ID, 0, 0, SIS_CHIP_315}, \
+ {0, 0, 0}
+
+#define tdfx_PCI_IDS \
+ {0x121a, 0x0003, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+ {0x121a, 0x0004, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+ {0x121a, 0x0005, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+ {0x121a, 0x0007, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+ {0x121a, 0x0009, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+ {0x121a, 0x000b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+ {0, 0, 0}
+
+#define viadrv_PCI_IDS \
+ {0x1106, 0x3022, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+ {0x1106, 0x3118, PCI_ANY_ID, PCI_ANY_ID, 0, 0, VIA_PRO_GROUP_A}, \
+ {0x1106, 0x3122, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+ {0x1106, 0x7205, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+ {0x1106, 0x3108, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+ {0x1106, 0x3344, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+ {0x1106, 0x3343, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+ {0x1106, 0x3230, PCI_ANY_ID, PCI_ANY_ID, 0, 0, VIA_DX9_0}, \
+ {0x1106, 0x3157, PCI_ANY_ID, PCI_ANY_ID, 0, 0, VIA_PRO_GROUP_A}, \
+ {0, 0, 0}
+
+#define i810_PCI_IDS \
+ {0x8086, 0x7121, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+ {0x8086, 0x7123, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+ {0x8086, 0x7125, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+ {0x8086, 0x1132, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+ {0, 0, 0}
+
+#define i830_PCI_IDS \
+ {0x8086, 0x3577, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+ {0x8086, 0x2562, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+ {0x8086, 0x3582, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+ {0x8086, 0x2572, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+ {0, 0, 0}
+
+#define gamma_PCI_IDS \
+ {0x3d3d, 0x0008, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+ {0, 0, 0}
+
+#define savage_PCI_IDS \
+ {0x5333, 0x8a20, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SAVAGE3D}, \
+ {0x5333, 0x8a21, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SAVAGE3D}, \
+ {0x5333, 0x8a22, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SAVAGE4}, \
+ {0x5333, 0x8a23, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SAVAGE4}, \
+ {0x5333, 0x8c10, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SAVAGE_MX}, \
+ {0x5333, 0x8c11, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SAVAGE_MX}, \
+ {0x5333, 0x8c12, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SAVAGE_MX}, \
+ {0x5333, 0x8c13, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SAVAGE_MX}, \
+ {0x5333, 0x8c22, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SUPERSAVAGE}, \
+ {0x5333, 0x8c24, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SUPERSAVAGE}, \
+ {0x5333, 0x8c26, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SUPERSAVAGE}, \
+ {0x5333, 0x8c2a, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SUPERSAVAGE}, \
+ {0x5333, 0x8c2b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SUPERSAVAGE}, \
+ {0x5333, 0x8c2c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SUPERSAVAGE}, \
+ {0x5333, 0x8c2d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SUPERSAVAGE}, \
+ {0x5333, 0x8c2e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SUPERSAVAGE}, \
+ {0x5333, 0x8c2f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SUPERSAVAGE}, \
+ {0x5333, 0x8a25, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_PROSAVAGE}, \
+ {0x5333, 0x8a26, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_PROSAVAGE}, \
+ {0x5333, 0x8d01, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_TWISTER}, \
+ {0x5333, 0x8d02, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_TWISTER}, \
+ {0x5333, 0x8d03, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_PROSAVAGEDDR}, \
+ {0x5333, 0x8d04, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_PROSAVAGEDDR}, \
+ {0, 0, 0}
+
+#define ffb_PCI_IDS \
+ {0, 0, 0}
+
+#define i915_PCI_IDS \
+ {0x8086, 0x3577, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+ {0x8086, 0x2562, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+ {0x8086, 0x3582, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+ {0x8086, 0x2572, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+ {0x8086, 0x2582, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+ {0x8086, 0x258a, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+ {0x8086, 0x2592, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+ {0x8086, 0x2772, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+ {0x8086, 0x27a2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+ {0x8086, 0x27ae, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+ {0x8086, 0x2972, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+ {0x8086, 0x2982, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+ {0x8086, 0x2992, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+ {0x8086, 0x29a2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+ {0x8086, 0x29b2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+ {0x8086, 0x29c2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+ {0x8086, 0x29d2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+ {0x8086, 0x2a02, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+ {0x8086, 0x2a12, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+ {0x8086, 0x2a42, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+ {0x8086, 0x2e02, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+ {0x8086, 0x2e12, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+ {0x8086, 0x2e22, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+ {0, 0, 0}
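Each of these macros expands to the body of a driver's PCI ID table; the usual consumption pattern looks roughly like the following, where DRIVER_NAME is the driver's own define:

static struct pci_device_id pciidlist[] = {
	radeon_PCI_IDS
};

static struct drm_driver driver = {
	/* ... feature flags and driver hooks ... */
	.pci_driver = {
		.name = DRIVER_NAME,
		.id_table = pciidlist,
	},
};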
diff --git a/include/drm/drm_sarea.h b/include/drm/drm_sarea.h
new file mode 100644
index 0000000..4800373
--- /dev/null
+++ b/include/drm/drm_sarea.h
@@ -0,0 +1,84 @@
+/**
+ * \file drm_sarea.h
+ * \brief SAREA definitions
+ *
+ * \author Michel Dänzer <michel@daenzer.net>
+ */
+
+/*
+ * Copyright 2002 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _DRM_SAREA_H_
+#define _DRM_SAREA_H_
+
+#include "drm.h"
+
+/* The SAREA needs to be at least a page */
+#if defined(__alpha__)
+#define SAREA_MAX 0x2000
+#elif defined(__ia64__)
+#define SAREA_MAX 0x10000 /* 64kB */
+#else
+/* Intel 830M driver needs at least 8k SAREA */
+#define SAREA_MAX 0x2000
+#endif
+
+/** Maximum number of drawables in the SAREA */
+#define SAREA_MAX_DRAWABLES 256
+
+#define SAREA_DRAWABLE_CLAIMED_ENTRY 0x80000000
+
+/** SAREA drawable */
+struct drm_sarea_drawable {
+ unsigned int stamp;
+ unsigned int flags;
+};
+
+/** SAREA frame */
+struct drm_sarea_frame {
+ unsigned int x;
+ unsigned int y;
+ unsigned int width;
+ unsigned int height;
+ unsigned int fullscreen;
+};
+
+/** SAREA */
+struct drm_sarea {
+ /** first thing is always the DRM locking structure */
+ struct drm_hw_lock lock;
+ /** \todo Use readers/writer lock for drm_sarea::drawable_lock */
+ struct drm_hw_lock drawable_lock;
+ struct drm_sarea_drawable drawableTable[SAREA_MAX_DRAWABLES]; /**< drawables */
+ struct drm_sarea_frame frame; /**< frame */
+ drm_context_t dummy_context;
+};
+
+#ifndef __KERNEL__
+typedef struct drm_sarea_drawable drm_sarea_drawable_t;
+typedef struct drm_sarea_frame drm_sarea_frame_t;
+typedef struct drm_sarea drm_sarea_t;
+#endif
+
+#endif /* _DRM_SAREA_H_ */
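To show how struct drm_sarea is typically consumed: a driver locates the SAREA map and then addresses its private state just past a driver-chosen offset. A sketch under assumed names (example_* and sarea_priv_offset are illustrative):

struct example_sarea {
	unsigned int last_dispatch;   /* made-up driver-private field */
};

static int example_map_sarea(struct drm_device *dev,
			     struct example_sarea **priv_out,
			     unsigned long sarea_priv_offset)
{
	drm_local_map_t *sarea = drm_getsarea(dev);

	if (!sarea)
		return -EINVAL;

	/* The shared page starts with struct drm_sarea (the HW lock); the
	 * driver-private part lives at an offset agreed on at init time. */
	*priv_out = (struct example_sarea *)
		((u8 *)sarea->handle + sarea_priv_offset);
	return 0;
}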
diff --git a/include/drm/drm_sman.h b/include/drm/drm_sman.h
new file mode 100644
index 0000000..08ecf83
--- /dev/null
+++ b/include/drm/drm_sman.h
@@ -0,0 +1,176 @@
+/**************************************************************************
+ *
+ * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ *
+ **************************************************************************/
+/*
+ * Simple memory MANager interface that keeps track of allocated regions on a
+ * per "owner" basis. All regions associated with an "owner" can be released
+ * with a single call, typically when the "owner" exits. The owner is any
+ * "unsigned long" identifier, typically a pointer to a file private
+ * struct or a context identifier.
+ *
+ * Authors:
+ * Thomas Hellström <thomas-at-tungstengraphics-dot-com>
+ */
+
+#ifndef DRM_SMAN_H
+#define DRM_SMAN_H
+
+#include "drmP.h"
+#include "drm_hashtab.h"
+
+/*
+ * A class that is an abstraction of a simple memory allocator.
+ * The sman implementation provides a default such allocator
+ * using the drm_mm.c implementation, but the user can replace it.
+ * See the SiS implementation, which may use the SiS FB kernel module
+ * for memory management.
+ */
+
+struct drm_sman_mm {
+ /* private info. If allocated, needs to be destroyed by the destroy
+ function */
+ void *private;
+
+ /* Allocate a memory block with given size and alignment.
+ Return an opaque reference to the memory block */
+
+ void *(*allocate) (void *private, unsigned long size,
+ unsigned alignment);
+
+ /* Free a memory block. "ref" is the opaque reference that we got from
+ the "alloc" function */
+
+ void (*free) (void *private, void *ref);
+
+ /* Free all resources associated with this allocator */
+
+ void (*destroy) (void *private);
+
+ /* Return a memory offset from the opaque reference returned from the
+ "alloc" function */
+
+ unsigned long (*offset) (void *private, void *ref);
+};
+
+struct drm_memblock_item {
+ struct list_head owner_list;
+ struct drm_hash_item user_hash;
+ void *mm_info;
+ struct drm_sman_mm *mm;
+ struct drm_sman *sman;
+};
+
+struct drm_sman {
+ struct drm_sman_mm *mm;
+ int num_managers;
+ struct drm_open_hash owner_hash_tab;
+ struct drm_open_hash user_hash_tab;
+ struct list_head owner_items;
+};
+
+/*
+ * Take down a memory manager. This function should only be called after a
+ * successful init and after a call to drm_sman_cleanup.
+ */
+
+extern void drm_sman_takedown(struct drm_sman * sman);
+
+/*
+ * Allocate structures for a manager.
+ * num_managers is the number of memory pools to manage (VRAM, AGP, ...).
+ * user_order is the log2 of the number of buckets in the user hash table;
+ *	  set this to approximately log2 of the maximum number of memory
+ *	  regions that will be allocated for _all_ pools together.
+ * owner_order is the log2 of the number of buckets in the owner hash table;
+ *	  set this to approximately log2 of the number of client file
+ *	  connections that will be using the manager.
+ *
+ */
+
+extern int drm_sman_init(struct drm_sman * sman, unsigned int num_managers,
+ unsigned int user_order, unsigned int owner_order);
+
+/*
+ * Initialize a drm_mm.c allocator. Should be called only once for each
+ * manager unless a customized allocator is used.
+ */
+
+extern int drm_sman_set_range(struct drm_sman * sman, unsigned int manager,
+ unsigned long start, unsigned long size);
+
+/*
+ * Initialize a customized allocator for one of the managers.
+ * (See the SiS module). The object pointed to by "allocator" is copied,
+ * so it can be destroyed after this call.
+ */
+
+extern int drm_sman_set_manager(struct drm_sman * sman, unsigned int manager,
+ struct drm_sman_mm * allocator);
+
+/*
+ * Allocate a memory block. Alignment is not implemented yet.
+ */
+
+extern struct drm_memblock_item *drm_sman_alloc(struct drm_sman * sman,
+ unsigned int manager,
+ unsigned long size,
+ unsigned alignment,
+ unsigned long owner);
+/*
+ * Free a memory block identified by its user hash key.
+ */
+
+extern int drm_sman_free_key(struct drm_sman * sman, unsigned int key);
+
+/*
+ * returns 1 iff there are no stale memory blocks associated with this owner.
+ * Typically called to determine if we need to idle the hardware and call
+ * drm_sman_owner_cleanup. If there are no stale memory blocks, it removes all
+ * resources associated with owner.
+ */
+
+extern int drm_sman_owner_clean(struct drm_sman * sman, unsigned long owner);
+
+/*
+ * Frees all stale memory blocks associated with this owner. Note that this
+ * requires that the hardware is finished with all blocks, so the graphics engine
+ * should be idled before this call is made. This function also frees
+ * any resources associated with "owner" and should be called when owner
+ * is not going to be referenced anymore.
+ */
+
+extern void drm_sman_owner_cleanup(struct drm_sman * sman, unsigned long owner);
+
+/*
+ * Frees all stale memory blocks associated with the memory manager.
+ * See idling above.
+ */
+
+extern void drm_sman_cleanup(struct drm_sman * sman);
+
+#endif
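
As a rough sketch of how the interface above fits together (illustrative only, not part of this patch): a driver with a single VRAM pool initialises the manager, hands pool 0 a range for the default drm_mm backend, and tears everything down in the reverse order. The function names and hash-table orders below are arbitrary choices.

    #include "drm_sman.h"

    /* Hypothetical driver setup: one pool, hash tables sized for roughly
     * 1024 concurrent blocks (2^10) and 32 open clients (2^5). */
    static int example_sman_setup(struct drm_sman *sman,
                                  unsigned long vram_start,
                                  unsigned long vram_size)
    {
            int ret;

            ret = drm_sman_init(sman, 1, 10, 5);
            if (ret)
                    return ret;

            /* Pool 0 uses the default drm_mm backend over this range. */
            return drm_sman_set_range(sman, 0, vram_start, vram_size);
    }

    /* Mirror image: free any remaining blocks, then the bookkeeping.
     * The hardware must be idle before drm_sman_cleanup() is called. */
    static void example_sman_teardown(struct drm_sman *sman)
    {
            drm_sman_cleanup(sman);
            drm_sman_takedown(sman);
    }
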
diff --git a/include/drm/i810_drm.h b/include/drm/i810_drm.h
new file mode 100644
index 0000000..7a10bb6
--- /dev/null
+++ b/include/drm/i810_drm.h
@@ -0,0 +1,281 @@
+#ifndef _I810_DRM_H_
+#define _I810_DRM_H_
+
+/* WARNING: These defines must be the same as what the Xserver uses.
+ * If you change them, you must change the defines in the Xserver.
+ */
+
+#ifndef _I810_DEFINES_
+#define _I810_DEFINES_
+
+#define I810_DMA_BUF_ORDER 12
+#define I810_DMA_BUF_SZ (1<<I810_DMA_BUF_ORDER)
+#define I810_DMA_BUF_NR 256
+#define I810_NR_SAREA_CLIPRECTS 8
+
+/* Each region is a minimum of 64k, and there are at most 64 of them.
+ */
+#define I810_NR_TEX_REGIONS 64
+#define I810_LOG_MIN_TEX_REGION_SIZE 16
+#endif
+
+#define I810_UPLOAD_TEX0IMAGE 0x1 /* handled clientside */
+#define I810_UPLOAD_TEX1IMAGE 0x2 /* handled clientside */
+#define I810_UPLOAD_CTX 0x4
+#define I810_UPLOAD_BUFFERS 0x8
+#define I810_UPLOAD_TEX0 0x10
+#define I810_UPLOAD_TEX1 0x20
+#define I810_UPLOAD_CLIPRECTS 0x40
+
+/* Indices into buf.Setup where various bits of state are mirrored per
+ * context and per buffer. These can be fired at the card as a unit,
+ * or in a piecewise fashion as required.
+ */
+
+/* Destbuffer state
+ * - backbuffer linear offset and pitch -- invariant in the current dri
+ * - zbuffer linear offset and pitch -- also invariant
+ * - drawing origin in back and depth buffers.
+ *
+ * Keep the depth/back buffer state here to accommodate private buffers
+ * in the future.
+ */
+#define I810_DESTREG_DI0 0 /* CMD_OP_DESTBUFFER_INFO (2 dwords) */
+#define I810_DESTREG_DI1 1
+#define I810_DESTREG_DV0 2 /* GFX_OP_DESTBUFFER_VARS (2 dwords) */
+#define I810_DESTREG_DV1 3
+#define I810_DESTREG_DR0 4 /* GFX_OP_DRAWRECT_INFO (4 dwords) */
+#define I810_DESTREG_DR1 5
+#define I810_DESTREG_DR2 6
+#define I810_DESTREG_DR3 7
+#define I810_DESTREG_DR4 8
+#define I810_DEST_SETUP_SIZE 10
+
+/* Context state
+ */
+#define I810_CTXREG_CF0 0 /* GFX_OP_COLOR_FACTOR */
+#define I810_CTXREG_CF1 1
+#define I810_CTXREG_ST0 2 /* GFX_OP_STIPPLE */
+#define I810_CTXREG_ST1 3
+#define I810_CTXREG_VF 4 /* GFX_OP_VERTEX_FMT */
+#define I810_CTXREG_MT 5 /* GFX_OP_MAP_TEXELS */
+#define I810_CTXREG_MC0 6 /* GFX_OP_MAP_COLOR_STAGES - stage 0 */
+#define I810_CTXREG_MC1 7 /* GFX_OP_MAP_COLOR_STAGES - stage 1 */
+#define I810_CTXREG_MC2 8 /* GFX_OP_MAP_COLOR_STAGES - stage 2 */
+#define I810_CTXREG_MA0 9 /* GFX_OP_MAP_ALPHA_STAGES - stage 0 */
+#define I810_CTXREG_MA1 10 /* GFX_OP_MAP_ALPHA_STAGES - stage 1 */
+#define I810_CTXREG_MA2 11 /* GFX_OP_MAP_ALPHA_STAGES - stage 2 */
+#define I810_CTXREG_SDM 12 /* GFX_OP_SRC_DEST_MONO */
+#define I810_CTXREG_FOG 13 /* GFX_OP_FOG_COLOR */
+#define I810_CTXREG_B1 14 /* GFX_OP_BOOL_1 */
+#define I810_CTXREG_B2 15 /* GFX_OP_BOOL_2 */
+#define I810_CTXREG_LCS 16 /* GFX_OP_LINEWIDTH_CULL_SHADE_MODE */
+#define I810_CTXREG_PV		17	/* GFX_OP_PV_RULE -- Invariant! */
+#define I810_CTXREG_ZA 18 /* GFX_OP_ZBIAS_ALPHAFUNC */
+#define I810_CTXREG_AA 19 /* GFX_OP_ANTIALIAS */
+#define I810_CTX_SETUP_SIZE 20
+
+/* Texture state (per tex unit)
+ */
+#define I810_TEXREG_MI0 0 /* GFX_OP_MAP_INFO (4 dwords) */
+#define I810_TEXREG_MI1 1
+#define I810_TEXREG_MI2 2
+#define I810_TEXREG_MI3 3
+#define I810_TEXREG_MF 4 /* GFX_OP_MAP_FILTER */
+#define I810_TEXREG_MLC 5 /* GFX_OP_MAP_LOD_CTL */
+#define I810_TEXREG_MLL 6 /* GFX_OP_MAP_LOD_LIMITS */
+#define I810_TEXREG_MCS 7 /* GFX_OP_MAP_COORD_SETS ??? */
+#define I810_TEX_SETUP_SIZE 8
+
+/* Flags for clear ioctl
+ */
+#define I810_FRONT 0x1
+#define I810_BACK 0x2
+#define I810_DEPTH 0x4
+
+typedef enum _drm_i810_init_func {
+ I810_INIT_DMA = 0x01,
+ I810_CLEANUP_DMA = 0x02,
+ I810_INIT_DMA_1_4 = 0x03
+} drm_i810_init_func_t;
+
+/* This is the init structure after v1.2 */
+typedef struct _drm_i810_init {
+ drm_i810_init_func_t func;
+ unsigned int mmio_offset;
+ unsigned int buffers_offset;
+ int sarea_priv_offset;
+ unsigned int ring_start;
+ unsigned int ring_end;
+ unsigned int ring_size;
+ unsigned int front_offset;
+ unsigned int back_offset;
+ unsigned int depth_offset;
+ unsigned int overlay_offset;
+ unsigned int overlay_physical;
+ unsigned int w;
+ unsigned int h;
+ unsigned int pitch;
+ unsigned int pitch_bits;
+} drm_i810_init_t;
+
+/* This is the init structure prior to v1.2 */
+typedef struct _drm_i810_pre12_init {
+ drm_i810_init_func_t func;
+ unsigned int mmio_offset;
+ unsigned int buffers_offset;
+ int sarea_priv_offset;
+ unsigned int ring_start;
+ unsigned int ring_end;
+ unsigned int ring_size;
+ unsigned int front_offset;
+ unsigned int back_offset;
+ unsigned int depth_offset;
+ unsigned int w;
+ unsigned int h;
+ unsigned int pitch;
+ unsigned int pitch_bits;
+} drm_i810_pre12_init_t;
+
+/* Warning: If you change the SAREA structure you must change the Xserver
+ * structure as well */
+
+typedef struct _drm_i810_tex_region {
+ unsigned char next, prev; /* indices to form a circular LRU */
+ unsigned char in_use; /* owned by a client, or free? */
+ int age; /* tracked by clients to update local LRU's */
+} drm_i810_tex_region_t;
+
+typedef struct _drm_i810_sarea {
+ unsigned int ContextState[I810_CTX_SETUP_SIZE];
+ unsigned int BufferState[I810_DEST_SETUP_SIZE];
+ unsigned int TexState[2][I810_TEX_SETUP_SIZE];
+ unsigned int dirty;
+
+ unsigned int nbox;
+ struct drm_clip_rect boxes[I810_NR_SAREA_CLIPRECTS];
+
+ /* Maintain an LRU of contiguous regions of texture space. If
+ * you think you own a region of texture memory, and it has an
+ * age different to the one you set, then you are mistaken and
+ * it has been stolen by another client. If global texAge
+ * hasn't changed, there is no need to walk the list.
+ *
+ * These regions can be used as a proxy for the fine-grained
+ * texture information of other clients - by maintaining them
+ * in the same lru which is used to age their own textures,
+ * clients have an approximate lru for the whole of global
+ * texture space, and can make informed decisions as to which
+ * areas to kick out. There is no need to choose whether to
+ * kick out your own texture or someone else's - simply eject
+ * them all in LRU order.
+ */
+
+ drm_i810_tex_region_t texList[I810_NR_TEX_REGIONS + 1];
+	/* Last elt is sentinel */
+ int texAge; /* last time texture was uploaded */
+ int last_enqueue; /* last time a buffer was enqueued */
+ int last_dispatch; /* age of the most recently dispatched buffer */
+ int last_quiescent; /* */
+ int ctxOwner; /* last context to upload state */
+
+ int vertex_prim;
+
+ int pf_enabled; /* is pageflipping allowed? */
+ int pf_active;
+ int pf_current_page; /* which buffer is being displayed? */
+} drm_i810_sarea_t;
+
+/* WARNING: If you change any of these defines, make sure to change the
+ * corresponding defines in the Xserver as well.
+ */
+
+/* i810 specific ioctls
+ * The device specific ioctl range is 0x40 to 0x79.
+ */
+#define DRM_I810_INIT 0x00
+#define DRM_I810_VERTEX 0x01
+#define DRM_I810_CLEAR 0x02
+#define DRM_I810_FLUSH 0x03
+#define DRM_I810_GETAGE 0x04
+#define DRM_I810_GETBUF 0x05
+#define DRM_I810_SWAP 0x06
+#define DRM_I810_COPY 0x07
+#define DRM_I810_DOCOPY 0x08
+#define DRM_I810_OV0INFO 0x09
+#define DRM_I810_FSTATUS 0x0a
+#define DRM_I810_OV0FLIP 0x0b
+#define DRM_I810_MC 0x0c
+#define DRM_I810_RSTATUS 0x0d
+#define DRM_I810_FLIP 0x0e
+
+#define DRM_IOCTL_I810_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_I810_INIT, drm_i810_init_t)
+#define DRM_IOCTL_I810_VERTEX DRM_IOW( DRM_COMMAND_BASE + DRM_I810_VERTEX, drm_i810_vertex_t)
+#define DRM_IOCTL_I810_CLEAR DRM_IOW( DRM_COMMAND_BASE + DRM_I810_CLEAR, drm_i810_clear_t)
+#define DRM_IOCTL_I810_FLUSH DRM_IO( DRM_COMMAND_BASE + DRM_I810_FLUSH)
+#define DRM_IOCTL_I810_GETAGE DRM_IO( DRM_COMMAND_BASE + DRM_I810_GETAGE)
+#define DRM_IOCTL_I810_GETBUF DRM_IOWR(DRM_COMMAND_BASE + DRM_I810_GETBUF, drm_i810_dma_t)
+#define DRM_IOCTL_I810_SWAP DRM_IO( DRM_COMMAND_BASE + DRM_I810_SWAP)
+#define DRM_IOCTL_I810_COPY DRM_IOW( DRM_COMMAND_BASE + DRM_I810_COPY, drm_i810_copy_t)
+#define DRM_IOCTL_I810_DOCOPY DRM_IO( DRM_COMMAND_BASE + DRM_I810_DOCOPY)
+#define DRM_IOCTL_I810_OV0INFO DRM_IOR( DRM_COMMAND_BASE + DRM_I810_OV0INFO, drm_i810_overlay_t)
+#define DRM_IOCTL_I810_FSTATUS DRM_IO ( DRM_COMMAND_BASE + DRM_I810_FSTATUS)
+#define DRM_IOCTL_I810_OV0FLIP DRM_IO ( DRM_COMMAND_BASE + DRM_I810_OV0FLIP)
+#define DRM_IOCTL_I810_MC DRM_IOW( DRM_COMMAND_BASE + DRM_I810_MC, drm_i810_mc_t)
+#define DRM_IOCTL_I810_RSTATUS DRM_IO ( DRM_COMMAND_BASE + DRM_I810_RSTATUS)
+#define DRM_IOCTL_I810_FLIP DRM_IO ( DRM_COMMAND_BASE + DRM_I810_FLIP)
+
+typedef struct _drm_i810_clear {
+ int clear_color;
+ int clear_depth;
+ int flags;
+} drm_i810_clear_t;
+
+/* These may be placeholders if we have more cliprects than
+ * I810_NR_SAREA_CLIPRECTS. In that case, the client sets discard to
+ * false, indicating that the buffer will be dispatched again with a
+ * new set of cliprects.
+ */
+typedef struct _drm_i810_vertex {
+ int idx; /* buffer index */
+ int used; /* nr bytes in use */
+ int discard; /* client is finished with the buffer? */
+} drm_i810_vertex_t;
+
+typedef struct _drm_i810_copy_t {
+ int idx; /* buffer index */
+ int used; /* nr bytes in use */
+ void *address; /* Address to copy from */
+} drm_i810_copy_t;
+
+#define PR_TRIANGLES (0x0<<18)
+#define PR_TRISTRIP_0 (0x1<<18)
+#define PR_TRISTRIP_1 (0x2<<18)
+#define PR_TRIFAN (0x3<<18)
+#define PR_POLYGON (0x4<<18)
+#define PR_LINES (0x5<<18)
+#define PR_LINESTRIP (0x6<<18)
+#define PR_RECTS (0x7<<18)
+#define PR_MASK (0x7<<18)
+
+typedef struct drm_i810_dma {
+ void *virtual;
+ int request_idx;
+ int request_size;
+ int granted;
+} drm_i810_dma_t;
+
+typedef struct _drm_i810_overlay_t {
+ unsigned int offset; /* Address of the Overlay Regs */
+ unsigned int physical;
+} drm_i810_overlay_t;
+
+typedef struct _drm_i810_mc {
+ int idx; /* buffer index */
+ int used; /* nr bytes in use */
+ int num_blocks; /* number of GFXBlocks */
+ int *length; /* List of lengths for GFXBlocks (FUTURE) */
+ unsigned int last_render; /* Last Render Request */
+} drm_i810_mc_t;
+
+#endif /* _I810_DRM_H_ */
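
To make the texture-LRU comment in drm_i810_sarea concrete, a client-side driver might verify a cached region like this before reusing it; the helper is a hypothetical sketch, not necessarily what the i810 DDX does in this exact form.

    #include "i810_drm.h"

    /* A region is only still ours if the age we stamped into it is
     * unchanged; otherwise another client has reclaimed it (see the
     * LRU comment in drm_i810_sarea above). */
    static int i810_region_still_mine(const drm_i810_sarea_t *sarea,
                                      int region, int my_age)
    {
            if (region < 0 || region >= I810_NR_TEX_REGIONS)
                    return 0;
            return sarea->texList[region].age == my_age;
    }
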
diff --git a/include/drm/i830_drm.h b/include/drm/i830_drm.h
new file mode 100644
index 0000000..4b00d2d
--- /dev/null
+++ b/include/drm/i830_drm.h
@@ -0,0 +1,342 @@
+#ifndef _I830_DRM_H_
+#define _I830_DRM_H_
+
+/* WARNING: These defines must be the same as what the Xserver uses.
+ * If you change them, you must change the defines in the Xserver.
+ *
+ * KW: Actually, you can't ever change them because doing so would
+ * break backwards compatibility.
+ */
+
+#ifndef _I830_DEFINES_
+#define _I830_DEFINES_
+
+#define I830_DMA_BUF_ORDER 12
+#define I830_DMA_BUF_SZ (1<<I830_DMA_BUF_ORDER)
+#define I830_DMA_BUF_NR 256
+#define I830_NR_SAREA_CLIPRECTS 8
+
+/* Each region is a minimum of 64k, and there are at most 64 of them.
+ */
+#define I830_NR_TEX_REGIONS 64
+#define I830_LOG_MIN_TEX_REGION_SIZE 16
+
+/* KW: These aren't correct but someone set them to two and then
+ * released the module. Now we can't change them as doing so would
+ * break backwards compatibility.
+ */
+#define I830_TEXTURE_COUNT 2
+#define I830_TEXBLEND_COUNT I830_TEXTURE_COUNT
+
+#define I830_TEXBLEND_SIZE 12 /* (4 args + op) * 2 + COLOR_FACTOR */
+
+#define I830_UPLOAD_CTX 0x1
+#define I830_UPLOAD_BUFFERS 0x2
+#define I830_UPLOAD_CLIPRECTS 0x4
+#define I830_UPLOAD_TEX0_IMAGE 0x100 /* handled clientside */
+#define I830_UPLOAD_TEX0_CUBE 0x200 /* handled clientside */
+#define I830_UPLOAD_TEX1_IMAGE 0x400 /* handled clientside */
+#define I830_UPLOAD_TEX1_CUBE 0x800 /* handled clientside */
+#define I830_UPLOAD_TEX2_IMAGE 0x1000 /* handled clientside */
+#define I830_UPLOAD_TEX2_CUBE 0x2000 /* handled clientside */
+#define I830_UPLOAD_TEX3_IMAGE 0x4000 /* handled clientside */
+#define I830_UPLOAD_TEX3_CUBE 0x8000 /* handled clientside */
+#define I830_UPLOAD_TEX_N_IMAGE(n) (0x100 << (n * 2))
+#define I830_UPLOAD_TEX_N_CUBE(n) (0x200 << (n * 2))
+#define I830_UPLOAD_TEXIMAGE_MASK 0xff00
+#define I830_UPLOAD_TEX0 0x10000
+#define I830_UPLOAD_TEX1 0x20000
+#define I830_UPLOAD_TEX2 0x40000
+#define I830_UPLOAD_TEX3 0x80000
+#define I830_UPLOAD_TEX_N(n) (0x10000 << (n))
+#define I830_UPLOAD_TEX_MASK 0xf0000
+#define I830_UPLOAD_TEXBLEND0 0x100000
+#define I830_UPLOAD_TEXBLEND1 0x200000
+#define I830_UPLOAD_TEXBLEND2 0x400000
+#define I830_UPLOAD_TEXBLEND3 0x800000
+#define I830_UPLOAD_TEXBLEND_N(n) (0x100000 << (n))
+#define I830_UPLOAD_TEXBLEND_MASK 0xf00000
+#define I830_UPLOAD_TEX_PALETTE_N(n) (0x1000000 << (n))
+#define I830_UPLOAD_TEX_PALETTE_SHARED 0x4000000
+#define I830_UPLOAD_STIPPLE 0x8000000
+
+/* Indices into buf.Setup where various bits of state are mirrored per
+ * context and per buffer. These can be fired at the card as a unit,
+ * or in a piecewise fashion as required.
+ */
+
+/* Destbuffer state
+ * - backbuffer linear offset and pitch -- invariant in the current dri
+ * - zbuffer linear offset and pitch -- also invariant
+ * - drawing origin in back and depth buffers.
+ *
+ * Keep the depth/back buffer state here to accommodate private buffers
+ * in the future.
+ */
+
+#define I830_DESTREG_CBUFADDR 0
+#define I830_DESTREG_DBUFADDR 1
+#define I830_DESTREG_DV0 2
+#define I830_DESTREG_DV1 3
+#define I830_DESTREG_SENABLE 4
+#define I830_DESTREG_SR0 5
+#define I830_DESTREG_SR1 6
+#define I830_DESTREG_SR2 7
+#define I830_DESTREG_DR0 8
+#define I830_DESTREG_DR1 9
+#define I830_DESTREG_DR2 10
+#define I830_DESTREG_DR3 11
+#define I830_DESTREG_DR4 12
+#define I830_DEST_SETUP_SIZE 13
+
+/* Context state
+ */
+#define I830_CTXREG_STATE1 0
+#define I830_CTXREG_STATE2 1
+#define I830_CTXREG_STATE3 2
+#define I830_CTXREG_STATE4 3
+#define I830_CTXREG_STATE5 4
+#define I830_CTXREG_IALPHAB 5
+#define I830_CTXREG_STENCILTST 6
+#define I830_CTXREG_ENABLES_1 7
+#define I830_CTXREG_ENABLES_2 8
+#define I830_CTXREG_AA 9
+#define I830_CTXREG_FOGCOLOR 10
+#define I830_CTXREG_BLENDCOLR0 11
+#define I830_CTXREG_BLENDCOLR 12 /* Dword 1 of 2 dword command */
+#define I830_CTXREG_VF 13
+#define I830_CTXREG_VF2 14
+#define I830_CTXREG_MCSB0 15
+#define I830_CTXREG_MCSB1 16
+#define I830_CTX_SETUP_SIZE 17
+
+/* 1.3: Stipple state
+ */
+#define I830_STPREG_ST0 0
+#define I830_STPREG_ST1 1
+#define I830_STP_SETUP_SIZE 2
+
+/* Texture state (per tex unit)
+ */
+
+#define I830_TEXREG_MI0 0 /* GFX_OP_MAP_INFO (6 dwords) */
+#define I830_TEXREG_MI1 1
+#define I830_TEXREG_MI2 2
+#define I830_TEXREG_MI3 3
+#define I830_TEXREG_MI4 4
+#define I830_TEXREG_MI5 5
+#define I830_TEXREG_MF 6 /* GFX_OP_MAP_FILTER */
+#define I830_TEXREG_MLC 7 /* GFX_OP_MAP_LOD_CTL */
+#define I830_TEXREG_MLL 8 /* GFX_OP_MAP_LOD_LIMITS */
+#define I830_TEXREG_MCS 9 /* GFX_OP_MAP_COORD_SETS */
+#define I830_TEX_SETUP_SIZE 10
+
+#define I830_TEXREG_TM0LI 0 /* load immediate 2 texture map n */
+#define I830_TEXREG_TM0S0 1
+#define I830_TEXREG_TM0S1 2
+#define I830_TEXREG_TM0S2 3
+#define I830_TEXREG_TM0S3 4
+#define I830_TEXREG_TM0S4 5
+#define I830_TEXREG_NOP0 6 /* noop */
+#define I830_TEXREG_NOP1 7 /* noop */
+#define I830_TEXREG_NOP2 8 /* noop */
+#define __I830_TEXREG_MCS 9 /* GFX_OP_MAP_COORD_SETS -- shared */
+#define __I830_TEX_SETUP_SIZE 10
+
+#define I830_FRONT 0x1
+#define I830_BACK 0x2
+#define I830_DEPTH 0x4
+
+#endif /* _I830_DEFINES_ */
+
+typedef struct _drm_i830_init {
+ enum {
+ I830_INIT_DMA = 0x01,
+ I830_CLEANUP_DMA = 0x02
+ } func;
+ unsigned int mmio_offset;
+ unsigned int buffers_offset;
+ int sarea_priv_offset;
+ unsigned int ring_start;
+ unsigned int ring_end;
+ unsigned int ring_size;
+ unsigned int front_offset;
+ unsigned int back_offset;
+ unsigned int depth_offset;
+ unsigned int w;
+ unsigned int h;
+ unsigned int pitch;
+ unsigned int pitch_bits;
+ unsigned int back_pitch;
+ unsigned int depth_pitch;
+ unsigned int cpp;
+} drm_i830_init_t;
+
+/* Warning: If you change the SAREA structure you must change the Xserver
+ * structure as well */
+
+typedef struct _drm_i830_tex_region {
+ unsigned char next, prev; /* indices to form a circular LRU */
+ unsigned char in_use; /* owned by a client, or free? */
+ int age; /* tracked by clients to update local LRU's */
+} drm_i830_tex_region_t;
+
+typedef struct _drm_i830_sarea {
+ unsigned int ContextState[I830_CTX_SETUP_SIZE];
+ unsigned int BufferState[I830_DEST_SETUP_SIZE];
+ unsigned int TexState[I830_TEXTURE_COUNT][I830_TEX_SETUP_SIZE];
+ unsigned int TexBlendState[I830_TEXBLEND_COUNT][I830_TEXBLEND_SIZE];
+ unsigned int TexBlendStateWordsUsed[I830_TEXBLEND_COUNT];
+ unsigned int Palette[2][256];
+ unsigned int dirty;
+
+ unsigned int nbox;
+ struct drm_clip_rect boxes[I830_NR_SAREA_CLIPRECTS];
+
+ /* Maintain an LRU of contiguous regions of texture space. If
+ * you think you own a region of texture memory, and it has an
+ * age different to the one you set, then you are mistaken and
+ * it has been stolen by another client. If global texAge
+ * hasn't changed, there is no need to walk the list.
+ *
+ * These regions can be used as a proxy for the fine-grained
+ * texture information of other clients - by maintaining them
+ * in the same lru which is used to age their own textures,
+ * clients have an approximate lru for the whole of global
+ * texture space, and can make informed decisions as to which
+ * areas to kick out. There is no need to choose whether to
+ * kick out your own texture or someone else's - simply eject
+ * them all in LRU order.
+ */
+
+ drm_i830_tex_region_t texList[I830_NR_TEX_REGIONS + 1];
+	/* Last elt is sentinel */
+ int texAge; /* last time texture was uploaded */
+ int last_enqueue; /* last time a buffer was enqueued */
+ int last_dispatch; /* age of the most recently dispatched buffer */
+ int last_quiescent; /* */
+ int ctxOwner; /* last context to upload state */
+
+ int vertex_prim;
+
+ int pf_enabled; /* is pageflipping allowed? */
+ int pf_active;
+ int pf_current_page; /* which buffer is being displayed? */
+
+ int perf_boxes; /* performance boxes to be displayed */
+
+ /* Here's the state for texunits 2,3:
+ */
+ unsigned int TexState2[I830_TEX_SETUP_SIZE];
+ unsigned int TexBlendState2[I830_TEXBLEND_SIZE];
+ unsigned int TexBlendStateWordsUsed2;
+
+ unsigned int TexState3[I830_TEX_SETUP_SIZE];
+ unsigned int TexBlendState3[I830_TEXBLEND_SIZE];
+ unsigned int TexBlendStateWordsUsed3;
+
+ unsigned int StippleState[I830_STP_SETUP_SIZE];
+} drm_i830_sarea_t;
+
+/* Flags for perf_boxes
+ */
+#define I830_BOX_RING_EMPTY 0x1 /* populated by kernel */
+#define I830_BOX_FLIP 0x2 /* populated by kernel */
+#define I830_BOX_WAIT 0x4 /* populated by kernel & client */
+#define I830_BOX_TEXTURE_LOAD 0x8 /* populated by kernel */
+#define I830_BOX_LOST_CONTEXT 0x10 /* populated by client */
+
+/* I830 specific ioctls
+ * The device specific ioctl range is 0x40 to 0x79.
+ */
+#define DRM_I830_INIT 0x00
+#define DRM_I830_VERTEX 0x01
+#define DRM_I830_CLEAR 0x02
+#define DRM_I830_FLUSH 0x03
+#define DRM_I830_GETAGE 0x04
+#define DRM_I830_GETBUF 0x05
+#define DRM_I830_SWAP 0x06
+#define DRM_I830_COPY 0x07
+#define DRM_I830_DOCOPY 0x08
+#define DRM_I830_FLIP 0x09
+#define DRM_I830_IRQ_EMIT 0x0a
+#define DRM_I830_IRQ_WAIT 0x0b
+#define DRM_I830_GETPARAM 0x0c
+#define DRM_I830_SETPARAM 0x0d
+
+#define DRM_IOCTL_I830_INIT	DRM_IOW( DRM_COMMAND_BASE + DRM_I830_INIT, drm_i830_init_t)
+#define DRM_IOCTL_I830_VERTEX	DRM_IOW( DRM_COMMAND_BASE + DRM_I830_VERTEX, drm_i830_vertex_t)
+#define DRM_IOCTL_I830_CLEAR	DRM_IOW( DRM_COMMAND_BASE + DRM_I830_CLEAR, drm_i830_clear_t)
+#define DRM_IOCTL_I830_FLUSH	DRM_IO ( DRM_COMMAND_BASE + DRM_I830_FLUSH)
+#define DRM_IOCTL_I830_GETAGE	DRM_IO ( DRM_COMMAND_BASE + DRM_I830_GETAGE)
+#define DRM_IOCTL_I830_GETBUF	DRM_IOWR(DRM_COMMAND_BASE + DRM_I830_GETBUF, drm_i830_dma_t)
+#define DRM_IOCTL_I830_SWAP	DRM_IO ( DRM_COMMAND_BASE + DRM_I830_SWAP)
+#define DRM_IOCTL_I830_COPY	DRM_IOW( DRM_COMMAND_BASE + DRM_I830_COPY, drm_i830_copy_t)
+#define DRM_IOCTL_I830_DOCOPY	DRM_IO ( DRM_COMMAND_BASE + DRM_I830_DOCOPY)
+#define DRM_IOCTL_I830_FLIP	DRM_IO ( DRM_COMMAND_BASE + DRM_I830_FLIP)
+#define DRM_IOCTL_I830_IRQ_EMIT	DRM_IOWR(DRM_COMMAND_BASE + DRM_I830_IRQ_EMIT, drm_i830_irq_emit_t)
+#define DRM_IOCTL_I830_IRQ_WAIT	DRM_IOW( DRM_COMMAND_BASE + DRM_I830_IRQ_WAIT, drm_i830_irq_wait_t)
+#define DRM_IOCTL_I830_GETPARAM	DRM_IOWR(DRM_COMMAND_BASE + DRM_I830_GETPARAM, drm_i830_getparam_t)
+#define DRM_IOCTL_I830_SETPARAM	DRM_IOWR(DRM_COMMAND_BASE + DRM_I830_SETPARAM, drm_i830_setparam_t)
+
+typedef struct _drm_i830_clear {
+ int clear_color;
+ int clear_depth;
+ int flags;
+ unsigned int clear_colormask;
+ unsigned int clear_depthmask;
+} drm_i830_clear_t;
+
+/* These may be placeholders if we have more cliprects than
+ * I830_NR_SAREA_CLIPRECTS. In that case, the client sets discard to
+ * false, indicating that the buffer will be dispatched again with a
+ * new set of cliprects.
+ */
+typedef struct _drm_i830_vertex {
+ int idx; /* buffer index */
+ int used; /* nr bytes in use */
+ int discard; /* client is finished with the buffer? */
+} drm_i830_vertex_t;
+
+typedef struct _drm_i830_copy_t {
+ int idx; /* buffer index */
+ int used; /* nr bytes in use */
+ void __user *address; /* Address to copy from */
+} drm_i830_copy_t;
+
+typedef struct drm_i830_dma {
+ void __user *virtual;
+ int request_idx;
+ int request_size;
+ int granted;
+} drm_i830_dma_t;
+
+/* 1.3: Userspace can request & wait on irq's:
+ */
+typedef struct drm_i830_irq_emit {
+ int __user *irq_seq;
+} drm_i830_irq_emit_t;
+
+typedef struct drm_i830_irq_wait {
+ int irq_seq;
+} drm_i830_irq_wait_t;
+
+/* 1.3: New ioctl to query kernel params:
+ */
+#define I830_PARAM_IRQ_ACTIVE 1
+
+typedef struct drm_i830_getparam {
+ int param;
+ int __user *value;
+} drm_i830_getparam_t;
+
+/* 1.3: New ioctl to set kernel params:
+ */
+#define I830_SETPARAM_USE_MI_BATCHBUFFER_START 1
+
+typedef struct drm_i830_setparam {
+ int param;
+ int value;
+} drm_i830_setparam_t;
+
+#endif /* _I830_DRM_H_ */
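
A minimal sketch of driving the 1.3 getparam ioctl from user space, assuming a build where __user expands to nothing (as user-space copies of these headers arrange); the helper name is hypothetical.

    #include <sys/ioctl.h>
    #include "i830_drm.h"

    /* Ask the kernel side whether an IRQ is hooked up (DRM 1.3+). */
    static int i830_irq_active(int fd)
    {
            drm_i830_getparam_t gp;
            int value = 0;

            gp.param = I830_PARAM_IRQ_ACTIVE;
            gp.value = &value;
            if (ioctl(fd, DRM_IOCTL_I830_GETPARAM, &gp) != 0)
                    return -1;
            return value;
    }
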
diff --git a/include/drm/i915_drm.h b/include/drm/i915_drm.h
new file mode 100644
index 0000000..05c66cf
--- /dev/null
+++ b/include/drm/i915_drm.h
@@ -0,0 +1,270 @@
+/*
+ * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef _I915_DRM_H_
+#define _I915_DRM_H_
+
+/* Please note that modifications to all structs defined here are
+ * subject to backwards-compatibility constraints.
+ */
+
+#include "drm.h"
+
+/* Each region is a minimum of 16k, and there are at most 255 of them.
+ */
+#define I915_NR_TEX_REGIONS 255 /* table size 2k - maximum due to use
+ * of chars for next/prev indices */
+#define I915_LOG_MIN_TEX_REGION_SIZE 14
+
+typedef struct _drm_i915_init {
+ enum {
+ I915_INIT_DMA = 0x01,
+ I915_CLEANUP_DMA = 0x02,
+ I915_RESUME_DMA = 0x03
+ } func;
+ unsigned int mmio_offset;
+ int sarea_priv_offset;
+ unsigned int ring_start;
+ unsigned int ring_end;
+ unsigned int ring_size;
+ unsigned int front_offset;
+ unsigned int back_offset;
+ unsigned int depth_offset;
+ unsigned int w;
+ unsigned int h;
+ unsigned int pitch;
+ unsigned int pitch_bits;
+ unsigned int back_pitch;
+ unsigned int depth_pitch;
+ unsigned int cpp;
+ unsigned int chipset;
+} drm_i915_init_t;
+
+typedef struct _drm_i915_sarea {
+ struct drm_tex_region texList[I915_NR_TEX_REGIONS + 1];
+ int last_upload; /* last time texture was uploaded */
+ int last_enqueue; /* last time a buffer was enqueued */
+ int last_dispatch; /* age of the most recently dispatched buffer */
+ int ctxOwner; /* last context to upload state */
+ int texAge;
+ int pf_enabled; /* is pageflipping allowed? */
+ int pf_active;
+ int pf_current_page; /* which buffer is being displayed? */
+ int perf_boxes; /* performance boxes to be displayed */
+ int width, height; /* screen size in pixels */
+
+ drm_handle_t front_handle;
+ int front_offset;
+ int front_size;
+
+ drm_handle_t back_handle;
+ int back_offset;
+ int back_size;
+
+ drm_handle_t depth_handle;
+ int depth_offset;
+ int depth_size;
+
+ drm_handle_t tex_handle;
+ int tex_offset;
+ int tex_size;
+ int log_tex_granularity;
+ int pitch;
+ int rotation; /* 0, 90, 180 or 270 */
+ int rotated_offset;
+ int rotated_size;
+ int rotated_pitch;
+ int virtualX, virtualY;
+
+ unsigned int front_tiled;
+ unsigned int back_tiled;
+ unsigned int depth_tiled;
+ unsigned int rotated_tiled;
+ unsigned int rotated2_tiled;
+
+ int pipeA_x;
+ int pipeA_y;
+ int pipeA_w;
+ int pipeA_h;
+ int pipeB_x;
+ int pipeB_y;
+ int pipeB_w;
+ int pipeB_h;
+} drm_i915_sarea_t;
+
+/* Flags for perf_boxes
+ */
+#define I915_BOX_RING_EMPTY 0x1
+#define I915_BOX_FLIP 0x2
+#define I915_BOX_WAIT 0x4
+#define I915_BOX_TEXTURE_LOAD 0x8
+#define I915_BOX_LOST_CONTEXT 0x10
+
+/* I915 specific ioctls
+ * The device specific ioctl range is 0x40 to 0x79.
+ */
+#define DRM_I915_INIT 0x00
+#define DRM_I915_FLUSH 0x01
+#define DRM_I915_FLIP 0x02
+#define DRM_I915_BATCHBUFFER 0x03
+#define DRM_I915_IRQ_EMIT 0x04
+#define DRM_I915_IRQ_WAIT 0x05
+#define DRM_I915_GETPARAM 0x06
+#define DRM_I915_SETPARAM 0x07
+#define DRM_I915_ALLOC 0x08
+#define DRM_I915_FREE 0x09
+#define DRM_I915_INIT_HEAP 0x0a
+#define DRM_I915_CMDBUFFER 0x0b
+#define DRM_I915_DESTROY_HEAP 0x0c
+#define DRM_I915_SET_VBLANK_PIPE 0x0d
+#define DRM_I915_GET_VBLANK_PIPE 0x0e
+#define DRM_I915_VBLANK_SWAP 0x0f
+#define DRM_I915_HWS_ADDR 0x11
+
+#define DRM_IOCTL_I915_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT, drm_i915_init_t)
+#define DRM_IOCTL_I915_FLUSH DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLUSH)
+#define DRM_IOCTL_I915_FLIP DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLIP)
+#define DRM_IOCTL_I915_BATCHBUFFER DRM_IOW( DRM_COMMAND_BASE + DRM_I915_BATCHBUFFER, drm_i915_batchbuffer_t)
+#define DRM_IOCTL_I915_IRQ_EMIT DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_IRQ_EMIT, drm_i915_irq_emit_t)
+#define DRM_IOCTL_I915_IRQ_WAIT DRM_IOW( DRM_COMMAND_BASE + DRM_I915_IRQ_WAIT, drm_i915_irq_wait_t)
+#define DRM_IOCTL_I915_GETPARAM DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GETPARAM, drm_i915_getparam_t)
+#define DRM_IOCTL_I915_SETPARAM DRM_IOW( DRM_COMMAND_BASE + DRM_I915_SETPARAM, drm_i915_setparam_t)
+#define DRM_IOCTL_I915_ALLOC DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_ALLOC, drm_i915_mem_alloc_t)
+#define DRM_IOCTL_I915_FREE DRM_IOW( DRM_COMMAND_BASE + DRM_I915_FREE, drm_i915_mem_free_t)
+#define DRM_IOCTL_I915_INIT_HEAP DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT_HEAP, drm_i915_mem_init_heap_t)
+#define DRM_IOCTL_I915_CMDBUFFER DRM_IOW( DRM_COMMAND_BASE + DRM_I915_CMDBUFFER, drm_i915_cmdbuffer_t)
+#define DRM_IOCTL_I915_DESTROY_HEAP DRM_IOW( DRM_COMMAND_BASE + DRM_I915_DESTROY_HEAP, drm_i915_mem_destroy_heap_t)
+#define DRM_IOCTL_I915_SET_VBLANK_PIPE DRM_IOW( DRM_COMMAND_BASE + DRM_I915_SET_VBLANK_PIPE, drm_i915_vblank_pipe_t)
+#define DRM_IOCTL_I915_GET_VBLANK_PIPE DRM_IOR( DRM_COMMAND_BASE + DRM_I915_GET_VBLANK_PIPE, drm_i915_vblank_pipe_t)
+#define DRM_IOCTL_I915_VBLANK_SWAP DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_VBLANK_SWAP, drm_i915_vblank_swap_t)
+
+/* Allow drivers to submit batchbuffers directly to hardware, relying
+ * on the security mechanisms provided by hardware.
+ */
+typedef struct _drm_i915_batchbuffer {
+ int start; /* agp offset */
+ int used; /* nr bytes in use */
+ int DR1; /* hw flags for GFX_OP_DRAWRECT_INFO */
+ int DR4; /* window origin for GFX_OP_DRAWRECT_INFO */
+	int num_cliprects;	/* multipass with multiple cliprects? */
+ struct drm_clip_rect __user *cliprects; /* pointer to userspace cliprects */
+} drm_i915_batchbuffer_t;
+
+/* As above, but pass a pointer to userspace buffer which can be
+ * validated by the kernel prior to sending to hardware.
+ */
+typedef struct _drm_i915_cmdbuffer {
+ char __user *buf; /* pointer to userspace command buffer */
+ int sz; /* nr bytes in buf */
+ int DR1; /* hw flags for GFX_OP_DRAWRECT_INFO */
+ int DR4; /* window origin for GFX_OP_DRAWRECT_INFO */
+	int num_cliprects;	/* multipass with multiple cliprects? */
+ struct drm_clip_rect __user *cliprects; /* pointer to userspace cliprects */
+} drm_i915_cmdbuffer_t;
+
+/* Userspace can request & wait on irq's:
+ */
+typedef struct drm_i915_irq_emit {
+ int __user *irq_seq;
+} drm_i915_irq_emit_t;
+
+typedef struct drm_i915_irq_wait {
+ int irq_seq;
+} drm_i915_irq_wait_t;
+
+/* Ioctl to query kernel params:
+ */
+#define I915_PARAM_IRQ_ACTIVE 1
+#define I915_PARAM_ALLOW_BATCHBUFFER 2
+#define I915_PARAM_LAST_DISPATCH 3
+
+typedef struct drm_i915_getparam {
+ int param;
+ int __user *value;
+} drm_i915_getparam_t;
+
+/* Ioctl to set kernel params:
+ */
+#define I915_SETPARAM_USE_MI_BATCHBUFFER_START 1
+#define I915_SETPARAM_TEX_LRU_LOG_GRANULARITY 2
+#define I915_SETPARAM_ALLOW_BATCHBUFFER 3
+
+typedef struct drm_i915_setparam {
+ int param;
+ int value;
+} drm_i915_setparam_t;
+
+/* A memory manager for regions of shared memory:
+ */
+#define I915_MEM_REGION_AGP 1
+
+typedef struct drm_i915_mem_alloc {
+ int region;
+ int alignment;
+ int size;
+ int __user *region_offset; /* offset from start of fb or agp */
+} drm_i915_mem_alloc_t;
+
+typedef struct drm_i915_mem_free {
+ int region;
+ int region_offset;
+} drm_i915_mem_free_t;
+
+typedef struct drm_i915_mem_init_heap {
+ int region;
+ int size;
+ int start;
+} drm_i915_mem_init_heap_t;
+
+/* Allow memory manager to be torn down and re-initialized (eg on
+ * rotate):
+ */
+typedef struct drm_i915_mem_destroy_heap {
+ int region;
+} drm_i915_mem_destroy_heap_t;
+
+/* Allow X server to configure which pipes to monitor for vblank signals
+ */
+#define DRM_I915_VBLANK_PIPE_A 1
+#define DRM_I915_VBLANK_PIPE_B 2
+
+typedef struct drm_i915_vblank_pipe {
+ int pipe;
+} drm_i915_vblank_pipe_t;
+
+/* Schedule buffer swap at given vertical blank:
+ */
+typedef struct drm_i915_vblank_swap {
+ drm_drawable_t drawable;
+ enum drm_vblank_seq_type seqtype;
+ unsigned int sequence;
+} drm_i915_vblank_swap_t;
+
+typedef struct drm_i915_hws_addr {
+ uint64_t addr;
+} drm_i915_hws_addr_t;
+
+#endif /* _I915_DRM_H_ */
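
In the same spirit, a user-space driver would normally probe GETPARAM before choosing between the BATCHBUFFER and CMDBUFFER submission paths described above. A hedged sketch follows (hypothetical helper, __user assumed to expand to nothing in a user-space build):

    #include <sys/ioctl.h>
    #include "i915_drm.h"

    /* Returns non-zero if direct batchbuffer submission is permitted. */
    static int i915_allows_batchbuffer(int fd)
    {
            drm_i915_getparam_t gp;
            int value = 0;

            gp.param = I915_PARAM_ALLOW_BATCHBUFFER;
            gp.value = &value;
            if (ioctl(fd, DRM_IOCTL_I915_GETPARAM, &gp) != 0)
                    return 0;
            return value;
    }
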
diff --git a/include/drm/mga_drm.h b/include/drm/mga_drm.h
new file mode 100644
index 0000000..944b50a
--- /dev/null
+++ b/include/drm/mga_drm.h
@@ -0,0 +1,417 @@
+/* mga_drm.h -- Public header for the Matrox g200/g400 driver -*- linux-c -*-
+ * Created: Tue Jan 25 01:50:01 1999 by jhartmann@precisioninsight.com
+ *
+ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Jeff Hartmann <jhartmann@valinux.com>
+ * Keith Whitwell <keith@tungstengraphics.com>
+ *
+ * Rewritten by:
+ * Gareth Hughes <gareth@valinux.com>
+ */
+
+#ifndef __MGA_DRM_H__
+#define __MGA_DRM_H__
+
+/* WARNING: If you change any of these defines, make sure to change the
+ * defines in the Xserver file (mga_sarea.h)
+ */
+
+#ifndef __MGA_SAREA_DEFINES__
+#define __MGA_SAREA_DEFINES__
+
+/* WARP pipe flags
+ */
+#define MGA_F 0x1 /* fog */
+#define MGA_A 0x2 /* alpha */
+#define MGA_S 0x4 /* specular */
+#define MGA_T2 0x8 /* multitexture */
+
+#define MGA_WARP_TGZ 0
+#define MGA_WARP_TGZF (MGA_F)
+#define MGA_WARP_TGZA (MGA_A)
+#define MGA_WARP_TGZAF (MGA_F|MGA_A)
+#define MGA_WARP_TGZS (MGA_S)
+#define MGA_WARP_TGZSF (MGA_S|MGA_F)
+#define MGA_WARP_TGZSA (MGA_S|MGA_A)
+#define MGA_WARP_TGZSAF (MGA_S|MGA_F|MGA_A)
+#define MGA_WARP_T2GZ (MGA_T2)
+#define MGA_WARP_T2GZF (MGA_T2|MGA_F)
+#define MGA_WARP_T2GZA (MGA_T2|MGA_A)
+#define MGA_WARP_T2GZAF (MGA_T2|MGA_A|MGA_F)
+#define MGA_WARP_T2GZS (MGA_T2|MGA_S)
+#define MGA_WARP_T2GZSF (MGA_T2|MGA_S|MGA_F)
+#define MGA_WARP_T2GZSA (MGA_T2|MGA_S|MGA_A)
+#define MGA_WARP_T2GZSAF (MGA_T2|MGA_S|MGA_F|MGA_A)
+
+#define MGA_MAX_G200_PIPES 8 /* no multitex */
+#define MGA_MAX_G400_PIPES 16
+#define MGA_MAX_WARP_PIPES MGA_MAX_G400_PIPES
+#define MGA_WARP_UCODE_SIZE 32768 /* in bytes */
+
+#define MGA_CARD_TYPE_G200 1
+#define MGA_CARD_TYPE_G400 2
+#define MGA_CARD_TYPE_G450 3 /* not currently used */
+#define MGA_CARD_TYPE_G550 4
+
+#define MGA_FRONT 0x1
+#define MGA_BACK 0x2
+#define MGA_DEPTH 0x4
+
+/* What needs to be changed for the current vertex dma buffer?
+ */
+#define MGA_UPLOAD_CONTEXT 0x1
+#define MGA_UPLOAD_TEX0 0x2
+#define MGA_UPLOAD_TEX1 0x4
+#define MGA_UPLOAD_PIPE 0x8
+#define MGA_UPLOAD_TEX0IMAGE 0x10 /* handled client-side */
+#define MGA_UPLOAD_TEX1IMAGE 0x20 /* handled client-side */
+#define MGA_UPLOAD_2D 0x40
+#define MGA_WAIT_AGE 0x80 /* handled client-side */
+#define MGA_UPLOAD_CLIPRECTS 0x100 /* handled client-side */
+#if 0
+#define MGA_DMA_FLUSH 0x200 /* set when someone gets the lock
+ quiescent */
+#endif
+
+/* 128 buffers of 64k each, total 8 meg.
+ */
+#define MGA_BUFFER_SIZE (1 << 16)
+#define MGA_NUM_BUFFERS 128
+
+/* Keep these small for testing.
+ */
+#define MGA_NR_SAREA_CLIPRECTS 8
+
+/* 2 heaps (1 for card, 1 for agp), each divided into up to 128
+ * regions, subject to a minimum region size of (1<<16) == 64k.
+ *
+ * Clients may subdivide regions internally, but when sharing between
+ * clients, the region size is the minimum granularity.
+ */
+
+#define MGA_CARD_HEAP 0
+#define MGA_AGP_HEAP 1
+#define MGA_NR_TEX_HEAPS 2
+#define MGA_NR_TEX_REGIONS 16
+#define MGA_LOG_MIN_TEX_REGION_SIZE 16
+
+#define DRM_MGA_IDLE_RETRY 2048
+
+#endif /* __MGA_SAREA_DEFINES__ */
+
+/* Setup registers for 3D context
+ */
+typedef struct {
+ unsigned int dstorg;
+ unsigned int maccess;
+ unsigned int plnwt;
+ unsigned int dwgctl;
+ unsigned int alphactrl;
+ unsigned int fogcolor;
+ unsigned int wflag;
+ unsigned int tdualstage0;
+ unsigned int tdualstage1;
+ unsigned int fcol;
+ unsigned int stencil;
+ unsigned int stencilctl;
+} drm_mga_context_regs_t;
+
+/* Setup registers for 2D, X server
+ */
+typedef struct {
+ unsigned int pitch;
+} drm_mga_server_regs_t;
+
+/* Setup registers for each texture unit
+ */
+typedef struct {
+ unsigned int texctl;
+ unsigned int texctl2;
+ unsigned int texfilter;
+ unsigned int texbordercol;
+ unsigned int texorg;
+ unsigned int texwidth;
+ unsigned int texheight;
+ unsigned int texorg1;
+ unsigned int texorg2;
+ unsigned int texorg3;
+ unsigned int texorg4;
+} drm_mga_texture_regs_t;
+
+/* General aging mechanism
+ */
+typedef struct {
+ unsigned int head; /* Position of head pointer */
+ unsigned int wrap; /* Primary DMA wrap count */
+} drm_mga_age_t;
+
+typedef struct _drm_mga_sarea {
+ /* The channel for communication of state information to the kernel
+ * on firing a vertex dma buffer.
+ */
+ drm_mga_context_regs_t context_state;
+ drm_mga_server_regs_t server_state;
+ drm_mga_texture_regs_t tex_state[2];
+ unsigned int warp_pipe;
+ unsigned int dirty;
+ unsigned int vertsize;
+
+ /* The current cliprects, or a subset thereof.
+ */
+ struct drm_clip_rect boxes[MGA_NR_SAREA_CLIPRECTS];
+ unsigned int nbox;
+
+ /* Information about the most recently used 3d drawable. The
+ * client fills in the req_* fields, the server fills in the
+ * exported_ fields and puts the cliprects into boxes, above.
+ *
+ * The client clears the exported_drawable field before
+ * clobbering the boxes data.
+ */
+ unsigned int req_drawable; /* the X drawable id */
+ unsigned int req_draw_buffer; /* MGA_FRONT or MGA_BACK */
+
+ unsigned int exported_drawable;
+ unsigned int exported_index;
+ unsigned int exported_stamp;
+ unsigned int exported_buffers;
+ unsigned int exported_nfront;
+ unsigned int exported_nback;
+ int exported_back_x, exported_front_x, exported_w;
+ int exported_back_y, exported_front_y, exported_h;
+ struct drm_clip_rect exported_boxes[MGA_NR_SAREA_CLIPRECTS];
+
+ /* Counters for aging textures and for client-side throttling.
+ */
+ unsigned int status[4];
+ unsigned int last_wrap;
+
+ drm_mga_age_t last_frame;
+ unsigned int last_enqueue; /* last time a buffer was enqueued */
+ unsigned int last_dispatch; /* age of the most recently dispatched buffer */
+ unsigned int last_quiescent; /* */
+
+ /* LRU lists for texture memory in agp space and on the card.
+ */
+ struct drm_tex_region texList[MGA_NR_TEX_HEAPS][MGA_NR_TEX_REGIONS + 1];
+ unsigned int texAge[MGA_NR_TEX_HEAPS];
+
+ /* Mechanism to validate card state.
+ */
+ int ctxOwner;
+} drm_mga_sarea_t;
+
+/* MGA specific ioctls
+ * The device specific ioctl range is 0x40 to 0x79.
+ */
+#define DRM_MGA_INIT 0x00
+#define DRM_MGA_FLUSH 0x01
+#define DRM_MGA_RESET 0x02
+#define DRM_MGA_SWAP 0x03
+#define DRM_MGA_CLEAR 0x04
+#define DRM_MGA_VERTEX 0x05
+#define DRM_MGA_INDICES 0x06
+#define DRM_MGA_ILOAD 0x07
+#define DRM_MGA_BLIT 0x08
+#define DRM_MGA_GETPARAM 0x09
+
+/* 3.2:
+ * ioctls for operating on fences.
+ */
+#define DRM_MGA_SET_FENCE 0x0a
+#define DRM_MGA_WAIT_FENCE 0x0b
+#define DRM_MGA_DMA_BOOTSTRAP 0x0c
+
+#define DRM_IOCTL_MGA_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_MGA_INIT, drm_mga_init_t)
+#define DRM_IOCTL_MGA_FLUSH DRM_IOW( DRM_COMMAND_BASE + DRM_MGA_FLUSH, drm_lock_t)
+#define DRM_IOCTL_MGA_RESET DRM_IO( DRM_COMMAND_BASE + DRM_MGA_RESET)
+#define DRM_IOCTL_MGA_SWAP DRM_IO( DRM_COMMAND_BASE + DRM_MGA_SWAP)
+#define DRM_IOCTL_MGA_CLEAR DRM_IOW( DRM_COMMAND_BASE + DRM_MGA_CLEAR, drm_mga_clear_t)
+#define DRM_IOCTL_MGA_VERTEX DRM_IOW( DRM_COMMAND_BASE + DRM_MGA_VERTEX, drm_mga_vertex_t)
+#define DRM_IOCTL_MGA_INDICES DRM_IOW( DRM_COMMAND_BASE + DRM_MGA_INDICES, drm_mga_indices_t)
+#define DRM_IOCTL_MGA_ILOAD DRM_IOW( DRM_COMMAND_BASE + DRM_MGA_ILOAD, drm_mga_iload_t)
+#define DRM_IOCTL_MGA_BLIT DRM_IOW( DRM_COMMAND_BASE + DRM_MGA_BLIT, drm_mga_blit_t)
+#define DRM_IOCTL_MGA_GETPARAM DRM_IOWR(DRM_COMMAND_BASE + DRM_MGA_GETPARAM, drm_mga_getparam_t)
+#define DRM_IOCTL_MGA_SET_FENCE DRM_IOW( DRM_COMMAND_BASE + DRM_MGA_SET_FENCE, uint32_t)
+#define DRM_IOCTL_MGA_WAIT_FENCE DRM_IOWR(DRM_COMMAND_BASE + DRM_MGA_WAIT_FENCE, uint32_t)
+#define DRM_IOCTL_MGA_DMA_BOOTSTRAP DRM_IOWR(DRM_COMMAND_BASE + DRM_MGA_DMA_BOOTSTRAP, drm_mga_dma_bootstrap_t)
+
+typedef struct _drm_mga_warp_index {
+ int installed;
+ unsigned long phys_addr;
+ int size;
+} drm_mga_warp_index_t;
+
+typedef struct drm_mga_init {
+ enum {
+ MGA_INIT_DMA = 0x01,
+ MGA_CLEANUP_DMA = 0x02
+ } func;
+
+ unsigned long sarea_priv_offset;
+
+ int chipset;
+ int sgram;
+
+ unsigned int maccess;
+
+ unsigned int fb_cpp;
+ unsigned int front_offset, front_pitch;
+ unsigned int back_offset, back_pitch;
+
+ unsigned int depth_cpp;
+ unsigned int depth_offset, depth_pitch;
+
+ unsigned int texture_offset[MGA_NR_TEX_HEAPS];
+ unsigned int texture_size[MGA_NR_TEX_HEAPS];
+
+ unsigned long fb_offset;
+ unsigned long mmio_offset;
+ unsigned long status_offset;
+ unsigned long warp_offset;
+ unsigned long primary_offset;
+ unsigned long buffers_offset;
+} drm_mga_init_t;
+
+typedef struct drm_mga_dma_bootstrap {
+ /**
+ * \name AGP texture region
+ *
+ * On return from the DRM_MGA_DMA_BOOTSTRAP ioctl, these fields will
+ * be filled in with the actual AGP texture settings.
+ *
+ * \warning
+	 * If these fields are non-zero, but drm_mga_dma_bootstrap::agp_mode
+ * is zero, it means that PCI memory (most likely through the use of
+ * an IOMMU) is being used for "AGP" textures.
+ */
+ /*@{ */
+ unsigned long texture_handle; /**< Handle used to map AGP textures. */
+ uint32_t texture_size; /**< Size of the AGP texture region. */
+ /*@} */
+
+ /**
+ * Requested size of the primary DMA region.
+ *
+ * On return from the DRM_MGA_DMA_BOOTSTRAP ioctl, this field will be
+	 * filled in with the actual size of the primary DMA region allocated.
+ */
+ uint32_t primary_size;
+
+ /**
+ * Requested number of secondary DMA buffers.
+ *
+ * On return from the DRM_MGA_DMA_BOOTSTRAP ioctl, this field will be
+ * filled in with the actual number of secondary DMA buffers
+ * allocated. Particularly when PCI DMA is used, this may be
+	 * (substantially) less than the number requested.
+ */
+ uint32_t secondary_bin_count;
+
+ /**
+ * Requested size of each secondary DMA buffer.
+ *
+ * While the kernel \b is free to reduce
+	 * drm_mga_dma_bootstrap::secondary_bin_count, it is \b not allowed
+	 * to reduce drm_mga_dma_bootstrap::secondary_bin_size.
+ */
+ uint32_t secondary_bin_size;
+
+ /**
+ * Bit-wise mask of AGPSTAT2_* values. Currently only \c AGPSTAT2_1X,
+ * \c AGPSTAT2_2X, and \c AGPSTAT2_4X are supported. If this value is
+ * zero, it means that PCI DMA should be used, even if AGP is
+ * possible.
+ *
+ * On return from the DRM_MGA_DMA_BOOTSTRAP ioctl, this field will be
+ * filled in with the actual AGP mode. If AGP was not available
+ * (i.e., PCI DMA was used), this value will be zero.
+ */
+ uint32_t agp_mode;
+
+ /**
+ * Desired AGP GART size, measured in megabytes.
+ */
+ uint8_t agp_size;
+} drm_mga_dma_bootstrap_t;
+
+typedef struct drm_mga_clear {
+ unsigned int flags;
+ unsigned int clear_color;
+ unsigned int clear_depth;
+ unsigned int color_mask;
+ unsigned int depth_mask;
+} drm_mga_clear_t;
+
+typedef struct drm_mga_vertex {
+ int idx; /* buffer to queue */
+ int used; /* bytes in use */
+ int discard; /* client finished with buffer? */
+} drm_mga_vertex_t;
+
+typedef struct drm_mga_indices {
+ int idx; /* buffer to queue */
+ unsigned int start;
+ unsigned int end;
+ int discard; /* client finished with buffer? */
+} drm_mga_indices_t;
+
+typedef struct drm_mga_iload {
+ int idx;
+ unsigned int dstorg;
+ unsigned int length;
+} drm_mga_iload_t;
+
+typedef struct _drm_mga_blit {
+ unsigned int planemask;
+ unsigned int srcorg;
+ unsigned int dstorg;
+ int src_pitch, dst_pitch;
+ int delta_sx, delta_sy;
+ int delta_dx, delta_dy;
+ int height, ydir; /* flip image vertically */
+ int source_pitch, dest_pitch;
+} drm_mga_blit_t;
+
+/* 3.1: An ioctl to get parameters that aren't available to the 3d
+ * client any other way.
+ */
+#define MGA_PARAM_IRQ_NR 1
+
+/* 3.2: Query the actual card type. The DDX only distinguishes between
+ * G200 chips and non-G200 chips, which it calls G400. It turns out that
+ * there are some very subtle differences between the G4x0 chips and the G550
+ * chips. Using this parameter query, a client-side driver can detect the
+ * difference between a G4x0 and a G550.
+ */
+#define MGA_PARAM_CARD_TYPE 2
+
+typedef struct drm_mga_getparam {
+ int param;
+ void __user *value;
+} drm_mga_getparam_t;
+
+#endif
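
The DMA bootstrap comments above translate into a request/response pattern roughly like the following; the sizes, the 0x04 stand-in for AGPSTAT2_4X, and the helper name are assumptions for illustration, not values this header mandates.

    #include <string.h>
    #include <sys/ioctl.h>
    #include "mga_drm.h"

    /* Returns 1 if AGP DMA was granted, 0 for PCI DMA, -1 on error. */
    static int mga_bootstrap_dma(int fd, drm_mga_dma_bootstrap_t *dma_bs)
    {
            memset(dma_bs, 0, sizeof(*dma_bs));
            dma_bs->primary_size = 1024 * 1024;     /* 1 MB primary region */
            dma_bs->secondary_bin_count = 16;       /* kernel may grant fewer */
            dma_bs->secondary_bin_size = 64 * 1024;
            dma_bs->agp_mode = 0x04;                /* AGP 4x; 0 forces PCI DMA */
            dma_bs->agp_size = 32;                  /* GART size in megabytes */

            if (ioctl(fd, DRM_IOCTL_MGA_DMA_BOOTSTRAP, dma_bs) != 0)
                    return -1;
            return dma_bs->agp_mode != 0;
    }
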
diff --git a/include/drm/r128_drm.h b/include/drm/r128_drm.h
new file mode 100644
index 0000000..8d8878b
--- /dev/null
+++ b/include/drm/r128_drm.h
@@ -0,0 +1,326 @@
+/* r128_drm.h -- Public header for the r128 driver -*- linux-c -*-
+ * Created: Wed Apr 5 19:24:19 2000 by kevin@precisioninsight.com
+ */
+/*
+ * Copyright 2000 Precision Insight, Inc., Cedar Park, Texas.
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Gareth Hughes <gareth@valinux.com>
+ * Kevin E. Martin <martin@valinux.com>
+ */
+
+#ifndef __R128_DRM_H__
+#define __R128_DRM_H__
+
+/* WARNING: If you change any of these defines, make sure to change the
+ * defines in the X server file (r128_sarea.h)
+ */
+#ifndef __R128_SAREA_DEFINES__
+#define __R128_SAREA_DEFINES__
+
+/* What needs to be changed for the current vertex buffer?
+ */
+#define R128_UPLOAD_CONTEXT 0x001
+#define R128_UPLOAD_SETUP 0x002
+#define R128_UPLOAD_TEX0 0x004
+#define R128_UPLOAD_TEX1 0x008
+#define R128_UPLOAD_TEX0IMAGES 0x010
+#define R128_UPLOAD_TEX1IMAGES 0x020
+#define R128_UPLOAD_CORE 0x040
+#define R128_UPLOAD_MASKS 0x080
+#define R128_UPLOAD_WINDOW 0x100
+#define R128_UPLOAD_CLIPRECTS 0x200 /* handled client-side */
+#define R128_REQUIRE_QUIESCENCE 0x400
+#define R128_UPLOAD_ALL 0x7ff
+
+#define R128_FRONT 0x1
+#define R128_BACK 0x2
+#define R128_DEPTH 0x4
+
+/* Primitive types
+ */
+#define R128_POINTS 0x1
+#define R128_LINES 0x2
+#define R128_LINE_STRIP 0x3
+#define R128_TRIANGLES 0x4
+#define R128_TRIANGLE_FAN 0x5
+#define R128_TRIANGLE_STRIP 0x6
+
+/* Vertex/indirect buffer size
+ */
+#define R128_BUFFER_SIZE 16384
+
+/* Byte offsets for indirect buffer data
+ */
+#define R128_INDEX_PRIM_OFFSET 20
+#define R128_HOSTDATA_BLIT_OFFSET 32
+
+/* Keep these small for testing.
+ */
+#define R128_NR_SAREA_CLIPRECTS 12
+
+/* There are 2 heaps (local/AGP). Each region within a heap is a
+ * minimum of 64k, and there are at most 64 of them per heap.
+ */
+#define R128_LOCAL_TEX_HEAP 0
+#define R128_AGP_TEX_HEAP 1
+#define R128_NR_TEX_HEAPS 2
+#define R128_NR_TEX_REGIONS 64
+#define R128_LOG_TEX_GRANULARITY 16
+
+#define R128_NR_CONTEXT_REGS 12
+
+#define R128_MAX_TEXTURE_LEVELS 11
+#define R128_MAX_TEXTURE_UNITS 2
+
+#endif /* __R128_SAREA_DEFINES__ */
+
+typedef struct {
+ /* Context state - can be written in one large chunk */
+ unsigned int dst_pitch_offset_c;
+ unsigned int dp_gui_master_cntl_c;
+ unsigned int sc_top_left_c;
+ unsigned int sc_bottom_right_c;
+ unsigned int z_offset_c;
+ unsigned int z_pitch_c;
+ unsigned int z_sten_cntl_c;
+ unsigned int tex_cntl_c;
+ unsigned int misc_3d_state_cntl_reg;
+ unsigned int texture_clr_cmp_clr_c;
+ unsigned int texture_clr_cmp_msk_c;
+ unsigned int fog_color_c;
+
+ /* Texture state */
+ unsigned int tex_size_pitch_c;
+ unsigned int constant_color_c;
+
+ /* Setup state */
+ unsigned int pm4_vc_fpu_setup;
+ unsigned int setup_cntl;
+
+ /* Mask state */
+ unsigned int dp_write_mask;
+ unsigned int sten_ref_mask_c;
+ unsigned int plane_3d_mask_c;
+
+ /* Window state */
+ unsigned int window_xy_offset;
+
+ /* Core state */
+ unsigned int scale_3d_cntl;
+} drm_r128_context_regs_t;
+
+/* Setup registers for each texture unit
+ */
+typedef struct {
+ unsigned int tex_cntl;
+ unsigned int tex_combine_cntl;
+ unsigned int tex_size_pitch;
+ unsigned int tex_offset[R128_MAX_TEXTURE_LEVELS];
+ unsigned int tex_border_color;
+} drm_r128_texture_regs_t;
+
+typedef struct drm_r128_sarea {
+ /* The channel for communication of state information to the kernel
+ * on firing a vertex buffer.
+ */
+ drm_r128_context_regs_t context_state;
+ drm_r128_texture_regs_t tex_state[R128_MAX_TEXTURE_UNITS];
+ unsigned int dirty;
+ unsigned int vertsize;
+ unsigned int vc_format;
+
+ /* The current cliprects, or a subset thereof.
+ */
+ struct drm_clip_rect boxes[R128_NR_SAREA_CLIPRECTS];
+ unsigned int nbox;
+
+ /* Counters for client-side throttling of rendering clients.
+ */
+ unsigned int last_frame;
+ unsigned int last_dispatch;
+
+ struct drm_tex_region tex_list[R128_NR_TEX_HEAPS][R128_NR_TEX_REGIONS + 1];
+ unsigned int tex_age[R128_NR_TEX_HEAPS];
+ int ctx_owner;
+ int pfAllowPageFlip; /* number of 3d windows (0,1,2 or more) */
+ int pfCurrentPage; /* which buffer is being displayed? */
+} drm_r128_sarea_t;
+
+/* WARNING: If you change any of these defines, make sure to change the
+ * defines in the Xserver file (xf86drmR128.h)
+ */
+
+/* Rage 128 specific ioctls
+ * The device specific ioctl range is 0x40 to 0x79.
+ */
+#define DRM_R128_INIT 0x00
+#define DRM_R128_CCE_START 0x01
+#define DRM_R128_CCE_STOP 0x02
+#define DRM_R128_CCE_RESET 0x03
+#define DRM_R128_CCE_IDLE 0x04
+/* 0x05 not used */
+#define DRM_R128_RESET 0x06
+#define DRM_R128_SWAP 0x07
+#define DRM_R128_CLEAR 0x08
+#define DRM_R128_VERTEX 0x09
+#define DRM_R128_INDICES 0x0a
+#define DRM_R128_BLIT 0x0b
+#define DRM_R128_DEPTH 0x0c
+#define DRM_R128_STIPPLE 0x0d
+/* 0x0e not used */
+#define DRM_R128_INDIRECT 0x0f
+#define DRM_R128_FULLSCREEN 0x10
+#define DRM_R128_CLEAR2 0x11
+#define DRM_R128_GETPARAM 0x12
+#define DRM_R128_FLIP 0x13
+
+#define DRM_IOCTL_R128_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_R128_INIT, drm_r128_init_t)
+#define DRM_IOCTL_R128_CCE_START DRM_IO( DRM_COMMAND_BASE + DRM_R128_CCE_START)
+#define DRM_IOCTL_R128_CCE_STOP DRM_IOW( DRM_COMMAND_BASE + DRM_R128_CCE_STOP, drm_r128_cce_stop_t)
+#define DRM_IOCTL_R128_CCE_RESET DRM_IO( DRM_COMMAND_BASE + DRM_R128_CCE_RESET)
+#define DRM_IOCTL_R128_CCE_IDLE DRM_IO( DRM_COMMAND_BASE + DRM_R128_CCE_IDLE)
+/* 0x05 not used */
+#define DRM_IOCTL_R128_RESET DRM_IO( DRM_COMMAND_BASE + DRM_R128_RESET)
+#define DRM_IOCTL_R128_SWAP DRM_IO( DRM_COMMAND_BASE + DRM_R128_SWAP)
+#define DRM_IOCTL_R128_CLEAR DRM_IOW( DRM_COMMAND_BASE + DRM_R128_CLEAR, drm_r128_clear_t)
+#define DRM_IOCTL_R128_VERTEX DRM_IOW( DRM_COMMAND_BASE + DRM_R128_VERTEX, drm_r128_vertex_t)
+#define DRM_IOCTL_R128_INDICES DRM_IOW( DRM_COMMAND_BASE + DRM_R128_INDICES, drm_r128_indices_t)
+#define DRM_IOCTL_R128_BLIT DRM_IOW( DRM_COMMAND_BASE + DRM_R128_BLIT, drm_r128_blit_t)
+#define DRM_IOCTL_R128_DEPTH DRM_IOW( DRM_COMMAND_BASE + DRM_R128_DEPTH, drm_r128_depth_t)
+#define DRM_IOCTL_R128_STIPPLE DRM_IOW( DRM_COMMAND_BASE + DRM_R128_STIPPLE, drm_r128_stipple_t)
+/* 0x0e not used */
+#define DRM_IOCTL_R128_INDIRECT DRM_IOWR(DRM_COMMAND_BASE + DRM_R128_INDIRECT, drm_r128_indirect_t)
+#define DRM_IOCTL_R128_FULLSCREEN DRM_IOW( DRM_COMMAND_BASE + DRM_R128_FULLSCREEN, drm_r128_fullscreen_t)
+#define DRM_IOCTL_R128_CLEAR2 DRM_IOW( DRM_COMMAND_BASE + DRM_R128_CLEAR2, drm_r128_clear2_t)
+#define DRM_IOCTL_R128_GETPARAM DRM_IOWR( DRM_COMMAND_BASE + DRM_R128_GETPARAM, drm_r128_getparam_t)
+#define DRM_IOCTL_R128_FLIP DRM_IO( DRM_COMMAND_BASE + DRM_R128_FLIP)
+
+typedef struct drm_r128_init {
+ enum {
+ R128_INIT_CCE = 0x01,
+ R128_CLEANUP_CCE = 0x02
+ } func;
+ unsigned long sarea_priv_offset;
+ int is_pci;
+ int cce_mode;
+ int cce_secure;
+ int ring_size;
+ int usec_timeout;
+
+ unsigned int fb_bpp;
+ unsigned int front_offset, front_pitch;
+ unsigned int back_offset, back_pitch;
+ unsigned int depth_bpp;
+ unsigned int depth_offset, depth_pitch;
+ unsigned int span_offset;
+
+ unsigned long fb_offset;
+ unsigned long mmio_offset;
+ unsigned long ring_offset;
+ unsigned long ring_rptr_offset;
+ unsigned long buffers_offset;
+ unsigned long agp_textures_offset;
+} drm_r128_init_t;
+
+typedef struct drm_r128_cce_stop {
+ int flush;
+ int idle;
+} drm_r128_cce_stop_t;
+
+typedef struct drm_r128_clear {
+ unsigned int flags;
+ unsigned int clear_color;
+ unsigned int clear_depth;
+ unsigned int color_mask;
+ unsigned int depth_mask;
+} drm_r128_clear_t;
+
+typedef struct drm_r128_vertex {
+ int prim;
+ int idx; /* Index of vertex buffer */
+ int count; /* Number of vertices in buffer */
+ int discard; /* Client finished with buffer? */
+} drm_r128_vertex_t;
+
+typedef struct drm_r128_indices {
+ int prim;
+ int idx;
+ int start;
+ int end;
+ int discard; /* Client finished with buffer? */
+} drm_r128_indices_t;
+
+typedef struct drm_r128_blit {
+ int idx;
+ int pitch;
+ int offset;
+ int format;
+ unsigned short x, y;
+ unsigned short width, height;
+} drm_r128_blit_t;
+
+typedef struct drm_r128_depth {
+ enum {
+ R128_WRITE_SPAN = 0x01,
+ R128_WRITE_PIXELS = 0x02,
+ R128_READ_SPAN = 0x03,
+ R128_READ_PIXELS = 0x04
+ } func;
+ int n;
+ int __user *x;
+ int __user *y;
+ unsigned int __user *buffer;
+ unsigned char __user *mask;
+} drm_r128_depth_t;
+
+typedef struct drm_r128_stipple {
+ unsigned int __user *mask;
+} drm_r128_stipple_t;
+
+typedef struct drm_r128_indirect {
+ int idx;
+ int start;
+ int end;
+ int discard;
+} drm_r128_indirect_t;
+
+typedef struct drm_r128_fullscreen {
+ enum {
+ R128_INIT_FULLSCREEN = 0x01,
+ R128_CLEANUP_FULLSCREEN = 0x02
+ } func;
+} drm_r128_fullscreen_t;
+
+/* 2.3: An ioctl to get parameters that aren't available to the 3d
+ * client any other way.
+ */
+#define R128_PARAM_IRQ_NR 1
+
+typedef struct drm_r128_getparam {
+ int param;
+ void __user *value;
+} drm_r128_getparam_t;
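+/* Editor's usage sketch (illustrative only, not part of the original
+ * header): querying the IRQ number from userspace, assuming "fd" is an
+ * already-open DRM device file descriptor and <sys/ioctl.h> is included:
+ *
+ *	int irq_nr = 0;
+ *	drm_r128_getparam_t gp = {
+ *		.param = R128_PARAM_IRQ_NR,
+ *		.value = &irq_nr,
+ *	};
+ *	if (ioctl(fd, DRM_IOCTL_R128_GETPARAM, &gp) == 0)
+ *		printf("R128 IRQ: %d\n", irq_nr);
+ */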
+
+#endif
diff --git a/include/drm/radeon_drm.h b/include/drm/radeon_drm.h
new file mode 100644
index 0000000..73ff51f
--- /dev/null
+++ b/include/drm/radeon_drm.h
@@ -0,0 +1,749 @@
+/* radeon_drm.h -- Public header for the radeon driver -*- linux-c -*-
+ *
+ * Copyright 2000 Precision Insight, Inc., Cedar Park, Texas.
+ * Copyright 2000 VA Linux Systems, Inc., Fremont, California.
+ * Copyright 2002 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Kevin E. Martin <martin@valinux.com>
+ * Gareth Hughes <gareth@valinux.com>
+ * Keith Whitwell <keith@tungstengraphics.com>
+ */
+
+#ifndef __RADEON_DRM_H__
+#define __RADEON_DRM_H__
+
+/* WARNING: If you change any of these defines, make sure to change the
+ * defines in the X server file (radeon_sarea.h)
+ */
+#ifndef __RADEON_SAREA_DEFINES__
+#define __RADEON_SAREA_DEFINES__
+
+/* Old style state flags, required for sarea interface (1.1 and 1.2
+ * clears) and 1.2 drm_vertex2 ioctl.
+ */
+#define RADEON_UPLOAD_CONTEXT 0x00000001
+#define RADEON_UPLOAD_VERTFMT 0x00000002
+#define RADEON_UPLOAD_LINE 0x00000004
+#define RADEON_UPLOAD_BUMPMAP 0x00000008
+#define RADEON_UPLOAD_MASKS 0x00000010
+#define RADEON_UPLOAD_VIEWPORT 0x00000020
+#define RADEON_UPLOAD_SETUP 0x00000040
+#define RADEON_UPLOAD_TCL 0x00000080
+#define RADEON_UPLOAD_MISC 0x00000100
+#define RADEON_UPLOAD_TEX0 0x00000200
+#define RADEON_UPLOAD_TEX1 0x00000400
+#define RADEON_UPLOAD_TEX2 0x00000800
+#define RADEON_UPLOAD_TEX0IMAGES 0x00001000
+#define RADEON_UPLOAD_TEX1IMAGES 0x00002000
+#define RADEON_UPLOAD_TEX2IMAGES 0x00004000
+#define RADEON_UPLOAD_CLIPRECTS 0x00008000 /* handled client-side */
+#define RADEON_REQUIRE_QUIESCENCE 0x00010000
+#define RADEON_UPLOAD_ZBIAS 0x00020000 /* version 1.2 and newer */
+#define RADEON_UPLOAD_ALL 0x003effff
+#define RADEON_UPLOAD_CONTEXT_ALL 0x003e01ff
+
+/* New style per-packet identifiers for use in cmd_buffer ioctl with
+ * the RADEON_EMIT_PACKET command. Comments relate new packets to old
+ * state bits and the packet size:
+ */
+#define RADEON_EMIT_PP_MISC 0 /* context/7 */
+#define RADEON_EMIT_PP_CNTL 1 /* context/3 */
+#define RADEON_EMIT_RB3D_COLORPITCH 2 /* context/1 */
+#define RADEON_EMIT_RE_LINE_PATTERN 3 /* line/2 */
+#define RADEON_EMIT_SE_LINE_WIDTH 4 /* line/1 */
+#define RADEON_EMIT_PP_LUM_MATRIX 5 /* bumpmap/1 */
+#define RADEON_EMIT_PP_ROT_MATRIX_0 6 /* bumpmap/2 */
+#define RADEON_EMIT_RB3D_STENCILREFMASK 7 /* masks/3 */
+#define RADEON_EMIT_SE_VPORT_XSCALE 8 /* viewport/6 */
+#define RADEON_EMIT_SE_CNTL 9 /* setup/2 */
+#define RADEON_EMIT_SE_CNTL_STATUS 10 /* setup/1 */
+#define RADEON_EMIT_RE_MISC 11 /* misc/1 */
+#define RADEON_EMIT_PP_TXFILTER_0 12 /* tex0/6 */
+#define RADEON_EMIT_PP_BORDER_COLOR_0 13 /* tex0/1 */
+#define RADEON_EMIT_PP_TXFILTER_1 14 /* tex1/6 */
+#define RADEON_EMIT_PP_BORDER_COLOR_1 15 /* tex1/1 */
+#define RADEON_EMIT_PP_TXFILTER_2 16 /* tex2/6 */
+#define RADEON_EMIT_PP_BORDER_COLOR_2 17 /* tex2/1 */
+#define RADEON_EMIT_SE_ZBIAS_FACTOR 18 /* zbias/2 */
+#define RADEON_EMIT_SE_TCL_OUTPUT_VTX_FMT 19 /* tcl/11 */
+#define RADEON_EMIT_SE_TCL_MATERIAL_EMMISSIVE_RED 20 /* material/17 */
+#define R200_EMIT_PP_TXCBLEND_0 21 /* tex0/4 */
+#define R200_EMIT_PP_TXCBLEND_1 22 /* tex1/4 */
+#define R200_EMIT_PP_TXCBLEND_2 23 /* tex2/4 */
+#define R200_EMIT_PP_TXCBLEND_3 24 /* tex3/4 */
+#define R200_EMIT_PP_TXCBLEND_4 25 /* tex4/4 */
+#define R200_EMIT_PP_TXCBLEND_5 26 /* tex5/4 */
+#define R200_EMIT_PP_TXCBLEND_6 27 /* /4 */
+#define R200_EMIT_PP_TXCBLEND_7 28 /* /4 */
+#define R200_EMIT_TCL_LIGHT_MODEL_CTL_0 29 /* tcl/7 */
+#define R200_EMIT_TFACTOR_0 30 /* tf/7 */
+#define R200_EMIT_VTX_FMT_0 31 /* vtx/5 */
+#define R200_EMIT_VAP_CTL 32 /* vap/1 */
+#define R200_EMIT_MATRIX_SELECT_0 33 /* msl/5 */
+#define R200_EMIT_TEX_PROC_CTL_2 34 /* tcg/5 */
+#define R200_EMIT_TCL_UCP_VERT_BLEND_CTL 35 /* tcl/1 */
+#define R200_EMIT_PP_TXFILTER_0 36 /* tex0/6 */
+#define R200_EMIT_PP_TXFILTER_1 37 /* tex1/6 */
+#define R200_EMIT_PP_TXFILTER_2 38 /* tex2/6 */
+#define R200_EMIT_PP_TXFILTER_3 39 /* tex3/6 */
+#define R200_EMIT_PP_TXFILTER_4 40 /* tex4/6 */
+#define R200_EMIT_PP_TXFILTER_5 41 /* tex5/6 */
+#define R200_EMIT_PP_TXOFFSET_0 42 /* tex0/1 */
+#define R200_EMIT_PP_TXOFFSET_1 43 /* tex1/1 */
+#define R200_EMIT_PP_TXOFFSET_2 44 /* tex2/1 */
+#define R200_EMIT_PP_TXOFFSET_3 45 /* tex3/1 */
+#define R200_EMIT_PP_TXOFFSET_4 46 /* tex4/1 */
+#define R200_EMIT_PP_TXOFFSET_5 47 /* tex5/1 */
+#define R200_EMIT_VTE_CNTL 48 /* vte/1 */
+#define R200_EMIT_OUTPUT_VTX_COMP_SEL 49 /* vtx/1 */
+#define R200_EMIT_PP_TAM_DEBUG3 50 /* tam/1 */
+#define R200_EMIT_PP_CNTL_X 51 /* cst/1 */
+#define R200_EMIT_RB3D_DEPTHXY_OFFSET 52 /* cst/1 */
+#define R200_EMIT_RE_AUX_SCISSOR_CNTL 53 /* cst/1 */
+#define R200_EMIT_RE_SCISSOR_TL_0 54 /* cst/2 */
+#define R200_EMIT_RE_SCISSOR_TL_1 55 /* cst/2 */
+#define R200_EMIT_RE_SCISSOR_TL_2 56 /* cst/2 */
+#define R200_EMIT_SE_VAP_CNTL_STATUS 57 /* cst/1 */
+#define R200_EMIT_SE_VTX_STATE_CNTL 58 /* cst/1 */
+#define R200_EMIT_RE_POINTSIZE 59 /* cst/1 */
+#define R200_EMIT_TCL_INPUT_VTX_VECTOR_ADDR_0 60 /* cst/4 */
+#define R200_EMIT_PP_CUBIC_FACES_0 61
+#define R200_EMIT_PP_CUBIC_OFFSETS_0 62
+#define R200_EMIT_PP_CUBIC_FACES_1 63
+#define R200_EMIT_PP_CUBIC_OFFSETS_1 64
+#define R200_EMIT_PP_CUBIC_FACES_2 65
+#define R200_EMIT_PP_CUBIC_OFFSETS_2 66
+#define R200_EMIT_PP_CUBIC_FACES_3 67
+#define R200_EMIT_PP_CUBIC_OFFSETS_3 68
+#define R200_EMIT_PP_CUBIC_FACES_4 69
+#define R200_EMIT_PP_CUBIC_OFFSETS_4 70
+#define R200_EMIT_PP_CUBIC_FACES_5 71
+#define R200_EMIT_PP_CUBIC_OFFSETS_5 72
+#define RADEON_EMIT_PP_TEX_SIZE_0 73
+#define RADEON_EMIT_PP_TEX_SIZE_1 74
+#define RADEON_EMIT_PP_TEX_SIZE_2 75
+#define R200_EMIT_RB3D_BLENDCOLOR 76
+#define R200_EMIT_TCL_POINT_SPRITE_CNTL 77
+#define RADEON_EMIT_PP_CUBIC_FACES_0 78
+#define RADEON_EMIT_PP_CUBIC_OFFSETS_T0 79
+#define RADEON_EMIT_PP_CUBIC_FACES_1 80
+#define RADEON_EMIT_PP_CUBIC_OFFSETS_T1 81
+#define RADEON_EMIT_PP_CUBIC_FACES_2 82
+#define RADEON_EMIT_PP_CUBIC_OFFSETS_T2 83
+#define R200_EMIT_PP_TRI_PERF_CNTL 84
+#define R200_EMIT_PP_AFS_0 85
+#define R200_EMIT_PP_AFS_1 86
+#define R200_EMIT_ATF_TFACTOR 87
+#define R200_EMIT_PP_TXCTLALL_0 88
+#define R200_EMIT_PP_TXCTLALL_1 89
+#define R200_EMIT_PP_TXCTLALL_2 90
+#define R200_EMIT_PP_TXCTLALL_3 91
+#define R200_EMIT_PP_TXCTLALL_4 92
+#define R200_EMIT_PP_TXCTLALL_5 93
+#define R200_EMIT_VAP_PVS_CNTL 94
+#define RADEON_MAX_STATE_PACKETS 95
+
+/* Commands understood by cmd_buffer ioctl. More can be added but
+ * obviously these can't be removed or changed:
+ */
+#define RADEON_CMD_PACKET 1 /* emit one of the register packets above */
+#define RADEON_CMD_SCALARS 2 /* emit scalar data */
+#define RADEON_CMD_VECTORS 3 /* emit vector data */
+#define RADEON_CMD_DMA_DISCARD 4 /* discard current dma buf */
+#define RADEON_CMD_PACKET3 5 /* emit hw packet */
+#define RADEON_CMD_PACKET3_CLIP 6 /* emit hw packet wrapped in cliprects */
+#define RADEON_CMD_SCALARS2 7 /* r200 stopgap */
+#define RADEON_CMD_WAIT 8 /* emit hw wait commands -- note:
+ * doesn't make the cpu wait, just
+ * the graphics hardware */
+#define RADEON_CMD_VECLINEAR 9 /* another r200 stopgap */
+
+typedef union {
+ int i;
+ struct {
+ unsigned char cmd_type, pad0, pad1, pad2;
+ } header;
+ struct {
+ unsigned char cmd_type, packet_id, pad0, pad1;
+ } packet;
+ struct {
+ unsigned char cmd_type, offset, stride, count;
+ } scalars;
+ struct {
+ unsigned char cmd_type, offset, stride, count;
+ } vectors;
+ struct {
+ unsigned char cmd_type, addr_lo, addr_hi, count;
+ } veclinear;
+ struct {
+ unsigned char cmd_type, buf_idx, pad0, pad1;
+ } dma;
+ struct {
+ unsigned char cmd_type, flags, pad0, pad1;
+ } wait;
+} drm_radeon_cmd_header_t;
+
+#define RADEON_WAIT_2D 0x1
+#define RADEON_WAIT_3D 0x2
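+/* Editor's illustration (not part of the original header): a client
+ * encodes one command stream entry per 32-bit header using the union
+ * above, e.g. a wait-for-3D-idle command:
+ *
+ *	drm_radeon_cmd_header_t h = { .i = 0 };
+ *	h.wait.cmd_type = RADEON_CMD_WAIT;
+ *	h.wait.flags    = RADEON_WAIT_3D;
+ *
+ * h.i is then appended to the buffer later submitted through the
+ * DRM_RADEON_CMDBUF ioctl.
+ */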
+
+/* Allowed parameters for R300_CMD_PACKET3
+ */
+#define R300_CMD_PACKET3_CLEAR 0
+#define R300_CMD_PACKET3_RAW 1
+
+/* Commands understood by cmd_buffer ioctl for R300.
+ * The interface has not been stabilized, so some of these may be removed
+ * and eventually reordered before stabilization.
+ */
+#define R300_CMD_PACKET0 1
+#define R300_CMD_VPU 2 /* emit vertex program upload */
+#define R300_CMD_PACKET3 3 /* emit a packet3 */
+#define R300_CMD_END3D 4 /* emit sequence ending 3d rendering */
+#define R300_CMD_CP_DELAY 5
+#define R300_CMD_DMA_DISCARD 6
+#define R300_CMD_WAIT 7
+# define R300_WAIT_2D 0x1
+# define R300_WAIT_3D 0x2
+/* these two defines are DOING IT WRONG - however
+ * we have userspace which relies on using these.
+ * The wait interface is kept for backwards compatibility; new
+ * code should use the NEW_WAIT defines below.
+ * THESE ARE NOT BIT FIELDS
+ */
+# define R300_WAIT_2D_CLEAN 0x3
+# define R300_WAIT_3D_CLEAN 0x4
+
+# define R300_NEW_WAIT_2D_3D 0x3
+# define R300_NEW_WAIT_2D_2D_CLEAN 0x4
+# define R300_NEW_WAIT_3D_3D_CLEAN 0x6
+# define R300_NEW_WAIT_2D_2D_CLEAN_3D_3D_CLEAN 0x8
+
+#define R300_CMD_SCRATCH 8
+#define R300_CMD_R500FP 9
+
+typedef union {
+ unsigned int u;
+ struct {
+ unsigned char cmd_type, pad0, pad1, pad2;
+ } header;
+ struct {
+ unsigned char cmd_type, count, reglo, reghi;
+ } packet0;
+ struct {
+ unsigned char cmd_type, count, adrlo, adrhi;
+ } vpu;
+ struct {
+ unsigned char cmd_type, packet, pad0, pad1;
+ } packet3;
+ struct {
+ unsigned char cmd_type, packet;
+ unsigned short count; /* amount of packet2 to emit */
+ } delay;
+ struct {
+ unsigned char cmd_type, buf_idx, pad0, pad1;
+ } dma;
+ struct {
+ unsigned char cmd_type, flags, pad0, pad1;
+ } wait;
+ struct {
+ unsigned char cmd_type, reg, n_bufs, flags;
+ } scratch;
+ struct {
+ unsigned char cmd_type, count, adrlo, adrhi_flags;
+ } r500fp;
+} drm_r300_cmd_header_t;
+
+#define RADEON_FRONT 0x1
+#define RADEON_BACK 0x2
+#define RADEON_DEPTH 0x4
+#define RADEON_STENCIL 0x8
+#define RADEON_CLEAR_FASTZ 0x80000000
+#define RADEON_USE_HIERZ 0x40000000
+#define RADEON_USE_COMP_ZBUF 0x20000000
+
+#define R500FP_CONSTANT_TYPE (1 << 1)
+#define R500FP_CONSTANT_CLAMP (1 << 2)
+
+/* Primitive types
+ */
+#define RADEON_POINTS 0x1
+#define RADEON_LINES 0x2
+#define RADEON_LINE_STRIP 0x3
+#define RADEON_TRIANGLES 0x4
+#define RADEON_TRIANGLE_FAN 0x5
+#define RADEON_TRIANGLE_STRIP 0x6
+
+/* Vertex/indirect buffer size
+ */
+#define RADEON_BUFFER_SIZE 65536
+
+/* Byte offsets for indirect buffer data
+ */
+#define RADEON_INDEX_PRIM_OFFSET 20
+
+#define RADEON_SCRATCH_REG_OFFSET 32
+
+#define RADEON_NR_SAREA_CLIPRECTS 12
+
+/* There are 2 heaps (local/GART). Each region within a heap is a
+ * minimum of 64k, and there are at most 64 of them per heap.
+ */
+#define RADEON_LOCAL_TEX_HEAP 0
+#define RADEON_GART_TEX_HEAP 1
+#define RADEON_NR_TEX_HEAPS 2
+#define RADEON_NR_TEX_REGIONS 64
+#define RADEON_LOG_TEX_GRANULARITY 16
+
+#define RADEON_MAX_TEXTURE_LEVELS 12
+#define RADEON_MAX_TEXTURE_UNITS 3
+
+#define RADEON_MAX_SURFACES 8
+
+/* Blits have strict offset rules. All blit offsets must be aligned on
+ * a 1K-byte boundary.
+ */
+#define RADEON_OFFSET_SHIFT 10
+#define RADEON_OFFSET_ALIGN (1 << RADEON_OFFSET_SHIFT)
+#define RADEON_OFFSET_MASK (RADEON_OFFSET_ALIGN - 1)
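+/* Editor's sketch of the intended use of the alignment helpers above
+ * (an illustration, not taken from the driver code):
+ *
+ *	offset = (offset + RADEON_OFFSET_MASK) & ~RADEON_OFFSET_MASK;
+ *
+ * rounds an arbitrary offset up to the next 1K boundary, after which
+ * (offset & RADEON_OFFSET_MASK) == 0 holds.
+ */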
+
+#endif /* __RADEON_SAREA_DEFINES__ */
+
+typedef struct {
+ unsigned int red;
+ unsigned int green;
+ unsigned int blue;
+ unsigned int alpha;
+} radeon_color_regs_t;
+
+typedef struct {
+ /* Context state */
+ unsigned int pp_misc; /* 0x1c14 */
+ unsigned int pp_fog_color;
+ unsigned int re_solid_color;
+ unsigned int rb3d_blendcntl;
+ unsigned int rb3d_depthoffset;
+ unsigned int rb3d_depthpitch;
+ unsigned int rb3d_zstencilcntl;
+
+ unsigned int pp_cntl; /* 0x1c38 */
+ unsigned int rb3d_cntl;
+ unsigned int rb3d_coloroffset;
+ unsigned int re_width_height;
+ unsigned int rb3d_colorpitch;
+ unsigned int se_cntl;
+
+ /* Vertex format state */
+ unsigned int se_coord_fmt; /* 0x1c50 */
+
+ /* Line state */
+ unsigned int re_line_pattern; /* 0x1cd0 */
+ unsigned int re_line_state;
+
+ unsigned int se_line_width; /* 0x1db8 */
+
+ /* Bumpmap state */
+ unsigned int pp_lum_matrix; /* 0x1d00 */
+
+ unsigned int pp_rot_matrix_0; /* 0x1d58 */
+ unsigned int pp_rot_matrix_1;
+
+ /* Mask state */
+ unsigned int rb3d_stencilrefmask; /* 0x1d7c */
+ unsigned int rb3d_ropcntl;
+ unsigned int rb3d_planemask;
+
+ /* Viewport state */
+ unsigned int se_vport_xscale; /* 0x1d98 */
+ unsigned int se_vport_xoffset;
+ unsigned int se_vport_yscale;
+ unsigned int se_vport_yoffset;
+ unsigned int se_vport_zscale;
+ unsigned int se_vport_zoffset;
+
+ /* Setup state */
+ unsigned int se_cntl_status; /* 0x2140 */
+
+ /* Misc state */
+ unsigned int re_top_left; /* 0x26c0 */
+ unsigned int re_misc;
+} drm_radeon_context_regs_t;
+
+typedef struct {
+ /* Zbias state */
+ unsigned int se_zbias_factor; /* 0x1dac */
+ unsigned int se_zbias_constant;
+} drm_radeon_context2_regs_t;
+
+/* Setup registers for each texture unit
+ */
+typedef struct {
+ unsigned int pp_txfilter;
+ unsigned int pp_txformat;
+ unsigned int pp_txoffset;
+ unsigned int pp_txcblend;
+ unsigned int pp_txablend;
+ unsigned int pp_tfactor;
+ unsigned int pp_border_color;
+} drm_radeon_texture_regs_t;
+
+typedef struct {
+ unsigned int start;
+ unsigned int finish;
+ unsigned int prim:8;
+ unsigned int stateidx:8;
+ unsigned int numverts:16; /* overloaded as offset/64 for elt prims */
+ unsigned int vc_format; /* vertex format */
+} drm_radeon_prim_t;
+
+typedef struct {
+ drm_radeon_context_regs_t context;
+ drm_radeon_texture_regs_t tex[RADEON_MAX_TEXTURE_UNITS];
+ drm_radeon_context2_regs_t context2;
+ unsigned int dirty;
+} drm_radeon_state_t;
+
+typedef struct {
+ /* The channel for communication of state information to the
+ * kernel on firing a vertex buffer with either of the
+ * obsoleted vertex/index ioctls.
+ */
+ drm_radeon_context_regs_t context_state;
+ drm_radeon_texture_regs_t tex_state[RADEON_MAX_TEXTURE_UNITS];
+ unsigned int dirty;
+ unsigned int vertsize;
+ unsigned int vc_format;
+
+ /* The current cliprects, or a subset thereof.
+ */
+ struct drm_clip_rect boxes[RADEON_NR_SAREA_CLIPRECTS];
+ unsigned int nbox;
+
+ /* Counters for client-side throttling of rendering clients.
+ */
+ unsigned int last_frame;
+ unsigned int last_dispatch;
+ unsigned int last_clear;
+
+ struct drm_tex_region tex_list[RADEON_NR_TEX_HEAPS][RADEON_NR_TEX_REGIONS +
+ 1];
+ unsigned int tex_age[RADEON_NR_TEX_HEAPS];
+ int ctx_owner;
+	int pfState;		/* number of 3d windows (0, 1, 2 or more) */
+ int pfCurrentPage; /* which buffer is being displayed? */
+ int crtc2_base; /* CRTC2 frame offset */
+ int tiling_enabled; /* set by drm, read by 2d + 3d clients */
+} drm_radeon_sarea_t;
+
+/* WARNING: If you change any of these defines, make sure to change the
+ * defines in the Xserver file (xf86drmRadeon.h)
+ *
+ * KW: actually it's illegal to change any of this (backwards compatibility).
+ */
+
+/* Radeon specific ioctls
+ * The device specific ioctl range is 0x40 to 0x79.
+ */
+#define DRM_RADEON_CP_INIT 0x00
+#define DRM_RADEON_CP_START 0x01
+#define DRM_RADEON_CP_STOP 0x02
+#define DRM_RADEON_CP_RESET 0x03
+#define DRM_RADEON_CP_IDLE 0x04
+#define DRM_RADEON_RESET 0x05
+#define DRM_RADEON_FULLSCREEN 0x06
+#define DRM_RADEON_SWAP 0x07
+#define DRM_RADEON_CLEAR 0x08
+#define DRM_RADEON_VERTEX 0x09
+#define DRM_RADEON_INDICES 0x0A
+#define DRM_RADEON_NOT_USED
+#define DRM_RADEON_STIPPLE 0x0C
+#define DRM_RADEON_INDIRECT 0x0D
+#define DRM_RADEON_TEXTURE 0x0E
+#define DRM_RADEON_VERTEX2 0x0F
+#define DRM_RADEON_CMDBUF 0x10
+#define DRM_RADEON_GETPARAM 0x11
+#define DRM_RADEON_FLIP 0x12
+#define DRM_RADEON_ALLOC 0x13
+#define DRM_RADEON_FREE 0x14
+#define DRM_RADEON_INIT_HEAP 0x15
+#define DRM_RADEON_IRQ_EMIT 0x16
+#define DRM_RADEON_IRQ_WAIT 0x17
+#define DRM_RADEON_CP_RESUME 0x18
+#define DRM_RADEON_SETPARAM 0x19
+#define DRM_RADEON_SURF_ALLOC 0x1a
+#define DRM_RADEON_SURF_FREE 0x1b
+
+#define DRM_IOCTL_RADEON_CP_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_CP_INIT, drm_radeon_init_t)
+#define DRM_IOCTL_RADEON_CP_START DRM_IO( DRM_COMMAND_BASE + DRM_RADEON_CP_START)
+#define DRM_IOCTL_RADEON_CP_STOP DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_CP_STOP, drm_radeon_cp_stop_t)
+#define DRM_IOCTL_RADEON_CP_RESET DRM_IO( DRM_COMMAND_BASE + DRM_RADEON_CP_RESET)
+#define DRM_IOCTL_RADEON_CP_IDLE DRM_IO( DRM_COMMAND_BASE + DRM_RADEON_CP_IDLE)
+#define DRM_IOCTL_RADEON_RESET DRM_IO( DRM_COMMAND_BASE + DRM_RADEON_RESET)
+#define DRM_IOCTL_RADEON_FULLSCREEN DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_FULLSCREEN, drm_radeon_fullscreen_t)
+#define DRM_IOCTL_RADEON_SWAP DRM_IO( DRM_COMMAND_BASE + DRM_RADEON_SWAP)
+#define DRM_IOCTL_RADEON_CLEAR DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_CLEAR, drm_radeon_clear_t)
+#define DRM_IOCTL_RADEON_VERTEX DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_VERTEX, drm_radeon_vertex_t)
+#define DRM_IOCTL_RADEON_INDICES DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_INDICES, drm_radeon_indices_t)
+#define DRM_IOCTL_RADEON_STIPPLE DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_STIPPLE, drm_radeon_stipple_t)
+#define DRM_IOCTL_RADEON_INDIRECT DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_INDIRECT, drm_radeon_indirect_t)
+#define DRM_IOCTL_RADEON_TEXTURE DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_TEXTURE, drm_radeon_texture_t)
+#define DRM_IOCTL_RADEON_VERTEX2 DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_VERTEX2, drm_radeon_vertex2_t)
+#define DRM_IOCTL_RADEON_CMDBUF DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_CMDBUF, drm_radeon_cmd_buffer_t)
+#define DRM_IOCTL_RADEON_GETPARAM DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_GETPARAM, drm_radeon_getparam_t)
+#define DRM_IOCTL_RADEON_FLIP DRM_IO( DRM_COMMAND_BASE + DRM_RADEON_FLIP)
+#define DRM_IOCTL_RADEON_ALLOC DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_ALLOC, drm_radeon_mem_alloc_t)
+#define DRM_IOCTL_RADEON_FREE DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_FREE, drm_radeon_mem_free_t)
+#define DRM_IOCTL_RADEON_INIT_HEAP DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_INIT_HEAP, drm_radeon_mem_init_heap_t)
+#define DRM_IOCTL_RADEON_IRQ_EMIT DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_IRQ_EMIT, drm_radeon_irq_emit_t)
+#define DRM_IOCTL_RADEON_IRQ_WAIT DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_IRQ_WAIT, drm_radeon_irq_wait_t)
+#define DRM_IOCTL_RADEON_CP_RESUME DRM_IO( DRM_COMMAND_BASE + DRM_RADEON_CP_RESUME)
+#define DRM_IOCTL_RADEON_SETPARAM DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_SETPARAM, drm_radeon_setparam_t)
+#define DRM_IOCTL_RADEON_SURF_ALLOC DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_SURF_ALLOC, drm_radeon_surface_alloc_t)
+#define DRM_IOCTL_RADEON_SURF_FREE DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_SURF_FREE, drm_radeon_surface_free_t)
+
+typedef struct drm_radeon_init {
+ enum {
+ RADEON_INIT_CP = 0x01,
+ RADEON_CLEANUP_CP = 0x02,
+ RADEON_INIT_R200_CP = 0x03,
+ RADEON_INIT_R300_CP = 0x04
+ } func;
+ unsigned long sarea_priv_offset;
+ int is_pci;
+ int cp_mode;
+ int gart_size;
+ int ring_size;
+ int usec_timeout;
+
+ unsigned int fb_bpp;
+ unsigned int front_offset, front_pitch;
+ unsigned int back_offset, back_pitch;
+ unsigned int depth_bpp;
+ unsigned int depth_offset, depth_pitch;
+
+ unsigned long fb_offset;
+ unsigned long mmio_offset;
+ unsigned long ring_offset;
+ unsigned long ring_rptr_offset;
+ unsigned long buffers_offset;
+ unsigned long gart_textures_offset;
+} drm_radeon_init_t;
+
+typedef struct drm_radeon_cp_stop {
+ int flush;
+ int idle;
+} drm_radeon_cp_stop_t;
+
+typedef struct drm_radeon_fullscreen {
+ enum {
+ RADEON_INIT_FULLSCREEN = 0x01,
+ RADEON_CLEANUP_FULLSCREEN = 0x02
+ } func;
+} drm_radeon_fullscreen_t;
+
+#define CLEAR_X1 0
+#define CLEAR_Y1 1
+#define CLEAR_X2 2
+#define CLEAR_Y2 3
+#define CLEAR_DEPTH 4
+
+typedef union drm_radeon_clear_rect {
+ float f[5];
+ unsigned int ui[5];
+} drm_radeon_clear_rect_t;
+
+typedef struct drm_radeon_clear {
+ unsigned int flags;
+ unsigned int clear_color;
+ unsigned int clear_depth;
+ unsigned int color_mask;
+ unsigned int depth_mask; /* misnamed field: should be stencil */
+ drm_radeon_clear_rect_t __user *depth_boxes;
+} drm_radeon_clear_t;
+
+typedef struct drm_radeon_vertex {
+ int prim;
+ int idx; /* Index of vertex buffer */
+ int count; /* Number of vertices in buffer */
+ int discard; /* Client finished with buffer? */
+} drm_radeon_vertex_t;
+
+typedef struct drm_radeon_indices {
+ int prim;
+ int idx;
+ int start;
+ int end;
+ int discard; /* Client finished with buffer? */
+} drm_radeon_indices_t;
+
+/* v1.2 - obsoletes drm_radeon_vertex and drm_radeon_indices
+ * - allows multiple primitives and state changes in a single ioctl
+ * - supports driver change to emit native primitives
+ */
+typedef struct drm_radeon_vertex2 {
+ int idx; /* Index of vertex buffer */
+ int discard; /* Client finished with buffer? */
+ int nr_states;
+ drm_radeon_state_t __user *state;
+ int nr_prims;
+ drm_radeon_prim_t __user *prim;
+} drm_radeon_vertex2_t;
+
+/* v1.3 - obsoletes drm_radeon_vertex2
+ * - allows arbitrarily large cliprect list
+ * - allows updating of tcl packet, vector and scalar state
+ * - allows memory-efficient description of state updates
+ * - allows state to be emitted without a primitive
+ * (for clears, ctx switches)
+ * - allows more than one dma buffer to be referenced per ioctl
+ * - supports tcl driver
+ * - may be extended in future versions with new cmd types, packets
+ */
+typedef struct drm_radeon_cmd_buffer {
+ int bufsz;
+ char __user *buf;
+ int nbox;
+ struct drm_clip_rect __user *boxes;
+} drm_radeon_cmd_buffer_t;
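+/* Editor's usage sketch (illustrative, not part of the original header):
+ * submitting a prepared command stream, assuming "fd" is an open DRM
+ * device file descriptor and "stream"/"stream_bytes" hold the encoded
+ * commands:
+ *
+ *	drm_radeon_cmd_buffer_t cb = {
+ *		.bufsz = stream_bytes,
+ *		.buf   = stream,
+ *		.nbox  = 0,
+ *		.boxes = NULL,
+ *	};
+ *	ioctl(fd, DRM_IOCTL_RADEON_CMDBUF, &cb);
+ */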
+
+typedef struct drm_radeon_tex_image {
+ unsigned int x, y; /* Blit coordinates */
+ unsigned int width, height;
+ const void __user *data;
+} drm_radeon_tex_image_t;
+
+typedef struct drm_radeon_texture {
+ unsigned int offset;
+ int pitch;
+ int format;
+ int width; /* Texture image coordinates */
+ int height;
+ drm_radeon_tex_image_t __user *image;
+} drm_radeon_texture_t;
+
+typedef struct drm_radeon_stipple {
+ unsigned int __user *mask;
+} drm_radeon_stipple_t;
+
+typedef struct drm_radeon_indirect {
+ int idx;
+ int start;
+ int end;
+ int discard;
+} drm_radeon_indirect_t;
+
+/* enum for card type parameters */
+#define RADEON_CARD_PCI 0
+#define RADEON_CARD_AGP 1
+#define RADEON_CARD_PCIE 2
+
+/* 1.3: An ioctl to get parameters that aren't available to the 3d
+ * client any other way.
+ */
+#define RADEON_PARAM_GART_BUFFER_OFFSET 1 /* card offset of 1st GART buffer */
+#define RADEON_PARAM_LAST_FRAME 2
+#define RADEON_PARAM_LAST_DISPATCH 3
+#define RADEON_PARAM_LAST_CLEAR 4
+/* Added with DRM version 1.6. */
+#define RADEON_PARAM_IRQ_NR 5
+#define RADEON_PARAM_GART_BASE 6 /* card offset of GART base */
+/* Added with DRM version 1.8. */
+#define RADEON_PARAM_REGISTER_HANDLE 7 /* for drmMap() */
+#define RADEON_PARAM_STATUS_HANDLE 8
+#define RADEON_PARAM_SAREA_HANDLE 9
+#define RADEON_PARAM_GART_TEX_HANDLE 10
+#define RADEON_PARAM_SCRATCH_OFFSET 11
+#define RADEON_PARAM_CARD_TYPE 12
+#define RADEON_PARAM_VBLANK_CRTC 13 /* VBLANK CRTC */
+#define RADEON_PARAM_FB_LOCATION 14 /* FB location */
+#define RADEON_PARAM_NUM_GB_PIPES 15 /* num GB pipes */
+
+typedef struct drm_radeon_getparam {
+ int param;
+ void __user *value;
+} drm_radeon_getparam_t;
+
+/* 1.6: Set up a memory manager for regions of shared memory:
+ */
+#define RADEON_MEM_REGION_GART 1
+#define RADEON_MEM_REGION_FB 2
+
+typedef struct drm_radeon_mem_alloc {
+ int region;
+ int alignment;
+ int size;
+ int __user *region_offset; /* offset from start of fb or GART */
+} drm_radeon_mem_alloc_t;
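+/* Editor's usage sketch (illustrative, not part of the original header):
+ * allocating 64k from the GART heap, assuming "fd" is an open DRM device
+ * file descriptor; on success the kernel writes the resulting offset back
+ * through region_offset:
+ *
+ *	int off = 0;
+ *	drm_radeon_mem_alloc_t alloc = {
+ *		.region        = RADEON_MEM_REGION_GART,
+ *		.alignment     = 0,
+ *		.size          = 64 * 1024,
+ *		.region_offset = &off,
+ *	};
+ *	ioctl(fd, DRM_IOCTL_RADEON_ALLOC, &alloc);
+ */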
+
+typedef struct drm_radeon_mem_free {
+ int region;
+ int region_offset;
+} drm_radeon_mem_free_t;
+
+typedef struct drm_radeon_mem_init_heap {
+ int region;
+ int size;
+ int start;
+} drm_radeon_mem_init_heap_t;
+
+/* 1.6: Userspace can request & wait on IRQs:
+ */
+typedef struct drm_radeon_irq_emit {
+ int __user *irq_seq;
+} drm_radeon_irq_emit_t;
+
+typedef struct drm_radeon_irq_wait {
+ int irq_seq;
+} drm_radeon_irq_wait_t;
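+/* Editor's usage sketch (illustrative, not part of the original header):
+ * emitting an IRQ and later blocking on it, assuming an open DRM fd:
+ *
+ *	int seq = 0;
+ *	drm_radeon_irq_emit_t emit = { .irq_seq = &seq };
+ *	ioctl(fd, DRM_IOCTL_RADEON_IRQ_EMIT, &emit);
+ *	...
+ *	drm_radeon_irq_wait_t wait = { .irq_seq = seq };
+ *	ioctl(fd, DRM_IOCTL_RADEON_IRQ_WAIT, &wait);
+ */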
+
+/* 1.10: Clients tell the DRM where they think the framebuffer is located in
+ * the card's address space, via a new generic ioctl to set parameters
+ */
+
+typedef struct drm_radeon_setparam {
+ unsigned int param;
+ int64_t value;
+} drm_radeon_setparam_t;
+
+#define RADEON_SETPARAM_FB_LOCATION 1 /* determined framebuffer location */
+#define RADEON_SETPARAM_SWITCH_TILING 2 /* enable/disable color tiling */
+#define RADEON_SETPARAM_PCIGART_LOCATION 3 /* PCI Gart Location */
+#define RADEON_SETPARAM_NEW_MEMMAP 4 /* Use new memory map */
+#define RADEON_SETPARAM_PCIGART_TABLE_SIZE 5 /* PCI GART Table Size */
+#define RADEON_SETPARAM_VBLANK_CRTC 6 /* VBLANK CRTC */
+/* 1.14: Clients can allocate/free a surface
+ */
+typedef struct drm_radeon_surface_alloc {
+ unsigned int address;
+ unsigned int size;
+ unsigned int flags;
+} drm_radeon_surface_alloc_t;
+
+typedef struct drm_radeon_surface_free {
+ unsigned int address;
+} drm_radeon_surface_free_t;
+
+#define DRM_RADEON_VBLANK_CRTC1 1
+#define DRM_RADEON_VBLANK_CRTC2 2
+
+#endif
diff --git a/include/drm/savage_drm.h b/include/drm/savage_drm.h
new file mode 100644
index 0000000..8a576ef
--- /dev/null
+++ b/include/drm/savage_drm.h
@@ -0,0 +1,210 @@
+/* savage_drm.h -- Public header for the savage driver
+ *
+ * Copyright 2004 Felix Kuehling
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sub license,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NON-INFRINGEMENT. IN NO EVENT SHALL FELIX KUEHLING BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
+ * CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef __SAVAGE_DRM_H__
+#define __SAVAGE_DRM_H__
+
+#ifndef __SAVAGE_SAREA_DEFINES__
+#define __SAVAGE_SAREA_DEFINES__
+
+/* 2 heaps (1 for card, 1 for agp), each divided into up to 128
+ * regions, subject to a minimum region size of (1<<16) == 64k.
+ *
+ * Clients may subdivide regions internally, but when sharing between
+ * clients, the region size is the minimum granularity.
+ */
+
+#define SAVAGE_CARD_HEAP 0
+#define SAVAGE_AGP_HEAP 1
+#define SAVAGE_NR_TEX_HEAPS 2
+#define SAVAGE_NR_TEX_REGIONS 16
+#define SAVAGE_LOG_MIN_TEX_REGION_SIZE 16
+
+#endif /* __SAVAGE_SAREA_DEFINES__ */
+
+typedef struct _drm_savage_sarea {
+ /* LRU lists for texture memory in agp space and on the card.
+ */
+ struct drm_tex_region texList[SAVAGE_NR_TEX_HEAPS][SAVAGE_NR_TEX_REGIONS +
+ 1];
+ unsigned int texAge[SAVAGE_NR_TEX_HEAPS];
+
+ /* Mechanism to validate card state.
+ */
+ int ctxOwner;
+} drm_savage_sarea_t, *drm_savage_sarea_ptr;
+
+/* Savage-specific ioctls
+ */
+#define DRM_SAVAGE_BCI_INIT 0x00
+#define DRM_SAVAGE_BCI_CMDBUF 0x01
+#define DRM_SAVAGE_BCI_EVENT_EMIT 0x02
+#define DRM_SAVAGE_BCI_EVENT_WAIT 0x03
+
+#define DRM_IOCTL_SAVAGE_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_SAVAGE_BCI_INIT, drm_savage_init_t)
+#define DRM_IOCTL_SAVAGE_CMDBUF DRM_IOW( DRM_COMMAND_BASE + DRM_SAVAGE_BCI_CMDBUF, drm_savage_cmdbuf_t)
+#define DRM_IOCTL_SAVAGE_EVENT_EMIT DRM_IOWR(DRM_COMMAND_BASE + DRM_SAVAGE_BCI_EVENT_EMIT, drm_savage_event_emit_t)
+#define DRM_IOCTL_SAVAGE_EVENT_WAIT DRM_IOW( DRM_COMMAND_BASE + DRM_SAVAGE_BCI_EVENT_WAIT, drm_savage_event_wait_t)
+
+#define SAVAGE_DMA_PCI 1
+#define SAVAGE_DMA_AGP 3
+typedef struct drm_savage_init {
+ enum {
+ SAVAGE_INIT_BCI = 1,
+ SAVAGE_CLEANUP_BCI = 2
+ } func;
+ unsigned int sarea_priv_offset;
+
+ /* some parameters */
+ unsigned int cob_size;
+ unsigned int bci_threshold_lo, bci_threshold_hi;
+ unsigned int dma_type;
+
+ /* frame buffer layout */
+ unsigned int fb_bpp;
+ unsigned int front_offset, front_pitch;
+ unsigned int back_offset, back_pitch;
+ unsigned int depth_bpp;
+ unsigned int depth_offset, depth_pitch;
+
+ /* local textures */
+ unsigned int texture_offset;
+ unsigned int texture_size;
+
+ /* physical locations of non-permanent maps */
+ unsigned long status_offset;
+ unsigned long buffers_offset;
+ unsigned long agp_textures_offset;
+ unsigned long cmd_dma_offset;
+} drm_savage_init_t;
+
+typedef union drm_savage_cmd_header drm_savage_cmd_header_t;
+typedef struct drm_savage_cmdbuf {
+ /* command buffer in client's address space */
+ drm_savage_cmd_header_t __user *cmd_addr;
+ unsigned int size; /* size of the command buffer in 64bit units */
+
+ unsigned int dma_idx; /* DMA buffer index to use */
+ int discard; /* discard DMA buffer when done */
+ /* vertex buffer in client's address space */
+ unsigned int __user *vb_addr;
+ unsigned int vb_size; /* size of client vertex buffer in bytes */
+ unsigned int vb_stride; /* stride of vertices in 32bit words */
+ /* boxes in client's address space */
+ struct drm_clip_rect __user *box_addr;
+ unsigned int nbox; /* number of clipping boxes */
+} drm_savage_cmdbuf_t;
+
+#define SAVAGE_WAIT_2D 0x1 /* wait for 2D idle before updating event tag */
+#define SAVAGE_WAIT_3D 0x2 /* wait for 3D idle before updating event tag */
+#define SAVAGE_WAIT_IRQ 0x4 /* emit or wait for IRQ, not implemented yet */
+typedef struct drm_savage_event {
+ unsigned int count;
+ unsigned int flags;
+} drm_savage_event_emit_t, drm_savage_event_wait_t;
+
+/* Commands for the cmdbuf ioctl
+ */
+#define SAVAGE_CMD_STATE 0 /* a range of state registers */
+#define SAVAGE_CMD_DMA_PRIM 1 /* vertices from DMA buffer */
+#define SAVAGE_CMD_VB_PRIM 2 /* vertices from client vertex buffer */
+#define SAVAGE_CMD_DMA_IDX 3 /* indexed vertices from DMA buffer */
+#define SAVAGE_CMD_VB_IDX 4 /* indexed vertices from client vertex buffer */
+#define SAVAGE_CMD_CLEAR 5 /* clear buffers */
+#define SAVAGE_CMD_SWAP 6 /* swap buffers */
+
+/* Primitive types
+ */
+#define SAVAGE_PRIM_TRILIST 0 /* triangle list */
+#define SAVAGE_PRIM_TRISTRIP 1 /* triangle strip */
+#define SAVAGE_PRIM_TRIFAN 2 /* triangle fan */
+#define SAVAGE_PRIM_TRILIST_201 3 /* reorder verts for correct flat
+ * shading on s3d */
+
+/* Skip flags (vertex format)
+ */
+#define SAVAGE_SKIP_Z 0x01
+#define SAVAGE_SKIP_W 0x02
+#define SAVAGE_SKIP_C0 0x04
+#define SAVAGE_SKIP_C1 0x08
+#define SAVAGE_SKIP_S0 0x10
+#define SAVAGE_SKIP_T0 0x20
+#define SAVAGE_SKIP_ST0 0x30
+#define SAVAGE_SKIP_S1 0x40
+#define SAVAGE_SKIP_T1 0x80
+#define SAVAGE_SKIP_ST1 0xc0
+#define SAVAGE_SKIP_ALL_S3D 0x3f
+#define SAVAGE_SKIP_ALL_S4 0xff
+
+/* Buffer names for clear command
+ */
+#define SAVAGE_FRONT 0x1
+#define SAVAGE_BACK 0x2
+#define SAVAGE_DEPTH 0x4
+
+/* 64-bit command header
+ */
+union drm_savage_cmd_header {
+ struct {
+ unsigned char cmd; /* command */
+ unsigned char pad0;
+ unsigned short pad1;
+ unsigned short pad2;
+ unsigned short pad3;
+ } cmd; /* generic */
+ struct {
+ unsigned char cmd;
+ unsigned char global; /* need idle engine? */
+ unsigned short count; /* number of consecutive registers */
+ unsigned short start; /* first register */
+ unsigned short pad3;
+ } state; /* SAVAGE_CMD_STATE */
+ struct {
+ unsigned char cmd;
+ unsigned char prim; /* primitive type */
+ unsigned short skip; /* vertex format (skip flags) */
+ unsigned short count; /* number of vertices */
+ unsigned short start; /* first vertex in DMA/vertex buffer */
+ } prim; /* SAVAGE_CMD_DMA_PRIM, SAVAGE_CMD_VB_PRIM */
+ struct {
+ unsigned char cmd;
+ unsigned char prim;
+ unsigned short skip;
+ unsigned short count; /* number of indices that follow */
+ unsigned short pad3;
+ } idx; /* SAVAGE_CMD_DMA_IDX, SAVAGE_CMD_VB_IDX */
+ struct {
+ unsigned char cmd;
+ unsigned char pad0;
+ unsigned short pad1;
+ unsigned int flags;
+ } clear0; /* SAVAGE_CMD_CLEAR */
+ struct {
+ unsigned int mask;
+ unsigned int value;
+ } clear1; /* SAVAGE_CMD_CLEAR data */
+};
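+/* Editor's illustration (not part of the original header): encoding one
+ * vertex-buffer primitive header with the union above, values chosen
+ * purely as an example:
+ *
+ *	union drm_savage_cmd_header h = {0};
+ *	h.prim.cmd   = SAVAGE_CMD_VB_PRIM;
+ *	h.prim.prim  = SAVAGE_PRIM_TRISTRIP;
+ *	h.prim.skip  = SAVAGE_SKIP_W;
+ *	h.prim.count = 4;
+ *	h.prim.start = 0;
+ */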
+
+#endif
diff --git a/include/drm/sis_drm.h b/include/drm/sis_drm.h
new file mode 100644
index 0000000..30f7b38
--- /dev/null
+++ b/include/drm/sis_drm.h
@@ -0,0 +1,67 @@
+/* sis_drm.h -- Public header for the sis driver -*- linux-c -*- */
+/*
+ * Copyright 2005 Eric Anholt
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#ifndef __SIS_DRM_H__
+#define __SIS_DRM_H__
+
+/* SiS specific ioctls */
+#define NOT_USED_0_3
+#define DRM_SIS_FB_ALLOC 0x04
+#define DRM_SIS_FB_FREE 0x05
+#define NOT_USED_6_12
+#define DRM_SIS_AGP_INIT 0x13
+#define DRM_SIS_AGP_ALLOC 0x14
+#define DRM_SIS_AGP_FREE 0x15
+#define DRM_SIS_FB_INIT 0x16
+
+#define DRM_IOCTL_SIS_FB_ALLOC DRM_IOWR(DRM_COMMAND_BASE + DRM_SIS_FB_ALLOC, drm_sis_mem_t)
+#define DRM_IOCTL_SIS_FB_FREE DRM_IOW( DRM_COMMAND_BASE + DRM_SIS_FB_FREE, drm_sis_mem_t)
+#define DRM_IOCTL_SIS_AGP_INIT DRM_IOWR(DRM_COMMAND_BASE + DRM_SIS_AGP_INIT, drm_sis_agp_t)
+#define DRM_IOCTL_SIS_AGP_ALLOC DRM_IOWR(DRM_COMMAND_BASE + DRM_SIS_AGP_ALLOC, drm_sis_mem_t)
+#define DRM_IOCTL_SIS_AGP_FREE DRM_IOW( DRM_COMMAND_BASE + DRM_SIS_AGP_FREE, drm_sis_mem_t)
+#define DRM_IOCTL_SIS_FB_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_SIS_FB_INIT, drm_sis_fb_t)
+/*
+#define DRM_IOCTL_SIS_FLIP DRM_IOW( 0x48, drm_sis_flip_t)
+#define DRM_IOCTL_SIS_FLIP_INIT DRM_IO( 0x49)
+#define DRM_IOCTL_SIS_FLIP_FINAL DRM_IO( 0x50)
+*/
+
+typedef struct {
+ int context;
+ unsigned int offset;
+ unsigned int size;
+ unsigned long free;
+} drm_sis_mem_t;
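+/* Editor's usage sketch (illustrative, not part of the original header):
+ * requesting a block of video memory, assuming an open DRM fd and a valid
+ * context id "ctx_id" (both assumptions); the same structure is later
+ * handed back to DRM_IOCTL_SIS_FB_FREE to release the block:
+ *
+ *	drm_sis_mem_t mem = { .context = ctx_id, .size = 64 * 1024 };
+ *	if (ioctl(fd, DRM_IOCTL_SIS_FB_ALLOC, &mem) == 0)
+ *		use(mem.offset);
+ */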
+
+typedef struct {
+ unsigned int offset, size;
+} drm_sis_agp_t;
+
+typedef struct {
+ unsigned int offset, size;
+} drm_sis_fb_t;
+
+#endif /* __SIS_DRM_H__ */
diff --git a/include/drm/via_drm.h b/include/drm/via_drm.h
new file mode 100644
index 0000000..a3b5c10
--- /dev/null
+++ b/include/drm/via_drm.h
@@ -0,0 +1,275 @@
+/*
+ * Copyright 1998-2003 VIA Technologies, Inc. All Rights Reserved.
+ * Copyright 2001-2003 S3 Graphics, Inc. All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sub license,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * VIA, S3 GRAPHICS, AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+#ifndef _VIA_DRM_H_
+#define _VIA_DRM_H_
+
+/* WARNING: These defines must be the same as what the Xserver uses.
+ * If you change them, you must change the defines in the Xserver.
+ */
+
+#ifndef _VIA_DEFINES_
+#define _VIA_DEFINES_
+
+#ifndef __KERNEL__
+#include "via_drmclient.h"
+#endif
+
+#define VIA_NR_SAREA_CLIPRECTS 8
+#define VIA_NR_XVMC_PORTS 10
+#define VIA_NR_XVMC_LOCKS 5
+#define VIA_MAX_CACHELINE_SIZE 64
+#define XVMCLOCKPTR(saPriv,lockNo) \
+ ((volatile struct drm_hw_lock *)(((((unsigned long) (saPriv)->XvMCLockArea) + \
+ (VIA_MAX_CACHELINE_SIZE - 1)) & \
+ ~(VIA_MAX_CACHELINE_SIZE - 1)) + \
+ VIA_MAX_CACHELINE_SIZE*(lockNo)))
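+/* Editor's note (illustrative, not from the original source): XVMCLOCKPTR()
+ * rounds the XvMCLockArea base address up to the next VIA_MAX_CACHELINE_SIZE
+ * boundary and then steps lockNo whole cache lines in, i.e. roughly:
+ *
+ *	base = ((unsigned long)(saPriv)->XvMCLockArea + 63) & ~63UL;
+ *	lock = (volatile struct drm_hw_lock *)(base + 64 * (lockNo));
+ */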
+
+/* Each region is a minimum of 64k, and there are at most 64 of them.
+ */
+#define VIA_NR_TEX_REGIONS 64
+#define VIA_LOG_MIN_TEX_REGION_SIZE 16
+#endif
+
+#define VIA_UPLOAD_TEX0IMAGE 0x1 /* handled clientside */
+#define VIA_UPLOAD_TEX1IMAGE 0x2 /* handled clientside */
+#define VIA_UPLOAD_CTX 0x4
+#define VIA_UPLOAD_BUFFERS 0x8
+#define VIA_UPLOAD_TEX0 0x10
+#define VIA_UPLOAD_TEX1 0x20
+#define VIA_UPLOAD_CLIPRECTS 0x40
+#define VIA_UPLOAD_ALL 0xff
+
+/* VIA specific ioctls */
+#define DRM_VIA_ALLOCMEM 0x00
+#define DRM_VIA_FREEMEM 0x01
+#define DRM_VIA_AGP_INIT 0x02
+#define DRM_VIA_FB_INIT 0x03
+#define DRM_VIA_MAP_INIT 0x04
+#define DRM_VIA_DEC_FUTEX 0x05
+#define NOT_USED
+#define DRM_VIA_DMA_INIT 0x07
+#define DRM_VIA_CMDBUFFER 0x08
+#define DRM_VIA_FLUSH 0x09
+#define DRM_VIA_PCICMD 0x0a
+#define DRM_VIA_CMDBUF_SIZE 0x0b
+#define NOT_USED
+#define DRM_VIA_WAIT_IRQ 0x0d
+#define DRM_VIA_DMA_BLIT 0x0e
+#define DRM_VIA_BLIT_SYNC 0x0f
+
+#define DRM_IOCTL_VIA_ALLOCMEM DRM_IOWR(DRM_COMMAND_BASE + DRM_VIA_ALLOCMEM, drm_via_mem_t)
+#define DRM_IOCTL_VIA_FREEMEM DRM_IOW( DRM_COMMAND_BASE + DRM_VIA_FREEMEM, drm_via_mem_t)
+#define DRM_IOCTL_VIA_AGP_INIT DRM_IOWR(DRM_COMMAND_BASE + DRM_VIA_AGP_INIT, drm_via_agp_t)
+#define DRM_IOCTL_VIA_FB_INIT DRM_IOWR(DRM_COMMAND_BASE + DRM_VIA_FB_INIT, drm_via_fb_t)
+#define DRM_IOCTL_VIA_MAP_INIT DRM_IOWR(DRM_COMMAND_BASE + DRM_VIA_MAP_INIT, drm_via_init_t)
+#define DRM_IOCTL_VIA_DEC_FUTEX DRM_IOW( DRM_COMMAND_BASE + DRM_VIA_DEC_FUTEX, drm_via_futex_t)
+#define DRM_IOCTL_VIA_DMA_INIT DRM_IOWR(DRM_COMMAND_BASE + DRM_VIA_DMA_INIT, drm_via_dma_init_t)
+#define DRM_IOCTL_VIA_CMDBUFFER DRM_IOW( DRM_COMMAND_BASE + DRM_VIA_CMDBUFFER, drm_via_cmdbuffer_t)
+#define DRM_IOCTL_VIA_FLUSH DRM_IO( DRM_COMMAND_BASE + DRM_VIA_FLUSH)
+#define DRM_IOCTL_VIA_PCICMD DRM_IOW( DRM_COMMAND_BASE + DRM_VIA_PCICMD, drm_via_cmdbuffer_t)
+#define DRM_IOCTL_VIA_CMDBUF_SIZE DRM_IOWR( DRM_COMMAND_BASE + DRM_VIA_CMDBUF_SIZE, \
+ drm_via_cmdbuf_size_t)
+#define DRM_IOCTL_VIA_WAIT_IRQ DRM_IOWR( DRM_COMMAND_BASE + DRM_VIA_WAIT_IRQ, drm_via_irqwait_t)
+#define DRM_IOCTL_VIA_DMA_BLIT DRM_IOW(DRM_COMMAND_BASE + DRM_VIA_DMA_BLIT, drm_via_dmablit_t)
+#define DRM_IOCTL_VIA_BLIT_SYNC DRM_IOW(DRM_COMMAND_BASE + DRM_VIA_BLIT_SYNC, drm_via_blitsync_t)
+
+/* Indices into buf.Setup where various bits of state are mirrored per
+ * context and per buffer. These can be fired at the card as a unit,
+ * or in a piecewise fashion as required.
+ */
+
+#define VIA_TEX_SETUP_SIZE 8
+
+/* Flags for clear ioctl
+ */
+#define VIA_FRONT 0x1
+#define VIA_BACK 0x2
+#define VIA_DEPTH 0x4
+#define VIA_STENCIL 0x8
+#define VIA_MEM_VIDEO 0 /* matches drm constant */
+#define VIA_MEM_AGP 1 /* matches drm constant */
+#define VIA_MEM_SYSTEM 2
+#define VIA_MEM_MIXED 3
+#define VIA_MEM_UNKNOWN 4
+
+typedef struct {
+ uint32_t offset;
+ uint32_t size;
+} drm_via_agp_t;
+
+typedef struct {
+ uint32_t offset;
+ uint32_t size;
+} drm_via_fb_t;
+
+typedef struct {
+ uint32_t context;
+ uint32_t type;
+ uint32_t size;
+ unsigned long index;
+ unsigned long offset;
+} drm_via_mem_t;
+
+typedef struct _drm_via_init {
+ enum {
+ VIA_INIT_MAP = 0x01,
+ VIA_CLEANUP_MAP = 0x02
+ } func;
+
+ unsigned long sarea_priv_offset;
+ unsigned long fb_offset;
+ unsigned long mmio_offset;
+ unsigned long agpAddr;
+} drm_via_init_t;
+
+typedef struct _drm_via_futex {
+ enum {
+ VIA_FUTEX_WAIT = 0x00,
+		VIA_FUTEX_WAKE = 0x01
+ } func;
+ uint32_t ms;
+ uint32_t lock;
+ uint32_t val;
+} drm_via_futex_t;
+
+typedef struct _drm_via_dma_init {
+ enum {
+ VIA_INIT_DMA = 0x01,
+ VIA_CLEANUP_DMA = 0x02,
+ VIA_DMA_INITIALIZED = 0x03
+ } func;
+
+ unsigned long offset;
+ unsigned long size;
+ unsigned long reg_pause_addr;
+} drm_via_dma_init_t;
+
+typedef struct _drm_via_cmdbuffer {
+ char __user *buf;
+ unsigned long size;
+} drm_via_cmdbuffer_t;
+
+/* Warning: If you change the SAREA structure, you must change the Xserver
+ * structure as well. */
+
+typedef struct _drm_via_tex_region {
+ unsigned char next, prev; /* indices to form a circular LRU */
+ unsigned char inUse; /* owned by a client, or free? */
+ int age; /* tracked by clients to update local LRU's */
+} drm_via_tex_region_t;
+
+typedef struct _drm_via_sarea {
+ unsigned int dirty;
+ unsigned int nbox;
+ struct drm_clip_rect boxes[VIA_NR_SAREA_CLIPRECTS];
+ drm_via_tex_region_t texList[VIA_NR_TEX_REGIONS + 1];
+ int texAge; /* last time texture was uploaded */
+ int ctxOwner; /* last context to upload state */
+ int vertexPrim;
+
+ /*
+ * Below is for XvMC.
+ * We want the lock integers alone on, and aligned to, a cache line.
+ * Therefore this somewhat strange construct.
+ */
+
+ char XvMCLockArea[VIA_MAX_CACHELINE_SIZE * (VIA_NR_XVMC_LOCKS + 1)];
+
+ unsigned int XvMCDisplaying[VIA_NR_XVMC_PORTS];
+ unsigned int XvMCSubPicOn[VIA_NR_XVMC_PORTS];
+ unsigned int XvMCCtxNoGrabbed; /* Last context to hold decoder */
+
+ /* Used by the 3d driver only at this point, for pageflipping:
+ */
+ unsigned int pfCurrentOffset;
+} drm_via_sarea_t;
+
+typedef struct _drm_via_cmdbuf_size {
+ enum {
+ VIA_CMDBUF_SPACE = 0x01,
+ VIA_CMDBUF_LAG = 0x02
+ } func;
+ int wait;
+ uint32_t size;
+} drm_via_cmdbuf_size_t;
+
+typedef enum {
+ VIA_IRQ_ABSOLUTE = 0x0,
+ VIA_IRQ_RELATIVE = 0x1,
+ VIA_IRQ_SIGNAL = 0x10000000,
+ VIA_IRQ_FORCE_SEQUENCE = 0x20000000
+} via_irq_seq_type_t;
+
+#define VIA_IRQ_FLAGS_MASK 0xF0000000
+
+enum drm_via_irqs {
+ drm_via_irq_hqv0 = 0,
+ drm_via_irq_hqv1,
+ drm_via_irq_dma0_dd,
+ drm_via_irq_dma0_td,
+ drm_via_irq_dma1_dd,
+ drm_via_irq_dma1_td,
+ drm_via_irq_num
+};
+
+struct drm_via_wait_irq_request {
+ unsigned irq;
+ via_irq_seq_type_t type;
+ uint32_t sequence;
+ uint32_t signal;
+};
+
+typedef union drm_via_irqwait {
+ struct drm_via_wait_irq_request request;
+ struct drm_wait_vblank_reply reply;
+} drm_via_irqwait_t;
+
+typedef struct drm_via_blitsync {
+ uint32_t sync_handle;
+ unsigned engine;
+} drm_via_blitsync_t;
+
+/*
+ * Below, "flags" is currently unused but will be used for possible future
+ * extensions like kernel space bounce buffers for bad alignments and
+ * blit engine busy-wait polling for better latency in the absence of
+ * interrupts.
+ */
+
+typedef struct drm_via_dmablit {
+ uint32_t num_lines;
+ uint32_t line_length;
+
+ uint32_t fb_addr;
+ uint32_t fb_stride;
+
+ unsigned char *mem_addr;
+ uint32_t mem_stride;
+
+ uint32_t flags;
+ int to_fb;
+
+ drm_via_blitsync_t sync;
+} drm_via_dmablit_t;
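+/* Editor's usage sketch (illustrative, not part of the original header):
+ * queueing a system-memory to framebuffer blit and then waiting for it to
+ * finish, assuming an open DRM fd and that "src", the strides and the
+ * framebuffer offset are valid values supplied by the caller:
+ *
+ *	drm_via_dmablit_t blit = {
+ *		.num_lines   = height,
+ *		.line_length = width_in_bytes,
+ *		.fb_addr     = fb_offset,
+ *		.fb_stride   = fb_pitch,
+ *		.mem_addr    = src,
+ *		.mem_stride  = src_pitch,
+ *		.to_fb       = 1,
+ *	};
+ *	if (ioctl(fd, DRM_IOCTL_VIA_DMA_BLIT, &blit) == 0)
+ *		ioctl(fd, DRM_IOCTL_VIA_BLIT_SYNC, &blit.sync);
+ */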
+
+#endif /* _VIA_DRM_H_ */