author    Linus Torvalds <torvalds@g5.osdl.org>    2006-09-30 14:29:54 -0400
committer Linus Torvalds <torvalds@g5.osdl.org>    2006-09-30 14:29:54 -0400
commit    e823aff2d6eb43083abcc75a32ddfb167c324089 (patch)
tree      60b67f3f2f088d6741a5af8488b4a565fb4c4cfe /drivers
parent    77ed74da26f50fa28471571ee7a2251b77526d84 (diff)
parent    3e14a2867d8ccf555fe6e318eac0f8200399fe1c (diff)
Merge branch 'drm-patches' of git://git.kernel.org/pub/scm/linux/kernel/git/airlied/drm-2.6
* 'drm-patches' of git://git.kernel.org/pub/scm/linux/kernel/git/airlied/drm-2.6: (36 commits)
  drm: Use register writes instead of BITBLT_MULTI packets for buffer swap blits
  drm: use radeon specific names for radeon flags
  drm: add device/vendor id to drm_device_t for compat with FreeBSD drivers
  drm: allow multiple addMaps with the same 32-bit map offset.
  drm: fd.o Bug #7595: Avoid u32 overflows in radeon_check_and_fixup_offset().
  drm: Fix hashtab implementation leaking illegal error codes to user space.
  drm: domain changes broke ppc r200
  drm: fixup setversion return codes.
  drm: fixup i915 error codes
  drm: realign some radeon code with drm git tree
  drm: realign via driver with drm git tree
  drm: remove hash tables on drm exit
  drm: cleanups
  drm: i810_dma.c: fix pointer arithmetic for 64-bit target
  drm: avoid kernel oops in some error paths calling drm_lastclose
  drm: allow detection of new VIA chipsets
  drm: fix i965 build bug
  drm: remove FALSE/TRUE that snuck in with simple memory manager changes.
  drm: Add support for Intel i965G chipsets.
  drm: add better explanation for i830/i915
  ...
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/char/drm/Kconfig        |    9
-rw-r--r--  drivers/char/drm/Makefile       |    6
-rw-r--r--  drivers/char/drm/drmP.h         |   68
-rw-r--r--  drivers/char/drm/drm_auth.c     |   64
-rw-r--r--  drivers/char/drm/drm_bufs.c     |   74
-rw-r--r--  drivers/char/drm/drm_drv.c      |   12
-rw-r--r--  drivers/char/drm/drm_fops.c     |   10
-rw-r--r--  drivers/char/drm/drm_hashtab.c  |  190
-rw-r--r--  drivers/char/drm/drm_hashtab.h  |   67
-rw-r--r--  drivers/char/drm/drm_ioc32.c    |    2
-rw-r--r--  drivers/char/drm/drm_ioctl.c    |   34
-rw-r--r--  drivers/char/drm/drm_irq.c      |   12
-rw-r--r--  drivers/char/drm/drm_mm.c       |  201
-rw-r--r--  drivers/char/drm/drm_pciids.h   |  187
-rw-r--r--  drivers/char/drm/drm_proc.c     |    2
-rw-r--r--  drivers/char/drm/drm_sman.c     |  352
-rw-r--r--  drivers/char/drm/drm_sman.h     |  176
-rw-r--r--  drivers/char/drm/drm_stub.c     |   12
-rw-r--r--  drivers/char/drm/drm_vm.c       |   45
-rw-r--r--  drivers/char/drm/i810_dma.c     |   10
-rw-r--r--  drivers/char/drm/i830_dma.c     |    4
-rw-r--r--  drivers/char/drm/i915_dma.c     |   45
-rw-r--r--  drivers/char/drm/i915_drm.h     |    6
-rw-r--r--  drivers/char/drm/i915_drv.h     |   10
-rw-r--r--  drivers/char/drm/i915_irq.c     |   16
-rw-r--r--  drivers/char/drm/radeon_cp.c    |   72
-rw-r--r--  drivers/char/drm/radeon_drv.c   |    2
-rw-r--r--  drivers/char/drm/radeon_drv.h   |   36
-rw-r--r--  drivers/char/drm/radeon_state.c |   48
-rw-r--r--  drivers/char/drm/sis_drv.c      |   39
-rw-r--r--  drivers/char/drm/sis_drv.h      |   34
-rw-r--r--  drivers/char/drm/sis_ds.c       |  299
-rw-r--r--  drivers/char/drm/sis_ds.h       |  146
-rw-r--r--  drivers/char/drm/sis_mm.c       |  504
-rw-r--r--  drivers/char/drm/via_dmablit.c  |   68
-rw-r--r--  drivers/char/drm/via_drm.h      |    8
-rw-r--r--  drivers/char/drm/via_drv.c      |    3
-rw-r--r--  drivers/char/drm/via_drv.h      |   16
-rw-r--r--  drivers/char/drm/via_ds.c       |  273
-rw-r--r--  drivers/char/drm/via_ds.h       |  104
-rw-r--r--  drivers/char/drm/via_map.c      |    9
-rw-r--r--  drivers/char/drm/via_mm.c       |  375
42 files changed, 1878 insertions, 1772 deletions
diff --git a/drivers/char/drm/Kconfig b/drivers/char/drm/Kconfig
index 5278c388d3e7..ef833a1c27eb 100644
--- a/drivers/char/drm/Kconfig
+++ b/drivers/char/drm/Kconfig
@@ -60,7 +60,9 @@ config DRM_I830
 	  Choose this option if you have a system that has Intel 830M, 845G,
 	  852GM, 855GM or 865G integrated graphics. If M is selected, the
 	  module will be called i830. AGP support is required for this driver
-	  to work. This driver will eventually be replaced by the i915 one.
+	  to work. This driver is used by the older X releases X.org 6.7 and
+	  XFree86 4.3. If unsure, build this and i915 as modules and the X server
+	  will load the correct one.
 
 config DRM_I915
 	tristate "i915 driver"
@@ -68,8 +70,9 @@ config DRM_I915
 	  Choose this option if you have a system that has Intel 830M, 845G,
 	  852GM, 855GM 865G or 915G integrated graphics. If M is selected, the
 	  module will be called i915. AGP support is required for this driver
-	  to work. This driver will eventually replace the I830 driver, when
-	  later release of X start to use the new DDX and DRI.
+	  to work. This driver is used by the Intel driver in X.org 6.8 and
+	  XFree86 4.4 and above. If unsure, build this and i830 as modules and
+	  the X server will load the correct one.
 
 endchoice
 
diff --git a/drivers/char/drm/Makefile b/drivers/char/drm/Makefile
index 9d180c42816c..3ad0f648c6b2 100644
--- a/drivers/char/drm/Makefile
+++ b/drivers/char/drm/Makefile
@@ -6,7 +6,7 @@ drm-objs := drm_auth.o drm_bufs.o drm_context.o drm_dma.o drm_drawable.o \
 		drm_drv.o drm_fops.o drm_ioctl.o drm_irq.o \
 		drm_lock.o drm_memory.o drm_proc.o drm_stub.o drm_vm.o \
 		drm_agpsupport.o drm_scatter.o ati_pcigart.o drm_pci.o \
-		drm_sysfs.o
+		drm_sysfs.o drm_hashtab.o drm_sman.o drm_mm.o
 
 tdfx-objs := tdfx_drv.o
 r128-objs := r128_drv.o r128_cce.o r128_state.o r128_irq.o
@@ -16,9 +16,9 @@ i830-objs := i830_drv.o i830_dma.o i830_irq.o
 i915-objs := i915_drv.o i915_dma.o i915_irq.o i915_mem.o
 radeon-objs := radeon_drv.o radeon_cp.o radeon_state.o radeon_mem.o radeon_irq.o r300_cmdbuf.o
 ffb-objs := ffb_drv.o ffb_context.o
-sis-objs := sis_drv.o sis_ds.o sis_mm.o
+sis-objs := sis_drv.o sis_mm.o
 savage-objs := savage_drv.o savage_bci.o savage_state.o
-via-objs := via_irq.o via_drv.o via_ds.o via_map.o via_mm.o via_dma.o via_verifier.o via_video.o via_dmablit.o
+via-objs := via_irq.o via_drv.o via_map.o via_mm.o via_dma.o via_verifier.o via_video.o via_dmablit.o
 
 ifeq ($(CONFIG_COMPAT),y)
 drm-objs += drm_ioc32.o
diff --git a/drivers/char/drm/drmP.h b/drivers/char/drm/drmP.h
index d2a56182bc35..7690a59ace04 100644
--- a/drivers/char/drm/drmP.h
+++ b/drivers/char/drm/drmP.h
@@ -79,6 +79,7 @@
79#define __OS_HAS_MTRR (defined(CONFIG_MTRR)) 79#define __OS_HAS_MTRR (defined(CONFIG_MTRR))
80 80
81#include "drm_os_linux.h" 81#include "drm_os_linux.h"
82#include "drm_hashtab.h"
82 83
83/***********************************************************************/ 84/***********************************************************************/
84/** \name DRM template customization defaults */ 85/** \name DRM template customization defaults */
@@ -104,7 +105,7 @@
104#define DRM_DEBUG_CODE 2 /**< Include debugging code if > 1, then 105#define DRM_DEBUG_CODE 2 /**< Include debugging code if > 1, then
105 also include looping detection. */ 106 also include looping detection. */
106 107
107#define DRM_HASH_SIZE 16 /**< Size of key hash table. Must be power of 2. */ 108#define DRM_MAGIC_HASH_ORDER 4 /**< Size of key hash table. Must be power of 2. */
108#define DRM_KERNEL_CONTEXT 0 /**< Change drm_resctx if changed */ 109#define DRM_KERNEL_CONTEXT 0 /**< Change drm_resctx if changed */
109#define DRM_RESERVED_CONTEXTS 1 /**< Change drm_resctx if changed */ 110#define DRM_RESERVED_CONTEXTS 1 /**< Change drm_resctx if changed */
110#define DRM_LOOPING_LIMIT 5000000 111#define DRM_LOOPING_LIMIT 5000000
@@ -134,19 +135,12 @@
134#define DRM_MEM_CTXBITMAP 18 135#define DRM_MEM_CTXBITMAP 18
135#define DRM_MEM_STUB 19 136#define DRM_MEM_STUB 19
136#define DRM_MEM_SGLISTS 20 137#define DRM_MEM_SGLISTS 20
137#define DRM_MEM_CTXLIST 21 138#define DRM_MEM_CTXLIST 21
139#define DRM_MEM_MM 22
140#define DRM_MEM_HASHTAB 23
138 141
139#define DRM_MAX_CTXBITMAP (PAGE_SIZE * 8) 142#define DRM_MAX_CTXBITMAP (PAGE_SIZE * 8)
140 143#define DRM_MAP_HASH_OFFSET 0x10000000
141/*@}*/
142
143/***********************************************************************/
144/** \name Backward compatibility section */
145/*@{*/
146
147#define DRM_RPR_ARG(vma) vma,
148
149#define VM_OFFSET(vma) ((vma)->vm_pgoff << PAGE_SHIFT)
150 144
151/*@}*/ 145/*@}*/
152 146
@@ -211,8 +205,6 @@
211/*@{*/ 205/*@{*/
212 206
213#define DRM_ARRAY_SIZE(x) ARRAY_SIZE(x) 207#define DRM_ARRAY_SIZE(x) ARRAY_SIZE(x)
214#define DRM_MIN(a,b) min(a,b)
215#define DRM_MAX(a,b) max(a,b)
216 208
217#define DRM_LEFTCOUNT(x) (((x)->rp + (x)->count - (x)->wp) % ((x)->count + 1)) 209#define DRM_LEFTCOUNT(x) (((x)->rp + (x)->count - (x)->wp) % ((x)->count + 1))
218#define DRM_BUFCOUNT(x) ((x)->count - DRM_LEFTCOUNT(x)) 210#define DRM_BUFCOUNT(x) ((x)->count - DRM_LEFTCOUNT(x))
@@ -286,7 +278,8 @@ typedef struct drm_devstate {
286} drm_devstate_t; 278} drm_devstate_t;
287 279
288typedef struct drm_magic_entry { 280typedef struct drm_magic_entry {
289 drm_magic_t magic; 281 drm_hash_item_t hash_item;
282 struct list_head head;
290 struct drm_file *priv; 283 struct drm_file *priv;
291 struct drm_magic_entry *next; 284 struct drm_magic_entry *next;
292} drm_magic_entry_t; 285} drm_magic_entry_t;
@@ -493,6 +486,7 @@ typedef struct drm_sigdata {
493 */ 486 */
494typedef struct drm_map_list { 487typedef struct drm_map_list {
495 struct list_head head; /**< list head */ 488 struct list_head head; /**< list head */
489 drm_hash_item_t hash;
496 drm_map_t *map; /**< mapping */ 490 drm_map_t *map; /**< mapping */
497 unsigned int user_token; 491 unsigned int user_token;
498} drm_map_list_t; 492} drm_map_list_t;
@@ -527,6 +521,22 @@ typedef struct ati_pcigart_info {
527 drm_local_map_t mapping; 521 drm_local_map_t mapping;
528} drm_ati_pcigart_info; 522} drm_ati_pcigart_info;
529 523
524/*
525 * Generic memory manager structs
526 */
527typedef struct drm_mm_node {
528 struct list_head fl_entry;
529 struct list_head ml_entry;
530 int free;
531 unsigned long start;
532 unsigned long size;
533 void *private;
534} drm_mm_node_t;
535
536typedef struct drm_mm {
537 drm_mm_node_t root_node;
538} drm_mm_t;
539
530/** 540/**
531 * DRM driver structure. This structure represent the common code for 541 * DRM driver structure. This structure represent the common code for
532 * a family of cards. There will one drm_device for each card present 542 * a family of cards. There will one drm_device for each card present
@@ -646,13 +656,15 @@ typedef struct drm_device {
646 /*@{ */ 656 /*@{ */
647 drm_file_t *file_first; /**< file list head */ 657 drm_file_t *file_first; /**< file list head */
648 drm_file_t *file_last; /**< file list tail */ 658 drm_file_t *file_last; /**< file list tail */
649 drm_magic_head_t magiclist[DRM_HASH_SIZE]; /**< magic hash table */ 659 drm_open_hash_t magiclist; /**< magic hash table */
660 struct list_head magicfree;
650 /*@} */ 661 /*@} */
651 662
652 /** \name Memory management */ 663 /** \name Memory management */
653 /*@{ */ 664 /*@{ */
654 drm_map_list_t *maplist; /**< Linked list of regions */ 665 drm_map_list_t *maplist; /**< Linked list of regions */
655 int map_count; /**< Number of mappable regions */ 666 int map_count; /**< Number of mappable regions */
667 drm_open_hash_t map_hash; /**< User token hash table for maps */
656 668
657 /** \name Context handle management */ 669 /** \name Context handle management */
658 /*@{ */ 670 /*@{ */
@@ -711,10 +723,8 @@ typedef struct drm_device {
711 drm_agp_head_t *agp; /**< AGP data */ 723 drm_agp_head_t *agp; /**< AGP data */
712 724
713 struct pci_dev *pdev; /**< PCI device structure */ 725 struct pci_dev *pdev; /**< PCI device structure */
714 int pci_domain; /**< PCI bus domain number */ 726 int pci_vendor; /**< PCI vendor id */
715 int pci_bus; /**< PCI bus number */ 727 int pci_device; /**< PCI device id */
716 int pci_slot; /**< PCI slot number */
717 int pci_func; /**< PCI function number */
718#ifdef __alpha__ 728#ifdef __alpha__
719 struct pci_controller *hose; 729 struct pci_controller *hose;
720#endif 730#endif
@@ -736,6 +746,12 @@ static __inline__ int drm_core_check_feature(struct drm_device *dev,
736 return ((dev->driver->driver_features & feature) ? 1 : 0); 746 return ((dev->driver->driver_features & feature) ? 1 : 0);
737} 747}
738 748
749#ifdef __alpha__
750#define drm_get_pci_domain(dev) dev->hose->bus->number
751#else
752#define drm_get_pci_domain(dev) 0
753#endif
754
739#if __OS_HAS_AGP 755#if __OS_HAS_AGP
740static inline int drm_core_has_AGP(struct drm_device *dev) 756static inline int drm_core_has_AGP(struct drm_device *dev)
741{ 757{
@@ -1011,6 +1027,18 @@ extern struct class_device *drm_sysfs_device_add(struct class *cs,
1011 drm_head_t *head); 1027 drm_head_t *head);
1012extern void drm_sysfs_device_remove(struct class_device *class_dev); 1028extern void drm_sysfs_device_remove(struct class_device *class_dev);
1013 1029
1030/*
1031 * Basic memory manager support (drm_mm.c)
1032 */
1033extern drm_mm_node_t *drm_mm_get_block(drm_mm_node_t * parent,
1034 unsigned long size,
1035 unsigned alignment);
1036extern void drm_mm_put_block(drm_mm_t *mm, drm_mm_node_t *cur);
1037extern drm_mm_node_t *drm_mm_search_free(const drm_mm_t *mm, unsigned long size,
1038 unsigned alignment, int best_match);
1039extern int drm_mm_init(drm_mm_t *mm, unsigned long start, unsigned long size);
1040extern void drm_mm_takedown(drm_mm_t *mm);
1041
1014/* Inline replacements for DRM_IOREMAP macros */ 1042/* Inline replacements for DRM_IOREMAP macros */
1015static __inline__ void drm_core_ioremap(struct drm_map *map, 1043static __inline__ void drm_core_ioremap(struct drm_map *map,
1016 struct drm_device *dev) 1044 struct drm_device *dev)
diff --git a/drivers/char/drm/drm_auth.c b/drivers/char/drm/drm_auth.c
index 2a37586a7ee8..c7b19d35bcd6 100644
--- a/drivers/char/drm/drm_auth.c
+++ b/drivers/char/drm/drm_auth.c
@@ -36,20 +36,6 @@
36#include "drmP.h" 36#include "drmP.h"
37 37
38/** 38/**
39 * Generate a hash key from a magic.
40 *
41 * \param magic magic.
42 * \return hash key.
43 *
44 * The key is the modulus of the hash table size, #DRM_HASH_SIZE, which must be
45 * a power of 2.
46 */
47static int drm_hash_magic(drm_magic_t magic)
48{
49 return magic & (DRM_HASH_SIZE - 1);
50}
51
52/**
53 * Find the file with the given magic number. 39 * Find the file with the given magic number.
54 * 40 *
55 * \param dev DRM device. 41 * \param dev DRM device.
@@ -63,14 +49,12 @@ static drm_file_t *drm_find_file(drm_device_t * dev, drm_magic_t magic)
63{ 49{
64 drm_file_t *retval = NULL; 50 drm_file_t *retval = NULL;
65 drm_magic_entry_t *pt; 51 drm_magic_entry_t *pt;
66 int hash = drm_hash_magic(magic); 52 drm_hash_item_t *hash;
67 53
68 mutex_lock(&dev->struct_mutex); 54 mutex_lock(&dev->struct_mutex);
69 for (pt = dev->magiclist[hash].head; pt; pt = pt->next) { 55 if (!drm_ht_find_item(&dev->magiclist, (unsigned long)magic, &hash)) {
70 if (pt->magic == magic) { 56 pt = drm_hash_entry(hash, drm_magic_entry_t, hash_item);
71 retval = pt->priv; 57 retval = pt->priv;
72 break;
73 }
74 } 58 }
75 mutex_unlock(&dev->struct_mutex); 59 mutex_unlock(&dev->struct_mutex);
76 return retval; 60 return retval;
@@ -90,28 +74,20 @@ static drm_file_t *drm_find_file(drm_device_t * dev, drm_magic_t magic)
90static int drm_add_magic(drm_device_t * dev, drm_file_t * priv, 74static int drm_add_magic(drm_device_t * dev, drm_file_t * priv,
91 drm_magic_t magic) 75 drm_magic_t magic)
92{ 76{
93 int hash;
94 drm_magic_entry_t *entry; 77 drm_magic_entry_t *entry;
95 78
96 DRM_DEBUG("%d\n", magic); 79 DRM_DEBUG("%d\n", magic);
97 80
98 hash = drm_hash_magic(magic);
99 entry = drm_alloc(sizeof(*entry), DRM_MEM_MAGIC); 81 entry = drm_alloc(sizeof(*entry), DRM_MEM_MAGIC);
100 if (!entry) 82 if (!entry)
101 return -ENOMEM; 83 return -ENOMEM;
102 memset(entry, 0, sizeof(*entry)); 84 memset(entry, 0, sizeof(*entry));
103 entry->magic = magic;
104 entry->priv = priv; 85 entry->priv = priv;
105 entry->next = NULL;
106 86
87 entry->hash_item.key = (unsigned long)magic;
107 mutex_lock(&dev->struct_mutex); 88 mutex_lock(&dev->struct_mutex);
108 if (dev->magiclist[hash].tail) { 89 drm_ht_insert_item(&dev->magiclist, &entry->hash_item);
109 dev->magiclist[hash].tail->next = entry; 90 list_add_tail(&entry->head, &dev->magicfree);
110 dev->magiclist[hash].tail = entry;
111 } else {
112 dev->magiclist[hash].head = entry;
113 dev->magiclist[hash].tail = entry;
114 }
115 mutex_unlock(&dev->struct_mutex); 91 mutex_unlock(&dev->struct_mutex);
116 92
117 return 0; 93 return 0;
@@ -128,34 +104,24 @@ static int drm_add_magic(drm_device_t * dev, drm_file_t * priv,
128 */ 104 */
129static int drm_remove_magic(drm_device_t * dev, drm_magic_t magic) 105static int drm_remove_magic(drm_device_t * dev, drm_magic_t magic)
130{ 106{
131 drm_magic_entry_t *prev = NULL;
132 drm_magic_entry_t *pt; 107 drm_magic_entry_t *pt;
133 int hash; 108 drm_hash_item_t *hash;
134 109
135 DRM_DEBUG("%d\n", magic); 110 DRM_DEBUG("%d\n", magic);
136 hash = drm_hash_magic(magic);
137 111
138 mutex_lock(&dev->struct_mutex); 112 mutex_lock(&dev->struct_mutex);
139 for (pt = dev->magiclist[hash].head; pt; prev = pt, pt = pt->next) { 113 if (drm_ht_find_item(&dev->magiclist, (unsigned long)magic, &hash)) {
140 if (pt->magic == magic) { 114 mutex_unlock(&dev->struct_mutex);
141 if (dev->magiclist[hash].head == pt) { 115 return -EINVAL;
142 dev->magiclist[hash].head = pt->next;
143 }
144 if (dev->magiclist[hash].tail == pt) {
145 dev->magiclist[hash].tail = prev;
146 }
147 if (prev) {
148 prev->next = pt->next;
149 }
150 mutex_unlock(&dev->struct_mutex);
151 return 0;
152 }
153 } 116 }
117 pt = drm_hash_entry(hash, drm_magic_entry_t, hash_item);
118 drm_ht_remove_item(&dev->magiclist, hash);
119 list_del(&pt->head);
154 mutex_unlock(&dev->struct_mutex); 120 mutex_unlock(&dev->struct_mutex);
155 121
156 drm_free(pt, sizeof(*pt), DRM_MEM_MAGIC); 122 drm_free(pt, sizeof(*pt), DRM_MEM_MAGIC);
157 123
158 return -EINVAL; 124 return 0;
159} 125}
160 126
161/** 127/**
diff --git a/drivers/char/drm/drm_bufs.c b/drivers/char/drm/drm_bufs.c
index 006b06d29727..029baea33b62 100644
--- a/drivers/char/drm/drm_bufs.c
+++ b/drivers/char/drm/drm_bufs.c
@@ -65,43 +65,29 @@ static drm_map_list_t *drm_find_matching_map(drm_device_t *dev,
65 return NULL; 65 return NULL;
66} 66}
67 67
68/* 68static int drm_map_handle(drm_device_t *dev, drm_hash_item_t *hash,
69 * Used to allocate 32-bit handles for mappings. 69 unsigned long user_token, int hashed_handle)
70 */
71#define START_RANGE 0x10000000
72#define END_RANGE 0x40000000
73
74#ifdef _LP64
75static __inline__ unsigned int HandleID(unsigned long lhandle,
76 drm_device_t *dev)
77{ 70{
78 static unsigned int map32_handle = START_RANGE; 71 int use_hashed_handle;
79 unsigned int hash; 72#if (BITS_PER_LONG == 64)
80 73 use_hashed_handle = ((user_token & 0xFFFFFFFF00000000UL) || hashed_handle);
81 if (lhandle & 0xffffffff00000000) { 74#elif (BITS_PER_LONG == 32)
82 hash = map32_handle; 75 use_hashed_handle = hashed_handle;
83 map32_handle += PAGE_SIZE; 76#else
84 if (map32_handle > END_RANGE) 77#error Unsupported long size. Neither 64 nor 32 bits.
85 map32_handle = START_RANGE; 78#endif
86 } else
87 hash = lhandle;
88
89 while (1) {
90 drm_map_list_t *_entry;
91 list_for_each_entry(_entry, &dev->maplist->head, head) {
92 if (_entry->user_token == hash)
93 break;
94 }
95 if (&_entry->head == &dev->maplist->head)
96 return hash;
97 79
98 hash += PAGE_SIZE; 80 if (!use_hashed_handle) {
99 map32_handle += PAGE_SIZE; 81 int ret;
82 hash->key = user_token;
83 ret = drm_ht_insert_item(&dev->map_hash, hash);
84 if (ret != -EINVAL)
85 return ret;
100 } 86 }
87 return drm_ht_just_insert_please(&dev->map_hash, hash,
88 user_token, 32 - PAGE_SHIFT - 3,
89 PAGE_SHIFT, DRM_MAP_HASH_OFFSET);
101} 90}
102#else
103# define HandleID(x,dev) (unsigned int)(x)
104#endif
105 91
106/** 92/**
107 * Ioctl to specify a range of memory that is available for mapping by a non-root process. 93 * Ioctl to specify a range of memory that is available for mapping by a non-root process.
@@ -123,6 +109,8 @@ static int drm_addmap_core(drm_device_t * dev, unsigned int offset,
123 drm_map_t *map; 109 drm_map_t *map;
124 drm_map_list_t *list; 110 drm_map_list_t *list;
125 drm_dma_handle_t *dmah; 111 drm_dma_handle_t *dmah;
112 unsigned long user_token;
113 int ret;
126 114
127 map = drm_alloc(sizeof(*map), DRM_MEM_MAPS); 115 map = drm_alloc(sizeof(*map), DRM_MEM_MAPS);
128 if (!map) 116 if (!map)
@@ -257,11 +245,20 @@ static int drm_addmap_core(drm_device_t * dev, unsigned int offset,
257 245
258 mutex_lock(&dev->struct_mutex); 246 mutex_lock(&dev->struct_mutex);
259 list_add(&list->head, &dev->maplist->head); 247 list_add(&list->head, &dev->maplist->head);
248
260 /* Assign a 32-bit handle */ 249 /* Assign a 32-bit handle */
261 /* We do it here so that dev->struct_mutex protects the increment */ 250 /* We do it here so that dev->struct_mutex protects the increment */
262 list->user_token = HandleID(map->type == _DRM_SHM 251 user_token = (map->type == _DRM_SHM) ? (unsigned long)map->handle :
263 ? (unsigned long)map->handle 252 map->offset;
264 : map->offset, dev); 253 ret = drm_map_handle(dev, &list->hash, user_token, 0);
254 if (ret) {
255 drm_free(map, sizeof(*map), DRM_MEM_MAPS);
256 drm_free(list, sizeof(*list), DRM_MEM_MAPS);
257 mutex_unlock(&dev->struct_mutex);
258 return ret;
259 }
260
261 list->user_token = list->hash.key;
265 mutex_unlock(&dev->struct_mutex); 262 mutex_unlock(&dev->struct_mutex);
266 263
267 *maplist = list; 264 *maplist = list;
@@ -346,6 +343,7 @@ int drm_rmmap_locked(drm_device_t *dev, drm_local_map_t *map)
346 343
347 if (r_list->map == map) { 344 if (r_list->map == map) {
348 list_del(list); 345 list_del(list);
346 drm_ht_remove_key(&dev->map_hash, r_list->user_token);
349 drm_free(list, sizeof(*list), DRM_MEM_MAPS); 347 drm_free(list, sizeof(*list), DRM_MEM_MAPS);
350 break; 348 break;
351 } 349 }
@@ -441,8 +439,10 @@ int drm_rmmap_ioctl(struct inode *inode, struct file *filp,
441 return -EINVAL; 439 return -EINVAL;
442 } 440 }
443 441
444 if (!map) 442 if (!map) {
443 mutex_unlock(&dev->struct_mutex);
445 return -EINVAL; 444 return -EINVAL;
445 }
446 446
447 /* Register and framebuffer maps are permanent */ 447 /* Register and framebuffer maps are permanent */
448 if ((map->type == _DRM_REGISTERS) || (map->type == _DRM_FRAME_BUFFER)) { 448 if ((map->type == _DRM_REGISTERS) || (map->type == _DRM_FRAME_BUFFER)) {
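[Editorial note, not part of the patch: a rough worked example of the key space the new drm_map_handle() path above carves out, assuming 4 KiB pages; the exact numbers depend on PAGE_SHIFT.]

	/* Illustrative arithmetic only:
	 *   bits = 32 - PAGE_SHIFT - 3 = 32 - 12 - 3 = 17
	 *   key  = (hash_long(user_token, 17) << PAGE_SHIFT) + DRM_MAP_HASH_OFFSET
	 * so hashed handles are page-aligned values in
	 *   [0x10000000, 0x10000000 + (1 << 29)),
	 * keeping every map token representable in 32 bits for mmap().
	 */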
diff --git a/drivers/char/drm/drm_drv.c b/drivers/char/drm/drm_drv.c
index 3c0b882a8e72..b366c5b1bd16 100644
--- a/drivers/char/drm/drm_drv.c
+++ b/drivers/char/drm/drm_drv.c
@@ -118,7 +118,7 @@ static drm_ioctl_desc_t drm_ioctls[] = {
 	[DRM_IOCTL_NR(DRM_IOCTL_WAIT_VBLANK)] = {drm_wait_vblank, 0},
 };
 
-#define DRIVER_IOCTL_COUNT	DRM_ARRAY_SIZE( drm_ioctls )
+#define DRIVER_IOCTL_COUNT	ARRAY_SIZE( drm_ioctls )
 
 /**
  * Take down the DRM device.
@@ -155,12 +155,13 @@ int drm_lastclose(drm_device_t * dev)
 	del_timer(&dev->timer);
 
 	/* Clear pid list */
-	for (i = 0; i < DRM_HASH_SIZE; i++) {
-		for (pt = dev->magiclist[i].head; pt; pt = next) {
-			next = pt->next;
+	if (dev->magicfree.next) {
+		list_for_each_entry_safe(pt, next, &dev->magicfree, head) {
+			list_del(&pt->head);
+			drm_ht_remove_item(&dev->magiclist, &pt->hash_item);
 			drm_free(pt, sizeof(*pt), DRM_MEM_MAGIC);
 		}
-		dev->magiclist[i].head = dev->magiclist[i].tail = NULL;
+		drm_ht_remove(&dev->magiclist);
 	}
 
 	/* Clear AGP information */
@@ -299,6 +300,7 @@ static void drm_cleanup(drm_device_t * dev)
 	if (dev->maplist) {
 		drm_free(dev->maplist, sizeof(*dev->maplist), DRM_MEM_MAPS);
 		dev->maplist = NULL;
+		drm_ht_remove(&dev->map_hash);
 	}
 
 	drm_ctxbitmap_cleanup(dev);
diff --git a/drivers/char/drm/drm_fops.c b/drivers/char/drm/drm_fops.c
index b7f7951c4587..898f47dafec0 100644
--- a/drivers/char/drm/drm_fops.c
+++ b/drivers/char/drm/drm_fops.c
@@ -53,6 +53,8 @@ static int drm_setup(drm_device_t * dev)
 			return ret;
 	}
 
+	dev->magicfree.next = NULL;
+
 	/* prebuild the SAREA */
 	i = drm_addmap(dev, 0, SAREA_MAX, _DRM_SHM, _DRM_CONTAINS_LOCK, &map);
 	if (i != 0)
@@ -69,13 +71,11 @@ static int drm_setup(drm_device_t * dev)
 		return i;
 	}
 
-	for (i = 0; i < DRM_ARRAY_SIZE(dev->counts); i++)
+	for (i = 0; i < ARRAY_SIZE(dev->counts); i++)
 		atomic_set(&dev->counts[i], 0);
 
-	for (i = 0; i < DRM_HASH_SIZE; i++) {
-		dev->magiclist[i].head = NULL;
-		dev->magiclist[i].tail = NULL;
-	}
+	drm_ht_create(&dev->magiclist, DRM_MAGIC_HASH_ORDER);
+	INIT_LIST_HEAD(&dev->magicfree);
 
 	dev->ctxlist = drm_alloc(sizeof(*dev->ctxlist), DRM_MEM_CTXLIST);
 	if (dev->ctxlist == NULL)
diff --git a/drivers/char/drm/drm_hashtab.c b/drivers/char/drm/drm_hashtab.c
new file mode 100644
index 000000000000..a0b2d6802ae4
--- /dev/null
+++ b/drivers/char/drm/drm_hashtab.c
@@ -0,0 +1,190 @@
1/**************************************************************************
2 *
3 * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND. USA.
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 *
27 **************************************************************************/
28/*
29 * Simple open hash tab implementation.
30 *
31 * Authors:
32 * Thomas Hellström <thomas-at-tungstengraphics-dot-com>
33 */
34
35#include "drmP.h"
36#include "drm_hashtab.h"
37#include <linux/hash.h>
38
39int drm_ht_create(drm_open_hash_t *ht, unsigned int order)
40{
41 unsigned int i;
42
43 ht->size = 1 << order;
44 ht->order = order;
45 ht->fill = 0;
46 ht->table = vmalloc(ht->size*sizeof(*ht->table));
47 if (!ht->table) {
48 DRM_ERROR("Out of memory for hash table\n");
49 return -ENOMEM;
50 }
51 for (i=0; i< ht->size; ++i) {
52 INIT_HLIST_HEAD(&ht->table[i]);
53 }
54 return 0;
55}
56
57void drm_ht_verbose_list(drm_open_hash_t *ht, unsigned long key)
58{
59 drm_hash_item_t *entry;
60 struct hlist_head *h_list;
61 struct hlist_node *list;
62 unsigned int hashed_key;
63 int count = 0;
64
65 hashed_key = hash_long(key, ht->order);
66 DRM_DEBUG("Key is 0x%08lx, Hashed key is 0x%08x\n", key, hashed_key);
67 h_list = &ht->table[hashed_key];
68 hlist_for_each(list, h_list) {
69 entry = hlist_entry(list, drm_hash_item_t, head);
70 DRM_DEBUG("count %d, key: 0x%08lx\n", count++, entry->key);
71 }
72}
73
74static struct hlist_node *drm_ht_find_key(drm_open_hash_t *ht,
75 unsigned long key)
76{
77 drm_hash_item_t *entry;
78 struct hlist_head *h_list;
79 struct hlist_node *list;
80 unsigned int hashed_key;
81
82 hashed_key = hash_long(key, ht->order);
83 h_list = &ht->table[hashed_key];
84 hlist_for_each(list, h_list) {
85 entry = hlist_entry(list, drm_hash_item_t, head);
86 if (entry->key == key)
87 return list;
88 if (entry->key > key)
89 break;
90 }
91 return NULL;
92}
93
94
95int drm_ht_insert_item(drm_open_hash_t *ht, drm_hash_item_t *item)
96{
97 drm_hash_item_t *entry;
98 struct hlist_head *h_list;
99 struct hlist_node *list, *parent;
100 unsigned int hashed_key;
101 unsigned long key = item->key;
102
103 hashed_key = hash_long(key, ht->order);
104 h_list = &ht->table[hashed_key];
105 parent = NULL;
106 hlist_for_each(list, h_list) {
107 entry = hlist_entry(list, drm_hash_item_t, head);
108 if (entry->key == key)
109 return -EINVAL;
110 if (entry->key > key)
111 break;
112 parent = list;
113 }
114 if (parent) {
115 hlist_add_after(parent, &item->head);
116 } else {
117 hlist_add_head(&item->head, h_list);
118 }
119 return 0;
120}
121
122/*
123 * Just insert an item and return any "bits" bit key that hasn't been
124 * used before.
125 */
126int drm_ht_just_insert_please(drm_open_hash_t *ht, drm_hash_item_t *item,
127 unsigned long seed, int bits, int shift,
128 unsigned long add)
129{
130 int ret;
131 unsigned long mask = (1 << bits) - 1;
132 unsigned long first, unshifted_key;
133
134 unshifted_key = hash_long(seed, bits);
135 first = unshifted_key;
136 do {
137 item->key = (unshifted_key << shift) + add;
138 ret = drm_ht_insert_item(ht, item);
139 if (ret)
140 unshifted_key = (unshifted_key + 1) & mask;
141 } while(ret && (unshifted_key != first));
142
143 if (ret) {
144 DRM_ERROR("Available key bit space exhausted\n");
145 return -EINVAL;
146 }
147 return 0;
148}
149
150int drm_ht_find_item(drm_open_hash_t *ht, unsigned long key,
151 drm_hash_item_t **item)
152{
153 struct hlist_node *list;
154
155 list = drm_ht_find_key(ht, key);
156 if (!list)
157 return -EINVAL;
158
159 *item = hlist_entry(list, drm_hash_item_t, head);
160 return 0;
161}
162
163int drm_ht_remove_key(drm_open_hash_t *ht, unsigned long key)
164{
165 struct hlist_node *list;
166
167 list = drm_ht_find_key(ht, key);
168 if (list) {
169 hlist_del_init(list);
170 ht->fill--;
171 return 0;
172 }
173 return -EINVAL;
174}
175
176int drm_ht_remove_item(drm_open_hash_t *ht, drm_hash_item_t *item)
177{
178 hlist_del_init(&item->head);
179 ht->fill--;
180 return 0;
181}
182
183void drm_ht_remove(drm_open_hash_t *ht)
184{
185 if (ht->table) {
186 vfree(ht->table);
187 ht->table = NULL;
188 }
189}
190
diff --git a/drivers/char/drm/drm_hashtab.h b/drivers/char/drm/drm_hashtab.h
new file mode 100644
index 000000000000..40afec05bff8
--- /dev/null
+++ b/drivers/char/drm/drm_hashtab.h
@@ -0,0 +1,67 @@
1/**************************************************************************
2 *
3 * Copyright 2006 Tungsten Graphics, Inc., Bismack, ND. USA.
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 *
27 **************************************************************************/
28/*
29 * Simple open hash tab implementation.
30 *
31 * Authors:
32 * Thomas Hellström <thomas-at-tungstengraphics-dot-com>
33 */
34
35#ifndef DRM_HASHTAB_H
36#define DRM_HASHTAB_H
37
38#define drm_hash_entry(_ptr, _type, _member) container_of(_ptr, _type, _member)
39
40typedef struct drm_hash_item{
41 struct hlist_node head;
42 unsigned long key;
43} drm_hash_item_t;
44
45typedef struct drm_open_hash{
46 unsigned int size;
47 unsigned int order;
48 unsigned int fill;
49 struct hlist_head *table;
50} drm_open_hash_t;
51
52
53extern int drm_ht_create(drm_open_hash_t *ht, unsigned int order);
54extern int drm_ht_insert_item(drm_open_hash_t *ht, drm_hash_item_t *item);
55extern int drm_ht_just_insert_please(drm_open_hash_t *ht, drm_hash_item_t *item,
56 unsigned long seed, int bits, int shift,
57 unsigned long add);
58extern int drm_ht_find_item(drm_open_hash_t *ht, unsigned long key, drm_hash_item_t **item);
59
60extern void drm_ht_verbose_list(drm_open_hash_t *ht, unsigned long key);
61extern int drm_ht_remove_key(drm_open_hash_t *ht, unsigned long key);
62extern int drm_ht_remove_item(drm_open_hash_t *ht, drm_hash_item_t *item);
63extern void drm_ht_remove(drm_open_hash_t *ht);
64
65
66#endif
67
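[Editorial note: a minimal usage sketch of the new open hash table API above. Illustrative only; my_object and the helper names are hypothetical, but the pattern mirrors how drm_auth.c embeds drm_hash_item_t for magic cookies.]

	#include "drmP.h"
	#include "drm_hashtab.h"

	typedef struct my_object {
		drm_hash_item_t hash_item;	/* links this object into the table */
		void *data;
	} my_object_t;

	static int my_object_register(drm_open_hash_t *ht, my_object_t *obj,
				      unsigned long key)
	{
		obj->hash_item.key = key;
		/* Fails with -EINVAL if the key is already in the table. */
		return drm_ht_insert_item(ht, &obj->hash_item);
	}

	static my_object_t *my_object_lookup(drm_open_hash_t *ht, unsigned long key)
	{
		drm_hash_item_t *hash;

		if (drm_ht_find_item(ht, key, &hash))
			return NULL;
		/* Recover the containing object from the embedded hash item. */
		return drm_hash_entry(hash, my_object_t, hash_item);
	}

	/* Typical lifecycle: drm_ht_create(&ht, order) once, insert/find and
	 * drm_ht_remove_item() per object, drm_ht_remove(&ht) at teardown. */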
diff --git a/drivers/char/drm/drm_ioc32.c b/drivers/char/drm/drm_ioc32.c
index e9e2db18952d..d4f874520082 100644
--- a/drivers/char/drm/drm_ioc32.c
+++ b/drivers/char/drm/drm_ioc32.c
@@ -1051,7 +1051,7 @@ long drm_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 	drm_ioctl_compat_t *fn;
 	int ret;
 
-	if (nr >= DRM_ARRAY_SIZE(drm_compat_ioctls))
+	if (nr >= ARRAY_SIZE(drm_compat_ioctls))
 		return -ENOTTY;
 
 	fn = drm_compat_ioctls[nr];
diff --git a/drivers/char/drm/drm_ioctl.c b/drivers/char/drm/drm_ioctl.c
index 555f323b8a32..565895547d75 100644
--- a/drivers/char/drm/drm_ioctl.c
+++ b/drivers/char/drm/drm_ioctl.c
@@ -127,9 +127,10 @@ int drm_setunique(struct inode *inode, struct file *filp,
127 domain = bus >> 8; 127 domain = bus >> 8;
128 bus &= 0xff; 128 bus &= 0xff;
129 129
130 if ((domain != dev->pci_domain) || 130 if ((domain != drm_get_pci_domain(dev)) ||
131 (bus != dev->pci_bus) || 131 (bus != dev->pdev->bus->number) ||
132 (slot != dev->pci_slot) || (func != dev->pci_func)) 132 (slot != PCI_SLOT(dev->pdev->devfn)) ||
133 (func != PCI_FUNC(dev->pdev->devfn)))
133 return -EINVAL; 134 return -EINVAL;
134 135
135 return 0; 136 return 0;
@@ -140,15 +141,17 @@ static int drm_set_busid(drm_device_t * dev)
140 int len; 141 int len;
141 142
142 if (dev->unique != NULL) 143 if (dev->unique != NULL)
143 return EBUSY; 144 return 0;
144 145
145 dev->unique_len = 40; 146 dev->unique_len = 40;
146 dev->unique = drm_alloc(dev->unique_len + 1, DRM_MEM_DRIVER); 147 dev->unique = drm_alloc(dev->unique_len + 1, DRM_MEM_DRIVER);
147 if (dev->unique == NULL) 148 if (dev->unique == NULL)
148 return ENOMEM; 149 return -ENOMEM;
149 150
150 len = snprintf(dev->unique, dev->unique_len, "pci:%04x:%02x:%02x.%d", 151 len = snprintf(dev->unique, dev->unique_len, "pci:%04x:%02x:%02x.%d",
151 dev->pci_domain, dev->pci_bus, dev->pci_slot, dev->pci_func); 152 drm_get_pci_domain(dev), dev->pdev->bus->number,
153 PCI_SLOT(dev->pdev->devfn),
154 PCI_FUNC(dev->pdev->devfn));
152 155
153 if (len > dev->unique_len) 156 if (len > dev->unique_len)
154 DRM_ERROR("Unique buffer overflowed\n"); 157 DRM_ERROR("Unique buffer overflowed\n");
@@ -157,7 +160,7 @@ static int drm_set_busid(drm_device_t * dev)
157 drm_alloc(strlen(dev->driver->pci_driver.name) + dev->unique_len + 160 drm_alloc(strlen(dev->driver->pci_driver.name) + dev->unique_len +
158 2, DRM_MEM_DRIVER); 161 2, DRM_MEM_DRIVER);
159 if (dev->devname == NULL) 162 if (dev->devname == NULL)
160 return ENOMEM; 163 return -ENOMEM;
161 164
162 sprintf(dev->devname, "%s@%s", dev->driver->pci_driver.name, 165 sprintf(dev->devname, "%s@%s", dev->driver->pci_driver.name,
163 dev->unique); 166 dev->unique);
@@ -330,27 +333,32 @@ int drm_setversion(DRM_IOCTL_ARGS)
330 drm_set_version_t retv; 333 drm_set_version_t retv;
331 int if_version; 334 int if_version;
332 drm_set_version_t __user *argp = (void __user *)data; 335 drm_set_version_t __user *argp = (void __user *)data;
336 int ret;
333 337
334 DRM_COPY_FROM_USER_IOCTL(sv, argp, sizeof(sv)); 338 if (copy_from_user(&sv, argp, sizeof(sv)))
339 return -EFAULT;
335 340
336 retv.drm_di_major = DRM_IF_MAJOR; 341 retv.drm_di_major = DRM_IF_MAJOR;
337 retv.drm_di_minor = DRM_IF_MINOR; 342 retv.drm_di_minor = DRM_IF_MINOR;
338 retv.drm_dd_major = dev->driver->major; 343 retv.drm_dd_major = dev->driver->major;
339 retv.drm_dd_minor = dev->driver->minor; 344 retv.drm_dd_minor = dev->driver->minor;
340 345
341 DRM_COPY_TO_USER_IOCTL(argp, retv, sizeof(sv)); 346 if (copy_to_user(argp, &retv, sizeof(retv)))
347 return -EFAULT;
342 348
343 if (sv.drm_di_major != -1) { 349 if (sv.drm_di_major != -1) {
344 if (sv.drm_di_major != DRM_IF_MAJOR || 350 if (sv.drm_di_major != DRM_IF_MAJOR ||
345 sv.drm_di_minor < 0 || sv.drm_di_minor > DRM_IF_MINOR) 351 sv.drm_di_minor < 0 || sv.drm_di_minor > DRM_IF_MINOR)
346 return EINVAL; 352 return -EINVAL;
347 if_version = DRM_IF_VERSION(sv.drm_di_major, sv.drm_di_minor); 353 if_version = DRM_IF_VERSION(sv.drm_di_major, sv.drm_di_minor);
348 dev->if_version = DRM_MAX(if_version, dev->if_version); 354 dev->if_version = max(if_version, dev->if_version);
349 if (sv.drm_di_minor >= 1) { 355 if (sv.drm_di_minor >= 1) {
350 /* 356 /*
351 * Version 1.1 includes tying of DRM to specific device 357 * Version 1.1 includes tying of DRM to specific device
352 */ 358 */
353 drm_set_busid(dev); 359 ret = drm_set_busid(dev);
360 if (ret)
361 return ret;
354 } 362 }
355 } 363 }
356 364
@@ -358,7 +366,7 @@ int drm_setversion(DRM_IOCTL_ARGS)
358 if (sv.drm_dd_major != dev->driver->major || 366 if (sv.drm_dd_major != dev->driver->major ||
359 sv.drm_dd_minor < 0 367 sv.drm_dd_minor < 0
360 || sv.drm_dd_minor > dev->driver->minor) 368 || sv.drm_dd_minor > dev->driver->minor)
361 return EINVAL; 369 return -EINVAL;
362 370
363 if (dev->driver->set_version) 371 if (dev->driver->set_version)
364 dev->driver->set_version(dev, &sv); 372 dev->driver->set_version(dev, &sv);
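[Editorial note: with the bus id now read straight from dev->pdev, the unique string keeps its old format. For illustration only, a device at domain 0, bus 1, slot 0, function 0 (hypothetical values) yields:]

	snprintf(dev->unique, dev->unique_len, "pci:%04x:%02x:%02x.%d",
		 0, 1, 0, 0);	/* -> "pci:0000:01:00.0" */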
diff --git a/drivers/char/drm/drm_irq.c b/drivers/char/drm/drm_irq.c
index ebdb7182c4fd..4553a3a1e496 100644
--- a/drivers/char/drm/drm_irq.c
+++ b/drivers/char/drm/drm_irq.c
@@ -64,9 +64,9 @@ int drm_irq_by_busid(struct inode *inode, struct file *filp,
 	if (copy_from_user(&p, argp, sizeof(p)))
 		return -EFAULT;
 
-	if ((p.busnum >> 8) != dev->pci_domain ||
-	    (p.busnum & 0xff) != dev->pci_bus ||
-	    p.devnum != dev->pci_slot || p.funcnum != dev->pci_func)
+	if ((p.busnum >> 8) != drm_get_pci_domain(dev) ||
+	    (p.busnum & 0xff) != dev->pdev->bus->number ||
+	    p.devnum != PCI_SLOT(dev->pdev->devfn) || p.funcnum != PCI_FUNC(dev->pdev->devfn))
 		return -EINVAL;
 
 	p.irq = dev->irq;
@@ -255,7 +255,8 @@ int drm_wait_vblank(DRM_IOCTL_ARGS)
 	if (!dev->irq)
 		return -EINVAL;
 
-	DRM_COPY_FROM_USER_IOCTL(vblwait, argp, sizeof(vblwait));
+	if (copy_from_user(&vblwait, argp, sizeof(vblwait)))
+		return -EFAULT;
 
 	switch (vblwait.request.type & ~_DRM_VBLANK_FLAGS_MASK) {
 	case _DRM_VBLANK_RELATIVE:
@@ -329,7 +330,8 @@ int drm_wait_vblank(DRM_IOCTL_ARGS)
 	}
 
       done:
-	DRM_COPY_TO_USER_IOCTL(argp, vblwait, sizeof(vblwait));
+	if (copy_to_user(argp, &vblwait, sizeof(vblwait)))
+		return -EFAULT;
 
 	return ret;
 }
diff --git a/drivers/char/drm/drm_mm.c b/drivers/char/drm/drm_mm.c
new file mode 100644
index 000000000000..617526bd5b0c
--- /dev/null
+++ b/drivers/char/drm/drm_mm.c
@@ -0,0 +1,201 @@
1/**************************************************************************
2 *
3 * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA.
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 *
27 **************************************************************************/
28
29/*
30 * Generic simple memory manager implementation. Intended to be used as a base
31 * class implementation for more advanced memory managers.
32 *
33 * Note that the algorithm used is quite simple and there might be substantial
34 * performance gains if a smarter free list is implemented. Currently it is just an
35 * unordered stack of free regions. This could easily be improved if an RB-tree
36 * is used instead. At least if we expect heavy fragmentation.
37 *
38 * Aligned allocations can also see improvement.
39 *
40 * Authors:
41 * Thomas Hellström <thomas-at-tungstengraphics-dot-com>
42 */
43
44#include "drmP.h"
45
46drm_mm_node_t *drm_mm_get_block(drm_mm_node_t * parent,
47 unsigned long size, unsigned alignment)
48{
49
50 drm_mm_node_t *child;
51
52 if (alignment)
53 size += alignment - 1;
54
55 if (parent->size == size) {
56 list_del_init(&parent->fl_entry);
57 parent->free = 0;
58 return parent;
59 } else {
60 child = (drm_mm_node_t *) drm_alloc(sizeof(*child), DRM_MEM_MM);
61 if (!child)
62 return NULL;
63
64 INIT_LIST_HEAD(&child->ml_entry);
65 INIT_LIST_HEAD(&child->fl_entry);
66
67 child->free = 0;
68 child->size = size;
69 child->start = parent->start;
70
71 list_add_tail(&child->ml_entry, &parent->ml_entry);
72 parent->size -= size;
73 parent->start += size;
74 }
75 return child;
76}
77
78/*
79 * Put a block. Merge with the previous and / or next block if they are free.
80 * Otherwise add to the free stack.
81 */
82
83void drm_mm_put_block(drm_mm_t * mm, drm_mm_node_t * cur)
84{
85
86 drm_mm_node_t *list_root = &mm->root_node;
87 struct list_head *cur_head = &cur->ml_entry;
88 struct list_head *root_head = &list_root->ml_entry;
89 drm_mm_node_t *prev_node = NULL;
90 drm_mm_node_t *next_node;
91
92 int merged = 0;
93
94 if (cur_head->prev != root_head) {
95 prev_node = list_entry(cur_head->prev, drm_mm_node_t, ml_entry);
96 if (prev_node->free) {
97 prev_node->size += cur->size;
98 merged = 1;
99 }
100 }
101 if (cur_head->next != root_head) {
102 next_node = list_entry(cur_head->next, drm_mm_node_t, ml_entry);
103 if (next_node->free) {
104 if (merged) {
105 prev_node->size += next_node->size;
106 list_del(&next_node->ml_entry);
107 list_del(&next_node->fl_entry);
108 drm_free(next_node, sizeof(*next_node),
109 DRM_MEM_MM);
110 } else {
111 next_node->size += cur->size;
112 next_node->start = cur->start;
113 merged = 1;
114 }
115 }
116 }
117 if (!merged) {
118 cur->free = 1;
119 list_add(&cur->fl_entry, &list_root->fl_entry);
120 } else {
121 list_del(&cur->ml_entry);
122 drm_free(cur, sizeof(*cur), DRM_MEM_MM);
123 }
124}
125
126drm_mm_node_t *drm_mm_search_free(const drm_mm_t * mm,
127 unsigned long size,
128 unsigned alignment, int best_match)
129{
130 struct list_head *list;
131 const struct list_head *free_stack = &mm->root_node.fl_entry;
132 drm_mm_node_t *entry;
133 drm_mm_node_t *best;
134 unsigned long best_size;
135
136 best = NULL;
137 best_size = ~0UL;
138
139 if (alignment)
140 size += alignment - 1;
141
142 list_for_each(list, free_stack) {
143 entry = list_entry(list, drm_mm_node_t, fl_entry);
144 if (entry->size >= size) {
145 if (!best_match)
146 return entry;
147 if (size < best_size) {
148 best = entry;
149 best_size = entry->size;
150 }
151 }
152 }
153
154 return best;
155}
156
157int drm_mm_init(drm_mm_t * mm, unsigned long start, unsigned long size)
158{
159 drm_mm_node_t *child;
160
161 INIT_LIST_HEAD(&mm->root_node.ml_entry);
162 INIT_LIST_HEAD(&mm->root_node.fl_entry);
163 child = (drm_mm_node_t *) drm_alloc(sizeof(*child), DRM_MEM_MM);
164 if (!child)
165 return -ENOMEM;
166
167 INIT_LIST_HEAD(&child->ml_entry);
168 INIT_LIST_HEAD(&child->fl_entry);
169
170 child->start = start;
171 child->size = size;
172 child->free = 1;
173
174 list_add(&child->fl_entry, &mm->root_node.fl_entry);
175 list_add(&child->ml_entry, &mm->root_node.ml_entry);
176
177 return 0;
178}
179
180EXPORT_SYMBOL(drm_mm_init);
181
182void drm_mm_takedown(drm_mm_t * mm)
183{
184 struct list_head *bnode = mm->root_node.fl_entry.next;
185 drm_mm_node_t *entry;
186
187 entry = list_entry(bnode, drm_mm_node_t, fl_entry);
188
189 if (entry->ml_entry.next != &mm->root_node.ml_entry ||
190 entry->fl_entry.next != &mm->root_node.fl_entry) {
191 DRM_ERROR("Memory manager not clean. Delaying takedown\n");
192 return;
193 }
194
195 list_del(&entry->fl_entry);
196 list_del(&entry->ml_entry);
197
198 drm_free(entry, sizeof(*entry), DRM_MEM_MM);
199}
200
201EXPORT_SYMBOL(drm_mm_takedown);
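[Editorial note: a short sketch of how a driver might drive the new allocator. Illustrative only; my_aperture_alloc and the sizes are hypothetical, but the calls match the drm_mm API introduced above.]

	static drm_mm_node_t *my_aperture_alloc(drm_mm_t *mm, unsigned long size,
						unsigned alignment)
	{
		drm_mm_node_t *free_node;

		/* First-fit search of the free stack (best_match = 0). */
		free_node = drm_mm_search_free(mm, size, alignment, 0);
		if (!free_node)
			return NULL;

		/* Split the free region and hand back a node of the requested size. */
		return drm_mm_get_block(free_node, size, alignment);
	}

	/* Lifecycle: drm_mm_init(&mm, 0, aperture_size) to manage a range,
	 * my_aperture_alloc() / drm_mm_put_block(&mm, node) per allocation
	 * (put merges the node with free neighbours), and drm_mm_takedown(&mm)
	 * once every node has been returned. */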
diff --git a/drivers/char/drm/drm_pciids.h b/drivers/char/drm/drm_pciids.h
index b1bb3c7b568d..09398d5fbd3f 100644
--- a/drivers/char/drm/drm_pciids.h
+++ b/drivers/char/drm/drm_pciids.h
@@ -3,13 +3,13 @@
3 Please contact dri-devel@lists.sf.net to add new cards to this list 3 Please contact dri-devel@lists.sf.net to add new cards to this list
4*/ 4*/
5#define radeon_PCI_IDS \ 5#define radeon_PCI_IDS \
6 {0x1002, 0x3150, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|CHIP_IS_MOBILITY}, \ 6 {0x1002, 0x3150, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY}, \
7 {0x1002, 0x3152, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|CHIP_IS_MOBILITY|CHIP_NEW_MEMMAP}, \ 7 {0x1002, 0x3152, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
8 {0x1002, 0x3154, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|CHIP_IS_MOBILITY|CHIP_NEW_MEMMAP}, \ 8 {0x1002, 0x3154, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
9 {0x1002, 0x3E50, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|CHIP_NEW_MEMMAP}, \ 9 {0x1002, 0x3E50, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_NEW_MEMMAP}, \
10 {0x1002, 0x3E54, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|CHIP_NEW_MEMMAP}, \ 10 {0x1002, 0x3E54, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_NEW_MEMMAP}, \
11 {0x1002, 0x4136, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS100|CHIP_IS_IGP}, \ 11 {0x1002, 0x4136, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS100|RADEON_IS_IGP}, \
12 {0x1002, 0x4137, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS200|CHIP_IS_IGP}, \ 12 {0x1002, 0x4137, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS200|RADEON_IS_IGP}, \
13 {0x1002, 0x4144, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R300}, \ 13 {0x1002, 0x4144, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R300}, \
14 {0x1002, 0x4145, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R300}, \ 14 {0x1002, 0x4145, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R300}, \
15 {0x1002, 0x4146, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R300}, \ 15 {0x1002, 0x4146, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R300}, \
@@ -25,35 +25,35 @@
25 {0x1002, 0x4154, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350}, \ 25 {0x1002, 0x4154, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350}, \
26 {0x1002, 0x4155, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350}, \ 26 {0x1002, 0x4155, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350}, \
27 {0x1002, 0x4156, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350}, \ 27 {0x1002, 0x4156, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350}, \
28 {0x1002, 0x4237, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS200|CHIP_IS_IGP}, \ 28 {0x1002, 0x4237, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS200|RADEON_IS_IGP}, \
29 {0x1002, 0x4242, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R200}, \ 29 {0x1002, 0x4242, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R200}, \
30 {0x1002, 0x4243, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R200}, \ 30 {0x1002, 0x4243, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R200}, \
31 {0x1002, 0x4336, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS100|CHIP_IS_IGP|CHIP_IS_MOBILITY}, \ 31 {0x1002, 0x4336, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS100|RADEON_IS_IGP|RADEON_IS_MOBILITY}, \
32 {0x1002, 0x4337, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS200|CHIP_IS_IGP|CHIP_IS_MOBILITY}, \ 32 {0x1002, 0x4337, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS200|RADEON_IS_IGP|RADEON_IS_MOBILITY}, \
33 {0x1002, 0x4437, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS200|CHIP_IS_IGP|CHIP_IS_MOBILITY}, \ 33 {0x1002, 0x4437, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS200|RADEON_IS_IGP|RADEON_IS_MOBILITY}, \
34 {0x1002, 0x4966, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV250}, \ 34 {0x1002, 0x4966, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV250}, \
35 {0x1002, 0x4967, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV250}, \ 35 {0x1002, 0x4967, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV250}, \
36 {0x1002, 0x4A48, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|CHIP_NEW_MEMMAP}, \ 36 {0x1002, 0x4A48, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
37 {0x1002, 0x4A49, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|CHIP_NEW_MEMMAP}, \ 37 {0x1002, 0x4A49, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
38 {0x1002, 0x4A4A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|CHIP_NEW_MEMMAP}, \ 38 {0x1002, 0x4A4A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
39 {0x1002, 0x4A4B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|CHIP_NEW_MEMMAP}, \ 39 {0x1002, 0x4A4B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
40 {0x1002, 0x4A4C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|CHIP_NEW_MEMMAP}, \ 40 {0x1002, 0x4A4C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
41 {0x1002, 0x4A4D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|CHIP_NEW_MEMMAP}, \ 41 {0x1002, 0x4A4D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
42 {0x1002, 0x4A4E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|CHIP_IS_MOBILITY|CHIP_NEW_MEMMAP}, \ 42 {0x1002, 0x4A4E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
43 {0x1002, 0x4A4F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|CHIP_NEW_MEMMAP}, \ 43 {0x1002, 0x4A4F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
44 {0x1002, 0x4A50, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|CHIP_NEW_MEMMAP}, \ 44 {0x1002, 0x4A50, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
45 {0x1002, 0x4A54, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|CHIP_NEW_MEMMAP}, \ 45 {0x1002, 0x4A54, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
46 {0x1002, 0x4B49, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|CHIP_NEW_MEMMAP}, \ 46 {0x1002, 0x4B49, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
47 {0x1002, 0x4B4A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|CHIP_NEW_MEMMAP}, \ 47 {0x1002, 0x4B4A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
48 {0x1002, 0x4B4B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|CHIP_NEW_MEMMAP}, \ 48 {0x1002, 0x4B4B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
49 {0x1002, 0x4B4C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|CHIP_NEW_MEMMAP}, \ 49 {0x1002, 0x4B4C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
50 {0x1002, 0x4C57, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV200|CHIP_IS_MOBILITY}, \ 50 {0x1002, 0x4C57, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV200|RADEON_IS_MOBILITY}, \
51 {0x1002, 0x4C58, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV200|CHIP_IS_MOBILITY}, \ 51 {0x1002, 0x4C58, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV200|RADEON_IS_MOBILITY}, \
52 {0x1002, 0x4C59, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV100|CHIP_IS_MOBILITY}, \ 52 {0x1002, 0x4C59, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV100|RADEON_IS_MOBILITY}, \
53 {0x1002, 0x4C5A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV100|CHIP_IS_MOBILITY}, \ 53 {0x1002, 0x4C5A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV100|RADEON_IS_MOBILITY}, \
54 {0x1002, 0x4C64, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV250|CHIP_IS_MOBILITY}, \ 54 {0x1002, 0x4C64, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV250|RADEON_IS_MOBILITY}, \
55 {0x1002, 0x4C66, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV250|CHIP_IS_MOBILITY}, \ 55 {0x1002, 0x4C66, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV250|RADEON_IS_MOBILITY}, \
56 {0x1002, 0x4C67, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV250|CHIP_IS_MOBILITY}, \ 56 {0x1002, 0x4C67, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV250|RADEON_IS_MOBILITY}, \
57 {0x1002, 0x4E44, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R300}, \ 57 {0x1002, 0x4E44, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R300}, \
58 {0x1002, 0x4E45, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R300}, \ 58 {0x1002, 0x4E45, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R300}, \
59 {0x1002, 0x4E46, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R300}, \ 59 {0x1002, 0x4E46, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R300}, \
@@ -62,16 +62,16 @@
62 {0x1002, 0x4E49, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R350}, \ 62 {0x1002, 0x4E49, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R350}, \
63 {0x1002, 0x4E4A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R350}, \ 63 {0x1002, 0x4E4A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R350}, \
64 {0x1002, 0x4E4B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R350}, \ 64 {0x1002, 0x4E4B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R350}, \
65 {0x1002, 0x4E50, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350|CHIP_IS_MOBILITY}, \ 65 {0x1002, 0x4E50, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350|RADEON_IS_MOBILITY}, \
66 {0x1002, 0x4E51, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350|CHIP_IS_MOBILITY}, \ 66 {0x1002, 0x4E51, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350|RADEON_IS_MOBILITY}, \
67 {0x1002, 0x4E52, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350|CHIP_IS_MOBILITY}, \ 67 {0x1002, 0x4E52, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350|RADEON_IS_MOBILITY}, \
68 {0x1002, 0x4E53, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350|CHIP_IS_MOBILITY}, \ 68 {0x1002, 0x4E53, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350|RADEON_IS_MOBILITY}, \
69 {0x1002, 0x4E54, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350|CHIP_IS_MOBILITY}, \ 69 {0x1002, 0x4E54, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350|RADEON_IS_MOBILITY}, \
70 {0x1002, 0x4E56, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350|CHIP_IS_MOBILITY}, \ 70 {0x1002, 0x4E56, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350|RADEON_IS_MOBILITY}, \
71 {0x1002, 0x5144, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R100|CHIP_SINGLE_CRTC}, \ 71 {0x1002, 0x5144, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R100|RADEON_SINGLE_CRTC}, \
72 {0x1002, 0x5145, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R100|CHIP_SINGLE_CRTC}, \ 72 {0x1002, 0x5145, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R100|RADEON_SINGLE_CRTC}, \
73 {0x1002, 0x5146, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R100|CHIP_SINGLE_CRTC}, \ 73 {0x1002, 0x5146, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R100|RADEON_SINGLE_CRTC}, \
74 {0x1002, 0x5147, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R100|CHIP_SINGLE_CRTC}, \ 74 {0x1002, 0x5147, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R100|RADEON_SINGLE_CRTC}, \
75 {0x1002, 0x5148, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R200}, \ 75 {0x1002, 0x5148, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R200}, \
76 {0x1002, 0x514C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R200}, \ 76 {0x1002, 0x514C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R200}, \
77 {0x1002, 0x514D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R200}, \ 77 {0x1002, 0x514D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R200}, \
@@ -80,59 +80,59 @@
80 {0x1002, 0x5159, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV100}, \ 80 {0x1002, 0x5159, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV100}, \
81 {0x1002, 0x515A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV100}, \ 81 {0x1002, 0x515A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV100}, \
82 {0x1002, 0x515E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV100}, \ 82 {0x1002, 0x515E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV100}, \
83 {0x1002, 0x5460, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|CHIP_IS_MOBILITY}, \ 83 {0x1002, 0x5460, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY}, \
84 {0x1002, 0x5462, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|CHIP_IS_MOBILITY}, \ 84 {0x1002, 0x5462, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY}, \
85 {0x1002, 0x5464, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|CHIP_IS_MOBILITY}, \ 85 {0x1002, 0x5464, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY}, \
86 {0x1002, 0x5548, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|CHIP_NEW_MEMMAP}, \ 86 {0x1002, 0x5548, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
87 {0x1002, 0x5549, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|CHIP_NEW_MEMMAP}, \ 87 {0x1002, 0x5549, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
88 {0x1002, 0x554A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|CHIP_NEW_MEMMAP}, \ 88 {0x1002, 0x554A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
89 {0x1002, 0x554B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|CHIP_NEW_MEMMAP}, \ 89 {0x1002, 0x554B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
90 {0x1002, 0x554C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|CHIP_NEW_MEMMAP}, \ 90 {0x1002, 0x554C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
91 {0x1002, 0x554D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|CHIP_NEW_MEMMAP}, \ 91 {0x1002, 0x554D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
92 {0x1002, 0x554E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|CHIP_NEW_MEMMAP}, \ 92 {0x1002, 0x554E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
93 {0x1002, 0x554F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|CHIP_NEW_MEMMAP}, \ 93 {0x1002, 0x554F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
94 {0x1002, 0x5550, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|CHIP_NEW_MEMMAP}, \ 94 {0x1002, 0x5550, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
95 {0x1002, 0x5551, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|CHIP_NEW_MEMMAP}, \ 95 {0x1002, 0x5551, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
96 {0x1002, 0x5552, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|CHIP_NEW_MEMMAP}, \ 96 {0x1002, 0x5552, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
97 {0x1002, 0x5554, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|CHIP_NEW_MEMMAP}, \ 97 {0x1002, 0x5554, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
98 {0x1002, 0x564A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|CHIP_IS_MOBILITY|CHIP_NEW_MEMMAP}, \ 98 {0x1002, 0x564A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
99 {0x1002, 0x564B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|CHIP_IS_MOBILITY|CHIP_NEW_MEMMAP}, \ 99 {0x1002, 0x564B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
100 {0x1002, 0x564F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|CHIP_IS_MOBILITY|CHIP_NEW_MEMMAP}, \ 100 {0x1002, 0x564F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
101 {0x1002, 0x5652, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|CHIP_IS_MOBILITY|CHIP_NEW_MEMMAP}, \ 101 {0x1002, 0x5652, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
102 {0x1002, 0x5653, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|CHIP_IS_MOBILITY|CHIP_NEW_MEMMAP}, \ 102 {0x1002, 0x5653, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
103 {0x1002, 0x5834, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS300|CHIP_IS_IGP}, \ 103 {0x1002, 0x5834, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS300|RADEON_IS_IGP}, \
104 {0x1002, 0x5835, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS300|CHIP_IS_IGP|CHIP_IS_MOBILITY}, \ 104 {0x1002, 0x5835, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS300|RADEON_IS_IGP|RADEON_IS_MOBILITY}, \
105 {0x1002, 0x5960, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV280}, \ 105 {0x1002, 0x5960, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV280}, \
106 {0x1002, 0x5961, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV280}, \ 106 {0x1002, 0x5961, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV280}, \
107 {0x1002, 0x5962, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV280}, \ 107 {0x1002, 0x5962, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV280}, \
108 {0x1002, 0x5964, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV280}, \ 108 {0x1002, 0x5964, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV280}, \
109 {0x1002, 0x5965, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV280}, \ 109 {0x1002, 0x5965, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV280}, \
110 {0x1002, 0x5969, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV100}, \ 110 {0x1002, 0x5969, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV100}, \
111 {0x1002, 0x5b60, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|CHIP_NEW_MEMMAP}, \ 111 {0x1002, 0x5b60, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_NEW_MEMMAP}, \
112 {0x1002, 0x5b62, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|CHIP_NEW_MEMMAP}, \ 112 {0x1002, 0x5b62, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_NEW_MEMMAP}, \
113 {0x1002, 0x5b63, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|CHIP_NEW_MEMMAP}, \ 113 {0x1002, 0x5b63, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_NEW_MEMMAP}, \
114 {0x1002, 0x5b64, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|CHIP_NEW_MEMMAP}, \ 114 {0x1002, 0x5b64, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_NEW_MEMMAP}, \
115 {0x1002, 0x5b65, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|CHIP_NEW_MEMMAP}, \ 115 {0x1002, 0x5b65, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_NEW_MEMMAP}, \
116 {0x1002, 0x5c61, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV280|CHIP_IS_MOBILITY}, \ 116 {0x1002, 0x5c61, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV280|RADEON_IS_MOBILITY}, \
117 {0x1002, 0x5c63, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV280|CHIP_IS_MOBILITY}, \ 117 {0x1002, 0x5c63, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV280|RADEON_IS_MOBILITY}, \
118 {0x1002, 0x5d48, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|CHIP_IS_MOBILITY|CHIP_NEW_MEMMAP}, \ 118 {0x1002, 0x5d48, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
119 {0x1002, 0x5d49, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|CHIP_IS_MOBILITY|CHIP_NEW_MEMMAP}, \ 119 {0x1002, 0x5d49, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
120 {0x1002, 0x5d4a, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|CHIP_IS_MOBILITY|CHIP_NEW_MEMMAP}, \ 120 {0x1002, 0x5d4a, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
121 {0x1002, 0x5d4c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|CHIP_NEW_MEMMAP}, \ 121 {0x1002, 0x5d4c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
122 {0x1002, 0x5d4d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|CHIP_NEW_MEMMAP}, \ 122 {0x1002, 0x5d4d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
123 {0x1002, 0x5d4e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|CHIP_NEW_MEMMAP}, \ 123 {0x1002, 0x5d4e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
124 {0x1002, 0x5d4f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|CHIP_NEW_MEMMAP}, \ 124 {0x1002, 0x5d4f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
125 {0x1002, 0x5d50, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|CHIP_NEW_MEMMAP}, \ 125 {0x1002, 0x5d50, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
126 {0x1002, 0x5d52, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|CHIP_NEW_MEMMAP}, \ 126 {0x1002, 0x5d52, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
127 {0x1002, 0x5d57, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|CHIP_NEW_MEMMAP}, \ 127 {0x1002, 0x5d57, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
128 {0x1002, 0x5e48, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|CHIP_NEW_MEMMAP}, \ 128 {0x1002, 0x5e48, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_NEW_MEMMAP}, \
129 {0x1002, 0x5e4a, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|CHIP_NEW_MEMMAP}, \ 129 {0x1002, 0x5e4a, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_NEW_MEMMAP}, \
130 {0x1002, 0x5e4b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|CHIP_NEW_MEMMAP}, \ 130 {0x1002, 0x5e4b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_NEW_MEMMAP}, \
131 {0x1002, 0x5e4c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|CHIP_NEW_MEMMAP}, \ 131 {0x1002, 0x5e4c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_NEW_MEMMAP}, \
132 {0x1002, 0x5e4d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|CHIP_NEW_MEMMAP}, \ 132 {0x1002, 0x5e4d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_NEW_MEMMAP}, \
133 {0x1002, 0x5e4f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|CHIP_NEW_MEMMAP}, \ 133 {0x1002, 0x5e4f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_NEW_MEMMAP}, \
134 {0x1002, 0x7834, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS300|CHIP_IS_IGP|CHIP_NEW_MEMMAP}, \ 134 {0x1002, 0x7834, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS300|RADEON_IS_IGP|RADEON_NEW_MEMMAP}, \
135 {0x1002, 0x7835, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS300|CHIP_IS_IGP|CHIP_IS_MOBILITY|CHIP_NEW_MEMMAP}, \ 135 {0x1002, 0x7835, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS300|RADEON_IS_IGP|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
136 {0, 0, 0} 136 {0, 0, 0}
137 137
138#define r128_PCI_IDS \ 138#define r128_PCI_IDS \
@@ -209,6 +209,7 @@
209 {0x1039, 0x0300, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ 209 {0x1039, 0x0300, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
210 {0x1039, 0x5300, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ 210 {0x1039, 0x5300, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
211 {0x1039, 0x6300, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ 211 {0x1039, 0x6300, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
212 {0x1039, 0x6330, PCI_ANY_ID, PCI_ANY_ID, 0, 0, SIS_CHIP_315}, \
212 {0x1039, 0x7300, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ 213 {0x1039, 0x7300, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
213 {0, 0, 0} 214 {0, 0, 0}
214 215
@@ -227,6 +228,10 @@
227 {0x1106, 0x3122, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ 228 {0x1106, 0x3122, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
228 {0x1106, 0x7205, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ 229 {0x1106, 0x7205, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
229 {0x1106, 0x3108, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ 230 {0x1106, 0x3108, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
231 {0x1106, 0x3304, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
232 {0x1106, 0x3157, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
233 {0x1106, 0x3344, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
234 {0x1106, 0x7204, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
230 {0, 0, 0} 235 {0, 0, 0}
231 236
232#define i810_PCI_IDS \ 237#define i810_PCI_IDS \
@@ -285,5 +290,9 @@
285 {0x8086, 0x2592, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ 290 {0x8086, 0x2592, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
286 {0x8086, 0x2772, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ 291 {0x8086, 0x2772, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
287 {0x8086, 0x27a2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ 292 {0x8086, 0x27a2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
293 {0x8086, 0x2972, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
294 {0x8086, 0x2982, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
295 {0x8086, 0x2992, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
296 {0x8086, 0x29a2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
288 {0, 0, 0} 297 {0, 0, 0}
289 298
diff --git a/drivers/char/drm/drm_proc.c b/drivers/char/drm/drm_proc.c
index 362a270af0f1..62d5fe15f046 100644
--- a/drivers/char/drm/drm_proc.c
+++ b/drivers/char/drm/drm_proc.c
@@ -510,7 +510,7 @@ static int drm__vma_info(char *buf, char **start, off_t offset, int request,
510 vma->vm_flags & VM_MAYSHARE ? 's' : 'p', 510 vma->vm_flags & VM_MAYSHARE ? 's' : 'p',
511 vma->vm_flags & VM_LOCKED ? 'l' : '-', 511 vma->vm_flags & VM_LOCKED ? 'l' : '-',
512 vma->vm_flags & VM_IO ? 'i' : '-', 512 vma->vm_flags & VM_IO ? 'i' : '-',
513 VM_OFFSET(vma)); 513 vma->vm_pgoff << PAGE_SHIFT);
514 514
515#if defined(__i386__) 515#if defined(__i386__)
516 pgprot = pgprot_val(vma->vm_page_prot); 516 pgprot = pgprot_val(vma->vm_page_prot);
diff --git a/drivers/char/drm/drm_sman.c b/drivers/char/drm/drm_sman.c
new file mode 100644
index 000000000000..425c82336ee0
--- /dev/null
+++ b/drivers/char/drm/drm_sman.c
@@ -0,0 +1,352 @@
1/**************************************************************************
2 *
3 * Copyright 2006 Tungsten Graphics, Inc., Bismarck., ND., USA.
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
18 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
19 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
20 * USE OR OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * The above copyright notice and this permission notice (including the
23 * next paragraph) shall be included in all copies or substantial portions
24 * of the Software.
25 *
26 *
27 **************************************************************************/
28/*
29 * Simple memory manager interface that keeps track of allocated regions on a
30 * per "owner" basis. All regions associated with an "owner" can be released
31 * with a simple call, typically when the "owner" exits. The owner is any
32 * "unsigned long" identifier; it can typically be a pointer to a file private
33 * struct or a context identifier.
34 *
35 * Authors:
36 * Thomas Hellström <thomas-at-tungstengraphics-dot-com>
37 */
38
39#include "drm_sman.h"
40
41typedef struct drm_owner_item {
42 drm_hash_item_t owner_hash;
43 struct list_head sman_list;
44 struct list_head mem_blocks;
45} drm_owner_item_t;
46
47void drm_sman_takedown(drm_sman_t * sman)
48{
49 drm_ht_remove(&sman->user_hash_tab);
50 drm_ht_remove(&sman->owner_hash_tab);
51 if (sman->mm)
52 drm_free(sman->mm, sman->num_managers * sizeof(*sman->mm),
53 DRM_MEM_MM);
54}
55
56EXPORT_SYMBOL(drm_sman_takedown);
57
58int
59drm_sman_init(drm_sman_t * sman, unsigned int num_managers,
60 unsigned int user_order, unsigned int owner_order)
61{
62 int ret = 0;
63
64 sman->mm = (drm_sman_mm_t *) drm_calloc(num_managers, sizeof(*sman->mm),
65 DRM_MEM_MM);
66 if (!sman->mm) {
67 ret = -ENOMEM;
68 goto out;
69 }
70 sman->num_managers = num_managers;
71 INIT_LIST_HEAD(&sman->owner_items);
72 ret = drm_ht_create(&sman->owner_hash_tab, owner_order);
73 if (ret)
74 goto out1;
75 ret = drm_ht_create(&sman->user_hash_tab, user_order);
76 if (!ret)
77 goto out;
78
79 drm_ht_remove(&sman->owner_hash_tab);
80out1:
81 drm_free(sman->mm, num_managers * sizeof(*sman->mm), DRM_MEM_MM);
82out:
83 return ret;
84}
85
86EXPORT_SYMBOL(drm_sman_init);
87
88static void *drm_sman_mm_allocate(void *private, unsigned long size,
89 unsigned alignment)
90{
91 drm_mm_t *mm = (drm_mm_t *) private;
92 drm_mm_node_t *tmp;
93
94 tmp = drm_mm_search_free(mm, size, alignment, 1);
95 if (!tmp) {
96 return NULL;
97 }
98 tmp = drm_mm_get_block(tmp, size, alignment);
99 return tmp;
100}
101
102static void drm_sman_mm_free(void *private, void *ref)
103{
104 drm_mm_t *mm = (drm_mm_t *) private;
105 drm_mm_node_t *node = (drm_mm_node_t *) ref;
106
107 drm_mm_put_block(mm, node);
108}
109
110static void drm_sman_mm_destroy(void *private)
111{
112 drm_mm_t *mm = (drm_mm_t *) private;
113 drm_mm_takedown(mm);
114 drm_free(mm, sizeof(*mm), DRM_MEM_MM);
115}
116
117static unsigned long drm_sman_mm_offset(void *private, void *ref)
118{
119 drm_mm_node_t *node = (drm_mm_node_t *) ref;
120 return node->start;
121}
122
123int
124drm_sman_set_range(drm_sman_t * sman, unsigned int manager,
125 unsigned long start, unsigned long size)
126{
127 drm_sman_mm_t *sman_mm;
128 drm_mm_t *mm;
129 int ret;
130
131 BUG_ON(manager >= sman->num_managers);
132
133 sman_mm = &sman->mm[manager];
134 mm = drm_calloc(1, sizeof(*mm), DRM_MEM_MM);
135 if (!mm) {
136 return -ENOMEM;
137 }
138 sman_mm->private = mm;
139 ret = drm_mm_init(mm, start, size);
140
141 if (ret) {
142 drm_free(mm, sizeof(*mm), DRM_MEM_MM);
143 return ret;
144 }
145
146 sman_mm->allocate = drm_sman_mm_allocate;
147 sman_mm->free = drm_sman_mm_free;
148 sman_mm->destroy = drm_sman_mm_destroy;
149 sman_mm->offset = drm_sman_mm_offset;
150
151 return 0;
152}
153
154EXPORT_SYMBOL(drm_sman_set_range);
155
156int
157drm_sman_set_manager(drm_sman_t * sman, unsigned int manager,
158 drm_sman_mm_t * allocator)
159{
160 BUG_ON(manager >= sman->num_managers);
161 sman->mm[manager] = *allocator;
162
163 return 0;
164}
165
166static drm_owner_item_t *drm_sman_get_owner_item(drm_sman_t * sman,
167 unsigned long owner)
168{
169 int ret;
170 drm_hash_item_t *owner_hash_item;
171 drm_owner_item_t *owner_item;
172
173 ret = drm_ht_find_item(&sman->owner_hash_tab, owner, &owner_hash_item);
174 if (!ret) {
175 return drm_hash_entry(owner_hash_item, drm_owner_item_t,
176 owner_hash);
177 }
178
179 owner_item = drm_calloc(1, sizeof(*owner_item), DRM_MEM_MM);
180 if (!owner_item)
181 goto out;
182
183 INIT_LIST_HEAD(&owner_item->mem_blocks);
184 owner_item->owner_hash.key = owner;
185 if (drm_ht_insert_item(&sman->owner_hash_tab, &owner_item->owner_hash))
186 goto out1;
187
188 list_add_tail(&owner_item->sman_list, &sman->owner_items);
189 return owner_item;
190
191out1:
192 drm_free(owner_item, sizeof(*owner_item), DRM_MEM_MM);
193out:
194 return NULL;
195}
196
197drm_memblock_item_t *drm_sman_alloc(drm_sman_t *sman, unsigned int manager,
198 unsigned long size, unsigned alignment,
199 unsigned long owner)
200{
201 void *tmp;
202 drm_sman_mm_t *sman_mm;
203 drm_owner_item_t *owner_item;
204 drm_memblock_item_t *memblock;
205
206 BUG_ON(manager >= sman->num_managers);
207
208 sman_mm = &sman->mm[manager];
209 tmp = sman_mm->allocate(sman_mm->private, size, alignment);
210
211 if (!tmp) {
212 return NULL;
213 }
214
215 memblock = drm_calloc(1, sizeof(*memblock), DRM_MEM_MM);
216
217 if (!memblock)
218 goto out;
219
220 memblock->mm_info = tmp;
221 memblock->mm = sman_mm;
222 memblock->sman = sman;
223
224 if (drm_ht_just_insert_please
225 (&sman->user_hash_tab, &memblock->user_hash,
226 (unsigned long)memblock, 32, 0, 0))
227 goto out1;
228
229 owner_item = drm_sman_get_owner_item(sman, owner);
230 if (!owner_item)
231 goto out2;
232
233 list_add_tail(&memblock->owner_list, &owner_item->mem_blocks);
234
235 return memblock;
236
237out2:
238 drm_ht_remove_item(&sman->user_hash_tab, &memblock->user_hash);
239out1:
240 drm_free(memblock, sizeof(*memblock), DRM_MEM_MM);
241out:
242 sman_mm->free(sman_mm->private, tmp);
243
244 return NULL;
245}
246
247EXPORT_SYMBOL(drm_sman_alloc);
248
249static void drm_sman_free(drm_memblock_item_t *item)
250{
251 drm_sman_t *sman = item->sman;
252
253 list_del(&item->owner_list);
254 drm_ht_remove_item(&sman->user_hash_tab, &item->user_hash);
255 item->mm->free(item->mm->private, item->mm_info);
256 drm_free(item, sizeof(*item), DRM_MEM_MM);
257}
258
259int drm_sman_free_key(drm_sman_t *sman, unsigned int key)
260{
261 drm_hash_item_t *hash_item;
262 drm_memblock_item_t *memblock_item;
263
264 if (drm_ht_find_item(&sman->user_hash_tab, key, &hash_item))
265 return -EINVAL;
266
267 memblock_item = drm_hash_entry(hash_item, drm_memblock_item_t, user_hash);
268 drm_sman_free(memblock_item);
269 return 0;
270}
271
272EXPORT_SYMBOL(drm_sman_free_key);
273
274static void drm_sman_remove_owner(drm_sman_t *sman,
275 drm_owner_item_t *owner_item)
276{
277 list_del(&owner_item->sman_list);
278 drm_ht_remove_item(&sman->owner_hash_tab, &owner_item->owner_hash);
279 drm_free(owner_item, sizeof(*owner_item), DRM_MEM_MM);
280}
281
282int drm_sman_owner_clean(drm_sman_t *sman, unsigned long owner)
283{
284
285 drm_hash_item_t *hash_item;
286 drm_owner_item_t *owner_item;
287
288 if (drm_ht_find_item(&sman->owner_hash_tab, owner, &hash_item)) {
289 return -1;
290 }
291
292 owner_item = drm_hash_entry(hash_item, drm_owner_item_t, owner_hash);
293 if (owner_item->mem_blocks.next == &owner_item->mem_blocks) {
294 drm_sman_remove_owner(sman, owner_item);
295 return -1;
296 }
297
298 return 0;
299}
300
301EXPORT_SYMBOL(drm_sman_owner_clean);
302
303static void drm_sman_do_owner_cleanup(drm_sman_t *sman,
304 drm_owner_item_t *owner_item)
305{
306 drm_memblock_item_t *entry, *next;
307
308 list_for_each_entry_safe(entry, next, &owner_item->mem_blocks,
309 owner_list) {
310 drm_sman_free(entry);
311 }
312 drm_sman_remove_owner(sman, owner_item);
313}
314
315void drm_sman_owner_cleanup(drm_sman_t *sman, unsigned long owner)
316{
317
318 drm_hash_item_t *hash_item;
319 drm_owner_item_t *owner_item;
320
321 if (drm_ht_find_item(&sman->owner_hash_tab, owner, &hash_item)) {
322
323 return;
324 }
325
326 owner_item = drm_hash_entry(hash_item, drm_owner_item_t, owner_hash);
327 drm_sman_do_owner_cleanup(sman, owner_item);
328}
329
330EXPORT_SYMBOL(drm_sman_owner_cleanup);
331
332void drm_sman_cleanup(drm_sman_t *sman)
333{
334 drm_owner_item_t *entry, *next;
335 unsigned int i;
336 drm_sman_mm_t *sman_mm;
337
338 list_for_each_entry_safe(entry, next, &sman->owner_items, sman_list) {
339 drm_sman_do_owner_cleanup(sman, entry);
340 }
341 if (sman->mm) {
342 for (i = 0; i < sman->num_managers; ++i) {
343 sman_mm = &sman->mm[i];
344 if (sman_mm->private) {
345 sman_mm->destroy(sman_mm->private);
346 sman_mm->private = NULL;
347 }
348 }
349 }
350}
351
352EXPORT_SYMBOL(drm_sman_cleanup);
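
The new drm_sman interface above is easiest to read with a concrete caller in mind. The sketch below shows roughly how a driver might bring up one pool and allocate from it, using the client's file-private pointer as the "owner"; the hypo_* names, the pool index and the hash orders are illustrative assumptions, not code from this patch.

/* Sketch only: hypothetical driver-side use of drm_sman (not part of this patch). */
#include "drm_sman.h"

#define HYPO_POOL_VRAM 0			/* illustrative pool index */

static drm_sman_t hypo_sman;

static int hypo_memory_init(unsigned long vram_start, unsigned long vram_size)
{
	int ret;

	/* one pool; the orders are log2 of the expected hash bucket counts */
	ret = drm_sman_init(&hypo_sman, 1, 12, 8);
	if (ret)
		return ret;

	/* let the default drm_mm.c backend manage this range */
	ret = drm_sman_set_range(&hypo_sman, HYPO_POOL_VRAM, vram_start, vram_size);
	if (ret)
		drm_sman_takedown(&hypo_sman);
	return ret;
}

static drm_memblock_item_t *hypo_alloc(drm_file_t *priv, unsigned long size)
{
	/* the file-private pointer doubles as the "owner" identifier */
	return drm_sman_alloc(&hypo_sman, HYPO_POOL_VRAM, size, 0,
			      (unsigned long)priv);
}

Freeing a single block goes through drm_sman_free_key() with the key stored in the returned item's user_hash, and everything a client still holds can be reclaimed at once with the owner cleanup calls documented in drm_sman.h below.
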
diff --git a/drivers/char/drm/drm_sman.h b/drivers/char/drm/drm_sman.h
new file mode 100644
index 000000000000..ddc732a1bf27
--- /dev/null
+++ b/drivers/char/drm/drm_sman.h
@@ -0,0 +1,176 @@
1/**************************************************************************
2 *
3 * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA.
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 *
27 **************************************************************************/
28/*
29 * Simple memory MANager interface that keeps track of allocated regions on a
30 * per "owner" basis. All regions associated with an "owner" can be released
31 * with a simple call, typically when the "owner" exits. The owner is any
32 * "unsigned long" identifier; it can typically be a pointer to a file private
33 * struct or a context identifier.
34 *
35 * Authors:
36 * Thomas Hellström <thomas-at-tungstengraphics-dot-com>
37 */
38
39#ifndef DRM_SMAN_H
40#define DRM_SMAN_H
41
42#include "drmP.h"
43#include "drm_hashtab.h"
44
45/*
46 * A class that is an abstraction of a simple memory allocator.
47 * The sman implementation provides a default such allocator
48 * using the drm_mm.c implementation, but the user can replace it.
49 * See the SiS implementation, which may use the SiS FB kernel module
50 * for memory management.
51 */
52
53typedef struct drm_sman_mm {
54 /* private info. If allocated, needs to be destroyed by the destroy
55 function */
56 void *private;
57
58 /* Allocate a memory block with given size and alignment.
59 Return an opaque reference to the memory block */
60
61 void *(*allocate) (void *private, unsigned long size,
62 unsigned alignment);
63
64 /* Free a memory block. "ref" is the opaque reference that we got from
65 the "alloc" function */
66
67 void (*free) (void *private, void *ref);
68
69 /* Free all resources associated with this allocator */
70
71 void (*destroy) (void *private);
72
73 /* Return a memory offset from the opaque reference returned from the
74 "alloc" function */
75
76 unsigned long (*offset) (void *private, void *ref);
77} drm_sman_mm_t;
78
79typedef struct drm_memblock_item {
80 struct list_head owner_list;
81 drm_hash_item_t user_hash;
82 void *mm_info;
83 drm_sman_mm_t *mm;
84 struct drm_sman *sman;
85} drm_memblock_item_t;
86
87typedef struct drm_sman {
88 drm_sman_mm_t *mm;
89 int num_managers;
90 drm_open_hash_t owner_hash_tab;
91 drm_open_hash_t user_hash_tab;
92 struct list_head owner_items;
93} drm_sman_t;
94
95/*
96 * Take down a memory manager. This function should only be called after a
97 * successful init and after a call to drm_sman_cleanup.
98 */
99
100extern void drm_sman_takedown(drm_sman_t * sman);
101
102/*
103 * Allocate structures for a manager.
104 * num_managers is the number of memory pools to manage (VRAM, AGP, ...).
105 * user_order is the log2 of the number of buckets in the user hash table.
106 * set this to approximately log2 of the max number of memory regions
107 * that will be allocated for _all_ pools together.
108 * owner_order is the log2 of the number of buckets in the owner hash table.
109 * set this to approximately log2 of
110 * the number of client file connections that will
111 * be using the manager.
112 *
113 */
114
115extern int drm_sman_init(drm_sman_t * sman, unsigned int num_managers,
116 unsigned int user_order, unsigned int owner_order);
117
118/*
119 * Initialize a drm_mm.c allocator. Should be called only once for each
120 * manager unless a customized allocator is used.
121 */
122
123extern int drm_sman_set_range(drm_sman_t * sman, unsigned int manager,
124 unsigned long start, unsigned long size);
125
126/*
127 * Initialize a customized allocator for one of the managers.
128 * (See the SiS module). The object pointed to by "allocator" is copied,
129 * so it can be destroyed after this call.
130 */
131
132extern int drm_sman_set_manager(drm_sman_t * sman, unsigned int manager,
133 drm_sman_mm_t * allocator);
134
135/*
136 * Allocate a memory block. Alignment is not implemented yet.
137 */
138
139extern drm_memblock_item_t *drm_sman_alloc(drm_sman_t * sman,
140 unsigned int manager,
141 unsigned long size,
142 unsigned alignment,
143 unsigned long owner);
144/*
145 * Free a memory block identified by its user hash key.
146 */
147
148extern int drm_sman_free_key(drm_sman_t * sman, unsigned int key);
149
150/*
151 * Returns nonzero iff there are no stale memory blocks associated with this owner.
152 * Typically called to determine if we need to idle the hardware and call
153 * drm_sman_owner_cleanup. If there are no stale memory blocks, it removes all
154 * resources associated with owner.
155 */
156
157extern int drm_sman_owner_clean(drm_sman_t * sman, unsigned long owner);
158
159/*
160 * Frees all stale memory blocks associated with this owner. Note that this
161 * requires that the hardware is finished with all blocks, so the graphics engine
162 * should be idled before this call is made. This function also frees
163 * any resources associated with "owner" and should be called when owner
164 * is not going to be referenced anymore.
165 */
166
167extern void drm_sman_owner_cleanup(drm_sman_t * sman, unsigned long owner);
168
169/*
170 * Frees all stale memory blocks associated with the memory manager.
171 * See idling above.
172 */
173
174extern void drm_sman_cleanup(drm_sman_t * sman);
175
176#endif
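
To tie the comments above to the cleanup ordering they describe, here is a rough sketch of the per-client and final teardown paths; hypo_sman is the manager instance from the earlier sketch and hypo_idle_hardware() stands in for whatever driver-specific engine idling is required, both assumptions rather than symbols from this patch.

/* Sketch only: the idle-then-reclaim pattern described in drm_sman.h. */
#include "drm_sman.h"

extern drm_sman_t hypo_sman;			/* from the earlier sketch */
extern void hypo_idle_hardware(void);		/* assumed engine-idle helper */

/* Per-client teardown, e.g. from a preclose handler. */
static void hypo_client_close(drm_file_t *priv)
{
	unsigned long owner = (unsigned long)priv;

	/* a nonzero return means this owner has no stale blocks left */
	if (drm_sman_owner_clean(&hypo_sman, owner))
		return;

	hypo_idle_hardware();
	drm_sman_owner_cleanup(&hypo_sman, owner);
}

/* Final teardown, e.g. from lastclose or module unload. */
static void hypo_memory_fini(void)
{
	hypo_idle_hardware();
	drm_sman_cleanup(&hypo_sman);	/* reclaim stale blocks, destroy pool backends */
	drm_sman_takedown(&hypo_sman);	/* then release the manager structures */
}
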
diff --git a/drivers/char/drm/drm_stub.c b/drivers/char/drm/drm_stub.c
index 9a842a36bb27..7b1d4e8659ba 100644
--- a/drivers/char/drm/drm_stub.c
+++ b/drivers/char/drm/drm_stub.c
@@ -65,22 +65,22 @@ static int drm_fill_in_dev(drm_device_t * dev, struct pci_dev *pdev,
65 mutex_init(&dev->ctxlist_mutex); 65 mutex_init(&dev->ctxlist_mutex);
66 66
67 dev->pdev = pdev; 67 dev->pdev = pdev;
68 dev->pci_device = pdev->device;
69 dev->pci_vendor = pdev->vendor;
68 70
69#ifdef __alpha__ 71#ifdef __alpha__
70 dev->hose = pdev->sysdata; 72 dev->hose = pdev->sysdata;
71 dev->pci_domain = dev->hose->bus->number;
72#else
73 dev->pci_domain = 0;
74#endif 73#endif
75 dev->pci_bus = pdev->bus->number;
76 dev->pci_slot = PCI_SLOT(pdev->devfn);
77 dev->pci_func = PCI_FUNC(pdev->devfn);
78 dev->irq = pdev->irq; 74 dev->irq = pdev->irq;
79 75
80 dev->maplist = drm_calloc(1, sizeof(*dev->maplist), DRM_MEM_MAPS); 76 dev->maplist = drm_calloc(1, sizeof(*dev->maplist), DRM_MEM_MAPS);
81 if (dev->maplist == NULL) 77 if (dev->maplist == NULL)
82 return -ENOMEM; 78 return -ENOMEM;
83 INIT_LIST_HEAD(&dev->maplist->head); 79 INIT_LIST_HEAD(&dev->maplist->head);
80 if (drm_ht_create(&dev->map_hash, 12)) {
81 drm_free(dev->maplist, sizeof(*dev->maplist), DRM_MEM_MAPS);
82 return -ENOMEM;
83 }
84 84
85 /* the DRM has 6 basic counters */ 85 /* the DRM has 6 basic counters */
86 dev->counters = 6; 86 dev->counters = 6;
diff --git a/drivers/char/drm/drm_vm.c b/drivers/char/drm/drm_vm.c
index ffd0800ed601..b40ae438f531 100644
--- a/drivers/char/drm/drm_vm.c
+++ b/drivers/char/drm/drm_vm.c
@@ -59,7 +59,7 @@ static __inline__ struct page *drm_do_vm_nopage(struct vm_area_struct *vma,
59 drm_device_t *dev = priv->head->dev; 59 drm_device_t *dev = priv->head->dev;
60 drm_map_t *map = NULL; 60 drm_map_t *map = NULL;
61 drm_map_list_t *r_list; 61 drm_map_list_t *r_list;
62 struct list_head *list; 62 drm_hash_item_t *hash;
63 63
64 /* 64 /*
65 * Find the right map 65 * Find the right map
@@ -70,14 +70,11 @@ static __inline__ struct page *drm_do_vm_nopage(struct vm_area_struct *vma,
70 if (!dev->agp || !dev->agp->cant_use_aperture) 70 if (!dev->agp || !dev->agp->cant_use_aperture)
71 goto vm_nopage_error; 71 goto vm_nopage_error;
72 72
73 list_for_each(list, &dev->maplist->head) { 73 if (drm_ht_find_item(&dev->map_hash, vma->vm_pgoff << PAGE_SHIFT, &hash))
74 r_list = list_entry(list, drm_map_list_t, head); 74 goto vm_nopage_error;
75 map = r_list->map; 75
76 if (!map) 76 r_list = drm_hash_entry(hash, drm_map_list_t, hash);
77 continue; 77 map = r_list->map;
78 if (r_list->user_token == VM_OFFSET(vma))
79 break;
80 }
81 78
82 if (map && map->type == _DRM_AGP) { 79 if (map && map->type == _DRM_AGP) {
83 unsigned long offset = address - vma->vm_start; 80 unsigned long offset = address - vma->vm_start;
@@ -467,7 +464,7 @@ static int drm_mmap_dma(struct file *filp, struct vm_area_struct *vma)
467 dev = priv->head->dev; 464 dev = priv->head->dev;
468 dma = dev->dma; 465 dma = dev->dma;
469 DRM_DEBUG("start = 0x%lx, end = 0x%lx, offset = 0x%lx\n", 466 DRM_DEBUG("start = 0x%lx, end = 0x%lx, offset = 0x%lx\n",
470 vma->vm_start, vma->vm_end, VM_OFFSET(vma)); 467 vma->vm_start, vma->vm_end, vma->vm_pgoff << PAGE_SHIFT);
471 468
472 /* Length must match exact page count */ 469 /* Length must match exact page count */
473 if (!dma || (length >> PAGE_SHIFT) != dma->page_count) { 470 if (!dma || (length >> PAGE_SHIFT) != dma->page_count) {
@@ -521,12 +518,11 @@ int drm_mmap(struct file *filp, struct vm_area_struct *vma)
521 drm_file_t *priv = filp->private_data; 518 drm_file_t *priv = filp->private_data;
522 drm_device_t *dev = priv->head->dev; 519 drm_device_t *dev = priv->head->dev;
523 drm_map_t *map = NULL; 520 drm_map_t *map = NULL;
524 drm_map_list_t *r_list;
525 unsigned long offset = 0; 521 unsigned long offset = 0;
526 struct list_head *list; 522 drm_hash_item_t *hash;
527 523
528 DRM_DEBUG("start = 0x%lx, end = 0x%lx, offset = 0x%lx\n", 524 DRM_DEBUG("start = 0x%lx, end = 0x%lx, offset = 0x%lx\n",
529 vma->vm_start, vma->vm_end, VM_OFFSET(vma)); 525 vma->vm_start, vma->vm_end, vma->vm_pgoff << PAGE_SHIFT);
530 526
531 if (!priv->authenticated) 527 if (!priv->authenticated)
532 return -EACCES; 528 return -EACCES;
@@ -535,7 +531,7 @@ int drm_mmap(struct file *filp, struct vm_area_struct *vma)
535 * the AGP mapped at physical address 0 531 * the AGP mapped at physical address 0
536 * --BenH. 532 * --BenH.
537 */ 533 */
538 if (!VM_OFFSET(vma) 534 if (!(vma->vm_pgoff << PAGE_SHIFT)
539#if __OS_HAS_AGP 535#if __OS_HAS_AGP
540 && (!dev->agp 536 && (!dev->agp
541 || dev->agp->agp_info.device->vendor != PCI_VENDOR_ID_APPLE) 537 || dev->agp->agp_info.device->vendor != PCI_VENDOR_ID_APPLE)
@@ -543,23 +539,12 @@ int drm_mmap(struct file *filp, struct vm_area_struct *vma)
543 ) 539 )
544 return drm_mmap_dma(filp, vma); 540 return drm_mmap_dma(filp, vma);
545 541
546 /* A sequential search of a linked list is 542 if (drm_ht_find_item(&dev->map_hash, vma->vm_pgoff << PAGE_SHIFT, &hash)) {
547 fine here because: 1) there will only be 543 DRM_ERROR("Could not find map\n");
548 about 5-10 entries in the list and, 2) a 544 return -EINVAL;
549 DRI client only has to do this mapping
550 once, so it doesn't have to be optimized
551 for performance, even if the list was a
552 bit longer. */
553 list_for_each(list, &dev->maplist->head) {
554
555 r_list = list_entry(list, drm_map_list_t, head);
556 map = r_list->map;
557 if (!map)
558 continue;
559 if (r_list->user_token == VM_OFFSET(vma))
560 break;
561 } 545 }
562 546
547 map = drm_hash_entry(hash, drm_map_list_t, hash)->map;
563 if (!map || ((map->flags & _DRM_RESTRICTED) && !capable(CAP_SYS_ADMIN))) 548 if (!map || ((map->flags & _DRM_RESTRICTED) && !capable(CAP_SYS_ADMIN)))
564 return -EPERM; 549 return -EPERM;
565 550
@@ -620,7 +605,7 @@ int drm_mmap(struct file *filp, struct vm_area_struct *vma)
620 offset = dev->driver->get_reg_ofs(dev); 605 offset = dev->driver->get_reg_ofs(dev);
621#ifdef __sparc__ 606#ifdef __sparc__
622 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); 607 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
623 if (io_remap_pfn_range(DRM_RPR_ARG(vma) vma->vm_start, 608 if (io_remap_pfn_range(vma, vma->vm_start,
624 (map->offset + offset) >> PAGE_SHIFT, 609 (map->offset + offset) >> PAGE_SHIFT,
625 vma->vm_end - vma->vm_start, 610 vma->vm_end - vma->vm_start,
626 vma->vm_page_prot)) 611 vma->vm_page_prot))
diff --git a/drivers/char/drm/i810_dma.c b/drivers/char/drm/i810_dma.c
index c658dde3633b..fa2de70f7401 100644
--- a/drivers/char/drm/i810_dma.c
+++ b/drivers/char/drm/i810_dma.c
@@ -106,7 +106,7 @@ static int i810_mmap_buffers(struct file *filp, struct vm_area_struct *vma)
106 unlock_kernel(); 106 unlock_kernel();
107 107
108 if (io_remap_pfn_range(vma, vma->vm_start, 108 if (io_remap_pfn_range(vma, vma->vm_start,
109 VM_OFFSET(vma) >> PAGE_SHIFT, 109 vma->vm_pgoff,
110 vma->vm_end - vma->vm_start, vma->vm_page_prot)) 110 vma->vm_end - vma->vm_start, vma->vm_page_prot))
111 return -EAGAIN; 111 return -EAGAIN;
112 return 0; 112 return 0;
@@ -141,10 +141,10 @@ static int i810_map_buffer(drm_buf_t * buf, struct file *filp)
141 MAP_SHARED, buf->bus_address); 141 MAP_SHARED, buf->bus_address);
142 dev_priv->mmap_buffer = NULL; 142 dev_priv->mmap_buffer = NULL;
143 filp->f_op = old_fops; 143 filp->f_op = old_fops;
144 if ((unsigned long)buf_priv->virtual > -1024UL) { 144 if (IS_ERR(buf_priv->virtual)) {
145 /* Real error */ 145 /* Real error */
146 DRM_ERROR("mmap error\n"); 146 DRM_ERROR("mmap error\n");
147 retcode = (signed int)buf_priv->virtual; 147 retcode = PTR_ERR(buf_priv->virtual);
148 buf_priv->virtual = NULL; 148 buf_priv->virtual = NULL;
149 } 149 }
150 up_write(&current->mm->mmap_sem); 150 up_write(&current->mm->mmap_sem);
@@ -808,7 +808,7 @@ static void i810_dma_dispatch_vertex(drm_device_t * dev,
808 ((GFX_OP_PRIMITIVE | prim | ((used / 4) - 2))); 808 ((GFX_OP_PRIMITIVE | prim | ((used / 4) - 2)));
809 809
810 if (used & 4) { 810 if (used & 4) {
811 *(u32 *) ((u32) buf_priv->kernel_virtual + used) = 0; 811 *(u32 *) ((char *) buf_priv->kernel_virtual + used) = 0;
812 used += 4; 812 used += 4;
813 } 813 }
814 814
@@ -1166,7 +1166,7 @@ static void i810_dma_dispatch_mc(drm_device_t * dev, drm_buf_t * buf, int used,
1166 1166
1167 if (buf_priv->currently_mapped == I810_BUF_MAPPED) { 1167 if (buf_priv->currently_mapped == I810_BUF_MAPPED) {
1168 if (used & 4) { 1168 if (used & 4) {
1169 *(u32 *) ((u32) buf_priv->virtual + used) = 0; 1169 *(u32 *) ((char *) buf_priv->virtual + used) = 0;
1170 used += 4; 1170 used += 4;
1171 } 1171 }
1172 1172
diff --git a/drivers/char/drm/i830_dma.c b/drivers/char/drm/i830_dma.c
index b0f815d8cea8..4f0e5746ab33 100644
--- a/drivers/char/drm/i830_dma.c
+++ b/drivers/char/drm/i830_dma.c
@@ -108,7 +108,7 @@ static int i830_mmap_buffers(struct file *filp, struct vm_area_struct *vma)
108 unlock_kernel(); 108 unlock_kernel();
109 109
110 if (io_remap_pfn_range(vma, vma->vm_start, 110 if (io_remap_pfn_range(vma, vma->vm_start,
111 VM_OFFSET(vma) >> PAGE_SHIFT, 111 vma->vm_pgoff,
112 vma->vm_end - vma->vm_start, vma->vm_page_prot)) 112 vma->vm_end - vma->vm_start, vma->vm_page_prot))
113 return -EAGAIN; 113 return -EAGAIN;
114 return 0; 114 return 0;
@@ -146,7 +146,7 @@ static int i830_map_buffer(drm_buf_t * buf, struct file *filp)
146 if (IS_ERR((void *)virtual)) { /* ugh */ 146 if (IS_ERR((void *)virtual)) { /* ugh */
147 /* Real error */ 147 /* Real error */
148 DRM_ERROR("mmap error\n"); 148 DRM_ERROR("mmap error\n");
149 retcode = virtual; 149 retcode = PTR_ERR((void *)virtual);
150 buf_priv->virtual = NULL; 150 buf_priv->virtual = NULL;
151 } else { 151 } else {
152 buf_priv->virtual = (void __user *)virtual; 152 buf_priv->virtual = (void __user *)virtual;
diff --git a/drivers/char/drm/i915_dma.c b/drivers/char/drm/i915_dma.c
index a94233bdbc0e..fb7913ff5286 100644
--- a/drivers/char/drm/i915_dma.c
+++ b/drivers/char/drm/i915_dma.c
@@ -31,6 +31,11 @@
31#include "i915_drm.h" 31#include "i915_drm.h"
32#include "i915_drv.h" 32#include "i915_drv.h"
33 33
34#define IS_I965G(dev) (dev->pci_device == 0x2972 || \
35 dev->pci_device == 0x2982 || \
36 dev->pci_device == 0x2992 || \
37 dev->pci_device == 0x29A2)
38
34/* Really want an OS-independent resettable timer. Would like to have 39/* Really want an OS-independent resettable timer. Would like to have
35 * this loop run for (eg) 3 sec, but have the timer reset every time 40 * this loop run for (eg) 3 sec, but have the timer reset every time
36 * the head pointer changes, so that EBUSY only happens if the ring 41 * the head pointer changes, so that EBUSY only happens if the ring
@@ -255,7 +260,7 @@ static int i915_dma_init(DRM_IOCTL_ARGS)
255 retcode = i915_dma_resume(dev); 260 retcode = i915_dma_resume(dev);
256 break; 261 break;
257 default: 262 default:
258 retcode = -EINVAL; 263 retcode = DRM_ERR(EINVAL);
259 break; 264 break;
260 } 265 }
261 266
@@ -347,7 +352,7 @@ static int i915_emit_cmds(drm_device_t * dev, int __user * buffer, int dwords)
347 if ((dwords+1) * sizeof(int) >= dev_priv->ring.Size - 8) 352 if ((dwords+1) * sizeof(int) >= dev_priv->ring.Size - 8)
348 return DRM_ERR(EINVAL); 353 return DRM_ERR(EINVAL);
349 354
350 BEGIN_LP_RING(((dwords+1)&~1)); 355 BEGIN_LP_RING((dwords+1)&~1);
351 356
352 for (i = 0; i < dwords;) { 357 for (i = 0; i < dwords;) {
353 int cmd, sz; 358 int cmd, sz;
@@ -386,7 +391,7 @@ static int i915_emit_box(drm_device_t * dev,
386 RING_LOCALS; 391 RING_LOCALS;
387 392
388 if (DRM_COPY_FROM_USER_UNCHECKED(&box, &boxes[i], sizeof(box))) { 393 if (DRM_COPY_FROM_USER_UNCHECKED(&box, &boxes[i], sizeof(box))) {
389 return EFAULT; 394 return DRM_ERR(EFAULT);
390 } 395 }
391 396
392 if (box.y2 <= box.y1 || box.x2 <= box.x1 || box.y2 <= 0 || box.x2 <= 0) { 397 if (box.y2 <= box.y1 || box.x2 <= box.x1 || box.y2 <= 0 || box.x2 <= 0) {
@@ -395,24 +400,40 @@ static int i915_emit_box(drm_device_t * dev,
395 return DRM_ERR(EINVAL); 400 return DRM_ERR(EINVAL);
396 } 401 }
397 402
398 BEGIN_LP_RING(6); 403 if (IS_I965G(dev)) {
399 OUT_RING(GFX_OP_DRAWRECT_INFO); 404 BEGIN_LP_RING(4);
400 OUT_RING(DR1); 405 OUT_RING(GFX_OP_DRAWRECT_INFO_I965);
401 OUT_RING((box.x1 & 0xffff) | (box.y1 << 16)); 406 OUT_RING((box.x1 & 0xffff) | (box.y1 << 16));
402 OUT_RING(((box.x2 - 1) & 0xffff) | ((box.y2 - 1) << 16)); 407 OUT_RING(((box.x2 - 1) & 0xffff) | ((box.y2 - 1) << 16));
403 OUT_RING(DR4); 408 OUT_RING(DR4);
404 OUT_RING(0); 409 ADVANCE_LP_RING();
405 ADVANCE_LP_RING(); 410 } else {
411 BEGIN_LP_RING(6);
412 OUT_RING(GFX_OP_DRAWRECT_INFO);
413 OUT_RING(DR1);
414 OUT_RING((box.x1 & 0xffff) | (box.y1 << 16));
415 OUT_RING(((box.x2 - 1) & 0xffff) | ((box.y2 - 1) << 16));
416 OUT_RING(DR4);
417 OUT_RING(0);
418 ADVANCE_LP_RING();
419 }
406 420
407 return 0; 421 return 0;
408} 422}
409 423
424/* XXX: Emitting the counter should really be moved to part of the IRQ
425 * emit. For now, do it in both places:
426 */
427
410static void i915_emit_breadcrumb(drm_device_t *dev) 428static void i915_emit_breadcrumb(drm_device_t *dev)
411{ 429{
412 drm_i915_private_t *dev_priv = dev->dev_private; 430 drm_i915_private_t *dev_priv = dev->dev_private;
413 RING_LOCALS; 431 RING_LOCALS;
414 432
415 dev_priv->sarea_priv->last_enqueue = dev_priv->counter++; 433 dev_priv->sarea_priv->last_enqueue = ++dev_priv->counter;
434
435 if (dev_priv->counter > 0x7FFFFFFFUL)
436 dev_priv->sarea_priv->last_enqueue = dev_priv->counter = 1;
416 437
417 BEGIN_LP_RING(4); 438 BEGIN_LP_RING(4);
418 OUT_RING(CMD_STORE_DWORD_IDX); 439 OUT_RING(CMD_STORE_DWORD_IDX);
diff --git a/drivers/char/drm/i915_drm.h b/drivers/char/drm/i915_drm.h
index 5aa3e0e3bb45..6af83e613f27 100644
--- a/drivers/char/drm/i915_drm.h
+++ b/drivers/char/drm/i915_drm.h
@@ -98,6 +98,12 @@ typedef struct _drm_i915_sarea {
98 int rotated_size; 98 int rotated_size;
99 int rotated_pitch; 99 int rotated_pitch;
100 int virtualX, virtualY; 100 int virtualX, virtualY;
101
102 unsigned int front_tiled;
103 unsigned int back_tiled;
104 unsigned int depth_tiled;
105 unsigned int rotated_tiled;
106 unsigned int rotated2_tiled;
101} drm_i915_sarea_t; 107} drm_i915_sarea_t;
102 108
103/* Flags for perf_boxes 109/* Flags for perf_boxes
diff --git a/drivers/char/drm/i915_drv.h b/drivers/char/drm/i915_drv.h
index 2d565031c002..fdc2bf192714 100644
--- a/drivers/char/drm/i915_drv.h
+++ b/drivers/char/drm/i915_drv.h
@@ -146,9 +146,9 @@ extern void i915_mem_release(drm_device_t * dev,
146#define BEGIN_LP_RING(n) do { \ 146#define BEGIN_LP_RING(n) do { \
147 if (I915_VERBOSE) \ 147 if (I915_VERBOSE) \
148 DRM_DEBUG("BEGIN_LP_RING(%d) in %s\n", \ 148 DRM_DEBUG("BEGIN_LP_RING(%d) in %s\n", \
149 n, __FUNCTION__); \ 149 (n), __FUNCTION__); \
150 if (dev_priv->ring.space < n*4) \ 150 if (dev_priv->ring.space < (n)*4) \
151 i915_wait_ring(dev, n*4, __FUNCTION__); \ 151 i915_wait_ring(dev, (n)*4, __FUNCTION__); \
152 outcount = 0; \ 152 outcount = 0; \
153 outring = dev_priv->ring.tail; \ 153 outring = dev_priv->ring.tail; \
154 ringmask = dev_priv->ring.tail_mask; \ 154 ringmask = dev_priv->ring.tail_mask; \
@@ -157,7 +157,7 @@ extern void i915_mem_release(drm_device_t * dev,
157 157
158#define OUT_RING(n) do { \ 158#define OUT_RING(n) do { \
159 if (I915_VERBOSE) DRM_DEBUG(" OUT_RING %x\n", (int)(n)); \ 159 if (I915_VERBOSE) DRM_DEBUG(" OUT_RING %x\n", (int)(n)); \
160 *(volatile unsigned int *)(virt + outring) = n; \ 160 *(volatile unsigned int *)(virt + outring) = (n); \
161 outcount++; \ 161 outcount++; \
162 outring += 4; \ 162 outring += 4; \
163 outring &= ringmask; \ 163 outring &= ringmask; \
@@ -254,6 +254,8 @@ extern int i915_wait_ring(drm_device_t * dev, int n, const char *caller);
254#define GFX_OP_DESTBUFFER_VARS ((0x3<<29)|(0x1d<<24)|(0x85<<16)|0x0) 254#define GFX_OP_DESTBUFFER_VARS ((0x3<<29)|(0x1d<<24)|(0x85<<16)|0x0)
255#define GFX_OP_DRAWRECT_INFO ((0x3<<29)|(0x1d<<24)|(0x80<<16)|(0x3)) 255#define GFX_OP_DRAWRECT_INFO ((0x3<<29)|(0x1d<<24)|(0x80<<16)|(0x3))
256 256
257#define GFX_OP_DRAWRECT_INFO_I965 ((0x7900<<16)|0x2)
258
257#define MI_BATCH_BUFFER ((0x30<<23)|1) 259#define MI_BATCH_BUFFER ((0x30<<23)|1)
258#define MI_BATCH_BUFFER_START (0x31<<23) 260#define MI_BATCH_BUFFER_START (0x31<<23)
259#define MI_BATCH_BUFFER_END (0xA<<23) 261#define MI_BATCH_BUFFER_END (0xA<<23)
diff --git a/drivers/char/drm/i915_irq.c b/drivers/char/drm/i915_irq.c
index cd96cfa430db..0d4a162aa385 100644
--- a/drivers/char/drm/i915_irq.c
+++ b/drivers/char/drm/i915_irq.c
@@ -71,21 +71,27 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
71static int i915_emit_irq(drm_device_t * dev) 71static int i915_emit_irq(drm_device_t * dev)
72{ 72{
73 drm_i915_private_t *dev_priv = dev->dev_private; 73 drm_i915_private_t *dev_priv = dev->dev_private;
74 u32 ret;
75 RING_LOCALS; 74 RING_LOCALS;
76 75
77 i915_kernel_lost_context(dev); 76 i915_kernel_lost_context(dev);
78 77
79 DRM_DEBUG("%s\n", __FUNCTION__); 78 DRM_DEBUG("%s\n", __FUNCTION__);
80 79
81 ret = dev_priv->counter; 80 dev_priv->sarea_priv->last_enqueue = ++dev_priv->counter;
82 81
83 BEGIN_LP_RING(2); 82 if (dev_priv->counter > 0x7FFFFFFFUL)
83 dev_priv->sarea_priv->last_enqueue = dev_priv->counter = 1;
84
85 BEGIN_LP_RING(6);
86 OUT_RING(CMD_STORE_DWORD_IDX);
87 OUT_RING(20);
88 OUT_RING(dev_priv->counter);
89 OUT_RING(0);
84 OUT_RING(0); 90 OUT_RING(0);
85 OUT_RING(GFX_OP_USER_INTERRUPT); 91 OUT_RING(GFX_OP_USER_INTERRUPT);
86 ADVANCE_LP_RING(); 92 ADVANCE_LP_RING();
87 93
88 return ret; 94 return dev_priv->counter;
89} 95}
90 96
91static int i915_wait_irq(drm_device_t * dev, int irq_nr) 97static int i915_wait_irq(drm_device_t * dev, int irq_nr)
diff --git a/drivers/char/drm/radeon_cp.c b/drivers/char/drm/radeon_cp.c
index 5ad43ba7b5aa..5ed965688293 100644
--- a/drivers/char/drm/radeon_cp.c
+++ b/drivers/char/drm/radeon_cp.c
@@ -864,13 +864,13 @@ static int radeon_do_pixcache_flush(drm_radeon_private_t * dev_priv)
864 864
865 dev_priv->stats.boxes |= RADEON_BOX_WAIT_IDLE; 865 dev_priv->stats.boxes |= RADEON_BOX_WAIT_IDLE;
866 866
867 tmp = RADEON_READ(RADEON_RB2D_DSTCACHE_CTLSTAT); 867 tmp = RADEON_READ(RADEON_RB3D_DSTCACHE_CTLSTAT);
868 tmp |= RADEON_RB2D_DC_FLUSH_ALL; 868 tmp |= RADEON_RB3D_DC_FLUSH_ALL;
869 RADEON_WRITE(RADEON_RB2D_DSTCACHE_CTLSTAT, tmp); 869 RADEON_WRITE(RADEON_RB3D_DSTCACHE_CTLSTAT, tmp);
870 870
871 for (i = 0; i < dev_priv->usec_timeout; i++) { 871 for (i = 0; i < dev_priv->usec_timeout; i++) {
872 if (!(RADEON_READ(RADEON_RB2D_DSTCACHE_CTLSTAT) 872 if (!(RADEON_READ(RADEON_RB3D_DSTCACHE_CTLSTAT)
873 & RADEON_RB2D_DC_BUSY)) { 873 & RADEON_RB3D_DC_BUSY)) {
874 return 0; 874 return 0;
875 } 875 }
876 DRM_UDELAY(1); 876 DRM_UDELAY(1);
@@ -1130,7 +1130,7 @@ static void radeon_cp_init_ring_buffer(drm_device_t * dev,
1130 | (dev_priv->fb_location >> 16)); 1130 | (dev_priv->fb_location >> 16));
1131 1131
1132#if __OS_HAS_AGP 1132#if __OS_HAS_AGP
1133 if (dev_priv->flags & CHIP_IS_AGP) { 1133 if (dev_priv->flags & RADEON_IS_AGP) {
1134 RADEON_WRITE(RADEON_AGP_BASE, (unsigned int)dev->agp->base); 1134 RADEON_WRITE(RADEON_AGP_BASE, (unsigned int)dev->agp->base);
1135 RADEON_WRITE(RADEON_MC_AGP_LOCATION, 1135 RADEON_WRITE(RADEON_MC_AGP_LOCATION,
1136 (((dev_priv->gart_vm_start - 1 + 1136 (((dev_priv->gart_vm_start - 1 +
@@ -1158,7 +1158,7 @@ static void radeon_cp_init_ring_buffer(drm_device_t * dev,
1158 dev_priv->ring.tail = cur_read_ptr; 1158 dev_priv->ring.tail = cur_read_ptr;
1159 1159
1160#if __OS_HAS_AGP 1160#if __OS_HAS_AGP
1161 if (dev_priv->flags & CHIP_IS_AGP) { 1161 if (dev_priv->flags & RADEON_IS_AGP) {
1162 RADEON_WRITE(RADEON_CP_RB_RPTR_ADDR, 1162 RADEON_WRITE(RADEON_CP_RB_RPTR_ADDR,
1163 dev_priv->ring_rptr->offset 1163 dev_priv->ring_rptr->offset
1164 - dev->agp->base + dev_priv->gart_vm_start); 1164 - dev->agp->base + dev_priv->gart_vm_start);
@@ -1258,6 +1258,13 @@ static void radeon_test_writeback(drm_radeon_private_t * dev_priv)
1258 dev_priv->writeback_works = 0; 1258 dev_priv->writeback_works = 0;
1259 DRM_INFO("writeback forced off\n"); 1259 DRM_INFO("writeback forced off\n");
1260 } 1260 }
1261
1262 if (!dev_priv->writeback_works) {
1263 /* Disable writeback to avoid unnecessary bus master transfer */
1264 RADEON_WRITE(RADEON_CP_RB_CNTL, RADEON_READ(RADEON_CP_RB_CNTL) |
1265 RADEON_RB_NO_UPDATE);
1266 RADEON_WRITE(RADEON_SCRATCH_UMSK, 0);
1267 }
1261} 1268}
1262 1269
1263/* Enable or disable PCI-E GART on the chip */ 1270/* Enable or disable PCI-E GART on the chip */
@@ -1295,7 +1302,7 @@ static void radeon_set_pcigart(drm_radeon_private_t * dev_priv, int on)
1295{ 1302{
1296 u32 tmp; 1303 u32 tmp;
1297 1304
1298 if (dev_priv->flags & CHIP_IS_PCIE) { 1305 if (dev_priv->flags & RADEON_IS_PCIE) {
1299 radeon_set_pciegart(dev_priv, on); 1306 radeon_set_pciegart(dev_priv, on);
1300 return; 1307 return;
1301 } 1308 }
@@ -1333,20 +1340,22 @@ static int radeon_do_init_cp(drm_device_t * dev, drm_radeon_init_t * init)
1333 DRM_DEBUG("\n"); 1340 DRM_DEBUG("\n");
1334 1341
1335 /* if we require new memory map but we don't have it fail */ 1342 /* if we require new memory map but we don't have it fail */
1336 if ((dev_priv->flags & CHIP_NEW_MEMMAP) && !dev_priv->new_memmap) 1343 if ((dev_priv->flags & RADEON_NEW_MEMMAP) && !dev_priv->new_memmap) {
1337 { 1344 DRM_ERROR("Cannot initialise DRM on this card\nThis card requires a new X.org DDX for 3D\n");
1338 DRM_ERROR("Cannot initialise DRM on this card\nThis card requires a new X.org DDX\n");
1339 radeon_do_cleanup_cp(dev); 1345 radeon_do_cleanup_cp(dev);
1340 return DRM_ERR(EINVAL); 1346 return DRM_ERR(EINVAL);
1341 } 1347 }
1342 1348
1343 if (init->is_pci && (dev_priv->flags & CHIP_IS_AGP)) 1349 if (init->is_pci && (dev_priv->flags & RADEON_IS_AGP)) {
1344 {
1345 DRM_DEBUG("Forcing AGP card to PCI mode\n"); 1350 DRM_DEBUG("Forcing AGP card to PCI mode\n");
1346 dev_priv->flags &= ~CHIP_IS_AGP; 1351 dev_priv->flags &= ~RADEON_IS_AGP;
1352 } else if (!(dev_priv->flags & (RADEON_IS_AGP | RADEON_IS_PCI | RADEON_IS_PCIE))
1353 && !init->is_pci) {
1354 DRM_DEBUG("Restoring AGP flag\n");
1355 dev_priv->flags |= RADEON_IS_AGP;
1347 } 1356 }
1348 1357
1349 if ((!(dev_priv->flags & CHIP_IS_AGP)) && !dev->sg) { 1358 if ((!(dev_priv->flags & RADEON_IS_AGP)) && !dev->sg) {
1350 DRM_ERROR("PCI GART memory not allocated!\n"); 1359 DRM_ERROR("PCI GART memory not allocated!\n");
1351 radeon_do_cleanup_cp(dev); 1360 radeon_do_cleanup_cp(dev);
1352 return DRM_ERR(EINVAL); 1361 return DRM_ERR(EINVAL);
@@ -1489,7 +1498,7 @@ static int radeon_do_init_cp(drm_device_t * dev, drm_radeon_init_t * init)
1489 init->sarea_priv_offset); 1498 init->sarea_priv_offset);
1490 1499
1491#if __OS_HAS_AGP 1500#if __OS_HAS_AGP
1492 if (dev_priv->flags & CHIP_IS_AGP) { 1501 if (dev_priv->flags & RADEON_IS_AGP) {
1493 drm_core_ioremap(dev_priv->cp_ring, dev); 1502 drm_core_ioremap(dev_priv->cp_ring, dev);
1494 drm_core_ioremap(dev_priv->ring_rptr, dev); 1503 drm_core_ioremap(dev_priv->ring_rptr, dev);
1495 drm_core_ioremap(dev->agp_buffer_map, dev); 1504 drm_core_ioremap(dev->agp_buffer_map, dev);
@@ -1548,7 +1557,7 @@ static int radeon_do_init_cp(drm_device_t * dev, drm_radeon_init_t * init)
1548 * align it down. 1557 * align it down.
1549 */ 1558 */
1550#if __OS_HAS_AGP 1559#if __OS_HAS_AGP
1551 if (dev_priv->flags & CHIP_IS_AGP) { 1560 if (dev_priv->flags & RADEON_IS_AGP) {
1552 base = dev->agp->base; 1561 base = dev->agp->base;
1553 /* Check if valid */ 1562 /* Check if valid */
1554 if ((base + dev_priv->gart_size) > dev_priv->fb_location && 1563 if ((base + dev_priv->gart_size) > dev_priv->fb_location &&
@@ -1578,7 +1587,7 @@ static int radeon_do_init_cp(drm_device_t * dev, drm_radeon_init_t * init)
1578 } 1587 }
1579 1588
1580#if __OS_HAS_AGP 1589#if __OS_HAS_AGP
1581 if (dev_priv->flags & CHIP_IS_AGP) 1590 if (dev_priv->flags & RADEON_IS_AGP)
1582 dev_priv->gart_buffers_offset = (dev->agp_buffer_map->offset 1591 dev_priv->gart_buffers_offset = (dev->agp_buffer_map->offset
1583 - dev->agp->base 1592 - dev->agp->base
1584 + dev_priv->gart_vm_start); 1593 + dev_priv->gart_vm_start);
@@ -1604,7 +1613,7 @@ static int radeon_do_init_cp(drm_device_t * dev, drm_radeon_init_t * init)
1604 dev_priv->ring.high_mark = RADEON_RING_HIGH_MARK; 1613 dev_priv->ring.high_mark = RADEON_RING_HIGH_MARK;
1605 1614
1606#if __OS_HAS_AGP 1615#if __OS_HAS_AGP
1607 if (dev_priv->flags & CHIP_IS_AGP) { 1616 if (dev_priv->flags & RADEON_IS_AGP) {
1608 /* Turn off PCI GART */ 1617 /* Turn off PCI GART */
1609 radeon_set_pcigart(dev_priv, 0); 1618 radeon_set_pcigart(dev_priv, 0);
1610 } else 1619 } else
@@ -1624,7 +1633,7 @@ static int radeon_do_init_cp(drm_device_t * dev, drm_radeon_init_t * init)
1624 dev_priv->gart_info.mapping.handle; 1633 dev_priv->gart_info.mapping.handle;
1625 1634
1626 dev_priv->gart_info.is_pcie = 1635 dev_priv->gart_info.is_pcie =
1627 !!(dev_priv->flags & CHIP_IS_PCIE); 1636 !!(dev_priv->flags & RADEON_IS_PCIE);
1628 dev_priv->gart_info.gart_table_location = 1637 dev_priv->gart_info.gart_table_location =
1629 DRM_ATI_GART_FB; 1638 DRM_ATI_GART_FB;
1630 1639
@@ -1636,7 +1645,7 @@ static int radeon_do_init_cp(drm_device_t * dev, drm_radeon_init_t * init)
1636 DRM_ATI_GART_MAIN; 1645 DRM_ATI_GART_MAIN;
1637 dev_priv->gart_info.addr = NULL; 1646 dev_priv->gart_info.addr = NULL;
1638 dev_priv->gart_info.bus_addr = 0; 1647 dev_priv->gart_info.bus_addr = 0;
1639 if (dev_priv->flags & CHIP_IS_PCIE) { 1648 if (dev_priv->flags & RADEON_IS_PCIE) {
1640 DRM_ERROR 1649 DRM_ERROR
1641 ("Cannot use PCI Express without GART in FB memory\n"); 1650 ("Cannot use PCI Express without GART in FB memory\n");
1642 radeon_do_cleanup_cp(dev); 1651 radeon_do_cleanup_cp(dev);
@@ -1678,7 +1687,7 @@ static int radeon_do_cleanup_cp(drm_device_t * dev)
1678 drm_irq_uninstall(dev); 1687 drm_irq_uninstall(dev);
1679 1688
1680#if __OS_HAS_AGP 1689#if __OS_HAS_AGP
1681 if (dev_priv->flags & CHIP_IS_AGP) { 1690 if (dev_priv->flags & RADEON_IS_AGP) {
1682 if (dev_priv->cp_ring != NULL) { 1691 if (dev_priv->cp_ring != NULL) {
1683 drm_core_ioremapfree(dev_priv->cp_ring, dev); 1692 drm_core_ioremapfree(dev_priv->cp_ring, dev);
1684 dev_priv->cp_ring = NULL; 1693 dev_priv->cp_ring = NULL;
@@ -1733,7 +1742,7 @@ static int radeon_do_resume_cp(drm_device_t * dev)
1733 DRM_DEBUG("Starting radeon_do_resume_cp()\n"); 1742 DRM_DEBUG("Starting radeon_do_resume_cp()\n");
1734 1743
1735#if __OS_HAS_AGP 1744#if __OS_HAS_AGP
1736 if (dev_priv->flags & CHIP_IS_AGP) { 1745 if (dev_priv->flags & RADEON_IS_AGP) {
1737 /* Turn off PCI GART */ 1746 /* Turn off PCI GART */
1738 radeon_set_pcigart(dev_priv, 0); 1747 radeon_set_pcigart(dev_priv, 0);
1739 } else 1748 } else
@@ -2177,13 +2186,15 @@ int radeon_driver_load(struct drm_device *dev, unsigned long flags)
2177 dev->dev_private = (void *)dev_priv; 2186 dev->dev_private = (void *)dev_priv;
2178 dev_priv->flags = flags; 2187 dev_priv->flags = flags;
2179 2188
2180 switch (flags & CHIP_FAMILY_MASK) { 2189 switch (flags & RADEON_FAMILY_MASK) {
2181 case CHIP_R100: 2190 case CHIP_R100:
2182 case CHIP_RV200: 2191 case CHIP_RV200:
2183 case CHIP_R200: 2192 case CHIP_R200:
2184 case CHIP_R300: 2193 case CHIP_R300:
2194 case CHIP_R350:
2185 case CHIP_R420: 2195 case CHIP_R420:
2186 dev_priv->flags |= CHIP_HAS_HIERZ; 2196 case CHIP_RV410:
2197 dev_priv->flags |= RADEON_HAS_HIERZ;
2187 break; 2198 break;
2188 default: 2199 default:
2189 /* all other chips have no hierarchical z buffer */ 2200 /* all other chips have no hierarchical z buffer */
@@ -2191,13 +2202,14 @@ int radeon_driver_load(struct drm_device *dev, unsigned long flags)
2191 } 2202 }
2192 2203
2193 if (drm_device_is_agp(dev)) 2204 if (drm_device_is_agp(dev))
2194 dev_priv->flags |= CHIP_IS_AGP; 2205 dev_priv->flags |= RADEON_IS_AGP;
2195 2206 else if (drm_device_is_pcie(dev))
2196 if (drm_device_is_pcie(dev)) 2207 dev_priv->flags |= RADEON_IS_PCIE;
2197 dev_priv->flags |= CHIP_IS_PCIE; 2208 else
2209 dev_priv->flags |= RADEON_IS_PCI;
2198 2210
2199 DRM_DEBUG("%s card detected\n", 2211 DRM_DEBUG("%s card detected\n",
2200 ((dev_priv->flags & CHIP_IS_AGP) ? "AGP" : (((dev_priv->flags & CHIP_IS_PCIE) ? "PCIE" : "PCI")))); 2212 ((dev_priv->flags & RADEON_IS_AGP) ? "AGP" : (((dev_priv->flags & RADEON_IS_PCIE) ? "PCIE" : "PCI"))));
2201 return ret; 2213 return ret;
2202} 2214}
2203 2215
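
The load-time hunk above renames the chip flags and makes the bus classification exhaustive: AGP if drm_device_is_agp() reports it, else PCIE, else an explicit RADEON_IS_PCI fallback where plain PCI was previously implied only by the absence of the other two flags. A minimal standalone sketch of the renamed flag layout follows; the flag values are copied from the radeon_drv.h hunk further down, while the family id and the helper are made up for illustration and are not driver code.

#include <stdint.h>
#include <stdio.h>

/* flag bits as renamed in radeon_drv.h: family id in the low 16 bits,
 * feature/bus flags in the high 16 bits */
#define RADEON_FAMILY_MASK 0x0000ffffUL
#define RADEON_IS_AGP      0x00080000UL
#define RADEON_IS_PCIE     0x00200000UL
#define RADEON_IS_PCI      0x00800000UL

static const char *bus_name(uint32_t flags)
{
	/* same precedence as the DRM_DEBUG line above: AGP, then PCIE, else PCI */
	return (flags & RADEON_IS_AGP) ? "AGP" :
	       (flags & RADEON_IS_PCIE) ? "PCIE" : "PCI";
}

int main(void)
{
	uint32_t flags = 0x0042 | RADEON_IS_PCIE;	/* 0x0042: made-up family id */

	printf("family 0x%04x, %s card detected\n",
	       (unsigned)(flags & RADEON_FAMILY_MASK), bus_name(flags));
	return 0;
}
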
diff --git a/drivers/char/drm/radeon_drv.c b/drivers/char/drm/radeon_drv.c
index eb985c2a31e9..2eb652ec6745 100644
--- a/drivers/char/drm/radeon_drv.c
+++ b/drivers/char/drm/radeon_drv.c
@@ -44,7 +44,7 @@ module_param_named(no_wb, radeon_no_wb, int, 0444);
44static int dri_library_name(struct drm_device *dev, char *buf) 44static int dri_library_name(struct drm_device *dev, char *buf)
45{ 45{
46 drm_radeon_private_t *dev_priv = dev->dev_private; 46 drm_radeon_private_t *dev_priv = dev->dev_private;
47 int family = dev_priv->flags & CHIP_FAMILY_MASK; 47 int family = dev_priv->flags & RADEON_FAMILY_MASK;
48 48
49 return snprintf(buf, PAGE_SIZE, "%s\n", 49 return snprintf(buf, PAGE_SIZE, "%s\n",
50 (family < CHIP_R200) ? "radeon" : 50 (family < CHIP_R200) ? "radeon" :
diff --git a/drivers/char/drm/radeon_drv.h b/drivers/char/drm/radeon_drv.h
index e5a256f5429c..f45cd7f147a5 100644
--- a/drivers/char/drm/radeon_drv.h
+++ b/drivers/char/drm/radeon_drv.h
@@ -133,15 +133,16 @@ enum radeon_cp_microcode_version {
133 * Chip flags 133 * Chip flags
134 */ 134 */
135enum radeon_chip_flags { 135enum radeon_chip_flags {
136 CHIP_FAMILY_MASK = 0x0000ffffUL, 136 RADEON_FAMILY_MASK = 0x0000ffffUL,
137 CHIP_FLAGS_MASK = 0xffff0000UL, 137 RADEON_FLAGS_MASK = 0xffff0000UL,
138 CHIP_IS_MOBILITY = 0x00010000UL, 138 RADEON_IS_MOBILITY = 0x00010000UL,
139 CHIP_IS_IGP = 0x00020000UL, 139 RADEON_IS_IGP = 0x00020000UL,
140 CHIP_SINGLE_CRTC = 0x00040000UL, 140 RADEON_SINGLE_CRTC = 0x00040000UL,
141 CHIP_IS_AGP = 0x00080000UL, 141 RADEON_IS_AGP = 0x00080000UL,
142 CHIP_HAS_HIERZ = 0x00100000UL, 142 RADEON_HAS_HIERZ = 0x00100000UL,
143 CHIP_IS_PCIE = 0x00200000UL, 143 RADEON_IS_PCIE = 0x00200000UL,
144 CHIP_NEW_MEMMAP = 0x00400000UL, 144 RADEON_NEW_MEMMAP = 0x00400000UL,
145 RADEON_IS_PCI = 0x00800000UL,
145}; 146};
146 147
147#define GET_RING_HEAD(dev_priv) (dev_priv->writeback_works ? \ 148#define GET_RING_HEAD(dev_priv) (dev_priv->writeback_works ? \
@@ -424,6 +425,8 @@ extern int r300_do_cp_cmdbuf(drm_device_t * dev, DRMFILE filp,
424#define RADEON_RB3D_COLOROFFSET 0x1c40 425#define RADEON_RB3D_COLOROFFSET 0x1c40
425#define RADEON_RB3D_COLORPITCH 0x1c48 426#define RADEON_RB3D_COLORPITCH 0x1c48
426 427
428#define RADEON_SRC_X_Y 0x1590
429
427#define RADEON_DP_GUI_MASTER_CNTL 0x146c 430#define RADEON_DP_GUI_MASTER_CNTL 0x146c
428# define RADEON_GMC_SRC_PITCH_OFFSET_CNTL (1 << 0) 431# define RADEON_GMC_SRC_PITCH_OFFSET_CNTL (1 << 0)
429# define RADEON_GMC_DST_PITCH_OFFSET_CNTL (1 << 1) 432# define RADEON_GMC_DST_PITCH_OFFSET_CNTL (1 << 1)
@@ -441,6 +444,7 @@ extern int r300_do_cp_cmdbuf(drm_device_t * dev, DRMFILE filp,
441# define RADEON_ROP3_S 0x00cc0000 444# define RADEON_ROP3_S 0x00cc0000
442# define RADEON_ROP3_P 0x00f00000 445# define RADEON_ROP3_P 0x00f00000
443#define RADEON_DP_WRITE_MASK 0x16cc 446#define RADEON_DP_WRITE_MASK 0x16cc
447#define RADEON_SRC_PITCH_OFFSET 0x1428
444#define RADEON_DST_PITCH_OFFSET 0x142c 448#define RADEON_DST_PITCH_OFFSET 0x142c
445#define RADEON_DST_PITCH_OFFSET_C 0x1c80 449#define RADEON_DST_PITCH_OFFSET_C 0x1c80
446# define RADEON_DST_TILE_LINEAR (0 << 30) 450# define RADEON_DST_TILE_LINEAR (0 << 30)
@@ -545,6 +549,11 @@ extern int r300_do_cp_cmdbuf(drm_device_t * dev, DRMFILE filp,
545# define RADEON_RB3D_ZC_FREE (1 << 2) 549# define RADEON_RB3D_ZC_FREE (1 << 2)
546# define RADEON_RB3D_ZC_FLUSH_ALL 0x5 550# define RADEON_RB3D_ZC_FLUSH_ALL 0x5
547# define RADEON_RB3D_ZC_BUSY (1 << 31) 551# define RADEON_RB3D_ZC_BUSY (1 << 31)
552#define RADEON_RB3D_DSTCACHE_CTLSTAT 0x325c
553# define RADEON_RB3D_DC_FLUSH (3 << 0)
554# define RADEON_RB3D_DC_FREE (3 << 2)
555# define RADEON_RB3D_DC_FLUSH_ALL 0xf
556# define RADEON_RB3D_DC_BUSY (1 << 31)
548#define RADEON_RB3D_ZSTENCILCNTL 0x1c2c 557#define RADEON_RB3D_ZSTENCILCNTL 0x1c2c
549# define RADEON_Z_TEST_MASK (7 << 4) 558# define RADEON_Z_TEST_MASK (7 << 4)
550# define RADEON_Z_TEST_ALWAYS (7 << 4) 559# define RADEON_Z_TEST_ALWAYS (7 << 4)
@@ -681,6 +690,7 @@ extern int r300_do_cp_cmdbuf(drm_device_t * dev, DRMFILE filp,
681#define RADEON_CP_RB_BASE 0x0700 690#define RADEON_CP_RB_BASE 0x0700
682#define RADEON_CP_RB_CNTL 0x0704 691#define RADEON_CP_RB_CNTL 0x0704
683# define RADEON_BUF_SWAP_32BIT (2 << 16) 692# define RADEON_BUF_SWAP_32BIT (2 << 16)
693# define RADEON_RB_NO_UPDATE (1 << 27)
684#define RADEON_CP_RB_RPTR_ADDR 0x070c 694#define RADEON_CP_RB_RPTR_ADDR 0x070c
685#define RADEON_CP_RB_RPTR 0x0710 695#define RADEON_CP_RB_RPTR 0x0710
686#define RADEON_CP_RB_WPTR 0x0714 696#define RADEON_CP_RB_WPTR 0x0714
@@ -986,13 +996,13 @@ do { \
986} while (0) 996} while (0)
987 997
988#define RADEON_FLUSH_CACHE() do { \ 998#define RADEON_FLUSH_CACHE() do { \
989 OUT_RING( CP_PACKET0( RADEON_RB2D_DSTCACHE_CTLSTAT, 0 ) ); \ 999 OUT_RING( CP_PACKET0( RADEON_RB3D_DSTCACHE_CTLSTAT, 0 ) ); \
990 OUT_RING( RADEON_RB2D_DC_FLUSH ); \ 1000 OUT_RING( RADEON_RB3D_DC_FLUSH ); \
991} while (0) 1001} while (0)
992 1002
993#define RADEON_PURGE_CACHE() do { \ 1003#define RADEON_PURGE_CACHE() do { \
994 OUT_RING( CP_PACKET0( RADEON_RB2D_DSTCACHE_CTLSTAT, 0 ) ); \ 1004 OUT_RING( CP_PACKET0( RADEON_RB3D_DSTCACHE_CTLSTAT, 0 ) ); \
995 OUT_RING( RADEON_RB2D_DC_FLUSH_ALL ); \ 1005 OUT_RING( RADEON_RB3D_DC_FLUSH_ALL ); \
996} while (0) 1006} while (0)
997 1007
998#define RADEON_FLUSH_ZCACHE() do { \ 1008#define RADEON_FLUSH_ZCACHE() do { \
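
The FLUSH_CACHE/PURGE_CACHE macros above switch from the RB2D to the RB3D destination-cache control register (0x325c, added earlier in this header). For reference, here is a small sketch of how such a register write is built, assuming the usual radeon CP_PACKET0 encoding of (count << 16) | (register >> 2); that encoding is my reading of the driver's macro, not something shown in this hunk.

#include <stdint.h>
#include <stdio.h>

#define RADEON_CP_PACKET0		0x00000000
#define RADEON_RB3D_DSTCACHE_CTLSTAT	0x325c
#define RADEON_RB3D_DC_FLUSH		(3 << 0)

/* assumed type-0 packet encoding: payload count (registers minus one)
 * in bits 16+, register dword index in the low bits */
#define CP_PACKET0(reg, n)	(RADEON_CP_PACKET0 | ((n) << 16) | ((reg) >> 2))

int main(void)
{
	uint32_t pkt[2] = {
		CP_PACKET0(RADEON_RB3D_DSTCACHE_CTLSTAT, 0),	/* one register write */
		RADEON_RB3D_DC_FLUSH,				/* value written */
	};

	printf("header 0x%08x, payload 0x%08x\n",
	       (unsigned)pkt[0], (unsigned)pkt[1]);
	return 0;
}
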
diff --git a/drivers/char/drm/radeon_state.c b/drivers/char/drm/radeon_state.c
index 39a7f685e3fd..feac5f005d47 100644
--- a/drivers/char/drm/radeon_state.c
+++ b/drivers/char/drm/radeon_state.c
@@ -42,7 +42,11 @@ static __inline__ int radeon_check_and_fixup_offset(drm_radeon_private_t *
42 drm_file_t * filp_priv, 42 drm_file_t * filp_priv,
43 u32 *offset) 43 u32 *offset)
44{ 44{
45 u32 off = *offset; 45 u64 off = *offset;
46 u32 fb_start = dev_priv->fb_location;
47 u32 fb_end = fb_start + dev_priv->fb_size - 1;
48 u32 gart_start = dev_priv->gart_vm_start;
49 u32 gart_end = gart_start + dev_priv->gart_size - 1;
46 struct drm_radeon_driver_file_fields *radeon_priv; 50 struct drm_radeon_driver_file_fields *radeon_priv;
47 51
48 /* Hrm ... the story of the offset ... So this function converts 52 /* Hrm ... the story of the offset ... So this function converts
@@ -62,10 +66,8 @@ static __inline__ int radeon_check_and_fixup_offset(drm_radeon_private_t *
62 /* First, the best case, the offset already lands in either the 66 /* First, the best case, the offset already lands in either the
63 * framebuffer or the GART mapped space 67 * framebuffer or the GART mapped space
64 */ 68 */
65 if ((off >= dev_priv->fb_location && 69 if ((off >= fb_start && off <= fb_end) ||
66 off < (dev_priv->fb_location + dev_priv->fb_size)) || 70 (off >= gart_start && off <= gart_end))
67 (off >= dev_priv->gart_vm_start &&
68 off < (dev_priv->gart_vm_start + dev_priv->gart_size)))
69 return 0; 71 return 0;
70 72
71 /* Ok, that didn't happen... now check if we have a zero based 73 /* Ok, that didn't happen... now check if we have a zero based
@@ -78,16 +80,13 @@ static __inline__ int radeon_check_and_fixup_offset(drm_radeon_private_t *
78 } 80 }
79 81
80 /* Finally, assume we aimed at a GART offset if beyond the fb */ 82 /* Finally, assume we aimed at a GART offset if beyond the fb */
81 if (off > (dev_priv->fb_location + dev_priv->fb_size)) 83 if (off > fb_end)
82 off = off - (dev_priv->fb_location + dev_priv->fb_size) + 84 off = off - fb_end - 1 + gart_start;
83 dev_priv->gart_vm_start;
84 85
85 /* Now recheck and fail if out of bounds */ 86 /* Now recheck and fail if out of bounds */
86 if ((off >= dev_priv->fb_location && 87 if ((off >= fb_start && off <= fb_end) ||
87 off < (dev_priv->fb_location + dev_priv->fb_size)) || 88 (off >= gart_start && off <= gart_end)) {
88 (off >= dev_priv->gart_vm_start && 89 DRM_DEBUG("offset fixed up to 0x%x\n", (unsigned int)off);
89 off < (dev_priv->gart_vm_start + dev_priv->gart_size))) {
90 DRM_DEBUG("offset fixed up to 0x%x\n", off);
91 *offset = off; 90 *offset = off;
92 return 0; 91 return 0;
93 } 92 }
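
The widened "u64 off" and the inclusive fb_end/gart_end bounds in the hunk above close a 32-bit overflow: when the framebuffer ends at the top of the 4 GB range, "fb_location + fb_size" wraps to 0 in a u32, so the old exclusive-end comparison rejected perfectly valid offsets. A standalone sketch of the same arithmetic follows; the struct, harness, and test values are invented for illustration and the per-file zero-based offset handling is omitted.

#include <stdint.h>
#include <stdio.h>

struct ranges {
	uint32_t fb_start, fb_size;
	uint32_t gart_start, gart_size;
};

static int fixup_offset(const struct ranges *r, uint32_t *offset)
{
	uint64_t off = *offset;					/* widened, cannot wrap */
	uint32_t fb_end = r->fb_start + r->fb_size - 1;		/* inclusive ends */
	uint32_t gart_end = r->gart_start + r->gart_size - 1;

	if ((off >= r->fb_start && off <= fb_end) ||
	    (off >= r->gart_start && off <= gart_end))
		return 0;

	/* past the framebuffer: assume a GART offset was meant */
	if (off > fb_end)
		off = off - fb_end - 1 + r->gart_start;

	if ((off >= r->fb_start && off <= fb_end) ||
	    (off >= r->gart_start && off <= gart_end)) {
		*offset = (uint32_t)off;
		return 0;
	}
	return -1;
}

int main(void)
{
	/* framebuffer ends exactly at 0xffffffff: the old 32-bit
	 * "start + size" comparison wrapped to 0 and rejected valid offsets */
	struct ranges r = { 0xe0000000u, 0x20000000u, 0xd0000000u, 0x10000000u };
	uint32_t off = 0xfffff000u;
	int ret = fixup_offset(&r, &off);

	printf("ret = %d, off = 0x%08x\n", ret, (unsigned)off);
	return 0;
}
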
@@ -869,7 +868,7 @@ static void radeon_cp_dispatch_clear(drm_device_t * dev,
869 */ 868 */
870 dev_priv->sarea_priv->ctx_owner = 0; 869 dev_priv->sarea_priv->ctx_owner = 0;
871 870
872 if ((dev_priv->flags & CHIP_HAS_HIERZ) 871 if ((dev_priv->flags & RADEON_HAS_HIERZ)
873 && (flags & RADEON_USE_HIERZ)) { 872 && (flags & RADEON_USE_HIERZ)) {
874 /* FIXME : reverse engineer that for Rx00 cards */ 873 /* FIXME : reverse engineer that for Rx00 cards */
875 /* FIXME : the mask supposedly contains low-res z values. So can't set 874 /* FIXME : the mask supposedly contains low-res z values. So can't set
@@ -914,7 +913,7 @@ static void radeon_cp_dispatch_clear(drm_device_t * dev,
914 for (i = 0; i < nbox; i++) { 913 for (i = 0; i < nbox; i++) {
915 int tileoffset, nrtilesx, nrtilesy, j; 914 int tileoffset, nrtilesx, nrtilesy, j;
916 /* it looks like r200 needs rv-style clears, at least if hierz is not enabled? */ 915 /* it looks like r200 needs rv-style clears, at least if hierz is not enabled? */
917 if ((dev_priv->flags & CHIP_HAS_HIERZ) 916 if ((dev_priv->flags & RADEON_HAS_HIERZ)
918 && !(dev_priv->microcode_version == UCODE_R200)) { 917 && !(dev_priv->microcode_version == UCODE_R200)) {
919 /* FIXME : figure this out for r200 (when hierz is enabled). Or 918 /* FIXME : figure this out for r200 (when hierz is enabled). Or
920 maybe r200 actually doesn't need to put the low-res z value into 919 maybe r200 actually doesn't need to put the low-res z value into
@@ -998,7 +997,7 @@ static void radeon_cp_dispatch_clear(drm_device_t * dev,
998 } 997 }
999 998
1000 /* TODO don't always clear all hi-level z tiles */ 999 /* TODO don't always clear all hi-level z tiles */
1001 if ((dev_priv->flags & CHIP_HAS_HIERZ) 1000 if ((dev_priv->flags & RADEON_HAS_HIERZ)
1002 && (dev_priv->microcode_version == UCODE_R200) 1001 && (dev_priv->microcode_version == UCODE_R200)
1003 && (flags & RADEON_USE_HIERZ)) 1002 && (flags & RADEON_USE_HIERZ))
1004 /* r100 and cards without hierarchical z-buffer have no high-level z-buffer */ 1003 /* r100 and cards without hierarchical z-buffer have no high-level z-buffer */
@@ -1270,9 +1269,9 @@ static void radeon_cp_dispatch_swap(drm_device_t * dev)
1270 1269
1271 DRM_DEBUG("dispatch swap %d,%d-%d,%d\n", x, y, w, h); 1270 DRM_DEBUG("dispatch swap %d,%d-%d,%d\n", x, y, w, h);
1272 1271
1273 BEGIN_RING(7); 1272 BEGIN_RING(9);
1274 1273
1275 OUT_RING(CP_PACKET3(RADEON_CNTL_BITBLT_MULTI, 5)); 1274 OUT_RING(CP_PACKET0(RADEON_DP_GUI_MASTER_CNTL, 0));
1276 OUT_RING(RADEON_GMC_SRC_PITCH_OFFSET_CNTL | 1275 OUT_RING(RADEON_GMC_SRC_PITCH_OFFSET_CNTL |
1277 RADEON_GMC_DST_PITCH_OFFSET_CNTL | 1276 RADEON_GMC_DST_PITCH_OFFSET_CNTL |
1278 RADEON_GMC_BRUSH_NONE | 1277 RADEON_GMC_BRUSH_NONE |
@@ -1284,6 +1283,7 @@ static void radeon_cp_dispatch_swap(drm_device_t * dev)
1284 1283
1285 /* Make this work even if front & back are flipped: 1284 /* Make this work even if front & back are flipped:
1286 */ 1285 */
1286 OUT_RING(CP_PACKET0(RADEON_SRC_PITCH_OFFSET, 1));
1287 if (dev_priv->current_page == 0) { 1287 if (dev_priv->current_page == 0) {
1288 OUT_RING(dev_priv->back_pitch_offset); 1288 OUT_RING(dev_priv->back_pitch_offset);
1289 OUT_RING(dev_priv->front_pitch_offset); 1289 OUT_RING(dev_priv->front_pitch_offset);
@@ -1292,6 +1292,7 @@ static void radeon_cp_dispatch_swap(drm_device_t * dev)
1292 OUT_RING(dev_priv->back_pitch_offset); 1292 OUT_RING(dev_priv->back_pitch_offset);
1293 } 1293 }
1294 1294
1295 OUT_RING(CP_PACKET0(RADEON_SRC_X_Y, 2));
1295 OUT_RING((x << 16) | y); 1296 OUT_RING((x << 16) | y);
1296 OUT_RING((x << 16) | y); 1297 OUT_RING((x << 16) | y);
1297 OUT_RING((w << 16) | h); 1298 OUT_RING((w << 16) | h);
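
The swap-blit hunks above replace the single CNTL_BITBLT_MULTI packet with three CP_PACKET0 register writes: DP_GUI_MASTER_CNTL, a two-dword write starting at SRC_PITCH_OFFSET, and a three-dword write starting at SRC_X_Y. That is why the reservation grows from BEGIN_RING(7) to BEGIN_RING(9): three packet headers plus six payload dwords. A toy sketch of that reservation invariant follows; the ring structure and helpers are stand-ins, not the driver's macros, and the dword values are placeholders.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Toy model of BEGIN_RING/OUT_RING/ADVANCE_RING bookkeeping; it only
 * checks that the emitted dword count matches the reservation. */
struct ring {
	uint32_t buf[16];
	unsigned reserved, used;
};

static void begin_ring(struct ring *r, unsigned n) { r->reserved = n; r->used = 0; }
static void out_ring(struct ring *r, uint32_t v)
{
	assert(r->used < r->reserved);	/* would overflow the reservation */
	r->buf[r->used++] = v;
}
static void advance_ring(struct ring *r) { assert(r->used == r->reserved); }

int main(void)
{
	struct ring r;

	begin_ring(&r, 9);			/* was 7 with the BITBLT_MULTI packet */
	out_ring(&r, 0xd0000001); out_ring(&r, 0xd0000002);	/* GUI_MASTER_CNTL header + value */
	out_ring(&r, 0xd0000003); out_ring(&r, 0xd0000004);	/* pitch-offset header + two offsets */
	out_ring(&r, 0xd0000005);
	out_ring(&r, 0xd0000006); out_ring(&r, 0xd0000007);	/* SRC_X_Y header + three dwords */
	out_ring(&r, 0xd0000008); out_ring(&r, 0xd0000009);
	advance_ring(&r);
	printf("emitted %u dwords\n", r.used);
	return 0;
}
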
@@ -2987,16 +2988,21 @@ static int radeon_cp_getparam(DRM_IOCTL_ARGS)
2987 case RADEON_PARAM_GART_TEX_HANDLE: 2988 case RADEON_PARAM_GART_TEX_HANDLE:
2988 value = dev_priv->gart_textures_offset; 2989 value = dev_priv->gart_textures_offset;
2989 break; 2990 break;
2990 2991 case RADEON_PARAM_SCRATCH_OFFSET:
2992 if (!dev_priv->writeback_works)
2993 return DRM_ERR(EINVAL);
2994 value = RADEON_SCRATCH_REG_OFFSET;
2995 break;
2991 case RADEON_PARAM_CARD_TYPE: 2996 case RADEON_PARAM_CARD_TYPE:
2992 if (dev_priv->flags & CHIP_IS_PCIE) 2997 if (dev_priv->flags & RADEON_IS_PCIE)
2993 value = RADEON_CARD_PCIE; 2998 value = RADEON_CARD_PCIE;
2994 else if (dev_priv->flags & CHIP_IS_AGP) 2999 else if (dev_priv->flags & RADEON_IS_AGP)
2995 value = RADEON_CARD_AGP; 3000 value = RADEON_CARD_AGP;
2996 else 3001 else
2997 value = RADEON_CARD_PCI; 3002 value = RADEON_CARD_PCI;
2998 break; 3003 break;
2999 default: 3004 default:
3005 DRM_DEBUG("Invalid parameter %d\n", param.param);
3000 return DRM_ERR(EINVAL); 3006 return DRM_ERR(EINVAL);
3001 } 3007 }
3002 3008
diff --git a/drivers/char/drm/sis_drv.c b/drivers/char/drm/sis_drv.c
index 5e9dc86f2956..3d5b3218b6ff 100644
--- a/drivers/char/drm/sis_drv.c
+++ b/drivers/char/drm/sis_drv.c
@@ -35,11 +35,44 @@ static struct pci_device_id pciidlist[] = {
35 sisdrv_PCI_IDS 35 sisdrv_PCI_IDS
36}; 36};
37 37
38static int sis_driver_load(drm_device_t *dev, unsigned long chipset)
39{
40 drm_sis_private_t *dev_priv;
41 int ret;
42
43 dev_priv = drm_calloc(1, sizeof(drm_sis_private_t), DRM_MEM_DRIVER);
44 if (dev_priv == NULL)
45 return DRM_ERR(ENOMEM);
46
47 dev->dev_private = (void *)dev_priv;
48 dev_priv->chipset = chipset;
49 ret = drm_sman_init(&dev_priv->sman, 2, 12, 8);
50 if (ret) {
51 drm_free(dev_priv, sizeof(dev_priv), DRM_MEM_DRIVER);
52 }
53
54 return ret;
55}
56
57static int sis_driver_unload(drm_device_t *dev)
58{
59 drm_sis_private_t *dev_priv = dev->dev_private;
60
61 drm_sman_takedown(&dev_priv->sman);
62 drm_free(dev_priv, sizeof(*dev_priv), DRM_MEM_DRIVER);
63
64 return 0;
65}
66
38static struct drm_driver driver = { 67static struct drm_driver driver = {
39 .driver_features = DRIVER_USE_AGP | DRIVER_USE_MTRR, 68 .driver_features = DRIVER_USE_AGP | DRIVER_USE_MTRR,
40 .context_ctor = sis_init_context, 69 .load = sis_driver_load,
41 .context_dtor = sis_final_context, 70 .unload = sis_driver_unload,
42 .reclaim_buffers = drm_core_reclaim_buffers, 71 .context_dtor = NULL,
72 .dma_quiescent = sis_idle,
73 .reclaim_buffers = NULL,
74 .reclaim_buffers_locked = sis_reclaim_buffers_locked,
75 .lastclose = sis_lastclose,
43 .get_map_ofs = drm_core_get_map_ofs, 76 .get_map_ofs = drm_core_get_map_ofs,
44 .get_reg_ofs = drm_core_get_reg_ofs, 77 .get_reg_ofs = drm_core_get_reg_ofs,
45 .ioctls = sis_ioctls, 78 .ioctls = sis_ioctls,
diff --git a/drivers/char/drm/sis_drv.h b/drivers/char/drm/sis_drv.h
index e218e5269503..2b8d6f6ed7c0 100644
--- a/drivers/char/drm/sis_drv.h
+++ b/drivers/char/drm/sis_drv.h
@@ -31,23 +31,39 @@
31/* General customization: 31/* General customization:
32 */ 32 */
33 33
34#define DRIVER_AUTHOR "SIS" 34#define DRIVER_AUTHOR "SIS, Tungsten Graphics"
35#define DRIVER_NAME "sis" 35#define DRIVER_NAME "sis"
36#define DRIVER_DESC "SIS 300/630/540" 36#define DRIVER_DESC "SIS 300/630/540"
37#define DRIVER_DATE "20030826" 37#define DRIVER_DATE "20060704"
38#define DRIVER_MAJOR 1 38#define DRIVER_MAJOR 1
39#define DRIVER_MINOR 1 39#define DRIVER_MINOR 2
40#define DRIVER_PATCHLEVEL 0 40#define DRIVER_PATCHLEVEL 1
41 41
42#include "sis_ds.h" 42enum sis_family {
43 SIS_OTHER = 0,
44 SIS_CHIP_315 = 1,
45};
46
47#include "drm_sman.h"
48
49#define SIS_BASE (dev_priv->mmio)
50#define SIS_READ(reg) DRM_READ32(SIS_BASE, reg);
51#define SIS_WRITE(reg, val) DRM_WRITE32(SIS_BASE, reg, val);
43 52
44typedef struct drm_sis_private { 53typedef struct drm_sis_private {
45 memHeap_t *AGPHeap; 54 drm_local_map_t *mmio;
46 memHeap_t *FBHeap; 55 unsigned int idle_fault;
56 drm_sman_t sman;
57 unsigned int chipset;
58 int vram_initialized;
59 int agp_initialized;
60 unsigned long vram_offset;
61 unsigned long agp_offset;
47} drm_sis_private_t; 62} drm_sis_private_t;
48 63
49extern int sis_init_context(drm_device_t * dev, int context); 64extern int sis_idle(drm_device_t *dev);
50extern int sis_final_context(drm_device_t * dev, int context); 65extern void sis_reclaim_buffers_locked(drm_device_t *dev, struct file *filp);
66extern void sis_lastclose(drm_device_t *dev);
51 67
52extern drm_ioctl_desc_t sis_ioctls[]; 68extern drm_ioctl_desc_t sis_ioctls[];
53extern int sis_max_ioctl; 69extern int sis_max_ioctl;
diff --git a/drivers/char/drm/sis_ds.c b/drivers/char/drm/sis_ds.c
deleted file mode 100644
index 2e485d482943..000000000000
--- a/drivers/char/drm/sis_ds.c
+++ /dev/null
@@ -1,299 +0,0 @@
1/* sis_ds.c -- Private header for Direct Rendering Manager -*- linux-c -*-
2 * Created: Mon Jan 4 10:05:05 1999 by sclin@sis.com.tw
3 *
4 * Copyright 2000 Silicon Integrated Systems Corp, Inc., HsinChu, Taiwan.
5 * All rights reserved.
6 *
7 * Permission is hereby granted, free of charge, to any person obtaining a
8 * copy of this software and associated documentation files (the "Software"),
9 * to deal in the Software without restriction, including without limitation
10 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
11 * and/or sell copies of the Software, and to permit persons to whom the
12 * Software is furnished to do so, subject to the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the next
15 * paragraph) shall be included in all copies or substantial portions of the
16 * Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
21 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
22 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
23 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
24 * DEALINGS IN THE SOFTWARE.
25 *
26 * Authors:
27 * Sung-Ching Lin <sclin@sis.com.tw>
28 *
29 */
30
31#include "drmP.h"
32#include "drm.h"
33#include "sis_ds.h"
34
35/* Set Data Structure, not check repeated value
36 * temporarily used
37 */
38
39set_t *setInit(void)
40{
41 int i;
42 set_t *set;
43
44 set = (set_t *) drm_alloc(sizeof(set_t), DRM_MEM_DRIVER);
45 if (set != NULL) {
46 for (i = 0; i < SET_SIZE; i++) {
47 set->list[i].free_next = i + 1;
48 set->list[i].alloc_next = -1;
49 }
50 set->list[SET_SIZE - 1].free_next = -1;
51 set->free = 0;
52 set->alloc = -1;
53 set->trace = -1;
54 }
55 return set;
56}
57
58int setAdd(set_t * set, ITEM_TYPE item)
59{
60 int free = set->free;
61
62 if (free != -1) {
63 set->list[free].val = item;
64 set->free = set->list[free].free_next;
65 } else {
66 return 0;
67 }
68
69 set->list[free].alloc_next = set->alloc;
70 set->alloc = free;
71 set->list[free].free_next = -1;
72
73 return 1;
74}
75
76int setDel(set_t * set, ITEM_TYPE item)
77{
78 int alloc = set->alloc;
79 int prev = -1;
80
81 while (alloc != -1) {
82 if (set->list[alloc].val == item) {
83 if (prev != -1)
84 set->list[prev].alloc_next =
85 set->list[alloc].alloc_next;
86 else
87 set->alloc = set->list[alloc].alloc_next;
88 break;
89 }
90 prev = alloc;
91 alloc = set->list[alloc].alloc_next;
92 }
93
94 if (alloc == -1)
95 return 0;
96
97 set->list[alloc].free_next = set->free;
98 set->free = alloc;
99 set->list[alloc].alloc_next = -1;
100
101 return 1;
102}
103
104/* setFirst -> setAdd -> setNext is wrong */
105
106int setFirst(set_t * set, ITEM_TYPE * item)
107{
108 if (set->alloc == -1)
109 return 0;
110
111 *item = set->list[set->alloc].val;
112 set->trace = set->list[set->alloc].alloc_next;
113
114 return 1;
115}
116
117int setNext(set_t * set, ITEM_TYPE * item)
118{
119 if (set->trace == -1)
120 return 0;
121
122 *item = set->list[set->trace].val;
123 set->trace = set->list[set->trace].alloc_next;
124
125 return 1;
126}
127
128int setDestroy(set_t * set)
129{
130 drm_free(set, sizeof(set_t), DRM_MEM_DRIVER);
131
132 return 1;
133}
134
135/*
136 * GLX Hardware Device Driver common code
137 * Copyright (C) 1999 Wittawat Yamwong
138 *
139 * Permission is hereby granted, free of charge, to any person obtaining a
140 * copy of this software and associated documentation files (the "Software"),
141 * to deal in the Software without restriction, including without limitation
142 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
143 * and/or sell copies of the Software, and to permit persons to whom the
144 * Software is furnished to do so, subject to the following conditions:
145 *
146 * The above copyright notice and this permission notice shall be included
147 * in all copies or substantial portions of the Software.
148 *
149 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
150 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
151 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
152 * WITTAWAT YAMWONG, OR ANY OTHER CONTRIBUTORS BE LIABLE FOR ANY CLAIM,
153 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
154 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE
155 * OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
156 *
157 */
158
159#define ISFREE(bptr) ((bptr)->free)
160
161memHeap_t *mmInit(int ofs, int size)
162{
163 PMemBlock blocks;
164
165 if (size <= 0)
166 return NULL;
167
168 blocks = (TMemBlock *) drm_calloc(1, sizeof(TMemBlock), DRM_MEM_DRIVER);
169 if (blocks != NULL) {
170 blocks->ofs = ofs;
171 blocks->size = size;
172 blocks->free = 1;
173 return (memHeap_t *) blocks;
174 } else
175 return NULL;
176}
177
178/* Checks if a pointer 'b' is part of the heap 'heap' */
179int mmBlockInHeap(memHeap_t * heap, PMemBlock b)
180{
181 TMemBlock *p;
182
183 if (heap == NULL || b == NULL)
184 return 0;
185
186 p = heap;
187 while (p != NULL && p != b) {
188 p = p->next;
189 }
190 if (p == b)
191 return 1;
192 else
193 return 0;
194}
195
196static TMemBlock *SliceBlock(TMemBlock * p,
197 int startofs, int size,
198 int reserved, int alignment)
199{
200 TMemBlock *newblock;
201
202 /* break left */
203 if (startofs > p->ofs) {
204 newblock = (TMemBlock *) drm_calloc(1, sizeof(TMemBlock),
205 DRM_MEM_DRIVER);
206 newblock->ofs = startofs;
207 newblock->size = p->size - (startofs - p->ofs);
208 newblock->free = 1;
209 newblock->next = p->next;
210 p->size -= newblock->size;
211 p->next = newblock;
212 p = newblock;
213 }
214
215 /* break right */
216 if (size < p->size) {
217 newblock = (TMemBlock *) drm_calloc(1, sizeof(TMemBlock),
218 DRM_MEM_DRIVER);
219 newblock->ofs = startofs + size;
220 newblock->size = p->size - size;
221 newblock->free = 1;
222 newblock->next = p->next;
223 p->size = size;
224 p->next = newblock;
225 }
226
227 /* p = middle block */
228 p->align = alignment;
229 p->free = 0;
230 p->reserved = reserved;
231 return p;
232}
233
234PMemBlock mmAllocMem(memHeap_t * heap, int size, int align2, int startSearch)
235{
236 int mask, startofs, endofs;
237 TMemBlock *p;
238
239 if (heap == NULL || align2 < 0 || size <= 0)
240 return NULL;
241
242 mask = (1 << align2) - 1;
243 startofs = 0;
244 p = (TMemBlock *) heap;
245 while (p != NULL) {
246 if (ISFREE(p)) {
247 startofs = (p->ofs + mask) & ~mask;
248 if (startofs < startSearch) {
249 startofs = startSearch;
250 }
251 endofs = startofs + size;
252 if (endofs <= (p->ofs + p->size))
253 break;
254 }
255 p = p->next;
256 }
257 if (p == NULL)
258 return NULL;
259 p = SliceBlock(p, startofs, size, 0, mask + 1);
260 p->heap = heap;
261 return p;
262}
263
264static __inline__ int Join2Blocks(TMemBlock * p)
265{
266 if (p->free && p->next && p->next->free) {
267 TMemBlock *q = p->next;
268 p->size += q->size;
269 p->next = q->next;
270 drm_free(q, sizeof(TMemBlock), DRM_MEM_DRIVER);
271 return 1;
272 }
273 return 0;
274}
275
276int mmFreeMem(PMemBlock b)
277{
278 TMemBlock *p, *prev;
279
280 if (b == NULL)
281 return 0;
282 if (b->heap == NULL)
283 return -1;
284
285 p = b->heap;
286 prev = NULL;
287 while (p != NULL && p != b) {
288 prev = p;
289 p = p->next;
290 }
291 if (p == NULL || p->free || p->reserved)
292 return -1;
293
294 p->free = 1;
295 Join2Blocks(p);
296 if (prev)
297 Join2Blocks(prev);
298 return 0;
299}
diff --git a/drivers/char/drm/sis_ds.h b/drivers/char/drm/sis_ds.h
deleted file mode 100644
index 94f2b4728b63..000000000000
--- a/drivers/char/drm/sis_ds.h
+++ /dev/null
@@ -1,146 +0,0 @@
1/* sis_ds.h -- Private header for Direct Rendering Manager -*- linux-c -*-
2 * Created: Mon Jan 4 10:05:05 1999 by sclin@sis.com.tw
3 */
4/*
5 * Copyright 2000 Silicon Integrated Systems Corp, Inc., HsinChu, Taiwan.
6 * All rights reserved.
7 *
8 * Permission is hereby granted, free of charge, to any person obtaining a
9 * copy of this software and associated documentation files (the "Software"),
10 * to deal in the Software without restriction, including without limitation
11 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
12 * and/or sell copies of the Software, and to permit persons to whom the
13 * Software is furnished to do so, subject to the following conditions:
14 *
15 * The above copyright notice and this permission notice (including the next
16 * paragraph) shall be included in all copies or substantial portions of the
17 * Software.
18 *
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
22 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
23 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
24 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
25 * DEALINGS IN THE SOFTWARE.
26 *
27 * Authors:
28 * Sung-Ching Lin <sclin@sis.com.tw>
29 *
30 */
31
32#ifndef __SIS_DS_H__
33#define __SIS_DS_H__
34
35/* Set Data Structure */
36
37#define SET_SIZE 5000
38
39typedef unsigned long ITEM_TYPE;
40
41typedef struct {
42 ITEM_TYPE val;
43 int alloc_next, free_next;
44} list_item_t;
45
46typedef struct {
47 int alloc;
48 int free;
49 int trace;
50 list_item_t list[SET_SIZE];
51} set_t;
52
53set_t *setInit(void);
54int setAdd(set_t * set, ITEM_TYPE item);
55int setDel(set_t * set, ITEM_TYPE item);
56int setFirst(set_t * set, ITEM_TYPE * item);
57int setNext(set_t * set, ITEM_TYPE * item);
58int setDestroy(set_t * set);
59
60/*
61 * GLX Hardware Device Driver common code
62 * Copyright (C) 1999 Wittawat Yamwong
63 *
64 * Permission is hereby granted, free of charge, to any person obtaining a
65 * copy of this software and associated documentation files (the "Software"),
66 * to deal in the Software without restriction, including without limitation
67 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
68 * and/or sell copies of the Software, and to permit persons to whom the
69 * Software is furnished to do so, subject to the following conditions:
70 *
71 * The above copyright notice and this permission notice shall be included
72 * in all copies or substantial portions of the Software.
73 *
74 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
75 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
76 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
77 * WITTAWAT YAMWONG, OR ANY OTHER CONTRIBUTORS BE LIABLE FOR ANY CLAIM,
78 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
79 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE
80 * OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
81 *
82 */
83
84struct mem_block_t {
85 struct mem_block_t *next;
86 struct mem_block_t *heap;
87 int ofs, size;
88 int align;
89 unsigned int free:1;
90 unsigned int reserved:1;
91};
92typedef struct mem_block_t TMemBlock;
93typedef struct mem_block_t *PMemBlock;
94
95/* a heap is just the first block in a chain */
96typedef struct mem_block_t memHeap_t;
97
98static __inline__ int mmBlockSize(PMemBlock b)
99{
100 return b->size;
101}
102
103static __inline__ int mmOffset(PMemBlock b)
104{
105 return b->ofs;
106}
107
108static __inline__ void mmMarkReserved(PMemBlock b)
109{
110 b->reserved = 1;
111}
112
113/*
114 * input: total size in bytes
115 * return: a heap pointer if OK, NULL if error
116 */
117memHeap_t *mmInit(int ofs, int size);
118
119/*
120 * Allocate 'size' bytes with 2^align2 bytes alignment,
121 * restrict the search to free memory after 'startSearch'
122 * depth and back buffers should be in different 4mb banks
123 * to get better page hits if possible
124 * input: size = size of block
125 * align2 = 2^align2 bytes alignment
126 * startSearch = linear offset from start of heap to begin search
127 * return: pointer to the allocated block, 0 if error
128 */
129PMemBlock mmAllocMem(memHeap_t * heap, int size, int align2, int startSearch);
130
131/*
132 * Returns 1 if the block 'b' is part of the heap 'heap'
133 */
134int mmBlockInHeap(PMemBlock heap, PMemBlock b);
135
136/*
137 * Free block starts at offset
138 * input: pointer to a block
139 * return: 0 if OK, -1 if error
140 */
141int mmFreeMem(PMemBlock b);
142
143/* For debuging purpose. */
144void mmDumpMemInfo(memHeap_t * mmInit);
145
146#endif /* __SIS_DS_H__ */
diff --git a/drivers/char/drm/sis_mm.c b/drivers/char/drm/sis_mm.c
index 5e9936bc307f..d26f5dbb7853 100644
--- a/drivers/char/drm/sis_mm.c
+++ b/drivers/char/drm/sis_mm.c
@@ -1,414 +1,348 @@
1/* sis_mm.c -- Private header for Direct Rendering Manager -*- linux-c -*- 1/**************************************************************************
2 * Created: Mon Jan 4 10:05:05 1999 by sclin@sis.com.tw
3 * 2 *
4 * Copyright 2000 Silicon Integrated Systems Corp, Inc., HsinChu, Taiwan. 3 * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA.
5 * All rights reserved. 4 * All Rights Reserved.
6 * 5 *
7 * Permission is hereby granted, free of charge, to any person obtaining a 6 * Permission is hereby granted, free of charge, to any person obtaining a
8 * copy of this software and associated documentation files (the "Software"), 7 * copy of this software and associated documentation files (the
9 * to deal in the Software without restriction, including without limitation 8 * "Software"), to deal in the Software without restriction, including
10 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 9 * without limitation the rights to use, copy, modify, merge, publish,
11 * and/or sell copies of the Software, and to permit persons to whom the 10 * distribute, sub license, and/or sell copies of the Software, and to
12 * Software is furnished to do so, subject to the following conditions: 11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 * 13 *
14 * The above copyright notice and this permission notice (including the next 14 * The above copyright notice and this permission notice (including the
15 * paragraph) shall be included in all copies or substantial portions of the 15 * next paragraph) shall be included in all copies or substantial portions
16 * Software. 16 * of the Software.
17 * 17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR 21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24 * DEALINGS IN THE SOFTWARE. 24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
25 * 25 *
26 * Authors:
27 * Sung-Ching Lin <sclin@sis.com.tw>
28 * 26 *
27 **************************************************************************/
28
29/*
30 * Authors:
31 * Thomas Hellström <thomas-at-tungstengraphics-dot-com>
29 */ 32 */
30 33
31#include "drmP.h" 34#include "drmP.h"
32#include "sis_drm.h" 35#include "sis_drm.h"
33#include "sis_drv.h" 36#include "sis_drv.h"
34#include "sis_ds.h" 37
35#if defined(__linux__) && defined(CONFIG_FB_SIS)
36#include <video/sisfb.h> 38#include <video/sisfb.h>
37#endif
38 39
39#define MAX_CONTEXT 100
40#define VIDEO_TYPE 0 40#define VIDEO_TYPE 0
41#define AGP_TYPE 1 41#define AGP_TYPE 1
42 42
43typedef struct {
44 int used;
45 int context;
46 set_t *sets[2]; /* 0 for video, 1 for AGP */
47} sis_context_t;
48 43
49static sis_context_t global_ppriv[MAX_CONTEXT]; 44#if defined(CONFIG_FB_SIS)
45/* fb management via fb device */
50 46
51static int add_alloc_set(int context, int type, unsigned int val) 47#define SIS_MM_ALIGN_SHIFT 0
52{ 48#define SIS_MM_ALIGN_MASK 0
53 int i, retval = 0;
54 49
55 for (i = 0; i < MAX_CONTEXT; i++) { 50static void *sis_sman_mm_allocate(void *private, unsigned long size,
56 if (global_ppriv[i].used && global_ppriv[i].context == context) { 51 unsigned alignment)
57 retval = setAdd(global_ppriv[i].sets[type], val);
58 break;
59 }
60 }
61 return retval;
62}
63
64static int del_alloc_set(int context, int type, unsigned int val)
65{ 52{
66 int i, retval = 0; 53 struct sis_memreq req;
67 54
68 for (i = 0; i < MAX_CONTEXT; i++) { 55 req.size = size;
69 if (global_ppriv[i].used && global_ppriv[i].context == context) { 56 sis_malloc(&req);
70 retval = setDel(global_ppriv[i].sets[type], val); 57 if (req.size == 0)
71 break; 58 return NULL;
72 } 59 else
73 } 60 return (void *)~req.offset;
74 return retval;
75} 61}
76 62
77/* fb management via fb device */ 63static void sis_sman_mm_free(void *private, void *ref)
78#if defined(__linux__) && defined(CONFIG_FB_SIS)
79
80static int sis_fb_init(DRM_IOCTL_ARGS)
81{ 64{
82 return 0; 65 sis_free(~((unsigned long)ref));
83} 66}
84 67
85static int sis_fb_alloc(DRM_IOCTL_ARGS) 68static void sis_sman_mm_destroy(void *private)
86{ 69{
87 drm_sis_mem_t fb; 70 ;
88 struct sis_memreq req;
89 drm_sis_mem_t __user *argp = (drm_sis_mem_t __user *)data;
90 int retval = 0;
91
92 DRM_COPY_FROM_USER_IOCTL(fb, argp, sizeof(fb));
93
94 req.size = fb.size;
95 sis_malloc(&req);
96 if (req.offset) {
97 /* TODO */
98 fb.offset = req.offset;
99 fb.free = req.offset;
100 if (!add_alloc_set(fb.context, VIDEO_TYPE, fb.free)) {
101 DRM_DEBUG("adding to allocation set fails\n");
102 sis_free(req.offset);
103 retval = DRM_ERR(EINVAL);
104 }
105 } else {
106 fb.offset = 0;
107 fb.size = 0;
108 fb.free = 0;
109 }
110
111 DRM_COPY_TO_USER_IOCTL(argp, fb, sizeof(fb));
112
113 DRM_DEBUG("alloc fb, size = %d, offset = %d\n", fb.size, req.offset);
114
115 return retval;
116} 71}
117 72
118static int sis_fb_free(DRM_IOCTL_ARGS) 73static unsigned long sis_sman_mm_offset(void *private, void *ref)
119{ 74{
120 drm_sis_mem_t fb; 75 return ~((unsigned long)ref);
121 int retval = 0; 76}
122
123 DRM_COPY_FROM_USER_IOCTL(fb, (drm_sis_mem_t __user *) data, sizeof(fb));
124
125 if (!fb.free)
126 return DRM_ERR(EINVAL);
127 77
128 if (!del_alloc_set(fb.context, VIDEO_TYPE, fb.free)) 78#else /* CONFIG_FB_SIS */
129 retval = DRM_ERR(EINVAL);
130 sis_free(fb.free);
131 79
132 DRM_DEBUG("free fb, offset = 0x%lx\n", fb.free); 80#define SIS_MM_ALIGN_SHIFT 4
81#define SIS_MM_ALIGN_MASK ( (1 << SIS_MM_ALIGN_SHIFT) - 1)
133 82
134 return retval; 83#endif /* CONFIG_FB_SIS */
135}
136 84
137#else
138
139/* Called by the X Server to initialize the FB heap. Allocations will fail
140 * unless this is called. Offset is the beginning of the heap from the
141 * framebuffer offset (MaxXFBMem in XFree86).
142 *
143 * Memory layout according to Thomas Winischofer:
144 * |------------------|DDDDDDDDDDDDDDDDDDDDDDDDDDDDD|HHHH|CCCCCCCCCCC|
145 *
146 * X driver/sisfb HW- Command-
147 * framebuffer memory DRI heap Cursor queue
148 */
149static int sis_fb_init(DRM_IOCTL_ARGS) 85static int sis_fb_init(DRM_IOCTL_ARGS)
150{ 86{
151 DRM_DEVICE; 87 DRM_DEVICE;
152 drm_sis_private_t *dev_priv = dev->dev_private; 88 drm_sis_private_t *dev_priv = dev->dev_private;
153 drm_sis_fb_t fb; 89 drm_sis_fb_t fb;
90 int ret;
154 91
155 DRM_COPY_FROM_USER_IOCTL(fb, (drm_sis_fb_t __user *) data, sizeof(fb)); 92 DRM_COPY_FROM_USER_IOCTL(fb, (drm_sis_fb_t __user *) data, sizeof(fb));
156 93
157 if (dev_priv == NULL) { 94 mutex_lock(&dev->struct_mutex);
158 dev->dev_private = drm_calloc(1, sizeof(drm_sis_private_t), 95#if defined(CONFIG_FB_SIS)
159 DRM_MEM_DRIVER); 96 {
160 dev_priv = dev->dev_private; 97 drm_sman_mm_t sman_mm;
161 if (dev_priv == NULL) 98 sman_mm.private = (void *)0xFFFFFFFF;
162 return ENOMEM; 99 sman_mm.allocate = sis_sman_mm_allocate;
100 sman_mm.free = sis_sman_mm_free;
101 sman_mm.destroy = sis_sman_mm_destroy;
102 sman_mm.offset = sis_sman_mm_offset;
103 ret =
104 drm_sman_set_manager(&dev_priv->sman, VIDEO_TYPE, &sman_mm);
163 } 105 }
106#else
107 ret = drm_sman_set_range(&dev_priv->sman, VIDEO_TYPE, 0,
108 fb.size >> SIS_MM_ALIGN_SHIFT);
109#endif
164 110
165 if (dev_priv->FBHeap != NULL) 111 if (ret) {
166 return DRM_ERR(EINVAL); 112 DRM_ERROR("VRAM memory manager initialisation error\n");
113 mutex_unlock(&dev->struct_mutex);
114 return ret;
115 }
167 116
168 dev_priv->FBHeap = mmInit(fb.offset, fb.size); 117 dev_priv->vram_initialized = 1;
118 dev_priv->vram_offset = fb.offset;
169 119
120 mutex_unlock(&dev->struct_mutex);
170 DRM_DEBUG("offset = %u, size = %u", fb.offset, fb.size); 121 DRM_DEBUG("offset = %u, size = %u", fb.offset, fb.size);
171 122
172 return 0; 123 return 0;
173} 124}
174 125
175static int sis_fb_alloc(DRM_IOCTL_ARGS) 126static int sis_drm_alloc(drm_device_t * dev, drm_file_t * priv,
127 unsigned long data, int pool)
176{ 128{
177 DRM_DEVICE;
178 drm_sis_private_t *dev_priv = dev->dev_private; 129 drm_sis_private_t *dev_priv = dev->dev_private;
179 drm_sis_mem_t __user *argp = (drm_sis_mem_t __user *)data; 130 drm_sis_mem_t __user *argp = (drm_sis_mem_t __user *) data;
180 drm_sis_mem_t fb; 131 drm_sis_mem_t mem;
181 PMemBlock block;
182 int retval = 0; 132 int retval = 0;
133 drm_memblock_item_t *item;
134
135 DRM_COPY_FROM_USER_IOCTL(mem, argp, sizeof(mem));
183 136
184 if (dev_priv == NULL || dev_priv->FBHeap == NULL) 137 mutex_lock(&dev->struct_mutex);
138
139 if (0 == ((pool == 0) ? dev_priv->vram_initialized :
140 dev_priv->agp_initialized)) {
141 DRM_ERROR
142 ("Attempt to allocate from uninitialized memory manager.\n");
185 return DRM_ERR(EINVAL); 143 return DRM_ERR(EINVAL);
144 }
186 145
187 DRM_COPY_FROM_USER_IOCTL(fb, argp, sizeof(fb)); 146 mem.size = (mem.size + SIS_MM_ALIGN_MASK) >> SIS_MM_ALIGN_SHIFT;
188 147 item = drm_sman_alloc(&dev_priv->sman, pool, mem.size, 0,
189 block = mmAllocMem(dev_priv->FBHeap, fb.size, 0, 0); 148 (unsigned long)priv);
190 if (block) { 149
191 /* TODO */ 150 mutex_unlock(&dev->struct_mutex);
192 fb.offset = block->ofs; 151 if (item) {
193 fb.free = (unsigned long)block; 152 mem.offset = ((pool == 0) ?
194 if (!add_alloc_set(fb.context, VIDEO_TYPE, fb.free)) { 153 dev_priv->vram_offset : dev_priv->agp_offset) +
195 DRM_DEBUG("adding to allocation set fails\n"); 154 (item->mm->
196 mmFreeMem((PMemBlock) fb.free); 155 offset(item->mm, item->mm_info) << SIS_MM_ALIGN_SHIFT);
197 retval = DRM_ERR(EINVAL); 156 mem.free = item->user_hash.key;
198 } 157 mem.size = mem.size << SIS_MM_ALIGN_SHIFT;
199 } else { 158 } else {
200 fb.offset = 0; 159 mem.offset = 0;
201 fb.size = 0; 160 mem.size = 0;
202 fb.free = 0; 161 mem.free = 0;
162 retval = DRM_ERR(ENOMEM);
203 } 163 }
204 164
205 DRM_COPY_TO_USER_IOCTL(argp, fb, sizeof(fb)); 165 DRM_COPY_TO_USER_IOCTL(argp, mem, sizeof(mem));
206 166
207 DRM_DEBUG("alloc fb, size = %d, offset = %d\n", fb.size, fb.offset); 167 DRM_DEBUG("alloc %d, size = %d, offset = %d\n", pool, mem.size,
168 mem.offset);
208 169
209 return retval; 170 return retval;
210} 171}
211 172
212static int sis_fb_free(DRM_IOCTL_ARGS) 173static int sis_drm_free(DRM_IOCTL_ARGS)
213{ 174{
214 DRM_DEVICE; 175 DRM_DEVICE;
215 drm_sis_private_t *dev_priv = dev->dev_private; 176 drm_sis_private_t *dev_priv = dev->dev_private;
216 drm_sis_mem_t fb; 177 drm_sis_mem_t mem;
178 int ret;
217 179
218 if (dev_priv == NULL || dev_priv->FBHeap == NULL) 180 DRM_COPY_FROM_USER_IOCTL(mem, (drm_sis_mem_t __user *) data,
219 return DRM_ERR(EINVAL); 181 sizeof(mem));
220 182
221 DRM_COPY_FROM_USER_IOCTL(fb, (drm_sis_mem_t __user *) data, sizeof(fb)); 183 mutex_lock(&dev->struct_mutex);
184 ret = drm_sman_free_key(&dev_priv->sman, mem.free);
185 mutex_unlock(&dev->struct_mutex);
186 DRM_DEBUG("free = 0x%lx\n", mem.free);
222 187
223 if (!mmBlockInHeap(dev_priv->FBHeap, (PMemBlock) fb.free)) 188 return ret;
224 return DRM_ERR(EINVAL);
225
226 if (!del_alloc_set(fb.context, VIDEO_TYPE, fb.free))
227 return DRM_ERR(EINVAL);
228 mmFreeMem((PMemBlock) fb.free);
229
230 DRM_DEBUG("free fb, free = 0x%lx\n", fb.free);
231
232 return 0;
233} 189}
234 190
235#endif 191static int sis_fb_alloc(DRM_IOCTL_ARGS)
236 192{
237/* agp memory management */ 193 DRM_DEVICE;
194 return sis_drm_alloc(dev, priv, data, VIDEO_TYPE);
195}
238 196
239static int sis_ioctl_agp_init(DRM_IOCTL_ARGS) 197static int sis_ioctl_agp_init(DRM_IOCTL_ARGS)
240{ 198{
241 DRM_DEVICE; 199 DRM_DEVICE;
242 drm_sis_private_t *dev_priv = dev->dev_private; 200 drm_sis_private_t *dev_priv = dev->dev_private;
243 drm_sis_agp_t agp; 201 drm_sis_agp_t agp;
244 202 int ret;
245 if (dev_priv == NULL) { 203 dev_priv = dev->dev_private;
246 dev->dev_private = drm_calloc(1, sizeof(drm_sis_private_t),
247 DRM_MEM_DRIVER);
248 dev_priv = dev->dev_private;
249 if (dev_priv == NULL)
250 return ENOMEM;
251 }
252
253 if (dev_priv->AGPHeap != NULL)
254 return DRM_ERR(EINVAL);
255 204
256 DRM_COPY_FROM_USER_IOCTL(agp, (drm_sis_agp_t __user *) data, 205 DRM_COPY_FROM_USER_IOCTL(agp, (drm_sis_agp_t __user *) data,
257 sizeof(agp)); 206 sizeof(agp));
207 mutex_lock(&dev->struct_mutex);
208 ret = drm_sman_set_range(&dev_priv->sman, AGP_TYPE, 0,
209 agp.size >> SIS_MM_ALIGN_SHIFT);
210
211 if (ret) {
212 DRM_ERROR("AGP memory manager initialisation error\n");
213 mutex_unlock(&dev->struct_mutex);
214 return ret;
215 }
258 216
259 dev_priv->AGPHeap = mmInit(agp.offset, agp.size); 217 dev_priv->agp_initialized = 1;
218 dev_priv->agp_offset = agp.offset;
219 mutex_unlock(&dev->struct_mutex);
260 220
261 DRM_DEBUG("offset = %u, size = %u", agp.offset, agp.size); 221 DRM_DEBUG("offset = %u, size = %u", agp.offset, agp.size);
262
263 return 0; 222 return 0;
264} 223}
265 224
266static int sis_ioctl_agp_alloc(DRM_IOCTL_ARGS) 225static int sis_ioctl_agp_alloc(DRM_IOCTL_ARGS)
267{ 226{
268 DRM_DEVICE; 227 DRM_DEVICE;
269 drm_sis_private_t *dev_priv = dev->dev_private;
270 drm_sis_mem_t __user *argp = (drm_sis_mem_t __user *)data;
271 drm_sis_mem_t agp;
272 PMemBlock block;
273 int retval = 0;
274 228
275 if (dev_priv == NULL || dev_priv->AGPHeap == NULL) 229 return sis_drm_alloc(dev, priv, data, AGP_TYPE);
276 return DRM_ERR(EINVAL); 230}
277 231
278 DRM_COPY_FROM_USER_IOCTL(agp, argp, sizeof(agp)); 232static drm_local_map_t *sis_reg_init(drm_device_t *dev)
279 233{
280 block = mmAllocMem(dev_priv->AGPHeap, agp.size, 0, 0); 234 drm_map_list_t *entry;
281 if (block) { 235 drm_local_map_t *map;
282 /* TODO */ 236
283 agp.offset = block->ofs; 237 list_for_each_entry(entry, &dev->maplist->head, head) {
284 agp.free = (unsigned long)block; 238 map = entry->map;
285 if (!add_alloc_set(agp.context, AGP_TYPE, agp.free)) { 239 if (!map)
286 DRM_DEBUG("adding to allocation set fails\n"); 240 continue;
287 mmFreeMem((PMemBlock) agp.free); 241 if (map->type == _DRM_REGISTERS) {
288 retval = -1; 242 return map;
289 } 243 }
290 } else {
291 agp.offset = 0;
292 agp.size = 0;
293 agp.free = 0;
294 } 244 }
295 245 return NULL;
296 DRM_COPY_TO_USER_IOCTL(argp, agp, sizeof(agp));
297
298 DRM_DEBUG("alloc agp, size = %d, offset = %d\n", agp.size, agp.offset);
299
300 return retval;
301} 246}
302 247
303static int sis_ioctl_agp_free(DRM_IOCTL_ARGS) 248int sis_idle(drm_device_t *dev)
304{ 249{
305 DRM_DEVICE;
306 drm_sis_private_t *dev_priv = dev->dev_private; 250 drm_sis_private_t *dev_priv = dev->dev_private;
307 drm_sis_mem_t agp; 251 uint32_t idle_reg;
308 252 unsigned long end;
309 if (dev_priv == NULL || dev_priv->AGPHeap == NULL) 253 int i;
310 return DRM_ERR(EINVAL);
311 254
312 DRM_COPY_FROM_USER_IOCTL(agp, (drm_sis_mem_t __user *) data, 255 if (dev_priv->idle_fault)
313 sizeof(agp)); 256 return 0;
314 257
315 if (!mmBlockInHeap(dev_priv->AGPHeap, (PMemBlock) agp.free)) 258 if (dev_priv->mmio == NULL) {
316 return DRM_ERR(EINVAL); 259 dev_priv->mmio = sis_reg_init(dev);
260 if (dev_priv->mmio == NULL) {
261 DRM_ERROR("Could not find register map.\n");
262 return 0;
263 }
264 }
265
266 /*
267 * Implement a device switch here if needed
268 */
269
270 if (dev_priv->chipset != SIS_CHIP_315)
271 return 0;
272
273 /*
274 * Timeout after 3 seconds. We cannot use DRM_WAIT_ON here
275 * because its polling frequency is too low.
276 */
277
278 end = jiffies + (DRM_HZ * 3);
279
280 for (i=0; i<4; ++i) {
281 do {
282 idle_reg = SIS_READ(0x85cc);
283 } while ( !time_after_eq(jiffies, end) &&
284 ((idle_reg & 0x80000000) != 0x80000000));
285 }
317 286
318 mmFreeMem((PMemBlock) agp.free); 287 if (time_after_eq(jiffies, end)) {
319 if (!del_alloc_set(agp.context, AGP_TYPE, agp.free)) 288 DRM_ERROR("Graphics engine idle timeout. "
320 return DRM_ERR(EINVAL); 289 "Disabling idle check\n");
290 dev_priv->idle_fault = 1;
291 }
321 292
322 DRM_DEBUG("free agp, free = 0x%lx\n", agp.free); 293 /*
294 * The caller never sees an error code. It gets trapped
295 * in libdrm.
296 */
323 297
324 return 0; 298 return 0;
325} 299}
326 300
327int sis_init_context(struct drm_device *dev, int context)
328{
329 int i;
330 301
331 for (i = 0; i < MAX_CONTEXT; i++) { 302void sis_lastclose(struct drm_device *dev)
332 if (global_ppriv[i].used && 303{
333 (global_ppriv[i].context == context)) 304 drm_sis_private_t *dev_priv = dev->dev_private;
334 break;
335 }
336 305
337 if (i >= MAX_CONTEXT) { 306 if (!dev_priv)
338 for (i = 0; i < MAX_CONTEXT; i++) { 307 return;
339 if (!global_ppriv[i].used) {
340 global_ppriv[i].context = context;
341 global_ppriv[i].used = 1;
342 global_ppriv[i].sets[0] = setInit();
343 global_ppriv[i].sets[1] = setInit();
344 DRM_DEBUG("init allocation set, socket=%d, "
345 "context = %d\n", i, context);
346 break;
347 }
348 }
349 if ((i >= MAX_CONTEXT) || (global_ppriv[i].sets[0] == NULL) ||
350 (global_ppriv[i].sets[1] == NULL)) {
351 return 0;
352 }
353 }
354 308
355 return 1; 309 mutex_lock(&dev->struct_mutex);
310 drm_sman_cleanup(&dev_priv->sman);
311 dev_priv->vram_initialized = 0;
312 dev_priv->agp_initialized = 0;
313 dev_priv->mmio = NULL;
314 mutex_unlock(&dev->struct_mutex);
356} 315}
357 316
358int sis_final_context(struct drm_device *dev, int context) 317void sis_reclaim_buffers_locked(drm_device_t * dev, struct file *filp)
359{ 318{
360 int i; 319 drm_sis_private_t *dev_priv = dev->dev_private;
320 drm_file_t *priv = filp->private_data;
361 321
362 for (i = 0; i < MAX_CONTEXT; i++) { 322 mutex_lock(&dev->struct_mutex);
363 if (global_ppriv[i].used && 323 if (drm_sman_owner_clean(&dev_priv->sman, (unsigned long)priv)) {
364 (global_ppriv[i].context == context)) 324 mutex_unlock(&dev->struct_mutex);
365 break; 325 return;
366 } 326 }
367 327
368 if (i < MAX_CONTEXT) { 328 if (dev->driver->dma_quiescent) {
369 set_t *set; 329 dev->driver->dma_quiescent(dev);
370 ITEM_TYPE item;
371 int retval;
372
373 DRM_DEBUG("find socket %d, context = %d\n", i, context);
374
375 /* Video Memory */
376 set = global_ppriv[i].sets[0];
377 retval = setFirst(set, &item);
378 while (retval) {
379 DRM_DEBUG("free video memory 0x%lx\n", item);
380#if defined(__linux__) && defined(CONFIG_FB_SIS)
381 sis_free(item);
382#else
383 mmFreeMem((PMemBlock) item);
384#endif
385 retval = setNext(set, &item);
386 }
387 setDestroy(set);
388
389 /* AGP Memory */
390 set = global_ppriv[i].sets[1];
391 retval = setFirst(set, &item);
392 while (retval) {
393 DRM_DEBUG("free agp memory 0x%lx\n", item);
394 mmFreeMem((PMemBlock) item);
395 retval = setNext(set, &item);
396 }
397 setDestroy(set);
398
399 global_ppriv[i].used = 0;
400 } 330 }
401 331
402 return 1; 332 drm_sman_owner_cleanup(&dev_priv->sman, (unsigned long)priv);
333 mutex_unlock(&dev->struct_mutex);
334 return;
403} 335}
404 336
405drm_ioctl_desc_t sis_ioctls[] = { 337drm_ioctl_desc_t sis_ioctls[] = {
406 [DRM_IOCTL_NR(DRM_SIS_FB_ALLOC)] = {sis_fb_alloc, DRM_AUTH}, 338 [DRM_IOCTL_NR(DRM_SIS_FB_ALLOC)] = {sis_fb_alloc, DRM_AUTH},
407 [DRM_IOCTL_NR(DRM_SIS_FB_FREE)] = {sis_fb_free, DRM_AUTH}, 339 [DRM_IOCTL_NR(DRM_SIS_FB_FREE)] = {sis_drm_free, DRM_AUTH},
408 [DRM_IOCTL_NR(DRM_SIS_AGP_INIT)] = {sis_ioctl_agp_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, 340 [DRM_IOCTL_NR(DRM_SIS_AGP_INIT)] =
341 {sis_ioctl_agp_init, DRM_AUTH | DRM_MASTER | DRM_ROOT_ONLY},
409 [DRM_IOCTL_NR(DRM_SIS_AGP_ALLOC)] = {sis_ioctl_agp_alloc, DRM_AUTH}, 342 [DRM_IOCTL_NR(DRM_SIS_AGP_ALLOC)] = {sis_ioctl_agp_alloc, DRM_AUTH},
410 [DRM_IOCTL_NR(DRM_SIS_AGP_FREE)] = {sis_ioctl_agp_free, DRM_AUTH}, 343 [DRM_IOCTL_NR(DRM_SIS_AGP_FREE)] = {sis_drm_free, DRM_AUTH},
411 [DRM_IOCTL_NR(DRM_SIS_FB_INIT)] = {sis_fb_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY} 344 [DRM_IOCTL_NR(DRM_SIS_FB_INIT)] =
345 {sis_fb_init, DRM_AUTH | DRM_MASTER | DRM_ROOT_ONLY}
412}; 346};
413 347
414int sis_max_ioctl = DRM_ARRAY_SIZE(sis_ioctls); 348int sis_max_ioctl = DRM_ARRAY_SIZE(sis_ioctls);
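
The rewritten sis_mm.c above funnels both VRAM and AGP allocations through sis_drm_alloc(): when the sisfb kernel driver is not managing video RAM, request sizes are rounded up to 16-byte units (SIS_MM_ALIGN_SHIFT = 4) before being handed to drm_sman_alloc(), and the block returned by the manager is shifted back to bytes and rebased on vram_offset/agp_offset for the caller. A standalone sketch of just that unit conversion follows; the pool base, request size, and pretend allocator result are invented for the example.

#include <stdint.h>
#include <stdio.h>

#define SIS_MM_ALIGN_SHIFT	4
#define SIS_MM_ALIGN_MASK	((1 << SIS_MM_ALIGN_SHIFT) - 1)

/* Illustrative conversion only; drm_sman_alloc() itself hands back an
 * opaque block, modelled here as a block index in 16-byte units. */
static unsigned long to_units(unsigned long bytes)
{
	return (bytes + SIS_MM_ALIGN_MASK) >> SIS_MM_ALIGN_SHIFT;	/* round up */
}

static unsigned long to_offset(unsigned long pool_base, unsigned long block_units)
{
	return pool_base + (block_units << SIS_MM_ALIGN_SHIFT);
}

int main(void)
{
	unsigned long vram_offset = 0x00800000;	/* made-up pool base */
	unsigned long req = 100;		/* bytes requested */
	unsigned long units = to_units(req);	/* 100 bytes -> 7 units = 112 bytes */
	unsigned long block = 42;		/* pretend allocator result */

	printf("request %lu bytes -> %lu units, user offset 0x%lx, size %lu bytes\n",
	       req, units, to_offset(vram_offset, block),
	       units << SIS_MM_ALIGN_SHIFT);
	return 0;
}
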
diff --git a/drivers/char/drm/via_dmablit.c b/drivers/char/drm/via_dmablit.c
index 78a81a4a99c5..60c1695db300 100644
--- a/drivers/char/drm/via_dmablit.c
+++ b/drivers/char/drm/via_dmablit.c
@@ -41,9 +41,9 @@
41 41
42#include <linux/pagemap.h> 42#include <linux/pagemap.h>
43 43
44#define VIA_PGDN(x) (((unsigned long)(x)) & PAGE_MASK) 44#define VIA_PGDN(x) (((unsigned long)(x)) & PAGE_MASK)
45#define VIA_PGOFF(x) (((unsigned long)(x)) & ~PAGE_MASK) 45#define VIA_PGOFF(x) (((unsigned long)(x)) & ~PAGE_MASK)
46#define VIA_PFN(x) ((unsigned long)(x) >> PAGE_SHIFT) 46#define VIA_PFN(x) ((unsigned long)(x) >> PAGE_SHIFT)
47 47
48typedef struct _drm_via_descriptor { 48typedef struct _drm_via_descriptor {
49 uint32_t mem_addr; 49 uint32_t mem_addr;
@@ -121,19 +121,19 @@ via_map_blit_for_device(struct pci_dev *pdev,
121 121
122 while (line_len > 0) { 122 while (line_len > 0) {
123 123
124 remaining_len = min(PAGE_SIZE-VIA_PGOFF(cur_mem), line_len); 124 remaining_len = min(PAGE_SIZE-VIA_PGOFF(cur_mem), line_len);
125 line_len -= remaining_len; 125 line_len -= remaining_len;
126 126
127 if (mode == 1) { 127 if (mode == 1) {
128 desc_ptr->mem_addr = 128 desc_ptr->mem_addr =
129 dma_map_page(&pdev->dev, 129 dma_map_page(&pdev->dev,
130 vsg->pages[VIA_PFN(cur_mem) - 130 vsg->pages[VIA_PFN(cur_mem) -
131 VIA_PFN(first_addr)], 131 VIA_PFN(first_addr)],
132 VIA_PGOFF(cur_mem), remaining_len, 132 VIA_PGOFF(cur_mem), remaining_len,
133 vsg->direction); 133 vsg->direction);
134 desc_ptr->dev_addr = cur_fb; 134 desc_ptr->dev_addr = cur_fb;
135 135
136 desc_ptr->size = remaining_len; 136 desc_ptr->size = remaining_len;
137 desc_ptr->next = (uint32_t) next; 137 desc_ptr->next = (uint32_t) next;
138 next = dma_map_single(&pdev->dev, desc_ptr, sizeof(*desc_ptr), 138 next = dma_map_single(&pdev->dev, desc_ptr, sizeof(*desc_ptr),
139 DMA_TO_DEVICE); 139 DMA_TO_DEVICE);
@@ -162,7 +162,7 @@ via_map_blit_for_device(struct pci_dev *pdev,
162 162
163/* 163/*
164 * Function that frees up all resources for a blit. It is usable even if the 164 * Function that frees up all resources for a blit. It is usable even if the
165 * blit info has only be partially built as long as the status enum is consistent 165 * blit info has only been partially built as long as the status enum is consistent
166 * with the actual status of the used resources. 166 * with the actual status of the used resources.
167 */ 167 */
168 168
@@ -238,8 +238,11 @@ via_lock_all_dma_pages(drm_via_sg_info_t *vsg, drm_via_dmablit_t *xfer)
238 return DRM_ERR(ENOMEM); 238 return DRM_ERR(ENOMEM);
239 memset(vsg->pages, 0, sizeof(struct page *) * vsg->num_pages); 239 memset(vsg->pages, 0, sizeof(struct page *) * vsg->num_pages);
240 down_read(&current->mm->mmap_sem); 240 down_read(&current->mm->mmap_sem);
241 ret = get_user_pages(current, current->mm, (unsigned long) xfer->mem_addr, 241 ret = get_user_pages(current, current->mm,
242 vsg->num_pages, vsg->direction, 0, vsg->pages, NULL); 242 (unsigned long)xfer->mem_addr,
243 vsg->num_pages,
244 (vsg->direction == DMA_FROM_DEVICE),
245 0, vsg->pages, NULL);
243 246
244 up_read(&current->mm->mmap_sem); 247 up_read(&current->mm->mmap_sem);
245 if (ret != vsg->num_pages) { 248 if (ret != vsg->num_pages) {
@@ -475,9 +478,15 @@ via_dmablit_timer(unsigned long data)
475 if (!timer_pending(&blitq->poll_timer)) { 478 if (!timer_pending(&blitq->poll_timer)) {
476 blitq->poll_timer.expires = jiffies+1; 479 blitq->poll_timer.expires = jiffies+1;
477 add_timer(&blitq->poll_timer); 480 add_timer(&blitq->poll_timer);
478 }
479 via_dmablit_handler(dev, engine, 0);
480 481
482 /*
483 * Rerun handler to delete timer if engines are off, and
484 * to shorten abort latency. This is a little nasty.
485 */
486
487 via_dmablit_handler(dev, engine, 0);
488
489 }
481} 490}
482 491
483 492
@@ -597,15 +606,27 @@ via_build_sg_info(drm_device_t *dev, drm_via_sg_info_t *vsg, drm_via_dmablit_t *
597 * (Not a big limitation anyway.) 606 * (Not a big limitation anyway.)
598 */ 607 */
599 608
600 if (((xfer->mem_stride - xfer->line_length) >= PAGE_SIZE) || 609 if ((xfer->mem_stride - xfer->line_length) >= PAGE_SIZE) {
601 (xfer->mem_stride > 2048*4)) {
602 DRM_ERROR("Too large system memory stride. Stride: %d, " 610 DRM_ERROR("Too large system memory stride. Stride: %d, "
603 "Length: %d\n", xfer->mem_stride, xfer->line_length); 611 "Length: %d\n", xfer->mem_stride, xfer->line_length);
604 return DRM_ERR(EINVAL); 612 return DRM_ERR(EINVAL);
605 } 613 }
606 614
607 if (xfer->num_lines > 2048) { 615 if ((xfer->mem_stride == xfer->line_length) &&
608 DRM_ERROR("Too many PCI DMA bitblt lines.\n"); 616 (xfer->fb_stride == xfer->line_length)) {
617 xfer->mem_stride *= xfer->num_lines;
618 xfer->line_length = xfer->mem_stride;
619 xfer->fb_stride = xfer->mem_stride;
620 xfer->num_lines = 1;
621 }
622
623 /*
624 * Don't lock an arbitrary large number of pages, since that causes a
625 * DOS security hole.
626 */
627
628 if (xfer->num_lines > 2048 || (xfer->num_lines*xfer->mem_stride > (2048*2048*4))) {
629 DRM_ERROR("Too large PCI DMA bitblt.\n");
609 return DRM_ERR(EINVAL); 630 return DRM_ERR(EINVAL);
610 } 631 }
611 632
@@ -628,16 +649,17 @@ via_build_sg_info(drm_device_t *dev, drm_via_sg_info_t *vsg, drm_via_dmablit_t *
628 649
629#ifdef VIA_BUGFREE 650#ifdef VIA_BUGFREE
630 if ((((unsigned long)xfer->mem_addr & 3) != ((unsigned long)xfer->fb_addr & 3)) || 651 if ((((unsigned long)xfer->mem_addr & 3) != ((unsigned long)xfer->fb_addr & 3)) ||
631 ((xfer->mem_stride & 3) != (xfer->fb_stride & 3))) { 652 ((xfer->num_lines > 1) && ((xfer->mem_stride & 3) != (xfer->fb_stride & 3)))) {
632 DRM_ERROR("Invalid DRM bitblt alignment.\n"); 653 DRM_ERROR("Invalid DRM bitblt alignment.\n");
633 return DRM_ERR(EINVAL); 654 return DRM_ERR(EINVAL);
634 } 655 }
635#else 656#else
636 if ((((unsigned long)xfer->mem_addr & 15) || 657 if ((((unsigned long)xfer->mem_addr & 15) ||
637 ((unsigned long)xfer->fb_addr & 3)) || (xfer->mem_stride & 15) || 658 ((unsigned long)xfer->fb_addr & 3)) ||
638 (xfer->fb_stride & 3)) { 659 ((xfer->num_lines > 1) &&
660 ((xfer->mem_stride & 15) || (xfer->fb_stride & 3)))) {
639 DRM_ERROR("Invalid DRM bitblt alignment.\n"); 661 DRM_ERROR("Invalid DRM bitblt alignment.\n");
640 return DRM_ERR(EINVAL); 662 return DRM_ERR(EINVAL);
641 } 663 }
642#endif 664#endif
643 665
@@ -715,7 +737,7 @@ via_dmablit(drm_device_t *dev, drm_via_dmablit_t *xfer)
715 drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private; 737 drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;
716 drm_via_sg_info_t *vsg; 738 drm_via_sg_info_t *vsg;
717 drm_via_blitq_t *blitq; 739 drm_via_blitq_t *blitq;
718 int ret; 740 int ret;
719 int engine; 741 int engine;
720 unsigned long irqsave; 742 unsigned long irqsave;
721 743
@@ -756,7 +778,7 @@ via_dmablit(drm_device_t *dev, drm_via_dmablit_t *xfer)
756 778
757/* 779/*
758 * Sync on a previously submitted blit. Note that the X server use signals extensively, and 780 * Sync on a previously submitted blit. Note that the X server use signals extensively, and
759 * that there is a very big proability that this IOCTL will be interrupted by a signal. In that 781 * that there is a very big probability that this IOCTL will be interrupted by a signal. In that
760 * case it returns with -EAGAIN for the signal to be delivered. 782 * case it returns with -EAGAIN for the signal to be delivered.
761 * The caller should then reissue the IOCTL. This is similar to what is being done for drmGetLock(). 783 * The caller should then reissue the IOCTL. This is similar to what is being done for drmGetLock().
762 */ 784 */
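
The comment above states the user-visible contract of the blit-sync ioctl: when a signal interrupts the wait it fails with -EAGAIN, and the caller is expected to simply reissue it, as with drmGetLock(). A userspace-side sketch of that retry loop, assuming the drm_via_blitsync_t structure and DRM_VIA_BLIT_SYNC command index from via_drm.h and libdrm's generic drmCommandWrite() wrapper (recent libdrm versions may already restart interrupted ioctls internally):

	#include <errno.h>
	#include <xf86drm.h>
	#include "via_drm.h"

	/* Block until the blit identified by sync->sync_handle has completed. */
	static int via_wait_blit(int fd, drm_via_blitsync_t *sync)
	{
		int ret;

		do {
			ret = drmCommandWrite(fd, DRM_VIA_BLIT_SYNC,
					      sync, sizeof(*sync));
		} while (ret == -EAGAIN);	/* interrupted by a signal: reissue */

		return ret;
	}
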
diff --git a/drivers/char/drm/via_drm.h b/drivers/char/drm/via_drm.h
index 47f0b5b26379..e4ee97d7156f 100644
--- a/drivers/char/drm/via_drm.h
+++ b/drivers/char/drm/via_drm.h
@@ -250,6 +250,12 @@ typedef struct drm_via_blitsync {
250 unsigned engine; 250 unsigned engine;
251} drm_via_blitsync_t; 251} drm_via_blitsync_t;
252 252
253/* - * Below,"flags" is currently unused but will be used for possible future
254 * extensions like kernel space bounce buffers for bad alignments and
255 * blit engine busy-wait polling for better latency in the absence of
256 * interrupts.
257 */
258
253typedef struct drm_via_dmablit { 259typedef struct drm_via_dmablit {
254 uint32_t num_lines; 260 uint32_t num_lines;
255 uint32_t line_length; 261 uint32_t line_length;
@@ -260,7 +266,7 @@ typedef struct drm_via_dmablit {
260 unsigned char *mem_addr; 266 unsigned char *mem_addr;
261 uint32_t mem_stride; 267 uint32_t mem_stride;
262 268
263 int bounce_buffer; 269 uint32_t flags;
264 int to_fb; 270 int to_fb;
265 271
266 drm_via_blitsync_t sync; 272 drm_via_blitsync_t sync;
diff --git a/drivers/char/drm/via_drv.c b/drivers/char/drm/via_drv.c
index b3d364d793d7..bb9dde8b1911 100644
--- a/drivers/char/drm/via_drv.c
+++ b/drivers/char/drm/via_drv.c
@@ -43,7 +43,6 @@ static struct drm_driver driver = {
43 DRIVER_IRQ_SHARED | DRIVER_IRQ_VBL, 43 DRIVER_IRQ_SHARED | DRIVER_IRQ_VBL,
44 .load = via_driver_load, 44 .load = via_driver_load,
45 .unload = via_driver_unload, 45 .unload = via_driver_unload,
46 .context_ctor = via_init_context,
47 .context_dtor = via_final_context, 46 .context_dtor = via_final_context,
48 .vblank_wait = via_driver_vblank_wait, 47 .vblank_wait = via_driver_vblank_wait,
49 .irq_preinstall = via_driver_irq_preinstall, 48 .irq_preinstall = via_driver_irq_preinstall,
@@ -53,6 +52,8 @@ static struct drm_driver driver = {
53 .dma_quiescent = via_driver_dma_quiescent, 52 .dma_quiescent = via_driver_dma_quiescent,
54 .dri_library_name = dri_library_name, 53 .dri_library_name = dri_library_name,
55 .reclaim_buffers = drm_core_reclaim_buffers, 54 .reclaim_buffers = drm_core_reclaim_buffers,
55 .reclaim_buffers_locked = via_reclaim_buffers_locked,
56 .lastclose = via_lastclose,
56 .get_map_ofs = drm_core_get_map_ofs, 57 .get_map_ofs = drm_core_get_map_ofs,
57 .get_reg_ofs = drm_core_get_reg_ofs, 58 .get_reg_ofs = drm_core_get_reg_ofs,
58 .ioctls = via_ioctls, 59 .ioctls = via_ioctls,
diff --git a/drivers/char/drm/via_drv.h b/drivers/char/drm/via_drv.h
index 52bcc7b1ba45..d21b5b75da0f 100644
--- a/drivers/char/drm/via_drv.h
+++ b/drivers/char/drm/via_drv.h
@@ -24,15 +24,16 @@
24#ifndef _VIA_DRV_H_ 24#ifndef _VIA_DRV_H_
25#define _VIA_DRV_H_ 25#define _VIA_DRV_H_
26 26
27#include "drm_sman.h"
27#define DRIVER_AUTHOR "Various" 28#define DRIVER_AUTHOR "Various"
28 29
29#define DRIVER_NAME "via" 30#define DRIVER_NAME "via"
30#define DRIVER_DESC "VIA Unichrome / Pro" 31#define DRIVER_DESC "VIA Unichrome / Pro"
31#define DRIVER_DATE "20051116" 32#define DRIVER_DATE "20060529"
32 33
33#define DRIVER_MAJOR 2 34#define DRIVER_MAJOR 2
34#define DRIVER_MINOR 7 35#define DRIVER_MINOR 10
35#define DRIVER_PATCHLEVEL 4 36#define DRIVER_PATCHLEVEL 0
36 37
37#include "via_verifier.h" 38#include "via_verifier.h"
38 39
@@ -85,6 +86,12 @@ typedef struct drm_via_private {
85 uint32_t irq_enable_mask; 86 uint32_t irq_enable_mask;
86 uint32_t irq_pending_mask; 87 uint32_t irq_pending_mask;
87 int *irq_map; 88 int *irq_map;
89 unsigned int idle_fault;
90 drm_sman_t sman;
91 int vram_initialized;
92 int agp_initialized;
93 unsigned long vram_offset;
94 unsigned long agp_offset;
88 drm_via_blitq_t blit_queues[VIA_NUM_BLIT_ENGINES]; 95 drm_via_blitq_t blit_queues[VIA_NUM_BLIT_ENGINES];
89} drm_via_private_t; 96} drm_via_private_t;
90 97
@@ -135,6 +142,9 @@ extern void via_init_futex(drm_via_private_t * dev_priv);
135extern void via_cleanup_futex(drm_via_private_t * dev_priv); 142extern void via_cleanup_futex(drm_via_private_t * dev_priv);
136extern void via_release_futex(drm_via_private_t * dev_priv, int context); 143extern void via_release_futex(drm_via_private_t * dev_priv, int context);
137 144
145extern void via_reclaim_buffers_locked(drm_device_t *dev, struct file *filp);
146extern void via_lastclose(drm_device_t *dev);
147
138extern void via_dmablit_handler(drm_device_t *dev, int engine, int from_irq); 148extern void via_dmablit_handler(drm_device_t *dev, int engine, int from_irq);
139extern void via_init_dmablit(drm_device_t *dev); 149extern void via_init_dmablit(drm_device_t *dev);
140 150
diff --git a/drivers/char/drm/via_ds.c b/drivers/char/drm/via_ds.c
deleted file mode 100644
index 9429736b3b96..000000000000
--- a/drivers/char/drm/via_ds.c
+++ /dev/null
@@ -1,273 +0,0 @@
1/*
2 * Copyright 1998-2003 VIA Technologies, Inc. All Rights Reserved.
3 * Copyright 2001-2003 S3 Graphics, Inc. All Rights Reserved.
4 * Copyright 2000 Silicon Integrated Systems Corp, Inc., HsinChu, Taiwan.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sub license,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the
14 * next paragraph) shall be included in all copies or substantial portions
15 * of the Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
20 * VIA, S3 GRAPHICS, AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
21 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
22 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
23 * DEALINGS IN THE SOFTWARE.
24 */
25#include "drmP.h"
26
27#include "via_ds.h"
28extern unsigned int VIA_DEBUG;
29
30set_t *via_setInit(void)
31{
32 int i;
33 set_t *set;
34 set = (set_t *) drm_alloc(sizeof(set_t), DRM_MEM_DRIVER);
35 for (i = 0; i < SET_SIZE; i++) {
36 set->list[i].free_next = i + 1;
37 set->list[i].alloc_next = -1;
38 }
39 set->list[SET_SIZE - 1].free_next = -1;
40 set->free = 0;
41 set->alloc = -1;
42 set->trace = -1;
43 return set;
44}
45
46int via_setAdd(set_t * set, ITEM_TYPE item)
47{
48 int free = set->free;
49 if (free != -1) {
50 set->list[free].val = item;
51 set->free = set->list[free].free_next;
52 } else {
53 return 0;
54 }
55 set->list[free].alloc_next = set->alloc;
56 set->alloc = free;
57 set->list[free].free_next = -1;
58 return 1;
59}
60
61int via_setDel(set_t * set, ITEM_TYPE item)
62{
63 int alloc = set->alloc;
64 int prev = -1;
65
66 while (alloc != -1) {
67 if (set->list[alloc].val == item) {
68 if (prev != -1)
69 set->list[prev].alloc_next =
70 set->list[alloc].alloc_next;
71 else
72 set->alloc = set->list[alloc].alloc_next;
73 break;
74 }
75 prev = alloc;
76 alloc = set->list[alloc].alloc_next;
77 }
78
79 if (alloc == -1)
80 return 0;
81
82 set->list[alloc].free_next = set->free;
83 set->free = alloc;
84 set->list[alloc].alloc_next = -1;
85
86 return 1;
87}
88
89/* setFirst -> setAdd -> setNext is wrong */
90
91int via_setFirst(set_t * set, ITEM_TYPE * item)
92{
93 if (set->alloc == -1)
94 return 0;
95
96 *item = set->list[set->alloc].val;
97 set->trace = set->list[set->alloc].alloc_next;
98
99 return 1;
100}
101
102int via_setNext(set_t * set, ITEM_TYPE * item)
103{
104 if (set->trace == -1)
105 return 0;
106
107 *item = set->list[set->trace].val;
108 set->trace = set->list[set->trace].alloc_next;
109
110 return 1;
111}
112
113int via_setDestroy(set_t * set)
114{
115 drm_free(set, sizeof(set_t), DRM_MEM_DRIVER);
116
117 return 1;
118}
119
120#define ISFREE(bptr) ((bptr)->free)
121
122#define fprintf(fmt, arg...) do{}while(0)
123
124memHeap_t *via_mmInit(int ofs, int size)
125{
126 PMemBlock blocks;
127
128 if (size <= 0)
129 return NULL;
130
131 blocks = (TMemBlock *) drm_calloc(1, sizeof(TMemBlock), DRM_MEM_DRIVER);
132
133 if (blocks) {
134 blocks->ofs = ofs;
135 blocks->size = size;
136 blocks->free = 1;
137 return (memHeap_t *) blocks;
138 } else
139 return NULL;
140}
141
142static TMemBlock *SliceBlock(TMemBlock * p,
143 int startofs, int size,
144 int reserved, int alignment)
145{
146 TMemBlock *newblock;
147
148 /* break left */
149 if (startofs > p->ofs) {
150 newblock =
151 (TMemBlock *) drm_calloc(1, sizeof(TMemBlock),
152 DRM_MEM_DRIVER);
153 newblock->ofs = startofs;
154 newblock->size = p->size - (startofs - p->ofs);
155 newblock->free = 1;
156 newblock->next = p->next;
157 p->size -= newblock->size;
158 p->next = newblock;
159 p = newblock;
160 }
161
162 /* break right */
163 if (size < p->size) {
164 newblock =
165 (TMemBlock *) drm_calloc(1, sizeof(TMemBlock),
166 DRM_MEM_DRIVER);
167 newblock->ofs = startofs + size;
168 newblock->size = p->size - size;
169 newblock->free = 1;
170 newblock->next = p->next;
171 p->size = size;
172 p->next = newblock;
173 }
174
175 /* p = middle block */
176 p->align = alignment;
177 p->free = 0;
178 p->reserved = reserved;
179 return p;
180}
181
182PMemBlock via_mmAllocMem(memHeap_t * heap, int size, int align2,
183 int startSearch)
184{
185 int mask, startofs, endofs;
186 TMemBlock *p;
187
188 if (!heap || align2 < 0 || size <= 0)
189 return NULL;
190
191 mask = (1 << align2) - 1;
192 startofs = 0;
193 p = (TMemBlock *) heap;
194
195 while (p) {
196 if (ISFREE(p)) {
197 startofs = (p->ofs + mask) & ~mask;
198
199 if (startofs < startSearch)
200 startofs = startSearch;
201
202 endofs = startofs + size;
203
204 if (endofs <= (p->ofs + p->size))
205 break;
206 }
207
208 p = p->next;
209 }
210
211 if (!p)
212 return NULL;
213
214 p = SliceBlock(p, startofs, size, 0, mask + 1);
215 p->heap = heap;
216
217 return p;
218}
219
220static __inline__ int Join2Blocks(TMemBlock * p)
221{
222 if (p->free && p->next && p->next->free) {
223 TMemBlock *q = p->next;
224 p->size += q->size;
225 p->next = q->next;
226 drm_free(q, sizeof(TMemBlock), DRM_MEM_DRIVER);
227
228 return 1;
229 }
230
231 return 0;
232}
233
234int via_mmFreeMem(PMemBlock b)
235{
236 TMemBlock *p, *prev;
237
238 if (!b)
239 return 0;
240
241 if (!b->heap) {
242 fprintf(stderr, "no heap\n");
243
244 return -1;
245 }
246
247 p = b->heap;
248 prev = NULL;
249
250 while (p && p != b) {
251 prev = p;
252 p = p->next;
253 }
254
255 if (!p || p->free || p->reserved) {
256 if (!p)
257 fprintf(stderr, "block not found in heap\n");
258 else if (p->free)
259 fprintf(stderr, "block already free\n");
260 else
261 fprintf(stderr, "block is reserved\n");
262
263 return -1;
264 }
265
266 p->free = 1;
267 Join2Blocks(p);
268
269 if (prev)
270 Join2Blocks(prev);
271
272 return 0;
273}
diff --git a/drivers/char/drm/via_ds.h b/drivers/char/drm/via_ds.h
deleted file mode 100644
index d2bb9f37ca38..000000000000
--- a/drivers/char/drm/via_ds.h
+++ /dev/null
@@ -1,104 +0,0 @@
1/*
2 * Copyright 1998-2003 VIA Technologies, Inc. All Rights Reserved.
3 * Copyright 2001-2003 S3 Graphics, Inc. All Rights Reserved.
4 * Copyright 2000 Silicon Integrated Systems Corp, Inc., HsinChu, Taiwan.
5 * All rights reserved.
6 *
7 * Permission is hereby granted, free of charge, to any person obtaining a
8 * copy of this software and associated documentation files (the "Software"),
9 * to deal in the Software without restriction, including without limitation
10 * the rights to use, copy, modify, merge, publish, distribute, sub license,
11 * and/or sell copies of the Software, and to permit persons to whom the
12 * Software is furnished to do so, subject to the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21 * VIA, S3 GRAPHICS, AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
22 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
23 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
24 * DEALINGS IN THE SOFTWARE.
25 */
26#ifndef _via_ds_h_
27#define _via_ds_h_
28
29#include "drmP.h"
30
31/* Set Data Structure */
32#define SET_SIZE 5000
33typedef unsigned long ITEM_TYPE;
34
35typedef struct {
36 ITEM_TYPE val;
37 int alloc_next, free_next;
38} list_item_t;
39
40typedef struct {
41 int alloc;
42 int free;
43 int trace;
44 list_item_t list[SET_SIZE];
45} set_t;
46
47set_t *via_setInit(void);
48int via_setAdd(set_t * set, ITEM_TYPE item);
49int via_setDel(set_t * set, ITEM_TYPE item);
50int via_setFirst(set_t * set, ITEM_TYPE * item);
51int via_setNext(set_t * set, ITEM_TYPE * item);
52int via_setDestroy(set_t * set);
53
54#endif
55
56#ifndef MM_INC
57#define MM_INC
58
59struct mem_block_t {
60 struct mem_block_t *next;
61 struct mem_block_t *heap;
62 int ofs, size;
63 int align;
64 unsigned int free:1;
65 unsigned int reserved:1;
66};
67typedef struct mem_block_t TMemBlock;
68typedef struct mem_block_t *PMemBlock;
69
70/* a heap is just the first block in a chain */
71typedef struct mem_block_t memHeap_t;
72
73static __inline__ int mmBlockSize(PMemBlock b)
74{
75 return b->size;
76}
77
78static __inline__ int mmOffset(PMemBlock b)
79{
80 return b->ofs;
81}
82
83static __inline__ void mmMarkReserved(PMemBlock b)
84{
85 b->reserved = 1;
86}
87
88/*
89 * input: total size in bytes
90 * return: a heap pointer if OK, NULL if error
91 */
92memHeap_t *via_mmInit(int ofs, int size);
93
94PMemBlock via_mmAllocMem(memHeap_t * heap, int size, int align2,
95 int startSearch);
96
97/*
98 * Free block starts at offset
99 * input: pointer to a block
100 * return: 0 if OK, -1 if error
101 */
102int via_mmFreeMem(PMemBlock b);
103
104#endif
diff --git a/drivers/char/drm/via_map.c b/drivers/char/drm/via_map.c
index c6a08e96285b..782011e0a58d 100644
--- a/drivers/char/drm/via_map.c
+++ b/drivers/char/drm/via_map.c
@@ -98,6 +98,7 @@ int via_map_init(DRM_IOCTL_ARGS)
98int via_driver_load(drm_device_t *dev, unsigned long chipset) 98int via_driver_load(drm_device_t *dev, unsigned long chipset)
99{ 99{
100 drm_via_private_t *dev_priv; 100 drm_via_private_t *dev_priv;
101 int ret = 0;
101 102
102 dev_priv = drm_calloc(1, sizeof(drm_via_private_t), DRM_MEM_DRIVER); 103 dev_priv = drm_calloc(1, sizeof(drm_via_private_t), DRM_MEM_DRIVER);
103 if (dev_priv == NULL) 104 if (dev_priv == NULL)
@@ -108,13 +109,19 @@ int via_driver_load(drm_device_t *dev, unsigned long chipset)
108 if (chipset == VIA_PRO_GROUP_A) 109 if (chipset == VIA_PRO_GROUP_A)
109 dev_priv->pro_group_a = 1; 110 dev_priv->pro_group_a = 1;
110 111
111 return 0; 112 ret = drm_sman_init(&dev_priv->sman, 2, 12, 8);
113 if (ret) {
114 drm_free(dev_priv, sizeof(*dev_priv), DRM_MEM_DRIVER);
115 }
116 return ret;
112} 117}
113 118
114int via_driver_unload(drm_device_t *dev) 119int via_driver_unload(drm_device_t *dev)
115{ 120{
116 drm_via_private_t *dev_priv = dev->dev_private; 121 drm_via_private_t *dev_priv = dev->dev_private;
117 122
123 drm_sman_takedown(&dev_priv->sman);
124
118 drm_free(dev_priv, sizeof(drm_via_private_t), DRM_MEM_DRIVER); 125 drm_free(dev_priv, sizeof(drm_via_private_t), DRM_MEM_DRIVER);
119 126
120 return 0; 127 return 0;
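
The numeric arguments to drm_sman_init() are not obvious from the call site; read against drm_sman.h added earlier in this series, they are the number of memory managers and the log2 sizes of the two hash tables the manager keeps. A commented restatement of the call above (the argument interpretation is mine, not spelled out in this hunk):

	ret = drm_sman_init(&dev_priv->sman,
			    2,	/* managers: VIA_MEM_VIDEO and VIA_MEM_AGP */
			    12,	/* user hash table order: 2^12 buckets */
			    8);	/* owner hash table order: 2^8 buckets */
	if (ret)
		drm_free(dev_priv, sizeof(*dev_priv), DRM_MEM_DRIVER);
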
diff --git a/drivers/char/drm/via_mm.c b/drivers/char/drm/via_mm.c
index 33e0cb12e4c3..2fcf0577a7aa 100644
--- a/drivers/char/drm/via_mm.c
+++ b/drivers/char/drm/via_mm.c
@@ -1,6 +1,6 @@
1/* 1/*
2 * Copyright 1998-2003 VIA Technologies, Inc. All Rights Reserved. 2 * Copyright 2006 Tungsten Graphics Inc., Bismarck, ND., USA.
3 * Copyright 2001-2003 S3 Graphics, Inc. All Rights Reserved. 3 * All rights reserved.
4 * 4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a 5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"), 6 * copy of this software and associated documentation files (the "Software"),
@@ -16,347 +16,194 @@
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL 18 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
19 * VIA, S3 GRAPHICS, AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR 19 * THE AUTHORS OR COPYRIGHT HOLDERS AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 20 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
22 * DEALINGS IN THE SOFTWARE. 22 * DEALINGS IN THE SOFTWARE.
23 */ 23 */
24/*
25 * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
26 */
27
24#include "drmP.h" 28#include "drmP.h"
25#include "via_drm.h" 29#include "via_drm.h"
26#include "via_drv.h" 30#include "via_drv.h"
27#include "via_ds.h" 31#include "drm_sman.h"
28#include "via_mm.h"
29
30#define MAX_CONTEXT 100
31
32typedef struct {
33 int used;
34 int context;
35 set_t *sets[2]; /* 0 for frame buffer, 1 for AGP , 2 for System */
36} via_context_t;
37
38static via_context_t global_ppriv[MAX_CONTEXT];
39 32
40static int via_agp_alloc(drm_via_mem_t * mem); 33#define VIA_MM_ALIGN_SHIFT 4
41static int via_agp_free(drm_via_mem_t * mem); 34#define VIA_MM_ALIGN_MASK ( (1 << VIA_MM_ALIGN_SHIFT) - 1)
42static int via_fb_alloc(drm_via_mem_t * mem);
43static int via_fb_free(drm_via_mem_t * mem);
44
45static int add_alloc_set(int context, int type, unsigned long val)
46{
47 int i, retval = 0;
48
49 for (i = 0; i < MAX_CONTEXT; i++) {
50 if (global_ppriv[i].used && global_ppriv[i].context == context) {
51 retval = via_setAdd(global_ppriv[i].sets[type], val);
52 break;
53 }
54 }
55
56 return retval;
57}
58
59static int del_alloc_set(int context, int type, unsigned long val)
60{
61 int i, retval = 0;
62
63 for (i = 0; i < MAX_CONTEXT; i++)
64 if (global_ppriv[i].used && global_ppriv[i].context == context) {
65 retval = via_setDel(global_ppriv[i].sets[type], val);
66 break;
67 }
68
69 return retval;
70}
71
72/* agp memory management */
73static memHeap_t *AgpHeap = NULL;
74 35
75int via_agp_init(DRM_IOCTL_ARGS) 36int via_agp_init(DRM_IOCTL_ARGS)
76{ 37{
38 DRM_DEVICE;
77 drm_via_agp_t agp; 39 drm_via_agp_t agp;
40 drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
41 int ret;
78 42
79 DRM_COPY_FROM_USER_IOCTL(agp, (drm_via_agp_t __user *) data, 43 DRM_COPY_FROM_USER_IOCTL(agp, (drm_via_agp_t __user *) data,
80 sizeof(agp)); 44 sizeof(agp));
45 mutex_lock(&dev->struct_mutex);
46 ret = drm_sman_set_range(&dev_priv->sman, VIA_MEM_AGP, 0,
47 agp.size >> VIA_MM_ALIGN_SHIFT);
48
49 if (ret) {
50 DRM_ERROR("AGP memory manager initialisation error\n");
51 mutex_unlock(&dev->struct_mutex);
52 return ret;
53 }
81 54
82 AgpHeap = via_mmInit(agp.offset, agp.size); 55 dev_priv->agp_initialized = 1;
83 56 dev_priv->agp_offset = agp.offset;
84 DRM_DEBUG("offset = %lu, size = %lu", (unsigned long)agp.offset, 57 mutex_unlock(&dev->struct_mutex);
85 (unsigned long)agp.size);
86 58
59 DRM_DEBUG("offset = %u, size = %u", agp.offset, agp.size);
87 return 0; 60 return 0;
88} 61}
89 62
90/* fb memory management */
91static memHeap_t *FBHeap = NULL;
92
93int via_fb_init(DRM_IOCTL_ARGS) 63int via_fb_init(DRM_IOCTL_ARGS)
94{ 64{
65 DRM_DEVICE;
95 drm_via_fb_t fb; 66 drm_via_fb_t fb;
67 drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
68 int ret;
96 69
97 DRM_COPY_FROM_USER_IOCTL(fb, (drm_via_fb_t __user *) data, sizeof(fb)); 70 DRM_COPY_FROM_USER_IOCTL(fb, (drm_via_fb_t __user *) data, sizeof(fb));
98 71
99 FBHeap = via_mmInit(fb.offset, fb.size); 72 mutex_lock(&dev->struct_mutex);
73 ret = drm_sman_set_range(&dev_priv->sman, VIA_MEM_VIDEO, 0,
74 fb.size >> VIA_MM_ALIGN_SHIFT);
100 75
101 DRM_DEBUG("offset = %lu, size = %lu", (unsigned long)fb.offset, 76 if (ret) {
102 (unsigned long)fb.size); 77 DRM_ERROR("VRAM memory manager initialisation error\n");
78 mutex_unlock(&dev->struct_mutex);
79 return ret;
80 }
103 81
104 return 0; 82 dev_priv->vram_initialized = 1;
105} 83 dev_priv->vram_offset = fb.offset;
106 84
107int via_init_context(struct drm_device *dev, int context) 85 mutex_unlock(&dev->struct_mutex);
108{ 86 DRM_DEBUG("offset = %u, size = %u", fb.offset, fb.size);
109 int i; 87
110 88 return 0;
111 for (i = 0; i < MAX_CONTEXT; i++)
112 if (global_ppriv[i].used &&
113 (global_ppriv[i].context == context))
114 break;
115
116 if (i >= MAX_CONTEXT) {
117 for (i = 0; i < MAX_CONTEXT; i++) {
118 if (!global_ppriv[i].used) {
119 global_ppriv[i].context = context;
120 global_ppriv[i].used = 1;
121 global_ppriv[i].sets[0] = via_setInit();
122 global_ppriv[i].sets[1] = via_setInit();
123 DRM_DEBUG("init allocation set, socket=%d,"
124 " context = %d\n", i, context);
125 break;
126 }
127 }
128
129 if ((i >= MAX_CONTEXT) || (global_ppriv[i].sets[0] == NULL) ||
130 (global_ppriv[i].sets[1] == NULL)) {
131 return 0;
132 }
133 }
134 89
135 return 1;
136} 90}
137 91
138int via_final_context(struct drm_device *dev, int context) 92int via_final_context(struct drm_device *dev, int context)
139{ 93{
140 int i;
141 drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private; 94 drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
142 95
143 for (i = 0; i < MAX_CONTEXT; i++)
144 if (global_ppriv[i].used &&
145 (global_ppriv[i].context == context))
146 break;
147
148 if (i < MAX_CONTEXT) {
149 set_t *set;
150 ITEM_TYPE item;
151 int retval;
152
153 DRM_DEBUG("find socket %d, context = %d\n", i, context);
154
155 /* Video Memory */
156 set = global_ppriv[i].sets[0];
157 retval = via_setFirst(set, &item);
158 while (retval) {
159 DRM_DEBUG("free video memory 0x%lx\n", item);
160 via_mmFreeMem((PMemBlock) item);
161 retval = via_setNext(set, &item);
162 }
163 via_setDestroy(set);
164
165 /* AGP Memory */
166 set = global_ppriv[i].sets[1];
167 retval = via_setFirst(set, &item);
168 while (retval) {
169 DRM_DEBUG("free agp memory 0x%lx\n", item);
170 via_mmFreeMem((PMemBlock) item);
171 retval = via_setNext(set, &item);
172 }
173 via_setDestroy(set);
174 global_ppriv[i].used = 0;
175 }
176 via_release_futex(dev_priv, context); 96 via_release_futex(dev_priv, context);
177 97
178#if defined(__linux__)
179 /* Linux specific until context tracking code gets ported to BSD */ 98 /* Linux specific until context tracking code gets ported to BSD */
180 /* Last context, perform cleanup */ 99 /* Last context, perform cleanup */
181 if (dev->ctx_count == 1 && dev->dev_private) { 100 if (dev->ctx_count == 1 && dev->dev_private) {
182 DRM_DEBUG("Last Context\n"); 101 DRM_DEBUG("Last Context\n");
183 if (dev->irq) 102 if (dev->irq)
184 drm_irq_uninstall(dev); 103 drm_irq_uninstall(dev);
185
186 via_cleanup_futex(dev_priv); 104 via_cleanup_futex(dev_priv);
187 via_do_cleanup_map(dev); 105 via_do_cleanup_map(dev);
188 } 106 }
189#endif
190
191 return 1; 107 return 1;
192} 108}
193 109
110void via_lastclose(struct drm_device *dev)
111{
112 drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
113
114 if (!dev_priv)
115 return;
116
117 mutex_lock(&dev->struct_mutex);
118 drm_sman_cleanup(&dev_priv->sman);
119 dev_priv->vram_initialized = 0;
120 dev_priv->agp_initialized = 0;
121 mutex_unlock(&dev->struct_mutex);
122}
123
194int via_mem_alloc(DRM_IOCTL_ARGS) 124int via_mem_alloc(DRM_IOCTL_ARGS)
195{ 125{
126 DRM_DEVICE;
127
196 drm_via_mem_t mem; 128 drm_via_mem_t mem;
129 int retval = 0;
130 drm_memblock_item_t *item;
131 drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
132 unsigned long tmpSize;
197 133
198 DRM_COPY_FROM_USER_IOCTL(mem, (drm_via_mem_t __user *) data, 134 DRM_COPY_FROM_USER_IOCTL(mem, (drm_via_mem_t __user *) data,
199 sizeof(mem)); 135 sizeof(mem));
200 136
201 switch (mem.type) { 137 if (mem.type > VIA_MEM_AGP) {
202 case VIA_MEM_VIDEO: 138 DRM_ERROR("Unknown memory type allocation\n");
203 if (via_fb_alloc(&mem) < 0) 139 return DRM_ERR(EINVAL);
204 return -EFAULT;
205 DRM_COPY_TO_USER_IOCTL((drm_via_mem_t __user *) data, mem,
206 sizeof(mem));
207 return 0;
208 case VIA_MEM_AGP:
209 if (via_agp_alloc(&mem) < 0)
210 return -EFAULT;
211 DRM_COPY_TO_USER_IOCTL((drm_via_mem_t __user *) data, mem,
212 sizeof(mem));
213 return 0;
214 } 140 }
215 141 mutex_lock(&dev->struct_mutex);
216 return -EFAULT; 142 if (0 == ((mem.type == VIA_MEM_VIDEO) ? dev_priv->vram_initialized :
217} 143 dev_priv->agp_initialized)) {
218 144 DRM_ERROR
219static int via_fb_alloc(drm_via_mem_t * mem) 145 ("Attempt to allocate from uninitialized memory manager.\n");
220{ 146 mutex_unlock(&dev->struct_mutex);
221 drm_via_mm_t fb; 147 return DRM_ERR(EINVAL);
222 PMemBlock block;
223 int retval = 0;
224
225 if (!FBHeap)
226 return -1;
227
228 fb.size = mem->size;
229 fb.context = mem->context;
230
231 block = via_mmAllocMem(FBHeap, fb.size, 5, 0);
232 if (block) {
233 fb.offset = block->ofs;
234 fb.free = (unsigned long)block;
235 if (!add_alloc_set(fb.context, VIA_MEM_VIDEO, fb.free)) {
236 DRM_DEBUG("adding to allocation set fails\n");
237 via_mmFreeMem((PMemBlock) fb.free);
238 retval = -1;
239 }
240 } else {
241 fb.offset = 0;
242 fb.size = 0;
243 fb.free = 0;
244 retval = -1;
245 } 148 }
246 149
247 mem->offset = fb.offset; 150 tmpSize = (mem.size + VIA_MM_ALIGN_MASK) >> VIA_MM_ALIGN_SHIFT;
248 mem->index = fb.free; 151 item = drm_sman_alloc(&dev_priv->sman, mem.type, tmpSize, 0,
249 152 (unsigned long)priv);
250 DRM_DEBUG("alloc fb, size = %d, offset = %d\n", fb.size, 153 mutex_unlock(&dev->struct_mutex);
251 (int)fb.offset); 154 if (item) {
252 155 mem.offset = ((mem.type == VIA_MEM_VIDEO) ?
253 return retval; 156 dev_priv->vram_offset : dev_priv->agp_offset) +
254} 157 (item->mm->
255 158 offset(item->mm, item->mm_info) << VIA_MM_ALIGN_SHIFT);
256static int via_agp_alloc(drm_via_mem_t * mem) 159 mem.index = item->user_hash.key;
257{
258 drm_via_mm_t agp;
259 PMemBlock block;
260 int retval = 0;
261
262 if (!AgpHeap)
263 return -1;
264
265 agp.size = mem->size;
266 agp.context = mem->context;
267
268 block = via_mmAllocMem(AgpHeap, agp.size, 5, 0);
269 if (block) {
270 agp.offset = block->ofs;
271 agp.free = (unsigned long)block;
272 if (!add_alloc_set(agp.context, VIA_MEM_AGP, agp.free)) {
273 DRM_DEBUG("adding to allocation set fails\n");
274 via_mmFreeMem((PMemBlock) agp.free);
275 retval = -1;
276 }
277 } else { 160 } else {
278 agp.offset = 0; 161 mem.offset = 0;
279 agp.size = 0; 162 mem.size = 0;
280 agp.free = 0; 163 mem.index = 0;
164 DRM_DEBUG("Video memory allocation failed\n");
165 retval = DRM_ERR(ENOMEM);
281 } 166 }
167 DRM_COPY_TO_USER_IOCTL((drm_via_mem_t __user *) data, mem, sizeof(mem));
282 168
283 mem->offset = agp.offset;
284 mem->index = agp.free;
285
286 DRM_DEBUG("alloc agp, size = %d, offset = %d\n", agp.size,
287 (unsigned int)agp.offset);
288 return retval; 169 return retval;
289} 170}
290 171
291int via_mem_free(DRM_IOCTL_ARGS) 172int via_mem_free(DRM_IOCTL_ARGS)
292{ 173{
174 DRM_DEVICE;
175 drm_via_private_t *dev_priv = dev->dev_private;
293 drm_via_mem_t mem; 176 drm_via_mem_t mem;
177 int ret;
294 178
295 DRM_COPY_FROM_USER_IOCTL(mem, (drm_via_mem_t __user *) data, 179 DRM_COPY_FROM_USER_IOCTL(mem, (drm_via_mem_t __user *) data,
296 sizeof(mem)); 180 sizeof(mem));
297 181
298 switch (mem.type) { 182 mutex_lock(&dev->struct_mutex);
183 ret = drm_sman_free_key(&dev_priv->sman, mem.index);
184 mutex_unlock(&dev->struct_mutex);
185 DRM_DEBUG("free = 0x%lx\n", mem.index);
299 186
300 case VIA_MEM_VIDEO: 187 return ret;
301 if (via_fb_free(&mem) == 0)
302 return 0;
303 break;
304 case VIA_MEM_AGP:
305 if (via_agp_free(&mem) == 0)
306 return 0;
307 break;
308 }
309
310 return -EFAULT;
311} 188}
312 189
313static int via_fb_free(drm_via_mem_t * mem)
314{
315 drm_via_mm_t fb;
316 int retval = 0;
317
318 if (!FBHeap) {
319 return -1;
320 }
321
322 fb.free = mem->index;
323 fb.context = mem->context;
324
325 if (!fb.free) {
326 return -1;
327
328 }
329
330 via_mmFreeMem((PMemBlock) fb.free);
331
332 if (!del_alloc_set(fb.context, VIA_MEM_VIDEO, fb.free)) {
333 retval = -1;
334 }
335
336 DRM_DEBUG("free fb, free = %ld\n", fb.free);
337 190
338 return retval; 191void via_reclaim_buffers_locked(drm_device_t * dev, struct file *filp)
339}
340
341static int via_agp_free(drm_via_mem_t * mem)
342{ 192{
343 drm_via_mm_t agp; 193 drm_via_private_t *dev_priv = dev->dev_private;
344 194 drm_file_t *priv = filp->private_data;
345 int retval = 0;
346 195
347 agp.free = mem->index; 196 mutex_lock(&dev->struct_mutex);
348 agp.context = mem->context; 197 if (drm_sman_owner_clean(&dev_priv->sman, (unsigned long)priv)) {
349 198 mutex_unlock(&dev->struct_mutex);
350 if (!agp.free) 199 return;
351 return -1;
352
353 via_mmFreeMem((PMemBlock) agp.free);
354
355 if (!del_alloc_set(agp.context, VIA_MEM_AGP, agp.free)) {
356 retval = -1;
357 } 200 }
358 201
359 DRM_DEBUG("free agp, free = %ld\n", agp.free); 202 if (dev->driver->dma_quiescent) {
203 dev->driver->dma_quiescent(dev);
204 }
360 205
361 return retval; 206 drm_sman_owner_cleanup(&dev_priv->sman, (unsigned long)priv);
207 mutex_unlock(&dev->struct_mutex);
208 return;
362} 209}
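
One detail of the rewritten via_mem_alloc() that is easy to miss: the simple memory manager deals in 16-byte units rather than bytes. The requested size is rounded up before drm_sman_alloc(), and the unit offset the manager returns is scaled back up and rebased on the aperture offset recorded by via_fb_init()/via_agp_init(). A condensed restatement of that arithmetic (VRAM case shown), with an invented example in the comments; the expressions themselves come from the hunk above:

	/* e.g. mem.size = 100 bytes -> (100 + 15) >> 4 = 7 units (112 bytes) */
	tmpSize = (mem.size + VIA_MM_ALIGN_MASK) >> VIA_MM_ALIGN_SHIFT;
	item = drm_sman_alloc(&dev_priv->sman, mem.type, tmpSize, 0,
			      (unsigned long)priv);

	/*
	 * If the manager hands back unit offset 3, userspace sees
	 * vram_offset + (3 << 4) = vram_offset + 48 as the byte offset.
	 */
	mem.offset = dev_priv->vram_offset +
	    (item->mm->offset(item->mm, item->mm_info) << VIA_MM_ALIGN_SHIFT);
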