author     Linus Torvalds <torvalds@g5.osdl.org>  2006-01-12 16:53:40 -0500
committer  Linus Torvalds <torvalds@g5.osdl.org>  2006-01-12 16:53:40 -0500
commit     37ef4399a6bb265d3035e6d6e45f7677b132a3ba (patch)
tree       31adbac36ea310a44562a335f501d69d5ce2c78c /drivers
parent     bf785ee0aeea7a3e717cb1e11df4135b6cbde7da (diff)
parent     9c7d462eda13ca211b7b4a62f191f4cfda135e2d (diff)
Merge branch 'drm-forlinus' of git://git.kernel.org/pub/scm/linux/kernel/git/airlied/drm-2.6
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/char/drm/Makefile | 4
-rw-r--r--  drivers/char/drm/ati_pcigart.c | 23
-rw-r--r--  drivers/char/drm/drm.h | 4
-rw-r--r--  drivers/char/drm/drmP.h | 122
-rw-r--r--  drivers/char/drm/drm_agpsupport.c | 133
-rw-r--r--  drivers/char/drm/drm_bufs.c | 49
-rw-r--r--  drivers/char/drm/drm_context.c | 2
-rw-r--r--  drivers/char/drm/drm_core.h | 4
-rw-r--r--  drivers/char/drm/drm_drv.c | 152
-rw-r--r--  drivers/char/drm/drm_fops.c | 317
-rw-r--r--  drivers/char/drm/drm_init.c | 53
-rw-r--r--  drivers/char/drm/drm_ioctl.c | 27
-rw-r--r--  drivers/char/drm/drm_lock.c | 1
-rw-r--r--  drivers/char/drm/drm_memory.c | 8
-rw-r--r--  drivers/char/drm/drm_memory_debug.h | 269
-rw-r--r--  drivers/char/drm/drm_os_linux.h | 1
-rw-r--r--  drivers/char/drm/drm_pciids.h | 12
-rw-r--r--  drivers/char/drm/drm_proc.c | 16
-rw-r--r--  drivers/char/drm/drm_stub.c | 63
-rw-r--r--  drivers/char/drm/drm_sysfs.c | 68
-rw-r--r--  drivers/char/drm/i810_dma.c | 49
-rw-r--r--  drivers/char/drm/i810_drv.c | 60
-rw-r--r--  drivers/char/drm/i810_drv.h | 10
-rw-r--r--  drivers/char/drm/i830_dma.c | 47
-rw-r--r--  drivers/char/drm/i830_drv.c | 57
-rw-r--r--  drivers/char/drm/i830_drv.h | 8
-rw-r--r--  drivers/char/drm/i915_dma.c | 52
-rw-r--r--  drivers/char/drm/i915_drm.h | 6
-rw-r--r--  drivers/char/drm/i915_drv.c | 66
-rw-r--r--  drivers/char/drm/i915_drv.h | 44
-rw-r--r--  drivers/char/drm/i915_irq.c | 48
-rw-r--r--  drivers/char/drm/i915_mem.c | 5
-rw-r--r--  drivers/char/drm/mga_dma.c | 158
-rw-r--r--  drivers/char/drm/mga_drv.c | 58
-rw-r--r--  drivers/char/drm/mga_drv.h | 14
-rw-r--r--  drivers/char/drm/mga_state.c | 26
-rw-r--r--  drivers/char/drm/r128_cce.c | 15
-rw-r--r--  drivers/char/drm/r128_drm.h | 4
-rw-r--r--  drivers/char/drm/r128_drv.c | 48
-rw-r--r--  drivers/char/drm/r128_drv.h | 8
-rw-r--r--  drivers/char/drm/r128_irq.c | 4
-rw-r--r--  drivers/char/drm/r128_state.c | 42
-rw-r--r--  drivers/char/drm/r300_cmdbuf.c | 38
-rw-r--r--  drivers/char/drm/r300_reg.h | 1
-rw-r--r--  drivers/char/drm/radeon_cp.c | 106
-rw-r--r--  drivers/char/drm/radeon_drm.h | 6
-rw-r--r--  drivers/char/drm/radeon_drv.c | 62
-rw-r--r--  drivers/char/drm/radeon_drv.h | 41
-rw-r--r--  drivers/char/drm/radeon_state.c | 246
-rw-r--r--  drivers/char/drm/savage_bci.c | 81
-rw-r--r--  drivers/char/drm/savage_drv.c | 50
-rw-r--r--  drivers/char/drm/savage_drv.h | 29
-rw-r--r--  drivers/char/drm/savage_state.c | 324
-rw-r--r--  drivers/char/drm/sis_drm.h | 25
-rw-r--r--  drivers/char/drm/sis_drv.c | 42
-rw-r--r--  drivers/char/drm/sis_drv.h | 4
-rw-r--r--  drivers/char/drm/sis_ds.h | 7
-rw-r--r--  drivers/char/drm/sis_mm.c | 30
-rw-r--r--  drivers/char/drm/tdfx_drv.c | 42
-rw-r--r--  drivers/char/drm/tdfx_drv.h | 7
-rw-r--r--  drivers/char/drm/via_dma.c | 38
-rw-r--r--  drivers/char/drm/via_dmablit.c | 805
-rw-r--r--  drivers/char/drm/via_dmablit.h | 140
-rw-r--r--  drivers/char/drm/via_drm.h | 60
-rw-r--r--  drivers/char/drm/via_drv.c | 63
-rw-r--r--  drivers/char/drm/via_drv.h | 56
-rw-r--r--  drivers/char/drm/via_ds.c | 9
-rw-r--r--  drivers/char/drm/via_irq.c | 53
-rw-r--r--  drivers/char/drm/via_map.c | 47
-rw-r--r--  drivers/char/drm/via_mm.c | 20
-rw-r--r--  drivers/char/drm/via_verifier.c | 6
-rw-r--r--  drivers/char/drm/via_verifier.h | 4
-rw-r--r--  drivers/char/drm/via_video.c | 7
73 files changed, 2794 insertions, 1812 deletions
diff --git a/drivers/char/drm/Makefile b/drivers/char/drm/Makefile
index e41060c76226..9d180c42816c 100644
--- a/drivers/char/drm/Makefile
+++ b/drivers/char/drm/Makefile
@@ -3,7 +3,7 @@
 # Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
 
 drm-objs := drm_auth.o drm_bufs.o drm_context.o drm_dma.o drm_drawable.o \
-		drm_drv.o drm_fops.o drm_init.o drm_ioctl.o drm_irq.o \
+		drm_drv.o drm_fops.o drm_ioctl.o drm_irq.o \
 		drm_lock.o drm_memory.o drm_proc.o drm_stub.o drm_vm.o \
 		drm_agpsupport.o drm_scatter.o ati_pcigart.o drm_pci.o \
 		drm_sysfs.o
@@ -18,7 +18,7 @@ radeon-objs := radeon_drv.o radeon_cp.o radeon_state.o radeon_mem.o radeon_irq.o
 ffb-objs := ffb_drv.o ffb_context.o
 sis-objs := sis_drv.o sis_ds.o sis_mm.o
 savage-objs := savage_drv.o savage_bci.o savage_state.o
-via-objs := via_irq.o via_drv.o via_ds.o via_map.o via_mm.o via_dma.o via_verifier.o via_video.o
+via-objs := via_irq.o via_drv.o via_ds.o via_map.o via_mm.o via_dma.o via_verifier.o via_video.o via_dmablit.o
 
 ifeq ($(CONFIG_COMPAT),y)
 drm-objs += drm_ioc32.o
diff --git a/drivers/char/drm/ati_pcigart.c b/drivers/char/drm/ati_pcigart.c
index efff0eec618c..5485382cadec 100644
--- a/drivers/char/drm/ati_pcigart.c
+++ b/drivers/char/drm/ati_pcigart.c
@@ -52,7 +52,7 @@
 # define ATI_MAX_PCIGART_PAGES	8192	/**< 32 MB aperture, 4K pages */
 # define ATI_PCIGART_PAGE_SIZE	4096	/**< PCI GART page size */
 
-static unsigned long drm_ati_alloc_pcigart_table(void)
+static void *drm_ati_alloc_pcigart_table(void)
 {
 	unsigned long address;
 	struct page *page;
@@ -72,27 +72,26 @@ static unsigned long drm_ati_alloc_pcigart_table(void)
 	}
 
 	DRM_DEBUG("%s: returning 0x%08lx\n", __FUNCTION__, address);
-	return address;
+	return (void *)address;
 }
 
-static void drm_ati_free_pcigart_table(unsigned long address)
+static void drm_ati_free_pcigart_table(void *address)
 {
 	struct page *page;
 	int i;
 	DRM_DEBUG("%s\n", __FUNCTION__);
 
-	page = virt_to_page(address);
+	page = virt_to_page((unsigned long)address);
 
 	for (i = 0; i < ATI_PCIGART_TABLE_PAGES; i++, page++) {
 		__put_page(page);
 		ClearPageReserved(page);
 	}
 
-	free_pages(address, ATI_PCIGART_TABLE_ORDER);
+	free_pages((unsigned long)address, ATI_PCIGART_TABLE_ORDER);
 }
 
-int drm_ati_pcigart_cleanup(drm_device_t * dev,
-			    drm_ati_pcigart_info * gart_info)
+int drm_ati_pcigart_cleanup(drm_device_t *dev, drm_ati_pcigart_info *gart_info)
 {
 	drm_sg_mem_t *entry = dev->sg;
 	unsigned long pages;
@@ -136,10 +135,10 @@ int drm_ati_pcigart_cleanup(drm_device_t * dev,
 
 EXPORT_SYMBOL(drm_ati_pcigart_cleanup);
 
-int drm_ati_pcigart_init(drm_device_t * dev, drm_ati_pcigart_info * gart_info)
+int drm_ati_pcigart_init(drm_device_t *dev, drm_ati_pcigart_info *gart_info)
 {
 	drm_sg_mem_t *entry = dev->sg;
-	unsigned long address = 0;
+	void *address = NULL;
 	unsigned long pages;
 	u32 *pci_gart, page_base, bus_address = 0;
 	int i, j, ret = 0;
@@ -163,7 +162,7 @@ int drm_ati_pcigart_init(drm_device_t * dev, drm_ati_pcigart_info * gart_info)
 		goto done;
 	}
 
-	bus_address = pci_map_single(dev->pdev, (void *)address,
+	bus_address = pci_map_single(dev->pdev, address,
 				     ATI_PCIGART_TABLE_PAGES *
 				     PAGE_SIZE, PCI_DMA_TODEVICE);
 	if (bus_address == 0) {
@@ -176,7 +175,7 @@ int drm_ati_pcigart_init(drm_device_t * dev, drm_ati_pcigart_info * gart_info)
 		address = gart_info->addr;
 		bus_address = gart_info->bus_addr;
 		DRM_DEBUG("PCI: Gart Table: VRAM %08X mapped at %08lX\n",
-			  bus_address, address);
+			  bus_address, (unsigned long)address);
 	}
 
 	pci_gart = (u32 *) address;
@@ -195,7 +194,7 @@ int drm_ati_pcigart_init(drm_device_t * dev, drm_ati_pcigart_info * gart_info)
 		if (entry->busaddr[i] == 0) {
 			DRM_ERROR("unable to map PCIGART pages!\n");
 			drm_ati_pcigart_cleanup(dev, gart_info);
-			address = 0;
+			address = NULL;
 			bus_address = 0;
 			goto done;
 		}
diff --git a/drivers/char/drm/drm.h b/drivers/char/drm/drm.h
index 64d6237fdd0b..9da0ddb892b5 100644
--- a/drivers/char/drm/drm.h
+++ b/drivers/char/drm/drm.h
@@ -90,8 +90,8 @@
 #define DRM_MAX_ORDER 22	/**< Up to 2^22 bytes = 4MB */
 #define DRM_RAM_PERCENT 10	/**< How much system ram can we lock? */
 
-#define _DRM_LOCK_HELD 0x80000000	/**< Hardware lock is held */
-#define _DRM_LOCK_CONT 0x40000000	/**< Hardware lock is contended */
+#define _DRM_LOCK_HELD 0x80000000U	/**< Hardware lock is held */
+#define _DRM_LOCK_CONT 0x40000000U	/**< Hardware lock is contended */
 #define _DRM_LOCK_IS_HELD(lock) ((lock) & _DRM_LOCK_HELD)
 #define _DRM_LOCK_IS_CONT(lock) ((lock) & _DRM_LOCK_CONT)
 #define _DRM_LOCKING_CONTEXT(lock) ((lock) & ~(_DRM_LOCK_HELD|_DRM_LOCK_CONT))
diff --git a/drivers/char/drm/drmP.h b/drivers/char/drm/drmP.h
index 3dc3c9d79ae4..54b561e69486 100644
--- a/drivers/char/drm/drmP.h
+++ b/drivers/char/drm/drmP.h
@@ -144,20 +144,6 @@
 /** \name Backward compatibility section */
 /*@{*/
 
-#ifndef MODULE_LICENSE
-#define MODULE_LICENSE(x)
-#endif
-
-#ifndef preempt_disable
-#define preempt_disable()
-#define preempt_enable()
-#endif
-
-#ifndef pte_offset_map
-#define pte_offset_map pte_offset
-#define pte_unmap(pte)
-#endif
-
 #define DRM_RPR_ARG(vma) vma,
 
 #define VM_OFFSET(vma) ((vma)->vm_pgoff << PAGE_SHIFT)
@@ -286,10 +272,13 @@ typedef int drm_ioctl_t(struct inode *inode, struct file *filp,
 typedef int drm_ioctl_compat_t(struct file *filp, unsigned int cmd,
 			       unsigned long arg);
 
+#define DRM_AUTH	0x1
+#define DRM_MASTER	0x2
+#define DRM_ROOT_ONLY	0x4
+
 typedef struct drm_ioctl_desc {
 	drm_ioctl_t *func;
-	int auth_needed;
-	int root_only;
+	int flags;
 } drm_ioctl_desc_t;
 
 typedef struct drm_devstate {
@@ -384,6 +373,7 @@ typedef struct drm_buf_entry {
 /** File private data */
 typedef struct drm_file {
 	int authenticated;
+	int master;
 	int minor;
 	pid_t pid;
 	uid_t uid;
@@ -532,8 +522,9 @@ typedef struct drm_vbl_sig {
 typedef struct ati_pcigart_info {
 	int gart_table_location;
 	int is_pcie;
-	unsigned long addr;
+	void *addr;
 	dma_addr_t bus_addr;
+	drm_local_map_t mapping;
 } drm_ati_pcigart_info;
 
 /**
@@ -544,16 +535,14 @@ typedef struct ati_pcigart_info {
 struct drm_device;
 
 struct drm_driver {
-	int (*preinit) (struct drm_device *, unsigned long flags);
-	void (*prerelease) (struct drm_device *, struct file * filp);
-	void (*pretakedown) (struct drm_device *);
-	int (*postcleanup) (struct drm_device *);
-	int (*presetup) (struct drm_device *);
-	int (*postsetup) (struct drm_device *);
+	int (*load) (struct drm_device *, unsigned long flags);
+	int (*firstopen) (struct drm_device *);
+	int (*open) (struct drm_device *, drm_file_t *);
+	void (*preclose) (struct drm_device *, struct file * filp);
+	void (*postclose) (struct drm_device *, drm_file_t *);
+	void (*lastclose) (struct drm_device *);
+	int (*unload) (struct drm_device *);
 	int (*dma_ioctl) (DRM_IOCTL_ARGS);
-	int (*open_helper) (struct drm_device *, drm_file_t *);
-	void (*free_filp_priv) (struct drm_device *, drm_file_t *);
-	void (*release) (struct drm_device *, struct file * filp);
 	void (*dma_ready) (struct drm_device *);
 	int (*dma_quiescent) (struct drm_device *);
 	int (*context_ctor) (struct drm_device * dev, int context);
@@ -561,8 +550,9 @@ struct drm_driver {
 	int (*kernel_context_switch) (struct drm_device * dev, int old,
 				      int new);
 	void (*kernel_context_switch_unlock) (struct drm_device * dev,
-					      drm_lock_t * lock);
+					      drm_lock_t *lock);
 	int (*vblank_wait) (struct drm_device * dev, unsigned int *sequence);
+	int (*dri_library_name) (struct drm_device *dev, char *buf);
 
 	/**
 	 * Called by \c drm_device_is_agp. Typically used to determine if a
@@ -579,16 +569,24 @@ struct drm_driver {
 
 	/* these have to be filled in */
 
-	int (*postinit) (struct drm_device *, unsigned long flags);
-	irqreturn_t(*irq_handler) (DRM_IRQ_ARGS);
+	irqreturn_t(*irq_handler) (DRM_IRQ_ARGS);
 	void (*irq_preinstall) (struct drm_device * dev);
 	void (*irq_postinstall) (struct drm_device * dev);
 	void (*irq_uninstall) (struct drm_device * dev);
 	void (*reclaim_buffers) (struct drm_device * dev, struct file * filp);
+	void (*reclaim_buffers_locked) (struct drm_device *dev,
+					struct file *filp);
 	unsigned long (*get_map_ofs) (drm_map_t * map);
 	unsigned long (*get_reg_ofs) (struct drm_device * dev);
 	void (*set_version) (struct drm_device * dev, drm_set_version_t * sv);
-	int (*version) (drm_version_t * version);
+
+	int major;
+	int minor;
+	int patchlevel;
+	char *name;
+	char *desc;
+	char *date;
+
 	u32 driver_features;
 	int dev_priv_size;
 	drm_ioctl_desc_t *ioctls;
@@ -752,19 +750,43 @@ static inline int drm_core_has_MTRR(struct drm_device *dev)
 {
 	return drm_core_check_feature(dev, DRIVER_USE_MTRR);
 }
+
+#define DRM_MTRR_WC MTRR_TYPE_WRCOMB
+
+static inline int drm_mtrr_add(unsigned long offset, unsigned long size,
+			       unsigned int flags)
+{
+	return mtrr_add(offset, size, flags, 1);
+}
+
+static inline int drm_mtrr_del(int handle, unsigned long offset,
+			       unsigned long size, unsigned int flags)
+{
+	return mtrr_del(handle, offset, size);
+}
+
 #else
 #define drm_core_has_MTRR(dev) (0)
+
+#define DRM_MTRR_WC 0
+
+static inline int drm_mtrr_add(unsigned long offset, unsigned long size,
+			       unsigned int flags)
+{
+	return 0;
+}
+
+static inline int drm_mtrr_del(int handle, unsigned long offset,
+			       unsigned long size, unsigned int flags)
+{
+	return 0;
+}
 #endif
 
 /******************************************************************/
 /** \name Internal function definitions */
 /*@{*/
 
-	/* Misc. support (drm_init.h) */
-extern int drm_flags;
-extern void drm_parse_options(char *s);
-extern int drm_cpu_valid(void);
-
 	/* Driver support (drm_drv.h) */
 extern int drm_init(struct drm_driver *driver);
 extern void drm_exit(struct drm_driver *driver);
@@ -772,12 +794,11 @@ extern int drm_ioctl(struct inode *inode, struct file *filp,
 		     unsigned int cmd, unsigned long arg);
 extern long drm_compat_ioctl(struct file *filp,
 			     unsigned int cmd, unsigned long arg);
-extern int drm_takedown(drm_device_t * dev);
+extern int drm_lastclose(drm_device_t *dev);
 
 	/* Device support (drm_fops.h) */
 extern int drm_open(struct inode *inode, struct file *filp);
 extern int drm_stub_open(struct inode *inode, struct file *filp);
-extern int drm_flush(struct file *filp);
 extern int drm_fasync(int fd, struct file *filp, int on);
 extern int drm_release(struct inode *inode, struct file *filp);
 
@@ -819,6 +840,8 @@ extern int drm_getstats(struct inode *inode, struct file *filp,
 			unsigned int cmd, unsigned long arg);
 extern int drm_setversion(struct inode *inode, struct file *filp,
 			  unsigned int cmd, unsigned long arg);
+extern int drm_noop(struct inode *inode, struct file *filp,
+		    unsigned int cmd, unsigned long arg);
 
 	/* Context IOCTL support (drm_context.h) */
 extern int drm_resctx(struct inode *inode, struct file *filp,
@@ -857,10 +880,6 @@ extern int drm_getmagic(struct inode *inode, struct file *filp,
 extern int drm_authmagic(struct inode *inode, struct file *filp,
 			 unsigned int cmd, unsigned long arg);
 
-	/* Placeholder for ioctls past */
-extern int drm_noop(struct inode *inode, struct file *filp,
-		    unsigned int cmd, unsigned long arg);
-
 	/* Locking IOCTL support (drm_lock.h) */
 extern int drm_lock(struct inode *inode, struct file *filp,
 		    unsigned int cmd, unsigned long arg);
@@ -873,6 +892,7 @@ extern int drm_lock_free(drm_device_t * dev,
 	/* Buffer management support (drm_bufs.h) */
 extern int drm_addbufs_agp(drm_device_t * dev, drm_buf_desc_t * request);
 extern int drm_addbufs_pci(drm_device_t * dev, drm_buf_desc_t * request);
+extern int drm_addbufs_fb(drm_device_t *dev, drm_buf_desc_t *request);
 extern int drm_addmap(drm_device_t * dev, unsigned int offset,
 		      unsigned int size, drm_map_type_t type,
 		      drm_map_flags_t flags, drm_local_map_t ** map_ptr);
@@ -908,8 +928,8 @@ extern void drm_core_reclaim_buffers(drm_device_t * dev, struct file *filp);
 	/* IRQ support (drm_irq.h) */
 extern int drm_control(struct inode *inode, struct file *filp,
 		       unsigned int cmd, unsigned long arg);
-extern int drm_irq_uninstall(drm_device_t * dev);
 extern irqreturn_t drm_irq_handler(DRM_IRQ_ARGS);
+extern int drm_irq_uninstall(drm_device_t * dev);
 extern void drm_driver_irq_preinstall(drm_device_t * dev);
 extern void drm_driver_irq_postinstall(drm_device_t * dev);
 extern void drm_driver_irq_uninstall(drm_device_t * dev);
@@ -933,13 +953,17 @@ extern int drm_agp_enable_ioctl(struct inode *inode, struct file *filp,
 extern int drm_agp_info(drm_device_t * dev, drm_agp_info_t * info);
 extern int drm_agp_info_ioctl(struct inode *inode, struct file *filp,
 			      unsigned int cmd, unsigned long arg);
-extern int drm_agp_alloc(struct inode *inode, struct file *filp,
+extern int drm_agp_alloc(drm_device_t *dev, drm_agp_buffer_t *request);
+extern int drm_agp_alloc_ioctl(struct inode *inode, struct file *filp,
 			 unsigned int cmd, unsigned long arg);
-extern int drm_agp_free(struct inode *inode, struct file *filp,
+extern int drm_agp_free(drm_device_t *dev, drm_agp_buffer_t *request);
+extern int drm_agp_free_ioctl(struct inode *inode, struct file *filp,
 			unsigned int cmd, unsigned long arg);
-extern int drm_agp_unbind(struct inode *inode, struct file *filp,
+extern int drm_agp_unbind(drm_device_t *dev, drm_agp_binding_t *request);
+extern int drm_agp_unbind_ioctl(struct inode *inode, struct file *filp,
 			  unsigned int cmd, unsigned long arg);
-extern int drm_agp_bind(struct inode *inode, struct file *filp,
+extern int drm_agp_bind(drm_device_t *dev, drm_agp_binding_t *request);
+extern int drm_agp_bind_ioctl(struct inode *inode, struct file *filp,
 			unsigned int cmd, unsigned long arg);
 extern DRM_AGP_MEM *drm_agp_allocate_memory(struct agp_bridge_data *bridge,
 					    size_t pages, u32 type);
@@ -991,10 +1015,8 @@ extern struct drm_sysfs_class *drm_sysfs_create(struct module *owner,
 						char *name);
 extern void drm_sysfs_destroy(struct drm_sysfs_class *cs);
 extern struct class_device *drm_sysfs_device_add(struct drm_sysfs_class *cs,
-						 dev_t dev,
-						 struct device *device,
-						 const char *fmt, ...);
-extern void drm_sysfs_device_remove(dev_t dev);
+						 drm_head_t *head);
+extern void drm_sysfs_device_remove(struct class_device *class_dev);
 
 /* Inline replacements for DRM_IOREMAP macros */
 static __inline__ void drm_core_ioremap(struct drm_map *map,
diff --git a/drivers/char/drm/drm_agpsupport.c b/drivers/char/drm/drm_agpsupport.c
index 2b6453a9ffce..fabc930c67a2 100644
--- a/drivers/char/drm/drm_agpsupport.c
+++ b/drivers/char/drm/drm_agpsupport.c
@@ -1,5 +1,5 @@
 /**
- * \file drm_agpsupport.h
+ * \file drm_agpsupport.c
  * DRM support for AGP/GART backend
  *
  * \author Rickard E. (Rik) Faith <faith@valinux.com>
@@ -91,7 +91,7 @@ int drm_agp_info_ioctl(struct inode *inode, struct file *filp,
 /**
  * Acquire the AGP device.
  *
- * \param dev DRM device that is to acquire AGP
+ * \param dev DRM device that is to acquire AGP.
  * \return zero on success or a negative number on failure.
  *
  * Verifies the AGP device hasn't been acquired before and calls
@@ -134,7 +134,7 @@ int drm_agp_acquire_ioctl(struct inode *inode, struct file *filp,
 /**
  * Release the AGP device.
  *
- * \param dev DRM device that is to release AGP
+ * \param dev DRM device that is to release AGP.
  * \return zero on success or a negative number on failure.
  *
  * Verifies the AGP device has been acquired and calls \c agp_backend_release.
@@ -147,7 +147,6 @@ int drm_agp_release(drm_device_t * dev)
 	dev->agp->acquired = 0;
 	return 0;
 }
-
 EXPORT_SYMBOL(drm_agp_release);
 
 int drm_agp_release_ioctl(struct inode *inode, struct file *filp,
@@ -208,30 +207,22 @@ int drm_agp_enable_ioctl(struct inode *inode, struct file *filp,
 * Verifies the AGP device is present and has been acquired, allocates the
 * memory via alloc_agp() and creates a drm_agp_mem entry for it.
 */
-int drm_agp_alloc(struct inode *inode, struct file *filp,
-		  unsigned int cmd, unsigned long arg)
+int drm_agp_alloc(drm_device_t *dev, drm_agp_buffer_t *request)
 {
-	drm_file_t *priv = filp->private_data;
-	drm_device_t *dev = priv->head->dev;
-	drm_agp_buffer_t request;
 	drm_agp_mem_t *entry;
 	DRM_AGP_MEM *memory;
 	unsigned long pages;
 	u32 type;
-	drm_agp_buffer_t __user *argp = (void __user *)arg;
 
 	if (!dev->agp || !dev->agp->acquired)
 		return -EINVAL;
-	if (copy_from_user(&request, argp, sizeof(request)))
-		return -EFAULT;
 	if (!(entry = drm_alloc(sizeof(*entry), DRM_MEM_AGPLISTS)))
 		return -ENOMEM;
 
 	memset(entry, 0, sizeof(*entry));
 
-	pages = (request.size + PAGE_SIZE - 1) / PAGE_SIZE;
-	type = (u32) request.type;
-
+	pages = (request->size + PAGE_SIZE - 1) / PAGE_SIZE;
+	type = (u32) request->type;
 	if (!(memory = drm_alloc_agp(dev, pages, type))) {
 		drm_free(entry, sizeof(*entry), DRM_MEM_AGPLISTS);
 		return -ENOMEM;
@@ -247,16 +238,39 @@ int drm_agp_alloc(struct inode *inode, struct file *filp,
 	dev->agp->memory->prev = entry;
 	dev->agp->memory = entry;
 
-	request.handle = entry->handle;
-	request.physical = memory->physical;
+	request->handle = entry->handle;
+	request->physical = memory->physical;
+
+	return 0;
+}
+EXPORT_SYMBOL(drm_agp_alloc);
+
+int drm_agp_alloc_ioctl(struct inode *inode, struct file *filp,
+			unsigned int cmd, unsigned long arg)
+{
+	drm_file_t *priv = filp->private_data;
+	drm_device_t *dev = priv->head->dev;
+	drm_agp_buffer_t request;
+	drm_agp_buffer_t __user *argp = (void __user *)arg;
+	int err;
+
+	if (copy_from_user(&request, argp, sizeof(request)))
+		return -EFAULT;
+
+	err = drm_agp_alloc(dev, &request);
+	if (err)
+		return err;
 
 	if (copy_to_user(argp, &request, sizeof(request))) {
+		drm_agp_mem_t *entry = dev->agp->memory;
+
 		dev->agp->memory = entry->next;
 		dev->agp->memory->prev = NULL;
-		drm_free_agp(memory, pages);
+		drm_free_agp(entry->memory, entry->pages);
 		drm_free(entry, sizeof(*entry), DRM_MEM_AGPLISTS);
 		return -EFAULT;
 	}
+
 	return 0;
 }
 
@@ -293,21 +307,14 @@ static drm_agp_mem_t *drm_agp_lookup_entry(drm_device_t * dev,
 * Verifies the AGP device is present and acquired, looks-up the AGP memory
 * entry and passes it to the unbind_agp() function.
 */
-int drm_agp_unbind(struct inode *inode, struct file *filp,
-		   unsigned int cmd, unsigned long arg)
+int drm_agp_unbind(drm_device_t *dev, drm_agp_binding_t *request)
 {
-	drm_file_t *priv = filp->private_data;
-	drm_device_t *dev = priv->head->dev;
-	drm_agp_binding_t request;
 	drm_agp_mem_t *entry;
 	int ret;
 
 	if (!dev->agp || !dev->agp->acquired)
 		return -EINVAL;
-	if (copy_from_user
-	    (&request, (drm_agp_binding_t __user *) arg, sizeof(request)))
-		return -EFAULT;
-	if (!(entry = drm_agp_lookup_entry(dev, request.handle)))
+	if (!(entry = drm_agp_lookup_entry(dev, request->handle)))
 		return -EINVAL;
 	if (!entry->bound)
 		return -EINVAL;
@@ -316,6 +323,21 @@ int drm_agp_unbind(struct inode *inode, struct file *filp,
 	entry->bound = 0;
 	return ret;
 }
+EXPORT_SYMBOL(drm_agp_unbind);
+
+int drm_agp_unbind_ioctl(struct inode *inode, struct file *filp,
+			 unsigned int cmd, unsigned long arg)
+{
+	drm_file_t *priv = filp->private_data;
+	drm_device_t *dev = priv->head->dev;
+	drm_agp_binding_t request;
+
+	if (copy_from_user
+	    (&request, (drm_agp_binding_t __user *) arg, sizeof(request)))
+		return -EFAULT;
+
+	return drm_agp_unbind(dev, &request);
+}
 
 /**
  * Bind AGP memory into the GATT (ioctl)
@@ -330,26 +352,19 @@ int drm_agp_unbind(struct inode *inode, struct file *filp,
 * is currently bound into the GATT. Looks-up the AGP memory entry and passes
 * it to bind_agp() function.
 */
-int drm_agp_bind(struct inode *inode, struct file *filp,
-		 unsigned int cmd, unsigned long arg)
+int drm_agp_bind(drm_device_t *dev, drm_agp_binding_t *request)
 {
-	drm_file_t *priv = filp->private_data;
-	drm_device_t *dev = priv->head->dev;
-	drm_agp_binding_t request;
 	drm_agp_mem_t *entry;
 	int retcode;
 	int page;
 
 	if (!dev->agp || !dev->agp->acquired)
 		return -EINVAL;
-	if (copy_from_user
-	    (&request, (drm_agp_binding_t __user *) arg, sizeof(request)))
-		return -EFAULT;
-	if (!(entry = drm_agp_lookup_entry(dev, request.handle)))
+	if (!(entry = drm_agp_lookup_entry(dev, request->handle)))
 		return -EINVAL;
 	if (entry->bound)
 		return -EINVAL;
-	page = (request.offset + PAGE_SIZE - 1) / PAGE_SIZE;
+	page = (request->offset + PAGE_SIZE - 1) / PAGE_SIZE;
 	if ((retcode = drm_bind_agp(entry->memory, page)))
 		return retcode;
 	entry->bound = dev->agp->base + (page << PAGE_SHIFT);
@@ -357,6 +372,21 @@ int drm_agp_bind(struct inode *inode, struct file *filp,
 		  dev->agp->base, entry->bound);
 	return 0;
 }
+EXPORT_SYMBOL(drm_agp_bind);
+
+int drm_agp_bind_ioctl(struct inode *inode, struct file *filp,
+		       unsigned int cmd, unsigned long arg)
+{
+	drm_file_t *priv = filp->private_data;
+	drm_device_t *dev = priv->head->dev;
+	drm_agp_binding_t request;
+
+	if (copy_from_user
+	    (&request, (drm_agp_binding_t __user *) arg, sizeof(request)))
+		return -EFAULT;
+
+	return drm_agp_bind(dev, &request);
+}
 
 /**
  * Free AGP memory (ioctl).
@@ -372,20 +402,13 @@ int drm_agp_bind(struct inode *inode, struct file *filp,
 * unbind_agp(). Frees it via free_agp() as well as the entry itself
 * and unlinks from the doubly linked list it's inserted in.
 */
-int drm_agp_free(struct inode *inode, struct file *filp,
-		 unsigned int cmd, unsigned long arg)
+int drm_agp_free(drm_device_t *dev, drm_agp_buffer_t *request)
 {
-	drm_file_t *priv = filp->private_data;
-	drm_device_t *dev = priv->head->dev;
-	drm_agp_buffer_t request;
 	drm_agp_mem_t *entry;
 
 	if (!dev->agp || !dev->agp->acquired)
 		return -EINVAL;
-	if (copy_from_user
-	    (&request, (drm_agp_buffer_t __user *) arg, sizeof(request)))
-		return -EFAULT;
-	if (!(entry = drm_agp_lookup_entry(dev, request.handle)))
+	if (!(entry = drm_agp_lookup_entry(dev, request->handle)))
 		return -EINVAL;
 	if (entry->bound)
 		drm_unbind_agp(entry->memory);
@@ -402,12 +425,30 @@ int drm_agp_free(struct inode *inode, struct file *filp,
 	drm_free(entry, sizeof(*entry), DRM_MEM_AGPLISTS);
 	return 0;
 }
+EXPORT_SYMBOL(drm_agp_free);
+
+int drm_agp_free_ioctl(struct inode *inode, struct file *filp,
+		       unsigned int cmd, unsigned long arg)
+{
+	drm_file_t *priv = filp->private_data;
+	drm_device_t *dev = priv->head->dev;
+	drm_agp_buffer_t request;
+
+	if (copy_from_user
+	    (&request, (drm_agp_buffer_t __user *) arg, sizeof(request)))
+		return -EFAULT;
+
+	return drm_agp_free(dev, &request);
+}
 
 /**
  * Initialize the AGP resources.
 *
  * \return pointer to a drm_agp_head structure.
  *
+ * Gets the drm_agp_t structure which is made available by the agpgart module
+ * via the inter_module_* functions. Creates and initializes a drm_agp_head
+ * structure.
 */
 drm_agp_head_t *drm_agp_init(drm_device_t * dev)
 {
diff --git a/drivers/char/drm/drm_bufs.c b/drivers/char/drm/drm_bufs.c
index 319bdea8de8a..1db12dcb6802 100644
--- a/drivers/char/drm/drm_bufs.c
+++ b/drivers/char/drm/drm_bufs.c
@@ -36,22 +36,21 @@
 #include <linux/vmalloc.h>
 #include "drmP.h"
 
-unsigned long drm_get_resource_start(drm_device_t * dev, unsigned int resource)
+unsigned long drm_get_resource_start(drm_device_t *dev, unsigned int resource)
 {
 	return pci_resource_start(dev->pdev, resource);
 }
-
 EXPORT_SYMBOL(drm_get_resource_start);
 
-unsigned long drm_get_resource_len(drm_device_t * dev, unsigned int resource)
+unsigned long drm_get_resource_len(drm_device_t *dev, unsigned int resource)
 {
 	return pci_resource_len(dev->pdev, resource);
 }
 
 EXPORT_SYMBOL(drm_get_resource_len);
 
-static drm_map_list_t *drm_find_matching_map(drm_device_t * dev,
-					     drm_local_map_t * map)
+static drm_map_list_t *drm_find_matching_map(drm_device_t *dev,
+					     drm_local_map_t *map)
 {
 	struct list_head *list;
 
@@ -74,7 +73,7 @@ static drm_map_list_t *drm_find_matching_map(drm_device_t * dev,
 
 #ifdef _LP64
 static __inline__ unsigned int HandleID(unsigned long lhandle,
-					drm_device_t * dev)
+					drm_device_t *dev)
 {
 	static unsigned int map32_handle = START_RANGE;
 	unsigned int hash;
@@ -155,7 +154,7 @@ static int drm_addmap_core(drm_device_t * dev, unsigned int offset,
 	case _DRM_REGISTERS:
 	case _DRM_FRAME_BUFFER:
 #if !defined(__sparc__) && !defined(__alpha__) && !defined(__ia64__) && !defined(__powerpc64__) && !defined(__x86_64__)
-		if (map->offset + map->size < map->offset ||
+		if (map->offset + (map->size-1) < map->offset ||
 		    map->offset < virt_to_phys(high_memory)) {
 			drm_free(map, sizeof(*map), DRM_MEM_MAPS);
 			return -EINVAL;
@@ -301,6 +300,9 @@ int drm_addmap_ioctl(struct inode *inode, struct file *filp,
 		return -EFAULT;
 	}
 
+	if (!(capable(CAP_SYS_ADMIN) || map.type == _DRM_AGP))
+		return -EPERM;
+
 	err = drm_addmap_core(dev, map.offset, map.size, map.type, map.flags,
 			      &maplist);
 
@@ -332,7 +334,7 @@ int drm_addmap_ioctl(struct inode *inode, struct file *filp,
 *
 * \sa drm_addmap
 */
-int drm_rmmap_locked(drm_device_t * dev, drm_local_map_t * map)
+int drm_rmmap_locked(drm_device_t *dev, drm_local_map_t *map)
 {
 	struct list_head *list;
 	drm_map_list_t *r_list = NULL;
@@ -384,10 +386,9 @@ int drm_rmmap_locked(drm_device_t * dev, drm_local_map_t * map)
 
 	return 0;
 }
-
 EXPORT_SYMBOL(drm_rmmap_locked);
 
-int drm_rmmap(drm_device_t * dev, drm_local_map_t * map)
+int drm_rmmap(drm_device_t *dev, drm_local_map_t *map)
 {
 	int ret;
 
@@ -397,7 +398,6 @@ int drm_rmmap(drm_device_t * dev, drm_local_map_t * map)
 
 	return ret;
 }
-
 EXPORT_SYMBOL(drm_rmmap);
 
 /* The rmmap ioctl appears to be unnecessary. All mappings are torn down on
@@ -548,7 +548,7 @@ int drm_addbufs_agp(drm_device_t * dev, drm_buf_desc_t * request)
 	DRM_DEBUG("count: %d\n", count);
 	DRM_DEBUG("order: %d\n", order);
 	DRM_DEBUG("size: %d\n", size);
-	DRM_DEBUG("agp_offset: %lu\n", agp_offset);
+	DRM_DEBUG("agp_offset: %lx\n", agp_offset);
 	DRM_DEBUG("alignment: %d\n", alignment);
 	DRM_DEBUG("page_order: %d\n", page_order);
 	DRM_DEBUG("total: %d\n", total);
@@ -649,6 +649,8 @@ int drm_addbufs_agp(drm_device_t * dev, drm_buf_desc_t * request)
 	}
 
 	dma->buf_count += entry->buf_count;
+	dma->seg_count += entry->seg_count;
+	dma->page_count += byte_count >> PAGE_SHIFT;
 	dma->byte_count += byte_count;
 
 	DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
@@ -664,7 +666,6 @@ int drm_addbufs_agp(drm_device_t * dev, drm_buf_desc_t * request)
 	atomic_dec(&dev->buf_alloc);
 	return 0;
 }
-
 EXPORT_SYMBOL(drm_addbufs_agp);
 #endif /* __OS_HAS_AGP */
 
@@ -689,9 +690,13 @@ int drm_addbufs_pci(drm_device_t * dev, drm_buf_desc_t * request)
 
 	if (!drm_core_check_feature(dev, DRIVER_PCI_DMA))
 		return -EINVAL;
+
 	if (!dma)
 		return -EINVAL;
 
+	if (!capable(CAP_SYS_ADMIN))
+		return -EPERM;
+
 	count = request->count;
 	order = drm_order(request->size);
 	size = 1 << order;
@@ -882,7 +887,6 @@ int drm_addbufs_pci(drm_device_t * dev, drm_buf_desc_t * request)
 	return 0;
 
 }
-
 EXPORT_SYMBOL(drm_addbufs_pci);
 
 static int drm_addbufs_sg(drm_device_t * dev, drm_buf_desc_t * request)
@@ -908,6 +912,9 @@ static int drm_addbufs_sg(drm_device_t * dev, drm_buf_desc_t * request)
 	if (!dma)
 		return -EINVAL;
 
+	if (!capable(CAP_SYS_ADMIN))
+		return -EPERM;
+
 	count = request->count;
 	order = drm_order(request->size);
 	size = 1 << order;
@@ -1026,6 +1033,8 @@ static int drm_addbufs_sg(drm_device_t * dev, drm_buf_desc_t * request)
 	}
 
 	dma->buf_count += entry->buf_count;
+	dma->seg_count += entry->seg_count;
+	dma->page_count += byte_count >> PAGE_SHIFT;
 	dma->byte_count += byte_count;
 
 	DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
@@ -1042,7 +1051,7 @@ static int drm_addbufs_sg(drm_device_t * dev, drm_buf_desc_t * request)
 	return 0;
 }
 
-static int drm_addbufs_fb(drm_device_t * dev, drm_buf_desc_t * request)
+int drm_addbufs_fb(drm_device_t * dev, drm_buf_desc_t * request)
 {
 	drm_device_dma_t *dma = dev->dma;
 	drm_buf_entry_t *entry;
@@ -1065,6 +1074,9 @@ static int drm_addbufs_fb(drm_device_t * dev, drm_buf_desc_t * request)
 	if (!dma)
 		return -EINVAL;
 
+	if (!capable(CAP_SYS_ADMIN))
+		return -EPERM;
+
 	count = request->count;
 	order = drm_order(request->size);
 	size = 1 << order;
@@ -1181,6 +1193,8 @@ static int drm_addbufs_fb(drm_device_t * dev, drm_buf_desc_t * request)
 	}
 
 	dma->buf_count += entry->buf_count;
+	dma->seg_count += entry->seg_count;
+	dma->page_count += byte_count >> PAGE_SHIFT;
 	dma->byte_count += byte_count;
 
 	DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
@@ -1196,6 +1210,8 @@ static int drm_addbufs_fb(drm_device_t * dev, drm_buf_desc_t * request)
 	atomic_dec(&dev->buf_alloc);
 	return 0;
 }
+EXPORT_SYMBOL(drm_addbufs_fb);
+
 
 /**
  * Add buffers for DMA transfers (ioctl).
@@ -1577,5 +1593,6 @@ int drm_order(unsigned long size)
 
 	return order;
 }
-
 EXPORT_SYMBOL(drm_order);
+
+
diff --git a/drivers/char/drm/drm_context.c b/drivers/char/drm/drm_context.c
index bd958d69a2ac..f84254526949 100644
--- a/drivers/char/drm/drm_context.c
+++ b/drivers/char/drm/drm_context.c
@@ -433,7 +433,7 @@ int drm_addctx(struct inode *inode, struct file *filp,
 	if (ctx.handle != DRM_KERNEL_CONTEXT) {
 		if (dev->driver->context_ctor)
 			if (!dev->driver->context_ctor(dev, ctx.handle)) {
-				DRM_DEBUG( "Running out of ctxs or memory.\n");
+				DRM_DEBUG("Running out of ctxs or memory.\n");
 				return -ENOMEM;
 			}
 	}
diff --git a/drivers/char/drm/drm_core.h b/drivers/char/drm/drm_core.h
index cc97bb906dda..f4f9db6c7ed4 100644
--- a/drivers/char/drm/drm_core.h
+++ b/drivers/char/drm/drm_core.h
@@ -24,11 +24,11 @@
 
 #define CORE_NAME	"drm"
 #define CORE_DESC	"DRM shared core routines"
-#define CORE_DATE	"20040925"
+#define CORE_DATE	"20051102"
 
 #define DRM_IF_MAJOR	1
 #define DRM_IF_MINOR	2
 
 #define CORE_MAJOR	1
 #define CORE_MINOR	0
-#define CORE_PATCHLEVEL 0
+#define CORE_PATCHLEVEL 1
diff --git a/drivers/char/drm/drm_drv.c b/drivers/char/drm/drm_drv.c
index 4dff7554eb08..c4fa5a29582b 100644
--- a/drivers/char/drm/drm_drv.c
+++ b/drivers/char/drm/drm_drv.c
@@ -56,66 +56,66 @@ static int drm_version(struct inode *inode, struct file *filp,
 
 /** Ioctl table */
 static drm_ioctl_desc_t drm_ioctls[] = {
-	[DRM_IOCTL_NR(DRM_IOCTL_VERSION)] = {drm_version, 0, 0},
-	[DRM_IOCTL_NR(DRM_IOCTL_GET_UNIQUE)] = {drm_getunique, 0, 0},
-	[DRM_IOCTL_NR(DRM_IOCTL_GET_MAGIC)] = {drm_getmagic, 0, 0},
-	[DRM_IOCTL_NR(DRM_IOCTL_IRQ_BUSID)] = {drm_irq_by_busid, 0, 1},
-	[DRM_IOCTL_NR(DRM_IOCTL_GET_MAP)] = {drm_getmap, 0, 0},
-	[DRM_IOCTL_NR(DRM_IOCTL_GET_CLIENT)] = {drm_getclient, 0, 0},
-	[DRM_IOCTL_NR(DRM_IOCTL_GET_STATS)] = {drm_getstats, 0, 0},
-	[DRM_IOCTL_NR(DRM_IOCTL_SET_VERSION)] = {drm_setversion, 0, 1},
-
-	[DRM_IOCTL_NR(DRM_IOCTL_SET_UNIQUE)] = {drm_setunique, 1, 1},
-	[DRM_IOCTL_NR(DRM_IOCTL_BLOCK)] = {drm_noop, 1, 1},
-	[DRM_IOCTL_NR(DRM_IOCTL_UNBLOCK)] = {drm_noop, 1, 1},
-	[DRM_IOCTL_NR(DRM_IOCTL_AUTH_MAGIC)] = {drm_authmagic, 1, 1},
-
-	[DRM_IOCTL_NR(DRM_IOCTL_ADD_MAP)] = {drm_addmap_ioctl, 1, 1},
-	[DRM_IOCTL_NR(DRM_IOCTL_RM_MAP)] = {drm_rmmap_ioctl, 1, 0},
-
-	[DRM_IOCTL_NR(DRM_IOCTL_SET_SAREA_CTX)] = {drm_setsareactx, 1, 1},
-	[DRM_IOCTL_NR(DRM_IOCTL_GET_SAREA_CTX)] = {drm_getsareactx, 1, 0},
-
-	[DRM_IOCTL_NR(DRM_IOCTL_ADD_CTX)] = {drm_addctx, 1, 1},
-	[DRM_IOCTL_NR(DRM_IOCTL_RM_CTX)] = {drm_rmctx, 1, 1},
-	[DRM_IOCTL_NR(DRM_IOCTL_MOD_CTX)] = {drm_modctx, 1, 1},
-	[DRM_IOCTL_NR(DRM_IOCTL_GET_CTX)] = {drm_getctx, 1, 0},
-	[DRM_IOCTL_NR(DRM_IOCTL_SWITCH_CTX)] = {drm_switchctx, 1, 1},
-	[DRM_IOCTL_NR(DRM_IOCTL_NEW_CTX)] = {drm_newctx, 1, 1},
-	[DRM_IOCTL_NR(DRM_IOCTL_RES_CTX)] = {drm_resctx, 1, 0},
-
-	[DRM_IOCTL_NR(DRM_IOCTL_ADD_DRAW)] = {drm_adddraw, 1, 1},
-	[DRM_IOCTL_NR(DRM_IOCTL_RM_DRAW)] = {drm_rmdraw, 1, 1},
-
-	[DRM_IOCTL_NR(DRM_IOCTL_LOCK)] = {drm_lock, 1, 0},
-	[DRM_IOCTL_NR(DRM_IOCTL_UNLOCK)] = {drm_unlock, 1, 0},
-
-	[DRM_IOCTL_NR(DRM_IOCTL_FINISH)] = {drm_noop, 1, 0},
-
-	[DRM_IOCTL_NR(DRM_IOCTL_ADD_BUFS)] = {drm_addbufs, 1, 1},
-	[DRM_IOCTL_NR(DRM_IOCTL_MARK_BUFS)] = {drm_markbufs, 1, 1},
-	[DRM_IOCTL_NR(DRM_IOCTL_INFO_BUFS)] = {drm_infobufs, 1, 0},
-	[DRM_IOCTL_NR(DRM_IOCTL_MAP_BUFS)] = {drm_mapbufs, 1, 0},
-	[DRM_IOCTL_NR(DRM_IOCTL_FREE_BUFS)] = {drm_freebufs, 1, 0},
+	[DRM_IOCTL_NR(DRM_IOCTL_VERSION)] = {drm_version, 0},
+	[DRM_IOCTL_NR(DRM_IOCTL_GET_UNIQUE)] = {drm_getunique, 0},
+	[DRM_IOCTL_NR(DRM_IOCTL_GET_MAGIC)] = {drm_getmagic, 0},
+	[DRM_IOCTL_NR(DRM_IOCTL_IRQ_BUSID)] = {drm_irq_by_busid, DRM_MASTER|DRM_ROOT_ONLY},
+	[DRM_IOCTL_NR(DRM_IOCTL_GET_MAP)] = {drm_getmap, 0},
+	[DRM_IOCTL_NR(DRM_IOCTL_GET_CLIENT)] = {drm_getclient, 0},
+	[DRM_IOCTL_NR(DRM_IOCTL_GET_STATS)] = {drm_getstats, 0},
+	[DRM_IOCTL_NR(DRM_IOCTL_SET_VERSION)] = {drm_setversion, DRM_MASTER|DRM_ROOT_ONLY},
+	[DRM_IOCTL_NR(DRM_IOCTL_SET_UNIQUE)] = {drm_setunique, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
+	[DRM_IOCTL_NR(DRM_IOCTL_BLOCK)] = {drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
+	[DRM_IOCTL_NR(DRM_IOCTL_UNBLOCK)] = {drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
+	[DRM_IOCTL_NR(DRM_IOCTL_AUTH_MAGIC)] = {drm_authmagic, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
+
+	[DRM_IOCTL_NR(DRM_IOCTL_ADD_MAP)] = {drm_addmap_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
+	[DRM_IOCTL_NR(DRM_IOCTL_RM_MAP)] = {drm_rmmap_ioctl, DRM_AUTH},
+
+	[DRM_IOCTL_NR(DRM_IOCTL_SET_SAREA_CTX)] = {drm_setsareactx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
+	[DRM_IOCTL_NR(DRM_IOCTL_GET_SAREA_CTX)] = {drm_getsareactx, DRM_AUTH},
+
+	[DRM_IOCTL_NR(DRM_IOCTL_ADD_CTX)] = {drm_addctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
+	[DRM_IOCTL_NR(DRM_IOCTL_RM_CTX)] = {drm_rmctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
+	[DRM_IOCTL_NR(DRM_IOCTL_MOD_CTX)] = {drm_modctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
+	[DRM_IOCTL_NR(DRM_IOCTL_GET_CTX)] = {drm_getctx, DRM_AUTH},
+	[DRM_IOCTL_NR(DRM_IOCTL_SWITCH_CTX)] = {drm_switchctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
+	[DRM_IOCTL_NR(DRM_IOCTL_NEW_CTX)] = {drm_newctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
+	[DRM_IOCTL_NR(DRM_IOCTL_RES_CTX)] = {drm_resctx, DRM_AUTH},
+
+	[DRM_IOCTL_NR(DRM_IOCTL_ADD_DRAW)] = {drm_adddraw, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
+	[DRM_IOCTL_NR(DRM_IOCTL_RM_DRAW)] = {drm_rmdraw, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
+
+	[DRM_IOCTL_NR(DRM_IOCTL_LOCK)] = {drm_lock, DRM_AUTH},
+	[DRM_IOCTL_NR(DRM_IOCTL_UNLOCK)] = {drm_unlock, DRM_AUTH},
+
+	[DRM_IOCTL_NR(DRM_IOCTL_FINISH)] = {drm_noop, DRM_AUTH},
+
+	[DRM_IOCTL_NR(DRM_IOCTL_ADD_BUFS)] = {drm_addbufs, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
+	[DRM_IOCTL_NR(DRM_IOCTL_MARK_BUFS)] = {drm_markbufs, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
+	[DRM_IOCTL_NR(DRM_IOCTL_INFO_BUFS)] = {drm_infobufs, DRM_AUTH},
+	[DRM_IOCTL_NR(DRM_IOCTL_MAP_BUFS)] = {drm_mapbufs, DRM_AUTH},
+	[DRM_IOCTL_NR(DRM_IOCTL_FREE_BUFS)] = {drm_freebufs, DRM_AUTH},
 	/* The DRM_IOCTL_DMA ioctl should be defined by the driver. */
+	[DRM_IOCTL_NR(DRM_IOCTL_DMA)] = {NULL, DRM_AUTH},
 
-	[DRM_IOCTL_NR(DRM_IOCTL_CONTROL)] = {drm_control, 1, 1},
+	[DRM_IOCTL_NR(DRM_IOCTL_CONTROL)] = {drm_control, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
 
 #if __OS_HAS_AGP
-	[DRM_IOCTL_NR(DRM_IOCTL_AGP_ACQUIRE)] = {drm_agp_acquire_ioctl, 1, 1},
-	[DRM_IOCTL_NR(DRM_IOCTL_AGP_RELEASE)] = {drm_agp_release_ioctl, 1, 1},
-	[DRM_IOCTL_NR(DRM_IOCTL_AGP_ENABLE)] = {drm_agp_enable_ioctl, 1, 1},
-	[DRM_IOCTL_NR(DRM_IOCTL_AGP_INFO)] = {drm_agp_info_ioctl, 1, 0},
-	[DRM_IOCTL_NR(DRM_IOCTL_AGP_ALLOC)] = {drm_agp_alloc, 1, 1},
-	[DRM_IOCTL_NR(DRM_IOCTL_AGP_FREE)] = {drm_agp_free, 1, 1},
-	[DRM_IOCTL_NR(DRM_IOCTL_AGP_BIND)] = {drm_agp_bind, 1, 1},
-	[DRM_IOCTL_NR(DRM_IOCTL_AGP_UNBIND)] = {drm_agp_unbind, 1, 1},
+	[DRM_IOCTL_NR(DRM_IOCTL_AGP_ACQUIRE)] = {drm_agp_acquire_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
+	[DRM_IOCTL_NR(DRM_IOCTL_AGP_RELEASE)] = {drm_agp_release_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
+	[DRM_IOCTL_NR(DRM_IOCTL_AGP_ENABLE)] = {drm_agp_enable_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
+	[DRM_IOCTL_NR(DRM_IOCTL_AGP_INFO)] = {drm_agp_info_ioctl, DRM_AUTH},
+	[DRM_IOCTL_NR(DRM_IOCTL_AGP_ALLOC)] = {drm_agp_alloc_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
+	[DRM_IOCTL_NR(DRM_IOCTL_AGP_FREE)] = {drm_agp_free_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
+	[DRM_IOCTL_NR(DRM_IOCTL_AGP_BIND)] = {drm_agp_bind_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
+	[DRM_IOCTL_NR(DRM_IOCTL_AGP_UNBIND)] = {drm_agp_unbind_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
 #endif
 
-	[DRM_IOCTL_NR(DRM_IOCTL_SG_ALLOC)] = {drm_sg_alloc, 1, 1},
-	[DRM_IOCTL_NR(DRM_IOCTL_SG_FREE)] = {drm_sg_free, 1, 1},
+	[DRM_IOCTL_NR(DRM_IOCTL_SG_ALLOC)] = {drm_sg_alloc, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
+	[DRM_IOCTL_NR(DRM_IOCTL_SG_FREE)] = {drm_sg_free, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
 
-	[DRM_IOCTL_NR(DRM_IOCTL_WAIT_VBLANK)] = {drm_wait_vblank, 0, 0},
+	[DRM_IOCTL_NR(DRM_IOCTL_WAIT_VBLANK)] = {drm_wait_vblank, 0},
 };
 
 #define DRIVER_IOCTL_COUNT	DRM_ARRAY_SIZE( drm_ioctls )
@@ -129,7 +129,7 @@ static drm_ioctl_desc_t drm_ioctls[] = {
 *
 * \sa drm_device
 */
-int drm_takedown(drm_device_t * dev)
+int drm_lastclose(drm_device_t * dev)
 {
 	drm_magic_entry_t *pt, *next;
 	drm_map_list_t *r_list;
@@ -138,9 +138,9 @@ int drm_takedown(drm_device_t * dev)
 
 	DRM_DEBUG("\n");
 
-	if (dev->driver->pretakedown)
-		dev->driver->pretakedown(dev);
-	DRM_DEBUG("driver pretakedown completed\n");
+	if (dev->driver->lastclose)
+		dev->driver->lastclose(dev);
+	DRM_DEBUG("driver lastclose completed\n");
 
 	if (dev->unique) {
 		drm_free(dev->unique, strlen(dev->unique) + 1, DRM_MEM_DRIVER);
@@ -233,7 +233,7 @@ int drm_takedown(drm_device_t * dev)
 	}
 	up(&dev->struct_sem);
 
-	DRM_DEBUG("takedown completed\n");
+	DRM_DEBUG("lastclose completed\n");
 	return 0;
 }
 
@@ -281,7 +281,7 @@ EXPORT_SYMBOL(drm_init);
 /**
  * Called via cleanup_module() at module unload time.
  *
- * Cleans up all DRM device, calling takedown().
+ * Cleans up all DRM device, calling drm_lastclose().
  *
  * \sa drm_init
  */
@@ -294,7 +294,7 @@ static void drm_cleanup(drm_device_t * dev)
 		return;
 	}
 
-	drm_takedown(dev);
+	drm_lastclose(dev);
 
 	if (dev->maplist) {
 		drm_free(dev->maplist, sizeof(*dev->maplist), DRM_MEM_MAPS);
@@ -317,8 +317,8 @@ static void drm_cleanup(drm_device_t * dev)
 		dev->agp = NULL;
 	}
 
-	if (dev->driver->postcleanup)
-		dev->driver->postcleanup(dev);
+	if (dev->driver->unload)
+		dev->driver->unload(dev);
 
 	drm_put_head(&dev->primary);
 	if (drm_put_dev(dev))
@@ -342,12 +342,12 @@ void drm_exit(struct drm_driver *driver)
342 if (head->dev->driver != driver) 342 if (head->dev->driver != driver)
343 continue; 343 continue;
344 dev = head->dev; 344 dev = head->dev;
345 } 345 if (dev) {
346 if (dev) { 346 /* release the pci driver */
347 /* release the pci driver */ 347 if (dev->pdev)
348 if (dev->pdev) 348 pci_dev_put(dev->pdev);
349 pci_dev_put(dev->pdev); 349 drm_cleanup(dev);
350 drm_cleanup(dev); 350 }
351 } 351 }
352 DRM_INFO("Module unloaded\n"); 352 DRM_INFO("Module unloaded\n");
353} 353}
@@ -432,14 +432,17 @@ static int drm_version(struct inode *inode, struct file *filp,
432 drm_device_t *dev = priv->head->dev; 432 drm_device_t *dev = priv->head->dev;
433 drm_version_t __user *argp = (void __user *)arg; 433 drm_version_t __user *argp = (void __user *)arg;
434 drm_version_t version; 434 drm_version_t version;
435 int ret; 435 int len;
436 436
437 if (copy_from_user(&version, argp, sizeof(version))) 437 if (copy_from_user(&version, argp, sizeof(version)))
438 return -EFAULT; 438 return -EFAULT;
439 439
440 /* version is a required function to return the personality module version */ 440 version.version_major = dev->driver->major;
441 if ((ret = dev->driver->version(&version))) 441 version.version_minor = dev->driver->minor;
442 return ret; 442 version.version_patchlevel = dev->driver->patchlevel;
443 DRM_COPY(version.name, dev->driver->name);
444 DRM_COPY(version.date, dev->driver->date);
445 DRM_COPY(version.desc, dev->driver->desc);
443 446
444 if (copy_to_user(argp, &version, sizeof(version))) 447 if (copy_to_user(argp, &version, sizeof(version)))
445 return -EFAULT; 448 return -EFAULT;
@@ -493,8 +496,9 @@ int drm_ioctl(struct inode *inode, struct file *filp,
493 if (!func) { 496 if (!func) {
494 DRM_DEBUG("no function\n"); 497 DRM_DEBUG("no function\n");
495 retcode = -EINVAL; 498 retcode = -EINVAL;
496 } else if ((ioctl->root_only && !capable(CAP_SYS_ADMIN)) || 499 } else if (((ioctl->flags & DRM_ROOT_ONLY) && !capable(CAP_SYS_ADMIN)) ||
497 (ioctl->auth_needed && !priv->authenticated)) { 500 ((ioctl->flags & DRM_AUTH) && !priv->authenticated) ||
501 ((ioctl->flags & DRM_MASTER) && !priv->master)) {
498 retcode = -EACCES; 502 retcode = -EACCES;
499 } else { 503 } else {
500 retcode = func(inode, filp, cmd, arg); 504 retcode = func(inode, filp, cmd, arg);
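
The hunks above replace each ioctl's separate auth_needed/root_only booleans with a single flags word (DRM_AUTH, DRM_MASTER, DRM_ROOT_ONLY) and test it in drm_ioctl(). Below is a minimal stand-alone sketch of that permission model; the struct ioctl_desc, struct caller and check_ioctl_perm names are illustrative, not the kernel's, and capability checking is reduced to an is_root flag.

/* Minimal user-space sketch of the flag-based ioctl permission check
 * introduced above.  Names and the caller-state struct are illustrative;
 * only the three flag semantics mirror the patch. */
#include <stdio.h>
#include <errno.h>

#define DRM_AUTH      0x1   /* caller must be authenticated against the master */
#define DRM_MASTER    0x2   /* caller must be the master (first opener) */
#define DRM_ROOT_ONLY 0x4   /* caller must have CAP_SYS_ADMIN (root here) */

struct ioctl_desc {
    const char *name;
    int flags;
};

struct caller {
    int is_root;
    int authenticated;
    int master;
};

/* Returns 0 if the ioctl may proceed, -EACCES otherwise (as drm_ioctl() does). */
static int check_ioctl_perm(const struct ioctl_desc *d, const struct caller *c)
{
    if (((d->flags & DRM_ROOT_ONLY) && !c->is_root) ||
        ((d->flags & DRM_AUTH) && !c->authenticated) ||
        ((d->flags & DRM_MASTER) && !c->master))
        return -EACCES;
    return 0;
}

int main(void)
{
    struct ioctl_desc agp_enable = { "AGP_ENABLE", DRM_AUTH | DRM_MASTER | DRM_ROOT_ONLY };
    struct ioctl_desc agp_info   = { "AGP_INFO",   DRM_AUTH };
    struct caller client = { .is_root = 0, .authenticated = 1, .master = 0 };

    printf("%s -> %d\n", agp_enable.name, check_ioctl_perm(&agp_enable, &client)); /* -13 */
    printf("%s -> %d\n", agp_info.name,   check_ioctl_perm(&agp_info, &client));   /* 0 */
    return 0;
}
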
diff --git a/drivers/char/drm/drm_fops.c b/drivers/char/drm/drm_fops.c
index bf0a740122bf..403f44a1bf01 100644
--- a/drivers/char/drm/drm_fops.c
+++ b/drivers/char/drm/drm_fops.c
@@ -35,6 +35,7 @@
35 */ 35 */
36 36
37#include "drmP.h" 37#include "drmP.h"
38#include "drm_sarea.h"
38#include <linux/poll.h> 39#include <linux/poll.h>
39 40
40static int drm_open_helper(struct inode *inode, struct file *filp, 41static int drm_open_helper(struct inode *inode, struct file *filp,
@@ -42,15 +43,21 @@ static int drm_open_helper(struct inode *inode, struct file *filp,
42 43
43static int drm_setup(drm_device_t * dev) 44static int drm_setup(drm_device_t * dev)
44{ 45{
46 drm_local_map_t *map;
45 int i; 47 int i;
46 int ret; 48 int ret;
47 49
48 if (dev->driver->presetup) { 50 if (dev->driver->firstopen) {
49 ret = dev->driver->presetup(dev); 51 ret = dev->driver->firstopen(dev);
50 if (ret != 0) 52 if (ret != 0)
51 return ret; 53 return ret;
52 } 54 }
53 55
56 /* prebuild the SAREA */
57 i = drm_addmap(dev, 0, SAREA_MAX, _DRM_SHM, _DRM_CONTAINS_LOCK, &map);
58 if (i != 0)
59 return i;
60
54 atomic_set(&dev->ioctl_count, 0); 61 atomic_set(&dev->ioctl_count, 0);
55 atomic_set(&dev->vma_count, 0); 62 atomic_set(&dev->vma_count, 0);
56 dev->buf_use = 0; 63 dev->buf_use = 0;
@@ -109,8 +116,6 @@ static int drm_setup(drm_device_t * dev)
109 * drm_select_queue fails between the time the interrupt is 116 * drm_select_queue fails between the time the interrupt is
110 * initialized and the time the queues are initialized. 117 * initialized and the time the queues are initialized.
111 */ 118 */
112 if (dev->driver->postsetup)
113 dev->driver->postsetup(dev);
114 119
115 return 0; 120 return 0;
116} 121}
@@ -154,10 +159,168 @@ int drm_open(struct inode *inode, struct file *filp)
154 159
155 return retcode; 160 return retcode;
156} 161}
157
158EXPORT_SYMBOL(drm_open); 162EXPORT_SYMBOL(drm_open);
159 163
160/** 164/**
165 * File \c open operation.
166 *
167 * \param inode device inode.
168 * \param filp file pointer.
169 *
170 * Puts the dev->fops corresponding to the device minor number into
171 * \p filp, call the \c open method, and restore the file operations.
172 */
173int drm_stub_open(struct inode *inode, struct file *filp)
174{
175 drm_device_t *dev = NULL;
176 int minor = iminor(inode);
177 int err = -ENODEV;
178 struct file_operations *old_fops;
179
180 DRM_DEBUG("\n");
181
182 if (!((minor >= 0) && (minor < drm_cards_limit)))
183 return -ENODEV;
184
185 if (!drm_heads[minor])
186 return -ENODEV;
187
188 if (!(dev = drm_heads[minor]->dev))
189 return -ENODEV;
190
191 old_fops = filp->f_op;
192 filp->f_op = fops_get(&dev->driver->fops);
193 if (filp->f_op->open && (err = filp->f_op->open(inode, filp))) {
194 fops_put(filp->f_op);
195 filp->f_op = fops_get(old_fops);
196 }
197 fops_put(old_fops);
198
199 return err;
200}
201
202/**
203 * Check whether DRI will run on this CPU.
204 *
205 * \return non-zero if the DRI will run on this CPU, or zero otherwise.
206 */
207static int drm_cpu_valid(void)
208{
209#if defined(__i386__)
210 if (boot_cpu_data.x86 == 3)
211 return 0; /* No cmpxchg on a 386 */
212#endif
213#if defined(__sparc__) && !defined(__sparc_v9__)
214 return 0; /* No cmpxchg before v9 sparc. */
215#endif
216 return 1;
217}
218
219/**
220 * Called whenever a process opens /dev/drm.
221 *
222 * \param inode device inode.
223 * \param filp file pointer.
224 * \param dev device.
225 * \return zero on success or a negative number on failure.
226 *
227 * Creates and initializes a drm_file structure for the file private data in \p
228 * filp and add it into the double linked list in \p dev.
229 */
230static int drm_open_helper(struct inode *inode, struct file *filp,
231 drm_device_t * dev)
232{
233 int minor = iminor(inode);
234 drm_file_t *priv;
235 int ret;
236
237 if (filp->f_flags & O_EXCL)
238 return -EBUSY; /* No exclusive opens */
239 if (!drm_cpu_valid())
240 return -EINVAL;
241
242 DRM_DEBUG("pid = %d, minor = %d\n", current->pid, minor);
243
244 priv = drm_alloc(sizeof(*priv), DRM_MEM_FILES);
245 if (!priv)
246 return -ENOMEM;
247
248 memset(priv, 0, sizeof(*priv));
249 filp->private_data = priv;
250 priv->uid = current->euid;
251 priv->pid = current->pid;
252 priv->minor = minor;
253 priv->head = drm_heads[minor];
254 priv->ioctl_count = 0;
255 /* for compatibility root is always authenticated */
256 priv->authenticated = capable(CAP_SYS_ADMIN);
257 priv->lock_count = 0;
258
259 if (dev->driver->open) {
260 ret = dev->driver->open(dev, priv);
261 if (ret < 0)
262 goto out_free;
263 }
264
265 down(&dev->struct_sem);
266 if (!dev->file_last) {
267 priv->next = NULL;
268 priv->prev = NULL;
269 dev->file_first = priv;
270 dev->file_last = priv;
271 /* first opener automatically becomes master */
272 priv->master = 1;
273 } else {
274 priv->next = NULL;
275 priv->prev = dev->file_last;
276 dev->file_last->next = priv;
277 dev->file_last = priv;
278 }
279 up(&dev->struct_sem);
280
281#ifdef __alpha__
282 /*
283 * Default the hose
284 */
285 if (!dev->hose) {
286 struct pci_dev *pci_dev;
287 pci_dev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, NULL);
288 if (pci_dev) {
289 dev->hose = pci_dev->sysdata;
290 pci_dev_put(pci_dev);
291 }
292 if (!dev->hose) {
293 struct pci_bus *b = pci_bus_b(pci_root_buses.next);
294 if (b)
295 dev->hose = b->sysdata;
296 }
297 }
298#endif
299
300 return 0;
301 out_free:
302 drm_free(priv, sizeof(*priv), DRM_MEM_FILES);
303 filp->private_data = NULL;
304 return ret;
305}
306
307/** No-op. */
308int drm_fasync(int fd, struct file *filp, int on)
309{
310 drm_file_t *priv = filp->private_data;
311 drm_device_t *dev = priv->head->dev;
312 int retcode;
313
314 DRM_DEBUG("fd = %d, device = 0x%lx\n", fd,
315 (long)old_encode_dev(priv->head->device));
316 retcode = fasync_helper(fd, filp, on, &dev->buf_async);
317 if (retcode < 0)
318 return retcode;
319 return 0;
320}
321EXPORT_SYMBOL(drm_fasync);
322
323/**
161 * Release file. 324 * Release file.
162 * 325 *
163 * \param inode device inode 326 * \param inode device inode
@@ -167,7 +330,7 @@ EXPORT_SYMBOL(drm_open);
167 * If the hardware lock is held then free it, and take it again for the kernel 330 * If the hardware lock is held then free it, and take it again for the kernel
168 * context since it's necessary to reclaim buffers. Unlink the file private 331 * context since it's necessary to reclaim buffers. Unlink the file private
169 * data from its list and free it. Decreases the open count and if it reaches 332 * data from its list and free it. Decreases the open count and if it reaches
170 * zero calls takedown(). 333 * zero calls drm_lastclose().
171 */ 334 */
172int drm_release(struct inode *inode, struct file *filp) 335int drm_release(struct inode *inode, struct file *filp)
173{ 336{
@@ -180,8 +343,8 @@ int drm_release(struct inode *inode, struct file *filp)
180 343
181 DRM_DEBUG("open_count = %d\n", dev->open_count); 344 DRM_DEBUG("open_count = %d\n", dev->open_count);
182 345
183 if (dev->driver->prerelease) 346 if (dev->driver->preclose)
184 dev->driver->prerelease(dev, filp); 347 dev->driver->preclose(dev, filp);
185 348
186 /* ======================================================== 349 /* ========================================================
187 * Begin inline drm_release 350 * Begin inline drm_release
@@ -197,8 +360,8 @@ int drm_release(struct inode *inode, struct file *filp)
197 DRM_DEBUG("File %p released, freeing lock for context %d\n", 360 DRM_DEBUG("File %p released, freeing lock for context %d\n",
198 filp, _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock)); 361 filp, _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock));
199 362
200 if (dev->driver->release) 363 if (dev->driver->reclaim_buffers_locked)
201 dev->driver->release(dev, filp); 364 dev->driver->reclaim_buffers_locked(dev, filp);
202 365
203 drm_lock_free(dev, &dev->lock.hw_lock->lock, 366 drm_lock_free(dev, &dev->lock.hw_lock->lock,
204 _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock)); 367 _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock));
@@ -207,7 +370,7 @@ int drm_release(struct inode *inode, struct file *filp)
207 hardware at this point, possibly 370 hardware at this point, possibly
208 processed via a callback to the X 371 processed via a callback to the X
209 server. */ 372 server. */
210 } else if (dev->driver->release && priv->lock_count 373 } else if (dev->driver->reclaim_buffers_locked && priv->lock_count
211 && dev->lock.hw_lock) { 374 && dev->lock.hw_lock) {
212 /* The lock is required to reclaim buffers */ 375 /* The lock is required to reclaim buffers */
213 DECLARE_WAITQUEUE(entry, current); 376 DECLARE_WAITQUEUE(entry, current);
@@ -237,15 +400,14 @@ int drm_release(struct inode *inode, struct file *filp)
237 __set_current_state(TASK_RUNNING); 400 __set_current_state(TASK_RUNNING);
238 remove_wait_queue(&dev->lock.lock_queue, &entry); 401 remove_wait_queue(&dev->lock.lock_queue, &entry);
239 if (!retcode) { 402 if (!retcode) {
240 if (dev->driver->release) 403 dev->driver->reclaim_buffers_locked(dev, filp);
241 dev->driver->release(dev, filp);
242 drm_lock_free(dev, &dev->lock.hw_lock->lock, 404 drm_lock_free(dev, &dev->lock.hw_lock->lock,
243 DRM_KERNEL_CONTEXT); 405 DRM_KERNEL_CONTEXT);
244 } 406 }
245 } 407 }
246 408
247 if (drm_core_check_feature(dev, DRIVER_HAVE_DMA) 409 if (drm_core_check_feature(dev, DRIVER_HAVE_DMA) &&
248 && !dev->driver->release) { 410 !dev->driver->reclaim_buffers_locked) {
249 dev->driver->reclaim_buffers(dev, filp); 411 dev->driver->reclaim_buffers(dev, filp);
250 } 412 }
251 413
@@ -292,9 +454,8 @@ int drm_release(struct inode *inode, struct file *filp)
292 } 454 }
293 up(&dev->struct_sem); 455 up(&dev->struct_sem);
294 456
295 if (dev->driver->free_filp_priv) 457 if (dev->driver->postclose)
296 dev->driver->free_filp_priv(dev, priv); 458 dev->driver->postclose(dev, priv);
297
298 drm_free(priv, sizeof(*priv), DRM_MEM_FILES); 459 drm_free(priv, sizeof(*priv), DRM_MEM_FILES);
299 460
300 /* ======================================================== 461 /* ========================================================
@@ -313,7 +474,7 @@ int drm_release(struct inode *inode, struct file *filp)
313 } 474 }
314 spin_unlock(&dev->count_lock); 475 spin_unlock(&dev->count_lock);
315 unlock_kernel(); 476 unlock_kernel();
316 return drm_takedown(dev); 477 return drm_lastclose(dev);
317 } 478 }
318 spin_unlock(&dev->count_lock); 479 spin_unlock(&dev->count_lock);
319 480
@@ -321,129 +482,11 @@ int drm_release(struct inode *inode, struct file *filp)
321 482
322 return retcode; 483 return retcode;
323} 484}
324
325EXPORT_SYMBOL(drm_release); 485EXPORT_SYMBOL(drm_release);
326 486
327/**
328 * Called whenever a process opens /dev/drm.
329 *
330 * \param inode device inode.
331 * \param filp file pointer.
332 * \param dev device.
333 * \return zero on success or a negative number on failure.
334 *
335 * Creates and initializes a drm_file structure for the file private data in \p
336 * filp and add it into the double linked list in \p dev.
337 */
338static int drm_open_helper(struct inode *inode, struct file *filp,
339 drm_device_t * dev)
340{
341 int minor = iminor(inode);
342 drm_file_t *priv;
343 int ret;
344
345 if (filp->f_flags & O_EXCL)
346 return -EBUSY; /* No exclusive opens */
347 if (!drm_cpu_valid())
348 return -EINVAL;
349
350 DRM_DEBUG("pid = %d, minor = %d\n", current->pid, minor);
351
352 priv = drm_alloc(sizeof(*priv), DRM_MEM_FILES);
353 if (!priv)
354 return -ENOMEM;
355
356 memset(priv, 0, sizeof(*priv));
357 filp->private_data = priv;
358 priv->uid = current->euid;
359 priv->pid = current->pid;
360 priv->minor = minor;
361 priv->head = drm_heads[minor];
362 priv->ioctl_count = 0;
363 priv->authenticated = capable(CAP_SYS_ADMIN);
364 priv->lock_count = 0;
365
366 if (dev->driver->open_helper) {
367 ret = dev->driver->open_helper(dev, priv);
368 if (ret < 0)
369 goto out_free;
370 }
371
372 down(&dev->struct_sem);
373 if (!dev->file_last) {
374 priv->next = NULL;
375 priv->prev = NULL;
376 dev->file_first = priv;
377 dev->file_last = priv;
378 } else {
379 priv->next = NULL;
380 priv->prev = dev->file_last;
381 dev->file_last->next = priv;
382 dev->file_last = priv;
383 }
384 up(&dev->struct_sem);
385
386#ifdef __alpha__
387 /*
388 * Default the hose
389 */
390 if (!dev->hose) {
391 struct pci_dev *pci_dev;
392 pci_dev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, NULL);
393 if (pci_dev) {
394 dev->hose = pci_dev->sysdata;
395 pci_dev_put(pci_dev);
396 }
397 if (!dev->hose) {
398 struct pci_bus *b = pci_bus_b(pci_root_buses.next);
399 if (b)
400 dev->hose = b->sysdata;
401 }
402 }
403#endif
404
405 return 0;
406 out_free:
407 drm_free(priv, sizeof(*priv), DRM_MEM_FILES);
408 filp->private_data = NULL;
409 return ret;
410}
411
412/** No-op. */
413int drm_flush(struct file *filp)
414{
415 drm_file_t *priv = filp->private_data;
416 drm_device_t *dev = priv->head->dev;
417
418 DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n",
419 current->pid, (long)old_encode_dev(priv->head->device),
420 dev->open_count);
421 return 0;
422}
423
424EXPORT_SYMBOL(drm_flush);
425
426/** No-op. */
427int drm_fasync(int fd, struct file *filp, int on)
428{
429 drm_file_t *priv = filp->private_data;
430 drm_device_t *dev = priv->head->dev;
431 int retcode;
432
433 DRM_DEBUG("fd = %d, device = 0x%lx\n", fd,
434 (long)old_encode_dev(priv->head->device));
435 retcode = fasync_helper(fd, filp, on, &dev->buf_async);
436 if (retcode < 0)
437 return retcode;
438 return 0;
439}
440
441EXPORT_SYMBOL(drm_fasync);
442
443/** No-op. */ 487/** No-op. */
444unsigned int drm_poll(struct file *filp, struct poll_table_struct *wait) 488unsigned int drm_poll(struct file *filp, struct poll_table_struct *wait)
445{ 489{
446 return 0; 490 return 0;
447} 491}
448
449EXPORT_SYMBOL(drm_poll); 492EXPORT_SYMBOL(drm_poll);
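
drm_open_helper(), moved into drm_fops.c above, now appends each opener to the device's doubly linked file list and flags the first opener as the DRM master (priv->master = 1), which the new DRM_MASTER ioctl check relies on. A stand-alone sketch of just that bookkeeping, with hypothetical type names and no locking:

/* Stand-alone sketch of the "first opener becomes master" bookkeeping
 * added to drm_open_helper() above.  The types and names are hypothetical;
 * locking and the rest of drm_file_t are omitted. */
#include <stdio.h>
#include <stdlib.h>

struct file_priv {
    int master;                 /* 1 for the first opener, 0 otherwise */
    struct file_priv *prev, *next;
};

struct device_files {
    struct file_priv *first, *last;
};

static struct file_priv *device_open(struct device_files *dev)
{
    struct file_priv *priv = calloc(1, sizeof(*priv));
    if (!priv)
        return NULL;

    if (!dev->last) {
        /* first opener automatically becomes master */
        priv->master = 1;
        dev->first = dev->last = priv;
    } else {
        priv->prev = dev->last;
        dev->last->next = priv;
        dev->last = priv;
    }
    return priv;
}

int main(void)
{
    struct device_files dev = { NULL, NULL };
    struct file_priv *a = device_open(&dev);
    struct file_priv *b = device_open(&dev);

    printf("a->master = %d, b->master = %d\n", a->master, b->master); /* 1, 0 */
    free(b);
    free(a);
    return 0;
}
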
diff --git a/drivers/char/drm/drm_init.c b/drivers/char/drm/drm_init.c
deleted file mode 100644
index 754b934715c4..000000000000
--- a/drivers/char/drm/drm_init.c
+++ /dev/null
@@ -1,53 +0,0 @@
1/**
2 * \file drm_init.c
3 * Setup/Cleanup for DRM
4 *
5 * \author Rickard E. (Rik) Faith <faith@valinux.com>
6 * \author Gareth Hughes <gareth@valinux.com>
7 */
8
9/*
10 * Created: Mon Jan 4 08:58:31 1999 by faith@valinux.com
11 *
12 * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
13 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
14 * All Rights Reserved.
15 *
16 * Permission is hereby granted, free of charge, to any person obtaining a
17 * copy of this software and associated documentation files (the "Software"),
18 * to deal in the Software without restriction, including without limitation
19 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
20 * and/or sell copies of the Software, and to permit persons to whom the
21 * Software is furnished to do so, subject to the following conditions:
22 *
23 * The above copyright notice and this permission notice (including the next
24 * paragraph) shall be included in all copies or substantial portions of the
25 * Software.
26 *
27 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
28 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
29 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
30 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
31 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
32 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
33 * OTHER DEALINGS IN THE SOFTWARE.
34 */
35
36#include "drmP.h"
37
38/**
39 * Check whether DRI will run on this CPU.
40 *
41 * \return non-zero if the DRI will run on this CPU, or zero otherwise.
42 */
43int drm_cpu_valid(void)
44{
45#if defined(__i386__)
46 if (boot_cpu_data.x86 == 3)
47 return 0; /* No cmpxchg on a 386 */
48#endif
49#if defined(__sparc__) && !defined(__sparc_v9__)
50 return 0; /* No cmpxchg before v9 sparc. */
51#endif
52 return 1;
53}
diff --git a/drivers/char/drm/drm_ioctl.c b/drivers/char/drm/drm_ioctl.c
index 9b0feba6b063..bcd4e604d3ec 100644
--- a/drivers/char/drm/drm_ioctl.c
+++ b/drivers/char/drm/drm_ioctl.c
@@ -137,17 +137,22 @@ int drm_setunique(struct inode *inode, struct file *filp,
137 137
138static int drm_set_busid(drm_device_t * dev) 138static int drm_set_busid(drm_device_t * dev)
139{ 139{
140 int len;
141
140 if (dev->unique != NULL) 142 if (dev->unique != NULL)
141 return EBUSY; 143 return EBUSY;
142 144
143 dev->unique_len = 20; 145 dev->unique_len = 40;
144 dev->unique = drm_alloc(dev->unique_len + 1, DRM_MEM_DRIVER); 146 dev->unique = drm_alloc(dev->unique_len + 1, DRM_MEM_DRIVER);
145 if (dev->unique == NULL) 147 if (dev->unique == NULL)
146 return ENOMEM; 148 return ENOMEM;
147 149
148 snprintf(dev->unique, dev->unique_len, "pci:%04x:%02x:%02x.%d", 150 len = snprintf(dev->unique, dev->unique_len, "pci:%04x:%02x:%02x.%d",
149 dev->pci_domain, dev->pci_bus, dev->pci_slot, dev->pci_func); 151 dev->pci_domain, dev->pci_bus, dev->pci_slot, dev->pci_func);
150 152
153 if (len > dev->unique_len)
154 DRM_ERROR("Unique buffer overflowed\n");
155
151 dev->devname = 156 dev->devname =
152 drm_alloc(strlen(dev->driver->pci_driver.name) + dev->unique_len + 157 drm_alloc(strlen(dev->driver->pci_driver.name) + dev->unique_len +
153 2, DRM_MEM_DRIVER); 158 2, DRM_MEM_DRIVER);
@@ -239,7 +244,7 @@ int drm_getclient(struct inode *inode, struct file *filp,
239{ 244{
240 drm_file_t *priv = filp->private_data; 245 drm_file_t *priv = filp->private_data;
241 drm_device_t *dev = priv->head->dev; 246 drm_device_t *dev = priv->head->dev;
242 drm_client_t __user *argp = (void __user *)arg; 247 drm_client_t __user *argp = (drm_client_t __user *)arg;
243 drm_client_t client; 248 drm_client_t client;
244 drm_file_t *pt; 249 drm_file_t *pt;
245 int idx; 250 int idx;
@@ -262,7 +267,7 @@ int drm_getclient(struct inode *inode, struct file *filp,
262 client.iocs = pt->ioctl_count; 267 client.iocs = pt->ioctl_count;
263 up(&dev->struct_sem); 268 up(&dev->struct_sem);
264 269
265 if (copy_to_user((drm_client_t __user *) arg, &client, sizeof(client))) 270 if (copy_to_user(argp, &client, sizeof(client)))
266 return -EFAULT; 271 return -EFAULT;
267 return 0; 272 return 0;
268} 273}
@@ -325,17 +330,13 @@ int drm_setversion(DRM_IOCTL_ARGS)
325 drm_set_version_t retv; 330 drm_set_version_t retv;
326 int if_version; 331 int if_version;
327 drm_set_version_t __user *argp = (void __user *)data; 332 drm_set_version_t __user *argp = (void __user *)data;
328 drm_version_t version;
329 333
330 DRM_COPY_FROM_USER_IOCTL(sv, argp, sizeof(sv)); 334 DRM_COPY_FROM_USER_IOCTL(sv, argp, sizeof(sv));
331 335
332 memset(&version, 0, sizeof(version));
333
334 dev->driver->version(&version);
335 retv.drm_di_major = DRM_IF_MAJOR; 336 retv.drm_di_major = DRM_IF_MAJOR;
336 retv.drm_di_minor = DRM_IF_MINOR; 337 retv.drm_di_minor = DRM_IF_MINOR;
337 retv.drm_dd_major = version.version_major; 338 retv.drm_dd_major = dev->driver->major;
338 retv.drm_dd_minor = version.version_minor; 339 retv.drm_dd_minor = dev->driver->minor;
339 340
340 DRM_COPY_TO_USER_IOCTL(argp, retv, sizeof(sv)); 341 DRM_COPY_TO_USER_IOCTL(argp, retv, sizeof(sv));
341 342
@@ -343,7 +344,7 @@ int drm_setversion(DRM_IOCTL_ARGS)
343 if (sv.drm_di_major != DRM_IF_MAJOR || 344 if (sv.drm_di_major != DRM_IF_MAJOR ||
344 sv.drm_di_minor < 0 || sv.drm_di_minor > DRM_IF_MINOR) 345 sv.drm_di_minor < 0 || sv.drm_di_minor > DRM_IF_MINOR)
345 return EINVAL; 346 return EINVAL;
346 if_version = DRM_IF_VERSION(sv.drm_di_major, sv.drm_dd_minor); 347 if_version = DRM_IF_VERSION(sv.drm_di_major, sv.drm_di_minor);
347 dev->if_version = DRM_MAX(if_version, dev->if_version); 348 dev->if_version = DRM_MAX(if_version, dev->if_version);
348 if (sv.drm_di_minor >= 1) { 349 if (sv.drm_di_minor >= 1) {
349 /* 350 /*
@@ -354,9 +355,9 @@ int drm_setversion(DRM_IOCTL_ARGS)
354 } 355 }
355 356
356 if (sv.drm_dd_major != -1) { 357 if (sv.drm_dd_major != -1) {
357 if (sv.drm_dd_major != version.version_major || 358 if (sv.drm_dd_major != dev->driver->major ||
358 sv.drm_dd_minor < 0 359 sv.drm_dd_minor < 0
359 || sv.drm_dd_minor > version.version_minor) 360 || sv.drm_dd_minor > dev->driver->minor)
360 return EINVAL; 361 return EINVAL;
361 362
362 if (dev->driver->set_version) 363 if (dev->driver->set_version)
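
The drm_set_busid() hunk grows the unique-name buffer from 20 to 40 bytes, captures snprintf()'s return value and warns when it exceeds the buffer, since snprintf() reports the length the string would have needed. A quick stand-alone illustration of that truncation check, using the standard >= buffer-size idiom and made-up buffer sizes:

/* Illustration of the truncation check added to drm_set_busid() above:
 * snprintf() returns the length the full string would have had, so a
 * return value >= the buffer size signals truncation. */
#include <stdio.h>

static void format_busid(char *buf, size_t buflen,
                         int domain, int bus, int slot, int func)
{
    int len = snprintf(buf, buflen, "pci:%04x:%02x:%02x.%d",
                       domain, bus, slot, func);
    if (len < 0 || (size_t)len >= buflen)
        fprintf(stderr, "busid buffer too small (needed %d bytes)\n", len + 1);
}

int main(void)
{
    char small[20], large[41];

    format_busid(small, sizeof(small), 0, 1, 0, 0);             /* "pci:0000:01:00.0" fits */
    format_busid(small, sizeof(small), 0x12345678, 255, 31, 7); /* 20 chars + NUL: warns */
    format_busid(large, sizeof(large), 0x12345678, 255, 31, 7); /* fits comfortably */

    printf("%s\n%s\n", small, large);
    return 0;
}
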
diff --git a/drivers/char/drm/drm_lock.c b/drivers/char/drm/drm_lock.c
index b48a595d54ec..f9e45303498d 100644
--- a/drivers/char/drm/drm_lock.c
+++ b/drivers/char/drm/drm_lock.c
@@ -130,7 +130,6 @@ int drm_lock(struct inode *inode, struct file *filp,
130 /* dev->driver->kernel_context_switch isn't used by any of the x86 130 /* dev->driver->kernel_context_switch isn't used by any of the x86
131 * drivers but is used by the Sparc driver. 131 * drivers but is used by the Sparc driver.
132 */ 132 */
133
134 if (dev->driver->kernel_context_switch && 133 if (dev->driver->kernel_context_switch &&
135 dev->last_context != lock.context) { 134 dev->last_context != lock.context) {
136 dev->driver->kernel_context_switch(dev, dev->last_context, 135 dev->driver->kernel_context_switch(dev, dev->last_context,
diff --git a/drivers/char/drm/drm_memory.c b/drivers/char/drm/drm_memory.c
index abef2acf99f5..8074771e348f 100644
--- a/drivers/char/drm/drm_memory.c
+++ b/drivers/char/drm/drm_memory.c
@@ -145,30 +145,22 @@ DRM_AGP_MEM *drm_alloc_agp(drm_device_t * dev, int pages, u32 type)
145 return drm_agp_allocate_memory(dev->agp->bridge, pages, type); 145 return drm_agp_allocate_memory(dev->agp->bridge, pages, type);
146} 146}
147 147
148EXPORT_SYMBOL(drm_alloc_agp);
149
150/** Wrapper around agp_free_memory() */ 148/** Wrapper around agp_free_memory() */
151int drm_free_agp(DRM_AGP_MEM * handle, int pages) 149int drm_free_agp(DRM_AGP_MEM * handle, int pages)
152{ 150{
153 return drm_agp_free_memory(handle) ? 0 : -EINVAL; 151 return drm_agp_free_memory(handle) ? 0 : -EINVAL;
154} 152}
155 153
156EXPORT_SYMBOL(drm_free_agp);
157
158/** Wrapper around agp_bind_memory() */ 154/** Wrapper around agp_bind_memory() */
159int drm_bind_agp(DRM_AGP_MEM * handle, unsigned int start) 155int drm_bind_agp(DRM_AGP_MEM * handle, unsigned int start)
160{ 156{
161 return drm_agp_bind_memory(handle, start); 157 return drm_agp_bind_memory(handle, start);
162} 158}
163 159
164EXPORT_SYMBOL(drm_bind_agp);
165
166/** Wrapper around agp_unbind_memory() */ 160/** Wrapper around agp_unbind_memory() */
167int drm_unbind_agp(DRM_AGP_MEM * handle) 161int drm_unbind_agp(DRM_AGP_MEM * handle)
168{ 162{
169 return drm_agp_unbind_memory(handle); 163 return drm_agp_unbind_memory(handle);
170} 164}
171
172EXPORT_SYMBOL(drm_unbind_agp);
173#endif /* agp */ 165#endif /* agp */
174#endif /* debug_memory */ 166#endif /* debug_memory */
diff --git a/drivers/char/drm/drm_memory_debug.h b/drivers/char/drm/drm_memory_debug.h
index b370aca718d2..e84605fc54af 100644
--- a/drivers/char/drm/drm_memory_debug.h
+++ b/drivers/char/drm/drm_memory_debug.h
@@ -1,5 +1,5 @@
1/** 1/**
2 * \file drm_memory.h 2 * \file drm_memory_debug.h
3 * Memory management wrappers for DRM. 3 * Memory management wrappers for DRM.
4 * 4 *
5 * \author Rickard E. (Rik) Faith <faith@valinux.com> 5 * \author Rickard E. (Rik) Faith <faith@valinux.com>
@@ -43,42 +43,41 @@ typedef struct drm_mem_stats {
43 unsigned long bytes_freed; 43 unsigned long bytes_freed;
44} drm_mem_stats_t; 44} drm_mem_stats_t;
45 45
46static DEFINE_SPINLOCK(DRM(mem_lock)); 46static spinlock_t drm_mem_lock = SPIN_LOCK_UNLOCKED;
47static unsigned long DRM(ram_available) = 0; /* In pages */ 47static unsigned long drm_ram_available = 0; /* In pages */
48static unsigned long DRM(ram_used) = 0; 48static unsigned long drm_ram_used = 0;
49static drm_mem_stats_t DRM(mem_stats)[] = 49static drm_mem_stats_t drm_mem_stats[] =
50{ 50{
51 [DRM_MEM_DMA] = { 51 [DRM_MEM_DMA] = {"dmabufs"},
52 "dmabufs"},[DRM_MEM_SAREA] = { 52 [DRM_MEM_SAREA] = {"sareas"},
53 "sareas"},[DRM_MEM_DRIVER] = { 53 [DRM_MEM_DRIVER] = {"driver"},
54 "driver"},[DRM_MEM_MAGIC] = { 54 [DRM_MEM_MAGIC] = {"magic"},
55 "magic"},[DRM_MEM_IOCTLS] = { 55 [DRM_MEM_IOCTLS] = {"ioctltab"},
56 "ioctltab"},[DRM_MEM_MAPS] = { 56 [DRM_MEM_MAPS] = {"maplist"},
57 "maplist"},[DRM_MEM_VMAS] = { 57 [DRM_MEM_VMAS] = {"vmalist"},
58 "vmalist"},[DRM_MEM_BUFS] = { 58 [DRM_MEM_BUFS] = {"buflist"},
59 "buflist"},[DRM_MEM_SEGS] = { 59 [DRM_MEM_SEGS] = {"seglist"},
60 "seglist"},[DRM_MEM_PAGES] = { 60 [DRM_MEM_PAGES] = {"pagelist"},
61 "pagelist"},[DRM_MEM_FILES] = { 61 [DRM_MEM_FILES] = {"files"},
62 "files"},[DRM_MEM_QUEUES] = { 62 [DRM_MEM_QUEUES] = {"queues"},
63 "queues"},[DRM_MEM_CMDS] = { 63 [DRM_MEM_CMDS] = {"commands"},
64 "commands"},[DRM_MEM_MAPPINGS] = { 64 [DRM_MEM_MAPPINGS] = {"mappings"},
65 "mappings"},[DRM_MEM_BUFLISTS] = { 65 [DRM_MEM_BUFLISTS] = {"buflists"},
66 "buflists"},[DRM_MEM_AGPLISTS] = { 66 [DRM_MEM_AGPLISTS] = {"agplist"},
67 "agplist"},[DRM_MEM_SGLISTS] = { 67 [DRM_MEM_SGLISTS] = {"sglist"},
68 "sglist"},[DRM_MEM_TOTALAGP] = { 68 [DRM_MEM_TOTALAGP] = {"totalagp"},
69 "totalagp"},[DRM_MEM_BOUNDAGP] = { 69 [DRM_MEM_BOUNDAGP] = {"boundagp"},
70 "boundagp"},[DRM_MEM_CTXBITMAP] = { 70 [DRM_MEM_CTXBITMAP] = {"ctxbitmap"},
71 "ctxbitmap"},[DRM_MEM_CTXLIST] = { 71 [DRM_MEM_CTXLIST] = {"ctxlist"},
72 "ctxlist"},[DRM_MEM_STUB] = { 72 [DRM_MEM_STUB] = {"stub"},
73 "stub"}, { 73 {NULL, 0,} /* Last entry must be null */
74 NULL, 0,} /* Last entry must be null */
75}; 74};
76 75
77void DRM(mem_init) (void) { 76void drm_mem_init (void) {
78 drm_mem_stats_t *mem; 77 drm_mem_stats_t *mem;
79 struct sysinfo si; 78 struct sysinfo si;
80 79
81 for (mem = DRM(mem_stats); mem->name; ++mem) { 80 for (mem = drm_mem_stats; mem->name; ++mem) {
82 mem->succeed_count = 0; 81 mem->succeed_count = 0;
83 mem->free_count = 0; 82 mem->free_count = 0;
84 mem->fail_count = 0; 83 mem->fail_count = 0;
@@ -87,13 +86,13 @@ void DRM(mem_init) (void) {
87 } 86 }
88 87
89 si_meminfo(&si); 88 si_meminfo(&si);
90 DRM(ram_available) = si.totalram; 89 drm_ram_available = si.totalram;
91 DRM(ram_used) = 0; 90 drm_ram_used = 0;
92} 91}
93 92
94/* drm_mem_info is called whenever a process reads /dev/drm/mem. */ 93/* drm_mem_info is called whenever a process reads /dev/drm/mem. */
95 94
96static int DRM(_mem_info) (char *buf, char **start, off_t offset, 95static int drm__mem_info (char *buf, char **start, off_t offset,
97 int request, int *eof, void *data) { 96 int request, int *eof, void *data) {
98 drm_mem_stats_t *pt; 97 drm_mem_stats_t *pt;
99 int len = 0; 98 int len = 0;
@@ -112,11 +111,11 @@ static int DRM(_mem_info) (char *buf, char **start, off_t offset,
112 " | allocs bytes\n\n"); 111 " | allocs bytes\n\n");
113 DRM_PROC_PRINT("%-9.9s %5d %5d %4d %10lu kB |\n", 112 DRM_PROC_PRINT("%-9.9s %5d %5d %4d %10lu kB |\n",
114 "system", 0, 0, 0, 113 "system", 0, 0, 0,
115 DRM(ram_available) << (PAGE_SHIFT - 10)); 114 drm_ram_available << (PAGE_SHIFT - 10));
116 DRM_PROC_PRINT("%-9.9s %5d %5d %4d %10lu kB |\n", 115 DRM_PROC_PRINT("%-9.9s %5d %5d %4d %10lu kB |\n",
117 "locked", 0, 0, 0, DRM(ram_used) >> 10); 116 "locked", 0, 0, 0, drm_ram_used >> 10);
118 DRM_PROC_PRINT("\n"); 117 DRM_PROC_PRINT("\n");
119 for (pt = DRM(mem_stats); pt->name; pt++) { 118 for (pt = drm_mem_stats; pt->name; pt++) {
120 DRM_PROC_PRINT("%-9.9s %5d %5d %4d %10lu %10lu | %6d %10ld\n", 119 DRM_PROC_PRINT("%-9.9s %5d %5d %4d %10lu %10lu | %6d %10ld\n",
121 pt->name, 120 pt->name,
122 pt->succeed_count, 121 pt->succeed_count,
@@ -135,17 +134,17 @@ static int DRM(_mem_info) (char *buf, char **start, off_t offset,
135 return len - offset; 134 return len - offset;
136} 135}
137 136
138int DRM(mem_info) (char *buf, char **start, off_t offset, 137int drm_mem_info (char *buf, char **start, off_t offset,
139 int len, int *eof, void *data) { 138 int len, int *eof, void *data) {
140 int ret; 139 int ret;
141 140
142 spin_lock(&DRM(mem_lock)); 141 spin_lock(&drm_mem_lock);
143 ret = DRM(_mem_info) (buf, start, offset, len, eof, data); 142 ret = drm__mem_info (buf, start, offset, len, eof, data);
144 spin_unlock(&DRM(mem_lock)); 143 spin_unlock(&drm_mem_lock);
145 return ret; 144 return ret;
146} 145}
147 146
148void *DRM(alloc) (size_t size, int area) { 147void *drm_alloc (size_t size, int area) {
149 void *pt; 148 void *pt;
150 149
151 if (!size) { 150 if (!size) {
@@ -154,41 +153,41 @@ void *DRM(alloc) (size_t size, int area) {
154 } 153 }
155 154
156 if (!(pt = kmalloc(size, GFP_KERNEL))) { 155 if (!(pt = kmalloc(size, GFP_KERNEL))) {
157 spin_lock(&DRM(mem_lock)); 156 spin_lock(&drm_mem_lock);
158 ++DRM(mem_stats)[area].fail_count; 157 ++drm_mem_stats[area].fail_count;
159 spin_unlock(&DRM(mem_lock)); 158 spin_unlock(&drm_mem_lock);
160 return NULL; 159 return NULL;
161 } 160 }
162 spin_lock(&DRM(mem_lock)); 161 spin_lock(&drm_mem_lock);
163 ++DRM(mem_stats)[area].succeed_count; 162 ++drm_mem_stats[area].succeed_count;
164 DRM(mem_stats)[area].bytes_allocated += size; 163 drm_mem_stats[area].bytes_allocated += size;
165 spin_unlock(&DRM(mem_lock)); 164 spin_unlock(&drm_mem_lock);
166 return pt; 165 return pt;
167} 166}
168 167
169void *DRM(calloc) (size_t nmemb, size_t size, int area) { 168void *drm_calloc (size_t nmemb, size_t size, int area) {
170 void *addr; 169 void *addr;
171 170
172 addr = DRM(alloc) (nmemb * size, area); 171 addr = drm_alloc (nmemb * size, area);
173 if (addr != NULL) 172 if (addr != NULL)
174 memset((void *)addr, 0, size * nmemb); 173 memset((void *)addr, 0, size * nmemb);
175 174
176 return addr; 175 return addr;
177} 176}
178 177
179void *DRM(realloc) (void *oldpt, size_t oldsize, size_t size, int area) { 178void *drm_realloc (void *oldpt, size_t oldsize, size_t size, int area) {
180 void *pt; 179 void *pt;
181 180
182 if (!(pt = DRM(alloc) (size, area))) 181 if (!(pt = drm_alloc (size, area)))
183 return NULL; 182 return NULL;
184 if (oldpt && oldsize) { 183 if (oldpt && oldsize) {
185 memcpy(pt, oldpt, oldsize); 184 memcpy(pt, oldpt, oldsize);
186 DRM(free) (oldpt, oldsize, area); 185 drm_free (oldpt, oldsize, area);
187 } 186 }
188 return pt; 187 return pt;
189} 188}
190 189
191void DRM(free) (void *pt, size_t size, int area) { 190void drm_free (void *pt, size_t size, int area) {
192 int alloc_count; 191 int alloc_count;
193 int free_count; 192 int free_count;
194 193
@@ -196,43 +195,43 @@ void DRM(free) (void *pt, size_t size, int area) {
196 DRM_MEM_ERROR(area, "Attempt to free NULL pointer\n"); 195 DRM_MEM_ERROR(area, "Attempt to free NULL pointer\n");
197 else 196 else
198 kfree(pt); 197 kfree(pt);
199 spin_lock(&DRM(mem_lock)); 198 spin_lock(&drm_mem_lock);
200 DRM(mem_stats)[area].bytes_freed += size; 199 drm_mem_stats[area].bytes_freed += size;
201 free_count = ++DRM(mem_stats)[area].free_count; 200 free_count = ++drm_mem_stats[area].free_count;
202 alloc_count = DRM(mem_stats)[area].succeed_count; 201 alloc_count = drm_mem_stats[area].succeed_count;
203 spin_unlock(&DRM(mem_lock)); 202 spin_unlock(&drm_mem_lock);
204 if (free_count > alloc_count) { 203 if (free_count > alloc_count) {
205 DRM_MEM_ERROR(area, "Excess frees: %d frees, %d allocs\n", 204 DRM_MEM_ERROR(area, "Excess frees: %d frees, %d allocs\n",
206 free_count, alloc_count); 205 free_count, alloc_count);
207 } 206 }
208} 207}
209 208
210unsigned long DRM(alloc_pages) (int order, int area) { 209unsigned long drm_alloc_pages (int order, int area) {
211 unsigned long address; 210 unsigned long address;
212 unsigned long bytes = PAGE_SIZE << order; 211 unsigned long bytes = PAGE_SIZE << order;
213 unsigned long addr; 212 unsigned long addr;
214 unsigned int sz; 213 unsigned int sz;
215 214
216 spin_lock(&DRM(mem_lock)); 215 spin_lock(&drm_mem_lock);
217 if ((DRM(ram_used) >> PAGE_SHIFT) 216 if ((drm_ram_used >> PAGE_SHIFT)
218 > (DRM_RAM_PERCENT * DRM(ram_available)) / 100) { 217 > (DRM_RAM_PERCENT * drm_ram_available) / 100) {
219 spin_unlock(&DRM(mem_lock)); 218 spin_unlock(&drm_mem_lock);
220 return 0; 219 return 0;
221 } 220 }
222 spin_unlock(&DRM(mem_lock)); 221 spin_unlock(&drm_mem_lock);
223 222
224 address = __get_free_pages(GFP_KERNEL|__GFP_COMP, order); 223 address = __get_free_pages(GFP_KERNEL|__GFP_COMP, order);
225 if (!address) { 224 if (!address) {
226 spin_lock(&DRM(mem_lock)); 225 spin_lock(&drm_mem_lock);
227 ++DRM(mem_stats)[area].fail_count; 226 ++drm_mem_stats[area].fail_count;
228 spin_unlock(&DRM(mem_lock)); 227 spin_unlock(&drm_mem_lock);
229 return 0; 228 return 0;
230 } 229 }
231 spin_lock(&DRM(mem_lock)); 230 spin_lock(&drm_mem_lock);
232 ++DRM(mem_stats)[area].succeed_count; 231 ++drm_mem_stats[area].succeed_count;
233 DRM(mem_stats)[area].bytes_allocated += bytes; 232 drm_mem_stats[area].bytes_allocated += bytes;
234 DRM(ram_used) += bytes; 233 drm_ram_used += bytes;
235 spin_unlock(&DRM(mem_lock)); 234 spin_unlock(&drm_mem_lock);
236 235
237 /* Zero outside the lock */ 236 /* Zero outside the lock */
238 memset((void *)address, 0, bytes); 237 memset((void *)address, 0, bytes);
@@ -246,7 +245,7 @@ unsigned long DRM(alloc_pages) (int order, int area) {
246 return address; 245 return address;
247} 246}
248 247
249void DRM(free_pages) (unsigned long address, int order, int area) { 248void drm_free_pages (unsigned long address, int order, int area) {
250 unsigned long bytes = PAGE_SIZE << order; 249 unsigned long bytes = PAGE_SIZE << order;
251 int alloc_count; 250 int alloc_count;
252 int free_count; 251 int free_count;
@@ -264,12 +263,12 @@ void DRM(free_pages) (unsigned long address, int order, int area) {
264 free_pages(address, order); 263 free_pages(address, order);
265 } 264 }
266 265
267 spin_lock(&DRM(mem_lock)); 266 spin_lock(&drm_mem_lock);
268 free_count = ++DRM(mem_stats)[area].free_count; 267 free_count = ++drm_mem_stats[area].free_count;
269 alloc_count = DRM(mem_stats)[area].succeed_count; 268 alloc_count = drm_mem_stats[area].succeed_count;
270 DRM(mem_stats)[area].bytes_freed += bytes; 269 drm_mem_stats[area].bytes_freed += bytes;
271 DRM(ram_used) -= bytes; 270 drm_ram_used -= bytes;
272 spin_unlock(&DRM(mem_lock)); 271 spin_unlock(&drm_mem_lock);
273 if (free_count > alloc_count) { 272 if (free_count > alloc_count) {
274 DRM_MEM_ERROR(area, 273 DRM_MEM_ERROR(area,
275 "Excess frees: %d frees, %d allocs\n", 274 "Excess frees: %d frees, %d allocs\n",
@@ -277,7 +276,7 @@ void DRM(free_pages) (unsigned long address, int order, int area) {
277 } 276 }
278} 277}
279 278
280void *DRM(ioremap) (unsigned long offset, unsigned long size, 279void *drm_ioremap (unsigned long offset, unsigned long size,
281 drm_device_t * dev) { 280 drm_device_t * dev) {
282 void *pt; 281 void *pt;
283 282
@@ -288,19 +287,19 @@ void *DRM(ioremap) (unsigned long offset, unsigned long size,
288 } 287 }
289 288
290 if (!(pt = drm_ioremap(offset, size, dev))) { 289 if (!(pt = drm_ioremap(offset, size, dev))) {
291 spin_lock(&DRM(mem_lock)); 290 spin_lock(&drm_mem_lock);
292 ++DRM(mem_stats)[DRM_MEM_MAPPINGS].fail_count; 291 ++drm_mem_stats[DRM_MEM_MAPPINGS].fail_count;
293 spin_unlock(&DRM(mem_lock)); 292 spin_unlock(&drm_mem_lock);
294 return NULL; 293 return NULL;
295 } 294 }
296 spin_lock(&DRM(mem_lock)); 295 spin_lock(&drm_mem_lock);
297 ++DRM(mem_stats)[DRM_MEM_MAPPINGS].succeed_count; 296 ++drm_mem_stats[DRM_MEM_MAPPINGS].succeed_count;
298 DRM(mem_stats)[DRM_MEM_MAPPINGS].bytes_allocated += size; 297 drm_mem_stats[DRM_MEM_MAPPINGS].bytes_allocated += size;
299 spin_unlock(&DRM(mem_lock)); 298 spin_unlock(&drm_mem_lock);
300 return pt; 299 return pt;
301} 300}
302 301
303void *DRM(ioremap_nocache) (unsigned long offset, unsigned long size, 302void *drm_ioremap_nocache (unsigned long offset, unsigned long size,
304 drm_device_t * dev) { 303 drm_device_t * dev) {
305 void *pt; 304 void *pt;
306 305
@@ -311,19 +310,19 @@ void *DRM(ioremap_nocache) (unsigned long offset, unsigned long size,
311 } 310 }
312 311
313 if (!(pt = drm_ioremap_nocache(offset, size, dev))) { 312 if (!(pt = drm_ioremap_nocache(offset, size, dev))) {
314 spin_lock(&DRM(mem_lock)); 313 spin_lock(&drm_mem_lock);
315 ++DRM(mem_stats)[DRM_MEM_MAPPINGS].fail_count; 314 ++drm_mem_stats[DRM_MEM_MAPPINGS].fail_count;
316 spin_unlock(&DRM(mem_lock)); 315 spin_unlock(&drm_mem_lock);
317 return NULL; 316 return NULL;
318 } 317 }
319 spin_lock(&DRM(mem_lock)); 318 spin_lock(&drm_mem_lock);
320 ++DRM(mem_stats)[DRM_MEM_MAPPINGS].succeed_count; 319 ++drm_mem_stats[DRM_MEM_MAPPINGS].succeed_count;
321 DRM(mem_stats)[DRM_MEM_MAPPINGS].bytes_allocated += size; 320 drm_mem_stats[DRM_MEM_MAPPINGS].bytes_allocated += size;
322 spin_unlock(&DRM(mem_lock)); 321 spin_unlock(&drm_mem_lock);
323 return pt; 322 return pt;
324} 323}
325 324
326void DRM(ioremapfree) (void *pt, unsigned long size, drm_device_t * dev) { 325void drm_ioremapfree (void *pt, unsigned long size, drm_device_t * dev) {
327 int alloc_count; 326 int alloc_count;
328 int free_count; 327 int free_count;
329 328
@@ -333,11 +332,11 @@ void DRM(ioremapfree) (void *pt, unsigned long size, drm_device_t * dev) {
333 else 332 else
334 drm_ioremapfree(pt, size, dev); 333 drm_ioremapfree(pt, size, dev);
335 334
336 spin_lock(&DRM(mem_lock)); 335 spin_lock(&drm_mem_lock);
337 DRM(mem_stats)[DRM_MEM_MAPPINGS].bytes_freed += size; 336 drm_mem_stats[DRM_MEM_MAPPINGS].bytes_freed += size;
338 free_count = ++DRM(mem_stats)[DRM_MEM_MAPPINGS].free_count; 337 free_count = ++drm_mem_stats[DRM_MEM_MAPPINGS].free_count;
339 alloc_count = DRM(mem_stats)[DRM_MEM_MAPPINGS].succeed_count; 338 alloc_count = drm_mem_stats[DRM_MEM_MAPPINGS].succeed_count;
340 spin_unlock(&DRM(mem_lock)); 339 spin_unlock(&drm_mem_lock);
341 if (free_count > alloc_count) { 340 if (free_count > alloc_count) {
342 DRM_MEM_ERROR(DRM_MEM_MAPPINGS, 341 DRM_MEM_ERROR(DRM_MEM_MAPPINGS,
343 "Excess frees: %d frees, %d allocs\n", 342 "Excess frees: %d frees, %d allocs\n",
@@ -347,7 +346,7 @@ void DRM(ioremapfree) (void *pt, unsigned long size, drm_device_t * dev) {
347 346
348#if __OS_HAS_AGP 347#if __OS_HAS_AGP
349 348
350DRM_AGP_MEM *DRM(alloc_agp) (int pages, u32 type) { 349DRM_AGP_MEM *drm_alloc_agp (drm_device_t *dev, int pages, u32 type) {
351 DRM_AGP_MEM *handle; 350 DRM_AGP_MEM *handle;
352 351
353 if (!pages) { 352 if (!pages) {
@@ -355,21 +354,21 @@ DRM_AGP_MEM *DRM(alloc_agp) (int pages, u32 type) {
355 return NULL; 354 return NULL;
356 } 355 }
357 356
358 if ((handle = DRM(agp_allocate_memory) (pages, type))) { 357 if ((handle = drm_agp_allocate_memory (pages, type))) {
359 spin_lock(&DRM(mem_lock)); 358 spin_lock(&drm_mem_lock);
360 ++DRM(mem_stats)[DRM_MEM_TOTALAGP].succeed_count; 359 ++drm_mem_stats[DRM_MEM_TOTALAGP].succeed_count;
361 DRM(mem_stats)[DRM_MEM_TOTALAGP].bytes_allocated 360 drm_mem_stats[DRM_MEM_TOTALAGP].bytes_allocated
362 += pages << PAGE_SHIFT; 361 += pages << PAGE_SHIFT;
363 spin_unlock(&DRM(mem_lock)); 362 spin_unlock(&drm_mem_lock);
364 return handle; 363 return handle;
365 } 364 }
366 spin_lock(&DRM(mem_lock)); 365 spin_lock(&drm_mem_lock);
367 ++DRM(mem_stats)[DRM_MEM_TOTALAGP].fail_count; 366 ++drm_mem_stats[DRM_MEM_TOTALAGP].fail_count;
368 spin_unlock(&DRM(mem_lock)); 367 spin_unlock(&drm_mem_lock);
369 return NULL; 368 return NULL;
370} 369}
371 370
372int DRM(free_agp) (DRM_AGP_MEM * handle, int pages) { 371int drm_free_agp (DRM_AGP_MEM * handle, int pages) {
373 int alloc_count; 372 int alloc_count;
374 int free_count; 373 int free_count;
375 int retval = -EINVAL; 374 int retval = -EINVAL;
@@ -380,13 +379,13 @@ int DRM(free_agp) (DRM_AGP_MEM * handle, int pages) {
380 return retval; 379 return retval;
381 } 380 }
382 381
383 if (DRM(agp_free_memory) (handle)) { 382 if (drm_agp_free_memory (handle)) {
384 spin_lock(&DRM(mem_lock)); 383 spin_lock(&drm_mem_lock);
385 free_count = ++DRM(mem_stats)[DRM_MEM_TOTALAGP].free_count; 384 free_count = ++drm_mem_stats[DRM_MEM_TOTALAGP].free_count;
386 alloc_count = DRM(mem_stats)[DRM_MEM_TOTALAGP].succeed_count; 385 alloc_count = drm_mem_stats[DRM_MEM_TOTALAGP].succeed_count;
387 DRM(mem_stats)[DRM_MEM_TOTALAGP].bytes_freed 386 drm_mem_stats[DRM_MEM_TOTALAGP].bytes_freed
388 += pages << PAGE_SHIFT; 387 += pages << PAGE_SHIFT;
389 spin_unlock(&DRM(mem_lock)); 388 spin_unlock(&drm_mem_lock);
390 if (free_count > alloc_count) { 389 if (free_count > alloc_count) {
391 DRM_MEM_ERROR(DRM_MEM_TOTALAGP, 390 DRM_MEM_ERROR(DRM_MEM_TOTALAGP,
392 "Excess frees: %d frees, %d allocs\n", 391 "Excess frees: %d frees, %d allocs\n",
@@ -397,7 +396,7 @@ int DRM(free_agp) (DRM_AGP_MEM * handle, int pages) {
397 return retval; 396 return retval;
398} 397}
399 398
400int DRM(bind_agp) (DRM_AGP_MEM * handle, unsigned int start) { 399int drm_bind_agp (DRM_AGP_MEM * handle, unsigned int start) {
401 int retcode = -EINVAL; 400 int retcode = -EINVAL;
402 401
403 if (!handle) { 402 if (!handle) {
@@ -406,21 +405,21 @@ int DRM(bind_agp) (DRM_AGP_MEM * handle, unsigned int start) {
406 return retcode; 405 return retcode;
407 } 406 }
408 407
409 if (!(retcode = DRM(agp_bind_memory) (handle, start))) { 408 if (!(retcode = drm_agp_bind_memory (handle, start))) {
410 spin_lock(&DRM(mem_lock)); 409 spin_lock(&drm_mem_lock);
411 ++DRM(mem_stats)[DRM_MEM_BOUNDAGP].succeed_count; 410 ++drm_mem_stats[DRM_MEM_BOUNDAGP].succeed_count;
412 DRM(mem_stats)[DRM_MEM_BOUNDAGP].bytes_allocated 411 drm_mem_stats[DRM_MEM_BOUNDAGP].bytes_allocated
413 += handle->page_count << PAGE_SHIFT; 412 += handle->page_count << PAGE_SHIFT;
414 spin_unlock(&DRM(mem_lock)); 413 spin_unlock(&drm_mem_lock);
415 return retcode; 414 return retcode;
416 } 415 }
417 spin_lock(&DRM(mem_lock)); 416 spin_lock(&drm_mem_lock);
418 ++DRM(mem_stats)[DRM_MEM_BOUNDAGP].fail_count; 417 ++drm_mem_stats[DRM_MEM_BOUNDAGP].fail_count;
419 spin_unlock(&DRM(mem_lock)); 418 spin_unlock(&drm_mem_lock);
420 return retcode; 419 return retcode;
421} 420}
422 421
423int DRM(unbind_agp) (DRM_AGP_MEM * handle) { 422int drm_unbind_agp (DRM_AGP_MEM * handle) {
424 int alloc_count; 423 int alloc_count;
425 int free_count; 424 int free_count;
426 int retcode = -EINVAL; 425 int retcode = -EINVAL;
@@ -431,14 +430,14 @@ int DRM(unbind_agp) (DRM_AGP_MEM * handle) {
431 return retcode; 430 return retcode;
432 } 431 }
433 432
434 if ((retcode = DRM(agp_unbind_memory) (handle))) 433 if ((retcode = drm_agp_unbind_memory (handle)))
435 return retcode; 434 return retcode;
436 spin_lock(&DRM(mem_lock)); 435 spin_lock(&drm_mem_lock);
437 free_count = ++DRM(mem_stats)[DRM_MEM_BOUNDAGP].free_count; 436 free_count = ++drm_mem_stats[DRM_MEM_BOUNDAGP].free_count;
438 alloc_count = DRM(mem_stats)[DRM_MEM_BOUNDAGP].succeed_count; 437 alloc_count = drm_mem_stats[DRM_MEM_BOUNDAGP].succeed_count;
439 DRM(mem_stats)[DRM_MEM_BOUNDAGP].bytes_freed 438 drm_mem_stats[DRM_MEM_BOUNDAGP].bytes_freed
440 += handle->page_count << PAGE_SHIFT; 439 += handle->page_count << PAGE_SHIFT;
441 spin_unlock(&DRM(mem_lock)); 440 spin_unlock(&drm_mem_lock);
442 if (free_count > alloc_count) { 441 if (free_count > alloc_count) {
443 DRM_MEM_ERROR(DRM_MEM_BOUNDAGP, 442 DRM_MEM_ERROR(DRM_MEM_BOUNDAGP,
444 "Excess frees: %d frees, %d allocs\n", 443 "Excess frees: %d frees, %d allocs\n",
diff --git a/drivers/char/drm/drm_os_linux.h b/drivers/char/drm/drm_os_linux.h
index d51aeb4966f4..695115d70382 100644
--- a/drivers/char/drm/drm_os_linux.h
+++ b/drivers/char/drm/drm_os_linux.h
@@ -13,6 +13,7 @@
13#define DRM_ERR(d) -(d) 13#define DRM_ERR(d) -(d)
14/** Current process ID */ 14/** Current process ID */
15#define DRM_CURRENTPID current->pid 15#define DRM_CURRENTPID current->pid
16#define DRM_SUSER(p) capable(CAP_SYS_ADMIN)
16#define DRM_UDELAY(d) udelay(d) 17#define DRM_UDELAY(d) udelay(d)
17/** Read a byte from a MMIO region */ 18/** Read a byte from a MMIO region */
18#define DRM_READ8(map, offset) readb(((void __iomem *)(map)->handle) + (offset)) 19#define DRM_READ8(map, offset) readb(((void __iomem *)(map)->handle) + (offset))
diff --git a/drivers/char/drm/drm_pciids.h b/drivers/char/drm/drm_pciids.h
index d66dc55e29a0..5b1d3a04458d 100644
--- a/drivers/char/drm/drm_pciids.h
+++ b/drivers/char/drm/drm_pciids.h
@@ -46,6 +46,7 @@
46 {0x1002, 0x4E50, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350|CHIP_IS_MOBILITY}, \ 46 {0x1002, 0x4E50, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350|CHIP_IS_MOBILITY}, \
47 {0x1002, 0x4E51, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350|CHIP_IS_MOBILITY}, \ 47 {0x1002, 0x4E51, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350|CHIP_IS_MOBILITY}, \
48 {0x1002, 0x4E54, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350|CHIP_IS_MOBILITY}, \ 48 {0x1002, 0x4E54, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350|CHIP_IS_MOBILITY}, \
49 {0x1002, 0x4E56, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350|CHIP_IS_MOBILITY}, \
49 {0x1002, 0x5144, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R100|CHIP_SINGLE_CRTC}, \ 50 {0x1002, 0x5144, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R100|CHIP_SINGLE_CRTC}, \
50 {0x1002, 0x5145, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R100|CHIP_SINGLE_CRTC}, \ 51 {0x1002, 0x5145, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R100|CHIP_SINGLE_CRTC}, \
51 {0x1002, 0x5146, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R100|CHIP_SINGLE_CRTC}, \ 52 {0x1002, 0x5146, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R100|CHIP_SINGLE_CRTC}, \
@@ -69,6 +70,7 @@
69 {0x1002, 0x516B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R200}, \ 70 {0x1002, 0x516B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R200}, \
70 {0x1002, 0x516C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R200}, \ 71 {0x1002, 0x516C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R200}, \
71 {0x1002, 0x5460, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350}, \ 72 {0x1002, 0x5460, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350}, \
73 {0x1002, 0x554F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R350}, \
72 {0x1002, 0x5834, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS300|CHIP_IS_IGP}, \ 74 {0x1002, 0x5834, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS300|CHIP_IS_IGP}, \
73 {0x1002, 0x5835, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS300|CHIP_IS_IGP|CHIP_IS_MOBILITY}, \ 75 {0x1002, 0x5835, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS300|CHIP_IS_IGP|CHIP_IS_MOBILITY}, \
74 {0x1002, 0x5836, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS300|CHIP_IS_IGP}, \ 76 {0x1002, 0x5836, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS300|CHIP_IS_IGP}, \
@@ -82,10 +84,13 @@
82 {0x1002, 0x5969, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV100}, \ 84 {0x1002, 0x5969, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV100}, \
83 {0x1002, 0x596A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV280}, \ 85 {0x1002, 0x596A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV280}, \
84 {0x1002, 0x596B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV280}, \ 86 {0x1002, 0x596B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV280}, \
87 {0x1002, 0x5b60, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350}, \
85 {0x1002, 0x5c61, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV280|CHIP_IS_MOBILITY}, \ 88 {0x1002, 0x5c61, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV280|CHIP_IS_MOBILITY}, \
86 {0x1002, 0x5c62, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV280}, \ 89 {0x1002, 0x5c62, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV280}, \
87 {0x1002, 0x5c63, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV280|CHIP_IS_MOBILITY}, \ 90 {0x1002, 0x5c63, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV280|CHIP_IS_MOBILITY}, \
88 {0x1002, 0x5c64, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV280}, \ 91 {0x1002, 0x5c64, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV280}, \
92 {0x1002, 0x5d4d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R350}, \
93 {0x1002, 0x5e4b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420}, \
89 {0, 0, 0} 94 {0, 0, 0}
90 95
91#define r128_PCI_IDS \ 96#define r128_PCI_IDS \
@@ -176,7 +181,7 @@
176 181
177#define viadrv_PCI_IDS \ 182#define viadrv_PCI_IDS \
178 {0x1106, 0x3022, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ 183 {0x1106, 0x3022, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
179 {0x1106, 0x3118, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ 184 {0x1106, 0x3118, PCI_ANY_ID, PCI_ANY_ID, 0, 0, VIA_PRO_GROUP_A}, \
180 {0x1106, 0x3122, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ 185 {0x1106, 0x3122, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
181 {0x1106, 0x7205, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ 186 {0x1106, 0x7205, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
182 {0x1106, 0x3108, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ 187 {0x1106, 0x3108, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
@@ -196,6 +201,10 @@
196 {0x8086, 0x2572, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ 201 {0x8086, 0x2572, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
197 {0, 0, 0} 202 {0, 0, 0}
198 203
204#define gamma_PCI_IDS \
205 {0x3d3d, 0x0008, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
206 {0, 0, 0}
207
199#define savage_PCI_IDS \ 208#define savage_PCI_IDS \
200 {0x5333, 0x8a20, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SAVAGE3D}, \ 209 {0x5333, 0x8a20, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SAVAGE3D}, \
201 {0x5333, 0x8a21, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SAVAGE3D}, \ 210 {0x5333, 0x8a21, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SAVAGE3D}, \
@@ -234,3 +243,4 @@
234 {0x8086, 0x2592, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ 243 {0x8086, 0x2592, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
235 {0x8086, 0x2772, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ 244 {0x8086, 0x2772, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
236 {0, 0, 0} 245 {0, 0, 0}
246
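
The drm_pciids.h additions follow the table shape used throughout that header: {vendor, device, subvendor, subdevice, class, class_mask, driver_data}, with driver_data carrying chip flags such as CHIP_RV350|CHIP_IS_MOBILITY and {0, 0, 0} terminating each list. A simplified stand-alone matcher in that style; the struct below is an illustrative stand-in, not the kernel's struct pci_device_id:

/* Simplified stand-in for matching a PCI device against an ID table of the
 * shape used in drm_pciids.h; the struct and flag values are illustrative. */
#include <stdio.h>

#define ANY_ID 0xffffffffu

#define CHIP_RV350       0x01
#define CHIP_IS_MOBILITY 0x02

struct pci_id {
    unsigned int vendor, device;
    unsigned int subvendor, subdevice;
    unsigned long driver_data;        /* chip flags */
};

static const struct pci_id radeon_ids[] = {
    { 0x1002, 0x4E50, ANY_ID, ANY_ID, CHIP_RV350 | CHIP_IS_MOBILITY },
    { 0x1002, 0x5460, ANY_ID, ANY_ID, CHIP_RV350 },
    { 0, 0, 0 }                       /* terminator, as in the header above */
};

static const struct pci_id *match_id(unsigned int vendor, unsigned int device)
{
    const struct pci_id *id;

    for (id = radeon_ids; id->vendor; ++id)
        if (id->vendor == vendor && id->device == device)
            return id;
    return NULL;
}

int main(void)
{
    const struct pci_id *id = match_id(0x1002, 0x4E50);

    if (id)
        printf("matched: flags=0x%lx (mobility=%d)\n",
               id->driver_data, !!(id->driver_data & CHIP_IS_MOBILITY));
    else
        printf("no match\n");
    return 0;
}
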
diff --git a/drivers/char/drm/drm_proc.c b/drivers/char/drm/drm_proc.c
index 3f452f763f0f..6f943e3309ef 100644
--- a/drivers/char/drm/drm_proc.c
+++ b/drivers/char/drm/drm_proc.c
@@ -61,16 +61,14 @@ static struct drm_proc_list {
61 const char *name; /**< file name */ 61 const char *name; /**< file name */
62 int (*f) (char *, char **, off_t, int, int *, void *); /**< proc callback*/ 62 int (*f) (char *, char **, off_t, int, int *, void *); /**< proc callback*/
63} drm_proc_list[] = { 63} drm_proc_list[] = {
64 { 64 {"name", drm_name_info},
65 "name", drm_name_info}, { 65 {"mem", drm_mem_info},
66 "mem", drm_mem_info}, { 66 {"vm", drm_vm_info},
67 "vm", drm_vm_info}, { 67 {"clients", drm_clients_info},
68 "clients", drm_clients_info}, { 68 {"queues", drm_queues_info},
69 "queues", drm_queues_info}, { 69 {"bufs", drm_bufs_info},
70 "bufs", drm_bufs_info},
71#if DRM_DEBUG_CODE 70#if DRM_DEBUG_CODE
72 { 71 {"vma", drm_vma_info},
73 "vma", drm_vma_info},
74#endif 72#endif
75}; 73};
76 74
diff --git a/drivers/char/drm/drm_stub.c b/drivers/char/drm/drm_stub.c
index 60b6f8e8bf69..42d766359caa 100644
--- a/drivers/char/drm/drm_stub.c
+++ b/drivers/char/drm/drm_stub.c
@@ -93,8 +93,8 @@ static int drm_fill_in_dev(drm_device_t * dev, struct pci_dev *pdev,
93 93
94 dev->driver = driver; 94 dev->driver = driver;
95 95
96 if (dev->driver->preinit) 96 if (dev->driver->load)
97 if ((retcode = dev->driver->preinit(dev, ent->driver_data))) 97 if ((retcode = dev->driver->load(dev, ent->driver_data)))
98 goto error_out_unreg; 98 goto error_out_unreg;
99 99
100 if (drm_core_has_AGP(dev)) { 100 if (drm_core_has_AGP(dev)) {
@@ -124,47 +124,10 @@ static int drm_fill_in_dev(drm_device_t * dev, struct pci_dev *pdev,
124 return 0; 124 return 0;
125 125
126 error_out_unreg: 126 error_out_unreg:
127 drm_takedown(dev); 127 drm_lastclose(dev);
128 return retcode; 128 return retcode;
129} 129}
130 130
131/**
132 * File \c open operation.
133 *
134 * \param inode device inode.
135 * \param filp file pointer.
136 *
137 * Puts the dev->fops corresponding to the device minor number into
138 * \p filp, call the \c open method, and restore the file operations.
139 */
140int drm_stub_open(struct inode *inode, struct file *filp)
141{
142 drm_device_t *dev = NULL;
143 int minor = iminor(inode);
144 int err = -ENODEV;
145 struct file_operations *old_fops;
146
147 DRM_DEBUG("\n");
148
149 if (!((minor >= 0) && (minor < drm_cards_limit)))
150 return -ENODEV;
151
152 if (!drm_heads[minor])
153 return -ENODEV;
154
155 if (!(dev = drm_heads[minor]->dev))
156 return -ENODEV;
157
158 old_fops = filp->f_op;
159 filp->f_op = fops_get(&dev->driver->fops);
160 if (filp->f_op->open && (err = filp->f_op->open(inode, filp))) {
161 fops_put(filp->f_op);
162 filp->f_op = fops_get(old_fops);
163 }
164 fops_put(old_fops);
165
166 return err;
167}
168 131
169/** 132/**
170 * Get a secondary minor number. 133 * Get a secondary minor number.
@@ -200,11 +163,7 @@ static int drm_get_head(drm_device_t * dev, drm_head_t * head)
200 goto err_g1; 163 goto err_g1;
201 } 164 }
202 165
203 head->dev_class = drm_sysfs_device_add(drm_class, 166 head->dev_class = drm_sysfs_device_add(drm_class, head);
204 MKDEV(DRM_MAJOR,
205 minor),
206 &dev->pdev->dev,
207 "card%d", minor);
208 if (IS_ERR(head->dev_class)) { 167 if (IS_ERR(head->dev_class)) {
209 printk(KERN_ERR 168 printk(KERN_ERR
210 "DRM: Error sysfs_device_add.\n"); 169 "DRM: Error sysfs_device_add.\n");
@@ -258,11 +217,10 @@ int drm_get_dev(struct pci_dev *pdev, const struct pci_device_id *ent,
258 } 217 }
259 if ((ret = drm_get_head(dev, &dev->primary))) 218 if ((ret = drm_get_head(dev, &dev->primary)))
260 goto err_g1; 219 goto err_g1;
261 220
262 /* postinit is a required function to display the signon banner */ 221 DRM_INFO("Initialized %s %d.%d.%d %s on minor %d\n",
263 /* drivers add secondary heads here if needed */ 222 driver->name, driver->major, driver->minor, driver->patchlevel,
264 if ((ret = dev->driver->postinit(dev, ent->driver_data))) 223 driver->date, dev->primary.minor);
265 goto err_g1;
266 224
267 return 0; 225 return 0;
268 226
@@ -318,10 +276,9 @@ int drm_put_head(drm_head_t * head)
318 DRM_DEBUG("release secondary minor %d\n", minor); 276 DRM_DEBUG("release secondary minor %d\n", minor);
319 277
320 drm_proc_cleanup(minor, drm_proc_root, head->dev_root); 278 drm_proc_cleanup(minor, drm_proc_root, head->dev_root);
321 drm_sysfs_device_remove(MKDEV(DRM_MAJOR, head->minor)); 279 drm_sysfs_device_remove(head->dev_class);
322 280
323 *head = (drm_head_t) { 281 *head = (drm_head_t) {.dev = NULL};
324 .dev = NULL};
325 282
326 drm_heads[minor] = NULL; 283 drm_heads[minor] = NULL;
327 284
diff --git a/drivers/char/drm/drm_sysfs.c b/drivers/char/drm/drm_sysfs.c
index 6d3449761914..68e43ddc16ae 100644
--- a/drivers/char/drm/drm_sysfs.c
+++ b/drivers/char/drm/drm_sysfs.c
@@ -15,8 +15,6 @@
15#include <linux/device.h> 15#include <linux/device.h>
16#include <linux/kdev_t.h> 16#include <linux/kdev_t.h>
17#include <linux/err.h> 17#include <linux/err.h>
18#include <linux/slab.h>
19#include <linux/string.h>
20 18
21#include "drm_core.h" 19#include "drm_core.h"
22#include "drmP.h" 20#include "drmP.h"
@@ -28,15 +26,11 @@ struct drm_sysfs_class {
28#define to_drm_sysfs_class(d) container_of(d, struct drm_sysfs_class, class) 26#define to_drm_sysfs_class(d) container_of(d, struct drm_sysfs_class, class)
29 27
30struct simple_dev { 28struct simple_dev {
31 struct list_head node;
32 dev_t dev; 29 dev_t dev;
33 struct class_device class_dev; 30 struct class_device class_dev;
34}; 31};
35#define to_simple_dev(d) container_of(d, struct simple_dev, class_dev) 32#define to_simple_dev(d) container_of(d, struct simple_dev, class_dev)
36 33
37static LIST_HEAD(simple_dev_list);
38static DEFINE_SPINLOCK(simple_dev_list_lock);
39
40static void release_simple_dev(struct class_device *class_dev) 34static void release_simple_dev(struct class_device *class_dev)
41{ 35{
42 struct simple_dev *s_dev = to_simple_dev(class_dev); 36 struct simple_dev *s_dev = to_simple_dev(class_dev);
@@ -124,6 +118,18 @@ void drm_sysfs_destroy(struct drm_sysfs_class *cs)
124 class_unregister(&cs->class); 118 class_unregister(&cs->class);
125} 119}
126 120
121static ssize_t show_dri(struct class_device *class_device, char *buf)
122{
123 drm_device_t * dev = ((drm_head_t *)class_get_devdata(class_device))->dev;
124 if (dev->driver->dri_library_name)
125 return dev->driver->dri_library_name(dev, buf);
126 return snprintf(buf, PAGE_SIZE, "%s\n", dev->driver->pci_driver.name);
127}
128
129static struct class_device_attribute class_device_attrs[] = {
130 __ATTR(dri_library_name, S_IRUGO, show_dri, NULL),
131};
132
127/** 133/**
128 * drm_sysfs_device_add - adds a class device to sysfs for a character driver 134 * drm_sysfs_device_add - adds a class device to sysfs for a character driver
129 * @cs: pointer to the struct drm_sysfs_class that this device should be registered to. 135 * @cs: pointer to the struct drm_sysfs_class that this device should be registered to.
@@ -138,13 +144,11 @@ void drm_sysfs_destroy(struct drm_sysfs_class *cs)
138 * Note: the struct drm_sysfs_class passed to this function must have previously been 144 * Note: the struct drm_sysfs_class passed to this function must have previously been
139 * created with a call to drm_sysfs_create(). 145 * created with a call to drm_sysfs_create().
140 */ 146 */
141struct class_device *drm_sysfs_device_add(struct drm_sysfs_class *cs, dev_t dev, 147struct class_device *drm_sysfs_device_add(struct drm_sysfs_class *cs,
142 struct device *device, 148 drm_head_t *head)
143 const char *fmt, ...)
144{ 149{
145 va_list args;
146 struct simple_dev *s_dev = NULL; 150 struct simple_dev *s_dev = NULL;
147 int retval; 151 int i, retval;
148 152
149 if ((cs == NULL) || (IS_ERR(cs))) { 153 if ((cs == NULL) || (IS_ERR(cs))) {
150 retval = -ENODEV; 154 retval = -ENODEV;
@@ -158,26 +162,23 @@ struct class_device *drm_sysfs_device_add(struct drm_sysfs_class *cs, dev_t dev,
158 } 162 }
159 memset(s_dev, 0x00, sizeof(*s_dev)); 163 memset(s_dev, 0x00, sizeof(*s_dev));
160 164
161 s_dev->dev = dev; 165 s_dev->dev = MKDEV(DRM_MAJOR, head->minor);
162 s_dev->class_dev.dev = device; 166 s_dev->class_dev.dev = &(head->dev->pdev)->dev;
163 s_dev->class_dev.class = &cs->class; 167 s_dev->class_dev.class = &cs->class;
164 168
165 va_start(args, fmt); 169 snprintf(s_dev->class_dev.class_id, BUS_ID_SIZE, "card%d", head->minor);
166 vsnprintf(s_dev->class_dev.class_id, BUS_ID_SIZE, fmt, args);
167 va_end(args);
168 retval = class_device_register(&s_dev->class_dev); 170 retval = class_device_register(&s_dev->class_dev);
169 if (retval) 171 if (retval)
170 goto error; 172 goto error;
171 173
172 class_device_create_file(&s_dev->class_dev, &cs->attr); 174 class_device_create_file(&s_dev->class_dev, &cs->attr);
175 class_set_devdata(&s_dev->class_dev, head);
173 176
174 spin_lock(&simple_dev_list_lock); 177 for (i = 0; i < ARRAY_SIZE(class_device_attrs); i++)
175 list_add(&s_dev->node, &simple_dev_list); 178 class_device_create_file(&s_dev->class_dev, &class_device_attrs[i]);
176 spin_unlock(&simple_dev_list_lock);
177
178 return &s_dev->class_dev; 179 return &s_dev->class_dev;
179 180
180 error: 181error:
181 kfree(s_dev); 182 kfree(s_dev);
182 return ERR_PTR(retval); 183 return ERR_PTR(retval);
183} 184}
@@ -189,23 +190,12 @@ struct class_device *drm_sysfs_device_add(struct drm_sysfs_class *cs, dev_t dev,
189 * This call unregisters and cleans up a class device that was created with a 190 * This call unregisters and cleans up a class device that was created with a
190 * call to drm_sysfs_device_add() 191 * call to drm_sysfs_device_add()
191 */ 192 */
192void drm_sysfs_device_remove(dev_t dev) 193void drm_sysfs_device_remove(struct class_device *class_dev)
193{ 194{
194 struct simple_dev *s_dev = NULL; 195 struct simple_dev *s_dev = to_simple_dev(class_dev);
195 int found = 0; 196 int i;
196 197
197 spin_lock(&simple_dev_list_lock); 198 for (i = 0; i < ARRAY_SIZE(class_device_attrs); i++)
198 list_for_each_entry(s_dev, &simple_dev_list, node) { 199 class_device_remove_file(&s_dev->class_dev, &class_device_attrs[i]);
199 if (s_dev->dev == dev) { 200 class_device_unregister(&s_dev->class_dev);
200 found = 1;
201 break;
202 }
203 }
204 if (found) {
205 list_del(&s_dev->node);
206 spin_unlock(&simple_dev_list_lock);
207 class_device_unregister(&s_dev->class_dev);
208 } else {
209 spin_unlock(&simple_dev_list_lock);
210 }
211} 201}
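
The show_dri() attribute handler added above falls back to the PCI driver name whenever a driver does not supply a dri_library_name hook. A minimal sketch of how a driver could provide one (hypothetical "foo" driver; the hook signature is inferred from the call site in show_dri() and is not spelled out in this hunk):

static int foo_dri_library_name(drm_device_t *dev, char *buf)
{
	/* sysfs hands us a PAGE_SIZE buffer; return the number of bytes written */
	return snprintf(buf, PAGE_SIZE, "foo\n");
}

static struct drm_driver foo_driver = {
	/* ... other hooks and fields ... */
	.dri_library_name = foo_dri_library_name,
};

With this in place, reading the dri_library_name attribute under the card's class device (something like /sys/class/drm/card0/dri_library_name) would report "foo" instead of the PCI driver name.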
diff --git a/drivers/char/drm/i810_dma.c b/drivers/char/drm/i810_dma.c
index dba502373da1..cc1b89086876 100644
--- a/drivers/char/drm/i810_dma.c
+++ b/drivers/char/drm/i810_dma.c
@@ -114,7 +114,6 @@ static int i810_mmap_buffers(struct file *filp, struct vm_area_struct *vma)
114 114
115static struct file_operations i810_buffer_fops = { 115static struct file_operations i810_buffer_fops = {
116 .open = drm_open, 116 .open = drm_open,
117 .flush = drm_flush,
118 .release = drm_release, 117 .release = drm_release,
119 .ioctl = drm_ioctl, 118 .ioctl = drm_ioctl,
120 .mmap = i810_mmap_buffers, 119 .mmap = i810_mmap_buffers,
@@ -1319,12 +1318,24 @@ static int i810_flip_bufs(struct inode *inode, struct file *filp,
1319 return 0; 1318 return 0;
1320} 1319}
1321 1320
1322void i810_driver_pretakedown(drm_device_t * dev) 1321int i810_driver_load(drm_device_t *dev, unsigned long flags)
1322{
1323 /* i810 has 4 more counters */
1324 dev->counters += 4;
1325 dev->types[6] = _DRM_STAT_IRQ;
1326 dev->types[7] = _DRM_STAT_PRIMARY;
1327 dev->types[8] = _DRM_STAT_SECONDARY;
1328 dev->types[9] = _DRM_STAT_DMA;
1329
1330 return 0;
1331}
1332
1333void i810_driver_lastclose(drm_device_t * dev)
1323{ 1334{
1324 i810_dma_cleanup(dev); 1335 i810_dma_cleanup(dev);
1325} 1336}
1326 1337
1327void i810_driver_prerelease(drm_device_t * dev, DRMFILE filp) 1338void i810_driver_preclose(drm_device_t * dev, DRMFILE filp)
1328{ 1339{
1329 if (dev->dev_private) { 1340 if (dev->dev_private) {
1330 drm_i810_private_t *dev_priv = dev->dev_private; 1341 drm_i810_private_t *dev_priv = dev->dev_private;
@@ -1334,7 +1345,7 @@ void i810_driver_prerelease(drm_device_t * dev, DRMFILE filp)
1334 } 1345 }
1335} 1346}
1336 1347
1337void i810_driver_release(drm_device_t * dev, struct file *filp) 1348void i810_driver_reclaim_buffers_locked(drm_device_t * dev, struct file *filp)
1338{ 1349{
1339 i810_reclaim_buffers(dev, filp); 1350 i810_reclaim_buffers(dev, filp);
1340} 1351}
@@ -1346,21 +1357,21 @@ int i810_driver_dma_quiescent(drm_device_t * dev)
1346} 1357}
1347 1358
1348drm_ioctl_desc_t i810_ioctls[] = { 1359drm_ioctl_desc_t i810_ioctls[] = {
1349 [DRM_IOCTL_NR(DRM_I810_INIT)] = {i810_dma_init, 1, 1}, 1360 [DRM_IOCTL_NR(DRM_I810_INIT)] = {i810_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
1350 [DRM_IOCTL_NR(DRM_I810_VERTEX)] = {i810_dma_vertex, 1, 0}, 1361 [DRM_IOCTL_NR(DRM_I810_VERTEX)] = {i810_dma_vertex, DRM_AUTH},
1351 [DRM_IOCTL_NR(DRM_I810_CLEAR)] = {i810_clear_bufs, 1, 0}, 1362 [DRM_IOCTL_NR(DRM_I810_CLEAR)] = {i810_clear_bufs, DRM_AUTH},
1352 [DRM_IOCTL_NR(DRM_I810_FLUSH)] = {i810_flush_ioctl, 1, 0}, 1363 [DRM_IOCTL_NR(DRM_I810_FLUSH)] = {i810_flush_ioctl, DRM_AUTH},
1353 [DRM_IOCTL_NR(DRM_I810_GETAGE)] = {i810_getage, 1, 0}, 1364 [DRM_IOCTL_NR(DRM_I810_GETAGE)] = {i810_getage, DRM_AUTH},
1354 [DRM_IOCTL_NR(DRM_I810_GETBUF)] = {i810_getbuf, 1, 0}, 1365 [DRM_IOCTL_NR(DRM_I810_GETBUF)] = {i810_getbuf, DRM_AUTH},
1355 [DRM_IOCTL_NR(DRM_I810_SWAP)] = {i810_swap_bufs, 1, 0}, 1366 [DRM_IOCTL_NR(DRM_I810_SWAP)] = {i810_swap_bufs, DRM_AUTH},
1356 [DRM_IOCTL_NR(DRM_I810_COPY)] = {i810_copybuf, 1, 0}, 1367 [DRM_IOCTL_NR(DRM_I810_COPY)] = {i810_copybuf, DRM_AUTH},
1357 [DRM_IOCTL_NR(DRM_I810_DOCOPY)] = {i810_docopy, 1, 0}, 1368 [DRM_IOCTL_NR(DRM_I810_DOCOPY)] = {i810_docopy, DRM_AUTH},
1358 [DRM_IOCTL_NR(DRM_I810_OV0INFO)] = {i810_ov0_info, 1, 0}, 1369 [DRM_IOCTL_NR(DRM_I810_OV0INFO)] = {i810_ov0_info, DRM_AUTH},
1359 [DRM_IOCTL_NR(DRM_I810_FSTATUS)] = {i810_fstatus, 1, 0}, 1370 [DRM_IOCTL_NR(DRM_I810_FSTATUS)] = {i810_fstatus, DRM_AUTH},
1360 [DRM_IOCTL_NR(DRM_I810_OV0FLIP)] = {i810_ov0_flip, 1, 0}, 1371 [DRM_IOCTL_NR(DRM_I810_OV0FLIP)] = {i810_ov0_flip, DRM_AUTH},
1361 [DRM_IOCTL_NR(DRM_I810_MC)] = {i810_dma_mc, 1, 1}, 1372 [DRM_IOCTL_NR(DRM_I810_MC)] = {i810_dma_mc, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
1362 [DRM_IOCTL_NR(DRM_I810_RSTATUS)] = {i810_rstatus, 1, 0}, 1373 [DRM_IOCTL_NR(DRM_I810_RSTATUS)] = {i810_rstatus, DRM_AUTH},
1363 [DRM_IOCTL_NR(DRM_I810_FLIP)] = {i810_flip_bufs, 1, 0} 1374 [DRM_IOCTL_NR(DRM_I810_FLIP)] = {i810_flip_bufs, DRM_AUTH}
1364}; 1375};
1365 1376
1366int i810_max_ioctl = DRM_ARRAY_SIZE(i810_ioctls); 1377int i810_max_ioctl = DRM_ARRAY_SIZE(i810_ioctls);
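
For readers comparing the two ioctl tables above: the old drm_ioctl_desc_t entries were positional, {handler, auth_needed, root_only}, so {i810_dma_init, 1, 1} meant "authenticated" plus "root only", while {i810_dma_vertex, 1, 0} only required an authenticated client. The new tables spell the same policy out with named flags (adding DRM_MASTER to the privileged entries). Illustrative fragment only, not an additional change:

/* privileged setup ioctl: authenticated, DRM master, root only */
[DRM_IOCTL_NR(DRM_I810_INIT)] = {i810_dma_init, DRM_AUTH | DRM_MASTER | DRM_ROOT_ONLY},
/* ordinary rendering ioctl: any authenticated client */
[DRM_IOCTL_NR(DRM_I810_VERTEX)] = {i810_dma_vertex, DRM_AUTH},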
diff --git a/drivers/char/drm/i810_drv.c b/drivers/char/drm/i810_drv.c
index 070cef6c2b46..dfe6ad2b6a6e 100644
--- a/drivers/char/drm/i810_drv.c
+++ b/drivers/char/drm/i810_drv.c
@@ -38,38 +38,6 @@
38 38
39#include "drm_pciids.h" 39#include "drm_pciids.h"
40 40
41static int postinit(struct drm_device *dev, unsigned long flags)
42{
43 /* i810 has 4 more counters */
44 dev->counters += 4;
45 dev->types[6] = _DRM_STAT_IRQ;
46 dev->types[7] = _DRM_STAT_PRIMARY;
47 dev->types[8] = _DRM_STAT_SECONDARY;
48 dev->types[9] = _DRM_STAT_DMA;
49
50 DRM_INFO("Initialized %s %d.%d.%d %s on minor %d: %s\n",
51 DRIVER_NAME,
52 DRIVER_MAJOR,
53 DRIVER_MINOR,
54 DRIVER_PATCHLEVEL,
55 DRIVER_DATE, dev->primary.minor, pci_pretty_name(dev->pdev)
56 );
57 return 0;
58}
59
60static int version(drm_version_t * version)
61{
62 int len;
63
64 version->version_major = DRIVER_MAJOR;
65 version->version_minor = DRIVER_MINOR;
66 version->version_patchlevel = DRIVER_PATCHLEVEL;
67 DRM_COPY(version->name, DRIVER_NAME);
68 DRM_COPY(version->date, DRIVER_DATE);
69 DRM_COPY(version->desc, DRIVER_DESC);
70 return 0;
71}
72
73static struct pci_device_id pciidlist[] = { 41static struct pci_device_id pciidlist[] = {
74 i810_PCI_IDS 42 i810_PCI_IDS
75}; 43};
@@ -79,16 +47,14 @@ static struct drm_driver driver = {
79 DRIVER_USE_AGP | DRIVER_REQUIRE_AGP | DRIVER_USE_MTRR | 47 DRIVER_USE_AGP | DRIVER_REQUIRE_AGP | DRIVER_USE_MTRR |
80 DRIVER_HAVE_DMA | DRIVER_DMA_QUEUE, 48 DRIVER_HAVE_DMA | DRIVER_DMA_QUEUE,
81 .dev_priv_size = sizeof(drm_i810_buf_priv_t), 49 .dev_priv_size = sizeof(drm_i810_buf_priv_t),
82 .pretakedown = i810_driver_pretakedown, 50 .load = i810_driver_load,
83 .prerelease = i810_driver_prerelease, 51 .lastclose = i810_driver_lastclose,
52 .preclose = i810_driver_preclose,
84 .device_is_agp = i810_driver_device_is_agp, 53 .device_is_agp = i810_driver_device_is_agp,
85 .release = i810_driver_release, 54 .reclaim_buffers_locked = i810_driver_reclaim_buffers_locked,
86 .dma_quiescent = i810_driver_dma_quiescent, 55 .dma_quiescent = i810_driver_dma_quiescent,
87 .reclaim_buffers = i810_reclaim_buffers,
88 .get_map_ofs = drm_core_get_map_ofs, 56 .get_map_ofs = drm_core_get_map_ofs,
89 .get_reg_ofs = drm_core_get_reg_ofs, 57 .get_reg_ofs = drm_core_get_reg_ofs,
90 .postinit = postinit,
91 .version = version,
92 .ioctls = i810_ioctls, 58 .ioctls = i810_ioctls,
93 .fops = { 59 .fops = {
94 .owner = THIS_MODULE, 60 .owner = THIS_MODULE,
@@ -98,13 +64,19 @@ static struct drm_driver driver = {
98 .mmap = drm_mmap, 64 .mmap = drm_mmap,
99 .poll = drm_poll, 65 .poll = drm_poll,
100 .fasync = drm_fasync, 66 .fasync = drm_fasync,
101 } 67 },
102 , 68
103 .pci_driver = { 69 .pci_driver = {
104 .name = DRIVER_NAME, 70 .name = DRIVER_NAME,
105 .id_table = pciidlist, 71 .id_table = pciidlist,
106 } 72 },
107 , 73
74 .name = DRIVER_NAME,
75 .desc = DRIVER_DESC,
76 .date = DRIVER_DATE,
77 .major = DRIVER_MAJOR,
78 .minor = DRIVER_MINOR,
79 .patchlevel = DRIVER_PATCHLEVEL,
108}; 80};
109 81
110static int __init i810_init(void) 82static int __init i810_init(void)
diff --git a/drivers/char/drm/i810_drv.h b/drivers/char/drm/i810_drv.h
index c78f36aaa2f0..a18b80d91920 100644
--- a/drivers/char/drm/i810_drv.h
+++ b/drivers/char/drm/i810_drv.h
@@ -116,9 +116,13 @@ typedef struct drm_i810_private {
116extern void i810_reclaim_buffers(drm_device_t * dev, struct file *filp); 116extern void i810_reclaim_buffers(drm_device_t * dev, struct file *filp);
117 117
118extern int i810_driver_dma_quiescent(drm_device_t * dev); 118extern int i810_driver_dma_quiescent(drm_device_t * dev);
119extern void i810_driver_release(drm_device_t * dev, struct file *filp); 119extern void i810_driver_reclaim_buffers_locked(drm_device_t * dev,
120extern void i810_driver_pretakedown(drm_device_t * dev); 120 struct file *filp);
121extern void i810_driver_prerelease(drm_device_t * dev, DRMFILE filp); 121extern int i810_driver_load(struct drm_device *, unsigned long flags);
122extern void i810_driver_lastclose(drm_device_t * dev);
123extern void i810_driver_preclose(drm_device_t * dev, DRMFILE filp);
124extern void i810_driver_reclaim_buffers_locked(drm_device_t * dev,
125 struct file *filp);
122extern int i810_driver_device_is_agp(drm_device_t * dev); 126extern int i810_driver_device_is_agp(drm_device_t * dev);
123 127
124extern drm_ioctl_desc_t i810_ioctls[]; 128extern drm_ioctl_desc_t i810_ioctls[];
diff --git a/drivers/char/drm/i830_dma.c b/drivers/char/drm/i830_dma.c
index dc94f1914425..4fea32aed6d2 100644
--- a/drivers/char/drm/i830_dma.c
+++ b/drivers/char/drm/i830_dma.c
@@ -116,7 +116,6 @@ static int i830_mmap_buffers(struct file *filp, struct vm_area_struct *vma)
116 116
117static struct file_operations i830_buffer_fops = { 117static struct file_operations i830_buffer_fops = {
118 .open = drm_open, 118 .open = drm_open,
119 .flush = drm_flush,
120 .release = drm_release, 119 .release = drm_release,
121 .ioctl = drm_ioctl, 120 .ioctl = drm_ioctl,
122 .mmap = i830_mmap_buffers, 121 .mmap = i830_mmap_buffers,
@@ -1517,12 +1516,24 @@ static int i830_setparam(struct inode *inode, struct file *filp,
1517 return 0; 1516 return 0;
1518} 1517}
1519 1518
1520void i830_driver_pretakedown(drm_device_t * dev) 1519int i830_driver_load(drm_device_t *dev, unsigned long flags)
1520{
1521 /* i830 has 4 more counters */
1522 dev->counters += 4;
1523 dev->types[6] = _DRM_STAT_IRQ;
1524 dev->types[7] = _DRM_STAT_PRIMARY;
1525 dev->types[8] = _DRM_STAT_SECONDARY;
1526 dev->types[9] = _DRM_STAT_DMA;
1527
1528 return 0;
1529}
1530
1531void i830_driver_lastclose(drm_device_t * dev)
1521{ 1532{
1522 i830_dma_cleanup(dev); 1533 i830_dma_cleanup(dev);
1523} 1534}
1524 1535
1525void i830_driver_prerelease(drm_device_t * dev, DRMFILE filp) 1536void i830_driver_preclose(drm_device_t * dev, DRMFILE filp)
1526{ 1537{
1527 if (dev->dev_private) { 1538 if (dev->dev_private) {
1528 drm_i830_private_t *dev_priv = dev->dev_private; 1539 drm_i830_private_t *dev_priv = dev->dev_private;
@@ -1532,7 +1543,7 @@ void i830_driver_prerelease(drm_device_t * dev, DRMFILE filp)
1532 } 1543 }
1533} 1544}
1534 1545
1535void i830_driver_release(drm_device_t * dev, struct file *filp) 1546void i830_driver_reclaim_buffers_locked(drm_device_t * dev, struct file *filp)
1536{ 1547{
1537 i830_reclaim_buffers(dev, filp); 1548 i830_reclaim_buffers(dev, filp);
1538} 1549}
@@ -1544,20 +1555,20 @@ int i830_driver_dma_quiescent(drm_device_t * dev)
1544} 1555}
1545 1556
1546drm_ioctl_desc_t i830_ioctls[] = { 1557drm_ioctl_desc_t i830_ioctls[] = {
1547 [DRM_IOCTL_NR(DRM_I830_INIT)] = {i830_dma_init, 1, 1}, 1558 [DRM_IOCTL_NR(DRM_I830_INIT)] = {i830_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
1548 [DRM_IOCTL_NR(DRM_I830_VERTEX)] = {i830_dma_vertex, 1, 0}, 1559 [DRM_IOCTL_NR(DRM_I830_VERTEX)] = {i830_dma_vertex, DRM_AUTH},
1549 [DRM_IOCTL_NR(DRM_I830_CLEAR)] = {i830_clear_bufs, 1, 0}, 1560 [DRM_IOCTL_NR(DRM_I830_CLEAR)] = {i830_clear_bufs, DRM_AUTH},
1550 [DRM_IOCTL_NR(DRM_I830_FLUSH)] = {i830_flush_ioctl, 1, 0}, 1561 [DRM_IOCTL_NR(DRM_I830_FLUSH)] = {i830_flush_ioctl, DRM_AUTH},
1551 [DRM_IOCTL_NR(DRM_I830_GETAGE)] = {i830_getage, 1, 0}, 1562 [DRM_IOCTL_NR(DRM_I830_GETAGE)] = {i830_getage, DRM_AUTH},
1552 [DRM_IOCTL_NR(DRM_I830_GETBUF)] = {i830_getbuf, 1, 0}, 1563 [DRM_IOCTL_NR(DRM_I830_GETBUF)] = {i830_getbuf, DRM_AUTH},
1553 [DRM_IOCTL_NR(DRM_I830_SWAP)] = {i830_swap_bufs, 1, 0}, 1564 [DRM_IOCTL_NR(DRM_I830_SWAP)] = {i830_swap_bufs, DRM_AUTH},
1554 [DRM_IOCTL_NR(DRM_I830_COPY)] = {i830_copybuf, 1, 0}, 1565 [DRM_IOCTL_NR(DRM_I830_COPY)] = {i830_copybuf, DRM_AUTH},
1555 [DRM_IOCTL_NR(DRM_I830_DOCOPY)] = {i830_docopy, 1, 0}, 1566 [DRM_IOCTL_NR(DRM_I830_DOCOPY)] = {i830_docopy, DRM_AUTH},
1556 [DRM_IOCTL_NR(DRM_I830_FLIP)] = {i830_flip_bufs, 1, 0}, 1567 [DRM_IOCTL_NR(DRM_I830_FLIP)] = {i830_flip_bufs, DRM_AUTH},
1557 [DRM_IOCTL_NR(DRM_I830_IRQ_EMIT)] = {i830_irq_emit, 1, 0}, 1568 [DRM_IOCTL_NR(DRM_I830_IRQ_EMIT)] = {i830_irq_emit, DRM_AUTH},
1558 [DRM_IOCTL_NR(DRM_I830_IRQ_WAIT)] = {i830_irq_wait, 1, 0}, 1569 [DRM_IOCTL_NR(DRM_I830_IRQ_WAIT)] = {i830_irq_wait, DRM_AUTH},
1559 [DRM_IOCTL_NR(DRM_I830_GETPARAM)] = {i830_getparam, 1, 0}, 1570 [DRM_IOCTL_NR(DRM_I830_GETPARAM)] = {i830_getparam, DRM_AUTH},
1560 [DRM_IOCTL_NR(DRM_I830_SETPARAM)] = {i830_setparam, 1, 0} 1571 [DRM_IOCTL_NR(DRM_I830_SETPARAM)] = {i830_setparam, DRM_AUTH}
1561}; 1572};
1562 1573
1563int i830_max_ioctl = DRM_ARRAY_SIZE(i830_ioctls); 1574int i830_max_ioctl = DRM_ARRAY_SIZE(i830_ioctls);
diff --git a/drivers/char/drm/i830_drv.c b/drivers/char/drm/i830_drv.c
index acd821e8fe4d..722658188f5f 100644
--- a/drivers/char/drm/i830_drv.c
+++ b/drivers/char/drm/i830_drv.c
@@ -40,37 +40,6 @@
40 40
41#include "drm_pciids.h" 41#include "drm_pciids.h"
42 42
43static int postinit(struct drm_device *dev, unsigned long flags)
44{
45 dev->counters += 4;
46 dev->types[6] = _DRM_STAT_IRQ;
47 dev->types[7] = _DRM_STAT_PRIMARY;
48 dev->types[8] = _DRM_STAT_SECONDARY;
49 dev->types[9] = _DRM_STAT_DMA;
50
51 DRM_INFO("Initialized %s %d.%d.%d %s on minor %d: %s\n",
52 DRIVER_NAME,
53 DRIVER_MAJOR,
54 DRIVER_MINOR,
55 DRIVER_PATCHLEVEL,
56 DRIVER_DATE, dev->primary.minor, pci_pretty_name(dev->pdev)
57 );
58 return 0;
59}
60
61static int version(drm_version_t * version)
62{
63 int len;
64
65 version->version_major = DRIVER_MAJOR;
66 version->version_minor = DRIVER_MINOR;
67 version->version_patchlevel = DRIVER_PATCHLEVEL;
68 DRM_COPY(version->name, DRIVER_NAME);
69 DRM_COPY(version->date, DRIVER_DATE);
70 DRM_COPY(version->desc, DRIVER_DESC);
71 return 0;
72}
73
74static struct pci_device_id pciidlist[] = { 43static struct pci_device_id pciidlist[] = {
75 i830_PCI_IDS 44 i830_PCI_IDS
76}; 45};
@@ -83,12 +52,12 @@ static struct drm_driver driver = {
83 .driver_features |= DRIVER_HAVE_IRQ | DRIVER_SHARED_IRQ, 52 .driver_features |= DRIVER_HAVE_IRQ | DRIVER_SHARED_IRQ,
84#endif 53#endif
85 .dev_priv_size = sizeof(drm_i830_buf_priv_t), 54 .dev_priv_size = sizeof(drm_i830_buf_priv_t),
86 .pretakedown = i830_driver_pretakedown, 55 .load = i830_driver_load,
87 .prerelease = i830_driver_prerelease, 56 .lastclose = i830_driver_lastclose,
57 .preclose = i830_driver_preclose,
88 .device_is_agp = i830_driver_device_is_agp, 58 .device_is_agp = i830_driver_device_is_agp,
89 .release = i830_driver_release, 59 .reclaim_buffers_locked = i830_driver_reclaim_buffers_locked,
90 .dma_quiescent = i830_driver_dma_quiescent, 60 .dma_quiescent = i830_driver_dma_quiescent,
91 .reclaim_buffers = i830_reclaim_buffers,
92 .get_map_ofs = drm_core_get_map_ofs, 61 .get_map_ofs = drm_core_get_map_ofs,
93 .get_reg_ofs = drm_core_get_reg_ofs, 62 .get_reg_ofs = drm_core_get_reg_ofs,
94#if USE_IRQS 63#if USE_IRQS
@@ -97,8 +66,6 @@ static struct drm_driver driver = {
97 .irq_uninstall = i830_driver_irq_uninstall, 66 .irq_uninstall = i830_driver_irq_uninstall,
98 .irq_handler = i830_driver_irq_handler, 67 .irq_handler = i830_driver_irq_handler,
99#endif 68#endif
100 .postinit = postinit,
101 .version = version,
102 .ioctls = i830_ioctls, 69 .ioctls = i830_ioctls,
103 .fops = { 70 .fops = {
104 .owner = THIS_MODULE, 71 .owner = THIS_MODULE,
@@ -108,13 +75,19 @@ static struct drm_driver driver = {
108 .mmap = drm_mmap, 75 .mmap = drm_mmap,
109 .poll = drm_poll, 76 .poll = drm_poll,
110 .fasync = drm_fasync, 77 .fasync = drm_fasync,
111 } 78 },
112 , 79
113 .pci_driver = { 80 .pci_driver = {
114 .name = DRIVER_NAME, 81 .name = DRIVER_NAME,
115 .id_table = pciidlist, 82 .id_table = pciidlist,
116 } 83 },
117 84
85 .name = DRIVER_NAME,
86 .desc = DRIVER_DESC,
87 .date = DRIVER_DATE,
88 .major = DRIVER_MAJOR,
89 .minor = DRIVER_MINOR,
90 .patchlevel = DRIVER_PATCHLEVEL,
118}; 91};
119 92
120static int __init i830_init(void) 93static int __init i830_init(void)
diff --git a/drivers/char/drm/i830_drv.h b/drivers/char/drm/i830_drv.h
index bc4bd49fb0cc..bf9075b576bd 100644
--- a/drivers/char/drm/i830_drv.h
+++ b/drivers/char/drm/i830_drv.h
@@ -136,10 +136,12 @@ extern irqreturn_t i830_driver_irq_handler(DRM_IRQ_ARGS);
136extern void i830_driver_irq_preinstall(drm_device_t * dev); 136extern void i830_driver_irq_preinstall(drm_device_t * dev);
137extern void i830_driver_irq_postinstall(drm_device_t * dev); 137extern void i830_driver_irq_postinstall(drm_device_t * dev);
138extern void i830_driver_irq_uninstall(drm_device_t * dev); 138extern void i830_driver_irq_uninstall(drm_device_t * dev);
139extern void i830_driver_pretakedown(drm_device_t * dev); 139extern int i830_driver_load(struct drm_device *, unsigned long flags);
140extern void i830_driver_release(drm_device_t * dev, struct file *filp); 140extern void i830_driver_preclose(drm_device_t * dev, DRMFILE filp);
141extern void i830_driver_lastclose(drm_device_t * dev);
142extern void i830_driver_reclaim_buffers_locked(drm_device_t * dev,
143 struct file *filp);
141extern int i830_driver_dma_quiescent(drm_device_t * dev); 144extern int i830_driver_dma_quiescent(drm_device_t * dev);
142extern void i830_driver_prerelease(drm_device_t * dev, DRMFILE filp);
143extern int i830_driver_device_is_agp(drm_device_t * dev); 145extern int i830_driver_device_is_agp(drm_device_t * dev);
144 146
145#define I830_READ(reg) DRM_READ32(dev_priv->mmio_map, reg) 147#define I830_READ(reg) DRM_READ32(dev_priv->mmio_map, reg)
diff --git a/drivers/char/drm/i915_dma.c b/drivers/char/drm/i915_dma.c
index f3aa0c370127..9140703da1ba 100644
--- a/drivers/char/drm/i915_dma.c
+++ b/drivers/char/drm/i915_dma.c
@@ -1,7 +1,6 @@
1/* i915_dma.c -- DMA support for the I915 -*- linux-c -*- 1/* i915_dma.c -- DMA support for the I915 -*- linux-c -*-
2 */ 2 */
3/************************************************************************** 3/*
4 *
5 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas. 4 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
6 * All Rights Reserved. 5 * All Rights Reserved.
7 * 6 *
@@ -25,7 +24,7 @@
25 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE 24 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
26 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 25 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
27 * 26 *
28 **************************************************************************/ 27 */
29 28
30#include "drmP.h" 29#include "drmP.h"
31#include "drm.h" 30#include "drm.h"
@@ -196,7 +195,7 @@ static int i915_initialize(drm_device_t * dev,
196 return 0; 195 return 0;
197} 196}
198 197
199static int i915_resume(drm_device_t * dev) 198static int i915_dma_resume(drm_device_t * dev)
200{ 199{
201 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 200 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
202 201
@@ -253,7 +252,7 @@ static int i915_dma_init(DRM_IOCTL_ARGS)
253 retcode = i915_dma_cleanup(dev); 252 retcode = i915_dma_cleanup(dev);
254 break; 253 break;
255 case I915_RESUME_DMA: 254 case I915_RESUME_DMA:
256 retcode = i915_resume(dev); 255 retcode = i915_dma_resume(dev);
257 break; 256 break;
258 default: 257 default:
259 retcode = -EINVAL; 258 retcode = -EINVAL;
@@ -654,6 +653,9 @@ static int i915_getparam(DRM_IOCTL_ARGS)
654 case I915_PARAM_ALLOW_BATCHBUFFER: 653 case I915_PARAM_ALLOW_BATCHBUFFER:
655 value = dev_priv->allow_batchbuffer ? 1 : 0; 654 value = dev_priv->allow_batchbuffer ? 1 : 0;
656 break; 655 break;
656 case I915_PARAM_LAST_DISPATCH:
657 value = READ_BREADCRUMB(dev_priv);
658 break;
657 default: 659 default:
658 DRM_ERROR("Unknown parameter %d\n", param.param); 660 DRM_ERROR("Unknown parameter %d\n", param.param);
659 return DRM_ERR(EINVAL); 661 return DRM_ERR(EINVAL);
@@ -699,7 +701,19 @@ static int i915_setparam(DRM_IOCTL_ARGS)
699 return 0; 701 return 0;
700} 702}
701 703
702void i915_driver_pretakedown(drm_device_t * dev) 704int i915_driver_load(drm_device_t *dev, unsigned long flags)
705{
706 /* i915 has 4 more counters */
707 dev->counters += 4;
708 dev->types[6] = _DRM_STAT_IRQ;
709 dev->types[7] = _DRM_STAT_PRIMARY;
710 dev->types[8] = _DRM_STAT_SECONDARY;
711 dev->types[9] = _DRM_STAT_DMA;
712
713 return 0;
714}
715
716void i915_driver_lastclose(drm_device_t * dev)
703{ 717{
704 if (dev->dev_private) { 718 if (dev->dev_private) {
705 drm_i915_private_t *dev_priv = dev->dev_private; 719 drm_i915_private_t *dev_priv = dev->dev_private;
@@ -708,7 +722,7 @@ void i915_driver_pretakedown(drm_device_t * dev)
708 i915_dma_cleanup(dev); 722 i915_dma_cleanup(dev);
709} 723}
710 724
711void i915_driver_prerelease(drm_device_t * dev, DRMFILE filp) 725void i915_driver_preclose(drm_device_t * dev, DRMFILE filp)
712{ 726{
713 if (dev->dev_private) { 727 if (dev->dev_private) {
714 drm_i915_private_t *dev_priv = dev->dev_private; 728 drm_i915_private_t *dev_priv = dev->dev_private;
@@ -717,18 +731,18 @@ void i915_driver_prerelease(drm_device_t * dev, DRMFILE filp)
717} 731}
718 732
719drm_ioctl_desc_t i915_ioctls[] = { 733drm_ioctl_desc_t i915_ioctls[] = {
720 [DRM_IOCTL_NR(DRM_I915_INIT)] = {i915_dma_init, 1, 1}, 734 [DRM_IOCTL_NR(DRM_I915_INIT)] = {i915_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
721 [DRM_IOCTL_NR(DRM_I915_FLUSH)] = {i915_flush_ioctl, 1, 0}, 735 [DRM_IOCTL_NR(DRM_I915_FLUSH)] = {i915_flush_ioctl, DRM_AUTH},
722 [DRM_IOCTL_NR(DRM_I915_FLIP)] = {i915_flip_bufs, 1, 0}, 736 [DRM_IOCTL_NR(DRM_I915_FLIP)] = {i915_flip_bufs, DRM_AUTH},
723 [DRM_IOCTL_NR(DRM_I915_BATCHBUFFER)] = {i915_batchbuffer, 1, 0}, 737 [DRM_IOCTL_NR(DRM_I915_BATCHBUFFER)] = {i915_batchbuffer, DRM_AUTH},
724 [DRM_IOCTL_NR(DRM_I915_IRQ_EMIT)] = {i915_irq_emit, 1, 0}, 738 [DRM_IOCTL_NR(DRM_I915_IRQ_EMIT)] = {i915_irq_emit, DRM_AUTH},
725 [DRM_IOCTL_NR(DRM_I915_IRQ_WAIT)] = {i915_irq_wait, 1, 0}, 739 [DRM_IOCTL_NR(DRM_I915_IRQ_WAIT)] = {i915_irq_wait, DRM_AUTH},
726 [DRM_IOCTL_NR(DRM_I915_GETPARAM)] = {i915_getparam, 1, 0}, 740 [DRM_IOCTL_NR(DRM_I915_GETPARAM)] = {i915_getparam, DRM_AUTH},
727 [DRM_IOCTL_NR(DRM_I915_SETPARAM)] = {i915_setparam, 1, 1}, 741 [DRM_IOCTL_NR(DRM_I915_SETPARAM)] = {i915_setparam, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
728 [DRM_IOCTL_NR(DRM_I915_ALLOC)] = {i915_mem_alloc, 1, 0}, 742 [DRM_IOCTL_NR(DRM_I915_ALLOC)] = {i915_mem_alloc, DRM_AUTH},
729 [DRM_IOCTL_NR(DRM_I915_FREE)] = {i915_mem_free, 1, 0}, 743 [DRM_IOCTL_NR(DRM_I915_FREE)] = {i915_mem_free, DRM_AUTH},
730 [DRM_IOCTL_NR(DRM_I915_INIT_HEAP)] = {i915_mem_init_heap, 1, 1}, 744 [DRM_IOCTL_NR(DRM_I915_INIT_HEAP)] = {i915_mem_init_heap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
731 [DRM_IOCTL_NR(DRM_I915_CMDBUFFER)] = {i915_cmdbuffer, 1, 0} 745 [DRM_IOCTL_NR(DRM_I915_CMDBUFFER)] = {i915_cmdbuffer, DRM_AUTH}
732}; 746};
733 747
734int i915_max_ioctl = DRM_ARRAY_SIZE(i915_ioctls); 748int i915_max_ioctl = DRM_ARRAY_SIZE(i915_ioctls);
diff --git a/drivers/char/drm/i915_drm.h b/drivers/char/drm/i915_drm.h
index 23e027d29080..77412ddac007 100644
--- a/drivers/char/drm/i915_drm.h
+++ b/drivers/char/drm/i915_drm.h
@@ -1,5 +1,4 @@
1/************************************************************************** 1/*
2 *
3 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas. 2 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
4 * All Rights Reserved. 3 * All Rights Reserved.
5 * 4 *
@@ -23,7 +22,7 @@
23 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE 22 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
24 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 23 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
25 * 24 *
26 **************************************************************************/ 25 */
27 26
28#ifndef _I915_DRM_H_ 27#ifndef _I915_DRM_H_
29#define _I915_DRM_H_ 28#define _I915_DRM_H_
@@ -152,6 +151,7 @@ typedef struct drm_i915_irq_wait {
152 */ 151 */
153#define I915_PARAM_IRQ_ACTIVE 1 152#define I915_PARAM_IRQ_ACTIVE 1
154#define I915_PARAM_ALLOW_BATCHBUFFER 2 153#define I915_PARAM_ALLOW_BATCHBUFFER 2
154#define I915_PARAM_LAST_DISPATCH 3
155 155
156typedef struct drm_i915_getparam { 156typedef struct drm_i915_getparam {
157 int param; 157 int param;
diff --git a/drivers/char/drm/i915_drv.c b/drivers/char/drm/i915_drv.c
index 0508240f4e3b..8e2e6095c4b3 100644
--- a/drivers/char/drm/i915_drv.c
+++ b/drivers/char/drm/i915_drv.c
@@ -1,6 +1,6 @@
1/* i915_drv.c -- i830,i845,i855,i865,i915 driver -*- linux-c -*- 1/* i915_drv.c -- i830,i845,i855,i865,i915 driver -*- linux-c -*-
2 */ 2 */
3/************************************************************************** 3/*
4 * 4 *
5 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas. 5 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
6 * All Rights Reserved. 6 * All Rights Reserved.
@@ -25,7 +25,7 @@
25 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE 25 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
26 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 26 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
27 * 27 *
28 **************************************************************************/ 28 */
29 29
30#include "drmP.h" 30#include "drmP.h"
31#include "drm.h" 31#include "drm.h"
@@ -34,48 +34,22 @@
34 34
35#include "drm_pciids.h" 35#include "drm_pciids.h"
36 36
37static int postinit(struct drm_device *dev, unsigned long flags)
38{
39 dev->counters += 4;
40 dev->types[6] = _DRM_STAT_IRQ;
41 dev->types[7] = _DRM_STAT_PRIMARY;
42 dev->types[8] = _DRM_STAT_SECONDARY;
43 dev->types[9] = _DRM_STAT_DMA;
44
45 DRM_INFO("Initialized %s %d.%d.%d %s on minor %d: %s\n",
46 DRIVER_NAME,
47 DRIVER_MAJOR,
48 DRIVER_MINOR,
49 DRIVER_PATCHLEVEL,
50 DRIVER_DATE, dev->primary.minor, pci_pretty_name(dev->pdev)
51 );
52 return 0;
53}
54
55static int version(drm_version_t * version)
56{
57 int len;
58
59 version->version_major = DRIVER_MAJOR;
60 version->version_minor = DRIVER_MINOR;
61 version->version_patchlevel = DRIVER_PATCHLEVEL;
62 DRM_COPY(version->name, DRIVER_NAME);
63 DRM_COPY(version->date, DRIVER_DATE);
64 DRM_COPY(version->desc, DRIVER_DESC);
65 return 0;
66}
67
68static struct pci_device_id pciidlist[] = { 37static struct pci_device_id pciidlist[] = {
69 i915_PCI_IDS 38 i915_PCI_IDS
70}; 39};
71 40
72static struct drm_driver driver = { 41static struct drm_driver driver = {
42 /* don't use mtrr's here, the Xserver or user space app should
43 * deal with them for intel hardware.
44 */
73 .driver_features = 45 .driver_features =
74 DRIVER_USE_AGP | DRIVER_REQUIRE_AGP | DRIVER_USE_MTRR | 46 DRIVER_USE_AGP | DRIVER_REQUIRE_AGP | /* DRIVER_USE_MTRR |*/
75 DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED, 47 DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_IRQ_VBL,
76 .pretakedown = i915_driver_pretakedown, 48 .load = i915_driver_load,
77 .prerelease = i915_driver_prerelease, 49 .lastclose = i915_driver_lastclose,
50 .preclose = i915_driver_preclose,
78 .device_is_agp = i915_driver_device_is_agp, 51 .device_is_agp = i915_driver_device_is_agp,
52 .vblank_wait = i915_driver_vblank_wait,
79 .irq_preinstall = i915_driver_irq_preinstall, 53 .irq_preinstall = i915_driver_irq_preinstall,
80 .irq_postinstall = i915_driver_irq_postinstall, 54 .irq_postinstall = i915_driver_irq_postinstall,
81 .irq_uninstall = i915_driver_irq_uninstall, 55 .irq_uninstall = i915_driver_irq_uninstall,
@@ -83,8 +57,6 @@ static struct drm_driver driver = {
83 .reclaim_buffers = drm_core_reclaim_buffers, 57 .reclaim_buffers = drm_core_reclaim_buffers,
84 .get_map_ofs = drm_core_get_map_ofs, 58 .get_map_ofs = drm_core_get_map_ofs,
85 .get_reg_ofs = drm_core_get_reg_ofs, 59 .get_reg_ofs = drm_core_get_reg_ofs,
86 .postinit = postinit,
87 .version = version,
88 .ioctls = i915_ioctls, 60 .ioctls = i915_ioctls,
89 .fops = { 61 .fops = {
90 .owner = THIS_MODULE, 62 .owner = THIS_MODULE,
@@ -97,11 +69,19 @@ static struct drm_driver driver = {
97#ifdef CONFIG_COMPAT 69#ifdef CONFIG_COMPAT
98 .compat_ioctl = i915_compat_ioctl, 70 .compat_ioctl = i915_compat_ioctl,
99#endif 71#endif
100 }, 72 },
73
101 .pci_driver = { 74 .pci_driver = {
102 .name = DRIVER_NAME, 75 .name = DRIVER_NAME,
103 .id_table = pciidlist, 76 .id_table = pciidlist,
104 } 77 },
78
79 .name = DRIVER_NAME,
80 .desc = DRIVER_DESC,
81 .date = DRIVER_DATE,
82 .major = DRIVER_MAJOR,
83 .minor = DRIVER_MINOR,
84 .patchlevel = DRIVER_PATCHLEVEL,
105}; 85};
106 86
107static int __init i915_init(void) 87static int __init i915_init(void)
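
A note on the wiring above, sketched from the DRM core of this period rather than from this hunk: DRIVER_IRQ_VBL in driver_features advertises vblank support, and the core's wait-vblank ioctl then defers to the driver's vblank_wait hook. Roughly (hypothetical wrapper; drm_core_check_feature() and the vblank_wait hook are real, the helper name is not):

static int wait_vblank_via_driver(drm_device_t *dev, unsigned int *sequence)
{
	if (!drm_core_check_feature(dev, DRIVER_IRQ_VBL))
		return -EINVAL;
	/* for the i915 this resolves to i915_driver_vblank_wait() */
	return dev->driver->vblank_wait(dev, sequence);
}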
diff --git a/drivers/char/drm/i915_drv.h b/drivers/char/drm/i915_drv.h
index 17e457c73dc7..c6c71b45f101 100644
--- a/drivers/char/drm/i915_drv.h
+++ b/drivers/char/drm/i915_drv.h
@@ -1,6 +1,6 @@
1/* i915_drv.h -- Private header for the I915 driver -*- linux-c -*- 1/* i915_drv.h -- Private header for the I915 driver -*- linux-c -*-
2 */ 2 */
3/************************************************************************** 3/*
4 * 4 *
5 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas. 5 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
6 * All Rights Reserved. 6 * All Rights Reserved.
@@ -25,7 +25,7 @@
25 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE 25 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
26 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 26 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
27 * 27 *
28 **************************************************************************/ 28 */
29 29
30#ifndef _I915_DRV_H_ 30#ifndef _I915_DRV_H_
31#define _I915_DRV_H_ 31#define _I915_DRV_H_
@@ -37,21 +37,18 @@
37 37
38#define DRIVER_NAME "i915" 38#define DRIVER_NAME "i915"
39#define DRIVER_DESC "Intel Graphics" 39#define DRIVER_DESC "Intel Graphics"
40#define DRIVER_DATE "20040405" 40#define DRIVER_DATE "20051209"
41 41
42/* Interface history: 42/* Interface history:
43 * 43 *
44 * 1.1: Original. 44 * 1.1: Original.
45 * 1.2: Add Power Management
46 * 1.3: Add vblank support
45 */ 47 */
46#define DRIVER_MAJOR 1 48#define DRIVER_MAJOR 1
47#define DRIVER_MINOR 1 49#define DRIVER_MINOR 3
48#define DRIVER_PATCHLEVEL 0 50#define DRIVER_PATCHLEVEL 0
49 51
50/* We use our own dma mechanisms, not the drm template code. However,
51 * the shared IRQ code is useful to us:
52 */
53#define __HAVE_PM 1
54
55typedef struct _drm_i915_ring_buffer { 52typedef struct _drm_i915_ring_buffer {
56 int tail_mask; 53 int tail_mask;
57 unsigned long Start; 54 unsigned long Start;
@@ -97,6 +94,7 @@ typedef struct drm_i915_private {
97 int tex_lru_log_granularity; 94 int tex_lru_log_granularity;
98 int allow_batchbuffer; 95 int allow_batchbuffer;
99 struct mem_block *agp_heap; 96 struct mem_block *agp_heap;
97 unsigned int sr01, adpa, ppcr, dvob, dvoc, lvds;
100} drm_i915_private_t; 98} drm_i915_private_t;
101 99
102extern drm_ioctl_desc_t i915_ioctls[]; 100extern drm_ioctl_desc_t i915_ioctls[];
@@ -104,14 +102,18 @@ extern int i915_max_ioctl;
104 102
105 /* i915_dma.c */ 103 /* i915_dma.c */
106extern void i915_kernel_lost_context(drm_device_t * dev); 104extern void i915_kernel_lost_context(drm_device_t * dev);
107extern void i915_driver_pretakedown(drm_device_t * dev); 105extern int i915_driver_load(struct drm_device *, unsigned long flags);
108extern void i915_driver_prerelease(drm_device_t * dev, DRMFILE filp); 106extern void i915_driver_lastclose(drm_device_t * dev);
107extern void i915_driver_preclose(drm_device_t * dev, DRMFILE filp);
109extern int i915_driver_device_is_agp(drm_device_t * dev); 108extern int i915_driver_device_is_agp(drm_device_t * dev);
109extern long i915_compat_ioctl(struct file *filp, unsigned int cmd,
110 unsigned long arg);
110 111
111/* i915_irq.c */ 112/* i915_irq.c */
112extern int i915_irq_emit(DRM_IOCTL_ARGS); 113extern int i915_irq_emit(DRM_IOCTL_ARGS);
113extern int i915_irq_wait(DRM_IOCTL_ARGS); 114extern int i915_irq_wait(DRM_IOCTL_ARGS);
114 115
116extern int i915_driver_vblank_wait(drm_device_t *dev, unsigned int *sequence);
115extern irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS); 117extern irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS);
116extern void i915_driver_irq_preinstall(drm_device_t * dev); 118extern void i915_driver_irq_preinstall(drm_device_t * dev);
117extern void i915_driver_irq_postinstall(drm_device_t * dev); 119extern void i915_driver_irq_postinstall(drm_device_t * dev);
@@ -125,13 +127,10 @@ extern void i915_mem_takedown(struct mem_block **heap);
125extern void i915_mem_release(drm_device_t * dev, 127extern void i915_mem_release(drm_device_t * dev,
126 DRMFILE filp, struct mem_block *heap); 128 DRMFILE filp, struct mem_block *heap);
127 129
128extern long i915_compat_ioctl(struct file *filp, unsigned int cmd, 130#define I915_READ(reg) DRM_READ32(dev_priv->mmio_map, (reg))
129 unsigned long arg); 131#define I915_WRITE(reg,val) DRM_WRITE32(dev_priv->mmio_map, (reg), (val))
130 132#define I915_READ16(reg) DRM_READ16(dev_priv->mmio_map, (reg))
131#define I915_READ(reg) DRM_READ32(dev_priv->mmio_map, reg) 133#define I915_WRITE16(reg,val) DRM_WRITE16(dev_priv->mmio_map, (reg), (val))
132#define I915_WRITE(reg,val) DRM_WRITE32(dev_priv->mmio_map, reg, val)
133#define I915_READ16(reg) DRM_READ16(dev_priv->mmio_map, reg)
134#define I915_WRITE16(reg,val) DRM_WRITE16(dev_priv->mmio_map, reg, val)
135 134
136#define I915_VERBOSE 0 135#define I915_VERBOSE 0
137 136
@@ -195,6 +194,13 @@ extern int i915_wait_ring(drm_device_t * dev, int n, const char *caller);
195#define PPCR 0x61204 194#define PPCR 0x61204
196#define PPCR_ON (1<<0) 195#define PPCR_ON (1<<0)
197 196
197#define DVOB 0x61140
198#define DVOB_ON (1<<31)
199#define DVOC 0x61160
200#define DVOC_ON (1<<31)
201#define LVDS 0x61180
202#define LVDS_ON (1<<31)
203
198#define ADPA 0x61100 204#define ADPA 0x61100
199#define ADPA_DPMS_MASK (~(3<<10)) 205#define ADPA_DPMS_MASK (~(3<<10))
200#define ADPA_DPMS_ON (0<<10) 206#define ADPA_DPMS_ON (0<<10)
@@ -258,4 +264,6 @@ extern int i915_wait_ring(drm_device_t * dev, int n, const char *caller);
258 264
259#define CMD_OP_DESTBUFFER_INFO ((0x3<<29)|(0x1d<<24)|(0x8e<<16)|1) 265#define CMD_OP_DESTBUFFER_INFO ((0x3<<29)|(0x1d<<24)|(0x8e<<16)|1)
260 266
267#define READ_BREADCRUMB(dev_priv) (((u32 *)(dev_priv->hw_status_page))[5])
268
261#endif 269#endif
diff --git a/drivers/char/drm/i915_irq.c b/drivers/char/drm/i915_irq.c
index 4fa448ee846b..a1381c61aa63 100644
--- a/drivers/char/drm/i915_irq.c
+++ b/drivers/char/drm/i915_irq.c
@@ -1,7 +1,6 @@
1/* i915_dma.c -- DMA support for the I915 -*- linux-c -*- 1/* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
2 */ 2 */
3/************************************************************************** 3/*
4 *
5 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas. 4 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
6 * All Rights Reserved. 5 * All Rights Reserved.
7 * 6 *
@@ -25,16 +24,18 @@
25 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE 24 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
26 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 25 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
27 * 26 *
28 **************************************************************************/ 27 */
29 28
30#include "drmP.h" 29#include "drmP.h"
31#include "drm.h" 30#include "drm.h"
32#include "i915_drm.h" 31#include "i915_drm.h"
33#include "i915_drv.h" 32#include "i915_drv.h"
34 33
35#define USER_INT_FLAG 0x2 34#define USER_INT_FLAG (1<<1)
35#define VSYNC_PIPEB_FLAG (1<<5)
36#define VSYNC_PIPEA_FLAG (1<<7)
37
36#define MAX_NOPID ((u32)~0) 38#define MAX_NOPID ((u32)~0)
37#define READ_BREADCRUMB(dev_priv) (((u32*)(dev_priv->hw_status_page))[5])
38 39
39irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS) 40irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
40{ 41{
@@ -43,7 +44,7 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
43 u16 temp; 44 u16 temp;
44 45
45 temp = I915_READ16(I915REG_INT_IDENTITY_R); 46 temp = I915_READ16(I915REG_INT_IDENTITY_R);
46 temp &= USER_INT_FLAG; 47 temp &= (USER_INT_FLAG | VSYNC_PIPEA_FLAG);
47 48
48 DRM_DEBUG("%s flag=%08x\n", __FUNCTION__, temp); 49 DRM_DEBUG("%s flag=%08x\n", __FUNCTION__, temp);
49 50
@@ -51,7 +52,15 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
51 return IRQ_NONE; 52 return IRQ_NONE;
52 53
53 I915_WRITE16(I915REG_INT_IDENTITY_R, temp); 54 I915_WRITE16(I915REG_INT_IDENTITY_R, temp);
54 DRM_WAKEUP(&dev_priv->irq_queue); 55
56 if (temp & USER_INT_FLAG)
57 DRM_WAKEUP(&dev_priv->irq_queue);
58
59 if (temp & VSYNC_PIPEA_FLAG) {
60 atomic_inc(&dev->vbl_received);
61 DRM_WAKEUP(&dev->vbl_queue);
62 drm_vbl_send_signals(dev);
63 }
55 64
56 return IRQ_HANDLED; 65 return IRQ_HANDLED;
57} 66}
@@ -102,6 +111,27 @@ static int i915_wait_irq(drm_device_t * dev, int irq_nr)
102 return ret; 111 return ret;
103} 112}
104 113
114int i915_driver_vblank_wait(drm_device_t *dev, unsigned int *sequence)
115{
116 drm_i915_private_t *dev_priv = dev->dev_private;
117 unsigned int cur_vblank;
118 int ret = 0;
119
120 if (!dev_priv) {
121 DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
122 return DRM_ERR(EINVAL);
123 }
124
125 DRM_WAIT_ON(ret, dev->vbl_queue, 3 * DRM_HZ,
126 (((cur_vblank = atomic_read(&dev->vbl_received))
127 - *sequence) <= (1<<23)));
128
129 *sequence = cur_vblank;
130
131 return ret;
132}
133
134
105/* Needs the lock as it touches the ring. 135/* Needs the lock as it touches the ring.
106 */ 136 */
107int i915_irq_emit(DRM_IOCTL_ARGS) 137int i915_irq_emit(DRM_IOCTL_ARGS)
@@ -165,7 +195,7 @@ void i915_driver_irq_postinstall(drm_device_t * dev)
165{ 195{
166 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 196 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
167 197
168 I915_WRITE16(I915REG_INT_ENABLE_R, USER_INT_FLAG); 198 I915_WRITE16(I915REG_INT_ENABLE_R, USER_INT_FLAG | VSYNC_PIPEA_FLAG);
169 DRM_INIT_WAITQUEUE(&dev_priv->irq_queue); 199 DRM_INIT_WAITQUEUE(&dev_priv->irq_queue);
170} 200}
171 201
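
The wait condition in i915_driver_vblank_wait() above relies on unsigned wrap-around so that a wait requested shortly before the 32-bit counter wraps still completes. A self-contained sketch of the comparison (hypothetical helper; same arithmetic as the DRM_WAIT_ON condition):

/* true once cur has reached req, treating any lead of up to 2^23 as "passed" */
static int vblank_passed(unsigned int cur, unsigned int req)
{
	return (cur - req) <= (1 << 23);
}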
diff --git a/drivers/char/drm/i915_mem.c b/drivers/char/drm/i915_mem.c
index 13176d136a99..ba87ff17ff64 100644
--- a/drivers/char/drm/i915_mem.c
+++ b/drivers/char/drm/i915_mem.c
@@ -1,7 +1,6 @@
1/* i915_mem.c -- Simple agp/fb memory manager for i915 -*- linux-c -*- 1/* i915_mem.c -- Simple agp/fb memory manager for i915 -*- linux-c -*-
2 */ 2 */
3/************************************************************************** 3/*
4 *
5 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas. 4 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
6 * All Rights Reserved. 5 * All Rights Reserved.
7 * 6 *
@@ -25,7 +24,7 @@
25 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE 24 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
26 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 25 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
27 * 26 *
28 **************************************************************************/ 27 */
29 28
30#include "drmP.h" 29#include "drmP.h"
31#include "drm.h" 30#include "drm.h"
diff --git a/drivers/char/drm/mga_dma.c b/drivers/char/drm/mga_dma.c
index 70dc7f64b7b9..c2a4bac14521 100644
--- a/drivers/char/drm/mga_dma.c
+++ b/drivers/char/drm/mga_dma.c
@@ -44,7 +44,9 @@
44#define MGA_DEFAULT_USEC_TIMEOUT 10000 44#define MGA_DEFAULT_USEC_TIMEOUT 10000
45#define MGA_FREELIST_DEBUG 0 45#define MGA_FREELIST_DEBUG 0
46 46
47static int mga_do_cleanup_dma(drm_device_t * dev); 47#define MINIMAL_CLEANUP 0
48#define FULL_CLEANUP 1
49static int mga_do_cleanup_dma(drm_device_t *dev, int full_cleanup);
48 50
49/* ================================================================ 51/* ================================================================
50 * Engine control 52 * Engine control
@@ -391,7 +393,7 @@ int mga_freelist_put(drm_device_t * dev, drm_buf_t * buf)
391 * DMA initialization, cleanup 393 * DMA initialization, cleanup
392 */ 394 */
393 395
394int mga_driver_preinit(drm_device_t * dev, unsigned long flags) 396int mga_driver_load(drm_device_t * dev, unsigned long flags)
395{ 397{
396 drm_mga_private_t *dev_priv; 398 drm_mga_private_t *dev_priv;
397 399
@@ -405,6 +407,14 @@ int mga_driver_preinit(drm_device_t * dev, unsigned long flags)
405 dev_priv->usec_timeout = MGA_DEFAULT_USEC_TIMEOUT; 407 dev_priv->usec_timeout = MGA_DEFAULT_USEC_TIMEOUT;
406 dev_priv->chipset = flags; 408 dev_priv->chipset = flags;
407 409
410 dev_priv->mmio_base = drm_get_resource_start(dev, 1);
411 dev_priv->mmio_size = drm_get_resource_len(dev, 1);
412
413 dev->counters += 3;
414 dev->types[6] = _DRM_STAT_IRQ;
415 dev->types[7] = _DRM_STAT_PRIMARY;
416 dev->types[8] = _DRM_STAT_SECONDARY;
417
408 return 0; 418 return 0;
409} 419}
410 420
@@ -438,17 +448,19 @@ static int mga_do_agp_dma_bootstrap(drm_device_t * dev,
438 drm_buf_desc_t req; 448 drm_buf_desc_t req;
439 drm_agp_mode_t mode; 449 drm_agp_mode_t mode;
440 drm_agp_info_t info; 450 drm_agp_info_t info;
451 drm_agp_buffer_t agp_req;
452 drm_agp_binding_t bind_req;
441 453
442 /* Acquire AGP. */ 454 /* Acquire AGP. */
443 err = drm_agp_acquire(dev); 455 err = drm_agp_acquire(dev);
444 if (err) { 456 if (err) {
445 DRM_ERROR("Unable to acquire AGP\n"); 457 DRM_ERROR("Unable to acquire AGP: %d\n", err);
446 return err; 458 return err;
447 } 459 }
448 460
449 err = drm_agp_info(dev, &info); 461 err = drm_agp_info(dev, &info);
450 if (err) { 462 if (err) {
451 DRM_ERROR("Unable to get AGP info\n"); 463 DRM_ERROR("Unable to get AGP info: %d\n", err);
452 return err; 464 return err;
453 } 465 }
454 466
@@ -472,18 +484,24 @@ static int mga_do_agp_dma_bootstrap(drm_device_t * dev,
472 } 484 }
473 485
474 /* Allocate and bind AGP memory. */ 486 /* Allocate and bind AGP memory. */
475 dev_priv->agp_pages = agp_size / PAGE_SIZE; 487 agp_req.size = agp_size;
476 dev_priv->agp_mem = drm_alloc_agp(dev, dev_priv->agp_pages, 0); 488 agp_req.type = 0;
477 if (dev_priv->agp_mem == NULL) { 489 err = drm_agp_alloc(dev, &agp_req);
478 dev_priv->agp_pages = 0; 490 if (err) {
491 dev_priv->agp_size = 0;
479 DRM_ERROR("Unable to allocate %uMB AGP memory\n", 492 DRM_ERROR("Unable to allocate %uMB AGP memory\n",
480 dma_bs->agp_size); 493 dma_bs->agp_size);
481 return DRM_ERR(ENOMEM); 494 return err;
482 } 495 }
496
497 dev_priv->agp_size = agp_size;
498 dev_priv->agp_handle = agp_req.handle;
483 499
484 err = drm_bind_agp(dev_priv->agp_mem, 0); 500 bind_req.handle = agp_req.handle;
501 bind_req.offset = 0;
502 err = drm_agp_bind(dev, &bind_req);
485 if (err) { 503 if (err) {
486 DRM_ERROR("Unable to bind AGP memory\n"); 504 DRM_ERROR("Unable to bind AGP memory: %d\n", err);
487 return err; 505 return err;
488 } 506 }
489 507
@@ -497,7 +515,7 @@ static int mga_do_agp_dma_bootstrap(drm_device_t * dev,
497 err = drm_addmap(dev, offset, warp_size, 515 err = drm_addmap(dev, offset, warp_size,
498 _DRM_AGP, _DRM_READ_ONLY, &dev_priv->warp); 516 _DRM_AGP, _DRM_READ_ONLY, &dev_priv->warp);
499 if (err) { 517 if (err) {
500 DRM_ERROR("Unable to map WARP microcode\n"); 518 DRM_ERROR("Unable to map WARP microcode: %d\n", err);
501 return err; 519 return err;
502 } 520 }
503 521
@@ -505,7 +523,7 @@ static int mga_do_agp_dma_bootstrap(drm_device_t * dev,
505 err = drm_addmap(dev, offset, dma_bs->primary_size, 523 err = drm_addmap(dev, offset, dma_bs->primary_size,
506 _DRM_AGP, _DRM_READ_ONLY, &dev_priv->primary); 524 _DRM_AGP, _DRM_READ_ONLY, &dev_priv->primary);
507 if (err) { 525 if (err) {
508 DRM_ERROR("Unable to map primary DMA region\n"); 526 DRM_ERROR("Unable to map primary DMA region: %d\n", err);
509 return err; 527 return err;
510 } 528 }
511 529
@@ -513,7 +531,7 @@ static int mga_do_agp_dma_bootstrap(drm_device_t * dev,
513 err = drm_addmap(dev, offset, secondary_size, 531 err = drm_addmap(dev, offset, secondary_size,
514 _DRM_AGP, 0, &dev->agp_buffer_map); 532 _DRM_AGP, 0, &dev->agp_buffer_map);
515 if (err) { 533 if (err) {
516 DRM_ERROR("Unable to map secondary DMA region\n"); 534 DRM_ERROR("Unable to map secondary DMA region: %d\n", err);
517 return err; 535 return err;
518 } 536 }
519 537
@@ -525,15 +543,29 @@ static int mga_do_agp_dma_bootstrap(drm_device_t * dev,
525 543
526 err = drm_addbufs_agp(dev, &req); 544 err = drm_addbufs_agp(dev, &req);
527 if (err) { 545 if (err) {
528 DRM_ERROR("Unable to add secondary DMA buffers\n"); 546 DRM_ERROR("Unable to add secondary DMA buffers: %d\n", err);
529 return err; 547 return err;
530 } 548 }
531 549
550 {
551 drm_map_list_t *_entry;
552 unsigned long agp_token = 0;
553
554 list_for_each_entry(_entry, &dev->maplist->head, head) {
555 if (_entry->map == dev->agp_buffer_map)
556 agp_token = _entry->user_token;
557 }
558 if (!agp_token)
559 return -EFAULT;
560
561 dev->agp_buffer_token = agp_token;
562 }
563
532 offset += secondary_size; 564 offset += secondary_size;
533 err = drm_addmap(dev, offset, agp_size - offset, 565 err = drm_addmap(dev, offset, agp_size - offset,
534 _DRM_AGP, 0, &dev_priv->agp_textures); 566 _DRM_AGP, 0, &dev_priv->agp_textures);
535 if (err) { 567 if (err) {
536 DRM_ERROR("Unable to map AGP texture region\n"); 568 DRM_ERROR("Unable to map AGP texture region %d\n", err);
537 return err; 569 return err;
538 } 570 }
539 571
@@ -603,7 +635,8 @@ static int mga_do_pci_dma_bootstrap(drm_device_t * dev,
603 err = drm_addmap(dev, 0, warp_size, _DRM_CONSISTENT, 635 err = drm_addmap(dev, 0, warp_size, _DRM_CONSISTENT,
604 _DRM_READ_ONLY, &dev_priv->warp); 636 _DRM_READ_ONLY, &dev_priv->warp);
605 if (err != 0) { 637 if (err != 0) {
606 DRM_ERROR("Unable to create mapping for WARP microcode\n"); 638 DRM_ERROR("Unable to create mapping for WARP microcode: %d\n",
639 err);
607 return err; 640 return err;
608 } 641 }
609 642
@@ -622,7 +655,7 @@ static int mga_do_pci_dma_bootstrap(drm_device_t * dev,
622 } 655 }
623 656
624 if (err != 0) { 657 if (err != 0) {
625 DRM_ERROR("Unable to allocate primary DMA region\n"); 658 DRM_ERROR("Unable to allocate primary DMA region: %d\n", err);
626 return DRM_ERR(ENOMEM); 659 return DRM_ERR(ENOMEM);
627 } 660 }
628 661
@@ -646,7 +679,7 @@ static int mga_do_pci_dma_bootstrap(drm_device_t * dev,
646 } 679 }
647 680
648 if (bin_count == 0) { 681 if (bin_count == 0) {
649 DRM_ERROR("Unable to add secondary DMA buffers\n"); 682 DRM_ERROR("Unable to add secondary DMA buffers: %d\n", err);
650 return err; 683 return err;
651 } 684 }
652 685
@@ -682,7 +715,7 @@ static int mga_do_dma_bootstrap(drm_device_t * dev,
682 err = drm_addmap(dev, dev_priv->mmio_base, dev_priv->mmio_size, 715 err = drm_addmap(dev, dev_priv->mmio_base, dev_priv->mmio_size,
683 _DRM_REGISTERS, _DRM_READ_ONLY, &dev_priv->mmio); 716 _DRM_REGISTERS, _DRM_READ_ONLY, &dev_priv->mmio);
684 if (err) { 717 if (err) {
685 DRM_ERROR("Unable to map MMIO region\n"); 718 DRM_ERROR("Unable to map MMIO region: %d\n", err);
686 return err; 719 return err;
687 } 720 }
688 721
@@ -690,7 +723,7 @@ static int mga_do_dma_bootstrap(drm_device_t * dev,
690 _DRM_READ_ONLY | _DRM_LOCKED | _DRM_KERNEL, 723 _DRM_READ_ONLY | _DRM_LOCKED | _DRM_KERNEL,
691 &dev_priv->status); 724 &dev_priv->status);
692 if (err) { 725 if (err) {
693 DRM_ERROR("Unable to map status region\n"); 726 DRM_ERROR("Unable to map status region: %d\n", err);
694 return err; 727 return err;
695 } 728 }
696 729
@@ -708,7 +741,7 @@ static int mga_do_dma_bootstrap(drm_device_t * dev,
708 */ 741 */
709 742
710 if (err) { 743 if (err) {
711 mga_do_cleanup_dma(dev); 744 mga_do_cleanup_dma(dev, MINIMAL_CLEANUP);
712 } 745 }
713 746
714 /* Not only do we want to try and initialize PCI cards for PCI DMA, 747
@@ -731,35 +764,32 @@ int mga_dma_bootstrap(DRM_IOCTL_ARGS)
731 DRM_DEVICE; 764 DRM_DEVICE;
732 drm_mga_dma_bootstrap_t bootstrap; 765 drm_mga_dma_bootstrap_t bootstrap;
733 int err; 766 int err;
767 static const int modes[] = { 0, 1, 2, 2, 4, 4, 4, 4 };
768 const drm_mga_private_t *const dev_priv =
769 (drm_mga_private_t *) dev->dev_private;
734 770
735 DRM_COPY_FROM_USER_IOCTL(bootstrap, 771 DRM_COPY_FROM_USER_IOCTL(bootstrap,
736 (drm_mga_dma_bootstrap_t __user *) data, 772 (drm_mga_dma_bootstrap_t __user *) data,
737 sizeof(bootstrap)); 773 sizeof(bootstrap));
738 774
739 err = mga_do_dma_bootstrap(dev, &bootstrap); 775 err = mga_do_dma_bootstrap(dev, &bootstrap);
740 if (!err) { 776 if (err) {
741 static const int modes[] = { 0, 1, 2, 2, 4, 4, 4, 4 }; 777 mga_do_cleanup_dma(dev, FULL_CLEANUP);
742 const drm_mga_private_t *const dev_priv = 778 return err;
743 (drm_mga_private_t *) dev->dev_private; 779 }
744
745 if (dev_priv->agp_textures != NULL) {
746 bootstrap.texture_handle =
747 dev_priv->agp_textures->offset;
748 bootstrap.texture_size = dev_priv->agp_textures->size;
749 } else {
750 bootstrap.texture_handle = 0;
751 bootstrap.texture_size = 0;
752 }
753 780
754 bootstrap.agp_mode = modes[bootstrap.agp_mode & 0x07]; 781 if (dev_priv->agp_textures != NULL) {
755 if (DRM_COPY_TO_USER((void __user *)data, &bootstrap, 782 bootstrap.texture_handle = dev_priv->agp_textures->offset;
756 sizeof(bootstrap))) { 783 bootstrap.texture_size = dev_priv->agp_textures->size;
757 err = DRM_ERR(EFAULT);
758 }
759 } else { 784 } else {
760 mga_do_cleanup_dma(dev); 785 bootstrap.texture_handle = 0;
786 bootstrap.texture_size = 0;
761 } 787 }
762 788
789 bootstrap.agp_mode = modes[bootstrap.agp_mode & 0x07];
790 DRM_COPY_TO_USER_IOCTL((drm_mga_dma_bootstrap_t __user *)data,
791 bootstrap, sizeof(bootstrap));
792
763 return err; 793 return err;
764} 794}
765 795
@@ -853,13 +883,13 @@ static int mga_do_init_dma(drm_device_t * dev, drm_mga_init_t * init)
853 883
854 ret = mga_warp_install_microcode(dev_priv); 884 ret = mga_warp_install_microcode(dev_priv);
855 if (ret < 0) { 885 if (ret < 0) {
856 DRM_ERROR("failed to install WARP ucode!\n"); 886 DRM_ERROR("failed to install WARP ucode!: %d\n", ret);
857 return ret; 887 return ret;
858 } 888 }
859 889
860 ret = mga_warp_init(dev_priv); 890 ret = mga_warp_init(dev_priv);
861 if (ret < 0) { 891 if (ret < 0) {
862 DRM_ERROR("failed to init WARP engine!\n"); 892 DRM_ERROR("failed to init WARP engine!: %d\n", ret);
863 return ret; 893 return ret;
864 } 894 }
865 895
@@ -904,7 +934,7 @@ static int mga_do_init_dma(drm_device_t * dev, drm_mga_init_t * init)
904 return 0; 934 return 0;
905} 935}
906 936
907static int mga_do_cleanup_dma(drm_device_t * dev) 937static int mga_do_cleanup_dma(drm_device_t *dev, int full_cleanup)
908{ 938{
909 int err = 0; 939 int err = 0;
910 DRM_DEBUG("\n"); 940 DRM_DEBUG("\n");
@@ -932,31 +962,39 @@ static int mga_do_cleanup_dma(drm_device_t * dev)
932 962
933 if (dev_priv->used_new_dma_init) { 963 if (dev_priv->used_new_dma_init) {
934#if __OS_HAS_AGP 964#if __OS_HAS_AGP
935 if (dev_priv->agp_mem != NULL) { 965 if (dev_priv->agp_handle != 0) {
936 dev_priv->agp_textures = NULL; 966 drm_agp_binding_t unbind_req;
937 drm_unbind_agp(dev_priv->agp_mem); 967 drm_agp_buffer_t free_req;
968
969 unbind_req.handle = dev_priv->agp_handle;
970 drm_agp_unbind(dev, &unbind_req);
938 971
939 drm_free_agp(dev_priv->agp_mem, 972 free_req.handle = dev_priv->agp_handle;
940 dev_priv->agp_pages); 973 drm_agp_free(dev, &free_req);
941 dev_priv->agp_pages = 0; 974
942 dev_priv->agp_mem = NULL; 975 dev_priv->agp_textures = NULL;
976 dev_priv->agp_size = 0;
977 dev_priv->agp_handle = 0;
943 } 978 }
944 979
945 if ((dev->agp != NULL) && dev->agp->acquired) { 980 if ((dev->agp != NULL) && dev->agp->acquired) {
946 err = drm_agp_release(dev); 981 err = drm_agp_release(dev);
947 } 982 }
948#endif 983#endif
949 dev_priv->used_new_dma_init = 0;
950 } 984 }
951 985
952 dev_priv->warp = NULL; 986 dev_priv->warp = NULL;
953 dev_priv->primary = NULL; 987 dev_priv->primary = NULL;
954 dev_priv->mmio = NULL;
955 dev_priv->status = NULL;
956 dev_priv->sarea = NULL; 988 dev_priv->sarea = NULL;
957 dev_priv->sarea_priv = NULL; 989 dev_priv->sarea_priv = NULL;
958 dev->agp_buffer_map = NULL; 990 dev->agp_buffer_map = NULL;
959 991
992 if (full_cleanup) {
993 dev_priv->mmio = NULL;
994 dev_priv->status = NULL;
995 dev_priv->used_new_dma_init = 0;
996 }
997
960 memset(&dev_priv->prim, 0, sizeof(dev_priv->prim)); 998 memset(&dev_priv->prim, 0, sizeof(dev_priv->prim));
961 dev_priv->warp_pipe = 0; 999 dev_priv->warp_pipe = 0;
962 memset(dev_priv->warp_pipe_phys, 0, 1000 memset(dev_priv->warp_pipe_phys, 0,
@@ -967,7 +1005,7 @@ static int mga_do_cleanup_dma(drm_device_t * dev)
967 } 1005 }
968 } 1006 }
969 1007
970 return err; 1008 return 0;
971} 1009}
972 1010
973int mga_dma_init(DRM_IOCTL_ARGS) 1011int mga_dma_init(DRM_IOCTL_ARGS)
@@ -985,11 +1023,11 @@ int mga_dma_init(DRM_IOCTL_ARGS)
985 case MGA_INIT_DMA: 1023 case MGA_INIT_DMA:
986 err = mga_do_init_dma(dev, &init); 1024 err = mga_do_init_dma(dev, &init);
987 if (err) { 1025 if (err) {
988 (void)mga_do_cleanup_dma(dev); 1026 (void)mga_do_cleanup_dma(dev, FULL_CLEANUP);
989 } 1027 }
990 return err; 1028 return err;
991 case MGA_CLEANUP_DMA: 1029 case MGA_CLEANUP_DMA:
992 return mga_do_cleanup_dma(dev); 1030 return mga_do_cleanup_dma(dev, FULL_CLEANUP);
993 } 1031 }
994 1032
995 return DRM_ERR(EINVAL); 1033 return DRM_ERR(EINVAL);
@@ -1118,7 +1156,7 @@ int mga_dma_buffers(DRM_IOCTL_ARGS)
1118/** 1156/**
1119 * Called just before the module is unloaded. 1157 * Called just before the module is unloaded.
1120 */ 1158 */
1121int mga_driver_postcleanup(drm_device_t * dev) 1159int mga_driver_unload(drm_device_t * dev)
1122{ 1160{
1123 drm_free(dev->dev_private, sizeof(drm_mga_private_t), DRM_MEM_DRIVER); 1161 drm_free(dev->dev_private, sizeof(drm_mga_private_t), DRM_MEM_DRIVER);
1124 dev->dev_private = NULL; 1162 dev->dev_private = NULL;
@@ -1129,9 +1167,9 @@ int mga_driver_postcleanup(drm_device_t * dev)
1129/** 1167/**
1130 * Called when the last opener of the device is closed. 1168 * Called when the last opener of the device is closed.
1131 */ 1169 */
1132void mga_driver_pretakedown(drm_device_t * dev) 1170void mga_driver_lastclose(drm_device_t * dev)
1133{ 1171{
1134 mga_do_cleanup_dma(dev); 1172 mga_do_cleanup_dma(dev, FULL_CLEANUP);
1135} 1173}
1136 1174
1137int mga_driver_dma_quiescent(drm_device_t * dev) 1175int mga_driver_dma_quiescent(drm_device_t * dev)
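
A note on the cleanup hunks above: the MGA bootstrap teardown no longer frees a cached DRM_AGP_MEM pointer; it records the allocation's handle and releases it through the core AGP request interface. A minimal sketch of that pattern, built only from the calls and fields visible in the hunk (the helper name is hypothetical, and return values are ignored as they are in the driver):

/* Sketch: release the bootstrap AGP allocation tracked by handle, as done in
 * mga_do_cleanup_dma() above.  Hypothetical helper; error handling omitted. */
static void example_mga_release_agp(drm_device_t *dev, drm_mga_private_t *dev_priv)
{
	drm_agp_binding_t unbind_req;
	drm_agp_buffer_t free_req;

	if (dev_priv->agp_handle == 0)
		return;

	unbind_req.handle = dev_priv->agp_handle;
	drm_agp_unbind(dev, &unbind_req);	/* detach the pages from the aperture */

	free_req.handle = dev_priv->agp_handle;
	drm_agp_free(dev, &free_req);		/* hand the allocation back to the core */

	dev_priv->agp_textures = NULL;
	dev_priv->agp_size = 0;
	dev_priv->agp_handle = 0;
}
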
diff --git a/drivers/char/drm/mga_drv.c b/drivers/char/drm/mga_drv.c
index 1713451a5cc6..9f7ed0e0351b 100644
--- a/drivers/char/drm/mga_drv.c
+++ b/drivers/char/drm/mga_drv.c
@@ -38,41 +38,6 @@
38#include "drm_pciids.h" 38#include "drm_pciids.h"
39 39
40static int mga_driver_device_is_agp(drm_device_t * dev); 40static int mga_driver_device_is_agp(drm_device_t * dev);
41static int postinit(struct drm_device *dev, unsigned long flags)
42{
43 drm_mga_private_t *const dev_priv =
44 (drm_mga_private_t *) dev->dev_private;
45
46 dev_priv->mmio_base = pci_resource_start(dev->pdev, 1);
47 dev_priv->mmio_size = pci_resource_len(dev->pdev, 1);
48
49 dev->counters += 3;
50 dev->types[6] = _DRM_STAT_IRQ;
51 dev->types[7] = _DRM_STAT_PRIMARY;
52 dev->types[8] = _DRM_STAT_SECONDARY;
53
54 DRM_INFO("Initialized %s %d.%d.%d %s on minor %d: %s\n",
55 DRIVER_NAME,
56 DRIVER_MAJOR,
57 DRIVER_MINOR,
58 DRIVER_PATCHLEVEL,
59 DRIVER_DATE, dev->primary.minor, pci_pretty_name(dev->pdev)
60 );
61 return 0;
62}
63
64static int version(drm_version_t * version)
65{
66 int len;
67
68 version->version_major = DRIVER_MAJOR;
69 version->version_minor = DRIVER_MINOR;
70 version->version_patchlevel = DRIVER_PATCHLEVEL;
71 DRM_COPY(version->name, DRIVER_NAME);
72 DRM_COPY(version->date, DRIVER_DATE);
73 DRM_COPY(version->desc, DRIVER_DESC);
74 return 0;
75}
76 41
77static struct pci_device_id pciidlist[] = { 42static struct pci_device_id pciidlist[] = {
78 mga_PCI_IDS 43 mga_PCI_IDS
@@ -80,12 +45,12 @@ static struct pci_device_id pciidlist[] = {
80 45
81static struct drm_driver driver = { 46static struct drm_driver driver = {
82 .driver_features = 47 .driver_features =
83 DRIVER_USE_AGP | DRIVER_REQUIRE_AGP | DRIVER_USE_MTRR | 48 DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_PCI_DMA |
84 DRIVER_HAVE_DMA | DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | 49 DRIVER_HAVE_DMA | DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED |
85 DRIVER_IRQ_VBL, 50 DRIVER_IRQ_VBL,
86 .preinit = mga_driver_preinit, 51 .load = mga_driver_load,
87 .postcleanup = mga_driver_postcleanup, 52 .unload = mga_driver_unload,
88 .pretakedown = mga_driver_pretakedown, 53 .lastclose = mga_driver_lastclose,
89 .dma_quiescent = mga_driver_dma_quiescent, 54 .dma_quiescent = mga_driver_dma_quiescent,
90 .device_is_agp = mga_driver_device_is_agp, 55 .device_is_agp = mga_driver_device_is_agp,
91 .vblank_wait = mga_driver_vblank_wait, 56 .vblank_wait = mga_driver_vblank_wait,
@@ -96,8 +61,6 @@ static struct drm_driver driver = {
96 .reclaim_buffers = drm_core_reclaim_buffers, 61 .reclaim_buffers = drm_core_reclaim_buffers,
97 .get_map_ofs = drm_core_get_map_ofs, 62 .get_map_ofs = drm_core_get_map_ofs,
98 .get_reg_ofs = drm_core_get_reg_ofs, 63 .get_reg_ofs = drm_core_get_reg_ofs,
99 .postinit = postinit,
100 .version = version,
101 .ioctls = mga_ioctls, 64 .ioctls = mga_ioctls,
102 .dma_ioctl = mga_dma_buffers, 65 .dma_ioctl = mga_dma_buffers,
103 .fops = { 66 .fops = {
@@ -113,9 +76,16 @@ static struct drm_driver driver = {
113#endif 76#endif
114 }, 77 },
115 .pci_driver = { 78 .pci_driver = {
116 .name = DRIVER_NAME, 79 .name = DRIVER_NAME,
117 .id_table = pciidlist, 80 .id_table = pciidlist,
118 } 81 },
82
83 .name = DRIVER_NAME,
84 .desc = DRIVER_DESC,
85 .date = DRIVER_DATE,
86 .major = DRIVER_MAJOR,
87 .minor = DRIVER_MINOR,
88 .patchlevel = DRIVER_PATCHLEVEL,
119}; 89};
120 90
121static int __init mga_init(void) 91static int __init mga_init(void)
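
Read together, the hunks above replace the driver-local postinit()/version() helpers with data: the drm_driver structure now names its load/unload/lastclose hooks and carries the name, description, date and version numbers directly. Roughly the shape the initializer reduces to, showing only fields visible in this diff (everything else is omitted):

/* Sketch of the reworked initializer; the DRIVER_* macros come from mga_drv.h. */
static struct drm_driver example_driver = {
	.load       = mga_driver_load,
	.unload     = mga_driver_unload,
	.lastclose  = mga_driver_lastclose,

	.name       = DRIVER_NAME,
	.desc       = DRIVER_DESC,
	.date       = DRIVER_DATE,
	.major      = DRIVER_MAJOR,
	.minor      = DRIVER_MINOR,
	.patchlevel = DRIVER_PATCHLEVEL,
};
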
diff --git a/drivers/char/drm/mga_drv.h b/drivers/char/drm/mga_drv.h
index 461728e6a58a..6b0c53193506 100644
--- a/drivers/char/drm/mga_drv.h
+++ b/drivers/char/drm/mga_drv.h
@@ -38,11 +38,11 @@
38 38
39#define DRIVER_NAME "mga" 39#define DRIVER_NAME "mga"
40#define DRIVER_DESC "Matrox G200/G400" 40#define DRIVER_DESC "Matrox G200/G400"
41#define DRIVER_DATE "20050607" 41#define DRIVER_DATE "20051102"
42 42
43#define DRIVER_MAJOR 3 43#define DRIVER_MAJOR 3
44#define DRIVER_MINOR 2 44#define DRIVER_MINOR 2
45#define DRIVER_PATCHLEVEL 0 45#define DRIVER_PATCHLEVEL 1
46 46
47typedef struct drm_mga_primary_buffer { 47typedef struct drm_mga_primary_buffer {
48 u8 *start; 48 u8 *start;
@@ -144,22 +144,22 @@ typedef struct drm_mga_private {
144 drm_local_map_t *primary; 144 drm_local_map_t *primary;
145 drm_local_map_t *agp_textures; 145 drm_local_map_t *agp_textures;
146 146
147 DRM_AGP_MEM *agp_mem; 147 unsigned long agp_handle;
148 unsigned int agp_pages; 148 unsigned int agp_size;
149} drm_mga_private_t; 149} drm_mga_private_t;
150 150
151extern drm_ioctl_desc_t mga_ioctls[]; 151extern drm_ioctl_desc_t mga_ioctls[];
152extern int mga_max_ioctl; 152extern int mga_max_ioctl;
153 153
154 /* mga_dma.c */ 154 /* mga_dma.c */
155extern int mga_driver_preinit(drm_device_t * dev, unsigned long flags);
156extern int mga_dma_bootstrap(DRM_IOCTL_ARGS); 155extern int mga_dma_bootstrap(DRM_IOCTL_ARGS);
157extern int mga_dma_init(DRM_IOCTL_ARGS); 156extern int mga_dma_init(DRM_IOCTL_ARGS);
158extern int mga_dma_flush(DRM_IOCTL_ARGS); 157extern int mga_dma_flush(DRM_IOCTL_ARGS);
159extern int mga_dma_reset(DRM_IOCTL_ARGS); 158extern int mga_dma_reset(DRM_IOCTL_ARGS);
160extern int mga_dma_buffers(DRM_IOCTL_ARGS); 159extern int mga_dma_buffers(DRM_IOCTL_ARGS);
161extern int mga_driver_postcleanup(drm_device_t * dev); 160extern int mga_driver_load(drm_device_t *dev, unsigned long flags);
162extern void mga_driver_pretakedown(drm_device_t * dev); 161extern int mga_driver_unload(drm_device_t * dev);
162extern void mga_driver_lastclose(drm_device_t * dev);
163extern int mga_driver_dma_quiescent(drm_device_t * dev); 163extern int mga_driver_dma_quiescent(drm_device_t * dev);
164 164
165extern int mga_do_wait_for_idle(drm_mga_private_t * dev_priv); 165extern int mga_do_wait_for_idle(drm_mga_private_t * dev_priv);
diff --git a/drivers/char/drm/mga_state.c b/drivers/char/drm/mga_state.c
index 47f54b5ae956..2837e669183a 100644
--- a/drivers/char/drm/mga_state.c
+++ b/drivers/char/drm/mga_state.c
@@ -1127,19 +1127,19 @@ static int mga_wait_fence(DRM_IOCTL_ARGS)
1127} 1127}
1128 1128
1129drm_ioctl_desc_t mga_ioctls[] = { 1129drm_ioctl_desc_t mga_ioctls[] = {
1130 [DRM_IOCTL_NR(DRM_MGA_INIT)] = {mga_dma_init, 1, 1}, 1130 [DRM_IOCTL_NR(DRM_MGA_INIT)] = {mga_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
1131 [DRM_IOCTL_NR(DRM_MGA_FLUSH)] = {mga_dma_flush, 1, 0}, 1131 [DRM_IOCTL_NR(DRM_MGA_FLUSH)] = {mga_dma_flush, DRM_AUTH},
1132 [DRM_IOCTL_NR(DRM_MGA_RESET)] = {mga_dma_reset, 1, 0}, 1132 [DRM_IOCTL_NR(DRM_MGA_RESET)] = {mga_dma_reset, DRM_AUTH},
1133 [DRM_IOCTL_NR(DRM_MGA_SWAP)] = {mga_dma_swap, 1, 0}, 1133 [DRM_IOCTL_NR(DRM_MGA_SWAP)] = {mga_dma_swap, DRM_AUTH},
1134 [DRM_IOCTL_NR(DRM_MGA_CLEAR)] = {mga_dma_clear, 1, 0}, 1134 [DRM_IOCTL_NR(DRM_MGA_CLEAR)] = {mga_dma_clear, DRM_AUTH},
1135 [DRM_IOCTL_NR(DRM_MGA_VERTEX)] = {mga_dma_vertex, 1, 0}, 1135 [DRM_IOCTL_NR(DRM_MGA_VERTEX)] = {mga_dma_vertex, DRM_AUTH},
1136 [DRM_IOCTL_NR(DRM_MGA_INDICES)] = {mga_dma_indices, 1, 0}, 1136 [DRM_IOCTL_NR(DRM_MGA_INDICES)] = {mga_dma_indices, DRM_AUTH},
1137 [DRM_IOCTL_NR(DRM_MGA_ILOAD)] = {mga_dma_iload, 1, 0}, 1137 [DRM_IOCTL_NR(DRM_MGA_ILOAD)] = {mga_dma_iload, DRM_AUTH},
1138 [DRM_IOCTL_NR(DRM_MGA_BLIT)] = {mga_dma_blit, 1, 0}, 1138 [DRM_IOCTL_NR(DRM_MGA_BLIT)] = {mga_dma_blit, DRM_AUTH},
1139 [DRM_IOCTL_NR(DRM_MGA_GETPARAM)] = {mga_getparam, 1, 0}, 1139 [DRM_IOCTL_NR(DRM_MGA_GETPARAM)] = {mga_getparam, DRM_AUTH},
1140 [DRM_IOCTL_NR(DRM_MGA_SET_FENCE)] = {mga_set_fence, 1, 0}, 1140 [DRM_IOCTL_NR(DRM_MGA_SET_FENCE)] = {mga_set_fence, DRM_AUTH},
1141 [DRM_IOCTL_NR(DRM_MGA_WAIT_FENCE)] = {mga_wait_fence, 1, 0}, 1141 [DRM_IOCTL_NR(DRM_MGA_WAIT_FENCE)] = {mga_wait_fence, DRM_AUTH},
1142 [DRM_IOCTL_NR(DRM_MGA_DMA_BOOTSTRAP)] = {mga_dma_bootstrap, 1, 1}, 1142 [DRM_IOCTL_NR(DRM_MGA_DMA_BOOTSTRAP)] = {mga_dma_bootstrap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
1143}; 1143};
1144 1144
1145int mga_max_ioctl = DRM_ARRAY_SIZE(mga_ioctls); 1145int mga_max_ioctl = DRM_ARRAY_SIZE(mga_ioctls);
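
The ioctl table above drops the old positional pair, which read as (authentication required, root only), in favour of a single flags word. A short sketch of the new form next to the old, using two entries from the table itself; flag names are taken verbatim from the hunk:

/* Sketch: flag-based ioctl descriptors replacing the positional form. */
static drm_ioctl_desc_t example_ioctls[] = {
	/* was {mga_dma_init, 1, 1}: authenticated, root only */
	[DRM_IOCTL_NR(DRM_MGA_INIT)]  = {mga_dma_init,
					 DRM_AUTH | DRM_MASTER | DRM_ROOT_ONLY},
	/* was {mga_dma_flush, 1, 0}: authenticated clients only */
	[DRM_IOCTL_NR(DRM_MGA_FLUSH)] = {mga_dma_flush, DRM_AUTH},
};
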
diff --git a/drivers/char/drm/r128_cce.c b/drivers/char/drm/r128_cce.c
index 7452753d4d01..db5a60450e68 100644
--- a/drivers/char/drm/r128_cce.c
+++ b/drivers/char/drm/r128_cce.c
@@ -1,6 +1,7 @@
1/* r128_cce.c -- ATI Rage 128 driver -*- linux-c -*- 1/* r128_cce.c -- ATI Rage 128 driver -*- linux-c -*-
2 * Created: Wed Apr 5 19:24:19 2000 by kevin@precisioninsight.com 2 * Created: Wed Apr 5 19:24:19 2000 by kevin@precisioninsight.com
3 * 3 */
4/*
4 * Copyright 2000 Precision Insight, Inc., Cedar Park, Texas. 5 * Copyright 2000 Precision Insight, Inc., Cedar Park, Texas.
5 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. 6 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
6 * All Rights Reserved. 7 * All Rights Reserved.
@@ -559,7 +560,8 @@ static int r128_do_init_cce(drm_device_t * dev, drm_r128_init_t * init)
559 if (dev_priv->is_pci) { 560 if (dev_priv->is_pci) {
560#endif 561#endif
561 dev_priv->gart_info.gart_table_location = DRM_ATI_GART_MAIN; 562 dev_priv->gart_info.gart_table_location = DRM_ATI_GART_MAIN;
562 dev_priv->gart_info.addr = dev_priv->gart_info.bus_addr = 0; 563 dev_priv->gart_info.addr = NULL;
564 dev_priv->gart_info.bus_addr = 0;
563 dev_priv->gart_info.is_pcie = 0; 565 dev_priv->gart_info.is_pcie = 0;
564 if (!drm_ati_pcigart_init(dev, &dev_priv->gart_info)) { 566 if (!drm_ati_pcigart_init(dev, &dev_priv->gart_info)) {
565 DRM_ERROR("failed to init PCI GART!\n"); 567 DRM_ERROR("failed to init PCI GART!\n");
@@ -601,15 +603,16 @@ int r128_do_cleanup_cce(drm_device_t * dev)
601 drm_core_ioremapfree(dev_priv->cce_ring, dev); 603 drm_core_ioremapfree(dev_priv->cce_ring, dev);
602 if (dev_priv->ring_rptr != NULL) 604 if (dev_priv->ring_rptr != NULL)
603 drm_core_ioremapfree(dev_priv->ring_rptr, dev); 605 drm_core_ioremapfree(dev_priv->ring_rptr, dev);
604 if (dev->agp_buffer_map != NULL) 606 if (dev->agp_buffer_map != NULL) {
605 drm_core_ioremapfree(dev->agp_buffer_map, dev); 607 drm_core_ioremapfree(dev->agp_buffer_map, dev);
608 dev->agp_buffer_map = NULL;
609 }
606 } else 610 } else
607#endif 611#endif
608 { 612 {
609 if (dev_priv->gart_info.bus_addr) 613 if (dev_priv->gart_info.bus_addr)
610 if (!drm_ati_pcigart_cleanup(dev, 614 if (!drm_ati_pcigart_cleanup(dev,
611 &dev_priv-> 615 &dev_priv->gart_info))
612 gart_info))
613 DRM_ERROR 616 DRM_ERROR
614 ("failed to cleanup PCI GART!\n"); 617 ("failed to cleanup PCI GART!\n");
615 } 618 }
diff --git a/drivers/char/drm/r128_drm.h b/drivers/char/drm/r128_drm.h
index 5ddc03202411..5d835b006f55 100644
--- a/drivers/char/drm/r128_drm.h
+++ b/drivers/char/drm/r128_drm.h
@@ -1,7 +1,7 @@
1/* r128_drm.h -- Public header for the r128 driver -*- linux-c -*- 1/* r128_drm.h -- Public header for the r128 driver -*- linux-c -*-
2 * Created: Wed Apr 5 19:24:19 2000 by kevin@precisioninsight.com 2 * Created: Wed Apr 5 19:24:19 2000 by kevin@precisioninsight.com
3 * 3 */
4 * Copyright 2000 Precision Insight, Inc., Cedar Park, Texas. 4/* Copyright 2000 Precision Insight, Inc., Cedar Park, Texas.
5 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. 5 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
6 * All rights reserved. 6 * All rights reserved.
7 * 7 *
diff --git a/drivers/char/drm/r128_drv.c b/drivers/char/drm/r128_drv.c
index 1661e7351402..e20450ae220e 100644
--- a/drivers/char/drm/r128_drv.c
+++ b/drivers/char/drm/r128_drv.c
@@ -37,31 +37,6 @@
37 37
38#include "drm_pciids.h" 38#include "drm_pciids.h"
39 39
40static int postinit(struct drm_device *dev, unsigned long flags)
41{
42 DRM_INFO("Initialized %s %d.%d.%d %s on minor %d: %s\n",
43 DRIVER_NAME,
44 DRIVER_MAJOR,
45 DRIVER_MINOR,
46 DRIVER_PATCHLEVEL,
47 DRIVER_DATE, dev->primary.minor, pci_pretty_name(dev->pdev)
48 );
49 return 0;
50}
51
52static int version(drm_version_t * version)
53{
54 int len;
55
56 version->version_major = DRIVER_MAJOR;
57 version->version_minor = DRIVER_MINOR;
58 version->version_patchlevel = DRIVER_PATCHLEVEL;
59 DRM_COPY(version->name, DRIVER_NAME);
60 DRM_COPY(version->date, DRIVER_DATE);
61 DRM_COPY(version->desc, DRIVER_DESC);
62 return 0;
63}
64
65static struct pci_device_id pciidlist[] = { 40static struct pci_device_id pciidlist[] = {
66 r128_PCI_IDS 41 r128_PCI_IDS
67}; 42};
@@ -72,8 +47,8 @@ static struct drm_driver driver = {
72 DRIVER_HAVE_DMA | DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | 47 DRIVER_HAVE_DMA | DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED |
73 DRIVER_IRQ_VBL, 48 DRIVER_IRQ_VBL,
74 .dev_priv_size = sizeof(drm_r128_buf_priv_t), 49 .dev_priv_size = sizeof(drm_r128_buf_priv_t),
75 .prerelease = r128_driver_prerelease, 50 .preclose = r128_driver_preclose,
76 .pretakedown = r128_driver_pretakedown, 51 .lastclose = r128_driver_lastclose,
77 .vblank_wait = r128_driver_vblank_wait, 52 .vblank_wait = r128_driver_vblank_wait,
78 .irq_preinstall = r128_driver_irq_preinstall, 53 .irq_preinstall = r128_driver_irq_preinstall,
79 .irq_postinstall = r128_driver_irq_postinstall, 54 .irq_postinstall = r128_driver_irq_postinstall,
@@ -82,8 +57,6 @@ static struct drm_driver driver = {
82 .reclaim_buffers = drm_core_reclaim_buffers, 57 .reclaim_buffers = drm_core_reclaim_buffers,
83 .get_map_ofs = drm_core_get_map_ofs, 58 .get_map_ofs = drm_core_get_map_ofs,
84 .get_reg_ofs = drm_core_get_reg_ofs, 59 .get_reg_ofs = drm_core_get_reg_ofs,
85 .postinit = postinit,
86 .version = version,
87 .ioctls = r128_ioctls, 60 .ioctls = r128_ioctls,
88 .dma_ioctl = r128_cce_buffers, 61 .dma_ioctl = r128_cce_buffers,
89 .fops = { 62 .fops = {
@@ -97,12 +70,19 @@ static struct drm_driver driver = {
97#ifdef CONFIG_COMPAT 70#ifdef CONFIG_COMPAT
98 .compat_ioctl = r128_compat_ioctl, 71 .compat_ioctl = r128_compat_ioctl,
99#endif 72#endif
100 } 73 },
101 , 74
102 .pci_driver = { 75 .pci_driver = {
103 .name = DRIVER_NAME, 76 .name = DRIVER_NAME,
104 .id_table = pciidlist, 77 .id_table = pciidlist,
105 } 78 },
79
80 .name = DRIVER_NAME,
81 .desc = DRIVER_DESC,
82 .date = DRIVER_DATE,
83 .major = DRIVER_MAJOR,
84 .minor = DRIVER_MINOR,
85 .patchlevel = DRIVER_PATCHLEVEL,
106}; 86};
107 87
108static int __init r128_init(void) 88static int __init r128_init(void)
diff --git a/drivers/char/drm/r128_drv.h b/drivers/char/drm/r128_drv.h
index 5c79e40eb88f..94abffb2cca5 100644
--- a/drivers/char/drm/r128_drv.h
+++ b/drivers/char/drm/r128_drv.h
@@ -1,7 +1,7 @@
1/* r128_drv.h -- Private header for r128 driver -*- linux-c -*- 1/* r128_drv.h -- Private header for r128 driver -*- linux-c -*-
2 * Created: Mon Dec 13 09:51:11 1999 by faith@precisioninsight.com 2 * Created: Mon Dec 13 09:51:11 1999 by faith@precisioninsight.com
3 * 3 */
4 * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas. 4/* Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
5 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. 5 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
6 * All rights reserved. 6 * All rights reserved.
7 * 7 *
@@ -154,8 +154,8 @@ extern irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS);
154extern void r128_driver_irq_preinstall(drm_device_t * dev); 154extern void r128_driver_irq_preinstall(drm_device_t * dev);
155extern void r128_driver_irq_postinstall(drm_device_t * dev); 155extern void r128_driver_irq_postinstall(drm_device_t * dev);
156extern void r128_driver_irq_uninstall(drm_device_t * dev); 156extern void r128_driver_irq_uninstall(drm_device_t * dev);
157extern void r128_driver_pretakedown(drm_device_t * dev); 157extern void r128_driver_lastclose(drm_device_t * dev);
158extern void r128_driver_prerelease(drm_device_t * dev, DRMFILE filp); 158extern void r128_driver_preclose(drm_device_t * dev, DRMFILE filp);
159 159
160extern long r128_compat_ioctl(struct file *filp, unsigned int cmd, 160extern long r128_compat_ioctl(struct file *filp, unsigned int cmd,
161 unsigned long arg); 161 unsigned long arg);
diff --git a/drivers/char/drm/r128_irq.c b/drivers/char/drm/r128_irq.c
index 27eb0e31bd3b..87f8ca2b0685 100644
--- a/drivers/char/drm/r128_irq.c
+++ b/drivers/char/drm/r128_irq.c
@@ -1,5 +1,5 @@
1/* r128_irq.c -- IRQ handling for radeon -*- linux-c -*- 1/* r128_irq.c -- IRQ handling for radeon -*- linux-c -*- */
2 * 2/*
3 * Copyright (C) The Weather Channel, Inc. 2002. All Rights Reserved. 3 * Copyright (C) The Weather Channel, Inc. 2002. All Rights Reserved.
4 * 4 *
5 * The Weather Channel (TM) funded Tungsten Graphics to develop the 5 * The Weather Channel (TM) funded Tungsten Graphics to develop the
diff --git a/drivers/char/drm/r128_state.c b/drivers/char/drm/r128_state.c
index 14479cc08a57..caeecc2c36da 100644
--- a/drivers/char/drm/r128_state.c
+++ b/drivers/char/drm/r128_state.c
@@ -1,7 +1,7 @@
1/* r128_state.c -- State support for r128 -*- linux-c -*- 1/* r128_state.c -- State support for r128 -*- linux-c -*-
2 * Created: Thu Jan 27 02:53:43 2000 by gareth@valinux.com 2 * Created: Thu Jan 27 02:53:43 2000 by gareth@valinux.com
3 * 3 */
4 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. 4/* Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
5 * All Rights Reserved. 5 * All Rights Reserved.
6 * 6 *
7 * Permission is hereby granted, free of charge, to any person obtaining a 7 * Permission is hereby granted, free of charge, to any person obtaining a
@@ -1674,7 +1674,7 @@ static int r128_getparam(DRM_IOCTL_ARGS)
1674 return 0; 1674 return 0;
1675} 1675}
1676 1676
1677void r128_driver_prerelease(drm_device_t * dev, DRMFILE filp) 1677void r128_driver_preclose(drm_device_t * dev, DRMFILE filp)
1678{ 1678{
1679 if (dev->dev_private) { 1679 if (dev->dev_private) {
1680 drm_r128_private_t *dev_priv = dev->dev_private; 1680 drm_r128_private_t *dev_priv = dev->dev_private;
@@ -1684,29 +1684,29 @@ void r128_driver_prerelease(drm_device_t * dev, DRMFILE filp)
1684 } 1684 }
1685} 1685}
1686 1686
1687void r128_driver_pretakedown(drm_device_t * dev) 1687void r128_driver_lastclose(drm_device_t * dev)
1688{ 1688{
1689 r128_do_cleanup_cce(dev); 1689 r128_do_cleanup_cce(dev);
1690} 1690}
1691 1691
1692drm_ioctl_desc_t r128_ioctls[] = { 1692drm_ioctl_desc_t r128_ioctls[] = {
1693 [DRM_IOCTL_NR(DRM_R128_INIT)] = {r128_cce_init, 1, 1}, 1693 [DRM_IOCTL_NR(DRM_R128_INIT)] = {r128_cce_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
1694 [DRM_IOCTL_NR(DRM_R128_CCE_START)] = {r128_cce_start, 1, 1}, 1694 [DRM_IOCTL_NR(DRM_R128_CCE_START)] = {r128_cce_start, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
1695 [DRM_IOCTL_NR(DRM_R128_CCE_STOP)] = {r128_cce_stop, 1, 1}, 1695 [DRM_IOCTL_NR(DRM_R128_CCE_STOP)] = {r128_cce_stop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
1696 [DRM_IOCTL_NR(DRM_R128_CCE_RESET)] = {r128_cce_reset, 1, 1}, 1696 [DRM_IOCTL_NR(DRM_R128_CCE_RESET)] = {r128_cce_reset, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
1697 [DRM_IOCTL_NR(DRM_R128_CCE_IDLE)] = {r128_cce_idle, 1, 0}, 1697 [DRM_IOCTL_NR(DRM_R128_CCE_IDLE)] = {r128_cce_idle, DRM_AUTH},
1698 [DRM_IOCTL_NR(DRM_R128_RESET)] = {r128_engine_reset, 1, 0}, 1698 [DRM_IOCTL_NR(DRM_R128_RESET)] = {r128_engine_reset, DRM_AUTH},
1699 [DRM_IOCTL_NR(DRM_R128_FULLSCREEN)] = {r128_fullscreen, 1, 0}, 1699 [DRM_IOCTL_NR(DRM_R128_FULLSCREEN)] = {r128_fullscreen, DRM_AUTH},
1700 [DRM_IOCTL_NR(DRM_R128_SWAP)] = {r128_cce_swap, 1, 0}, 1700 [DRM_IOCTL_NR(DRM_R128_SWAP)] = {r128_cce_swap, DRM_AUTH},
1701 [DRM_IOCTL_NR(DRM_R128_FLIP)] = {r128_cce_flip, 1, 0}, 1701 [DRM_IOCTL_NR(DRM_R128_FLIP)] = {r128_cce_flip, DRM_AUTH},
1702 [DRM_IOCTL_NR(DRM_R128_CLEAR)] = {r128_cce_clear, 1, 0}, 1702 [DRM_IOCTL_NR(DRM_R128_CLEAR)] = {r128_cce_clear, DRM_AUTH},
1703 [DRM_IOCTL_NR(DRM_R128_VERTEX)] = {r128_cce_vertex, 1, 0}, 1703 [DRM_IOCTL_NR(DRM_R128_VERTEX)] = {r128_cce_vertex, DRM_AUTH},
1704 [DRM_IOCTL_NR(DRM_R128_INDICES)] = {r128_cce_indices, 1, 0}, 1704 [DRM_IOCTL_NR(DRM_R128_INDICES)] = {r128_cce_indices, DRM_AUTH},
1705 [DRM_IOCTL_NR(DRM_R128_BLIT)] = {r128_cce_blit, 1, 0}, 1705 [DRM_IOCTL_NR(DRM_R128_BLIT)] = {r128_cce_blit, DRM_AUTH},
1706 [DRM_IOCTL_NR(DRM_R128_DEPTH)] = {r128_cce_depth, 1, 0}, 1706 [DRM_IOCTL_NR(DRM_R128_DEPTH)] = {r128_cce_depth, DRM_AUTH},
1707 [DRM_IOCTL_NR(DRM_R128_STIPPLE)] = {r128_cce_stipple, 1, 0}, 1707 [DRM_IOCTL_NR(DRM_R128_STIPPLE)] = {r128_cce_stipple, DRM_AUTH},
1708 [DRM_IOCTL_NR(DRM_R128_INDIRECT)] = {r128_cce_indirect, 1, 1}, 1708 [DRM_IOCTL_NR(DRM_R128_INDIRECT)] = {r128_cce_indirect, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
1709 [DRM_IOCTL_NR(DRM_R128_GETPARAM)] = {r128_getparam, 1, 0}, 1709 [DRM_IOCTL_NR(DRM_R128_GETPARAM)] = {r128_getparam, DRM_AUTH},
1710}; 1710};
1711 1711
1712int r128_max_ioctl = DRM_ARRAY_SIZE(r128_ioctls); 1712int r128_max_ioctl = DRM_ARRAY_SIZE(r128_ioctls);
diff --git a/drivers/char/drm/r300_cmdbuf.c b/drivers/char/drm/r300_cmdbuf.c
index 3a1ac5f78b43..291dbf4c8186 100644
--- a/drivers/char/drm/r300_cmdbuf.c
+++ b/drivers/char/drm/r300_cmdbuf.c
@@ -52,8 +52,8 @@ static const int r300_cliprect_cntl[4] = {
52 * Emit up to R300_SIMULTANEOUS_CLIPRECTS cliprects from the given command 52 * Emit up to R300_SIMULTANEOUS_CLIPRECTS cliprects from the given command
53 * buffer, starting with index n. 53 * buffer, starting with index n.
54 */ 54 */
55static int r300_emit_cliprects(drm_radeon_private_t * dev_priv, 55static int r300_emit_cliprects(drm_radeon_private_t *dev_priv,
56 drm_radeon_kcmd_buffer_t * cmdbuf, int n) 56 drm_radeon_kcmd_buffer_t *cmdbuf, int n)
57{ 57{
58 drm_clip_rect_t box; 58 drm_clip_rect_t box;
59 int nr; 59 int nr;
@@ -216,6 +216,7 @@ void r300_init_reg_flags(void)
216 ADD_RANGE(R300_TX_UNK1_0, 16); 216 ADD_RANGE(R300_TX_UNK1_0, 16);
217 ADD_RANGE(R300_TX_SIZE_0, 16); 217 ADD_RANGE(R300_TX_SIZE_0, 16);
218 ADD_RANGE(R300_TX_FORMAT_0, 16); 218 ADD_RANGE(R300_TX_FORMAT_0, 16);
219 ADD_RANGE(R300_TX_PITCH_0, 16);
219 /* Texture offset is dangerous and needs more checking */ 220 /* Texture offset is dangerous and needs more checking */
220 ADD_RANGE_MARK(R300_TX_OFFSET_0, 16, MARK_CHECK_OFFSET); 221 ADD_RANGE_MARK(R300_TX_OFFSET_0, 16, MARK_CHECK_OFFSET);
221 ADD_RANGE(R300_TX_UNK4_0, 16); 222 ADD_RANGE(R300_TX_UNK4_0, 16);
@@ -242,7 +243,7 @@ static __inline__ int r300_check_range(unsigned reg, int count)
242 243
243 /* we expect offsets passed to the framebuffer to be either within video memory or 244 /* we expect offsets passed to the framebuffer to be either within video memory or
244 within AGP space */ 245 within AGP space */
245static __inline__ int r300_check_offset(drm_radeon_private_t * dev_priv, 246static __inline__ int r300_check_offset(drm_radeon_private_t *dev_priv,
246 u32 offset) 247 u32 offset)
247{ 248{
248 /* we realy want to check against end of video aperture 249 /* we realy want to check against end of video aperture
@@ -317,8 +318,8 @@ static __inline__ int r300_emit_carefully_checked_packet0(drm_radeon_private_t *
317 * 318 *
318 * Note that checks are performed on contents and addresses of the registers 319 * Note that checks are performed on contents and addresses of the registers
319 */ 320 */
320static __inline__ int r300_emit_packet0(drm_radeon_private_t * dev_priv, 321static __inline__ int r300_emit_packet0(drm_radeon_private_t *dev_priv,
321 drm_radeon_kcmd_buffer_t * cmdbuf, 322 drm_radeon_kcmd_buffer_t *cmdbuf,
322 drm_r300_cmd_header_t header) 323 drm_r300_cmd_header_t header)
323{ 324{
324 int reg; 325 int reg;
@@ -363,8 +364,8 @@ static __inline__ int r300_emit_packet0(drm_radeon_private_t * dev_priv,
363 * the graphics card. 364 * the graphics card.
364 * Called by r300_do_cp_cmdbuf. 365 * Called by r300_do_cp_cmdbuf.
365 */ 366 */
366static __inline__ int r300_emit_vpu(drm_radeon_private_t * dev_priv, 367static __inline__ int r300_emit_vpu(drm_radeon_private_t *dev_priv,
367 drm_radeon_kcmd_buffer_t * cmdbuf, 368 drm_radeon_kcmd_buffer_t *cmdbuf,
368 drm_r300_cmd_header_t header) 369 drm_r300_cmd_header_t header)
369{ 370{
370 int sz; 371 int sz;
@@ -400,8 +401,8 @@ static __inline__ int r300_emit_vpu(drm_radeon_private_t * dev_priv,
400 * Emit a clear packet from userspace. 401 * Emit a clear packet from userspace.
401 * Called by r300_emit_packet3. 402 * Called by r300_emit_packet3.
402 */ 403 */
403static __inline__ int r300_emit_clear(drm_radeon_private_t * dev_priv, 404static __inline__ int r300_emit_clear(drm_radeon_private_t *dev_priv,
404 drm_radeon_kcmd_buffer_t * cmdbuf) 405 drm_radeon_kcmd_buffer_t *cmdbuf)
405{ 406{
406 RING_LOCALS; 407 RING_LOCALS;
407 408
@@ -421,8 +422,8 @@ static __inline__ int r300_emit_clear(drm_radeon_private_t * dev_priv,
421 return 0; 422 return 0;
422} 423}
423 424
424static __inline__ int r300_emit_3d_load_vbpntr(drm_radeon_private_t * dev_priv, 425static __inline__ int r300_emit_3d_load_vbpntr(drm_radeon_private_t *dev_priv,
425 drm_radeon_kcmd_buffer_t * cmdbuf, 426 drm_radeon_kcmd_buffer_t *cmdbuf,
426 u32 header) 427 u32 header)
427{ 428{
428 int count, i, k; 429 int count, i, k;
@@ -489,8 +490,8 @@ static __inline__ int r300_emit_3d_load_vbpntr(drm_radeon_private_t * dev_priv,
489 return 0; 490 return 0;
490} 491}
491 492
492static __inline__ int r300_emit_raw_packet3(drm_radeon_private_t * dev_priv, 493static __inline__ int r300_emit_raw_packet3(drm_radeon_private_t *dev_priv,
493 drm_radeon_kcmd_buffer_t * cmdbuf) 494 drm_radeon_kcmd_buffer_t *cmdbuf)
494{ 495{
495 u32 header; 496 u32 header;
496 int count; 497 int count;
@@ -554,8 +555,8 @@ static __inline__ int r300_emit_raw_packet3(drm_radeon_private_t * dev_priv,
554 * Emit a rendering packet3 from userspace. 555 * Emit a rendering packet3 from userspace.
555 * Called by r300_do_cp_cmdbuf. 556 * Called by r300_do_cp_cmdbuf.
556 */ 557 */
557static __inline__ int r300_emit_packet3(drm_radeon_private_t * dev_priv, 558static __inline__ int r300_emit_packet3(drm_radeon_private_t *dev_priv,
558 drm_radeon_kcmd_buffer_t * cmdbuf, 559 drm_radeon_kcmd_buffer_t *cmdbuf,
559 drm_r300_cmd_header_t header) 560 drm_r300_cmd_header_t header)
560{ 561{
561 int n; 562 int n;
@@ -623,7 +624,7 @@ static __inline__ int r300_emit_packet3(drm_radeon_private_t * dev_priv,
623/** 624/**
624 * Emit the sequence to pacify R300. 625 * Emit the sequence to pacify R300.
625 */ 626 */
626static __inline__ void r300_pacify(drm_radeon_private_t * dev_priv) 627static __inline__ void r300_pacify(drm_radeon_private_t *dev_priv)
627{ 628{
628 RING_LOCALS; 629 RING_LOCALS;
629 630
@@ -657,9 +658,10 @@ static void r300_discard_buffer(drm_device_t * dev, drm_buf_t * buf)
657 * commands on the DMA ring buffer. 658 * commands on the DMA ring buffer.
658 * Called by the ioctl handler function radeon_cp_cmdbuf. 659 * Called by the ioctl handler function radeon_cp_cmdbuf.
659 */ 660 */
660int r300_do_cp_cmdbuf(drm_device_t * dev, 661int r300_do_cp_cmdbuf(drm_device_t *dev,
661 DRMFILE filp, 662 DRMFILE filp,
662 drm_file_t * filp_priv, drm_radeon_kcmd_buffer_t * cmdbuf) 663 drm_file_t *filp_priv,
664 drm_radeon_kcmd_buffer_t *cmdbuf)
663{ 665{
664 drm_radeon_private_t *dev_priv = dev->dev_private; 666 drm_radeon_private_t *dev_priv = dev->dev_private;
665 drm_device_dma_t *dma = dev->dma; 667 drm_device_dma_t *dma = dev->dma;
diff --git a/drivers/char/drm/r300_reg.h b/drivers/char/drm/r300_reg.h
index e5b73c002394..a0ed20e25221 100644
--- a/drivers/char/drm/r300_reg.h
+++ b/drivers/char/drm/r300_reg.h
@@ -797,6 +797,7 @@ I am fairly certain that they are correct unless stated otherwise in comments.
797 797
798# define R300_TX_FORMAT_YUV_MODE 0x00800000 798# define R300_TX_FORMAT_YUV_MODE 0x00800000
799 799
800#define R300_TX_PITCH_0 0x4500
800#define R300_TX_OFFSET_0 0x4540 801#define R300_TX_OFFSET_0 0x4540
801/* BEGIN: Guess from R200 */ 802/* BEGIN: Guess from R200 */
802# define R300_TXO_ENDIAN_NO_SWAP (0 << 0) 803# define R300_TXO_ENDIAN_NO_SWAP (0 << 0)
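
The two hunks above add R300_TX_PITCH_0 at 0x4500 and whitelist a bank of 16 such registers in the command verifier. Since the next bank shown, R300_TX_OFFSET_0, sits at 0x4540, the spacing implies a 4-byte stride per texture unit; a small illustrative helper (the name is not part of the driver):

/* Sketch: address of the pitch register for texture unit 0..15, assuming the
 * 4-byte stride implied by the 0x4500/0x4540 spacing in r300_reg.h above. */
#define EXAMPLE_R300_TX_PITCH_0 0x4500

static inline unsigned int example_r300_tx_pitch_reg(int unit)
{
	return EXAMPLE_R300_TX_PITCH_0 + (unit << 2);
}
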
diff --git a/drivers/char/drm/radeon_cp.c b/drivers/char/drm/radeon_cp.c
index 342302d46743..915665c7fe7c 100644
--- a/drivers/char/drm/radeon_cp.c
+++ b/drivers/char/drm/radeon_cp.c
@@ -1,5 +1,5 @@
1/* radeon_cp.c -- CP support for Radeon -*- linux-c -*- 1/* radeon_cp.c -- CP support for Radeon -*- linux-c -*- */
2 * 2/*
3 * Copyright 2000 Precision Insight, Inc., Cedar Park, Texas. 3 * Copyright 2000 Precision Insight, Inc., Cedar Park, Texas.
4 * Copyright 2000 VA Linux Systems, Inc., Fremont, California. 4 * Copyright 2000 VA Linux Systems, Inc., Fremont, California.
5 * All Rights Reserved. 5 * All Rights Reserved.
@@ -824,7 +824,7 @@ static int RADEON_READ_PLL(drm_device_t * dev, int addr)
824 return RADEON_READ(RADEON_CLOCK_CNTL_DATA); 824 return RADEON_READ(RADEON_CLOCK_CNTL_DATA);
825} 825}
826 826
827static int RADEON_READ_PCIE(drm_radeon_private_t * dev_priv, int addr) 827static int RADEON_READ_PCIE(drm_radeon_private_t *dev_priv, int addr)
828{ 828{
829 RADEON_WRITE8(RADEON_PCIE_INDEX, addr & 0xff); 829 RADEON_WRITE8(RADEON_PCIE_INDEX, addr & 0xff);
830 return RADEON_READ(RADEON_PCIE_DATA); 830 return RADEON_READ(RADEON_PCIE_DATA);
@@ -1125,7 +1125,7 @@ static void radeon_cp_init_ring_buffer(drm_device_t * dev,
1125 | (dev_priv->fb_location >> 16)); 1125 | (dev_priv->fb_location >> 16));
1126 1126
1127#if __OS_HAS_AGP 1127#if __OS_HAS_AGP
1128 if (!dev_priv->is_pci) { 1128 if (dev_priv->flags & CHIP_IS_AGP) {
1129 RADEON_WRITE(RADEON_MC_AGP_LOCATION, 1129 RADEON_WRITE(RADEON_MC_AGP_LOCATION,
1130 (((dev_priv->gart_vm_start - 1 + 1130 (((dev_priv->gart_vm_start - 1 +
1131 dev_priv->gart_size) & 0xffff0000) | 1131 dev_priv->gart_size) & 0xffff0000) |
@@ -1152,7 +1152,7 @@ static void radeon_cp_init_ring_buffer(drm_device_t * dev,
1152 dev_priv->ring.tail = cur_read_ptr; 1152 dev_priv->ring.tail = cur_read_ptr;
1153 1153
1154#if __OS_HAS_AGP 1154#if __OS_HAS_AGP
1155 if (!dev_priv->is_pci) { 1155 if (dev_priv->flags & CHIP_IS_AGP) {
1156 /* set RADEON_AGP_BASE here instead of relying on X from user space */ 1156 /* set RADEON_AGP_BASE here instead of relying on X from user space */
1157 RADEON_WRITE(RADEON_AGP_BASE, (unsigned int)dev->agp->base); 1157 RADEON_WRITE(RADEON_AGP_BASE, (unsigned int)dev->agp->base);
1158 RADEON_WRITE(RADEON_CP_RB_RPTR_ADDR, 1158 RADEON_WRITE(RADEON_CP_RB_RPTR_ADDR,
@@ -1278,13 +1278,15 @@ static void radeon_set_pciegart(drm_radeon_private_t * dev_priv, int on)
1278/* Enable or disable PCI GART on the chip */ 1278/* Enable or disable PCI GART on the chip */
1279static void radeon_set_pcigart(drm_radeon_private_t * dev_priv, int on) 1279static void radeon_set_pcigart(drm_radeon_private_t * dev_priv, int on)
1280{ 1280{
1281 u32 tmp = RADEON_READ(RADEON_AIC_CNTL); 1281 u32 tmp;
1282 1282
1283 if (dev_priv->flags & CHIP_IS_PCIE) { 1283 if (dev_priv->flags & CHIP_IS_PCIE) {
1284 radeon_set_pciegart(dev_priv, on); 1284 radeon_set_pciegart(dev_priv, on);
1285 return; 1285 return;
1286 } 1286 }
1287 1287
1288 tmp = RADEON_READ(RADEON_AIC_CNTL);
1289
1288 if (on) { 1290 if (on) {
1289 RADEON_WRITE(RADEON_AIC_CNTL, 1291 RADEON_WRITE(RADEON_AIC_CNTL,
1290 tmp | RADEON_PCIGART_TRANSLATE_EN); 1292 tmp | RADEON_PCIGART_TRANSLATE_EN);
@@ -1312,13 +1314,17 @@ static void radeon_set_pcigart(drm_radeon_private_t * dev_priv, int on)
1312static int radeon_do_init_cp(drm_device_t * dev, drm_radeon_init_t * init) 1314static int radeon_do_init_cp(drm_device_t * dev, drm_radeon_init_t * init)
1313{ 1315{
1314 drm_radeon_private_t *dev_priv = dev->dev_private; 1316 drm_radeon_private_t *dev_priv = dev->dev_private;
1317
1315 DRM_DEBUG("\n"); 1318 DRM_DEBUG("\n");
1316 1319
1317 dev_priv->is_pci = init->is_pci; 1320 if (init->is_pci && (dev_priv->flags & CHIP_IS_AGP))
1321 {
1322 DRM_DEBUG("Forcing AGP card to PCI mode\n");
1323 dev_priv->flags &= ~CHIP_IS_AGP;
1324 }
1318 1325
1319 if (dev_priv->is_pci && !dev->sg) { 1326 if ((!(dev_priv->flags & CHIP_IS_AGP)) && !dev->sg) {
1320 DRM_ERROR("PCI GART memory not allocated!\n"); 1327 DRM_ERROR("PCI GART memory not allocated!\n");
1321 dev->dev_private = (void *)dev_priv;
1322 radeon_do_cleanup_cp(dev); 1328 radeon_do_cleanup_cp(dev);
1323 return DRM_ERR(EINVAL); 1329 return DRM_ERR(EINVAL);
1324 } 1330 }
@@ -1327,12 +1333,11 @@ static int radeon_do_init_cp(drm_device_t * dev, drm_radeon_init_t * init)
1327 if (dev_priv->usec_timeout < 1 || 1333 if (dev_priv->usec_timeout < 1 ||
1328 dev_priv->usec_timeout > RADEON_MAX_USEC_TIMEOUT) { 1334 dev_priv->usec_timeout > RADEON_MAX_USEC_TIMEOUT) {
1329 DRM_DEBUG("TIMEOUT problem!\n"); 1335 DRM_DEBUG("TIMEOUT problem!\n");
1330 dev->dev_private = (void *)dev_priv;
1331 radeon_do_cleanup_cp(dev); 1336 radeon_do_cleanup_cp(dev);
1332 return DRM_ERR(EINVAL); 1337 return DRM_ERR(EINVAL);
1333 } 1338 }
1334 1339
1335 switch (init->func) { 1340 switch(init->func) {
1336 case RADEON_INIT_R200_CP: 1341 case RADEON_INIT_R200_CP:
1337 dev_priv->microcode_version = UCODE_R200; 1342 dev_priv->microcode_version = UCODE_R200;
1338 break; 1343 break;
@@ -1353,7 +1358,6 @@ static int radeon_do_init_cp(drm_device_t * dev, drm_radeon_init_t * init)
1353 if ((init->cp_mode != RADEON_CSQ_PRIBM_INDDIS) && 1358 if ((init->cp_mode != RADEON_CSQ_PRIBM_INDDIS) &&
1354 (init->cp_mode != RADEON_CSQ_PRIBM_INDBM)) { 1359 (init->cp_mode != RADEON_CSQ_PRIBM_INDBM)) {
1355 DRM_DEBUG("BAD cp_mode (%x)!\n", init->cp_mode); 1360 DRM_DEBUG("BAD cp_mode (%x)!\n", init->cp_mode);
1356 dev->dev_private = (void *)dev_priv;
1357 radeon_do_cleanup_cp(dev); 1361 radeon_do_cleanup_cp(dev);
1358 return DRM_ERR(EINVAL); 1362 return DRM_ERR(EINVAL);
1359 } 1363 }
@@ -1416,8 +1420,6 @@ static int radeon_do_init_cp(drm_device_t * dev, drm_radeon_init_t * init)
1416 1420
1417 DRM_GETSAREA(); 1421 DRM_GETSAREA();
1418 1422
1419 dev_priv->fb_offset = init->fb_offset;
1420 dev_priv->mmio_offset = init->mmio_offset;
1421 dev_priv->ring_offset = init->ring_offset; 1423 dev_priv->ring_offset = init->ring_offset;
1422 dev_priv->ring_rptr_offset = init->ring_rptr_offset; 1424 dev_priv->ring_rptr_offset = init->ring_rptr_offset;
1423 dev_priv->buffers_offset = init->buffers_offset; 1425 dev_priv->buffers_offset = init->buffers_offset;
@@ -1425,29 +1427,19 @@ static int radeon_do_init_cp(drm_device_t * dev, drm_radeon_init_t * init)
1425 1427
1426 if (!dev_priv->sarea) { 1428 if (!dev_priv->sarea) {
1427 DRM_ERROR("could not find sarea!\n"); 1429 DRM_ERROR("could not find sarea!\n");
1428 dev->dev_private = (void *)dev_priv;
1429 radeon_do_cleanup_cp(dev); 1430 radeon_do_cleanup_cp(dev);
1430 return DRM_ERR(EINVAL); 1431 return DRM_ERR(EINVAL);
1431 } 1432 }
1432 1433
1433 dev_priv->mmio = drm_core_findmap(dev, init->mmio_offset);
1434 if (!dev_priv->mmio) {
1435 DRM_ERROR("could not find mmio region!\n");
1436 dev->dev_private = (void *)dev_priv;
1437 radeon_do_cleanup_cp(dev);
1438 return DRM_ERR(EINVAL);
1439 }
1440 dev_priv->cp_ring = drm_core_findmap(dev, init->ring_offset); 1434 dev_priv->cp_ring = drm_core_findmap(dev, init->ring_offset);
1441 if (!dev_priv->cp_ring) { 1435 if (!dev_priv->cp_ring) {
1442 DRM_ERROR("could not find cp ring region!\n"); 1436 DRM_ERROR("could not find cp ring region!\n");
1443 dev->dev_private = (void *)dev_priv;
1444 radeon_do_cleanup_cp(dev); 1437 radeon_do_cleanup_cp(dev);
1445 return DRM_ERR(EINVAL); 1438 return DRM_ERR(EINVAL);
1446 } 1439 }
1447 dev_priv->ring_rptr = drm_core_findmap(dev, init->ring_rptr_offset); 1440 dev_priv->ring_rptr = drm_core_findmap(dev, init->ring_rptr_offset);
1448 if (!dev_priv->ring_rptr) { 1441 if (!dev_priv->ring_rptr) {
1449 DRM_ERROR("could not find ring read pointer!\n"); 1442 DRM_ERROR("could not find ring read pointer!\n");
1450 dev->dev_private = (void *)dev_priv;
1451 radeon_do_cleanup_cp(dev); 1443 radeon_do_cleanup_cp(dev);
1452 return DRM_ERR(EINVAL); 1444 return DRM_ERR(EINVAL);
1453 } 1445 }
@@ -1455,7 +1447,6 @@ static int radeon_do_init_cp(drm_device_t * dev, drm_radeon_init_t * init)
1455 dev->agp_buffer_map = drm_core_findmap(dev, init->buffers_offset); 1447 dev->agp_buffer_map = drm_core_findmap(dev, init->buffers_offset);
1456 if (!dev->agp_buffer_map) { 1448 if (!dev->agp_buffer_map) {
1457 DRM_ERROR("could not find dma buffer region!\n"); 1449 DRM_ERROR("could not find dma buffer region!\n");
1458 dev->dev_private = (void *)dev_priv;
1459 radeon_do_cleanup_cp(dev); 1450 radeon_do_cleanup_cp(dev);
1460 return DRM_ERR(EINVAL); 1451 return DRM_ERR(EINVAL);
1461 } 1452 }
@@ -1465,7 +1456,6 @@ static int radeon_do_init_cp(drm_device_t * dev, drm_radeon_init_t * init)
1465 drm_core_findmap(dev, init->gart_textures_offset); 1456 drm_core_findmap(dev, init->gart_textures_offset);
1466 if (!dev_priv->gart_textures) { 1457 if (!dev_priv->gart_textures) {
1467 DRM_ERROR("could not find GART texture region!\n"); 1458 DRM_ERROR("could not find GART texture region!\n");
1468 dev->dev_private = (void *)dev_priv;
1469 radeon_do_cleanup_cp(dev); 1459 radeon_do_cleanup_cp(dev);
1470 return DRM_ERR(EINVAL); 1460 return DRM_ERR(EINVAL);
1471 } 1461 }
@@ -1476,7 +1466,7 @@ static int radeon_do_init_cp(drm_device_t * dev, drm_radeon_init_t * init)
1476 init->sarea_priv_offset); 1466 init->sarea_priv_offset);
1477 1467
1478#if __OS_HAS_AGP 1468#if __OS_HAS_AGP
1479 if (!dev_priv->is_pci) { 1469 if (dev_priv->flags & CHIP_IS_AGP) {
1480 drm_core_ioremap(dev_priv->cp_ring, dev); 1470 drm_core_ioremap(dev_priv->cp_ring, dev);
1481 drm_core_ioremap(dev_priv->ring_rptr, dev); 1471 drm_core_ioremap(dev_priv->ring_rptr, dev);
1482 drm_core_ioremap(dev->agp_buffer_map, dev); 1472 drm_core_ioremap(dev->agp_buffer_map, dev);
@@ -1484,7 +1474,6 @@ static int radeon_do_init_cp(drm_device_t * dev, drm_radeon_init_t * init)
1484 !dev_priv->ring_rptr->handle || 1474 !dev_priv->ring_rptr->handle ||
1485 !dev->agp_buffer_map->handle) { 1475 !dev->agp_buffer_map->handle) {
1486 DRM_ERROR("could not find ioremap agp regions!\n"); 1476 DRM_ERROR("could not find ioremap agp regions!\n");
1487 dev->dev_private = (void *)dev_priv;
1488 radeon_do_cleanup_cp(dev); 1477 radeon_do_cleanup_cp(dev);
1489 return DRM_ERR(EINVAL); 1478 return DRM_ERR(EINVAL);
1490 } 1479 }
@@ -1525,7 +1514,7 @@ static int radeon_do_init_cp(drm_device_t * dev, drm_radeon_init_t * init)
1525 + RADEON_READ(RADEON_CONFIG_APER_SIZE); 1514 + RADEON_READ(RADEON_CONFIG_APER_SIZE);
1526 1515
1527#if __OS_HAS_AGP 1516#if __OS_HAS_AGP
1528 if (!dev_priv->is_pci) 1517 if (dev_priv->flags & CHIP_IS_AGP)
1529 dev_priv->gart_buffers_offset = (dev->agp_buffer_map->offset 1518 dev_priv->gart_buffers_offset = (dev->agp_buffer_map->offset
1530 - dev->agp->base 1519 - dev->agp->base
1531 + dev_priv->gart_vm_start); 1520 + dev_priv->gart_vm_start);
@@ -1551,7 +1540,7 @@ static int radeon_do_init_cp(drm_device_t * dev, drm_radeon_init_t * init)
1551 dev_priv->ring.high_mark = RADEON_RING_HIGH_MARK; 1540 dev_priv->ring.high_mark = RADEON_RING_HIGH_MARK;
1552 1541
1553#if __OS_HAS_AGP 1542#if __OS_HAS_AGP
1554 if (!dev_priv->is_pci) { 1543 if (dev_priv->flags & CHIP_IS_AGP) {
1555 /* Turn off PCI GART */ 1544 /* Turn off PCI GART */
1556 radeon_set_pcigart(dev_priv, 0); 1545 radeon_set_pcigart(dev_priv, 0);
1557 } else 1546 } else
@@ -1561,25 +1550,28 @@ static int radeon_do_init_cp(drm_device_t * dev, drm_radeon_init_t * init)
1561 if (dev_priv->pcigart_offset) { 1550 if (dev_priv->pcigart_offset) {
1562 dev_priv->gart_info.bus_addr = 1551 dev_priv->gart_info.bus_addr =
1563 dev_priv->pcigart_offset + dev_priv->fb_location; 1552 dev_priv->pcigart_offset + dev_priv->fb_location;
1553 dev_priv->gart_info.mapping.offset =
1554 dev_priv->gart_info.bus_addr;
1555 dev_priv->gart_info.mapping.size =
1556 RADEON_PCIGART_TABLE_SIZE;
1557
1558 drm_core_ioremap(&dev_priv->gart_info.mapping, dev);
1564 dev_priv->gart_info.addr = 1559 dev_priv->gart_info.addr =
1565 (unsigned long)drm_ioremap(dev_priv->gart_info. 1560 dev_priv->gart_info.mapping.handle;
1566 bus_addr,
1567 RADEON_PCIGART_TABLE_SIZE,
1568 dev);
1569 1561
1570 dev_priv->gart_info.is_pcie = 1562 dev_priv->gart_info.is_pcie =
1571 !!(dev_priv->flags & CHIP_IS_PCIE); 1563 !!(dev_priv->flags & CHIP_IS_PCIE);
1572 dev_priv->gart_info.gart_table_location = 1564 dev_priv->gart_info.gart_table_location =
1573 DRM_ATI_GART_FB; 1565 DRM_ATI_GART_FB;
1574 1566
1575 DRM_DEBUG("Setting phys_pci_gart to %08lX %08lX\n", 1567 DRM_DEBUG("Setting phys_pci_gart to %p %08lX\n",
1576 dev_priv->gart_info.addr, 1568 dev_priv->gart_info.addr,
1577 dev_priv->pcigart_offset); 1569 dev_priv->pcigart_offset);
1578 } else { 1570 } else {
1579 dev_priv->gart_info.gart_table_location = 1571 dev_priv->gart_info.gart_table_location =
1580 DRM_ATI_GART_MAIN; 1572 DRM_ATI_GART_MAIN;
1581 dev_priv->gart_info.addr = 1573 dev_priv->gart_info.addr = NULL;
1582 dev_priv->gart_info.bus_addr = 0; 1574 dev_priv->gart_info.bus_addr = 0;
1583 if (dev_priv->flags & CHIP_IS_PCIE) { 1575 if (dev_priv->flags & CHIP_IS_PCIE) {
1584 DRM_ERROR 1576 DRM_ERROR
1585 ("Cannot use PCI Express without GART in FB memory\n"); 1577 ("Cannot use PCI Express without GART in FB memory\n");
@@ -1590,7 +1582,6 @@ static int radeon_do_init_cp(drm_device_t * dev, drm_radeon_init_t * init)
1590 1582
1591 if (!drm_ati_pcigart_init(dev, &dev_priv->gart_info)) { 1583 if (!drm_ati_pcigart_init(dev, &dev_priv->gart_info)) {
1592 DRM_ERROR("failed to init PCI GART!\n"); 1584 DRM_ERROR("failed to init PCI GART!\n");
1593 dev->dev_private = (void *)dev_priv;
1594 radeon_do_cleanup_cp(dev); 1585 radeon_do_cleanup_cp(dev);
1595 return DRM_ERR(ENOMEM); 1586 return DRM_ERR(ENOMEM);
1596 } 1587 }
@@ -1604,8 +1595,6 @@ static int radeon_do_init_cp(drm_device_t * dev, drm_radeon_init_t * init)
1604 1595
1605 dev_priv->last_buf = 0; 1596 dev_priv->last_buf = 0;
1606 1597
1607 dev->dev_private = (void *)dev_priv;
1608
1609 radeon_do_engine_reset(dev); 1598 radeon_do_engine_reset(dev);
1610 1599
1611 return 0; 1600 return 0;
@@ -1624,11 +1613,15 @@ static int radeon_do_cleanup_cp(drm_device_t * dev)
1624 drm_irq_uninstall(dev); 1613 drm_irq_uninstall(dev);
1625 1614
1626#if __OS_HAS_AGP 1615#if __OS_HAS_AGP
1627 if (!dev_priv->is_pci) { 1616 if (dev_priv->flags & CHIP_IS_AGP) {
1628 if (dev_priv->cp_ring != NULL) 1617 if (dev_priv->cp_ring != NULL) {
1629 drm_core_ioremapfree(dev_priv->cp_ring, dev); 1618 drm_core_ioremapfree(dev_priv->cp_ring, dev);
1630 if (dev_priv->ring_rptr != NULL) 1619 dev_priv->cp_ring = NULL;
1620 }
1621 if (dev_priv->ring_rptr != NULL) {
1631 drm_core_ioremapfree(dev_priv->ring_rptr, dev); 1622 drm_core_ioremapfree(dev_priv->ring_rptr, dev);
1623 dev_priv->ring_rptr = NULL;
1624 }
1632 if (dev->agp_buffer_map != NULL) { 1625 if (dev->agp_buffer_map != NULL) {
1633 drm_core_ioremapfree(dev->agp_buffer_map, dev); 1626 drm_core_ioremapfree(dev->agp_buffer_map, dev);
1634 dev->agp_buffer_map = NULL; 1627 dev->agp_buffer_map = NULL;
@@ -1636,17 +1629,20 @@ static int radeon_do_cleanup_cp(drm_device_t * dev)
1636 } else 1629 } else
1637#endif 1630#endif
1638 { 1631 {
1639 if (dev_priv->gart_info.bus_addr) 1632
1633 if (dev_priv->gart_info.bus_addr) {
1634 /* Turn off PCI GART */
1635 radeon_set_pcigart(dev_priv, 0);
1640 if (!drm_ati_pcigart_cleanup(dev, &dev_priv->gart_info)) 1636 if (!drm_ati_pcigart_cleanup(dev, &dev_priv->gart_info))
1641 DRM_ERROR("failed to cleanup PCI GART!\n"); 1637 DRM_ERROR("failed to cleanup PCI GART!\n");
1638 }
1642 1639
1643 if (dev_priv->gart_info.gart_table_location == DRM_ATI_GART_FB) { 1640 if (dev_priv->gart_info.gart_table_location == DRM_ATI_GART_FB)
1644 drm_ioremapfree((void *)dev_priv->gart_info.addr, 1641 {
1645 RADEON_PCIGART_TABLE_SIZE, dev); 1642 drm_core_ioremapfree(&dev_priv->gart_info.mapping, dev);
1646 dev_priv->gart_info.addr = 0; 1643 dev_priv->gart_info.addr = 0;
1647 } 1644 }
1648 } 1645 }
1649
1650 /* only clear to the start of flags */ 1646 /* only clear to the start of flags */
1651 memset(dev_priv, 0, offsetof(drm_radeon_private_t, flags)); 1647 memset(dev_priv, 0, offsetof(drm_radeon_private_t, flags));
1652 1648
@@ -1672,7 +1668,7 @@ static int radeon_do_resume_cp(drm_device_t * dev)
1672 DRM_DEBUG("Starting radeon_do_resume_cp()\n"); 1668 DRM_DEBUG("Starting radeon_do_resume_cp()\n");
1673 1669
1674#if __OS_HAS_AGP 1670#if __OS_HAS_AGP
1675 if (!dev_priv->is_pci) { 1671 if (dev_priv->flags & CHIP_IS_AGP) {
1676 /* Turn off PCI GART */ 1672 /* Turn off PCI GART */
1677 radeon_set_pcigart(dev_priv, 0); 1673 radeon_set_pcigart(dev_priv, 0);
1678 } else 1674 } else
@@ -2103,7 +2099,7 @@ int radeon_cp_buffers(DRM_IOCTL_ARGS)
2103 return ret; 2099 return ret;
2104} 2100}
2105 2101
2106int radeon_driver_preinit(struct drm_device *dev, unsigned long flags) 2102int radeon_driver_load(struct drm_device *dev, unsigned long flags)
2107{ 2103{
2108 drm_radeon_private_t *dev_priv; 2104 drm_radeon_private_t *dev_priv;
2109 int ret = 0; 2105 int ret = 0;
@@ -2136,11 +2132,14 @@ int radeon_driver_preinit(struct drm_device *dev, unsigned long flags)
2136 dev_priv->flags |= CHIP_IS_PCIE; 2132 dev_priv->flags |= CHIP_IS_PCIE;
2137 2133
2138 DRM_DEBUG("%s card detected\n", 2134 DRM_DEBUG("%s card detected\n",
2139 ((dev_priv->flags & CHIP_IS_AGP) ? "AGP" : "PCI")); 2135 ((dev_priv->flags & CHIP_IS_AGP) ? "AGP" : (((dev_priv->flags & CHIP_IS_PCIE) ? "PCIE" : "PCI"))));
2140 return ret; 2136 return ret;
2141} 2137}
2142 2138
2143int radeon_presetup(struct drm_device *dev) 2139/* Create mappings for registers and framebuffer so userland doesn't necessarily
2140 * have to find them.
2141 */
2142int radeon_driver_firstopen(struct drm_device *dev)
2144{ 2143{
2145 int ret; 2144 int ret;
2146 drm_local_map_t *map; 2145 drm_local_map_t *map;
@@ -2161,12 +2160,11 @@ int radeon_presetup(struct drm_device *dev)
2161 return 0; 2160 return 0;
2162} 2161}
2163 2162
2164int radeon_driver_postcleanup(struct drm_device *dev) 2163int radeon_driver_unload(struct drm_device *dev)
2165{ 2164{
2166 drm_radeon_private_t *dev_priv = dev->dev_private; 2165 drm_radeon_private_t *dev_priv = dev->dev_private;
2167 2166
2168 DRM_DEBUG("\n"); 2167 DRM_DEBUG("\n");
2169
2170 drm_free(dev_priv, sizeof(*dev_priv), DRM_MEM_DRIVER); 2168 drm_free(dev_priv, sizeof(*dev_priv), DRM_MEM_DRIVER);
2171 2169
2172 dev->dev_private = NULL; 2170 dev->dev_private = NULL;
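
Throughout radeon_cp.c the old dev_priv->is_pci field gives way to tests on dev_priv->flags, so AGP and PCI Express cards can be told apart with the same mechanism. Hypothetical helpers expressing the checks used in the hunks above:

/* Sketch: bus-type tests on dev_priv->flags, as used in radeon_cp.c above.
 * Helper names are illustrative only. */
static inline int example_radeon_is_agp(const drm_radeon_private_t *dev_priv)
{
	return (dev_priv->flags & CHIP_IS_AGP) != 0;
}

static inline int example_radeon_is_pcie(const drm_radeon_private_t *dev_priv)
{
	return (dev_priv->flags & CHIP_IS_PCIE) != 0;
}
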
diff --git a/drivers/char/drm/radeon_drm.h b/drivers/char/drm/radeon_drm.h
index 1cd81a671a36..9c177a6b2a4c 100644
--- a/drivers/char/drm/radeon_drm.h
+++ b/drivers/char/drm/radeon_drm.h
@@ -624,6 +624,11 @@ typedef struct drm_radeon_indirect {
624 int discard; 624 int discard;
625} drm_radeon_indirect_t; 625} drm_radeon_indirect_t;
626 626
627/* enum for card type parameters */
628#define RADEON_CARD_PCI 0
629#define RADEON_CARD_AGP 1
630#define RADEON_CARD_PCIE 2
631
627/* 1.3: An ioctl to get parameters that aren't available to the 3d 632/* 1.3: An ioctl to get parameters that aren't available to the 3d
628 * client any other way. 633 * client any other way.
629 */ 634 */
@@ -640,6 +645,7 @@ typedef struct drm_radeon_indirect {
640#define RADEON_PARAM_SAREA_HANDLE 9 645#define RADEON_PARAM_SAREA_HANDLE 9
641#define RADEON_PARAM_GART_TEX_HANDLE 10 646#define RADEON_PARAM_GART_TEX_HANDLE 10
642#define RADEON_PARAM_SCRATCH_OFFSET 11 647#define RADEON_PARAM_SCRATCH_OFFSET 11
648#define RADEON_PARAM_CARD_TYPE 12
643 649
644typedef struct drm_radeon_getparam { 650typedef struct drm_radeon_getparam {
645 int param; 651 int param;
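
The header hunk above (interface 1.21 in the version history further down) lets clients ask which bus the card uses via RADEON_PARAM_CARD_TYPE. The getparam handler itself is not part of this excerpt; a plausible mapping from the chip flags onto the new card-type values would be:

/* Sketch only: map chip flags onto the RADEON_CARD_* values defined above.
 * The real getparam handler is not shown in this diff. */
static int example_radeon_card_type(const drm_radeon_private_t *dev_priv)
{
	if (dev_priv->flags & CHIP_IS_PCIE)
		return RADEON_CARD_PCIE;
	if (dev_priv->flags & CHIP_IS_AGP)
		return RADEON_CARD_AGP;
	return RADEON_CARD_PCI;
}
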
diff --git a/drivers/char/drm/radeon_drv.c b/drivers/char/drm/radeon_drv.c
index ee49670d8162..b04ed1b562b9 100644
--- a/drivers/char/drm/radeon_drv.c
+++ b/drivers/char/drm/radeon_drv.c
@@ -42,29 +42,15 @@ int radeon_no_wb;
42MODULE_PARM_DESC(no_wb, "Disable AGP writeback for scratch registers\n"); 42MODULE_PARM_DESC(no_wb, "Disable AGP writeback for scratch registers\n");
43module_param_named(no_wb, radeon_no_wb, int, 0444); 43module_param_named(no_wb, radeon_no_wb, int, 0444);
44 44
45static int postinit(struct drm_device *dev, unsigned long flags) 45static int dri_library_name(struct drm_device *dev, char *buf)
46{ 46{
47 DRM_INFO("Initialized %s %d.%d.%d %s on minor %d: %s\n", 47 drm_radeon_private_t *dev_priv = dev->dev_private;
48 DRIVER_NAME, 48 int family = dev_priv->flags & CHIP_FAMILY_MASK;
49 DRIVER_MAJOR,
50 DRIVER_MINOR,
51 DRIVER_PATCHLEVEL,
52 DRIVER_DATE, dev->primary.minor, pci_pretty_name(dev->pdev)
53 );
54 return 0;
55}
56
57static int version(drm_version_t * version)
58{
59 int len;
60 49
61 version->version_major = DRIVER_MAJOR; 50 return snprintf(buf, PAGE_SIZE, "%s\n",
62 version->version_minor = DRIVER_MINOR; 51 (family < CHIP_R200) ? "radeon" :
63 version->version_patchlevel = DRIVER_PATCHLEVEL; 52 ((family < CHIP_R300) ? "r200" :
64 DRM_COPY(version->name, DRIVER_NAME); 53 "r300"));
65 DRM_COPY(version->date, DRIVER_DATE);
66 DRM_COPY(version->desc, DRIVER_DESC);
67 return 0;
68} 54}
69 55
70static struct pci_device_id pciidlist[] = { 56static struct pci_device_id pciidlist[] = {
@@ -77,23 +63,22 @@ static struct drm_driver driver = {
77 DRIVER_HAVE_IRQ | DRIVER_HAVE_DMA | DRIVER_IRQ_SHARED | 63 DRIVER_HAVE_IRQ | DRIVER_HAVE_DMA | DRIVER_IRQ_SHARED |
78 DRIVER_IRQ_VBL, 64 DRIVER_IRQ_VBL,
79 .dev_priv_size = sizeof(drm_radeon_buf_priv_t), 65 .dev_priv_size = sizeof(drm_radeon_buf_priv_t),
80 .preinit = radeon_driver_preinit, 66 .load = radeon_driver_load,
81 .presetup = radeon_presetup, 67 .firstopen = radeon_driver_firstopen,
82 .postcleanup = radeon_driver_postcleanup, 68 .open = radeon_driver_open,
83 .prerelease = radeon_driver_prerelease, 69 .preclose = radeon_driver_preclose,
84 .pretakedown = radeon_driver_pretakedown, 70 .postclose = radeon_driver_postclose,
85 .open_helper = radeon_driver_open_helper, 71 .lastclose = radeon_driver_lastclose,
72 .unload = radeon_driver_unload,
86 .vblank_wait = radeon_driver_vblank_wait, 73 .vblank_wait = radeon_driver_vblank_wait,
74 .dri_library_name = dri_library_name,
87 .irq_preinstall = radeon_driver_irq_preinstall, 75 .irq_preinstall = radeon_driver_irq_preinstall,
88 .irq_postinstall = radeon_driver_irq_postinstall, 76 .irq_postinstall = radeon_driver_irq_postinstall,
89 .irq_uninstall = radeon_driver_irq_uninstall, 77 .irq_uninstall = radeon_driver_irq_uninstall,
90 .irq_handler = radeon_driver_irq_handler, 78 .irq_handler = radeon_driver_irq_handler,
91 .free_filp_priv = radeon_driver_free_filp_priv,
92 .reclaim_buffers = drm_core_reclaim_buffers, 79 .reclaim_buffers = drm_core_reclaim_buffers,
93 .get_map_ofs = drm_core_get_map_ofs, 80 .get_map_ofs = drm_core_get_map_ofs,
94 .get_reg_ofs = drm_core_get_reg_ofs, 81 .get_reg_ofs = drm_core_get_reg_ofs,
95 .postinit = postinit,
96 .version = version,
97 .ioctls = radeon_ioctls, 82 .ioctls = radeon_ioctls,
98 .dma_ioctl = radeon_cp_buffers, 83 .dma_ioctl = radeon_cp_buffers,
99 .fops = { 84 .fops = {
@@ -107,12 +92,19 @@ static struct drm_driver driver = {
107#ifdef CONFIG_COMPAT 92#ifdef CONFIG_COMPAT
108 .compat_ioctl = radeon_compat_ioctl, 93 .compat_ioctl = radeon_compat_ioctl,
109#endif 94#endif
110 } 95 },
111 , 96
112 .pci_driver = { 97 .pci_driver = {
113 .name = DRIVER_NAME, 98 .name = DRIVER_NAME,
114 .id_table = pciidlist, 99 .id_table = pciidlist,
115 } 100 },
101
102 .name = DRIVER_NAME,
103 .desc = DRIVER_DESC,
104 .date = DRIVER_DATE,
105 .major = DRIVER_MAJOR,
106 .minor = DRIVER_MINOR,
107 .patchlevel = DRIVER_PATCHLEVEL,
116}; 108};
117 109
118static int __init radeon_init(void) 110static int __init radeon_init(void)
diff --git a/drivers/char/drm/radeon_drv.h b/drivers/char/drm/radeon_drv.h
index d92ccee3e54c..498b19b1d641 100644
--- a/drivers/char/drm/radeon_drv.h
+++ b/drivers/char/drm/radeon_drv.h
@@ -38,7 +38,7 @@
38 38
39#define DRIVER_NAME "radeon" 39#define DRIVER_NAME "radeon"
40#define DRIVER_DESC "ATI Radeon" 40#define DRIVER_DESC "ATI Radeon"
41#define DRIVER_DATE "20050911" 41#define DRIVER_DATE "20051229"
42 42
43/* Interface history: 43/* Interface history:
44 * 44 *
@@ -73,7 +73,7 @@
73 * 1.11- Add packet R200_EMIT_RB3D_BLENDCOLOR to support GL_EXT_blend_color 73 * 1.11- Add packet R200_EMIT_RB3D_BLENDCOLOR to support GL_EXT_blend_color
74 * and GL_EXT_blend_[func|equation]_separate on r200 74 * and GL_EXT_blend_[func|equation]_separate on r200
75 * 1.12- Add R300 CP microcode support - this just loads the CP on r300 75 * 1.12- Add R300 CP microcode support - this just loads the CP on r300
76 * (No 3D support yet - just microcode loading) 76 * (No 3D support yet - just microcode loading).
77 * 1.13- Add packet R200_EMIT_TCL_POINT_SPRITE_CNTL for ARB_point_parameters 77 * 1.13- Add packet R200_EMIT_TCL_POINT_SPRITE_CNTL for ARB_point_parameters
78 * - Add hyperz support, add hyperz flags to clear ioctl. 78 * - Add hyperz support, add hyperz flags to clear ioctl.
79 * 1.14- Add support for color tiling 79 * 1.14- Add support for color tiling
@@ -88,14 +88,13 @@
88 * R200_EMIT_PP_TXFILTER_0-5, 2 more regs) and R200_EMIT_ATF_TFACTOR 88 * R200_EMIT_PP_TXFILTER_0-5, 2 more regs) and R200_EMIT_ATF_TFACTOR
89 * (replaces R200_EMIT_TFACTOR_0 (8 consts instead of 6) 89 * (replaces R200_EMIT_TFACTOR_0 (8 consts instead of 6)
90 * 1.19- Add support for gart table in FB memory and PCIE r300 90 * 1.19- Add support for gart table in FB memory and PCIE r300
91 * 1.20- Add support for r300 texrect
92 * 1.21- Add support for card type getparam
91 */ 93 */
92#define DRIVER_MAJOR 1 94#define DRIVER_MAJOR 1
93#define DRIVER_MINOR 19 95#define DRIVER_MINOR 21
94#define DRIVER_PATCHLEVEL 0 96#define DRIVER_PATCHLEVEL 0
95 97
96#define GET_RING_HEAD(dev_priv) DRM_READ32( (dev_priv)->ring_rptr, 0 )
97#define SET_RING_HEAD(dev_priv,val) DRM_WRITE32( (dev_priv)->ring_rptr, 0, (val) )
98
99/* 98/*
100 * Radeon chip families 99 * Radeon chip families
101 */ 100 */
@@ -103,8 +102,8 @@ enum radeon_family {
103 CHIP_R100, 102 CHIP_R100,
104 CHIP_RS100, 103 CHIP_RS100,
105 CHIP_RV100, 104 CHIP_RV100,
106 CHIP_R200,
107 CHIP_RV200, 105 CHIP_RV200,
106 CHIP_R200,
108 CHIP_RS200, 107 CHIP_RS200,
109 CHIP_R250, 108 CHIP_R250,
110 CHIP_RS250, 109 CHIP_RS250,
@@ -138,6 +137,9 @@ enum radeon_chip_flags {
138 CHIP_IS_PCIE = 0x00200000UL, 137 CHIP_IS_PCIE = 0x00200000UL,
139}; 138};
140 139
140#define GET_RING_HEAD(dev_priv) DRM_READ32( (dev_priv)->ring_rptr, 0 )
141#define SET_RING_HEAD(dev_priv,val) DRM_WRITE32( (dev_priv)->ring_rptr, 0, (val) )
142
141typedef struct drm_radeon_freelist { 143typedef struct drm_radeon_freelist {
142 unsigned int age; 144 unsigned int age;
143 drm_buf_t *buf; 145 drm_buf_t *buf;
@@ -245,8 +247,6 @@ typedef struct drm_radeon_private {
245 247
246 drm_radeon_depth_clear_t depth_clear; 248 drm_radeon_depth_clear_t depth_clear;
247 249
248 unsigned long fb_offset;
249 unsigned long mmio_offset;
250 unsigned long ring_offset; 250 unsigned long ring_offset;
251 unsigned long ring_rptr_offset; 251 unsigned long ring_rptr_offset;
252 unsigned long buffers_offset; 252 unsigned long buffers_offset;
@@ -273,7 +273,6 @@ typedef struct drm_radeon_private {
273 273
274 /* starting from here on, data is preserved accross an open */ 274 /* starting from here on, data is preserved accross an open */
275 uint32_t flags; /* see radeon_chip_flags */ 275 uint32_t flags; /* see radeon_chip_flags */
276 int is_pci;
277} drm_radeon_private_t; 276} drm_radeon_private_t;
278 277
279typedef struct drm_radeon_buf_priv { 278typedef struct drm_radeon_buf_priv {
@@ -330,17 +329,14 @@ extern irqreturn_t radeon_driver_irq_handler(DRM_IRQ_ARGS);
330extern void radeon_driver_irq_preinstall(drm_device_t * dev); 329extern void radeon_driver_irq_preinstall(drm_device_t * dev);
331extern void radeon_driver_irq_postinstall(drm_device_t * dev); 330extern void radeon_driver_irq_postinstall(drm_device_t * dev);
332extern void radeon_driver_irq_uninstall(drm_device_t * dev); 331extern void radeon_driver_irq_uninstall(drm_device_t * dev);
333extern void radeon_driver_prerelease(drm_device_t * dev, DRMFILE filp);
334extern void radeon_driver_pretakedown(drm_device_t * dev);
335extern int radeon_driver_open_helper(drm_device_t * dev,
336 drm_file_t * filp_priv);
337extern void radeon_driver_free_filp_priv(drm_device_t * dev,
338 drm_file_t * filp_priv);
339
340extern int radeon_preinit(struct drm_device *dev, unsigned long flags);
341extern int radeon_postinit(struct drm_device *dev, unsigned long flags);
342extern int radeon_postcleanup(struct drm_device *dev);
343 332
333extern int radeon_driver_load(struct drm_device *dev, unsigned long flags);
334extern int radeon_driver_unload(struct drm_device *dev);
335extern int radeon_driver_firstopen(struct drm_device *dev);
336extern void radeon_driver_preclose(drm_device_t * dev, DRMFILE filp);
337extern void radeon_driver_postclose(drm_device_t * dev, drm_file_t * filp);
338extern void radeon_driver_lastclose(drm_device_t * dev);
339extern int radeon_driver_open(drm_device_t * dev, drm_file_t * filp_priv);
344extern long radeon_compat_ioctl(struct file *filp, unsigned int cmd, 340extern long radeon_compat_ioctl(struct file *filp, unsigned int cmd,
345 unsigned long arg); 341 unsigned long arg);
346 342
@@ -364,6 +360,8 @@ extern int r300_do_cp_cmdbuf(drm_device_t * dev, DRMFILE filp,
364 */ 360 */
365 361
366#define RADEON_AGP_COMMAND 0x0f60 362#define RADEON_AGP_COMMAND 0x0f60
363#define RADEON_AGP_COMMAND_PCI_CONFIG 0x0060 /* offset in PCI config */
364# define RADEON_AGP_ENABLE (1<<8)
367#define RADEON_AUX_SCISSOR_CNTL 0x26f0 365#define RADEON_AUX_SCISSOR_CNTL 0x26f0
368# define RADEON_EXCLUSIVE_SCISSOR_0 (1 << 24) 366# define RADEON_EXCLUSIVE_SCISSOR_0 (1 << 24)
369# define RADEON_EXCLUSIVE_SCISSOR_1 (1 << 25) 367# define RADEON_EXCLUSIVE_SCISSOR_1 (1 << 25)
@@ -651,6 +649,8 @@ extern int r300_do_cp_cmdbuf(drm_device_t * dev, DRMFILE filp,
651 649
652#define RADEON_WAIT_UNTIL 0x1720 650#define RADEON_WAIT_UNTIL 0x1720
653# define RADEON_WAIT_CRTC_PFLIP (1 << 0) 651# define RADEON_WAIT_CRTC_PFLIP (1 << 0)
652# define RADEON_WAIT_2D_IDLE (1 << 14)
653# define RADEON_WAIT_3D_IDLE (1 << 15)
654# define RADEON_WAIT_2D_IDLECLEAN (1 << 16) 654# define RADEON_WAIT_2D_IDLECLEAN (1 << 16)
655# define RADEON_WAIT_3D_IDLECLEAN (1 << 17) 655# define RADEON_WAIT_3D_IDLECLEAN (1 << 17)
656# define RADEON_WAIT_HOST_IDLECLEAN (1 << 18) 656# define RADEON_WAIT_HOST_IDLECLEAN (1 << 18)
@@ -1105,7 +1105,6 @@ do { \
1105 write = 0; \ 1105 write = 0; \
1106 _tab += _i; \ 1106 _tab += _i; \
1107 } \ 1107 } \
1108 \
1109 while (_size > 0) { \ 1108 while (_size > 0) { \
1110 *(ring + write) = *_tab++; \ 1109 *(ring + write) = *_tab++; \
1111 write++; \ 1110 write++; \
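RADEON_WAIT_2D_IDLE and RADEON_WAIT_3D_IDLE join the existing *_IDLECLEAN bits for the RADEON_WAIT_UNTIL register. They are emitted the same way as the RADEON_WAIT_UNTIL_*_IDLE() helpers already in this header; the macro below is a hypothetical variant shown only to illustrate the encoding (like the real helpers, it must be used inside a BEGIN_RING()/ADVANCE_RING() pair):

/* Hypothetical helper: wait for both engines to drain, without the
 * cache-clean semantics of the *_IDLECLEAN bits. */
#define RADEON_WAIT_UNTIL_2D_3D_IDLE() do {                     \
        OUT_RING(CP_PACKET0(RADEON_WAIT_UNTIL, 0));             \
        OUT_RING(RADEON_WAIT_2D_IDLE | RADEON_WAIT_3D_IDLE);    \
} while (0)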
diff --git a/drivers/char/drm/radeon_state.c b/drivers/char/drm/radeon_state.c
index 231ac1438c69..7bc27516d425 100644
--- a/drivers/char/drm/radeon_state.c
+++ b/drivers/char/drm/radeon_state.c
@@ -1,5 +1,5 @@
1/* radeon_state.c -- State support for Radeon -*- linux-c -*- 1/* radeon_state.c -- State support for Radeon -*- linux-c -*- */
2 * 2/*
3 * Copyright 2000 VA Linux Systems, Inc., Fremont, California. 3 * Copyright 2000 VA Linux Systems, Inc., Fremont, California.
4 * All Rights Reserved. 4 * All Rights Reserved.
5 * 5 *
@@ -72,10 +72,7 @@ static __inline__ int radeon_check_and_fixup_packets(drm_radeon_private_t *
72 72
73 case RADEON_EMIT_PP_MISC: 73 case RADEON_EMIT_PP_MISC:
74 if (radeon_check_and_fixup_offset(dev_priv, filp_priv, 74 if (radeon_check_and_fixup_offset(dev_priv, filp_priv,
75 &data[(RADEON_RB3D_DEPTHOFFSET 75 &data[(RADEON_RB3D_DEPTHOFFSET - RADEON_PP_MISC) / 4])) {
76 -
77 RADEON_PP_MISC) /
78 4])) {
79 DRM_ERROR("Invalid depth buffer offset\n"); 76 DRM_ERROR("Invalid depth buffer offset\n");
80 return DRM_ERR(EINVAL); 77 return DRM_ERR(EINVAL);
81 } 78 }
@@ -83,10 +80,7 @@ static __inline__ int radeon_check_and_fixup_packets(drm_radeon_private_t *
83 80
84 case RADEON_EMIT_PP_CNTL: 81 case RADEON_EMIT_PP_CNTL:
85 if (radeon_check_and_fixup_offset(dev_priv, filp_priv, 82 if (radeon_check_and_fixup_offset(dev_priv, filp_priv,
86 &data[(RADEON_RB3D_COLOROFFSET 83 &data[(RADEON_RB3D_COLOROFFSET - RADEON_PP_CNTL) / 4])) {
87 -
88 RADEON_PP_CNTL) /
89 4])) {
90 DRM_ERROR("Invalid colour buffer offset\n"); 84 DRM_ERROR("Invalid colour buffer offset\n");
91 return DRM_ERR(EINVAL); 85 return DRM_ERR(EINVAL);
92 } 86 }
@@ -109,10 +103,7 @@ static __inline__ int radeon_check_and_fixup_packets(drm_radeon_private_t *
109 case RADEON_EMIT_PP_TXFILTER_1: 103 case RADEON_EMIT_PP_TXFILTER_1:
110 case RADEON_EMIT_PP_TXFILTER_2: 104 case RADEON_EMIT_PP_TXFILTER_2:
111 if (radeon_check_and_fixup_offset(dev_priv, filp_priv, 105 if (radeon_check_and_fixup_offset(dev_priv, filp_priv,
112 &data[(RADEON_PP_TXOFFSET_0 106 &data[(RADEON_PP_TXOFFSET_0 - RADEON_PP_TXFILTER_0) / 4])) {
113 -
114 RADEON_PP_TXFILTER_0) /
115 4])) {
116 DRM_ERROR("Invalid R100 texture offset\n"); 107 DRM_ERROR("Invalid R100 texture offset\n");
117 return DRM_ERR(EINVAL); 108 return DRM_ERR(EINVAL);
118 } 109 }
@@ -126,8 +117,9 @@ static __inline__ int radeon_check_and_fixup_packets(drm_radeon_private_t *
126 case R200_EMIT_PP_CUBIC_OFFSETS_5:{ 117 case R200_EMIT_PP_CUBIC_OFFSETS_5:{
127 int i; 118 int i;
128 for (i = 0; i < 5; i++) { 119 for (i = 0; i < 5; i++) {
129 if (radeon_check_and_fixup_offset 120 if (radeon_check_and_fixup_offset(dev_priv,
130 (dev_priv, filp_priv, &data[i])) { 121 filp_priv,
122 &data[i])) {
131 DRM_ERROR 123 DRM_ERROR
132 ("Invalid R200 cubic texture offset\n"); 124 ("Invalid R200 cubic texture offset\n");
133 return DRM_ERR(EINVAL); 125 return DRM_ERR(EINVAL);
@@ -239,8 +231,9 @@ static __inline__ int radeon_check_and_fixup_packets(drm_radeon_private_t *
239 231
240static __inline__ int radeon_check_and_fixup_packet3(drm_radeon_private_t * 232static __inline__ int radeon_check_and_fixup_packet3(drm_radeon_private_t *
241 dev_priv, 233 dev_priv,
242 drm_file_t * filp_priv, 234 drm_file_t *filp_priv,
243 drm_radeon_kcmd_buffer_t *cmdbuf, 235 drm_radeon_kcmd_buffer_t *
236 cmdbuf,
244 unsigned int *cmdsz) 237 unsigned int *cmdsz)
245{ 238{
246 u32 *cmd = (u32 *) cmdbuf->buf; 239 u32 *cmd = (u32 *) cmdbuf->buf;
@@ -555,7 +548,8 @@ static struct {
555 {R200_PP_TXOFFSET_4, 1, "R200_PP_TXOFFSET_4"}, 548 {R200_PP_TXOFFSET_4, 1, "R200_PP_TXOFFSET_4"},
556 {R200_PP_TXOFFSET_5, 1, "R200_PP_TXOFFSET_5"}, 549 {R200_PP_TXOFFSET_5, 1, "R200_PP_TXOFFSET_5"},
557 {R200_SE_VTE_CNTL, 1, "R200_SE_VTE_CNTL"}, 550 {R200_SE_VTE_CNTL, 1, "R200_SE_VTE_CNTL"},
558 {R200_SE_TCL_OUTPUT_VTX_COMP_SEL, 1, "R200_SE_TCL_OUTPUT_VTX_COMP_SEL"}, 551 {R200_SE_TCL_OUTPUT_VTX_COMP_SEL, 1,
552 "R200_SE_TCL_OUTPUT_VTX_COMP_SEL"},
559 {R200_PP_TAM_DEBUG3, 1, "R200_PP_TAM_DEBUG3"}, 553 {R200_PP_TAM_DEBUG3, 1, "R200_PP_TAM_DEBUG3"},
560 {R200_PP_CNTL_X, 1, "R200_PP_CNTL_X"}, 554 {R200_PP_CNTL_X, 1, "R200_PP_CNTL_X"},
561 {R200_RB3D_DEPTHXY_OFFSET, 1, "R200_RB3D_DEPTHXY_OFFSET"}, 555 {R200_RB3D_DEPTHXY_OFFSET, 1, "R200_RB3D_DEPTHXY_OFFSET"},
@@ -569,7 +563,7 @@ static struct {
569 {R200_SE_TCL_INPUT_VTX_VECTOR_ADDR_0, 4, 563 {R200_SE_TCL_INPUT_VTX_VECTOR_ADDR_0, 4,
570 "R200_SE_TCL_INPUT_VTX_VECTOR_ADDR_0"}, 564 "R200_SE_TCL_INPUT_VTX_VECTOR_ADDR_0"},
571 {R200_PP_CUBIC_FACES_0, 1, "R200_PP_CUBIC_FACES_0"}, /* 61 */ 565 {R200_PP_CUBIC_FACES_0, 1, "R200_PP_CUBIC_FACES_0"}, /* 61 */
572 {R200_PP_CUBIC_OFFSET_F1_0, 5, "R200_PP_CUBIC_OFFSET_F1_0"}, /* 62 */ 566 {R200_PP_CUBIC_OFFSET_F1_0, 5, "R200_PP_CUBIC_OFFSET_F1_0"}, /* 62 */
573 {R200_PP_CUBIC_FACES_1, 1, "R200_PP_CUBIC_FACES_1"}, 567 {R200_PP_CUBIC_FACES_1, 1, "R200_PP_CUBIC_FACES_1"},
574 {R200_PP_CUBIC_OFFSET_F1_1, 5, "R200_PP_CUBIC_OFFSET_F1_1"}, 568 {R200_PP_CUBIC_OFFSET_F1_1, 5, "R200_PP_CUBIC_OFFSET_F1_1"},
575 {R200_PP_CUBIC_FACES_2, 1, "R200_PP_CUBIC_FACES_2"}, 569 {R200_PP_CUBIC_FACES_2, 1, "R200_PP_CUBIC_FACES_2"},
@@ -592,7 +586,7 @@ static struct {
592 {RADEON_PP_CUBIC_FACES_2, 1, "RADEON_PP_CUBIC_FACES_2"}, 586 {RADEON_PP_CUBIC_FACES_2, 1, "RADEON_PP_CUBIC_FACES_2"},
593 {RADEON_PP_CUBIC_OFFSET_T2_0, 5, "RADEON_PP_CUBIC_OFFSET_T2_0"}, 587 {RADEON_PP_CUBIC_OFFSET_T2_0, 5, "RADEON_PP_CUBIC_OFFSET_T2_0"},
594 {R200_PP_TRI_PERF, 2, "R200_PP_TRI_PERF"}, 588 {R200_PP_TRI_PERF, 2, "R200_PP_TRI_PERF"},
595 {R200_PP_AFS_0, 32, "R200_PP_AFS_0"}, /* 85 */ 589 {R200_PP_AFS_0, 32, "R200_PP_AFS_0"}, /* 85 */
596 {R200_PP_AFS_1, 32, "R200_PP_AFS_1"}, 590 {R200_PP_AFS_1, 32, "R200_PP_AFS_1"},
597 {R200_PP_TFACTOR_0, 8, "R200_ATF_TFACTOR"}, 591 {R200_PP_TFACTOR_0, 8, "R200_ATF_TFACTOR"},
598 {R200_PP_TXFILTER_0, 8, "R200_PP_TXCTLALL_0"}, 592 {R200_PP_TXFILTER_0, 8, "R200_PP_TXCTLALL_0"},
@@ -985,8 +979,8 @@ static void radeon_cp_dispatch_clear(drm_device_t * dev,
985 * rendering a quad into just those buffers. Thus, we have to 979 * rendering a quad into just those buffers. Thus, we have to
986 * make sure the 3D engine is configured correctly. 980 * make sure the 3D engine is configured correctly.
987 */ 981 */
988 if ((dev_priv->microcode_version == UCODE_R200) && 982 else if ((dev_priv->microcode_version == UCODE_R200) &&
989 (flags & (RADEON_DEPTH | RADEON_STENCIL))) { 983 (flags & (RADEON_DEPTH | RADEON_STENCIL))) {
990 984
991 int tempPP_CNTL; 985 int tempPP_CNTL;
992 int tempRE_CNTL; 986 int tempRE_CNTL;
@@ -1637,6 +1631,14 @@ static int radeon_cp_dispatch_texture(DRMFILE filp,
1637 (u32 *) ((char *)dev->agp_buffer_map->handle + buf->offset); 1631 (u32 *) ((char *)dev->agp_buffer_map->handle + buf->offset);
1638 dwords = size / 4; 1632 dwords = size / 4;
1639 1633
1634#define RADEON_COPY_MT(_buf, _data, _width) \
1635 do { \
1636 if (DRM_COPY_FROM_USER(_buf, _data, (_width))) {\
1637 DRM_ERROR("EFAULT on pad, %d bytes\n", (_width)); \
1638 return DRM_ERR(EFAULT); \
1639 } \
1640 } while(0)
1641
1640 if (microtile) { 1642 if (microtile) {
1641 /* texture micro tiling in use, minimum texture width is thus 16 bytes. 1643 /* texture micro tiling in use, minimum texture width is thus 16 bytes.
1642 however, we cannot use blitter directly for texture width < 64 bytes, 1644 however, we cannot use blitter directly for texture width < 64 bytes,
@@ -1648,46 +1650,19 @@ static int radeon_cp_dispatch_texture(DRMFILE filp,
1648 from user space. */ 1650 from user space. */
1649 if (tex->height == 1) { 1651 if (tex->height == 1) {
1650 if (tex_width >= 64 || tex_width <= 16) { 1652 if (tex_width >= 64 || tex_width <= 16) {
1651 if (DRM_COPY_FROM_USER(buffer, data, 1653 RADEON_COPY_MT(buffer, data,
1652 tex_width * 1654 (int)(tex_width * sizeof(u32)));
1653 sizeof(u32))) {
1654 DRM_ERROR
1655 ("EFAULT on pad, %d bytes\n",
1656 tex_width);
1657 return DRM_ERR(EFAULT);
1658 }
1659 } else if (tex_width == 32) { 1655 } else if (tex_width == 32) {
1660 if (DRM_COPY_FROM_USER 1656 RADEON_COPY_MT(buffer, data, 16);
1661 (buffer, data, 16)) { 1657 RADEON_COPY_MT(buffer + 8,
1662 DRM_ERROR 1658 data + 16, 16);
1663 ("EFAULT on pad, %d bytes\n",
1664 tex_width);
1665 return DRM_ERR(EFAULT);
1666 }
1667 if (DRM_COPY_FROM_USER
1668 (buffer + 8, data + 16, 16)) {
1669 DRM_ERROR
1670 ("EFAULT on pad, %d bytes\n",
1671 tex_width);
1672 return DRM_ERR(EFAULT);
1673 }
1674 } 1659 }
1675 } else if (tex_width >= 64 || tex_width == 16) { 1660 } else if (tex_width >= 64 || tex_width == 16) {
1676 if (DRM_COPY_FROM_USER(buffer, data, 1661 RADEON_COPY_MT(buffer, data,
1677 dwords * sizeof(u32))) { 1662 (int)(dwords * sizeof(u32)));
1678 DRM_ERROR("EFAULT on data, %d dwords\n",
1679 dwords);
1680 return DRM_ERR(EFAULT);
1681 }
1682 } else if (tex_width < 16) { 1663 } else if (tex_width < 16) {
1683 for (i = 0; i < tex->height; i++) { 1664 for (i = 0; i < tex->height; i++) {
1684 if (DRM_COPY_FROM_USER 1665 RADEON_COPY_MT(buffer, data, tex_width);
1685 (buffer, data, tex_width)) {
1686 DRM_ERROR
1687 ("EFAULT on pad, %d bytes\n",
1688 tex_width);
1689 return DRM_ERR(EFAULT);
1690 }
1691 buffer += 4; 1666 buffer += 4;
1692 data += tex_width; 1667 data += tex_width;
1693 } 1668 }
@@ -1695,37 +1670,13 @@ static int radeon_cp_dispatch_texture(DRMFILE filp,
1695 /* TODO: make sure this works when not fitting in one buffer 1670 /* TODO: make sure this works when not fitting in one buffer
1696 (i.e. 32bytes x 2048...) */ 1671 (i.e. 32bytes x 2048...) */
1697 for (i = 0; i < tex->height; i += 2) { 1672 for (i = 0; i < tex->height; i += 2) {
1698 if (DRM_COPY_FROM_USER 1673 RADEON_COPY_MT(buffer, data, 16);
1699 (buffer, data, 16)) {
1700 DRM_ERROR
1701 ("EFAULT on pad, %d bytes\n",
1702 tex_width);
1703 return DRM_ERR(EFAULT);
1704 }
1705 data += 16; 1674 data += 16;
1706 if (DRM_COPY_FROM_USER 1675 RADEON_COPY_MT(buffer + 8, data, 16);
1707 (buffer + 8, data, 16)) {
1708 DRM_ERROR
1709 ("EFAULT on pad, %d bytes\n",
1710 tex_width);
1711 return DRM_ERR(EFAULT);
1712 }
1713 data += 16; 1676 data += 16;
1714 if (DRM_COPY_FROM_USER 1677 RADEON_COPY_MT(buffer + 4, data, 16);
1715 (buffer + 4, data, 16)) {
1716 DRM_ERROR
1717 ("EFAULT on pad, %d bytes\n",
1718 tex_width);
1719 return DRM_ERR(EFAULT);
1720 }
1721 data += 16; 1678 data += 16;
1722 if (DRM_COPY_FROM_USER 1679 RADEON_COPY_MT(buffer + 12, data, 16);
1723 (buffer + 12, data, 16)) {
1724 DRM_ERROR
1725 ("EFAULT on pad, %d bytes\n",
1726 tex_width);
1727 return DRM_ERR(EFAULT);
1728 }
1729 data += 16; 1680 data += 16;
1730 buffer += 16; 1681 buffer += 16;
1731 } 1682 }
@@ -1735,31 +1686,22 @@ static int radeon_cp_dispatch_texture(DRMFILE filp,
1735 /* Texture image width is larger than the minimum, so we 1686 /* Texture image width is larger than the minimum, so we
1736 * can upload it directly. 1687 * can upload it directly.
1737 */ 1688 */
1738 if (DRM_COPY_FROM_USER(buffer, data, 1689 RADEON_COPY_MT(buffer, data,
1739 dwords * sizeof(u32))) { 1690 (int)(dwords * sizeof(u32)));
1740 DRM_ERROR("EFAULT on data, %d dwords\n",
1741 dwords);
1742 return DRM_ERR(EFAULT);
1743 }
1744 } else { 1691 } else {
1745 /* Texture image width is less than the minimum, so we 1692 /* Texture image width is less than the minimum, so we
1746 * need to pad out each image scanline to the minimum 1693 * need to pad out each image scanline to the minimum
1747 * width. 1694 * width.
1748 */ 1695 */
1749 for (i = 0; i < tex->height; i++) { 1696 for (i = 0; i < tex->height; i++) {
1750 if (DRM_COPY_FROM_USER 1697 RADEON_COPY_MT(buffer, data, tex_width);
1751 (buffer, data, tex_width)) {
1752 DRM_ERROR
1753 ("EFAULT on pad, %d bytes\n",
1754 tex_width);
1755 return DRM_ERR(EFAULT);
1756 }
1757 buffer += 8; 1698 buffer += 8;
1758 data += tex_width; 1699 data += tex_width;
1759 } 1700 }
1760 } 1701 }
1761 } 1702 }
1762 1703
1704#undef RADEON_COPY_MT
1763 buf->filp = filp; 1705 buf->filp = filp;
1764 buf->used = size; 1706 buf->used = size;
1765 offset = dev_priv->gart_buffers_offset + buf->offset; 1707 offset = dev_priv->gart_buffers_offset + buf->offset;
@@ -1821,7 +1763,7 @@ static void radeon_cp_dispatch_stipple(drm_device_t * dev, u32 * stipple)
1821} 1763}
1822 1764
1823static void radeon_apply_surface_regs(int surf_index, 1765static void radeon_apply_surface_regs(int surf_index,
1824 drm_radeon_private_t * dev_priv) 1766 drm_radeon_private_t *dev_priv)
1825{ 1767{
1826 if (!dev_priv->mmio) 1768 if (!dev_priv->mmio)
1827 return; 1769 return;
@@ -1847,8 +1789,8 @@ static void radeon_apply_surface_regs(int surf_index,
1847 * freed, we suddenly need two surfaces to store A and C, which might 1789 * freed, we suddenly need two surfaces to store A and C, which might
1848 * not always be available. 1790 * not always be available.
1849 */ 1791 */
1850static int alloc_surface(drm_radeon_surface_alloc_t * new, 1792static int alloc_surface(drm_radeon_surface_alloc_t *new,
1851 drm_radeon_private_t * dev_priv, DRMFILE filp) 1793 drm_radeon_private_t *dev_priv, DRMFILE filp)
1852{ 1794{
1853 struct radeon_virt_surface *s; 1795 struct radeon_virt_surface *s;
1854 int i; 1796 int i;
@@ -2158,6 +2100,11 @@ static int radeon_cp_vertex(DRM_IOCTL_ARGS)
2158 2100
2159 LOCK_TEST_WITH_RETURN(dev, filp); 2101 LOCK_TEST_WITH_RETURN(dev, filp);
2160 2102
2103 if (!dev_priv) {
2104 DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
2105 return DRM_ERR(EINVAL);
2106 }
2107
2161 DRM_GET_PRIV_WITH_RETURN(filp_priv, filp); 2108 DRM_GET_PRIV_WITH_RETURN(filp_priv, filp);
2162 2109
2163 DRM_COPY_FROM_USER_IOCTL(vertex, (drm_radeon_vertex_t __user *) data, 2110 DRM_COPY_FROM_USER_IOCTL(vertex, (drm_radeon_vertex_t __user *) data,
@@ -2596,9 +2543,9 @@ static int radeon_emit_packets(drm_radeon_private_t * dev_priv,
2596 return 0; 2543 return 0;
2597} 2544}
2598 2545
2599static __inline__ int radeon_emit_scalars(drm_radeon_private_t * dev_priv, 2546static __inline__ int radeon_emit_scalars(drm_radeon_private_t *dev_priv,
2600 drm_radeon_cmd_header_t header, 2547 drm_radeon_cmd_header_t header,
2601 drm_radeon_kcmd_buffer_t * cmdbuf) 2548 drm_radeon_kcmd_buffer_t *cmdbuf)
2602{ 2549{
2603 int sz = header.scalars.count; 2550 int sz = header.scalars.count;
2604 int start = header.scalars.offset; 2551 int start = header.scalars.offset;
@@ -2618,9 +2565,9 @@ static __inline__ int radeon_emit_scalars(drm_radeon_private_t * dev_priv,
2618 2565
2619/* God this is ugly 2566/* God this is ugly
2620 */ 2567 */
2621static __inline__ int radeon_emit_scalars2(drm_radeon_private_t * dev_priv, 2568static __inline__ int radeon_emit_scalars2(drm_radeon_private_t *dev_priv,
2622 drm_radeon_cmd_header_t header, 2569 drm_radeon_cmd_header_t header,
2623 drm_radeon_kcmd_buffer_t * cmdbuf) 2570 drm_radeon_kcmd_buffer_t *cmdbuf)
2624{ 2571{
2625 int sz = header.scalars.count; 2572 int sz = header.scalars.count;
2626 int start = ((unsigned int)header.scalars.offset) + 0x100; 2573 int start = ((unsigned int)header.scalars.offset) + 0x100;
@@ -2638,9 +2585,9 @@ static __inline__ int radeon_emit_scalars2(drm_radeon_private_t * dev_priv,
2638 return 0; 2585 return 0;
2639} 2586}
2640 2587
2641static __inline__ int radeon_emit_vectors(drm_radeon_private_t * dev_priv, 2588static __inline__ int radeon_emit_vectors(drm_radeon_private_t *dev_priv,
2642 drm_radeon_cmd_header_t header, 2589 drm_radeon_cmd_header_t header,
2643 drm_radeon_kcmd_buffer_t * cmdbuf) 2590 drm_radeon_kcmd_buffer_t *cmdbuf)
2644{ 2591{
2645 int sz = header.vectors.count; 2592 int sz = header.vectors.count;
2646 int start = header.vectors.offset; 2593 int start = header.vectors.offset;
@@ -2685,8 +2632,8 @@ static int radeon_emit_packet3(drm_device_t * dev,
2685 return 0; 2632 return 0;
2686} 2633}
2687 2634
2688static int radeon_emit_packet3_cliprect(drm_device_t * dev, 2635static int radeon_emit_packet3_cliprect(drm_device_t *dev,
2689 drm_file_t * filp_priv, 2636 drm_file_t *filp_priv,
2690 drm_radeon_kcmd_buffer_t *cmdbuf, 2637 drm_radeon_kcmd_buffer_t *cmdbuf,
2691 int orig_nbox) 2638 int orig_nbox)
2692{ 2639{
@@ -2818,7 +2765,8 @@ static int radeon_cp_cmdbuf(DRM_IOCTL_ARGS)
2818 kbuf = drm_alloc(cmdbuf.bufsz, DRM_MEM_DRIVER); 2765 kbuf = drm_alloc(cmdbuf.bufsz, DRM_MEM_DRIVER);
2819 if (kbuf == NULL) 2766 if (kbuf == NULL)
2820 return DRM_ERR(ENOMEM); 2767 return DRM_ERR(ENOMEM);
2821 if (DRM_COPY_FROM_USER(kbuf, (void __user *)cmdbuf.buf, cmdbuf.bufsz)) { 2768 if (DRM_COPY_FROM_USER(kbuf, (void __user *)cmdbuf.buf,
2769 cmdbuf.bufsz)) {
2822 drm_free(kbuf, orig_bufsz, DRM_MEM_DRIVER); 2770 drm_free(kbuf, orig_bufsz, DRM_MEM_DRIVER);
2823 return DRM_ERR(EFAULT); 2771 return DRM_ERR(EFAULT);
2824 } 2772 }
@@ -2981,7 +2929,7 @@ static int radeon_cp_getparam(DRM_IOCTL_ARGS)
2981 value = dev_priv->gart_vm_start; 2929 value = dev_priv->gart_vm_start;
2982 break; 2930 break;
2983 case RADEON_PARAM_REGISTER_HANDLE: 2931 case RADEON_PARAM_REGISTER_HANDLE:
2984 value = dev_priv->mmio_offset; 2932 value = dev_priv->mmio->offset;
2985 break; 2933 break;
2986 case RADEON_PARAM_STATUS_HANDLE: 2934 case RADEON_PARAM_STATUS_HANDLE:
2987 value = dev_priv->ring_rptr_offset; 2935 value = dev_priv->ring_rptr_offset;
@@ -3004,6 +2952,15 @@ static int radeon_cp_getparam(DRM_IOCTL_ARGS)
3004 case RADEON_PARAM_GART_TEX_HANDLE: 2952 case RADEON_PARAM_GART_TEX_HANDLE:
3005 value = dev_priv->gart_textures_offset; 2953 value = dev_priv->gart_textures_offset;
3006 break; 2954 break;
2955
2956 case RADEON_PARAM_CARD_TYPE:
2957 if (dev_priv->flags & CHIP_IS_PCIE)
2958 value = RADEON_CARD_PCIE;
2959 else if (dev_priv->flags & CHIP_IS_AGP)
2960 value = RADEON_CARD_AGP;
2961 else
2962 value = RADEON_CARD_PCI;
2963 break;
3007 default: 2964 default:
3008 return DRM_ERR(EINVAL); 2965 return DRM_ERR(EINVAL);
3009 } 2966 }
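RADEON_PARAM_CARD_TYPE gives user space a direct answer to the PCI/AGP/PCIE question instead of guessing from the bus ID. A minimal user-space sketch follows; the RADEON_CARD_* and RADEON_PARAM_CARD_TYPE names come from radeon_drm.h in this same merge, while the header path and the error handling are assumptions:

#include <stdio.h>
#include <sys/ioctl.h>
#include <drm/radeon_drm.h>     /* path assumed; also shipped with libdrm */

/* Query the card type on an already-open, authenticated DRM fd. */
static int radeon_query_card_type(int fd)
{
        drm_radeon_getparam_t gp;
        int value = 0;

        gp.param = RADEON_PARAM_CARD_TYPE;
        gp.value = &value;      /* the kernel copies an int back here */

        if (ioctl(fd, DRM_IOCTL_RADEON_GETPARAM, &gp) != 0)
                return -1;      /* EINVAL from DRMs older than 1.21 */

        printf("card type: %s\n",
               value == RADEON_CARD_PCIE ? "PCIE" :
               value == RADEON_CARD_AGP ? "AGP" : "PCI");
        return value;
}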
@@ -3066,10 +3023,11 @@ static int radeon_cp_setparam(DRM_IOCTL_ARGS)
3066/* When a client dies: 3023/* When a client dies:
3067 * - Check for and clean up flipped page state 3024 * - Check for and clean up flipped page state
3068 * - Free any alloced GART memory. 3025 * - Free any alloced GART memory.
3026 * - Free any alloced radeon surfaces.
3069 * 3027 *
3070 * DRM infrastructure takes care of reclaiming dma buffers. 3028 * DRM infrastructure takes care of reclaiming dma buffers.
3071 */ 3029 */
3072void radeon_driver_prerelease(drm_device_t * dev, DRMFILE filp) 3030void radeon_driver_preclose(drm_device_t * dev, DRMFILE filp)
3073{ 3031{
3074 if (dev->dev_private) { 3032 if (dev->dev_private) {
3075 drm_radeon_private_t *dev_priv = dev->dev_private; 3033 drm_radeon_private_t *dev_priv = dev->dev_private;
@@ -3082,16 +3040,17 @@ void radeon_driver_prerelease(drm_device_t * dev, DRMFILE filp)
3082 } 3040 }
3083} 3041}
3084 3042
3085void radeon_driver_pretakedown(drm_device_t * dev) 3043void radeon_driver_lastclose(drm_device_t * dev)
3086{ 3044{
3087 radeon_do_release(dev); 3045 radeon_do_release(dev);
3088} 3046}
3089 3047
3090int radeon_driver_open_helper(drm_device_t * dev, drm_file_t * filp_priv) 3048int radeon_driver_open(drm_device_t * dev, drm_file_t * filp_priv)
3091{ 3049{
3092 drm_radeon_private_t *dev_priv = dev->dev_private; 3050 drm_radeon_private_t *dev_priv = dev->dev_private;
3093 struct drm_radeon_driver_file_fields *radeon_priv; 3051 struct drm_radeon_driver_file_fields *radeon_priv;
3094 3052
3053 DRM_DEBUG("\n");
3095 radeon_priv = 3054 radeon_priv =
3096 (struct drm_radeon_driver_file_fields *) 3055 (struct drm_radeon_driver_file_fields *)
3097 drm_alloc(sizeof(*radeon_priv), DRM_MEM_FILES); 3056 drm_alloc(sizeof(*radeon_priv), DRM_MEM_FILES);
@@ -3100,6 +3059,7 @@ int radeon_driver_open_helper(drm_device_t * dev, drm_file_t * filp_priv)
3100 return -ENOMEM; 3059 return -ENOMEM;
3101 3060
3102 filp_priv->driver_priv = radeon_priv; 3061 filp_priv->driver_priv = radeon_priv;
3062
3103 if (dev_priv) 3063 if (dev_priv)
3104 radeon_priv->radeon_fb_delta = dev_priv->fb_location; 3064 radeon_priv->radeon_fb_delta = dev_priv->fb_location;
3105 else 3065 else
@@ -3107,7 +3067,7 @@ int radeon_driver_open_helper(drm_device_t * dev, drm_file_t * filp_priv)
3107 return 0; 3067 return 0;
3108} 3068}
3109 3069
3110void radeon_driver_free_filp_priv(drm_device_t * dev, drm_file_t * filp_priv) 3070void radeon_driver_postclose(drm_device_t * dev, drm_file_t * filp_priv)
3111{ 3071{
3112 struct drm_radeon_driver_file_fields *radeon_priv = 3072 struct drm_radeon_driver_file_fields *radeon_priv =
3113 filp_priv->driver_priv; 3073 filp_priv->driver_priv;
@@ -3116,33 +3076,33 @@ void radeon_driver_free_filp_priv(drm_device_t * dev, drm_file_t * filp_priv)
3116} 3076}
3117 3077
3118drm_ioctl_desc_t radeon_ioctls[] = { 3078drm_ioctl_desc_t radeon_ioctls[] = {
3119 [DRM_IOCTL_NR(DRM_RADEON_CP_INIT)] = {radeon_cp_init, 1, 1}, 3079 [DRM_IOCTL_NR(DRM_RADEON_CP_INIT)] = {radeon_cp_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
3120 [DRM_IOCTL_NR(DRM_RADEON_CP_START)] = {radeon_cp_start, 1, 1}, 3080 [DRM_IOCTL_NR(DRM_RADEON_CP_START)] = {radeon_cp_start, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
3121 [DRM_IOCTL_NR(DRM_RADEON_CP_STOP)] = {radeon_cp_stop, 1, 1}, 3081 [DRM_IOCTL_NR(DRM_RADEON_CP_STOP)] = {radeon_cp_stop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
3122 [DRM_IOCTL_NR(DRM_RADEON_CP_RESET)] = {radeon_cp_reset, 1, 1}, 3082 [DRM_IOCTL_NR(DRM_RADEON_CP_RESET)] = {radeon_cp_reset, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
3123 [DRM_IOCTL_NR(DRM_RADEON_CP_IDLE)] = {radeon_cp_idle, 1, 0}, 3083 [DRM_IOCTL_NR(DRM_RADEON_CP_IDLE)] = {radeon_cp_idle, DRM_AUTH},
3124 [DRM_IOCTL_NR(DRM_RADEON_CP_RESUME)] = {radeon_cp_resume, 1, 0}, 3084 [DRM_IOCTL_NR(DRM_RADEON_CP_RESUME)] = {radeon_cp_resume, DRM_AUTH},
3125 [DRM_IOCTL_NR(DRM_RADEON_RESET)] = {radeon_engine_reset, 1, 0}, 3085 [DRM_IOCTL_NR(DRM_RADEON_RESET)] = {radeon_engine_reset, DRM_AUTH},
3126 [DRM_IOCTL_NR(DRM_RADEON_FULLSCREEN)] = {radeon_fullscreen, 1, 0}, 3086 [DRM_IOCTL_NR(DRM_RADEON_FULLSCREEN)] = {radeon_fullscreen, DRM_AUTH},
3127 [DRM_IOCTL_NR(DRM_RADEON_SWAP)] = {radeon_cp_swap, 1, 0}, 3087 [DRM_IOCTL_NR(DRM_RADEON_SWAP)] = {radeon_cp_swap, DRM_AUTH},
3128 [DRM_IOCTL_NR(DRM_RADEON_CLEAR)] = {radeon_cp_clear, 1, 0}, 3088 [DRM_IOCTL_NR(DRM_RADEON_CLEAR)] = {radeon_cp_clear, DRM_AUTH},
3129 [DRM_IOCTL_NR(DRM_RADEON_VERTEX)] = {radeon_cp_vertex, 1, 0}, 3089 [DRM_IOCTL_NR(DRM_RADEON_VERTEX)] = {radeon_cp_vertex, DRM_AUTH},
3130 [DRM_IOCTL_NR(DRM_RADEON_INDICES)] = {radeon_cp_indices, 1, 0}, 3090 [DRM_IOCTL_NR(DRM_RADEON_INDICES)] = {radeon_cp_indices, DRM_AUTH},
3131 [DRM_IOCTL_NR(DRM_RADEON_TEXTURE)] = {radeon_cp_texture, 1, 0}, 3091 [DRM_IOCTL_NR(DRM_RADEON_TEXTURE)] = {radeon_cp_texture, DRM_AUTH},
3132 [DRM_IOCTL_NR(DRM_RADEON_STIPPLE)] = {radeon_cp_stipple, 1, 0}, 3092 [DRM_IOCTL_NR(DRM_RADEON_STIPPLE)] = {radeon_cp_stipple, DRM_AUTH},
3133 [DRM_IOCTL_NR(DRM_RADEON_INDIRECT)] = {radeon_cp_indirect, 1, 1}, 3093 [DRM_IOCTL_NR(DRM_RADEON_INDIRECT)] = {radeon_cp_indirect, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
3134 [DRM_IOCTL_NR(DRM_RADEON_VERTEX2)] = {radeon_cp_vertex2, 1, 0}, 3094 [DRM_IOCTL_NR(DRM_RADEON_VERTEX2)] = {radeon_cp_vertex2, DRM_AUTH},
3135 [DRM_IOCTL_NR(DRM_RADEON_CMDBUF)] = {radeon_cp_cmdbuf, 1, 0}, 3095 [DRM_IOCTL_NR(DRM_RADEON_CMDBUF)] = {radeon_cp_cmdbuf, DRM_AUTH},
3136 [DRM_IOCTL_NR(DRM_RADEON_GETPARAM)] = {radeon_cp_getparam, 1, 0}, 3096 [DRM_IOCTL_NR(DRM_RADEON_GETPARAM)] = {radeon_cp_getparam, DRM_AUTH},
3137 [DRM_IOCTL_NR(DRM_RADEON_FLIP)] = {radeon_cp_flip, 1, 0}, 3097 [DRM_IOCTL_NR(DRM_RADEON_FLIP)] = {radeon_cp_flip, DRM_AUTH},
3138 [DRM_IOCTL_NR(DRM_RADEON_ALLOC)] = {radeon_mem_alloc, 1, 0}, 3098 [DRM_IOCTL_NR(DRM_RADEON_ALLOC)] = {radeon_mem_alloc, DRM_AUTH},
3139 [DRM_IOCTL_NR(DRM_RADEON_FREE)] = {radeon_mem_free, 1, 0}, 3099 [DRM_IOCTL_NR(DRM_RADEON_FREE)] = {radeon_mem_free, DRM_AUTH},
3140 [DRM_IOCTL_NR(DRM_RADEON_INIT_HEAP)] = {radeon_mem_init_heap, 1, 1}, 3100 [DRM_IOCTL_NR(DRM_RADEON_INIT_HEAP)] = {radeon_mem_init_heap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
3141 [DRM_IOCTL_NR(DRM_RADEON_IRQ_EMIT)] = {radeon_irq_emit, 1, 0}, 3101 [DRM_IOCTL_NR(DRM_RADEON_IRQ_EMIT)] = {radeon_irq_emit, DRM_AUTH},
3142 [DRM_IOCTL_NR(DRM_RADEON_IRQ_WAIT)] = {radeon_irq_wait, 1, 0}, 3102 [DRM_IOCTL_NR(DRM_RADEON_IRQ_WAIT)] = {radeon_irq_wait, DRM_AUTH},
3143 [DRM_IOCTL_NR(DRM_RADEON_SETPARAM)] = {radeon_cp_setparam, 1, 0}, 3103 [DRM_IOCTL_NR(DRM_RADEON_SETPARAM)] = {radeon_cp_setparam, DRM_AUTH},
3144 [DRM_IOCTL_NR(DRM_RADEON_SURF_ALLOC)] = {radeon_surface_alloc, 1, 0}, 3104 [DRM_IOCTL_NR(DRM_RADEON_SURF_ALLOC)] = {radeon_surface_alloc, DRM_AUTH},
3145 [DRM_IOCTL_NR(DRM_RADEON_SURF_FREE)] = {radeon_surface_free, 1, 0} 3105 [DRM_IOCTL_NR(DRM_RADEON_SURF_FREE)] = {radeon_surface_free, DRM_AUTH}
3146}; 3106};
3147 3107
3148int radeon_max_ioctl = DRM_ARRAY_SIZE(radeon_ioctls); 3108int radeon_max_ioctl = DRM_ARRAY_SIZE(radeon_ioctls);
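The ioctl table now carries DRM_AUTH/DRM_MASTER/DRM_ROOT_ONLY flags instead of the old pair of auth/root integers, which lets the DRM core perform one generic permission check before dispatching. The helper below is only a sketch of that check; the drm_file_t field names are assumptions based on the existing authentication code, not a copy of the core's implementation:

#include "drmP.h"

/* Illustrative permission test applied per ioctl before calling ->func. */
static int radeon_check_ioctl_flags(drm_ioctl_desc_t *ioctl, drm_file_t *priv)
{
        if ((ioctl->flags & DRM_ROOT_ONLY) && !capable(CAP_SYS_ADMIN))
                return DRM_ERR(EACCES);
        if ((ioctl->flags & DRM_AUTH) && !priv->authenticated)
                return DRM_ERR(EACCES);
        if ((ioctl->flags & DRM_MASTER) && !priv->master)
                return DRM_ERR(EACCES);
        return 0;       /* caller may now invoke ioctl->func */
}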
diff --git a/drivers/char/drm/savage_bci.c b/drivers/char/drm/savage_bci.c
index 6d10515795cc..0d426deeefec 100644
--- a/drivers/char/drm/savage_bci.c
+++ b/drivers/char/drm/savage_bci.c
@@ -533,16 +533,32 @@ static void savage_fake_dma_flush(drm_savage_private_t * dev_priv)
533 dev_priv->first_dma_page = dev_priv->current_dma_page = 0; 533 dev_priv->first_dma_page = dev_priv->current_dma_page = 0;
534} 534}
535 535
536int savage_driver_load(drm_device_t *dev, unsigned long chipset)
537{
538 drm_savage_private_t *dev_priv;
539
540 dev_priv = drm_alloc(sizeof(drm_savage_private_t), DRM_MEM_DRIVER);
541 if (dev_priv == NULL)
542 return DRM_ERR(ENOMEM);
543
544 memset(dev_priv, 0, sizeof(drm_savage_private_t));
545 dev->dev_private = (void *)dev_priv;
546
547 dev_priv->chipset = (enum savage_family)chipset;
548
549 return 0;
550}
551
552
536/* 553/*
537 * Initialize mappings. On Savage4 and SavageIX the alignment 554 * Initialize mappings. On Savage4 and SavageIX the alignment
538 * and size of the aperture is not suitable for automatic MTRR setup 555 * and size of the aperture is not suitable for automatic MTRR setup
539 * in drm_addmap. Therefore we do it manually before the maps are 556 * in drm_addmap. Therefore we add them manually before the maps are
540 * initialized. We also need to take care of deleting the MTRRs in 557 * initialized, and tear them down on last close.
541 * postcleanup.
542 */ 558 */
543int savage_preinit(drm_device_t * dev, unsigned long chipset) 559int savage_driver_firstopen(drm_device_t *dev)
544{ 560{
545 drm_savage_private_t *dev_priv; 561 drm_savage_private_t *dev_priv = dev->dev_private;
546 unsigned long mmio_base, fb_base, fb_size, aperture_base; 562 unsigned long mmio_base, fb_base, fb_size, aperture_base;
547 /* fb_rsrc and aper_rsrc aren't really used currently, but still exist 563 /* fb_rsrc and aper_rsrc aren't really used currently, but still exist
548 * in case we decide we need information on the BAR for BSD in the 564 * in case we decide we need information on the BAR for BSD in the
@@ -551,14 +567,6 @@ int savage_preinit(drm_device_t * dev, unsigned long chipset)
551 unsigned int fb_rsrc, aper_rsrc; 567 unsigned int fb_rsrc, aper_rsrc;
552 int ret = 0; 568 int ret = 0;
553 569
554 dev_priv = drm_alloc(sizeof(drm_savage_private_t), DRM_MEM_DRIVER);
555 if (dev_priv == NULL)
556 return DRM_ERR(ENOMEM);
557
558 memset(dev_priv, 0, sizeof(drm_savage_private_t));
559 dev->dev_private = (void *)dev_priv;
560 dev_priv->chipset = (enum savage_family)chipset;
561
562 dev_priv->mtrr[0].handle = -1; 570 dev_priv->mtrr[0].handle = -1;
563 dev_priv->mtrr[1].handle = -1; 571 dev_priv->mtrr[1].handle = -1;
564 dev_priv->mtrr[2].handle = -1; 572 dev_priv->mtrr[2].handle = -1;
@@ -576,26 +584,24 @@ int savage_preinit(drm_device_t * dev, unsigned long chipset)
576 dev_priv->mtrr[0].base = fb_base; 584 dev_priv->mtrr[0].base = fb_base;
577 dev_priv->mtrr[0].size = 0x01000000; 585 dev_priv->mtrr[0].size = 0x01000000;
578 dev_priv->mtrr[0].handle = 586 dev_priv->mtrr[0].handle =
579 mtrr_add(dev_priv->mtrr[0].base, 587 drm_mtrr_add(dev_priv->mtrr[0].base,
580 dev_priv->mtrr[0].size, MTRR_TYPE_WRCOMB, 588 dev_priv->mtrr[0].size, DRM_MTRR_WC);
581 1);
582 dev_priv->mtrr[1].base = fb_base + 0x02000000; 589 dev_priv->mtrr[1].base = fb_base + 0x02000000;
583 dev_priv->mtrr[1].size = 0x02000000; 590 dev_priv->mtrr[1].size = 0x02000000;
584 dev_priv->mtrr[1].handle = 591 dev_priv->mtrr[1].handle =
585 mtrr_add(dev_priv->mtrr[1].base, 592 drm_mtrr_add(dev_priv->mtrr[1].base,
586 dev_priv->mtrr[1].size, MTRR_TYPE_WRCOMB, 593 dev_priv->mtrr[1].size, DRM_MTRR_WC);
587 1);
588 dev_priv->mtrr[2].base = fb_base + 0x04000000; 594 dev_priv->mtrr[2].base = fb_base + 0x04000000;
589 dev_priv->mtrr[2].size = 0x04000000; 595 dev_priv->mtrr[2].size = 0x04000000;
590 dev_priv->mtrr[2].handle = 596 dev_priv->mtrr[2].handle =
591 mtrr_add(dev_priv->mtrr[2].base, 597 drm_mtrr_add(dev_priv->mtrr[2].base,
592 dev_priv->mtrr[2].size, MTRR_TYPE_WRCOMB, 598 dev_priv->mtrr[2].size, DRM_MTRR_WC);
593 1);
594 } else { 599 } else {
595 DRM_ERROR("strange pci_resource_len %08lx\n", 600 DRM_ERROR("strange pci_resource_len %08lx\n",
596 drm_get_resource_len(dev, 0)); 601 drm_get_resource_len(dev, 0));
597 } 602 }
598 } else if (chipset != S3_SUPERSAVAGE && chipset != S3_SAVAGE2000) { 603 } else if (dev_priv->chipset != S3_SUPERSAVAGE &&
604 dev_priv->chipset != S3_SAVAGE2000) {
599 mmio_base = drm_get_resource_start(dev, 0); 605 mmio_base = drm_get_resource_start(dev, 0);
600 fb_rsrc = 1; 606 fb_rsrc = 1;
601 fb_base = drm_get_resource_start(dev, 1); 607 fb_base = drm_get_resource_start(dev, 1);
@@ -609,9 +615,8 @@ int savage_preinit(drm_device_t * dev, unsigned long chipset)
609 dev_priv->mtrr[0].base = fb_base; 615 dev_priv->mtrr[0].base = fb_base;
610 dev_priv->mtrr[0].size = 0x08000000; 616 dev_priv->mtrr[0].size = 0x08000000;
611 dev_priv->mtrr[0].handle = 617 dev_priv->mtrr[0].handle =
612 mtrr_add(dev_priv->mtrr[0].base, 618 drm_mtrr_add(dev_priv->mtrr[0].base,
613 dev_priv->mtrr[0].size, MTRR_TYPE_WRCOMB, 619 dev_priv->mtrr[0].size, DRM_MTRR_WC);
614 1);
615 } else { 620 } else {
616 DRM_ERROR("strange pci_resource_len %08lx\n", 621 DRM_ERROR("strange pci_resource_len %08lx\n",
617 drm_get_resource_len(dev, 1)); 622 drm_get_resource_len(dev, 1));
@@ -648,16 +653,21 @@ int savage_preinit(drm_device_t * dev, unsigned long chipset)
648/* 653/*
649 * Delete MTRRs and free device-private data. 654 * Delete MTRRs and free device-private data.
650 */ 655 */
651int savage_postcleanup(drm_device_t * dev) 656void savage_driver_lastclose(drm_device_t *dev)
652{ 657{
653 drm_savage_private_t *dev_priv = dev->dev_private; 658 drm_savage_private_t *dev_priv = dev->dev_private;
654 int i; 659 int i;
655 660
656 for (i = 0; i < 3; ++i) 661 for (i = 0; i < 3; ++i)
657 if (dev_priv->mtrr[i].handle >= 0) 662 if (dev_priv->mtrr[i].handle >= 0)
658 mtrr_del(dev_priv->mtrr[i].handle, 663 drm_mtrr_del(dev_priv->mtrr[i].handle,
659 dev_priv->mtrr[i].base, 664 dev_priv->mtrr[i].base,
660 dev_priv->mtrr[i].size); 665 dev_priv->mtrr[i].size, DRM_MTRR_WC);
666}
667
668int savage_driver_unload(drm_device_t *dev)
669{
670 drm_savage_private_t *dev_priv = dev->dev_private;
661 671
662 drm_free(dev_priv, sizeof(drm_savage_private_t), DRM_MEM_DRIVER); 672 drm_free(dev_priv, sizeof(drm_savage_private_t), DRM_MEM_DRIVER);
663 673
@@ -994,8 +1004,7 @@ static int savage_bci_event_wait(DRM_IOCTL_ARGS)
994 * DMA buffer management 1004 * DMA buffer management
995 */ 1005 */
996 1006
997static int savage_bci_get_buffers(DRMFILE filp, drm_device_t * dev, 1007static int savage_bci_get_buffers(DRMFILE filp, drm_device_t *dev, drm_dma_t *d)
998 drm_dma_t * d)
999{ 1008{
1000 drm_buf_t *buf; 1009 drm_buf_t *buf;
1001 int i; 1010 int i;
@@ -1057,7 +1066,7 @@ int savage_bci_buffers(DRM_IOCTL_ARGS)
1057 return ret; 1066 return ret;
1058} 1067}
1059 1068
1060void savage_reclaim_buffers(drm_device_t * dev, DRMFILE filp) 1069void savage_reclaim_buffers(drm_device_t *dev, DRMFILE filp)
1061{ 1070{
1062 drm_device_dma_t *dma = dev->dma; 1071 drm_device_dma_t *dma = dev->dma;
1063 drm_savage_private_t *dev_priv = dev->dev_private; 1072 drm_savage_private_t *dev_priv = dev->dev_private;
@@ -1090,10 +1099,10 @@ void savage_reclaim_buffers(drm_device_t * dev, DRMFILE filp)
1090} 1099}
1091 1100
1092drm_ioctl_desc_t savage_ioctls[] = { 1101drm_ioctl_desc_t savage_ioctls[] = {
1093 [DRM_IOCTL_NR(DRM_SAVAGE_BCI_INIT)] = {savage_bci_init, 1, 1}, 1102 [DRM_IOCTL_NR(DRM_SAVAGE_BCI_INIT)] = {savage_bci_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
1094 [DRM_IOCTL_NR(DRM_SAVAGE_BCI_CMDBUF)] = {savage_bci_cmdbuf, 1, 0}, 1103 [DRM_IOCTL_NR(DRM_SAVAGE_BCI_CMDBUF)] = {savage_bci_cmdbuf, DRM_AUTH},
1095 [DRM_IOCTL_NR(DRM_SAVAGE_BCI_EVENT_EMIT)] = {savage_bci_event_emit, 1, 0}, 1104 [DRM_IOCTL_NR(DRM_SAVAGE_BCI_EVENT_EMIT)] = {savage_bci_event_emit, DRM_AUTH},
1096 [DRM_IOCTL_NR(DRM_SAVAGE_BCI_EVENT_WAIT)] = {savage_bci_event_wait, 1, 0}, 1105 [DRM_IOCTL_NR(DRM_SAVAGE_BCI_EVENT_WAIT)] = {savage_bci_event_wait, DRM_AUTH},
1097}; 1106};
1098 1107
1099int savage_max_ioctl = DRM_ARRAY_SIZE(savage_ioctls); 1108int savage_max_ioctl = DRM_ARRAY_SIZE(savage_ioctls);
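savage_driver_firstopen()/lastclose() now go through drm_mtrr_add()/drm_mtrr_del() rather than calling the MTRR interface directly, so non-Linux ports can supply their own implementation. On Linux the wrappers are expected to reduce to the raw calls; the sketch below shows that mapping under assumed names (the real definitions live in the DRM headers and also cover kernels built without MTRR support):

#include <asm/mtrr.h>   /* mtrr_add(), mtrr_del(), MTRR_TYPE_WRCOMB */

/* Sketch of the Linux mapping; `flags` (DRM_MTRR_WC) selects
 * write-combining, which is the only type the DRM drivers request. */
static inline int drm_mtrr_add_sketch(unsigned long offset,
                                      unsigned long size, unsigned int flags)
{
        return mtrr_add(offset, size, MTRR_TYPE_WRCOMB, 1);
}

static inline int drm_mtrr_del_sketch(int handle, unsigned long offset,
                                      unsigned long size, unsigned int flags)
{
        return mtrr_del(handle, offset, size);
}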
diff --git a/drivers/char/drm/savage_drv.c b/drivers/char/drm/savage_drv.c
index 22d799cde41c..aa6c0d1a82f8 100644
--- a/drivers/char/drm/savage_drv.c
+++ b/drivers/char/drm/savage_drv.c
@@ -30,31 +30,6 @@
30 30
31#include "drm_pciids.h" 31#include "drm_pciids.h"
32 32
33static int postinit(struct drm_device *dev, unsigned long flags)
34{
35 DRM_INFO("Initialized %s %d.%d.%d %s on minor %d: %s\n",
36 DRIVER_NAME,
37 DRIVER_MAJOR,
38 DRIVER_MINOR,
39 DRIVER_PATCHLEVEL,
40 DRIVER_DATE, dev->primary.minor, pci_pretty_name(dev->pdev)
41 );
42 return 0;
43}
44
45static int version(drm_version_t * version)
46{
47 int len;
48
49 version->version_major = DRIVER_MAJOR;
50 version->version_minor = DRIVER_MINOR;
51 version->version_patchlevel = DRIVER_PATCHLEVEL;
52 DRM_COPY(version->name, DRIVER_NAME);
53 DRM_COPY(version->date, DRIVER_DATE);
54 DRM_COPY(version->desc, DRIVER_DESC);
55 return 0;
56}
57
58static struct pci_device_id pciidlist[] = { 33static struct pci_device_id pciidlist[] = {
59 savage_PCI_IDS 34 savage_PCI_IDS
60}; 35};
@@ -63,13 +38,13 @@ static struct drm_driver driver = {
63 .driver_features = 38 .driver_features =
64 DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_HAVE_DMA | DRIVER_PCI_DMA, 39 DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_HAVE_DMA | DRIVER_PCI_DMA,
65 .dev_priv_size = sizeof(drm_savage_buf_priv_t), 40 .dev_priv_size = sizeof(drm_savage_buf_priv_t),
66 .preinit = savage_preinit, 41 .load = savage_driver_load,
67 .postinit = postinit, 42 .firstopen = savage_driver_firstopen,
68 .postcleanup = savage_postcleanup, 43 .lastclose = savage_driver_lastclose,
44 .unload = savage_driver_unload,
69 .reclaim_buffers = savage_reclaim_buffers, 45 .reclaim_buffers = savage_reclaim_buffers,
70 .get_map_ofs = drm_core_get_map_ofs, 46 .get_map_ofs = drm_core_get_map_ofs,
71 .get_reg_ofs = drm_core_get_reg_ofs, 47 .get_reg_ofs = drm_core_get_reg_ofs,
72 .version = version,
73 .ioctls = savage_ioctls, 48 .ioctls = savage_ioctls,
74 .dma_ioctl = savage_bci_buffers, 49 .dma_ioctl = savage_bci_buffers,
75 .fops = { 50 .fops = {
@@ -80,12 +55,19 @@ static struct drm_driver driver = {
80 .mmap = drm_mmap, 55 .mmap = drm_mmap,
81 .poll = drm_poll, 56 .poll = drm_poll,
82 .fasync = drm_fasync, 57 .fasync = drm_fasync,
83 } 58 },
84 , 59
85 .pci_driver = { 60 .pci_driver = {
86 .name = DRIVER_NAME, 61 .name = DRIVER_NAME,
87 .id_table = pciidlist, 62 .id_table = pciidlist,
88 } 63 },
64
65 .name = DRIVER_NAME,
66 .desc = DRIVER_DESC,
67 .date = DRIVER_DATE,
68 .major = DRIVER_MAJOR,
69 .minor = DRIVER_MINOR,
70 .patchlevel = DRIVER_PATCHLEVEL,
89}; 71};
90 72
91static int __init savage_init(void) 73static int __init savage_init(void)
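With .name/.desc/.date and the numeric version fields now living in struct drm_driver, the per-driver version() callback (and the postinit() banner) can be served generically by the core. The sketch below mirrors the removed savage version() using the new fields; the function name is hypothetical and the real handler additionally honours the string lengths negotiated by user space:

#include "drmP.h"

/* Illustrative generic DRM_IOCTL_VERSION backend built on the new
 * struct drm_driver fields.  `version` points at the user-space struct,
 * as in the removed per-driver callback; DRM_COPY() needs a local `len`. */
static int drm_fill_version_sketch(drm_device_t *dev, drm_version_t *version)
{
        struct drm_driver *driver = dev->driver;
        int len;

        version->version_major = driver->major;
        version->version_minor = driver->minor;
        version->version_patchlevel = driver->patchlevel;
        DRM_COPY(version->name, driver->name);
        DRM_COPY(version->date, driver->date);
        DRM_COPY(version->desc, driver->desc);
        return 0;
}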
diff --git a/drivers/char/drm/savage_drv.h b/drivers/char/drm/savage_drv.h
index a4b0fa998a95..dd46cb85439c 100644
--- a/drivers/char/drm/savage_drv.h
+++ b/drivers/char/drm/savage_drv.h
@@ -1,5 +1,5 @@
1/* savage_drv.h -- Private header for the savage driver 1/* savage_drv.h -- Private header for the savage driver */
2 * 2/*
3 * Copyright 2004 Felix Kuehling 3 * Copyright 2004 Felix Kuehling
4 * All Rights Reserved. 4 * All Rights Reserved.
5 * 5 *
@@ -192,7 +192,7 @@ typedef struct drm_savage_private {
192 /* Err, there is a macro wait_event in include/linux/wait.h. 192 /* Err, there is a macro wait_event in include/linux/wait.h.
193 * Avoid unwanted macro expansion. */ 193 * Avoid unwanted macro expansion. */
194 void (*emit_clip_rect) (struct drm_savage_private * dev_priv, 194 void (*emit_clip_rect) (struct drm_savage_private * dev_priv,
195 drm_clip_rect_t * pbox); 195 const drm_clip_rect_t * pbox);
196 void (*dma_flush) (struct drm_savage_private * dev_priv); 196 void (*dma_flush) (struct drm_savage_private * dev_priv);
197} drm_savage_private_t; 197} drm_savage_private_t;
198 198
@@ -208,16 +208,18 @@ extern void savage_dma_reset(drm_savage_private_t * dev_priv);
208extern void savage_dma_wait(drm_savage_private_t * dev_priv, unsigned int page); 208extern void savage_dma_wait(drm_savage_private_t * dev_priv, unsigned int page);
209extern uint32_t *savage_dma_alloc(drm_savage_private_t * dev_priv, 209extern uint32_t *savage_dma_alloc(drm_savage_private_t * dev_priv,
210 unsigned int n); 210 unsigned int n);
211extern int savage_preinit(drm_device_t * dev, unsigned long chipset); 211extern int savage_driver_load(drm_device_t *dev, unsigned long chipset);
212extern int savage_postcleanup(drm_device_t * dev); 212extern int savage_driver_firstopen(drm_device_t *dev);
213extern void savage_driver_lastclose(drm_device_t *dev);
214extern int savage_driver_unload(drm_device_t *dev);
213extern int savage_do_cleanup_bci(drm_device_t * dev); 215extern int savage_do_cleanup_bci(drm_device_t * dev);
214extern void savage_reclaim_buffers(drm_device_t * dev, DRMFILE filp); 216extern void savage_reclaim_buffers(drm_device_t * dev, DRMFILE filp);
215 217
216/* state functions */ 218/* state functions */
217extern void savage_emit_clip_rect_s3d(drm_savage_private_t * dev_priv, 219extern void savage_emit_clip_rect_s3d(drm_savage_private_t * dev_priv,
218 drm_clip_rect_t * pbox); 220 const drm_clip_rect_t * pbox);
219extern void savage_emit_clip_rect_s4(drm_savage_private_t * dev_priv, 221extern void savage_emit_clip_rect_s4(drm_savage_private_t * dev_priv,
220 drm_clip_rect_t * pbox); 222 const drm_clip_rect_t * pbox);
221 223
222#define SAVAGE_FB_SIZE_S3 0x01000000 /* 16MB */ 224#define SAVAGE_FB_SIZE_S3 0x01000000 /* 16MB */
223#define SAVAGE_FB_SIZE_S4 0x02000000 /* 32MB */ 225#define SAVAGE_FB_SIZE_S4 0x02000000 /* 32MB */
@@ -500,15 +502,6 @@ extern void savage_emit_clip_rect_s4(drm_savage_private_t * dev_priv,
500 502
501#define BCI_WRITE( val ) *bci_ptr++ = (uint32_t)(val) 503#define BCI_WRITE( val ) *bci_ptr++ = (uint32_t)(val)
502 504
503#define BCI_COPY_FROM_USER(src,n) do { \
504 unsigned int i; \
505 for (i = 0; i < n; ++i) { \
506 uint32_t val; \
507 DRM_GET_USER_UNCHECKED(val, &((uint32_t*)(src))[i]); \
508 BCI_WRITE(val); \
509 } \
510} while(0)
511
512/* 505/*
513 * command DMA support 506 * command DMA support
514 */ 507 */
@@ -534,8 +527,8 @@ extern void savage_emit_clip_rect_s4(drm_savage_private_t * dev_priv,
534 527
535#define DMA_WRITE( val ) *dma_ptr++ = (uint32_t)(val) 528#define DMA_WRITE( val ) *dma_ptr++ = (uint32_t)(val)
536 529
537#define DMA_COPY_FROM_USER(src,n) do { \ 530#define DMA_COPY(src, n) do { \
538 DRM_COPY_FROM_USER_UNCHECKED(dma_ptr, (src), (n)*4); \ 531 memcpy(dma_ptr, (src), (n)*4); \
539 dma_ptr += n; \ 532 dma_ptr += n; \
540} while(0) 533} while(0)
541 534
diff --git a/drivers/char/drm/savage_state.c b/drivers/char/drm/savage_state.c
index e87a5d59b99c..ef2581d16146 100644
--- a/drivers/char/drm/savage_state.c
+++ b/drivers/char/drm/savage_state.c
@@ -27,7 +27,7 @@
27#include "savage_drv.h" 27#include "savage_drv.h"
28 28
29void savage_emit_clip_rect_s3d(drm_savage_private_t * dev_priv, 29void savage_emit_clip_rect_s3d(drm_savage_private_t * dev_priv,
30 drm_clip_rect_t * pbox) 30 const drm_clip_rect_t * pbox)
31{ 31{
32 uint32_t scstart = dev_priv->state.s3d.new_scstart; 32 uint32_t scstart = dev_priv->state.s3d.new_scstart;
33 uint32_t scend = dev_priv->state.s3d.new_scend; 33 uint32_t scend = dev_priv->state.s3d.new_scend;
@@ -53,7 +53,7 @@ void savage_emit_clip_rect_s3d(drm_savage_private_t * dev_priv,
53} 53}
54 54
55void savage_emit_clip_rect_s4(drm_savage_private_t * dev_priv, 55void savage_emit_clip_rect_s4(drm_savage_private_t * dev_priv,
56 drm_clip_rect_t * pbox) 56 const drm_clip_rect_t * pbox)
57{ 57{
58 uint32_t drawctrl0 = dev_priv->state.s4.new_drawctrl0; 58 uint32_t drawctrl0 = dev_priv->state.s4.new_drawctrl0;
59 uint32_t drawctrl1 = dev_priv->state.s4.new_drawctrl1; 59 uint32_t drawctrl1 = dev_priv->state.s4.new_drawctrl1;
@@ -115,18 +115,19 @@ static int savage_verify_texaddr(drm_savage_private_t * dev_priv, int unit,
115 115
116#define SAVE_STATE(reg,where) \ 116#define SAVE_STATE(reg,where) \
117 if(start <= reg && start+count > reg) \ 117 if(start <= reg && start+count > reg) \
118 DRM_GET_USER_UNCHECKED(dev_priv->state.where, &regs[reg-start]) 118 dev_priv->state.where = regs[reg - start]
119#define SAVE_STATE_MASK(reg,where,mask) do { \ 119#define SAVE_STATE_MASK(reg,where,mask) do { \
120 if(start <= reg && start+count > reg) { \ 120 if(start <= reg && start+count > reg) { \
121 uint32_t tmp; \ 121 uint32_t tmp; \
122 DRM_GET_USER_UNCHECKED(tmp, &regs[reg-start]); \ 122 tmp = regs[reg - start]; \
123 dev_priv->state.where = (tmp & (mask)) | \ 123 dev_priv->state.where = (tmp & (mask)) | \
124 (dev_priv->state.where & ~(mask)); \ 124 (dev_priv->state.where & ~(mask)); \
125 } \ 125 } \
126} while (0) 126} while (0)
127
127static int savage_verify_state_s3d(drm_savage_private_t * dev_priv, 128static int savage_verify_state_s3d(drm_savage_private_t * dev_priv,
128 unsigned int start, unsigned int count, 129 unsigned int start, unsigned int count,
129 const uint32_t __user * regs) 130 const uint32_t *regs)
130{ 131{
131 if (start < SAVAGE_TEXPALADDR_S3D || 132 if (start < SAVAGE_TEXPALADDR_S3D ||
132 start + count - 1 > SAVAGE_DESTTEXRWWATERMARK_S3D) { 133 start + count - 1 > SAVAGE_DESTTEXRWWATERMARK_S3D) {
@@ -148,8 +149,7 @@ static int savage_verify_state_s3d(drm_savage_private_t * dev_priv,
148 SAVE_STATE(SAVAGE_TEXADDR_S3D, s3d.texaddr); 149 SAVE_STATE(SAVAGE_TEXADDR_S3D, s3d.texaddr);
149 if (dev_priv->state.s3d.texctrl & SAVAGE_TEXCTRL_TEXEN_MASK) 150 if (dev_priv->state.s3d.texctrl & SAVAGE_TEXCTRL_TEXEN_MASK)
150 return savage_verify_texaddr(dev_priv, 0, 151 return savage_verify_texaddr(dev_priv, 0,
151 dev_priv->state.s3d. 152 dev_priv->state.s3d.texaddr);
152 texaddr);
153 } 153 }
154 154
155 return 0; 155 return 0;
@@ -157,7 +157,7 @@ static int savage_verify_state_s3d(drm_savage_private_t * dev_priv,
157 157
158static int savage_verify_state_s4(drm_savage_private_t * dev_priv, 158static int savage_verify_state_s4(drm_savage_private_t * dev_priv,
159 unsigned int start, unsigned int count, 159 unsigned int start, unsigned int count,
160 const uint32_t __user * regs) 160 const uint32_t *regs)
161{ 161{
162 int ret = 0; 162 int ret = 0;
163 163
@@ -174,19 +174,18 @@ static int savage_verify_state_s4(drm_savage_private_t * dev_priv,
174 ~SAVAGE_SCISSOR_MASK_S4); 174 ~SAVAGE_SCISSOR_MASK_S4);
175 175
176 /* if any texture regs were changed ... */ 176 /* if any texture regs were changed ... */
177 if (start <= SAVAGE_TEXDESCR_S4 && start + count > SAVAGE_TEXPALADDR_S4) { 177 if (start <= SAVAGE_TEXDESCR_S4 &&
178 start + count > SAVAGE_TEXPALADDR_S4) {
178 /* ... check texture state */ 179 /* ... check texture state */
179 SAVE_STATE(SAVAGE_TEXDESCR_S4, s4.texdescr); 180 SAVE_STATE(SAVAGE_TEXDESCR_S4, s4.texdescr);
180 SAVE_STATE(SAVAGE_TEXADDR0_S4, s4.texaddr0); 181 SAVE_STATE(SAVAGE_TEXADDR0_S4, s4.texaddr0);
181 SAVE_STATE(SAVAGE_TEXADDR1_S4, s4.texaddr1); 182 SAVE_STATE(SAVAGE_TEXADDR1_S4, s4.texaddr1);
182 if (dev_priv->state.s4.texdescr & SAVAGE_TEXDESCR_TEX0EN_MASK) 183 if (dev_priv->state.s4.texdescr & SAVAGE_TEXDESCR_TEX0EN_MASK)
183 ret |= 184 ret |= savage_verify_texaddr(dev_priv, 0,
184 savage_verify_texaddr(dev_priv, 0, 185 dev_priv->state.s4.texaddr0);
185 dev_priv->state.s4.texaddr0);
186 if (dev_priv->state.s4.texdescr & SAVAGE_TEXDESCR_TEX1EN_MASK) 186 if (dev_priv->state.s4.texdescr & SAVAGE_TEXDESCR_TEX1EN_MASK)
187 ret |= 187 ret |= savage_verify_texaddr(dev_priv, 1,
188 savage_verify_texaddr(dev_priv, 1, 188 dev_priv->state.s4.texaddr1);
189 dev_priv->state.s4.texaddr1);
190 } 189 }
191 190
192 return ret; 191 return ret;
@@ -197,7 +196,7 @@ static int savage_verify_state_s4(drm_savage_private_t * dev_priv,
197 196
198static int savage_dispatch_state(drm_savage_private_t * dev_priv, 197static int savage_dispatch_state(drm_savage_private_t * dev_priv,
199 const drm_savage_cmd_header_t * cmd_header, 198 const drm_savage_cmd_header_t * cmd_header,
200 const uint32_t __user * regs) 199 const uint32_t *regs)
201{ 200{
202 unsigned int count = cmd_header->state.count; 201 unsigned int count = cmd_header->state.count;
203 unsigned int start = cmd_header->state.start; 202 unsigned int start = cmd_header->state.start;
@@ -209,9 +208,6 @@ static int savage_dispatch_state(drm_savage_private_t * dev_priv,
209 if (!count) 208 if (!count)
210 return 0; 209 return 0;
211 210
212 if (DRM_VERIFYAREA_READ(regs, count * 4))
213 return DRM_ERR(EFAULT);
214
215 if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) { 211 if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) {
216 ret = savage_verify_state_s3d(dev_priv, start, count, regs); 212 ret = savage_verify_state_s3d(dev_priv, start, count, regs);
217 if (ret != 0) 213 if (ret != 0)
@@ -236,8 +232,8 @@ static int savage_dispatch_state(drm_savage_private_t * dev_priv,
236 /* scissor regs are emitted in savage_dispatch_draw */ 232 /* scissor regs are emitted in savage_dispatch_draw */
237 if (start < SAVAGE_DRAWCTRL0_S4) { 233 if (start < SAVAGE_DRAWCTRL0_S4) {
238 if (start + count > SAVAGE_DRAWCTRL1_S4 + 1) 234 if (start + count > SAVAGE_DRAWCTRL1_S4 + 1)
239 count2 = 235 count2 = count -
240 count - (SAVAGE_DRAWCTRL1_S4 + 1 - start); 236 (SAVAGE_DRAWCTRL1_S4 + 1 - start);
241 if (start + count > SAVAGE_DRAWCTRL0_S4) 237 if (start + count > SAVAGE_DRAWCTRL0_S4)
242 count = SAVAGE_DRAWCTRL0_S4 - start; 238 count = SAVAGE_DRAWCTRL0_S4 - start;
243 } else if (start <= SAVAGE_DRAWCTRL1_S4) { 239 } else if (start <= SAVAGE_DRAWCTRL1_S4) {
@@ -263,7 +259,7 @@ static int savage_dispatch_state(drm_savage_private_t * dev_priv,
263 while (count > 0) { 259 while (count > 0) {
264 unsigned int n = count < 255 ? count : 255; 260 unsigned int n = count < 255 ? count : 255;
265 DMA_SET_REGISTERS(start, n); 261 DMA_SET_REGISTERS(start, n);
266 DMA_COPY_FROM_USER(regs, n); 262 DMA_COPY(regs, n);
267 count -= n; 263 count -= n;
268 start += n; 264 start += n;
269 regs += n; 265 regs += n;
@@ -421,8 +417,8 @@ static int savage_dispatch_dma_prim(drm_savage_private_t * dev_priv,
421 417
422static int savage_dispatch_vb_prim(drm_savage_private_t * dev_priv, 418static int savage_dispatch_vb_prim(drm_savage_private_t * dev_priv,
423 const drm_savage_cmd_header_t * cmd_header, 419 const drm_savage_cmd_header_t * cmd_header,
424 const uint32_t __user * vtxbuf, 420 const uint32_t *vtxbuf, unsigned int vb_size,
425 unsigned int vb_size, unsigned int vb_stride) 421 unsigned int vb_stride)
426{ 422{
427 unsigned char reorder = 0; 423 unsigned char reorder = 0;
428 unsigned int prim = cmd_header->prim.prim; 424 unsigned int prim = cmd_header->prim.prim;
@@ -507,8 +503,7 @@ static int savage_dispatch_vb_prim(drm_savage_private_t * dev_priv,
507 503
508 for (i = start; i < start + count; ++i) { 504 for (i = start; i < start + count; ++i) {
509 unsigned int j = i + reorder[i % 3]; 505 unsigned int j = i + reorder[i % 3];
510 DMA_COPY_FROM_USER(&vtxbuf[vb_stride * j], 506 DMA_COPY(&vtxbuf[vb_stride * j], vtx_size);
511 vtx_size);
512 } 507 }
513 508
514 DMA_COMMIT(); 509 DMA_COMMIT();
@@ -517,13 +512,12 @@ static int savage_dispatch_vb_prim(drm_savage_private_t * dev_priv,
517 DMA_DRAW_PRIMITIVE(count, prim, skip); 512 DMA_DRAW_PRIMITIVE(count, prim, skip);
518 513
519 if (vb_stride == vtx_size) { 514 if (vb_stride == vtx_size) {
520 DMA_COPY_FROM_USER(&vtxbuf[vb_stride * start], 515 DMA_COPY(&vtxbuf[vb_stride * start],
521 vtx_size * count); 516 vtx_size * count);
522 } else { 517 } else {
523 for (i = start; i < start + count; ++i) { 518 for (i = start; i < start + count; ++i) {
524 DMA_COPY_FROM_USER(&vtxbuf 519 DMA_COPY(&vtxbuf [vb_stride * i],
525 [vb_stride * i], 520 vtx_size);
526 vtx_size);
527 } 521 }
528 } 522 }
529 523
@@ -541,7 +535,7 @@ static int savage_dispatch_vb_prim(drm_savage_private_t * dev_priv,
541 535
542static int savage_dispatch_dma_idx(drm_savage_private_t * dev_priv, 536static int savage_dispatch_dma_idx(drm_savage_private_t * dev_priv,
543 const drm_savage_cmd_header_t * cmd_header, 537 const drm_savage_cmd_header_t * cmd_header,
544 const uint16_t __user * usr_idx, 538 const uint16_t *idx,
545 const drm_buf_t * dmabuf) 539 const drm_buf_t * dmabuf)
546{ 540{
547 unsigned char reorder = 0; 541 unsigned char reorder = 0;
@@ -628,11 +622,8 @@ static int savage_dispatch_dma_idx(drm_savage_private_t * dev_priv,
628 while (n != 0) { 622 while (n != 0) {
629 /* Can emit up to 255 indices (85 triangles) at once. */ 623 /* Can emit up to 255 indices (85 triangles) at once. */
630 unsigned int count = n > 255 ? 255 : n; 624 unsigned int count = n > 255 ? 255 : n;
631 /* Is it ok to allocate 510 bytes on the stack in an ioctl? */
632 uint16_t idx[255];
633 625
634 /* Copy and check indices */ 626 /* check indices */
635 DRM_COPY_FROM_USER_UNCHECKED(idx, usr_idx, count * 2);
636 for (i = 0; i < count; ++i) { 627 for (i = 0; i < count; ++i) {
637 if (idx[i] > dmabuf->total / 32) { 628 if (idx[i] > dmabuf->total / 32) {
638 DRM_ERROR("idx[%u]=%u out of range (0-%u)\n", 629 DRM_ERROR("idx[%u]=%u out of range (0-%u)\n",
@@ -652,8 +643,8 @@ static int savage_dispatch_dma_idx(drm_savage_private_t * dev_priv,
652 643
653 for (i = 1; i + 1 < count; i += 2) 644 for (i = 1; i + 1 < count; i += 2)
654 BCI_WRITE(idx[i + reorder[i % 3]] | 645 BCI_WRITE(idx[i + reorder[i % 3]] |
655 (idx[i + 1 + reorder[(i + 1) % 3]] << 646 (idx[i + 1 +
656 16)); 647 reorder[(i + 1) % 3]] << 16));
657 if (i < count) 648 if (i < count)
658 BCI_WRITE(idx[i + reorder[i % 3]]); 649 BCI_WRITE(idx[i + reorder[i % 3]]);
659 } else if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) { 650 } else if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) {
@@ -674,7 +665,7 @@ static int savage_dispatch_dma_idx(drm_savage_private_t * dev_priv,
674 BCI_WRITE(idx[i]); 665 BCI_WRITE(idx[i]);
675 } 666 }
676 667
677 usr_idx += count; 668 idx += count;
678 n -= count; 669 n -= count;
679 670
680 prim |= BCI_CMD_DRAW_CONT; 671 prim |= BCI_CMD_DRAW_CONT;
@@ -685,8 +676,8 @@ static int savage_dispatch_dma_idx(drm_savage_private_t * dev_priv,
685 676
686static int savage_dispatch_vb_idx(drm_savage_private_t * dev_priv, 677static int savage_dispatch_vb_idx(drm_savage_private_t * dev_priv,
687 const drm_savage_cmd_header_t * cmd_header, 678 const drm_savage_cmd_header_t * cmd_header,
688 const uint16_t __user * usr_idx, 679 const uint16_t *idx,
689 const uint32_t __user * vtxbuf, 680 const uint32_t *vtxbuf,
690 unsigned int vb_size, unsigned int vb_stride) 681 unsigned int vb_size, unsigned int vb_stride)
691{ 682{
692 unsigned char reorder = 0; 683 unsigned char reorder = 0;
@@ -751,11 +742,8 @@ static int savage_dispatch_vb_idx(drm_savage_private_t * dev_priv,
751 while (n != 0) { 742 while (n != 0) {
752 /* Can emit up to 255 vertices (85 triangles) at once. */ 743 /* Can emit up to 255 vertices (85 triangles) at once. */
753 unsigned int count = n > 255 ? 255 : n; 744 unsigned int count = n > 255 ? 255 : n;
754 /* Is it ok to allocate 510 bytes on the stack in an ioctl? */ 745
755 uint16_t idx[255]; 746 /* Check indices */
756
757 /* Copy and check indices */
758 DRM_COPY_FROM_USER_UNCHECKED(idx, usr_idx, count * 2);
759 for (i = 0; i < count; ++i) { 747 for (i = 0; i < count; ++i) {
760 if (idx[i] > vb_size / (vb_stride * 4)) { 748 if (idx[i] > vb_size / (vb_stride * 4)) {
761 DRM_ERROR("idx[%u]=%u out of range (0-%u)\n", 749 DRM_ERROR("idx[%u]=%u out of range (0-%u)\n",
@@ -775,8 +763,7 @@ static int savage_dispatch_vb_idx(drm_savage_private_t * dev_priv,
775 763
776 for (i = 0; i < count; ++i) { 764 for (i = 0; i < count; ++i) {
777 unsigned int j = idx[i + reorder[i % 3]]; 765 unsigned int j = idx[i + reorder[i % 3]];
778 DMA_COPY_FROM_USER(&vtxbuf[vb_stride * j], 766 DMA_COPY(&vtxbuf[vb_stride * j], vtx_size);
779 vtx_size);
780 } 767 }
781 768
782 DMA_COMMIT(); 769 DMA_COMMIT();
@@ -786,14 +773,13 @@ static int savage_dispatch_vb_idx(drm_savage_private_t * dev_priv,
786 773
787 for (i = 0; i < count; ++i) { 774 for (i = 0; i < count; ++i) {
788 unsigned int j = idx[i]; 775 unsigned int j = idx[i];
789 DMA_COPY_FROM_USER(&vtxbuf[vb_stride * j], 776 DMA_COPY(&vtxbuf[vb_stride * j], vtx_size);
790 vtx_size);
791 } 777 }
792 778
793 DMA_COMMIT(); 779 DMA_COMMIT();
794 } 780 }
795 781
796 usr_idx += count; 782 idx += count;
797 n -= count; 783 n -= count;
798 784
799 prim |= BCI_CMD_DRAW_CONT; 785 prim |= BCI_CMD_DRAW_CONT;
@@ -804,11 +790,11 @@ static int savage_dispatch_vb_idx(drm_savage_private_t * dev_priv,
804 790
805static int savage_dispatch_clear(drm_savage_private_t * dev_priv, 791static int savage_dispatch_clear(drm_savage_private_t * dev_priv,
806 const drm_savage_cmd_header_t * cmd_header, 792 const drm_savage_cmd_header_t * cmd_header,
807 const drm_savage_cmd_header_t __user * data, 793 const drm_savage_cmd_header_t *data,
808 unsigned int nbox, 794 unsigned int nbox,
809 const drm_clip_rect_t __user * usr_boxes) 795 const drm_clip_rect_t *boxes)
810{ 796{
811 unsigned int flags = cmd_header->clear0.flags, mask, value; 797 unsigned int flags = cmd_header->clear0.flags;
812 unsigned int clear_cmd; 798 unsigned int clear_cmd;
813 unsigned int i, nbufs; 799 unsigned int i, nbufs;
814 DMA_LOCALS; 800 DMA_LOCALS;
@@ -816,9 +802,6 @@ static int savage_dispatch_clear(drm_savage_private_t * dev_priv,
816 if (nbox == 0) 802 if (nbox == 0)
817 return 0; 803 return 0;
818 804
819 DRM_GET_USER_UNCHECKED(mask, &data->clear1.mask);
820 DRM_GET_USER_UNCHECKED(value, &data->clear1.value);
821
822 clear_cmd = BCI_CMD_RECT | BCI_CMD_RECT_XP | BCI_CMD_RECT_YP | 805 clear_cmd = BCI_CMD_RECT | BCI_CMD_RECT_XP | BCI_CMD_RECT_YP |
823 BCI_CMD_SEND_COLOR | BCI_CMD_DEST_PBD_NEW; 806 BCI_CMD_SEND_COLOR | BCI_CMD_DEST_PBD_NEW;
824 BCI_CMD_SET_ROP(clear_cmd, 0xCC); 807 BCI_CMD_SET_ROP(clear_cmd, 0xCC);
@@ -828,21 +811,19 @@ static int savage_dispatch_clear(drm_savage_private_t * dev_priv,
828 if (nbufs == 0) 811 if (nbufs == 0)
829 return 0; 812 return 0;
830 813
831 if (mask != 0xffffffff) { 814 if (data->clear1.mask != 0xffffffff) {
832 /* set mask */ 815 /* set mask */
833 BEGIN_DMA(2); 816 BEGIN_DMA(2);
834 DMA_SET_REGISTERS(SAVAGE_BITPLANEWTMASK, 1); 817 DMA_SET_REGISTERS(SAVAGE_BITPLANEWTMASK, 1);
835 DMA_WRITE(mask); 818 DMA_WRITE(data->clear1.mask);
836 DMA_COMMIT(); 819 DMA_COMMIT();
837 } 820 }
838 for (i = 0; i < nbox; ++i) { 821 for (i = 0; i < nbox; ++i) {
839 drm_clip_rect_t box;
840 unsigned int x, y, w, h; 822 unsigned int x, y, w, h;
841 unsigned int buf; 823 unsigned int buf;
842 DRM_COPY_FROM_USER_UNCHECKED(&box, &usr_boxes[i], sizeof(box)); 824 x = boxes[i].x1, y = boxes[i].y1;
843 x = box.x1, y = box.y1; 825 w = boxes[i].x2 - boxes[i].x1;
844 w = box.x2 - box.x1; 826 h = boxes[i].y2 - boxes[i].y1;
845 h = box.y2 - box.y1;
846 BEGIN_DMA(nbufs * 6); 827 BEGIN_DMA(nbufs * 6);
847 for (buf = SAVAGE_FRONT; buf <= SAVAGE_DEPTH; buf <<= 1) { 828 for (buf = SAVAGE_FRONT; buf <= SAVAGE_DEPTH; buf <<= 1) {
848 if (!(flags & buf)) 829 if (!(flags & buf))
@@ -862,13 +843,13 @@ static int savage_dispatch_clear(drm_savage_private_t * dev_priv,
862 DMA_WRITE(dev_priv->depth_bd); 843 DMA_WRITE(dev_priv->depth_bd);
863 break; 844 break;
864 } 845 }
865 DMA_WRITE(value); 846 DMA_WRITE(data->clear1.value);
866 DMA_WRITE(BCI_X_Y(x, y)); 847 DMA_WRITE(BCI_X_Y(x, y));
867 DMA_WRITE(BCI_W_H(w, h)); 848 DMA_WRITE(BCI_W_H(w, h));
868 } 849 }
869 DMA_COMMIT(); 850 DMA_COMMIT();
870 } 851 }
871 if (mask != 0xffffffff) { 852 if (data->clear1.mask != 0xffffffff) {
872 /* reset mask */ 853 /* reset mask */
873 BEGIN_DMA(2); 854 BEGIN_DMA(2);
874 DMA_SET_REGISTERS(SAVAGE_BITPLANEWTMASK, 1); 855 DMA_SET_REGISTERS(SAVAGE_BITPLANEWTMASK, 1);
@@ -880,8 +861,7 @@ static int savage_dispatch_clear(drm_savage_private_t * dev_priv,
880} 861}
881 862
882static int savage_dispatch_swap(drm_savage_private_t * dev_priv, 863static int savage_dispatch_swap(drm_savage_private_t * dev_priv,
883 unsigned int nbox, 864 unsigned int nbox, const drm_clip_rect_t *boxes)
884 const drm_clip_rect_t __user * usr_boxes)
885{ 865{
886 unsigned int swap_cmd; 866 unsigned int swap_cmd;
887 unsigned int i; 867 unsigned int i;
@@ -895,16 +875,14 @@ static int savage_dispatch_swap(drm_savage_private_t * dev_priv,
895 BCI_CMD_SET_ROP(swap_cmd, 0xCC); 875 BCI_CMD_SET_ROP(swap_cmd, 0xCC);
896 876
897 for (i = 0; i < nbox; ++i) { 877 for (i = 0; i < nbox; ++i) {
898 drm_clip_rect_t box;
899 DRM_COPY_FROM_USER_UNCHECKED(&box, &usr_boxes[i], sizeof(box));
900
901 BEGIN_DMA(6); 878 BEGIN_DMA(6);
902 DMA_WRITE(swap_cmd); 879 DMA_WRITE(swap_cmd);
903 DMA_WRITE(dev_priv->back_offset); 880 DMA_WRITE(dev_priv->back_offset);
904 DMA_WRITE(dev_priv->back_bd); 881 DMA_WRITE(dev_priv->back_bd);
905 DMA_WRITE(BCI_X_Y(box.x1, box.y1)); 882 DMA_WRITE(BCI_X_Y(boxes[i].x1, boxes[i].y1));
906 DMA_WRITE(BCI_X_Y(box.x1, box.y1)); 883 DMA_WRITE(BCI_X_Y(boxes[i].x1, boxes[i].y1));
907 DMA_WRITE(BCI_W_H(box.x2 - box.x1, box.y2 - box.y1)); 884 DMA_WRITE(BCI_W_H(boxes[i].x2 - boxes[i].x1,
885 boxes[i].y2 - boxes[i].y1));
908 DMA_COMMIT(); 886 DMA_COMMIT();
909 } 887 }
910 888
@@ -912,68 +890,52 @@ static int savage_dispatch_swap(drm_savage_private_t * dev_priv,
912} 890}
913 891
914static int savage_dispatch_draw(drm_savage_private_t * dev_priv, 892static int savage_dispatch_draw(drm_savage_private_t * dev_priv,
915 const drm_savage_cmd_header_t __user * start, 893 const drm_savage_cmd_header_t *start,
916 const drm_savage_cmd_header_t __user * end, 894 const drm_savage_cmd_header_t *end,
917 const drm_buf_t * dmabuf, 895 const drm_buf_t * dmabuf,
918 const unsigned int __user * usr_vtxbuf, 896 const unsigned int *vtxbuf,
919 unsigned int vb_size, unsigned int vb_stride, 897 unsigned int vb_size, unsigned int vb_stride,
920 unsigned int nbox, 898 unsigned int nbox,
921 const drm_clip_rect_t __user * usr_boxes) 899 const drm_clip_rect_t *boxes)
922{ 900{
923 unsigned int i, j; 901 unsigned int i, j;
924 int ret; 902 int ret;
925 903
926 for (i = 0; i < nbox; ++i) { 904 for (i = 0; i < nbox; ++i) {
927 drm_clip_rect_t box; 905 const drm_savage_cmd_header_t *cmdbuf;
928 const drm_savage_cmd_header_t __user *usr_cmdbuf; 906 dev_priv->emit_clip_rect(dev_priv, &boxes[i]);
929 DRM_COPY_FROM_USER_UNCHECKED(&box, &usr_boxes[i], sizeof(box));
930 dev_priv->emit_clip_rect(dev_priv, &box);
931 907
932 usr_cmdbuf = start; 908 cmdbuf = start;
933 while (usr_cmdbuf < end) { 909 while (cmdbuf < end) {
934 drm_savage_cmd_header_t cmd_header; 910 drm_savage_cmd_header_t cmd_header;
935 DRM_COPY_FROM_USER_UNCHECKED(&cmd_header, usr_cmdbuf, 911 cmd_header = *cmdbuf;
936 sizeof(cmd_header)); 912 cmdbuf++;
937 usr_cmdbuf++;
938 switch (cmd_header.cmd.cmd) { 913 switch (cmd_header.cmd.cmd) {
939 case SAVAGE_CMD_DMA_PRIM: 914 case SAVAGE_CMD_DMA_PRIM:
940 ret = 915 ret = savage_dispatch_dma_prim(
941 savage_dispatch_dma_prim(dev_priv, 916 dev_priv, &cmd_header, dmabuf);
942 &cmd_header,
943 dmabuf);
944 break; 917 break;
945 case SAVAGE_CMD_VB_PRIM: 918 case SAVAGE_CMD_VB_PRIM:
946 ret = 919 ret = savage_dispatch_vb_prim(
947 savage_dispatch_vb_prim(dev_priv, 920 dev_priv, &cmd_header,
948 &cmd_header, 921 vtxbuf, vb_size, vb_stride);
949 (const uint32_t
950 __user *)
951 usr_vtxbuf, vb_size,
952 vb_stride);
953 break; 922 break;
954 case SAVAGE_CMD_DMA_IDX: 923 case SAVAGE_CMD_DMA_IDX:
955 j = (cmd_header.idx.count + 3) / 4; 924 j = (cmd_header.idx.count + 3) / 4;
 956				/* j was checked in savage_bci_cmdbuf */	925				/* j was checked in savage_bci_cmdbuf */
957 ret = 926 ret = savage_dispatch_dma_idx(dev_priv,
958 savage_dispatch_dma_idx(dev_priv, 927 &cmd_header, (const uint16_t *)cmdbuf,
959 &cmd_header, 928 dmabuf);
960 (const uint16_t 929 cmdbuf += j;
961 __user *)
962 usr_cmdbuf, dmabuf);
963 usr_cmdbuf += j;
964 break; 930 break;
965 case SAVAGE_CMD_VB_IDX: 931 case SAVAGE_CMD_VB_IDX:
966 j = (cmd_header.idx.count + 3) / 4; 932 j = (cmd_header.idx.count + 3) / 4;
 967				/* j was checked in savage_bci_cmdbuf */	933				/* j was checked in savage_bci_cmdbuf */
968 ret = 934 ret = savage_dispatch_vb_idx(dev_priv,
969 savage_dispatch_vb_idx(dev_priv, 935 &cmd_header, (const uint16_t *)cmdbuf,
970 &cmd_header, 936 (const uint32_t *)vtxbuf, vb_size,
971 (const uint16_t 937 vb_stride);
972 __user *)usr_cmdbuf, 938 cmdbuf += j;
973 (const uint32_t
974 __user *)usr_vtxbuf,
975 vb_size, vb_stride);
976 usr_cmdbuf += j;
977 break; 939 break;
978 default: 940 default:
979 /* What's the best return code? EFAULT? */ 941 /* What's the best return code? EFAULT? */
@@ -998,10 +960,10 @@ int savage_bci_cmdbuf(DRM_IOCTL_ARGS)
998 drm_device_dma_t *dma = dev->dma; 960 drm_device_dma_t *dma = dev->dma;
999 drm_buf_t *dmabuf; 961 drm_buf_t *dmabuf;
1000 drm_savage_cmdbuf_t cmdbuf; 962 drm_savage_cmdbuf_t cmdbuf;
1001 drm_savage_cmd_header_t __user *usr_cmdbuf; 963 drm_savage_cmd_header_t *kcmd_addr = NULL;
1002 drm_savage_cmd_header_t __user *first_draw_cmd; 964 drm_savage_cmd_header_t *first_draw_cmd;
1003 unsigned int __user *usr_vtxbuf; 965 unsigned int *kvb_addr = NULL;
1004 drm_clip_rect_t __user *usr_boxes; 966 drm_clip_rect_t *kbox_addr = NULL;
1005 unsigned int i, j; 967 unsigned int i, j;
1006 int ret = 0; 968 int ret = 0;
1007 969
@@ -1024,15 +986,53 @@ int savage_bci_cmdbuf(DRM_IOCTL_ARGS)
1024 dmabuf = NULL; 986 dmabuf = NULL;
1025 } 987 }
1026 988
1027 usr_cmdbuf = (drm_savage_cmd_header_t __user *) cmdbuf.cmd_addr; 989 /* Copy the user buffers into kernel temporary areas. This hasn't been
1028 usr_vtxbuf = (unsigned int __user *)cmdbuf.vb_addr; 990 * a performance loss compared to VERIFYAREA_READ/
1029 usr_boxes = (drm_clip_rect_t __user *) cmdbuf.box_addr; 991 * COPY_FROM_USER_UNCHECKED when done in other drivers, and is correct
1030 if ((cmdbuf.size && DRM_VERIFYAREA_READ(usr_cmdbuf, cmdbuf.size * 8)) || 992 * for locking on FreeBSD.
1031 (cmdbuf.vb_size && DRM_VERIFYAREA_READ(usr_vtxbuf, cmdbuf.vb_size)) 993 */
1032 || (cmdbuf.nbox 994 if (cmdbuf.size) {
1033 && DRM_VERIFYAREA_READ(usr_boxes, 995 kcmd_addr = drm_alloc(cmdbuf.size * 8, DRM_MEM_DRIVER);
1034 cmdbuf.nbox * sizeof(drm_clip_rect_t)))) 996 if (kcmd_addr == NULL)
1035 return DRM_ERR(EFAULT); 997 return ENOMEM;
998
999 if (DRM_COPY_FROM_USER(kcmd_addr, cmdbuf.cmd_addr,
1000 cmdbuf.size * 8))
1001 {
1002 drm_free(kcmd_addr, cmdbuf.size * 8, DRM_MEM_DRIVER);
1003 return DRM_ERR(EFAULT);
1004 }
1005 cmdbuf.cmd_addr = kcmd_addr;
1006 }
1007 if (cmdbuf.vb_size) {
1008 kvb_addr = drm_alloc(cmdbuf.vb_size, DRM_MEM_DRIVER);
1009 if (kvb_addr == NULL) {
1010 ret = DRM_ERR(ENOMEM);
1011 goto done;
1012 }
1013
1014 if (DRM_COPY_FROM_USER(kvb_addr, cmdbuf.vb_addr,
1015 cmdbuf.vb_size)) {
1016 ret = DRM_ERR(EFAULT);
1017 goto done;
1018 }
1019 cmdbuf.vb_addr = kvb_addr;
1020 }
1021 if (cmdbuf.nbox) {
1022 kbox_addr = drm_alloc(cmdbuf.nbox * sizeof(drm_clip_rect_t),
1023 DRM_MEM_DRIVER);
1024 if (kbox_addr == NULL) {
1025 ret = DRM_ERR(ENOMEM);
1026 goto done;
1027 }
1028
1029 if (DRM_COPY_FROM_USER(kbox_addr, cmdbuf.box_addr,
1030 cmdbuf.nbox * sizeof(drm_clip_rect_t))) {
1031 ret = DRM_ERR(EFAULT);
1032 goto done;
1033 }
1034 cmdbuf.box_addr = kbox_addr;
1035 }
1036 1036
1037 /* Make sure writes to DMA buffers are finished before sending 1037 /* Make sure writes to DMA buffers are finished before sending
1038 * DMA commands to the graphics hardware. */ 1038 * DMA commands to the graphics hardware. */
@@ -1046,9 +1046,8 @@ int savage_bci_cmdbuf(DRM_IOCTL_ARGS)
1046 first_draw_cmd = NULL; 1046 first_draw_cmd = NULL;
1047 while (i < cmdbuf.size) { 1047 while (i < cmdbuf.size) {
1048 drm_savage_cmd_header_t cmd_header; 1048 drm_savage_cmd_header_t cmd_header;
1049 DRM_COPY_FROM_USER_UNCHECKED(&cmd_header, usr_cmdbuf, 1049 cmd_header = *(drm_savage_cmd_header_t *)cmdbuf.cmd_addr;
1050 sizeof(cmd_header)); 1050 cmdbuf.cmd_addr++;
1051 usr_cmdbuf++;
1052 i++; 1051 i++;
1053 1052
1054 /* Group drawing commands with same state to minimize 1053 /* Group drawing commands with same state to minimize
@@ -1068,21 +1067,18 @@ int savage_bci_cmdbuf(DRM_IOCTL_ARGS)
1068 case SAVAGE_CMD_DMA_PRIM: 1067 case SAVAGE_CMD_DMA_PRIM:
1069 case SAVAGE_CMD_VB_PRIM: 1068 case SAVAGE_CMD_VB_PRIM:
1070 if (!first_draw_cmd) 1069 if (!first_draw_cmd)
1071 first_draw_cmd = usr_cmdbuf - 1; 1070 first_draw_cmd = cmdbuf.cmd_addr - 1;
1072 usr_cmdbuf += j; 1071 cmdbuf.cmd_addr += j;
1073 i += j; 1072 i += j;
1074 break; 1073 break;
1075 default: 1074 default:
1076 if (first_draw_cmd) { 1075 if (first_draw_cmd) {
1077 ret = 1076 ret = savage_dispatch_draw(
1078 savage_dispatch_draw(dev_priv, 1077 dev_priv, first_draw_cmd,
1079 first_draw_cmd, 1078 cmdbuf.cmd_addr - 1,
1080 usr_cmdbuf - 1, dmabuf, 1079 dmabuf, cmdbuf.vb_addr, cmdbuf.vb_size,
1081 usr_vtxbuf, 1080 cmdbuf.vb_stride,
1082 cmdbuf.vb_size, 1081 cmdbuf.nbox, cmdbuf.box_addr);
1083 cmdbuf.vb_stride,
1084 cmdbuf.nbox,
1085 usr_boxes);
1086 if (ret != 0) 1082 if (ret != 0)
1087 return ret; 1083 return ret;
1088 first_draw_cmd = NULL; 1084 first_draw_cmd = NULL;
@@ -1098,12 +1094,12 @@ int savage_bci_cmdbuf(DRM_IOCTL_ARGS)
1098 DRM_ERROR("command SAVAGE_CMD_STATE extends " 1094 DRM_ERROR("command SAVAGE_CMD_STATE extends "
1099 "beyond end of command buffer\n"); 1095 "beyond end of command buffer\n");
1100 DMA_FLUSH(); 1096 DMA_FLUSH();
1101 return DRM_ERR(EINVAL); 1097 ret = DRM_ERR(EINVAL);
1098 goto done;
1102 } 1099 }
1103 ret = savage_dispatch_state(dev_priv, &cmd_header, 1100 ret = savage_dispatch_state(dev_priv, &cmd_header,
1104 (uint32_t __user *) 1101 (const uint32_t *)cmdbuf.cmd_addr);
1105 usr_cmdbuf); 1102 cmdbuf.cmd_addr += j;
1106 usr_cmdbuf += j;
1107 i += j; 1103 i += j;
1108 break; 1104 break;
1109 case SAVAGE_CMD_CLEAR: 1105 case SAVAGE_CMD_CLEAR:
@@ -1111,39 +1107,40 @@ int savage_bci_cmdbuf(DRM_IOCTL_ARGS)
1111 DRM_ERROR("command SAVAGE_CMD_CLEAR extends " 1107 DRM_ERROR("command SAVAGE_CMD_CLEAR extends "
1112 "beyond end of command buffer\n"); 1108 "beyond end of command buffer\n");
1113 DMA_FLUSH(); 1109 DMA_FLUSH();
1114 return DRM_ERR(EINVAL); 1110 ret = DRM_ERR(EINVAL);
1111 goto done;
1115 } 1112 }
1116 ret = savage_dispatch_clear(dev_priv, &cmd_header, 1113 ret = savage_dispatch_clear(dev_priv, &cmd_header,
1117 usr_cmdbuf, 1114 cmdbuf.cmd_addr,
1118 cmdbuf.nbox, usr_boxes); 1115 cmdbuf.nbox, cmdbuf.box_addr);
1119 usr_cmdbuf++; 1116 cmdbuf.cmd_addr++;
1120 i++; 1117 i++;
1121 break; 1118 break;
1122 case SAVAGE_CMD_SWAP: 1119 case SAVAGE_CMD_SWAP:
1123 ret = savage_dispatch_swap(dev_priv, 1120 ret = savage_dispatch_swap(dev_priv, cmdbuf.nbox,
1124 cmdbuf.nbox, usr_boxes); 1121 cmdbuf.box_addr);
1125 break; 1122 break;
1126 default: 1123 default:
1127 DRM_ERROR("invalid command 0x%x\n", cmd_header.cmd.cmd); 1124 DRM_ERROR("invalid command 0x%x\n", cmd_header.cmd.cmd);
1128 DMA_FLUSH(); 1125 DMA_FLUSH();
1129 return DRM_ERR(EINVAL); 1126 ret = DRM_ERR(EINVAL);
1127 goto done;
1130 } 1128 }
1131 1129
1132 if (ret != 0) { 1130 if (ret != 0) {
1133 DMA_FLUSH(); 1131 DMA_FLUSH();
1134 return ret; 1132 goto done;
1135 } 1133 }
1136 } 1134 }
1137 1135
1138 if (first_draw_cmd) { 1136 if (first_draw_cmd) {
1139 ret = 1137 ret = savage_dispatch_draw (
1140 savage_dispatch_draw(dev_priv, first_draw_cmd, usr_cmdbuf, 1138 dev_priv, first_draw_cmd, cmdbuf.cmd_addr, dmabuf,
1141 dmabuf, usr_vtxbuf, cmdbuf.vb_size, 1139 cmdbuf.vb_addr, cmdbuf.vb_size, cmdbuf.vb_stride,
1142 cmdbuf.vb_stride, cmdbuf.nbox, 1140 cmdbuf.nbox, cmdbuf.box_addr);
1143 usr_boxes);
1144 if (ret != 0) { 1141 if (ret != 0) {
1145 DMA_FLUSH(); 1142 DMA_FLUSH();
1146 return ret; 1143 goto done;
1147 } 1144 }
1148 } 1145 }
1149 1146
@@ -1157,5 +1154,12 @@ int savage_bci_cmdbuf(DRM_IOCTL_ARGS)
1157 savage_freelist_put(dev, dmabuf); 1154 savage_freelist_put(dev, dmabuf);
1158 } 1155 }
1159 1156
1160 return 0; 1157done:
1158 /* If we didn't need to allocate them, these'll be NULL */
1159 drm_free(kcmd_addr, cmdbuf.size * 8, DRM_MEM_DRIVER);
1160 drm_free(kvb_addr, cmdbuf.vb_size, DRM_MEM_DRIVER);
1161 drm_free(kbox_addr, cmdbuf.nbox * sizeof(drm_clip_rect_t),
1162 DRM_MEM_DRIVER);
1163
1164 return ret;
1161} 1165}
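
The savage_bci_cmdbuf() hunks above replace the piecemeal DRM_VERIFYAREA_READ / DRM_COPY_FROM_USER_UNCHECKED accesses with a single up-front copy of the command, vertex and clip-rect buffers into kernel temporaries; everything is then parsed from those private copies and released at one done: label. Below is a minimal userspace sketch of that pattern; the names are hypothetical and malloc/memcpy/free merely stand in for drm_alloc/DRM_COPY_FROM_USER/drm_free.

#include <errno.h>
#include <stdlib.h>
#include <string.h>

struct cmd { unsigned int op; unsigned int arg; };

int parse_cmdbuf(const void *user_cmds, size_t ncmds,
                 const void *user_boxes, size_t nboxes)
{
	struct cmd *kcmds = NULL;
	void *kboxes = NULL;
	int ret = 0;
	size_t i;

	/* Copy every user buffer into a kernel-side temporary first. */
	if (ncmds) {
		kcmds = malloc(ncmds * sizeof(*kcmds));
		if (!kcmds)
			return -ENOMEM;
		memcpy(kcmds, user_cmds, ncmds * sizeof(*kcmds));
	}
	if (nboxes) {
		kboxes = malloc(nboxes * 16);
		if (!kboxes) {
			ret = -ENOMEM;
			goto done;
		}
		memcpy(kboxes, user_boxes, nboxes * 16);
	}

	/* Parse from the private copies; no further user access is needed. */
	for (i = 0; i < ncmds; i++) {
		if (kcmds[i].op == 0) {		/* reject an invalid command */
			ret = -EINVAL;
			goto done;
		}
	}

done:
	/* free(NULL) is a no-op, just as drm_free() is safe on a NULL buffer. */
	free(kcmds);
	free(kboxes);
	return ret;
}
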
diff --git a/drivers/char/drm/sis_drm.h b/drivers/char/drm/sis_drm.h
index 8f273da76ddb..30f7b3827466 100644
--- a/drivers/char/drm/sis_drm.h
+++ b/drivers/char/drm/sis_drm.h
@@ -1,3 +1,28 @@
1/* sis_drv.h -- Private header for sis driver -*- linux-c -*- */
2/*
3 * Copyright 2005 Eric Anholt
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the next
14 * paragraph) shall be included in all copies or substantial portions of the
15 * Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
20 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
21 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
22 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
23 * SOFTWARE.
24 *
25 */
1 26
2#ifndef __SIS_DRM_H__ 27#ifndef __SIS_DRM_H__
3#define __SIS_DRM_H__ 28#define __SIS_DRM_H__
diff --git a/drivers/char/drm/sis_drv.c b/drivers/char/drm/sis_drv.c
index 3cef10643a8f..6f6d7d613ede 100644
--- a/drivers/char/drm/sis_drv.c
+++ b/drivers/char/drm/sis_drv.c
@@ -32,31 +32,6 @@
32 32
33#include "drm_pciids.h" 33#include "drm_pciids.h"
34 34
35static int postinit(struct drm_device *dev, unsigned long flags)
36{
37 DRM_INFO("Initialized %s %d.%d.%d %s on minor %d: %s\n",
38 DRIVER_NAME,
39 DRIVER_MAJOR,
40 DRIVER_MINOR,
41 DRIVER_PATCHLEVEL,
42 DRIVER_DATE, dev->primary.minor, pci_pretty_name(dev->pdev)
43 );
44 return 0;
45}
46
47static int version(drm_version_t * version)
48{
49 int len;
50
51 version->version_major = DRIVER_MAJOR;
52 version->version_minor = DRIVER_MINOR;
53 version->version_patchlevel = DRIVER_PATCHLEVEL;
54 DRM_COPY(version->name, DRIVER_NAME);
55 DRM_COPY(version->date, DRIVER_DATE);
56 DRM_COPY(version->desc, DRIVER_DESC);
57 return 0;
58}
59
60static struct pci_device_id pciidlist[] = { 35static struct pci_device_id pciidlist[] = {
61 sisdrv_PCI_IDS 36 sisdrv_PCI_IDS
62}; 37};
@@ -68,8 +43,6 @@ static struct drm_driver driver = {
68 .reclaim_buffers = drm_core_reclaim_buffers, 43 .reclaim_buffers = drm_core_reclaim_buffers,
69 .get_map_ofs = drm_core_get_map_ofs, 44 .get_map_ofs = drm_core_get_map_ofs,
70 .get_reg_ofs = drm_core_get_reg_ofs, 45 .get_reg_ofs = drm_core_get_reg_ofs,
71 .postinit = postinit,
72 .version = version,
73 .ioctls = sis_ioctls, 46 .ioctls = sis_ioctls,
74 .fops = { 47 .fops = {
75 .owner = THIS_MODULE, 48 .owner = THIS_MODULE,
@@ -79,11 +52,18 @@ static struct drm_driver driver = {
79 .mmap = drm_mmap, 52 .mmap = drm_mmap,
80 .poll = drm_poll, 53 .poll = drm_poll,
81 .fasync = drm_fasync, 54 .fasync = drm_fasync,
82 }, 55 },
83 .pci_driver = { 56 .pci_driver = {
84 .name = DRIVER_NAME, 57 .name = DRIVER_NAME,
85 .id_table = pciidlist, 58 .id_table = pciidlist,
86 } 59 },
60
61 .name = DRIVER_NAME,
62 .desc = DRIVER_DESC,
63 .date = DRIVER_DATE,
64 .major = DRIVER_MAJOR,
65 .minor = DRIVER_MINOR,
66 .patchlevel = DRIVER_PATCHLEVEL,
87}; 67};
88 68
89static int __init sis_init(void) 69static int __init sis_init(void)
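
The sis_drv.c hunks drop the driver-local postinit() and version() callbacks: name, description, date and the three version numbers become plain data fields of struct drm_driver, so the DRM core can print the init banner and answer version queries itself. A minimal sketch of that data-over-callbacks shape, with hypothetical names and placeholder version values:

#include <stdio.h>

struct drv_desc {
	const char *name, *desc, *date;
	int major, minor, patchlevel;
};

/* Placeholder values -- not the real SiS driver strings. */
static const struct drv_desc example_driver = {
	.name = "example", .desc = "illustrative driver", .date = "20060101",
	.major = 1, .minor = 0, .patchlevel = 0,
};

/* What a generic core can now do for any driver descriptor. */
static void core_postinit(const struct drv_desc *d, int minor_dev)
{
	printf("Initialized %s %d.%d.%d %s on minor %d\n",
	       d->name, d->major, d->minor, d->patchlevel, d->date, minor_dev);
}

int main(void)
{
	core_postinit(&example_driver, 0);
	return 0;
}
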
diff --git a/drivers/char/drm/sis_drv.h b/drivers/char/drm/sis_drv.h
index b1fddad83a93..e218e5269503 100644
--- a/drivers/char/drm/sis_drv.h
+++ b/drivers/char/drm/sis_drv.h
@@ -1,5 +1,5 @@
1/* sis_drv.h -- Private header for sis driver -*- linux-c -*- 1/* sis_drv.h -- Private header for sis driver -*- linux-c -*- */
2 * 2/*
3 * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas. 3 * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
4 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. 4 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
5 * All rights reserved. 5 * All rights reserved.
diff --git a/drivers/char/drm/sis_ds.h b/drivers/char/drm/sis_ds.h
index da850b4f5440..94f2b4728b63 100644
--- a/drivers/char/drm/sis_ds.h
+++ b/drivers/char/drm/sis_ds.h
@@ -1,6 +1,7 @@
1/* sis_ds.h -- Private header for Direct Rendering Manager -*- linux-c -*- 1/* sis_ds.h -- Private header for Direct Rendering Manager -*- linux-c -*-
2 * Created: Mon Jan 4 10:05:05 1999 by sclin@sis.com.tw 2 * Created: Mon Jan 4 10:05:05 1999 by sclin@sis.com.tw
3 * 3 */
4/*
4 * Copyright 2000 Silicon Integrated Systems Corp, Inc., HsinChu, Taiwan. 5 * Copyright 2000 Silicon Integrated Systems Corp, Inc., HsinChu, Taiwan.
5 * All rights reserved. 6 * All rights reserved.
6 * 7 *
@@ -35,7 +36,7 @@
35 36
36#define SET_SIZE 5000 37#define SET_SIZE 5000
37 38
38typedef unsigned int ITEM_TYPE; 39typedef unsigned long ITEM_TYPE;
39 40
40typedef struct { 41typedef struct {
41 ITEM_TYPE val; 42 ITEM_TYPE val;
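
ITEM_TYPE grows from unsigned int to unsigned long because the set code stores PMemBlock pointers in it, and a 32-bit integer truncates a pointer on 64-bit kernels (which is also why the sis_mm.c DRM_DEBUG formats switch to %lx below). A standalone illustration, with made-up names:

#include <assert.h>
#include <stdio.h>

int main(void)
{
	int object = 42;
	unsigned int  narrow = (unsigned int)(unsigned long)&object; /* may truncate on LP64 */
	unsigned long wide   = (unsigned long)&object;               /* always pointer-sized */

	printf("sizeof(void *)=%zu, sizeof(unsigned int)=%zu, sizeof(unsigned long)=%zu\n",
	       sizeof(void *), sizeof(unsigned int), sizeof(unsigned long));

	/* Only the unsigned long value reliably round-trips back to the pointer. */
	assert((int *)wide == &object);
	(void)narrow;
	return 0;
}
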
diff --git a/drivers/char/drm/sis_mm.c b/drivers/char/drm/sis_mm.c
index a8529728fa63..6774d2fe3452 100644
--- a/drivers/char/drm/sis_mm.c
+++ b/drivers/char/drm/sis_mm.c
@@ -86,7 +86,7 @@ static int sis_fb_alloc(DRM_IOCTL_ARGS)
86{ 86{
87 drm_sis_mem_t fb; 87 drm_sis_mem_t fb;
88 struct sis_memreq req; 88 struct sis_memreq req;
89 drm_sis_mem_t __user *argp = (void __user *)data; 89 drm_sis_mem_t __user *argp = (drm_sis_mem_t __user *)data;
90 int retval = 0; 90 int retval = 0;
91 91
92 DRM_COPY_FROM_USER_IOCTL(fb, argp, sizeof(fb)); 92 DRM_COPY_FROM_USER_IOCTL(fb, argp, sizeof(fb));
@@ -110,7 +110,7 @@ static int sis_fb_alloc(DRM_IOCTL_ARGS)
110 110
111 DRM_COPY_TO_USER_IOCTL(argp, fb, sizeof(fb)); 111 DRM_COPY_TO_USER_IOCTL(argp, fb, sizeof(fb));
112 112
113 DRM_DEBUG("alloc fb, size = %d, offset = %d\n", fb.size, req.offset); 113 DRM_DEBUG("alloc fb, size = %d, offset = %ld\n", fb.size, req.offset);
114 114
115 return retval; 115 return retval;
116} 116}
@@ -127,9 +127,9 @@ static int sis_fb_free(DRM_IOCTL_ARGS)
127 127
128 if (!del_alloc_set(fb.context, VIDEO_TYPE, fb.free)) 128 if (!del_alloc_set(fb.context, VIDEO_TYPE, fb.free))
129 retval = DRM_ERR(EINVAL); 129 retval = DRM_ERR(EINVAL);
130 sis_free((u32) fb.free); 130 sis_free(fb.free);
131 131
132 DRM_DEBUG("free fb, offset = %lu\n", fb.free); 132 DRM_DEBUG("free fb, offset = 0x%lx\n", fb.free);
133 133
134 return retval; 134 return retval;
135} 135}
@@ -176,7 +176,7 @@ static int sis_fb_alloc(DRM_IOCTL_ARGS)
176{ 176{
177 DRM_DEVICE; 177 DRM_DEVICE;
178 drm_sis_private_t *dev_priv = dev->dev_private; 178 drm_sis_private_t *dev_priv = dev->dev_private;
179 drm_sis_mem_t __user *argp = (void __user *)data; 179 drm_sis_mem_t __user *argp = (drm_sis_mem_t __user *)data;
180 drm_sis_mem_t fb; 180 drm_sis_mem_t fb;
181 PMemBlock block; 181 PMemBlock block;
182 int retval = 0; 182 int retval = 0;
@@ -267,7 +267,7 @@ static int sis_ioctl_agp_alloc(DRM_IOCTL_ARGS)
267{ 267{
268 DRM_DEVICE; 268 DRM_DEVICE;
269 drm_sis_private_t *dev_priv = dev->dev_private; 269 drm_sis_private_t *dev_priv = dev->dev_private;
270 drm_sis_mem_t __user *argp = (void __user *)data; 270 drm_sis_mem_t __user *argp = (drm_sis_mem_t __user *)data;
271 drm_sis_mem_t agp; 271 drm_sis_mem_t agp;
272 PMemBlock block; 272 PMemBlock block;
273 int retval = 0; 273 int retval = 0;
@@ -367,7 +367,7 @@ int sis_final_context(struct drm_device *dev, int context)
367 367
368 if (i < MAX_CONTEXT) { 368 if (i < MAX_CONTEXT) {
369 set_t *set; 369 set_t *set;
370 unsigned int item; 370 ITEM_TYPE item;
371 int retval; 371 int retval;
372 372
373 DRM_DEBUG("find socket %d, context = %d\n", i, context); 373 DRM_DEBUG("find socket %d, context = %d\n", i, context);
@@ -376,7 +376,7 @@ int sis_final_context(struct drm_device *dev, int context)
376 set = global_ppriv[i].sets[0]; 376 set = global_ppriv[i].sets[0];
377 retval = setFirst(set, &item); 377 retval = setFirst(set, &item);
378 while (retval) { 378 while (retval) {
379 DRM_DEBUG("free video memory 0x%x\n", item); 379 DRM_DEBUG("free video memory 0x%lx\n", item);
380#if defined(__linux__) && defined(CONFIG_FB_SIS) 380#if defined(__linux__) && defined(CONFIG_FB_SIS)
381 sis_free(item); 381 sis_free(item);
382#else 382#else
@@ -390,7 +390,7 @@ int sis_final_context(struct drm_device *dev, int context)
390 set = global_ppriv[i].sets[1]; 390 set = global_ppriv[i].sets[1];
391 retval = setFirst(set, &item); 391 retval = setFirst(set, &item);
392 while (retval) { 392 while (retval) {
393 DRM_DEBUG("free agp memory 0x%x\n", item); 393 DRM_DEBUG("free agp memory 0x%lx\n", item);
394 mmFreeMem((PMemBlock) item); 394 mmFreeMem((PMemBlock) item);
395 retval = setNext(set, &item); 395 retval = setNext(set, &item);
396 } 396 }
@@ -403,12 +403,12 @@ int sis_final_context(struct drm_device *dev, int context)
403} 403}
404 404
405drm_ioctl_desc_t sis_ioctls[] = { 405drm_ioctl_desc_t sis_ioctls[] = {
406 [DRM_IOCTL_NR(DRM_SIS_FB_ALLOC)] = {sis_fb_alloc, 1, 0}, 406 [DRM_IOCTL_NR(DRM_SIS_FB_ALLOC)] = {sis_fb_alloc, DRM_AUTH},
407 [DRM_IOCTL_NR(DRM_SIS_FB_FREE)] = {sis_fb_free, 1, 0}, 407 [DRM_IOCTL_NR(DRM_SIS_FB_FREE)] = {sis_fb_free, DRM_AUTH},
408 [DRM_IOCTL_NR(DRM_SIS_AGP_INIT)] = {sis_ioctl_agp_init, 1, 1}, 408 [DRM_IOCTL_NR(DRM_SIS_AGP_INIT)] = {sis_ioctl_agp_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
409 [DRM_IOCTL_NR(DRM_SIS_AGP_ALLOC)] = {sis_ioctl_agp_alloc, 1, 0}, 409 [DRM_IOCTL_NR(DRM_SIS_AGP_ALLOC)] = {sis_ioctl_agp_alloc, DRM_AUTH},
410 [DRM_IOCTL_NR(DRM_SIS_AGP_FREE)] = {sis_ioctl_agp_free, 1, 0}, 410 [DRM_IOCTL_NR(DRM_SIS_AGP_FREE)] = {sis_ioctl_agp_free, DRM_AUTH},
411 [DRM_IOCTL_NR(DRM_SIS_FB_INIT)] = {sis_fb_init, 1, 1} 411 [DRM_IOCTL_NR(DRM_SIS_FB_INIT)] = {sis_fb_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}
412}; 412};
413 413
414int sis_max_ioctl = DRM_ARRAY_SIZE(sis_ioctls); 414int sis_max_ioctl = DRM_ARRAY_SIZE(sis_ioctls);
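
The sis_ioctls[] hunk replaces the positional auth/root columns of the old {handler, 1, 0} entries with named permission bits (DRM_AUTH, DRM_MASTER, DRM_ROOT_ONLY). The sketch below shows the general flag-checked dispatch idea; the flag names, handlers and dispatcher are hypothetical, not the DRM core's actual code.

#include <errno.h>
#include <stdio.h>

#define FLAG_AUTH      (1 << 0)
#define FLAG_MASTER    (1 << 1)
#define FLAG_ROOT_ONLY (1 << 2)

typedef int (*ioctl_fn)(void *arg);

struct ioctl_desc {
	ioctl_fn func;
	unsigned int flags;
};

static int fb_alloc(void *arg) { (void)arg; return 0; }
static int agp_init(void *arg) { (void)arg; return 0; }

static const struct ioctl_desc ioctls[] = {
	{ fb_alloc, FLAG_AUTH },
	{ agp_init, FLAG_AUTH | FLAG_MASTER | FLAG_ROOT_ONLY },
};

static int dispatch(unsigned int nr, void *arg,
		    int authenticated, int is_master, int is_root)
{
	const struct ioctl_desc *d = &ioctls[nr];

	if ((d->flags & FLAG_AUTH) && !authenticated)
		return -EACCES;
	if ((d->flags & FLAG_MASTER) && !is_master)
		return -EACCES;
	if ((d->flags & FLAG_ROOT_ONLY) && !is_root)
		return -EACCES;
	return d->func(arg);
}

int main(void)
{
	printf("%d\n", dispatch(1, NULL, 1, 1, 0)); /* -EACCES: caller is not root */
	return 0;
}
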
diff --git a/drivers/char/drm/tdfx_drv.c b/drivers/char/drm/tdfx_drv.c
index c275cbb6e9ce..baa4416032a8 100644
--- a/drivers/char/drm/tdfx_drv.c
+++ b/drivers/char/drm/tdfx_drv.c
@@ -36,31 +36,6 @@
36 36
37#include "drm_pciids.h" 37#include "drm_pciids.h"
38 38
39static int postinit(struct drm_device *dev, unsigned long flags)
40{
41 DRM_INFO("Initialized %s %d.%d.%d %s on minor %d: %s\n",
42 DRIVER_NAME,
43 DRIVER_MAJOR,
44 DRIVER_MINOR,
45 DRIVER_PATCHLEVEL,
46 DRIVER_DATE, dev->primary.minor, pci_pretty_name(dev->pdev)
47 );
48 return 0;
49}
50
51static int version(drm_version_t * version)
52{
53 int len;
54
55 version->version_major = DRIVER_MAJOR;
56 version->version_minor = DRIVER_MINOR;
57 version->version_patchlevel = DRIVER_PATCHLEVEL;
58 DRM_COPY(version->name, DRIVER_NAME);
59 DRM_COPY(version->date, DRIVER_DATE);
60 DRM_COPY(version->desc, DRIVER_DESC);
61 return 0;
62}
63
64static struct pci_device_id pciidlist[] = { 39static struct pci_device_id pciidlist[] = {
65 tdfx_PCI_IDS 40 tdfx_PCI_IDS
66}; 41};
@@ -70,8 +45,6 @@ static struct drm_driver driver = {
70 .reclaim_buffers = drm_core_reclaim_buffers, 45 .reclaim_buffers = drm_core_reclaim_buffers,
71 .get_map_ofs = drm_core_get_map_ofs, 46 .get_map_ofs = drm_core_get_map_ofs,
72 .get_reg_ofs = drm_core_get_reg_ofs, 47 .get_reg_ofs = drm_core_get_reg_ofs,
73 .postinit = postinit,
74 .version = version,
75 .fops = { 48 .fops = {
76 .owner = THIS_MODULE, 49 .owner = THIS_MODULE,
77 .open = drm_open, 50 .open = drm_open,
@@ -80,11 +53,18 @@ static struct drm_driver driver = {
80 .mmap = drm_mmap, 53 .mmap = drm_mmap,
81 .poll = drm_poll, 54 .poll = drm_poll,
82 .fasync = drm_fasync, 55 .fasync = drm_fasync,
83 }, 56 },
84 .pci_driver = { 57 .pci_driver = {
85 .name = DRIVER_NAME, 58 .name = DRIVER_NAME,
86 .id_table = pciidlist, 59 .id_table = pciidlist,
87 } 60 },
61
62 .name = DRIVER_NAME,
63 .desc = DRIVER_DESC,
64 .date = DRIVER_DATE,
65 .major = DRIVER_MAJOR,
66 .minor = DRIVER_MINOR,
67 .patchlevel = DRIVER_PATCHLEVEL,
88}; 68};
89 69
90static int __init tdfx_init(void) 70static int __init tdfx_init(void)
diff --git a/drivers/char/drm/tdfx_drv.h b/drivers/char/drm/tdfx_drv.h
index a582a3db4c75..84204ec1b046 100644
--- a/drivers/char/drm/tdfx_drv.h
+++ b/drivers/char/drm/tdfx_drv.h
@@ -1,6 +1,7 @@
1/* tdfx.h -- 3dfx DRM template customization -*- linux-c -*- 1/* tdfx.h -- 3dfx DRM template customization -*- linux-c -*-
2 * Created: Wed Feb 14 12:32:32 2001 by gareth@valinux.com 2 * Created: Wed Feb 14 12:32:32 2001 by gareth@valinux.com
3 * 3 */
4/*
4 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. 5 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
5 * All Rights Reserved. 6 * All Rights Reserved.
6 * 7 *
@@ -30,10 +31,6 @@
30#ifndef __TDFX_H__ 31#ifndef __TDFX_H__
31#define __TDFX_H__ 32#define __TDFX_H__
32 33
33/* This remains constant for all DRM template files.
34 */
35#define DRM(x) tdfx_##x
36
37/* General customization: 34/* General customization:
38 */ 35 */
39 36
diff --git a/drivers/char/drm/via_dma.c b/drivers/char/drm/via_dma.c
index d4b1766608b0..593c0b8f650a 100644
--- a/drivers/char/drm/via_dma.c
+++ b/drivers/char/drm/via_dma.c
@@ -213,7 +213,9 @@ static int via_initialize(drm_device_t * dev,
213 dev_priv->dma_wrap = init->size; 213 dev_priv->dma_wrap = init->size;
214 dev_priv->dma_offset = init->offset; 214 dev_priv->dma_offset = init->offset;
215 dev_priv->last_pause_ptr = NULL; 215 dev_priv->last_pause_ptr = NULL;
216 dev_priv->hw_addr_ptr = dev_priv->mmio->handle + init->reg_pause_addr; 216 dev_priv->hw_addr_ptr =
217 (volatile uint32_t *)((char *)dev_priv->mmio->handle +
218 init->reg_pause_addr);
217 219
218 via_cmdbuf_start(dev_priv); 220 via_cmdbuf_start(dev_priv);
219 221
@@ -232,13 +234,13 @@ int via_dma_init(DRM_IOCTL_ARGS)
232 234
233 switch (init.func) { 235 switch (init.func) {
234 case VIA_INIT_DMA: 236 case VIA_INIT_DMA:
235 if (!capable(CAP_SYS_ADMIN)) 237 if (!DRM_SUSER(DRM_CURPROC))
236 retcode = DRM_ERR(EPERM); 238 retcode = DRM_ERR(EPERM);
237 else 239 else
238 retcode = via_initialize(dev, dev_priv, &init); 240 retcode = via_initialize(dev, dev_priv, &init);
239 break; 241 break;
240 case VIA_CLEANUP_DMA: 242 case VIA_CLEANUP_DMA:
241 if (!capable(CAP_SYS_ADMIN)) 243 if (!DRM_SUSER(DRM_CURPROC))
242 retcode = DRM_ERR(EPERM); 244 retcode = DRM_ERR(EPERM);
243 else 245 else
244 retcode = via_dma_cleanup(dev); 246 retcode = via_dma_cleanup(dev);
@@ -349,9 +351,6 @@ int via_cmdbuffer(DRM_IOCTL_ARGS)
349 return 0; 351 return 0;
350} 352}
351 353
352extern int
353via_parse_command_stream(drm_device_t * dev, const uint32_t * buf,
354 unsigned int size);
355static int via_dispatch_pci_cmdbuffer(drm_device_t * dev, 354static int via_dispatch_pci_cmdbuffer(drm_device_t * dev,
356 drm_via_cmdbuffer_t * cmd) 355 drm_via_cmdbuffer_t * cmd)
357{ 356{
@@ -450,9 +449,9 @@ static int via_hook_segment(drm_via_private_t * dev_priv,
450 if ((count <= 8) && (count >= 0)) { 449 if ((count <= 8) && (count >= 0)) {
451 uint32_t rgtr, ptr; 450 uint32_t rgtr, ptr;
452 rgtr = *(dev_priv->hw_addr_ptr); 451 rgtr = *(dev_priv->hw_addr_ptr);
453 ptr = ((char *)dev_priv->last_pause_ptr - dev_priv->dma_ptr) + 452 ptr = ((volatile char *)dev_priv->last_pause_ptr -
454 dev_priv->dma_offset + (uint32_t) dev_priv->agpAddr + 4 - 453 dev_priv->dma_ptr) + dev_priv->dma_offset +
455 CMDBUF_ALIGNMENT_SIZE; 454 (uint32_t) dev_priv->agpAddr + 4 - CMDBUF_ALIGNMENT_SIZE;
456 if (rgtr <= ptr) { 455 if (rgtr <= ptr) {
457 DRM_ERROR 456 DRM_ERROR
458 ("Command regulator\npaused at count %d, address %x, " 457 ("Command regulator\npaused at count %d, address %x, "
@@ -472,7 +471,7 @@ static int via_hook_segment(drm_via_private_t * dev_priv,
472 && count--) ; 471 && count--) ;
473 472
474 rgtr = *(dev_priv->hw_addr_ptr); 473 rgtr = *(dev_priv->hw_addr_ptr);
475 ptr = ((char *)paused_at - dev_priv->dma_ptr) + 474 ptr = ((volatile char *)paused_at - dev_priv->dma_ptr) +
476 dev_priv->dma_offset + (uint32_t) dev_priv->agpAddr + 4; 475 dev_priv->dma_offset + (uint32_t) dev_priv->agpAddr + 4;
477 476
478 ptr_low = (ptr > 3 * CMDBUF_ALIGNMENT_SIZE) ? 477 ptr_low = (ptr > 3 * CMDBUF_ALIGNMENT_SIZE) ?
@@ -724,3 +723,22 @@ int via_cmdbuf_size(DRM_IOCTL_ARGS)
724 sizeof(d_siz)); 723 sizeof(d_siz));
725 return ret; 724 return ret;
726} 725}
726
727drm_ioctl_desc_t via_ioctls[] = {
728 [DRM_IOCTL_NR(DRM_VIA_ALLOCMEM)] = {via_mem_alloc, DRM_AUTH},
729 [DRM_IOCTL_NR(DRM_VIA_FREEMEM)] = {via_mem_free, DRM_AUTH},
730 [DRM_IOCTL_NR(DRM_VIA_AGP_INIT)] = {via_agp_init, DRM_AUTH|DRM_MASTER},
731 [DRM_IOCTL_NR(DRM_VIA_FB_INIT)] = {via_fb_init, DRM_AUTH|DRM_MASTER},
732 [DRM_IOCTL_NR(DRM_VIA_MAP_INIT)] = {via_map_init, DRM_AUTH|DRM_MASTER},
733 [DRM_IOCTL_NR(DRM_VIA_DEC_FUTEX)] = {via_decoder_futex, DRM_AUTH},
734 [DRM_IOCTL_NR(DRM_VIA_DMA_INIT)] = {via_dma_init, DRM_AUTH},
735 [DRM_IOCTL_NR(DRM_VIA_CMDBUFFER)] = {via_cmdbuffer, DRM_AUTH},
736 [DRM_IOCTL_NR(DRM_VIA_FLUSH)] = {via_flush_ioctl, DRM_AUTH},
737 [DRM_IOCTL_NR(DRM_VIA_PCICMD)] = {via_pci_cmdbuffer, DRM_AUTH},
738 [DRM_IOCTL_NR(DRM_VIA_CMDBUF_SIZE)] = {via_cmdbuf_size, DRM_AUTH},
739 [DRM_IOCTL_NR(DRM_VIA_WAIT_IRQ)] = {via_wait_irq, DRM_AUTH},
740 [DRM_IOCTL_NR(DRM_VIA_DMA_BLIT)] = {via_dma_blit, DRM_AUTH},
741 [DRM_IOCTL_NR(DRM_VIA_BLIT_SYNC)] = {via_dma_blit_sync, DRM_AUTH}
742};
743
744int via_max_ioctl = DRM_ARRAY_SIZE(via_ioctls);
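
The via_initialize() hunk above makes the hw_addr_ptr computation explicit: reg_pause_addr is a byte offset into the MMIO aperture, so the base handle is advanced as a char * and only then cast to a volatile uint32_t * register pointer. A minimal sketch of that arithmetic, using an ordinary array in place of a real MMIO mapping (names are illustrative):

#include <stdint.h>
#include <stdio.h>

static volatile uint32_t *reg_ptr(void *mmio_handle, unsigned long byte_offset)
{
	/* char * arithmetic advances by bytes; the final cast sets the access width. */
	return (volatile uint32_t *)((char *)mmio_handle + byte_offset);
}

int main(void)
{
	uint32_t fake_aperture[64] = { 0 };
	volatile uint32_t *pause = reg_ptr(fake_aperture, 0x40); /* 0x40 bytes in */

	*pause = 0xdeadbeef;
	printf("register at +0x40 = 0x%08x\n", (unsigned)fake_aperture[0x40 / 4]);
	return 0;
}
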
diff --git a/drivers/char/drm/via_dmablit.c b/drivers/char/drm/via_dmablit.c
new file mode 100644
index 000000000000..9d5e027dae0e
--- /dev/null
+++ b/drivers/char/drm/via_dmablit.c
@@ -0,0 +1,805 @@
1/* via_dmablit.c -- PCI DMA BitBlt support for the VIA Unichrome/Pro
2 *
3 * Copyright (C) 2005 Thomas Hellstrom, All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sub license,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice (including the
13 * next paragraph) shall be included in all copies or substantial portions
14 * of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
19 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
20 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
21 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
22 * USE OR OTHER DEALINGS IN THE SOFTWARE.
23 *
24 * Authors:
25 * Thomas Hellstrom.
26 * Partially based on code obtained from Digeo Inc.
27 */
28
29
30/*
31 * Unmaps the DMA mappings.
32 * FIXME: Is this a NoOp on x86? Also
33 * FIXME: What happens if this one is called and a pending blit has previously done
34 * the same DMA mappings?
35 */
36
37#include "drmP.h"
38#include "via_drm.h"
39#include "via_drv.h"
40#include "via_dmablit.h"
41
42#include <linux/pagemap.h>
43
44#define VIA_PGDN(x) (((unsigned long)(x)) & PAGE_MASK)
45#define VIA_PGOFF(x) (((unsigned long)(x)) & ~PAGE_MASK)
46#define VIA_PFN(x) ((unsigned long)(x) >> PAGE_SHIFT)
47
48typedef struct _drm_via_descriptor {
49 uint32_t mem_addr;
50 uint32_t dev_addr;
51 uint32_t size;
52 uint32_t next;
53} drm_via_descriptor_t;
54
55
56/*
57 * Unmap a DMA mapping.
58 */
59
60
61
62static void
63via_unmap_blit_from_device(struct pci_dev *pdev, drm_via_sg_info_t *vsg)
64{
65 int num_desc = vsg->num_desc;
66 unsigned cur_descriptor_page = num_desc / vsg->descriptors_per_page;
67 unsigned descriptor_this_page = num_desc % vsg->descriptors_per_page;
68 drm_via_descriptor_t *desc_ptr = vsg->desc_pages[cur_descriptor_page] +
69 descriptor_this_page;
70 dma_addr_t next = vsg->chain_start;
71
72 while(num_desc--) {
73 if (descriptor_this_page-- == 0) {
74 cur_descriptor_page--;
75 descriptor_this_page = vsg->descriptors_per_page - 1;
76 desc_ptr = vsg->desc_pages[cur_descriptor_page] +
77 descriptor_this_page;
78 }
79 dma_unmap_single(&pdev->dev, next, sizeof(*desc_ptr), DMA_TO_DEVICE);
80 dma_unmap_page(&pdev->dev, desc_ptr->mem_addr, desc_ptr->size, vsg->direction);
81 next = (dma_addr_t) desc_ptr->next;
82 desc_ptr--;
83 }
84}
85
86/*
87 * If mode = 0, count how many descriptors are needed.
 88 * If mode = 1, map the DMA pages for the device, and build and map the descriptors as well.
89 * Descriptors are run in reverse order by the hardware because we are not allowed to update the
90 * 'next' field without syncing calls when the descriptor is already mapped.
91 */
92
93static void
94via_map_blit_for_device(struct pci_dev *pdev,
95 const drm_via_dmablit_t *xfer,
96 drm_via_sg_info_t *vsg,
97 int mode)
98{
99 unsigned cur_descriptor_page = 0;
100 unsigned num_descriptors_this_page = 0;
101 unsigned char *mem_addr = xfer->mem_addr;
102 unsigned char *cur_mem;
103 unsigned char *first_addr = (unsigned char *)VIA_PGDN(mem_addr);
104 uint32_t fb_addr = xfer->fb_addr;
105 uint32_t cur_fb;
106 unsigned long line_len;
107 unsigned remaining_len;
108 int num_desc = 0;
109 int cur_line;
110 dma_addr_t next = 0 | VIA_DMA_DPR_EC;
111 drm_via_descriptor_t *desc_ptr = 0;
112
113 if (mode == 1)
114 desc_ptr = vsg->desc_pages[cur_descriptor_page];
115
116 for (cur_line = 0; cur_line < xfer->num_lines; ++cur_line) {
117
118 line_len = xfer->line_length;
119 cur_fb = fb_addr;
120 cur_mem = mem_addr;
121
122 while (line_len > 0) {
123
124 remaining_len = min(PAGE_SIZE-VIA_PGOFF(cur_mem), line_len);
125 line_len -= remaining_len;
126
127 if (mode == 1) {
128 desc_ptr->mem_addr =
129 dma_map_page(&pdev->dev,
130 vsg->pages[VIA_PFN(cur_mem) -
131 VIA_PFN(first_addr)],
132 VIA_PGOFF(cur_mem), remaining_len,
133 vsg->direction);
134 desc_ptr->dev_addr = cur_fb;
135
136 desc_ptr->size = remaining_len;
137 desc_ptr->next = (uint32_t) next;
138 next = dma_map_single(&pdev->dev, desc_ptr, sizeof(*desc_ptr),
139 DMA_TO_DEVICE);
140 desc_ptr++;
141 if (++num_descriptors_this_page >= vsg->descriptors_per_page) {
142 num_descriptors_this_page = 0;
143 desc_ptr = vsg->desc_pages[++cur_descriptor_page];
144 }
145 }
146
147 num_desc++;
148 cur_mem += remaining_len;
149 cur_fb += remaining_len;
150 }
151
152 mem_addr += xfer->mem_stride;
153 fb_addr += xfer->fb_stride;
154 }
155
156 if (mode == 1) {
157 vsg->chain_start = next;
158 vsg->state = dr_via_device_mapped;
159 }
160 vsg->num_desc = num_desc;
161}
162
163/*
164 * Function that frees up all resources for a blit. It is usable even if the
 165 * blit info has only been partially built, as long as the status enum is consistent
166 * with the actual status of the used resources.
167 */
168
169
170void
171via_free_sg_info(struct pci_dev *pdev, drm_via_sg_info_t *vsg)
172{
173 struct page *page;
174 int i;
175
176 switch(vsg->state) {
177 case dr_via_device_mapped:
178 via_unmap_blit_from_device(pdev, vsg);
179 case dr_via_desc_pages_alloc:
180 for (i=0; i<vsg->num_desc_pages; ++i) {
181 if (vsg->desc_pages[i] != NULL)
182 free_page((unsigned long)vsg->desc_pages[i]);
183 }
184 kfree(vsg->desc_pages);
185 case dr_via_pages_locked:
186 for (i=0; i<vsg->num_pages; ++i) {
187 if ( NULL != (page = vsg->pages[i])) {
188 if (! PageReserved(page) && (DMA_FROM_DEVICE == vsg->direction))
189 SetPageDirty(page);
190 page_cache_release(page);
191 }
192 }
193 case dr_via_pages_alloc:
194 vfree(vsg->pages);
195 default:
196 vsg->state = dr_via_sg_init;
197 }
198 if (vsg->bounce_buffer) {
199 vfree(vsg->bounce_buffer);
200 vsg->bounce_buffer = NULL;
201 }
202 vsg->free_on_sequence = 0;
203}
204
205/*
206 * Fire a blit engine.
207 */
208
209static void
210via_fire_dmablit(drm_device_t *dev, drm_via_sg_info_t *vsg, int engine)
211{
212 drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;
213
214 VIA_WRITE(VIA_PCI_DMA_MAR0 + engine*0x10, 0);
215 VIA_WRITE(VIA_PCI_DMA_DAR0 + engine*0x10, 0);
216 VIA_WRITE(VIA_PCI_DMA_CSR0 + engine*0x04, VIA_DMA_CSR_DD | VIA_DMA_CSR_TD |
217 VIA_DMA_CSR_DE);
218 VIA_WRITE(VIA_PCI_DMA_MR0 + engine*0x04, VIA_DMA_MR_CM | VIA_DMA_MR_TDIE);
219 VIA_WRITE(VIA_PCI_DMA_BCR0 + engine*0x10, 0);
220 VIA_WRITE(VIA_PCI_DMA_DPR0 + engine*0x10, vsg->chain_start);
221 VIA_WRITE(VIA_PCI_DMA_CSR0 + engine*0x04, VIA_DMA_CSR_DE | VIA_DMA_CSR_TS);
222}
223
224/*
225 * Obtain a page pointer array and lock all pages into system memory. A segmentation violation will
226 * occur here if the calling user does not have access to the submitted address.
227 */
228
229static int
230via_lock_all_dma_pages(drm_via_sg_info_t *vsg, drm_via_dmablit_t *xfer)
231{
232 int ret;
233 unsigned long first_pfn = VIA_PFN(xfer->mem_addr);
234 vsg->num_pages = VIA_PFN(xfer->mem_addr + (xfer->num_lines * xfer->mem_stride -1)) -
235 first_pfn + 1;
236
237 if (NULL == (vsg->pages = vmalloc(sizeof(struct page *) * vsg->num_pages)))
238 return DRM_ERR(ENOMEM);
239 memset(vsg->pages, 0, sizeof(struct page *) * vsg->num_pages);
240 down_read(&current->mm->mmap_sem);
241 ret = get_user_pages(current, current->mm, (unsigned long) xfer->mem_addr,
242 vsg->num_pages, vsg->direction, 0, vsg->pages, NULL);
243
244 up_read(&current->mm->mmap_sem);
245 if (ret != vsg->num_pages) {
246 if (ret < 0)
247 return ret;
248 vsg->state = dr_via_pages_locked;
249 return DRM_ERR(EINVAL);
250 }
251 vsg->state = dr_via_pages_locked;
252 DRM_DEBUG("DMA pages locked\n");
253 return 0;
254}
255
256/*
257 * Allocate DMA capable memory for the blit descriptor chain, and an array that keeps track of the
258 * pages we allocate. We don't want to use kmalloc for the descriptor chain because it may be
 259 * quite large for some blits, and pages don't need to be contiguous.
260 */
261
262static int
263via_alloc_desc_pages(drm_via_sg_info_t *vsg)
264{
265 int i;
266
267 vsg->descriptors_per_page = PAGE_SIZE / sizeof( drm_via_descriptor_t);
268 vsg->num_desc_pages = (vsg->num_desc + vsg->descriptors_per_page - 1) /
269 vsg->descriptors_per_page;
270
271 if (NULL == (vsg->desc_pages = kmalloc(sizeof(void *) * vsg->num_desc_pages, GFP_KERNEL)))
272 return DRM_ERR(ENOMEM);
273
274 memset(vsg->desc_pages, 0, sizeof(void *) * vsg->num_desc_pages);
275 vsg->state = dr_via_desc_pages_alloc;
276 for (i=0; i<vsg->num_desc_pages; ++i) {
277 if (NULL == (vsg->desc_pages[i] =
278 (drm_via_descriptor_t *) __get_free_page(GFP_KERNEL)))
279 return DRM_ERR(ENOMEM);
280 }
281 DRM_DEBUG("Allocated %d pages for %d descriptors.\n", vsg->num_desc_pages,
282 vsg->num_desc);
283 return 0;
284}
285
286static void
287via_abort_dmablit(drm_device_t *dev, int engine)
288{
289 drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;
290
291 VIA_WRITE(VIA_PCI_DMA_CSR0 + engine*0x04, VIA_DMA_CSR_TA);
292}
293
294static void
295via_dmablit_engine_off(drm_device_t *dev, int engine)
296{
297 drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;
298
299 VIA_WRITE(VIA_PCI_DMA_CSR0 + engine*0x04, VIA_DMA_CSR_TD | VIA_DMA_CSR_DD);
300}
301
302
303
304/*
305 * The dmablit part of the IRQ handler. Trying to do only reasonably fast things here.
 306 * The rest, like unmapping and freeing memory for completed blits, is done in a separate workqueue
307 * task. Basically the task of the interrupt handler is to submit a new blit to the engine, while
308 * the workqueue task takes care of processing associated with the old blit.
309 */
310
311void
312via_dmablit_handler(drm_device_t *dev, int engine, int from_irq)
313{
314 drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;
315 drm_via_blitq_t *blitq = dev_priv->blit_queues + engine;
316 int cur;
317 int done_transfer;
318 unsigned long irqsave=0;
319 uint32_t status = 0;
320
321 DRM_DEBUG("DMA blit handler called. engine = %d, from_irq = %d, blitq = 0x%lx\n",
322 engine, from_irq, (unsigned long) blitq);
323
324 if (from_irq) {
325 spin_lock(&blitq->blit_lock);
326 } else {
327 spin_lock_irqsave(&blitq->blit_lock, irqsave);
328 }
329
330 done_transfer = blitq->is_active &&
331 (( status = VIA_READ(VIA_PCI_DMA_CSR0 + engine*0x04)) & VIA_DMA_CSR_TD);
332 done_transfer = done_transfer || ( blitq->aborting && !(status & VIA_DMA_CSR_DE));
333
334 cur = blitq->cur;
335 if (done_transfer) {
336
337 blitq->blits[cur]->aborted = blitq->aborting;
338 blitq->done_blit_handle++;
339 DRM_WAKEUP(blitq->blit_queue + cur);
340
341 cur++;
342 if (cur >= VIA_NUM_BLIT_SLOTS)
343 cur = 0;
344 blitq->cur = cur;
345
346 /*
347 * Clear transfer done flag.
348 */
349
350 VIA_WRITE(VIA_PCI_DMA_CSR0 + engine*0x04, VIA_DMA_CSR_TD);
351
352 blitq->is_active = 0;
353 blitq->aborting = 0;
354 schedule_work(&blitq->wq);
355
356 } else if (blitq->is_active && time_after_eq(jiffies, blitq->end)) {
357
358 /*
359 * Abort transfer after one second.
360 */
361
362 via_abort_dmablit(dev, engine);
363 blitq->aborting = 1;
364 blitq->end = jiffies + DRM_HZ;
365 }
366
367 if (!blitq->is_active) {
368 if (blitq->num_outstanding) {
369 via_fire_dmablit(dev, blitq->blits[cur], engine);
370 blitq->is_active = 1;
371 blitq->cur = cur;
372 blitq->num_outstanding--;
373 blitq->end = jiffies + DRM_HZ;
374 if (!timer_pending(&blitq->poll_timer)) {
375 blitq->poll_timer.expires = jiffies+1;
376 add_timer(&blitq->poll_timer);
377 }
378 } else {
379 if (timer_pending(&blitq->poll_timer)) {
380 del_timer(&blitq->poll_timer);
381 }
382 via_dmablit_engine_off(dev, engine);
383 }
384 }
385
386 if (from_irq) {
387 spin_unlock(&blitq->blit_lock);
388 } else {
389 spin_unlock_irqrestore(&blitq->blit_lock, irqsave);
390 }
391}
392
393
394
395/*
396 * Check whether this blit is still active, performing necessary locking.
397 */
398
399static int
400via_dmablit_active(drm_via_blitq_t *blitq, int engine, uint32_t handle, wait_queue_head_t **queue)
401{
402 unsigned long irqsave;
403 uint32_t slot;
404 int active;
405
406 spin_lock_irqsave(&blitq->blit_lock, irqsave);
407
408 /*
409 * Allow for handle wraparounds.
410 */
411
412 active = ((blitq->done_blit_handle - handle) > (1 << 23)) &&
413 ((blitq->cur_blit_handle - handle) <= (1 << 23));
414
415 if (queue && active) {
416 slot = handle - blitq->done_blit_handle + blitq->cur -1;
417 if (slot >= VIA_NUM_BLIT_SLOTS) {
418 slot -= VIA_NUM_BLIT_SLOTS;
419 }
420 *queue = blitq->blit_queue + slot;
421 }
422
423 spin_unlock_irqrestore(&blitq->blit_lock, irqsave);
424
425 return active;
426}
427
428/*
429 * Sync. Wait for at least three seconds for the blit to be performed.
430 */
431
432static int
433via_dmablit_sync(drm_device_t *dev, uint32_t handle, int engine)
434{
435
436 drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;
437 drm_via_blitq_t *blitq = dev_priv->blit_queues + engine;
438 wait_queue_head_t *queue;
439 int ret = 0;
440
441 if (via_dmablit_active(blitq, engine, handle, &queue)) {
442 DRM_WAIT_ON(ret, *queue, 3 * DRM_HZ,
443 !via_dmablit_active(blitq, engine, handle, NULL));
444 }
445 DRM_DEBUG("DMA blit sync handle 0x%x engine %d returned %d\n",
446 handle, engine, ret);
447
448 return ret;
449}
450
451
452/*
453 * A timer that regularly polls the blit engine in cases where we don't have interrupts:
454 * a) Broken hardware (typically those that don't have any video capture facility).
455 * b) Blit abort. The hardware doesn't send an interrupt when a blit is aborted.
 456 * The timer and hardware IRQs can and do work in parallel. If the hardware has
457 * irqs, it will shorten the latency somewhat.
458 */
459
460
461
462static void
463via_dmablit_timer(unsigned long data)
464{
465 drm_via_blitq_t *blitq = (drm_via_blitq_t *) data;
466 drm_device_t *dev = blitq->dev;
467 int engine = (int)
468 (blitq - ((drm_via_private_t *)dev->dev_private)->blit_queues);
469
470 DRM_DEBUG("Polling timer called for engine %d, jiffies %lu\n", engine,
471 (unsigned long) jiffies);
472
473 via_dmablit_handler(dev, engine, 0);
474
475 if (!timer_pending(&blitq->poll_timer)) {
476 blitq->poll_timer.expires = jiffies+1;
477 add_timer(&blitq->poll_timer);
478 }
479 via_dmablit_handler(dev, engine, 0);
480
481}
482
483
484
485
486/*
487 * Workqueue task that frees data and mappings associated with a blit.
488 * Also wakes up waiting processes. Each of these tasks handles one
489 * blit engine only and may not be called on each interrupt.
490 */
491
492
493static void
494via_dmablit_workqueue(void *data)
495{
496 drm_via_blitq_t *blitq = (drm_via_blitq_t *) data;
497 drm_device_t *dev = blitq->dev;
498 unsigned long irqsave;
499 drm_via_sg_info_t *cur_sg;
500 int cur_released;
501
502
503 DRM_DEBUG("Workqueue task called for blit engine %ld\n",(unsigned long)
504 (blitq - ((drm_via_private_t *)dev->dev_private)->blit_queues));
505
506 spin_lock_irqsave(&blitq->blit_lock, irqsave);
507
508 while(blitq->serviced != blitq->cur) {
509
510 cur_released = blitq->serviced++;
511
512 DRM_DEBUG("Releasing blit slot %d\n", cur_released);
513
514 if (blitq->serviced >= VIA_NUM_BLIT_SLOTS)
515 blitq->serviced = 0;
516
517 cur_sg = blitq->blits[cur_released];
518 blitq->num_free++;
519
520 spin_unlock_irqrestore(&blitq->blit_lock, irqsave);
521
522 DRM_WAKEUP(&blitq->busy_queue);
523
524 via_free_sg_info(dev->pdev, cur_sg);
525 kfree(cur_sg);
526
527 spin_lock_irqsave(&blitq->blit_lock, irqsave);
528 }
529
530 spin_unlock_irqrestore(&blitq->blit_lock, irqsave);
531}
532
533
534/*
535 * Init all blit engines. Currently we use two, but some hardware have 4.
536 */
537
538
539void
540via_init_dmablit(drm_device_t *dev)
541{
542 int i,j;
543 drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;
544 drm_via_blitq_t *blitq;
545
546 pci_set_master(dev->pdev);
547
548 for (i=0; i< VIA_NUM_BLIT_ENGINES; ++i) {
549 blitq = dev_priv->blit_queues + i;
550 blitq->dev = dev;
551 blitq->cur_blit_handle = 0;
552 blitq->done_blit_handle = 0;
553 blitq->head = 0;
554 blitq->cur = 0;
555 blitq->serviced = 0;
556 blitq->num_free = VIA_NUM_BLIT_SLOTS;
557 blitq->num_outstanding = 0;
558 blitq->is_active = 0;
559 blitq->aborting = 0;
560 blitq->blit_lock = SPIN_LOCK_UNLOCKED;
561 for (j=0; j<VIA_NUM_BLIT_SLOTS; ++j) {
562 DRM_INIT_WAITQUEUE(blitq->blit_queue + j);
563 }
564 DRM_INIT_WAITQUEUE(&blitq->busy_queue);
565 INIT_WORK(&blitq->wq, via_dmablit_workqueue, blitq);
566 init_timer(&blitq->poll_timer);
567 blitq->poll_timer.function = &via_dmablit_timer;
568 blitq->poll_timer.data = (unsigned long) blitq;
569 }
570}
571
572/*
573 * Build all info and do all mappings required for a blit.
574 */
575
576
577static int
578via_build_sg_info(drm_device_t *dev, drm_via_sg_info_t *vsg, drm_via_dmablit_t *xfer)
579{
580 int draw = xfer->to_fb;
581 int ret = 0;
582
583 vsg->direction = (draw) ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
584 vsg->bounce_buffer = 0;
585
586 vsg->state = dr_via_sg_init;
587
588 if (xfer->num_lines <= 0 || xfer->line_length <= 0) {
589 DRM_ERROR("Zero size bitblt.\n");
590 return DRM_ERR(EINVAL);
591 }
592
593 /*
594 * Below check is a driver limitation, not a hardware one. We
 595 * don't want to lock unused pages, and don't want to incorporate the
 596 * extra logic of avoiding them. Make sure there are none.
597 * (Not a big limitation anyway.)
598 */
599
600 if (((xfer->mem_stride - xfer->line_length) >= PAGE_SIZE) ||
601 (xfer->mem_stride > 2048*4)) {
602 DRM_ERROR("Too large system memory stride. Stride: %d, "
603 "Length: %d\n", xfer->mem_stride, xfer->line_length);
604 return DRM_ERR(EINVAL);
605 }
606
607 if (xfer->num_lines > 2048) {
608 DRM_ERROR("Too many PCI DMA bitblt lines.\n");
609 return DRM_ERR(EINVAL);
610 }
611
612 /*
613 * we allow a negative fb stride to allow flipping of images in
614 * transfer.
615 */
616
617 if (xfer->mem_stride < xfer->line_length ||
618 abs(xfer->fb_stride) < xfer->line_length) {
619 DRM_ERROR("Invalid frame-buffer / memory stride.\n");
620 return DRM_ERR(EINVAL);
621 }
622
623 /*
624 * A hardware bug seems to be worked around if system memory addresses start on
 625 * 16-byte boundaries. This seems a bit restrictive, however. VIA has been contacted
626 * about this. Meanwhile, impose the following restrictions:
627 */
628
629#ifdef VIA_BUGFREE
630 if ((((unsigned long)xfer->mem_addr & 3) != ((unsigned long)xfer->fb_addr & 3)) ||
631 ((xfer->mem_stride & 3) != (xfer->fb_stride & 3))) {
632 DRM_ERROR("Invalid DRM bitblt alignment.\n");
633 return DRM_ERR(EINVAL);
634 }
635#else
636 if ((((unsigned long)xfer->mem_addr & 15) ||
637 ((unsigned long)xfer->fb_addr & 3)) || (xfer->mem_stride & 15) ||
638 (xfer->fb_stride & 3)) {
639 DRM_ERROR("Invalid DRM bitblt alignment.\n");
640 return DRM_ERR(EINVAL);
641 }
642#endif
643
644 if (0 != (ret = via_lock_all_dma_pages(vsg, xfer))) {
645 DRM_ERROR("Could not lock DMA pages.\n");
646 via_free_sg_info(dev->pdev, vsg);
647 return ret;
648 }
649
650 via_map_blit_for_device(dev->pdev, xfer, vsg, 0);
651 if (0 != (ret = via_alloc_desc_pages(vsg))) {
652 DRM_ERROR("Could not allocate DMA descriptor pages.\n");
653 via_free_sg_info(dev->pdev, vsg);
654 return ret;
655 }
656 via_map_blit_for_device(dev->pdev, xfer, vsg, 1);
657
658 return 0;
659}
660
661
662/*
663 * Reserve one free slot in the blit queue. Will wait for one second for one
664 * to become available. Otherwise -EBUSY is returned.
665 */
666
667static int
668via_dmablit_grab_slot(drm_via_blitq_t *blitq, int engine)
669{
670 int ret=0;
671 unsigned long irqsave;
672
673 DRM_DEBUG("Num free is %d\n", blitq->num_free);
674 spin_lock_irqsave(&blitq->blit_lock, irqsave);
675 while(blitq->num_free == 0) {
676 spin_unlock_irqrestore(&blitq->blit_lock, irqsave);
677
678 DRM_WAIT_ON(ret, blitq->busy_queue, DRM_HZ, blitq->num_free > 0);
679 if (ret) {
680 return (DRM_ERR(EINTR) == ret) ? DRM_ERR(EAGAIN) : ret;
681 }
682
683 spin_lock_irqsave(&blitq->blit_lock, irqsave);
684 }
685
686 blitq->num_free--;
687 spin_unlock_irqrestore(&blitq->blit_lock, irqsave);
688
689 return 0;
690}
691
692/*
693 * Hand back a free slot if we changed our mind.
694 */
695
696static void
697via_dmablit_release_slot(drm_via_blitq_t *blitq)
698{
699 unsigned long irqsave;
700
701 spin_lock_irqsave(&blitq->blit_lock, irqsave);
702 blitq->num_free++;
703 spin_unlock_irqrestore(&blitq->blit_lock, irqsave);
704 DRM_WAKEUP( &blitq->busy_queue );
705}
706
707/*
708 * Grab a free slot. Build blit info and queue a blit.
709 */
710
711
712static int
713via_dmablit(drm_device_t *dev, drm_via_dmablit_t *xfer)
714{
715 drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;
716 drm_via_sg_info_t *vsg;
717 drm_via_blitq_t *blitq;
718 int ret;
719 int engine;
720 unsigned long irqsave;
721
722 if (dev_priv == NULL) {
723 DRM_ERROR("Called without initialization.\n");
724 return DRM_ERR(EINVAL);
725 }
726
727 engine = (xfer->to_fb) ? 0 : 1;
728 blitq = dev_priv->blit_queues + engine;
729 if (0 != (ret = via_dmablit_grab_slot(blitq, engine))) {
730 return ret;
731 }
732 if (NULL == (vsg = kmalloc(sizeof(*vsg), GFP_KERNEL))) {
733 via_dmablit_release_slot(blitq);
734 return DRM_ERR(ENOMEM);
735 }
736 if (0 != (ret = via_build_sg_info(dev, vsg, xfer))) {
737 via_dmablit_release_slot(blitq);
738 kfree(vsg);
739 return ret;
740 }
741 spin_lock_irqsave(&blitq->blit_lock, irqsave);
742
743 blitq->blits[blitq->head++] = vsg;
744 if (blitq->head >= VIA_NUM_BLIT_SLOTS)
745 blitq->head = 0;
746 blitq->num_outstanding++;
747 xfer->sync.sync_handle = ++blitq->cur_blit_handle;
748
749 spin_unlock_irqrestore(&blitq->blit_lock, irqsave);
750 xfer->sync.engine = engine;
751
752 via_dmablit_handler(dev, engine, 0);
753
754 return 0;
755}
756
757/*
 758 * Sync on a previously submitted blit. Note that the X server uses signals extensively, and
 759 * there is a very high probability that this IOCTL will be interrupted by a signal. In that
 760 * case it returns with -EAGAIN so that the signal can be delivered.
761 * The caller should then reissue the IOCTL. This is similar to what is being done for drmGetLock().
762 */
763
764int
765via_dma_blit_sync( DRM_IOCTL_ARGS )
766{
767 drm_via_blitsync_t sync;
768 int err;
769 DRM_DEVICE;
770
771 DRM_COPY_FROM_USER_IOCTL(sync, (drm_via_blitsync_t *)data, sizeof(sync));
772
773 if (sync.engine >= VIA_NUM_BLIT_ENGINES)
774 return DRM_ERR(EINVAL);
775
776 err = via_dmablit_sync(dev, sync.sync_handle, sync.engine);
777
778 if (DRM_ERR(EINTR) == err)
779 err = DRM_ERR(EAGAIN);
780
781 return err;
782}
783
784
785/*
786 * Queue a blit and hand back a handle to be used for sync. This IOCTL may be interrupted by a signal
787 * while waiting for a free slot in the blit queue. In that case it returns with -EAGAIN and should
788 * be reissued. See the above IOCTL code.
789 */
790
791int
792via_dma_blit( DRM_IOCTL_ARGS )
793{
794 drm_via_dmablit_t xfer;
795 int err;
796 DRM_DEVICE;
797
798 DRM_COPY_FROM_USER_IOCTL(xfer, (drm_via_dmablit_t __user *)data, sizeof(xfer));
799
800 err = via_dmablit(dev, &xfer);
801
802 DRM_COPY_TO_USER_IOCTL((void __user *)data, xfer, sizeof(xfer));
803
804 return err;
805}
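
via_dmablit_active() in the new file compares 32-bit blit handles with unsigned subtraction, so a handle counts as active when it lies ahead of done_blit_handle but no more than 2^23 behind cur_blit_handle, and the test keeps working after the counters wrap past 2^32. A small standalone illustration (the handle values are made up):

#include <stdint.h>
#include <stdio.h>

static int blit_active(uint32_t done, uint32_t cur, uint32_t handle)
{
	/* Same shape as the driver's check: unsigned wraparound does the work. */
	return ((done - handle) > (1u << 23)) && ((cur - handle) <= (1u << 23));
}

int main(void)
{
	/* Issued after the last completed blit: still pending. */
	printf("%d\n", blit_active(100, 105, 103));	/* prints 1 */
	/* Already completed: no longer active. */
	printf("%d\n", blit_active(100, 105, 99));	/* prints 0 */
	/* Counters crossed the 2^32 boundary; the window test still holds. */
	printf("%d\n", blit_active(0xfffffffeu, 0x00000002u, 0x00000001u)); /* prints 1 */
	return 0;
}
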
diff --git a/drivers/char/drm/via_dmablit.h b/drivers/char/drm/via_dmablit.h
new file mode 100644
index 000000000000..f4036cd5d0e0
--- /dev/null
+++ b/drivers/char/drm/via_dmablit.h
@@ -0,0 +1,140 @@
1/* via_dmablit.h -- PCI DMA BitBlt support for the VIA Unichrome/Pro
2 *
3 * Copyright 2005 Thomas Hellstrom.
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sub license,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the
14 * next paragraph) shall be included in all copies or substantial portions
15 * of the Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
20 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
21 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
22 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
23 * USE OR OTHER DEALINGS IN THE SOFTWARE.
24 *
25 * Authors:
26 * Thomas Hellstrom.
27 * Register info from Digeo Inc.
28 */
29
30#ifndef _VIA_DMABLIT_H
31#define _VIA_DMABLIT_H
32
33#include <linux/dma-mapping.h>
34
35#define VIA_NUM_BLIT_ENGINES 2
36#define VIA_NUM_BLIT_SLOTS 8
37
38struct _drm_via_descriptor;
39
40typedef struct _drm_via_sg_info {
41 struct page **pages;
42 unsigned long num_pages;
43 struct _drm_via_descriptor **desc_pages;
44 int num_desc_pages;
45 int num_desc;
46 enum dma_data_direction direction;
47 unsigned char *bounce_buffer;
48 dma_addr_t chain_start;
49 uint32_t free_on_sequence;
50 unsigned int descriptors_per_page;
51 int aborted;
52 enum {
53 dr_via_device_mapped,
54 dr_via_desc_pages_alloc,
55 dr_via_pages_locked,
56 dr_via_pages_alloc,
57 dr_via_sg_init
58 } state;
59} drm_via_sg_info_t;
60
61typedef struct _drm_via_blitq {
62 drm_device_t *dev;
63 uint32_t cur_blit_handle;
64 uint32_t done_blit_handle;
65 unsigned serviced;
66 unsigned head;
67 unsigned cur;
68 unsigned num_free;
69 unsigned num_outstanding;
70 unsigned long end;
71 int aborting;
72 int is_active;
73 drm_via_sg_info_t *blits[VIA_NUM_BLIT_SLOTS];
74 spinlock_t blit_lock;
75 wait_queue_head_t blit_queue[VIA_NUM_BLIT_SLOTS];
76 wait_queue_head_t busy_queue;
77 struct work_struct wq;
78 struct timer_list poll_timer;
79} drm_via_blitq_t;
80
81
82/*
83 * PCI DMA Registers
84 * Channels 2 & 3 don't seem to be implemented in hardware.
85 */
86
87#define VIA_PCI_DMA_MAR0 0xE40 /* Memory Address Register of Channel 0 */
88#define VIA_PCI_DMA_DAR0 0xE44 /* Device Address Register of Channel 0 */
89#define VIA_PCI_DMA_BCR0 0xE48 /* Byte Count Register of Channel 0 */
90#define VIA_PCI_DMA_DPR0 0xE4C /* Descriptor Pointer Register of Channel 0 */
91
92#define VIA_PCI_DMA_MAR1 0xE50 /* Memory Address Register of Channel 1 */
93#define VIA_PCI_DMA_DAR1 0xE54 /* Device Address Register of Channel 1 */
94#define VIA_PCI_DMA_BCR1 0xE58 /* Byte Count Register of Channel 1 */
95#define VIA_PCI_DMA_DPR1 0xE5C /* Descriptor Pointer Register of Channel 1 */
96
97#define VIA_PCI_DMA_MAR2 0xE60 /* Memory Address Register of Channel 2 */
98#define VIA_PCI_DMA_DAR2 0xE64 /* Device Address Register of Channel 2 */
99#define VIA_PCI_DMA_BCR2 0xE68 /* Byte Count Register of Channel 2 */
100#define VIA_PCI_DMA_DPR2 0xE6C /* Descriptor Pointer Register of Channel 2 */
101
102#define VIA_PCI_DMA_MAR3 0xE70 /* Memory Address Register of Channel 3 */
103#define VIA_PCI_DMA_DAR3 0xE74 /* Device Address Register of Channel 3 */
104#define VIA_PCI_DMA_BCR3 0xE78 /* Byte Count Register of Channel 3 */
105#define VIA_PCI_DMA_DPR3 0xE7C /* Descriptor Pointer Register of Channel 3 */
106
107#define VIA_PCI_DMA_MR0 0xE80 /* Mode Register of Channel 0 */
108#define VIA_PCI_DMA_MR1 0xE84 /* Mode Register of Channel 1 */
109#define VIA_PCI_DMA_MR2 0xE88 /* Mode Register of Channel 2 */
110#define VIA_PCI_DMA_MR3 0xE8C /* Mode Register of Channel 3 */
111
112#define VIA_PCI_DMA_CSR0 0xE90 /* Command/Status Register of Channel 0 */
113#define VIA_PCI_DMA_CSR1 0xE94 /* Command/Status Register of Channel 1 */
114#define VIA_PCI_DMA_CSR2 0xE98 /* Command/Status Register of Channel 2 */
115#define VIA_PCI_DMA_CSR3 0xE9C /* Command/Status Register of Channel 3 */
116
117#define VIA_PCI_DMA_PTR 0xEA0 /* Priority Type Register */
118
119/* Define for DMA engine */
120/* DPR */
121#define VIA_DMA_DPR_EC (1<<1) /* end of chain */
122#define VIA_DMA_DPR_DDIE (1<<2) /* descriptor done interrupt enable */
123#define VIA_DMA_DPR_DT (1<<3) /* direction of transfer (RO) */
124
125/* MR */
126#define VIA_DMA_MR_CM (1<<0) /* chaining mode */
127#define VIA_DMA_MR_TDIE (1<<1) /* transfer done interrupt enable */
128#define VIA_DMA_MR_HENDMACMD (1<<7) /* ? */
129
130/* CSR */
131#define VIA_DMA_CSR_DE (1<<0) /* DMA enable */
132#define VIA_DMA_CSR_TS (1<<1) /* transfer start */
133#define VIA_DMA_CSR_TA (1<<2) /* transfer abort */
134#define VIA_DMA_CSR_TD (1<<3) /* transfer done */
135#define VIA_DMA_CSR_DD (1<<4) /* descriptor done */
136#define VIA_DMA_DPR_EC (1<<1) /* end of chain */
137
138
139
140#endif
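
To make the register map above concrete: the address, byte-count and descriptor-pointer registers (MAR/DAR/BCR/DPR) are spaced 0x10 apart per channel, while the mode and command/status registers (MR/CSR) are spaced 0x04 apart. The sketch below shows how a chained transfer on engine 0 or 1 could be kicked off using those offsets and the DPR/MR/CSR bits; it is modelled on the fire path in via_dmablit.c (not reproduced in this excerpt), but the helper name is illustrative and the write-one-to-clear behaviour of the CSR status bits is an assumption, so treat it as a sketch rather than the driver's code. VIA_WRITE() is the MMIO macro from via_drv.h and picks up dev_priv from scope.

/* Sketch only: start a chained DMA blit on 'engine' (0 or 1), pointing the
 * descriptor pointer register at the bus address of the first descriptor.
 */
static void via_fire_chain(drm_via_private_t *dev_priv, int engine,
			   dma_addr_t chain_start)
{
	VIA_WRITE(VIA_PCI_DMA_MAR0 + engine * 0x10, 0);
	VIA_WRITE(VIA_PCI_DMA_DAR0 + engine * 0x10, 0);
	/* Ack any stale status (assumed write-one-to-clear) and enable DMA. */
	VIA_WRITE(VIA_PCI_DMA_CSR0 + engine * 0x04,
		  VIA_DMA_CSR_DD | VIA_DMA_CSR_TD | VIA_DMA_CSR_DE);
	/* Chaining mode with a transfer-done interrupt. */
	VIA_WRITE(VIA_PCI_DMA_MR0 + engine * 0x04,
		  VIA_DMA_MR_CM | VIA_DMA_MR_TDIE);
	VIA_WRITE(VIA_PCI_DMA_BCR0 + engine * 0x10, 0);
	VIA_WRITE(VIA_PCI_DMA_DPR0 + engine * 0x10, chain_start);
	/* Go: DMA enable plus transfer start. */
	VIA_WRITE(VIA_PCI_DMA_CSR0 + engine * 0x04,
		  VIA_DMA_CSR_DE | VIA_DMA_CSR_TS);
}
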
diff --git a/drivers/char/drm/via_drm.h b/drivers/char/drm/via_drm.h
index ebde9206115e..47f0b5b26379 100644
--- a/drivers/char/drm/via_drm.h
+++ b/drivers/char/drm/via_drm.h
@@ -75,6 +75,8 @@
75#define DRM_VIA_CMDBUF_SIZE 0x0b 75#define DRM_VIA_CMDBUF_SIZE 0x0b
76#define NOT_USED 76#define NOT_USED
77#define DRM_VIA_WAIT_IRQ 0x0d 77#define DRM_VIA_WAIT_IRQ 0x0d
78#define DRM_VIA_DMA_BLIT 0x0e
79#define DRM_VIA_BLIT_SYNC 0x0f
78 80
79#define DRM_IOCTL_VIA_ALLOCMEM DRM_IOWR(DRM_COMMAND_BASE + DRM_VIA_ALLOCMEM, drm_via_mem_t) 81#define DRM_IOCTL_VIA_ALLOCMEM DRM_IOWR(DRM_COMMAND_BASE + DRM_VIA_ALLOCMEM, drm_via_mem_t)
80#define DRM_IOCTL_VIA_FREEMEM DRM_IOW( DRM_COMMAND_BASE + DRM_VIA_FREEMEM, drm_via_mem_t) 82#define DRM_IOCTL_VIA_FREEMEM DRM_IOW( DRM_COMMAND_BASE + DRM_VIA_FREEMEM, drm_via_mem_t)
@@ -89,6 +91,8 @@
89#define DRM_IOCTL_VIA_CMDBUF_SIZE DRM_IOWR( DRM_COMMAND_BASE + DRM_VIA_CMDBUF_SIZE, \ 91#define DRM_IOCTL_VIA_CMDBUF_SIZE DRM_IOWR( DRM_COMMAND_BASE + DRM_VIA_CMDBUF_SIZE, \
90 drm_via_cmdbuf_size_t) 92 drm_via_cmdbuf_size_t)
91#define DRM_IOCTL_VIA_WAIT_IRQ DRM_IOWR( DRM_COMMAND_BASE + DRM_VIA_WAIT_IRQ, drm_via_irqwait_t) 93#define DRM_IOCTL_VIA_WAIT_IRQ DRM_IOWR( DRM_COMMAND_BASE + DRM_VIA_WAIT_IRQ, drm_via_irqwait_t)
94#define DRM_IOCTL_VIA_DMA_BLIT DRM_IOW(DRM_COMMAND_BASE + DRM_VIA_DMA_BLIT, drm_via_dmablit_t)
95#define DRM_IOCTL_VIA_BLIT_SYNC DRM_IOW(DRM_COMMAND_BASE + DRM_VIA_BLIT_SYNC, drm_via_blitsync_t)
92 96
93/* Indices into buf.Setup where various bits of state are mirrored per 97/* Indices into buf.Setup where various bits of state are mirrored per
94 * context and per buffer. These can be fired at the card as a unit, 98 * context and per buffer. These can be fired at the card as a unit,
@@ -103,8 +107,12 @@
103#define VIA_BACK 0x2 107#define VIA_BACK 0x2
104#define VIA_DEPTH 0x4 108#define VIA_DEPTH 0x4
105#define VIA_STENCIL 0x8 109#define VIA_STENCIL 0x8
106#define VIDEO 0 110#define VIA_MEM_VIDEO 0 /* matches drm constant */
107#define AGP 1 111#define VIA_MEM_AGP 1 /* matches drm constant */
112#define VIA_MEM_SYSTEM 2
113#define VIA_MEM_MIXED 3
114#define VIA_MEM_UNKNOWN 4
115
108typedef struct { 116typedef struct {
109 uint32_t offset; 117 uint32_t offset;
110 uint32_t size; 118 uint32_t size;
@@ -192,6 +200,9 @@ typedef struct _drm_via_sarea {
192 unsigned int XvMCSubPicOn[VIA_NR_XVMC_PORTS]; 200 unsigned int XvMCSubPicOn[VIA_NR_XVMC_PORTS];
193 unsigned int XvMCCtxNoGrabbed; /* Last context to hold decoder */ 201 unsigned int XvMCCtxNoGrabbed; /* Last context to hold decoder */
194 202
203 /* Used by the 3d driver only at this point, for pageflipping:
204 */
205 unsigned int pfCurrentOffset;
195} drm_via_sarea_t; 206} drm_via_sarea_t;
196 207
197typedef struct _drm_via_cmdbuf_size { 208typedef struct _drm_via_cmdbuf_size {
@@ -212,6 +223,16 @@ typedef enum {
212 223
213#define VIA_IRQ_FLAGS_MASK 0xF0000000 224#define VIA_IRQ_FLAGS_MASK 0xF0000000
214 225
226enum drm_via_irqs {
227 drm_via_irq_hqv0 = 0,
228 drm_via_irq_hqv1,
229 drm_via_irq_dma0_dd,
230 drm_via_irq_dma0_td,
231 drm_via_irq_dma1_dd,
232 drm_via_irq_dma1_td,
233 drm_via_irq_num
234};
235
215struct drm_via_wait_irq_request { 236struct drm_via_wait_irq_request {
216 unsigned irq; 237 unsigned irq;
217 via_irq_seq_type_t type; 238 via_irq_seq_type_t type;
@@ -224,20 +245,25 @@ typedef union drm_via_irqwait {
224 struct drm_wait_vblank_reply reply; 245 struct drm_wait_vblank_reply reply;
225} drm_via_irqwait_t; 246} drm_via_irqwait_t;
226 247
227#ifdef __KERNEL__ 248typedef struct drm_via_blitsync {
228 249 uint32_t sync_handle;
229int via_fb_init(DRM_IOCTL_ARGS); 250 unsigned engine;
230int via_mem_alloc(DRM_IOCTL_ARGS); 251} drm_via_blitsync_t;
231int via_mem_free(DRM_IOCTL_ARGS); 252
232int via_agp_init(DRM_IOCTL_ARGS); 253typedef struct drm_via_dmablit {
233int via_map_init(DRM_IOCTL_ARGS); 254 uint32_t num_lines;
234int via_decoder_futex(DRM_IOCTL_ARGS); 255 uint32_t line_length;
235int via_dma_init(DRM_IOCTL_ARGS); 256
236int via_cmdbuffer(DRM_IOCTL_ARGS); 257 uint32_t fb_addr;
237int via_flush_ioctl(DRM_IOCTL_ARGS); 258 uint32_t fb_stride;
238int via_pci_cmdbuffer(DRM_IOCTL_ARGS); 259
239int via_cmdbuf_size(DRM_IOCTL_ARGS); 260 unsigned char *mem_addr;
240int via_wait_irq(DRM_IOCTL_ARGS); 261 uint32_t mem_stride;
262
263 int bounce_buffer;
264 int to_fb;
265
266 drm_via_blitsync_t sync;
267} drm_via_dmablit_t;
241 268
242#endif
243#endif /* _VIA_DRM_H_ */ 269#endif /* _VIA_DRM_H_ */
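
For reference, filling in the new drm_via_dmablit_t for a rectangular copy from system memory into the frame buffer could look like the sketch below. The function and variable names are illustrative, and the byte units assumed for line_length and the strides follow from the per-line DMA setup elsewhere in this patch; num_lines is the height of the rectangle and to_fb selects the copy direction.

#include <string.h>
#include "via_drm.h"

/* Sketch: describe a width x height rectangle of 'bpp'-byte pixels, copied
 * from the user buffer 'src' into the frame buffer at byte offset 'fb_offset'
 * with pitch 'fb_pitch'.
 */
static void fill_blit(drm_via_dmablit_t *xfer, unsigned char *src,
		      uint32_t width, uint32_t height, uint32_t bpp,
		      uint32_t fb_offset, uint32_t fb_pitch)
{
	memset(xfer, 0, sizeof(*xfer));
	xfer->num_lines   = height;       /* lines in the rectangle */
	xfer->line_length = width * bpp;  /* bytes copied per line (assumed) */
	xfer->fb_addr     = fb_offset;    /* destination offset in the FB */
	xfer->fb_stride   = fb_pitch;     /* byte distance between FB lines */
	xfer->mem_addr    = src;          /* user-space source pointer */
	xfer->mem_stride  = width * bpp;  /* tightly packed source */
	xfer->to_fb       = 1;            /* system memory -> frame buffer */
	/* xfer->sync is filled in by the DRM_VIA_DMA_BLIT ioctl. */
}

The filled structure is then handed to DRM_IOCTL_VIA_DMA_BLIT, for example with the retry helper sketched after via_dmablit.c above.
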
diff --git a/drivers/char/drm/via_drv.c b/drivers/char/drm/via_drv.c
index 016665e0c69f..3f012255d315 100644
--- a/drivers/char/drm/via_drv.c
+++ b/drivers/char/drm/via_drv.c
@@ -29,54 +29,21 @@
29 29
30#include "drm_pciids.h" 30#include "drm_pciids.h"
31 31
32static int postinit(struct drm_device *dev, unsigned long flags) 32static int dri_library_name(struct drm_device *dev, char *buf)
33{ 33{
34 DRM_INFO("Initialized %s %d.%d.%d %s on minor %d: %s\n", 34 return snprintf(buf, PAGE_SIZE, "unichrome");
35 DRIVER_NAME,
36 DRIVER_MAJOR,
37 DRIVER_MINOR,
38 DRIVER_PATCHLEVEL,
39 DRIVER_DATE, dev->primary.minor, pci_pretty_name(dev->pdev)
40 );
41 return 0;
42}
43
44static int version(drm_version_t * version)
45{
46 int len;
47
48 version->version_major = DRIVER_MAJOR;
49 version->version_minor = DRIVER_MINOR;
50 version->version_patchlevel = DRIVER_PATCHLEVEL;
51 DRM_COPY(version->name, DRIVER_NAME);
52 DRM_COPY(version->date, DRIVER_DATE);
53 DRM_COPY(version->desc, DRIVER_DESC);
54 return 0;
55} 35}
56 36
57static struct pci_device_id pciidlist[] = { 37static struct pci_device_id pciidlist[] = {
58 viadrv_PCI_IDS 38 viadrv_PCI_IDS
59}; 39};
60 40
61static drm_ioctl_desc_t ioctls[] = {
62 [DRM_IOCTL_NR(DRM_VIA_ALLOCMEM)] = {via_mem_alloc, 1, 0},
63 [DRM_IOCTL_NR(DRM_VIA_FREEMEM)] = {via_mem_free, 1, 0},
64 [DRM_IOCTL_NR(DRM_VIA_AGP_INIT)] = {via_agp_init, 1, 0},
65 [DRM_IOCTL_NR(DRM_VIA_FB_INIT)] = {via_fb_init, 1, 0},
66 [DRM_IOCTL_NR(DRM_VIA_MAP_INIT)] = {via_map_init, 1, 0},
67 [DRM_IOCTL_NR(DRM_VIA_DEC_FUTEX)] = {via_decoder_futex, 1, 0},
68 [DRM_IOCTL_NR(DRM_VIA_DMA_INIT)] = {via_dma_init, 1, 0},
69 [DRM_IOCTL_NR(DRM_VIA_CMDBUFFER)] = {via_cmdbuffer, 1, 0},
70 [DRM_IOCTL_NR(DRM_VIA_FLUSH)] = {via_flush_ioctl, 1, 0},
71 [DRM_IOCTL_NR(DRM_VIA_PCICMD)] = {via_pci_cmdbuffer, 1, 0},
72 [DRM_IOCTL_NR(DRM_VIA_CMDBUF_SIZE)] = {via_cmdbuf_size, 1, 0},
73 [DRM_IOCTL_NR(DRM_VIA_WAIT_IRQ)] = {via_wait_irq, 1, 0}
74};
75
76static struct drm_driver driver = { 41static struct drm_driver driver = {
77 .driver_features = 42 .driver_features =
78 DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_HAVE_IRQ | 43 DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_HAVE_IRQ |
79 DRIVER_IRQ_SHARED | DRIVER_IRQ_VBL, 44 DRIVER_IRQ_SHARED | DRIVER_IRQ_VBL,
45 .load = via_driver_load,
46 .unload = via_driver_unload,
80 .context_ctor = via_init_context, 47 .context_ctor = via_init_context,
81 .context_dtor = via_final_context, 48 .context_dtor = via_final_context,
82 .vblank_wait = via_driver_vblank_wait, 49 .vblank_wait = via_driver_vblank_wait,
@@ -85,13 +52,11 @@ static struct drm_driver driver = {
85 .irq_uninstall = via_driver_irq_uninstall, 52 .irq_uninstall = via_driver_irq_uninstall,
86 .irq_handler = via_driver_irq_handler, 53 .irq_handler = via_driver_irq_handler,
87 .dma_quiescent = via_driver_dma_quiescent, 54 .dma_quiescent = via_driver_dma_quiescent,
55 .dri_library_name = dri_library_name,
88 .reclaim_buffers = drm_core_reclaim_buffers, 56 .reclaim_buffers = drm_core_reclaim_buffers,
89 .get_map_ofs = drm_core_get_map_ofs, 57 .get_map_ofs = drm_core_get_map_ofs,
90 .get_reg_ofs = drm_core_get_reg_ofs, 58 .get_reg_ofs = drm_core_get_reg_ofs,
91 .postinit = postinit, 59 .ioctls = via_ioctls,
92 .version = version,
93 .ioctls = ioctls,
94 .num_ioctls = DRM_ARRAY_SIZE(ioctls),
95 .fops = { 60 .fops = {
96 .owner = THIS_MODULE, 61 .owner = THIS_MODULE,
97 .open = drm_open, 62 .open = drm_open,
@@ -100,15 +65,23 @@ static struct drm_driver driver = {
100 .mmap = drm_mmap, 65 .mmap = drm_mmap,
101 .poll = drm_poll, 66 .poll = drm_poll,
102 .fasync = drm_fasync, 67 .fasync = drm_fasync,
103 }, 68 },
104 .pci_driver = { 69 .pci_driver = {
105 .name = DRIVER_NAME, 70 .name = DRIVER_NAME,
106 .id_table = pciidlist, 71 .id_table = pciidlist,
107 } 72 },
73
74 .name = DRIVER_NAME,
75 .desc = DRIVER_DESC,
76 .date = DRIVER_DATE,
77 .major = DRIVER_MAJOR,
78 .minor = DRIVER_MINOR,
79 .patchlevel = DRIVER_PATCHLEVEL,
108}; 80};
109 81
110static int __init via_init(void) 82static int __init via_init(void)
111{ 83{
84 driver.num_ioctls = via_max_ioctl;
112 via_init_command_verifier(); 85 via_init_command_verifier();
113 return drm_init(&driver); 86 return drm_init(&driver);
114} 87}
diff --git a/drivers/char/drm/via_drv.h b/drivers/char/drm/via_drv.h
index 7d5daf43797e..aad4f99f5405 100644
--- a/drivers/char/drm/via_drv.h
+++ b/drivers/char/drm/via_drv.h
@@ -24,24 +24,26 @@
24#ifndef _VIA_DRV_H_ 24#ifndef _VIA_DRV_H_
25#define _VIA_DRV_H_ 25#define _VIA_DRV_H_
26 26
27#define DRIVER_AUTHOR "VIA" 27#define DRIVER_AUTHOR "Various"
28 28
29#define DRIVER_NAME "via" 29#define DRIVER_NAME "via"
30#define DRIVER_DESC "VIA Unichrome / Pro" 30#define DRIVER_DESC "VIA Unichrome / Pro"
31#define DRIVER_DATE "20050523" 31#define DRIVER_DATE "20051116"
32 32
33#define DRIVER_MAJOR 2 33#define DRIVER_MAJOR 2
34#define DRIVER_MINOR 6 34#define DRIVER_MINOR 7
35#define DRIVER_PATCHLEVEL 3 35#define DRIVER_PATCHLEVEL 4
36 36
37#include "via_verifier.h" 37#include "via_verifier.h"
38 38
39#include "via_dmablit.h"
40
39#define VIA_PCI_BUF_SIZE 60000 41#define VIA_PCI_BUF_SIZE 60000
40#define VIA_FIRE_BUF_SIZE 1024 42#define VIA_FIRE_BUF_SIZE 1024
41#define VIA_NUM_IRQS 2 43#define VIA_NUM_IRQS 4
42 44
43typedef struct drm_via_ring_buffer { 45typedef struct drm_via_ring_buffer {
44 drm_map_t map; 46 drm_local_map_t map;
45 char *virtual_start; 47 char *virtual_start;
46} drm_via_ring_buffer_t; 48} drm_via_ring_buffer_t;
47 49
@@ -56,9 +58,9 @@ typedef struct drm_via_irq {
56 58
57typedef struct drm_via_private { 59typedef struct drm_via_private {
58 drm_via_sarea_t *sarea_priv; 60 drm_via_sarea_t *sarea_priv;
59 drm_map_t *sarea; 61 drm_local_map_t *sarea;
60 drm_map_t *fb; 62 drm_local_map_t *fb;
61 drm_map_t *mmio; 63 drm_local_map_t *mmio;
62 unsigned long agpAddr; 64 unsigned long agpAddr;
63 wait_queue_head_t decoder_queue[VIA_NR_XVMC_LOCKS]; 65 wait_queue_head_t decoder_queue[VIA_NR_XVMC_LOCKS];
64 char *dma_ptr; 66 char *dma_ptr;
@@ -82,8 +84,15 @@ typedef struct drm_via_private {
82 maskarray_t *irq_masks; 84 maskarray_t *irq_masks;
83 uint32_t irq_enable_mask; 85 uint32_t irq_enable_mask;
84 uint32_t irq_pending_mask; 86 uint32_t irq_pending_mask;
87 int *irq_map;
88 drm_via_blitq_t blit_queues[VIA_NUM_BLIT_ENGINES];
85} drm_via_private_t; 89} drm_via_private_t;
86 90
91enum via_family {
92 VIA_OTHER = 0,
93 VIA_PRO_GROUP_A,
94};
95
87/* VIA MMIO register access */ 96/* VIA MMIO register access */
88#define VIA_BASE ((dev_priv->mmio)) 97#define VIA_BASE ((dev_priv->mmio))
89 98
@@ -92,12 +101,31 @@ typedef struct drm_via_private {
92#define VIA_READ8(reg) DRM_READ8(VIA_BASE, reg) 101#define VIA_READ8(reg) DRM_READ8(VIA_BASE, reg)
93#define VIA_WRITE8(reg,val) DRM_WRITE8(VIA_BASE, reg, val) 102#define VIA_WRITE8(reg,val) DRM_WRITE8(VIA_BASE, reg, val)
94 103
104extern drm_ioctl_desc_t via_ioctls[];
105extern int via_max_ioctl;
106
107extern int via_fb_init(DRM_IOCTL_ARGS);
108extern int via_mem_alloc(DRM_IOCTL_ARGS);
109extern int via_mem_free(DRM_IOCTL_ARGS);
110extern int via_agp_init(DRM_IOCTL_ARGS);
111extern int via_map_init(DRM_IOCTL_ARGS);
112extern int via_decoder_futex(DRM_IOCTL_ARGS);
113extern int via_dma_init(DRM_IOCTL_ARGS);
114extern int via_cmdbuffer(DRM_IOCTL_ARGS);
115extern int via_flush_ioctl(DRM_IOCTL_ARGS);
116extern int via_pci_cmdbuffer(DRM_IOCTL_ARGS);
117extern int via_cmdbuf_size(DRM_IOCTL_ARGS);
118extern int via_wait_irq(DRM_IOCTL_ARGS);
119extern int via_dma_blit_sync( DRM_IOCTL_ARGS );
120extern int via_dma_blit( DRM_IOCTL_ARGS );
121
122extern int via_driver_load(drm_device_t *dev, unsigned long chipset);
123extern int via_driver_unload(drm_device_t *dev);
124
95extern int via_init_context(drm_device_t * dev, int context); 125extern int via_init_context(drm_device_t * dev, int context);
96extern int via_final_context(drm_device_t * dev, int context); 126extern int via_final_context(drm_device_t * dev, int context);
97 127
98extern int via_do_cleanup_map(drm_device_t * dev); 128extern int via_do_cleanup_map(drm_device_t * dev);
99extern int via_map_init(struct inode *inode, struct file *filp,
100 unsigned int cmd, unsigned long arg);
101extern int via_driver_vblank_wait(drm_device_t * dev, unsigned int *sequence); 129extern int via_driver_vblank_wait(drm_device_t * dev, unsigned int *sequence);
102 130
103extern irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS); 131extern irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS);
@@ -111,8 +139,10 @@ extern int via_driver_dma_quiescent(drm_device_t * dev);
111extern void via_init_futex(drm_via_private_t * dev_priv); 139extern void via_init_futex(drm_via_private_t * dev_priv);
112extern void via_cleanup_futex(drm_via_private_t * dev_priv); 140extern void via_cleanup_futex(drm_via_private_t * dev_priv);
113extern void via_release_futex(drm_via_private_t * dev_priv, int context); 141extern void via_release_futex(drm_via_private_t * dev_priv, int context);
142extern int via_driver_irq_wait(drm_device_t * dev, unsigned int irq,
143 int force_sequence, unsigned int *sequence);
114 144
115extern int via_parse_command_stream(drm_device_t * dev, const uint32_t * buf, 145extern void via_dmablit_handler(drm_device_t *dev, int engine, int from_irq);
116 unsigned int size); 146extern void via_init_dmablit(drm_device_t *dev);
117 147
118#endif 148#endif
diff --git a/drivers/char/drm/via_ds.c b/drivers/char/drm/via_ds.c
index 5c71e089246c..9429736b3b96 100644
--- a/drivers/char/drm/via_ds.c
+++ b/drivers/char/drm/via_ds.c
@@ -22,14 +22,7 @@
22 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 22 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
23 * DEALINGS IN THE SOFTWARE. 23 * DEALINGS IN THE SOFTWARE.
24 */ 24 */
25#include <linux/module.h> 25#include "drmP.h"
26#include <linux/delay.h>
27#include <linux/errno.h>
28#include <linux/kernel.h>
29#include <linux/slab.h>
30#include <linux/poll.h>
31#include <linux/pci.h>
32#include <asm/io.h>
33 26
34#include "via_ds.h" 27#include "via_ds.h"
35extern unsigned int VIA_DEBUG; 28extern unsigned int VIA_DEBUG;
diff --git a/drivers/char/drm/via_irq.c b/drivers/char/drm/via_irq.c
index d023add1929b..56d7e3daea12 100644
--- a/drivers/char/drm/via_irq.c
+++ b/drivers/char/drm/via_irq.c
@@ -50,6 +50,15 @@
50#define VIA_IRQ_HQV1_ENABLE (1 << 25) 50#define VIA_IRQ_HQV1_ENABLE (1 << 25)
51#define VIA_IRQ_HQV0_PENDING (1 << 9) 51#define VIA_IRQ_HQV0_PENDING (1 << 9)
52#define VIA_IRQ_HQV1_PENDING (1 << 10) 52#define VIA_IRQ_HQV1_PENDING (1 << 10)
53#define VIA_IRQ_DMA0_DD_ENABLE (1 << 20)
54#define VIA_IRQ_DMA0_TD_ENABLE (1 << 21)
55#define VIA_IRQ_DMA1_DD_ENABLE (1 << 22)
56#define VIA_IRQ_DMA1_TD_ENABLE (1 << 23)
57#define VIA_IRQ_DMA0_DD_PENDING (1 << 4)
58#define VIA_IRQ_DMA0_TD_PENDING (1 << 5)
59#define VIA_IRQ_DMA1_DD_PENDING (1 << 6)
60#define VIA_IRQ_DMA1_TD_PENDING (1 << 7)
61
53 62
54/* 63/*
55 * Device-specific IRQs go here. This type might need to be extended with 64 * Device-specific IRQs go here. This type might need to be extended with
@@ -61,13 +70,24 @@ static maskarray_t via_pro_group_a_irqs[] = {
61 {VIA_IRQ_HQV0_ENABLE, VIA_IRQ_HQV0_PENDING, 0x000003D0, 0x00008010, 70 {VIA_IRQ_HQV0_ENABLE, VIA_IRQ_HQV0_PENDING, 0x000003D0, 0x00008010,
62 0x00000000}, 71 0x00000000},
63 {VIA_IRQ_HQV1_ENABLE, VIA_IRQ_HQV1_PENDING, 0x000013D0, 0x00008010, 72 {VIA_IRQ_HQV1_ENABLE, VIA_IRQ_HQV1_PENDING, 0x000013D0, 0x00008010,
64 0x00000000} 73 0x00000000},
74 {VIA_IRQ_DMA0_TD_ENABLE, VIA_IRQ_DMA0_TD_PENDING, VIA_PCI_DMA_CSR0,
75 VIA_DMA_CSR_TA | VIA_DMA_CSR_TD, 0x00000008},
76 {VIA_IRQ_DMA1_TD_ENABLE, VIA_IRQ_DMA1_TD_PENDING, VIA_PCI_DMA_CSR1,
77 VIA_DMA_CSR_TA | VIA_DMA_CSR_TD, 0x00000008},
65}; 78};
66static int via_num_pro_group_a = 79static int via_num_pro_group_a =
67 sizeof(via_pro_group_a_irqs) / sizeof(maskarray_t); 80 sizeof(via_pro_group_a_irqs) / sizeof(maskarray_t);
81static int via_irqmap_pro_group_a[] = {0, 1, -1, 2, -1, 3};
68 82
69static maskarray_t via_unichrome_irqs[] = { }; 83static maskarray_t via_unichrome_irqs[] = {
84 {VIA_IRQ_DMA0_TD_ENABLE, VIA_IRQ_DMA0_TD_PENDING, VIA_PCI_DMA_CSR0,
85 VIA_DMA_CSR_TA | VIA_DMA_CSR_TD, 0x00000008},
86 {VIA_IRQ_DMA1_TD_ENABLE, VIA_IRQ_DMA1_TD_PENDING, VIA_PCI_DMA_CSR1,
87 VIA_DMA_CSR_TA | VIA_DMA_CSR_TD, 0x00000008}
88};
70static int via_num_unichrome = sizeof(via_unichrome_irqs) / sizeof(maskarray_t); 89static int via_num_unichrome = sizeof(via_unichrome_irqs) / sizeof(maskarray_t);
90static int via_irqmap_unichrome[] = {-1, -1, -1, 0, -1, 1};
71 91
72static unsigned time_diff(struct timeval *now, struct timeval *then) 92static unsigned time_diff(struct timeval *now, struct timeval *then)
73{ 93{
@@ -113,6 +133,11 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
113 atomic_inc(&cur_irq->irq_received); 133 atomic_inc(&cur_irq->irq_received);
114 DRM_WAKEUP(&cur_irq->irq_queue); 134 DRM_WAKEUP(&cur_irq->irq_queue);
115 handled = 1; 135 handled = 1;
136 if (dev_priv->irq_map[drm_via_irq_dma0_td] == i) {
137 via_dmablit_handler(dev, 0, 1);
138 } else if (dev_priv->irq_map[drm_via_irq_dma1_td] == i) {
139 via_dmablit_handler(dev, 1, 1);
140 }
116 } 141 }
117 cur_irq++; 142 cur_irq++;
118 } 143 }
@@ -165,7 +190,7 @@ int via_driver_vblank_wait(drm_device_t * dev, unsigned int *sequence)
165 return ret; 190 return ret;
166} 191}
167 192
168static int 193int
169via_driver_irq_wait(drm_device_t * dev, unsigned int irq, int force_sequence, 194via_driver_irq_wait(drm_device_t * dev, unsigned int irq, int force_sequence,
170 unsigned int *sequence) 195 unsigned int *sequence)
171{ 196{
@@ -174,6 +199,7 @@ via_driver_irq_wait(drm_device_t * dev, unsigned int irq, int force_sequence,
174 drm_via_irq_t *cur_irq = dev_priv->via_irqs; 199 drm_via_irq_t *cur_irq = dev_priv->via_irqs;
175 int ret = 0; 200 int ret = 0;
176 maskarray_t *masks = dev_priv->irq_masks; 201 maskarray_t *masks = dev_priv->irq_masks;
202 int real_irq;
177 203
178 DRM_DEBUG("%s\n", __FUNCTION__); 204 DRM_DEBUG("%s\n", __FUNCTION__);
179 205
@@ -182,15 +208,23 @@ via_driver_irq_wait(drm_device_t * dev, unsigned int irq, int force_sequence,
182 return DRM_ERR(EINVAL); 208 return DRM_ERR(EINVAL);
183 } 209 }
184 210
185 if (irq >= dev_priv->num_irqs) { 211 if (irq >= drm_via_irq_num) {
186 DRM_ERROR("%s Trying to wait on unknown irq %d\n", __FUNCTION__, 212 DRM_ERROR("%s Trying to wait on unknown irq %d\n", __FUNCTION__,
187 irq); 213 irq);
188 return DRM_ERR(EINVAL); 214 return DRM_ERR(EINVAL);
189 } 215 }
190 216
191 cur_irq += irq; 217 real_irq = dev_priv->irq_map[irq];
218
219 if (real_irq < 0) {
220 DRM_ERROR("%s Video IRQ %d not available on this hardware.\n",
221 __FUNCTION__, irq);
222 return DRM_ERR(EINVAL);
223 }
224
225 cur_irq += real_irq;
192 226
193 if (masks[irq][2] && !force_sequence) { 227 if (masks[real_irq][2] && !force_sequence) {
194 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ, 228 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
195 ((VIA_READ(masks[irq][2]) & masks[irq][3]) == 229 ((VIA_READ(masks[irq][2]) & masks[irq][3]) ==
196 masks[irq][4])); 230 masks[irq][4]));
@@ -226,6 +260,8 @@ void via_driver_irq_preinstall(drm_device_t * dev)
226 via_pro_group_a_irqs : via_unichrome_irqs; 260 via_pro_group_a_irqs : via_unichrome_irqs;
227 dev_priv->num_irqs = (dev_priv->pro_group_a) ? 261 dev_priv->num_irqs = (dev_priv->pro_group_a) ?
228 via_num_pro_group_a : via_num_unichrome; 262 via_num_pro_group_a : via_num_unichrome;
263 dev_priv->irq_map = (dev_priv->pro_group_a) ?
264 via_irqmap_pro_group_a : via_irqmap_unichrome;
229 265
230 for (i = 0; i < dev_priv->num_irqs; ++i) { 266 for (i = 0; i < dev_priv->num_irqs; ++i) {
231 atomic_set(&cur_irq->irq_received, 0); 267 atomic_set(&cur_irq->irq_received, 0);
@@ -241,7 +277,7 @@ void via_driver_irq_preinstall(drm_device_t * dev)
241 277
242 dev_priv->last_vblank_valid = 0; 278 dev_priv->last_vblank_valid = 0;
243 279
244 // Clear VSync interrupt regs 280 /* Clear VSync interrupt regs */
245 status = VIA_READ(VIA_REG_INTERRUPT); 281 status = VIA_READ(VIA_REG_INTERRUPT);
246 VIA_WRITE(VIA_REG_INTERRUPT, status & 282 VIA_WRITE(VIA_REG_INTERRUPT, status &
247 ~(dev_priv->irq_enable_mask)); 283 ~(dev_priv->irq_enable_mask));
@@ -291,8 +327,7 @@ void via_driver_irq_uninstall(drm_device_t * dev)
291 327
292int via_wait_irq(DRM_IOCTL_ARGS) 328int via_wait_irq(DRM_IOCTL_ARGS)
293{ 329{
294 drm_file_t *priv = filp->private_data; 330 DRM_DEVICE;
295 drm_device_t *dev = priv->head->dev;
296 drm_via_irqwait_t __user *argp = (void __user *)data; 331 drm_via_irqwait_t __user *argp = (void __user *)data;
297 drm_via_irqwait_t irqwait; 332 drm_via_irqwait_t irqwait;
298 struct timeval now; 333 struct timeval now;
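
The hunks above decouple the user-visible IRQ numbering (enum drm_via_irqs in via_drm.h) from the per-chip maskarray: dev_priv->irq_map translates a logical IRQ into the chip's slot, with -1 meaning the IRQ is not wired on that hardware. On plain Unichrome, for example, via_irqmap_unichrome[] maps drm_via_irq_dma0_td to slot 0 and drm_via_irq_dma1_td to slot 1, and everything else to -1, since only the DMA transfer-done IRQs exist there. A hypothetical helper (not in the patch) showing just that lookup:

/* Illustrative only: resolve a user-visible IRQ number to the per-chip
 * maskarray slot, as via_driver_irq_wait() does above.
 */
static int via_resolve_irq(drm_via_private_t *dev_priv, unsigned int irq)
{
	if (irq >= drm_via_irq_num)
		return -1;              /* unknown logical IRQ number */
	return dev_priv->irq_map[irq];  /* -1 if not available on this chip */
}
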
diff --git a/drivers/char/drm/via_map.c b/drivers/char/drm/via_map.c
index 6bd6ac52ad1b..c6a08e96285b 100644
--- a/drivers/char/drm/via_map.c
+++ b/drivers/char/drm/via_map.c
@@ -27,16 +27,10 @@
27 27
28static int via_do_init_map(drm_device_t * dev, drm_via_init_t * init) 28static int via_do_init_map(drm_device_t * dev, drm_via_init_t * init)
29{ 29{
30 drm_via_private_t *dev_priv; 30 drm_via_private_t *dev_priv = dev->dev_private;
31 31
32 DRM_DEBUG("%s\n", __FUNCTION__); 32 DRM_DEBUG("%s\n", __FUNCTION__);
33 33
34 dev_priv = drm_alloc(sizeof(drm_via_private_t), DRM_MEM_DRIVER);
35 if (dev_priv == NULL)
36 return -ENOMEM;
37
38 memset(dev_priv, 0, sizeof(drm_via_private_t));
39
40 DRM_GETSAREA(); 34 DRM_GETSAREA();
41 if (!dev_priv->sarea) { 35 if (!dev_priv->sarea) {
42 DRM_ERROR("could not find sarea!\n"); 36 DRM_ERROR("could not find sarea!\n");
@@ -67,7 +61,8 @@ static int via_do_init_map(drm_device_t * dev, drm_via_init_t * init)
67 dev_priv->agpAddr = init->agpAddr; 61 dev_priv->agpAddr = init->agpAddr;
68 62
69 via_init_futex(dev_priv); 63 via_init_futex(dev_priv);
70 dev_priv->pro_group_a = (dev->pdev->device == 0x3118); 64
65 via_init_dmablit(dev);
71 66
72 dev->dev_private = (void *)dev_priv; 67 dev->dev_private = (void *)dev_priv;
73 return 0; 68 return 0;
@@ -75,15 +70,7 @@ static int via_do_init_map(drm_device_t * dev, drm_via_init_t * init)
75 70
76int via_do_cleanup_map(drm_device_t * dev) 71int via_do_cleanup_map(drm_device_t * dev)
77{ 72{
78 if (dev->dev_private) { 73 via_dma_cleanup(dev);
79
80 drm_via_private_t *dev_priv = dev->dev_private;
81
82 via_dma_cleanup(dev);
83
84 drm_free(dev_priv, sizeof(drm_via_private_t), DRM_MEM_DRIVER);
85 dev->dev_private = NULL;
86 }
87 74
88 return 0; 75 return 0;
89} 76}
@@ -107,3 +94,29 @@ int via_map_init(DRM_IOCTL_ARGS)
107 94
108 return -EINVAL; 95 return -EINVAL;
109} 96}
97
98int via_driver_load(drm_device_t *dev, unsigned long chipset)
99{
100 drm_via_private_t *dev_priv;
101
102 dev_priv = drm_calloc(1, sizeof(drm_via_private_t), DRM_MEM_DRIVER);
103 if (dev_priv == NULL)
104 return DRM_ERR(ENOMEM);
105
106 dev->dev_private = (void *)dev_priv;
107
108 if (chipset == VIA_PRO_GROUP_A)
109 dev_priv->pro_group_a = 1;
110
111 return 0;
112}
113
114int via_driver_unload(drm_device_t *dev)
115{
116 drm_via_private_t *dev_priv = dev->dev_private;
117
118 drm_free(dev_priv, sizeof(drm_via_private_t), DRM_MEM_DRIVER);
119
120 return 0;
121}
122
diff --git a/drivers/char/drm/via_mm.c b/drivers/char/drm/via_mm.c
index 3baddacdff26..33e0cb12e4c3 100644
--- a/drivers/char/drm/via_mm.c
+++ b/drivers/char/drm/via_mm.c
@@ -42,7 +42,7 @@ static int via_agp_free(drm_via_mem_t * mem);
42static int via_fb_alloc(drm_via_mem_t * mem); 42static int via_fb_alloc(drm_via_mem_t * mem);
43static int via_fb_free(drm_via_mem_t * mem); 43static int via_fb_free(drm_via_mem_t * mem);
44 44
45static int add_alloc_set(int context, int type, unsigned int val) 45static int add_alloc_set(int context, int type, unsigned long val)
46{ 46{
47 int i, retval = 0; 47 int i, retval = 0;
48 48
@@ -56,7 +56,7 @@ static int add_alloc_set(int context, int type, unsigned int val)
56 return retval; 56 return retval;
57} 57}
58 58
59static int del_alloc_set(int context, int type, unsigned int val) 59static int del_alloc_set(int context, int type, unsigned long val)
60{ 60{
61 int i, retval = 0; 61 int i, retval = 0;
62 62
@@ -199,13 +199,13 @@ int via_mem_alloc(DRM_IOCTL_ARGS)
199 sizeof(mem)); 199 sizeof(mem));
200 200
201 switch (mem.type) { 201 switch (mem.type) {
202 case VIDEO: 202 case VIA_MEM_VIDEO:
203 if (via_fb_alloc(&mem) < 0) 203 if (via_fb_alloc(&mem) < 0)
204 return -EFAULT; 204 return -EFAULT;
205 DRM_COPY_TO_USER_IOCTL((drm_via_mem_t __user *) data, mem, 205 DRM_COPY_TO_USER_IOCTL((drm_via_mem_t __user *) data, mem,
206 sizeof(mem)); 206 sizeof(mem));
207 return 0; 207 return 0;
208 case AGP: 208 case VIA_MEM_AGP:
209 if (via_agp_alloc(&mem) < 0) 209 if (via_agp_alloc(&mem) < 0)
210 return -EFAULT; 210 return -EFAULT;
211 DRM_COPY_TO_USER_IOCTL((drm_via_mem_t __user *) data, mem, 211 DRM_COPY_TO_USER_IOCTL((drm_via_mem_t __user *) data, mem,
@@ -232,7 +232,7 @@ static int via_fb_alloc(drm_via_mem_t * mem)
232 if (block) { 232 if (block) {
233 fb.offset = block->ofs; 233 fb.offset = block->ofs;
234 fb.free = (unsigned long)block; 234 fb.free = (unsigned long)block;
235 if (!add_alloc_set(fb.context, VIDEO, fb.free)) { 235 if (!add_alloc_set(fb.context, VIA_MEM_VIDEO, fb.free)) {
236 DRM_DEBUG("adding to allocation set fails\n"); 236 DRM_DEBUG("adding to allocation set fails\n");
237 via_mmFreeMem((PMemBlock) fb.free); 237 via_mmFreeMem((PMemBlock) fb.free);
238 retval = -1; 238 retval = -1;
@@ -269,7 +269,7 @@ static int via_agp_alloc(drm_via_mem_t * mem)
269 if (block) { 269 if (block) {
270 agp.offset = block->ofs; 270 agp.offset = block->ofs;
271 agp.free = (unsigned long)block; 271 agp.free = (unsigned long)block;
272 if (!add_alloc_set(agp.context, AGP, agp.free)) { 272 if (!add_alloc_set(agp.context, VIA_MEM_AGP, agp.free)) {
273 DRM_DEBUG("adding to allocation set fails\n"); 273 DRM_DEBUG("adding to allocation set fails\n");
274 via_mmFreeMem((PMemBlock) agp.free); 274 via_mmFreeMem((PMemBlock) agp.free);
275 retval = -1; 275 retval = -1;
@@ -297,11 +297,11 @@ int via_mem_free(DRM_IOCTL_ARGS)
297 297
298 switch (mem.type) { 298 switch (mem.type) {
299 299
300 case VIDEO: 300 case VIA_MEM_VIDEO:
301 if (via_fb_free(&mem) == 0) 301 if (via_fb_free(&mem) == 0)
302 return 0; 302 return 0;
303 break; 303 break;
304 case AGP: 304 case VIA_MEM_AGP:
305 if (via_agp_free(&mem) == 0) 305 if (via_agp_free(&mem) == 0)
306 return 0; 306 return 0;
307 break; 307 break;
@@ -329,7 +329,7 @@ static int via_fb_free(drm_via_mem_t * mem)
329 329
330 via_mmFreeMem((PMemBlock) fb.free); 330 via_mmFreeMem((PMemBlock) fb.free);
331 331
332 if (!del_alloc_set(fb.context, VIDEO, fb.free)) { 332 if (!del_alloc_set(fb.context, VIA_MEM_VIDEO, fb.free)) {
333 retval = -1; 333 retval = -1;
334 } 334 }
335 335
@@ -352,7 +352,7 @@ static int via_agp_free(drm_via_mem_t * mem)
352 352
353 via_mmFreeMem((PMemBlock) agp.free); 353 via_mmFreeMem((PMemBlock) agp.free);
354 354
355 if (!del_alloc_set(agp.context, AGP, agp.free)) { 355 if (!del_alloc_set(agp.context, VIA_MEM_AGP, agp.free)) {
356 retval = -1; 356 retval = -1;
357 } 357 }
358 358
diff --git a/drivers/char/drm/via_verifier.c b/drivers/char/drm/via_verifier.c
index 4ac495f297f7..70c897c88766 100644
--- a/drivers/char/drm/via_verifier.c
+++ b/drivers/char/drm/via_verifier.c
@@ -237,7 +237,7 @@ static hazard_t table3[256];
237static __inline__ int 237static __inline__ int
238eat_words(const uint32_t ** buf, const uint32_t * buf_end, unsigned num_words) 238eat_words(const uint32_t ** buf, const uint32_t * buf_end, unsigned num_words)
239{ 239{
240 if ((*buf - buf_end) >= num_words) { 240 if ((buf_end - *buf) >= num_words) {
241 *buf += num_words; 241 *buf += num_words;
242 return 0; 242 return 0;
243 } 243 }
@@ -249,14 +249,14 @@ eat_words(const uint32_t ** buf, const uint32_t * buf_end, unsigned num_words)
249 * Partially stolen from drm_memory.h 249 * Partially stolen from drm_memory.h
250 */ 250 */
251 251
252static __inline__ drm_map_t *via_drm_lookup_agp_map(drm_via_state_t * seq, 252static __inline__ drm_local_map_t *via_drm_lookup_agp_map(drm_via_state_t *seq,
253 unsigned long offset, 253 unsigned long offset,
254 unsigned long size, 254 unsigned long size,
255 drm_device_t * dev) 255 drm_device_t * dev)
256{ 256{
257 struct list_head *list; 257 struct list_head *list;
258 drm_map_list_t *r_list; 258 drm_map_list_t *r_list;
259 drm_map_t *map = seq->map_cache; 259 drm_local_map_t *map = seq->map_cache;
260 260
261 if (map && map->offset <= offset 261 if (map && map->offset <= offset
262 && (offset + size) <= (map->offset + map->size)) { 262 && (offset + size) <= (map->offset + map->size)) {
diff --git a/drivers/char/drm/via_verifier.h b/drivers/char/drm/via_verifier.h
index eb4eda344345..256590fcc22a 100644
--- a/drivers/char/drm/via_verifier.h
+++ b/drivers/char/drm/via_verifier.h
@@ -47,7 +47,7 @@ typedef struct {
47 int agp_texture; 47 int agp_texture;
48 int multitex; 48 int multitex;
49 drm_device_t *dev; 49 drm_device_t *dev;
50 drm_map_t *map_cache; 50 drm_local_map_t *map_cache;
51 uint32_t vertex_count; 51 uint32_t vertex_count;
52 int agp; 52 int agp;
53 const uint32_t *buf_start; 53 const uint32_t *buf_start;
@@ -55,5 +55,7 @@ typedef struct {
55 55
56extern int via_verify_command_stream(const uint32_t * buf, unsigned int size, 56extern int via_verify_command_stream(const uint32_t * buf, unsigned int size,
57 drm_device_t * dev, int agp); 57 drm_device_t * dev, int agp);
58extern int via_parse_command_stream(drm_device_t *dev, const uint32_t *buf,
59 unsigned int size);
58 60
59#endif 61#endif
diff --git a/drivers/char/drm/via_video.c b/drivers/char/drm/via_video.c
index 7fab9fbdf424..300ac61b09ed 100644
--- a/drivers/char/drm/via_video.c
+++ b/drivers/char/drm/via_video.c
@@ -50,8 +50,11 @@ void via_release_futex(drm_via_private_t * dev_priv, int context)
50 unsigned int i; 50 unsigned int i;
51 volatile int *lock; 51 volatile int *lock;
52 52
53 if (!dev_priv->sarea_priv)
54 return;
55
53 for (i = 0; i < VIA_NR_XVMC_LOCKS; ++i) { 56 for (i = 0; i < VIA_NR_XVMC_LOCKS; ++i) {
54 lock = (int *)XVMCLOCKPTR(dev_priv->sarea_priv, i); 57 lock = (volatile int *)XVMCLOCKPTR(dev_priv->sarea_priv, i);
55 if ((_DRM_LOCKING_CONTEXT(*lock) == context)) { 58 if ((_DRM_LOCKING_CONTEXT(*lock) == context)) {
56 if (_DRM_LOCK_IS_HELD(*lock) 59 if (_DRM_LOCK_IS_HELD(*lock)
57 && (*lock & _DRM_LOCK_CONT)) { 60 && (*lock & _DRM_LOCK_CONT)) {
@@ -79,7 +82,7 @@ int via_decoder_futex(DRM_IOCTL_ARGS)
79 if (fx.lock > VIA_NR_XVMC_LOCKS) 82 if (fx.lock > VIA_NR_XVMC_LOCKS)
80 return -EFAULT; 83 return -EFAULT;
81 84
82 lock = (int *)XVMCLOCKPTR(sAPriv, fx.lock); 85 lock = (volatile int *)XVMCLOCKPTR(sAPriv, fx.lock);
83 86
84 switch (fx.func) { 87 switch (fx.func) {
85 case VIA_FUTEX_WAIT: 88 case VIA_FUTEX_WAIT: