-rw-r--r--  drivers/char/drm/drmP.h            |  18
-rw-r--r--  drivers/char/drm/drm_agpsupport.c  |  10
-rw-r--r--  drivers/char/drm/drm_bufs.c        |  47
-rw-r--r--  drivers/char/drm/drm_fops.c        | 289
-rw-r--r--  drivers/char/drm/drm_lock.c        |  18
-rw-r--r--  drivers/char/drm/drm_stub.c        |  37
-rw-r--r--  drivers/char/drm/r300_cmdbuf.c     |  38
-rw-r--r--  drivers/char/drm/r300_reg.h        |   1
-rw-r--r--  drivers/char/drm/radeon_cp.c       |  75
-rw-r--r--  drivers/char/drm/radeon_drm.h      |   6
-rw-r--r--  drivers/char/drm/radeon_drv.h      |  24
-rw-r--r--  drivers/char/drm/radeon_state.c    | 184
12 files changed, 366 insertions(+), 381 deletions(-)
diff --git a/drivers/char/drm/drmP.h b/drivers/char/drm/drmP.h
index 43c49ad3069b..307254da7d08 100644
--- a/drivers/char/drm/drmP.h
+++ b/drivers/char/drm/drmP.h
@@ -550,7 +550,7 @@ struct drm_driver {
550 int (*kernel_context_switch) (struct drm_device * dev, int old, 550 int (*kernel_context_switch) (struct drm_device * dev, int old,
551 int new); 551 int new);
552 void (*kernel_context_switch_unlock) (struct drm_device * dev, 552 void (*kernel_context_switch_unlock) (struct drm_device * dev,
553 drm_lock_t * lock); 553 drm_lock_t *lock);
554 int (*vblank_wait) (struct drm_device * dev, unsigned int *sequence); 554 int (*vblank_wait) (struct drm_device * dev, unsigned int *sequence);
555 int (*dri_library_name) (struct drm_device *dev, char *buf); 555 int (*dri_library_name) (struct drm_device *dev, char *buf);
556 556
@@ -574,12 +574,11 @@ struct drm_driver {
574 void (*irq_postinstall) (struct drm_device * dev); 574 void (*irq_postinstall) (struct drm_device * dev);
575 void (*irq_uninstall) (struct drm_device * dev); 575 void (*irq_uninstall) (struct drm_device * dev);
576 void (*reclaim_buffers) (struct drm_device * dev, struct file * filp); 576 void (*reclaim_buffers) (struct drm_device * dev, struct file * filp);
577 void (*reclaim_buffers_locked) (struct drm_device *drv, 577 void (*reclaim_buffers_locked) (struct drm_device *dev,
578 struct file *filp); 578 struct file *filp);
579 unsigned long (*get_map_ofs) (drm_map_t * map); 579 unsigned long (*get_map_ofs) (drm_map_t * map);
580 unsigned long (*get_reg_ofs) (struct drm_device * dev); 580 unsigned long (*get_reg_ofs) (struct drm_device * dev);
581 void (*set_version) (struct drm_device * dev, drm_set_version_t * sv); 581 void (*set_version) (struct drm_device * dev, drm_set_version_t * sv);
582 int (*version) (drm_version_t * version);
583 582
584 int major; 583 int major;
585 int minor; 584 int minor;
@@ -774,10 +773,6 @@ static inline int drm_mtrr_del(int handle, unsigned long offset,
774/** \name Internal function definitions */ 773/** \name Internal function definitions */
775/*@{*/ 774/*@{*/
776 775
777 /* Misc. support (drm_init.h) */
778extern int drm_flags;
779extern void drm_parse_options(char *s);
780
781 /* Driver support (drm_drv.h) */ 776 /* Driver support (drm_drv.h) */
782extern int drm_init(struct drm_driver *driver); 777extern int drm_init(struct drm_driver *driver);
783extern void drm_exit(struct drm_driver *driver); 778extern void drm_exit(struct drm_driver *driver);
@@ -831,6 +826,8 @@ extern int drm_getstats(struct inode *inode, struct file *filp,
831 unsigned int cmd, unsigned long arg); 826 unsigned int cmd, unsigned long arg);
832extern int drm_setversion(struct inode *inode, struct file *filp, 827extern int drm_setversion(struct inode *inode, struct file *filp,
833 unsigned int cmd, unsigned long arg); 828 unsigned int cmd, unsigned long arg);
829extern int drm_noop(struct inode *inode, struct file *filp,
830 unsigned int cmd, unsigned long arg);
834 831
835 /* Context IOCTL support (drm_context.h) */ 832 /* Context IOCTL support (drm_context.h) */
836extern int drm_resctx(struct inode *inode, struct file *filp, 833extern int drm_resctx(struct inode *inode, struct file *filp,
@@ -869,10 +866,6 @@ extern int drm_getmagic(struct inode *inode, struct file *filp,
869extern int drm_authmagic(struct inode *inode, struct file *filp, 866extern int drm_authmagic(struct inode *inode, struct file *filp,
870 unsigned int cmd, unsigned long arg); 867 unsigned int cmd, unsigned long arg);
871 868
872 /* Placeholder for ioctls past */
873extern int drm_noop(struct inode *inode, struct file *filp,
874 unsigned int cmd, unsigned long arg);
875
876 /* Locking IOCTL support (drm_lock.h) */ 869 /* Locking IOCTL support (drm_lock.h) */
877extern int drm_lock(struct inode *inode, struct file *filp, 870extern int drm_lock(struct inode *inode, struct file *filp,
878 unsigned int cmd, unsigned long arg); 871 unsigned int cmd, unsigned long arg);
@@ -885,6 +878,7 @@ extern int drm_lock_free(drm_device_t * dev,
885 /* Buffer management support (drm_bufs.h) */ 878 /* Buffer management support (drm_bufs.h) */
886extern int drm_addbufs_agp(drm_device_t * dev, drm_buf_desc_t * request); 879extern int drm_addbufs_agp(drm_device_t * dev, drm_buf_desc_t * request);
887extern int drm_addbufs_pci(drm_device_t * dev, drm_buf_desc_t * request); 880extern int drm_addbufs_pci(drm_device_t * dev, drm_buf_desc_t * request);
881extern int drm_addbufs_fb(drm_device_t *dev, drm_buf_desc_t *request);
888extern int drm_addmap(drm_device_t * dev, unsigned int offset, 882extern int drm_addmap(drm_device_t * dev, unsigned int offset,
889 unsigned int size, drm_map_type_t type, 883 unsigned int size, drm_map_type_t type,
890 drm_map_flags_t flags, drm_local_map_t ** map_ptr); 884 drm_map_flags_t flags, drm_local_map_t ** map_ptr);
@@ -920,8 +914,8 @@ extern void drm_core_reclaim_buffers(drm_device_t * dev, struct file *filp);
920 /* IRQ support (drm_irq.h) */ 914 /* IRQ support (drm_irq.h) */
921extern int drm_control(struct inode *inode, struct file *filp, 915extern int drm_control(struct inode *inode, struct file *filp,
922 unsigned int cmd, unsigned long arg); 916 unsigned int cmd, unsigned long arg);
923extern int drm_irq_uninstall(drm_device_t * dev);
924extern irqreturn_t drm_irq_handler(DRM_IRQ_ARGS); 917extern irqreturn_t drm_irq_handler(DRM_IRQ_ARGS);
918extern int drm_irq_uninstall(drm_device_t * dev);
925extern void drm_driver_irq_preinstall(drm_device_t * dev); 919extern void drm_driver_irq_preinstall(drm_device_t * dev);
926extern void drm_driver_irq_postinstall(drm_device_t * dev); 920extern void drm_driver_irq_postinstall(drm_device_t * dev);
927extern void drm_driver_irq_uninstall(drm_device_t * dev); 921extern void drm_driver_irq_uninstall(drm_device_t * dev);
diff --git a/drivers/char/drm/drm_agpsupport.c b/drivers/char/drm/drm_agpsupport.c
index b80e61a4c40b..fabc930c67a2 100644
--- a/drivers/char/drm/drm_agpsupport.c
+++ b/drivers/char/drm/drm_agpsupport.c
@@ -1,5 +1,5 @@
1/** 1/**
2 * \file drm_agpsupport.h 2 * \file drm_agpsupport.c
3 * DRM support for AGP/GART backend 3 * DRM support for AGP/GART backend
4 * 4 *
5 * \author Rickard E. (Rik) Faith <faith@valinux.com> 5 * \author Rickard E. (Rik) Faith <faith@valinux.com>
@@ -91,7 +91,7 @@ int drm_agp_info_ioctl(struct inode *inode, struct file *filp,
91/** 91/**
92 * Acquire the AGP device. 92 * Acquire the AGP device.
93 * 93 *
94 * \param dev DRM device that is to acquire AGP 94 * \param dev DRM device that is to acquire AGP.
95 * \return zero on success or a negative number on failure. 95 * \return zero on success or a negative number on failure.
96 * 96 *
97 * Verifies the AGP device hasn't been acquired before and calls 97 * Verifies the AGP device hasn't been acquired before and calls
@@ -134,7 +134,7 @@ int drm_agp_acquire_ioctl(struct inode *inode, struct file *filp,
134/** 134/**
135 * Release the AGP device. 135 * Release the AGP device.
136 * 136 *
137 * \param dev DRM device that is to release AGP 137 * \param dev DRM device that is to release AGP.
138 * \return zero on success or a negative number on failure. 138 * \return zero on success or a negative number on failure.
139 * 139 *
140 * Verifies the AGP device has been acquired and calls \c agp_backend_release. 140 * Verifies the AGP device has been acquired and calls \c agp_backend_release.
@@ -147,7 +147,6 @@ int drm_agp_release(drm_device_t * dev)
147 dev->agp->acquired = 0; 147 dev->agp->acquired = 0;
148 return 0; 148 return 0;
149} 149}
150
151EXPORT_SYMBOL(drm_agp_release); 150EXPORT_SYMBOL(drm_agp_release);
152 151
153int drm_agp_release_ioctl(struct inode *inode, struct file *filp, 152int drm_agp_release_ioctl(struct inode *inode, struct file *filp,
@@ -447,6 +446,9 @@ int drm_agp_free_ioctl(struct inode *inode, struct file *filp,
447 * 446 *
448 * \return pointer to a drm_agp_head structure. 447 * \return pointer to a drm_agp_head structure.
449 * 448 *
449 * Gets the drm_agp_t structure which is made available by the agpgart module
450 * via the inter_module_* functions. Creates and initializes a drm_agp_head
451 * structure.
450 */ 452 */
451drm_agp_head_t *drm_agp_init(drm_device_t * dev) 453drm_agp_head_t *drm_agp_init(drm_device_t * dev)
452{ 454{
diff --git a/drivers/char/drm/drm_bufs.c b/drivers/char/drm/drm_bufs.c
index 319bdea8de8a..1845dd062816 100644
--- a/drivers/char/drm/drm_bufs.c
+++ b/drivers/char/drm/drm_bufs.c
@@ -36,22 +36,21 @@
36#include <linux/vmalloc.h> 36#include <linux/vmalloc.h>
37#include "drmP.h" 37#include "drmP.h"
38 38
39unsigned long drm_get_resource_start(drm_device_t * dev, unsigned int resource) 39unsigned long drm_get_resource_start(drm_device_t *dev, unsigned int resource)
40{ 40{
41 return pci_resource_start(dev->pdev, resource); 41 return pci_resource_start(dev->pdev, resource);
42} 42}
43
44EXPORT_SYMBOL(drm_get_resource_start); 43EXPORT_SYMBOL(drm_get_resource_start);
45 44
46unsigned long drm_get_resource_len(drm_device_t * dev, unsigned int resource) 45unsigned long drm_get_resource_len(drm_device_t *dev, unsigned int resource)
47{ 46{
48 return pci_resource_len(dev->pdev, resource); 47 return pci_resource_len(dev->pdev, resource);
49} 48}
50 49
51EXPORT_SYMBOL(drm_get_resource_len); 50EXPORT_SYMBOL(drm_get_resource_len);
52 51
53static drm_map_list_t *drm_find_matching_map(drm_device_t * dev, 52static drm_map_list_t *drm_find_matching_map(drm_device_t *dev,
54 drm_local_map_t * map) 53 drm_local_map_t *map)
55{ 54{
56 struct list_head *list; 55 struct list_head *list;
57 56
@@ -74,7 +73,7 @@ static drm_map_list_t *drm_find_matching_map(drm_device_t * dev,
74 73
75#ifdef _LP64 74#ifdef _LP64
76static __inline__ unsigned int HandleID(unsigned long lhandle, 75static __inline__ unsigned int HandleID(unsigned long lhandle,
77 drm_device_t * dev) 76 drm_device_t *dev)
78{ 77{
79 static unsigned int map32_handle = START_RANGE; 78 static unsigned int map32_handle = START_RANGE;
80 unsigned int hash; 79 unsigned int hash;
@@ -301,6 +300,9 @@ int drm_addmap_ioctl(struct inode *inode, struct file *filp,
301 return -EFAULT; 300 return -EFAULT;
302 } 301 }
303 302
303 if (!(capable(CAP_SYS_ADMIN) || map.type == _DRM_AGP))
304 return -EPERM;
305
304 err = drm_addmap_core(dev, map.offset, map.size, map.type, map.flags, 306 err = drm_addmap_core(dev, map.offset, map.size, map.type, map.flags,
305 &maplist); 307 &maplist);
306 308
@@ -332,7 +334,7 @@ int drm_addmap_ioctl(struct inode *inode, struct file *filp,
332 * 334 *
333 * \sa drm_addmap 335 * \sa drm_addmap
334 */ 336 */
335int drm_rmmap_locked(drm_device_t * dev, drm_local_map_t * map) 337int drm_rmmap_locked(drm_device_t *dev, drm_local_map_t *map)
336{ 338{
337 struct list_head *list; 339 struct list_head *list;
338 drm_map_list_t *r_list = NULL; 340 drm_map_list_t *r_list = NULL;
@@ -384,10 +386,9 @@ int drm_rmmap_locked(drm_device_t * dev, drm_local_map_t * map)
384 386
385 return 0; 387 return 0;
386} 388}
387
388EXPORT_SYMBOL(drm_rmmap_locked); 389EXPORT_SYMBOL(drm_rmmap_locked);
389 390
390int drm_rmmap(drm_device_t * dev, drm_local_map_t * map) 391int drm_rmmap(drm_device_t *dev, drm_local_map_t *map)
391{ 392{
392 int ret; 393 int ret;
393 394
@@ -397,7 +398,6 @@ int drm_rmmap(drm_device_t * dev, drm_local_map_t * map)
397 398
398 return ret; 399 return ret;
399} 400}
400
401EXPORT_SYMBOL(drm_rmmap); 401EXPORT_SYMBOL(drm_rmmap);
402 402
403/* The rmmap ioctl appears to be unnecessary. All mappings are torn down on 403/* The rmmap ioctl appears to be unnecessary. All mappings are torn down on
@@ -548,7 +548,7 @@ int drm_addbufs_agp(drm_device_t * dev, drm_buf_desc_t * request)
548 DRM_DEBUG("count: %d\n", count); 548 DRM_DEBUG("count: %d\n", count);
549 DRM_DEBUG("order: %d\n", order); 549 DRM_DEBUG("order: %d\n", order);
550 DRM_DEBUG("size: %d\n", size); 550 DRM_DEBUG("size: %d\n", size);
551 DRM_DEBUG("agp_offset: %lu\n", agp_offset); 551 DRM_DEBUG("agp_offset: %lx\n", agp_offset);
552 DRM_DEBUG("alignment: %d\n", alignment); 552 DRM_DEBUG("alignment: %d\n", alignment);
553 DRM_DEBUG("page_order: %d\n", page_order); 553 DRM_DEBUG("page_order: %d\n", page_order);
554 DRM_DEBUG("total: %d\n", total); 554 DRM_DEBUG("total: %d\n", total);
@@ -649,6 +649,8 @@ int drm_addbufs_agp(drm_device_t * dev, drm_buf_desc_t * request)
649 } 649 }
650 650
651 dma->buf_count += entry->buf_count; 651 dma->buf_count += entry->buf_count;
652 dma->seg_count += entry->seg_count;
653 dma->page_count += byte_count >> PAGE_SHIFT;
652 dma->byte_count += byte_count; 654 dma->byte_count += byte_count;
653 655
654 DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count); 656 DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
@@ -664,7 +666,6 @@ int drm_addbufs_agp(drm_device_t * dev, drm_buf_desc_t * request)
664 atomic_dec(&dev->buf_alloc); 666 atomic_dec(&dev->buf_alloc);
665 return 0; 667 return 0;
666} 668}
667
668EXPORT_SYMBOL(drm_addbufs_agp); 669EXPORT_SYMBOL(drm_addbufs_agp);
669#endif /* __OS_HAS_AGP */ 670#endif /* __OS_HAS_AGP */
670 671
@@ -689,9 +690,13 @@ int drm_addbufs_pci(drm_device_t * dev, drm_buf_desc_t * request)
689 690
690 if (!drm_core_check_feature(dev, DRIVER_PCI_DMA)) 691 if (!drm_core_check_feature(dev, DRIVER_PCI_DMA))
691 return -EINVAL; 692 return -EINVAL;
693
692 if (!dma) 694 if (!dma)
693 return -EINVAL; 695 return -EINVAL;
694 696
697 if (!capable(CAP_SYS_ADMIN))
698 return -EPERM;
699
695 count = request->count; 700 count = request->count;
696 order = drm_order(request->size); 701 order = drm_order(request->size);
697 size = 1 << order; 702 size = 1 << order;
@@ -882,7 +887,6 @@ int drm_addbufs_pci(drm_device_t * dev, drm_buf_desc_t * request)
882 return 0; 887 return 0;
883 888
884} 889}
885
886EXPORT_SYMBOL(drm_addbufs_pci); 890EXPORT_SYMBOL(drm_addbufs_pci);
887 891
888static int drm_addbufs_sg(drm_device_t * dev, drm_buf_desc_t * request) 892static int drm_addbufs_sg(drm_device_t * dev, drm_buf_desc_t * request)
@@ -908,6 +912,9 @@ static int drm_addbufs_sg(drm_device_t * dev, drm_buf_desc_t * request)
908 if (!dma) 912 if (!dma)
909 return -EINVAL; 913 return -EINVAL;
910 914
915 if (!capable(CAP_SYS_ADMIN))
916 return -EPERM;
917
911 count = request->count; 918 count = request->count;
912 order = drm_order(request->size); 919 order = drm_order(request->size);
913 size = 1 << order; 920 size = 1 << order;
@@ -1026,6 +1033,8 @@ static int drm_addbufs_sg(drm_device_t * dev, drm_buf_desc_t * request)
1026 } 1033 }
1027 1034
1028 dma->buf_count += entry->buf_count; 1035 dma->buf_count += entry->buf_count;
1036 dma->seg_count += entry->seg_count;
1037 dma->page_count += byte_count >> PAGE_SHIFT;
1029 dma->byte_count += byte_count; 1038 dma->byte_count += byte_count;
1030 1039
1031 DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count); 1040 DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
@@ -1042,7 +1051,7 @@ static int drm_addbufs_sg(drm_device_t * dev, drm_buf_desc_t * request)
1042 return 0; 1051 return 0;
1043} 1052}
1044 1053
1045static int drm_addbufs_fb(drm_device_t * dev, drm_buf_desc_t * request) 1054int drm_addbufs_fb(drm_device_t * dev, drm_buf_desc_t * request)
1046{ 1055{
1047 drm_device_dma_t *dma = dev->dma; 1056 drm_device_dma_t *dma = dev->dma;
1048 drm_buf_entry_t *entry; 1057 drm_buf_entry_t *entry;
@@ -1065,6 +1074,9 @@ static int drm_addbufs_fb(drm_device_t * dev, drm_buf_desc_t * request)
1065 if (!dma) 1074 if (!dma)
1066 return -EINVAL; 1075 return -EINVAL;
1067 1076
1077 if (!capable(CAP_SYS_ADMIN))
1078 return -EPERM;
1079
1068 count = request->count; 1080 count = request->count;
1069 order = drm_order(request->size); 1081 order = drm_order(request->size);
1070 size = 1 << order; 1082 size = 1 << order;
@@ -1181,6 +1193,8 @@ static int drm_addbufs_fb(drm_device_t * dev, drm_buf_desc_t * request)
1181 } 1193 }
1182 1194
1183 dma->buf_count += entry->buf_count; 1195 dma->buf_count += entry->buf_count;
1196 dma->seg_count += entry->seg_count;
1197 dma->page_count += byte_count >> PAGE_SHIFT;
1184 dma->byte_count += byte_count; 1198 dma->byte_count += byte_count;
1185 1199
1186 DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count); 1200 DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
@@ -1196,6 +1210,8 @@ static int drm_addbufs_fb(drm_device_t * dev, drm_buf_desc_t * request)
1196 atomic_dec(&dev->buf_alloc); 1210 atomic_dec(&dev->buf_alloc);
1197 return 0; 1211 return 0;
1198} 1212}
1213EXPORT_SYMBOL(drm_addbufs_fb);
1214
1199 1215
1200/** 1216/**
1201 * Add buffers for DMA transfers (ioctl). 1217 * Add buffers for DMA transfers (ioctl).
@@ -1577,5 +1593,6 @@ int drm_order(unsigned long size)
1577 1593
1578 return order; 1594 return order;
1579} 1595}
1580
1581EXPORT_SYMBOL(drm_order); 1596EXPORT_SYMBOL(drm_order);
1597
1598
diff --git a/drivers/char/drm/drm_fops.c b/drivers/char/drm/drm_fops.c
index b73543c694a9..403f44a1bf01 100644
--- a/drivers/char/drm/drm_fops.c
+++ b/drivers/char/drm/drm_fops.c
@@ -35,6 +35,7 @@
35 */ 35 */
36 36
37#include "drmP.h" 37#include "drmP.h"
38#include "drm_sarea.h"
38#include <linux/poll.h> 39#include <linux/poll.h>
39 40
40static int drm_open_helper(struct inode *inode, struct file *filp, 41static int drm_open_helper(struct inode *inode, struct file *filp,
@@ -42,6 +43,7 @@ static int drm_open_helper(struct inode *inode, struct file *filp,
42 43
43static int drm_setup(drm_device_t * dev) 44static int drm_setup(drm_device_t * dev)
44{ 45{
46 drm_local_map_t *map;
45 int i; 47 int i;
46 int ret; 48 int ret;
47 49
@@ -51,6 +53,11 @@ static int drm_setup(drm_device_t * dev)
51 return ret; 53 return ret;
52 } 54 }
53 55
56 /* prebuild the SAREA */
57 i = drm_addmap(dev, 0, SAREA_MAX, _DRM_SHM, _DRM_CONTAINS_LOCK, &map);
58 if (i != 0)
59 return i;
60
54 atomic_set(&dev->ioctl_count, 0); 61 atomic_set(&dev->ioctl_count, 0);
55 atomic_set(&dev->vma_count, 0); 62 atomic_set(&dev->vma_count, 0);
56 dev->buf_use = 0; 63 dev->buf_use = 0;
@@ -152,10 +159,168 @@ int drm_open(struct inode *inode, struct file *filp)
152 159
153 return retcode; 160 return retcode;
154} 161}
155
156EXPORT_SYMBOL(drm_open); 162EXPORT_SYMBOL(drm_open);
157 163
158/** 164/**
165 * File \c open operation.
166 *
167 * \param inode device inode.
168 * \param filp file pointer.
169 *
170 * Puts the dev->fops corresponding to the device minor number into
171 * \p filp, call the \c open method, and restore the file operations.
172 */
173int drm_stub_open(struct inode *inode, struct file *filp)
174{
175 drm_device_t *dev = NULL;
176 int minor = iminor(inode);
177 int err = -ENODEV;
178 struct file_operations *old_fops;
179
180 DRM_DEBUG("\n");
181
182 if (!((minor >= 0) && (minor < drm_cards_limit)))
183 return -ENODEV;
184
185 if (!drm_heads[minor])
186 return -ENODEV;
187
188 if (!(dev = drm_heads[minor]->dev))
189 return -ENODEV;
190
191 old_fops = filp->f_op;
192 filp->f_op = fops_get(&dev->driver->fops);
193 if (filp->f_op->open && (err = filp->f_op->open(inode, filp))) {
194 fops_put(filp->f_op);
195 filp->f_op = fops_get(old_fops);
196 }
197 fops_put(old_fops);
198
199 return err;
200}
201
202/**
203 * Check whether DRI will run on this CPU.
204 *
205 * \return non-zero if the DRI will run on this CPU, or zero otherwise.
206 */
207static int drm_cpu_valid(void)
208{
209#if defined(__i386__)
210 if (boot_cpu_data.x86 == 3)
211 return 0; /* No cmpxchg on a 386 */
212#endif
213#if defined(__sparc__) && !defined(__sparc_v9__)
214 return 0; /* No cmpxchg before v9 sparc. */
215#endif
216 return 1;
217}
218
219/**
220 * Called whenever a process opens /dev/drm.
221 *
222 * \param inode device inode.
223 * \param filp file pointer.
224 * \param dev device.
225 * \return zero on success or a negative number on failure.
226 *
227 * Creates and initializes a drm_file structure for the file private data in \p
228 * filp and add it into the double linked list in \p dev.
229 */
230static int drm_open_helper(struct inode *inode, struct file *filp,
231 drm_device_t * dev)
232{
233 int minor = iminor(inode);
234 drm_file_t *priv;
235 int ret;
236
237 if (filp->f_flags & O_EXCL)
238 return -EBUSY; /* No exclusive opens */
239 if (!drm_cpu_valid())
240 return -EINVAL;
241
242 DRM_DEBUG("pid = %d, minor = %d\n", current->pid, minor);
243
244 priv = drm_alloc(sizeof(*priv), DRM_MEM_FILES);
245 if (!priv)
246 return -ENOMEM;
247
248 memset(priv, 0, sizeof(*priv));
249 filp->private_data = priv;
250 priv->uid = current->euid;
251 priv->pid = current->pid;
252 priv->minor = minor;
253 priv->head = drm_heads[minor];
254 priv->ioctl_count = 0;
255 /* for compatibility root is always authenticated */
256 priv->authenticated = capable(CAP_SYS_ADMIN);
257 priv->lock_count = 0;
258
259 if (dev->driver->open) {
260 ret = dev->driver->open(dev, priv);
261 if (ret < 0)
262 goto out_free;
263 }
264
265 down(&dev->struct_sem);
266 if (!dev->file_last) {
267 priv->next = NULL;
268 priv->prev = NULL;
269 dev->file_first = priv;
270 dev->file_last = priv;
271 /* first opener automatically becomes master */
272 priv->master = 1;
273 } else {
274 priv->next = NULL;
275 priv->prev = dev->file_last;
276 dev->file_last->next = priv;
277 dev->file_last = priv;
278 }
279 up(&dev->struct_sem);
280
281#ifdef __alpha__
282 /*
283 * Default the hose
284 */
285 if (!dev->hose) {
286 struct pci_dev *pci_dev;
287 pci_dev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, NULL);
288 if (pci_dev) {
289 dev->hose = pci_dev->sysdata;
290 pci_dev_put(pci_dev);
291 }
292 if (!dev->hose) {
293 struct pci_bus *b = pci_bus_b(pci_root_buses.next);
294 if (b)
295 dev->hose = b->sysdata;
296 }
297 }
298#endif
299
300 return 0;
301 out_free:
302 drm_free(priv, sizeof(*priv), DRM_MEM_FILES);
303 filp->private_data = NULL;
304 return ret;
305}
306
307/** No-op. */
308int drm_fasync(int fd, struct file *filp, int on)
309{
310 drm_file_t *priv = filp->private_data;
311 drm_device_t *dev = priv->head->dev;
312 int retcode;
313
314 DRM_DEBUG("fd = %d, device = 0x%lx\n", fd,
315 (long)old_encode_dev(priv->head->device));
316 retcode = fasync_helper(fd, filp, on, &dev->buf_async);
317 if (retcode < 0)
318 return retcode;
319 return 0;
320}
321EXPORT_SYMBOL(drm_fasync);
322
323/**
159 * Release file. 324 * Release file.
160 * 325 *
161 * \param inode device inode 326 * \param inode device inode
@@ -291,7 +456,6 @@ int drm_release(struct inode *inode, struct file *filp)
291 456
292 if (dev->driver->postclose) 457 if (dev->driver->postclose)
293 dev->driver->postclose(dev, priv); 458 dev->driver->postclose(dev, priv);
294
295 drm_free(priv, sizeof(*priv), DRM_MEM_FILES); 459 drm_free(priv, sizeof(*priv), DRM_MEM_FILES);
296 460
297 /* ======================================================== 461 /* ========================================================
@@ -318,132 +482,11 @@ int drm_release(struct inode *inode, struct file *filp)
318 482
319 return retcode; 483 return retcode;
320} 484}
321
322EXPORT_SYMBOL(drm_release); 485EXPORT_SYMBOL(drm_release);
323 486
324/**
325 * Check whether DRI will run on this CPU.
326 *
327 * \return non-zero if the DRI will run on this CPU, or zero otherwise.
328 */
329static int drm_cpu_valid(void)
330{
331#if defined(__i386__)
332 if (boot_cpu_data.x86 == 3)
333 return 0; /* No cmpxchg on a 386 */
334#endif
335#if defined(__sparc__) && !defined(__sparc_v9__)
336 return 0; /* No cmpxchg before v9 sparc. */
337#endif
338 return 1;
339}
340
341/**
342 * Called whenever a process opens /dev/drm.
343 *
344 * \param inode device inode.
345 * \param filp file pointer.
346 * \param dev device.
347 * \return zero on success or a negative number on failure.
348 *
349 * Creates and initializes a drm_file structure for the file private data in \p
350 * filp and add it into the double linked list in \p dev.
351 */
352static int drm_open_helper(struct inode *inode, struct file *filp,
353 drm_device_t * dev)
354{
355 int minor = iminor(inode);
356 drm_file_t *priv;
357 int ret;
358
359 if (filp->f_flags & O_EXCL)
360 return -EBUSY; /* No exclusive opens */
361 if (!drm_cpu_valid())
362 return -EINVAL;
363
364 DRM_DEBUG("pid = %d, minor = %d\n", current->pid, minor);
365
366 priv = drm_alloc(sizeof(*priv), DRM_MEM_FILES);
367 if (!priv)
368 return -ENOMEM;
369
370 memset(priv, 0, sizeof(*priv));
371 filp->private_data = priv;
372 priv->uid = current->euid;
373 priv->pid = current->pid;
374 priv->minor = minor;
375 priv->head = drm_heads[minor];
376 priv->ioctl_count = 0;
377 priv->authenticated = capable(CAP_SYS_ADMIN);
378 priv->lock_count = 0;
379
380 if (dev->driver->open) {
381 ret = dev->driver->open(dev, priv);
382 if (ret < 0)
383 goto out_free;
384 }
385
386 down(&dev->struct_sem);
387 if (!dev->file_last) {
388 priv->next = NULL;
389 priv->prev = NULL;
390 dev->file_first = priv;
391 dev->file_last = priv;
392 } else {
393 priv->next = NULL;
394 priv->prev = dev->file_last;
395 dev->file_last->next = priv;
396 dev->file_last = priv;
397 }
398 up(&dev->struct_sem);
399
400#ifdef __alpha__
401 /*
402 * Default the hose
403 */
404 if (!dev->hose) {
405 struct pci_dev *pci_dev;
406 pci_dev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, NULL);
407 if (pci_dev) {
408 dev->hose = pci_dev->sysdata;
409 pci_dev_put(pci_dev);
410 }
411 if (!dev->hose) {
412 struct pci_bus *b = pci_bus_b(pci_root_buses.next);
413 if (b)
414 dev->hose = b->sysdata;
415 }
416 }
417#endif
418
419 return 0;
420 out_free:
421 drm_free(priv, sizeof(*priv), DRM_MEM_FILES);
422 filp->private_data = NULL;
423 return ret;
424}
425
426/** No-op. */
427int drm_fasync(int fd, struct file *filp, int on)
428{
429 drm_file_t *priv = filp->private_data;
430 drm_device_t *dev = priv->head->dev;
431 int retcode;
432
433 DRM_DEBUG("fd = %d, device = 0x%lx\n", fd,
434 (long)old_encode_dev(priv->head->device));
435 retcode = fasync_helper(fd, filp, on, &dev->buf_async);
436 if (retcode < 0)
437 return retcode;
438 return 0;
439}
440
441EXPORT_SYMBOL(drm_fasync);
442
443/** No-op. */ 487/** No-op. */
444unsigned int drm_poll(struct file *filp, struct poll_table_struct *wait) 488unsigned int drm_poll(struct file *filp, struct poll_table_struct *wait)
445{ 489{
446 return 0; 490 return 0;
447} 491}
448
449EXPORT_SYMBOL(drm_poll); 492EXPORT_SYMBOL(drm_poll);
diff --git a/drivers/char/drm/drm_lock.c b/drivers/char/drm/drm_lock.c
index b276ae8a6633..f970dc36c18f 100644
--- a/drivers/char/drm/drm_lock.c
+++ b/drivers/char/drm/drm_lock.c
@@ -104,6 +104,9 @@ int drm_lock(struct inode *inode, struct file *filp,
104 __set_current_state(TASK_RUNNING); 104 __set_current_state(TASK_RUNNING);
105 remove_wait_queue(&dev->lock.lock_queue, &entry); 105 remove_wait_queue(&dev->lock.lock_queue, &entry);
106 106
107 DRM_DEBUG( "%d %s\n", lock.context, ret ? "interrupted" : "has lock" );
108 if (ret) return ret;
109
107 sigemptyset(&dev->sigmask); 110 sigemptyset(&dev->sigmask);
108 sigaddset(&dev->sigmask, SIGSTOP); 111 sigaddset(&dev->sigmask, SIGSTOP);
109 sigaddset(&dev->sigmask, SIGTSTP); 112 sigaddset(&dev->sigmask, SIGTSTP);
@@ -116,21 +119,20 @@ int drm_lock(struct inode *inode, struct file *filp,
116 if (dev->driver->dma_ready && (lock.flags & _DRM_LOCK_READY)) 119 if (dev->driver->dma_ready && (lock.flags & _DRM_LOCK_READY))
117 dev->driver->dma_ready(dev); 120 dev->driver->dma_ready(dev);
118 121
119 if (dev->driver->dma_quiescent && (lock.flags & _DRM_LOCK_QUIESCENT)) 122 if (dev->driver->dma_quiescent && (lock.flags & _DRM_LOCK_QUIESCENT)) {
120 return dev->driver->dma_quiescent(dev); 123 if (dev->driver->dma_quiescent(dev)) {
121 124 DRM_DEBUG( "%d waiting for DMA quiescent\n", lock.context);
122 /* dev->driver->kernel_context_switch isn't used by any of the x86 125 return DRM_ERR(EBUSY);
123 * drivers but is used by the Sparc driver. 126 }
124 */ 127 }
125 128
126 if (dev->driver->kernel_context_switch && 129 if (dev->driver->kernel_context_switch &&
127 dev->last_context != lock.context) { 130 dev->last_context != lock.context) {
128 dev->driver->kernel_context_switch(dev, dev->last_context, 131 dev->driver->kernel_context_switch(dev, dev->last_context,
129 lock.context); 132 lock.context);
130 } 133 }
131 DRM_DEBUG("%d %s\n", lock.context, ret ? "interrupted" : "has lock");
132 134
133 return ret; 135 return 0;
134} 136}
135 137
136/** 138/**
diff --git a/drivers/char/drm/drm_stub.c b/drivers/char/drm/drm_stub.c
index b7f2a851f45c..42d766359caa 100644
--- a/drivers/char/drm/drm_stub.c
+++ b/drivers/char/drm/drm_stub.c
@@ -128,43 +128,6 @@ static int drm_fill_in_dev(drm_device_t * dev, struct pci_dev *pdev,
128 return retcode; 128 return retcode;
129} 129}
130 130
131/**
132 * File \c open operation.
133 *
134 * \param inode device inode.
135 * \param filp file pointer.
136 *
137 * Puts the dev->fops corresponding to the device minor number into
138 * \p filp, call the \c open method, and restore the file operations.
139 */
140int drm_stub_open(struct inode *inode, struct file *filp)
141{
142 drm_device_t *dev = NULL;
143 int minor = iminor(inode);
144 int err = -ENODEV;
145 struct file_operations *old_fops;
146
147 DRM_DEBUG("\n");
148
149 if (!((minor >= 0) && (minor < drm_cards_limit)))
150 return -ENODEV;
151
152 if (!drm_heads[minor])
153 return -ENODEV;
154
155 if (!(dev = drm_heads[minor]->dev))
156 return -ENODEV;
157
158 old_fops = filp->f_op;
159 filp->f_op = fops_get(&dev->driver->fops);
160 if (filp->f_op->open && (err = filp->f_op->open(inode, filp))) {
161 fops_put(filp->f_op);
162 filp->f_op = fops_get(old_fops);
163 }
164 fops_put(old_fops);
165
166 return err;
167}
168 131
169/** 132/**
170 * Get a secondary minor number. 133 * Get a secondary minor number.
diff --git a/drivers/char/drm/r300_cmdbuf.c b/drivers/char/drm/r300_cmdbuf.c
index 3a1ac5f78b43..291dbf4c8186 100644
--- a/drivers/char/drm/r300_cmdbuf.c
+++ b/drivers/char/drm/r300_cmdbuf.c
@@ -52,8 +52,8 @@ static const int r300_cliprect_cntl[4] = {
52 * Emit up to R300_SIMULTANEOUS_CLIPRECTS cliprects from the given command 52 * Emit up to R300_SIMULTANEOUS_CLIPRECTS cliprects from the given command
53 * buffer, starting with index n. 53 * buffer, starting with index n.
54 */ 54 */
55static int r300_emit_cliprects(drm_radeon_private_t * dev_priv, 55static int r300_emit_cliprects(drm_radeon_private_t *dev_priv,
56 drm_radeon_kcmd_buffer_t * cmdbuf, int n) 56 drm_radeon_kcmd_buffer_t *cmdbuf, int n)
57{ 57{
58 drm_clip_rect_t box; 58 drm_clip_rect_t box;
59 int nr; 59 int nr;
@@ -216,6 +216,7 @@ void r300_init_reg_flags(void)
216 ADD_RANGE(R300_TX_UNK1_0, 16); 216 ADD_RANGE(R300_TX_UNK1_0, 16);
217 ADD_RANGE(R300_TX_SIZE_0, 16); 217 ADD_RANGE(R300_TX_SIZE_0, 16);
218 ADD_RANGE(R300_TX_FORMAT_0, 16); 218 ADD_RANGE(R300_TX_FORMAT_0, 16);
219 ADD_RANGE(R300_TX_PITCH_0, 16);
219 /* Texture offset is dangerous and needs more checking */ 220 /* Texture offset is dangerous and needs more checking */
220 ADD_RANGE_MARK(R300_TX_OFFSET_0, 16, MARK_CHECK_OFFSET); 221 ADD_RANGE_MARK(R300_TX_OFFSET_0, 16, MARK_CHECK_OFFSET);
221 ADD_RANGE(R300_TX_UNK4_0, 16); 222 ADD_RANGE(R300_TX_UNK4_0, 16);
@@ -242,7 +243,7 @@ static __inline__ int r300_check_range(unsigned reg, int count)
242 243
243 /* we expect offsets passed to the framebuffer to be either within video memory or 244 /* we expect offsets passed to the framebuffer to be either within video memory or
244 within AGP space */ 245 within AGP space */
245static __inline__ int r300_check_offset(drm_radeon_private_t * dev_priv, 246static __inline__ int r300_check_offset(drm_radeon_private_t *dev_priv,
246 u32 offset) 247 u32 offset)
247{ 248{
248 /* we realy want to check against end of video aperture 249 /* we realy want to check against end of video aperture
@@ -317,8 +318,8 @@ static __inline__ int r300_emit_carefully_checked_packet0(drm_radeon_private_t *
317 * 318 *
318 * Note that checks are performed on contents and addresses of the registers 319 * Note that checks are performed on contents and addresses of the registers
319 */ 320 */
320static __inline__ int r300_emit_packet0(drm_radeon_private_t * dev_priv, 321static __inline__ int r300_emit_packet0(drm_radeon_private_t *dev_priv,
321 drm_radeon_kcmd_buffer_t * cmdbuf, 322 drm_radeon_kcmd_buffer_t *cmdbuf,
322 drm_r300_cmd_header_t header) 323 drm_r300_cmd_header_t header)
323{ 324{
324 int reg; 325 int reg;
@@ -363,8 +364,8 @@ static __inline__ int r300_emit_packet0(drm_radeon_private_t * dev_priv,
363 * the graphics card. 364 * the graphics card.
364 * Called by r300_do_cp_cmdbuf. 365 * Called by r300_do_cp_cmdbuf.
365 */ 366 */
366static __inline__ int r300_emit_vpu(drm_radeon_private_t * dev_priv, 367static __inline__ int r300_emit_vpu(drm_radeon_private_t *dev_priv,
367 drm_radeon_kcmd_buffer_t * cmdbuf, 368 drm_radeon_kcmd_buffer_t *cmdbuf,
368 drm_r300_cmd_header_t header) 369 drm_r300_cmd_header_t header)
369{ 370{
370 int sz; 371 int sz;
@@ -400,8 +401,8 @@ static __inline__ int r300_emit_vpu(drm_radeon_private_t * dev_priv,
400 * Emit a clear packet from userspace. 401 * Emit a clear packet from userspace.
401 * Called by r300_emit_packet3. 402 * Called by r300_emit_packet3.
402 */ 403 */
403static __inline__ int r300_emit_clear(drm_radeon_private_t * dev_priv, 404static __inline__ int r300_emit_clear(drm_radeon_private_t *dev_priv,
404 drm_radeon_kcmd_buffer_t * cmdbuf) 405 drm_radeon_kcmd_buffer_t *cmdbuf)
405{ 406{
406 RING_LOCALS; 407 RING_LOCALS;
407 408
@@ -421,8 +422,8 @@ static __inline__ int r300_emit_clear(drm_radeon_private_t * dev_priv,
421 return 0; 422 return 0;
422} 423}
423 424
424static __inline__ int r300_emit_3d_load_vbpntr(drm_radeon_private_t * dev_priv, 425static __inline__ int r300_emit_3d_load_vbpntr(drm_radeon_private_t *dev_priv,
425 drm_radeon_kcmd_buffer_t * cmdbuf, 426 drm_radeon_kcmd_buffer_t *cmdbuf,
426 u32 header) 427 u32 header)
427{ 428{
428 int count, i, k; 429 int count, i, k;
@@ -489,8 +490,8 @@ static __inline__ int r300_emit_3d_load_vbpntr(drm_radeon_private_t * dev_priv,
489 return 0; 490 return 0;
490} 491}
491 492
492static __inline__ int r300_emit_raw_packet3(drm_radeon_private_t * dev_priv, 493static __inline__ int r300_emit_raw_packet3(drm_radeon_private_t *dev_priv,
493 drm_radeon_kcmd_buffer_t * cmdbuf) 494 drm_radeon_kcmd_buffer_t *cmdbuf)
494{ 495{
495 u32 header; 496 u32 header;
496 int count; 497 int count;
@@ -554,8 +555,8 @@ static __inline__ int r300_emit_raw_packet3(drm_radeon_private_t * dev_priv,
554 * Emit a rendering packet3 from userspace. 555 * Emit a rendering packet3 from userspace.
555 * Called by r300_do_cp_cmdbuf. 556 * Called by r300_do_cp_cmdbuf.
556 */ 557 */
557static __inline__ int r300_emit_packet3(drm_radeon_private_t * dev_priv, 558static __inline__ int r300_emit_packet3(drm_radeon_private_t *dev_priv,
558 drm_radeon_kcmd_buffer_t * cmdbuf, 559 drm_radeon_kcmd_buffer_t *cmdbuf,
559 drm_r300_cmd_header_t header) 560 drm_r300_cmd_header_t header)
560{ 561{
561 int n; 562 int n;
@@ -623,7 +624,7 @@ static __inline__ int r300_emit_packet3(drm_radeon_private_t * dev_priv,
623/** 624/**
624 * Emit the sequence to pacify R300. 625 * Emit the sequence to pacify R300.
625 */ 626 */
626static __inline__ void r300_pacify(drm_radeon_private_t * dev_priv) 627static __inline__ void r300_pacify(drm_radeon_private_t *dev_priv)
627{ 628{
628 RING_LOCALS; 629 RING_LOCALS;
629 630
@@ -657,9 +658,10 @@ static void r300_discard_buffer(drm_device_t * dev, drm_buf_t * buf)
657 * commands on the DMA ring buffer. 658 * commands on the DMA ring buffer.
658 * Called by the ioctl handler function radeon_cp_cmdbuf. 659 * Called by the ioctl handler function radeon_cp_cmdbuf.
659 */ 660 */
660int r300_do_cp_cmdbuf(drm_device_t * dev, 661int r300_do_cp_cmdbuf(drm_device_t *dev,
661 DRMFILE filp, 662 DRMFILE filp,
662 drm_file_t * filp_priv, drm_radeon_kcmd_buffer_t * cmdbuf) 663 drm_file_t *filp_priv,
664 drm_radeon_kcmd_buffer_t *cmdbuf)
663{ 665{
664 drm_radeon_private_t *dev_priv = dev->dev_private; 666 drm_radeon_private_t *dev_priv = dev->dev_private;
665 drm_device_dma_t *dma = dev->dma; 667 drm_device_dma_t *dma = dev->dma;
diff --git a/drivers/char/drm/r300_reg.h b/drivers/char/drm/r300_reg.h
index e5b73c002394..a0ed20e25221 100644
--- a/drivers/char/drm/r300_reg.h
+++ b/drivers/char/drm/r300_reg.h
@@ -797,6 +797,7 @@ I am fairly certain that they are correct unless stated otherwise in comments.
797 797
798# define R300_TX_FORMAT_YUV_MODE 0x00800000 798# define R300_TX_FORMAT_YUV_MODE 0x00800000
799 799
800#define R300_TX_PITCH_0 0x4500
800#define R300_TX_OFFSET_0 0x4540 801#define R300_TX_OFFSET_0 0x4540
801/* BEGIN: Guess from R200 */ 802/* BEGIN: Guess from R200 */
802# define R300_TXO_ENDIAN_NO_SWAP (0 << 0) 803# define R300_TXO_ENDIAN_NO_SWAP (0 << 0)
diff --git a/drivers/char/drm/radeon_cp.c b/drivers/char/drm/radeon_cp.c
index eaa0e2b7c2f8..915665c7fe7c 100644
--- a/drivers/char/drm/radeon_cp.c
+++ b/drivers/char/drm/radeon_cp.c
@@ -824,7 +824,7 @@ static int RADEON_READ_PLL(drm_device_t * dev, int addr)
824 return RADEON_READ(RADEON_CLOCK_CNTL_DATA); 824 return RADEON_READ(RADEON_CLOCK_CNTL_DATA);
825} 825}
826 826
827static int RADEON_READ_PCIE(drm_radeon_private_t * dev_priv, int addr) 827static int RADEON_READ_PCIE(drm_radeon_private_t *dev_priv, int addr)
828{ 828{
829 RADEON_WRITE8(RADEON_PCIE_INDEX, addr & 0xff); 829 RADEON_WRITE8(RADEON_PCIE_INDEX, addr & 0xff);
830 return RADEON_READ(RADEON_PCIE_DATA); 830 return RADEON_READ(RADEON_PCIE_DATA);
@@ -1125,7 +1125,7 @@ static void radeon_cp_init_ring_buffer(drm_device_t * dev,
1125 | (dev_priv->fb_location >> 16)); 1125 | (dev_priv->fb_location >> 16));
1126 1126
1127#if __OS_HAS_AGP 1127#if __OS_HAS_AGP
1128 if (!dev_priv->is_pci) { 1128 if (dev_priv->flags & CHIP_IS_AGP) {
1129 RADEON_WRITE(RADEON_MC_AGP_LOCATION, 1129 RADEON_WRITE(RADEON_MC_AGP_LOCATION,
1130 (((dev_priv->gart_vm_start - 1 + 1130 (((dev_priv->gart_vm_start - 1 +
1131 dev_priv->gart_size) & 0xffff0000) | 1131 dev_priv->gart_size) & 0xffff0000) |
@@ -1152,7 +1152,7 @@ static void radeon_cp_init_ring_buffer(drm_device_t * dev,
1152 dev_priv->ring.tail = cur_read_ptr; 1152 dev_priv->ring.tail = cur_read_ptr;
1153 1153
1154#if __OS_HAS_AGP 1154#if __OS_HAS_AGP
1155 if (!dev_priv->is_pci) { 1155 if (dev_priv->flags & CHIP_IS_AGP) {
1156 /* set RADEON_AGP_BASE here instead of relying on X from user space */ 1156 /* set RADEON_AGP_BASE here instead of relying on X from user space */
1157 RADEON_WRITE(RADEON_AGP_BASE, (unsigned int)dev->agp->base); 1157 RADEON_WRITE(RADEON_AGP_BASE, (unsigned int)dev->agp->base);
1158 RADEON_WRITE(RADEON_CP_RB_RPTR_ADDR, 1158 RADEON_WRITE(RADEON_CP_RB_RPTR_ADDR,
@@ -1278,13 +1278,15 @@ static void radeon_set_pciegart(drm_radeon_private_t * dev_priv, int on)
1278/* Enable or disable PCI GART on the chip */ 1278/* Enable or disable PCI GART on the chip */
1279static void radeon_set_pcigart(drm_radeon_private_t * dev_priv, int on) 1279static void radeon_set_pcigart(drm_radeon_private_t * dev_priv, int on)
1280{ 1280{
1281 u32 tmp = RADEON_READ(RADEON_AIC_CNTL); 1281 u32 tmp;
1282 1282
1283 if (dev_priv->flags & CHIP_IS_PCIE) { 1283 if (dev_priv->flags & CHIP_IS_PCIE) {
1284 radeon_set_pciegart(dev_priv, on); 1284 radeon_set_pciegart(dev_priv, on);
1285 return; 1285 return;
1286 } 1286 }
1287 1287
1288 tmp = RADEON_READ(RADEON_AIC_CNTL);
1289
1288 if (on) { 1290 if (on) {
1289 RADEON_WRITE(RADEON_AIC_CNTL, 1291 RADEON_WRITE(RADEON_AIC_CNTL,
1290 tmp | RADEON_PCIGART_TRANSLATE_EN); 1292 tmp | RADEON_PCIGART_TRANSLATE_EN);
@@ -1311,14 +1313,18 @@ static void radeon_set_pcigart(drm_radeon_private_t * dev_priv, int on)
1311 1313
1312static int radeon_do_init_cp(drm_device_t * dev, drm_radeon_init_t * init) 1314static int radeon_do_init_cp(drm_device_t * dev, drm_radeon_init_t * init)
1313{ 1315{
1314 drm_radeon_private_t *dev_priv = dev->dev_private;; 1316 drm_radeon_private_t *dev_priv = dev->dev_private;
1317
1315 DRM_DEBUG("\n"); 1318 DRM_DEBUG("\n");
1316 1319
1317 dev_priv->is_pci = init->is_pci; 1320 if (init->is_pci && (dev_priv->flags & CHIP_IS_AGP))
1321 {
1322 DRM_DEBUG("Forcing AGP card to PCI mode\n");
1323 dev_priv->flags &= ~CHIP_IS_AGP;
1324 }
1318 1325
1319 if (dev_priv->is_pci && !dev->sg) { 1326 if ((!(dev_priv->flags & CHIP_IS_AGP)) && !dev->sg) {
1320 DRM_ERROR("PCI GART memory not allocated!\n"); 1327 DRM_ERROR("PCI GART memory not allocated!\n");
1321 dev->dev_private = (void *)dev_priv;
1322 radeon_do_cleanup_cp(dev); 1328 radeon_do_cleanup_cp(dev);
1323 return DRM_ERR(EINVAL); 1329 return DRM_ERR(EINVAL);
1324 } 1330 }
@@ -1327,12 +1333,11 @@ static int radeon_do_init_cp(drm_device_t * dev, drm_radeon_init_t * init)
1327 if (dev_priv->usec_timeout < 1 || 1333 if (dev_priv->usec_timeout < 1 ||
1328 dev_priv->usec_timeout > RADEON_MAX_USEC_TIMEOUT) { 1334 dev_priv->usec_timeout > RADEON_MAX_USEC_TIMEOUT) {
1329 DRM_DEBUG("TIMEOUT problem!\n"); 1335 DRM_DEBUG("TIMEOUT problem!\n");
1330 dev->dev_private = (void *)dev_priv;
1331 radeon_do_cleanup_cp(dev); 1336 radeon_do_cleanup_cp(dev);
1332 return DRM_ERR(EINVAL); 1337 return DRM_ERR(EINVAL);
1333 } 1338 }
1334 1339
1335 switch (init->func) { 1340 switch(init->func) {
1336 case RADEON_INIT_R200_CP: 1341 case RADEON_INIT_R200_CP:
1337 dev_priv->microcode_version = UCODE_R200; 1342 dev_priv->microcode_version = UCODE_R200;
1338 break; 1343 break;
@@ -1353,7 +1358,6 @@ static int radeon_do_init_cp(drm_device_t * dev, drm_radeon_init_t * init)
1353 if ((init->cp_mode != RADEON_CSQ_PRIBM_INDDIS) && 1358 if ((init->cp_mode != RADEON_CSQ_PRIBM_INDDIS) &&
1354 (init->cp_mode != RADEON_CSQ_PRIBM_INDBM)) { 1359 (init->cp_mode != RADEON_CSQ_PRIBM_INDBM)) {
1355 DRM_DEBUG("BAD cp_mode (%x)!\n", init->cp_mode); 1360 DRM_DEBUG("BAD cp_mode (%x)!\n", init->cp_mode);
1356 dev->dev_private = (void *)dev_priv;
1357 radeon_do_cleanup_cp(dev); 1361 radeon_do_cleanup_cp(dev);
1358 return DRM_ERR(EINVAL); 1362 return DRM_ERR(EINVAL);
1359 } 1363 }
@@ -1416,8 +1420,6 @@ static int radeon_do_init_cp(drm_device_t * dev, drm_radeon_init_t * init)
1416 1420
1417 DRM_GETSAREA(); 1421 DRM_GETSAREA();
1418 1422
1419 dev_priv->fb_offset = init->fb_offset;
1420 dev_priv->mmio_offset = init->mmio_offset;
1421 dev_priv->ring_offset = init->ring_offset; 1423 dev_priv->ring_offset = init->ring_offset;
1422 dev_priv->ring_rptr_offset = init->ring_rptr_offset; 1424 dev_priv->ring_rptr_offset = init->ring_rptr_offset;
1423 dev_priv->buffers_offset = init->buffers_offset; 1425 dev_priv->buffers_offset = init->buffers_offset;
@@ -1425,29 +1427,19 @@ static int radeon_do_init_cp(drm_device_t * dev, drm_radeon_init_t * init)
1425 1427
1426 if (!dev_priv->sarea) { 1428 if (!dev_priv->sarea) {
1427 DRM_ERROR("could not find sarea!\n"); 1429 DRM_ERROR("could not find sarea!\n");
1428 dev->dev_private = (void *)dev_priv;
1429 radeon_do_cleanup_cp(dev); 1430 radeon_do_cleanup_cp(dev);
1430 return DRM_ERR(EINVAL); 1431 return DRM_ERR(EINVAL);
1431 } 1432 }
1432 1433
1433 dev_priv->mmio = drm_core_findmap(dev, init->mmio_offset);
1434 if (!dev_priv->mmio) {
1435 DRM_ERROR("could not find mmio region!\n");
1436 dev->dev_private = (void *)dev_priv;
1437 radeon_do_cleanup_cp(dev);
1438 return DRM_ERR(EINVAL);
1439 }
1440 dev_priv->cp_ring = drm_core_findmap(dev, init->ring_offset); 1434 dev_priv->cp_ring = drm_core_findmap(dev, init->ring_offset);
1441 if (!dev_priv->cp_ring) { 1435 if (!dev_priv->cp_ring) {
1442 DRM_ERROR("could not find cp ring region!\n"); 1436 DRM_ERROR("could not find cp ring region!\n");
1443 dev->dev_private = (void *)dev_priv;
1444 radeon_do_cleanup_cp(dev); 1437 radeon_do_cleanup_cp(dev);
1445 return DRM_ERR(EINVAL); 1438 return DRM_ERR(EINVAL);
1446 } 1439 }
1447 dev_priv->ring_rptr = drm_core_findmap(dev, init->ring_rptr_offset); 1440 dev_priv->ring_rptr = drm_core_findmap(dev, init->ring_rptr_offset);
1448 if (!dev_priv->ring_rptr) { 1441 if (!dev_priv->ring_rptr) {
1449 DRM_ERROR("could not find ring read pointer!\n"); 1442 DRM_ERROR("could not find ring read pointer!\n");
1450 dev->dev_private = (void *)dev_priv;
1451 radeon_do_cleanup_cp(dev); 1443 radeon_do_cleanup_cp(dev);
1452 return DRM_ERR(EINVAL); 1444 return DRM_ERR(EINVAL);
1453 } 1445 }
@@ -1455,7 +1447,6 @@ static int radeon_do_init_cp(drm_device_t * dev, drm_radeon_init_t * init)
1455 dev->agp_buffer_map = drm_core_findmap(dev, init->buffers_offset); 1447 dev->agp_buffer_map = drm_core_findmap(dev, init->buffers_offset);
1456 if (!dev->agp_buffer_map) { 1448 if (!dev->agp_buffer_map) {
1457 DRM_ERROR("could not find dma buffer region!\n"); 1449 DRM_ERROR("could not find dma buffer region!\n");
1458 dev->dev_private = (void *)dev_priv;
1459 radeon_do_cleanup_cp(dev); 1450 radeon_do_cleanup_cp(dev);
1460 return DRM_ERR(EINVAL); 1451 return DRM_ERR(EINVAL);
1461 } 1452 }
@@ -1465,7 +1456,6 @@ static int radeon_do_init_cp(drm_device_t * dev, drm_radeon_init_t * init)
1465 drm_core_findmap(dev, init->gart_textures_offset); 1456 drm_core_findmap(dev, init->gart_textures_offset);
1466 if (!dev_priv->gart_textures) { 1457 if (!dev_priv->gart_textures) {
1467 DRM_ERROR("could not find GART texture region!\n"); 1458 DRM_ERROR("could not find GART texture region!\n");
1468 dev->dev_private = (void *)dev_priv;
1469 radeon_do_cleanup_cp(dev); 1459 radeon_do_cleanup_cp(dev);
1470 return DRM_ERR(EINVAL); 1460 return DRM_ERR(EINVAL);
1471 } 1461 }
@@ -1476,7 +1466,7 @@ static int radeon_do_init_cp(drm_device_t * dev, drm_radeon_init_t * init)
1476 init->sarea_priv_offset); 1466 init->sarea_priv_offset);
1477 1467
1478#if __OS_HAS_AGP 1468#if __OS_HAS_AGP
1479 if (!dev_priv->is_pci) { 1469 if (dev_priv->flags & CHIP_IS_AGP) {
1480 drm_core_ioremap(dev_priv->cp_ring, dev); 1470 drm_core_ioremap(dev_priv->cp_ring, dev);
1481 drm_core_ioremap(dev_priv->ring_rptr, dev); 1471 drm_core_ioremap(dev_priv->ring_rptr, dev);
1482 drm_core_ioremap(dev->agp_buffer_map, dev); 1472 drm_core_ioremap(dev->agp_buffer_map, dev);
@@ -1484,7 +1474,6 @@ static int radeon_do_init_cp(drm_device_t * dev, drm_radeon_init_t * init)
1484 !dev_priv->ring_rptr->handle || 1474 !dev_priv->ring_rptr->handle ||
1485 !dev->agp_buffer_map->handle) { 1475 !dev->agp_buffer_map->handle) {
1486 DRM_ERROR("could not find ioremap agp regions!\n"); 1476 DRM_ERROR("could not find ioremap agp regions!\n");
1487 dev->dev_private = (void *)dev_priv;
1488 radeon_do_cleanup_cp(dev); 1477 radeon_do_cleanup_cp(dev);
1489 return DRM_ERR(EINVAL); 1478 return DRM_ERR(EINVAL);
1490 } 1479 }
@@ -1525,7 +1514,7 @@ static int radeon_do_init_cp(drm_device_t * dev, drm_radeon_init_t * init)
1525 + RADEON_READ(RADEON_CONFIG_APER_SIZE); 1514 + RADEON_READ(RADEON_CONFIG_APER_SIZE);
1526 1515
1527#if __OS_HAS_AGP 1516#if __OS_HAS_AGP
1528 if (!dev_priv->is_pci) 1517 if (dev_priv->flags & CHIP_IS_AGP)
1529 dev_priv->gart_buffers_offset = (dev->agp_buffer_map->offset 1518 dev_priv->gart_buffers_offset = (dev->agp_buffer_map->offset
1530 - dev->agp->base 1519 - dev->agp->base
1531 + dev_priv->gart_vm_start); 1520 + dev_priv->gart_vm_start);
@@ -1551,7 +1540,7 @@ static int radeon_do_init_cp(drm_device_t * dev, drm_radeon_init_t * init)
1551 dev_priv->ring.high_mark = RADEON_RING_HIGH_MARK; 1540 dev_priv->ring.high_mark = RADEON_RING_HIGH_MARK;
1552 1541
1553#if __OS_HAS_AGP 1542#if __OS_HAS_AGP
1554 if (!dev_priv->is_pci) { 1543 if (dev_priv->flags & CHIP_IS_AGP) {
1555 /* Turn off PCI GART */ 1544 /* Turn off PCI GART */
1556 radeon_set_pcigart(dev_priv, 0); 1545 radeon_set_pcigart(dev_priv, 0);
1557 } else 1546 } else
@@ -1593,7 +1582,6 @@ static int radeon_do_init_cp(drm_device_t * dev, drm_radeon_init_t * init)
1593 1582
1594 if (!drm_ati_pcigart_init(dev, &dev_priv->gart_info)) { 1583 if (!drm_ati_pcigart_init(dev, &dev_priv->gart_info)) {
1595 DRM_ERROR("failed to init PCI GART!\n"); 1584 DRM_ERROR("failed to init PCI GART!\n");
1596 dev->dev_private = (void *)dev_priv;
1597 radeon_do_cleanup_cp(dev); 1585 radeon_do_cleanup_cp(dev);
1598 return DRM_ERR(ENOMEM); 1586 return DRM_ERR(ENOMEM);
1599 } 1587 }
@@ -1607,8 +1595,6 @@ static int radeon_do_init_cp(drm_device_t * dev, drm_radeon_init_t * init)
1607 1595
1608 dev_priv->last_buf = 0; 1596 dev_priv->last_buf = 0;
1609 1597
1610 dev->dev_private = (void *)dev_priv;
1611
1612 radeon_do_engine_reset(dev); 1598 radeon_do_engine_reset(dev);
1613 1599
1614 return 0; 1600 return 0;
@@ -1627,11 +1613,15 @@ static int radeon_do_cleanup_cp(drm_device_t * dev)
1627 drm_irq_uninstall(dev); 1613 drm_irq_uninstall(dev);
1628 1614
1629#if __OS_HAS_AGP 1615#if __OS_HAS_AGP
1630 if (!dev_priv->is_pci) { 1616 if (dev_priv->flags & CHIP_IS_AGP) {
1631 if (dev_priv->cp_ring != NULL) 1617 if (dev_priv->cp_ring != NULL) {
1632 drm_core_ioremapfree(dev_priv->cp_ring, dev); 1618 drm_core_ioremapfree(dev_priv->cp_ring, dev);
1633 if (dev_priv->ring_rptr != NULL) 1619 dev_priv->cp_ring = NULL;
1620 }
1621 if (dev_priv->ring_rptr != NULL) {
1634 drm_core_ioremapfree(dev_priv->ring_rptr, dev); 1622 drm_core_ioremapfree(dev_priv->ring_rptr, dev);
1623 dev_priv->ring_rptr = NULL;
1624 }
1635 if (dev->agp_buffer_map != NULL) { 1625 if (dev->agp_buffer_map != NULL) {
1636 drm_core_ioremapfree(dev->agp_buffer_map, dev); 1626 drm_core_ioremapfree(dev->agp_buffer_map, dev);
1637 dev->agp_buffer_map = NULL; 1627 dev->agp_buffer_map = NULL;
@@ -1639,16 +1629,20 @@ static int radeon_do_cleanup_cp(drm_device_t * dev)
1639 } else 1629 } else
1640#endif 1630#endif
1641 { 1631 {
1642 if (dev_priv->gart_info.bus_addr) 1632
1633 if (dev_priv->gart_info.bus_addr) {
1634 /* Turn off PCI GART */
1635 radeon_set_pcigart(dev_priv, 0);
1643 if (!drm_ati_pcigart_cleanup(dev, &dev_priv->gart_info)) 1636 if (!drm_ati_pcigart_cleanup(dev, &dev_priv->gart_info))
1644 DRM_ERROR("failed to cleanup PCI GART!\n"); 1637 DRM_ERROR("failed to cleanup PCI GART!\n");
1638 }
1645 1639
1646 if (dev_priv->gart_info.gart_table_location == DRM_ATI_GART_FB) { 1640 if (dev_priv->gart_info.gart_table_location == DRM_ATI_GART_FB)
1641 {
1647 drm_core_ioremapfree(&dev_priv->gart_info.mapping, dev); 1642 drm_core_ioremapfree(&dev_priv->gart_info.mapping, dev);
1648 dev_priv->gart_info.addr = 0; 1643 dev_priv->gart_info.addr = 0;
1649 } 1644 }
1650 } 1645 }
1651
1652 /* only clear to the start of flags */ 1646 /* only clear to the start of flags */
1653 memset(dev_priv, 0, offsetof(drm_radeon_private_t, flags)); 1647 memset(dev_priv, 0, offsetof(drm_radeon_private_t, flags));
1654 1648
@@ -1674,7 +1668,7 @@ static int radeon_do_resume_cp(drm_device_t * dev)
1674 DRM_DEBUG("Starting radeon_do_resume_cp()\n"); 1668 DRM_DEBUG("Starting radeon_do_resume_cp()\n");
1675 1669
1676#if __OS_HAS_AGP 1670#if __OS_HAS_AGP
1677 if (!dev_priv->is_pci) { 1671 if (dev_priv->flags & CHIP_IS_AGP) {
1678 /* Turn off PCI GART */ 1672 /* Turn off PCI GART */
1679 radeon_set_pcigart(dev_priv, 0); 1673 radeon_set_pcigart(dev_priv, 0);
1680 } else 1674 } else
@@ -2138,7 +2132,7 @@ int radeon_driver_load(struct drm_device *dev, unsigned long flags)
2138 dev_priv->flags |= CHIP_IS_PCIE; 2132 dev_priv->flags |= CHIP_IS_PCIE;
2139 2133
2140 DRM_DEBUG("%s card detected\n", 2134 DRM_DEBUG("%s card detected\n",
2141 ((dev_priv->flags & CHIP_IS_AGP) ? "AGP" : "PCI")); 2135 ((dev_priv->flags & CHIP_IS_AGP) ? "AGP" : (((dev_priv->flags & CHIP_IS_PCIE) ? "PCIE" : "PCI"))));
2142 return ret; 2136 return ret;
2143} 2137}
2144 2138
@@ -2171,7 +2165,6 @@ int radeon_driver_unload(struct drm_device *dev)
2171 drm_radeon_private_t *dev_priv = dev->dev_private; 2165 drm_radeon_private_t *dev_priv = dev->dev_private;
2172 2166
2173 DRM_DEBUG("\n"); 2167 DRM_DEBUG("\n");
2174
2175 drm_free(dev_priv, sizeof(*dev_priv), DRM_MEM_DRIVER); 2168 drm_free(dev_priv, sizeof(*dev_priv), DRM_MEM_DRIVER);
2176 2169
2177 dev->dev_private = NULL; 2170 dev->dev_private = NULL;
diff --git a/drivers/char/drm/radeon_drm.h b/drivers/char/drm/radeon_drm.h
index 1cd81a671a36..9c177a6b2a4c 100644
--- a/drivers/char/drm/radeon_drm.h
+++ b/drivers/char/drm/radeon_drm.h
@@ -624,6 +624,11 @@ typedef struct drm_radeon_indirect {
624 int discard; 624 int discard;
625} drm_radeon_indirect_t; 625} drm_radeon_indirect_t;
626 626
627/* enum for card type parameters */
628#define RADEON_CARD_PCI 0
629#define RADEON_CARD_AGP 1
630#define RADEON_CARD_PCIE 2
631
627/* 1.3: An ioctl to get parameters that aren't available to the 3d 632/* 1.3: An ioctl to get parameters that aren't available to the 3d
628 * client any other way. 633 * client any other way.
629 */ 634 */
@@ -640,6 +645,7 @@ typedef struct drm_radeon_indirect {
640#define RADEON_PARAM_SAREA_HANDLE 9 645#define RADEON_PARAM_SAREA_HANDLE 9
641#define RADEON_PARAM_GART_TEX_HANDLE 10 646#define RADEON_PARAM_GART_TEX_HANDLE 10
642#define RADEON_PARAM_SCRATCH_OFFSET 11 647#define RADEON_PARAM_SCRATCH_OFFSET 11
648#define RADEON_PARAM_CARD_TYPE 12
643 649
644typedef struct drm_radeon_getparam { 650typedef struct drm_radeon_getparam {
645 int param; 651 int param;
diff --git a/drivers/char/drm/radeon_drv.h b/drivers/char/drm/radeon_drv.h
index c37f2ea20783..498b19b1d641 100644
--- a/drivers/char/drm/radeon_drv.h
+++ b/drivers/char/drm/radeon_drv.h
@@ -38,7 +38,7 @@
38 38
39#define DRIVER_NAME "radeon" 39#define DRIVER_NAME "radeon"
40#define DRIVER_DESC "ATI Radeon" 40#define DRIVER_DESC "ATI Radeon"
41#define DRIVER_DATE "20050911" 41#define DRIVER_DATE "20051229"
42 42
43/* Interface history: 43/* Interface history:
44 * 44 *
@@ -73,7 +73,7 @@
73 * 1.11- Add packet R200_EMIT_RB3D_BLENDCOLOR to support GL_EXT_blend_color 73 * 1.11- Add packet R200_EMIT_RB3D_BLENDCOLOR to support GL_EXT_blend_color
74 * and GL_EXT_blend_[func|equation]_separate on r200 74 * and GL_EXT_blend_[func|equation]_separate on r200
75 * 1.12- Add R300 CP microcode support - this just loads the CP on r300 75 * 1.12- Add R300 CP microcode support - this just loads the CP on r300
76 * (No 3D support yet - just microcode loading) 76 * (No 3D support yet - just microcode loading).
77 * 1.13- Add packet R200_EMIT_TCL_POINT_SPRITE_CNTL for ARB_point_parameters 77 * 1.13- Add packet R200_EMIT_TCL_POINT_SPRITE_CNTL for ARB_point_parameters
78 * - Add hyperz support, add hyperz flags to clear ioctl. 78 * - Add hyperz support, add hyperz flags to clear ioctl.
79 * 1.14- Add support for color tiling 79 * 1.14- Add support for color tiling
@@ -88,14 +88,13 @@
88 * R200_EMIT_PP_TXFILTER_0-5, 2 more regs) and R200_EMIT_ATF_TFACTOR 88 * R200_EMIT_PP_TXFILTER_0-5, 2 more regs) and R200_EMIT_ATF_TFACTOR
89 * (replaces R200_EMIT_TFACTOR_0 (8 consts instead of 6) 89 * (replaces R200_EMIT_TFACTOR_0 (8 consts instead of 6)
90 * 1.19- Add support for gart table in FB memory and PCIE r300 90 * 1.19- Add support for gart table in FB memory and PCIE r300
91 * 1.20- Add support for r300 texrect
92 * 1.21- Add support for card type getparam
91 */ 93 */
92#define DRIVER_MAJOR 1 94#define DRIVER_MAJOR 1
93#define DRIVER_MINOR 19 95#define DRIVER_MINOR 21
94#define DRIVER_PATCHLEVEL 0 96#define DRIVER_PATCHLEVEL 0
95 97
96#define GET_RING_HEAD(dev_priv) DRM_READ32( (dev_priv)->ring_rptr, 0 )
97#define SET_RING_HEAD(dev_priv,val) DRM_WRITE32( (dev_priv)->ring_rptr, 0, (val) )
98
99/* 98/*
100 * Radeon chip families 99 * Radeon chip families
101 */ 100 */
@@ -138,6 +137,9 @@ enum radeon_chip_flags {
138 CHIP_IS_PCIE = 0x00200000UL, 137 CHIP_IS_PCIE = 0x00200000UL,
139}; 138};
140 139
140#define GET_RING_HEAD(dev_priv) DRM_READ32( (dev_priv)->ring_rptr, 0 )
141#define SET_RING_HEAD(dev_priv,val) DRM_WRITE32( (dev_priv)->ring_rptr, 0, (val) )
142
141typedef struct drm_radeon_freelist { 143typedef struct drm_radeon_freelist {
142 unsigned int age; 144 unsigned int age;
143 drm_buf_t *buf; 145 drm_buf_t *buf;
@@ -214,8 +216,6 @@ typedef struct drm_radeon_private {
214 216
215 int microcode_version; 217 int microcode_version;
216 218
217 int is_pci;
218
219 struct { 219 struct {
220 u32 boxes; 220 u32 boxes;
221 int freelist_timeouts; 221 int freelist_timeouts;
@@ -247,8 +247,6 @@ typedef struct drm_radeon_private {
247 247
248 drm_radeon_depth_clear_t depth_clear; 248 drm_radeon_depth_clear_t depth_clear;
249 249
250 unsigned long fb_offset;
251 unsigned long mmio_offset;
252 unsigned long ring_offset; 250 unsigned long ring_offset;
253 unsigned long ring_rptr_offset; 251 unsigned long ring_rptr_offset;
254 unsigned long buffers_offset; 252 unsigned long buffers_offset;
@@ -362,6 +360,8 @@ extern int r300_do_cp_cmdbuf(drm_device_t * dev, DRMFILE filp,
362 */ 360 */
363 361
364#define RADEON_AGP_COMMAND 0x0f60 362#define RADEON_AGP_COMMAND 0x0f60
363#define RADEON_AGP_COMMAND_PCI_CONFIG 0x0060 /* offset in PCI config */
364# define RADEON_AGP_ENABLE (1<<8)
365#define RADEON_AUX_SCISSOR_CNTL 0x26f0 365#define RADEON_AUX_SCISSOR_CNTL 0x26f0
366# define RADEON_EXCLUSIVE_SCISSOR_0 (1 << 24) 366# define RADEON_EXCLUSIVE_SCISSOR_0 (1 << 24)
367# define RADEON_EXCLUSIVE_SCISSOR_1 (1 << 25) 367# define RADEON_EXCLUSIVE_SCISSOR_1 (1 << 25)
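
RADEON_AGP_COMMAND_PCI_CONFIG and RADEON_AGP_ENABLE let the driver check whether the bridge really enabled AGP rather than trusting the mode userspace requested. A rough sketch of that kind of probe, assuming the standard pci_read_config_dword() helper and the drm_device's pdev pointer (the real consumer is presumably in radeon_cp.c, which this patch also touches):

	u32 agp_command;

	/* read the card's AGP command word from PCI config space */
	pci_read_config_dword(dev->pdev, RADEON_AGP_COMMAND_PCI_CONFIG,
			      &agp_command);
	if (!(agp_command & RADEON_AGP_ENABLE))
		DRM_DEBUG("AGP not enabled by the bridge\n");
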
@@ -377,6 +377,7 @@ extern int r300_do_cp_cmdbuf(drm_device_t * dev, DRMFILE filp,
377# define RADEON_PLL_WR_EN (1 << 7) 377# define RADEON_PLL_WR_EN (1 << 7)
378#define RADEON_CLOCK_CNTL_INDEX 0x0008 378#define RADEON_CLOCK_CNTL_INDEX 0x0008
379#define RADEON_CONFIG_APER_SIZE 0x0108 379#define RADEON_CONFIG_APER_SIZE 0x0108
380#define RADEON_CONFIG_MEMSIZE 0x00f8
380#define RADEON_CRTC_OFFSET 0x0224 381#define RADEON_CRTC_OFFSET 0x0224
381#define RADEON_CRTC_OFFSET_CNTL 0x0228 382#define RADEON_CRTC_OFFSET_CNTL 0x0228
382# define RADEON_CRTC_TILE_EN (1 << 15) 383# define RADEON_CRTC_TILE_EN (1 << 15)
@@ -648,6 +649,8 @@ extern int r300_do_cp_cmdbuf(drm_device_t * dev, DRMFILE filp,
648 649
649#define RADEON_WAIT_UNTIL 0x1720 650#define RADEON_WAIT_UNTIL 0x1720
650# define RADEON_WAIT_CRTC_PFLIP (1 << 0) 651# define RADEON_WAIT_CRTC_PFLIP (1 << 0)
652# define RADEON_WAIT_2D_IDLE (1 << 14)
653# define RADEON_WAIT_3D_IDLE (1 << 15)
651# define RADEON_WAIT_2D_IDLECLEAN (1 << 16) 654# define RADEON_WAIT_2D_IDLECLEAN (1 << 16)
652# define RADEON_WAIT_3D_IDLECLEAN (1 << 17) 655# define RADEON_WAIT_3D_IDLECLEAN (1 << 17)
653# define RADEON_WAIT_HOST_IDLECLEAN (1 << 18) 656# define RADEON_WAIT_HOST_IDLECLEAN (1 << 18)
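
The new RADEON_WAIT_2D_IDLE and RADEON_WAIT_3D_IDLE bits fill in the plain-idle variants alongside the existing IDLECLEAN ones. A hedged sketch of how such a wait is emitted through the CP ring, using the RING_LOCALS/BEGIN_RING/OUT_RING/ADVANCE_RING and CP_PACKET0 macros this header already defines (the concrete emit sites are in the radeon CP/state code):

	RING_LOCALS;

	/* ask the CP to stall until both engines have drained */
	BEGIN_RING(2);
	OUT_RING(CP_PACKET0(RADEON_WAIT_UNTIL, 0));
	OUT_RING(RADEON_WAIT_2D_IDLE | RADEON_WAIT_3D_IDLE);
	ADVANCE_RING();
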
@@ -1102,7 +1105,6 @@ do { \
1102 write = 0; \ 1105 write = 0; \
1103 _tab += _i; \ 1106 _tab += _i; \
1104 } \ 1107 } \
1105 \
1106 while (_size > 0) { \ 1108 while (_size > 0) { \
1107 *(ring + write) = *_tab++; \ 1109 *(ring + write) = *_tab++; \
1108 write++; \ 1110 write++; \
diff --git a/drivers/char/drm/radeon_state.c b/drivers/char/drm/radeon_state.c
index 9e816c63a8a3..e9d8ec3a0994 100644
--- a/drivers/char/drm/radeon_state.c
+++ b/drivers/char/drm/radeon_state.c
@@ -1,5 +1,5 @@
1/* radeon_state.c -- State support for Radeon -*- linux-c -*- 1/* radeon_state.c -- State support for Radeon -*- linux-c -*- */
2 * 2/*
3 * Copyright 2000 VA Linux Systems, Inc., Fremont, California. 3 * Copyright 2000 VA Linux Systems, Inc., Fremont, California.
4 * All Rights Reserved. 4 * All Rights Reserved.
5 * 5 *
@@ -72,10 +72,7 @@ static __inline__ int radeon_check_and_fixup_packets(drm_radeon_private_t *
72 72
73 case RADEON_EMIT_PP_MISC: 73 case RADEON_EMIT_PP_MISC:
74 if (radeon_check_and_fixup_offset(dev_priv, filp_priv, 74 if (radeon_check_and_fixup_offset(dev_priv, filp_priv,
75 &data[(RADEON_RB3D_DEPTHOFFSET 75 &data[(RADEON_RB3D_DEPTHOFFSET - RADEON_PP_MISC) / 4])) {
76 -
77 RADEON_PP_MISC) /
78 4])) {
79 DRM_ERROR("Invalid depth buffer offset\n"); 76 DRM_ERROR("Invalid depth buffer offset\n");
80 return DRM_ERR(EINVAL); 77 return DRM_ERR(EINVAL);
81 } 78 }
@@ -83,10 +80,7 @@ static __inline__ int radeon_check_and_fixup_packets(drm_radeon_private_t *
83 80
84 case RADEON_EMIT_PP_CNTL: 81 case RADEON_EMIT_PP_CNTL:
85 if (radeon_check_and_fixup_offset(dev_priv, filp_priv, 82 if (radeon_check_and_fixup_offset(dev_priv, filp_priv,
86 &data[(RADEON_RB3D_COLOROFFSET 83 &data[(RADEON_RB3D_COLOROFFSET - RADEON_PP_CNTL) / 4])) {
87 -
88 RADEON_PP_CNTL) /
89 4])) {
90 DRM_ERROR("Invalid colour buffer offset\n"); 84 DRM_ERROR("Invalid colour buffer offset\n");
91 return DRM_ERR(EINVAL); 85 return DRM_ERR(EINVAL);
92 } 86 }
@@ -109,10 +103,7 @@ static __inline__ int radeon_check_and_fixup_packets(drm_radeon_private_t *
109 case RADEON_EMIT_PP_TXFILTER_1: 103 case RADEON_EMIT_PP_TXFILTER_1:
110 case RADEON_EMIT_PP_TXFILTER_2: 104 case RADEON_EMIT_PP_TXFILTER_2:
111 if (radeon_check_and_fixup_offset(dev_priv, filp_priv, 105 if (radeon_check_and_fixup_offset(dev_priv, filp_priv,
112 &data[(RADEON_PP_TXOFFSET_0 106 &data[(RADEON_PP_TXOFFSET_0 - RADEON_PP_TXFILTER_0) / 4])) {
113 -
114 RADEON_PP_TXFILTER_0) /
115 4])) {
116 DRM_ERROR("Invalid R100 texture offset\n"); 107 DRM_ERROR("Invalid R100 texture offset\n");
117 return DRM_ERR(EINVAL); 108 return DRM_ERR(EINVAL);
118 } 109 }
@@ -126,8 +117,9 @@ static __inline__ int radeon_check_and_fixup_packets(drm_radeon_private_t *
126 case R200_EMIT_PP_CUBIC_OFFSETS_5:{ 117 case R200_EMIT_PP_CUBIC_OFFSETS_5:{
127 int i; 118 int i;
128 for (i = 0; i < 5; i++) { 119 for (i = 0; i < 5; i++) {
129 if (radeon_check_and_fixup_offset 120 if (radeon_check_and_fixup_offset(dev_priv,
130 (dev_priv, filp_priv, &data[i])) { 121 filp_priv,
122 &data[i])) {
131 DRM_ERROR 123 DRM_ERROR
132 ("Invalid R200 cubic texture offset\n"); 124 ("Invalid R200 cubic texture offset\n");
133 return DRM_ERR(EINVAL); 125 return DRM_ERR(EINVAL);
@@ -239,8 +231,9 @@ static __inline__ int radeon_check_and_fixup_packets(drm_radeon_private_t *
239 231
240static __inline__ int radeon_check_and_fixup_packet3(drm_radeon_private_t * 232static __inline__ int radeon_check_and_fixup_packet3(drm_radeon_private_t *
241 dev_priv, 233 dev_priv,
242 drm_file_t * filp_priv, 234 drm_file_t *filp_priv,
243 drm_radeon_kcmd_buffer_t *cmdbuf, 235 drm_radeon_kcmd_buffer_t *
236 cmdbuf,
244 unsigned int *cmdsz) 237 unsigned int *cmdsz)
245{ 238{
246 u32 *cmd = (u32 *) cmdbuf->buf; 239 u32 *cmd = (u32 *) cmdbuf->buf;
@@ -555,7 +548,8 @@ static struct {
555 {R200_PP_TXOFFSET_4, 1, "R200_PP_TXOFFSET_4"}, 548 {R200_PP_TXOFFSET_4, 1, "R200_PP_TXOFFSET_4"},
556 {R200_PP_TXOFFSET_5, 1, "R200_PP_TXOFFSET_5"}, 549 {R200_PP_TXOFFSET_5, 1, "R200_PP_TXOFFSET_5"},
557 {R200_SE_VTE_CNTL, 1, "R200_SE_VTE_CNTL"}, 550 {R200_SE_VTE_CNTL, 1, "R200_SE_VTE_CNTL"},
558 {R200_SE_TCL_OUTPUT_VTX_COMP_SEL, 1, "R200_SE_TCL_OUTPUT_VTX_COMP_SEL"}, 551 {R200_SE_TCL_OUTPUT_VTX_COMP_SEL, 1,
552 "R200_SE_TCL_OUTPUT_VTX_COMP_SEL"},
559 {R200_PP_TAM_DEBUG3, 1, "R200_PP_TAM_DEBUG3"}, 553 {R200_PP_TAM_DEBUG3, 1, "R200_PP_TAM_DEBUG3"},
560 {R200_PP_CNTL_X, 1, "R200_PP_CNTL_X"}, 554 {R200_PP_CNTL_X, 1, "R200_PP_CNTL_X"},
561 {R200_RB3D_DEPTHXY_OFFSET, 1, "R200_RB3D_DEPTHXY_OFFSET"}, 555 {R200_RB3D_DEPTHXY_OFFSET, 1, "R200_RB3D_DEPTHXY_OFFSET"},
@@ -569,7 +563,7 @@ static struct {
569 {R200_SE_TCL_INPUT_VTX_VECTOR_ADDR_0, 4, 563 {R200_SE_TCL_INPUT_VTX_VECTOR_ADDR_0, 4,
570 "R200_SE_TCL_INPUT_VTX_VECTOR_ADDR_0"}, 564 "R200_SE_TCL_INPUT_VTX_VECTOR_ADDR_0"},
571 {R200_PP_CUBIC_FACES_0, 1, "R200_PP_CUBIC_FACES_0"}, /* 61 */ 565 {R200_PP_CUBIC_FACES_0, 1, "R200_PP_CUBIC_FACES_0"}, /* 61 */
572 {R200_PP_CUBIC_OFFSET_F1_0, 5, "R200_PP_CUBIC_OFFSET_F1_0"}, /* 62 */ 566 {R200_PP_CUBIC_OFFSET_F1_0, 5, "R200_PP_CUBIC_OFFSET_F1_0"}, /* 62 */
573 {R200_PP_CUBIC_FACES_1, 1, "R200_PP_CUBIC_FACES_1"}, 567 {R200_PP_CUBIC_FACES_1, 1, "R200_PP_CUBIC_FACES_1"},
574 {R200_PP_CUBIC_OFFSET_F1_1, 5, "R200_PP_CUBIC_OFFSET_F1_1"}, 568 {R200_PP_CUBIC_OFFSET_F1_1, 5, "R200_PP_CUBIC_OFFSET_F1_1"},
575 {R200_PP_CUBIC_FACES_2, 1, "R200_PP_CUBIC_FACES_2"}, 569 {R200_PP_CUBIC_FACES_2, 1, "R200_PP_CUBIC_FACES_2"},
@@ -592,7 +586,7 @@ static struct {
592 {RADEON_PP_CUBIC_FACES_2, 1, "RADEON_PP_CUBIC_FACES_2"}, 586 {RADEON_PP_CUBIC_FACES_2, 1, "RADEON_PP_CUBIC_FACES_2"},
593 {RADEON_PP_CUBIC_OFFSET_T2_0, 5, "RADEON_PP_CUBIC_OFFSET_T2_0"}, 587 {RADEON_PP_CUBIC_OFFSET_T2_0, 5, "RADEON_PP_CUBIC_OFFSET_T2_0"},
594 {R200_PP_TRI_PERF, 2, "R200_PP_TRI_PERF"}, 588 {R200_PP_TRI_PERF, 2, "R200_PP_TRI_PERF"},
595 {R200_PP_AFS_0, 32, "R200_PP_AFS_0"}, /* 85 */ 589 {R200_PP_AFS_0, 32, "R200_PP_AFS_0"}, /* 85 */
596 {R200_PP_AFS_1, 32, "R200_PP_AFS_1"}, 590 {R200_PP_AFS_1, 32, "R200_PP_AFS_1"},
597 {R200_PP_TFACTOR_0, 8, "R200_ATF_TFACTOR"}, 591 {R200_PP_TFACTOR_0, 8, "R200_ATF_TFACTOR"},
598 {R200_PP_TXFILTER_0, 8, "R200_PP_TXCTLALL_0"}, 592 {R200_PP_TXFILTER_0, 8, "R200_PP_TXCTLALL_0"},
@@ -985,8 +979,8 @@ static void radeon_cp_dispatch_clear(drm_device_t * dev,
985 * rendering a quad into just those buffers. Thus, we have to 979 * rendering a quad into just those buffers. Thus, we have to
986 * make sure the 3D engine is configured correctly. 980 * make sure the 3D engine is configured correctly.
987 */ 981 */
988 if ((dev_priv->microcode_version == UCODE_R200) && 982 else if ((dev_priv->microcode_version == UCODE_R200) &&
989 (flags & (RADEON_DEPTH | RADEON_STENCIL))) { 983 (flags & (RADEON_DEPTH | RADEON_STENCIL))) {
990 984
991 int tempPP_CNTL; 985 int tempPP_CNTL;
992 int tempRE_CNTL; 986 int tempRE_CNTL;
@@ -1637,6 +1631,14 @@ static int radeon_cp_dispatch_texture(DRMFILE filp,
1637 (u32 *) ((char *)dev->agp_buffer_map->handle + buf->offset); 1631 (u32 *) ((char *)dev->agp_buffer_map->handle + buf->offset);
1638 dwords = size / 4; 1632 dwords = size / 4;
1639 1633
1634#define RADEON_COPY_MT(_buf, _data, _width) \
1635 do { \
1636 if (DRM_COPY_FROM_USER(_buf, _data, (_width))) {\
1637 DRM_ERROR("EFAULT on pad, %d bytes\n", (_width)); \
1638 return DRM_ERR(EFAULT); \
1639 } \
1640 } while(0)
1641
1640 if (microtile) { 1642 if (microtile) {
1641 /* texture micro tiling in use, minimum texture width is thus 16 bytes. 1643 /* texture micro tiling in use, minimum texture width is thus 16 bytes.
1642 however, we cannot use blitter directly for texture width < 64 bytes, 1644 however, we cannot use blitter directly for texture width < 64 bytes,
@@ -1648,46 +1650,19 @@ static int radeon_cp_dispatch_texture(DRMFILE filp,
1648 from user space. */ 1650 from user space. */
1649 if (tex->height == 1) { 1651 if (tex->height == 1) {
1650 if (tex_width >= 64 || tex_width <= 16) { 1652 if (tex_width >= 64 || tex_width <= 16) {
1651 if (DRM_COPY_FROM_USER(buffer, data, 1653 RADEON_COPY_MT(buffer, data,
1652 tex_width * 1654 tex_width * sizeof(u32));
1653 sizeof(u32))) {
1654 DRM_ERROR
1655 ("EFAULT on pad, %d bytes\n",
1656 tex_width);
1657 return DRM_ERR(EFAULT);
1658 }
1659 } else if (tex_width == 32) { 1655 } else if (tex_width == 32) {
1660 if (DRM_COPY_FROM_USER 1656 RADEON_COPY_MT(buffer, data, 16);
1661 (buffer, data, 16)) { 1657 RADEON_COPY_MT(buffer + 8,
1662 DRM_ERROR 1658 data + 16, 16);
1663 ("EFAULT on pad, %d bytes\n",
1664 tex_width);
1665 return DRM_ERR(EFAULT);
1666 }
1667 if (DRM_COPY_FROM_USER
1668 (buffer + 8, data + 16, 16)) {
1669 DRM_ERROR
1670 ("EFAULT on pad, %d bytes\n",
1671 tex_width);
1672 return DRM_ERR(EFAULT);
1673 }
1674 } 1659 }
1675 } else if (tex_width >= 64 || tex_width == 16) { 1660 } else if (tex_width >= 64 || tex_width == 16) {
1676 if (DRM_COPY_FROM_USER(buffer, data, 1661 RADEON_COPY_MT(buffer, data,
1677 dwords * sizeof(u32))) { 1662 dwords * sizeof(u32));
1678 DRM_ERROR("EFAULT on data, %d dwords\n",
1679 dwords);
1680 return DRM_ERR(EFAULT);
1681 }
1682 } else if (tex_width < 16) { 1663 } else if (tex_width < 16) {
1683 for (i = 0; i < tex->height; i++) { 1664 for (i = 0; i < tex->height; i++) {
1684 if (DRM_COPY_FROM_USER 1665 RADEON_COPY_MT(buffer, data, tex_width);
1685 (buffer, data, tex_width)) {
1686 DRM_ERROR
1687 ("EFAULT on pad, %d bytes\n",
1688 tex_width);
1689 return DRM_ERR(EFAULT);
1690 }
1691 buffer += 4; 1666 buffer += 4;
1692 data += tex_width; 1667 data += tex_width;
1693 } 1668 }
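
The RADEON_COPY_MT() helper introduced above folds the copy-from-user, error-report and early-return pattern into a single statement, which is what lets every branch of this micro-tiled upload path collapse as shown; the do { } while (0) wrapper keeps it safe inside unbraced if/else arms. Each use expands to roughly:

	if (DRM_COPY_FROM_USER(buffer, data, (16))) {
		DRM_ERROR("EFAULT on pad, %d bytes\n", (16));
		return DRM_ERR(EFAULT);
	}
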
@@ -1695,37 +1670,13 @@ static int radeon_cp_dispatch_texture(DRMFILE filp,
1695 /* TODO: make sure this works when not fitting in one buffer 1670 /* TODO: make sure this works when not fitting in one buffer
1696 (i.e. 32bytes x 2048...) */ 1671 (i.e. 32bytes x 2048...) */
1697 for (i = 0; i < tex->height; i += 2) { 1672 for (i = 0; i < tex->height; i += 2) {
1698 if (DRM_COPY_FROM_USER 1673 RADEON_COPY_MT(buffer, data, 16);
1699 (buffer, data, 16)) {
1700 DRM_ERROR
1701 ("EFAULT on pad, %d bytes\n",
1702 tex_width);
1703 return DRM_ERR(EFAULT);
1704 }
1705 data += 16; 1674 data += 16;
1706 if (DRM_COPY_FROM_USER 1675 RADEON_COPY_MT(buffer + 8, data, 16);
1707 (buffer + 8, data, 16)) {
1708 DRM_ERROR
1709 ("EFAULT on pad, %d bytes\n",
1710 tex_width);
1711 return DRM_ERR(EFAULT);
1712 }
1713 data += 16; 1676 data += 16;
1714 if (DRM_COPY_FROM_USER 1677 RADEON_COPY_MT(buffer + 4, data, 16);
1715 (buffer + 4, data, 16)) {
1716 DRM_ERROR
1717 ("EFAULT on pad, %d bytes\n",
1718 tex_width);
1719 return DRM_ERR(EFAULT);
1720 }
1721 data += 16; 1678 data += 16;
1722 if (DRM_COPY_FROM_USER 1679 RADEON_COPY_MT(buffer + 12, data, 16);
1723 (buffer + 12, data, 16)) {
1724 DRM_ERROR
1725 ("EFAULT on pad, %d bytes\n",
1726 tex_width);
1727 return DRM_ERR(EFAULT);
1728 }
1729 data += 16; 1680 data += 16;
1730 buffer += 16; 1681 buffer += 16;
1731 } 1682 }
@@ -1735,31 +1686,22 @@ static int radeon_cp_dispatch_texture(DRMFILE filp,
1735 /* Texture image width is larger than the minimum, so we 1686 /* Texture image width is larger than the minimum, so we
1736 * can upload it directly. 1687 * can upload it directly.
1737 */ 1688 */
1738 if (DRM_COPY_FROM_USER(buffer, data, 1689 RADEON_COPY_MT(buffer, data,
1739 dwords * sizeof(u32))) { 1690 dwords * sizeof(u32));
1740 DRM_ERROR("EFAULT on data, %d dwords\n",
1741 dwords);
1742 return DRM_ERR(EFAULT);
1743 }
1744 } else { 1691 } else {
1745 /* Texture image width is less than the minimum, so we 1692 /* Texture image width is less than the minimum, so we
1746 * need to pad out each image scanline to the minimum 1693 * need to pad out each image scanline to the minimum
1747 * width. 1694 * width.
1748 */ 1695 */
1749 for (i = 0; i < tex->height; i++) { 1696 for (i = 0; i < tex->height; i++) {
1750 if (DRM_COPY_FROM_USER 1697 RADEON_COPY_MT(buffer, data, tex_width);
1751 (buffer, data, tex_width)) {
1752 DRM_ERROR
1753 ("EFAULT on pad, %d bytes\n",
1754 tex_width);
1755 return DRM_ERR(EFAULT);
1756 }
1757 buffer += 8; 1698 buffer += 8;
1758 data += tex_width; 1699 data += tex_width;
1759 } 1700 }
1760 } 1701 }
1761 } 1702 }
1762 1703
1704#undef RADEON_COPY_MT
1763 buf->filp = filp; 1705 buf->filp = filp;
1764 buf->used = size; 1706 buf->used = size;
1765 offset = dev_priv->gart_buffers_offset + buf->offset; 1707 offset = dev_priv->gart_buffers_offset + buf->offset;
@@ -1821,7 +1763,7 @@ static void radeon_cp_dispatch_stipple(drm_device_t * dev, u32 * stipple)
1821} 1763}
1822 1764
1823static void radeon_apply_surface_regs(int surf_index, 1765static void radeon_apply_surface_regs(int surf_index,
1824 drm_radeon_private_t * dev_priv) 1766 drm_radeon_private_t *dev_priv)
1825{ 1767{
1826 if (!dev_priv->mmio) 1768 if (!dev_priv->mmio)
1827 return; 1769 return;
@@ -1847,8 +1789,8 @@ static void radeon_apply_surface_regs(int surf_index,
1847 * freed, we suddenly need two surfaces to store A and C, which might 1789 * freed, we suddenly need two surfaces to store A and C, which might
1848 * not always be available. 1790 * not always be available.
1849 */ 1791 */
1850static int alloc_surface(drm_radeon_surface_alloc_t * new, 1792static int alloc_surface(drm_radeon_surface_alloc_t *new,
1851 drm_radeon_private_t * dev_priv, DRMFILE filp) 1793 drm_radeon_private_t *dev_priv, DRMFILE filp)
1852{ 1794{
1853 struct radeon_virt_surface *s; 1795 struct radeon_virt_surface *s;
1854 int i; 1796 int i;
@@ -2158,6 +2100,11 @@ static int radeon_cp_vertex(DRM_IOCTL_ARGS)
2158 2100
2159 LOCK_TEST_WITH_RETURN(dev, filp); 2101 LOCK_TEST_WITH_RETURN(dev, filp);
2160 2102
2103 if (!dev_priv) {
2104 DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
2105 return DRM_ERR(EINVAL);
2106 }
2107
2161 DRM_GET_PRIV_WITH_RETURN(filp_priv, filp); 2108 DRM_GET_PRIV_WITH_RETURN(filp_priv, filp);
2162 2109
2163 DRM_COPY_FROM_USER_IOCTL(vertex, (drm_radeon_vertex_t __user *) data, 2110 DRM_COPY_FROM_USER_IOCTL(vertex, (drm_radeon_vertex_t __user *) data,
@@ -2596,9 +2543,9 @@ static int radeon_emit_packets(drm_radeon_private_t * dev_priv,
2596 return 0; 2543 return 0;
2597} 2544}
2598 2545
2599static __inline__ int radeon_emit_scalars(drm_radeon_private_t * dev_priv, 2546static __inline__ int radeon_emit_scalars(drm_radeon_private_t *dev_priv,
2600 drm_radeon_cmd_header_t header, 2547 drm_radeon_cmd_header_t header,
2601 drm_radeon_kcmd_buffer_t * cmdbuf) 2548 drm_radeon_kcmd_buffer_t *cmdbuf)
2602{ 2549{
2603 int sz = header.scalars.count; 2550 int sz = header.scalars.count;
2604 int start = header.scalars.offset; 2551 int start = header.scalars.offset;
@@ -2618,9 +2565,9 @@ static __inline__ int radeon_emit_scalars(drm_radeon_private_t * dev_priv,
2618 2565
2619/* God this is ugly 2566/* God this is ugly
2620 */ 2567 */
2621static __inline__ int radeon_emit_scalars2(drm_radeon_private_t * dev_priv, 2568static __inline__ int radeon_emit_scalars2(drm_radeon_private_t *dev_priv,
2622 drm_radeon_cmd_header_t header, 2569 drm_radeon_cmd_header_t header,
2623 drm_radeon_kcmd_buffer_t * cmdbuf) 2570 drm_radeon_kcmd_buffer_t *cmdbuf)
2624{ 2571{
2625 int sz = header.scalars.count; 2572 int sz = header.scalars.count;
2626 int start = ((unsigned int)header.scalars.offset) + 0x100; 2573 int start = ((unsigned int)header.scalars.offset) + 0x100;
@@ -2638,9 +2585,9 @@ static __inline__ int radeon_emit_scalars2(drm_radeon_private_t * dev_priv,
2638 return 0; 2585 return 0;
2639} 2586}
2640 2587
2641static __inline__ int radeon_emit_vectors(drm_radeon_private_t * dev_priv, 2588static __inline__ int radeon_emit_vectors(drm_radeon_private_t *dev_priv,
2642 drm_radeon_cmd_header_t header, 2589 drm_radeon_cmd_header_t header,
2643 drm_radeon_kcmd_buffer_t * cmdbuf) 2590 drm_radeon_kcmd_buffer_t *cmdbuf)
2644{ 2591{
2645 int sz = header.vectors.count; 2592 int sz = header.vectors.count;
2646 int start = header.vectors.offset; 2593 int start = header.vectors.offset;
@@ -2685,8 +2632,8 @@ static int radeon_emit_packet3(drm_device_t * dev,
2685 return 0; 2632 return 0;
2686} 2633}
2687 2634
2688static int radeon_emit_packet3_cliprect(drm_device_t * dev, 2635static int radeon_emit_packet3_cliprect(drm_device_t *dev,
2689 drm_file_t * filp_priv, 2636 drm_file_t *filp_priv,
2690 drm_radeon_kcmd_buffer_t *cmdbuf, 2637 drm_radeon_kcmd_buffer_t *cmdbuf,
2691 int orig_nbox) 2638 int orig_nbox)
2692{ 2639{
@@ -2818,7 +2765,8 @@ static int radeon_cp_cmdbuf(DRM_IOCTL_ARGS)
2818 kbuf = drm_alloc(cmdbuf.bufsz, DRM_MEM_DRIVER); 2765 kbuf = drm_alloc(cmdbuf.bufsz, DRM_MEM_DRIVER);
2819 if (kbuf == NULL) 2766 if (kbuf == NULL)
2820 return DRM_ERR(ENOMEM); 2767 return DRM_ERR(ENOMEM);
2821 if (DRM_COPY_FROM_USER(kbuf, (void __user *)cmdbuf.buf, cmdbuf.bufsz)) { 2768 if (DRM_COPY_FROM_USER(kbuf, (void __user *)cmdbuf.buf,
2769 cmdbuf.bufsz)) {
2822 drm_free(kbuf, orig_bufsz, DRM_MEM_DRIVER); 2770 drm_free(kbuf, orig_bufsz, DRM_MEM_DRIVER);
2823 return DRM_ERR(EFAULT); 2771 return DRM_ERR(EFAULT);
2824 } 2772 }
@@ -2981,7 +2929,7 @@ static int radeon_cp_getparam(DRM_IOCTL_ARGS)
2981 value = dev_priv->gart_vm_start; 2929 value = dev_priv->gart_vm_start;
2982 break; 2930 break;
2983 case RADEON_PARAM_REGISTER_HANDLE: 2931 case RADEON_PARAM_REGISTER_HANDLE:
2984 value = dev_priv->mmio_offset; 2932 value = dev_priv->mmio->offset;
2985 break; 2933 break;
2986 case RADEON_PARAM_STATUS_HANDLE: 2934 case RADEON_PARAM_STATUS_HANDLE:
2987 value = dev_priv->ring_rptr_offset; 2935 value = dev_priv->ring_rptr_offset;
@@ -3004,6 +2952,15 @@ static int radeon_cp_getparam(DRM_IOCTL_ARGS)
3004 case RADEON_PARAM_GART_TEX_HANDLE: 2952 case RADEON_PARAM_GART_TEX_HANDLE:
3005 value = dev_priv->gart_textures_offset; 2953 value = dev_priv->gart_textures_offset;
3006 break; 2954 break;
2955
2956 case RADEON_PARAM_CARD_TYPE:
2957 if (dev_priv->flags & CHIP_IS_PCIE)
2958 value = RADEON_CARD_PCIE;
2959 else if (dev_priv->flags & CHIP_IS_AGP)
2960 value = RADEON_CARD_AGP;
2961 else
2962 value = RADEON_CARD_PCI;
2963 break;
3007 default: 2964 default:
3008 return DRM_ERR(EINVAL); 2965 return DRM_ERR(EINVAL);
3009 } 2966 }
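
RADEON_PARAM_CARD_TYPE is the kernel half of the 1.21 "card type getparam" interface noted in radeon_drv.h; the RADEON_CARD_PCIE/RADEON_CARD_AGP/RADEON_CARD_PCI values come from radeon_drm.h in this same patch. A hedged userspace sketch of querying it through libdrm's drmCommandWriteRead() (helper name and error handling are illustrative only):

	#include <string.h>
	#include <xf86drm.h>
	#include "radeon_drm.h"

	static int radeon_get_card_type(int fd, int *card_type)
	{
		drm_radeon_getparam_t gp;

		memset(&gp, 0, sizeof(gp));
		gp.param = RADEON_PARAM_CARD_TYPE;
		gp.value = card_type;	/* kernel writes a RADEON_CARD_* value here */

		return drmCommandWriteRead(fd, DRM_RADEON_GETPARAM, &gp, sizeof(gp));
	}
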
@@ -3066,6 +3023,7 @@ static int radeon_cp_setparam(DRM_IOCTL_ARGS)
3066/* When a client dies: 3023/* When a client dies:
3067 * - Check for and clean up flipped page state 3024 * - Check for and clean up flipped page state
3068 * - Free any alloced GART memory. 3025 * - Free any alloced GART memory.
3026 * - Free any alloced radeon surfaces.
3069 * 3027 *
3070 * DRM infrastructure takes care of reclaiming dma buffers. 3028 * DRM infrastructure takes care of reclaiming dma buffers.
3071 */ 3029 */
@@ -3092,6 +3050,7 @@ int radeon_driver_open(drm_device_t * dev, drm_file_t * filp_priv)
3092 drm_radeon_private_t *dev_priv = dev->dev_private; 3050 drm_radeon_private_t *dev_priv = dev->dev_private;
3093 struct drm_radeon_driver_file_fields *radeon_priv; 3051 struct drm_radeon_driver_file_fields *radeon_priv;
3094 3052
3053 DRM_DEBUG("\n");
3095 radeon_priv = 3054 radeon_priv =
3096 (struct drm_radeon_driver_file_fields *) 3055 (struct drm_radeon_driver_file_fields *)
3097 drm_alloc(sizeof(*radeon_priv), DRM_MEM_FILES); 3056 drm_alloc(sizeof(*radeon_priv), DRM_MEM_FILES);
@@ -3100,6 +3059,7 @@ int radeon_driver_open(drm_device_t * dev, drm_file_t * filp_priv)
3100 return -ENOMEM; 3059 return -ENOMEM;
3101 3060
3102 filp_priv->driver_priv = radeon_priv; 3061 filp_priv->driver_priv = radeon_priv;
3062
3103 if (dev_priv) 3063 if (dev_priv)
3104 radeon_priv->radeon_fb_delta = dev_priv->fb_location; 3064 radeon_priv->radeon_fb_delta = dev_priv->fb_location;
3105 else 3065 else