Diffstat (limited to 'drivers/char/drm')
-rw-r--r--  drivers/char/drm/Kconfig           |   16
-rw-r--r--  drivers/char/drm/Makefile          |    7
-rw-r--r--  drivers/char/drm/drm.h             |    8
-rw-r--r--  drivers/char/drm/drmP.h            |  137
-rw-r--r--  drivers/char/drm/drm_agpsupport.c  |  143
-rw-r--r--  drivers/char/drm/drm_bufs.c        |  647
-rw-r--r--  drivers/char/drm/drm_context.c     |   17
-rw-r--r--  drivers/char/drm/drm_drv.c         |   79
-rw-r--r--  drivers/char/drm/drm_fops.c        |    6
-rw-r--r--  drivers/char/drm/drm_ioctl.c       |    2
-rw-r--r--  drivers/char/drm/drm_memory.c      |    8
-rw-r--r--  drivers/char/drm/drm_pci.c         |   55
-rw-r--r--  drivers/char/drm/drm_pciids.h      |   79
-rw-r--r--  drivers/char/drm/drm_proc.c        |   17
-rw-r--r--  drivers/char/drm/drm_scatter.c     |   11
-rw-r--r--  drivers/char/drm/drm_stub.c        |    8
-rw-r--r--  drivers/char/drm/drm_vm.c          |   92
-rw-r--r--  drivers/char/drm/ffb_drv.c         |    5
-rw-r--r--  drivers/char/drm/gamma_context.h   |  492
-rw-r--r--  drivers/char/drm/gamma_dma.c       |  946
-rw-r--r--  drivers/char/drm/gamma_drm.h       |   90
-rw-r--r--  drivers/char/drm/gamma_drv.c       |   59
-rw-r--r--  drivers/char/drm/gamma_drv.h       |  147
-rw-r--r--  drivers/char/drm/gamma_lists.h     |  215
-rw-r--r--  drivers/char/drm/gamma_lock.h      |  140
-rw-r--r--  drivers/char/drm/gamma_old_dma.h   |  313
-rw-r--r--  drivers/char/drm/i810_dma.c        |   22
-rw-r--r--  drivers/char/drm/i810_drv.c        |    1
-rw-r--r--  drivers/char/drm/i810_drv.h        |    1
-rw-r--r--  drivers/char/drm/i830_dma.c        |   22
-rw-r--r--  drivers/char/drm/i830_drv.c        |    1
-rw-r--r--  drivers/char/drm/i830_drv.h        |    1
-rw-r--r--  drivers/char/drm/i915_dma.c        |   31
-rw-r--r--  drivers/char/drm/i915_drv.c        |    1
-rw-r--r--  drivers/char/drm/i915_drv.h        |    4
-rw-r--r--  drivers/char/drm/mga_dma.c         |  602
-rw-r--r--  drivers/char/drm/mga_drm.h         |   98
-rw-r--r--  drivers/char/drm/mga_drv.c         |   45
-rw-r--r--  drivers/char/drm/mga_drv.h         |   94
-rw-r--r--  drivers/char/drm/mga_ioc32.c       |   67
-rw-r--r--  drivers/char/drm/mga_irq.c         |   72
-rw-r--r--  drivers/char/drm/mga_state.c       |  158
-rw-r--r--  drivers/char/drm/mga_warp.c        |  141
-rw-r--r--  drivers/char/drm/r128_cce.c        |    6
-rw-r--r--  drivers/char/drm/r128_drm.h        |    2
-rw-r--r--  drivers/char/drm/r300_cmdbuf.c     |  801
-rw-r--r--  drivers/char/drm/r300_reg.h        | 1412
-rw-r--r--  drivers/char/drm/radeon_cp.c       |   35
-rw-r--r--  drivers/char/drm/radeon_drm.h      |   46
-rw-r--r--  drivers/char/drm/radeon_drv.c      |    1
-rw-r--r--  drivers/char/drm/radeon_drv.h      |   30
-rw-r--r--  drivers/char/drm/radeon_state.c    |   75
-rw-r--r--  drivers/char/drm/savage_bci.c      | 1096
-rw-r--r--  drivers/char/drm/savage_drm.h      |  209
-rw-r--r--  drivers/char/drm/savage_drv.c      |  112
-rw-r--r--  drivers/char/drm/savage_drv.h      |  579
-rw-r--r--  drivers/char/drm/savage_state.c    | 1146
57 files changed, 7407 insertions, 3243 deletions
diff --git a/drivers/char/drm/Kconfig b/drivers/char/drm/Kconfig
index 123417e43040..56ace9d5e2ae 100644
--- a/drivers/char/drm/Kconfig
+++ b/drivers/char/drm/Kconfig
@@ -23,13 +23,6 @@ config DRM_TDFX
 	  Choose this option if you have a 3dfx Banshee or Voodoo3 (or later),
 	  graphics card.  If M is selected, the module will be called tdfx.
 
-config DRM_GAMMA
-	tristate "3dlabs GMX 2000"
-	depends on DRM && BROKEN
-	help
-	  This is the old gamma driver, please tell me if it might actually
-	  work.
-
 config DRM_R128
 	tristate "ATI Rage 128"
 	depends on DRM && PCI
@@ -82,7 +75,7 @@ endchoice
 
 config DRM_MGA
 	tristate "Matrox g200/g400"
-	depends on DRM && AGP
+	depends on DRM
 	help
 	  Choose this option if you have a Matrox G200, G400 or G450 graphics
 	  card.  If M is selected, the module will be called mga.  AGP
@@ -103,3 +96,10 @@ config DRM_VIA
 	  Choose this option if you have a Via unichrome or compatible video
 	  chipset. If M is selected the module will be called via.
 
+config DRM_SAVAGE
+	tristate "Savage video cards"
+	depends on DRM
+	help
+	  Choose this option if you have a Savage3D/4/SuperSavage/Pro/Twister
+	  chipset. If M is selected the module will be called savage.
+
diff --git a/drivers/char/drm/Makefile b/drivers/char/drm/Makefile
index ddd941045b1f..e41060c76226 100644
--- a/drivers/char/drm/Makefile
+++ b/drivers/char/drm/Makefile
@@ -8,16 +8,16 @@ drm-objs := drm_auth.o drm_bufs.o drm_context.o drm_dma.o drm_drawable.o \
 		drm_agpsupport.o drm_scatter.o ati_pcigart.o drm_pci.o \
 		drm_sysfs.o
 
-gamma-objs  := gamma_drv.o gamma_dma.o
 tdfx-objs   := tdfx_drv.o
 r128-objs   := r128_drv.o r128_cce.o r128_state.o r128_irq.o
 mga-objs    := mga_drv.o mga_dma.o mga_state.o mga_warp.o mga_irq.o
 i810-objs   := i810_drv.o i810_dma.o
 i830-objs   := i830_drv.o i830_dma.o i830_irq.o
 i915-objs   := i915_drv.o i915_dma.o i915_irq.o i915_mem.o
-radeon-objs := radeon_drv.o radeon_cp.o radeon_state.o radeon_mem.o radeon_irq.o
+radeon-objs := radeon_drv.o radeon_cp.o radeon_state.o radeon_mem.o radeon_irq.o r300_cmdbuf.o
 ffb-objs    := ffb_drv.o ffb_context.o
 sis-objs    := sis_drv.o sis_ds.o sis_mm.o
+savage-objs := savage_drv.o savage_bci.o savage_state.o
 via-objs    := via_irq.o via_drv.o via_ds.o via_map.o via_mm.o via_dma.o via_verifier.o via_video.o
 
 ifeq ($(CONFIG_COMPAT),y)
@@ -29,7 +29,6 @@ i915-objs += i915_ioc32.o
 endif
 
 obj-$(CONFIG_DRM)	+= drm.o
-obj-$(CONFIG_DRM_GAMMA) += gamma.o
 obj-$(CONFIG_DRM_TDFX)	+= tdfx.o
 obj-$(CONFIG_DRM_R128)	+= r128.o
 obj-$(CONFIG_DRM_RADEON)+= radeon.o
@@ -39,5 +38,7 @@ obj-$(CONFIG_DRM_I830) += i830.o
 obj-$(CONFIG_DRM_I915)	+= i915.o
 obj-$(CONFIG_DRM_FFB)	+= ffb.o
 obj-$(CONFIG_DRM_SIS)	+= sis.o
+obj-$(CONFIG_DRM_SAVAGE)+= savage.o
 obj-$(CONFIG_DRM_VIA)	+=via.o
 
+
diff --git a/drivers/char/drm/drm.h b/drivers/char/drm/drm.h
index e8371dd87fbc..fc6598a81acd 100644
--- a/drivers/char/drm/drm.h
+++ b/drivers/char/drm/drm.h
@@ -98,7 +98,7 @@
 #define _DRM_LOCKING_CONTEXT(lock) ((lock) & ~(_DRM_LOCK_HELD|_DRM_LOCK_CONT))
 
 
-typedef unsigned long drm_handle_t;
+typedef unsigned int drm_handle_t;
 typedef unsigned int drm_context_t;
 typedef unsigned int drm_drawable_t;
 typedef unsigned int drm_magic_t;
@@ -209,7 +209,8 @@ typedef enum drm_map_type {
 	_DRM_REGISTERS      = 1,  /**< no caching, no core dump */
 	_DRM_SHM            = 2,  /**< shared, cached */
 	_DRM_AGP            = 3,  /**< AGP/GART */
-	_DRM_SCATTER_GATHER = 4   /**< Scatter/gather memory for PCI DMA */
+	_DRM_SCATTER_GATHER = 4,  /**< Scatter/gather memory for PCI DMA */
+	_DRM_CONSISTENT     = 5,  /**< Consistent memory for PCI DMA */
 } drm_map_type_t;
 
 
@@ -368,7 +369,8 @@ typedef struct drm_buf_desc {
 	enum {
 		_DRM_PAGE_ALIGN = 0x01,	/**< Align on page boundaries for DMA */
 		_DRM_AGP_BUFFER = 0x02,	/**< Buffer is in AGP space */
-		_DRM_SG_BUFFER  = 0x04	/**< Scatter/gather memory buffer */
+		_DRM_SG_BUFFER  = 0x04,	/**< Scatter/gather memory buffer */
+		_DRM_FB_BUFFER  = 0x08	/**< Buffer is in frame buffer */
 	} flags;
 	unsigned long agp_start; /**<
 				  * Start address of where the AGP buffers are
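[Editorial sketch -- not part of the patch. It shows how a driver might request one of the new _DRM_CONSISTENT mappings through the reworked drm_addmap() that this series adds to drmP.h/drm_bufs.c below; the size and flag values here are made-up illustration values.]

	drm_local_map_t *map;
	int err;

	/* hypothetical 64KB consistent-DMA region; for _DRM_CONSISTENT the
	 * core allocates the memory itself and fills in handle/offset */
	err = drm_addmap(dev, 0, 0x10000, _DRM_CONSISTENT,
			 _DRM_READ_ONLY, &map);
	if (err)
		return err;
	DRM_DEBUG("consistent map: handle %p, bus offset 0x%lx\n",
		  map->handle, map->offset);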
diff --git a/drivers/char/drm/drmP.h b/drivers/char/drm/drmP.h
index 5df09cc8c6db..6f98701dfe15 100644
--- a/drivers/char/drm/drmP.h
+++ b/drivers/char/drm/drmP.h
@@ -53,7 +53,6 @@
 #include <linux/init.h>
 #include <linux/file.h>
 #include <linux/pci.h>
-#include <linux/version.h>
 #include <linux/jiffies.h>
 #include <linux/smp_lock.h>	/* For (un)lock_kernel */
 #include <linux/mm.h>
@@ -96,6 +95,7 @@
 #define DRIVER_IRQ_SHARED  0x80
 #define DRIVER_IRQ_VBL     0x100
 #define DRIVER_DMA_QUEUE   0x200
+#define DRIVER_FB_DMA      0x400
 
 /***********************************************************************/
 /** \name Begin the DRM... */
@@ -160,36 +160,7 @@
 #define pte_unmap(pte)
 #endif
 
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,4,19)
-static inline struct page * vmalloc_to_page(void * vmalloc_addr)
-{
-	unsigned long addr = (unsigned long) vmalloc_addr;
-	struct page *page = NULL;
-	pgd_t *pgd = pgd_offset_k(addr);
-	pmd_t *pmd;
-	pte_t *ptep, pte;
-
-	if (!pgd_none(*pgd)) {
-		pmd = pmd_offset(pgd, addr);
-		if (!pmd_none(*pmd)) {
-			preempt_disable();
-			ptep = pte_offset_map(pmd, addr);
-			pte = *ptep;
-			if (pte_present(pte))
-				page = pte_page(pte);
-			pte_unmap(ptep);
-			preempt_enable();
-		}
-	}
-	return page;
-}
-#endif
-
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
-#define DRM_RPR_ARG(vma)
-#else
 #define DRM_RPR_ARG(vma) vma,
-#endif
 
 #define VM_OFFSET(vma) ((vma)->vm_pgoff << PAGE_SHIFT)
 
@@ -474,7 +445,8 @@ typedef struct drm_device_dma {
 	unsigned long byte_count;
 	enum {
 		_DRM_DMA_USE_AGP = 0x01,
-		_DRM_DMA_USE_SG = 0x02
+		_DRM_DMA_USE_SG = 0x02,
+		_DRM_DMA_USE_FB = 0x04
 	} flags;
 
 } drm_device_dma_t;
@@ -525,12 +497,19 @@ typedef struct drm_sigdata {
 	drm_hw_lock_t *lock;
 } drm_sigdata_t;
 
+typedef struct drm_dma_handle {
+	dma_addr_t busaddr;
+	void *vaddr;
+	size_t size;
+} drm_dma_handle_t;
+
 /**
  * Mappings list
  */
 typedef struct drm_map_list {
 	struct list_head	head;	/**< list head */
 	drm_map_t		*map;	/**< mapping */
+	unsigned int user_token;
 } drm_map_list_t;
 
 typedef drm_map_t drm_local_map_t;
@@ -578,7 +557,22 @@ struct drm_driver {
 	int (*kernel_context_switch)(struct drm_device *dev, int old, int new);
 	void (*kernel_context_switch_unlock)(struct drm_device *dev, drm_lock_t *lock);
 	int (*vblank_wait)(struct drm_device *dev, unsigned int *sequence);
+
+	/**
+	 * Called by \c drm_device_is_agp.  Typically used to determine if a
+	 * card is really attached to AGP or not.
+	 *
+	 * \param dev  DRM device handle
+	 *
+	 * \returns
+	 * One of three values is returned depending on whether or not the
+	 * card is absolutely \b not AGP (return of 0), absolutely \b is AGP
+	 * (return of 1), or may or may not be AGP (return of 2).
+	 */
+	int (*device_is_agp) (struct drm_device * dev);
+
 	/* these have to be filled in */
+
 	int (*postinit)(struct drm_device *, unsigned long flags);
 	irqreturn_t (*irq_handler)( DRM_IRQ_ARGS );
 	void (*irq_preinstall)(struct drm_device *dev);
@@ -722,12 +716,8 @@ typedef struct drm_device {
 	int pci_slot;		/**< PCI slot number */
 	int pci_func;		/**< PCI function number */
 #ifdef __alpha__
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,4,3)
-	struct pci_controler *hose;
-#else
 	struct pci_controller *hose;
 #endif
-#endif
 	drm_sg_mem_t *sg;	/**< Scatter gather memory */
 	unsigned long *ctx_bitmap;	/**< context bitmap */
 	void *dev_private;	/**< device private data */
@@ -736,6 +726,7 @@ typedef struct drm_device {
 
 	struct drm_driver *driver;
 	drm_local_map_t *agp_buffer_map;
+	unsigned int agp_buffer_token;
 	drm_head_t primary;		/**< primary screen head */
 } drm_device_t;
 
@@ -806,7 +797,7 @@ extern void *drm_ioremap_nocache(unsigned long offset, unsigned long size,
 			       drm_device_t *dev);
 extern void drm_ioremapfree(void *pt, unsigned long size, drm_device_t *dev);
 
-extern DRM_AGP_MEM *drm_alloc_agp(struct agp_bridge_data *bridge, int pages, u32 type);
+extern DRM_AGP_MEM *drm_alloc_agp(drm_device_t *dev, int pages, u32 type);
 extern int drm_free_agp(DRM_AGP_MEM *handle, int pages);
 extern int drm_bind_agp(DRM_AGP_MEM *handle, unsigned int start);
 extern int drm_unbind_agp(DRM_AGP_MEM *handle);
@@ -881,11 +872,19 @@ extern int drm_lock_free(drm_device_t *dev,
 			 unsigned int context);
 
 				/* Buffer management support (drm_bufs.h) */
+extern int drm_addbufs_agp(drm_device_t *dev, drm_buf_desc_t *request);
+extern int drm_addbufs_pci(drm_device_t *dev, drm_buf_desc_t *request);
+extern int drm_addmap(drm_device_t *dev, unsigned int offset,
+		      unsigned int size, drm_map_type_t type,
+		      drm_map_flags_t flags, drm_local_map_t **map_ptr);
+extern int drm_addmap_ioctl(struct inode *inode, struct file *filp,
+			    unsigned int cmd, unsigned long arg);
+extern int drm_rmmap(drm_device_t *dev, drm_local_map_t *map);
+extern int drm_rmmap_locked(drm_device_t *dev, drm_local_map_t *map);
+extern int drm_rmmap_ioctl(struct inode *inode, struct file *filp,
+			   unsigned int cmd, unsigned long arg);
+
 extern int drm_order( unsigned long size );
-extern int drm_addmap( struct inode *inode, struct file *filp,
-		       unsigned int cmd, unsigned long arg );
-extern int drm_rmmap( struct inode *inode, struct file *filp,
-		      unsigned int cmd, unsigned long arg );
 extern int drm_addbufs( struct inode *inode, struct file *filp,
 			unsigned int cmd, unsigned long arg );
 extern int drm_infobufs( struct inode *inode, struct file *filp,
@@ -896,6 +895,10 @@ extern int drm_freebufs( struct inode *inode, struct file *filp,
 			 unsigned int cmd, unsigned long arg );
 extern int drm_mapbufs( struct inode *inode, struct file *filp,
 			unsigned int cmd, unsigned long arg );
+extern unsigned long drm_get_resource_start(drm_device_t *dev,
+					    unsigned int resource);
+extern unsigned long drm_get_resource_len(drm_device_t *dev,
+					  unsigned int resource);
 
 				/* DMA support (drm_dma.h) */
 extern int drm_dma_setup(drm_device_t *dev);
@@ -919,15 +922,18 @@ extern void drm_vbl_send_signals( drm_device_t *dev );
 
 				/* AGP/GART support (drm_agpsupport.h) */
 extern drm_agp_head_t *drm_agp_init(drm_device_t *dev);
-extern int drm_agp_acquire(struct inode *inode, struct file *filp,
-			   unsigned int cmd, unsigned long arg);
-extern void drm_agp_do_release(drm_device_t *dev);
-extern int drm_agp_release(struct inode *inode, struct file *filp,
-			   unsigned int cmd, unsigned long arg);
-extern int drm_agp_enable(struct inode *inode, struct file *filp,
-			  unsigned int cmd, unsigned long arg);
-extern int drm_agp_info(struct inode *inode, struct file *filp,
-			unsigned int cmd, unsigned long arg);
+extern int drm_agp_acquire(drm_device_t * dev);
+extern int drm_agp_acquire_ioctl(struct inode *inode, struct file *filp,
+				 unsigned int cmd, unsigned long arg);
+extern int drm_agp_release(drm_device_t *dev);
+extern int drm_agp_release_ioctl(struct inode *inode, struct file *filp,
+				 unsigned int cmd, unsigned long arg);
+extern int drm_agp_enable(drm_device_t *dev, drm_agp_mode_t mode);
+extern int drm_agp_enable_ioctl(struct inode *inode, struct file *filp,
+				unsigned int cmd, unsigned long arg);
+extern int drm_agp_info(drm_device_t * dev, drm_agp_info_t *info);
+extern int drm_agp_info_ioctl(struct inode *inode, struct file *filp,
+			      unsigned int cmd, unsigned long arg);
 extern int drm_agp_alloc(struct inode *inode, struct file *filp,
 			 unsigned int cmd, unsigned long arg);
 extern int drm_agp_free(struct inode *inode, struct file *filp,
@@ -976,12 +982,10 @@ extern int drm_ati_pcigart_cleanup(drm_device_t *dev,
 				   unsigned long addr,
 				   dma_addr_t bus_addr);
 
-extern void *drm_pci_alloc(drm_device_t * dev, size_t size,
-			   size_t align, dma_addr_t maxaddr,
-			   dma_addr_t * busaddr);
-
-extern void drm_pci_free(drm_device_t * dev, size_t size,
-			 void *vaddr, dma_addr_t busaddr);
+extern drm_dma_handle_t *drm_pci_alloc(drm_device_t *dev, size_t size,
+				       size_t align, dma_addr_t maxaddr);
+extern void __drm_pci_free(drm_device_t *dev, drm_dma_handle_t *dmah);
+extern void drm_pci_free(drm_device_t *dev, drm_dma_handle_t *dmah);
 
 				/* sysfs support (drm_sysfs.c) */
 struct drm_sysfs_class;
@@ -1012,17 +1016,26 @@ static __inline__ void drm_core_ioremapfree(struct drm_map *map, struct drm_devi
 	drm_ioremapfree( map->handle, map->size, dev );
 }
 
-static __inline__ struct drm_map *drm_core_findmap(struct drm_device *dev, unsigned long offset)
+static __inline__ struct drm_map *drm_core_findmap(struct drm_device *dev, unsigned int token)
 {
-	struct list_head *_list;
-	list_for_each( _list, &dev->maplist->head ) {
-		drm_map_list_t *_entry = list_entry( _list, drm_map_list_t, head );
-		if ( _entry->map &&
-		     _entry->map->offset == offset ) {
+	drm_map_list_t *_entry;
+	list_for_each_entry(_entry, &dev->maplist->head, head)
+		if (_entry->user_token == token)
 			return _entry->map;
+	return NULL;
+}
+
+static __inline__ int drm_device_is_agp(drm_device_t *dev)
+{
+	if ( dev->driver->device_is_agp != NULL ) {
+		int err = (*dev->driver->device_is_agp)( dev );
+
+		if (err != 2) {
+			return err;
 		}
 	}
-	return NULL;
+
+	return pci_find_capability(dev->pdev, PCI_CAP_ID_AGP);
 }
 
 static __inline__ void drm_core_dropmap(struct drm_map *map)
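[Editorial sketch -- not part of the patch. A driver-side implementation of the new device_is_agp hook following the 0/1/2 contract documented in the drmP.h hunk above; the PCI device ID test is hypothetical.]

static int example_device_is_agp(struct drm_device *dev)
{
	/* hypothetical PCI-only variant of an otherwise AGP chip family */
	if (dev->pdev->device == 0x0f00)	/* made-up device ID */
		return 0;	/* definitely not AGP */

	/* unsure: let drm_device_is_agp() fall back to
	 * pci_find_capability(dev->pdev, PCI_CAP_ID_AGP) */
	return 2;
}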
diff --git a/drivers/char/drm/drm_agpsupport.c b/drivers/char/drm/drm_agpsupport.c
index 8d94c0b5fa44..8c215adcb4b2 100644
--- a/drivers/char/drm/drm_agpsupport.c
+++ b/drivers/char/drm/drm_agpsupport.c
@@ -37,7 +37,7 @@
 #if __OS_HAS_AGP
 
 /**
- * AGP information ioctl.
+ * Get AGP information.
  *
  * \param inode device inode.
  * \param filp file pointer.
@@ -48,51 +48,56 @@
  * Verifies the AGP device has been initialized and acquired and fills in the
  * drm_agp_info structure with the information in drm_agp_head::agp_info.
  */
-int drm_agp_info(struct inode *inode, struct file *filp,
-		 unsigned int cmd, unsigned long arg)
+int drm_agp_info(drm_device_t *dev, drm_agp_info_t *info)
 {
-	drm_file_t *priv = filp->private_data;
-	drm_device_t *dev = priv->head->dev;
 	DRM_AGP_KERN *kern;
-	drm_agp_info_t info;
 
 	if (!dev->agp || !dev->agp->acquired)
 		return -EINVAL;
 
 	kern = &dev->agp->agp_info;
-	info.agp_version_major = kern->version.major;
-	info.agp_version_minor = kern->version.minor;
-	info.mode = kern->mode;
-	info.aperture_base = kern->aper_base;
-	info.aperture_size = kern->aper_size * 1024 * 1024;
-	info.memory_allowed = kern->max_memory << PAGE_SHIFT;
-	info.memory_used = kern->current_memory << PAGE_SHIFT;
-	info.id_vendor = kern->device->vendor;
-	info.id_device = kern->device->device;
+	info->agp_version_major = kern->version.major;
+	info->agp_version_minor = kern->version.minor;
+	info->mode = kern->mode;
+	info->aperture_base = kern->aper_base;
+	info->aperture_size = kern->aper_size * 1024 * 1024;
+	info->memory_allowed = kern->max_memory << PAGE_SHIFT;
+	info->memory_used = kern->current_memory << PAGE_SHIFT;
+	info->id_vendor = kern->device->vendor;
+	info->id_device = kern->device->device;
 
-	if (copy_to_user((drm_agp_info_t __user *)arg, &info, sizeof(info)))
+	return 0;
+}
+EXPORT_SYMBOL(drm_agp_info);
+
+int drm_agp_info_ioctl(struct inode *inode, struct file *filp,
+		       unsigned int cmd, unsigned long arg)
+{
+	drm_file_t *priv = filp->private_data;
+	drm_device_t *dev = priv->head->dev;
+	drm_agp_info_t info;
+	int err;
+
+	err = drm_agp_info(dev, &info);
+	if (err)
+		return err;
+
+	if (copy_to_user((drm_agp_info_t __user *) arg, &info, sizeof(info)))
 		return -EFAULT;
 	return 0;
 }
 
 /**
- * Acquire the AGP device (ioctl).
+ * Acquire the AGP device.
  *
- * \param inode device inode.
- * \param filp file pointer.
- * \param cmd command.
- * \param arg user argument.
+ * \param dev DRM device that is to acquire AGP
  * \return zero on success or a negative number on failure.
  *
  * Verifies the AGP device hasn't been acquired before and calls
- * agp_acquire().
+ * \c agp_backend_acquire.
  */
-int drm_agp_acquire(struct inode *inode, struct file *filp,
-		    unsigned int cmd, unsigned long arg)
+int drm_agp_acquire(drm_device_t *dev)
 {
-	drm_file_t *priv = filp->private_data;
-	drm_device_t *dev = priv->head->dev;
-
 	if (!dev->agp)
 		return -ENODEV;
 	if (dev->agp->acquired)
@@ -102,9 +107,10 @@ int drm_agp_acquire(struct inode *inode, struct file *filp,
 	dev->agp->acquired = 1;
 	return 0;
 }
+EXPORT_SYMBOL(drm_agp_acquire);
 
 /**
- * Release the AGP device (ioctl).
+ * Acquire the AGP device (ioctl).
  *
  * \param inode device inode.
  * \param filp file pointer.
@@ -112,63 +118,80 @@ int drm_agp_acquire(struct inode *inode, struct file *filp,
  * \param arg user argument.
  * \return zero on success or a negative number on failure.
  *
- * Verifies the AGP device has been acquired and calls agp_backend_release().
+ * Verifies the AGP device hasn't been acquired before and calls
+ * \c agp_backend_acquire.
  */
-int drm_agp_release(struct inode *inode, struct file *filp,
-		    unsigned int cmd, unsigned long arg)
+int drm_agp_acquire_ioctl(struct inode *inode, struct file *filp,
+			  unsigned int cmd, unsigned long arg)
 {
 	drm_file_t *priv = filp->private_data;
-	drm_device_t *dev = priv->head->dev;
 
+	return drm_agp_acquire( (drm_device_t *) priv->head->dev );
+}
+
+/**
+ * Release the AGP device.
+ *
+ * \param dev DRM device that is to release AGP
+ * \return zero on success or a negative number on failure.
+ *
+ * Verifies the AGP device has been acquired and calls \c agp_backend_release.
+ */
+int drm_agp_release(drm_device_t *dev)
+{
 	if (!dev->agp || !dev->agp->acquired)
 		return -EINVAL;
 	agp_backend_release(dev->agp->bridge);
 	dev->agp->acquired = 0;
 	return 0;
-
 }
+EXPORT_SYMBOL(drm_agp_release);
 
-/**
- * Release the AGP device.
- *
- * Calls agp_backend_release().
- */
-void drm_agp_do_release(drm_device_t *dev)
+int drm_agp_release_ioctl(struct inode *inode, struct file *filp,
+			  unsigned int cmd, unsigned long arg)
 {
-	agp_backend_release(dev->agp->bridge);
+	drm_file_t *priv = filp->private_data;
+	drm_device_t *dev = priv->head->dev;
+
+	return drm_agp_release(dev);
 }
 
 /**
  * Enable the AGP bus.
  *
- * \param inode device inode.
- * \param filp file pointer.
- * \param cmd command.
- * \param arg pointer to a drm_agp_mode structure.
+ * \param dev DRM device that has previously acquired AGP.
+ * \param mode Requested AGP mode.
  * \return zero on success or a negative number on failure.
  *
  * Verifies the AGP device has been acquired but not enabled, and calls
- * agp_enable().
+ * \c agp_enable.
  */
-int drm_agp_enable(struct inode *inode, struct file *filp,
-		   unsigned int cmd, unsigned long arg)
+int drm_agp_enable(drm_device_t *dev, drm_agp_mode_t mode)
 {
-	drm_file_t *priv = filp->private_data;
-	drm_device_t *dev = priv->head->dev;
-	drm_agp_mode_t mode;
-
 	if (!dev->agp || !dev->agp->acquired)
 		return -EINVAL;
 
-	if (copy_from_user(&mode, (drm_agp_mode_t __user *)arg, sizeof(mode)))
-		return -EFAULT;
-
 	dev->agp->mode = mode.mode;
 	agp_enable(dev->agp->bridge, mode.mode);
 	dev->agp->base = dev->agp->agp_info.aper_base;
 	dev->agp->enabled = 1;
 	return 0;
 }
+EXPORT_SYMBOL(drm_agp_enable);
+
+int drm_agp_enable_ioctl(struct inode *inode, struct file *filp,
+			 unsigned int cmd, unsigned long arg)
+{
+	drm_file_t *priv = filp->private_data;
+	drm_device_t *dev = priv->head->dev;
+	drm_agp_mode_t mode;
+
+
+	if (copy_from_user(&mode, (drm_agp_mode_t __user *) arg, sizeof(mode)))
+		return -EFAULT;
+
+	return drm_agp_enable(dev, mode);
+}
 
 /**
  * Allocate AGP memory.
@@ -206,7 +229,7 @@ int drm_agp_alloc(struct inode *inode, struct file *filp,
 	pages = (request.size + PAGE_SIZE - 1) / PAGE_SIZE;
 	type = (u32) request.type;
 
-	if (!(memory = drm_alloc_agp(dev->agp->bridge, pages, type))) {
+	if (!(memory = drm_alloc_agp(dev, pages, type))) {
 		drm_free(entry, sizeof(*entry), DRM_MEM_AGPLISTS);
 		return -ENOMEM;
 	}
@@ -403,13 +426,8 @@ drm_agp_head_t *drm_agp_init(drm_device_t *dev)
 		return NULL;
 	}
 	head->memory = NULL;
-#if LINUX_VERSION_CODE <= 0x020408
-	head->cant_use_aperture = 0;
-	head->page_mask = ~(0xfff);
-#else
 	head->cant_use_aperture = head->agp_info.cant_use_aperture;
 	head->page_mask = head->agp_info.page_mask;
-#endif
 
 	return head;
 }
@@ -436,6 +454,7 @@ int drm_agp_bind_memory(DRM_AGP_MEM *handle, off_t start)
 		return -EINVAL;
 	return agp_bind_memory(handle, start);
 }
+EXPORT_SYMBOL(drm_agp_bind_memory);
 
 /** Calls agp_unbind_memory() */
 int drm_agp_unbind_memory(DRM_AGP_MEM *handle)
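[Editorial sketch -- not part of the patch. With the acquire/enable/release logic split out of the ioctl handlers and exported above, a driver can now drive AGP from kernel context; the mode bits below are placeholders.]

	drm_agp_mode_t mode = { .mode = 0x1f000207 };	/* placeholder mode bits */

	if (drm_agp_acquire(dev) == 0) {
		if (drm_agp_enable(dev, mode) != 0)
			DRM_ERROR("could not enable AGP\n");
		/* ... bind GART memory, set up buffers ... */
		drm_agp_release(dev);
	}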
diff --git a/drivers/char/drm/drm_bufs.c b/drivers/char/drm/drm_bufs.c
index 4c6191d231b8..e0743ebbe4bd 100644
--- a/drivers/char/drm/drm_bufs.c
+++ b/drivers/char/drm/drm_bufs.c
@@ -36,37 +36,69 @@
 #include <linux/vmalloc.h>
 #include "drmP.h"
 
-/**
- * Compute size order.  Returns the exponent of the smaller power of two which
- * is greater or equal to given number.
- *
- * \param size size.
- * \return order.
- *
- * \todo Can be made faster.
- */
-int drm_order( unsigned long size )
+unsigned long drm_get_resource_start(drm_device_t *dev, unsigned int resource)
 {
-	int order;
-	unsigned long tmp;
+	return pci_resource_start(dev->pdev, resource);
+}
+EXPORT_SYMBOL(drm_get_resource_start);
 
-	for (order = 0, tmp = size >> 1; tmp; tmp >>= 1, order++)
-		;
+unsigned long drm_get_resource_len(drm_device_t *dev, unsigned int resource)
+{
+	return pci_resource_len(dev->pdev, resource);
+}
+EXPORT_SYMBOL(drm_get_resource_len);
 
-	if (size & (size - 1))
-		++order;
+static drm_local_map_t *drm_find_matching_map(drm_device_t *dev,
+					      drm_local_map_t *map)
+{
+	struct list_head *list;
 
-	return order;
+	list_for_each(list, &dev->maplist->head) {
+		drm_map_list_t *entry = list_entry(list, drm_map_list_t, head);
+		if (entry->map && map->type == entry->map->type &&
+		    entry->map->offset == map->offset) {
+			return entry->map;
+		}
+	}
+
+	return NULL;
 }
-EXPORT_SYMBOL(drm_order);
 
-#ifdef CONFIG_COMPAT
 /*
- * Used to allocate 32-bit handles for _DRM_SHM regions
- * The 0x10000000 value is chosen to be out of the way of
- * FB/register and GART physical addresses.
+ * Used to allocate 32-bit handles for mappings.
  */
-static unsigned int map32_handle = 0x10000000;
+#define START_RANGE 0x10000000
+#define END_RANGE 0x40000000
+
+#ifdef _LP64
+static __inline__ unsigned int HandleID(unsigned long lhandle, drm_device_t *dev)
+{
+	static unsigned int map32_handle = START_RANGE;
+	unsigned int hash;
+
+	if (lhandle & 0xffffffff00000000) {
+		hash = map32_handle;
+		map32_handle += PAGE_SIZE;
+		if (map32_handle > END_RANGE)
+			map32_handle = START_RANGE;
+	} else
+		hash = lhandle;
+
+	while (1) {
+		drm_map_list_t *_entry;
+		list_for_each_entry(_entry, &dev->maplist->head,head) {
+			if (_entry->user_token == hash)
+				break;
+		}
+		if (&_entry->head == &dev->maplist->head)
+			return hash;
+
+		hash += PAGE_SIZE;
+		map32_handle += PAGE_SIZE;
+	}
+}
+#else
+# define HandleID(x,dev)	(unsigned int)(x)
 #endif
 
 /**
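[Editorial sketch -- not part of the patch. What the HandleID() scheme above yields in the two interesting cases on a 64-bit (_LP64) kernel; the input addresses are illustrative.]

	unsigned int token;

	/* an offset that already fits in 32 bits is kept as the user token */
	token = HandleID(0xe8000000UL, dev);		/* -> 0xe8000000 */

	/* a vmalloc()ed _DRM_SHM handle does not fit, so a free page-aligned
	 * token is handed out from the START_RANGE..END_RANGE window */
	token = HandleID(0xffffc20000040000UL, dev);	/* -> e.g. 0x10000000 */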
@@ -82,25 +114,23 @@ static unsigned int map32_handle = 0x10000000;
  * type.  Adds the map to the map list drm_device::maplist.  Adds MTRR's where
  * applicable and if supported by the kernel.
  */
-int drm_addmap( struct inode *inode, struct file *filp,
-		unsigned int cmd, unsigned long arg )
+int drm_addmap(drm_device_t * dev, unsigned int offset,
+	       unsigned int size, drm_map_type_t type,
+	       drm_map_flags_t flags, drm_local_map_t ** map_ptr)
 {
-	drm_file_t *priv = filp->private_data;
-	drm_device_t *dev = priv->head->dev;
 	drm_map_t *map;
-	drm_map_t __user *argp = (void __user *)arg;
 	drm_map_list_t *list;
-
-	if ( !(filp->f_mode & 3) ) return -EACCES; /* Require read/write */
+	drm_dma_handle_t *dmah;
+	drm_local_map_t *found_map;
 
 	map = drm_alloc( sizeof(*map), DRM_MEM_MAPS );
 	if ( !map )
 		return -ENOMEM;
 
-	if ( copy_from_user( map, argp, sizeof(*map) ) ) {
-		drm_free( map, sizeof(*map), DRM_MEM_MAPS );
-		return -EFAULT;
-	}
+	map->offset = offset;
+	map->size = size;
+	map->flags = flags;
+	map->type = type;
 
 	/* Only allow shared memory to be removable since we only keep enough
 	 * book keeping information about shared memory to allow for removal
@@ -122,7 +152,7 @@ int drm_addmap( struct inode *inode, struct file *filp,
 	switch ( map->type ) {
 	case _DRM_REGISTERS:
 	case _DRM_FRAME_BUFFER:
-#if !defined(__sparc__) && !defined(__alpha__) && !defined(__ia64__)
+#if !defined(__sparc__) && !defined(__alpha__) && !defined(__ia64__) && !defined(__powerpc64__) && !defined(__x86_64__)
 		if ( map->offset + map->size < map->offset ||
 		     map->offset < virt_to_phys(high_memory) ) {
 			drm_free( map, sizeof(*map), DRM_MEM_MAPS );
@@ -132,6 +162,24 @@ int drm_addmap( struct inode *inode, struct file *filp,
 #ifdef __alpha__
 		map->offset += dev->hose->mem_space->start;
 #endif
+		/* Some drivers preinitialize some maps, without the X Server
+		 * needing to be aware of it.  Therefore, we just return success
+		 * when the server tries to create a duplicate map.
+		 */
+		found_map = drm_find_matching_map(dev, map);
+		if (found_map != NULL) {
+			if (found_map->size != map->size) {
+				DRM_DEBUG("Matching maps of type %d with "
+					  "mismatched sizes, (%ld vs %ld)\n",
+					  map->type, map->size, found_map->size);
+				found_map->size = map->size;
+			}
+
+			drm_free(map, sizeof(*map), DRM_MEM_MAPS);
+			*map_ptr = found_map;
+			return 0;
+		}
+
 		if (drm_core_has_MTRR(dev)) {
 			if ( map->type == _DRM_FRAME_BUFFER ||
 			     (map->flags & _DRM_WRITE_COMBINING) ) {
@@ -178,9 +226,22 @@ int drm_addmap( struct inode *inode, struct file *filp,
 			drm_free(map, sizeof(*map), DRM_MEM_MAPS);
 			return -EINVAL;
 		}
-		map->offset += dev->sg->handle;
+		map->offset += (unsigned long)dev->sg->virtual;
+		break;
+	case _DRM_CONSISTENT:
+		/* dma_addr_t is 64bit on i386 with CONFIG_HIGHMEM64G,
+		 * As we're limiting the address to 2^32-1 (or less),
+		 * casting it down to 32 bits is no problem, but we
+		 * need to point to a 64bit variable first. */
+		dmah = drm_pci_alloc(dev, map->size, map->size, 0xffffffffUL);
+		if (!dmah) {
+			drm_free(map, sizeof(*map), DRM_MEM_MAPS);
+			return -ENOMEM;
+		}
+		map->handle = dmah->vaddr;
+		map->offset = (unsigned long)dmah->busaddr;
+		kfree(dmah);
 		break;
-
 	default:
 		drm_free( map, sizeof(*map), DRM_MEM_MAPS );
 		return -EINVAL;
@@ -196,17 +257,56 @@ int drm_addmap( struct inode *inode, struct file *filp,
 
 	down(&dev->struct_sem);
 	list_add(&list->head, &dev->maplist->head);
-#ifdef CONFIG_COMPAT
-	/* Assign a 32-bit handle for _DRM_SHM mappings */
+	/* Assign a 32-bit handle */
 	/* We do it here so that dev->struct_sem protects the increment */
-	if (map->type == _DRM_SHM)
-		map->offset = map32_handle += PAGE_SIZE;
-#endif
+	list->user_token = HandleID(map->type==_DRM_SHM
+				    ? (unsigned long)map->handle
+				    : map->offset, dev);
 	up(&dev->struct_sem);
 
-	if ( copy_to_user( argp, map, sizeof(*map) ) )
+	*map_ptr = map;
+	return 0;
+}
+EXPORT_SYMBOL(drm_addmap);
+
+int drm_addmap_ioctl(struct inode *inode, struct file *filp,
+		     unsigned int cmd, unsigned long arg)
+{
+	drm_file_t *priv = filp->private_data;
+	drm_device_t *dev = priv->head->dev;
+	drm_map_t map;
+	drm_map_t *map_ptr;
+	drm_map_t __user *argp = (void __user *)arg;
+	int err;
+	unsigned long handle = 0;
+
+	if (!(filp->f_mode & 3))
+		return -EACCES;	/* Require read/write */
+
+	if (copy_from_user(& map, argp, sizeof(map))) {
+		return -EFAULT;
+	}
+
+	err = drm_addmap(dev, map.offset, map.size, map.type, map.flags,
+			 &map_ptr);
+
+	if (err) {
+		return err;
+	}
+
+	{
+		drm_map_list_t *_entry;
+		list_for_each_entry(_entry, &dev->maplist->head, head) {
+			if (_entry->map == map_ptr)
+				handle = _entry->user_token;
+		}
+		if (!handle)
+			return -EFAULT;
+	}
+
+	if (copy_to_user(argp, map_ptr, sizeof(*map_ptr)))
 		return -EFAULT;
-	if (copy_to_user(&argp->handle, &map->offset, sizeof(map->offset)))
+	if (put_user(handle, &argp->handle))
 		return -EFAULT;
 	return 0;
 }
@@ -226,81 +326,138 @@ int drm_addmap( struct inode *inode, struct file *filp,
  * its being used, and free any associate resource (such as MTRR's) if it's not
  * being on use.
  *
- * \sa addmap().
+ * \sa drm_addmap
  */
-int drm_rmmap(struct inode *inode, struct file *filp,
-	      unsigned int cmd, unsigned long arg)
+int drm_rmmap_locked(drm_device_t *dev, drm_local_map_t *map)
 {
-	drm_file_t *priv = filp->private_data;
-	drm_device_t *dev = priv->head->dev;
 	struct list_head *list;
 	drm_map_list_t *r_list = NULL;
-	drm_vma_entry_t *pt, *prev;
-	drm_map_t *map;
+	drm_dma_handle_t dmah;
+
+	/* Find the list entry for the map and remove it */
+	list_for_each(list, &dev->maplist->head) {
+		r_list = list_entry(list, drm_map_list_t, head);
+
+		if (r_list->map == map) {
+			list_del(list);
+			drm_free(list, sizeof(*list), DRM_MEM_MAPS);
+			break;
+		}
+	}
+
+	/* List has wrapped around to the head pointer, or it's empty and we
+	 * didn't find anything.
+	 */
+	if (list == (&dev->maplist->head)) {
+		return -EINVAL;
+	}
+
+	switch (map->type) {
+	case _DRM_REGISTERS:
+		drm_ioremapfree(map->handle, map->size, dev);
+		/* FALLTHROUGH */
+	case _DRM_FRAME_BUFFER:
+		if (drm_core_has_MTRR(dev) && map->mtrr >= 0) {
+			int retcode;
+			retcode = mtrr_del(map->mtrr, map->offset,
+					   map->size);
+			DRM_DEBUG ("mtrr_del=%d\n", retcode);
+		}
+		break;
+	case _DRM_SHM:
+		vfree(map->handle);
+		break;
+	case _DRM_AGP:
+	case _DRM_SCATTER_GATHER:
+		break;
+	case _DRM_CONSISTENT:
+		dmah.vaddr = map->handle;
+		dmah.busaddr = map->offset;
+		dmah.size = map->size;
+		__drm_pci_free(dev, &dmah);
+		break;
+	}
+	drm_free(map, sizeof(*map), DRM_MEM_MAPS);
+
+	return 0;
+}
+EXPORT_SYMBOL(drm_rmmap_locked);
+
+int drm_rmmap(drm_device_t *dev, drm_local_map_t *map)
+{
+	int ret;
+
+	down(&dev->struct_sem);
+	ret = drm_rmmap_locked(dev, map);
+	up(&dev->struct_sem);
+
+	return ret;
+}
+EXPORT_SYMBOL(drm_rmmap);
+
+/* The rmmap ioctl appears to be unnecessary.  All mappings are torn down on
+ * the last close of the device, and this is necessary for cleanup when things
+ * exit uncleanly.  Therefore, having userland manually remove mappings seems
+ * like a pointless exercise since they're going away anyway.
+ *
+ * One use case might be after addmap is allowed for normal users for SHM and
+ * gets used by drivers that the server doesn't need to care about.  This seems
+ * unlikely.
+ */
+int drm_rmmap_ioctl(struct inode *inode, struct file *filp,
+		    unsigned int cmd, unsigned long arg)
+{
+	drm_file_t *priv = filp->private_data;
+	drm_device_t *dev = priv->head->dev;
 	drm_map_t request;
-	int found_maps = 0;
+	drm_local_map_t *map = NULL;
+	struct list_head *list;
+	int ret;
 
-	if (copy_from_user(&request, (drm_map_t __user *)arg,
-			   sizeof(request))) {
+	if (copy_from_user(&request, (drm_map_t __user *)arg, sizeof(request))) {
 		return -EFAULT;
 	}
 
 	down(&dev->struct_sem);
-	list = &dev->maplist->head;
 	list_for_each(list, &dev->maplist->head) {
-		r_list = list_entry(list, drm_map_list_t, head);
+		drm_map_list_t *r_list = list_entry(list, drm_map_list_t, head);
 
-		if(r_list->map &&
-		   r_list->map->offset == (unsigned long) request.handle &&
-		   r_list->map->flags & _DRM_REMOVABLE) break;
+		if (r_list->map &&
+		    r_list->user_token == (unsigned long) request.handle &&
+		    r_list->map->flags & _DRM_REMOVABLE) {
+			map = r_list->map;
+			break;
+		}
 	}
 
 	/* List has wrapped around to the head pointer, or its empty we didn't
 	 * find anything.
 	 */
-	if(list == (&dev->maplist->head)) {
+	if (list == (&dev->maplist->head)) {
 		up(&dev->struct_sem);
 		return -EINVAL;
 	}
-	map = r_list->map;
-	list_del(list);
-	drm_free(list, sizeof(*list), DRM_MEM_MAPS);
 
-	for (pt = dev->vmalist, prev = NULL; pt; prev = pt, pt = pt->next) {
-		if (pt->vma->vm_private_data == map) found_maps++;
-	}
+	if (!map)
+		return -EINVAL;
 
-	if(!found_maps) {
-		switch (map->type) {
-		case _DRM_REGISTERS:
-		case _DRM_FRAME_BUFFER:
-			if (drm_core_has_MTRR(dev)) {
-				if (map->mtrr >= 0) {
-					int retcode;
-					retcode = mtrr_del(map->mtrr,
-							   map->offset,
-							   map->size);
-					DRM_DEBUG("mtrr_del = %d\n", retcode);
-				}
-			}
-			drm_ioremapfree(map->handle, map->size, dev);
-			break;
-		case _DRM_SHM:
-			vfree(map->handle);
-			break;
-		case _DRM_AGP:
-		case _DRM_SCATTER_GATHER:
-			break;
-		}
-		drm_free(map, sizeof(*map), DRM_MEM_MAPS);
+	/* Register and framebuffer maps are permanent */
+	if ((map->type == _DRM_REGISTERS) || (map->type == _DRM_FRAME_BUFFER)) {
+		up(&dev->struct_sem);
+		return 0;
 	}
+
+	ret = drm_rmmap_locked(dev, map);
+
 	up(&dev->struct_sem);
-	return 0;
+
+	return ret;
 }
 
 /**
  * Cleanup after an error on one of the addbufs() functions.
  *
+ * \param dev DRM device.
  * \param entry buffer entry where the error occurred.
  *
  * Frees any pages and buffers associated with the given entry.
@@ -344,25 +501,19 @@ static void drm_cleanup_buf_error(drm_device_t *dev, drm_buf_entry_t *entry)
 
 #if __OS_HAS_AGP
 /**
- * Add AGP buffers for DMA transfers (ioctl).
+ * Add AGP buffers for DMA transfers.
  *
- * \param inode device inode.
- * \param filp file pointer.
- * \param cmd command.
- * \param arg pointer to a drm_buf_desc_t request.
+ * \param dev drm_device_t to which the buffers are to be added.
+ * \param request pointer to a drm_buf_desc_t describing the request.
  * \return zero on success or a negative number on failure.
  *
  * After some sanity checks creates a drm_buf structure for each buffer and
 * reallocates the buffer list of the same size order to accommodate the new
  * buffers.
  */
-static int drm_addbufs_agp( struct inode *inode, struct file *filp,
-			    unsigned int cmd, unsigned long arg )
+int drm_addbufs_agp(drm_device_t *dev, drm_buf_desc_t *request)
 {
-	drm_file_t *priv = filp->private_data;
-	drm_device_t *dev = priv->head->dev;
 	drm_device_dma_t *dma = dev->dma;
-	drm_buf_desc_t request;
 	drm_buf_entry_t *entry;
 	drm_buf_t *buf;
 	unsigned long offset;
@@ -376,25 +527,20 @@ static int drm_addbufs_agp( struct inode *inode, struct file *filp,
 	int byte_count;
 	int i;
 	drm_buf_t **temp_buflist;
-	drm_buf_desc_t __user *argp = (void __user *)arg;
 
 	if ( !dma ) return -EINVAL;
 
-	if ( copy_from_user( &request, argp,
-			     sizeof(request) ) )
-		return -EFAULT;
-
-	count = request.count;
-	order = drm_order( request.size );
+	count = request->count;
+	order = drm_order(request->size);
 	size = 1 << order;
 
-	alignment = (request.flags & _DRM_PAGE_ALIGN)
+	alignment = (request->flags & _DRM_PAGE_ALIGN)
 		? PAGE_ALIGN(size) : size;
 	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
 	total = PAGE_SIZE << page_order;
 
 	byte_count = 0;
-	agp_offset = dev->agp->base + request.agp_start;
+	agp_offset = dev->agp->base + request->agp_start;
 
 	DRM_DEBUG( "count:      %d\n", count );
 	DRM_DEBUG( "order:      %d\n", order );
@@ -508,26 +654,20 @@ static int drm_addbufs_agp( struct inode *inode, struct file *filp,
 
 	up( &dev->struct_sem );
 
-	request.count = entry->buf_count;
-	request.size = size;
-
-	if ( copy_to_user( argp, &request, sizeof(request) ) )
-		return -EFAULT;
+	request->count = entry->buf_count;
+	request->size = size;
 
 	dma->flags = _DRM_DMA_USE_AGP;
 
 	atomic_dec( &dev->buf_alloc );
 	return 0;
 }
+EXPORT_SYMBOL(drm_addbufs_agp);
 #endif /* __OS_HAS_AGP */
 
-static int drm_addbufs_pci( struct inode *inode, struct file *filp,
-			    unsigned int cmd, unsigned long arg )
+int drm_addbufs_pci(drm_device_t *dev, drm_buf_desc_t *request)
 {
-	drm_file_t *priv = filp->private_data;
-	drm_device_t *dev = priv->head->dev;
 	drm_device_dma_t *dma = dev->dma;
-	drm_buf_desc_t request;
 	int count;
 	int order;
 	int size;
@@ -543,26 +683,22 @@ static int drm_addbufs_pci( struct inode *inode, struct file *filp,
 	int page_count;
 	unsigned long *temp_pagelist;
 	drm_buf_t **temp_buflist;
-	drm_buf_desc_t __user *argp = (void __user *)arg;
 
 	if (!drm_core_check_feature(dev, DRIVER_PCI_DMA)) return -EINVAL;
 	if ( !dma ) return -EINVAL;
 
-	if ( copy_from_user( &request, argp, sizeof(request) ) )
-		return -EFAULT;
-
-	count = request.count;
-	order = drm_order( request.size );
+	count = request->count;
+	order = drm_order(request->size);
 	size = 1 << order;
 
 	DRM_DEBUG( "count=%d, size=%d (%d), order=%d, queue_count=%d\n",
-		   request.count, request.size, size,
+		   request->count, request->size, size,
 		   order, dev->queue_count );
 
 	if ( order < DRM_MIN_ORDER || order > DRM_MAX_ORDER ) return -EINVAL;
 	if ( dev->queue_count ) return -EBUSY; /* Not while in use */
 
-	alignment = (request.flags & _DRM_PAGE_ALIGN)
+	alignment = (request->flags & _DRM_PAGE_ALIGN)
 		? PAGE_ALIGN(size) : size;
 	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
 	total = PAGE_SIZE << page_order;
@@ -740,25 +876,18 @@ static int drm_addbufs_pci( struct inode *inode, struct file *filp,
 
 	up( &dev->struct_sem );
 
-	request.count = entry->buf_count;
-	request.size = size;
-
-	if ( copy_to_user( argp, &request, sizeof(request) ) )
-		return -EFAULT;
+	request->count = entry->buf_count;
+	request->size = size;
 
 	atomic_dec( &dev->buf_alloc );
 	return 0;
 
 }
+EXPORT_SYMBOL(drm_addbufs_pci);
 
-static int drm_addbufs_sg( struct inode *inode, struct file *filp,
-			   unsigned int cmd, unsigned long arg )
+static int drm_addbufs_sg(drm_device_t *dev, drm_buf_desc_t *request)
 {
-	drm_file_t *priv = filp->private_data;
-	drm_device_t *dev = priv->head->dev;
 	drm_device_dma_t *dma = dev->dma;
-	drm_buf_desc_t __user *argp = (void __user *)arg;
-	drm_buf_desc_t request;
 	drm_buf_entry_t *entry;
 	drm_buf_t *buf;
 	unsigned long offset;
@@ -777,20 +906,17 @@ static int drm_addbufs_sg( struct inode *inode, struct file *filp,
 
 	if ( !dma ) return -EINVAL;
 
-	if ( copy_from_user( &request, argp, sizeof(request) ) )
-		return -EFAULT;
-
-	count = request.count;
-	order = drm_order( request.size );
+	count = request->count;
+	order = drm_order(request->size);
 	size = 1 << order;
 
-	alignment = (request.flags & _DRM_PAGE_ALIGN)
+	alignment = (request->flags & _DRM_PAGE_ALIGN)
 		? PAGE_ALIGN(size) : size;
 	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
 	total = PAGE_SIZE << page_order;
 
 	byte_count = 0;
-	agp_offset = request.agp_start;
+	agp_offset = request->agp_start;
 
 	DRM_DEBUG( "count:      %d\n", count );
 	DRM_DEBUG( "order:      %d\n", order );
@@ -848,7 +974,8 @@ static int drm_addbufs_sg( struct inode *inode, struct file *filp,
 
 		buf->offset    = (dma->byte_count + offset);
 		buf->bus_address = agp_offset + offset;
-		buf->address   = (void *)(agp_offset + offset + dev->sg->handle);
+		buf->address   = (void *)(agp_offset + offset
+					  + (unsigned long)dev->sg->virtual);
 		buf->next      = NULL;
 		buf->waiting   = 0;
 		buf->pending   = 0;
@@ -905,11 +1032,8 @@ static int drm_addbufs_sg( struct inode *inode, struct file *filp,
 
 	up( &dev->struct_sem );
 
-	request.count = entry->buf_count;
-	request.size = size;
-
-	if ( copy_to_user( argp, &request, sizeof(request) ) )
-		return -EFAULT;
+	request->count = entry->buf_count;
+	request->size = size;
 
 	dma->flags = _DRM_DMA_USE_SG;
 
@@ -917,6 +1041,161 @@ static int drm_addbufs_sg( struct inode *inode, struct file *filp,
917 return 0; 1041 return 0;
918} 1042}
919 1043
1044int drm_addbufs_fb(drm_device_t *dev, drm_buf_desc_t *request)
1045{
1046 drm_device_dma_t *dma = dev->dma;
1047 drm_buf_entry_t *entry;
1048 drm_buf_t *buf;
1049 unsigned long offset;
1050 unsigned long agp_offset;
1051 int count;
1052 int order;
1053 int size;
1054 int alignment;
1055 int page_order;
1056 int total;
1057 int byte_count;
1058 int i;
1059 drm_buf_t **temp_buflist;
1060
1061 if (!drm_core_check_feature(dev, DRIVER_FB_DMA))
1062 return -EINVAL;
1063
1064 if (!dma)
1065 return -EINVAL;
1066
1067 count = request->count;
1068 order = drm_order(request->size);
1069 size = 1 << order;
1070
1071 alignment = (request->flags & _DRM_PAGE_ALIGN)
1072 ? PAGE_ALIGN(size) : size;
1073 page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
1074 total = PAGE_SIZE << page_order;
1075
1076 byte_count = 0;
1077 agp_offset = request->agp_start;
1078
1079 DRM_DEBUG("count: %d\n", count);
1080 DRM_DEBUG("order: %d\n", order);
1081 DRM_DEBUG("size: %d\n", size);
1082 DRM_DEBUG("agp_offset: %lu\n", agp_offset);
1083 DRM_DEBUG("alignment: %d\n", alignment);
1084 DRM_DEBUG("page_order: %d\n", page_order);
1085 DRM_DEBUG("total: %d\n", total);
1086
1087 if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
1088 return -EINVAL;
1089 if (dev->queue_count)
1090 return -EBUSY; /* Not while in use */
1091
1092 spin_lock(&dev->count_lock);
1093 if (dev->buf_use) {
1094 spin_unlock(&dev->count_lock);
1095 return -EBUSY;
1096 }
1097 atomic_inc(&dev->buf_alloc);
1098 spin_unlock(&dev->count_lock);
1099
1100 down(&dev->struct_sem);
1101 entry = &dma->bufs[order];
1102 if (entry->buf_count) {
1103 up(&dev->struct_sem);
1104 atomic_dec(&dev->buf_alloc);
1105 return -ENOMEM; /* May only call once for each order */
1106 }
1107
1108 if (count < 0 || count > 4096) {
1109 up(&dev->struct_sem);
1110 atomic_dec(&dev->buf_alloc);
1111 return -EINVAL;
1112 }
1113
1114 entry->buflist = drm_alloc(count * sizeof(*entry->buflist),
1115 DRM_MEM_BUFS);
1116 if (!entry->buflist) {
1117 up(&dev->struct_sem);
1118 atomic_dec(&dev->buf_alloc);
1119 return -ENOMEM;
1120 }
1121 memset(entry->buflist, 0, count * sizeof(*entry->buflist));
1122
1123 entry->buf_size = size;
1124 entry->page_order = page_order;
1125
1126 offset = 0;
1127
1128 while (entry->buf_count < count) {
1129 buf = &entry->buflist[entry->buf_count];
1130 buf->idx = dma->buf_count + entry->buf_count;
1131 buf->total = alignment;
1132 buf->order = order;
1133 buf->used = 0;
1134
1135 buf->offset = (dma->byte_count + offset);
1136 buf->bus_address = agp_offset + offset;
1137 buf->address = (void *)(agp_offset + offset);
1138 buf->next = NULL;
1139 buf->waiting = 0;
1140 buf->pending = 0;
1141 init_waitqueue_head(&buf->dma_wait);
1142 buf->filp = NULL;
1143
1144 buf->dev_priv_size = dev->driver->dev_priv_size;
1145 buf->dev_private = drm_alloc(buf->dev_priv_size, DRM_MEM_BUFS);
1146 if (!buf->dev_private) {
1147 /* Set count correctly so we free the proper amount. */
1148 entry->buf_count = count;
1149 drm_cleanup_buf_error(dev, entry);
1150 up(&dev->struct_sem);
1151 atomic_dec(&dev->buf_alloc);
1152 return -ENOMEM;
1153 }
1154 memset(buf->dev_private, 0, buf->dev_priv_size);
1155
1156 DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address);
1157
1158 offset += alignment;
1159 entry->buf_count++;
1160 byte_count += PAGE_SIZE << page_order;
1161 }
1162
1163 DRM_DEBUG("byte_count: %d\n", byte_count);
1164
1165 temp_buflist = drm_realloc(dma->buflist,
1166 dma->buf_count * sizeof(*dma->buflist),
1167 (dma->buf_count + entry->buf_count)
1168 * sizeof(*dma->buflist), DRM_MEM_BUFS);
1169 if (!temp_buflist) {
1170 /* Free the entry because it isn't valid */
1171 drm_cleanup_buf_error(dev, entry);
1172 up(&dev->struct_sem);
1173 atomic_dec(&dev->buf_alloc);
1174 return -ENOMEM;
1175 }
1176 dma->buflist = temp_buflist;
1177
1178 for (i = 0; i < entry->buf_count; i++) {
1179 dma->buflist[i + dma->buf_count] = &entry->buflist[i];
1180 }
1181
1182 dma->buf_count += entry->buf_count;
1183 dma->byte_count += byte_count;
1184
1185 DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
1186 DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);
1187
1188 up(&dev->struct_sem);
1189
1190 request->count = entry->buf_count;
1191 request->size = size;
1192
1193 dma->flags = _DRM_DMA_USE_FB;
1194
1195 atomic_dec(&dev->buf_alloc);
1196 return 0;
1197}
1198
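The order/alignment/page_order arithmetic at the top of drm_addbufs_fb() is shared by every drm_addbufs_* variant. A minimal user-space sketch of the same computation, assuming a 4 KiB page (the kernel uses PAGE_SHIFT/PAGE_SIZE, and ceil_order() here mirrors drm_order(), which appears later in this patch); the request size is a made-up value:

/* Standalone sketch of the drm_addbufs_* size arithmetic. PG_SHIFT and
 * the request size are assumptions for illustration only. */
#include <stdio.h>

#define PG_SHIFT 12
#define PG_SIZE  (1UL << PG_SHIFT)
#define PG_ALIGN(x) (((x) + PG_SIZE - 1) & ~(PG_SIZE - 1))

static int ceil_order(unsigned long size)   /* mirrors drm_order() */
{
        int order;
        unsigned long tmp;

        for (order = 0, tmp = size >> 1; tmp; tmp >>= 1, order++)
                ;
        if (size & (size - 1))
                ++order;
        return order;
}

int main(void)
{
        unsigned long request_size = 65536;        /* hypothetical request->size */
        int order = ceil_order(request_size);
        unsigned long size = 1UL << order;
        unsigned long alignment = PG_ALIGN(size);  /* _DRM_PAGE_ALIGN case */
        int page_order = order - PG_SHIFT > 0 ? order - PG_SHIFT : 0;
        unsigned long total = PG_SIZE << page_order;

        printf("order=%d size=%lu alignment=%lu page_order=%d total=%lu\n",
               order, size, alignment, page_order, total);
        return 0;
}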
920/** 1199/**
921 * Add buffers for DMA transfers (ioctl). 1200 * Add buffers for DMA transfers (ioctl).
922 * 1201 *
@@ -937,6 +1216,7 @@ int drm_addbufs( struct inode *inode, struct file *filp,
937 drm_buf_desc_t request; 1216 drm_buf_desc_t request;
938 drm_file_t *priv = filp->private_data; 1217 drm_file_t *priv = filp->private_data;
939 drm_device_t *dev = priv->head->dev; 1218 drm_device_t *dev = priv->head->dev;
1219 int ret;
940 1220
941 if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA)) 1221 if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
942 return -EINVAL; 1222 return -EINVAL;
@@ -947,13 +1227,23 @@ int drm_addbufs( struct inode *inode, struct file *filp,
947 1227
948#if __OS_HAS_AGP 1228#if __OS_HAS_AGP
949 if ( request.flags & _DRM_AGP_BUFFER ) 1229 if ( request.flags & _DRM_AGP_BUFFER )
950 return drm_addbufs_agp( inode, filp, cmd, arg ); 1230 ret=drm_addbufs_agp(dev, &request);
951 else 1231 else
952#endif 1232#endif
953 if ( request.flags & _DRM_SG_BUFFER ) 1233 if ( request.flags & _DRM_SG_BUFFER )
954 return drm_addbufs_sg( inode, filp, cmd, arg ); 1234 ret=drm_addbufs_sg(dev, &request);
1235 else if ( request.flags & _DRM_FB_BUFFER)
1236 ret=drm_addbufs_fb(dev, &request);
955 else 1237 else
956 return drm_addbufs_pci( inode, filp, cmd, arg ); 1238 ret=drm_addbufs_pci(dev, &request);
1239
1240 if (ret==0) {
1241 if (copy_to_user((void __user *)arg, &request,
1242 sizeof(request))) {
1243 ret = -EFAULT;
1244 }
1245 }
1246 return ret;
957} 1247}
958 1248
959 1249
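For reference, this dispatcher is reached from user space through DRM_IOCTL_ADD_BUFS. A minimal sketch, assuming a libdrm-era <drm/drm.h>, an already-authenticated master fd on /dev/dri/card0, and error handling reduced to perror():

/* User-space sketch of driving drm_addbufs(); device path and the
 * count/size values are assumptions. */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <drm/drm.h>

int main(void)
{
        struct drm_buf_desc desc = {
                .count = 32,
                .size  = 65536,
                .flags = _DRM_AGP_BUFFER,  /* routed to drm_addbufs_agp() */
                .agp_start = 0,            /* hypothetical AGP aperture offset */
        };
        int fd = open("/dev/dri/card0", O_RDWR);

        if (fd < 0 || ioctl(fd, DRM_IOCTL_ADD_BUFS, &desc) < 0) {
                perror("DRM_IOCTL_ADD_BUFS");
                return 1;
        }
        /* On success the kernel copied the request back, so desc.count and
         * desc.size now hold what was actually allocated. */
        printf("got %d buffers of %d bytes\n", desc.count, desc.size);
        close(fd);
        return 0;
}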
@@ -1196,43 +1486,31 @@ int drm_mapbufs( struct inode *inode, struct file *filp,
1196 return -EFAULT; 1486 return -EFAULT;
1197 1487
1198 if ( request.count >= dma->buf_count ) { 1488 if ( request.count >= dma->buf_count ) {
1199 if ((drm_core_has_AGP(dev) && (dma->flags & _DRM_DMA_USE_AGP)) || 1489 if ((drm_core_has_AGP(dev) && (dma->flags & _DRM_DMA_USE_AGP))
1200 (drm_core_check_feature(dev, DRIVER_SG) && (dma->flags & _DRM_DMA_USE_SG)) ) { 1490 || (drm_core_check_feature(dev, DRIVER_SG)
1491 && (dma->flags & _DRM_DMA_USE_SG))
1492 || (drm_core_check_feature(dev, DRIVER_FB_DMA)
1493 && (dma->flags & _DRM_DMA_USE_FB))) {
1201 drm_map_t *map = dev->agp_buffer_map; 1494 drm_map_t *map = dev->agp_buffer_map;
1495 unsigned long token = dev->agp_buffer_token;
1202 1496
1203 if ( !map ) { 1497 if ( !map ) {
1204 retcode = -EINVAL; 1498 retcode = -EINVAL;
1205 goto done; 1499 goto done;
1206 } 1500 }
1207 1501
1208#if LINUX_VERSION_CODE <= 0x020402
1209 down( &current->mm->mmap_sem );
1210#else
1211 down_write( &current->mm->mmap_sem ); 1502 down_write( &current->mm->mmap_sem );
1212#endif
1213 virtual = do_mmap( filp, 0, map->size, 1503 virtual = do_mmap( filp, 0, map->size,
1214 PROT_READ | PROT_WRITE, 1504 PROT_READ | PROT_WRITE,
1215 MAP_SHARED, 1505 MAP_SHARED,
1216 (unsigned long)map->offset ); 1506 token );
1217#if LINUX_VERSION_CODE <= 0x020402
1218 up( &current->mm->mmap_sem );
1219#else
1220 up_write( &current->mm->mmap_sem ); 1507 up_write( &current->mm->mmap_sem );
1221#endif
1222 } else { 1508 } else {
1223#if LINUX_VERSION_CODE <= 0x020402
1224 down( &current->mm->mmap_sem );
1225#else
1226 down_write( &current->mm->mmap_sem ); 1509 down_write( &current->mm->mmap_sem );
1227#endif
1228 virtual = do_mmap( filp, 0, dma->byte_count, 1510 virtual = do_mmap( filp, 0, dma->byte_count,
1229 PROT_READ | PROT_WRITE, 1511 PROT_READ | PROT_WRITE,
1230 MAP_SHARED, 0 ); 1512 MAP_SHARED, 0 );
1231#if LINUX_VERSION_CODE <= 0x020402
1232 up( &current->mm->mmap_sem );
1233#else
1234 up_write( &current->mm->mmap_sem ); 1513 up_write( &current->mm->mmap_sem );
1235#endif
1236 } 1514 }
1237 if ( virtual > -1024UL ) { 1515 if ( virtual > -1024UL ) {
1238 /* Real error */ 1516 /* Real error */
@@ -1279,3 +1557,26 @@ int drm_mapbufs( struct inode *inode, struct file *filp,
1279 return retcode; 1557 return retcode;
1280} 1558}
1281 1559
1560/**
 1561 * Compute size order. Returns the exponent of the smallest power of two
 1562 * which is greater than or equal to the given number.
1563 *
1564 * \param size size.
1565 * \return order.
1566 *
1567 * \todo Can be made faster.
1568 */
1569int drm_order( unsigned long size )
1570{
1571 int order;
1572 unsigned long tmp;
1573
1574 for (order = 0, tmp = size >> 1; tmp; tmp >>= 1, order++)
1575 ;
1576
1577 if (size & (size - 1))
1578 ++order;
1579
1580 return order;
1581}
1582EXPORT_SYMBOL(drm_order);
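drm_order() computes ceil(log2(size)): the loop finds floor(log2), and the power-of-two test bumps the result for non-exact sizes. A quick user-space check, with the function body copied verbatim and only the harness added:

/* Self-test of drm_order(); the sample sizes are arbitrary. */
#include <assert.h>
#include <stdio.h>

static int drm_order(unsigned long size)
{
        int order;
        unsigned long tmp;

        for (order = 0, tmp = size >> 1; tmp; tmp >>= 1, order++)
                ;
        if (size & (size - 1))
                ++order;
        return order;
}

int main(void)
{
        assert(drm_order(1) == 0);       /* exact power of two */
        assert(drm_order(4096) == 12);
        assert(drm_order(4097) == 13);   /* rounded up to the next power */
        assert(drm_order(65536) == 16);
        printf("drm_order: 1 << order is always >= size\n");
        return 0;
}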
diff --git a/drivers/char/drm/drm_context.c b/drivers/char/drm/drm_context.c
index a7cfabd1ca2e..f515567e5b6f 100644
--- a/drivers/char/drm/drm_context.c
+++ b/drivers/char/drm/drm_context.c
@@ -212,6 +212,7 @@ int drm_getsareactx(struct inode *inode, struct file *filp,
212 drm_ctx_priv_map_t __user *argp = (void __user *)arg; 212 drm_ctx_priv_map_t __user *argp = (void __user *)arg;
213 drm_ctx_priv_map_t request; 213 drm_ctx_priv_map_t request;
214 drm_map_t *map; 214 drm_map_t *map;
215 drm_map_list_t *_entry;
215 216
216 if (copy_from_user(&request, argp, sizeof(request))) 217 if (copy_from_user(&request, argp, sizeof(request)))
217 return -EFAULT; 218 return -EFAULT;
@@ -225,7 +226,17 @@ int drm_getsareactx(struct inode *inode, struct file *filp,
225 map = dev->context_sareas[request.ctx_id]; 226 map = dev->context_sareas[request.ctx_id];
226 up(&dev->struct_sem); 227 up(&dev->struct_sem);
227 228
228 request.handle = (void *) map->offset; 229 request.handle = 0;
230 list_for_each_entry(_entry, &dev->maplist->head,head) {
231 if (_entry->map == map) {
232 request.handle = (void *)(unsigned long)_entry->user_token;
233 break;
234 }
235 }
236 if (request.handle == 0)
237 return -EINVAL;
238
239
229 if (copy_to_user(argp, &request, sizeof(request))) 240 if (copy_to_user(argp, &request, sizeof(request)))
230 return -EFAULT; 241 return -EFAULT;
231 return 0; 242 return 0;
@@ -262,7 +273,7 @@ int drm_setsareactx(struct inode *inode, struct file *filp,
262 list_for_each(list, &dev->maplist->head) { 273 list_for_each(list, &dev->maplist->head) {
263 r_list = list_entry(list, drm_map_list_t, head); 274 r_list = list_entry(list, drm_map_list_t, head);
264 if (r_list->map 275 if (r_list->map
265 && r_list->map->offset == (unsigned long) request.handle) 276 && r_list->user_token == (unsigned long) request.handle)
266 goto found; 277 goto found;
267 } 278 }
268bad: 279bad:
@@ -369,7 +380,7 @@ int drm_resctx( struct inode *inode, struct file *filp,
369 for ( i = 0 ; i < DRM_RESERVED_CONTEXTS ; i++ ) { 380 for ( i = 0 ; i < DRM_RESERVED_CONTEXTS ; i++ ) {
370 ctx.handle = i; 381 ctx.handle = i;
371 if ( copy_to_user( &res.contexts[i], 382 if ( copy_to_user( &res.contexts[i],
372 &i, sizeof(i) ) ) 383 &ctx, sizeof(ctx) ) )
373 return -EFAULT; 384 return -EFAULT;
374 } 385 }
375 } 386 }
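The getsareactx change above stops leaking the raw map->offset and instead hands user space the map's user_token, found by a reverse walk over the map list. A simplified stand-in for that lookup, with drm_map_list_t reduced to a plain array; types and values are illustrative only:

/* Sketch of the map -> user_token reverse lookup. */
#include <stdio.h>

struct map      { unsigned long offset; };
struct map_list { struct map *map; unsigned long user_token; };

static unsigned long token_for(struct map_list *list, int n, struct map *map)
{
        int i;

        for (i = 0; i < n; i++)          /* mirrors list_for_each_entry() */
                if (list[i].map == map)
                        return list[i].user_token;
        return 0;                        /* caller treats 0 as -EINVAL */
}

int main(void)
{
        struct map maps[2] = { { 0xe0000000UL }, { 0xe8000000UL } };
        struct map_list list[2] = {
                { &maps[0], 0x10000 },
                { &maps[1], 0x20000 },
        };

        printf("token = 0x%lx\n", token_for(list, 2, &maps[1]));
        return 0;
}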
diff --git a/drivers/char/drm/drm_drv.c b/drivers/char/drm/drm_drv.c
index 3333c250c4d9..6ba48f346fcf 100644
--- a/drivers/char/drm/drm_drv.c
+++ b/drivers/char/drm/drm_drv.c
@@ -70,8 +70,8 @@ static drm_ioctl_desc_t drm_ioctls[] = {
70 [DRM_IOCTL_NR(DRM_IOCTL_UNBLOCK)] = { drm_noop, 1, 1 }, 70 [DRM_IOCTL_NR(DRM_IOCTL_UNBLOCK)] = { drm_noop, 1, 1 },
71 [DRM_IOCTL_NR(DRM_IOCTL_AUTH_MAGIC)] = { drm_authmagic, 1, 1 }, 71 [DRM_IOCTL_NR(DRM_IOCTL_AUTH_MAGIC)] = { drm_authmagic, 1, 1 },
72 72
73 [DRM_IOCTL_NR(DRM_IOCTL_ADD_MAP)] = { drm_addmap, 1, 1 }, 73 [DRM_IOCTL_NR(DRM_IOCTL_ADD_MAP)] = { drm_addmap_ioctl,1, 1 },
74 [DRM_IOCTL_NR(DRM_IOCTL_RM_MAP)] = { drm_rmmap, 1, 0 }, 74 [DRM_IOCTL_NR(DRM_IOCTL_RM_MAP)] = { drm_rmmap_ioctl, 1, 0 },
75 75
76 [DRM_IOCTL_NR(DRM_IOCTL_SET_SAREA_CTX)] = { drm_setsareactx, 1, 1 }, 76 [DRM_IOCTL_NR(DRM_IOCTL_SET_SAREA_CTX)] = { drm_setsareactx, 1, 1 },
77 [DRM_IOCTL_NR(DRM_IOCTL_GET_SAREA_CTX)] = { drm_getsareactx, 1, 0 }, 77 [DRM_IOCTL_NR(DRM_IOCTL_GET_SAREA_CTX)] = { drm_getsareactx, 1, 0 },
@@ -102,10 +102,10 @@ static drm_ioctl_desc_t drm_ioctls[] = {
102 [DRM_IOCTL_NR(DRM_IOCTL_CONTROL)] = { drm_control, 1, 1 }, 102 [DRM_IOCTL_NR(DRM_IOCTL_CONTROL)] = { drm_control, 1, 1 },
103 103
104#if __OS_HAS_AGP 104#if __OS_HAS_AGP
105 [DRM_IOCTL_NR(DRM_IOCTL_AGP_ACQUIRE)] = { drm_agp_acquire, 1, 1 }, 105 [DRM_IOCTL_NR(DRM_IOCTL_AGP_ACQUIRE)] = { drm_agp_acquire_ioctl, 1, 1 },
106 [DRM_IOCTL_NR(DRM_IOCTL_AGP_RELEASE)] = { drm_agp_release, 1, 1 }, 106 [DRM_IOCTL_NR(DRM_IOCTL_AGP_RELEASE)] = { drm_agp_release_ioctl, 1, 1 },
107 [DRM_IOCTL_NR(DRM_IOCTL_AGP_ENABLE)] = { drm_agp_enable, 1, 1 }, 107 [DRM_IOCTL_NR(DRM_IOCTL_AGP_ENABLE)] = { drm_agp_enable_ioctl, 1, 1 },
108 [DRM_IOCTL_NR(DRM_IOCTL_AGP_INFO)] = { drm_agp_info, 1, 0 }, 108 [DRM_IOCTL_NR(DRM_IOCTL_AGP_INFO)] = { drm_agp_info_ioctl, 1, 0 },
109 [DRM_IOCTL_NR(DRM_IOCTL_AGP_ALLOC)] = { drm_agp_alloc, 1, 1 }, 109 [DRM_IOCTL_NR(DRM_IOCTL_AGP_ALLOC)] = { drm_agp_alloc, 1, 1 },
110 [DRM_IOCTL_NR(DRM_IOCTL_AGP_FREE)] = { drm_agp_free, 1, 1 }, 110 [DRM_IOCTL_NR(DRM_IOCTL_AGP_FREE)] = { drm_agp_free, 1, 1 },
111 [DRM_IOCTL_NR(DRM_IOCTL_AGP_BIND)] = { drm_agp_bind, 1, 1 }, 111 [DRM_IOCTL_NR(DRM_IOCTL_AGP_BIND)] = { drm_agp_bind, 1, 1 },
@@ -127,14 +127,12 @@ static drm_ioctl_desc_t drm_ioctls[] = {
127 * 127 *
128 * Frees every resource in \p dev. 128 * Frees every resource in \p dev.
129 * 129 *
130 * \sa drm_device and setup(). 130 * \sa drm_device
131 */ 131 */
132int drm_takedown( drm_device_t *dev ) 132int drm_takedown( drm_device_t *dev )
133{ 133{
134 drm_magic_entry_t *pt, *next; 134 drm_magic_entry_t *pt, *next;
135 drm_map_t *map;
136 drm_map_list_t *r_list; 135 drm_map_list_t *r_list;
137 struct list_head *list, *list_next;
138 drm_vma_entry_t *vma, *vma_next; 136 drm_vma_entry_t *vma, *vma_next;
139 int i; 137 int i;
140 138
@@ -142,6 +140,7 @@ int drm_takedown( drm_device_t *dev )
142 140
143 if (dev->driver->pretakedown) 141 if (dev->driver->pretakedown)
144 dev->driver->pretakedown(dev); 142 dev->driver->pretakedown(dev);
143 DRM_DEBUG("driver pretakedown completed\n");
145 144
146 if (dev->unique) { 145 if (dev->unique) {
147 drm_free(dev->unique, strlen(dev->unique) + 1, DRM_MEM_DRIVER); 146 drm_free(dev->unique, strlen(dev->unique) + 1, DRM_MEM_DRIVER);
@@ -178,11 +177,16 @@ int drm_takedown( drm_device_t *dev )
178 } 177 }
179 dev->agp->memory = NULL; 178 dev->agp->memory = NULL;
180 179
181 if ( dev->agp->acquired ) drm_agp_do_release(dev); 180 if (dev->agp->acquired)
181 drm_agp_release(dev);
182 182
183 dev->agp->acquired = 0; 183 dev->agp->acquired = 0;
184 dev->agp->enabled = 0; 184 dev->agp->enabled = 0;
185 } 185 }
186 if (drm_core_check_feature(dev, DRIVER_SG) && dev->sg) {
187 drm_sg_cleanup(dev->sg);
188 dev->sg = NULL;
189 }
186 190
187 /* Clear vma list (only built for debugging) */ 191 /* Clear vma list (only built for debugging) */
188 if ( dev->vmalist ) { 192 if ( dev->vmalist ) {
@@ -194,48 +198,11 @@ int drm_takedown( drm_device_t *dev )
194 } 198 }
195 199
196 if( dev->maplist ) { 200 if( dev->maplist ) {
197 list_for_each_safe( list, list_next, &dev->maplist->head ) { 201 while (!list_empty(&dev->maplist->head)) {
198 r_list = (drm_map_list_t *)list; 202 struct list_head *list = dev->maplist->head.next;
199 203 r_list = list_entry(list, drm_map_list_t, head);
200 if ( ( map = r_list->map ) ) { 204 drm_rmmap_locked(dev, r_list->map);
201 switch ( map->type ) { 205 }
202 case _DRM_REGISTERS:
203 case _DRM_FRAME_BUFFER:
204 if (drm_core_has_MTRR(dev)) {
205 if ( map->mtrr >= 0 ) {
206 int retcode;
207 retcode = mtrr_del( map->mtrr,
208 map->offset,
209 map->size );
210 DRM_DEBUG( "mtrr_del=%d\n", retcode );
211 }
212 }
213 drm_ioremapfree( map->handle, map->size, dev );
214 break;
215 case _DRM_SHM:
216 vfree(map->handle);
217 break;
218
219 case _DRM_AGP:
220 /* Do nothing here, because this is all
221 * handled in the AGP/GART driver.
222 */
223 break;
224 case _DRM_SCATTER_GATHER:
225 /* Handle it */
226 if (drm_core_check_feature(dev, DRIVER_SG) && dev->sg) {
227 drm_sg_cleanup(dev->sg);
228 dev->sg = NULL;
229 }
230 break;
231 }
232 drm_free(map, sizeof(*map), DRM_MEM_MAPS);
233 }
234 list_del( list );
235 drm_free(r_list, sizeof(*r_list), DRM_MEM_MAPS);
236 }
237 drm_free(dev->maplist, sizeof(*dev->maplist), DRM_MEM_MAPS);
238 dev->maplist = NULL;
239 } 206 }
240 207
241 if (drm_core_check_feature(dev, DRIVER_DMA_QUEUE) && dev->queuelist ) { 208 if (drm_core_check_feature(dev, DRIVER_DMA_QUEUE) && dev->queuelist ) {
@@ -264,6 +231,7 @@ int drm_takedown( drm_device_t *dev )
264 } 231 }
265 up( &dev->struct_sem ); 232 up( &dev->struct_sem );
266 233
234 DRM_DEBUG("takedown completed\n");
267 return 0; 235 return 0;
268} 236}
269 237
@@ -312,7 +280,7 @@ EXPORT_SYMBOL(drm_init);
312 * 280 *
313 * Cleans up all DRM device, calling takedown(). 281 * Cleans up all DRM device, calling takedown().
314 * 282 *
315 * \sa drm_init(). 283 * \sa drm_init
316 */ 284 */
317static void drm_cleanup( drm_device_t *dev ) 285static void drm_cleanup( drm_device_t *dev )
318{ 286{
@@ -325,6 +293,11 @@ static void drm_cleanup( drm_device_t *dev )
325 293
326 drm_takedown( dev ); 294 drm_takedown( dev );
327 295
296 if (dev->maplist) {
297 drm_free(dev->maplist, sizeof(*dev->maplist), DRM_MEM_MAPS);
298 dev->maplist = NULL;
299 }
300
328 drm_ctxbitmap_cleanup( dev ); 301 drm_ctxbitmap_cleanup( dev );
329 302
330 if (drm_core_has_MTRR(dev) && drm_core_has_AGP(dev) && 303 if (drm_core_has_MTRR(dev) && drm_core_has_AGP(dev) &&
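The maplist teardown in drm_takedown() now drains the list head-first instead of iterating with list_for_each_safe(), which stays correct because drm_rmmap_locked() unlinks each entry itself. A sketch of the pattern, with plain pointers standing in for list_head:

/* Drain-the-head teardown pattern; the node type is a stand-in. */
#include <stdio.h>
#include <stdlib.h>

struct node { struct node *next; int id; };

static void drain(struct node **head)
{
        while (*head) {                  /* while (!list_empty(...)) */
                struct node *n = *head;  /* ...->head.next */
                *head = n->next;         /* drm_rmmap_locked() analogue */
                printf("removing map %d\n", n->id);
                free(n);
        }
}

int main(void)
{
        struct node *head = NULL;
        int i;

        for (i = 0; i < 3; i++) {
                struct node *n = malloc(sizeof(*n));
                n->id = i;
                n->next = head;
                head = n;
        }
        drain(&head);
        return 0;
}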
diff --git a/drivers/char/drm/drm_fops.c b/drivers/char/drm/drm_fops.c
index 10e64fde8d78..a1f4e9cd64ed 100644
--- a/drivers/char/drm/drm_fops.c
+++ b/drivers/char/drm/drm_fops.c
@@ -71,12 +71,6 @@ static int drm_setup( drm_device_t *dev )
71 dev->magiclist[i].tail = NULL; 71 dev->magiclist[i].tail = NULL;
72 } 72 }
73 73
74 dev->maplist = drm_alloc(sizeof(*dev->maplist),
75 DRM_MEM_MAPS);
76 if(dev->maplist == NULL) return -ENOMEM;
77 memset(dev->maplist, 0, sizeof(*dev->maplist));
78 INIT_LIST_HEAD(&dev->maplist->head);
79
80 dev->ctxlist = drm_alloc(sizeof(*dev->ctxlist), 74 dev->ctxlist = drm_alloc(sizeof(*dev->ctxlist),
81 DRM_MEM_CTXLIST); 75 DRM_MEM_CTXLIST);
82 if(dev->ctxlist == NULL) return -ENOMEM; 76 if(dev->ctxlist == NULL) return -ENOMEM;
diff --git a/drivers/char/drm/drm_ioctl.c b/drivers/char/drm/drm_ioctl.c
index 39afda0ccabe..d2ed3ba5aca9 100644
--- a/drivers/char/drm/drm_ioctl.c
+++ b/drivers/char/drm/drm_ioctl.c
@@ -208,7 +208,7 @@ int drm_getmap( struct inode *inode, struct file *filp,
208 map.size = r_list->map->size; 208 map.size = r_list->map->size;
209 map.type = r_list->map->type; 209 map.type = r_list->map->type;
210 map.flags = r_list->map->flags; 210 map.flags = r_list->map->flags;
211 map.handle = r_list->map->handle; 211 map.handle = (void *)(unsigned long) r_list->user_token;
212 map.mtrr = r_list->map->mtrr; 212 map.mtrr = r_list->map->mtrr;
213 up(&dev->struct_sem); 213 up(&dev->struct_sem);
214 214
diff --git a/drivers/char/drm/drm_memory.c b/drivers/char/drm/drm_memory.c
index ace3d42f4407..ff483fb418aa 100644
--- a/drivers/char/drm/drm_memory.c
+++ b/drivers/char/drm/drm_memory.c
@@ -142,27 +142,31 @@ void drm_free_pages(unsigned long address, int order, int area)
142 142
143#if __OS_HAS_AGP 143#if __OS_HAS_AGP
144/** Wrapper around agp_allocate_memory() */ 144/** Wrapper around agp_allocate_memory() */
145DRM_AGP_MEM *drm_alloc_agp(struct agp_bridge_data *bridge, int pages, u32 type) 145DRM_AGP_MEM *drm_alloc_agp(drm_device_t *dev, int pages, u32 type)
146{ 146{
147 return drm_agp_allocate_memory(bridge, pages, type); 147 return drm_agp_allocate_memory(dev->agp->bridge, pages, type);
148} 148}
149EXPORT_SYMBOL(drm_alloc_agp);
149 150
150/** Wrapper around agp_free_memory() */ 151/** Wrapper around agp_free_memory() */
151int drm_free_agp(DRM_AGP_MEM *handle, int pages) 152int drm_free_agp(DRM_AGP_MEM *handle, int pages)
152{ 153{
153 return drm_agp_free_memory(handle) ? 0 : -EINVAL; 154 return drm_agp_free_memory(handle) ? 0 : -EINVAL;
154} 155}
156EXPORT_SYMBOL(drm_free_agp);
155 157
156/** Wrapper around agp_bind_memory() */ 158/** Wrapper around agp_bind_memory() */
157int drm_bind_agp(DRM_AGP_MEM *handle, unsigned int start) 159int drm_bind_agp(DRM_AGP_MEM *handle, unsigned int start)
158{ 160{
159 return drm_agp_bind_memory(handle, start); 161 return drm_agp_bind_memory(handle, start);
160} 162}
163EXPORT_SYMBOL(drm_bind_agp);
161 164
162/** Wrapper around agp_unbind_memory() */ 165/** Wrapper around agp_unbind_memory() */
163int drm_unbind_agp(DRM_AGP_MEM *handle) 166int drm_unbind_agp(DRM_AGP_MEM *handle)
164{ 167{
165 return drm_agp_unbind_memory(handle); 168 return drm_agp_unbind_memory(handle);
166} 169}
170EXPORT_SYMBOL(drm_unbind_agp);
167#endif /* agp */ 171#endif /* agp */
168#endif /* debug_memory */ 172#endif /* debug_memory */
diff --git a/drivers/char/drm/drm_pci.c b/drivers/char/drm/drm_pci.c
index 192e8762571c..09ed712c1a7f 100644
--- a/drivers/char/drm/drm_pci.c
+++ b/drivers/char/drm/drm_pci.c
@@ -46,11 +46,11 @@
46/** 46/**
47 * \brief Allocate a PCI consistent memory block, for DMA. 47 * \brief Allocate a PCI consistent memory block, for DMA.
48 */ 48 */
49void *drm_pci_alloc(drm_device_t * dev, size_t size, size_t align, 49drm_dma_handle_t *drm_pci_alloc(drm_device_t * dev, size_t size, size_t align,
50 dma_addr_t maxaddr, dma_addr_t * busaddr) 50 dma_addr_t maxaddr)
51{ 51{
52 void *address; 52 drm_dma_handle_t *dmah;
53#if DRM_DEBUG_MEMORY 53#ifdef DRM_DEBUG_MEMORY
54 int area = DRM_MEM_DMA; 54 int area = DRM_MEM_DMA;
55 55
56 spin_lock(&drm_mem_lock); 56 spin_lock(&drm_mem_lock);
@@ -74,13 +74,19 @@ void *drm_pci_alloc(drm_device_t * dev, size_t size, size_t align,
74 return NULL; 74 return NULL;
75 } 75 }
76 76
77 address = pci_alloc_consistent(dev->pdev, size, busaddr); 77 dmah = kmalloc(sizeof(drm_dma_handle_t), GFP_KERNEL);
78 if (!dmah)
79 return NULL;
80
81 dmah->size = size;
82 dmah->vaddr = pci_alloc_consistent(dev->pdev, size, &dmah->busaddr);
78 83
79#if DRM_DEBUG_MEMORY 84#ifdef DRM_DEBUG_MEMORY
80 if (address == NULL) { 85 if (dmah->vaddr == NULL) {
81 spin_lock(&drm_mem_lock); 86 spin_lock(&drm_mem_lock);
82 ++drm_mem_stats[area].fail_count; 87 ++drm_mem_stats[area].fail_count;
83 spin_unlock(&drm_mem_lock); 88 spin_unlock(&drm_mem_lock);
89 kfree(dmah);
84 return NULL; 90 return NULL;
85 } 91 }
86 92
@@ -90,37 +96,42 @@ void *drm_pci_alloc(drm_device_t * dev, size_t size, size_t align,
90 drm_ram_used += size; 96 drm_ram_used += size;
91 spin_unlock(&drm_mem_lock); 97 spin_unlock(&drm_mem_lock);
92#else 98#else
93 if (address == NULL) 99 if (dmah->vaddr == NULL) {
100 kfree(dmah);
94 return NULL; 101 return NULL;
102 }
95#endif 103#endif
96 104
97 memset(address, 0, size); 105 memset(dmah->vaddr, 0, size);
98 106
99 return address; 107 return dmah;
100} 108}
101EXPORT_SYMBOL(drm_pci_alloc); 109EXPORT_SYMBOL(drm_pci_alloc);
102 110
103/** 111/**
104 * \brief Free a PCI consistent memory block. 112 * \brief Free a PCI consistent memory block with freeing its descriptor.
113 *
114 * This function is for internal use in the Linux-specific DRM core code.
105 */ 115 */
106void 116void
107drm_pci_free(drm_device_t * dev, size_t size, void *vaddr, dma_addr_t busaddr) 117__drm_pci_free(drm_device_t * dev, drm_dma_handle_t *dmah)
108{ 118{
109#if DRM_DEBUG_MEMORY 119#ifdef DRM_DEBUG_MEMORY
110 int area = DRM_MEM_DMA; 120 int area = DRM_MEM_DMA;
111 int alloc_count; 121 int alloc_count;
112 int free_count; 122 int free_count;
113#endif 123#endif
114 124
115 if (!vaddr) { 125 if (!dmah->vaddr) {
116#if DRM_DEBUG_MEMORY 126#ifdef DRM_DEBUG_MEMORY
117 DRM_MEM_ERROR(area, "Attempt to free address 0\n"); 127 DRM_MEM_ERROR(area, "Attempt to free address 0\n");
118#endif 128#endif
119 } else { 129 } else {
120 pci_free_consistent(dev->pdev, size, vaddr, busaddr); 130 pci_free_consistent(dev->pdev, dmah->size, dmah->vaddr,
131 dmah->busaddr);
121 } 132 }
122 133
123#if DRM_DEBUG_MEMORY 134#ifdef DRM_DEBUG_MEMORY
124 spin_lock(&drm_mem_lock); 135 spin_lock(&drm_mem_lock);
125 free_count = ++drm_mem_stats[area].free_count; 136 free_count = ++drm_mem_stats[area].free_count;
126 alloc_count = drm_mem_stats[area].succeed_count; 137 alloc_count = drm_mem_stats[area].succeed_count;
@@ -135,6 +146,16 @@ drm_pci_free(drm_device_t * dev, size_t size, void *vaddr, dma_addr_t busaddr)
135#endif 146#endif
136 147
137} 148}
149
150/**
151 * \brief Free a PCI consistent memory block
152 */
153void
154drm_pci_free(drm_device_t *dev, drm_dma_handle_t *dmah)
155{
156 __drm_pci_free(dev, dmah);
157 kfree(dmah);
158}
138EXPORT_SYMBOL(drm_pci_free); 159EXPORT_SYMBOL(drm_pci_free);
139 160
140/*@}*/ 161/*@}*/
diff --git a/drivers/char/drm/drm_pciids.h b/drivers/char/drm/drm_pciids.h
index 70ca4fa55c9d..58b1747cd440 100644
--- a/drivers/char/drm/drm_pciids.h
+++ b/drivers/char/drm/drm_pciids.h
@@ -25,6 +25,8 @@
25 {0x1002, 0x4965, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R250}, \ 25 {0x1002, 0x4965, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R250}, \
26 {0x1002, 0x4966, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R250}, \ 26 {0x1002, 0x4966, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R250}, \
27 {0x1002, 0x4967, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R250}, \ 27 {0x1002, 0x4967, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R250}, \
28 {0x1002, 0x4A49, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420}, \
29 {0x1002, 0x4A4B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420}, \
28 {0x1002, 0x4C57, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV200|CHIP_IS_MOBILITY}, \ 30 {0x1002, 0x4C57, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV200|CHIP_IS_MOBILITY}, \
29 {0x1002, 0x4C58, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV200|CHIP_IS_MOBILITY}, \ 31 {0x1002, 0x4C58, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV200|CHIP_IS_MOBILITY}, \
30 {0x1002, 0x4C59, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV100|CHIP_IS_MOBILITY}, \ 32 {0x1002, 0x4C59, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV100|CHIP_IS_MOBILITY}, \
@@ -33,7 +35,17 @@
33 {0x1002, 0x4C65, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R250|CHIP_IS_MOBILITY}, \ 35 {0x1002, 0x4C65, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R250|CHIP_IS_MOBILITY}, \
34 {0x1002, 0x4C66, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R250|CHIP_IS_MOBILITY}, \ 36 {0x1002, 0x4C66, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R250|CHIP_IS_MOBILITY}, \
35 {0x1002, 0x4C67, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R250|CHIP_IS_MOBILITY}, \ 37 {0x1002, 0x4C67, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R250|CHIP_IS_MOBILITY}, \
38 {0x1002, 0x4E44, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R300}, \
39 {0x1002, 0x4E45, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R300}, \
40 {0x1002, 0x4E46, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350}, \
41 {0x1002, 0x4E47, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R300}, \
42 {0x1002, 0x4E48, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R350}, \
43 {0x1002, 0x4E49, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R350}, \
44 {0x1002, 0x4E4A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350}, \
45 {0x1002, 0x4E4B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R350}, \
36 {0x1002, 0x4E50, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350|CHIP_IS_MOBILITY}, \ 46 {0x1002, 0x4E50, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350|CHIP_IS_MOBILITY}, \
47 {0x1002, 0x4E51, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350|CHIP_IS_MOBILITY}, \
48 {0x1002, 0x4E54, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350|CHIP_IS_MOBILITY}, \
37 {0x1002, 0x5144, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R100|CHIP_SINGLE_CRTC}, \ 49 {0x1002, 0x5144, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R100|CHIP_SINGLE_CRTC}, \
38 {0x1002, 0x5145, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R100|CHIP_SINGLE_CRTC}, \ 50 {0x1002, 0x5145, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R100|CHIP_SINGLE_CRTC}, \
39 {0x1002, 0x5146, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R100|CHIP_SINGLE_CRTC}, \ 51 {0x1002, 0x5146, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R100|CHIP_SINGLE_CRTC}, \
@@ -56,6 +68,7 @@
56 {0x1002, 0x516A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R200}, \ 68 {0x1002, 0x516A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R200}, \
57 {0x1002, 0x516B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R200}, \ 69 {0x1002, 0x516B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R200}, \
58 {0x1002, 0x516C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R200}, \ 70 {0x1002, 0x516C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R200}, \
71 {0x1002, 0x5460, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350}, \
59 {0x1002, 0x5834, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS300|CHIP_IS_IGP}, \ 72 {0x1002, 0x5834, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS300|CHIP_IS_IGP}, \
60 {0x1002, 0x5835, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS300|CHIP_IS_IGP|CHIP_IS_MOBILITY}, \ 73 {0x1002, 0x5835, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS300|CHIP_IS_IGP|CHIP_IS_MOBILITY}, \
61 {0x1002, 0x5836, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS300|CHIP_IS_IGP}, \ 74 {0x1002, 0x5836, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS300|CHIP_IS_IGP}, \
@@ -116,9 +129,10 @@
116 {0, 0, 0} 129 {0, 0, 0}
117 130
118#define mga_PCI_IDS \ 131#define mga_PCI_IDS \
119 {0x102b, 0x0521, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ 132 {0x102b, 0x0520, PCI_ANY_ID, PCI_ANY_ID, 0, 0, MGA_CARD_TYPE_G200}, \
120 {0x102b, 0x0525, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ 133 {0x102b, 0x0521, PCI_ANY_ID, PCI_ANY_ID, 0, 0, MGA_CARD_TYPE_G200}, \
121 {0x102b, 0x2527, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ 134 {0x102b, 0x0525, PCI_ANY_ID, PCI_ANY_ID, 0, 0, MGA_CARD_TYPE_G400}, \
135 {0x102b, 0x2527, PCI_ANY_ID, PCI_ANY_ID, 0, 0, MGA_CARD_TYPE_G550}, \
122 {0, 0, 0} 136 {0, 0, 0}
123 137
124#define mach64_PCI_IDS \ 138#define mach64_PCI_IDS \
@@ -162,9 +176,10 @@
162 176
163#define viadrv_PCI_IDS \ 177#define viadrv_PCI_IDS \
164 {0x1106, 0x3022, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ 178 {0x1106, 0x3022, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
179 {0x1106, 0x3118, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
165 {0x1106, 0x3122, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ 180 {0x1106, 0x3122, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
166 {0x1106, 0x7205, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ 181 {0x1106, 0x7205, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
167 {0x1106, 0x7204, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ 182 {0x1106, 0x3108, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
168 {0, 0, 0} 183 {0, 0, 0}
169 184
170#define i810_PCI_IDS \ 185#define i810_PCI_IDS \
@@ -181,33 +196,30 @@
181 {0x8086, 0x2572, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ 196 {0x8086, 0x2572, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
182 {0, 0, 0} 197 {0, 0, 0}
183 198
184#define gamma_PCI_IDS \
185 {0x3d3d, 0x0008, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
186 {0, 0, 0}
187
188#define savage_PCI_IDS \ 199#define savage_PCI_IDS \
189 {0x5333, 0x8a22, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ 200 {0x5333, 0x8a20, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SAVAGE3D}, \
190 {0x5333, 0x8a23, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ 201 {0x5333, 0x8a21, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SAVAGE3D}, \
191 {0x5333, 0x8c10, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ 202 {0x5333, 0x8a22, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SAVAGE4}, \
192 {0x5333, 0x8c11, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ 203 {0x5333, 0x8a23, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SAVAGE4}, \
193 {0x5333, 0x8c12, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ 204 {0x5333, 0x8c10, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SAVAGE_MX}, \
194 {0x5333, 0x8c13, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ 205 {0x5333, 0x8c11, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SAVAGE_MX}, \
195 {0x5333, 0x8c20, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ 206 {0x5333, 0x8c12, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SAVAGE_MX}, \
196 {0x5333, 0x8c21, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ 207 {0x5333, 0x8c13, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SAVAGE_MX}, \
197 {0x5333, 0x8c22, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ 208 {0x5333, 0x8c22, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SUPERSAVAGE}, \
198 {0x5333, 0x8c24, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ 209 {0x5333, 0x8c24, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SUPERSAVAGE}, \
199 {0x5333, 0x8c26, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ 210 {0x5333, 0x8c26, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SUPERSAVAGE}, \
200 {0x5333, 0x8c2a, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ 211 {0x5333, 0x8c2a, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SUPERSAVAGE}, \
201 {0x5333, 0x8c2b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ 212 {0x5333, 0x8c2b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SUPERSAVAGE}, \
202 {0x5333, 0x8c2c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ 213 {0x5333, 0x8c2c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SUPERSAVAGE}, \
203 {0x5333, 0x8c2d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ 214 {0x5333, 0x8c2d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SUPERSAVAGE}, \
204 {0x5333, 0x8c2e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ 215 {0x5333, 0x8c2e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SUPERSAVAGE}, \
205 {0x5333, 0x8c2f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ 216 {0x5333, 0x8c2f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SUPERSAVAGE}, \
206 {0x5333, 0x8a25, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ 217 {0x5333, 0x8a25, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_PROSAVAGE}, \
207 {0x5333, 0x8a26, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ 218 {0x5333, 0x8a26, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_PROSAVAGE}, \
208 {0x5333, 0x8d01, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ 219 {0x5333, 0x8d01, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_TWISTER}, \
209 {0x5333, 0x8d02, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ 220 {0x5333, 0x8d02, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_TWISTER}, \
210 {0x5333, 0x8d04, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ 221 {0x5333, 0x8d03, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_PROSAVAGEDDR}, \
222 {0x5333, 0x8d04, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_PROSAVAGEDDR}, \
211 {0, 0, 0} 223 {0, 0, 0}
212 224
213#define ffb_PCI_IDS \ 225#define ffb_PCI_IDS \
@@ -223,10 +235,3 @@
223 {0x8086, 0x2772, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ 235 {0x8086, 0x2772, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
224 {0, 0, 0} 236 {0, 0, 0}
225 237
226#define viadrv_PCI_IDS \
227 {0x1106, 0x3022, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
228 {0x1106, 0x3122, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
229 {0x1106, 0x7205, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
230 {0x1106, 0x7204, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
231 {0, 0, 0}
232
diff --git a/drivers/char/drm/drm_proc.c b/drivers/char/drm/drm_proc.c
index 4774087d2e9e..32d2bb99462c 100644
--- a/drivers/char/drm/drm_proc.c
+++ b/drivers/char/drm/drm_proc.c
@@ -210,8 +210,8 @@ static int drm__vm_info(char *buf, char **start, off_t offset, int request,
210 210
211 /* Hardcoded from _DRM_FRAME_BUFFER, 211 /* Hardcoded from _DRM_FRAME_BUFFER,
212 _DRM_REGISTERS, _DRM_SHM, _DRM_AGP, and 212 _DRM_REGISTERS, _DRM_SHM, _DRM_AGP, and
213 _DRM_SCATTER_GATHER. */ 213 _DRM_SCATTER_GATHER and _DRM_CONSISTENT */
214 const char *types[] = { "FB", "REG", "SHM", "AGP", "SG" }; 214 const char *types[] = { "FB", "REG", "SHM", "AGP", "SG", "PCI" };
215 const char *type; 215 const char *type;
216 int i; 216 int i;
217 217
@@ -229,16 +229,19 @@ static int drm__vm_info(char *buf, char **start, off_t offset, int request,
229 if (dev->maplist != NULL) list_for_each(list, &dev->maplist->head) { 229 if (dev->maplist != NULL) list_for_each(list, &dev->maplist->head) {
230 r_list = list_entry(list, drm_map_list_t, head); 230 r_list = list_entry(list, drm_map_list_t, head);
231 map = r_list->map; 231 map = r_list->map;
232 if(!map) continue; 232 if(!map)
233 if (map->type < 0 || map->type > 4) type = "??"; 233 continue;
234 else type = types[map->type]; 234 if (map->type < 0 || map->type > 5)
235 DRM_PROC_PRINT("%4d 0x%08lx 0x%08lx %4.4s 0x%02x 0x%08lx ", 235 type = "??";
236 else
237 type = types[map->type];
238 DRM_PROC_PRINT("%4d 0x%08lx 0x%08lx %4.4s 0x%02x 0x%08x ",
236 i, 239 i,
237 map->offset, 240 map->offset,
238 map->size, 241 map->size,
239 type, 242 type,
240 map->flags, 243 map->flags,
241 (unsigned long)map->handle); 244 r_list->user_token);
242 if (map->mtrr < 0) { 245 if (map->mtrr < 0) {
243 DRM_PROC_PRINT("none\n"); 246 DRM_PROC_PRINT("none\n");
244 } else { 247 } else {
diff --git a/drivers/char/drm/drm_scatter.c b/drivers/char/drm/drm_scatter.c
index 54fddb6ea2d1..ed267d49bc6a 100644
--- a/drivers/char/drm/drm_scatter.c
+++ b/drivers/char/drm/drm_scatter.c
@@ -61,6 +61,12 @@ void drm_sg_cleanup( drm_sg_mem_t *entry )
61 DRM_MEM_SGLISTS ); 61 DRM_MEM_SGLISTS );
62} 62}
63 63
64#ifdef _LP64
65# define ScatterHandle(x) (unsigned int)((x >> 32) + (x & ((1L << 32) - 1)))
66#else
67# define ScatterHandle(x) (unsigned int)(x)
68#endif
69
64int drm_sg_alloc( struct inode *inode, struct file *filp, 70int drm_sg_alloc( struct inode *inode, struct file *filp,
65 unsigned int cmd, unsigned long arg ) 71 unsigned int cmd, unsigned long arg )
66{ 72{
@@ -133,12 +139,13 @@ int drm_sg_alloc( struct inode *inode, struct file *filp,
133 */ 139 */
134 memset( entry->virtual, 0, pages << PAGE_SHIFT ); 140 memset( entry->virtual, 0, pages << PAGE_SHIFT );
135 141
136 entry->handle = (unsigned long)entry->virtual; 142 entry->handle = ScatterHandle((unsigned long)entry->virtual);
137 143
138 DRM_DEBUG( "sg alloc handle = %08lx\n", entry->handle ); 144 DRM_DEBUG( "sg alloc handle = %08lx\n", entry->handle );
139 DRM_DEBUG( "sg alloc virtual = %p\n", entry->virtual ); 145 DRM_DEBUG( "sg alloc virtual = %p\n", entry->virtual );
140 146
141 for ( i = entry->handle, j = 0 ; j < pages ; i += PAGE_SIZE, j++ ) { 147 for (i = (unsigned long)entry->virtual, j = 0; j < pages;
148 i += PAGE_SIZE, j++) {
142 entry->pagelist[j] = vmalloc_to_page((void *)i); 149 entry->pagelist[j] = vmalloc_to_page((void *)i);
143 if (!entry->pagelist[j]) 150 if (!entry->pagelist[j])
144 goto failed; 151 goto failed;
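ScatterHandle() exists because on 64-bit kernels the vmalloc address no longer fits in the 32-bit handle exposed to user space; note the high and low halves are summed, not truncated. A standalone check of the macro, with a made-up vmalloc-range address:

/* Compile-time check of the ScatterHandle() fold; addresses are examples. */
#include <stdio.h>

#ifdef _LP64
# define ScatterHandle(x) (unsigned int)((x >> 32) + (x & ((1L << 32) - 1)))
#else
# define ScatterHandle(x) (unsigned int)(x)
#endif

int main(void)
{
#ifdef _LP64
        unsigned long virt = 0xffffc90001234000UL; /* hypothetical vmalloc addr */
#else
        unsigned long virt = 0xc9012340UL;
#endif
        printf("virtual %#lx -> handle %#x\n", virt, ScatterHandle(virt));
        return 0;
}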
diff --git a/drivers/char/drm/drm_stub.c b/drivers/char/drm/drm_stub.c
index 48829a1a086a..95a976c96eb8 100644
--- a/drivers/char/drm/drm_stub.c
+++ b/drivers/char/drm/drm_stub.c
@@ -75,6 +75,11 @@ static int drm_fill_in_dev(drm_device_t *dev, struct pci_dev *pdev, const struct
75 dev->pci_func = PCI_FUNC(pdev->devfn); 75 dev->pci_func = PCI_FUNC(pdev->devfn);
76 dev->irq = pdev->irq; 76 dev->irq = pdev->irq;
77 77
78 dev->maplist = drm_calloc(1, sizeof(*dev->maplist), DRM_MEM_MAPS);
79 if (dev->maplist == NULL)
80 return -ENOMEM;
81 INIT_LIST_HEAD(&dev->maplist->head);
82
78 /* the DRM has 6 basic counters */ 83 /* the DRM has 6 basic counters */
79 dev->counters = 6; 84 dev->counters = 6;
80 dev->types[0] = _DRM_STAT_LOCK; 85 dev->types[0] = _DRM_STAT_LOCK;
@@ -91,7 +96,8 @@ static int drm_fill_in_dev(drm_device_t *dev, struct pci_dev *pdev, const struct
91 goto error_out_unreg; 96 goto error_out_unreg;
92 97
93 if (drm_core_has_AGP(dev)) { 98 if (drm_core_has_AGP(dev)) {
94 dev->agp = drm_agp_init(dev); 99 if (drm_device_is_agp(dev))
100 dev->agp = drm_agp_init(dev);
95 if (drm_core_check_feature(dev, DRIVER_REQUIRE_AGP) && (dev->agp == NULL)) { 101 if (drm_core_check_feature(dev, DRIVER_REQUIRE_AGP) && (dev->agp == NULL)) {
96 DRM_ERROR( "Cannot initialize the agpgart module.\n" ); 102 DRM_ERROR( "Cannot initialize the agpgart module.\n" );
97 retcode = -EINVAL; 103 retcode = -EINVAL;
diff --git a/drivers/char/drm/drm_vm.c b/drivers/char/drm/drm_vm.c
index 621220f3f372..ced4215e2275 100644
--- a/drivers/char/drm/drm_vm.c
+++ b/drivers/char/drm/drm_vm.c
@@ -73,12 +73,13 @@ static __inline__ struct page *drm_do_vm_nopage(struct vm_area_struct *vma,
73 r_list = list_entry(list, drm_map_list_t, head); 73 r_list = list_entry(list, drm_map_list_t, head);
74 map = r_list->map; 74 map = r_list->map;
75 if (!map) continue; 75 if (!map) continue;
76 if (map->offset == VM_OFFSET(vma)) break; 76 if (r_list->user_token == VM_OFFSET(vma))
77 break;
77 } 78 }
78 79
79 if (map && map->type == _DRM_AGP) { 80 if (map && map->type == _DRM_AGP) {
80 unsigned long offset = address - vma->vm_start; 81 unsigned long offset = address - vma->vm_start;
81 unsigned long baddr = VM_OFFSET(vma) + offset; 82 unsigned long baddr = map->offset + offset;
82 struct drm_agp_mem *agpmem; 83 struct drm_agp_mem *agpmem;
83 struct page *page; 84 struct page *page;
84 85
@@ -210,6 +211,8 @@ static void drm_vm_shm_close(struct vm_area_struct *vma)
210 } 211 }
211 212
212 if(!found_maps) { 213 if(!found_maps) {
214 drm_dma_handle_t dmah;
215
213 switch (map->type) { 216 switch (map->type) {
214 case _DRM_REGISTERS: 217 case _DRM_REGISTERS:
215 case _DRM_FRAME_BUFFER: 218 case _DRM_FRAME_BUFFER:
@@ -228,6 +231,12 @@ static void drm_vm_shm_close(struct vm_area_struct *vma)
228 case _DRM_AGP: 231 case _DRM_AGP:
229 case _DRM_SCATTER_GATHER: 232 case _DRM_SCATTER_GATHER:
230 break; 233 break;
234 case _DRM_CONSISTENT:
235 dmah.vaddr = map->handle;
236 dmah.busaddr = map->offset;
237 dmah.size = map->size;
238 __drm_pci_free(dev, &dmah);
239 break;
231 } 240 }
232 drm_free(map, sizeof(*map), DRM_MEM_MAPS); 241 drm_free(map, sizeof(*map), DRM_MEM_MAPS);
233 } 242 }
@@ -296,7 +305,7 @@ static __inline__ struct page *drm_do_vm_sg_nopage(struct vm_area_struct *vma,
296 305
297 306
298 offset = address - vma->vm_start; 307 offset = address - vma->vm_start;
299 map_offset = map->offset - dev->sg->handle; 308 map_offset = map->offset - (unsigned long)dev->sg->virtual;
300 page_offset = (offset >> PAGE_SHIFT) + (map_offset >> PAGE_SHIFT); 309 page_offset = (offset >> PAGE_SHIFT) + (map_offset >> PAGE_SHIFT);
301 page = entry->pagelist[page_offset]; 310 page = entry->pagelist[page_offset];
302 get_page(page); 311 get_page(page);
@@ -305,8 +314,6 @@ static __inline__ struct page *drm_do_vm_sg_nopage(struct vm_area_struct *vma,
305} 314}
306 315
307 316
308#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,0)
309
310static struct page *drm_vm_nopage(struct vm_area_struct *vma, 317static struct page *drm_vm_nopage(struct vm_area_struct *vma,
311 unsigned long address, 318 unsigned long address,
312 int *type) { 319 int *type) {
@@ -335,35 +342,6 @@ static struct page *drm_vm_sg_nopage(struct vm_area_struct *vma,
335 return drm_do_vm_sg_nopage(vma, address); 342 return drm_do_vm_sg_nopage(vma, address);
336} 343}
337 344
338#else /* LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,0) */
339
340static struct page *drm_vm_nopage(struct vm_area_struct *vma,
341 unsigned long address,
342 int unused) {
343 return drm_do_vm_nopage(vma, address);
344}
345
346static struct page *drm_vm_shm_nopage(struct vm_area_struct *vma,
347 unsigned long address,
348 int unused) {
349 return drm_do_vm_shm_nopage(vma, address);
350}
351
352static struct page *drm_vm_dma_nopage(struct vm_area_struct *vma,
353 unsigned long address,
354 int unused) {
355 return drm_do_vm_dma_nopage(vma, address);
356}
357
358static struct page *drm_vm_sg_nopage(struct vm_area_struct *vma,
359 unsigned long address,
360 int unused) {
361 return drm_do_vm_sg_nopage(vma, address);
362}
363
364#endif
365
366
367/** AGP virtual memory operations */ 345/** AGP virtual memory operations */
368static struct vm_operations_struct drm_vm_ops = { 346static struct vm_operations_struct drm_vm_ops = {
369 .nopage = drm_vm_nopage, 347 .nopage = drm_vm_nopage,
@@ -487,11 +465,7 @@ static int drm_mmap_dma(struct file *filp, struct vm_area_struct *vma)
487 465
488 vma->vm_ops = &drm_vm_dma_ops; 466 vma->vm_ops = &drm_vm_dma_ops;
489 467
490#if LINUX_VERSION_CODE <= 0x02040e /* KERNEL_VERSION(2,4,14) */
491 vma->vm_flags |= VM_LOCKED | VM_SHM; /* Don't swap */
492#else
493 vma->vm_flags |= VM_RESERVED; /* Don't swap */ 468 vma->vm_flags |= VM_RESERVED; /* Don't swap */
494#endif
495 469
496 vma->vm_file = filp; /* Needed for drm_vm_open() */ 470 vma->vm_file = filp; /* Needed for drm_vm_open() */
497 drm_vm_open(vma); 471 drm_vm_open(vma);
@@ -560,13 +534,12 @@ int drm_mmap(struct file *filp, struct vm_area_struct *vma)
560 for performance, even if the list was a 534 for performance, even if the list was a
561 bit longer. */ 535 bit longer. */
562 list_for_each(list, &dev->maplist->head) { 536 list_for_each(list, &dev->maplist->head) {
563 unsigned long off;
564 537
565 r_list = list_entry(list, drm_map_list_t, head); 538 r_list = list_entry(list, drm_map_list_t, head);
566 map = r_list->map; 539 map = r_list->map;
567 if (!map) continue; 540 if (!map) continue;
568 off = dev->driver->get_map_ofs(map); 541 if (r_list->user_token == VM_OFFSET(vma))
569 if (off == VM_OFFSET(vma)) break; 542 break;
570 } 543 }
571 544
572 if (!map || ((map->flags&_DRM_RESTRICTED) && !capable(CAP_SYS_ADMIN))) 545 if (!map || ((map->flags&_DRM_RESTRICTED) && !capable(CAP_SYS_ADMIN)))
@@ -605,17 +578,17 @@ int drm_mmap(struct file *filp, struct vm_area_struct *vma)
605 /* fall through to _DRM_FRAME_BUFFER... */ 578 /* fall through to _DRM_FRAME_BUFFER... */
606 case _DRM_FRAME_BUFFER: 579 case _DRM_FRAME_BUFFER:
607 case _DRM_REGISTERS: 580 case _DRM_REGISTERS:
608 if (VM_OFFSET(vma) >= __pa(high_memory)) {
609#if defined(__i386__) || defined(__x86_64__) 581#if defined(__i386__) || defined(__x86_64__)
610 if (boot_cpu_data.x86 > 3 && map->type != _DRM_AGP) { 582 if (boot_cpu_data.x86 > 3 && map->type != _DRM_AGP) {
611 pgprot_val(vma->vm_page_prot) |= _PAGE_PCD; 583 pgprot_val(vma->vm_page_prot) |= _PAGE_PCD;
612 pgprot_val(vma->vm_page_prot) &= ~_PAGE_PWT; 584 pgprot_val(vma->vm_page_prot) &= ~_PAGE_PWT;
613 } 585 }
614#elif defined(__powerpc__) 586#elif defined(__powerpc__)
615 pgprot_val(vma->vm_page_prot) |= _PAGE_NO_CACHE | _PAGE_GUARDED; 587 pgprot_val(vma->vm_page_prot) |= _PAGE_NO_CACHE;
588 if (map->type == _DRM_REGISTERS)
589 pgprot_val(vma->vm_page_prot) |= _PAGE_GUARDED;
616#endif 590#endif
617 vma->vm_flags |= VM_IO; /* not in core dump */ 591 vma->vm_flags |= VM_IO; /* not in core dump */
618 }
619#if defined(__ia64__) 592#if defined(__ia64__)
620 if (efi_range_is_wc(vma->vm_start, vma->vm_end - 593 if (efi_range_is_wc(vma->vm_start, vma->vm_end -
621 vma->vm_start)) 594 vma->vm_start))
@@ -628,12 +601,12 @@ int drm_mmap(struct file *filp, struct vm_area_struct *vma)
628 offset = dev->driver->get_reg_ofs(dev); 601 offset = dev->driver->get_reg_ofs(dev);
629#ifdef __sparc__ 602#ifdef __sparc__
630 if (io_remap_pfn_range(DRM_RPR_ARG(vma) vma->vm_start, 603 if (io_remap_pfn_range(DRM_RPR_ARG(vma) vma->vm_start,
631 (VM_OFFSET(vma) + offset) >> PAGE_SHIFT, 604 (map->offset + offset) >> PAGE_SHIFT,
632 vma->vm_end - vma->vm_start, 605 vma->vm_end - vma->vm_start,
633 vma->vm_page_prot)) 606 vma->vm_page_prot))
634#else 607#else
635 if (io_remap_pfn_range(vma, vma->vm_start, 608 if (io_remap_pfn_range(vma, vma->vm_start,
636 (VM_OFFSET(vma) + offset) >> PAGE_SHIFT, 609 (map->offset + offset) >> PAGE_SHIFT,
637 vma->vm_end - vma->vm_start, 610 vma->vm_end - vma->vm_start,
638 vma->vm_page_prot)) 611 vma->vm_page_prot))
639#endif 612#endif
@@ -641,37 +614,28 @@ int drm_mmap(struct file *filp, struct vm_area_struct *vma)
641 DRM_DEBUG(" Type = %d; start = 0x%lx, end = 0x%lx," 614 DRM_DEBUG(" Type = %d; start = 0x%lx, end = 0x%lx,"
642 " offset = 0x%lx\n", 615 " offset = 0x%lx\n",
643 map->type, 616 map->type,
644 vma->vm_start, vma->vm_end, VM_OFFSET(vma) + offset); 617 vma->vm_start, vma->vm_end, map->offset + offset);
645 vma->vm_ops = &drm_vm_ops; 618 vma->vm_ops = &drm_vm_ops;
646 break; 619 break;
647 case _DRM_SHM: 620 case _DRM_SHM:
621 case _DRM_CONSISTENT:
 622 /* Consistent memory is really like shared memory. It is just
 623 * allocated in a different way. */
648 vma->vm_ops = &drm_vm_shm_ops; 624 vma->vm_ops = &drm_vm_shm_ops;
649 vma->vm_private_data = (void *)map; 625 vma->vm_private_data = (void *)map;
650 /* Don't let this area swap. Change when 626 /* Don't let this area swap. Change when
651 DRM_KERNEL advisory is supported. */ 627 DRM_KERNEL advisory is supported. */
652#if LINUX_VERSION_CODE <= 0x02040e /* KERNEL_VERSION(2,4,14) */
653 vma->vm_flags |= VM_LOCKED;
654#else
655 vma->vm_flags |= VM_RESERVED; 628 vma->vm_flags |= VM_RESERVED;
656#endif
657 break; 629 break;
658 case _DRM_SCATTER_GATHER: 630 case _DRM_SCATTER_GATHER:
659 vma->vm_ops = &drm_vm_sg_ops; 631 vma->vm_ops = &drm_vm_sg_ops;
660 vma->vm_private_data = (void *)map; 632 vma->vm_private_data = (void *)map;
661#if LINUX_VERSION_CODE <= 0x02040e /* KERNEL_VERSION(2,4,14) */
662 vma->vm_flags |= VM_LOCKED;
663#else
664 vma->vm_flags |= VM_RESERVED; 633 vma->vm_flags |= VM_RESERVED;
665#endif
666 break; 634 break;
667 default: 635 default:
668 return -EINVAL; /* This should never happen. */ 636 return -EINVAL; /* This should never happen. */
669 } 637 }
670#if LINUX_VERSION_CODE <= 0x02040e /* KERNEL_VERSION(2,4,14) */
671 vma->vm_flags |= VM_LOCKED | VM_SHM; /* Don't swap */
672#else
673 vma->vm_flags |= VM_RESERVED; /* Don't swap */ 638 vma->vm_flags |= VM_RESERVED; /* Don't swap */
674#endif
675 639
676 vma->vm_file = filp; /* Needed for drm_vm_open() */ 640 vma->vm_file = filp; /* Needed for drm_vm_open() */
677 drm_vm_open(vma); 641 drm_vm_open(vma);
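With map->offset now holding the real scatter-gather address and r_list->user_token serving as the mmap cookie, drm_do_vm_sg_nopage() turns a faulting address into a pagelist index as sketched below; all addresses are made up for illustration and PG_SHIFT stands in for PAGE_SHIFT:

/* Arithmetic sketch of the sg nopage lookup after this patch. */
#include <stdio.h>

#define PG_SHIFT 12

int main(void)
{
        unsigned long vm_start    = 0x40000000UL; /* hypothetical mmap base */
        unsigned long address     = 0x40003000UL; /* faulting address */
        unsigned long sg_virtual  = 0xd0000000UL; /* dev->sg->virtual */
        unsigned long map_off_abs = 0xd0010000UL; /* map->offset */

        unsigned long offset      = address - vm_start;
        unsigned long map_offset  = map_off_abs - sg_virtual;
        unsigned long page_offset = (offset >> PG_SHIFT)
                                  + (map_offset >> PG_SHIFT);

        printf("pagelist index = %lu\n", page_offset); /* 3 + 16 = 19 */
        return 0;
}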
diff --git a/drivers/char/drm/ffb_drv.c b/drivers/char/drm/ffb_drv.c
index ec614fff8f04..1bd0d55ee0f0 100644
--- a/drivers/char/drm/ffb_drv.c
+++ b/drivers/char/drm/ffb_drv.c
@@ -152,14 +152,11 @@ static drm_map_t *ffb_find_map(struct file *filp, unsigned long off)
152 return NULL; 152 return NULL;
153 153
154 list_for_each(list, &dev->maplist->head) { 154 list_for_each(list, &dev->maplist->head) {
155 unsigned long uoff;
156
157 r_list = (drm_map_list_t *)list; 155 r_list = (drm_map_list_t *)list;
158 map = r_list->map; 156 map = r_list->map;
159 if (!map) 157 if (!map)
160 continue; 158 continue;
161 uoff = (map->offset & 0xffffffff); 159 if (r_list->user_token == off)
162 if (uoff == off)
163 return map; 160 return map;
164 } 161 }
165 162
diff --git a/drivers/char/drm/gamma_context.h b/drivers/char/drm/gamma_context.h
deleted file mode 100644
index d11b507f87ee..000000000000
--- a/drivers/char/drm/gamma_context.h
+++ /dev/null
@@ -1,492 +0,0 @@
1/* drm_context.h -- IOCTLs for generic contexts -*- linux-c -*-
2 * Created: Fri Nov 24 18:31:37 2000 by gareth@valinux.com
3 *
4 * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
5 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
6 * All Rights Reserved.
7 *
8 * Permission is hereby granted, free of charge, to any person obtaining a
9 * copy of this software and associated documentation files (the "Software"),
10 * to deal in the Software without restriction, including without limitation
11 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
12 * and/or sell copies of the Software, and to permit persons to whom the
13 * Software is furnished to do so, subject to the following conditions:
14 *
15 * The above copyright notice and this permission notice (including the next
16 * paragraph) shall be included in all copies or substantial portions of the
17 * Software.
18 *
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
22 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
23 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
24 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
25 * OTHER DEALINGS IN THE SOFTWARE.
26 *
27 * Authors:
28 * Rickard E. (Rik) Faith <faith@valinux.com>
29 * Gareth Hughes <gareth@valinux.com>
30 * ChangeLog:
31 * 2001-11-16 Torsten Duwe <duwe@caldera.de>
32 * added context constructor/destructor hooks,
33 * needed by SiS driver's memory management.
34 */
35
36/* ================================================================
37 * Old-style context support -- only used by gamma.
38 */
39
40
41/* The drm_read and drm_write_string code (especially that which manages
 42 the circular buffer) is based on Alessandro Rubini's LINUX DEVICE
43 DRIVERS (Cambridge: O'Reilly, 1998), pages 111-113. */
44
45ssize_t gamma_fops_read(struct file *filp, char __user *buf, size_t count, loff_t *off)
46{
47 drm_file_t *priv = filp->private_data;
48 drm_device_t *dev = priv->dev;
49 int left;
50 int avail;
51 int send;
52 int cur;
53
54 DRM_DEBUG("%p, %p\n", dev->buf_rp, dev->buf_wp);
55
56 while (dev->buf_rp == dev->buf_wp) {
57 DRM_DEBUG(" sleeping\n");
58 if (filp->f_flags & O_NONBLOCK) {
59 return -EAGAIN;
60 }
61 interruptible_sleep_on(&dev->buf_readers);
62 if (signal_pending(current)) {
63 DRM_DEBUG(" interrupted\n");
64 return -ERESTARTSYS;
65 }
66 DRM_DEBUG(" awake\n");
67 }
68
69 left = (dev->buf_rp + DRM_BSZ - dev->buf_wp) % DRM_BSZ;
70 avail = DRM_BSZ - left;
71 send = DRM_MIN(avail, count);
72
73 while (send) {
74 if (dev->buf_wp > dev->buf_rp) {
75 cur = DRM_MIN(send, dev->buf_wp - dev->buf_rp);
76 } else {
77 cur = DRM_MIN(send, dev->buf_end - dev->buf_rp);
78 }
79 if (copy_to_user(buf, dev->buf_rp, cur))
80 return -EFAULT;
81 dev->buf_rp += cur;
82 if (dev->buf_rp == dev->buf_end) dev->buf_rp = dev->buf;
83 send -= cur;
84 }
85
86 wake_up_interruptible(&dev->buf_writers);
87 return DRM_MIN(avail, count);
88}
89
90
91/* In an incredibly convoluted setup, the kernel module actually calls
92 * back into the X server to perform context switches on behalf of the
93 * 3d clients.
94 */
95int DRM(write_string)(drm_device_t *dev, const char *s)
96{
97 int left = (dev->buf_rp + DRM_BSZ - dev->buf_wp) % DRM_BSZ;
98 int send = strlen(s);
99 int count;
100
101 DRM_DEBUG("%d left, %d to send (%p, %p)\n",
102 left, send, dev->buf_rp, dev->buf_wp);
103
104 if (left == 1 || dev->buf_wp != dev->buf_rp) {
105 DRM_ERROR("Buffer not empty (%d left, wp = %p, rp = %p)\n",
106 left,
107 dev->buf_wp,
108 dev->buf_rp);
109 }
110
111 while (send) {
112 if (dev->buf_wp >= dev->buf_rp) {
113 count = DRM_MIN(send, dev->buf_end - dev->buf_wp);
114 if (count == left) --count; /* Leave a hole */
115 } else {
116 count = DRM_MIN(send, dev->buf_rp - dev->buf_wp - 1);
117 }
118 strncpy(dev->buf_wp, s, count);
119 dev->buf_wp += count;
120 if (dev->buf_wp == dev->buf_end) dev->buf_wp = dev->buf;
121 send -= count;
122 }
123
124 if (dev->buf_async) kill_fasync(&dev->buf_async, SIGIO, POLL_IN);
125
126 DRM_DEBUG("waking\n");
127 wake_up_interruptible(&dev->buf_readers);
128 return 0;
129}
130
131unsigned int gamma_fops_poll(struct file *filp, struct poll_table_struct *wait)
132{
133 drm_file_t *priv = filp->private_data;
134 drm_device_t *dev = priv->dev;
135
136 poll_wait(filp, &dev->buf_readers, wait);
137 if (dev->buf_wp != dev->buf_rp) return POLLIN | POLLRDNORM;
138 return 0;
139}
140
141int DRM(context_switch)(drm_device_t *dev, int old, int new)
142{
143 char buf[64];
144 drm_queue_t *q;
145
146 if (test_and_set_bit(0, &dev->context_flag)) {
147 DRM_ERROR("Reentering -- FIXME\n");
148 return -EBUSY;
149 }
150
151 DRM_DEBUG("Context switch from %d to %d\n", old, new);
152
153 if (new >= dev->queue_count) {
154 clear_bit(0, &dev->context_flag);
155 return -EINVAL;
156 }
157
158 if (new == dev->last_context) {
159 clear_bit(0, &dev->context_flag);
160 return 0;
161 }
162
163 q = dev->queuelist[new];
164 atomic_inc(&q->use_count);
165 if (atomic_read(&q->use_count) == 1) {
166 atomic_dec(&q->use_count);
167 clear_bit(0, &dev->context_flag);
168 return -EINVAL;
169 }
170
171 /* This causes the X server to wake up & do a bunch of hardware
172 * interaction to actually effect the context switch.
173 */
174 sprintf(buf, "C %d %d\n", old, new);
175 DRM(write_string)(dev, buf);
176
177 atomic_dec(&q->use_count);
178
179 return 0;
180}
181
182int DRM(context_switch_complete)(drm_device_t *dev, int new)
183{
184 drm_device_dma_t *dma = dev->dma;
185
186 dev->last_context = new; /* PRE/POST: This is the _only_ writer. */
187 dev->last_switch = jiffies;
188
189 if (!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) {
190 DRM_ERROR("Lock isn't held after context switch\n");
191 }
192
193 if (!dma || !(dma->next_buffer && dma->next_buffer->while_locked)) {
194 if (DRM(lock_free)(dev, &dev->lock.hw_lock->lock,
195 DRM_KERNEL_CONTEXT)) {
196 DRM_ERROR("Cannot free lock\n");
197 }
198 }
199
200 clear_bit(0, &dev->context_flag);
201 wake_up_interruptible(&dev->context_wait);
202
203 return 0;
204}
205
206static int DRM(init_queue)(drm_device_t *dev, drm_queue_t *q, drm_ctx_t *ctx)
207{
208 DRM_DEBUG("\n");
209
210 if (atomic_read(&q->use_count) != 1
211 || atomic_read(&q->finalization)
212 || atomic_read(&q->block_count)) {
213 DRM_ERROR("New queue is already in use: u%d f%d b%d\n",
214 atomic_read(&q->use_count),
215 atomic_read(&q->finalization),
216 atomic_read(&q->block_count));
217 }
218
219 atomic_set(&q->finalization, 0);
220 atomic_set(&q->block_count, 0);
221 atomic_set(&q->block_read, 0);
222 atomic_set(&q->block_write, 0);
223 atomic_set(&q->total_queued, 0);
224 atomic_set(&q->total_flushed, 0);
225 atomic_set(&q->total_locks, 0);
226
227 init_waitqueue_head(&q->write_queue);
228 init_waitqueue_head(&q->read_queue);
229 init_waitqueue_head(&q->flush_queue);
230
231 q->flags = ctx->flags;
232
233 DRM(waitlist_create)(&q->waitlist, dev->dma->buf_count);
234
235 return 0;
236}
237
238
239/* drm_alloc_queue:
240PRE: 1) dev->queuelist[0..dev->queue_count] is allocated and will not
241 disappear (so all deallocation must be done after IOCTLs are off)
242 2) dev->queue_count < dev->queue_slots
243 3) dev->queuelist[i].use_count == 0 and
244 dev->queuelist[i].finalization == 0 if i not in use
245POST: 1) dev->queuelist[i].use_count == 1
246 2) dev->queue_count < dev->queue_slots */
247
248static int DRM(alloc_queue)(drm_device_t *dev)
249{
250 int i;
251 drm_queue_t *queue;
252 int oldslots;
253 int newslots;
254 /* Check for a free queue */
255 for (i = 0; i < dev->queue_count; i++) {
256 atomic_inc(&dev->queuelist[i]->use_count);
257 if (atomic_read(&dev->queuelist[i]->use_count) == 1
258 && !atomic_read(&dev->queuelist[i]->finalization)) {
259 DRM_DEBUG("%d (free)\n", i);
260 return i;
261 }
262 atomic_dec(&dev->queuelist[i]->use_count);
263 }
264 /* Allocate a new queue */
265 down(&dev->struct_sem);
266
267 queue = DRM(alloc)(sizeof(*queue), DRM_MEM_QUEUES);
268 memset(queue, 0, sizeof(*queue));
269 atomic_set(&queue->use_count, 1);
270
271 ++dev->queue_count;
272 if (dev->queue_count >= dev->queue_slots) {
273 oldslots = dev->queue_slots * sizeof(*dev->queuelist);
274 if (!dev->queue_slots) dev->queue_slots = 1;
275 dev->queue_slots *= 2;
276 newslots = dev->queue_slots * sizeof(*dev->queuelist);
277
278 dev->queuelist = DRM(realloc)(dev->queuelist,
279 oldslots,
280 newslots,
281 DRM_MEM_QUEUES);
282 if (!dev->queuelist) {
283 up(&dev->struct_sem);
284 DRM_DEBUG("out of memory\n");
285 return -ENOMEM;
286 }
287 }
288 dev->queuelist[dev->queue_count-1] = queue;
289
290 up(&dev->struct_sem);
291 DRM_DEBUG("%d (new)\n", dev->queue_count - 1);
292 return dev->queue_count - 1;
293}
294
295int DRM(resctx)(struct inode *inode, struct file *filp,
296 unsigned int cmd, unsigned long arg)
297{
298 drm_ctx_res_t __user *argp = (void __user *)arg;
299 drm_ctx_res_t res;
300 drm_ctx_t ctx;
301 int i;
302
303 DRM_DEBUG("%d\n", DRM_RESERVED_CONTEXTS);
304 if (copy_from_user(&res, argp, sizeof(res)))
305 return -EFAULT;
306 if (res.count >= DRM_RESERVED_CONTEXTS) {
307 memset(&ctx, 0, sizeof(ctx));
308 for (i = 0; i < DRM_RESERVED_CONTEXTS; i++) {
309 ctx.handle = i;
 310			if (copy_to_user(&res.contexts[i],
 311					 &ctx,
 312					 sizeof(ctx)))
313 return -EFAULT;
314 }
315 }
316 res.count = DRM_RESERVED_CONTEXTS;
317 if (copy_to_user(argp, &res, sizeof(res)))
318 return -EFAULT;
319 return 0;
320}
321
322int DRM(addctx)(struct inode *inode, struct file *filp,
323 unsigned int cmd, unsigned long arg)
324{
325 drm_file_t *priv = filp->private_data;
326 drm_device_t *dev = priv->dev;
327 drm_ctx_t ctx;
328 drm_ctx_t __user *argp = (void __user *)arg;
329
330 if (copy_from_user(&ctx, argp, sizeof(ctx)))
331 return -EFAULT;
332 if ((ctx.handle = DRM(alloc_queue)(dev)) == DRM_KERNEL_CONTEXT) {
333 /* Init kernel's context and get a new one. */
334 DRM(init_queue)(dev, dev->queuelist[ctx.handle], &ctx);
335 ctx.handle = DRM(alloc_queue)(dev);
336 }
337 DRM(init_queue)(dev, dev->queuelist[ctx.handle], &ctx);
338 DRM_DEBUG("%d\n", ctx.handle);
339 if (copy_to_user(argp, &ctx, sizeof(ctx)))
340 return -EFAULT;
341 return 0;
342}
343
344int DRM(modctx)(struct inode *inode, struct file *filp,
345 unsigned int cmd, unsigned long arg)
346{
347 drm_file_t *priv = filp->private_data;
348 drm_device_t *dev = priv->dev;
349 drm_ctx_t ctx;
350 drm_queue_t *q;
351
352 if (copy_from_user(&ctx, (drm_ctx_t __user *)arg, sizeof(ctx)))
353 return -EFAULT;
354
355 DRM_DEBUG("%d\n", ctx.handle);
356
357 if (ctx.handle < 0 || ctx.handle >= dev->queue_count) return -EINVAL;
358 q = dev->queuelist[ctx.handle];
359
360 atomic_inc(&q->use_count);
361 if (atomic_read(&q->use_count) == 1) {
362 /* No longer in use */
363 atomic_dec(&q->use_count);
364 return -EINVAL;
365 }
366
367 if (DRM_BUFCOUNT(&q->waitlist)) {
368 atomic_dec(&q->use_count);
369 return -EBUSY;
370 }
371
372 q->flags = ctx.flags;
373
374 atomic_dec(&q->use_count);
375 return 0;
376}
377
378int DRM(getctx)(struct inode *inode, struct file *filp,
379 unsigned int cmd, unsigned long arg)
380{
381 drm_file_t *priv = filp->private_data;
382 drm_device_t *dev = priv->dev;
383 drm_ctx_t __user *argp = (void __user *)arg;
384 drm_ctx_t ctx;
385 drm_queue_t *q;
386
387 if (copy_from_user(&ctx, argp, sizeof(ctx)))
388 return -EFAULT;
389
390 DRM_DEBUG("%d\n", ctx.handle);
391
392 if (ctx.handle >= dev->queue_count) return -EINVAL;
393 q = dev->queuelist[ctx.handle];
394
395 atomic_inc(&q->use_count);
396 if (atomic_read(&q->use_count) == 1) {
397 /* No longer in use */
398 atomic_dec(&q->use_count);
399 return -EINVAL;
400 }
401
402 ctx.flags = q->flags;
403 atomic_dec(&q->use_count);
404
405 if (copy_to_user(argp, &ctx, sizeof(ctx)))
406 return -EFAULT;
407
408 return 0;
409}
410
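modctx and getctx share one liveness probe: bump use_count, and if the new value is 1 the queue had no other owner, so the handle is stale and the bump is undone. A compact model of the idiom with C11 atomics (the kernel uses atomic_t; this sketch is illustrative only):

#include <stdatomic.h>
#include <stdio.h>

/* Returns 0 and holds a reference if the object was live, -1 if stale. */
static int try_get(atomic_int *use_count)
{
	if (atomic_fetch_add(use_count, 1) + 1 == 1) {
		/* We were the only user: nobody owned it, the handle is stale. */
		atomic_fetch_sub(use_count, 1);
		return -1;
	}
	return 0;	/* caller must atomic_fetch_sub() when done */
}

int main(void)
{
	atomic_int live = 1;	/* creator's reference, like a live queue */
	atomic_int dead = 0;	/* no references, like a freed handle */

	printf("live: %d\n", try_get(&live));	/* 0  */
	printf("dead: %d\n", try_get(&dead));	/* -1 */
	return 0;
}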
411int DRM(switchctx)(struct inode *inode, struct file *filp,
412 unsigned int cmd, unsigned long arg)
413{
414 drm_file_t *priv = filp->private_data;
415 drm_device_t *dev = priv->dev;
416 drm_ctx_t ctx;
417
418 if (copy_from_user(&ctx, (drm_ctx_t __user *)arg, sizeof(ctx)))
419 return -EFAULT;
420 DRM_DEBUG("%d\n", ctx.handle);
421 return DRM(context_switch)(dev, dev->last_context, ctx.handle);
422}
423
424int DRM(newctx)(struct inode *inode, struct file *filp,
425 unsigned int cmd, unsigned long arg)
426{
427 drm_file_t *priv = filp->private_data;
428 drm_device_t *dev = priv->dev;
429 drm_ctx_t ctx;
430
431 if (copy_from_user(&ctx, (drm_ctx_t __user *)arg, sizeof(ctx)))
432 return -EFAULT;
433 DRM_DEBUG("%d\n", ctx.handle);
434 DRM(context_switch_complete)(dev, ctx.handle);
435
436 return 0;
437}
438
439int DRM(rmctx)(struct inode *inode, struct file *filp,
440 unsigned int cmd, unsigned long arg)
441{
442 drm_file_t *priv = filp->private_data;
443 drm_device_t *dev = priv->dev;
444 drm_ctx_t ctx;
445 drm_queue_t *q;
446 drm_buf_t *buf;
447
448 if (copy_from_user(&ctx, (drm_ctx_t __user *)arg, sizeof(ctx)))
449 return -EFAULT;
450 DRM_DEBUG("%d\n", ctx.handle);
451
452 if (ctx.handle >= dev->queue_count) return -EINVAL;
453 q = dev->queuelist[ctx.handle];
454
455 atomic_inc(&q->use_count);
456 if (atomic_read(&q->use_count) == 1) {
457 /* No longer in use */
458 atomic_dec(&q->use_count);
459 return -EINVAL;
460 }
461
462 atomic_inc(&q->finalization); /* Mark queue in finalization state */
463 atomic_sub(2, &q->use_count); /* Mark queue as unused (pending
464 finalization) */
465
466 while (test_and_set_bit(0, &dev->interrupt_flag)) {
467 schedule();
468 if (signal_pending(current)) {
469 clear_bit(0, &dev->interrupt_flag);
470 return -EINTR;
471 }
472 }
473 /* Remove queued buffers */
474 while ((buf = DRM(waitlist_get)(&q->waitlist))) {
475 DRM(free_buffer)(dev, buf);
476 }
477 clear_bit(0, &dev->interrupt_flag);
478
479 /* Wakeup blocked processes */
480 wake_up_interruptible(&q->read_queue);
481 wake_up_interruptible(&q->write_queue);
482 wake_up_interruptible(&q->flush_queue);
483
484 /* Finalization over. Queue is made
485 available when both use_count and
486 finalization become 0, which won't
487 happen until all the waiting processes
488 stop waiting. */
489 atomic_dec(&q->finalization);
490 return 0;
491}
492
diff --git a/drivers/char/drm/gamma_dma.c b/drivers/char/drm/gamma_dma.c
deleted file mode 100644
index e486fb8d31e9..000000000000
--- a/drivers/char/drm/gamma_dma.c
+++ /dev/null
@@ -1,946 +0,0 @@
1/* gamma_dma.c -- DMA support for GMX 2000 -*- linux-c -*-
2 * Created: Fri Mar 19 14:30:16 1999 by faith@precisioninsight.com
3 *
4 * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
5 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
6 * All Rights Reserved.
7 *
8 * Permission is hereby granted, free of charge, to any person obtaining a
9 * copy of this software and associated documentation files (the "Software"),
10 * to deal in the Software without restriction, including without limitation
11 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
12 * and/or sell copies of the Software, and to permit persons to whom the
13 * Software is furnished to do so, subject to the following conditions:
14 *
15 * The above copyright notice and this permission notice (including the next
16 * paragraph) shall be included in all copies or substantial portions of the
17 * Software.
18 *
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
22 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
23 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
24 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
25 * DEALINGS IN THE SOFTWARE.
26 *
27 * Authors:
28 * Rickard E. (Rik) Faith <faith@valinux.com>
29 *
30 */
31
32#include "gamma.h"
33#include "drmP.h"
34#include "drm.h"
35#include "gamma_drm.h"
36#include "gamma_drv.h"
37
38#include <linux/interrupt.h> /* For task queue support */
39#include <linux/delay.h>
40
41static inline void gamma_dma_dispatch(drm_device_t *dev, unsigned long address,
42 unsigned long length)
43{
44 drm_gamma_private_t *dev_priv =
45 (drm_gamma_private_t *)dev->dev_private;
46 mb();
47 while ( GAMMA_READ(GAMMA_INFIFOSPACE) < 2)
48 cpu_relax();
49
50 GAMMA_WRITE(GAMMA_DMAADDRESS, address);
51
52 while (GAMMA_READ(GAMMA_GCOMMANDSTATUS) != 4)
53 cpu_relax();
54
55 GAMMA_WRITE(GAMMA_DMACOUNT, length / 4);
56}
57
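gamma_dma_dispatch follows a poll-then-write discipline: never touch a register until GAMMA_INFIFOSPACE reports room, and gate the count write on GAMMA_GCOMMANDSTATUS reporting idle. A host-side sketch of that discipline against a simulated register file (offsets as defined in gamma_drv.h further down; the register behavior here is faked):

#include <stdint.h>
#include <stdio.h>

static uint32_t regs[0x1000];	/* simulated MMIO window, indexed by reg / 4 */

static uint32_t rd(uint32_t reg)             { return regs[reg / 4]; }
static void     wr(uint32_t reg, uint32_t v) { regs[reg / 4] = v; }

#define INFIFOSPACE    0x0018	/* offsets as in gamma_drv.h */
#define DMAADDRESS     0x0028
#define DMACOUNT       0x0030
#define GCOMMANDSTATUS 0x0c60

static void dispatch(uint32_t address, uint32_t length)
{
	while (rd(INFIFOSPACE) < 2)
		;			/* poll: need two free input FIFO slots */
	wr(DMAADDRESS, address);
	while (rd(GCOMMANDSTATUS) != 4)
		;			/* poll: wait for the command unit to idle */
	wr(DMACOUNT, length / 4);	/* the count register is in 32-bit words */
}

int main(void)
{
	wr(INFIFOSPACE, 8);		/* pretend the FIFO has room */
	wr(GCOMMANDSTATUS, 4);		/* pretend the unit is idle */
	dispatch(5 << 12, 4096);	/* logical page 5, one page of data */
	printf("addr=%#x count=%u\n", (unsigned)rd(DMAADDRESS),
	       (unsigned)rd(DMACOUNT));
	return 0;
}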
58void gamma_dma_quiescent_single(drm_device_t *dev)
59{
60 drm_gamma_private_t *dev_priv =
61 (drm_gamma_private_t *)dev->dev_private;
62 while (GAMMA_READ(GAMMA_DMACOUNT))
63 cpu_relax();
64
65 while (GAMMA_READ(GAMMA_INFIFOSPACE) < 2)
66 cpu_relax();
67
68 GAMMA_WRITE(GAMMA_FILTERMODE, 1 << 10);
69 GAMMA_WRITE(GAMMA_SYNC, 0);
70
71 do {
72 while (!GAMMA_READ(GAMMA_OUTFIFOWORDS))
73 cpu_relax();
74 } while (GAMMA_READ(GAMMA_OUTPUTFIFO) != GAMMA_SYNC_TAG);
75}
76
77void gamma_dma_quiescent_dual(drm_device_t *dev)
78{
79 drm_gamma_private_t *dev_priv =
80 (drm_gamma_private_t *)dev->dev_private;
81 while (GAMMA_READ(GAMMA_DMACOUNT))
82 cpu_relax();
83
84 while (GAMMA_READ(GAMMA_INFIFOSPACE) < 3)
85 cpu_relax();
86
87 GAMMA_WRITE(GAMMA_BROADCASTMASK, 3);
88 GAMMA_WRITE(GAMMA_FILTERMODE, 1 << 10);
89 GAMMA_WRITE(GAMMA_SYNC, 0);
90
91 /* Read from first MX */
92 do {
93 while (!GAMMA_READ(GAMMA_OUTFIFOWORDS))
94 cpu_relax();
95 } while (GAMMA_READ(GAMMA_OUTPUTFIFO) != GAMMA_SYNC_TAG);
96
97 /* Read from second MX */
98 do {
99 while (!GAMMA_READ(GAMMA_OUTFIFOWORDS + 0x10000))
100 cpu_relax();
101 } while (GAMMA_READ(GAMMA_OUTPUTFIFO + 0x10000) != GAMMA_SYNC_TAG);
102}
103
104void gamma_dma_ready(drm_device_t *dev)
105{
106 drm_gamma_private_t *dev_priv =
107 (drm_gamma_private_t *)dev->dev_private;
108 while (GAMMA_READ(GAMMA_DMACOUNT))
109 cpu_relax();
110}
111
112static inline int gamma_dma_is_ready(drm_device_t *dev)
113{
114 drm_gamma_private_t *dev_priv =
115 (drm_gamma_private_t *)dev->dev_private;
116 return (!GAMMA_READ(GAMMA_DMACOUNT));
117}
118
119irqreturn_t gamma_driver_irq_handler( DRM_IRQ_ARGS )
120{
121 drm_device_t *dev = (drm_device_t *)arg;
122 drm_device_dma_t *dma = dev->dma;
123 drm_gamma_private_t *dev_priv =
124 (drm_gamma_private_t *)dev->dev_private;
125
126 /* FIXME: should check whether we're actually interested in the interrupt? */
127 atomic_inc(&dev->counts[6]); /* _DRM_STAT_IRQ */
128
129 while (GAMMA_READ(GAMMA_INFIFOSPACE) < 3)
130 cpu_relax();
131
 132	GAMMA_WRITE(GAMMA_GDELAYTIMER, 0xc350/2); /* ~0.05 s */
133 GAMMA_WRITE(GAMMA_GCOMMANDINTFLAGS, 8);
134 GAMMA_WRITE(GAMMA_GINTFLAGS, 0x2001);
135 if (gamma_dma_is_ready(dev)) {
136 /* Free previous buffer */
137 if (test_and_set_bit(0, &dev->dma_flag))
138 return IRQ_HANDLED;
139 if (dma->this_buffer) {
140 gamma_free_buffer(dev, dma->this_buffer);
141 dma->this_buffer = NULL;
142 }
143 clear_bit(0, &dev->dma_flag);
144
145 /* Dispatch new buffer */
146 schedule_work(&dev->work);
147 }
148 return IRQ_HANDLED;
149}
150
151/* Only called by gamma_dma_schedule. */
152static int gamma_do_dma(drm_device_t *dev, int locked)
153{
154 unsigned long address;
155 unsigned long length;
156 drm_buf_t *buf;
157 int retcode = 0;
158 drm_device_dma_t *dma = dev->dma;
159
160 if (test_and_set_bit(0, &dev->dma_flag)) return -EBUSY;
161
162
163 if (!dma->next_buffer) {
164 DRM_ERROR("No next_buffer\n");
165 clear_bit(0, &dev->dma_flag);
166 return -EINVAL;
167 }
168
169 buf = dma->next_buffer;
 170	/* We are now on logical pages: the page table set up in dma_init */
 171	/* maps buffer index i to logical page i, so the address is idx << 12 */
172 address = buf->idx << 12;
173 length = buf->used;
174
175 DRM_DEBUG("context %d, buffer %d (%ld bytes)\n",
176 buf->context, buf->idx, length);
177
178 if (buf->list == DRM_LIST_RECLAIM) {
179 gamma_clear_next_buffer(dev);
180 gamma_free_buffer(dev, buf);
181 clear_bit(0, &dev->dma_flag);
182 return -EINVAL;
183 }
184
185 if (!length) {
186 DRM_ERROR("0 length buffer\n");
187 gamma_clear_next_buffer(dev);
188 gamma_free_buffer(dev, buf);
189 clear_bit(0, &dev->dma_flag);
190 return 0;
191 }
192
193 if (!gamma_dma_is_ready(dev)) {
194 clear_bit(0, &dev->dma_flag);
195 return -EBUSY;
196 }
197
198 if (buf->while_locked) {
199 if (!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) {
200 DRM_ERROR("Dispatching buffer %d from pid %d"
201 " \"while locked\", but no lock held\n",
202 buf->idx, current->pid);
203 }
204 } else {
205 if (!locked && !gamma_lock_take(&dev->lock.hw_lock->lock,
206 DRM_KERNEL_CONTEXT)) {
207 clear_bit(0, &dev->dma_flag);
208 return -EBUSY;
209 }
210 }
211
212 if (dev->last_context != buf->context
213 && !(dev->queuelist[buf->context]->flags
214 & _DRM_CONTEXT_PRESERVED)) {
215 /* PRE: dev->last_context != buf->context */
216 if (DRM(context_switch)(dev, dev->last_context,
217 buf->context)) {
218 DRM(clear_next_buffer)(dev);
219 DRM(free_buffer)(dev, buf);
220 }
221 retcode = -EBUSY;
222 goto cleanup;
223
224 /* POST: we will wait for the context
225 switch and will dispatch on a later call
226 when dev->last_context == buf->context.
227 NOTE WE HOLD THE LOCK THROUGHOUT THIS
228 TIME! */
229 }
230
231 gamma_clear_next_buffer(dev);
232 buf->pending = 1;
233 buf->waiting = 0;
234 buf->list = DRM_LIST_PEND;
235
 236	/* Logical pages again: override address with the buffer's page index */
237 address = buf->idx << 12;
238
239 gamma_dma_dispatch(dev, address, length);
240 gamma_free_buffer(dev, dma->this_buffer);
241 dma->this_buffer = buf;
242
243 atomic_inc(&dev->counts[7]); /* _DRM_STAT_DMA */
244 atomic_add(length, &dev->counts[8]); /* _DRM_STAT_PRIMARY */
245
246 if (!buf->while_locked && !dev->context_flag && !locked) {
247 if (gamma_lock_free(dev, &dev->lock.hw_lock->lock,
248 DRM_KERNEL_CONTEXT)) {
249 DRM_ERROR("\n");
250 }
251 }
252cleanup:
253
254 clear_bit(0, &dev->dma_flag);
255
256
257 return retcode;
258}
259
260static void gamma_dma_timer_bh(unsigned long dev)
261{
262 gamma_dma_schedule((drm_device_t *)dev, 0);
263}
264
265void gamma_irq_immediate_bh(void *dev)
266{
267 gamma_dma_schedule(dev, 0);
268}
269
270int gamma_dma_schedule(drm_device_t *dev, int locked)
271{
272 int next;
273 drm_queue_t *q;
274 drm_buf_t *buf;
275 int retcode = 0;
276 int processed = 0;
277 int missed;
278 int expire = 20;
279 drm_device_dma_t *dma = dev->dma;
280
281 if (test_and_set_bit(0, &dev->interrupt_flag)) {
282 /* Not reentrant */
283 atomic_inc(&dev->counts[10]); /* _DRM_STAT_MISSED */
284 return -EBUSY;
285 }
286 missed = atomic_read(&dev->counts[10]);
287
288
289again:
290 if (dev->context_flag) {
291 clear_bit(0, &dev->interrupt_flag);
292 return -EBUSY;
293 }
294 if (dma->next_buffer) {
295 /* Unsent buffer that was previously
296 selected, but that couldn't be sent
297 because the lock could not be obtained
298 or the DMA engine wasn't ready. Try
299 again. */
300 if (!(retcode = gamma_do_dma(dev, locked))) ++processed;
301 } else {
302 do {
303 next = gamma_select_queue(dev, gamma_dma_timer_bh);
304 if (next >= 0) {
305 q = dev->queuelist[next];
306 buf = gamma_waitlist_get(&q->waitlist);
307 dma->next_buffer = buf;
308 dma->next_queue = q;
309 if (buf && buf->list == DRM_LIST_RECLAIM) {
310 gamma_clear_next_buffer(dev);
311 gamma_free_buffer(dev, buf);
312 }
313 }
314 } while (next >= 0 && !dma->next_buffer);
315 if (dma->next_buffer) {
316 if (!(retcode = gamma_do_dma(dev, locked))) {
317 ++processed;
318 }
319 }
320 }
321
322 if (--expire) {
323 if (missed != atomic_read(&dev->counts[10])) {
324 if (gamma_dma_is_ready(dev)) goto again;
325 }
326 if (processed && gamma_dma_is_ready(dev)) {
327 processed = 0;
328 goto again;
329 }
330 }
331
332 clear_bit(0, &dev->interrupt_flag);
333
334 return retcode;
335}
336
337static int gamma_dma_priority(struct file *filp,
338 drm_device_t *dev, drm_dma_t *d)
339{
340 unsigned long address;
341 unsigned long length;
342 int must_free = 0;
343 int retcode = 0;
344 int i;
345 int idx;
346 drm_buf_t *buf;
347 drm_buf_t *last_buf = NULL;
348 drm_device_dma_t *dma = dev->dma;
349 int *send_indices = NULL;
350 int *send_sizes = NULL;
351
352 DECLARE_WAITQUEUE(entry, current);
353
354 /* Turn off interrupt handling */
355 while (test_and_set_bit(0, &dev->interrupt_flag)) {
356 schedule();
357 if (signal_pending(current)) return -EINTR;
358 }
359 if (!(d->flags & _DRM_DMA_WHILE_LOCKED)) {
360 while (!gamma_lock_take(&dev->lock.hw_lock->lock,
361 DRM_KERNEL_CONTEXT)) {
362 schedule();
363 if (signal_pending(current)) {
364 clear_bit(0, &dev->interrupt_flag);
365 return -EINTR;
366 }
367 }
368 ++must_free;
369 }
370
371 send_indices = DRM(alloc)(d->send_count * sizeof(*send_indices),
372 DRM_MEM_DRIVER);
 373	if (send_indices == NULL) { retcode = -ENOMEM; goto cleanup; }
 374
375 if (copy_from_user(send_indices, d->send_indices,
376 d->send_count * sizeof(*send_indices))) {
377 retcode = -EFAULT;
378 goto cleanup;
379 }
380
381 send_sizes = DRM(alloc)(d->send_count * sizeof(*send_sizes),
382 DRM_MEM_DRIVER);
 383	if (send_sizes == NULL) { retcode = -ENOMEM; goto cleanup; }
 384
385 if (copy_from_user(send_sizes, d->send_sizes,
386 d->send_count * sizeof(*send_sizes))) {
387 retcode = -EFAULT;
388 goto cleanup;
389 }
390
391 for (i = 0; i < d->send_count; i++) {
392 idx = send_indices[i];
393 if (idx < 0 || idx >= dma->buf_count) {
394 DRM_ERROR("Index %d (of %d max)\n",
395 send_indices[i], dma->buf_count - 1);
396 continue;
397 }
398 buf = dma->buflist[ idx ];
399 if (buf->filp != filp) {
400 DRM_ERROR("Process %d using buffer not owned\n",
401 current->pid);
402 retcode = -EINVAL;
403 goto cleanup;
404 }
405 if (buf->list != DRM_LIST_NONE) {
406 DRM_ERROR("Process %d using buffer on list %d\n",
407 current->pid, buf->list);
408 retcode = -EINVAL;
409 goto cleanup;
410 }
 411		/* This is not a race on buf->list:
 412		   the only reclaim happens while the
 413		   process closes its /dev/drm handle,
 414		   and a process that is closing the
 415		   handle cannot also be doing DMA. */
416 buf->list = DRM_LIST_PRIO;
417 buf->used = send_sizes[i];
418 buf->context = d->context;
419 buf->while_locked = d->flags & _DRM_DMA_WHILE_LOCKED;
420 address = (unsigned long)buf->address;
421 length = buf->used;
422 if (!length) {
423 DRM_ERROR("0 length buffer\n");
424 }
425 if (buf->pending) {
426 DRM_ERROR("Sending pending buffer:"
427 " buffer %d, offset %d\n",
428 send_indices[i], i);
429 retcode = -EINVAL;
430 goto cleanup;
431 }
432 if (buf->waiting) {
433 DRM_ERROR("Sending waiting buffer:"
434 " buffer %d, offset %d\n",
435 send_indices[i], i);
436 retcode = -EINVAL;
437 goto cleanup;
438 }
439 buf->pending = 1;
440
441 if (dev->last_context != buf->context
442 && !(dev->queuelist[buf->context]->flags
443 & _DRM_CONTEXT_PRESERVED)) {
444 add_wait_queue(&dev->context_wait, &entry);
445 current->state = TASK_INTERRUPTIBLE;
446 /* PRE: dev->last_context != buf->context */
447 DRM(context_switch)(dev, dev->last_context,
448 buf->context);
449 /* POST: we will wait for the context
450 switch and will dispatch on a later call
451 when dev->last_context == buf->context.
452 NOTE WE HOLD THE LOCK THROUGHOUT THIS
453 TIME! */
454 schedule();
455 current->state = TASK_RUNNING;
456 remove_wait_queue(&dev->context_wait, &entry);
457 if (signal_pending(current)) {
458 retcode = -EINTR;
459 goto cleanup;
460 }
461 if (dev->last_context != buf->context) {
462 DRM_ERROR("Context mismatch: %d %d\n",
463 dev->last_context,
464 buf->context);
465 }
466 }
467
468 gamma_dma_dispatch(dev, address, length);
469 atomic_inc(&dev->counts[9]); /* _DRM_STAT_SPECIAL */
470 atomic_add(length, &dev->counts[8]); /* _DRM_STAT_PRIMARY */
471
472 if (last_buf) {
473 gamma_free_buffer(dev, last_buf);
474 }
475 last_buf = buf;
476 }
477
478
479cleanup:
480 if (last_buf) {
481 gamma_dma_ready(dev);
482 gamma_free_buffer(dev, last_buf);
483 }
484 if (send_indices)
485 DRM(free)(send_indices, d->send_count * sizeof(*send_indices),
486 DRM_MEM_DRIVER);
487 if (send_sizes)
488 DRM(free)(send_sizes, d->send_count * sizeof(*send_sizes),
489 DRM_MEM_DRIVER);
490
491 if (must_free && !dev->context_flag) {
492 if (gamma_lock_free(dev, &dev->lock.hw_lock->lock,
493 DRM_KERNEL_CONTEXT)) {
494 DRM_ERROR("\n");
495 }
496 }
497 clear_bit(0, &dev->interrupt_flag);
498 return retcode;
499}
500
501static int gamma_dma_send_buffers(struct file *filp,
502 drm_device_t *dev, drm_dma_t *d)
503{
504 DECLARE_WAITQUEUE(entry, current);
505 drm_buf_t *last_buf = NULL;
506 int retcode = 0;
507 drm_device_dma_t *dma = dev->dma;
508 int send_index;
509
510 if (get_user(send_index, &d->send_indices[d->send_count-1]))
511 return -EFAULT;
512
513 if (d->flags & _DRM_DMA_BLOCK) {
514 last_buf = dma->buflist[send_index];
515 add_wait_queue(&last_buf->dma_wait, &entry);
516 }
517
518 if ((retcode = gamma_dma_enqueue(filp, d))) {
519 if (d->flags & _DRM_DMA_BLOCK)
520 remove_wait_queue(&last_buf->dma_wait, &entry);
521 return retcode;
522 }
523
524 gamma_dma_schedule(dev, 0);
525
526 if (d->flags & _DRM_DMA_BLOCK) {
527 DRM_DEBUG("%d waiting\n", current->pid);
528 for (;;) {
529 current->state = TASK_INTERRUPTIBLE;
530 if (!last_buf->waiting && !last_buf->pending)
531 break; /* finished */
532 schedule();
533 if (signal_pending(current)) {
534 retcode = -EINTR; /* Can't restart */
535 break;
536 }
537 }
538 current->state = TASK_RUNNING;
539 DRM_DEBUG("%d running\n", current->pid);
540 remove_wait_queue(&last_buf->dma_wait, &entry);
541 if (!retcode
542 || (last_buf->list==DRM_LIST_PEND && !last_buf->pending)) {
543 if (!waitqueue_active(&last_buf->dma_wait)) {
544 gamma_free_buffer(dev, last_buf);
545 }
546 }
547 if (retcode) {
548 DRM_ERROR("ctx%d w%d p%d c%ld i%d l%d pid:%d\n",
549 d->context,
550 last_buf->waiting,
551 last_buf->pending,
552 (long)DRM_WAITCOUNT(dev, d->context),
553 last_buf->idx,
554 last_buf->list,
555 current->pid);
556 }
557 }
558 return retcode;
559}
560
561int gamma_dma(struct inode *inode, struct file *filp, unsigned int cmd,
562 unsigned long arg)
563{
564 drm_file_t *priv = filp->private_data;
565 drm_device_t *dev = priv->dev;
566 drm_device_dma_t *dma = dev->dma;
567 int retcode = 0;
568 drm_dma_t __user *argp = (void __user *)arg;
569 drm_dma_t d;
570
571 if (copy_from_user(&d, argp, sizeof(d)))
572 return -EFAULT;
573
574 if (d.send_count < 0 || d.send_count > dma->buf_count) {
575 DRM_ERROR("Process %d trying to send %d buffers (of %d max)\n",
576 current->pid, d.send_count, dma->buf_count);
577 return -EINVAL;
578 }
579
580 if (d.request_count < 0 || d.request_count > dma->buf_count) {
581 DRM_ERROR("Process %d trying to get %d buffers (of %d max)\n",
582 current->pid, d.request_count, dma->buf_count);
583 return -EINVAL;
584 }
585
586 if (d.send_count) {
587 if (d.flags & _DRM_DMA_PRIORITY)
588 retcode = gamma_dma_priority(filp, dev, &d);
589 else
590 retcode = gamma_dma_send_buffers(filp, dev, &d);
591 }
592
593 d.granted_count = 0;
594
595 if (!retcode && d.request_count) {
596 retcode = gamma_dma_get_buffers(filp, &d);
597 }
598
599 DRM_DEBUG("%d returning, granted = %d\n",
600 current->pid, d.granted_count);
601 if (copy_to_user(argp, &d, sizeof(d)))
602 return -EFAULT;
603
604 return retcode;
605}
606
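gamma_dma shows the canonical ioctl shape: copy the request struct in, range-check every count against driver-known limits before using it, do the work, then copy the updated struct back out. A userspace model of the same round-trip, with memcpy standing in for copy_from_user/copy_to_user (struct and limits are illustrative):

#include <string.h>
#include <stdio.h>

struct dma_req {
	int send_count;
	int request_count;
	int granted_count;
};

#define BUF_COUNT 256	/* stand-in for dma->buf_count */

/* Returns 0 on success, -1 on a bad request; fills in granted_count. */
static int dma_ioctl(void *user_arg)
{
	struct dma_req d;

	memcpy(&d, user_arg, sizeof(d));	/* copy_from_user() */
	if (d.send_count < 0 || d.send_count > BUF_COUNT)
		return -1;			/* -EINVAL */
	if (d.request_count < 0 || d.request_count > BUF_COUNT)
		return -1;

	d.granted_count = d.request_count;	/* pretend all were granted */
	memcpy(user_arg, &d, sizeof(d));	/* copy_to_user() */
	return 0;
}

int main(void)
{
	struct dma_req d = { .send_count = 4, .request_count = 8 };
	printf("ret=%d granted=%d\n", dma_ioctl(&d), d.granted_count);

	d.send_count = -1;			/* out of range */
	printf("ret=%d\n", dma_ioctl(&d));
	return 0;
}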
607/* =============================================================
608 * DMA initialization, cleanup
609 */
610
611static int gamma_do_init_dma( drm_device_t *dev, drm_gamma_init_t *init )
612{
613 drm_gamma_private_t *dev_priv;
614 drm_device_dma_t *dma = dev->dma;
615 drm_buf_t *buf;
616 int i;
617 struct list_head *list;
618 unsigned long *pgt;
619
620 DRM_DEBUG( "%s\n", __FUNCTION__ );
621
622 dev_priv = DRM(alloc)( sizeof(drm_gamma_private_t),
623 DRM_MEM_DRIVER );
624 if ( !dev_priv )
625 return -ENOMEM;
626
627 dev->dev_private = (void *)dev_priv;
628
629 memset( dev_priv, 0, sizeof(drm_gamma_private_t) );
630
631 dev_priv->num_rast = init->num_rast;
632
633 list_for_each(list, &dev->maplist->head) {
634 drm_map_list_t *r_list = list_entry(list, drm_map_list_t, head);
635 if( r_list->map &&
636 r_list->map->type == _DRM_SHM &&
637 r_list->map->flags & _DRM_CONTAINS_LOCK ) {
638 dev_priv->sarea = r_list->map;
639 break;
640 }
641 }
642
643 dev_priv->mmio0 = drm_core_findmap(dev, init->mmio0);
644 dev_priv->mmio1 = drm_core_findmap(dev, init->mmio1);
645 dev_priv->mmio2 = drm_core_findmap(dev, init->mmio2);
646 dev_priv->mmio3 = drm_core_findmap(dev, init->mmio3);
647
648 dev_priv->sarea_priv = (drm_gamma_sarea_t *)
649 ((u8 *)dev_priv->sarea->handle +
650 init->sarea_priv_offset);
651
652 if (init->pcimode) {
653 buf = dma->buflist[GLINT_DRI_BUF_COUNT];
654 pgt = buf->address;
655
656 for (i = 0; i < GLINT_DRI_BUF_COUNT; i++) {
657 buf = dma->buflist[i];
658 *pgt = virt_to_phys((void*)buf->address) | 0x07;
659 pgt++;
660 }
661
662 buf = dma->buflist[GLINT_DRI_BUF_COUNT];
663 } else {
664 dev->agp_buffer_map = drm_core_findmap(dev, init->buffers_offset);
665 drm_core_ioremap( dev->agp_buffer_map, dev);
666
667 buf = dma->buflist[GLINT_DRI_BUF_COUNT];
668 pgt = buf->address;
669
670 for (i = 0; i < GLINT_DRI_BUF_COUNT; i++) {
671 buf = dma->buflist[i];
672 *pgt = (unsigned long)buf->address + 0x07;
673 pgt++;
674 }
675
676 buf = dma->buflist[GLINT_DRI_BUF_COUNT];
677
 678		while (GAMMA_READ(GAMMA_INFIFOSPACE) < 1) cpu_relax();
679 GAMMA_WRITE( GAMMA_GDMACONTROL, 0xe);
680 }
 681	while (GAMMA_READ(GAMMA_INFIFOSPACE) < 2) cpu_relax();
682 GAMMA_WRITE( GAMMA_PAGETABLEADDR, virt_to_phys((void*)buf->address) );
683 GAMMA_WRITE( GAMMA_PAGETABLELENGTH, 2 );
684
685 return 0;
686}
687
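The pcimode branch builds a one-level page table in the buffer after the last DMA buffer: one word per buffer holding its physical page OR'd with low control bits, which is why dispatch addresses are simply buf->idx << 12. A sketch of that layout with virt_to_phys replaced by an identity stub (sizes and flags are illustrative):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define BUF_COUNT  8	/* GLINT_DRI_BUF_COUNT is 256 in the driver */
#define PAGE_FLAGS 0x07	/* low control bits, as in the driver's "| 0x07" */

/* Illustrative identity stand-in for virt_to_phys(). */
static uintptr_t fake_virt_to_phys(void *p) { return (uintptr_t)p; }

int main(void)
{
	void *bufs[BUF_COUNT + 1];	/* the extra buffer holds the page table */

	for (int i = 0; i <= BUF_COUNT; i++) {
		bufs[i] = aligned_alloc(4096, 4096);
		if (!bufs[i])
			return 1;
	}

	/* One table entry per buffer: its physical page | control bits. */
	uintptr_t *pgt = bufs[BUF_COUNT];
	for (int i = 0; i < BUF_COUNT; i++)
		pgt[i] = fake_virt_to_phys(bufs[i]) | PAGE_FLAGS;

	/* A buffer's DMA address is just its index shifted by the page
	 * size: logical page i resolves through table entry i. */
	int idx = 3;
	printf("buffer %d -> dma addr %#lx, pte %#lx\n", idx,
	       (unsigned long)(idx << 12), (unsigned long)pgt[idx]);

	for (int i = 0; i <= BUF_COUNT; i++)
		free(bufs[i]);
	return 0;
}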
688int gamma_do_cleanup_dma( drm_device_t *dev )
689{
690 DRM_DEBUG( "%s\n", __FUNCTION__ );
691
692 /* Make sure interrupts are disabled here because the uninstall ioctl
693 * may not have been called from userspace and after dev_private
694 * is freed, it's too late.
695 */
696 if (drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
697 if ( dev->irq_enabled )
698 DRM(irq_uninstall)(dev);
699
700 if ( dev->dev_private ) {
701
702 if ( dev->agp_buffer_map != NULL )
703 drm_core_ioremapfree( dev->agp_buffer_map, dev );
704
705 DRM(free)( dev->dev_private, sizeof(drm_gamma_private_t),
706 DRM_MEM_DRIVER );
707 dev->dev_private = NULL;
708 }
709
710 return 0;
711}
712
713int gamma_dma_init( struct inode *inode, struct file *filp,
714 unsigned int cmd, unsigned long arg )
715{
716 drm_file_t *priv = filp->private_data;
717 drm_device_t *dev = priv->dev;
718 drm_gamma_init_t init;
719
720 LOCK_TEST_WITH_RETURN( dev, filp );
721
722 if ( copy_from_user( &init, (drm_gamma_init_t __user *)arg, sizeof(init) ) )
723 return -EFAULT;
724
725 switch ( init.func ) {
726 case GAMMA_INIT_DMA:
727 return gamma_do_init_dma( dev, &init );
728 case GAMMA_CLEANUP_DMA:
729 return gamma_do_cleanup_dma( dev );
730 }
731
732 return -EINVAL;
733}
734
735static int gamma_do_copy_dma( drm_device_t *dev, drm_gamma_copy_t *copy )
736{
737 drm_device_dma_t *dma = dev->dma;
738 unsigned int *screenbuf;
739
740 DRM_DEBUG( "%s\n", __FUNCTION__ );
741
742 /* We've DRM_RESTRICTED this DMA buffer */
743
744 screenbuf = dma->buflist[ GLINT_DRI_BUF_COUNT + 1 ]->address;
745
746#if 0
747 *buffer++ = 0x180; /* Tag (FilterMode) */
748 *buffer++ = 0x200; /* Allow FBColor through */
749 *buffer++ = 0x53B; /* Tag */
750 *buffer++ = copy->Pitch;
751 *buffer++ = 0x53A; /* Tag */
752 *buffer++ = copy->SrcAddress;
753 *buffer++ = 0x539; /* Tag */
754 *buffer++ = copy->WidthHeight; /* Initiates transfer */
755 *buffer++ = 0x53C; /* Tag - DMAOutputAddress */
756 *buffer++ = virt_to_phys((void*)screenbuf);
757 *buffer++ = 0x53D; /* Tag - DMAOutputCount */
758 *buffer++ = copy->Count; /* Reads HostOutFifo BLOCKS until ..*/
759
760 /* Data now sitting in dma->buflist[ GLINT_DRI_BUF_COUNT + 1 ] */
761 /* Now put it back to the screen */
762
763 *buffer++ = 0x180; /* Tag (FilterMode) */
764 *buffer++ = 0x400; /* Allow Sync through */
765 *buffer++ = 0x538; /* Tag - DMARectangleReadTarget */
766 *buffer++ = 0x155; /* FBSourceData | count */
767 *buffer++ = 0x537; /* Tag */
768 *buffer++ = copy->Pitch;
769 *buffer++ = 0x536; /* Tag */
770 *buffer++ = copy->DstAddress;
771 *buffer++ = 0x535; /* Tag */
772 *buffer++ = copy->WidthHeight; /* Initiates transfer */
773 *buffer++ = 0x530; /* Tag - DMAAddr */
774 *buffer++ = virt_to_phys((void*)screenbuf);
775 *buffer++ = 0x531;
776 *buffer++ = copy->Count; /* initiates DMA transfer of color data */
777#endif
778
779 /* need to dispatch it now */
780
781 return 0;
782}
783
784int gamma_dma_copy( struct inode *inode, struct file *filp,
785 unsigned int cmd, unsigned long arg )
786{
787 drm_file_t *priv = filp->private_data;
788 drm_device_t *dev = priv->dev;
789 drm_gamma_copy_t copy;
790
791 if ( copy_from_user( &copy, (drm_gamma_copy_t __user *)arg, sizeof(copy) ) )
792 return -EFAULT;
793
794 return gamma_do_copy_dma( dev, &copy );
795}
796
797/* =============================================================
798 * Per Context SAREA Support
799 */
800
801int gamma_getsareactx(struct inode *inode, struct file *filp,
802 unsigned int cmd, unsigned long arg)
803{
804 drm_file_t *priv = filp->private_data;
805 drm_device_t *dev = priv->dev;
806 drm_ctx_priv_map_t __user *argp = (void __user *)arg;
807 drm_ctx_priv_map_t request;
808 drm_map_t *map;
809
810 if (copy_from_user(&request, argp, sizeof(request)))
811 return -EFAULT;
812
813 down(&dev->struct_sem);
814 if ((int)request.ctx_id >= dev->max_context) {
815 up(&dev->struct_sem);
816 return -EINVAL;
817 }
818
819 map = dev->context_sareas[request.ctx_id];
820 up(&dev->struct_sem);
821
822 request.handle = map->handle;
823 if (copy_to_user(argp, &request, sizeof(request)))
824 return -EFAULT;
825 return 0;
826}
827
828int gamma_setsareactx(struct inode *inode, struct file *filp,
829 unsigned int cmd, unsigned long arg)
830{
831 drm_file_t *priv = filp->private_data;
832 drm_device_t *dev = priv->dev;
833 drm_ctx_priv_map_t request;
834 drm_map_t *map = NULL;
835 drm_map_list_t *r_list;
836 struct list_head *list;
837
838 if (copy_from_user(&request,
839 (drm_ctx_priv_map_t __user *)arg,
840 sizeof(request)))
841 return -EFAULT;
842
843 down(&dev->struct_sem);
844 r_list = NULL;
845 list_for_each(list, &dev->maplist->head) {
846 r_list = list_entry(list, drm_map_list_t, head);
847 if(r_list->map &&
848 r_list->map->handle == request.handle) break;
849 }
850 if (list == &(dev->maplist->head)) {
851 up(&dev->struct_sem);
852 return -EINVAL;
853 }
854 map = r_list->map;
855 up(&dev->struct_sem);
856
857 if (!map) return -EINVAL;
858
859 down(&dev->struct_sem);
860 if ((int)request.ctx_id >= dev->max_context) {
861 up(&dev->struct_sem);
862 return -EINVAL;
863 }
864 dev->context_sareas[request.ctx_id] = map;
865 up(&dev->struct_sem);
866 return 0;
867}
868
869void gamma_driver_irq_preinstall( drm_device_t *dev ) {
870 drm_gamma_private_t *dev_priv =
871 (drm_gamma_private_t *)dev->dev_private;
872
873 while(GAMMA_READ(GAMMA_INFIFOSPACE) < 2)
874 cpu_relax();
875
876 GAMMA_WRITE( GAMMA_GCOMMANDMODE, 0x00000004 );
877 GAMMA_WRITE( GAMMA_GDMACONTROL, 0x00000000 );
878}
879
880void gamma_driver_irq_postinstall( drm_device_t *dev ) {
881 drm_gamma_private_t *dev_priv =
882 (drm_gamma_private_t *)dev->dev_private;
883
884 while(GAMMA_READ(GAMMA_INFIFOSPACE) < 3)
885 cpu_relax();
886
887 GAMMA_WRITE( GAMMA_GINTENABLE, 0x00002001 );
888 GAMMA_WRITE( GAMMA_COMMANDINTENABLE, 0x00000008 );
889 GAMMA_WRITE( GAMMA_GDELAYTIMER, 0x00039090 );
890}
891
892void gamma_driver_irq_uninstall( drm_device_t *dev ) {
893 drm_gamma_private_t *dev_priv =
894 (drm_gamma_private_t *)dev->dev_private;
895 if (!dev_priv)
896 return;
897
898 while(GAMMA_READ(GAMMA_INFIFOSPACE) < 3)
899 cpu_relax();
900
901 GAMMA_WRITE( GAMMA_GDELAYTIMER, 0x00000000 );
902 GAMMA_WRITE( GAMMA_COMMANDINTENABLE, 0x00000000 );
903 GAMMA_WRITE( GAMMA_GINTENABLE, 0x00000000 );
904}
905
906extern drm_ioctl_desc_t DRM(ioctls)[];
907
908static int gamma_driver_preinit(drm_device_t *dev)
909{
910 /* reset the finish ioctl */
911 DRM(ioctls)[DRM_IOCTL_NR(DRM_IOCTL_FINISH)].func = DRM(finish);
912 return 0;
913}
914
915static void gamma_driver_pretakedown(drm_device_t *dev)
916{
917 gamma_do_cleanup_dma(dev);
918}
919
920static void gamma_driver_dma_ready(drm_device_t *dev)
921{
922 gamma_dma_ready(dev);
923}
924
925static int gamma_driver_dma_quiescent(drm_device_t *dev)
926{
 927	drm_gamma_private_t *dev_priv =
 928		(drm_gamma_private_t *)dev->dev_private;
929 if (dev_priv->num_rast == 2)
930 gamma_dma_quiescent_dual(dev);
931 else gamma_dma_quiescent_single(dev);
932 return 0;
933}
934
935void gamma_driver_register_fns(drm_device_t *dev)
936{
937 dev->driver_features = DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_PCI_DMA | DRIVER_HAVE_DMA | DRIVER_HAVE_IRQ;
938 DRM(fops).read = gamma_fops_read;
939 DRM(fops).poll = gamma_fops_poll;
940 dev->driver.preinit = gamma_driver_preinit;
941 dev->driver.pretakedown = gamma_driver_pretakedown;
942 dev->driver.dma_ready = gamma_driver_dma_ready;
943 dev->driver.dma_quiescent = gamma_driver_dma_quiescent;
944 dev->driver.dma_flush_block_and_flush = gamma_flush_block_and_flush;
945 dev->driver.dma_flush_unblock = gamma_flush_unblock;
946}
diff --git a/drivers/char/drm/gamma_drm.h b/drivers/char/drm/gamma_drm.h
deleted file mode 100644
index 20819ded0e15..000000000000
--- a/drivers/char/drm/gamma_drm.h
+++ /dev/null
@@ -1,90 +0,0 @@
1#ifndef _GAMMA_DRM_H_
2#define _GAMMA_DRM_H_
3
4typedef struct _drm_gamma_tex_region {
5 unsigned char next, prev; /* indices to form a circular LRU */
6 unsigned char in_use; /* owned by a client, or free? */
7 int age; /* tracked by clients to update local LRU's */
8} drm_gamma_tex_region_t;
9
10typedef struct {
11 unsigned int GDeltaMode;
12 unsigned int GDepthMode;
13 unsigned int GGeometryMode;
14 unsigned int GTransformMode;
15} drm_gamma_context_regs_t;
16
17typedef struct _drm_gamma_sarea {
18 drm_gamma_context_regs_t context_state;
19
20 unsigned int dirty;
21
22
23 /* Maintain an LRU of contiguous regions of texture space. If
24 * you think you own a region of texture memory, and it has an
25 * age different to the one you set, then you are mistaken and
26 * it has been stolen by another client. If global texAge
27 * hasn't changed, there is no need to walk the list.
28 *
29 * These regions can be used as a proxy for the fine-grained
30 * texture information of other clients - by maintaining them
31 * in the same lru which is used to age their own textures,
32 * clients have an approximate lru for the whole of global
33 * texture space, and can make informed decisions as to which
34 * areas to kick out. There is no need to choose whether to
35 * kick out your own texture or someone else's - simply eject
36 * them all in LRU order.
37 */
38
39#define GAMMA_NR_TEX_REGIONS 64
40 drm_gamma_tex_region_t texList[GAMMA_NR_TEX_REGIONS+1];
 41	/* Last elt is sentinel */
42 int texAge; /* last time texture was uploaded */
43 int last_enqueue; /* last time a buffer was enqueued */
44 int last_dispatch; /* age of the most recently dispatched buffer */
45 int last_quiescent; /* */
46 int ctxOwner; /* last context to upload state */
47
48 int vertex_prim;
49} drm_gamma_sarea_t;
50
51/* WARNING: If you change any of these defines, make sure to change the
52 * defines in the Xserver file (xf86drmGamma.h)
53 */
54
55/* Gamma specific ioctls
56 * The device specific ioctl range is 0x40 to 0x79.
57 */
58#define DRM_IOCTL_GAMMA_INIT DRM_IOW( 0x40, drm_gamma_init_t)
59#define DRM_IOCTL_GAMMA_COPY DRM_IOW( 0x41, drm_gamma_copy_t)
60
61typedef struct drm_gamma_copy {
62 unsigned int DMAOutputAddress;
63 unsigned int DMAOutputCount;
64 unsigned int DMAReadGLINTSource;
65 unsigned int DMARectangleWriteAddress;
66 unsigned int DMARectangleWriteLinePitch;
67 unsigned int DMARectangleWrite;
68 unsigned int DMARectangleReadAddress;
69 unsigned int DMARectangleReadLinePitch;
70 unsigned int DMARectangleRead;
71 unsigned int DMARectangleReadTarget;
72} drm_gamma_copy_t;
73
74typedef struct drm_gamma_init {
75 enum {
76 GAMMA_INIT_DMA = 0x01,
77 GAMMA_CLEANUP_DMA = 0x02
78 } func;
79
80 int sarea_priv_offset;
81 int pcimode;
82 unsigned int mmio0;
83 unsigned int mmio1;
84 unsigned int mmio2;
85 unsigned int mmio3;
86 unsigned int buffers_offset;
87 int num_rast;
88} drm_gamma_init_t;
89
90#endif /* _GAMMA_DRM_H_ */
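texList above is an intrusive circular LRU: next/prev are one-byte indices into the same array, and the extra element is the list head (the sentinel). A self-contained sketch of the move-to-front operation a client performs on use (everything beyond the struct layout is illustrative):

#include <stdio.h>

#define NR_REGIONS 64
#define HEAD NR_REGIONS			/* the sentinel element */

struct tex_region {
	unsigned char next, prev;	/* indices forming a circular LRU */
	unsigned char in_use;
	int age;
};

static struct tex_region list[NR_REGIONS + 1];

static void lru_init(void)
{
	for (int i = 0; i <= NR_REGIONS; i++) {
		list[i].next = (i + 1) % (NR_REGIONS + 1);
		list[i].prev = (i + NR_REGIONS) % (NR_REGIONS + 1);
	}
}

/* Unlink region i and reinsert it right after the sentinel (MRU spot). */
static void lru_touch(unsigned char i)
{
	list[list[i].prev].next = list[i].next;
	list[list[i].next].prev = list[i].prev;

	list[i].next = list[HEAD].next;
	list[i].prev = HEAD;
	list[list[HEAD].next].prev = i;
	list[HEAD].next = i;
}

int main(void)
{
	lru_init();
	lru_touch(5);	/* region 5 just got used */
	printf("MRU=%u LRU=%u\n", list[HEAD].next, list[HEAD].prev);
	return 0;
}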
diff --git a/drivers/char/drm/gamma_drv.c b/drivers/char/drm/gamma_drv.c
deleted file mode 100644
index e7e64b62792a..000000000000
--- a/drivers/char/drm/gamma_drv.c
+++ /dev/null
@@ -1,59 +0,0 @@
1/* gamma.c -- 3dlabs GMX 2000 driver -*- linux-c -*-
2 * Created: Mon Jan 4 08:58:31 1999 by faith@precisioninsight.com
3 *
4 * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
5 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
6 * All Rights Reserved.
7 *
8 * Permission is hereby granted, free of charge, to any person obtaining a
9 * copy of this software and associated documentation files (the "Software"),
10 * to deal in the Software without restriction, including without limitation
11 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
12 * and/or sell copies of the Software, and to permit persons to whom the
13 * Software is furnished to do so, subject to the following conditions:
14 *
15 * The above copyright notice and this permission notice (including the next
16 * paragraph) shall be included in all copies or substantial portions of the
17 * Software.
18 *
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
22 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
23 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
24 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
25 * DEALINGS IN THE SOFTWARE.
26 *
27 * Authors:
28 * Rickard E. (Rik) Faith <faith@valinux.com>
29 * Gareth Hughes <gareth@valinux.com>
30 */
31
32#include <linux/config.h>
33#include "gamma.h"
34#include "drmP.h"
35#include "drm.h"
36#include "gamma_drm.h"
37#include "gamma_drv.h"
38
39#include "drm_auth.h"
40#include "drm_agpsupport.h"
41#include "drm_bufs.h"
42#include "gamma_context.h" /* NOTE! */
43#include "drm_dma.h"
44#include "gamma_old_dma.h" /* NOTE */
45#include "drm_drawable.h"
46#include "drm_drv.h"
47
48#include "drm_fops.h"
49#include "drm_init.h"
50#include "drm_ioctl.h"
51#include "drm_irq.h"
52#include "gamma_lists.h" /* NOTE */
53#include "drm_lock.h"
54#include "gamma_lock.h" /* NOTE */
55#include "drm_memory.h"
56#include "drm_proc.h"
57#include "drm_vm.h"
58#include "drm_stub.h"
59#include "drm_scatter.h"
diff --git a/drivers/char/drm/gamma_drv.h b/drivers/char/drm/gamma_drv.h
deleted file mode 100644
index 146fcc6253cd..000000000000
--- a/drivers/char/drm/gamma_drv.h
+++ /dev/null
@@ -1,147 +0,0 @@
1/* gamma_drv.h -- Private header for 3dlabs GMX 2000 driver -*- linux-c -*-
2 * Created: Mon Jan 4 10:05:05 1999 by faith@precisioninsight.com
3 *
4 * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
5 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
6 * All rights reserved.
7 *
8 * Permission is hereby granted, free of charge, to any person obtaining a
9 * copy of this software and associated documentation files (the "Software"),
10 * to deal in the Software without restriction, including without limitation
11 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
12 * and/or sell copies of the Software, and to permit persons to whom the
13 * Software is furnished to do so, subject to the following conditions:
14 *
15 * The above copyright notice and this permission notice (including the next
16 * paragraph) shall be included in all copies or substantial portions of the
17 * Software.
18 *
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
22 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
23 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
24 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
25 * DEALINGS IN THE SOFTWARE.
26 *
27 * Authors:
28 * Rickard E. (Rik) Faith <faith@valinux.com>
29 *
30 */
31
32#ifndef _GAMMA_DRV_H_
33#define _GAMMA_DRV_H_
34
35typedef struct drm_gamma_private {
36 drm_gamma_sarea_t *sarea_priv;
37 drm_map_t *sarea;
38 drm_map_t *mmio0;
39 drm_map_t *mmio1;
40 drm_map_t *mmio2;
41 drm_map_t *mmio3;
42 int num_rast;
43} drm_gamma_private_t;
44
45 /* gamma_dma.c */
46extern int gamma_dma_init( struct inode *inode, struct file *filp,
47 unsigned int cmd, unsigned long arg );
48extern int gamma_dma_copy( struct inode *inode, struct file *filp,
49 unsigned int cmd, unsigned long arg );
50
51extern int gamma_do_cleanup_dma( drm_device_t *dev );
52extern void gamma_dma_ready(drm_device_t *dev);
53extern void gamma_dma_quiescent_single(drm_device_t *dev);
54extern void gamma_dma_quiescent_dual(drm_device_t *dev);
55
56 /* gamma_dma.c */
57extern int gamma_dma_schedule(drm_device_t *dev, int locked);
58extern int gamma_dma(struct inode *inode, struct file *filp,
59 unsigned int cmd, unsigned long arg);
60extern int gamma_find_devices(void);
61extern int gamma_found(void);
62
63/* Gamma-specific code pulled from drm_fops.h:
64 */
65extern int DRM(finish)(struct inode *inode, struct file *filp,
66 unsigned int cmd, unsigned long arg);
67extern int DRM(flush_unblock)(drm_device_t *dev, int context,
68 drm_lock_flags_t flags);
69extern int DRM(flush_block_and_flush)(drm_device_t *dev, int context,
70 drm_lock_flags_t flags);
71
72/* Gamma-specific code pulled from drm_dma.h:
73 */
74extern void DRM(clear_next_buffer)(drm_device_t *dev);
75extern int DRM(select_queue)(drm_device_t *dev,
76 void (*wrapper)(unsigned long));
77extern int DRM(dma_enqueue)(struct file *filp, drm_dma_t *dma);
78extern int DRM(dma_get_buffers)(struct file *filp, drm_dma_t *dma);
79
80
81/* Gamma-specific code pulled from drm_lists.h (now renamed gamma_lists.h):
82 */
83extern int DRM(waitlist_create)(drm_waitlist_t *bl, int count);
84extern int DRM(waitlist_destroy)(drm_waitlist_t *bl);
85extern int DRM(waitlist_put)(drm_waitlist_t *bl, drm_buf_t *buf);
86extern drm_buf_t *DRM(waitlist_get)(drm_waitlist_t *bl);
87extern int DRM(freelist_create)(drm_freelist_t *bl, int count);
88extern int DRM(freelist_destroy)(drm_freelist_t *bl);
89extern int DRM(freelist_put)(drm_device_t *dev, drm_freelist_t *bl,
90 drm_buf_t *buf);
91extern drm_buf_t *DRM(freelist_get)(drm_freelist_t *bl, int block);
92
93/* externs for gamma changes to the ops */
94extern struct file_operations DRM(fops);
95extern unsigned int gamma_fops_poll(struct file *filp, struct poll_table_struct *wait);
96extern ssize_t gamma_fops_read(struct file *filp, char __user *buf, size_t count, loff_t *off);
97
98
99#define GLINT_DRI_BUF_COUNT 256
100
101#define GAMMA_OFF(reg) \
102 ((reg < 0x1000) \
103 ? reg \
104 : ((reg < 0x10000) \
105 ? (reg - 0x1000) \
106 : ((reg < 0x11000) \
107 ? (reg - 0x10000) \
108 : (reg - 0x11000))))
109
110#define GAMMA_BASE(reg) ((unsigned long) \
111 ((reg < 0x1000) ? dev_priv->mmio0->handle : \
112 ((reg < 0x10000) ? dev_priv->mmio1->handle : \
113 ((reg < 0x11000) ? dev_priv->mmio2->handle : \
114 dev_priv->mmio3->handle))))
115#define GAMMA_ADDR(reg) (GAMMA_BASE(reg) + GAMMA_OFF(reg))
116#define GAMMA_DEREF(reg) *(__volatile__ int *)GAMMA_ADDR(reg)
117#define GAMMA_READ(reg) GAMMA_DEREF(reg)
118#define GAMMA_WRITE(reg,val) do { GAMMA_DEREF(reg) = val; } while (0)
119
120#define GAMMA_BROADCASTMASK 0x9378
121#define GAMMA_COMMANDINTENABLE 0x0c48
122#define GAMMA_DMAADDRESS 0x0028
123#define GAMMA_DMACOUNT 0x0030
124#define GAMMA_FILTERMODE 0x8c00
125#define GAMMA_GCOMMANDINTFLAGS 0x0c50
126#define GAMMA_GCOMMANDMODE 0x0c40
127#define GAMMA_QUEUED_DMA_MODE (1<<1)
128#define GAMMA_GCOMMANDSTATUS 0x0c60
129#define GAMMA_GDELAYTIMER 0x0c38
130#define GAMMA_GDMACONTROL 0x0060
131#define GAMMA_USE_AGP (1<<1)
132#define GAMMA_GINTENABLE 0x0808
133#define GAMMA_GINTFLAGS 0x0810
134#define GAMMA_INFIFOSPACE 0x0018
135#define GAMMA_OUTFIFOWORDS 0x0020
136#define GAMMA_OUTPUTFIFO 0x2000
137#define GAMMA_SYNC 0x8c40
138#define GAMMA_SYNC_TAG 0x0188
139#define GAMMA_PAGETABLEADDR 0x0C00
140#define GAMMA_PAGETABLELENGTH 0x0C08
141
142#define GAMMA_PASSTHROUGH 0x1FE
143#define GAMMA_DMAADDRTAG 0x530
144#define GAMMA_DMACOUNTTAG 0x531
145#define GAMMA_COMMANDINTTAG 0x532
146
147#endif
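GAMMA_OFF and GAMMA_BASE split one flat register number across four MMIO windows: below 0x1000 in mmio0, 0x1000-0xffff rebased into mmio1, 0x10000-0x10fff into mmio2, the rest into mmio3. A small host-side check of the mapping, with the macros rewritten as plain functions (the window handles are stand-ins):

#include <stdio.h>

/* Same case split as GAMMA_BASE(): pick the window for a register. */
static int window(unsigned reg)
{
	return reg < 0x1000 ? 0 : reg < 0x10000 ? 1 : reg < 0x11000 ? 2 : 3;
}

/* Same case split as GAMMA_OFF(): rebase the register into its window. */
static unsigned off(unsigned reg)
{
	if (reg < 0x1000)  return reg;
	if (reg < 0x10000) return reg - 0x1000;
	if (reg < 0x11000) return reg - 0x10000;
	return reg - 0x11000;
}

int main(void)
{
	unsigned regs[] = { 0x0018,	/* GAMMA_INFIFOSPACE */
			    0x8c40,	/* GAMMA_SYNC */
			    0x2000,	/* GAMMA_OUTPUTFIFO */
			    0x12000 };	/* OUTPUTFIFO + 0x10000, second MX */
	for (int i = 0; i < 4; i++)
		printf("reg %#07x -> mmio%d + %#x\n",
		       regs[i], window(regs[i]), off(regs[i]));
	return 0;
}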
diff --git a/drivers/char/drm/gamma_lists.h b/drivers/char/drm/gamma_lists.h
deleted file mode 100644
index 2d93f412b96b..000000000000
--- a/drivers/char/drm/gamma_lists.h
+++ /dev/null
@@ -1,215 +0,0 @@
1/* drm_lists.h -- Buffer list handling routines -*- linux-c -*-
2 * Created: Mon Apr 19 20:54:22 1999 by faith@valinux.com
3 *
4 * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
5 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
6 * All Rights Reserved.
7 *
8 * Permission is hereby granted, free of charge, to any person obtaining a
9 * copy of this software and associated documentation files (the "Software"),
10 * to deal in the Software without restriction, including without limitation
11 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
12 * and/or sell copies of the Software, and to permit persons to whom the
13 * Software is furnished to do so, subject to the following conditions:
14 *
15 * The above copyright notice and this permission notice (including the next
16 * paragraph) shall be included in all copies or substantial portions of the
17 * Software.
18 *
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
22 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
23 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
24 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
25 * OTHER DEALINGS IN THE SOFTWARE.
26 *
27 * Authors:
28 * Rickard E. (Rik) Faith <faith@valinux.com>
29 * Gareth Hughes <gareth@valinux.com>
30 */
31
32#include "drmP.h"
33
34
35int DRM(waitlist_create)(drm_waitlist_t *bl, int count)
36{
37 if (bl->count) return -EINVAL;
38
 39	bl->bufs = DRM(alloc)((count + 2) * sizeof(*bl->bufs),
 40			      DRM_MEM_BUFLISTS);
 41
 42	if (!bl->bufs) return -ENOMEM;
 43	memset(bl->bufs, 0, (count + 2) * sizeof(*bl->bufs));
44 bl->count = count;
45 bl->rp = bl->bufs;
46 bl->wp = bl->bufs;
47 bl->end = &bl->bufs[bl->count+1];
48 spin_lock_init(&bl->write_lock);
49 spin_lock_init(&bl->read_lock);
50 return 0;
51}
52
53int DRM(waitlist_destroy)(drm_waitlist_t *bl)
54{
55 if (bl->rp != bl->wp) return -EINVAL;
56 if (bl->bufs) DRM(free)(bl->bufs,
57 (bl->count + 2) * sizeof(*bl->bufs),
58 DRM_MEM_BUFLISTS);
59 bl->count = 0;
60 bl->bufs = NULL;
61 bl->rp = NULL;
62 bl->wp = NULL;
63 bl->end = NULL;
64 return 0;
65}
66
67int DRM(waitlist_put)(drm_waitlist_t *bl, drm_buf_t *buf)
68{
69 int left;
70 unsigned long flags;
71
72 left = DRM_LEFTCOUNT(bl);
73 if (!left) {
74 DRM_ERROR("Overflow while adding buffer %d from filp %p\n",
75 buf->idx, buf->filp);
76 return -EINVAL;
77 }
78 buf->list = DRM_LIST_WAIT;
79
80 spin_lock_irqsave(&bl->write_lock, flags);
81 *bl->wp = buf;
82 if (++bl->wp >= bl->end) bl->wp = bl->bufs;
83 spin_unlock_irqrestore(&bl->write_lock, flags);
84
85 return 0;
86}
87
88drm_buf_t *DRM(waitlist_get)(drm_waitlist_t *bl)
89{
90 drm_buf_t *buf;
91 unsigned long flags;
92
93 spin_lock_irqsave(&bl->read_lock, flags);
94 buf = *bl->rp;
95 if (bl->rp == bl->wp) {
96 spin_unlock_irqrestore(&bl->read_lock, flags);
97 return NULL;
98 }
99 if (++bl->rp >= bl->end) bl->rp = bl->bufs;
100 spin_unlock_irqrestore(&bl->read_lock, flags);
101
102 return buf;
103}
104
105int DRM(freelist_create)(drm_freelist_t *bl, int count)
106{
107 atomic_set(&bl->count, 0);
108 bl->next = NULL;
109 init_waitqueue_head(&bl->waiting);
110 bl->low_mark = 0;
111 bl->high_mark = 0;
112 atomic_set(&bl->wfh, 0);
113 spin_lock_init(&bl->lock);
114 ++bl->initialized;
115 return 0;
116}
117
118int DRM(freelist_destroy)(drm_freelist_t *bl)
119{
120 atomic_set(&bl->count, 0);
121 bl->next = NULL;
122 return 0;
123}
124
125int DRM(freelist_put)(drm_device_t *dev, drm_freelist_t *bl, drm_buf_t *buf)
126{
127 drm_device_dma_t *dma = dev->dma;
128
129 if (!dma) {
130 DRM_ERROR("No DMA support\n");
131 return 1;
132 }
133
134 if (buf->waiting || buf->pending || buf->list == DRM_LIST_FREE) {
135 DRM_ERROR("Freed buffer %d: w%d, p%d, l%d\n",
136 buf->idx, buf->waiting, buf->pending, buf->list);
137 }
138 if (!bl) return 1;
139 buf->list = DRM_LIST_FREE;
140
141 spin_lock(&bl->lock);
142 buf->next = bl->next;
143 bl->next = buf;
144 spin_unlock(&bl->lock);
145
146 atomic_inc(&bl->count);
147 if (atomic_read(&bl->count) > dma->buf_count) {
148 DRM_ERROR("%d of %d buffers free after addition of %d\n",
149 atomic_read(&bl->count), dma->buf_count, buf->idx);
150 return 1;
151 }
152 /* Check for high water mark */
153 if (atomic_read(&bl->wfh) && atomic_read(&bl->count)>=bl->high_mark) {
154 atomic_set(&bl->wfh, 0);
155 wake_up_interruptible(&bl->waiting);
156 }
157 return 0;
158}
159
160static drm_buf_t *DRM(freelist_try)(drm_freelist_t *bl)
161{
162 drm_buf_t *buf;
163
164 if (!bl) return NULL;
165
166 /* Get buffer */
167 spin_lock(&bl->lock);
168 if (!bl->next) {
169 spin_unlock(&bl->lock);
170 return NULL;
171 }
172 buf = bl->next;
173 bl->next = bl->next->next;
174 spin_unlock(&bl->lock);
175
176 atomic_dec(&bl->count);
177 buf->next = NULL;
178 buf->list = DRM_LIST_NONE;
179 if (buf->waiting || buf->pending) {
180 DRM_ERROR("Free buffer %d: w%d, p%d, l%d\n",
181 buf->idx, buf->waiting, buf->pending, buf->list);
182 }
183
184 return buf;
185}
186
187drm_buf_t *DRM(freelist_get)(drm_freelist_t *bl, int block)
188{
189 drm_buf_t *buf = NULL;
190 DECLARE_WAITQUEUE(entry, current);
191
192 if (!bl || !bl->initialized) return NULL;
193
194 /* Check for low water mark */
195 if (atomic_read(&bl->count) <= bl->low_mark) /* Became low */
196 atomic_set(&bl->wfh, 1);
197 if (atomic_read(&bl->wfh)) {
198 if (block) {
199 add_wait_queue(&bl->waiting, &entry);
200 for (;;) {
201 current->state = TASK_INTERRUPTIBLE;
202 if (!atomic_read(&bl->wfh)
203 && (buf = DRM(freelist_try)(bl))) break;
204 schedule();
205 if (signal_pending(current)) break;
206 }
207 current->state = TASK_RUNNING;
208 remove_wait_queue(&bl->waiting, &entry);
209 }
210 return buf;
211 }
212
213 return DRM(freelist_try)(bl);
214}
215
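The waitlist is a fixed-size ring: rp and wp chase each other through count + 2 slots, empty is rp == wp, and the spare slots keep a full ring distinguishable from an empty one. A minimal single-producer/single-consumer version without the spinlocks (illustrative; the driver locks because put and get run in different contexts):

#include <stdio.h>

#define RING_SLOTS 8	/* the driver sizes this as buf_count + 2 */

struct ring {
	void *bufs[RING_SLOTS];
	int rp, wp;	/* read and write cursors */
};

static int ring_put(struct ring *r, void *buf)
{
	int next = (r->wp + 1) % RING_SLOTS;
	if (next == r->rp)
		return -1;		/* full: would overrun the reader */
	r->bufs[r->wp] = buf;
	r->wp = next;
	return 0;
}

static void *ring_get(struct ring *r)
{
	if (r->rp == r->wp)
		return NULL;		/* empty: the cursors have met */
	void *buf = r->bufs[r->rp];
	r->rp = (r->rp + 1) % RING_SLOTS;
	return buf;
}

int main(void)
{
	struct ring r = { .rp = 0, .wp = 0 };
	int x = 42;
	ring_put(&r, &x);
	int *p = ring_get(&r);
	printf("got %d, then %s\n", *p, ring_get(&r) ? "more" : "empty");
	return 0;
}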
diff --git a/drivers/char/drm/gamma_lock.h b/drivers/char/drm/gamma_lock.h
deleted file mode 100644
index ddec67e4ed16..000000000000
--- a/drivers/char/drm/gamma_lock.h
+++ /dev/null
@@ -1,140 +0,0 @@
1/* lock.c -- IOCTLs for locking -*- linux-c -*-
2 * Created: Tue Feb 2 08:37:54 1999 by faith@valinux.com
3 *
4 * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
5 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
6 * All Rights Reserved.
7 *
8 * Permission is hereby granted, free of charge, to any person obtaining a
9 * copy of this software and associated documentation files (the "Software"),
10 * to deal in the Software without restriction, including without limitation
11 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
12 * and/or sell copies of the Software, and to permit persons to whom the
13 * Software is furnished to do so, subject to the following conditions:
14 *
15 * The above copyright notice and this permission notice (including the next
16 * paragraph) shall be included in all copies or substantial portions of the
17 * Software.
18 *
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
22 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
23 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
24 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
25 * OTHER DEALINGS IN THE SOFTWARE.
26 *
27 * Authors:
28 * Rickard E. (Rik) Faith <faith@valinux.com>
29 * Gareth Hughes <gareth@valinux.com>
30 */
31
32
33/* Gamma-specific code extracted from drm_lock.h:
34 */
35static int DRM(flush_queue)(drm_device_t *dev, int context)
36{
37 DECLARE_WAITQUEUE(entry, current);
38 int ret = 0;
39 drm_queue_t *q = dev->queuelist[context];
40
41 DRM_DEBUG("\n");
42
43 atomic_inc(&q->use_count);
44 if (atomic_read(&q->use_count) > 1) {
45 atomic_inc(&q->block_write);
46 add_wait_queue(&q->flush_queue, &entry);
47 atomic_inc(&q->block_count);
48 for (;;) {
49 current->state = TASK_INTERRUPTIBLE;
50 if (!DRM_BUFCOUNT(&q->waitlist)) break;
51 schedule();
52 if (signal_pending(current)) {
53 ret = -EINTR; /* Can't restart */
54 break;
55 }
56 }
57 atomic_dec(&q->block_count);
58 current->state = TASK_RUNNING;
59 remove_wait_queue(&q->flush_queue, &entry);
60 }
61 atomic_dec(&q->use_count);
62
 63	/* NOTE: block_write is still incremented!
 64	   Use DRM(flush_unblock_queue) to decrement. */
65 return ret;
66}
67
68static int DRM(flush_unblock_queue)(drm_device_t *dev, int context)
69{
70 drm_queue_t *q = dev->queuelist[context];
71
72 DRM_DEBUG("\n");
73
74 atomic_inc(&q->use_count);
75 if (atomic_read(&q->use_count) > 1) {
76 if (atomic_read(&q->block_write)) {
77 atomic_dec(&q->block_write);
78 wake_up_interruptible(&q->write_queue);
79 }
80 }
81 atomic_dec(&q->use_count);
82 return 0;
83}
84
85int DRM(flush_block_and_flush)(drm_device_t *dev, int context,
86 drm_lock_flags_t flags)
87{
88 int ret = 0;
89 int i;
90
91 DRM_DEBUG("\n");
92
93 if (flags & _DRM_LOCK_FLUSH) {
94 ret = DRM(flush_queue)(dev, DRM_KERNEL_CONTEXT);
95 if (!ret) ret = DRM(flush_queue)(dev, context);
96 }
97 if (flags & _DRM_LOCK_FLUSH_ALL) {
98 for (i = 0; !ret && i < dev->queue_count; i++) {
99 ret = DRM(flush_queue)(dev, i);
100 }
101 }
102 return ret;
103}
104
105int DRM(flush_unblock)(drm_device_t *dev, int context, drm_lock_flags_t flags)
106{
107 int ret = 0;
108 int i;
109
110 DRM_DEBUG("\n");
111
112 if (flags & _DRM_LOCK_FLUSH) {
113 ret = DRM(flush_unblock_queue)(dev, DRM_KERNEL_CONTEXT);
114 if (!ret) ret = DRM(flush_unblock_queue)(dev, context);
115 }
116 if (flags & _DRM_LOCK_FLUSH_ALL) {
117 for (i = 0; !ret && i < dev->queue_count; i++) {
118 ret = DRM(flush_unblock_queue)(dev, i);
119 }
120 }
121
122 return ret;
123}
124
125int DRM(finish)(struct inode *inode, struct file *filp, unsigned int cmd,
126 unsigned long arg)
127{
128 drm_file_t *priv = filp->private_data;
129 drm_device_t *dev = priv->dev;
130 int ret = 0;
131 drm_lock_t lock;
132
133 DRM_DEBUG("\n");
134
135 if (copy_from_user(&lock, (drm_lock_t __user *)arg, sizeof(lock)))
136 return -EFAULT;
137 ret = DRM(flush_block_and_flush)(dev, lock.context, lock.flags);
138 DRM(flush_unblock)(dev, lock.context, lock.flags);
139 return ret;
140}
diff --git a/drivers/char/drm/gamma_old_dma.h b/drivers/char/drm/gamma_old_dma.h
deleted file mode 100644
index abdd454aab9f..000000000000
--- a/drivers/char/drm/gamma_old_dma.h
+++ /dev/null
@@ -1,313 +0,0 @@
1/* drm_dma.c -- DMA IOCTL and function support -*- linux-c -*-
2 * Created: Fri Mar 19 14:30:16 1999 by faith@valinux.com
3 *
4 * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
5 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
6 * All Rights Reserved.
7 *
8 * Permission is hereby granted, free of charge, to any person obtaining a
9 * copy of this software and associated documentation files (the "Software"),
10 * to deal in the Software without restriction, including without limitation
11 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
12 * and/or sell copies of the Software, and to permit persons to whom the
13 * Software is furnished to do so, subject to the following conditions:
14 *
15 * The above copyright notice and this permission notice (including the next
16 * paragraph) shall be included in all copies or substantial portions of the
17 * Software.
18 *
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
22 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
23 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
24 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
25 * OTHER DEALINGS IN THE SOFTWARE.
26 *
27 * Authors:
28 * Rickard E. (Rik) Faith <faith@valinux.com>
29 * Gareth Hughes <gareth@valinux.com>
30 */
31
32
33/* Gamma-specific code pulled from drm_dma.h:
34 */
35
36void DRM(clear_next_buffer)(drm_device_t *dev)
37{
38 drm_device_dma_t *dma = dev->dma;
39
40 dma->next_buffer = NULL;
41 if (dma->next_queue && !DRM_BUFCOUNT(&dma->next_queue->waitlist)) {
42 wake_up_interruptible(&dma->next_queue->flush_queue);
43 }
44 dma->next_queue = NULL;
45}
46
47int DRM(select_queue)(drm_device_t *dev, void (*wrapper)(unsigned long))
48{
49 int i;
50 int candidate = -1;
 51	unsigned long j = jiffies;
52
53 if (!dev) {
54 DRM_ERROR("No device\n");
55 return -1;
56 }
57 if (!dev->queuelist || !dev->queuelist[DRM_KERNEL_CONTEXT]) {
58 /* This only happens between the time the
59 interrupt is initialized and the time
60 the queues are initialized. */
61 return -1;
62 }
63
64 /* Doing "while locked" DMA? */
65 if (DRM_WAITCOUNT(dev, DRM_KERNEL_CONTEXT)) {
66 return DRM_KERNEL_CONTEXT;
67 }
68
69 /* If there are buffers on the last_context
70 queue, and we have not been executing
71 this context very long, continue to
72 execute this context. */
73 if (dev->last_switch <= j
74 && dev->last_switch + DRM_TIME_SLICE > j
75 && DRM_WAITCOUNT(dev, dev->last_context)) {
76 return dev->last_context;
77 }
78
79 /* Otherwise, find a candidate */
80 for (i = dev->last_checked + 1; i < dev->queue_count; i++) {
81 if (DRM_WAITCOUNT(dev, i)) {
82 candidate = dev->last_checked = i;
83 break;
84 }
85 }
86
87 if (candidate < 0) {
88 for (i = 0; i < dev->queue_count; i++) {
89 if (DRM_WAITCOUNT(dev, i)) {
90 candidate = dev->last_checked = i;
91 break;
92 }
93 }
94 }
95
96 if (wrapper
97 && candidate >= 0
98 && candidate != dev->last_context
99 && dev->last_switch <= j
100 && dev->last_switch + DRM_TIME_SLICE > j) {
101 if (dev->timer.expires != dev->last_switch + DRM_TIME_SLICE) {
102 del_timer(&dev->timer);
103 dev->timer.function = wrapper;
104 dev->timer.data = (unsigned long)dev;
105 dev->timer.expires = dev->last_switch+DRM_TIME_SLICE;
106 add_timer(&dev->timer);
107 }
108 return -1;
109 }
110
111 return candidate;
112}
113
114
115int DRM(dma_enqueue)(struct file *filp, drm_dma_t *d)
116{
117 drm_file_t *priv = filp->private_data;
118 drm_device_t *dev = priv->dev;
119 int i;
120 drm_queue_t *q;
121 drm_buf_t *buf;
122 int idx;
123 int while_locked = 0;
124 drm_device_dma_t *dma = dev->dma;
125 int *ind;
126 int err;
127 DECLARE_WAITQUEUE(entry, current);
128
129 DRM_DEBUG("%d\n", d->send_count);
130
131 if (d->flags & _DRM_DMA_WHILE_LOCKED) {
132 int context = dev->lock.hw_lock->lock;
133
134 if (!_DRM_LOCK_IS_HELD(context)) {
135 DRM_ERROR("No lock held during \"while locked\""
136 " request\n");
137 return -EINVAL;
138 }
139 if (d->context != _DRM_LOCKING_CONTEXT(context)
140 && _DRM_LOCKING_CONTEXT(context) != DRM_KERNEL_CONTEXT) {
141 DRM_ERROR("Lock held by %d while %d makes"
142 " \"while locked\" request\n",
143 _DRM_LOCKING_CONTEXT(context),
144 d->context);
145 return -EINVAL;
146 }
147 q = dev->queuelist[DRM_KERNEL_CONTEXT];
148 while_locked = 1;
149 } else {
150 q = dev->queuelist[d->context];
151 }
152
153
154 atomic_inc(&q->use_count);
155 if (atomic_read(&q->block_write)) {
156 add_wait_queue(&q->write_queue, &entry);
157 atomic_inc(&q->block_count);
158 for (;;) {
159 current->state = TASK_INTERRUPTIBLE;
160 if (!atomic_read(&q->block_write)) break;
161 schedule();
162 if (signal_pending(current)) {
163 atomic_dec(&q->use_count);
164 remove_wait_queue(&q->write_queue, &entry);
165 return -EINTR;
166 }
167 }
168 atomic_dec(&q->block_count);
169 current->state = TASK_RUNNING;
170 remove_wait_queue(&q->write_queue, &entry);
171 }
172
173 ind = DRM(alloc)(d->send_count * sizeof(int), DRM_MEM_DRIVER);
174 if (!ind)
175 return -ENOMEM;
176
177 if (copy_from_user(ind, d->send_indices, d->send_count * sizeof(int))) {
178 err = -EFAULT;
179 goto out;
180 }
181
182 err = -EINVAL;
183 for (i = 0; i < d->send_count; i++) {
184 idx = ind[i];
185 if (idx < 0 || idx >= dma->buf_count) {
186 DRM_ERROR("Index %d (of %d max)\n",
187 ind[i], dma->buf_count - 1);
188 goto out;
189 }
190 buf = dma->buflist[ idx ];
191 if (buf->filp != filp) {
192 DRM_ERROR("Process %d using buffer not owned\n",
193 current->pid);
194 goto out;
195 }
196 if (buf->list != DRM_LIST_NONE) {
197 DRM_ERROR("Process %d using buffer %d on list %d\n",
198 current->pid, buf->idx, buf->list);
199 goto out;
200 }
201 buf->used = ind[i];
202 buf->while_locked = while_locked;
203 buf->context = d->context;
204 if (!buf->used) {
205 DRM_ERROR("Queueing 0 length buffer\n");
206 }
207 if (buf->pending) {
208 DRM_ERROR("Queueing pending buffer:"
209 " buffer %d, offset %d\n",
210 ind[i], i);
211 goto out;
212 }
213 if (buf->waiting) {
214 DRM_ERROR("Queueing waiting buffer:"
215 " buffer %d, offset %d\n",
216 ind[i], i);
217 goto out;
218 }
219 buf->waiting = 1;
220 if (atomic_read(&q->use_count) == 1
221 || atomic_read(&q->finalization)) {
222 DRM(free_buffer)(dev, buf);
223 } else {
224 DRM(waitlist_put)(&q->waitlist, buf);
225 atomic_inc(&q->total_queued);
226 }
227 }
228 atomic_dec(&q->use_count);
229
230 return 0;
231
232out:
233 DRM(free)(ind, d->send_count * sizeof(int), DRM_MEM_DRIVER);
234 atomic_dec(&q->use_count);
235 return err;
236}
237
238static int DRM(dma_get_buffers_of_order)(struct file *filp, drm_dma_t *d,
239 int order)
240{
241 drm_file_t *priv = filp->private_data;
242 drm_device_t *dev = priv->dev;
243 int i;
244 drm_buf_t *buf;
245 drm_device_dma_t *dma = dev->dma;
246
247 for (i = d->granted_count; i < d->request_count; i++) {
248 buf = DRM(freelist_get)(&dma->bufs[order].freelist,
249 d->flags & _DRM_DMA_WAIT);
250 if (!buf) break;
251 if (buf->pending || buf->waiting) {
252 DRM_ERROR("Free buffer %d in use: filp %p (w%d, p%d)\n",
253 buf->idx,
254 buf->filp,
255 buf->waiting,
256 buf->pending);
257 }
258 buf->filp = filp;
259 if (copy_to_user(&d->request_indices[i],
260 &buf->idx,
261 sizeof(buf->idx)))
262 return -EFAULT;
263
264 if (copy_to_user(&d->request_sizes[i],
265 &buf->total,
266 sizeof(buf->total)))
267 return -EFAULT;
268
269 ++d->granted_count;
270 }
271 return 0;
272}
273
274
275int DRM(dma_get_buffers)(struct file *filp, drm_dma_t *dma)
276{
277 int order;
278 int retcode = 0;
279 int tmp_order;
280
281 order = DRM(order)(dma->request_size);
282
283 dma->granted_count = 0;
284 retcode = DRM(dma_get_buffers_of_order)(filp, dma, order);
285
286 if (dma->granted_count < dma->request_count
287 && (dma->flags & _DRM_DMA_SMALLER_OK)) {
288 for (tmp_order = order - 1;
289 !retcode
290 && dma->granted_count < dma->request_count
291 && tmp_order >= DRM_MIN_ORDER;
292 --tmp_order) {
293
294 retcode = DRM(dma_get_buffers_of_order)(filp, dma,
295 tmp_order);
296 }
297 }
298
299 if (dma->granted_count < dma->request_count
300 && (dma->flags & _DRM_DMA_LARGER_OK)) {
301 for (tmp_order = order + 1;
302 !retcode
303 && dma->granted_count < dma->request_count
304 && tmp_order <= DRM_MAX_ORDER;
305 ++tmp_order) {
306
307 retcode = DRM(dma_get_buffers_of_order)(filp, dma,
308 tmp_order);
309 }
310 }
311 return retcode;
312}
313
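For reference, DRM(order) -- used by DRM(dma_get_buffers) above to pick a freelist bucket -- is a log2 ceiling. A stand-alone sketch plus a worked value follows; it is reconstructed for illustration, not copied from drm_bufs.c:

/* log2 ceiling: the smallest order with (1 << order) >= size. */
static int order_of(unsigned long size)
{
	int order = 0;
	unsigned long tmp;

	for (tmp = size >> 1; tmp; tmp >>= 1)
		order++;
	if (size & (size - 1))	/* not a power of two: round up */
		order++;
	return order;
}

/* Example: order_of(70000) == 17, i.e. the 128 KiB bucket.  With
 * _DRM_DMA_SMALLER_OK the search then falls back through orders 16,
 * 15, ... down to DRM_MIN_ORDER; with _DRM_DMA_LARGER_OK it walks up
 * to DRM_MAX_ORDER instead.
 */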
diff --git a/drivers/char/drm/i810_dma.c b/drivers/char/drm/i810_dma.c
index 18e0b7622893..2f1659b96fd1 100644
--- a/drivers/char/drm/i810_dma.c
+++ b/drivers/char/drm/i810_dma.c
@@ -45,11 +45,6 @@
45#define I810_BUF_UNMAPPED 0 45#define I810_BUF_UNMAPPED 0
46#define I810_BUF_MAPPED 1 46#define I810_BUF_MAPPED 1
47 47
48#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,4,2)
49#define down_write down
50#define up_write up
51#endif
52
53static drm_buf_t *i810_freelist_get(drm_device_t *dev) 48static drm_buf_t *i810_freelist_get(drm_device_t *dev)
54{ 49{
55 drm_device_dma_t *dma = dev->dma; 50 drm_device_dma_t *dma = dev->dma;
@@ -351,6 +346,7 @@ static int i810_dma_initialize(drm_device_t *dev,
351 DRM_ERROR("can not find mmio map!\n"); 346 DRM_ERROR("can not find mmio map!\n");
352 return -EINVAL; 347 return -EINVAL;
353 } 348 }
349 dev->agp_buffer_token = init->buffers_offset;
354 dev->agp_buffer_map = drm_core_findmap(dev, init->buffers_offset); 350 dev->agp_buffer_map = drm_core_findmap(dev, init->buffers_offset);
355 if (!dev->agp_buffer_map) { 351 if (!dev->agp_buffer_map) {
356 dev->dev_private = (void *)dev_priv; 352 dev->dev_private = (void *)dev_priv;
@@ -1383,3 +1379,19 @@ drm_ioctl_desc_t i810_ioctls[] = {
1383}; 1379};
1384 1380
1385int i810_max_ioctl = DRM_ARRAY_SIZE(i810_ioctls); 1381int i810_max_ioctl = DRM_ARRAY_SIZE(i810_ioctls);
1382
1383/**
1384 * Determine if the device really is AGP or not.
1385 *
1386 * All Intel graphics chipsets are treated as AGP, even if they are really
1387 * PCI-e.
1388 *
1389 * \param dev The device to be tested.
1390 *
1391 * \returns
1392 * A value of 1 is always returned to indicate that every i810 is AGP.
1393 */
1394int i810_driver_device_is_agp(drm_device_t * dev)
1395{
1396 return 1;
1397}
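The three-way return convention behind the new device_is_agp hook is easy to miss: 0 means "not AGP", 1 means "AGP" (what i810/i830/i915 report unconditionally), and 2 means "no opinion, run the generic test". The sketch below reconstructs how the core is expected to dispatch on it; it is a reading of the surrounding drivers, not a verbatim copy of the DRM core:

/* Sketch of the core-side dispatch for drm_driver::device_is_agp. */
int drm_device_is_agp(drm_device_t * dev)
{
	if (dev->driver->device_is_agp != NULL) {
		int err = (*dev->driver->device_is_agp) (dev);

		if (err != 2)
			return err;	/* driver gave a definite answer */
	}
	/* Fall back to the generic PCI capability test. */
	return pci_find_capability(dev->pdev, PCI_CAP_ID_AGP) != 0;
}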
diff --git a/drivers/char/drm/i810_drv.c b/drivers/char/drm/i810_drv.c
index ff51b3259af9..00609329d578 100644
--- a/drivers/char/drm/i810_drv.c
+++ b/drivers/char/drm/i810_drv.c
@@ -84,6 +84,7 @@ static struct drm_driver driver = {
84 .dev_priv_size = sizeof(drm_i810_buf_priv_t), 84 .dev_priv_size = sizeof(drm_i810_buf_priv_t),
85 .pretakedown = i810_driver_pretakedown, 85 .pretakedown = i810_driver_pretakedown,
86 .prerelease = i810_driver_prerelease, 86 .prerelease = i810_driver_prerelease,
87 .device_is_agp = i810_driver_device_is_agp,
87 .release = i810_driver_release, 88 .release = i810_driver_release,
88 .dma_quiescent = i810_driver_dma_quiescent, 89 .dma_quiescent = i810_driver_dma_quiescent,
89 .reclaim_buffers = i810_reclaim_buffers, 90 .reclaim_buffers = i810_reclaim_buffers,
diff --git a/drivers/char/drm/i810_drv.h b/drivers/char/drm/i810_drv.h
index 1b40538d1725..62ee4f58c59a 100644
--- a/drivers/char/drm/i810_drv.h
+++ b/drivers/char/drm/i810_drv.h
@@ -120,6 +120,7 @@ extern int i810_driver_dma_quiescent(drm_device_t *dev);
120extern void i810_driver_release(drm_device_t *dev, struct file *filp); 120extern void i810_driver_release(drm_device_t *dev, struct file *filp);
121extern void i810_driver_pretakedown(drm_device_t *dev); 121extern void i810_driver_pretakedown(drm_device_t *dev);
122extern void i810_driver_prerelease(drm_device_t *dev, DRMFILE filp); 122extern void i810_driver_prerelease(drm_device_t *dev, DRMFILE filp);
123extern int i810_driver_device_is_agp(drm_device_t * dev);
123 124
124#define I810_BASE(reg) ((unsigned long) \ 125#define I810_BASE(reg) ((unsigned long) \
125 dev_priv->mmio_map->handle) 126 dev_priv->mmio_map->handle)
diff --git a/drivers/char/drm/i830_dma.c b/drivers/char/drm/i830_dma.c
index dc7733035864..6f89d5796ef3 100644
--- a/drivers/char/drm/i830_dma.c
+++ b/drivers/char/drm/i830_dma.c
@@ -47,11 +47,6 @@
47#define I830_BUF_UNMAPPED 0 47#define I830_BUF_UNMAPPED 0
48#define I830_BUF_MAPPED 1 48#define I830_BUF_MAPPED 1
49 49
50#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,4,2)
51#define down_write down
52#define up_write up
53#endif
54
55static drm_buf_t *i830_freelist_get(drm_device_t *dev) 50static drm_buf_t *i830_freelist_get(drm_device_t *dev)
56{ 51{
57 drm_device_dma_t *dma = dev->dma; 52 drm_device_dma_t *dma = dev->dma;
@@ -358,6 +353,7 @@ static int i830_dma_initialize(drm_device_t *dev,
358 DRM_ERROR("can not find mmio map!\n"); 353 DRM_ERROR("can not find mmio map!\n");
359 return -EINVAL; 354 return -EINVAL;
360 } 355 }
356 dev->agp_buffer_token = init->buffers_offset;
361 dev->agp_buffer_map = drm_core_findmap(dev, init->buffers_offset); 357 dev->agp_buffer_map = drm_core_findmap(dev, init->buffers_offset);
362 if(!dev->agp_buffer_map) { 358 if(!dev->agp_buffer_map) {
363 dev->dev_private = (void *)dev_priv; 359 dev->dev_private = (void *)dev_priv;
@@ -1586,3 +1582,19 @@ drm_ioctl_desc_t i830_ioctls[] = {
1586}; 1582};
1587 1583
1588int i830_max_ioctl = DRM_ARRAY_SIZE(i830_ioctls); 1584int i830_max_ioctl = DRM_ARRAY_SIZE(i830_ioctls);
1585
1586/**
1587 * Determine if the device really is AGP or not.
1588 *
1589 * All Intel graphics chipsets are treated as AGP, even if they are really
1590 * PCI-e.
1591 *
1592 * \param dev The device to be tested.
1593 *
1594 * \returns
1595 * A value of 1 is always returned to indicate that every i8xx is AGP.
1596 */
1597int i830_driver_device_is_agp(drm_device_t * dev)
1598{
1599 return 1;
1600}
diff --git a/drivers/char/drm/i830_drv.c b/drivers/char/drm/i830_drv.c
index bc36be76b8b2..0da9cd19919e 100644
--- a/drivers/char/drm/i830_drv.c
+++ b/drivers/char/drm/i830_drv.c
@@ -88,6 +88,7 @@ static struct drm_driver driver = {
88 .dev_priv_size = sizeof(drm_i830_buf_priv_t), 88 .dev_priv_size = sizeof(drm_i830_buf_priv_t),
89 .pretakedown = i830_driver_pretakedown, 89 .pretakedown = i830_driver_pretakedown,
90 .prerelease = i830_driver_prerelease, 90 .prerelease = i830_driver_prerelease,
91 .device_is_agp = i830_driver_device_is_agp,
91 .release = i830_driver_release, 92 .release = i830_driver_release,
92 .dma_quiescent = i830_driver_dma_quiescent, 93 .dma_quiescent = i830_driver_dma_quiescent,
93 .reclaim_buffers = i830_reclaim_buffers, 94 .reclaim_buffers = i830_reclaim_buffers,
diff --git a/drivers/char/drm/i830_drv.h b/drivers/char/drm/i830_drv.h
index df7746131dea..63f96a8b6a4a 100644
--- a/drivers/char/drm/i830_drv.h
+++ b/drivers/char/drm/i830_drv.h
@@ -137,6 +137,7 @@ extern void i830_driver_pretakedown(drm_device_t *dev);
137extern void i830_driver_release(drm_device_t *dev, struct file *filp); 137extern void i830_driver_release(drm_device_t *dev, struct file *filp);
138extern int i830_driver_dma_quiescent(drm_device_t *dev); 138extern int i830_driver_dma_quiescent(drm_device_t *dev);
139extern void i830_driver_prerelease(drm_device_t *dev, DRMFILE filp); 139extern void i830_driver_prerelease(drm_device_t *dev, DRMFILE filp);
140extern int i830_driver_device_is_agp(drm_device_t * dev);
140 141
141#define I830_BASE(reg) ((unsigned long) \ 142#define I830_BASE(reg) ((unsigned long) \
142 dev_priv->mmio_map->handle) 143 dev_priv->mmio_map->handle)
diff --git a/drivers/char/drm/i915_dma.c b/drivers/char/drm/i915_dma.c
index acf9e52a9507..34f552f90c4a 100644
--- a/drivers/char/drm/i915_dma.c
+++ b/drivers/char/drm/i915_dma.c
@@ -95,9 +95,8 @@ static int i915_dma_cleanup(drm_device_t * dev)
95 drm_core_ioremapfree( &dev_priv->ring.map, dev); 95 drm_core_ioremapfree( &dev_priv->ring.map, dev);
96 } 96 }
97 97
98 if (dev_priv->hw_status_page) { 98 if (dev_priv->status_page_dmah) {
99 drm_pci_free(dev, PAGE_SIZE, dev_priv->hw_status_page, 99 drm_pci_free(dev, dev_priv->status_page_dmah);
100 dev_priv->dma_status_page);
101 /* Need to rewrite hardware status page */ 100 /* Need to rewrite hardware status page */
102 I915_WRITE(0x02080, 0x1ffff000); 101 I915_WRITE(0x02080, 0x1ffff000);
103 } 102 }
@@ -174,16 +173,18 @@ static int i915_initialize(drm_device_t * dev,
174 dev_priv->allow_batchbuffer = 1; 173 dev_priv->allow_batchbuffer = 1;
175 174
176 /* Program Hardware Status Page */ 175 /* Program Hardware Status Page */
177 dev_priv->hw_status_page = drm_pci_alloc(dev, PAGE_SIZE, PAGE_SIZE, 176 dev_priv->status_page_dmah = drm_pci_alloc(dev, PAGE_SIZE, PAGE_SIZE,
178 0xffffffff, 177 0xffffffff);
179 &dev_priv->dma_status_page);
180 178
181 if (!dev_priv->hw_status_page) { 179 if (!dev_priv->status_page_dmah) {
182 dev->dev_private = (void *)dev_priv; 180 dev->dev_private = (void *)dev_priv;
183 i915_dma_cleanup(dev); 181 i915_dma_cleanup(dev);
184 DRM_ERROR("Can not allocate hardware status page\n"); 182 DRM_ERROR("Can not allocate hardware status page\n");
185 return DRM_ERR(ENOMEM); 183 return DRM_ERR(ENOMEM);
186 } 184 }
185 dev_priv->hw_status_page = dev_priv->status_page_dmah->vaddr;
186 dev_priv->dma_status_page = dev_priv->status_page_dmah->busaddr;
187
187 memset(dev_priv->hw_status_page, 0, PAGE_SIZE); 188 memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
188 DRM_DEBUG("hw status page @ %p\n", dev_priv->hw_status_page); 189 DRM_DEBUG("hw status page @ %p\n", dev_priv->hw_status_page);
189 190
@@ -731,3 +732,19 @@ drm_ioctl_desc_t i915_ioctls[] = {
731}; 732};
732 733
733int i915_max_ioctl = DRM_ARRAY_SIZE(i915_ioctls); 734int i915_max_ioctl = DRM_ARRAY_SIZE(i915_ioctls);
735
736/**
737 * Determine if the device really is AGP or not.
738 *
739 * All Intel graphics chipsets are treated as AGP, even if they are really
740 * PCI-e.
741 *
742 * \param dev The device to be tested.
743 *
744 * \returns
745 * A value of 1 is always returned to indicate that every i9x5 is AGP.
746 */
747int i915_driver_device_is_agp(drm_device_t * dev)
748{
749 return 1;
750}
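The i915 hunks above track a DRM core API change around consistent memory. Condensed, the old and new calling conventions look like this (both taken from the lines of this diff; the drm_dma_handle_t also records the allocation size):

/* Old API: the caller got a bare vaddr and had to carry the size and
 * bus address around separately for the matching free.
 *
 *	vaddr = drm_pci_alloc(dev, PAGE_SIZE, PAGE_SIZE, 0xffffffff,
 *			      &busaddr);
 *	drm_pci_free(dev, PAGE_SIZE, vaddr, busaddr);
 *
 * New API: one drm_dma_handle_t carries vaddr, busaddr, and size, so
 * teardown cannot pass mismatched values.
 *
 *	dmah = drm_pci_alloc(dev, PAGE_SIZE, PAGE_SIZE, 0xffffffff);
 *	...use dmah->vaddr and dmah->busaddr...
 *	drm_pci_free(dev, dmah);
 */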
diff --git a/drivers/char/drm/i915_drv.c b/drivers/char/drm/i915_drv.c
index 1f59d3fc79bc..106b9ec02213 100644
--- a/drivers/char/drm/i915_drv.c
+++ b/drivers/char/drm/i915_drv.c
@@ -79,6 +79,7 @@ static struct drm_driver driver = {
79 DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED, 79 DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED,
80 .pretakedown = i915_driver_pretakedown, 80 .pretakedown = i915_driver_pretakedown,
81 .prerelease = i915_driver_prerelease, 81 .prerelease = i915_driver_prerelease,
82 .device_is_agp = i915_driver_device_is_agp,
82 .irq_preinstall = i915_driver_irq_preinstall, 83 .irq_preinstall = i915_driver_irq_preinstall,
83 .irq_postinstall = i915_driver_irq_postinstall, 84 .irq_postinstall = i915_driver_irq_postinstall,
84 .irq_uninstall = i915_driver_irq_uninstall, 85 .irq_uninstall = i915_driver_irq_uninstall,
diff --git a/drivers/char/drm/i915_drv.h b/drivers/char/drm/i915_drv.h
index 9c37d2367dd5..70ed4e68eac8 100644
--- a/drivers/char/drm/i915_drv.h
+++ b/drivers/char/drm/i915_drv.h
@@ -79,9 +79,10 @@ typedef struct drm_i915_private {
79 drm_i915_sarea_t *sarea_priv; 79 drm_i915_sarea_t *sarea_priv;
80 drm_i915_ring_buffer_t ring; 80 drm_i915_ring_buffer_t ring;
81 81
82 drm_dma_handle_t *status_page_dmah;
82 void *hw_status_page; 83 void *hw_status_page;
83 unsigned long counter;
84 dma_addr_t dma_status_page; 84 dma_addr_t dma_status_page;
85 unsigned long counter;
85 86
86 int back_offset; 87 int back_offset;
87 int front_offset; 88 int front_offset;
@@ -102,6 +103,7 @@ typedef struct drm_i915_private {
102extern void i915_kernel_lost_context(drm_device_t * dev); 103extern void i915_kernel_lost_context(drm_device_t * dev);
103extern void i915_driver_pretakedown(drm_device_t *dev); 104extern void i915_driver_pretakedown(drm_device_t *dev);
104extern void i915_driver_prerelease(drm_device_t *dev, DRMFILE filp); 105extern void i915_driver_prerelease(drm_device_t *dev, DRMFILE filp);
106extern int i915_driver_device_is_agp(drm_device_t *dev);
105 107
106/* i915_irq.c */ 108/* i915_irq.c */
107extern int i915_irq_emit(DRM_IOCTL_ARGS); 109extern int i915_irq_emit(DRM_IOCTL_ARGS);
diff --git a/drivers/char/drm/mga_dma.c b/drivers/char/drm/mga_dma.c
index 832eaf8a5068..567b425b784f 100644
--- a/drivers/char/drm/mga_dma.c
+++ b/drivers/char/drm/mga_dma.c
@@ -23,18 +23,21 @@
23 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 23 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
24 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 24 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
25 * DEALINGS IN THE SOFTWARE. 25 * DEALINGS IN THE SOFTWARE.
26 * 26 */
27 * Authors: 27
28 * Rickard E. (Rik) Faith <faith@valinux.com> 28/**
29 * Jeff Hartmann <jhartmann@valinux.com> 29 * \file mga_dma.c
30 * Keith Whitwell <keith@tungstengraphics.com> 30 * DMA support for MGA G200 / G400.
31 * 31 *
32 * Rewritten by: 32 * \author Rickard E. (Rik) Faith <faith@valinux.com>
33 * Gareth Hughes <gareth@valinux.com> 33 * \author Jeff Hartmann <jhartmann@valinux.com>
34 * \author Keith Whitwell <keith@tungstengraphics.com>
35 * \author Gareth Hughes <gareth@valinux.com>
34 */ 36 */
35 37
36#include "drmP.h" 38#include "drmP.h"
37#include "drm.h" 39#include "drm.h"
40#include "drm_sarea.h"
38#include "mga_drm.h" 41#include "mga_drm.h"
39#include "mga_drv.h" 42#include "mga_drv.h"
40 43
@@ -148,7 +151,7 @@ void mga_do_dma_flush( drm_mga_private_t *dev_priv )
148 DRM_DEBUG( " space = 0x%06x\n", primary->space ); 151 DRM_DEBUG( " space = 0x%06x\n", primary->space );
149 152
150 mga_flush_write_combine(); 153 mga_flush_write_combine();
151 MGA_WRITE( MGA_PRIMEND, tail | MGA_PAGPXFER ); 154 MGA_WRITE(MGA_PRIMEND, tail | dev_priv->dma_access);
152 155
153 DRM_DEBUG( "done.\n" ); 156 DRM_DEBUG( "done.\n" );
154} 157}
@@ -190,7 +193,7 @@ void mga_do_dma_wrap_start( drm_mga_private_t *dev_priv )
190 DRM_DEBUG( " space = 0x%06x\n", primary->space ); 193 DRM_DEBUG( " space = 0x%06x\n", primary->space );
191 194
192 mga_flush_write_combine(); 195 mga_flush_write_combine();
193 MGA_WRITE( MGA_PRIMEND, tail | MGA_PAGPXFER ); 196 MGA_WRITE(MGA_PRIMEND, tail | dev_priv->dma_access);
194 197
195 set_bit( 0, &primary->wrapped ); 198 set_bit( 0, &primary->wrapped );
196 DRM_DEBUG( "done.\n" ); 199 DRM_DEBUG( "done.\n" );
@@ -396,23 +399,383 @@ int mga_freelist_put( drm_device_t *dev, drm_buf_t *buf )
396 * DMA initialization, cleanup 399 * DMA initialization, cleanup
397 */ 400 */
398 401
402
403int mga_driver_preinit(drm_device_t *dev, unsigned long flags)
404{
405 drm_mga_private_t * dev_priv;
406
407 dev_priv = drm_alloc(sizeof(drm_mga_private_t), DRM_MEM_DRIVER);
408 if (!dev_priv)
409 return DRM_ERR(ENOMEM);
410
411 dev->dev_private = (void *)dev_priv;
412 memset(dev_priv, 0, sizeof(drm_mga_private_t));
413
414 dev_priv->usec_timeout = MGA_DEFAULT_USEC_TIMEOUT;
415 dev_priv->chipset = flags;
416
417 return 0;
418}
419
420/**
421 * Bootstrap the driver for AGP DMA.
422 *
423 * \todo
424 * Investigate whether there is any benefit to storing the WARP microcode in
425 * AGP memory. If not, the microcode may as well always be put in PCI
426 * memory.
427 *
428 * \todo
429 * This routine needs to set dma_bs->agp_mode to the mode actually configured
430 * in the hardware. Looking just at the Linux AGP driver code, I don't see
431 * an easy way to determine this.
432 *
433 * \sa mga_do_dma_bootstrap, mga_do_pci_dma_bootstrap
434 */
435static int mga_do_agp_dma_bootstrap(drm_device_t * dev,
436 drm_mga_dma_bootstrap_t * dma_bs)
437{
438 drm_mga_private_t * const dev_priv = (drm_mga_private_t *) dev->dev_private;
439 const unsigned int warp_size = mga_warp_microcode_size(dev_priv);
440 int err;
441 unsigned offset;
442 const unsigned secondary_size = dma_bs->secondary_bin_count
443 * dma_bs->secondary_bin_size;
444 const unsigned agp_size = (dma_bs->agp_size << 20);
445 drm_buf_desc_t req;
446 drm_agp_mode_t mode;
447 drm_agp_info_t info;
448
449
450 /* Acquire AGP. */
451 err = drm_agp_acquire(dev);
452 if (err) {
453 DRM_ERROR("Unable to acquire AGP\n");
454 return err;
455 }
456
457 err = drm_agp_info(dev, &info);
458 if (err) {
459 DRM_ERROR("Unable to get AGP info\n");
460 return err;
461 }
462
463 mode.mode = (info.mode & ~0x07) | dma_bs->agp_mode;
464 err = drm_agp_enable(dev, mode);
465 if (err) {
466 DRM_ERROR("Unable to enable AGP (mode = 0x%lx)\n", mode.mode);
467 return err;
468 }
469
470
471 /* In addition to the usual AGP mode configuration, the G200 AGP cards
472 * need to have the AGP mode "manually" set.
473 */
474
475 if (dev_priv->chipset == MGA_CARD_TYPE_G200) {
476 if (mode.mode & 0x02) {
477 MGA_WRITE(MGA_AGP_PLL, MGA_AGP2XPLL_ENABLE);
478 }
479 else {
480 MGA_WRITE(MGA_AGP_PLL, MGA_AGP2XPLL_DISABLE);
481 }
482 }
483
484
485 /* Allocate and bind AGP memory. */
486 dev_priv->agp_pages = agp_size / PAGE_SIZE;
487 dev_priv->agp_mem = drm_alloc_agp( dev, dev_priv->agp_pages, 0 );
488 if (dev_priv->agp_mem == NULL) {
489 dev_priv->agp_pages = 0;
490 DRM_ERROR("Unable to allocate %uMB AGP memory\n",
491 dma_bs->agp_size);
492 return DRM_ERR(ENOMEM);
493 }
494
495 err = drm_bind_agp( dev_priv->agp_mem, 0 );
496 if (err) {
497 DRM_ERROR("Unable to bind AGP memory\n");
498 return err;
499 }
500
501 offset = 0;
502 err = drm_addmap( dev, offset, warp_size,
503 _DRM_AGP, _DRM_READ_ONLY, & dev_priv->warp );
504 if (err) {
505 DRM_ERROR("Unable to map WARP microcode\n");
506 return err;
507 }
508
509 offset += warp_size;
510 err = drm_addmap( dev, offset, dma_bs->primary_size,
511 _DRM_AGP, _DRM_READ_ONLY, & dev_priv->primary );
512 if (err) {
513 DRM_ERROR("Unable to map primary DMA region\n");
514 return err;
515 }
516
517 offset += dma_bs->primary_size;
518 err = drm_addmap( dev, offset, secondary_size,
519 _DRM_AGP, 0, & dev->agp_buffer_map );
520 if (err) {
521 DRM_ERROR("Unable to map secondary DMA region\n");
522 return err;
523 }
524
525 (void) memset( &req, 0, sizeof(req) );
526 req.count = dma_bs->secondary_bin_count;
527 req.size = dma_bs->secondary_bin_size;
528 req.flags = _DRM_AGP_BUFFER;
529 req.agp_start = offset;
530
531 err = drm_addbufs_agp( dev, & req );
532 if (err) {
533 DRM_ERROR("Unable to add secondary DMA buffers\n");
534 return err;
535 }
536
537 offset += secondary_size;
538 err = drm_addmap( dev, offset, agp_size - offset,
539 _DRM_AGP, 0, & dev_priv->agp_textures );
540 if (err) {
541 DRM_ERROR("Unable to map AGP texture region\n");
542 return err;
543 }
544
545 drm_core_ioremap(dev_priv->warp, dev);
546 drm_core_ioremap(dev_priv->primary, dev);
547 drm_core_ioremap(dev->agp_buffer_map, dev);
548
549 if (!dev_priv->warp->handle ||
550 !dev_priv->primary->handle || !dev->agp_buffer_map->handle) {
551 DRM_ERROR("failed to ioremap agp regions! (%p, %p, %p)\n",
552 dev_priv->warp->handle, dev_priv->primary->handle,
553 dev->agp_buffer_map->handle);
554 return DRM_ERR(ENOMEM);
555 }
556
557 dev_priv->dma_access = MGA_PAGPXFER;
558 dev_priv->wagp_enable = MGA_WAGP_ENABLE;
559
560 DRM_INFO("Initialized card for AGP DMA.\n");
561 return 0;
562}
563
564/**
565 * Bootstrap the driver for PCI DMA.
566 *
567 * \todo
568 * The algorithm for decreasing the size of the primary DMA buffer could be
 569 * better. The size should be rounded up to the nearest page size, and the
 570 * request size should then be decreased by a single page on each pass through the loop.
571 *
572 * \todo
573 * Determine whether the maximum address passed to drm_pci_alloc is correct.
574 * The same goes for drm_addbufs_pci.
575 *
576 * \sa mga_do_dma_bootstrap, mga_do_agp_dma_bootstrap
577 */
578static int mga_do_pci_dma_bootstrap(drm_device_t * dev,
579 drm_mga_dma_bootstrap_t * dma_bs)
580{
581 drm_mga_private_t * const dev_priv = (drm_mga_private_t *) dev->dev_private;
582 const unsigned int warp_size = mga_warp_microcode_size(dev_priv);
583 unsigned int primary_size;
584 unsigned int bin_count;
585 int err;
586 drm_buf_desc_t req;
587
588
589 if (dev->dma == NULL) {
590 DRM_ERROR("dev->dma is NULL\n");
591 return DRM_ERR(EFAULT);
592 }
593
594 /* The proper alignment is 0x100 for this mapping */
595 err = drm_addmap(dev, 0, warp_size, _DRM_CONSISTENT,
596 _DRM_READ_ONLY, &dev_priv->warp);
597 if (err != 0) {
598 DRM_ERROR("Unable to create mapping for WARP microcode\n");
599 return err;
600 }
601
602 /* Other than the bottom two bits being used to encode other
603 * information, there don't appear to be any restrictions on the
604 * alignment of the primary or secondary DMA buffers.
605 */
606
607 for ( primary_size = dma_bs->primary_size
608 ; primary_size != 0
609 ; primary_size >>= 1 ) {
610 /* The proper alignment for this mapping is 0x04 */
611 err = drm_addmap(dev, 0, primary_size, _DRM_CONSISTENT,
612 _DRM_READ_ONLY, &dev_priv->primary);
613 if (!err)
614 break;
615 }
616
617 if (err != 0) {
618 DRM_ERROR("Unable to allocate primary DMA region\n");
619 return DRM_ERR(ENOMEM);
620 }
621
622 if (dev_priv->primary->size != dma_bs->primary_size) {
623 DRM_INFO("Primary DMA buffer size reduced from %u to %u.\n",
624 dma_bs->primary_size,
625 (unsigned) dev_priv->primary->size);
626 dma_bs->primary_size = dev_priv->primary->size;
627 }
628
629 for ( bin_count = dma_bs->secondary_bin_count
630 ; bin_count > 0
631 ; bin_count-- ) {
632 (void) memset( &req, 0, sizeof(req) );
633 req.count = bin_count;
634 req.size = dma_bs->secondary_bin_size;
635
636 err = drm_addbufs_pci( dev, & req );
637 if (!err) {
638 break;
639 }
640 }
641
642 if (bin_count == 0) {
643 DRM_ERROR("Unable to add secondary DMA buffers\n");
644 return err;
645 }
646
647 if (bin_count != dma_bs->secondary_bin_count) {
648 DRM_INFO("Secondary PCI DMA buffer bin count reduced from %u "
649 "to %u.\n", dma_bs->secondary_bin_count, bin_count);
650
651 dma_bs->secondary_bin_count = bin_count;
652 }
653
654 dev_priv->dma_access = 0;
655 dev_priv->wagp_enable = 0;
656
657 dma_bs->agp_mode = 0;
658
659 DRM_INFO("Initialized card for PCI DMA.\n");
660 return 0;
661}
662
663
664static int mga_do_dma_bootstrap(drm_device_t * dev,
665 drm_mga_dma_bootstrap_t * dma_bs)
666{
667 const int is_agp = (dma_bs->agp_mode != 0) && drm_device_is_agp(dev);
668 int err;
669 drm_mga_private_t * const dev_priv =
670 (drm_mga_private_t *) dev->dev_private;
671
672
673 dev_priv->used_new_dma_init = 1;
674
675 /* The first steps are the same for both PCI and AGP based DMA. Map
676 * the card's MMIO registers and map a status page.
677 */
678 err = drm_addmap( dev, dev_priv->mmio_base, dev_priv->mmio_size,
679 _DRM_REGISTERS, _DRM_READ_ONLY, & dev_priv->mmio );
680 if (err) {
681 DRM_ERROR("Unable to map MMIO region\n");
682 return err;
683 }
684
685
686 err = drm_addmap( dev, 0, SAREA_MAX, _DRM_SHM,
687 _DRM_READ_ONLY | _DRM_LOCKED | _DRM_KERNEL,
688 & dev_priv->status );
689 if (err) {
690 DRM_ERROR("Unable to map status region\n");
691 return err;
692 }
693
694
695 /* The DMA initialization procedure is slightly different for PCI and
696 * AGP cards. AGP cards just allocate a large block of AGP memory and
697 * carve off portions of it for internal uses. The remaining memory
698 * is returned to user-mode to be used for AGP textures.
699 */
700
701 if (is_agp) {
702 err = mga_do_agp_dma_bootstrap(dev, dma_bs);
703 }
704
705 /* If we attempted to initialize the card for AGP DMA but failed,
706 * clean up any mess that may have been created.
707 */
708
709 if (err) {
710 mga_do_cleanup_dma(dev);
711 }
712
713
714 /* Not only do we want to try to initialize PCI cards for PCI DMA,
 715 * but we also try to initialize AGP cards that could not be
716 * initialized for AGP DMA. This covers the case where we have an AGP
717 * card in a system with an unsupported AGP chipset. In that case the
718 * card will be detected as AGP, but we won't be able to allocate any
719 * AGP memory, etc.
720 */
721
722 if (!is_agp || err) {
723 err = mga_do_pci_dma_bootstrap(dev, dma_bs);
724 }
725
726
727 return err;
728}
729
730int mga_dma_bootstrap(DRM_IOCTL_ARGS)
731{
732 DRM_DEVICE;
733 drm_mga_dma_bootstrap_t bootstrap;
734 int err;
735
736
737 DRM_COPY_FROM_USER_IOCTL(bootstrap,
738 (drm_mga_dma_bootstrap_t __user *) data,
739 sizeof(bootstrap));
740
741 err = mga_do_dma_bootstrap(dev, & bootstrap);
742 if (! err) {
743 static const int modes[] = { 0, 1, 2, 2, 4, 4, 4, 4 };
744 const drm_mga_private_t * const dev_priv =
745 (drm_mga_private_t *) dev->dev_private;
746
747 if (dev_priv->agp_textures != NULL) {
748 bootstrap.texture_handle = dev_priv->agp_textures->offset;
749 bootstrap.texture_size = dev_priv->agp_textures->size;
750 }
751 else {
752 bootstrap.texture_handle = 0;
753 bootstrap.texture_size = 0;
754 }
755
756 bootstrap.agp_mode = modes[ bootstrap.agp_mode & 0x07 ];
757 if (DRM_COPY_TO_USER( (void __user *) data, & bootstrap,
758 sizeof(bootstrap))) {
759 err = DRM_ERR(EFAULT);
760 }
761 }
762 else {
763 mga_do_cleanup_dma(dev);
764 }
765
766 return err;
767}
768
399static int mga_do_init_dma( drm_device_t *dev, drm_mga_init_t *init ) 769static int mga_do_init_dma( drm_device_t *dev, drm_mga_init_t *init )
400{ 770{
401 drm_mga_private_t *dev_priv; 771 drm_mga_private_t *dev_priv;
402 int ret; 772 int ret;
403 DRM_DEBUG( "\n" ); 773 DRM_DEBUG( "\n" );
404 774
405 dev_priv = drm_alloc( sizeof(drm_mga_private_t), DRM_MEM_DRIVER );
406 if ( !dev_priv )
407 return DRM_ERR(ENOMEM);
408
409 memset( dev_priv, 0, sizeof(drm_mga_private_t) );
410 775
411 dev_priv->chipset = init->chipset; 776 dev_priv = dev->dev_private;
412 777
413 dev_priv->usec_timeout = MGA_DEFAULT_USEC_TIMEOUT; 778 if (init->sgram) {
414
415 if ( init->sgram ) {
416 dev_priv->clear_cmd = MGA_DWGCTL_CLEAR | MGA_ATYPE_BLK; 779 dev_priv->clear_cmd = MGA_DWGCTL_CLEAR | MGA_ATYPE_BLK;
417 } else { 780 } else {
418 dev_priv->clear_cmd = MGA_DWGCTL_CLEAR | MGA_ATYPE_RSTR; 781 dev_priv->clear_cmd = MGA_DWGCTL_CLEAR | MGA_ATYPE_RSTR;
@@ -436,88 +799,66 @@ static int mga_do_init_dma( drm_device_t *dev, drm_mga_init_t *init )
436 799
437 DRM_GETSAREA(); 800 DRM_GETSAREA();
438 801
439 if(!dev_priv->sarea) { 802 if (!dev_priv->sarea) {
440 DRM_ERROR( "failed to find sarea!\n" ); 803 DRM_ERROR("failed to find sarea!\n");
441 /* Assign dev_private so we can do cleanup. */
442 dev->dev_private = (void *)dev_priv;
443 mga_do_cleanup_dma( dev );
444 return DRM_ERR(EINVAL); 804 return DRM_ERR(EINVAL);
445 } 805 }
446 806
447 dev_priv->mmio = drm_core_findmap(dev, init->mmio_offset); 807 if (! dev_priv->used_new_dma_init) {
448 if(!dev_priv->mmio) { 808 dev_priv->status = drm_core_findmap(dev, init->status_offset);
449 DRM_ERROR( "failed to find mmio region!\n" ); 809 if (!dev_priv->status) {
450 /* Assign dev_private so we can do cleanup. */ 810 DRM_ERROR("failed to find status page!\n");
451 dev->dev_private = (void *)dev_priv; 811 return DRM_ERR(EINVAL);
452 mga_do_cleanup_dma( dev ); 812 }
453 return DRM_ERR(EINVAL); 813 dev_priv->mmio = drm_core_findmap(dev, init->mmio_offset);
454 } 814 if (!dev_priv->mmio) {
455 dev_priv->status = drm_core_findmap(dev, init->status_offset); 815 DRM_ERROR("failed to find mmio region!\n");
456 if(!dev_priv->status) { 816 return DRM_ERR(EINVAL);
457 DRM_ERROR( "failed to find status page!\n" ); 817 }
458 /* Assign dev_private so we can do cleanup. */ 818 dev_priv->warp = drm_core_findmap(dev, init->warp_offset);
459 dev->dev_private = (void *)dev_priv; 819 if (!dev_priv->warp) {
460 mga_do_cleanup_dma( dev ); 820 DRM_ERROR("failed to find warp microcode region!\n");
461 return DRM_ERR(EINVAL); 821 return DRM_ERR(EINVAL);
462 } 822 }
463 dev_priv->warp = drm_core_findmap(dev, init->warp_offset); 823 dev_priv->primary = drm_core_findmap(dev, init->primary_offset);
464 if(!dev_priv->warp) { 824 if (!dev_priv->primary) {
465 DRM_ERROR( "failed to find warp microcode region!\n" ); 825 DRM_ERROR("failed to find primary dma region!\n");
466 /* Assign dev_private so we can do cleanup. */ 826 return DRM_ERR(EINVAL);
467 dev->dev_private = (void *)dev_priv; 827 }
468 mga_do_cleanup_dma( dev ); 828 dev->agp_buffer_token = init->buffers_offset;
469 return DRM_ERR(EINVAL); 829 dev->agp_buffer_map = drm_core_findmap(dev, init->buffers_offset);
470 } 830 if (!dev->agp_buffer_map) {
471 dev_priv->primary = drm_core_findmap(dev, init->primary_offset); 831 DRM_ERROR("failed to find dma buffer region!\n");
472 if(!dev_priv->primary) { 832 return DRM_ERR(EINVAL);
473 DRM_ERROR( "failed to find primary dma region!\n" ); 833 }
474 /* Assign dev_private so we can do cleanup. */ 834
475 dev->dev_private = (void *)dev_priv; 835 drm_core_ioremap(dev_priv->warp, dev);
476 mga_do_cleanup_dma( dev ); 836 drm_core_ioremap(dev_priv->primary, dev);
477 return DRM_ERR(EINVAL); 837 drm_core_ioremap(dev->agp_buffer_map, dev);
478 }
479 dev->agp_buffer_map = drm_core_findmap(dev, init->buffers_offset);
480 if(!dev->agp_buffer_map) {
481 DRM_ERROR( "failed to find dma buffer region!\n" );
482 /* Assign dev_private so we can do cleanup. */
483 dev->dev_private = (void *)dev_priv;
484 mga_do_cleanup_dma( dev );
485 return DRM_ERR(EINVAL);
486 } 838 }
487 839
488 dev_priv->sarea_priv = 840 dev_priv->sarea_priv =
489 (drm_mga_sarea_t *)((u8 *)dev_priv->sarea->handle + 841 (drm_mga_sarea_t *)((u8 *)dev_priv->sarea->handle +
490 init->sarea_priv_offset); 842 init->sarea_priv_offset);
491 843
492 drm_core_ioremap( dev_priv->warp, dev ); 844 if (!dev_priv->warp->handle ||
493 drm_core_ioremap( dev_priv->primary, dev ); 845 !dev_priv->primary->handle ||
494 drm_core_ioremap( dev->agp_buffer_map, dev ); 846 ((dev_priv->dma_access != 0) &&
495 847 ((dev->agp_buffer_map == NULL) ||
496 if(!dev_priv->warp->handle || 848 (dev->agp_buffer_map->handle == NULL)))) {
497 !dev_priv->primary->handle || 849 DRM_ERROR("failed to ioremap agp regions!\n");
498 !dev->agp_buffer_map->handle ) {
499 DRM_ERROR( "failed to ioremap agp regions!\n" );
500 /* Assign dev_private so we can do cleanup. */
501 dev->dev_private = (void *)dev_priv;
502 mga_do_cleanup_dma( dev );
503 return DRM_ERR(ENOMEM); 850 return DRM_ERR(ENOMEM);
504 } 851 }
505 852
506 ret = mga_warp_install_microcode( dev_priv ); 853 ret = mga_warp_install_microcode(dev_priv);
507 if ( ret < 0 ) { 854 if (ret < 0) {
508 DRM_ERROR( "failed to install WARP ucode!\n" ); 855 DRM_ERROR("failed to install WARP ucode!\n");
509 /* Assign dev_private so we can do cleanup. */
510 dev->dev_private = (void *)dev_priv;
511 mga_do_cleanup_dma( dev );
512 return ret; 856 return ret;
513 } 857 }
514 858
515 ret = mga_warp_init( dev_priv ); 859 ret = mga_warp_init(dev_priv);
516 if ( ret < 0 ) { 860 if (ret < 0) {
517 DRM_ERROR( "failed to init WARP engine!\n" ); 861 DRM_ERROR("failed to init WARP engine!\n");
518 /* Assign dev_private so we can do cleanup. */
519 dev->dev_private = (void *)dev_priv;
520 mga_do_cleanup_dma( dev );
521 return ret; 862 return ret;
522 } 863 }
523 864
@@ -557,22 +898,18 @@ static int mga_do_init_dma( drm_device_t *dev, drm_mga_init_t *init )
557 dev_priv->sarea_priv->last_frame.head = 0; 898 dev_priv->sarea_priv->last_frame.head = 0;
558 dev_priv->sarea_priv->last_frame.wrap = 0; 899 dev_priv->sarea_priv->last_frame.wrap = 0;
559 900
560 if ( mga_freelist_init( dev, dev_priv ) < 0 ) { 901 if (mga_freelist_init(dev, dev_priv) < 0) {
561 DRM_ERROR( "could not initialize freelist\n" ); 902 DRM_ERROR("could not initialize freelist\n");
562 /* Assign dev_private so we can do cleanup. */
563 dev->dev_private = (void *)dev_priv;
564 mga_do_cleanup_dma( dev );
565 return DRM_ERR(ENOMEM); 903 return DRM_ERR(ENOMEM);
566 } 904 }
567 905
568 /* Make dev_private visable to others. */
569 dev->dev_private = (void *)dev_priv;
570 return 0; 906 return 0;
571} 907}
572 908
573static int mga_do_cleanup_dma( drm_device_t *dev ) 909static int mga_do_cleanup_dma( drm_device_t *dev )
574{ 910{
575 DRM_DEBUG( "\n" ); 911 int err = 0;
912 DRM_DEBUG("\n");
576 913
577 /* Make sure interrupts are disabled here because the uninstall ioctl 914 /* Make sure interrupts are disabled here because the uninstall ioctl
578 * may not have been called from userspace and after dev_private 915 * may not have been called from userspace and after dev_private
@@ -583,20 +920,49 @@ static int mga_do_cleanup_dma( drm_device_t *dev )
583 if ( dev->dev_private ) { 920 if ( dev->dev_private ) {
584 drm_mga_private_t *dev_priv = dev->dev_private; 921 drm_mga_private_t *dev_priv = dev->dev_private;
585 922
586 if ( dev_priv->warp != NULL ) 923 if ((dev_priv->warp != NULL)
587 drm_core_ioremapfree( dev_priv->warp, dev ); 924 && (dev_priv->mmio->type != _DRM_CONSISTENT))
588 if ( dev_priv->primary != NULL ) 925 drm_core_ioremapfree(dev_priv->warp, dev);
589 drm_core_ioremapfree( dev_priv->primary, dev ); 926
590 if ( dev->agp_buffer_map != NULL ) 927 if ((dev_priv->primary != NULL)
591 drm_core_ioremapfree( dev->agp_buffer_map, dev ); 928 && (dev_priv->primary->type != _DRM_CONSISTENT))
929 drm_core_ioremapfree(dev_priv->primary, dev);
592 930
593 if ( dev_priv->head != NULL ) { 931 if (dev->agp_buffer_map != NULL)
594 mga_freelist_cleanup( dev ); 932 drm_core_ioremapfree(dev->agp_buffer_map, dev);
933
934 if (dev_priv->used_new_dma_init) {
935 if (dev_priv->agp_mem != NULL) {
936 dev_priv->agp_textures = NULL;
937 drm_unbind_agp(dev_priv->agp_mem);
938
939 drm_free_agp(dev_priv->agp_mem, dev_priv->agp_pages);
940 dev_priv->agp_pages = 0;
941 dev_priv->agp_mem = NULL;
942 }
943
944 if ((dev->agp != NULL) && dev->agp->acquired) {
945 err = drm_agp_release(dev);
946 }
947
948 dev_priv->used_new_dma_init = 0;
595 } 949 }
596 950
597 drm_free( dev->dev_private, sizeof(drm_mga_private_t), 951 dev_priv->warp = NULL;
598 DRM_MEM_DRIVER ); 952 dev_priv->primary = NULL;
599 dev->dev_private = NULL; 953 dev_priv->mmio = NULL;
954 dev_priv->status = NULL;
955 dev_priv->sarea = NULL;
956 dev_priv->sarea_priv = NULL;
957 dev->agp_buffer_map = NULL;
958
959 memset(&dev_priv->prim, 0, sizeof(dev_priv->prim));
960 dev_priv->warp_pipe = 0;
961 memset(dev_priv->warp_pipe_phys, 0, sizeof(dev_priv->warp_pipe_phys));
962
963 if (dev_priv->head != NULL) {
964 mga_freelist_cleanup(dev);
965 }
600 } 966 }
601 967
602 return 0; 968 return 0;
@@ -606,14 +972,20 @@ int mga_dma_init( DRM_IOCTL_ARGS )
606{ 972{
607 DRM_DEVICE; 973 DRM_DEVICE;
608 drm_mga_init_t init; 974 drm_mga_init_t init;
975 int err;
609 976
610 LOCK_TEST_WITH_RETURN( dev, filp ); 977 LOCK_TEST_WITH_RETURN( dev, filp );
611 978
612 DRM_COPY_FROM_USER_IOCTL( init, (drm_mga_init_t __user *)data, sizeof(init) ); 979 DRM_COPY_FROM_USER_IOCTL(init, (drm_mga_init_t __user *) data,
980 sizeof(init));
613 981
614 switch ( init.func ) { 982 switch ( init.func ) {
615 case MGA_INIT_DMA: 983 case MGA_INIT_DMA:
616 return mga_do_init_dma( dev, &init ); 984 err = mga_do_init_dma(dev, &init);
985 if (err) {
986 (void) mga_do_cleanup_dma(dev);
987 }
988 return err;
617 case MGA_CLEANUP_DMA: 989 case MGA_CLEANUP_DMA:
618 return mga_do_cleanup_dma( dev ); 990 return mga_do_cleanup_dma( dev );
619 } 991 }
@@ -742,7 +1114,21 @@ int mga_dma_buffers( DRM_IOCTL_ARGS )
742 return ret; 1114 return ret;
743} 1115}
744 1116
745void mga_driver_pretakedown(drm_device_t *dev) 1117/**
1118 * Called just before the module is unloaded.
1119 */
1120int mga_driver_postcleanup(drm_device_t * dev)
1121{
1122 drm_free(dev->dev_private, sizeof(drm_mga_private_t), DRM_MEM_DRIVER);
1123 dev->dev_private = NULL;
1124
1125 return 0;
1126}
1127
1128/**
1129 * Called when the last opener of the device is closed.
1130 */
1131void mga_driver_pretakedown(drm_device_t * dev)
746{ 1132{
747 mga_do_cleanup_dma( dev ); 1133 mga_do_cleanup_dma( dev );
748} 1134}
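One detail of the bootstrap path above deserves a worked example: mga_do_agp_dma_bootstrap() carves every region out of a single AGP allocation by accumulating 'offset'. With assumed (not mandated) request sizes of a 16 MiB GART, 32 KiB of WARP microcode, a 1 MiB primary buffer, and 128 secondary bins of 64 KiB each:

/* Resulting layout inside the one drm_alloc_agp() block:
 *
 *	0x000000	WARP microcode	(32 KiB, read-only)
 *	0x008000	primary DMA	(1 MiB, read-only)
 *	0x108000	secondary bins	(128 * 64 KiB = 8 MiB)
 *	0x908000..end	AGP textures	(remaining ~7 MiB, user-mode)
 *
 * Each drm_addmap() call advances 'offset' by the size it just
 * mapped, so the regions are contiguous and the leftover tail is
 * handed back to user space for textures.
 */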
diff --git a/drivers/char/drm/mga_drm.h b/drivers/char/drm/mga_drm.h
index 521d4451d012..d20aab3bd57b 100644
--- a/drivers/char/drm/mga_drm.h
+++ b/drivers/char/drm/mga_drm.h
@@ -73,7 +73,8 @@
73 73
74#define MGA_CARD_TYPE_G200 1 74#define MGA_CARD_TYPE_G200 1
75#define MGA_CARD_TYPE_G400 2 75#define MGA_CARD_TYPE_G400 2
76 76#define MGA_CARD_TYPE_G450 3 /* not currently used */
77#define MGA_CARD_TYPE_G550 4
77 78
78#define MGA_FRONT 0x1 79#define MGA_FRONT 0x1
79#define MGA_BACK 0x2 80#define MGA_BACK 0x2
@@ -225,10 +226,6 @@ typedef struct _drm_mga_sarea {
225} drm_mga_sarea_t; 226} drm_mga_sarea_t;
226 227
227 228
228/* WARNING: If you change any of these defines, make sure to change the
229 * defines in the Xserver file (xf86drmMga.h)
230 */
231
232/* MGA specific ioctls 229/* MGA specific ioctls
233 * The device specific ioctl range is 0x40 to 0x79. 230 * The device specific ioctl range is 0x40 to 0x79.
234 */ 231 */
@@ -243,6 +240,14 @@ typedef struct _drm_mga_sarea {
243#define DRM_MGA_BLIT 0x08 240#define DRM_MGA_BLIT 0x08
244#define DRM_MGA_GETPARAM 0x09 241#define DRM_MGA_GETPARAM 0x09
245 242
243/* 3.2:
244 * ioctls for operating on fences.
245 */
246#define DRM_MGA_SET_FENCE 0x0a
247#define DRM_MGA_WAIT_FENCE 0x0b
248#define DRM_MGA_DMA_BOOTSTRAP 0x0c
249
250
246#define DRM_IOCTL_MGA_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_MGA_INIT, drm_mga_init_t) 251#define DRM_IOCTL_MGA_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_MGA_INIT, drm_mga_init_t)
247#define DRM_IOCTL_MGA_FLUSH DRM_IOW( DRM_COMMAND_BASE + DRM_MGA_FLUSH, drm_lock_t) 252#define DRM_IOCTL_MGA_FLUSH DRM_IOW( DRM_COMMAND_BASE + DRM_MGA_FLUSH, drm_lock_t)
248#define DRM_IOCTL_MGA_RESET DRM_IO( DRM_COMMAND_BASE + DRM_MGA_RESET) 253#define DRM_IOCTL_MGA_RESET DRM_IO( DRM_COMMAND_BASE + DRM_MGA_RESET)
@@ -253,6 +258,9 @@ typedef struct _drm_mga_sarea {
253#define DRM_IOCTL_MGA_ILOAD DRM_IOW( DRM_COMMAND_BASE + DRM_MGA_ILOAD, drm_mga_iload_t) 258#define DRM_IOCTL_MGA_ILOAD DRM_IOW( DRM_COMMAND_BASE + DRM_MGA_ILOAD, drm_mga_iload_t)
254#define DRM_IOCTL_MGA_BLIT DRM_IOW( DRM_COMMAND_BASE + DRM_MGA_BLIT, drm_mga_blit_t) 259#define DRM_IOCTL_MGA_BLIT DRM_IOW( DRM_COMMAND_BASE + DRM_MGA_BLIT, drm_mga_blit_t)
255#define DRM_IOCTL_MGA_GETPARAM DRM_IOWR(DRM_COMMAND_BASE + DRM_MGA_GETPARAM, drm_mga_getparam_t) 260#define DRM_IOCTL_MGA_GETPARAM DRM_IOWR(DRM_COMMAND_BASE + DRM_MGA_GETPARAM, drm_mga_getparam_t)
261#define DRM_IOCTL_MGA_SET_FENCE DRM_IOW( DRM_COMMAND_BASE + DRM_MGA_SET_FENCE, uint32_t)
262#define DRM_IOCTL_MGA_WAIT_FENCE DRM_IOWR(DRM_COMMAND_BASE + DRM_MGA_WAIT_FENCE, uint32_t)
263#define DRM_IOCTL_MGA_DMA_BOOTSTRAP DRM_IOWR(DRM_COMMAND_BASE + DRM_MGA_DMA_BOOTSTRAP, drm_mga_dma_bootstrap_t)
256 264
257typedef struct _drm_mga_warp_index { 265typedef struct _drm_mga_warp_index {
258 int installed; 266 int installed;
@@ -291,12 +299,72 @@ typedef struct drm_mga_init {
291 unsigned long buffers_offset; 299 unsigned long buffers_offset;
292} drm_mga_init_t; 300} drm_mga_init_t;
293 301
294typedef struct drm_mga_fullscreen { 302typedef struct drm_mga_dma_bootstrap {
295 enum { 303 /**
296 MGA_INIT_FULLSCREEN = 0x01, 304 * \name AGP texture region
297 MGA_CLEANUP_FULLSCREEN = 0x02 305 *
298 } func; 306 * On return from the DRM_MGA_DMA_BOOTSTRAP ioctl, these fields will
299} drm_mga_fullscreen_t; 307 * be filled in with the actual AGP texture settings.
308 *
309 * \warning
310 * If these fields are non-zero, but drm_mga_dma_bootstrap::agp_mode
311 * is zero, it means that PCI memory (most likely through the use of
312 * an IOMMU) is being used for "AGP" textures.
313 */
314 /*@{*/
315 unsigned long texture_handle; /**< Handle used to map AGP textures. */
316 uint32_t texture_size; /**< Size of the AGP texture region. */
317 /*@}*/
318
319
320 /**
321 * Requested size of the primary DMA region.
322 *
323 * On return from the DRM_MGA_DMA_BOOTSTRAP ioctl, this field will be
 324 * filled in with the actual size of the primary DMA region allocated.
325 */
326 uint32_t primary_size;
327
328
329 /**
330 * Requested number of secondary DMA buffers.
331 *
332 * On return from the DRM_MGA_DMA_BOOTSTRAP ioctl, this field will be
333 * filled in with the actual number of secondary DMA buffers
334 * allocated. Particularly when PCI DMA is used, this may be
335 * (substantially) less than the number requested.
336 */
337 uint32_t secondary_bin_count;
338
339
340 /**
341 * Requested size of each secondary DMA buffer.
342 *
343 * While the kernel \b is free to reduce
344 * drm_mga_dma_bootstrap::secondary_bin_count, it is \b not allowed
 345 * to reduce drm_mga_dma_bootstrap::secondary_bin_size.
346 */
347 uint32_t secondary_bin_size;
348
349
350 /**
351 * Bit-wise mask of AGPSTAT2_* values. Currently only \c AGPSTAT2_1X,
352 * \c AGPSTAT2_2X, and \c AGPSTAT2_4X are supported. If this value is
353 * zero, it means that PCI DMA should be used, even if AGP is
354 * possible.
355 *
356 * On return from the DRM_MGA_DMA_BOOTSTRAP ioctl, this field will be
357 * filled in with the actual AGP mode. If AGP was not available
358 * (i.e., PCI DMA was used), this value will be zero.
359 */
360 uint32_t agp_mode;
361
362
363 /**
364 * Desired AGP GART size, measured in megabytes.
365 */
366 uint8_t agp_size;
367} drm_mga_dma_bootstrap_t;
300 368
301typedef struct drm_mga_clear { 369typedef struct drm_mga_clear {
302 unsigned int flags; 370 unsigned int flags;
@@ -341,6 +409,14 @@ typedef struct _drm_mga_blit {
341 */ 409 */
342#define MGA_PARAM_IRQ_NR 1 410#define MGA_PARAM_IRQ_NR 1
343 411
412/* 3.2: Query the actual card type. The DDX only distinguishes between
413 * G200 chips and non-G200 chips, which it calls G400. It turns out that
414 * there are some very subtle differences between the G4x0 chips and the G550
415 * chips. Using this parameter query, a client-side driver can detect the
416 * difference between a G4x0 and a G550.
417 */
418#define MGA_PARAM_CARD_TYPE 2
419
344typedef struct drm_mga_getparam { 420typedef struct drm_mga_getparam {
345 int param; 421 int param;
346 void __user *value; 422 void __user *value;
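Putting the new ioctl and struct together, a hypothetical 3.2 client would bootstrap DMA roughly as follows. drmCommandWriteRead is the usual libdrm entry point; all request values below are illustrative:

drm_mga_dma_bootstrap_t bs;

memset(&bs, 0, sizeof(bs));
bs.primary_size = 1024 * 1024;		/* request a 1 MiB primary buffer */
bs.secondary_bin_count = 128;		/* the kernel may reduce this...  */
bs.secondary_bin_size = 64 * 1024;	/* ...but never this              */
bs.agp_mode = 4;			/* prefer AGP 4x; 0 forces PCI DMA */
bs.agp_size = 16;			/* ask for a 16 MiB AGP GART       */

if (drmCommandWriteRead(fd, DRM_MGA_DMA_BOOTSTRAP, &bs, sizeof(bs)))
	return -1;			/* bootstrap failed */

/* On return, bs.agp_mode == 0 means the kernel fell back to PCI DMA;
 * bs.texture_handle / bs.texture_size describe the leftover region.
 */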
diff --git a/drivers/char/drm/mga_drv.c b/drivers/char/drm/mga_drv.c
index 844cca9cb29d..daabbba3b297 100644
--- a/drivers/char/drm/mga_drv.c
+++ b/drivers/char/drm/mga_drv.c
@@ -38,8 +38,15 @@
38 38
39#include "drm_pciids.h" 39#include "drm_pciids.h"
40 40
41static int mga_driver_device_is_agp(drm_device_t * dev);
41static int postinit( struct drm_device *dev, unsigned long flags ) 42static int postinit( struct drm_device *dev, unsigned long flags )
42{ 43{
44 drm_mga_private_t * const dev_priv =
45 (drm_mga_private_t *) dev->dev_private;
46
47 dev_priv->mmio_base = pci_resource_start(dev->pdev, 1);
48 dev_priv->mmio_size = pci_resource_len(dev->pdev, 1);
49
43 dev->counters += 3; 50 dev->counters += 3;
44 dev->types[6] = _DRM_STAT_IRQ; 51 dev->types[6] = _DRM_STAT_IRQ;
45 dev->types[7] = _DRM_STAT_PRIMARY; 52 dev->types[7] = _DRM_STAT_PRIMARY;
@@ -79,8 +86,11 @@ extern int mga_max_ioctl;
79 86
80static struct drm_driver driver = { 87static struct drm_driver driver = {
81 .driver_features = DRIVER_USE_AGP | DRIVER_REQUIRE_AGP | DRIVER_USE_MTRR | DRIVER_HAVE_DMA | DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_IRQ_VBL, 88 .driver_features = DRIVER_USE_AGP | DRIVER_REQUIRE_AGP | DRIVER_USE_MTRR | DRIVER_HAVE_DMA | DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_IRQ_VBL,
89 .preinit = mga_driver_preinit,
90 .postcleanup = mga_driver_postcleanup,
82 .pretakedown = mga_driver_pretakedown, 91 .pretakedown = mga_driver_pretakedown,
83 .dma_quiescent = mga_driver_dma_quiescent, 92 .dma_quiescent = mga_driver_dma_quiescent,
93 .device_is_agp = mga_driver_device_is_agp,
84 .vblank_wait = mga_driver_vblank_wait, 94 .vblank_wait = mga_driver_vblank_wait,
85 .irq_preinstall = mga_driver_irq_preinstall, 95 .irq_preinstall = mga_driver_irq_preinstall,
86 .irq_postinstall = mga_driver_irq_postinstall, 96 .irq_postinstall = mga_driver_irq_postinstall,
@@ -128,3 +138,38 @@ module_exit(mga_exit);
128MODULE_AUTHOR( DRIVER_AUTHOR ); 138MODULE_AUTHOR( DRIVER_AUTHOR );
129MODULE_DESCRIPTION( DRIVER_DESC ); 139MODULE_DESCRIPTION( DRIVER_DESC );
130MODULE_LICENSE("GPL and additional rights"); 140MODULE_LICENSE("GPL and additional rights");
141
142/**
143 * Determine if the device really is AGP or not.
144 *
145 * In addition to the usual tests performed by \c drm_device_is_agp, this
146 * function detects PCI G450 cards that appear to the system exactly like
147 * AGP G450 cards.
148 *
149 * \param dev The device to be tested.
150 *
151 * \returns
152 * If the device is a PCI G450, zero is returned. Otherwise 2 is returned.
153 */
154int mga_driver_device_is_agp(drm_device_t * dev)
155{
156 const struct pci_dev * const pdev = dev->pdev;
157
158
159 /* There are PCI versions of the G450. These cards have the
160 * same PCI ID as the AGP G450, but have an additional PCI-to-PCI
161 * bridge chip. We detect these cards, which are not currently
162 * supported by this driver, by looking at the device ID of the
163 * bus the "card" is on. If vendor is 0x3388 (Hint Corp) and the
164 * device is 0x0021 (HB6 Universal PCI-PCI bridge), we reject the
165 * device.
166 */
167
168 if ( (pdev->device == 0x0525)
169 && (pdev->bus->self->vendor == 0x3388)
170 && (pdev->bus->self->device == 0x0021) ) {
171 return 0;
172 }
173
174 return 2;
175}
diff --git a/drivers/char/drm/mga_drv.h b/drivers/char/drm/mga_drv.h
index 9412e2816eb7..b22fdbd4f830 100644
--- a/drivers/char/drm/mga_drv.h
+++ b/drivers/char/drm/mga_drv.h
@@ -38,10 +38,10 @@
38 38
39#define DRIVER_NAME "mga" 39#define DRIVER_NAME "mga"
40#define DRIVER_DESC "Matrox G200/G400" 40#define DRIVER_DESC "Matrox G200/G400"
41#define DRIVER_DATE "20021029" 41#define DRIVER_DATE "20050607"
42 42
43#define DRIVER_MAJOR 3 43#define DRIVER_MAJOR 3
44#define DRIVER_MINOR 1 44#define DRIVER_MINOR 2
45#define DRIVER_PATCHLEVEL 0 45#define DRIVER_PATCHLEVEL 0
46 46
47typedef struct drm_mga_primary_buffer { 47typedef struct drm_mga_primary_buffer {
@@ -87,9 +87,43 @@ typedef struct drm_mga_private {
87 int chipset; 87 int chipset;
88 int usec_timeout; 88 int usec_timeout;
89 89
90 /**
91 * If set, the new DMA initialization sequence was used. This is
92 * primarily used to select how the driver should uninitialize its
93 * internal DMA structures.
94 */
95 int used_new_dma_init;
96
97 /**
98 * If AGP memory is used for DMA buffers, this will be the value
99 * \c MGA_PAGPXFER. Otherwise, it will be zero (for a PCI transfer).
100 */
101 u32 dma_access;
102
103 /**
104 * If AGP memory is used for DMA buffers, this will be the value
105 * \c MGA_WAGP_ENABLE. Otherwise, it will be zero (for a PCI
106 * transfer).
107 */
108 u32 wagp_enable;
109
110 /**
111 * \name MMIO region parameters.
112 *
113 * \sa drm_mga_private_t::mmio
114 */
115 /*@{*/
116 u32 mmio_base; /**< Bus address of base of MMIO. */
117 u32 mmio_size; /**< Size of the MMIO region. */
118 /*@}*/
119
90 u32 clear_cmd; 120 u32 clear_cmd;
91 u32 maccess; 121 u32 maccess;
92 122
123 wait_queue_head_t fence_queue;
124 atomic_t last_fence_retired;
125 u32 next_fence_to_post;
126
93 unsigned int fb_cpp; 127 unsigned int fb_cpp;
94 unsigned int front_offset; 128 unsigned int front_offset;
95 unsigned int front_pitch; 129 unsigned int front_pitch;
@@ -108,35 +142,43 @@ typedef struct drm_mga_private {
108 drm_local_map_t *status; 142 drm_local_map_t *status;
109 drm_local_map_t *warp; 143 drm_local_map_t *warp;
110 drm_local_map_t *primary; 144 drm_local_map_t *primary;
111 drm_local_map_t *buffers;
112 drm_local_map_t *agp_textures; 145 drm_local_map_t *agp_textures;
146
147 DRM_AGP_MEM *agp_mem;
148 unsigned int agp_pages;
113} drm_mga_private_t; 149} drm_mga_private_t;
114 150
115 /* mga_dma.c */ 151 /* mga_dma.c */
116extern int mga_dma_init( DRM_IOCTL_ARGS ); 152extern int mga_driver_preinit(drm_device_t * dev, unsigned long flags);
117extern int mga_dma_flush( DRM_IOCTL_ARGS ); 153extern int mga_dma_bootstrap(DRM_IOCTL_ARGS);
118extern int mga_dma_reset( DRM_IOCTL_ARGS ); 154extern int mga_dma_init(DRM_IOCTL_ARGS);
119extern int mga_dma_buffers( DRM_IOCTL_ARGS ); 155extern int mga_dma_flush(DRM_IOCTL_ARGS);
120extern void mga_driver_pretakedown(drm_device_t *dev); 156extern int mga_dma_reset(DRM_IOCTL_ARGS);
121extern int mga_driver_dma_quiescent(drm_device_t *dev); 157extern int mga_dma_buffers(DRM_IOCTL_ARGS);
122 158extern int mga_driver_postcleanup(drm_device_t * dev);
123extern int mga_do_wait_for_idle( drm_mga_private_t *dev_priv ); 159extern void mga_driver_pretakedown(drm_device_t * dev);
124 160extern int mga_driver_dma_quiescent(drm_device_t * dev);
125extern void mga_do_dma_flush( drm_mga_private_t *dev_priv ); 161
126extern void mga_do_dma_wrap_start( drm_mga_private_t *dev_priv ); 162extern int mga_do_wait_for_idle(drm_mga_private_t * dev_priv);
127extern void mga_do_dma_wrap_end( drm_mga_private_t *dev_priv ); 163
164extern void mga_do_dma_flush(drm_mga_private_t * dev_priv);
165extern void mga_do_dma_wrap_start(drm_mga_private_t * dev_priv);
166extern void mga_do_dma_wrap_end(drm_mga_private_t * dev_priv);
128 167
129extern int mga_freelist_put( drm_device_t *dev, drm_buf_t *buf ); 168extern int mga_freelist_put( drm_device_t *dev, drm_buf_t *buf );
130 169
131 /* mga_warp.c */ 170 /* mga_warp.c */
132extern int mga_warp_install_microcode( drm_mga_private_t *dev_priv ); 171extern unsigned int mga_warp_microcode_size(const drm_mga_private_t * dev_priv);
133extern int mga_warp_init( drm_mga_private_t *dev_priv ); 172extern int mga_warp_install_microcode(drm_mga_private_t * dev_priv);
134 173extern int mga_warp_init(drm_mga_private_t * dev_priv);
135extern int mga_driver_vblank_wait(drm_device_t *dev, unsigned int *sequence); 174
136extern irqreturn_t mga_driver_irq_handler( DRM_IRQ_ARGS ); 175 /* mga_irq.c */
137extern void mga_driver_irq_preinstall( drm_device_t *dev ); 176extern int mga_driver_fence_wait(drm_device_t * dev, unsigned int *sequence);
138extern void mga_driver_irq_postinstall( drm_device_t *dev ); 177extern int mga_driver_vblank_wait(drm_device_t * dev, unsigned int *sequence);
139extern void mga_driver_irq_uninstall( drm_device_t *dev ); 178extern irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS);
179extern void mga_driver_irq_preinstall(drm_device_t * dev);
180extern void mga_driver_irq_postinstall(drm_device_t * dev);
181extern void mga_driver_irq_uninstall(drm_device_t * dev);
140extern long mga_compat_ioctl(struct file *filp, unsigned int cmd, 182extern long mga_compat_ioctl(struct file *filp, unsigned int cmd,
141 unsigned long arg); 183 unsigned long arg);
142 184
@@ -527,6 +569,12 @@ do { \
527 */ 569 */
528#define MGA_EXEC 0x0100 570#define MGA_EXEC 0x0100
529 571
572/* AGP PLL encoding (for G200 only).
573 */
574#define MGA_AGP_PLL 0x1e4c
575# define MGA_AGP2XPLL_DISABLE (0 << 0)
576# define MGA_AGP2XPLL_ENABLE (1 << 0)
577
530/* Warp registers 578/* Warp registers
531 */ 579 */
532#define MGA_WR0 0x2d00 580#define MGA_WR0 0x2d00
diff --git a/drivers/char/drm/mga_ioc32.c b/drivers/char/drm/mga_ioc32.c
index bc745cfa2095..77d738e75a4d 100644
--- a/drivers/char/drm/mga_ioc32.c
+++ b/drivers/char/drm/mga_ioc32.c
@@ -129,9 +129,76 @@ static int compat_mga_getparam(struct file *file, unsigned int cmd,
129 DRM_IOCTL_MGA_GETPARAM, (unsigned long)getparam); 129 DRM_IOCTL_MGA_GETPARAM, (unsigned long)getparam);
130} 130}
131 131
132typedef struct drm_mga_drm_bootstrap32 {
133 u32 texture_handle;
134 u32 texture_size;
135 u32 primary_size;
136 u32 secondary_bin_count;
137 u32 secondary_bin_size;
138 u32 agp_mode;
139 u8 agp_size;
140} drm_mga_dma_bootstrap32_t;
141
142static int compat_mga_dma_bootstrap(struct file *file, unsigned int cmd,
143 unsigned long arg)
144{
145 drm_mga_dma_bootstrap32_t dma_bootstrap32;
146 drm_mga_dma_bootstrap_t __user *dma_bootstrap;
147 int err;
148
149 if (copy_from_user(&dma_bootstrap32, (void __user *)arg,
150 sizeof(dma_bootstrap32)))
151 return -EFAULT;
152
153 dma_bootstrap = compat_alloc_user_space(sizeof(*dma_bootstrap));
154 if (!access_ok(VERIFY_WRITE, dma_bootstrap, sizeof(*dma_bootstrap))
155 || __put_user(dma_bootstrap32.texture_handle,
156 &dma_bootstrap->texture_handle)
157 || __put_user(dma_bootstrap32.texture_size,
158 &dma_bootstrap->texture_size)
159 || __put_user(dma_bootstrap32.primary_size,
160 &dma_bootstrap->primary_size)
161 || __put_user(dma_bootstrap32.secondary_bin_count,
162 &dma_bootstrap->secondary_bin_count)
163 || __put_user(dma_bootstrap32.secondary_bin_size,
164 &dma_bootstrap->secondary_bin_size)
165 || __put_user(dma_bootstrap32.agp_mode, &dma_bootstrap->agp_mode)
166 || __put_user(dma_bootstrap32.agp_size, &dma_bootstrap->agp_size))
167 return -EFAULT;
168
169 err = drm_ioctl(file->f_dentry->d_inode, file,
170 DRM_IOCTL_MGA_DMA_BOOTSTRAP,
171 (unsigned long)dma_bootstrap);
172 if (err)
173 return err;
174
175 if (__get_user(dma_bootstrap32.texture_handle,
176 &dma_bootstrap->texture_handle)
177 || __get_user(dma_bootstrap32.texture_size,
178 &dma_bootstrap->texture_size)
179 || __get_user(dma_bootstrap32.primary_size,
180 &dma_bootstrap->primary_size)
181 || __get_user(dma_bootstrap32.secondary_bin_count,
182 &dma_bootstrap->secondary_bin_count)
183 || __get_user(dma_bootstrap32.secondary_bin_size,
184 &dma_bootstrap->secondary_bin_size)
185 || __get_user(dma_bootstrap32.agp_mode,
186 &dma_bootstrap->agp_mode)
187 || __get_user(dma_bootstrap32.agp_size,
188 &dma_bootstrap->agp_size))
189 return -EFAULT;
190
191 if (copy_to_user((void __user *)arg, &dma_bootstrap32,
192 sizeof(dma_bootstrap32)))
193 return -EFAULT;
194
195 return 0;
196}
197
132drm_ioctl_compat_t *mga_compat_ioctls[] = { 198drm_ioctl_compat_t *mga_compat_ioctls[] = {
133 [DRM_MGA_INIT] = compat_mga_init, 199 [DRM_MGA_INIT] = compat_mga_init,
134 [DRM_MGA_GETPARAM] = compat_mga_getparam, 200 [DRM_MGA_GETPARAM] = compat_mga_getparam,
201 [DRM_MGA_DMA_BOOTSTRAP] = compat_mga_dma_bootstrap,
135}; 202};
136 203
137/** 204/**
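A note on the pattern: the compat thunk above translates the 32-bit ioctl layout into the native one field by field, because struct padding and field widths may differ between a 32-bit caller and a 64-bit kernel, so a bulk copy would be wrong. A compile-and-run user-space sketch of the same idea (struct names and the widened field are hypothetical):

#include <stdint.h>
#include <stdio.h>

/* 32-bit view of the arguments: fixed-width fields, no pointers. */
struct bootstrap32 {
	uint32_t texture_handle;
	uint32_t agp_mode;
	uint8_t  agp_size;
};

/* Hypothetical native view; widths and padding differ, which is why the
 * handler above copies one field at a time with __put_user()/__get_user()
 * instead of trusting the two layouts to match. */
struct bootstrap_native {
	uint64_t texture_handle;
	uint32_t agp_mode;
	uint8_t  agp_size;
};

int main(void)
{
	struct bootstrap32 in = { 0x1234, 4, 64 };
	struct bootstrap_native out;

	out.texture_handle = in.texture_handle;
	out.agp_mode = in.agp_mode;
	out.agp_size = in.agp_size;

	printf("32-bit view: %zu bytes, native view: %zu bytes\n",
	       sizeof(in), sizeof(out));
	return 0;
}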
diff --git a/drivers/char/drm/mga_irq.c b/drivers/char/drm/mga_irq.c
index bc0b6b5d43a6..52eaa4e788f9 100644
--- a/drivers/char/drm/mga_irq.c
+++ b/drivers/char/drm/mga_irq.c
@@ -41,15 +41,40 @@ irqreturn_t mga_driver_irq_handler( DRM_IRQ_ARGS )
41 drm_mga_private_t *dev_priv = 41 drm_mga_private_t *dev_priv =
42 (drm_mga_private_t *)dev->dev_private; 42 (drm_mga_private_t *)dev->dev_private;
43 int status; 43 int status;
44 int handled = 0;
45
46 status = MGA_READ(MGA_STATUS);
44 47
45 status = MGA_READ( MGA_STATUS );
46
47 /* VBLANK interrupt */ 48 /* VBLANK interrupt */
48 if ( status & MGA_VLINEPEN ) { 49 if ( status & MGA_VLINEPEN ) {
49 MGA_WRITE( MGA_ICLEAR, MGA_VLINEICLR ); 50 MGA_WRITE( MGA_ICLEAR, MGA_VLINEICLR );
50 atomic_inc(&dev->vbl_received); 51 atomic_inc(&dev->vbl_received);
51 DRM_WAKEUP(&dev->vbl_queue); 52 DRM_WAKEUP(&dev->vbl_queue);
52 drm_vbl_send_signals( dev ); 53 drm_vbl_send_signals(dev);
54 handled = 1;
55 }
56
57 /* SOFTRAP interrupt */
58 if (status & MGA_SOFTRAPEN) {
59 const u32 prim_start = MGA_READ(MGA_PRIMADDRESS);
60 const u32 prim_end = MGA_READ(MGA_PRIMEND);
61
62
63 MGA_WRITE(MGA_ICLEAR, MGA_SOFTRAPICLR);
64
65 /* In addition to clearing the interrupt-pending bit, we
66 * have to write to MGA_PRIMEND to re-start the DMA operation.
67 */
68 if ( (prim_start & ~0x03) != (prim_end & ~0x03) ) {
69 MGA_WRITE(MGA_PRIMEND, prim_end);
70 }
71
72 atomic_inc(&dev_priv->last_fence_retired);
73 DRM_WAKEUP(&dev_priv->fence_queue);
74 handled = 1;
75 }
76
77 if ( handled ) {
53 return IRQ_HANDLED; 78 return IRQ_HANDLED;
54 } 79 }
55 return IRQ_NONE; 80 return IRQ_NONE;
@@ -73,9 +98,28 @@ int mga_driver_vblank_wait(drm_device_t *dev, unsigned int *sequence)
73 return ret; 98 return ret;
74} 99}
75 100
76void mga_driver_irq_preinstall( drm_device_t *dev ) { 101int mga_driver_fence_wait(drm_device_t * dev, unsigned int *sequence)
77 drm_mga_private_t *dev_priv = 102{
78 (drm_mga_private_t *)dev->dev_private; 103 drm_mga_private_t *dev_priv = (drm_mga_private_t *) dev->dev_private;
104 unsigned int cur_fence;
105 int ret = 0;
106
107 /* Assume that the user has missed the current sequence number
108 * by about a day rather than that she wants to wait for years
109 * using fences.
110 */
111 DRM_WAIT_ON(ret, dev_priv->fence_queue, 3 * DRM_HZ,
112 (((cur_fence = atomic_read(&dev_priv->last_fence_retired))
113 - *sequence) <= (1 << 23)));
114
115 *sequence = cur_fence;
116
117 return ret;
118}
119
120void mga_driver_irq_preinstall(drm_device_t * dev)
121{
122 drm_mga_private_t *dev_priv = (drm_mga_private_t *) dev->dev_private;
79 123
80 /* Disable *all* interrupts */ 124 /* Disable *all* interrupts */
81 MGA_WRITE( MGA_IEN, 0 ); 125 MGA_WRITE( MGA_IEN, 0 );
@@ -83,12 +127,14 @@ void mga_driver_irq_preinstall( drm_device_t *dev ) {
83 MGA_WRITE( MGA_ICLEAR, ~0 ); 127 MGA_WRITE( MGA_ICLEAR, ~0 );
84} 128}
85 129
86void mga_driver_irq_postinstall( drm_device_t *dev ) { 130void mga_driver_irq_postinstall(drm_device_t * dev)
87 drm_mga_private_t *dev_priv = 131{
88 (drm_mga_private_t *)dev->dev_private; 132 drm_mga_private_t *dev_priv = (drm_mga_private_t *) dev->dev_private;
133
134 DRM_INIT_WAITQUEUE( &dev_priv->fence_queue );
89 135
90 /* Turn on VBL interrupt */ 136 /* Turn on vertical blank interrupt and soft trap interrupt. */
91 MGA_WRITE( MGA_IEN, MGA_VLINEIEN ); 137 MGA_WRITE(MGA_IEN, MGA_VLINEIEN | MGA_SOFTRAPEN);
92} 138}
93 139
94void mga_driver_irq_uninstall( drm_device_t *dev ) { 140void mga_driver_irq_uninstall( drm_device_t *dev ) {
@@ -98,5 +144,7 @@ void mga_driver_irq_uninstall( drm_device_t *dev ) {
98 return; 144 return;
99 145
100 /* Disable *all* interrupts */ 146 /* Disable *all* interrupts */
101 MGA_WRITE( MGA_IEN, 0 ); 147 MGA_WRITE(MGA_IEN, 0);
148
149 dev->irq_enabled = 0;
102} 150}
diff --git a/drivers/char/drm/mga_state.c b/drivers/char/drm/mga_state.c
index 3c7a8f5ba501..05bbb4719376 100644
--- a/drivers/char/drm/mga_state.c
+++ b/drivers/char/drm/mga_state.c
@@ -53,16 +53,16 @@ static void mga_emit_clip_rect( drm_mga_private_t *dev_priv,
53 53
54 /* Force reset of DWGCTL on G400 (eliminates clip disable bit). 54 /* Force reset of DWGCTL on G400 (eliminates clip disable bit).
55 */ 55 */
56 if ( dev_priv->chipset == MGA_CARD_TYPE_G400 ) { 56 if (dev_priv->chipset == MGA_CARD_TYPE_G400) {
57 DMA_BLOCK( MGA_DWGCTL, ctx->dwgctl, 57 DMA_BLOCK(MGA_DWGCTL, ctx->dwgctl,
58 MGA_LEN + MGA_EXEC, 0x80000000, 58 MGA_LEN + MGA_EXEC, 0x80000000,
59 MGA_DWGCTL, ctx->dwgctl, 59 MGA_DWGCTL, ctx->dwgctl,
60 MGA_LEN + MGA_EXEC, 0x80000000 ); 60 MGA_LEN + MGA_EXEC, 0x80000000);
61 } 61 }
62 DMA_BLOCK( MGA_DMAPAD, 0x00000000, 62 DMA_BLOCK(MGA_DMAPAD, 0x00000000,
63 MGA_CXBNDRY, (box->x2 << 16) | box->x1, 63 MGA_CXBNDRY, ((box->x2 - 1) << 16) | box->x1,
64 MGA_YTOP, box->y1 * pitch, 64 MGA_YTOP, box->y1 * pitch,
65 MGA_YBOT, box->y2 * pitch ); 65 MGA_YBOT, (box->y2 - 1) * pitch);
66 66
67 ADVANCE_DMA(); 67 ADVANCE_DMA();
68} 68}
@@ -260,12 +260,11 @@ static __inline__ void mga_g200_emit_pipe( drm_mga_private_t *dev_priv )
260 260
261 /* Padding required due to hardware bug. 261 /* Padding required due to hardware bug.
262 */ 262 */
263 DMA_BLOCK( MGA_DMAPAD, 0xffffffff, 263 DMA_BLOCK(MGA_DMAPAD, 0xffffffff,
264 MGA_DMAPAD, 0xffffffff, 264 MGA_DMAPAD, 0xffffffff,
265 MGA_DMAPAD, 0xffffffff, 265 MGA_DMAPAD, 0xffffffff,
266 MGA_WIADDR, (dev_priv->warp_pipe_phys[pipe] | 266 MGA_WIADDR, (dev_priv->warp_pipe_phys[pipe] |
267 MGA_WMODE_START | 267 MGA_WMODE_START | dev_priv->wagp_enable));
268 MGA_WAGP_ENABLE) );
269 268
270 ADVANCE_DMA(); 269 ADVANCE_DMA();
271} 270}
@@ -342,12 +341,11 @@ static __inline__ void mga_g400_emit_pipe( drm_mga_private_t *dev_priv )
342 MGA_WR60, MGA_G400_WR_MAGIC ); /* tex1 height */ 341 MGA_WR60, MGA_G400_WR_MAGIC ); /* tex1 height */
343 342
344 /* Padding required due to hardware bug */ 343 /* Padding required due to hardware bug */
345 DMA_BLOCK( MGA_DMAPAD, 0xffffffff, 344 DMA_BLOCK(MGA_DMAPAD, 0xffffffff,
346 MGA_DMAPAD, 0xffffffff, 345 MGA_DMAPAD, 0xffffffff,
347 MGA_DMAPAD, 0xffffffff, 346 MGA_DMAPAD, 0xffffffff,
348 MGA_WIADDR2, (dev_priv->warp_pipe_phys[pipe] | 347 MGA_WIADDR2, (dev_priv->warp_pipe_phys[pipe] |
349 MGA_WMODE_START | 348 MGA_WMODE_START | dev_priv->wagp_enable));
350 MGA_WAGP_ENABLE) );
351 349
352 ADVANCE_DMA(); 350 ADVANCE_DMA();
353} 351}
@@ -459,9 +457,9 @@ static int mga_verify_state( drm_mga_private_t *dev_priv )
459 if ( dirty & MGA_UPLOAD_TEX0 ) 457 if ( dirty & MGA_UPLOAD_TEX0 )
460 ret |= mga_verify_tex( dev_priv, 0 ); 458 ret |= mga_verify_tex( dev_priv, 0 );
461 459
462 if ( dev_priv->chipset == MGA_CARD_TYPE_G400 ) { 460 if (dev_priv->chipset >= MGA_CARD_TYPE_G400) {
463 if ( dirty & MGA_UPLOAD_TEX1 ) 461 if (dirty & MGA_UPLOAD_TEX1)
464 ret |= mga_verify_tex( dev_priv, 1 ); 462 ret |= mga_verify_tex(dev_priv, 1);
465 463
466 if ( dirty & MGA_UPLOAD_PIPE ) 464 if ( dirty & MGA_UPLOAD_PIPE )
467 ret |= ( sarea_priv->warp_pipe > MGA_MAX_G400_PIPES ); 465 ret |= ( sarea_priv->warp_pipe > MGA_MAX_G400_PIPES );
@@ -686,12 +684,12 @@ static void mga_dma_dispatch_vertex( drm_device_t *dev, drm_buf_t *buf )
686 684
687 BEGIN_DMA( 1 ); 685 BEGIN_DMA( 1 );
688 686
689 DMA_BLOCK( MGA_DMAPAD, 0x00000000, 687 DMA_BLOCK(MGA_DMAPAD, 0x00000000,
690 MGA_DMAPAD, 0x00000000, 688 MGA_DMAPAD, 0x00000000,
691 MGA_SECADDRESS, (address | 689 MGA_SECADDRESS, (address |
692 MGA_DMA_VERTEX), 690 MGA_DMA_VERTEX),
693 MGA_SECEND, ((address + length) | 691 MGA_SECEND, ((address + length) |
694 MGA_PAGPXFER) ); 692 dev_priv->dma_access));
695 693
696 ADVANCE_DMA(); 694 ADVANCE_DMA();
697 } while ( ++i < sarea_priv->nbox ); 695 } while ( ++i < sarea_priv->nbox );
@@ -733,11 +731,11 @@ static void mga_dma_dispatch_indices( drm_device_t *dev, drm_buf_t *buf,
733 731
734 BEGIN_DMA( 1 ); 732 BEGIN_DMA( 1 );
735 733
736 DMA_BLOCK( MGA_DMAPAD, 0x00000000, 734 DMA_BLOCK(MGA_DMAPAD, 0x00000000,
737 MGA_DMAPAD, 0x00000000, 735 MGA_DMAPAD, 0x00000000,
738 MGA_SETUPADDRESS, address + start, 736 MGA_SETUPADDRESS, address + start,
739 MGA_SETUPEND, ((address + end) | 737 MGA_SETUPEND, ((address + end) |
740 MGA_PAGPXFER) ); 738 dev_priv->dma_access));
741 739
742 ADVANCE_DMA(); 740 ADVANCE_DMA();
743 } while ( ++i < sarea_priv->nbox ); 741 } while ( ++i < sarea_priv->nbox );
@@ -764,7 +762,7 @@ static void mga_dma_dispatch_iload( drm_device_t *dev, drm_buf_t *buf,
764 drm_mga_private_t *dev_priv = dev->dev_private; 762 drm_mga_private_t *dev_priv = dev->dev_private;
765 drm_mga_buf_priv_t *buf_priv = buf->dev_private; 763 drm_mga_buf_priv_t *buf_priv = buf->dev_private;
766 drm_mga_context_regs_t *ctx = &dev_priv->sarea_priv->context_state; 764 drm_mga_context_regs_t *ctx = &dev_priv->sarea_priv->context_state;
767 u32 srcorg = buf->bus_address | MGA_SRCACC_AGP | MGA_SRCMAP_SYSMEM; 765 u32 srcorg = buf->bus_address | dev_priv->dma_access | MGA_SRCMAP_SYSMEM;
768 u32 y2; 766 u32 y2;
769 DMA_LOCALS; 767 DMA_LOCALS;
770 DRM_DEBUG( "buf=%d used=%d\n", buf->idx, buf->used ); 768 DRM_DEBUG( "buf=%d used=%d\n", buf->idx, buf->used );
@@ -1095,6 +1093,9 @@ static int mga_getparam( DRM_IOCTL_ARGS )
1095 case MGA_PARAM_IRQ_NR: 1093 case MGA_PARAM_IRQ_NR:
1096 value = dev->irq; 1094 value = dev->irq;
1097 break; 1095 break;
1096 case MGA_PARAM_CARD_TYPE:
1097 value = dev_priv->chipset;
1098 break;
1098 default: 1099 default:
1099 return DRM_ERR(EINVAL); 1100 return DRM_ERR(EINVAL);
1100 } 1101 }
@@ -1107,17 +1108,82 @@ static int mga_getparam( DRM_IOCTL_ARGS )
1107 return 0; 1108 return 0;
1108} 1109}
1109 1110
1111static int mga_set_fence(DRM_IOCTL_ARGS)
1112{
1113 DRM_DEVICE;
1114 drm_mga_private_t *dev_priv = dev->dev_private;
1115 u32 temp;
1116 DMA_LOCALS;
1117
1118 if (!dev_priv) {
1119 DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
1120 return DRM_ERR(EINVAL);
1121 }
1122
1123 DRM_DEBUG("pid=%d\n", DRM_CURRENTPID);
1124
1125 /* I would normally do this assignment in the declaration of temp,
1126 * but dev_priv may be NULL.
1127 */
1128
1129 temp = dev_priv->next_fence_to_post;
1130 dev_priv->next_fence_to_post++;
1131
1132 BEGIN_DMA(1);
1133 DMA_BLOCK(MGA_DMAPAD, 0x00000000,
1134 MGA_DMAPAD, 0x00000000,
1135 MGA_DMAPAD, 0x00000000,
1136 MGA_SOFTRAP, 0x00000000);
1137 ADVANCE_DMA();
1138
1139 if (DRM_COPY_TO_USER( (u32 __user *) data, & temp, sizeof(u32))) {
1140 DRM_ERROR("copy_to_user\n");
1141 return DRM_ERR(EFAULT);
1142 }
1143
1144 return 0;
1145}
1146
1147static int mga_wait_fence(DRM_IOCTL_ARGS)
1148{
1149 DRM_DEVICE;
1150 drm_mga_private_t *dev_priv = dev->dev_private;
1151 u32 fence;
1152
1153 if (!dev_priv) {
1154 DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
1155 return DRM_ERR(EINVAL);
1156 }
1157
1158 DRM_COPY_FROM_USER_IOCTL(fence, (u32 __user *) data, sizeof(u32));
1159
1160 DRM_DEBUG("pid=%d\n", DRM_CURRENTPID);
1161
1162 mga_driver_fence_wait(dev, & fence);
1163
1164 if (DRM_COPY_TO_USER( (u32 __user *) data, & fence, sizeof(u32))) {
1165 DRM_ERROR("copy_to_user\n");
1166 return DRM_ERR(EFAULT);
1167 }
1168
1169 return 0;
1170}
1171
1110drm_ioctl_desc_t mga_ioctls[] = { 1172drm_ioctl_desc_t mga_ioctls[] = {
1111 [DRM_IOCTL_NR(DRM_MGA_INIT)] = { mga_dma_init, 1, 1 }, 1173 [DRM_IOCTL_NR(DRM_MGA_INIT)] = {mga_dma_init, 1, 1},
1112 [DRM_IOCTL_NR(DRM_MGA_FLUSH)] = { mga_dma_flush, 1, 0 }, 1174 [DRM_IOCTL_NR(DRM_MGA_FLUSH)] = {mga_dma_flush, 1, 0},
1113 [DRM_IOCTL_NR(DRM_MGA_RESET)] = { mga_dma_reset, 1, 0 }, 1175 [DRM_IOCTL_NR(DRM_MGA_RESET)] = {mga_dma_reset, 1, 0},
1114 [DRM_IOCTL_NR(DRM_MGA_SWAP)] = { mga_dma_swap, 1, 0 }, 1176 [DRM_IOCTL_NR(DRM_MGA_SWAP)] = {mga_dma_swap, 1, 0},
1115 [DRM_IOCTL_NR(DRM_MGA_CLEAR)] = { mga_dma_clear, 1, 0 }, 1177 [DRM_IOCTL_NR(DRM_MGA_CLEAR)] = {mga_dma_clear, 1, 0},
1116 [DRM_IOCTL_NR(DRM_MGA_VERTEX)] = { mga_dma_vertex, 1, 0 }, 1178 [DRM_IOCTL_NR(DRM_MGA_VERTEX)] = {mga_dma_vertex, 1, 0},
1117 [DRM_IOCTL_NR(DRM_MGA_INDICES)] = { mga_dma_indices, 1, 0 }, 1179 [DRM_IOCTL_NR(DRM_MGA_INDICES)] = {mga_dma_indices, 1, 0},
1118 [DRM_IOCTL_NR(DRM_MGA_ILOAD)] = { mga_dma_iload, 1, 0 }, 1180 [DRM_IOCTL_NR(DRM_MGA_ILOAD)] = {mga_dma_iload, 1, 0},
1119 [DRM_IOCTL_NR(DRM_MGA_BLIT)] = { mga_dma_blit, 1, 0 }, 1181 [DRM_IOCTL_NR(DRM_MGA_BLIT)] = {mga_dma_blit, 1, 0},
1120 [DRM_IOCTL_NR(DRM_MGA_GETPARAM)]= { mga_getparam, 1, 0 }, 1182 [DRM_IOCTL_NR(DRM_MGA_GETPARAM)] = {mga_getparam, 1, 0},
1183 [DRM_IOCTL_NR(DRM_MGA_SET_FENCE)] = {mga_set_fence, 1, 0},
1184 [DRM_IOCTL_NR(DRM_MGA_WAIT_FENCE)] = {mga_wait_fence, 1, 0},
1185 [DRM_IOCTL_NR(DRM_MGA_DMA_BOOTSTRAP)] = {mga_dma_bootstrap, 1, 1},
1186
1121}; 1187};
1122 1188
1123int mga_max_ioctl = DRM_ARRAY_SIZE(mga_ioctls); 1189int mga_max_ioctl = DRM_ARRAY_SIZE(mga_ioctls);
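For context, a sketch of how a user-space driver might pair the two new fence ioctls. The request codes are taken as parameters because only the ioctl names are established above; the wrapper name here is hypothetical and error handling is trimmed:

#include <stdint.h>
#include <sys/ioctl.h>

/* set_req/wait_req stand in for DRM_IOCTL_MGA_SET_FENCE and
 * DRM_IOCTL_MGA_WAIT_FENCE as defined in the drm headers. */
static int emit_and_wait_fence(int fd, unsigned long set_req,
			       unsigned long wait_req)
{
	uint32_t fence;

	/* Queue a SOFTRAP and learn the sequence number it will retire. */
	if (ioctl(fd, set_req, &fence) != 0)
		return -1;

	/* ... CPU-side work can overlap with the GPU here ... */

	/* Block until the IRQ handler retires that sequence number. */
	return ioctl(fd, wait_req, &fence);
}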
diff --git a/drivers/char/drm/mga_warp.c b/drivers/char/drm/mga_warp.c
index 0a3a0cc700dc..55ccc8a0ac29 100644
--- a/drivers/char/drm/mga_warp.c
+++ b/drivers/char/drm/mga_warp.c
@@ -48,65 +48,52 @@ do { \
48 vcbase += WARP_UCODE_SIZE( which ); \ 48 vcbase += WARP_UCODE_SIZE( which ); \
49} while (0) 49} while (0)
50 50
51 51static const unsigned int mga_warp_g400_microcode_size =
52static unsigned int mga_warp_g400_microcode_size( drm_mga_private_t *dev_priv ) 52 (WARP_UCODE_SIZE(warp_g400_tgz) +
53{ 53 WARP_UCODE_SIZE(warp_g400_tgza) +
54 unsigned int size; 54 WARP_UCODE_SIZE(warp_g400_tgzaf) +
55 55 WARP_UCODE_SIZE(warp_g400_tgzf) +
56 size = ( WARP_UCODE_SIZE( warp_g400_tgz ) + 56 WARP_UCODE_SIZE(warp_g400_tgzs) +
57 WARP_UCODE_SIZE( warp_g400_tgza ) + 57 WARP_UCODE_SIZE(warp_g400_tgzsa) +
58 WARP_UCODE_SIZE( warp_g400_tgzaf ) + 58 WARP_UCODE_SIZE(warp_g400_tgzsaf) +
59 WARP_UCODE_SIZE( warp_g400_tgzf ) + 59 WARP_UCODE_SIZE(warp_g400_tgzsf) +
60 WARP_UCODE_SIZE( warp_g400_tgzs ) + 60 WARP_UCODE_SIZE(warp_g400_t2gz) +
61 WARP_UCODE_SIZE( warp_g400_tgzsa ) + 61 WARP_UCODE_SIZE(warp_g400_t2gza) +
62 WARP_UCODE_SIZE( warp_g400_tgzsaf ) + 62 WARP_UCODE_SIZE(warp_g400_t2gzaf) +
63 WARP_UCODE_SIZE( warp_g400_tgzsf ) + 63 WARP_UCODE_SIZE(warp_g400_t2gzf) +
64 WARP_UCODE_SIZE( warp_g400_t2gz ) + 64 WARP_UCODE_SIZE(warp_g400_t2gzs) +
65 WARP_UCODE_SIZE( warp_g400_t2gza ) + 65 WARP_UCODE_SIZE(warp_g400_t2gzsa) +
66 WARP_UCODE_SIZE( warp_g400_t2gzaf ) + 66 WARP_UCODE_SIZE(warp_g400_t2gzsaf) +
67 WARP_UCODE_SIZE( warp_g400_t2gzf ) + 67 WARP_UCODE_SIZE(warp_g400_t2gzsf));
68 WARP_UCODE_SIZE( warp_g400_t2gzs ) + 68
69 WARP_UCODE_SIZE( warp_g400_t2gzsa ) + 69static const unsigned int mga_warp_g200_microcode_size =
70 WARP_UCODE_SIZE( warp_g400_t2gzsaf ) + 70 (WARP_UCODE_SIZE(warp_g200_tgz) +
71 WARP_UCODE_SIZE( warp_g400_t2gzsf ) ); 71 WARP_UCODE_SIZE(warp_g200_tgza) +
72 72 WARP_UCODE_SIZE(warp_g200_tgzaf) +
73 size = PAGE_ALIGN( size ); 73 WARP_UCODE_SIZE(warp_g200_tgzf) +
74 74 WARP_UCODE_SIZE(warp_g200_tgzs) +
75 DRM_DEBUG( "G400 ucode size = %d bytes\n", size ); 75 WARP_UCODE_SIZE(warp_g200_tgzsa) +
76 return size; 76 WARP_UCODE_SIZE(warp_g200_tgzsaf) +
77} 77 WARP_UCODE_SIZE(warp_g200_tgzsf));
78 78
79static unsigned int mga_warp_g200_microcode_size( drm_mga_private_t *dev_priv ) 79
80unsigned int mga_warp_microcode_size(const drm_mga_private_t * dev_priv)
80{ 81{
81 unsigned int size; 82 switch (dev_priv->chipset) {
82 83 case MGA_CARD_TYPE_G400:
83 size = ( WARP_UCODE_SIZE( warp_g200_tgz ) + 84 case MGA_CARD_TYPE_G550:
84 WARP_UCODE_SIZE( warp_g200_tgza ) + 85 return PAGE_ALIGN(mga_warp_g400_microcode_size);
85 WARP_UCODE_SIZE( warp_g200_tgzaf ) + 86 case MGA_CARD_TYPE_G200:
86 WARP_UCODE_SIZE( warp_g200_tgzf ) + 87 return PAGE_ALIGN(mga_warp_g200_microcode_size);
87 WARP_UCODE_SIZE( warp_g200_tgzs ) + 88 default:
88 WARP_UCODE_SIZE( warp_g200_tgzsa ) + 89 return 0;
89 WARP_UCODE_SIZE( warp_g200_tgzsaf ) + 90 }
90 WARP_UCODE_SIZE( warp_g200_tgzsf ) );
91
92 size = PAGE_ALIGN( size );
93
94 DRM_DEBUG( "G200 ucode size = %d bytes\n", size );
95 return size;
96} 91}
97 92
98static int mga_warp_install_g400_microcode( drm_mga_private_t *dev_priv ) 93static int mga_warp_install_g400_microcode( drm_mga_private_t *dev_priv )
99{ 94{
100 unsigned char *vcbase = dev_priv->warp->handle; 95 unsigned char *vcbase = dev_priv->warp->handle;
101 unsigned long pcbase = dev_priv->warp->offset; 96 unsigned long pcbase = dev_priv->warp->offset;
102 unsigned int size;
103
104 size = mga_warp_g400_microcode_size( dev_priv );
105 if ( size > dev_priv->warp->size ) {
106 DRM_ERROR( "microcode too large! (%u > %lu)\n",
107 size, dev_priv->warp->size );
108 return DRM_ERR(ENOMEM);
109 }
110 97
111 memset( dev_priv->warp_pipe_phys, 0, 98 memset( dev_priv->warp_pipe_phys, 0,
112 sizeof(dev_priv->warp_pipe_phys) ); 99 sizeof(dev_priv->warp_pipe_phys) );
@@ -136,35 +123,36 @@ static int mga_warp_install_g200_microcode( drm_mga_private_t *dev_priv )
136{ 123{
137 unsigned char *vcbase = dev_priv->warp->handle; 124 unsigned char *vcbase = dev_priv->warp->handle;
138 unsigned long pcbase = dev_priv->warp->offset; 125 unsigned long pcbase = dev_priv->warp->offset;
139 unsigned int size;
140
141 size = mga_warp_g200_microcode_size( dev_priv );
142 if ( size > dev_priv->warp->size ) {
143 DRM_ERROR( "microcode too large! (%u > %lu)\n",
144 size, dev_priv->warp->size );
145 return DRM_ERR(ENOMEM);
146 }
147 126
148 memset( dev_priv->warp_pipe_phys, 0, 127 memset(dev_priv->warp_pipe_phys, 0, sizeof(dev_priv->warp_pipe_phys));
149 sizeof(dev_priv->warp_pipe_phys) );
150 128
151 WARP_UCODE_INSTALL( warp_g200_tgz, MGA_WARP_TGZ ); 129 WARP_UCODE_INSTALL(warp_g200_tgz, MGA_WARP_TGZ);
152 WARP_UCODE_INSTALL( warp_g200_tgzf, MGA_WARP_TGZF ); 130 WARP_UCODE_INSTALL(warp_g200_tgzf, MGA_WARP_TGZF);
153 WARP_UCODE_INSTALL( warp_g200_tgza, MGA_WARP_TGZA ); 131 WARP_UCODE_INSTALL(warp_g200_tgza, MGA_WARP_TGZA);
154 WARP_UCODE_INSTALL( warp_g200_tgzaf, MGA_WARP_TGZAF ); 132 WARP_UCODE_INSTALL(warp_g200_tgzaf, MGA_WARP_TGZAF);
155 WARP_UCODE_INSTALL( warp_g200_tgzs, MGA_WARP_TGZS ); 133 WARP_UCODE_INSTALL(warp_g200_tgzs, MGA_WARP_TGZS);
156 WARP_UCODE_INSTALL( warp_g200_tgzsf, MGA_WARP_TGZSF ); 134 WARP_UCODE_INSTALL(warp_g200_tgzsf, MGA_WARP_TGZSF);
157 WARP_UCODE_INSTALL( warp_g200_tgzsa, MGA_WARP_TGZSA ); 135 WARP_UCODE_INSTALL(warp_g200_tgzsa, MGA_WARP_TGZSA);
158 WARP_UCODE_INSTALL( warp_g200_tgzsaf, MGA_WARP_TGZSAF ); 136 WARP_UCODE_INSTALL(warp_g200_tgzsaf, MGA_WARP_TGZSAF);
159 137
160 return 0; 138 return 0;
161} 139}
162 140
163int mga_warp_install_microcode( drm_mga_private_t *dev_priv ) 141int mga_warp_install_microcode( drm_mga_private_t *dev_priv )
164{ 142{
165 switch ( dev_priv->chipset ) { 143 const unsigned int size = mga_warp_microcode_size(dev_priv);
144
145 DRM_DEBUG("MGA ucode size = %d bytes\n", size);
146 if (size > dev_priv->warp->size) {
147 DRM_ERROR("microcode too large! (%u > %lu)\n",
148 size, dev_priv->warp->size);
149 return DRM_ERR(ENOMEM);
150 }
151
152 switch (dev_priv->chipset) {
166 case MGA_CARD_TYPE_G400: 153 case MGA_CARD_TYPE_G400:
167 return mga_warp_install_g400_microcode( dev_priv ); 154 case MGA_CARD_TYPE_G550:
155 return mga_warp_install_g400_microcode(dev_priv);
168 case MGA_CARD_TYPE_G200: 156 case MGA_CARD_TYPE_G200:
169 return mga_warp_install_g200_microcode( dev_priv ); 157 return mga_warp_install_g200_microcode( dev_priv );
170 default: 158 default:
@@ -182,10 +170,11 @@ int mga_warp_init( drm_mga_private_t *dev_priv )
182 */ 170 */
183 switch ( dev_priv->chipset ) { 171 switch ( dev_priv->chipset ) {
184 case MGA_CARD_TYPE_G400: 172 case MGA_CARD_TYPE_G400:
185 MGA_WRITE( MGA_WIADDR2, MGA_WMODE_SUSPEND ); 173 case MGA_CARD_TYPE_G550:
186 MGA_WRITE( MGA_WGETMSB, 0x00000E00 ); 174 MGA_WRITE(MGA_WIADDR2, MGA_WMODE_SUSPEND);
187 MGA_WRITE( MGA_WVRTXSZ, 0x00001807 ); 175 MGA_WRITE(MGA_WGETMSB, 0x00000E00);
188 MGA_WRITE( MGA_WACCEPTSEQ, 0x18000000 ); 176 MGA_WRITE(MGA_WVRTXSZ, 0x00001807);
177 MGA_WRITE(MGA_WACCEPTSEQ, 0x18000000);
189 break; 178 break;
190 case MGA_CARD_TYPE_G200: 179 case MGA_CARD_TYPE_G200:
191 MGA_WRITE( MGA_WIADDR, MGA_WMODE_SUSPEND ); 180 MGA_WRITE( MGA_WIADDR, MGA_WMODE_SUSPEND );
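The warp rework above turns the per-chip size computations into compile-time constant sums and applies PAGE_ALIGN() once, at the single remaining check site in mga_warp_microcode_size(). A simplified user-space analogue (blob names and sizes are invented, and the real WARP_UCODE_SIZE() also rounds each blob individually):

#include <stdio.h>

/* Stand-ins for the kernel's PAGE_SIZE/PAGE_ALIGN. */
#define PAGE_SIZE 4096u
#define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

/* Hypothetical microcode blobs in place of the warp_g200_* arrays. */
static const unsigned char ucode_tgz[1234];
static const unsigned char ucode_tgzf[2345];

/* The total is a compile-time constant, not a runtime computation. */
static const unsigned int total_size =
	sizeof(ucode_tgz) + sizeof(ucode_tgzf);

int main(void)
{
	printf("raw %u bytes, page-aligned %u bytes\n",
	       total_size, PAGE_ALIGN(total_size));
	return 0;
}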
diff --git a/drivers/char/drm/r128_cce.c b/drivers/char/drm/r128_cce.c
index 08ed8d01d9d9..895152206b31 100644
--- a/drivers/char/drm/r128_cce.c
+++ b/drivers/char/drm/r128_cce.c
@@ -326,7 +326,8 @@ static void r128_cce_init_ring_buffer( drm_device_t *dev,
326 ring_start = dev_priv->cce_ring->offset - dev->agp->base; 326 ring_start = dev_priv->cce_ring->offset - dev->agp->base;
327 else 327 else
328#endif 328#endif
329 ring_start = dev_priv->cce_ring->offset - dev->sg->handle; 329 ring_start = dev_priv->cce_ring->offset -
330 (unsigned long)dev->sg->virtual;
330 331
331 R128_WRITE( R128_PM4_BUFFER_OFFSET, ring_start | R128_AGP_OFFSET ); 332 R128_WRITE( R128_PM4_BUFFER_OFFSET, ring_start | R128_AGP_OFFSET );
332 333
@@ -487,6 +488,7 @@ static int r128_do_init_cce( drm_device_t *dev, drm_r128_init_t *init )
487 r128_do_cleanup_cce( dev ); 488 r128_do_cleanup_cce( dev );
488 return DRM_ERR(EINVAL); 489 return DRM_ERR(EINVAL);
489 } 490 }
491 dev->agp_buffer_token = init->buffers_offset;
490 dev->agp_buffer_map = drm_core_findmap(dev, init->buffers_offset); 492 dev->agp_buffer_map = drm_core_findmap(dev, init->buffers_offset);
491 if(!dev->agp_buffer_map) { 493 if(!dev->agp_buffer_map) {
492 DRM_ERROR("could not find dma buffer region!\n"); 494 DRM_ERROR("could not find dma buffer region!\n");
@@ -537,7 +539,7 @@ static int r128_do_init_cce( drm_device_t *dev, drm_r128_init_t *init )
537 dev_priv->cce_buffers_offset = dev->agp->base; 539 dev_priv->cce_buffers_offset = dev->agp->base;
538 else 540 else
539#endif 541#endif
540 dev_priv->cce_buffers_offset = dev->sg->handle; 542 dev_priv->cce_buffers_offset = (unsigned long)dev->sg->virtual;
541 543
542 dev_priv->ring.start = (u32 *)dev_priv->cce_ring->handle; 544 dev_priv->ring.start = (u32 *)dev_priv->cce_ring->handle;
543 dev_priv->ring.end = ((u32 *)dev_priv->cce_ring->handle 545 dev_priv->ring.end = ((u32 *)dev_priv->cce_ring->handle
diff --git a/drivers/char/drm/r128_drm.h b/drivers/char/drm/r128_drm.h
index 0cba17d1e0ff..b616cd3ed2cd 100644
--- a/drivers/char/drm/r128_drm.h
+++ b/drivers/char/drm/r128_drm.h
@@ -215,7 +215,7 @@ typedef struct drm_r128_sarea {
215#define DRM_IOCTL_R128_INDIRECT DRM_IOWR(DRM_COMMAND_BASE + DRM_R128_INDIRECT, drm_r128_indirect_t) 215#define DRM_IOCTL_R128_INDIRECT DRM_IOWR(DRM_COMMAND_BASE + DRM_R128_INDIRECT, drm_r128_indirect_t)
216#define DRM_IOCTL_R128_FULLSCREEN DRM_IOW( DRM_COMMAND_BASE + DRM_R128_FULLSCREEN, drm_r128_fullscreen_t) 216#define DRM_IOCTL_R128_FULLSCREEN DRM_IOW( DRM_COMMAND_BASE + DRM_R128_FULLSCREEN, drm_r128_fullscreen_t)
217#define DRM_IOCTL_R128_CLEAR2 DRM_IOW( DRM_COMMAND_BASE + DRM_R128_CLEAR2, drm_r128_clear2_t) 217#define DRM_IOCTL_R128_CLEAR2 DRM_IOW( DRM_COMMAND_BASE + DRM_R128_CLEAR2, drm_r128_clear2_t)
218#define DRM_IOCTL_R128_GETPARAM DRM_IOW( DRM_COMMAND_BASE + DRM_R128_GETPARAM, drm_r128_getparam_t) 218#define DRM_IOCTL_R128_GETPARAM DRM_IOWR( DRM_COMMAND_BASE + DRM_R128_GETPARAM, drm_r128_getparam_t)
219#define DRM_IOCTL_R128_FLIP DRM_IO( DRM_COMMAND_BASE + DRM_R128_FLIP) 219#define DRM_IOCTL_R128_FLIP DRM_IO( DRM_COMMAND_BASE + DRM_R128_FLIP)
220 220
221typedef struct drm_r128_init { 221typedef struct drm_r128_init {
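The one-line r128_drm.h change above is more than cosmetic: the direction bits are encoded in the ioctl request number itself, and GETPARAM copies a result back to userspace, so it must be declared _IOWR rather than _IOW, or tools and compat layers that trust the encoding misbehave. A small Linux demo (the magic and command numbers are placeholders):

#include <stdio.h>
#include <sys/ioctl.h>	/* on Linux this pulls in the _IOW/_IOWR encoders */

struct getparam {
	int   param;
	void *value;
};

int main(void)
{
	/* Same type and number, different direction bits in the code. */
	printf("_IOW : %#lx\n",
	       (unsigned long)_IOW('d', 0x52, struct getparam));
	printf("_IOWR: %#lx\n",
	       (unsigned long)_IOWR('d', 0x52, struct getparam));
	return 0;
}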
diff --git a/drivers/char/drm/r300_cmdbuf.c b/drivers/char/drm/r300_cmdbuf.c
new file mode 100644
index 000000000000..623f1f460cb5
--- /dev/null
+++ b/drivers/char/drm/r300_cmdbuf.c
@@ -0,0 +1,801 @@
1/* r300_cmdbuf.c -- Command buffer emission for R300 -*- linux-c -*-
2 *
3 * Copyright (C) The Weather Channel, Inc. 2002.
4 * Copyright (C) 2004 Nicolai Haehnle.
5 * All Rights Reserved.
6 *
7 * The Weather Channel (TM) funded Tungsten Graphics to develop the
8 * initial release of the Radeon 8500 driver under the XFree86 license.
9 * This notice must be preserved.
10 *
11 * Permission is hereby granted, free of charge, to any person obtaining a
12 * copy of this software and associated documentation files (the "Software"),
13 * to deal in the Software without restriction, including without limitation
14 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
15 * and/or sell copies of the Software, and to permit persons to whom the
16 * Software is furnished to do so, subject to the following conditions:
17 *
18 * The above copyright notice and this permission notice (including the next
19 * paragraph) shall be included in all copies or substantial portions of the
20 * Software.
21 *
22 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
23 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
24 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
25 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
26 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
27 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
28 * DEALINGS IN THE SOFTWARE.
29 *
30 * Authors:
31 * Nicolai Haehnle <prefect_@gmx.net>
32 */
33
34#include "drmP.h"
35#include "drm.h"
36#include "radeon_drm.h"
37#include "radeon_drv.h"
38#include "r300_reg.h"
39
40
41#define R300_SIMULTANEOUS_CLIPRECTS 4
42
43/* Values for R300_RE_CLIPRECT_CNTL depending on the number of cliprects
44 */
45static const int r300_cliprect_cntl[4] = {
46 0xAAAA,
47 0xEEEE,
48 0xFEFE,
49 0xFFFE
50};
51
52
53/**
54 * Emit up to R300_SIMULTANEOUS_CLIPRECTS cliprects from the given command
55 * buffer, starting with index n.
56 */
57static int r300_emit_cliprects(drm_radeon_private_t* dev_priv,
58 drm_radeon_cmd_buffer_t* cmdbuf,
59 int n)
60{
61 drm_clip_rect_t box;
62 int nr;
63 int i;
64 RING_LOCALS;
65
66 nr = cmdbuf->nbox - n;
67 if (nr > R300_SIMULTANEOUS_CLIPRECTS)
68 nr = R300_SIMULTANEOUS_CLIPRECTS;
69
70 DRM_DEBUG("%i cliprects\n", nr);
71
72 if (nr) {
73 BEGIN_RING(6 + nr*2);
74 OUT_RING( CP_PACKET0( R300_RE_CLIPRECT_TL_0, nr*2 - 1 ) );
75
76 for(i = 0; i < nr; ++i) {
77 if (DRM_COPY_FROM_USER_UNCHECKED(&box, &cmdbuf->boxes[n+i], sizeof(box))) {
78 DRM_ERROR("copy cliprect faulted\n");
79 return DRM_ERR(EFAULT);
80 }
81
82 box.x1 = (box.x1 + R300_CLIPRECT_OFFSET) & R300_CLIPRECT_MASK;
83 box.y1 = (box.y1 + R300_CLIPRECT_OFFSET) & R300_CLIPRECT_MASK;
84 box.x2 = (box.x2 + R300_CLIPRECT_OFFSET) & R300_CLIPRECT_MASK;
85 box.y2 = (box.y2 + R300_CLIPRECT_OFFSET) & R300_CLIPRECT_MASK;
86
87 OUT_RING((box.x1 << R300_CLIPRECT_X_SHIFT) |
88 (box.y1 << R300_CLIPRECT_Y_SHIFT));
89 OUT_RING((box.x2 << R300_CLIPRECT_X_SHIFT) |
90 (box.y2 << R300_CLIPRECT_Y_SHIFT));
91 }
92
93 OUT_RING_REG( R300_RE_CLIPRECT_CNTL, r300_cliprect_cntl[nr-1] );
94
95 /* TODO/SECURITY: Force scissors to a safe value, otherwise the
96 * client might be able to trample over memory.
97 * The impact should be very limited, but I'd rather be safe than
98 * sorry.
99 */
100 OUT_RING( CP_PACKET0( R300_RE_SCISSORS_TL, 1 ) );
101 OUT_RING( 0 );
102 OUT_RING( R300_SCISSORS_X_MASK | R300_SCISSORS_Y_MASK );
103 ADVANCE_RING();
104 } else {
105 /* Why we allow zero cliprect rendering:
106 * There are some commands in a command buffer that must be submitted
107 * even when there are no cliprects, e.g. DMA buffer discard
108 * or state setting (though state setting could be avoided by
109 * simulating a loss of context).
110 *
111 * Now since the cmdbuf interface is so chaotic right now (and is
112 * bound to remain that way for a bit until things settle down),
113 * it is basically impossible to filter out the commands that are
114 * necessary and those that aren't.
115 *
116 * So I choose the safe way and don't do any filtering at all;
117 * instead, I simply set up the engine so that all rendering
118 * can't produce any fragments.
119 */
120 BEGIN_RING(2);
121 OUT_RING_REG( R300_RE_CLIPRECT_CNTL, 0 );
122 ADVANCE_RING();
123 }
124
125 return 0;
126}
127
128u8 r300_reg_flags[0x10000>>2];
129
130
131void r300_init_reg_flags(void)
132{
133 int i;
134 memset(r300_reg_flags, 0, 0x10000>>2);
135 #define ADD_RANGE_MARK(reg, count,mark) \
136 for(i=((reg)>>2);i<((reg)>>2)+(count);i++)\
137 r300_reg_flags[i]|=(mark);
138
139 #define MARK_SAFE 1
140 #define MARK_CHECK_OFFSET 2
141
142 #define ADD_RANGE(reg, count) ADD_RANGE_MARK(reg, count, MARK_SAFE)
143
144 /* these match cmducs() command in r300_driver/r300/r300_cmdbuf.c */
145 ADD_RANGE(R300_SE_VPORT_XSCALE, 6);
146 ADD_RANGE(0x2080, 1);
147 ADD_RANGE(R300_SE_VTE_CNTL, 2);
148 ADD_RANGE(0x2134, 2);
149 ADD_RANGE(0x2140, 1);
150 ADD_RANGE(R300_VAP_INPUT_CNTL_0, 2);
151 ADD_RANGE(0x21DC, 1);
152 ADD_RANGE(0x221C, 1);
153 ADD_RANGE(0x2220, 4);
154 ADD_RANGE(0x2288, 1);
155 ADD_RANGE(R300_VAP_OUTPUT_VTX_FMT_0, 2);
156 ADD_RANGE(R300_VAP_PVS_CNTL_1, 3);
157 ADD_RANGE(R300_GB_ENABLE, 1);
158 ADD_RANGE(R300_GB_MSPOS0, 5);
159 ADD_RANGE(R300_TX_ENABLE, 1);
160 ADD_RANGE(0x4200, 4);
161 ADD_RANGE(0x4214, 1);
162 ADD_RANGE(R300_RE_POINTSIZE, 1);
163 ADD_RANGE(0x4230, 3);
164 ADD_RANGE(R300_RE_LINE_CNT, 1);
165 ADD_RANGE(0x4238, 1);
166 ADD_RANGE(0x4260, 3);
167 ADD_RANGE(0x4274, 4);
168 ADD_RANGE(0x4288, 5);
169 ADD_RANGE(0x42A0, 1);
170 ADD_RANGE(R300_RE_ZBIAS_T_FACTOR, 4);
171 ADD_RANGE(0x42B4, 1);
172 ADD_RANGE(R300_RE_CULL_CNTL, 1);
173 ADD_RANGE(0x42C0, 2);
174 ADD_RANGE(R300_RS_CNTL_0, 2);
175 ADD_RANGE(R300_RS_INTERP_0, 8);
176 ADD_RANGE(R300_RS_ROUTE_0, 8);
177 ADD_RANGE(0x43A4, 2);
178 ADD_RANGE(0x43E8, 1);
179 ADD_RANGE(R300_PFS_CNTL_0, 3);
180 ADD_RANGE(R300_PFS_NODE_0, 4);
181 ADD_RANGE(R300_PFS_TEXI_0, 64);
182 ADD_RANGE(0x46A4, 5);
183 ADD_RANGE(R300_PFS_INSTR0_0, 64);
184 ADD_RANGE(R300_PFS_INSTR1_0, 64);
185 ADD_RANGE(R300_PFS_INSTR2_0, 64);
186 ADD_RANGE(R300_PFS_INSTR3_0, 64);
187 ADD_RANGE(0x4BC0, 1);
188 ADD_RANGE(0x4BC8, 3);
189 ADD_RANGE(R300_PP_ALPHA_TEST, 2);
190 ADD_RANGE(0x4BD8, 1);
191 ADD_RANGE(R300_PFS_PARAM_0_X, 64);
192 ADD_RANGE(0x4E00, 1);
193 ADD_RANGE(R300_RB3D_CBLEND, 2);
194 ADD_RANGE(R300_RB3D_COLORMASK, 1);
195 ADD_RANGE(0x4E10, 3);
196 ADD_RANGE_MARK(R300_RB3D_COLOROFFSET0, 1, MARK_CHECK_OFFSET); /* check offset */
197 ADD_RANGE(R300_RB3D_COLORPITCH0, 1);
198 ADD_RANGE(0x4E50, 9);
199 ADD_RANGE(0x4E88, 1);
200 ADD_RANGE(0x4EA0, 2);
201 ADD_RANGE(R300_RB3D_ZSTENCIL_CNTL_0, 3);
202 ADD_RANGE(0x4F10, 4);
203 ADD_RANGE_MARK(R300_RB3D_DEPTHOFFSET, 1, MARK_CHECK_OFFSET); /* check offset */
204 ADD_RANGE(R300_RB3D_DEPTHPITCH, 1);
205 ADD_RANGE(0x4F28, 1);
206 ADD_RANGE(0x4F30, 2);
207 ADD_RANGE(0x4F44, 1);
208 ADD_RANGE(0x4F54, 1);
209
210 ADD_RANGE(R300_TX_FILTER_0, 16);
211 ADD_RANGE(R300_TX_UNK1_0, 16);
212 ADD_RANGE(R300_TX_SIZE_0, 16);
213 ADD_RANGE(R300_TX_FORMAT_0, 16);
214 /* Texture offset is dangerous and needs more checking */
215 ADD_RANGE_MARK(R300_TX_OFFSET_0, 16, MARK_CHECK_OFFSET);
216 ADD_RANGE(R300_TX_UNK4_0, 16);
217 ADD_RANGE(R300_TX_BORDER_COLOR_0, 16);
218
219 /* Sporadic registers used while primitives are emitted */
220 ADD_RANGE(0x4f18, 1);
221 ADD_RANGE(R300_RB3D_DSTCACHE_CTLSTAT, 1);
222 ADD_RANGE(R300_VAP_INPUT_ROUTE_0_0, 8);
223 ADD_RANGE(R300_VAP_INPUT_ROUTE_1_0, 8);
224
225}
226
227static __inline__ int r300_check_range(unsigned reg, int count)
228{
229 int i;
230 if(reg & ~0xffff)return -1;
231 for(i=(reg>>2);i<(reg>>2)+count;i++)
232 if(r300_reg_flags[i]!=MARK_SAFE)return 1;
233 return 0;
234}
235
236 /* we expect offsets passed to the framebuffer to be either within video memory or
237 within AGP space */
238static __inline__ int r300_check_offset(drm_radeon_private_t* dev_priv, u32 offset)
239{
240 /* we really want to check against the end of the video aperture,
241 but this value is not being kept.
242 This code is correct for now (it does the same thing as the
243 code that sets MC_FB_LOCATION in radeon_cp.c) */
244 if((offset>=dev_priv->fb_location) &&
245 (offset<dev_priv->gart_vm_start))return 0;
246 if((offset>=dev_priv->gart_vm_start) &&
247 (offset<dev_priv->gart_vm_start+dev_priv->gart_size))return 0;
248 return 1;
249}
250
251static __inline__ int r300_emit_carefully_checked_packet0(drm_radeon_private_t* dev_priv,
252 drm_radeon_cmd_buffer_t* cmdbuf,
253 drm_r300_cmd_header_t header)
254{
255 int reg;
256 int sz;
257 int i;
258 int values[64];
259 RING_LOCALS;
260
261 sz = header.packet0.count;
262 reg = (header.packet0.reghi << 8) | header.packet0.reglo;
263
264 if((sz>64)||(sz<0)){
265 DRM_ERROR("Cannot emit more than 64 values at a time (reg=%04x sz=%d)\n", reg, sz);
266 return DRM_ERR(EINVAL);
267 }
268 for(i=0;i<sz;i++){
269 values[i]=((int __user*)cmdbuf->buf)[i];
270 switch(r300_reg_flags[(reg>>2)+i]){
271 case MARK_SAFE:
272 break;
273 case MARK_CHECK_OFFSET:
274 if(r300_check_offset(dev_priv, (u32)values[i])){
275 DRM_ERROR("Offset failed range check (reg=%04x sz=%d)\n", reg, sz);
276 return DRM_ERR(EINVAL);
277 }
278 break;
279 default:
280 DRM_ERROR("Register %04x failed check as flag=%02x\n", reg+i*4, r300_reg_flags[(reg>>2)+i]);
281 return DRM_ERR(EINVAL);
282 }
283 }
284
285 BEGIN_RING(1+sz);
286 OUT_RING( CP_PACKET0( reg, sz-1 ) );
287 OUT_RING_TABLE( values, sz );
288 ADVANCE_RING();
289
290 cmdbuf->buf += sz*4;
291 cmdbuf->bufsz -= sz*4;
292
293 return 0;
294}
295
296/**
297 * Emits a packet0 setting arbitrary registers.
298 * Called by r300_do_cp_cmdbuf.
299 *
300 * Note that checks are performed on contents and addresses of the registers
301 */
302static __inline__ int r300_emit_packet0(drm_radeon_private_t* dev_priv,
303 drm_radeon_cmd_buffer_t* cmdbuf,
304 drm_r300_cmd_header_t header)
305{
306 int reg;
307 int sz;
308 RING_LOCALS;
309
310 sz = header.packet0.count;
311 reg = (header.packet0.reghi << 8) | header.packet0.reglo;
312
313 if (!sz)
314 return 0;
315
316 if (sz*4 > cmdbuf->bufsz)
317 return DRM_ERR(EINVAL);
318
319 if (reg+sz*4 >= 0x10000){
320 DRM_ERROR("No such registers in hardware reg=%04x sz=%d\n", reg, sz);
321 return DRM_ERR(EINVAL);
322 }
323
324 if(r300_check_range(reg, sz)){
325 /* go and check everything */
326 return r300_emit_carefully_checked_packet0(dev_priv, cmdbuf, header);
327 }
328 /* the rest of the data is safe to emit, whatever the values the user passed */
329
330 BEGIN_RING(1+sz);
331 OUT_RING( CP_PACKET0( reg, sz-1 ) );
332 OUT_RING_TABLE( (int __user*)cmdbuf->buf, sz );
333 ADVANCE_RING();
334
335 cmdbuf->buf += sz*4;
336 cmdbuf->bufsz -= sz*4;
337
338 return 0;
339}
340
341
342/**
343 * Uploads user-supplied vertex program instructions or parameters onto
344 * the graphics card.
345 * Called by r300_do_cp_cmdbuf.
346 */
347static __inline__ int r300_emit_vpu(drm_radeon_private_t* dev_priv,
348 drm_radeon_cmd_buffer_t* cmdbuf,
349 drm_r300_cmd_header_t header)
350{
351 int sz;
352 int addr;
353 RING_LOCALS;
354
355 sz = header.vpu.count;
356 addr = (header.vpu.adrhi << 8) | header.vpu.adrlo;
357
358 if (!sz)
359 return 0;
360 if (sz*16 > cmdbuf->bufsz)
361 return DRM_ERR(EINVAL);
362
363 BEGIN_RING(5+sz*4);
364 /* Wait for the VAP to come to its senses. */
365 /* There is no need to emit this multiple times (only once before
366 the VAP is programmed), but that optimization is for later. */
367 OUT_RING_REG( R300_VAP_PVS_WAITIDLE, 0 );
368 OUT_RING_REG( R300_VAP_PVS_UPLOAD_ADDRESS, addr );
369 OUT_RING( CP_PACKET0_TABLE( R300_VAP_PVS_UPLOAD_DATA, sz*4 - 1 ) );
370 OUT_RING_TABLE( (int __user*)cmdbuf->buf, sz*4 );
371
372 ADVANCE_RING();
373
374 cmdbuf->buf += sz*16;
375 cmdbuf->bufsz -= sz*16;
376
377 return 0;
378}
379
380
381/**
382 * Emit a clear packet from userspace.
383 * Called by r300_emit_packet3.
384 */
385static __inline__ int r300_emit_clear(drm_radeon_private_t* dev_priv,
386 drm_radeon_cmd_buffer_t* cmdbuf)
387{
388 RING_LOCALS;
389
390 if (8*4 > cmdbuf->bufsz)
391 return DRM_ERR(EINVAL);
392
393 BEGIN_RING(10);
394 OUT_RING( CP_PACKET3( R200_3D_DRAW_IMMD_2, 8 ) );
395 OUT_RING( R300_PRIM_TYPE_POINT|R300_PRIM_WALK_RING|
396 (1<<R300_PRIM_NUM_VERTICES_SHIFT) );
397 OUT_RING_TABLE( (int __user*)cmdbuf->buf, 8 );
398 ADVANCE_RING();
399
400 cmdbuf->buf += 8*4;
401 cmdbuf->bufsz -= 8*4;
402
403 return 0;
404}
405
406static __inline__ int r300_emit_3d_load_vbpntr(drm_radeon_private_t* dev_priv,
407 drm_radeon_cmd_buffer_t* cmdbuf,
408 u32 header)
409{
410 int count, i,k;
411 #define MAX_ARRAY_PACKET 64
412 u32 payload[MAX_ARRAY_PACKET];
413 u32 narrays;
414 RING_LOCALS;
415
416 count=(header>>16) & 0x3fff;
417
418 if((count+1)>MAX_ARRAY_PACKET){
419 DRM_ERROR("Too large payload in 3D_LOAD_VBPNTR (count=%d)\n", count);
420 return DRM_ERR(EINVAL);
421 }
422 memset(payload, 0, MAX_ARRAY_PACKET*4);
423 memcpy(payload, cmdbuf->buf+4, (count+1)*4);
424
425 /* carefully check packet contents */
426
427 narrays=payload[0];
428 k=0;
429 i=1;
430 while((k<narrays) && (i<(count+1))){
431 i++; /* skip attribute field */
432 if(r300_check_offset(dev_priv, payload[i])){
433 DRM_ERROR("Offset failed range check (k=%d i=%d) while processing 3D_LOAD_VBPNTR packet.\n", k, i);
434 return DRM_ERR(EINVAL);
435 }
436 k++;
437 i++;
438 if(k==narrays)break;
439 /* have one more to process, they come in pairs */
440 if(r300_check_offset(dev_priv, payload[i])){
441 DRM_ERROR("Offset failed range check (k=%d i=%d) while processing 3D_LOAD_VBPNTR packet.\n", k, i);
442 return DRM_ERR(EINVAL);
443 }
444 k++;
445 i++;
446 }
447 /* do the counts match what we expect ? */
448 if((k!=narrays) || (i!=(count+1))){
449 DRM_ERROR("Malformed 3D_LOAD_VBPNTR packet (k=%d i=%d narrays=%d count+1=%d).\n", k, i, narrays, count+1);
450 return DRM_ERR(EINVAL);
451 }
452
453 /* all clear, output packet */
454
455 BEGIN_RING(count+2);
456 OUT_RING(header);
457 OUT_RING_TABLE(payload, count+1);
458 ADVANCE_RING();
459
460 cmdbuf->buf += (count+2)*4;
461 cmdbuf->bufsz -= (count+2)*4;
462
463 return 0;
464}
465
466static __inline__ int r300_emit_raw_packet3(drm_radeon_private_t* dev_priv,
467 drm_radeon_cmd_buffer_t* cmdbuf)
468{
469 u32 header;
470 int count;
471 RING_LOCALS;
472
473 if (4 > cmdbuf->bufsz)
474 return DRM_ERR(EINVAL);
475
476 /* FIXME: This simply emits a packet without much checking.
477 We need to be smarter. */
478
479 /* obtain first word - actual packet3 header */
480 header = *(u32 __user*)cmdbuf->buf;
481
482 /* Is it packet 3 ? */
483 if( (header>>30)!=0x3 ) {
484 DRM_ERROR("Not a packet3 header (0x%08x)\n", header);
485 return DRM_ERR(EINVAL);
486 }
487
488 count=(header>>16) & 0x3fff;
489
490 /* Check again now that we know how much data to expect */
491 if ((count+2)*4 > cmdbuf->bufsz){
492 DRM_ERROR("Expected packet3 of length %d but have only %d bytes left\n",
493 (count+2)*4, cmdbuf->bufsz);
494 return DRM_ERR(EINVAL);
495 }
496
497 /* Is it a packet type we know about ? */
498 switch(header & 0xff00){
499 case RADEON_3D_LOAD_VBPNTR: /* load vertex array pointers */
500 return r300_emit_3d_load_vbpntr(dev_priv, cmdbuf, header);
501
502 case RADEON_CP_3D_DRAW_IMMD_2: /* triggers drawing using in-packet vertex data */
503 case RADEON_CP_3D_DRAW_VBUF_2: /* triggers drawing of vertex buffers setup elsewhere */
504 case RADEON_CP_3D_DRAW_INDX_2: /* triggers drawing using indices to vertex buffer */
505 case RADEON_CP_INDX_BUFFER: /* DRAW_INDX_2 without INDX_BUFFER seems to lock up the gpu */
506 case RADEON_WAIT_FOR_IDLE:
507 case RADEON_CP_NOP:
508 /* these packets are safe */
509 break;
510 default:
511 DRM_ERROR("Unknown packet3 header (0x%08x)\n", header);
512 return DRM_ERR(EINVAL);
513 }
514
515
516 BEGIN_RING(count+2);
517 OUT_RING(header);
518 OUT_RING_TABLE( (int __user*)(cmdbuf->buf+4), count+1);
519 ADVANCE_RING();
520
521 cmdbuf->buf += (count+2)*4;
522 cmdbuf->bufsz -= (count+2)*4;
523
524 return 0;
525}
526
527
528/**
529 * Emit a rendering packet3 from userspace.
530 * Called by r300_do_cp_cmdbuf.
531 */
532static __inline__ int r300_emit_packet3(drm_radeon_private_t* dev_priv,
533 drm_radeon_cmd_buffer_t* cmdbuf,
534 drm_r300_cmd_header_t header)
535{
536 int n;
537 int ret;
538 char __user* orig_buf = cmdbuf->buf;
539 int orig_bufsz = cmdbuf->bufsz;
540
541 /* This is a do-while-loop so that we run the interior at least once,
542 * even if cmdbuf->nbox is 0. Compare r300_emit_cliprects for rationale.
543 */
544 n = 0;
545 do {
546 if (cmdbuf->nbox > R300_SIMULTANEOUS_CLIPRECTS) {
547 ret = r300_emit_cliprects(dev_priv, cmdbuf, n);
548 if (ret)
549 return ret;
550
551 cmdbuf->buf = orig_buf;
552 cmdbuf->bufsz = orig_bufsz;
553 }
554
555 switch(header.packet3.packet) {
556 case R300_CMD_PACKET3_CLEAR:
557 DRM_DEBUG("R300_CMD_PACKET3_CLEAR\n");
558 ret = r300_emit_clear(dev_priv, cmdbuf);
559 if (ret) {
560 DRM_ERROR("r300_emit_clear failed\n");
561 return ret;
562 }
563 break;
564
565 case R300_CMD_PACKET3_RAW:
566 DRM_DEBUG("R300_CMD_PACKET3_RAW\n");
567 ret = r300_emit_raw_packet3(dev_priv, cmdbuf);
568 if (ret) {
569 DRM_ERROR("r300_emit_raw_packet3 failed\n");
570 return ret;
571 }
572 break;
573
574 default:
575 DRM_ERROR("bad packet3 type %i at %p\n",
576 header.packet3.packet,
577 cmdbuf->buf - sizeof(header));
578 return DRM_ERR(EINVAL);
579 }
580
581 n += R300_SIMULTANEOUS_CLIPRECTS;
582 } while(n < cmdbuf->nbox);
583
584 return 0;
585}
586
587/* Some of the R300 chips seem to be extremely touchy about the two registers
588 * that are configured in r300_pacify.
589 * Among the worst offenders seems to be the R300 ND (0x4E44): When userspace
590 * sends a command buffer that contains only state setting commands and a
591 * vertex program/parameter upload sequence, this will eventually lead to a
592 * lockup, unless the sequence is bracketed by calls to r300_pacify.
593 * So we should take great care to *always* call r300_pacify before
594 * *anything* 3D related, and again afterwards. This is what the
595 * call bracket in r300_do_cp_cmdbuf is for.
596 */
597
598/**
599 * Emit the sequence to pacify R300.
600 */
601static __inline__ void r300_pacify(drm_radeon_private_t* dev_priv)
602{
603 RING_LOCALS;
604
605 BEGIN_RING(6);
606 OUT_RING( CP_PACKET0( R300_RB3D_DSTCACHE_CTLSTAT, 0 ) );
607 OUT_RING( 0xa );
608 OUT_RING( CP_PACKET0( 0x4f18, 0 ) );
609 OUT_RING( 0x3 );
610 OUT_RING( CP_PACKET3( RADEON_CP_NOP, 0 ) );
611 OUT_RING( 0x0 );
612 ADVANCE_RING();
613}
614
615
616/**
617 * Called by r300_do_cp_cmdbuf to update the internal buffer age and state.
618 * The actual age emit is done by r300_do_cp_cmdbuf, which is why you must
619 * be careful about how this function is called.
620 */
621static void r300_discard_buffer(drm_device_t * dev, drm_buf_t * buf)
622{
623 drm_radeon_private_t *dev_priv = dev->dev_private;
624 drm_radeon_buf_priv_t *buf_priv = buf->dev_private;
625
626 buf_priv->age = ++dev_priv->sarea_priv->last_dispatch;
627 buf->pending = 1;
628 buf->used = 0;
629}
630
631
632/**
633 * Parses and validates a user-supplied command buffer and emits appropriate
634 * commands on the DMA ring buffer.
635 * Called by the ioctl handler function radeon_cp_cmdbuf.
636 */
637int r300_do_cp_cmdbuf(drm_device_t* dev,
638 DRMFILE filp,
639 drm_file_t* filp_priv,
640 drm_radeon_cmd_buffer_t* cmdbuf)
641{
642 drm_radeon_private_t *dev_priv = dev->dev_private;
643 drm_device_dma_t *dma = dev->dma;
644 drm_buf_t *buf = NULL;
645 int emit_dispatch_age = 0;
646 int ret = 0;
647
648 DRM_DEBUG("\n");
649
650 /* See the comment above r300_pacify for why this call must be here,
651 * and what the cleanup gotos are for. */
652 r300_pacify(dev_priv);
653
654 if (cmdbuf->nbox <= R300_SIMULTANEOUS_CLIPRECTS) {
655 ret = r300_emit_cliprects(dev_priv, cmdbuf, 0);
656 if (ret)
657 goto cleanup;
658 }
659
660 while(cmdbuf->bufsz >= sizeof(drm_r300_cmd_header_t)) {
661 int idx;
662 drm_r300_cmd_header_t header;
663
664 header.u = *(unsigned int *)cmdbuf->buf;
665
666 cmdbuf->buf += sizeof(header);
667 cmdbuf->bufsz -= sizeof(header);
668
669 switch(header.header.cmd_type) {
670 case R300_CMD_PACKET0:
671 DRM_DEBUG("R300_CMD_PACKET0\n");
672 ret = r300_emit_packet0(dev_priv, cmdbuf, header);
673 if (ret) {
674 DRM_ERROR("r300_emit_packet0 failed\n");
675 goto cleanup;
676 }
677 break;
678
679 case R300_CMD_VPU:
680 DRM_DEBUG("R300_CMD_VPU\n");
681 ret = r300_emit_vpu(dev_priv, cmdbuf, header);
682 if (ret) {
683 DRM_ERROR("r300_emit_vpu failed\n");
684 goto cleanup;
685 }
686 break;
687
688 case R300_CMD_PACKET3:
689 DRM_DEBUG("R300_CMD_PACKET3\n");
690 ret = r300_emit_packet3(dev_priv, cmdbuf, header);
691 if (ret) {
692 DRM_ERROR("r300_emit_packet3 failed\n");
693 goto cleanup;
694 }
695 break;
696
697 case R300_CMD_END3D:
698 DRM_DEBUG("R300_CMD_END3D\n");
699 /* TODO:
700 Ideally the userspace driver should not need to issue this call,
701 i.e. the drm driver should issue it automatically and prevent
702 lockups.
703
704 In practice, we do not understand why this call is needed and what
705 it does (beyond some vague guesses that it has to do with cache
706 coherence), so the userspace driver issues it.
707
708 Once we are sure which uses prevent lockups, the code could be
709 moved into the kernel and the userspace driver would no longer
710 need to issue this command.
711
712 Note that issuing this command does not hurt anything except,
713 possibly, performance. */
714 r300_pacify(dev_priv);
715 break;
716
717 case R300_CMD_CP_DELAY:
718 /* simple enough, we can do it here */
719 DRM_DEBUG("R300_CMD_CP_DELAY\n");
720 {
721 int i;
722 RING_LOCALS;
723
724 BEGIN_RING(header.delay.count);
725 for(i=0;i<header.delay.count;i++)
726 OUT_RING(RADEON_CP_PACKET2);
727 ADVANCE_RING();
728 }
729 break;
730
731 case R300_CMD_DMA_DISCARD:
732 DRM_DEBUG("RADEON_CMD_DMA_DISCARD\n");
733 idx = header.dma.buf_idx;
734 if (idx < 0 || idx >= dma->buf_count) {
735 DRM_ERROR("buffer index %d (of %d max)\n",
736 idx, dma->buf_count - 1);
737 ret = DRM_ERR(EINVAL);
738 goto cleanup;
739 }
740
741 buf = dma->buflist[idx];
742 if (buf->filp != filp || buf->pending) {
743 DRM_ERROR("bad buffer %p %p %d\n",
744 buf->filp, filp, buf->pending);
745 ret = DRM_ERR(EINVAL);
746 goto cleanup;
747 }
748
749 emit_dispatch_age = 1;
750 r300_discard_buffer(dev, buf);
751 break;
752
753 case R300_CMD_WAIT:
754 /* simple enough, we can do it here */
755 DRM_DEBUG("R300_CMD_WAIT\n");
756 if(header.wait.flags==0)break; /* nothing to do */
757
758 {
759 RING_LOCALS;
760
761 BEGIN_RING(2);
762 OUT_RING( CP_PACKET0( RADEON_WAIT_UNTIL, 0 ) );
763 OUT_RING( (header.wait.flags & 0xf)<<14 );
764 ADVANCE_RING();
765 }
766 break;
767
768 default:
769 DRM_ERROR("bad cmd_type %i at %p\n",
770 header.header.cmd_type,
771 cmdbuf->buf - sizeof(header));
772 ret = DRM_ERR(EINVAL);
773 goto cleanup;
774 }
775 }
776
777 DRM_DEBUG("END\n");
778
779cleanup:
780 r300_pacify(dev_priv);
781
782 /* We emit the vertex buffer age here, outside the pacifier "brackets"
783 * for two reasons:
784 * (1) This may coalesce multiple age emissions into a single one and
785 * (2) more importantly, some chips lock up hard when scratch registers
786 * are written inside the pacifier bracket.
787 */
788 if (emit_dispatch_age) {
789 RING_LOCALS;
790
791 /* Emit the vertex buffer age */
792 BEGIN_RING(2);
793 RADEON_DISPATCH_AGE(dev_priv->sarea_priv->last_dispatch);
794 ADVANCE_RING();
795 }
796
797 COMMIT_RING();
798
799 return ret;
800}
801
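The core of the command checking in this new file is the per-register flag table built by r300_init_reg_flags(): one flag byte per 32-bit register, with anything unmarked, or marked as carrying an offset, diverted to the carefully checked emission path. A condensed user-space sketch of that table logic (the example addresses are illustrative):

#include <stdio.h>
#include <string.h>

#define MARK_SAFE         1
#define MARK_CHECK_OFFSET 2

/* One flag byte per 32-bit register in a 64KB aperture. */
static unsigned char reg_flags[0x10000 >> 2];

static void add_range(unsigned int reg, int count, unsigned char mark)
{
	int i;

	for (i = reg >> 2; i < (int)(reg >> 2) + count; i++)
		reg_flags[i] |= mark;
}

/* 0: whole span is safe to emit as-is; 1: needs the checked path;
 * -1: span lies outside the register aperture. */
static int check_range(unsigned int reg, int count)
{
	int i;

	if (reg & ~0xffffu)
		return -1;
	for (i = reg >> 2; i < (int)(reg >> 2) + count; i++)
		if (reg_flags[i] != MARK_SAFE)
			return 1;
	return 0;
}

int main(void)
{
	memset(reg_flags, 0, sizeof(reg_flags));
	add_range(0x2150, 8, MARK_SAFE);	 /* e.g. INPUT_ROUTE_0_* */
	add_range(0x4E28, 1, MARK_CHECK_OFFSET); /* e.g. a color offset */

	printf("safe span: %d, offset reg: %d, spill: %d\n",
	       check_range(0x2150, 8), check_range(0x4E28, 1),
	       check_range(0x2150, 9));
	return 0;
}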
diff --git a/drivers/char/drm/r300_reg.h b/drivers/char/drm/r300_reg.h
new file mode 100644
index 000000000000..c3e7ca3dbe3d
--- /dev/null
+++ b/drivers/char/drm/r300_reg.h
@@ -0,0 +1,1412 @@
1/**************************************************************************
2
3Copyright (C) 2004-2005 Nicolai Haehnle et al.
4
5Permission is hereby granted, free of charge, to any person obtaining a
6copy of this software and associated documentation files (the "Software"),
7to deal in the Software without restriction, including without limitation
8on the rights to use, copy, modify, merge, publish, distribute, sub
9license, and/or sell copies of the Software, and to permit persons to whom
10the Software is furnished to do so, subject to the following conditions:
11
12The above copyright notice and this permission notice (including the next
13paragraph) shall be included in all copies or substantial portions of the
14Software.
15
16THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
19THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
20DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
21OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
22USE OR OTHER DEALINGS IN THE SOFTWARE.
23
24**************************************************************************/
25
26#ifndef _R300_REG_H
27#define _R300_REG_H
28
29#define R300_MC_INIT_MISC_LAT_TIMER 0x180
30# define R300_MC_MISC__MC_CPR_INIT_LAT_SHIFT 0
31# define R300_MC_MISC__MC_VF_INIT_LAT_SHIFT 4
32# define R300_MC_MISC__MC_DISP0R_INIT_LAT_SHIFT 8
33# define R300_MC_MISC__MC_DISP1R_INIT_LAT_SHIFT 12
34# define R300_MC_MISC__MC_FIXED_INIT_LAT_SHIFT 16
35# define R300_MC_MISC__MC_E2R_INIT_LAT_SHIFT 20
36# define R300_MC_MISC__MC_SAME_PAGE_PRIO_SHIFT 24
37# define R300_MC_MISC__MC_GLOBW_INIT_LAT_SHIFT 28
38
39
40#define R300_MC_INIT_GFX_LAT_TIMER 0x154
41# define R300_MC_MISC__MC_G3D0R_INIT_LAT_SHIFT 0
42# define R300_MC_MISC__MC_G3D1R_INIT_LAT_SHIFT 4
43# define R300_MC_MISC__MC_G3D2R_INIT_LAT_SHIFT 8
44# define R300_MC_MISC__MC_G3D3R_INIT_LAT_SHIFT 12
45# define R300_MC_MISC__MC_TX0R_INIT_LAT_SHIFT 16
46# define R300_MC_MISC__MC_TX1R_INIT_LAT_SHIFT 20
47# define R300_MC_MISC__MC_GLOBR_INIT_LAT_SHIFT 24
48# define R300_MC_MISC__MC_GLOBW_FULL_LAT_SHIFT 28
49
50/*
51This file contains registers and constants for the R300. They have been
52found mostly by examining command buffers captured using glxtest, as well
53as by extrapolating some known registers and constants from the R200.
54
55I am fairly certain that they are correct unless stated otherwise in comments.
56*/
57
58#define R300_SE_VPORT_XSCALE 0x1D98
59#define R300_SE_VPORT_XOFFSET 0x1D9C
60#define R300_SE_VPORT_YSCALE 0x1DA0
61#define R300_SE_VPORT_YOFFSET 0x1DA4
62#define R300_SE_VPORT_ZSCALE 0x1DA8
63#define R300_SE_VPORT_ZOFFSET 0x1DAC
64
65
66/* This register is written directly and also starts data section in many 3d CP_PACKET3's */
67#define R300_VAP_VF_CNTL 0x2084
68
69# define R300_VAP_VF_CNTL__PRIM_TYPE__SHIFT 0
70# define R300_VAP_VF_CNTL__PRIM_NONE (0<<0)
71# define R300_VAP_VF_CNTL__PRIM_POINTS (1<<0)
72# define R300_VAP_VF_CNTL__PRIM_LINES (2<<0)
73# define R300_VAP_VF_CNTL__PRIM_LINE_STRIP (3<<0)
74# define R300_VAP_VF_CNTL__PRIM_TRIANGLES (4<<0)
75# define R300_VAP_VF_CNTL__PRIM_TRIANGLE_FAN (5<<0)
76# define R300_VAP_VF_CNTL__PRIM_TRIANGLE_STRIP (6<<0)
77# define R300_VAP_VF_CNTL__PRIM_LINE_LOOP (12<<0)
78# define R300_VAP_VF_CNTL__PRIM_QUADS (13<<0)
79# define R300_VAP_VF_CNTL__PRIM_QUAD_STRIP (14<<0)
80# define R300_VAP_VF_CNTL__PRIM_POLYGON (15<<0)
81
82# define R300_VAP_VF_CNTL__PRIM_WALK__SHIFT 4
83 /* State based - direct writes to registers trigger vertex generation */
84# define R300_VAP_VF_CNTL__PRIM_WALK_STATE_BASED (0<<4)
85# define R300_VAP_VF_CNTL__PRIM_WALK_INDICES (1<<4)
86# define R300_VAP_VF_CNTL__PRIM_WALK_VERTEX_LIST (2<<4)
87# define R300_VAP_VF_CNTL__PRIM_WALK_VERTEX_EMBEDDED (3<<4)
88
89 /* I don't think I saw these three used.. */
90# define R300_VAP_VF_CNTL__COLOR_ORDER__SHIFT 6
91# define R300_VAP_VF_CNTL__TCL_OUTPUT_CTL_ENA__SHIFT 9
92# define R300_VAP_VF_CNTL__PROG_STREAM_ENA__SHIFT 10
93
94 /* index size - when not set the indices are assumed to be 16 bit */
95# define R300_VAP_VF_CNTL__INDEX_SIZE_32bit (1<<11)
96 /* number of vertices */
97# define R300_VAP_VF_CNTL__NUM_VERTICES__SHIFT 16
98
99/* BEGIN: Wild guesses */
100#define R300_VAP_OUTPUT_VTX_FMT_0 0x2090
101# define R300_VAP_OUTPUT_VTX_FMT_0__POS_PRESENT (1<<0)
102# define R300_VAP_OUTPUT_VTX_FMT_0__COLOR_PRESENT (1<<1)
103# define R300_VAP_OUTPUT_VTX_FMT_0__COLOR_1_PRESENT (1<<2) /* GUESS */
104# define R300_VAP_OUTPUT_VTX_FMT_0__COLOR_2_PRESENT (1<<3) /* GUESS */
105# define R300_VAP_OUTPUT_VTX_FMT_0__COLOR_3_PRESENT (1<<4) /* GUESS */
106# define R300_VAP_OUTPUT_VTX_FMT_0__PT_SIZE_PRESENT (1<<16) /* GUESS */
107
108#define R300_VAP_OUTPUT_VTX_FMT_1 0x2094
109# define R300_VAP_OUTPUT_VTX_FMT_1__TEX_0_COMP_CNT_SHIFT 0
110# define R300_VAP_OUTPUT_VTX_FMT_1__TEX_1_COMP_CNT_SHIFT 3
111# define R300_VAP_OUTPUT_VTX_FMT_1__TEX_2_COMP_CNT_SHIFT 6
112# define R300_VAP_OUTPUT_VTX_FMT_1__TEX_3_COMP_CNT_SHIFT 9
113# define R300_VAP_OUTPUT_VTX_FMT_1__TEX_4_COMP_CNT_SHIFT 12
114# define R300_VAP_OUTPUT_VTX_FMT_1__TEX_5_COMP_CNT_SHIFT 15
115# define R300_VAP_OUTPUT_VTX_FMT_1__TEX_6_COMP_CNT_SHIFT 18
116# define R300_VAP_OUTPUT_VTX_FMT_1__TEX_7_COMP_CNT_SHIFT 21
117/* END */
118
119#define R300_SE_VTE_CNTL 0x20b0
120# define R300_VPORT_X_SCALE_ENA 0x00000001
121# define R300_VPORT_X_OFFSET_ENA 0x00000002
122# define R300_VPORT_Y_SCALE_ENA 0x00000004
123# define R300_VPORT_Y_OFFSET_ENA 0x00000008
124# define R300_VPORT_Z_SCALE_ENA 0x00000010
125# define R300_VPORT_Z_OFFSET_ENA 0x00000020
126# define R300_VTX_XY_FMT 0x00000100
127# define R300_VTX_Z_FMT 0x00000200
128# define R300_VTX_W0_FMT 0x00000400
129# define R300_VTX_W0_NORMALIZE 0x00000800
130# define R300_VTX_ST_DENORMALIZED 0x00001000
131
132/* BEGIN: Vertex data assembly - lots of uncertainties */
133/* gap */
134/* Where do we get our vertex data?
135//
136// Vertex data comes either from immediate mode registers or from
137// vertex arrays.
138// There appears to be no mixed mode (though we can force the pitch of
139// vertex arrays to 0, effectively reusing the same element over and over
140// again).
141//
142// Immediate mode is controlled by the INPUT_CNTL registers. I am not sure
143// if these registers influence vertex array processing.
144//
145// Vertex arrays are controlled via the 3D_LOAD_VBPNTR packet3.
146//
147// In both cases, vertex attributes are then passed through INPUT_ROUTE.
148
149// Beginning with INPUT_ROUTE_0_0 is a list of WORDs that route vertex data
150// into the vertex processor's input registers.
151// The first word routes the first input, the second word the second, etc.
152// The corresponding input is routed into the register with the given index.
153// The list is ended by a word with INPUT_ROUTE_END set.
154//
155// Always set COMPONENTS_4 in immediate mode. */
156
157#define R300_VAP_INPUT_ROUTE_0_0 0x2150
158# define R300_INPUT_ROUTE_COMPONENTS_1 (0 << 0)
159# define R300_INPUT_ROUTE_COMPONENTS_2 (1 << 0)
160# define R300_INPUT_ROUTE_COMPONENTS_3 (2 << 0)
161# define R300_INPUT_ROUTE_COMPONENTS_4 (3 << 0)
162# define R300_INPUT_ROUTE_COMPONENTS_RGBA (4 << 0) /* GUESS */
163# define R300_VAP_INPUT_ROUTE_IDX_SHIFT 8
164# define R300_VAP_INPUT_ROUTE_IDX_MASK (31 << 8) /* GUESS */
165# define R300_VAP_INPUT_ROUTE_END (1 << 13)
166# define R300_INPUT_ROUTE_IMMEDIATE_MODE (0 << 14) /* GUESS */
167# define R300_INPUT_ROUTE_FLOAT (1 << 14) /* GUESS */
168# define R300_INPUT_ROUTE_UNSIGNED_BYTE (2 << 14) /* GUESS */
169# define R300_INPUT_ROUTE_FLOAT_COLOR (3 << 14) /* GUESS */
170#define R300_VAP_INPUT_ROUTE_0_1 0x2154
171#define R300_VAP_INPUT_ROUTE_0_2 0x2158
172#define R300_VAP_INPUT_ROUTE_0_3 0x215C
173#define R300_VAP_INPUT_ROUTE_0_4 0x2160
174#define R300_VAP_INPUT_ROUTE_0_5 0x2164
175#define R300_VAP_INPUT_ROUTE_0_6 0x2168
176#define R300_VAP_INPUT_ROUTE_0_7 0x216C
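/* Hypothetical helper, not part of the original header: compose one
 * INPUT_ROUTE_0 word per the layout described above - a 4-component
 * float input routed into vertex register `dst`, with END set on the
 * last word of the list. Note the FLOAT type bit is itself a GUESS. */
static inline unsigned int r300_example_route0_word(unsigned int dst, int last)
{
	unsigned int w = R300_INPUT_ROUTE_COMPONENTS_4
		| (dst << R300_VAP_INPUT_ROUTE_IDX_SHIFT)
		| R300_INPUT_ROUTE_FLOAT;
	if (last)
		w |= R300_VAP_INPUT_ROUTE_END;
	return w;
}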
177
178/* gap */
179/* Notes:
180// - always set up to produce at least two attributes:
181// if the vertex program uses only position, fglrx will set the normal, too
182// - INPUT_CNTL_0_COLOR and INPUT_CNTL_COLOR bits are always equal */
183#define R300_VAP_INPUT_CNTL_0 0x2180
184# define R300_INPUT_CNTL_0_COLOR 0x00000001
185#define R300_VAP_INPUT_CNTL_1 0x2184
186# define R300_INPUT_CNTL_POS 0x00000001
187# define R300_INPUT_CNTL_NORMAL 0x00000002
188# define R300_INPUT_CNTL_COLOR 0x00000004
189# define R300_INPUT_CNTL_TC0 0x00000400
190# define R300_INPUT_CNTL_TC1 0x00000800
191# define R300_INPUT_CNTL_TC2 0x00001000 /* GUESS */
192# define R300_INPUT_CNTL_TC3 0x00002000 /* GUESS */
193# define R300_INPUT_CNTL_TC4 0x00004000 /* GUESS */
194# define R300_INPUT_CNTL_TC5 0x00008000 /* GUESS */
195# define R300_INPUT_CNTL_TC6 0x00010000 /* GUESS */
196# define R300_INPUT_CNTL_TC7 0x00020000 /* GUESS */
197
198/* gap */
199/* Words parallel to INPUT_ROUTE_0; all words that are active in INPUT_ROUTE_0
200// are set to a swizzling bit pattern, the other words are 0.
201//
202// In immediate mode, the pattern is always set to xyzw. In vertex array
203// mode, the swizzling pattern is e.g. used to set zw components in texture
204// coordinates with only two components. */
205#define R300_VAP_INPUT_ROUTE_1_0 0x21E0
206# define R300_INPUT_ROUTE_SELECT_X 0
207# define R300_INPUT_ROUTE_SELECT_Y 1
208# define R300_INPUT_ROUTE_SELECT_Z 2
209# define R300_INPUT_ROUTE_SELECT_W 3
210# define R300_INPUT_ROUTE_SELECT_ZERO 4
211# define R300_INPUT_ROUTE_SELECT_ONE 5
212# define R300_INPUT_ROUTE_SELECT_MASK 7
213# define R300_INPUT_ROUTE_X_SHIFT 0
214# define R300_INPUT_ROUTE_Y_SHIFT 3
215# define R300_INPUT_ROUTE_Z_SHIFT 6
216# define R300_INPUT_ROUTE_W_SHIFT 9
217# define R300_INPUT_ROUTE_ENABLE (15 << 12)
218#define R300_VAP_INPUT_ROUTE_1_1 0x21E4
219#define R300_VAP_INPUT_ROUTE_1_2 0x21E8
220#define R300_VAP_INPUT_ROUTE_1_3 0x21EC
221#define R300_VAP_INPUT_ROUTE_1_4 0x21F0
222#define R300_VAP_INPUT_ROUTE_1_5 0x21F4
223#define R300_VAP_INPUT_ROUTE_1_6 0x21F8
224#define R300_VAP_INPUT_ROUTE_1_7 0x21FC
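/* Illustration only: the identity swizzle (xyzw) that immediate mode always
 * uses according to the comment above, built from the selects and shifts. */
static inline unsigned int r300_example_route1_xyzw(void)
{
	return (R300_INPUT_ROUTE_SELECT_X << R300_INPUT_ROUTE_X_SHIFT) |
	       (R300_INPUT_ROUTE_SELECT_Y << R300_INPUT_ROUTE_Y_SHIFT) |
	       (R300_INPUT_ROUTE_SELECT_Z << R300_INPUT_ROUTE_Z_SHIFT) |
	       (R300_INPUT_ROUTE_SELECT_W << R300_INPUT_ROUTE_W_SHIFT) |
	       R300_INPUT_ROUTE_ENABLE;
}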
225
226/* END */
227
228/* gap */
229/* BEGIN: Upload vertex program and data
230// The programmable vertex shader unit has a memory bank of unknown size
231// that can be written to in 16 byte units by writing the address into
232// UPLOAD_ADDRESS, followed by data in UPLOAD_DATA (multiples of 4 DWORDs).
233//
234// Pointers into the memory bank are always in multiples of 16 bytes.
235//
236// The memory bank is divided into areas with fixed meaning.
237//
238// Starting at address UPLOAD_PROGRAM: Vertex program instructions.
239// Native limits reported by drivers from ATI suggest size 256 (i.e. 4KB),
240// whereas the difference between known addresses suggests size 512.
241//
242// Starting at address UPLOAD_PARAMETERS: Vertex program parameters.
243// Native reported limits and the VPI layout suggest size 256, whereas
244// difference between known addresses suggests size 512.
245//
246// At address UPLOAD_POINTSIZE is a vector (0, 0, ps, 0), where ps is the
247// floating point pointsize. The exact purpose of this state is uncertain,
248// as there is also the R300_RE_POINTSIZE register.
249//
250// Multiple vertex programs and parameter sets can be loaded at once,
251// which could explain the size discrepancy. */
252#define R300_VAP_PVS_UPLOAD_ADDRESS 0x2200
253# define R300_PVS_UPLOAD_PROGRAM 0x00000000
254# define R300_PVS_UPLOAD_PARAMETERS 0x00000200
255# define R300_PVS_UPLOAD_POINTSIZE 0x00000406
256/* gap */
257#define R300_VAP_PVS_UPLOAD_DATA 0x2208
258/* END */
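/* Upload sketch under the assumptions above (illustration only; reg_write()
 * stands in for whatever register access the caller uses): point the upload
 * address at the program area, then stream the instructions through
 * UPLOAD_DATA. ndwords should be a multiple of 4, one instruction per
 * 4 dwords. */
static inline void r300_example_upload_vp(void (*reg_write)(int, unsigned int),
					  const unsigned int *vpi, int ndwords)
{
	int i;

	reg_write(R300_VAP_PVS_UPLOAD_ADDRESS, R300_PVS_UPLOAD_PROGRAM);
	for (i = 0; i < ndwords; i++)
		reg_write(R300_VAP_PVS_UPLOAD_DATA, vpi[i]);
}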
259
260/* gap */
261/* I do not know the purpose of this register. However, I do know that
262// it is set to 221C_CLEAR for clear operations and to 221C_NORMAL
263// for normal rendering. */
264#define R300_VAP_UNKNOWN_221C 0x221C
265# define R300_221C_NORMAL 0x00000000
266# define R300_221C_CLEAR 0x0001C000
267
268/* gap */
269/* Sometimes, END_OF_PKT and 0x2284=0 are the only commands sent between
270// rendering commands and overwriting vertex program parameters.
271// Therefore, I suspect writing zero to 0x2284 synchronizes the engine and
272// avoids bugs caused by still running shaders reading bad data from memory. */
273#define R300_VAP_PVS_WAITIDLE 0x2284 /* GUESS */
274
275/* Absolutely no clue what this register is about. */
276#define R300_VAP_UNKNOWN_2288 0x2288
277# define R300_2288_R300 0x00750000 /* -- nh */
278# define R300_2288_RV350 0x0000FFFF /* -- Vladimir */
279
280/* gap */
281/* Addresses are relative to the vertex program instruction area of the
282// memory bank. PROGRAM_END points to the last instruction of the active
283// program
284//
285// The meaning of the two UNKNOWN fields is obviously not known. However,
286// experiments so far have shown that both *must* point to an instruction
287// inside the vertex program, otherwise the GPU locks up.
288// fglrx usually sets CNTL_3_UNKNOWN to the end of the program and
289// CNTL_1_UNKNOWN to the instruction where the last write to position takes place.
290// Most likely this is used to skip the rest of the program when a group of vertices isn't visible.
291// For some reason, instructions that have no relationship to position
292// calculations are sometimes also accepted in this "section".
293*/
294#define R300_VAP_PVS_CNTL_1 0x22D0
295# define R300_PVS_CNTL_1_PROGRAM_START_SHIFT 0
296# define R300_PVS_CNTL_1_POS_END_SHIFT 10
297# define R300_PVS_CNTL_1_PROGRAM_END_SHIFT 20
298/* Addresses are relative to the vertex program parameters area. */
299#define R300_VAP_PVS_CNTL_2 0x22D4
300# define R300_PVS_CNTL_2_PARAM_OFFSET_SHIFT 0
301# define R300_PVS_CNTL_2_PARAM_COUNT_SHIFT 16
302#define R300_VAP_PVS_CNTL_3 0x22D8
303# define R300_PVS_CNTL_3_PROGRAM_UNKNOWN_SHIFT 10
304# define R300_PVS_CNTL_3_PROGRAM_UNKNOWN2_SHIFT 0
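/* Sketch (not from the original header): pack PVS_CNTL_1 for a program
 * occupying instructions [start, end], with pos_end pointing at the last
 * write to position, per the observations above. */
static inline unsigned int r300_example_pvs_cntl_1(unsigned int start,
						   unsigned int pos_end,
						   unsigned int end)
{
	return (start << R300_PVS_CNTL_1_PROGRAM_START_SHIFT) |
	       (pos_end << R300_PVS_CNTL_1_POS_END_SHIFT) |
	       (end << R300_PVS_CNTL_1_PROGRAM_END_SHIFT);
}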
305
306/* The entire range from 0x2300 to 0x24AC inclusive seems to be used for
307// immediate vertices */
308#define R300_VAP_VTX_COLOR_R 0x2464
309#define R300_VAP_VTX_COLOR_G 0x2468
310#define R300_VAP_VTX_COLOR_B 0x246C
311#define R300_VAP_VTX_POS_0_X_1 0x2490 /* used for glVertex2*() */
312#define R300_VAP_VTX_POS_0_Y_1 0x2494
313#define R300_VAP_VTX_COLOR_PKD 0x249C /* RGBA */
314#define R300_VAP_VTX_POS_0_X_2 0x24A0 /* used for glVertex3*() */
315#define R300_VAP_VTX_POS_0_Y_2 0x24A4
316#define R300_VAP_VTX_POS_0_Z_2 0x24A8
317#define R300_VAP_VTX_END_OF_PKT 0x24AC /* write 0 to indicate end of packet? */
318
319/* gap */
320
321/* These are values from r300_reg/r300_reg.h - they are known to be correct
322 and are here so we can use one register file instead of several
323 - Vladimir */
324#define R300_GB_VAP_RASTER_VTX_FMT_0 0x4000
325# define R300_GB_VAP_RASTER_VTX_FMT_0__POS_PRESENT (1<<0)
326# define R300_GB_VAP_RASTER_VTX_FMT_0__COLOR_0_PRESENT (1<<1)
327# define R300_GB_VAP_RASTER_VTX_FMT_0__COLOR_1_PRESENT (1<<2)
328# define R300_GB_VAP_RASTER_VTX_FMT_0__COLOR_2_PRESENT (1<<3)
329# define R300_GB_VAP_RASTER_VTX_FMT_0__COLOR_3_PRESENT (1<<4)
330# define R300_GB_VAP_RASTER_VTX_FMT_0__COLOR_SPACE (0xf<<5)
331# define R300_GB_VAP_RASTER_VTX_FMT_0__PT_SIZE_PRESENT (0x1<<16)
332
333#define R300_GB_VAP_RASTER_VTX_FMT_1 0x4004
334 /* each of the following is 3 bits wide, specifies number
335 of components */
336# define R300_GB_VAP_RASTER_VTX_FMT_1__TEX_0_COMP_CNT_SHIFT 0
337# define R300_GB_VAP_RASTER_VTX_FMT_1__TEX_1_COMP_CNT_SHIFT 3
338# define R300_GB_VAP_RASTER_VTX_FMT_1__TEX_2_COMP_CNT_SHIFT 6
339# define R300_GB_VAP_RASTER_VTX_FMT_1__TEX_3_COMP_CNT_SHIFT 9
340# define R300_GB_VAP_RASTER_VTX_FMT_1__TEX_4_COMP_CNT_SHIFT 12
341# define R300_GB_VAP_RASTER_VTX_FMT_1__TEX_5_COMP_CNT_SHIFT 15
342# define R300_GB_VAP_RASTER_VTX_FMT_1__TEX_6_COMP_CNT_SHIFT 18
343# define R300_GB_VAP_RASTER_VTX_FMT_1__TEX_7_COMP_CNT_SHIFT 21
344
345/* UNK30 seems to enable point-to-quad transformation on textures
346 (or something closely related to that).
347 This bit is rather fatal at the moment due to missing support on the pixel shader side */
348#define R300_GB_ENABLE 0x4008
349# define R300_GB_POINT_STUFF_ENABLE (1<<0)
350# define R300_GB_LINE_STUFF_ENABLE (1<<1)
351# define R300_GB_TRIANGLE_STUFF_ENABLE (1<<2)
352# define R300_GB_STENCIL_AUTO_ENABLE (1<<4)
353# define R300_GB_UNK30 (1<<30)
354 /* each of the following is 2 bits wide */
355#define R300_GB_TEX_REPLICATE 0
356#define R300_GB_TEX_ST 1
357#define R300_GB_TEX_STR 2
358# define R300_GB_TEX0_SOURCE_SHIFT 16
359# define R300_GB_TEX1_SOURCE_SHIFT 18
360# define R300_GB_TEX2_SOURCE_SHIFT 20
361# define R300_GB_TEX3_SOURCE_SHIFT 22
362# define R300_GB_TEX4_SOURCE_SHIFT 24
363# define R300_GB_TEX5_SOURCE_SHIFT 26
364# define R300_GB_TEX6_SOURCE_SHIFT 28
365# define R300_GB_TEX7_SOURCE_SHIFT 30
366
367/* MSPOS - positions for multisample antialiasing (?) */
368#define R300_GB_MSPOS0 0x4010
369 /* shifts - each of the fields is 4 bits */
370# define R300_GB_MSPOS0__MS_X0_SHIFT 0
371# define R300_GB_MSPOS0__MS_Y0_SHIFT 4
372# define R300_GB_MSPOS0__MS_X1_SHIFT 8
373# define R300_GB_MSPOS0__MS_Y1_SHIFT 12
374# define R300_GB_MSPOS0__MS_X2_SHIFT 16
375# define R300_GB_MSPOS0__MS_Y2_SHIFT 20
376# define R300_GB_MSPOS0__MSBD0_Y 24
377# define R300_GB_MSPOS0__MSBD0_X 28
378
379#define R300_GB_MSPOS1 0x4014
380# define R300_GB_MSPOS1__MS_X3_SHIFT 0
381# define R300_GB_MSPOS1__MS_Y3_SHIFT 4
382# define R300_GB_MSPOS1__MS_X4_SHIFT 8
383# define R300_GB_MSPOS1__MS_Y4_SHIFT 12
384# define R300_GB_MSPOS1__MS_X5_SHIFT 16
385# define R300_GB_MSPOS1__MS_Y5_SHIFT 20
386# define R300_GB_MSPOS1__MSBD1 24
387
388
389#define R300_GB_TILE_CONFIG 0x4018
390# define R300_GB_TILE_ENABLE (1<<0)
391# define R300_GB_TILE_PIPE_COUNT_RV300 0
392# define R300_GB_TILE_PIPE_COUNT_R300 (3<<1)
393# define R300_GB_TILE_PIPE_COUNT_R420 (7<<1)
394# define R300_GB_TILE_SIZE_8 0
395# define R300_GB_TILE_SIZE_16 (1<<4)
396# define R300_GB_TILE_SIZE_32 (2<<4)
397# define R300_GB_SUPER_SIZE_1 (0<<6)
398# define R300_GB_SUPER_SIZE_2 (1<<6)
399# define R300_GB_SUPER_SIZE_4 (2<<6)
400# define R300_GB_SUPER_SIZE_8 (3<<6)
401# define R300_GB_SUPER_SIZE_16 (4<<6)
402# define R300_GB_SUPER_SIZE_32 (5<<6)
403# define R300_GB_SUPER_SIZE_64 (6<<6)
404# define R300_GB_SUPER_SIZE_128 (7<<6)
405# define R300_GB_SUPER_X_SHIFT 9 /* 3 bits wide */
406# define R300_GB_SUPER_Y_SHIFT 12 /* 3 bits wide */
407# define R300_GB_SUPER_TILE_A 0
408# define R300_GB_SUPER_TILE_B (1<<15)
409# define R300_GB_SUBPIXEL_1_12 0
410# define R300_GB_SUBPIXEL_1_16 (1<<16)
411
412#define R300_GB_FIFO_SIZE 0x4024
413 /* each of the following is 2 bits wide */
414#define R300_GB_FIFO_SIZE_32 0
415#define R300_GB_FIFO_SIZE_64 1
416#define R300_GB_FIFO_SIZE_128 2
417#define R300_GB_FIFO_SIZE_256 3
418# define R300_SC_IFIFO_SIZE_SHIFT 0
419# define R300_SC_TZFIFO_SIZE_SHIFT 2
420# define R300_SC_BFIFO_SIZE_SHIFT 4
421
422# define R300_US_OFIFO_SIZE_SHIFT 12
423# define R300_US_WFIFO_SIZE_SHIFT 14
424 /* the following use the same constants as above, but the meaning
425 is times 2 (i.e. instead of 32 words it means 64) */
426# define R300_RS_TFIFO_SIZE_SHIFT 6
427# define R300_RS_CFIFO_SIZE_SHIFT 8
428# define R300_US_RAM_SIZE_SHIFT 10
429 /* watermarks, 3 bits wide */
430# define R300_RS_HIGHWATER_COL_SHIFT 16
431# define R300_RS_HIGHWATER_TEX_SHIFT 19
432# define R300_OFIFO_HIGHWATER_SHIFT 22 /* two bits only */
433# define R300_CUBE_FIFO_HIGHWATER_COL_SHIFT 24
434
435#define R300_GB_SELECT 0x401C
436# define R300_GB_FOG_SELECT_C0A 0
437# define R300_GB_FOG_SELECT_C1A 1
438# define R300_GB_FOG_SELECT_C2A 2
439# define R300_GB_FOG_SELECT_C3A 3
440# define R300_GB_FOG_SELECT_1_1_W 4
441# define R300_GB_FOG_SELECT_Z 5
442# define R300_GB_DEPTH_SELECT_Z 0
443# define R300_GB_DEPTH_SELECT_1_1_W (1<<3)
444# define R300_GB_W_SELECT_1_W 0
445# define R300_GB_W_SELECT_1 (1<<4)
446
447#define R300_GB_AA_CONFIG 0x4020
448# define R300_AA_ENABLE 0x01
449# define R300_AA_SUBSAMPLES_2 0
450# define R300_AA_SUBSAMPLES_3 (1<<1)
451# define R300_AA_SUBSAMPLES_4 (2<<1)
452# define R300_AA_SUBSAMPLES_6 (3<<1)
453
454/* END */
455
456/* gap */
457/* The upper enable bits are guessed, based on fglrx reported limits. */
458#define R300_TX_ENABLE 0x4104
459# define R300_TX_ENABLE_0 (1 << 0)
460# define R300_TX_ENABLE_1 (1 << 1)
461# define R300_TX_ENABLE_2 (1 << 2)
462# define R300_TX_ENABLE_3 (1 << 3)
463# define R300_TX_ENABLE_4 (1 << 4)
464# define R300_TX_ENABLE_5 (1 << 5)
465# define R300_TX_ENABLE_6 (1 << 6)
466# define R300_TX_ENABLE_7 (1 << 7)
467# define R300_TX_ENABLE_8 (1 << 8)
468# define R300_TX_ENABLE_9 (1 << 9)
469# define R300_TX_ENABLE_10 (1 << 10)
470# define R300_TX_ENABLE_11 (1 << 11)
471# define R300_TX_ENABLE_12 (1 << 12)
472# define R300_TX_ENABLE_13 (1 << 13)
473# define R300_TX_ENABLE_14 (1 << 14)
474# define R300_TX_ENABLE_15 (1 << 15)
475
476/* The pointsize is given in multiples of 6. The pointsize can be
477// enormous: Clear() renders a single point that fills the entire
478// framebuffer. */
479#define R300_RE_POINTSIZE 0x421C
480# define R300_POINTSIZE_Y_SHIFT 0
481# define R300_POINTSIZE_Y_MASK (0xFFFF << 0) /* GUESS */
482# define R300_POINTSIZE_X_SHIFT 16
483# define R300_POINTSIZE_X_MASK (0xFFFF << 16) /* GUESS */
484# define R300_POINTSIZE_MAX (R300_POINTSIZE_Y_MASK / 6)
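/* Sketch: both size fields are in sixths of a pixel per the comment above,
 * so an n-pixel point is encoded as 6*n in X and Y (illustration only). */
static inline unsigned int r300_example_pointsize(unsigned int pixels)
{
	return ((6 * pixels) << R300_POINTSIZE_X_SHIFT) |
	       ((6 * pixels) << R300_POINTSIZE_Y_SHIFT);
}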
485
486/* The line width is given in multiples of 6.
487 In default mode lines are classified as vertical lines.
488 HO: horizontal
489 VE: vertical or horizontal
490 HO & VE: no classification
491*/
492#define R300_RE_LINE_CNT 0x4234
493# define R300_LINESIZE_SHIFT 0
494# define R300_LINESIZE_MASK (0xFFFF << 0) /* GUESS */
495# define R300_LINESIZE_MAX (R300_LINESIZE_MASK / 6)
496# define R300_LINE_CNT_HO (1 << 16)
497# define R300_LINE_CNT_VE (1 << 17)
498
499/* Some sort of scale or clamp value for texcoordless textures. */
500#define R300_RE_UNK4238 0x4238
501
502#define R300_RE_SHADE_MODEL 0x4278
503# define R300_RE_SHADE_MODEL_SMOOTH 0x3aaaa
504# define R300_RE_SHADE_MODEL_FLAT 0x39595
505
506/* Dangerous */
507#define R300_RE_POLYGON_MODE 0x4288
508# define R300_PM_ENABLED (1 << 0)
509# define R300_PM_FRONT_POINT (0 << 0)
510# define R300_PM_BACK_POINT (0 << 0)
511# define R300_PM_FRONT_LINE (1 << 4)
512# define R300_PM_FRONT_FILL (1 << 5)
513# define R300_PM_BACK_LINE (1 << 7)
514# define R300_PM_BACK_FILL (1 << 8)
515
516/* Not sure why there are duplicates of the factor and constant values.
517 My best guess so far is that there are separate zbiases for test and write.
518 Ordering might be wrong.
519 Some of the tests indicate that fgl has a fallback implementation of zbias
520 via pixel shaders. */
521#define R300_RE_ZBIAS_T_FACTOR 0x42A4
522#define R300_RE_ZBIAS_T_CONSTANT 0x42A8
523#define R300_RE_ZBIAS_W_FACTOR 0x42AC
524#define R300_RE_ZBIAS_W_CONSTANT 0x42B0
525
526/* This register needs to be set to (1<<1) for RV350 to correctly
527 perform depth test (see --vb-triangles in r300_demo)
528 Don't know about other chips. - Vladimir
529 This is set to 3 when GL_POLYGON_OFFSET_FILL is on.
530 My guess is that there are two bits for each zbias primitive (FILL, LINE, POINT).
531 One to enable depth test and one for depth write.
532 Yet this doesn't explain why depth writes work ...
533 */
534#define R300_RE_OCCLUSION_CNTL 0x42B4
535# define R300_OCCLUSION_ON (1<<1)
536
537#define R300_RE_CULL_CNTL 0x42B8
538# define R300_CULL_FRONT (1 << 0)
539# define R300_CULL_BACK (1 << 1)
540# define R300_FRONT_FACE_CCW (0 << 2)
541# define R300_FRONT_FACE_CW (1 << 2)
542
543
544/* BEGIN: Rasterization / Interpolators - many guesses
545// 0_UNKNOWN_18 has always been set except for clear operations.
546// TC_CNT is the number of incoming texture coordinate sets (i.e. it depends
547// on the vertex program, *not* the fragment program) */
548#define R300_RS_CNTL_0 0x4300
549# define R300_RS_CNTL_TC_CNT_SHIFT 2
550# define R300_RS_CNTL_TC_CNT_MASK (7 << 2)
551# define R300_RS_CNTL_CI_CNT_SHIFT 7 /* number of color interpolators used */
552# define R300_RS_CNTL_0_UNKNOWN_18 (1 << 18)
553/* Guess: RS_CNTL_1 holds the index of the highest used RS_ROUTE_n register. */
554#define R300_RS_CNTL_1 0x4304
555
556/* gap */
557/* Only used for texture coordinates.
558// Use the source field to route texture coordinate input from the vertex program
559// to the desired interpolator. Note that the source field is relative to the
560// outputs the vertex program *actually* writes. If a vertex program only writes
561// texcoord[1], this will be source index 0.
562// Set INTERP_USED on all interpolators that produce data used by the
563// fragment program. INTERP_USED looks like a swizzling mask, but
564// I haven't seen it used that way.
565//
566// Note: The _UNKNOWN constants are always set in their respective register.
567// I don't know if this is necessary. */
568#define R300_RS_INTERP_0 0x4310
569#define R300_RS_INTERP_1 0x4314
570# define R300_RS_INTERP_1_UNKNOWN 0x40
571#define R300_RS_INTERP_2 0x4318
572# define R300_RS_INTERP_2_UNKNOWN 0x80
573#define R300_RS_INTERP_3 0x431C
574# define R300_RS_INTERP_3_UNKNOWN 0xC0
575#define R300_RS_INTERP_4 0x4320
576#define R300_RS_INTERP_5 0x4324
577#define R300_RS_INTERP_6 0x4328
578#define R300_RS_INTERP_7 0x432C
579# define R300_RS_INTERP_SRC_SHIFT 2
580# define R300_RS_INTERP_SRC_MASK (7 << 2)
581# define R300_RS_INTERP_USED 0x00D10000
582
583/* These DWORDs control how vertex data is routed into fragment program
584// registers, after interpolators. */
585#define R300_RS_ROUTE_0 0x4330
586#define R300_RS_ROUTE_1 0x4334
587#define R300_RS_ROUTE_2 0x4338
588#define R300_RS_ROUTE_3 0x433C /* GUESS */
589#define R300_RS_ROUTE_4 0x4340 /* GUESS */
590#define R300_RS_ROUTE_5 0x4344 /* GUESS */
591#define R300_RS_ROUTE_6 0x4348 /* GUESS */
592#define R300_RS_ROUTE_7 0x434C /* GUESS */
593# define R300_RS_ROUTE_SOURCE_INTERP_0 0
594# define R300_RS_ROUTE_SOURCE_INTERP_1 1
595# define R300_RS_ROUTE_SOURCE_INTERP_2 2
596# define R300_RS_ROUTE_SOURCE_INTERP_3 3
597# define R300_RS_ROUTE_SOURCE_INTERP_4 4
598# define R300_RS_ROUTE_SOURCE_INTERP_5 5 /* GUESS */
599# define R300_RS_ROUTE_SOURCE_INTERP_6 6 /* GUESS */
600# define R300_RS_ROUTE_SOURCE_INTERP_7 7 /* GUESS */
601# define R300_RS_ROUTE_ENABLE (1 << 3) /* GUESS */
602# define R300_RS_ROUTE_DEST_SHIFT 6
603# define R300_RS_ROUTE_DEST_MASK (31 << 6) /* GUESS */
604
605/* Special handling for color: When the fragment program uses color,
606// the ROUTE_0_COLOR bit is set and ROUTE_0_COLOR_DEST contains the
607// color register index. */
608# define R300_RS_ROUTE_0_COLOR (1 << 14)
609# define R300_RS_ROUTE_0_COLOR_DEST_SHIFT 17
610# define R300_RS_ROUTE_0_COLOR_DEST_MASK (31 << 17) /* GUESS */
611/* As above, but for secondary color */
612# define R300_RS_ROUTE_1_COLOR1 (1 << 14)
613# define R300_RS_ROUTE_1_COLOR1_DEST_SHIFT 17
614# define R300_RS_ROUTE_1_COLOR1_DEST_MASK (31 << 17)
615# define R300_RS_ROUTE_1_UNKNOWN11 (1 << 11)
616/* END */
617
618/* BEGIN: Scissors and cliprects
619// There are four clipping rectangles. Their corner coordinates are inclusive.
620// Every pixel is assigned a number from 0 to 15 by setting bits 0-3 depending
621// on whether the pixel is inside cliprects 0-3, respectively. For example,
622// if a pixel is inside cliprects 0 and 1, but outside 2 and 3, it is assigned
623// the number 3 (binary 0011).
624// Iff the bit corresponding to the pixel's number in RE_CLIPRECT_CNTL is set,
625// the pixel is rasterized.
626//
627// In addition to this, there is a scissors rectangle. Only pixels inside the
628// scissors rectangle are drawn. (coordinates are inclusive)
629//
630// For some reason, the top-left corner of the framebuffer is at (1440, 1440)
631// for the purpose of clipping and scissors. */
632#define R300_RE_CLIPRECT_TL_0 0x43B0
633#define R300_RE_CLIPRECT_BR_0 0x43B4
634#define R300_RE_CLIPRECT_TL_1 0x43B8
635#define R300_RE_CLIPRECT_BR_1 0x43BC
636#define R300_RE_CLIPRECT_TL_2 0x43C0
637#define R300_RE_CLIPRECT_BR_2 0x43C4
638#define R300_RE_CLIPRECT_TL_3 0x43C8
639#define R300_RE_CLIPRECT_BR_3 0x43CC
640# define R300_CLIPRECT_OFFSET 1440
641# define R300_CLIPRECT_MASK 0x1FFF
642# define R300_CLIPRECT_X_SHIFT 0
643# define R300_CLIPRECT_X_MASK (0x1FFF << 0)
644# define R300_CLIPRECT_Y_SHIFT 13
645# define R300_CLIPRECT_Y_MASK (0x1FFF << 13)
646#define R300_RE_CLIPRECT_CNTL 0x43D0
647# define R300_CLIP_OUT (1 << 0)
648# define R300_CLIP_0 (1 << 1)
649# define R300_CLIP_1 (1 << 2)
650# define R300_CLIP_10 (1 << 3)
651# define R300_CLIP_2 (1 << 4)
652# define R300_CLIP_20 (1 << 5)
653# define R300_CLIP_21 (1 << 6)
654# define R300_CLIP_210 (1 << 7)
655# define R300_CLIP_3 (1 << 8)
656# define R300_CLIP_30 (1 << 9)
657# define R300_CLIP_31 (1 << 10)
658# define R300_CLIP_310 (1 << 11)
659# define R300_CLIP_32 (1 << 12)
660# define R300_CLIP_320 (1 << 13)
661# define R300_CLIP_321 (1 << 14)
662# define R300_CLIP_3210 (1 << 15)
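/* Sketch: pack one cliprect corner, applying the hardware's (1440, 1440)
 * origin described above (illustration only, not part of the header). */
static inline unsigned int r300_example_cliprect_corner(int x, int y)
{
	return (((x + R300_CLIPRECT_OFFSET) & R300_CLIPRECT_MASK)
			<< R300_CLIPRECT_X_SHIFT) |
	       (((y + R300_CLIPRECT_OFFSET) & R300_CLIPRECT_MASK)
			<< R300_CLIPRECT_Y_SHIFT);
}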
663
664/* gap */
665#define R300_RE_SCISSORS_TL 0x43E0
666#define R300_RE_SCISSORS_BR 0x43E4
667# define R300_SCISSORS_OFFSET 1440
668# define R300_SCISSORS_X_SHIFT 0
669# define R300_SCISSORS_X_MASK (0x1FFF << 0)
670# define R300_SCISSORS_Y_SHIFT 13
671# define R300_SCISSORS_Y_MASK (0x1FFF << 13)
672/* END */
673
674/* BEGIN: Texture specification
675// The texture specification dwords are grouped by meaning and not by texture unit.
676// This means that e.g. the offset for texture image unit N is found in register
677// TX_OFFSET_0 + (4*N) */
678#define R300_TX_FILTER_0 0x4400
679# define R300_TX_REPEAT 0
680# define R300_TX_MIRRORED 1
681# define R300_TX_CLAMP 4
682# define R300_TX_CLAMP_TO_EDGE 2
683# define R300_TX_CLAMP_TO_BORDER 6
684# define R300_TX_WRAP_S_SHIFT 0
685# define R300_TX_WRAP_S_MASK (7 << 0)
686# define R300_TX_WRAP_T_SHIFT 3
687# define R300_TX_WRAP_T_MASK (7 << 3)
688# define R300_TX_WRAP_Q_SHIFT 6
689# define R300_TX_WRAP_Q_MASK (7 << 6)
690# define R300_TX_MAG_FILTER_NEAREST (1 << 9)
691# define R300_TX_MAG_FILTER_LINEAR (2 << 9)
692# define R300_TX_MAG_FILTER_MASK (3 << 9)
693# define R300_TX_MIN_FILTER_NEAREST (1 << 11)
694# define R300_TX_MIN_FILTER_LINEAR (2 << 11)
695# define R300_TX_MIN_FILTER_NEAREST_MIP_NEAREST (5 << 11)
696# define R300_TX_MIN_FILTER_NEAREST_MIP_LINEAR (9 << 11)
697# define R300_TX_MIN_FILTER_LINEAR_MIP_NEAREST (6 << 11)
698# define R300_TX_MIN_FILTER_LINEAR_MIP_LINEAR (10 << 11)
699
700/* NOTE: NEAREST doesn't seem to exist.
701 I'm not setting MAG_FILTER_MASK and (3 << 11) for all
702 anisotropy modes because that would void the selected mag filter */
703# define R300_TX_MIN_FILTER_ANISO_NEAREST ((0 << 13) /*|R300_TX_MAG_FILTER_MASK|(3<<11)*/)
704# define R300_TX_MIN_FILTER_ANISO_LINEAR ((0 << 13) /*|R300_TX_MAG_FILTER_MASK|(3<<11)*/)
705# define R300_TX_MIN_FILTER_ANISO_NEAREST_MIP_NEAREST ((1 << 13) /*|R300_TX_MAG_FILTER_MASK|(3<<11)*/)
706# define R300_TX_MIN_FILTER_ANISO_NEAREST_MIP_LINEAR ((2 << 13) /*|R300_TX_MAG_FILTER_MASK|(3<<11)*/)
707# define R300_TX_MIN_FILTER_MASK ( (15 << 11) | (3 << 13) )
708# define R300_TX_MAX_ANISO_1_TO_1 (0 << 21)
709# define R300_TX_MAX_ANISO_2_TO_1 (2 << 21)
710# define R300_TX_MAX_ANISO_4_TO_1 (4 << 21)
711# define R300_TX_MAX_ANISO_8_TO_1 (6 << 21)
712# define R300_TX_MAX_ANISO_16_TO_1 (8 << 21)
713# define R300_TX_MAX_ANISO_MASK (14 << 21)
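/* Sketch: a repeat-wrapped, trilinear (LINEAR_MIP_LINEAR min, LINEAR mag)
 * filter word for TX_FILTER_n (illustration only). */
static inline unsigned int r300_example_tx_filter(void)
{
	return (R300_TX_REPEAT << R300_TX_WRAP_S_SHIFT) |
	       (R300_TX_REPEAT << R300_TX_WRAP_T_SHIFT) |
	       R300_TX_MAG_FILTER_LINEAR |
	       R300_TX_MIN_FILTER_LINEAR_MIP_LINEAR;
}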
714
715#define R300_TX_UNK1_0 0x4440
716# define R300_LOD_BIAS_MASK 0x1fff
717
718#define R300_TX_SIZE_0 0x4480
719# define R300_TX_WIDTHMASK_SHIFT 0
720# define R300_TX_WIDTHMASK_MASK (2047 << 0)
721# define R300_TX_HEIGHTMASK_SHIFT 11
722# define R300_TX_HEIGHTMASK_MASK (2047 << 11)
723# define R300_TX_UNK23 (1 << 23)
724# define R300_TX_SIZE_SHIFT 26 /* largest of width, height */
725# define R300_TX_SIZE_MASK (15 << 26)
726#define R300_TX_FORMAT_0 0x44C0
727 /* The interpretation of the format word by Wladimir van der Laan */
728 /* The X, Y, Z and W refer to the layout of the components.
729 They are given meanings as R, G, B and Alpha by the swizzle
730 specification */
731# define R300_TX_FORMAT_X8 0x0
732# define R300_TX_FORMAT_X16 0x1
733# define R300_TX_FORMAT_Y4X4 0x2
734# define R300_TX_FORMAT_Y8X8 0x3
735# define R300_TX_FORMAT_Y16X16 0x4
736# define R300_TX_FORMAT_Z3Y3X2 0x5
737# define R300_TX_FORMAT_Z5Y6X5 0x6
738# define R300_TX_FORMAT_Z6Y5X5 0x7
739# define R300_TX_FORMAT_Z11Y11X10 0x8
740# define R300_TX_FORMAT_Z10Y11X11 0x9
741# define R300_TX_FORMAT_W4Z4Y4X4 0xA
742# define R300_TX_FORMAT_W1Z5Y5X5 0xB
743# define R300_TX_FORMAT_W8Z8Y8X8 0xC
744# define R300_TX_FORMAT_W2Z10Y10X10 0xD
745# define R300_TX_FORMAT_W16Z16Y16X16 0xE
746# define R300_TX_FORMAT_DXT1 0xF
747# define R300_TX_FORMAT_DXT3 0x10
748# define R300_TX_FORMAT_DXT5 0x11
749# define R300_TX_FORMAT_D3DMFT_CxV8U8 0x12 /* no swizzle */
750# define R300_TX_FORMAT_A8R8G8B8 0x13 /* no swizzle */
751# define R300_TX_FORMAT_B8G8_B8G8 0x14 /* no swizzle */
752# define R300_TX_FORMAT_G8R8_G8B8 0x15 /* no swizzle */
753 /* 0x16 - some 16 bit green format.. ?? */
754# define R300_TX_FORMAT_UNK25 (1 << 25) /* no swizzle */
755
756 /* gap */
757 /* Floating point formats */
758 /* Note - hardware supports both 16 and 32 bit floating point */
759# define R300_TX_FORMAT_FL_I16 0x18
760# define R300_TX_FORMAT_FL_I16A16 0x19
761# define R300_TX_FORMAT_FL_R16G16B16A16 0x1A
762# define R300_TX_FORMAT_FL_I32 0x1B
763# define R300_TX_FORMAT_FL_I32A32 0x1C
764# define R300_TX_FORMAT_FL_R32G32B32A32 0x1D
765 /* alpha modes, convenience mostly */
766 /* if you have alpha, pick constant appropriate to the
767 number of channels (1 for I8, 2 for I8A8, 4 for R8G8B8A8, etc.) */
768# define R300_TX_FORMAT_ALPHA_1CH 0x000
769# define R300_TX_FORMAT_ALPHA_2CH 0x200
770# define R300_TX_FORMAT_ALPHA_4CH 0x600
771# define R300_TX_FORMAT_ALPHA_NONE 0xA00
772 /* Swizzling */
773 /* constants */
774# define R300_TX_FORMAT_X 0
775# define R300_TX_FORMAT_Y 1
776# define R300_TX_FORMAT_Z 2
777# define R300_TX_FORMAT_W 3
778# define R300_TX_FORMAT_ZERO 4
779# define R300_TX_FORMAT_ONE 5
780# define R300_TX_FORMAT_CUT_Z 6 /* 2.0*Z, everything above 1.0 is set to 0.0 */
781# define R300_TX_FORMAT_CUT_W 7 /* 2.0*W, everything above 1.0 is set to 0.0 */
782
783# define R300_TX_FORMAT_B_SHIFT 18
784# define R300_TX_FORMAT_G_SHIFT 15
785# define R300_TX_FORMAT_R_SHIFT 12
786# define R300_TX_FORMAT_A_SHIFT 9
787 /* Convenience macro to take care of layout and swizzling */
788# define R300_EASY_TX_FORMAT(B, G, R, A, FMT) (\
789 ((R300_TX_FORMAT_##B)<<R300_TX_FORMAT_B_SHIFT) \
790 | ((R300_TX_FORMAT_##G)<<R300_TX_FORMAT_G_SHIFT) \
791 | ((R300_TX_FORMAT_##R)<<R300_TX_FORMAT_R_SHIFT) \
792 | ((R300_TX_FORMAT_##A)<<R300_TX_FORMAT_A_SHIFT) \
793 | (R300_TX_FORMAT_##FMT) \
794 )
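/* Usage sketch for the macro above, shown for mechanics only: feed B from X,
 * G from Y, R from Z and A from W of a W8Z8Y8X8 texel. Whether this mapping
 * matches any particular GL format is a guess that would need testing. */
static inline unsigned int r300_example_txformat(void)
{
	return R300_EASY_TX_FORMAT(X, Y, Z, W, W8Z8Y8X8);
}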
795 /* These can be ORed with result of R300_EASY_TX_FORMAT() */
796 /* We don't really know what they do. Take values from a constant color ? */
797# define R300_TX_FORMAT_CONST_X (1<<5)
798# define R300_TX_FORMAT_CONST_Y (2<<5)
799# define R300_TX_FORMAT_CONST_Z (4<<5)
800# define R300_TX_FORMAT_CONST_W (8<<5)
801
802# define R300_TX_FORMAT_YUV_MODE 0x00800000
803
804#define R300_TX_OFFSET_0 0x4540
805/* BEGIN: Guess from R200 */
806# define R300_TXO_ENDIAN_NO_SWAP (0 << 0)
807# define R300_TXO_ENDIAN_BYTE_SWAP (1 << 0)
808# define R300_TXO_ENDIAN_WORD_SWAP (2 << 0)
809# define R300_TXO_ENDIAN_HALFDW_SWAP (3 << 0)
810# define R300_TXO_OFFSET_MASK 0xffffffe0
811# define R300_TXO_OFFSET_SHIFT 5
812/* END */
813#define R300_TX_UNK4_0 0x4580
814#define R300_TX_BORDER_COLOR_0 0x45C0 //ff00ff00 == { 0, 1.0, 0, 1.0 }
815
816/* END */
817
818/* BEGIN: Fragment program instruction set
819// Fragment programs are written directly into register space.
820// There are separate instruction streams for texture instructions and ALU
821// instructions.
822// In order to synchronize these streams, the program is divided into up
823// to 4 nodes. Each node begins with a number of TEX operations, followed
824// by a number of ALU operations.
825// The first node can have zero TEX ops, all subsequent nodes must have at least
826// one TEX op.
827// All nodes must have at least one ALU op.
828//
829// The index of the last node is stored in PFS_CNTL_0: A value of 0 means
830// 1 node, a value of 3 means 4 nodes.
831// The total amount of instructions is defined in PFS_CNTL_2. The offsets are
832// offsets into the respective instruction streams, while *_END points to the
833// last instruction relative to this offset. */
834#define R300_PFS_CNTL_0 0x4600
835# define R300_PFS_CNTL_LAST_NODES_SHIFT 0
836# define R300_PFS_CNTL_LAST_NODES_MASK (3 << 0)
837# define R300_PFS_CNTL_FIRST_NODE_HAS_TEX (1 << 3)
838#define R300_PFS_CNTL_1 0x4604
839/* There is an unshifted value here which has so far always been equal to the
840// index of the highest used temporary register. */
841#define R300_PFS_CNTL_2 0x4608
842# define R300_PFS_CNTL_ALU_OFFSET_SHIFT 0
843# define R300_PFS_CNTL_ALU_OFFSET_MASK (63 << 0)
844# define R300_PFS_CNTL_ALU_END_SHIFT 6
845# define R300_PFS_CNTL_ALU_END_MASK (63 << 6)
846# define R300_PFS_CNTL_TEX_OFFSET_SHIFT 12
847# define R300_PFS_CNTL_TEX_OFFSET_MASK (31 << 12) /* GUESS */
848# define R300_PFS_CNTL_TEX_END_SHIFT 18
849# define R300_PFS_CNTL_TEX_END_MASK (31 << 18) /* GUESS */
850
851/* gap */
852/* Nodes are stored backwards. The last active node is always stored in
853// PFS_NODE_3.
854// Example: In a 2-node program, NODE_0 and NODE_1 are set to 0. The
855// first node is stored in NODE_2, the second node is stored in NODE_3.
856//
857// Offsets are relative to the master offset from PFS_CNTL_2.
858// LAST_NODE is set for the last node, and only for the last node. */
859#define R300_PFS_NODE_0 0x4610
860#define R300_PFS_NODE_1 0x4614
861#define R300_PFS_NODE_2 0x4618
862#define R300_PFS_NODE_3 0x461C
863# define R300_PFS_NODE_ALU_OFFSET_SHIFT 0
864# define R300_PFS_NODE_ALU_OFFSET_MASK (63 << 0)
865# define R300_PFS_NODE_ALU_END_SHIFT 6
866# define R300_PFS_NODE_ALU_END_MASK (63 << 6)
867# define R300_PFS_NODE_TEX_OFFSET_SHIFT 12
868# define R300_PFS_NODE_TEX_OFFSET_MASK (31 << 12)
869# define R300_PFS_NODE_TEX_END_SHIFT 17
870# define R300_PFS_NODE_TEX_END_MASK (31 << 17)
871# define R300_PFS_NODE_LAST_NODE (1 << 22)
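/* Sketch: a node word for a node with one TEX op and two ALU ops, both
 * streams starting at offset 0 (the END fields point at the last
 * instruction, per the comments above). Illustration only. */
static inline unsigned int r300_example_pfs_node(int last)
{
	unsigned int w = (0 << R300_PFS_NODE_ALU_OFFSET_SHIFT) |
			 (1 << R300_PFS_NODE_ALU_END_SHIFT) |	/* 2 ALU ops */
			 (0 << R300_PFS_NODE_TEX_OFFSET_SHIFT) |
			 (0 << R300_PFS_NODE_TEX_END_SHIFT);	/* 1 TEX op */
	if (last)
		w |= R300_PFS_NODE_LAST_NODE;
	return w;
}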
872
873/* TEX
874// As far as I can tell, texture instructions cannot write into output
875// registers directly. A subsequent ALU instruction is always necessary,
876// even if it's just MAD o0, r0, 1, 0 */
877#define R300_PFS_TEXI_0 0x4620
878# define R300_FPITX_SRC_SHIFT 0
879# define R300_FPITX_SRC_MASK (31 << 0)
880# define R300_FPITX_SRC_CONST (1 << 5) /* GUESS */
881# define R300_FPITX_DST_SHIFT 6
882# define R300_FPITX_DST_MASK (31 << 6)
883# define R300_FPITX_IMAGE_SHIFT 11
884# define R300_FPITX_IMAGE_MASK (15 << 11) /* GUESS based on layout and native limits */
885/* Unsure if these are opcodes, or some kind of bitfield, but this is how
886 * they were set when I checked
887 */
888# define R300_FPITX_OPCODE_SHIFT 15
889# define R300_FPITX_OP_TEX 1
890# define R300_FPITX_OP_TXP 3
891# define R300_FPITX_OP_TXB 4
892
893/* ALU
894// The ALU instruction register blocks are enumerated according to the order
895// in which fglrx emits them. I assume there is space for 64 instructions, since
896// each block has space for a maximum of 64 DWORDs, and this matches reported
897// native limits.
898//
899// The basic functional block seems to be one MAD for each color and alpha,
900// and an adder that adds all components after the MUL.
901// - ADD, MUL, MAD etc.: use MAD with appropriate neutral operands
902// - DP4: Use OUTC_DP4, OUTA_DP4
903// - DP3: Use OUTC_DP3, OUTA_DP4, appropriate alpha operands
904// - DPH: Use OUTC_DP4, OUTA_DP4, appropriate alpha operands
905// - CMP: If ARG2 < 0, return ARG1, else return ARG0
906// - FLR: use FRC+MAD
907// - XPD: use MAD+MAD
908// - SGE, SLT: use MAD+CMP
909// - RSQ: use ABS modifier for argument
910// - Use OUTC_REPL_ALPHA to write results of an alpha-only operation (e.g. RCP)
911// into color register
912// - apparently, there's no quick DST operation
913// - fglrx set FPI2_UNKNOWN_31 on a "MAD fragment.color, tmp0, tmp1, tmp2"
914// - fglrx set FPI2_UNKNOWN_31 on a "MAX r2, r1, c0"
915// - fglrx once set FPI0_UNKNOWN_31 on a "FRC r1, r1"
916//
917// Operand selection
918// First stage selects three sources from the available registers and
919// constant parameters. This is defined in INSTR1 (color) and INSTR3 (alpha).
920// fglrx sorts the three source fields: Registers before constants,
921// lower indices before higher indices; I do not know whether this is necessary.
922// fglrx fills unused sources with "read constant 0"
923// According to specs, you cannot select more than two different constants.
924//
925// Second stage selects the operands from the sources. This is defined in
926// INSTR0 (color) and INSTR2 (alpha). You can also select the special constants
927// zero and one.
928// Swizzling and negation happens in this stage, as well.
929//
930// Important: Color and alpha seem to be mostly separate, i.e. their source
931// selection appears to be fully independent (the register storage is probably
932// physically split into a color and an alpha section).
933// However (because of the apparent physical split), there is some interaction
934// WRT swizzling. If, for example, you want to load an R component into an
935// Alpha operand, this R component is taken from a *color* source, not from
936// an alpha source. The corresponding register doesn't even have to appear in
937// the alpha sources list. (I hope this all makes sense to you)
938//
939// Destination selection
940// The destination register index is in FPI1 (color) and FPI3 (alpha) together
941// with enable bits.
942// There are separate enable bits for writing into temporary registers
943// (DSTC_REG_* /DSTA_REG) and program output registers (DSTC_OUTPUT_* /DSTA_OUTPUT).
944// You can write to both at once, or not write at all (the same index
945// must be used for both).
946//
947// Note: There is a special form for LRP
948// - Argument order is the same as in ARB_fragment_program.
949// - Operation is MAD
950// - ARG1 is set to ARGC_SRC1C_LRP/ARGC_SRC1A_LRP
951// - Set FPI0/FPI2_SPECIAL_LRP
952// Arbitrary LRP (including support for swizzling) requires vanilla MAD+MAD */
953#define R300_PFS_INSTR1_0 0x46C0
954# define R300_FPI1_SRC0C_SHIFT 0
955# define R300_FPI1_SRC0C_MASK (31 << 0)
956# define R300_FPI1_SRC0C_CONST (1 << 5)
957# define R300_FPI1_SRC1C_SHIFT 6
958# define R300_FPI1_SRC1C_MASK (31 << 6)
959# define R300_FPI1_SRC1C_CONST (1 << 11)
960# define R300_FPI1_SRC2C_SHIFT 12
961# define R300_FPI1_SRC2C_MASK (31 << 12)
962# define R300_FPI1_SRC2C_CONST (1 << 17)
963# define R300_FPI1_DSTC_SHIFT 18
964# define R300_FPI1_DSTC_MASK (31 << 18)
965# define R300_FPI1_DSTC_REG_X (1 << 23)
966# define R300_FPI1_DSTC_REG_Y (1 << 24)
967# define R300_FPI1_DSTC_REG_Z (1 << 25)
968# define R300_FPI1_DSTC_OUTPUT_X (1 << 26)
969# define R300_FPI1_DSTC_OUTPUT_Y (1 << 27)
970# define R300_FPI1_DSTC_OUTPUT_Z (1 << 28)
971
972#define R300_PFS_INSTR3_0 0x47C0
973# define R300_FPI3_SRC0A_SHIFT 0
974# define R300_FPI3_SRC0A_MASK (31 << 0)
975# define R300_FPI3_SRC0A_CONST (1 << 5)
976# define R300_FPI3_SRC1A_SHIFT 6
977# define R300_FPI3_SRC1A_MASK (31 << 6)
978# define R300_FPI3_SRC1A_CONST (1 << 11)
979# define R300_FPI3_SRC2A_SHIFT 12
980# define R300_FPI3_SRC2A_MASK (31 << 12)
981# define R300_FPI3_SRC2A_CONST (1 << 17)
982# define R300_FPI3_DSTA_SHIFT 18
983# define R300_FPI3_DSTA_MASK (31 << 18)
984# define R300_FPI3_DSTA_REG (1 << 23)
985# define R300_FPI3_DSTA_OUTPUT (1 << 24)
986
987#define R300_PFS_INSTR0_0 0x48C0
988# define R300_FPI0_ARGC_SRC0C_XYZ 0
989# define R300_FPI0_ARGC_SRC0C_XXX 1
990# define R300_FPI0_ARGC_SRC0C_YYY 2
991# define R300_FPI0_ARGC_SRC0C_ZZZ 3
992# define R300_FPI0_ARGC_SRC1C_XYZ 4
993# define R300_FPI0_ARGC_SRC1C_XXX 5
994# define R300_FPI0_ARGC_SRC1C_YYY 6
995# define R300_FPI0_ARGC_SRC1C_ZZZ 7
996# define R300_FPI0_ARGC_SRC2C_XYZ 8
997# define R300_FPI0_ARGC_SRC2C_XXX 9
998# define R300_FPI0_ARGC_SRC2C_YYY 10
999# define R300_FPI0_ARGC_SRC2C_ZZZ 11
1000# define R300_FPI0_ARGC_SRC0A 12
1001# define R300_FPI0_ARGC_SRC1A 13
1002# define R300_FPI0_ARGC_SRC2A 14
1003# define R300_FPI0_ARGC_SRC1C_LRP 15
1004# define R300_FPI0_ARGC_ZERO 20
1005# define R300_FPI0_ARGC_ONE 21
1006# define R300_FPI0_ARGC_HALF 22 /* GUESS */
1007# define R300_FPI0_ARGC_SRC0C_YZX 23
1008# define R300_FPI0_ARGC_SRC1C_YZX 24
1009# define R300_FPI0_ARGC_SRC2C_YZX 25
1010# define R300_FPI0_ARGC_SRC0C_ZXY 26
1011# define R300_FPI0_ARGC_SRC1C_ZXY 27
1012# define R300_FPI0_ARGC_SRC2C_ZXY 28
1013# define R300_FPI0_ARGC_SRC0CA_WZY 29
1014# define R300_FPI0_ARGC_SRC1CA_WZY 30
1015# define R300_FPI0_ARGC_SRC2CA_WZY 31
1016
1017# define R300_FPI0_ARG0C_SHIFT 0
1018# define R300_FPI0_ARG0C_MASK (31 << 0)
1019# define R300_FPI0_ARG0C_NEG (1 << 5)
1020# define R300_FPI0_ARG0C_ABS (1 << 6)
1021# define R300_FPI0_ARG1C_SHIFT 7
1022# define R300_FPI0_ARG1C_MASK (31 << 7)
1023# define R300_FPI0_ARG1C_NEG (1 << 12)
1024# define R300_FPI0_ARG1C_ABS (1 << 13)
1025# define R300_FPI0_ARG2C_SHIFT 14
1026# define R300_FPI0_ARG2C_MASK (31 << 14)
1027# define R300_FPI0_ARG2C_NEG (1 << 19)
1028# define R300_FPI0_ARG2C_ABS (1 << 20)
1029# define R300_FPI0_SPECIAL_LRP (1 << 21)
1030# define R300_FPI0_OUTC_MAD (0 << 23)
1031# define R300_FPI0_OUTC_DP3 (1 << 23)
1032# define R300_FPI0_OUTC_DP4 (2 << 23)
1033# define R300_FPI0_OUTC_MIN (4 << 23)
1034# define R300_FPI0_OUTC_MAX (5 << 23)
1035# define R300_FPI0_OUTC_CMP (8 << 23)
1036# define R300_FPI0_OUTC_FRC (9 << 23)
1037# define R300_FPI0_OUTC_REPL_ALPHA (10 << 23)
1038# define R300_FPI0_OUTC_SAT (1 << 30)
1039# define R300_FPI0_UNKNOWN_31 (1 << 31)
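/* Sketch of the "MAD with neutral operands" advice above: the FPI0 word of
 * a color MOV, i.e. MAD(src0.xyz, 1, 0). FPI1 would still have to select
 * the source register and the destination; alpha needs a matching
 * FPI2/FPI3 pair. Illustration only. */
static inline unsigned int r300_example_fpi0_mov(void)
{
	return (R300_FPI0_ARGC_SRC0C_XYZ << R300_FPI0_ARG0C_SHIFT) |
	       (R300_FPI0_ARGC_ONE << R300_FPI0_ARG1C_SHIFT) |
	       (R300_FPI0_ARGC_ZERO << R300_FPI0_ARG2C_SHIFT) |
	       R300_FPI0_OUTC_MAD;
}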
1040
1041#define R300_PFS_INSTR2_0 0x49C0
1042# define R300_FPI2_ARGA_SRC0C_X 0
1043# define R300_FPI2_ARGA_SRC0C_Y 1
1044# define R300_FPI2_ARGA_SRC0C_Z 2
1045# define R300_FPI2_ARGA_SRC1C_X 3
1046# define R300_FPI2_ARGA_SRC1C_Y 4
1047# define R300_FPI2_ARGA_SRC1C_Z 5
1048# define R300_FPI2_ARGA_SRC2C_X 6
1049# define R300_FPI2_ARGA_SRC2C_Y 7
1050# define R300_FPI2_ARGA_SRC2C_Z 8
1051# define R300_FPI2_ARGA_SRC0A 9
1052# define R300_FPI2_ARGA_SRC1A 10
1053# define R300_FPI2_ARGA_SRC2A 11
1054# define R300_FPI2_ARGA_SRC1A_LRP 15
1055# define R300_FPI2_ARGA_ZERO 16
1056# define R300_FPI2_ARGA_ONE 17
1057# define R300_FPI2_ARGA_HALF 18 /* GUESS */
1058
1059# define R300_FPI2_ARG0A_SHIFT 0
1060# define R300_FPI2_ARG0A_MASK (31 << 0)
1061# define R300_FPI2_ARG0A_NEG (1 << 5)
1062# define R300_FPI2_ARG0A_ABS (1 << 6) /* GUESS */
1063# define R300_FPI2_ARG1A_SHIFT 7
1064# define R300_FPI2_ARG1A_MASK (31 << 7)
1065# define R300_FPI2_ARG1A_NEG (1 << 12)
1066# define R300_FPI2_ARG1A_ABS (1 << 13) /* GUESS */
1067# define R300_FPI2_ARG2A_SHIFT 14
1068# define R300_FPI2_ARG2A_MASK (31 << 14)
1069# define R300_FPI2_ARG2A_NEG (1 << 19)
1070# define R300_FPI2_ARG2A_ABS (1 << 20) /* GUESS */
1071# define R300_FPI2_SPECIAL_LRP (1 << 21)
1072# define R300_FPI2_OUTA_MAD (0 << 23)
1073# define R300_FPI2_OUTA_DP4 (1 << 23)
1074# define R300_FPI2_OUTA_MIN (2 << 23)
1075# define R300_FPI2_OUTA_MAX (3 << 23)
1076# define R300_FPI2_OUTA_CMP (6 << 23)
1077# define R300_FPI2_OUTA_FRC (7 << 23)
1078# define R300_FPI2_OUTA_EX2 (8 << 23)
1079# define R300_FPI2_OUTA_LG2 (9 << 23)
1080# define R300_FPI2_OUTA_RCP (10 << 23)
1081# define R300_FPI2_OUTA_RSQ (11 << 23)
1082# define R300_FPI2_OUTA_SAT (1 << 30)
1083# define R300_FPI2_UNKNOWN_31 (1 << 31)
1084/* END */
1085
1086/* gap */
1087#define R300_PP_ALPHA_TEST 0x4BD4
1088# define R300_REF_ALPHA_MASK 0x000000ff
1089# define R300_ALPHA_TEST_FAIL (0 << 8)
1090# define R300_ALPHA_TEST_LESS (1 << 8)
1091# define R300_ALPHA_TEST_LEQUAL (3 << 8)
1092# define R300_ALPHA_TEST_EQUAL (2 << 8)
1093# define R300_ALPHA_TEST_GEQUAL (6 << 8)
1094# define R300_ALPHA_TEST_GREATER (4 << 8)
1095# define R300_ALPHA_TEST_NEQUAL (5 << 8)
1096# define R300_ALPHA_TEST_PASS (7 << 8)
1097# define R300_ALPHA_TEST_OP_MASK (7 << 8)
1098# define R300_ALPHA_TEST_ENABLE (1 << 11)
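/* Sketch: enable an alpha test that passes fragments with alpha greater
 * than `ref` (illustration only). */
static inline unsigned int r300_example_alpha_test(unsigned int ref)
{
	return (ref & R300_REF_ALPHA_MASK) |
	       R300_ALPHA_TEST_GREATER |
	       R300_ALPHA_TEST_ENABLE;
}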
1099
1100/* gap */
1101/* Fragment program parameters in 7.16 floating point */
1102#define R300_PFS_PARAM_0_X 0x4C00
1103#define R300_PFS_PARAM_0_Y 0x4C04
1104#define R300_PFS_PARAM_0_Z 0x4C08
1105#define R300_PFS_PARAM_0_W 0x4C0C
1106/* GUESS: PARAM_31 is last, based on native limits reported by fglrx */
1107#define R300_PFS_PARAM_31_X 0x4DF0
1108#define R300_PFS_PARAM_31_Y 0x4DF4
1109#define R300_PFS_PARAM_31_Z 0x4DF8
1110#define R300_PFS_PARAM_31_W 0x4DFC
1111
1112/* Notes:
1113// - AFAIK fglrx always sets BLEND_UNKNOWN when blending is used in the application
1114// - AFAIK fglrx always sets BLEND_NO_SEPARATE when CBLEND and ABLEND are set to the same
1115// function (both registers are always set up completely in any case)
1116// - Most blend flags are simply copied from R200 and not tested yet */
1117#define R300_RB3D_CBLEND 0x4E04
1118#define R300_RB3D_ABLEND 0x4E08
1119 /* the following only appear in CBLEND */
1120# define R300_BLEND_ENABLE (1 << 0)
1121# define R300_BLEND_UNKNOWN (3 << 1)
1122# define R300_BLEND_NO_SEPARATE (1 << 3)
1123 /* the following are shared between CBLEND and ABLEND */
1124# define R300_FCN_MASK (3 << 12)
1125# define R300_COMB_FCN_ADD_CLAMP (0 << 12)
1126# define R300_COMB_FCN_ADD_NOCLAMP (1 << 12)
1127# define R300_COMB_FCN_SUB_CLAMP (2 << 12)
1128# define R300_COMB_FCN_SUB_NOCLAMP (3 << 12)
1129# define R300_SRC_BLEND_GL_ZERO (32 << 16)
1130# define R300_SRC_BLEND_GL_ONE (33 << 16)
1131# define R300_SRC_BLEND_GL_SRC_COLOR (34 << 16)
1132# define R300_SRC_BLEND_GL_ONE_MINUS_SRC_COLOR (35 << 16)
1133# define R300_SRC_BLEND_GL_DST_COLOR (36 << 16)
1134# define R300_SRC_BLEND_GL_ONE_MINUS_DST_COLOR (37 << 16)
1135# define R300_SRC_BLEND_GL_SRC_ALPHA (38 << 16)
1136# define R300_SRC_BLEND_GL_ONE_MINUS_SRC_ALPHA (39 << 16)
1137# define R300_SRC_BLEND_GL_DST_ALPHA (40 << 16)
1138# define R300_SRC_BLEND_GL_ONE_MINUS_DST_ALPHA (41 << 16)
1139# define R300_SRC_BLEND_GL_SRC_ALPHA_SATURATE (42 << 16)
1140# define R300_SRC_BLEND_MASK (63 << 16)
1141# define R300_DST_BLEND_GL_ZERO (32 << 24)
1142# define R300_DST_BLEND_GL_ONE (33 << 24)
1143# define R300_DST_BLEND_GL_SRC_COLOR (34 << 24)
1144# define R300_DST_BLEND_GL_ONE_MINUS_SRC_COLOR (35 << 24)
1145# define R300_DST_BLEND_GL_DST_COLOR (36 << 24)
1146# define R300_DST_BLEND_GL_ONE_MINUS_DST_COLOR (37 << 24)
1147# define R300_DST_BLEND_GL_SRC_ALPHA (38 << 24)
1148# define R300_DST_BLEND_GL_ONE_MINUS_SRC_ALPHA (39 << 24)
1149# define R300_DST_BLEND_GL_DST_ALPHA (40 << 24)
1150# define R300_DST_BLEND_GL_ONE_MINUS_DST_ALPHA (41 << 24)
1151# define R300_DST_BLEND_MASK (63 << 24)
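/* Sketch: the classic GL_SRC_ALPHA / GL_ONE_MINUS_SRC_ALPHA blend as a
 * CBLEND word; per the notes above, ABLEND would be set up with the same
 * function (illustration only). */
static inline unsigned int r300_example_cblend(void)
{
	return R300_BLEND_ENABLE | R300_COMB_FCN_ADD_CLAMP |
	       R300_SRC_BLEND_GL_SRC_ALPHA |
	       R300_DST_BLEND_GL_ONE_MINUS_SRC_ALPHA;
}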
1152#define R300_RB3D_COLORMASK 0x4E0C
1153# define R300_COLORMASK0_B (1<<0)
1154# define R300_COLORMASK0_G (1<<1)
1155# define R300_COLORMASK0_R (1<<2)
1156# define R300_COLORMASK0_A (1<<3)
1157
1158/* gap */
1159#define R300_RB3D_COLOROFFSET0 0x4E28
1160# define R300_COLOROFFSET_MASK 0xFFFFFFF0 /* GUESS */
1161#define R300_RB3D_COLOROFFSET1 0x4E2C /* GUESS */
1162#define R300_RB3D_COLOROFFSET2 0x4E30 /* GUESS */
1163#define R300_RB3D_COLOROFFSET3 0x4E34 /* GUESS */
1164/* gap */
1165/* Bit 16: Larger tiles
1166// Bit 17: 4x2 tiles
1167// Bit 18: Extremely weird tile-like mode, but with some pixels duplicated? */
1168#define R300_RB3D_COLORPITCH0 0x4E38
1169# define R300_COLORPITCH_MASK 0x00001FF8 /* GUESS */
1170# define R300_COLOR_TILE_ENABLE (1 << 16) /* GUESS */
1171# define R300_COLOR_MICROTILE_ENABLE (1 << 17) /* GUESS */
1172# define R300_COLOR_ENDIAN_NO_SWAP (0 << 18) /* GUESS */
1173# define R300_COLOR_ENDIAN_WORD_SWAP (1 << 18) /* GUESS */
1174# define R300_COLOR_ENDIAN_DWORD_SWAP (2 << 18) /* GUESS */
1175# define R300_COLOR_FORMAT_RGB565 (2 << 22)
1176# define R300_COLOR_FORMAT_ARGB8888 (3 << 22)
1177#define R300_RB3D_COLORPITCH1 0x4E3C /* GUESS */
1178#define R300_RB3D_COLORPITCH2 0x4E40 /* GUESS */
1179#define R300_RB3D_COLORPITCH3 0x4E44 /* GUESS */
1180
1181/* gap */
1182/* Guess by Vladimir.
1183// Set to 0A before 3D operations, set to 02 afterwards. */
1184#define R300_RB3D_DSTCACHE_CTLSTAT 0x4E4C
1185# define R300_RB3D_DSTCACHE_02 0x00000002
1186# define R300_RB3D_DSTCACHE_0A 0x0000000A
1187
1188/* gap */
1189/* There seems to be no "write only" setting, so use Z-test = ALWAYS for this. */
1190/* Bit (1<<8) is the "test" bit, so plain write is 6 - vd */
1191#define R300_RB3D_ZSTENCIL_CNTL_0 0x4F00
1192# define R300_RB3D_Z_DISABLED_1 0x00000010 /* GUESS */
1193# define R300_RB3D_Z_DISABLED_2 0x00000014 /* GUESS */
1194# define R300_RB3D_Z_TEST 0x00000012
1195# define R300_RB3D_Z_TEST_AND_WRITE 0x00000016
1196# define R300_RB3D_Z_WRITE_ONLY 0x00000006
1201# define R300_RB3D_STENCIL_ENABLE 0x00000001
1202
1203#define R300_RB3D_ZSTENCIL_CNTL_1 0x4F04
1204 /* functions */
1205# define R300_ZS_NEVER 0
1206# define R300_ZS_LESS 1
1207# define R300_ZS_LEQUAL 2
1208# define R300_ZS_EQUAL 3
1209# define R300_ZS_GEQUAL 4
1210# define R300_ZS_GREATER 5
1211# define R300_ZS_NOTEQUAL 6
1212# define R300_ZS_ALWAYS 7
1213# define R300_ZS_MASK 7
1214 /* operations */
1215# define R300_ZS_KEEP 0
1216# define R300_ZS_ZERO 1
1217# define R300_ZS_REPLACE 2
1218# define R300_ZS_INCR 3
1219# define R300_ZS_DECR 4
1220# define R300_ZS_INVERT 5
1221# define R300_ZS_INCR_WRAP 6
1222# define R300_ZS_DECR_WRAP 7
1223
1224 /* front and back refer to operations done for front
1225 and back faces, i.e. separate stencil function support */
1226# define R300_RB3D_ZS1_DEPTH_FUNC_SHIFT 0
1227# define R300_RB3D_ZS1_FRONT_FUNC_SHIFT 3
1228# define R300_RB3D_ZS1_FRONT_FAIL_OP_SHIFT 6
1229# define R300_RB3D_ZS1_FRONT_ZPASS_OP_SHIFT 9
1230# define R300_RB3D_ZS1_FRONT_ZFAIL_OP_SHIFT 12
1231# define R300_RB3D_ZS1_BACK_FUNC_SHIFT 15
1232# define R300_RB3D_ZS1_BACK_FAIL_OP_SHIFT 18
1233# define R300_RB3D_ZS1_BACK_ZPASS_OP_SHIFT 21
1234# define R300_RB3D_ZS1_BACK_ZFAIL_OP_SHIFT 24
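/* Sketch: ZSTENCIL_CNTL_1 for depth func LESS with all stencil operations
 * set to KEEP on both faces (illustration only). */
static inline unsigned int r300_example_zs_cntl_1(void)
{
	return (R300_ZS_LESS << R300_RB3D_ZS1_DEPTH_FUNC_SHIFT) |
	       (R300_ZS_ALWAYS << R300_RB3D_ZS1_FRONT_FUNC_SHIFT) |
	       (R300_ZS_KEEP << R300_RB3D_ZS1_FRONT_FAIL_OP_SHIFT) |
	       (R300_ZS_KEEP << R300_RB3D_ZS1_FRONT_ZPASS_OP_SHIFT) |
	       (R300_ZS_KEEP << R300_RB3D_ZS1_FRONT_ZFAIL_OP_SHIFT) |
	       (R300_ZS_ALWAYS << R300_RB3D_ZS1_BACK_FUNC_SHIFT) |
	       (R300_ZS_KEEP << R300_RB3D_ZS1_BACK_FAIL_OP_SHIFT) |
	       (R300_ZS_KEEP << R300_RB3D_ZS1_BACK_ZPASS_OP_SHIFT) |
	       (R300_ZS_KEEP << R300_RB3D_ZS1_BACK_ZFAIL_OP_SHIFT);
}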
1235
1236
1237
1238#define R300_RB3D_ZSTENCIL_CNTL_2 0x4F08
1239# define R300_RB3D_ZS2_STENCIL_REF_SHIFT 0
1240# define R300_RB3D_ZS2_STENCIL_MASK 0xFF
1241# define R300_RB3D_ZS2_STENCIL_MASK_SHIFT 8
1242# define R300_RB3D_ZS2_STENCIL_WRITE_MASK_SHIFT 16
1243
1244/* gap */
1245
1246#define R300_RB3D_ZSTENCIL_FORMAT 0x4F10
1247# define R300_DEPTH_FORMAT_16BIT_INT_Z (0 << 0)
1248# define R300_DEPTH_FORMAT_24BIT_INT_Z (2 << 0)
1249
1250/* gap */
1251#define R300_RB3D_DEPTHOFFSET 0x4F20
1252#define R300_RB3D_DEPTHPITCH 0x4F24
1253# define R300_DEPTHPITCH_MASK 0x00001FF8 /* GUESS */
1254# define R300_DEPTH_TILE_ENABLE (1 << 16) /* GUESS */
1255# define R300_DEPTH_MICROTILE_ENABLE (1 << 17) /* GUESS */
1256# define R300_DEPTH_ENDIAN_NO_SWAP (0 << 18) /* GUESS */
1257# define R300_DEPTH_ENDIAN_WORD_SWAP (1 << 18) /* GUESS */
1258# define R300_DEPTH_ENDIAN_DWORD_SWAP (2 << 18) /* GUESS */
1259
1260/* BEGIN: Vertex program instruction set
1261// Every instruction is four dwords long:
1262// DWORD 0: output and opcode
1263// DWORD 1: first argument
1264// DWORD 2: second argument
1265// DWORD 3: third argument
1266//
1267// Notes:
1268// - ABS r, a is implemented as MAX r, a, -a
1269// - MOV is implemented as ADD to zero
1270// - XPD is implemented as MUL + MAD
1271// - FLR is implemented as FRC + ADD
1272// - apparently, fglrx tries to schedule instructions so that there is at least
1273// one instruction between the write to a temporary and the first read
1274// from said temporary; however, violations of this scheduling are allowed
1275// - register indices seem to be unrelated to OpenGL's aliasing to conventional state
1276// - only one attribute and one parameter can be loaded at a time; however, the
1277// same attribute/parameter can be used for more than one argument
1278// - the second software argument for POW is the third hardware argument (no idea why)
1279// - MAD with only temporaries as input seems to use VPI_OUT_SELECT_MAD_2
1280//
1281// There is some magic surrounding LIT:
1282// The single argument is replicated across all three inputs, but swizzled:
1283// First argument: xyzy
1284// Second argument: xyzx
1285// Third argument: xyzw
1286// Whenever the result is used later in the fragment program, fglrx forces x and w
1287// to be 1.0 in the input selection; I don't know whether this is strictly necessary */
1288#define R300_VPI_OUT_OP_DOT (1 << 0)
1289#define R300_VPI_OUT_OP_MUL (2 << 0)
1290#define R300_VPI_OUT_OP_ADD (3 << 0)
1291#define R300_VPI_OUT_OP_MAD (4 << 0)
1292#define R300_VPI_OUT_OP_DST (5 << 0)
1293#define R300_VPI_OUT_OP_FRC (6 << 0)
1294#define R300_VPI_OUT_OP_MAX (7 << 0)
1295#define R300_VPI_OUT_OP_MIN (8 << 0)
1296#define R300_VPI_OUT_OP_SGE (9 << 0)
1297#define R300_VPI_OUT_OP_SLT (10 << 0)
1298#define R300_VPI_OUT_OP_UNK12 (12 << 0) /* Used in GL_POINT_DISTANCE_ATTENUATION_ARB, vector(scalar, vector) */
1299#define R300_VPI_OUT_OP_EXP (65 << 0)
1300#define R300_VPI_OUT_OP_LOG (66 << 0)
1301#define R300_VPI_OUT_OP_UNK67 (67 << 0) /* Used in fog computations, scalar(scalar) */
1302#define R300_VPI_OUT_OP_LIT (68 << 0)
1303#define R300_VPI_OUT_OP_POW (69 << 0)
1304#define R300_VPI_OUT_OP_RCP (70 << 0)
1305#define R300_VPI_OUT_OP_RSQ (72 << 0)
1306#define R300_VPI_OUT_OP_UNK73 (73 << 0) /* Used in GL_POINT_DISTANCE_ATTENUATION_ARB, scalar(scalar) */
1307#define R300_VPI_OUT_OP_EX2 (75 << 0)
1308#define R300_VPI_OUT_OP_LG2 (76 << 0)
1309#define R300_VPI_OUT_OP_MAD_2 (128 << 0)
1310#define R300_VPI_OUT_OP_UNK129 (129 << 0) /* all temps, vector(scalar, vector, vector) */
1311
1312#define R300_VPI_OUT_REG_CLASS_TEMPORARY (0 << 8)
1313#define R300_VPI_OUT_REG_CLASS_RESULT (2 << 8)
1314#define R300_VPI_OUT_REG_CLASS_MASK (31 << 8)
1315
1316#define R300_VPI_OUT_REG_INDEX_SHIFT 13
1317#define R300_VPI_OUT_REG_INDEX_MASK (31 << 13) /* GUESS based on fglrx native limits */
1318
1319#define R300_VPI_OUT_WRITE_X (1 << 20)
1320#define R300_VPI_OUT_WRITE_Y (1 << 21)
1321#define R300_VPI_OUT_WRITE_Z (1 << 22)
1322#define R300_VPI_OUT_WRITE_W (1 << 23)
1323
1324#define R300_VPI_IN_REG_CLASS_TEMPORARY (0 << 0)
1325#define R300_VPI_IN_REG_CLASS_ATTRIBUTE (1 << 0)
1326#define R300_VPI_IN_REG_CLASS_PARAMETER (2 << 0)
1327#define R300_VPI_IN_REG_CLASS_NONE (9 << 0)
1328#define R300_VPI_IN_REG_CLASS_MASK (31 << 0) /* GUESS */
1329
1330#define R300_VPI_IN_REG_INDEX_SHIFT 5
1331#define R300_VPI_IN_REG_INDEX_MASK (255 << 5) /* GUESS based on fglrx native limits */
1332
1333/* The R300 can select components from the input register arbitrarily.
1334// Use the following constants, shifted by the component shift you
1335// want to select */
1336#define R300_VPI_IN_SELECT_X 0
1337#define R300_VPI_IN_SELECT_Y 1
1338#define R300_VPI_IN_SELECT_Z 2
1339#define R300_VPI_IN_SELECT_W 3
1340#define R300_VPI_IN_SELECT_ZERO 4
1341#define R300_VPI_IN_SELECT_ONE 5
1342#define R300_VPI_IN_SELECT_MASK 7
1343
1344#define R300_VPI_IN_X_SHIFT 13
1345#define R300_VPI_IN_Y_SHIFT 16
1346#define R300_VPI_IN_Z_SHIFT 19
1347#define R300_VPI_IN_W_SHIFT 22
1348
1349#define R300_VPI_IN_NEG_X (1 << 25)
1350#define R300_VPI_IN_NEG_Y (1 << 26)
1351#define R300_VPI_IN_NEG_Z (1 << 27)
1352#define R300_VPI_IN_NEG_W (1 << 28)
1353/* END */
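/* Sketch (illustration only): DWORD 0 of a vertex program "MOV temp[1], a"
 * implemented as ADD to zero per the notes above, plus an argument dword
 * reading temp[0] with an identity swizzle; the second (zero) operand
 * would use R300_VPI_IN_SELECT_ZERO in all four component fields. */
static inline unsigned int r300_example_vpi_out_mov(void)
{
	return R300_VPI_OUT_OP_ADD | R300_VPI_OUT_REG_CLASS_TEMPORARY |
	       (1 << R300_VPI_OUT_REG_INDEX_SHIFT) |
	       R300_VPI_OUT_WRITE_X | R300_VPI_OUT_WRITE_Y |
	       R300_VPI_OUT_WRITE_Z | R300_VPI_OUT_WRITE_W;
}

static inline unsigned int r300_example_vpi_in_temp0(void)
{
	return R300_VPI_IN_REG_CLASS_TEMPORARY |
	       (0 << R300_VPI_IN_REG_INDEX_SHIFT) |
	       (R300_VPI_IN_SELECT_X << R300_VPI_IN_X_SHIFT) |
	       (R300_VPI_IN_SELECT_Y << R300_VPI_IN_Y_SHIFT) |
	       (R300_VPI_IN_SELECT_Z << R300_VPI_IN_Z_SHIFT) |
	       (R300_VPI_IN_SELECT_W << R300_VPI_IN_W_SHIFT);
}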
1354
1355//BEGIN: Packet 3 commands
1356
1357// A primitive emission dword.
1358#define R300_PRIM_TYPE_NONE (0 << 0)
1359#define R300_PRIM_TYPE_POINT (1 << 0)
1360#define R300_PRIM_TYPE_LINE (2 << 0)
1361#define R300_PRIM_TYPE_LINE_STRIP (3 << 0)
1362#define R300_PRIM_TYPE_TRI_LIST (4 << 0)
1363#define R300_PRIM_TYPE_TRI_FAN (5 << 0)
1364#define R300_PRIM_TYPE_TRI_STRIP (6 << 0)
1365#define R300_PRIM_TYPE_TRI_TYPE2 (7 << 0)
1366#define R300_PRIM_TYPE_RECT_LIST (8 << 0)
1367#define R300_PRIM_TYPE_3VRT_POINT_LIST (9 << 0)
1368#define R300_PRIM_TYPE_3VRT_LINE_LIST (10 << 0)
1369#define R300_PRIM_TYPE_POINT_SPRITES (11 << 0) // GUESS (based on r200)
1370#define R300_PRIM_TYPE_LINE_LOOP (12 << 0)
1371#define R300_PRIM_TYPE_QUADS (13 << 0)
1372#define R300_PRIM_TYPE_QUAD_STRIP (14 << 0)
1373#define R300_PRIM_TYPE_POLYGON (15 << 0)
1374#define R300_PRIM_TYPE_MASK 0xF
1375#define R300_PRIM_WALK_IND (1 << 4)
1376#define R300_PRIM_WALK_LIST (2 << 4)
1377#define R300_PRIM_WALK_RING (3 << 4)
1378#define R300_PRIM_WALK_MASK (3 << 4)
1379#define R300_PRIM_COLOR_ORDER_BGRA (0 << 6) // GUESS (based on r200)
1380#define R300_PRIM_COLOR_ORDER_RGBA (1 << 6) // GUESS
1381#define R300_PRIM_NUM_VERTICES_SHIFT 16
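// Sketch (illustration only): the emission dword for an indexed triangle
// list of `count` vertices, as consumed by the draw packets below.
static inline unsigned int r300_example_prim_dword(unsigned int count)
{
	return R300_PRIM_TYPE_TRI_LIST | R300_PRIM_WALK_IND |
	       (count << R300_PRIM_NUM_VERTICES_SHIFT);
}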
1382
1383// Draw a primitive from vertex data in arrays loaded via 3D_LOAD_VBPNTR.
1384// Two parameter dwords:
1385// 0. The first parameter appears to be always 0
1386// 1. The second parameter is a standard primitive emission dword.
1387#define R300_PACKET3_3D_DRAW_VBUF 0x00002800
1388
1389// Specify the full set of vertex arrays as (address, stride).
1390// The first parameter is the number of vertex arrays specified.
1391// The rest of the command is a variable length list of blocks, where
1392// each block is three dwords long and specifies two arrays.
1393// The first dword of a block is split into two words; the less significant
1394// word refers to the first array, the more significant word to the second
1395// array in the block.
1396// The low byte of each word contains the size of an array entry in dwords,
1397// the high byte contains the stride of the array.
1398// The second dword of a block contains the pointer to the first array,
1399// the third dword of a block contains the pointer to the second array.
1400// Note that if the total number of arrays is odd, the third dword of
1401// the last block is omitted.
1402#define R300_PACKET3_3D_LOAD_VBPNTR 0x00002F00
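// Sketch (illustration only): pack the first dword of a 3D_LOAD_VBPNTR
// block from the two arrays' entry sizes (in dwords) and strides, per the
// layout described above.
static inline unsigned int r300_example_vbpntr_word(unsigned int size0,
						    unsigned int stride0,
						    unsigned int size1,
						    unsigned int stride1)
{
	return ((size0 & 0xff) | ((stride0 & 0xff) << 8)) |
	       (((size1 & 0xff) | ((stride1 & 0xff) << 8)) << 16);
}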
1403
1404#define R300_PACKET3_INDX_BUFFER 0x00003300
1405# define R300_EB_UNK1_SHIFT 24
1406# define R300_EB_UNK1 (0x80<<24)
1407# define R300_EB_UNK2 0x0810
1408#define R300_PACKET3_3D_DRAW_INDX_2 0x00003600
1409
1410//END
1411
1412#endif /* _R300_REG_H */
diff --git a/drivers/char/drm/radeon_cp.c b/drivers/char/drm/radeon_cp.c
index 20bcf872b348..6d9080a3ca7e 100644
--- a/drivers/char/drm/radeon_cp.c
+++ b/drivers/char/drm/radeon_cp.c
@@ -32,6 +32,7 @@
32#include "drm.h" 32#include "drm.h"
33#include "radeon_drm.h" 33#include "radeon_drm.h"
34#include "radeon_drv.h" 34#include "radeon_drv.h"
35#include "r300_reg.h"
35 36
36#define RADEON_FIFO_DEBUG 0 37#define RADEON_FIFO_DEBUG 0
37 38
@@ -1151,6 +1152,8 @@ static void radeon_cp_init_ring_buffer( drm_device_t *dev,
1151 1152
1152#if __OS_HAS_AGP 1153#if __OS_HAS_AGP
1153 if ( !dev_priv->is_pci ) { 1154 if ( !dev_priv->is_pci ) {
1155 /* set RADEON_AGP_BASE here instead of relying on X from user space */
1156 RADEON_WRITE(RADEON_AGP_BASE, (unsigned int)dev->agp->base);
1154 RADEON_WRITE( RADEON_CP_RB_RPTR_ADDR, 1157 RADEON_WRITE( RADEON_CP_RB_RPTR_ADDR,
1155 dev_priv->ring_rptr->offset 1158 dev_priv->ring_rptr->offset
1156 - dev->agp->base 1159 - dev->agp->base
@@ -1407,6 +1410,7 @@ static int radeon_do_init_cp( drm_device_t *dev, drm_radeon_init_t *init )
1407 radeon_do_cleanup_cp(dev); 1410 radeon_do_cleanup_cp(dev);
1408 return DRM_ERR(EINVAL); 1411 return DRM_ERR(EINVAL);
1409 } 1412 }
1413 dev->agp_buffer_token = init->buffers_offset;
1410 dev->agp_buffer_map = drm_core_findmap(dev, init->buffers_offset); 1414 dev->agp_buffer_map = drm_core_findmap(dev, init->buffers_offset);
1411 if(!dev->agp_buffer_map) { 1415 if(!dev->agp_buffer_map) {
1412 DRM_ERROR("could not find dma buffer region!\n"); 1416 DRM_ERROR("could not find dma buffer region!\n");
@@ -1625,6 +1629,9 @@ int radeon_cp_init( DRM_IOCTL_ARGS )
1625 1629
1626 DRM_COPY_FROM_USER_IOCTL( init, (drm_radeon_init_t __user *)data, sizeof(init) ); 1630 DRM_COPY_FROM_USER_IOCTL( init, (drm_radeon_init_t __user *)data, sizeof(init) );
1627 1631
1632 if(init.func == RADEON_INIT_R300_CP)
1633 r300_init_reg_flags();
1634
1628 switch ( init.func ) { 1635 switch ( init.func ) {
1629 case RADEON_INIT_CP: 1636 case RADEON_INIT_CP:
1630 case RADEON_INIT_R200_CP: 1637 case RADEON_INIT_R200_CP:
@@ -2039,15 +2046,43 @@ int radeon_driver_preinit(struct drm_device *dev, unsigned long flags)
2039 case CHIP_RV200: 2046 case CHIP_RV200:
2040 case CHIP_R200: 2047 case CHIP_R200:
2041 case CHIP_R300: 2048 case CHIP_R300:
2049 case CHIP_R420:
2042 dev_priv->flags |= CHIP_HAS_HIERZ; 2050 dev_priv->flags |= CHIP_HAS_HIERZ;
2043 break; 2051 break;
2044 default: 2052 default:
2045 /* all other chips have no hierarchical z buffer */ 2053 /* all other chips have no hierarchical z buffer */
2046 break; 2054 break;
2047 } 2055 }
2056
2057 if (drm_device_is_agp(dev))
2058 dev_priv->flags |= CHIP_IS_AGP;
2059
2060 DRM_DEBUG("%s card detected\n",
2061 ((dev_priv->flags & CHIP_IS_AGP) ? "AGP" : "PCI"));
2048 return ret; 2062 return ret;
2049} 2063}
2050 2064
2065int radeon_presetup(struct drm_device *dev)
2066{
2067 int ret;
2068 drm_local_map_t *map;
2069 drm_radeon_private_t *dev_priv = dev->dev_private;
2070
2071 ret = drm_addmap(dev, drm_get_resource_start(dev, 2),
2072 drm_get_resource_len(dev, 2), _DRM_REGISTERS,
2073 _DRM_READ_ONLY, &dev_priv->mmio);
2074 if (ret != 0)
2075 return ret;
2076
2077 ret = drm_addmap(dev, drm_get_resource_start(dev, 0),
2078 drm_get_resource_len(dev, 0), _DRM_FRAME_BUFFER,
2079 _DRM_WRITE_COMBINING, &map);
2080 if (ret != 0)
2081 return ret;
2082
2083 return 0;
2084}
2085
2051int radeon_driver_postcleanup(struct drm_device *dev) 2086int radeon_driver_postcleanup(struct drm_device *dev)
2052{ 2087{
2053 drm_radeon_private_t *dev_priv = dev->dev_private; 2088 drm_radeon_private_t *dev_priv = dev->dev_private;
diff --git a/drivers/char/drm/radeon_drm.h b/drivers/char/drm/radeon_drm.h
index c1e62d047989..3792798270a4 100644
--- a/drivers/char/drm/radeon_drm.h
+++ b/drivers/char/drm/radeon_drm.h
@@ -195,6 +195,52 @@ typedef union {
195#define RADEON_WAIT_2D 0x1 195#define RADEON_WAIT_2D 0x1
196#define RADEON_WAIT_3D 0x2 196#define RADEON_WAIT_3D 0x2
197 197
198/* Allowed parameters for R300_CMD_PACKET3
199 */
200#define R300_CMD_PACKET3_CLEAR 0
201#define R300_CMD_PACKET3_RAW 1
202
203/* Commands understood by cmd_buffer ioctl for R300.
204 * The interface has not been stabilized yet, so some of these may be
205 * removed or reordered before it is.
206 */
207#define R300_CMD_PACKET0 1
208#define R300_CMD_VPU 2 /* emit vertex program upload */
209#define R300_CMD_PACKET3 3 /* emit a packet3 */
210#define R300_CMD_END3D 4 /* emit sequence ending 3d rendering */
211#define R300_CMD_CP_DELAY 5
212#define R300_CMD_DMA_DISCARD 6
213#define R300_CMD_WAIT 7
214# define R300_WAIT_2D 0x1
215# define R300_WAIT_3D 0x2
216# define R300_WAIT_2D_CLEAN 0x3
217# define R300_WAIT_3D_CLEAN 0x4
218
219typedef union {
220 unsigned int u;
221 struct {
222 unsigned char cmd_type, pad0, pad1, pad2;
223 } header;
224 struct {
225 unsigned char cmd_type, count, reglo, reghi;
226 } packet0;
227 struct {
228 unsigned char cmd_type, count, adrlo, adrhi;
229 } vpu;
230 struct {
231 unsigned char cmd_type, packet, pad0, pad1;
232 } packet3;
233 struct {
234 unsigned char cmd_type, packet;
235 unsigned short count; /* amount of packet2 to emit */
236 } delay;
237 struct {
238 unsigned char cmd_type, buf_idx, pad0, pad1;
239 } dma;
240 struct {
241 unsigned char cmd_type, flags, pad0, pad1;
242 } wait;
243} drm_r300_cmd_header_t;
198 244
199#define RADEON_FRONT 0x1 245#define RADEON_FRONT 0x1
200#define RADEON_BACK 0x2 246#define RADEON_BACK 0x2
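To make the new header union concrete, a hedged sketch of filling it for an R300_CMD_PACKET0 command. The assumption that reglo/reghi carry the low and high bytes of the register offset is mine, not stated in the patch:

/* Illustrative only -- not part of the patch. */
static drm_r300_cmd_header_t r300_packet0_header(unsigned int reg,
						 unsigned char count)
{
	drm_r300_cmd_header_t header;

	header.u = 0;
	header.packet0.cmd_type = R300_CMD_PACKET0;
	header.packet0.count = count;			/* register values that follow */
	header.packet0.reglo = reg & 0xff;		/* assumed: low byte of offset */
	header.packet0.reghi = (reg >> 8) & 0xff;	/* assumed: high byte */
	return header;
}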
diff --git a/drivers/char/drm/radeon_drv.c b/drivers/char/drm/radeon_drv.c
index 18e4e5b0952f..e0682f64b400 100644
--- a/drivers/char/drm/radeon_drv.c
+++ b/drivers/char/drm/radeon_drv.c
@@ -76,6 +76,7 @@ static struct drm_driver driver = {
76 .driver_features = DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_PCI_DMA | DRIVER_SG | DRIVER_HAVE_IRQ | DRIVER_HAVE_DMA | DRIVER_IRQ_SHARED | DRIVER_IRQ_VBL, 76 .driver_features = DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_PCI_DMA | DRIVER_SG | DRIVER_HAVE_IRQ | DRIVER_HAVE_DMA | DRIVER_IRQ_SHARED | DRIVER_IRQ_VBL,
77 .dev_priv_size = sizeof(drm_radeon_buf_priv_t), 77 .dev_priv_size = sizeof(drm_radeon_buf_priv_t),
78 .preinit = radeon_driver_preinit, 78 .preinit = radeon_driver_preinit,
79 .presetup = radeon_presetup,
79 .postcleanup = radeon_driver_postcleanup, 80 .postcleanup = radeon_driver_postcleanup,
80 .prerelease = radeon_driver_prerelease, 81 .prerelease = radeon_driver_prerelease,
81 .pretakedown = radeon_driver_pretakedown, 82 .pretakedown = radeon_driver_pretakedown,
diff --git a/drivers/char/drm/radeon_drv.h b/drivers/char/drm/radeon_drv.h
index 771aa80a5e8c..f12a963ede18 100644
--- a/drivers/char/drm/radeon_drv.h
+++ b/drivers/char/drm/radeon_drv.h
@@ -82,9 +82,10 @@
82 * - Add support for r100 cube maps 82 * - Add support for r100 cube maps
83 * 1.16- Add R200_EMIT_PP_TRI_PERF_CNTL packet to support brilinear 83 * 1.16- Add R200_EMIT_PP_TRI_PERF_CNTL packet to support brilinear
84 * texture filtering on r200 84 * texture filtering on r200
85 * 1.17- Add initial support for R300 (3D).
85 */ 86 */
86#define DRIVER_MAJOR 1 87#define DRIVER_MAJOR 1
87#define DRIVER_MINOR 16 88#define DRIVER_MINOR 17
88#define DRIVER_PATCHLEVEL 0 89#define DRIVER_PATCHLEVEL 0
89 90
90#define GET_RING_HEAD(dev_priv) DRM_READ32( (dev_priv)->ring_rptr, 0 ) 91#define GET_RING_HEAD(dev_priv) DRM_READ32( (dev_priv)->ring_rptr, 0 )
@@ -106,7 +107,9 @@ enum radeon_family {
106 CHIP_RV280, 107 CHIP_RV280,
107 CHIP_R300, 108 CHIP_R300,
108 CHIP_RS300, 109 CHIP_RS300,
110 CHIP_R350,
109 CHIP_RV350, 111 CHIP_RV350,
112 CHIP_R420,
110 CHIP_LAST, 113 CHIP_LAST,
111}; 114};
112 115
@@ -290,6 +293,7 @@ extern int radeon_wait_ring( drm_radeon_private_t *dev_priv, int n );
290extern int radeon_do_cp_idle( drm_radeon_private_t *dev_priv ); 293extern int radeon_do_cp_idle( drm_radeon_private_t *dev_priv );
291 294
292extern int radeon_driver_preinit(struct drm_device *dev, unsigned long flags); 295extern int radeon_driver_preinit(struct drm_device *dev, unsigned long flags);
296extern int radeon_presetup(struct drm_device *dev);
293extern int radeon_driver_postcleanup(struct drm_device *dev); 297extern int radeon_driver_postcleanup(struct drm_device *dev);
294 298
295extern int radeon_mem_alloc( DRM_IOCTL_ARGS ); 299extern int radeon_mem_alloc( DRM_IOCTL_ARGS );
@@ -320,6 +324,14 @@ extern int radeon_postcleanup( struct drm_device *dev );
320extern long radeon_compat_ioctl(struct file *filp, unsigned int cmd, 324extern long radeon_compat_ioctl(struct file *filp, unsigned int cmd,
321 unsigned long arg); 325 unsigned long arg);
322 326
327
328/* r300_cmdbuf.c */
329extern void r300_init_reg_flags(void);
330
331extern int r300_do_cp_cmdbuf(drm_device_t* dev, DRMFILE filp,
332 drm_file_t* filp_priv,
333 drm_radeon_cmd_buffer_t* cmdbuf);
334
323/* Flags for stats.boxes 335/* Flags for stats.boxes
324 */ 336 */
325#define RADEON_BOX_DMA_IDLE 0x1 337#define RADEON_BOX_DMA_IDLE 0x1
@@ -357,6 +369,11 @@ extern long radeon_compat_ioctl(struct file *filp, unsigned int cmd,
357#define RADEON_CRTC2_OFFSET 0x0324 369#define RADEON_CRTC2_OFFSET 0x0324
358#define RADEON_CRTC2_OFFSET_CNTL 0x0328 370#define RADEON_CRTC2_OFFSET_CNTL 0x0328
359 371
372#define RADEON_MPP_TB_CONFIG 0x01c0
373#define RADEON_MEM_CNTL 0x0140
374#define RADEON_MEM_SDRAM_MODE_REG 0x0158
375#define RADEON_AGP_BASE 0x0170
376
360#define RADEON_RB3D_COLOROFFSET 0x1c40 377#define RADEON_RB3D_COLOROFFSET 0x1c40
361#define RADEON_RB3D_COLORPITCH 0x1c48 378#define RADEON_RB3D_COLORPITCH 0x1c48
362 379
@@ -651,16 +668,27 @@ extern long radeon_compat_ioctl(struct file *filp, unsigned int cmd,
651#define RADEON_CP_PACKET1 0x40000000 668#define RADEON_CP_PACKET1 0x40000000
652#define RADEON_CP_PACKET2 0x80000000 669#define RADEON_CP_PACKET2 0x80000000
653#define RADEON_CP_PACKET3 0xC0000000 670#define RADEON_CP_PACKET3 0xC0000000
671# define RADEON_CP_NOP 0x00001000
672# define RADEON_CP_NEXT_CHAR 0x00001900
673# define RADEON_CP_PLY_NEXTSCAN 0x00001D00
674# define RADEON_CP_SET_SCISSORS 0x00001E00
675 /* GEN_INDX_PRIM is unsupported starting with R300 */
654# define RADEON_3D_RNDR_GEN_INDX_PRIM 0x00002300 676# define RADEON_3D_RNDR_GEN_INDX_PRIM 0x00002300
655# define RADEON_WAIT_FOR_IDLE 0x00002600 677# define RADEON_WAIT_FOR_IDLE 0x00002600
656# define RADEON_3D_DRAW_VBUF 0x00002800 678# define RADEON_3D_DRAW_VBUF 0x00002800
657# define RADEON_3D_DRAW_IMMD 0x00002900 679# define RADEON_3D_DRAW_IMMD 0x00002900
658# define RADEON_3D_DRAW_INDX 0x00002A00 680# define RADEON_3D_DRAW_INDX 0x00002A00
681# define RADEON_CP_LOAD_PALETTE 0x00002C00
659# define RADEON_3D_LOAD_VBPNTR 0x00002F00 682# define RADEON_3D_LOAD_VBPNTR 0x00002F00
660# define RADEON_MPEG_IDCT_MACROBLOCK 0x00003000 683# define RADEON_MPEG_IDCT_MACROBLOCK 0x00003000
661# define RADEON_MPEG_IDCT_MACROBLOCK_REV 0x00003100 684# define RADEON_MPEG_IDCT_MACROBLOCK_REV 0x00003100
662# define RADEON_3D_CLEAR_ZMASK 0x00003200 685# define RADEON_3D_CLEAR_ZMASK 0x00003200
686# define RADEON_CP_INDX_BUFFER 0x00003300
687# define RADEON_CP_3D_DRAW_VBUF_2 0x00003400
688# define RADEON_CP_3D_DRAW_IMMD_2 0x00003500
689# define RADEON_CP_3D_DRAW_INDX_2 0x00003600
663# define RADEON_3D_CLEAR_HIZ 0x00003700 690# define RADEON_3D_CLEAR_HIZ 0x00003700
691# define RADEON_CP_3D_CLEAR_CMASK 0x00003802
664# define RADEON_CNTL_HOSTDATA_BLT 0x00009400 692# define RADEON_CNTL_HOSTDATA_BLT 0x00009400
665# define RADEON_CNTL_PAINT_MULTI 0x00009A00 693# define RADEON_CNTL_PAINT_MULTI 0x00009A00
666# define RADEON_CNTL_BITBLT_MULTI 0x00009B00 694# define RADEON_CNTL_BITBLT_MULTI 0x00009B00
diff --git a/drivers/char/drm/radeon_state.c b/drivers/char/drm/radeon_state.c
index 1f79e249146c..64a3e3a406ef 100644
--- a/drivers/char/drm/radeon_state.c
+++ b/drivers/char/drm/radeon_state.c
@@ -1493,7 +1493,7 @@ static void radeon_cp_dispatch_indices( drm_device_t *dev,
1493 1493
1494} 1494}
1495 1495
1496#define RADEON_MAX_TEXTURE_SIZE (RADEON_BUFFER_SIZE - 8 * sizeof(u32)) 1496#define RADEON_MAX_TEXTURE_SIZE RADEON_BUFFER_SIZE
1497 1497
1498static int radeon_cp_dispatch_texture( DRMFILE filp, 1498static int radeon_cp_dispatch_texture( DRMFILE filp,
1499 drm_device_t *dev, 1499 drm_device_t *dev,
@@ -1506,10 +1506,11 @@ static int radeon_cp_dispatch_texture( DRMFILE filp,
1506 u32 format; 1506 u32 format;
1507 u32 *buffer; 1507 u32 *buffer;
1508 const u8 __user *data; 1508 const u8 __user *data;
1509 int size, dwords, tex_width, blit_width; 1509 int size, dwords, tex_width, blit_width, spitch;
1510 u32 height; 1510 u32 height;
1511 int i; 1511 int i;
1512 u32 texpitch, microtile; 1512 u32 texpitch, microtile;
1513 u32 offset;
1513 RING_LOCALS; 1514 RING_LOCALS;
1514 1515
1515 DRM_GET_PRIV_WITH_RETURN( filp_priv, filp ); 1516 DRM_GET_PRIV_WITH_RETURN( filp_priv, filp );
@@ -1530,17 +1531,6 @@ static int radeon_cp_dispatch_texture( DRMFILE filp,
1530 RADEON_WAIT_UNTIL_IDLE(); 1531 RADEON_WAIT_UNTIL_IDLE();
1531 ADVANCE_RING(); 1532 ADVANCE_RING();
1532 1533
1533#ifdef __BIG_ENDIAN
1534 /* The Mesa texture functions provide the data in little endian as the
1535 * chip wants it, but we need to compensate for the fact that the CP
1536 * ring gets byte-swapped
1537 */
1538 BEGIN_RING( 2 );
1539 OUT_RING_REG( RADEON_RBBM_GUICNTL, RADEON_HOST_DATA_SWAP_32BIT );
1540 ADVANCE_RING();
1541#endif
1542
1543
1544 /* The compiler won't optimize away a division by a variable, 1534 /* The compiler won't optimize away a division by a variable,
1545 * even if the only legal values are powers of two. Thus, we'll 1535 * even if the only legal values are powers of two. Thus, we'll
1546 * use a shift instead. 1536 * use a shift instead.
@@ -1572,6 +1562,10 @@ static int radeon_cp_dispatch_texture( DRMFILE filp,
1572 DRM_ERROR( "invalid texture format %d\n", tex->format ); 1562 DRM_ERROR( "invalid texture format %d\n", tex->format );
1573 return DRM_ERR(EINVAL); 1563 return DRM_ERR(EINVAL);
1574 } 1564 }
1565 spitch = blit_width >> 6;
1566 if (spitch == 0 && image->height > 1)
1567 return DRM_ERR(EINVAL);
1568
1575 texpitch = tex->pitch; 1569 texpitch = tex->pitch;
1576 if ((texpitch << 22) & RADEON_DST_TILE_MICRO) { 1570 if ((texpitch << 22) & RADEON_DST_TILE_MICRO) {
1577 microtile = 1; 1571 microtile = 1;
@@ -1624,25 +1618,6 @@ static int radeon_cp_dispatch_texture( DRMFILE filp,
1624 */ 1618 */
1625 buffer = (u32*)((char*)dev->agp_buffer_map->handle + buf->offset); 1619 buffer = (u32*)((char*)dev->agp_buffer_map->handle + buf->offset);
1626 dwords = size / 4; 1620 dwords = size / 4;
1627 buffer[0] = CP_PACKET3( RADEON_CNTL_HOSTDATA_BLT, dwords + 6 );
1628 buffer[1] = (RADEON_GMC_DST_PITCH_OFFSET_CNTL |
1629 RADEON_GMC_BRUSH_NONE |
1630 (format << 8) |
1631 RADEON_GMC_SRC_DATATYPE_COLOR |
1632 RADEON_ROP3_S |
1633 RADEON_DP_SRC_SOURCE_HOST_DATA |
1634 RADEON_GMC_CLR_CMP_CNTL_DIS |
1635 RADEON_GMC_WR_MSK_DIS);
1636
1637 buffer[2] = (texpitch << 22) | (tex->offset >> 10);
1638 buffer[3] = 0xffffffff;
1639 buffer[4] = 0xffffffff;
1640 buffer[5] = (image->y << 16) | image->x;
1641 buffer[6] = (height << 16) | image->width;
1642 buffer[7] = dwords;
1643 buffer += 8;
1644
1645
1646 1621
1647 if (microtile) { 1622 if (microtile) {
1648 /* texture micro tiling in use, minimum texture width is thus 16 bytes. 1623 /* texture micro tiling in use, minimum texture width is thus 16 bytes.
@@ -1750,9 +1725,28 @@ static int radeon_cp_dispatch_texture( DRMFILE filp,
1750 } 1725 }
1751 1726
1752 buf->filp = filp; 1727 buf->filp = filp;
1753 buf->used = (dwords + 8) * sizeof(u32); 1728 buf->used = size;
1754 radeon_cp_dispatch_indirect( dev, buf, 0, buf->used ); 1729 offset = dev_priv->gart_buffers_offset + buf->offset;
1755 radeon_cp_discard_buffer( dev, buf ); 1730 BEGIN_RING(9);
1731 OUT_RING(CP_PACKET3(RADEON_CNTL_BITBLT_MULTI, 5));
1732 OUT_RING(RADEON_GMC_SRC_PITCH_OFFSET_CNTL |
1733 RADEON_GMC_DST_PITCH_OFFSET_CNTL |
1734 RADEON_GMC_BRUSH_NONE |
1735 (format << 8) |
1736 RADEON_GMC_SRC_DATATYPE_COLOR |
1737 RADEON_ROP3_S |
1738 RADEON_DP_SRC_SOURCE_MEMORY |
1739 RADEON_GMC_CLR_CMP_CNTL_DIS |
1740 RADEON_GMC_WR_MSK_DIS );
1741 OUT_RING((spitch << 22) | (offset >> 10));
1742 OUT_RING((texpitch << 22) | (tex->offset >> 10));
1743 OUT_RING(0);
1744 OUT_RING((image->x << 16) | image->y);
1745 OUT_RING((image->width << 16) | height);
1746 RADEON_WAIT_UNTIL_2D_IDLE();
1747 ADVANCE_RING();
1748
1749 radeon_cp_discard_buffer(dev, buf);
1756 1750
1757 /* Update the input parameters for next time */ 1751 /* Update the input parameters for next time */
1758 image->y += height; 1752 image->y += height;
@@ -2797,6 +2791,17 @@ static int radeon_cp_cmdbuf( DRM_IOCTL_ARGS )
2797 2791
2798 orig_nbox = cmdbuf.nbox; 2792 orig_nbox = cmdbuf.nbox;
2799 2793
2794 if(dev_priv->microcode_version == UCODE_R300) {
2795 int temp;
2796 temp=r300_do_cp_cmdbuf(dev, filp, filp_priv, &cmdbuf);
2797
2798 if (orig_bufsz != 0)
2799 drm_free(kbuf, orig_bufsz, DRM_MEM_DRIVER);
2800
2801 return temp;
2802 }
2803
2804 /* microcode_version != r300 */
2800 while ( cmdbuf.bufsz >= sizeof(header) ) { 2805 while ( cmdbuf.bufsz >= sizeof(header) ) {
2801 2806
2802 header.i = *(int *)cmdbuf.buf; 2807 header.i = *(int *)cmdbuf.buf;
diff --git a/drivers/char/drm/savage_bci.c b/drivers/char/drm/savage_bci.c
new file mode 100644
index 000000000000..2fd40bac7c97
--- /dev/null
+++ b/drivers/char/drm/savage_bci.c
@@ -0,0 +1,1096 @@
1/* savage_bci.c -- BCI support for Savage
2 *
3 * Copyright 2004 Felix Kuehling
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sub license,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the
14 * next paragraph) shall be included in all copies or substantial portions
15 * of the Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
18 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
19 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
20 * NON-INFRINGEMENT. IN NO EVENT SHALL FELIX KUEHLING BE LIABLE FOR
21 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
22 * CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
23 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
24 */
25#include "drmP.h"
26#include "savage_drm.h"
27#include "savage_drv.h"
28
29/* Need a long timeout: shadow status updates can take a while,
30 * and so can waiting for events when the queue is full. */
31#define SAVAGE_DEFAULT_USEC_TIMEOUT 1000000 /* 1s */
32#define SAVAGE_EVENT_USEC_TIMEOUT 5000000 /* 5s */
33#define SAVAGE_FREELIST_DEBUG 0
34
35static int
36savage_bci_wait_fifo_shadow(drm_savage_private_t *dev_priv, unsigned int n)
37{
38 uint32_t mask = dev_priv->status_used_mask;
39 uint32_t threshold = dev_priv->bci_threshold_hi;
40 uint32_t status;
41 int i;
42
43#if SAVAGE_BCI_DEBUG
44 if (n > dev_priv->cob_size + SAVAGE_BCI_FIFO_SIZE - threshold)
45 DRM_ERROR("Trying to emit %d words "
46 "(more than guaranteed space in COB)\n", n);
47#endif
48
49 for (i = 0; i < SAVAGE_DEFAULT_USEC_TIMEOUT; i++) {
50 DRM_MEMORYBARRIER();
51 status = dev_priv->status_ptr[0];
52 if ((status & mask) < threshold)
53 return 0;
54 DRM_UDELAY(1);
55 }
56
57#if SAVAGE_BCI_DEBUG
58 DRM_ERROR("failed!\n");
59 DRM_INFO(" status=0x%08x, threshold=0x%08x\n", status, threshold);
60#endif
61 return DRM_ERR(EBUSY);
62}
63
64static int
65savage_bci_wait_fifo_s3d(drm_savage_private_t *dev_priv, unsigned int n)
66{
67 uint32_t maxUsed = dev_priv->cob_size + SAVAGE_BCI_FIFO_SIZE - n;
68 uint32_t status;
69 int i;
70
71 for (i = 0; i < SAVAGE_DEFAULT_USEC_TIMEOUT; i++) {
72 status = SAVAGE_READ(SAVAGE_STATUS_WORD0);
73 if ((status & SAVAGE_FIFO_USED_MASK_S3D) <= maxUsed)
74 return 0;
75 DRM_UDELAY(1);
76 }
77
78#if SAVAGE_BCI_DEBUG
79 DRM_ERROR("failed!\n");
80 DRM_INFO(" status=0x%08x\n", status);
81#endif
82 return DRM_ERR(EBUSY);
83}
84
85static int
86savage_bci_wait_fifo_s4(drm_savage_private_t *dev_priv, unsigned int n)
87{
88 uint32_t maxUsed = dev_priv->cob_size + SAVAGE_BCI_FIFO_SIZE - n;
89 uint32_t status;
90 int i;
91
92 for (i = 0; i < SAVAGE_DEFAULT_USEC_TIMEOUT; i++) {
93 status = SAVAGE_READ(SAVAGE_ALT_STATUS_WORD0);
94 if ((status & SAVAGE_FIFO_USED_MASK_S4) <= maxUsed)
95 return 0;
96 DRM_UDELAY(1);
97 }
98
99#if SAVAGE_BCI_DEBUG
100 DRM_ERROR("failed!\n");
101 DRM_INFO(" status=0x%08x\n", status);
102#endif
103 return DRM_ERR(EBUSY);
104}
105
106/*
107 * Waiting for events.
108 *
109 * The BIOS resets the event tag to 0 on mode changes. Therefore we
110 * never emit 0 to the event tag. If we find a 0 event tag we know the
111 * BIOS stomped on it and return success assuming that the BIOS waited
112 * for engine idle.
113 *
114 * Note: if the Xserver uses the event tag it has to follow the same
115 * rule. Otherwise there may be glitches every 2^16 events.
116 */
117static int
118savage_bci_wait_event_shadow(drm_savage_private_t *dev_priv, uint16_t e)
119{
120 uint32_t status;
121 int i;
122
123 for (i = 0; i < SAVAGE_EVENT_USEC_TIMEOUT; i++) {
124 DRM_MEMORYBARRIER();
125 status = dev_priv->status_ptr[1];
126 if ((((status & 0xffff) - e) & 0xffff) <= 0x7fff ||
127 (status & 0xffff) == 0)
128 return 0;
129 DRM_UDELAY(1);
130 }
131
132#if SAVAGE_BCI_DEBUG
133 DRM_ERROR("failed!\n");
134 DRM_INFO(" status=0x%08x, e=0x%04x\n", status, e);
135#endif
136
137 return DRM_ERR(EBUSY);
138}
139
140static int
141savage_bci_wait_event_reg(drm_savage_private_t *dev_priv, uint16_t e)
142{
143 uint32_t status;
144 int i;
145
146 for (i = 0; i < SAVAGE_EVENT_USEC_TIMEOUT; i++) {
147 status = SAVAGE_READ(SAVAGE_STATUS_WORD1);
148 if ((((status & 0xffff) - e) & 0xffff) <= 0x7fff ||
149 (status & 0xffff) == 0)
150 return 0;
151 DRM_UDELAY(1);
152 }
153
154#if SAVAGE_BCI_DEBUG
155 DRM_ERROR("failed!\n");
156 DRM_INFO(" status=0x%08x, e=0x%04x\n", status, e);
157#endif
158
159 return DRM_ERR(EBUSY);
160}
161
162uint16_t savage_bci_emit_event(drm_savage_private_t *dev_priv,
163 unsigned int flags)
164{
165 uint16_t count;
166 BCI_LOCALS;
167
168 if (dev_priv->status_ptr) {
169 /* coordinate with Xserver */
170 count = dev_priv->status_ptr[1023];
171 if (count < dev_priv->event_counter)
172 dev_priv->event_wrap++;
173 } else {
174 count = dev_priv->event_counter;
175 }
176 count = (count + 1) & 0xffff;
177 if (count == 0) {
178 count++; /* See the comment above savage_wait_event_*. */
179 dev_priv->event_wrap++;
180 }
181 dev_priv->event_counter = count;
182 if (dev_priv->status_ptr)
183 dev_priv->status_ptr[1023] = (uint32_t)count;
184
185 if ((flags & (SAVAGE_WAIT_2D | SAVAGE_WAIT_3D))) {
186 unsigned int wait_cmd = BCI_CMD_WAIT;
187 if ((flags & SAVAGE_WAIT_2D))
188 wait_cmd |= BCI_CMD_WAIT_2D;
189 if ((flags & SAVAGE_WAIT_3D))
190 wait_cmd |= BCI_CMD_WAIT_3D;
191 BEGIN_BCI(2);
192 BCI_WRITE(wait_cmd);
193 } else {
194 BEGIN_BCI(1);
195 }
196 BCI_WRITE(BCI_CMD_UPDATE_EVENT_TAG | (uint32_t)count);
197
198 return count;
199}
200
201/*
202 * Freelist management
203 */
204static int savage_freelist_init(drm_device_t *dev)
205{
206 drm_savage_private_t *dev_priv = dev->dev_private;
207 drm_device_dma_t *dma = dev->dma;
208 drm_buf_t *buf;
209 drm_savage_buf_priv_t *entry;
210 int i;
211 DRM_DEBUG("count=%d\n", dma->buf_count);
212
213 dev_priv->head.next = &dev_priv->tail;
214 dev_priv->head.prev = NULL;
215 dev_priv->head.buf = NULL;
216
217 dev_priv->tail.next = NULL;
218 dev_priv->tail.prev = &dev_priv->head;
219 dev_priv->tail.buf = NULL;
220
221 for (i = 0; i < dma->buf_count; i++) {
222 buf = dma->buflist[i];
223 entry = buf->dev_private;
224
225 SET_AGE(&entry->age, 0, 0);
226 entry->buf = buf;
227
228 entry->next = dev_priv->head.next;
229 entry->prev = &dev_priv->head;
230 dev_priv->head.next->prev = entry;
231 dev_priv->head.next = entry;
232 }
233
234 return 0;
235}
236
237static drm_buf_t *savage_freelist_get(drm_device_t *dev)
238{
239 drm_savage_private_t *dev_priv = dev->dev_private;
240 drm_savage_buf_priv_t *tail = dev_priv->tail.prev;
241 uint16_t event;
242 unsigned int wrap;
243 DRM_DEBUG("\n");
244
245 UPDATE_EVENT_COUNTER();
246 if (dev_priv->status_ptr)
247 event = dev_priv->status_ptr[1] & 0xffff;
248 else
249 event = SAVAGE_READ(SAVAGE_STATUS_WORD1) & 0xffff;
250 wrap = dev_priv->event_wrap;
251 if (event > dev_priv->event_counter)
252 wrap--; /* hardware hasn't passed the last wrap yet */
253
254 DRM_DEBUG(" tail=0x%04x %d\n", tail->age.event, tail->age.wrap);
255 DRM_DEBUG(" head=0x%04x %d\n", event, wrap);
256
257 if (tail->buf && (TEST_AGE(&tail->age, event, wrap) || event == 0)) {
258 drm_savage_buf_priv_t *next = tail->next;
259 drm_savage_buf_priv_t *prev = tail->prev;
260 prev->next = next;
261 next->prev = prev;
262 tail->next = tail->prev = NULL;
263 return tail->buf;
264 }
265
266 DRM_DEBUG("returning NULL, tail->buf=%p!\n", tail->buf);
267 return NULL;
268}
269
270void savage_freelist_put(drm_device_t *dev, drm_buf_t *buf)
271{
272 drm_savage_private_t *dev_priv = dev->dev_private;
273 drm_savage_buf_priv_t *entry = buf->dev_private, *prev, *next;
274
275 DRM_DEBUG("age=0x%04x wrap=%d\n", entry->age.event, entry->age.wrap);
276
277 if (entry->next != NULL || entry->prev != NULL) {
278 DRM_ERROR("entry already on freelist.\n");
279 return;
280 }
281
282 prev = &dev_priv->head;
283 next = prev->next;
284 prev->next = entry;
285 next->prev = entry;
286 entry->prev = prev;
287 entry->next = next;
288}
289
290/*
291 * Command DMA
292 */
293static int savage_dma_init(drm_savage_private_t *dev_priv)
294{
295 unsigned int i;
296
297 dev_priv->nr_dma_pages = dev_priv->cmd_dma->size /
298 (SAVAGE_DMA_PAGE_SIZE*4);
299 dev_priv->dma_pages = drm_alloc(sizeof(drm_savage_dma_page_t) *
300 dev_priv->nr_dma_pages,
301 DRM_MEM_DRIVER);
302 if (dev_priv->dma_pages == NULL)
303 return DRM_ERR(ENOMEM);
304
305 for (i = 0; i < dev_priv->nr_dma_pages; ++i) {
306 SET_AGE(&dev_priv->dma_pages[i].age, 0, 0);
307 dev_priv->dma_pages[i].used = 0;
308 dev_priv->dma_pages[i].flushed = 0;
309 }
310 SET_AGE(&dev_priv->last_dma_age, 0, 0);
311
312 dev_priv->first_dma_page = 0;
313 dev_priv->current_dma_page = 0;
314
315 return 0;
316}
317
318void savage_dma_reset(drm_savage_private_t *dev_priv)
319{
320 uint16_t event;
321 unsigned int wrap, i;
322 event = savage_bci_emit_event(dev_priv, 0);
323 wrap = dev_priv->event_wrap;
324 for (i = 0; i < dev_priv->nr_dma_pages; ++i) {
325 SET_AGE(&dev_priv->dma_pages[i].age, event, wrap);
326 dev_priv->dma_pages[i].used = 0;
327 dev_priv->dma_pages[i].flushed = 0;
328 }
329 SET_AGE(&dev_priv->last_dma_age, event, wrap);
330 dev_priv->first_dma_page = dev_priv->current_dma_page = 0;
331}
332
333void savage_dma_wait(drm_savage_private_t *dev_priv, unsigned int page)
334{
335 uint16_t event;
336 unsigned int wrap;
337
338 /* Faked DMA buffer pages don't age. */
339 if (dev_priv->cmd_dma == &dev_priv->fake_dma)
340 return;
341
342 UPDATE_EVENT_COUNTER();
343 if (dev_priv->status_ptr)
344 event = dev_priv->status_ptr[1] & 0xffff;
345 else
346 event = SAVAGE_READ(SAVAGE_STATUS_WORD1) & 0xffff;
347 wrap = dev_priv->event_wrap;
348 if (event > dev_priv->event_counter)
349 wrap--; /* hardware hasn't passed the last wrap yet */
350
351 if (dev_priv->dma_pages[page].age.wrap > wrap ||
352 (dev_priv->dma_pages[page].age.wrap == wrap &&
353 dev_priv->dma_pages[page].age.event > event)) {
354 if (dev_priv->wait_evnt(dev_priv,
355 dev_priv->dma_pages[page].age.event)
356 < 0)
357 DRM_ERROR("wait_evnt failed!\n");
358 }
359}
360
361uint32_t *savage_dma_alloc(drm_savage_private_t *dev_priv, unsigned int n)
362{
363 unsigned int cur = dev_priv->current_dma_page;
364 unsigned int rest = SAVAGE_DMA_PAGE_SIZE -
365 dev_priv->dma_pages[cur].used;
366 unsigned int nr_pages = (n - rest + SAVAGE_DMA_PAGE_SIZE-1) /
367 SAVAGE_DMA_PAGE_SIZE;
368 uint32_t *dma_ptr;
369 unsigned int i;
370
371 DRM_DEBUG("cur=%u, cur->used=%u, n=%u, rest=%u, nr_pages=%u\n",
372 cur, dev_priv->dma_pages[cur].used, n, rest, nr_pages);
373
374 if (cur + nr_pages < dev_priv->nr_dma_pages) {
375 dma_ptr = (uint32_t *)dev_priv->cmd_dma->handle +
376 cur*SAVAGE_DMA_PAGE_SIZE +
377 dev_priv->dma_pages[cur].used;
378 if (n < rest)
379 rest = n;
380 dev_priv->dma_pages[cur].used += rest;
381 n -= rest;
382 cur++;
383 } else {
384 dev_priv->dma_flush(dev_priv);
385 nr_pages = (n + SAVAGE_DMA_PAGE_SIZE-1) / SAVAGE_DMA_PAGE_SIZE;
386 for (i = cur; i < dev_priv->nr_dma_pages; ++i) {
387 dev_priv->dma_pages[i].age = dev_priv->last_dma_age;
388 dev_priv->dma_pages[i].used = 0;
389 dev_priv->dma_pages[i].flushed = 0;
390 }
391 dma_ptr = (uint32_t *)dev_priv->cmd_dma->handle;
392 dev_priv->first_dma_page = cur = 0;
393 }
394 for (i = cur; nr_pages > 0; ++i, --nr_pages) {
395#if SAVAGE_DMA_DEBUG
396 if (dev_priv->dma_pages[i].used) {
397 DRM_ERROR("unflushed page %u: used=%u\n",
398 i, dev_priv->dma_pages[i].used);
399 }
400#endif
401 if (n > SAVAGE_DMA_PAGE_SIZE)
402 dev_priv->dma_pages[i].used = SAVAGE_DMA_PAGE_SIZE;
403 else
404 dev_priv->dma_pages[i].used = n;
405 n -= SAVAGE_DMA_PAGE_SIZE;
406 }
407 dev_priv->current_dma_page = --i;
408
409 DRM_DEBUG("cur=%u, cur->used=%u, n=%u\n",
410 i, dev_priv->dma_pages[i].used, n);
411
412 savage_dma_wait(dev_priv, dev_priv->current_dma_page);
413
414 return dma_ptr;
415}
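The page arithmetic above is a round-up division applied after the current page's free space is consumed; a worked example (SAVAGE_DMA_PAGE_SIZE is assumed to be 1024 entries purely for illustration):

/* With cur->used == 200, the current page has rest == 824 free entries.
 * A request of n == 2500 entries then needs
 *     nr_pages = (2500 - 824 + 1023) / 1024 = 2
 * additional pages -- the usual ceiling division. */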
416
417static void savage_dma_flush(drm_savage_private_t *dev_priv)
418{
419 unsigned int first = dev_priv->first_dma_page;
420 unsigned int cur = dev_priv->current_dma_page;
421 uint16_t event;
422 unsigned int wrap, pad, align, len, i;
423 unsigned long phys_addr;
424 BCI_LOCALS;
425
426 if (first == cur &&
427 dev_priv->dma_pages[cur].used == dev_priv->dma_pages[cur].flushed)
428 return;
429
430 /* pad length to multiples of 2 entries;
431 * align start of next DMA block to multiples of 8 entries */
432 pad = -dev_priv->dma_pages[cur].used & 1;
433 align = -(dev_priv->dma_pages[cur].used + pad) & 7;
434
435 DRM_DEBUG("first=%u, cur=%u, first->flushed=%u, cur->used=%u, "
436 "pad=%u, align=%u\n",
437 first, cur, dev_priv->dma_pages[first].flushed,
438 dev_priv->dma_pages[cur].used, pad, align);
439
440 /* pad with noops */
441 if (pad) {
442 uint32_t *dma_ptr = (uint32_t *)dev_priv->cmd_dma->handle +
443 cur * SAVAGE_DMA_PAGE_SIZE +
444 dev_priv->dma_pages[cur].used;
445 dev_priv->dma_pages[cur].used += pad;
446 while(pad != 0) {
447 *dma_ptr++ = BCI_CMD_WAIT;
448 pad--;
449 }
450 }
451
452 DRM_MEMORYBARRIER();
453
454 /* do flush ... */
455 phys_addr = dev_priv->cmd_dma->offset +
456 (first * SAVAGE_DMA_PAGE_SIZE +
457 dev_priv->dma_pages[first].flushed) * 4;
458 len = (cur - first) * SAVAGE_DMA_PAGE_SIZE +
459 dev_priv->dma_pages[cur].used -
460 dev_priv->dma_pages[first].flushed;
461
462 DRM_DEBUG("phys_addr=%lx, len=%u\n",
463 phys_addr | dev_priv->dma_type, len);
464
465 BEGIN_BCI(3);
466 BCI_SET_REGISTERS(SAVAGE_DMABUFADDR, 1);
467 BCI_WRITE(phys_addr | dev_priv->dma_type);
468 BCI_DMA(len);
469
470 /* fix alignment of the start of the next block */
471 dev_priv->dma_pages[cur].used += align;
472
473 /* age DMA pages */
474 event = savage_bci_emit_event(dev_priv, 0);
475 wrap = dev_priv->event_wrap;
476 for (i = first; i < cur; ++i) {
477 SET_AGE(&dev_priv->dma_pages[i].age, event, wrap);
478 dev_priv->dma_pages[i].used = 0;
479 dev_priv->dma_pages[i].flushed = 0;
480 }
481 /* age the current page only when it's full */
482 if (dev_priv->dma_pages[cur].used == SAVAGE_DMA_PAGE_SIZE) {
483 SET_AGE(&dev_priv->dma_pages[cur].age, event, wrap);
484 dev_priv->dma_pages[cur].used = 0;
485 dev_priv->dma_pages[cur].flushed = 0;
486 /* advance to next page */
487 cur++;
488 if (cur == dev_priv->nr_dma_pages)
489 cur = 0;
490 dev_priv->first_dma_page = dev_priv->current_dma_page = cur;
491 } else {
492 dev_priv->first_dma_page = cur;
493 dev_priv->dma_pages[cur].flushed = dev_priv->dma_pages[cur].used;
494 }
495 SET_AGE(&dev_priv->last_dma_age, event, wrap);
496
497 DRM_DEBUG("first=cur=%u, cur->used=%u, cur->flushed=%u\n", cur,
498 dev_priv->dma_pages[cur].used,
499 dev_priv->dma_pages[cur].flushed);
500}
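The pad and align values at the top of savage_dma_flush come from the -x & (n-1) idiom, which gives the distance from x up to the next multiple of n; for example:

/* With cur->used == 13:
 *     pad   = -13 & 1       = 1   (13 -> 14, a multiple of 2)
 *     align = -(13 + 1) & 7 = 2   (the next block starts at 16,
 *                                  a multiple of 8)
 */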
501
502static void savage_fake_dma_flush(drm_savage_private_t *dev_priv)
503{
504 unsigned int i, j;
505 BCI_LOCALS;
506
507 if (dev_priv->first_dma_page == dev_priv->current_dma_page &&
508 dev_priv->dma_pages[dev_priv->current_dma_page].used == 0)
509 return;
510
511 DRM_DEBUG("first=%u, cur=%u, cur->used=%u\n",
512 dev_priv->first_dma_page, dev_priv->current_dma_page,
513 dev_priv->dma_pages[dev_priv->current_dma_page].used);
514
515 for (i = dev_priv->first_dma_page;
516 i <= dev_priv->current_dma_page && dev_priv->dma_pages[i].used;
517 ++i) {
518 uint32_t *dma_ptr = (uint32_t *)dev_priv->cmd_dma->handle +
519 i * SAVAGE_DMA_PAGE_SIZE;
520#if SAVAGE_DMA_DEBUG
521 /* Sanity check: all pages except the last one must be full. */
522 if (i < dev_priv->current_dma_page &&
523 dev_priv->dma_pages[i].used != SAVAGE_DMA_PAGE_SIZE) {
524 DRM_ERROR("partial DMA page %u: used=%u\n",
525 i, dev_priv->dma_pages[i].used);
526 }
527#endif
528 BEGIN_BCI(dev_priv->dma_pages[i].used);
529 for (j = 0; j < dev_priv->dma_pages[i].used; ++j) {
530 BCI_WRITE(dma_ptr[j]);
531 }
532 dev_priv->dma_pages[i].used = 0;
533 }
534
535 /* reset to first page */
536 dev_priv->first_dma_page = dev_priv->current_dma_page = 0;
537}
538
539/*
540 * Initialize mappings. On Savage4 and SavageIX the alignment
541 * and size of the aperture are not suitable for automatic MTRR setup
542 * in drm_addmap. Therefore we do it manually before the maps are
543 * initialized. We also need to take care of deleting the MTRRs in
544 * postcleanup.
545 */
546int savage_preinit(drm_device_t *dev, unsigned long chipset)
547{
548 drm_savage_private_t *dev_priv;
549 unsigned long mmio_base, fb_base, fb_size, aperture_base;
550 /* fb_rsrc and aper_rsrc aren't really used currently, but still exist
551 * in case we decide we need information on the BAR for BSD in the
552 * future.
553 */
554 unsigned int fb_rsrc, aper_rsrc;
555 int ret = 0;
556
557 dev_priv = drm_alloc(sizeof(drm_savage_private_t), DRM_MEM_DRIVER);
558 if (dev_priv == NULL)
559 return DRM_ERR(ENOMEM);
560
561 memset(dev_priv, 0, sizeof(drm_savage_private_t));
562 dev->dev_private = (void *)dev_priv;
563 dev_priv->chipset = (enum savage_family)chipset;
564
565 dev_priv->mtrr[0].handle = -1;
566 dev_priv->mtrr[1].handle = -1;
567 dev_priv->mtrr[2].handle = -1;
568 if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) {
569 fb_rsrc = 0;
570 fb_base = drm_get_resource_start(dev, 0);
571 fb_size = SAVAGE_FB_SIZE_S3;
572 mmio_base = fb_base + SAVAGE_FB_SIZE_S3;
573 aper_rsrc = 0;
574 aperture_base = fb_base + SAVAGE_APERTURE_OFFSET;
575 /* this should always be true */
576 if (drm_get_resource_len(dev, 0) == 0x08000000) {
577 /* Don't make MMIO write-combining! We need 3
578 * MTRRs. */
579 dev_priv->mtrr[0].base = fb_base;
580 dev_priv->mtrr[0].size = 0x01000000;
581 dev_priv->mtrr[0].handle = mtrr_add(
582 dev_priv->mtrr[0].base, dev_priv->mtrr[0].size,
583 MTRR_TYPE_WRCOMB, 1);
584 dev_priv->mtrr[1].base = fb_base+0x02000000;
585 dev_priv->mtrr[1].size = 0x02000000;
586 dev_priv->mtrr[1].handle = mtrr_add(
587 dev_priv->mtrr[1].base, dev_priv->mtrr[1].size,
588 MTRR_TYPE_WRCOMB, 1);
589 dev_priv->mtrr[2].base = fb_base+0x04000000;
590 dev_priv->mtrr[2].size = 0x04000000;
591 dev_priv->mtrr[2].handle = mtrr_add(
592 dev_priv->mtrr[2].base, dev_priv->mtrr[2].size,
593 MTRR_TYPE_WRCOMB, 1);
594 } else {
595 DRM_ERROR("strange pci_resource_len %08lx\n",
596 drm_get_resource_len(dev, 0));
597 }
598 } else if (chipset != S3_SUPERSAVAGE && chipset != S3_SAVAGE2000) {
599 mmio_base = drm_get_resource_start(dev, 0);
600 fb_rsrc = 1;
601 fb_base = drm_get_resource_start(dev, 1);
602 fb_size = SAVAGE_FB_SIZE_S4;
603 aper_rsrc = 1;
604 aperture_base = fb_base + SAVAGE_APERTURE_OFFSET;
605 /* this should always be true */
606 if (drm_get_resource_len(dev, 1) == 0x08000000) {
607 /* Can use one MTRR to cover both fb and
608 * aperture. */
609 dev_priv->mtrr[0].base = fb_base;
610 dev_priv->mtrr[0].size = 0x08000000;
611 dev_priv->mtrr[0].handle = mtrr_add(
612 dev_priv->mtrr[0].base, dev_priv->mtrr[0].size,
613 MTRR_TYPE_WRCOMB, 1);
614 } else {
615 DRM_ERROR("strange pci_resource_len %08lx\n",
616 drm_get_resource_len(dev, 1));
617 }
618 } else {
619 mmio_base = drm_get_resource_start(dev, 0);
620 fb_rsrc = 1;
621 fb_base = drm_get_resource_start(dev, 1);
622 fb_size = drm_get_resource_len(dev, 1);
623 aper_rsrc = 2;
624 aperture_base = drm_get_resource_start(dev, 2);
625 /* Automatic MTRR setup will do the right thing. */
626 }
627
628 ret = drm_addmap(dev, mmio_base, SAVAGE_MMIO_SIZE, _DRM_REGISTERS,
629 _DRM_READ_ONLY, &dev_priv->mmio);
630 if (ret)
631 return ret;
632
633 ret = drm_addmap(dev, fb_base, fb_size, _DRM_FRAME_BUFFER,
634 _DRM_WRITE_COMBINING, &dev_priv->fb);
635 if (ret)
636 return ret;
637
638 ret = drm_addmap(dev, aperture_base, SAVAGE_APERTURE_SIZE,
639 _DRM_FRAME_BUFFER, _DRM_WRITE_COMBINING,
640 &dev_priv->aperture);
641 if (ret)
642 return ret;
643
644 return ret;
645}
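The three-region split above follows from the MTRR rule that a region must have a power-of-two size aligned to that size, while write-combining must skip the MMIO range that sits inside the Savage3D's single 128MB BAR. A minimal sketch of the constraint (the helper is illustrative, not part of the patch):

static int mtrr_region_valid(unsigned long base, unsigned long size)
{
	return size != 0 &&
	       (size & (size - 1)) == 0 &&	/* power-of-two size */
	       (base & (size - 1)) == 0;	/* base aligned to size */
}

Each of the 16MB, 32MB and 64MB regions chosen above satisfies this, assuming fb_base itself is suitably aligned; no single valid region could cover the BAR while leaving the MMIO window uncached.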
646
647/*
648 * Delete MTRRs and free device-private data.
649 */
650int savage_postcleanup(drm_device_t *dev)
651{
652 drm_savage_private_t *dev_priv = dev->dev_private;
653 int i;
654
655 for (i = 0; i < 3; ++i)
656 if (dev_priv->mtrr[i].handle >= 0)
657 mtrr_del(dev_priv->mtrr[i].handle,
658 dev_priv->mtrr[i].base,
659 dev_priv->mtrr[i].size);
660
661 drm_free(dev_priv, sizeof(drm_savage_private_t), DRM_MEM_DRIVER);
662
663 return 0;
664}
665
666static int savage_do_init_bci(drm_device_t *dev, drm_savage_init_t *init)
667{
668 drm_savage_private_t *dev_priv = dev->dev_private;
669
670 if (init->fb_bpp != 16 && init->fb_bpp != 32) {
671 DRM_ERROR("invalid frame buffer bpp %d!\n", init->fb_bpp);
672 return DRM_ERR(EINVAL);
673 }
674 if (init->depth_bpp != 16 && init->depth_bpp != 32) {
675 DRM_ERROR("invalid depth buffer bpp %d!\n", init->depth_bpp);
676 return DRM_ERR(EINVAL);
677 }
678 if (init->dma_type != SAVAGE_DMA_AGP &&
679 init->dma_type != SAVAGE_DMA_PCI) {
680 DRM_ERROR("invalid dma memory type %d!\n", init->dma_type);
681 return DRM_ERR(EINVAL);
682 }
683
684 dev_priv->cob_size = init->cob_size;
685 dev_priv->bci_threshold_lo = init->bci_threshold_lo;
686 dev_priv->bci_threshold_hi = init->bci_threshold_hi;
687 dev_priv->dma_type = init->dma_type;
688
689 dev_priv->fb_bpp = init->fb_bpp;
690 dev_priv->front_offset = init->front_offset;
691 dev_priv->front_pitch = init->front_pitch;
692 dev_priv->back_offset = init->back_offset;
693 dev_priv->back_pitch = init->back_pitch;
694 dev_priv->depth_bpp = init->depth_bpp;
695 dev_priv->depth_offset = init->depth_offset;
696 dev_priv->depth_pitch = init->depth_pitch;
697
698 dev_priv->texture_offset = init->texture_offset;
699 dev_priv->texture_size = init->texture_size;
700
701 DRM_GETSAREA();
702 if (!dev_priv->sarea) {
703 DRM_ERROR("could not find sarea!\n");
704 savage_do_cleanup_bci(dev);
705 return DRM_ERR(EINVAL);
706 }
707 if (init->status_offset != 0) {
708 dev_priv->status = drm_core_findmap(dev, init->status_offset);
709 if (!dev_priv->status) {
710 DRM_ERROR("could not find shadow status region!\n");
711 savage_do_cleanup_bci(dev);
712 return DRM_ERR(EINVAL);
713 }
714 } else {
715 dev_priv->status = NULL;
716 }
717 if (dev_priv->dma_type == SAVAGE_DMA_AGP && init->buffers_offset) {
718 dev->agp_buffer_map = drm_core_findmap(dev,
719 init->buffers_offset);
720 if (!dev->agp_buffer_map) {
721 DRM_ERROR("could not find DMA buffer region!\n");
722 savage_do_cleanup_bci(dev);
723 return DRM_ERR(EINVAL);
724 }
725 drm_core_ioremap(dev->agp_buffer_map, dev);
726 if (!dev->agp_buffer_map) {
727 DRM_ERROR("failed to ioremap DMA buffer region!\n");
728 savage_do_cleanup_bci(dev);
729 return DRM_ERR(ENOMEM);
730 }
731 }
732 if (init->agp_textures_offset) {
733 dev_priv->agp_textures =
734 drm_core_findmap(dev, init->agp_textures_offset);
735 if (!dev_priv->agp_textures) {
736 DRM_ERROR("could not find agp texture region!\n");
737 savage_do_cleanup_bci(dev);
738 return DRM_ERR(EINVAL);
739 }
740 } else {
741 dev_priv->agp_textures = NULL;
742 }
743
744 if (init->cmd_dma_offset) {
745 if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) {
746 DRM_ERROR("command DMA not supported on "
747 "Savage3D/MX/IX.\n");
748 savage_do_cleanup_bci(dev);
749 return DRM_ERR(EINVAL);
750 }
751 if (dev->dma && dev->dma->buflist) {
752 DRM_ERROR("command and vertex DMA not supported "
753 "at the same time.\n");
754 savage_do_cleanup_bci(dev);
755 return DRM_ERR(EINVAL);
756 }
757 dev_priv->cmd_dma = drm_core_findmap(dev, init->cmd_dma_offset);
758 if (!dev_priv->cmd_dma) {
759 DRM_ERROR("could not find command DMA region!\n");
760 savage_do_cleanup_bci(dev);
761 return DRM_ERR(EINVAL);
762 }
763 if (dev_priv->dma_type == SAVAGE_DMA_AGP) {
764 if (dev_priv->cmd_dma->type != _DRM_AGP) {
765 DRM_ERROR("AGP command DMA region is not a "
766 "_DRM_AGP map!\n");
767 savage_do_cleanup_bci(dev);
768 return DRM_ERR(EINVAL);
769 }
770 drm_core_ioremap(dev_priv->cmd_dma, dev);
771 if (!dev_priv->cmd_dma->handle) {
772 DRM_ERROR("failed to ioremap command "
773 "DMA region!\n");
774 savage_do_cleanup_bci(dev);
775 return DRM_ERR(ENOMEM);
776 }
777 } else if (dev_priv->cmd_dma->type != _DRM_CONSISTENT) {
778 DRM_ERROR("PCI command DMA region is not a "
779 "_DRM_CONSISTENT map!\n");
780 savage_do_cleanup_bci(dev);
781 return DRM_ERR(EINVAL);
782 }
783 } else {
784 dev_priv->cmd_dma = NULL;
785 }
786
787 dev_priv->dma_flush = savage_dma_flush;
788 if (!dev_priv->cmd_dma) {
789 DRM_DEBUG("falling back to faked command DMA.\n");
790 dev_priv->fake_dma.offset = 0;
791 dev_priv->fake_dma.size = SAVAGE_FAKE_DMA_SIZE;
792 dev_priv->fake_dma.type = _DRM_SHM;
793 dev_priv->fake_dma.handle = drm_alloc(SAVAGE_FAKE_DMA_SIZE,
794 DRM_MEM_DRIVER);
795 if (!dev_priv->fake_dma.handle) {
796 DRM_ERROR("could not allocate faked DMA buffer!\n");
797 savage_do_cleanup_bci(dev);
798 return DRM_ERR(ENOMEM);
799 }
800 dev_priv->cmd_dma = &dev_priv->fake_dma;
801 dev_priv->dma_flush = savage_fake_dma_flush;
802 }
803
804 dev_priv->sarea_priv =
805 (drm_savage_sarea_t *)((uint8_t *)dev_priv->sarea->handle +
806 init->sarea_priv_offset);
807
808 /* setup bitmap descriptors */
809 {
810 unsigned int color_tile_format;
811 unsigned int depth_tile_format;
812 unsigned int front_stride, back_stride, depth_stride;
813 if (dev_priv->chipset <= S3_SAVAGE4) {
814 color_tile_format = dev_priv->fb_bpp == 16 ?
815 SAVAGE_BD_TILE_16BPP : SAVAGE_BD_TILE_32BPP;
816 depth_tile_format = dev_priv->depth_bpp == 16 ?
817 SAVAGE_BD_TILE_16BPP : SAVAGE_BD_TILE_32BPP;
818 } else {
819 color_tile_format = SAVAGE_BD_TILE_DEST;
820 depth_tile_format = SAVAGE_BD_TILE_DEST;
821 }
822 front_stride = dev_priv->front_pitch / (dev_priv->fb_bpp/8);
823 back_stride = dev_priv-> back_pitch / (dev_priv->fb_bpp/8);
824 depth_stride = dev_priv->depth_pitch / (dev_priv->depth_bpp/8);
825
826 dev_priv->front_bd = front_stride | SAVAGE_BD_BW_DISABLE |
827 (dev_priv->fb_bpp << SAVAGE_BD_BPP_SHIFT) |
828 (color_tile_format << SAVAGE_BD_TILE_SHIFT);
829
830 dev_priv-> back_bd = back_stride | SAVAGE_BD_BW_DISABLE |
831 (dev_priv->fb_bpp << SAVAGE_BD_BPP_SHIFT) |
832 (color_tile_format << SAVAGE_BD_TILE_SHIFT);
833
834 dev_priv->depth_bd = depth_stride | SAVAGE_BD_BW_DISABLE |
835 (dev_priv->depth_bpp << SAVAGE_BD_BPP_SHIFT) |
836 (depth_tile_format << SAVAGE_BD_TILE_SHIFT);
837 }
838
839 /* setup status and bci ptr */
840 dev_priv->event_counter = 0;
841 dev_priv->event_wrap = 0;
842 dev_priv->bci_ptr = (volatile uint32_t *)
843 ((uint8_t *)dev_priv->mmio->handle + SAVAGE_BCI_OFFSET);
844 if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) {
845 dev_priv->status_used_mask = SAVAGE_FIFO_USED_MASK_S3D;
846 } else {
847 dev_priv->status_used_mask = SAVAGE_FIFO_USED_MASK_S4;
848 }
849 if (dev_priv->status != NULL) {
850 dev_priv->status_ptr =
851 (volatile uint32_t *)dev_priv->status->handle;
852 dev_priv->wait_fifo = savage_bci_wait_fifo_shadow;
853 dev_priv->wait_evnt = savage_bci_wait_event_shadow;
854 dev_priv->status_ptr[1023] = dev_priv->event_counter;
855 } else {
856 dev_priv->status_ptr = NULL;
857 if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) {
858 dev_priv->wait_fifo = savage_bci_wait_fifo_s3d;
859 } else {
860 dev_priv->wait_fifo = savage_bci_wait_fifo_s4;
861 }
862 dev_priv->wait_evnt = savage_bci_wait_event_reg;
863 }
864
865 /* cliprect functions */
866 if (S3_SAVAGE3D_SERIES(dev_priv->chipset))
867 dev_priv->emit_clip_rect = savage_emit_clip_rect_s3d;
868 else
869 dev_priv->emit_clip_rect = savage_emit_clip_rect_s4;
870
871 if (savage_freelist_init(dev) < 0) {
872 DRM_ERROR("could not initialize freelist\n");
873 savage_do_cleanup_bci(dev);
874 return DRM_ERR(ENOMEM);
875 }
876
877 if (savage_dma_init(dev_priv) < 0) {
878 DRM_ERROR("could not initialize command DMA\n");
879 savage_do_cleanup_bci(dev);
880 return DRM_ERR(ENOMEM);
881 }
882
883 return 0;
884}
885
886int savage_do_cleanup_bci(drm_device_t *dev)
887{
888 drm_savage_private_t *dev_priv = dev->dev_private;
889
890 if (dev_priv->cmd_dma == &dev_priv->fake_dma) {
891 if (dev_priv->fake_dma.handle)
892 drm_free(dev_priv->fake_dma.handle,
893 SAVAGE_FAKE_DMA_SIZE, DRM_MEM_DRIVER);
894 } else if (dev_priv->cmd_dma && dev_priv->cmd_dma->handle &&
895 dev_priv->cmd_dma->type == _DRM_AGP &&
896 dev_priv->dma_type == SAVAGE_DMA_AGP)
897 drm_core_ioremapfree(dev_priv->cmd_dma, dev);
898
899 if (dev_priv->dma_type == SAVAGE_DMA_AGP &&
900 dev->agp_buffer_map && dev->agp_buffer_map->handle) {
901 drm_core_ioremapfree(dev->agp_buffer_map, dev);
902 /* make sure the next instance (which may be running
903 * in PCI mode) doesn't try to use an old
904 * agp_buffer_map. */
905 dev->agp_buffer_map = NULL;
906 }
907
908 if (dev_priv->dma_pages)
909 drm_free(dev_priv->dma_pages,
910 sizeof(drm_savage_dma_page_t)*dev_priv->nr_dma_pages,
911 DRM_MEM_DRIVER);
912
913 return 0;
914}
915
916static int savage_bci_init(DRM_IOCTL_ARGS)
917{
918 DRM_DEVICE;
919 drm_savage_init_t init;
920
921 LOCK_TEST_WITH_RETURN(dev, filp);
922
923 DRM_COPY_FROM_USER_IOCTL(init, (drm_savage_init_t __user *)data,
924 sizeof(init));
925
926 switch (init.func) {
927 case SAVAGE_INIT_BCI:
928 return savage_do_init_bci(dev, &init);
929 case SAVAGE_CLEANUP_BCI:
930 return savage_do_cleanup_bci(dev);
931 }
932
933 return DRM_ERR(EINVAL);
934}
935
936static int savage_bci_event_emit(DRM_IOCTL_ARGS)
937{
938 DRM_DEVICE;
939 drm_savage_private_t *dev_priv = dev->dev_private;
940 drm_savage_event_emit_t event;
941
942 DRM_DEBUG("\n");
943
944 LOCK_TEST_WITH_RETURN(dev, filp);
945
946 DRM_COPY_FROM_USER_IOCTL(event, (drm_savage_event_emit_t __user *)data,
947 sizeof(event));
948
949 event.count = savage_bci_emit_event(dev_priv, event.flags);
950 event.count |= dev_priv->event_wrap << 16;
951 DRM_COPY_TO_USER_IOCTL(&((drm_savage_event_emit_t __user *)data)->count,
952 event.count, sizeof(event.count));
953 return 0;
954}
955
956static int savage_bci_event_wait(DRM_IOCTL_ARGS)
957{
958 DRM_DEVICE;
959 drm_savage_private_t *dev_priv = dev->dev_private;
960 drm_savage_event_wait_t event;
961 unsigned int event_e, hw_e;
962 unsigned int event_w, hw_w;
963
964 DRM_DEBUG("\n");
965
966 DRM_COPY_FROM_USER_IOCTL(event, (drm_savage_event_wait_t __user *)data,
967 sizeof(event));
968
969 UPDATE_EVENT_COUNTER();
970 if (dev_priv->status_ptr)
971 hw_e = dev_priv->status_ptr[1] & 0xffff;
972 else
973 hw_e = SAVAGE_READ(SAVAGE_STATUS_WORD1) & 0xffff;
974 hw_w = dev_priv->event_wrap;
975 if (hw_e > dev_priv->event_counter)
976 hw_w--; /* hardware hasn't passed the last wrap yet */
977
978 event_e = event.count & 0xffff;
979 event_w = event.count >> 16;
980
981 /* Don't need to wait if
982 * - event counter wrapped since the event was emitted or
983 * - the hardware has advanced up to or over the event to wait for.
984 */
985 if (event_w < hw_w || (event_w == hw_w && event_e <= hw_e) )
986 return 0;
987 else
988 return dev_priv->wait_evnt(dev_priv, event_e);
989}
990
991/*
992 * DMA buffer management
993 */
994
995static int savage_bci_get_buffers(DRMFILE filp, drm_device_t *dev, drm_dma_t *d)
996{
997 drm_buf_t *buf;
998 int i;
999
1000 for (i = d->granted_count; i < d->request_count; i++) {
1001 buf = savage_freelist_get(dev);
1002 if (!buf)
1003 return DRM_ERR(EAGAIN);
1004
1005 buf->filp = filp;
1006
1007 if (DRM_COPY_TO_USER(&d->request_indices[i],
1008 &buf->idx, sizeof(buf->idx)))
1009 return DRM_ERR(EFAULT);
1010 if (DRM_COPY_TO_USER(&d->request_sizes[i],
1011 &buf->total, sizeof(buf->total)))
1012 return DRM_ERR(EFAULT);
1013
1014 d->granted_count++;
1015 }
1016 return 0;
1017}
1018
1019int savage_bci_buffers(DRM_IOCTL_ARGS)
1020{
1021 DRM_DEVICE;
1022 drm_device_dma_t *dma = dev->dma;
1023 drm_dma_t d;
1024 int ret = 0;
1025
1026 LOCK_TEST_WITH_RETURN(dev, filp);
1027
1028 DRM_COPY_FROM_USER_IOCTL(d, (drm_dma_t __user *)data, sizeof(d));
1029
1030 /* Please don't send us buffers.
1031 */
1032 if (d.send_count != 0) {
1033 DRM_ERROR("Process %d trying to send %d buffers via drmDMA\n",
1034 DRM_CURRENTPID, d.send_count);
1035 return DRM_ERR(EINVAL);
1036 }
1037
1038 /* We'll send you buffers.
1039 */
1040 if (d.request_count < 0 || d.request_count > dma->buf_count) {
1041 DRM_ERROR("Process %d trying to get %d buffers (of %d max)\n",
1042 DRM_CURRENTPID, d.request_count, dma->buf_count);
1043 return DRM_ERR(EINVAL);
1044 }
1045
1046 d.granted_count = 0;
1047
1048 if (d.request_count) {
1049 ret = savage_bci_get_buffers(filp, dev, &d);
1050 }
1051
1052 DRM_COPY_TO_USER_IOCTL((drm_dma_t __user *)data, d, sizeof(d));
1053
1054 return ret;
1055}
1056
1057void savage_reclaim_buffers(drm_device_t *dev, DRMFILE filp) {
1058 drm_device_dma_t *dma = dev->dma;
1059 drm_savage_private_t *dev_priv = dev->dev_private;
1060 int i;
1061
1062 if (!dma)
1063 return;
1064 if (!dev_priv)
1065 return;
1066 if (!dma->buflist)
1067 return;
1068
1069 /*i830_flush_queue(dev);*/
1070
1071 for (i = 0; i < dma->buf_count; i++) {
1072 drm_buf_t *buf = dma->buflist[i];
1073 drm_savage_buf_priv_t *buf_priv = buf->dev_private;
1074
1075 if (buf->filp == filp && buf_priv &&
1076 buf_priv->next == NULL && buf_priv->prev == NULL) {
1077 uint16_t event;
1078 DRM_DEBUG("reclaimed from client\n");
1079 event = savage_bci_emit_event(dev_priv, SAVAGE_WAIT_3D);
1080 SET_AGE(&buf_priv->age, event, dev_priv->event_wrap);
1081 savage_freelist_put(dev, buf);
1082 }
1083 }
1084
1085 drm_core_reclaim_buffers(dev, filp);
1086}
1087
1088
1089drm_ioctl_desc_t savage_ioctls[] = {
1090 [DRM_IOCTL_NR(DRM_SAVAGE_BCI_INIT)] = {savage_bci_init, 1, 1},
1091 [DRM_IOCTL_NR(DRM_SAVAGE_BCI_CMDBUF)] = {savage_bci_cmdbuf, 1, 0},
1092 [DRM_IOCTL_NR(DRM_SAVAGE_BCI_EVENT_EMIT)] = {savage_bci_event_emit, 1, 0},
1093 [DRM_IOCTL_NR(DRM_SAVAGE_BCI_EVENT_WAIT)] = {savage_bci_event_wait, 1, 0},
1094};
1095
1096int savage_max_ioctl = DRM_ARRAY_SIZE(savage_ioctls);
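A note on the two numeric fields in each table entry: in this kernel generation drm_ioctl_desc_t is assumed here to carry auth_needed and root_only flags after the handler, which would make BCI_INIT a root-only ioctl while CMDBUF and the two event ioctls merely require an authenticated client.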
diff --git a/drivers/char/drm/savage_drm.h b/drivers/char/drm/savage_drm.h
new file mode 100644
index 000000000000..6526c9aa7589
--- /dev/null
+++ b/drivers/char/drm/savage_drm.h
@@ -0,0 +1,209 @@
1/* savage_drm.h -- Public header for the savage driver
2 *
3 * Copyright 2004 Felix Kuehling
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sub license,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the
14 * next paragraph) shall be included in all copies or substantial portions
15 * of the Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
18 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
19 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
20 * NON-INFRINGEMENT. IN NO EVENT SHALL FELIX KUEHLING BE LIABLE FOR
21 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
22 * CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
23 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
24 */
25
26#ifndef __SAVAGE_DRM_H__
27#define __SAVAGE_DRM_H__
28
29#ifndef __SAVAGE_SAREA_DEFINES__
30#define __SAVAGE_SAREA_DEFINES__
31
32/* 2 heaps (1 for card, 1 for agp), each divided into up to 128
33 * regions, subject to a minimum region size of (1<<16) == 64k.
34 *
35 * Clients may subdivide regions internally, but when sharing between
36 * clients, the region size is the minimum granularity.
37 */
38
39#define SAVAGE_CARD_HEAP 0
40#define SAVAGE_AGP_HEAP 1
41#define SAVAGE_NR_TEX_HEAPS 2
42#define SAVAGE_NR_TEX_REGIONS 16
43#define SAVAGE_LOG_MIN_TEX_REGION_SIZE 16
44
45#endif /* __SAVAGE_SAREA_DEFINES__ */
46
47typedef struct _drm_savage_sarea {
48 /* LRU lists for texture memory in agp space and on the card.
49 */
50 drm_tex_region_t texList[SAVAGE_NR_TEX_HEAPS][SAVAGE_NR_TEX_REGIONS+1];
51 unsigned int texAge[SAVAGE_NR_TEX_HEAPS];
52
53 /* Mechanism to validate card state.
54 */
55 int ctxOwner;
56} drm_savage_sarea_t, *drm_savage_sarea_ptr;
57
58/* Savage-specific ioctls
59 */
60#define DRM_SAVAGE_BCI_INIT 0x00
61#define DRM_SAVAGE_BCI_CMDBUF 0x01
62#define DRM_SAVAGE_BCI_EVENT_EMIT 0x02
63#define DRM_SAVAGE_BCI_EVENT_WAIT 0x03
64
65#define DRM_IOCTL_SAVAGE_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_SAVAGE_BCI_INIT, drm_savage_init_t)
66#define DRM_IOCTL_SAVAGE_CMDBUF DRM_IOW( DRM_COMMAND_BASE + DRM_SAVAGE_BCI_CMDBUF, drm_savage_cmdbuf_t)
67#define DRM_IOCTL_SAVAGE_EVENT_EMIT DRM_IOWR(DRM_COMMAND_BASE + DRM_SAVAGE_BCI_EVENT_EMIT, drm_savage_event_emit_t)
68#define DRM_IOCTL_SAVAGE_EVENT_WAIT DRM_IOW( DRM_COMMAND_BASE + DRM_SAVAGE_BCI_EVENT_WAIT, drm_savage_event_wait_t)
69
70#define SAVAGE_DMA_PCI 1
71#define SAVAGE_DMA_AGP 3
72typedef struct drm_savage_init {
73 enum {
74 SAVAGE_INIT_BCI = 1,
75 SAVAGE_CLEANUP_BCI = 2
76 } func;
77 unsigned int sarea_priv_offset;
78
79 /* some parameters */
80 unsigned int cob_size;
81 unsigned int bci_threshold_lo, bci_threshold_hi;
82 unsigned int dma_type;
83
84 /* frame buffer layout */
85 unsigned int fb_bpp;
86 unsigned int front_offset, front_pitch;
87 unsigned int back_offset, back_pitch;
88 unsigned int depth_bpp;
89 unsigned int depth_offset, depth_pitch;
90
91 /* local textures */
92 unsigned int texture_offset;
93 unsigned int texture_size;
94
95 /* physical locations of non-permanent maps */
96 unsigned long status_offset;
97 unsigned long buffers_offset;
98 unsigned long agp_textures_offset;
99 unsigned long cmd_dma_offset;
100} drm_savage_init_t;
101
102typedef union drm_savage_cmd_header drm_savage_cmd_header_t;
103typedef struct drm_savage_cmdbuf {
104 /* command buffer in client's address space */
105 drm_savage_cmd_header_t __user *cmd_addr;
106 unsigned int size; /* size of the command buffer in 64bit units */
107
108 unsigned int dma_idx; /* DMA buffer index to use */
109 int discard; /* discard DMA buffer when done */
110 /* vertex buffer in client's address space */
111 unsigned int __user *vb_addr;
112 unsigned int vb_size; /* size of client vertex buffer in bytes */
113 unsigned int vb_stride; /* stride of vertices in 32bit words */
114 /* boxes in client's address space */
115 drm_clip_rect_t __user *box_addr;
116 unsigned int nbox; /* number of clipping boxes */
117} drm_savage_cmdbuf_t;
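A hedged user-space sketch of driving the CMDBUF ioctl through this struct; it assumes <string.h> and <sys/ioctl.h>, an authenticated DRM file descriptor, and a DMA buffer index previously obtained via drmDMA. Real clients would normally go through libdrm rather than raw ioctl():

static int submit_cmdbuf(int fd, drm_savage_cmd_header_t *cmds,
			 unsigned int ncmds, unsigned int buf_idx)
{
	drm_savage_cmdbuf_t cmdbuf;

	memset(&cmdbuf, 0, sizeof(cmdbuf));
	cmdbuf.cmd_addr = cmds;		/* command headers and data */
	cmdbuf.size = ncmds;		/* in 64-bit units, one per header */
	cmdbuf.dma_idx = buf_idx;	/* DMA buffer to draw vertices from */
	cmdbuf.discard = 1;		/* release the DMA buffer when done */
	cmdbuf.nbox = 0;		/* no clipping boxes */
	return ioctl(fd, DRM_IOCTL_SAVAGE_CMDBUF, &cmdbuf);
}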
118
119#define SAVAGE_WAIT_2D 0x1 /* wait for 2D idle before updating event tag */
120#define SAVAGE_WAIT_3D 0x2 /* wait for 3D idle before updating event tag */
121#define SAVAGE_WAIT_IRQ 0x4 /* emit or wait for IRQ, not implemented yet */
122typedef struct drm_savage_event {
123 unsigned int count;
124 unsigned int flags;
125} drm_savage_event_emit_t, drm_savage_event_wait_t;
126
127/* Commands for the cmdbuf ioctl
128 */
129#define SAVAGE_CMD_STATE 0 /* a range of state registers */
130#define SAVAGE_CMD_DMA_PRIM 1 /* vertices from DMA buffer */
131#define SAVAGE_CMD_VB_PRIM 2 /* vertices from client vertex buffer */
132#define SAVAGE_CMD_DMA_IDX 3 /* indexed vertices from DMA buffer */
133#define SAVAGE_CMD_VB_IDX 4 /* indexed vertices from client vertex buffer */
134#define SAVAGE_CMD_CLEAR 5 /* clear buffers */
135#define SAVAGE_CMD_SWAP 6 /* swap buffers */
136
137/* Primitive types
138*/
139#define SAVAGE_PRIM_TRILIST 0 /* triangle list */
140#define SAVAGE_PRIM_TRISTRIP 1 /* triangle strip */
141#define SAVAGE_PRIM_TRIFAN 2 /* triangle fan */
142#define SAVAGE_PRIM_TRILIST_201 3 /* reorder verts for correct flat
143 * shading on s3d */
144
145/* Skip flags (vertex format)
146 */
147#define SAVAGE_SKIP_Z 0x01
148#define SAVAGE_SKIP_W 0x02
149#define SAVAGE_SKIP_C0 0x04
150#define SAVAGE_SKIP_C1 0x08
151#define SAVAGE_SKIP_S0 0x10
152#define SAVAGE_SKIP_T0 0x20
153#define SAVAGE_SKIP_ST0 0x30
154#define SAVAGE_SKIP_S1 0x40
155#define SAVAGE_SKIP_T1 0x80
156#define SAVAGE_SKIP_ST1 0xc0
157#define SAVAGE_SKIP_ALL_S3D 0x3f
158#define SAVAGE_SKIP_ALL_S4 0xff
159
160/* Buffer names for clear command
161 */
162#define SAVAGE_FRONT 0x1
163#define SAVAGE_BACK 0x2
164#define SAVAGE_DEPTH 0x4
165
166/* 64-bit command header
167 */
168union drm_savage_cmd_header {
169 struct {
170 unsigned char cmd; /* command */
171 unsigned char pad0;
172 unsigned short pad1;
173 unsigned short pad2;
174 unsigned short pad3;
175 } cmd; /* generic */
176 struct {
177 unsigned char cmd;
178 unsigned char global; /* need idle engine? */
179 unsigned short count; /* number of consecutive registers */
180 unsigned short start; /* first register */
181 unsigned short pad3;
182 } state; /* SAVAGE_CMD_STATE */
183 struct {
184 unsigned char cmd;
185 unsigned char prim; /* primitive type */
186 unsigned short skip; /* vertex format (skip flags) */
187 unsigned short count; /* number of vertices */
188 unsigned short start; /* first vertex in DMA/vertex buffer */
189 } prim; /* SAVAGE_CMD_DMA_PRIM, SAVAGE_CMD_VB_PRIM */
190 struct {
191 unsigned char cmd;
192 unsigned char prim;
193 unsigned short skip;
194 unsigned short count; /* number of indices that follow */
195 unsigned short pad3;
196 } idx; /* SAVAGE_CMD_DMA_IDX, SAVAGE_CMD_VB_IDX */
197 struct {
198 unsigned char cmd;
199 unsigned char pad0;
200 unsigned short pad1;
201 unsigned int flags;
202 } clear0; /* SAVAGE_CMD_CLEAR */
203 struct {
204 unsigned int mask;
205 unsigned int value;
206 } clear1; /* SAVAGE_CMD_CLEAR data */
207};
208
209#endif
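To make the header layout concrete, a hedged sketch of building a SAVAGE_CMD_STATE header for a run of consecutive registers (the function name and register index are illustrative, and <string.h> is assumed):

static union drm_savage_cmd_header savage_state_header(unsigned short first_reg,
						       unsigned short nregs)
{
	union drm_savage_cmd_header h;

	memset(&h, 0, sizeof(h));
	h.state.cmd = SAVAGE_CMD_STATE;
	h.state.global = 0;		/* engine need not idle first */
	h.state.count = nregs;		/* consecutive registers following */
	h.state.start = first_reg;	/* first register index */
	return h;
}

A caller would follow this header with nregs register values in the command stream.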
diff --git a/drivers/char/drm/savage_drv.c b/drivers/char/drm/savage_drv.c
new file mode 100644
index 000000000000..ac8d270427ca
--- /dev/null
+++ b/drivers/char/drm/savage_drv.c
@@ -0,0 +1,112 @@
1/* savage_drv.c -- Savage driver for Linux
2 *
3 * Copyright 2004 Felix Kuehling
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sub license,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the
14 * next paragraph) shall be included in all copies or substantial portions
15 * of the Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
18 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
19 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
20 * NON-INFRINGEMENT. IN NO EVENT SHALL FELIX KUEHLING BE LIABLE FOR
21 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
22 * CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
23 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
24 */
25
26#include <linux/config.h>
27#include "drmP.h"
28#include "savage_drm.h"
29#include "savage_drv.h"
30
31#include "drm_pciids.h"
32
33static int postinit( struct drm_device *dev, unsigned long flags )
34{
35 DRM_INFO( "Initialized %s %d.%d.%d %s on minor %d: %s\n",
36 DRIVER_NAME,
37 DRIVER_MAJOR,
38 DRIVER_MINOR,
39 DRIVER_PATCHLEVEL,
40 DRIVER_DATE,
41 dev->primary.minor,
42 pci_pretty_name(dev->pdev)
43 );
44 return 0;
45}
46
47static int version( drm_version_t *version )
48{
49	int len; /* used by the DRM_COPY macro */
50
51 version->version_major = DRIVER_MAJOR;
52 version->version_minor = DRIVER_MINOR;
53 version->version_patchlevel = DRIVER_PATCHLEVEL;
54 DRM_COPY( version->name, DRIVER_NAME );
55 DRM_COPY( version->date, DRIVER_DATE );
56 DRM_COPY( version->desc, DRIVER_DESC );
57 return 0;
58}
59
60static struct pci_device_id pciidlist[] = {
61 savage_PCI_IDS
62};
63
64extern drm_ioctl_desc_t savage_ioctls[];
65extern int savage_max_ioctl;
66
67static struct drm_driver driver = {
68 .driver_features =
69 DRIVER_USE_AGP | DRIVER_USE_MTRR |
70 DRIVER_HAVE_DMA | DRIVER_PCI_DMA,
71 .dev_priv_size = sizeof(drm_savage_buf_priv_t),
72 .preinit = savage_preinit,
73 .postinit = postinit,
74 .postcleanup = savage_postcleanup,
75 .reclaim_buffers = savage_reclaim_buffers,
76 .get_map_ofs = drm_core_get_map_ofs,
77 .get_reg_ofs = drm_core_get_reg_ofs,
78 .version = version,
79 .ioctls = savage_ioctls,
80 .dma_ioctl = savage_bci_buffers,
81 .fops = {
82 .owner = THIS_MODULE,
83 .open = drm_open,
84 .release = drm_release,
85 .ioctl = drm_ioctl,
86 .mmap = drm_mmap,
87 .poll = drm_poll,
88 .fasync = drm_fasync,
89 },
90 .pci_driver = {
91 .name = DRIVER_NAME,
92 .id_table = pciidlist,
93 }
94};
95
96static int __init savage_init(void)
97{
98 driver.num_ioctls = savage_max_ioctl;
99 return drm_init(&driver);
100}
101
102static void __exit savage_exit(void)
103{
104 drm_exit(&driver);
105}
106
107module_init(savage_init);
108module_exit(savage_exit);
109
110MODULE_AUTHOR( DRIVER_AUTHOR );
111MODULE_DESCRIPTION( DRIVER_DESC );
112MODULE_LICENSE("GPL and additional rights");
diff --git a/drivers/char/drm/savage_drv.h b/drivers/char/drm/savage_drv.h
new file mode 100644
index 000000000000..a45434944658
--- /dev/null
+++ b/drivers/char/drm/savage_drv.h
@@ -0,0 +1,579 @@
1/* savage_drv.h -- Private header for the savage driver
2 *
3 * Copyright 2004 Felix Kuehling
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sub license,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the
14 * next paragraph) shall be included in all copies or substantial portions
15 * of the Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
18 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
19 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
20 * NON-INFRINGEMENT. IN NO EVENT SHALL FELIX KUEHLING BE LIABLE FOR
21 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
22 * CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
23 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
24 */
25
26#ifndef __SAVAGE_DRV_H__
27#define __SAVAGE_DRV_H__
28
29#define DRIVER_AUTHOR "Felix Kuehling"
30
31#define DRIVER_NAME "savage"
32#define DRIVER_DESC "Savage3D/MX/IX, Savage4, SuperSavage, Twister, ProSavage[DDR]"
33#define DRIVER_DATE "20050313"
34
35#define DRIVER_MAJOR 2
36#define DRIVER_MINOR 4
37#define DRIVER_PATCHLEVEL 1
38/* Interface history:
39 *
40 * 1.x The DRM driver from the VIA/S3 code drop, basically a dummy
41 * 2.0 The first real DRM
42 * 2.1 Scissors registers managed by the DRM, 3D operations clipped by
43 * cliprects of the cmdbuf ioctl
44 * 2.2 Implemented SAVAGE_CMD_DMA_IDX and SAVAGE_CMD_VB_IDX
45 * 2.3 Event counters used by BCI_EVENT_EMIT/WAIT ioctls are now 32 bits
46 * wide and thus very long lived (unlikely to ever wrap). The size
47 * in the struct was 32 bits before, but only 16 bits were used
48 * 2.4 Implemented command DMA. Now drm_savage_init_t.cmd_dma_offset is
49 * actually used
50 */
51
52typedef struct drm_savage_age {
53 uint16_t event;
54 unsigned int wrap;
55} drm_savage_age_t;
56
57typedef struct drm_savage_buf_priv {
58 struct drm_savage_buf_priv *next;
59 struct drm_savage_buf_priv *prev;
60 drm_savage_age_t age;
61 drm_buf_t *buf;
62} drm_savage_buf_priv_t;
63
64typedef struct drm_savage_dma_page {
65 drm_savage_age_t age;
66 unsigned int used, flushed;
67} drm_savage_dma_page_t;
68#define SAVAGE_DMA_PAGE_SIZE 1024 /* in dwords */
69/* Fake DMA buffer size in bytes. 4 pages. Allows a maximum command
70 * size of 16kbytes or 4k entries. Minimum requirement would be
71 * 10kbytes for 255 40-byte vertices in one drawing command. */
72#define SAVAGE_FAKE_DMA_SIZE (SAVAGE_DMA_PAGE_SIZE*4*4)
73
74/* interesting bits of hardware state that are saved in dev_priv */
75typedef union {
76 struct drm_savage_common_state {
77 uint32_t vbaddr;
78 } common;
79 struct {
80 unsigned char pad[sizeof(struct drm_savage_common_state)];
81 uint32_t texctrl, texaddr;
82 uint32_t scstart, new_scstart;
83 uint32_t scend, new_scend;
84 } s3d;
85 struct {
86 unsigned char pad[sizeof(struct drm_savage_common_state)];
87 uint32_t texdescr, texaddr0, texaddr1;
88 uint32_t drawctrl0, new_drawctrl0;
89 uint32_t drawctrl1, new_drawctrl1;
90 } s4;
91} drm_savage_state_t;
92
93/* these chip tags should match the ones in the 2D driver in savage_regs.h. */
94enum savage_family {
95 S3_UNKNOWN = 0,
96 S3_SAVAGE3D,
97 S3_SAVAGE_MX,
98 S3_SAVAGE4,
99 S3_PROSAVAGE,
100 S3_TWISTER,
101 S3_PROSAVAGEDDR,
102 S3_SUPERSAVAGE,
103 S3_SAVAGE2000,
104 S3_LAST
105};
106
107#define S3_SAVAGE3D_SERIES(chip) ((chip>=S3_SAVAGE3D) && (chip<=S3_SAVAGE_MX))
108
109#define S3_SAVAGE4_SERIES(chip) ((chip==S3_SAVAGE4) \
110 || (chip==S3_PROSAVAGE) \
111 || (chip==S3_TWISTER) \
112 || (chip==S3_PROSAVAGEDDR))
113
114#define S3_SAVAGE_MOBILE_SERIES(chip) ((chip==S3_SAVAGE_MX) || (chip==S3_SUPERSAVAGE))
115
116#define S3_SAVAGE_SERIES(chip) ((chip>=S3_SAVAGE3D) && (chip<=S3_SAVAGE2000))
117
118#define S3_MOBILE_TWISTER_SERIES(chip) ((chip==S3_TWISTER) \
119 ||(chip==S3_PROSAVAGEDDR))
120
121/* flags */
122#define SAVAGE_IS_AGP 1
123
124typedef struct drm_savage_private {
125 drm_savage_sarea_t *sarea_priv;
126
127 drm_savage_buf_priv_t head, tail;
128
129 /* who am I? */
130 enum savage_family chipset;
131
132 unsigned int cob_size;
133 unsigned int bci_threshold_lo, bci_threshold_hi;
134 unsigned int dma_type;
135
136 /* frame buffer layout */
137 unsigned int fb_bpp;
138 unsigned int front_offset, front_pitch;
139 unsigned int back_offset, back_pitch;
140 unsigned int depth_bpp;
141 unsigned int depth_offset, depth_pitch;
142
143 /* bitmap descriptors for swap and clear */
144 unsigned int front_bd, back_bd, depth_bd;
145
146 /* local textures */
147 unsigned int texture_offset;
148 unsigned int texture_size;
149
150 /* memory regions in physical memory */
151 drm_local_map_t *sarea;
152 drm_local_map_t *mmio;
153 drm_local_map_t *fb;
154 drm_local_map_t *aperture;
155 drm_local_map_t *status;
156 drm_local_map_t *agp_textures;
157 drm_local_map_t *cmd_dma;
158 drm_local_map_t fake_dma;
159
160 struct {
161 int handle;
162 unsigned long base, size;
163 } mtrr[3];
164
165 /* BCI and status-related stuff */
166 volatile uint32_t *status_ptr, *bci_ptr;
167 uint32_t status_used_mask;
168 uint16_t event_counter;
169 unsigned int event_wrap;
170
171 /* Savage4 command DMA */
172 drm_savage_dma_page_t *dma_pages;
173 unsigned int nr_dma_pages, first_dma_page, current_dma_page;
174 drm_savage_age_t last_dma_age;
175
176 /* saved hw state for global/local check on S3D */
177 uint32_t hw_draw_ctrl, hw_zbuf_ctrl;
178 /* and for scissors (global, so don't emit if not changed) */
179 uint32_t hw_scissors_start, hw_scissors_end;
180
181 drm_savage_state_t state;
182
183 /* after emitting a wait cmd Savage3D needs 63 nops before next DMA */
184 unsigned int waiting;
185
186 /* config/hardware-dependent function pointers */
187 int (*wait_fifo)(struct drm_savage_private *dev_priv, unsigned int n);
188 int (*wait_evnt)(struct drm_savage_private *dev_priv, uint16_t e);
189 /* Err, there is a macro wait_event in include/linux/wait.h.
190 * Avoid unwanted macro expansion. */
191 void (*emit_clip_rect)(struct drm_savage_private *dev_priv,
192 drm_clip_rect_t *pbox);
193 void (*dma_flush)(struct drm_savage_private *dev_priv);
194} drm_savage_private_t;
195
196/* ioctls */
197extern int savage_bci_cmdbuf(DRM_IOCTL_ARGS);
198extern int savage_bci_buffers(DRM_IOCTL_ARGS);
199
200/* BCI functions */
201extern uint16_t savage_bci_emit_event(drm_savage_private_t *dev_priv,
202 unsigned int flags);
203extern void savage_freelist_put(drm_device_t *dev, drm_buf_t *buf);
204extern void savage_dma_reset(drm_savage_private_t *dev_priv);
205extern void savage_dma_wait(drm_savage_private_t *dev_priv, unsigned int page);
206extern uint32_t *savage_dma_alloc(drm_savage_private_t *dev_priv,
207 unsigned int n);
208extern int savage_preinit(drm_device_t *dev, unsigned long chipset);
209extern int savage_postcleanup(drm_device_t *dev);
210extern int savage_do_cleanup_bci(drm_device_t *dev);
211extern void savage_reclaim_buffers(drm_device_t *dev, DRMFILE filp);
212
213/* state functions */
214extern void savage_emit_clip_rect_s3d(drm_savage_private_t *dev_priv,
215 drm_clip_rect_t *pbox);
216extern void savage_emit_clip_rect_s4(drm_savage_private_t *dev_priv,
217 drm_clip_rect_t *pbox);
218
219#define SAVAGE_FB_SIZE_S3 0x01000000 /* 16MB */
220#define SAVAGE_FB_SIZE_S4 0x02000000 /* 32MB */
221#define SAVAGE_MMIO_SIZE 0x00080000 /* 512kB */
222#define SAVAGE_APERTURE_OFFSET 0x02000000 /* 32MB */
223#define SAVAGE_APERTURE_SIZE 0x05000000 /* 5 tiled surfaces, 16MB each */
224
225#define SAVAGE_BCI_OFFSET 0x00010000 /* offset of the BCI region
226 * inside the MMIO region */
227#define SAVAGE_BCI_FIFO_SIZE 32 /* number of entries in on-chip
228 * BCI FIFO */
229
230/*
231 * MMIO registers
232 */
233#define SAVAGE_STATUS_WORD0 0x48C00
234#define SAVAGE_STATUS_WORD1 0x48C04
235#define SAVAGE_ALT_STATUS_WORD0 0x48C60
236
237#define SAVAGE_FIFO_USED_MASK_S3D 0x0001ffff
238#define SAVAGE_FIFO_USED_MASK_S4 0x001fffff
239
240/* Copied from savage_bci.h in the 2D driver with some renaming. */
241
242/* Bitmap descriptors */
243#define SAVAGE_BD_STRIDE_SHIFT 0
244#define SAVAGE_BD_BPP_SHIFT 16
245#define SAVAGE_BD_TILE_SHIFT 24
246#define SAVAGE_BD_BW_DISABLE (1<<28)
247/* common: */
248#define SAVAGE_BD_TILE_LINEAR 0
249/* savage4, MX, IX, 3D */
250#define SAVAGE_BD_TILE_16BPP 2
251#define SAVAGE_BD_TILE_32BPP 3
252/* twister, prosavage, DDR, supersavage, 2000 */
253#define SAVAGE_BD_TILE_DEST 1
254#define SAVAGE_BD_TILE_TEXTURE 2
255/* GBD - BCI enable */
256/* savage4, MX, IX, 3D */
257#define SAVAGE_GBD_BCI_ENABLE 8
258/* twister, prosavage, DDR, supersavage, 2000 */
259#define SAVAGE_GBD_BCI_ENABLE_TWISTER 0
260
261#define SAVAGE_GBD_BIG_ENDIAN 4
262#define SAVAGE_GBD_LITTLE_ENDIAN 0
263#define SAVAGE_GBD_64 1
264
265/* Global Bitmap Descriptor */
266#define SAVAGE_BCI_GLB_BD_LOW 0x8168
267#define SAVAGE_BCI_GLB_BD_HIGH 0x816C
268
269/*
270 * BCI registers
271 */
272/* Savage4/Twister/ProSavage 3D registers */
273#define SAVAGE_DRAWLOCALCTRL_S4 0x1e
274#define SAVAGE_TEXPALADDR_S4 0x1f
275#define SAVAGE_TEXCTRL0_S4 0x20
276#define SAVAGE_TEXCTRL1_S4 0x21
277#define SAVAGE_TEXADDR0_S4 0x22
278#define SAVAGE_TEXADDR1_S4 0x23
279#define SAVAGE_TEXBLEND0_S4 0x24
280#define SAVAGE_TEXBLEND1_S4 0x25
281#define SAVAGE_TEXXPRCLR_S4 0x26 /* never used */
282#define SAVAGE_TEXDESCR_S4 0x27
283#define SAVAGE_FOGTABLE_S4 0x28
284#define SAVAGE_FOGCTRL_S4 0x30
285#define SAVAGE_STENCILCTRL_S4 0x31
286#define SAVAGE_ZBUFCTRL_S4 0x32
287#define SAVAGE_ZBUFOFF_S4 0x33
288#define SAVAGE_DESTCTRL_S4 0x34
289#define SAVAGE_DRAWCTRL0_S4 0x35
290#define SAVAGE_DRAWCTRL1_S4 0x36
291#define SAVAGE_ZWATERMARK_S4 0x37
292#define SAVAGE_DESTTEXRWWATERMARK_S4 0x38
293#define SAVAGE_TEXBLENDCOLOR_S4 0x39
294/* Savage3D/MX/IX 3D registers */
295#define SAVAGE_TEXPALADDR_S3D 0x18
296#define SAVAGE_TEXXPRCLR_S3D 0x19 /* never used */
297#define SAVAGE_TEXADDR_S3D 0x1A
298#define SAVAGE_TEXDESCR_S3D 0x1B
299#define SAVAGE_TEXCTRL_S3D 0x1C
300#define SAVAGE_FOGTABLE_S3D 0x20
301#define SAVAGE_FOGCTRL_S3D 0x30
302#define SAVAGE_DRAWCTRL_S3D 0x31
303#define SAVAGE_ZBUFCTRL_S3D 0x32
304#define SAVAGE_ZBUFOFF_S3D 0x33
305#define SAVAGE_DESTCTRL_S3D 0x34
306#define SAVAGE_SCSTART_S3D 0x35
307#define SAVAGE_SCEND_S3D 0x36
308#define SAVAGE_ZWATERMARK_S3D 0x37
309#define SAVAGE_DESTTEXRWWATERMARK_S3D 0x38
310/* common stuff */
311#define SAVAGE_VERTBUFADDR 0x3e
312#define SAVAGE_BITPLANEWTMASK 0xd7
313#define SAVAGE_DMABUFADDR 0x51
314
315/* texture enable bits (needed for tex addr checking) */
316#define SAVAGE_TEXCTRL_TEXEN_MASK 0x00010000 /* S3D */
317#define SAVAGE_TEXDESCR_TEX0EN_MASK 0x02000000 /* S4 */
318#define SAVAGE_TEXDESCR_TEX1EN_MASK 0x04000000 /* S4 */
319
320/* Global fields in Savage4/Twister/ProSavage 3D registers:
321 *
322 * All texture registers and DrawLocalCtrl are local. All other
323 * registers are global. */
324
325/* Global fields in Savage3D/MX/IX 3D registers:
326 *
327 * All texture registers are local. DrawCtrl and ZBufCtrl are
328 * partially local. All other registers are global.
329 *
330 * DrawCtrl global fields: cullMode, alphaTestCmpFunc, alphaTestEn, alphaRefVal
331 * ZBufCtrl global fields: zCmpFunc, zBufEn
332 */
333#define SAVAGE_DRAWCTRL_S3D_GLOBAL 0x03f3c00c
334#define SAVAGE_ZBUFCTRL_S3D_GLOBAL 0x00000027
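
/* Editor's sketch (an assumption about usage, the actual check lives in
 * the driver's state handling): a register update touches global state
 * iff it changes bits under the corresponding global mask, e.g.
 *
 *	if ((new_draw_ctrl ^ dev_priv->hw_draw_ctrl) &
 *	    SAVAGE_DRAWCTRL_S3D_GLOBAL)
 *		the engine must be idled before emitting the update
 */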
335
336/* Masks for scissor bits (drawCtrl[01] on s4, scissorStart/End on s3d)
337 */
338#define SAVAGE_SCISSOR_MASK_S4 0x00fff7ff
339#define SAVAGE_SCISSOR_MASK_S3D 0x07ff07ff
340
341/*
342 * BCI commands
343 */
344#define BCI_CMD_NOP 0x40000000
345#define BCI_CMD_RECT 0x48000000
346#define BCI_CMD_RECT_XP 0x01000000
347#define BCI_CMD_RECT_YP 0x02000000
348#define BCI_CMD_SCANLINE 0x50000000
349#define BCI_CMD_LINE 0x5C000000
350#define BCI_CMD_LINE_LAST_PIXEL 0x58000000
351#define BCI_CMD_BYTE_TEXT 0x63000000
352#define BCI_CMD_NT_BYTE_TEXT 0x67000000
353#define BCI_CMD_BIT_TEXT 0x6C000000
354#define BCI_CMD_GET_ROP(cmd) (((cmd) >> 16) & 0xFF)
355#define BCI_CMD_SET_ROP(cmd, rop) ((cmd) |= ((rop & 0xFF) << 16))
356#define BCI_CMD_SEND_COLOR 0x00008000
357
358#define BCI_CMD_CLIP_NONE 0x00000000
359#define BCI_CMD_CLIP_CURRENT 0x00002000
360#define BCI_CMD_CLIP_LR 0x00004000
361#define BCI_CMD_CLIP_NEW 0x00006000
362
363#define BCI_CMD_DEST_GBD 0x00000000
364#define BCI_CMD_DEST_PBD 0x00000800
365#define BCI_CMD_DEST_PBD_NEW 0x00000C00
366#define BCI_CMD_DEST_SBD 0x00001000
367#define BCI_CMD_DEST_SBD_NEW 0x00001400
368
369#define BCI_CMD_SRC_TRANSPARENT 0x00000200
370#define BCI_CMD_SRC_SOLID 0x00000000
371#define BCI_CMD_SRC_GBD 0x00000020
372#define BCI_CMD_SRC_COLOR 0x00000040
373#define BCI_CMD_SRC_MONO 0x00000060
374#define BCI_CMD_SRC_PBD_COLOR 0x00000080
375#define BCI_CMD_SRC_PBD_MONO 0x000000A0
376#define BCI_CMD_SRC_PBD_COLOR_NEW 0x000000C0
377#define BCI_CMD_SRC_PBD_MONO_NEW 0x000000E0
378#define BCI_CMD_SRC_SBD_COLOR 0x00000100
379#define BCI_CMD_SRC_SBD_MONO 0x00000120
380#define BCI_CMD_SRC_SBD_COLOR_NEW 0x00000140
381#define BCI_CMD_SRC_SBD_MONO_NEW 0x00000160
382
383#define BCI_CMD_PAT_TRANSPARENT 0x00000010
384#define BCI_CMD_PAT_NONE 0x00000000
385#define BCI_CMD_PAT_COLOR 0x00000002
386#define BCI_CMD_PAT_MONO 0x00000003
387#define BCI_CMD_PAT_PBD_COLOR 0x00000004
388#define BCI_CMD_PAT_PBD_MONO 0x00000005
389#define BCI_CMD_PAT_PBD_COLOR_NEW 0x00000006
390#define BCI_CMD_PAT_PBD_MONO_NEW 0x00000007
391#define BCI_CMD_PAT_SBD_COLOR 0x00000008
392#define BCI_CMD_PAT_SBD_MONO 0x00000009
393#define BCI_CMD_PAT_SBD_COLOR_NEW 0x0000000A
394#define BCI_CMD_PAT_SBD_MONO_NEW 0x0000000B
395
396#define BCI_BD_BW_DISABLE 0x10000000
397#define BCI_BD_TILE_MASK 0x03000000
398#define BCI_BD_TILE_NONE 0x00000000
399#define BCI_BD_TILE_16 0x02000000
400#define BCI_BD_TILE_32 0x03000000
401#define BCI_BD_GET_BPP(bd) (((bd) >> 16) & 0xFF)
402#define BCI_BD_SET_BPP(bd, bpp) ((bd) |= (((bpp) & 0xFF) << 16))
403#define BCI_BD_GET_STRIDE(bd) ((bd) & 0xFFFF)
404#define BCI_BD_SET_STRIDE(bd, st) ((bd) |= ((st) & 0xFFFF))
405
406#define BCI_CMD_SET_REGISTER 0x96000000
407
408#define BCI_CMD_WAIT 0xC0000000
409#define BCI_CMD_WAIT_3D 0x00010000
410#define BCI_CMD_WAIT_2D 0x00020000
411
412#define BCI_CMD_UPDATE_EVENT_TAG 0x98000000
413
414#define BCI_CMD_DRAW_PRIM 0x80000000
415#define BCI_CMD_DRAW_INDEXED_PRIM 0x88000000
416#define BCI_CMD_DRAW_CONT 0x01000000
417#define BCI_CMD_DRAW_TRILIST 0x00000000
418#define BCI_CMD_DRAW_TRISTRIP 0x02000000
419#define BCI_CMD_DRAW_TRIFAN 0x04000000
420#define BCI_CMD_DRAW_SKIPFLAGS 0x000000ff
421#define BCI_CMD_DRAW_NO_Z 0x00000001
422#define BCI_CMD_DRAW_NO_W 0x00000002
423#define BCI_CMD_DRAW_NO_CD 0x00000004
424#define BCI_CMD_DRAW_NO_CS 0x00000008
425#define BCI_CMD_DRAW_NO_U0 0x00000010
426#define BCI_CMD_DRAW_NO_V0 0x00000020
427#define BCI_CMD_DRAW_NO_UV0 0x00000030
428#define BCI_CMD_DRAW_NO_U1 0x00000040
429#define BCI_CMD_DRAW_NO_V1 0x00000080
430#define BCI_CMD_DRAW_NO_UV1 0x000000c0
431
432#define BCI_CMD_DMA 0xa8000000
433
434#define BCI_W_H(w, h) ((((h) << 16) | (w)) & 0x0FFF0FFF)
435#define BCI_X_Y(x, y) ((((y) << 16) | (x)) & 0x0FFF0FFF)
436#define BCI_X_W(x, w) ((((w) << 16) | (x)) & 0x0FFF0FFF)
437#define BCI_CLIP_LR(l, r) ((((r) << 16) | (l)) & 0x0FFF0FFF)
438#define BCI_CLIP_TL(t, l) ((((t) << 16) | (l)) & 0x0FFF0FFF)
439#define BCI_CLIP_BR(b, r) ((((b) << 16) | (r)) & 0x0FFF0FFF)
440
441#define BCI_LINE_X_Y(x, y) (((y) << 16) | ((x) & 0xFFFF))
442#define BCI_LINE_STEPS(diag, axi) (((axi) << 16) | ((diag) & 0xFFFF))
443#define BCI_LINE_MISC(maj, ym, xp, yp, err) \
444 (((maj) & 0x1FFF) | \
445 ((ym) ? 1<<13 : 0) | \
446 ((xp) ? 1<<14 : 0) | \
447 ((yp) ? 1<<15 : 0) | \
448 ((err) << 16))
449
450/*
451 * common commands
452 */
453#define BCI_SET_REGISTERS( first, n ) \
454 BCI_WRITE(BCI_CMD_SET_REGISTER | \
455 ((uint32_t)(n) & 0xff) << 16 | \
456 ((uint32_t)(first) & 0xffff))
457#define DMA_SET_REGISTERS( first, n ) \
458 DMA_WRITE(BCI_CMD_SET_REGISTER | \
459 ((uint32_t)(n) & 0xff) << 16 | \
460 ((uint32_t)(first) & 0xffff))
461
462#define BCI_DRAW_PRIMITIVE(n, type, skip) \
463 BCI_WRITE(BCI_CMD_DRAW_PRIM | (type) | (skip) | \
464 ((n) << 16))
465#define DMA_DRAW_PRIMITIVE(n, type, skip) \
466 DMA_WRITE(BCI_CMD_DRAW_PRIM | (type) | (skip) | \
467 ((n) << 16))
468
469#define BCI_DRAW_INDICES_S3D(n, type, i0) \
470 BCI_WRITE(BCI_CMD_DRAW_INDEXED_PRIM | (type) | \
471 ((n) << 16) | (i0))
472
473#define BCI_DRAW_INDICES_S4(n, type, skip) \
474 BCI_WRITE(BCI_CMD_DRAW_INDEXED_PRIM | (type) | \
475 (skip) | ((n) << 16))
476
477#define BCI_DMA(n) \
478 BCI_WRITE(BCI_CMD_DMA | (((n) >> 1) - 1))
479
480/*
481 * access to MMIO
482 */
483#define SAVAGE_READ(reg) DRM_READ32( dev_priv->mmio, (reg) )
484#define SAVAGE_WRITE(reg) DRM_WRITE32( dev_priv->mmio, (reg) )
485
486/*
487 * access to the burst command interface (BCI)
488 */
489#define SAVAGE_BCI_DEBUG 1
490
491#define BCI_LOCALS volatile uint32_t *bci_ptr;
492
493#define BEGIN_BCI( n ) do { \
494 dev_priv->wait_fifo(dev_priv, (n)); \
495 bci_ptr = dev_priv->bci_ptr; \
496} while(0)
497
498#define BCI_WRITE( val ) *bci_ptr++ = (uint32_t)(val)
499
500#define BCI_COPY_FROM_USER(src,n) do { \
501 unsigned int i; \
502 for (i = 0; i < n; ++i) { \
503 uint32_t val; \
504 DRM_GET_USER_UNCHECKED(val, &((uint32_t*)(src))[i]); \
505 BCI_WRITE(val); \
506 } \
507} while(0)
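
/* Editor's sketch of the typical BCI pattern, mirroring its use in
 * savage_state.c: BCI_LOCALS declares bci_ptr, BEGIN_BCI(n) reserves n
 * FIFO entries, and each BCI_WRITE emits exactly one dword:
 *
 *	{
 *		BCI_LOCALS;
 *		BEGIN_BCI(2);
 *		BCI_SET_REGISTERS(SAVAGE_VERTBUFADDR, 1);
 *		BCI_WRITE(dmabuf->bus_address | dev_priv->dma_type);
 *	}
 */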
508
509/*
510 * command DMA support
511 */
512#define SAVAGE_DMA_DEBUG 1
513
514#define DMA_LOCALS uint32_t *dma_ptr;
515
516#define BEGIN_DMA( n ) do { \
517 unsigned int cur = dev_priv->current_dma_page; \
518 unsigned int rest = SAVAGE_DMA_PAGE_SIZE - \
519 dev_priv->dma_pages[cur].used; \
520 if ((n) > rest) { \
521 dma_ptr = savage_dma_alloc(dev_priv, (n)); \
522 } else { /* fast path for small allocations */ \
523 dma_ptr = (uint32_t *)dev_priv->cmd_dma->handle + \
524 cur * SAVAGE_DMA_PAGE_SIZE + \
525 dev_priv->dma_pages[cur].used; \
526 if (dev_priv->dma_pages[cur].used == 0) \
527 savage_dma_wait(dev_priv, cur); \
528 dev_priv->dma_pages[cur].used += (n); \
529 } \
530} while(0)
531
532#define DMA_WRITE( val ) *dma_ptr++ = (uint32_t)(val)
533
534#define DMA_COPY_FROM_USER(src,n) do { \
535 DRM_COPY_FROM_USER_UNCHECKED(dma_ptr, (src), (n)*4); \
536 dma_ptr += n; \
537} while(0)
538
539#if SAVAGE_DMA_DEBUG
540#define DMA_COMMIT() do { \
541 unsigned int cur = dev_priv->current_dma_page; \
542 uint32_t *expected = (uint32_t *)dev_priv->cmd_dma->handle + \
543 cur * SAVAGE_DMA_PAGE_SIZE + \
544 dev_priv->dma_pages[cur].used; \
545 if (dma_ptr != expected) { \
546 DRM_ERROR("DMA allocation and use don't match: " \
547 "%p != %p\n", expected, dma_ptr); \
548 savage_dma_reset(dev_priv); \
549 } \
550} while(0)
551#else
552#define DMA_COMMIT() do {/* nothing */} while(0)
553#endif
554
555#define DMA_FLUSH() dev_priv->dma_flush(dev_priv)
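
/* Editor's sketch of the typical command-DMA pattern, matching the code
 * in savage_state.c (e.g. savage_dispatch_clear): every BEGIN_DMA(n)
 * must be paired with exactly n DMA_WRITEs followed by a DMA_COMMIT:
 *
 *	{
 *		DMA_LOCALS;
 *		BEGIN_DMA(2);
 *		DMA_SET_REGISTERS(SAVAGE_BITPLANEWTMASK, 1);
 *		DMA_WRITE(0xffffffff);
 *		DMA_COMMIT();
 *	}
 */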
556
557/* Buffer aging via event tag
558 */
559
560#define UPDATE_EVENT_COUNTER( ) do { \
561 if (dev_priv->status_ptr) { \
562 uint16_t count; \
563 /* coordinate with Xserver */ \
564 count = dev_priv->status_ptr[1023]; \
565 if (count < dev_priv->event_counter) \
566 dev_priv->event_wrap++; \
567 dev_priv->event_counter = count; \
568 } \
569} while(0)
570
571#define SET_AGE( age, e, w ) do { \
572 (age)->event = e; \
573 (age)->wrap = w; \
574} while(0)
575
576#define TEST_AGE( age, e, w ) \
577 ( (age)->wrap < (w) || ( (age)->wrap == (w) && (age)->event <= (e) ) )
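
/* Editor's sketch of how the aging macros fit together (buffer_is_idle
 * is a hypothetical helper, not part of this driver): a buffer may be
 * reused once the hardware's completed-event counter has passed the
 * (event, wrap) pair stored in its age.
 *
 *	static int buffer_is_idle(drm_savage_private_t *dev_priv,
 *				  const drm_savage_age_t *age)
 *	{
 *		UPDATE_EVENT_COUNTER();
 *		return TEST_AGE(age, dev_priv->event_counter,
 *				dev_priv->event_wrap);
 *	}
 */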
578
579#endif /* __SAVAGE_DRV_H__ */
diff --git a/drivers/char/drm/savage_state.c b/drivers/char/drm/savage_state.c
new file mode 100644
index 000000000000..475695a00083
--- /dev/null
+++ b/drivers/char/drm/savage_state.c
@@ -0,0 +1,1146 @@
1/* savage_state.c -- State and drawing support for Savage
2 *
3 * Copyright 2004 Felix Kuehling
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sub license,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the
14 * next paragraph) shall be included in all copies or substantial portions
15 * of the Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
18 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
19 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
20 * NON-INFRINGEMENT. IN NO EVENT SHALL FELIX KUEHLING BE LIABLE FOR
21 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
22 * CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
23 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
24 */
25#include "drmP.h"
26#include "savage_drm.h"
27#include "savage_drv.h"
28
29void savage_emit_clip_rect_s3d(drm_savage_private_t *dev_priv,
30 drm_clip_rect_t *pbox)
31{
32 uint32_t scstart = dev_priv->state.s3d.new_scstart;
33 uint32_t scend = dev_priv->state.s3d.new_scend;
34 scstart = (scstart & ~SAVAGE_SCISSOR_MASK_S3D) |
35 ((uint32_t)pbox->x1 & 0x000007ff) |
36 (((uint32_t)pbox->y1 << 16) & 0x07ff0000);
37 scend = (scend & ~SAVAGE_SCISSOR_MASK_S3D) |
38 (((uint32_t)pbox->x2-1) & 0x000007ff) |
39 ((((uint32_t)pbox->y2-1) << 16) & 0x07ff0000);
40 if (scstart != dev_priv->state.s3d.scstart ||
41 scend != dev_priv->state.s3d.scend) {
42 DMA_LOCALS;
43 BEGIN_DMA(4);
44 DMA_WRITE(BCI_CMD_WAIT|BCI_CMD_WAIT_3D);
45 DMA_SET_REGISTERS(SAVAGE_SCSTART_S3D, 2);
46 DMA_WRITE(scstart);
47 DMA_WRITE(scend);
48 dev_priv->state.s3d.scstart = scstart;
49 dev_priv->state.s3d.scend = scend;
50 dev_priv->waiting = 1;
51 DMA_COMMIT();
52 }
53}
54
55void savage_emit_clip_rect_s4(drm_savage_private_t *dev_priv,
56 drm_clip_rect_t *pbox)
57{
58 uint32_t drawctrl0 = dev_priv->state.s4.new_drawctrl0;
59 uint32_t drawctrl1 = dev_priv->state.s4.new_drawctrl1;
60 drawctrl0 = (drawctrl0 & ~SAVAGE_SCISSOR_MASK_S4) |
61 ((uint32_t)pbox->x1 & 0x000007ff) |
62 (((uint32_t)pbox->y1 << 12) & 0x00fff000);
63 drawctrl1 = (drawctrl1 & ~SAVAGE_SCISSOR_MASK_S4) |
64 (((uint32_t)pbox->x2-1) & 0x000007ff) |
65 ((((uint32_t)pbox->y2-1) << 12) & 0x00fff000);
66 if (drawctrl0 != dev_priv->state.s4.drawctrl0 ||
67 drawctrl1 != dev_priv->state.s4.drawctrl1) {
68 DMA_LOCALS;
69 BEGIN_DMA(4);
70 DMA_WRITE(BCI_CMD_WAIT|BCI_CMD_WAIT_3D);
71 DMA_SET_REGISTERS(SAVAGE_DRAWCTRL0_S4, 2);
72 DMA_WRITE(drawctrl0);
73 DMA_WRITE(drawctrl1);
74 dev_priv->state.s4.drawctrl0 = drawctrl0;
75 dev_priv->state.s4.drawctrl1 = drawctrl1;
76 dev_priv->waiting = 1;
77 DMA_COMMIT();
78 }
79}
80
81static int savage_verify_texaddr(drm_savage_private_t *dev_priv, int unit,
82 uint32_t addr)
83{
84 if ((addr & 6) != 2) { /* reserved bits */
85 DRM_ERROR("bad texAddr%d %08x (reserved bits)\n", unit, addr);
86 return DRM_ERR(EINVAL);
87 }
88 if (!(addr & 1)) { /* local */
89 addr &= ~7;
90 if (addr < dev_priv->texture_offset ||
91 addr >= dev_priv->texture_offset+dev_priv->texture_size) {
92 DRM_ERROR("bad texAddr%d %08x (local addr out of range)\n",
93 unit, addr);
94 return DRM_ERR(EINVAL);
95 }
96 } else { /* AGP */
97 if (!dev_priv->agp_textures) {
98 DRM_ERROR("bad texAddr%d %08x (AGP not available)\n",
99 unit, addr);
100 return DRM_ERR(EINVAL);
101 }
102 addr &= ~7;
103 if (addr < dev_priv->agp_textures->offset ||
104 addr >= (dev_priv->agp_textures->offset +
105 dev_priv->agp_textures->size)) {
106 DRM_ERROR("bad texAddr%d %08x (AGP addr out of range)\n",
107 unit, addr);
108 return DRM_ERR(EINVAL);
109 }
110 }
111 return 0;
112}
113
114#define SAVE_STATE(reg,where) \
115 if(start <= reg && start+count > reg) \
116 DRM_GET_USER_UNCHECKED(dev_priv->state.where, &regs[reg-start])
117#define SAVE_STATE_MASK(reg,where,mask) do { \
118 if(start <= reg && start+count > reg) { \
119 uint32_t tmp; \
120 DRM_GET_USER_UNCHECKED(tmp, &regs[reg-start]); \
121 dev_priv->state.where = (tmp & (mask)) | \
122 (dev_priv->state.where & ~(mask)); \
123 } \
124} while (0)
125static int savage_verify_state_s3d(drm_savage_private_t *dev_priv,
126 unsigned int start, unsigned int count,
127 const uint32_t __user *regs)
128{
129 if (start < SAVAGE_TEXPALADDR_S3D ||
130 start+count-1 > SAVAGE_DESTTEXRWWATERMARK_S3D) {
131 DRM_ERROR("invalid register range (0x%04x-0x%04x)\n",
132 start, start+count-1);
133 return DRM_ERR(EINVAL);
134 }
135
136 SAVE_STATE_MASK(SAVAGE_SCSTART_S3D, s3d.new_scstart,
137 ~SAVAGE_SCISSOR_MASK_S3D);
138 SAVE_STATE_MASK(SAVAGE_SCEND_S3D, s3d.new_scend,
139 ~SAVAGE_SCISSOR_MASK_S3D);
140
141 /* if any texture regs were changed ... */
142 if (start <= SAVAGE_TEXCTRL_S3D &&
143 start+count > SAVAGE_TEXPALADDR_S3D) {
144 /* ... check texture state */
145 SAVE_STATE(SAVAGE_TEXCTRL_S3D, s3d.texctrl);
146 SAVE_STATE(SAVAGE_TEXADDR_S3D, s3d.texaddr);
147 if (dev_priv->state.s3d.texctrl & SAVAGE_TEXCTRL_TEXEN_MASK)
148 return savage_verify_texaddr(
149 dev_priv, 0, dev_priv->state.s3d.texaddr);
150 }
151
152 return 0;
153}
154
155static int savage_verify_state_s4(drm_savage_private_t *dev_priv,
156 unsigned int start, unsigned int count,
157 const uint32_t __user *regs)
158{
159 int ret = 0;
160
161 if (start < SAVAGE_DRAWLOCALCTRL_S4 ||
162 start+count-1 > SAVAGE_TEXBLENDCOLOR_S4) {
163 DRM_ERROR("invalid register range (0x%04x-0x%04x)\n",
164 start, start+count-1);
165 return DRM_ERR(EINVAL);
166 }
167
168 SAVE_STATE_MASK(SAVAGE_DRAWCTRL0_S4, s4.new_drawctrl0,
169 ~SAVAGE_SCISSOR_MASK_S4);
170 SAVE_STATE_MASK(SAVAGE_DRAWCTRL1_S4, s4.new_drawctrl1,
171 ~SAVAGE_SCISSOR_MASK_S4);
172
173 /* if any texture regs were changed ... */
174 if (start <= SAVAGE_TEXDESCR_S4 &&
175 start+count > SAVAGE_TEXPALADDR_S4) {
176 /* ... check texture state */
177 SAVE_STATE(SAVAGE_TEXDESCR_S4, s4.texdescr);
178 SAVE_STATE(SAVAGE_TEXADDR0_S4, s4.texaddr0);
179 SAVE_STATE(SAVAGE_TEXADDR1_S4, s4.texaddr1);
180 if (dev_priv->state.s4.texdescr & SAVAGE_TEXDESCR_TEX0EN_MASK)
181 ret |= savage_verify_texaddr(
182 dev_priv, 0, dev_priv->state.s4.texaddr0);
183 if (dev_priv->state.s4.texdescr & SAVAGE_TEXDESCR_TEX1EN_MASK)
184 ret |= savage_verify_texaddr(
185 dev_priv, 1, dev_priv->state.s4.texaddr1);
186 }
187
188 return ret;
189}
190#undef SAVE_STATE
191#undef SAVE_STATE_MASK
192
193static int savage_dispatch_state(drm_savage_private_t *dev_priv,
194 const drm_savage_cmd_header_t *cmd_header,
195 const uint32_t __user *regs)
196{
197 unsigned int count = cmd_header->state.count;
198 unsigned int start = cmd_header->state.start;
199 unsigned int count2 = 0;
200 unsigned int bci_size;
201 int ret;
202 DMA_LOCALS;
203
204 if (!count)
205 return 0;
206
207 if (DRM_VERIFYAREA_READ(regs, count*4))
208 return DRM_ERR(EFAULT);
209
210 if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) {
211 ret = savage_verify_state_s3d(dev_priv, start, count, regs);
212 if (ret != 0)
213 return ret;
214 /* scissor regs are emitted in savage_dispatch_draw */
215 if (start < SAVAGE_SCSTART_S3D) {
216 if (start+count > SAVAGE_SCEND_S3D+1)
217 count2 = count - (SAVAGE_SCEND_S3D+1 - start);
218 if (start+count > SAVAGE_SCSTART_S3D)
219 count = SAVAGE_SCSTART_S3D - start;
220 } else if (start <= SAVAGE_SCEND_S3D) {
221 if (start+count > SAVAGE_SCEND_S3D+1) {
222 count -= SAVAGE_SCEND_S3D+1 - start;
223 start = SAVAGE_SCEND_S3D+1;
224 } else
225 return 0;
226 }
227 } else {
228 ret = savage_verify_state_s4(dev_priv, start, count, regs);
229 if (ret != 0)
230 return ret;
231 /* scissor regs are emitted in savage_dispatch_draw */
232 if (start < SAVAGE_DRAWCTRL0_S4) {
233 if (start+count > SAVAGE_DRAWCTRL1_S4+1)
234 count2 = count - (SAVAGE_DRAWCTRL1_S4+1 - start);
235 if (start+count > SAVAGE_DRAWCTRL0_S4)
236 count = SAVAGE_DRAWCTRL0_S4 - start;
237 } else if (start <= SAVAGE_DRAWCTRL1_S4) {
238 if (start+count > SAVAGE_DRAWCTRL1_S4+1) {
239 count -= SAVAGE_DRAWCTRL1_S4+1 - start;
240 start = SAVAGE_DRAWCTRL1_S4+1;
241 } else
242 return 0;
243 }
244 }
245
246 bci_size = count + (count+254)/255 + count2 + (count2+254)/255;
247
248 if (cmd_header->state.global) {
249 BEGIN_DMA(bci_size+1);
250 DMA_WRITE(BCI_CMD_WAIT | BCI_CMD_WAIT_3D);
251 dev_priv->waiting = 1;
252 } else {
253 BEGIN_DMA(bci_size);
254 }
255
256 do {
257 while (count > 0) {
258 unsigned int n = count < 255 ? count : 255;
259 DMA_SET_REGISTERS(start, n);
260 DMA_COPY_FROM_USER(regs, n);
261 count -= n;
262 start += n;
263 regs += n;
264 }
265 start += 2; /* skip the two scissor registers */
266 regs += 2;
267 count = count2;
268 count2 = 0;
269 } while (count);
270
271 DMA_COMMIT();
272
273 return 0;
274}
275
276static int savage_dispatch_dma_prim(drm_savage_private_t *dev_priv,
277 const drm_savage_cmd_header_t *cmd_header,
278 const drm_buf_t *dmabuf)
279{
280 unsigned char reorder = 0;
281 unsigned int prim = cmd_header->prim.prim;
282 unsigned int skip = cmd_header->prim.skip;
283 unsigned int n = cmd_header->prim.count;
284 unsigned int start = cmd_header->prim.start;
285 unsigned int i;
286 BCI_LOCALS;
287
288 if (!dmabuf) {
289 DRM_ERROR("called without dma buffers!\n");
290 return DRM_ERR(EINVAL);
291 }
292
293 if (!n)
294 return 0;
295
296 switch (prim) {
297 case SAVAGE_PRIM_TRILIST_201:
298 reorder = 1;
299 prim = SAVAGE_PRIM_TRILIST; /* fall through */
300 case SAVAGE_PRIM_TRILIST:
301 if (n % 3 != 0) {
302 DRM_ERROR("wrong number of vertices %u in TRILIST\n",
303 n);
304 return DRM_ERR(EINVAL);
305 }
306 break;
307 case SAVAGE_PRIM_TRISTRIP:
308 case SAVAGE_PRIM_TRIFAN:
309 if (n < 3) {
310 DRM_ERROR("wrong number of vertices %u in TRIFAN/STRIP\n",
311 n);
312 return DRM_ERR(EINVAL);
313 }
314 break;
315 default:
316 DRM_ERROR("invalid primitive type %u\n", prim);
317 return DRM_ERR(EINVAL);
318 }
319
320 if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) {
321 if (skip != 0) {
322 DRM_ERROR("invalid skip flags 0x%04x for DMA\n",
323 skip);
324 return DRM_ERR(EINVAL);
325 }
326 } else {
327 unsigned int size = 10 - (skip & 1) - (skip >> 1 & 1) -
328 (skip >> 2 & 1) - (skip >> 3 & 1) - (skip >> 4 & 1) -
329 (skip >> 5 & 1) - (skip >> 6 & 1) - (skip >> 7 & 1);
330 if (skip > SAVAGE_SKIP_ALL_S4 || size != 8) {
331 DRM_ERROR("invalid skip flags 0x%04x for DMA\n",
332 skip);
333 return DRM_ERR(EINVAL);
334 }
335 if (reorder) {
336 DRM_ERROR("TRILIST_201 used on Savage4 hardware\n");
337 return DRM_ERR(EINVAL);
338 }
339 }
340
341 if (start + n > dmabuf->total/32) {
342 DRM_ERROR("vertex indices (%u-%u) out of range (0-%u)\n",
343 start, start + n - 1, dmabuf->total/32 - 1);
344 return DRM_ERR(EINVAL);
345 }
346
347 /* Vertex DMA doesn't work with command DMA at the same time,
348 * so we use BCI_... to submit commands here. Flush buffered
349 * faked DMA first. */
350 DMA_FLUSH();
351
352 if (dmabuf->bus_address != dev_priv->state.common.vbaddr) {
353 BEGIN_BCI(2);
354 BCI_SET_REGISTERS(SAVAGE_VERTBUFADDR, 1);
355 BCI_WRITE(dmabuf->bus_address | dev_priv->dma_type);
356 dev_priv->state.common.vbaddr = dmabuf->bus_address;
357 }
358 if (S3_SAVAGE3D_SERIES(dev_priv->chipset) && dev_priv->waiting) {
359 /* Workaround for what looks like a hardware bug. If a
360 * WAIT_3D_IDLE was emitted some time before the
361 * indexed drawing command then the engine will lock
362 * up. There are two known workarounds:
363 * WAIT_IDLE_EMPTY or emit at least 63 NOPs. */
364 BEGIN_BCI(63);
365 for (i = 0; i < 63; ++i)
366 BCI_WRITE(BCI_CMD_WAIT);
367 dev_priv->waiting = 0;
368 }
369
370 prim <<= 25;
371 while (n != 0) {
372 /* Can emit up to 255 indices (85 triangles) at once. */
373 unsigned int count = n > 255 ? 255 : n;
374 if (reorder) {
375 /* Need to reorder indices for correct flat
376 * shading while preserving the clock sense
377 * for correct culling. Only on Savage3D. */
378 int reorder[3] = {-1, -1, -1};
379 reorder[start%3] = 2;
380
381 BEGIN_BCI((count+1+1)/2);
382 BCI_DRAW_INDICES_S3D(count, prim, start+2);
383
384 for (i = start+1; i+1 < start+count; i += 2)
385 BCI_WRITE((i + reorder[i % 3]) |
386 ((i+1 + reorder[(i+1) % 3]) << 16));
387 if (i < start+count)
388 BCI_WRITE(i + reorder[i%3]);
389 } else if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) {
390 BEGIN_BCI((count+1+1)/2);
391 BCI_DRAW_INDICES_S3D(count, prim, start);
392
393 for (i = start+1; i+1 < start+count; i += 2)
394 BCI_WRITE(i | ((i+1) << 16));
395 if (i < start+count)
396 BCI_WRITE(i);
397 } else {
398 BEGIN_BCI((count+2+1)/2);
399 BCI_DRAW_INDICES_S4(count, prim, skip);
400
401 for (i = start; i+1 < start+count; i += 2)
402 BCI_WRITE(i | ((i+1) << 16));
403 if (i < start+count)
404 BCI_WRITE(i);
405 }
406
407 start += count;
408 n -= count;
409
410 prim |= BCI_CMD_DRAW_CONT;
411 }
412
413 return 0;
414}
415
416static int savage_dispatch_vb_prim(drm_savage_private_t *dev_priv,
417 const drm_savage_cmd_header_t *cmd_header,
418 const uint32_t __user *vtxbuf,
419 unsigned int vb_size,
420 unsigned int vb_stride)
421{
422 unsigned char reorder = 0;
423 unsigned int prim = cmd_header->prim.prim;
424 unsigned int skip = cmd_header->prim.skip;
425 unsigned int n = cmd_header->prim.count;
426 unsigned int start = cmd_header->prim.start;
427 unsigned int vtx_size;
428 unsigned int i;
429 DMA_LOCALS;
430
431 if (!n)
432 return 0;
433
434 switch (prim) {
435 case SAVAGE_PRIM_TRILIST_201:
436 reorder = 1;
437 prim = SAVAGE_PRIM_TRILIST; /* fall through */
438 case SAVAGE_PRIM_TRILIST:
439 if (n % 3 != 0) {
440 DRM_ERROR("wrong number of vertices %u in TRILIST\n",
441 n);
442 return DRM_ERR(EINVAL);
443 }
444 break;
445 case SAVAGE_PRIM_TRISTRIP:
446 case SAVAGE_PRIM_TRIFAN:
447 if (n < 3) {
448 DRM_ERROR("wrong number of vertices %u in TRIFAN/STRIP\n",
449 n);
450 return DRM_ERR(EINVAL);
451 }
452 break;
453 default:
454 DRM_ERROR("invalid primitive type %u\n", prim);
455 return DRM_ERR(EINVAL);
456 }
457
458 if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) {
459 if (skip > SAVAGE_SKIP_ALL_S3D) {
460 DRM_ERROR("invalid skip flags 0x%04x\n", skip);
461 return DRM_ERR(EINVAL);
462 }
463 vtx_size = 8; /* full vertex */
464 } else {
465 if (skip > SAVAGE_SKIP_ALL_S4) {
466 DRM_ERROR("invalid skip flags 0x%04x\n", skip);
467 return DRM_ERR(EINVAL);
468 }
469 vtx_size = 10; /* full vertex */
470 }
471
472 vtx_size -= (skip & 1) + (skip >> 1 & 1) +
473 (skip >> 2 & 1) + (skip >> 3 & 1) + (skip >> 4 & 1) +
474 (skip >> 5 & 1) + (skip >> 6 & 1) + (skip >> 7 & 1);
475
476 if (vtx_size > vb_stride) {
477 DRM_ERROR("vertex size greater than vb stride (%u > %u)\n",
478 vtx_size, vb_stride);
479 return DRM_ERR(EINVAL);
480 }
481
482 if (start + n > vb_size / (vb_stride*4)) {
483 DRM_ERROR("vertex indices (%u-%u) out of range (0-%u)\n",
484 start, start + n - 1, vb_size / (vb_stride*4) - 1);
485 return DRM_ERR(EINVAL);
486 }
487
488 prim <<= 25;
489 while (n != 0) {
490 /* Can emit up to 255 vertices (85 triangles) at once. */
491 unsigned int count = n > 255 ? 255 : n;
492 if (reorder) {
493 /* Need to reorder vertices for correct flat
494 * shading while preserving the clock sense
495 * for correct culling. Only on Savage3D. */
496 int reorder[3] = {-1, -1, -1};
497 reorder[start%3] = 2;
498
499 BEGIN_DMA(count*vtx_size+1);
500 DMA_DRAW_PRIMITIVE(count, prim, skip);
501
502 for (i = start; i < start+count; ++i) {
503 unsigned int j = i + reorder[i % 3];
504 DMA_COPY_FROM_USER(&vtxbuf[vb_stride*j],
505 vtx_size);
506 }
507
508 DMA_COMMIT();
509 } else {
510 BEGIN_DMA(count*vtx_size+1);
511 DMA_DRAW_PRIMITIVE(count, prim, skip);
512
513 if (vb_stride == vtx_size) {
514 DMA_COPY_FROM_USER(&vtxbuf[vb_stride*start],
515 vtx_size*count);
516 } else {
517 for (i = start; i < start+count; ++i) {
518 DMA_COPY_FROM_USER(
519 &vtxbuf[vb_stride*i],
520 vtx_size);
521 }
522 }
523
524 DMA_COMMIT();
525 }
526
527 start += count;
528 n -= count;
529
530 prim |= BCI_CMD_DRAW_CONT;
531 }
532
533 return 0;
534}
535
536static int savage_dispatch_dma_idx(drm_savage_private_t *dev_priv,
537 const drm_savage_cmd_header_t *cmd_header,
538 const uint16_t __user *usr_idx,
539 const drm_buf_t *dmabuf)
540{
541 unsigned char reorder = 0;
542 unsigned int prim = cmd_header->idx.prim;
543 unsigned int skip = cmd_header->idx.skip;
544 unsigned int n = cmd_header->idx.count;
545 unsigned int i;
546 BCI_LOCALS;
547
548 if (!dmabuf) {
549 DRM_ERROR("called without dma buffers!\n");
550 return DRM_ERR(EINVAL);
551 }
552
553 if (!n)
554 return 0;
555
556 switch (prim) {
557 case SAVAGE_PRIM_TRILIST_201:
558 reorder = 1;
559 prim = SAVAGE_PRIM_TRILIST; /* fall through */
560 case SAVAGE_PRIM_TRILIST:
561 if (n % 3 != 0) {
562 DRM_ERROR("wrong number of indices %u in TRILIST\n",
563 n);
564 return DRM_ERR(EINVAL);
565 }
566 break;
567 case SAVAGE_PRIM_TRISTRIP:
568 case SAVAGE_PRIM_TRIFAN:
569 if (n < 3) {
570 DRM_ERROR("wrong number of indices %u in TRIFAN/STRIP\n",
571 n);
572 return DRM_ERR(EINVAL);
573 }
574 break;
575 default:
576 DRM_ERROR("invalid primitive type %u\n", prim);
577 return DRM_ERR(EINVAL);
578 }
579
580 if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) {
581 if (skip != 0) {
582 DRM_ERROR("invalid skip flags 0x%04x for DMA\n",
583 skip);
584 return DRM_ERR(EINVAL);
585 }
586 } else {
587 unsigned int size = 10 - (skip & 1) - (skip >> 1 & 1) -
588 (skip >> 2 & 1) - (skip >> 3 & 1) - (skip >> 4 & 1) -
589 (skip >> 5 & 1) - (skip >> 6 & 1) - (skip >> 7 & 1);
590 if (skip > SAVAGE_SKIP_ALL_S4 || size != 8) {
591 DRM_ERROR("invalid skip flags 0x%04x for DMA\n",
592 skip);
593 return DRM_ERR(EINVAL);
594 }
595 if (reorder) {
596 DRM_ERROR("TRILIST_201 used on Savage4 hardware\n");
597 return DRM_ERR(EINVAL);
598 }
599 }
600
601 /* Vertex DMA doesn't work with command DMA at the same time,
602 * so we use BCI_... to submit commands here. Flush buffered
603 * faked DMA first. */
604 DMA_FLUSH();
605
606 if (dmabuf->bus_address != dev_priv->state.common.vbaddr) {
607 BEGIN_BCI(2);
608 BCI_SET_REGISTERS(SAVAGE_VERTBUFADDR, 1);
609 BCI_WRITE(dmabuf->bus_address | dev_priv->dma_type);
610 dev_priv->state.common.vbaddr = dmabuf->bus_address;
611 }
612 if (S3_SAVAGE3D_SERIES(dev_priv->chipset) && dev_priv->waiting) {
613 /* Workaround for what looks like a hardware bug. If a
614 * WAIT_3D_IDLE was emitted some time before the
615 * indexed drawing command then the engine will lock
616 * up. There are two known workarounds:
617 * WAIT_IDLE_EMPTY or emit at least 63 NOPs. */
618 BEGIN_BCI(63);
619 for (i = 0; i < 63; ++i)
620 BCI_WRITE(BCI_CMD_WAIT);
621 dev_priv->waiting = 0;
622 }
623
624 prim <<= 25;
625 while (n != 0) {
626 /* Can emit up to 255 indices (85 triangles) at once. */
627 unsigned int count = n > 255 ? 255 : n;
628 /* Is it ok to allocate 510 bytes on the stack in an ioctl? */
629 uint16_t idx[255];
630
631 /* Copy and check indices */
632 DRM_COPY_FROM_USER_UNCHECKED(idx, usr_idx, count*2);
633 for (i = 0; i < count; ++i) {
634 if (idx[i] >= dmabuf->total/32) {
635 DRM_ERROR("idx[%u]=%u out of range (0-%u)\n",
636 i, idx[i], dmabuf->total/32 - 1);
637 return DRM_ERR(EINVAL);
638 }
639 }
640
641 if (reorder) {
642 /* Need to reorder indices for correct flat
643 * shading while preserving the clock sense
644 * for correct culling. Only on Savage3D. */
645 int reorder[3] = {2, -1, -1};
646
647 BEGIN_BCI((count+1+1)/2);
648 BCI_DRAW_INDICES_S3D(count, prim, idx[2]);
649
650 for (i = 1; i+1 < count; i += 2)
651 BCI_WRITE(idx[i + reorder[i % 3]] |
652 (idx[i+1 + reorder[(i+1) % 3]] << 16));
653 if (i < count)
654 BCI_WRITE(idx[i + reorder[i%3]]);
655 } else if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) {
656 BEGIN_BCI((count+1+1)/2);
657 BCI_DRAW_INDICES_S3D(count, prim, idx[0]);
658
659 for (i = 1; i+1 < count; i += 2)
660 BCI_WRITE(idx[i] | (idx[i+1] << 16));
661 if (i < count)
662 BCI_WRITE(idx[i]);
663 } else {
664 BEGIN_BCI((count+2+1)/2);
665 BCI_DRAW_INDICES_S4(count, prim, skip);
666
667 for (i = 0; i+1 < count; i += 2)
668 BCI_WRITE(idx[i] | (idx[i+1] << 16));
669 if (i < count)
670 BCI_WRITE(idx[i]);
671 }
672
673 usr_idx += count;
674 n -= count;
675
676 prim |= BCI_CMD_DRAW_CONT;
677 }
678
679 return 0;
680}
681
682static int savage_dispatch_vb_idx(drm_savage_private_t *dev_priv,
683 const drm_savage_cmd_header_t *cmd_header,
684 const uint16_t __user *usr_idx,
685 const uint32_t __user *vtxbuf,
686 unsigned int vb_size,
687 unsigned int vb_stride)
688{
689 unsigned char reorder = 0;
690 unsigned int prim = cmd_header->idx.prim;
691 unsigned int skip = cmd_header->idx.skip;
692 unsigned int n = cmd_header->idx.count;
693 unsigned int vtx_size;
694 unsigned int i;
695 DMA_LOCALS;
696
697 if (!n)
698 return 0;
699
700 switch (prim) {
701 case SAVAGE_PRIM_TRILIST_201:
702 reorder = 1;
703 prim = SAVAGE_PRIM_TRILIST; /* fall through */
704 case SAVAGE_PRIM_TRILIST:
705 if (n % 3 != 0) {
706 DRM_ERROR("wrong number of indices %u in TRILIST\n",
707 n);
708 return DRM_ERR(EINVAL);
709 }
710 break;
711 case SAVAGE_PRIM_TRISTRIP:
712 case SAVAGE_PRIM_TRIFAN:
713 if (n < 3) {
714 DRM_ERROR("wrong number of indices %u in TRIFAN/STRIP\n",
715 n);
716 return DRM_ERR(EINVAL);
717 }
718 break;
719 default:
720 DRM_ERROR("invalid primitive type %u\n", prim);
721 return DRM_ERR(EINVAL);
722 }
723
724 if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) {
725 if (skip > SAVAGE_SKIP_ALL_S3D) {
726 DRM_ERROR("invalid skip flags 0x%04x\n", skip);
727 return DRM_ERR(EINVAL);
728 }
729 vtx_size = 8; /* full vertex */
730 } else {
731 if (skip > SAVAGE_SKIP_ALL_S4) {
732 DRM_ERROR("invalid skip flags 0x%04x\n", skip);
733 return DRM_ERR(EINVAL);
734 }
735 vtx_size = 10; /* full vertex */
736 }
737
738 vtx_size -= (skip & 1) + (skip >> 1 & 1) +
739 (skip >> 2 & 1) + (skip >> 3 & 1) + (skip >> 4 & 1) +
740 (skip >> 5 & 1) + (skip >> 6 & 1) + (skip >> 7 & 1);
741
742 if (vtx_size > vb_stride) {
743 DRM_ERROR("vertex size greater than vb stride (%u > %u)\n",
744 vtx_size, vb_stride);
745 return DRM_ERR(EINVAL);
746 }
747
748 prim <<= 25;
749 while (n != 0) {
750 /* Can emit up to 255 vertices (85 triangles) at once. */
751 unsigned int count = n > 255 ? 255 : n;
752 /* Is it ok to allocate 510 bytes on the stack in an ioctl? */
753 uint16_t idx[255];
754
755 /* Copy and check indices */
756 DRM_COPY_FROM_USER_UNCHECKED(idx, usr_idx, count*2);
757 for (i = 0; i < count; ++i) {
758 if (idx[i] >= vb_size / (vb_stride*4)) {
759 DRM_ERROR("idx[%u]=%u out of range (0-%u)\n",
760 i, idx[i], vb_size / (vb_stride*4) - 1);
761 return DRM_ERR(EINVAL);
762 }
763 }
764
765 if (reorder) {
766 /* Need to reorder vertices for correct flat
767 * shading while preserving the clock sense
768 * for correct culling. Only on Savage3D. */
769 int reorder[3] = {2, -1, -1};
770
771 BEGIN_DMA(count*vtx_size+1);
772 DMA_DRAW_PRIMITIVE(count, prim, skip);
773
774 for (i = 0; i < count; ++i) {
775 unsigned int j = idx[i + reorder[i % 3]];
776 DMA_COPY_FROM_USER(&vtxbuf[vb_stride*j],
777 vtx_size);
778 }
779
780 DMA_COMMIT();
781 } else {
782 BEGIN_DMA(count*vtx_size+1);
783 DMA_DRAW_PRIMITIVE(count, prim, skip);
784
785 for (i = 0; i < count; ++i) {
786 unsigned int j = idx[i];
787 DMA_COPY_FROM_USER(&vtxbuf[vb_stride*j],
788 vtx_size);
789 }
790
791 DMA_COMMIT();
792 }
793
794 usr_idx += count;
795 n -= count;
796
797 prim |= BCI_CMD_DRAW_CONT;
798 }
799
800 return 0;
801}
802
803static int savage_dispatch_clear(drm_savage_private_t *dev_priv,
804 const drm_savage_cmd_header_t *cmd_header,
805 const drm_savage_cmd_header_t __user *data,
806 unsigned int nbox,
807 const drm_clip_rect_t __user *usr_boxes)
808{
809 unsigned int flags = cmd_header->clear0.flags, mask, value;
810 unsigned int clear_cmd;
811 unsigned int i, nbufs;
812 DMA_LOCALS;
813
814 if (nbox == 0)
815 return 0;
816
817 DRM_GET_USER_UNCHECKED(mask, &((const drm_savage_cmd_header_t*)data)
818 ->clear1.mask);
819 DRM_GET_USER_UNCHECKED(value, &((const drm_savage_cmd_header_t*)data)
820 ->clear1.value);
821
822 clear_cmd = BCI_CMD_RECT | BCI_CMD_RECT_XP | BCI_CMD_RECT_YP |
823 BCI_CMD_SEND_COLOR | BCI_CMD_DEST_PBD_NEW;
824 BCI_CMD_SET_ROP(clear_cmd,0xCC);
825
826 nbufs = ((flags & SAVAGE_FRONT) ? 1 : 0) +
827 ((flags & SAVAGE_BACK) ? 1 : 0) +
828 ((flags & SAVAGE_DEPTH) ? 1 : 0);
829 if (nbufs == 0)
830 return 0;
831
832 if (mask != 0xffffffff) {
833 /* set mask */
834 BEGIN_DMA(2);
835 DMA_SET_REGISTERS(SAVAGE_BITPLANEWTMASK, 1);
836 DMA_WRITE(mask);
837 DMA_COMMIT();
838 }
839 for (i = 0; i < nbox; ++i) {
840 drm_clip_rect_t box;
841 unsigned int x, y, w, h;
842 unsigned int buf;
843 DRM_COPY_FROM_USER_UNCHECKED(&box, &usr_boxes[i], sizeof(box));
844 x = box.x1; y = box.y1;
845 w = box.x2 - box.x1;
846 h = box.y2 - box.y1;
847 BEGIN_DMA(nbufs*6);
848 for (buf = SAVAGE_FRONT; buf <= SAVAGE_DEPTH; buf <<= 1) {
849 if (!(flags & buf))
850 continue;
851 DMA_WRITE(clear_cmd);
852 switch(buf) {
853 case SAVAGE_FRONT:
854 DMA_WRITE(dev_priv->front_offset);
855 DMA_WRITE(dev_priv->front_bd);
856 break;
857 case SAVAGE_BACK:
858 DMA_WRITE(dev_priv->back_offset);
859 DMA_WRITE(dev_priv->back_bd);
860 break;
861 case SAVAGE_DEPTH:
862 DMA_WRITE(dev_priv->depth_offset);
863 DMA_WRITE(dev_priv->depth_bd);
864 break;
865 }
866 DMA_WRITE(value);
867 DMA_WRITE(BCI_X_Y(x, y));
868 DMA_WRITE(BCI_W_H(w, h));
869 }
870 DMA_COMMIT();
871 }
872 if (mask != 0xffffffff) {
873 /* reset mask */
874 BEGIN_DMA(2);
875 DMA_SET_REGISTERS(SAVAGE_BITPLANEWTMASK, 1);
876 DMA_WRITE(0xffffffff);
877 DMA_COMMIT();
878 }
879
880 return 0;
881}
882
883static int savage_dispatch_swap(drm_savage_private_t *dev_priv,
884 unsigned int nbox,
885 const drm_clip_rect_t __user *usr_boxes)
886{
887 unsigned int swap_cmd;
888 unsigned int i;
889 DMA_LOCALS;
890
891 if (nbox == 0)
892 return 0;
893
894 swap_cmd = BCI_CMD_RECT | BCI_CMD_RECT_XP | BCI_CMD_RECT_YP |
895 BCI_CMD_SRC_PBD_COLOR_NEW | BCI_CMD_DEST_GBD;
896 BCI_CMD_SET_ROP(swap_cmd,0xCC);
897
898 for (i = 0; i < nbox; ++i) {
899 drm_clip_rect_t box;
900 DRM_COPY_FROM_USER_UNCHECKED(&box, &usr_boxes[i], sizeof(box));
901
902 BEGIN_DMA(6);
903 DMA_WRITE(swap_cmd);
904 DMA_WRITE(dev_priv->back_offset);
905 DMA_WRITE(dev_priv->back_bd);
906 DMA_WRITE(BCI_X_Y(box.x1, box.y1));
907 DMA_WRITE(BCI_X_Y(box.x1, box.y1)); /* same rect as src and dest */
908 DMA_WRITE(BCI_W_H(box.x2-box.x1, box.y2-box.y1));
909 DMA_COMMIT();
910 }
911
912 return 0;
913}
914
915static int savage_dispatch_draw(drm_savage_private_t *dev_priv,
916 const drm_savage_cmd_header_t __user *start,
917 const drm_savage_cmd_header_t __user *end,
918 const drm_buf_t *dmabuf,
919 const unsigned int __user *usr_vtxbuf,
920 unsigned int vb_size, unsigned int vb_stride,
921 unsigned int nbox,
922 const drm_clip_rect_t __user *usr_boxes)
923{
924 unsigned int i, j;
925 int ret;
926
927 for (i = 0; i < nbox; ++i) {
928 drm_clip_rect_t box;
929 const drm_savage_cmd_header_t __user *usr_cmdbuf;
930 DRM_COPY_FROM_USER_UNCHECKED(&box, &usr_boxes[i], sizeof(box));
931 dev_priv->emit_clip_rect(dev_priv, &box);
932
933 usr_cmdbuf = start;
934 while (usr_cmdbuf < end) {
935 drm_savage_cmd_header_t cmd_header;
936 DRM_COPY_FROM_USER_UNCHECKED(&cmd_header, usr_cmdbuf,
937 sizeof(cmd_header));
938 usr_cmdbuf++;
939 switch (cmd_header.cmd.cmd) {
940 case SAVAGE_CMD_DMA_PRIM:
941 ret = savage_dispatch_dma_prim(
942 dev_priv, &cmd_header, dmabuf);
943 break;
944 case SAVAGE_CMD_VB_PRIM:
945 ret = savage_dispatch_vb_prim(
946 dev_priv, &cmd_header,
947 (const uint32_t __user *)usr_vtxbuf,
948 vb_size, vb_stride);
949 break;
950 case SAVAGE_CMD_DMA_IDX:
951 j = (cmd_header.idx.count + 3) / 4;
952 /* j was checked in savage_bci_cmdbuf */
953 ret = savage_dispatch_dma_idx(
954 dev_priv, &cmd_header,
955 (const uint16_t __user *)usr_cmdbuf,
956 dmabuf);
957 usr_cmdbuf += j;
958 break;
959 case SAVAGE_CMD_VB_IDX:
960 j = (cmd_header.idx.count + 3) / 4;
961 /* j was checked in savage_bci_cmdbuf */
962 ret = savage_dispatch_vb_idx(
963 dev_priv, &cmd_header,
964 (const uint16_t __user *)usr_cmdbuf,
965 (const uint32_t __user *)usr_vtxbuf,
966 vb_size, vb_stride);
967 usr_cmdbuf += j;
968 break;
969 default:
970 /* What's the best return code? EFAULT? */
971 DRM_ERROR("IMPLEMENTATION ERROR: "
972 "non-drawing-command %d\n",
973 cmd_header.cmd.cmd);
974 return DRM_ERR(EINVAL);
975 }
976
977 if (ret != 0)
978 return ret;
979 }
980 }
981
982 return 0;
983}
984
985int savage_bci_cmdbuf(DRM_IOCTL_ARGS)
986{
987 DRM_DEVICE;
988 drm_savage_private_t *dev_priv = dev->dev_private;
989 drm_device_dma_t *dma = dev->dma;
990 drm_buf_t *dmabuf;
991 drm_savage_cmdbuf_t cmdbuf;
992 drm_savage_cmd_header_t __user *usr_cmdbuf;
993 drm_savage_cmd_header_t __user *first_draw_cmd;
994 unsigned int __user *usr_vtxbuf;
995 drm_clip_rect_t __user *usr_boxes;
996 unsigned int i, j;
997 int ret = 0;
998
999 DRM_DEBUG("\n");
1000
1001 LOCK_TEST_WITH_RETURN(dev, filp);
1002
1003 DRM_COPY_FROM_USER_IOCTL(cmdbuf, (drm_savage_cmdbuf_t __user *)data,
1004 sizeof(cmdbuf));
1005
1006 if (dma && dma->buflist) {
1007 if (cmdbuf.dma_idx >= dma->buf_count) {
1008 DRM_ERROR("vertex buffer index %u out of range (0-%u)\n",
1009 cmdbuf.dma_idx, dma->buf_count-1);
1010 return DRM_ERR(EINVAL);
1011 }
1012 dmabuf = dma->buflist[cmdbuf.dma_idx];
1013 } else {
1014 dmabuf = NULL;
1015 }
1016
1017 usr_cmdbuf = (drm_savage_cmd_header_t __user *)cmdbuf.cmd_addr;
1018 usr_vtxbuf = (unsigned int __user *)cmdbuf.vb_addr;
1019 usr_boxes = (drm_clip_rect_t __user *)cmdbuf.box_addr;
1020 if ((cmdbuf.size && DRM_VERIFYAREA_READ(usr_cmdbuf, cmdbuf.size*8)) ||
1021 (cmdbuf.vb_size && DRM_VERIFYAREA_READ(
1022 usr_vtxbuf, cmdbuf.vb_size)) ||
1023 (cmdbuf.nbox && DRM_VERIFYAREA_READ(
1024 usr_boxes, cmdbuf.nbox*sizeof(drm_clip_rect_t))))
1025 return DRM_ERR(EFAULT);
1026
1027 /* Make sure writes to DMA buffers are finished before sending
1028 * DMA commands to the graphics hardware. */
1029 DRM_MEMORYBARRIER();
1030
1031 /* Coming from user space. Don't know if the Xserver has
1032 * emitted wait commands. Assuming the worst. */
1033 dev_priv->waiting = 1;
1034
1035 i = 0;
1036 first_draw_cmd = NULL;
1037 while (i < cmdbuf.size) {
1038 drm_savage_cmd_header_t cmd_header;
1039 DRM_COPY_FROM_USER_UNCHECKED(&cmd_header, usr_cmdbuf,
1040 sizeof(cmd_header));
1041 usr_cmdbuf++;
1042 i++;
1043
1044 /* Group drawing commands with same state to minimize
1045 * iterations over clip rects. */
1046 j = 0;
1047 switch (cmd_header.cmd.cmd) {
1048 case SAVAGE_CMD_DMA_IDX:
1049 case SAVAGE_CMD_VB_IDX:
1050 j = (cmd_header.idx.count + 3) / 4;
1051 if (i + j > cmdbuf.size) {
1052 DRM_ERROR("indexed drawing command extends "
1053 "beyond end of command buffer\n");
1054 DMA_FLUSH();
1055 return DRM_ERR(EINVAL);
1056 }
1057 /* fall through */
1058 case SAVAGE_CMD_DMA_PRIM:
1059 case SAVAGE_CMD_VB_PRIM:
1060 if (!first_draw_cmd)
1061 first_draw_cmd = usr_cmdbuf-1;
1062 usr_cmdbuf += j;
1063 i += j;
1064 break;
1065 default:
1066 if (first_draw_cmd) {
1067 ret = savage_dispatch_draw(
1068 dev_priv, first_draw_cmd, usr_cmdbuf-1,
1069 dmabuf, usr_vtxbuf, cmdbuf.vb_size,
1070 cmdbuf.vb_stride,
1071 cmdbuf.nbox, usr_boxes);
1072 if (ret != 0)
1073 return ret;
1074 first_draw_cmd = NULL;
1075 }
1076 }
1077 if (first_draw_cmd)
1078 continue;
1079
1080 switch (cmd_header.cmd.cmd) {
1081 case SAVAGE_CMD_STATE:
1082 j = (cmd_header.state.count + 1) / 2;
1083 if (i + j > cmdbuf.size) {
1084 DRM_ERROR("command SAVAGE_CMD_STATE extends "
1085 "beyond end of command buffer\n");
1086 DMA_FLUSH();
1087 return DRM_ERR(EINVAL);
1088 }
1089 ret = savage_dispatch_state(
1090 dev_priv, &cmd_header,
1091 (uint32_t __user *)usr_cmdbuf);
1092 usr_cmdbuf += j;
1093 i += j;
1094 break;
1095 case SAVAGE_CMD_CLEAR:
1096 if (i + 1 > cmdbuf.size) {
1097 DRM_ERROR("command SAVAGE_CMD_CLEAR extends "
1098 "beyond end of command buffer\n");
1099 DMA_FLUSH();
1100 return DRM_ERR(EINVAL);
1101 }
1102 ret = savage_dispatch_clear(dev_priv, &cmd_header,
1103 usr_cmdbuf,
1104 cmdbuf.nbox, usr_boxes);
1105 usr_cmdbuf++;
1106 i++;
1107 break;
1108 case SAVAGE_CMD_SWAP:
1109 ret = savage_dispatch_swap(dev_priv,
1110 cmdbuf.nbox, usr_boxes);
1111 break;
1112 default:
1113 DRM_ERROR("invalid command 0x%x\n", cmd_header.cmd.cmd);
1114 DMA_FLUSH();
1115 return DRM_ERR(EINVAL);
1116 }
1117
1118 if (ret != 0) {
1119 DMA_FLUSH();
1120 return ret;
1121 }
1122 }
1123
1124 if (first_draw_cmd) {
1125 ret = savage_dispatch_draw(
1126 dev_priv, first_draw_cmd, usr_cmdbuf, dmabuf,
1127 usr_vtxbuf, cmdbuf.vb_size, cmdbuf.vb_stride,
1128 cmdbuf.nbox, usr_boxes);
1129 if (ret != 0) {
1130 DMA_FLUSH();
1131 return ret;
1132 }
1133 }
1134
1135 DMA_FLUSH();
1136
1137 if (dmabuf && cmdbuf.discard) {
1138 drm_savage_buf_priv_t *buf_priv = dmabuf->dev_private;
1139 uint16_t event;
1140 event = savage_bci_emit_event(dev_priv, SAVAGE_WAIT_3D);
1141 SET_AGE(&buf_priv->age, event, dev_priv->event_wrap);
1142 savage_freelist_put(dev, dmabuf);
1143 }
1144
1145 return 0;
1146}