author		Jeff Garzik <jgarzik@pobox.com>	2005-09-05 05:20:33 -0400
committer	Jeff Garzik <jgarzik@pobox.com>	2005-09-05 05:20:33 -0400
commit		d0bd99299bf933ae006d2dc6a31ffcba482ae3f2 (patch)
tree		62615ed8bc6ab68f42b59d412a7fd7ac77c79803 /drivers
parent		6f1062330499cee10396bf3fc66a03eb228c5fad (diff)
parent		586a4ac509b041df55f26c2b26cd7cbdaf9b045d (diff)
Merge /spare/repo/libata-dev branch 'iomap-try3'
Diffstat (limited to 'drivers')
 95 files changed, 18302 insertions(+), 3429 deletions(-)
diff --git a/drivers/char/drm/Kconfig b/drivers/char/drm/Kconfig
index 123417e43040..56ace9d5e2ae 100644
--- a/drivers/char/drm/Kconfig
+++ b/drivers/char/drm/Kconfig
@@ -23,13 +23,6 @@ config DRM_TDFX
 	  Choose this option if you have a 3dfx Banshee or Voodoo3 (or later),
 	  graphics card.  If M is selected, the module will be called tdfx.
 
-config DRM_GAMMA
-	tristate "3dlabs GMX 2000"
-	depends on DRM && BROKEN
-	help
-	  This is the old gamma driver, please tell me if it might actually
-	  work.
-
 config DRM_R128
 	tristate "ATI Rage 128"
 	depends on DRM && PCI
@@ -82,7 +75,7 @@ endchoice
 
 config DRM_MGA
 	tristate "Matrox g200/g400"
-	depends on DRM && AGP
+	depends on DRM
 	help
 	  Choose this option if you have a Matrox G200, G400 or G450 graphics
 	  card.  If M is selected, the module will be called mga.  AGP
@@ -103,3 +96,10 @@ config DRM_VIA
 	  Choose this option if you have a Via unichrome or compatible video
 	  chipset.  If M is selected the module will be called via.
 
+config DRM_SAVAGE
+	tristate "Savage video cards"
+	depends on DRM
+	help
+	  Choose this option if you have a Savage3D/4/SuperSavage/Pro/Twister
+	  chipset. If M is selected the module will be called savage.
+
diff --git a/drivers/char/drm/Makefile b/drivers/char/drm/Makefile
index ddd941045b1f..e41060c76226 100644
--- a/drivers/char/drm/Makefile
+++ b/drivers/char/drm/Makefile
@@ -8,16 +8,16 @@ drm-objs    := drm_auth.o drm_bufs.o drm_context.o drm_dma.o drm_drawable.o \
 		drm_agpsupport.o drm_scatter.o ati_pcigart.o drm_pci.o \
 		drm_sysfs.o
 
-gamma-objs  := gamma_drv.o gamma_dma.o
 tdfx-objs   := tdfx_drv.o
 r128-objs   := r128_drv.o r128_cce.o r128_state.o r128_irq.o
 mga-objs    := mga_drv.o mga_dma.o mga_state.o mga_warp.o mga_irq.o
 i810-objs   := i810_drv.o i810_dma.o
 i830-objs   := i830_drv.o i830_dma.o i830_irq.o
 i915-objs   := i915_drv.o i915_dma.o i915_irq.o i915_mem.o
-radeon-objs := radeon_drv.o radeon_cp.o radeon_state.o radeon_mem.o radeon_irq.o
+radeon-objs := radeon_drv.o radeon_cp.o radeon_state.o radeon_mem.o radeon_irq.o r300_cmdbuf.o
 ffb-objs    := ffb_drv.o ffb_context.o
 sis-objs    := sis_drv.o sis_ds.o sis_mm.o
+savage-objs := savage_drv.o savage_bci.o savage_state.o
 via-objs    := via_irq.o via_drv.o via_ds.o via_map.o via_mm.o via_dma.o via_verifier.o via_video.o
 
 ifeq ($(CONFIG_COMPAT),y)
@@ -29,7 +29,6 @@ i915-objs += i915_ioc32.o
 endif
 
 obj-$(CONFIG_DRM)	+= drm.o
-obj-$(CONFIG_DRM_GAMMA) += gamma.o
 obj-$(CONFIG_DRM_TDFX)	+= tdfx.o
 obj-$(CONFIG_DRM_R128)	+= r128.o
 obj-$(CONFIG_DRM_RADEON)+= radeon.o
@@ -39,5 +38,7 @@ obj-$(CONFIG_DRM_I830) += i830.o
 obj-$(CONFIG_DRM_I915)	+= i915.o
 obj-$(CONFIG_DRM_FFB)	+= ffb.o
 obj-$(CONFIG_DRM_SIS)	+= sis.o
+obj-$(CONFIG_DRM_SAVAGE)+= savage.o
 obj-$(CONFIG_DRM_VIA)	+=via.o
 
+
diff --git a/drivers/char/drm/drm.h b/drivers/char/drm/drm.h
index e8371dd87fbc..fc6598a81acd 100644
--- a/drivers/char/drm/drm.h
+++ b/drivers/char/drm/drm.h
@@ -98,7 +98,7 @@
 #define _DRM_LOCKING_CONTEXT(lock) ((lock) & ~(_DRM_LOCK_HELD|_DRM_LOCK_CONT))
 
 
-typedef unsigned long drm_handle_t;
+typedef unsigned int drm_handle_t;
 typedef unsigned int drm_context_t;
 typedef unsigned int drm_drawable_t;
 typedef unsigned int drm_magic_t;
@@ -209,7 +209,8 @@ typedef enum drm_map_type {
 	_DRM_REGISTERS = 1,	  /**< no caching, no core dump */
 	_DRM_SHM = 2,		  /**< shared, cached */
 	_DRM_AGP = 3,		  /**< AGP/GART */
-	_DRM_SCATTER_GATHER = 4	  /**< Scatter/gather memory for PCI DMA */
+	_DRM_SCATTER_GATHER = 4,  /**< Scatter/gather memory for PCI DMA */
+	_DRM_CONSISTENT = 5,	  /**< Consistent memory for PCI DMA */
 } drm_map_type_t;
 
 
@@ -368,7 +369,8 @@ typedef struct drm_buf_desc {
 	enum {
 		_DRM_PAGE_ALIGN = 0x01, /**< Align on page boundaries for DMA */
 		_DRM_AGP_BUFFER = 0x02, /**< Buffer is in AGP space */
-		_DRM_SG_BUFFER = 0x04	/**< Scatter/gather memory buffer */
+		_DRM_SG_BUFFER = 0x04,	/**< Scatter/gather memory buffer */
+		_DRM_FB_BUFFER = 0x08	/**< Buffer is in frame buffer */
 	} flags;
 	unsigned long agp_start; /**<
 				  * Start address of where the AGP buffers are
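The new _DRM_CONSISTENT map type and _DRM_FB_BUFFER flag surface through the existing ioctls. As a rough sketch (not part of the patch; fd and the 64 KiB size are assumptions), userspace could request a consistent-memory map like this:

	/* Sketch only: ask the core for PCI-consistent memory through the
	 * existing ADD_MAP ioctl.  fd is an assumed open DRM descriptor. */
	drm_map_t map = {
		.size  = 0x10000,		/* 64 KiB, for example */
		.type  = _DRM_CONSISTENT,
		.flags = _DRM_REMOVABLE,
	};

	if (ioctl(fd, DRM_IOCTL_ADD_MAP, &map) == 0) {
		/* On return, map.handle carries the 32-bit token that
		 * mmap() and the rmmap ioctl expect. */
	}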
diff --git a/drivers/char/drm/drmP.h b/drivers/char/drm/drmP.h
index 5df09cc8c6db..6f98701dfe15 100644
--- a/drivers/char/drm/drmP.h
+++ b/drivers/char/drm/drmP.h
@@ -53,7 +53,6 @@
 #include <linux/init.h>
 #include <linux/file.h>
 #include <linux/pci.h>
-#include <linux/version.h>
 #include <linux/jiffies.h>
 #include <linux/smp_lock.h>	/* For (un)lock_kernel */
 #include <linux/mm.h>
@@ -96,6 +95,7 @@
 #define DRIVER_IRQ_SHARED  0x80
 #define DRIVER_IRQ_VBL	   0x100
 #define DRIVER_DMA_QUEUE   0x200
+#define DRIVER_FB_DMA	   0x400
 
 /***********************************************************************/
 /** \name Begin the DRM... */
@@ -160,36 +160,7 @@
 #define pte_unmap(pte)
 #endif
 
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,4,19)
-static inline struct page * vmalloc_to_page(void * vmalloc_addr)
-{
-	unsigned long addr = (unsigned long) vmalloc_addr;
-	struct page *page = NULL;
-	pgd_t *pgd = pgd_offset_k(addr);
-	pmd_t *pmd;
-	pte_t *ptep, pte;
-
-	if (!pgd_none(*pgd)) {
-		pmd = pmd_offset(pgd, addr);
-		if (!pmd_none(*pmd)) {
-			preempt_disable();
-			ptep = pte_offset_map(pmd, addr);
-			pte = *ptep;
-			if (pte_present(pte))
-				page = pte_page(pte);
-			pte_unmap(ptep);
-			preempt_enable();
-		}
-	}
-	return page;
-}
-#endif
-
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
-#define DRM_RPR_ARG(vma)
-#else
 #define DRM_RPR_ARG(vma) vma,
-#endif
 
 #define VM_OFFSET(vma) ((vma)->vm_pgoff << PAGE_SHIFT)
 
@@ -474,7 +445,8 @@ typedef struct drm_device_dma {
 	unsigned long byte_count;
 	enum {
 		_DRM_DMA_USE_AGP = 0x01,
-		_DRM_DMA_USE_SG = 0x02
+		_DRM_DMA_USE_SG = 0x02,
+		_DRM_DMA_USE_FB = 0x04
 	} flags;
 
 } drm_device_dma_t;
@@ -525,12 +497,19 @@ typedef struct drm_sigdata {
 	drm_hw_lock_t *lock;
 } drm_sigdata_t;
 
+typedef struct drm_dma_handle {
+	dma_addr_t busaddr;
+	void *vaddr;
+	size_t size;
+} drm_dma_handle_t;
+
 /**
  * Mappings list
  */
 typedef struct drm_map_list {
 	struct list_head head;	/**< list head */
 	drm_map_t *map;		/**< mapping */
+	unsigned int user_token;
 } drm_map_list_t;
 
 typedef drm_map_t drm_local_map_t;
@@ -578,7 +557,22 @@ struct drm_driver {
 	int (*kernel_context_switch)(struct drm_device *dev, int old, int new);
 	void (*kernel_context_switch_unlock)(struct drm_device *dev, drm_lock_t *lock);
 	int (*vblank_wait)(struct drm_device *dev, unsigned int *sequence);
+
+	/**
+	 * Called by \c drm_device_is_agp.  Typically used to determine if a
+	 * card is really attached to AGP or not.
+	 *
+	 * \param dev  DRM device handle
+	 *
+	 * \returns
+	 * One of three values is returned depending on whether or not the
+	 * card is absolutely \b not AGP (return of 0), absolutely \b is AGP
+	 * (return of 1), or may or may not be AGP (return of 2).
+	 */
+	int (*device_is_agp) (struct drm_device * dev);
+
 	/* these have to be filled in */
+
 	int (*postinit)(struct drm_device *, unsigned long flags);
 	irqreturn_t (*irq_handler)( DRM_IRQ_ARGS );
 	void (*irq_preinstall)(struct drm_device *dev);
@@ -722,12 +716,8 @@ typedef struct drm_device {
 	int pci_slot;		/**< PCI slot number */
 	int pci_func;		/**< PCI function number */
 #ifdef __alpha__
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,4,3)
-	struct pci_controler *hose;
-#else
 	struct pci_controller *hose;
 #endif
-#endif
 	drm_sg_mem_t *sg;	/**< Scatter gather memory */
 	unsigned long *ctx_bitmap;	/**< context bitmap */
 	void *dev_private;	/**< device private data */
@@ -736,6 +726,7 @@ typedef struct drm_device {
 
 	struct drm_driver *driver;
 	drm_local_map_t *agp_buffer_map;
+	unsigned int agp_buffer_token;
 	drm_head_t primary;	/**< primary screen head */
 } drm_device_t;
 
@@ -806,7 +797,7 @@ extern void *drm_ioremap_nocache(unsigned long offset, unsigned long size,
 				 drm_device_t *dev);
 extern void drm_ioremapfree(void *pt, unsigned long size, drm_device_t *dev);
 
-extern DRM_AGP_MEM *drm_alloc_agp(struct agp_bridge_data *bridge, int pages, u32 type);
+extern DRM_AGP_MEM *drm_alloc_agp(drm_device_t *dev, int pages, u32 type);
 extern int drm_free_agp(DRM_AGP_MEM *handle, int pages);
 extern int drm_bind_agp(DRM_AGP_MEM *handle, unsigned int start);
 extern int drm_unbind_agp(DRM_AGP_MEM *handle);
@@ -881,11 +872,19 @@ extern int drm_lock_free(drm_device_t *dev,
 			 unsigned int context);
 
 				/* Buffer management support (drm_bufs.h) */
+extern int drm_addbufs_agp(drm_device_t *dev, drm_buf_desc_t *request);
+extern int drm_addbufs_pci(drm_device_t *dev, drm_buf_desc_t *request);
+extern int drm_addmap(drm_device_t *dev, unsigned int offset,
+		      unsigned int size, drm_map_type_t type,
+		      drm_map_flags_t flags, drm_local_map_t **map_ptr);
+extern int drm_addmap_ioctl(struct inode *inode, struct file *filp,
+			    unsigned int cmd, unsigned long arg);
+extern int drm_rmmap(drm_device_t *dev, drm_local_map_t *map);
+extern int drm_rmmap_locked(drm_device_t *dev, drm_local_map_t *map);
+extern int drm_rmmap_ioctl(struct inode *inode, struct file *filp,
+			   unsigned int cmd, unsigned long arg);
+
 extern int drm_order( unsigned long size );
-extern int drm_addmap( struct inode *inode, struct file *filp,
-		       unsigned int cmd, unsigned long arg );
-extern int drm_rmmap( struct inode *inode, struct file *filp,
-		      unsigned int cmd, unsigned long arg );
 extern int drm_addbufs( struct inode *inode, struct file *filp,
 			unsigned int cmd, unsigned long arg );
 extern int drm_infobufs( struct inode *inode, struct file *filp,
@@ -896,6 +895,10 @@ extern int drm_freebufs( struct inode *inode, struct file *filp,
 			 unsigned int cmd, unsigned long arg );
 extern int drm_mapbufs( struct inode *inode, struct file *filp,
 			unsigned int cmd, unsigned long arg );
+extern unsigned long drm_get_resource_start(drm_device_t *dev,
+					    unsigned int resource);
+extern unsigned long drm_get_resource_len(drm_device_t *dev,
+					  unsigned int resource);
 
 				/* DMA support (drm_dma.h) */
 extern int drm_dma_setup(drm_device_t *dev);
@@ -919,15 +922,18 @@ extern void drm_vbl_send_signals( drm_device_t *dev );
 
 				/* AGP/GART support (drm_agpsupport.h) */
 extern drm_agp_head_t *drm_agp_init(drm_device_t *dev);
-extern int drm_agp_acquire(struct inode *inode, struct file *filp,
-			   unsigned int cmd, unsigned long arg);
-extern void drm_agp_do_release(drm_device_t *dev);
-extern int drm_agp_release(struct inode *inode, struct file *filp,
-			   unsigned int cmd, unsigned long arg);
-extern int drm_agp_enable(struct inode *inode, struct file *filp,
-			  unsigned int cmd, unsigned long arg);
-extern int drm_agp_info(struct inode *inode, struct file *filp,
-			unsigned int cmd, unsigned long arg);
+extern int drm_agp_acquire(drm_device_t * dev);
+extern int drm_agp_acquire_ioctl(struct inode *inode, struct file *filp,
+				 unsigned int cmd, unsigned long arg);
+extern int drm_agp_release(drm_device_t *dev);
+extern int drm_agp_release_ioctl(struct inode *inode, struct file *filp,
+				 unsigned int cmd, unsigned long arg);
+extern int drm_agp_enable(drm_device_t *dev, drm_agp_mode_t mode);
+extern int drm_agp_enable_ioctl(struct inode *inode, struct file *filp,
+				unsigned int cmd, unsigned long arg);
+extern int drm_agp_info(drm_device_t * dev, drm_agp_info_t *info);
+extern int drm_agp_info_ioctl(struct inode *inode, struct file *filp,
+			      unsigned int cmd, unsigned long arg);
 extern int drm_agp_alloc(struct inode *inode, struct file *filp,
 			 unsigned int cmd, unsigned long arg);
 extern int drm_agp_free(struct inode *inode, struct file *filp,
 			unsigned int cmd, unsigned long arg);
@@ -976,12 +982,10 @@ extern int drm_ati_pcigart_cleanup(drm_device_t *dev,
 				   unsigned long addr,
 				   dma_addr_t bus_addr);
 
-extern void *drm_pci_alloc(drm_device_t * dev, size_t size,
-			   size_t align, dma_addr_t maxaddr,
-			   dma_addr_t * busaddr);
-
-extern void drm_pci_free(drm_device_t * dev, size_t size,
-			 void *vaddr, dma_addr_t busaddr);
+extern drm_dma_handle_t *drm_pci_alloc(drm_device_t *dev, size_t size,
+				       size_t align, dma_addr_t maxaddr);
+extern void __drm_pci_free(drm_device_t *dev, drm_dma_handle_t *dmah);
+extern void drm_pci_free(drm_device_t *dev, drm_dma_handle_t *dmah);
 
 /* sysfs support (drm_sysfs.c) */
 struct drm_sysfs_class;
@@ -1012,17 +1016,26 @@ static __inline__ void drm_core_ioremapfree(struct drm_map *map, struct drm_device *dev)
 	drm_ioremapfree( map->handle, map->size, dev );
 }
 
-static __inline__ struct drm_map *drm_core_findmap(struct drm_device *dev, unsigned long offset)
+static __inline__ struct drm_map *drm_core_findmap(struct drm_device *dev, unsigned int token)
 {
-	struct list_head *_list;
-	list_for_each( _list, &dev->maplist->head ) {
-		drm_map_list_t *_entry = list_entry( _list, drm_map_list_t, head );
-		if ( _entry->map &&
-		     _entry->map->offset == offset ) {
-			return _entry->map;
-		}
-	}
-	return NULL;
+	drm_map_list_t *_entry;
+	list_for_each_entry(_entry, &dev->maplist->head, head)
+		if (_entry->user_token == token)
+			return _entry->map;
+	return NULL;
+}
+
+static __inline__ int drm_device_is_agp(drm_device_t *dev)
+{
+	if ( dev->driver->device_is_agp != NULL ) {
+		int err = (*dev->driver->device_is_agp)( dev );
+
+		if (err != 2) {
+			return err;
+		}
+	}
+
+	return pci_find_capability(dev->pdev, PCI_CAP_ID_AGP);
 }
 
 static __inline__ void drm_core_dropmap(struct drm_map *map)
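The three-valued device_is_agp hook lets a driver overrule the generic PCI capability probe. A hypothetical driver implementation might look like the sketch below; foo_device_is_agp and FOO_PCI_DEVICE_ID are made-up names, not part of this patch:

	static int foo_device_is_agp(struct drm_device *dev)
	{
		/* Known PCI-only variant of the chip: definitely not AGP. */
		if (dev->pdev->device == FOO_PCI_DEVICE_ID)
			return 0;

		/* Unsure (2): drm_device_is_agp() then falls back to probing
		 * the PCI capability list with pci_find_capability(). */
		return 2;
	}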
diff --git a/drivers/char/drm/drm_agpsupport.c b/drivers/char/drm/drm_agpsupport.c
index 8d94c0b5fa44..8c215adcb4b2 100644
--- a/drivers/char/drm/drm_agpsupport.c
+++ b/drivers/char/drm/drm_agpsupport.c
@@ -37,7 +37,7 @@
 #if __OS_HAS_AGP
 
 /**
- * AGP information ioctl.
+ * Get AGP information.
  *
  * \param inode device inode.
  * \param filp file pointer.
@@ -48,51 +48,56 @@
  * Verifies the AGP device has been initialized and acquired and fills in the
  * drm_agp_info structure with the information in drm_agp_head::agp_info.
  */
-int drm_agp_info(struct inode *inode, struct file *filp,
-		 unsigned int cmd, unsigned long arg)
+int drm_agp_info(drm_device_t *dev, drm_agp_info_t *info)
 {
-	drm_file_t *priv = filp->private_data;
-	drm_device_t *dev = priv->head->dev;
 	DRM_AGP_KERN *kern;
-	drm_agp_info_t info;
 
 	if (!dev->agp || !dev->agp->acquired)
 		return -EINVAL;
 
 	kern = &dev->agp->agp_info;
-	info.agp_version_major = kern->version.major;
-	info.agp_version_minor = kern->version.minor;
-	info.mode = kern->mode;
-	info.aperture_base = kern->aper_base;
-	info.aperture_size = kern->aper_size * 1024 * 1024;
-	info.memory_allowed = kern->max_memory << PAGE_SHIFT;
-	info.memory_used = kern->current_memory << PAGE_SHIFT;
-	info.id_vendor = kern->device->vendor;
-	info.id_device = kern->device->device;
+	info->agp_version_major = kern->version.major;
+	info->agp_version_minor = kern->version.minor;
+	info->mode = kern->mode;
+	info->aperture_base = kern->aper_base;
+	info->aperture_size = kern->aper_size * 1024 * 1024;
+	info->memory_allowed = kern->max_memory << PAGE_SHIFT;
+	info->memory_used = kern->current_memory << PAGE_SHIFT;
+	info->id_vendor = kern->device->vendor;
+	info->id_device = kern->device->device;
 
-	if (copy_to_user((drm_agp_info_t __user *)arg, &info, sizeof(info)))
+	return 0;
+}
+EXPORT_SYMBOL(drm_agp_info);
+
+int drm_agp_info_ioctl(struct inode *inode, struct file *filp,
+		       unsigned int cmd, unsigned long arg)
+{
+	drm_file_t *priv = filp->private_data;
+	drm_device_t *dev = priv->head->dev;
+	drm_agp_info_t info;
+	int err;
+
+	err = drm_agp_info(dev, &info);
+	if (err)
+		return err;
+
+	if (copy_to_user((drm_agp_info_t __user *) arg, &info, sizeof(info)))
 		return -EFAULT;
 	return 0;
 }
 
 /**
- * Acquire the AGP device (ioctl).
+ * Acquire the AGP device.
  *
- * \param inode device inode.
- * \param filp file pointer.
- * \param cmd command.
- * \param arg user argument.
+ * \param dev DRM device that is to acquire AGP
  * \return zero on success or a negative number on failure.
  *
  * Verifies the AGP device hasn't been acquired before and calls
- * agp_acquire().
+ * \c agp_backend_acquire.
  */
-int drm_agp_acquire(struct inode *inode, struct file *filp,
-		    unsigned int cmd, unsigned long arg)
+int drm_agp_acquire(drm_device_t *dev)
 {
-	drm_file_t *priv = filp->private_data;
-	drm_device_t *dev = priv->head->dev;
-
 	if (!dev->agp)
 		return -ENODEV;
 	if (dev->agp->acquired)
@@ -102,9 +107,10 @@ int drm_agp_acquire(struct inode *inode, struct file *filp,
 	dev->agp->acquired = 1;
 	return 0;
 }
+EXPORT_SYMBOL(drm_agp_acquire);
 
 /**
- * Release the AGP device (ioctl).
+ * Acquire the AGP device (ioctl).
  *
  * \param inode device inode.
  * \param filp file pointer.
@@ -112,63 +118,80 @@ int drm_agp_acquire(struct inode *inode, struct file *filp,
  * \param arg user argument.
  * \return zero on success or a negative number on failure.
  *
- * Verifies the AGP device has been acquired and calls agp_backend_release().
+ * Verifies the AGP device hasn't been acquired before and calls
+ * \c agp_backend_acquire.
  */
-int drm_agp_release(struct inode *inode, struct file *filp,
+int drm_agp_acquire_ioctl(struct inode *inode, struct file *filp,
 			  unsigned int cmd, unsigned long arg)
 {
 	drm_file_t *priv = filp->private_data;
-	drm_device_t *dev = priv->head->dev;
+
+	return drm_agp_acquire( (drm_device_t *) priv->head->dev );
+}
 
+/**
+ * Release the AGP device.
+ *
+ * \param dev DRM device that is to release AGP
+ * \return zero on success or a negative number on failure.
+ *
+ * Verifies the AGP device has been acquired and calls \c agp_backend_release.
+ */
+int drm_agp_release(drm_device_t *dev)
+{
 	if (!dev->agp || !dev->agp->acquired)
 		return -EINVAL;
 	agp_backend_release(dev->agp->bridge);
 	dev->agp->acquired = 0;
 	return 0;
-
 }
+EXPORT_SYMBOL(drm_agp_release);
 
-/**
- * Release the AGP device.
- *
- * Calls agp_backend_release().
- */
-void drm_agp_do_release(drm_device_t *dev)
+int drm_agp_release_ioctl(struct inode *inode, struct file *filp,
+			  unsigned int cmd, unsigned long arg)
 {
-	agp_backend_release(dev->agp->bridge);
+	drm_file_t *priv = filp->private_data;
+	drm_device_t *dev = priv->head->dev;
+
+	return drm_agp_release(dev);
 }
 
 /**
  * Enable the AGP bus.
  *
- * \param inode device inode.
- * \param filp file pointer.
- * \param cmd command.
- * \param arg pointer to a drm_agp_mode structure.
+ * \param dev DRM device that has previously acquired AGP.
+ * \param mode Requested AGP mode.
  * \return zero on success or a negative number on failure.
  *
  * Verifies the AGP device has been acquired but not enabled, and calls
- * agp_enable().
+ * \c agp_enable.
  */
-int drm_agp_enable(struct inode *inode, struct file *filp,
-		   unsigned int cmd, unsigned long arg)
+int drm_agp_enable(drm_device_t *dev, drm_agp_mode_t mode)
 {
-	drm_file_t *priv = filp->private_data;
-	drm_device_t *dev = priv->head->dev;
-	drm_agp_mode_t mode;
-
 	if (!dev->agp || !dev->agp->acquired)
 		return -EINVAL;
 
-	if (copy_from_user(&mode, (drm_agp_mode_t __user *)arg, sizeof(mode)))
-		return -EFAULT;
-
 	dev->agp->mode = mode.mode;
 	agp_enable(dev->agp->bridge, mode.mode);
 	dev->agp->base = dev->agp->agp_info.aper_base;
 	dev->agp->enabled = 1;
 	return 0;
 }
+EXPORT_SYMBOL(drm_agp_enable);
+
+int drm_agp_enable_ioctl(struct inode *inode, struct file *filp,
+			 unsigned int cmd, unsigned long arg)
+{
+	drm_file_t *priv = filp->private_data;
+	drm_device_t *dev = priv->head->dev;
+	drm_agp_mode_t mode;
+
+
+	if (copy_from_user(&mode, (drm_agp_mode_t __user *) arg, sizeof(mode)))
+		return -EFAULT;
+
+	return drm_agp_enable(dev, mode);
+}
 
 /**
  * Allocate AGP memory.
@@ -206,7 +229,7 @@ int drm_agp_alloc(struct inode *inode, struct file *filp,
 	pages = (request.size + PAGE_SIZE - 1) / PAGE_SIZE;
 	type = (u32) request.type;
 
-	if (!(memory = drm_alloc_agp(dev->agp->bridge, pages, type))) {
+	if (!(memory = drm_alloc_agp(dev, pages, type))) {
 		drm_free(entry, sizeof(*entry), DRM_MEM_AGPLISTS);
 		return -ENOMEM;
 	}
@@ -403,13 +426,8 @@ drm_agp_head_t *drm_agp_init(drm_device_t *dev)
 		return NULL;
 	}
 	head->memory = NULL;
-#if LINUX_VERSION_CODE <= 0x020408
-	head->cant_use_aperture = 0;
-	head->page_mask = ~(0xfff);
-#else
 	head->cant_use_aperture = head->agp_info.cant_use_aperture;
 	head->page_mask = head->agp_info.page_mask;
-#endif
 
 	return head;
 }
@@ -436,6 +454,7 @@ int drm_agp_bind_memory(DRM_AGP_MEM *handle, off_t start)
 		return -EINVAL;
 	return agp_bind_memory(handle, start);
 }
+EXPORT_SYMBOL(drm_agp_bind_memory);
 
 /** Calls agp_unbind_memory() */
 int drm_agp_unbind_memory(DRM_AGP_MEM *handle)
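The pattern throughout this file: each AGP entry point is split into a core function that takes a drm_device_t and a thin *_ioctl wrapper that does the user-copy work, with the core half exported for in-kernel callers. A minimal sketch of such a caller (foo_enable_agp and the 4x mode value are illustrative only, not part of the patch):

	static int foo_enable_agp(drm_device_t *dev)
	{
		drm_agp_mode_t mode = { .mode = 4 };	/* e.g. AGP 4x */
		int ret;

		ret = drm_agp_acquire(dev);	/* no struct file needed */
		if (ret)
			return ret;
		return drm_agp_enable(dev, mode);
	}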
diff --git a/drivers/char/drm/drm_bufs.c b/drivers/char/drm/drm_bufs.c
index 4c6191d231b8..e0743ebbe4bd 100644
--- a/drivers/char/drm/drm_bufs.c
+++ b/drivers/char/drm/drm_bufs.c
@@ -36,37 +36,69 @@
 #include <linux/vmalloc.h>
 #include "drmP.h"
 
-/**
- * Compute size order.  Returns the exponent of the smaller power of two which
- * is greater or equal to given number.
- *
- * \param size size.
- * \return order.
- *
- * \todo Can be made faster.
- */
-int drm_order( unsigned long size )
+unsigned long drm_get_resource_start(drm_device_t *dev, unsigned int resource)
 {
-	int order;
-	unsigned long tmp;
+	return pci_resource_start(dev->pdev, resource);
+}
+EXPORT_SYMBOL(drm_get_resource_start);
 
-	for (order = 0, tmp = size >> 1; tmp; tmp >>= 1, order++)
-		;
+unsigned long drm_get_resource_len(drm_device_t *dev, unsigned int resource)
+{
+	return pci_resource_len(dev->pdev, resource);
+}
+EXPORT_SYMBOL(drm_get_resource_len);
 
-	if (size & (size - 1))
-		++order;
+static drm_local_map_t *drm_find_matching_map(drm_device_t *dev,
+					      drm_local_map_t *map)
+{
+	struct list_head *list;
 
-	return order;
+	list_for_each(list, &dev->maplist->head) {
+		drm_map_list_t *entry = list_entry(list, drm_map_list_t, head);
+		if (entry->map && map->type == entry->map->type &&
+		    entry->map->offset == map->offset) {
+			return entry->map;
+		}
+	}
+
+	return NULL;
 }
-EXPORT_SYMBOL(drm_order);
 
-#ifdef CONFIG_COMPAT
 /*
- * Used to allocate 32-bit handles for _DRM_SHM regions
- * The 0x10000000 value is chosen to be out of the way of
- * FB/register and GART physical addresses.
+ * Used to allocate 32-bit handles for mappings.
  */
-static unsigned int map32_handle = 0x10000000;
+#define START_RANGE 0x10000000
+#define END_RANGE 0x40000000
+
+#ifdef _LP64
+static __inline__ unsigned int HandleID(unsigned long lhandle, drm_device_t *dev)
+{
+	static unsigned int map32_handle = START_RANGE;
+	unsigned int hash;
+
+	if (lhandle & 0xffffffff00000000) {
+		hash = map32_handle;
+		map32_handle += PAGE_SIZE;
+		if (map32_handle > END_RANGE)
+			map32_handle = START_RANGE;
+	} else
+		hash = lhandle;
+
+	while (1) {
+		drm_map_list_t *_entry;
+		list_for_each_entry(_entry, &dev->maplist->head,head) {
+			if (_entry->user_token == hash)
+				break;
+		}
+		if (&_entry->head == &dev->maplist->head)
+			return hash;
+
+		hash += PAGE_SIZE;
+		map32_handle += PAGE_SIZE;
+	}
+}
+#else
+# define HandleID(x,dev) (unsigned int)(x)
 #endif
 
 /**
@@ -82,25 +114,23 @@ static unsigned int map32_handle = 0x10000000;
  * type.  Adds the map to the map list drm_device::maplist. Adds MTRR's where
  * applicable and if supported by the kernel.
  */
-int drm_addmap( struct inode *inode, struct file *filp,
-		unsigned int cmd, unsigned long arg )
+int drm_addmap(drm_device_t * dev, unsigned int offset,
+	       unsigned int size, drm_map_type_t type,
+	       drm_map_flags_t flags, drm_local_map_t ** map_ptr)
 {
-	drm_file_t *priv = filp->private_data;
-	drm_device_t *dev = priv->head->dev;
 	drm_map_t *map;
-	drm_map_t __user *argp = (void __user *)arg;
 	drm_map_list_t *list;
-
-	if ( !(filp->f_mode & 3) ) return -EACCES; /* Require read/write */
+	drm_dma_handle_t *dmah;
+	drm_local_map_t *found_map;
 
 	map = drm_alloc( sizeof(*map), DRM_MEM_MAPS );
 	if ( !map )
 		return -ENOMEM;
 
-	if ( copy_from_user( map, argp, sizeof(*map) ) ) {
-		drm_free( map, sizeof(*map), DRM_MEM_MAPS );
-		return -EFAULT;
-	}
+	map->offset = offset;
+	map->size = size;
+	map->flags = flags;
+	map->type = type;
 
 	/* Only allow shared memory to be removable since we only keep enough
 	 * book keeping information about shared memory to allow for removal
@@ -122,7 +152,7 @@ int drm_addmap( struct inode *inode, struct file *filp,
 	switch ( map->type ) {
 	case _DRM_REGISTERS:
 	case _DRM_FRAME_BUFFER:
-#if !defined(__sparc__) && !defined(__alpha__) && !defined(__ia64__)
+#if !defined(__sparc__) && !defined(__alpha__) && !defined(__ia64__) && !defined(__powerpc64__) && !defined(__x86_64__)
 		if ( map->offset + map->size < map->offset ||
 		     map->offset < virt_to_phys(high_memory) ) {
 			drm_free( map, sizeof(*map), DRM_MEM_MAPS );
@@ -132,6 +162,24 @@ int drm_addmap( struct inode *inode, struct file *filp,
 #ifdef __alpha__
 		map->offset += dev->hose->mem_space->start;
 #endif
+		/* Some drivers preinitialize some maps, without the X Server
+		 * needing to be aware of it.  Therefore, we just return success
+		 * when the server tries to create a duplicate map.
+		 */
+		found_map = drm_find_matching_map(dev, map);
+		if (found_map != NULL) {
+			if (found_map->size != map->size) {
+				DRM_DEBUG("Matching maps of type %d with "
+					  "mismatched sizes, (%ld vs %ld)\n",
+					  map->type, map->size, found_map->size);
+				found_map->size = map->size;
+			}
+
+			drm_free(map, sizeof(*map), DRM_MEM_MAPS);
+			*map_ptr = found_map;
+			return 0;
+		}
+
 		if (drm_core_has_MTRR(dev)) {
 			if ( map->type == _DRM_FRAME_BUFFER ||
 			     (map->flags & _DRM_WRITE_COMBINING) ) {
@@ -178,9 +226,22 @@ int drm_addmap( struct inode *inode, struct file *filp,
 			drm_free(map, sizeof(*map), DRM_MEM_MAPS);
 			return -EINVAL;
 		}
-		map->offset += dev->sg->handle;
+		map->offset += (unsigned long)dev->sg->virtual;
+		break;
+	case _DRM_CONSISTENT:
+		/* dma_addr_t is 64bit on i386 with CONFIG_HIGHMEM64G,
+		 * As we're limiting the address to 2^32-1 (or less),
+		 * casting it down to 32 bits is no problem, but we
+		 * need to point to a 64bit variable first. */
+		dmah = drm_pci_alloc(dev, map->size, map->size, 0xffffffffUL);
+		if (!dmah) {
+			drm_free(map, sizeof(*map), DRM_MEM_MAPS);
+			return -ENOMEM;
+		}
+		map->handle = dmah->vaddr;
+		map->offset = (unsigned long)dmah->busaddr;
+		kfree(dmah);
 		break;
-
 	default:
 		drm_free( map, sizeof(*map), DRM_MEM_MAPS );
 		return -EINVAL;
@@ -196,17 +257,56 @@ int drm_addmap( struct inode *inode, struct file *filp,
 
 	down(&dev->struct_sem);
 	list_add(&list->head, &dev->maplist->head);
-#ifdef CONFIG_COMPAT
-	/* Assign a 32-bit handle for _DRM_SHM mappings */
+	/* Assign a 32-bit handle */
 	/* We do it here so that dev->struct_sem protects the increment */
-	if (map->type == _DRM_SHM)
-		map->offset = map32_handle += PAGE_SIZE;
-#endif
+	list->user_token = HandleID(map->type==_DRM_SHM
+				    ? (unsigned long)map->handle
+				    : map->offset, dev);
 	up(&dev->struct_sem);
 
-	if ( copy_to_user( argp, map, sizeof(*map) ) )
+	*map_ptr = map;
+	return 0;
+}
+EXPORT_SYMBOL(drm_addmap);
+
+int drm_addmap_ioctl(struct inode *inode, struct file *filp,
+		     unsigned int cmd, unsigned long arg)
+{
+	drm_file_t *priv = filp->private_data;
+	drm_device_t *dev = priv->head->dev;
+	drm_map_t map;
+	drm_map_t *map_ptr;
+	drm_map_t __user *argp = (void __user *)arg;
+	int err;
+	unsigned long handle = 0;
+
+	if (!(filp->f_mode & 3))
+		return -EACCES;	/* Require read/write */
+
+	if (copy_from_user(& map, argp, sizeof(map))) {
+		return -EFAULT;
+	}
+
+	err = drm_addmap(dev, map.offset, map.size, map.type, map.flags,
+			 &map_ptr);
+
+	if (err) {
+		return err;
+	}
+
+	{
+		drm_map_list_t *_entry;
+		list_for_each_entry(_entry, &dev->maplist->head, head) {
+			if (_entry->map == map_ptr)
+				handle = _entry->user_token;
+		}
+		if (!handle)
+			return -EFAULT;
+	}
+
+	if (copy_to_user(argp, map_ptr, sizeof(*map_ptr)))
 		return -EFAULT;
-	if (copy_to_user(&argp->handle, &map->offset, sizeof(map->offset)))
+	if (put_user(handle, &argp->handle))
 		return -EFAULT;
 	return 0;
 }
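With drm_addmap() now callable from the kernel, a driver can preregister maps at load time and let the duplicate-map check above absorb the X server's later ADD_MAP request. A sketch under assumed names (foo_preinit; PCI resource 0 as the register BAR; flag choice illustrative):

	static int foo_preinit(drm_device_t *dev)
	{
		drm_local_map_t *mmio;

		/* The resource helpers are also introduced by this patch. */
		return drm_addmap(dev, drm_get_resource_start(dev, 0),
				  drm_get_resource_len(dev, 0),
				  _DRM_REGISTERS, _DRM_READ_ONLY, &mmio);
	}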
@@ -226,81 +326,138 @@ int drm_addmap( struct inode *inode, struct file *filp,
  * its being used, and free any associate resource (such as MTRR's) if it's not
  * being on use.
  *
- * \sa addmap().
+ * \sa drm_addmap
  */
-int drm_rmmap(struct inode *inode, struct file *filp,
-	      unsigned int cmd, unsigned long arg)
+int drm_rmmap_locked(drm_device_t *dev, drm_local_map_t *map)
 {
-	drm_file_t *priv = filp->private_data;
-	drm_device_t *dev = priv->head->dev;
 	struct list_head *list;
 	drm_map_list_t *r_list = NULL;
-	drm_vma_entry_t *pt, *prev;
-	drm_map_t *map;
+	drm_dma_handle_t dmah;
+
+	/* Find the list entry for the map and remove it */
+	list_for_each(list, &dev->maplist->head) {
+		r_list = list_entry(list, drm_map_list_t, head);
+
+		if (r_list->map == map) {
+			list_del(list);
+			drm_free(list, sizeof(*list), DRM_MEM_MAPS);
+			break;
+		}
+	}
+
+	/* List has wrapped around to the head pointer, or it's empty and we
+	 * didn't find anything.
+	 */
+	if (list == (&dev->maplist->head)) {
+		return -EINVAL;
+	}
+
+	switch (map->type) {
+	case _DRM_REGISTERS:
+		drm_ioremapfree(map->handle, map->size, dev);
+		/* FALLTHROUGH */
+	case _DRM_FRAME_BUFFER:
+		if (drm_core_has_MTRR(dev) && map->mtrr >= 0) {
+			int retcode;
+			retcode = mtrr_del(map->mtrr, map->offset,
+					   map->size);
+			DRM_DEBUG ("mtrr_del=%d\n", retcode);
+		}
+		break;
+	case _DRM_SHM:
+		vfree(map->handle);
+		break;
+	case _DRM_AGP:
+	case _DRM_SCATTER_GATHER:
+		break;
+	case _DRM_CONSISTENT:
+		dmah.vaddr = map->handle;
+		dmah.busaddr = map->offset;
+		dmah.size = map->size;
+		__drm_pci_free(dev, &dmah);
+		break;
+	}
+	drm_free(map, sizeof(*map), DRM_MEM_MAPS);
+
+	return 0;
+}
+EXPORT_SYMBOL(drm_rmmap_locked);
+
+int drm_rmmap(drm_device_t *dev, drm_local_map_t *map)
+{
+	int ret;
+
+	down(&dev->struct_sem);
+	ret = drm_rmmap_locked(dev, map);
+	up(&dev->struct_sem);
+
+	return ret;
+}
+EXPORT_SYMBOL(drm_rmmap);
+
+/* The rmmap ioctl appears to be unnecessary.  All mappings are torn down on
+ * the last close of the device, and this is necessary for cleanup when things
+ * exit uncleanly.  Therefore, having userland manually remove mappings seems
+ * like a pointless exercise since they're going away anyway.
+ *
+ * One use case might be after addmap is allowed for normal users for SHM and
+ * gets used by drivers that the server doesn't need to care about.  This seems
+ * unlikely.
+ */
+int drm_rmmap_ioctl(struct inode *inode, struct file *filp,
+		    unsigned int cmd, unsigned long arg)
+{
+	drm_file_t *priv = filp->private_data;
+	drm_device_t *dev = priv->head->dev;
 	drm_map_t request;
-	int found_maps = 0;
+	drm_local_map_t *map = NULL;
+	struct list_head *list;
+	int ret;
 
-	if (copy_from_user(&request, (drm_map_t __user *)arg,
-			   sizeof(request))) {
+	if (copy_from_user(&request, (drm_map_t __user *)arg, sizeof(request))) {
 		return -EFAULT;
 	}
 
 	down(&dev->struct_sem);
-	list = &dev->maplist->head;
 	list_for_each(list, &dev->maplist->head) {
-		r_list = list_entry(list, drm_map_list_t, head);
+		drm_map_list_t *r_list = list_entry(list, drm_map_list_t, head);
 
-		if(r_list->map &&
-		   r_list->map->offset == (unsigned long) request.handle &&
-		   r_list->map->flags & _DRM_REMOVABLE) break;
+		if (r_list->map &&
+		    r_list->user_token == (unsigned long) request.handle &&
+		    r_list->map->flags & _DRM_REMOVABLE) {
+			map = r_list->map;
+			break;
+		}
 	}
 
 	/* List has wrapped around to the head pointer, or its empty we didn't
 	 * find anything.
 	 */
-	if(list == (&dev->maplist->head)) {
+	if (list == (&dev->maplist->head)) {
 		up(&dev->struct_sem);
 		return -EINVAL;
 	}
-	map = r_list->map;
-	list_del(list);
-	drm_free(list, sizeof(*list), DRM_MEM_MAPS);
 
-	for (pt = dev->vmalist, prev = NULL; pt; prev = pt, pt = pt->next) {
-		if (pt->vma->vm_private_data == map) found_maps++;
-	}
+	if (!map)
+		return -EINVAL;
 
-	if(!found_maps) {
-		switch (map->type) {
-		case _DRM_REGISTERS:
-		case _DRM_FRAME_BUFFER:
-			if (drm_core_has_MTRR(dev)) {
-				if (map->mtrr >= 0) {
-					int retcode;
-					retcode = mtrr_del(map->mtrr,
-							   map->offset,
-							   map->size);
-					DRM_DEBUG("mtrr_del = %d\n", retcode);
-				}
-			}
-			drm_ioremapfree(map->handle, map->size, dev);
-			break;
-		case _DRM_SHM:
-			vfree(map->handle);
-			break;
-		case _DRM_AGP:
-		case _DRM_SCATTER_GATHER:
-			break;
-		}
-		drm_free(map, sizeof(*map), DRM_MEM_MAPS);
+	/* Register and framebuffer maps are permanent */
+	if ((map->type == _DRM_REGISTERS) || (map->type == _DRM_FRAME_BUFFER)) {
+		up(&dev->struct_sem);
+		return 0;
 	}
+
+	ret = drm_rmmap_locked(dev, map);
+
 	up(&dev->struct_sem);
-	return 0;
+
+	return ret;
 }
 
 /**
  * Cleanup after an error on one of the addbufs() functions.
  *
+ * \param dev DRM device.
  * \param entry buffer entry where the error occurred.
  *
  * Frees any pages and buffers associated with the given entry.
@@ -344,25 +501,19 @@ static void drm_cleanup_buf_error(drm_device_t *dev, drm_buf_entry_t *entry)
 
 #if __OS_HAS_AGP
 /**
- * Add AGP buffers for DMA transfers (ioctl).
+ * Add AGP buffers for DMA transfers.
  *
- * \param inode device inode.
- * \param filp file pointer.
- * \param cmd command.
- * \param arg pointer to a drm_buf_desc_t request.
+ * \param dev drm_device_t to which the buffers are to be added.
+ * \param request pointer to a drm_buf_desc_t describing the request.
  * \return zero on success or a negative number on failure.
  *
  * After some sanity checks creates a drm_buf structure for each buffer and
 * reallocates the buffer list of the same size order to accommodate the new
 * buffers.
 */
-static int drm_addbufs_agp( struct inode *inode, struct file *filp,
-			    unsigned int cmd, unsigned long arg )
+int drm_addbufs_agp(drm_device_t *dev, drm_buf_desc_t *request)
 {
-	drm_file_t *priv = filp->private_data;
-	drm_device_t *dev = priv->head->dev;
 	drm_device_dma_t *dma = dev->dma;
-	drm_buf_desc_t request;
 	drm_buf_entry_t *entry;
 	drm_buf_t *buf;
 	unsigned long offset;
@@ -376,25 +527,20 @@ static int drm_addbufs_agp( struct inode *inode, struct file *filp,
 	int byte_count;
 	int i;
 	drm_buf_t **temp_buflist;
-	drm_buf_desc_t __user *argp = (void __user *)arg;
 
 	if ( !dma ) return -EINVAL;
 
-	if ( copy_from_user( &request, argp,
-			     sizeof(request) ) )
-		return -EFAULT;
-
-	count = request.count;
-	order = drm_order( request.size );
+	count = request->count;
+	order = drm_order(request->size);
 	size = 1 << order;
 
-	alignment = (request.flags & _DRM_PAGE_ALIGN)
+	alignment = (request->flags & _DRM_PAGE_ALIGN)
 		? PAGE_ALIGN(size) : size;
 	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
 	total = PAGE_SIZE << page_order;
 
 	byte_count = 0;
-	agp_offset = dev->agp->base + request.agp_start;
+	agp_offset = dev->agp->base + request->agp_start;
 
 	DRM_DEBUG( "count: %d\n", count );
 	DRM_DEBUG( "order: %d\n", order );
@@ -508,26 +654,20 @@ static int drm_addbufs_agp( struct inode *inode, struct file *filp,
 
 	up( &dev->struct_sem );
 
-	request.count = entry->buf_count;
-	request.size = size;
-
-	if ( copy_to_user( argp, &request, sizeof(request) ) )
-		return -EFAULT;
+	request->count = entry->buf_count;
+	request->size = size;
 
 	dma->flags = _DRM_DMA_USE_AGP;
 
 	atomic_dec( &dev->buf_alloc );
 	return 0;
 }
+EXPORT_SYMBOL(drm_addbufs_agp);
 #endif /* __OS_HAS_AGP */
 
-static int drm_addbufs_pci( struct inode *inode, struct file *filp,
-			    unsigned int cmd, unsigned long arg )
+int drm_addbufs_pci(drm_device_t *dev, drm_buf_desc_t *request)
 {
-	drm_file_t *priv = filp->private_data;
-	drm_device_t *dev = priv->head->dev;
 	drm_device_dma_t *dma = dev->dma;
-	drm_buf_desc_t request;
 	int count;
 	int order;
 	int size;
@@ -543,26 +683,22 @@ static int drm_addbufs_pci( struct inode *inode, struct file *filp,
 	int page_count;
 	unsigned long *temp_pagelist;
 	drm_buf_t **temp_buflist;
-	drm_buf_desc_t __user *argp = (void __user *)arg;
 
 	if (!drm_core_check_feature(dev, DRIVER_PCI_DMA)) return -EINVAL;
 	if ( !dma ) return -EINVAL;
 
-	if ( copy_from_user( &request, argp, sizeof(request) ) )
-		return -EFAULT;
-
-	count = request.count;
-	order = drm_order( request.size );
+	count = request->count;
+	order = drm_order(request->size);
 	size = 1 << order;
 
 	DRM_DEBUG( "count=%d, size=%d (%d), order=%d, queue_count=%d\n",
-		   request.count, request.size, size,
+		   request->count, request->size, size,
 		   order, dev->queue_count );
 
 	if ( order < DRM_MIN_ORDER || order > DRM_MAX_ORDER ) return -EINVAL;
 	if ( dev->queue_count ) return -EBUSY; /* Not while in use */
 
-	alignment = (request.flags & _DRM_PAGE_ALIGN)
+	alignment = (request->flags & _DRM_PAGE_ALIGN)
 		? PAGE_ALIGN(size) : size;
 	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
 	total = PAGE_SIZE << page_order;
@@ -740,25 +876,18 @@ static int drm_addbufs_pci( struct inode *inode, struct file *filp, | |||
740 | 876 | ||
741 | up( &dev->struct_sem ); | 877 | up( &dev->struct_sem ); |
742 | 878 | ||
743 | request.count = entry->buf_count; | 879 | request->count = entry->buf_count; |
744 | request.size = size; | 880 | request->size = size; |
745 | |||
746 | if ( copy_to_user( argp, &request, sizeof(request) ) ) | ||
747 | return -EFAULT; | ||
748 | 881 | ||
749 | atomic_dec( &dev->buf_alloc ); | 882 | atomic_dec( &dev->buf_alloc ); |
750 | return 0; | 883 | return 0; |
751 | 884 | ||
752 | } | 885 | } |
886 | EXPORT_SYMBOL(drm_addbufs_pci); | ||
753 | 887 | ||
754 | static int drm_addbufs_sg( struct inode *inode, struct file *filp, | 888 | static int drm_addbufs_sg(drm_device_t *dev, drm_buf_desc_t *request) |
755 | unsigned int cmd, unsigned long arg ) | ||
756 | { | 889 | { |
757 | drm_file_t *priv = filp->private_data; | ||
758 | drm_device_t *dev = priv->head->dev; | ||
759 | drm_device_dma_t *dma = dev->dma; | 890 | drm_device_dma_t *dma = dev->dma; |
760 | drm_buf_desc_t __user *argp = (void __user *)arg; | ||
761 | drm_buf_desc_t request; | ||
762 | drm_buf_entry_t *entry; | 891 | drm_buf_entry_t *entry; |
763 | drm_buf_t *buf; | 892 | drm_buf_t *buf; |
764 | unsigned long offset; | 893 | unsigned long offset; |
@@ -777,20 +906,17 @@ static int drm_addbufs_sg( struct inode *inode, struct file *filp, | |||
777 | 906 | ||
778 | if ( !dma ) return -EINVAL; | 907 | if ( !dma ) return -EINVAL; |
779 | 908 | ||
780 | if ( copy_from_user( &request, argp, sizeof(request) ) ) | 909 | count = request->count; |
781 | return -EFAULT; | 910 | order = drm_order(request->size); |
782 | |||
783 | count = request.count; | ||
784 | order = drm_order( request.size ); | ||
785 | size = 1 << order; | 911 | size = 1 << order; |
786 | 912 | ||
787 | alignment = (request.flags & _DRM_PAGE_ALIGN) | 913 | alignment = (request->flags & _DRM_PAGE_ALIGN) |
788 | ? PAGE_ALIGN(size) : size; | 914 | ? PAGE_ALIGN(size) : size; |
789 | page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0; | 915 | page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0; |
790 | total = PAGE_SIZE << page_order; | 916 | total = PAGE_SIZE << page_order; |
791 | 917 | ||
792 | byte_count = 0; | 918 | byte_count = 0; |
793 | agp_offset = request.agp_start; | 919 | agp_offset = request->agp_start; |
794 | 920 | ||
795 | DRM_DEBUG( "count: %d\n", count ); | 921 | DRM_DEBUG( "count: %d\n", count ); |
796 | DRM_DEBUG( "order: %d\n", order ); | 922 | DRM_DEBUG( "order: %d\n", order ); |
@@ -848,7 +974,8 @@ static int drm_addbufs_sg( struct inode *inode, struct file *filp, | |||
848 | 974 | ||
849 | buf->offset = (dma->byte_count + offset); | 975 | buf->offset = (dma->byte_count + offset); |
850 | buf->bus_address = agp_offset + offset; | 976 | buf->bus_address = agp_offset + offset; |
851 | buf->address = (void *)(agp_offset + offset + dev->sg->handle); | 977 | buf->address = (void *)(agp_offset + offset |
978 | + (unsigned long)dev->sg->virtual); | ||
852 | buf->next = NULL; | 979 | buf->next = NULL; |
853 | buf->waiting = 0; | 980 | buf->waiting = 0; |
854 | buf->pending = 0; | 981 | buf->pending = 0; |
@@ -905,11 +1032,8 @@ static int drm_addbufs_sg( struct inode *inode, struct file *filp, | |||
905 | 1032 | ||
906 | up( &dev->struct_sem ); | 1033 | up( &dev->struct_sem ); |
907 | 1034 | ||
908 | request.count = entry->buf_count; | 1035 | request->count = entry->buf_count; |
909 | request.size = size; | 1036 | request->size = size; |
910 | |||
911 | if ( copy_to_user( argp, &request, sizeof(request) ) ) | ||
912 | return -EFAULT; | ||
913 | 1037 | ||
914 | dma->flags = _DRM_DMA_USE_SG; | 1038 | dma->flags = _DRM_DMA_USE_SG; |
915 | 1039 | ||
@@ -917,6 +1041,161 @@ static int drm_addbufs_sg( struct inode *inode, struct file *filp, | |||
917 | return 0; | 1041 | return 0; |
918 | } | 1042 | } |
919 | 1043 | ||
1044 | int drm_addbufs_fb(drm_device_t *dev, drm_buf_desc_t *request) | ||
1045 | { | ||
1046 | drm_device_dma_t *dma = dev->dma; | ||
1047 | drm_buf_entry_t *entry; | ||
1048 | drm_buf_t *buf; | ||
1049 | unsigned long offset; | ||
1050 | unsigned long agp_offset; | ||
1051 | int count; | ||
1052 | int order; | ||
1053 | int size; | ||
1054 | int alignment; | ||
1055 | int page_order; | ||
1056 | int total; | ||
1057 | int byte_count; | ||
1058 | int i; | ||
1059 | drm_buf_t **temp_buflist; | ||
1060 | |||
1061 | if (!drm_core_check_feature(dev, DRIVER_FB_DMA)) | ||
1062 | return -EINVAL; | ||
1063 | |||
1064 | if (!dma) | ||
1065 | return -EINVAL; | ||
1066 | |||
1067 | count = request->count; | ||
1068 | order = drm_order(request->size); | ||
1069 | size = 1 << order; | ||
1070 | |||
1071 | alignment = (request->flags & _DRM_PAGE_ALIGN) | ||
1072 | ? PAGE_ALIGN(size) : size; | ||
1073 | page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0; | ||
1074 | total = PAGE_SIZE << page_order; | ||
1075 | |||
1076 | byte_count = 0; | ||
1077 | agp_offset = request->agp_start; | ||
1078 | |||
1079 | DRM_DEBUG("count: %d\n", count); | ||
1080 | DRM_DEBUG("order: %d\n", order); | ||
1081 | DRM_DEBUG("size: %d\n", size); | ||
1082 | DRM_DEBUG("agp_offset: %lu\n", agp_offset); | ||
1083 | DRM_DEBUG("alignment: %d\n", alignment); | ||
1084 | DRM_DEBUG("page_order: %d\n", page_order); | ||
1085 | DRM_DEBUG("total: %d\n", total); | ||
1086 | |||
1087 | if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER) | ||
1088 | return -EINVAL; | ||
1089 | if (dev->queue_count) | ||
1090 | return -EBUSY; /* Not while in use */ | ||
1091 | |||
1092 | spin_lock(&dev->count_lock); | ||
1093 | if (dev->buf_use) { | ||
1094 | spin_unlock(&dev->count_lock); | ||
1095 | return -EBUSY; | ||
1096 | } | ||
1097 | atomic_inc(&dev->buf_alloc); | ||
1098 | spin_unlock(&dev->count_lock); | ||
1099 | |||
1100 | down(&dev->struct_sem); | ||
1101 | entry = &dma->bufs[order]; | ||
1102 | if (entry->buf_count) { | ||
1103 | up(&dev->struct_sem); | ||
1104 | atomic_dec(&dev->buf_alloc); | ||
1105 | return -ENOMEM; /* May only be called once for each order */ | ||
1106 | } | ||
1107 | |||
1108 | if (count < 0 || count > 4096) { | ||
1109 | up(&dev->struct_sem); | ||
1110 | atomic_dec(&dev->buf_alloc); | ||
1111 | return -EINVAL; | ||
1112 | } | ||
1113 | |||
1114 | entry->buflist = drm_alloc(count * sizeof(*entry->buflist), | ||
1115 | DRM_MEM_BUFS); | ||
1116 | if (!entry->buflist) { | ||
1117 | up(&dev->struct_sem); | ||
1118 | atomic_dec(&dev->buf_alloc); | ||
1119 | return -ENOMEM; | ||
1120 | } | ||
1121 | memset(entry->buflist, 0, count * sizeof(*entry->buflist)); | ||
1122 | |||
1123 | entry->buf_size = size; | ||
1124 | entry->page_order = page_order; | ||
1125 | |||
1126 | offset = 0; | ||
1127 | |||
1128 | while (entry->buf_count < count) { | ||
1129 | buf = &entry->buflist[entry->buf_count]; | ||
1130 | buf->idx = dma->buf_count + entry->buf_count; | ||
1131 | buf->total = alignment; | ||
1132 | buf->order = order; | ||
1133 | buf->used = 0; | ||
1134 | |||
1135 | buf->offset = (dma->byte_count + offset); | ||
1136 | buf->bus_address = agp_offset + offset; | ||
1137 | buf->address = (void *)(agp_offset + offset); | ||
1138 | buf->next = NULL; | ||
1139 | buf->waiting = 0; | ||
1140 | buf->pending = 0; | ||
1141 | init_waitqueue_head(&buf->dma_wait); | ||
1142 | buf->filp = NULL; | ||
1143 | |||
1144 | buf->dev_priv_size = dev->driver->dev_priv_size; | ||
1145 | buf->dev_private = drm_alloc(buf->dev_priv_size, DRM_MEM_BUFS); | ||
1146 | if (!buf->dev_private) { | ||
1147 | /* Set count correctly so we free the proper amount. */ | ||
1148 | entry->buf_count = count; | ||
1149 | drm_cleanup_buf_error(dev, entry); | ||
1150 | up(&dev->struct_sem); | ||
1151 | atomic_dec(&dev->buf_alloc); | ||
1152 | return -ENOMEM; | ||
1153 | } | ||
1154 | memset(buf->dev_private, 0, buf->dev_priv_size); | ||
1155 | |||
1156 | DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address); | ||
1157 | |||
1158 | offset += alignment; | ||
1159 | entry->buf_count++; | ||
1160 | byte_count += PAGE_SIZE << page_order; | ||
1161 | } | ||
1162 | |||
1163 | DRM_DEBUG("byte_count: %d\n", byte_count); | ||
1164 | |||
1165 | temp_buflist = drm_realloc(dma->buflist, | ||
1166 | dma->buf_count * sizeof(*dma->buflist), | ||
1167 | (dma->buf_count + entry->buf_count) | ||
1168 | * sizeof(*dma->buflist), DRM_MEM_BUFS); | ||
1169 | if (!temp_buflist) { | ||
1170 | /* Free the entry because it isn't valid */ | ||
1171 | drm_cleanup_buf_error(dev, entry); | ||
1172 | up(&dev->struct_sem); | ||
1173 | atomic_dec(&dev->buf_alloc); | ||
1174 | return -ENOMEM; | ||
1175 | } | ||
1176 | dma->buflist = temp_buflist; | ||
1177 | |||
1178 | for (i = 0; i < entry->buf_count; i++) { | ||
1179 | dma->buflist[i + dma->buf_count] = &entry->buflist[i]; | ||
1180 | } | ||
1181 | |||
1182 | dma->buf_count += entry->buf_count; | ||
1183 | dma->byte_count += byte_count; | ||
1184 | |||
1185 | DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count); | ||
1186 | DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count); | ||
1187 | |||
1188 | up(&dev->struct_sem); | ||
1189 | |||
1190 | request->count = entry->buf_count; | ||
1191 | request->size = size; | ||
1192 | |||
1193 | dma->flags = _DRM_DMA_USE_FB; | ||
1194 | |||
1195 | atomic_dec(&dev->buf_alloc); | ||
1196 | return 0; | ||
1197 | } | ||
1198 | |||
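The new drm_addbufs_fb() mirrors the AGP and scatter/gather paths above, but carves its buffers out of a driver-supplied range of video (frame-buffer) memory and tags the DMA state with _DRM_DMA_USE_FB. A hedged sketch of how a caller might fill in the request; the numeric values and fb_dma_offset are hypothetical, while the struct fields and flag names come from the code above and the dispatch code below:

    drm_buf_desc_t req;                    /* same struct the ioctl path uses */

    memset(&req, 0, sizeof(req));
    req.count = 32;                        /* buffers wanted (capped at 4096) */
    req.size = 64 * 1024;                  /* rounded up by drm_order()       */
    req.flags = _DRM_FB_BUFFER | _DRM_PAGE_ALIGN;
    req.agp_start = fb_dma_offset;         /* hypothetical offset into the FB */

    if (drm_addbufs_fb(dev, &req) == 0)    /* req.count/req.size written back */
            DRM_DEBUG("got %d buffers of %d bytes\n", req.count, req.size);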
920 | /** | 1199 | /** |
921 | * Add buffers for DMA transfers (ioctl). | 1200 | * Add buffers for DMA transfers (ioctl). |
922 | * | 1201 | * |
@@ -937,6 +1216,7 @@ int drm_addbufs( struct inode *inode, struct file *filp, | |||
937 | drm_buf_desc_t request; | 1216 | drm_buf_desc_t request; |
938 | drm_file_t *priv = filp->private_data; | 1217 | drm_file_t *priv = filp->private_data; |
939 | drm_device_t *dev = priv->head->dev; | 1218 | drm_device_t *dev = priv->head->dev; |
1219 | int ret; | ||
940 | 1220 | ||
941 | if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA)) | 1221 | if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA)) |
942 | return -EINVAL; | 1222 | return -EINVAL; |
@@ -947,13 +1227,23 @@ int drm_addbufs( struct inode *inode, struct file *filp, | |||
947 | 1227 | ||
948 | #if __OS_HAS_AGP | 1228 | #if __OS_HAS_AGP |
949 | if ( request.flags & _DRM_AGP_BUFFER ) | 1229 | if ( request.flags & _DRM_AGP_BUFFER ) |
950 | return drm_addbufs_agp( inode, filp, cmd, arg ); | 1230 | ret=drm_addbufs_agp(dev, &request); |
951 | else | 1231 | else |
952 | #endif | 1232 | #endif |
953 | if ( request.flags & _DRM_SG_BUFFER ) | 1233 | if ( request.flags & _DRM_SG_BUFFER ) |
954 | return drm_addbufs_sg( inode, filp, cmd, arg ); | 1234 | ret=drm_addbufs_sg(dev, &request); |
1235 | else if ( request.flags & _DRM_FB_BUFFER) | ||
1236 | ret=drm_addbufs_fb(dev, &request); | ||
955 | else | 1237 | else |
956 | return drm_addbufs_pci( inode, filp, cmd, arg ); | 1238 | ret=drm_addbufs_pci(dev, &request); |
1239 | |||
1240 | if (ret==0) { | ||
1241 | if (copy_to_user((void __user *)arg, &request, | ||
1242 | sizeof(request))) { | ||
1243 | ret = -EFAULT; | ||
1244 | } | ||
1245 | } | ||
1246 | return ret; | ||
957 | } | 1247 | } |
958 | 1248 | ||
959 | 1249 | ||
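With the copy_from_user()/copy_to_user() pair hoisted into this single dispatcher, the per-backend helpers now operate on kernel pointers, which is why drm_addbufs_agp() and drm_addbufs_pci() gain EXPORT_SYMBOL above: a driver can allocate buffers from kernel context without faking an ioctl. A minimal sketch, assuming a driver that wants AGP buffers at load time (the function name is hypothetical):

    static int example_preinit_bufs(drm_device_t *dev)  /* hypothetical */
    {
            drm_buf_desc_t req;

            memset(&req, 0, sizeof(req));
            req.count = 16;
            req.size = PAGE_SIZE;
            req.flags = _DRM_AGP_BUFFER;
            req.agp_start = 0;         /* offset into the AGP aperture */

            return drm_addbufs_agp(dev, &req);   /* exported above */
    }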
@@ -1196,43 +1486,31 @@ int drm_mapbufs( struct inode *inode, struct file *filp, | |||
1196 | return -EFAULT; | 1486 | return -EFAULT; |
1197 | 1487 | ||
1198 | if ( request.count >= dma->buf_count ) { | 1488 | if ( request.count >= dma->buf_count ) { |
1199 | if ((drm_core_has_AGP(dev) && (dma->flags & _DRM_DMA_USE_AGP)) || | 1489 | if ((drm_core_has_AGP(dev) && (dma->flags & _DRM_DMA_USE_AGP)) |
1200 | (drm_core_check_feature(dev, DRIVER_SG) && (dma->flags & _DRM_DMA_USE_SG)) ) { | 1490 | || (drm_core_check_feature(dev, DRIVER_SG) |
1491 | && (dma->flags & _DRM_DMA_USE_SG)) | ||
1492 | || (drm_core_check_feature(dev, DRIVER_FB_DMA) | ||
1493 | && (dma->flags & _DRM_DMA_USE_FB))) { | ||
1201 | drm_map_t *map = dev->agp_buffer_map; | 1494 | drm_map_t *map = dev->agp_buffer_map; |
1495 | unsigned long token = dev->agp_buffer_token; | ||
1202 | 1496 | ||
1203 | if ( !map ) { | 1497 | if ( !map ) { |
1204 | retcode = -EINVAL; | 1498 | retcode = -EINVAL; |
1205 | goto done; | 1499 | goto done; |
1206 | } | 1500 | } |
1207 | 1501 | ||
1208 | #if LINUX_VERSION_CODE <= 0x020402 | ||
1209 | down( ¤t->mm->mmap_sem ); | ||
1210 | #else | ||
1211 | down_write( ¤t->mm->mmap_sem ); | 1502 | down_write( ¤t->mm->mmap_sem ); |
1212 | #endif | ||
1213 | virtual = do_mmap( filp, 0, map->size, | 1503 | virtual = do_mmap( filp, 0, map->size, |
1214 | PROT_READ | PROT_WRITE, | 1504 | PROT_READ | PROT_WRITE, |
1215 | MAP_SHARED, | 1505 | MAP_SHARED, |
1216 | (unsigned long)map->offset ); | 1506 | token ); |
1217 | #if LINUX_VERSION_CODE <= 0x020402 | ||
1218 | up( ¤t->mm->mmap_sem ); | ||
1219 | #else | ||
1220 | up_write( ¤t->mm->mmap_sem ); | 1507 | up_write( ¤t->mm->mmap_sem ); |
1221 | #endif | ||
1222 | } else { | 1508 | } else { |
1223 | #if LINUX_VERSION_CODE <= 0x020402 | ||
1224 | down( ¤t->mm->mmap_sem ); | ||
1225 | #else | ||
1226 | down_write( ¤t->mm->mmap_sem ); | 1509 | down_write( ¤t->mm->mmap_sem ); |
1227 | #endif | ||
1228 | virtual = do_mmap( filp, 0, dma->byte_count, | 1510 | virtual = do_mmap( filp, 0, dma->byte_count, |
1229 | PROT_READ | PROT_WRITE, | 1511 | PROT_READ | PROT_WRITE, |
1230 | MAP_SHARED, 0 ); | 1512 | MAP_SHARED, 0 ); |
1231 | #if LINUX_VERSION_CODE <= 0x020402 | ||
1232 | up( ¤t->mm->mmap_sem ); | ||
1233 | #else | ||
1234 | up_write( ¤t->mm->mmap_sem ); | 1513 | up_write( ¤t->mm->mmap_sem ); |
1235 | #endif | ||
1236 | } | 1514 | } |
1237 | if ( virtual > -1024UL ) { | 1515 | if ( virtual > -1024UL ) { |
1238 | /* Real error */ | 1516 | /* Real error */ |
@@ -1279,3 +1557,26 @@ int drm_mapbufs( struct inode *inode, struct file *filp, | |||
1279 | return retcode; | 1557 | return retcode; |
1280 | } | 1558 | } |
1281 | 1559 | ||
1560 | /** | ||
1561 | * Compute size order. Returns the exponent of the smallest power of two | ||
1562 | * which is greater than or equal to the given number. | ||
1563 | * | ||
1564 | * \param size size. | ||
1565 | * \return order. | ||
1566 | * | ||
1567 | * \todo Can be made faster. | ||
1568 | */ | ||
1569 | int drm_order( unsigned long size ) | ||
1570 | { | ||
1571 | int order; | ||
1572 | unsigned long tmp; | ||
1573 | |||
1574 | for (order = 0, tmp = size >> 1; tmp; tmp >>= 1, order++) | ||
1575 | ; | ||
1576 | |||
1577 | if (size & (size - 1)) | ||
1578 | ++order; | ||
1579 | |||
1580 | return order; | ||
1581 | } | ||
1582 | EXPORT_SYMBOL(drm_order); | ||
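In other words, drm_order() computes ceil(log2(size)): the loop counts up to floor(log2(size)), and the final test bumps the result by one exactly when size is not already a power of two (that is when size & (size - 1) is non-zero). A standalone copy with worked examples:

    #include <assert.h>

    /* drm_order() duplicated verbatim so the examples compile on their own. */
    static int order_of(unsigned long size)
    {
            int order;
            unsigned long tmp;

            for (order = 0, tmp = size >> 1; tmp; tmp >>= 1, order++)
                    ;
            if (size & (size - 1))
                    ++order;
            return order;
    }

    int main(void)
    {
            assert(order_of(1) == 0);       /* already a power of two       */
            assert(order_of(3) == 2);       /* rounds up to 1 << 2 == 4     */
            assert(order_of(4096) == 12);   /* 1 << 12 == 4096              */
            assert(order_of(4097) == 13);   /* rounds up to 1 << 13 == 8192 */
            return 0;
    }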
diff --git a/drivers/char/drm/drm_context.c b/drivers/char/drm/drm_context.c index a7cfabd1ca2e..f515567e5b6f 100644 --- a/drivers/char/drm/drm_context.c +++ b/drivers/char/drm/drm_context.c | |||
@@ -212,6 +212,7 @@ int drm_getsareactx(struct inode *inode, struct file *filp, | |||
212 | drm_ctx_priv_map_t __user *argp = (void __user *)arg; | 212 | drm_ctx_priv_map_t __user *argp = (void __user *)arg; |
213 | drm_ctx_priv_map_t request; | 213 | drm_ctx_priv_map_t request; |
214 | drm_map_t *map; | 214 | drm_map_t *map; |
215 | drm_map_list_t *_entry; | ||
215 | 216 | ||
216 | if (copy_from_user(&request, argp, sizeof(request))) | 217 | if (copy_from_user(&request, argp, sizeof(request))) |
217 | return -EFAULT; | 218 | return -EFAULT; |
@@ -225,7 +226,17 @@ int drm_getsareactx(struct inode *inode, struct file *filp, | |||
225 | map = dev->context_sareas[request.ctx_id]; | 226 | map = dev->context_sareas[request.ctx_id]; |
226 | up(&dev->struct_sem); | 227 | up(&dev->struct_sem); |
227 | 228 | ||
228 | request.handle = (void *) map->offset; | 229 | request.handle = 0; |
230 | list_for_each_entry(_entry, &dev->maplist->head, head) { | ||
231 | if (_entry->map == map) { | ||
232 | request.handle = (void *)(unsigned long)_entry->user_token; | ||
233 | break; | ||
234 | } | ||
235 | } | ||
236 | if (request.handle == 0) | ||
237 | return -EINVAL; | ||
238 | |||
239 | |||
229 | if (copy_to_user(argp, &request, sizeof(request))) | 240 | if (copy_to_user(argp, &request, sizeof(request))) |
230 | return -EFAULT; | 241 | return -EFAULT; |
231 | return 0; | 242 | return 0; |
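This is the recurring theme of the series: userspace no longer sees raw map->offset values (which on 64-bit machines may be kernel or bus addresses that do not survive the trip through a 32-bit handle) but the per-map user_token, and every lookup, including drm_mmap() further down, matches on the token. drm_mapbufs() above likewise mmaps the buffer map through dev->agp_buffer_token instead of map->offset. A hedged userspace sketch of the round trip, assuming the DRM ioctl headers are available; fd, ctx_id and the SAREA size are hypothetical inputs:

    #include <sys/ioctl.h>
    #include <sys/mman.h>

    void *map_sarea(int fd, int ctx_id, size_t size)  /* hypothetical helper */
    {
            drm_ctx_priv_map_t req;

            req.ctx_id = ctx_id;
            if (ioctl(fd, DRM_IOCTL_GET_SAREA_CTX, &req) != 0)
                    return MAP_FAILED;

            /* req.handle is the opaque user_token filled in above, not a
             * kernel address; it doubles as the mmap offset. */
            return mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
                        fd, (off_t)(unsigned long)req.handle);
    }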
@@ -262,7 +273,7 @@ int drm_setsareactx(struct inode *inode, struct file *filp, | |||
262 | list_for_each(list, &dev->maplist->head) { | 273 | list_for_each(list, &dev->maplist->head) { |
263 | r_list = list_entry(list, drm_map_list_t, head); | 274 | r_list = list_entry(list, drm_map_list_t, head); |
264 | if (r_list->map | 275 | if (r_list->map |
265 | && r_list->map->offset == (unsigned long) request.handle) | 276 | && r_list->user_token == (unsigned long) request.handle) |
266 | goto found; | 277 | goto found; |
267 | } | 278 | } |
268 | bad: | 279 | bad: |
@@ -369,7 +380,7 @@ int drm_resctx( struct inode *inode, struct file *filp, | |||
369 | for ( i = 0 ; i < DRM_RESERVED_CONTEXTS ; i++ ) { | 380 | for ( i = 0 ; i < DRM_RESERVED_CONTEXTS ; i++ ) { |
370 | ctx.handle = i; | 381 | ctx.handle = i; |
371 | if ( copy_to_user( &res.contexts[i], | 382 | if ( copy_to_user( &res.contexts[i], |
372 | &i, sizeof(i) ) ) | 383 | &ctx, sizeof(ctx) ) ) |
373 | return -EFAULT; | 384 | return -EFAULT; |
374 | } | 385 | } |
375 | } | 386 | } |
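The drm_resctx() hunk is a straight bug fix: the old code copied the int loop counter i into each slot of the user's array, writing only sizeof(int) bytes where a drm_ctx_t was expected, while the new code copies the properly initialised ctx structure. A hedged sketch of what a client now gets back (struct and ioctl names as in the DRM headers; fd is an open DRM device):

    int list_reserved_ctxs(int fd)   /* hypothetical helper */
    {
            drm_ctx_t ctxs[DRM_RESERVED_CONTEXTS];  /* constant from drmP.h */
            drm_ctx_res_t res;

            res.count = DRM_RESERVED_CONTEXTS;
            res.contexts = ctxs;

            if (ioctl(fd, DRM_IOCTL_RES_CTX, &res) != 0)
                    return -1;

            /* each slot is now a complete drm_ctx_t with ctxs[i].handle == i,
             * not the raw bytes of the kernel's loop counter */
            return res.count;
    }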
diff --git a/drivers/char/drm/drm_drv.c b/drivers/char/drm/drm_drv.c index 3333c250c4d9..6ba48f346fcf 100644 --- a/drivers/char/drm/drm_drv.c +++ b/drivers/char/drm/drm_drv.c | |||
@@ -70,8 +70,8 @@ static drm_ioctl_desc_t drm_ioctls[] = { | |||
70 | [DRM_IOCTL_NR(DRM_IOCTL_UNBLOCK)] = { drm_noop, 1, 1 }, | 70 | [DRM_IOCTL_NR(DRM_IOCTL_UNBLOCK)] = { drm_noop, 1, 1 }, |
71 | [DRM_IOCTL_NR(DRM_IOCTL_AUTH_MAGIC)] = { drm_authmagic, 1, 1 }, | 71 | [DRM_IOCTL_NR(DRM_IOCTL_AUTH_MAGIC)] = { drm_authmagic, 1, 1 }, |
72 | 72 | ||
73 | [DRM_IOCTL_NR(DRM_IOCTL_ADD_MAP)] = { drm_addmap, 1, 1 }, | 73 | [DRM_IOCTL_NR(DRM_IOCTL_ADD_MAP)] = { drm_addmap_ioctl,1, 1 }, |
74 | [DRM_IOCTL_NR(DRM_IOCTL_RM_MAP)] = { drm_rmmap, 1, 0 }, | 74 | [DRM_IOCTL_NR(DRM_IOCTL_RM_MAP)] = { drm_rmmap_ioctl, 1, 0 }, |
75 | 75 | ||
76 | [DRM_IOCTL_NR(DRM_IOCTL_SET_SAREA_CTX)] = { drm_setsareactx, 1, 1 }, | 76 | [DRM_IOCTL_NR(DRM_IOCTL_SET_SAREA_CTX)] = { drm_setsareactx, 1, 1 }, |
77 | [DRM_IOCTL_NR(DRM_IOCTL_GET_SAREA_CTX)] = { drm_getsareactx, 1, 0 }, | 77 | [DRM_IOCTL_NR(DRM_IOCTL_GET_SAREA_CTX)] = { drm_getsareactx, 1, 0 }, |
@@ -102,10 +102,10 @@ static drm_ioctl_desc_t drm_ioctls[] = { | |||
102 | [DRM_IOCTL_NR(DRM_IOCTL_CONTROL)] = { drm_control, 1, 1 }, | 102 | [DRM_IOCTL_NR(DRM_IOCTL_CONTROL)] = { drm_control, 1, 1 }, |
103 | 103 | ||
104 | #if __OS_HAS_AGP | 104 | #if __OS_HAS_AGP |
105 | [DRM_IOCTL_NR(DRM_IOCTL_AGP_ACQUIRE)] = { drm_agp_acquire, 1, 1 }, | 105 | [DRM_IOCTL_NR(DRM_IOCTL_AGP_ACQUIRE)] = { drm_agp_acquire_ioctl, 1, 1 }, |
106 | [DRM_IOCTL_NR(DRM_IOCTL_AGP_RELEASE)] = { drm_agp_release, 1, 1 }, | 106 | [DRM_IOCTL_NR(DRM_IOCTL_AGP_RELEASE)] = { drm_agp_release_ioctl, 1, 1 }, |
107 | [DRM_IOCTL_NR(DRM_IOCTL_AGP_ENABLE)] = { drm_agp_enable, 1, 1 }, | 107 | [DRM_IOCTL_NR(DRM_IOCTL_AGP_ENABLE)] = { drm_agp_enable_ioctl, 1, 1 }, |
108 | [DRM_IOCTL_NR(DRM_IOCTL_AGP_INFO)] = { drm_agp_info, 1, 0 }, | 108 | [DRM_IOCTL_NR(DRM_IOCTL_AGP_INFO)] = { drm_agp_info_ioctl, 1, 0 }, |
109 | [DRM_IOCTL_NR(DRM_IOCTL_AGP_ALLOC)] = { drm_agp_alloc, 1, 1 }, | 109 | [DRM_IOCTL_NR(DRM_IOCTL_AGP_ALLOC)] = { drm_agp_alloc, 1, 1 }, |
110 | [DRM_IOCTL_NR(DRM_IOCTL_AGP_FREE)] = { drm_agp_free, 1, 1 }, | 110 | [DRM_IOCTL_NR(DRM_IOCTL_AGP_FREE)] = { drm_agp_free, 1, 1 }, |
111 | [DRM_IOCTL_NR(DRM_IOCTL_AGP_BIND)] = { drm_agp_bind, 1, 1 }, | 111 | [DRM_IOCTL_NR(DRM_IOCTL_AGP_BIND)] = { drm_agp_bind, 1, 1 }, |
@@ -127,14 +127,12 @@ static drm_ioctl_desc_t drm_ioctls[] = { | |||
127 | * | 127 | * |
128 | * Frees every resource in \p dev. | 128 | * Frees every resource in \p dev. |
129 | * | 129 | * |
130 | * \sa drm_device and setup(). | 130 | * \sa drm_device |
131 | */ | 131 | */ |
132 | int drm_takedown( drm_device_t *dev ) | 132 | int drm_takedown( drm_device_t *dev ) |
133 | { | 133 | { |
134 | drm_magic_entry_t *pt, *next; | 134 | drm_magic_entry_t *pt, *next; |
135 | drm_map_t *map; | ||
136 | drm_map_list_t *r_list; | 135 | drm_map_list_t *r_list; |
137 | struct list_head *list, *list_next; | ||
138 | drm_vma_entry_t *vma, *vma_next; | 136 | drm_vma_entry_t *vma, *vma_next; |
139 | int i; | 137 | int i; |
140 | 138 | ||
@@ -142,6 +140,7 @@ int drm_takedown( drm_device_t *dev ) | |||
142 | 140 | ||
143 | if (dev->driver->pretakedown) | 141 | if (dev->driver->pretakedown) |
144 | dev->driver->pretakedown(dev); | 142 | dev->driver->pretakedown(dev); |
143 | DRM_DEBUG("driver pretakedown completed\n"); | ||
145 | 144 | ||
146 | if (dev->unique) { | 145 | if (dev->unique) { |
147 | drm_free(dev->unique, strlen(dev->unique) + 1, DRM_MEM_DRIVER); | 146 | drm_free(dev->unique, strlen(dev->unique) + 1, DRM_MEM_DRIVER); |
@@ -178,11 +177,16 @@ int drm_takedown( drm_device_t *dev ) | |||
178 | } | 177 | } |
179 | dev->agp->memory = NULL; | 178 | dev->agp->memory = NULL; |
180 | 179 | ||
181 | if ( dev->agp->acquired ) drm_agp_do_release(dev); | 180 | if (dev->agp->acquired) |
181 | drm_agp_release(dev); | ||
182 | 182 | ||
183 | dev->agp->acquired = 0; | 183 | dev->agp->acquired = 0; |
184 | dev->agp->enabled = 0; | 184 | dev->agp->enabled = 0; |
185 | } | 185 | } |
186 | if (drm_core_check_feature(dev, DRIVER_SG) && dev->sg) { | ||
187 | drm_sg_cleanup(dev->sg); | ||
188 | dev->sg = NULL; | ||
189 | } | ||
186 | 190 | ||
187 | /* Clear vma list (only built for debugging) */ | 191 | /* Clear vma list (only built for debugging) */ |
188 | if ( dev->vmalist ) { | 192 | if ( dev->vmalist ) { |
@@ -194,48 +198,11 @@ int drm_takedown( drm_device_t *dev ) | |||
194 | } | 198 | } |
195 | 199 | ||
196 | if( dev->maplist ) { | 200 | if( dev->maplist ) { |
197 | list_for_each_safe( list, list_next, &dev->maplist->head ) { | 201 | while (!list_empty(&dev->maplist->head)) { |
198 | r_list = (drm_map_list_t *)list; | 202 | struct list_head *list = dev->maplist->head.next; |
199 | 203 | r_list = list_entry(list, drm_map_list_t, head); | |
200 | if ( ( map = r_list->map ) ) { | 204 | drm_rmmap_locked(dev, r_list->map); |
201 | switch ( map->type ) { | 205 | } |
202 | case _DRM_REGISTERS: | ||
203 | case _DRM_FRAME_BUFFER: | ||
204 | if (drm_core_has_MTRR(dev)) { | ||
205 | if ( map->mtrr >= 0 ) { | ||
206 | int retcode; | ||
207 | retcode = mtrr_del( map->mtrr, | ||
208 | map->offset, | ||
209 | map->size ); | ||
210 | DRM_DEBUG( "mtrr_del=%d\n", retcode ); | ||
211 | } | ||
212 | } | ||
213 | drm_ioremapfree( map->handle, map->size, dev ); | ||
214 | break; | ||
215 | case _DRM_SHM: | ||
216 | vfree(map->handle); | ||
217 | break; | ||
218 | |||
219 | case _DRM_AGP: | ||
220 | /* Do nothing here, because this is all | ||
221 | * handled in the AGP/GART driver. | ||
222 | */ | ||
223 | break; | ||
224 | case _DRM_SCATTER_GATHER: | ||
225 | /* Handle it */ | ||
226 | if (drm_core_check_feature(dev, DRIVER_SG) && dev->sg) { | ||
227 | drm_sg_cleanup(dev->sg); | ||
228 | dev->sg = NULL; | ||
229 | } | ||
230 | break; | ||
231 | } | ||
232 | drm_free(map, sizeof(*map), DRM_MEM_MAPS); | ||
233 | } | ||
234 | list_del( list ); | ||
235 | drm_free(r_list, sizeof(*r_list), DRM_MEM_MAPS); | ||
236 | } | ||
237 | drm_free(dev->maplist, sizeof(*dev->maplist), DRM_MEM_MAPS); | ||
238 | dev->maplist = NULL; | ||
239 | } | 206 | } |
240 | 207 | ||
241 | if (drm_core_check_feature(dev, DRIVER_DMA_QUEUE) && dev->queuelist ) { | 208 | if (drm_core_check_feature(dev, DRIVER_DMA_QUEUE) && dev->queuelist ) { |
@@ -264,6 +231,7 @@ int drm_takedown( drm_device_t *dev ) | |||
264 | } | 231 | } |
265 | up( &dev->struct_sem ); | 232 | up( &dev->struct_sem ); |
266 | 233 | ||
234 | DRM_DEBUG("takedown completed\n"); | ||
267 | return 0; | 235 | return 0; |
268 | } | 236 | } |
269 | 237 | ||
@@ -312,7 +280,7 @@ EXPORT_SYMBOL(drm_init); | |||
312 | * | 280 | * |
313 | * Cleans up all DRM device, calling takedown(). | 281 | * Cleans up all DRM device, calling takedown(). |
314 | * | 282 | * |
315 | * \sa drm_init(). | 283 | * \sa drm_init |
316 | */ | 284 | */ |
317 | static void drm_cleanup( drm_device_t *dev ) | 285 | static void drm_cleanup( drm_device_t *dev ) |
318 | { | 286 | { |
@@ -325,6 +293,11 @@ static void drm_cleanup( drm_device_t *dev ) | |||
325 | 293 | ||
326 | drm_takedown( dev ); | 294 | drm_takedown( dev ); |
327 | 295 | ||
296 | if (dev->maplist) { | ||
297 | drm_free(dev->maplist, sizeof(*dev->maplist), DRM_MEM_MAPS); | ||
298 | dev->maplist = NULL; | ||
299 | } | ||
300 | |||
328 | drm_ctxbitmap_cleanup( dev ); | 301 | drm_ctxbitmap_cleanup( dev ); |
329 | 302 | ||
330 | if (drm_core_has_MTRR(dev) && drm_core_has_AGP(dev) && | 303 | if (drm_core_has_MTRR(dev) && drm_core_has_AGP(dev) && |
diff --git a/drivers/char/drm/drm_fops.c b/drivers/char/drm/drm_fops.c index 10e64fde8d78..a1f4e9cd64ed 100644 --- a/drivers/char/drm/drm_fops.c +++ b/drivers/char/drm/drm_fops.c | |||
@@ -71,12 +71,6 @@ static int drm_setup( drm_device_t *dev ) | |||
71 | dev->magiclist[i].tail = NULL; | 71 | dev->magiclist[i].tail = NULL; |
72 | } | 72 | } |
73 | 73 | ||
74 | dev->maplist = drm_alloc(sizeof(*dev->maplist), | ||
75 | DRM_MEM_MAPS); | ||
76 | if(dev->maplist == NULL) return -ENOMEM; | ||
77 | memset(dev->maplist, 0, sizeof(*dev->maplist)); | ||
78 | INIT_LIST_HEAD(&dev->maplist->head); | ||
79 | |||
80 | dev->ctxlist = drm_alloc(sizeof(*dev->ctxlist), | 74 | dev->ctxlist = drm_alloc(sizeof(*dev->ctxlist), |
81 | DRM_MEM_CTXLIST); | 75 | DRM_MEM_CTXLIST); |
82 | if(dev->ctxlist == NULL) return -ENOMEM; | 76 | if(dev->ctxlist == NULL) return -ENOMEM; |
diff --git a/drivers/char/drm/drm_ioctl.c b/drivers/char/drm/drm_ioctl.c index 39afda0ccabe..d2ed3ba5aca9 100644 --- a/drivers/char/drm/drm_ioctl.c +++ b/drivers/char/drm/drm_ioctl.c | |||
@@ -208,7 +208,7 @@ int drm_getmap( struct inode *inode, struct file *filp, | |||
208 | map.size = r_list->map->size; | 208 | map.size = r_list->map->size; |
209 | map.type = r_list->map->type; | 209 | map.type = r_list->map->type; |
210 | map.flags = r_list->map->flags; | 210 | map.flags = r_list->map->flags; |
211 | map.handle = r_list->map->handle; | 211 | map.handle = (void *)(unsigned long) r_list->user_token; |
212 | map.mtrr = r_list->map->mtrr; | 212 | map.mtrr = r_list->map->mtrr; |
213 | up(&dev->struct_sem); | 213 | up(&dev->struct_sem); |
214 | 214 | ||
diff --git a/drivers/char/drm/drm_memory.c b/drivers/char/drm/drm_memory.c index ace3d42f4407..ff483fb418aa 100644 --- a/drivers/char/drm/drm_memory.c +++ b/drivers/char/drm/drm_memory.c | |||
@@ -142,27 +142,31 @@ void drm_free_pages(unsigned long address, int order, int area) | |||
142 | 142 | ||
143 | #if __OS_HAS_AGP | 143 | #if __OS_HAS_AGP |
144 | /** Wrapper around agp_allocate_memory() */ | 144 | /** Wrapper around agp_allocate_memory() */ |
145 | DRM_AGP_MEM *drm_alloc_agp(struct agp_bridge_data *bridge, int pages, u32 type) | 145 | DRM_AGP_MEM *drm_alloc_agp(drm_device_t *dev, int pages, u32 type) |
146 | { | 146 | { |
147 | return drm_agp_allocate_memory(bridge, pages, type); | 147 | return drm_agp_allocate_memory(dev->agp->bridge, pages, type); |
148 | } | 148 | } |
149 | EXPORT_SYMBOL(drm_alloc_agp); | ||
149 | 150 | ||
150 | /** Wrapper around agp_free_memory() */ | 151 | /** Wrapper around agp_free_memory() */ |
151 | int drm_free_agp(DRM_AGP_MEM *handle, int pages) | 152 | int drm_free_agp(DRM_AGP_MEM *handle, int pages) |
152 | { | 153 | { |
153 | return drm_agp_free_memory(handle) ? 0 : -EINVAL; | 154 | return drm_agp_free_memory(handle) ? 0 : -EINVAL; |
154 | } | 155 | } |
156 | EXPORT_SYMBOL(drm_free_agp); | ||
155 | 157 | ||
156 | /** Wrapper around agp_bind_memory() */ | 158 | /** Wrapper around agp_bind_memory() */ |
157 | int drm_bind_agp(DRM_AGP_MEM *handle, unsigned int start) | 159 | int drm_bind_agp(DRM_AGP_MEM *handle, unsigned int start) |
158 | { | 160 | { |
159 | return drm_agp_bind_memory(handle, start); | 161 | return drm_agp_bind_memory(handle, start); |
160 | } | 162 | } |
163 | EXPORT_SYMBOL(drm_bind_agp); | ||
161 | 164 | ||
162 | /** Wrapper around agp_unbind_memory() */ | 165 | /** Wrapper around agp_unbind_memory() */ |
163 | int drm_unbind_agp(DRM_AGP_MEM *handle) | 166 | int drm_unbind_agp(DRM_AGP_MEM *handle) |
164 | { | 167 | { |
165 | return drm_agp_unbind_memory(handle); | 168 | return drm_agp_unbind_memory(handle); |
166 | } | 169 | } |
170 | EXPORT_SYMBOL(drm_unbind_agp); | ||
167 | #endif /* agp */ | 171 | #endif /* agp */ |
168 | #endif /* debug_memory */ | 172 | #endif /* debug_memory */ |
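drm_alloc_agp() now takes the device instead of a raw bridge pointer, and the four wrappers are exported so modular drivers can link against them directly. A hedged usage sketch; 'type' is the agpgart memory type and 'start' a page offset into the aperture, both hypothetical here:

    static int example_agp_roundtrip(drm_device_t *dev, int pages,
                                     u32 type, unsigned int start)
    {
            DRM_AGP_MEM *mem = drm_alloc_agp(dev, pages, type);

            if (mem == NULL)
                    return -ENOMEM;

            if (drm_bind_agp(mem, start) == 0) {
                    /* device can now reach the pages through the GART */
                    drm_unbind_agp(mem);
            }
            return drm_free_agp(mem, pages);
    }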
diff --git a/drivers/char/drm/drm_pci.c b/drivers/char/drm/drm_pci.c index 192e8762571c..09ed712c1a7f 100644 --- a/drivers/char/drm/drm_pci.c +++ b/drivers/char/drm/drm_pci.c | |||
@@ -46,11 +46,11 @@ | |||
46 | /** | 46 | /** |
47 | * \brief Allocate a PCI consistent memory block, for DMA. | 47 | * \brief Allocate a PCI consistent memory block, for DMA. |
48 | */ | 48 | */ |
49 | void *drm_pci_alloc(drm_device_t * dev, size_t size, size_t align, | 49 | drm_dma_handle_t *drm_pci_alloc(drm_device_t * dev, size_t size, size_t align, |
50 | dma_addr_t maxaddr, dma_addr_t * busaddr) | 50 | dma_addr_t maxaddr) |
51 | { | 51 | { |
52 | void *address; | 52 | drm_dma_handle_t *dmah; |
53 | #if DRM_DEBUG_MEMORY | 53 | #ifdef DRM_DEBUG_MEMORY |
54 | int area = DRM_MEM_DMA; | 54 | int area = DRM_MEM_DMA; |
55 | 55 | ||
56 | spin_lock(&drm_mem_lock); | 56 | spin_lock(&drm_mem_lock); |
@@ -74,13 +74,19 @@ void *drm_pci_alloc(drm_device_t * dev, size_t size, size_t align, | |||
74 | return NULL; | 74 | return NULL; |
75 | } | 75 | } |
76 | 76 | ||
77 | address = pci_alloc_consistent(dev->pdev, size, busaddr); | 77 | dmah = kmalloc(sizeof(drm_dma_handle_t), GFP_KERNEL); |
78 | if (!dmah) | ||
79 | return NULL; | ||
80 | |||
81 | dmah->size = size; | ||
82 | dmah->vaddr = pci_alloc_consistent(dev->pdev, size, &dmah->busaddr); | ||
78 | 83 | ||
79 | #if DRM_DEBUG_MEMORY | 84 | #ifdef DRM_DEBUG_MEMORY |
80 | if (address == NULL) { | 85 | if (dmah->vaddr == NULL) { |
81 | spin_lock(&drm_mem_lock); | 86 | spin_lock(&drm_mem_lock); |
82 | ++drm_mem_stats[area].fail_count; | 87 | ++drm_mem_stats[area].fail_count; |
83 | spin_unlock(&drm_mem_lock); | 88 | spin_unlock(&drm_mem_lock); |
89 | kfree(dmah); | ||
84 | return NULL; | 90 | return NULL; |
85 | } | 91 | } |
86 | 92 | ||
@@ -90,37 +96,42 @@ void *drm_pci_alloc(drm_device_t * dev, size_t size, size_t align, | |||
90 | drm_ram_used += size; | 96 | drm_ram_used += size; |
91 | spin_unlock(&drm_mem_lock); | 97 | spin_unlock(&drm_mem_lock); |
92 | #else | 98 | #else |
93 | if (address == NULL) | 99 | if (dmah->vaddr == NULL) { |
100 | kfree(dmah); | ||
94 | return NULL; | 101 | return NULL; |
102 | } | ||
95 | #endif | 103 | #endif |
96 | 104 | ||
97 | memset(address, 0, size); | 105 | memset(dmah->vaddr, 0, size); |
98 | 106 | ||
99 | return address; | 107 | return dmah; |
100 | } | 108 | } |
101 | EXPORT_SYMBOL(drm_pci_alloc); | 109 | EXPORT_SYMBOL(drm_pci_alloc); |
102 | 110 | ||
103 | /** | 111 | /** |
104 | * \brief Free a PCI consistent memory block. | 112 | * \brief Free a PCI consistent memory block without freeing its descriptor. |
113 | * | ||
114 | * This function is for internal use in the Linux-specific DRM core code. | ||
105 | */ | 115 | */ |
106 | void | 116 | void |
107 | drm_pci_free(drm_device_t * dev, size_t size, void *vaddr, dma_addr_t busaddr) | 117 | __drm_pci_free(drm_device_t * dev, drm_dma_handle_t *dmah) |
108 | { | 118 | { |
109 | #if DRM_DEBUG_MEMORY | 119 | #ifdef DRM_DEBUG_MEMORY |
110 | int area = DRM_MEM_DMA; | 120 | int area = DRM_MEM_DMA; |
111 | int alloc_count; | 121 | int alloc_count; |
112 | int free_count; | 122 | int free_count; |
113 | #endif | 123 | #endif |
114 | 124 | ||
115 | if (!vaddr) { | 125 | if (!dmah->vaddr) { |
116 | #if DRM_DEBUG_MEMORY | 126 | #ifdef DRM_DEBUG_MEMORY |
117 | DRM_MEM_ERROR(area, "Attempt to free address 0\n"); | 127 | DRM_MEM_ERROR(area, "Attempt to free address 0\n"); |
118 | #endif | 128 | #endif |
119 | } else { | 129 | } else { |
120 | pci_free_consistent(dev->pdev, size, vaddr, busaddr); | 130 | pci_free_consistent(dev->pdev, dmah->size, dmah->vaddr, |
131 | dmah->busaddr); | ||
121 | } | 132 | } |
122 | 133 | ||
123 | #if DRM_DEBUG_MEMORY | 134 | #ifdef DRM_DEBUG_MEMORY |
124 | spin_lock(&drm_mem_lock); | 135 | spin_lock(&drm_mem_lock); |
125 | free_count = ++drm_mem_stats[area].free_count; | 136 | free_count = ++drm_mem_stats[area].free_count; |
126 | alloc_count = drm_mem_stats[area].succeed_count; | 137 | alloc_count = drm_mem_stats[area].succeed_count; |
@@ -135,6 +146,16 @@ drm_pci_free(drm_device_t * dev, size_t size, void *vaddr, dma_addr_t busaddr) | |||
135 | #endif | 146 | #endif |
136 | 147 | ||
137 | } | 148 | } |
149 | |||
150 | /** | ||
151 | * \brief Free a PCI consistent memory block | ||
152 | */ | ||
153 | void | ||
154 | drm_pci_free(drm_device_t *dev, drm_dma_handle_t *dmah) | ||
155 | { | ||
156 | __drm_pci_free(dev, dmah); | ||
157 | kfree(dmah); | ||
158 | } | ||
138 | EXPORT_SYMBOL(drm_pci_free); | 159 | EXPORT_SYMBOL(drm_pci_free); |
139 | 160 | ||
140 | /*@}*/ | 161 | /*@}*/ |
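drm_pci_alloc() now hands back a descriptor instead of a bare virtual address plus an out-parameter for the bus address. The handle type is defined elsewhere in this series (drmP.h); a sketch of what it presumably bundles, and of the intended pairing of the two free functions:

    /* Presumed shape of the descriptor (defined in drmP.h, not in this hunk): */
    typedef struct drm_dma_handle {
            dma_addr_t busaddr;     /* bus address, for the device */
            void *vaddr;            /* kernel virtual address      */
            size_t size;            /* size of the block in bytes  */
    } drm_dma_handle_t;

    /* Usage sketch under that assumption: */
    drm_dma_handle_t *dmah = drm_pci_alloc(dev, 0x1000, 0x1000, 0xffffffff);

    if (dmah != NULL) {
            /* ... program dmah->busaddr into the device, use dmah->vaddr ... */
            drm_pci_free(dev, dmah);       /* frees the block and the handle */
    }

__drm_pci_free() exists for callers whose handle lives somewhere else, such as the stack-allocated dmah rebuilt from a _DRM_CONSISTENT map in drm_vm.c below.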
diff --git a/drivers/char/drm/drm_pciids.h b/drivers/char/drm/drm_pciids.h index 70ca4fa55c9d..58b1747cd440 100644 --- a/drivers/char/drm/drm_pciids.h +++ b/drivers/char/drm/drm_pciids.h | |||
@@ -25,6 +25,8 @@ | |||
25 | {0x1002, 0x4965, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R250}, \ | 25 | {0x1002, 0x4965, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R250}, \ |
26 | {0x1002, 0x4966, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R250}, \ | 26 | {0x1002, 0x4966, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R250}, \ |
27 | {0x1002, 0x4967, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R250}, \ | 27 | {0x1002, 0x4967, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R250}, \ |
28 | {0x1002, 0x4A49, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420}, \ | ||
29 | {0x1002, 0x4A4B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420}, \ | ||
28 | {0x1002, 0x4C57, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV200|CHIP_IS_MOBILITY}, \ | 30 | {0x1002, 0x4C57, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV200|CHIP_IS_MOBILITY}, \ |
29 | {0x1002, 0x4C58, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV200|CHIP_IS_MOBILITY}, \ | 31 | {0x1002, 0x4C58, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV200|CHIP_IS_MOBILITY}, \ |
30 | {0x1002, 0x4C59, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV100|CHIP_IS_MOBILITY}, \ | 32 | {0x1002, 0x4C59, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV100|CHIP_IS_MOBILITY}, \ |
@@ -33,7 +35,17 @@ | |||
33 | {0x1002, 0x4C65, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R250|CHIP_IS_MOBILITY}, \ | 35 | {0x1002, 0x4C65, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R250|CHIP_IS_MOBILITY}, \ |
34 | {0x1002, 0x4C66, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R250|CHIP_IS_MOBILITY}, \ | 36 | {0x1002, 0x4C66, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R250|CHIP_IS_MOBILITY}, \ |
35 | {0x1002, 0x4C67, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R250|CHIP_IS_MOBILITY}, \ | 37 | {0x1002, 0x4C67, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R250|CHIP_IS_MOBILITY}, \ |
38 | {0x1002, 0x4E44, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R300}, \ | ||
39 | {0x1002, 0x4E45, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R300}, \ | ||
40 | {0x1002, 0x4E46, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350}, \ | ||
41 | {0x1002, 0x4E47, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R300}, \ | ||
42 | {0x1002, 0x4E48, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R350}, \ | ||
43 | {0x1002, 0x4E49, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R350}, \ | ||
44 | {0x1002, 0x4E4A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350}, \ | ||
45 | {0x1002, 0x4E4B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R350}, \ | ||
36 | {0x1002, 0x4E50, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350|CHIP_IS_MOBILITY}, \ | 46 | {0x1002, 0x4E50, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350|CHIP_IS_MOBILITY}, \ |
47 | {0x1002, 0x4E51, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350|CHIP_IS_MOBILITY}, \ | ||
48 | {0x1002, 0x4E54, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350|CHIP_IS_MOBILITY}, \ | ||
37 | {0x1002, 0x5144, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R100|CHIP_SINGLE_CRTC}, \ | 49 | {0x1002, 0x5144, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R100|CHIP_SINGLE_CRTC}, \ |
38 | {0x1002, 0x5145, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R100|CHIP_SINGLE_CRTC}, \ | 50 | {0x1002, 0x5145, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R100|CHIP_SINGLE_CRTC}, \ |
39 | {0x1002, 0x5146, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R100|CHIP_SINGLE_CRTC}, \ | 51 | {0x1002, 0x5146, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R100|CHIP_SINGLE_CRTC}, \ |
@@ -56,6 +68,7 @@ | |||
56 | {0x1002, 0x516A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R200}, \ | 68 | {0x1002, 0x516A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R200}, \ |
57 | {0x1002, 0x516B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R200}, \ | 69 | {0x1002, 0x516B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R200}, \ |
58 | {0x1002, 0x516C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R200}, \ | 70 | {0x1002, 0x516C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R200}, \ |
71 | {0x1002, 0x5460, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350}, \ | ||
59 | {0x1002, 0x5834, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS300|CHIP_IS_IGP}, \ | 72 | {0x1002, 0x5834, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS300|CHIP_IS_IGP}, \ |
60 | {0x1002, 0x5835, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS300|CHIP_IS_IGP|CHIP_IS_MOBILITY}, \ | 73 | {0x1002, 0x5835, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS300|CHIP_IS_IGP|CHIP_IS_MOBILITY}, \ |
61 | {0x1002, 0x5836, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS300|CHIP_IS_IGP}, \ | 74 | {0x1002, 0x5836, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS300|CHIP_IS_IGP}, \ |
@@ -116,9 +129,10 @@ | |||
116 | {0, 0, 0} | 129 | {0, 0, 0} |
117 | 130 | ||
118 | #define mga_PCI_IDS \ | 131 | #define mga_PCI_IDS \ |
119 | {0x102b, 0x0521, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ | 132 | {0x102b, 0x0520, PCI_ANY_ID, PCI_ANY_ID, 0, 0, MGA_CARD_TYPE_G200}, \ |
120 | {0x102b, 0x0525, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ | 133 | {0x102b, 0x0521, PCI_ANY_ID, PCI_ANY_ID, 0, 0, MGA_CARD_TYPE_G200}, \ |
121 | {0x102b, 0x2527, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ | 134 | {0x102b, 0x0525, PCI_ANY_ID, PCI_ANY_ID, 0, 0, MGA_CARD_TYPE_G400}, \ |
135 | {0x102b, 0x2527, PCI_ANY_ID, PCI_ANY_ID, 0, 0, MGA_CARD_TYPE_G550}, \ | ||
122 | {0, 0, 0} | 136 | {0, 0, 0} |
123 | 137 | ||
124 | #define mach64_PCI_IDS \ | 138 | #define mach64_PCI_IDS \ |
@@ -162,9 +176,10 @@ | |||
162 | 176 | ||
163 | #define viadrv_PCI_IDS \ | 177 | #define viadrv_PCI_IDS \ |
164 | {0x1106, 0x3022, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ | 178 | {0x1106, 0x3022, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ |
179 | {0x1106, 0x3118, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ | ||
165 | {0x1106, 0x3122, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ | 180 | {0x1106, 0x3122, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ |
166 | {0x1106, 0x7205, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ | 181 | {0x1106, 0x7205, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ |
167 | {0x1106, 0x7204, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ | 182 | {0x1106, 0x3108, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ |
168 | {0, 0, 0} | 183 | {0, 0, 0} |
169 | 184 | ||
170 | #define i810_PCI_IDS \ | 185 | #define i810_PCI_IDS \ |
@@ -181,33 +196,30 @@ | |||
181 | {0x8086, 0x2572, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ | 196 | {0x8086, 0x2572, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ |
182 | {0, 0, 0} | 197 | {0, 0, 0} |
183 | 198 | ||
184 | #define gamma_PCI_IDS \ | ||
185 | {0x3d3d, 0x0008, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ | ||
186 | {0, 0, 0} | ||
187 | |||
188 | #define savage_PCI_IDS \ | 199 | #define savage_PCI_IDS \ |
189 | {0x5333, 0x8a22, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ | 200 | {0x5333, 0x8a20, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SAVAGE3D}, \ |
190 | {0x5333, 0x8a23, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ | 201 | {0x5333, 0x8a21, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SAVAGE3D}, \ |
191 | {0x5333, 0x8c10, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ | 202 | {0x5333, 0x8a22, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SAVAGE4}, \ |
192 | {0x5333, 0x8c11, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ | 203 | {0x5333, 0x8a23, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SAVAGE4}, \ |
193 | {0x5333, 0x8c12, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ | 204 | {0x5333, 0x8c10, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SAVAGE_MX}, \ |
194 | {0x5333, 0x8c13, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ | 205 | {0x5333, 0x8c11, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SAVAGE_MX}, \ |
195 | {0x5333, 0x8c20, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ | 206 | {0x5333, 0x8c12, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SAVAGE_MX}, \ |
196 | {0x5333, 0x8c21, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ | 207 | {0x5333, 0x8c13, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SAVAGE_MX}, \ |
197 | {0x5333, 0x8c22, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ | 208 | {0x5333, 0x8c22, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SUPERSAVAGE}, \ |
198 | {0x5333, 0x8c24, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ | 209 | {0x5333, 0x8c24, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SUPERSAVAGE}, \ |
199 | {0x5333, 0x8c26, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ | 210 | {0x5333, 0x8c26, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SUPERSAVAGE}, \ |
200 | {0x5333, 0x8c2a, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ | 211 | {0x5333, 0x8c2a, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SUPERSAVAGE}, \ |
201 | {0x5333, 0x8c2b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ | 212 | {0x5333, 0x8c2b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SUPERSAVAGE}, \ |
202 | {0x5333, 0x8c2c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ | 213 | {0x5333, 0x8c2c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SUPERSAVAGE}, \ |
203 | {0x5333, 0x8c2d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ | 214 | {0x5333, 0x8c2d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SUPERSAVAGE}, \ |
204 | {0x5333, 0x8c2e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ | 215 | {0x5333, 0x8c2e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SUPERSAVAGE}, \ |
205 | {0x5333, 0x8c2f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ | 216 | {0x5333, 0x8c2f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SUPERSAVAGE}, \ |
206 | {0x5333, 0x8a25, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ | 217 | {0x5333, 0x8a25, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_PROSAVAGE}, \ |
207 | {0x5333, 0x8a26, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ | 218 | {0x5333, 0x8a26, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_PROSAVAGE}, \ |
208 | {0x5333, 0x8d01, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ | 219 | {0x5333, 0x8d01, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_TWISTER}, \ |
209 | {0x5333, 0x8d02, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ | 220 | {0x5333, 0x8d02, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_TWISTER}, \ |
210 | {0x5333, 0x8d04, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ | 221 | {0x5333, 0x8d03, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_PROSAVAGEDDR}, \ |
222 | {0x5333, 0x8d04, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_PROSAVAGEDDR}, \ | ||
211 | {0, 0, 0} | 223 | {0, 0, 0} |
212 | 224 | ||
213 | #define ffb_PCI_IDS \ | 225 | #define ffb_PCI_IDS \ |
@@ -223,10 +235,3 @@ | |||
223 | {0x8086, 0x2772, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ | 235 | {0x8086, 0x2772, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ |
224 | {0, 0, 0} | 236 | {0, 0, 0} |
225 | 237 | ||
226 | #define viadrv_PCI_IDS \ | ||
227 | {0x1106, 0x3022, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ | ||
228 | {0x1106, 0x3122, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ | ||
229 | {0x1106, 0x7205, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ | ||
230 | {0x1106, 0x7204, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ | ||
231 | {0, 0, 0} | ||
232 | |||
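The MGA and Savage tables now carry a chip type in their last initializer, which is pci_device_id::driver_data, and the stale duplicate of viadrv_PCI_IDS at the bottom of the header is dropped. A hedged sketch of how a driver probe recovers the type; the probe function itself is hypothetical:

    static struct pci_device_id pciidlist[] = {
            savage_PCI_IDS          /* expands to the table above */
    };

    static int probe(struct pci_dev *pdev, const struct pci_device_id *ent)
    {
            unsigned long chipset = ent->driver_data;   /* e.g. S3_SAVAGE4 */

            /* ... select engine setup based on chipset ... */
            return 0;
    }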
diff --git a/drivers/char/drm/drm_proc.c b/drivers/char/drm/drm_proc.c index 4774087d2e9e..32d2bb99462c 100644 --- a/drivers/char/drm/drm_proc.c +++ b/drivers/char/drm/drm_proc.c | |||
@@ -210,8 +210,8 @@ static int drm__vm_info(char *buf, char **start, off_t offset, int request, | |||
210 | 210 | ||
211 | /* Hardcoded from _DRM_FRAME_BUFFER, | 211 | /* Hardcoded from _DRM_FRAME_BUFFER, |
212 | _DRM_REGISTERS, _DRM_SHM, _DRM_AGP, and | 212 | _DRM_REGISTERS, _DRM_SHM, _DRM_AGP, and |
213 | _DRM_SCATTER_GATHER. */ | 213 | _DRM_SCATTER_GATHER and _DRM_CONSISTENT */ |
214 | const char *types[] = { "FB", "REG", "SHM", "AGP", "SG" }; | 214 | const char *types[] = { "FB", "REG", "SHM", "AGP", "SG", "PCI" }; |
215 | const char *type; | 215 | const char *type; |
216 | int i; | 216 | int i; |
217 | 217 | ||
@@ -229,16 +229,19 @@ static int drm__vm_info(char *buf, char **start, off_t offset, int request, | |||
229 | if (dev->maplist != NULL) list_for_each(list, &dev->maplist->head) { | 229 | if (dev->maplist != NULL) list_for_each(list, &dev->maplist->head) { |
230 | r_list = list_entry(list, drm_map_list_t, head); | 230 | r_list = list_entry(list, drm_map_list_t, head); |
231 | map = r_list->map; | 231 | map = r_list->map; |
232 | if(!map) continue; | 232 | if(!map) |
233 | if (map->type < 0 || map->type > 4) type = "??"; | 233 | continue; |
234 | else type = types[map->type]; | 234 | if (map->type < 0 || map->type > 5) |
235 | DRM_PROC_PRINT("%4d 0x%08lx 0x%08lx %4.4s 0x%02x 0x%08lx ", | 235 | type = "??"; |
236 | else | ||
237 | type = types[map->type]; | ||
238 | DRM_PROC_PRINT("%4d 0x%08lx 0x%08lx %4.4s 0x%02x 0x%08x ", | ||
236 | i, | 239 | i, |
237 | map->offset, | 240 | map->offset, |
238 | map->size, | 241 | map->size, |
239 | type, | 242 | type, |
240 | map->flags, | 243 | map->flags, |
241 | (unsigned long)map->handle); | 244 | r_list->user_token); |
242 | if (map->mtrr < 0) { | 245 | if (map->mtrr < 0) { |
243 | DRM_PROC_PRINT("none\n"); | 246 | DRM_PROC_PRINT("none\n"); |
244 | } else { | 247 | } else { |
diff --git a/drivers/char/drm/drm_scatter.c b/drivers/char/drm/drm_scatter.c index 54fddb6ea2d1..ed267d49bc6a 100644 --- a/drivers/char/drm/drm_scatter.c +++ b/drivers/char/drm/drm_scatter.c | |||
@@ -61,6 +61,12 @@ void drm_sg_cleanup( drm_sg_mem_t *entry ) | |||
61 | DRM_MEM_SGLISTS ); | 61 | DRM_MEM_SGLISTS ); |
62 | } | 62 | } |
63 | 63 | ||
64 | #ifdef _LP64 | ||
65 | # define ScatterHandle(x) (unsigned int)((x >> 32) + (x & ((1L << 32) - 1))) | ||
66 | #else | ||
67 | # define ScatterHandle(x) (unsigned int)(x) | ||
68 | #endif | ||
69 | |||
64 | int drm_sg_alloc( struct inode *inode, struct file *filp, | 70 | int drm_sg_alloc( struct inode *inode, struct file *filp, |
65 | unsigned int cmd, unsigned long arg ) | 71 | unsigned int cmd, unsigned long arg ) |
66 | { | 72 | { |
@@ -133,12 +139,13 @@ int drm_sg_alloc( struct inode *inode, struct file *filp, | |||
133 | */ | 139 | */ |
134 | memset( entry->virtual, 0, pages << PAGE_SHIFT ); | 140 | memset( entry->virtual, 0, pages << PAGE_SHIFT ); |
135 | 141 | ||
136 | entry->handle = (unsigned long)entry->virtual; | 142 | entry->handle = ScatterHandle((unsigned long)entry->virtual); |
137 | 143 | ||
138 | DRM_DEBUG( "sg alloc handle = %08lx\n", entry->handle ); | 144 | DRM_DEBUG( "sg alloc handle = %08lx\n", entry->handle ); |
139 | DRM_DEBUG( "sg alloc virtual = %p\n", entry->virtual ); | 145 | DRM_DEBUG( "sg alloc virtual = %p\n", entry->virtual ); |
140 | 146 | ||
141 | for ( i = entry->handle, j = 0 ; j < pages ; i += PAGE_SIZE, j++ ) { | 147 | for (i = (unsigned long)entry->virtual, j = 0; j < pages; |
148 | i += PAGE_SIZE, j++) { | ||
142 | entry->pagelist[j] = vmalloc_to_page((void *)i); | 149 | entry->pagelist[j] = vmalloc_to_page((void *)i); |
143 | if (!entry->pagelist[j]) | 150 | if (!entry->pagelist[j]) |
144 | goto failed; | 151 | goto failed; |
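ScatterHandle() exists because entry->handle ends up as a map offset/token that must fit in the 32 bits userspace can pass back, while a vmalloc address on an _LP64 build does not; folding the upper half into the lower half yields a 32-bit cookie (collision-prone in theory, adequate in practice), and the page walk above now iterates over entry->virtual directly rather than the handle. A standalone worked example:

    #include <stdio.h>

    /* ScatterHandle() as above, for an _LP64 build. */
    #define ScatterHandle(x) (unsigned int)(((x) >> 32) + ((x) & ((1L << 32) - 1)))

    int main(void)
    {
            unsigned long vaddr = 0xffffc90012345000UL; /* hypothetical vmalloc address */

            /* upper half 0xffffc900 + lower half 0x12345000 = 0x112341900,
             * truncated to 32 bits: 0x12341900 */
            printf("handle = 0x%08x\n", ScatterHandle(vaddr));
            return 0;
    }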
diff --git a/drivers/char/drm/drm_stub.c b/drivers/char/drm/drm_stub.c index 48829a1a086a..95a976c96eb8 100644 --- a/drivers/char/drm/drm_stub.c +++ b/drivers/char/drm/drm_stub.c | |||
@@ -75,6 +75,11 @@ static int drm_fill_in_dev(drm_device_t *dev, struct pci_dev *pdev, const struct | |||
75 | dev->pci_func = PCI_FUNC(pdev->devfn); | 75 | dev->pci_func = PCI_FUNC(pdev->devfn); |
76 | dev->irq = pdev->irq; | 76 | dev->irq = pdev->irq; |
77 | 77 | ||
78 | dev->maplist = drm_calloc(1, sizeof(*dev->maplist), DRM_MEM_MAPS); | ||
79 | if (dev->maplist == NULL) | ||
80 | return -ENOMEM; | ||
81 | INIT_LIST_HEAD(&dev->maplist->head); | ||
82 | |||
78 | /* the DRM has 6 basic counters */ | 83 | /* the DRM has 6 basic counters */ |
79 | dev->counters = 6; | 84 | dev->counters = 6; |
80 | dev->types[0] = _DRM_STAT_LOCK; | 85 | dev->types[0] = _DRM_STAT_LOCK; |
@@ -91,7 +96,8 @@ static int drm_fill_in_dev(drm_device_t *dev, struct pci_dev *pdev, const struct | |||
91 | goto error_out_unreg; | 96 | goto error_out_unreg; |
92 | 97 | ||
93 | if (drm_core_has_AGP(dev)) { | 98 | if (drm_core_has_AGP(dev)) { |
94 | dev->agp = drm_agp_init(dev); | 99 | if (drm_device_is_agp(dev)) |
100 | dev->agp = drm_agp_init(dev); | ||
95 | if (drm_core_check_feature(dev, DRIVER_REQUIRE_AGP) && (dev->agp == NULL)) { | 101 | if (drm_core_check_feature(dev, DRIVER_REQUIRE_AGP) && (dev->agp == NULL)) { |
96 | DRM_ERROR( "Cannot initialize the agpgart module.\n" ); | 102 | DRM_ERROR( "Cannot initialize the agpgart module.\n" ); |
97 | retcode = -EINVAL; | 103 | retcode = -EINVAL; |
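Two setup changes land here: the map list is now allocated once per device in drm_fill_in_dev(), matching its removal from drm_setup() in drm_fops.c above so that maps added before first open survive, and drm_agp_init() is only attempted when the device actually looks like an AGP device. A hedged sketch of what drm_device_is_agp() presumably checks; the real helper lives elsewhere in this series:

    /* Illustrative stand-in, not the actual helper: a driver callback (if
     * any) gets first say, otherwise the PCI capability list decides. */
    static int device_is_agp(drm_device_t *dev)
    {
            if (dev->driver->device_is_agp != NULL)
                    return dev->driver->device_is_agp(dev);

            return pci_find_capability(dev->pdev, PCI_CAP_ID_AGP) != 0;
    }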
diff --git a/drivers/char/drm/drm_vm.c b/drivers/char/drm/drm_vm.c index 621220f3f372..ced4215e2275 100644 --- a/drivers/char/drm/drm_vm.c +++ b/drivers/char/drm/drm_vm.c | |||
@@ -73,12 +73,13 @@ static __inline__ struct page *drm_do_vm_nopage(struct vm_area_struct *vma, | |||
73 | r_list = list_entry(list, drm_map_list_t, head); | 73 | r_list = list_entry(list, drm_map_list_t, head); |
74 | map = r_list->map; | 74 | map = r_list->map; |
75 | if (!map) continue; | 75 | if (!map) continue; |
76 | if (map->offset == VM_OFFSET(vma)) break; | 76 | if (r_list->user_token == VM_OFFSET(vma)) |
77 | break; | ||
77 | } | 78 | } |
78 | 79 | ||
79 | if (map && map->type == _DRM_AGP) { | 80 | if (map && map->type == _DRM_AGP) { |
80 | unsigned long offset = address - vma->vm_start; | 81 | unsigned long offset = address - vma->vm_start; |
81 | unsigned long baddr = VM_OFFSET(vma) + offset; | 82 | unsigned long baddr = map->offset + offset; |
82 | struct drm_agp_mem *agpmem; | 83 | struct drm_agp_mem *agpmem; |
83 | struct page *page; | 84 | struct page *page; |
84 | 85 | ||
@@ -210,6 +211,8 @@ static void drm_vm_shm_close(struct vm_area_struct *vma) | |||
210 | } | 211 | } |
211 | 212 | ||
212 | if(!found_maps) { | 213 | if(!found_maps) { |
214 | drm_dma_handle_t dmah; | ||
215 | |||
213 | switch (map->type) { | 216 | switch (map->type) { |
214 | case _DRM_REGISTERS: | 217 | case _DRM_REGISTERS: |
215 | case _DRM_FRAME_BUFFER: | 218 | case _DRM_FRAME_BUFFER: |
@@ -228,6 +231,12 @@ static void drm_vm_shm_close(struct vm_area_struct *vma) | |||
228 | case _DRM_AGP: | 231 | case _DRM_AGP: |
229 | case _DRM_SCATTER_GATHER: | 232 | case _DRM_SCATTER_GATHER: |
230 | break; | 233 | break; |
234 | case _DRM_CONSISTENT: | ||
235 | dmah.vaddr = map->handle; | ||
236 | dmah.busaddr = map->offset; | ||
237 | dmah.size = map->size; | ||
238 | __drm_pci_free(dev, &dmah); | ||
239 | break; | ||
231 | } | 240 | } |
232 | drm_free(map, sizeof(*map), DRM_MEM_MAPS); | 241 | drm_free(map, sizeof(*map), DRM_MEM_MAPS); |
233 | } | 242 | } |
@@ -296,7 +305,7 @@ static __inline__ struct page *drm_do_vm_sg_nopage(struct vm_area_struct *vma, | |||
296 | 305 | ||
297 | 306 | ||
298 | offset = address - vma->vm_start; | 307 | offset = address - vma->vm_start; |
299 | map_offset = map->offset - dev->sg->handle; | 308 | map_offset = map->offset - (unsigned long)dev->sg->virtual; |
300 | page_offset = (offset >> PAGE_SHIFT) + (map_offset >> PAGE_SHIFT); | 309 | page_offset = (offset >> PAGE_SHIFT) + (map_offset >> PAGE_SHIFT); |
301 | page = entry->pagelist[page_offset]; | 310 | page = entry->pagelist[page_offset]; |
302 | get_page(page); | 311 | get_page(page); |
@@ -305,8 +314,6 @@ static __inline__ struct page *drm_do_vm_sg_nopage(struct vm_area_struct *vma, | |||
305 | } | 314 | } |
306 | 315 | ||
307 | 316 | ||
308 | #if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,0) | ||
309 | |||
310 | static struct page *drm_vm_nopage(struct vm_area_struct *vma, | 317 | static struct page *drm_vm_nopage(struct vm_area_struct *vma, |
311 | unsigned long address, | 318 | unsigned long address, |
312 | int *type) { | 319 | int *type) { |
@@ -335,35 +342,6 @@ static struct page *drm_vm_sg_nopage(struct vm_area_struct *vma, | |||
335 | return drm_do_vm_sg_nopage(vma, address); | 342 | return drm_do_vm_sg_nopage(vma, address); |
336 | } | 343 | } |
337 | 344 | ||
338 | #else /* LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,0) */ | ||
339 | |||
340 | static struct page *drm_vm_nopage(struct vm_area_struct *vma, | ||
341 | unsigned long address, | ||
342 | int unused) { | ||
343 | return drm_do_vm_nopage(vma, address); | ||
344 | } | ||
345 | |||
346 | static struct page *drm_vm_shm_nopage(struct vm_area_struct *vma, | ||
347 | unsigned long address, | ||
348 | int unused) { | ||
349 | return drm_do_vm_shm_nopage(vma, address); | ||
350 | } | ||
351 | |||
352 | static struct page *drm_vm_dma_nopage(struct vm_area_struct *vma, | ||
353 | unsigned long address, | ||
354 | int unused) { | ||
355 | return drm_do_vm_dma_nopage(vma, address); | ||
356 | } | ||
357 | |||
358 | static struct page *drm_vm_sg_nopage(struct vm_area_struct *vma, | ||
359 | unsigned long address, | ||
360 | int unused) { | ||
361 | return drm_do_vm_sg_nopage(vma, address); | ||
362 | } | ||
363 | |||
364 | #endif | ||
365 | |||
366 | |||
367 | /** AGP virtual memory operations */ | 345 | /** AGP virtual memory operations */ |
368 | static struct vm_operations_struct drm_vm_ops = { | 346 | static struct vm_operations_struct drm_vm_ops = { |
369 | .nopage = drm_vm_nopage, | 347 | .nopage = drm_vm_nopage, |
@@ -487,11 +465,7 @@ static int drm_mmap_dma(struct file *filp, struct vm_area_struct *vma) | |||
487 | 465 | ||
488 | vma->vm_ops = &drm_vm_dma_ops; | 466 | vma->vm_ops = &drm_vm_dma_ops; |
489 | 467 | ||
490 | #if LINUX_VERSION_CODE <= 0x02040e /* KERNEL_VERSION(2,4,14) */ | ||
491 | vma->vm_flags |= VM_LOCKED | VM_SHM; /* Don't swap */ | ||
492 | #else | ||
493 | vma->vm_flags |= VM_RESERVED; /* Don't swap */ | 468 | vma->vm_flags |= VM_RESERVED; /* Don't swap */ |
494 | #endif | ||
495 | 469 | ||
496 | vma->vm_file = filp; /* Needed for drm_vm_open() */ | 470 | vma->vm_file = filp; /* Needed for drm_vm_open() */ |
497 | drm_vm_open(vma); | 471 | drm_vm_open(vma); |
@@ -560,13 +534,12 @@ int drm_mmap(struct file *filp, struct vm_area_struct *vma) | |||
560 | for performance, even if the list was a | 534 | for performance, even if the list was a |
561 | bit longer. */ | 535 | bit longer. */ |
562 | list_for_each(list, &dev->maplist->head) { | 536 | list_for_each(list, &dev->maplist->head) { |
563 | unsigned long off; | ||
564 | 537 | ||
565 | r_list = list_entry(list, drm_map_list_t, head); | 538 | r_list = list_entry(list, drm_map_list_t, head); |
566 | map = r_list->map; | 539 | map = r_list->map; |
567 | if (!map) continue; | 540 | if (!map) continue; |
568 | off = dev->driver->get_map_ofs(map); | 541 | if (r_list->user_token == VM_OFFSET(vma)) |
569 | if (off == VM_OFFSET(vma)) break; | 542 | break; |
570 | } | 543 | } |
571 | 544 | ||
572 | if (!map || ((map->flags&_DRM_RESTRICTED) && !capable(CAP_SYS_ADMIN))) | 545 | if (!map || ((map->flags&_DRM_RESTRICTED) && !capable(CAP_SYS_ADMIN))) |
@@ -605,17 +578,17 @@ int drm_mmap(struct file *filp, struct vm_area_struct *vma) | |||
605 | /* fall through to _DRM_FRAME_BUFFER... */ | 578 | /* fall through to _DRM_FRAME_BUFFER... */ |
606 | case _DRM_FRAME_BUFFER: | 579 | case _DRM_FRAME_BUFFER: |
607 | case _DRM_REGISTERS: | 580 | case _DRM_REGISTERS: |
608 | if (VM_OFFSET(vma) >= __pa(high_memory)) { | ||
609 | #if defined(__i386__) || defined(__x86_64__) | 581 | #if defined(__i386__) || defined(__x86_64__) |
610 | if (boot_cpu_data.x86 > 3 && map->type != _DRM_AGP) { | 582 | if (boot_cpu_data.x86 > 3 && map->type != _DRM_AGP) { |
611 | pgprot_val(vma->vm_page_prot) |= _PAGE_PCD; | 583 | pgprot_val(vma->vm_page_prot) |= _PAGE_PCD; |
612 | pgprot_val(vma->vm_page_prot) &= ~_PAGE_PWT; | 584 | pgprot_val(vma->vm_page_prot) &= ~_PAGE_PWT; |
613 | } | 585 | } |
614 | #elif defined(__powerpc__) | 586 | #elif defined(__powerpc__) |
615 | pgprot_val(vma->vm_page_prot) |= _PAGE_NO_CACHE | _PAGE_GUARDED; | 587 | pgprot_val(vma->vm_page_prot) |= _PAGE_NO_CACHE; |
588 | if (map->type == _DRM_REGISTERS) | ||
589 | pgprot_val(vma->vm_page_prot) |= _PAGE_GUARDED; | ||
616 | #endif | 590 | #endif |
617 | vma->vm_flags |= VM_IO; /* not in core dump */ | 591 | vma->vm_flags |= VM_IO; /* not in core dump */ |
618 | } | ||
619 | #if defined(__ia64__) | 592 | #if defined(__ia64__) |
620 | if (efi_range_is_wc(vma->vm_start, vma->vm_end - | 593 | if (efi_range_is_wc(vma->vm_start, vma->vm_end - |
621 | vma->vm_start)) | 594 | vma->vm_start)) |
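This hunk also drops the __pa(high_memory) guard, so the cache-control bits now apply to every frame-buffer and register mapping, and powerpc adds the guarded bit only for register space. The per-architecture pattern, factored into a sketch (the bit names are the era-appropriate real ones; the helper and its flag argument are hypothetical):

/* Illustrative helper: mark a mapping uncached before remapping it. */
static void example_set_uncached(struct vm_area_struct *vma, int is_register_map)
{
#if defined(__i386__) || defined(__x86_64__)
	pgprot_val(vma->vm_page_prot) |= _PAGE_PCD;	/* cache disable */
	pgprot_val(vma->vm_page_prot) &= ~_PAGE_PWT;	/* no write-through */
#elif defined(__powerpc__)
	pgprot_val(vma->vm_page_prot) |= _PAGE_NO_CACHE;
	if (is_register_map)
		pgprot_val(vma->vm_page_prot) |= _PAGE_GUARDED;	/* no speculative access */
#endif
	vma->vm_flags |= VM_IO;		/* keep the range out of core dumps */
}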
@@ -628,12 +601,12 @@ int drm_mmap(struct file *filp, struct vm_area_struct *vma) | |||
628 | offset = dev->driver->get_reg_ofs(dev); | 601 | offset = dev->driver->get_reg_ofs(dev); |
629 | #ifdef __sparc__ | 602 | #ifdef __sparc__ |
630 | if (io_remap_pfn_range(DRM_RPR_ARG(vma) vma->vm_start, | 603 | if (io_remap_pfn_range(DRM_RPR_ARG(vma) vma->vm_start, |
631 | (VM_OFFSET(vma) + offset) >> PAGE_SHIFT, | 604 | (map->offset + offset) >> PAGE_SHIFT, |
632 | vma->vm_end - vma->vm_start, | 605 | vma->vm_end - vma->vm_start, |
633 | vma->vm_page_prot)) | 606 | vma->vm_page_prot)) |
634 | #else | 607 | #else |
635 | if (io_remap_pfn_range(vma, vma->vm_start, | 608 | if (io_remap_pfn_range(vma, vma->vm_start, |
636 | (VM_OFFSET(vma) + offset) >> PAGE_SHIFT, | 609 | (map->offset + offset) >> PAGE_SHIFT, |
637 | vma->vm_end - vma->vm_start, | 610 | vma->vm_end - vma->vm_start, |
638 | vma->vm_page_prot)) | 611 | vma->vm_page_prot)) |
639 | #endif | 612 | #endif |
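Because user_token is now an opaque handle rather than the bus address, the physical source for the remap must come from the kernel-side map->offset instead of the user-supplied mmap offset. The call reduced to its essentials, as a sketch (the sparc variant differs only by DRM_RPR_ARG; the error return follows the surrounding code):

/* Reduced form of the remap after this hunk; 'offset' is the
 * driver-reported register offset, as in the code above. */
unsigned long pfn = (map->offset + offset) >> PAGE_SHIFT;

if (io_remap_pfn_range(vma, vma->vm_start, pfn,
		       vma->vm_end - vma->vm_start, vma->vm_page_prot))
	return -EAGAIN;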
@@ -641,37 +614,28 @@ int drm_mmap(struct file *filp, struct vm_area_struct *vma) | |||
641 | DRM_DEBUG(" Type = %d; start = 0x%lx, end = 0x%lx," | 614 | DRM_DEBUG(" Type = %d; start = 0x%lx, end = 0x%lx," |
642 | " offset = 0x%lx\n", | 615 | " offset = 0x%lx\n", |
643 | map->type, | 616 | map->type, |
644 | vma->vm_start, vma->vm_end, VM_OFFSET(vma) + offset); | 617 | vma->vm_start, vma->vm_end, map->offset + offset); |
645 | vma->vm_ops = &drm_vm_ops; | 618 | vma->vm_ops = &drm_vm_ops; |
646 | break; | 619 | break; |
647 | case _DRM_SHM: | 620 | case _DRM_SHM: |
621 | case _DRM_CONSISTENT: | ||
622 | /* Consistent memory is really like shared memory. It's only | ||
623 | * allocated in a different way */ | ||
648 | vma->vm_ops = &drm_vm_shm_ops; | 624 | vma->vm_ops = &drm_vm_shm_ops; |
649 | vma->vm_private_data = (void *)map; | 625 | vma->vm_private_data = (void *)map; |
650 | /* Don't let this area swap. Change when | 626 | /* Don't let this area swap. Change when |
651 | DRM_KERNEL advisory is supported. */ | 627 | DRM_KERNEL advisory is supported. */ |
652 | #if LINUX_VERSION_CODE <= 0x02040e /* KERNEL_VERSION(2,4,14) */ | ||
653 | vma->vm_flags |= VM_LOCKED; | ||
654 | #else | ||
655 | vma->vm_flags |= VM_RESERVED; | 628 | vma->vm_flags |= VM_RESERVED; |
656 | #endif | ||
657 | break; | 629 | break; |
658 | case _DRM_SCATTER_GATHER: | 630 | case _DRM_SCATTER_GATHER: |
659 | vma->vm_ops = &drm_vm_sg_ops; | 631 | vma->vm_ops = &drm_vm_sg_ops; |
660 | vma->vm_private_data = (void *)map; | 632 | vma->vm_private_data = (void *)map; |
661 | #if LINUX_VERSION_CODE <= 0x02040e /* KERNEL_VERSION(2,4,14) */ | ||
662 | vma->vm_flags |= VM_LOCKED; | ||
663 | #else | ||
664 | vma->vm_flags |= VM_RESERVED; | 633 | vma->vm_flags |= VM_RESERVED; |
665 | #endif | ||
666 | break; | 634 | break; |
667 | default: | 635 | default: |
668 | return -EINVAL; /* This should never happen. */ | 636 | return -EINVAL; /* This should never happen. */ |
669 | } | 637 | } |
670 | #if LINUX_VERSION_CODE <= 0x02040e /* KERNEL_VERSION(2,4,14) */ | ||
671 | vma->vm_flags |= VM_LOCKED | VM_SHM; /* Don't swap */ | ||
672 | #else | ||
673 | vma->vm_flags |= VM_RESERVED; /* Don't swap */ | 638 | vma->vm_flags |= VM_RESERVED; /* Don't swap */ |
674 | #endif | ||
675 | 639 | ||
676 | vma->vm_file = filp; /* Needed for drm_vm_open() */ | 640 | vma->vm_file = filp; /* Needed for drm_vm_open() */ |
677 | drm_vm_open(vma); | 641 | drm_vm_open(vma); |
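The new _DRM_CONSISTENT case records a design choice: PCI-consistent memory is kernel pages just like _DRM_SHM, so it can fall through to the same shm vm_ops, and only the allocator differs. The dispatch, distilled from the code above:

/* Distilled map-type dispatch at the end of drm_mmap(). */
switch (map->type) {
case _DRM_SHM:
case _DRM_CONSISTENT:		/* same page source, different allocator */
	vma->vm_ops = &drm_vm_shm_ops;
	break;
case _DRM_SCATTER_GATHER:
	vma->vm_ops = &drm_vm_sg_ops;
	break;
default:
	return -EINVAL;		/* unreachable if the maplist is consistent */
}
vma->vm_private_data = (void *)map;
vma->vm_flags |= VM_RESERVED;	/* don't swap */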
diff --git a/drivers/char/drm/ffb_drv.c b/drivers/char/drm/ffb_drv.c index ec614fff8f04..1bd0d55ee0f0 100644 --- a/drivers/char/drm/ffb_drv.c +++ b/drivers/char/drm/ffb_drv.c | |||
@@ -152,14 +152,11 @@ static drm_map_t *ffb_find_map(struct file *filp, unsigned long off) | |||
152 | return NULL; | 152 | return NULL; |
153 | 153 | ||
154 | list_for_each(list, &dev->maplist->head) { | 154 | list_for_each(list, &dev->maplist->head) { |
155 | unsigned long uoff; | ||
156 | |||
157 | r_list = (drm_map_list_t *)list; | 155 | r_list = (drm_map_list_t *)list; |
158 | map = r_list->map; | 156 | map = r_list->map; |
159 | if (!map) | 157 | if (!map) |
160 | continue; | 158 | continue; |
161 | uoff = (map->offset & 0xffffffff); | 159 | if (r_list->user_token == off) |
162 | if (uoff == off) | ||
163 | return map; | 160 | return map; |
164 | } | 161 | } |
165 | 162 | ||
diff --git a/drivers/char/drm/gamma_context.h b/drivers/char/drm/gamma_context.h deleted file mode 100644 index d11b507f87ee..000000000000 --- a/drivers/char/drm/gamma_context.h +++ /dev/null | |||
@@ -1,492 +0,0 @@ | |||
1 | /* drm_context.h -- IOCTLs for generic contexts -*- linux-c -*- | ||
2 | * Created: Fri Nov 24 18:31:37 2000 by gareth@valinux.com | ||
3 | * | ||
4 | * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas. | ||
5 | * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. | ||
6 | * All Rights Reserved. | ||
7 | * | ||
8 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
9 | * copy of this software and associated documentation files (the "Software"), | ||
10 | * to deal in the Software without restriction, including without limitation | ||
11 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
12 | * and/or sell copies of the Software, and to permit persons to whom the | ||
13 | * Software is furnished to do so, subject to the following conditions: | ||
14 | * | ||
15 | * The above copyright notice and this permission notice (including the next | ||
16 | * paragraph) shall be included in all copies or substantial portions of the | ||
17 | * Software. | ||
18 | * | ||
19 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
20 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
21 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
22 | * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
23 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
24 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
25 | * OTHER DEALINGS IN THE SOFTWARE. | ||
26 | * | ||
27 | * Authors: | ||
28 | * Rickard E. (Rik) Faith <faith@valinux.com> | ||
29 | * Gareth Hughes <gareth@valinux.com> | ||
30 | * ChangeLog: | ||
31 | * 2001-11-16 Torsten Duwe <duwe@caldera.de> | ||
32 | * added context constructor/destructor hooks, | ||
33 | * needed by SiS driver's memory management. | ||
34 | */ | ||
35 | |||
36 | /* ================================================================ | ||
37 | * Old-style context support -- only used by gamma. | ||
38 | */ | ||
39 | |||
40 | |||
41 | /* The drm_read and drm_write_string code (especially that which manages | ||
42 | the circular buffer) is based on Alessandro Rubini's LINUX DEVICE | ||
43 | DRIVERS (Cambridge: O'Reilly, 1998), pages 111-113. */ | ||
44 | |||
45 | ssize_t gamma_fops_read(struct file *filp, char __user *buf, size_t count, loff_t *off) | ||
46 | { | ||
47 | drm_file_t *priv = filp->private_data; | ||
48 | drm_device_t *dev = priv->dev; | ||
49 | int left; | ||
50 | int avail; | ||
51 | int send; | ||
52 | int cur; | ||
53 | |||
54 | DRM_DEBUG("%p, %p\n", dev->buf_rp, dev->buf_wp); | ||
55 | |||
56 | while (dev->buf_rp == dev->buf_wp) { | ||
57 | DRM_DEBUG(" sleeping\n"); | ||
58 | if (filp->f_flags & O_NONBLOCK) { | ||
59 | return -EAGAIN; | ||
60 | } | ||
61 | interruptible_sleep_on(&dev->buf_readers); | ||
62 | if (signal_pending(current)) { | ||
63 | DRM_DEBUG(" interrupted\n"); | ||
64 | return -ERESTARTSYS; | ||
65 | } | ||
66 | DRM_DEBUG(" awake\n"); | ||
67 | } | ||
68 | |||
69 | left = (dev->buf_rp + DRM_BSZ - dev->buf_wp) % DRM_BSZ; | ||
70 | avail = DRM_BSZ - left; | ||
71 | send = DRM_MIN(avail, count); | ||
72 | |||
73 | while (send) { | ||
74 | if (dev->buf_wp > dev->buf_rp) { | ||
75 | cur = DRM_MIN(send, dev->buf_wp - dev->buf_rp); | ||
76 | } else { | ||
77 | cur = DRM_MIN(send, dev->buf_end - dev->buf_rp); | ||
78 | } | ||
79 | if (copy_to_user(buf, dev->buf_rp, cur)) | ||
80 | return -EFAULT; | ||
81 | dev->buf_rp += cur; | ||
82 | if (dev->buf_rp == dev->buf_end) dev->buf_rp = dev->buf; | ||
83 | send -= cur; | ||
84 | } | ||
85 | |||
86 | wake_up_interruptible(&dev->buf_writers); | ||
87 | return DRM_MIN(avail, count); | ||
88 | } | ||
89 | |||
90 | |||
91 | /* In an incredibly convoluted setup, the kernel module actually calls | ||
92 | * back into the X server to perform context switches on behalf of the | ||
93 | * 3d clients. | ||
94 | */ | ||
95 | int DRM(write_string)(drm_device_t *dev, const char *s) | ||
96 | { | ||
97 | int left = (dev->buf_rp + DRM_BSZ - dev->buf_wp) % DRM_BSZ; | ||
98 | int send = strlen(s); | ||
99 | int count; | ||
100 | |||
101 | DRM_DEBUG("%d left, %d to send (%p, %p)\n", | ||
102 | left, send, dev->buf_rp, dev->buf_wp); | ||
103 | |||
104 | if (left == 1 || dev->buf_wp != dev->buf_rp) { | ||
105 | DRM_ERROR("Buffer not empty (%d left, wp = %p, rp = %p)\n", | ||
106 | left, | ||
107 | dev->buf_wp, | ||
108 | dev->buf_rp); | ||
109 | } | ||
110 | |||
111 | while (send) { | ||
112 | if (dev->buf_wp >= dev->buf_rp) { | ||
113 | count = DRM_MIN(send, dev->buf_end - dev->buf_wp); | ||
114 | if (count == left) --count; /* Leave a hole */ | ||
115 | } else { | ||
116 | count = DRM_MIN(send, dev->buf_rp - dev->buf_wp - 1); | ||
117 | } | ||
118 | strncpy(dev->buf_wp, s, count); | ||
119 | dev->buf_wp += count; | ||
120 | if (dev->buf_wp == dev->buf_end) dev->buf_wp = dev->buf; | ||
121 | send -= count; | ||
122 | } | ||
123 | |||
124 | if (dev->buf_async) kill_fasync(&dev->buf_async, SIGIO, POLL_IN); | ||
125 | |||
126 | DRM_DEBUG("waking\n"); | ||
127 | wake_up_interruptible(&dev->buf_readers); | ||
128 | return 0; | ||
129 | } | ||
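The read and write paths above share one classic ring buffer: left = (rp + DRM_BSZ - wp) % DRM_BSZ measures the space in flight, rp == wp means empty, and the writer deliberately leaves one hole so a completely full buffer never collapses into the empty state. A runnable userspace toy of the same arithmetic (all names illustrative; DRM_BSZ's real value lives in the DRM headers):

#include <stdio.h>

#define BSZ 8
static char buf[BSZ];
static int rp, wp;			/* read / write indices */

/* Free slots under the leave-a-hole convention. */
static int ring_free(void) { return (rp + BSZ - wp - 1) % BSZ; }

static void ring_put(const char *s)
{
	for (; *s && ring_free() > 0; s++) {	/* never fill the hole */
		buf[wp] = *s;
		wp = (wp + 1) % BSZ;
	}
}

static void ring_get(void)
{
	while (rp != wp) {			/* rp == wp means empty */
		putchar(buf[rp]);
		rp = (rp + 1) % BSZ;
	}
	putchar('\n');
}

int main(void)
{
	ring_put("C 0 1");			/* a context-switch command */
	ring_get();
	return 0;
}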
130 | |||
131 | unsigned int gamma_fops_poll(struct file *filp, struct poll_table_struct *wait) | ||
132 | { | ||
133 | drm_file_t *priv = filp->private_data; | ||
134 | drm_device_t *dev = priv->dev; | ||
135 | |||
136 | poll_wait(filp, &dev->buf_readers, wait); | ||
137 | if (dev->buf_wp != dev->buf_rp) return POLLIN | POLLRDNORM; | ||
138 | return 0; | ||
139 | } | ||
140 | |||
141 | int DRM(context_switch)(drm_device_t *dev, int old, int new) | ||
142 | { | ||
143 | char buf[64]; | ||
144 | drm_queue_t *q; | ||
145 | |||
146 | if (test_and_set_bit(0, &dev->context_flag)) { | ||
147 | DRM_ERROR("Reentering -- FIXME\n"); | ||
148 | return -EBUSY; | ||
149 | } | ||
150 | |||
151 | DRM_DEBUG("Context switch from %d to %d\n", old, new); | ||
152 | |||
153 | if (new >= dev->queue_count) { | ||
154 | clear_bit(0, &dev->context_flag); | ||
155 | return -EINVAL; | ||
156 | } | ||
157 | |||
158 | if (new == dev->last_context) { | ||
159 | clear_bit(0, &dev->context_flag); | ||
160 | return 0; | ||
161 | } | ||
162 | |||
163 | q = dev->queuelist[new]; | ||
164 | atomic_inc(&q->use_count); | ||
165 | if (atomic_read(&q->use_count) == 1) { | ||
166 | atomic_dec(&q->use_count); | ||
167 | clear_bit(0, &dev->context_flag); | ||
168 | return -EINVAL; | ||
169 | } | ||
170 | |||
171 | /* This causes the X server to wake up & do a bunch of hardware | ||
172 | * interaction to actually effect the context switch. | ||
173 | */ | ||
174 | sprintf(buf, "C %d %d\n", old, new); | ||
175 | DRM(write_string)(dev, buf); | ||
176 | |||
177 | atomic_dec(&q->use_count); | ||
178 | |||
179 | return 0; | ||
180 | } | ||
181 | |||
182 | int DRM(context_switch_complete)(drm_device_t *dev, int new) | ||
183 | { | ||
184 | drm_device_dma_t *dma = dev->dma; | ||
185 | |||
186 | dev->last_context = new; /* PRE/POST: This is the _only_ writer. */ | ||
187 | dev->last_switch = jiffies; | ||
188 | |||
189 | if (!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) { | ||
190 | DRM_ERROR("Lock isn't held after context switch\n"); | ||
191 | } | ||
192 | |||
193 | if (!dma || !(dma->next_buffer && dma->next_buffer->while_locked)) { | ||
194 | if (DRM(lock_free)(dev, &dev->lock.hw_lock->lock, | ||
195 | DRM_KERNEL_CONTEXT)) { | ||
196 | DRM_ERROR("Cannot free lock\n"); | ||
197 | } | ||
198 | } | ||
199 | |||
200 | clear_bit(0, &dev->context_flag); | ||
201 | wake_up_interruptible(&dev->context_wait); | ||
202 | |||
203 | return 0; | ||
204 | } | ||
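Taken together, the switch is a round trip: the kernel queues "C <old> <new>" into the buffer, the X server reads it through gamma_fops_read(), reprograms the hardware, and reports back with an ioctl that lands in DRM(context_switch_complete). A hypothetical sketch of the userspace half (the ioctl itself is indicated in a comment, since the request number comes from the DRM headers):

#include <stdio.h>

/* Parse one command line from the DRM character stream and act on it. */
static void handle_command(const char *line /*, int drm_fd */)
{
	int old_ctx, new_ctx;

	if (sscanf(line, "C %d %d", &old_ctx, &new_ctx) == 2) {
		/* ...reprogram the hardware for new_ctx here... */
		/* then: ioctl(drm_fd, DRM_IOCTL_NEW_CTX, &ctx); */
		printf("switched %d -> %d\n", old_ctx, new_ctx);
	}
}

int main(void)
{
	handle_command("C 0 2\n");
	return 0;
}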
205 | |||
206 | static int DRM(init_queue)(drm_device_t *dev, drm_queue_t *q, drm_ctx_t *ctx) | ||
207 | { | ||
208 | DRM_DEBUG("\n"); | ||
209 | |||
210 | if (atomic_read(&q->use_count) != 1 | ||
211 | || atomic_read(&q->finalization) | ||
212 | || atomic_read(&q->block_count)) { | ||
213 | DRM_ERROR("New queue is already in use: u%d f%d b%d\n", | ||
214 | atomic_read(&q->use_count), | ||
215 | atomic_read(&q->finalization), | ||
216 | atomic_read(&q->block_count)); | ||
217 | } | ||
218 | |||
219 | atomic_set(&q->finalization, 0); | ||
220 | atomic_set(&q->block_count, 0); | ||
221 | atomic_set(&q->block_read, 0); | ||
222 | atomic_set(&q->block_write, 0); | ||
223 | atomic_set(&q->total_queued, 0); | ||
224 | atomic_set(&q->total_flushed, 0); | ||
225 | atomic_set(&q->total_locks, 0); | ||
226 | |||
227 | init_waitqueue_head(&q->write_queue); | ||
228 | init_waitqueue_head(&q->read_queue); | ||
229 | init_waitqueue_head(&q->flush_queue); | ||
230 | |||
231 | q->flags = ctx->flags; | ||
232 | |||
233 | DRM(waitlist_create)(&q->waitlist, dev->dma->buf_count); | ||
234 | |||
235 | return 0; | ||
236 | } | ||
237 | |||
238 | |||
239 | /* drm_alloc_queue: | ||
240 | PRE: 1) dev->queuelist[0..dev->queue_count] is allocated and will not | ||
241 | disappear (so all deallocation must be done after IOCTLs are off) | ||
242 | 2) dev->queue_count < dev->queue_slots | ||
243 | 3) dev->queuelist[i].use_count == 0 and | ||
244 | dev->queuelist[i].finalization == 0 if i not in use | ||
245 | POST: 1) dev->queuelist[i].use_count == 1 | ||
246 | 2) dev->queue_count < dev->queue_slots */ | ||
247 | |||
248 | static int DRM(alloc_queue)(drm_device_t *dev) | ||
249 | { | ||
250 | int i; | ||
251 | drm_queue_t *queue; | ||
252 | int oldslots; | ||
253 | int newslots; | ||
254 | /* Check for a free queue */ | ||
255 | for (i = 0; i < dev->queue_count; i++) { | ||
256 | atomic_inc(&dev->queuelist[i]->use_count); | ||
257 | if (atomic_read(&dev->queuelist[i]->use_count) == 1 | ||
258 | && !atomic_read(&dev->queuelist[i]->finalization)) { | ||
259 | DRM_DEBUG("%d (free)\n", i); | ||
260 | return i; | ||
261 | } | ||
262 | atomic_dec(&dev->queuelist[i]->use_count); | ||
263 | } | ||
264 | /* Allocate a new queue */ | ||
265 | down(&dev->struct_sem); | ||
266 | |||
267 | queue = DRM(alloc)(sizeof(*queue), DRM_MEM_QUEUES); | ||
268 | memset(queue, 0, sizeof(*queue)); | ||
269 | atomic_set(&queue->use_count, 1); | ||
270 | |||
271 | ++dev->queue_count; | ||
272 | if (dev->queue_count >= dev->queue_slots) { | ||
273 | oldslots = dev->queue_slots * sizeof(*dev->queuelist); | ||
274 | if (!dev->queue_slots) dev->queue_slots = 1; | ||
275 | dev->queue_slots *= 2; | ||
276 | newslots = dev->queue_slots * sizeof(*dev->queuelist); | ||
277 | |||
278 | dev->queuelist = DRM(realloc)(dev->queuelist, | ||
279 | oldslots, | ||
280 | newslots, | ||
281 | DRM_MEM_QUEUES); | ||
282 | if (!dev->queuelist) { | ||
283 | up(&dev->struct_sem); | ||
284 | DRM_DEBUG("out of memory\n"); | ||
285 | return -ENOMEM; | ||
286 | } | ||
287 | } | ||
288 | dev->queuelist[dev->queue_count-1] = queue; | ||
289 | |||
290 | up(&dev->struct_sem); | ||
291 | DRM_DEBUG("%d (new)\n", dev->queue_count - 1); | ||
292 | return dev->queue_count - 1; | ||
293 | } | ||
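DRM(alloc_queue) grows queuelist geometrically: capacity starts at one slot and doubles whenever queue_count catches up, so N allocations cost O(N) reallocation work overall. A runnable userspace toy of the same policy:

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	int count = 0, slots = 0;
	void **list = NULL;

	for (int i = 0; i < 10; i++) {
		if (++count >= slots) {		/* same test as the driver */
			slots = slots ? slots * 2 : 1;
			list = realloc(list, slots * sizeof(*list));
			if (!list)
				return 1;	/* kernel path returns -ENOMEM */
			printf("grew to %d slots at count %d\n", slots, count);
		}
		list[count - 1] = NULL;		/* the new queue would go here */
	}
	free(list);
	return 0;
}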
294 | |||
295 | int DRM(resctx)(struct inode *inode, struct file *filp, | ||
296 | unsigned int cmd, unsigned long arg) | ||
297 | { | ||
298 | drm_ctx_res_t __user *argp = (void __user *)arg; | ||
299 | drm_ctx_res_t res; | ||
300 | drm_ctx_t ctx; | ||
301 | int i; | ||
302 | |||
303 | DRM_DEBUG("%d\n", DRM_RESERVED_CONTEXTS); | ||
304 | if (copy_from_user(&res, argp, sizeof(res))) | ||
305 | return -EFAULT; | ||
306 | if (res.count >= DRM_RESERVED_CONTEXTS) { | ||
307 | memset(&ctx, 0, sizeof(ctx)); | ||
308 | for (i = 0; i < DRM_RESERVED_CONTEXTS; i++) { | ||
309 | ctx.handle = i; | ||
310 | if (copy_to_user(&res.contexts[i], | ||
311 | &i, | ||
312 | sizeof(i))) | ||
313 | return -EFAULT; | ||
314 | } | ||
315 | } | ||
316 | res.count = DRM_RESERVED_CONTEXTS; | ||
317 | if (copy_to_user(argp, &res, sizeof(res))) | ||
318 | return -EFAULT; | ||
319 | return 0; | ||
320 | } | ||
321 | |||
322 | int DRM(addctx)(struct inode *inode, struct file *filp, | ||
323 | unsigned int cmd, unsigned long arg) | ||
324 | { | ||
325 | drm_file_t *priv = filp->private_data; | ||
326 | drm_device_t *dev = priv->dev; | ||
327 | drm_ctx_t ctx; | ||
328 | drm_ctx_t __user *argp = (void __user *)arg; | ||
329 | |||
330 | if (copy_from_user(&ctx, argp, sizeof(ctx))) | ||
331 | return -EFAULT; | ||
332 | if ((ctx.handle = DRM(alloc_queue)(dev)) == DRM_KERNEL_CONTEXT) { | ||
333 | /* Init kernel's context and get a new one. */ | ||
334 | DRM(init_queue)(dev, dev->queuelist[ctx.handle], &ctx); | ||
335 | ctx.handle = DRM(alloc_queue)(dev); | ||
336 | } | ||
337 | DRM(init_queue)(dev, dev->queuelist[ctx.handle], &ctx); | ||
338 | DRM_DEBUG("%d\n", ctx.handle); | ||
339 | if (copy_to_user(argp, &ctx, sizeof(ctx))) | ||
340 | return -EFAULT; | ||
341 | return 0; | ||
342 | } | ||
343 | |||
344 | int DRM(modctx)(struct inode *inode, struct file *filp, | ||
345 | unsigned int cmd, unsigned long arg) | ||
346 | { | ||
347 | drm_file_t *priv = filp->private_data; | ||
348 | drm_device_t *dev = priv->dev; | ||
349 | drm_ctx_t ctx; | ||
350 | drm_queue_t *q; | ||
351 | |||
352 | if (copy_from_user(&ctx, (drm_ctx_t __user *)arg, sizeof(ctx))) | ||
353 | return -EFAULT; | ||
354 | |||
355 | DRM_DEBUG("%d\n", ctx.handle); | ||
356 | |||
357 | if (ctx.handle < 0 || ctx.handle >= dev->queue_count) return -EINVAL; | ||
358 | q = dev->queuelist[ctx.handle]; | ||
359 | |||
360 | atomic_inc(&q->use_count); | ||
361 | if (atomic_read(&q->use_count) == 1) { | ||
362 | /* No longer in use */ | ||
363 | atomic_dec(&q->use_count); | ||
364 | return -EINVAL; | ||
365 | } | ||
366 | |||
367 | if (DRM_BUFCOUNT(&q->waitlist)) { | ||
368 | atomic_dec(&q->use_count); | ||
369 | return -EBUSY; | ||
370 | } | ||
371 | |||
372 | q->flags = ctx.flags; | ||
373 | |||
374 | atomic_dec(&q->use_count); | ||
375 | return 0; | ||
376 | } | ||
377 | |||
378 | int DRM(getctx)(struct inode *inode, struct file *filp, | ||
379 | unsigned int cmd, unsigned long arg) | ||
380 | { | ||
381 | drm_file_t *priv = filp->private_data; | ||
382 | drm_device_t *dev = priv->dev; | ||
383 | drm_ctx_t __user *argp = (void __user *)arg; | ||
384 | drm_ctx_t ctx; | ||
385 | drm_queue_t *q; | ||
386 | |||
387 | if (copy_from_user(&ctx, argp, sizeof(ctx))) | ||
388 | return -EFAULT; | ||
389 | |||
390 | DRM_DEBUG("%d\n", ctx.handle); | ||
391 | |||
392 | if (ctx.handle >= dev->queue_count) return -EINVAL; | ||
393 | q = dev->queuelist[ctx.handle]; | ||
394 | |||
395 | atomic_inc(&q->use_count); | ||
396 | if (atomic_read(&q->use_count) == 1) { | ||
397 | /* No longer in use */ | ||
398 | atomic_dec(&q->use_count); | ||
399 | return -EINVAL; | ||
400 | } | ||
401 | |||
402 | ctx.flags = q->flags; | ||
403 | atomic_dec(&q->use_count); | ||
404 | |||
405 | if (copy_to_user(argp, &ctx, sizeof(ctx))) | ||
406 | return -EFAULT; | ||
407 | |||
408 | return 0; | ||
409 | } | ||
410 | |||
411 | int DRM(switchctx)(struct inode *inode, struct file *filp, | ||
412 | unsigned int cmd, unsigned long arg) | ||
413 | { | ||
414 | drm_file_t *priv = filp->private_data; | ||
415 | drm_device_t *dev = priv->dev; | ||
416 | drm_ctx_t ctx; | ||
417 | |||
418 | if (copy_from_user(&ctx, (drm_ctx_t __user *)arg, sizeof(ctx))) | ||
419 | return -EFAULT; | ||
420 | DRM_DEBUG("%d\n", ctx.handle); | ||
421 | return DRM(context_switch)(dev, dev->last_context, ctx.handle); | ||
422 | } | ||
423 | |||
424 | int DRM(newctx)(struct inode *inode, struct file *filp, | ||
425 | unsigned int cmd, unsigned long arg) | ||
426 | { | ||
427 | drm_file_t *priv = filp->private_data; | ||
428 | drm_device_t *dev = priv->dev; | ||
429 | drm_ctx_t ctx; | ||
430 | |||
431 | if (copy_from_user(&ctx, (drm_ctx_t __user *)arg, sizeof(ctx))) | ||
432 | return -EFAULT; | ||
433 | DRM_DEBUG("%d\n", ctx.handle); | ||
434 | DRM(context_switch_complete)(dev, ctx.handle); | ||
435 | |||
436 | return 0; | ||
437 | } | ||
438 | |||
439 | int DRM(rmctx)(struct inode *inode, struct file *filp, | ||
440 | unsigned int cmd, unsigned long arg) | ||
441 | { | ||
442 | drm_file_t *priv = filp->private_data; | ||
443 | drm_device_t *dev = priv->dev; | ||
444 | drm_ctx_t ctx; | ||
445 | drm_queue_t *q; | ||
446 | drm_buf_t *buf; | ||
447 | |||
448 | if (copy_from_user(&ctx, (drm_ctx_t __user *)arg, sizeof(ctx))) | ||
449 | return -EFAULT; | ||
450 | DRM_DEBUG("%d\n", ctx.handle); | ||
451 | |||
452 | if (ctx.handle >= dev->queue_count) return -EINVAL; | ||
453 | q = dev->queuelist[ctx.handle]; | ||
454 | |||
455 | atomic_inc(&q->use_count); | ||
456 | if (atomic_read(&q->use_count) == 1) { | ||
457 | /* No longer in use */ | ||
458 | atomic_dec(&q->use_count); | ||
459 | return -EINVAL; | ||
460 | } | ||
461 | |||
462 | atomic_inc(&q->finalization); /* Mark queue in finalization state */ | ||
463 | atomic_sub(2, &q->use_count); /* Mark queue as unused (pending | ||
464 | finalization) */ | ||
465 | |||
466 | while (test_and_set_bit(0, &dev->interrupt_flag)) { | ||
467 | schedule(); | ||
468 | if (signal_pending(current)) { | ||
469 | clear_bit(0, &dev->interrupt_flag); | ||
470 | return -EINTR; | ||
471 | } | ||
472 | } | ||
473 | /* Remove queued buffers */ | ||
474 | while ((buf = DRM(waitlist_get)(&q->waitlist))) { | ||
475 | DRM(free_buffer)(dev, buf); | ||
476 | } | ||
477 | clear_bit(0, &dev->interrupt_flag); | ||
478 | |||
479 | /* Wakeup blocked processes */ | ||
480 | wake_up_interruptible(&q->read_queue); | ||
481 | wake_up_interruptible(&q->write_queue); | ||
482 | wake_up_interruptible(&q->flush_queue); | ||
483 | |||
484 | /* Finalization over. Queue is made | ||
485 | available when both use_count and | ||
486 | finalization become 0, which won't | ||
487 | happen until all the waiting processes | ||
488 | stop waiting. */ | ||
489 | atomic_dec(&q->finalization); | ||
490 | return 0; | ||
491 | } | ||
492 | |||
diff --git a/drivers/char/drm/gamma_dma.c b/drivers/char/drm/gamma_dma.c deleted file mode 100644 index e486fb8d31e9..000000000000 --- a/drivers/char/drm/gamma_dma.c +++ /dev/null | |||
@@ -1,946 +0,0 @@ | |||
1 | /* gamma_dma.c -- DMA support for GMX 2000 -*- linux-c -*- | ||
2 | * Created: Fri Mar 19 14:30:16 1999 by faith@precisioninsight.com | ||
3 | * | ||
4 | * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas. | ||
5 | * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. | ||
6 | * All Rights Reserved. | ||
7 | * | ||
8 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
9 | * copy of this software and associated documentation files (the "Software"), | ||
10 | * to deal in the Software without restriction, including without limitation | ||
11 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
12 | * and/or sell copies of the Software, and to permit persons to whom the | ||
13 | * Software is furnished to do so, subject to the following conditions: | ||
14 | * | ||
15 | * The above copyright notice and this permission notice (including the next | ||
16 | * paragraph) shall be included in all copies or substantial portions of the | ||
17 | * Software. | ||
18 | * | ||
19 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
20 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
21 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
22 | * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
23 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
24 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER | ||
25 | * DEALINGS IN THE SOFTWARE. | ||
26 | * | ||
27 | * Authors: | ||
28 | * Rickard E. (Rik) Faith <faith@valinux.com> | ||
29 | * | ||
30 | */ | ||
31 | |||
32 | #include "gamma.h" | ||
33 | #include "drmP.h" | ||
34 | #include "drm.h" | ||
35 | #include "gamma_drm.h" | ||
36 | #include "gamma_drv.h" | ||
37 | |||
38 | #include <linux/interrupt.h> /* For task queue support */ | ||
39 | #include <linux/delay.h> | ||
40 | |||
41 | static inline void gamma_dma_dispatch(drm_device_t *dev, unsigned long address, | ||
42 | unsigned long length) | ||
43 | { | ||
44 | drm_gamma_private_t *dev_priv = | ||
45 | (drm_gamma_private_t *)dev->dev_private; | ||
46 | mb(); | ||
47 | while ( GAMMA_READ(GAMMA_INFIFOSPACE) < 2) | ||
48 | cpu_relax(); | ||
49 | |||
50 | GAMMA_WRITE(GAMMA_DMAADDRESS, address); | ||
51 | |||
52 | while (GAMMA_READ(GAMMA_GCOMMANDSTATUS) != 4) | ||
53 | cpu_relax(); | ||
54 | |||
55 | GAMMA_WRITE(GAMMA_DMACOUNT, length / 4); | ||
56 | } | ||
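Every register access in this driver follows the same handshake: spin on GAMMA_INFIFOSPACE until the input FIFO can take the words about to be written, then issue the GAMMA_WRITEs, exactly as the dispatch above does. Factored out as an illustrative helper (not in the driver; register macros as in the diff):

/* Hypothetical helper: wait for FIFO room, then write two registers
 * back to back, as gamma_dma_dispatch() does inline. */
static void gamma_fifo_write2(drm_gamma_private_t *dev_priv,
			      u32 reg_a, u32 val_a, u32 reg_b, u32 val_b)
{
	while (GAMMA_READ(GAMMA_INFIFOSPACE) < 2)
		cpu_relax();	/* busy-wait; the FIFO drains in hardware */
	GAMMA_WRITE(reg_a, val_a);
	GAMMA_WRITE(reg_b, val_b);
}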
57 | |||
58 | void gamma_dma_quiescent_single(drm_device_t *dev) | ||
59 | { | ||
60 | drm_gamma_private_t *dev_priv = | ||
61 | (drm_gamma_private_t *)dev->dev_private; | ||
62 | while (GAMMA_READ(GAMMA_DMACOUNT)) | ||
63 | cpu_relax(); | ||
64 | |||
65 | while (GAMMA_READ(GAMMA_INFIFOSPACE) < 2) | ||
66 | cpu_relax(); | ||
67 | |||
68 | GAMMA_WRITE(GAMMA_FILTERMODE, 1 << 10); | ||
69 | GAMMA_WRITE(GAMMA_SYNC, 0); | ||
70 | |||
71 | do { | ||
72 | while (!GAMMA_READ(GAMMA_OUTFIFOWORDS)) | ||
73 | cpu_relax(); | ||
74 | } while (GAMMA_READ(GAMMA_OUTPUTFIFO) != GAMMA_SYNC_TAG); | ||
75 | } | ||
76 | |||
77 | void gamma_dma_quiescent_dual(drm_device_t *dev) | ||
78 | { | ||
79 | drm_gamma_private_t *dev_priv = | ||
80 | (drm_gamma_private_t *)dev->dev_private; | ||
81 | while (GAMMA_READ(GAMMA_DMACOUNT)) | ||
82 | cpu_relax(); | ||
83 | |||
84 | while (GAMMA_READ(GAMMA_INFIFOSPACE) < 3) | ||
85 | cpu_relax(); | ||
86 | |||
87 | GAMMA_WRITE(GAMMA_BROADCASTMASK, 3); | ||
88 | GAMMA_WRITE(GAMMA_FILTERMODE, 1 << 10); | ||
89 | GAMMA_WRITE(GAMMA_SYNC, 0); | ||
90 | |||
91 | /* Read from first MX */ | ||
92 | do { | ||
93 | while (!GAMMA_READ(GAMMA_OUTFIFOWORDS)) | ||
94 | cpu_relax(); | ||
95 | } while (GAMMA_READ(GAMMA_OUTPUTFIFO) != GAMMA_SYNC_TAG); | ||
96 | |||
97 | /* Read from second MX */ | ||
98 | do { | ||
99 | while (!GAMMA_READ(GAMMA_OUTFIFOWORDS + 0x10000)) | ||
100 | cpu_relax(); | ||
101 | } while (GAMMA_READ(GAMMA_OUTPUTFIFO + 0x10000) != GAMMA_SYNC_TAG); | ||
102 | } | ||
103 | |||
104 | void gamma_dma_ready(drm_device_t *dev) | ||
105 | { | ||
106 | drm_gamma_private_t *dev_priv = | ||
107 | (drm_gamma_private_t *)dev->dev_private; | ||
108 | while (GAMMA_READ(GAMMA_DMACOUNT)) | ||
109 | cpu_relax(); | ||
110 | } | ||
111 | |||
112 | static inline int gamma_dma_is_ready(drm_device_t *dev) | ||
113 | { | ||
114 | drm_gamma_private_t *dev_priv = | ||
115 | (drm_gamma_private_t *)dev->dev_private; | ||
116 | return (!GAMMA_READ(GAMMA_DMACOUNT)); | ||
117 | } | ||
118 | |||
119 | irqreturn_t gamma_driver_irq_handler( DRM_IRQ_ARGS ) | ||
120 | { | ||
121 | drm_device_t *dev = (drm_device_t *)arg; | ||
122 | drm_device_dma_t *dma = dev->dma; | ||
123 | drm_gamma_private_t *dev_priv = | ||
124 | (drm_gamma_private_t *)dev->dev_private; | ||
125 | |||
126 | /* FIXME: should check whether we're actually interested in the interrupt? */ | ||
127 | atomic_inc(&dev->counts[6]); /* _DRM_STAT_IRQ */ | ||
128 | |||
129 | while (GAMMA_READ(GAMMA_INFIFOSPACE) < 3) | ||
130 | cpu_relax(); | ||
131 | |||
132 | GAMMA_WRITE(GAMMA_GDELAYTIMER, 0xc350/2); /* 0.05s */ | ||
133 | GAMMA_WRITE(GAMMA_GCOMMANDINTFLAGS, 8); | ||
134 | GAMMA_WRITE(GAMMA_GINTFLAGS, 0x2001); | ||
135 | if (gamma_dma_is_ready(dev)) { | ||
136 | /* Free previous buffer */ | ||
137 | if (test_and_set_bit(0, &dev->dma_flag)) | ||
138 | return IRQ_HANDLED; | ||
139 | if (dma->this_buffer) { | ||
140 | gamma_free_buffer(dev, dma->this_buffer); | ||
141 | dma->this_buffer = NULL; | ||
142 | } | ||
143 | clear_bit(0, &dev->dma_flag); | ||
144 | |||
145 | /* Dispatch new buffer */ | ||
146 | schedule_work(&dev->work); | ||
147 | } | ||
148 | return IRQ_HANDLED; | ||
149 | } | ||
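The handler shows the usual top-half/bottom-half split: acknowledge and re-arm the interrupt, reclaim the buffer that just finished, and defer the next dispatch to process context with schedule_work(). The shape, reduced to a sketch (types and helpers hypothetical; signature shown in its modern two-argument form for brevity):

/* Sketch of the IRQ flow above, with placeholders standing in for the
 * GAMMA_* acknowledgement writes and the DMA-idle test. */
static irqreturn_t example_irq_handler(int irq, void *arg)
{
	struct example_dev *dev = arg;

	example_ack_irq(dev);			/* the GAMMA_WRITE sequence */
	if (example_dma_idle(dev))
		schedule_work(&dev->work);	/* dispatch next buffer later */
	return IRQ_HANDLED;
}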
150 | |||
151 | /* Only called by gamma_dma_schedule. */ | ||
152 | static int gamma_do_dma(drm_device_t *dev, int locked) | ||
153 | { | ||
154 | unsigned long address; | ||
155 | unsigned long length; | ||
156 | drm_buf_t *buf; | ||
157 | int retcode = 0; | ||
158 | drm_device_dma_t *dma = dev->dma; | ||
159 | |||
160 | if (test_and_set_bit(0, &dev->dma_flag)) return -EBUSY; | ||
161 | |||
162 | |||
163 | if (!dma->next_buffer) { | ||
164 | DRM_ERROR("No next_buffer\n"); | ||
165 | clear_bit(0, &dev->dma_flag); | ||
166 | return -EINVAL; | ||
167 | } | ||
168 | |||
169 | buf = dma->next_buffer; | ||
170 | /* WE NOW ARE ON LOGICAL PAGES!! - using page table setup in dma_init */ | ||
171 | /* So we pass the buffer index value into the physical page offset */ | ||
172 | address = buf->idx << 12; | ||
173 | length = buf->used; | ||
174 | |||
175 | DRM_DEBUG("context %d, buffer %d (%ld bytes)\n", | ||
176 | buf->context, buf->idx, length); | ||
177 | |||
178 | if (buf->list == DRM_LIST_RECLAIM) { | ||
179 | gamma_clear_next_buffer(dev); | ||
180 | gamma_free_buffer(dev, buf); | ||
181 | clear_bit(0, &dev->dma_flag); | ||
182 | return -EINVAL; | ||
183 | } | ||
184 | |||
185 | if (!length) { | ||
186 | DRM_ERROR("0 length buffer\n"); | ||
187 | gamma_clear_next_buffer(dev); | ||
188 | gamma_free_buffer(dev, buf); | ||
189 | clear_bit(0, &dev->dma_flag); | ||
190 | return 0; | ||
191 | } | ||
192 | |||
193 | if (!gamma_dma_is_ready(dev)) { | ||
194 | clear_bit(0, &dev->dma_flag); | ||
195 | return -EBUSY; | ||
196 | } | ||
197 | |||
198 | if (buf->while_locked) { | ||
199 | if (!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) { | ||
200 | DRM_ERROR("Dispatching buffer %d from pid %d" | ||
201 | " \"while locked\", but no lock held\n", | ||
202 | buf->idx, current->pid); | ||
203 | } | ||
204 | } else { | ||
205 | if (!locked && !gamma_lock_take(&dev->lock.hw_lock->lock, | ||
206 | DRM_KERNEL_CONTEXT)) { | ||
207 | clear_bit(0, &dev->dma_flag); | ||
208 | return -EBUSY; | ||
209 | } | ||
210 | } | ||
211 | |||
212 | if (dev->last_context != buf->context | ||
213 | && !(dev->queuelist[buf->context]->flags | ||
214 | & _DRM_CONTEXT_PRESERVED)) { | ||
215 | /* PRE: dev->last_context != buf->context */ | ||
216 | if (DRM(context_switch)(dev, dev->last_context, | ||
217 | buf->context)) { | ||
218 | DRM(clear_next_buffer)(dev); | ||
219 | DRM(free_buffer)(dev, buf); | ||
220 | } | ||
221 | retcode = -EBUSY; | ||
222 | goto cleanup; | ||
223 | |||
224 | /* POST: we will wait for the context | ||
225 | switch and will dispatch on a later call | ||
226 | when dev->last_context == buf->context. | ||
227 | NOTE WE HOLD THE LOCK THROUGHOUT THIS | ||
228 | TIME! */ | ||
229 | } | ||
230 | |||
231 | gamma_clear_next_buffer(dev); | ||
232 | buf->pending = 1; | ||
233 | buf->waiting = 0; | ||
234 | buf->list = DRM_LIST_PEND; | ||
235 | |||
236 | /* WE NOW ARE ON LOGICAL PAGES!!! - overriding address */ | ||
237 | address = buf->idx << 12; | ||
238 | |||
239 | gamma_dma_dispatch(dev, address, length); | ||
240 | gamma_free_buffer(dev, dma->this_buffer); | ||
241 | dma->this_buffer = buf; | ||
242 | |||
243 | atomic_inc(&dev->counts[7]); /* _DRM_STAT_DMA */ | ||
244 | atomic_add(length, &dev->counts[8]); /* _DRM_STAT_PRIMARY */ | ||
245 | |||
246 | if (!buf->while_locked && !dev->context_flag && !locked) { | ||
247 | if (gamma_lock_free(dev, &dev->lock.hw_lock->lock, | ||
248 | DRM_KERNEL_CONTEXT)) { | ||
249 | DRM_ERROR("\n"); | ||
250 | } | ||
251 | } | ||
252 | cleanup: | ||
253 | |||
254 | clear_bit(0, &dev->dma_flag); | ||
255 | |||
256 | |||
257 | return retcode; | ||
258 | } | ||
259 | |||
260 | static void gamma_dma_timer_bh(unsigned long dev) | ||
261 | { | ||
262 | gamma_dma_schedule((drm_device_t *)dev, 0); | ||
263 | } | ||
264 | |||
265 | void gamma_irq_immediate_bh(void *dev) | ||
266 | { | ||
267 | gamma_dma_schedule(dev, 0); | ||
268 | } | ||
269 | |||
270 | int gamma_dma_schedule(drm_device_t *dev, int locked) | ||
271 | { | ||
272 | int next; | ||
273 | drm_queue_t *q; | ||
274 | drm_buf_t *buf; | ||
275 | int retcode = 0; | ||
276 | int processed = 0; | ||
277 | int missed; | ||
278 | int expire = 20; | ||
279 | drm_device_dma_t *dma = dev->dma; | ||
280 | |||
281 | if (test_and_set_bit(0, &dev->interrupt_flag)) { | ||
282 | /* Not reentrant */ | ||
283 | atomic_inc(&dev->counts[10]); /* _DRM_STAT_MISSED */ | ||
284 | return -EBUSY; | ||
285 | } | ||
286 | missed = atomic_read(&dev->counts[10]); | ||
287 | |||
288 | |||
289 | again: | ||
290 | if (dev->context_flag) { | ||
291 | clear_bit(0, &dev->interrupt_flag); | ||
292 | return -EBUSY; | ||
293 | } | ||
294 | if (dma->next_buffer) { | ||
295 | /* Unsent buffer that was previously | ||
296 | selected, but that couldn't be sent | ||
297 | because the lock could not be obtained | ||
298 | or the DMA engine wasn't ready. Try | ||
299 | again. */ | ||
300 | if (!(retcode = gamma_do_dma(dev, locked))) ++processed; | ||
301 | } else { | ||
302 | do { | ||
303 | next = gamma_select_queue(dev, gamma_dma_timer_bh); | ||
304 | if (next >= 0) { | ||
305 | q = dev->queuelist[next]; | ||
306 | buf = gamma_waitlist_get(&q->waitlist); | ||
307 | dma->next_buffer = buf; | ||
308 | dma->next_queue = q; | ||
309 | if (buf && buf->list == DRM_LIST_RECLAIM) { | ||
310 | gamma_clear_next_buffer(dev); | ||
311 | gamma_free_buffer(dev, buf); | ||
312 | } | ||
313 | } | ||
314 | } while (next >= 0 && !dma->next_buffer); | ||
315 | if (dma->next_buffer) { | ||
316 | if (!(retcode = gamma_do_dma(dev, locked))) { | ||
317 | ++processed; | ||
318 | } | ||
319 | } | ||
320 | } | ||
321 | |||
322 | if (--expire) { | ||
323 | if (missed != atomic_read(&dev->counts[10])) { | ||
324 | if (gamma_dma_is_ready(dev)) goto again; | ||
325 | } | ||
326 | if (processed && gamma_dma_is_ready(dev)) { | ||
327 | processed = 0; | ||
328 | goto again; | ||
329 | } | ||
330 | } | ||
331 | |||
332 | clear_bit(0, &dev->interrupt_flag); | ||
333 | |||
334 | return retcode; | ||
335 | } | ||
336 | |||
337 | static int gamma_dma_priority(struct file *filp, | ||
338 | drm_device_t *dev, drm_dma_t *d) | ||
339 | { | ||
340 | unsigned long address; | ||
341 | unsigned long length; | ||
342 | int must_free = 0; | ||
343 | int retcode = 0; | ||
344 | int i; | ||
345 | int idx; | ||
346 | drm_buf_t *buf; | ||
347 | drm_buf_t *last_buf = NULL; | ||
348 | drm_device_dma_t *dma = dev->dma; | ||
349 | int *send_indices = NULL; | ||
350 | int *send_sizes = NULL; | ||
351 | |||
352 | DECLARE_WAITQUEUE(entry, current); | ||
353 | |||
354 | /* Turn off interrupt handling */ | ||
355 | while (test_and_set_bit(0, &dev->interrupt_flag)) { | ||
356 | schedule(); | ||
357 | if (signal_pending(current)) return -EINTR; | ||
358 | } | ||
359 | if (!(d->flags & _DRM_DMA_WHILE_LOCKED)) { | ||
360 | while (!gamma_lock_take(&dev->lock.hw_lock->lock, | ||
361 | DRM_KERNEL_CONTEXT)) { | ||
362 | schedule(); | ||
363 | if (signal_pending(current)) { | ||
364 | clear_bit(0, &dev->interrupt_flag); | ||
365 | return -EINTR; | ||
366 | } | ||
367 | } | ||
368 | ++must_free; | ||
369 | } | ||
370 | |||
371 | send_indices = DRM(alloc)(d->send_count * sizeof(*send_indices), | ||
372 | DRM_MEM_DRIVER); | ||
373 | if (send_indices == NULL) | ||
374 | return -ENOMEM; | ||
375 | if (copy_from_user(send_indices, d->send_indices, | ||
376 | d->send_count * sizeof(*send_indices))) { | ||
377 | retcode = -EFAULT; | ||
378 | goto cleanup; | ||
379 | } | ||
380 | |||
381 | send_sizes = DRM(alloc)(d->send_count * sizeof(*send_sizes), | ||
382 | DRM_MEM_DRIVER); | ||
383 | if (send_sizes == NULL) | ||
384 | return -ENOMEM; | ||
385 | if (copy_from_user(send_sizes, d->send_sizes, | ||
386 | d->send_count * sizeof(*send_sizes))) { | ||
387 | retcode = -EFAULT; | ||
388 | goto cleanup; | ||
389 | } | ||
390 | |||
391 | for (i = 0; i < d->send_count; i++) { | ||
392 | idx = send_indices[i]; | ||
393 | if (idx < 0 || idx >= dma->buf_count) { | ||
394 | DRM_ERROR("Index %d (of %d max)\n", | ||
395 | send_indices[i], dma->buf_count - 1); | ||
396 | continue; | ||
397 | } | ||
398 | buf = dma->buflist[ idx ]; | ||
399 | if (buf->filp != filp) { | ||
400 | DRM_ERROR("Process %d using buffer not owned\n", | ||
401 | current->pid); | ||
402 | retcode = -EINVAL; | ||
403 | goto cleanup; | ||
404 | } | ||
405 | if (buf->list != DRM_LIST_NONE) { | ||
406 | DRM_ERROR("Process %d using buffer on list %d\n", | ||
407 | current->pid, buf->list); | ||
408 | retcode = -EINVAL; | ||
409 | goto cleanup; | ||
410 | } | ||
411 | /* This isn't a race condition on | ||
412 | buf->list, since our concern is the | ||
413 | buffer reclaim during the time the | ||
414 | process closes the /dev/drm? handle, so | ||
415 | it can't also be doing DMA. */ | ||
416 | buf->list = DRM_LIST_PRIO; | ||
417 | buf->used = send_sizes[i]; | ||
418 | buf->context = d->context; | ||
419 | buf->while_locked = d->flags & _DRM_DMA_WHILE_LOCKED; | ||
420 | address = (unsigned long)buf->address; | ||
421 | length = buf->used; | ||
422 | if (!length) { | ||
423 | DRM_ERROR("0 length buffer\n"); | ||
424 | } | ||
425 | if (buf->pending) { | ||
426 | DRM_ERROR("Sending pending buffer:" | ||
427 | " buffer %d, offset %d\n", | ||
428 | send_indices[i], i); | ||
429 | retcode = -EINVAL; | ||
430 | goto cleanup; | ||
431 | } | ||
432 | if (buf->waiting) { | ||
433 | DRM_ERROR("Sending waiting buffer:" | ||
434 | " buffer %d, offset %d\n", | ||
435 | send_indices[i], i); | ||
436 | retcode = -EINVAL; | ||
437 | goto cleanup; | ||
438 | } | ||
439 | buf->pending = 1; | ||
440 | |||
441 | if (dev->last_context != buf->context | ||
442 | && !(dev->queuelist[buf->context]->flags | ||
443 | & _DRM_CONTEXT_PRESERVED)) { | ||
444 | add_wait_queue(&dev->context_wait, &entry); | ||
445 | current->state = TASK_INTERRUPTIBLE; | ||
446 | /* PRE: dev->last_context != buf->context */ | ||
447 | DRM(context_switch)(dev, dev->last_context, | ||
448 | buf->context); | ||
449 | /* POST: we will wait for the context | ||
450 | switch and will dispatch on a later call | ||
451 | when dev->last_context == buf->context. | ||
452 | NOTE WE HOLD THE LOCK THROUGHOUT THIS | ||
453 | TIME! */ | ||
454 | schedule(); | ||
455 | current->state = TASK_RUNNING; | ||
456 | remove_wait_queue(&dev->context_wait, &entry); | ||
457 | if (signal_pending(current)) { | ||
458 | retcode = -EINTR; | ||
459 | goto cleanup; | ||
460 | } | ||
461 | if (dev->last_context != buf->context) { | ||
462 | DRM_ERROR("Context mismatch: %d %d\n", | ||
463 | dev->last_context, | ||
464 | buf->context); | ||
465 | } | ||
466 | } | ||
467 | |||
468 | gamma_dma_dispatch(dev, address, length); | ||
469 | atomic_inc(&dev->counts[9]); /* _DRM_STAT_SPECIAL */ | ||
470 | atomic_add(length, &dev->counts[8]); /* _DRM_STAT_PRIMARY */ | ||
471 | |||
472 | if (last_buf) { | ||
473 | gamma_free_buffer(dev, last_buf); | ||
474 | } | ||
475 | last_buf = buf; | ||
476 | } | ||
477 | |||
478 | |||
479 | cleanup: | ||
480 | if (last_buf) { | ||
481 | gamma_dma_ready(dev); | ||
482 | gamma_free_buffer(dev, last_buf); | ||
483 | } | ||
484 | if (send_indices) | ||
485 | DRM(free)(send_indices, d->send_count * sizeof(*send_indices), | ||
486 | DRM_MEM_DRIVER); | ||
487 | if (send_sizes) | ||
488 | DRM(free)(send_sizes, d->send_count * sizeof(*send_sizes), | ||
489 | DRM_MEM_DRIVER); | ||
490 | |||
491 | if (must_free && !dev->context_flag) { | ||
492 | if (gamma_lock_free(dev, &dev->lock.hw_lock->lock, | ||
493 | DRM_KERNEL_CONTEXT)) { | ||
494 | DRM_ERROR("\n"); | ||
495 | } | ||
496 | } | ||
497 | clear_bit(0, &dev->interrupt_flag); | ||
498 | return retcode; | ||
499 | } | ||
500 | |||
501 | static int gamma_dma_send_buffers(struct file *filp, | ||
502 | drm_device_t *dev, drm_dma_t *d) | ||
503 | { | ||
504 | DECLARE_WAITQUEUE(entry, current); | ||
505 | drm_buf_t *last_buf = NULL; | ||
506 | int retcode = 0; | ||
507 | drm_device_dma_t *dma = dev->dma; | ||
508 | int send_index; | ||
509 | |||
510 | if (get_user(send_index, &d->send_indices[d->send_count-1])) | ||
511 | return -EFAULT; | ||
512 | |||
513 | if (d->flags & _DRM_DMA_BLOCK) { | ||
514 | last_buf = dma->buflist[send_index]; | ||
515 | add_wait_queue(&last_buf->dma_wait, &entry); | ||
516 | } | ||
517 | |||
518 | if ((retcode = gamma_dma_enqueue(filp, d))) { | ||
519 | if (d->flags & _DRM_DMA_BLOCK) | ||
520 | remove_wait_queue(&last_buf->dma_wait, &entry); | ||
521 | return retcode; | ||
522 | } | ||
523 | |||
524 | gamma_dma_schedule(dev, 0); | ||
525 | |||
526 | if (d->flags & _DRM_DMA_BLOCK) { | ||
527 | DRM_DEBUG("%d waiting\n", current->pid); | ||
528 | for (;;) { | ||
529 | current->state = TASK_INTERRUPTIBLE; | ||
530 | if (!last_buf->waiting && !last_buf->pending) | ||
531 | break; /* finished */ | ||
532 | schedule(); | ||
533 | if (signal_pending(current)) { | ||
534 | retcode = -EINTR; /* Can't restart */ | ||
535 | break; | ||
536 | } | ||
537 | } | ||
538 | current->state = TASK_RUNNING; | ||
539 | DRM_DEBUG("%d running\n", current->pid); | ||
540 | remove_wait_queue(&last_buf->dma_wait, &entry); | ||
541 | if (!retcode | ||
542 | || (last_buf->list==DRM_LIST_PEND && !last_buf->pending)) { | ||
543 | if (!waitqueue_active(&last_buf->dma_wait)) { | ||
544 | gamma_free_buffer(dev, last_buf); | ||
545 | } | ||
546 | } | ||
547 | if (retcode) { | ||
548 | DRM_ERROR("ctx%d w%d p%d c%ld i%d l%d pid:%d\n", | ||
549 | d->context, | ||
550 | last_buf->waiting, | ||
551 | last_buf->pending, | ||
552 | (long)DRM_WAITCOUNT(dev, d->context), | ||
553 | last_buf->idx, | ||
554 | last_buf->list, | ||
555 | current->pid); | ||
556 | } | ||
557 | } | ||
558 | return retcode; | ||
559 | } | ||
560 | |||
561 | int gamma_dma(struct inode *inode, struct file *filp, unsigned int cmd, | ||
562 | unsigned long arg) | ||
563 | { | ||
564 | drm_file_t *priv = filp->private_data; | ||
565 | drm_device_t *dev = priv->dev; | ||
566 | drm_device_dma_t *dma = dev->dma; | ||
567 | int retcode = 0; | ||
568 | drm_dma_t __user *argp = (void __user *)arg; | ||
569 | drm_dma_t d; | ||
570 | |||
571 | if (copy_from_user(&d, argp, sizeof(d))) | ||
572 | return -EFAULT; | ||
573 | |||
574 | if (d.send_count < 0 || d.send_count > dma->buf_count) { | ||
575 | DRM_ERROR("Process %d trying to send %d buffers (of %d max)\n", | ||
576 | current->pid, d.send_count, dma->buf_count); | ||
577 | return -EINVAL; | ||
578 | } | ||
579 | |||
580 | if (d.request_count < 0 || d.request_count > dma->buf_count) { | ||
581 | DRM_ERROR("Process %d trying to get %d buffers (of %d max)\n", | ||
582 | current->pid, d.request_count, dma->buf_count); | ||
583 | return -EINVAL; | ||
584 | } | ||
585 | |||
586 | if (d.send_count) { | ||
587 | if (d.flags & _DRM_DMA_PRIORITY) | ||
588 | retcode = gamma_dma_priority(filp, dev, &d); | ||
589 | else | ||
590 | retcode = gamma_dma_send_buffers(filp, dev, &d); | ||
591 | } | ||
592 | |||
593 | d.granted_count = 0; | ||
594 | |||
595 | if (!retcode && d.request_count) { | ||
596 | retcode = gamma_dma_get_buffers(filp, &d); | ||
597 | } | ||
598 | |||
599 | DRM_DEBUG("%d returning, granted = %d\n", | ||
600 | current->pid, d.granted_count); | ||
601 | if (copy_to_user(argp, &d, sizeof(d))) | ||
602 | return -EFAULT; | ||
603 | |||
604 | return retcode; | ||
605 | } | ||
606 | |||
607 | /* ============================================================= | ||
608 | * DMA initialization, cleanup | ||
609 | */ | ||
610 | |||
611 | static int gamma_do_init_dma( drm_device_t *dev, drm_gamma_init_t *init ) | ||
612 | { | ||
613 | drm_gamma_private_t *dev_priv; | ||
614 | drm_device_dma_t *dma = dev->dma; | ||
615 | drm_buf_t *buf; | ||
616 | int i; | ||
617 | struct list_head *list; | ||
618 | unsigned long *pgt; | ||
619 | |||
620 | DRM_DEBUG( "%s\n", __FUNCTION__ ); | ||
621 | |||
622 | dev_priv = DRM(alloc)( sizeof(drm_gamma_private_t), | ||
623 | DRM_MEM_DRIVER ); | ||
624 | if ( !dev_priv ) | ||
625 | return -ENOMEM; | ||
626 | |||
627 | dev->dev_private = (void *)dev_priv; | ||
628 | |||
629 | memset( dev_priv, 0, sizeof(drm_gamma_private_t) ); | ||
630 | |||
631 | dev_priv->num_rast = init->num_rast; | ||
632 | |||
633 | list_for_each(list, &dev->maplist->head) { | ||
634 | drm_map_list_t *r_list = list_entry(list, drm_map_list_t, head); | ||
635 | if( r_list->map && | ||
636 | r_list->map->type == _DRM_SHM && | ||
637 | r_list->map->flags & _DRM_CONTAINS_LOCK ) { | ||
638 | dev_priv->sarea = r_list->map; | ||
639 | break; | ||
640 | } | ||
641 | } | ||
642 | |||
643 | dev_priv->mmio0 = drm_core_findmap(dev, init->mmio0); | ||
644 | dev_priv->mmio1 = drm_core_findmap(dev, init->mmio1); | ||
645 | dev_priv->mmio2 = drm_core_findmap(dev, init->mmio2); | ||
646 | dev_priv->mmio3 = drm_core_findmap(dev, init->mmio3); | ||
647 | |||
648 | dev_priv->sarea_priv = (drm_gamma_sarea_t *) | ||
649 | ((u8 *)dev_priv->sarea->handle + | ||
650 | init->sarea_priv_offset); | ||
651 | |||
652 | if (init->pcimode) { | ||
653 | buf = dma->buflist[GLINT_DRI_BUF_COUNT]; | ||
654 | pgt = buf->address; | ||
655 | |||
656 | for (i = 0; i < GLINT_DRI_BUF_COUNT; i++) { | ||
657 | buf = dma->buflist[i]; | ||
658 | *pgt = virt_to_phys((void*)buf->address) | 0x07; | ||
659 | pgt++; | ||
660 | } | ||
661 | |||
662 | buf = dma->buflist[GLINT_DRI_BUF_COUNT]; | ||
663 | } else { | ||
664 | dev->agp_buffer_map = drm_core_findmap(dev, init->buffers_offset); | ||
665 | drm_core_ioremap( dev->agp_buffer_map, dev); | ||
666 | |||
667 | buf = dma->buflist[GLINT_DRI_BUF_COUNT]; | ||
668 | pgt = buf->address; | ||
669 | |||
670 | for (i = 0; i < GLINT_DRI_BUF_COUNT; i++) { | ||
671 | buf = dma->buflist[i]; | ||
672 | *pgt = (unsigned long)buf->address + 0x07; | ||
673 | pgt++; | ||
674 | } | ||
675 | |||
676 | buf = dma->buflist[GLINT_DRI_BUF_COUNT]; | ||
677 | |||
678 | while (GAMMA_READ(GAMMA_INFIFOSPACE) < 1); | ||
679 | GAMMA_WRITE( GAMMA_GDMACONTROL, 0xe); | ||
680 | } | ||
681 | while (GAMMA_READ(GAMMA_INFIFOSPACE) < 2); | ||
682 | GAMMA_WRITE( GAMMA_PAGETABLEADDR, virt_to_phys((void*)buf->address) ); | ||
683 | GAMMA_WRITE( GAMMA_PAGETABLELENGTH, 2 ); | ||
684 | |||
685 | return 0; | ||
686 | } | ||
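The init path builds a software page table in the spare DMA buffer: each entry is a buffer's physical (or, in the non-PCI branch, aperture) address with low flag bits OR'ed in, and the chip is then pointed at the table itself. The PCI-mode build, distilled (0x07 is copied verbatim from the code above, presumably the GLINT valid/permission bits; pgt_buf is a hypothetical name for dma->buflist[GLINT_DRI_BUF_COUNT]):

/* Distilled PCI-mode table build from gamma_do_init_dma(). */
drm_buf_t *pgt_buf = dma->buflist[GLINT_DRI_BUF_COUNT];
unsigned long *pgt = pgt_buf->address;

for (i = 0; i < GLINT_DRI_BUF_COUNT; i++)
	*pgt++ = virt_to_phys((void *)dma->buflist[i]->address) | 0x07;

GAMMA_WRITE(GAMMA_PAGETABLEADDR, virt_to_phys((void *)pgt_buf->address));
GAMMA_WRITE(GAMMA_PAGETABLELENGTH, 2);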
687 | |||
688 | int gamma_do_cleanup_dma( drm_device_t *dev ) | ||
689 | { | ||
690 | DRM_DEBUG( "%s\n", __FUNCTION__ ); | ||
691 | |||
692 | /* Make sure interrupts are disabled here because the uninstall ioctl | ||
693 | * may not have been called from userspace and after dev_private | ||
694 | * is freed, it's too late. | ||
695 | */ | ||
696 | if (drm_core_check_feature(dev, DRIVER_HAVE_IRQ)) | ||
697 | if ( dev->irq_enabled ) | ||
698 | DRM(irq_uninstall)(dev); | ||
699 | |||
700 | if ( dev->dev_private ) { | ||
701 | |||
702 | if ( dev->agp_buffer_map != NULL ) | ||
703 | drm_core_ioremapfree( dev->agp_buffer_map, dev ); | ||
704 | |||
705 | DRM(free)( dev->dev_private, sizeof(drm_gamma_private_t), | ||
706 | DRM_MEM_DRIVER ); | ||
707 | dev->dev_private = NULL; | ||
708 | } | ||
709 | |||
710 | return 0; | ||
711 | } | ||
712 | |||
713 | int gamma_dma_init( struct inode *inode, struct file *filp, | ||
714 | unsigned int cmd, unsigned long arg ) | ||
715 | { | ||
716 | drm_file_t *priv = filp->private_data; | ||
717 | drm_device_t *dev = priv->dev; | ||
718 | drm_gamma_init_t init; | ||
719 | |||
720 | LOCK_TEST_WITH_RETURN( dev, filp ); | ||
721 | |||
722 | if ( copy_from_user( &init, (drm_gamma_init_t __user *)arg, sizeof(init) ) ) | ||
723 | return -EFAULT; | ||
724 | |||
725 | switch ( init.func ) { | ||
726 | case GAMMA_INIT_DMA: | ||
727 | return gamma_do_init_dma( dev, &init ); | ||
728 | case GAMMA_CLEANUP_DMA: | ||
729 | return gamma_do_cleanup_dma( dev ); | ||
730 | } | ||
731 | |||
732 | return -EINVAL; | ||
733 | } | ||
734 | |||
735 | static int gamma_do_copy_dma( drm_device_t *dev, drm_gamma_copy_t *copy ) | ||
736 | { | ||
737 | drm_device_dma_t *dma = dev->dma; | ||
738 | unsigned int *screenbuf; | ||
739 | |||
740 | DRM_DEBUG( "%s\n", __FUNCTION__ ); | ||
741 | |||
742 | /* We've DRM_RESTRICTED this DMA buffer */ | ||
743 | |||
744 | screenbuf = dma->buflist[ GLINT_DRI_BUF_COUNT + 1 ]->address; | ||
745 | |||
746 | #if 0 | ||
747 | *buffer++ = 0x180; /* Tag (FilterMode) */ | ||
748 | *buffer++ = 0x200; /* Allow FBColor through */ | ||
749 | *buffer++ = 0x53B; /* Tag */ | ||
750 | *buffer++ = copy->Pitch; | ||
751 | *buffer++ = 0x53A; /* Tag */ | ||
752 | *buffer++ = copy->SrcAddress; | ||
753 | *buffer++ = 0x539; /* Tag */ | ||
754 | *buffer++ = copy->WidthHeight; /* Initiates transfer */ | ||
755 | *buffer++ = 0x53C; /* Tag - DMAOutputAddress */ | ||
756 | *buffer++ = virt_to_phys((void*)screenbuf); | ||
757 | *buffer++ = 0x53D; /* Tag - DMAOutputCount */ | ||
758 | *buffer++ = copy->Count; /* Reads HostOutFifo BLOCKS until ..*/ | ||
759 | |||
760 | /* Data now sitting in dma->buflist[ GLINT_DRI_BUF_COUNT + 1 ] */ | ||
761 | /* Now put it back to the screen */ | ||
762 | |||
763 | *buffer++ = 0x180; /* Tag (FilterMode) */ | ||
764 | *buffer++ = 0x400; /* Allow Sync through */ | ||
765 | *buffer++ = 0x538; /* Tag - DMARectangleReadTarget */ | ||
766 | *buffer++ = 0x155; /* FBSourceData | count */ | ||
767 | *buffer++ = 0x537; /* Tag */ | ||
768 | *buffer++ = copy->Pitch; | ||
769 | *buffer++ = 0x536; /* Tag */ | ||
770 | *buffer++ = copy->DstAddress; | ||
771 | *buffer++ = 0x535; /* Tag */ | ||
772 | *buffer++ = copy->WidthHeight; /* Initiates transfer */ | ||
773 | *buffer++ = 0x530; /* Tag - DMAAddr */ | ||
774 | *buffer++ = virt_to_phys((void*)screenbuf); | ||
775 | *buffer++ = 0x531; | ||
776 | *buffer++ = copy->Count; /* initiates DMA transfer of color data */ | ||
777 | #endif | ||
778 | |||
779 | /* need to dispatch it now */ | ||
780 | |||
781 | return 0; | ||
782 | } | ||
783 | |||
784 | int gamma_dma_copy( struct inode *inode, struct file *filp, | ||
785 | unsigned int cmd, unsigned long arg ) | ||
786 | { | ||
787 | drm_file_t *priv = filp->private_data; | ||
788 | drm_device_t *dev = priv->dev; | ||
789 | drm_gamma_copy_t copy; | ||
790 | |||
791 | if ( copy_from_user( &copy, (drm_gamma_copy_t __user *)arg, sizeof(copy) ) ) | ||
792 | return -EFAULT; | ||
793 | |||
794 | return gamma_do_copy_dma( dev, &copy ); | ||
795 | } | ||
796 | |||
797 | /* ============================================================= | ||
798 | * Per Context SAREA Support | ||
799 | */ | ||
800 | |||
801 | int gamma_getsareactx(struct inode *inode, struct file *filp, | ||
802 | unsigned int cmd, unsigned long arg) | ||
803 | { | ||
804 | drm_file_t *priv = filp->private_data; | ||
805 | drm_device_t *dev = priv->dev; | ||
806 | drm_ctx_priv_map_t __user *argp = (void __user *)arg; | ||
807 | drm_ctx_priv_map_t request; | ||
808 | drm_map_t *map; | ||
809 | |||
810 | if (copy_from_user(&request, argp, sizeof(request))) | ||
811 | return -EFAULT; | ||
812 | |||
813 | down(&dev->struct_sem); | ||
814 | if ((int)request.ctx_id >= dev->max_context) { | ||
815 | up(&dev->struct_sem); | ||
816 | return -EINVAL; | ||
817 | } | ||
818 | |||
819 | map = dev->context_sareas[request.ctx_id]; | ||
820 | up(&dev->struct_sem); | ||
821 | |||
822 | request.handle = map->handle; | ||
823 | if (copy_to_user(argp, &request, sizeof(request))) | ||
824 | return -EFAULT; | ||
825 | return 0; | ||
826 | } | ||
827 | |||
828 | int gamma_setsareactx(struct inode *inode, struct file *filp, | ||
829 | unsigned int cmd, unsigned long arg) | ||
830 | { | ||
831 | drm_file_t *priv = filp->private_data; | ||
832 | drm_device_t *dev = priv->dev; | ||
833 | drm_ctx_priv_map_t request; | ||
834 | drm_map_t *map = NULL; | ||
835 | drm_map_list_t *r_list; | ||
836 | struct list_head *list; | ||
837 | |||
838 | if (copy_from_user(&request, | ||
839 | (drm_ctx_priv_map_t __user *)arg, | ||
840 | sizeof(request))) | ||
841 | return -EFAULT; | ||
842 | |||
843 | down(&dev->struct_sem); | ||
844 | r_list = NULL; | ||
845 | list_for_each(list, &dev->maplist->head) { | ||
846 | r_list = list_entry(list, drm_map_list_t, head); | ||
847 | if(r_list->map && | ||
848 | r_list->map->handle == request.handle) break; | ||
849 | } | ||
850 | if (list == &(dev->maplist->head)) { | ||
851 | up(&dev->struct_sem); | ||
852 | return -EINVAL; | ||
853 | } | ||
854 | map = r_list->map; | ||
855 | up(&dev->struct_sem); | ||
856 | |||
857 | if (!map) return -EINVAL; | ||
858 | |||
859 | down(&dev->struct_sem); | ||
860 | if ((int)request.ctx_id >= dev->max_context) { | ||
861 | up(&dev->struct_sem); | ||
862 | return -EINVAL; | ||
863 | } | ||
864 | dev->context_sareas[request.ctx_id] = map; | ||
865 | up(&dev->struct_sem); | ||
866 | return 0; | ||
867 | } | ||
868 | |||
869 | void gamma_driver_irq_preinstall( drm_device_t *dev ) { | ||
870 | drm_gamma_private_t *dev_priv = | ||
871 | (drm_gamma_private_t *)dev->dev_private; | ||
872 | |||
873 | while(GAMMA_READ(GAMMA_INFIFOSPACE) < 2) | ||
874 | cpu_relax(); | ||
875 | |||
876 | GAMMA_WRITE( GAMMA_GCOMMANDMODE, 0x00000004 ); | ||
877 | GAMMA_WRITE( GAMMA_GDMACONTROL, 0x00000000 ); | ||
878 | } | ||
879 | |||
880 | void gamma_driver_irq_postinstall( drm_device_t *dev ) { | ||
881 | drm_gamma_private_t *dev_priv = | ||
882 | (drm_gamma_private_t *)dev->dev_private; | ||
883 | |||
884 | while(GAMMA_READ(GAMMA_INFIFOSPACE) < 3) | ||
885 | cpu_relax(); | ||
886 | |||
887 | GAMMA_WRITE( GAMMA_GINTENABLE, 0x00002001 ); | ||
888 | GAMMA_WRITE( GAMMA_COMMANDINTENABLE, 0x00000008 ); | ||
889 | GAMMA_WRITE( GAMMA_GDELAYTIMER, 0x00039090 ); | ||
890 | } | ||
891 | |||
892 | void gamma_driver_irq_uninstall( drm_device_t *dev ) { | ||
893 | drm_gamma_private_t *dev_priv = | ||
894 | (drm_gamma_private_t *)dev->dev_private; | ||
895 | if (!dev_priv) | ||
896 | return; | ||
897 | |||
898 | while(GAMMA_READ(GAMMA_INFIFOSPACE) < 3) | ||
899 | cpu_relax(); | ||
900 | |||
901 | GAMMA_WRITE( GAMMA_GDELAYTIMER, 0x00000000 ); | ||
902 | GAMMA_WRITE( GAMMA_COMMANDINTENABLE, 0x00000000 ); | ||
903 | GAMMA_WRITE( GAMMA_GINTENABLE, 0x00000000 ); | ||
904 | } | ||
905 | |||
906 | extern drm_ioctl_desc_t DRM(ioctls)[]; | ||
907 | |||
908 | static int gamma_driver_preinit(drm_device_t *dev) | ||
909 | { | ||
910 | /* reset the finish ioctl */ | ||
911 | DRM(ioctls)[DRM_IOCTL_NR(DRM_IOCTL_FINISH)].func = DRM(finish); | ||
912 | return 0; | ||
913 | } | ||
914 | |||
915 | static void gamma_driver_pretakedown(drm_device_t *dev) | ||
916 | { | ||
917 | gamma_do_cleanup_dma(dev); | ||
918 | } | ||
919 | |||
920 | static void gamma_driver_dma_ready(drm_device_t *dev) | ||
921 | { | ||
922 | gamma_dma_ready(dev); | ||
923 | } | ||
924 | |||
925 | static int gamma_driver_dma_quiescent(drm_device_t *dev) | ||
926 | { | ||
927 | drm_gamma_private_t *dev_priv = ( | ||
928 | drm_gamma_private_t *)dev->dev_private; | ||
929 | if (dev_priv->num_rast == 2) | ||
930 | gamma_dma_quiescent_dual(dev); | ||
931 | else gamma_dma_quiescent_single(dev); | ||
932 | return 0; | ||
933 | } | ||
934 | |||
935 | void gamma_driver_register_fns(drm_device_t *dev) | ||
936 | { | ||
937 | dev->driver_features = DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_PCI_DMA | DRIVER_HAVE_DMA | DRIVER_HAVE_IRQ; | ||
938 | DRM(fops).read = gamma_fops_read; | ||
939 | DRM(fops).poll = gamma_fops_poll; | ||
940 | dev->driver.preinit = gamma_driver_preinit; | ||
941 | dev->driver.pretakedown = gamma_driver_pretakedown; | ||
942 | dev->driver.dma_ready = gamma_driver_dma_ready; | ||
943 | dev->driver.dma_quiescent = gamma_driver_dma_quiescent; | ||
944 | dev->driver.dma_flush_block_and_flush = gamma_flush_block_and_flush; | ||
945 | dev->driver.dma_flush_unblock = gamma_flush_unblock; | ||
946 | } | ||
diff --git a/drivers/char/drm/gamma_drm.h b/drivers/char/drm/gamma_drm.h deleted file mode 100644 index 20819ded0e15..000000000000 --- a/drivers/char/drm/gamma_drm.h +++ /dev/null | |||
@@ -1,90 +0,0 @@ | |||
1 | #ifndef _GAMMA_DRM_H_ | ||
2 | #define _GAMMA_DRM_H_ | ||
3 | |||
4 | typedef struct _drm_gamma_tex_region { | ||
5 | unsigned char next, prev; /* indices to form a circular LRU */ | ||
6 | unsigned char in_use; /* owned by a client, or free? */ | ||
7 | int age; /* tracked by clients to update local LRU's */ | ||
8 | } drm_gamma_tex_region_t; | ||
9 | |||
10 | typedef struct { | ||
11 | unsigned int GDeltaMode; | ||
12 | unsigned int GDepthMode; | ||
13 | unsigned int GGeometryMode; | ||
14 | unsigned int GTransformMode; | ||
15 | } drm_gamma_context_regs_t; | ||
16 | |||
17 | typedef struct _drm_gamma_sarea { | ||
18 | drm_gamma_context_regs_t context_state; | ||
19 | |||
20 | unsigned int dirty; | ||
21 | |||
22 | |||
23 | /* Maintain an LRU of contiguous regions of texture space. If | ||
24 | * you think you own a region of texture memory, and it has an | ||
25 | * age different to the one you set, then you are mistaken and | ||
26 | * it has been stolen by another client. If global texAge | ||
27 | * hasn't changed, there is no need to walk the list. | ||
28 | * | ||
29 | * These regions can be used as a proxy for the fine-grained | ||
30 | * texture information of other clients - by maintaining them | ||
31 | * in the same lru which is used to age their own textures, | ||
32 | * clients have an approximate lru for the whole of global | ||
33 | * texture space, and can make informed decisions as to which | ||
34 | * areas to kick out. There is no need to choose whether to | ||
35 | * kick out your own texture or someone else's - simply eject | ||
36 | * them all in LRU order. | ||
37 | */ | ||
38 | |||
39 | #define GAMMA_NR_TEX_REGIONS 64 | ||
40 | drm_gamma_tex_region_t texList[GAMMA_NR_TEX_REGIONS+1]; | ||
41 | /* Last elt is sentinel */ | ||
42 | int texAge; /* last time texture was uploaded */ | ||
43 | int last_enqueue; /* last time a buffer was enqueued */ | ||
44 | int last_dispatch; /* age of the most recently dispatched buffer */ | ||
45 | int last_quiescent; /* */ | ||
46 | int ctxOwner; /* last context to upload state */ | ||
47 | |||
48 | int vertex_prim; | ||
49 | } drm_gamma_sarea_t; | ||
50 | |||
51 | /* WARNING: If you change any of these defines, make sure to change the | ||
52 | * defines in the Xserver file (xf86drmGamma.h) | ||
53 | */ | ||
54 | |||
55 | /* Gamma specific ioctls | ||
56 | * The device specific ioctl range is 0x40 to 0x79. | ||
57 | */ | ||
58 | #define DRM_IOCTL_GAMMA_INIT DRM_IOW( 0x40, drm_gamma_init_t) | ||
59 | #define DRM_IOCTL_GAMMA_COPY DRM_IOW( 0x41, drm_gamma_copy_t) | ||
60 | |||
61 | typedef struct drm_gamma_copy { | ||
62 | unsigned int DMAOutputAddress; | ||
63 | unsigned int DMAOutputCount; | ||
64 | unsigned int DMAReadGLINTSource; | ||
65 | unsigned int DMARectangleWriteAddress; | ||
66 | unsigned int DMARectangleWriteLinePitch; | ||
67 | unsigned int DMARectangleWrite; | ||
68 | unsigned int DMARectangleReadAddress; | ||
69 | unsigned int DMARectangleReadLinePitch; | ||
70 | unsigned int DMARectangleRead; | ||
71 | unsigned int DMARectangleReadTarget; | ||
72 | } drm_gamma_copy_t; | ||
73 | |||
74 | typedef struct drm_gamma_init { | ||
75 | enum { | ||
76 | GAMMA_INIT_DMA = 0x01, | ||
77 | GAMMA_CLEANUP_DMA = 0x02 | ||
78 | } func; | ||
79 | |||
80 | int sarea_priv_offset; | ||
81 | int pcimode; | ||
82 | unsigned int mmio0; | ||
83 | unsigned int mmio1; | ||
84 | unsigned int mmio2; | ||
85 | unsigned int mmio3; | ||
86 | unsigned int buffers_offset; | ||
87 | int num_rast; | ||
88 | } drm_gamma_init_t; | ||
89 | |||
90 | #endif /* _GAMMA_DRM_H_ */ | ||
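The texList ring documented in the sarea comment above implements a texture LRU shared between clients. A hypothetical sketch of the kick-out walk that comment describes (the helper name and caller are illustrative; only the struct fields and the sentinel convention come from the header above):

    static void kick_textures_lru(drm_gamma_sarea_t *sarea, int needed)
    {
            drm_gamma_tex_region_t *list = sarea->texList;
            const unsigned char sentinel = GAMMA_NR_TEX_REGIONS;
            unsigned char i;

            /* The sentinel's prev link is the least-recently-used region;
             * evict strictly in LRU order, no matter which client owns
             * each region, exactly as the comment above prescribes. */
            while (needed-- > 0) {
                    i = list[sentinel].prev;
                    if (i == sentinel)
                            break;          /* ring is empty */
                    list[i].in_use = 0;
                    /* unlink the region from the circular list */
                    list[list[i].prev].next = list[i].next;
                    list[list[i].next].prev = list[i].prev;
            }
    }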
diff --git a/drivers/char/drm/gamma_drv.c b/drivers/char/drm/gamma_drv.c deleted file mode 100644 index e7e64b62792a..000000000000 --- a/drivers/char/drm/gamma_drv.c +++ /dev/null | |||
@@ -1,59 +0,0 @@ | |||
1 | /* gamma.c -- 3dlabs GMX 2000 driver -*- linux-c -*- | ||
2 | * Created: Mon Jan 4 08:58:31 1999 by faith@precisioninsight.com | ||
3 | * | ||
4 | * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas. | ||
5 | * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. | ||
6 | * All Rights Reserved. | ||
7 | * | ||
8 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
9 | * copy of this software and associated documentation files (the "Software"), | ||
10 | * to deal in the Software without restriction, including without limitation | ||
11 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
12 | * and/or sell copies of the Software, and to permit persons to whom the | ||
13 | * Software is furnished to do so, subject to the following conditions: | ||
14 | * | ||
15 | * The above copyright notice and this permission notice (including the next | ||
16 | * paragraph) shall be included in all copies or substantial portions of the | ||
17 | * Software. | ||
18 | * | ||
19 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
20 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
21 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
22 | * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
23 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
24 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER | ||
25 | * DEALINGS IN THE SOFTWARE. | ||
26 | * | ||
27 | * Authors: | ||
28 | * Rickard E. (Rik) Faith <faith@valinux.com> | ||
29 | * Gareth Hughes <gareth@valinux.com> | ||
30 | */ | ||
31 | |||
32 | #include <linux/config.h> | ||
33 | #include "gamma.h" | ||
34 | #include "drmP.h" | ||
35 | #include "drm.h" | ||
36 | #include "gamma_drm.h" | ||
37 | #include "gamma_drv.h" | ||
38 | |||
39 | #include "drm_auth.h" | ||
40 | #include "drm_agpsupport.h" | ||
41 | #include "drm_bufs.h" | ||
42 | #include "gamma_context.h" /* NOTE! */ | ||
43 | #include "drm_dma.h" | ||
44 | #include "gamma_old_dma.h" /* NOTE */ | ||
45 | #include "drm_drawable.h" | ||
46 | #include "drm_drv.h" | ||
47 | |||
48 | #include "drm_fops.h" | ||
49 | #include "drm_init.h" | ||
50 | #include "drm_ioctl.h" | ||
51 | #include "drm_irq.h" | ||
52 | #include "gamma_lists.h" /* NOTE */ | ||
53 | #include "drm_lock.h" | ||
54 | #include "gamma_lock.h" /* NOTE */ | ||
55 | #include "drm_memory.h" | ||
56 | #include "drm_proc.h" | ||
57 | #include "drm_vm.h" | ||
58 | #include "drm_stub.h" | ||
59 | #include "drm_scatter.h" | ||
diff --git a/drivers/char/drm/gamma_drv.h b/drivers/char/drm/gamma_drv.h deleted file mode 100644 index 146fcc6253cd..000000000000 --- a/drivers/char/drm/gamma_drv.h +++ /dev/null | |||
@@ -1,147 +0,0 @@ | |||
1 | /* gamma_drv.h -- Private header for 3dlabs GMX 2000 driver -*- linux-c -*- | ||
2 | * Created: Mon Jan 4 10:05:05 1999 by faith@precisioninsight.com | ||
3 | * | ||
4 | * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas. | ||
5 | * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. | ||
6 | * All rights reserved. | ||
7 | * | ||
8 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
9 | * copy of this software and associated documentation files (the "Software"), | ||
10 | * to deal in the Software without restriction, including without limitation | ||
11 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
12 | * and/or sell copies of the Software, and to permit persons to whom the | ||
13 | * Software is furnished to do so, subject to the following conditions: | ||
14 | * | ||
15 | * The above copyright notice and this permission notice (including the next | ||
16 | * paragraph) shall be included in all copies or substantial portions of the | ||
17 | * Software. | ||
18 | * | ||
19 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
20 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
21 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
22 | * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
23 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
24 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER | ||
25 | * DEALINGS IN THE SOFTWARE. | ||
26 | * | ||
27 | * Authors: | ||
28 | * Rickard E. (Rik) Faith <faith@valinux.com> | ||
29 | * | ||
30 | */ | ||
31 | |||
32 | #ifndef _GAMMA_DRV_H_ | ||
33 | #define _GAMMA_DRV_H_ | ||
34 | |||
35 | typedef struct drm_gamma_private { | ||
36 | drm_gamma_sarea_t *sarea_priv; | ||
37 | drm_map_t *sarea; | ||
38 | drm_map_t *mmio0; | ||
39 | drm_map_t *mmio1; | ||
40 | drm_map_t *mmio2; | ||
41 | drm_map_t *mmio3; | ||
42 | int num_rast; | ||
43 | } drm_gamma_private_t; | ||
44 | |||
45 | /* gamma_dma.c */ | ||
46 | extern int gamma_dma_init( struct inode *inode, struct file *filp, | ||
47 | unsigned int cmd, unsigned long arg ); | ||
48 | extern int gamma_dma_copy( struct inode *inode, struct file *filp, | ||
49 | unsigned int cmd, unsigned long arg ); | ||
50 | |||
51 | extern int gamma_do_cleanup_dma( drm_device_t *dev ); | ||
52 | extern void gamma_dma_ready(drm_device_t *dev); | ||
53 | extern void gamma_dma_quiescent_single(drm_device_t *dev); | ||
54 | extern void gamma_dma_quiescent_dual(drm_device_t *dev); | ||
55 | |||
56 | /* gamma_dma.c */ | ||
57 | extern int gamma_dma_schedule(drm_device_t *dev, int locked); | ||
58 | extern int gamma_dma(struct inode *inode, struct file *filp, | ||
59 | unsigned int cmd, unsigned long arg); | ||
60 | extern int gamma_find_devices(void); | ||
61 | extern int gamma_found(void); | ||
62 | |||
63 | /* Gamma-specific code pulled from drm_fops.h: | ||
64 | */ | ||
65 | extern int DRM(finish)(struct inode *inode, struct file *filp, | ||
66 | unsigned int cmd, unsigned long arg); | ||
67 | extern int DRM(flush_unblock)(drm_device_t *dev, int context, | ||
68 | drm_lock_flags_t flags); | ||
69 | extern int DRM(flush_block_and_flush)(drm_device_t *dev, int context, | ||
70 | drm_lock_flags_t flags); | ||
71 | |||
72 | /* Gamma-specific code pulled from drm_dma.h: | ||
73 | */ | ||
74 | extern void DRM(clear_next_buffer)(drm_device_t *dev); | ||
75 | extern int DRM(select_queue)(drm_device_t *dev, | ||
76 | void (*wrapper)(unsigned long)); | ||
77 | extern int DRM(dma_enqueue)(struct file *filp, drm_dma_t *dma); | ||
78 | extern int DRM(dma_get_buffers)(struct file *filp, drm_dma_t *dma); | ||
79 | |||
80 | |||
81 | /* Gamma-specific code pulled from drm_lists.h (now renamed gamma_lists.h): | ||
82 | */ | ||
83 | extern int DRM(waitlist_create)(drm_waitlist_t *bl, int count); | ||
84 | extern int DRM(waitlist_destroy)(drm_waitlist_t *bl); | ||
85 | extern int DRM(waitlist_put)(drm_waitlist_t *bl, drm_buf_t *buf); | ||
86 | extern drm_buf_t *DRM(waitlist_get)(drm_waitlist_t *bl); | ||
87 | extern int DRM(freelist_create)(drm_freelist_t *bl, int count); | ||
88 | extern int DRM(freelist_destroy)(drm_freelist_t *bl); | ||
89 | extern int DRM(freelist_put)(drm_device_t *dev, drm_freelist_t *bl, | ||
90 | drm_buf_t *buf); | ||
91 | extern drm_buf_t *DRM(freelist_get)(drm_freelist_t *bl, int block); | ||
92 | |||
93 | /* externs for gamma changes to the ops */ | ||
94 | extern struct file_operations DRM(fops); | ||
95 | extern unsigned int gamma_fops_poll(struct file *filp, struct poll_table_struct *wait); | ||
96 | extern ssize_t gamma_fops_read(struct file *filp, char __user *buf, size_t count, loff_t *off); | ||
97 | |||
98 | |||
99 | #define GLINT_DRI_BUF_COUNT 256 | ||
100 | |||
101 | #define GAMMA_OFF(reg) \ | ||
102 | ((reg < 0x1000) \ | ||
103 | ? reg \ | ||
104 | : ((reg < 0x10000) \ | ||
105 | ? (reg - 0x1000) \ | ||
106 | : ((reg < 0x11000) \ | ||
107 | ? (reg - 0x10000) \ | ||
108 | : (reg - 0x11000)))) | ||
109 | |||
110 | #define GAMMA_BASE(reg) ((unsigned long) \ | ||
111 | ((reg < 0x1000) ? dev_priv->mmio0->handle : \ | ||
112 | ((reg < 0x10000) ? dev_priv->mmio1->handle : \ | ||
113 | ((reg < 0x11000) ? dev_priv->mmio2->handle : \ | ||
114 | dev_priv->mmio3->handle)))) | ||
115 | #define GAMMA_ADDR(reg) (GAMMA_BASE(reg) + GAMMA_OFF(reg)) | ||
116 | #define GAMMA_DEREF(reg) *(__volatile__ int *)GAMMA_ADDR(reg) | ||
117 | #define GAMMA_READ(reg) GAMMA_DEREF(reg) | ||
118 | #define GAMMA_WRITE(reg,val) do { GAMMA_DEREF(reg) = val; } while (0) | ||
119 | |||
120 | #define GAMMA_BROADCASTMASK 0x9378 | ||
121 | #define GAMMA_COMMANDINTENABLE 0x0c48 | ||
122 | #define GAMMA_DMAADDRESS 0x0028 | ||
123 | #define GAMMA_DMACOUNT 0x0030 | ||
124 | #define GAMMA_FILTERMODE 0x8c00 | ||
125 | #define GAMMA_GCOMMANDINTFLAGS 0x0c50 | ||
126 | #define GAMMA_GCOMMANDMODE 0x0c40 | ||
127 | #define GAMMA_QUEUED_DMA_MODE (1<<1) | ||
128 | #define GAMMA_GCOMMANDSTATUS 0x0c60 | ||
129 | #define GAMMA_GDELAYTIMER 0x0c38 | ||
130 | #define GAMMA_GDMACONTROL 0x0060 | ||
131 | #define GAMMA_USE_AGP (1<<1) | ||
132 | #define GAMMA_GINTENABLE 0x0808 | ||
133 | #define GAMMA_GINTFLAGS 0x0810 | ||
134 | #define GAMMA_INFIFOSPACE 0x0018 | ||
135 | #define GAMMA_OUTFIFOWORDS 0x0020 | ||
136 | #define GAMMA_OUTPUTFIFO 0x2000 | ||
137 | #define GAMMA_SYNC 0x8c40 | ||
138 | #define GAMMA_SYNC_TAG 0x0188 | ||
139 | #define GAMMA_PAGETABLEADDR 0x0C00 | ||
140 | #define GAMMA_PAGETABLELENGTH 0x0C08 | ||
141 | |||
142 | #define GAMMA_PASSTHROUGH 0x1FE | ||
143 | #define GAMMA_DMAADDRTAG 0x530 | ||
144 | #define GAMMA_DMACOUNTTAG 0x531 | ||
145 | #define GAMMA_COMMANDINTTAG 0x532 | ||
146 | |||
147 | #endif | ||
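The GAMMA_OFF/GAMMA_BASE pair above routes a flat register offset into one of the four MMIO maps. A worked example using values from the defines above (GAMMA_READ, as everywhere in this driver, expands against a dev_priv in scope):

    /* GAMMA_INFIFOSPACE is 0x0018, below 0x1000, so:
     *   GAMMA_OFF(0x0018)  -> 0x0018 (offset unchanged)
     *   GAMMA_BASE(0x0018) -> dev_priv->mmio0->handle
     *   GAMMA_READ(...)    -> *(volatile int *)(mmio0->handle + 0x18)
     * GAMMA_FILTERMODE (0x8c00) falls in the 0x1000..0xffff window
     * instead and is read through mmio1 at offset 0x7c00. */
    int space = GAMMA_READ(GAMMA_INFIFOSPACE);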
diff --git a/drivers/char/drm/gamma_lists.h b/drivers/char/drm/gamma_lists.h deleted file mode 100644 index 2d93f412b96b..000000000000 --- a/drivers/char/drm/gamma_lists.h +++ /dev/null | |||
@@ -1,215 +0,0 @@ | |||
1 | /* drm_lists.h -- Buffer list handling routines -*- linux-c -*- | ||
2 | * Created: Mon Apr 19 20:54:22 1999 by faith@valinux.com | ||
3 | * | ||
4 | * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas. | ||
5 | * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. | ||
6 | * All Rights Reserved. | ||
7 | * | ||
8 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
9 | * copy of this software and associated documentation files (the "Software"), | ||
10 | * to deal in the Software without restriction, including without limitation | ||
11 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
12 | * and/or sell copies of the Software, and to permit persons to whom the | ||
13 | * Software is furnished to do so, subject to the following conditions: | ||
14 | * | ||
15 | * The above copyright notice and this permission notice (including the next | ||
16 | * paragraph) shall be included in all copies or substantial portions of the | ||
17 | * Software. | ||
18 | * | ||
19 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
20 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
21 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
22 | * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
23 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
24 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
25 | * OTHER DEALINGS IN THE SOFTWARE. | ||
26 | * | ||
27 | * Authors: | ||
28 | * Rickard E. (Rik) Faith <faith@valinux.com> | ||
29 | * Gareth Hughes <gareth@valinux.com> | ||
30 | */ | ||
31 | |||
32 | #include "drmP.h" | ||
33 | |||
34 | |||
35 | int DRM(waitlist_create)(drm_waitlist_t *bl, int count) | ||
36 | { | ||
37 | if (bl->count) return -EINVAL; | ||
38 | |||
39 | bl->bufs = DRM(alloc)((count + 2) * sizeof(*bl->bufs), | ||
40 | DRM_MEM_BUFLISTS); | ||
41 | |||
42 | if (!bl->bufs) return -ENOMEM; | ||
43 | memset(bl->bufs, 0, (count + 2) * sizeof(*bl->bufs)); | ||
44 | bl->count = count; | ||
45 | bl->rp = bl->bufs; | ||
46 | bl->wp = bl->bufs; | ||
47 | bl->end = &bl->bufs[bl->count+1]; | ||
48 | spin_lock_init(&bl->write_lock); | ||
49 | spin_lock_init(&bl->read_lock); | ||
50 | return 0; | ||
51 | } | ||
52 | |||
53 | int DRM(waitlist_destroy)(drm_waitlist_t *bl) | ||
54 | { | ||
55 | if (bl->rp != bl->wp) return -EINVAL; | ||
56 | if (bl->bufs) DRM(free)(bl->bufs, | ||
57 | (bl->count + 2) * sizeof(*bl->bufs), | ||
58 | DRM_MEM_BUFLISTS); | ||
59 | bl->count = 0; | ||
60 | bl->bufs = NULL; | ||
61 | bl->rp = NULL; | ||
62 | bl->wp = NULL; | ||
63 | bl->end = NULL; | ||
64 | return 0; | ||
65 | } | ||
66 | |||
67 | int DRM(waitlist_put)(drm_waitlist_t *bl, drm_buf_t *buf) | ||
68 | { | ||
69 | int left; | ||
70 | unsigned long flags; | ||
71 | |||
72 | left = DRM_LEFTCOUNT(bl); | ||
73 | if (!left) { | ||
74 | DRM_ERROR("Overflow while adding buffer %d from filp %p\n", | ||
75 | buf->idx, buf->filp); | ||
76 | return -EINVAL; | ||
77 | } | ||
78 | buf->list = DRM_LIST_WAIT; | ||
79 | |||
80 | spin_lock_irqsave(&bl->write_lock, flags); | ||
81 | *bl->wp = buf; | ||
82 | if (++bl->wp >= bl->end) bl->wp = bl->bufs; | ||
83 | spin_unlock_irqrestore(&bl->write_lock, flags); | ||
84 | |||
85 | return 0; | ||
86 | } | ||
87 | |||
88 | drm_buf_t *DRM(waitlist_get)(drm_waitlist_t *bl) | ||
89 | { | ||
90 | drm_buf_t *buf; | ||
91 | unsigned long flags; | ||
92 | |||
93 | spin_lock_irqsave(&bl->read_lock, flags); | ||
94 | buf = *bl->rp; | ||
95 | if (bl->rp == bl->wp) { | ||
96 | spin_unlock_irqrestore(&bl->read_lock, flags); | ||
97 | return NULL; | ||
98 | } | ||
99 | if (++bl->rp >= bl->end) bl->rp = bl->bufs; | ||
100 | spin_unlock_irqrestore(&bl->read_lock, flags); | ||
101 | |||
102 | return buf; | ||
103 | } | ||
104 | |||
105 | int DRM(freelist_create)(drm_freelist_t *bl, int count) | ||
106 | { | ||
107 | atomic_set(&bl->count, 0); | ||
108 | bl->next = NULL; | ||
109 | init_waitqueue_head(&bl->waiting); | ||
110 | bl->low_mark = 0; | ||
111 | bl->high_mark = 0; | ||
112 | atomic_set(&bl->wfh, 0); | ||
113 | spin_lock_init(&bl->lock); | ||
114 | ++bl->initialized; | ||
115 | return 0; | ||
116 | } | ||
117 | |||
118 | int DRM(freelist_destroy)(drm_freelist_t *bl) | ||
119 | { | ||
120 | atomic_set(&bl->count, 0); | ||
121 | bl->next = NULL; | ||
122 | return 0; | ||
123 | } | ||
124 | |||
125 | int DRM(freelist_put)(drm_device_t *dev, drm_freelist_t *bl, drm_buf_t *buf) | ||
126 | { | ||
127 | drm_device_dma_t *dma = dev->dma; | ||
128 | |||
129 | if (!dma) { | ||
130 | DRM_ERROR("No DMA support\n"); | ||
131 | return 1; | ||
132 | } | ||
133 | |||
134 | if (buf->waiting || buf->pending || buf->list == DRM_LIST_FREE) { | ||
135 | DRM_ERROR("Freed buffer %d: w%d, p%d, l%d\n", | ||
136 | buf->idx, buf->waiting, buf->pending, buf->list); | ||
137 | } | ||
138 | if (!bl) return 1; | ||
139 | buf->list = DRM_LIST_FREE; | ||
140 | |||
141 | spin_lock(&bl->lock); | ||
142 | buf->next = bl->next; | ||
143 | bl->next = buf; | ||
144 | spin_unlock(&bl->lock); | ||
145 | |||
146 | atomic_inc(&bl->count); | ||
147 | if (atomic_read(&bl->count) > dma->buf_count) { | ||
148 | DRM_ERROR("%d of %d buffers free after addition of %d\n", | ||
149 | atomic_read(&bl->count), dma->buf_count, buf->idx); | ||
150 | return 1; | ||
151 | } | ||
152 | /* Check for high water mark */ | ||
153 | if (atomic_read(&bl->wfh) && atomic_read(&bl->count)>=bl->high_mark) { | ||
154 | atomic_set(&bl->wfh, 0); | ||
155 | wake_up_interruptible(&bl->waiting); | ||
156 | } | ||
157 | return 0; | ||
158 | } | ||
159 | |||
160 | static drm_buf_t *DRM(freelist_try)(drm_freelist_t *bl) | ||
161 | { | ||
162 | drm_buf_t *buf; | ||
163 | |||
164 | if (!bl) return NULL; | ||
165 | |||
166 | /* Get buffer */ | ||
167 | spin_lock(&bl->lock); | ||
168 | if (!bl->next) { | ||
169 | spin_unlock(&bl->lock); | ||
170 | return NULL; | ||
171 | } | ||
172 | buf = bl->next; | ||
173 | bl->next = bl->next->next; | ||
174 | spin_unlock(&bl->lock); | ||
175 | |||
176 | atomic_dec(&bl->count); | ||
177 | buf->next = NULL; | ||
178 | buf->list = DRM_LIST_NONE; | ||
179 | if (buf->waiting || buf->pending) { | ||
180 | DRM_ERROR("Free buffer %d: w%d, p%d, l%d\n", | ||
181 | buf->idx, buf->waiting, buf->pending, buf->list); | ||
182 | } | ||
183 | |||
184 | return buf; | ||
185 | } | ||
186 | |||
187 | drm_buf_t *DRM(freelist_get)(drm_freelist_t *bl, int block) | ||
188 | { | ||
189 | drm_buf_t *buf = NULL; | ||
190 | DECLARE_WAITQUEUE(entry, current); | ||
191 | |||
192 | if (!bl || !bl->initialized) return NULL; | ||
193 | |||
194 | /* Check for low water mark */ | ||
195 | if (atomic_read(&bl->count) <= bl->low_mark) /* Became low */ | ||
196 | atomic_set(&bl->wfh, 1); | ||
197 | if (atomic_read(&bl->wfh)) { | ||
198 | if (block) { | ||
199 | add_wait_queue(&bl->waiting, &entry); | ||
200 | for (;;) { | ||
201 | current->state = TASK_INTERRUPTIBLE; | ||
202 | if (!atomic_read(&bl->wfh) | ||
203 | && (buf = DRM(freelist_try)(bl))) break; | ||
204 | schedule(); | ||
205 | if (signal_pending(current)) break; | ||
206 | } | ||
207 | current->state = TASK_RUNNING; | ||
208 | remove_wait_queue(&bl->waiting, &entry); | ||
209 | } | ||
210 | return buf; | ||
211 | } | ||
212 | |||
213 | return DRM(freelist_try)(bl); | ||
214 | } | ||
215 | |||
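A minimal sketch of how the ring above is used (the helper is hypothetical; drm_waitlist_t and drm_buf_t come from drmP.h, and rp == wp means the ring is empty):

    static int waitlist_roundtrip(drm_waitlist_t *wl, drm_buf_t *buf)
    {
            int err = DRM(waitlist_put)(wl, buf);   /* producer side */

            if (err)
                    return err;                     /* ring was full */
            /* FIFO: with the ring previously empty, the consumer side
             * must hand back the same buffer. */
            return (DRM(waitlist_get)(wl) == buf) ? 0 : -EIO;
    }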
diff --git a/drivers/char/drm/gamma_lock.h b/drivers/char/drm/gamma_lock.h deleted file mode 100644 index ddec67e4ed16..000000000000 --- a/drivers/char/drm/gamma_lock.h +++ /dev/null | |||
@@ -1,140 +0,0 @@ | |||
1 | /* lock.c -- IOCTLs for locking -*- linux-c -*- | ||
2 | * Created: Tue Feb 2 08:37:54 1999 by faith@valinux.com | ||
3 | * | ||
4 | * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas. | ||
5 | * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. | ||
6 | * All Rights Reserved. | ||
7 | * | ||
8 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
9 | * copy of this software and associated documentation files (the "Software"), | ||
10 | * to deal in the Software without restriction, including without limitation | ||
11 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
12 | * and/or sell copies of the Software, and to permit persons to whom the | ||
13 | * Software is furnished to do so, subject to the following conditions: | ||
14 | * | ||
15 | * The above copyright notice and this permission notice (including the next | ||
16 | * paragraph) shall be included in all copies or substantial portions of the | ||
17 | * Software. | ||
18 | * | ||
19 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
20 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
21 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
22 | * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
23 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
24 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
25 | * OTHER DEALINGS IN THE SOFTWARE. | ||
26 | * | ||
27 | * Authors: | ||
28 | * Rickard E. (Rik) Faith <faith@valinux.com> | ||
29 | * Gareth Hughes <gareth@valinux.com> | ||
30 | */ | ||
31 | |||
32 | |||
33 | /* Gamma-specific code extracted from drm_lock.h: | ||
34 | */ | ||
35 | static int DRM(flush_queue)(drm_device_t *dev, int context) | ||
36 | { | ||
37 | DECLARE_WAITQUEUE(entry, current); | ||
38 | int ret = 0; | ||
39 | drm_queue_t *q = dev->queuelist[context]; | ||
40 | |||
41 | DRM_DEBUG("\n"); | ||
42 | |||
43 | atomic_inc(&q->use_count); | ||
44 | if (atomic_read(&q->use_count) > 1) { | ||
45 | atomic_inc(&q->block_write); | ||
46 | add_wait_queue(&q->flush_queue, &entry); | ||
47 | atomic_inc(&q->block_count); | ||
48 | for (;;) { | ||
49 | current->state = TASK_INTERRUPTIBLE; | ||
50 | if (!DRM_BUFCOUNT(&q->waitlist)) break; | ||
51 | schedule(); | ||
52 | if (signal_pending(current)) { | ||
53 | ret = -EINTR; /* Can't restart */ | ||
54 | break; | ||
55 | } | ||
56 | } | ||
57 | atomic_dec(&q->block_count); | ||
58 | current->state = TASK_RUNNING; | ||
59 | remove_wait_queue(&q->flush_queue, &entry); | ||
60 | } | ||
61 | atomic_dec(&q->use_count); | ||
62 | |||
63 | /* NOTE: block_write is still incremented! | ||
64 | Use drm_flush_unlock_queue to decrement. */ | ||
65 | return ret; | ||
66 | } | ||
67 | |||
68 | static int DRM(flush_unblock_queue)(drm_device_t *dev, int context) | ||
69 | { | ||
70 | drm_queue_t *q = dev->queuelist[context]; | ||
71 | |||
72 | DRM_DEBUG("\n"); | ||
73 | |||
74 | atomic_inc(&q->use_count); | ||
75 | if (atomic_read(&q->use_count) > 1) { | ||
76 | if (atomic_read(&q->block_write)) { | ||
77 | atomic_dec(&q->block_write); | ||
78 | wake_up_interruptible(&q->write_queue); | ||
79 | } | ||
80 | } | ||
81 | atomic_dec(&q->use_count); | ||
82 | return 0; | ||
83 | } | ||
84 | |||
85 | int DRM(flush_block_and_flush)(drm_device_t *dev, int context, | ||
86 | drm_lock_flags_t flags) | ||
87 | { | ||
88 | int ret = 0; | ||
89 | int i; | ||
90 | |||
91 | DRM_DEBUG("\n"); | ||
92 | |||
93 | if (flags & _DRM_LOCK_FLUSH) { | ||
94 | ret = DRM(flush_queue)(dev, DRM_KERNEL_CONTEXT); | ||
95 | if (!ret) ret = DRM(flush_queue)(dev, context); | ||
96 | } | ||
97 | if (flags & _DRM_LOCK_FLUSH_ALL) { | ||
98 | for (i = 0; !ret && i < dev->queue_count; i++) { | ||
99 | ret = DRM(flush_queue)(dev, i); | ||
100 | } | ||
101 | } | ||
102 | return ret; | ||
103 | } | ||
104 | |||
105 | int DRM(flush_unblock)(drm_device_t *dev, int context, drm_lock_flags_t flags) | ||
106 | { | ||
107 | int ret = 0; | ||
108 | int i; | ||
109 | |||
110 | DRM_DEBUG("\n"); | ||
111 | |||
112 | if (flags & _DRM_LOCK_FLUSH) { | ||
113 | ret = DRM(flush_unblock_queue)(dev, DRM_KERNEL_CONTEXT); | ||
114 | if (!ret) ret = DRM(flush_unblock_queue)(dev, context); | ||
115 | } | ||
116 | if (flags & _DRM_LOCK_FLUSH_ALL) { | ||
117 | for (i = 0; !ret && i < dev->queue_count; i++) { | ||
118 | ret = DRM(flush_unblock_queue)(dev, i); | ||
119 | } | ||
120 | } | ||
121 | |||
122 | return ret; | ||
123 | } | ||
124 | |||
125 | int DRM(finish)(struct inode *inode, struct file *filp, unsigned int cmd, | ||
126 | unsigned long arg) | ||
127 | { | ||
128 | drm_file_t *priv = filp->private_data; | ||
129 | drm_device_t *dev = priv->dev; | ||
130 | int ret = 0; | ||
131 | drm_lock_t lock; | ||
132 | |||
133 | DRM_DEBUG("\n"); | ||
134 | |||
135 | if (copy_from_user(&lock, (drm_lock_t __user *)arg, sizeof(lock))) | ||
136 | return -EFAULT; | ||
137 | ret = DRM(flush_block_and_flush)(dev, lock.context, lock.flags); | ||
138 | DRM(flush_unblock)(dev, lock.context, lock.flags); | ||
139 | return ret; | ||
140 | } | ||
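The finish ioctl above is a two-phase flush: DRM(flush_block_and_flush) blocks writers and drains the queues, then DRM(flush_unblock) drops the block_write counts it left raised. A hypothetical userspace sketch of exercising it (DRM_IOCTL_FINISH and drm_lock_t come from the drm uapi headers; fd is an open DRM device node):

    drm_lock_t lock = { .context = ctx, .flags = _DRM_LOCK_FLUSH };

    if (ioctl(fd, DRM_IOCTL_FINISH, &lock) != 0)
            perror("DRM_IOCTL_FINISH");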
diff --git a/drivers/char/drm/gamma_old_dma.h b/drivers/char/drm/gamma_old_dma.h deleted file mode 100644 index abdd454aab9f..000000000000 --- a/drivers/char/drm/gamma_old_dma.h +++ /dev/null | |||
@@ -1,313 +0,0 @@ | |||
1 | /* drm_dma.c -- DMA IOCTL and function support -*- linux-c -*- | ||
2 | * Created: Fri Mar 19 14:30:16 1999 by faith@valinux.com | ||
3 | * | ||
4 | * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas. | ||
5 | * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. | ||
6 | * All Rights Reserved. | ||
7 | * | ||
8 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
9 | * copy of this software and associated documentation files (the "Software"), | ||
10 | * to deal in the Software without restriction, including without limitation | ||
11 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
12 | * and/or sell copies of the Software, and to permit persons to whom the | ||
13 | * Software is furnished to do so, subject to the following conditions: | ||
14 | * | ||
15 | * The above copyright notice and this permission notice (including the next | ||
16 | * paragraph) shall be included in all copies or substantial portions of the | ||
17 | * Software. | ||
18 | * | ||
19 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
20 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
21 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
22 | * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
23 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
24 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
25 | * OTHER DEALINGS IN THE SOFTWARE. | ||
26 | * | ||
27 | * Authors: | ||
28 | * Rickard E. (Rik) Faith <faith@valinux.com> | ||
29 | * Gareth Hughes <gareth@valinux.com> | ||
30 | */ | ||
31 | |||
32 | |||
33 | /* Gamma-specific code pulled from drm_dma.h: | ||
34 | */ | ||
35 | |||
36 | void DRM(clear_next_buffer)(drm_device_t *dev) | ||
37 | { | ||
38 | drm_device_dma_t *dma = dev->dma; | ||
39 | |||
40 | dma->next_buffer = NULL; | ||
41 | if (dma->next_queue && !DRM_BUFCOUNT(&dma->next_queue->waitlist)) { | ||
42 | wake_up_interruptible(&dma->next_queue->flush_queue); | ||
43 | } | ||
44 | dma->next_queue = NULL; | ||
45 | } | ||
46 | |||
47 | int DRM(select_queue)(drm_device_t *dev, void (*wrapper)(unsigned long)) | ||
48 | { | ||
49 | int i; | ||
50 | int candidate = -1; | ||
51 | int j = jiffies; | ||
52 | |||
53 | if (!dev) { | ||
54 | DRM_ERROR("No device\n"); | ||
55 | return -1; | ||
56 | } | ||
57 | if (!dev->queuelist || !dev->queuelist[DRM_KERNEL_CONTEXT]) { | ||
58 | /* This only happens between the time the | ||
59 | interrupt is initialized and the time | ||
60 | the queues are initialized. */ | ||
61 | return -1; | ||
62 | } | ||
63 | |||
64 | /* Doing "while locked" DMA? */ | ||
65 | if (DRM_WAITCOUNT(dev, DRM_KERNEL_CONTEXT)) { | ||
66 | return DRM_KERNEL_CONTEXT; | ||
67 | } | ||
68 | |||
69 | /* If there are buffers on the last_context | ||
70 | queue, and we have not been executing | ||
71 | this context very long, continue to | ||
72 | execute this context. */ | ||
73 | if (dev->last_switch <= j | ||
74 | && dev->last_switch + DRM_TIME_SLICE > j | ||
75 | && DRM_WAITCOUNT(dev, dev->last_context)) { | ||
76 | return dev->last_context; | ||
77 | } | ||
78 | |||
79 | /* Otherwise, find a candidate */ | ||
80 | for (i = dev->last_checked + 1; i < dev->queue_count; i++) { | ||
81 | if (DRM_WAITCOUNT(dev, i)) { | ||
82 | candidate = dev->last_checked = i; | ||
83 | break; | ||
84 | } | ||
85 | } | ||
86 | |||
87 | if (candidate < 0) { | ||
88 | for (i = 0; i < dev->queue_count; i++) { | ||
89 | if (DRM_WAITCOUNT(dev, i)) { | ||
90 | candidate = dev->last_checked = i; | ||
91 | break; | ||
92 | } | ||
93 | } | ||
94 | } | ||
95 | |||
96 | if (wrapper | ||
97 | && candidate >= 0 | ||
98 | && candidate != dev->last_context | ||
99 | && dev->last_switch <= j | ||
100 | && dev->last_switch + DRM_TIME_SLICE > j) { | ||
101 | if (dev->timer.expires != dev->last_switch + DRM_TIME_SLICE) { | ||
102 | del_timer(&dev->timer); | ||
103 | dev->timer.function = wrapper; | ||
104 | dev->timer.data = (unsigned long)dev; | ||
105 | dev->timer.expires = dev->last_switch+DRM_TIME_SLICE; | ||
106 | add_timer(&dev->timer); | ||
107 | } | ||
108 | return -1; | ||
109 | } | ||
110 | |||
111 | return candidate; | ||
112 | } | ||
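Summarizing the scheduling policy the function above implements (a descriptive recap, not new behavior):

    /* Decision order in DRM(select_queue):
     *  1. buffers queued on DRM_KERNEL_CONTEXT -> "while locked" DMA wins;
     *  2. last_context still inside its DRM_TIME_SLICE and holding
     *     waiting buffers -> keep running it;
     *  3. otherwise scan round-robin from last_checked + 1 (wrapping to
     *     0) for any context with waiting buffers;
     *  4. if a switch would preempt an unexpired slice, arm dev->timer
     *     to re-run the scheduler at last_switch + DRM_TIME_SLICE and
     *     return -1 (no context selected yet). */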
113 | |||
114 | |||
115 | int DRM(dma_enqueue)(struct file *filp, drm_dma_t *d) | ||
116 | { | ||
117 | drm_file_t *priv = filp->private_data; | ||
118 | drm_device_t *dev = priv->dev; | ||
119 | int i; | ||
120 | drm_queue_t *q; | ||
121 | drm_buf_t *buf; | ||
122 | int idx; | ||
123 | int while_locked = 0; | ||
124 | drm_device_dma_t *dma = dev->dma; | ||
125 | int *ind; | ||
126 | int err; | ||
127 | DECLARE_WAITQUEUE(entry, current); | ||
128 | |||
129 | DRM_DEBUG("%d\n", d->send_count); | ||
130 | |||
131 | if (d->flags & _DRM_DMA_WHILE_LOCKED) { | ||
132 | int context = dev->lock.hw_lock->lock; | ||
133 | |||
134 | if (!_DRM_LOCK_IS_HELD(context)) { | ||
135 | DRM_ERROR("No lock held during \"while locked\"" | ||
136 | " request\n"); | ||
137 | return -EINVAL; | ||
138 | } | ||
139 | if (d->context != _DRM_LOCKING_CONTEXT(context) | ||
140 | && _DRM_LOCKING_CONTEXT(context) != DRM_KERNEL_CONTEXT) { | ||
141 | DRM_ERROR("Lock held by %d while %d makes" | ||
142 | " \"while locked\" request\n", | ||
143 | _DRM_LOCKING_CONTEXT(context), | ||
144 | d->context); | ||
145 | return -EINVAL; | ||
146 | } | ||
147 | q = dev->queuelist[DRM_KERNEL_CONTEXT]; | ||
148 | while_locked = 1; | ||
149 | } else { | ||
150 | q = dev->queuelist[d->context]; | ||
151 | } | ||
152 | |||
153 | |||
154 | atomic_inc(&q->use_count); | ||
155 | if (atomic_read(&q->block_write)) { | ||
156 | add_wait_queue(&q->write_queue, &entry); | ||
157 | atomic_inc(&q->block_count); | ||
158 | for (;;) { | ||
159 | current->state = TASK_INTERRUPTIBLE; | ||
160 | if (!atomic_read(&q->block_write)) break; | ||
161 | schedule(); | ||
162 | if (signal_pending(current)) { | ||
163 | atomic_dec(&q->use_count); | ||
164 | remove_wait_queue(&q->write_queue, &entry); | ||
165 | return -EINTR; | ||
166 | } | ||
167 | } | ||
168 | atomic_dec(&q->block_count); | ||
169 | current->state = TASK_RUNNING; | ||
170 | remove_wait_queue(&q->write_queue, &entry); | ||
171 | } | ||
172 | |||
173 | ind = DRM(alloc)(d->send_count * sizeof(int), DRM_MEM_DRIVER); | ||
174 | if (!ind) | ||
175 | return -ENOMEM; | ||
176 | |||
177 | if (copy_from_user(ind, d->send_indices, d->send_count * sizeof(int))) { | ||
178 | err = -EFAULT; | ||
179 | goto out; | ||
180 | } | ||
181 | |||
182 | err = -EINVAL; | ||
183 | for (i = 0; i < d->send_count; i++) { | ||
184 | idx = ind[i]; | ||
185 | if (idx < 0 || idx >= dma->buf_count) { | ||
186 | DRM_ERROR("Index %d (of %d max)\n", | ||
187 | ind[i], dma->buf_count - 1); | ||
188 | goto out; | ||
189 | } | ||
190 | buf = dma->buflist[ idx ]; | ||
191 | if (buf->filp != filp) { | ||
192 | DRM_ERROR("Process %d using buffer not owned\n", | ||
193 | current->pid); | ||
194 | goto out; | ||
195 | } | ||
196 | if (buf->list != DRM_LIST_NONE) { | ||
197 | DRM_ERROR("Process %d using buffer %d on list %d\n", | ||
198 | current->pid, buf->idx, buf->list); | ||
199 | goto out; | ||
200 | } | ||
201 | buf->used = ind[i]; | ||
202 | buf->while_locked = while_locked; | ||
203 | buf->context = d->context; | ||
204 | if (!buf->used) { | ||
205 | DRM_ERROR("Queueing 0 length buffer\n"); | ||
206 | } | ||
207 | if (buf->pending) { | ||
208 | DRM_ERROR("Queueing pending buffer:" | ||
209 | " buffer %d, offset %d\n", | ||
210 | ind[i], i); | ||
211 | goto out; | ||
212 | } | ||
213 | if (buf->waiting) { | ||
214 | DRM_ERROR("Queueing waiting buffer:" | ||
215 | " buffer %d, offset %d\n", | ||
216 | ind[i], i); | ||
217 | goto out; | ||
218 | } | ||
219 | buf->waiting = 1; | ||
220 | if (atomic_read(&q->use_count) == 1 | ||
221 | || atomic_read(&q->finalization)) { | ||
222 | DRM(free_buffer)(dev, buf); | ||
223 | } else { | ||
224 | DRM(waitlist_put)(&q->waitlist, buf); | ||
225 | atomic_inc(&q->total_queued); | ||
226 | } | ||
227 | } | ||
228 | atomic_dec(&q->use_count); | ||
229 | |||
230 | return 0; | ||
231 | |||
232 | out: | ||
233 | DRM(free)(ind, d->send_count * sizeof(int), DRM_MEM_DRIVER); | ||
234 | atomic_dec(&q->use_count); | ||
235 | return err; | ||
236 | } | ||
237 | |||
238 | static int DRM(dma_get_buffers_of_order)(struct file *filp, drm_dma_t *d, | ||
239 | int order) | ||
240 | { | ||
241 | drm_file_t *priv = filp->private_data; | ||
242 | drm_device_t *dev = priv->dev; | ||
243 | int i; | ||
244 | drm_buf_t *buf; | ||
245 | drm_device_dma_t *dma = dev->dma; | ||
246 | |||
247 | for (i = d->granted_count; i < d->request_count; i++) { | ||
248 | buf = DRM(freelist_get)(&dma->bufs[order].freelist, | ||
249 | d->flags & _DRM_DMA_WAIT); | ||
250 | if (!buf) break; | ||
251 | if (buf->pending || buf->waiting) { | ||
252 | DRM_ERROR("Free buffer %d in use: filp %p (w%d, p%d)\n", | ||
253 | buf->idx, | ||
254 | buf->filp, | ||
255 | buf->waiting, | ||
256 | buf->pending); | ||
257 | } | ||
258 | buf->filp = filp; | ||
259 | if (copy_to_user(&d->request_indices[i], | ||
260 | &buf->idx, | ||
261 | sizeof(buf->idx))) | ||
262 | return -EFAULT; | ||
263 | |||
264 | if (copy_to_user(&d->request_sizes[i], | ||
265 | &buf->total, | ||
266 | sizeof(buf->total))) | ||
267 | return -EFAULT; | ||
268 | |||
269 | ++d->granted_count; | ||
270 | } | ||
271 | return 0; | ||
272 | } | ||
273 | |||
274 | |||
275 | int DRM(dma_get_buffers)(struct file *filp, drm_dma_t *dma) | ||
276 | { | ||
277 | int order; | ||
278 | int retcode = 0; | ||
279 | int tmp_order; | ||
280 | |||
281 | order = DRM(order)(dma->request_size); | ||
282 | |||
283 | dma->granted_count = 0; | ||
284 | retcode = DRM(dma_get_buffers_of_order)(filp, dma, order); | ||
285 | |||
286 | if (dma->granted_count < dma->request_count | ||
287 | && (dma->flags & _DRM_DMA_SMALLER_OK)) { | ||
288 | for (tmp_order = order - 1; | ||
289 | !retcode | ||
290 | && dma->granted_count < dma->request_count | ||
291 | && tmp_order >= DRM_MIN_ORDER; | ||
292 | --tmp_order) { | ||
293 | |||
294 | retcode = DRM(dma_get_buffers_of_order)(filp, dma, | ||
295 | tmp_order); | ||
296 | } | ||
297 | } | ||
298 | |||
299 | if (dma->granted_count < dma->request_count | ||
300 | && (dma->flags & _DRM_DMA_LARGER_OK)) { | ||
301 | for (tmp_order = order + 1; | ||
302 | !retcode | ||
303 | && dma->granted_count < dma->request_count | ||
304 | && tmp_order <= DRM_MAX_ORDER; | ||
305 | ++tmp_order) { | ||
306 | |||
307 | retcode = DRM(dma_get_buffers_of_order)(filp, dma, | ||
308 | tmp_order); | ||
309 | } | ||
310 | } | ||
311 | return 0; | ||
312 | } | ||
313 | |||
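The request/fallback loops above lean on DRM(order), the core's ceil-log2 helper. A sketch of the equivalent computation (the core implementation differs in form, not in result):

    static int sketch_order(unsigned long size)
    {
            int order = 0;
            unsigned long s = 1;

            /* smallest order such that 2^order >= size */
            while (s < size) {
                    s <<= 1;
                    order++;
            }
            return order;
    }

So a request_size of 20 KiB maps to order 15 (32 KiB buffers); with _DRM_DMA_SMALLER_OK set, the loop then retries order 14 and below until the request is filled.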
diff --git a/drivers/char/drm/i810_dma.c b/drivers/char/drm/i810_dma.c index 18e0b7622893..2f1659b96fd1 100644 --- a/drivers/char/drm/i810_dma.c +++ b/drivers/char/drm/i810_dma.c | |||
@@ -45,11 +45,6 @@ | |||
45 | #define I810_BUF_UNMAPPED 0 | 45 | #define I810_BUF_UNMAPPED 0 |
46 | #define I810_BUF_MAPPED 1 | 46 | #define I810_BUF_MAPPED 1 |
47 | 47 | ||
48 | #if LINUX_VERSION_CODE <= KERNEL_VERSION(2,4,2) | ||
49 | #define down_write down | ||
50 | #define up_write up | ||
51 | #endif | ||
52 | |||
53 | static drm_buf_t *i810_freelist_get(drm_device_t *dev) | 48 | static drm_buf_t *i810_freelist_get(drm_device_t *dev) |
54 | { | 49 | { |
55 | drm_device_dma_t *dma = dev->dma; | 50 | drm_device_dma_t *dma = dev->dma; |
@@ -351,6 +346,7 @@ static int i810_dma_initialize(drm_device_t *dev, | |||
351 | DRM_ERROR("can not find mmio map!\n"); | 346 | DRM_ERROR("can not find mmio map!\n"); |
352 | return -EINVAL; | 347 | return -EINVAL; |
353 | } | 348 | } |
349 | dev->agp_buffer_token = init->buffers_offset; | ||
354 | dev->agp_buffer_map = drm_core_findmap(dev, init->buffers_offset); | 350 | dev->agp_buffer_map = drm_core_findmap(dev, init->buffers_offset); |
355 | if (!dev->agp_buffer_map) { | 351 | if (!dev->agp_buffer_map) { |
356 | dev->dev_private = (void *)dev_priv; | 352 | dev->dev_private = (void *)dev_priv; |
@@ -1383,3 +1379,19 @@ drm_ioctl_desc_t i810_ioctls[] = { | |||
1383 | }; | 1379 | }; |
1384 | 1380 | ||
1385 | int i810_max_ioctl = DRM_ARRAY_SIZE(i810_ioctls); | 1381 | int i810_max_ioctl = DRM_ARRAY_SIZE(i810_ioctls); |
1382 | |||
1383 | /** | ||
1384 | * Determine if the device really is AGP or not. | ||
1385 | * | ||
1386 | * All Intel graphics chipsets are treated as AGP, even if they are really | ||
1387 | * PCI-e. | ||
1388 | * | ||
1389 | * \param dev The device to be tested. | ||
1390 | * | ||
1391 | * \returns | ||
1392 | * A value of 1 is always returned to indicate every i810 is AGP. | ||
1393 | */ | ||
1394 | int i810_driver_device_is_agp(drm_device_t * dev) | ||
1395 | { | ||
1396 | return 1; | ||
1397 | } | ||
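The device_is_agp hook added here (and, identically, for i830 and i915 below) lets a driver overrule the generic bus probe; Intel chips report as AGP even when the underlying device is PCI-e. A hypothetical sketch of the core-side consumer (the core change is not part of the hunks shown; pci_find_capability is the standard kernel PCI helper):

    static int sketch_device_is_agp(drm_device_t *dev)
    {
            /* a driver callback, when present, overrides the probe */
            if (dev->driver->device_is_agp != NULL)
                    return (*dev->driver->device_is_agp)(dev);

            return pci_find_capability(dev->pdev, PCI_CAP_ID_AGP) != 0;
    }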
diff --git a/drivers/char/drm/i810_drv.c b/drivers/char/drm/i810_drv.c index ff51b3259af9..00609329d578 100644 --- a/drivers/char/drm/i810_drv.c +++ b/drivers/char/drm/i810_drv.c | |||
@@ -84,6 +84,7 @@ static struct drm_driver driver = { | |||
84 | .dev_priv_size = sizeof(drm_i810_buf_priv_t), | 84 | .dev_priv_size = sizeof(drm_i810_buf_priv_t), |
85 | .pretakedown = i810_driver_pretakedown, | 85 | .pretakedown = i810_driver_pretakedown, |
86 | .prerelease = i810_driver_prerelease, | 86 | .prerelease = i810_driver_prerelease, |
87 | .device_is_agp = i810_driver_device_is_agp, | ||
87 | .release = i810_driver_release, | 88 | .release = i810_driver_release, |
88 | .dma_quiescent = i810_driver_dma_quiescent, | 89 | .dma_quiescent = i810_driver_dma_quiescent, |
89 | .reclaim_buffers = i810_reclaim_buffers, | 90 | .reclaim_buffers = i810_reclaim_buffers, |
diff --git a/drivers/char/drm/i810_drv.h b/drivers/char/drm/i810_drv.h index 1b40538d1725..62ee4f58c59a 100644 --- a/drivers/char/drm/i810_drv.h +++ b/drivers/char/drm/i810_drv.h | |||
@@ -120,6 +120,7 @@ extern int i810_driver_dma_quiescent(drm_device_t *dev); | |||
120 | extern void i810_driver_release(drm_device_t *dev, struct file *filp); | 120 | extern void i810_driver_release(drm_device_t *dev, struct file *filp); |
121 | extern void i810_driver_pretakedown(drm_device_t *dev); | 121 | extern void i810_driver_pretakedown(drm_device_t *dev); |
122 | extern void i810_driver_prerelease(drm_device_t *dev, DRMFILE filp); | 122 | extern void i810_driver_prerelease(drm_device_t *dev, DRMFILE filp); |
123 | extern int i810_driver_device_is_agp(drm_device_t * dev); | ||
123 | 124 | ||
124 | #define I810_BASE(reg) ((unsigned long) \ | 125 | #define I810_BASE(reg) ((unsigned long) \ |
125 | dev_priv->mmio_map->handle) | 126 | dev_priv->mmio_map->handle) |
diff --git a/drivers/char/drm/i830_dma.c b/drivers/char/drm/i830_dma.c index dc7733035864..6f89d5796ef3 100644 --- a/drivers/char/drm/i830_dma.c +++ b/drivers/char/drm/i830_dma.c | |||
@@ -47,11 +47,6 @@ | |||
47 | #define I830_BUF_UNMAPPED 0 | 47 | #define I830_BUF_UNMAPPED 0 |
48 | #define I830_BUF_MAPPED 1 | 48 | #define I830_BUF_MAPPED 1 |
49 | 49 | ||
50 | #if LINUX_VERSION_CODE <= KERNEL_VERSION(2,4,2) | ||
51 | #define down_write down | ||
52 | #define up_write up | ||
53 | #endif | ||
54 | |||
55 | static drm_buf_t *i830_freelist_get(drm_device_t *dev) | 50 | static drm_buf_t *i830_freelist_get(drm_device_t *dev) |
56 | { | 51 | { |
57 | drm_device_dma_t *dma = dev->dma; | 52 | drm_device_dma_t *dma = dev->dma; |
@@ -358,6 +353,7 @@ static int i830_dma_initialize(drm_device_t *dev, | |||
358 | DRM_ERROR("can not find mmio map!\n"); | 353 | DRM_ERROR("can not find mmio map!\n"); |
359 | return -EINVAL; | 354 | return -EINVAL; |
360 | } | 355 | } |
356 | dev->agp_buffer_token = init->buffers_offset; | ||
361 | dev->agp_buffer_map = drm_core_findmap(dev, init->buffers_offset); | 357 | dev->agp_buffer_map = drm_core_findmap(dev, init->buffers_offset); |
362 | if(!dev->agp_buffer_map) { | 358 | if(!dev->agp_buffer_map) { |
363 | dev->dev_private = (void *)dev_priv; | 359 | dev->dev_private = (void *)dev_priv; |
@@ -1586,3 +1582,19 @@ drm_ioctl_desc_t i830_ioctls[] = { | |||
1586 | }; | 1582 | }; |
1587 | 1583 | ||
1588 | int i830_max_ioctl = DRM_ARRAY_SIZE(i830_ioctls); | 1584 | int i830_max_ioctl = DRM_ARRAY_SIZE(i830_ioctls); |
1585 | |||
1586 | /** | ||
1587 | * Determine if the device really is AGP or not. | ||
1588 | * | ||
1589 | * All Intel graphics chipsets are treated as AGP, even if they are really | ||
1590 | * PCI-e. | ||
1591 | * | ||
1592 | * \param dev The device to be tested. | ||
1593 | * | ||
1594 | * \returns | ||
1595 | * A value of 1 is always returned to indicate every i8xx is AGP. | ||
1596 | */ | ||
1597 | int i830_driver_device_is_agp(drm_device_t * dev) | ||
1598 | { | ||
1599 | return 1; | ||
1600 | } | ||
diff --git a/drivers/char/drm/i830_drv.c b/drivers/char/drm/i830_drv.c index bc36be76b8b2..0da9cd19919e 100644 --- a/drivers/char/drm/i830_drv.c +++ b/drivers/char/drm/i830_drv.c | |||
@@ -88,6 +88,7 @@ static struct drm_driver driver = { | |||
88 | .dev_priv_size = sizeof(drm_i830_buf_priv_t), | 88 | .dev_priv_size = sizeof(drm_i830_buf_priv_t), |
89 | .pretakedown = i830_driver_pretakedown, | 89 | .pretakedown = i830_driver_pretakedown, |
90 | .prerelease = i830_driver_prerelease, | 90 | .prerelease = i830_driver_prerelease, |
91 | .device_is_agp = i830_driver_device_is_agp, | ||
91 | .release = i830_driver_release, | 92 | .release = i830_driver_release, |
92 | .dma_quiescent = i830_driver_dma_quiescent, | 93 | .dma_quiescent = i830_driver_dma_quiescent, |
93 | .reclaim_buffers = i830_reclaim_buffers, | 94 | .reclaim_buffers = i830_reclaim_buffers, |
diff --git a/drivers/char/drm/i830_drv.h b/drivers/char/drm/i830_drv.h index df7746131dea..63f96a8b6a4a 100644 --- a/drivers/char/drm/i830_drv.h +++ b/drivers/char/drm/i830_drv.h | |||
@@ -137,6 +137,7 @@ extern void i830_driver_pretakedown(drm_device_t *dev); | |||
137 | extern void i830_driver_release(drm_device_t *dev, struct file *filp); | 137 | extern void i830_driver_release(drm_device_t *dev, struct file *filp); |
138 | extern int i830_driver_dma_quiescent(drm_device_t *dev); | 138 | extern int i830_driver_dma_quiescent(drm_device_t *dev); |
139 | extern void i830_driver_prerelease(drm_device_t *dev, DRMFILE filp); | 139 | extern void i830_driver_prerelease(drm_device_t *dev, DRMFILE filp); |
140 | extern int i830_driver_device_is_agp(drm_device_t * dev); | ||
140 | 141 | ||
141 | #define I830_BASE(reg) ((unsigned long) \ | 142 | #define I830_BASE(reg) ((unsigned long) \ |
142 | dev_priv->mmio_map->handle) | 143 | dev_priv->mmio_map->handle) |
diff --git a/drivers/char/drm/i915_dma.c b/drivers/char/drm/i915_dma.c index acf9e52a9507..34f552f90c4a 100644 --- a/drivers/char/drm/i915_dma.c +++ b/drivers/char/drm/i915_dma.c | |||
@@ -95,9 +95,8 @@ static int i915_dma_cleanup(drm_device_t * dev) | |||
95 | drm_core_ioremapfree( &dev_priv->ring.map, dev); | 95 | drm_core_ioremapfree( &dev_priv->ring.map, dev); |
96 | } | 96 | } |
97 | 97 | ||
98 | if (dev_priv->hw_status_page) { | 98 | if (dev_priv->status_page_dmah) { |
99 | drm_pci_free(dev, PAGE_SIZE, dev_priv->hw_status_page, | 99 | drm_pci_free(dev, dev_priv->status_page_dmah); |
100 | dev_priv->dma_status_page); | ||
101 | /* Need to rewrite hardware status page */ | 100 | /* Need to rewrite hardware status page */ |
102 | I915_WRITE(0x02080, 0x1ffff000); | 101 | I915_WRITE(0x02080, 0x1ffff000); |
103 | } | 102 | } |
@@ -174,16 +173,18 @@ static int i915_initialize(drm_device_t * dev, | |||
174 | dev_priv->allow_batchbuffer = 1; | 173 | dev_priv->allow_batchbuffer = 1; |
175 | 174 | ||
176 | /* Program Hardware Status Page */ | 175 | /* Program Hardware Status Page */ |
177 | dev_priv->hw_status_page = drm_pci_alloc(dev, PAGE_SIZE, PAGE_SIZE, | 176 | dev_priv->status_page_dmah = drm_pci_alloc(dev, PAGE_SIZE, PAGE_SIZE, |
178 | 0xffffffff, | 177 | 0xffffffff); |
179 | &dev_priv->dma_status_page); | ||
180 | 178 | ||
181 | if (!dev_priv->hw_status_page) { | 179 | if (!dev_priv->status_page_dmah) { |
182 | dev->dev_private = (void *)dev_priv; | 180 | dev->dev_private = (void *)dev_priv; |
183 | i915_dma_cleanup(dev); | 181 | i915_dma_cleanup(dev); |
184 | DRM_ERROR("Can not allocate hardware status page\n"); | 182 | DRM_ERROR("Can not allocate hardware status page\n"); |
185 | return DRM_ERR(ENOMEM); | 183 | return DRM_ERR(ENOMEM); |
186 | } | 184 | } |
185 | dev_priv->hw_status_page = dev_priv->status_page_dmah->vaddr; | ||
186 | dev_priv->dma_status_page = dev_priv->status_page_dmah->busaddr; | ||
187 | |||
187 | memset(dev_priv->hw_status_page, 0, PAGE_SIZE); | 188 | memset(dev_priv->hw_status_page, 0, PAGE_SIZE); |
188 | DRM_DEBUG("hw status page @ %p\n", dev_priv->hw_status_page); | 189 | DRM_DEBUG("hw status page @ %p\n", dev_priv->hw_status_page); |
189 | 190 | ||
@@ -731,3 +732,19 @@ drm_ioctl_desc_t i915_ioctls[] = { | |||
731 | }; | 732 | }; |
732 | 733 | ||
733 | int i915_max_ioctl = DRM_ARRAY_SIZE(i915_ioctls); | 734 | int i915_max_ioctl = DRM_ARRAY_SIZE(i915_ioctls); |
735 | |||
736 | /** | ||
737 | * Determine if the device really is AGP or not. | ||
738 | * | ||
739 | * All Intel graphics chipsets are treated as AGP, even if they are really | ||
740 | * PCI-e. | ||
741 | * | ||
742 | * \param dev The device to be tested. | ||
743 | * | ||
744 | * \returns | ||
745 | * A value of 1 is always returned to indicate every i9x5 is AGP. | ||
746 | */ | ||
747 | int i915_driver_device_is_agp(drm_device_t * dev) | ||
748 | { | ||
749 | return 1; | ||
750 | } | ||
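The i915 hunks above also switch the hardware status page to the handle-based consistent-memory API: drm_pci_alloc now hands back a drm_dma_handle_t carrying both addresses, and drm_pci_free takes just the handle. The pattern, extracted from the diff (field names vaddr/busaddr as used above):

    drm_dma_handle_t *dmah = drm_pci_alloc(dev, PAGE_SIZE, PAGE_SIZE,
                                           0xffffffff);

    if (dmah) {
            void *cpu = dmah->vaddr;        /* kernel virtual address */
            dma_addr_t bus = dmah->busaddr; /* device-visible address */

            memset(cpu, 0, PAGE_SIZE);
            /* ... point the chip at bus, use cpu from the CPU side ... */
            drm_pci_free(dev, dmah);        /* one handle frees both */
    }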
diff --git a/drivers/char/drm/i915_drv.c b/drivers/char/drm/i915_drv.c index 1f59d3fc79bc..106b9ec02213 100644 --- a/drivers/char/drm/i915_drv.c +++ b/drivers/char/drm/i915_drv.c | |||
@@ -79,6 +79,7 @@ static struct drm_driver driver = { | |||
79 | DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED, | 79 | DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED, |
80 | .pretakedown = i915_driver_pretakedown, | 80 | .pretakedown = i915_driver_pretakedown, |
81 | .prerelease = i915_driver_prerelease, | 81 | .prerelease = i915_driver_prerelease, |
82 | .device_is_agp = i915_driver_device_is_agp, | ||
82 | .irq_preinstall = i915_driver_irq_preinstall, | 83 | .irq_preinstall = i915_driver_irq_preinstall, |
83 | .irq_postinstall = i915_driver_irq_postinstall, | 84 | .irq_postinstall = i915_driver_irq_postinstall, |
84 | .irq_uninstall = i915_driver_irq_uninstall, | 85 | .irq_uninstall = i915_driver_irq_uninstall, |
diff --git a/drivers/char/drm/i915_drv.h b/drivers/char/drm/i915_drv.h index 9c37d2367dd5..70ed4e68eac8 100644 --- a/drivers/char/drm/i915_drv.h +++ b/drivers/char/drm/i915_drv.h | |||
@@ -79,9 +79,10 @@ typedef struct drm_i915_private { | |||
79 | drm_i915_sarea_t *sarea_priv; | 79 | drm_i915_sarea_t *sarea_priv; |
80 | drm_i915_ring_buffer_t ring; | 80 | drm_i915_ring_buffer_t ring; |
81 | 81 | ||
82 | drm_dma_handle_t *status_page_dmah; | ||
82 | void *hw_status_page; | 83 | void *hw_status_page; |
83 | unsigned long counter; | ||
84 | dma_addr_t dma_status_page; | 84 | dma_addr_t dma_status_page; |
85 | unsigned long counter; | ||
85 | 86 | ||
86 | int back_offset; | 87 | int back_offset; |
87 | int front_offset; | 88 | int front_offset; |
@@ -102,6 +103,7 @@ typedef struct drm_i915_private { | |||
102 | extern void i915_kernel_lost_context(drm_device_t * dev); | 103 | extern void i915_kernel_lost_context(drm_device_t * dev); |
103 | extern void i915_driver_pretakedown(drm_device_t *dev); | 104 | extern void i915_driver_pretakedown(drm_device_t *dev); |
104 | extern void i915_driver_prerelease(drm_device_t *dev, DRMFILE filp); | 105 | extern void i915_driver_prerelease(drm_device_t *dev, DRMFILE filp); |
106 | extern int i915_driver_device_is_agp(drm_device_t *dev); | ||
105 | 107 | ||
106 | /* i915_irq.c */ | 108 | /* i915_irq.c */ |
107 | extern int i915_irq_emit(DRM_IOCTL_ARGS); | 109 | extern int i915_irq_emit(DRM_IOCTL_ARGS); |
diff --git a/drivers/char/drm/mga_dma.c b/drivers/char/drm/mga_dma.c index 832eaf8a5068..567b425b784f 100644 --- a/drivers/char/drm/mga_dma.c +++ b/drivers/char/drm/mga_dma.c | |||
@@ -23,18 +23,21 @@ | |||
23 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | 23 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, |
24 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER | 24 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER |
25 | * DEALINGS IN THE SOFTWARE. | 25 | * DEALINGS IN THE SOFTWARE. |
26 | * | 26 | */ |
27 | * Authors: | 27 | |
28 | * Rickard E. (Rik) Faith <faith@valinux.com> | 28 | /** |
29 | * Jeff Hartmann <jhartmann@valinux.com> | 29 | * \file mga_dma.c |
30 | * Keith Whitwell <keith@tungstengraphics.com> | 30 | * DMA support for MGA G200 / G400. |
31 | * | 31 | * |
32 | * Rewritten by: | 32 | * \author Rickard E. (Rik) Faith <faith@valinux.com> |
33 | * Gareth Hughes <gareth@valinux.com> | 33 | * \author Jeff Hartmann <jhartmann@valinux.com> |
34 | * \author Keith Whitwell <keith@tungstengraphics.com> | ||
35 | * \author Gareth Hughes <gareth@valinux.com> | ||
34 | */ | 36 | */ |
35 | 37 | ||
36 | #include "drmP.h" | 38 | #include "drmP.h" |
37 | #include "drm.h" | 39 | #include "drm.h" |
40 | #include "drm_sarea.h" | ||
38 | #include "mga_drm.h" | 41 | #include "mga_drm.h" |
39 | #include "mga_drv.h" | 42 | #include "mga_drv.h" |
40 | 43 | ||
@@ -148,7 +151,7 @@ void mga_do_dma_flush( drm_mga_private_t *dev_priv ) | |||
148 | DRM_DEBUG( " space = 0x%06x\n", primary->space ); | 151 | DRM_DEBUG( " space = 0x%06x\n", primary->space ); |
149 | 152 | ||
150 | mga_flush_write_combine(); | 153 | mga_flush_write_combine(); |
151 | MGA_WRITE( MGA_PRIMEND, tail | MGA_PAGPXFER ); | 154 | MGA_WRITE(MGA_PRIMEND, tail | dev_priv->dma_access); |
152 | 155 | ||
153 | DRM_DEBUG( "done.\n" ); | 156 | DRM_DEBUG( "done.\n" ); |
154 | } | 157 | } |
@@ -190,7 +193,7 @@ void mga_do_dma_wrap_start( drm_mga_private_t *dev_priv ) | |||
190 | DRM_DEBUG( " space = 0x%06x\n", primary->space ); | 193 | DRM_DEBUG( " space = 0x%06x\n", primary->space ); |
191 | 194 | ||
192 | mga_flush_write_combine(); | 195 | mga_flush_write_combine(); |
193 | MGA_WRITE( MGA_PRIMEND, tail | MGA_PAGPXFER ); | 196 | MGA_WRITE(MGA_PRIMEND, tail | dev_priv->dma_access); |
194 | 197 | ||
195 | set_bit( 0, &primary->wrapped ); | 198 | set_bit( 0, &primary->wrapped ); |
196 | DRM_DEBUG( "done.\n" ); | 199 | DRM_DEBUG( "done.\n" ); |
@@ -396,23 +399,383 @@ int mga_freelist_put( drm_device_t *dev, drm_buf_t *buf ) | |||
396 | * DMA initialization, cleanup | 399 | * DMA initialization, cleanup |
397 | */ | 400 | */ |
398 | 401 | ||
402 | |||
403 | int mga_driver_preinit(drm_device_t *dev, unsigned long flags) | ||
404 | { | ||
405 | drm_mga_private_t * dev_priv; | ||
406 | |||
407 | dev_priv = drm_alloc(sizeof(drm_mga_private_t), DRM_MEM_DRIVER); | ||
408 | if (!dev_priv) | ||
409 | return DRM_ERR(ENOMEM); | ||
410 | |||
411 | dev->dev_private = (void *)dev_priv; | ||
412 | memset(dev_priv, 0, sizeof(drm_mga_private_t)); | ||
413 | |||
414 | dev_priv->usec_timeout = MGA_DEFAULT_USEC_TIMEOUT; | ||
415 | dev_priv->chipset = flags; | ||
416 | |||
417 | return 0; | ||
418 | } | ||
419 | |||
420 | /** | ||
421 | * Bootstrap the driver for AGP DMA. | ||
422 | * | ||
423 | * \todo | ||
424 | * Investigate whether there is any benefit to storing the WARP microcode in | ||
425 | * AGP memory. If not, the microcode may as well always be put in PCI | ||
426 | * memory. | ||
427 | * | ||
428 | * \todo | ||
429 | * This routine needs to set dma_bs->agp_mode to the mode actually configured | ||
430 | * in the hardware. Looking just at the Linux AGP driver code, I don't see | ||
431 | * an easy way to determine this. | ||
432 | * | ||
433 | * \sa mga_do_dma_bootstrap, mga_do_pci_dma_bootstrap | ||
434 | */ | ||
435 | static int mga_do_agp_dma_bootstrap(drm_device_t * dev, | ||
436 | drm_mga_dma_bootstrap_t * dma_bs) | ||
437 | { | ||
438 | drm_mga_private_t * const dev_priv = (drm_mga_private_t *) dev->dev_private; | ||
439 | const unsigned int warp_size = mga_warp_microcode_size(dev_priv); | ||
440 | int err; | ||
441 | unsigned offset; | ||
442 | const unsigned secondary_size = dma_bs->secondary_bin_count | ||
443 | * dma_bs->secondary_bin_size; | ||
444 | const unsigned agp_size = (dma_bs->agp_size << 20); | ||
445 | drm_buf_desc_t req; | ||
446 | drm_agp_mode_t mode; | ||
447 | drm_agp_info_t info; | ||
448 | |||
449 | |||
450 | /* Acquire AGP. */ | ||
451 | err = drm_agp_acquire(dev); | ||
452 | if (err) { | ||
453 | DRM_ERROR("Unable to acquire AGP\n"); | ||
454 | return err; | ||
455 | } | ||
456 | |||
457 | err = drm_agp_info(dev, &info); | ||
458 | if (err) { | ||
459 | DRM_ERROR("Unable to get AGP info\n"); | ||
460 | return err; | ||
461 | } | ||
462 | |||
463 | mode.mode = (info.mode & ~0x07) | dma_bs->agp_mode; | ||
464 | err = drm_agp_enable(dev, mode); | ||
465 | if (err) { | ||
466 | DRM_ERROR("Unable to enable AGP (mode = 0x%lx)\n", mode.mode); | ||
467 | return err; | ||
468 | } | ||
469 | |||
470 | |||
471 | /* In addition to the usual AGP mode configuration, the G200 AGP cards | ||
472 | * need to have the AGP mode "manually" set. | ||
473 | */ | ||
474 | |||
475 | if (dev_priv->chipset == MGA_CARD_TYPE_G200) { | ||
476 | if (mode.mode & 0x02) { | ||
477 | MGA_WRITE(MGA_AGP_PLL, MGA_AGP2XPLL_ENABLE); | ||
478 | } | ||
479 | else { | ||
480 | MGA_WRITE(MGA_AGP_PLL, MGA_AGP2XPLL_DISABLE); | ||
481 | } | ||
482 | } | ||
483 | |||
484 | |||
485 | /* Allocate and bind AGP memory. */ | ||
486 | dev_priv->agp_pages = agp_size / PAGE_SIZE; | ||
487 | dev_priv->agp_mem = drm_alloc_agp( dev, dev_priv->agp_pages, 0 ); | ||
488 | if (dev_priv->agp_mem == NULL) { | ||
489 | dev_priv->agp_pages = 0; | ||
490 | DRM_ERROR("Unable to allocate %uMB AGP memory\n", | ||
491 | dma_bs->agp_size); | ||
492 | return DRM_ERR(ENOMEM); | ||
493 | } | ||
494 | |||
495 | err = drm_bind_agp( dev_priv->agp_mem, 0 ); | ||
496 | if (err) { | ||
497 | DRM_ERROR("Unable to bind AGP memory\n"); | ||
498 | return err; | ||
499 | } | ||
500 | |||
501 | offset = 0; | ||
502 | err = drm_addmap( dev, offset, warp_size, | ||
503 | _DRM_AGP, _DRM_READ_ONLY, & dev_priv->warp ); | ||
504 | if (err) { | ||
505 | DRM_ERROR("Unable to map WARP microcode\n"); | ||
506 | return err; | ||
507 | } | ||
508 | |||
509 | offset += warp_size; | ||
510 | err = drm_addmap( dev, offset, dma_bs->primary_size, | ||
511 | _DRM_AGP, _DRM_READ_ONLY, & dev_priv->primary ); | ||
512 | if (err) { | ||
513 | DRM_ERROR("Unable to map primary DMA region\n"); | ||
514 | return err; | ||
515 | } | ||
516 | |||
517 | offset += dma_bs->primary_size; | ||
518 | err = drm_addmap( dev, offset, secondary_size, | ||
519 | _DRM_AGP, 0, & dev->agp_buffer_map ); | ||
520 | if (err) { | ||
521 | DRM_ERROR("Unable to map secondary DMA region\n"); | ||
522 | return err; | ||
523 | } | ||
524 | |||
525 | (void) memset( &req, 0, sizeof(req) ); | ||
526 | req.count = dma_bs->secondary_bin_count; | ||
527 | req.size = dma_bs->secondary_bin_size; | ||
528 | req.flags = _DRM_AGP_BUFFER; | ||
529 | req.agp_start = offset; | ||
530 | |||
531 | err = drm_addbufs_agp( dev, & req ); | ||
532 | if (err) { | ||
533 | DRM_ERROR("Unable to add secondary DMA buffers\n"); | ||
534 | return err; | ||
535 | } | ||
536 | |||
537 | offset += secondary_size; | ||
538 | err = drm_addmap( dev, offset, agp_size - offset, | ||
539 | _DRM_AGP, 0, & dev_priv->agp_textures ); | ||
540 | if (err) { | ||
541 | DRM_ERROR("Unable to map AGP texture region\n"); | ||
542 | return err; | ||
543 | } | ||
544 | |||
545 | drm_core_ioremap(dev_priv->warp, dev); | ||
546 | drm_core_ioremap(dev_priv->primary, dev); | ||
547 | drm_core_ioremap(dev->agp_buffer_map, dev); | ||
548 | |||
549 | if (!dev_priv->warp->handle || | ||
550 | !dev_priv->primary->handle || !dev->agp_buffer_map->handle) { | ||
551 | DRM_ERROR("failed to ioremap agp regions! (%p, %p, %p)\n", | ||
552 | dev_priv->warp->handle, dev_priv->primary->handle, | ||
553 | dev->agp_buffer_map->handle); | ||
554 | return DRM_ERR(ENOMEM); | ||
555 | } | ||
556 | |||
557 | dev_priv->dma_access = MGA_PAGPXFER; | ||
558 | dev_priv->wagp_enable = MGA_WAGP_ENABLE; | ||
559 | |||
560 | DRM_INFO("Initialized card for AGP DMA.\n"); | ||
561 | return 0; | ||
562 | } | ||
563 | |||
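For reference, the carve-up performed by mga_do_agp_dma_bootstrap() above is plain offset arithmetic over a single AGP allocation. A minimal sketch of the resulting aperture layout; the struct and helper below are illustrative only, not part of the driver:

	/* Mirror of the drm_addmap() offsets above: WARP microcode first,
	 * then the primary ring, then the secondary buffer bins, and
	 * whatever is left over becomes the AGP texture region.
	 */
	struct mga_agp_layout {
		unsigned int warp_off;
		unsigned int primary_off;
		unsigned int secondary_off;
		unsigned int texture_off;
		unsigned int texture_size;
	};

	static void mga_sketch_agp_layout(unsigned int warp_size,
					  unsigned int primary_size,
					  unsigned int secondary_size,
					  unsigned int agp_size,
					  struct mga_agp_layout *l)
	{
		l->warp_off      = 0;
		l->primary_off   = warp_size;
		l->secondary_off = l->primary_off + primary_size;
		l->texture_off   = l->secondary_off + secondary_size;
		l->texture_size  = agp_size - l->texture_off;
	}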
564 | /** | ||
565 | * Bootstrap the driver for PCI DMA. | ||
566 | * | ||
567 | * \todo | ||
568 | * The algorithm for decreasing the size of the primary DMA buffer could be | ||
569 | * better. The size should be rounded up to the nearest page size, then | ||
570 | * decrease the request size by a single page each pass through the loop. | ||
571 | * | ||
572 | * \todo | ||
573 | * Determine whether the maximum address passed to drm_pci_alloc is correct. | ||
574 | * The same goes for drm_addbufs_pci. | ||
575 | * | ||
576 | * \sa mga_do_dma_bootstrap, mga_do_agp_dma_bootstrap | ||
577 | */ | ||
578 | static int mga_do_pci_dma_bootstrap(drm_device_t * dev, | ||
579 | drm_mga_dma_bootstrap_t * dma_bs) | ||
580 | { | ||
581 | drm_mga_private_t * const dev_priv = (drm_mga_private_t *) dev->dev_private; | ||
582 | const unsigned int warp_size = mga_warp_microcode_size(dev_priv); | ||
583 | unsigned int primary_size; | ||
584 | unsigned int bin_count; | ||
585 | int err; | ||
586 | drm_buf_desc_t req; | ||
587 | |||
588 | |||
589 | if (dev->dma == NULL) { | ||
590 | DRM_ERROR("dev->dma is NULL\n"); | ||
591 | return DRM_ERR(EFAULT); | ||
592 | } | ||
593 | |||
594 | /* The proper alignment is 0x100 for this mapping */ | ||
595 | err = drm_addmap(dev, 0, warp_size, _DRM_CONSISTENT, | ||
596 | _DRM_READ_ONLY, &dev_priv->warp); | ||
597 | if (err != 0) { | ||
598 | DRM_ERROR("Unable to create mapping for WARP microcode\n"); | ||
599 | return err; | ||
600 | } | ||
601 | |||
602 | /* Other than the bottom two bits being used to encode other | ||
603 | * information, there don't appear to be any restrictions on the | ||
604 | * alignment of the primary or secondary DMA buffers. | ||
605 | */ | ||
606 | |||
607 | for ( primary_size = dma_bs->primary_size | ||
608 | ; primary_size != 0 | ||
609 | ; primary_size >>= 1 ) { | ||
610 | /* The proper alignment for this mapping is 0x04 */ | ||
611 | err = drm_addmap(dev, 0, primary_size, _DRM_CONSISTENT, | ||
612 | _DRM_READ_ONLY, &dev_priv->primary); | ||
613 | if (!err) | ||
614 | break; | ||
615 | } | ||
616 | |||
617 | if (err != 0) { | ||
618 | DRM_ERROR("Unable to allocate primary DMA region\n"); | ||
619 | return DRM_ERR(ENOMEM); | ||
620 | } | ||
621 | |||
622 | if (dev_priv->primary->size != dma_bs->primary_size) { | ||
623 | DRM_INFO("Primary DMA buffer size reduced from %u to %u.\n", | ||
624 | dma_bs->primary_size, | ||
625 | (unsigned) dev_priv->primary->size); | ||
626 | dma_bs->primary_size = dev_priv->primary->size; | ||
627 | } | ||
628 | |||
629 | for ( bin_count = dma_bs->secondary_bin_count | ||
630 | ; bin_count > 0 | ||
631 | ; bin_count-- ) { | ||
632 | (void) memset( &req, 0, sizeof(req) ); | ||
633 | req.count = bin_count; | ||
634 | req.size = dma_bs->secondary_bin_size; | ||
635 | |||
636 | err = drm_addbufs_pci( dev, & req ); | ||
637 | if (!err) { | ||
638 | break; | ||
639 | } | ||
640 | } | ||
641 | |||
642 | if (bin_count == 0) { | ||
643 | DRM_ERROR("Unable to add secondary DMA buffers\n"); | ||
644 | return err; | ||
645 | } | ||
646 | |||
647 | if (bin_count != dma_bs->secondary_bin_count) { | ||
648 | DRM_INFO("Secondary PCI DMA buffer bin count reduced from %u " | ||
649 | "to %u.\n", dma_bs->secondary_bin_count, bin_count); | ||
650 | |||
651 | dma_bs->secondary_bin_count = bin_count; | ||
652 | } | ||
653 | |||
654 | dev_priv->dma_access = 0; | ||
655 | dev_priv->wagp_enable = 0; | ||
656 | |||
657 | dma_bs->agp_mode = 0; | ||
658 | |||
659 | DRM_INFO("Initialized card for PCI DMA.\n"); | ||
660 | return 0; | ||
661 | } | ||
662 | |||
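The first \todo above suggests a gentler fallback than halving: round the request up to a whole number of pages, then retry one page smaller on each pass. A sketch of that alternative loop, assuming the same drm_addmap() call used above (this is not what the driver currently does):

	unsigned int request = PAGE_ALIGN(dma_bs->primary_size);

	for (; request >= PAGE_SIZE; request -= PAGE_SIZE) {
		err = drm_addmap(dev, 0, request, _DRM_CONSISTENT,
				 _DRM_READ_ONLY, &dev_priv->primary);
		if (!err)
			break;	/* largest consistent block that fits */
	}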
663 | |||
664 | static int mga_do_dma_bootstrap(drm_device_t * dev, | ||
665 | drm_mga_dma_bootstrap_t * dma_bs) | ||
666 | { | ||
667 | const int is_agp = (dma_bs->agp_mode != 0) && drm_device_is_agp(dev); | ||
668 | int err = 0; /* stays zero when the AGP path is skipped below */ | ||
669 | drm_mga_private_t * const dev_priv = | ||
670 | (drm_mga_private_t *) dev->dev_private; | ||
671 | |||
672 | |||
673 | dev_priv->used_new_dma_init = 1; | ||
674 | |||
675 | /* The first steps are the same for both PCI- and AGP-based DMA. Map | ||
676 | * the card's MMIO registers and map a status page. | ||
677 | */ | ||
678 | err = drm_addmap( dev, dev_priv->mmio_base, dev_priv->mmio_size, | ||
679 | _DRM_REGISTERS, _DRM_READ_ONLY, & dev_priv->mmio ); | ||
680 | if (err) { | ||
681 | DRM_ERROR("Unable to map MMIO region\n"); | ||
682 | return err; | ||
683 | } | ||
684 | |||
685 | |||
686 | err = drm_addmap( dev, 0, SAREA_MAX, _DRM_SHM, | ||
687 | _DRM_READ_ONLY | _DRM_LOCKED | _DRM_KERNEL, | ||
688 | & dev_priv->status ); | ||
689 | if (err) { | ||
690 | DRM_ERROR("Unable to map status region\n"); | ||
691 | return err; | ||
692 | } | ||
693 | |||
694 | |||
695 | /* The DMA initialization procedure is slightly different for PCI and | ||
696 | * AGP cards. AGP cards just allocate a large block of AGP memory and | ||
697 | * carve off portions of it for internal uses. The remaining memory | ||
698 | * is returned to user-mode to be used for AGP textures. | ||
699 | */ | ||
700 | |||
701 | if (is_agp) { | ||
702 | err = mga_do_agp_dma_bootstrap(dev, dma_bs); | ||
703 | } | ||
704 | |||
705 | /* If we attempted to initialize the card for AGP DMA but failed, | ||
706 | * clean up any mess that may have been created. | ||
707 | */ | ||
708 | |||
709 | if (err) { | ||
710 | mga_do_cleanup_dma(dev); | ||
711 | } | ||
712 | |||
713 | |||
714 | /* Not only do we want to try to initialize PCI cards for PCI DMA, | ||
715 | * but we also try to initialize AGP cards that could not be | ||
716 | * initialized for AGP DMA. This covers the case where we have an AGP | ||
717 | * card in a system with an unsupported AGP chipset. In that case the | ||
718 | * card will be detected as AGP, but we won't be able to allocate any | ||
719 | * AGP memory, etc. | ||
720 | */ | ||
721 | |||
722 | if (!is_agp || err) { | ||
723 | err = mga_do_pci_dma_bootstrap(dev, dma_bs); | ||
724 | } | ||
725 | |||
726 | |||
727 | return err; | ||
728 | } | ||
729 | |||
730 | int mga_dma_bootstrap(DRM_IOCTL_ARGS) | ||
731 | { | ||
732 | DRM_DEVICE; | ||
733 | drm_mga_dma_bootstrap_t bootstrap; | ||
734 | int err; | ||
735 | |||
736 | |||
737 | DRM_COPY_FROM_USER_IOCTL(bootstrap, | ||
738 | (drm_mga_dma_bootstrap_t __user *) data, | ||
739 | sizeof(bootstrap)); | ||
740 | |||
741 | err = mga_do_dma_bootstrap(dev, & bootstrap); | ||
742 | if (! err) { | ||
743 | static const int modes[] = { 0, 1, 2, 2, 4, 4, 4, 4 }; | ||
744 | const drm_mga_private_t * const dev_priv = | ||
745 | (drm_mga_private_t *) dev->dev_private; | ||
746 | |||
747 | if (dev_priv->agp_textures != NULL) { | ||
748 | bootstrap.texture_handle = dev_priv->agp_textures->offset; | ||
749 | bootstrap.texture_size = dev_priv->agp_textures->size; | ||
750 | } | ||
751 | else { | ||
752 | bootstrap.texture_handle = 0; | ||
753 | bootstrap.texture_size = 0; | ||
754 | } | ||
755 | |||
756 | bootstrap.agp_mode = modes[ bootstrap.agp_mode & 0x07 ]; | ||
757 | if (DRM_COPY_TO_USER( (void __user *) data, & bootstrap, | ||
758 | sizeof(bootstrap))) { | ||
759 | err = DRM_ERR(EFAULT); | ||
760 | } | ||
761 | } | ||
762 | else { | ||
763 | mga_do_cleanup_dma(dev); | ||
764 | } | ||
765 | |||
766 | return err; | ||
767 | } | ||
768 | |||
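The modes[] table in mga_dma_bootstrap() folds the AGPSTAT-style bit mask back into a plain transfer rate for user-mode; the highest supported bit wins. Worked examples of the lookup (the values follow directly from the table above):

	/* bootstrap.agp_mode = modes[bootstrap.agp_mode & 0x07]:
	 *   0x00 (PCI DMA was used)  -> 0
	 *   0x01 (AGPSTAT2_1X)       -> 1
	 *   0x02 (AGPSTAT2_2X)       -> 2
	 *   0x03 (1X | 2X)           -> 2
	 *   0x04 (AGPSTAT2_4X)       -> 4
	 *   0x07 (1X | 2X | 4X)      -> 4
	 */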
399 | static int mga_do_init_dma( drm_device_t *dev, drm_mga_init_t *init ) | 769 | static int mga_do_init_dma( drm_device_t *dev, drm_mga_init_t *init ) |
400 | { | 770 | { |
401 | drm_mga_private_t *dev_priv; | 771 | drm_mga_private_t *dev_priv; |
402 | int ret; | 772 | int ret; |
403 | DRM_DEBUG( "\n" ); | 773 | DRM_DEBUG( "\n" ); |
404 | 774 | ||
405 | dev_priv = drm_alloc( sizeof(drm_mga_private_t), DRM_MEM_DRIVER ); | ||
406 | if ( !dev_priv ) | ||
407 | return DRM_ERR(ENOMEM); | ||
408 | |||
409 | memset( dev_priv, 0, sizeof(drm_mga_private_t) ); | ||
410 | 775 | ||
411 | dev_priv->chipset = init->chipset; | 776 | dev_priv = dev->dev_private; |
412 | 777 | ||
413 | dev_priv->usec_timeout = MGA_DEFAULT_USEC_TIMEOUT; | 778 | if (init->sgram) { |
414 | |||
415 | if ( init->sgram ) { | ||
416 | dev_priv->clear_cmd = MGA_DWGCTL_CLEAR | MGA_ATYPE_BLK; | 779 | dev_priv->clear_cmd = MGA_DWGCTL_CLEAR | MGA_ATYPE_BLK; |
417 | } else { | 780 | } else { |
418 | dev_priv->clear_cmd = MGA_DWGCTL_CLEAR | MGA_ATYPE_RSTR; | 781 | dev_priv->clear_cmd = MGA_DWGCTL_CLEAR | MGA_ATYPE_RSTR; |
@@ -436,88 +799,66 @@ static int mga_do_init_dma( drm_device_t *dev, drm_mga_init_t *init ) | |||
436 | 799 | ||
437 | DRM_GETSAREA(); | 800 | DRM_GETSAREA(); |
438 | 801 | ||
439 | if(!dev_priv->sarea) { | 802 | if (!dev_priv->sarea) { |
440 | DRM_ERROR( "failed to find sarea!\n" ); | 803 | DRM_ERROR("failed to find sarea!\n"); |
441 | /* Assign dev_private so we can do cleanup. */ | ||
442 | dev->dev_private = (void *)dev_priv; | ||
443 | mga_do_cleanup_dma( dev ); | ||
444 | return DRM_ERR(EINVAL); | 804 | return DRM_ERR(EINVAL); |
445 | } | 805 | } |
446 | 806 | ||
447 | dev_priv->mmio = drm_core_findmap(dev, init->mmio_offset); | 807 | if (! dev_priv->used_new_dma_init) { |
448 | if(!dev_priv->mmio) { | 808 | dev_priv->status = drm_core_findmap(dev, init->status_offset); |
449 | DRM_ERROR( "failed to find mmio region!\n" ); | 809 | if (!dev_priv->status) { |
450 | /* Assign dev_private so we can do cleanup. */ | 810 | DRM_ERROR("failed to find status page!\n"); |
451 | dev->dev_private = (void *)dev_priv; | 811 | return DRM_ERR(EINVAL); |
452 | mga_do_cleanup_dma( dev ); | 812 | } |
453 | return DRM_ERR(EINVAL); | 813 | dev_priv->mmio = drm_core_findmap(dev, init->mmio_offset); |
454 | } | 814 | if (!dev_priv->mmio) { |
455 | dev_priv->status = drm_core_findmap(dev, init->status_offset); | 815 | DRM_ERROR("failed to find mmio region!\n"); |
456 | if(!dev_priv->status) { | 816 | return DRM_ERR(EINVAL); |
457 | DRM_ERROR( "failed to find status page!\n" ); | 817 | } |
458 | /* Assign dev_private so we can do cleanup. */ | 818 | dev_priv->warp = drm_core_findmap(dev, init->warp_offset); |
459 | dev->dev_private = (void *)dev_priv; | 819 | if (!dev_priv->warp) { |
460 | mga_do_cleanup_dma( dev ); | 820 | DRM_ERROR("failed to find warp microcode region!\n"); |
461 | return DRM_ERR(EINVAL); | 821 | return DRM_ERR(EINVAL); |
462 | } | 822 | } |
463 | dev_priv->warp = drm_core_findmap(dev, init->warp_offset); | 823 | dev_priv->primary = drm_core_findmap(dev, init->primary_offset); |
464 | if(!dev_priv->warp) { | 824 | if (!dev_priv->primary) { |
465 | DRM_ERROR( "failed to find warp microcode region!\n" ); | 825 | DRM_ERROR("failed to find primary dma region!\n"); |
466 | /* Assign dev_private so we can do cleanup. */ | 826 | return DRM_ERR(EINVAL); |
467 | dev->dev_private = (void *)dev_priv; | 827 | } |
468 | mga_do_cleanup_dma( dev ); | 828 | dev->agp_buffer_token = init->buffers_offset; |
469 | return DRM_ERR(EINVAL); | 829 | dev->agp_buffer_map = drm_core_findmap(dev, init->buffers_offset); |
470 | } | 830 | if (!dev->agp_buffer_map) { |
471 | dev_priv->primary = drm_core_findmap(dev, init->primary_offset); | 831 | DRM_ERROR("failed to find dma buffer region!\n"); |
472 | if(!dev_priv->primary) { | 832 | return DRM_ERR(EINVAL); |
473 | DRM_ERROR( "failed to find primary dma region!\n" ); | 833 | } |
474 | /* Assign dev_private so we can do cleanup. */ | 834 | |
475 | dev->dev_private = (void *)dev_priv; | 835 | drm_core_ioremap(dev_priv->warp, dev); |
476 | mga_do_cleanup_dma( dev ); | 836 | drm_core_ioremap(dev_priv->primary, dev); |
477 | return DRM_ERR(EINVAL); | 837 | drm_core_ioremap(dev->agp_buffer_map, dev); |
478 | } | ||
479 | dev->agp_buffer_map = drm_core_findmap(dev, init->buffers_offset); | ||
480 | if(!dev->agp_buffer_map) { | ||
481 | DRM_ERROR( "failed to find dma buffer region!\n" ); | ||
482 | /* Assign dev_private so we can do cleanup. */ | ||
483 | dev->dev_private = (void *)dev_priv; | ||
484 | mga_do_cleanup_dma( dev ); | ||
485 | return DRM_ERR(EINVAL); | ||
486 | } | 838 | } |
487 | 839 | ||
488 | dev_priv->sarea_priv = | 840 | dev_priv->sarea_priv = |
489 | (drm_mga_sarea_t *)((u8 *)dev_priv->sarea->handle + | 841 | (drm_mga_sarea_t *)((u8 *)dev_priv->sarea->handle + |
490 | init->sarea_priv_offset); | 842 | init->sarea_priv_offset); |
491 | 843 | ||
492 | drm_core_ioremap( dev_priv->warp, dev ); | 844 | if (!dev_priv->warp->handle || |
493 | drm_core_ioremap( dev_priv->primary, dev ); | 845 | !dev_priv->primary->handle || |
494 | drm_core_ioremap( dev->agp_buffer_map, dev ); | 846 | ((dev_priv->dma_access != 0) && |
495 | 847 | ((dev->agp_buffer_map == NULL) || | |
496 | if(!dev_priv->warp->handle || | 848 | (dev->agp_buffer_map->handle == NULL)))) { |
497 | !dev_priv->primary->handle || | 849 | DRM_ERROR("failed to ioremap agp regions!\n"); |
498 | !dev->agp_buffer_map->handle ) { | ||
499 | DRM_ERROR( "failed to ioremap agp regions!\n" ); | ||
500 | /* Assign dev_private so we can do cleanup. */ | ||
501 | dev->dev_private = (void *)dev_priv; | ||
502 | mga_do_cleanup_dma( dev ); | ||
503 | return DRM_ERR(ENOMEM); | 850 | return DRM_ERR(ENOMEM); |
504 | } | 851 | } |
505 | 852 | ||
506 | ret = mga_warp_install_microcode( dev_priv ); | 853 | ret = mga_warp_install_microcode(dev_priv); |
507 | if ( ret < 0 ) { | 854 | if (ret < 0) { |
508 | DRM_ERROR( "failed to install WARP ucode!\n" ); | 855 | DRM_ERROR("failed to install WARP ucode!\n"); |
509 | /* Assign dev_private so we can do cleanup. */ | ||
510 | dev->dev_private = (void *)dev_priv; | ||
511 | mga_do_cleanup_dma( dev ); | ||
512 | return ret; | 856 | return ret; |
513 | } | 857 | } |
514 | 858 | ||
515 | ret = mga_warp_init( dev_priv ); | 859 | ret = mga_warp_init(dev_priv); |
516 | if ( ret < 0 ) { | 860 | if (ret < 0) { |
517 | DRM_ERROR( "failed to init WARP engine!\n" ); | 861 | DRM_ERROR("failed to init WARP engine!\n"); |
518 | /* Assign dev_private so we can do cleanup. */ | ||
519 | dev->dev_private = (void *)dev_priv; | ||
520 | mga_do_cleanup_dma( dev ); | ||
521 | return ret; | 862 | return ret; |
522 | } | 863 | } |
523 | 864 | ||
@@ -557,22 +898,18 @@ static int mga_do_init_dma( drm_device_t *dev, drm_mga_init_t *init ) | |||
557 | dev_priv->sarea_priv->last_frame.head = 0; | 898 | dev_priv->sarea_priv->last_frame.head = 0; |
558 | dev_priv->sarea_priv->last_frame.wrap = 0; | 899 | dev_priv->sarea_priv->last_frame.wrap = 0; |
559 | 900 | ||
560 | if ( mga_freelist_init( dev, dev_priv ) < 0 ) { | 901 | if (mga_freelist_init(dev, dev_priv) < 0) { |
561 | DRM_ERROR( "could not initialize freelist\n" ); | 902 | DRM_ERROR("could not initialize freelist\n"); |
562 | /* Assign dev_private so we can do cleanup. */ | ||
563 | dev->dev_private = (void *)dev_priv; | ||
564 | mga_do_cleanup_dma( dev ); | ||
565 | return DRM_ERR(ENOMEM); | 903 | return DRM_ERR(ENOMEM); |
566 | } | 904 | } |
567 | 905 | ||
568 | /* Make dev_private visible to others. */ | ||
569 | dev->dev_private = (void *)dev_priv; | ||
570 | return 0; | 906 | return 0; |
571 | } | 907 | } |
572 | 908 | ||
573 | static int mga_do_cleanup_dma( drm_device_t *dev ) | 909 | static int mga_do_cleanup_dma( drm_device_t *dev ) |
574 | { | 910 | { |
575 | DRM_DEBUG( "\n" ); | 911 | int err = 0; |
912 | DRM_DEBUG("\n"); | ||
576 | 913 | ||
577 | /* Make sure interrupts are disabled here because the uninstall ioctl | 914 | /* Make sure interrupts are disabled here because the uninstall ioctl |
578 | * may not have been called from userspace and after dev_private | 915 | * may not have been called from userspace and after dev_private |
@@ -583,20 +920,49 @@ static int mga_do_cleanup_dma( drm_device_t *dev ) | |||
583 | if ( dev->dev_private ) { | 920 | if ( dev->dev_private ) { |
584 | drm_mga_private_t *dev_priv = dev->dev_private; | 921 | drm_mga_private_t *dev_priv = dev->dev_private; |
585 | 922 | ||
586 | if ( dev_priv->warp != NULL ) | 923 | if ((dev_priv->warp != NULL) |
587 | drm_core_ioremapfree( dev_priv->warp, dev ); | 924 | && (dev_priv->mmio->type != _DRM_CONSISTENT)) |
588 | if ( dev_priv->primary != NULL ) | 925 | drm_core_ioremapfree(dev_priv->warp, dev); |
589 | drm_core_ioremapfree( dev_priv->primary, dev ); | 926 | |
590 | if ( dev->agp_buffer_map != NULL ) | 927 | if ((dev_priv->primary != NULL) |
591 | drm_core_ioremapfree( dev->agp_buffer_map, dev ); | 928 | && (dev_priv->primary->type != _DRM_CONSISTENT)) |
929 | drm_core_ioremapfree(dev_priv->primary, dev); | ||
592 | 930 | ||
593 | if ( dev_priv->head != NULL ) { | 931 | if (dev->agp_buffer_map != NULL) |
594 | mga_freelist_cleanup( dev ); | 932 | drm_core_ioremapfree(dev->agp_buffer_map, dev); |
933 | |||
934 | if (dev_priv->used_new_dma_init) { | ||
935 | if (dev_priv->agp_mem != NULL) { | ||
936 | dev_priv->agp_textures = NULL; | ||
937 | drm_unbind_agp(dev_priv->agp_mem); | ||
938 | |||
939 | drm_free_agp(dev_priv->agp_mem, dev_priv->agp_pages); | ||
940 | dev_priv->agp_pages = 0; | ||
941 | dev_priv->agp_mem = NULL; | ||
942 | } | ||
943 | |||
944 | if ((dev->agp != NULL) && dev->agp->acquired) { | ||
945 | err = drm_agp_release(dev); | ||
946 | } | ||
947 | |||
948 | dev_priv->used_new_dma_init = 0; | ||
595 | } | 949 | } |
596 | 950 | ||
597 | drm_free( dev->dev_private, sizeof(drm_mga_private_t), | 951 | dev_priv->warp = NULL; |
598 | DRM_MEM_DRIVER ); | 952 | dev_priv->primary = NULL; |
599 | dev->dev_private = NULL; | 953 | dev_priv->mmio = NULL; |
954 | dev_priv->status = NULL; | ||
955 | dev_priv->sarea = NULL; | ||
956 | dev_priv->sarea_priv = NULL; | ||
957 | dev->agp_buffer_map = NULL; | ||
958 | |||
959 | memset(&dev_priv->prim, 0, sizeof(dev_priv->prim)); | ||
960 | dev_priv->warp_pipe = 0; | ||
961 | memset(dev_priv->warp_pipe_phys, 0, sizeof(dev_priv->warp_pipe_phys)); | ||
962 | |||
963 | if (dev_priv->head != NULL) { | ||
964 | mga_freelist_cleanup(dev); | ||
965 | } | ||
600 | } | 966 | } |
601 | 967 | ||
602 | return 0; | 968 | return 0; |
@@ -606,14 +972,20 @@ int mga_dma_init( DRM_IOCTL_ARGS ) | |||
606 | { | 972 | { |
607 | DRM_DEVICE; | 973 | DRM_DEVICE; |
608 | drm_mga_init_t init; | 974 | drm_mga_init_t init; |
975 | int err; | ||
609 | 976 | ||
610 | LOCK_TEST_WITH_RETURN( dev, filp ); | 977 | LOCK_TEST_WITH_RETURN( dev, filp ); |
611 | 978 | ||
612 | DRM_COPY_FROM_USER_IOCTL( init, (drm_mga_init_t __user *)data, sizeof(init) ); | 979 | DRM_COPY_FROM_USER_IOCTL(init, (drm_mga_init_t __user *) data, |
980 | sizeof(init)); | ||
613 | 981 | ||
614 | switch ( init.func ) { | 982 | switch ( init.func ) { |
615 | case MGA_INIT_DMA: | 983 | case MGA_INIT_DMA: |
616 | return mga_do_init_dma( dev, &init ); | 984 | err = mga_do_init_dma(dev, &init); |
985 | if (err) { | ||
986 | (void) mga_do_cleanup_dma(dev); | ||
987 | } | ||
988 | return err; | ||
617 | case MGA_CLEANUP_DMA: | 989 | case MGA_CLEANUP_DMA: |
618 | return mga_do_cleanup_dma( dev ); | 990 | return mga_do_cleanup_dma( dev ); |
619 | } | 991 | } |
@@ -742,7 +1114,21 @@ int mga_dma_buffers( DRM_IOCTL_ARGS ) | |||
742 | return ret; | 1114 | return ret; |
743 | } | 1115 | } |
744 | 1116 | ||
745 | void mga_driver_pretakedown(drm_device_t *dev) | 1117 | /** |
1118 | * Called just before the module is unloaded. | ||
1119 | */ | ||
1120 | int mga_driver_postcleanup(drm_device_t * dev) | ||
1121 | { | ||
1122 | drm_free(dev->dev_private, sizeof(drm_mga_private_t), DRM_MEM_DRIVER); | ||
1123 | dev->dev_private = NULL; | ||
1124 | |||
1125 | return 0; | ||
1126 | } | ||
1127 | |||
1128 | /** | ||
1129 | * Called when the last opener of the device is closed. | ||
1130 | */ | ||
1131 | void mga_driver_pretakedown(drm_device_t * dev) | ||
746 | { | 1132 | { |
747 | mga_do_cleanup_dma( dev ); | 1133 | mga_do_cleanup_dma( dev ); |
748 | } | 1134 | } |
diff --git a/drivers/char/drm/mga_drm.h b/drivers/char/drm/mga_drm.h index 521d4451d012..d20aab3bd57b 100644 --- a/drivers/char/drm/mga_drm.h +++ b/drivers/char/drm/mga_drm.h | |||
@@ -73,7 +73,8 @@ | |||
73 | 73 | ||
74 | #define MGA_CARD_TYPE_G200 1 | 74 | #define MGA_CARD_TYPE_G200 1 |
75 | #define MGA_CARD_TYPE_G400 2 | 75 | #define MGA_CARD_TYPE_G400 2 |
76 | 76 | #define MGA_CARD_TYPE_G450 3 /* not currently used */ | |
77 | #define MGA_CARD_TYPE_G550 4 | ||
77 | 78 | ||
78 | #define MGA_FRONT 0x1 | 79 | #define MGA_FRONT 0x1 |
79 | #define MGA_BACK 0x2 | 80 | #define MGA_BACK 0x2 |
@@ -225,10 +226,6 @@ typedef struct _drm_mga_sarea { | |||
225 | } drm_mga_sarea_t; | 226 | } drm_mga_sarea_t; |
226 | 227 | ||
227 | 228 | ||
228 | /* WARNING: If you change any of these defines, make sure to change the | ||
229 | * defines in the Xserver file (xf86drmMga.h) | ||
230 | */ | ||
231 | |||
232 | /* MGA specific ioctls | 229 | /* MGA specific ioctls |
233 | * The device specific ioctl range is 0x40 to 0x79. | 230 | * The device specific ioctl range is 0x40 to 0x79. |
234 | */ | 231 | */ |
@@ -243,6 +240,14 @@ typedef struct _drm_mga_sarea { | |||
243 | #define DRM_MGA_BLIT 0x08 | 240 | #define DRM_MGA_BLIT 0x08 |
244 | #define DRM_MGA_GETPARAM 0x09 | 241 | #define DRM_MGA_GETPARAM 0x09 |
245 | 242 | ||
243 | /* 3.2: | ||
244 | * ioctls for operating on fences. | ||
245 | */ | ||
246 | #define DRM_MGA_SET_FENCE 0x0a | ||
247 | #define DRM_MGA_WAIT_FENCE 0x0b | ||
248 | #define DRM_MGA_DMA_BOOTSTRAP 0x0c | ||
249 | |||
250 | |||
246 | #define DRM_IOCTL_MGA_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_MGA_INIT, drm_mga_init_t) | 251 | #define DRM_IOCTL_MGA_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_MGA_INIT, drm_mga_init_t) |
247 | #define DRM_IOCTL_MGA_FLUSH DRM_IOW( DRM_COMMAND_BASE + DRM_MGA_FLUSH, drm_lock_t) | 252 | #define DRM_IOCTL_MGA_FLUSH DRM_IOW( DRM_COMMAND_BASE + DRM_MGA_FLUSH, drm_lock_t) |
248 | #define DRM_IOCTL_MGA_RESET DRM_IO( DRM_COMMAND_BASE + DRM_MGA_RESET) | 253 | #define DRM_IOCTL_MGA_RESET DRM_IO( DRM_COMMAND_BASE + DRM_MGA_RESET) |
@@ -253,6 +258,9 @@ typedef struct _drm_mga_sarea { | |||
253 | #define DRM_IOCTL_MGA_ILOAD DRM_IOW( DRM_COMMAND_BASE + DRM_MGA_ILOAD, drm_mga_iload_t) | 258 | #define DRM_IOCTL_MGA_ILOAD DRM_IOW( DRM_COMMAND_BASE + DRM_MGA_ILOAD, drm_mga_iload_t) |
254 | #define DRM_IOCTL_MGA_BLIT DRM_IOW( DRM_COMMAND_BASE + DRM_MGA_BLIT, drm_mga_blit_t) | 259 | #define DRM_IOCTL_MGA_BLIT DRM_IOW( DRM_COMMAND_BASE + DRM_MGA_BLIT, drm_mga_blit_t) |
255 | #define DRM_IOCTL_MGA_GETPARAM DRM_IOWR(DRM_COMMAND_BASE + DRM_MGA_GETPARAM, drm_mga_getparam_t) | 260 | #define DRM_IOCTL_MGA_GETPARAM DRM_IOWR(DRM_COMMAND_BASE + DRM_MGA_GETPARAM, drm_mga_getparam_t) |
261 | #define DRM_IOCTL_MGA_SET_FENCE DRM_IOW( DRM_COMMAND_BASE + DRM_MGA_SET_FENCE, uint32_t) | ||
262 | #define DRM_IOCTL_MGA_WAIT_FENCE DRM_IOWR(DRM_COMMAND_BASE + DRM_MGA_WAIT_FENCE, uint32_t) | ||
263 | #define DRM_IOCTL_MGA_DMA_BOOTSTRAP DRM_IOWR(DRM_COMMAND_BASE + DRM_MGA_DMA_BOOTSTRAP, drm_mga_dma_bootstrap_t) | ||
256 | 264 | ||
257 | typedef struct _drm_mga_warp_index { | 265 | typedef struct _drm_mga_warp_index { |
258 | int installed; | 266 | int installed; |
@@ -291,12 +299,72 @@ typedef struct drm_mga_init { | |||
291 | unsigned long buffers_offset; | 299 | unsigned long buffers_offset; |
292 | } drm_mga_init_t; | 300 | } drm_mga_init_t; |
293 | 301 | ||
294 | typedef struct drm_mga_fullscreen { | 302 | typedef struct drm_mga_dma_bootstrap { |
295 | enum { | 303 | /** |
296 | MGA_INIT_FULLSCREEN = 0x01, | 304 | * \name AGP texture region |
297 | MGA_CLEANUP_FULLSCREEN = 0x02 | 305 | * |
298 | } func; | 306 | * On return from the DRM_MGA_DMA_BOOTSTRAP ioctl, these fields will |
299 | } drm_mga_fullscreen_t; | 307 | * be filled in with the actual AGP texture settings. |
308 | * | ||
309 | * \warning | ||
310 | * If these fields are non-zero, but dma_mga_dma_bootstrap::agp_mode | ||
311 | * is zero, it means that PCI memory (most likely through the use of | ||
312 | * an IOMMU) is being used for "AGP" textures. | ||
313 | */ | ||
314 | /*@{*/ | ||
315 | unsigned long texture_handle; /**< Handle used to map AGP textures. */ | ||
316 | uint32_t texture_size; /**< Size of the AGP texture region. */ | ||
317 | /*@}*/ | ||
318 | |||
319 | |||
320 | /** | ||
321 | * Requested size of the primary DMA region. | ||
322 | * | ||
323 | * On return from the DRM_MGA_DMA_BOOTSTRAP ioctl, this field will be | ||
324 | * filled in with the size of the primary DMA region actually allocated. | ||
325 | */ | ||
326 | uint32_t primary_size; | ||
327 | |||
328 | |||
329 | /** | ||
330 | * Requested number of secondary DMA buffers. | ||
331 | * | ||
332 | * On return from the DRM_MGA_DMA_BOOTSTRAP ioctl, this field will be | ||
333 | * filled in with the actual number of secondary DMA buffers | ||
334 | * allocated. Particularly when PCI DMA is used, this may be | ||
335 | * (substantially) less than the number requested. | ||
336 | */ | ||
337 | uint32_t secondary_bin_count; | ||
338 | |||
339 | |||
340 | /** | ||
341 | * Requested size of each secondary DMA buffer. | ||
342 | * | ||
343 | * While the kernel \b is free to reduce | ||
344 | * dma_mga_dma_bootstrap::secondary_bin_count, it is \b not allowed | ||
345 | * to reduce dma_mga_dma_bootstrap::secondary_bin_size. | ||
346 | */ | ||
347 | uint32_t secondary_bin_size; | ||
348 | |||
349 | |||
350 | /** | ||
351 | * Bit-wise mask of AGPSTAT2_* values. Currently only \c AGPSTAT2_1X, | ||
352 | * \c AGPSTAT2_2X, and \c AGPSTAT2_4X are supported. If this value is | ||
353 | * zero, it means that PCI DMA should be used, even if AGP is | ||
354 | * possible. | ||
355 | * | ||
356 | * On return from the DRM_MGA_DMA_BOOTSTRAP ioctl, this field will be | ||
357 | * filled in with the actual AGP mode. If AGP was not available | ||
358 | * (i.e., PCI DMA was used), this value will be zero. | ||
359 | */ | ||
360 | uint32_t agp_mode; | ||
361 | |||
362 | |||
363 | /** | ||
364 | * Desired AGP GART size, measured in megabytes. | ||
365 | */ | ||
366 | uint8_t agp_size; | ||
367 | } drm_mga_dma_bootstrap_t; | ||
300 | 368 | ||
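A minimal user-mode sketch of driving the bootstrap ioctl with this structure; the fd, sizes, and error handling are illustrative assumptions, not values the interface mandates:

	#include <string.h>
	#include <sys/ioctl.h>
	#include "mga_drm.h"

	static int mga_bootstrap_example(int fd)
	{
		drm_mga_dma_bootstrap_t bs;

		memset(&bs, 0, sizeof(bs));
		bs.primary_size        = 1024 * 1024;	/* 1 MB primary ring */
		bs.secondary_bin_count = 128;
		bs.secondary_bin_size  = 64 * 1024;
		bs.agp_mode            = 4;		/* request AGP 4x */
		bs.agp_size            = 32;		/* 32 MB GART */

		if (ioctl(fd, DRM_IOCTL_MGA_DMA_BOOTSTRAP, &bs) != 0)
			return -1;

		/* bs.agp_mode now holds the rate actually configured; zero
		 * means the kernel fell back to PCI DMA.  bs.texture_handle
		 * and bs.texture_size describe the left-over texture region.
		 */
		return 0;
	}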
301 | typedef struct drm_mga_clear { | 369 | typedef struct drm_mga_clear { |
302 | unsigned int flags; | 370 | unsigned int flags; |
@@ -341,6 +409,14 @@ typedef struct _drm_mga_blit { | |||
341 | */ | 409 | */ |
342 | #define MGA_PARAM_IRQ_NR 1 | 410 | #define MGA_PARAM_IRQ_NR 1 |
343 | 411 | ||
412 | /* 3.2: Query the actual card type. The DDX only distinguishes between | ||
413 | * G200 chips and non-G200 chips, which it calls G400. It turns out that | ||
414 | * there are some very subtle differences between the G4x0 chips and the G550 | ||
415 | * chips. Using this parameter query, a client-side driver can detect the | ||
416 | * difference between a G4x0 and a G550. | ||
417 | */ | ||
418 | #define MGA_PARAM_CARD_TYPE 2 | ||
419 | |||
344 | typedef struct drm_mga_getparam { | 420 | typedef struct drm_mga_getparam { |
345 | int param; | 421 | int param; |
346 | void __user *value; | 422 | void __user *value; |
diff --git a/drivers/char/drm/mga_drv.c b/drivers/char/drm/mga_drv.c index 844cca9cb29d..daabbba3b297 100644 --- a/drivers/char/drm/mga_drv.c +++ b/drivers/char/drm/mga_drv.c | |||
@@ -38,8 +38,15 @@ | |||
38 | 38 | ||
39 | #include "drm_pciids.h" | 39 | #include "drm_pciids.h" |
40 | 40 | ||
41 | static int mga_driver_device_is_agp(drm_device_t * dev); | ||
41 | static int postinit( struct drm_device *dev, unsigned long flags ) | 42 | static int postinit( struct drm_device *dev, unsigned long flags ) |
42 | { | 43 | { |
44 | drm_mga_private_t * const dev_priv = | ||
45 | (drm_mga_private_t *) dev->dev_private; | ||
46 | |||
47 | dev_priv->mmio_base = pci_resource_start(dev->pdev, 1); | ||
48 | dev_priv->mmio_size = pci_resource_len(dev->pdev, 1); | ||
49 | |||
43 | dev->counters += 3; | 50 | dev->counters += 3; |
44 | dev->types[6] = _DRM_STAT_IRQ; | 51 | dev->types[6] = _DRM_STAT_IRQ; |
45 | dev->types[7] = _DRM_STAT_PRIMARY; | 52 | dev->types[7] = _DRM_STAT_PRIMARY; |
@@ -79,8 +86,11 @@ extern int mga_max_ioctl; | |||
79 | 86 | ||
80 | static struct drm_driver driver = { | 87 | static struct drm_driver driver = { |
81 | .driver_features = DRIVER_USE_AGP | DRIVER_REQUIRE_AGP | DRIVER_USE_MTRR | DRIVER_HAVE_DMA | DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_IRQ_VBL, | 88 | .driver_features = DRIVER_USE_AGP | DRIVER_REQUIRE_AGP | DRIVER_USE_MTRR | DRIVER_HAVE_DMA | DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_IRQ_VBL, |
89 | .preinit = mga_driver_preinit, | ||
90 | .postcleanup = mga_driver_postcleanup, | ||
82 | .pretakedown = mga_driver_pretakedown, | 91 | .pretakedown = mga_driver_pretakedown, |
83 | .dma_quiescent = mga_driver_dma_quiescent, | 92 | .dma_quiescent = mga_driver_dma_quiescent, |
93 | .device_is_agp = mga_driver_device_is_agp, | ||
84 | .vblank_wait = mga_driver_vblank_wait, | 94 | .vblank_wait = mga_driver_vblank_wait, |
85 | .irq_preinstall = mga_driver_irq_preinstall, | 95 | .irq_preinstall = mga_driver_irq_preinstall, |
86 | .irq_postinstall = mga_driver_irq_postinstall, | 96 | .irq_postinstall = mga_driver_irq_postinstall, |
@@ -128,3 +138,38 @@ module_exit(mga_exit); | |||
128 | MODULE_AUTHOR( DRIVER_AUTHOR ); | 138 | MODULE_AUTHOR( DRIVER_AUTHOR ); |
129 | MODULE_DESCRIPTION( DRIVER_DESC ); | 139 | MODULE_DESCRIPTION( DRIVER_DESC ); |
130 | MODULE_LICENSE("GPL and additional rights"); | 140 | MODULE_LICENSE("GPL and additional rights"); |
141 | |||
142 | /** | ||
143 | * Determine if the device really is AGP or not. | ||
144 | * | ||
145 | * In addition to the usual tests performed by \c drm_device_is_agp, this | ||
146 | * function detects PCI G450 cards that appear to the system exactly like | ||
147 | * AGP G450 cards. | ||
148 | * | ||
149 | * \param dev The device to be tested. | ||
150 | * | ||
151 | * \returns | ||
152 | * If the device is a PCI G450, zero is returned. Otherwise 2 is returned. | ||
153 | */ | ||
154 | static int mga_driver_device_is_agp(drm_device_t * dev) | ||
155 | { | ||
156 | const struct pci_dev * const pdev = dev->pdev; | ||
157 | |||
158 | |||
159 | /* There are PCI versions of the G450. These cards have the | ||
160 | * same PCI ID as the AGP G450, but have an additional PCI-to-PCI | ||
161 | * bridge chip. We detect these cards, which are not currently | ||
162 | * supported by this driver, by looking at the IDs of the bridge | ||
163 | * the "card" sits behind. If the vendor is 0x3388 (Hint Corp) and the | ||
164 | * device is 0x0021 (HB6 Universal PCI-PCI bridge), we reject the | ||
165 | * device. | ||
166 | */ | ||
167 | |||
168 | if ( (pdev->device == 0x0525) | ||
169 | && (pdev->bus->self->vendor == 0x3388) | ||
170 | && (pdev->bus->self->device == 0x0021) ) { | ||
171 | return 0; | ||
172 | } | ||
173 | |||
174 | return 2; | ||
175 | } | ||
diff --git a/drivers/char/drm/mga_drv.h b/drivers/char/drm/mga_drv.h index 9412e2816eb7..b22fdbd4f830 100644 --- a/drivers/char/drm/mga_drv.h +++ b/drivers/char/drm/mga_drv.h | |||
@@ -38,10 +38,10 @@ | |||
38 | 38 | ||
39 | #define DRIVER_NAME "mga" | 39 | #define DRIVER_NAME "mga" |
40 | #define DRIVER_DESC "Matrox G200/G400" | 40 | #define DRIVER_DESC "Matrox G200/G400" |
41 | #define DRIVER_DATE "20021029" | 41 | #define DRIVER_DATE "20050607" |
42 | 42 | ||
43 | #define DRIVER_MAJOR 3 | 43 | #define DRIVER_MAJOR 3 |
44 | #define DRIVER_MINOR 1 | 44 | #define DRIVER_MINOR 2 |
45 | #define DRIVER_PATCHLEVEL 0 | 45 | #define DRIVER_PATCHLEVEL 0 |
46 | 46 | ||
47 | typedef struct drm_mga_primary_buffer { | 47 | typedef struct drm_mga_primary_buffer { |
@@ -87,9 +87,43 @@ typedef struct drm_mga_private { | |||
87 | int chipset; | 87 | int chipset; |
88 | int usec_timeout; | 88 | int usec_timeout; |
89 | 89 | ||
90 | /** | ||
91 | * If set, the new DMA initialization sequence was used. This is | ||
92 | * primarily used to select how the driver should uninitialize its | ||
93 | * internal DMA structures. | ||
94 | */ | ||
95 | int used_new_dma_init; | ||
96 | |||
97 | /** | ||
98 | * If AGP memory is used for DMA buffers, this will be the value | ||
99 | * \c MGA_PAGPXFER. Otherwise, it will be zero (for a PCI transfer). | ||
100 | */ | ||
101 | u32 dma_access; | ||
102 | |||
103 | /** | ||
104 | * If AGP memory is used for DMA buffers, this will be the value | ||
105 | * \c MGA_WAGP_ENABLE. Otherwise, it will be zero (for a PCI | ||
106 | * transfer). | ||
107 | */ | ||
108 | u32 wagp_enable; | ||
109 | |||
110 | /** | ||
111 | * \name MMIO region parameters. | ||
112 | * | ||
113 | * \sa drm_mga_private_t::mmio | ||
114 | */ | ||
115 | /*@{*/ | ||
116 | u32 mmio_base; /**< Bus address of base of MMIO. */ | ||
117 | u32 mmio_size; /**< Size of the MMIO region. */ | ||
118 | /*@}*/ | ||
119 | |||
90 | u32 clear_cmd; | 120 | u32 clear_cmd; |
91 | u32 maccess; | 121 | u32 maccess; |
92 | 122 | ||
123 | wait_queue_head_t fence_queue; | ||
124 | atomic_t last_fence_retired; | ||
125 | u32 next_fence_to_post; | ||
126 | |||
93 | unsigned int fb_cpp; | 127 | unsigned int fb_cpp; |
94 | unsigned int front_offset; | 128 | unsigned int front_offset; |
95 | unsigned int front_pitch; | 129 | unsigned int front_pitch; |
@@ -108,35 +142,43 @@ typedef struct drm_mga_private { | |||
108 | drm_local_map_t *status; | 142 | drm_local_map_t *status; |
109 | drm_local_map_t *warp; | 143 | drm_local_map_t *warp; |
110 | drm_local_map_t *primary; | 144 | drm_local_map_t *primary; |
111 | drm_local_map_t *buffers; | ||
112 | drm_local_map_t *agp_textures; | 145 | drm_local_map_t *agp_textures; |
146 | |||
147 | DRM_AGP_MEM *agp_mem; | ||
148 | unsigned int agp_pages; | ||
113 | } drm_mga_private_t; | 149 | } drm_mga_private_t; |
114 | 150 | ||
115 | /* mga_dma.c */ | 151 | /* mga_dma.c */ |
116 | extern int mga_dma_init( DRM_IOCTL_ARGS ); | 152 | extern int mga_driver_preinit(drm_device_t * dev, unsigned long flags); |
117 | extern int mga_dma_flush( DRM_IOCTL_ARGS ); | 153 | extern int mga_dma_bootstrap(DRM_IOCTL_ARGS); |
118 | extern int mga_dma_reset( DRM_IOCTL_ARGS ); | 154 | extern int mga_dma_init(DRM_IOCTL_ARGS); |
119 | extern int mga_dma_buffers( DRM_IOCTL_ARGS ); | 155 | extern int mga_dma_flush(DRM_IOCTL_ARGS); |
120 | extern void mga_driver_pretakedown(drm_device_t *dev); | 156 | extern int mga_dma_reset(DRM_IOCTL_ARGS); |
121 | extern int mga_driver_dma_quiescent(drm_device_t *dev); | 157 | extern int mga_dma_buffers(DRM_IOCTL_ARGS); |
122 | 158 | extern int mga_driver_postcleanup(drm_device_t * dev); | |
123 | extern int mga_do_wait_for_idle( drm_mga_private_t *dev_priv ); | 159 | extern void mga_driver_pretakedown(drm_device_t * dev); |
124 | 160 | extern int mga_driver_dma_quiescent(drm_device_t * dev); | |
125 | extern void mga_do_dma_flush( drm_mga_private_t *dev_priv ); | 161 | |
126 | extern void mga_do_dma_wrap_start( drm_mga_private_t *dev_priv ); | 162 | extern int mga_do_wait_for_idle(drm_mga_private_t * dev_priv); |
127 | extern void mga_do_dma_wrap_end( drm_mga_private_t *dev_priv ); | 163 | |
164 | extern void mga_do_dma_flush(drm_mga_private_t * dev_priv); | ||
165 | extern void mga_do_dma_wrap_start(drm_mga_private_t * dev_priv); | ||
166 | extern void mga_do_dma_wrap_end(drm_mga_private_t * dev_priv); | ||
128 | 167 | ||
129 | extern int mga_freelist_put( drm_device_t *dev, drm_buf_t *buf ); | 168 | extern int mga_freelist_put( drm_device_t *dev, drm_buf_t *buf ); |
130 | 169 | ||
131 | /* mga_warp.c */ | 170 | /* mga_warp.c */ |
132 | extern int mga_warp_install_microcode( drm_mga_private_t *dev_priv ); | 171 | extern unsigned int mga_warp_microcode_size(const drm_mga_private_t * dev_priv); |
133 | extern int mga_warp_init( drm_mga_private_t *dev_priv ); | 172 | extern int mga_warp_install_microcode(drm_mga_private_t * dev_priv); |
134 | 173 | extern int mga_warp_init(drm_mga_private_t * dev_priv); | |
135 | extern int mga_driver_vblank_wait(drm_device_t *dev, unsigned int *sequence); | 174 | |
136 | extern irqreturn_t mga_driver_irq_handler( DRM_IRQ_ARGS ); | 175 | /* mga_irq.c */ |
137 | extern void mga_driver_irq_preinstall( drm_device_t *dev ); | 176 | extern int mga_driver_fence_wait(drm_device_t * dev, unsigned int *sequence); |
138 | extern void mga_driver_irq_postinstall( drm_device_t *dev ); | 177 | extern int mga_driver_vblank_wait(drm_device_t * dev, unsigned int *sequence); |
139 | extern void mga_driver_irq_uninstall( drm_device_t *dev ); | 178 | extern irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS); |
179 | extern void mga_driver_irq_preinstall(drm_device_t * dev); | ||
180 | extern void mga_driver_irq_postinstall(drm_device_t * dev); | ||
181 | extern void mga_driver_irq_uninstall(drm_device_t * dev); | ||
140 | extern long mga_compat_ioctl(struct file *filp, unsigned int cmd, | 182 | extern long mga_compat_ioctl(struct file *filp, unsigned int cmd, |
141 | unsigned long arg); | 183 | unsigned long arg); |
142 | 184 | ||
@@ -527,6 +569,12 @@ do { \ | |||
527 | */ | 569 | */ |
528 | #define MGA_EXEC 0x0100 | 570 | #define MGA_EXEC 0x0100 |
529 | 571 | ||
572 | /* AGP PLL encoding (for G200 only). | ||
573 | */ | ||
574 | #define MGA_AGP_PLL 0x1e4c | ||
575 | # define MGA_AGP2XPLL_DISABLE (0 << 0) | ||
576 | # define MGA_AGP2XPLL_ENABLE (1 << 0) | ||
577 | |||
530 | /* Warp registers | 578 | /* Warp registers |
531 | */ | 579 | */ |
532 | #define MGA_WR0 0x2d00 | 580 | #define MGA_WR0 0x2d00 |
diff --git a/drivers/char/drm/mga_ioc32.c b/drivers/char/drm/mga_ioc32.c index bc745cfa2095..77d738e75a4d 100644 --- a/drivers/char/drm/mga_ioc32.c +++ b/drivers/char/drm/mga_ioc32.c | |||
@@ -129,9 +129,76 @@ static int compat_mga_getparam(struct file *file, unsigned int cmd, | |||
129 | DRM_IOCTL_MGA_GETPARAM, (unsigned long)getparam); | 129 | DRM_IOCTL_MGA_GETPARAM, (unsigned long)getparam); |
130 | } | 130 | } |
131 | 131 | ||
132 | typedef struct drm_mga_drm_bootstrap32 { | ||
133 | u32 texture_handle; | ||
134 | u32 texture_size; | ||
135 | u32 primary_size; | ||
136 | u32 secondary_bin_count; | ||
137 | u32 secondary_bin_size; | ||
138 | u32 agp_mode; | ||
139 | u8 agp_size; | ||
140 | } drm_mga_dma_bootstrap32_t; | ||
141 | |||
142 | static int compat_mga_dma_bootstrap(struct file *file, unsigned int cmd, | ||
143 | unsigned long arg) | ||
144 | { | ||
145 | drm_mga_dma_bootstrap32_t dma_bootstrap32; | ||
146 | drm_mga_dma_bootstrap_t __user *dma_bootstrap; | ||
147 | int err; | ||
148 | |||
149 | if (copy_from_user(&dma_bootstrap32, (void __user *)arg, | ||
150 | sizeof(dma_bootstrap32))) | ||
151 | return -EFAULT; | ||
152 | |||
153 | dma_bootstrap = compat_alloc_user_space(sizeof(*dma_bootstrap)); | ||
154 | if (!access_ok(VERIFY_WRITE, dma_bootstrap, sizeof(*dma_bootstrap)) | ||
155 | || __put_user(dma_bootstrap32.texture_handle, | ||
156 | &dma_bootstrap->texture_handle) | ||
157 | || __put_user(dma_bootstrap32.texture_size, | ||
158 | &dma_bootstrap->texture_size) | ||
159 | || __put_user(dma_bootstrap32.primary_size, | ||
160 | &dma_bootstrap->primary_size) | ||
161 | || __put_user(dma_bootstrap32.secondary_bin_count, | ||
162 | &dma_bootstrap->secondary_bin_count) | ||
163 | || __put_user(dma_bootstrap32.secondary_bin_size, | ||
164 | &dma_bootstrap->secondary_bin_size) | ||
165 | || __put_user(dma_bootstrap32.agp_mode, &dma_bootstrap->agp_mode) | ||
166 | || __put_user(dma_bootstrap32.agp_size, &dma_bootstrap->agp_size)) | ||
167 | return -EFAULT; | ||
168 | |||
169 | err = drm_ioctl(file->f_dentry->d_inode, file, | ||
170 | DRM_IOCTL_MGA_DMA_BOOTSTRAP, | ||
171 | (unsigned long)dma_bootstrap); | ||
172 | if (err) | ||
173 | return err; | ||
174 | |||
175 | if (__get_user(dma_bootstrap32.texture_handle, | ||
176 | &dma_bootstrap->texture_handle) | ||
177 | || __get_user(dma_bootstrap32.texture_size, | ||
178 | &dma_bootstrap->texture_size) | ||
179 | || __get_user(dma_bootstrap32.primary_size, | ||
180 | &dma_bootstrap->primary_size) | ||
181 | || __get_user(dma_bootstrap32.secondary_bin_count, | ||
182 | &dma_bootstrap->secondary_bin_count) | ||
183 | || __get_user(dma_bootstrap32.secondary_bin_size, | ||
184 | &dma_bootstrap->secondary_bin_size) | ||
185 | || __get_user(dma_bootstrap32.agp_mode, | ||
186 | &dma_bootstrap->agp_mode) | ||
187 | || __get_user(dma_bootstrap32.agp_size, | ||
188 | &dma_bootstrap->agp_size)) | ||
189 | return -EFAULT; | ||
190 | |||
191 | if (copy_to_user((void __user *)arg, &dma_bootstrap32, | ||
192 | sizeof(dma_bootstrap32))) | ||
193 | return -EFAULT; | ||
194 | |||
195 | return 0; | ||
196 | } | ||
197 | |||
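The shim above is needed because texture_handle is an unsigned long in the native structure: 4 bytes for 32-bit user-space but 8 bytes in an LP64 kernel, so every later field lands at a different offset. Roughly (exact padding is ABI-dependent; the sizes here are an illustration):

	/* drm_mga_dma_bootstrap32_t:    6 * sizeof(u32) + sizeof(u8)
	 *                               = 25 bytes, padded to 28.
	 * drm_mga_dma_bootstrap_t (LP64): 8 + 5 * 4 + 1
	 *                               = 29 bytes, padded to 32.
	 * Hence the field-by-field repacking instead of a blind copy.
	 */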
132 | drm_ioctl_compat_t *mga_compat_ioctls[] = { | 198 | drm_ioctl_compat_t *mga_compat_ioctls[] = { |
133 | [DRM_MGA_INIT] = compat_mga_init, | 199 | [DRM_MGA_INIT] = compat_mga_init, |
134 | [DRM_MGA_GETPARAM] = compat_mga_getparam, | 200 | [DRM_MGA_GETPARAM] = compat_mga_getparam, |
201 | [DRM_MGA_DMA_BOOTSTRAP] = compat_mga_dma_bootstrap, | ||
135 | }; | 202 | }; |
136 | 203 | ||
137 | /** | 204 | /** |
diff --git a/drivers/char/drm/mga_irq.c b/drivers/char/drm/mga_irq.c index bc0b6b5d43a6..52eaa4e788f9 100644 --- a/drivers/char/drm/mga_irq.c +++ b/drivers/char/drm/mga_irq.c | |||
@@ -41,15 +41,40 @@ irqreturn_t mga_driver_irq_handler( DRM_IRQ_ARGS ) | |||
41 | drm_mga_private_t *dev_priv = | 41 | drm_mga_private_t *dev_priv = |
42 | (drm_mga_private_t *)dev->dev_private; | 42 | (drm_mga_private_t *)dev->dev_private; |
43 | int status; | 43 | int status; |
44 | int handled = 0; | ||
45 | |||
46 | status = MGA_READ(MGA_STATUS); | ||
44 | 47 | ||
45 | status = MGA_READ( MGA_STATUS ); | ||
46 | |||
47 | /* VBLANK interrupt */ | 48 | /* VBLANK interrupt */ |
48 | if ( status & MGA_VLINEPEN ) { | 49 | if ( status & MGA_VLINEPEN ) { |
49 | MGA_WRITE( MGA_ICLEAR, MGA_VLINEICLR ); | 50 | MGA_WRITE( MGA_ICLEAR, MGA_VLINEICLR ); |
50 | atomic_inc(&dev->vbl_received); | 51 | atomic_inc(&dev->vbl_received); |
51 | DRM_WAKEUP(&dev->vbl_queue); | 52 | DRM_WAKEUP(&dev->vbl_queue); |
52 | drm_vbl_send_signals( dev ); | 53 | drm_vbl_send_signals(dev); |
54 | handled = 1; | ||
55 | } | ||
56 | |||
57 | /* SOFTRAP interrupt */ | ||
58 | if (status & MGA_SOFTRAPEN) { | ||
59 | const u32 prim_start = MGA_READ(MGA_PRIMADDRESS); | ||
60 | const u32 prim_end = MGA_READ(MGA_PRIMEND); | ||
61 | |||
62 | |||
63 | MGA_WRITE(MGA_ICLEAR, MGA_SOFTRAPICLR); | ||
64 | |||
65 | /* In addition to clearing the interrupt-pending bit, we | ||
66 | * have to write to MGA_PRIMEND to re-start the DMA operation. | ||
67 | */ | ||
68 | if ( (prim_start & ~0x03) != (prim_end & ~0x03) ) { | ||
69 | MGA_WRITE(MGA_PRIMEND, prim_end); | ||
70 | } | ||
71 | |||
72 | atomic_inc(&dev_priv->last_fence_retired); | ||
73 | DRM_WAKEUP(&dev_priv->fence_queue); | ||
74 | handled = 1; | ||
75 | } | ||
76 | |||
77 | if ( handled ) { | ||
53 | return IRQ_HANDLED; | 78 | return IRQ_HANDLED; |
54 | } | 79 | } |
55 | return IRQ_NONE; | 80 | return IRQ_NONE; |
@@ -73,9 +98,28 @@ int mga_driver_vblank_wait(drm_device_t *dev, unsigned int *sequence) | |||
73 | return ret; | 98 | return ret; |
74 | } | 99 | } |
75 | 100 | ||
76 | void mga_driver_irq_preinstall( drm_device_t *dev ) { | 101 | int mga_driver_fence_wait(drm_device_t * dev, unsigned int *sequence) |
77 | drm_mga_private_t *dev_priv = | 102 | { |
78 | (drm_mga_private_t *)dev->dev_private; | 103 | drm_mga_private_t *dev_priv = (drm_mga_private_t *) dev->dev_private; |
104 | unsigned int cur_fence; | ||
105 | int ret = 0; | ||
106 | |||
107 | /* Assume that the user has missed the current sequence number | ||
108 | * by about a day rather than wanting to wait for years | ||
109 | * using fences. | ||
110 | */ | ||
111 | DRM_WAIT_ON(ret, dev_priv->fence_queue, 3 * DRM_HZ, | ||
112 | (((cur_fence = atomic_read(&dev_priv->last_fence_retired)) | ||
113 | - *sequence) <= (1 << 23))); | ||
114 | |||
115 | *sequence = cur_fence; | ||
116 | |||
117 | return ret; | ||
118 | } | ||
119 | |||
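The (cur_fence - *sequence) <= (1 << 23) test in mga_driver_fence_wait() is serial-number arithmetic: the unsigned subtraction stays small once the fence is retired, even across a counter wrap. Two invented examples:

	/* *sequence = 0xfffffff0, counter wrapped, cur_fence = 0x00000010:
	 *   0x00000010 - 0xfffffff0 = 0x20 (mod 2^32) -> retired, wake up.
	 * *sequence = 0x00000010, cur_fence = 0x00000008 (not yet retired):
	 *   0x00000008 - 0x00000010 = 0xfffffff8      -> > 1<<23, keep waiting.
	 */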
120 | void mga_driver_irq_preinstall(drm_device_t * dev) | ||
121 | { | ||
122 | drm_mga_private_t *dev_priv = (drm_mga_private_t *) dev->dev_private; | ||
79 | 123 | ||
80 | /* Disable *all* interrupts */ | 124 | /* Disable *all* interrupts */ |
81 | MGA_WRITE( MGA_IEN, 0 ); | 125 | MGA_WRITE( MGA_IEN, 0 ); |
@@ -83,12 +127,14 @@ void mga_driver_irq_preinstall( drm_device_t *dev ) { | |||
83 | MGA_WRITE( MGA_ICLEAR, ~0 ); | 127 | MGA_WRITE( MGA_ICLEAR, ~0 ); |
84 | } | 128 | } |
85 | 129 | ||
86 | void mga_driver_irq_postinstall( drm_device_t *dev ) { | 130 | void mga_driver_irq_postinstall(drm_device_t * dev) |
87 | drm_mga_private_t *dev_priv = | 131 | { |
88 | (drm_mga_private_t *)dev->dev_private; | 132 | drm_mga_private_t *dev_priv = (drm_mga_private_t *) dev->dev_private; |
133 | |||
134 | DRM_INIT_WAITQUEUE( &dev_priv->fence_queue ); | ||
89 | 135 | ||
90 | /* Turn on VBL interrupt */ | 136 | /* Turn on vertical blank interrupt and soft trap interrupt. */ |
91 | MGA_WRITE( MGA_IEN, MGA_VLINEIEN ); | 137 | MGA_WRITE(MGA_IEN, MGA_VLINEIEN | MGA_SOFTRAPEN); |
92 | } | 138 | } |
93 | 139 | ||
94 | void mga_driver_irq_uninstall( drm_device_t *dev ) { | 140 | void mga_driver_irq_uninstall( drm_device_t *dev ) { |
@@ -98,5 +144,7 @@ void mga_driver_irq_uninstall( drm_device_t *dev ) { | |||
98 | return; | 144 | return; |
99 | 145 | ||
100 | /* Disable *all* interrupts */ | 146 | /* Disable *all* interrupts */ |
101 | MGA_WRITE( MGA_IEN, 0 ); | 147 | MGA_WRITE(MGA_IEN, 0); |
148 | |||
149 | dev->irq_enabled = 0; | ||
102 | } | 150 | } |
diff --git a/drivers/char/drm/mga_state.c b/drivers/char/drm/mga_state.c index 3c7a8f5ba501..05bbb4719376 100644 --- a/drivers/char/drm/mga_state.c +++ b/drivers/char/drm/mga_state.c | |||
@@ -53,16 +53,16 @@ static void mga_emit_clip_rect( drm_mga_private_t *dev_priv, | |||
53 | 53 | ||
54 | /* Force reset of DWGCTL on G400 (eliminates clip disable bit). | 54 | /* Force reset of DWGCTL on G400 (eliminates clip disable bit). |
55 | */ | 55 | */ |
56 | if ( dev_priv->chipset == MGA_CARD_TYPE_G400 ) { | 56 | if (dev_priv->chipset == MGA_CARD_TYPE_G400) { |
57 | DMA_BLOCK( MGA_DWGCTL, ctx->dwgctl, | 57 | DMA_BLOCK(MGA_DWGCTL, ctx->dwgctl, |
58 | MGA_LEN + MGA_EXEC, 0x80000000, | 58 | MGA_LEN + MGA_EXEC, 0x80000000, |
59 | MGA_DWGCTL, ctx->dwgctl, | 59 | MGA_DWGCTL, ctx->dwgctl, |
60 | MGA_LEN + MGA_EXEC, 0x80000000 ); | 60 | MGA_LEN + MGA_EXEC, 0x80000000); |
61 | } | 61 | } |
62 | DMA_BLOCK( MGA_DMAPAD, 0x00000000, | 62 | DMA_BLOCK(MGA_DMAPAD, 0x00000000, |
63 | MGA_CXBNDRY, (box->x2 << 16) | box->x1, | 63 | MGA_CXBNDRY, ((box->x2 - 1) << 16) | box->x1, |
64 | MGA_YTOP, box->y1 * pitch, | 64 | MGA_YTOP, box->y1 * pitch, |
65 | MGA_YBOT, box->y2 * pitch ); | 65 | MGA_YBOT, (box->y2 - 1) * pitch); |
66 | 66 | ||
67 | ADVANCE_DMA(); | 67 | ADVANCE_DMA(); |
68 | } | 68 | } |
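The new -1 adjustments above exist because CXBNDRY and YBOT take inclusive right/bottom limits, while the software box stores exclusive x2/y2. With an invented 640x480 box:

	/* box = { x1 = 0, y1 = 0, x2 = 640, y2 = 480 } covers pixels
	 * 0..639 horizontally and scanlines 0..479:
	 *   MGA_CXBNDRY <- ((640 - 1) << 16) | 0     right clip = 639
	 *   MGA_YBOT    <- (480 - 1) * pitch         last line  = 479
	 */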
@@ -260,12 +260,11 @@ static __inline__ void mga_g200_emit_pipe( drm_mga_private_t *dev_priv ) | |||
260 | 260 | ||
261 | /* Padding required due to hardware bug. | 261 | /* Padding required due to hardware bug. |
262 | */ | 262 | */ |
263 | DMA_BLOCK( MGA_DMAPAD, 0xffffffff, | 263 | DMA_BLOCK(MGA_DMAPAD, 0xffffffff, |
264 | MGA_DMAPAD, 0xffffffff, | 264 | MGA_DMAPAD, 0xffffffff, |
265 | MGA_DMAPAD, 0xffffffff, | 265 | MGA_DMAPAD, 0xffffffff, |
266 | MGA_WIADDR, (dev_priv->warp_pipe_phys[pipe] | | 266 | MGA_WIADDR, (dev_priv->warp_pipe_phys[pipe] | |
267 | MGA_WMODE_START | | 267 | MGA_WMODE_START | dev_priv->wagp_enable)); |
268 | MGA_WAGP_ENABLE) ); | ||
269 | 268 | ||
270 | ADVANCE_DMA(); | 269 | ADVANCE_DMA(); |
271 | } | 270 | } |
@@ -342,12 +341,11 @@ static __inline__ void mga_g400_emit_pipe( drm_mga_private_t *dev_priv ) | |||
342 | MGA_WR60, MGA_G400_WR_MAGIC ); /* tex1 height */ | 341 | MGA_WR60, MGA_G400_WR_MAGIC ); /* tex1 height */ |
343 | 342 | ||
344 | /* Padding required due to hardware bug */ | 343 | /* Padding required due to hardware bug */ |
345 | DMA_BLOCK( MGA_DMAPAD, 0xffffffff, | 344 | DMA_BLOCK(MGA_DMAPAD, 0xffffffff, |
346 | MGA_DMAPAD, 0xffffffff, | 345 | MGA_DMAPAD, 0xffffffff, |
347 | MGA_DMAPAD, 0xffffffff, | 346 | MGA_DMAPAD, 0xffffffff, |
348 | MGA_WIADDR2, (dev_priv->warp_pipe_phys[pipe] | | 347 | MGA_WIADDR2, (dev_priv->warp_pipe_phys[pipe] | |
349 | MGA_WMODE_START | | 348 | MGA_WMODE_START | dev_priv->wagp_enable)); |
350 | MGA_WAGP_ENABLE) ); | ||
351 | 349 | ||
352 | ADVANCE_DMA(); | 350 | ADVANCE_DMA(); |
353 | } | 351 | } |
@@ -459,9 +457,9 @@ static int mga_verify_state( drm_mga_private_t *dev_priv ) | |||
459 | if ( dirty & MGA_UPLOAD_TEX0 ) | 457 | if ( dirty & MGA_UPLOAD_TEX0 ) |
460 | ret |= mga_verify_tex( dev_priv, 0 ); | 458 | ret |= mga_verify_tex( dev_priv, 0 ); |
461 | 459 | ||
462 | if ( dev_priv->chipset == MGA_CARD_TYPE_G400 ) { | 460 | if (dev_priv->chipset >= MGA_CARD_TYPE_G400) { |
463 | if ( dirty & MGA_UPLOAD_TEX1 ) | 461 | if (dirty & MGA_UPLOAD_TEX1) |
464 | ret |= mga_verify_tex( dev_priv, 1 ); | 462 | ret |= mga_verify_tex(dev_priv, 1); |
465 | 463 | ||
466 | if ( dirty & MGA_UPLOAD_PIPE ) | 464 | if ( dirty & MGA_UPLOAD_PIPE ) |
467 | ret |= ( sarea_priv->warp_pipe > MGA_MAX_G400_PIPES ); | 465 | ret |= ( sarea_priv->warp_pipe > MGA_MAX_G400_PIPES ); |
@@ -686,12 +684,12 @@ static void mga_dma_dispatch_vertex( drm_device_t *dev, drm_buf_t *buf ) | |||
686 | 684 | ||
687 | BEGIN_DMA( 1 ); | 685 | BEGIN_DMA( 1 ); |
688 | 686 | ||
689 | DMA_BLOCK( MGA_DMAPAD, 0x00000000, | 687 | DMA_BLOCK(MGA_DMAPAD, 0x00000000, |
690 | MGA_DMAPAD, 0x00000000, | 688 | MGA_DMAPAD, 0x00000000, |
691 | MGA_SECADDRESS, (address | | 689 | MGA_SECADDRESS, (address | |
692 | MGA_DMA_VERTEX), | 690 | MGA_DMA_VERTEX), |
693 | MGA_SECEND, ((address + length) | | 691 | MGA_SECEND, ((address + length) | |
694 | MGA_PAGPXFER) ); | 692 | dev_priv->dma_access)); |
695 | 693 | ||
696 | ADVANCE_DMA(); | 694 | ADVANCE_DMA(); |
697 | } while ( ++i < sarea_priv->nbox ); | 695 | } while ( ++i < sarea_priv->nbox ); |
@@ -733,11 +731,11 @@ static void mga_dma_dispatch_indices( drm_device_t *dev, drm_buf_t *buf, | |||
733 | 731 | ||
734 | BEGIN_DMA( 1 ); | 732 | BEGIN_DMA( 1 ); |
735 | 733 | ||
736 | DMA_BLOCK( MGA_DMAPAD, 0x00000000, | 734 | DMA_BLOCK(MGA_DMAPAD, 0x00000000, |
737 | MGA_DMAPAD, 0x00000000, | 735 | MGA_DMAPAD, 0x00000000, |
738 | MGA_SETUPADDRESS, address + start, | 736 | MGA_SETUPADDRESS, address + start, |
739 | MGA_SETUPEND, ((address + end) | | 737 | MGA_SETUPEND, ((address + end) | |
740 | MGA_PAGPXFER) ); | 738 | dev_priv->dma_access)); |
741 | 739 | ||
742 | ADVANCE_DMA(); | 740 | ADVANCE_DMA(); |
743 | } while ( ++i < sarea_priv->nbox ); | 741 | } while ( ++i < sarea_priv->nbox ); |
@@ -764,7 +762,7 @@ static void mga_dma_dispatch_iload( drm_device_t *dev, drm_buf_t *buf, | |||
764 | drm_mga_private_t *dev_priv = dev->dev_private; | 762 | drm_mga_private_t *dev_priv = dev->dev_private; |
765 | drm_mga_buf_priv_t *buf_priv = buf->dev_private; | 763 | drm_mga_buf_priv_t *buf_priv = buf->dev_private; |
766 | drm_mga_context_regs_t *ctx = &dev_priv->sarea_priv->context_state; | 764 | drm_mga_context_regs_t *ctx = &dev_priv->sarea_priv->context_state; |
767 | u32 srcorg = buf->bus_address | MGA_SRCACC_AGP | MGA_SRCMAP_SYSMEM; | 765 | u32 srcorg = buf->bus_address | dev_priv->dma_access | MGA_SRCMAP_SYSMEM; |
768 | u32 y2; | 766 | u32 y2; |
769 | DMA_LOCALS; | 767 | DMA_LOCALS; |
770 | DRM_DEBUG( "buf=%d used=%d\n", buf->idx, buf->used ); | 768 | DRM_DEBUG( "buf=%d used=%d\n", buf->idx, buf->used ); |
@@ -1095,6 +1093,9 @@ static int mga_getparam( DRM_IOCTL_ARGS ) | |||
1095 | case MGA_PARAM_IRQ_NR: | 1093 | case MGA_PARAM_IRQ_NR: |
1096 | value = dev->irq; | 1094 | value = dev->irq; |
1097 | break; | 1095 | break; |
1096 | case MGA_PARAM_CARD_TYPE: | ||
1097 | value = dev_priv->chipset; | ||
1098 | break; | ||
1098 | default: | 1099 | default: |
1099 | return DRM_ERR(EINVAL); | 1100 | return DRM_ERR(EINVAL); |
1100 | } | 1101 | } |
@@ -1107,17 +1108,82 @@ static int mga_getparam( DRM_IOCTL_ARGS ) | |||
1107 | return 0; | 1108 | return 0; |
1108 | } | 1109 | } |
1109 | 1110 | ||
1111 | static int mga_set_fence(DRM_IOCTL_ARGS) | ||
1112 | { | ||
1113 | DRM_DEVICE; | ||
1114 | drm_mga_private_t *dev_priv = dev->dev_private; | ||
1115 | u32 temp; | ||
1116 | DMA_LOCALS; | ||
1117 | |||
1118 | if (!dev_priv) { | ||
1119 | DRM_ERROR("%s called with no initialization\n", __FUNCTION__); | ||
1120 | return DRM_ERR(EINVAL); | ||
1121 | } | ||
1122 | |||
1123 | DRM_DEBUG("pid=%d\n", DRM_CURRENTPID); | ||
1124 | |||
1125 | /* I would normally do this assignment in the declaration of temp, | ||
1126 | * but dev_priv may be NULL. | ||
1127 | */ | ||
1128 | |||
1129 | temp = dev_priv->next_fence_to_post; | ||
1130 | dev_priv->next_fence_to_post++; | ||
1131 | |||
1132 | BEGIN_DMA(1); | ||
1133 | DMA_BLOCK(MGA_DMAPAD, 0x00000000, | ||
1134 | MGA_DMAPAD, 0x00000000, | ||
1135 | MGA_DMAPAD, 0x00000000, | ||
1136 | MGA_SOFTRAP, 0x00000000); | ||
1137 | ADVANCE_DMA(); | ||
1138 | |||
1139 | if (DRM_COPY_TO_USER( (u32 __user *) data, & temp, sizeof(u32))) { | ||
1140 | DRM_ERROR("copy_to_user\n"); | ||
1141 | return DRM_ERR(EFAULT); | ||
1142 | } | ||
1143 | |||
1144 | return 0; | ||
1145 | } | ||
1146 | |||
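Paired with mga_wait_fence() below, this gives user-mode a cheap completion barrier: post a fence behind queued work, then sleep until the SOFTRAP interrupt retires it. A hedged user-mode sketch (fd as in the bootstrap example above; no such helper ships with the DDX):

	uint32_t fence;

	/* ... queue vertex/iload/blit ioctls ... */
	ioctl(fd, DRM_IOCTL_MGA_SET_FENCE, &fence);	/* emits a SOFTRAP */
	ioctl(fd, DRM_IOCTL_MGA_WAIT_FENCE, &fence);	/* sleeps until retired */
	/* everything submitted before SET_FENCE has now executed */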
1147 | static int mga_wait_fence(DRM_IOCTL_ARGS) | ||
1148 | { | ||
1149 | DRM_DEVICE; | ||
1150 | drm_mga_private_t *dev_priv = dev->dev_private; | ||
1151 | u32 fence; | ||
1152 | |||
1153 | if (!dev_priv) { | ||
1154 | DRM_ERROR("%s called with no initialization\n", __FUNCTION__); | ||
1155 | return DRM_ERR(EINVAL); | ||
1156 | } | ||
1157 | |||
1158 | DRM_COPY_FROM_USER_IOCTL(fence, (u32 __user *) data, sizeof(u32)); | ||
1159 | |||
1160 | DRM_DEBUG("pid=%d\n", DRM_CURRENTPID); | ||
1161 | |||
1162 | mga_driver_fence_wait(dev, & fence); | ||
1163 | |||
1164 | if (DRM_COPY_TO_USER( (u32 __user *) data, & fence, sizeof(u32))) { | ||
1165 | DRM_ERROR("copy_to_user\n"); | ||
1166 | return DRM_ERR(EFAULT); | ||
1167 | } | ||
1168 | |||
1169 | return 0; | ||
1170 | } | ||
1171 | |||
1110 | drm_ioctl_desc_t mga_ioctls[] = { | 1172 | drm_ioctl_desc_t mga_ioctls[] = { |
1111 | [DRM_IOCTL_NR(DRM_MGA_INIT)] = { mga_dma_init, 1, 1 }, | 1173 | [DRM_IOCTL_NR(DRM_MGA_INIT)] = {mga_dma_init, 1, 1}, |
1112 | [DRM_IOCTL_NR(DRM_MGA_FLUSH)] = { mga_dma_flush, 1, 0 }, | 1174 | [DRM_IOCTL_NR(DRM_MGA_FLUSH)] = {mga_dma_flush, 1, 0}, |
1113 | [DRM_IOCTL_NR(DRM_MGA_RESET)] = { mga_dma_reset, 1, 0 }, | 1175 | [DRM_IOCTL_NR(DRM_MGA_RESET)] = {mga_dma_reset, 1, 0}, |
1114 | [DRM_IOCTL_NR(DRM_MGA_SWAP)] = { mga_dma_swap, 1, 0 }, | 1176 | [DRM_IOCTL_NR(DRM_MGA_SWAP)] = {mga_dma_swap, 1, 0}, |
1115 | [DRM_IOCTL_NR(DRM_MGA_CLEAR)] = { mga_dma_clear, 1, 0 }, | 1177 | [DRM_IOCTL_NR(DRM_MGA_CLEAR)] = {mga_dma_clear, 1, 0}, |
1116 | [DRM_IOCTL_NR(DRM_MGA_VERTEX)] = { mga_dma_vertex, 1, 0 }, | 1178 | [DRM_IOCTL_NR(DRM_MGA_VERTEX)] = {mga_dma_vertex, 1, 0}, |
1117 | [DRM_IOCTL_NR(DRM_MGA_INDICES)] = { mga_dma_indices, 1, 0 }, | 1179 | [DRM_IOCTL_NR(DRM_MGA_INDICES)] = {mga_dma_indices, 1, 0}, |
1118 | [DRM_IOCTL_NR(DRM_MGA_ILOAD)] = { mga_dma_iload, 1, 0 }, | 1180 | [DRM_IOCTL_NR(DRM_MGA_ILOAD)] = {mga_dma_iload, 1, 0}, |
1119 | [DRM_IOCTL_NR(DRM_MGA_BLIT)] = { mga_dma_blit, 1, 0 }, | 1181 | [DRM_IOCTL_NR(DRM_MGA_BLIT)] = {mga_dma_blit, 1, 0}, |
1120 | [DRM_IOCTL_NR(DRM_MGA_GETPARAM)]= { mga_getparam, 1, 0 }, | 1182 | [DRM_IOCTL_NR(DRM_MGA_GETPARAM)] = {mga_getparam, 1, 0}, |
1183 | [DRM_IOCTL_NR(DRM_MGA_SET_FENCE)] = {mga_set_fence, 1, 0}, | ||
1184 | [DRM_IOCTL_NR(DRM_MGA_WAIT_FENCE)] = {mga_wait_fence, 1, 0}, | ||
1185 | [DRM_IOCTL_NR(DRM_MGA_DMA_BOOTSTRAP)] = {mga_dma_bootstrap, 1, 1}, | ||
1186 | |||
1121 | }; | 1187 | }; |
1122 | 1188 | ||
1123 | int mga_max_ioctl = DRM_ARRAY_SIZE(mga_ioctls); | 1189 | int mga_max_ioctl = DRM_ARRAY_SIZE(mga_ioctls); |
diff --git a/drivers/char/drm/mga_warp.c b/drivers/char/drm/mga_warp.c index 0a3a0cc700dc..55ccc8a0ac29 100644 --- a/drivers/char/drm/mga_warp.c +++ b/drivers/char/drm/mga_warp.c | |||
@@ -48,65 +48,52 @@ do { \ | |||
48 | vcbase += WARP_UCODE_SIZE( which ); \ | 48 | vcbase += WARP_UCODE_SIZE( which ); \ |
49 | } while (0) | 49 | } while (0) |
50 | 50 | ||
51 | 51 | static const unsigned int mga_warp_g400_microcode_size = | |
52 | static unsigned int mga_warp_g400_microcode_size( drm_mga_private_t *dev_priv ) | 52 | (WARP_UCODE_SIZE(warp_g400_tgz) + |
53 | { | 53 | WARP_UCODE_SIZE(warp_g400_tgza) + |
54 | unsigned int size; | 54 | WARP_UCODE_SIZE(warp_g400_tgzaf) + |
55 | 55 | WARP_UCODE_SIZE(warp_g400_tgzf) + | |
56 | size = ( WARP_UCODE_SIZE( warp_g400_tgz ) + | 56 | WARP_UCODE_SIZE(warp_g400_tgzs) + |
57 | WARP_UCODE_SIZE( warp_g400_tgza ) + | 57 | WARP_UCODE_SIZE(warp_g400_tgzsa) + |
58 | WARP_UCODE_SIZE( warp_g400_tgzaf ) + | 58 | WARP_UCODE_SIZE(warp_g400_tgzsaf) + |
59 | WARP_UCODE_SIZE( warp_g400_tgzf ) + | 59 | WARP_UCODE_SIZE(warp_g400_tgzsf) + |
60 | WARP_UCODE_SIZE( warp_g400_tgzs ) + | 60 | WARP_UCODE_SIZE(warp_g400_t2gz) + |
61 | WARP_UCODE_SIZE( warp_g400_tgzsa ) + | 61 | WARP_UCODE_SIZE(warp_g400_t2gza) + |
62 | WARP_UCODE_SIZE( warp_g400_tgzsaf ) + | 62 | WARP_UCODE_SIZE(warp_g400_t2gzaf) + |
63 | WARP_UCODE_SIZE( warp_g400_tgzsf ) + | 63 | WARP_UCODE_SIZE(warp_g400_t2gzf) + |
64 | WARP_UCODE_SIZE( warp_g400_t2gz ) + | 64 | WARP_UCODE_SIZE(warp_g400_t2gzs) + |
65 | WARP_UCODE_SIZE( warp_g400_t2gza ) + | 65 | WARP_UCODE_SIZE(warp_g400_t2gzsa) + |
66 | WARP_UCODE_SIZE( warp_g400_t2gzaf ) + | 66 | WARP_UCODE_SIZE(warp_g400_t2gzsaf) + |
67 | WARP_UCODE_SIZE( warp_g400_t2gzf ) + | 67 | WARP_UCODE_SIZE(warp_g400_t2gzsf)); |
68 | WARP_UCODE_SIZE( warp_g400_t2gzs ) + | 68 | |
69 | WARP_UCODE_SIZE( warp_g400_t2gzsa ) + | 69 | static const unsigned int mga_warp_g200_microcode_size = |
70 | WARP_UCODE_SIZE( warp_g400_t2gzsaf ) + | 70 | (WARP_UCODE_SIZE(warp_g200_tgz) + |
71 | WARP_UCODE_SIZE( warp_g400_t2gzsf ) ); | 71 | WARP_UCODE_SIZE(warp_g200_tgza) + |
72 | 72 | WARP_UCODE_SIZE(warp_g200_tgzaf) + | |
73 | size = PAGE_ALIGN( size ); | 73 | WARP_UCODE_SIZE(warp_g200_tgzf) + |
74 | 74 | WARP_UCODE_SIZE(warp_g200_tgzs) + | |
75 | DRM_DEBUG( "G400 ucode size = %d bytes\n", size ); | 75 | WARP_UCODE_SIZE(warp_g200_tgzsa) + |
76 | return size; | 76 | WARP_UCODE_SIZE(warp_g200_tgzsaf) + |
77 | } | 77 | WARP_UCODE_SIZE(warp_g200_tgzsf)); |
78 | 78 | ||
79 | static unsigned int mga_warp_g200_microcode_size( drm_mga_private_t *dev_priv ) | 79 | |
80 | unsigned int mga_warp_microcode_size(const drm_mga_private_t * dev_priv) | ||
80 | { | 81 | { |
81 | unsigned int size; | 82 | switch (dev_priv->chipset) { |
82 | 83 | case MGA_CARD_TYPE_G400: | |
83 | size = ( WARP_UCODE_SIZE( warp_g200_tgz ) + | 84 | case MGA_CARD_TYPE_G550: |
84 | WARP_UCODE_SIZE( warp_g200_tgza ) + | 85 | return PAGE_ALIGN(mga_warp_g400_microcode_size); |
85 | WARP_UCODE_SIZE( warp_g200_tgzaf ) + | 86 | case MGA_CARD_TYPE_G200: |
86 | WARP_UCODE_SIZE( warp_g200_tgzf ) + | 87 | return PAGE_ALIGN(mga_warp_g200_microcode_size); |
87 | WARP_UCODE_SIZE( warp_g200_tgzs ) + | 88 | default: |
88 | WARP_UCODE_SIZE( warp_g200_tgzsa ) + | 89 | return 0; |
89 | WARP_UCODE_SIZE( warp_g200_tgzsaf ) + | 90 | } |
90 | WARP_UCODE_SIZE( warp_g200_tgzsf ) ); | ||
91 | |||
92 | size = PAGE_ALIGN( size ); | ||
93 | |||
94 | DRM_DEBUG( "G200 ucode size = %d bytes\n", size ); | ||
95 | return size; | ||
96 | } | 91 | } |
97 | 92 | ||
98 | static int mga_warp_install_g400_microcode( drm_mga_private_t *dev_priv ) | 93 | static int mga_warp_install_g400_microcode( drm_mga_private_t *dev_priv ) |
99 | { | 94 | { |
100 | unsigned char *vcbase = dev_priv->warp->handle; | 95 | unsigned char *vcbase = dev_priv->warp->handle; |
101 | unsigned long pcbase = dev_priv->warp->offset; | 96 | unsigned long pcbase = dev_priv->warp->offset; |
102 | unsigned int size; | ||
103 | |||
104 | size = mga_warp_g400_microcode_size( dev_priv ); | ||
105 | if ( size > dev_priv->warp->size ) { | ||
106 | DRM_ERROR( "microcode too large! (%u > %lu)\n", | ||
107 | size, dev_priv->warp->size ); | ||
108 | return DRM_ERR(ENOMEM); | ||
109 | } | ||
110 | 97 | ||
111 | memset( dev_priv->warp_pipe_phys, 0, | 98 | memset( dev_priv->warp_pipe_phys, 0, |
112 | sizeof(dev_priv->warp_pipe_phys) ); | 99 | sizeof(dev_priv->warp_pipe_phys) ); |
@@ -136,35 +123,36 @@ static int mga_warp_install_g200_microcode( drm_mga_private_t *dev_priv ) | |||
136 | { | 123 | { |
137 | unsigned char *vcbase = dev_priv->warp->handle; | 124 | unsigned char *vcbase = dev_priv->warp->handle; |
138 | unsigned long pcbase = dev_priv->warp->offset; | 125 | unsigned long pcbase = dev_priv->warp->offset; |
139 | unsigned int size; | ||
140 | |||
141 | size = mga_warp_g200_microcode_size( dev_priv ); | ||
142 | if ( size > dev_priv->warp->size ) { | ||
143 | DRM_ERROR( "microcode too large! (%u > %lu)\n", | ||
144 | size, dev_priv->warp->size ); | ||
145 | return DRM_ERR(ENOMEM); | ||
146 | } | ||
147 | 126 | ||
148 | memset( dev_priv->warp_pipe_phys, 0, | 127 | memset(dev_priv->warp_pipe_phys, 0, sizeof(dev_priv->warp_pipe_phys)); |
149 | sizeof(dev_priv->warp_pipe_phys) ); | ||
150 | 128 | ||
151 | WARP_UCODE_INSTALL( warp_g200_tgz, MGA_WARP_TGZ ); | 129 | WARP_UCODE_INSTALL(warp_g200_tgz, MGA_WARP_TGZ); |
152 | WARP_UCODE_INSTALL( warp_g200_tgzf, MGA_WARP_TGZF ); | 130 | WARP_UCODE_INSTALL(warp_g200_tgzf, MGA_WARP_TGZF); |
153 | WARP_UCODE_INSTALL( warp_g200_tgza, MGA_WARP_TGZA ); | 131 | WARP_UCODE_INSTALL(warp_g200_tgza, MGA_WARP_TGZA); |
154 | WARP_UCODE_INSTALL( warp_g200_tgzaf, MGA_WARP_TGZAF ); | 132 | WARP_UCODE_INSTALL(warp_g200_tgzaf, MGA_WARP_TGZAF); |
155 | WARP_UCODE_INSTALL( warp_g200_tgzs, MGA_WARP_TGZS ); | 133 | WARP_UCODE_INSTALL(warp_g200_tgzs, MGA_WARP_TGZS); |
156 | WARP_UCODE_INSTALL( warp_g200_tgzsf, MGA_WARP_TGZSF ); | 134 | WARP_UCODE_INSTALL(warp_g200_tgzsf, MGA_WARP_TGZSF); |
157 | WARP_UCODE_INSTALL( warp_g200_tgzsa, MGA_WARP_TGZSA ); | 135 | WARP_UCODE_INSTALL(warp_g200_tgzsa, MGA_WARP_TGZSA); |
158 | WARP_UCODE_INSTALL( warp_g200_tgzsaf, MGA_WARP_TGZSAF ); | 136 | WARP_UCODE_INSTALL(warp_g200_tgzsaf, MGA_WARP_TGZSAF); |
159 | 137 | ||
160 | return 0; | 138 | return 0; |
161 | } | 139 | } |
162 | 140 | ||
163 | int mga_warp_install_microcode( drm_mga_private_t *dev_priv ) | 141 | int mga_warp_install_microcode( drm_mga_private_t *dev_priv ) |
164 | { | 142 | { |
165 | switch ( dev_priv->chipset ) { | 143 | const unsigned int size = mga_warp_microcode_size(dev_priv); |
144 | |||
145 | DRM_DEBUG("MGA ucode size = %d bytes\n", size); | ||
146 | if (size > dev_priv->warp->size) { | ||
147 | DRM_ERROR("microcode too large! (%u > %lu)\n", | ||
148 | size, dev_priv->warp->size); | ||
149 | return DRM_ERR(ENOMEM); | ||
150 | } | ||
151 | |||
152 | switch (dev_priv->chipset) { | ||
166 | case MGA_CARD_TYPE_G400: | 153 | case MGA_CARD_TYPE_G400: |
167 | return mga_warp_install_g400_microcode( dev_priv ); | 154 | case MGA_CARD_TYPE_G550: |
155 | return mga_warp_install_g400_microcode(dev_priv); | ||
168 | case MGA_CARD_TYPE_G200: | 156 | case MGA_CARD_TYPE_G200: |
169 | return mga_warp_install_g200_microcode( dev_priv ); | 157 | return mga_warp_install_g200_microcode( dev_priv ); |
170 | default: | 158 | default: |
@@ -182,10 +170,11 @@ int mga_warp_init( drm_mga_private_t *dev_priv ) | |||
182 | */ | 170 | */ |
183 | switch ( dev_priv->chipset ) { | 171 | switch ( dev_priv->chipset ) { |
184 | case MGA_CARD_TYPE_G400: | 172 | case MGA_CARD_TYPE_G400: |
185 | MGA_WRITE( MGA_WIADDR2, MGA_WMODE_SUSPEND ); | 173 | case MGA_CARD_TYPE_G550: |
186 | MGA_WRITE( MGA_WGETMSB, 0x00000E00 ); | 174 | MGA_WRITE(MGA_WIADDR2, MGA_WMODE_SUSPEND); |
187 | MGA_WRITE( MGA_WVRTXSZ, 0x00001807 ); | 175 | MGA_WRITE(MGA_WGETMSB, 0x00000E00); |
188 | MGA_WRITE( MGA_WACCEPTSEQ, 0x18000000 ); | 176 | MGA_WRITE(MGA_WVRTXSZ, 0x00001807); |
177 | MGA_WRITE(MGA_WACCEPTSEQ, 0x18000000); | ||
189 | break; | 178 | break; |
190 | case MGA_CARD_TYPE_G200: | 179 | case MGA_CARD_TYPE_G200: |
191 | MGA_WRITE( MGA_WIADDR, MGA_WMODE_SUSPEND ); | 180 | MGA_WRITE( MGA_WIADDR, MGA_WMODE_SUSPEND ); |
diff --git a/drivers/char/drm/r128_cce.c b/drivers/char/drm/r128_cce.c index 08ed8d01d9d9..895152206b31 100644 --- a/drivers/char/drm/r128_cce.c +++ b/drivers/char/drm/r128_cce.c | |||
@@ -326,7 +326,8 @@ static void r128_cce_init_ring_buffer( drm_device_t *dev, | |||
326 | ring_start = dev_priv->cce_ring->offset - dev->agp->base; | 326 | ring_start = dev_priv->cce_ring->offset - dev->agp->base; |
327 | else | 327 | else |
328 | #endif | 328 | #endif |
329 | ring_start = dev_priv->cce_ring->offset - dev->sg->handle; | 329 | ring_start = dev_priv->cce_ring->offset - |
330 | (unsigned long)dev->sg->virtual; | ||
330 | 331 | ||
331 | R128_WRITE( R128_PM4_BUFFER_OFFSET, ring_start | R128_AGP_OFFSET ); | 332 | R128_WRITE( R128_PM4_BUFFER_OFFSET, ring_start | R128_AGP_OFFSET ); |
332 | 333 | ||
@@ -487,6 +488,7 @@ static int r128_do_init_cce( drm_device_t *dev, drm_r128_init_t *init ) | |||
487 | r128_do_cleanup_cce( dev ); | 488 | r128_do_cleanup_cce( dev ); |
488 | return DRM_ERR(EINVAL); | 489 | return DRM_ERR(EINVAL); |
489 | } | 490 | } |
491 | dev->agp_buffer_token = init->buffers_offset; | ||
490 | dev->agp_buffer_map = drm_core_findmap(dev, init->buffers_offset); | 492 | dev->agp_buffer_map = drm_core_findmap(dev, init->buffers_offset); |
491 | if(!dev->agp_buffer_map) { | 493 | if(!dev->agp_buffer_map) { |
492 | DRM_ERROR("could not find dma buffer region!\n"); | 494 | DRM_ERROR("could not find dma buffer region!\n"); |
@@ -537,7 +539,7 @@ static int r128_do_init_cce( drm_device_t *dev, drm_r128_init_t *init ) | |||
537 | dev_priv->cce_buffers_offset = dev->agp->base; | 539 | dev_priv->cce_buffers_offset = dev->agp->base; |
538 | else | 540 | else |
539 | #endif | 541 | #endif |
540 | dev_priv->cce_buffers_offset = dev->sg->handle; | 542 | dev_priv->cce_buffers_offset = (unsigned long)dev->sg->virtual; |
541 | 543 | ||
542 | dev_priv->ring.start = (u32 *)dev_priv->cce_ring->handle; | 544 | dev_priv->ring.start = (u32 *)dev_priv->cce_ring->handle; |
543 | dev_priv->ring.end = ((u32 *)dev_priv->cce_ring->handle | 545 | dev_priv->ring.end = ((u32 *)dev_priv->cce_ring->handle |
diff --git a/drivers/char/drm/r128_drm.h b/drivers/char/drm/r128_drm.h index 0cba17d1e0ff..b616cd3ed2cd 100644 --- a/drivers/char/drm/r128_drm.h +++ b/drivers/char/drm/r128_drm.h | |||
@@ -215,7 +215,7 @@ typedef struct drm_r128_sarea { | |||
215 | #define DRM_IOCTL_R128_INDIRECT DRM_IOWR(DRM_COMMAND_BASE + DRM_R128_INDIRECT, drm_r128_indirect_t) | 215 | #define DRM_IOCTL_R128_INDIRECT DRM_IOWR(DRM_COMMAND_BASE + DRM_R128_INDIRECT, drm_r128_indirect_t) |
216 | #define DRM_IOCTL_R128_FULLSCREEN DRM_IOW( DRM_COMMAND_BASE + DRM_R128_FULLSCREEN, drm_r128_fullscreen_t) | 216 | #define DRM_IOCTL_R128_FULLSCREEN DRM_IOW( DRM_COMMAND_BASE + DRM_R128_FULLSCREEN, drm_r128_fullscreen_t) |
217 | #define DRM_IOCTL_R128_CLEAR2 DRM_IOW( DRM_COMMAND_BASE + DRM_R128_CLEAR2, drm_r128_clear2_t) | 217 | #define DRM_IOCTL_R128_CLEAR2 DRM_IOW( DRM_COMMAND_BASE + DRM_R128_CLEAR2, drm_r128_clear2_t) |
218 | #define DRM_IOCTL_R128_GETPARAM DRM_IOW( DRM_COMMAND_BASE + DRM_R128_GETPARAM, drm_r128_getparam_t) | 218 | #define DRM_IOCTL_R128_GETPARAM DRM_IOWR( DRM_COMMAND_BASE + DRM_R128_GETPARAM, drm_r128_getparam_t) |
219 | #define DRM_IOCTL_R128_FLIP DRM_IO( DRM_COMMAND_BASE + DRM_R128_FLIP) | 219 | #define DRM_IOCTL_R128_FLIP DRM_IO( DRM_COMMAND_BASE + DRM_R128_FLIP) |
220 | 220 | ||
221 | typedef struct drm_r128_init { | 221 | typedef struct drm_r128_init { |
diff --git a/drivers/char/drm/r300_cmdbuf.c b/drivers/char/drm/r300_cmdbuf.c new file mode 100644 index 000000000000..623f1f460cb5 --- /dev/null +++ b/drivers/char/drm/r300_cmdbuf.c | |||
@@ -0,0 +1,801 @@ | |||
1 | /* r300_cmdbuf.c -- Command buffer emission for R300 -*- linux-c -*- | ||
2 | * | ||
3 | * Copyright (C) The Weather Channel, Inc. 2002. | ||
4 | * Copyright (C) 2004 Nicolai Haehnle. | ||
5 | * All Rights Reserved. | ||
6 | * | ||
7 | * The Weather Channel (TM) funded Tungsten Graphics to develop the | ||
8 | * initial release of the Radeon 8500 driver under the XFree86 license. | ||
9 | * This notice must be preserved. | ||
10 | * | ||
11 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
12 | * copy of this software and associated documentation files (the "Software"), | ||
13 | * to deal in the Software without restriction, including without limitation | ||
14 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
15 | * and/or sell copies of the Software, and to permit persons to whom the | ||
16 | * Software is furnished to do so, subject to the following conditions: | ||
17 | * | ||
18 | * The above copyright notice and this permission notice (including the next | ||
19 | * paragraph) shall be included in all copies or substantial portions of the | ||
20 | * Software. | ||
21 | * | ||
22 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
23 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
24 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
25 | * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
26 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
27 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER | ||
28 | * DEALINGS IN THE SOFTWARE. | ||
29 | * | ||
30 | * Authors: | ||
31 | * Nicolai Haehnle <prefect_@gmx.net> | ||
32 | */ | ||
33 | |||
34 | #include "drmP.h" | ||
35 | #include "drm.h" | ||
36 | #include "radeon_drm.h" | ||
37 | #include "radeon_drv.h" | ||
38 | #include "r300_reg.h" | ||
39 | |||
40 | |||
41 | #define R300_SIMULTANEOUS_CLIPRECTS 4 | ||
42 | |||
43 | /* Values for R300_RE_CLIPRECT_CNTL depending on the number of cliprects | ||
44 | */ | ||
45 | static const int r300_cliprect_cntl[4] = { | ||
46 | 0xAAAA, | ||
47 | 0xEEEE, | ||
48 | 0xFEFE, | ||
49 | 0xFFFE | ||
50 | }; | ||
51 | |||
52 | |||
53 | /** | ||
54 | * Emit up to R300_SIMULTANEOUS_CLIPRECTS cliprects from the given command | ||
55 | * buffer, starting with index n. | ||
56 | */ | ||
57 | static int r300_emit_cliprects(drm_radeon_private_t* dev_priv, | ||
58 | drm_radeon_cmd_buffer_t* cmdbuf, | ||
59 | int n) | ||
60 | { | ||
61 | drm_clip_rect_t box; | ||
62 | int nr; | ||
63 | int i; | ||
64 | RING_LOCALS; | ||
65 | |||
66 | nr = cmdbuf->nbox - n; | ||
67 | if (nr > R300_SIMULTANEOUS_CLIPRECTS) | ||
68 | nr = R300_SIMULTANEOUS_CLIPRECTS; | ||
69 | |||
70 | DRM_DEBUG("%i cliprects\n", nr); | ||
71 | |||
72 | if (nr) { | ||
73 | BEGIN_RING(6 + nr*2); | ||
74 | OUT_RING( CP_PACKET0( R300_RE_CLIPRECT_TL_0, nr*2 - 1 ) ); | ||
75 | |||
76 | for(i = 0; i < nr; ++i) { | ||
77 | if (DRM_COPY_FROM_USER_UNCHECKED(&box, &cmdbuf->boxes[n+i], sizeof(box))) { | ||
78 | DRM_ERROR("copy cliprect faulted\n"); | ||
79 | return DRM_ERR(EFAULT); | ||
80 | } | ||
81 | |||
82 | box.x1 = (box.x1 + R300_CLIPRECT_OFFSET) & R300_CLIPRECT_MASK; | ||
83 | box.y1 = (box.y1 + R300_CLIPRECT_OFFSET) & R300_CLIPRECT_MASK; | ||
84 | box.x2 = (box.x2 + R300_CLIPRECT_OFFSET) & R300_CLIPRECT_MASK; | ||
85 | box.y2 = (box.y2 + R300_CLIPRECT_OFFSET) & R300_CLIPRECT_MASK; | ||
86 | |||
87 | OUT_RING((box.x1 << R300_CLIPRECT_X_SHIFT) | | ||
88 | (box.y1 << R300_CLIPRECT_Y_SHIFT)); | ||
89 | OUT_RING((box.x2 << R300_CLIPRECT_X_SHIFT) | | ||
90 | (box.y2 << R300_CLIPRECT_Y_SHIFT)); | ||
91 | } | ||
92 | |||
93 | OUT_RING_REG( R300_RE_CLIPRECT_CNTL, r300_cliprect_cntl[nr-1] ); | ||
94 | |||
95 | /* TODO/SECURITY: Force scissors to a safe value, otherwise the | ||
96 | * client might be able to trample over memory. | ||
97 | * The impact should be very limited, but I'd rather be safe than | ||
98 | * sorry. | ||
99 | */ | ||
100 | OUT_RING( CP_PACKET0( R300_RE_SCISSORS_TL, 1 ) ); | ||
101 | OUT_RING( 0 ); | ||
102 | OUT_RING( R300_SCISSORS_X_MASK | R300_SCISSORS_Y_MASK ); | ||
103 | ADVANCE_RING(); | ||
104 | } else { | ||
105 | /* Why we allow zero cliprect rendering: | ||
106 | * There are some commands in a command buffer that must be submitted | ||
107 | * even when there are no cliprects, e.g. DMA buffer discard | ||
108 | * or state setting (though state setting could be avoided by | ||
109 | * simulating a loss of context). | ||
110 | * | ||
111 | * Now since the cmdbuf interface is so chaotic right now (and is | ||
112 | * bound to remain that way for a bit until things settle down), | ||
113 | * it is basically impossible to filter out the commands that are | ||
114 | * necessary and those that aren't. | ||
115 | * | ||
116 | * So I choose the safe way and don't do any filtering at all; | ||
117 | * instead, I simply set up the engine so that all rendering | ||
118 | * can't produce any fragments. | ||
119 | */ | ||
120 | BEGIN_RING(2); | ||
121 | OUT_RING_REG( R300_RE_CLIPRECT_CNTL, 0 ); | ||
122 | ADVANCE_RING(); | ||
123 | } | ||
124 | |||
125 | return 0; | ||
126 | } | ||
127 | |||
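Together with the zero-cliprect branch, the intended calling pattern (implemented by r300_emit_packet3 below) is to replay the same payload once per group of up to R300_SIMULTANEOUS_CLIPRECTS boxes. A condensed sketch of that caller-side loop:

/* sketch; mirrors the do-while in r300_emit_packet3 further down */
n = 0;
do {
	if (cmdbuf->nbox > R300_SIMULTANEOUS_CLIPRECTS)
		r300_emit_cliprects(dev_priv, cmdbuf, n);  /* next group */
	/* ... re-emit the rendering commands for this group ... */
	n += R300_SIMULTANEOUS_CLIPRECTS;
} while (n < cmdbuf->nbox);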
128 | u8 r300_reg_flags[0x10000>>2]; | ||
129 | |||
130 | |||
131 | void r300_init_reg_flags(void) | ||
132 | { | ||
133 | int i; | ||
134 | memset(r300_reg_flags, 0, 0x10000>>2); | ||
135 | #define ADD_RANGE_MARK(reg, count,mark) \ | ||
136 | for(i=((reg)>>2);i<((reg)>>2)+(count);i++)\ | ||
137 | r300_reg_flags[i]|=(mark); | ||
138 | |||
139 | #define MARK_SAFE 1 | ||
140 | #define MARK_CHECK_OFFSET 2 | ||
141 | |||
142 | #define ADD_RANGE(reg, count) ADD_RANGE_MARK(reg, count, MARK_SAFE) | ||
143 | |||
144 | /* these match cmducs() command in r300_driver/r300/r300_cmdbuf.c */ | ||
145 | ADD_RANGE(R300_SE_VPORT_XSCALE, 6); | ||
146 | ADD_RANGE(0x2080, 1); | ||
147 | ADD_RANGE(R300_SE_VTE_CNTL, 2); | ||
148 | ADD_RANGE(0x2134, 2); | ||
149 | ADD_RANGE(0x2140, 1); | ||
150 | ADD_RANGE(R300_VAP_INPUT_CNTL_0, 2); | ||
151 | ADD_RANGE(0x21DC, 1); | ||
152 | ADD_RANGE(0x221C, 1); | ||
153 | ADD_RANGE(0x2220, 4); | ||
154 | ADD_RANGE(0x2288, 1); | ||
155 | ADD_RANGE(R300_VAP_OUTPUT_VTX_FMT_0, 2); | ||
156 | ADD_RANGE(R300_VAP_PVS_CNTL_1, 3); | ||
157 | ADD_RANGE(R300_GB_ENABLE, 1); | ||
158 | ADD_RANGE(R300_GB_MSPOS0, 5); | ||
159 | ADD_RANGE(R300_TX_ENABLE, 1); | ||
160 | ADD_RANGE(0x4200, 4); | ||
161 | ADD_RANGE(0x4214, 1); | ||
162 | ADD_RANGE(R300_RE_POINTSIZE, 1); | ||
163 | ADD_RANGE(0x4230, 3); | ||
164 | ADD_RANGE(R300_RE_LINE_CNT, 1); | ||
165 | ADD_RANGE(0x4238, 1); | ||
166 | ADD_RANGE(0x4260, 3); | ||
167 | ADD_RANGE(0x4274, 4); | ||
168 | ADD_RANGE(0x4288, 5); | ||
169 | ADD_RANGE(0x42A0, 1); | ||
170 | ADD_RANGE(R300_RE_ZBIAS_T_FACTOR, 4); | ||
171 | ADD_RANGE(0x42B4, 1); | ||
172 | ADD_RANGE(R300_RE_CULL_CNTL, 1); | ||
173 | ADD_RANGE(0x42C0, 2); | ||
174 | ADD_RANGE(R300_RS_CNTL_0, 2); | ||
175 | ADD_RANGE(R300_RS_INTERP_0, 8); | ||
176 | ADD_RANGE(R300_RS_ROUTE_0, 8); | ||
177 | ADD_RANGE(0x43A4, 2); | ||
178 | ADD_RANGE(0x43E8, 1); | ||
179 | ADD_RANGE(R300_PFS_CNTL_0, 3); | ||
180 | ADD_RANGE(R300_PFS_NODE_0, 4); | ||
181 | ADD_RANGE(R300_PFS_TEXI_0, 64); | ||
182 | ADD_RANGE(0x46A4, 5); | ||
183 | ADD_RANGE(R300_PFS_INSTR0_0, 64); | ||
184 | ADD_RANGE(R300_PFS_INSTR1_0, 64); | ||
185 | ADD_RANGE(R300_PFS_INSTR2_0, 64); | ||
186 | ADD_RANGE(R300_PFS_INSTR3_0, 64); | ||
187 | ADD_RANGE(0x4BC0, 1); | ||
188 | ADD_RANGE(0x4BC8, 3); | ||
189 | ADD_RANGE(R300_PP_ALPHA_TEST, 2); | ||
190 | ADD_RANGE(0x4BD8, 1); | ||
191 | ADD_RANGE(R300_PFS_PARAM_0_X, 64); | ||
192 | ADD_RANGE(0x4E00, 1); | ||
193 | ADD_RANGE(R300_RB3D_CBLEND, 2); | ||
194 | ADD_RANGE(R300_RB3D_COLORMASK, 1); | ||
195 | ADD_RANGE(0x4E10, 3); | ||
196 | ADD_RANGE_MARK(R300_RB3D_COLOROFFSET0, 1, MARK_CHECK_OFFSET); /* check offset */ | ||
197 | ADD_RANGE(R300_RB3D_COLORPITCH0, 1); | ||
198 | ADD_RANGE(0x4E50, 9); | ||
199 | ADD_RANGE(0x4E88, 1); | ||
200 | ADD_RANGE(0x4EA0, 2); | ||
201 | ADD_RANGE(R300_RB3D_ZSTENCIL_CNTL_0, 3); | ||
202 | ADD_RANGE(0x4F10, 4); | ||
203 | ADD_RANGE_MARK(R300_RB3D_DEPTHOFFSET, 1, MARK_CHECK_OFFSET); /* check offset */ | ||
204 | ADD_RANGE(R300_RB3D_DEPTHPITCH, 1); | ||
205 | ADD_RANGE(0x4F28, 1); | ||
206 | ADD_RANGE(0x4F30, 2); | ||
207 | ADD_RANGE(0x4F44, 1); | ||
208 | ADD_RANGE(0x4F54, 1); | ||
209 | |||
210 | ADD_RANGE(R300_TX_FILTER_0, 16); | ||
211 | ADD_RANGE(R300_TX_UNK1_0, 16); | ||
212 | ADD_RANGE(R300_TX_SIZE_0, 16); | ||
213 | ADD_RANGE(R300_TX_FORMAT_0, 16); | ||
214 | /* Texture offset is dangerous and needs more checking */ | ||
215 | ADD_RANGE_MARK(R300_TX_OFFSET_0, 16, MARK_CHECK_OFFSET); | ||
216 | ADD_RANGE(R300_TX_UNK4_0, 16); | ||
217 | ADD_RANGE(R300_TX_BORDER_COLOR_0, 16); | ||
218 | |||
219 | /* Registers touched sporadically while primitives are emitted */ | ||
220 | ADD_RANGE(0x4f18, 1); | ||
221 | ADD_RANGE(R300_RB3D_DSTCACHE_CTLSTAT, 1); | ||
222 | ADD_RANGE(R300_VAP_INPUT_ROUTE_0_0, 8); | ||
223 | ADD_RANGE(R300_VAP_INPUT_ROUTE_1_0, 8); | ||
224 | |||
225 | } | ||
226 | |||
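The flag array is a per-DWORD-register whitelist: ADD_RANGE marks count consecutive registers starting at reg as MARK_SAFE, and r300_check_range below rejects any packet0 that touches an unmarked word. A self-contained illustration of the indexing, using 0x1D98 (R300_SE_VPORT_XSCALE per r300_reg.h later in this patch):

#include <stdio.h>

#define MARK_SAFE 1
static unsigned char flags[0x10000 >> 2];

/* same expansion as the kernel macro above */
#define ADD_RANGE(reg, count) \
	do { int i; for (i = ((reg) >> 2); i < ((reg) >> 2) + (count); i++) \
		flags[i] |= MARK_SAFE; } while (0)

int main(void)
{
	ADD_RANGE(0x1D98, 6);	/* VPORT_XSCALE .. VPORT_ZOFFSET */
	printf("0x1DAC allowed: %d\n", flags[0x1DAC >> 2] == MARK_SAFE); /* 1 */
	printf("0x1DB0 allowed: %d\n", flags[0x1DB0 >> 2] == MARK_SAFE); /* 0 */
	return 0;
}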
227 | static __inline__ int r300_check_range(unsigned reg, int count) | ||
228 | { | ||
229 | int i; | ||
230 | if(reg & ~0xffff)return -1; | ||
231 | for(i=(reg>>2);i<(reg>>2)+count;i++) | ||
232 | if(r300_reg_flags[i]!=MARK_SAFE)return 1; | ||
233 | return 0; | ||
234 | } | ||
235 | |||
236 | /* we expect offsets passed to the framebuffer to be either within video memory or | ||
237 | within AGP space */ | ||
238 | static __inline__ int r300_check_offset(drm_radeon_private_t* dev_priv, u32 offset) | ||
239 | { | ||
240 | /* We really want to check against the end of the video aperture, | ||
241 | but that value is not being kept. | ||
242 | This code is correct for now (it does the same thing as the | ||
243 | code that sets MC_FB_LOCATION in radeon_cp.c). */ | ||
244 | if((offset>=dev_priv->fb_location) && | ||
245 | (offset<dev_priv->gart_vm_start))return 0; | ||
246 | if((offset>=dev_priv->gart_vm_start) && | ||
247 | (offset<dev_priv->gart_vm_start+dev_priv->gart_size))return 0; | ||
248 | return 1; | ||
249 | } | ||
250 | |||
251 | static __inline__ int r300_emit_carefully_checked_packet0(drm_radeon_private_t* dev_priv, | ||
252 | drm_radeon_cmd_buffer_t* cmdbuf, | ||
253 | drm_r300_cmd_header_t header) | ||
254 | { | ||
255 | int reg; | ||
256 | int sz; | ||
257 | int i; | ||
258 | int values[64]; | ||
259 | RING_LOCALS; | ||
260 | |||
261 | sz = header.packet0.count; | ||
262 | reg = (header.packet0.reghi << 8) | header.packet0.reglo; | ||
263 | |||
264 | if((sz>64)||(sz<0)){ | ||
265 | DRM_ERROR("Cannot emit more than 64 values at a time (reg=%04x sz=%d)\n", reg, sz); | ||
266 | return DRM_ERR(EINVAL); | ||
267 | } | ||
268 | for(i=0;i<sz;i++){ | ||
269 | values[i]=((int __user*)cmdbuf->buf)[i]; | ||
270 | switch(r300_reg_flags[(reg>>2)+i]){ | ||
271 | case MARK_SAFE: | ||
272 | break; | ||
273 | case MARK_CHECK_OFFSET: | ||
274 | if(r300_check_offset(dev_priv, (u32)values[i])){ | ||
275 | DRM_ERROR("Offset failed range check (reg=%04x sz=%d)\n", reg, sz); | ||
276 | return DRM_ERR(EINVAL); | ||
277 | } | ||
278 | break; | ||
279 | default: | ||
280 | DRM_ERROR("Register %04x failed check as flag=%02x\n", reg+i*4, r300_reg_flags[(reg>>2)+i]); | ||
281 | return DRM_ERR(EINVAL); | ||
282 | } | ||
283 | } | ||
284 | |||
285 | BEGIN_RING(1+sz); | ||
286 | OUT_RING( CP_PACKET0( reg, sz-1 ) ); | ||
287 | OUT_RING_TABLE( values, sz ); | ||
288 | ADVANCE_RING(); | ||
289 | |||
290 | cmdbuf->buf += sz*4; | ||
291 | cmdbuf->bufsz -= sz*4; | ||
292 | |||
293 | return 0; | ||
294 | } | ||
295 | |||
296 | /** | ||
297 | * Emits a packet0 setting arbitrary registers. | ||
298 | * Called by r300_do_cp_cmdbuf. | ||
299 | * | ||
300 | * Note that checks are performed on contents and addresses of the registers | ||
301 | */ | ||
302 | static __inline__ int r300_emit_packet0(drm_radeon_private_t* dev_priv, | ||
303 | drm_radeon_cmd_buffer_t* cmdbuf, | ||
304 | drm_r300_cmd_header_t header) | ||
305 | { | ||
306 | int reg; | ||
307 | int sz; | ||
308 | RING_LOCALS; | ||
309 | |||
310 | sz = header.packet0.count; | ||
311 | reg = (header.packet0.reghi << 8) | header.packet0.reglo; | ||
312 | |||
313 | if (!sz) | ||
314 | return 0; | ||
315 | |||
316 | if (sz*4 > cmdbuf->bufsz) | ||
317 | return DRM_ERR(EINVAL); | ||
318 | |||
319 | if (reg+sz*4 >= 0x10000){ | ||
320 | DRM_ERROR("No such registers in hardware reg=%04x sz=%d\n", reg, sz); | ||
321 | return DRM_ERR(EINVAL); | ||
322 | } | ||
323 | |||
324 | if(r300_check_range(reg, sz)){ | ||
325 | /* go and check everything */ | ||
326 | return r300_emit_carefully_checked_packet0(dev_priv, cmdbuf, header); | ||
327 | } | ||
328 | /* the rest of the data is safe to emit, whatever the values the user passed */ | ||
329 | |||
330 | BEGIN_RING(1+sz); | ||
331 | OUT_RING( CP_PACKET0( reg, sz-1 ) ); | ||
332 | OUT_RING_TABLE( (int __user*)cmdbuf->buf, sz ); | ||
333 | ADVANCE_RING(); | ||
334 | |||
335 | cmdbuf->buf += sz*4; | ||
336 | cmdbuf->bufsz -= sz*4; | ||
337 | |||
338 | return 0; | ||
339 | } | ||
340 | |||
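For reference, the header layout these two functions decode: the register offset is split across two bytes and reassembled as (reghi << 8) | reglo, and count gives the number of register values that follow. A hypothetical userspace encoder, using the field names seen in the code (the authoritative union is drm_r300_cmd_header_t in radeon_drm.h):

/* hypothetical encoder matching the decode above */
drm_r300_cmd_header_t hdr;

hdr.packet0.cmd_type = R300_CMD_PACKET0;
hdr.packet0.count    = nvalues;            /* DWORDs after the header */
hdr.packet0.reglo    = reg & 0xff;
hdr.packet0.reghi    = (reg >> 8) & 0xff;  /* read back as (reghi<<8)|reglo */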
341 | |||
342 | /** | ||
343 | * Uploads user-supplied vertex program instructions or parameters onto | ||
344 | * the graphics card. | ||
345 | * Called by r300_do_cp_cmdbuf. | ||
346 | */ | ||
347 | static __inline__ int r300_emit_vpu(drm_radeon_private_t* dev_priv, | ||
348 | drm_radeon_cmd_buffer_t* cmdbuf, | ||
349 | drm_r300_cmd_header_t header) | ||
350 | { | ||
351 | int sz; | ||
352 | int addr; | ||
353 | RING_LOCALS; | ||
354 | |||
355 | sz = header.vpu.count; | ||
356 | addr = (header.vpu.adrhi << 8) | header.vpu.adrlo; | ||
357 | |||
358 | if (!sz) | ||
359 | return 0; | ||
360 | if (sz*16 > cmdbuf->bufsz) | ||
361 | return DRM_ERR(EINVAL); | ||
362 | |||
363 | BEGIN_RING(5+sz*4); | ||
364 | /* Wait for the VAP to come to its senses... */ | ||
365 | /* there is no need to emit it multiple times (once before the VAP | ||
366 | is programmed would suffice), but this optimization is for later */ | ||
367 | OUT_RING_REG( R300_VAP_PVS_WAITIDLE, 0 ); | ||
368 | OUT_RING_REG( R300_VAP_PVS_UPLOAD_ADDRESS, addr ); | ||
369 | OUT_RING( CP_PACKET0_TABLE( R300_VAP_PVS_UPLOAD_DATA, sz*4 - 1 ) ); | ||
370 | OUT_RING_TABLE( (int __user*)cmdbuf->buf, sz*4 ); | ||
371 | |||
372 | ADVANCE_RING(); | ||
373 | |||
374 | cmdbuf->buf += sz*16; | ||
375 | cmdbuf->bufsz -= sz*16; | ||
376 | |||
377 | return 0; | ||
378 | } | ||
379 | |||
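Worked accounting for the BEGIN_RING(5 + sz*4) above: each vpu.count unit is one 16-byte vector, i.e. 4 DWORDs, so the upload payload is sz*4 DWORDs; the two OUT_RING_REG writes cost 2 DWORDs each and the CP_PACKET0_TABLE header one more, for 2 + 2 + 1 + sz*4 = 5 + sz*4. The same 16-bytes-per-unit arithmetic is what the sz*16 bounds check against cmdbuf->bufsz enforces.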
380 | |||
381 | /** | ||
382 | * Emit a clear packet from userspace. | ||
383 | * Called by r300_emit_packet3. | ||
384 | */ | ||
385 | static __inline__ int r300_emit_clear(drm_radeon_private_t* dev_priv, | ||
386 | drm_radeon_cmd_buffer_t* cmdbuf) | ||
387 | { | ||
388 | RING_LOCALS; | ||
389 | |||
390 | if (8*4 > cmdbuf->bufsz) | ||
391 | return DRM_ERR(EINVAL); | ||
392 | |||
393 | BEGIN_RING(10); | ||
394 | OUT_RING( CP_PACKET3( R200_3D_DRAW_IMMD_2, 8 ) ); | ||
395 | OUT_RING( R300_PRIM_TYPE_POINT|R300_PRIM_WALK_RING| | ||
396 | (1<<R300_PRIM_NUM_VERTICES_SHIFT) ); | ||
397 | OUT_RING_TABLE( (int __user*)cmdbuf->buf, 8 ); | ||
398 | ADVANCE_RING(); | ||
399 | |||
400 | cmdbuf->buf += 8*4; | ||
401 | cmdbuf->bufsz -= 8*4; | ||
402 | |||
403 | return 0; | ||
404 | } | ||
405 | |||
406 | static __inline__ int r300_emit_3d_load_vbpntr(drm_radeon_private_t* dev_priv, | ||
407 | drm_radeon_cmd_buffer_t* cmdbuf, | ||
408 | u32 header) | ||
409 | { | ||
410 | int count, i,k; | ||
411 | #define MAX_ARRAY_PACKET 64 | ||
412 | u32 payload[MAX_ARRAY_PACKET]; | ||
413 | u32 narrays; | ||
414 | RING_LOCALS; | ||
415 | |||
416 | count=(header>>16) & 0x3fff; | ||
417 | |||
418 | if((count+1)>MAX_ARRAY_PACKET){ | ||
419 | DRM_ERROR("Too large payload in 3D_LOAD_VBPNTR (count=%d)\n", count); | ||
420 | return DRM_ERR(EINVAL); | ||
421 | } | ||
422 | memset(payload, 0, MAX_ARRAY_PACKET*4); | ||
423 | memcpy(payload, cmdbuf->buf+4, (count+1)*4); | ||
424 | |||
425 | /* carefully check packet contents */ | ||
426 | |||
427 | narrays=payload[0]; | ||
428 | k=0; | ||
429 | i=1; | ||
430 | while((k<narrays) && (i<(count+1))){ | ||
431 | i++; /* skip attribute field */ | ||
432 | if(r300_check_offset(dev_priv, payload[i])){ | ||
433 | DRM_ERROR("Offset failed range check (k=%d i=%d) while processing 3D_LOAD_VBPNTR packet.\n", k, i); | ||
434 | return DRM_ERR(EINVAL); | ||
435 | } | ||
436 | k++; | ||
437 | i++; | ||
438 | if(k==narrays)break; | ||
439 | /* have one more to process, they come in pairs */ | ||
440 | if(r300_check_offset(dev_priv, payload[i])){ | ||
441 | DRM_ERROR("Offset failed range check (k=%d i=%d) while processing 3D_LOAD_VBPNTR packet.\n", k, i); | ||
442 | return DRM_ERR(EINVAL); | ||
443 | } | ||
444 | k++; | ||
445 | i++; | ||
446 | } | ||
447 | /* do the counts match what we expect ? */ | ||
448 | if((k!=narrays) || (i!=(count+1))){ | ||
449 | DRM_ERROR("Malformed 3D_LOAD_VBPNTR packet (k=%d i=%d narrays=%d count+1=%d).\n", k, i, narrays, count+1); | ||
450 | return DRM_ERR(EINVAL); | ||
451 | } | ||
452 | |||
453 | /* all clear, output packet */ | ||
454 | |||
455 | BEGIN_RING(count+2); | ||
456 | OUT_RING(header); | ||
457 | OUT_RING_TABLE(payload, count+1); | ||
458 | ADVANCE_RING(); | ||
459 | |||
460 | cmdbuf->buf += (count+2)*4; | ||
461 | cmdbuf->bufsz -= (count+2)*4; | ||
462 | |||
463 | return 0; | ||
464 | } | ||
465 | |||
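The payload layout implied by the validation loop above (inferred from the code, not quoted from a hardware spec):

	payload[0]                 narrays
	then, per pair of arrays:
	    1 DWORD                attribute word (skipped by the first i++)
	    1 DWORD                offset of array k   (range-checked)
	    1 DWORD                offset of array k+1 (range-checked)
	a trailing odd array contributes one attribute word and one offset

A well-formed packet therefore exits the loop with k == narrays and i == count + 1, which is exactly the malformed-packet test at the end.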
466 | static __inline__ int r300_emit_raw_packet3(drm_radeon_private_t* dev_priv, | ||
467 | drm_radeon_cmd_buffer_t* cmdbuf) | ||
468 | { | ||
469 | u32 header; | ||
470 | int count; | ||
471 | RING_LOCALS; | ||
472 | |||
473 | if (4 > cmdbuf->bufsz) | ||
474 | return DRM_ERR(EINVAL); | ||
475 | |||
476 | /* FIXME: This simply emits a packet without much checking. | ||
477 | We need to be smarter. */ | ||
478 | |||
479 | /* obtain first word - actual packet3 header */ | ||
480 | header = *(u32 __user*)cmdbuf->buf; | ||
481 | |||
482 | /* Is it packet 3 ? */ | ||
483 | if( (header>>30)!=0x3 ) { | ||
484 | DRM_ERROR("Not a packet3 header (0x%08x)\n", header); | ||
485 | return DRM_ERR(EINVAL); | ||
486 | } | ||
487 | |||
488 | count=(header>>16) & 0x3fff; | ||
489 | |||
490 | /* Check again now that we know how much data to expect */ | ||
491 | if ((count+2)*4 > cmdbuf->bufsz){ | ||
492 | DRM_ERROR("Expected packet3 of length %d but have only %d bytes left\n", | ||
493 | (count+2)*4, cmdbuf->bufsz); | ||
494 | return DRM_ERR(EINVAL); | ||
495 | } | ||
496 | |||
497 | /* Is it a packet type we know about ? */ | ||
498 | switch(header & 0xff00){ | ||
499 | case RADEON_3D_LOAD_VBPNTR: /* load vertex array pointers */ | ||
500 | return r300_emit_3d_load_vbpntr(dev_priv, cmdbuf, header); | ||
501 | |||
502 | case RADEON_CP_3D_DRAW_IMMD_2: /* triggers drawing using in-packet vertex data */ | ||
503 | case RADEON_CP_3D_DRAW_VBUF_2: /* triggers drawing of vertex buffers setup elsewhere */ | ||
504 | case RADEON_CP_3D_DRAW_INDX_2: /* triggers drawing using indices to vertex buffer */ | ||
505 | case RADEON_CP_INDX_BUFFER: /* DRAW_INDX_2 without INDX_BUFFER seems to lock up the gpu */ | ||
506 | case RADEON_WAIT_FOR_IDLE: | ||
507 | case RADEON_CP_NOP: | ||
508 | /* these packets are safe */ | ||
509 | break; | ||
510 | default: | ||
511 | DRM_ERROR("Unknown packet3 header (0x%08x)\n", header); | ||
512 | return DRM_ERR(EINVAL); | ||
513 | } | ||
514 | |||
515 | |||
516 | BEGIN_RING(count+2); | ||
517 | OUT_RING(header); | ||
518 | OUT_RING_TABLE( (int __user*)(cmdbuf->buf+4), count+1); | ||
519 | ADVANCE_RING(); | ||
520 | |||
521 | cmdbuf->buf += (count+2)*4; | ||
522 | cmdbuf->bufsz -= (count+2)*4; | ||
523 | |||
524 | return 0; | ||
525 | } | ||
526 | |||
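The checks above decode the standard CP packet3 framing; restated as plain field extraction:

/* mirror of the field extraction in r300_emit_raw_packet3 */
unsigned type   = header >> 30;            /* must be 0x3 for packet3 */
unsigned count  = (header >> 16) & 0x3fff; /* body is count+1 DWORDs  */
unsigned opcode = header & 0xff00;         /* the switch key above    */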
527 | |||
528 | /** | ||
529 | * Emit a rendering packet3 from userspace. | ||
530 | * Called by r300_do_cp_cmdbuf. | ||
531 | */ | ||
532 | static __inline__ int r300_emit_packet3(drm_radeon_private_t* dev_priv, | ||
533 | drm_radeon_cmd_buffer_t* cmdbuf, | ||
534 | drm_r300_cmd_header_t header) | ||
535 | { | ||
536 | int n; | ||
537 | int ret; | ||
538 | char __user* orig_buf = cmdbuf->buf; | ||
539 | int orig_bufsz = cmdbuf->bufsz; | ||
540 | |||
541 | /* This is a do-while-loop so that we run the interior at least once, | ||
542 | * even if cmdbuf->nbox is 0. Compare r300_emit_cliprects for rationale. | ||
543 | */ | ||
544 | n = 0; | ||
545 | do { | ||
546 | if (cmdbuf->nbox > R300_SIMULTANEOUS_CLIPRECTS) { | ||
547 | ret = r300_emit_cliprects(dev_priv, cmdbuf, n); | ||
548 | if (ret) | ||
549 | return ret; | ||
550 | |||
551 | cmdbuf->buf = orig_buf; | ||
552 | cmdbuf->bufsz = orig_bufsz; | ||
553 | } | ||
554 | |||
555 | switch(header.packet3.packet) { | ||
556 | case R300_CMD_PACKET3_CLEAR: | ||
557 | DRM_DEBUG("R300_CMD_PACKET3_CLEAR\n"); | ||
558 | ret = r300_emit_clear(dev_priv, cmdbuf); | ||
559 | if (ret) { | ||
560 | DRM_ERROR("r300_emit_clear failed\n"); | ||
561 | return ret; | ||
562 | } | ||
563 | break; | ||
564 | |||
565 | case R300_CMD_PACKET3_RAW: | ||
566 | DRM_DEBUG("R300_CMD_PACKET3_RAW\n"); | ||
567 | ret = r300_emit_raw_packet3(dev_priv, cmdbuf); | ||
568 | if (ret) { | ||
569 | DRM_ERROR("r300_emit_raw_packet3 failed\n"); | ||
570 | return ret; | ||
571 | } | ||
572 | break; | ||
573 | |||
574 | default: | ||
575 | DRM_ERROR("bad packet3 type %i at %p\n", | ||
576 | header.packet3.packet, | ||
577 | cmdbuf->buf - sizeof(header)); | ||
578 | return DRM_ERR(EINVAL); | ||
579 | } | ||
580 | |||
581 | n += R300_SIMULTANEOUS_CLIPRECTS; | ||
582 | } while(n < cmdbuf->nbox); | ||
583 | |||
584 | return 0; | ||
585 | } | ||
586 | |||
587 | /* Some of the R300 chips seem to be extremely touchy about the two registers | ||
588 | * that are configured in r300_pacify. | ||
589 | * Among the worst offenders seems to be the R300 ND (0x4E44): When userspace | ||
590 | * sends a command buffer that contains only state setting commands and a | ||
591 | * vertex program/parameter upload sequence, this will eventually lead to a | ||
592 | * lockup, unless the sequence is bracketed by calls to r300_pacify. | ||
593 | * So we should take great care to *always* call r300_pacify before | ||
594 | * *anything* 3D related, and again afterwards. This is what the | ||
595 | * call bracket in r300_do_cp_cmdbuf is for. | ||
596 | */ | ||
597 | |||
598 | /** | ||
599 | * Emit the sequence to pacify R300. | ||
600 | */ | ||
601 | static __inline__ void r300_pacify(drm_radeon_private_t* dev_priv) | ||
602 | { | ||
603 | RING_LOCALS; | ||
604 | |||
605 | BEGIN_RING(6); | ||
606 | OUT_RING( CP_PACKET0( R300_RB3D_DSTCACHE_CTLSTAT, 0 ) ); | ||
607 | OUT_RING( 0xa ); | ||
608 | OUT_RING( CP_PACKET0( 0x4f18, 0 ) ); | ||
609 | OUT_RING( 0x3 ); | ||
610 | OUT_RING( CP_PACKET3( RADEON_CP_NOP, 0 ) ); | ||
611 | OUT_RING( 0x0 ); | ||
612 | ADVANCE_RING(); | ||
613 | } | ||
614 | |||
615 | |||
616 | /** | ||
617 | * Called by r300_do_cp_cmdbuf to update the internal buffer age and state. | ||
618 | * The actual age emit is done by r300_do_cp_cmdbuf, which is why you must | ||
619 | * be careful about how this function is called. | ||
620 | */ | ||
621 | static void r300_discard_buffer(drm_device_t * dev, drm_buf_t * buf) | ||
622 | { | ||
623 | drm_radeon_private_t *dev_priv = dev->dev_private; | ||
624 | drm_radeon_buf_priv_t *buf_priv = buf->dev_private; | ||
625 | |||
626 | buf_priv->age = ++dev_priv->sarea_priv->last_dispatch; | ||
627 | buf->pending = 1; | ||
628 | buf->used = 0; | ||
629 | } | ||
630 | |||
631 | |||
632 | /** | ||
633 | * Parses and validates a user-supplied command buffer and emits appropriate | ||
634 | * commands on the DMA ring buffer. | ||
635 | * Called by the ioctl handler function radeon_cp_cmdbuf. | ||
636 | */ | ||
637 | int r300_do_cp_cmdbuf(drm_device_t* dev, | ||
638 | DRMFILE filp, | ||
639 | drm_file_t* filp_priv, | ||
640 | drm_radeon_cmd_buffer_t* cmdbuf) | ||
641 | { | ||
642 | drm_radeon_private_t *dev_priv = dev->dev_private; | ||
643 | drm_device_dma_t *dma = dev->dma; | ||
644 | drm_buf_t *buf = NULL; | ||
645 | int emit_dispatch_age = 0; | ||
646 | int ret = 0; | ||
647 | |||
648 | DRM_DEBUG("\n"); | ||
649 | |||
650 | /* See the comment above r300_pacify for why this call must be here, | ||
651 | * and what the cleanup gotos are for. */ | ||
652 | r300_pacify(dev_priv); | ||
653 | |||
654 | if (cmdbuf->nbox <= R300_SIMULTANEOUS_CLIPRECTS) { | ||
655 | ret = r300_emit_cliprects(dev_priv, cmdbuf, 0); | ||
656 | if (ret) | ||
657 | goto cleanup; | ||
658 | } | ||
659 | |||
660 | while(cmdbuf->bufsz >= sizeof(drm_r300_cmd_header_t)) { | ||
661 | int idx; | ||
662 | drm_r300_cmd_header_t header; | ||
663 | |||
664 | header.u = *(unsigned int *)cmdbuf->buf; | ||
665 | |||
666 | cmdbuf->buf += sizeof(header); | ||
667 | cmdbuf->bufsz -= sizeof(header); | ||
668 | |||
669 | switch(header.header.cmd_type) { | ||
670 | case R300_CMD_PACKET0: | ||
671 | DRM_DEBUG("R300_CMD_PACKET0\n"); | ||
672 | ret = r300_emit_packet0(dev_priv, cmdbuf, header); | ||
673 | if (ret) { | ||
674 | DRM_ERROR("r300_emit_packet0 failed\n"); | ||
675 | goto cleanup; | ||
676 | } | ||
677 | break; | ||
678 | |||
679 | case R300_CMD_VPU: | ||
680 | DRM_DEBUG("R300_CMD_VPU\n"); | ||
681 | ret = r300_emit_vpu(dev_priv, cmdbuf, header); | ||
682 | if (ret) { | ||
683 | DRM_ERROR("r300_emit_vpu failed\n"); | ||
684 | goto cleanup; | ||
685 | } | ||
686 | break; | ||
687 | |||
688 | case R300_CMD_PACKET3: | ||
689 | DRM_DEBUG("R300_CMD_PACKET3\n"); | ||
690 | ret = r300_emit_packet3(dev_priv, cmdbuf, header); | ||
691 | if (ret) { | ||
692 | DRM_ERROR("r300_emit_packet3 failed\n"); | ||
693 | goto cleanup; | ||
694 | } | ||
695 | break; | ||
696 | |||
697 | case R300_CMD_END3D: | ||
698 | DRM_DEBUG("R300_CMD_END3D\n"); | ||
699 | /* TODO: | ||
700 | Ideally the userspace driver should not need to issue this call, | ||
701 | i.e. the drm driver should issue it automatically and prevent | ||
702 | lockups. | ||
703 | |||
704 | In practice, we do not understand why this call is needed and what | ||
705 | it does (beyond some vague guesses that it has to do with cache | ||
706 | coherence), so the userspace driver issues it. | ||
707 | |||
708 | Once we know which uses prevent lockups, the code could be moved | ||
709 | into the kernel and the userspace driver would no longer | ||
710 | need to use this command. | ||
711 | |||
712 | Note that issuing this command does not hurt anything | ||
713 | except, possibly, performance */ | ||
714 | r300_pacify(dev_priv); | ||
715 | break; | ||
716 | |||
717 | case R300_CMD_CP_DELAY: | ||
718 | /* simple enough, we can do it here */ | ||
719 | DRM_DEBUG("R300_CMD_CP_DELAY\n"); | ||
720 | { | ||
721 | int i; | ||
722 | RING_LOCALS; | ||
723 | |||
724 | BEGIN_RING(header.delay.count); | ||
725 | for(i=0;i<header.delay.count;i++) | ||
726 | OUT_RING(RADEON_CP_PACKET2); | ||
727 | ADVANCE_RING(); | ||
728 | } | ||
729 | break; | ||
730 | |||
731 | case R300_CMD_DMA_DISCARD: | ||
732 | DRM_DEBUG("RADEON_CMD_DMA_DISCARD\n"); | ||
733 | idx = header.dma.buf_idx; | ||
734 | if (idx < 0 || idx >= dma->buf_count) { | ||
735 | DRM_ERROR("buffer index %d (of %d max)\n", | ||
736 | idx, dma->buf_count - 1); | ||
737 | ret = DRM_ERR(EINVAL); | ||
738 | goto cleanup; | ||
739 | } | ||
740 | |||
741 | buf = dma->buflist[idx]; | ||
742 | if (buf->filp != filp || buf->pending) { | ||
743 | DRM_ERROR("bad buffer %p %p %d\n", | ||
744 | buf->filp, filp, buf->pending); | ||
745 | ret = DRM_ERR(EINVAL); | ||
746 | goto cleanup; | ||
747 | } | ||
748 | |||
749 | emit_dispatch_age = 1; | ||
750 | r300_discard_buffer(dev, buf); | ||
751 | break; | ||
752 | |||
753 | case R300_CMD_WAIT: | ||
754 | /* simple enough, we can do it here */ | ||
755 | DRM_DEBUG("R300_CMD_WAIT\n"); | ||
756 | if(header.wait.flags==0)break; /* nothing to do */ | ||
757 | |||
758 | { | ||
759 | RING_LOCALS; | ||
760 | |||
761 | BEGIN_RING(2); | ||
762 | OUT_RING( CP_PACKET0( RADEON_WAIT_UNTIL, 0 ) ); | ||
763 | OUT_RING( (header.wait.flags & 0xf)<<14 ); | ||
764 | ADVANCE_RING(); | ||
765 | } | ||
766 | break; | ||
767 | |||
768 | default: | ||
769 | DRM_ERROR("bad cmd_type %i at %p\n", | ||
770 | header.header.cmd_type, | ||
771 | cmdbuf->buf - sizeof(header)); | ||
772 | ret = DRM_ERR(EINVAL); | ||
773 | goto cleanup; | ||
774 | } | ||
775 | } | ||
776 | |||
777 | DRM_DEBUG("END\n"); | ||
778 | |||
779 | cleanup: | ||
780 | r300_pacify(dev_priv); | ||
781 | |||
782 | /* We emit the vertex buffer age here, outside the pacifier "brackets" | ||
783 | * for two reasons: | ||
784 | * (1) This may coalesce multiple age emissions into a single one and | ||
785 | * (2) more importantly, some chips lock up hard when scratch registers | ||
786 | * are written inside the pacifier bracket. | ||
787 | */ | ||
788 | if (emit_dispatch_age) { | ||
789 | RING_LOCALS; | ||
790 | |||
791 | /* Emit the vertex buffer age */ | ||
792 | BEGIN_RING(2); | ||
793 | RADEON_DISPATCH_AGE(dev_priv->sarea_priv->last_dispatch); | ||
794 | ADVANCE_RING(); | ||
795 | } | ||
796 | |||
797 | COMMIT_RING(); | ||
798 | |||
799 | return ret; | ||
800 | } | ||
801 | |||
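Summarizing the stream format this loop consumes (derived from the switch above): userspace submits a byte buffer of 32-bit drm_r300_cmd_header_t headers, each followed by its payload where one exists:

	R300_CMD_PACKET0      header + count register values
	R300_CMD_VPU          header + count*4 DWORDs of PVS code/parameters
	R300_CMD_PACKET3      header + an embedded CLEAR block or raw packet3
	R300_CMD_END3D        header only; triggers the pacifying sequence
	R300_CMD_CP_DELAY     header only; emits count CP NOPs
	R300_CMD_DMA_DISCARD  header only; retires buffer dma.buf_idx
	R300_CMD_WAIT         header only; emits a RADEON_WAIT_UNTIL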
diff --git a/drivers/char/drm/r300_reg.h b/drivers/char/drm/r300_reg.h new file mode 100644 index 000000000000..c3e7ca3dbe3d --- /dev/null +++ b/drivers/char/drm/r300_reg.h | |||
@@ -0,0 +1,1412 @@ | |||
1 | /************************************************************************** | ||
2 | |||
3 | Copyright (C) 2004-2005 Nicolai Haehnle et al. | ||
4 | |||
5 | Permission is hereby granted, free of charge, to any person obtaining a | ||
6 | copy of this software and associated documentation files (the "Software"), | ||
7 | to deal in the Software without restriction, including without limitation | ||
8 | on the rights to use, copy, modify, merge, publish, distribute, sub | ||
9 | license, and/or sell copies of the Software, and to permit persons to whom | ||
10 | the Software is furnished to do so, subject to the following conditions: | ||
11 | |||
12 | The above copyright notice and this permission notice (including the next | ||
13 | paragraph) shall be included in all copies or substantial portions of the | ||
14 | Software. | ||
15 | |||
16 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
17 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
18 | FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL | ||
19 | THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM, | ||
20 | DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR | ||
21 | OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE | ||
22 | USE OR OTHER DEALINGS IN THE SOFTWARE. | ||
23 | |||
24 | **************************************************************************/ | ||
25 | |||
26 | #ifndef _R300_REG_H | ||
27 | #define _R300_REG_H | ||
28 | |||
29 | #define R300_MC_INIT_MISC_LAT_TIMER 0x180 | ||
30 | # define R300_MC_MISC__MC_CPR_INIT_LAT_SHIFT 0 | ||
31 | # define R300_MC_MISC__MC_VF_INIT_LAT_SHIFT 4 | ||
32 | # define R300_MC_MISC__MC_DISP0R_INIT_LAT_SHIFT 8 | ||
33 | # define R300_MC_MISC__MC_DISP1R_INIT_LAT_SHIFT 12 | ||
34 | # define R300_MC_MISC__MC_FIXED_INIT_LAT_SHIFT 16 | ||
35 | # define R300_MC_MISC__MC_E2R_INIT_LAT_SHIFT 20 | ||
36 | # define R300_MC_MISC__MC_SAME_PAGE_PRIO_SHIFT 24 | ||
37 | # define R300_MC_MISC__MC_GLOBW_INIT_LAT_SHIFT 28 | ||
38 | |||
39 | |||
40 | #define R300_MC_INIT_GFX_LAT_TIMER 0x154 | ||
41 | # define R300_MC_MISC__MC_G3D0R_INIT_LAT_SHIFT 0 | ||
42 | # define R300_MC_MISC__MC_G3D1R_INIT_LAT_SHIFT 4 | ||
43 | # define R300_MC_MISC__MC_G3D2R_INIT_LAT_SHIFT 8 | ||
44 | # define R300_MC_MISC__MC_G3D3R_INIT_LAT_SHIFT 12 | ||
45 | # define R300_MC_MISC__MC_TX0R_INIT_LAT_SHIFT 16 | ||
46 | # define R300_MC_MISC__MC_TX1R_INIT_LAT_SHIFT 20 | ||
47 | # define R300_MC_MISC__MC_GLOBR_INIT_LAT_SHIFT 24 | ||
48 | # define R300_MC_MISC__MC_GLOBW_FULL_LAT_SHIFT 28 | ||
49 | |||
50 | /* | ||
51 | This file contains registers and constants for the R300. They have been | ||
52 | found mostly by examining command buffers captured using glxtest, as well | ||
53 | as by extrapolating some known registers and constants from the R200. | ||
54 | |||
55 | I am fairly certain that they are correct unless stated otherwise in comments. | ||
56 | */ | ||
57 | |||
58 | #define R300_SE_VPORT_XSCALE 0x1D98 | ||
59 | #define R300_SE_VPORT_XOFFSET 0x1D9C | ||
60 | #define R300_SE_VPORT_YSCALE 0x1DA0 | ||
61 | #define R300_SE_VPORT_YOFFSET 0x1DA4 | ||
62 | #define R300_SE_VPORT_ZSCALE 0x1DA8 | ||
63 | #define R300_SE_VPORT_ZOFFSET 0x1DAC | ||
64 | |||
65 | |||
66 | /* This register is written directly and also starts data section in many 3d CP_PACKET3's */ | ||
67 | #define R300_VAP_VF_CNTL 0x2084 | ||
68 | |||
69 | # define R300_VAP_VF_CNTL__PRIM_TYPE__SHIFT 0 | ||
70 | # define R300_VAP_VF_CNTL__PRIM_NONE (0<<0) | ||
71 | # define R300_VAP_VF_CNTL__PRIM_POINTS (1<<0) | ||
72 | # define R300_VAP_VF_CNTL__PRIM_LINES (2<<0) | ||
73 | # define R300_VAP_VF_CNTL__PRIM_LINE_STRIP (3<<0) | ||
74 | # define R300_VAP_VF_CNTL__PRIM_TRIANGLES (4<<0) | ||
75 | # define R300_VAP_VF_CNTL__PRIM_TRIANGLE_FAN (5<<0) | ||
76 | # define R300_VAP_VF_CNTL__PRIM_TRIANGLE_STRIP (6<<0) | ||
77 | # define R300_VAP_VF_CNTL__PRIM_LINE_LOOP (12<<0) | ||
78 | # define R300_VAP_VF_CNTL__PRIM_QUADS (13<<0) | ||
79 | # define R300_VAP_VF_CNTL__PRIM_QUAD_STRIP (14<<0) | ||
80 | # define R300_VAP_VF_CNTL__PRIM_POLYGON (15<<0) | ||
81 | |||
82 | # define R300_VAP_VF_CNTL__PRIM_WALK__SHIFT 4 | ||
83 | /* State based - direct writes to registers trigger vertex generation */ | ||
84 | # define R300_VAP_VF_CNTL__PRIM_WALK_STATE_BASED (0<<4) | ||
85 | # define R300_VAP_VF_CNTL__PRIM_WALK_INDICES (1<<4) | ||
86 | # define R300_VAP_VF_CNTL__PRIM_WALK_VERTEX_LIST (2<<4) | ||
87 | # define R300_VAP_VF_CNTL__PRIM_WALK_VERTEX_EMBEDDED (3<<4) | ||
88 | |||
89 | /* I don't think I saw these three used.. */ | ||
90 | # define R300_VAP_VF_CNTL__COLOR_ORDER__SHIFT 6 | ||
91 | # define R300_VAP_VF_CNTL__TCL_OUTPUT_CTL_ENA__SHIFT 9 | ||
92 | # define R300_VAP_VF_CNTL__PROG_STREAM_ENA__SHIFT 10 | ||
93 | |||
94 | /* index size - when not set the indices are assumed to be 16 bit */ | ||
95 | # define R300_VAP_VF_CNTL__INDEX_SIZE_32bit (1<<11) | ||
96 | /* number of vertices */ | ||
97 | # define R300_VAP_VF_CNTL__NUM_VERTICES__SHIFT 16 | ||
98 | |||
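As a concrete reading of the fields above, a hypothetical VAP_VF_CNTL word for an indexed triangle draw of 36 vertices would be composed as:

/* hypothetical example composed from the defines above */
unsigned int vf_cntl =
	R300_VAP_VF_CNTL__PRIM_TRIANGLES |
	R300_VAP_VF_CNTL__PRIM_WALK_INDICES |
	(36 << R300_VAP_VF_CNTL__NUM_VERTICES__SHIFT);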
99 | /* BEGIN: Wild guesses */ | ||
100 | #define R300_VAP_OUTPUT_VTX_FMT_0 0x2090 | ||
101 | # define R300_VAP_OUTPUT_VTX_FMT_0__POS_PRESENT (1<<0) | ||
102 | # define R300_VAP_OUTPUT_VTX_FMT_0__COLOR_PRESENT (1<<1) | ||
103 | # define R300_VAP_OUTPUT_VTX_FMT_0__COLOR_1_PRESENT (1<<2) /* GUESS */ | ||
104 | # define R300_VAP_OUTPUT_VTX_FMT_0__COLOR_2_PRESENT (1<<3) /* GUESS */ | ||
105 | # define R300_VAP_OUTPUT_VTX_FMT_0__COLOR_3_PRESENT (1<<4) /* GUESS */ | ||
106 | # define R300_VAP_OUTPUT_VTX_FMT_0__PT_SIZE_PRESENT (1<<16) /* GUESS */ | ||
107 | |||
108 | #define R300_VAP_OUTPUT_VTX_FMT_1 0x2094 | ||
109 | # define R300_VAP_OUTPUT_VTX_FMT_1__TEX_0_COMP_CNT_SHIFT 0 | ||
110 | # define R300_VAP_OUTPUT_VTX_FMT_1__TEX_1_COMP_CNT_SHIFT 3 | ||
111 | # define R300_VAP_OUTPUT_VTX_FMT_1__TEX_2_COMP_CNT_SHIFT 6 | ||
112 | # define R300_VAP_OUTPUT_VTX_FMT_1__TEX_3_COMP_CNT_SHIFT 9 | ||
113 | # define R300_VAP_OUTPUT_VTX_FMT_1__TEX_4_COMP_CNT_SHIFT 12 | ||
114 | # define R300_VAP_OUTPUT_VTX_FMT_1__TEX_5_COMP_CNT_SHIFT 15 | ||
115 | # define R300_VAP_OUTPUT_VTX_FMT_1__TEX_6_COMP_CNT_SHIFT 18 | ||
116 | # define R300_VAP_OUTPUT_VTX_FMT_1__TEX_7_COMP_CNT_SHIFT 21 | ||
117 | /* END */ | ||
118 | |||
119 | #define R300_SE_VTE_CNTL 0x20b0 | ||
120 | # define R300_VPORT_X_SCALE_ENA 0x00000001 | ||
121 | # define R300_VPORT_X_OFFSET_ENA 0x00000002 | ||
122 | # define R300_VPORT_Y_SCALE_ENA 0x00000004 | ||
123 | # define R300_VPORT_Y_OFFSET_ENA 0x00000008 | ||
124 | # define R300_VPORT_Z_SCALE_ENA 0x00000010 | ||
125 | # define R300_VPORT_Z_OFFSET_ENA 0x00000020 | ||
126 | # define R300_VTX_XY_FMT 0x00000100 | ||
127 | # define R300_VTX_Z_FMT 0x00000200 | ||
128 | # define R300_VTX_W0_FMT 0x00000400 | ||
129 | # define R300_VTX_W0_NORMALIZE 0x00000800 | ||
130 | # define R300_VTX_ST_DENORMALIZED 0x00001000 | ||
131 | |||
132 | /* BEGIN: Vertex data assembly - lots of uncertainties */ | ||
133 | /* gap */ | ||
134 | /* Where do we get our vertex data? | ||
135 | // | ||
136 | // Vertex data comes either from immediate mode registers or from | ||
137 | // vertex arrays. | ||
138 | // There appears to be no mixed mode (though we can force the pitch of | ||
139 | // vertex arrays to 0, effectively reusing the same element over and over | ||
140 | // again). | ||
141 | // | ||
142 | // Immediate mode is controlled by the INPUT_CNTL registers. I am not sure | ||
143 | // if these registers influence vertex array processing. | ||
144 | // | ||
145 | // Vertex arrays are controlled via the 3D_LOAD_VBPNTR packet3. | ||
146 | // | ||
147 | // In both cases, vertex attributes are then passed through INPUT_ROUTE. | ||
148 | |||
149 | // Beginning with INPUT_ROUTE_0_0 is a list of WORDs that route vertex data | ||
150 | // into the vertex processor's input registers. | ||
151 | // The first word routes the first input, the second word the second, etc. | ||
152 | // The corresponding input is routed into the register with the given index. | ||
153 | // The list is ended by a word with INPUT_ROUTE_END set. | ||
154 | // | ||
155 | // Always set COMPONENTS_4 in immediate mode. */ | ||
156 | |||
157 | #define R300_VAP_INPUT_ROUTE_0_0 0x2150 | ||
158 | # define R300_INPUT_ROUTE_COMPONENTS_1 (0 << 0) | ||
159 | # define R300_INPUT_ROUTE_COMPONENTS_2 (1 << 0) | ||
160 | # define R300_INPUT_ROUTE_COMPONENTS_3 (2 << 0) | ||
161 | # define R300_INPUT_ROUTE_COMPONENTS_4 (3 << 0) | ||
162 | # define R300_INPUT_ROUTE_COMPONENTS_RGBA (4 << 0) /* GUESS */ | ||
163 | # define R300_VAP_INPUT_ROUTE_IDX_SHIFT 8 | ||
164 | # define R300_VAP_INPUT_ROUTE_IDX_MASK (31 << 8) /* GUESS */ | ||
165 | # define R300_VAP_INPUT_ROUTE_END (1 << 13) | ||
166 | # define R300_INPUT_ROUTE_IMMEDIATE_MODE (0 << 14) /* GUESS */ | ||
167 | # define R300_INPUT_ROUTE_FLOAT (1 << 14) /* GUESS */ | ||
168 | # define R300_INPUT_ROUTE_UNSIGNED_BYTE (2 << 14) /* GUESS */ | ||
169 | # define R300_INPUT_ROUTE_FLOAT_COLOR (3 << 14) /* GUESS */ | ||
170 | #define R300_VAP_INPUT_ROUTE_0_1 0x2154 | ||
171 | #define R300_VAP_INPUT_ROUTE_0_2 0x2158 | ||
172 | #define R300_VAP_INPUT_ROUTE_0_3 0x215C | ||
173 | #define R300_VAP_INPUT_ROUTE_0_4 0x2160 | ||
174 | #define R300_VAP_INPUT_ROUTE_0_5 0x2164 | ||
175 | #define R300_VAP_INPUT_ROUTE_0_6 0x2168 | ||
176 | #define R300_VAP_INPUT_ROUTE_0_7 0x216C | ||
177 | |||
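Putting the route-word fields together: a hypothetical INPUT_ROUTE_0 entry that feeds a 4-component float attribute into vertex-processor input register 0 and terminates the list would look like this (note several of the defines are flagged GUESS above):

/* hypothetical route word built from the defines above */
unsigned int route0 =
	R300_INPUT_ROUTE_COMPONENTS_4 |
	(0 << R300_VAP_INPUT_ROUTE_IDX_SHIFT) |
	R300_INPUT_ROUTE_FLOAT |               /* flagged GUESS above */
	R300_VAP_INPUT_ROUTE_END;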
178 | /* gap */ | ||
179 | /* Notes: | ||
180 | // - always set up to produce at least two attributes: | ||
181 | // if vertex program uses only position, fglrx will set normal, too | ||
182 | // - INPUT_CNTL_0_COLOR and INPUT_CNTL_COLOR bits are always equal */ | ||
183 | #define R300_VAP_INPUT_CNTL_0 0x2180 | ||
184 | # define R300_INPUT_CNTL_0_COLOR 0x00000001 | ||
185 | #define R300_VAP_INPUT_CNTL_1 0x2184 | ||
186 | # define R300_INPUT_CNTL_POS 0x00000001 | ||
187 | # define R300_INPUT_CNTL_NORMAL 0x00000002 | ||
188 | # define R300_INPUT_CNTL_COLOR 0x00000004 | ||
189 | # define R300_INPUT_CNTL_TC0 0x00000400 | ||
190 | # define R300_INPUT_CNTL_TC1 0x00000800 | ||
191 | # define R300_INPUT_CNTL_TC2 0x00001000 /* GUESS */ | ||
192 | # define R300_INPUT_CNTL_TC3 0x00002000 /* GUESS */ | ||
193 | # define R300_INPUT_CNTL_TC4 0x00004000 /* GUESS */ | ||
194 | # define R300_INPUT_CNTL_TC5 0x00008000 /* GUESS */ | ||
195 | # define R300_INPUT_CNTL_TC6 0x00010000 /* GUESS */ | ||
196 | # define R300_INPUT_CNTL_TC7 0x00020000 /* GUESS */ | ||
197 | |||
198 | /* gap */ | ||
199 | /* Words parallel to INPUT_ROUTE_0; All words that are active in INPUT_ROUTE_0 | ||
200 | // are set to a swizzling bit pattern, other words are 0. | ||
201 | // | ||
202 | // In immediate mode, the pattern is always set to xyzw. In vertex array | ||
203 | // mode, the swizzling pattern is e.g. used to set zw components in texture | ||
204 | // coordinates with only two components. */ | ||
205 | #define R300_VAP_INPUT_ROUTE_1_0 0x21E0 | ||
206 | # define R300_INPUT_ROUTE_SELECT_X 0 | ||
207 | # define R300_INPUT_ROUTE_SELECT_Y 1 | ||
208 | # define R300_INPUT_ROUTE_SELECT_Z 2 | ||
209 | # define R300_INPUT_ROUTE_SELECT_W 3 | ||
210 | # define R300_INPUT_ROUTE_SELECT_ZERO 4 | ||
211 | # define R300_INPUT_ROUTE_SELECT_ONE 5 | ||
212 | # define R300_INPUT_ROUTE_SELECT_MASK 7 | ||
213 | # define R300_INPUT_ROUTE_X_SHIFT 0 | ||
214 | # define R300_INPUT_ROUTE_Y_SHIFT 3 | ||
215 | # define R300_INPUT_ROUTE_Z_SHIFT 6 | ||
216 | # define R300_INPUT_ROUTE_W_SHIFT 9 | ||
217 | # define R300_INPUT_ROUTE_ENABLE (15 << 12) | ||
218 | #define R300_VAP_INPUT_ROUTE_1_1 0x21E4 | ||
219 | #define R300_VAP_INPUT_ROUTE_1_2 0x21E8 | ||
220 | #define R300_VAP_INPUT_ROUTE_1_3 0x21EC | ||
221 | #define R300_VAP_INPUT_ROUTE_1_4 0x21F0 | ||
222 | #define R300_VAP_INPUT_ROUTE_1_5 0x21F4 | ||
223 | #define R300_VAP_INPUT_ROUTE_1_6 0x21F8 | ||
224 | #define R300_VAP_INPUT_ROUTE_1_7 0x21FC | ||
225 | |||
226 | /* END */ | ||
227 | |||
228 | /* gap */ | ||
229 | /* BEGIN: Upload vertex program and data | ||
230 | // The programmable vertex shader unit has a memory bank of unknown size | ||
231 | // that can be written to in 16 byte units by writing the address into | ||
232 | // UPLOAD_ADDRESS, followed by data in UPLOAD_DATA (multiples of 4 DWORDs). | ||
233 | // | ||
234 | // Pointers into the memory bank are always in multiples of 16 bytes. | ||
235 | // | ||
236 | // The memory bank is divided into areas with fixed meaning. | ||
237 | // | ||
238 | // Starting at address UPLOAD_PROGRAM: Vertex program instructions. | ||
239 | // Native limits reported by drivers from ATI suggest size 256 (i.e. 4KB), | ||
240 | // whereas the difference between known addresses suggests size 512. | ||
241 | // | ||
242 | // Starting at address UPLOAD_PARAMETERS: Vertex program parameters. | ||
243 | // Native reported limits and the VPI layout suggest size 256, whereas | ||
244 | // difference between known addresses suggests size 512. | ||
245 | // | ||
246 | // At address UPLOAD_POINTSIZE is a vector (0, 0, ps, 0), where ps is the | ||
247 | // floating point pointsize. The exact purpose of this state is uncertain, | ||
248 | // as there is also the R300_RE_POINTSIZE register. | ||
249 | // | ||
250 | // Multiple vertex programs and parameter sets can be loaded at once, | ||
251 | // which could explain the size discrepancy. */ | ||
252 | #define R300_VAP_PVS_UPLOAD_ADDRESS 0x2200 | ||
253 | # define R300_PVS_UPLOAD_PROGRAM 0x00000000 | ||
254 | # define R300_PVS_UPLOAD_PARAMETERS 0x00000200 | ||
255 | # define R300_PVS_UPLOAD_POINTSIZE 0x00000406 | ||
256 | /* gap */ | ||
257 | #define R300_VAP_PVS_UPLOAD_DATA 0x2208 | ||
258 | /* END */ | ||
259 | |||
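This upload protocol maps directly onto the ring sequence used by r300_emit_vpu in r300_cmdbuf.c earlier in this patch: program the address once, then stream 16-byte vectors. A minimal sketch using the driver's ring macros (start is a vector index, an assumption for illustration):

/* upload n 4-DWORD vectors to the PVS instruction area (sketch) */
OUT_RING_REG(R300_VAP_PVS_WAITIDLE, 0);   /* let the engine settle */
OUT_RING_REG(R300_VAP_PVS_UPLOAD_ADDRESS, R300_PVS_UPLOAD_PROGRAM + start);
OUT_RING(CP_PACKET0_TABLE(R300_VAP_PVS_UPLOAD_DATA, n * 4 - 1));
OUT_RING_TABLE(code, n * 4);              /* n vectors of 4 DWORDs */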
260 | /* gap */ | ||
261 | /* I do not know the purpose of this register. However, I do know that | ||
262 | // it is set to 221C_CLEAR for clear operations and to 221C_NORMAL | ||
263 | // for normal rendering. */ | ||
264 | #define R300_VAP_UNKNOWN_221C 0x221C | ||
265 | # define R300_221C_NORMAL 0x00000000 | ||
266 | # define R300_221C_CLEAR 0x0001C000 | ||
267 | |||
268 | /* gap */ | ||
269 | /* Sometimes, END_OF_PKT and 0x2284=0 are the only commands sent between | ||
270 | // rendering commands and overwriting vertex program parameters. | ||
271 | // Therefore, I suspect writing zero to 0x2284 synchronizes the engine and | ||
272 | // avoids bugs caused by still running shaders reading bad data from memory. */ | ||
273 | #define R300_VAP_PVS_WAITIDLE 0x2284 /* GUESS */ | ||
274 | |||
275 | /* Absolutely no clue what this register is about. */ | ||
276 | #define R300_VAP_UNKNOWN_2288 0x2288 | ||
277 | # define R300_2288_R300 0x00750000 /* -- nh */ | ||
278 | # define R300_2288_RV350 0x0000FFFF /* -- Vladimir */ | ||
279 | |||
280 | /* gap */ | ||
281 | /* Addresses are relative to the vertex program instruction area of the | ||
282 | // memory bank. PROGRAM_END points to the last instruction of the active | ||
283 | // program | ||
284 | // | ||
285 | // The meaning of the two UNKNOWN fields is obviously not known. However, | ||
286 | // experiments so far have shown that both *must* point to an instruction | ||
287 | // inside the vertex program, otherwise the GPU locks up. | ||
288 | // fglrx usually sets CNTL_3_UNKNOWN to the end of the program and | ||
289 | // CNTL_1_UNKNOWN points to the instruction where the last write to position takes place. | ||
290 | // Most likely this is used to skip the rest of the program when a group of vertices isn't visible. | ||
291 | // For some reason this "section" sometimes accepts other instructions that have | ||
292 | // no relationship with position calculations. | ||
293 | */ | ||
294 | #define R300_VAP_PVS_CNTL_1 0x22D0 | ||
295 | # define R300_PVS_CNTL_1_PROGRAM_START_SHIFT 0 | ||
296 | # define R300_PVS_CNTL_1_POS_END_SHIFT 10 | ||
297 | # define R300_PVS_CNTL_1_PROGRAM_END_SHIFT 20 | ||
298 | /* Addresses are relative to the vertex program parameters area. */ | ||
299 | #define R300_VAP_PVS_CNTL_2 0x22D4 | ||
300 | # define R300_PVS_CNTL_2_PARAM_OFFSET_SHIFT 0 | ||
301 | # define R300_PVS_CNTL_2_PARAM_COUNT_SHIFT 16 | ||
302 | #define R300_VAP_PVS_CNTL_3 0x22D8 | ||
303 | # define R300_PVS_CNTL_3_PROGRAM_UNKNOWN_SHIFT 10 | ||
304 | # define R300_PVS_CNTL_3_PROGRAM_UNKNOWN2_SHIFT 0 | ||
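/* A sketch, assuming the field layout guessed above is right: describe a
   program of 'len' instructions starting at slot 0, whose last position
   write happens at instruction 'pos_end', with 'nparam' parameter vectors
   stored at parameter offset 0. */
u32 pvs_cntl_1 = (0 << R300_PVS_CNTL_1_PROGRAM_START_SHIFT)
	       | (pos_end << R300_PVS_CNTL_1_POS_END_SHIFT)
	       | ((len - 1) << R300_PVS_CNTL_1_PROGRAM_END_SHIFT);
u32 pvs_cntl_2 = (0 << R300_PVS_CNTL_2_PARAM_OFFSET_SHIFT)
	       | (nparam << R300_PVS_CNTL_2_PARAM_COUNT_SHIFT);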
305 | |||
306 | /* The entire range from 0x2300 to 0x24AC inclusive seems to be used for | ||
307 | // immediate vertices */ | ||
308 | #define R300_VAP_VTX_COLOR_R 0x2464 | ||
309 | #define R300_VAP_VTX_COLOR_G 0x2468 | ||
310 | #define R300_VAP_VTX_COLOR_B 0x246C | ||
311 | #define R300_VAP_VTX_POS_0_X_1 0x2490 /* used for glVertex2*() */ | ||
312 | #define R300_VAP_VTX_POS_0_Y_1 0x2494 | ||
313 | #define R300_VAP_VTX_COLOR_PKD 0x249C /* RGBA */ | ||
314 | #define R300_VAP_VTX_POS_0_X_2 0x24A0 /* used for glVertex3*() */ | ||
315 | #define R300_VAP_VTX_POS_0_Y_2 0x24A4 | ||
316 | #define R300_VAP_VTX_POS_0_Z_2 0x24A8 | ||
317 | #define R300_VAP_VTX_END_OF_PKT 0x24AC /* write 0 to indicate end of packet? */ | ||
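/* Pieced together from the registers above, one immediate-mode vertex
   (the glVertex3*() path) would presumably be emitted like this sketch;
   write_reg() and float_as_u32() are hypothetical helpers. */
write_reg(R300_VAP_VTX_COLOR_PKD, 0xff0000ff);      /* packed color (RGBA) */
write_reg(R300_VAP_VTX_POS_0_X_2, float_as_u32(x));
write_reg(R300_VAP_VTX_POS_0_Y_2, float_as_u32(y));
write_reg(R300_VAP_VTX_POS_0_Z_2, float_as_u32(z));
write_reg(R300_VAP_VTX_END_OF_PKT, 0);              /* 0 = end of packet (guess) */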
318 | |||
319 | /* gap */ | ||
320 | |||
321 | /* These are values from r300_reg/r300_reg.h - they are known to be correct | ||
322 | and are here so we can use one register file instead of several | ||
323 | - Vladimir */ | ||
324 | #define R300_GB_VAP_RASTER_VTX_FMT_0 0x4000 | ||
325 | # define R300_GB_VAP_RASTER_VTX_FMT_0__POS_PRESENT (1<<0) | ||
326 | # define R300_GB_VAP_RASTER_VTX_FMT_0__COLOR_0_PRESENT (1<<1) | ||
327 | # define R300_GB_VAP_RASTER_VTX_FMT_0__COLOR_1_PRESENT (1<<2) | ||
328 | # define R300_GB_VAP_RASTER_VTX_FMT_0__COLOR_2_PRESENT (1<<3) | ||
329 | # define R300_GB_VAP_RASTER_VTX_FMT_0__COLOR_3_PRESENT (1<<4) | ||
330 | # define R300_GB_VAP_RASTER_VTX_FMT_0__COLOR_SPACE (0xf<<5) | ||
331 | # define R300_GB_VAP_RASTER_VTX_FMT_0__PT_SIZE_PRESENT (0x1<<16) | ||
332 | |||
333 | #define R300_GB_VAP_RASTER_VTX_FMT_1 0x4004 | ||
334 | /* each of the following is 3 bits wide, specifies number | ||
335 | of components */ | ||
336 | # define R300_GB_VAP_RASTER_VTX_FMT_1__TEX_0_COMP_CNT_SHIFT 0 | ||
337 | # define R300_GB_VAP_RASTER_VTX_FMT_1__TEX_1_COMP_CNT_SHIFT 3 | ||
338 | # define R300_GB_VAP_RASTER_VTX_FMT_1__TEX_2_COMP_CNT_SHIFT 6 | ||
339 | # define R300_GB_VAP_RASTER_VTX_FMT_1__TEX_3_COMP_CNT_SHIFT 9 | ||
340 | # define R300_GB_VAP_RASTER_VTX_FMT_1__TEX_4_COMP_CNT_SHIFT 12 | ||
341 | # define R300_GB_VAP_RASTER_VTX_FMT_1__TEX_5_COMP_CNT_SHIFT 15 | ||
342 | # define R300_GB_VAP_RASTER_VTX_FMT_1__TEX_6_COMP_CNT_SHIFT 18 | ||
343 | # define R300_GB_VAP_RASTER_VTX_FMT_1__TEX_7_COMP_CNT_SHIFT 21 | ||
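/* Sketch: format words for a stream carrying a position, one color and a
   two-component texture coordinate, under the interpretation above. */
u32 fmt_0 = R300_GB_VAP_RASTER_VTX_FMT_0__POS_PRESENT
	  | R300_GB_VAP_RASTER_VTX_FMT_0__COLOR_0_PRESENT;
u32 fmt_1 = 2 << R300_GB_VAP_RASTER_VTX_FMT_1__TEX_0_COMP_CNT_SHIFT; /* (s, t) */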
344 | |||
345 | /* UNK30 seems to enable point-to-quad transformation on textures | ||
346 | (or something closely related to that). | ||
347 | This bit is rather fatal at the moment due to shortcomings on the pixel shader side */ | ||
348 | #define R300_GB_ENABLE 0x4008 | ||
349 | # define R300_GB_POINT_STUFF_ENABLE (1<<0) | ||
350 | # define R300_GB_LINE_STUFF_ENABLE (1<<1) | ||
351 | # define R300_GB_TRIANGLE_STUFF_ENABLE (1<<2) | ||
352 | # define R300_GB_STENCIL_AUTO_ENABLE (1<<4) | ||
353 | # define R300_GB_UNK30 (1<<30) | ||
354 | /* each of the following is 2 bits wide */ | ||
355 | #define R300_GB_TEX_REPLICATE 0 | ||
356 | #define R300_GB_TEX_ST 1 | ||
357 | #define R300_GB_TEX_STR 2 | ||
358 | # define R300_GB_TEX0_SOURCE_SHIFT 16 | ||
359 | # define R300_GB_TEX1_SOURCE_SHIFT 18 | ||
360 | # define R300_GB_TEX2_SOURCE_SHIFT 20 | ||
361 | # define R300_GB_TEX3_SOURCE_SHIFT 22 | ||
362 | # define R300_GB_TEX4_SOURCE_SHIFT 24 | ||
363 | # define R300_GB_TEX5_SOURCE_SHIFT 26 | ||
364 | # define R300_GB_TEX6_SOURCE_SHIFT 28 | ||
365 | # define R300_GB_TEX7_SOURCE_SHIFT 30 | ||
366 | |||
367 | /* MSPOS - positions for multisample antialiasing (?) */ | ||
368 | #define R300_GB_MSPOS0 0x4010 | ||
369 | /* shifts - each of the fields is 4 bits */ | ||
370 | # define R300_GB_MSPOS0__MS_X0_SHIFT 0 | ||
371 | # define R300_GB_MSPOS0__MS_Y0_SHIFT 4 | ||
372 | # define R300_GB_MSPOS0__MS_X1_SHIFT 8 | ||
373 | # define R300_GB_MSPOS0__MS_Y1_SHIFT 12 | ||
374 | # define R300_GB_MSPOS0__MS_X2_SHIFT 16 | ||
375 | # define R300_GB_MSPOS0__MS_Y2_SHIFT 20 | ||
376 | # define R300_GB_MSPOS0__MSBD0_Y 24 | ||
377 | # define R300_GB_MSPOS0__MSBD0_X 28 | ||
378 | |||
379 | #define R300_GB_MSPOS1 0x4014 | ||
380 | # define R300_GB_MSPOS1__MS_X3_SHIFT 0 | ||
381 | # define R300_GB_MSPOS1__MS_Y3_SHIFT 4 | ||
382 | # define R300_GB_MSPOS1__MS_X4_SHIFT 8 | ||
383 | # define R300_GB_MSPOS1__MS_Y4_SHIFT 12 | ||
384 | # define R300_GB_MSPOS1__MS_X5_SHIFT 16 | ||
385 | # define R300_GB_MSPOS1__MS_Y5_SHIFT 20 | ||
386 | # define R300_GB_MSPOS1__MSBD1 24 | ||
387 | |||
388 | |||
389 | #define R300_GB_TILE_CONFIG 0x4018 | ||
390 | # define R300_GB_TILE_ENABLE (1<<0) | ||
391 | # define R300_GB_TILE_PIPE_COUNT_RV300 0 | ||
392 | # define R300_GB_TILE_PIPE_COUNT_R300 (3<<1) | ||
393 | # define R300_GB_TILE_PIPE_COUNT_R420 (7<<1) | ||
394 | # define R300_GB_TILE_SIZE_8 0 | ||
395 | # define R300_GB_TILE_SIZE_16 (1<<4) | ||
396 | # define R300_GB_TILE_SIZE_32 (2<<4) | ||
397 | # define R300_GB_SUPER_SIZE_1 (0<<6) | ||
398 | # define R300_GB_SUPER_SIZE_2 (1<<6) | ||
399 | # define R300_GB_SUPER_SIZE_4 (2<<6) | ||
400 | # define R300_GB_SUPER_SIZE_8 (3<<6) | ||
401 | # define R300_GB_SUPER_SIZE_16 (4<<6) | ||
402 | # define R300_GB_SUPER_SIZE_32 (5<<6) | ||
403 | # define R300_GB_SUPER_SIZE_64 (6<<6) | ||
404 | # define R300_GB_SUPER_SIZE_128 (7<<6) | ||
405 | # define R300_GB_SUPER_X_SHIFT 9 /* 3 bits wide */ | ||
406 | # define R300_GB_SUPER_Y_SHIFT 12 /* 3 bits wide */ | ||
407 | # define R300_GB_SUPER_TILE_A 0 | ||
408 | # define R300_GB_SUPER_TILE_B (1<<15) | ||
409 | # define R300_GB_SUBPIXEL_1_12 0 | ||
410 | # define R300_GB_SUBPIXEL_1_16 (1<<16) | ||
411 | |||
412 | #define R300_GB_FIFO_SIZE 0x4024 | ||
413 | /* each of the following is 2 bits wide */ | ||
414 | #define R300_GB_FIFO_SIZE_32 0 | ||
415 | #define R300_GB_FIFO_SIZE_64 1 | ||
416 | #define R300_GB_FIFO_SIZE_128 2 | ||
417 | #define R300_GB_FIFO_SIZE_256 3 | ||
418 | # define R300_SC_IFIFO_SIZE_SHIFT 0 | ||
419 | # define R300_SC_TZFIFO_SIZE_SHIFT 2 | ||
420 | # define R300_SC_BFIFO_SIZE_SHIFT 4 | ||
421 | |||
422 | # define R300_US_OFIFO_SIZE_SHIFT 12 | ||
423 | # define R300_US_WFIFO_SIZE_SHIFT 14 | ||
424 | /* the following use the same constants as above, but the meaning is | ||
425 | times 2 (i.e. instead of 32 words it means 64) */ | ||
426 | # define R300_RS_TFIFO_SIZE_SHIFT 6 | ||
427 | # define R300_RS_CFIFO_SIZE_SHIFT 8 | ||
428 | # define R300_US_RAM_SIZE_SHIFT 10 | ||
429 | /* watermarks, 3 bits wide */ | ||
430 | # define R300_RS_HIGHWATER_COL_SHIFT 16 | ||
431 | # define R300_RS_HIGHWATER_TEX_SHIFT 19 | ||
432 | # define R300_OFIFO_HIGHWATER_SHIFT 22 /* two bits only */ | ||
433 | # define R300_CUBE_FIFO_HIGHWATER_COL_SHIFT 24 | ||
434 | |||
435 | #define R300_GB_SELECT 0x401C | ||
436 | # define R300_GB_FOG_SELECT_C0A 0 | ||
437 | # define R300_GB_FOG_SELECT_C1A 1 | ||
438 | # define R300_GB_FOG_SELECT_C2A 2 | ||
439 | # define R300_GB_FOG_SELECT_C3A 3 | ||
440 | # define R300_GB_FOG_SELECT_1_1_W 4 | ||
441 | # define R300_GB_FOG_SELECT_Z 5 | ||
442 | # define R300_GB_DEPTH_SELECT_Z 0 | ||
443 | # define R300_GB_DEPTH_SELECT_1_1_W (1<<3) | ||
444 | # define R300_GB_W_SELECT_1_W 0 | ||
445 | # define R300_GB_W_SELECT_1 (1<<4) | ||
446 | |||
447 | #define R300_GB_AA_CONFIG 0x4020 | ||
448 | # define R300_AA_ENABLE 0x01 | ||
449 | # define R300_AA_SUBSAMPLES_2 0 | ||
450 | # define R300_AA_SUBSAMPLES_3 (1<<1) | ||
451 | # define R300_AA_SUBSAMPLES_4 (2<<1) | ||
452 | # define R300_AA_SUBSAMPLES_6 (3<<1) | ||
453 | |||
454 | /* END */ | ||
455 | |||
456 | /* gap */ | ||
457 | /* The upper enable bits are guessed, based on fglrx reported limits. */ | ||
458 | #define R300_TX_ENABLE 0x4104 | ||
459 | # define R300_TX_ENABLE_0 (1 << 0) | ||
460 | # define R300_TX_ENABLE_1 (1 << 1) | ||
461 | # define R300_TX_ENABLE_2 (1 << 2) | ||
462 | # define R300_TX_ENABLE_3 (1 << 3) | ||
463 | # define R300_TX_ENABLE_4 (1 << 4) | ||
464 | # define R300_TX_ENABLE_5 (1 << 5) | ||
465 | # define R300_TX_ENABLE_6 (1 << 6) | ||
466 | # define R300_TX_ENABLE_7 (1 << 7) | ||
467 | # define R300_TX_ENABLE_8 (1 << 8) | ||
468 | # define R300_TX_ENABLE_9 (1 << 9) | ||
469 | # define R300_TX_ENABLE_10 (1 << 10) | ||
470 | # define R300_TX_ENABLE_11 (1 << 11) | ||
471 | # define R300_TX_ENABLE_12 (1 << 12) | ||
472 | # define R300_TX_ENABLE_13 (1 << 13) | ||
473 | # define R300_TX_ENABLE_14 (1 << 14) | ||
474 | # define R300_TX_ENABLE_15 (1 << 15) | ||
475 | |||
476 | /* The pointsize is given in multiples of 6. The pointsize can be | ||
477 | // enormous: Clear() renders a single point that fills the entire | ||
478 | // framebuffer. */ | ||
479 | #define R300_RE_POINTSIZE 0x421C | ||
480 | # define R300_POINTSIZE_Y_SHIFT 0 | ||
481 | # define R300_POINTSIZE_Y_MASK (0xFFFF << 0) /* GUESS */ | ||
482 | # define R300_POINTSIZE_X_SHIFT 16 | ||
483 | # define R300_POINTSIZE_X_MASK (0xFFFF << 16) /* GUESS */ | ||
484 | # define R300_POINTSIZE_MAX (R300_POINTSIZE_Y_MASK / 6) | ||
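/* Sketch: program a square point of 'size' pixels. The multiply-by-6
   encoding and the clamp to the guessed maximum follow the notes above. */
u32 ps = (u32)((size > R300_POINTSIZE_MAX ? R300_POINTSIZE_MAX : size) * 6.0f);
u32 val = (ps << R300_POINTSIZE_X_SHIFT) | (ps << R300_POINTSIZE_Y_SHIFT);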
485 | |||
486 | /* The line width is given in multiples of 6. | ||
487 | In default mode lines are classified as vertical lines. | ||
488 | HO: horizontal | ||
489 | VE: vertical or horizontal | ||
490 | HO & VE: no classification | ||
491 | */ | ||
492 | #define R300_RE_LINE_CNT 0x4234 | ||
493 | # define R300_LINESIZE_SHIFT 0 | ||
494 | # define R300_LINESIZE_MASK (0xFFFF << 0) /* GUESS */ | ||
495 | # define R300_LINESIZE_MAX (R300_LINESIZE_MASK / 6) | ||
496 | # define R300_LINE_CNT_HO (1 << 16) | ||
497 | # define R300_LINE_CNT_VE (1 << 17) | ||
498 | |||
499 | /* Some sort of scale or clamp value for texcoordless textures. */ | ||
500 | #define R300_RE_UNK4238 0x4238 | ||
501 | |||
502 | #define R300_RE_SHADE_MODEL 0x4278 | ||
503 | # define R300_RE_SHADE_MODEL_SMOOTH 0x3aaaa | ||
504 | # define R300_RE_SHADE_MODEL_FLAT 0x39595 | ||
505 | |||
506 | /* Dangerous */ | ||
507 | #define R300_RE_POLYGON_MODE 0x4288 | ||
508 | # define R300_PM_ENABLED (1 << 0) | ||
509 | # define R300_PM_FRONT_POINT (0 << 0) | ||
510 | # define R300_PM_BACK_POINT (0 << 0) | ||
511 | # define R300_PM_FRONT_LINE (1 << 4) | ||
512 | # define R300_PM_FRONT_FILL (1 << 5) | ||
513 | # define R300_PM_BACK_LINE (1 << 7) | ||
514 | # define R300_PM_BACK_FILL (1 << 8) | ||
515 | |||
516 | /* Not sure why there are duplicates of the factor and constant values. | ||
517 | My best guess so far is that there are separate zbiases for test and write. | ||
518 | Ordering might be wrong. | ||
519 | Some of the tests indicate that fgl has a fallback implementation of zbias | ||
520 | via pixel shaders. */ | ||
521 | #define R300_RE_ZBIAS_T_FACTOR 0x42A4 | ||
522 | #define R300_RE_ZBIAS_T_CONSTANT 0x42A8 | ||
523 | #define R300_RE_ZBIAS_W_FACTOR 0x42AC | ||
524 | #define R300_RE_ZBIAS_W_CONSTANT 0x42B0 | ||
525 | |||
526 | /* This register needs to be set to (1<<1) for RV350 to correctly | ||
527 | perform depth test (see --vb-triangles in r300_demo) | ||
528 | Don't know about other chips. - Vladimir | ||
529 | This is set to 3 when GL_POLYGON_OFFSET_FILL is on. | ||
530 | My guess is that there are two bits for each zbias primitive (FILL, LINE, POINT). | ||
531 | One to enable depth test and one for depth write. | ||
532 | Yet this doesn't explain why depth writes work ... | ||
533 | */ | ||
534 | #define R300_RE_OCCLUSION_CNTL 0x42B4 | ||
535 | # define R300_OCCLUSION_ON (1<<1) | ||
536 | |||
537 | #define R300_RE_CULL_CNTL 0x42B8 | ||
538 | # define R300_CULL_FRONT (1 << 0) | ||
539 | # define R300_CULL_BACK (1 << 1) | ||
540 | # define R300_FRONT_FACE_CCW (0 << 2) | ||
541 | # define R300_FRONT_FACE_CW (1 << 2) | ||
542 | |||
543 | |||
544 | /* BEGIN: Rasterization / Interpolators - many guesses | ||
545 | // 0_UNKNOWN_18 has always been set except for clear operations. | ||
546 | // TC_CNT is the number of incoming texture coordinate sets (i.e. it depends | ||
547 | // on the vertex program, *not* the fragment program) */ | ||
548 | #define R300_RS_CNTL_0 0x4300 | ||
549 | # define R300_RS_CNTL_TC_CNT_SHIFT 2 | ||
550 | # define R300_RS_CNTL_TC_CNT_MASK (7 << 2) | ||
551 | # define R300_RS_CNTL_CI_CNT_SHIFT 7 /* number of color interpolators used */ | ||
552 | # define R300_RS_CNTL_0_UNKNOWN_18 (1 << 18) | ||
553 | /* Guess: RS_CNTL_1 holds the index of the highest used RS_ROUTE_n register. */ | ||
554 | #define R300_RS_CNTL_1 0x4304 | ||
555 | |||
556 | /* gap */ | ||
557 | /* Only used for texture coordinates. | ||
558 | // Use the source field to route texture coordinate input from the vertex program | ||
559 | // to the desired interpolator. Note that the source field is relative to the | ||
560 | // outputs the vertex program *actually* writes. If a vertex program only writes | ||
561 | // texcoord[1], this will be source index 0. | ||
562 | // Set INTERP_USED on all interpolators that produce data used by the | ||
563 | // fragment program. INTERP_USED looks like a swizzling mask, but | ||
564 | // I haven't seen it used that way. | ||
565 | // | ||
566 | // Note: The _UNKNOWN constants are always set in their respective register. | ||
567 | // I don't know if this is necessary. */ | ||
568 | #define R300_RS_INTERP_0 0x4310 | ||
569 | #define R300_RS_INTERP_1 0x4314 | ||
570 | # define R300_RS_INTERP_1_UNKNOWN 0x40 | ||
571 | #define R300_RS_INTERP_2 0x4318 | ||
572 | # define R300_RS_INTERP_2_UNKNOWN 0x80 | ||
573 | #define R300_RS_INTERP_3 0x431C | ||
574 | # define R300_RS_INTERP_3_UNKNOWN 0xC0 | ||
575 | #define R300_RS_INTERP_4 0x4320 | ||
576 | #define R300_RS_INTERP_5 0x4324 | ||
577 | #define R300_RS_INTERP_6 0x4328 | ||
578 | #define R300_RS_INTERP_7 0x432C | ||
579 | # define R300_RS_INTERP_SRC_SHIFT 2 | ||
580 | # define R300_RS_INTERP_SRC_MASK (7 << 2) | ||
581 | # define R300_RS_INTERP_USED 0x00D10000 | ||
582 | |||
583 | /* These DWORDs control how vertex data is routed into fragment program | ||
584 | // registers, after interpolators. */ | ||
585 | #define R300_RS_ROUTE_0 0x4330 | ||
586 | #define R300_RS_ROUTE_1 0x4334 | ||
587 | #define R300_RS_ROUTE_2 0x4338 | ||
588 | #define R300_RS_ROUTE_3 0x433C /* GUESS */ | ||
589 | #define R300_RS_ROUTE_4 0x4340 /* GUESS */ | ||
590 | #define R300_RS_ROUTE_5 0x4344 /* GUESS */ | ||
591 | #define R300_RS_ROUTE_6 0x4348 /* GUESS */ | ||
592 | #define R300_RS_ROUTE_7 0x434C /* GUESS */ | ||
593 | # define R300_RS_ROUTE_SOURCE_INTERP_0 0 | ||
594 | # define R300_RS_ROUTE_SOURCE_INTERP_1 1 | ||
595 | # define R300_RS_ROUTE_SOURCE_INTERP_2 2 | ||
596 | # define R300_RS_ROUTE_SOURCE_INTERP_3 3 | ||
597 | # define R300_RS_ROUTE_SOURCE_INTERP_4 4 | ||
598 | # define R300_RS_ROUTE_SOURCE_INTERP_5 5 /* GUESS */ | ||
599 | # define R300_RS_ROUTE_SOURCE_INTERP_6 6 /* GUESS */ | ||
600 | # define R300_RS_ROUTE_SOURCE_INTERP_7 7 /* GUESS */ | ||
601 | # define R300_RS_ROUTE_ENABLE (1 << 3) /* GUESS */ | ||
602 | # define R300_RS_ROUTE_DEST_SHIFT 6 | ||
603 | # define R300_RS_ROUTE_DEST_MASK (31 << 6) /* GUESS */ | ||
604 | |||
605 | /* Special handling for color: When the fragment program uses color, | ||
606 | // the ROUTE_0_COLOR bit is set and ROUTE_0_COLOR_DEST contains the | ||
607 | // color register index. */ | ||
608 | # define R300_RS_ROUTE_0_COLOR (1 << 14) | ||
609 | # define R300_RS_ROUTE_0_COLOR_DEST_SHIFT 17 | ||
610 | # define R300_RS_ROUTE_0_COLOR_DEST_MASK (31 << 17) /* GUESS */ | ||
611 | /* As above, but for secondary color */ | ||
612 | # define R300_RS_ROUTE_1_COLOR1 (1 << 14) | ||
613 | # define R300_RS_ROUTE_1_COLOR1_DEST_SHIFT 17 | ||
614 | # define R300_RS_ROUTE_1_COLOR1_DEST_MASK (31 << 17) | ||
615 | # define R300_RS_ROUTE_1_UNKNOWN11 (1 << 11) | ||
616 | /* END */ | ||
617 | |||
618 | /* BEGIN: Scissors and cliprects | ||
619 | // There are four clipping rectangles. Their corner coordinates are inclusive. | ||
620 | // Every pixel is assigned a number from 0 to 15 by setting bits 0-3 depending | ||
621 | // on whether the pixel is inside cliprects 0-3, respectively. For example, | ||
622 | // if a pixel is inside cliprects 0 and 1, but outside 2 and 3, it is assigned | ||
623 | // the number 3 (binary 0011). | ||
624 | // Iff the bit corresponding to the pixel's number in RE_CLIPRECT_CNTL is set, | ||
625 | // the pixel is rasterized. | ||
626 | // | ||
627 | // In addition to this, there is a scissors rectangle. Only pixels inside the | ||
628 | // scissors rectangle are drawn. (coordinates are inclusive) | ||
629 | // | ||
630 | // For some reason, the top-left corner of the framebuffer is at (1440, 1440) | ||
631 | // for the purpose of clipping and scissors. */ | ||
632 | #define R300_RE_CLIPRECT_TL_0 0x43B0 | ||
633 | #define R300_RE_CLIPRECT_BR_0 0x43B4 | ||
634 | #define R300_RE_CLIPRECT_TL_1 0x43B8 | ||
635 | #define R300_RE_CLIPRECT_BR_1 0x43BC | ||
636 | #define R300_RE_CLIPRECT_TL_2 0x43C0 | ||
637 | #define R300_RE_CLIPRECT_BR_2 0x43C4 | ||
638 | #define R300_RE_CLIPRECT_TL_3 0x43C8 | ||
639 | #define R300_RE_CLIPRECT_BR_3 0x43CC | ||
640 | # define R300_CLIPRECT_OFFSET 1440 | ||
641 | # define R300_CLIPRECT_MASK 0x1FFF | ||
642 | # define R300_CLIPRECT_X_SHIFT 0 | ||
643 | # define R300_CLIPRECT_X_MASK (0x1FFF << 0) | ||
644 | # define R300_CLIPRECT_Y_SHIFT 13 | ||
645 | # define R300_CLIPRECT_Y_MASK (0x1FFF << 13) | ||
646 | #define R300_RE_CLIPRECT_CNTL 0x43D0 | ||
647 | # define R300_CLIP_OUT (1 << 0) | ||
648 | # define R300_CLIP_0 (1 << 1) | ||
649 | # define R300_CLIP_1 (1 << 2) | ||
650 | # define R300_CLIP_10 (1 << 3) | ||
651 | # define R300_CLIP_2 (1 << 4) | ||
652 | # define R300_CLIP_20 (1 << 5) | ||
653 | # define R300_CLIP_21 (1 << 6) | ||
654 | # define R300_CLIP_210 (1 << 7) | ||
655 | # define R300_CLIP_3 (1 << 8) | ||
656 | # define R300_CLIP_30 (1 << 9) | ||
657 | # define R300_CLIP_31 (1 << 10) | ||
658 | # define R300_CLIP_310 (1 << 11) | ||
659 | # define R300_CLIP_32 (1 << 12) | ||
660 | # define R300_CLIP_320 (1 << 13) | ||
661 | # define R300_CLIP_321 (1 << 14) | ||
662 | # define R300_CLIP_3210 (1 << 15) | ||
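/* Sketch: rasterize exactly the pixels inside cliprect 0, regardless of
   cliprects 1-3 - i.e. set every CNTL bit whose pixel number has bit 0 set. */
u32 cliprect_cntl = R300_CLIP_0 | R300_CLIP_10 | R300_CLIP_20 | R300_CLIP_210
		  | R300_CLIP_30 | R300_CLIP_310 | R300_CLIP_320 | R300_CLIP_3210;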
663 | |||
664 | /* gap */ | ||
665 | #define R300_RE_SCISSORS_TL 0x43E0 | ||
666 | #define R300_RE_SCISSORS_BR 0x43E4 | ||
667 | # define R300_SCISSORS_OFFSET 1440 | ||
668 | # define R300_SCISSORS_X_SHIFT 0 | ||
669 | # define R300_SCISSORS_X_MASK (0x1FFF << 0) | ||
670 | # define R300_SCISSORS_Y_SHIFT 13 | ||
671 | # define R300_SCISSORS_Y_MASK (0x1FFF << 13) | ||
672 | /* END */ | ||
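/* Sketch: scissor to the inclusive rectangle (x1,y1)-(x2,y2), applying the
   (1440, 1440) coordinate offset noted above. */
u32 scissors_tl = ((x1 + R300_SCISSORS_OFFSET) << R300_SCISSORS_X_SHIFT)
		| ((y1 + R300_SCISSORS_OFFSET) << R300_SCISSORS_Y_SHIFT);
u32 scissors_br = ((x2 + R300_SCISSORS_OFFSET) << R300_SCISSORS_X_SHIFT)
		| ((y2 + R300_SCISSORS_OFFSET) << R300_SCISSORS_Y_SHIFT);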
673 | |||
674 | /* BEGIN: Texture specification | ||
675 | // The texture specification dwords are grouped by meaning and not by texture unit. | ||
676 | // This means that e.g. the offset for texture image unit N is found in register | ||
677 | // TX_OFFSET_0 + (4*N) */ | ||
678 | #define R300_TX_FILTER_0 0x4400 | ||
679 | # define R300_TX_REPEAT 0 | ||
680 | # define R300_TX_MIRRORED 1 | ||
681 | # define R300_TX_CLAMP 4 | ||
682 | # define R300_TX_CLAMP_TO_EDGE 2 | ||
683 | # define R300_TX_CLAMP_TO_BORDER 6 | ||
684 | # define R300_TX_WRAP_S_SHIFT 0 | ||
685 | # define R300_TX_WRAP_S_MASK (7 << 0) | ||
686 | # define R300_TX_WRAP_T_SHIFT 3 | ||
687 | # define R300_TX_WRAP_T_MASK (7 << 3) | ||
688 | # define R300_TX_WRAP_Q_SHIFT 6 | ||
689 | # define R300_TX_WRAP_Q_MASK (7 << 6) | ||
690 | # define R300_TX_MAG_FILTER_NEAREST (1 << 9) | ||
691 | # define R300_TX_MAG_FILTER_LINEAR (2 << 9) | ||
692 | # define R300_TX_MAG_FILTER_MASK (3 << 9) | ||
693 | # define R300_TX_MIN_FILTER_NEAREST (1 << 11) | ||
694 | # define R300_TX_MIN_FILTER_LINEAR (2 << 11) | ||
695 | # define R300_TX_MIN_FILTER_NEAREST_MIP_NEAREST (5 << 11) | ||
696 | # define R300_TX_MIN_FILTER_NEAREST_MIP_LINEAR (9 << 11) | ||
697 | # define R300_TX_MIN_FILTER_LINEAR_MIP_NEAREST (6 << 11) | ||
698 | # define R300_TX_MIN_FILTER_LINEAR_MIP_LINEAR (10 << 11) | ||
699 | |||
700 | /* NOTE: NEAREST doesn't seem to exist. | ||
701 | I'm not setting MAG_FILTER_MASK and (3 << 11) for all | ||
702 | anisotropy modes because that would void the selected mag filter */ | ||
703 | # define R300_TX_MIN_FILTER_ANISO_NEAREST ((0 << 13) /*|R300_TX_MAG_FILTER_MASK|(3<<11)*/) | ||
704 | # define R300_TX_MIN_FILTER_ANISO_LINEAR ((0 << 13) /*|R300_TX_MAG_FILTER_MASK|(3<<11)*/) | ||
705 | # define R300_TX_MIN_FILTER_ANISO_NEAREST_MIP_NEAREST ((1 << 13) /*|R300_TX_MAG_FILTER_MASK|(3<<11)*/) | ||
706 | # define R300_TX_MIN_FILTER_ANISO_NEAREST_MIP_LINEAR ((2 << 13) /*|R300_TX_MAG_FILTER_MASK|(3<<11)*/) | ||
707 | # define R300_TX_MIN_FILTER_MASK ( (15 << 11) | (3 << 13) ) | ||
708 | # define R300_TX_MAX_ANISO_1_TO_1 (0 << 21) | ||
709 | # define R300_TX_MAX_ANISO_2_TO_1 (2 << 21) | ||
710 | # define R300_TX_MAX_ANISO_4_TO_1 (4 << 21) | ||
711 | # define R300_TX_MAX_ANISO_8_TO_1 (6 << 21) | ||
712 | # define R300_TX_MAX_ANISO_16_TO_1 (8 << 21) | ||
713 | # define R300_TX_MAX_ANISO_MASK (14 << 21) | ||
714 | |||
715 | #define R300_TX_UNK1_0 0x4440 | ||
716 | # define R300_LOD_BIAS_MASK 0x1fff | ||
717 | |||
718 | #define R300_TX_SIZE_0 0x4480 | ||
719 | # define R300_TX_WIDTHMASK_SHIFT 0 | ||
720 | # define R300_TX_WIDTHMASK_MASK (2047 << 0) | ||
721 | # define R300_TX_HEIGHTMASK_SHIFT 11 | ||
722 | # define R300_TX_HEIGHTMASK_MASK (2047 << 11) | ||
723 | # define R300_TX_UNK23 (1 << 23) | ||
724 | # define R300_TX_SIZE_SHIFT 26 /* largest of width, height */ | ||
725 | # define R300_TX_SIZE_MASK (15 << 26) | ||
726 | #define R300_TX_FORMAT_0 0x44C0 | ||
727 | /* The interpretation of the format word by Wladimir van der Laan */ | ||
728 | /* The X, Y, Z and W refer to the layout of the components. | ||
729 | They are given meanings as R, G, B and Alpha by the swizzle | ||
730 | specification */ | ||
731 | # define R300_TX_FORMAT_X8 0x0 | ||
732 | # define R300_TX_FORMAT_X16 0x1 | ||
733 | # define R300_TX_FORMAT_Y4X4 0x2 | ||
734 | # define R300_TX_FORMAT_Y8X8 0x3 | ||
735 | # define R300_TX_FORMAT_Y16X16 0x4 | ||
736 | # define R300_TX_FORMAT_Z3Y3X2 0x5 | ||
737 | # define R300_TX_FORMAT_Z5Y6X5 0x6 | ||
738 | # define R300_TX_FORMAT_Z6Y5X5 0x7 | ||
739 | # define R300_TX_FORMAT_Z11Y11X10 0x8 | ||
740 | # define R300_TX_FORMAT_Z10Y11X11 0x9 | ||
741 | # define R300_TX_FORMAT_W4Z4Y4X4 0xA | ||
742 | # define R300_TX_FORMAT_W1Z5Y5X5 0xB | ||
743 | # define R300_TX_FORMAT_W8Z8Y8X8 0xC | ||
744 | # define R300_TX_FORMAT_W2Z10Y10X10 0xD | ||
745 | # define R300_TX_FORMAT_W16Z16Y16X16 0xE | ||
746 | # define R300_TX_FORMAT_DXT1 0xF | ||
747 | # define R300_TX_FORMAT_DXT3 0x10 | ||
748 | # define R300_TX_FORMAT_DXT5 0x11 | ||
749 | # define R300_TX_FORMAT_D3DMFT_CxV8U8 0x12 /* no swizzle */ | ||
750 | # define R300_TX_FORMAT_A8R8G8B8 0x13 /* no swizzle */ | ||
751 | # define R300_TX_FORMAT_B8G8_B8G8 0x14 /* no swizzle */ | ||
752 | # define R300_TX_FORMAT_G8R8_G8B8 0x15 /* no swizzle */ | ||
753 | /* 0x16 - some 16 bit green format.. ?? */ | ||
754 | # define R300_TX_FORMAT_UNK25 (1 << 25) /* no swizzle */ | ||
755 | |||
756 | /* gap */ | ||
757 | /* Floating point formats */ | ||
758 | /* Note - hardware supports both 16 and 32 bit floating point */ | ||
759 | # define R300_TX_FORMAT_FL_I16 0x18 | ||
760 | # define R300_TX_FORMAT_FL_I16A16 0x19 | ||
761 | # define R300_TX_FORMAT_FL_R16G16B16A16 0x1A | ||
762 | # define R300_TX_FORMAT_FL_I32 0x1B | ||
763 | # define R300_TX_FORMAT_FL_I32A32 0x1C | ||
764 | # define R300_TX_FORMAT_FL_R32G32B32A32 0x1D | ||
765 | /* alpha modes, convenience mostly */ | ||
766 | /* if you have alpha, pick constant appropriate to the | ||
767 | number of channels (1 for I8, 2 for I8A8, 4 for R8G8B8A8, etc.) */ | ||
768 | # define R300_TX_FORMAT_ALPHA_1CH 0x000 | ||
769 | # define R300_TX_FORMAT_ALPHA_2CH 0x200 | ||
770 | # define R300_TX_FORMAT_ALPHA_4CH 0x600 | ||
771 | # define R300_TX_FORMAT_ALPHA_NONE 0xA00 | ||
772 | /* Swizzling */ | ||
773 | /* constants */ | ||
774 | # define R300_TX_FORMAT_X 0 | ||
775 | # define R300_TX_FORMAT_Y 1 | ||
776 | # define R300_TX_FORMAT_Z 2 | ||
777 | # define R300_TX_FORMAT_W 3 | ||
778 | # define R300_TX_FORMAT_ZERO 4 | ||
779 | # define R300_TX_FORMAT_ONE 5 | ||
780 | # define R300_TX_FORMAT_CUT_Z 6 /* 2.0*Z, everything above 1.0 is set to 0.0 */ | ||
781 | # define R300_TX_FORMAT_CUT_W 7 /* 2.0*W, everything above 1.0 is set to 0.0 */ | ||
782 | |||
783 | # define R300_TX_FORMAT_B_SHIFT 18 | ||
784 | # define R300_TX_FORMAT_G_SHIFT 15 | ||
785 | # define R300_TX_FORMAT_R_SHIFT 12 | ||
786 | # define R300_TX_FORMAT_A_SHIFT 9 | ||
787 | /* Convenience macro to take care of layout and swizzling */ | ||
788 | # define R300_EASY_TX_FORMAT(B, G, R, A, FMT) (\ | ||
789 | ((R300_TX_FORMAT_##B)<<R300_TX_FORMAT_B_SHIFT) \ | ||
790 | | ((R300_TX_FORMAT_##G)<<R300_TX_FORMAT_G_SHIFT) \ | ||
791 | | ((R300_TX_FORMAT_##R)<<R300_TX_FORMAT_R_SHIFT) \ | ||
792 | | ((R300_TX_FORMAT_##A)<<R300_TX_FORMAT_A_SHIFT) \ | ||
793 | | (R300_TX_FORMAT_##FMT) \ | ||
794 | ) | ||
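/* Example use of the macro: under the interpretation above, an ARGB8888
   texel is a W8Z8Y8X8 layout with X=B, Y=G, Z=R, W=A, so a plausible
   format word would be: */
u32 txformat = R300_EASY_TX_FORMAT(X, Y, Z, W, W8Z8Y8X8);
/* Note that R300_TX_FORMAT_ALPHA_4CH (0x600) equals (3 << 9), i.e. it is
   numerically the same as selecting W for the alpha component above. */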
795 | /* These can be ORed with result of R300_EASY_TX_FORMAT() */ | ||
796 | /* We don't really know what they do. Take values from a constant color ? */ | ||
797 | # define R300_TX_FORMAT_CONST_X (1<<5) | ||
798 | # define R300_TX_FORMAT_CONST_Y (2<<5) | ||
799 | # define R300_TX_FORMAT_CONST_Z (4<<5) | ||
800 | # define R300_TX_FORMAT_CONST_W (8<<5) | ||
801 | |||
802 | # define R300_TX_FORMAT_YUV_MODE 0x00800000 | ||
803 | |||
804 | #define R300_TX_OFFSET_0 0x4540 | ||
805 | /* BEGIN: Guess from R200 */ | ||
806 | # define R300_TXO_ENDIAN_NO_SWAP (0 << 0) | ||
807 | # define R300_TXO_ENDIAN_BYTE_SWAP (1 << 0) | ||
808 | # define R300_TXO_ENDIAN_WORD_SWAP (2 << 0) | ||
809 | # define R300_TXO_ENDIAN_HALFDW_SWAP (3 << 0) | ||
810 | # define R300_TXO_OFFSET_MASK 0xffffffe0 | ||
811 | # define R300_TXO_OFFSET_SHIFT 5 | ||
812 | /* END */ | ||
813 | #define R300_TX_UNK4_0 0x4580 | ||
814 | #define R300_TX_BORDER_COLOR_0 0x45C0 //ff00ff00 == { 0, 1.0, 0, 1.0 } | ||
815 | |||
816 | /* END */ | ||
817 | |||
818 | /* BEGIN: Fragment program instruction set | ||
819 | // Fragment programs are written directly into register space. | ||
820 | // There are separate instruction streams for texture instructions and ALU | ||
821 | // instructions. | ||
822 | // In order to synchronize these streams, the program is divided into up | ||
823 | // to 4 nodes. Each node begins with a number of TEX operations, followed | ||
824 | // by a number of ALU operations. | ||
825 | // The first node can have zero TEX ops, all subsequent nodes must have at least | ||
826 | // one TEX op. | ||
827 | // All nodes must have at least one ALU op. | ||
828 | // | ||
829 | // The index of the last node is stored in PFS_CNTL_0: A value of 0 means | ||
830 | // 1 node, a value of 3 means 4 nodes. | ||
831 | // The total amount of instructions is defined in PFS_CNTL_2. The offsets are | ||
832 | // offsets into the respective instruction streams, while *_END points to the | ||
833 | // last instruction relative to this offset. */ | ||
834 | #define R300_PFS_CNTL_0 0x4600 | ||
835 | # define R300_PFS_CNTL_LAST_NODES_SHIFT 0 | ||
836 | # define R300_PFS_CNTL_LAST_NODES_MASK (3 << 0) | ||
837 | # define R300_PFS_CNTL_FIRST_NODE_HAS_TEX (1 << 3) | ||
838 | #define R300_PFS_CNTL_1 0x4604 | ||
839 | /* There is an unshifted value here which has so far always been equal to the | ||
840 | // index of the highest used temporary register. */ | ||
841 | #define R300_PFS_CNTL_2 0x4608 | ||
842 | # define R300_PFS_CNTL_ALU_OFFSET_SHIFT 0 | ||
843 | # define R300_PFS_CNTL_ALU_OFFSET_MASK (63 << 0) | ||
844 | # define R300_PFS_CNTL_ALU_END_SHIFT 6 | ||
845 | # define R300_PFS_CNTL_ALU_END_MASK (63 << 6) | ||
846 | # define R300_PFS_CNTL_TEX_OFFSET_SHIFT 12 | ||
847 | # define R300_PFS_CNTL_TEX_OFFSET_MASK (31 << 12) /* GUESS */ | ||
848 | # define R300_PFS_CNTL_TEX_END_SHIFT 18 | ||
849 | # define R300_PFS_CNTL_TEX_END_MASK (31 << 18) /* GUESS */ | ||
850 | |||
851 | /* gap */ | ||
852 | /* Nodes are stored backwards. The last active node is always stored in | ||
853 | // PFS_NODE_3. | ||
854 | // Example: In a 2-node program, NODE_0 and NODE_1 are set to 0. The | ||
855 | // first node is stored in NODE_2, the second node is stored in NODE_3. | ||
856 | // | ||
857 | // Offsets are relative to the master offset from PFS_CNTL_2. | ||
858 | // LAST_NODE is set for the last node, and only for the last node. */ | ||
859 | #define R300_PFS_NODE_0 0x4610 | ||
860 | #define R300_PFS_NODE_1 0x4614 | ||
861 | #define R300_PFS_NODE_2 0x4618 | ||
862 | #define R300_PFS_NODE_3 0x461C | ||
863 | # define R300_PFS_NODE_ALU_OFFSET_SHIFT 0 | ||
864 | # define R300_PFS_NODE_ALU_OFFSET_MASK (63 << 0) | ||
865 | # define R300_PFS_NODE_ALU_END_SHIFT 6 | ||
866 | # define R300_PFS_NODE_ALU_END_MASK (63 << 6) | ||
867 | # define R300_PFS_NODE_TEX_OFFSET_SHIFT 12 | ||
868 | # define R300_PFS_NODE_TEX_OFFSET_MASK (31 << 12) | ||
869 | # define R300_PFS_NODE_TEX_END_SHIFT 17 | ||
870 | # define R300_PFS_NODE_TEX_END_MASK (31 << 17) | ||
871 | # define R300_PFS_NODE_LAST_NODE (1 << 22) | ||
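/* Sketch: a single-node fragment program with 'ntex' TEX and 'nalu' ALU
   instructions, both streams at offset 0. NODE_0..NODE_2 stay zero; the
   only (and therefore last) node goes into NODE_3. */
u32 node_3 = (0 << R300_PFS_NODE_ALU_OFFSET_SHIFT)
	   | ((nalu - 1) << R300_PFS_NODE_ALU_END_SHIFT)
	   | (0 << R300_PFS_NODE_TEX_OFFSET_SHIFT)
	   | ((ntex - 1) << R300_PFS_NODE_TEX_END_SHIFT)
	   | R300_PFS_NODE_LAST_NODE;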
872 | |||
873 | /* TEX | ||
874 | // As far as I can tell, texture instructions cannot write into output | ||
875 | // registers directly. A subsequent ALU instruction is always necessary, | ||
876 | // even if it's just MAD o0, r0, 1, 0 */ | ||
877 | #define R300_PFS_TEXI_0 0x4620 | ||
878 | # define R300_FPITX_SRC_SHIFT 0 | ||
879 | # define R300_FPITX_SRC_MASK (31 << 0) | ||
880 | # define R300_FPITX_SRC_CONST (1 << 5) /* GUESS */ | ||
881 | # define R300_FPITX_DST_SHIFT 6 | ||
882 | # define R300_FPITX_DST_MASK (31 << 6) | ||
883 | # define R300_FPITX_IMAGE_SHIFT 11 | ||
884 | # define R300_FPITX_IMAGE_MASK (15 << 11) /* GUESS based on layout and native limits */ | ||
885 | /* Unsure if these are opcodes, or some kind of bitfield, but this is how | ||
886 | * they were set when I checked | ||
887 | */ | ||
888 | # define R300_FPITX_OPCODE_SHIFT 15 | ||
889 | # define R300_FPITX_OP_TEX 1 | ||
890 | # define R300_FPITX_OP_TXP 3 | ||
891 | # define R300_FPITX_OP_TXB 4 | ||
892 | |||
893 | /* ALU | ||
894 | // The ALU instruction register blocks are enumerated in the order in which | ||
895 | // fglrx writes them. I assume there is space for 64 instructions, since | ||
896 | // each block has space for a maximum of 64 DWORDs, and this matches reported | ||
897 | // native limits. | ||
898 | // | ||
899 | // The basic functional block seems to be one MAD for each color and alpha, | ||
900 | // and an adder that adds all components after the MUL. | ||
901 | // - ADD, MUL, MAD etc.: use MAD with appropriate neutral operands | ||
902 | // - DP4: Use OUTC_DP4, OUTA_DP4 | ||
903 | // - DP3: Use OUTC_DP3, OUTA_DP4, appropriate alpha operands | ||
904 | // - DPH: Use OUTC_DP4, OUTA_DP4, appropriate alpha operands | ||
905 | // - CMP: If ARG2 < 0, return ARG1, else return ARG0 | ||
906 | // - FLR: use FRC+MAD | ||
907 | // - XPD: use MAD+MAD | ||
908 | // - SGE, SLT: use MAD+CMP | ||
909 | // - RSQ: use ABS modifier for argument | ||
910 | // - Use OUTC_REPL_ALPHA to write results of an alpha-only operation (e.g. RCP) | ||
911 | // into color register | ||
912 | // - apparently, there's no quick DST operation | ||
913 | // - fglrx set FPI2_UNKNOWN_31 on a "MAD fragment.color, tmp0, tmp1, tmp2" | ||
914 | // - fglrx set FPI2_UNKNOWN_31 on a "MAX r2, r1, c0" | ||
915 | // - fglrx once set FPI0_UNKNOWN_31 on a "FRC r1, r1" | ||
916 | // | ||
917 | // Operand selection | ||
918 | // First stage selects three sources from the available registers and | ||
919 | // constant parameters. This is defined in INSTR1 (color) and INSTR3 (alpha). | ||
920 | // fglrx sorts the three source fields: Registers before constants, | ||
921 | // lower indices before higher indices; I do not know whether this is necessary. | ||
922 | // fglrx fills unused sources with "read constant 0" | ||
923 | // According to specs, you cannot select more than two different constants. | ||
924 | // | ||
925 | // Second stage selects the operands from the sources. This is defined in | ||
926 | // INSTR0 (color) and INSTR2 (alpha). You can also select the special constants | ||
927 | // zero and one. | ||
928 | // Swizzling and negation happens in this stage, as well. | ||
929 | // | ||
930 | // Important: Color and alpha seem to be mostly separate, i.e. their sources | ||
931 | // selection appears to be fully independent (the register storage is probably | ||
932 | // physically split into a color and an alpha section). | ||
933 | // However (because of the apparent physical split), there is some interaction | ||
934 | // WRT swizzling. If, for example, you want to load an R component into an | ||
935 | // Alpha operand, this R component is taken from a *color* source, not from | ||
936 | // an alpha source. The corresponding register doesn't even have to appear in | ||
937 | // the alpha sources list. (I hope this all makes sense to you) | ||
938 | // | ||
939 | // Destination selection | ||
940 | // The destination register index is in FPI1 (color) and FPI3 (alpha) together | ||
941 | // with enable bits. | ||
942 | // There are separate enable bits for writing into temporary registers | ||
943 | // (DSTC_REG_* /DSTA_REG) and program output registers (DSTC_OUTPUT_* /DSTA_OUTPUT). | ||
944 | // You can write to both at once, or not write at all (the same index | ||
945 | // must be used for both). | ||
946 | // | ||
947 | // Note: There is a special form for LRP | ||
948 | // - Argument order is the same as in ARB_fragment_program. | ||
949 | // - Operation is MAD | ||
950 | // - ARG1 is set to ARGC_SRC1C_LRP/ARGC_SRC1A_LRP | ||
951 | // - Set FPI0/FPI2_SPECIAL_LRP | ||
952 | // Arbitrary LRP (including support for swizzling) requires vanilla MAD+MAD */ | ||
953 | #define R300_PFS_INSTR1_0 0x46C0 | ||
954 | # define R300_FPI1_SRC0C_SHIFT 0 | ||
955 | # define R300_FPI1_SRC0C_MASK (31 << 0) | ||
956 | # define R300_FPI1_SRC0C_CONST (1 << 5) | ||
957 | # define R300_FPI1_SRC1C_SHIFT 6 | ||
958 | # define R300_FPI1_SRC1C_MASK (31 << 6) | ||
959 | # define R300_FPI1_SRC1C_CONST (1 << 11) | ||
960 | # define R300_FPI1_SRC2C_SHIFT 12 | ||
961 | # define R300_FPI1_SRC2C_MASK (31 << 12) | ||
962 | # define R300_FPI1_SRC2C_CONST (1 << 17) | ||
963 | # define R300_FPI1_DSTC_SHIFT 18 | ||
964 | # define R300_FPI1_DSTC_MASK (31 << 18) | ||
965 | # define R300_FPI1_DSTC_REG_X (1 << 23) | ||
966 | # define R300_FPI1_DSTC_REG_Y (1 << 24) | ||
967 | # define R300_FPI1_DSTC_REG_Z (1 << 25) | ||
968 | # define R300_FPI1_DSTC_OUTPUT_X (1 << 26) | ||
969 | # define R300_FPI1_DSTC_OUTPUT_Y (1 << 27) | ||
970 | # define R300_FPI1_DSTC_OUTPUT_Z (1 << 28) | ||
971 | |||
972 | #define R300_PFS_INSTR3_0 0x47C0 | ||
973 | # define R300_FPI3_SRC0A_SHIFT 0 | ||
974 | # define R300_FPI3_SRC0A_MASK (31 << 0) | ||
975 | # define R300_FPI3_SRC0A_CONST (1 << 5) | ||
976 | # define R300_FPI3_SRC1A_SHIFT 6 | ||
977 | # define R300_FPI3_SRC1A_MASK (31 << 6) | ||
978 | # define R300_FPI3_SRC1A_CONST (1 << 11) | ||
979 | # define R300_FPI3_SRC2A_SHIFT 12 | ||
980 | # define R300_FPI3_SRC2A_MASK (31 << 12) | ||
981 | # define R300_FPI3_SRC2A_CONST (1 << 17) | ||
982 | # define R300_FPI3_DSTA_SHIFT 18 | ||
983 | # define R300_FPI3_DSTA_MASK (31 << 18) | ||
984 | # define R300_FPI3_DSTA_REG (1 << 23) | ||
985 | # define R300_FPI3_DSTA_OUTPUT (1 << 24) | ||
986 | |||
987 | #define R300_PFS_INSTR0_0 0x48C0 | ||
988 | # define R300_FPI0_ARGC_SRC0C_XYZ 0 | ||
989 | # define R300_FPI0_ARGC_SRC0C_XXX 1 | ||
990 | # define R300_FPI0_ARGC_SRC0C_YYY 2 | ||
991 | # define R300_FPI0_ARGC_SRC0C_ZZZ 3 | ||
992 | # define R300_FPI0_ARGC_SRC1C_XYZ 4 | ||
993 | # define R300_FPI0_ARGC_SRC1C_XXX 5 | ||
994 | # define R300_FPI0_ARGC_SRC1C_YYY 6 | ||
995 | # define R300_FPI0_ARGC_SRC1C_ZZZ 7 | ||
996 | # define R300_FPI0_ARGC_SRC2C_XYZ 8 | ||
997 | # define R300_FPI0_ARGC_SRC2C_XXX 9 | ||
998 | # define R300_FPI0_ARGC_SRC2C_YYY 10 | ||
999 | # define R300_FPI0_ARGC_SRC2C_ZZZ 11 | ||
1000 | # define R300_FPI0_ARGC_SRC0A 12 | ||
1001 | # define R300_FPI0_ARGC_SRC1A 13 | ||
1002 | # define R300_FPI0_ARGC_SRC2A 14 | ||
1003 | # define R300_FPI0_ARGC_SRC1C_LRP 15 | ||
1004 | # define R300_FPI0_ARGC_ZERO 20 | ||
1005 | # define R300_FPI0_ARGC_ONE 21 | ||
1006 | # define R300_FPI0_ARGC_HALF 22 /* GUESS */ | ||
1007 | # define R300_FPI0_ARGC_SRC0C_YZX 23 | ||
1008 | # define R300_FPI0_ARGC_SRC1C_YZX 24 | ||
1009 | # define R300_FPI0_ARGC_SRC2C_YZX 25 | ||
1010 | # define R300_FPI0_ARGC_SRC0C_ZXY 26 | ||
1011 | # define R300_FPI0_ARGC_SRC1C_ZXY 27 | ||
1012 | # define R300_FPI0_ARGC_SRC2C_ZXY 28 | ||
1013 | # define R300_FPI0_ARGC_SRC0CA_WZY 29 | ||
1014 | # define R300_FPI0_ARGC_SRC1CA_WZY 30 | ||
1015 | # define R300_FPI0_ARGC_SRC2CA_WZY 31 | ||
1016 | |||
1017 | # define R300_FPI0_ARG0C_SHIFT 0 | ||
1018 | # define R300_FPI0_ARG0C_MASK (31 << 0) | ||
1019 | # define R300_FPI0_ARG0C_NEG (1 << 5) | ||
1020 | # define R300_FPI0_ARG0C_ABS (1 << 6) | ||
1021 | # define R300_FPI0_ARG1C_SHIFT 7 | ||
1022 | # define R300_FPI0_ARG1C_MASK (31 << 7) | ||
1023 | # define R300_FPI0_ARG1C_NEG (1 << 12) | ||
1024 | # define R300_FPI0_ARG1C_ABS (1 << 13) | ||
1025 | # define R300_FPI0_ARG2C_SHIFT 14 | ||
1026 | # define R300_FPI0_ARG2C_MASK (31 << 14) | ||
1027 | # define R300_FPI0_ARG2C_NEG (1 << 19) | ||
1028 | # define R300_FPI0_ARG2C_ABS (1 << 20) | ||
1029 | # define R300_FPI0_SPECIAL_LRP (1 << 21) | ||
1030 | # define R300_FPI0_OUTC_MAD (0 << 23) | ||
1031 | # define R300_FPI0_OUTC_DP3 (1 << 23) | ||
1032 | # define R300_FPI0_OUTC_DP4 (2 << 23) | ||
1033 | # define R300_FPI0_OUTC_MIN (4 << 23) | ||
1034 | # define R300_FPI0_OUTC_MAX (5 << 23) | ||
1035 | # define R300_FPI0_OUTC_CMP (8 << 23) | ||
1036 | # define R300_FPI0_OUTC_FRC (9 << 23) | ||
1037 | # define R300_FPI0_OUTC_REPL_ALPHA (10 << 23) | ||
1038 | # define R300_FPI0_OUTC_SAT (1 << 30) | ||
1039 | # define R300_FPI0_UNKNOWN_31 (1 << 31) | ||
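/* Sketch: the color half of the pass-through "MAD output0, temp0, 1, 0"
   mentioned in the TEX notes above (the alpha half in INSTR2/INSTR3 is
   analogous). Unused sources are filled with "read constant 0", as fglrx
   reportedly does. */
u32 instr1 = (0 << R300_FPI1_SRC0C_SHIFT)                /* src0 = temp 0  */
	   | ((0 << R300_FPI1_SRC1C_SHIFT) | R300_FPI1_SRC1C_CONST)
	   | ((0 << R300_FPI1_SRC2C_SHIFT) | R300_FPI1_SRC2C_CONST)
	   | (0 << R300_FPI1_DSTC_SHIFT)                 /* output index 0 */
	   | R300_FPI1_DSTC_OUTPUT_X | R300_FPI1_DSTC_OUTPUT_Y
	   | R300_FPI1_DSTC_OUTPUT_Z;
u32 instr0 = (R300_FPI0_ARGC_SRC0C_XYZ << R300_FPI0_ARG0C_SHIFT)
	   | (R300_FPI0_ARGC_ONE << R300_FPI0_ARG1C_SHIFT)
	   | (R300_FPI0_ARGC_ZERO << R300_FPI0_ARG2C_SHIFT)
	   | R300_FPI0_OUTC_MAD;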
1040 | |||
1041 | #define R300_PFS_INSTR2_0 0x49C0 | ||
1042 | # define R300_FPI2_ARGA_SRC0C_X 0 | ||
1043 | # define R300_FPI2_ARGA_SRC0C_Y 1 | ||
1044 | # define R300_FPI2_ARGA_SRC0C_Z 2 | ||
1045 | # define R300_FPI2_ARGA_SRC1C_X 3 | ||
1046 | # define R300_FPI2_ARGA_SRC1C_Y 4 | ||
1047 | # define R300_FPI2_ARGA_SRC1C_Z 5 | ||
1048 | # define R300_FPI2_ARGA_SRC2C_X 6 | ||
1049 | # define R300_FPI2_ARGA_SRC2C_Y 7 | ||
1050 | # define R300_FPI2_ARGA_SRC2C_Z 8 | ||
1051 | # define R300_FPI2_ARGA_SRC0A 9 | ||
1052 | # define R300_FPI2_ARGA_SRC1A 10 | ||
1053 | # define R300_FPI2_ARGA_SRC2A 11 | ||
1054 | # define R300_FPI2_ARGA_SRC1A_LRP 15 | ||
1055 | # define R300_FPI2_ARGA_ZERO 16 | ||
1056 | # define R300_FPI2_ARGA_ONE 17 | ||
1057 | # define R300_FPI2_ARGA_HALF 18 /* GUESS */ | ||
1058 | |||
1059 | # define R300_FPI2_ARG0A_SHIFT 0 | ||
1060 | # define R300_FPI2_ARG0A_MASK (31 << 0) | ||
1061 | # define R300_FPI2_ARG0A_NEG (1 << 5) | ||
1062 | # define R300_FPI2_ARG0A_ABS (1 << 6) /* GUESS */ | ||
1063 | # define R300_FPI2_ARG1A_SHIFT 7 | ||
1064 | # define R300_FPI2_ARG1A_MASK (31 << 7) | ||
1065 | # define R300_FPI2_ARG1A_NEG (1 << 12) | ||
1066 | # define R300_FPI2_ARG1A_ABS (1 << 13) /* GUESS */ | ||
1067 | # define R300_FPI2_ARG2A_SHIFT 14 | ||
1068 | # define R300_FPI2_ARG2A_MASK (31 << 14) | ||
1069 | # define R300_FPI2_ARG2A_NEG (1 << 19) | ||
1070 | # define R300_FPI2_ARG2A_ABS (1 << 20) /* GUESS */ | ||
1071 | # define R300_FPI2_SPECIAL_LRP (1 << 21) | ||
1072 | # define R300_FPI2_OUTA_MAD (0 << 23) | ||
1073 | # define R300_FPI2_OUTA_DP4 (1 << 23) | ||
1074 | # define R300_FPI2_OUTA_MIN (2 << 23) | ||
1075 | # define R300_FPI2_OUTA_MAX (3 << 23) | ||
1076 | # define R300_FPI2_OUTA_CMP (6 << 23) | ||
1077 | # define R300_FPI2_OUTA_FRC (7 << 23) | ||
1078 | # define R300_FPI2_OUTA_EX2 (8 << 23) | ||
1079 | # define R300_FPI2_OUTA_LG2 (9 << 23) | ||
1080 | # define R300_FPI2_OUTA_RCP (10 << 23) | ||
1081 | # define R300_FPI2_OUTA_RSQ (11 << 23) | ||
1082 | # define R300_FPI2_OUTA_SAT (1 << 30) | ||
1083 | # define R300_FPI2_UNKNOWN_31 (1 << 31) | ||
1084 | /* END */ | ||
1085 | |||
1086 | /* gap */ | ||
1087 | #define R300_PP_ALPHA_TEST 0x4BD4 | ||
1088 | # define R300_REF_ALPHA_MASK 0x000000ff | ||
1089 | # define R300_ALPHA_TEST_FAIL (0 << 8) | ||
1090 | # define R300_ALPHA_TEST_LESS (1 << 8) | ||
1091 | # define R300_ALPHA_TEST_LEQUAL (3 << 8) | ||
1092 | # define R300_ALPHA_TEST_EQUAL (2 << 8) | ||
1093 | # define R300_ALPHA_TEST_GEQUAL (6 << 8) | ||
1094 | # define R300_ALPHA_TEST_GREATER (4 << 8) | ||
1095 | # define R300_ALPHA_TEST_NEQUAL (5 << 8) | ||
1096 | # define R300_ALPHA_TEST_PASS (7 << 8) | ||
1097 | # define R300_ALPHA_TEST_OP_MASK (7 << 8) | ||
1098 | # define R300_ALPHA_TEST_ENABLE (1 << 11) | ||
1099 | |||
1100 | /* gap */ | ||
1101 | /* Fragment program parameters in 7.16 floating point */ | ||
1102 | #define R300_PFS_PARAM_0_X 0x4C00 | ||
1103 | #define R300_PFS_PARAM_0_Y 0x4C04 | ||
1104 | #define R300_PFS_PARAM_0_Z 0x4C08 | ||
1105 | #define R300_PFS_PARAM_0_W 0x4C0C | ||
1106 | /* GUESS: PARAM_31 is last, based on native limits reported by fglrx */ | ||
1107 | #define R300_PFS_PARAM_31_X 0x4DF0 | ||
1108 | #define R300_PFS_PARAM_31_Y 0x4DF4 | ||
1109 | #define R300_PFS_PARAM_31_Z 0x4DF8 | ||
1110 | #define R300_PFS_PARAM_31_W 0x4DFC | ||
1111 | |||
1112 | /* Notes: | ||
1113 | // - AFAIK fglrx always sets BLEND_UNKNOWN when blending is used in the application | ||
1114 | // - AFAIK fglrx always sets BLEND_NO_SEPARATE when CBLEND and ABLEND are set to the same | ||
1115 | // function (both registers are always set up completely in any case) | ||
1116 | // - Most blend flags are simply copied from R200 and not tested yet */ | ||
1117 | #define R300_RB3D_CBLEND 0x4E04 | ||
1118 | #define R300_RB3D_ABLEND 0x4E08 | ||
1119 | /* the following only appear in CBLEND */ | ||
1120 | # define R300_BLEND_ENABLE (1 << 0) | ||
1121 | # define R300_BLEND_UNKNOWN (3 << 1) | ||
1122 | # define R300_BLEND_NO_SEPARATE (1 << 3) | ||
1123 | /* the following are shared between CBLEND and ABLEND */ | ||
1124 | # define R300_FCN_MASK (3 << 12) | ||
1125 | # define R300_COMB_FCN_ADD_CLAMP (0 << 12) | ||
1126 | # define R300_COMB_FCN_ADD_NOCLAMP (1 << 12) | ||
1127 | # define R300_COMB_FCN_SUB_CLAMP (2 << 12) | ||
1128 | # define R300_COMB_FCN_SUB_NOCLAMP (3 << 12) | ||
1129 | # define R300_SRC_BLEND_GL_ZERO (32 << 16) | ||
1130 | # define R300_SRC_BLEND_GL_ONE (33 << 16) | ||
1131 | # define R300_SRC_BLEND_GL_SRC_COLOR (34 << 16) | ||
1132 | # define R300_SRC_BLEND_GL_ONE_MINUS_SRC_COLOR (35 << 16) | ||
1133 | # define R300_SRC_BLEND_GL_DST_COLOR (36 << 16) | ||
1134 | # define R300_SRC_BLEND_GL_ONE_MINUS_DST_COLOR (37 << 16) | ||
1135 | # define R300_SRC_BLEND_GL_SRC_ALPHA (38 << 16) | ||
1136 | # define R300_SRC_BLEND_GL_ONE_MINUS_SRC_ALPHA (39 << 16) | ||
1137 | # define R300_SRC_BLEND_GL_DST_ALPHA (40 << 16) | ||
1138 | # define R300_SRC_BLEND_GL_ONE_MINUS_DST_ALPHA (41 << 16) | ||
1139 | # define R300_SRC_BLEND_GL_SRC_ALPHA_SATURATE (42 << 16) | ||
1140 | # define R300_SRC_BLEND_MASK (63 << 16) | ||
1141 | # define R300_DST_BLEND_GL_ZERO (32 << 24) | ||
1142 | # define R300_DST_BLEND_GL_ONE (33 << 24) | ||
1143 | # define R300_DST_BLEND_GL_SRC_COLOR (34 << 24) | ||
1144 | # define R300_DST_BLEND_GL_ONE_MINUS_SRC_COLOR (35 << 24) | ||
1145 | # define R300_DST_BLEND_GL_DST_COLOR (36 << 24) | ||
1146 | # define R300_DST_BLEND_GL_ONE_MINUS_DST_COLOR (37 << 24) | ||
1147 | # define R300_DST_BLEND_GL_SRC_ALPHA (38 << 24) | ||
1148 | # define R300_DST_BLEND_GL_ONE_MINUS_SRC_ALPHA (39 << 24) | ||
1149 | # define R300_DST_BLEND_GL_DST_ALPHA (40 << 24) | ||
1150 | # define R300_DST_BLEND_GL_ONE_MINUS_DST_ALPHA (41 << 24) | ||
1151 | # define R300_DST_BLEND_MASK (63 << 24) | ||
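/* Sketch: classic GL_SRC_ALPHA / GL_ONE_MINUS_SRC_ALPHA blending. Per the
   notes above, fglrx would set BLEND_UNKNOWN and, since both functions are
   the same, BLEND_NO_SEPARATE; the low bits only go into CBLEND. */
u32 cblend = R300_BLEND_ENABLE | R300_BLEND_UNKNOWN | R300_BLEND_NO_SEPARATE
	   | R300_COMB_FCN_ADD_CLAMP
	   | R300_SRC_BLEND_GL_SRC_ALPHA
	   | R300_DST_BLEND_GL_ONE_MINUS_SRC_ALPHA;
u32 ablend = R300_COMB_FCN_ADD_CLAMP
	   | R300_SRC_BLEND_GL_SRC_ALPHA
	   | R300_DST_BLEND_GL_ONE_MINUS_SRC_ALPHA;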
1152 | #define R300_RB3D_COLORMASK 0x4E0C | ||
1153 | # define R300_COLORMASK0_B (1<<0) | ||
1154 | # define R300_COLORMASK0_G (1<<1) | ||
1155 | # define R300_COLORMASK0_R (1<<2) | ||
1156 | # define R300_COLORMASK0_A (1<<3) | ||
1157 | |||
1158 | /* gap */ | ||
1159 | #define R300_RB3D_COLOROFFSET0 0x4E28 | ||
1160 | # define R300_COLOROFFSET_MASK 0xFFFFFFF0 /* GUESS */ | ||
1161 | #define R300_RB3D_COLOROFFSET1 0x4E2C /* GUESS */ | ||
1162 | #define R300_RB3D_COLOROFFSET2 0x4E30 /* GUESS */ | ||
1163 | #define R300_RB3D_COLOROFFSET3 0x4E34 /* GUESS */ | ||
1164 | /* gap */ | ||
1165 | /* Bit 16: Larger tiles | ||
1166 | // Bit 17: 4x2 tiles | ||
1167 | // Bit 18: Extremely weird tiling, with some pixels duplicated? */ | ||
1168 | #define R300_RB3D_COLORPITCH0 0x4E38 | ||
1169 | # define R300_COLORPITCH_MASK 0x00001FF8 /* GUESS */ | ||
1170 | # define R300_COLOR_TILE_ENABLE (1 << 16) /* GUESS */ | ||
1171 | # define R300_COLOR_MICROTILE_ENABLE (1 << 17) /* GUESS */ | ||
1172 | # define R300_COLOR_ENDIAN_NO_SWAP (0 << 18) /* GUESS */ | ||
1173 | # define R300_COLOR_ENDIAN_WORD_SWAP (1 << 18) /* GUESS */ | ||
1174 | # define R300_COLOR_ENDIAN_DWORD_SWAP (2 << 18) /* GUESS */ | ||
1175 | # define R300_COLOR_FORMAT_RGB565 (2 << 22) | ||
1176 | # define R300_COLOR_FORMAT_ARGB8888 (3 << 22) | ||
1177 | #define R300_RB3D_COLORPITCH1 0x4E3C /* GUESS */ | ||
1178 | #define R300_RB3D_COLORPITCH2 0x4E40 /* GUESS */ | ||
1179 | #define R300_RB3D_COLORPITCH3 0x4E44 /* GUESS */ | ||
1180 | |||
1181 | /* gap */ | ||
1182 | /* Guess by Vladimir. | ||
1183 | // Set to 0A before 3D operations, set to 02 afterwards. */ | ||
1184 | #define R300_RB3D_DSTCACHE_CTLSTAT 0x4E4C | ||
1185 | # define R300_RB3D_DSTCACHE_02 0x00000002 | ||
1186 | # define R300_RB3D_DSTCACHE_0A 0x0000000A | ||
1187 | |||
1188 | /* gap */ | ||
1189 | /* There seems to be no "write only" setting, so use Z-test = ALWAYS for this. */ | ||
1190 | /* Bit (1<<8) is the "test" bit, so plain write is 6 - vd */ | ||
1191 | #define R300_RB3D_ZSTENCIL_CNTL_0 0x4F00 | ||
1192 | # define R300_RB3D_Z_DISABLED_1 0x00000010 /* GUESS */ | ||
1193 | # define R300_RB3D_Z_DISABLED_2 0x00000014 /* GUESS */ | ||
1194 | # define R300_RB3D_Z_TEST 0x00000012 | ||
1195 | # define R300_RB3D_Z_TEST_AND_WRITE 0x00000016 | ||
1196 | # define R300_RB3D_Z_WRITE_ONLY 0x00000006 | ||
1197 | |||
1201 | # define R300_RB3D_STENCIL_ENABLE 0x00000001 | ||
1202 | |||
1203 | #define R300_RB3D_ZSTENCIL_CNTL_1 0x4F04 | ||
1204 | /* functions */ | ||
1205 | # define R300_ZS_NEVER 0 | ||
1206 | # define R300_ZS_LESS 1 | ||
1207 | # define R300_ZS_LEQUAL 2 | ||
1208 | # define R300_ZS_EQUAL 3 | ||
1209 | # define R300_ZS_GEQUAL 4 | ||
1210 | # define R300_ZS_GREATER 5 | ||
1211 | # define R300_ZS_NOTEQUAL 6 | ||
1212 | # define R300_ZS_ALWAYS 7 | ||
1213 | # define R300_ZS_MASK 7 | ||
1214 | /* operations */ | ||
1215 | # define R300_ZS_KEEP 0 | ||
1216 | # define R300_ZS_ZERO 1 | ||
1217 | # define R300_ZS_REPLACE 2 | ||
1218 | # define R300_ZS_INCR 3 | ||
1219 | # define R300_ZS_DECR 4 | ||
1220 | # define R300_ZS_INVERT 5 | ||
1221 | # define R300_ZS_INCR_WRAP 6 | ||
1222 | # define R300_ZS_DECR_WRAP 7 | ||
1223 | |||
1224 | /* front and back refer to operations done for front | ||
1225 | and back faces, i.e. separate stencil function support */ | ||
1226 | # define R300_RB3D_ZS1_DEPTH_FUNC_SHIFT 0 | ||
1227 | # define R300_RB3D_ZS1_FRONT_FUNC_SHIFT 3 | ||
1228 | # define R300_RB3D_ZS1_FRONT_FAIL_OP_SHIFT 6 | ||
1229 | # define R300_RB3D_ZS1_FRONT_ZPASS_OP_SHIFT 9 | ||
1230 | # define R300_RB3D_ZS1_FRONT_ZFAIL_OP_SHIFT 12 | ||
1231 | # define R300_RB3D_ZS1_BACK_FUNC_SHIFT 15 | ||
1232 | # define R300_RB3D_ZS1_BACK_FAIL_OP_SHIFT 18 | ||
1233 | # define R300_RB3D_ZS1_BACK_ZPASS_OP_SHIFT 21 | ||
1234 | # define R300_RB3D_ZS1_BACK_ZFAIL_OP_SHIFT 24 | ||
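/* Sketch: ordinary LEQUAL depth testing with stencil effectively disabled
   (ALWAYS/KEEP on both faces; KEEP is 0, so those fields are no-ops). */
u32 zs1 = (R300_ZS_LEQUAL << R300_RB3D_ZS1_DEPTH_FUNC_SHIFT)
	| (R300_ZS_ALWAYS << R300_RB3D_ZS1_FRONT_FUNC_SHIFT)
	| (R300_ZS_KEEP << R300_RB3D_ZS1_FRONT_FAIL_OP_SHIFT)
	| (R300_ZS_KEEP << R300_RB3D_ZS1_FRONT_ZPASS_OP_SHIFT)
	| (R300_ZS_KEEP << R300_RB3D_ZS1_FRONT_ZFAIL_OP_SHIFT)
	| (R300_ZS_ALWAYS << R300_RB3D_ZS1_BACK_FUNC_SHIFT)
	| (R300_ZS_KEEP << R300_RB3D_ZS1_BACK_FAIL_OP_SHIFT)
	| (R300_ZS_KEEP << R300_RB3D_ZS1_BACK_ZPASS_OP_SHIFT)
	| (R300_ZS_KEEP << R300_RB3D_ZS1_BACK_ZFAIL_OP_SHIFT);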
1235 | |||
1236 | |||
1237 | |||
1238 | #define R300_RB3D_ZSTENCIL_CNTL_2 0x4F08 | ||
1239 | # define R300_RB3D_ZS2_STENCIL_REF_SHIFT 0 | ||
1240 | # define R300_RB3D_ZS2_STENCIL_MASK 0xFF | ||
1241 | # define R300_RB3D_ZS2_STENCIL_MASK_SHIFT 8 | ||
1242 | # define R300_RB3D_ZS2_STENCIL_WRITE_MASK_SHIFT 16 | ||
1243 | |||
1244 | /* gap */ | ||
1245 | |||
1246 | #define R300_RB3D_ZSTENCIL_FORMAT 0x4F10 | ||
1247 | # define R300_DEPTH_FORMAT_16BIT_INT_Z (0 << 0) | ||
1248 | # define R300_DEPTH_FORMAT_24BIT_INT_Z (2 << 0) | ||
1249 | |||
1250 | /* gap */ | ||
1251 | #define R300_RB3D_DEPTHOFFSET 0x4F20 | ||
1252 | #define R300_RB3D_DEPTHPITCH 0x4F24 | ||
1253 | # define R300_DEPTHPITCH_MASK 0x00001FF8 /* GUESS */ | ||
1254 | # define R300_DEPTH_TILE_ENABLE (1 << 16) /* GUESS */ | ||
1255 | # define R300_DEPTH_MICROTILE_ENABLE (1 << 17) /* GUESS */ | ||
1256 | # define R300_DEPTH_ENDIAN_NO_SWAP (0 << 18) /* GUESS */ | ||
1257 | # define R300_DEPTH_ENDIAN_WORD_SWAP (1 << 18) /* GUESS */ | ||
1258 | # define R300_DEPTH_ENDIAN_DWORD_SWAP (2 << 18) /* GUESS */ | ||
1259 | |||
1260 | /* BEGIN: Vertex program instruction set | ||
1261 | // Every instruction is four dwords long: | ||
1262 | // DWORD 0: output and opcode | ||
1263 | // DWORD 1: first argument | ||
1264 | // DWORD 2: second argument | ||
1265 | // DWORD 3: third argument | ||
1266 | // | ||
1267 | // Notes: | ||
1268 | // - ABS r, a is implemented as MAX r, a, -a | ||
1269 | // - MOV is implemented as ADD to zero | ||
1270 | // - XPD is implemented as MUL + MAD | ||
1271 | // - FLR is implemented as FRC + ADD | ||
1272 | // - apparently, fglrx tries to schedule instructions so that there is at least | ||
1273 | // one instruction between the write to a temporary and the first read | ||
1274 | // from said temporary; however, violations of this scheduling are allowed | ||
1275 | // - register indices seem to be unrelated to OpenGL aliasing to conventional state | ||
1276 | // - only one attribute and one parameter can be loaded at a time; however, the | ||
1277 | // same attribute/parameter can be used for more than one argument | ||
1278 | // - the second software argument for POW is the third hardware argument (no idea why) | ||
1279 | // - MAD with only temporaries as input seems to use VPI_OUT_SELECT_MAD_2 | ||
1280 | // | ||
1281 | // There is some magic surrounding LIT: | ||
1282 | // The single argument is replicated across all three inputs, but swizzled: | ||
1283 | // First argument: xyzy | ||
1284 | // Second argument: xyzx | ||
1285 | // Third argument: xyzw | ||
1286 | // Whenever the result is used later in the fragment program, fglrx forces x and w | ||
1287 | // to be 1.0 in the input selection; I don't know whether this is strictly necessary */ | ||
1288 | #define R300_VPI_OUT_OP_DOT (1 << 0) | ||
1289 | #define R300_VPI_OUT_OP_MUL (2 << 0) | ||
1290 | #define R300_VPI_OUT_OP_ADD (3 << 0) | ||
1291 | #define R300_VPI_OUT_OP_MAD (4 << 0) | ||
1292 | #define R300_VPI_OUT_OP_DST (5 << 0) | ||
1293 | #define R300_VPI_OUT_OP_FRC (6 << 0) | ||
1294 | #define R300_VPI_OUT_OP_MAX (7 << 0) | ||
1295 | #define R300_VPI_OUT_OP_MIN (8 << 0) | ||
1296 | #define R300_VPI_OUT_OP_SGE (9 << 0) | ||
1297 | #define R300_VPI_OUT_OP_SLT (10 << 0) | ||
1298 | #define R300_VPI_OUT_OP_UNK12 (12 << 0) /* Used in GL_POINT_DISTANCE_ATTENUATION_ARB, vector(scalar, vector) */ | ||
1299 | #define R300_VPI_OUT_OP_EXP (65 << 0) | ||
1300 | #define R300_VPI_OUT_OP_LOG (66 << 0) | ||
1301 | #define R300_VPI_OUT_OP_UNK67 (67 << 0) /* Used in fog computations, scalar(scalar) */ | ||
1302 | #define R300_VPI_OUT_OP_LIT (68 << 0) | ||
1303 | #define R300_VPI_OUT_OP_POW (69 << 0) | ||
1304 | #define R300_VPI_OUT_OP_RCP (70 << 0) | ||
1305 | #define R300_VPI_OUT_OP_RSQ (72 << 0) | ||
1306 | #define R300_VPI_OUT_OP_UNK73 (73 << 0) /* Used in GL_POINT_DISTANCE_ATTENUATION_ARB, scalar(scalar) */ | ||
1307 | #define R300_VPI_OUT_OP_EX2 (75 << 0) | ||
1308 | #define R300_VPI_OUT_OP_LG2 (76 << 0) | ||
1309 | #define R300_VPI_OUT_OP_MAD_2 (128 << 0) | ||
1310 | #define R300_VPI_OUT_OP_UNK129 (129 << 0) /* all temps, vector(scalar, vector, vector) */ | ||
1311 | |||
1312 | #define R300_VPI_OUT_REG_CLASS_TEMPORARY (0 << 8) | ||
1313 | #define R300_VPI_OUT_REG_CLASS_RESULT (2 << 8) | ||
1314 | #define R300_VPI_OUT_REG_CLASS_MASK (31 << 8) | ||
1315 | |||
1316 | #define R300_VPI_OUT_REG_INDEX_SHIFT 13 | ||
1317 | #define R300_VPI_OUT_REG_INDEX_MASK (31 << 13) /* GUESS based on fglrx native limits */ | ||
1318 | |||
1319 | #define R300_VPI_OUT_WRITE_X (1 << 20) | ||
1320 | #define R300_VPI_OUT_WRITE_Y (1 << 21) | ||
1321 | #define R300_VPI_OUT_WRITE_Z (1 << 22) | ||
1322 | #define R300_VPI_OUT_WRITE_W (1 << 23) | ||
1323 | |||
1324 | #define R300_VPI_IN_REG_CLASS_TEMPORARY (0 << 0) | ||
1325 | #define R300_VPI_IN_REG_CLASS_ATTRIBUTE (1 << 0) | ||
1326 | #define R300_VPI_IN_REG_CLASS_PARAMETER (2 << 0) | ||
1327 | #define R300_VPI_IN_REG_CLASS_NONE (9 << 0) | ||
1328 | #define R300_VPI_IN_REG_CLASS_MASK (31 << 0) /* GUESS */ | ||
1329 | |||
1330 | #define R300_VPI_IN_REG_INDEX_SHIFT 5 | ||
1331 | #define R300_VPI_IN_REG_INDEX_MASK (255 << 5) /* GUESS based on fglrx native limits */ | ||
1332 | |||
1333 | /* The R300 can select components from the input register arbitrarily. | ||
1334 | // Use the following constants, shifted by the component shift you | ||
1335 | // want to select */ | ||
1336 | #define R300_VPI_IN_SELECT_X 0 | ||
1337 | #define R300_VPI_IN_SELECT_Y 1 | ||
1338 | #define R300_VPI_IN_SELECT_Z 2 | ||
1339 | #define R300_VPI_IN_SELECT_W 3 | ||
1340 | #define R300_VPI_IN_SELECT_ZERO 4 | ||
1341 | #define R300_VPI_IN_SELECT_ONE 5 | ||
1342 | #define R300_VPI_IN_SELECT_MASK 7 | ||
1343 | |||
1344 | #define R300_VPI_IN_X_SHIFT 13 | ||
1345 | #define R300_VPI_IN_Y_SHIFT 16 | ||
1346 | #define R300_VPI_IN_Z_SHIFT 19 | ||
1347 | #define R300_VPI_IN_W_SHIFT 22 | ||
1348 | |||
1349 | #define R300_VPI_IN_NEG_X (1 << 25) | ||
1350 | #define R300_VPI_IN_NEG_Y (1 << 26) | ||
1351 | #define R300_VPI_IN_NEG_Z (1 << 27) | ||
1352 | #define R300_VPI_IN_NEG_W (1 << 28) | ||
1353 | /* END */ | ||
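/* Sketch: "MOV temp1, temp0" encoded as an ADD to zero, per the note above.
   Filling the unused third argument with REG_CLASS_NONE is a guess. */
u32 vpi[4];
vpi[0] = R300_VPI_OUT_OP_ADD
       | R300_VPI_OUT_REG_CLASS_TEMPORARY
       | (1 << R300_VPI_OUT_REG_INDEX_SHIFT)
       | R300_VPI_OUT_WRITE_X | R300_VPI_OUT_WRITE_Y
       | R300_VPI_OUT_WRITE_Z | R300_VPI_OUT_WRITE_W;
vpi[1] = R300_VPI_IN_REG_CLASS_TEMPORARY              /* arg 1: temp0.xyzw */
       | (0 << R300_VPI_IN_REG_INDEX_SHIFT)
       | (R300_VPI_IN_SELECT_X << R300_VPI_IN_X_SHIFT)
       | (R300_VPI_IN_SELECT_Y << R300_VPI_IN_Y_SHIFT)
       | (R300_VPI_IN_SELECT_Z << R300_VPI_IN_Z_SHIFT)
       | (R300_VPI_IN_SELECT_W << R300_VPI_IN_W_SHIFT);
vpi[2] = R300_VPI_IN_REG_CLASS_TEMPORARY              /* arg 2: literal zero */
       | (R300_VPI_IN_SELECT_ZERO << R300_VPI_IN_X_SHIFT)
       | (R300_VPI_IN_SELECT_ZERO << R300_VPI_IN_Y_SHIFT)
       | (R300_VPI_IN_SELECT_ZERO << R300_VPI_IN_Z_SHIFT)
       | (R300_VPI_IN_SELECT_ZERO << R300_VPI_IN_W_SHIFT);
vpi[3] = R300_VPI_IN_REG_CLASS_NONE;                  /* unused third arg */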
1354 | |||
1355 | //BEGIN: Packet 3 commands | ||
1356 | |||
1357 | // A primitive emission dword. | ||
1358 | #define R300_PRIM_TYPE_NONE (0 << 0) | ||
1359 | #define R300_PRIM_TYPE_POINT (1 << 0) | ||
1360 | #define R300_PRIM_TYPE_LINE (2 << 0) | ||
1361 | #define R300_PRIM_TYPE_LINE_STRIP (3 << 0) | ||
1362 | #define R300_PRIM_TYPE_TRI_LIST (4 << 0) | ||
1363 | #define R300_PRIM_TYPE_TRI_FAN (5 << 0) | ||
1364 | #define R300_PRIM_TYPE_TRI_STRIP (6 << 0) | ||
1365 | #define R300_PRIM_TYPE_TRI_TYPE2 (7 << 0) | ||
1366 | #define R300_PRIM_TYPE_RECT_LIST (8 << 0) | ||
1367 | #define R300_PRIM_TYPE_3VRT_POINT_LIST (9 << 0) | ||
1368 | #define R300_PRIM_TYPE_3VRT_LINE_LIST (10 << 0) | ||
1369 | #define R300_PRIM_TYPE_POINT_SPRITES (11 << 0) // GUESS (based on r200) | ||
1370 | #define R300_PRIM_TYPE_LINE_LOOP (12 << 0) | ||
1371 | #define R300_PRIM_TYPE_QUADS (13 << 0) | ||
1372 | #define R300_PRIM_TYPE_QUAD_STRIP (14 << 0) | ||
1373 | #define R300_PRIM_TYPE_POLYGON (15 << 0) | ||
1374 | #define R300_PRIM_TYPE_MASK 0xF | ||
1375 | #define R300_PRIM_WALK_IND (1 << 4) | ||
1376 | #define R300_PRIM_WALK_LIST (2 << 4) | ||
1377 | #define R300_PRIM_WALK_RING (3 << 4) | ||
1378 | #define R300_PRIM_WALK_MASK (3 << 4) | ||
1379 | #define R300_PRIM_COLOR_ORDER_BGRA (0 << 6) // GUESS (based on r200) | ||
1380 | #define R300_PRIM_COLOR_ORDER_RGBA (1 << 6) // GUESS | ||
1381 | #define R300_PRIM_NUM_VERTICES_SHIFT 16 | ||
1382 | |||
1383 | // Draw a primitive from vertex data in arrays loaded via 3D_LOAD_VBPNTR. | ||
1384 | // Two parameter dwords: | ||
1385 | // 0. The first parameter appears to be always 0 | ||
1386 | // 1. The second parameter is a standard primitive emission dword. | ||
1387 | #define R300_PACKET3_3D_DRAW_VBUF 0x00002800 | ||
1388 | |||
1389 | // Specify the full set of vertex arrays as (address, stride). | ||
1390 | // The first parameter is the number of vertex arrays specified. | ||
1391 | // The rest of the command is a variable length list of blocks, where | ||
1392 | // each block is three dwords long and specifies two arrays. | ||
1393 | // The first dword of a block is split into two words, the less significant | ||
1394 | // word refers to the first array, the more significant word to the second | ||
1395 | // array in the block. | ||
1396 | // The low byte of each word contains the size of an array entry in dwords, | ||
1397 | // the high byte contains the stride of the array. | ||
1398 | // The second dword of a block contains the pointer to the first array, | ||
1399 | // the third dword of a block contains the pointer to the second array. | ||
1400 | // Note that if the total number of arrays is odd, the third dword of | ||
1401 | // the last block is omitted. | ||
1402 | #define R300_PACKET3_3D_LOAD_VBPNTR 0x00002F00 | ||
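// Sketch: the packed (size, stride) dword of a block describing two arrays,
// with sizes and strides given in dwords as described above.
u32 pack = (size0 | (stride0 << 8))            // low word: first array
	 | ((size1 | (stride1 << 8)) << 16);   // high word: second array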
1403 | |||
1404 | #define R300_PACKET3_INDX_BUFFER 0x00003300 | ||
1405 | # define R300_EB_UNK1_SHIFT 24 | ||
1406 | # define R300_EB_UNK1 (0x80<<24) | ||
1407 | # define R300_EB_UNK2 0x0810 | ||
1408 | #define R300_PACKET3_3D_DRAW_INDX_2 0x00003600 | ||
1409 | |||
1410 | //END | ||
1411 | |||
1412 | #endif /* _R300_REG_H */ | ||
diff --git a/drivers/char/drm/radeon_cp.c b/drivers/char/drm/radeon_cp.c index 20bcf872b348..6d9080a3ca7e 100644 --- a/drivers/char/drm/radeon_cp.c +++ b/drivers/char/drm/radeon_cp.c | |||
@@ -32,6 +32,7 @@ | |||
32 | #include "drm.h" | 32 | #include "drm.h" |
33 | #include "radeon_drm.h" | 33 | #include "radeon_drm.h" |
34 | #include "radeon_drv.h" | 34 | #include "radeon_drv.h" |
35 | #include "r300_reg.h" | ||
35 | 36 | ||
36 | #define RADEON_FIFO_DEBUG 0 | 37 | #define RADEON_FIFO_DEBUG 0 |
37 | 38 | ||
@@ -1151,6 +1152,8 @@ static void radeon_cp_init_ring_buffer( drm_device_t *dev, | |||
1151 | 1152 | ||
1152 | #if __OS_HAS_AGP | 1153 | #if __OS_HAS_AGP |
1153 | if ( !dev_priv->is_pci ) { | 1154 | if ( !dev_priv->is_pci ) { |
1155 | /* set RADEON_AGP_BASE here instead of relying on X from user space */ | ||
1156 | RADEON_WRITE(RADEON_AGP_BASE, (unsigned int)dev->agp->base); | ||
1154 | RADEON_WRITE( RADEON_CP_RB_RPTR_ADDR, | 1157 | RADEON_WRITE( RADEON_CP_RB_RPTR_ADDR, |
1155 | dev_priv->ring_rptr->offset | 1158 | dev_priv->ring_rptr->offset |
1156 | - dev->agp->base | 1159 | - dev->agp->base |
@@ -1407,6 +1410,7 @@ static int radeon_do_init_cp( drm_device_t *dev, drm_radeon_init_t *init ) | |||
1407 | radeon_do_cleanup_cp(dev); | 1410 | radeon_do_cleanup_cp(dev); |
1408 | return DRM_ERR(EINVAL); | 1411 | return DRM_ERR(EINVAL); |
1409 | } | 1412 | } |
1413 | dev->agp_buffer_token = init->buffers_offset; | ||
1410 | dev->agp_buffer_map = drm_core_findmap(dev, init->buffers_offset); | 1414 | dev->agp_buffer_map = drm_core_findmap(dev, init->buffers_offset); |
1411 | if(!dev->agp_buffer_map) { | 1415 | if(!dev->agp_buffer_map) { |
1412 | DRM_ERROR("could not find dma buffer region!\n"); | 1416 | DRM_ERROR("could not find dma buffer region!\n"); |
@@ -1625,6 +1629,9 @@ int radeon_cp_init( DRM_IOCTL_ARGS ) | |||
1625 | 1629 | ||
1626 | DRM_COPY_FROM_USER_IOCTL( init, (drm_radeon_init_t __user *)data, sizeof(init) ); | 1630 | DRM_COPY_FROM_USER_IOCTL( init, (drm_radeon_init_t __user *)data, sizeof(init) ); |
1627 | 1631 | ||
1632 | if (init.func == RADEON_INIT_R300_CP) | ||
1633 | r300_init_reg_flags(); | ||
1634 | |||
1628 | switch ( init.func ) { | 1635 | switch ( init.func ) { |
1629 | case RADEON_INIT_CP: | 1636 | case RADEON_INIT_CP: |
1630 | case RADEON_INIT_R200_CP: | 1637 | case RADEON_INIT_R200_CP: |
@@ -2039,15 +2046,43 @@ int radeon_driver_preinit(struct drm_device *dev, unsigned long flags) | |||
2039 | case CHIP_RV200: | 2046 | case CHIP_RV200: |
2040 | case CHIP_R200: | 2047 | case CHIP_R200: |
2041 | case CHIP_R300: | 2048 | case CHIP_R300: |
2049 | case CHIP_R420: | ||
2042 | dev_priv->flags |= CHIP_HAS_HIERZ; | 2050 | dev_priv->flags |= CHIP_HAS_HIERZ; |
2043 | break; | 2051 | break; |
2044 | default: | 2052 | default: |
2045 | /* all other chips have no hierarchical z buffer */ | 2053 | /* all other chips have no hierarchical z buffer */ |
2046 | break; | 2054 | break; |
2047 | } | 2055 | } |
2056 | |||
2057 | if (drm_device_is_agp(dev)) | ||
2058 | dev_priv->flags |= CHIP_IS_AGP; | ||
2059 | |||
2060 | DRM_DEBUG("%s card detected\n", | ||
2061 | ((dev_priv->flags & CHIP_IS_AGP) ? "AGP" : "PCI")); | ||
2048 | return ret; | 2062 | return ret; |
2049 | } | 2063 | } |
2050 | 2064 | ||
2065 | int radeon_presetup(struct drm_device *dev) | ||
2066 | { | ||
2067 | int ret; | ||
2068 | drm_local_map_t *map; | ||
2069 | drm_radeon_private_t *dev_priv = dev->dev_private; | ||
2070 | |||
2071 | ret = drm_addmap(dev, drm_get_resource_start(dev, 2), | ||
2072 | drm_get_resource_len(dev, 2), _DRM_REGISTERS, | ||
2073 | _DRM_READ_ONLY, &dev_priv->mmio); | ||
2074 | if (ret != 0) | ||
2075 | return ret; | ||
2076 | |||
2077 | ret = drm_addmap(dev, drm_get_resource_start(dev, 0), | ||
2078 | drm_get_resource_len(dev, 0), _DRM_FRAME_BUFFER, | ||
2079 | _DRM_WRITE_COMBINING, &map); | ||
2080 | if (ret != 0) | ||
2081 | return ret; | ||
2082 | |||
2083 | return 0; | ||
2084 | } | ||
2085 | |||
2051 | int radeon_driver_postcleanup(struct drm_device *dev) | 2086 | int radeon_driver_postcleanup(struct drm_device *dev) |
2052 | { | 2087 | { |
2053 | drm_radeon_private_t *dev_priv = dev->dev_private; | 2088 | drm_radeon_private_t *dev_priv = dev->dev_private; |
diff --git a/drivers/char/drm/radeon_drm.h b/drivers/char/drm/radeon_drm.h index c1e62d047989..3792798270a4 100644 --- a/drivers/char/drm/radeon_drm.h +++ b/drivers/char/drm/radeon_drm.h | |||
@@ -195,6 +195,52 @@ typedef union { | |||
195 | #define RADEON_WAIT_2D 0x1 | 195 | #define RADEON_WAIT_2D 0x1 |
196 | #define RADEON_WAIT_3D 0x2 | 196 | #define RADEON_WAIT_3D 0x2 |
197 | 197 | ||
198 | /* Allowed parameters for R300_CMD_PACKET3 | ||
199 | */ | ||
200 | #define R300_CMD_PACKET3_CLEAR 0 | ||
201 | #define R300_CMD_PACKET3_RAW 1 | ||
202 | |||
203 | /* Commands understood by cmd_buffer ioctl for R300. | ||
204 | * The interface has not been stabilized, so some of these may be removed | ||
205 | * or reordered before it is stabilized. | ||
206 | */ | ||
207 | #define R300_CMD_PACKET0 1 | ||
208 | #define R300_CMD_VPU 2 /* emit vertex program upload */ | ||
209 | #define R300_CMD_PACKET3 3 /* emit a packet3 */ | ||
210 | #define R300_CMD_END3D 4 /* emit sequence ending 3d rendering */ | ||
211 | #define R300_CMD_CP_DELAY 5 | ||
212 | #define R300_CMD_DMA_DISCARD 6 | ||
213 | #define R300_CMD_WAIT 7 | ||
214 | # define R300_WAIT_2D 0x1 | ||
215 | # define R300_WAIT_3D 0x2 | ||
216 | # define R300_WAIT_2D_CLEAN 0x3 | ||
217 | # define R300_WAIT_3D_CLEAN 0x4 | ||
218 | |||
219 | typedef union { | ||
220 | unsigned int u; | ||
221 | struct { | ||
222 | unsigned char cmd_type, pad0, pad1, pad2; | ||
223 | } header; | ||
224 | struct { | ||
225 | unsigned char cmd_type, count, reglo, reghi; | ||
226 | } packet0; | ||
227 | struct { | ||
228 | unsigned char cmd_type, count, adrlo, adrhi; | ||
229 | } vpu; | ||
230 | struct { | ||
231 | unsigned char cmd_type, packet, pad0, pad1; | ||
232 | } packet3; | ||
233 | struct { | ||
234 | unsigned char cmd_type, packet; | ||
235 | unsigned short count; /* number of packet2 dwords to emit */ | ||
236 | } delay; | ||
237 | struct { | ||
238 | unsigned char cmd_type, buf_idx, pad0, pad1; | ||
239 | } dma; | ||
240 | struct { | ||
241 | unsigned char cmd_type, flags, pad0, pad1; | ||
242 | } wait; | ||
243 | } drm_r300_cmd_header_t; | ||
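A user-space client fills one of these headers per command. As an illustration of the packet0 variant, a write of count registers starting at offset reg might be encoded as below; the reglo/reghi split of the register offset is our reading of the layout, with the authoritative decoding in r300_cmdbuf.c:

	drm_r300_cmd_header_t header;

	header.packet0.cmd_type = R300_CMD_PACKET0;
	header.packet0.count    = count;
	header.packet0.reglo    = reg & 0xff;        /* low byte of register offset */
	header.packet0.reghi    = (reg >> 8) & 0xff; /* high byte */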
198 | 244 | ||
199 | #define RADEON_FRONT 0x1 | 245 | #define RADEON_FRONT 0x1 |
200 | #define RADEON_BACK 0x2 | 246 | #define RADEON_BACK 0x2 |
diff --git a/drivers/char/drm/radeon_drv.c b/drivers/char/drm/radeon_drv.c index 18e4e5b0952f..e0682f64b400 100644 --- a/drivers/char/drm/radeon_drv.c +++ b/drivers/char/drm/radeon_drv.c | |||
@@ -76,6 +76,7 @@ static struct drm_driver driver = { | |||
76 | .driver_features = DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_PCI_DMA | DRIVER_SG | DRIVER_HAVE_IRQ | DRIVER_HAVE_DMA | DRIVER_IRQ_SHARED | DRIVER_IRQ_VBL, | 76 | .driver_features = DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_PCI_DMA | DRIVER_SG | DRIVER_HAVE_IRQ | DRIVER_HAVE_DMA | DRIVER_IRQ_SHARED | DRIVER_IRQ_VBL, |
77 | .dev_priv_size = sizeof(drm_radeon_buf_priv_t), | 77 | .dev_priv_size = sizeof(drm_radeon_buf_priv_t), |
78 | .preinit = radeon_driver_preinit, | 78 | .preinit = radeon_driver_preinit, |
79 | .presetup = radeon_presetup, | ||
79 | .postcleanup = radeon_driver_postcleanup, | 80 | .postcleanup = radeon_driver_postcleanup, |
80 | .prerelease = radeon_driver_prerelease, | 81 | .prerelease = radeon_driver_prerelease, |
81 | .pretakedown = radeon_driver_pretakedown, | 82 | .pretakedown = radeon_driver_pretakedown, |
diff --git a/drivers/char/drm/radeon_drv.h b/drivers/char/drm/radeon_drv.h index 771aa80a5e8c..f12a963ede18 100644 --- a/drivers/char/drm/radeon_drv.h +++ b/drivers/char/drm/radeon_drv.h | |||
@@ -82,9 +82,10 @@ | |||
82 | * - Add support for r100 cube maps | 82 | * - Add support for r100 cube maps |
83 | * 1.16- Add R200_EMIT_PP_TRI_PERF_CNTL packet to support brilinear | 83 | * 1.16- Add R200_EMIT_PP_TRI_PERF_CNTL packet to support brilinear |
84 | * texture filtering on r200 | 84 | * texture filtering on r200 |
85 | * 1.17- Add initial support for R300 (3D). | ||
85 | */ | 86 | */ |
86 | #define DRIVER_MAJOR 1 | 87 | #define DRIVER_MAJOR 1 |
87 | #define DRIVER_MINOR 16 | 88 | #define DRIVER_MINOR 17 |
88 | #define DRIVER_PATCHLEVEL 0 | 89 | #define DRIVER_PATCHLEVEL 0 |
89 | 90 | ||
90 | #define GET_RING_HEAD(dev_priv) DRM_READ32( (dev_priv)->ring_rptr, 0 ) | 91 | #define GET_RING_HEAD(dev_priv) DRM_READ32( (dev_priv)->ring_rptr, 0 ) |
@@ -106,7 +107,9 @@ enum radeon_family { | |||
106 | CHIP_RV280, | 107 | CHIP_RV280, |
107 | CHIP_R300, | 108 | CHIP_R300, |
108 | CHIP_RS300, | 109 | CHIP_RS300, |
110 | CHIP_R350, | ||
109 | CHIP_RV350, | 111 | CHIP_RV350, |
112 | CHIP_R420, | ||
110 | CHIP_LAST, | 113 | CHIP_LAST, |
111 | }; | 114 | }; |
112 | 115 | ||
@@ -290,6 +293,7 @@ extern int radeon_wait_ring( drm_radeon_private_t *dev_priv, int n ); | |||
290 | extern int radeon_do_cp_idle( drm_radeon_private_t *dev_priv ); | 293 | extern int radeon_do_cp_idle( drm_radeon_private_t *dev_priv ); |
291 | 294 | ||
292 | extern int radeon_driver_preinit(struct drm_device *dev, unsigned long flags); | 295 | extern int radeon_driver_preinit(struct drm_device *dev, unsigned long flags); |
296 | extern int radeon_presetup(struct drm_device *dev); | ||
293 | extern int radeon_driver_postcleanup(struct drm_device *dev); | 297 | extern int radeon_driver_postcleanup(struct drm_device *dev); |
294 | 298 | ||
295 | extern int radeon_mem_alloc( DRM_IOCTL_ARGS ); | 299 | extern int radeon_mem_alloc( DRM_IOCTL_ARGS ); |
@@ -320,6 +324,14 @@ extern int radeon_postcleanup( struct drm_device *dev ); | |||
320 | extern long radeon_compat_ioctl(struct file *filp, unsigned int cmd, | 324 | extern long radeon_compat_ioctl(struct file *filp, unsigned int cmd, |
321 | unsigned long arg); | 325 | unsigned long arg); |
322 | 326 | ||
327 | |||
328 | /* r300_cmdbuf.c */ | ||
329 | extern void r300_init_reg_flags(void); | ||
330 | |||
331 | extern int r300_do_cp_cmdbuf(drm_device_t* dev, DRMFILE filp, | ||
332 | drm_file_t* filp_priv, | ||
333 | drm_radeon_cmd_buffer_t* cmdbuf); | ||
334 | |||
323 | /* Flags for stats.boxes | 335 | /* Flags for stats.boxes |
324 | */ | 336 | */ |
325 | #define RADEON_BOX_DMA_IDLE 0x1 | 337 | #define RADEON_BOX_DMA_IDLE 0x1 |
@@ -357,6 +369,11 @@ extern long radeon_compat_ioctl(struct file *filp, unsigned int cmd, | |||
357 | #define RADEON_CRTC2_OFFSET 0x0324 | 369 | #define RADEON_CRTC2_OFFSET 0x0324 |
358 | #define RADEON_CRTC2_OFFSET_CNTL 0x0328 | 370 | #define RADEON_CRTC2_OFFSET_CNTL 0x0328 |
359 | 371 | ||
372 | #define RADEON_MPP_TB_CONFIG 0x01c0 | ||
373 | #define RADEON_MEM_CNTL 0x0140 | ||
374 | #define RADEON_MEM_SDRAM_MODE_REG 0x0158 | ||
375 | #define RADEON_AGP_BASE 0x0170 | ||
376 | |||
360 | #define RADEON_RB3D_COLOROFFSET 0x1c40 | 377 | #define RADEON_RB3D_COLOROFFSET 0x1c40 |
361 | #define RADEON_RB3D_COLORPITCH 0x1c48 | 378 | #define RADEON_RB3D_COLORPITCH 0x1c48 |
362 | 379 | ||
@@ -651,16 +668,27 @@ extern long radeon_compat_ioctl(struct file *filp, unsigned int cmd, | |||
651 | #define RADEON_CP_PACKET1 0x40000000 | 668 | #define RADEON_CP_PACKET1 0x40000000 |
652 | #define RADEON_CP_PACKET2 0x80000000 | 669 | #define RADEON_CP_PACKET2 0x80000000 |
653 | #define RADEON_CP_PACKET3 0xC0000000 | 670 | #define RADEON_CP_PACKET3 0xC0000000 |
671 | # define RADEON_CP_NOP 0x00001000 | ||
672 | # define RADEON_CP_NEXT_CHAR 0x00001900 | ||
673 | # define RADEON_CP_PLY_NEXTSCAN 0x00001D00 | ||
674 | # define RADEON_CP_SET_SCISSORS 0x00001E00 | ||
675 | /* GEN_INDX_PRIM is unsupported starting with R300 */ | ||
654 | # define RADEON_3D_RNDR_GEN_INDX_PRIM 0x00002300 | 676 | # define RADEON_3D_RNDR_GEN_INDX_PRIM 0x00002300 |
655 | # define RADEON_WAIT_FOR_IDLE 0x00002600 | 677 | # define RADEON_WAIT_FOR_IDLE 0x00002600 |
656 | # define RADEON_3D_DRAW_VBUF 0x00002800 | 678 | # define RADEON_3D_DRAW_VBUF 0x00002800 |
657 | # define RADEON_3D_DRAW_IMMD 0x00002900 | 679 | # define RADEON_3D_DRAW_IMMD 0x00002900 |
658 | # define RADEON_3D_DRAW_INDX 0x00002A00 | 680 | # define RADEON_3D_DRAW_INDX 0x00002A00 |
681 | # define RADEON_CP_LOAD_PALETTE 0x00002C00 | ||
659 | # define RADEON_3D_LOAD_VBPNTR 0x00002F00 | 682 | # define RADEON_3D_LOAD_VBPNTR 0x00002F00 |
660 | # define RADEON_MPEG_IDCT_MACROBLOCK 0x00003000 | 683 | # define RADEON_MPEG_IDCT_MACROBLOCK 0x00003000 |
661 | # define RADEON_MPEG_IDCT_MACROBLOCK_REV 0x00003100 | 684 | # define RADEON_MPEG_IDCT_MACROBLOCK_REV 0x00003100 |
662 | # define RADEON_3D_CLEAR_ZMASK 0x00003200 | 685 | # define RADEON_3D_CLEAR_ZMASK 0x00003200 |
686 | # define RADEON_CP_INDX_BUFFER 0x00003300 | ||
687 | # define RADEON_CP_3D_DRAW_VBUF_2 0x00003400 | ||
688 | # define RADEON_CP_3D_DRAW_IMMD_2 0x00003500 | ||
689 | # define RADEON_CP_3D_DRAW_INDX_2 0x00003600 | ||
663 | # define RADEON_3D_CLEAR_HIZ 0x00003700 | 690 | # define RADEON_3D_CLEAR_HIZ 0x00003700 |
691 | # define RADEON_CP_3D_CLEAR_CMASK 0x00003802 | ||
664 | # define RADEON_CNTL_HOSTDATA_BLT 0x00009400 | 692 | # define RADEON_CNTL_HOSTDATA_BLT 0x00009400 |
665 | # define RADEON_CNTL_PAINT_MULTI 0x00009A00 | 693 | # define RADEON_CNTL_PAINT_MULTI 0x00009A00 |
666 | # define RADEON_CNTL_BITBLT_MULTI 0x00009B00 | 694 | # define RADEON_CNTL_BITBLT_MULTI 0x00009B00 |
diff --git a/drivers/char/drm/radeon_state.c b/drivers/char/drm/radeon_state.c index 1f79e249146c..64a3e3a406ef 100644 --- a/drivers/char/drm/radeon_state.c +++ b/drivers/char/drm/radeon_state.c | |||
@@ -1493,7 +1493,7 @@ static void radeon_cp_dispatch_indices( drm_device_t *dev, | |||
1493 | 1493 | ||
1494 | } | 1494 | } |
1495 | 1495 | ||
1496 | #define RADEON_MAX_TEXTURE_SIZE (RADEON_BUFFER_SIZE - 8 * sizeof(u32)) | 1496 | #define RADEON_MAX_TEXTURE_SIZE RADEON_BUFFER_SIZE |
1497 | 1497 | ||
1498 | static int radeon_cp_dispatch_texture( DRMFILE filp, | 1498 | static int radeon_cp_dispatch_texture( DRMFILE filp, |
1499 | drm_device_t *dev, | 1499 | drm_device_t *dev, |
@@ -1506,10 +1506,11 @@ static int radeon_cp_dispatch_texture( DRMFILE filp, | |||
1506 | u32 format; | 1506 | u32 format; |
1507 | u32 *buffer; | 1507 | u32 *buffer; |
1508 | const u8 __user *data; | 1508 | const u8 __user *data; |
1509 | int size, dwords, tex_width, blit_width; | 1509 | int size, dwords, tex_width, blit_width, spitch; |
1510 | u32 height; | 1510 | u32 height; |
1511 | int i; | 1511 | int i; |
1512 | u32 texpitch, microtile; | 1512 | u32 texpitch, microtile; |
1513 | u32 offset; | ||
1513 | RING_LOCALS; | 1514 | RING_LOCALS; |
1514 | 1515 | ||
1515 | DRM_GET_PRIV_WITH_RETURN( filp_priv, filp ); | 1516 | DRM_GET_PRIV_WITH_RETURN( filp_priv, filp ); |
@@ -1530,17 +1531,6 @@ static int radeon_cp_dispatch_texture( DRMFILE filp, | |||
1530 | RADEON_WAIT_UNTIL_IDLE(); | 1531 | RADEON_WAIT_UNTIL_IDLE(); |
1531 | ADVANCE_RING(); | 1532 | ADVANCE_RING(); |
1532 | 1533 | ||
1533 | #ifdef __BIG_ENDIAN | ||
1534 | /* The Mesa texture functions provide the data in little endian as the | ||
1535 | * chip wants it, but we need to compensate for the fact that the CP | ||
1536 | * ring gets byte-swapped | ||
1537 | */ | ||
1538 | BEGIN_RING( 2 ); | ||
1539 | OUT_RING_REG( RADEON_RBBM_GUICNTL, RADEON_HOST_DATA_SWAP_32BIT ); | ||
1540 | ADVANCE_RING(); | ||
1541 | #endif | ||
1542 | |||
1543 | |||
1544 | /* The compiler won't optimize away a division by a variable, | 1534 | /* The compiler won't optimize away a division by a variable, |
1545 | * even if the only legal values are powers of two. Thus, we'll | 1535 | * even if the only legal values are powers of two. Thus, we'll |
1546 | * use a shift instead. | 1536 | * use a shift instead. |
@@ -1572,6 +1562,10 @@ static int radeon_cp_dispatch_texture( DRMFILE filp, | |||
1572 | DRM_ERROR( "invalid texture format %d\n", tex->format ); | 1562 | DRM_ERROR( "invalid texture format %d\n", tex->format ); |
1573 | return DRM_ERR(EINVAL); | 1563 | return DRM_ERR(EINVAL); |
1574 | } | 1564 | } |
1565 | spitch = blit_width >> 6; | ||
1566 | if (spitch == 0 && image->height > 1) | ||
1567 | return DRM_ERR(EINVAL); | ||
1568 | |||
1575 | texpitch = tex->pitch; | 1569 | texpitch = tex->pitch; |
1576 | if ((texpitch << 22) & RADEON_DST_TILE_MICRO) { | 1570 | if ((texpitch << 22) & RADEON_DST_TILE_MICRO) { |
1577 | microtile = 1; | 1571 | microtile = 1; |
@@ -1624,25 +1618,6 @@ static int radeon_cp_dispatch_texture( DRMFILE filp, | |||
1624 | */ | 1618 | */ |
1625 | buffer = (u32*)((char*)dev->agp_buffer_map->handle + buf->offset); | 1619 | buffer = (u32*)((char*)dev->agp_buffer_map->handle + buf->offset); |
1626 | dwords = size / 4; | 1620 | dwords = size / 4; |
1627 | buffer[0] = CP_PACKET3( RADEON_CNTL_HOSTDATA_BLT, dwords + 6 ); | ||
1628 | buffer[1] = (RADEON_GMC_DST_PITCH_OFFSET_CNTL | | ||
1629 | RADEON_GMC_BRUSH_NONE | | ||
1630 | (format << 8) | | ||
1631 | RADEON_GMC_SRC_DATATYPE_COLOR | | ||
1632 | RADEON_ROP3_S | | ||
1633 | RADEON_DP_SRC_SOURCE_HOST_DATA | | ||
1634 | RADEON_GMC_CLR_CMP_CNTL_DIS | | ||
1635 | RADEON_GMC_WR_MSK_DIS); | ||
1636 | |||
1637 | buffer[2] = (texpitch << 22) | (tex->offset >> 10); | ||
1638 | buffer[3] = 0xffffffff; | ||
1639 | buffer[4] = 0xffffffff; | ||
1640 | buffer[5] = (image->y << 16) | image->x; | ||
1641 | buffer[6] = (height << 16) | image->width; | ||
1642 | buffer[7] = dwords; | ||
1643 | buffer += 8; | ||
1644 | |||
1645 | |||
1646 | 1621 | ||
1647 | if (microtile) { | 1622 | if (microtile) { |
1648 | /* texture micro tiling in use, minimum texture width is thus 16 bytes. | 1623 | /* texture micro tiling in use, minimum texture width is thus 16 bytes. |
@@ -1750,9 +1725,28 @@ static int radeon_cp_dispatch_texture( DRMFILE filp, | |||
1750 | } | 1725 | } |
1751 | 1726 | ||
1752 | buf->filp = filp; | 1727 | buf->filp = filp; |
1753 | buf->used = (dwords + 8) * sizeof(u32); | 1728 | buf->used = size; |
1754 | radeon_cp_dispatch_indirect( dev, buf, 0, buf->used ); | 1729 | offset = dev_priv->gart_buffers_offset + buf->offset; |
1755 | radeon_cp_discard_buffer( dev, buf ); | 1730 | BEGIN_RING(9); |
1731 | OUT_RING(CP_PACKET3(RADEON_CNTL_BITBLT_MULTI, 5)); | ||
1732 | OUT_RING(RADEON_GMC_SRC_PITCH_OFFSET_CNTL | | ||
1733 | RADEON_GMC_DST_PITCH_OFFSET_CNTL | | ||
1734 | RADEON_GMC_BRUSH_NONE | | ||
1735 | (format << 8) | | ||
1736 | RADEON_GMC_SRC_DATATYPE_COLOR | | ||
1737 | RADEON_ROP3_S | | ||
1738 | RADEON_DP_SRC_SOURCE_MEMORY | | ||
1739 | RADEON_GMC_CLR_CMP_CNTL_DIS | | ||
1740 | RADEON_GMC_WR_MSK_DIS ); | ||
1741 | OUT_RING((spitch << 22) | (offset >> 10)); | ||
1742 | OUT_RING((texpitch << 22) | (tex->offset >> 10)); | ||
1743 | OUT_RING(0); | ||
1744 | OUT_RING((image->x << 16) | image->y); | ||
1745 | OUT_RING((image->width << 16) | height); | ||
1746 | RADEON_WAIT_UNTIL_2D_IDLE(); | ||
1747 | ADVANCE_RING(); | ||
1748 | |||
1749 | radeon_cp_discard_buffer(dev, buf); | ||
1756 | 1750 | ||
1757 | /* Update the input parameters for next time */ | 1751 | /* Update the input parameters for next time */ |
1758 | image->y += height; | 1752 | image->y += height; |
@@ -2797,6 +2791,17 @@ static int radeon_cp_cmdbuf( DRM_IOCTL_ARGS ) | |||
2797 | 2791 | ||
2798 | orig_nbox = cmdbuf.nbox; | 2792 | orig_nbox = cmdbuf.nbox; |
2799 | 2793 | ||
2794 | if (dev_priv->microcode_version == UCODE_R300) { | ||
2795 | int temp; | ||
2796 | temp = r300_do_cp_cmdbuf(dev, filp, filp_priv, &cmdbuf); | ||
2797 | |||
2798 | if (orig_bufsz != 0) | ||
2799 | drm_free(kbuf, orig_bufsz, DRM_MEM_DRIVER); | ||
2800 | |||
2801 | return temp; | ||
2802 | } | ||
2803 | |||
2804 | /* microcode_version != r300 */ | ||
2800 | while ( cmdbuf.bufsz >= sizeof(header) ) { | 2805 | while ( cmdbuf.bufsz >= sizeof(header) ) { |
2801 | 2806 | ||
2802 | header.i = *(int *)cmdbuf.buf; | 2807 | header.i = *(int *)cmdbuf.buf; |
diff --git a/drivers/char/drm/savage_bci.c b/drivers/char/drm/savage_bci.c new file mode 100644 index 000000000000..2fd40bac7c97 --- /dev/null +++ b/drivers/char/drm/savage_bci.c | |||
@@ -0,0 +1,1096 @@ | |||
1 | /* savage_bci.c -- BCI support for Savage | ||
2 | * | ||
3 | * Copyright 2004 Felix Kuehling | ||
4 | * All Rights Reserved. | ||
5 | * | ||
6 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
7 | * copy of this software and associated documentation files (the "Software"), | ||
8 | * to deal in the Software without restriction, including without limitation | ||
9 | * the rights to use, copy, modify, merge, publish, distribute, sub license, | ||
10 | * and/or sell copies of the Software, and to permit persons to whom the | ||
11 | * Software is furnished to do so, subject to the following conditions: | ||
12 | * | ||
13 | * The above copyright notice and this permission notice (including the | ||
14 | * next paragraph) shall be included in all copies or substantial portions | ||
15 | * of the Software. | ||
16 | * | ||
17 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
18 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
19 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||
20 | * NON-INFRINGEMENT. IN NO EVENT SHALL FELIX KUEHLING BE LIABLE FOR | ||
21 | * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF | ||
22 | * CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION | ||
23 | * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. | ||
24 | */ | ||
25 | #include "drmP.h" | ||
26 | #include "savage_drm.h" | ||
27 | #include "savage_drv.h" | ||
28 | |||
29 | /* Need a long timeout because shadow status updates can take a while, | ||
30 | * and so can waiting for events when the queue is full. */ | ||
31 | #define SAVAGE_DEFAULT_USEC_TIMEOUT 1000000 /* 1s */ | ||
32 | #define SAVAGE_EVENT_USEC_TIMEOUT 5000000 /* 5s */ | ||
33 | #define SAVAGE_FREELIST_DEBUG 0 | ||
34 | |||
35 | static int | ||
36 | savage_bci_wait_fifo_shadow(drm_savage_private_t *dev_priv, unsigned int n) | ||
37 | { | ||
38 | uint32_t mask = dev_priv->status_used_mask; | ||
39 | uint32_t threshold = dev_priv->bci_threshold_hi; | ||
40 | uint32_t status; | ||
41 | int i; | ||
42 | |||
43 | #if SAVAGE_BCI_DEBUG | ||
44 | if (n > dev_priv->cob_size + SAVAGE_BCI_FIFO_SIZE - threshold) | ||
45 | DRM_ERROR("Trying to emit %d words " | ||
46 | "(more than guaranteed space in COB)\n", n); | ||
47 | #endif | ||
48 | |||
49 | for (i = 0; i < SAVAGE_DEFAULT_USEC_TIMEOUT; i++) { | ||
50 | DRM_MEMORYBARRIER(); | ||
51 | status = dev_priv->status_ptr[0]; | ||
52 | if ((status & mask) < threshold) | ||
53 | return 0; | ||
54 | DRM_UDELAY(1); | ||
55 | } | ||
56 | |||
57 | #if SAVAGE_BCI_DEBUG | ||
58 | DRM_ERROR("failed!\n"); | ||
59 | DRM_INFO(" status=0x%08x, threshold=0x%08x\n", status, threshold); | ||
60 | #endif | ||
61 | return DRM_ERR(EBUSY); | ||
62 | } | ||
63 | |||
64 | static int | ||
65 | savage_bci_wait_fifo_s3d(drm_savage_private_t *dev_priv, unsigned int n) | ||
66 | { | ||
67 | uint32_t maxUsed = dev_priv->cob_size + SAVAGE_BCI_FIFO_SIZE - n; | ||
68 | uint32_t status; | ||
69 | int i; | ||
70 | |||
71 | for (i = 0; i < SAVAGE_DEFAULT_USEC_TIMEOUT; i++) { | ||
72 | status = SAVAGE_READ(SAVAGE_STATUS_WORD0); | ||
73 | if ((status & SAVAGE_FIFO_USED_MASK_S3D) <= maxUsed) | ||
74 | return 0; | ||
75 | DRM_UDELAY(1); | ||
76 | } | ||
77 | |||
78 | #if SAVAGE_BCI_DEBUG | ||
79 | DRM_ERROR("failed!\n"); | ||
80 | DRM_INFO(" status=0x%08x\n", status); | ||
81 | #endif | ||
82 | return DRM_ERR(EBUSY); | ||
83 | } | ||
84 | |||
85 | static int | ||
86 | savage_bci_wait_fifo_s4(drm_savage_private_t *dev_priv, unsigned int n) | ||
87 | { | ||
88 | uint32_t maxUsed = dev_priv->cob_size + SAVAGE_BCI_FIFO_SIZE - n; | ||
89 | uint32_t status; | ||
90 | int i; | ||
91 | |||
92 | for (i = 0; i < SAVAGE_DEFAULT_USEC_TIMEOUT; i++) { | ||
93 | status = SAVAGE_READ(SAVAGE_ALT_STATUS_WORD0); | ||
94 | if ((status & SAVAGE_FIFO_USED_MASK_S4) <= maxUsed) | ||
95 | return 0; | ||
96 | DRM_UDELAY(1); | ||
97 | } | ||
98 | |||
99 | #if SAVAGE_BCI_DEBUG | ||
100 | DRM_ERROR("failed!\n"); | ||
101 | DRM_INFO(" status=0x%08x\n", status); | ||
102 | #endif | ||
103 | return DRM_ERR(EBUSY); | ||
104 | } | ||
105 | |||
106 | /* | ||
107 | * Waiting for events. | ||
108 | * | ||
109 | * The BIOS resets the event tag to 0 on mode changes. Therefore we | ||
110 | * never emit 0 to the event tag. If we find a 0 event tag we know the | ||
111 | * BIOS stomped on it and return success, assuming that the BIOS waited | ||
112 | * for engine idle. | ||
113 | * | ||
114 | * Note: if the Xserver uses the event tag it has to follow the same | ||
115 | * rule. Otherwise there may be glitches every 2^16 events. | ||
116 | */ | ||
117 | static int | ||
118 | savage_bci_wait_event_shadow(drm_savage_private_t *dev_priv, uint16_t e) | ||
119 | { | ||
120 | uint32_t status; | ||
121 | int i; | ||
122 | |||
123 | for (i = 0; i < SAVAGE_EVENT_USEC_TIMEOUT; i++) { | ||
124 | DRM_MEMORYBARRIER(); | ||
125 | status = dev_priv->status_ptr[1]; | ||
126 | if ((((status & 0xffff) - e) & 0xffff) <= 0x7fff || | ||
127 | (status & 0xffff) == 0) | ||
128 | return 0; | ||
129 | DRM_UDELAY(1); | ||
130 | } | ||
131 | |||
132 | #if SAVAGE_BCI_DEBUG | ||
133 | DRM_ERROR("failed!\n"); | ||
134 | DRM_INFO(" status=0x%08x, e=0x%04x\n", status, e); | ||
135 | #endif | ||
136 | |||
137 | return DRM_ERR(EBUSY); | ||
138 | } | ||
139 | |||
140 | static int | ||
141 | savage_bci_wait_event_reg(drm_savage_private_t *dev_priv, uint16_t e) | ||
142 | { | ||
143 | uint32_t status; | ||
144 | int i; | ||
145 | |||
146 | for (i = 0; i < SAVAGE_EVENT_USEC_TIMEOUT; i++) { | ||
147 | status = SAVAGE_READ(SAVAGE_STATUS_WORD1); | ||
148 | if ((((status & 0xffff) - e) & 0xffff) <= 0x7fff || | ||
149 | (status & 0xffff) == 0) | ||
150 | return 0; | ||
151 | DRM_UDELAY(1); | ||
152 | } | ||
153 | |||
154 | #if SAVAGE_BCI_DEBUG | ||
155 | DRM_ERROR("failed!\n"); | ||
156 | DRM_INFO(" status=0x%08x, e=0x%04x\n", status, e); | ||
157 | #endif | ||
158 | |||
159 | return DRM_ERR(EBUSY); | ||
160 | } | ||
161 | |||
162 | uint16_t savage_bci_emit_event(drm_savage_private_t *dev_priv, | ||
163 | unsigned int flags) | ||
164 | { | ||
165 | uint16_t count; | ||
166 | BCI_LOCALS; | ||
167 | |||
168 | if (dev_priv->status_ptr) { | ||
169 | /* coordinate with Xserver */ | ||
170 | count = dev_priv->status_ptr[1023]; | ||
171 | if (count < dev_priv->event_counter) | ||
172 | dev_priv->event_wrap++; | ||
173 | } else { | ||
174 | count = dev_priv->event_counter; | ||
175 | } | ||
176 | count = (count + 1) & 0xffff; | ||
177 | if (count == 0) { | ||
178 | count++; /* See the comment above savage_wait_event_*. */ | ||
179 | dev_priv->event_wrap++; | ||
180 | } | ||
181 | dev_priv->event_counter = count; | ||
182 | if (dev_priv->status_ptr) | ||
183 | dev_priv->status_ptr[1023] = (uint32_t)count; | ||
184 | |||
185 | if ((flags & (SAVAGE_WAIT_2D | SAVAGE_WAIT_3D))) { | ||
186 | unsigned int wait_cmd = BCI_CMD_WAIT; | ||
187 | if ((flags & SAVAGE_WAIT_2D)) | ||
188 | wait_cmd |= BCI_CMD_WAIT_2D; | ||
189 | if ((flags & SAVAGE_WAIT_3D)) | ||
190 | wait_cmd |= BCI_CMD_WAIT_3D; | ||
191 | BEGIN_BCI(2); | ||
192 | BCI_WRITE(wait_cmd); | ||
193 | } else { | ||
194 | BEGIN_BCI(1); | ||
195 | } | ||
196 | BCI_WRITE(BCI_CMD_UPDATE_EVENT_TAG | (uint32_t)count); | ||
197 | |||
198 | return count; | ||
199 | } | ||
200 | |||
201 | /* | ||
202 | * Freelist management | ||
203 | */ | ||
204 | static int savage_freelist_init(drm_device_t *dev) | ||
205 | { | ||
206 | drm_savage_private_t *dev_priv = dev->dev_private; | ||
207 | drm_device_dma_t *dma = dev->dma; | ||
208 | drm_buf_t *buf; | ||
209 | drm_savage_buf_priv_t *entry; | ||
210 | int i; | ||
211 | DRM_DEBUG("count=%d\n", dma->buf_count); | ||
212 | |||
213 | dev_priv->head.next = &dev_priv->tail; | ||
214 | dev_priv->head.prev = NULL; | ||
215 | dev_priv->head.buf = NULL; | ||
216 | |||
217 | dev_priv->tail.next = NULL; | ||
218 | dev_priv->tail.prev = &dev_priv->head; | ||
219 | dev_priv->tail.buf = NULL; | ||
220 | |||
221 | for (i = 0; i < dma->buf_count; i++) { | ||
222 | buf = dma->buflist[i]; | ||
223 | entry = buf->dev_private; | ||
224 | |||
225 | SET_AGE(&entry->age, 0, 0); | ||
226 | entry->buf = buf; | ||
227 | |||
228 | entry->next = dev_priv->head.next; | ||
229 | entry->prev = &dev_priv->head; | ||
230 | dev_priv->head.next->prev = entry; | ||
231 | dev_priv->head.next = entry; | ||
232 | } | ||
233 | |||
234 | return 0; | ||
235 | } | ||
236 | |||
237 | static drm_buf_t *savage_freelist_get(drm_device_t *dev) | ||
238 | { | ||
239 | drm_savage_private_t *dev_priv = dev->dev_private; | ||
240 | drm_savage_buf_priv_t *tail = dev_priv->tail.prev; | ||
241 | uint16_t event; | ||
242 | unsigned int wrap; | ||
243 | DRM_DEBUG("\n"); | ||
244 | |||
245 | UPDATE_EVENT_COUNTER(); | ||
246 | if (dev_priv->status_ptr) | ||
247 | event = dev_priv->status_ptr[1] & 0xffff; | ||
248 | else | ||
249 | event = SAVAGE_READ(SAVAGE_STATUS_WORD1) & 0xffff; | ||
250 | wrap = dev_priv->event_wrap; | ||
251 | if (event > dev_priv->event_counter) | ||
252 | wrap--; /* hardware hasn't passed the last wrap yet */ | ||
253 | |||
254 | DRM_DEBUG(" tail=0x%04x %d\n", tail->age.event, tail->age.wrap); | ||
255 | DRM_DEBUG(" head=0x%04x %d\n", event, wrap); | ||
256 | |||
257 | if (tail->buf && (TEST_AGE(&tail->age, event, wrap) || event == 0)) { | ||
258 | drm_savage_buf_priv_t *next = tail->next; | ||
259 | drm_savage_buf_priv_t *prev = tail->prev; | ||
260 | prev->next = next; | ||
261 | next->prev = prev; | ||
262 | tail->next = tail->prev = NULL; | ||
263 | return tail->buf; | ||
264 | } | ||
265 | |||
266 | DRM_DEBUG("returning NULL, tail->buf=%p!\n", tail->buf); | ||
267 | return NULL; | ||
268 | } | ||
269 | |||
270 | void savage_freelist_put(drm_device_t *dev, drm_buf_t *buf) | ||
271 | { | ||
272 | drm_savage_private_t *dev_priv = dev->dev_private; | ||
273 | drm_savage_buf_priv_t *entry = buf->dev_private, *prev, *next; | ||
274 | |||
275 | DRM_DEBUG("age=0x%04x wrap=%d\n", entry->age.event, entry->age.wrap); | ||
276 | |||
277 | if (entry->next != NULL || entry->prev != NULL) { | ||
278 | DRM_ERROR("entry already on freelist.\n"); | ||
279 | return; | ||
280 | } | ||
281 | |||
282 | prev = &dev_priv->head; | ||
283 | next = prev->next; | ||
284 | prev->next = entry; | ||
285 | next->prev = entry; | ||
286 | entry->prev = prev; | ||
287 | entry->next = next; | ||
288 | } | ||
289 | |||
290 | /* | ||
291 | * Command DMA | ||
292 | */ | ||
293 | static int savage_dma_init(drm_savage_private_t *dev_priv) | ||
294 | { | ||
295 | unsigned int i; | ||
296 | |||
297 | dev_priv->nr_dma_pages = dev_priv->cmd_dma->size / | ||
298 | (SAVAGE_DMA_PAGE_SIZE*4); | ||
299 | dev_priv->dma_pages = drm_alloc(sizeof(drm_savage_dma_page_t) * | ||
300 | dev_priv->nr_dma_pages, | ||
301 | DRM_MEM_DRIVER); | ||
302 | if (dev_priv->dma_pages == NULL) | ||
303 | return DRM_ERR(ENOMEM); | ||
304 | |||
305 | for (i = 0; i < dev_priv->nr_dma_pages; ++i) { | ||
306 | SET_AGE(&dev_priv->dma_pages[i].age, 0, 0); | ||
307 | dev_priv->dma_pages[i].used = 0; | ||
308 | dev_priv->dma_pages[i].flushed = 0; | ||
309 | } | ||
310 | SET_AGE(&dev_priv->last_dma_age, 0, 0); | ||
311 | |||
312 | dev_priv->first_dma_page = 0; | ||
313 | dev_priv->current_dma_page = 0; | ||
314 | |||
315 | return 0; | ||
316 | } | ||
317 | |||
318 | void savage_dma_reset(drm_savage_private_t *dev_priv) | ||
319 | { | ||
320 | uint16_t event; | ||
321 | unsigned int wrap, i; | ||
322 | event = savage_bci_emit_event(dev_priv, 0); | ||
323 | wrap = dev_priv->event_wrap; | ||
324 | for (i = 0; i < dev_priv->nr_dma_pages; ++i) { | ||
325 | SET_AGE(&dev_priv->dma_pages[i].age, event, wrap); | ||
326 | dev_priv->dma_pages[i].used = 0; | ||
327 | dev_priv->dma_pages[i].flushed = 0; | ||
328 | } | ||
329 | SET_AGE(&dev_priv->last_dma_age, event, wrap); | ||
330 | dev_priv->first_dma_page = dev_priv->current_dma_page = 0; | ||
331 | } | ||
332 | |||
333 | void savage_dma_wait(drm_savage_private_t *dev_priv, unsigned int page) | ||
334 | { | ||
335 | uint16_t event; | ||
336 | unsigned int wrap; | ||
337 | |||
338 | /* Faked DMA buffer pages don't age. */ | ||
339 | if (dev_priv->cmd_dma == &dev_priv->fake_dma) | ||
340 | return; | ||
341 | |||
342 | UPDATE_EVENT_COUNTER(); | ||
343 | if (dev_priv->status_ptr) | ||
344 | event = dev_priv->status_ptr[1] & 0xffff; | ||
345 | else | ||
346 | event = SAVAGE_READ(SAVAGE_STATUS_WORD1) & 0xffff; | ||
347 | wrap = dev_priv->event_wrap; | ||
348 | if (event > dev_priv->event_counter) | ||
349 | wrap--; /* hardware hasn't passed the last wrap yet */ | ||
350 | |||
351 | if (dev_priv->dma_pages[page].age.wrap > wrap || | ||
352 | (dev_priv->dma_pages[page].age.wrap == wrap && | ||
353 | dev_priv->dma_pages[page].age.event > event)) { | ||
354 | if (dev_priv->wait_evnt(dev_priv, | ||
355 | dev_priv->dma_pages[page].age.event) | ||
356 | < 0) | ||
357 | DRM_ERROR("wait_evnt failed!\n"); | ||
358 | } | ||
359 | } | ||
360 | |||
361 | uint32_t *savage_dma_alloc(drm_savage_private_t *dev_priv, unsigned int n) | ||
362 | { | ||
363 | unsigned int cur = dev_priv->current_dma_page; | ||
364 | unsigned int rest = SAVAGE_DMA_PAGE_SIZE - | ||
365 | dev_priv->dma_pages[cur].used; | ||
366 | unsigned int nr_pages = (n - rest + SAVAGE_DMA_PAGE_SIZE-1) / | ||
367 | SAVAGE_DMA_PAGE_SIZE; | ||
368 | uint32_t *dma_ptr; | ||
369 | unsigned int i; | ||
370 | |||
371 | DRM_DEBUG("cur=%u, cur->used=%u, n=%u, rest=%u, nr_pages=%u\n", | ||
372 | cur, dev_priv->dma_pages[cur].used, n, rest, nr_pages); | ||
373 | |||
374 | if (cur + nr_pages < dev_priv->nr_dma_pages) { | ||
375 | dma_ptr = (uint32_t *)dev_priv->cmd_dma->handle + | ||
376 | cur*SAVAGE_DMA_PAGE_SIZE + | ||
377 | dev_priv->dma_pages[cur].used; | ||
378 | if (n < rest) | ||
379 | rest = n; | ||
380 | dev_priv->dma_pages[cur].used += rest; | ||
381 | n -= rest; | ||
382 | cur++; | ||
383 | } else { | ||
384 | dev_priv->dma_flush(dev_priv); | ||
385 | nr_pages = (n + SAVAGE_DMA_PAGE_SIZE-1) / SAVAGE_DMA_PAGE_SIZE; | ||
386 | for (i = cur; i < dev_priv->nr_dma_pages; ++i) { | ||
387 | dev_priv->dma_pages[i].age = dev_priv->last_dma_age; | ||
388 | dev_priv->dma_pages[i].used = 0; | ||
389 | dev_priv->dma_pages[i].flushed = 0; | ||
390 | } | ||
391 | dma_ptr = (uint32_t *)dev_priv->cmd_dma->handle; | ||
392 | dev_priv->first_dma_page = cur = 0; | ||
393 | } | ||
394 | for (i = cur; nr_pages > 0; ++i, --nr_pages) { | ||
395 | #if SAVAGE_DMA_DEBUG | ||
396 | if (dev_priv->dma_pages[i].used) { | ||
397 | DRM_ERROR("unflushed page %u: used=%u\n", | ||
398 | i, dev_priv->dma_pages[i].used); | ||
399 | } | ||
400 | #endif | ||
401 | if (n > SAVAGE_DMA_PAGE_SIZE) | ||
402 | dev_priv->dma_pages[i].used = SAVAGE_DMA_PAGE_SIZE; | ||
403 | else | ||
404 | dev_priv->dma_pages[i].used = n; | ||
405 | n -= SAVAGE_DMA_PAGE_SIZE; | ||
406 | } | ||
407 | dev_priv->current_dma_page = --i; | ||
408 | |||
409 | DRM_DEBUG("cur=%u, cur->used=%u, n=%u\n", | ||
410 | i, dev_priv->dma_pages[i].used, n); | ||
411 | |||
412 | savage_dma_wait(dev_priv, dev_priv->current_dma_page); | ||
413 | |||
414 | return dma_ptr; | ||
415 | } | ||
416 | |||
417 | static void savage_dma_flush(drm_savage_private_t *dev_priv) | ||
418 | { | ||
419 | unsigned int first = dev_priv->first_dma_page; | ||
420 | unsigned int cur = dev_priv->current_dma_page; | ||
421 | uint16_t event; | ||
422 | unsigned int wrap, pad, align, len, i; | ||
423 | unsigned long phys_addr; | ||
424 | BCI_LOCALS; | ||
425 | |||
426 | if (first == cur && | ||
427 | dev_priv->dma_pages[cur].used == dev_priv->dma_pages[cur].flushed) | ||
428 | return; | ||
429 | |||
430 | /* pad length to a multiple of 2 entries; | ||
431 | * align start of next DMA block to a multiple of 8 entries */ | ||
432 | pad = -dev_priv->dma_pages[cur].used & 1; | ||
433 | align = -(dev_priv->dma_pages[cur].used + pad) & 7; | ||
434 | |||
435 | DRM_DEBUG("first=%u, cur=%u, first->flushed=%u, cur->used=%u, " | ||
436 | "pad=%u, align=%u\n", | ||
437 | first, cur, dev_priv->dma_pages[first].flushed, | ||
438 | dev_priv->dma_pages[cur].used, pad, align); | ||
439 | |||
440 | /* pad with noops */ | ||
441 | if (pad) { | ||
442 | uint32_t *dma_ptr = (uint32_t *)dev_priv->cmd_dma->handle + | ||
443 | cur * SAVAGE_DMA_PAGE_SIZE + | ||
444 | dev_priv->dma_pages[cur].used; | ||
445 | dev_priv->dma_pages[cur].used += pad; | ||
446 | while (pad != 0) { | ||
447 | *dma_ptr++ = BCI_CMD_WAIT; | ||
448 | pad--; | ||
449 | } | ||
450 | } | ||
451 | |||
452 | DRM_MEMORYBARRIER(); | ||
453 | |||
454 | /* do flush ... */ | ||
455 | phys_addr = dev_priv->cmd_dma->offset + | ||
456 | (first * SAVAGE_DMA_PAGE_SIZE + | ||
457 | dev_priv->dma_pages[first].flushed) * 4; | ||
458 | len = (cur - first) * SAVAGE_DMA_PAGE_SIZE + | ||
459 | dev_priv->dma_pages[cur].used - | ||
460 | dev_priv->dma_pages[first].flushed; | ||
461 | |||
462 | DRM_DEBUG("phys_addr=%lx, len=%u\n", | ||
463 | phys_addr | dev_priv->dma_type, len); | ||
464 | |||
465 | BEGIN_BCI(3); | ||
466 | BCI_SET_REGISTERS(SAVAGE_DMABUFADDR, 1); | ||
467 | BCI_WRITE(phys_addr | dev_priv->dma_type); | ||
468 | BCI_DMA(len); | ||
469 | |||
470 | /* fix alignment of the start of the next block */ | ||
471 | dev_priv->dma_pages[cur].used += align; | ||
472 | |||
473 | /* age DMA pages */ | ||
474 | event = savage_bci_emit_event(dev_priv, 0); | ||
475 | wrap = dev_priv->event_wrap; | ||
476 | for (i = first; i < cur; ++i) { | ||
477 | SET_AGE(&dev_priv->dma_pages[i].age, event, wrap); | ||
478 | dev_priv->dma_pages[i].used = 0; | ||
479 | dev_priv->dma_pages[i].flushed = 0; | ||
480 | } | ||
481 | /* age the current page only when it's full */ | ||
482 | if (dev_priv->dma_pages[cur].used == SAVAGE_DMA_PAGE_SIZE) { | ||
483 | SET_AGE(&dev_priv->dma_pages[cur].age, event, wrap); | ||
484 | dev_priv->dma_pages[cur].used = 0; | ||
485 | dev_priv->dma_pages[cur].flushed = 0; | ||
486 | /* advance to next page */ | ||
487 | cur++; | ||
488 | if (cur == dev_priv->nr_dma_pages) | ||
489 | cur = 0; | ||
490 | dev_priv->first_dma_page = dev_priv->current_dma_page = cur; | ||
491 | } else { | ||
492 | dev_priv->first_dma_page = cur; | ||
493 | dev_priv->dma_pages[cur].flushed = dev_priv->dma_pages[i].used; | ||
494 | } | ||
495 | SET_AGE(&dev_priv->last_dma_age, event, wrap); | ||
496 | |||
497 | DRM_DEBUG("first=cur=%u, cur->used=%u, cur->flushed=%u\n", cur, | ||
498 | dev_priv->dma_pages[cur].used, | ||
499 | dev_priv->dma_pages[cur].flushed); | ||
500 | } | ||
501 | |||
502 | static void savage_fake_dma_flush(drm_savage_private_t *dev_priv) | ||
503 | { | ||
504 | unsigned int i, j; | ||
505 | BCI_LOCALS; | ||
506 | |||
507 | if (dev_priv->first_dma_page == dev_priv->current_dma_page && | ||
508 | dev_priv->dma_pages[dev_priv->current_dma_page].used == 0) | ||
509 | return; | ||
510 | |||
511 | DRM_DEBUG("first=%u, cur=%u, cur->used=%u\n", | ||
512 | dev_priv->first_dma_page, dev_priv->current_dma_page, | ||
513 | dev_priv->dma_pages[dev_priv->current_dma_page].used); | ||
514 | |||
515 | for (i = dev_priv->first_dma_page; | ||
516 | i <= dev_priv->current_dma_page && dev_priv->dma_pages[i].used; | ||
517 | ++i) { | ||
518 | uint32_t *dma_ptr = (uint32_t *)dev_priv->cmd_dma->handle + | ||
519 | i * SAVAGE_DMA_PAGE_SIZE; | ||
520 | #if SAVAGE_DMA_DEBUG | ||
521 | /* Sanity check: all pages except the last one must be full. */ | ||
522 | if (i < dev_priv->current_dma_page && | ||
523 | dev_priv->dma_pages[i].used != SAVAGE_DMA_PAGE_SIZE) { | ||
524 | DRM_ERROR("partial DMA page %u: used=%u", | ||
525 | i, dev_priv->dma_pages[i].used); | ||
526 | } | ||
527 | #endif | ||
528 | BEGIN_BCI(dev_priv->dma_pages[i].used); | ||
529 | for (j = 0; j < dev_priv->dma_pages[i].used; ++j) { | ||
530 | BCI_WRITE(dma_ptr[j]); | ||
531 | } | ||
532 | dev_priv->dma_pages[i].used = 0; | ||
533 | } | ||
534 | |||
535 | /* reset to first page */ | ||
536 | dev_priv->first_dma_page = dev_priv->current_dma_page = 0; | ||
537 | } | ||
538 | |||
539 | /* | ||
540 | * Initialize mappings. On Savage4 and SavageIX the alignment | ||
541 | * and size of the aperture is not suitable for automatic MTRR setup | ||
542 | * in drm_addmap. Therefore we do it manually before the maps are | ||
543 | * initialized. We also need to take care of deleting the MTRRs in | ||
544 | * postcleanup. | ||
545 | */ | ||
546 | int savage_preinit(drm_device_t *dev, unsigned long chipset) | ||
547 | { | ||
548 | drm_savage_private_t *dev_priv; | ||
549 | unsigned long mmio_base, fb_base, fb_size, aperture_base; | ||
550 | /* fb_rsrc and aper_rsrc aren't really used currently, but still exist | ||
551 | * in case we decide we need information on the BAR for BSD in the | ||
552 | * future. | ||
553 | */ | ||
554 | unsigned int fb_rsrc, aper_rsrc; | ||
555 | int ret = 0; | ||
556 | |||
557 | dev_priv = drm_alloc(sizeof(drm_savage_private_t), DRM_MEM_DRIVER); | ||
558 | if (dev_priv == NULL) | ||
559 | return DRM_ERR(ENOMEM); | ||
560 | |||
561 | memset(dev_priv, 0, sizeof(drm_savage_private_t)); | ||
562 | dev->dev_private = (void *)dev_priv; | ||
563 | dev_priv->chipset = (enum savage_family)chipset; | ||
564 | |||
565 | dev_priv->mtrr[0].handle = -1; | ||
566 | dev_priv->mtrr[1].handle = -1; | ||
567 | dev_priv->mtrr[2].handle = -1; | ||
568 | if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) { | ||
569 | fb_rsrc = 0; | ||
570 | fb_base = drm_get_resource_start(dev, 0); | ||
571 | fb_size = SAVAGE_FB_SIZE_S3; | ||
572 | mmio_base = fb_base + SAVAGE_FB_SIZE_S3; | ||
573 | aper_rsrc = 0; | ||
574 | aperture_base = fb_base + SAVAGE_APERTURE_OFFSET; | ||
575 | /* this should always be true */ | ||
576 | if (drm_get_resource_len(dev, 0) == 0x08000000) { | ||
577 | /* Don't make MMIO write-combining! We need 3 | ||
578 | * MTRRs. */ | ||
579 | dev_priv->mtrr[0].base = fb_base; | ||
580 | dev_priv->mtrr[0].size = 0x01000000; | ||
581 | dev_priv->mtrr[0].handle = mtrr_add( | ||
582 | dev_priv->mtrr[0].base, dev_priv->mtrr[0].size, | ||
583 | MTRR_TYPE_WRCOMB, 1); | ||
584 | dev_priv->mtrr[1].base = fb_base+0x02000000; | ||
585 | dev_priv->mtrr[1].size = 0x02000000; | ||
586 | dev_priv->mtrr[1].handle = mtrr_add( | ||
587 | dev_priv->mtrr[1].base, dev_priv->mtrr[1].size, | ||
588 | MTRR_TYPE_WRCOMB, 1); | ||
589 | dev_priv->mtrr[2].base = fb_base+0x04000000; | ||
590 | dev_priv->mtrr[2].size = 0x04000000; | ||
591 | dev_priv->mtrr[2].handle = mtrr_add( | ||
592 | dev_priv->mtrr[2].base, dev_priv->mtrr[2].size, | ||
593 | MTRR_TYPE_WRCOMB, 1); | ||
594 | } else { | ||
595 | DRM_ERROR("strange pci_resource_len %08lx\n", | ||
596 | drm_get_resource_len(dev, 0)); | ||
597 | } | ||
598 | } else if (chipset != S3_SUPERSAVAGE && chipset != S3_SAVAGE2000) { | ||
599 | mmio_base = drm_get_resource_start(dev, 0); | ||
600 | fb_rsrc = 1; | ||
601 | fb_base = drm_get_resource_start(dev, 1); | ||
602 | fb_size = SAVAGE_FB_SIZE_S4; | ||
603 | aper_rsrc = 1; | ||
604 | aperture_base = fb_base + SAVAGE_APERTURE_OFFSET; | ||
605 | /* this should always be true */ | ||
606 | if (drm_get_resource_len(dev, 1) == 0x08000000) { | ||
607 | /* Can use one MTRR to cover both fb and | ||
608 | * aperture. */ | ||
609 | dev_priv->mtrr[0].base = fb_base; | ||
610 | dev_priv->mtrr[0].size = 0x08000000; | ||
611 | dev_priv->mtrr[0].handle = mtrr_add( | ||
612 | dev_priv->mtrr[0].base, dev_priv->mtrr[0].size, | ||
613 | MTRR_TYPE_WRCOMB, 1); | ||
614 | } else { | ||
615 | DRM_ERROR("strange pci_resource_len %08lx\n", | ||
616 | drm_get_resource_len(dev, 1)); | ||
617 | } | ||
618 | } else { | ||
619 | mmio_base = drm_get_resource_start(dev, 0); | ||
620 | fb_rsrc = 1; | ||
621 | fb_base = drm_get_resource_start(dev, 1); | ||
622 | fb_size = drm_get_resource_len(dev, 1); | ||
623 | aper_rsrc = 2; | ||
624 | aperture_base = drm_get_resource_start(dev, 2); | ||
625 | /* Automatic MTRR setup will do the right thing. */ | ||
626 | } | ||
627 | |||
628 | ret = drm_addmap(dev, mmio_base, SAVAGE_MMIO_SIZE, _DRM_REGISTERS, | ||
629 | _DRM_READ_ONLY, &dev_priv->mmio); | ||
630 | if (ret) | ||
631 | return ret; | ||
632 | |||
633 | ret = drm_addmap(dev, fb_base, fb_size, _DRM_FRAME_BUFFER, | ||
634 | _DRM_WRITE_COMBINING, &dev_priv->fb); | ||
635 | if (ret) | ||
636 | return ret; | ||
637 | |||
638 | ret = drm_addmap(dev, aperture_base, SAVAGE_APERTURE_SIZE, | ||
639 | _DRM_FRAME_BUFFER, _DRM_WRITE_COMBINING, | ||
640 | &dev_priv->aperture); | ||
641 | if (ret) | ||
642 | return ret; | ||
643 | |||
644 | return ret; | ||
645 | } | ||
646 | |||
647 | /* | ||
648 | * Delete MTRRs and free device-private data. | ||
649 | */ | ||
650 | int savage_postcleanup(drm_device_t *dev) | ||
651 | { | ||
652 | drm_savage_private_t *dev_priv = dev->dev_private; | ||
653 | int i; | ||
654 | |||
655 | for (i = 0; i < 3; ++i) | ||
656 | if (dev_priv->mtrr[i].handle >= 0) | ||
657 | mtrr_del(dev_priv->mtrr[i].handle, | ||
658 | dev_priv->mtrr[i].base, | ||
659 | dev_priv->mtrr[i].size); | ||
660 | |||
661 | drm_free(dev_priv, sizeof(drm_savage_private_t), DRM_MEM_DRIVER); | ||
662 | |||
663 | return 0; | ||
664 | } | ||
665 | |||
666 | static int savage_do_init_bci(drm_device_t *dev, drm_savage_init_t *init) | ||
667 | { | ||
668 | drm_savage_private_t *dev_priv = dev->dev_private; | ||
669 | |||
670 | if (init->fb_bpp != 16 && init->fb_bpp != 32) { | ||
671 | DRM_ERROR("invalid frame buffer bpp %d!\n", init->fb_bpp); | ||
672 | return DRM_ERR(EINVAL); | ||
673 | } | ||
674 | if (init->depth_bpp != 16 && init->depth_bpp != 32) { | ||
675 | DRM_ERROR("invalid depth buffer bpp %d!\n", init->fb_bpp); | ||
676 | return DRM_ERR(EINVAL); | ||
677 | } | ||
678 | if (init->dma_type != SAVAGE_DMA_AGP && | ||
679 | init->dma_type != SAVAGE_DMA_PCI) { | ||
680 | DRM_ERROR("invalid dma memory type %d!\n", init->dma_type); | ||
681 | return DRM_ERR(EINVAL); | ||
682 | } | ||
683 | |||
684 | dev_priv->cob_size = init->cob_size; | ||
685 | dev_priv->bci_threshold_lo = init->bci_threshold_lo; | ||
686 | dev_priv->bci_threshold_hi = init->bci_threshold_hi; | ||
687 | dev_priv->dma_type = init->dma_type; | ||
688 | |||
689 | dev_priv->fb_bpp = init->fb_bpp; | ||
690 | dev_priv->front_offset = init->front_offset; | ||
691 | dev_priv->front_pitch = init->front_pitch; | ||
692 | dev_priv->back_offset = init->back_offset; | ||
693 | dev_priv->back_pitch = init->back_pitch; | ||
694 | dev_priv->depth_bpp = init->depth_bpp; | ||
695 | dev_priv->depth_offset = init->depth_offset; | ||
696 | dev_priv->depth_pitch = init->depth_pitch; | ||
697 | |||
698 | dev_priv->texture_offset = init->texture_offset; | ||
699 | dev_priv->texture_size = init->texture_size; | ||
700 | |||
701 | DRM_GETSAREA(); | ||
702 | if (!dev_priv->sarea) { | ||
703 | DRM_ERROR("could not find sarea!\n"); | ||
704 | savage_do_cleanup_bci(dev); | ||
705 | return DRM_ERR(EINVAL); | ||
706 | } | ||
707 | if (init->status_offset != 0) { | ||
708 | dev_priv->status = drm_core_findmap(dev, init->status_offset); | ||
709 | if (!dev_priv->status) { | ||
710 | DRM_ERROR("could not find shadow status region!\n"); | ||
711 | savage_do_cleanup_bci(dev); | ||
712 | return DRM_ERR(EINVAL); | ||
713 | } | ||
714 | } else { | ||
715 | dev_priv->status = NULL; | ||
716 | } | ||
717 | if (dev_priv->dma_type == SAVAGE_DMA_AGP && init->buffers_offset) { | ||
718 | dev->agp_buffer_map = drm_core_findmap(dev, | ||
719 | init->buffers_offset); | ||
720 | if (!dev->agp_buffer_map) { | ||
721 | DRM_ERROR("could not find DMA buffer region!\n"); | ||
722 | savage_do_cleanup_bci(dev); | ||
723 | return DRM_ERR(EINVAL); | ||
724 | } | ||
725 | drm_core_ioremap(dev->agp_buffer_map, dev); | ||
726 | if (!dev->agp_buffer_map) { | ||
727 | DRM_ERROR("failed to ioremap DMA buffer region!\n"); | ||
728 | savage_do_cleanup_bci(dev); | ||
729 | return DRM_ERR(ENOMEM); | ||
730 | } | ||
731 | } | ||
732 | if (init->agp_textures_offset) { | ||
733 | dev_priv->agp_textures = | ||
734 | drm_core_findmap(dev, init->agp_textures_offset); | ||
735 | if (!dev_priv->agp_textures) { | ||
736 | DRM_ERROR("could not find agp texture region!\n"); | ||
737 | savage_do_cleanup_bci(dev); | ||
738 | return DRM_ERR(EINVAL); | ||
739 | } | ||
740 | } else { | ||
741 | dev_priv->agp_textures = NULL; | ||
742 | } | ||
743 | |||
744 | if (init->cmd_dma_offset) { | ||
745 | if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) { | ||
746 | DRM_ERROR("command DMA not supported on " | ||
747 | "Savage3D/MX/IX.\n"); | ||
748 | savage_do_cleanup_bci(dev); | ||
749 | return DRM_ERR(EINVAL); | ||
750 | } | ||
751 | if (dev->dma && dev->dma->buflist) { | ||
752 | DRM_ERROR("command and vertex DMA not supported " | ||
753 | "at the same time.\n"); | ||
754 | savage_do_cleanup_bci(dev); | ||
755 | return DRM_ERR(EINVAL); | ||
756 | } | ||
757 | dev_priv->cmd_dma = drm_core_findmap(dev, init->cmd_dma_offset); | ||
758 | if (!dev_priv->cmd_dma) { | ||
759 | DRM_ERROR("could not find command DMA region!\n"); | ||
760 | savage_do_cleanup_bci(dev); | ||
761 | return DRM_ERR(EINVAL); | ||
762 | } | ||
763 | if (dev_priv->dma_type == SAVAGE_DMA_AGP) { | ||
764 | if (dev_priv->cmd_dma->type != _DRM_AGP) { | ||
765 | DRM_ERROR("AGP command DMA region is not a " | ||
766 | "_DRM_AGP map!\n"); | ||
767 | savage_do_cleanup_bci(dev); | ||
768 | return DRM_ERR(EINVAL); | ||
769 | } | ||
770 | drm_core_ioremap(dev_priv->cmd_dma, dev); | ||
771 | if (!dev_priv->cmd_dma->handle) { | ||
772 | DRM_ERROR("failed to ioremap command " | ||
773 | "DMA region!\n"); | ||
774 | savage_do_cleanup_bci(dev); | ||
775 | return DRM_ERR(ENOMEM); | ||
776 | } | ||
777 | } else if (dev_priv->cmd_dma->type != _DRM_CONSISTENT) { | ||
778 | DRM_ERROR("PCI command DMA region is not a " | ||
779 | "_DRM_CONSISTENT map!\n"); | ||
780 | savage_do_cleanup_bci(dev); | ||
781 | return DRM_ERR(EINVAL); | ||
782 | } | ||
783 | } else { | ||
784 | dev_priv->cmd_dma = NULL; | ||
785 | } | ||
786 | |||
787 | dev_priv->dma_flush = savage_dma_flush; | ||
788 | if (!dev_priv->cmd_dma) { | ||
789 | DRM_DEBUG("falling back to faked command DMA.\n"); | ||
790 | dev_priv->fake_dma.offset = 0; | ||
791 | dev_priv->fake_dma.size = SAVAGE_FAKE_DMA_SIZE; | ||
792 | dev_priv->fake_dma.type = _DRM_SHM; | ||
793 | dev_priv->fake_dma.handle = drm_alloc(SAVAGE_FAKE_DMA_SIZE, | ||
794 | DRM_MEM_DRIVER); | ||
795 | if (!dev_priv->fake_dma.handle) { | ||
796 | DRM_ERROR("could not allocate faked DMA buffer!\n"); | ||
797 | savage_do_cleanup_bci(dev); | ||
798 | return DRM_ERR(ENOMEM); | ||
799 | } | ||
800 | dev_priv->cmd_dma = &dev_priv->fake_dma; | ||
801 | dev_priv->dma_flush = savage_fake_dma_flush; | ||
802 | } | ||
803 | |||
804 | dev_priv->sarea_priv = | ||
805 | (drm_savage_sarea_t *)((uint8_t *)dev_priv->sarea->handle + | ||
806 | init->sarea_priv_offset); | ||
807 | |||
808 | /* setup bitmap descriptors */ | ||
809 | { | ||
810 | unsigned int color_tile_format; | ||
811 | unsigned int depth_tile_format; | ||
812 | unsigned int front_stride, back_stride, depth_stride; | ||
813 | if (dev_priv->chipset <= S3_SAVAGE4) { | ||
814 | color_tile_format = dev_priv->fb_bpp == 16 ? | ||
815 | SAVAGE_BD_TILE_16BPP : SAVAGE_BD_TILE_32BPP; | ||
816 | depth_tile_format = dev_priv->depth_bpp == 16 ? | ||
817 | SAVAGE_BD_TILE_16BPP : SAVAGE_BD_TILE_32BPP; | ||
818 | } else { | ||
819 | color_tile_format = SAVAGE_BD_TILE_DEST; | ||
820 | depth_tile_format = SAVAGE_BD_TILE_DEST; | ||
821 | } | ||
822 | front_stride = dev_priv->front_pitch / (dev_priv->fb_bpp/8); | ||
823 | back_stride = dev_priv-> back_pitch / (dev_priv->fb_bpp/8); | ||
824 | depth_stride = dev_priv->depth_pitch / (dev_priv->depth_bpp/8); | ||
825 | |||
826 | dev_priv->front_bd = front_stride | SAVAGE_BD_BW_DISABLE | | ||
827 | (dev_priv->fb_bpp << SAVAGE_BD_BPP_SHIFT) | | ||
828 | (color_tile_format << SAVAGE_BD_TILE_SHIFT); | ||
829 | |||
830 | dev_priv-> back_bd = back_stride | SAVAGE_BD_BW_DISABLE | | ||
831 | (dev_priv->fb_bpp << SAVAGE_BD_BPP_SHIFT) | | ||
832 | (color_tile_format << SAVAGE_BD_TILE_SHIFT); | ||
833 | |||
834 | dev_priv->depth_bd = depth_stride | SAVAGE_BD_BW_DISABLE | | ||
835 | (dev_priv->depth_bpp << SAVAGE_BD_BPP_SHIFT) | | ||
836 | (depth_tile_format << SAVAGE_BD_TILE_SHIFT); | ||
837 | } | ||
838 | |||
839 | /* setup status and bci ptr */ | ||
840 | dev_priv->event_counter = 0; | ||
841 | dev_priv->event_wrap = 0; | ||
842 | dev_priv->bci_ptr = (volatile uint32_t *) | ||
843 | ((uint8_t *)dev_priv->mmio->handle + SAVAGE_BCI_OFFSET); | ||
844 | if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) { | ||
845 | dev_priv->status_used_mask = SAVAGE_FIFO_USED_MASK_S3D; | ||
846 | } else { | ||
847 | dev_priv->status_used_mask = SAVAGE_FIFO_USED_MASK_S4; | ||
848 | } | ||
849 | if (dev_priv->status != NULL) { | ||
850 | dev_priv->status_ptr = | ||
851 | (volatile uint32_t *)dev_priv->status->handle; | ||
852 | dev_priv->wait_fifo = savage_bci_wait_fifo_shadow; | ||
853 | dev_priv->wait_evnt = savage_bci_wait_event_shadow; | ||
854 | dev_priv->status_ptr[1023] = dev_priv->event_counter; | ||
855 | } else { | ||
856 | dev_priv->status_ptr = NULL; | ||
857 | if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) { | ||
858 | dev_priv->wait_fifo = savage_bci_wait_fifo_s3d; | ||
859 | } else { | ||
860 | dev_priv->wait_fifo = savage_bci_wait_fifo_s4; | ||
861 | } | ||
862 | dev_priv->wait_evnt = savage_bci_wait_event_reg; | ||
863 | } | ||
864 | |||
865 | /* cliprect functions */ | ||
866 | if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) | ||
867 | dev_priv->emit_clip_rect = savage_emit_clip_rect_s3d; | ||
868 | else | ||
869 | dev_priv->emit_clip_rect = savage_emit_clip_rect_s4; | ||
870 | |||
871 | if (savage_freelist_init(dev) < 0) { | ||
872 | DRM_ERROR("could not initialize freelist\n"); | ||
873 | savage_do_cleanup_bci(dev); | ||
874 | return DRM_ERR(ENOMEM); | ||
875 | } | ||
876 | |||
877 | if (savage_dma_init(dev_priv) < 0) { | ||
878 | DRM_ERROR("could not initialize command DMA\n"); | ||
879 | savage_do_cleanup_bci(dev); | ||
880 | return DRM_ERR(ENOMEM); | ||
881 | } | ||
882 | |||
883 | return 0; | ||
884 | } | ||
885 | |||
886 | int savage_do_cleanup_bci(drm_device_t *dev) | ||
887 | { | ||
888 | drm_savage_private_t *dev_priv = dev->dev_private; | ||
889 | |||
890 | if (dev_priv->cmd_dma == &dev_priv->fake_dma) { | ||
891 | if (dev_priv->fake_dma.handle) | ||
892 | drm_free(dev_priv->fake_dma.handle, | ||
893 | SAVAGE_FAKE_DMA_SIZE, DRM_MEM_DRIVER); | ||
894 | } else if (dev_priv->cmd_dma && dev_priv->cmd_dma->handle && | ||
895 | dev_priv->cmd_dma->type == _DRM_AGP && | ||
896 | dev_priv->dma_type == SAVAGE_DMA_AGP) | ||
897 | drm_core_ioremapfree(dev_priv->cmd_dma, dev); | ||
898 | |||
899 | if (dev_priv->dma_type == SAVAGE_DMA_AGP && | ||
900 | dev->agp_buffer_map && dev->agp_buffer_map->handle) { | ||
901 | drm_core_ioremapfree(dev->agp_buffer_map, dev); | ||
902 | /* make sure the next instance (which may be running | ||
903 | * in PCI mode) doesn't try to use an old | ||
904 | * agp_buffer_map. */ | ||
905 | dev->agp_buffer_map = NULL; | ||
906 | } | ||
907 | |||
908 | if (dev_priv->dma_pages) | ||
909 | drm_free(dev_priv->dma_pages, | ||
910 | sizeof(drm_savage_dma_page_t)*dev_priv->nr_dma_pages, | ||
911 | DRM_MEM_DRIVER); | ||
912 | |||
913 | return 0; | ||
914 | } | ||
915 | |||
916 | static int savage_bci_init(DRM_IOCTL_ARGS) | ||
917 | { | ||
918 | DRM_DEVICE; | ||
919 | drm_savage_init_t init; | ||
920 | |||
921 | LOCK_TEST_WITH_RETURN(dev, filp); | ||
922 | |||
923 | DRM_COPY_FROM_USER_IOCTL(init, (drm_savage_init_t __user *)data, | ||
924 | sizeof(init)); | ||
925 | |||
926 | switch (init.func) { | ||
927 | case SAVAGE_INIT_BCI: | ||
928 | return savage_do_init_bci(dev, &init); | ||
929 | case SAVAGE_CLEANUP_BCI: | ||
930 | return savage_do_cleanup_bci(dev); | ||
931 | } | ||
932 | |||
933 | return DRM_ERR(EINVAL); | ||
934 | } | ||
935 | |||
936 | static int savage_bci_event_emit(DRM_IOCTL_ARGS) | ||
937 | { | ||
938 | DRM_DEVICE; | ||
939 | drm_savage_private_t *dev_priv = dev->dev_private; | ||
940 | drm_savage_event_emit_t event; | ||
941 | |||
942 | DRM_DEBUG("\n"); | ||
943 | |||
944 | LOCK_TEST_WITH_RETURN(dev, filp); | ||
945 | |||
946 | DRM_COPY_FROM_USER_IOCTL(event, (drm_savage_event_emit_t __user *)data, | ||
947 | sizeof(event)); | ||
948 | |||
949 | event.count = savage_bci_emit_event(dev_priv, event.flags); | ||
950 | event.count |= dev_priv->event_wrap << 16; | ||
951 | DRM_COPY_TO_USER_IOCTL(&((drm_savage_event_emit_t __user *)data)->count, | ||
952 | event.count, sizeof(event.count)); | ||
953 | return 0; | ||
954 | } | ||
955 | |||
956 | static int savage_bci_event_wait(DRM_IOCTL_ARGS) | ||
957 | { | ||
958 | DRM_DEVICE; | ||
959 | drm_savage_private_t *dev_priv = dev->dev_private; | ||
960 | drm_savage_event_wait_t event; | ||
961 | unsigned int event_e, hw_e; | ||
962 | unsigned int event_w, hw_w; | ||
963 | |||
964 | DRM_DEBUG("\n"); | ||
965 | |||
966 | DRM_COPY_FROM_USER_IOCTL(event, (drm_savage_event_wait_t __user *)data, | ||
967 | sizeof(event)); | ||
968 | |||
969 | UPDATE_EVENT_COUNTER(); | ||
970 | if (dev_priv->status_ptr) | ||
971 | hw_e = dev_priv->status_ptr[1] & 0xffff; | ||
972 | else | ||
973 | hw_e = SAVAGE_READ(SAVAGE_STATUS_WORD1) & 0xffff; | ||
974 | hw_w = dev_priv->event_wrap; | ||
975 | if (hw_e > dev_priv->event_counter) | ||
976 | hw_w--; /* hardware hasn't passed the last wrap yet */ | ||
977 | |||
978 | event_e = event.count & 0xffff; | ||
979 | event_w = event.count >> 16; | ||
980 | |||
981 | /* Don't need to wait if | ||
982 | * - event counter wrapped since the event was emitted or | ||
983 | * - the hardware has advanced up to or over the event to wait for. | ||
984 | */ | ||
985 | 	if (event_w < hw_w || (event_w == hw_w && event_e <= hw_e)) | ||
986 | return 0; | ||
987 | else | ||
988 | return dev_priv->wait_evnt(dev_priv, event_e); | ||
989 | } | ||
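
The wait path above folds the hardware's 16-bit event tag and a software wrap count into one 32-bit value. A minimal sketch of the same decision in isolation (the helper name is invented for illustration):

	/* Wrap-aware "has the hardware passed this event yet?" test.
	 * ev_count is the value returned by the EVENT_EMIT ioctl:
	 * low 16 bits = event tag, high 16 bits = wrap count. */
	static int savage_event_passed(unsigned int ev_count,
	                               unsigned int hw_e, unsigned int hw_w)
	{
	        unsigned int event_e = ev_count & 0xffff;
	        unsigned int event_w = ev_count >> 16;

	        /* an earlier wrap, or the same wrap with a tag at or
	         * below the hardware's current tag, has already passed */
	        return event_w < hw_w || (event_w == hw_w && event_e <= hw_e);
	}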
990 | |||
991 | /* | ||
992 | * DMA buffer management | ||
993 | */ | ||
994 | |||
995 | static int savage_bci_get_buffers(DRMFILE filp, drm_device_t *dev, drm_dma_t *d) | ||
996 | { | ||
997 | drm_buf_t *buf; | ||
998 | int i; | ||
999 | |||
1000 | for (i = d->granted_count; i < d->request_count; i++) { | ||
1001 | buf = savage_freelist_get(dev); | ||
1002 | if (!buf) | ||
1003 | return DRM_ERR(EAGAIN); | ||
1004 | |||
1005 | buf->filp = filp; | ||
1006 | |||
1007 | if (DRM_COPY_TO_USER(&d->request_indices[i], | ||
1008 | &buf->idx, sizeof(buf->idx))) | ||
1009 | return DRM_ERR(EFAULT); | ||
1010 | if (DRM_COPY_TO_USER(&d->request_sizes[i], | ||
1011 | &buf->total, sizeof(buf->total))) | ||
1012 | return DRM_ERR(EFAULT); | ||
1013 | |||
1014 | d->granted_count++; | ||
1015 | } | ||
1016 | return 0; | ||
1017 | } | ||
1018 | |||
1019 | int savage_bci_buffers(DRM_IOCTL_ARGS) | ||
1020 | { | ||
1021 | DRM_DEVICE; | ||
1022 | drm_device_dma_t *dma = dev->dma; | ||
1023 | drm_dma_t d; | ||
1024 | int ret = 0; | ||
1025 | |||
1026 | LOCK_TEST_WITH_RETURN(dev, filp); | ||
1027 | |||
1028 | DRM_COPY_FROM_USER_IOCTL(d, (drm_dma_t __user *)data, sizeof(d)); | ||
1029 | |||
1030 | /* Please don't send us buffers. | ||
1031 | */ | ||
1032 | if (d.send_count != 0) { | ||
1033 | DRM_ERROR("Process %d trying to send %d buffers via drmDMA\n", | ||
1034 | DRM_CURRENTPID, d.send_count); | ||
1035 | return DRM_ERR(EINVAL); | ||
1036 | } | ||
1037 | |||
1038 | /* We'll send you buffers. | ||
1039 | */ | ||
1040 | if (d.request_count < 0 || d.request_count > dma->buf_count) { | ||
1041 | DRM_ERROR("Process %d trying to get %d buffers (of %d max)\n", | ||
1042 | DRM_CURRENTPID, d.request_count, dma->buf_count); | ||
1043 | return DRM_ERR(EINVAL); | ||
1044 | } | ||
1045 | |||
1046 | d.granted_count = 0; | ||
1047 | |||
1048 | if (d.request_count) { | ||
1049 | ret = savage_bci_get_buffers(filp, dev, &d); | ||
1050 | } | ||
1051 | |||
1052 | DRM_COPY_TO_USER_IOCTL((drm_dma_t __user *)data, d, sizeof(d)); | ||
1053 | |||
1054 | return ret; | ||
1055 | } | ||
1056 | |||
1057 | void savage_reclaim_buffers(drm_device_t *dev, DRMFILE filp) { | ||
1058 | drm_device_dma_t *dma = dev->dma; | ||
1059 | drm_savage_private_t *dev_priv = dev->dev_private; | ||
1060 | int i; | ||
1061 | |||
1062 | if (!dma) | ||
1063 | return; | ||
1064 | if (!dev_priv) | ||
1065 | return; | ||
1066 | if (!dma->buflist) | ||
1067 | return; | ||
1068 | |||
1069 | /*i830_flush_queue(dev);*/ | ||
1070 | |||
1071 | for (i = 0; i < dma->buf_count; i++) { | ||
1072 | drm_buf_t *buf = dma->buflist[i]; | ||
1073 | drm_savage_buf_priv_t *buf_priv = buf->dev_private; | ||
1074 | |||
1075 | if (buf->filp == filp && buf_priv && | ||
1076 | buf_priv->next == NULL && buf_priv->prev == NULL) { | ||
1077 | uint16_t event; | ||
1078 | DRM_DEBUG("reclaimed from client\n"); | ||
1079 | event = savage_bci_emit_event(dev_priv, SAVAGE_WAIT_3D); | ||
1080 | SET_AGE(&buf_priv->age, event, dev_priv->event_wrap); | ||
1081 | savage_freelist_put(dev, buf); | ||
1082 | } | ||
1083 | } | ||
1084 | |||
1085 | drm_core_reclaim_buffers(dev, filp); | ||
1086 | } | ||
1087 | |||
1088 | |||
1089 | drm_ioctl_desc_t savage_ioctls[] = { | ||
1090 | [DRM_IOCTL_NR(DRM_SAVAGE_BCI_INIT)] = {savage_bci_init, 1, 1}, | ||
1091 | [DRM_IOCTL_NR(DRM_SAVAGE_BCI_CMDBUF)] = {savage_bci_cmdbuf, 1, 0}, | ||
1092 | [DRM_IOCTL_NR(DRM_SAVAGE_BCI_EVENT_EMIT)] = {savage_bci_event_emit, 1, 0}, | ||
1093 | [DRM_IOCTL_NR(DRM_SAVAGE_BCI_EVENT_WAIT)] = {savage_bci_event_wait, 1, 0}, | ||
1094 | }; | ||
1095 | |||
1096 | int savage_max_ioctl = DRM_ARRAY_SIZE(savage_ioctls); | ||
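
For context, userspace reaches these handlers through libdrm's command ioctl wrappers. A hypothetical sketch, not part of this patch; it assumes an open DRM file descriptor fd and the savage_drm.h definitions that follow:

	/* Emit an event tag after 3D rendering, then wait for it. */
	drm_savage_event_emit_t ev = { .count = 0, .flags = SAVAGE_WAIT_3D };
	drmCommandWriteRead(fd, DRM_SAVAGE_BCI_EVENT_EMIT, &ev, sizeof(ev));

	drm_savage_event_wait_t wait = { .count = ev.count, .flags = 0 };
	drmCommandWrite(fd, DRM_SAVAGE_BCI_EVENT_WAIT, &wait, sizeof(wait));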
diff --git a/drivers/char/drm/savage_drm.h b/drivers/char/drm/savage_drm.h new file mode 100644 index 000000000000..6526c9aa7589 --- /dev/null +++ b/drivers/char/drm/savage_drm.h | |||
@@ -0,0 +1,209 @@ | |||
1 | /* savage_drm.h -- Public header for the savage driver | ||
2 | * | ||
3 | * Copyright 2004 Felix Kuehling | ||
4 | * All Rights Reserved. | ||
5 | * | ||
6 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
7 | * copy of this software and associated documentation files (the "Software"), | ||
8 | * to deal in the Software without restriction, including without limitation | ||
9 | * the rights to use, copy, modify, merge, publish, distribute, sub license, | ||
10 | * and/or sell copies of the Software, and to permit persons to whom the | ||
11 | * Software is furnished to do so, subject to the following conditions: | ||
12 | * | ||
13 | * The above copyright notice and this permission notice (including the | ||
14 | * next paragraph) shall be included in all copies or substantial portions | ||
15 | * of the Software. | ||
16 | * | ||
17 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
18 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
19 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||
20 | * NON-INFRINGEMENT. IN NO EVENT SHALL FELIX KUEHLING BE LIABLE FOR | ||
21 | * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF | ||
22 | * CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION | ||
23 | * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. | ||
24 | */ | ||
25 | |||
26 | #ifndef __SAVAGE_DRM_H__ | ||
27 | #define __SAVAGE_DRM_H__ | ||
28 | |||
29 | #ifndef __SAVAGE_SAREA_DEFINES__ | ||
30 | #define __SAVAGE_SAREA_DEFINES__ | ||
31 | |||
32 | /* 2 heaps (1 for card, 1 for agp), each divided into up to 128 | ||
33 | * regions, subject to a minimum region size of (1<<16) == 64k. | ||
34 | * | ||
35 | * Clients may subdivide regions internally, but when sharing between | ||
36 | * clients, the region size is the minimum granularity. | ||
37 | */ | ||
38 | |||
39 | #define SAVAGE_CARD_HEAP 0 | ||
40 | #define SAVAGE_AGP_HEAP 1 | ||
41 | #define SAVAGE_NR_TEX_HEAPS 2 | ||
42 | #define SAVAGE_NR_TEX_REGIONS 16 | ||
43 | #define SAVAGE_LOG_MIN_TEX_REGION_SIZE 16 | ||
44 | |||
45 | #endif /* __SAVAGE_SAREA_DEFINES__ */ | ||
46 | |||
47 | typedef struct _drm_savage_sarea { | ||
48 | /* LRU lists for texture memory in agp space and on the card. | ||
49 | */ | ||
50 | drm_tex_region_t texList[SAVAGE_NR_TEX_HEAPS][SAVAGE_NR_TEX_REGIONS+1]; | ||
51 | unsigned int texAge[SAVAGE_NR_TEX_HEAPS]; | ||
52 | |||
53 | /* Mechanism to validate card state. | ||
54 | */ | ||
55 | int ctxOwner; | ||
56 | } drm_savage_sarea_t, *drm_savage_sarea_ptr; | ||
57 | |||
58 | /* Savage-specific ioctls | ||
59 | */ | ||
60 | #define DRM_SAVAGE_BCI_INIT 0x00 | ||
61 | #define DRM_SAVAGE_BCI_CMDBUF 0x01 | ||
62 | #define DRM_SAVAGE_BCI_EVENT_EMIT 0x02 | ||
63 | #define DRM_SAVAGE_BCI_EVENT_WAIT 0x03 | ||
64 | |||
65 | #define DRM_IOCTL_SAVAGE_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_SAVAGE_BCI_INIT, drm_savage_init_t) | ||
66 | #define DRM_IOCTL_SAVAGE_CMDBUF DRM_IOW( DRM_COMMAND_BASE + DRM_SAVAGE_BCI_CMDBUF, drm_savage_cmdbuf_t) | ||
67 | #define DRM_IOCTL_SAVAGE_EVENT_EMIT DRM_IOWR(DRM_COMMAND_BASE + DRM_SAVAGE_BCI_EVENT_EMIT, drm_savage_event_emit_t) | ||
68 | #define DRM_IOCTL_SAVAGE_EVENT_WAIT DRM_IOW( DRM_COMMAND_BASE + DRM_SAVAGE_BCI_EVENT_WAIT, drm_savage_event_wait_t) | ||
69 | |||
70 | #define SAVAGE_DMA_PCI 1 | ||
71 | #define SAVAGE_DMA_AGP 3 | ||
72 | typedef struct drm_savage_init { | ||
73 | enum { | ||
74 | SAVAGE_INIT_BCI = 1, | ||
75 | SAVAGE_CLEANUP_BCI = 2 | ||
76 | } func; | ||
77 | unsigned int sarea_priv_offset; | ||
78 | |||
79 | /* some parameters */ | ||
80 | unsigned int cob_size; | ||
81 | unsigned int bci_threshold_lo, bci_threshold_hi; | ||
82 | unsigned int dma_type; | ||
83 | |||
84 | /* frame buffer layout */ | ||
85 | unsigned int fb_bpp; | ||
86 | unsigned int front_offset, front_pitch; | ||
87 | unsigned int back_offset, back_pitch; | ||
88 | unsigned int depth_bpp; | ||
89 | unsigned int depth_offset, depth_pitch; | ||
90 | |||
91 | /* local textures */ | ||
92 | unsigned int texture_offset; | ||
93 | unsigned int texture_size; | ||
94 | |||
95 | /* physical locations of non-permanent maps */ | ||
96 | unsigned long status_offset; | ||
97 | unsigned long buffers_offset; | ||
98 | unsigned long agp_textures_offset; | ||
99 | unsigned long cmd_dma_offset; | ||
100 | } drm_savage_init_t; | ||
101 | |||
102 | typedef union drm_savage_cmd_header drm_savage_cmd_header_t; | ||
103 | typedef struct drm_savage_cmdbuf { | ||
104 | /* command buffer in client's address space */ | ||
105 | drm_savage_cmd_header_t __user *cmd_addr; | ||
106 | unsigned int size; /* size of the command buffer in 64bit units */ | ||
107 | |||
108 | unsigned int dma_idx; /* DMA buffer index to use */ | ||
109 | int discard; /* discard DMA buffer when done */ | ||
110 | /* vertex buffer in client's address space */ | ||
111 | unsigned int __user *vb_addr; | ||
112 | unsigned int vb_size; /* size of client vertex buffer in bytes */ | ||
113 | unsigned int vb_stride; /* stride of vertices in 32bit words */ | ||
114 | /* boxes in client's address space */ | ||
115 | drm_clip_rect_t __user *box_addr; | ||
116 | unsigned int nbox; /* number of clipping boxes */ | ||
117 | } drm_savage_cmdbuf_t; | ||
118 | |||
119 | #define SAVAGE_WAIT_2D 0x1 /* wait for 2D idle before updating event tag */ | ||
120 | #define SAVAGE_WAIT_3D 0x2 /* wait for 3D idle before updating event tag */ | ||
121 | #define SAVAGE_WAIT_IRQ 0x4 /* emit or wait for IRQ, not implemented yet */ | ||
122 | typedef struct drm_savage_event { | ||
123 | unsigned int count; | ||
124 | unsigned int flags; | ||
125 | } drm_savage_event_emit_t, drm_savage_event_wait_t; | ||
126 | |||
127 | /* Commands for the cmdbuf ioctl | ||
128 | */ | ||
129 | #define SAVAGE_CMD_STATE 0 /* a range of state registers */ | ||
130 | #define SAVAGE_CMD_DMA_PRIM 1 /* vertices from DMA buffer */ | ||
131 | #define SAVAGE_CMD_VB_PRIM 2 /* vertices from client vertex buffer */ | ||
132 | #define SAVAGE_CMD_DMA_IDX 3 /* indexed vertices from DMA buffer */ | ||
133 | #define SAVAGE_CMD_VB_IDX 4 /* indexed vertices from client vertex buffer */ | ||
134 | #define SAVAGE_CMD_CLEAR 5 /* clear buffers */ | ||
135 | #define SAVAGE_CMD_SWAP 6 /* swap buffers */ | ||
136 | |||
137 | /* Primitive types | ||
138 | */ | ||
139 | #define SAVAGE_PRIM_TRILIST 0 /* triangle list */ | ||
140 | #define SAVAGE_PRIM_TRISTRIP 1 /* triangle strip */ | ||
141 | #define SAVAGE_PRIM_TRIFAN 2 /* triangle fan */ | ||
142 | #define SAVAGE_PRIM_TRILIST_201 3 /* reorder verts for correct flat | ||
143 | * shading on s3d */ | ||
144 | |||
145 | /* Skip flags (vertex format) | ||
146 | */ | ||
147 | #define SAVAGE_SKIP_Z 0x01 | ||
148 | #define SAVAGE_SKIP_W 0x02 | ||
149 | #define SAVAGE_SKIP_C0 0x04 | ||
150 | #define SAVAGE_SKIP_C1 0x08 | ||
151 | #define SAVAGE_SKIP_S0 0x10 | ||
152 | #define SAVAGE_SKIP_T0 0x20 | ||
153 | #define SAVAGE_SKIP_ST0 0x30 | ||
154 | #define SAVAGE_SKIP_S1 0x40 | ||
155 | #define SAVAGE_SKIP_T1 0x80 | ||
156 | #define SAVAGE_SKIP_ST1 0xc0 | ||
157 | #define SAVAGE_SKIP_ALL_S3D 0x3f | ||
158 | #define SAVAGE_SKIP_ALL_S4 0xff | ||
159 | |||
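
Each skip bit drops one 32-bit word from the vertex layout, so the vertex size follows from the full size minus the number of set bits; savage_state.c below does exactly this subtraction inline. A condensed sketch (the helper name is invented here):

	/* Vertex size in dwords for a given skip mask: full vertices are
	 * 8 dwords on Savage3D/MX/IX and 10 on Savage4-class chips, and
	 * each of the 8 skip bits removes one dword. */
	static unsigned int savage_vtx_size(unsigned int skip, int is_s3d)
	{
	        unsigned int size = is_s3d ? 8 : 10;
	        unsigned int i;

	        for (i = 0; i < 8; i++)
	                size -= (skip >> i) & 1;
	        return size;
	}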
160 | /* Buffer names for clear command | ||
161 | */ | ||
162 | #define SAVAGE_FRONT 0x1 | ||
163 | #define SAVAGE_BACK 0x2 | ||
164 | #define SAVAGE_DEPTH 0x4 | ||
165 | |||
166 | /* 64-bit command header | ||
167 | */ | ||
168 | union drm_savage_cmd_header { | ||
169 | struct { | ||
170 | unsigned char cmd; /* command */ | ||
171 | unsigned char pad0; | ||
172 | unsigned short pad1; | ||
173 | unsigned short pad2; | ||
174 | unsigned short pad3; | ||
175 | } cmd; /* generic */ | ||
176 | struct { | ||
177 | unsigned char cmd; | ||
178 | unsigned char global; /* need idle engine? */ | ||
179 | unsigned short count; /* number of consecutive registers */ | ||
180 | unsigned short start; /* first register */ | ||
181 | unsigned short pad3; | ||
182 | } state; /* SAVAGE_CMD_STATE */ | ||
183 | struct { | ||
184 | unsigned char cmd; | ||
185 | unsigned char prim; /* primitive type */ | ||
186 | unsigned short skip; /* vertex format (skip flags) */ | ||
187 | unsigned short count; /* number of vertices */ | ||
188 | unsigned short start; /* first vertex in DMA/vertex buffer */ | ||
189 | } prim; /* SAVAGE_CMD_DMA_PRIM, SAVAGE_CMD_VB_PRIM */ | ||
190 | struct { | ||
191 | unsigned char cmd; | ||
192 | unsigned char prim; | ||
193 | unsigned short skip; | ||
194 | unsigned short count; /* number of indices that follow */ | ||
195 | unsigned short pad3; | ||
196 | } idx; /* SAVAGE_CMD_DMA_IDX, SAVAGE_CMD_VB_IDX */ | ||
197 | struct { | ||
198 | unsigned char cmd; | ||
199 | unsigned char pad0; | ||
200 | unsigned short pad1; | ||
201 | unsigned int flags; | ||
202 | } clear0; /* SAVAGE_CMD_CLEAR */ | ||
203 | struct { | ||
204 | unsigned int mask; | ||
205 | unsigned int value; | ||
206 | } clear1; /* SAVAGE_CMD_CLEAR data */ | ||
207 | }; | ||
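
To illustrate the layout, a clear request occupies one header entry (clear0) followed by one data entry (clear1). A hypothetical userspace encoder might fill a command buffer like this; the mask/value semantics are inferred from the field names:

	/* Encode SAVAGE_CMD_CLEAR of the front and depth buffers into two
	 * 64-bit cmdbuf entries; pad fields are assumed zeroed. */
	union drm_savage_cmd_header buf[2];

	memset(buf, 0, sizeof(buf));            /* needs <string.h> */
	buf[0].clear0.cmd   = SAVAGE_CMD_CLEAR;
	buf[0].clear0.flags = SAVAGE_FRONT | SAVAGE_DEPTH;
	buf[1].clear1.mask  = 0xffffffff;       /* plane write mask */
	buf[1].clear1.value = 0;                /* clear value */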
208 | |||
209 | #endif | ||
diff --git a/drivers/char/drm/savage_drv.c b/drivers/char/drm/savage_drv.c new file mode 100644 index 000000000000..ac8d270427ca --- /dev/null +++ b/drivers/char/drm/savage_drv.c | |||
@@ -0,0 +1,112 @@ | |||
1 | /* savage_drv.c -- Savage driver for Linux | ||
2 | * | ||
3 | * Copyright 2004 Felix Kuehling | ||
4 | * All Rights Reserved. | ||
5 | * | ||
6 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
7 | * copy of this software and associated documentation files (the "Software"), | ||
8 | * to deal in the Software without restriction, including without limitation | ||
9 | * the rights to use, copy, modify, merge, publish, distribute, sub license, | ||
10 | * and/or sell copies of the Software, and to permit persons to whom the | ||
11 | * Software is furnished to do so, subject to the following conditions: | ||
12 | * | ||
13 | * The above copyright notice and this permission notice (including the | ||
14 | * next paragraph) shall be included in all copies or substantial portions | ||
15 | * of the Software. | ||
16 | * | ||
17 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
18 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
19 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||
20 | * NON-INFRINGEMENT. IN NO EVENT SHALL FELIX KUEHLING BE LIABLE FOR | ||
21 | * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF | ||
22 | * CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION | ||
23 | * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. | ||
24 | */ | ||
25 | |||
26 | #include <linux/config.h> | ||
27 | #include "drmP.h" | ||
28 | #include "savage_drm.h" | ||
29 | #include "savage_drv.h" | ||
30 | |||
31 | #include "drm_pciids.h" | ||
32 | |||
33 | static int postinit( struct drm_device *dev, unsigned long flags ) | ||
34 | { | ||
35 | DRM_INFO( "Initialized %s %d.%d.%d %s on minor %d: %s\n", | ||
36 | DRIVER_NAME, | ||
37 | DRIVER_MAJOR, | ||
38 | DRIVER_MINOR, | ||
39 | DRIVER_PATCHLEVEL, | ||
40 | DRIVER_DATE, | ||
41 | dev->primary.minor, | ||
42 | pci_pretty_name(dev->pdev) | ||
43 | ); | ||
44 | return 0; | ||
45 | } | ||
46 | |||
47 | static int version( drm_version_t *version ) | ||
48 | { | ||
49 | int len; | ||
50 | |||
51 | version->version_major = DRIVER_MAJOR; | ||
52 | version->version_minor = DRIVER_MINOR; | ||
53 | version->version_patchlevel = DRIVER_PATCHLEVEL; | ||
54 | DRM_COPY( version->name, DRIVER_NAME ); | ||
55 | DRM_COPY( version->date, DRIVER_DATE ); | ||
56 | DRM_COPY( version->desc, DRIVER_DESC ); | ||
57 | return 0; | ||
58 | } | ||
59 | |||
60 | static struct pci_device_id pciidlist[] = { | ||
61 | savage_PCI_IDS | ||
62 | }; | ||
63 | |||
64 | extern drm_ioctl_desc_t savage_ioctls[]; | ||
65 | extern int savage_max_ioctl; | ||
66 | |||
67 | static struct drm_driver driver = { | ||
68 | .driver_features = | ||
69 | DRIVER_USE_AGP | DRIVER_USE_MTRR | | ||
70 | DRIVER_HAVE_DMA | DRIVER_PCI_DMA, | ||
71 | .dev_priv_size = sizeof(drm_savage_buf_priv_t), | ||
72 | .preinit = savage_preinit, | ||
73 | .postinit = postinit, | ||
74 | .postcleanup = savage_postcleanup, | ||
75 | .reclaim_buffers = savage_reclaim_buffers, | ||
76 | .get_map_ofs = drm_core_get_map_ofs, | ||
77 | .get_reg_ofs = drm_core_get_reg_ofs, | ||
78 | .version = version, | ||
79 | .ioctls = savage_ioctls, | ||
80 | .dma_ioctl = savage_bci_buffers, | ||
81 | .fops = { | ||
82 | .owner = THIS_MODULE, | ||
83 | .open = drm_open, | ||
84 | .release = drm_release, | ||
85 | .ioctl = drm_ioctl, | ||
86 | .mmap = drm_mmap, | ||
87 | .poll = drm_poll, | ||
88 | .fasync = drm_fasync, | ||
89 | }, | ||
90 | .pci_driver = { | ||
91 | .name = DRIVER_NAME, | ||
92 | .id_table = pciidlist, | ||
93 | } | ||
94 | }; | ||
95 | |||
96 | static int __init savage_init(void) | ||
97 | { | ||
98 | driver.num_ioctls = savage_max_ioctl; | ||
99 | return drm_init(&driver); | ||
100 | } | ||
101 | |||
102 | static void __exit savage_exit(void) | ||
103 | { | ||
104 | drm_exit(&driver); | ||
105 | } | ||
106 | |||
107 | module_init(savage_init); | ||
108 | module_exit(savage_exit); | ||
109 | |||
110 | MODULE_AUTHOR( DRIVER_AUTHOR ); | ||
111 | MODULE_DESCRIPTION( DRIVER_DESC ); | ||
112 | MODULE_LICENSE("GPL and additional rights"); | ||
diff --git a/drivers/char/drm/savage_drv.h b/drivers/char/drm/savage_drv.h new file mode 100644 index 000000000000..a45434944658 --- /dev/null +++ b/drivers/char/drm/savage_drv.h | |||
@@ -0,0 +1,579 @@ | |||
1 | /* savage_drv.h -- Private header for the savage driver | ||
2 | * | ||
3 | * Copyright 2004 Felix Kuehling | ||
4 | * All Rights Reserved. | ||
5 | * | ||
6 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
7 | * copy of this software and associated documentation files (the "Software"), | ||
8 | * to deal in the Software without restriction, including without limitation | ||
9 | * the rights to use, copy, modify, merge, publish, distribute, sub license, | ||
10 | * and/or sell copies of the Software, and to permit persons to whom the | ||
11 | * Software is furnished to do so, subject to the following conditions: | ||
12 | * | ||
13 | * The above copyright notice and this permission notice (including the | ||
14 | * next paragraph) shall be included in all copies or substantial portions | ||
15 | * of the Software. | ||
16 | * | ||
17 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
18 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
19 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||
20 | * NON-INFRINGEMENT. IN NO EVENT SHALL FELIX KUEHLING BE LIABLE FOR | ||
21 | * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF | ||
22 | * CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION | ||
23 | * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. | ||
24 | */ | ||
25 | |||
26 | #ifndef __SAVAGE_DRV_H__ | ||
27 | #define __SAVAGE_DRV_H__ | ||
28 | |||
29 | #define DRIVER_AUTHOR "Felix Kuehling" | ||
30 | |||
31 | #define DRIVER_NAME "savage" | ||
32 | #define DRIVER_DESC "Savage3D/MX/IX, Savage4, SuperSavage, Twister, ProSavage[DDR]" | ||
33 | #define DRIVER_DATE "20050313" | ||
34 | |||
35 | #define DRIVER_MAJOR 2 | ||
36 | #define DRIVER_MINOR 4 | ||
37 | #define DRIVER_PATCHLEVEL 1 | ||
38 | /* Interface history: | ||
39 | * | ||
40 | * 1.x The DRM driver from the VIA/S3 code drop, basically a dummy | ||
41 | * 2.0 The first real DRM | ||
42 | * 2.1 Scissors registers managed by the DRM, 3D operations clipped by | ||
43 | * cliprects of the cmdbuf ioctl | ||
44 | * 2.2 Implemented SAVAGE_CMD_DMA_IDX and SAVAGE_CMD_VB_IDX | ||
45 | * 2.3 Event counters used by BCI_EVENT_EMIT/WAIT ioctls are now 32 bits | ||
46 | * wide and thus very long lived (unlikely to ever wrap). The size | ||
47 | * in the struct was 32 bits before, but only 16 bits were used | ||
48 | * 2.4 Implemented command DMA. Now drm_savage_init_t.cmd_dma_offset is | ||
49 | * actually used | ||
50 | */ | ||
51 | |||
52 | typedef struct drm_savage_age { | ||
53 | uint16_t event; | ||
54 | unsigned int wrap; | ||
55 | } drm_savage_age_t; | ||
56 | |||
57 | typedef struct drm_savage_buf_priv { | ||
58 | struct drm_savage_buf_priv *next; | ||
59 | struct drm_savage_buf_priv *prev; | ||
60 | drm_savage_age_t age; | ||
61 | drm_buf_t *buf; | ||
62 | } drm_savage_buf_priv_t; | ||
63 | |||
64 | typedef struct drm_savage_dma_page { | ||
65 | drm_savage_age_t age; | ||
66 | unsigned int used, flushed; | ||
67 | } drm_savage_dma_page_t; | ||
68 | #define SAVAGE_DMA_PAGE_SIZE 1024 /* in dwords */ | ||
69 | /* Fake DMA buffer size in bytes. 4 pages. Allows a maximum command | ||
70 | * size of 16kbytes or 4k entries. Minimum requirement would be | ||
71 | * 10kbytes for 255 40-byte vertices in one drawing command. */ | ||
72 | #define SAVAGE_FAKE_DMA_SIZE (SAVAGE_DMA_PAGE_SIZE*4*4) | ||
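
The comment's arithmetic checks out: 1024 dwords per page x 4 bytes x 4 pages = 16384 bytes, i.e. 4096 dword entries. A compile-time sketch (using C11 _Static_assert; the kernel of this era would use BUILD_BUG_ON instead):

	_Static_assert(SAVAGE_FAKE_DMA_SIZE == 16384,
	               "fake DMA buffer is 16 kbytes");
	_Static_assert(SAVAGE_FAKE_DMA_SIZE / 4 == 4096,
	               "16 kbytes hold 4k dword entries");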
73 | |||
74 | /* interesting bits of hardware state that are saved in dev_priv */ | ||
75 | typedef union { | ||
76 | struct drm_savage_common_state { | ||
77 | uint32_t vbaddr; | ||
78 | } common; | ||
79 | struct { | ||
80 | unsigned char pad[sizeof(struct drm_savage_common_state)]; | ||
81 | uint32_t texctrl, texaddr; | ||
82 | uint32_t scstart, new_scstart; | ||
83 | uint32_t scend, new_scend; | ||
84 | } s3d; | ||
85 | struct { | ||
86 | unsigned char pad[sizeof(struct drm_savage_common_state)]; | ||
87 | uint32_t texdescr, texaddr0, texaddr1; | ||
88 | uint32_t drawctrl0, new_drawctrl0; | ||
89 | uint32_t drawctrl1, new_drawctrl1; | ||
90 | } s4; | ||
91 | } drm_savage_state_t; | ||
92 | |||
93 | /* these chip tags should match the ones in the 2D driver in savage_regs.h. */ | ||
94 | enum savage_family { | ||
95 | S3_UNKNOWN = 0, | ||
96 | S3_SAVAGE3D, | ||
97 | S3_SAVAGE_MX, | ||
98 | S3_SAVAGE4, | ||
99 | S3_PROSAVAGE, | ||
100 | S3_TWISTER, | ||
101 | S3_PROSAVAGEDDR, | ||
102 | S3_SUPERSAVAGE, | ||
103 | S3_SAVAGE2000, | ||
104 | S3_LAST | ||
105 | }; | ||
106 | |||
107 | #define S3_SAVAGE3D_SERIES(chip) ((chip>=S3_SAVAGE3D) && (chip<=S3_SAVAGE_MX)) | ||
108 | |||
109 | #define S3_SAVAGE4_SERIES(chip) ((chip==S3_SAVAGE4) \ | ||
110 | || (chip==S3_PROSAVAGE) \ | ||
111 | || (chip==S3_TWISTER) \ | ||
112 | || (chip==S3_PROSAVAGEDDR)) | ||
113 | |||
114 | #define S3_SAVAGE_MOBILE_SERIES(chip) ((chip==S3_SAVAGE_MX) || (chip==S3_SUPERSAVAGE)) | ||
115 | |||
116 | #define S3_SAVAGE_SERIES(chip) ((chip>=S3_SAVAGE3D) && (chip<=S3_SAVAGE2000)) | ||
117 | |||
118 | #define S3_MOBILE_TWISTER_SERIES(chip) ((chip==S3_TWISTER) \ | ||
119 | ||(chip==S3_PROSAVAGEDDR)) | ||
120 | |||
121 | /* flags */ | ||
122 | #define SAVAGE_IS_AGP 1 | ||
123 | |||
124 | typedef struct drm_savage_private { | ||
125 | drm_savage_sarea_t *sarea_priv; | ||
126 | |||
127 | drm_savage_buf_priv_t head, tail; | ||
128 | |||
129 | /* who am I? */ | ||
130 | enum savage_family chipset; | ||
131 | |||
132 | unsigned int cob_size; | ||
133 | unsigned int bci_threshold_lo, bci_threshold_hi; | ||
134 | unsigned int dma_type; | ||
135 | |||
136 | /* frame buffer layout */ | ||
137 | unsigned int fb_bpp; | ||
138 | unsigned int front_offset, front_pitch; | ||
139 | unsigned int back_offset, back_pitch; | ||
140 | unsigned int depth_bpp; | ||
141 | unsigned int depth_offset, depth_pitch; | ||
142 | |||
143 | /* bitmap descriptors for swap and clear */ | ||
144 | unsigned int front_bd, back_bd, depth_bd; | ||
145 | |||
146 | /* local textures */ | ||
147 | unsigned int texture_offset; | ||
148 | unsigned int texture_size; | ||
149 | |||
150 | /* memory regions in physical memory */ | ||
151 | drm_local_map_t *sarea; | ||
152 | drm_local_map_t *mmio; | ||
153 | drm_local_map_t *fb; | ||
154 | drm_local_map_t *aperture; | ||
155 | drm_local_map_t *status; | ||
156 | drm_local_map_t *agp_textures; | ||
157 | drm_local_map_t *cmd_dma; | ||
158 | drm_local_map_t fake_dma; | ||
159 | |||
160 | struct { | ||
161 | int handle; | ||
162 | unsigned long base, size; | ||
163 | } mtrr[3]; | ||
164 | |||
165 | /* BCI and status-related stuff */ | ||
166 | volatile uint32_t *status_ptr, *bci_ptr; | ||
167 | uint32_t status_used_mask; | ||
168 | uint16_t event_counter; | ||
169 | unsigned int event_wrap; | ||
170 | |||
171 | /* Savage4 command DMA */ | ||
172 | drm_savage_dma_page_t *dma_pages; | ||
173 | unsigned int nr_dma_pages, first_dma_page, current_dma_page; | ||
174 | drm_savage_age_t last_dma_age; | ||
175 | |||
176 | /* saved hw state for global/local check on S3D */ | ||
177 | uint32_t hw_draw_ctrl, hw_zbuf_ctrl; | ||
178 | /* and for scissors (global, so don't emit if not changed) */ | ||
179 | uint32_t hw_scissors_start, hw_scissors_end; | ||
180 | |||
181 | drm_savage_state_t state; | ||
182 | |||
183 | /* after emitting a wait cmd Savage3D needs 63 nops before next DMA */ | ||
184 | unsigned int waiting; | ||
185 | |||
186 | /* config/hardware-dependent function pointers */ | ||
187 | int (*wait_fifo)(struct drm_savage_private *dev_priv, unsigned int n); | ||
188 | int (*wait_evnt)(struct drm_savage_private *dev_priv, uint16_t e); | ||
189 | /* Err, there is a macro wait_event in include/linux/wait.h. | ||
190 | * Avoid unwanted macro expansion. */ | ||
191 | void (*emit_clip_rect)(struct drm_savage_private *dev_priv, | ||
192 | drm_clip_rect_t *pbox); | ||
193 | void (*dma_flush)(struct drm_savage_private *dev_priv); | ||
194 | } drm_savage_private_t; | ||
195 | |||
196 | /* ioctls */ | ||
197 | extern int savage_bci_cmdbuf(DRM_IOCTL_ARGS); | ||
198 | extern int savage_bci_buffers(DRM_IOCTL_ARGS); | ||
199 | |||
200 | /* BCI functions */ | ||
201 | extern uint16_t savage_bci_emit_event(drm_savage_private_t *dev_priv, | ||
202 | unsigned int flags); | ||
203 | extern void savage_freelist_put(drm_device_t *dev, drm_buf_t *buf); | ||
204 | extern void savage_dma_reset(drm_savage_private_t *dev_priv); | ||
205 | extern void savage_dma_wait(drm_savage_private_t *dev_priv, unsigned int page); | ||
206 | extern uint32_t *savage_dma_alloc(drm_savage_private_t *dev_priv, | ||
207 | unsigned int n); | ||
208 | extern int savage_preinit(drm_device_t *dev, unsigned long chipset); | ||
209 | extern int savage_postcleanup(drm_device_t *dev); | ||
210 | extern int savage_do_cleanup_bci(drm_device_t *dev); | ||
211 | extern void savage_reclaim_buffers(drm_device_t *dev, DRMFILE filp); | ||
212 | |||
213 | /* state functions */ | ||
214 | extern void savage_emit_clip_rect_s3d(drm_savage_private_t *dev_priv, | ||
215 | drm_clip_rect_t *pbox); | ||
216 | extern void savage_emit_clip_rect_s4(drm_savage_private_t *dev_priv, | ||
217 | drm_clip_rect_t *pbox); | ||
218 | |||
219 | #define SAVAGE_FB_SIZE_S3 0x01000000 /* 16MB */ | ||
220 | #define SAVAGE_FB_SIZE_S4 0x02000000 /* 32MB */ | ||
221 | #define SAVAGE_MMIO_SIZE 0x00080000 /* 512kB */ | ||
222 | #define SAVAGE_APERTURE_OFFSET 0x02000000 /* 32MB */ | ||
223 | #define SAVAGE_APERTURE_SIZE 0x05000000 /* 5 tiled surfaces, 16MB each */ | ||
224 | |||
225 | #define SAVAGE_BCI_OFFSET 0x00010000 /* offset of the BCI region | ||
226 | * inside the MMIO region */ | ||
227 | #define SAVAGE_BCI_FIFO_SIZE 32 /* number of entries in on-chip | ||
228 | * BCI FIFO */ | ||
229 | |||
230 | /* | ||
231 | * MMIO registers | ||
232 | */ | ||
233 | #define SAVAGE_STATUS_WORD0 0x48C00 | ||
234 | #define SAVAGE_STATUS_WORD1 0x48C04 | ||
235 | #define SAVAGE_ALT_STATUS_WORD0 0x48C60 | ||
236 | |||
237 | #define SAVAGE_FIFO_USED_MASK_S3D 0x0001ffff | ||
238 | #define SAVAGE_FIFO_USED_MASK_S4 0x001fffff | ||
239 | |||
240 | /* Copied from savage_bci.h in the 2D driver with some renaming. */ | ||
241 | |||
242 | /* Bitmap descriptors */ | ||
243 | #define SAVAGE_BD_STRIDE_SHIFT 0 | ||
244 | #define SAVAGE_BD_BPP_SHIFT 16 | ||
245 | #define SAVAGE_BD_TILE_SHIFT 24 | ||
246 | #define SAVAGE_BD_BW_DISABLE (1<<28) | ||
247 | /* common: */ | ||
248 | #define SAVAGE_BD_TILE_LINEAR 0 | ||
249 | /* savage4, MX, IX, 3D */ | ||
250 | #define SAVAGE_BD_TILE_16BPP 2 | ||
251 | #define SAVAGE_BD_TILE_32BPP 3 | ||
252 | /* twister, prosavage, DDR, supersavage, 2000 */ | ||
253 | #define SAVAGE_BD_TILE_DEST 1 | ||
254 | #define SAVAGE_BD_TILE_TEXTURE 2 | ||
255 | /* GBD - BCI enable */ | ||
256 | /* savage4, MX, IX, 3D */ | ||
257 | #define SAVAGE_GBD_BCI_ENABLE 8 | ||
258 | /* twister, prosavage, DDR, supersavage, 2000 */ | ||
259 | #define SAVAGE_GBD_BCI_ENABLE_TWISTER 0 | ||
260 | |||
261 | #define SAVAGE_GBD_BIG_ENDIAN 4 | ||
262 | #define SAVAGE_GBD_LITTLE_ENDIAN 0 | ||
263 | #define SAVAGE_GBD_64 1 | ||
264 | |||
265 | /* Global Bitmap Descriptor */ | ||
266 | #define SAVAGE_BCI_GLB_BD_LOW 0x8168 | ||
267 | #define SAVAGE_BCI_GLB_BD_HIGH 0x816C | ||
268 | |||
269 | /* | ||
270 | * BCI registers | ||
271 | */ | ||
272 | /* Savage4/Twister/ProSavage 3D registers */ | ||
273 | #define SAVAGE_DRAWLOCALCTRL_S4 0x1e | ||
274 | #define SAVAGE_TEXPALADDR_S4 0x1f | ||
275 | #define SAVAGE_TEXCTRL0_S4 0x20 | ||
276 | #define SAVAGE_TEXCTRL1_S4 0x21 | ||
277 | #define SAVAGE_TEXADDR0_S4 0x22 | ||
278 | #define SAVAGE_TEXADDR1_S4 0x23 | ||
279 | #define SAVAGE_TEXBLEND0_S4 0x24 | ||
280 | #define SAVAGE_TEXBLEND1_S4 0x25 | ||
281 | #define SAVAGE_TEXXPRCLR_S4 0x26 /* never used */ | ||
282 | #define SAVAGE_TEXDESCR_S4 0x27 | ||
283 | #define SAVAGE_FOGTABLE_S4 0x28 | ||
284 | #define SAVAGE_FOGCTRL_S4 0x30 | ||
285 | #define SAVAGE_STENCILCTRL_S4 0x31 | ||
286 | #define SAVAGE_ZBUFCTRL_S4 0x32 | ||
287 | #define SAVAGE_ZBUFOFF_S4 0x33 | ||
288 | #define SAVAGE_DESTCTRL_S4 0x34 | ||
289 | #define SAVAGE_DRAWCTRL0_S4 0x35 | ||
290 | #define SAVAGE_DRAWCTRL1_S4 0x36 | ||
291 | #define SAVAGE_ZWATERMARK_S4 0x37 | ||
292 | #define SAVAGE_DESTTEXRWWATERMARK_S4 0x38 | ||
293 | #define SAVAGE_TEXBLENDCOLOR_S4 0x39 | ||
294 | /* Savage3D/MX/IX 3D registers */ | ||
295 | #define SAVAGE_TEXPALADDR_S3D 0x18 | ||
296 | #define SAVAGE_TEXXPRCLR_S3D 0x19 /* never used */ | ||
297 | #define SAVAGE_TEXADDR_S3D 0x1A | ||
298 | #define SAVAGE_TEXDESCR_S3D 0x1B | ||
299 | #define SAVAGE_TEXCTRL_S3D 0x1C | ||
300 | #define SAVAGE_FOGTABLE_S3D 0x20 | ||
301 | #define SAVAGE_FOGCTRL_S3D 0x30 | ||
302 | #define SAVAGE_DRAWCTRL_S3D 0x31 | ||
303 | #define SAVAGE_ZBUFCTRL_S3D 0x32 | ||
304 | #define SAVAGE_ZBUFOFF_S3D 0x33 | ||
305 | #define SAVAGE_DESTCTRL_S3D 0x34 | ||
306 | #define SAVAGE_SCSTART_S3D 0x35 | ||
307 | #define SAVAGE_SCEND_S3D 0x36 | ||
308 | #define SAVAGE_ZWATERMARK_S3D 0x37 | ||
309 | #define SAVAGE_DESTTEXRWWATERMARK_S3D 0x38 | ||
310 | /* common stuff */ | ||
311 | #define SAVAGE_VERTBUFADDR 0x3e | ||
312 | #define SAVAGE_BITPLANEWTMASK 0xd7 | ||
313 | #define SAVAGE_DMABUFADDR 0x51 | ||
314 | |||
315 | /* texture enable bits (needed for tex addr checking) */ | ||
316 | #define SAVAGE_TEXCTRL_TEXEN_MASK 0x00010000 /* S3D */ | ||
317 | #define SAVAGE_TEXDESCR_TEX0EN_MASK 0x02000000 /* S4 */ | ||
318 | #define SAVAGE_TEXDESCR_TEX1EN_MASK 0x04000000 /* S4 */ | ||
319 | |||
320 | /* Global fields in Savage4/Twister/ProSavage 3D registers: | ||
321 | * | ||
322 | * All texture registers and DrawLocalCtrl are local. All other | ||
323 | * registers are global. */ | ||
324 | |||
325 | /* Global fields in Savage3D/MX/IX 3D registers: | ||
326 | * | ||
327 | * All texture registers are local. DrawCtrl and ZBufCtrl are | ||
328 | * partially local. All other registers are global. | ||
329 | * | ||
330 | * DrawCtrl global fields: cullMode, alphaTestCmpFunc, alphaTestEn, alphaRefVal | ||
331 | * ZBufCtrl global fields: zCmpFunc, zBufEn | ||
332 | */ | ||
333 | #define SAVAGE_DRAWCTRL_S3D_GLOBAL 0x03f3c00c | ||
334 | #define SAVAGE_ZBUFCTRL_S3D_GLOBAL 0x00000027 | ||
335 | |||
336 | /* Masks for scissor bits (drawCtrl[01] on s4, scissorStart/End on s3d) | ||
337 | */ | ||
338 | #define SAVAGE_SCISSOR_MASK_S4 0x00fff7ff | ||
339 | #define SAVAGE_SCISSOR_MASK_S3D 0x07ff07ff | ||
340 | |||
341 | /* | ||
342 | * BCI commands | ||
343 | */ | ||
344 | #define BCI_CMD_NOP 0x40000000 | ||
345 | #define BCI_CMD_RECT 0x48000000 | ||
346 | #define BCI_CMD_RECT_XP 0x01000000 | ||
347 | #define BCI_CMD_RECT_YP 0x02000000 | ||
348 | #define BCI_CMD_SCANLINE 0x50000000 | ||
349 | #define BCI_CMD_LINE 0x5C000000 | ||
350 | #define BCI_CMD_LINE_LAST_PIXEL 0x58000000 | ||
351 | #define BCI_CMD_BYTE_TEXT 0x63000000 | ||
352 | #define BCI_CMD_NT_BYTE_TEXT 0x67000000 | ||
353 | #define BCI_CMD_BIT_TEXT 0x6C000000 | ||
354 | #define BCI_CMD_GET_ROP(cmd) (((cmd) >> 16) & 0xFF) | ||
355 | #define BCI_CMD_SET_ROP(cmd, rop) ((cmd) |= ((rop & 0xFF) << 16)) | ||
356 | #define BCI_CMD_SEND_COLOR 0x00008000 | ||
357 | |||
358 | #define BCI_CMD_CLIP_NONE 0x00000000 | ||
359 | #define BCI_CMD_CLIP_CURRENT 0x00002000 | ||
360 | #define BCI_CMD_CLIP_LR 0x00004000 | ||
361 | #define BCI_CMD_CLIP_NEW 0x00006000 | ||
362 | |||
363 | #define BCI_CMD_DEST_GBD 0x00000000 | ||
364 | #define BCI_CMD_DEST_PBD 0x00000800 | ||
365 | #define BCI_CMD_DEST_PBD_NEW 0x00000C00 | ||
366 | #define BCI_CMD_DEST_SBD 0x00001000 | ||
367 | #define BCI_CMD_DEST_SBD_NEW 0x00001400 | ||
368 | |||
369 | #define BCI_CMD_SRC_TRANSPARENT 0x00000200 | ||
370 | #define BCI_CMD_SRC_SOLID 0x00000000 | ||
371 | #define BCI_CMD_SRC_GBD 0x00000020 | ||
372 | #define BCI_CMD_SRC_COLOR 0x00000040 | ||
373 | #define BCI_CMD_SRC_MONO 0x00000060 | ||
374 | #define BCI_CMD_SRC_PBD_COLOR 0x00000080 | ||
375 | #define BCI_CMD_SRC_PBD_MONO 0x000000A0 | ||
376 | #define BCI_CMD_SRC_PBD_COLOR_NEW 0x000000C0 | ||
377 | #define BCI_CMD_SRC_PBD_MONO_NEW 0x000000E0 | ||
378 | #define BCI_CMD_SRC_SBD_COLOR 0x00000100 | ||
379 | #define BCI_CMD_SRC_SBD_MONO 0x00000120 | ||
380 | #define BCI_CMD_SRC_SBD_COLOR_NEW 0x00000140 | ||
381 | #define BCI_CMD_SRC_SBD_MONO_NEW 0x00000160 | ||
382 | |||
383 | #define BCI_CMD_PAT_TRANSPARENT 0x00000010 | ||
384 | #define BCI_CMD_PAT_NONE 0x00000000 | ||
385 | #define BCI_CMD_PAT_COLOR 0x00000002 | ||
386 | #define BCI_CMD_PAT_MONO 0x00000003 | ||
387 | #define BCI_CMD_PAT_PBD_COLOR 0x00000004 | ||
388 | #define BCI_CMD_PAT_PBD_MONO 0x00000005 | ||
389 | #define BCI_CMD_PAT_PBD_COLOR_NEW 0x00000006 | ||
390 | #define BCI_CMD_PAT_PBD_MONO_NEW 0x00000007 | ||
391 | #define BCI_CMD_PAT_SBD_COLOR 0x00000008 | ||
392 | #define BCI_CMD_PAT_SBD_MONO 0x00000009 | ||
393 | #define BCI_CMD_PAT_SBD_COLOR_NEW 0x0000000A | ||
394 | #define BCI_CMD_PAT_SBD_MONO_NEW 0x0000000B | ||
395 | |||
396 | #define BCI_BD_BW_DISABLE 0x10000000 | ||
397 | #define BCI_BD_TILE_MASK 0x03000000 | ||
398 | #define BCI_BD_TILE_NONE 0x00000000 | ||
399 | #define BCI_BD_TILE_16 0x02000000 | ||
400 | #define BCI_BD_TILE_32 0x03000000 | ||
401 | #define BCI_BD_GET_BPP(bd) (((bd) >> 16) & 0xFF) | ||
402 | #define BCI_BD_SET_BPP(bd, bpp) ((bd) |= (((bpp) & 0xFF) << 16)) | ||
403 | #define BCI_BD_GET_STRIDE(bd) ((bd) & 0xFFFF) | ||
404 | #define BCI_BD_SET_STRIDE(bd, st) ((bd) |= ((st) & 0xFFFF)) | ||
405 | |||
406 | #define BCI_CMD_SET_REGISTER 0x96000000 | ||
407 | |||
408 | #define BCI_CMD_WAIT 0xC0000000 | ||
409 | #define BCI_CMD_WAIT_3D 0x00010000 | ||
410 | #define BCI_CMD_WAIT_2D 0x00020000 | ||
411 | |||
412 | #define BCI_CMD_UPDATE_EVENT_TAG 0x98000000 | ||
413 | |||
414 | #define BCI_CMD_DRAW_PRIM 0x80000000 | ||
415 | #define BCI_CMD_DRAW_INDEXED_PRIM 0x88000000 | ||
416 | #define BCI_CMD_DRAW_CONT 0x01000000 | ||
417 | #define BCI_CMD_DRAW_TRILIST 0x00000000 | ||
418 | #define BCI_CMD_DRAW_TRISTRIP 0x02000000 | ||
419 | #define BCI_CMD_DRAW_TRIFAN 0x04000000 | ||
420 | #define BCI_CMD_DRAW_SKIPFLAGS 0x000000ff | ||
421 | #define BCI_CMD_DRAW_NO_Z 0x00000001 | ||
422 | #define BCI_CMD_DRAW_NO_W 0x00000002 | ||
423 | #define BCI_CMD_DRAW_NO_CD 0x00000004 | ||
424 | #define BCI_CMD_DRAW_NO_CS 0x00000008 | ||
425 | #define BCI_CMD_DRAW_NO_U0 0x00000010 | ||
426 | #define BCI_CMD_DRAW_NO_V0 0x00000020 | ||
427 | #define BCI_CMD_DRAW_NO_UV0 0x00000030 | ||
428 | #define BCI_CMD_DRAW_NO_U1 0x00000040 | ||
429 | #define BCI_CMD_DRAW_NO_V1 0x00000080 | ||
430 | #define BCI_CMD_DRAW_NO_UV1 0x000000c0 | ||
431 | |||
432 | #define BCI_CMD_DMA 0xa8000000 | ||
433 | |||
434 | #define BCI_W_H(w, h) ((((h) << 16) | (w)) & 0x0FFF0FFF) | ||
435 | #define BCI_X_Y(x, y) ((((y) << 16) | (x)) & 0x0FFF0FFF) | ||
436 | #define BCI_X_W(x, w) ((((w) << 16) | (x)) & 0x0FFF0FFF) | ||
437 | #define BCI_CLIP_LR(l, r) ((((r) << 16) | (l)) & 0x0FFF0FFF) | ||
438 | #define BCI_CLIP_TL(t, l) ((((t) << 16) | (l)) & 0x0FFF0FFF) | ||
439 | #define BCI_CLIP_BR(b, r) ((((b) << 16) | (r)) & 0x0FFF0FFF) | ||
440 | |||
441 | #define BCI_LINE_X_Y(x, y) (((y) << 16) | ((x) & 0xFFFF)) | ||
442 | #define BCI_LINE_STEPS(diag, axi) (((axi) << 16) | ((diag) & 0xFFFF)) | ||
443 | #define BCI_LINE_MISC(maj, ym, xp, yp, err) \ | ||
444 | (((maj) & 0x1FFF) | \ | ||
445 | ((ym) ? 1<<13 : 0) | \ | ||
446 | ((xp) ? 1<<14 : 0) | \ | ||
447 | ((yp) ? 1<<15 : 0) | \ | ||
448 | ((err) << 16)) | ||
449 | |||
450 | /* | ||
451 | * common commands | ||
452 | */ | ||
453 | #define BCI_SET_REGISTERS( first, n ) \ | ||
454 | BCI_WRITE(BCI_CMD_SET_REGISTER | \ | ||
455 | ((uint32_t)(n) & 0xff) << 16 | \ | ||
456 | ((uint32_t)(first) & 0xffff)) | ||
457 | #define DMA_SET_REGISTERS( first, n ) \ | ||
458 | DMA_WRITE(BCI_CMD_SET_REGISTER | \ | ||
459 | ((uint32_t)(n) & 0xff) << 16 | \ | ||
460 | ((uint32_t)(first) & 0xffff)) | ||
461 | |||
462 | #define BCI_DRAW_PRIMITIVE(n, type, skip) \ | ||
463 | BCI_WRITE(BCI_CMD_DRAW_PRIM | (type) | (skip) | \ | ||
464 | ((n) << 16)) | ||
465 | #define DMA_DRAW_PRIMITIVE(n, type, skip) \ | ||
466 | DMA_WRITE(BCI_CMD_DRAW_PRIM | (type) | (skip) | \ | ||
467 | ((n) << 16)) | ||
468 | |||
469 | #define BCI_DRAW_INDICES_S3D(n, type, i0) \ | ||
470 | BCI_WRITE(BCI_CMD_DRAW_INDEXED_PRIM | (type) | \ | ||
471 | ((n) << 16) | (i0)) | ||
472 | |||
473 | #define BCI_DRAW_INDICES_S4(n, type, skip) \ | ||
474 | BCI_WRITE(BCI_CMD_DRAW_INDEXED_PRIM | (type) | \ | ||
475 | (skip) | ((n) << 16)) | ||
476 | |||
477 | #define BCI_DMA(n) \ | ||
478 | BCI_WRITE(BCI_CMD_DMA | (((n) >> 1) - 1)) | ||
479 | |||
480 | /* | ||
481 | * access to MMIO | ||
482 | */ | ||
483 | #define SAVAGE_READ(reg) DRM_READ32( dev_priv->mmio, (reg) ) | ||
484 | #define SAVAGE_WRITE(reg) DRM_WRITE32( dev_priv->mmio, (reg) ) | ||
485 | |||
486 | /* | ||
487 | * access to the burst command interface (BCI) | ||
488 | */ | ||
489 | #define SAVAGE_BCI_DEBUG 1 | ||
490 | |||
491 | #define BCI_LOCALS volatile uint32_t *bci_ptr; | ||
492 | |||
493 | #define BEGIN_BCI( n ) do { \ | ||
494 | dev_priv->wait_fifo(dev_priv, (n)); \ | ||
495 | bci_ptr = dev_priv->bci_ptr; \ | ||
496 | } while(0) | ||
497 | |||
498 | #define BCI_WRITE( val ) *bci_ptr++ = (uint32_t)(val) | ||
499 | |||
500 | #define BCI_COPY_FROM_USER(src,n) do { \ | ||
501 | unsigned int i; \ | ||
502 | for (i = 0; i < n; ++i) { \ | ||
503 | uint32_t val; \ | ||
504 | DRM_GET_USER_UNCHECKED(val, &((uint32_t*)(src))[i]); \ | ||
505 | BCI_WRITE(val); \ | ||
506 | } \ | ||
507 | } while(0) | ||
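
These macros form a small protocol: declare the local pointer, reserve FIFO entries, then write words. The shape, as used in savage_state.c below (addr stands in for a real bus address):

	{
	        BCI_LOCALS;

	        BEGIN_BCI(2);           /* wait for 2 free FIFO entries */
	        BCI_SET_REGISTERS(SAVAGE_VERTBUFADDR, 1);
	        BCI_WRITE(addr);        /* one register payload word */
	}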
508 | |||
509 | /* | ||
510 | * command DMA support | ||
511 | */ | ||
512 | #define SAVAGE_DMA_DEBUG 1 | ||
513 | |||
514 | #define DMA_LOCALS uint32_t *dma_ptr; | ||
515 | |||
516 | #define BEGIN_DMA( n ) do { \ | ||
517 | unsigned int cur = dev_priv->current_dma_page; \ | ||
518 | unsigned int rest = SAVAGE_DMA_PAGE_SIZE - \ | ||
519 | dev_priv->dma_pages[cur].used; \ | ||
520 | if ((n) > rest) { \ | ||
521 | dma_ptr = savage_dma_alloc(dev_priv, (n)); \ | ||
522 | } else { /* fast path for small allocations */ \ | ||
523 | dma_ptr = (uint32_t *)dev_priv->cmd_dma->handle + \ | ||
524 | cur * SAVAGE_DMA_PAGE_SIZE + \ | ||
525 | dev_priv->dma_pages[cur].used; \ | ||
526 | if (dev_priv->dma_pages[cur].used == 0) \ | ||
527 | savage_dma_wait(dev_priv, cur); \ | ||
528 | dev_priv->dma_pages[cur].used += (n); \ | ||
529 | } \ | ||
530 | } while(0) | ||
531 | |||
532 | #define DMA_WRITE( val ) *dma_ptr++ = (uint32_t)(val) | ||
533 | |||
534 | #define DMA_COPY_FROM_USER(src,n) do { \ | ||
535 | DRM_COPY_FROM_USER_UNCHECKED(dma_ptr, (src), (n)*4); \ | ||
536 | dma_ptr += n; \ | ||
537 | } while(0) | ||
538 | |||
539 | #if SAVAGE_DMA_DEBUG | ||
540 | #define DMA_COMMIT() do { \ | ||
541 | unsigned int cur = dev_priv->current_dma_page; \ | ||
542 | uint32_t *expected = (uint32_t *)dev_priv->cmd_dma->handle + \ | ||
543 | cur * SAVAGE_DMA_PAGE_SIZE + \ | ||
544 | dev_priv->dma_pages[cur].used; \ | ||
545 | if (dma_ptr != expected) { \ | ||
546 | DRM_ERROR("DMA allocation and use don't match: " \ | ||
547 | "%p != %p\n", expected, dma_ptr); \ | ||
548 | savage_dma_reset(dev_priv); \ | ||
549 | } \ | ||
550 | } while(0) | ||
551 | #else | ||
552 | #define DMA_COMMIT() do {/* nothing */} while(0) | ||
553 | #endif | ||
554 | |||
555 | #define DMA_FLUSH() dev_priv->dma_flush(dev_priv) | ||
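
Command DMA follows the same shape, with DMA_COMMIT() verifying (in the debug build above) that exactly the reserved number of words was written. A sketch mirroring savage_emit_clip_rect_s3d() in savage_state.c below:

	{
	        DMA_LOCALS;

	        BEGIN_DMA(4);           /* 1 wait + 1 command + 2 payload */
	        DMA_WRITE(BCI_CMD_WAIT | BCI_CMD_WAIT_3D);
	        DMA_SET_REGISTERS(SAVAGE_SCSTART_S3D, 2);
	        DMA_WRITE(scstart);
	        DMA_WRITE(scend);
	        DMA_COMMIT();
	}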
556 | |||
557 | /* Buffer aging via event tag | ||
558 | */ | ||
559 | |||
560 | #define UPDATE_EVENT_COUNTER( ) do { \ | ||
561 | if (dev_priv->status_ptr) { \ | ||
562 | uint16_t count; \ | ||
563 | /* coordinate with Xserver */ \ | ||
564 | count = dev_priv->status_ptr[1023]; \ | ||
565 | if (count < dev_priv->event_counter) \ | ||
566 | dev_priv->event_wrap++; \ | ||
567 | dev_priv->event_counter = count; \ | ||
568 | } \ | ||
569 | } while(0) | ||
570 | |||
571 | #define SET_AGE( age, e, w ) do { \ | ||
572 | (age)->event = e; \ | ||
573 | (age)->wrap = w; \ | ||
574 | } while(0) | ||
575 | |||
576 | #define TEST_AGE( age, e, w ) \ | ||
577 | ( (age)->wrap < (w) || ( (age)->wrap == (w) && (age)->event <= (e) ) ) | ||
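
A buffer aged with SET_AGE() may be recycled once the hardware's current (event, wrap) pair reaches the stored age. A minimal sketch of the check the freelist logic performs (the helper framing is invented here):

	/* hw_e/hw_w: the hardware's current event tag and wrap count */
	static int savage_buf_idle(const drm_savage_buf_priv_t *buf_priv,
	                           uint16_t hw_e, unsigned int hw_w)
	{
	        return TEST_AGE(&buf_priv->age, hw_e, hw_w);
	}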
578 | |||
579 | #endif /* __SAVAGE_DRV_H__ */ | ||
diff --git a/drivers/char/drm/savage_state.c b/drivers/char/drm/savage_state.c new file mode 100644 index 000000000000..475695a00083 --- /dev/null +++ b/drivers/char/drm/savage_state.c | |||
@@ -0,0 +1,1146 @@ | |||
1 | /* savage_state.c -- State and drawing support for Savage | ||
2 | * | ||
3 | * Copyright 2004 Felix Kuehling | ||
4 | * All Rights Reserved. | ||
5 | * | ||
6 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
7 | * copy of this software and associated documentation files (the "Software"), | ||
8 | * to deal in the Software without restriction, including without limitation | ||
9 | * the rights to use, copy, modify, merge, publish, distribute, sub license, | ||
10 | * and/or sell copies of the Software, and to permit persons to whom the | ||
11 | * Software is furnished to do so, subject to the following conditions: | ||
12 | * | ||
13 | * The above copyright notice and this permission notice (including the | ||
14 | * next paragraph) shall be included in all copies or substantial portions | ||
15 | * of the Software. | ||
16 | * | ||
17 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
18 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
19 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||
20 | * NON-INFRINGEMENT. IN NO EVENT SHALL FELIX KUEHLING BE LIABLE FOR | ||
21 | * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF | ||
22 | * CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION | ||
23 | * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. | ||
24 | */ | ||
25 | #include "drmP.h" | ||
26 | #include "savage_drm.h" | ||
27 | #include "savage_drv.h" | ||
28 | |||
29 | void savage_emit_clip_rect_s3d(drm_savage_private_t *dev_priv, | ||
30 | drm_clip_rect_t *pbox) | ||
31 | { | ||
32 | uint32_t scstart = dev_priv->state.s3d.new_scstart; | ||
33 | uint32_t scend = dev_priv->state.s3d.new_scend; | ||
34 | scstart = (scstart & ~SAVAGE_SCISSOR_MASK_S3D) | | ||
35 | ((uint32_t)pbox->x1 & 0x000007ff) | | ||
36 | (((uint32_t)pbox->y1 << 16) & 0x07ff0000); | ||
37 | scend = (scend & ~SAVAGE_SCISSOR_MASK_S3D) | | ||
38 | (((uint32_t)pbox->x2-1) & 0x000007ff) | | ||
39 | ((((uint32_t)pbox->y2-1) << 16) & 0x07ff0000); | ||
40 | if (scstart != dev_priv->state.s3d.scstart || | ||
41 | scend != dev_priv->state.s3d.scend) { | ||
42 | DMA_LOCALS; | ||
43 | BEGIN_DMA(4); | ||
44 | DMA_WRITE(BCI_CMD_WAIT|BCI_CMD_WAIT_3D); | ||
45 | DMA_SET_REGISTERS(SAVAGE_SCSTART_S3D, 2); | ||
46 | DMA_WRITE(scstart); | ||
47 | DMA_WRITE(scend); | ||
48 | dev_priv->state.s3d.scstart = scstart; | ||
49 | dev_priv->state.s3d.scend = scend; | ||
50 | dev_priv->waiting = 1; | ||
51 | DMA_COMMIT(); | ||
52 | } | ||
53 | } | ||
54 | |||
55 | void savage_emit_clip_rect_s4(drm_savage_private_t *dev_priv, | ||
56 | drm_clip_rect_t *pbox) | ||
57 | { | ||
58 | uint32_t drawctrl0 = dev_priv->state.s4.new_drawctrl0; | ||
59 | uint32_t drawctrl1 = dev_priv->state.s4.new_drawctrl1; | ||
60 | drawctrl0 = (drawctrl0 & ~SAVAGE_SCISSOR_MASK_S4) | | ||
61 | ((uint32_t)pbox->x1 & 0x000007ff) | | ||
62 | (((uint32_t)pbox->y1 << 12) & 0x00fff000); | ||
63 | drawctrl1 = (drawctrl1 & ~SAVAGE_SCISSOR_MASK_S4) | | ||
64 | (((uint32_t)pbox->x2-1) & 0x000007ff) | | ||
65 | ((((uint32_t)pbox->y2-1) << 12) & 0x00fff000); | ||
66 | if (drawctrl0 != dev_priv->state.s4.drawctrl0 || | ||
67 | drawctrl1 != dev_priv->state.s4.drawctrl1) { | ||
68 | DMA_LOCALS; | ||
69 | BEGIN_DMA(4); | ||
70 | DMA_WRITE(BCI_CMD_WAIT|BCI_CMD_WAIT_3D); | ||
71 | DMA_SET_REGISTERS(SAVAGE_DRAWCTRL0_S4, 2); | ||
72 | DMA_WRITE(drawctrl0); | ||
73 | DMA_WRITE(drawctrl1); | ||
74 | dev_priv->state.s4.drawctrl0 = drawctrl0; | ||
75 | dev_priv->state.s4.drawctrl1 = drawctrl1; | ||
76 | dev_priv->waiting = 1; | ||
77 | DMA_COMMIT(); | ||
78 | } | ||
79 | } | ||
80 | |||
81 | static int savage_verify_texaddr(drm_savage_private_t *dev_priv, int unit, | ||
82 | uint32_t addr) | ||
83 | { | ||
84 | if ((addr & 6) != 2) { /* reserved bits */ | ||
85 | DRM_ERROR("bad texAddr%d %08x (reserved bits)\n", unit, addr); | ||
86 | return DRM_ERR(EINVAL); | ||
87 | } | ||
88 | if (!(addr & 1)) { /* local */ | ||
89 | addr &= ~7; | ||
90 | if (addr < dev_priv->texture_offset || | ||
91 | addr >= dev_priv->texture_offset+dev_priv->texture_size) { | ||
92 | DRM_ERROR("bad texAddr%d %08x (local addr out of range)\n", | ||
93 | unit, addr); | ||
94 | return DRM_ERR(EINVAL); | ||
95 | } | ||
96 | } else { /* AGP */ | ||
97 | if (!dev_priv->agp_textures) { | ||
98 | DRM_ERROR("bad texAddr%d %08x (AGP not available)\n", | ||
99 | unit, addr); | ||
100 | return DRM_ERR(EINVAL); | ||
101 | } | ||
102 | addr &= ~7; | ||
103 | if (addr < dev_priv->agp_textures->offset || | ||
104 | addr >= (dev_priv->agp_textures->offset + | ||
105 | dev_priv->agp_textures->size)) { | ||
106 | DRM_ERROR("bad texAddr%d %08x (AGP addr out of range)\n", | ||
107 | unit, addr); | ||
108 | return DRM_ERR(EINVAL); | ||
109 | } | ||
110 | } | ||
111 | return 0; | ||
112 | } | ||
113 | |||
114 | #define SAVE_STATE(reg,where) \ | ||
115 | if(start <= reg && start+count > reg) \ | ||
116 | DRM_GET_USER_UNCHECKED(dev_priv->state.where, ®s[reg-start]) | ||
117 | #define SAVE_STATE_MASK(reg,where,mask) do { \ | ||
118 | if(start <= reg && start+count > reg) { \ | ||
119 | uint32_t tmp; \ | ||
120 | DRM_GET_USER_UNCHECKED(tmp, ®s[reg-start]); \ | ||
121 | dev_priv->state.where = (tmp & (mask)) | \ | ||
122 | (dev_priv->state.where & ~(mask)); \ | ||
123 | } \ | ||
124 | } while (0) | ||
125 | static int savage_verify_state_s3d(drm_savage_private_t *dev_priv, | ||
126 | unsigned int start, unsigned int count, | ||
127 | const uint32_t __user *regs) | ||
128 | { | ||
129 | if (start < SAVAGE_TEXPALADDR_S3D || | ||
130 | start+count-1 > SAVAGE_DESTTEXRWWATERMARK_S3D) { | ||
131 | DRM_ERROR("invalid register range (0x%04x-0x%04x)\n", | ||
132 | start, start+count-1); | ||
133 | return DRM_ERR(EINVAL); | ||
134 | } | ||
135 | |||
136 | SAVE_STATE_MASK(SAVAGE_SCSTART_S3D, s3d.new_scstart, | ||
137 | ~SAVAGE_SCISSOR_MASK_S3D); | ||
138 | SAVE_STATE_MASK(SAVAGE_SCEND_S3D, s3d.new_scend, | ||
139 | ~SAVAGE_SCISSOR_MASK_S3D); | ||
140 | |||
141 | /* if any texture regs were changed ... */ | ||
142 | if (start <= SAVAGE_TEXCTRL_S3D && | ||
143 | start+count > SAVAGE_TEXPALADDR_S3D) { | ||
144 | /* ... check texture state */ | ||
145 | SAVE_STATE(SAVAGE_TEXCTRL_S3D, s3d.texctrl); | ||
146 | SAVE_STATE(SAVAGE_TEXADDR_S3D, s3d.texaddr); | ||
147 | if (dev_priv->state.s3d.texctrl & SAVAGE_TEXCTRL_TEXEN_MASK) | ||
148 | return savage_verify_texaddr( | ||
149 | dev_priv, 0, dev_priv->state.s3d.texaddr); | ||
150 | } | ||
151 | |||
152 | return 0; | ||
153 | } | ||
154 | |||
155 | static int savage_verify_state_s4(drm_savage_private_t *dev_priv, | ||
156 | unsigned int start, unsigned int count, | ||
157 | const uint32_t __user *regs) | ||
158 | { | ||
159 | int ret = 0; | ||
160 | |||
161 | if (start < SAVAGE_DRAWLOCALCTRL_S4 || | ||
162 | start+count-1 > SAVAGE_TEXBLENDCOLOR_S4) { | ||
163 | DRM_ERROR("invalid register range (0x%04x-0x%04x)\n", | ||
164 | start, start+count-1); | ||
165 | return DRM_ERR(EINVAL); | ||
166 | } | ||
167 | |||
168 | SAVE_STATE_MASK(SAVAGE_DRAWCTRL0_S4, s4.new_drawctrl0, | ||
169 | ~SAVAGE_SCISSOR_MASK_S4); | ||
170 | SAVE_STATE_MASK(SAVAGE_DRAWCTRL1_S4, s4.new_drawctrl1, | ||
171 | ~SAVAGE_SCISSOR_MASK_S4); | ||
172 | |||
173 | /* if any texture regs were changed ... */ | ||
174 | if (start <= SAVAGE_TEXDESCR_S4 && | ||
175 | start+count > SAVAGE_TEXPALADDR_S4) { | ||
176 | /* ... check texture state */ | ||
177 | SAVE_STATE(SAVAGE_TEXDESCR_S4, s4.texdescr); | ||
178 | SAVE_STATE(SAVAGE_TEXADDR0_S4, s4.texaddr0); | ||
179 | SAVE_STATE(SAVAGE_TEXADDR1_S4, s4.texaddr1); | ||
180 | if (dev_priv->state.s4.texdescr & SAVAGE_TEXDESCR_TEX0EN_MASK) | ||
181 | ret |= savage_verify_texaddr( | ||
182 | dev_priv, 0, dev_priv->state.s4.texaddr0); | ||
183 | if (dev_priv->state.s4.texdescr & SAVAGE_TEXDESCR_TEX1EN_MASK) | ||
184 | ret |= savage_verify_texaddr( | ||
185 | dev_priv, 1, dev_priv->state.s4.texaddr1); | ||
186 | } | ||
187 | |||
188 | return ret; | ||
189 | } | ||
190 | #undef SAVE_STATE | ||
191 | #undef SAVE_STATE_MASK | ||
192 | |||
193 | static int savage_dispatch_state(drm_savage_private_t *dev_priv, | ||
194 | const drm_savage_cmd_header_t *cmd_header, | ||
195 | const uint32_t __user *regs) | ||
196 | { | ||
197 | unsigned int count = cmd_header->state.count; | ||
198 | unsigned int start = cmd_header->state.start; | ||
199 | unsigned int count2 = 0; | ||
200 | unsigned int bci_size; | ||
201 | int ret; | ||
202 | DMA_LOCALS; | ||
203 | |||
204 | if (!count) | ||
205 | return 0; | ||
206 | |||
207 | if (DRM_VERIFYAREA_READ(regs, count*4)) | ||
208 | return DRM_ERR(EFAULT); | ||
209 | |||
210 | if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) { | ||
211 | ret = savage_verify_state_s3d(dev_priv, start, count, regs); | ||
212 | if (ret != 0) | ||
213 | return ret; | ||
214 | /* scissor regs are emitted in savage_dispatch_draw */ | ||
215 | if (start < SAVAGE_SCSTART_S3D) { | ||
216 | if (start+count > SAVAGE_SCEND_S3D+1) | ||
217 | count2 = count - (SAVAGE_SCEND_S3D+1 - start); | ||
218 | if (start+count > SAVAGE_SCSTART_S3D) | ||
219 | count = SAVAGE_SCSTART_S3D - start; | ||
220 | } else if (start <= SAVAGE_SCEND_S3D) { | ||
221 | if (start+count > SAVAGE_SCEND_S3D+1) { | ||
222 | count -= SAVAGE_SCEND_S3D+1 - start; | ||
223 | start = SAVAGE_SCEND_S3D+1; | ||
224 | } else | ||
225 | return 0; | ||
226 | } | ||
227 | } else { | ||
228 | ret = savage_verify_state_s4(dev_priv, start, count, regs); | ||
229 | if (ret != 0) | ||
230 | return ret; | ||
231 | /* scissor regs are emitted in savage_dispatch_draw */ | ||
232 | if (start < SAVAGE_DRAWCTRL0_S4) { | ||
233 | if (start+count > SAVAGE_DRAWCTRL1_S4+1) | ||
234 | count2 = count - (SAVAGE_DRAWCTRL1_S4+1 - start); | ||
235 | if (start+count > SAVAGE_DRAWCTRL0_S4) | ||
236 | count = SAVAGE_DRAWCTRL0_S4 - start; | ||
237 | } else if (start <= SAVAGE_DRAWCTRL1_S4) { | ||
238 | if (start+count > SAVAGE_DRAWCTRL1_S4+1) { | ||
239 | count -= SAVAGE_DRAWCTRL1_S4+1 - start; | ||
240 | start = SAVAGE_DRAWCTRL1_S4+1; | ||
241 | } else | ||
242 | return 0; | ||
243 | } | ||
244 | } | ||
245 | |||
246 | bci_size = count + (count+254)/255 + count2 + (count2+254)/255; | ||
247 | |||
248 | if (cmd_header->state.global) { | ||
249 | BEGIN_DMA(bci_size+1); | ||
250 | DMA_WRITE(BCI_CMD_WAIT | BCI_CMD_WAIT_3D); | ||
251 | dev_priv->waiting = 1; | ||
252 | } else { | ||
253 | BEGIN_DMA(bci_size); | ||
254 | } | ||
255 | |||
256 | do { | ||
257 | while (count > 0) { | ||
258 | unsigned int n = count < 255 ? count : 255; | ||
259 | DMA_SET_REGISTERS(start, n); | ||
260 | DMA_COPY_FROM_USER(regs, n); | ||
261 | count -= n; | ||
262 | start += n; | ||
263 | regs += n; | ||
264 | } | ||
265 | start += 2; | ||
266 | regs += 2; | ||
267 | count = count2; | ||
268 | count2 = 0; | ||
269 | } while (count); | ||
270 | |||
271 | DMA_COMMIT(); | ||
272 | |||
273 | return 0; | ||
274 | } | ||
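
Each SET_REGISTERS burst carries at most 255 registers, so the space estimate above adds one command word per 255-register chunk: for count = 300 that is 300 + (300+254)/255 = 302 words. The estimate in isolation:

	/* DMA words needed to upload n registers in 255-register bursts:
	 * the payload plus one command word per burst (ceiling division) */
	static unsigned int savage_set_regs_size(unsigned int n)
	{
	        return n + (n + 254) / 255;
	}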
275 | |||
276 | static int savage_dispatch_dma_prim(drm_savage_private_t *dev_priv, | ||
277 | const drm_savage_cmd_header_t *cmd_header, | ||
278 | const drm_buf_t *dmabuf) | ||
279 | { | ||
280 | unsigned char reorder = 0; | ||
281 | unsigned int prim = cmd_header->prim.prim; | ||
282 | unsigned int skip = cmd_header->prim.skip; | ||
283 | unsigned int n = cmd_header->prim.count; | ||
284 | unsigned int start = cmd_header->prim.start; | ||
285 | unsigned int i; | ||
286 | BCI_LOCALS; | ||
287 | |||
288 | if (!dmabuf) { | ||
289 | DRM_ERROR("called without dma buffers!\n"); | ||
290 | return DRM_ERR(EINVAL); | ||
291 | } | ||
292 | |||
293 | if (!n) | ||
294 | return 0; | ||
295 | |||
296 | switch (prim) { | ||
297 | case SAVAGE_PRIM_TRILIST_201: | ||
298 | reorder = 1; | ||
299 | prim = SAVAGE_PRIM_TRILIST; | ||
300 | case SAVAGE_PRIM_TRILIST: | ||
301 | if (n % 3 != 0) { | ||
302 | DRM_ERROR("wrong number of vertices %u in TRILIST\n", | ||
303 | n); | ||
304 | return DRM_ERR(EINVAL); | ||
305 | } | ||
306 | break; | ||
307 | case SAVAGE_PRIM_TRISTRIP: | ||
308 | case SAVAGE_PRIM_TRIFAN: | ||
309 | if (n < 3) { | ||
310 | DRM_ERROR("wrong number of vertices %u in TRIFAN/STRIP\n", | ||
311 | n); | ||
312 | return DRM_ERR(EINVAL); | ||
313 | } | ||
314 | break; | ||
315 | default: | ||
316 | DRM_ERROR("invalid primitive type %u\n", prim); | ||
317 | return DRM_ERR(EINVAL); | ||
318 | } | ||
319 | |||
320 | if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) { | ||
321 | if (skip != 0) { | ||
322 | DRM_ERROR("invalid skip flags 0x%04x for DMA\n", | ||
323 | skip); | ||
324 | return DRM_ERR(EINVAL); | ||
325 | } | ||
326 | } else { | ||
327 | unsigned int size = 10 - (skip & 1) - (skip >> 1 & 1) - | ||
328 | (skip >> 2 & 1) - (skip >> 3 & 1) - (skip >> 4 & 1) - | ||
329 | (skip >> 5 & 1) - (skip >> 6 & 1) - (skip >> 7 & 1); | ||
330 | if (skip > SAVAGE_SKIP_ALL_S4 || size != 8) { | ||
331 | DRM_ERROR("invalid skip flags 0x%04x for DMA\n", | ||
332 | skip); | ||
333 | return DRM_ERR(EINVAL); | ||
334 | } | ||
335 | if (reorder) { | ||
336 | DRM_ERROR("TRILIST_201 used on Savage4 hardware\n"); | ||
337 | return DRM_ERR(EINVAL); | ||
338 | } | ||
339 | } | ||
340 | |||
341 | if (start + n > dmabuf->total/32) { | ||
342 | DRM_ERROR("vertex indices (%u-%u) out of range (0-%u)\n", | ||
343 | start, start + n - 1, dmabuf->total/32); | ||
344 | return DRM_ERR(EINVAL); | ||
345 | } | ||
346 | |||
347 | /* Vertex DMA doesn't work with command DMA at the same time, | ||
348 | * so we use BCI_... to submit commands here. Flush buffered | ||
349 | 	 * fake DMA first. */ | ||
350 | DMA_FLUSH(); | ||
351 | |||
352 | if (dmabuf->bus_address != dev_priv->state.common.vbaddr) { | ||
353 | BEGIN_BCI(2); | ||
354 | BCI_SET_REGISTERS(SAVAGE_VERTBUFADDR, 1); | ||
355 | BCI_WRITE(dmabuf->bus_address | dev_priv->dma_type); | ||
356 | dev_priv->state.common.vbaddr = dmabuf->bus_address; | ||
357 | } | ||
358 | if (S3_SAVAGE3D_SERIES(dev_priv->chipset) && dev_priv->waiting) { | ||
359 | /* Workaround for what looks like a hardware bug. If a | ||
360 | * WAIT_3D_IDLE was emitted some time before the | ||
361 | * indexed drawing command then the engine will lock | ||
362 | * up. There are two known workarounds: | ||
363 | * WAIT_IDLE_EMPTY or emit at least 63 NOPs. */ | ||
364 | BEGIN_BCI(63); | ||
365 | for (i = 0; i < 63; ++i) | ||
366 | BCI_WRITE(BCI_CMD_WAIT); | ||
367 | dev_priv->waiting = 0; | ||
368 | } | ||
369 | |||
370 | prim <<= 25; | ||
371 | while (n != 0) { | ||
372 | /* Can emit up to 255 indices (85 triangles) at once. */ | ||
373 | unsigned int count = n > 255 ? 255 : n; | ||
374 | if (reorder) { | ||
375 | /* Need to reorder indices for correct flat | ||
376 | * shading while preserving the clock sense | ||
377 | * for correct culling. Only on Savage3D. */ | ||
378 | int reorder[3] = {-1, -1, -1}; | ||
379 | reorder[start%3] = 2; | ||
380 | |||
381 | BEGIN_BCI((count+1+1)/2); | ||
382 | BCI_DRAW_INDICES_S3D(count, prim, start+2); | ||
383 | |||
384 | for (i = start+1; i+1 < start+count; i += 2) | ||
385 | BCI_WRITE((i + reorder[i % 3]) | | ||
386 | ((i+1 + reorder[(i+1) % 3]) << 16)); | ||
387 | if (i < start+count) | ||
388 | BCI_WRITE(i + reorder[i%3]); | ||
389 | } else if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) { | ||
390 | BEGIN_BCI((count+1+1)/2); | ||
391 | BCI_DRAW_INDICES_S3D(count, prim, start); | ||
392 | |||
393 | for (i = start+1; i+1 < start+count; i += 2) | ||
394 | BCI_WRITE(i | ((i+1) << 16)); | ||
395 | if (i < start+count) | ||
396 | BCI_WRITE(i); | ||
397 | } else { | ||
398 | BEGIN_BCI((count+2+1)/2); | ||
399 | BCI_DRAW_INDICES_S4(count, prim, skip); | ||
400 | |||
401 | for (i = start; i+1 < start+count; i += 2) | ||
402 | BCI_WRITE(i | ((i+1) << 16)); | ||
403 | if (i < start+count) | ||
404 | BCI_WRITE(i); | ||
405 | } | ||
406 | |||
407 | start += count; | ||
408 | n -= count; | ||
409 | |||
410 | prim |= BCI_CMD_DRAW_CONT; | ||
411 | } | ||
412 | |||
413 | return 0; | ||
414 | } | ||
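The BEGIN_BCI() sizes used in the loop above follow from packing two 16-bit indices per 32-bit BCI word; on Savage3D the first index additionally rides in the draw-command word itself. A standalone sketch of that arithmetic (illustration only, not part of the driver):

    #include <stdio.h>

    /* Savage3D: 1 command word (carrying the first index) plus
     * ceil((count - 1) / 2) packed index words = (count + 1 + 1) / 2.
     * Savage4: 1 command word plus ceil(count / 2) index words
     * = (count + 2 + 1) / 2. */
    static unsigned int bci_words_s3d(unsigned int count)
    {
            return (count + 1 + 1) / 2;
    }

    static unsigned int bci_words_s4(unsigned int count)
    {
            return (count + 2 + 1) / 2;
    }

    int main(void)
    {
            /* Worst case per batch: 255 indices, i.e. 85 triangles. */
            printf("%u %u\n", bci_words_s3d(255), bci_words_s4(255));
            /* prints: 128 129 */
            return 0;
    }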
415 | |||
416 | static int savage_dispatch_vb_prim(drm_savage_private_t *dev_priv, | ||
417 | const drm_savage_cmd_header_t *cmd_header, | ||
418 | const uint32_t __user *vtxbuf, | ||
419 | unsigned int vb_size, | ||
420 | unsigned int vb_stride) | ||
421 | { | ||
422 | unsigned char reorder = 0; | ||
423 | unsigned int prim = cmd_header->prim.prim; | ||
424 | unsigned int skip = cmd_header->prim.skip; | ||
425 | unsigned int n = cmd_header->prim.count; | ||
426 | unsigned int start = cmd_header->prim.start; | ||
427 | unsigned int vtx_size; | ||
428 | unsigned int i; | ||
429 | DMA_LOCALS; | ||
430 | |||
431 | if (!n) | ||
432 | return 0; | ||
433 | |||
434 | switch (prim) { | ||
435 | case SAVAGE_PRIM_TRILIST_201: | ||
436 | reorder = 1; | ||
437 | prim = SAVAGE_PRIM_TRILIST; | ||
438 | case SAVAGE_PRIM_TRILIST: | ||
439 | if (n % 3 != 0) { | ||
440 | DRM_ERROR("wrong number of vertices %u in TRILIST\n", | ||
441 | n); | ||
442 | return DRM_ERR(EINVAL); | ||
443 | } | ||
444 | break; | ||
445 | case SAVAGE_PRIM_TRISTRIP: | ||
446 | case SAVAGE_PRIM_TRIFAN: | ||
447 | if (n < 3) { | ||
448 | DRM_ERROR("wrong number of vertices %u in TRIFAN/STRIP\n", | ||
449 | n); | ||
450 | return DRM_ERR(EINVAL); | ||
451 | } | ||
452 | break; | ||
453 | default: | ||
454 | DRM_ERROR("invalid primitive type %u\n", prim); | ||
455 | return DRM_ERR(EINVAL); | ||
456 | } | ||
457 | |||
458 | if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) { | ||
459 | if (skip > SAVAGE_SKIP_ALL_S3D) { | ||
460 | DRM_ERROR("invalid skip flags 0x%04x\n", skip); | ||
461 | return DRM_ERR(EINVAL); | ||
462 | } | ||
463 | vtx_size = 8; /* full vertex */ | ||
464 | } else { | ||
465 | if (skip > SAVAGE_SKIP_ALL_S4) { | ||
466 | DRM_ERROR("invalid skip flags 0x%04x\n", skip); | ||
467 | return DRM_ERR(EINVAL); | ||
468 | } | ||
469 | vtx_size = 10; /* full vertex */ | ||
470 | } | ||
471 | |||
472 | vtx_size -= (skip & 1) + (skip >> 1 & 1) + | ||
473 | (skip >> 2 & 1) + (skip >> 3 & 1) + (skip >> 4 & 1) + | ||
474 | (skip >> 5 & 1) + (skip >> 6 & 1) + (skip >> 7 & 1); | ||
475 | |||
476 | if (vtx_size > vb_stride) { | ||
477 | DRM_ERROR("vertex size greater than vb stride (%u > %u)\n", | ||
478 | vtx_size, vb_stride); | ||
479 | return DRM_ERR(EINVAL); | ||
480 | } | ||
481 | |||
482 | if (start + n > vb_size / (vb_stride*4)) { | ||
483 | DRM_ERROR("vertex indices (%u-%u) out of range (0-%u)\n", | ||
484 | start, start + n - 1, vb_size / (vb_stride*4)); | ||
485 | return DRM_ERR(EINVAL); | ||
486 | } | ||
487 | |||
488 | prim <<= 25; | ||
489 | while (n != 0) { | ||
490 | /* Can emit up to 255 vertices (85 triangles) at once. */ | ||
491 | unsigned int count = n > 255 ? 255 : n; | ||
492 | if (reorder) { | ||
493 | /* Need to reorder vertices for correct flat | ||
494 | * shading while preserving the clock sense | ||
495 | * for correct culling. Only on Savage3D. */ | ||
496 | int reorder[3] = {-1, -1, -1}; | ||
497 | reorder[start%3] = 2; | ||
498 | |||
499 | BEGIN_DMA(count*vtx_size+1); | ||
500 | DMA_DRAW_PRIMITIVE(count, prim, skip); | ||
501 | |||
502 | for (i = start; i < start+count; ++i) { | ||
503 | unsigned int j = i + reorder[i % 3]; | ||
504 | DMA_COPY_FROM_USER(&vtxbuf[vb_stride*j], | ||
505 | vtx_size); | ||
506 | } | ||
507 | |||
508 | DMA_COMMIT(); | ||
509 | } else { | ||
510 | BEGIN_DMA(count*vtx_size+1); | ||
511 | DMA_DRAW_PRIMITIVE(count, prim, skip); | ||
512 | |||
513 | if (vb_stride == vtx_size) { | ||
514 | DMA_COPY_FROM_USER(&vtxbuf[vb_stride*start], | ||
515 | vtx_size*count); | ||
516 | } else { | ||
517 | for (i = start; i < start+count; ++i) { | ||
518 | DMA_COPY_FROM_USER( | ||
519 | &vtxbuf[vb_stride*i], | ||
520 | vtx_size); | ||
521 | } | ||
522 | } | ||
523 | |||
524 | DMA_COMMIT(); | ||
525 | } | ||
526 | |||
527 | start += count; | ||
528 | n -= count; | ||
529 | |||
530 | prim |= BCI_CMD_DRAW_CONT; | ||
531 | } | ||
532 | |||
533 | return 0; | ||
534 | } | ||
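The `skip` flags above work as a popcount: each of the low eight bits marks one vertex component that user space left out, and each set bit shrinks the vertex by one dword from the full size (8 dwords on Savage3D, 10 on Savage4). A standalone sketch of the same computation:

    /* Mirrors the bit-by-bit subtraction in the dispatch functions. */
    static unsigned int savage_vertex_dwords(unsigned int skip, int is_s3d)
    {
            unsigned int size = is_s3d ? 8 : 10;    /* full vertex */
            unsigned int bit;

            for (bit = 0; bit < 8; ++bit)
                    size -= (skip >> bit) & 1;
            return size;
    }

Note that the vertex-DMA paths earlier in this file reject Savage4 skip flags unless the result is exactly 8 dwords.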
535 | |||
536 | static int savage_dispatch_dma_idx(drm_savage_private_t *dev_priv, | ||
537 | const drm_savage_cmd_header_t *cmd_header, | ||
538 | const uint16_t __user *usr_idx, | ||
539 | const drm_buf_t *dmabuf) | ||
540 | { | ||
541 | unsigned char reorder = 0; | ||
542 | unsigned int prim = cmd_header->idx.prim; | ||
543 | unsigned int skip = cmd_header->idx.skip; | ||
544 | unsigned int n = cmd_header->idx.count; | ||
545 | unsigned int i; | ||
546 | BCI_LOCALS; | ||
547 | |||
548 | if (!dmabuf) { | ||
549 | DRM_ERROR("called without dma buffers!\n"); | ||
550 | return DRM_ERR(EINVAL); | ||
551 | } | ||
552 | |||
553 | if (!n) | ||
554 | return 0; | ||
555 | |||
556 | switch (prim) { | ||
557 | case SAVAGE_PRIM_TRILIST_201: | ||
558 | reorder = 1; | ||
559 | prim = SAVAGE_PRIM_TRILIST; | ||
560 | case SAVAGE_PRIM_TRILIST: | ||
561 | if (n % 3 != 0) { | ||
562 | DRM_ERROR("wrong number of indices %u in TRILIST\n", | ||
563 | n); | ||
564 | return DRM_ERR(EINVAL); | ||
565 | } | ||
566 | break; | ||
567 | case SAVAGE_PRIM_TRISTRIP: | ||
568 | case SAVAGE_PRIM_TRIFAN: | ||
569 | if (n < 3) { | ||
570 | DRM_ERROR("wrong number of indices %u in TRIFAN/STRIP\n", | ||
571 | n); | ||
572 | return DRM_ERR(EINVAL); | ||
573 | } | ||
574 | break; | ||
575 | default: | ||
576 | DRM_ERROR("invalid primitive type %u\n", prim); | ||
577 | return DRM_ERR(EINVAL); | ||
578 | } | ||
579 | |||
580 | if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) { | ||
581 | if (skip != 0) { | ||
582 | DRM_ERROR("invalid skip flags 0x%04x for DMA\n", | ||
583 | skip); | ||
584 | return DRM_ERR(EINVAL); | ||
585 | } | ||
586 | } else { | ||
587 | unsigned int size = 10 - (skip & 1) - (skip >> 1 & 1) - | ||
588 | (skip >> 2 & 1) - (skip >> 3 & 1) - (skip >> 4 & 1) - | ||
589 | (skip >> 5 & 1) - (skip >> 6 & 1) - (skip >> 7 & 1); | ||
590 | if (skip > SAVAGE_SKIP_ALL_S4 || size != 8) { | ||
591 | DRM_ERROR("invalid skip flags 0x%04x for DMA\n", | ||
592 | skip); | ||
593 | return DRM_ERR(EINVAL); | ||
594 | } | ||
595 | if (reorder) { | ||
596 | DRM_ERROR("TRILIST_201 used on Savage4 hardware\n"); | ||
597 | return DRM_ERR(EINVAL); | ||
598 | } | ||
599 | } | ||
600 | |||
601 | /* Vertex DMA doesn't work with command DMA at the same time, | ||
602 | * so we use BCI_... to submit commands here. Flush buffered | ||
603 | * faked DMA first. */ | ||
604 | DMA_FLUSH(); | ||
605 | |||
606 | if (dmabuf->bus_address != dev_priv->state.common.vbaddr) { | ||
607 | BEGIN_BCI(2); | ||
608 | BCI_SET_REGISTERS(SAVAGE_VERTBUFADDR, 1); | ||
609 | BCI_WRITE(dmabuf->bus_address | dev_priv->dma_type); | ||
610 | dev_priv->state.common.vbaddr = dmabuf->bus_address; | ||
611 | } | ||
612 | if (S3_SAVAGE3D_SERIES(dev_priv->chipset) && dev_priv->waiting) { | ||
613 | /* Workaround for what looks like a hardware bug. If a | ||
614 | * WAIT_3D_IDLE was emitted some time before the | ||
615 | * indexed drawing command then the engine will lock | ||
616 | * up. There are two known workarounds: | ||
617 | * WAIT_IDLE_EMPTY or emit at least 63 NOPs. */ | ||
618 | BEGIN_BCI(63); | ||
619 | for (i = 0; i < 63; ++i) | ||
620 | BCI_WRITE(BCI_CMD_WAIT); | ||
621 | dev_priv->waiting = 0; | ||
622 | } | ||
623 | |||
624 | prim <<= 25; | ||
625 | while (n != 0) { | ||
626 | /* Can emit up to 255 indices (85 triangles) at once. */ | ||
627 | unsigned int count = n > 255 ? 255 : n; | ||
628 | /* Is it ok to allocate 510 bytes on the stack in an ioctl? */ | ||
629 | uint16_t idx[255]; | ||
630 | |||
631 | /* Copy and check indices */ | ||
632 | DRM_COPY_FROM_USER_UNCHECKED(idx, usr_idx, count*2); | ||
633 | for (i = 0; i < count; ++i) { | ||
634 | if (idx[i] > dmabuf->total/32) { | ||
635 | DRM_ERROR("idx[%u]=%u out of range (0-%u)\n", | ||
636 | i, idx[i], dmabuf->total/32); | ||
637 | return DRM_ERR(EINVAL); | ||
638 | } | ||
639 | } | ||
640 | |||
641 | if (reorder) { | ||
642 | /* Need to reorder indices for correct flat | ||
643 | * shading while preserving the clock sense | ||
644 | * for correct culling. Only on Savage3D. */ | ||
645 | int reorder[3] = {2, -1, -1}; | ||
646 | |||
647 | BEGIN_BCI((count+1+1)/2); | ||
648 | BCI_DRAW_INDICES_S3D(count, prim, idx[2]); | ||
649 | |||
650 | for (i = 1; i+1 < count; i += 2) | ||
651 | BCI_WRITE(idx[i + reorder[i % 3]] | | ||
652 | (idx[i+1 + reorder[(i+1) % 3]] << 16)); | ||
653 | if (i < count) | ||
654 | BCI_WRITE(idx[i + reorder[i%3]]); | ||
655 | } else if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) { | ||
656 | BEGIN_BCI((count+1+1)/2); | ||
657 | BCI_DRAW_INDICES_S3D(count, prim, idx[0]); | ||
658 | |||
659 | for (i = 1; i+1 < count; i += 2) | ||
660 | BCI_WRITE(idx[i] | (idx[i+1] << 16)); | ||
661 | if (i < count) | ||
662 | BCI_WRITE(idx[i]); | ||
663 | } else { | ||
664 | BEGIN_BCI((count+2+1)/2); | ||
665 | BCI_DRAW_INDICES_S4(count, prim, skip); | ||
666 | |||
667 | for (i = 0; i+1 < count; i += 2) | ||
668 | BCI_WRITE(idx[i] | (idx[i+1] << 16)); | ||
669 | if (i < count) | ||
670 | BCI_WRITE(idx[i]); | ||
671 | } | ||
672 | |||
673 | usr_idx += count; | ||
674 | n -= count; | ||
675 | |||
676 | prim |= BCI_CMD_DRAW_CONT; | ||
677 | } | ||
678 | |||
679 | return 0; | ||
680 | } | ||
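The TRILIST_201 `reorder` table amounts to rotating every triangle from (v0, v1, v2) to (v2, v0, v1): a cyclic rotation keeps the winding (clock sense) for culling while changing which vertex the Savage3D uses for flat shading. A standalone illustration of the indexing used above:

    #include <stdint.h>
    #include <stdio.h>

    /* out[i] = in[i + reorder[i % 3]] with reorder = {2, -1, -1},
     * exactly as the BCI_WRITE loop above indexes idx[]. */
    static void rotate_tri(const uint16_t in[3], uint16_t out[3])
    {
            static const int reorder[3] = {2, -1, -1};
            int i;

            for (i = 0; i < 3; ++i)
                    out[i] = in[i + reorder[i % 3]];
    }

    int main(void)
    {
            uint16_t tri[3] = {10, 11, 12}, rot[3];

            rotate_tri(tri, rot);
            printf("%u %u %u\n", rot[0], rot[1], rot[2]); /* 12 10 11 */
            return 0;
    }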
681 | |||
682 | static int savage_dispatch_vb_idx(drm_savage_private_t *dev_priv, | ||
683 | const drm_savage_cmd_header_t *cmd_header, | ||
684 | const uint16_t __user *usr_idx, | ||
685 | const uint32_t __user *vtxbuf, | ||
686 | unsigned int vb_size, | ||
687 | unsigned int vb_stride) | ||
688 | { | ||
689 | unsigned char reorder = 0; | ||
690 | unsigned int prim = cmd_header->idx.prim; | ||
691 | unsigned int skip = cmd_header->idx.skip; | ||
692 | unsigned int n = cmd_header->idx.count; | ||
693 | unsigned int vtx_size; | ||
694 | unsigned int i; | ||
695 | DMA_LOCALS; | ||
696 | |||
697 | if (!n) | ||
698 | return 0; | ||
699 | |||
700 | switch (prim) { | ||
701 | case SAVAGE_PRIM_TRILIST_201: | ||
702 | reorder = 1; | ||
703 | prim = SAVAGE_PRIM_TRILIST; | ||
704 | case SAVAGE_PRIM_TRILIST: | ||
705 | if (n % 3 != 0) { | ||
706 | DRM_ERROR("wrong number of indices %u in TRILIST\n", | ||
707 | n); | ||
708 | return DRM_ERR(EINVAL); | ||
709 | } | ||
710 | break; | ||
711 | case SAVAGE_PRIM_TRISTRIP: | ||
712 | case SAVAGE_PRIM_TRIFAN: | ||
713 | if (n < 3) { | ||
714 | DRM_ERROR("wrong number of indices %u in TRIFAN/STRIP\n", | ||
715 | n); | ||
716 | return DRM_ERR(EINVAL); | ||
717 | } | ||
718 | break; | ||
719 | default: | ||
720 | DRM_ERROR("invalid primitive type %u\n", prim); | ||
721 | return DRM_ERR(EINVAL); | ||
722 | } | ||
723 | |||
724 | if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) { | ||
725 | if (skip > SAVAGE_SKIP_ALL_S3D) { | ||
726 | DRM_ERROR("invalid skip flags 0x%04x\n", skip); | ||
727 | return DRM_ERR(EINVAL); | ||
728 | } | ||
729 | vtx_size = 8; /* full vertex */ | ||
730 | } else { | ||
731 | if (skip > SAVAGE_SKIP_ALL_S4) { | ||
732 | DRM_ERROR("invalid skip flags 0x%04x\n", skip); | ||
733 | return DRM_ERR(EINVAL); | ||
734 | } | ||
735 | vtx_size = 10; /* full vertex */ | ||
736 | } | ||
737 | |||
738 | vtx_size -= (skip & 1) + (skip >> 1 & 1) + | ||
739 | (skip >> 2 & 1) + (skip >> 3 & 1) + (skip >> 4 & 1) + | ||
740 | (skip >> 5 & 1) + (skip >> 6 & 1) + (skip >> 7 & 1); | ||
741 | |||
742 | if (vtx_size > vb_stride) { | ||
743 | DRM_ERROR("vertex size greater than vb stride (%u > %u)\n", | ||
744 | vtx_size, vb_stride); | ||
745 | return DRM_ERR(EINVAL); | ||
746 | } | ||
747 | |||
748 | prim <<= 25; | ||
749 | while (n != 0) { | ||
750 | /* Can emit up to 255 vertices (85 triangles) at once. */ | ||
751 | unsigned int count = n > 255 ? 255 : n; | ||
752 | /* Is it ok to allocate 510 bytes on the stack in an ioctl? */ | ||
753 | uint16_t idx[255]; | ||
754 | |||
755 | /* Copy and check indices */ | ||
756 | DRM_COPY_FROM_USER_UNCHECKED(idx, usr_idx, count*2); | ||
757 | for (i = 0; i < count; ++i) { | ||
758 | if (idx[i] > vb_size / (vb_stride*4)) { | ||
759 | DRM_ERROR("idx[%u]=%u out of range (0-%u)\n", | ||
760 | i, idx[i], vb_size / (vb_stride*4)); | ||
761 | return DRM_ERR(EINVAL); | ||
762 | } | ||
763 | } | ||
764 | |||
765 | if (reorder) { | ||
766 | /* Need to reorder vertices for correct flat | ||
767 | * shading while preserving the clock sense | ||
768 | * for correct culling. Only on Savage3D. */ | ||
769 | int reorder[3] = {2, -1, -1}; | ||
770 | |||
771 | BEGIN_DMA(count*vtx_size+1); | ||
772 | DMA_DRAW_PRIMITIVE(count, prim, skip); | ||
773 | |||
774 | for (i = 0; i < count; ++i) { | ||
775 | unsigned int j = idx[i + reorder[i % 3]]; | ||
776 | DMA_COPY_FROM_USER(&vtxbuf[vb_stride*j], | ||
777 | vtx_size); | ||
778 | } | ||
779 | |||
780 | DMA_COMMIT(); | ||
781 | } else { | ||
782 | BEGIN_DMA(count*vtx_size+1); | ||
783 | DMA_DRAW_PRIMITIVE(count, prim, skip); | ||
784 | |||
785 | for (i = 0; i < count; ++i) { | ||
786 | unsigned int j = idx[i]; | ||
787 | DMA_COPY_FROM_USER(&vtxbuf[vb_stride*j], | ||
788 | vtx_size); | ||
789 | } | ||
790 | |||
791 | DMA_COMMIT(); | ||
792 | } | ||
793 | |||
794 | usr_idx += count; | ||
795 | n -= count; | ||
796 | |||
797 | prim |= BCI_CMD_DRAW_CONT; | ||
798 | } | ||
799 | |||
800 | return 0; | ||
801 | } | ||
802 | |||
803 | static int savage_dispatch_clear(drm_savage_private_t *dev_priv, | ||
804 | const drm_savage_cmd_header_t *cmd_header, | ||
805 | const drm_savage_cmd_header_t __user *data, | ||
806 | unsigned int nbox, | ||
807 | const drm_clip_rect_t __user *usr_boxes) | ||
808 | { | ||
809 | unsigned int flags = cmd_header->clear0.flags, mask, value; | ||
810 | unsigned int clear_cmd; | ||
811 | unsigned int i, nbufs; | ||
812 | DMA_LOCALS; | ||
813 | |||
814 | if (nbox == 0) | ||
815 | return 0; | ||
816 | |||
817 | DRM_GET_USER_UNCHECKED(mask, &((const drm_savage_cmd_header_t*)data) | ||
818 | ->clear1.mask); | ||
819 | DRM_GET_USER_UNCHECKED(value, &((const drm_savage_cmd_header_t*)data) | ||
820 | ->clear1.value); | ||
821 | |||
822 | clear_cmd = BCI_CMD_RECT | BCI_CMD_RECT_XP | BCI_CMD_RECT_YP | | ||
823 | BCI_CMD_SEND_COLOR | BCI_CMD_DEST_PBD_NEW; | ||
824 | BCI_CMD_SET_ROP(clear_cmd, 0xCC); | ||
825 | |||
826 | nbufs = ((flags & SAVAGE_FRONT) ? 1 : 0) + | ||
827 | ((flags & SAVAGE_BACK) ? 1 : 0) + | ||
828 | ((flags & SAVAGE_DEPTH) ? 1 : 0); | ||
829 | if (nbufs == 0) | ||
830 | return 0; | ||
831 | |||
832 | if (mask != 0xffffffff) { | ||
833 | /* set mask */ | ||
834 | BEGIN_DMA(2); | ||
835 | DMA_SET_REGISTERS(SAVAGE_BITPLANEWTMASK, 1); | ||
836 | DMA_WRITE(mask); | ||
837 | DMA_COMMIT(); | ||
838 | } | ||
839 | for (i = 0; i < nbox; ++i) { | ||
840 | drm_clip_rect_t box; | ||
841 | unsigned int x, y, w, h; | ||
842 | unsigned int buf; | ||
843 | DRM_COPY_FROM_USER_UNCHECKED(&box, &usr_boxes[i], sizeof(box)); | ||
844 | x = box.x1, y = box.y1; | ||
845 | w = box.x2 - box.x1; | ||
846 | h = box.y2 - box.y1; | ||
847 | BEGIN_DMA(nbufs*6); | ||
848 | for (buf = SAVAGE_FRONT; buf <= SAVAGE_DEPTH; buf <<= 1) { | ||
849 | if (!(flags & buf)) | ||
850 | continue; | ||
851 | DMA_WRITE(clear_cmd); | ||
852 | switch(buf) { | ||
853 | case SAVAGE_FRONT: | ||
854 | DMA_WRITE(dev_priv->front_offset); | ||
855 | DMA_WRITE(dev_priv->front_bd); | ||
856 | break; | ||
857 | case SAVAGE_BACK: | ||
858 | DMA_WRITE(dev_priv->back_offset); | ||
859 | DMA_WRITE(dev_priv->back_bd); | ||
860 | break; | ||
861 | case SAVAGE_DEPTH: | ||
862 | DMA_WRITE(dev_priv->depth_offset); | ||
863 | DMA_WRITE(dev_priv->depth_bd); | ||
864 | break; | ||
865 | } | ||
866 | DMA_WRITE(value); | ||
867 | DMA_WRITE(BCI_X_Y(x, y)); | ||
868 | DMA_WRITE(BCI_W_H(w, h)); | ||
869 | } | ||
870 | DMA_COMMIT(); | ||
871 | } | ||
872 | if (mask != 0xffffffff) { | ||
873 | /* reset mask */ | ||
874 | BEGIN_DMA(2); | ||
875 | DMA_SET_REGISTERS(SAVAGE_BITPLANEWTMASK, 1); | ||
876 | DMA_WRITE(0xffffffff); | ||
877 | DMA_COMMIT(); | ||
878 | } | ||
879 | |||
880 | return 0; | ||
881 | } | ||
882 | |||
883 | static int savage_dispatch_swap(drm_savage_private_t *dev_priv, | ||
884 | unsigned int nbox, | ||
885 | const drm_clip_rect_t __user *usr_boxes) | ||
886 | { | ||
887 | unsigned int swap_cmd; | ||
888 | unsigned int i; | ||
889 | DMA_LOCALS; | ||
890 | |||
891 | if (nbox == 0) | ||
892 | return 0; | ||
893 | |||
894 | swap_cmd = BCI_CMD_RECT | BCI_CMD_RECT_XP | BCI_CMD_RECT_YP | | ||
895 | BCI_CMD_SRC_PBD_COLOR_NEW | BCI_CMD_DEST_GBD; | ||
896 | BCI_CMD_SET_ROP(swap_cmd, 0xCC); | ||
897 | |||
898 | for (i = 0; i < nbox; ++i) { | ||
899 | drm_clip_rect_t box; | ||
900 | DRM_COPY_FROM_USER_UNCHECKED(&box, &usr_boxes[i], sizeof(box)); | ||
901 | |||
902 | BEGIN_DMA(6); | ||
903 | DMA_WRITE(swap_cmd); | ||
904 | DMA_WRITE(dev_priv->back_offset); | ||
905 | DMA_WRITE(dev_priv->back_bd); | ||
906 | DMA_WRITE(BCI_X_Y(box.x1, box.y1)); | ||
907 | DMA_WRITE(BCI_X_Y(box.x1, box.y1)); | ||
908 | DMA_WRITE(BCI_W_H(box.x2-box.x1, box.y2-box.y1)); | ||
909 | DMA_COMMIT(); | ||
910 | } | ||
911 | |||
912 | return 0; | ||
913 | } | ||
914 | |||
915 | static int savage_dispatch_draw(drm_savage_private_t *dev_priv, | ||
916 | const drm_savage_cmd_header_t __user *start, | ||
917 | const drm_savage_cmd_header_t __user *end, | ||
918 | const drm_buf_t *dmabuf, | ||
919 | const unsigned int __user *usr_vtxbuf, | ||
920 | unsigned int vb_size, unsigned int vb_stride, | ||
921 | unsigned int nbox, | ||
922 | const drm_clip_rect_t __user *usr_boxes) | ||
923 | { | ||
924 | unsigned int i, j; | ||
925 | int ret; | ||
926 | |||
927 | for (i = 0; i < nbox; ++i) { | ||
928 | drm_clip_rect_t box; | ||
929 | const drm_savage_cmd_header_t __user *usr_cmdbuf; | ||
930 | DRM_COPY_FROM_USER_UNCHECKED(&box, &usr_boxes[i], sizeof(box)); | ||
931 | dev_priv->emit_clip_rect(dev_priv, &box); | ||
932 | |||
933 | usr_cmdbuf = start; | ||
934 | while (usr_cmdbuf < end) { | ||
935 | drm_savage_cmd_header_t cmd_header; | ||
936 | DRM_COPY_FROM_USER_UNCHECKED(&cmd_header, usr_cmdbuf, | ||
937 | sizeof(cmd_header)); | ||
938 | usr_cmdbuf++; | ||
939 | switch (cmd_header.cmd.cmd) { | ||
940 | case SAVAGE_CMD_DMA_PRIM: | ||
941 | ret = savage_dispatch_dma_prim( | ||
942 | dev_priv, &cmd_header, dmabuf); | ||
943 | break; | ||
944 | case SAVAGE_CMD_VB_PRIM: | ||
945 | ret = savage_dispatch_vb_prim( | ||
946 | dev_priv, &cmd_header, | ||
947 | (const uint32_t __user *)usr_vtxbuf, | ||
948 | vb_size, vb_stride); | ||
949 | break; | ||
950 | case SAVAGE_CMD_DMA_IDX: | ||
951 | j = (cmd_header.idx.count + 3) / 4; | ||
952 | /* j was checked in savage_bci_cmdbuf */ | ||
953 | ret = savage_dispatch_dma_idx( | ||
954 | dev_priv, &cmd_header, | ||
955 | (const uint16_t __user *)usr_cmdbuf, | ||
956 | dmabuf); | ||
957 | usr_cmdbuf += j; | ||
958 | break; | ||
959 | case SAVAGE_CMD_VB_IDX: | ||
960 | j = (cmd_header.idx.count + 3) / 4; | ||
961 | /* j was checked in savage_bci_cmdbuf */ | ||
962 | ret = savage_dispatch_vb_idx( | ||
963 | dev_priv, &cmd_header, | ||
964 | (const uint16_t __user *)usr_cmdbuf, | ||
965 | (const uint32_t __user *)usr_vtxbuf, | ||
966 | vb_size, vb_stride); | ||
967 | usr_cmdbuf += j; | ||
968 | break; | ||
969 | default: | ||
970 | /* What's the best return code? EFAULT? */ | ||
971 | DRM_ERROR("IMPLEMENTATION ERROR: " | ||
972 | "non-drawing-command %d\n", | ||
973 | cmd_header.cmd.cmd); | ||
974 | return DRM_ERR(EINVAL); | ||
975 | } | ||
976 | |||
977 | if (ret != 0) | ||
978 | return ret; | ||
979 | } | ||
980 | } | ||
981 | |||
982 | return 0; | ||
983 | } | ||
984 | |||
985 | int savage_bci_cmdbuf(DRM_IOCTL_ARGS) | ||
986 | { | ||
987 | DRM_DEVICE; | ||
988 | drm_savage_private_t *dev_priv = dev->dev_private; | ||
989 | drm_device_dma_t *dma = dev->dma; | ||
990 | drm_buf_t *dmabuf; | ||
991 | drm_savage_cmdbuf_t cmdbuf; | ||
992 | drm_savage_cmd_header_t __user *usr_cmdbuf; | ||
993 | drm_savage_cmd_header_t __user *first_draw_cmd; | ||
994 | unsigned int __user *usr_vtxbuf; | ||
995 | drm_clip_rect_t __user *usr_boxes; | ||
996 | unsigned int i, j; | ||
997 | int ret = 0; | ||
998 | |||
999 | DRM_DEBUG("\n"); | ||
1000 | |||
1001 | LOCK_TEST_WITH_RETURN(dev, filp); | ||
1002 | |||
1003 | DRM_COPY_FROM_USER_IOCTL(cmdbuf, (drm_savage_cmdbuf_t __user *)data, | ||
1004 | sizeof(cmdbuf)); | ||
1005 | |||
1006 | if (dma && dma->buflist) { | ||
1007 | if (cmdbuf.dma_idx >= dma->buf_count) { | ||
1008 | DRM_ERROR("vertex buffer index %u out of range (0-%u)\n", | ||
1009 | cmdbuf.dma_idx, dma->buf_count-1); | ||
1010 | return DRM_ERR(EINVAL); | ||
1011 | } | ||
1012 | dmabuf = dma->buflist[cmdbuf.dma_idx]; | ||
1013 | } else { | ||
1014 | dmabuf = NULL; | ||
1015 | } | ||
1016 | |||
1017 | usr_cmdbuf = (drm_savage_cmd_header_t __user *)cmdbuf.cmd_addr; | ||
1018 | usr_vtxbuf = (unsigned int __user *)cmdbuf.vb_addr; | ||
1019 | usr_boxes = (drm_clip_rect_t __user *)cmdbuf.box_addr; | ||
1020 | if ((cmdbuf.size && DRM_VERIFYAREA_READ(usr_cmdbuf, cmdbuf.size*8)) || | ||
1021 | (cmdbuf.vb_size && DRM_VERIFYAREA_READ( | ||
1022 | usr_vtxbuf, cmdbuf.vb_size)) || | ||
1023 | (cmdbuf.nbox && DRM_VERIFYAREA_READ( | ||
1024 | usr_boxes, cmdbuf.nbox*sizeof(drm_clip_rect_t)))) | ||
1025 | return DRM_ERR(EFAULT); | ||
1026 | |||
1027 | /* Make sure writes to DMA buffers are finished before sending | ||
1028 | * DMA commands to the graphics hardware. */ | ||
1029 | DRM_MEMORYBARRIER(); | ||
1030 | |||
1031 | /* Coming from user space. Don't know if the Xserver has | ||
1032 | * emitted wait commands. Assuming the worst. */ | ||
1033 | dev_priv->waiting = 1; | ||
1034 | |||
1035 | i = 0; | ||
1036 | first_draw_cmd = NULL; | ||
1037 | while (i < cmdbuf.size) { | ||
1038 | drm_savage_cmd_header_t cmd_header; | ||
1039 | DRM_COPY_FROM_USER_UNCHECKED(&cmd_header, usr_cmdbuf, | ||
1040 | sizeof(cmd_header)); | ||
1041 | usr_cmdbuf++; | ||
1042 | i++; | ||
1043 | |||
1044 | /* Group drawing commands with same state to minimize | ||
1045 | * iterations over clip rects. */ | ||
1046 | j = 0; | ||
1047 | switch (cmd_header.cmd.cmd) { | ||
1048 | case SAVAGE_CMD_DMA_IDX: | ||
1049 | case SAVAGE_CMD_VB_IDX: | ||
1050 | j = (cmd_header.idx.count + 3) / 4; | ||
1051 | if (i + j > cmdbuf.size) { | ||
1052 | DRM_ERROR("indexed drawing command extends " | ||
1053 | "beyond end of command buffer\n"); | ||
1054 | DMA_FLUSH(); | ||
1055 | return DRM_ERR(EINVAL); | ||
1056 | } | ||
1057 | /* fall through */ | ||
1058 | case SAVAGE_CMD_DMA_PRIM: | ||
1059 | case SAVAGE_CMD_VB_PRIM: | ||
1060 | if (!first_draw_cmd) | ||
1061 | first_draw_cmd = usr_cmdbuf-1; | ||
1062 | usr_cmdbuf += j; | ||
1063 | i += j; | ||
1064 | break; | ||
1065 | default: | ||
1066 | if (first_draw_cmd) { | ||
1067 | ret = savage_dispatch_draw( | ||
1068 | dev_priv, first_draw_cmd, usr_cmdbuf-1, | ||
1069 | dmabuf, usr_vtxbuf, cmdbuf.vb_size, | ||
1070 | cmdbuf.vb_stride, | ||
1071 | cmdbuf.nbox, usr_boxes); | ||
1072 | if (ret != 0) | ||
1073 | return ret; | ||
1074 | first_draw_cmd = NULL; | ||
1075 | } | ||
1076 | } | ||
1077 | if (first_draw_cmd) | ||
1078 | continue; | ||
1079 | |||
1080 | switch (cmd_header.cmd.cmd) { | ||
1081 | case SAVAGE_CMD_STATE: | ||
1082 | j = (cmd_header.state.count + 1) / 2; | ||
1083 | if (i + j > cmdbuf.size) { | ||
1084 | DRM_ERROR("command SAVAGE_CMD_STATE extends " | ||
1085 | "beyond end of command buffer\n"); | ||
1086 | DMA_FLUSH(); | ||
1087 | return DRM_ERR(EINVAL); | ||
1088 | } | ||
1089 | ret = savage_dispatch_state( | ||
1090 | dev_priv, &cmd_header, | ||
1091 | (uint32_t __user *)usr_cmdbuf); | ||
1092 | usr_cmdbuf += j; | ||
1093 | i += j; | ||
1094 | break; | ||
1095 | case SAVAGE_CMD_CLEAR: | ||
1096 | if (i + 1 > cmdbuf.size) { | ||
1097 | DRM_ERROR("command SAVAGE_CMD_CLEAR extends " | ||
1098 | "beyond end of command buffer\n"); | ||
1099 | DMA_FLUSH(); | ||
1100 | return DRM_ERR(EINVAL); | ||
1101 | } | ||
1102 | ret = savage_dispatch_clear(dev_priv, &cmd_header, | ||
1103 | usr_cmdbuf, | ||
1104 | cmdbuf.nbox, usr_boxes); | ||
1105 | usr_cmdbuf++; | ||
1106 | i++; | ||
1107 | break; | ||
1108 | case SAVAGE_CMD_SWAP: | ||
1109 | ret = savage_dispatch_swap(dev_priv, | ||
1110 | cmdbuf.nbox, usr_boxes); | ||
1111 | break; | ||
1112 | default: | ||
1113 | DRM_ERROR("invalid command 0x%x\n", cmd_header.cmd.cmd); | ||
1114 | DMA_FLUSH(); | ||
1115 | return DRM_ERR(EINVAL); | ||
1116 | } | ||
1117 | |||
1118 | if (ret != 0) { | ||
1119 | DMA_FLUSH(); | ||
1120 | return ret; | ||
1121 | } | ||
1122 | } | ||
1123 | |||
1124 | if (first_draw_cmd) { | ||
1125 | ret = savage_dispatch_draw( | ||
1126 | dev_priv, first_draw_cmd, usr_cmdbuf, dmabuf, | ||
1127 | usr_vtxbuf, cmdbuf.vb_size, cmdbuf.vb_stride, | ||
1128 | cmdbuf.nbox, usr_boxes); | ||
1129 | if (ret != 0) { | ||
1130 | DMA_FLUSH(); | ||
1131 | return ret; | ||
1132 | } | ||
1133 | } | ||
1134 | |||
1135 | DMA_FLUSH(); | ||
1136 | |||
1137 | if (dmabuf && cmdbuf.discard) { | ||
1138 | drm_savage_buf_priv_t *buf_priv = dmabuf->dev_private; | ||
1139 | uint16_t event; | ||
1140 | event = savage_bci_emit_event(dev_priv, SAVAGE_WAIT_3D); | ||
1141 | SET_AGE(&buf_priv->age, event, dev_priv->event_wrap); | ||
1142 | savage_freelist_put(dev, dmabuf); | ||
1143 | } | ||
1144 | |||
1145 | return 0; | ||
1146 | } | ||
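For orientation, this is roughly what a user-space caller of the ioctl above would fill in. The field names are the ones savage_bci_cmdbuf() dereferences; the ioctl request macro, the helper itself, and the exact field types are placeholders, not quoted from the DRM headers:

    /* Hypothetical submission helper (sketch). */
    static int submit_savage_cmdbuf(int fd, drm_savage_cmd_header_t *cmds,
                                    unsigned int ncmds, void *verts,
                                    unsigned int nverts, unsigned int stride,
                                    drm_clip_rect_t *boxes, unsigned int nboxes)
    {
            drm_savage_cmdbuf_t cb;

            memset(&cb, 0, sizeof(cb));
            cb.dma_idx = 0;                 /* index into dma->buflist */
            cb.discard = 1;                 /* retire the buffer when done */
            cb.cmd_addr = cmds;             /* ncmds 64-bit command headers;
                                             * the kernel verifies size*8
                                             * bytes of user memory */
            cb.size = ncmds;
            cb.vb_addr = verts;             /* raw vertex data */
            cb.vb_size = nverts * stride * 4;       /* in bytes */
            cb.vb_stride = stride;                  /* dwords per vertex */
            cb.box_addr = boxes;
            cb.nbox = nboxes;

            return ioctl(fd, DRM_IOCTL_SAVAGE_BCI_CMDBUF, &cb); /* placeholder name */
    }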
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig index 79e8aa6f2b9e..e0239a10d325 100644 --- a/drivers/net/Kconfig +++ b/drivers/net/Kconfig | |||
@@ -1923,6 +1923,17 @@ config R8169_VLAN | |||
1923 | 1923 | ||
1924 | If in doubt, say Y. | 1924 | If in doubt, say Y. |
1925 | 1925 | ||
1926 | config SIS190 | ||
1927 | tristate "SiS190 gigabit ethernet support" | ||
1928 | depends on PCI | ||
1929 | select CRC32 | ||
1930 | select MII | ||
1931 | ---help--- | ||
1932 | Say Y here if you have a SiS 190 PCI Gigabit Ethernet adapter. | ||
1933 | |||
1934 | To compile this driver as a module, choose M here: the module | ||
1935 | will be called sis190. This is recommended. | ||
1936 | |||
1926 | config SKGE | 1937 | config SKGE |
1927 | tristate "New SysKonnect GigaEthernet support (EXPERIMENTAL)" | 1938 | tristate "New SysKonnect GigaEthernet support (EXPERIMENTAL)" |
1928 | depends on PCI && EXPERIMENTAL | 1939 | depends on PCI && EXPERIMENTAL |
@@ -2093,6 +2104,25 @@ endmenu | |||
2093 | menu "Ethernet (10000 Mbit)" | 2104 | menu "Ethernet (10000 Mbit)" |
2094 | depends on !UML | 2105 | depends on !UML |
2095 | 2106 | ||
2107 | config CHELSIO_T1 | ||
2108 | tristate "Chelsio 10Gb Ethernet support" | ||
2109 | depends on PCI | ||
2110 | help | ||
2111 | This driver supports the Chelsio N110 and N210 10Gb Ethernet | ||
2112 | cards. More information about adapter features and performance | ||
2113 | tuning is in <file:Documentation/networking/cxgb.txt>. | ||
2114 | |||
2115 | For general information about Chelsio and our products, visit | ||
2116 | our website at <http://www.chelsio.com>. | ||
2117 | |||
2118 | For customer support, please visit our customer support page at | ||
2119 | <http://www.chelsio.com/support.htm>. | ||
2120 | |||
2121 | Please send feedback to <linux-bugs@chelsio.com>. | ||
2122 | |||
2123 | To compile this driver as a module, choose M here: the module | ||
2124 | will be called cxgb. | ||
2125 | |||
2096 | config IXGB | 2126 | config IXGB |
2097 | tristate "Intel(R) PRO/10GbE support" | 2127 | tristate "Intel(R) PRO/10GbE support" |
2098 | depends on PCI | 2128 | depends on PCI |
diff --git a/drivers/net/Makefile b/drivers/net/Makefile index a369ae284a9a..5baafcd55610 100644 --- a/drivers/net/Makefile +++ b/drivers/net/Makefile | |||
@@ -9,6 +9,7 @@ endif | |||
9 | obj-$(CONFIG_E1000) += e1000/ | 9 | obj-$(CONFIG_E1000) += e1000/ |
10 | obj-$(CONFIG_IBM_EMAC) += ibm_emac/ | 10 | obj-$(CONFIG_IBM_EMAC) += ibm_emac/ |
11 | obj-$(CONFIG_IXGB) += ixgb/ | 11 | obj-$(CONFIG_IXGB) += ixgb/ |
12 | obj-$(CONFIG_CHELSIO_T1) += chelsio/ | ||
12 | obj-$(CONFIG_BONDING) += bonding/ | 13 | obj-$(CONFIG_BONDING) += bonding/ |
13 | obj-$(CONFIG_GIANFAR) += gianfar_driver.o | 14 | obj-$(CONFIG_GIANFAR) += gianfar_driver.o |
14 | 15 | ||
@@ -42,6 +43,7 @@ obj-$(CONFIG_EEPRO100) += eepro100.o | |||
42 | obj-$(CONFIG_E100) += e100.o | 43 | obj-$(CONFIG_E100) += e100.o |
43 | obj-$(CONFIG_TLAN) += tlan.o | 44 | obj-$(CONFIG_TLAN) += tlan.o |
44 | obj-$(CONFIG_EPIC100) += epic100.o | 45 | obj-$(CONFIG_EPIC100) += epic100.o |
46 | obj-$(CONFIG_SIS190) += sis190.o | ||
45 | obj-$(CONFIG_SIS900) += sis900.o | 47 | obj-$(CONFIG_SIS900) += sis900.o |
46 | obj-$(CONFIG_YELLOWFIN) += yellowfin.o | 48 | obj-$(CONFIG_YELLOWFIN) += yellowfin.o |
47 | obj-$(CONFIG_ACENIC) += acenic.o | 49 | obj-$(CONFIG_ACENIC) += acenic.o |
diff --git a/drivers/net/chelsio/Makefile b/drivers/net/chelsio/Makefile new file mode 100644 index 000000000000..91e927827c43 --- /dev/null +++ b/drivers/net/chelsio/Makefile | |||
@@ -0,0 +1,11 @@ | |||
1 | # | ||
2 | # Chelsio 10Gb NIC driver for Linux. | ||
3 | # | ||
4 | |||
5 | obj-$(CONFIG_CHELSIO_T1) += cxgb.o | ||
6 | |||
7 | EXTRA_CFLAGS += -I$(TOPDIR)/drivers/net/chelsio $(DEBUG_FLAGS) | ||
8 | |||
9 | |||
10 | cxgb-objs := cxgb2.o espi.o pm3393.o sge.o subr.o mv88x201x.o | ||
11 | |||
diff --git a/drivers/net/chelsio/common.h b/drivers/net/chelsio/common.h new file mode 100644 index 000000000000..f09348802b46 --- /dev/null +++ b/drivers/net/chelsio/common.h | |||
@@ -0,0 +1,314 @@ | |||
1 | /***************************************************************************** | ||
2 | * * | ||
3 | * File: common.h * | ||
4 | * $Revision: 1.21 $ * | ||
5 | * $Date: 2005/06/22 00:43:25 $ * | ||
6 | * Description: * | ||
7 | * part of the Chelsio 10Gb Ethernet Driver. * | ||
8 | * * | ||
9 | * This program is free software; you can redistribute it and/or modify * | ||
10 | * it under the terms of the GNU General Public License, version 2, as * | ||
11 | * published by the Free Software Foundation. * | ||
12 | * * | ||
13 | * You should have received a copy of the GNU General Public License along * | ||
14 | * with this program; if not, write to the Free Software Foundation, Inc., * | ||
15 | * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. * | ||
16 | * * | ||
17 | * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED * | ||
18 | * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF * | ||
19 | * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. * | ||
20 | * * | ||
21 | * http://www.chelsio.com * | ||
22 | * * | ||
23 | * Copyright (c) 2003 - 2005 Chelsio Communications, Inc. * | ||
24 | * All rights reserved. * | ||
25 | * * | ||
26 | * Maintainers: maintainers@chelsio.com * | ||
27 | * * | ||
28 | * Authors: Dimitrios Michailidis <dm@chelsio.com> * | ||
29 | * Tina Yang <tainay@chelsio.com> * | ||
30 | * Felix Marti <felix@chelsio.com> * | ||
31 | * Scott Bardone <sbardone@chelsio.com> * | ||
32 | * Kurt Ottaway <kottaway@chelsio.com> * | ||
33 | * Frank DiMambro <frank@chelsio.com> * | ||
34 | * * | ||
35 | * History: * | ||
36 | * * | ||
37 | ****************************************************************************/ | ||
38 | |||
39 | #ifndef _CXGB_COMMON_H_ | ||
40 | #define _CXGB_COMMON_H_ | ||
41 | |||
42 | #include <linux/config.h> | ||
43 | #include <linux/module.h> | ||
44 | #include <linux/netdevice.h> | ||
45 | #include <linux/types.h> | ||
46 | #include <linux/delay.h> | ||
47 | #include <linux/pci.h> | ||
48 | #include <linux/ethtool.h> | ||
49 | #include <linux/mii.h> | ||
50 | #include <linux/crc32.h> | ||
51 | #include <linux/init.h> | ||
52 | #include <asm/io.h> | ||
53 | #include <linux/pci_ids.h> | ||
54 | |||
55 | #define DRV_DESCRIPTION "Chelsio 10Gb Ethernet Driver" | ||
56 | #define DRV_NAME "cxgb" | ||
57 | #define DRV_VERSION "2.1.1" | ||
58 | #define PFX DRV_NAME ": " | ||
59 | |||
60 | #define CH_ERR(fmt, ...) printk(KERN_ERR PFX fmt, ## __VA_ARGS__) | ||
61 | #define CH_WARN(fmt, ...) printk(KERN_WARNING PFX fmt, ## __VA_ARGS__) | ||
62 | #define CH_ALERT(fmt, ...) printk(KERN_ALERT PFX fmt, ## __VA_ARGS__) | ||
63 | |||
64 | #define CH_DEVICE(devid, ssid, idx) \ | ||
65 | { PCI_VENDOR_ID_CHELSIO, devid, PCI_ANY_ID, ssid, 0, 0, idx } | ||
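CH_DEVICE builds one pci_device_id entry whose driver_data slot carries the board index `idx`. An illustrative table (the device and subsystem IDs below are placeholders, and using the CHBT_BOARD_* enum as the index is an assumption, not something this header states):

    struct pci_device_id t1_pci_tbl[] = {
            CH_DEVICE(0x0007, 0x0001, CHBT_BOARD_N110), /* placeholder IDs */
            CH_DEVICE(0x000a, 0x0001, CHBT_BOARD_N210), /* placeholder IDs */
            { 0 }
    };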
66 | |||
67 | #define SUPPORTED_PAUSE (1 << 13) | ||
68 | #define SUPPORTED_LOOPBACK (1 << 15) | ||
69 | |||
70 | #define ADVERTISED_PAUSE (1 << 13) | ||
71 | #define ADVERTISED_ASYM_PAUSE (1 << 14) | ||
72 | |||
73 | typedef struct adapter adapter_t; | ||
74 | |||
75 | void t1_elmer0_ext_intr(adapter_t *adapter); | ||
76 | void t1_link_changed(adapter_t *adapter, int port_id, int link_status, | ||
77 | int speed, int duplex, int fc); | ||
78 | |||
79 | struct t1_rx_mode { | ||
80 | struct net_device *dev; | ||
81 | u32 idx; | ||
82 | struct dev_mc_list *list; | ||
83 | }; | ||
84 | |||
85 | #define t1_rx_mode_promisc(rm) (rm->dev->flags & IFF_PROMISC) | ||
86 | #define t1_rx_mode_allmulti(rm) (rm->dev->flags & IFF_ALLMULTI) | ||
87 | #define t1_rx_mode_mc_cnt(rm) (rm->dev->mc_count) | ||
88 | |||
89 | static inline u8 *t1_get_next_mcaddr(struct t1_rx_mode *rm) | ||
90 | { | ||
91 | u8 *addr = NULL; | ||
92 | |||
93 | if (rm->idx++ < rm->dev->mc_count) { | ||
94 | addr = rm->list->dmi_addr; | ||
95 | rm->list = rm->list->next; | ||
96 | } | ||
97 | return addr; | ||
98 | } | ||
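A MAC implementation consuming this iterator would loop until it returns NULL (sketch; `hash_mc_addr` is a hypothetical helper standing in for whatever the cmac does with each address):

    static void program_mc_list(struct t1_rx_mode *rm)
    {
            u8 *addr;

            while ((addr = t1_get_next_mcaddr(rm)) != NULL)
                    hash_mc_addr(addr);     /* hypothetical: add to HW filter */
    }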
99 | |||
100 | #define MAX_NPORTS 4 | ||
101 | |||
102 | #define SPEED_INVALID 0xffff | ||
103 | #define DUPLEX_INVALID 0xff | ||
104 | |||
105 | enum { | ||
106 | CHBT_BOARD_N110, | ||
107 | CHBT_BOARD_N210 | ||
108 | }; | ||
109 | |||
110 | enum { | ||
111 | CHBT_TERM_T1, | ||
112 | CHBT_TERM_T2 | ||
113 | }; | ||
114 | |||
115 | enum { | ||
116 | CHBT_MAC_PM3393, | ||
117 | }; | ||
118 | |||
119 | enum { | ||
120 | CHBT_PHY_88X2010, | ||
121 | }; | ||
122 | |||
123 | enum { | ||
124 | PAUSE_RX = 1 << 0, | ||
125 | PAUSE_TX = 1 << 1, | ||
126 | PAUSE_AUTONEG = 1 << 2 | ||
127 | }; | ||
128 | |||
129 | /* Revisions of T1 chip */ | ||
130 | enum { | ||
131 | TERM_T1A = 0, | ||
132 | TERM_T1B = 1, | ||
133 | TERM_T2 = 3 | ||
134 | }; | ||
135 | |||
136 | struct sge_params { | ||
137 | unsigned int cmdQ_size[2]; | ||
138 | unsigned int freelQ_size[2]; | ||
139 | unsigned int large_buf_capacity; | ||
140 | unsigned int rx_coalesce_usecs; | ||
141 | unsigned int last_rx_coalesce_raw; | ||
142 | unsigned int default_rx_coalesce_usecs; | ||
143 | unsigned int sample_interval_usecs; | ||
144 | unsigned int coalesce_enable; | ||
145 | unsigned int polling; | ||
146 | }; | ||
147 | |||
148 | struct chelsio_pci_params { | ||
149 | unsigned short speed; | ||
150 | unsigned char width; | ||
151 | unsigned char is_pcix; | ||
152 | }; | ||
153 | |||
154 | struct adapter_params { | ||
155 | struct sge_params sge; | ||
156 | struct chelsio_pci_params pci; | ||
157 | |||
158 | const struct board_info *brd_info; | ||
159 | |||
160 | unsigned int nports; /* # of ethernet ports */ | ||
161 | unsigned int stats_update_period; | ||
162 | unsigned short chip_revision; | ||
163 | unsigned char chip_version; | ||
164 | }; | ||
165 | |||
166 | struct link_config { | ||
167 | unsigned int supported; /* link capabilities */ | ||
168 | unsigned int advertising; /* advertised capabilities */ | ||
169 | unsigned short requested_speed; /* speed user has requested */ | ||
170 | unsigned short speed; /* actual link speed */ | ||
171 | unsigned char requested_duplex; /* duplex user has requested */ | ||
172 | unsigned char duplex; /* actual link duplex */ | ||
173 | unsigned char requested_fc; /* flow control user has requested */ | ||
174 | unsigned char fc; /* actual link flow control */ | ||
175 | unsigned char autoneg; /* autonegotiating? */ | ||
176 | }; | ||
177 | |||
178 | struct cmac; | ||
179 | struct cphy; | ||
180 | |||
181 | struct port_info { | ||
182 | struct net_device *dev; | ||
183 | struct cmac *mac; | ||
184 | struct cphy *phy; | ||
185 | struct link_config link_config; | ||
186 | struct net_device_stats netstats; | ||
187 | }; | ||
188 | |||
189 | struct sge; | ||
190 | struct peespi; | ||
191 | |||
192 | struct adapter { | ||
193 | u8 *regs; | ||
194 | struct pci_dev *pdev; | ||
195 | unsigned long registered_device_map; | ||
196 | unsigned long open_device_map; | ||
197 | unsigned long flags; | ||
198 | |||
199 | const char *name; | ||
200 | int msg_enable; | ||
201 | u32 mmio_len; | ||
202 | |||
203 | struct work_struct ext_intr_handler_task; | ||
204 | struct adapter_params params; | ||
205 | |||
206 | struct vlan_group *vlan_grp; | ||
207 | |||
208 | /* Terminator modules. */ | ||
209 | struct sge *sge; | ||
210 | struct peespi *espi; | ||
211 | |||
212 | struct port_info port[MAX_NPORTS]; | ||
213 | struct work_struct stats_update_task; | ||
214 | struct timer_list stats_update_timer; | ||
215 | |||
216 | struct semaphore mib_mutex; | ||
217 | spinlock_t tpi_lock; | ||
218 | spinlock_t work_lock; | ||
219 | /* guards async operations */ | ||
220 | spinlock_t async_lock ____cacheline_aligned; | ||
221 | u32 slow_intr_mask; | ||
222 | }; | ||
223 | |||
224 | enum { /* adapter flags */ | ||
225 | FULL_INIT_DONE = 1 << 0, | ||
226 | TSO_CAPABLE = 1 << 2, | ||
227 | TCP_CSUM_CAPABLE = 1 << 3, | ||
228 | UDP_CSUM_CAPABLE = 1 << 4, | ||
229 | VLAN_ACCEL_CAPABLE = 1 << 5, | ||
230 | RX_CSUM_ENABLED = 1 << 6, | ||
231 | }; | ||
232 | |||
233 | struct mdio_ops; | ||
234 | struct gmac; | ||
235 | struct gphy; | ||
236 | |||
237 | struct board_info { | ||
238 | unsigned char board; | ||
239 | unsigned char port_number; | ||
240 | unsigned long caps; | ||
241 | unsigned char chip_term; | ||
242 | unsigned char chip_mac; | ||
243 | unsigned char chip_phy; | ||
244 | unsigned int clock_core; | ||
245 | unsigned int clock_mc3; | ||
246 | unsigned int clock_mc4; | ||
247 | unsigned int espi_nports; | ||
248 | unsigned int clock_cspi; | ||
249 | unsigned int clock_elmer0; | ||
250 | unsigned char mdio_mdien; | ||
251 | unsigned char mdio_mdiinv; | ||
252 | unsigned char mdio_mdc; | ||
253 | unsigned char mdio_phybaseaddr; | ||
254 | struct gmac *gmac; | ||
255 | struct gphy *gphy; | ||
256 | struct mdio_ops *mdio_ops; | ||
257 | const char *desc; | ||
258 | }; | ||
259 | |||
260 | extern struct pci_device_id t1_pci_tbl[]; | ||
261 | |||
262 | static inline int adapter_matches_type(const adapter_t *adapter, | ||
263 | int version, int revision) | ||
264 | { | ||
265 | return adapter->params.chip_version == version && | ||
266 | adapter->params.chip_revision == revision; | ||
267 | } | ||
268 | |||
269 | #define t1_is_T1B(adap) adapter_matches_type(adap, CHBT_TERM_T1, TERM_T1B) | ||
270 | #define is_T2(adap) adapter_matches_type(adap, CHBT_TERM_T2, TERM_T2) | ||
271 | |||
272 | /* Returns true if an adapter supports VLAN acceleration and TSO */ | ||
273 | static inline int vlan_tso_capable(const adapter_t *adapter) | ||
274 | { | ||
275 | return !t1_is_T1B(adapter); | ||
276 | } | ||
277 | |||
278 | #define for_each_port(adapter, iter) \ | ||
279 | for (iter = 0; iter < (adapter)->params.nports; ++iter) | ||
280 | |||
281 | #define board_info(adapter) ((adapter)->params.brd_info) | ||
282 | #define is_10G(adapter) (board_info(adapter)->caps & SUPPORTED_10000baseT_Full) | ||
283 | |||
284 | static inline unsigned int core_ticks_per_usec(const adapter_t *adap) | ||
285 | { | ||
286 | return board_info(adap)->clock_core / 1000000; | ||
287 | } | ||
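Presumably this is how microsecond settings such as `rx_coalesce_usecs` get scaled to the adapter's core clock; a trivial sketch of that assumed conversion:

    static inline unsigned int usecs_to_core_ticks(const adapter_t *adap,
                                                   unsigned int usecs)
    {
            return usecs * core_ticks_per_usec(adap);
    }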
288 | |||
289 | extern int t1_tpi_write(adapter_t *adapter, u32 addr, u32 value); | ||
290 | extern int t1_tpi_read(adapter_t *adapter, u32 addr, u32 *value); | ||
291 | |||
292 | extern void t1_interrupts_enable(adapter_t *adapter); | ||
293 | extern void t1_interrupts_disable(adapter_t *adapter); | ||
294 | extern void t1_interrupts_clear(adapter_t *adapter); | ||
295 | extern int elmer0_ext_intr_handler(adapter_t *adapter); | ||
296 | extern int t1_slow_intr_handler(adapter_t *adapter); | ||
297 | |||
298 | extern int t1_link_start(struct cphy *phy, struct cmac *mac, struct link_config *lc); | ||
299 | extern const struct board_info *t1_get_board_info(unsigned int board_id); | ||
300 | extern const struct board_info *t1_get_board_info_from_ids(unsigned int devid, | ||
301 | unsigned short ssid); | ||
302 | extern int t1_seeprom_read(adapter_t *adapter, u32 addr, u32 *data); | ||
303 | extern int t1_get_board_rev(adapter_t *adapter, const struct board_info *bi, | ||
304 | struct adapter_params *p); | ||
305 | extern int t1_init_hw_modules(adapter_t *adapter); | ||
306 | extern int t1_init_sw_modules(adapter_t *adapter, const struct board_info *bi); | ||
307 | extern void t1_free_sw_modules(adapter_t *adapter); | ||
308 | extern void t1_fatal_err(adapter_t *adapter); | ||
309 | |||
310 | extern void t1_tp_set_udp_checksum_offload(adapter_t *adapter, int enable); | ||
311 | extern void t1_tp_set_tcp_checksum_offload(adapter_t *adapter, int enable); | ||
312 | extern void t1_tp_set_ip_checksum_offload(adapter_t *adapter, int enable); | ||
313 | |||
314 | #endif /* _CXGB_COMMON_H_ */ | ||
diff --git a/drivers/net/chelsio/cphy.h b/drivers/net/chelsio/cphy.h new file mode 100644 index 000000000000..3412342f7345 --- /dev/null +++ b/drivers/net/chelsio/cphy.h | |||
@@ -0,0 +1,148 @@ | |||
1 | /***************************************************************************** | ||
2 | * * | ||
3 | * File: cphy.h * | ||
4 | * $Revision: 1.7 $ * | ||
5 | * $Date: 2005/06/21 18:29:47 $ * | ||
6 | * Description: * | ||
7 | * part of the Chelsio 10Gb Ethernet Driver. * | ||
8 | * * | ||
9 | * This program is free software; you can redistribute it and/or modify * | ||
10 | * it under the terms of the GNU General Public License, version 2, as * | ||
11 | * published by the Free Software Foundation. * | ||
12 | * * | ||
13 | * You should have received a copy of the GNU General Public License along * | ||
14 | * with this program; if not, write to the Free Software Foundation, Inc., * | ||
15 | * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. * | ||
16 | * * | ||
17 | * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED * | ||
18 | * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF * | ||
19 | * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. * | ||
20 | * * | ||
21 | * http://www.chelsio.com * | ||
22 | * * | ||
23 | * Copyright (c) 2003 - 2005 Chelsio Communications, Inc. * | ||
24 | * All rights reserved. * | ||
25 | * * | ||
26 | * Maintainers: maintainers@chelsio.com * | ||
27 | * * | ||
28 | * Authors: Dimitrios Michailidis <dm@chelsio.com> * | ||
29 | * Tina Yang <tainay@chelsio.com> * | ||
30 | * Felix Marti <felix@chelsio.com> * | ||
31 | * Scott Bardone <sbardone@chelsio.com> * | ||
32 | * Kurt Ottaway <kottaway@chelsio.com> * | ||
33 | * Frank DiMambro <frank@chelsio.com> * | ||
34 | * * | ||
35 | * History: * | ||
36 | * * | ||
37 | ****************************************************************************/ | ||
38 | |||
39 | #ifndef _CXGB_CPHY_H_ | ||
40 | #define _CXGB_CPHY_H_ | ||
41 | |||
42 | #include "common.h" | ||
43 | |||
44 | struct mdio_ops { | ||
45 | void (*init)(adapter_t *adapter, const struct board_info *bi); | ||
46 | int (*read)(adapter_t *adapter, int phy_addr, int mmd_addr, | ||
47 | int reg_addr, unsigned int *val); | ||
48 | int (*write)(adapter_t *adapter, int phy_addr, int mmd_addr, | ||
49 | int reg_addr, unsigned int val); | ||
50 | }; | ||
51 | |||
52 | /* PHY interrupt types */ | ||
53 | enum { | ||
54 | cphy_cause_link_change = 0x1, | ||
55 | cphy_cause_error = 0x2 | ||
56 | }; | ||
57 | |||
58 | struct cphy; | ||
59 | |||
60 | /* PHY operations */ | ||
61 | struct cphy_ops { | ||
62 | void (*destroy)(struct cphy *); | ||
63 | int (*reset)(struct cphy *, int wait); | ||
64 | |||
65 | int (*interrupt_enable)(struct cphy *); | ||
66 | int (*interrupt_disable)(struct cphy *); | ||
67 | int (*interrupt_clear)(struct cphy *); | ||
68 | int (*interrupt_handler)(struct cphy *); | ||
69 | |||
70 | int (*autoneg_enable)(struct cphy *); | ||
71 | int (*autoneg_disable)(struct cphy *); | ||
72 | int (*autoneg_restart)(struct cphy *); | ||
73 | |||
74 | int (*advertise)(struct cphy *phy, unsigned int advertise_map); | ||
75 | int (*set_loopback)(struct cphy *, int on); | ||
76 | int (*set_speed_duplex)(struct cphy *phy, int speed, int duplex); | ||
77 | int (*get_link_status)(struct cphy *phy, int *link_ok, int *speed, | ||
78 | int *duplex, int *fc); | ||
79 | }; | ||
80 | |||
81 | /* A PHY instance */ | ||
82 | struct cphy { | ||
83 | int addr; /* PHY address */ | ||
84 | adapter_t *adapter; /* associated adapter */ | ||
85 | struct cphy_ops *ops; /* PHY operations */ | ||
86 | int (*mdio_read)(adapter_t *adapter, int phy_addr, int mmd_addr, | ||
87 | int reg_addr, unsigned int *val); | ||
88 | int (*mdio_write)(adapter_t *adapter, int phy_addr, int mmd_addr, | ||
89 | int reg_addr, unsigned int val); | ||
90 | struct cphy_instance *instance; | ||
91 | }; | ||
92 | |||
93 | /* Convenience MDIO read/write wrappers */ | ||
94 | static inline int mdio_read(struct cphy *cphy, int mmd, int reg, | ||
95 | unsigned int *valp) | ||
96 | { | ||
97 | return cphy->mdio_read(cphy->adapter, cphy->addr, mmd, reg, valp); | ||
98 | } | ||
99 | |||
100 | static inline int mdio_write(struct cphy *cphy, int mmd, int reg, | ||
101 | unsigned int val) | ||
102 | { | ||
103 | return cphy->mdio_write(cphy->adapter, cphy->addr, mmd, reg, val); | ||
104 | } | ||
105 | |||
106 | static inline int simple_mdio_read(struct cphy *cphy, int reg, | ||
107 | unsigned int *valp) | ||
108 | { | ||
109 | return mdio_read(cphy, 0, reg, valp); | ||
110 | } | ||
111 | |||
112 | static inline int simple_mdio_write(struct cphy *cphy, int reg, | ||
113 | unsigned int val) | ||
114 | { | ||
115 | return mdio_write(cphy, 0, reg, val); | ||
116 | } | ||
117 | |||
118 | /* Convenience initializer */ | ||
119 | static inline void cphy_init(struct cphy *phy, adapter_t *adapter, | ||
120 | int phy_addr, struct cphy_ops *phy_ops, | ||
121 | struct mdio_ops *mdio_ops) | ||
122 | { | ||
123 | phy->adapter = adapter; | ||
124 | phy->addr = phy_addr; | ||
125 | phy->ops = phy_ops; | ||
126 | if (mdio_ops) { | ||
127 | phy->mdio_read = mdio_ops->read; | ||
128 | phy->mdio_write = mdio_ops->write; | ||
129 | } | ||
130 | } | ||
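A PHY backend's `create` hook (see struct gphy below) would typically allocate a cphy and pass its ops table plus the board's MDIO ops through cphy_init(). A sketch under that assumption, with `my_phy_ops` as a stand-in for a real ops table:

    static struct cphy_ops my_phy_ops;      /* stand-in ops table */

    static struct cphy *my_phy_create(adapter_t *adapter, int phy_addr,
                                      struct mdio_ops *mdio_ops)
    {
            struct cphy *cphy = kmalloc(sizeof(*cphy), GFP_KERNEL);

            if (cphy) {
                    memset(cphy, 0, sizeof(*cphy));
                    cphy_init(cphy, adapter, phy_addr, &my_phy_ops, mdio_ops);
            }
            return cphy;
    }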
131 | |||
132 | /* Operations of the PHY-instance factory */ | ||
133 | struct gphy { | ||
134 | /* Construct a PHY instance with the given PHY address */ | ||
135 | struct cphy *(*create)(adapter_t *adapter, int phy_addr, | ||
136 | struct mdio_ops *mdio_ops); | ||
137 | |||
138 | /* | ||
139 | * Reset the PHY chip. This resets the whole PHY chip, not individual | ||
140 | * ports. | ||
141 | */ | ||
142 | int (*reset)(adapter_t *adapter); | ||
143 | }; | ||
144 | |||
145 | extern struct gphy t1_mv88x201x_ops; | ||
146 | extern struct gphy t1_dummy_phy_ops; | ||
147 | |||
148 | #endif /* _CXGB_CPHY_H_ */ | ||
diff --git a/drivers/net/chelsio/cpl5_cmd.h b/drivers/net/chelsio/cpl5_cmd.h new file mode 100644 index 000000000000..27925e487bcf --- /dev/null +++ b/drivers/net/chelsio/cpl5_cmd.h | |||
@@ -0,0 +1,145 @@ | |||
1 | /***************************************************************************** | ||
2 | * * | ||
3 | * File: cpl5_cmd.h * | ||
4 | * $Revision: 1.6 $ * | ||
5 | * $Date: 2005/06/21 18:29:47 $ * | ||
6 | * Description: * | ||
7 | * part of the Chelsio 10Gb Ethernet Driver. * | ||
8 | * * | ||
9 | * This program is free software; you can redistribute it and/or modify * | ||
10 | * it under the terms of the GNU General Public License, version 2, as * | ||
11 | * published by the Free Software Foundation. * | ||
12 | * * | ||
13 | * You should have received a copy of the GNU General Public License along * | ||
14 | * with this program; if not, write to the Free Software Foundation, Inc., * | ||
15 | * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. * | ||
16 | * * | ||
17 | * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED * | ||
18 | * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF * | ||
19 | * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. * | ||
20 | * * | ||
21 | * http://www.chelsio.com * | ||
22 | * * | ||
23 | * Copyright (c) 2003 - 2005 Chelsio Communications, Inc. * | ||
24 | * All rights reserved. * | ||
25 | * * | ||
26 | * Maintainers: maintainers@chelsio.com * | ||
27 | * * | ||
28 | * Authors: Dimitrios Michailidis <dm@chelsio.com> * | ||
29 | * Tina Yang <tainay@chelsio.com> * | ||
30 | * Felix Marti <felix@chelsio.com> * | ||
31 | * Scott Bardone <sbardone@chelsio.com> * | ||
32 | * Kurt Ottaway <kottaway@chelsio.com> * | ||
33 | * Frank DiMambro <frank@chelsio.com> * | ||
34 | * * | ||
35 | * History: * | ||
36 | * * | ||
37 | ****************************************************************************/ | ||
38 | |||
39 | #ifndef _CXGB_CPL5_CMD_H_ | ||
40 | #define _CXGB_CPL5_CMD_H_ | ||
41 | |||
42 | #include <asm/byteorder.h> | ||
43 | |||
44 | #if !defined(__LITTLE_ENDIAN_BITFIELD) && !defined(__BIG_ENDIAN_BITFIELD) | ||
45 | #error "Adjust your <asm/byteorder.h> defines" | ||
46 | #endif | ||
47 | |||
48 | enum CPL_opcode { | ||
49 | CPL_RX_PKT = 0xAD, | ||
50 | CPL_TX_PKT = 0xB2, | ||
51 | CPL_TX_PKT_LSO = 0xB6, | ||
52 | }; | ||
53 | |||
54 | enum { /* TX_PKT_LSO ethernet types */ | ||
55 | CPL_ETH_II, | ||
56 | CPL_ETH_II_VLAN, | ||
57 | CPL_ETH_802_3, | ||
58 | CPL_ETH_802_3_VLAN | ||
59 | }; | ||
60 | |||
61 | struct cpl_rx_data { | ||
62 | u32 rsvd0; | ||
63 | u32 len; | ||
64 | u32 seq; | ||
65 | u16 urg; | ||
66 | u8 rsvd1; | ||
67 | u8 status; | ||
68 | }; | ||
69 | |||
70 | /* | ||
71 | * We want this header's alignment to be no more stringent than 2-byte aligned. | ||
72 | * All fields are u8 or u16 except for the length. However that field is not | ||
73 | * used so we break it into 2 16-bit parts to easily meet our alignment needs. | ||
74 | */ | ||
75 | struct cpl_tx_pkt { | ||
76 | u8 opcode; | ||
77 | #if defined(__LITTLE_ENDIAN_BITFIELD) | ||
78 | u8 iff:4; | ||
79 | u8 ip_csum_dis:1; | ||
80 | u8 l4_csum_dis:1; | ||
81 | u8 vlan_valid:1; | ||
82 | u8 rsvd:1; | ||
83 | #else | ||
84 | u8 rsvd:1; | ||
85 | u8 vlan_valid:1; | ||
86 | u8 l4_csum_dis:1; | ||
87 | u8 ip_csum_dis:1; | ||
88 | u8 iff:4; | ||
89 | #endif | ||
90 | u16 vlan; | ||
91 | u16 len_hi; | ||
92 | u16 len_lo; | ||
93 | }; | ||
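Given the len_hi/len_lo split justified in the comment above, composing the 32-bit length would look like this (illustration only; any byte order the hardware expects is ignored here, and per the comment the field is unused anyway):

    static inline void cpl_tx_pkt_set_len(struct cpl_tx_pkt *p, u32 len)
    {
            p->len_hi = (u16)(len >> 16);
            p->len_lo = (u16)(len & 0xffff);
    }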
94 | |||
95 | struct cpl_tx_pkt_lso { | ||
96 | u8 opcode; | ||
97 | #if defined(__LITTLE_ENDIAN_BITFIELD) | ||
98 | u8 iff:4; | ||
99 | u8 ip_csum_dis:1; | ||
100 | u8 l4_csum_dis:1; | ||
101 | u8 vlan_valid:1; | ||
102 | u8 rsvd:1; | ||
103 | #else | ||
104 | u8 rsvd:1; | ||
105 | u8 vlan_valid:1; | ||
106 | u8 l4_csum_dis:1; | ||
107 | u8 ip_csum_dis:1; | ||
108 | u8 iff:4; | ||
109 | #endif | ||
110 | u16 vlan; | ||
111 | u32 len; | ||
112 | |||
113 | u32 rsvd2; | ||
114 | u8 rsvd3; | ||
115 | #if defined(__LITTLE_ENDIAN_BITFIELD) | ||
116 | u8 tcp_hdr_words:4; | ||
117 | u8 ip_hdr_words:4; | ||
118 | #else | ||
119 | u8 ip_hdr_words:4; | ||
120 | u8 tcp_hdr_words:4; | ||
121 | #endif | ||
122 | u16 eth_type_mss; | ||
123 | }; | ||
124 | |||
125 | struct cpl_rx_pkt { | ||
126 | u8 opcode; | ||
127 | #if defined(__LITTLE_ENDIAN_BITFIELD) | ||
128 | u8 iff:4; | ||
129 | u8 csum_valid:1; | ||
130 | u8 bad_pkt:1; | ||
131 | u8 vlan_valid:1; | ||
132 | u8 rsvd:1; | ||
133 | #else | ||
134 | u8 rsvd:1; | ||
135 | u8 vlan_valid:1; | ||
136 | u8 bad_pkt:1; | ||
137 | u8 csum_valid:1; | ||
138 | u8 iff:4; | ||
139 | #endif | ||
140 | u16 csum; | ||
141 | u16 vlan; | ||
142 | u16 len; | ||
143 | }; | ||
144 | |||
145 | #endif /* _CXGB_CPL5_CMD_H_ */ | ||
diff --git a/drivers/net/chelsio/cxgb2.c b/drivers/net/chelsio/cxgb2.c new file mode 100644 index 000000000000..28ae478b386d --- /dev/null +++ b/drivers/net/chelsio/cxgb2.c | |||
@@ -0,0 +1,1256 @@ | |||
1 | /***************************************************************************** | ||
2 | * * | ||
3 | * File: cxgb2.c * | ||
4 | * $Revision: 1.25 $ * | ||
5 | * $Date: 2005/06/22 00:43:25 $ * | ||
6 | * Description: * | ||
7 | * Chelsio 10Gb Ethernet Driver. * | ||
8 | * * | ||
9 | * This program is free software; you can redistribute it and/or modify * | ||
10 | * it under the terms of the GNU General Public License, version 2, as * | ||
11 | * published by the Free Software Foundation. * | ||
12 | * * | ||
13 | * You should have received a copy of the GNU General Public License along * | ||
14 | * with this program; if not, write to the Free Software Foundation, Inc., * | ||
15 | * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. * | ||
16 | * * | ||
17 | * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED * | ||
18 | * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF * | ||
19 | * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. * | ||
20 | * * | ||
21 | * http://www.chelsio.com * | ||
22 | * * | ||
23 | * Copyright (c) 2003 - 2005 Chelsio Communications, Inc. * | ||
24 | * All rights reserved. * | ||
25 | * * | ||
26 | * Maintainers: maintainers@chelsio.com * | ||
27 | * * | ||
28 | * Authors: Dimitrios Michailidis <dm@chelsio.com> * | ||
29 | * Tina Yang <tainay@chelsio.com> * | ||
30 | * Felix Marti <felix@chelsio.com> * | ||
31 | * Scott Bardone <sbardone@chelsio.com> * | ||
32 | * Kurt Ottaway <kottaway@chelsio.com> * | ||
33 | * Frank DiMambro <frank@chelsio.com> * | ||
34 | * * | ||
35 | * History: * | ||
36 | * * | ||
37 | ****************************************************************************/ | ||
38 | |||
39 | #include "common.h" | ||
40 | #include <linux/config.h> | ||
41 | #include <linux/module.h> | ||
42 | #include <linux/init.h> | ||
43 | #include <linux/pci.h> | ||
44 | #include <linux/netdevice.h> | ||
45 | #include <linux/etherdevice.h> | ||
46 | #include <linux/if_vlan.h> | ||
47 | #include <linux/mii.h> | ||
48 | #include <linux/sockios.h> | ||
49 | #include <linux/proc_fs.h> | ||
50 | #include <linux/dma-mapping.h> | ||
51 | #include <asm/uaccess.h> | ||
52 | |||
53 | #include "cpl5_cmd.h" | ||
54 | #include "regs.h" | ||
55 | #include "gmac.h" | ||
56 | #include "cphy.h" | ||
57 | #include "sge.h" | ||
58 | #include "espi.h" | ||
59 | |||
60 | #ifdef work_struct | ||
61 | #include <linux/tqueue.h> | ||
62 | #define INIT_WORK INIT_TQUEUE | ||
63 | #define schedule_work schedule_task | ||
64 | #define flush_scheduled_work flush_scheduled_tasks | ||
65 | |||
66 | static inline void schedule_mac_stats_update(struct adapter *ap, int secs) | ||
67 | { | ||
68 | mod_timer(&ap->stats_update_timer, jiffies + secs * HZ); | ||
69 | } | ||
70 | |||
71 | static inline void cancel_mac_stats_update(struct adapter *ap) | ||
72 | { | ||
73 | del_timer_sync(&ap->stats_update_timer); | ||
74 | flush_scheduled_tasks(); | ||
75 | } | ||
76 | |||
77 | /* | ||
78 | * Stats update timer for 2.4. It schedules a task to do the actual update as | ||
79 | * we need to access MAC statistics in process context. | ||
80 | */ | ||
81 | static void mac_stats_timer(unsigned long data) | ||
82 | { | ||
83 | struct adapter *ap = (struct adapter *)data; | ||
84 | |||
85 | schedule_task(&ap->stats_update_task); | ||
86 | } | ||
87 | #else | ||
88 | #include <linux/workqueue.h> | ||
89 | |||
90 | static inline void schedule_mac_stats_update(struct adapter *ap, int secs) | ||
91 | { | ||
92 | schedule_delayed_work(&ap->stats_update_task, secs * HZ); | ||
93 | } | ||
94 | |||
95 | static inline void cancel_mac_stats_update(struct adapter *ap) | ||
96 | { | ||
97 | cancel_delayed_work(&ap->stats_update_task); | ||
98 | } | ||
99 | #endif | ||
100 | |||
101 | #define MAX_CMDQ_ENTRIES 16384 | ||
102 | #define MAX_CMDQ1_ENTRIES 1024 | ||
103 | #define MAX_RX_BUFFERS 16384 | ||
104 | #define MAX_RX_JUMBO_BUFFERS 16384 | ||
105 | #define MAX_TX_BUFFERS_HIGH 16384U | ||
106 | #define MAX_TX_BUFFERS_LOW 1536U | ||
107 | #define MIN_FL_ENTRIES 32 | ||
108 | |||
109 | #define PORT_MASK ((1 << MAX_NPORTS) - 1) | ||
110 | |||
111 | #define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \ | ||
112 | NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\ | ||
113 | NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR) | ||
114 | |||
115 | /* | ||
116 | * The EEPROM is actually bigger but only the first few bytes are used so we | ||
117 | * only report those. | ||
118 | */ | ||
119 | #define EEPROM_SIZE 32 | ||
120 | |||
121 | MODULE_DESCRIPTION(DRV_DESCRIPTION); | ||
122 | MODULE_AUTHOR("Chelsio Communications"); | ||
123 | MODULE_LICENSE("GPL"); | ||
124 | |||
125 | static int dflt_msg_enable = DFLT_MSG_ENABLE; | ||
126 | |||
127 | MODULE_PARM(dflt_msg_enable, "i"); | ||
128 | MODULE_PARM_DESC(dflt_msg_enable, "Chelsio T1 message enable bitmap"); | ||
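/*
 * Example (module name assumed to be cxgb): the bitmap can be overridden
 * at load time, e.g. "modprobe cxgb dflt_msg_enable=0x00ff", to turn on
 * additional NETIF_MSG_* categories.
 */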
129 | |||
130 | |||
131 | static const char pci_speed[][4] = { | ||
132 | "33", "66", "100", "133" | ||
133 | }; | ||
134 | |||
135 | /* | ||
136 | * Setup MAC to receive the types of packets we want. | ||
137 | */ | ||
138 | static void t1_set_rxmode(struct net_device *dev) | ||
139 | { | ||
140 | struct adapter *adapter = dev->priv; | ||
141 | struct cmac *mac = adapter->port[dev->if_port].mac; | ||
142 | struct t1_rx_mode rm; | ||
143 | |||
144 | rm.dev = dev; | ||
145 | rm.idx = 0; | ||
146 | rm.list = dev->mc_list; | ||
147 | mac->ops->set_rx_mode(mac, &rm); | ||
148 | } | ||
149 | |||
150 | static void link_report(struct port_info *p) | ||
151 | { | ||
152 | if (!netif_carrier_ok(p->dev)) | ||
153 | printk(KERN_INFO "%s: link down\n", p->dev->name); | ||
154 | else { | ||
155 | const char *s = "10Mbps"; | ||
156 | |||
157 | switch (p->link_config.speed) { | ||
158 | case SPEED_10000: s = "10Gbps"; break; | ||
159 | case SPEED_1000: s = "1000Mbps"; break; | ||
160 | case SPEED_100: s = "100Mbps"; break; | ||
161 | } | ||
162 | |||
163 | printk(KERN_INFO "%s: link up, %s, %s-duplex\n", | ||
164 | p->dev->name, s, | ||
165 | p->link_config.duplex == DUPLEX_FULL ? "full" : "half"); | ||
166 | } | ||
167 | } | ||
168 | |||
169 | void t1_link_changed(struct adapter *adapter, int port_id, int link_stat, | ||
170 | int speed, int duplex, int pause) | ||
171 | { | ||
172 | struct port_info *p = &adapter->port[port_id]; | ||
173 | |||
174 | if (link_stat != netif_carrier_ok(p->dev)) { | ||
175 | if (link_stat) | ||
176 | netif_carrier_on(p->dev); | ||
177 | else | ||
178 | netif_carrier_off(p->dev); | ||
179 | link_report(p); | ||
180 | |||
181 | } | ||
182 | } | ||
183 | |||
184 | static void link_start(struct port_info *p) | ||
185 | { | ||
186 | struct cmac *mac = p->mac; | ||
187 | |||
188 | mac->ops->reset(mac); | ||
189 | if (mac->ops->macaddress_set) | ||
190 | mac->ops->macaddress_set(mac, p->dev->dev_addr); | ||
191 | t1_set_rxmode(p->dev); | ||
192 | t1_link_start(p->phy, mac, &p->link_config); | ||
193 | mac->ops->enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX); | ||
194 | } | ||
195 | |||
196 | static void enable_hw_csum(struct adapter *adapter) | ||
197 | { | ||
198 | if (adapter->flags & TSO_CAPABLE) | ||
199 | t1_tp_set_ip_checksum_offload(adapter, 1); /* for TSO only */ | ||
200 | t1_tp_set_tcp_checksum_offload(adapter, 1); | ||
201 | } | ||
202 | |||
203 | /* | ||
204 | * Things to do upon first use of a card. | ||
205 | * This must run with the rtnl lock held. | ||
206 | */ | ||
207 | static int cxgb_up(struct adapter *adapter) | ||
208 | { | ||
209 | int err = 0; | ||
210 | |||
211 | if (!(adapter->flags & FULL_INIT_DONE)) { | ||
212 | err = t1_init_hw_modules(adapter); | ||
213 | if (err) | ||
214 | goto out_err; | ||
215 | |||
216 | enable_hw_csum(adapter); | ||
217 | adapter->flags |= FULL_INIT_DONE; | ||
218 | } | ||
219 | |||
220 | t1_interrupts_clear(adapter); | ||
221 | if ((err = request_irq(adapter->pdev->irq, | ||
222 | t1_select_intr_handler(adapter), SA_SHIRQ, | ||
223 | adapter->name, adapter))) { | ||
224 | goto out_err; | ||
225 | } | ||
226 | t1_sge_start(adapter->sge); | ||
227 | t1_interrupts_enable(adapter); | ||
228 | out_err: | ||
229 | return err; | ||
230 | } | ||
231 | |||
232 | /* | ||
233 | * Release resources when all the ports have been stopped. | ||
234 | */ | ||
235 | static void cxgb_down(struct adapter *adapter) | ||
236 | { | ||
237 | t1_sge_stop(adapter->sge); | ||
238 | t1_interrupts_disable(adapter); | ||
239 | free_irq(adapter->pdev->irq, adapter); | ||
240 | } | ||
241 | |||
242 | static int cxgb_open(struct net_device *dev) | ||
243 | { | ||
244 | int err; | ||
245 | struct adapter *adapter = dev->priv; | ||
246 | int other_ports = adapter->open_device_map & PORT_MASK; | ||
247 | |||
248 | if (!adapter->open_device_map && (err = cxgb_up(adapter)) < 0) | ||
249 | return err; | ||
250 | |||
251 | __set_bit(dev->if_port, &adapter->open_device_map); | ||
252 | link_start(&adapter->port[dev->if_port]); | ||
253 | netif_start_queue(dev); | ||
254 | if (!other_ports && adapter->params.stats_update_period) | ||
255 | schedule_mac_stats_update(adapter, | ||
256 | adapter->params.stats_update_period); | ||
257 | return 0; | ||
258 | } | ||
259 | |||
260 | static int cxgb_close(struct net_device *dev) | ||
261 | { | ||
262 | struct adapter *adapter = dev->priv; | ||
263 | struct port_info *p = &adapter->port[dev->if_port]; | ||
264 | struct cmac *mac = p->mac; | ||
265 | |||
266 | netif_stop_queue(dev); | ||
267 | mac->ops->disable(mac, MAC_DIRECTION_TX | MAC_DIRECTION_RX); | ||
268 | netif_carrier_off(dev); | ||
269 | |||
270 | clear_bit(dev->if_port, &adapter->open_device_map); | ||
271 | if (adapter->params.stats_update_period && | ||
272 | !(adapter->open_device_map & PORT_MASK)) { | ||
273 | /* Stop statistics accumulation. */ | ||
274 | smp_mb__after_clear_bit(); | ||
275 | spin_lock(&adapter->work_lock); /* sync with update task */ | ||
276 | spin_unlock(&adapter->work_lock); | ||
277 | cancel_mac_stats_update(adapter); | ||
278 | } | ||
279 | |||
280 | if (!adapter->open_device_map) | ||
281 | cxgb_down(adapter); | ||
282 | return 0; | ||
283 | } | ||
284 | |||
285 | static struct net_device_stats *t1_get_stats(struct net_device *dev) | ||
286 | { | ||
287 | struct adapter *adapter = dev->priv; | ||
288 | struct port_info *p = &adapter->port[dev->if_port]; | ||
289 | struct net_device_stats *ns = &p->netstats; | ||
290 | const struct cmac_statistics *pstats; | ||
291 | |||
292 | /* Do a full update of the MAC stats */ | ||
293 | pstats = p->mac->ops->statistics_update(p->mac, | ||
294 | MAC_STATS_UPDATE_FULL); | ||
295 | |||
296 | ns->tx_packets = pstats->TxUnicastFramesOK + | ||
297 | pstats->TxMulticastFramesOK + pstats->TxBroadcastFramesOK; | ||
298 | |||
299 | ns->rx_packets = pstats->RxUnicastFramesOK + | ||
300 | pstats->RxMulticastFramesOK + pstats->RxBroadcastFramesOK; | ||
301 | |||
302 | ns->tx_bytes = pstats->TxOctetsOK; | ||
303 | ns->rx_bytes = pstats->RxOctetsOK; | ||
304 | |||
305 | ns->tx_errors = pstats->TxLateCollisions + pstats->TxLengthErrors + | ||
306 | pstats->TxUnderrun + pstats->TxFramesAbortedDueToXSCollisions; | ||
307 | ns->rx_errors = pstats->RxDataErrors + pstats->RxJabberErrors + | ||
308 | pstats->RxFCSErrors + pstats->RxAlignErrors + | ||
309 | pstats->RxSequenceErrors + pstats->RxFrameTooLongErrors + | ||
310 | pstats->RxSymbolErrors + pstats->RxRuntErrors; | ||
311 | |||
312 | ns->multicast = pstats->RxMulticastFramesOK; | ||
313 | ns->collisions = pstats->TxTotalCollisions; | ||
314 | |||
315 | /* detailed rx_errors */ | ||
316 | ns->rx_length_errors = pstats->RxFrameTooLongErrors + | ||
317 | pstats->RxJabberErrors; | ||
318 | ns->rx_over_errors = 0; | ||
319 | ns->rx_crc_errors = pstats->RxFCSErrors; | ||
320 | ns->rx_frame_errors = pstats->RxAlignErrors; | ||
321 | ns->rx_fifo_errors = 0; | ||
322 | ns->rx_missed_errors = 0; | ||
323 | |||
324 | /* detailed tx_errors */ | ||
325 | ns->tx_aborted_errors = pstats->TxFramesAbortedDueToXSCollisions; | ||
326 | ns->tx_carrier_errors = 0; | ||
327 | ns->tx_fifo_errors = pstats->TxUnderrun; | ||
328 | ns->tx_heartbeat_errors = 0; | ||
329 | ns->tx_window_errors = pstats->TxLateCollisions; | ||
330 | return ns; | ||
331 | } | ||
332 | |||
333 | static u32 get_msglevel(struct net_device *dev) | ||
334 | { | ||
335 | struct adapter *adapter = dev->priv; | ||
336 | |||
337 | return adapter->msg_enable; | ||
338 | } | ||
339 | |||
340 | static void set_msglevel(struct net_device *dev, u32 val) | ||
341 | { | ||
342 | struct adapter *adapter = dev->priv; | ||
343 | |||
344 | adapter->msg_enable = val; | ||
345 | } | ||
346 | |||
347 | static char stats_strings[][ETH_GSTRING_LEN] = { | ||
348 | "TxOctetsOK", | ||
349 | "TxOctetsBad", | ||
350 | "TxUnicastFramesOK", | ||
351 | "TxMulticastFramesOK", | ||
352 | "TxBroadcastFramesOK", | ||
353 | "TxPauseFrames", | ||
354 | "TxFramesWithDeferredXmissions", | ||
355 | "TxLateCollisions", | ||
356 | "TxTotalCollisions", | ||
357 | "TxFramesAbortedDueToXSCollisions", | ||
358 | "TxUnderrun", | ||
359 | "TxLengthErrors", | ||
360 | "TxInternalMACXmitError", | ||
361 | "TxFramesWithExcessiveDeferral", | ||
362 | "TxFCSErrors", | ||
363 | |||
364 | "RxOctetsOK", | ||
365 | "RxOctetsBad", | ||
366 | "RxUnicastFramesOK", | ||
367 | "RxMulticastFramesOK", | ||
368 | "RxBroadcastFramesOK", | ||
369 | "RxPauseFrames", | ||
370 | "RxFCSErrors", | ||
371 | "RxAlignErrors", | ||
372 | "RxSymbolErrors", | ||
373 | "RxDataErrors", | ||
374 | "RxSequenceErrors", | ||
375 | "RxRuntErrors", | ||
376 | "RxJabberErrors", | ||
377 | "RxInternalMACRcvError", | ||
378 | "RxInRangeLengthErrors", | ||
379 | "RxOutOfRangeLengthField", | ||
380 | "RxFrameTooLongErrors", | ||
381 | |||
382 | "TSO", | ||
383 | "VLANextractions", | ||
384 | "VLANinsertions", | ||
385 | "RxCsumGood", | ||
386 | "TxCsumOffload", | ||
387 | "RxDrops" | ||
388 | |||
389 | "respQ_empty", | ||
390 | "respQ_overflow", | ||
391 | "freelistQ_empty", | ||
392 | "pkt_too_big", | ||
393 | "pkt_mismatch", | ||
394 | "cmdQ_full0", | ||
395 | "cmdQ_full1", | ||
396 | "tx_ipfrags", | ||
397 | "tx_reg_pkts", | ||
398 | "tx_lso_pkts", | ||
399 | "tx_do_cksum", | ||
400 | |||
401 | "espi_DIP2ParityErr", | ||
402 | "espi_DIP4Err", | ||
403 | "espi_RxDrops", | ||
404 | "espi_TxDrops", | ||
405 | "espi_RxOvfl", | ||
406 | "espi_ParityErr" | ||
407 | }; | ||
408 | |||
409 | #define T2_REGMAP_SIZE (3 * 1024) | ||
410 | |||
411 | static int get_regs_len(struct net_device *dev) | ||
412 | { | ||
413 | return T2_REGMAP_SIZE; | ||
414 | } | ||
415 | |||
416 | static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) | ||
417 | { | ||
418 | struct adapter *adapter = dev->priv; | ||
419 | |||
420 | strcpy(info->driver, DRV_NAME); | ||
421 | strcpy(info->version, DRV_VERSION); | ||
422 | strcpy(info->fw_version, "N/A"); | ||
423 | strcpy(info->bus_info, pci_name(adapter->pdev)); | ||
424 | } | ||
425 | |||
426 | static int get_stats_count(struct net_device *dev) | ||
427 | { | ||
428 | return ARRAY_SIZE(stats_strings); | ||
429 | } | ||
430 | |||
431 | static void get_strings(struct net_device *dev, u32 stringset, u8 *data) | ||
432 | { | ||
433 | if (stringset == ETH_SS_STATS) | ||
434 | memcpy(data, stats_strings, sizeof(stats_strings)); | ||
435 | } | ||
436 | |||
437 | static void get_stats(struct net_device *dev, struct ethtool_stats *stats, | ||
438 | u64 *data) | ||
439 | { | ||
440 | struct adapter *adapter = dev->priv; | ||
441 | struct cmac *mac = adapter->port[dev->if_port].mac; | ||
442 | const struct cmac_statistics *s; | ||
443 | const struct sge_port_stats *ss; | ||
444 | const struct sge_intr_counts *t; | ||
445 | |||
446 | s = mac->ops->statistics_update(mac, MAC_STATS_UPDATE_FULL); | ||
447 | ss = t1_sge_get_port_stats(adapter->sge, dev->if_port); | ||
448 | t = t1_sge_get_intr_counts(adapter->sge); | ||
449 | |||
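/* The order of the writes below must match stats_strings[] exactly. */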
450 | *data++ = s->TxOctetsOK; | ||
451 | *data++ = s->TxOctetsBad; | ||
452 | *data++ = s->TxUnicastFramesOK; | ||
453 | *data++ = s->TxMulticastFramesOK; | ||
454 | *data++ = s->TxBroadcastFramesOK; | ||
455 | *data++ = s->TxPauseFrames; | ||
456 | *data++ = s->TxFramesWithDeferredXmissions; | ||
457 | *data++ = s->TxLateCollisions; | ||
458 | *data++ = s->TxTotalCollisions; | ||
459 | *data++ = s->TxFramesAbortedDueToXSCollisions; | ||
460 | *data++ = s->TxUnderrun; | ||
461 | *data++ = s->TxLengthErrors; | ||
462 | *data++ = s->TxInternalMACXmitError; | ||
463 | *data++ = s->TxFramesWithExcessiveDeferral; | ||
464 | *data++ = s->TxFCSErrors; | ||
465 | |||
466 | *data++ = s->RxOctetsOK; | ||
467 | *data++ = s->RxOctetsBad; | ||
468 | *data++ = s->RxUnicastFramesOK; | ||
469 | *data++ = s->RxMulticastFramesOK; | ||
470 | *data++ = s->RxBroadcastFramesOK; | ||
471 | *data++ = s->RxPauseFrames; | ||
472 | *data++ = s->RxFCSErrors; | ||
473 | *data++ = s->RxAlignErrors; | ||
474 | *data++ = s->RxSymbolErrors; | ||
475 | *data++ = s->RxDataErrors; | ||
476 | *data++ = s->RxSequenceErrors; | ||
477 | *data++ = s->RxRuntErrors; | ||
478 | *data++ = s->RxJabberErrors; | ||
479 | *data++ = s->RxInternalMACRcvError; | ||
480 | *data++ = s->RxInRangeLengthErrors; | ||
481 | *data++ = s->RxOutOfRangeLengthField; | ||
482 | *data++ = s->RxFrameTooLongErrors; | ||
483 | |||
484 | *data++ = ss->tso; | ||
485 | *data++ = ss->vlan_xtract; | ||
486 | *data++ = ss->vlan_insert; | ||
487 | *data++ = ss->rx_cso_good; | ||
488 | *data++ = ss->tx_cso; | ||
489 | *data++ = ss->rx_drops; | ||
490 | |||
491 | *data++ = (u64)t->respQ_empty; | ||
492 | *data++ = (u64)t->respQ_overflow; | ||
493 | *data++ = (u64)t->freelistQ_empty; | ||
494 | *data++ = (u64)t->pkt_too_big; | ||
495 | *data++ = (u64)t->pkt_mismatch; | ||
496 | *data++ = (u64)t->cmdQ_full[0]; | ||
497 | *data++ = (u64)t->cmdQ_full[1]; | ||
498 | *data++ = (u64)t->tx_ipfrags; | ||
499 | *data++ = (u64)t->tx_reg_pkts; | ||
500 | *data++ = (u64)t->tx_lso_pkts; | ||
501 | *data++ = (u64)t->tx_do_cksum; | ||
502 | } | ||
503 | |||
504 | static inline void reg_block_dump(struct adapter *ap, void *buf, | ||
505 | unsigned int start, unsigned int end) | ||
506 | { | ||
507 | u32 *p = buf + start; | ||
508 | |||
509 | for ( ; start <= end; start += sizeof(u32)) | ||
510 | *p++ = readl(ap->regs + start); | ||
511 | } | ||
512 | |||
513 | static void get_regs(struct net_device *dev, struct ethtool_regs *regs, | ||
514 | void *buf) | ||
515 | { | ||
516 | struct adapter *ap = dev->priv; | ||
517 | |||
518 | /* | ||
519 | * Version scheme: bits 0..9: chip version, bits 10..15: chip revision | ||
520 | */ | ||
521 | regs->version = 2; | ||
522 | |||
523 | memset(buf, 0, T2_REGMAP_SIZE); | ||
524 | reg_block_dump(ap, buf, 0, A_SG_RESPACCUTIMER); | ||
525 | } | ||
526 | |||
527 | static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd) | ||
528 | { | ||
529 | struct adapter *adapter = dev->priv; | ||
530 | struct port_info *p = &adapter->port[dev->if_port]; | ||
531 | |||
532 | cmd->supported = p->link_config.supported; | ||
533 | cmd->advertising = p->link_config.advertising; | ||
534 | |||
535 | if (netif_carrier_ok(dev)) { | ||
536 | cmd->speed = p->link_config.speed; | ||
537 | cmd->duplex = p->link_config.duplex; | ||
538 | } else { | ||
539 | cmd->speed = -1; | ||
540 | cmd->duplex = -1; | ||
541 | } | ||
542 | |||
543 | cmd->port = (cmd->supported & SUPPORTED_TP) ? PORT_TP : PORT_FIBRE; | ||
544 | cmd->phy_address = p->phy->addr; | ||
545 | cmd->transceiver = XCVR_EXTERNAL; | ||
546 | cmd->autoneg = p->link_config.autoneg; | ||
547 | cmd->maxtxpkt = 0; | ||
548 | cmd->maxrxpkt = 0; | ||
549 | return 0; | ||
550 | } | ||
551 | |||
552 | static int speed_duplex_to_caps(int speed, int duplex) | ||
553 | { | ||
554 | int cap = 0; | ||
555 | |||
556 | switch (speed) { | ||
557 | case SPEED_10: | ||
558 | if (duplex == DUPLEX_FULL) | ||
559 | cap = SUPPORTED_10baseT_Full; | ||
560 | else | ||
561 | cap = SUPPORTED_10baseT_Half; | ||
562 | break; | ||
563 | case SPEED_100: | ||
564 | if (duplex == DUPLEX_FULL) | ||
565 | cap = SUPPORTED_100baseT_Full; | ||
566 | else | ||
567 | cap = SUPPORTED_100baseT_Half; | ||
568 | break; | ||
569 | case SPEED_1000: | ||
570 | if (duplex == DUPLEX_FULL) | ||
571 | cap = SUPPORTED_1000baseT_Full; | ||
572 | else | ||
573 | cap = SUPPORTED_1000baseT_Half; | ||
574 | break; | ||
575 | case SPEED_10000: | ||
576 | if (duplex == DUPLEX_FULL) | ||
577 | cap = SUPPORTED_10000baseT_Full; | ||
578 | } | ||
579 | return cap; | ||
580 | } | ||
581 | |||
582 | #define ADVERTISED_MASK (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | \ | ||
583 | ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | \ | ||
584 | ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full | \ | ||
585 | ADVERTISED_10000baseT_Full) | ||
586 | |||
587 | static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd) | ||
588 | { | ||
589 | struct adapter *adapter = dev->priv; | ||
590 | struct port_info *p = &adapter->port[dev->if_port]; | ||
591 | struct link_config *lc = &p->link_config; | ||
592 | |||
593 | if (!(lc->supported & SUPPORTED_Autoneg)) | ||
594 | return -EOPNOTSUPP; /* can't change speed/duplex */ | ||
595 | |||
596 | if (cmd->autoneg == AUTONEG_DISABLE) { | ||
597 | int cap = speed_duplex_to_caps(cmd->speed, cmd->duplex); | ||
598 | |||
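/* A forced speed/duplex must be supported, and forcing 1 Gbps is
 * rejected outright, presumably because 1000BASE-T mandates
 * autonegotiation. */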
599 | if (!(lc->supported & cap) || cmd->speed == SPEED_1000) | ||
600 | return -EINVAL; | ||
601 | lc->requested_speed = cmd->speed; | ||
602 | lc->requested_duplex = cmd->duplex; | ||
603 | lc->advertising = 0; | ||
604 | } else { | ||
605 | cmd->advertising &= ADVERTISED_MASK; | ||
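/* x & (x - 1) is nonzero iff more than one bit is set: when several
 * modes are advertised at once, fall back to everything supported. */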
606 | if (cmd->advertising & (cmd->advertising - 1)) | ||
607 | cmd->advertising = lc->supported; | ||
608 | cmd->advertising &= lc->supported; | ||
609 | if (!cmd->advertising) | ||
610 | return -EINVAL; | ||
611 | lc->requested_speed = SPEED_INVALID; | ||
612 | lc->requested_duplex = DUPLEX_INVALID; | ||
613 | lc->advertising = cmd->advertising | ADVERTISED_Autoneg; | ||
614 | } | ||
615 | lc->autoneg = cmd->autoneg; | ||
616 | if (netif_running(dev)) | ||
617 | t1_link_start(p->phy, p->mac, lc); | ||
618 | return 0; | ||
619 | } | ||
620 | |||
621 | static void get_pauseparam(struct net_device *dev, | ||
622 | struct ethtool_pauseparam *epause) | ||
623 | { | ||
624 | struct adapter *adapter = dev->priv; | ||
625 | struct port_info *p = &adapter->port[dev->if_port]; | ||
626 | |||
627 | epause->autoneg = (p->link_config.requested_fc & PAUSE_AUTONEG) != 0; | ||
628 | epause->rx_pause = (p->link_config.fc & PAUSE_RX) != 0; | ||
629 | epause->tx_pause = (p->link_config.fc & PAUSE_TX) != 0; | ||
630 | } | ||
631 | |||
632 | static int set_pauseparam(struct net_device *dev, | ||
633 | struct ethtool_pauseparam *epause) | ||
634 | { | ||
635 | struct adapter *adapter = dev->priv; | ||
636 | struct port_info *p = &adapter->port[dev->if_port]; | ||
637 | struct link_config *lc = &p->link_config; | ||
638 | |||
639 | if (epause->autoneg == AUTONEG_DISABLE) | ||
640 | lc->requested_fc = 0; | ||
641 | else if (lc->supported & SUPPORTED_Autoneg) | ||
642 | lc->requested_fc = PAUSE_AUTONEG; | ||
643 | else | ||
644 | return -EINVAL; | ||
645 | |||
646 | if (epause->rx_pause) | ||
647 | lc->requested_fc |= PAUSE_RX; | ||
648 | if (epause->tx_pause) | ||
649 | lc->requested_fc |= PAUSE_TX; | ||
650 | if (lc->autoneg == AUTONEG_ENABLE) { | ||
651 | if (netif_running(dev)) | ||
652 | t1_link_start(p->phy, p->mac, lc); | ||
653 | } else { | ||
654 | lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX); | ||
655 | if (netif_running(dev)) | ||
656 | p->mac->ops->set_speed_duplex_fc(p->mac, -1, -1, | ||
657 | lc->fc); | ||
658 | } | ||
659 | return 0; | ||
660 | } | ||
661 | |||
662 | static u32 get_rx_csum(struct net_device *dev) | ||
663 | { | ||
664 | struct adapter *adapter = dev->priv; | ||
665 | |||
666 | return (adapter->flags & RX_CSUM_ENABLED) != 0; | ||
667 | } | ||
668 | |||
669 | static int set_rx_csum(struct net_device *dev, u32 data) | ||
670 | { | ||
671 | struct adapter *adapter = dev->priv; | ||
672 | |||
673 | if (data) | ||
674 | adapter->flags |= RX_CSUM_ENABLED; | ||
675 | else | ||
676 | adapter->flags &= ~RX_CSUM_ENABLED; | ||
677 | return 0; | ||
678 | } | ||
679 | |||
680 | static int set_tso(struct net_device *dev, u32 value) | ||
681 | { | ||
682 | struct adapter *adapter = dev->priv; | ||
683 | |||
684 | if (!(adapter->flags & TSO_CAPABLE)) | ||
685 | return value ? -EOPNOTSUPP : 0; | ||
686 | return ethtool_op_set_tso(dev, value); | ||
687 | } | ||
688 | |||
689 | static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e) | ||
690 | { | ||
691 | struct adapter *adapter = dev->priv; | ||
692 | int jumbo_fl = t1_is_T1B(adapter) ? 1 : 0; | ||
693 | |||
694 | e->rx_max_pending = MAX_RX_BUFFERS; | ||
695 | e->rx_mini_max_pending = 0; | ||
696 | e->rx_jumbo_max_pending = MAX_RX_JUMBO_BUFFERS; | ||
697 | e->tx_max_pending = MAX_CMDQ_ENTRIES; | ||
698 | |||
699 | e->rx_pending = adapter->params.sge.freelQ_size[!jumbo_fl]; | ||
700 | e->rx_mini_pending = 0; | ||
701 | e->rx_jumbo_pending = adapter->params.sge.freelQ_size[jumbo_fl]; | ||
702 | e->tx_pending = adapter->params.sge.cmdQ_size[0]; | ||
703 | } | ||
704 | |||
705 | static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e) | ||
706 | { | ||
707 | struct adapter *adapter = dev->priv; | ||
708 | int jumbo_fl = t1_is_T1B(adapter) ? 1 : 0; | ||
709 | |||
710 | if (e->rx_pending > MAX_RX_BUFFERS || e->rx_mini_pending || | ||
711 | e->rx_jumbo_pending > MAX_RX_JUMBO_BUFFERS || | ||
712 | e->tx_pending > MAX_CMDQ_ENTRIES || | ||
713 | e->rx_pending < MIN_FL_ENTRIES || | ||
714 | e->rx_jumbo_pending < MIN_FL_ENTRIES || | ||
715 | e->tx_pending < (adapter->params.nports + 1) * (MAX_SKB_FRAGS + 1)) | ||
716 | return -EINVAL; | ||
717 | |||
718 | if (adapter->flags & FULL_INIT_DONE) | ||
719 | return -EBUSY; | ||
720 | |||
721 | adapter->params.sge.freelQ_size[!jumbo_fl] = e->rx_pending; | ||
722 | adapter->params.sge.freelQ_size[jumbo_fl] = e->rx_jumbo_pending; | ||
723 | adapter->params.sge.cmdQ_size[0] = e->tx_pending; | ||
724 | adapter->params.sge.cmdQ_size[1] = e->tx_pending > MAX_CMDQ1_ENTRIES ? | ||
725 | MAX_CMDQ1_ENTRIES : e->tx_pending; | ||
726 | return 0; | ||
727 | } | ||
728 | |||
729 | static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c) | ||
730 | { | ||
731 | struct adapter *adapter = dev->priv; | ||
732 | |||
733 | /* | ||
734 | * If RX coalescing is requested we use NAPI, otherwise interrupts. | ||
735 | * This choice can be made only when all ports and the TOE are off. | ||
736 | */ | ||
737 | if (adapter->open_device_map == 0) | ||
738 | adapter->params.sge.polling = c->use_adaptive_rx_coalesce; | ||
739 | |||
740 | if (adapter->params.sge.polling) { | ||
741 | adapter->params.sge.rx_coalesce_usecs = 0; | ||
742 | } else { | ||
743 | adapter->params.sge.rx_coalesce_usecs = c->rx_coalesce_usecs; | ||
744 | } | ||
745 | adapter->params.sge.coalesce_enable = c->use_adaptive_rx_coalesce; | ||
746 | adapter->params.sge.sample_interval_usecs = c->rate_sample_interval; | ||
747 | t1_sge_set_coalesce_params(adapter->sge, &adapter->params.sge); | ||
748 | return 0; | ||
749 | } | ||
750 | |||
751 | static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c) | ||
752 | { | ||
753 | struct adapter *adapter = dev->priv; | ||
754 | |||
755 | c->rx_coalesce_usecs = adapter->params.sge.rx_coalesce_usecs; | ||
756 | c->rate_sample_interval = adapter->params.sge.sample_interval_usecs; | ||
757 | c->use_adaptive_rx_coalesce = adapter->params.sge.coalesce_enable; | ||
758 | return 0; | ||
759 | } | ||
760 | |||
761 | static int get_eeprom_len(struct net_device *dev) | ||
762 | { | ||
763 | return EEPROM_SIZE; | ||
764 | } | ||
765 | |||
766 | #define EEPROM_MAGIC(ap) \ | ||
767 | (PCI_VENDOR_ID_CHELSIO | ((ap)->params.chip_version << 16)) | ||
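/*
 * Worked example (assuming PCI_VENDOR_ID_CHELSIO is 0x1425): a
 * chip_version of 2 yields a magic of 0x00021425, which lets ethtool tie
 * an EEPROM dump to the exact chip generation.
 */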
768 | |||
769 | static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e, | ||
770 | u8 *data) | ||
771 | { | ||
772 | int i; | ||
773 | u8 buf[EEPROM_SIZE] __attribute__((aligned(4))); | ||
774 | struct adapter *adapter = dev->priv; | ||
775 | |||
776 | e->magic = EEPROM_MAGIC(adapter); | ||
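/* seeprom reads are word-wide, so start at e->offset rounded down to a
 * 4-byte boundary and copy out only the requested window. */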
777 | for (i = e->offset & ~3; i < e->offset + e->len; i += sizeof(u32)) | ||
778 | t1_seeprom_read(adapter, i, (u32 *)&buf[i]); | ||
779 | memcpy(data, buf + e->offset, e->len); | ||
780 | return 0; | ||
781 | } | ||
782 | |||
783 | static struct ethtool_ops t1_ethtool_ops = { | ||
784 | .get_settings = get_settings, | ||
785 | .set_settings = set_settings, | ||
786 | .get_drvinfo = get_drvinfo, | ||
787 | .get_msglevel = get_msglevel, | ||
788 | .set_msglevel = set_msglevel, | ||
789 | .get_ringparam = get_sge_param, | ||
790 | .set_ringparam = set_sge_param, | ||
791 | .get_coalesce = get_coalesce, | ||
792 | .set_coalesce = set_coalesce, | ||
793 | .get_eeprom_len = get_eeprom_len, | ||
794 | .get_eeprom = get_eeprom, | ||
795 | .get_pauseparam = get_pauseparam, | ||
796 | .set_pauseparam = set_pauseparam, | ||
797 | .get_rx_csum = get_rx_csum, | ||
798 | .set_rx_csum = set_rx_csum, | ||
799 | .get_tx_csum = ethtool_op_get_tx_csum, | ||
800 | .set_tx_csum = ethtool_op_set_tx_csum, | ||
801 | .get_sg = ethtool_op_get_sg, | ||
802 | .set_sg = ethtool_op_set_sg, | ||
803 | .get_link = ethtool_op_get_link, | ||
804 | .get_strings = get_strings, | ||
805 | .get_stats_count = get_stats_count, | ||
806 | .get_ethtool_stats = get_stats, | ||
807 | .get_regs_len = get_regs_len, | ||
808 | .get_regs = get_regs, | ||
809 | .get_tso = ethtool_op_get_tso, | ||
810 | .set_tso = set_tso, | ||
811 | }; | ||
812 | |||
813 | static void cxgb_proc_cleanup(struct adapter *adapter, | ||
814 | struct proc_dir_entry *dir) | ||
815 | { | ||
816 | const char *name = adapter->name; | ||
817 | |||
818 | remove_proc_entry(name, dir); | ||
819 | } | ||
821 | #define update_mtu_tab(adapter) | ||
822 | #define write_smt_entry(adapter, idx) | ||
823 | |||
824 | static int t1_ioctl(struct net_device *dev, struct ifreq *req, int cmd) | ||
825 | { | ||
826 | struct adapter *adapter = dev->priv; | ||
827 | struct mii_ioctl_data *data = (struct mii_ioctl_data *)&req->ifr_data; | ||
828 | |||
829 | switch (cmd) { | ||
830 | case SIOCGMIIPHY: | ||
831 | data->phy_id = adapter->port[dev->if_port].phy->addr; | ||
832 | /* FALLTHRU */ | ||
833 | case SIOCGMIIREG: { | ||
834 | struct cphy *phy = adapter->port[dev->if_port].phy; | ||
835 | u32 val; | ||
836 | |||
837 | if (!phy->mdio_read) | ||
838 | return -EOPNOTSUPP; | ||
839 | phy->mdio_read(adapter, data->phy_id, 0, data->reg_num & 0x1f, | ||
840 | &val); | ||
841 | data->val_out = val; | ||
842 | break; | ||
843 | } | ||
844 | case SIOCSMIIREG: { | ||
845 | struct cphy *phy = adapter->port[dev->if_port].phy; | ||
846 | |||
847 | if (!capable(CAP_NET_ADMIN)) | ||
848 | return -EPERM; | ||
849 | if (!phy->mdio_write) | ||
850 | return -EOPNOTSUPP; | ||
851 | phy->mdio_write(adapter, data->phy_id, 0, data->reg_num & 0x1f, | ||
852 | data->val_in); | ||
853 | break; | ||
854 | } | ||
855 | |||
856 | default: | ||
857 | return -EOPNOTSUPP; | ||
858 | } | ||
859 | return 0; | ||
860 | } | ||
861 | |||
862 | static int t1_change_mtu(struct net_device *dev, int new_mtu) | ||
863 | { | ||
864 | int ret; | ||
865 | struct adapter *adapter = dev->priv; | ||
866 | struct cmac *mac = adapter->port[dev->if_port].mac; | ||
867 | |||
868 | if (!mac->ops->set_mtu) | ||
869 | return -EOPNOTSUPP; | ||
870 | if (new_mtu < 68) | ||
871 | return -EINVAL; | ||
872 | if ((ret = mac->ops->set_mtu(mac, new_mtu))) | ||
873 | return ret; | ||
874 | dev->mtu = new_mtu; | ||
875 | return 0; | ||
876 | } | ||
877 | |||
878 | static int t1_set_mac_addr(struct net_device *dev, void *p) | ||
879 | { | ||
880 | struct adapter *adapter = dev->priv; | ||
881 | struct cmac *mac = adapter->port[dev->if_port].mac; | ||
882 | struct sockaddr *addr = p; | ||
883 | |||
884 | if (!mac->ops->macaddress_set) | ||
885 | return -EOPNOTSUPP; | ||
886 | |||
887 | memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); | ||
888 | mac->ops->macaddress_set(mac, dev->dev_addr); | ||
889 | return 0; | ||
890 | } | ||
891 | |||
892 | #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE) | ||
893 | static void vlan_rx_register(struct net_device *dev, | ||
894 | struct vlan_group *grp) | ||
895 | { | ||
896 | struct adapter *adapter = dev->priv; | ||
897 | |||
898 | spin_lock_irq(&adapter->async_lock); | ||
899 | adapter->vlan_grp = grp; | ||
900 | t1_set_vlan_accel(adapter, grp != NULL); | ||
901 | spin_unlock_irq(&adapter->async_lock); | ||
902 | } | ||
903 | |||
904 | static void vlan_rx_kill_vid(struct net_device *dev, unsigned short vid) | ||
905 | { | ||
906 | struct adapter *adapter = dev->priv; | ||
907 | |||
908 | spin_lock_irq(&adapter->async_lock); | ||
909 | if (adapter->vlan_grp) | ||
910 | adapter->vlan_grp->vlan_devices[vid] = NULL; | ||
911 | spin_unlock_irq(&adapter->async_lock); | ||
912 | } | ||
913 | #endif | ||
914 | |||
915 | #ifdef CONFIG_NET_POLL_CONTROLLER | ||
916 | static void t1_netpoll(struct net_device *dev) | ||
917 | { | ||
918 | unsigned long flags; | ||
919 | struct adapter *adapter = dev->priv; | ||
920 | |||
921 | local_irq_save(flags); | ||
922 | t1_select_intr_handler(adapter)(adapter->pdev->irq, adapter, NULL); | ||
923 | local_irq_restore(flags); | ||
924 | } | ||
925 | #endif | ||
926 | |||
927 | /* | ||
928 | * Periodic accumulation of MAC statistics. This is used only if the MAC | ||
929 | * does not have any other way to prevent stats counter overflow. | ||
930 | */ | ||
931 | static void mac_stats_task(void *data) | ||
932 | { | ||
933 | int i; | ||
934 | struct adapter *adapter = data; | ||
935 | |||
936 | for_each_port(adapter, i) { | ||
937 | struct port_info *p = &adapter->port[i]; | ||
938 | |||
939 | if (netif_running(p->dev)) | ||
940 | p->mac->ops->statistics_update(p->mac, | ||
941 | MAC_STATS_UPDATE_FAST); | ||
942 | } | ||
943 | |||
944 | /* Schedule the next statistics update if any port is active. */ | ||
945 | spin_lock(&adapter->work_lock); | ||
946 | if (adapter->open_device_map & PORT_MASK) | ||
947 | schedule_mac_stats_update(adapter, | ||
948 | adapter->params.stats_update_period); | ||
949 | spin_unlock(&adapter->work_lock); | ||
950 | } | ||
951 | |||
952 | /* | ||
953 | * Processes elmer0 external interrupts in process context. | ||
954 | */ | ||
955 | static void ext_intr_task(void *data) | ||
956 | { | ||
957 | struct adapter *adapter = data; | ||
958 | |||
959 | elmer0_ext_intr_handler(adapter); | ||
960 | |||
961 | /* Now reenable external interrupts */ | ||
962 | spin_lock_irq(&adapter->async_lock); | ||
963 | adapter->slow_intr_mask |= F_PL_INTR_EXT; | ||
964 | writel(F_PL_INTR_EXT, adapter->regs + A_PL_CAUSE); | ||
965 | writel(adapter->slow_intr_mask | F_PL_INTR_SGE_DATA, | ||
966 | adapter->regs + A_PL_ENABLE); | ||
967 | spin_unlock_irq(&adapter->async_lock); | ||
968 | } | ||
969 | |||
970 | /* | ||
971 | * Interrupt-context handler for elmer0 external interrupts. | ||
972 | */ | ||
973 | void t1_elmer0_ext_intr(struct adapter *adapter) | ||
974 | { | ||
975 | /* | ||
976 | * Schedule a task to handle external interrupts as we require | ||
977 | * a process context. We disable EXT interrupts in the interim | ||
978 | * and let the task reenable them when it's done. | ||
979 | */ | ||
980 | adapter->slow_intr_mask &= ~F_PL_INTR_EXT; | ||
981 | writel(adapter->slow_intr_mask | F_PL_INTR_SGE_DATA, | ||
982 | adapter->regs + A_PL_ENABLE); | ||
983 | schedule_work(&adapter->ext_intr_handler_task); | ||
984 | } | ||
985 | |||
986 | void t1_fatal_err(struct adapter *adapter) | ||
987 | { | ||
988 | if (adapter->flags & FULL_INIT_DONE) { | ||
989 | t1_sge_stop(adapter->sge); | ||
990 | t1_interrupts_disable(adapter); | ||
991 | } | ||
992 | CH_ALERT("%s: encountered fatal error, operation suspended\n", | ||
993 | adapter->name); | ||
994 | } | ||
995 | |||
996 | static int __devinit init_one(struct pci_dev *pdev, | ||
997 | const struct pci_device_id *ent) | ||
998 | { | ||
999 | static int version_printed; | ||
1000 | |||
1001 | int i, err, pci_using_dac = 0; | ||
1002 | unsigned long mmio_start, mmio_len; | ||
1003 | const struct board_info *bi; | ||
1004 | struct adapter *adapter = NULL; | ||
1005 | struct port_info *pi; | ||
1006 | |||
1007 | if (!version_printed) { | ||
1008 | printk(KERN_INFO "%s - version %s\n", DRV_DESCRIPTION, | ||
1009 | DRV_VERSION); | ||
1010 | ++version_printed; | ||
1011 | } | ||
1012 | |||
1013 | err = pci_enable_device(pdev); | ||
1014 | if (err) | ||
1015 | return err; | ||
1016 | |||
1017 | if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) { | ||
1018 | CH_ERR("%s: cannot find PCI device memory base address\n", | ||
1019 | pci_name(pdev)); | ||
1020 | err = -ENODEV; | ||
1021 | goto out_disable_pdev; | ||
1022 | } | ||
1023 | |||
1024 | if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) { | ||
1025 | pci_using_dac = 1; | ||
1026 | |||
1027 | if (pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK)) { | ||
1028 | CH_ERR("%s: unable to obtain 64-bit DMA for" | ||
1029 | "consistent allocations\n", pci_name(pdev)); | ||
1030 | err = -ENODEV; | ||
1031 | goto out_disable_pdev; | ||
1032 | } | ||
1033 | |||
1034 | } else if ((err = pci_set_dma_mask(pdev, DMA_32BIT_MASK)) != 0) { | ||
1035 | CH_ERR("%s: no usable DMA configuration\n", pci_name(pdev)); | ||
1036 | goto out_disable_pdev; | ||
1037 | } | ||
1038 | |||
1039 | err = pci_request_regions(pdev, DRV_NAME); | ||
1040 | if (err) { | ||
1041 | CH_ERR("%s: cannot obtain PCI resources\n", pci_name(pdev)); | ||
1042 | goto out_disable_pdev; | ||
1043 | } | ||
1044 | |||
1045 | pci_set_master(pdev); | ||
1046 | |||
1047 | mmio_start = pci_resource_start(pdev, 0); | ||
1048 | mmio_len = pci_resource_len(pdev, 0); | ||
1049 | bi = t1_get_board_info(ent->driver_data); | ||
1050 | |||
1051 | for (i = 0; i < bi->port_number; ++i) { | ||
1052 | struct net_device *netdev; | ||
1053 | |||
1054 | netdev = alloc_etherdev(adapter ? 0 : sizeof(*adapter)); | ||
1055 | if (!netdev) { | ||
1056 | err = -ENOMEM; | ||
1057 | goto out_free_dev; | ||
1058 | } | ||
1059 | |||
1060 | SET_MODULE_OWNER(netdev); | ||
1061 | SET_NETDEV_DEV(netdev, &pdev->dev); | ||
1062 | |||
1063 | if (!adapter) { | ||
1064 | adapter = netdev->priv; | ||
1065 | adapter->pdev = pdev; | ||
1066 | adapter->port[0].dev = netdev; /* so we don't leak it */ | ||
1067 | |||
1068 | adapter->regs = ioremap(mmio_start, mmio_len); | ||
1069 | if (!adapter->regs) { | ||
1070 | CH_ERR("%s: cannot map device registers\n", | ||
1071 | pci_name(pdev)); | ||
1072 | err = -ENOMEM; | ||
1073 | goto out_free_dev; | ||
1074 | } | ||
1075 | |||
1076 | if (t1_get_board_rev(adapter, bi, &adapter->params)) { | ||
1077 | err = -ENODEV; /* Can't handle this chip rev */ | ||
1078 | goto out_free_dev; | ||
1079 | } | ||
1080 | |||
1081 | adapter->name = pci_name(pdev); | ||
1082 | adapter->msg_enable = dflt_msg_enable; | ||
1083 | adapter->mmio_len = mmio_len; | ||
1084 | |||
1085 | init_MUTEX(&adapter->mib_mutex); | ||
1086 | spin_lock_init(&adapter->tpi_lock); | ||
1087 | spin_lock_init(&adapter->work_lock); | ||
1088 | spin_lock_init(&adapter->async_lock); | ||
1089 | |||
1090 | INIT_WORK(&adapter->ext_intr_handler_task, | ||
1091 | ext_intr_task, adapter); | ||
1092 | INIT_WORK(&adapter->stats_update_task, mac_stats_task, | ||
1093 | adapter); | ||
1094 | #ifdef work_struct | ||
1095 | init_timer(&adapter->stats_update_timer); | ||
1096 | adapter->stats_update_timer.function = mac_stats_timer; | ||
1097 | adapter->stats_update_timer.data = | ||
1098 | (unsigned long)adapter; | ||
1099 | #endif | ||
1100 | |||
1101 | pci_set_drvdata(pdev, netdev); | ||
1102 | } | ||
1103 | |||
1104 | pi = &adapter->port[i]; | ||
1105 | pi->dev = netdev; | ||
1106 | netif_carrier_off(netdev); | ||
1107 | netdev->irq = pdev->irq; | ||
1108 | netdev->if_port = i; | ||
1109 | netdev->mem_start = mmio_start; | ||
1110 | netdev->mem_end = mmio_start + mmio_len - 1; | ||
1111 | netdev->priv = adapter; | ||
1112 | netdev->features |= NETIF_F_SG | NETIF_F_IP_CSUM; | ||
1113 | netdev->features |= NETIF_F_LLTX; | ||
1114 | |||
1115 | adapter->flags |= RX_CSUM_ENABLED | TCP_CSUM_CAPABLE; | ||
1116 | if (pci_using_dac) | ||
1117 | netdev->features |= NETIF_F_HIGHDMA; | ||
1118 | if (vlan_tso_capable(adapter)) { | ||
1119 | #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE) | ||
1120 | adapter->flags |= VLAN_ACCEL_CAPABLE; | ||
1121 | netdev->features |= | ||
1122 | NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX; | ||
1123 | netdev->vlan_rx_register = vlan_rx_register; | ||
1124 | netdev->vlan_rx_kill_vid = vlan_rx_kill_vid; | ||
1125 | #endif | ||
1126 | adapter->flags |= TSO_CAPABLE; | ||
1127 | netdev->features |= NETIF_F_TSO; | ||
1128 | } | ||
1129 | |||
1130 | netdev->open = cxgb_open; | ||
1131 | netdev->stop = cxgb_close; | ||
1132 | netdev->hard_start_xmit = t1_start_xmit; | ||
1133 | netdev->hard_header_len += (adapter->flags & TSO_CAPABLE) ? | ||
1134 | sizeof(struct cpl_tx_pkt_lso) : | ||
1135 | sizeof(struct cpl_tx_pkt); | ||
1136 | netdev->get_stats = t1_get_stats; | ||
1137 | netdev->set_multicast_list = t1_set_rxmode; | ||
1138 | netdev->do_ioctl = t1_ioctl; | ||
1139 | netdev->change_mtu = t1_change_mtu; | ||
1140 | netdev->set_mac_address = t1_set_mac_addr; | ||
1141 | #ifdef CONFIG_NET_POLL_CONTROLLER | ||
1142 | netdev->poll_controller = t1_netpoll; | ||
1143 | #endif | ||
1144 | netdev->weight = 64; | ||
1145 | |||
1146 | SET_ETHTOOL_OPS(netdev, &t1_ethtool_ops); | ||
1147 | } | ||
1148 | |||
1149 | if (t1_init_sw_modules(adapter, bi) < 0) { | ||
1150 | err = -ENODEV; | ||
1151 | goto out_free_dev; | ||
1152 | } | ||
1153 | |||
1154 | /* | ||
1155 | * The card is now ready to go. If any errors occur during device | ||
1156 | * registration we do not fail the whole card but rather proceed only | ||
1157 | * with the ports we manage to register successfully. However we must | ||
1158 | * register at least one net device. | ||
1159 | */ | ||
1160 | for (i = 0; i < bi->port_number; ++i) { | ||
1161 | err = register_netdev(adapter->port[i].dev); | ||
1162 | if (err) | ||
1163 | CH_WARN("%s: cannot register net device %s, skipping\n", | ||
1164 | pci_name(pdev), adapter->port[i].dev->name); | ||
1165 | else { | ||
1166 | /* | ||
1167 | * Change the name we use for messages to the name of | ||
1168 | * the first successfully registered interface. | ||
1169 | */ | ||
1170 | if (!adapter->registered_device_map) | ||
1171 | adapter->name = adapter->port[i].dev->name; | ||
1172 | |||
1173 | __set_bit(i, &adapter->registered_device_map); | ||
1174 | } | ||
1175 | } | ||
1176 | if (!adapter->registered_device_map) { | ||
1177 | CH_ERR("%s: could not register any net devices\n", | ||
1178 | pci_name(pdev)); | ||
1179 | goto out_release_adapter_res; | ||
1180 | } | ||
1181 | |||
1182 | printk(KERN_INFO "%s: %s (rev %d), %s %dMHz/%d-bit\n", adapter->name, | ||
1183 | bi->desc, adapter->params.chip_revision, | ||
1184 | adapter->params.pci.is_pcix ? "PCIX" : "PCI", | ||
1185 | adapter->params.pci.speed, adapter->params.pci.width); | ||
1186 | return 0; | ||
1187 | |||
1188 | out_release_adapter_res: | ||
1189 | t1_free_sw_modules(adapter); | ||
1190 | out_free_dev: | ||
1191 | if (adapter) { | ||
1192 | if (adapter->regs) iounmap(adapter->regs); | ||
1193 | for (i = bi->port_number - 1; i >= 0; --i) | ||
1194 | if (adapter->port[i].dev) { | ||
1195 | cxgb_proc_cleanup(adapter, proc_root_driver); | ||
1196 | kfree(adapter->port[i].dev); | ||
1197 | } | ||
1198 | } | ||
1199 | pci_release_regions(pdev); | ||
1200 | out_disable_pdev: | ||
1201 | pci_disable_device(pdev); | ||
1202 | pci_set_drvdata(pdev, NULL); | ||
1203 | return err; | ||
1204 | } | ||
1205 | |||
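/*
 * Cycle the chip through power state D3hot (3) and back to D0 (0) via the
 * PM control/status register, which soft-resets the device (this assumes
 * A_PCICFG_PM_CSR addresses the standard PCI PM CSR dword).
 */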
1206 | static inline void t1_sw_reset(struct pci_dev *pdev) | ||
1207 | { | ||
1208 | pci_write_config_dword(pdev, A_PCICFG_PM_CSR, 3); | ||
1209 | pci_write_config_dword(pdev, A_PCICFG_PM_CSR, 0); | ||
1210 | } | ||
1211 | |||
1212 | static void __devexit remove_one(struct pci_dev *pdev) | ||
1213 | { | ||
1214 | struct net_device *dev = pci_get_drvdata(pdev); | ||
1215 | |||
1216 | if (dev) { | ||
1217 | int i; | ||
1218 | struct adapter *adapter = dev->priv; | ||
1219 | |||
1220 | for_each_port(adapter, i) | ||
1221 | if (test_bit(i, &adapter->registered_device_map)) | ||
1222 | unregister_netdev(adapter->port[i].dev); | ||
1223 | |||
1224 | t1_free_sw_modules(adapter); | ||
1225 | iounmap(adapter->regs); | ||
1226 | while (--i >= 0) | ||
1227 | if (adapter->port[i].dev) { | ||
1228 | cxgb_proc_cleanup(adapter, proc_root_driver); | ||
1229 | kfree(adapter->port[i].dev); | ||
1230 | } | ||
1231 | pci_release_regions(pdev); | ||
1232 | pci_disable_device(pdev); | ||
1233 | pci_set_drvdata(pdev, NULL); | ||
1234 | t1_sw_reset(pdev); | ||
1235 | } | ||
1236 | } | ||
1237 | |||
1238 | static struct pci_driver driver = { | ||
1239 | .name = DRV_NAME, | ||
1240 | .id_table = t1_pci_tbl, | ||
1241 | .probe = init_one, | ||
1242 | .remove = __devexit_p(remove_one), | ||
1243 | }; | ||
1244 | |||
1245 | static int __init t1_init_module(void) | ||
1246 | { | ||
1247 | return pci_module_init(&driver); | ||
1248 | } | ||
1249 | |||
1250 | static void __exit t1_cleanup_module(void) | ||
1251 | { | ||
1252 | pci_unregister_driver(&driver); | ||
1253 | } | ||
1254 | |||
1255 | module_init(t1_init_module); | ||
1256 | module_exit(t1_cleanup_module); | ||
diff --git a/drivers/net/chelsio/elmer0.h b/drivers/net/chelsio/elmer0.h new file mode 100644 index 000000000000..5590cb2dac19 --- /dev/null +++ b/drivers/net/chelsio/elmer0.h | |||
@@ -0,0 +1,151 @@ | |||
1 | /***************************************************************************** | ||
2 | * * | ||
3 | * File: elmer0.h * | ||
4 | * $Revision: 1.6 $ * | ||
5 | * $Date: 2005/06/21 22:49:43 $ * | ||
6 | * Description: * | ||
7 | * part of the Chelsio 10Gb Ethernet Driver. * | ||
8 | * * | ||
9 | * This program is free software; you can redistribute it and/or modify * | ||
10 | * it under the terms of the GNU General Public License, version 2, as * | ||
11 | * published by the Free Software Foundation. * | ||
12 | * * | ||
13 | * You should have received a copy of the GNU General Public License along * | ||
14 | * with this program; if not, write to the Free Software Foundation, Inc., * | ||
15 | * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. * | ||
16 | * * | ||
17 | * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED * | ||
18 | * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF * | ||
19 | * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. * | ||
20 | * * | ||
21 | * http://www.chelsio.com * | ||
22 | * * | ||
23 | * Copyright (c) 2003 - 2005 Chelsio Communications, Inc. * | ||
24 | * All rights reserved. * | ||
25 | * * | ||
26 | * Maintainers: maintainers@chelsio.com * | ||
27 | * * | ||
28 | * Authors: Dimitrios Michailidis <dm@chelsio.com> * | ||
29 | * Tina Yang <tainay@chelsio.com> * | ||
30 | * Felix Marti <felix@chelsio.com> * | ||
31 | * Scott Bardone <sbardone@chelsio.com> * | ||
32 | * Kurt Ottaway <kottaway@chelsio.com> * | ||
33 | * Frank DiMambro <frank@chelsio.com> * | ||
34 | * * | ||
35 | * History: * | ||
36 | * * | ||
37 | ****************************************************************************/ | ||
38 | |||
39 | #ifndef _CXGB_ELMER0_H_ | ||
40 | #define _CXGB_ELMER0_H_ | ||
41 | |||
42 | /* ELMER0 registers */ | ||
43 | #define A_ELMER0_VERSION 0x100000 | ||
44 | #define A_ELMER0_PHY_CFG 0x100004 | ||
45 | #define A_ELMER0_INT_ENABLE 0x100008 | ||
46 | #define A_ELMER0_INT_CAUSE 0x10000c | ||
47 | #define A_ELMER0_GPI_CFG 0x100010 | ||
48 | #define A_ELMER0_GPI_STAT 0x100014 | ||
49 | #define A_ELMER0_GPO 0x100018 | ||
50 | #define A_ELMER0_PORT0_MI1_CFG 0x400000 | ||
51 | |||
52 | #define S_MI1_MDI_ENABLE 0 | ||
53 | #define V_MI1_MDI_ENABLE(x) ((x) << S_MI1_MDI_ENABLE) | ||
54 | #define F_MI1_MDI_ENABLE V_MI1_MDI_ENABLE(1U) | ||
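/*
 * Register macro conventions used throughout these headers:
 *   S_FOO     bit offset of field FOO      M_FOO     field mask (unshifted)
 *   V_FOO(x)  place x into the field       G_FOO(x)  extract the field
 *   F_FOO     single-bit field set to 1
 */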
55 | |||
56 | #define S_MI1_MDI_INVERT 1 | ||
57 | #define V_MI1_MDI_INVERT(x) ((x) << S_MI1_MDI_INVERT) | ||
58 | #define F_MI1_MDI_INVERT V_MI1_MDI_INVERT(1U) | ||
59 | |||
60 | #define S_MI1_PREAMBLE_ENABLE 2 | ||
61 | #define V_MI1_PREAMBLE_ENABLE(x) ((x) << S_MI1_PREAMBLE_ENABLE) | ||
62 | #define F_MI1_PREAMBLE_ENABLE V_MI1_PREAMBLE_ENABLE(1U) | ||
63 | |||
64 | #define S_MI1_SOF 3 | ||
65 | #define M_MI1_SOF 0x3 | ||
66 | #define V_MI1_SOF(x) ((x) << S_MI1_SOF) | ||
67 | #define G_MI1_SOF(x) (((x) >> S_MI1_SOF) & M_MI1_SOF) | ||
68 | |||
69 | #define S_MI1_CLK_DIV 5 | ||
70 | #define M_MI1_CLK_DIV 0xff | ||
71 | #define V_MI1_CLK_DIV(x) ((x) << S_MI1_CLK_DIV) | ||
72 | #define G_MI1_CLK_DIV(x) (((x) >> S_MI1_CLK_DIV) & M_MI1_CLK_DIV) | ||
73 | |||
74 | #define A_ELMER0_PORT0_MI1_ADDR 0x400004 | ||
75 | |||
76 | #define S_MI1_REG_ADDR 0 | ||
77 | #define M_MI1_REG_ADDR 0x1f | ||
78 | #define V_MI1_REG_ADDR(x) ((x) << S_MI1_REG_ADDR) | ||
79 | #define G_MI1_REG_ADDR(x) (((x) >> S_MI1_REG_ADDR) & M_MI1_REG_ADDR) | ||
80 | |||
81 | #define S_MI1_PHY_ADDR 5 | ||
82 | #define M_MI1_PHY_ADDR 0x1f | ||
83 | #define V_MI1_PHY_ADDR(x) ((x) << S_MI1_PHY_ADDR) | ||
84 | #define G_MI1_PHY_ADDR(x) (((x) >> S_MI1_PHY_ADDR) & M_MI1_PHY_ADDR) | ||
85 | |||
86 | #define A_ELMER0_PORT0_MI1_DATA 0x400008 | ||
87 | |||
88 | #define S_MI1_DATA 0 | ||
89 | #define M_MI1_DATA 0xffff | ||
90 | #define V_MI1_DATA(x) ((x) << S_MI1_DATA) | ||
91 | #define G_MI1_DATA(x) (((x) >> S_MI1_DATA) & M_MI1_DATA) | ||
92 | |||
93 | #define A_ELMER0_PORT0_MI1_OP 0x40000c | ||
94 | |||
95 | #define S_MI1_OP 0 | ||
96 | #define M_MI1_OP 0x3 | ||
97 | #define V_MI1_OP(x) ((x) << S_MI1_OP) | ||
98 | #define G_MI1_OP(x) (((x) >> S_MI1_OP) & M_MI1_OP) | ||
99 | |||
100 | #define S_MI1_ADDR_AUTOINC 2 | ||
101 | #define V_MI1_ADDR_AUTOINC(x) ((x) << S_MI1_ADDR_AUTOINC) | ||
102 | #define F_MI1_ADDR_AUTOINC V_MI1_ADDR_AUTOINC(1U) | ||
103 | |||
104 | #define S_MI1_OP_BUSY 31 | ||
105 | #define V_MI1_OP_BUSY(x) ((x) << S_MI1_OP_BUSY) | ||
106 | #define F_MI1_OP_BUSY V_MI1_OP_BUSY(1U) | ||
107 | |||
108 | #define A_ELMER0_PORT1_MI1_CFG 0x500000 | ||
109 | #define A_ELMER0_PORT1_MI1_ADDR 0x500004 | ||
110 | #define A_ELMER0_PORT1_MI1_DATA 0x500008 | ||
111 | #define A_ELMER0_PORT1_MI1_OP 0x50000c | ||
112 | #define A_ELMER0_PORT2_MI1_CFG 0x600000 | ||
113 | #define A_ELMER0_PORT2_MI1_ADDR 0x600004 | ||
114 | #define A_ELMER0_PORT2_MI1_DATA 0x600008 | ||
115 | #define A_ELMER0_PORT2_MI1_OP 0x60000c | ||
116 | #define A_ELMER0_PORT3_MI1_CFG 0x700000 | ||
117 | #define A_ELMER0_PORT3_MI1_ADDR 0x700004 | ||
118 | #define A_ELMER0_PORT3_MI1_DATA 0x700008 | ||
119 | #define A_ELMER0_PORT3_MI1_OP 0x70000c | ||
120 | |||
121 | /* Simple bit definitions for the GPI and GPO registers. */ | ||
122 | #define ELMER0_GP_BIT0 0x0001 | ||
123 | #define ELMER0_GP_BIT1 0x0002 | ||
124 | #define ELMER0_GP_BIT2 0x0004 | ||
125 | #define ELMER0_GP_BIT3 0x0008 | ||
126 | #define ELMER0_GP_BIT4 0x0010 | ||
127 | #define ELMER0_GP_BIT5 0x0020 | ||
128 | #define ELMER0_GP_BIT6 0x0040 | ||
129 | #define ELMER0_GP_BIT7 0x0080 | ||
130 | #define ELMER0_GP_BIT8 0x0100 | ||
131 | #define ELMER0_GP_BIT9 0x0200 | ||
132 | #define ELMER0_GP_BIT10 0x0400 | ||
133 | #define ELMER0_GP_BIT11 0x0800 | ||
134 | #define ELMER0_GP_BIT12 0x1000 | ||
135 | #define ELMER0_GP_BIT13 0x2000 | ||
136 | #define ELMER0_GP_BIT14 0x4000 | ||
137 | #define ELMER0_GP_BIT15 0x8000 | ||
138 | #define ELMER0_GP_BIT16 0x10000 | ||
139 | #define ELMER0_GP_BIT17 0x20000 | ||
140 | #define ELMER0_GP_BIT18 0x40000 | ||
141 | #define ELMER0_GP_BIT19 0x80000 | ||
142 | |||
143 | #define MI1_OP_DIRECT_WRITE 1 | ||
144 | #define MI1_OP_DIRECT_READ 2 | ||
145 | |||
146 | #define MI1_OP_INDIRECT_ADDRESS 0 | ||
147 | #define MI1_OP_INDIRECT_WRITE 1 | ||
148 | #define MI1_OP_INDIRECT_READ_INC 2 | ||
149 | #define MI1_OP_INDIRECT_READ 3 | ||
150 | |||
151 | #endif /* _CXGB_ELMER0_H_ */ | ||
diff --git a/drivers/net/chelsio/espi.c b/drivers/net/chelsio/espi.c new file mode 100644 index 000000000000..230642571c92 --- /dev/null +++ b/drivers/net/chelsio/espi.c | |||
@@ -0,0 +1,346 @@ | |||
1 | /***************************************************************************** | ||
2 | * * | ||
3 | * File: espi.c * | ||
4 | * $Revision: 1.14 $ * | ||
5 | * $Date: 2005/05/14 00:59:32 $ * | ||
6 | * Description: * | ||
7 | * Ethernet SPI functionality. * | ||
8 | * part of the Chelsio 10Gb Ethernet Driver. * | ||
9 | * * | ||
10 | * This program is free software; you can redistribute it and/or modify * | ||
11 | * it under the terms of the GNU General Public License, version 2, as * | ||
12 | * published by the Free Software Foundation. * | ||
13 | * * | ||
14 | * You should have received a copy of the GNU General Public License along * | ||
15 | * with this program; if not, write to the Free Software Foundation, Inc., * | ||
16 | * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. * | ||
17 | * * | ||
18 | * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED * | ||
19 | * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF * | ||
20 | * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. * | ||
21 | * * | ||
22 | * http://www.chelsio.com * | ||
23 | * * | ||
24 | * Copyright (c) 2003 - 2005 Chelsio Communications, Inc. * | ||
25 | * All rights reserved. * | ||
26 | * * | ||
27 | * Maintainers: maintainers@chelsio.com * | ||
28 | * * | ||
29 | * Authors: Dimitrios Michailidis <dm@chelsio.com> * | ||
30 | * Tina Yang <tainay@chelsio.com> * | ||
31 | * Felix Marti <felix@chelsio.com> * | ||
32 | * Scott Bardone <sbardone@chelsio.com> * | ||
33 | * Kurt Ottaway <kottaway@chelsio.com> * | ||
34 | * Frank DiMambro <frank@chelsio.com> * | ||
35 | * * | ||
36 | * History: * | ||
37 | * * | ||
38 | ****************************************************************************/ | ||
39 | |||
40 | #include "common.h" | ||
41 | #include "regs.h" | ||
42 | #include "espi.h" | ||
43 | |||
44 | struct peespi { | ||
45 | adapter_t *adapter; | ||
46 | struct espi_intr_counts intr_cnt; | ||
47 | u32 misc_ctrl; | ||
48 | spinlock_t lock; | ||
49 | }; | ||
50 | |||
51 | #define ESPI_INTR_MASK (F_DIP4ERR | F_RXDROP | F_TXDROP | F_RXOVERFLOW | \ | ||
52 | F_RAMPARITYERR | F_DIP2PARITYERR) | ||
53 | #define MON_MASK (V_MONITORED_PORT_NUM(3) | F_MONITORED_DIRECTION \ | ||
54 | | F_MONITORED_INTERFACE) | ||
55 | |||
56 | #define TRICN_CNFG 14 | ||
57 | #define TRICN_CMD_READ 0x11 | ||
58 | #define TRICN_CMD_WRITE 0x21 | ||
59 | #define TRICN_CMD_ATTEMPTS 10 | ||
60 | |||
61 | static int tricn_write(adapter_t *adapter, int bundle_addr, int module_addr, | ||
62 | int ch_addr, int reg_offset, u32 wr_data) | ||
63 | { | ||
64 | int busy, attempts = TRICN_CMD_ATTEMPTS; | ||
65 | |||
66 | writel(V_WRITE_DATA(wr_data) | | ||
67 | V_REGISTER_OFFSET(reg_offset) | | ||
68 | V_CHANNEL_ADDR(ch_addr) | V_MODULE_ADDR(module_addr) | | ||
69 | V_BUNDLE_ADDR(bundle_addr) | | ||
70 | V_SPI4_COMMAND(TRICN_CMD_WRITE), | ||
71 | adapter->regs + A_ESPI_CMD_ADDR); | ||
72 | writel(0, adapter->regs + A_ESPI_GOSTAT); | ||
73 | |||
74 | do { | ||
75 | busy = readl(adapter->regs + A_ESPI_GOSTAT) & F_ESPI_CMD_BUSY; | ||
76 | } while (busy && --attempts); | ||
77 | |||
78 | if (busy) | ||
79 | CH_ERR("%s: TRICN write timed out\n", adapter->name); | ||
80 | |||
81 | return busy; | ||
82 | } | ||
83 | |||
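/*
 * Usage sketch: tricn_write(adapter, 0, 2, 7, TRICN_CNFG, 0x80) programs
 * the TRICN_CNFG register of channel 7 on module 2 of bundle 0, exactly
 * as tricn_init() below does while bringing the receive links up.
 */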
84 | /* 1. Deassert rx_reset_core. */ | ||
85 | /* 2. Program TRICN_CNFG registers. */ | ||
86 | /* 3. Deassert rx_reset_link */ | ||
87 | static int tricn_init(adapter_t *adapter) | ||
88 | { | ||
89 | int i = 0; | ||
90 | int sme = 1; | ||
91 | int stat = 0; | ||
92 | int timeout = 0; | ||
93 | int is_ready = 0; | ||
94 | int dynamic_deskew = 0; | ||
95 | |||
96 | if (dynamic_deskew) | ||
97 | sme = 0; | ||
98 | |||
99 | |||
100 | /* 1 */ | ||
101 | timeout = 1000; | ||
102 | do { | ||
103 | stat = readl(adapter->regs + A_ESPI_RX_RESET); | ||
104 | is_ready = (stat & 0x4); | ||
105 | timeout--; | ||
106 | udelay(5); | ||
107 | } while (!is_ready && (timeout != 0)); | ||
108 | writel(0x2, adapter->regs + A_ESPI_RX_RESET); | ||
109 | if (timeout == 0) { | ||
110 | CH_ERR("ESPI: timeout in tricn_init()\n"); | ||
111 | t1_fatal_err(adapter); | ||
112 | } | ||
114 | |||
115 | /* 2 */ | ||
116 | if (sme) { | ||
117 | tricn_write(adapter, 0, 0, 0, TRICN_CNFG, 0x81); | ||
118 | tricn_write(adapter, 0, 1, 0, TRICN_CNFG, 0x81); | ||
119 | tricn_write(adapter, 0, 2, 0, TRICN_CNFG, 0x81); | ||
120 | } | ||
121 | for (i = 1; i <= 8; i++) tricn_write(adapter, 0, 0, i, TRICN_CNFG, 0xf1); | ||
122 | for (i = 1; i <= 2; i++) tricn_write(adapter, 0, 1, i, TRICN_CNFG, 0xf1); | ||
123 | for (i = 1; i <= 3; i++) tricn_write(adapter, 0, 2, i, TRICN_CNFG, 0xe1); | ||
124 | tricn_write(adapter, 0, 2, 4, TRICN_CNFG, 0xf1); | ||
125 | tricn_write(adapter, 0, 2, 5, TRICN_CNFG, 0xe1); | ||
126 | tricn_write(adapter, 0, 2, 6, TRICN_CNFG, 0xf1); | ||
127 | tricn_write(adapter, 0, 2, 7, TRICN_CNFG, 0x80); | ||
128 | tricn_write(adapter, 0, 2, 8, TRICN_CNFG, 0xf1); | ||
129 | |||
130 | /* 3 */ | ||
131 | writel(0x3, adapter->regs + A_ESPI_RX_RESET); | ||
132 | |||
133 | return 0; | ||
134 | } | ||
135 | |||
136 | void t1_espi_intr_enable(struct peespi *espi) | ||
137 | { | ||
138 | u32 enable, pl_intr = readl(espi->adapter->regs + A_PL_ENABLE); | ||
139 | |||
140 | /* | ||
141 | * Cannot enable ESPI interrupts on T1B because HW asserts the | ||
142 | * interrupt incorrectly, namely the driver gets ESPI interrupts | ||
143 | * but no data is actually dropped (this can be verified by reading the | ||
144 | * ESPI drop registers). Also, once the ESPI interrupt is asserted it | ||
145 | * cannot be cleared (HW bug). | ||
146 | */ | ||
147 | enable = t1_is_T1B(espi->adapter) ? 0 : ESPI_INTR_MASK; | ||
148 | writel(enable, espi->adapter->regs + A_ESPI_INTR_ENABLE); | ||
149 | writel(pl_intr | F_PL_INTR_ESPI, espi->adapter->regs + A_PL_ENABLE); | ||
150 | } | ||
151 | |||
152 | void t1_espi_intr_clear(struct peespi *espi) | ||
153 | { | ||
154 | writel(0xffffffff, espi->adapter->regs + A_ESPI_INTR_STATUS); | ||
155 | writel(F_PL_INTR_ESPI, espi->adapter->regs + A_PL_CAUSE); | ||
156 | } | ||
157 | |||
158 | void t1_espi_intr_disable(struct peespi *espi) | ||
159 | { | ||
160 | u32 pl_intr = readl(espi->adapter->regs + A_PL_ENABLE); | ||
161 | |||
162 | writel(0, espi->adapter->regs + A_ESPI_INTR_ENABLE); | ||
163 | writel(pl_intr & ~F_PL_INTR_ESPI, espi->adapter->regs + A_PL_ENABLE); | ||
164 | } | ||
165 | |||
166 | int t1_espi_intr_handler(struct peespi *espi) | ||
167 | { | ||
168 | u32 cnt; | ||
169 | u32 status = readl(espi->adapter->regs + A_ESPI_INTR_STATUS); | ||
170 | |||
171 | if (status & F_DIP4ERR) | ||
172 | espi->intr_cnt.DIP4_err++; | ||
173 | if (status & F_RXDROP) | ||
174 | espi->intr_cnt.rx_drops++; | ||
175 | if (status & F_TXDROP) | ||
176 | espi->intr_cnt.tx_drops++; | ||
177 | if (status & F_RXOVERFLOW) | ||
178 | espi->intr_cnt.rx_ovflw++; | ||
179 | if (status & F_RAMPARITYERR) | ||
180 | espi->intr_cnt.parity_err++; | ||
181 | if (status & F_DIP2PARITYERR) { | ||
182 | espi->intr_cnt.DIP2_parity_err++; | ||
183 | |||
184 | /* | ||
185 | * Must read the error count to clear the interrupt | ||
186 | * that it causes. | ||
187 | */ | ||
188 | cnt = readl(espi->adapter->regs + A_ESPI_DIP2_ERR_COUNT); | ||
189 | } | ||
190 | |||
191 | /* | ||
192 | * For T1B we need to write 1 to clear ESPI interrupts. For T2+ we | ||
193 | * write the status as is. | ||
194 | */ | ||
195 | if (status && t1_is_T1B(espi->adapter)) | ||
196 | status = 1; | ||
197 | writel(status, espi->adapter->regs + A_ESPI_INTR_STATUS); | ||
198 | return 0; | ||
199 | } | ||
200 | |||
201 | const struct espi_intr_counts *t1_espi_get_intr_counts(struct peespi *espi) | ||
202 | { | ||
203 | return &espi->intr_cnt; | ||
204 | } | ||
205 | |||
206 | static void espi_setup_for_pm3393(adapter_t *adapter) | ||
207 | { | ||
208 | u32 wmark = t1_is_T1B(adapter) ? 0x4000 : 0x3200; | ||
209 | |||
210 | writel(0x1f4, adapter->regs + A_ESPI_SCH_TOKEN0); | ||
211 | writel(0x1f4, adapter->regs + A_ESPI_SCH_TOKEN1); | ||
212 | writel(0x1f4, adapter->regs + A_ESPI_SCH_TOKEN2); | ||
213 | writel(0x1f4, adapter->regs + A_ESPI_SCH_TOKEN3); | ||
214 | writel(0x100, adapter->regs + A_ESPI_RX_FIFO_ALMOST_EMPTY_WATERMARK); | ||
215 | writel(wmark, adapter->regs + A_ESPI_RX_FIFO_ALMOST_FULL_WATERMARK); | ||
216 | writel(3, adapter->regs + A_ESPI_CALENDAR_LENGTH); | ||
217 | writel(0x08000008, adapter->regs + A_ESPI_TRAIN); | ||
218 | writel(V_RX_NPORTS(1) | V_TX_NPORTS(1), adapter->regs + A_PORT_CONFIG); | ||
219 | } | ||
220 | |||
221 | /* T2 Init part -- */ | ||
222 | /* 1. Set T_ESPI_MISCCTRL_ADDR */ | ||
223 | /* 2. Init ESPI registers. */ | ||
224 | /* 3. Init TriCN Hard Macro */ | ||
225 | int t1_espi_init(struct peespi *espi, int mac_type, int nports) | ||
226 | { | ||
227 | u32 cnt; | ||
228 | u32 status_enable_extra = 0; | ||
229 | u32 status, burstval = 0x800100; | ||
230 | adapter_t *adapter = espi->adapter; | ||
232 | |||
233 | /* Disable ESPI training. MACs that can handle it enable it below. */ | ||
234 | writel(0, adapter->regs + A_ESPI_TRAIN); | ||
235 | |||
236 | if (is_T2(adapter)) { | ||
237 | writel(V_OUT_OF_SYNC_COUNT(4) | | ||
238 | V_DIP2_PARITY_ERR_THRES(3) | | ||
239 | V_DIP4_THRES(1), adapter->regs + A_ESPI_MISC_CONTROL); | ||
240 | if (nports == 4) { | ||
241 | /* T204: maxburst1 = 0x40, maxburst2 = 0x20 */ | ||
242 | burstval = 0x200040; | ||
243 | } | ||
244 | } | ||
245 | writel(burstval, adapter->regs + A_ESPI_MAXBURST1_MAXBURST2); | ||
246 | |||
247 | switch (mac_type) { | ||
248 | case CHBT_MAC_PM3393: | ||
249 | espi_setup_for_pm3393(adapter); | ||
250 | break; | ||
251 | default: | ||
252 | return -1; | ||
253 | } | ||
254 | |||
255 | /* | ||
256 | * Make sure any pending interrupts from the SPI are | ||
257 | * cleared before enabling the interrupt. | ||
258 | */ | ||
259 | writel(ESPI_INTR_MASK, espi->adapter->regs + A_ESPI_INTR_ENABLE); | ||
260 | status = readl(espi->adapter->regs + A_ESPI_INTR_STATUS); | ||
261 | if (status & F_DIP2PARITYERR) { | ||
262 | cnt = readl(espi->adapter->regs + A_ESPI_DIP2_ERR_COUNT); | ||
263 | } | ||
264 | |||
265 | /* | ||
266 | * For T1B we need to write 1 to clear ESPI interrupts. For T2+ we | ||
267 | * write the status as is. | ||
268 | */ | ||
269 | if (status && t1_is_T1B(espi->adapter)) | ||
270 | status = 1; | ||
271 | writel(status, espi->adapter->regs + A_ESPI_INTR_STATUS); | ||
272 | |||
273 | writel(status_enable_extra | F_RXSTATUSENABLE, | ||
274 | adapter->regs + A_ESPI_FIFO_STATUS_ENABLE); | ||
275 | |||
276 | if (is_T2(adapter)) { | ||
277 | tricn_init(adapter); | ||
278 | /* | ||
279 | * Always position the control at the 1st port egress IN | ||
280 | * (sop,eop) counter to reduce PIOs for T/N210 workaround. | ||
281 | */ | ||
282 | espi->misc_ctrl = (readl(adapter->regs + A_ESPI_MISC_CONTROL) | ||
283 | & ~MON_MASK) | (F_MONITORED_DIRECTION | ||
284 | | F_MONITORED_INTERFACE); | ||
285 | writel(espi->misc_ctrl, adapter->regs + A_ESPI_MISC_CONTROL); | ||
286 | spin_lock_init(&espi->lock); | ||
287 | } | ||
288 | |||
289 | return 0; | ||
290 | } | ||
291 | |||
292 | void t1_espi_destroy(struct peespi *espi) | ||
293 | { | ||
294 | kfree(espi); | ||
295 | } | ||
296 | |||
297 | struct peespi *t1_espi_create(adapter_t *adapter) | ||
298 | { | ||
299 | struct peespi *espi = kmalloc(sizeof(*espi), GFP_KERNEL); | ||
300 | |||
301 | if (espi) { | ||
302 | memset(espi, 0, sizeof(*espi)); | ||
303 | espi->adapter = adapter; | ||
304 | } | ||
305 | return espi; | ||
306 | } | ||
307 | |||
308 | void t1_espi_set_misc_ctrl(adapter_t *adapter, u32 val) | ||
309 | { | ||
310 | struct peespi *espi = adapter->espi; | ||
311 | |||
312 | if (!is_T2(adapter)) | ||
313 | return; | ||
314 | spin_lock(&espi->lock); | ||
315 | espi->misc_ctrl = (val & ~MON_MASK) | | ||
316 | (espi->misc_ctrl & MON_MASK); | ||
317 | writel(espi->misc_ctrl, adapter->regs + A_ESPI_MISC_CONTROL); | ||
318 | spin_unlock(&espi->lock); | ||
319 | } | ||
320 | |||
321 | u32 t1_espi_get_mon(adapter_t *adapter, u32 addr, u8 wait) | ||
322 | { | ||
323 | u32 sel; | ||
324 | |||
325 | struct peespi *espi = adapter->espi; | ||
326 | |||
327 | if (!is_T2(adapter)) | ||
328 | return 0; | ||
329 | sel = V_MONITORED_PORT_NUM((addr & 0x3c) >> 2); | ||
330 | if (!wait) { | ||
331 | if (!spin_trylock(&espi->lock)) | ||
332 | return 0; | ||
333 | } | ||
334 | else | ||
335 | spin_lock(&espi->lock); | ||
336 | if (sel != (espi->misc_ctrl & MON_MASK)) { | ||
337 | writel(((espi->misc_ctrl & ~MON_MASK) | sel), | ||
338 | adapter->regs + A_ESPI_MISC_CONTROL); | ||
339 | sel = readl(adapter->regs + A_ESPI_SCH_TOKEN3); | ||
340 | writel(espi->misc_ctrl, adapter->regs + A_ESPI_MISC_CONTROL); | ||
341 | } | ||
342 | else | ||
343 | sel = readl(adapter->regs + A_ESPI_SCH_TOKEN3); | ||
344 | spin_unlock(&espi->lock); | ||
345 | return sel; | ||
346 | } | ||
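Note the wait flag's contract: with wait == 0 the function uses spin_trylock() and returns 0 if the lock is contended, so a zero return is ambiguous between "no sample" and "busy". A hedged usage sketch (the wrapper name is hypothetical):

/* Illustrative only: sample the first monitored counter without blocking. */
static u32 espi_sample_mon_nowait(adapter_t *adapter)
{
	/* addr 0 selects monitored port 0; wait == 0 never blocks */
	return t1_espi_get_mon(adapter, 0, 0);
}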
diff --git a/drivers/net/chelsio/espi.h b/drivers/net/chelsio/espi.h new file mode 100644 index 000000000000..c90e37f8457c --- /dev/null +++ b/drivers/net/chelsio/espi.h | |||
@@ -0,0 +1,68 @@ | |||
1 | /***************************************************************************** | ||
2 | * * | ||
3 | * File: espi.h * | ||
4 | * $Revision: 1.7 $ * | ||
5 | * $Date: 2005/06/21 18:29:47 $ * | ||
6 | * Description: * | ||
7 | * part of the Chelsio 10Gb Ethernet Driver. * | ||
8 | * * | ||
9 | * This program is free software; you can redistribute it and/or modify * | ||
10 | * it under the terms of the GNU General Public License, version 2, as * | ||
11 | * published by the Free Software Foundation. * | ||
12 | * * | ||
13 | * You should have received a copy of the GNU General Public License along * | ||
14 | * with this program; if not, write to the Free Software Foundation, Inc., * | ||
15 | * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. * | ||
16 | * * | ||
17 | * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED * | ||
18 | * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF * | ||
19 | * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. * | ||
20 | * * | ||
21 | * http://www.chelsio.com * | ||
22 | * * | ||
23 | * Copyright (c) 2003 - 2005 Chelsio Communications, Inc. * | ||
24 | * All rights reserved. * | ||
25 | * * | ||
26 | * Maintainers: maintainers@chelsio.com * | ||
27 | * * | ||
28 | * Authors: Dimitrios Michailidis <dm@chelsio.com> * | ||
29 | * Tina Yang <tainay@chelsio.com> * | ||
30 | * Felix Marti <felix@chelsio.com> * | ||
31 | * Scott Bardone <sbardone@chelsio.com> * | ||
32 | * Kurt Ottaway <kottaway@chelsio.com> * | ||
33 | * Frank DiMambro <frank@chelsio.com> * | ||
34 | * * | ||
35 | * History: * | ||
36 | * * | ||
37 | ****************************************************************************/ | ||
38 | |||
39 | #ifndef _CXGB_ESPI_H_ | ||
40 | #define _CXGB_ESPI_H_ | ||
41 | |||
42 | #include "common.h" | ||
43 | |||
44 | struct espi_intr_counts { | ||
45 | unsigned int DIP4_err; | ||
46 | unsigned int rx_drops; | ||
47 | unsigned int tx_drops; | ||
48 | unsigned int rx_ovflw; | ||
49 | unsigned int parity_err; | ||
50 | unsigned int DIP2_parity_err; | ||
51 | }; | ||
52 | |||
53 | struct peespi; | ||
54 | |||
55 | struct peespi *t1_espi_create(adapter_t *adapter); | ||
56 | void t1_espi_destroy(struct peespi *espi); | ||
57 | int t1_espi_init(struct peespi *espi, int mac_type, int nports); | ||
58 | |||
59 | void t1_espi_intr_enable(struct peespi *); | ||
60 | void t1_espi_intr_clear(struct peespi *); | ||
61 | void t1_espi_intr_disable(struct peespi *); | ||
62 | int t1_espi_intr_handler(struct peespi *); | ||
63 | const struct espi_intr_counts *t1_espi_get_intr_counts(struct peespi *espi); | ||
64 | |||
65 | void t1_espi_set_misc_ctrl(adapter_t *adapter, u32 val); | ||
66 | u32 t1_espi_get_mon(adapter_t *adapter, u32 addr, u8 wait); | ||
67 | |||
68 | #endif /* _CXGB_ESPI_H_ */ | ||
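Taken together, the expected lifecycle is create, then init, then interrupt management; a minimal hedged sketch, given an adapter_t *adapter (the CHBT_MAC_PM3393 constant and return conventions are taken from t1_espi_init() in espi.c):

struct peespi *espi = t1_espi_create(adapter);

if (!espi)
	return -ENOMEM;
if (t1_espi_init(espi, CHBT_MAC_PM3393, 1 /* nports */) < 0) {
	t1_espi_destroy(espi);
	return -EIO;
}
t1_espi_intr_clear(espi);
t1_espi_intr_enable(espi);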
diff --git a/drivers/net/chelsio/gmac.h b/drivers/net/chelsio/gmac.h new file mode 100644 index 000000000000..746b0eeea964 --- /dev/null +++ b/drivers/net/chelsio/gmac.h | |||
@@ -0,0 +1,134 @@ | |||
1 | /***************************************************************************** | ||
2 | * * | ||
3 | * File: gmac.h * | ||
4 | * $Revision: 1.6 $ * | ||
5 | * $Date: 2005/06/21 18:29:47 $ * | ||
6 | * Description: * | ||
7 | * Generic MAC functionality. * | ||
8 | * part of the Chelsio 10Gb Ethernet Driver. * | ||
9 | * * | ||
10 | * This program is free software; you can redistribute it and/or modify * | ||
11 | * it under the terms of the GNU General Public License, version 2, as * | ||
12 | * published by the Free Software Foundation. * | ||
13 | * * | ||
14 | * You should have received a copy of the GNU General Public License along * | ||
15 | * with this program; if not, write to the Free Software Foundation, Inc., * | ||
16 | * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. * | ||
17 | * * | ||
18 | * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED * | ||
19 | * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF * | ||
20 | * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. * | ||
21 | * * | ||
22 | * http://www.chelsio.com * | ||
23 | * * | ||
24 | * Copyright (c) 2003 - 2005 Chelsio Communications, Inc. * | ||
25 | * All rights reserved. * | ||
26 | * * | ||
27 | * Maintainers: maintainers@chelsio.com * | ||
28 | * * | ||
29 | * Authors: Dimitrios Michailidis <dm@chelsio.com> * | ||
30 | * Tina Yang <tainay@chelsio.com> * | ||
31 | * Felix Marti <felix@chelsio.com> * | ||
32 | * Scott Bardone <sbardone@chelsio.com> * | ||
33 | * Kurt Ottaway <kottaway@chelsio.com> * | ||
34 | * Frank DiMambro <frank@chelsio.com> * | ||
35 | * * | ||
36 | * History: * | ||
37 | * * | ||
38 | ****************************************************************************/ | ||
39 | |||
40 | #ifndef _CXGB_GMAC_H_ | ||
41 | #define _CXGB_GMAC_H_ | ||
42 | |||
43 | #include "common.h" | ||
44 | |||
45 | enum { MAC_STATS_UPDATE_FAST, MAC_STATS_UPDATE_FULL }; | ||
46 | enum { MAC_DIRECTION_RX = 1, MAC_DIRECTION_TX = 2 }; | ||
47 | |||
48 | struct cmac_statistics { | ||
49 | /* Transmit */ | ||
50 | u64 TxOctetsOK; | ||
51 | u64 TxOctetsBad; | ||
52 | u64 TxUnicastFramesOK; | ||
53 | u64 TxMulticastFramesOK; | ||
54 | u64 TxBroadcastFramesOK; | ||
55 | u64 TxPauseFrames; | ||
56 | u64 TxFramesWithDeferredXmissions; | ||
57 | u64 TxLateCollisions; | ||
58 | u64 TxTotalCollisions; | ||
59 | u64 TxFramesAbortedDueToXSCollisions; | ||
60 | u64 TxUnderrun; | ||
61 | u64 TxLengthErrors; | ||
62 | u64 TxInternalMACXmitError; | ||
63 | u64 TxFramesWithExcessiveDeferral; | ||
64 | u64 TxFCSErrors; | ||
65 | |||
66 | /* Receive */ | ||
67 | u64 RxOctetsOK; | ||
68 | u64 RxOctetsBad; | ||
69 | u64 RxUnicastFramesOK; | ||
70 | u64 RxMulticastFramesOK; | ||
71 | u64 RxBroadcastFramesOK; | ||
72 | u64 RxPauseFrames; | ||
73 | u64 RxFCSErrors; | ||
74 | u64 RxAlignErrors; | ||
75 | u64 RxSymbolErrors; | ||
76 | u64 RxDataErrors; | ||
77 | u64 RxSequenceErrors; | ||
78 | u64 RxRuntErrors; | ||
79 | u64 RxJabberErrors; | ||
80 | u64 RxInternalMACRcvError; | ||
81 | u64 RxInRangeLengthErrors; | ||
82 | u64 RxOutOfRangeLengthField; | ||
83 | u64 RxFrameTooLongErrors; | ||
84 | }; | ||
85 | |||
86 | struct cmac_ops { | ||
87 | void (*destroy)(struct cmac *); | ||
88 | int (*reset)(struct cmac *); | ||
89 | int (*interrupt_enable)(struct cmac *); | ||
90 | int (*interrupt_disable)(struct cmac *); | ||
91 | int (*interrupt_clear)(struct cmac *); | ||
92 | int (*interrupt_handler)(struct cmac *); | ||
93 | |||
94 | int (*enable)(struct cmac *, int); | ||
95 | int (*disable)(struct cmac *, int); | ||
96 | |||
97 | int (*loopback_enable)(struct cmac *); | ||
98 | int (*loopback_disable)(struct cmac *); | ||
99 | |||
100 | int (*set_mtu)(struct cmac *, int mtu); | ||
101 | int (*set_rx_mode)(struct cmac *, struct t1_rx_mode *rm); | ||
102 | |||
103 | int (*set_speed_duplex_fc)(struct cmac *, int speed, int duplex, int fc); | ||
104 | int (*get_speed_duplex_fc)(struct cmac *, int *speed, int *duplex, | ||
105 | int *fc); | ||
106 | |||
107 | const struct cmac_statistics *(*statistics_update)(struct cmac *, int); | ||
108 | |||
109 | int (*macaddress_get)(struct cmac *, u8 mac_addr[6]); | ||
110 | int (*macaddress_set)(struct cmac *, u8 mac_addr[6]); | ||
111 | }; | ||
112 | |||
113 | typedef struct _cmac_instance cmac_instance; | ||
114 | |||
115 | struct cmac { | ||
116 | struct cmac_statistics stats; | ||
117 | adapter_t *adapter; | ||
118 | struct cmac_ops *ops; | ||
119 | cmac_instance *instance; | ||
120 | }; | ||
121 | |||
122 | struct gmac { | ||
123 | unsigned int stats_update_period; | ||
124 | struct cmac *(*create)(adapter_t *adapter, int index); | ||
125 | int (*reset)(adapter_t *); | ||
126 | }; | ||
127 | |||
128 | extern struct gmac t1_pm3393_ops; | ||
129 | extern struct gmac t1_chelsio_mac_ops; | ||
130 | extern struct gmac t1_vsc7321_ops; | ||
131 | extern struct gmac t1_ixf1010_ops; | ||
132 | extern struct gmac t1_dummy_mac_ops; | ||
133 | |||
134 | #endif /* _CXGB_GMAC_H_ */ | ||
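struct cmac_ops is a vtable: board code picks a struct gmac (t1_pm3393_ops below, for the PM3393), calls its create hook, and from then on drives the MAC only through mac->ops. A hedged sketch, given an adapter_t *adapter (port index and MTU are illustrative):

struct cmac *mac = t1_pm3393_ops.create(adapter, 0);

if (mac) {
	mac->ops->reset(mac);
	mac->ops->set_mtu(mac, 1500);
	mac->ops->enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
	/* ... run traffic, poll mac->ops->statistics_update() ... */
	mac->ops->destroy(mac);
}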
diff --git a/drivers/net/chelsio/mv88x201x.c b/drivers/net/chelsio/mv88x201x.c new file mode 100644 index 000000000000..db5034282782 --- /dev/null +++ b/drivers/net/chelsio/mv88x201x.c | |||
@@ -0,0 +1,252 @@ | |||
1 | /***************************************************************************** | ||
2 | * * | ||
3 | * File: mv88x201x.c * | ||
4 | * $Revision: 1.12 $ * | ||
5 | * $Date: 2005/04/15 19:27:14 $ * | ||
6 | * Description: * | ||
7 | * Marvell PHY (mv88x201x) functionality. * | ||
8 | * part of the Chelsio 10Gb Ethernet Driver. * | ||
9 | * * | ||
10 | * This program is free software; you can redistribute it and/or modify * | ||
11 | * it under the terms of the GNU General Public License, version 2, as * | ||
12 | * published by the Free Software Foundation. * | ||
13 | * * | ||
14 | * You should have received a copy of the GNU General Public License along * | ||
15 | * with this program; if not, write to the Free Software Foundation, Inc., * | ||
16 | * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. * | ||
17 | * * | ||
18 | * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED * | ||
19 | * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF * | ||
20 | * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. * | ||
21 | * * | ||
22 | * http://www.chelsio.com * | ||
23 | * * | ||
24 | * Copyright (c) 2003 - 2005 Chelsio Communications, Inc. * | ||
25 | * All rights reserved. * | ||
26 | * * | ||
27 | * Maintainers: maintainers@chelsio.com * | ||
28 | * * | ||
29 | * Authors: Dimitrios Michailidis <dm@chelsio.com> * | ||
30 | * Tina Yang <tainay@chelsio.com> * | ||
31 | * Felix Marti <felix@chelsio.com> * | ||
32 | * Scott Bardone <sbardone@chelsio.com> * | ||
33 | * Kurt Ottaway <kottaway@chelsio.com> * | ||
34 | * Frank DiMambro <frank@chelsio.com> * | ||
35 | * * | ||
36 | * History: * | ||
37 | * * | ||
38 | ****************************************************************************/ | ||
39 | |||
40 | #include "cphy.h" | ||
41 | #include "elmer0.h" | ||
42 | |||
43 | /* | ||
44 | * The 88x2010 Rev C requires some link status registers to be read | ||
45 | * twice in order to get the right values. Future revisions will fix | ||
46 | * this problem and then this macro can disappear. | ||
47 | */ | ||
48 | #define MV88x2010_LINK_STATUS_BUGS 1 | ||
49 | |||
50 | static int led_init(struct cphy *cphy) | ||
51 | { | ||
52 | /* Set up the LED registers so we can turn the LEDs on/off. | ||
53 | * Writing these bits maps control to another | ||
54 | * register. mmd(0x1) addr(0x7) | ||
55 | */ | ||
56 | mdio_write(cphy, 0x3, 0x8304, 0xdddd); | ||
57 | return 0; | ||
58 | } | ||
59 | |||
60 | static int led_link(struct cphy *cphy, u32 do_enable) | ||
61 | { | ||
62 | u32 led = 0; | ||
63 | #define LINK_ENABLE_BIT 0x1 | ||
64 | |||
65 | mdio_read(cphy, 0x1, 0x7, &led); | ||
66 | |||
67 | if (do_enable & LINK_ENABLE_BIT) { | ||
68 | led |= LINK_ENABLE_BIT; | ||
69 | mdio_write(cphy, 0x1, 0x7, led); | ||
70 | } else { | ||
71 | led &= ~LINK_ENABLE_BIT; | ||
72 | mdio_write(cphy, 0x1, 0x7, led); | ||
73 | } | ||
74 | return 0; | ||
75 | } | ||
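led_link() is the usual MDIO read-modify-write pattern; the same idea as a generic hedged helper (the helper name is hypothetical, the signatures match the mdio_read()/mdio_write() calls above):

/* Illustrative only: set or clear a single bit in an MDIO register. */
static void mdio_rmw_bit(struct cphy *cphy, int mmd, u32 reg, u32 bit,
			 int set)
{
	u32 val;

	mdio_read(cphy, mmd, reg, &val);
	if (set)
		val |= bit;
	else
		val &= ~bit;
	mdio_write(cphy, mmd, reg, val);
}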
76 | |||
77 | /* Port Reset */ | ||
78 | static int mv88x201x_reset(struct cphy *cphy, int wait) | ||
79 | { | ||
80 | /* This can be done through registers. It is not required since | ||
81 | * a full chip reset is used. | ||
82 | */ | ||
83 | return 0; | ||
84 | } | ||
85 | |||
86 | static int mv88x201x_interrupt_enable(struct cphy *cphy) | ||
87 | { | ||
88 | u32 elmer; | ||
89 | |||
90 | /* Enable PHY LASI interrupts. */ | ||
91 | mdio_write(cphy, 0x1, 0x9002, 0x1); | ||
92 | |||
93 | /* Enable Marvell interrupts through Elmer0. */ | ||
94 | t1_tpi_read(cphy->adapter, A_ELMER0_INT_ENABLE, &elmer); | ||
95 | elmer |= ELMER0_GP_BIT6; | ||
96 | t1_tpi_write(cphy->adapter, A_ELMER0_INT_ENABLE, elmer); | ||
97 | return 0; | ||
98 | } | ||
99 | |||
100 | static int mv88x201x_interrupt_disable(struct cphy *cphy) | ||
101 | { | ||
102 | u32 elmer; | ||
103 | |||
104 | /* Disable PHY LASI interrupts. */ | ||
105 | mdio_write(cphy, 0x1, 0x9002, 0x0); | ||
106 | |||
107 | /* Disable Marvell interrupts through Elmer0. */ | ||
108 | t1_tpi_read(cphy->adapter, A_ELMER0_INT_ENABLE, &elmer); | ||
109 | elmer &= ~ELMER0_GP_BIT6; | ||
110 | t1_tpi_write(cphy->adapter, A_ELMER0_INT_ENABLE, elmer); | ||
111 | return 0; | ||
112 | } | ||
113 | |||
114 | static int mv88x201x_interrupt_clear(struct cphy *cphy) | ||
115 | { | ||
116 | u32 elmer; | ||
117 | u32 val; | ||
118 | |||
119 | #ifdef MV88x2010_LINK_STATUS_BUGS | ||
120 | /* Required to read twice before the clear takes effect. | ||
121 | mdio_read(cphy, 0x1, 0x9003, &val); | ||
122 | mdio_read(cphy, 0x1, 0x9004, &val); | ||
123 | mdio_read(cphy, 0x1, 0x9005, &val); | ||
124 | |||
125 | /* Read this register after the others above it, else | ||
126 | * the register doesn't clear correctly. | ||
127 | */ | ||
128 | mdio_read(cphy, 0x1, 0x1, &val); | ||
129 | #endif | ||
130 | |||
131 | /* Clear link status. */ | ||
132 | mdio_read(cphy, 0x1, 0x1, &val); | ||
133 | /* Clear PHY LASI interrupts. */ | ||
134 | mdio_read(cphy, 0x1, 0x9005, &val); | ||
135 | |||
136 | #ifdef MV88x2010_LINK_STATUS_BUGS | ||
137 | /* Do it again. */ | ||
138 | mdio_read(cphy, 0x1, 0x9003, &val); | ||
139 | mdio_read(cphy, 0x1, 0x9004, &val); | ||
140 | #endif | ||
141 | |||
142 | /* Clear Marvell interrupts through Elmer0. */ | ||
143 | t1_tpi_read(cphy->adapter, A_ELMER0_INT_CAUSE, &elmer); | ||
144 | elmer |= ELMER0_GP_BIT6; | ||
145 | t1_tpi_write(cphy->adapter, A_ELMER0_INT_CAUSE, elmer); | ||
146 | return 0; | ||
147 | } | ||
148 | |||
149 | static int mv88x201x_interrupt_handler(struct cphy *cphy) | ||
150 | { | ||
151 | /* Clear interrupts */ | ||
152 | mv88x201x_interrupt_clear(cphy); | ||
153 | |||
154 | /* We have only enabled link change interrupts and so | ||
155 | * cphy_cause must be a link change interrupt. | ||
156 | */ | ||
157 | return cphy_cause_link_change; | ||
158 | } | ||
159 | |||
160 | static int mv88x201x_set_loopback(struct cphy *cphy, int on) | ||
161 | { | ||
162 | return 0; | ||
163 | } | ||
164 | |||
165 | static int mv88x201x_get_link_status(struct cphy *cphy, int *link_ok, | ||
166 | int *speed, int *duplex, int *fc) | ||
167 | { | ||
168 | u32 val = 0; | ||
169 | #define LINK_STATUS_BIT 0x4 | ||
170 | |||
171 | if (link_ok) { | ||
172 | /* Read link status. */ | ||
173 | mdio_read(cphy, 0x1, 0x1, &val); | ||
174 | val &= LINK_STATUS_BIT; | ||
175 | *link_ok = (val == LINK_STATUS_BIT); | ||
176 | /* Turn on/off Link LED */ | ||
177 | led_link(cphy, *link_ok); | ||
178 | } | ||
179 | if (speed) | ||
180 | *speed = SPEED_10000; | ||
181 | if (duplex) | ||
182 | *duplex = DUPLEX_FULL; | ||
183 | if (fc) | ||
184 | *fc = PAUSE_RX | PAUSE_TX; | ||
185 | return 0; | ||
186 | } | ||
187 | |||
188 | static void mv88x201x_destroy(struct cphy *cphy) | ||
189 | { | ||
190 | kfree(cphy); | ||
191 | } | ||
192 | |||
193 | static struct cphy_ops mv88x201x_ops = { | ||
194 | .destroy = mv88x201x_destroy, | ||
195 | .reset = mv88x201x_reset, | ||
196 | .interrupt_enable = mv88x201x_interrupt_enable, | ||
197 | .interrupt_disable = mv88x201x_interrupt_disable, | ||
198 | .interrupt_clear = mv88x201x_interrupt_clear, | ||
199 | .interrupt_handler = mv88x201x_interrupt_handler, | ||
200 | .get_link_status = mv88x201x_get_link_status, | ||
201 | .set_loopback = mv88x201x_set_loopback, | ||
202 | }; | ||
203 | |||
204 | static struct cphy *mv88x201x_phy_create(adapter_t *adapter, int phy_addr, | ||
205 | struct mdio_ops *mdio_ops) | ||
206 | { | ||
207 | u32 val; | ||
208 | struct cphy *cphy = kmalloc(sizeof(*cphy), GFP_KERNEL); | ||
209 | |||
210 | if (!cphy) | ||
211 | return NULL; | ||
212 | memset(cphy, 0, sizeof(*cphy)); | ||
213 | cphy_init(cphy, adapter, phy_addr, &mv88x201x_ops, mdio_ops); | ||
214 | |||
215 | /* Commands the PHY to enable XFP's clock. */ | ||
216 | mdio_read(cphy, 0x3, 0x8300, &val); | ||
217 | mdio_write(cphy, 0x3, 0x8300, val | 1); | ||
218 | |||
219 | /* Clear link status. Required because of a bug in the PHY. */ | ||
220 | mdio_read(cphy, 0x1, 0x8, &val); | ||
221 | mdio_read(cphy, 0x3, 0x8, &val); | ||
222 | |||
223 | /* Allows for Link,Ack LED turn on/off */ | ||
224 | led_init(cphy); | ||
225 | return cphy; | ||
226 | } | ||
227 | |||
228 | /* Chip Reset */ | ||
229 | static int mv88x201x_phy_reset(adapter_t *adapter) | ||
230 | { | ||
231 | u32 val; | ||
232 | |||
233 | t1_tpi_read(adapter, A_ELMER0_GPO, &val); | ||
234 | val &= ~4; | ||
235 | t1_tpi_write(adapter, A_ELMER0_GPO, val); | ||
236 | msleep(100); | ||
237 | |||
238 | t1_tpi_write(adapter, A_ELMER0_GPO, val | 4); | ||
239 | msleep(1000); | ||
240 | |||
241 | /* Now let's enable the laser. Delay 100us. */ | ||
242 | t1_tpi_read(adapter, A_ELMER0_GPO, &val); | ||
243 | val |= 0x8000; | ||
244 | t1_tpi_write(adapter, A_ELMER0_GPO, val); | ||
245 | udelay(100); | ||
246 | return 0; | ||
247 | } | ||
248 | |||
249 | struct gphy t1_mv88x201x_ops = { | ||
250 | mv88x201x_phy_create, | ||
251 | mv88x201x_phy_reset | ||
252 | }; | ||
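struct gphy (declared in cphy.h, which is not part of this diff) evidently pairs a PHY-create hook with a chip-reset hook, mirroring struct gmac above. Assuming its members are named create and reset like struct gmac's, the probe path would look roughly like:

/* Hypothetical usage -- member names assumed from struct gmac's layout. */
t1_mv88x201x_ops.reset(adapter);        /* hard-reset the chip first */
phy = t1_mv88x201x_ops.create(adapter, phy_addr, mdio_ops);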
diff --git a/drivers/net/chelsio/pm3393.c b/drivers/net/chelsio/pm3393.c new file mode 100644 index 000000000000..04a1404fc65e --- /dev/null +++ b/drivers/net/chelsio/pm3393.c | |||
@@ -0,0 +1,826 @@ | |||
1 | /***************************************************************************** | ||
2 | * * | ||
3 | * File: pm3393.c * | ||
4 | * $Revision: 1.16 $ * | ||
5 | * $Date: 2005/05/14 00:59:32 $ * | ||
6 | * Description: * | ||
7 | * PMC/SIERRA (pm3393) MAC-PHY functionality. * | ||
8 | * part of the Chelsio 10Gb Ethernet Driver. * | ||
9 | * * | ||
10 | * This program is free software; you can redistribute it and/or modify * | ||
11 | * it under the terms of the GNU General Public License, version 2, as * | ||
12 | * published by the Free Software Foundation. * | ||
13 | * * | ||
14 | * You should have received a copy of the GNU General Public License along * | ||
15 | * with this program; if not, write to the Free Software Foundation, Inc., * | ||
16 | * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. * | ||
17 | * * | ||
18 | * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED * | ||
19 | * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF * | ||
20 | * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. * | ||
21 | * * | ||
22 | * http://www.chelsio.com * | ||
23 | * * | ||
24 | * Copyright (c) 2003 - 2005 Chelsio Communications, Inc. * | ||
25 | * All rights reserved. * | ||
26 | * * | ||
27 | * Maintainers: maintainers@chelsio.com * | ||
28 | * * | ||
29 | * Authors: Dimitrios Michailidis <dm@chelsio.com> * | ||
30 | * Tina Yang <tainay@chelsio.com> * | ||
31 | * Felix Marti <felix@chelsio.com> * | ||
32 | * Scott Bardone <sbardone@chelsio.com> * | ||
33 | * Kurt Ottaway <kottaway@chelsio.com> * | ||
34 | * Frank DiMambro <frank@chelsio.com> * | ||
35 | * * | ||
36 | * History: * | ||
37 | * * | ||
38 | ****************************************************************************/ | ||
39 | |||
40 | #include "common.h" | ||
41 | #include "regs.h" | ||
42 | #include "gmac.h" | ||
43 | #include "elmer0.h" | ||
44 | #include "suni1x10gexp_regs.h" | ||
45 | |||
46 | /* 802.3ae 10Gb/s MDIO Manageable Device (MMD) | ||
47 | */ | ||
48 | enum { | ||
49 | MMD_RESERVED, | ||
50 | MMD_PMAPMD, | ||
51 | MMD_WIS, | ||
52 | MMD_PCS, | ||
53 | MMD_PHY_XGXS, /* XGMII Extender Sublayer */ | ||
54 | MMD_DTE_XGXS, | ||
55 | }; | ||
56 | |||
57 | enum { | ||
58 | PHY_XGXS_CTRL_1, | ||
59 | PHY_XGXS_STATUS_1 | ||
60 | }; | ||
61 | |||
62 | #define OFFSET(REG_ADDR) (REG_ADDR << 2) | ||
63 | |||
64 | /* Max frame size PM3393 can handle. Includes Ethernet header and CRC. */ | ||
65 | #define MAX_FRAME_SIZE 9600 | ||
66 | |||
67 | #define IPG 12 | ||
68 | #define TXXG_CONF1_VAL ((IPG << SUNI1x10GEXP_BITOFF_TXXG_IPGT) | \ | ||
69 | SUNI1x10GEXP_BITMSK_TXXG_32BIT_ALIGN | SUNI1x10GEXP_BITMSK_TXXG_CRCEN | \ | ||
70 | SUNI1x10GEXP_BITMSK_TXXG_PADEN) | ||
71 | #define RXXG_CONF1_VAL (SUNI1x10GEXP_BITMSK_RXXG_PUREP | 0x14 | \ | ||
72 | SUNI1x10GEXP_BITMSK_RXXG_FLCHK | SUNI1x10GEXP_BITMSK_RXXG_CRC_STRIP) | ||
73 | |||
74 | /* Update statistics every 15 minutes */ | ||
75 | #define STATS_TICK_SECS (15 * 60) | ||
76 | |||
77 | enum { /* RMON registers */ | ||
78 | RxOctetsReceivedOK = SUNI1x10GEXP_REG_MSTAT_COUNTER_1_LOW, | ||
79 | RxUnicastFramesReceivedOK = SUNI1x10GEXP_REG_MSTAT_COUNTER_4_LOW, | ||
80 | RxMulticastFramesReceivedOK = SUNI1x10GEXP_REG_MSTAT_COUNTER_5_LOW, | ||
81 | RxBroadcastFramesReceivedOK = SUNI1x10GEXP_REG_MSTAT_COUNTER_6_LOW, | ||
82 | RxPAUSEMACCtrlFramesReceived = SUNI1x10GEXP_REG_MSTAT_COUNTER_8_LOW, | ||
83 | RxFrameCheckSequenceErrors = SUNI1x10GEXP_REG_MSTAT_COUNTER_10_LOW, | ||
84 | RxFramesLostDueToInternalMACErrors = SUNI1x10GEXP_REG_MSTAT_COUNTER_11_LOW, | ||
85 | RxSymbolErrors = SUNI1x10GEXP_REG_MSTAT_COUNTER_12_LOW, | ||
86 | RxInRangeLengthErrors = SUNI1x10GEXP_REG_MSTAT_COUNTER_13_LOW, | ||
87 | RxFramesTooLongErrors = SUNI1x10GEXP_REG_MSTAT_COUNTER_15_LOW, | ||
88 | RxJabbers = SUNI1x10GEXP_REG_MSTAT_COUNTER_16_LOW, | ||
89 | RxFragments = SUNI1x10GEXP_REG_MSTAT_COUNTER_17_LOW, | ||
90 | RxUndersizedFrames = SUNI1x10GEXP_REG_MSTAT_COUNTER_18_LOW, | ||
91 | |||
92 | TxOctetsTransmittedOK = SUNI1x10GEXP_REG_MSTAT_COUNTER_33_LOW, | ||
93 | TxFramesLostDueToInternalMACTransmissionError = SUNI1x10GEXP_REG_MSTAT_COUNTER_35_LOW, | ||
94 | TxTransmitSystemError = SUNI1x10GEXP_REG_MSTAT_COUNTER_36_LOW, | ||
95 | TxUnicastFramesTransmittedOK = SUNI1x10GEXP_REG_MSTAT_COUNTER_38_LOW, | ||
96 | TxMulticastFramesTransmittedOK = SUNI1x10GEXP_REG_MSTAT_COUNTER_40_LOW, | ||
97 | TxBroadcastFramesTransmittedOK = SUNI1x10GEXP_REG_MSTAT_COUNTER_42_LOW, | ||
98 | TxPAUSEMACCtrlFramesTransmitted = SUNI1x10GEXP_REG_MSTAT_COUNTER_43_LOW | ||
99 | }; | ||
100 | |||
101 | struct _cmac_instance { | ||
102 | u8 enabled; | ||
103 | u8 fc; | ||
104 | u8 mac_addr[6]; | ||
105 | }; | ||
106 | |||
107 | static int pmread(struct cmac *cmac, u32 reg, u32 *data32) | ||
108 | { | ||
109 | t1_tpi_read(cmac->adapter, OFFSET(reg), data32); | ||
110 | return 0; | ||
111 | } | ||
112 | |||
113 | static int pmwrite(struct cmac *cmac, u32 reg, u32 data32) | ||
114 | { | ||
115 | t1_tpi_write(cmac->adapter, OFFSET(reg), data32); | ||
116 | return 0; | ||
117 | } | ||
118 | |||
119 | /* Port reset. */ | ||
120 | static int pm3393_reset(struct cmac *cmac) | ||
121 | { | ||
122 | return 0; | ||
123 | } | ||
124 | |||
125 | /* | ||
126 | * Enable interrupts for the PM3393 | ||
127 | * | ||
128 | * 1. Enable PM3393 BLOCK interrupts. | ||
129 | * 2. Enable PM3393 Master Interrupt bit (INTE). | ||
130 | * 3. Enable ELMER's PM3393 bit. | ||
131 | * 4. Enable Terminator external interrupt. | ||
132 | */ | ||
133 | static int pm3393_interrupt_enable(struct cmac *cmac) | ||
134 | { | ||
135 | u32 pl_intr; | ||
136 | |||
137 | /* PM3393 - Enabling all hardware block interrupts. | ||
138 | */ | ||
139 | pmwrite(cmac, SUNI1x10GEXP_REG_SERDES_3125_INTERRUPT_ENABLE, 0xffff); | ||
140 | pmwrite(cmac, SUNI1x10GEXP_REG_XRF_INTERRUPT_ENABLE, 0xffff); | ||
141 | pmwrite(cmac, SUNI1x10GEXP_REG_XRF_DIAG_INTERRUPT_ENABLE, 0xffff); | ||
142 | pmwrite(cmac, SUNI1x10GEXP_REG_RXOAM_INTERRUPT_ENABLE, 0xffff); | ||
143 | |||
144 | /* Don't interrupt on statistics overflow, we are polling */ | ||
145 | pmwrite(cmac, SUNI1x10GEXP_REG_MSTAT_INTERRUPT_MASK_0, 0); | ||
146 | pmwrite(cmac, SUNI1x10GEXP_REG_MSTAT_INTERRUPT_MASK_1, 0); | ||
147 | pmwrite(cmac, SUNI1x10GEXP_REG_MSTAT_INTERRUPT_MASK_2, 0); | ||
148 | pmwrite(cmac, SUNI1x10GEXP_REG_MSTAT_INTERRUPT_MASK_3, 0); | ||
149 | |||
150 | pmwrite(cmac, SUNI1x10GEXP_REG_IFLX_FIFO_OVERFLOW_ENABLE, 0xffff); | ||
151 | pmwrite(cmac, SUNI1x10GEXP_REG_PL4ODP_INTERRUPT_MASK, 0xffff); | ||
152 | pmwrite(cmac, SUNI1x10GEXP_REG_XTEF_INTERRUPT_ENABLE, 0xffff); | ||
153 | pmwrite(cmac, SUNI1x10GEXP_REG_TXOAM_INTERRUPT_ENABLE, 0xffff); | ||
154 | pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_CONFIG_3, 0xffff); | ||
155 | pmwrite(cmac, SUNI1x10GEXP_REG_PL4IO_LOCK_DETECT_MASK, 0xffff); | ||
156 | pmwrite(cmac, SUNI1x10GEXP_REG_TXXG_CONFIG_3, 0xffff); | ||
157 | pmwrite(cmac, SUNI1x10GEXP_REG_PL4IDU_INTERRUPT_MASK, 0xffff); | ||
158 | pmwrite(cmac, SUNI1x10GEXP_REG_EFLX_FIFO_OVERFLOW_ERROR_ENABLE, 0xffff); | ||
159 | |||
160 | /* PM3393 - Global interrupt enable | ||
161 | */ | ||
162 | /* TBD XXX Disable for now until we figure out why error interrupts keep asserting. */ | ||
163 | pmwrite(cmac, SUNI1x10GEXP_REG_GLOBAL_INTERRUPT_ENABLE, | ||
164 | 0 /*SUNI1x10GEXP_BITMSK_TOP_INTE */ ); | ||
165 | |||
166 | /* TERMINATOR - PL_INTERUPTS_EXT */ | ||
167 | pl_intr = readl(cmac->adapter->regs + A_PL_ENABLE); | ||
168 | pl_intr |= F_PL_INTR_EXT; | ||
169 | writel(pl_intr, cmac->adapter->regs + A_PL_ENABLE); | ||
170 | return 0; | ||
171 | } | ||
172 | |||
173 | static int pm3393_interrupt_disable(struct cmac *cmac) | ||
174 | { | ||
175 | u32 elmer; | ||
176 | |||
177 | /* PM3393 - Disabling HW interrupt blocks. */ | ||
178 | pmwrite(cmac, SUNI1x10GEXP_REG_SERDES_3125_INTERRUPT_ENABLE, 0); | ||
179 | pmwrite(cmac, SUNI1x10GEXP_REG_XRF_INTERRUPT_ENABLE, 0); | ||
180 | pmwrite(cmac, SUNI1x10GEXP_REG_XRF_DIAG_INTERRUPT_ENABLE, 0); | ||
181 | pmwrite(cmac, SUNI1x10GEXP_REG_RXOAM_INTERRUPT_ENABLE, 0); | ||
182 | pmwrite(cmac, SUNI1x10GEXP_REG_MSTAT_INTERRUPT_MASK_0, 0); | ||
183 | pmwrite(cmac, SUNI1x10GEXP_REG_MSTAT_INTERRUPT_MASK_1, 0); | ||
184 | pmwrite(cmac, SUNI1x10GEXP_REG_MSTAT_INTERRUPT_MASK_2, 0); | ||
185 | pmwrite(cmac, SUNI1x10GEXP_REG_MSTAT_INTERRUPT_MASK_3, 0); | ||
186 | pmwrite(cmac, SUNI1x10GEXP_REG_IFLX_FIFO_OVERFLOW_ENABLE, 0); | ||
187 | pmwrite(cmac, SUNI1x10GEXP_REG_PL4ODP_INTERRUPT_MASK, 0); | ||
188 | pmwrite(cmac, SUNI1x10GEXP_REG_XTEF_INTERRUPT_ENABLE, 0); | ||
189 | pmwrite(cmac, SUNI1x10GEXP_REG_TXOAM_INTERRUPT_ENABLE, 0); | ||
190 | pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_CONFIG_3, 0); | ||
191 | pmwrite(cmac, SUNI1x10GEXP_REG_PL4IO_LOCK_DETECT_MASK, 0); | ||
192 | pmwrite(cmac, SUNI1x10GEXP_REG_TXXG_CONFIG_3, 0); | ||
193 | pmwrite(cmac, SUNI1x10GEXP_REG_PL4IDU_INTERRUPT_MASK, 0); | ||
194 | pmwrite(cmac, SUNI1x10GEXP_REG_EFLX_FIFO_OVERFLOW_ERROR_ENABLE, 0); | ||
195 | |||
196 | /* PM3393 - Global interrupt enable */ | ||
197 | pmwrite(cmac, SUNI1x10GEXP_REG_GLOBAL_INTERRUPT_ENABLE, 0); | ||
198 | |||
199 | /* ELMER - External chip interrupts. */ | ||
200 | t1_tpi_read(cmac->adapter, A_ELMER0_INT_ENABLE, &elmer); | ||
201 | elmer &= ~ELMER0_GP_BIT1; | ||
202 | t1_tpi_write(cmac->adapter, A_ELMER0_INT_ENABLE, elmer); | ||
203 | |||
204 | /* TERMINATOR - PL_INTERUPTS_EXT */ | ||
205 | /* DO NOT DISABLE TERMINATOR's EXTERNAL INTERRUPTS. ANOTHER CHIP | ||
206 | * COULD WANT THEM ENABLED. We disable PM3393 at the ELMER level. | ||
207 | */ | ||
208 | |||
209 | return 0; | ||
210 | } | ||
211 | |||
212 | static int pm3393_interrupt_clear(struct cmac *cmac) | ||
213 | { | ||
214 | u32 elmer; | ||
215 | u32 pl_intr; | ||
216 | u32 val32; | ||
217 | |||
218 | /* PM3393 - Clearing HW interrupt blocks. Note, this assumes | ||
219 | * bit WCIMODE=0 for a clear-on-read. | ||
220 | */ | ||
221 | pmread(cmac, SUNI1x10GEXP_REG_SERDES_3125_INTERRUPT_STATUS, &val32); | ||
222 | pmread(cmac, SUNI1x10GEXP_REG_XRF_INTERRUPT_STATUS, &val32); | ||
223 | pmread(cmac, SUNI1x10GEXP_REG_XRF_DIAG_INTERRUPT_STATUS, &val32); | ||
224 | pmread(cmac, SUNI1x10GEXP_REG_RXOAM_INTERRUPT_STATUS, &val32); | ||
225 | pmread(cmac, SUNI1x10GEXP_REG_PL4ODP_INTERRUPT, &val32); | ||
226 | pmread(cmac, SUNI1x10GEXP_REG_XTEF_INTERRUPT_STATUS, &val32); | ||
227 | pmread(cmac, SUNI1x10GEXP_REG_IFLX_FIFO_OVERFLOW_INTERRUPT, &val32); | ||
228 | pmread(cmac, SUNI1x10GEXP_REG_TXOAM_INTERRUPT_STATUS, &val32); | ||
229 | pmread(cmac, SUNI1x10GEXP_REG_RXXG_INTERRUPT, &val32); | ||
230 | pmread(cmac, SUNI1x10GEXP_REG_TXXG_INTERRUPT, &val32); | ||
231 | pmread(cmac, SUNI1x10GEXP_REG_PL4IDU_INTERRUPT, &val32); | ||
232 | pmread(cmac, SUNI1x10GEXP_REG_EFLX_FIFO_OVERFLOW_ERROR_INDICATION, | ||
233 | &val32); | ||
234 | pmread(cmac, SUNI1x10GEXP_REG_PL4IO_LOCK_DETECT_STATUS, &val32); | ||
235 | pmread(cmac, SUNI1x10GEXP_REG_PL4IO_LOCK_DETECT_CHANGE, &val32); | ||
236 | |||
237 | /* PM3393 - Global interrupt status | ||
238 | */ | ||
239 | pmread(cmac, SUNI1x10GEXP_REG_MASTER_INTERRUPT_STATUS, &val32); | ||
240 | |||
241 | /* ELMER - External chip interrupts. | ||
242 | */ | ||
243 | t1_tpi_read(cmac->adapter, A_ELMER0_INT_CAUSE, &elmer); | ||
244 | elmer |= ELMER0_GP_BIT1; | ||
245 | t1_tpi_write(cmac->adapter, A_ELMER0_INT_CAUSE, elmer); | ||
246 | |||
247 | /* TERMINATOR - PL_INTERUPTS_EXT | ||
248 | */ | ||
249 | pl_intr = readl(cmac->adapter->regs + A_PL_CAUSE); | ||
250 | pl_intr |= F_PL_INTR_EXT; | ||
251 | writel(pl_intr, cmac->adapter->regs + A_PL_CAUSE); | ||
252 | |||
253 | return 0; | ||
254 | } | ||
255 | |||
256 | /* Interrupt handler */ | ||
257 | static int pm3393_interrupt_handler(struct cmac *cmac) | ||
258 | { | ||
259 | u32 master_intr_status; | ||
260 | /* | ||
261 | * 1. Read master interrupt register. | ||
262 | * 2. Read BLOCK's interrupt status registers. | ||
263 | * 3. Handle BLOCK interrupts. | ||
264 | */ | ||
265 | /* Read the master interrupt status register. */ | ||
266 | pmread(cmac, SUNI1x10GEXP_REG_MASTER_INTERRUPT_STATUS, | ||
267 | &master_intr_status); | ||
268 | |||
269 | /* TBD XXX Let's just clear everything for now. */ | ||
270 | pm3393_interrupt_clear(cmac); | ||
271 | |||
272 | return 0; | ||
273 | } | ||
274 | |||
275 | static int pm3393_enable(struct cmac *cmac, int which) | ||
276 | { | ||
277 | if (which & MAC_DIRECTION_RX) | ||
278 | pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_CONFIG_1, | ||
279 | (RXXG_CONF1_VAL | SUNI1x10GEXP_BITMSK_RXXG_RXEN)); | ||
280 | |||
281 | if (which & MAC_DIRECTION_TX) { | ||
282 | u32 val = TXXG_CONF1_VAL | SUNI1x10GEXP_BITMSK_TXXG_TXEN0; | ||
283 | |||
284 | if (cmac->instance->fc & PAUSE_RX) | ||
285 | val |= SUNI1x10GEXP_BITMSK_TXXG_FCRX; | ||
286 | if (cmac->instance->fc & PAUSE_TX) | ||
287 | val |= SUNI1x10GEXP_BITMSK_TXXG_FCTX; | ||
288 | pmwrite(cmac, SUNI1x10GEXP_REG_TXXG_CONFIG_1, val); | ||
289 | } | ||
290 | |||
291 | cmac->instance->enabled |= which; | ||
292 | return 0; | ||
293 | } | ||
294 | |||
295 | static int pm3393_enable_port(struct cmac *cmac, int which) | ||
296 | { | ||
297 | /* Clear port statistics */ | ||
298 | pmwrite(cmac, SUNI1x10GEXP_REG_MSTAT_CONTROL, | ||
299 | SUNI1x10GEXP_BITMSK_MSTAT_CLEAR); | ||
300 | udelay(2); | ||
301 | memset(&cmac->stats, 0, sizeof(struct cmac_statistics)); | ||
302 | |||
303 | pm3393_enable(cmac, which); | ||
304 | |||
305 | /* | ||
306 | * XXX This should be done by the PHY and preferably not at all. | ||
307 | * The PHY doesn't give us link status indication on its own, so have | ||
308 | * the link management code query it instead. | ||
309 | */ | ||
310 | { | ||
311 | extern void link_changed(adapter_t *adapter, int port_id); | ||
312 | |||
313 | link_changed(cmac->adapter, 0); | ||
314 | } | ||
315 | return 0; | ||
316 | } | ||
317 | |||
318 | static int pm3393_disable(struct cmac *cmac, int which) | ||
319 | { | ||
320 | if (which & MAC_DIRECTION_RX) | ||
321 | pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_CONFIG_1, RXXG_CONF1_VAL); | ||
322 | if (which & MAC_DIRECTION_TX) | ||
323 | pmwrite(cmac, SUNI1x10GEXP_REG_TXXG_CONFIG_1, TXXG_CONF1_VAL); | ||
324 | |||
325 | /* | ||
326 | * The disable is graceful. Give the PM3393 time. Can't wait very | ||
327 | * long here, we may be holding locks. | ||
328 | */ | ||
329 | udelay(20); | ||
330 | |||
331 | cmac->instance->enabled &= ~which; | ||
332 | return 0; | ||
333 | } | ||
334 | |||
335 | static int pm3393_loopback_enable(struct cmac *cmac) | ||
336 | { | ||
337 | return 0; | ||
338 | } | ||
339 | |||
340 | static int pm3393_loopback_disable(struct cmac *cmac) | ||
341 | { | ||
342 | return 0; | ||
343 | } | ||
344 | |||
345 | static int pm3393_set_mtu(struct cmac *cmac, int mtu) | ||
346 | { | ||
347 | int enabled = cmac->instance->enabled; | ||
348 | |||
349 | /* MAX_FRAME_SIZE includes header + FCS, mtu doesn't */ | ||
350 | mtu += 14 + 4; | ||
351 | if (mtu > MAX_FRAME_SIZE) | ||
352 | return -EINVAL; | ||
353 | |||
354 | /* Disable Rx/Tx MAC before configuring it. */ | ||
355 | if (enabled) | ||
356 | pm3393_disable(cmac, MAC_DIRECTION_RX | MAC_DIRECTION_TX); | ||
357 | |||
358 | pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_MAX_FRAME_LENGTH, mtu); | ||
359 | pmwrite(cmac, SUNI1x10GEXP_REG_TXXG_MAX_FRAME_SIZE, mtu); | ||
360 | |||
361 | if (enabled) | ||
362 | pm3393_enable(cmac, enabled); | ||
363 | return 0; | ||
364 | } | ||
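Worked example: with MAX_FRAME_SIZE at 9600 bytes, the largest MTU accepted here is 9600 - 14 - 4 = 9582; a standard 1500-byte MTU programs the chip's frame-length registers with 1518.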
365 | |||
366 | static u32 calc_crc(u8 *b, int len) | ||
367 | { | ||
368 | int i; | ||
369 | u32 crc = (u32)~0; | ||
370 | |||
371 | /* calculate crc one bit at a time */ | ||
372 | while (len--) { | ||
373 | crc ^= *b++; | ||
374 | for (i = 0; i < 8; i++) { | ||
375 | if (crc & 0x1) | ||
376 | crc = (crc >> 1) ^ 0xedb88320; | ||
377 | else | ||
378 | crc = (crc >> 1); | ||
379 | } | ||
380 | } | ||
381 | |||
382 | /* reverse bits */ | ||
383 | crc = ((crc >> 4) & 0x0f0f0f0f) | ((crc << 4) & 0xf0f0f0f0); | ||
384 | crc = ((crc >> 2) & 0x33333333) | ((crc << 2) & 0xcccccccc); | ||
385 | crc = ((crc >> 1) & 0x55555555) | ((crc << 1) & 0xaaaaaaaa); | ||
386 | /* swap bytes */ | ||
387 | crc = (crc >> 16) | (crc << 16); | ||
388 | crc = (crc >> 8 & 0x00ff00ff) | (crc << 8 & 0xff00ff00); | ||
389 | |||
390 | return crc; | ||
391 | } | ||
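calc_crc() is the reflected CRC-32 (polynomial 0xedb88320) followed by a full 32-bit bit reversal; pm3393_set_rx_mode() below keeps six bits (bits 23-28) of the result as the multicast hash index. A standalone, hedged userspace sketch of that index computation (the sample address is arbitrary):

#include <stdio.h>
#include <stdint.h>

/* Same algorithm as calc_crc() above, in userspace types. */
static uint32_t calc_crc(const uint8_t *b, int len)
{
	uint32_t crc = ~0u;
	int i;

	while (len--) {
		crc ^= *b++;
		for (i = 0; i < 8; i++)
			crc = (crc & 1) ? (crc >> 1) ^ 0xedb88320 : crc >> 1;
	}
	/* reverse bits, then swap bytes: a full 32-bit bit reversal */
	crc = ((crc >> 4) & 0x0f0f0f0f) | ((crc << 4) & 0xf0f0f0f0);
	crc = ((crc >> 2) & 0x33333333) | ((crc << 2) & 0xcccccccc);
	crc = ((crc >> 1) & 0x55555555) | ((crc << 1) & 0xaaaaaaaa);
	crc = (crc >> 16) | (crc << 16);
	crc = ((crc >> 8) & 0x00ff00ff) | ((crc << 8) & 0xff00ff00);
	return crc;
}

int main(void)
{
	uint8_t mc[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 }; /* sample */
	int bit = (calc_crc(mc, 6) >> 23) & 0x3f;

	/* word index (0..3) and bit position within that 16-bit word */
	printf("hash bit %d -> mc_filter[%d] |= 1 << %d\n",
	       bit, bit >> 4, bit & 0xf);
	return 0;
}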
392 | |||
393 | static int pm3393_set_rx_mode(struct cmac *cmac, struct t1_rx_mode *rm) | ||
394 | { | ||
395 | int enabled = cmac->instance->enabled & MAC_DIRECTION_RX; | ||
396 | u32 rx_mode; | ||
397 | |||
398 | /* Disable MAC RX before reconfiguring it */ | ||
399 | if (enabled) | ||
400 | pm3393_disable(cmac, MAC_DIRECTION_RX); | ||
401 | |||
402 | pmread(cmac, SUNI1x10GEXP_REG_RXXG_ADDRESS_FILTER_CONTROL_2, &rx_mode); | ||
403 | rx_mode &= ~(SUNI1x10GEXP_BITMSK_RXXG_PMODE | | ||
404 | SUNI1x10GEXP_BITMSK_RXXG_MHASH_EN); | ||
405 | pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_ADDRESS_FILTER_CONTROL_2, | ||
406 | (u16)rx_mode); | ||
407 | |||
408 | if (t1_rx_mode_promisc(rm)) { | ||
409 | /* Promiscuous mode. */ | ||
410 | rx_mode |= SUNI1x10GEXP_BITMSK_RXXG_PMODE; | ||
411 | } | ||
412 | if (t1_rx_mode_allmulti(rm)) { | ||
413 | /* Accept all multicast. */ | ||
414 | pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_MULTICAST_HASH_LOW, 0xffff); | ||
415 | pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_MULTICAST_HASH_MIDLOW, 0xffff); | ||
416 | pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_MULTICAST_HASH_MIDHIGH, 0xffff); | ||
417 | pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_MULTICAST_HASH_HIGH, 0xffff); | ||
418 | rx_mode |= SUNI1x10GEXP_BITMSK_RXXG_MHASH_EN; | ||
419 | } else if (t1_rx_mode_mc_cnt(rm)) { | ||
420 | /* Accept one or more multicast(s). */ | ||
421 | u8 *addr; | ||
422 | int bit; | ||
423 | u16 mc_filter[4] = { 0, }; | ||
424 | |||
425 | while ((addr = t1_get_next_mcaddr(rm))) { | ||
426 | bit = (calc_crc(addr, ETH_ALEN) >> 23) & 0x3f; /* bit[23:28] */ | ||
427 | mc_filter[bit >> 4] |= 1 << (bit & 0xf); | ||
428 | } | ||
429 | pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_MULTICAST_HASH_LOW, mc_filter[0]); | ||
430 | pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_MULTICAST_HASH_MIDLOW, mc_filter[1]); | ||
431 | pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_MULTICAST_HASH_MIDHIGH, mc_filter[2]); | ||
432 | pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_MULTICAST_HASH_HIGH, mc_filter[3]); | ||
433 | rx_mode |= SUNI1x10GEXP_BITMSK_RXXG_MHASH_EN; | ||
434 | } | ||
435 | |||
436 | pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_ADDRESS_FILTER_CONTROL_2, (u16)rx_mode); | ||
437 | |||
438 | if (enabled) | ||
439 | pm3393_enable(cmac, MAC_DIRECTION_RX); | ||
440 | |||
441 | return 0; | ||
442 | } | ||
443 | |||
444 | static int pm3393_get_speed_duplex_fc(struct cmac *cmac, int *speed, | ||
445 | int *duplex, int *fc) | ||
446 | { | ||
447 | if (speed) | ||
448 | *speed = SPEED_10000; | ||
449 | if (duplex) | ||
450 | *duplex = DUPLEX_FULL; | ||
451 | if (fc) | ||
452 | *fc = cmac->instance->fc; | ||
453 | return 0; | ||
454 | } | ||
455 | |||
456 | static int pm3393_set_speed_duplex_fc(struct cmac *cmac, int speed, int duplex, | ||
457 | int fc) | ||
458 | { | ||
459 | if (speed >= 0 && speed != SPEED_10000) | ||
460 | return -1; | ||
461 | if (duplex >= 0 && duplex != DUPLEX_FULL) | ||
462 | return -1; | ||
463 | if (fc & ~(PAUSE_TX | PAUSE_RX)) | ||
464 | return -1; | ||
465 | |||
466 | if (fc != cmac->instance->fc) { | ||
467 | cmac->instance->fc = (u8) fc; | ||
468 | if (cmac->instance->enabled & MAC_DIRECTION_TX) | ||
469 | pm3393_enable(cmac, MAC_DIRECTION_TX); | ||
470 | } | ||
471 | return 0; | ||
472 | } | ||
473 | |||
474 | #define RMON_UPDATE(mac, name, stat_name) \ | ||
475 | { \ | ||
476 | t1_tpi_read((mac)->adapter, OFFSET(name), &val0); \ | ||
477 | t1_tpi_read((mac)->adapter, OFFSET(((name)+1)), &val1); \ | ||
478 | t1_tpi_read((mac)->adapter, OFFSET(((name)+2)), &val2); \ | ||
479 | (mac)->stats.stat_name = ((u64)val0 & 0xffff) | \ | ||
480 | (((u64)val1 & 0xffff) << 16) | \ | ||
481 | (((u64)val2 & 0xff) << 32) | \ | ||
482 | ((mac)->stats.stat_name & \ | ||
483 | (~(u64)0 << 40)); \ | ||
484 | if (ro & ((u64)1 << \ | ||
485 | ((name - SUNI1x10GEXP_REG_MSTAT_COUNTER_0_LOW) >> 2))) \ | ||
486 | (mac)->stats.stat_name += ((u64)1 << 40); \ | ||
487 | } | ||
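Each hardware counter is only 40 bits wide: val0 and val1 carry 16 bits each and val2 the top 8. The macro keeps the software-maintained bits above bit 40 and, when the rollover word flags this counter (the (u64)1 << index test), credits one full wrap. The same logic as a plain function, purely for readability (names hypothetical):

/* Illustrative only: widen one 40-bit snapshot into a 64-bit counter. */
static u64 widen_counter(u64 sw_counter, u32 val0, u32 val1, u32 val2,
			 int rolled_over)
{
	u64 hw40 = ((u64)val0 & 0xffff) |
		   (((u64)val1 & 0xffff) << 16) |
		   (((u64)val2 & 0xff) << 32);

	sw_counter = (sw_counter & (~(u64)0 << 40)) | hw40;
	if (rolled_over)
		sw_counter += (u64)1 << 40;
	return sw_counter;
}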
488 | |||
489 | static const struct cmac_statistics *pm3393_update_statistics(struct cmac *mac, | ||
490 | int flag) | ||
491 | { | ||
492 | u64 ro; | ||
493 | u32 val0, val1, val2, val3; | ||
494 | |||
495 | /* Snap the counters */ | ||
496 | pmwrite(mac, SUNI1x10GEXP_REG_MSTAT_CONTROL, | ||
497 | SUNI1x10GEXP_BITMSK_MSTAT_SNAP); | ||
498 | |||
499 | /* Counter rollover, clear on read */ | ||
500 | pmread(mac, SUNI1x10GEXP_REG_MSTAT_COUNTER_ROLLOVER_0, &val0); | ||
501 | pmread(mac, SUNI1x10GEXP_REG_MSTAT_COUNTER_ROLLOVER_1, &val1); | ||
502 | pmread(mac, SUNI1x10GEXP_REG_MSTAT_COUNTER_ROLLOVER_2, &val2); | ||
503 | pmread(mac, SUNI1x10GEXP_REG_MSTAT_COUNTER_ROLLOVER_3, &val3); | ||
504 | ro = ((u64)val0 & 0xffff) | (((u64)val1 & 0xffff) << 16) | | ||
505 | (((u64)val2 & 0xffff) << 32) | (((u64)val3 & 0xffff) << 48); | ||
506 | |||
507 | /* Rx stats */ | ||
508 | RMON_UPDATE(mac, RxOctetsReceivedOK, RxOctetsOK); | ||
509 | RMON_UPDATE(mac, RxUnicastFramesReceivedOK, RxUnicastFramesOK); | ||
510 | RMON_UPDATE(mac, RxMulticastFramesReceivedOK, RxMulticastFramesOK); | ||
511 | RMON_UPDATE(mac, RxBroadcastFramesReceivedOK, RxBroadcastFramesOK); | ||
512 | RMON_UPDATE(mac, RxPAUSEMACCtrlFramesReceived, RxPauseFrames); | ||
513 | RMON_UPDATE(mac, RxFrameCheckSequenceErrors, RxFCSErrors); | ||
514 | RMON_UPDATE(mac, RxFramesLostDueToInternalMACErrors, | ||
515 | RxInternalMACRcvError); | ||
516 | RMON_UPDATE(mac, RxSymbolErrors, RxSymbolErrors); | ||
517 | RMON_UPDATE(mac, RxInRangeLengthErrors, RxInRangeLengthErrors); | ||
518 | RMON_UPDATE(mac, RxFramesTooLongErrors , RxFrameTooLongErrors); | ||
519 | RMON_UPDATE(mac, RxJabbers, RxJabberErrors); | ||
520 | RMON_UPDATE(mac, RxFragments, RxRuntErrors); | ||
521 | RMON_UPDATE(mac, RxUndersizedFrames, RxRuntErrors); | ||
522 | |||
523 | /* Tx stats */ | ||
524 | RMON_UPDATE(mac, TxOctetsTransmittedOK, TxOctetsOK); | ||
525 | RMON_UPDATE(mac, TxFramesLostDueToInternalMACTransmissionError, | ||
526 | TxInternalMACXmitError); | ||
527 | RMON_UPDATE(mac, TxTransmitSystemError, TxFCSErrors); | ||
528 | RMON_UPDATE(mac, TxUnicastFramesTransmittedOK, TxUnicastFramesOK); | ||
529 | RMON_UPDATE(mac, TxMulticastFramesTransmittedOK, TxMulticastFramesOK); | ||
530 | RMON_UPDATE(mac, TxBroadcastFramesTransmittedOK, TxBroadcastFramesOK); | ||
531 | RMON_UPDATE(mac, TxPAUSEMACCtrlFramesTransmitted, TxPauseFrames); | ||
532 | |||
533 | return &mac->stats; | ||
534 | } | ||
535 | |||
536 | static int pm3393_macaddress_get(struct cmac *cmac, u8 mac_addr[6]) | ||
537 | { | ||
538 | memcpy(mac_addr, cmac->instance->mac_addr, 6); | ||
539 | return 0; | ||
540 | } | ||
541 | |||
542 | static int pm3393_macaddress_set(struct cmac *cmac, u8 ma[6]) | ||
543 | { | ||
544 | u32 val, lo, mid, hi, enabled = cmac->instance->enabled; | ||
545 | |||
546 | /* | ||
547 | * MAC addr: 00:07:43:00:13:09 | ||
548 | * | ||
549 | * ma[5] = 0x09 | ||
550 | * ma[4] = 0x13 | ||
551 | * ma[3] = 0x00 | ||
552 | * ma[2] = 0x43 | ||
553 | * ma[1] = 0x07 | ||
554 | * ma[0] = 0x00 | ||
555 | * | ||
556 | * The PM3393 requires byte swapping and reverse order entry | ||
557 | * when programming MAC addresses: | ||
558 | * | ||
559 | * low_bits[15:0] = ma[1]:ma[0] | ||
560 | * mid_bits[31:16] = ma[3]:ma[2] | ||
561 | * high_bits[47:32] = ma[5]:ma[4] | ||
562 | */ | ||
563 | |||
564 | /* Store local copy */ | ||
565 | memcpy(cmac->instance->mac_addr, ma, 6); | ||
566 | |||
567 | lo = ((u32) ma[1] << 8) | (u32) ma[0]; | ||
568 | mid = ((u32) ma[3] << 8) | (u32) ma[2]; | ||
569 | hi = ((u32) ma[5] << 8) | (u32) ma[4]; | ||
570 | |||
571 | /* Disable Rx/Tx MAC before configuring it. */ | ||
572 | if (enabled) | ||
573 | pm3393_disable(cmac, MAC_DIRECTION_RX | MAC_DIRECTION_TX); | ||
574 | |||
575 | /* Set RXXG Station Address */ | ||
576 | pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_SA_15_0, lo); | ||
577 | pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_SA_31_16, mid); | ||
578 | pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_SA_47_32, hi); | ||
579 | |||
580 | /* Set TXXG Station Address */ | ||
581 | pmwrite(cmac, SUNI1x10GEXP_REG_TXXG_SA_15_0, lo); | ||
582 | pmwrite(cmac, SUNI1x10GEXP_REG_TXXG_SA_31_16, mid); | ||
583 | pmwrite(cmac, SUNI1x10GEXP_REG_TXXG_SA_47_32, hi); | ||
584 | |||
585 | /* Setup Exact Match Filter 1 with our MAC address | ||
586 | * | ||
587 | * Must disable exact match filter before configuring it. | ||
588 | */ | ||
589 | pmread(cmac, SUNI1x10GEXP_REG_RXXG_ADDRESS_FILTER_CONTROL_0, &val); | ||
590 | val &= 0xff0f; | ||
591 | pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_ADDRESS_FILTER_CONTROL_0, val); | ||
592 | |||
593 | pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_1_LOW, lo); | ||
594 | pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_1_MID, mid); | ||
595 | pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_1_HIGH, hi); | ||
596 | |||
597 | val |= 0x0090; | ||
598 | pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_ADDRESS_FILTER_CONTROL_0, val); | ||
599 | |||
600 | if (enabled) | ||
601 | pm3393_enable(cmac, enabled); | ||
602 | return 0; | ||
603 | } | ||
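Working the sample address above through the packing: lo = (0x07 << 8) | 0x00 = 0x0700, mid = (0x00 << 8) | 0x43 = 0x0043, hi = (0x09 << 8) | 0x13 = 0x0913.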
604 | |||
605 | static void pm3393_destroy(struct cmac *cmac) | ||
606 | { | ||
607 | kfree(cmac); | ||
608 | } | ||
609 | |||
610 | static struct cmac_ops pm3393_ops = { | ||
611 | .destroy = pm3393_destroy, | ||
612 | .reset = pm3393_reset, | ||
613 | .interrupt_enable = pm3393_interrupt_enable, | ||
614 | .interrupt_disable = pm3393_interrupt_disable, | ||
615 | .interrupt_clear = pm3393_interrupt_clear, | ||
616 | .interrupt_handler = pm3393_interrupt_handler, | ||
617 | .enable = pm3393_enable_port, | ||
618 | .disable = pm3393_disable, | ||
619 | .loopback_enable = pm3393_loopback_enable, | ||
620 | .loopback_disable = pm3393_loopback_disable, | ||
621 | .set_mtu = pm3393_set_mtu, | ||
622 | .set_rx_mode = pm3393_set_rx_mode, | ||
623 | .get_speed_duplex_fc = pm3393_get_speed_duplex_fc, | ||
624 | .set_speed_duplex_fc = pm3393_set_speed_duplex_fc, | ||
625 | .statistics_update = pm3393_update_statistics, | ||
626 | .macaddress_get = pm3393_macaddress_get, | ||
627 | .macaddress_set = pm3393_macaddress_set | ||
628 | }; | ||
629 | |||
630 | static struct cmac *pm3393_mac_create(adapter_t *adapter, int index) | ||
631 | { | ||
632 | struct cmac *cmac; | ||
633 | |||
634 | cmac = kmalloc(sizeof(*cmac) + sizeof(cmac_instance), GFP_KERNEL); | ||
635 | if (!cmac) | ||
636 | return NULL; | ||
637 | memset(cmac, 0, sizeof(*cmac) + sizeof(cmac_instance)); | ||
638 | |||
639 | cmac->ops = &pm3393_ops; | ||
640 | cmac->instance = (cmac_instance *) (cmac + 1); | ||
641 | cmac->adapter = adapter; | ||
642 | cmac->instance->fc = PAUSE_TX | PAUSE_RX; | ||
643 | |||
644 | t1_tpi_write(adapter, OFFSET(0x0001), 0x00008000); | ||
645 | t1_tpi_write(adapter, OFFSET(0x0001), 0x00000000); | ||
646 | t1_tpi_write(adapter, OFFSET(0x2308), 0x00009800); | ||
647 | t1_tpi_write(adapter, OFFSET(0x2305), 0x00001001); /* PL4IO Enable */ | ||
648 | t1_tpi_write(adapter, OFFSET(0x2320), 0x00008800); | ||
649 | t1_tpi_write(adapter, OFFSET(0x2321), 0x00008800); | ||
650 | t1_tpi_write(adapter, OFFSET(0x2322), 0x00008800); | ||
651 | t1_tpi_write(adapter, OFFSET(0x2323), 0x00008800); | ||
652 | t1_tpi_write(adapter, OFFSET(0x2324), 0x00008800); | ||
653 | t1_tpi_write(adapter, OFFSET(0x2325), 0x00008800); | ||
654 | t1_tpi_write(adapter, OFFSET(0x2326), 0x00008800); | ||
655 | t1_tpi_write(adapter, OFFSET(0x2327), 0x00008800); | ||
656 | t1_tpi_write(adapter, OFFSET(0x2328), 0x00008800); | ||
657 | t1_tpi_write(adapter, OFFSET(0x2329), 0x00008800); | ||
658 | t1_tpi_write(adapter, OFFSET(0x232a), 0x00008800); | ||
659 | t1_tpi_write(adapter, OFFSET(0x232b), 0x00008800); | ||
660 | t1_tpi_write(adapter, OFFSET(0x232c), 0x00008800); | ||
661 | t1_tpi_write(adapter, OFFSET(0x232d), 0x00008800); | ||
662 | t1_tpi_write(adapter, OFFSET(0x232e), 0x00008800); | ||
663 | t1_tpi_write(adapter, OFFSET(0x232f), 0x00008800); | ||
664 | t1_tpi_write(adapter, OFFSET(0x230d), 0x00009c00); | ||
665 | t1_tpi_write(adapter, OFFSET(0x2304), 0x00000202); /* PL4IO Calendar Repetitions */ | ||
666 | |||
667 | t1_tpi_write(adapter, OFFSET(0x3200), 0x00008080); /* EFLX Enable */ | ||
668 | t1_tpi_write(adapter, OFFSET(0x3210), 0x00000000); /* EFLX Channel Deprovision */ | ||
669 | t1_tpi_write(adapter, OFFSET(0x3203), 0x00000000); /* EFLX Low Limit */ | ||
670 | t1_tpi_write(adapter, OFFSET(0x3204), 0x00000040); /* EFLX High Limit */ | ||
671 | t1_tpi_write(adapter, OFFSET(0x3205), 0x000002cc); /* EFLX Almost Full */ | ||
672 | t1_tpi_write(adapter, OFFSET(0x3206), 0x00000199); /* EFLX Almost Empty */ | ||
673 | t1_tpi_write(adapter, OFFSET(0x3207), 0x00000240); /* EFLX Cut Through Threshold */ | ||
674 | t1_tpi_write(adapter, OFFSET(0x3202), 0x00000000); /* EFLX Indirect Register Update */ | ||
675 | t1_tpi_write(adapter, OFFSET(0x3210), 0x00000001); /* EFLX Channel Provision */ | ||
676 | t1_tpi_write(adapter, OFFSET(0x3208), 0x0000ffff); /* EFLX Undocumented */ | ||
677 | t1_tpi_write(adapter, OFFSET(0x320a), 0x0000ffff); /* EFLX Undocumented */ | ||
678 | t1_tpi_write(adapter, OFFSET(0x320c), 0x0000ffff); /* EFLX enable overflow interrupt. The other bits are undocumented */ | ||
679 | t1_tpi_write(adapter, OFFSET(0x320e), 0x0000ffff); /* EFLX Undocumented */ | ||
680 | |||
681 | t1_tpi_write(adapter, OFFSET(0x2200), 0x0000c000); /* IFLX Configuration - enable */ | ||
682 | t1_tpi_write(adapter, OFFSET(0x2201), 0x00000000); /* IFLX Channel Deprovision */ | ||
683 | t1_tpi_write(adapter, OFFSET(0x220e), 0x00000000); /* IFLX Low Limit */ | ||
684 | t1_tpi_write(adapter, OFFSET(0x220f), 0x00000100); /* IFLX High Limit */ | ||
685 | t1_tpi_write(adapter, OFFSET(0x2210), 0x00000c00); /* IFLX Almost Full Limit */ | ||
686 | t1_tpi_write(adapter, OFFSET(0x2211), 0x00000599); /* IFLX Almost Empty Limit */ | ||
687 | t1_tpi_write(adapter, OFFSET(0x220d), 0x00000000); /* IFLX Indirect Register Update */ | ||
688 | t1_tpi_write(adapter, OFFSET(0x2201), 0x00000001); /* IFLX Channel Provision */ | ||
689 | t1_tpi_write(adapter, OFFSET(0x2203), 0x0000ffff); /* IFLX Undocumented */ | ||
690 | t1_tpi_write(adapter, OFFSET(0x2205), 0x0000ffff); /* IFLX Undocumented */ | ||
691 | t1_tpi_write(adapter, OFFSET(0x2209), 0x0000ffff); /* IFLX Enable overflow interrupt. The other bits are undocumented */ | ||
692 | |||
693 | t1_tpi_write(adapter, OFFSET(0x2241), 0xfffffffe); /* PL4MOS Undocumented */ | ||
694 | t1_tpi_write(adapter, OFFSET(0x2242), 0x0000ffff); /* PL4MOS Undocumented */ | ||
695 | t1_tpi_write(adapter, OFFSET(0x2243), 0x00000008); /* PL4MOS Starving Burst Size */ | ||
696 | t1_tpi_write(adapter, OFFSET(0x2244), 0x00000008); /* PL4MOS Hungry Burst Size */ | ||
697 | t1_tpi_write(adapter, OFFSET(0x2245), 0x00000008); /* PL4MOS Transfer Size */ | ||
698 | t1_tpi_write(adapter, OFFSET(0x2240), 0x00000005); /* PL4MOS Disable */ | ||
699 | |||
700 | t1_tpi_write(adapter, OFFSET(0x2280), 0x00002103); /* PL4ODP Training Repeat and SOP rule */ | ||
701 | t1_tpi_write(adapter, OFFSET(0x2284), 0x00000000); /* PL4ODP MAX_T setting */ | ||
702 | |||
703 | t1_tpi_write(adapter, OFFSET(0x3280), 0x00000087); /* PL4IDU Enable data forward, port state machine. Set ALLOW_NON_ZERO_OLB */ | ||
704 | t1_tpi_write(adapter, OFFSET(0x3282), 0x0000001f); /* PL4IDU Enable Dip4 check error interrupts */ | ||
705 | |||
706 | t1_tpi_write(adapter, OFFSET(0x3040), 0x0c32); /* # TXXG Config */ | ||
707 | /* For T1 use timer based Mac flow control. */ | ||
708 | t1_tpi_write(adapter, OFFSET(0x304d), 0x8000); | ||
709 | t1_tpi_write(adapter, OFFSET(0x2040), 0x059c); /* # RXXG Config */ | ||
710 | t1_tpi_write(adapter, OFFSET(0x2049), 0x0001); /* # RXXG Cut Through */ | ||
711 | t1_tpi_write(adapter, OFFSET(0x2070), 0x0000); /* # Disable promiscuous mode */ | ||
712 | |||
713 | /* Setup Exact Match Filter 0 to allow broadcast packets. | ||
714 | */ | ||
715 | t1_tpi_write(adapter, OFFSET(0x206e), 0x0000); /* # Disable Match Enable bit */ | ||
716 | t1_tpi_write(adapter, OFFSET(0x204a), 0xffff); /* # low addr */ | ||
717 | t1_tpi_write(adapter, OFFSET(0x204b), 0xffff); /* # mid addr */ | ||
718 | t1_tpi_write(adapter, OFFSET(0x204c), 0xffff); /* # high addr */ | ||
719 | t1_tpi_write(adapter, OFFSET(0x206e), 0x0009); /* # Enable Match Enable bit */ | ||
720 | |||
721 | t1_tpi_write(adapter, OFFSET(0x0003), 0x0000); /* # NO SOP/ PAD_EN setup */ | ||
722 | t1_tpi_write(adapter, OFFSET(0x0100), 0x0ff0); /* # RXEQB disabled */ | ||
723 | t1_tpi_write(adapter, OFFSET(0x0101), 0x0f0f); /* # No Preemphasis */ | ||
724 | |||
725 | return cmac; | ||
726 | } | ||
727 | |||
728 | static int pm3393_mac_reset(adapter_t *adapter) | ||
729 | { | ||
730 | u32 val; | ||
731 | u32 x; | ||
732 | u32 is_pl4_reset_finished; | ||
733 | u32 is_pl4_outof_lock; | ||
734 | u32 is_xaui_mabc_pll_locked; | ||
735 | u32 successful_reset; | ||
736 | int i; | ||
737 | |||
738 | /* The following steps are required to properly reset | ||
739 | * the PM3393. This information is provided in the | ||
740 | * PM3393 datasheet (Issue 2: November 2002) | ||
741 | * section 13.1 -- Device Reset. | ||
742 | * | ||
743 | * The PM3393 has three types of components that are | ||
744 | * individually reset: | ||
745 | * | ||
746 | * DRESETB - Digital circuitry | ||
747 | * PL4_ARESETB - PL4 analog circuitry | ||
748 | * XAUI_ARESETB - XAUI bus analog circuitry | ||
749 | * | ||
750 | * Steps to reset PM3393 using RSTB pin: | ||
751 | * | ||
752 | * 1. Assert RSTB pin low (write 0). | ||
753 | * 2. Wait at least 1ms to initiate a complete initialization of the device. | ||
754 | * 3. Wait until all external clocks and REFSEL are stable. | ||
755 | * 4. Wait a minimum of 1ms after external clocks and REFSEL are stable. | ||
756 | * 5. De-assert RSTB (write 1). | ||
757 | * 6. Wait until the internal timers expire, after ~14ms. | ||
758 | * - Allows analog clock synthesizer(PL4CSU) to stabilize to | ||
759 | * selected reference frequency before allowing the digital | ||
760 | * portion of the device to operate. | ||
761 | * 7. Wait at least 200us for XAUI interface to stabilize. | ||
762 | * 8. Verify the PM3393 came out of reset successfully. | ||
763 | * Set the successful-reset flag if everything worked, else try | ||
764 | * again a few more times. | ||
765 | */ | ||
766 | |||
767 | successful_reset = 0; | ||
768 | for (i = 0; i < 3 && !successful_reset; i++) { | ||
769 | /* 1 */ | ||
770 | t1_tpi_read(adapter, A_ELMER0_GPO, &val); | ||
771 | val &= ~1; | ||
772 | t1_tpi_write(adapter, A_ELMER0_GPO, val); | ||
773 | |||
774 | /* 2 */ | ||
775 | msleep(1); | ||
776 | |||
777 | /* 3 */ | ||
778 | msleep(1); | ||
779 | |||
780 | /* 4 */ | ||
781 | msleep(2 /*1 extra ms for safety */ ); | ||
782 | |||
783 | /* 5 */ | ||
784 | val |= 1; | ||
785 | t1_tpi_write(adapter, A_ELMER0_GPO, val); | ||
786 | |||
787 | /* 6 */ | ||
788 | msleep(15 /*1 extra ms for safety */ ); | ||
789 | |||
790 | /* 7 */ | ||
791 | msleep(1); | ||
792 | |||
793 | /* 8 */ | ||
794 | |||
795 | /* Has PL4 analog block come out of reset correctly? */ | ||
796 | t1_tpi_read(adapter, OFFSET(SUNI1x10GEXP_REG_DEVICE_STATUS), &val); | ||
797 | is_pl4_reset_finished = (val & SUNI1x10GEXP_BITMSK_TOP_EXPIRED); | ||
798 | |||
799 | /* TBD XXX SUNI1x10GEXP_BITMSK_TOP_PL4_IS_DOOL only locks later in the | ||
800 | * init sequence; figure out why. */ | ||
801 | |||
802 | /* Have all PL4 block clocks locked? */ | ||
803 | x = (SUNI1x10GEXP_BITMSK_TOP_PL4_ID_DOOL | ||
804 | /*| SUNI1x10GEXP_BITMSK_TOP_PL4_IS_DOOL */ | | ||
805 | SUNI1x10GEXP_BITMSK_TOP_PL4_ID_ROOL | | ||
806 | SUNI1x10GEXP_BITMSK_TOP_PL4_IS_ROOL | | ||
807 | SUNI1x10GEXP_BITMSK_TOP_PL4_OUT_ROOL); | ||
808 | is_pl4_outof_lock = (val & x); | ||
809 | |||
810 | /* ??? If this fails, we might be able to software-reset the XAUI part | ||
811 | * and try to recover, saving us from doing another HW reset. */ | ||
812 | /* Has the XAUI MABC PLL circuitry stabilized? */ | ||
813 | is_xaui_mabc_pll_locked = | ||
814 | (val & SUNI1x10GEXP_BITMSK_TOP_SXRA_EXPIRED); | ||
815 | |||
816 | successful_reset = (is_pl4_reset_finished && !is_pl4_outof_lock | ||
817 | && is_xaui_mabc_pll_locked); | ||
818 | } | ||
819 | return successful_reset ? 0 : 1; | ||
820 | } | ||
821 | |||
822 | struct gmac t1_pm3393_ops = { | ||
823 | STATS_TICK_SECS, | ||
824 | pm3393_mac_create, | ||
825 | pm3393_mac_reset | ||
826 | }; | ||
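The ops table above is how MAC-independent driver code reaches the PM3393: it calls through function pointers rather than naming pm3393_* routines directly. A minimal sketch of that dispatch pattern, assuming a hypothetical, trimmed-down `struct gmac` with only the three members initialized above (the real definition lives in the driver's gmac header, which is not part of this hunk):

```c
#include <stddef.h>

typedef struct adapter adapter_t;	/* opaque here, as in the driver */
struct cmac;

/* Hypothetical simplified ops table mirroring t1_pm3393_ops above. */
struct gmac {
	unsigned int stats_update_period;	/* STATS_TICK_SECS */
	struct cmac *(*create)(adapter_t *adapter, int index);
	int (*reset)(adapter_t *adapter);
};

/* MAC-independent caller: resets whichever MAC this board carries. */
static int reset_mac(const struct gmac *ops, adapter_t *adapter)
{
	if (ops->reset == NULL)
		return 0;		/* nothing to do for this MAC */
	return ops->reset(adapter);	/* e.g. pm3393_mac_reset() */
}
```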
diff --git a/drivers/net/chelsio/regs.h b/drivers/net/chelsio/regs.h new file mode 100644 index 000000000000..b90e11f40d1f --- /dev/null +++ b/drivers/net/chelsio/regs.h | |||
@@ -0,0 +1,468 @@ | |||
1 | /***************************************************************************** | ||
2 | * * | ||
3 | * File: regs.h * | ||
4 | * $Revision: 1.8 $ * | ||
5 | * $Date: 2005/06/21 18:29:48 $ * | ||
6 | * Description: * | ||
7 | * part of the Chelsio 10Gb Ethernet Driver. * | ||
8 | * * | ||
9 | * This program is free software; you can redistribute it and/or modify * | ||
10 | * it under the terms of the GNU General Public License, version 2, as * | ||
11 | * published by the Free Software Foundation. * | ||
12 | * * | ||
13 | * You should have received a copy of the GNU General Public License along * | ||
14 | * with this program; if not, write to the Free Software Foundation, Inc., * | ||
15 | * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. * | ||
16 | * * | ||
17 | * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED * | ||
18 | * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF * | ||
19 | * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. * | ||
20 | * * | ||
21 | * http://www.chelsio.com * | ||
22 | * * | ||
23 | * Copyright (c) 2003 - 2005 Chelsio Communications, Inc. * | ||
24 | * All rights reserved. * | ||
25 | * * | ||
26 | * Maintainers: maintainers@chelsio.com * | ||
27 | * * | ||
28 | * Authors: Dimitrios Michailidis <dm@chelsio.com> * | ||
29 | * Tina Yang <tainay@chelsio.com> * | ||
30 | * Felix Marti <felix@chelsio.com> * | ||
31 | * Scott Bardone <sbardone@chelsio.com> * | ||
32 | * Kurt Ottaway <kottaway@chelsio.com> * | ||
33 | * Frank DiMambro <frank@chelsio.com> * | ||
34 | * * | ||
35 | * History: * | ||
36 | * * | ||
37 | ****************************************************************************/ | ||
38 | |||
39 | #ifndef _CXGB_REGS_H_ | ||
40 | #define _CXGB_REGS_H_ | ||
41 | |||
42 | /* SGE registers */ | ||
43 | #define A_SG_CONTROL 0x0 | ||
44 | |||
45 | #define S_CMDQ0_ENABLE 0 | ||
46 | #define V_CMDQ0_ENABLE(x) ((x) << S_CMDQ0_ENABLE) | ||
47 | #define F_CMDQ0_ENABLE V_CMDQ0_ENABLE(1U) | ||
48 | |||
49 | #define S_CMDQ1_ENABLE 1 | ||
50 | #define V_CMDQ1_ENABLE(x) ((x) << S_CMDQ1_ENABLE) | ||
51 | #define F_CMDQ1_ENABLE V_CMDQ1_ENABLE(1U) | ||
52 | |||
53 | #define S_FL0_ENABLE 2 | ||
54 | #define V_FL0_ENABLE(x) ((x) << S_FL0_ENABLE) | ||
55 | #define F_FL0_ENABLE V_FL0_ENABLE(1U) | ||
56 | |||
57 | #define S_FL1_ENABLE 3 | ||
58 | #define V_FL1_ENABLE(x) ((x) << S_FL1_ENABLE) | ||
59 | #define F_FL1_ENABLE V_FL1_ENABLE(1U) | ||
60 | |||
61 | #define S_CPL_ENABLE 4 | ||
62 | #define V_CPL_ENABLE(x) ((x) << S_CPL_ENABLE) | ||
63 | #define F_CPL_ENABLE V_CPL_ENABLE(1U) | ||
64 | |||
65 | #define S_RESPONSE_QUEUE_ENABLE 5 | ||
66 | #define V_RESPONSE_QUEUE_ENABLE(x) ((x) << S_RESPONSE_QUEUE_ENABLE) | ||
67 | #define F_RESPONSE_QUEUE_ENABLE V_RESPONSE_QUEUE_ENABLE(1U) | ||
68 | |||
69 | #define S_CMDQ_PRIORITY 6 | ||
70 | #define M_CMDQ_PRIORITY 0x3 | ||
71 | #define V_CMDQ_PRIORITY(x) ((x) << S_CMDQ_PRIORITY) | ||
72 | #define G_CMDQ_PRIORITY(x) (((x) >> S_CMDQ_PRIORITY) & M_CMDQ_PRIORITY) | ||
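Every register in this file is described with the same five macro prefixes: `S_` is a field's bit offset, `M_` its width mask, `V_` places a value into the field, `F_` is the single-bit special case, and `G_` extracts the field from a register word. A self-contained sketch of how they compose, with the definitions copied from above:

```c
#include <stdio.h>
#include <stdint.h>

#define S_CMDQ0_ENABLE     0
#define V_CMDQ0_ENABLE(x)  ((x) << S_CMDQ0_ENABLE)
#define F_CMDQ0_ENABLE     V_CMDQ0_ENABLE(1U)

#define S_CMDQ_PRIORITY    6
#define M_CMDQ_PRIORITY    0x3
#define V_CMDQ_PRIORITY(x) ((x) << S_CMDQ_PRIORITY)
#define G_CMDQ_PRIORITY(x) (((x) >> S_CMDQ_PRIORITY) & M_CMDQ_PRIORITY)

int main(void)
{
	uint32_t ctrl = 0;

	ctrl |= F_CMDQ0_ENABLE;		/* set a single-bit flag          */
	ctrl |= V_CMDQ_PRIORITY(2);	/* place a two-bit field at bit 6 */

	/* prints: ctrl = 0x81, priority = 2 */
	printf("ctrl = %#x, priority = %u\n", (unsigned)ctrl,
	       (unsigned)G_CMDQ_PRIORITY(ctrl));
	return 0;
}
```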
73 | |||
74 | #define S_DISABLE_CMDQ1_GTS 9 | ||
75 | #define V_DISABLE_CMDQ1_GTS(x) ((x) << S_DISABLE_CMDQ1_GTS) | ||
76 | #define F_DISABLE_CMDQ1_GTS V_DISABLE_CMDQ1_GTS(1U) | ||
77 | |||
78 | #define S_DISABLE_FL0_GTS 10 | ||
79 | #define V_DISABLE_FL0_GTS(x) ((x) << S_DISABLE_FL0_GTS) | ||
80 | #define F_DISABLE_FL0_GTS V_DISABLE_FL0_GTS(1U) | ||
81 | |||
82 | #define S_DISABLE_FL1_GTS 11 | ||
83 | #define V_DISABLE_FL1_GTS(x) ((x) << S_DISABLE_FL1_GTS) | ||
84 | #define F_DISABLE_FL1_GTS V_DISABLE_FL1_GTS(1U) | ||
85 | |||
86 | #define S_ENABLE_BIG_ENDIAN 12 | ||
87 | #define V_ENABLE_BIG_ENDIAN(x) ((x) << S_ENABLE_BIG_ENDIAN) | ||
88 | #define F_ENABLE_BIG_ENDIAN V_ENABLE_BIG_ENDIAN(1U) | ||
89 | |||
90 | #define S_ISCSI_COALESCE 14 | ||
91 | #define V_ISCSI_COALESCE(x) ((x) << S_ISCSI_COALESCE) | ||
92 | #define F_ISCSI_COALESCE V_ISCSI_COALESCE(1U) | ||
93 | |||
94 | #define S_RX_PKT_OFFSET 15 | ||
95 | #define V_RX_PKT_OFFSET(x) ((x) << S_RX_PKT_OFFSET) | ||
96 | |||
97 | #define S_VLAN_XTRACT 18 | ||
98 | #define V_VLAN_XTRACT(x) ((x) << S_VLAN_XTRACT) | ||
99 | #define F_VLAN_XTRACT V_VLAN_XTRACT(1U) | ||
100 | |||
101 | #define A_SG_DOORBELL 0x4 | ||
102 | #define A_SG_CMD0BASELWR 0x8 | ||
103 | #define A_SG_CMD0BASEUPR 0xc | ||
104 | #define A_SG_CMD1BASELWR 0x10 | ||
105 | #define A_SG_CMD1BASEUPR 0x14 | ||
106 | #define A_SG_FL0BASELWR 0x18 | ||
107 | #define A_SG_FL0BASEUPR 0x1c | ||
108 | #define A_SG_FL1BASELWR 0x20 | ||
109 | #define A_SG_FL1BASEUPR 0x24 | ||
110 | #define A_SG_CMD0SIZE 0x28 | ||
111 | #define A_SG_FL0SIZE 0x2c | ||
112 | #define A_SG_RSPSIZE 0x30 | ||
113 | #define A_SG_RSPBASELWR 0x34 | ||
114 | #define A_SG_RSPBASEUPR 0x38 | ||
115 | #define A_SG_FLTHRESHOLD 0x3c | ||
116 | #define A_SG_RSPQUEUECREDIT 0x40 | ||
117 | #define A_SG_SLEEPING 0x48 | ||
118 | #define A_SG_INTRTIMER 0x4c | ||
119 | #define A_SG_CMD1SIZE 0xb0 | ||
120 | #define A_SG_FL1SIZE 0xb4 | ||
121 | #define A_SG_INT_ENABLE 0xb8 | ||
122 | |||
123 | #define S_RESPQ_EXHAUSTED 0 | ||
124 | #define V_RESPQ_EXHAUSTED(x) ((x) << S_RESPQ_EXHAUSTED) | ||
125 | #define F_RESPQ_EXHAUSTED V_RESPQ_EXHAUSTED(1U) | ||
126 | |||
127 | #define S_RESPQ_OVERFLOW 1 | ||
128 | #define V_RESPQ_OVERFLOW(x) ((x) << S_RESPQ_OVERFLOW) | ||
129 | #define F_RESPQ_OVERFLOW V_RESPQ_OVERFLOW(1U) | ||
130 | |||
131 | #define S_FL_EXHAUSTED 2 | ||
132 | #define V_FL_EXHAUSTED(x) ((x) << S_FL_EXHAUSTED) | ||
133 | #define F_FL_EXHAUSTED V_FL_EXHAUSTED(1U) | ||
134 | |||
135 | #define S_PACKET_TOO_BIG 3 | ||
136 | #define V_PACKET_TOO_BIG(x) ((x) << S_PACKET_TOO_BIG) | ||
137 | #define F_PACKET_TOO_BIG V_PACKET_TOO_BIG(1U) | ||
138 | |||
139 | #define S_PACKET_MISMATCH 4 | ||
140 | #define V_PACKET_MISMATCH(x) ((x) << S_PACKET_MISMATCH) | ||
141 | #define F_PACKET_MISMATCH V_PACKET_MISMATCH(1U) | ||
142 | |||
143 | #define A_SG_INT_CAUSE 0xbc | ||
144 | #define A_SG_RESPACCUTIMER 0xc0 | ||
145 | |||
146 | /* MC3 registers */ | ||
147 | |||
148 | #define S_READY 1 | ||
149 | #define V_READY(x) ((x) << S_READY) | ||
150 | #define F_READY V_READY(1U) | ||
151 | |||
152 | /* MC4 registers */ | ||
153 | |||
154 | #define A_MC4_CFG 0x180 | ||
155 | #define S_MC4_SLOW 25 | ||
156 | #define V_MC4_SLOW(x) ((x) << S_MC4_SLOW) | ||
157 | #define F_MC4_SLOW V_MC4_SLOW(1U) | ||
158 | |||
159 | /* TPI registers */ | ||
160 | |||
161 | #define A_TPI_ADDR 0x280 | ||
162 | #define A_TPI_WR_DATA 0x284 | ||
163 | #define A_TPI_RD_DATA 0x288 | ||
164 | #define A_TPI_CSR 0x28c | ||
165 | |||
166 | #define S_TPIWR 0 | ||
167 | #define V_TPIWR(x) ((x) << S_TPIWR) | ||
168 | #define F_TPIWR V_TPIWR(1U) | ||
169 | |||
170 | #define S_TPIRDY 1 | ||
171 | #define V_TPIRDY(x) ((x) << S_TPIRDY) | ||
172 | #define F_TPIRDY V_TPIRDY(1U) | ||
173 | |||
174 | #define A_TPI_PAR 0x29c | ||
175 | |||
176 | #define S_TPIPAR 0 | ||
177 | #define M_TPIPAR 0x7f | ||
178 | #define V_TPIPAR(x) ((x) << S_TPIPAR) | ||
179 | #define G_TPIPAR(x) (((x) >> S_TPIPAR) & M_TPIPAR) | ||
180 | |||
181 | /* TP registers */ | ||
182 | |||
183 | #define A_TP_IN_CONFIG 0x300 | ||
184 | |||
185 | #define S_TP_IN_CSPI_CPL 3 | ||
186 | #define V_TP_IN_CSPI_CPL(x) ((x) << S_TP_IN_CSPI_CPL) | ||
187 | #define F_TP_IN_CSPI_CPL V_TP_IN_CSPI_CPL(1U) | ||
188 | |||
189 | #define S_TP_IN_CSPI_CHECK_IP_CSUM 5 | ||
190 | #define V_TP_IN_CSPI_CHECK_IP_CSUM(x) ((x) << S_TP_IN_CSPI_CHECK_IP_CSUM) | ||
191 | #define F_TP_IN_CSPI_CHECK_IP_CSUM V_TP_IN_CSPI_CHECK_IP_CSUM(1U) | ||
192 | |||
193 | #define S_TP_IN_CSPI_CHECK_TCP_CSUM 6 | ||
194 | #define V_TP_IN_CSPI_CHECK_TCP_CSUM(x) ((x) << S_TP_IN_CSPI_CHECK_TCP_CSUM) | ||
195 | #define F_TP_IN_CSPI_CHECK_TCP_CSUM V_TP_IN_CSPI_CHECK_TCP_CSUM(1U) | ||
196 | |||
197 | #define S_TP_IN_ESPI_ETHERNET 8 | ||
198 | #define V_TP_IN_ESPI_ETHERNET(x) ((x) << S_TP_IN_ESPI_ETHERNET) | ||
199 | #define F_TP_IN_ESPI_ETHERNET V_TP_IN_ESPI_ETHERNET(1U) | ||
200 | |||
201 | #define S_TP_IN_ESPI_CHECK_IP_CSUM 12 | ||
202 | #define V_TP_IN_ESPI_CHECK_IP_CSUM(x) ((x) << S_TP_IN_ESPI_CHECK_IP_CSUM) | ||
203 | #define F_TP_IN_ESPI_CHECK_IP_CSUM V_TP_IN_ESPI_CHECK_IP_CSUM(1U) | ||
204 | |||
205 | #define S_TP_IN_ESPI_CHECK_TCP_CSUM 13 | ||
206 | #define V_TP_IN_ESPI_CHECK_TCP_CSUM(x) ((x) << S_TP_IN_ESPI_CHECK_TCP_CSUM) | ||
207 | #define F_TP_IN_ESPI_CHECK_TCP_CSUM V_TP_IN_ESPI_CHECK_TCP_CSUM(1U) | ||
208 | |||
209 | #define S_OFFLOAD_DISABLE 14 | ||
210 | #define V_OFFLOAD_DISABLE(x) ((x) << S_OFFLOAD_DISABLE) | ||
211 | #define F_OFFLOAD_DISABLE V_OFFLOAD_DISABLE(1U) | ||
212 | |||
213 | #define A_TP_OUT_CONFIG 0x304 | ||
214 | |||
215 | #define S_TP_OUT_CSPI_CPL 2 | ||
216 | #define V_TP_OUT_CSPI_CPL(x) ((x) << S_TP_OUT_CSPI_CPL) | ||
217 | #define F_TP_OUT_CSPI_CPL V_TP_OUT_CSPI_CPL(1U) | ||
218 | |||
219 | #define S_TP_OUT_ESPI_ETHERNET 6 | ||
220 | #define V_TP_OUT_ESPI_ETHERNET(x) ((x) << S_TP_OUT_ESPI_ETHERNET) | ||
221 | #define F_TP_OUT_ESPI_ETHERNET V_TP_OUT_ESPI_ETHERNET(1U) | ||
222 | |||
223 | #define S_TP_OUT_ESPI_GENERATE_IP_CSUM 10 | ||
224 | #define V_TP_OUT_ESPI_GENERATE_IP_CSUM(x) ((x) << S_TP_OUT_ESPI_GENERATE_IP_CSUM) | ||
225 | #define F_TP_OUT_ESPI_GENERATE_IP_CSUM V_TP_OUT_ESPI_GENERATE_IP_CSUM(1U) | ||
226 | |||
227 | #define S_TP_OUT_ESPI_GENERATE_TCP_CSUM 11 | ||
228 | #define V_TP_OUT_ESPI_GENERATE_TCP_CSUM(x) ((x) << S_TP_OUT_ESPI_GENERATE_TCP_CSUM) | ||
229 | #define F_TP_OUT_ESPI_GENERATE_TCP_CSUM V_TP_OUT_ESPI_GENERATE_TCP_CSUM(1U) | ||
230 | |||
231 | #define A_TP_GLOBAL_CONFIG 0x308 | ||
232 | |||
233 | #define S_IP_TTL 0 | ||
234 | #define M_IP_TTL 0xff | ||
235 | #define V_IP_TTL(x) ((x) << S_IP_TTL) | ||
236 | |||
237 | #define S_TCP_CSUM 11 | ||
238 | #define V_TCP_CSUM(x) ((x) << S_TCP_CSUM) | ||
239 | #define F_TCP_CSUM V_TCP_CSUM(1U) | ||
240 | |||
241 | #define S_UDP_CSUM 12 | ||
242 | #define V_UDP_CSUM(x) ((x) << S_UDP_CSUM) | ||
243 | #define F_UDP_CSUM V_UDP_CSUM(1U) | ||
244 | |||
245 | #define S_IP_CSUM 13 | ||
246 | #define V_IP_CSUM(x) ((x) << S_IP_CSUM) | ||
247 | #define F_IP_CSUM V_IP_CSUM(1U) | ||
248 | |||
249 | #define S_PATH_MTU 15 | ||
250 | #define V_PATH_MTU(x) ((x) << S_PATH_MTU) | ||
251 | #define F_PATH_MTU V_PATH_MTU(1U) | ||
252 | |||
253 | #define S_5TUPLE_LOOKUP 17 | ||
254 | #define V_5TUPLE_LOOKUP(x) ((x) << S_5TUPLE_LOOKUP) | ||
255 | |||
256 | #define S_SYN_COOKIE_PARAMETER 26 | ||
257 | #define V_SYN_COOKIE_PARAMETER(x) ((x) << S_SYN_COOKIE_PARAMETER) | ||
258 | |||
259 | #define A_TP_PC_CONFIG 0x348 | ||
260 | #define S_DIS_TX_FILL_WIN_PUSH 12 | ||
261 | #define V_DIS_TX_FILL_WIN_PUSH(x) ((x) << S_DIS_TX_FILL_WIN_PUSH) | ||
262 | #define F_DIS_TX_FILL_WIN_PUSH V_DIS_TX_FILL_WIN_PUSH(1U) | ||
263 | |||
264 | #define S_TP_PC_REV 30 | ||
265 | #define M_TP_PC_REV 0x3 | ||
266 | #define G_TP_PC_REV(x) (((x) >> S_TP_PC_REV) & M_TP_PC_REV) | ||
267 | #define A_TP_RESET 0x44c | ||
268 | #define S_TP_RESET 0 | ||
269 | #define V_TP_RESET(x) ((x) << S_TP_RESET) | ||
270 | #define F_TP_RESET V_TP_RESET(1U) | ||
271 | |||
272 | #define A_TP_INT_ENABLE 0x470 | ||
273 | #define A_TP_INT_CAUSE 0x474 | ||
274 | #define A_TP_TX_DROP_CONFIG 0x4b8 | ||
275 | |||
276 | #define S_ENABLE_TX_DROP 31 | ||
277 | #define V_ENABLE_TX_DROP(x) ((x) << S_ENABLE_TX_DROP) | ||
278 | #define F_ENABLE_TX_DROP V_ENABLE_TX_DROP(1U) | ||
279 | |||
280 | #define S_ENABLE_TX_ERROR 30 | ||
281 | #define V_ENABLE_TX_ERROR(x) ((x) << S_ENABLE_TX_ERROR) | ||
282 | #define F_ENABLE_TX_ERROR V_ENABLE_TX_ERROR(1U) | ||
283 | |||
284 | #define S_DROP_TICKS_CNT 4 | ||
285 | #define V_DROP_TICKS_CNT(x) ((x) << S_DROP_TICKS_CNT) | ||
286 | |||
287 | #define S_NUM_PKTS_DROPPED 0 | ||
288 | #define V_NUM_PKTS_DROPPED(x) ((x) << S_NUM_PKTS_DROPPED) | ||
289 | |||
290 | /* CSPI registers */ | ||
291 | |||
292 | #define S_DIP4ERR 0 | ||
293 | #define V_DIP4ERR(x) ((x) << S_DIP4ERR) | ||
294 | #define F_DIP4ERR V_DIP4ERR(1U) | ||
295 | |||
296 | #define S_RXDROP 1 | ||
297 | #define V_RXDROP(x) ((x) << S_RXDROP) | ||
298 | #define F_RXDROP V_RXDROP(1U) | ||
299 | |||
300 | #define S_TXDROP 2 | ||
301 | #define V_TXDROP(x) ((x) << S_TXDROP) | ||
302 | #define F_TXDROP V_TXDROP(1U) | ||
303 | |||
304 | #define S_RXOVERFLOW 3 | ||
305 | #define V_RXOVERFLOW(x) ((x) << S_RXOVERFLOW) | ||
306 | #define F_RXOVERFLOW V_RXOVERFLOW(1U) | ||
307 | |||
308 | #define S_RAMPARITYERR 4 | ||
309 | #define V_RAMPARITYERR(x) ((x) << S_RAMPARITYERR) | ||
310 | #define F_RAMPARITYERR V_RAMPARITYERR(1U) | ||
311 | |||
312 | /* ESPI registers */ | ||
313 | |||
314 | #define A_ESPI_SCH_TOKEN0 0x880 | ||
315 | #define A_ESPI_SCH_TOKEN1 0x884 | ||
316 | #define A_ESPI_SCH_TOKEN2 0x888 | ||
317 | #define A_ESPI_SCH_TOKEN3 0x88c | ||
318 | #define A_ESPI_RX_FIFO_ALMOST_EMPTY_WATERMARK 0x890 | ||
319 | #define A_ESPI_RX_FIFO_ALMOST_FULL_WATERMARK 0x894 | ||
320 | #define A_ESPI_CALENDAR_LENGTH 0x898 | ||
321 | #define A_PORT_CONFIG 0x89c | ||
322 | |||
323 | #define S_RX_NPORTS 0 | ||
324 | #define V_RX_NPORTS(x) ((x) << S_RX_NPORTS) | ||
325 | |||
326 | #define S_TX_NPORTS 8 | ||
327 | #define V_TX_NPORTS(x) ((x) << S_TX_NPORTS) | ||
328 | |||
329 | #define A_ESPI_FIFO_STATUS_ENABLE 0x8a0 | ||
330 | |||
331 | #define S_RXSTATUSENABLE 0 | ||
332 | #define V_RXSTATUSENABLE(x) ((x) << S_RXSTATUSENABLE) | ||
333 | #define F_RXSTATUSENABLE V_RXSTATUSENABLE(1U) | ||
334 | |||
335 | #define S_INTEL1010MODE 4 | ||
336 | #define V_INTEL1010MODE(x) ((x) << S_INTEL1010MODE) | ||
337 | #define F_INTEL1010MODE V_INTEL1010MODE(1U) | ||
338 | |||
339 | #define A_ESPI_MAXBURST1_MAXBURST2 0x8a8 | ||
340 | #define A_ESPI_TRAIN 0x8ac | ||
341 | #define A_ESPI_INTR_STATUS 0x8c8 | ||
342 | |||
343 | #define S_DIP2PARITYERR 5 | ||
344 | #define V_DIP2PARITYERR(x) ((x) << S_DIP2PARITYERR) | ||
345 | #define F_DIP2PARITYERR V_DIP2PARITYERR(1U) | ||
346 | |||
347 | #define A_ESPI_INTR_ENABLE 0x8cc | ||
348 | #define A_RX_DROP_THRESHOLD 0x8d0 | ||
349 | #define A_ESPI_RX_RESET 0x8ec | ||
350 | #define A_ESPI_MISC_CONTROL 0x8f0 | ||
351 | |||
352 | #define S_OUT_OF_SYNC_COUNT 0 | ||
353 | #define V_OUT_OF_SYNC_COUNT(x) ((x) << S_OUT_OF_SYNC_COUNT) | ||
354 | |||
355 | #define S_DIP2_PARITY_ERR_THRES 5 | ||
356 | #define V_DIP2_PARITY_ERR_THRES(x) ((x) << S_DIP2_PARITY_ERR_THRES) | ||
357 | |||
358 | #define S_DIP4_THRES 9 | ||
359 | #define V_DIP4_THRES(x) ((x) << S_DIP4_THRES) | ||
360 | |||
361 | #define S_MONITORED_PORT_NUM 25 | ||
362 | #define V_MONITORED_PORT_NUM(x) ((x) << S_MONITORED_PORT_NUM) | ||
363 | |||
364 | #define S_MONITORED_DIRECTION 27 | ||
365 | #define V_MONITORED_DIRECTION(x) ((x) << S_MONITORED_DIRECTION) | ||
366 | #define F_MONITORED_DIRECTION V_MONITORED_DIRECTION(1U) | ||
367 | |||
368 | #define S_MONITORED_INTERFACE 28 | ||
369 | #define V_MONITORED_INTERFACE(x) ((x) << S_MONITORED_INTERFACE) | ||
370 | #define F_MONITORED_INTERFACE V_MONITORED_INTERFACE(1U) | ||
371 | |||
372 | #define A_ESPI_DIP2_ERR_COUNT 0x8f4 | ||
373 | #define A_ESPI_CMD_ADDR 0x8f8 | ||
374 | |||
375 | #define S_WRITE_DATA 0 | ||
376 | #define V_WRITE_DATA(x) ((x) << S_WRITE_DATA) | ||
377 | |||
378 | #define S_REGISTER_OFFSET 8 | ||
379 | #define V_REGISTER_OFFSET(x) ((x) << S_REGISTER_OFFSET) | ||
380 | |||
381 | #define S_CHANNEL_ADDR 12 | ||
382 | #define V_CHANNEL_ADDR(x) ((x) << S_CHANNEL_ADDR) | ||
383 | |||
384 | #define S_MODULE_ADDR 16 | ||
385 | #define V_MODULE_ADDR(x) ((x) << S_MODULE_ADDR) | ||
386 | |||
387 | #define S_BUNDLE_ADDR 20 | ||
388 | #define V_BUNDLE_ADDR(x) ((x) << S_BUNDLE_ADDR) | ||
389 | |||
390 | #define S_SPI4_COMMAND 24 | ||
391 | #define V_SPI4_COMMAND(x) ((x) << S_SPI4_COMMAND) | ||
392 | |||
393 | #define A_ESPI_GOSTAT 0x8fc | ||
394 | #define S_ESPI_CMD_BUSY 8 | ||
395 | #define V_ESPI_CMD_BUSY(x) ((x) << S_ESPI_CMD_BUSY) | ||
396 | #define F_ESPI_CMD_BUSY V_ESPI_CMD_BUSY(1U) | ||
397 | |||
398 | /* PL registers */ | ||
399 | |||
400 | #define A_PL_ENABLE 0xa00 | ||
401 | |||
402 | #define S_PL_INTR_SGE_ERR 0 | ||
403 | #define V_PL_INTR_SGE_ERR(x) ((x) << S_PL_INTR_SGE_ERR) | ||
404 | #define F_PL_INTR_SGE_ERR V_PL_INTR_SGE_ERR(1U) | ||
405 | |||
406 | #define S_PL_INTR_SGE_DATA 1 | ||
407 | #define V_PL_INTR_SGE_DATA(x) ((x) << S_PL_INTR_SGE_DATA) | ||
408 | #define F_PL_INTR_SGE_DATA V_PL_INTR_SGE_DATA(1U) | ||
409 | |||
410 | #define S_PL_INTR_TP 6 | ||
411 | #define V_PL_INTR_TP(x) ((x) << S_PL_INTR_TP) | ||
412 | #define F_PL_INTR_TP V_PL_INTR_TP(1U) | ||
413 | |||
414 | #define S_PL_INTR_ESPI 8 | ||
415 | #define V_PL_INTR_ESPI(x) ((x) << S_PL_INTR_ESPI) | ||
416 | #define F_PL_INTR_ESPI V_PL_INTR_ESPI(1U) | ||
417 | |||
418 | #define S_PL_INTR_PCIX 10 | ||
419 | #define V_PL_INTR_PCIX(x) ((x) << S_PL_INTR_PCIX) | ||
420 | #define F_PL_INTR_PCIX V_PL_INTR_PCIX(1U) | ||
421 | |||
422 | #define S_PL_INTR_EXT 11 | ||
423 | #define V_PL_INTR_EXT(x) ((x) << S_PL_INTR_EXT) | ||
424 | #define F_PL_INTR_EXT V_PL_INTR_EXT(1U) | ||
425 | |||
426 | #define A_PL_CAUSE 0xa04 | ||
427 | |||
428 | /* MC5 registers */ | ||
429 | |||
430 | #define A_MC5_CONFIG 0xc04 | ||
431 | |||
432 | #define S_TCAM_RESET 1 | ||
433 | #define V_TCAM_RESET(x) ((x) << S_TCAM_RESET) | ||
434 | #define F_TCAM_RESET V_TCAM_RESET(1U) | ||
435 | |||
436 | #define S_M_BUS_ENABLE 5 | ||
437 | #define V_M_BUS_ENABLE(x) ((x) << S_M_BUS_ENABLE) | ||
438 | #define F_M_BUS_ENABLE V_M_BUS_ENABLE(1U) | ||
439 | |||
440 | /* PCICFG registers */ | ||
441 | |||
442 | #define A_PCICFG_PM_CSR 0x44 | ||
443 | #define A_PCICFG_VPD_ADDR 0x4a | ||
444 | |||
445 | #define S_VPD_OP_FLAG 15 | ||
446 | #define V_VPD_OP_FLAG(x) ((x) << S_VPD_OP_FLAG) | ||
447 | #define F_VPD_OP_FLAG V_VPD_OP_FLAG(1U) | ||
448 | |||
449 | #define A_PCICFG_VPD_DATA 0x4c | ||
450 | |||
451 | #define A_PCICFG_INTR_ENABLE 0xf4 | ||
452 | #define A_PCICFG_INTR_CAUSE 0xf8 | ||
453 | |||
454 | #define A_PCICFG_MODE 0xfc | ||
455 | |||
456 | #define S_PCI_MODE_64BIT 0 | ||
457 | #define V_PCI_MODE_64BIT(x) ((x) << S_PCI_MODE_64BIT) | ||
458 | #define F_PCI_MODE_64BIT V_PCI_MODE_64BIT(1U) | ||
459 | |||
460 | #define S_PCI_MODE_PCIX 5 | ||
461 | #define V_PCI_MODE_PCIX(x) ((x) << S_PCI_MODE_PCIX) | ||
462 | #define F_PCI_MODE_PCIX V_PCI_MODE_PCIX(1U) | ||
463 | |||
464 | #define S_PCI_MODE_CLK 6 | ||
465 | #define M_PCI_MODE_CLK 0x3 | ||
466 | #define G_PCI_MODE_CLK(x) (((x) >> S_PCI_MODE_CLK) & M_PCI_MODE_CLK) | ||
467 | |||
468 | #endif /* _CXGB_REGS_H_ */ | ||
diff --git a/drivers/net/chelsio/sge.c b/drivers/net/chelsio/sge.c new file mode 100644 index 000000000000..53b41d99b00b --- /dev/null +++ b/drivers/net/chelsio/sge.c | |||
@@ -0,0 +1,1684 @@ | |||
1 | /***************************************************************************** | ||
2 | * * | ||
3 | * File: sge.c * | ||
4 | * $Revision: 1.26 $ * | ||
5 | * $Date: 2005/06/21 18:29:48 $ * | ||
6 | * Description: * | ||
7 | * DMA engine. * | ||
8 | * part of the Chelsio 10Gb Ethernet Driver. * | ||
9 | * * | ||
10 | * This program is free software; you can redistribute it and/or modify * | ||
11 | * it under the terms of the GNU General Public License, version 2, as * | ||
12 | * published by the Free Software Foundation. * | ||
13 | * * | ||
14 | * You should have received a copy of the GNU General Public License along * | ||
15 | * with this program; if not, write to the Free Software Foundation, Inc., * | ||
16 | * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. * | ||
17 | * * | ||
18 | * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED * | ||
19 | * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF * | ||
20 | * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. * | ||
21 | * * | ||
22 | * http://www.chelsio.com * | ||
23 | * * | ||
24 | * Copyright (c) 2003 - 2005 Chelsio Communications, Inc. * | ||
25 | * All rights reserved. * | ||
26 | * * | ||
27 | * Maintainers: maintainers@chelsio.com * | ||
28 | * * | ||
29 | * Authors: Dimitrios Michailidis <dm@chelsio.com> * | ||
30 | * Tina Yang <tainay@chelsio.com> * | ||
31 | * Felix Marti <felix@chelsio.com> * | ||
32 | * Scott Bardone <sbardone@chelsio.com> * | ||
33 | * Kurt Ottaway <kottaway@chelsio.com> * | ||
34 | * Frank DiMambro <frank@chelsio.com> * | ||
35 | * * | ||
36 | * History: * | ||
37 | * * | ||
38 | ****************************************************************************/ | ||
39 | |||
40 | #include "common.h" | ||
41 | |||
42 | #include <linux/config.h> | ||
43 | #include <linux/types.h> | ||
44 | #include <linux/errno.h> | ||
45 | #include <linux/pci.h> | ||
46 | #include <linux/netdevice.h> | ||
47 | #include <linux/etherdevice.h> | ||
48 | #include <linux/if_vlan.h> | ||
49 | #include <linux/skbuff.h> | ||
50 | #include <linux/init.h> | ||
51 | #include <linux/mm.h> | ||
52 | #include <linux/ip.h> | ||
53 | #include <linux/in.h> | ||
54 | #include <linux/if_arp.h> | ||
55 | |||
56 | #include "cpl5_cmd.h" | ||
57 | #include "sge.h" | ||
58 | #include "regs.h" | ||
59 | #include "espi.h" | ||
60 | |||
61 | |||
62 | #ifdef NETIF_F_TSO | ||
63 | #include <linux/tcp.h> | ||
64 | #endif | ||
65 | |||
66 | #define SGE_CMDQ_N 2 | ||
67 | #define SGE_FREELQ_N 2 | ||
68 | #define SGE_CMDQ0_E_N 1024 | ||
69 | #define SGE_CMDQ1_E_N 128 | ||
70 | #define SGE_FREEL_SIZE 4096 | ||
71 | #define SGE_JUMBO_FREEL_SIZE 512 | ||
72 | #define SGE_FREEL_REFILL_THRESH 16 | ||
73 | #define SGE_RESPQ_E_N 1024 | ||
74 | #define SGE_INTRTIMER_NRES 1000 | ||
75 | #define SGE_RX_COPY_THRES 256 | ||
76 | #define SGE_RX_SM_BUF_SIZE 1536 | ||
77 | |||
78 | # define SGE_RX_DROP_THRES 2 | ||
79 | |||
80 | #define SGE_RESPQ_REPLENISH_THRES (SGE_RESPQ_E_N / 4) | ||
81 | |||
82 | /* | ||
83 | * Period of the TX buffer reclaim timer. This timer does not need to run | ||
84 | * frequently as TX buffers are usually reclaimed by new TX packets. | ||
85 | */ | ||
86 | #define TX_RECLAIM_PERIOD (HZ / 4) | ||
87 | |||
88 | #ifndef NET_IP_ALIGN | ||
89 | # define NET_IP_ALIGN 2 | ||
90 | #endif | ||
91 | |||
92 | #define M_CMD_LEN 0x7fffffff | ||
93 | #define V_CMD_LEN(v) (v) | ||
94 | #define G_CMD_LEN(v) ((v) & M_CMD_LEN) | ||
95 | #define V_CMD_GEN1(v) ((v) << 31) | ||
96 | #define V_CMD_GEN2(v) (v) | ||
97 | #define F_CMD_DATAVALID (1 << 1) | ||
98 | #define F_CMD_SOP (1 << 2) | ||
99 | #define V_CMD_EOP(v) ((v) << 3) | ||
100 | |||
101 | /* | ||
102 | * Command queue, receive buffer list, and response queue descriptors. | ||
103 | */ | ||
104 | #if defined(__BIG_ENDIAN_BITFIELD) | ||
105 | struct cmdQ_e { | ||
106 | u32 addr_lo; | ||
107 | u32 len_gen; | ||
108 | u32 flags; | ||
109 | u32 addr_hi; | ||
110 | }; | ||
111 | |||
112 | struct freelQ_e { | ||
113 | u32 addr_lo; | ||
114 | u32 len_gen; | ||
115 | u32 gen2; | ||
116 | u32 addr_hi; | ||
117 | }; | ||
118 | |||
119 | struct respQ_e { | ||
120 | u32 Qsleeping : 4; | ||
121 | u32 Cmdq1CreditReturn : 5; | ||
122 | u32 Cmdq1DmaComplete : 5; | ||
123 | u32 Cmdq0CreditReturn : 5; | ||
124 | u32 Cmdq0DmaComplete : 5; | ||
125 | u32 FreelistQid : 2; | ||
126 | u32 CreditValid : 1; | ||
127 | u32 DataValid : 1; | ||
128 | u32 Offload : 1; | ||
129 | u32 Eop : 1; | ||
130 | u32 Sop : 1; | ||
131 | u32 GenerationBit : 1; | ||
132 | u32 BufferLength; | ||
133 | }; | ||
134 | #elif defined(__LITTLE_ENDIAN_BITFIELD) | ||
135 | struct cmdQ_e { | ||
136 | u32 len_gen; | ||
137 | u32 addr_lo; | ||
138 | u32 addr_hi; | ||
139 | u32 flags; | ||
140 | }; | ||
141 | |||
142 | struct freelQ_e { | ||
143 | u32 len_gen; | ||
144 | u32 addr_lo; | ||
145 | u32 addr_hi; | ||
146 | u32 gen2; | ||
147 | }; | ||
148 | |||
149 | struct respQ_e { | ||
150 | u32 BufferLength; | ||
151 | u32 GenerationBit : 1; | ||
152 | u32 Sop : 1; | ||
153 | u32 Eop : 1; | ||
154 | u32 Offload : 1; | ||
155 | u32 DataValid : 1; | ||
156 | u32 CreditValid : 1; | ||
157 | u32 FreelistQid : 2; | ||
158 | u32 Cmdq0DmaComplete : 5; | ||
159 | u32 Cmdq0CreditReturn : 5; | ||
160 | u32 Cmdq1DmaComplete : 5; | ||
161 | u32 Cmdq1CreditReturn : 5; | ||
162 | u32 Qsleeping : 4; | ||
163 | }; | ||
164 | #endif | ||
165 | |||
166 | /* | ||
167 | * SW Context Command and Freelist Queue Descriptors | ||
168 | */ | ||
169 | struct cmdQ_ce { | ||
170 | struct sk_buff *skb; | ||
171 | DECLARE_PCI_UNMAP_ADDR(dma_addr); | ||
172 | DECLARE_PCI_UNMAP_LEN(dma_len); | ||
173 | }; | ||
174 | |||
175 | struct freelQ_ce { | ||
176 | struct sk_buff *skb; | ||
177 | DECLARE_PCI_UNMAP_ADDR(dma_addr); | ||
178 | DECLARE_PCI_UNMAP_LEN(dma_len); | ||
179 | }; | ||
180 | |||
181 | /* | ||
182 | * SW command, freelist and response rings | ||
183 | */ | ||
184 | struct cmdQ { | ||
185 | unsigned long status; /* HW DMA fetch status */ | ||
186 | unsigned int in_use; /* # of in-use command descriptors */ | ||
187 | unsigned int size; /* # of descriptors */ | ||
188 | unsigned int processed; /* total # of descs HW has processed */ | ||
189 | unsigned int cleaned; /* total # of descs SW has reclaimed */ | ||
190 | unsigned int stop_thres; /* SW TX queue suspend threshold */ | ||
191 | u16 pidx; /* producer index (SW) */ | ||
192 | u16 cidx; /* consumer index (HW) */ | ||
193 | u8 genbit; /* current generation (=valid) bit */ | ||
194 | u8 sop; /* is next entry start of packet? */ | ||
195 | struct cmdQ_e *entries; /* HW command descriptor Q */ | ||
196 | struct cmdQ_ce *centries; /* SW command context descriptor Q */ | ||
197 | spinlock_t lock; /* Lock to protect cmdQ enqueuing */ | ||
198 | dma_addr_t dma_addr; /* DMA addr HW command descriptor Q */ | ||
199 | }; | ||
200 | |||
201 | struct freelQ { | ||
202 | unsigned int credits; /* # of available RX buffers */ | ||
203 | unsigned int size; /* free list capacity */ | ||
204 | u16 pidx; /* producer index (SW) */ | ||
205 | u16 cidx; /* consumer index (HW) */ | ||
206 | u16 rx_buffer_size; /* Buffer size on this free list */ | ||
207 | u16 dma_offset; /* DMA offset to align IP headers */ | ||
208 | u16 recycleq_idx; /* skb recycle q to use */ | ||
209 | u8 genbit; /* current generation (=valid) bit */ | ||
210 | struct freelQ_e *entries; /* HW freelist descriptor Q */ | ||
211 | struct freelQ_ce *centries; /* SW freelist context descriptor Q */ | ||
212 | dma_addr_t dma_addr; /* DMA addr HW freelist descriptor Q */ | ||
213 | }; | ||
214 | |||
215 | struct respQ { | ||
216 | unsigned int credits; /* credits to be returned to SGE */ | ||
217 | unsigned int size; /* # of response Q descriptors */ | ||
218 | u16 cidx; /* consumer index (SW) */ | ||
219 | u8 genbit; /* current generation(=valid) bit */ | ||
220 | struct respQ_e *entries; /* HW response descriptor Q */ | ||
221 | dma_addr_t dma_addr; /* DMA addr HW response descriptor Q */ | ||
222 | }; | ||
223 | |||
224 | /* Bit flags for cmdQ.status */ | ||
225 | enum { | ||
226 | CMDQ_STAT_RUNNING = 1, /* fetch engine is running */ | ||
227 | CMDQ_STAT_LAST_PKT_DB = 2 /* last packet rung the doorbell */ | ||
228 | }; | ||
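The `genbit` members above implement the generation-bit handshake used by every ring in this file: the producer stamps each entry with the current generation and flips it when the index wraps, and the consumer treats an entry as valid only while its generation matches its own. A minimal sketch of the protocol under simplifying assumptions (no DMA, no credit accounting to bound production, and a comment standing in for the driver's wmb()):

```c
#include <string.h>
#include <stdint.h>

#define RING_SIZE 4	/* tiny for illustration; the real rings are larger */

struct entry {
	uint32_t payload;
	uint8_t  gen;			/* generation (= valid) bit */
};

struct ring {
	struct entry e[RING_SIZE];
	unsigned int pidx, cidx;	/* producer/consumer indices */
	uint8_t prod_gen, cons_gen;	/* each side's current generation */
};

static void ring_init(struct ring *r)
{
	memset(r, 0, sizeof(*r));	/* zeroed entries carry gen == 0 ... */
	r->prod_gen = r->cons_gen = 1;	/* ... which never matches gen == 1 */
}

static void produce(struct ring *r, uint32_t payload)
{
	r->e[r->pidx].payload = payload;
	/* the driver issues wmb() here so the generation bit lands last */
	r->e[r->pidx].gen = r->prod_gen;
	if (++r->pidx == RING_SIZE) {
		r->pidx = 0;
		r->prod_gen ^= 1;	/* flip on wrap */
	}
}

static int consume(struct ring *r, uint32_t *out)
{
	if (r->e[r->cidx].gen != r->cons_gen)
		return 0;		/* entry not (yet) valid: ring empty */
	*out = r->e[r->cidx].payload;
	if (++r->cidx == RING_SIZE) {
		r->cidx = 0;
		r->cons_gen ^= 1;
	}
	return 1;
}
```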
229 | |||
230 | /* | ||
231 | * Main SGE data structure | ||
232 | * | ||
233 | * Interrupts are handled by a single CPU, and it is likely that on an MP | ||
234 | * system the application is migrated to another CPU. In that scenario, we | ||
235 | * try to separate the RX (in irq context) and TX state in order to decrease | ||
236 | * memory contention. | ||
237 | */ | ||
238 | struct sge { | ||
239 | struct adapter *adapter; /* adapter backpointer */ | ||
240 | struct net_device *netdev; /* netdevice backpointer */ | ||
241 | struct freelQ freelQ[SGE_FREELQ_N]; /* buffer free lists */ | ||
242 | struct respQ respQ; /* response Q */ | ||
243 | unsigned long stopped_tx_queues; /* bitmap of suspended Tx queues */ | ||
244 | unsigned int rx_pkt_pad; /* RX padding for L2 packets */ | ||
245 | unsigned int jumbo_fl; /* jumbo freelist Q index */ | ||
246 | unsigned int intrtimer_nres; /* no-resource interrupt timer */ | ||
247 | unsigned int fixed_intrtimer;/* non-adaptive interrupt timer */ | ||
248 | struct timer_list tx_reclaim_timer; /* reclaims TX buffers */ | ||
249 | struct timer_list espibug_timer; | ||
250 | unsigned int espibug_timeout; | ||
251 | struct sk_buff *espibug_skb; | ||
252 | u32 sge_control; /* shadow value of sge control reg */ | ||
253 | struct sge_intr_counts stats; | ||
254 | struct sge_port_stats port_stats[MAX_NPORTS]; | ||
255 | struct cmdQ cmdQ[SGE_CMDQ_N] ____cacheline_aligned_in_smp; | ||
256 | }; | ||
257 | |||
258 | /* | ||
259 | * PIO to indicate that memory mapped Q contains valid descriptor(s). | ||
260 | */ | ||
261 | static inline void doorbell_pio(struct adapter *adapter, u32 val) | ||
262 | { | ||
263 | wmb(); | ||
264 | writel(val, adapter->regs + A_SG_DOORBELL); | ||
265 | } | ||
266 | |||
267 | /* | ||
268 | * Frees all RX buffers on the freelist Q. The caller must make sure that | ||
269 | * the SGE is turned off before calling this function. | ||
270 | */ | ||
271 | static void free_freelQ_buffers(struct pci_dev *pdev, struct freelQ *q) | ||
272 | { | ||
273 | unsigned int cidx = q->cidx; | ||
274 | |||
275 | while (q->credits--) { | ||
276 | struct freelQ_ce *ce = &q->centries[cidx]; | ||
277 | |||
278 | pci_unmap_single(pdev, pci_unmap_addr(ce, dma_addr), | ||
279 | pci_unmap_len(ce, dma_len), | ||
280 | PCI_DMA_FROMDEVICE); | ||
281 | dev_kfree_skb(ce->skb); | ||
282 | ce->skb = NULL; | ||
283 | if (++cidx == q->size) | ||
284 | cidx = 0; | ||
285 | } | ||
286 | } | ||
287 | |||
288 | /* | ||
289 | * Free RX free list and response queue resources. | ||
290 | */ | ||
291 | static void free_rx_resources(struct sge *sge) | ||
292 | { | ||
293 | struct pci_dev *pdev = sge->adapter->pdev; | ||
294 | unsigned int size, i; | ||
295 | |||
296 | if (sge->respQ.entries) { | ||
297 | size = sizeof(struct respQ_e) * sge->respQ.size; | ||
298 | pci_free_consistent(pdev, size, sge->respQ.entries, | ||
299 | sge->respQ.dma_addr); | ||
300 | } | ||
301 | |||
302 | for (i = 0; i < SGE_FREELQ_N; i++) { | ||
303 | struct freelQ *q = &sge->freelQ[i]; | ||
304 | |||
305 | if (q->centries) { | ||
306 | free_freelQ_buffers(pdev, q); | ||
307 | kfree(q->centries); | ||
308 | } | ||
309 | if (q->entries) { | ||
310 | size = sizeof(struct freelQ_e) * q->size; | ||
311 | pci_free_consistent(pdev, size, q->entries, | ||
312 | q->dma_addr); | ||
313 | } | ||
314 | } | ||
315 | } | ||
316 | |||
317 | /* | ||
318 | * Allocates basic RX resources, consisting of memory mapped freelist Qs and a | ||
319 | * response queue. | ||
320 | */ | ||
321 | static int alloc_rx_resources(struct sge *sge, struct sge_params *p) | ||
322 | { | ||
323 | struct pci_dev *pdev = sge->adapter->pdev; | ||
324 | unsigned int size, i; | ||
325 | |||
326 | for (i = 0; i < SGE_FREELQ_N; i++) { | ||
327 | struct freelQ *q = &sge->freelQ[i]; | ||
328 | |||
329 | q->genbit = 1; | ||
330 | q->size = p->freelQ_size[i]; | ||
331 | q->dma_offset = sge->rx_pkt_pad ? 0 : NET_IP_ALIGN; | ||
332 | size = sizeof(struct freelQ_e) * q->size; | ||
333 | q->entries = (struct freelQ_e *) | ||
334 | pci_alloc_consistent(pdev, size, &q->dma_addr); | ||
335 | if (!q->entries) | ||
336 | goto err_no_mem; | ||
337 | memset(q->entries, 0, size); | ||
338 | size = sizeof(struct freelQ_ce) * q->size; | ||
339 | q->centries = kmalloc(size, GFP_KERNEL); | ||
340 | if (!q->centries) | ||
341 | goto err_no_mem; | ||
342 | memset(q->centries, 0, size); | ||
343 | } | ||
344 | |||
345 | /* | ||
346 | * Calculate the buffer sizes for the two free lists. FL0 accommodates | ||
347 | * regular sized Ethernet frames, FL1 is sized not to exceed 16K, | ||
348 | * including all the sk_buff overhead. | ||
349 | * | ||
350 | * Note: For T2 FL0 and FL1 are reversed. | ||
351 | */ | ||
352 | sge->freelQ[!sge->jumbo_fl].rx_buffer_size = SGE_RX_SM_BUF_SIZE + | ||
353 | sizeof(struct cpl_rx_data) + | ||
354 | sge->freelQ[!sge->jumbo_fl].dma_offset; | ||
355 | sge->freelQ[sge->jumbo_fl].rx_buffer_size = (16 * 1024) - | ||
356 | SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); | ||
357 | |||
358 | /* | ||
359 | * Setup which skb recycle Q should be used when recycling buffers from | ||
360 | * each free list. | ||
361 | */ | ||
362 | sge->freelQ[!sge->jumbo_fl].recycleq_idx = 0; | ||
363 | sge->freelQ[sge->jumbo_fl].recycleq_idx = 1; | ||
364 | |||
365 | sge->respQ.genbit = 1; | ||
366 | sge->respQ.size = SGE_RESPQ_E_N; | ||
367 | sge->respQ.credits = 0; | ||
368 | size = sizeof(struct respQ_e) * sge->respQ.size; | ||
369 | sge->respQ.entries = (struct respQ_e *) | ||
370 | pci_alloc_consistent(pdev, size, &sge->respQ.dma_addr); | ||
371 | if (!sge->respQ.entries) | ||
372 | goto err_no_mem; | ||
373 | memset(sge->respQ.entries, 0, size); | ||
374 | return 0; | ||
375 | |||
376 | err_no_mem: | ||
377 | free_rx_resources(sge); | ||
378 | return -ENOMEM; | ||
379 | } | ||
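To attach concrete numbers to the sizing comment in the middle of this function: an FL0 buffer holds a standard frame plus the CPL header and the alignment offset, while a jumbo buffer is sized so that buffer plus sk_buff bookkeeping stays within 16KB. The structure sizes involved are defined elsewhere (cpl_rx_data in cpl5_cmd.h, skb_shared_info in the core kernel), so the two values below are illustrative placeholders only:

```c
#include <stdio.h>

int main(void)
{
	const unsigned sm_buf  = 1536;	/* SGE_RX_SM_BUF_SIZE */
	const unsigned cpl_hdr = 16;	/* assumed sizeof(struct cpl_rx_data) */
	const unsigned dma_off = 2;	/* NET_IP_ALIGN when rx_pkt_pad == 0 */
	const unsigned shinfo  = 320;	/* assumed SKB_DATA_ALIGN(skb_shared_info) */

	printf("FL0 (small) buffer: %u bytes\n", sm_buf + cpl_hdr + dma_off);
	printf("FL1 (jumbo) buffer: %u bytes\n", 16 * 1024 - shinfo);
	return 0;
}
```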
380 | |||
381 | /* | ||
382 | * Reclaims n TX descriptors and frees the buffers associated with them. | ||
383 | */ | ||
384 | static void free_cmdQ_buffers(struct sge *sge, struct cmdQ *q, unsigned int n) | ||
385 | { | ||
386 | struct cmdQ_ce *ce; | ||
387 | struct pci_dev *pdev = sge->adapter->pdev; | ||
388 | unsigned int cidx = q->cidx; | ||
389 | |||
390 | q->in_use -= n; | ||
391 | ce = &q->centries[cidx]; | ||
392 | while (n--) { | ||
393 | if (q->sop) | ||
394 | pci_unmap_single(pdev, pci_unmap_addr(ce, dma_addr), | ||
395 | pci_unmap_len(ce, dma_len), | ||
396 | PCI_DMA_TODEVICE); | ||
397 | else | ||
398 | pci_unmap_page(pdev, pci_unmap_addr(ce, dma_addr), | ||
399 | pci_unmap_len(ce, dma_len), | ||
400 | PCI_DMA_TODEVICE); | ||
401 | q->sop = 0; | ||
402 | if (ce->skb) { | ||
403 | dev_kfree_skb(ce->skb); | ||
404 | q->sop = 1; | ||
405 | } | ||
406 | ce++; | ||
407 | if (++cidx == q->size) { | ||
408 | cidx = 0; | ||
409 | ce = q->centries; | ||
410 | } | ||
411 | } | ||
412 | q->cidx = cidx; | ||
413 | } | ||
414 | |||
415 | /* | ||
416 | * Free TX resources. | ||
417 | * | ||
418 | * Assumes that SGE is stopped and all interrupts are disabled. | ||
419 | */ | ||
420 | static void free_tx_resources(struct sge *sge) | ||
421 | { | ||
422 | struct pci_dev *pdev = sge->adapter->pdev; | ||
423 | unsigned int size, i; | ||
424 | |||
425 | for (i = 0; i < SGE_CMDQ_N; i++) { | ||
426 | struct cmdQ *q = &sge->cmdQ[i]; | ||
427 | |||
428 | if (q->centries) { | ||
429 | if (q->in_use) | ||
430 | free_cmdQ_buffers(sge, q, q->in_use); | ||
431 | kfree(q->centries); | ||
432 | } | ||
433 | if (q->entries) { | ||
434 | size = sizeof(struct cmdQ_e) * q->size; | ||
435 | pci_free_consistent(pdev, size, q->entries, | ||
436 | q->dma_addr); | ||
437 | } | ||
438 | } | ||
439 | } | ||
440 | |||
441 | /* | ||
442 | * Allocates basic TX resources, consisting of memory mapped command Qs. | ||
443 | */ | ||
444 | static int alloc_tx_resources(struct sge *sge, struct sge_params *p) | ||
445 | { | ||
446 | struct pci_dev *pdev = sge->adapter->pdev; | ||
447 | unsigned int size, i; | ||
448 | |||
449 | for (i = 0; i < SGE_CMDQ_N; i++) { | ||
450 | struct cmdQ *q = &sge->cmdQ[i]; | ||
451 | |||
452 | q->genbit = 1; | ||
453 | q->sop = 1; | ||
454 | q->size = p->cmdQ_size[i]; | ||
455 | q->in_use = 0; | ||
456 | q->status = 0; | ||
457 | q->processed = q->cleaned = 0; | ||
458 | q->stop_thres = 0; | ||
459 | spin_lock_init(&q->lock); | ||
460 | size = sizeof(struct cmdQ_e) * q->size; | ||
461 | q->entries = (struct cmdQ_e *) | ||
462 | pci_alloc_consistent(pdev, size, &q->dma_addr); | ||
463 | if (!q->entries) | ||
464 | goto err_no_mem; | ||
465 | memset(q->entries, 0, size); | ||
466 | size = sizeof(struct cmdQ_ce) * q->size; | ||
467 | q->centries = kmalloc(size, GFP_KERNEL); | ||
468 | if (!q->centries) | ||
469 | goto err_no_mem; | ||
470 | memset(q->centries, 0, size); | ||
471 | } | ||
472 | |||
473 | /* | ||
474 | * CommandQ 0 handles Ethernet and TOE packets, while queue 1 is TOE | ||
475 | * only. For queue 0 set the stop threshold so we can handle one more | ||
476 | * packet from each port, plus reserve an additional 24 entries for | ||
477 | * Ethernet packets only. Queue 1 never suspends nor do we reserve | ||
478 | * space for Ethernet packets. | ||
479 | */ | ||
480 | sge->cmdQ[0].stop_thres = sge->adapter->params.nports * | ||
481 | (MAX_SKB_FRAGS + 1); | ||
482 | return 0; | ||
483 | |||
484 | err_no_mem: | ||
485 | free_tx_resources(sge); | ||
486 | return -ENOMEM; | ||
487 | } | ||
488 | |||
489 | static inline void setup_ring_params(struct adapter *adapter, u64 addr, | ||
490 | u32 size, int base_reg_lo, | ||
491 | int base_reg_hi, int size_reg) | ||
492 | { | ||
493 | writel((u32)addr, adapter->regs + base_reg_lo); | ||
494 | writel(addr >> 32, adapter->regs + base_reg_hi); | ||
495 | writel(size, adapter->regs + size_reg); | ||
496 | } | ||
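setup_ring_params() programs each ring's 64-bit bus address through a pair of 32-bit registers; the split is plain truncation for the low word and a shift for the high word. A tiny sketch with a made-up address:

```c
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t addr = 0x0000000123456000ULL;	/* hypothetical ring base */
	uint32_t lo = (uint32_t)addr;		/* writel(..., base_reg_lo) */
	uint32_t hi = (uint32_t)(addr >> 32);	/* writel(..., base_reg_hi) */

	/* prints: lo=0x23456000 hi=0x1 */
	printf("lo=%#x hi=%#x\n", (unsigned)lo, (unsigned)hi);
	return 0;
}
```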
497 | |||
498 | /* | ||
499 | * Enable/disable VLAN acceleration. | ||
500 | */ | ||
501 | void t1_set_vlan_accel(struct adapter *adapter, int on_off) | ||
502 | { | ||
503 | struct sge *sge = adapter->sge; | ||
504 | |||
505 | sge->sge_control &= ~F_VLAN_XTRACT; | ||
506 | if (on_off) | ||
507 | sge->sge_control |= F_VLAN_XTRACT; | ||
508 | if (adapter->open_device_map) { | ||
509 | writel(sge->sge_control, adapter->regs + A_SG_CONTROL); | ||
510 | readl(adapter->regs + A_SG_CONTROL); /* flush */ | ||
511 | } | ||
512 | } | ||
513 | |||
514 | /* | ||
515 | * Programs the various SGE registers. The engine is not yet enabled, | ||
516 | * but sge->sge_control is set up and ready to go. | ||
517 | */ | ||
518 | static void configure_sge(struct sge *sge, struct sge_params *p) | ||
519 | { | ||
520 | struct adapter *ap = sge->adapter; | ||
521 | |||
522 | writel(0, ap->regs + A_SG_CONTROL); | ||
523 | setup_ring_params(ap, sge->cmdQ[0].dma_addr, sge->cmdQ[0].size, | ||
524 | A_SG_CMD0BASELWR, A_SG_CMD0BASEUPR, A_SG_CMD0SIZE); | ||
525 | setup_ring_params(ap, sge->cmdQ[1].dma_addr, sge->cmdQ[1].size, | ||
526 | A_SG_CMD1BASELWR, A_SG_CMD1BASEUPR, A_SG_CMD1SIZE); | ||
527 | setup_ring_params(ap, sge->freelQ[0].dma_addr, | ||
528 | sge->freelQ[0].size, A_SG_FL0BASELWR, | ||
529 | A_SG_FL0BASEUPR, A_SG_FL0SIZE); | ||
530 | setup_ring_params(ap, sge->freelQ[1].dma_addr, | ||
531 | sge->freelQ[1].size, A_SG_FL1BASELWR, | ||
532 | A_SG_FL1BASEUPR, A_SG_FL1SIZE); | ||
533 | |||
534 | /* The threshold comparison uses <. */ | ||
535 | writel(SGE_RX_SM_BUF_SIZE + 1, ap->regs + A_SG_FLTHRESHOLD); | ||
536 | |||
537 | setup_ring_params(ap, sge->respQ.dma_addr, sge->respQ.size, | ||
538 | A_SG_RSPBASELWR, A_SG_RSPBASEUPR, A_SG_RSPSIZE); | ||
539 | writel((u32)sge->respQ.size - 1, ap->regs + A_SG_RSPQUEUECREDIT); | ||
540 | |||
541 | sge->sge_control = F_CMDQ0_ENABLE | F_CMDQ1_ENABLE | F_FL0_ENABLE | | ||
542 | F_FL1_ENABLE | F_CPL_ENABLE | F_RESPONSE_QUEUE_ENABLE | | ||
543 | V_CMDQ_PRIORITY(2) | F_DISABLE_CMDQ1_GTS | F_ISCSI_COALESCE | | ||
544 | F_DISABLE_FL0_GTS | F_DISABLE_FL1_GTS | | ||
545 | V_RX_PKT_OFFSET(sge->rx_pkt_pad); | ||
546 | |||
547 | #if defined(__BIG_ENDIAN_BITFIELD) | ||
548 | sge->sge_control |= F_ENABLE_BIG_ENDIAN; | ||
549 | #endif | ||
550 | |||
551 | /* Initialize no-resource timer */ | ||
552 | sge->intrtimer_nres = SGE_INTRTIMER_NRES * core_ticks_per_usec(ap); | ||
553 | |||
554 | t1_sge_set_coalesce_params(sge, p); | ||
555 | } | ||
556 | |||
557 | /* | ||
558 | * Return the payload capacity of the jumbo free-list buffers. | ||
559 | */ | ||
560 | static inline unsigned int jumbo_payload_capacity(const struct sge *sge) | ||
561 | { | ||
562 | return sge->freelQ[sge->jumbo_fl].rx_buffer_size - | ||
563 | sge->freelQ[sge->jumbo_fl].dma_offset - | ||
564 | sizeof(struct cpl_rx_data); | ||
565 | } | ||
566 | |||
567 | /* | ||
568 | * Frees all SGE related resources and the sge structure itself | ||
569 | */ | ||
570 | void t1_sge_destroy(struct sge *sge) | ||
571 | { | ||
572 | if (sge->espibug_skb) | ||
573 | kfree_skb(sge->espibug_skb); | ||
574 | |||
575 | free_tx_resources(sge); | ||
576 | free_rx_resources(sge); | ||
577 | kfree(sge); | ||
578 | } | ||
579 | |||
580 | /* | ||
581 | * Allocates new RX buffers on the freelist Q (and tracks them on the freelist | ||
582 | * context Q) until the Q is full or alloc_skb fails. | ||
583 | * | ||
584 | * It is possible that the generation bits already match, indicating that the | ||
585 | * buffer is already valid and nothing needs to be done. This happens when we | ||
586 | * have copied a received buffer into a new sk_buff during interrupt processing. | ||
587 | * | ||
588 | * If the SGE doesn't automatically align packets properly (!sge->rx_pkt_pad), | ||
589 | * we specify an RX_OFFSET in order to make sure that the IP header is 4B | ||
590 | * aligned. | ||
591 | */ | ||
592 | static void refill_free_list(struct sge *sge, struct freelQ *q) | ||
593 | { | ||
594 | struct pci_dev *pdev = sge->adapter->pdev; | ||
595 | struct freelQ_ce *ce = &q->centries[q->pidx]; | ||
596 | struct freelQ_e *e = &q->entries[q->pidx]; | ||
597 | unsigned int dma_len = q->rx_buffer_size - q->dma_offset; | ||
598 | |||
599 | |||
600 | while (q->credits < q->size) { | ||
601 | struct sk_buff *skb; | ||
602 | dma_addr_t mapping; | ||
603 | |||
604 | skb = alloc_skb(q->rx_buffer_size, GFP_ATOMIC); | ||
605 | if (!skb) | ||
606 | break; | ||
607 | |||
608 | skb_reserve(skb, q->dma_offset); | ||
609 | mapping = pci_map_single(pdev, skb->data, dma_len, | ||
610 | PCI_DMA_FROMDEVICE); | ||
611 | ce->skb = skb; | ||
612 | pci_unmap_addr_set(ce, dma_addr, mapping); | ||
613 | pci_unmap_len_set(ce, dma_len, dma_len); | ||
614 | e->addr_lo = (u32)mapping; | ||
615 | e->addr_hi = (u64)mapping >> 32; | ||
616 | e->len_gen = V_CMD_LEN(dma_len) | V_CMD_GEN1(q->genbit); | ||
617 | wmb(); | ||
618 | e->gen2 = V_CMD_GEN2(q->genbit); | ||
619 | |||
620 | e++; | ||
621 | ce++; | ||
622 | if (++q->pidx == q->size) { | ||
623 | q->pidx = 0; | ||
624 | q->genbit ^= 1; | ||
625 | ce = q->centries; | ||
626 | e = q->entries; | ||
627 | } | ||
628 | q->credits++; | ||
629 | } | ||
630 | |||
631 | } | ||
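The dma_offset reserved above (NET_IP_ALIGN when the hardware does not pad) exists because the 14-byte Ethernet header would otherwise leave the IP header on a 2-byte boundary. A quick check of the arithmetic:

```c
#include <stdio.h>

int main(void)
{
	const int eth_hlen = 14;	/* Ethernet header length */

	/* without padding: IP header at offset 14, 14 % 4 == 2 (misaligned) */
	printf("no pad: offset %d, mod 4 = %d\n", eth_hlen, eth_hlen % 4);

	/* with NET_IP_ALIGN (2): offset 16, 16 % 4 == 0 (4B aligned) */
	printf("pad 2:  offset %d, mod 4 = %d\n",
	       2 + eth_hlen, (2 + eth_hlen) % 4);
	return 0;
}
```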
632 | |||
633 | /* | ||
634 | * Calls refill_free_list for both free lists. If we cannot fill at least 1/4 | ||
635 | * of both rings, we go into 'few interrupt mode' in order to give the system | ||
636 | * time to free up resources. | ||
637 | */ | ||
638 | static void freelQs_empty(struct sge *sge) | ||
639 | { | ||
640 | struct adapter *adapter = sge->adapter; | ||
641 | u32 irq_reg = readl(adapter->regs + A_SG_INT_ENABLE); | ||
642 | u32 irqholdoff_reg; | ||
643 | |||
644 | refill_free_list(sge, &sge->freelQ[0]); | ||
645 | refill_free_list(sge, &sge->freelQ[1]); | ||
646 | |||
647 | if (sge->freelQ[0].credits > (sge->freelQ[0].size >> 2) && | ||
648 | sge->freelQ[1].credits > (sge->freelQ[1].size >> 2)) { | ||
649 | irq_reg |= F_FL_EXHAUSTED; | ||
650 | irqholdoff_reg = sge->fixed_intrtimer; | ||
651 | } else { | ||
652 | /* Clear the F_FL_EXHAUSTED interrupts for now */ | ||
653 | irq_reg &= ~F_FL_EXHAUSTED; | ||
654 | irqholdoff_reg = sge->intrtimer_nres; | ||
655 | } | ||
656 | writel(irqholdoff_reg, adapter->regs + A_SG_INTRTIMER); | ||
657 | writel(irq_reg, adapter->regs + A_SG_INT_ENABLE); | ||
658 | |||
659 | /* We reenable the Qs to force a freelist GTS interrupt later */ | ||
660 | doorbell_pio(adapter, F_FL0_ENABLE | F_FL1_ENABLE); | ||
661 | } | ||
662 | |||
663 | #define SGE_PL_INTR_MASK (F_PL_INTR_SGE_ERR | F_PL_INTR_SGE_DATA) | ||
664 | #define SGE_INT_FATAL (F_RESPQ_OVERFLOW | F_PACKET_TOO_BIG | F_PACKET_MISMATCH) | ||
665 | #define SGE_INT_ENABLE (F_RESPQ_EXHAUSTED | F_RESPQ_OVERFLOW | \ | ||
666 | F_FL_EXHAUSTED | F_PACKET_TOO_BIG | F_PACKET_MISMATCH) | ||
667 | |||
668 | /* | ||
669 | * Disable SGE Interrupts | ||
670 | */ | ||
671 | void t1_sge_intr_disable(struct sge *sge) | ||
672 | { | ||
673 | u32 val = readl(sge->adapter->regs + A_PL_ENABLE); | ||
674 | |||
675 | writel(val & ~SGE_PL_INTR_MASK, sge->adapter->regs + A_PL_ENABLE); | ||
676 | writel(0, sge->adapter->regs + A_SG_INT_ENABLE); | ||
677 | } | ||
678 | |||
679 | /* | ||
680 | * Enable SGE interrupts. | ||
681 | */ | ||
682 | void t1_sge_intr_enable(struct sge *sge) | ||
683 | { | ||
684 | u32 en = SGE_INT_ENABLE; | ||
685 | u32 val = readl(sge->adapter->regs + A_PL_ENABLE); | ||
686 | |||
687 | if (sge->adapter->flags & TSO_CAPABLE) | ||
688 | en &= ~F_PACKET_TOO_BIG; | ||
689 | writel(en, sge->adapter->regs + A_SG_INT_ENABLE); | ||
690 | writel(val | SGE_PL_INTR_MASK, sge->adapter->regs + A_PL_ENABLE); | ||
691 | } | ||
692 | |||
693 | /* | ||
694 | * Clear SGE interrupts. | ||
695 | */ | ||
696 | void t1_sge_intr_clear(struct sge *sge) | ||
697 | { | ||
698 | writel(SGE_PL_INTR_MASK, sge->adapter->regs + A_PL_CAUSE); | ||
699 | writel(0xffffffff, sge->adapter->regs + A_SG_INT_CAUSE); | ||
700 | } | ||
701 | |||
702 | /* | ||
703 | * SGE 'Error' interrupt handler | ||
704 | */ | ||
705 | int t1_sge_intr_error_handler(struct sge *sge) | ||
706 | { | ||
707 | struct adapter *adapter = sge->adapter; | ||
708 | u32 cause = readl(adapter->regs + A_SG_INT_CAUSE); | ||
709 | |||
710 | if (adapter->flags & TSO_CAPABLE) | ||
711 | cause &= ~F_PACKET_TOO_BIG; | ||
712 | if (cause & F_RESPQ_EXHAUSTED) | ||
713 | sge->stats.respQ_empty++; | ||
714 | if (cause & F_RESPQ_OVERFLOW) { | ||
715 | sge->stats.respQ_overflow++; | ||
716 | CH_ALERT("%s: SGE response queue overflow\n", | ||
717 | adapter->name); | ||
718 | } | ||
719 | if (cause & F_FL_EXHAUSTED) { | ||
720 | sge->stats.freelistQ_empty++; | ||
721 | freelQs_empty(sge); | ||
722 | } | ||
723 | if (cause & F_PACKET_TOO_BIG) { | ||
724 | sge->stats.pkt_too_big++; | ||
725 | CH_ALERT("%s: SGE max packet size exceeded\n", | ||
726 | adapter->name); | ||
727 | } | ||
728 | if (cause & F_PACKET_MISMATCH) { | ||
729 | sge->stats.pkt_mismatch++; | ||
730 | CH_ALERT("%s: SGE packet mismatch\n", adapter->name); | ||
731 | } | ||
732 | if (cause & SGE_INT_FATAL) | ||
733 | t1_fatal_err(adapter); | ||
734 | |||
735 | writel(cause, adapter->regs + A_SG_INT_CAUSE); | ||
736 | return 0; | ||
737 | } | ||
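The handler above follows the usual read/act/write-back discipline for cause registers: it writes back exactly the value it read, so on write-1-to-clear hardware only the causes it actually handled are cleared, and a cause raised between the read and the write survives for the next interrupt. A schematic sketch, with a plain variable standing in for the memory-mapped register:

```c
#include <stdint.h>

static uint32_t fake_int_cause;		/* stands in for A_SG_INT_CAUSE */

#define CAUSE_A (1u << 0)
#define CAUSE_B (1u << 1)

static void error_handler(void)
{
	uint32_t cause = fake_int_cause;	/* readl(regs + A_SG_INT_CAUSE) */

	if (cause & CAUSE_A) {
		/* count/report condition A */
	}
	if (cause & CAUSE_B) {
		/* count/report condition B */
	}

	/* write back only what we saw; bits set after the read stay set */
	fake_int_cause &= ~cause;	/* writel(cause, regs + A_SG_INT_CAUSE) */
}
```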
738 | |||
739 | const struct sge_intr_counts *t1_sge_get_intr_counts(struct sge *sge) | ||
740 | { | ||
741 | return &sge->stats; | ||
742 | } | ||
743 | |||
744 | const struct sge_port_stats *t1_sge_get_port_stats(struct sge *sge, int port) | ||
745 | { | ||
746 | return &sge->port_stats[port]; | ||
747 | } | ||
748 | |||
749 | /** | ||
750 | * recycle_fl_buf - recycle a free list buffer | ||
751 | * @fl: the free list | ||
752 | * @idx: index of buffer to recycle | ||
753 | * | ||
754 | * Recycles the specified buffer on the given free list by adding it at | ||
755 | * the next available slot on the list. | ||
756 | */ | ||
757 | static void recycle_fl_buf(struct freelQ *fl, int idx) | ||
758 | { | ||
759 | struct freelQ_e *from = &fl->entries[idx]; | ||
760 | struct freelQ_e *to = &fl->entries[fl->pidx]; | ||
761 | |||
762 | fl->centries[fl->pidx] = fl->centries[idx]; | ||
763 | to->addr_lo = from->addr_lo; | ||
764 | to->addr_hi = from->addr_hi; | ||
765 | to->len_gen = G_CMD_LEN(from->len_gen) | V_CMD_GEN1(fl->genbit); | ||
766 | wmb(); | ||
767 | to->gen2 = V_CMD_GEN2(fl->genbit); | ||
768 | fl->credits++; | ||
769 | |||
770 | if (++fl->pidx == fl->size) { | ||
771 | fl->pidx = 0; | ||
772 | fl->genbit ^= 1; | ||
773 | } | ||
774 | } | ||
775 | |||
776 | /** | ||
777 | * get_packet - return the next ingress packet buffer | ||
778 | * @pdev: the PCI device that received the packet | ||
779 | * @fl: the SGE free list holding the packet | ||
780 | * @len: the actual packet length, excluding any SGE padding | ||
781 | * @dma_pad: padding at beginning of buffer left by SGE DMA | ||
782 | * @skb_pad: padding to be used if the packet is copied | ||
783 | * @copy_thres: length threshold under which a packet should be copied | ||
784 | * @drop_thres: # of remaining buffers before we start dropping packets | ||
785 | * | ||
786 | * Get the next packet from a free list and complete setup of the | ||
787 | * sk_buff. If the packet is small we make a copy and recycle the | ||
788 | * original buffer, otherwise we use the original buffer itself. If a | ||
789 | * positive drop threshold is supplied packets are dropped and their | ||
790 | * buffers recycled if (a) the number of remaining buffers is under the | ||
791 | * threshold and the packet is too big to copy, or (b) the packet should | ||
792 | * be copied but there is no memory for the copy. | ||
793 | */ | ||
794 | static inline struct sk_buff *get_packet(struct pci_dev *pdev, | ||
795 | struct freelQ *fl, unsigned int len, | ||
796 | int dma_pad, int skb_pad, | ||
797 | unsigned int copy_thres, | ||
798 | unsigned int drop_thres) | ||
799 | { | ||
800 | struct sk_buff *skb; | ||
801 | struct freelQ_ce *ce = &fl->centries[fl->cidx]; | ||
802 | |||
803 | if (len < copy_thres) { | ||
804 | skb = alloc_skb(len + skb_pad, GFP_ATOMIC); | ||
805 | if (likely(skb != NULL)) { | ||
806 | skb_reserve(skb, skb_pad); | ||
807 | skb_put(skb, len); | ||
808 | pci_dma_sync_single_for_cpu(pdev, | ||
809 | pci_unmap_addr(ce, dma_addr), | ||
810 | pci_unmap_len(ce, dma_len), | ||
811 | PCI_DMA_FROMDEVICE); | ||
812 | memcpy(skb->data, ce->skb->data + dma_pad, len); | ||
813 | pci_dma_sync_single_for_device(pdev, | ||
814 | pci_unmap_addr(ce, dma_addr), | ||
815 | pci_unmap_len(ce, dma_len), | ||
816 | PCI_DMA_FROMDEVICE); | ||
817 | } else if (!drop_thres) | ||
818 | goto use_orig_buf; | ||
819 | |||
820 | recycle_fl_buf(fl, fl->cidx); | ||
821 | return skb; | ||
822 | } | ||
823 | |||
824 | if (fl->credits < drop_thres) { | ||
825 | recycle_fl_buf(fl, fl->cidx); | ||
826 | return NULL; | ||
827 | } | ||
828 | |||
829 | use_orig_buf: | ||
830 | pci_unmap_single(pdev, pci_unmap_addr(ce, dma_addr), | ||
831 | pci_unmap_len(ce, dma_len), PCI_DMA_FROMDEVICE); | ||
832 | skb = ce->skb; | ||
833 | skb_reserve(skb, dma_pad); | ||
834 | skb_put(skb, len); | ||
835 | return skb; | ||
836 | } | ||
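The copy/recycle/drop policy documented in the kerneldoc above reduces to three outcomes. A condensed sketch of just the decision (simplified: the real function also falls back to the original buffer when the copy allocation fails and no drop threshold was given):

```c
#include <stddef.h>

enum rx_action { RX_COPY, RX_USE_ORIG, RX_DROP };

/* len:        packet length, excluding SGE padding
 * credits:    RX buffers still available on this free list
 * copy_thres: packets shorter than this are copied, buffer recycled
 * drop_thres: with fewer buffers than this, big packets are dropped */
static enum rx_action rx_policy(size_t len, unsigned int credits,
				size_t copy_thres, unsigned int drop_thres)
{
	if (len < copy_thres)
		return RX_COPY;		/* cheap memcpy; recycle the DMA buffer */
	if (credits < drop_thres)
		return RX_DROP;		/* too few buffers left to give one away */
	return RX_USE_ORIG;		/* zero-copy: hand the buffer upstream */
}
```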
837 | |||
838 | /** | ||
839 | * unexpected_offload - handle an unexpected offload packet | ||
840 | * @adapter: the adapter | ||
841 | * @fl: the free list that received the packet | ||
842 | * | ||
843 | * Called when we receive an unexpected offload packet (e.g., the TOE | ||
844 | * function is disabled or the card is a NIC). Prints a message and | ||
845 | * recycles the buffer. | ||
846 | */ | ||
847 | static void unexpected_offload(struct adapter *adapter, struct freelQ *fl) | ||
848 | { | ||
849 | struct freelQ_ce *ce = &fl->centries[fl->cidx]; | ||
850 | struct sk_buff *skb = ce->skb; | ||
851 | |||
852 | pci_dma_sync_single_for_cpu(adapter->pdev, pci_unmap_addr(ce, dma_addr), | ||
853 | pci_unmap_len(ce, dma_len), PCI_DMA_FROMDEVICE); | ||
854 | CH_ERR("%s: unexpected offload packet, cmd %u\n", | ||
855 | adapter->name, *skb->data); | ||
856 | recycle_fl_buf(fl, fl->cidx); | ||
857 | } | ||
858 | |||
859 | /* | ||
860 | * Write the command descriptors to transmit the given skb starting at | ||
861 | * descriptor pidx with the given generation. | ||
862 | */ | ||
863 | static inline void write_tx_descs(struct adapter *adapter, struct sk_buff *skb, | ||
864 | unsigned int pidx, unsigned int gen, | ||
865 | struct cmdQ *q) | ||
866 | { | ||
867 | dma_addr_t mapping; | ||
868 | struct cmdQ_e *e, *e1; | ||
869 | struct cmdQ_ce *ce; | ||
870 | unsigned int i, flags, nfrags = skb_shinfo(skb)->nr_frags; | ||
871 | |||
872 | mapping = pci_map_single(adapter->pdev, skb->data, | ||
873 | skb->len - skb->data_len, PCI_DMA_TODEVICE); | ||
874 | ce = &q->centries[pidx]; | ||
875 | ce->skb = NULL; | ||
876 | pci_unmap_addr_set(ce, dma_addr, mapping); | ||
877 | pci_unmap_len_set(ce, dma_len, skb->len - skb->data_len); | ||
878 | |||
879 | flags = F_CMD_DATAVALID | F_CMD_SOP | V_CMD_EOP(nfrags == 0) | | ||
880 | V_CMD_GEN2(gen); | ||
881 | e = &q->entries[pidx]; | ||
882 | e->addr_lo = (u32)mapping; | ||
883 | e->addr_hi = (u64)mapping >> 32; | ||
884 | e->len_gen = V_CMD_LEN(skb->len - skb->data_len) | V_CMD_GEN1(gen); | ||
885 | for (e1 = e, i = 0; nfrags--; i++) { | ||
886 | skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; | ||
887 | |||
888 | ce++; | ||
889 | e1++; | ||
890 | if (++pidx == q->size) { | ||
891 | pidx = 0; | ||
892 | gen ^= 1; | ||
893 | ce = q->centries; | ||
894 | e1 = q->entries; | ||
895 | } | ||
896 | |||
897 | mapping = pci_map_page(adapter->pdev, frag->page, | ||
898 | frag->page_offset, frag->size, | ||
899 | PCI_DMA_TODEVICE); | ||
900 | ce->skb = NULL; | ||
901 | pci_unmap_addr_set(ce, dma_addr, mapping); | ||
902 | pci_unmap_len_set(ce, dma_len, frag->size); | ||
903 | |||
904 | e1->addr_lo = (u32)mapping; | ||
905 | e1->addr_hi = (u64)mapping >> 32; | ||
906 | e1->len_gen = V_CMD_LEN(frag->size) | V_CMD_GEN1(gen); | ||
907 | e1->flags = F_CMD_DATAVALID | V_CMD_EOP(nfrags == 0) | | ||
908 | V_CMD_GEN2(gen); | ||
909 | } | ||
910 | |||
911 | ce->skb = skb; | ||
912 | wmb(); | ||
913 | e->flags = flags; | ||
914 | } | ||
915 | |||
916 | /* | ||
917 | * Clean up completed Tx buffers. | ||
918 | */ | ||
919 | static inline void reclaim_completed_tx(struct sge *sge, struct cmdQ *q) | ||
920 | { | ||
921 | unsigned int reclaim = q->processed - q->cleaned; | ||
922 | |||
923 | if (reclaim) { | ||
924 | free_cmdQ_buffers(sge, q, reclaim); | ||
925 | q->cleaned += reclaim; | ||
926 | } | ||
927 | } | ||
928 | |||
929 | #ifndef SET_ETHTOOL_OPS | ||
930 | # define __netif_rx_complete(dev) netif_rx_complete(dev) | ||
931 | #endif | ||
932 | |||
933 | /* | ||
934 | * We cannot use the standard netif_rx_schedule_prep() because we have multiple | ||
935 | * ports plus the TOE all multiplexing onto a single response queue, therefore | ||
936 | * accepting new responses cannot depend on the state of any particular port. | ||
937 | * So define our own equivalent that omits the netif_running() test. | ||
938 | */ | ||
939 | static inline int napi_schedule_prep(struct net_device *dev) | ||
940 | { | ||
941 | return !test_and_set_bit(__LINK_STATE_RX_SCHED, &dev->state); | ||
942 | } | ||
943 | |||
944 | |||
945 | /** | ||
946 | * sge_rx - process an ingress ethernet packet | ||
947 | * @sge: the sge structure | ||
948 | * @fl: the free list that contains the packet buffer | ||
949 | * @len: the packet length | ||
950 | * | ||
951 | * Process an ingress ethernet packet and deliver it to the stack. | ||
952 | */ | ||
953 | static int sge_rx(struct sge *sge, struct freelQ *fl, unsigned int len) | ||
954 | { | ||
955 | struct sk_buff *skb; | ||
956 | struct cpl_rx_pkt *p; | ||
957 | struct adapter *adapter = sge->adapter; | ||
958 | |||
959 | sge->stats.ethernet_pkts++; | ||
960 | skb = get_packet(adapter->pdev, fl, len - sge->rx_pkt_pad, | ||
961 | sge->rx_pkt_pad, 2, SGE_RX_COPY_THRES, | ||
962 | SGE_RX_DROP_THRES); | ||
963 | if (!skb) { | ||
964 | sge->port_stats[0].rx_drops++; /* charge only port 0 for now */ | ||
965 | return 0; | ||
966 | } | ||
967 | |||
968 | p = (struct cpl_rx_pkt *)skb->data; | ||
969 | skb_pull(skb, sizeof(*p)); | ||
970 | skb->dev = adapter->port[p->iff].dev; | ||
971 | skb->dev->last_rx = jiffies; | ||
972 | skb->protocol = eth_type_trans(skb, skb->dev); | ||
973 | if ((adapter->flags & RX_CSUM_ENABLED) && p->csum == 0xffff && | ||
974 | skb->protocol == htons(ETH_P_IP) && | ||
975 | (skb->data[9] == IPPROTO_TCP || skb->data[9] == IPPROTO_UDP)) { | ||
976 | sge->port_stats[p->iff].rx_cso_good++; | ||
977 | skb->ip_summed = CHECKSUM_UNNECESSARY; | ||
978 | } else | ||
979 | skb->ip_summed = CHECKSUM_NONE; | ||
980 | |||
981 | if (unlikely(adapter->vlan_grp && p->vlan_valid)) { | ||
982 | sge->port_stats[p->iff].vlan_xtract++; | ||
983 | if (adapter->params.sge.polling) | ||
984 | vlan_hwaccel_receive_skb(skb, adapter->vlan_grp, | ||
985 | ntohs(p->vlan)); | ||
986 | else | ||
987 | vlan_hwaccel_rx(skb, adapter->vlan_grp, | ||
988 | ntohs(p->vlan)); | ||
989 | } else if (adapter->params.sge.polling) | ||
990 | netif_receive_skb(skb); | ||
991 | else | ||
992 | netif_rx(skb); | ||
993 | return 0; | ||
994 | } | ||
995 | |||
996 | /* | ||
997 | * Returns true if a command queue has enough available descriptors that | ||
998 | * we can resume Tx operation after temporarily disabling its packet queue. | ||
999 | */ | ||
1000 | static inline int enough_free_Tx_descs(const struct cmdQ *q) | ||
1001 | { | ||
1002 | unsigned int r = q->processed - q->cleaned; | ||
1003 | |||
1004 | return q->in_use - r < (q->size >> 1); | ||
1005 | } | ||
1006 | |||
1007 | /* | ||
1008 | * Called when sufficient space has become available in the SGE command queues | ||
1009 | * after the Tx packet schedulers have been suspended to restart the Tx path. | ||
1010 | */ | ||
1011 | static void restart_tx_queues(struct sge *sge) | ||
1012 | { | ||
1013 | struct adapter *adap = sge->adapter; | ||
1014 | |||
1015 | if (enough_free_Tx_descs(&sge->cmdQ[0])) { | ||
1016 | int i; | ||
1017 | |||
1018 | for_each_port(adap, i) { | ||
1019 | struct net_device *nd = adap->port[i].dev; | ||
1020 | |||
1021 | if (test_and_clear_bit(nd->if_port, | ||
1022 | &sge->stopped_tx_queues) && | ||
1023 | netif_running(nd)) { | ||
1024 | sge->stats.cmdQ_restarted[3]++; | ||
1025 | netif_wake_queue(nd); | ||
1026 | } | ||
1027 | } | ||
1028 | } | ||
1029 | } | ||
1030 | |||
1031 | /* | ||
1032 | * update_tx_info is called from the interrupt handler/NAPI to process | ||
1033 | * returned cmdQ0 credit information. | ||
1034 | */ | ||
1035 | static unsigned int update_tx_info(struct adapter *adapter, | ||
1036 | unsigned int flags, | ||
1037 | unsigned int pr0) | ||
1038 | { | ||
1039 | struct sge *sge = adapter->sge; | ||
1040 | struct cmdQ *cmdq = &sge->cmdQ[0]; | ||
1041 | |||
1042 | cmdq->processed += pr0; | ||
1043 | |||
1044 | if (flags & F_CMDQ0_ENABLE) { | ||
1045 | clear_bit(CMDQ_STAT_RUNNING, &cmdq->status); | ||
1046 | |||
1047 | if (cmdq->cleaned + cmdq->in_use != cmdq->processed && | ||
1048 | !test_and_set_bit(CMDQ_STAT_LAST_PKT_DB, &cmdq->status)) { | ||
1049 | set_bit(CMDQ_STAT_RUNNING, &cmdq->status); | ||
1050 | writel(F_CMDQ0_ENABLE, adapter->regs + A_SG_DOORBELL); | ||
1051 | } | ||
1052 | flags &= ~F_CMDQ0_ENABLE; | ||
1053 | } | ||
1054 | |||
1055 | if (unlikely(sge->stopped_tx_queues != 0)) | ||
1056 | restart_tx_queues(sge); | ||
1057 | |||
1058 | return flags; | ||
1059 | } | ||
1060 | |||
1061 | /* | ||
1062 | * Process SGE responses, up to the supplied budget. Returns the number of | ||
1063 | * responses processed. A negative budget is effectively unlimited. | ||
1064 | */ | ||
1065 | static int process_responses(struct adapter *adapter, int budget) | ||
1066 | { | ||
1067 | struct sge *sge = adapter->sge; | ||
1068 | struct respQ *q = &sge->respQ; | ||
1069 | struct respQ_e *e = &q->entries[q->cidx]; | ||
1070 | int budget_left = budget; | ||
1071 | unsigned int flags = 0; | ||
1072 | unsigned int cmdq_processed[SGE_CMDQ_N] = {0, 0}; | ||
1073 | |||
1074 | |||
1075 | while (likely(budget_left && e->GenerationBit == q->genbit)) { | ||
1076 | flags |= e->Qsleeping; | ||
1077 | |||
1078 | cmdq_processed[0] += e->Cmdq0CreditReturn; | ||
1079 | cmdq_processed[1] += e->Cmdq1CreditReturn; | ||
1080 | |||
1081 | /* We batch updates to the TX side to avoid cacheline | ||
1082 | * ping-pong of TX state information on MP where the sender | ||
1083 | * might run on a different CPU than this function... | ||
1084 | */ | ||
1085 | if (unlikely(flags & F_CMDQ0_ENABLE || cmdq_processed[0] > 64)) { | ||
1086 | flags = update_tx_info(adapter, flags, cmdq_processed[0]); | ||
1087 | cmdq_processed[0] = 0; | ||
1088 | } | ||
1089 | if (unlikely(cmdq_processed[1] > 16)) { | ||
1090 | sge->cmdQ[1].processed += cmdq_processed[1]; | ||
1091 | cmdq_processed[1] = 0; | ||
1092 | } | ||
1093 | if (likely(e->DataValid)) { | ||
1094 | struct freelQ *fl = &sge->freelQ[e->FreelistQid]; | ||
1095 | |||
1096 | if (unlikely(!e->Sop || !e->Eop)) | ||
1097 | BUG(); | ||
1098 | if (unlikely(e->Offload)) | ||
1099 | unexpected_offload(adapter, fl); | ||
1100 | else | ||
1101 | sge_rx(sge, fl, e->BufferLength); | ||
1102 | |||
1103 | /* | ||
1104 | * Note: this depends on each packet consuming a | ||
1105 | * single free-list buffer; cf. the BUG above. | ||
1106 | */ | ||
1107 | if (++fl->cidx == fl->size) | ||
1108 | fl->cidx = 0; | ||
1109 | if (unlikely(--fl->credits < | ||
1110 | fl->size - SGE_FREEL_REFILL_THRESH)) | ||
1111 | refill_free_list(sge, fl); | ||
1112 | } else | ||
1113 | sge->stats.pure_rsps++; | ||
1114 | |||
1115 | e++; | ||
1116 | if (unlikely(++q->cidx == q->size)) { | ||
1117 | q->cidx = 0; | ||
1118 | q->genbit ^= 1; | ||
1119 | e = q->entries; | ||
1120 | } | ||
1121 | prefetch(e); | ||
1122 | |||
1123 | if (++q->credits > SGE_RESPQ_REPLENISH_THRES) { | ||
1124 | writel(q->credits, adapter->regs + A_SG_RSPQUEUECREDIT); | ||
1125 | q->credits = 0; | ||
1126 | } | ||
1127 | --budget_left; | ||
1128 | } | ||
1129 | |||
1130 | flags = update_tx_info(adapter, flags, cmdq_processed[0]); | ||
1131 | sge->cmdQ[1].processed += cmdq_processed[1]; | ||
1132 | |||
1133 | budget -= budget_left; | ||
1134 | return budget; | ||
1135 | } | ||
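The loop above never consults a producer index: the response ring relies on the generation-bit convention, where the hardware writes each entry's GenerationBit equal to the ring's current generation, an entry is new exactly when the bits match, and the consumer flips its expected bit every time cidx wraps. A self-contained sketch of the idea (names and values illustrative, one lap pre-filled):

#include <stdio.h>

#define RING_SIZE 4

struct entry {
	int genbit;	/* written by the producer */
	int data;
};

int main(void)
{
	/* the producer has filled one full lap with generation 1 */
	struct entry ring[RING_SIZE] = { {1, 10}, {1, 11}, {1, 12}, {1, 13} };
	unsigned int cidx = 0;
	int genbit = 1;	/* generation the consumer expects */

	while (ring[cidx].genbit == genbit) {
		printf("consume %d\n", ring[cidx].data);
		if (++cidx == RING_SIZE) {
			cidx = 0;
			genbit ^= 1;	/* the next lap carries the inverted bit */
		}
	}
	return 0;
}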
1136 | |||
1137 | /* | ||
1138 | * A simpler version of process_responses() that handles only pure (i.e., | ||
1139 | * non data-carrying) responses. Such responses are too light-weight to justify | ||
1140 | * calling a softirq when using NAPI, so we handle them specially in hard | ||
1141 | * interrupt context. The function is called with a pointer to a response, | ||
1142 | * which the caller must ensure is a valid pure response. Returns 1 if it | ||
1143 | * encounters a valid data-carrying response, 0 otherwise. | ||
1144 | */ | ||
1145 | static int process_pure_responses(struct adapter *adapter, struct respQ_e *e) | ||
1146 | { | ||
1147 | struct sge *sge = adapter->sge; | ||
1148 | struct respQ *q = &sge->respQ; | ||
1149 | unsigned int flags = 0; | ||
1150 | unsigned int cmdq_processed[SGE_CMDQ_N] = {0, 0}; | ||
1151 | |||
1152 | do { | ||
1153 | flags |= e->Qsleeping; | ||
1154 | |||
1155 | cmdq_processed[0] += e->Cmdq0CreditReturn; | ||
1156 | cmdq_processed[1] += e->Cmdq1CreditReturn; | ||
1157 | |||
1158 | e++; | ||
1159 | if (unlikely(++q->cidx == q->size)) { | ||
1160 | q->cidx = 0; | ||
1161 | q->genbit ^= 1; | ||
1162 | e = q->entries; | ||
1163 | } | ||
1164 | prefetch(e); | ||
1165 | |||
1166 | if (++q->credits > SGE_RESPQ_REPLENISH_THRES) { | ||
1167 | writel(q->credits, adapter->regs + A_SG_RSPQUEUECREDIT); | ||
1168 | q->credits = 0; | ||
1169 | } | ||
1170 | sge->stats.pure_rsps++; | ||
1171 | } while (e->GenerationBit == q->genbit && !e->DataValid); | ||
1172 | |||
1173 | flags = update_tx_info(adapter, flags, cmdq_processed[0]); | ||
1174 | sge->cmdQ[1].processed += cmdq_processed[1]; | ||
1175 | |||
1176 | return e->GenerationBit == q->genbit; | ||
1177 | } | ||
1178 | |||
1179 | /* | ||
1180 | * Handler for new data events when using NAPI. This does not need any locking | ||
1181 | * or protection from interrupts as data interrupts are off at this point and | ||
1182 | * other adapter interrupts do not interfere. | ||
1183 | */ | ||
1184 | static int t1_poll(struct net_device *dev, int *budget) | ||
1185 | { | ||
1186 | struct adapter *adapter = dev->priv; | ||
1187 | int effective_budget = min(*budget, dev->quota); | ||
1188 | |||
1189 | int work_done = process_responses(adapter, effective_budget); | ||
1190 | *budget -= work_done; | ||
1191 | dev->quota -= work_done; | ||
1192 | |||
1193 | if (work_done >= effective_budget) | ||
1194 | return 1; | ||
1195 | |||
1196 | __netif_rx_complete(dev); | ||
1197 | |||
1198 | /* | ||
1199 | * Because we don't atomically flush the following write it is | ||
1200 | * possible that in very rare cases it can reach the device in a way | ||
1201 | * that races with a new response being written plus an error interrupt | ||
1202 | * causing the NAPI interrupt handler below to return unhandled status | ||
1203 | * to the OS. To protect against this would require flushing the write | ||
1204 | * and doing both the write and the flush with interrupts off. Way too | ||
1205 | * expensive and unjustifiable given the rarity of the race. | ||
1206 | */ | ||
1207 | writel(adapter->sge->respQ.cidx, adapter->regs + A_SG_SLEEPING); | ||
1208 | return 0; | ||
1209 | } | ||
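t1_poll() follows the 2.6-era NAPI contract: process at most min(*budget, dev->quota) responses, charge both counters, and return nonzero only when the effective budget was exhausted and the poll must be rescheduled. A small user-space sketch of that bookkeeping (numbers invented):

#include <stdio.h>

static int poll_once(int *budget, int *quota, int pending)
{
	int effective = *budget < *quota ? *budget : *quota;
	int done = pending < effective ? pending : effective;

	*budget -= done;
	*quota -= done;
	return done >= effective;	/* 1: keep polling, 0: complete */
}

int main(void)
{
	int budget = 64, quota = 16;
	int more = poll_once(&budget, &quota, 100);

	printf("more work: %d, budget left: %d\n", more, budget);	/* 1, 48 */
	return 0;
}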
1210 | |||
1211 | /* | ||
1212 | * Returns true if the device is already scheduled for polling. | ||
1213 | */ | ||
1214 | static inline int napi_is_scheduled(struct net_device *dev) | ||
1215 | { | ||
1216 | return test_bit(__LINK_STATE_RX_SCHED, &dev->state); | ||
1217 | } | ||
1218 | |||
1219 | /* | ||
1220 | * NAPI version of the main interrupt handler. | ||
1221 | */ | ||
1222 | static irqreturn_t t1_interrupt_napi(int irq, void *data, struct pt_regs *regs) | ||
1223 | { | ||
1224 | int handled; | ||
1225 | struct adapter *adapter = data; | ||
1226 | struct sge *sge = adapter->sge; | ||
1227 | struct respQ *q = &adapter->sge->respQ; | ||
1228 | |||
1229 | /* | ||
1230 | * Clear the SGE_DATA interrupt first thing. Normally the NAPI | ||
1231 | * handler has control of the response queue and the interrupt handler | ||
1232 | * can look at the queue reliably only once it knows NAPI is off. | ||
1233 | * We can't wait that long to clear the SGE_DATA interrupt because we | ||
1234 | * could race with t1_poll rearming the SGE interrupt, so we need to | ||
1235 | * clear the interrupt speculatively and really early on. | ||
1236 | */ | ||
1237 | writel(F_PL_INTR_SGE_DATA, adapter->regs + A_PL_CAUSE); | ||
1238 | |||
1239 | spin_lock(&adapter->async_lock); | ||
1240 | if (!napi_is_scheduled(sge->netdev)) { | ||
1241 | struct respQ_e *e = &q->entries[q->cidx]; | ||
1242 | |||
1243 | if (e->GenerationBit == q->genbit) { | ||
1244 | if (e->DataValid || | ||
1245 | process_pure_responses(adapter, e)) { | ||
1246 | if (likely(napi_schedule_prep(sge->netdev))) | ||
1247 | __netif_rx_schedule(sge->netdev); | ||
1248 | else | ||
1249 | printk(KERN_CRIT | ||
1250 | "NAPI schedule failure!\n"); | ||
1251 | } else | ||
1252 | writel(q->cidx, adapter->regs + A_SG_SLEEPING); | ||
1253 | handled = 1; | ||
1254 | goto unlock; | ||
1255 | } else | ||
1256 | writel(q->cidx, adapter->regs + A_SG_SLEEPING); | ||
1257 | } else | ||
1258 | if (readl(adapter->regs + A_PL_CAUSE) & F_PL_INTR_SGE_DATA) | ||
1259 | printk(KERN_ERR "data interrupt while NAPI running\n"); | ||
1260 | |||
1261 | handled = t1_slow_intr_handler(adapter); | ||
1262 | if (!handled) | ||
1263 | sge->stats.unhandled_irqs++; | ||
1264 | unlock: | ||
1265 | spin_unlock(&adapter->async_lock); | ||
1266 | return IRQ_RETVAL(handled != 0); | ||
1267 | } | ||
1268 | |||
1269 | /* | ||
1270 | * Main interrupt handler, optimized assuming that we took a 'DATA' | ||
1271 | * interrupt. | ||
1272 | * | ||
1273 | * 1. Clear the interrupt | ||
1274 | * 2. Loop while we find valid descriptors and process them; accumulate | ||
1275 | * information that can be processed after the loop | ||
1276 | * 3. Tell the SGE at which index we stopped processing descriptors | ||
1277 | * 4. Bookkeeping; free TX buffers, ring doorbell if there are any | ||
1278 | * outstanding TX buffers waiting, replenish RX buffers, potentially | ||
1279 | * reenable upper layers if they were turned off due to lack of TX | ||
1280 | * resources which are available again. | ||
1281 | * 5. If we took an interrupt but no valid respQ descriptors were found, we | ||
1282 | * let the slow_intr_handler run and do error handling. | ||
1283 | */ | ||
1284 | static irqreturn_t t1_interrupt(int irq, void *cookie, struct pt_regs *regs) | ||
1285 | { | ||
1286 | int work_done; | ||
1287 | struct respQ_e *e; | ||
1288 | struct adapter *adapter = cookie; | ||
1289 | struct respQ *Q = &adapter->sge->respQ; | ||
1290 | |||
1291 | spin_lock(&adapter->async_lock); | ||
1292 | e = &Q->entries[Q->cidx]; | ||
1293 | prefetch(e); | ||
1294 | |||
1295 | writel(F_PL_INTR_SGE_DATA, adapter->regs + A_PL_CAUSE); | ||
1296 | |||
1297 | if (likely(e->GenerationBit == Q->genbit)) | ||
1298 | work_done = process_responses(adapter, -1); | ||
1299 | else | ||
1300 | work_done = t1_slow_intr_handler(adapter); | ||
1301 | |||
1302 | /* | ||
1303 | * The unconditional clearing of the PL_CAUSE above may have raced | ||
1304 | * with DMA completion and the corresponding generation of a response | ||
1305 | * to cause us to miss the resulting data interrupt. The next write | ||
1306 | * is also unconditional to recover the missed interrupt and render | ||
1307 | * this race harmless. | ||
1308 | */ | ||
1309 | writel(Q->cidx, adapter->regs + A_SG_SLEEPING); | ||
1310 | |||
1311 | if (!work_done) | ||
1312 | adapter->sge->stats.unhandled_irqs++; | ||
1313 | spin_unlock(&adapter->async_lock); | ||
1314 | return IRQ_RETVAL(work_done != 0); | ||
1315 | } | ||
1316 | |||
1317 | intr_handler_t t1_select_intr_handler(adapter_t *adapter) | ||
1318 | { | ||
1319 | return adapter->params.sge.polling ? t1_interrupt_napi : t1_interrupt; | ||
1320 | } | ||
1321 | |||
1322 | /* | ||
1323 | * Enqueues the sk_buff onto the cmdQ[qid] and has hardware fetch it. | ||
1324 | * | ||
1325 | * The code figures out how many entries the sk_buff will require in the | ||
1326 | * cmdQ and updates the cmdQ data structure with the state once the enqueue | ||
1327 | * has complete. Then, it doesn't access the global structure anymore, but | ||
1328 | * uses the corresponding fields on the stack. In conjuction with a spinlock | ||
1329 | * around that code, we can make the function reentrant without holding the | ||
1330 | * lock when we actually enqueue (which might be expensive, especially on | ||
1331 | * architectures with IO MMUs). | ||
1332 | * | ||
1333 | * This runs with softirqs disabled. | ||
1334 | */ | ||
1335 | unsigned int t1_sge_tx(struct sk_buff *skb, struct adapter *adapter, | ||
1336 | unsigned int qid, struct net_device *dev) | ||
1337 | { | ||
1338 | struct sge *sge = adapter->sge; | ||
1339 | struct cmdQ *q = &sge->cmdQ[qid]; | ||
1340 | unsigned int credits, pidx, genbit, count; | ||
1341 | |||
1342 | spin_lock(&q->lock); | ||
1343 | reclaim_completed_tx(sge, q); | ||
1344 | |||
1345 | pidx = q->pidx; | ||
1346 | credits = q->size - q->in_use; | ||
1347 | count = 1 + skb_shinfo(skb)->nr_frags; | ||
1348 | |||
1349 | { /* Ethernet packet */ | ||
1350 | if (unlikely(credits < count)) { | ||
1351 | netif_stop_queue(dev); | ||
1352 | set_bit(dev->if_port, &sge->stopped_tx_queues); | ||
1353 | sge->stats.cmdQ_full[3]++; | ||
1354 | spin_unlock(&q->lock); | ||
1355 | CH_ERR("%s: Tx ring full while queue awake!\n", | ||
1356 | adapter->name); | ||
1357 | return 1; | ||
1358 | } | ||
1359 | if (unlikely(credits - count < q->stop_thres)) { | ||
1360 | sge->stats.cmdQ_full[3]++; | ||
1361 | netif_stop_queue(dev); | ||
1362 | set_bit(dev->if_port, &sge->stopped_tx_queues); | ||
1363 | } | ||
1364 | } | ||
1365 | q->in_use += count; | ||
1366 | genbit = q->genbit; | ||
1367 | q->pidx += count; | ||
1368 | if (q->pidx >= q->size) { | ||
1369 | q->pidx -= q->size; | ||
1370 | q->genbit ^= 1; | ||
1371 | } | ||
1372 | spin_unlock(&q->lock); | ||
1373 | |||
1374 | write_tx_descs(adapter, skb, pidx, genbit, q); | ||
1375 | |||
1376 | /* | ||
1377 | * We always ring the doorbell for cmdQ1. For cmdQ0, we only ring | ||
1378 | * the doorbell if the Q is asleep. There is a natural race where | ||
1379 | * the hardware goes to sleep just after we check; in that case | ||
1380 | * the interrupt handler will detect the outstanding TX packet | ||
1381 | * and ring the doorbell for us. | ||
1382 | */ | ||
1383 | if (qid) | ||
1384 | doorbell_pio(adapter, F_CMDQ1_ENABLE); | ||
1385 | else { | ||
1386 | clear_bit(CMDQ_STAT_LAST_PKT_DB, &q->status); | ||
1387 | if (test_and_set_bit(CMDQ_STAT_RUNNING, &q->status) == 0) { | ||
1388 | set_bit(CMDQ_STAT_LAST_PKT_DB, &q->status); | ||
1389 | writel(F_CMDQ0_ENABLE, adapter->regs + A_SG_DOORBELL); | ||
1390 | } | ||
1391 | } | ||
1392 | return 0; | ||
1393 | } | ||
1394 | |||
1395 | #define MK_ETH_TYPE_MSS(type, mss) (((mss) & 0x3FFF) | ((type) << 14)) | ||
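The macro packs the MSS into the low 14 bits of a 16-bit field and the Ethernet framing type into the bits above it. A quick user-space check of the round trip (type value illustrative, not the real CPL_ETH_II encoding):

#include <stdio.h>

#define MK_ETH_TYPE_MSS(type, mss) (((mss) & 0x3FFF) | ((type) << 14))

int main(void)
{
	unsigned int v = MK_ETH_TYPE_MSS(1, 1460);

	/* prints 0x45b4 (type=1 mss=1460) */
	printf("0x%04x (type=%u mss=%u)\n", v, v >> 14, v & 0x3FFF);
	return 0;
}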
1396 | |||
1397 | /* | ||
1398 | * eth_hdr_len - return the length of an Ethernet header | ||
1399 | * @data: pointer to the start of the Ethernet header | ||
1400 | * | ||
1401 | * Returns the length of an Ethernet header, including optional VLAN tag. | ||
1402 | */ | ||
1403 | static inline int eth_hdr_len(const void *data) | ||
1404 | { | ||
1405 | const struct ethhdr *e = data; | ||
1406 | |||
1407 | return e->h_proto == htons(ETH_P_8021Q) ? VLAN_ETH_HLEN : ETH_HLEN; | ||
1408 | } | ||
1409 | |||
1410 | /* | ||
1411 | * Adds the CPL header to the sk_buff and passes it to t1_sge_tx. | ||
1412 | */ | ||
1413 | int t1_start_xmit(struct sk_buff *skb, struct net_device *dev) | ||
1414 | { | ||
1415 | struct adapter *adapter = dev->priv; | ||
1416 | struct sge_port_stats *st = &adapter->sge->port_stats[dev->if_port]; | ||
1417 | struct sge *sge = adapter->sge; | ||
1418 | struct cpl_tx_pkt *cpl; | ||
1419 | |||
1420 | #ifdef NETIF_F_TSO | ||
1421 | if (skb_shinfo(skb)->tso_size) { | ||
1422 | int eth_type; | ||
1423 | struct cpl_tx_pkt_lso *hdr; | ||
1424 | |||
1425 | st->tso++; | ||
1426 | |||
1427 | eth_type = skb->nh.raw - skb->data == ETH_HLEN ? | ||
1428 | CPL_ETH_II : CPL_ETH_II_VLAN; | ||
1429 | |||
1430 | hdr = (struct cpl_tx_pkt_lso *)skb_push(skb, sizeof(*hdr)); | ||
1431 | hdr->opcode = CPL_TX_PKT_LSO; | ||
1432 | hdr->ip_csum_dis = hdr->l4_csum_dis = 0; | ||
1433 | hdr->ip_hdr_words = skb->nh.iph->ihl; | ||
1434 | hdr->tcp_hdr_words = skb->h.th->doff; | ||
1435 | hdr->eth_type_mss = htons(MK_ETH_TYPE_MSS(eth_type, | ||
1436 | skb_shinfo(skb)->tso_size)); | ||
1437 | hdr->len = htonl(skb->len - sizeof(*hdr)); | ||
1438 | cpl = (struct cpl_tx_pkt *)hdr; | ||
1439 | sge->stats.tx_lso_pkts++; | ||
1440 | } else | ||
1441 | #endif | ||
1442 | { | ||
1443 | /* | ||
1444 | * Packets shorter than ETH_HLEN can break the MAC, so drop them | ||
1445 | * early. We may also get oversized packets because some parts | ||
1446 | * of the kernel don't handle our unusual hard_header_len right; | ||
1447 | * drop those too. | ||
1448 | */ | ||
1449 | if (unlikely(skb->len < ETH_HLEN || | ||
1450 | skb->len > dev->mtu + eth_hdr_len(skb->data))) { | ||
1451 | dev_kfree_skb_any(skb); | ||
1452 | return NET_XMIT_SUCCESS; | ||
1453 | } | ||
1454 | |||
1455 | /* | ||
1456 | * We are using a non-standard hard_header_len and some kernel | ||
1457 | * components, such as pktgen, do not handle it right. | ||
1458 | * Complain when this happens but try to fix things up. | ||
1459 | */ | ||
1460 | if (unlikely(skb_headroom(skb) < | ||
1461 | dev->hard_header_len - ETH_HLEN)) { | ||
1462 | struct sk_buff *orig_skb = skb; | ||
1463 | |||
1464 | if (net_ratelimit()) | ||
1465 | printk(KERN_ERR "%s: inadequate headroom in " | ||
1466 | "Tx packet\n", dev->name); | ||
1467 | skb = skb_realloc_headroom(skb, sizeof(*cpl)); | ||
1468 | dev_kfree_skb_any(orig_skb); | ||
1469 | if (!skb) | ||
1470 | return -ENOMEM; | ||
1471 | } | ||
1472 | |||
1473 | if (!(adapter->flags & UDP_CSUM_CAPABLE) && | ||
1474 | skb->ip_summed == CHECKSUM_HW && | ||
1475 | skb->nh.iph->protocol == IPPROTO_UDP) | ||
1476 | if (unlikely(skb_checksum_help(skb, 0))) { | ||
1477 | dev_kfree_skb_any(skb); | ||
1478 | return -ENOMEM; | ||
1479 | } | ||
1480 | |||
1481 | /* Assume this catches the gratuitous ARP; we will reuse | ||
1482 | * it to flush out stuck ESPI packets. | ||
1483 | */ | ||
1484 | if (unlikely(!adapter->sge->espibug_skb)) { | ||
1485 | if (skb->protocol == htons(ETH_P_ARP) && | ||
1486 | skb->nh.arph->ar_op == htons(ARPOP_REQUEST)) { | ||
1487 | adapter->sge->espibug_skb = skb; | ||
1488 | /* We want to re-use this skb later. We | ||
1489 | * simply bump the reference count and it | ||
1490 | * will not be freed... | ||
1491 | */ | ||
1492 | skb = skb_get(skb); | ||
1493 | } | ||
1494 | } | ||
1495 | |||
1496 | cpl = (struct cpl_tx_pkt *)__skb_push(skb, sizeof(*cpl)); | ||
1497 | cpl->opcode = CPL_TX_PKT; | ||
1498 | cpl->ip_csum_dis = 1; /* SW calculates IP csum */ | ||
1499 | cpl->l4_csum_dis = skb->ip_summed == CHECKSUM_HW ? 0 : 1; | ||
1500 | /* the length field isn't used so don't bother setting it */ | ||
1501 | |||
1502 | st->tx_cso += (skb->ip_summed == CHECKSUM_HW); | ||
1503 | sge->stats.tx_do_cksum += (skb->ip_summed == CHECKSUM_HW); | ||
1504 | sge->stats.tx_reg_pkts++; | ||
1505 | } | ||
1506 | cpl->iff = dev->if_port; | ||
1507 | |||
1508 | #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE) | ||
1509 | if (adapter->vlan_grp && vlan_tx_tag_present(skb)) { | ||
1510 | cpl->vlan_valid = 1; | ||
1511 | cpl->vlan = htons(vlan_tx_tag_get(skb)); | ||
1512 | st->vlan_insert++; | ||
1513 | } else | ||
1514 | #endif | ||
1515 | cpl->vlan_valid = 0; | ||
1516 | |||
1517 | dev->trans_start = jiffies; | ||
1518 | return t1_sge_tx(skb, adapter, 0, dev); | ||
1519 | } | ||
1520 | |||
1521 | /* | ||
1522 | * Callback for the Tx buffer reclaim timer. Runs with softirqs disabled. | ||
1523 | */ | ||
1524 | static void sge_tx_reclaim_cb(unsigned long data) | ||
1525 | { | ||
1526 | int i; | ||
1527 | struct sge *sge = (struct sge *)data; | ||
1528 | |||
1529 | for (i = 0; i < SGE_CMDQ_N; ++i) { | ||
1530 | struct cmdQ *q = &sge->cmdQ[i]; | ||
1531 | |||
1532 | if (!spin_trylock(&q->lock)) | ||
1533 | continue; | ||
1534 | |||
1535 | reclaim_completed_tx(sge, q); | ||
1536 | if (i == 0 && q->in_use) /* flush pending credits */ | ||
1537 | writel(F_CMDQ0_ENABLE, | ||
1538 | sge->adapter->regs + A_SG_DOORBELL); | ||
1539 | |||
1540 | spin_unlock(&q->lock); | ||
1541 | } | ||
1542 | mod_timer(&sge->tx_reclaim_timer, jiffies + TX_RECLAIM_PERIOD); | ||
1543 | } | ||
1544 | |||
1545 | /* | ||
1546 | * Propagate changes of the SGE coalescing parameters to the HW. | ||
1547 | */ | ||
1548 | int t1_sge_set_coalesce_params(struct sge *sge, struct sge_params *p) | ||
1549 | { | ||
1550 | sge->netdev->poll = t1_poll; | ||
1551 | sge->fixed_intrtimer = p->rx_coalesce_usecs * | ||
1552 | core_ticks_per_usec(sge->adapter); | ||
1553 | writel(sge->fixed_intrtimer, sge->adapter->regs + A_SG_INTRTIMER); | ||
1554 | return 0; | ||
1555 | } | ||
1556 | |||
1557 | /* | ||
1558 | * Allocates both RX and TX resources and configures the SGE. However, | ||
1559 | * the hardware is not enabled yet. | ||
1560 | */ | ||
1561 | int t1_sge_configure(struct sge *sge, struct sge_params *p) | ||
1562 | { | ||
1563 | if (alloc_rx_resources(sge, p)) | ||
1564 | return -ENOMEM; | ||
1565 | if (alloc_tx_resources(sge, p)) { | ||
1566 | free_rx_resources(sge); | ||
1567 | return -ENOMEM; | ||
1568 | } | ||
1569 | configure_sge(sge, p); | ||
1570 | |||
1571 | /* | ||
1572 | * Now that we have sized the free lists, calculate the payload | ||
1573 | * capacity of the large buffers. Other parts of the driver use | ||
1574 | * this to set the max offload coalescing size so that RX packets | ||
1575 | * do not overflow our large buffers. | ||
1576 | */ | ||
1577 | p->large_buf_capacity = jumbo_payload_capacity(sge); | ||
1578 | return 0; | ||
1579 | } | ||
1580 | |||
1581 | /* | ||
1582 | * Disables the DMA engine. | ||
1583 | */ | ||
1584 | void t1_sge_stop(struct sge *sge) | ||
1585 | { | ||
1586 | writel(0, sge->adapter->regs + A_SG_CONTROL); | ||
1587 | (void) readl(sge->adapter->regs + A_SG_CONTROL); /* flush */ | ||
1588 | if (is_T2(sge->adapter)) | ||
1589 | del_timer_sync(&sge->espibug_timer); | ||
1590 | del_timer_sync(&sge->tx_reclaim_timer); | ||
1591 | } | ||
1592 | |||
1593 | /* | ||
1594 | * Enables the DMA engine. | ||
1595 | */ | ||
1596 | void t1_sge_start(struct sge *sge) | ||
1597 | { | ||
1598 | refill_free_list(sge, &sge->freelQ[0]); | ||
1599 | refill_free_list(sge, &sge->freelQ[1]); | ||
1600 | |||
1601 | writel(sge->sge_control, sge->adapter->regs + A_SG_CONTROL); | ||
1602 | doorbell_pio(sge->adapter, F_FL0_ENABLE | F_FL1_ENABLE); | ||
1603 | (void) readl(sge->adapter->regs + A_SG_CONTROL); /* flush */ | ||
1604 | |||
1605 | mod_timer(&sge->tx_reclaim_timer, jiffies + TX_RECLAIM_PERIOD); | ||
1606 | |||
1607 | if (is_T2(sge->adapter)) | ||
1608 | mod_timer(&sge->espibug_timer, jiffies + sge->espibug_timeout); | ||
1609 | } | ||
1610 | |||
1611 | /* | ||
1612 | * Callback for the T2 ESPI 'stuck packet feature' workaround. | ||
1613 | */ | ||
1614 | static void espibug_workaround(void *data) | ||
1615 | { | ||
1616 | struct adapter *adapter = (struct adapter *)data; | ||
1617 | struct sge *sge = adapter->sge; | ||
1618 | |||
1619 | if (netif_running(adapter->port[0].dev)) { | ||
1620 | struct sk_buff *skb = sge->espibug_skb; | ||
1621 | |||
1622 | u32 seop = t1_espi_get_mon(adapter, 0x930, 0); | ||
1623 | |||
1624 | if ((seop & 0xfff0fff) == 0xfff && skb) { | ||
1625 | if (!skb->cb[0]) { | ||
1626 | u8 ch_mac_addr[ETH_ALEN] = | ||
1627 | {0x0, 0x7, 0x43, 0x0, 0x0, 0x0}; | ||
1628 | memcpy(skb->data + sizeof(struct cpl_tx_pkt), | ||
1629 | ch_mac_addr, ETH_ALEN); | ||
1630 | memcpy(skb->data + skb->len - 10, ch_mac_addr, | ||
1631 | ETH_ALEN); | ||
1632 | skb->cb[0] = 0xff; | ||
1633 | } | ||
1634 | |||
1635 | /* Bump the reference count so the skb is not freed | ||
1636 | * once the DMA has completed. | ||
1637 | */ | ||
1638 | skb = skb_get(skb); | ||
1639 | t1_sge_tx(skb, adapter, 0, adapter->port[0].dev); | ||
1640 | } | ||
1641 | } | ||
1642 | mod_timer(&sge->espibug_timer, jiffies + sge->espibug_timeout); | ||
1643 | } | ||
1644 | |||
1645 | /* | ||
1646 | * Creates a t1_sge structure and returns suggested resource parameters. | ||
1647 | */ | ||
1648 | struct sge * __devinit t1_sge_create(struct adapter *adapter, | ||
1649 | struct sge_params *p) | ||
1650 | { | ||
1651 | struct sge *sge = kmalloc(sizeof(*sge), GFP_KERNEL); | ||
1652 | |||
1653 | if (!sge) | ||
1654 | return NULL; | ||
1655 | memset(sge, 0, sizeof(*sge)); | ||
1656 | |||
1657 | sge->adapter = adapter; | ||
1658 | sge->netdev = adapter->port[0].dev; | ||
1659 | sge->rx_pkt_pad = t1_is_T1B(adapter) ? 0 : 2; | ||
1660 | sge->jumbo_fl = t1_is_T1B(adapter) ? 1 : 0; | ||
1661 | |||
1662 | init_timer(&sge->tx_reclaim_timer); | ||
1663 | sge->tx_reclaim_timer.data = (unsigned long)sge; | ||
1664 | sge->tx_reclaim_timer.function = sge_tx_reclaim_cb; | ||
1665 | |||
1666 | if (is_T2(sge->adapter)) { | ||
1667 | init_timer(&sge->espibug_timer); | ||
1668 | sge->espibug_timer.function = (void *)&espibug_workaround; | ||
1669 | sge->espibug_timer.data = (unsigned long)sge->adapter; | ||
1670 | sge->espibug_timeout = 1; | ||
1671 | } | ||
1672 | |||
1673 | |||
1674 | p->cmdQ_size[0] = SGE_CMDQ0_E_N; | ||
1675 | p->cmdQ_size[1] = SGE_CMDQ1_E_N; | ||
1676 | p->freelQ_size[!sge->jumbo_fl] = SGE_FREEL_SIZE; | ||
1677 | p->freelQ_size[sge->jumbo_fl] = SGE_JUMBO_FREEL_SIZE; | ||
1678 | p->rx_coalesce_usecs = 50; | ||
1679 | p->coalesce_enable = 0; | ||
1680 | p->sample_interval_usecs = 0; | ||
1681 | p->polling = 0; | ||
1682 | |||
1683 | return sge; | ||
1684 | } | ||
diff --git a/drivers/net/chelsio/sge.h b/drivers/net/chelsio/sge.h new file mode 100644 index 000000000000..434b25586851 --- /dev/null +++ b/drivers/net/chelsio/sge.h | |||
@@ -0,0 +1,105 @@ | |||
1 | /***************************************************************************** | ||
2 | * * | ||
3 | * File: sge.h * | ||
4 | * $Revision: 1.11 $ * | ||
5 | * $Date: 2005/06/21 22:10:55 $ * | ||
6 | * Description: * | ||
7 | * part of the Chelsio 10Gb Ethernet Driver. * | ||
8 | * * | ||
9 | * This program is free software; you can redistribute it and/or modify * | ||
10 | * it under the terms of the GNU General Public License, version 2, as * | ||
11 | * published by the Free Software Foundation. * | ||
12 | * * | ||
13 | * You should have received a copy of the GNU General Public License along * | ||
14 | * with this program; if not, write to the Free Software Foundation, Inc., * | ||
15 | * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. * | ||
16 | * * | ||
17 | * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED * | ||
18 | * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF * | ||
19 | * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. * | ||
20 | * * | ||
21 | * http://www.chelsio.com * | ||
22 | * * | ||
23 | * Copyright (c) 2003 - 2005 Chelsio Communications, Inc. * | ||
24 | * All rights reserved. * | ||
25 | * * | ||
26 | * Maintainers: maintainers@chelsio.com * | ||
27 | * * | ||
28 | * Authors: Dimitrios Michailidis <dm@chelsio.com> * | ||
29 | * Tina Yang <tainay@chelsio.com> * | ||
30 | * Felix Marti <felix@chelsio.com> * | ||
31 | * Scott Bardone <sbardone@chelsio.com> * | ||
32 | * Kurt Ottaway <kottaway@chelsio.com> * | ||
33 | * Frank DiMambro <frank@chelsio.com> * | ||
34 | * * | ||
35 | * History: * | ||
36 | * * | ||
37 | ****************************************************************************/ | ||
38 | |||
39 | #ifndef _CXGB_SGE_H_ | ||
40 | #define _CXGB_SGE_H_ | ||
41 | |||
42 | #include <linux/types.h> | ||
43 | #include <linux/interrupt.h> | ||
44 | #include <asm/byteorder.h> | ||
45 | |||
46 | #ifndef IRQ_RETVAL | ||
47 | #define IRQ_RETVAL(x) | ||
48 | typedef void irqreturn_t; | ||
49 | #endif | ||
50 | |||
51 | typedef irqreturn_t (*intr_handler_t)(int, void *, struct pt_regs *); | ||
52 | |||
53 | struct sge_intr_counts { | ||
54 | unsigned int respQ_empty; /* # times respQ empty */ | ||
55 | unsigned int respQ_overflow; /* # respQ overflow (fatal) */ | ||
56 | unsigned int freelistQ_empty; /* # times freelist empty */ | ||
57 | unsigned int pkt_too_big; /* packet too large (fatal) */ | ||
58 | unsigned int pkt_mismatch; | ||
59 | unsigned int cmdQ_full[3]; /* not HW IRQ, host cmdQ[] full */ | ||
60 | unsigned int cmdQ_restarted[3];/* # of times cmdQ X was restarted */ | ||
61 | unsigned int ethernet_pkts; /* # of Ethernet packets received */ | ||
62 | unsigned int offload_pkts; /* # of offload packets received */ | ||
63 | unsigned int offload_bundles; /* # of offload pkt bundles delivered */ | ||
64 | unsigned int pure_rsps; /* # of non-payload responses */ | ||
65 | unsigned int unhandled_irqs; /* # of unhandled interrupts */ | ||
66 | unsigned int tx_ipfrags; | ||
67 | unsigned int tx_reg_pkts; | ||
68 | unsigned int tx_lso_pkts; | ||
69 | unsigned int tx_do_cksum; | ||
70 | }; | ||
71 | |||
72 | struct sge_port_stats { | ||
73 | unsigned long rx_cso_good; /* # of successful RX csum offloads */ | ||
74 | unsigned long tx_cso; /* # of TX checksum offloads */ | ||
75 | unsigned long vlan_xtract; /* # of VLAN tag extractions */ | ||
76 | unsigned long vlan_insert; /* # of VLAN tag insertions */ | ||
77 | unsigned long tso; /* # of TSO requests */ | ||
78 | unsigned long rx_drops; /* # of packets dropped due to no mem */ | ||
79 | }; | ||
80 | |||
81 | struct sk_buff; | ||
82 | struct net_device; | ||
83 | struct adapter; | ||
84 | struct sge_params; | ||
85 | struct sge; | ||
86 | |||
87 | struct sge *t1_sge_create(struct adapter *, struct sge_params *); | ||
88 | int t1_sge_configure(struct sge *, struct sge_params *); | ||
89 | int t1_sge_set_coalesce_params(struct sge *, struct sge_params *); | ||
90 | void t1_sge_destroy(struct sge *); | ||
91 | intr_handler_t t1_select_intr_handler(adapter_t *adapter); | ||
92 | unsigned int t1_sge_tx(struct sk_buff *skb, struct adapter *adapter, | ||
93 | unsigned int qid, struct net_device *netdev); | ||
94 | int t1_start_xmit(struct sk_buff *skb, struct net_device *dev); | ||
95 | void t1_set_vlan_accel(struct adapter *adapter, int on_off); | ||
96 | void t1_sge_start(struct sge *); | ||
97 | void t1_sge_stop(struct sge *); | ||
98 | int t1_sge_intr_error_handler(struct sge *); | ||
99 | void t1_sge_intr_enable(struct sge *); | ||
100 | void t1_sge_intr_disable(struct sge *); | ||
101 | void t1_sge_intr_clear(struct sge *); | ||
102 | const struct sge_intr_counts *t1_sge_get_intr_counts(struct sge *sge); | ||
103 | const struct sge_port_stats *t1_sge_get_port_stats(struct sge *sge, int port); | ||
104 | |||
105 | #endif /* _CXGB_SGE_H_ */ | ||
diff --git a/drivers/net/chelsio/subr.c b/drivers/net/chelsio/subr.c new file mode 100644 index 000000000000..1ebb5d149aef --- /dev/null +++ b/drivers/net/chelsio/subr.c | |||
@@ -0,0 +1,812 @@ | |||
1 | /***************************************************************************** | ||
2 | * * | ||
3 | * File: subr.c * | ||
4 | * $Revision: 1.27 $ * | ||
5 | * $Date: 2005/06/22 01:08:36 $ * | ||
6 | * Description: * | ||
7 | * Various subroutines (intr,pio,etc.) used by Chelsio 10G Ethernet driver. * | ||
8 | * part of the Chelsio 10Gb Ethernet Driver. * | ||
9 | * * | ||
10 | * This program is free software; you can redistribute it and/or modify * | ||
11 | * it under the terms of the GNU General Public License, version 2, as * | ||
12 | * published by the Free Software Foundation. * | ||
13 | * * | ||
14 | * You should have received a copy of the GNU General Public License along * | ||
15 | * with this program; if not, write to the Free Software Foundation, Inc., * | ||
16 | * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. * | ||
17 | * * | ||
18 | * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED * | ||
19 | * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF * | ||
20 | * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. * | ||
21 | * * | ||
22 | * http://www.chelsio.com * | ||
23 | * * | ||
24 | * Copyright (c) 2003 - 2005 Chelsio Communications, Inc. * | ||
25 | * All rights reserved. * | ||
26 | * * | ||
27 | * Maintainers: maintainers@chelsio.com * | ||
28 | * * | ||
29 | * Authors: Dimitrios Michailidis <dm@chelsio.com> * | ||
30 | * Tina Yang <tainay@chelsio.com> * | ||
31 | * Felix Marti <felix@chelsio.com> * | ||
32 | * Scott Bardone <sbardone@chelsio.com> * | ||
33 | * Kurt Ottaway <kottaway@chelsio.com> * | ||
34 | * Frank DiMambro <frank@chelsio.com> * | ||
35 | * * | ||
36 | * History: * | ||
37 | * * | ||
38 | ****************************************************************************/ | ||
39 | |||
40 | #include "common.h" | ||
41 | #include "elmer0.h" | ||
42 | #include "regs.h" | ||
43 | #include "gmac.h" | ||
44 | #include "cphy.h" | ||
45 | #include "sge.h" | ||
46 | #include "espi.h" | ||
47 | |||
48 | /** | ||
49 | * t1_wait_op_done - wait until an operation is completed | ||
50 | * @adapter: the adapter performing the operation | ||
51 | * @reg: the register to check for completion | ||
52 | * @mask: a single-bit field within @reg that indicates completion | ||
53 | * @polarity: the value of the field when the operation is completed | ||
54 | * @attempts: number of check iterations | ||
55 | * @delay: delay in usecs between iterations | ||
56 | * | ||
57 | * Wait until an operation is completed by checking a bit in a register | ||
58 | * up to @attempts times. Returns %0 if the operation completes and %1 | ||
59 | * otherwise. | ||
60 | */ | ||
61 | static int t1_wait_op_done(adapter_t *adapter, int reg, u32 mask, int polarity, | ||
62 | int attempts, int delay) | ||
63 | { | ||
64 | while (1) { | ||
65 | u32 val = readl(adapter->regs + reg) & mask; | ||
66 | |||
67 | if (!!val == polarity) | ||
68 | return 0; | ||
69 | if (--attempts == 0) | ||
70 | return 1; | ||
71 | if (delay) | ||
72 | udelay(delay); | ||
73 | } | ||
74 | } | ||
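This is the classic bounded-polling idiom: re-check a single-bit predicate up to @attempts times, optionally sleeping between checks. A user-space sketch under invented names, where fake_ready() stands in for the masked register read:

#include <stdio.h>

static int calls;

static int fake_ready(void)
{
	return ++calls == 3;	/* "completes" on the third poll */
}

static int wait_op_done(int (*ready)(void), int attempts)
{
	while (1) {
		if (ready())
			return 0;	/* operation completed */
		if (--attempts == 0)
			return 1;	/* timed out */
		/* a real caller would udelay() here */
	}
}

int main(void)
{
	int timed_out = wait_op_done(fake_ready, 10);

	printf("timed out: %d after %d polls\n", timed_out, calls);	/* 0, 3 */
	return 0;
}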
75 | |||
76 | #define TPI_ATTEMPTS 50 | ||
77 | |||
78 | /* | ||
79 | * Write a register over the TPI interface (unlocked and locked versions). | ||
80 | */ | ||
81 | static int __t1_tpi_write(adapter_t *adapter, u32 addr, u32 value) | ||
82 | { | ||
83 | int tpi_busy; | ||
84 | |||
85 | writel(addr, adapter->regs + A_TPI_ADDR); | ||
86 | writel(value, adapter->regs + A_TPI_WR_DATA); | ||
87 | writel(F_TPIWR, adapter->regs + A_TPI_CSR); | ||
88 | |||
89 | tpi_busy = t1_wait_op_done(adapter, A_TPI_CSR, F_TPIRDY, 1, | ||
90 | TPI_ATTEMPTS, 3); | ||
91 | if (tpi_busy) | ||
92 | CH_ALERT("%s: TPI write to 0x%x failed\n", | ||
93 | adapter->name, addr); | ||
94 | return tpi_busy; | ||
95 | } | ||
96 | |||
97 | int t1_tpi_write(adapter_t *adapter, u32 addr, u32 value) | ||
98 | { | ||
99 | int ret; | ||
100 | |||
101 | spin_lock(&(adapter)->tpi_lock); | ||
102 | ret = __t1_tpi_write(adapter, addr, value); | ||
103 | spin_unlock(&(adapter)->tpi_lock); | ||
104 | return ret; | ||
105 | } | ||
106 | |||
107 | /* | ||
108 | * Read a register over the TPI interface (unlocked and locked versions). | ||
109 | */ | ||
110 | static int __t1_tpi_read(adapter_t *adapter, u32 addr, u32 *valp) | ||
111 | { | ||
112 | int tpi_busy; | ||
113 | |||
114 | writel(addr, adapter->regs + A_TPI_ADDR); | ||
115 | writel(0, adapter->regs + A_TPI_CSR); | ||
116 | |||
117 | tpi_busy = t1_wait_op_done(adapter, A_TPI_CSR, F_TPIRDY, 1, | ||
118 | TPI_ATTEMPTS, 3); | ||
119 | if (tpi_busy) | ||
120 | CH_ALERT("%s: TPI read from 0x%x failed\n", | ||
121 | adapter->name, addr); | ||
122 | else | ||
123 | *valp = readl(adapter->regs + A_TPI_RD_DATA); | ||
124 | return tpi_busy; | ||
125 | } | ||
126 | |||
127 | int t1_tpi_read(adapter_t *adapter, u32 addr, u32 *valp) | ||
128 | { | ||
129 | int ret; | ||
130 | |||
131 | spin_lock(&(adapter)->tpi_lock); | ||
132 | ret = __t1_tpi_read(adapter, addr, valp); | ||
133 | spin_unlock(&(adapter)->tpi_lock); | ||
134 | return ret; | ||
135 | } | ||
136 | |||
137 | /* | ||
138 | * Called when a port's link settings change to propagate the new values to the | ||
139 | * associated PHY and MAC. After performing the common tasks it invokes an | ||
140 | * OS-specific handler. | ||
141 | */ | ||
142 | /* static */ void link_changed(adapter_t *adapter, int port_id) | ||
143 | { | ||
144 | int link_ok, speed, duplex, fc; | ||
145 | struct cphy *phy = adapter->port[port_id].phy; | ||
146 | struct link_config *lc = &adapter->port[port_id].link_config; | ||
147 | |||
148 | phy->ops->get_link_status(phy, &link_ok, &speed, &duplex, &fc); | ||
149 | |||
150 | lc->speed = speed < 0 ? SPEED_INVALID : speed; | ||
151 | lc->duplex = duplex < 0 ? DUPLEX_INVALID : duplex; | ||
152 | if (!(lc->requested_fc & PAUSE_AUTONEG)) | ||
153 | fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX); | ||
154 | |||
155 | if (link_ok && speed >= 0 && lc->autoneg == AUTONEG_ENABLE) { | ||
156 | /* Set MAC speed, duplex, and flow control to match PHY. */ | ||
157 | struct cmac *mac = adapter->port[port_id].mac; | ||
158 | |||
159 | mac->ops->set_speed_duplex_fc(mac, speed, duplex, fc); | ||
160 | lc->fc = (unsigned char)fc; | ||
161 | } | ||
162 | t1_link_changed(adapter, port_id, link_ok, speed, duplex, fc); | ||
163 | } | ||
164 | |||
165 | static int t1_pci_intr_handler(adapter_t *adapter) | ||
166 | { | ||
167 | u32 pcix_cause; | ||
168 | |||
169 | pci_read_config_dword(adapter->pdev, A_PCICFG_INTR_CAUSE, &pcix_cause); | ||
170 | |||
171 | if (pcix_cause) { | ||
172 | pci_write_config_dword(adapter->pdev, A_PCICFG_INTR_CAUSE, | ||
173 | pcix_cause); | ||
174 | t1_fatal_err(adapter); /* PCI errors are fatal */ | ||
175 | } | ||
176 | return 0; | ||
177 | } | ||
178 | |||
179 | |||
180 | /* | ||
181 | * Wait until Elmer's MI1 interface is ready for new operations. | ||
182 | */ | ||
183 | static int mi1_wait_until_ready(adapter_t *adapter, int mi1_reg) | ||
184 | { | ||
185 | int attempts = 100, busy; | ||
186 | |||
187 | do { | ||
188 | u32 val; | ||
189 | |||
190 | __t1_tpi_read(adapter, mi1_reg, &val); | ||
191 | busy = val & F_MI1_OP_BUSY; | ||
192 | if (busy) | ||
193 | udelay(10); | ||
194 | } while (busy && --attempts); | ||
195 | if (busy) | ||
196 | CH_ALERT("%s: MDIO operation timed out\n", | ||
197 | adapter->name); | ||
198 | return busy; | ||
199 | } | ||
200 | |||
201 | /* | ||
202 | * MI1 MDIO initialization. | ||
203 | */ | ||
204 | static void mi1_mdio_init(adapter_t *adapter, const struct board_info *bi) | ||
205 | { | ||
206 | u32 clkdiv = bi->clock_elmer0 / (2 * bi->mdio_mdc) - 1; | ||
207 | u32 val = F_MI1_PREAMBLE_ENABLE | V_MI1_MDI_INVERT(bi->mdio_mdiinv) | | ||
208 | V_MI1_MDI_ENABLE(bi->mdio_mdien) | V_MI1_CLK_DIV(clkdiv); | ||
209 | |||
210 | if (!(bi->caps & SUPPORTED_10000baseT_Full)) | ||
211 | val |= V_MI1_SOF(1); | ||
212 | t1_tpi_write(adapter, A_ELMER0_PORT0_MI1_CFG, val); | ||
213 | } | ||
214 | |||
215 | static int mi1_mdio_ext_read(adapter_t *adapter, int phy_addr, int mmd_addr, | ||
216 | int reg_addr, unsigned int *valp) | ||
217 | { | ||
218 | u32 addr = V_MI1_REG_ADDR(mmd_addr) | V_MI1_PHY_ADDR(phy_addr); | ||
219 | |||
220 | spin_lock(&(adapter)->tpi_lock); | ||
221 | |||
222 | /* Write the address we want. */ | ||
223 | __t1_tpi_write(adapter, A_ELMER0_PORT0_MI1_ADDR, addr); | ||
224 | __t1_tpi_write(adapter, A_ELMER0_PORT0_MI1_DATA, reg_addr); | ||
225 | __t1_tpi_write(adapter, A_ELMER0_PORT0_MI1_OP, | ||
226 | MI1_OP_INDIRECT_ADDRESS); | ||
227 | mi1_wait_until_ready(adapter, A_ELMER0_PORT0_MI1_OP); | ||
228 | |||
229 | /* Write the operation we want. */ | ||
230 | __t1_tpi_write(adapter, A_ELMER0_PORT0_MI1_OP, MI1_OP_INDIRECT_READ); | ||
231 | mi1_wait_until_ready(adapter, A_ELMER0_PORT0_MI1_OP); | ||
232 | |||
233 | /* Read the data. */ | ||
234 | __t1_tpi_read(adapter, A_ELMER0_PORT0_MI1_DATA, valp); | ||
235 | spin_unlock(&(adapter)->tpi_lock); | ||
236 | return 0; | ||
237 | } | ||
238 | |||
239 | static int mi1_mdio_ext_write(adapter_t *adapter, int phy_addr, int mmd_addr, | ||
240 | int reg_addr, unsigned int val) | ||
241 | { | ||
242 | u32 addr = V_MI1_REG_ADDR(mmd_addr) | V_MI1_PHY_ADDR(phy_addr); | ||
243 | |||
244 | spin_lock(&(adapter)->tpi_lock); | ||
245 | |||
246 | /* Write the address we want. */ | ||
247 | __t1_tpi_write(adapter, A_ELMER0_PORT0_MI1_ADDR, addr); | ||
248 | __t1_tpi_write(adapter, A_ELMER0_PORT0_MI1_DATA, reg_addr); | ||
249 | __t1_tpi_write(adapter, A_ELMER0_PORT0_MI1_OP, | ||
250 | MI1_OP_INDIRECT_ADDRESS); | ||
251 | mi1_wait_until_ready(adapter, A_ELMER0_PORT0_MI1_OP); | ||
252 | |||
253 | /* Write the data. */ | ||
254 | __t1_tpi_write(adapter, A_ELMER0_PORT0_MI1_DATA, val); | ||
255 | __t1_tpi_write(adapter, A_ELMER0_PORT0_MI1_OP, MI1_OP_INDIRECT_WRITE); | ||
256 | mi1_wait_until_ready(adapter, A_ELMER0_PORT0_MI1_OP); | ||
257 | spin_unlock(&(adapter)->tpi_lock); | ||
258 | return 0; | ||
259 | } | ||
260 | |||
261 | static struct mdio_ops mi1_mdio_ext_ops = { | ||
262 | mi1_mdio_init, | ||
263 | mi1_mdio_ext_read, | ||
264 | mi1_mdio_ext_write | ||
265 | }; | ||
266 | |||
267 | enum { | ||
268 | CH_BRD_N110_1F, | ||
269 | CH_BRD_N210_1F, | ||
270 | }; | ||
271 | |||
272 | static struct board_info t1_board[] = { | ||
273 | |||
274 | { CHBT_BOARD_N110, 1/*ports#*/, | ||
275 | SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE /*caps*/, CHBT_TERM_T1, | ||
276 | CHBT_MAC_PM3393, CHBT_PHY_88X2010, | ||
277 | 125000000/*clk-core*/, 0/*clk-mc3*/, 0/*clk-mc4*/, | ||
278 | 1/*espi-ports*/, 0/*clk-cspi*/, 44/*clk-elmer0*/, 0/*mdien*/, | ||
279 | 0/*mdiinv*/, 1/*mdc*/, 0/*phybaseaddr*/, &t1_pm3393_ops, | ||
280 | &t1_mv88x201x_ops, &mi1_mdio_ext_ops, | ||
281 | "Chelsio N110 1x10GBaseX NIC" }, | ||
282 | |||
283 | { CHBT_BOARD_N210, 1/*ports#*/, | ||
284 | SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE /*caps*/, CHBT_TERM_T2, | ||
285 | CHBT_MAC_PM3393, CHBT_PHY_88X2010, | ||
286 | 125000000/*clk-core*/, 0/*clk-mc3*/, 0/*clk-mc4*/, | ||
287 | 1/*espi-ports*/, 0/*clk-cspi*/, 44/*clk-elmer0*/, 0/*mdien*/, | ||
288 | 0/*mdiinv*/, 1/*mdc*/, 0/*phybaseaddr*/, &t1_pm3393_ops, | ||
289 | &t1_mv88x201x_ops, &mi1_mdio_ext_ops, | ||
290 | "Chelsio N210 1x10GBaseX NIC" }, | ||
291 | |||
292 | }; | ||
293 | |||
294 | struct pci_device_id t1_pci_tbl[] = { | ||
295 | CH_DEVICE(7, 0, CH_BRD_N110_1F), | ||
296 | CH_DEVICE(10, 1, CH_BRD_N210_1F), | ||
297 | { 0, } | ||
298 | }; | ||
299 | |||
300 | MODULE_DEVICE_TABLE(pci, t1_pci_tbl); | ||
301 | |||
302 | /* | ||
303 | * Return the board_info structure with a given index. Out-of-range indices | ||
304 | * return NULL. | ||
305 | */ | ||
306 | const struct board_info *t1_get_board_info(unsigned int board_id) | ||
307 | { | ||
308 | return board_id < ARRAY_SIZE(t1_board) ? &t1_board[board_id] : NULL; | ||
309 | } | ||
310 | |||
311 | struct chelsio_vpd_t { | ||
312 | u32 format_version; | ||
313 | u8 serial_number[16]; | ||
314 | u8 mac_base_address[6]; | ||
315 | u8 pad[2]; /* make multiple-of-4 size requirement explicit */ | ||
316 | }; | ||
317 | |||
318 | #define EEPROMSIZE (8 * 1024) | ||
319 | #define EEPROM_MAX_POLL 4 | ||
320 | |||
321 | /* | ||
322 | * Read SEEPROM. A zero is written to the flag register when the address is | ||
323 | * written to the Control register. The hardware sets the flag to one when | ||
324 | * four bytes have been transferred to the Data register. | ||
325 | */ | ||
326 | int t1_seeprom_read(adapter_t *adapter, u32 addr, u32 *data) | ||
327 | { | ||
328 | int i = EEPROM_MAX_POLL; | ||
329 | u16 val; | ||
330 | |||
331 | if (addr >= EEPROMSIZE || (addr & 3)) | ||
332 | return -EINVAL; | ||
333 | |||
334 | pci_write_config_word(adapter->pdev, A_PCICFG_VPD_ADDR, (u16)addr); | ||
335 | do { | ||
336 | udelay(50); | ||
337 | pci_read_config_word(adapter->pdev, A_PCICFG_VPD_ADDR, &val); | ||
338 | } while (!(val & F_VPD_OP_FLAG) && --i); | ||
339 | |||
340 | if (!(val & F_VPD_OP_FLAG)) { | ||
341 | CH_ERR("%s: reading EEPROM address 0x%x failed\n", | ||
342 | adapter->name, addr); | ||
343 | return -EIO; | ||
344 | } | ||
345 | pci_read_config_dword(adapter->pdev, A_PCICFG_VPD_DATA, data); | ||
346 | *data = le32_to_cpu(*data); | ||
347 | return 0; | ||
348 | } | ||
349 | |||
350 | static int t1_eeprom_vpd_get(adapter_t *adapter, struct chelsio_vpd_t *vpd) | ||
351 | { | ||
352 | int addr, ret = 0; | ||
353 | |||
354 | for (addr = 0; !ret && addr < sizeof(*vpd); addr += sizeof(u32)) | ||
355 | ret = t1_seeprom_read(adapter, addr, | ||
356 | (u32 *)((u8 *)vpd + addr)); | ||
357 | |||
358 | return ret; | ||
359 | } | ||
360 | |||
361 | /* | ||
362 | * Read a port's MAC address from the VPD ROM. | ||
363 | */ | ||
364 | static int vpd_macaddress_get(adapter_t *adapter, int index, u8 mac_addr[]) | ||
365 | { | ||
366 | struct chelsio_vpd_t vpd; | ||
367 | |||
368 | if (t1_eeprom_vpd_get(adapter, &vpd)) | ||
369 | return 1; | ||
370 | memcpy(mac_addr, vpd.mac_base_address, 5); | ||
371 | mac_addr[5] = vpd.mac_base_address[5] + index; | ||
372 | return 0; | ||
373 | } | ||
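All ports share the VPD base address and differ only in the last octet, which is offset by the port index. A tiny sketch of that derivation (base address made up):

#include <stdio.h>
#include <string.h>

int main(void)
{
	unsigned char base[6] = { 0x00, 0x07, 0x43, 0x12, 0x34, 0x50 };
	unsigned char mac[6];
	int index = 1;	/* second port */

	memcpy(mac, base, 5);
	mac[5] = base[5] + index;

	/* prints 00:07:43:12:34:51 */
	printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
	       mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
	return 0;
}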
374 | |||
375 | /* | ||
376 | * Set up the MAC/PHY according to the requested link settings. | ||
377 | * | ||
378 | * If the PHY can auto-negotiate first decide what to advertise, then | ||
379 | * enable/disable auto-negotiation as desired and reset. | ||
380 | * | ||
381 | * If the PHY does not auto-negotiate we just reset it. | ||
382 | * | ||
383 | * If auto-negotiation is off set the MAC to the proper speed/duplex/FC, | ||
384 | * otherwise do it later based on the outcome of auto-negotiation. | ||
385 | */ | ||
386 | int t1_link_start(struct cphy *phy, struct cmac *mac, struct link_config *lc) | ||
387 | { | ||
388 | unsigned int fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX); | ||
389 | |||
390 | if (lc->supported & SUPPORTED_Autoneg) { | ||
391 | lc->advertising &= ~(ADVERTISED_ASYM_PAUSE | ADVERTISED_PAUSE); | ||
392 | if (fc) { | ||
393 | lc->advertising |= ADVERTISED_ASYM_PAUSE; | ||
394 | if (fc == (PAUSE_RX | PAUSE_TX)) | ||
395 | lc->advertising |= ADVERTISED_PAUSE; | ||
396 | } | ||
397 | phy->ops->advertise(phy, lc->advertising); | ||
398 | |||
399 | if (lc->autoneg == AUTONEG_DISABLE) { | ||
400 | lc->speed = lc->requested_speed; | ||
401 | lc->duplex = lc->requested_duplex; | ||
402 | lc->fc = (unsigned char)fc; | ||
403 | mac->ops->set_speed_duplex_fc(mac, lc->speed, | ||
404 | lc->duplex, fc); | ||
405 | /* Also disables autoneg */ | ||
406 | phy->ops->set_speed_duplex(phy, lc->speed, lc->duplex); | ||
407 | phy->ops->reset(phy, 0); | ||
408 | } else | ||
409 | phy->ops->autoneg_enable(phy); /* also resets PHY */ | ||
410 | } else { | ||
411 | mac->ops->set_speed_duplex_fc(mac, -1, -1, fc); | ||
412 | lc->fc = (unsigned char)fc; | ||
413 | phy->ops->reset(phy, 0); | ||
414 | } | ||
415 | return 0; | ||
416 | } | ||
417 | |||
418 | /* | ||
419 | * External interrupt handler for boards using elmer0. | ||
420 | */ | ||
421 | int elmer0_ext_intr_handler(adapter_t *adapter) | ||
422 | { | ||
423 | struct cphy *phy; | ||
424 | int phy_cause; | ||
425 | u32 cause; | ||
426 | |||
427 | t1_tpi_read(adapter, A_ELMER0_INT_CAUSE, &cause); | ||
428 | |||
429 | switch (board_info(adapter)->board) { | ||
430 | case CHBT_BOARD_N210: | ||
431 | case CHBT_BOARD_N110: | ||
432 | if (cause & ELMER0_GP_BIT6) { /* Marvell 88x2010 interrupt */ | ||
433 | phy = adapter->port[0].phy; | ||
434 | phy_cause = phy->ops->interrupt_handler(phy); | ||
435 | if (phy_cause & cphy_cause_link_change) | ||
436 | link_changed(adapter, 0); | ||
437 | } | ||
438 | break; | ||
439 | } | ||
440 | t1_tpi_write(adapter, A_ELMER0_INT_CAUSE, cause); | ||
441 | return 0; | ||
442 | } | ||
443 | |||
444 | /* Enables all interrupts. */ | ||
445 | void t1_interrupts_enable(adapter_t *adapter) | ||
446 | { | ||
447 | unsigned int i; | ||
448 | u32 pl_intr; | ||
449 | |||
450 | adapter->slow_intr_mask = F_PL_INTR_SGE_ERR; | ||
451 | |||
452 | t1_sge_intr_enable(adapter->sge); | ||
453 | if (adapter->espi) { | ||
454 | adapter->slow_intr_mask |= F_PL_INTR_ESPI; | ||
455 | t1_espi_intr_enable(adapter->espi); | ||
456 | } | ||
457 | |||
458 | /* Enable MAC/PHY interrupts for each port. */ | ||
459 | for_each_port(adapter, i) { | ||
460 | adapter->port[i].mac->ops->interrupt_enable(adapter->port[i].mac); | ||
461 | adapter->port[i].phy->ops->interrupt_enable(adapter->port[i].phy); | ||
462 | } | ||
463 | |||
464 | /* Enable PCIX & external chip interrupts on ASIC boards. */ | ||
465 | pl_intr = readl(adapter->regs + A_PL_ENABLE); | ||
466 | |||
467 | /* PCI-X interrupts */ | ||
468 | pci_write_config_dword(adapter->pdev, A_PCICFG_INTR_ENABLE, | ||
469 | 0xffffffff); | ||
470 | |||
471 | adapter->slow_intr_mask |= F_PL_INTR_EXT | F_PL_INTR_PCIX; | ||
472 | pl_intr |= F_PL_INTR_EXT | F_PL_INTR_PCIX; | ||
473 | writel(pl_intr, adapter->regs + A_PL_ENABLE); | ||
474 | } | ||
475 | |||
476 | /* Disables all interrupts. */ | ||
477 | void t1_interrupts_disable(adapter_t* adapter) | ||
478 | { | ||
479 | unsigned int i; | ||
480 | |||
481 | t1_sge_intr_disable(adapter->sge); | ||
482 | if (adapter->espi) | ||
483 | t1_espi_intr_disable(adapter->espi); | ||
484 | |||
485 | /* Disable MAC/PHY interrupts for each port. */ | ||
486 | for_each_port(adapter, i) { | ||
487 | adapter->port[i].mac->ops->interrupt_disable(adapter->port[i].mac); | ||
488 | adapter->port[i].phy->ops->interrupt_disable(adapter->port[i].phy); | ||
489 | } | ||
490 | |||
491 | /* Disable PCIX & external chip interrupts. */ | ||
492 | writel(0, adapter->regs + A_PL_ENABLE); | ||
493 | |||
494 | /* PCI-X interrupts */ | ||
495 | pci_write_config_dword(adapter->pdev, A_PCICFG_INTR_ENABLE, 0); | ||
496 | |||
497 | adapter->slow_intr_mask = 0; | ||
498 | } | ||
499 | |||
500 | /* Clears all interrupts */ | ||
501 | void t1_interrupts_clear(adapter_t* adapter) | ||
502 | { | ||
503 | unsigned int i; | ||
504 | u32 pl_intr; | ||
505 | |||
506 | |||
507 | t1_sge_intr_clear(adapter->sge); | ||
508 | if (adapter->espi) | ||
509 | t1_espi_intr_clear(adapter->espi); | ||
510 | |||
511 | /* Clear MAC/PHY interrupts for each port. */ | ||
512 | for_each_port(adapter, i) { | ||
513 | adapter->port[i].mac->ops->interrupt_clear(adapter->port[i].mac); | ||
514 | adapter->port[i].phy->ops->interrupt_clear(adapter->port[i].phy); | ||
515 | } | ||
516 | |||
517 | /* Enable interrupts for external devices. */ | ||
518 | pl_intr = readl(adapter->regs + A_PL_CAUSE); | ||
519 | |||
520 | writel(pl_intr | F_PL_INTR_EXT | F_PL_INTR_PCIX, | ||
521 | adapter->regs + A_PL_CAUSE); | ||
522 | |||
523 | /* PCI-X interrupts */ | ||
524 | pci_write_config_dword(adapter->pdev, A_PCICFG_INTR_CAUSE, 0xffffffff); | ||
525 | } | ||
526 | |||
527 | /* | ||
528 | * Slow path interrupt handler for ASICs. | ||
529 | */ | ||
530 | int t1_slow_intr_handler(adapter_t *adapter) | ||
531 | { | ||
532 | u32 cause = readl(adapter->regs + A_PL_CAUSE); | ||
533 | |||
534 | cause &= adapter->slow_intr_mask; | ||
535 | if (!cause) | ||
536 | return 0; | ||
537 | if (cause & F_PL_INTR_SGE_ERR) | ||
538 | t1_sge_intr_error_handler(adapter->sge); | ||
539 | if (cause & F_PL_INTR_ESPI) | ||
540 | t1_espi_intr_handler(adapter->espi); | ||
541 | if (cause & F_PL_INTR_PCIX) | ||
542 | t1_pci_intr_handler(adapter); | ||
543 | if (cause & F_PL_INTR_EXT) | ||
544 | t1_elmer0_ext_intr(adapter); | ||
545 | |||
546 | /* Clear the interrupts just processed. */ | ||
547 | writel(cause, adapter->regs + A_PL_CAUSE); | ||
548 | (void)readl(adapter->regs + A_PL_CAUSE); /* flush writes */ | ||
549 | return 1; | ||
550 | } | ||
551 | |||
552 | /* Pause deadlock avoidance parameters */ | ||
553 | #define DROP_MSEC 16 | ||
554 | #define DROP_PKTS_CNT 1 | ||
555 | |||
556 | static void set_csum_offload(adapter_t *adapter, u32 csum_bit, int enable) | ||
557 | { | ||
558 | u32 val = readl(adapter->regs + A_TP_GLOBAL_CONFIG); | ||
559 | |||
560 | if (enable) | ||
561 | val |= csum_bit; | ||
562 | else | ||
563 | val &= ~csum_bit; | ||
564 | writel(val, adapter->regs + A_TP_GLOBAL_CONFIG); | ||
565 | } | ||
566 | |||
567 | void t1_tp_set_ip_checksum_offload(adapter_t *adapter, int enable) | ||
568 | { | ||
569 | set_csum_offload(adapter, F_IP_CSUM, enable); | ||
570 | } | ||
571 | |||
572 | void t1_tp_set_udp_checksum_offload(adapter_t *adapter, int enable) | ||
573 | { | ||
574 | set_csum_offload(adapter, F_UDP_CSUM, enable); | ||
575 | } | ||
576 | |||
577 | void t1_tp_set_tcp_checksum_offload(adapter_t *adapter, int enable) | ||
578 | { | ||
579 | set_csum_offload(adapter, F_TCP_CSUM, enable); | ||
580 | } | ||
581 | |||
582 | static void t1_tp_reset(adapter_t *adapter, unsigned int tp_clk) | ||
583 | { | ||
584 | u32 val; | ||
585 | |||
586 | val = F_TP_IN_CSPI_CPL | F_TP_IN_CSPI_CHECK_IP_CSUM | | ||
587 | F_TP_IN_CSPI_CHECK_TCP_CSUM | F_TP_IN_ESPI_ETHERNET; | ||
588 | val |= F_TP_IN_ESPI_CHECK_IP_CSUM | | ||
589 | F_TP_IN_ESPI_CHECK_TCP_CSUM; | ||
590 | writel(val, adapter->regs + A_TP_IN_CONFIG); | ||
591 | writel(F_TP_OUT_CSPI_CPL | | ||
592 | F_TP_OUT_ESPI_ETHERNET | | ||
593 | F_TP_OUT_ESPI_GENERATE_IP_CSUM | | ||
594 | F_TP_OUT_ESPI_GENERATE_TCP_CSUM, | ||
595 | adapter->regs + A_TP_OUT_CONFIG); | ||
596 | |||
597 | val = readl(adapter->regs + A_TP_GLOBAL_CONFIG); | ||
598 | val &= ~(F_IP_CSUM | F_UDP_CSUM | F_TCP_CSUM); | ||
599 | writel(val, adapter->regs + A_TP_GLOBAL_CONFIG); | ||
600 | |||
601 | /* | ||
602 | * Enable pause frame deadlock prevention. | ||
603 | */ | ||
604 | if (is_T2(adapter)) { | ||
605 | u32 drop_ticks = DROP_MSEC * (tp_clk / 1000); | ||
606 | |||
607 | writel(F_ENABLE_TX_DROP | F_ENABLE_TX_ERROR | | ||
608 | V_DROP_TICKS_CNT(drop_ticks) | | ||
609 | V_NUM_PKTS_DROPPED(DROP_PKTS_CNT), | ||
610 | adapter->regs + A_TP_TX_DROP_CONFIG); | ||
611 | } | ||
612 | |||
613 | writel(F_TP_RESET, adapter->regs + A_TP_RESET); | ||
614 | } | ||
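For scale: with DROP_MSEC = 16 and the 125 MHz core clock these boards report (clk-core in t1_board[] above), drop_ticks = 16 * (125000000 / 1000) = 2,000,000 core ticks between forced drops.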
615 | |||
616 | int __devinit t1_get_board_rev(adapter_t *adapter, const struct board_info *bi, | ||
617 | struct adapter_params *p) | ||
618 | { | ||
619 | p->chip_version = bi->chip_term; | ||
620 | if (p->chip_version == CHBT_TERM_T1 || | ||
621 | p->chip_version == CHBT_TERM_T2) { | ||
622 | u32 val = readl(adapter->regs + A_TP_PC_CONFIG); | ||
623 | |||
624 | val = G_TP_PC_REV(val); | ||
625 | if (val == 2) | ||
626 | p->chip_revision = TERM_T1B; | ||
627 | else if (val == 3) | ||
628 | p->chip_revision = TERM_T2; | ||
629 | else | ||
630 | return -1; | ||
631 | } else | ||
632 | return -1; | ||
633 | return 0; | ||
634 | } | ||
635 | |||
636 | /* | ||
637 | * Enable board components other than the Chelsio chip, such as external MAC | ||
638 | * and PHY. | ||
639 | */ | ||
640 | static int board_init(adapter_t *adapter, const struct board_info *bi) | ||
641 | { | ||
642 | switch (bi->board) { | ||
643 | case CHBT_BOARD_N110: | ||
644 | case CHBT_BOARD_N210: | ||
645 | writel(V_TPIPAR(0xf), adapter->regs + A_TPI_PAR); | ||
646 | t1_tpi_write(adapter, A_ELMER0_GPO, 0x800); | ||
647 | break; | ||
648 | } | ||
649 | return 0; | ||
650 | } | ||
651 | |||
652 | /* | ||
653 | * Initialize and configure the Terminator HW modules. Note that external | ||
654 | * MAC and PHYs are initialized separately. | ||
655 | */ | ||
656 | int t1_init_hw_modules(adapter_t *adapter) | ||
657 | { | ||
658 | int err = -EIO; | ||
659 | const struct board_info *bi = board_info(adapter); | ||
660 | |||
661 | if (!bi->clock_mc4) { | ||
662 | u32 val = readl(adapter->regs + A_MC4_CFG); | ||
663 | |||
664 | writel(val | F_READY | F_MC4_SLOW, adapter->regs + A_MC4_CFG); | ||
665 | writel(F_M_BUS_ENABLE | F_TCAM_RESET, | ||
666 | adapter->regs + A_MC5_CONFIG); | ||
667 | } | ||
668 | |||
669 | if (adapter->espi && t1_espi_init(adapter->espi, bi->chip_mac, | ||
670 | bi->espi_nports)) | ||
671 | goto out_err; | ||
672 | |||
673 | t1_tp_reset(adapter, bi->clock_core); | ||
674 | |||
675 | err = t1_sge_configure(adapter->sge, &adapter->params.sge); | ||
676 | if (err) | ||
677 | goto out_err; | ||
678 | |||
679 | err = 0; | ||
680 | out_err: | ||
681 | return err; | ||
682 | } | ||
683 | |||
684 | /* | ||
685 | * Determine a card's PCI mode. | ||
686 | */ | ||
687 | static void __devinit get_pci_mode(adapter_t *adapter, struct chelsio_pci_params *p) | ||
688 | { | ||
689 | static unsigned short speed_map[] = { 33, 66, 100, 133 }; | ||
690 | u32 pci_mode; | ||
691 | |||
692 | pci_read_config_dword(adapter->pdev, A_PCICFG_MODE, &pci_mode); | ||
693 | p->speed = speed_map[G_PCI_MODE_CLK(pci_mode)]; | ||
694 | p->width = (pci_mode & F_PCI_MODE_64BIT) ? 64 : 32; | ||
695 | p->is_pcix = (pci_mode & F_PCI_MODE_PCIX) != 0; | ||
696 | } | ||
697 | |||
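get_pci_mode() above maps a two-bit clock code through speed_map and tests the width and PCI-X flag bits. A standalone sketch of the decode; the mask and shift values here are assumptions for illustration, not the real A_PCICFG_MODE layout:

    #include <stdio.h>

    static const unsigned short speed_map[] = { 33, 66, 100, 133 };

    int main(void)
    {
        unsigned int pci_mode = 0x06;          /* made-up register value */
        unsigned int clk = pci_mode & 0x3;     /* assumed G_PCI_MODE_CLK() */
        int is_64bit = (pci_mode & 0x4) != 0;  /* assumed F_PCI_MODE_64BIT */

        printf("PCI bus: %u MHz, %d-bit\n", speed_map[clk], is_64bit ? 64 : 32);
        return 0;
    }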
698 | /* | ||
699 | * Release the structures holding the SW per-Terminator-HW-module state. | ||
700 | */ | ||
701 | void t1_free_sw_modules(adapter_t *adapter) | ||
702 | { | ||
703 | unsigned int i; | ||
704 | |||
705 | for_each_port(adapter, i) { | ||
706 | struct cmac *mac = adapter->port[i].mac; | ||
707 | struct cphy *phy = adapter->port[i].phy; | ||
708 | |||
709 | if (mac) | ||
710 | mac->ops->destroy(mac); | ||
711 | if (phy) | ||
712 | phy->ops->destroy(phy); | ||
713 | } | ||
714 | |||
715 | if (adapter->sge) | ||
716 | t1_sge_destroy(adapter->sge); | ||
717 | if (adapter->espi) | ||
718 | t1_espi_destroy(adapter->espi); | ||
719 | } | ||
720 | |||
721 | static void __devinit init_link_config(struct link_config *lc, | ||
722 | const struct board_info *bi) | ||
723 | { | ||
724 | lc->supported = bi->caps; | ||
725 | lc->requested_speed = lc->speed = SPEED_INVALID; | ||
726 | lc->requested_duplex = lc->duplex = DUPLEX_INVALID; | ||
727 | lc->requested_fc = lc->fc = PAUSE_RX | PAUSE_TX; | ||
728 | if (lc->supported & SUPPORTED_Autoneg) { | ||
729 | lc->advertising = lc->supported; | ||
730 | lc->autoneg = AUTONEG_ENABLE; | ||
731 | lc->requested_fc |= PAUSE_AUTONEG; | ||
732 | } else { | ||
733 | lc->advertising = 0; | ||
734 | lc->autoneg = AUTONEG_DISABLE; | ||
735 | } | ||
736 | } | ||
737 | |||
738 | |||
739 | /* | ||
740 | * Allocate and initialize the data structures that hold the SW state of | ||
741 | * the Terminator HW modules. | ||
742 | */ | ||
743 | int __devinit t1_init_sw_modules(adapter_t *adapter, | ||
744 | const struct board_info *bi) | ||
745 | { | ||
746 | unsigned int i; | ||
747 | |||
748 | adapter->params.brd_info = bi; | ||
749 | adapter->params.nports = bi->port_number; | ||
750 | adapter->params.stats_update_period = bi->gmac->stats_update_period; | ||
751 | |||
752 | adapter->sge = t1_sge_create(adapter, &adapter->params.sge); | ||
753 | if (!adapter->sge) { | ||
754 | CH_ERR("%s: SGE initialization failed\n", | ||
755 | adapter->name); | ||
756 | goto error; | ||
757 | } | ||
758 | |||
759 | if (bi->espi_nports && !(adapter->espi = t1_espi_create(adapter))) { | ||
760 | CH_ERR("%s: ESPI initialization failed\n", | ||
761 | adapter->name); | ||
762 | goto error; | ||
763 | } | ||
764 | |||
765 | board_init(adapter, bi); | ||
766 | bi->mdio_ops->init(adapter, bi); | ||
767 | if (bi->gphy->reset) | ||
768 | bi->gphy->reset(adapter); | ||
769 | if (bi->gmac->reset) | ||
770 | bi->gmac->reset(adapter); | ||
771 | |||
772 | for_each_port(adapter, i) { | ||
773 | u8 hw_addr[6]; | ||
774 | struct cmac *mac; | ||
775 | int phy_addr = bi->mdio_phybaseaddr + i; | ||
776 | |||
777 | adapter->port[i].phy = bi->gphy->create(adapter, phy_addr, | ||
778 | bi->mdio_ops); | ||
779 | if (!adapter->port[i].phy) { | ||
780 | CH_ERR("%s: PHY %d initialization failed\n", | ||
781 | adapter->name, i); | ||
782 | goto error; | ||
783 | } | ||
784 | |||
785 | adapter->port[i].mac = mac = bi->gmac->create(adapter, i); | ||
786 | if (!mac) { | ||
787 | CH_ERR("%s: MAC %d initialization failed\n", | ||
788 | adapter->name, i); | ||
789 | goto error; | ||
790 | } | ||
791 | |||
792 | /* | ||
793 | * Get the port's MAC address either from the EEPROM, if one | ||
794 | * exists, or from the one hardcoded in the MAC. | ||
795 | */ | ||
796 | if (vpd_macaddress_get(adapter, i, hw_addr)) { | ||
797 | CH_ERR("%s: could not read MAC address from VPD ROM\n", | ||
798 | adapter->port[i].dev->name); | ||
799 | goto error; | ||
800 | } | ||
801 | memcpy(adapter->port[i].dev->dev_addr, hw_addr, ETH_ALEN); | ||
802 | init_link_config(&adapter->port[i].link_config, bi); | ||
803 | } | ||
804 | |||
805 | get_pci_mode(adapter, &adapter->params.pci); | ||
806 | t1_interrupts_clear(adapter); | ||
807 | return 0; | ||
808 | |||
809 | error: | ||
810 | t1_free_sw_modules(adapter); | ||
811 | return -1; | ||
812 | } | ||
diff --git a/drivers/net/chelsio/suni1x10gexp_regs.h b/drivers/net/chelsio/suni1x10gexp_regs.h new file mode 100644 index 000000000000..81816c2b708a --- /dev/null +++ b/drivers/net/chelsio/suni1x10gexp_regs.h | |||
@@ -0,0 +1,213 @@ | |||
1 | /***************************************************************************** | ||
2 | * * | ||
3 | * File: suni1x10gexp_regs.h * | ||
4 | * $Revision: 1.9 $ * | ||
5 | * $Date: 2005/06/22 00:17:04 $ * | ||
6 | * Description: * | ||
7 | * PMC/SIERRA (pm3393) MAC-PHY functionality. * | ||
8 | * part of the Chelsio 10Gb Ethernet Driver. * | ||
9 | * * | ||
10 | * This program is free software; you can redistribute it and/or modify * | ||
11 | * it under the terms of the GNU General Public License, version 2, as * | ||
12 | * published by the Free Software Foundation. * | ||
13 | * * | ||
14 | * You should have received a copy of the GNU General Public License along * | ||
15 | * with this program; if not, write to the Free Software Foundation, Inc., * | ||
16 | * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. * | ||
17 | * * | ||
18 | * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED * | ||
19 | * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF * | ||
20 | * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. * | ||
21 | * * | ||
22 | * http://www.chelsio.com * | ||
23 | * * | ||
24 | * Maintainers: maintainers@chelsio.com * | ||
25 | * * | ||
26 | * Authors: PMC/SIERRA * | ||
27 | * * | ||
28 | * History: * | ||
29 | * * | ||
30 | ****************************************************************************/ | ||
31 | |||
32 | #ifndef _CXGB_SUNI1x10GEXP_REGS_H_ | ||
33 | #define _CXGB_SUNI1x10GEXP_REGS_H_ | ||
34 | |||
35 | /******************************************************************************/ | ||
36 | /** S/UNI-1x10GE-XP REGISTER ADDRESS MAP **/ | ||
37 | /******************************************************************************/ | ||
38 | /* Refer to the Register Bit Masks below for the naming of each register and */ | ||
39 | /* to the S/UNI-1x10GE-XP Data Sheet for the meaning of each bit */ | ||
40 | /******************************************************************************/ | ||
41 | |||
42 | #define SUNI1x10GEXP_REG_DEVICE_STATUS 0x0004 | ||
43 | #define SUNI1x10GEXP_REG_MASTER_INTERRUPT_STATUS 0x000D | ||
44 | #define SUNI1x10GEXP_REG_GLOBAL_INTERRUPT_ENABLE 0x000E | ||
45 | #define SUNI1x10GEXP_REG_SERDES_3125_INTERRUPT_ENABLE 0x0102 | ||
46 | #define SUNI1x10GEXP_REG_SERDES_3125_INTERRUPT_STATUS 0x0104 | ||
47 | #define SUNI1x10GEXP_REG_RXXG_CONFIG_1 0x2040 | ||
48 | #define SUNI1x10GEXP_REG_RXXG_CONFIG_3 0x2042 | ||
49 | #define SUNI1x10GEXP_REG_RXXG_INTERRUPT 0x2043 | ||
50 | #define SUNI1x10GEXP_REG_RXXG_MAX_FRAME_LENGTH 0x2045 | ||
51 | #define SUNI1x10GEXP_REG_RXXG_SA_15_0 0x2046 | ||
52 | #define SUNI1x10GEXP_REG_RXXG_SA_31_16 0x2047 | ||
53 | #define SUNI1x10GEXP_REG_RXXG_SA_47_32 0x2048 | ||
54 | #define SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_1_LOW 0x204D | ||
55 | #define SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_1_MID 0x204E | ||
56 | #define SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_1_HIGH 0x204F | ||
57 | #define SUNI1x10GEXP_REG_RXXG_MULTICAST_HASH_LOW 0x206A | ||
58 | #define SUNI1x10GEXP_REG_RXXG_MULTICAST_HASH_MIDLOW 0x206B | ||
59 | #define SUNI1x10GEXP_REG_RXXG_MULTICAST_HASH_MIDHIGH 0x206C | ||
60 | #define SUNI1x10GEXP_REG_RXXG_MULTICAST_HASH_HIGH 0x206D | ||
61 | #define SUNI1x10GEXP_REG_RXXG_ADDRESS_FILTER_CONTROL_0 0x206E | ||
62 | #define SUNI1x10GEXP_REG_RXXG_ADDRESS_FILTER_CONTROL_2 0x2070 | ||
63 | #define SUNI1x10GEXP_REG_XRF_INTERRUPT_ENABLE 0x2088 | ||
64 | #define SUNI1x10GEXP_REG_XRF_INTERRUPT_STATUS 0x2089 | ||
65 | #define SUNI1x10GEXP_REG_XRF_DIAG_INTERRUPT_ENABLE 0x208B | ||
66 | #define SUNI1x10GEXP_REG_XRF_DIAG_INTERRUPT_STATUS 0x208C | ||
67 | #define SUNI1x10GEXP_REG_RXOAM_INTERRUPT_ENABLE 0x20C7 | ||
68 | #define SUNI1x10GEXP_REG_RXOAM_INTERRUPT_STATUS 0x20C8 | ||
69 | #define SUNI1x10GEXP_REG_MSTAT_CONTROL 0x2100 | ||
70 | #define SUNI1x10GEXP_REG_MSTAT_COUNTER_ROLLOVER_0 0x2101 | ||
71 | #define SUNI1x10GEXP_REG_MSTAT_COUNTER_ROLLOVER_1 0x2102 | ||
72 | #define SUNI1x10GEXP_REG_MSTAT_COUNTER_ROLLOVER_2 0x2103 | ||
73 | #define SUNI1x10GEXP_REG_MSTAT_COUNTER_ROLLOVER_3 0x2104 | ||
74 | #define SUNI1x10GEXP_REG_MSTAT_INTERRUPT_MASK_0 0x2105 | ||
75 | #define SUNI1x10GEXP_REG_MSTAT_INTERRUPT_MASK_1 0x2106 | ||
76 | #define SUNI1x10GEXP_REG_MSTAT_INTERRUPT_MASK_2 0x2107 | ||
77 | #define SUNI1x10GEXP_REG_MSTAT_INTERRUPT_MASK_3 0x2108 | ||
78 | #define SUNI1x10GEXP_REG_MSTAT_COUNTER_0_LOW 0x2110 | ||
79 | #define SUNI1x10GEXP_REG_MSTAT_COUNTER_1_LOW 0x2114 | ||
80 | #define SUNI1x10GEXP_REG_MSTAT_COUNTER_4_LOW 0x2120 | ||
81 | #define SUNI1x10GEXP_REG_MSTAT_COUNTER_5_LOW 0x2124 | ||
82 | #define SUNI1x10GEXP_REG_MSTAT_COUNTER_6_LOW 0x2128 | ||
83 | #define SUNI1x10GEXP_REG_MSTAT_COUNTER_8_LOW 0x2130 | ||
84 | #define SUNI1x10GEXP_REG_MSTAT_COUNTER_10_LOW 0x2138 | ||
85 | #define SUNI1x10GEXP_REG_MSTAT_COUNTER_11_LOW 0x213C | ||
86 | #define SUNI1x10GEXP_REG_MSTAT_COUNTER_12_LOW 0x2140 | ||
87 | #define SUNI1x10GEXP_REG_MSTAT_COUNTER_13_LOW 0x2144 | ||
88 | #define SUNI1x10GEXP_REG_MSTAT_COUNTER_15_LOW 0x214C | ||
89 | #define SUNI1x10GEXP_REG_MSTAT_COUNTER_16_LOW 0x2150 | ||
90 | #define SUNI1x10GEXP_REG_MSTAT_COUNTER_17_LOW 0x2154 | ||
91 | #define SUNI1x10GEXP_REG_MSTAT_COUNTER_18_LOW 0x2158 | ||
92 | #define SUNI1x10GEXP_REG_MSTAT_COUNTER_33_LOW 0x2194 | ||
93 | #define SUNI1x10GEXP_REG_MSTAT_COUNTER_35_LOW 0x219C | ||
94 | #define SUNI1x10GEXP_REG_MSTAT_COUNTER_36_LOW 0x21A0 | ||
95 | #define SUNI1x10GEXP_REG_MSTAT_COUNTER_38_LOW 0x21A8 | ||
96 | #define SUNI1x10GEXP_REG_MSTAT_COUNTER_40_LOW 0x21B0 | ||
97 | #define SUNI1x10GEXP_REG_MSTAT_COUNTER_42_LOW 0x21B8 | ||
98 | #define SUNI1x10GEXP_REG_MSTAT_COUNTER_43_LOW 0x21BC | ||
99 | #define SUNI1x10GEXP_REG_IFLX_FIFO_OVERFLOW_ENABLE 0x2209 | ||
100 | #define SUNI1x10GEXP_REG_IFLX_FIFO_OVERFLOW_INTERRUPT 0x220A | ||
101 | #define SUNI1x10GEXP_REG_PL4ODP_INTERRUPT_MASK 0x2282 | ||
102 | #define SUNI1x10GEXP_REG_PL4ODP_INTERRUPT 0x2283 | ||
103 | #define SUNI1x10GEXP_REG_PL4IO_LOCK_DETECT_STATUS 0x2300 | ||
104 | #define SUNI1x10GEXP_REG_PL4IO_LOCK_DETECT_CHANGE 0x2301 | ||
105 | #define SUNI1x10GEXP_REG_PL4IO_LOCK_DETECT_MASK 0x2302 | ||
106 | #define SUNI1x10GEXP_REG_TXXG_CONFIG_1 0x3040 | ||
107 | #define SUNI1x10GEXP_REG_TXXG_CONFIG_3 0x3042 | ||
108 | #define SUNI1x10GEXP_REG_TXXG_INTERRUPT 0x3043 | ||
109 | #define SUNI1x10GEXP_REG_TXXG_MAX_FRAME_SIZE 0x3045 | ||
110 | #define SUNI1x10GEXP_REG_TXXG_SA_15_0 0x3047 | ||
111 | #define SUNI1x10GEXP_REG_TXXG_SA_31_16 0x3048 | ||
112 | #define SUNI1x10GEXP_REG_TXXG_SA_47_32 0x3049 | ||
113 | #define SUNI1x10GEXP_REG_XTEF_INTERRUPT_STATUS 0x3084 | ||
114 | #define SUNI1x10GEXP_REG_XTEF_INTERRUPT_ENABLE 0x3085 | ||
115 | #define SUNI1x10GEXP_REG_TXOAM_INTERRUPT_ENABLE 0x30C6 | ||
116 | #define SUNI1x10GEXP_REG_TXOAM_INTERRUPT_STATUS 0x30C7 | ||
117 | #define SUNI1x10GEXP_REG_EFLX_FIFO_OVERFLOW_ERROR_ENABLE 0x320C | ||
118 | #define SUNI1x10GEXP_REG_EFLX_FIFO_OVERFLOW_ERROR_INDICATION 0x320D | ||
119 | #define SUNI1x10GEXP_REG_PL4IDU_INTERRUPT_MASK 0x3282 | ||
120 | #define SUNI1x10GEXP_REG_PL4IDU_INTERRUPT 0x3283 | ||
121 | |||
122 | /******************************************************************************/ | ||
123 | /* -- End register offset definitions -- */ | ||
124 | /******************************************************************************/ | ||
125 | |||
126 | /******************************************************************************/ | ||
127 | /** SUNI-1x10GE-XP REGISTER BIT MASKS **/ | ||
128 | /******************************************************************************/ | ||
129 | |||
130 | /*---------------------------------------------------------------------------- | ||
131 | * Register 0x0004: S/UNI-1x10GE-XP Device Status | ||
132 | * Bit 9 TOP_SXRA_EXPIRED | ||
133 | * Bit 8 TOP_MDIO_BUSY | ||
134 | * Bit 7 TOP_DTRB | ||
135 | * Bit 6 TOP_EXPIRED | ||
136 | * Bit 5 TOP_PAUSED | ||
137 | * Bit 4 TOP_PL4_ID_DOOL | ||
138 | * Bit 3 TOP_PL4_IS_DOOL | ||
139 | * Bit 2 TOP_PL4_ID_ROOL | ||
140 | * Bit 1 TOP_PL4_IS_ROOL | ||
141 | * Bit 0 TOP_PL4_OUT_ROOL | ||
142 | *----------------------------------------------------------------------------*/ | ||
143 | #define SUNI1x10GEXP_BITMSK_TOP_SXRA_EXPIRED 0x0200 | ||
144 | #define SUNI1x10GEXP_BITMSK_TOP_EXPIRED 0x0040 | ||
145 | #define SUNI1x10GEXP_BITMSK_TOP_PL4_ID_DOOL 0x0010 | ||
146 | #define SUNI1x10GEXP_BITMSK_TOP_PL4_IS_DOOL 0x0008 | ||
147 | #define SUNI1x10GEXP_BITMSK_TOP_PL4_ID_ROOL 0x0004 | ||
148 | #define SUNI1x10GEXP_BITMSK_TOP_PL4_IS_ROOL 0x0002 | ||
149 | #define SUNI1x10GEXP_BITMSK_TOP_PL4_OUT_ROOL 0x0001 | ||
150 | |||
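As a usage illustration for the masks just defined, the sketch below polls the device status word and tests one of the PL4 out-of-lock bits. pm3393_read() is a hypothetical accessor invented for the demo; the real driver reads these registers through its own register interface:

    #include <stdio.h>

    #define SUNI1x10GEXP_REG_DEVICE_STATUS        0x0004
    #define SUNI1x10GEXP_BITMSK_TOP_PL4_OUT_ROOL  0x0001

    static unsigned int pm3393_read(unsigned int reg)  /* hypothetical */
    {
        (void)reg;
        return 0x0001;  /* pretend PL4_OUT_ROOL is asserted */
    }

    int main(void)
    {
        if (pm3393_read(SUNI1x10GEXP_REG_DEVICE_STATUS) &
            SUNI1x10GEXP_BITMSK_TOP_PL4_OUT_ROOL)
            printf("PL4 transmit reference out of lock\n");
        return 0;
    }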
151 | /*---------------------------------------------------------------------------- | ||
152 | * Register 0x000E: PM3393 Global Interrupt Enable | ||
153 | * Bit 15 TOP_INTE | ||
154 | *----------------------------------------------------------------------------*/ | ||
155 | #define SUNI1x10GEXP_BITMSK_TOP_INTE 0x8000 | ||
156 | |||
157 | /*---------------------------------------------------------------------------- | ||
158 | * Register 0x2040: RXXG Configuration 1 | ||
159 | * Bit 15 RXXG_RXEN | ||
160 | * Bit 14 RXXG_ROCF | ||
161 | * Bit 13 RXXG_PAD_STRIP | ||
162 | * Bit 10 RXXG_PUREP | ||
163 | * Bit 9 RXXG_LONGP | ||
164 | * Bit 8 RXXG_PARF | ||
165 | * Bit 7 RXXG_FLCHK | ||
166 | * Bit 5 RXXG_PASS_CTRL | ||
167 | * Bit 3 RXXG_CRC_STRIP | ||
168 | * Bit 2-0 RXXG_MIFG | ||
169 | *----------------------------------------------------------------------------*/ | ||
170 | #define SUNI1x10GEXP_BITMSK_RXXG_RXEN 0x8000 | ||
171 | #define SUNI1x10GEXP_BITMSK_RXXG_PUREP 0x0400 | ||
172 | #define SUNI1x10GEXP_BITMSK_RXXG_FLCHK 0x0080 | ||
173 | #define SUNI1x10GEXP_BITMSK_RXXG_CRC_STRIP 0x0008 | ||
174 | |||
175 | /*---------------------------------------------------------------------------- | ||
176 | * Register 0x2070: RXXG Address Filter Control 2 | ||
177 | * Bit 1 RXXG_PMODE | ||
178 | * Bit 0 RXXG_MHASH_EN | ||
179 | *----------------------------------------------------------------------------*/ | ||
180 | #define SUNI1x10GEXP_BITMSK_RXXG_PMODE 0x0002 | ||
181 | #define SUNI1x10GEXP_BITMSK_RXXG_MHASH_EN 0x0001 | ||
182 | |||
183 | /*---------------------------------------------------------------------------- | ||
184 | * Register 0x2100: MSTAT Control | ||
185 | * Bit 2 MSTAT_WRITE | ||
186 | * Bit 1 MSTAT_CLEAR | ||
187 | * Bit 0 MSTAT_SNAP | ||
188 | *----------------------------------------------------------------------------*/ | ||
189 | #define SUNI1x10GEXP_BITMSK_MSTAT_CLEAR 0x0002 | ||
190 | #define SUNI1x10GEXP_BITMSK_MSTAT_SNAP 0x0001 | ||
191 | |||
192 | /*---------------------------------------------------------------------------- | ||
193 | * Register 0x3040: TXXG Configuration Register 1 | ||
194 | * Bit 15 TXXG_TXEN0 | ||
195 | * Bit 13 TXXG_HOSTPAUSE | ||
196 | * Bit 12-7 TXXG_IPGT | ||
197 | * Bit 5 TXXG_32BIT_ALIGN | ||
198 | * Bit 4 TXXG_CRCEN | ||
199 | * Bit 3 TXXG_FCTX | ||
200 | * Bit 2 TXXG_FCRX | ||
201 | * Bit 1 TXXG_PADEN | ||
202 | * Bit 0 TXXG_SPRE | ||
203 | *----------------------------------------------------------------------------*/ | ||
204 | #define SUNI1x10GEXP_BITMSK_TXXG_TXEN0 0x8000 | ||
205 | #define SUNI1x10GEXP_BITOFF_TXXG_IPGT 7 | ||
206 | #define SUNI1x10GEXP_BITMSK_TXXG_32BIT_ALIGN 0x0020 | ||
207 | #define SUNI1x10GEXP_BITMSK_TXXG_CRCEN 0x0010 | ||
208 | #define SUNI1x10GEXP_BITMSK_TXXG_FCTX 0x0008 | ||
209 | #define SUNI1x10GEXP_BITMSK_TXXG_FCRX 0x0004 | ||
210 | #define SUNI1x10GEXP_BITMSK_TXXG_PADEN 0x0002 | ||
211 | |||
212 | #endif /* _CXGB_SUNI1x10GEXP_REGS_H_ */ | ||
213 | |||
diff --git a/drivers/net/e100.c b/drivers/net/e100.c index d0fa2448761d..25cc20e415da 100644 --- a/drivers/net/e100.c +++ b/drivers/net/e100.c | |||
@@ -1,7 +1,7 @@ | |||
1 | /******************************************************************************* | 1 | /******************************************************************************* |
2 | 2 | ||
3 | 3 | ||
4 | Copyright(c) 1999 - 2004 Intel Corporation. All rights reserved. | 4 | Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved. |
5 | 5 | ||
6 | This program is free software; you can redistribute it and/or modify it | 6 | This program is free software; you can redistribute it and/or modify it |
7 | under the terms of the GNU General Public License as published by the Free | 7 | under the terms of the GNU General Public License as published by the Free |
@@ -156,7 +156,7 @@ | |||
156 | 156 | ||
157 | #define DRV_NAME "e100" | 157 | #define DRV_NAME "e100" |
158 | #define DRV_EXT "-NAPI" | 158 | #define DRV_EXT "-NAPI" |
159 | #define DRV_VERSION "3.4.8-k2"DRV_EXT | 159 | #define DRV_VERSION "3.4.14-k2"DRV_EXT |
160 | #define DRV_DESCRIPTION "Intel(R) PRO/100 Network Driver" | 160 | #define DRV_DESCRIPTION "Intel(R) PRO/100 Network Driver" |
161 | #define DRV_COPYRIGHT "Copyright(c) 1999-2005 Intel Corporation" | 161 | #define DRV_COPYRIGHT "Copyright(c) 1999-2005 Intel Corporation" |
162 | #define PFX DRV_NAME ": " | 162 | #define PFX DRV_NAME ": " |
@@ -785,6 +785,7 @@ static int e100_eeprom_save(struct nic *nic, u16 start, u16 count) | |||
785 | } | 785 | } |
786 | 786 | ||
787 | #define E100_WAIT_SCB_TIMEOUT 20000 /* we might have to wait 100ms!!! */ | 787 | #define E100_WAIT_SCB_TIMEOUT 20000 /* we might have to wait 100ms!!! */ |
788 | #define E100_WAIT_SCB_FAST 20 /* delay like the old code */ | ||
788 | static inline int e100_exec_cmd(struct nic *nic, u8 cmd, dma_addr_t dma_addr) | 789 | static inline int e100_exec_cmd(struct nic *nic, u8 cmd, dma_addr_t dma_addr) |
789 | { | 790 | { |
790 | unsigned long flags; | 791 | unsigned long flags; |
@@ -798,7 +799,7 @@ static inline int e100_exec_cmd(struct nic *nic, u8 cmd, dma_addr_t dma_addr) | |||
798 | if(likely(!readb(&nic->csr->scb.cmd_lo))) | 799 | if(likely(!readb(&nic->csr->scb.cmd_lo))) |
799 | break; | 800 | break; |
800 | cpu_relax(); | 801 | cpu_relax(); |
801 | if(unlikely(i > (E100_WAIT_SCB_TIMEOUT >> 1))) | 802 | if(unlikely(i > E100_WAIT_SCB_FAST)) |
802 | udelay(5); | 803 | udelay(5); |
803 | } | 804 | } |
804 | if(unlikely(i == E100_WAIT_SCB_TIMEOUT)) { | 805 | if(unlikely(i == E100_WAIT_SCB_TIMEOUT)) { |
@@ -902,8 +903,8 @@ static void mdio_write(struct net_device *netdev, int addr, int reg, int data) | |||
902 | 903 | ||
903 | static void e100_get_defaults(struct nic *nic) | 904 | static void e100_get_defaults(struct nic *nic) |
904 | { | 905 | { |
905 | struct param_range rfds = { .min = 16, .max = 256, .count = 64 }; | 906 | struct param_range rfds = { .min = 16, .max = 256, .count = 256 }; |
906 | struct param_range cbs = { .min = 64, .max = 256, .count = 64 }; | 907 | struct param_range cbs = { .min = 64, .max = 256, .count = 128 }; |
907 | 908 | ||
908 | pci_read_config_byte(nic->pdev, PCI_REVISION_ID, &nic->rev_id); | 909 | pci_read_config_byte(nic->pdev, PCI_REVISION_ID, &nic->rev_id); |
909 | /* MAC type is encoded as rev ID; exception: ICH is treated as 82559 */ | 910 | /* MAC type is encoded as rev ID; exception: ICH is treated as 82559 */ |
@@ -1006,25 +1007,213 @@ static void e100_configure(struct nic *nic, struct cb *cb, struct sk_buff *skb) | |||
1006 | c[16], c[17], c[18], c[19], c[20], c[21], c[22], c[23]); | 1007 | c[16], c[17], c[18], c[19], c[20], c[21], c[22], c[23]); |
1007 | } | 1008 | } |
1008 | 1009 | ||
1010 | /********************************************************/ | ||
1011 | /* Micro code for 8086:1229 Rev 8 */ | ||
1012 | /********************************************************/ | ||
1013 | |||
1014 | /* Parameter values for the D101M B-step */ | ||
1015 | #define D101M_CPUSAVER_TIMER_DWORD 78 | ||
1016 | #define D101M_CPUSAVER_BUNDLE_DWORD 65 | ||
1017 | #define D101M_CPUSAVER_MIN_SIZE_DWORD 126 | ||
1018 | |||
1019 | #define D101M_B_RCVBUNDLE_UCODE \ | ||
1020 | {\ | ||
1021 | 0x00550215, 0xFFFF0437, 0xFFFFFFFF, 0x06A70789, 0xFFFFFFFF, 0x0558FFFF, \ | ||
1022 | 0x000C0001, 0x00101312, 0x000C0008, 0x00380216, \ | ||
1023 | 0x0010009C, 0x00204056, 0x002380CC, 0x00380056, \ | ||
1024 | 0x0010009C, 0x00244C0B, 0x00000800, 0x00124818, \ | ||
1025 | 0x00380438, 0x00000000, 0x00140000, 0x00380555, \ | ||
1026 | 0x00308000, 0x00100662, 0x00100561, 0x000E0408, \ | ||
1027 | 0x00134861, 0x000C0002, 0x00103093, 0x00308000, \ | ||
1028 | 0x00100624, 0x00100561, 0x000E0408, 0x00100861, \ | ||
1029 | 0x000C007E, 0x00222C21, 0x000C0002, 0x00103093, \ | ||
1030 | 0x00380C7A, 0x00080000, 0x00103090, 0x00380C7A, \ | ||
1031 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, \ | ||
1032 | 0x0010009C, 0x00244C2D, 0x00010004, 0x00041000, \ | ||
1033 | 0x003A0437, 0x00044010, 0x0038078A, 0x00000000, \ | ||
1034 | 0x00100099, 0x00206C7A, 0x0010009C, 0x00244C48, \ | ||
1035 | 0x00130824, 0x000C0001, 0x00101213, 0x00260C75, \ | ||
1036 | 0x00041000, 0x00010004, 0x00130826, 0x000C0006, \ | ||
1037 | 0x002206A8, 0x0013C926, 0x00101313, 0x003806A8, \ | ||
1038 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, \ | ||
1039 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, \ | ||
1040 | 0x00080600, 0x00101B10, 0x00050004, 0x00100826, \ | ||
1041 | 0x00101210, 0x00380C34, 0x00000000, 0x00000000, \ | ||
1042 | 0x0021155B, 0x00100099, 0x00206559, 0x0010009C, \ | ||
1043 | 0x00244559, 0x00130836, 0x000C0000, 0x00220C62, \ | ||
1044 | 0x000C0001, 0x00101B13, 0x00229C0E, 0x00210C0E, \ | ||
1045 | 0x00226C0E, 0x00216C0E, 0x0022FC0E, 0x00215C0E, \ | ||
1046 | 0x00214C0E, 0x00380555, 0x00010004, 0x00041000, \ | ||
1047 | 0x00278C67, 0x00040800, 0x00018100, 0x003A0437, \ | ||
1048 | 0x00130826, 0x000C0001, 0x00220559, 0x00101313, \ | ||
1049 | 0x00380559, 0x00000000, 0x00000000, 0x00000000, \ | ||
1050 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, \ | ||
1051 | 0x00000000, 0x00130831, 0x0010090B, 0x00124813, \ | ||
1052 | 0x000CFF80, 0x002606AB, 0x00041000, 0x00010004, \ | ||
1053 | 0x003806A8, 0x00000000, 0x00000000, 0x00000000, \ | ||
1054 | } | ||
1055 | |||
1056 | /********************************************************/ | ||
1057 | /* Micro code for 8086:1229 Rev 9 */ | ||
1058 | /********************************************************/ | ||
1059 | |||
1060 | /* Parameter values for the D101S */ | ||
1061 | #define D101S_CPUSAVER_TIMER_DWORD 78 | ||
1062 | #define D101S_CPUSAVER_BUNDLE_DWORD 67 | ||
1063 | #define D101S_CPUSAVER_MIN_SIZE_DWORD 128 | ||
1064 | |||
1065 | #define D101S_RCVBUNDLE_UCODE \ | ||
1066 | {\ | ||
1067 | 0x00550242, 0xFFFF047E, 0xFFFFFFFF, 0x06FF0818, 0xFFFFFFFF, 0x05A6FFFF, \ | ||
1068 | 0x000C0001, 0x00101312, 0x000C0008, 0x00380243, \ | ||
1069 | 0x0010009C, 0x00204056, 0x002380D0, 0x00380056, \ | ||
1070 | 0x0010009C, 0x00244F8B, 0x00000800, 0x00124818, \ | ||
1071 | 0x0038047F, 0x00000000, 0x00140000, 0x003805A3, \ | ||
1072 | 0x00308000, 0x00100610, 0x00100561, 0x000E0408, \ | ||
1073 | 0x00134861, 0x000C0002, 0x00103093, 0x00308000, \ | ||
1074 | 0x00100624, 0x00100561, 0x000E0408, 0x00100861, \ | ||
1075 | 0x000C007E, 0x00222FA1, 0x000C0002, 0x00103093, \ | ||
1076 | 0x00380F90, 0x00080000, 0x00103090, 0x00380F90, \ | ||
1077 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, \ | ||
1078 | 0x0010009C, 0x00244FAD, 0x00010004, 0x00041000, \ | ||
1079 | 0x003A047E, 0x00044010, 0x00380819, 0x00000000, \ | ||
1080 | 0x00100099, 0x00206FFD, 0x0010009A, 0x0020AFFD, \ | ||
1081 | 0x0010009C, 0x00244FC8, 0x00130824, 0x000C0001, \ | ||
1082 | 0x00101213, 0x00260FF7, 0x00041000, 0x00010004, \ | ||
1083 | 0x00130826, 0x000C0006, 0x00220700, 0x0013C926, \ | ||
1084 | 0x00101313, 0x00380700, 0x00000000, 0x00000000, \ | ||
1085 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, \ | ||
1086 | 0x00080600, 0x00101B10, 0x00050004, 0x00100826, \ | ||
1087 | 0x00101210, 0x00380FB6, 0x00000000, 0x00000000, \ | ||
1088 | 0x002115A9, 0x00100099, 0x002065A7, 0x0010009A, \ | ||
1089 | 0x0020A5A7, 0x0010009C, 0x002445A7, 0x00130836, \ | ||
1090 | 0x000C0000, 0x00220FE4, 0x000C0001, 0x00101B13, \ | ||
1091 | 0x00229F8E, 0x00210F8E, 0x00226F8E, 0x00216F8E, \ | ||
1092 | 0x0022FF8E, 0x00215F8E, 0x00214F8E, 0x003805A3, \ | ||
1093 | 0x00010004, 0x00041000, 0x00278FE9, 0x00040800, \ | ||
1094 | 0x00018100, 0x003A047E, 0x00130826, 0x000C0001, \ | ||
1095 | 0x002205A7, 0x00101313, 0x003805A7, 0x00000000, \ | ||
1096 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, \ | ||
1097 | 0x00000000, 0x00000000, 0x00000000, 0x00130831, \ | ||
1098 | 0x0010090B, 0x00124813, 0x000CFF80, 0x00260703, \ | ||
1099 | 0x00041000, 0x00010004, 0x00380700 \ | ||
1100 | } | ||
1101 | |||
1102 | /********************************************************/ | ||
1103 | /* Micro code for the 8086:1229 Rev F/10 */ | ||
1104 | /********************************************************/ | ||
1105 | |||
1106 | /* Parameter values for the D102 E-step */ | ||
1107 | #define D102_E_CPUSAVER_TIMER_DWORD 42 | ||
1108 | #define D102_E_CPUSAVER_BUNDLE_DWORD 54 | ||
1109 | #define D102_E_CPUSAVER_MIN_SIZE_DWORD 46 | ||
1110 | |||
1111 | #define D102_E_RCVBUNDLE_UCODE \ | ||
1112 | {\ | ||
1113 | 0x007D028F, 0x0E4204F9, 0x14ED0C85, 0x14FA14E9, 0x0EF70E36, 0x1FFF1FFF, \ | ||
1114 | 0x00E014B9, 0x00000000, 0x00000000, 0x00000000, \ | ||
1115 | 0x00E014BD, 0x00000000, 0x00000000, 0x00000000, \ | ||
1116 | 0x00E014D5, 0x00000000, 0x00000000, 0x00000000, \ | ||
1117 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, \ | ||
1118 | 0x00E014C1, 0x00000000, 0x00000000, 0x00000000, \ | ||
1119 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, \ | ||
1120 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, \ | ||
1121 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, \ | ||
1122 | 0x00E014C8, 0x00000000, 0x00000000, 0x00000000, \ | ||
1123 | 0x00200600, 0x00E014EE, 0x00000000, 0x00000000, \ | ||
1124 | 0x0030FF80, 0x00940E46, 0x00038200, 0x00102000, \ | ||
1125 | 0x00E00E43, 0x00000000, 0x00000000, 0x00000000, \ | ||
1126 | 0x00300006, 0x00E014FB, 0x00000000, 0x00000000, \ | ||
1127 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, \ | ||
1128 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, \ | ||
1129 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, \ | ||
1130 | 0x00906E41, 0x00800E3C, 0x00E00E39, 0x00000000, \ | ||
1131 | 0x00906EFD, 0x00900EFD, 0x00E00EF8, 0x00000000, \ | ||
1132 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, \ | ||
1133 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, \ | ||
1134 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, \ | ||
1135 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, \ | ||
1136 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, \ | ||
1137 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, \ | ||
1138 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, \ | ||
1139 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, \ | ||
1140 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, \ | ||
1141 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, \ | ||
1142 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, \ | ||
1143 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, \ | ||
1144 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, \ | ||
1145 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, \ | ||
1146 | } | ||
1147 | |||
1009 | static void e100_load_ucode(struct nic *nic, struct cb *cb, struct sk_buff *skb) | 1148 | static void e100_load_ucode(struct nic *nic, struct cb *cb, struct sk_buff *skb) |
1010 | { | 1149 | { |
1011 | int i; | 1150 | /* *INDENT-OFF* */ |
1012 | static const u32 ucode[UCODE_SIZE] = { | 1151 | static struct { |
1013 | /* NFS packets are misinterpreted as TCO packets and | 1152 | u32 ucode[UCODE_SIZE + 1]; |
1014 | * incorrectly routed to the BMC over SMBus. This | 1153 | u8 mac; |
1015 | * microcode patch checks the fragmented IP bit in the | 1154 | u8 timer_dword; |
1016 | * NFS/UDP header to distinguish between NFS and TCO. */ | 1155 | u8 bundle_dword; |
1017 | 0x0EF70E36, 0x1FFF1FFF, 0x1FFF1FFF, 0x1FFF1FFF, 0x1FFF1FFF, | 1156 | u8 min_size_dword; |
1018 | 0x1FFF1FFF, 0x00906E41, 0x00800E3C, 0x00E00E39, 0x00000000, | 1157 | } ucode_opts[] = { |
1019 | 0x00906EFD, 0x00900EFD, 0x00E00EF8, | 1158 | { D101M_B_RCVBUNDLE_UCODE, |
1020 | }; | 1159 | mac_82559_D101M, |
1160 | D101M_CPUSAVER_TIMER_DWORD, | ||
1161 | D101M_CPUSAVER_BUNDLE_DWORD, | ||
1162 | D101M_CPUSAVER_MIN_SIZE_DWORD }, | ||
1163 | { D101S_RCVBUNDLE_UCODE, | ||
1164 | mac_82559_D101S, | ||
1165 | D101S_CPUSAVER_TIMER_DWORD, | ||
1166 | D101S_CPUSAVER_BUNDLE_DWORD, | ||
1167 | D101S_CPUSAVER_MIN_SIZE_DWORD }, | ||
1168 | { D102_E_RCVBUNDLE_UCODE, | ||
1169 | mac_82551_F, | ||
1170 | D102_E_CPUSAVER_TIMER_DWORD, | ||
1171 | D102_E_CPUSAVER_BUNDLE_DWORD, | ||
1172 | D102_E_CPUSAVER_MIN_SIZE_DWORD }, | ||
1173 | { D102_E_RCVBUNDLE_UCODE, | ||
1174 | mac_82551_10, | ||
1175 | D102_E_CPUSAVER_TIMER_DWORD, | ||
1176 | D102_E_CPUSAVER_BUNDLE_DWORD, | ||
1177 | D102_E_CPUSAVER_MIN_SIZE_DWORD }, | ||
1178 | { {0}, 0, 0, 0, 0} | ||
1179 | }, *opts; | ||
1180 | /* *INDENT-ON* */ | ||
1181 | |||
1182 | #define BUNDLESMALL 1 | ||
1183 | #define BUNDLEMAX 50 | ||
1184 | #define INTDELAY 15000 | ||
1185 | |||
1186 | opts = ucode_opts; | ||
1187 | |||
1188 | /* do not load u-code for ICH devices */ | ||
1189 | if (nic->flags & ich) | ||
1190 | return; | ||
1191 | |||
1192 | /* Search for ucode match against h/w rev_id */ | ||
1193 | while (opts->mac) { | ||
1194 | if (nic->mac == opts->mac) { | ||
1195 | int i; | ||
1196 | u32 *ucode = opts->ucode; | ||
1197 | |||
1198 | /* Insert user-tunable settings */ | ||
1199 | ucode[opts->timer_dword] &= 0xFFFF0000; | ||
1200 | ucode[opts->timer_dword] |= | ||
1201 | (u16) INTDELAY; | ||
1202 | ucode[opts->bundle_dword] &= 0xFFFF0000; | ||
1203 | ucode[opts->bundle_dword] |= (u16) BUNDLEMAX; | ||
1204 | ucode[opts->min_size_dword] &= 0xFFFF0000; | ||
1205 | ucode[opts->min_size_dword] |= | ||
1206 | (BUNDLESMALL) ? 0xFFFF : 0xFF80; | ||
1207 | |||
1208 | for(i = 0; i < UCODE_SIZE; i++) | ||
1209 | cb->u.ucode[i] = cpu_to_le32(ucode[i]); | ||
1210 | cb->command = cpu_to_le16(cb_ucode); | ||
1211 | return; | ||
1212 | } | ||
1213 | opts++; | ||
1214 | } | ||
1021 | 1215 | ||
1022 | if(nic->mac == mac_82551_F || nic->mac == mac_82551_10) { | 1216 | cb->command = cpu_to_le16(cb_nop); |
1023 | for(i = 0; i < UCODE_SIZE; i++) | ||
1024 | cb->u.ucode[i] = cpu_to_le32(ucode[i]); | ||
1025 | cb->command = cpu_to_le16(cb_ucode); | ||
1026 | } else | ||
1027 | cb->command = cpu_to_le16(cb_nop); | ||
1028 | } | 1217 | } |
1029 | 1218 | ||
1030 | static void e100_setup_iaaddr(struct nic *nic, struct cb *cb, | 1219 | static void e100_setup_iaaddr(struct nic *nic, struct cb *cb, |
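The new e100_load_ucode() splices run-time tunables into fixed dword slots of the selected microcode image: the low 16 bits of the timer, bundle, and min-size dwords are masked off and overwritten. A condensed user-space sketch of that patching (the array contents are dummies; only the masking pattern is taken from the driver):

    #include <stdio.h>
    #include <stdint.h>

    #define UCODE_WORDS 134  /* merely large enough for the demo */

    int main(void)
    {
        uint32_t ucode[UCODE_WORDS] = { 0 };
        unsigned int timer_dword = 78;  /* e.g. D101M_CPUSAVER_TIMER_DWORD */
        uint16_t intdelay = 15000;      /* INTDELAY */

        ucode[timer_dword] &= 0xFFFF0000;  /* clear the tunable half */
        ucode[timer_dword] |= intdelay;    /* insert the new value */

        printf("dword %u = 0x%08X\n", timer_dword, ucode[timer_dword]);
        return 0;
    }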
@@ -1307,14 +1496,15 @@ static inline void e100_xmit_prepare(struct nic *nic, struct cb *cb, | |||
1307 | { | 1496 | { |
1308 | cb->command = nic->tx_command; | 1497 | cb->command = nic->tx_command; |
1309 | /* interrupt every 16 packets regardless of delay */ | 1498 | /* interrupt every 16 packets regardless of delay */ |
1310 | if((nic->cbs_avail & ~15) == nic->cbs_avail) cb->command |= cb_i; | 1499 | if((nic->cbs_avail & ~15) == nic->cbs_avail) |
1500 | cb->command |= cpu_to_le16(cb_i); | ||
1311 | cb->u.tcb.tbd_array = cb->dma_addr + offsetof(struct cb, u.tcb.tbd); | 1501 | cb->u.tcb.tbd_array = cb->dma_addr + offsetof(struct cb, u.tcb.tbd); |
1312 | cb->u.tcb.tcb_byte_count = 0; | 1502 | cb->u.tcb.tcb_byte_count = 0; |
1313 | cb->u.tcb.threshold = nic->tx_threshold; | 1503 | cb->u.tcb.threshold = nic->tx_threshold; |
1314 | cb->u.tcb.tbd_count = 1; | 1504 | cb->u.tcb.tbd_count = 1; |
1315 | cb->u.tcb.tbd.buf_addr = cpu_to_le32(pci_map_single(nic->pdev, | 1505 | cb->u.tcb.tbd.buf_addr = cpu_to_le32(pci_map_single(nic->pdev, |
1316 | skb->data, skb->len, PCI_DMA_TODEVICE)); | 1506 | skb->data, skb->len, PCI_DMA_TODEVICE)); |
1317 | // check for mapping failure? | 1507 | /* check for mapping failure? */ |
1318 | cb->u.tcb.tbd.size = cpu_to_le16(skb->len); | 1508 | cb->u.tcb.tbd.size = cpu_to_le16(skb->len); |
1319 | } | 1509 | } |
1320 | 1510 | ||
@@ -1539,7 +1729,7 @@ static inline int e100_rx_indicate(struct nic *nic, struct rx *rx, | |||
1539 | /* Don't indicate if hardware indicates errors */ | 1729 | /* Don't indicate if hardware indicates errors */ |
1540 | nic->net_stats.rx_dropped++; | 1730 | nic->net_stats.rx_dropped++; |
1541 | dev_kfree_skb_any(skb); | 1731 | dev_kfree_skb_any(skb); |
1542 | } else if(actual_size > nic->netdev->mtu + VLAN_ETH_HLEN) { | 1732 | } else if(actual_size > ETH_DATA_LEN + VLAN_ETH_HLEN) { |
1543 | /* Don't indicate oversized frames */ | 1733 | /* Don't indicate oversized frames */ |
1544 | nic->rx_over_length_errors++; | 1734 | nic->rx_over_length_errors++; |
1545 | nic->net_stats.rx_dropped++; | 1735 | nic->net_stats.rx_dropped++; |
@@ -1706,6 +1896,7 @@ static int e100_poll(struct net_device *netdev, int *budget) | |||
1706 | static void e100_netpoll(struct net_device *netdev) | 1896 | static void e100_netpoll(struct net_device *netdev) |
1707 | { | 1897 | { |
1708 | struct nic *nic = netdev_priv(netdev); | 1898 | struct nic *nic = netdev_priv(netdev); |
1899 | |||
1709 | e100_disable_irq(nic); | 1900 | e100_disable_irq(nic); |
1710 | e100_intr(nic->pdev->irq, netdev, NULL); | 1901 | e100_intr(nic->pdev->irq, netdev, NULL); |
1711 | e100_tx_clean(nic); | 1902 | e100_tx_clean(nic); |
@@ -2108,6 +2299,8 @@ static void e100_diag_test(struct net_device *netdev, | |||
2108 | } | 2299 | } |
2109 | for(i = 0; i < E100_TEST_LEN; i++) | 2300 | for(i = 0; i < E100_TEST_LEN; i++) |
2110 | test->flags |= data[i] ? ETH_TEST_FL_FAILED : 0; | 2301 | test->flags |= data[i] ? ETH_TEST_FL_FAILED : 0; |
2302 | |||
2303 | msleep_interruptible(4 * 1000); | ||
2111 | } | 2304 | } |
2112 | 2305 | ||
2113 | static int e100_phys_id(struct net_device *netdev, u32 data) | 2306 | static int e100_phys_id(struct net_device *netdev, u32 data) |
diff --git a/drivers/net/sis190.c b/drivers/net/sis190.c new file mode 100644 index 000000000000..bf3440aa6c24 --- /dev/null +++ b/drivers/net/sis190.c | |||
@@ -0,0 +1,1843 @@ | |||
1 | /* | ||
2 | sis190.c: Silicon Integrated Systems SiS190 ethernet driver | ||
3 | |||
4 | Copyright (c) 2003 K.M. Liu <kmliu@sis.com> | ||
5 | Copyright (c) 2003, 2004 Jeff Garzik <jgarzik@pobox.com> | ||
6 | Copyright (c) 2003, 2004, 2005 Francois Romieu <romieu@fr.zoreil.com> | ||
7 | |||
8 | Based on r8169.c, tg3.c, 8139cp.c, skge.c, epic100.c and SiS 190/191 | ||
9 | genuine driver. | ||
10 | |||
11 | This software may be used and distributed according to the terms of | ||
12 | the GNU General Public License (GPL), incorporated herein by reference. | ||
13 | Drivers based on or derived from this code fall under the GPL and must | ||
14 | retain the authorship, copyright and license notice. This file is not | ||
15 | a complete program and may only be used when the entire operating | ||
16 | system is licensed under the GPL. | ||
17 | |||
18 | See the file COPYING in this distribution for more information. | ||
19 | |||
20 | */ | ||
21 | |||
22 | #include <linux/module.h> | ||
23 | #include <linux/moduleparam.h> | ||
24 | #include <linux/netdevice.h> | ||
25 | #include <linux/rtnetlink.h> | ||
26 | #include <linux/etherdevice.h> | ||
27 | #include <linux/ethtool.h> | ||
28 | #include <linux/pci.h> | ||
29 | #include <linux/mii.h> | ||
30 | #include <linux/delay.h> | ||
31 | #include <linux/crc32.h> | ||
32 | #include <linux/dma-mapping.h> | ||
33 | #include <asm/irq.h> | ||
34 | |||
35 | #define net_drv(p, arg...) if (netif_msg_drv(p)) \ | ||
36 | printk(arg) | ||
37 | #define net_probe(p, arg...) if (netif_msg_probe(p)) \ | ||
38 | printk(arg) | ||
39 | #define net_link(p, arg...) if (netif_msg_link(p)) \ | ||
40 | printk(arg) | ||
41 | #define net_intr(p, arg...) if (netif_msg_intr(p)) \ | ||
42 | printk(arg) | ||
43 | #define net_tx_err(p, arg...) if (netif_msg_tx_err(p)) \ | ||
44 | printk(arg) | ||
45 | |||
46 | #define PHY_MAX_ADDR 32 | ||
47 | #define PHY_ID_ANY 0x1f | ||
48 | #define MII_REG_ANY 0x1f | ||
49 | |||
50 | #ifdef CONFIG_SIS190_NAPI | ||
51 | #define NAPI_SUFFIX "-NAPI" | ||
52 | #else | ||
53 | #define NAPI_SUFFIX "" | ||
54 | #endif | ||
55 | |||
56 | #define DRV_VERSION "1.2" NAPI_SUFFIX | ||
57 | #define DRV_NAME "sis190" | ||
58 | #define SIS190_DRIVER_NAME DRV_NAME " Gigabit Ethernet driver " DRV_VERSION | ||
59 | #define PFX DRV_NAME ": " | ||
60 | |||
61 | #ifdef CONFIG_SIS190_NAPI | ||
62 | #define sis190_rx_skb netif_receive_skb | ||
63 | #define sis190_rx_quota(count, quota) min(count, quota) | ||
64 | #else | ||
65 | #define sis190_rx_skb netif_rx | ||
66 | #define sis190_rx_quota(count, quota) count | ||
67 | #endif | ||
68 | |||
69 | #define MAC_ADDR_LEN 6 | ||
70 | |||
71 | #define NUM_TX_DESC 64 /* [8..1024] */ | ||
72 | #define NUM_RX_DESC 64 /* [8..8192] */ | ||
73 | #define TX_RING_BYTES (NUM_TX_DESC * sizeof(struct TxDesc)) | ||
74 | #define RX_RING_BYTES (NUM_RX_DESC * sizeof(struct RxDesc)) | ||
75 | #define RX_BUF_SIZE 1536 | ||
76 | #define RX_BUF_MASK 0xfff8 | ||
77 | |||
78 | #define SIS190_REGS_SIZE 0x80 | ||
79 | #define SIS190_TX_TIMEOUT (6*HZ) | ||
80 | #define SIS190_PHY_TIMEOUT (10*HZ) | ||
81 | #define SIS190_MSG_DEFAULT (NETIF_MSG_DRV | NETIF_MSG_PROBE | \ | ||
82 | NETIF_MSG_LINK | NETIF_MSG_IFUP | \ | ||
83 | NETIF_MSG_IFDOWN) | ||
84 | |||
85 | /* Enhanced PHY access register bit definitions */ | ||
86 | #define EhnMIIread 0x0000 | ||
87 | #define EhnMIIwrite 0x0020 | ||
88 | #define EhnMIIdataShift 16 | ||
89 | #define EhnMIIpmdShift 6 /* 7016 only */ | ||
90 | #define EhnMIIregShift 11 | ||
91 | #define EhnMIIreq 0x0010 | ||
92 | #define EhnMIInotDone 0x0010 | ||
93 | |||
94 | /* Write/read MMIO register */ | ||
95 | #define SIS_W8(reg, val) writeb ((val), ioaddr + (reg)) | ||
96 | #define SIS_W16(reg, val) writew ((val), ioaddr + (reg)) | ||
97 | #define SIS_W32(reg, val) writel ((val), ioaddr + (reg)) | ||
98 | #define SIS_R8(reg) readb (ioaddr + (reg)) | ||
99 | #define SIS_R16(reg) readw (ioaddr + (reg)) | ||
100 | #define SIS_R32(reg) readl (ioaddr + (reg)) | ||
101 | |||
102 | #define SIS_PCI_COMMIT() SIS_R32(IntrControl) | ||
103 | |||
104 | enum sis190_registers { | ||
105 | TxControl = 0x00, | ||
106 | TxDescStartAddr = 0x04, | ||
107 | rsv0 = 0x08, // reserved | ||
108 | TxSts = 0x0c, // unused (Control/Status) | ||
109 | RxControl = 0x10, | ||
110 | RxDescStartAddr = 0x14, | ||
111 | rsv1 = 0x18, // reserved | ||
112 | RxSts = 0x1c, // unused | ||
113 | IntrStatus = 0x20, | ||
114 | IntrMask = 0x24, | ||
115 | IntrControl = 0x28, | ||
116 | IntrTimer = 0x2c, // unused (Interrupt Timer) | ||
117 | PMControl = 0x30, // unused (Power Mgmt Control/Status) | ||
118 | rsv2 = 0x34, // reserved | ||
119 | ROMControl = 0x38, | ||
120 | ROMInterface = 0x3c, | ||
121 | StationControl = 0x40, | ||
122 | GMIIControl = 0x44, | ||
123 | GIoCR = 0x48, // unused (GMAC IO Compensation) | ||
124 | GIoCtrl = 0x4c, // unused (GMAC IO Control) | ||
125 | TxMacControl = 0x50, | ||
126 | TxLimit = 0x54, // unused (Tx MAC Timer/TryLimit) | ||
127 | RGDelay = 0x58, // unused (RGMII Tx Internal Delay) | ||
128 | rsv3 = 0x5c, // reserved | ||
129 | RxMacControl = 0x60, | ||
130 | RxMacAddr = 0x62, | ||
131 | RxHashTable = 0x68, | ||
132 | // Undocumented = 0x6c, | ||
133 | RxWolCtrl = 0x70, | ||
134 | RxWolData = 0x74, // unused (Rx WOL Data Access) | ||
135 | RxMPSControl = 0x78, // unused (Rx MPS Control) | ||
136 | rsv4 = 0x7c, // reserved | ||
137 | }; | ||
138 | |||
139 | enum sis190_register_content { | ||
140 | /* IntrStatus */ | ||
141 | SoftInt = 0x40000000, // unused | ||
142 | Timeup = 0x20000000, // unused | ||
143 | PauseFrame = 0x00080000, // unused | ||
144 | MagicPacket = 0x00040000, // unused | ||
145 | WakeupFrame = 0x00020000, // unused | ||
146 | LinkChange = 0x00010000, | ||
147 | RxQEmpty = 0x00000080, | ||
148 | RxQInt = 0x00000040, | ||
149 | TxQ1Empty = 0x00000020, // unused | ||
150 | TxQ1Int = 0x00000010, | ||
151 | TxQ0Empty = 0x00000008, // unused | ||
152 | TxQ0Int = 0x00000004, | ||
153 | RxHalt = 0x00000002, | ||
154 | TxHalt = 0x00000001, | ||
155 | |||
156 | /* {Rx/Tx}CmdBits */ | ||
157 | CmdReset = 0x10, | ||
158 | CmdRxEnb = 0x08, // unused | ||
159 | CmdTxEnb = 0x01, | ||
160 | RxBufEmpty = 0x01, // unused | ||
161 | |||
162 | /* Cfg9346Bits */ | ||
163 | Cfg9346_Lock = 0x00, // unused | ||
164 | Cfg9346_Unlock = 0xc0, // unused | ||
165 | |||
166 | /* RxMacControl */ | ||
167 | AcceptErr = 0x20, // unused | ||
168 | AcceptRunt = 0x10, // unused | ||
169 | AcceptBroadcast = 0x0800, | ||
170 | AcceptMulticast = 0x0400, | ||
171 | AcceptMyPhys = 0x0200, | ||
172 | AcceptAllPhys = 0x0100, | ||
173 | |||
174 | /* RxConfigBits */ | ||
175 | RxCfgFIFOShift = 13, | ||
176 | RxCfgDMAShift = 8, // 0x1a in RxControl ? | ||
177 | |||
178 | /* TxConfigBits */ | ||
179 | TxInterFrameGapShift = 24, | ||
180 | TxDMAShift = 8, /* DMA burst value (0-7) is shifted this many bits */ | ||
181 | |||
182 | /* StationControl */ | ||
183 | _1000bpsF = 0x1c00, | ||
184 | _1000bpsH = 0x0c00, | ||
185 | _100bpsF = 0x1800, | ||
186 | _100bpsH = 0x0800, | ||
187 | _10bpsF = 0x1400, | ||
188 | _10bpsH = 0x0400, | ||
189 | |||
190 | LinkStatus = 0x02, // unused | ||
191 | FullDup = 0x01, // unused | ||
192 | |||
193 | /* TBICSRBit */ | ||
194 | TBILinkOK = 0x02000000, // unused | ||
195 | }; | ||
196 | |||
197 | struct TxDesc { | ||
198 | __le32 PSize; | ||
199 | __le32 status; | ||
200 | __le32 addr; | ||
201 | __le32 size; | ||
202 | }; | ||
203 | |||
204 | struct RxDesc { | ||
205 | __le32 PSize; | ||
206 | __le32 status; | ||
207 | __le32 addr; | ||
208 | __le32 size; | ||
209 | }; | ||
210 | |||
211 | enum _DescStatusBit { | ||
212 | /* _Desc.status */ | ||
213 | OWNbit = 0x80000000, // RXOWN/TXOWN | ||
214 | INTbit = 0x40000000, // RXINT/TXINT | ||
215 | CRCbit = 0x00020000, // CRCOFF/CRCEN | ||
216 | PADbit = 0x00010000, // PREADD/PADEN | ||
217 | /* _Desc.size */ | ||
218 | RingEnd = 0x80000000, | ||
219 | /* TxDesc.status */ | ||
220 | LSEN = 0x08000000, // TSO ? -- FR | ||
221 | IPCS = 0x04000000, | ||
222 | TCPCS = 0x02000000, | ||
223 | UDPCS = 0x01000000, | ||
224 | BSTEN = 0x00800000, | ||
225 | EXTEN = 0x00400000, | ||
226 | DEFEN = 0x00200000, | ||
227 | BKFEN = 0x00100000, | ||
228 | CRSEN = 0x00080000, | ||
229 | COLEN = 0x00040000, | ||
230 | THOL3 = 0x30000000, | ||
231 | THOL2 = 0x20000000, | ||
232 | THOL1 = 0x10000000, | ||
233 | THOL0 = 0x00000000, | ||
234 | /* RxDesc.status */ | ||
235 | IPON = 0x20000000, | ||
236 | TCPON = 0x10000000, | ||
237 | UDPON = 0x08000000, | ||
238 | Wakup = 0x00400000, | ||
239 | Magic = 0x00200000, | ||
240 | Pause = 0x00100000, | ||
241 | DEFbit = 0x00200000, | ||
242 | BCAST = 0x000c0000, | ||
243 | MCAST = 0x00080000, | ||
244 | UCAST = 0x00040000, | ||
245 | /* RxDesc.PSize */ | ||
246 | TAGON = 0x80000000, | ||
247 | RxDescCountMask = 0x7f000000, // multi-desc pkt when > 1 ? -- FR | ||
248 | ABORT = 0x00800000, | ||
249 | SHORT = 0x00400000, | ||
250 | LIMIT = 0x00200000, | ||
251 | MIIER = 0x00100000, | ||
252 | OVRUN = 0x00080000, | ||
253 | NIBON = 0x00040000, | ||
254 | COLON = 0x00020000, | ||
255 | CRCOK = 0x00010000, | ||
256 | RxSizeMask = 0x0000ffff | ||
257 | /* | ||
258 | * The ASIC could apparently do VLAN, TSO, jumbo (sis191 only) and | ||
259 | * provide two (unused with Linux) Tx queues. No publicly | ||
260 | * available documentation, alas. | ||
261 | */ | ||
262 | }; | ||
263 | |||
264 | enum sis190_eeprom_access_register_bits { | ||
265 | EECS = 0x00000001, // unused | ||
266 | EECLK = 0x00000002, // unused | ||
267 | EEDO = 0x00000008, // unused | ||
268 | EEDI = 0x00000004, // unused | ||
269 | EEREQ = 0x00000080, | ||
270 | EEROP = 0x00000200, | ||
271 | EEWOP = 0x00000100 // unused | ||
272 | }; | ||
273 | |||
274 | /* EEPROM Addresses */ | ||
275 | enum sis190_eeprom_address { | ||
276 | EEPROMSignature = 0x00, | ||
277 | EEPROMCLK = 0x01, // unused | ||
278 | EEPROMInfo = 0x02, | ||
279 | EEPROMMACAddr = 0x03 | ||
280 | }; | ||
281 | |||
282 | struct sis190_private { | ||
283 | void __iomem *mmio_addr; | ||
284 | struct pci_dev *pci_dev; | ||
285 | struct net_device_stats stats; | ||
286 | spinlock_t lock; | ||
287 | u32 rx_buf_sz; | ||
288 | u32 cur_rx; | ||
289 | u32 cur_tx; | ||
290 | u32 dirty_rx; | ||
291 | u32 dirty_tx; | ||
292 | dma_addr_t rx_dma; | ||
293 | dma_addr_t tx_dma; | ||
294 | struct RxDesc *RxDescRing; | ||
295 | struct TxDesc *TxDescRing; | ||
296 | struct sk_buff *Rx_skbuff[NUM_RX_DESC]; | ||
297 | struct sk_buff *Tx_skbuff[NUM_TX_DESC]; | ||
298 | struct work_struct phy_task; | ||
299 | struct timer_list timer; | ||
300 | u32 msg_enable; | ||
301 | struct mii_if_info mii_if; | ||
302 | struct list_head first_phy; | ||
303 | }; | ||
304 | |||
305 | struct sis190_phy { | ||
306 | struct list_head list; | ||
307 | int phy_id; | ||
308 | u16 id[2]; | ||
309 | u16 status; | ||
310 | u8 type; | ||
311 | }; | ||
312 | |||
313 | enum sis190_phy_type { | ||
314 | UNKNOWN = 0x00, | ||
315 | HOME = 0x01, | ||
316 | LAN = 0x02, | ||
317 | MIX = 0x03 | ||
318 | }; | ||
319 | |||
320 | static struct mii_chip_info { | ||
321 | const char *name; | ||
322 | u16 id[2]; | ||
323 | unsigned int type; | ||
324 | } mii_chip_table[] = { | ||
325 | { "Broadcom PHY BCM5461", { 0x0020, 0x60c0 }, LAN }, | ||
326 | { "Agere PHY ET1101B", { 0x0282, 0xf010 }, LAN }, | ||
327 | { "Marvell PHY 88E1111", { 0x0141, 0x0cc0 }, LAN }, | ||
328 | { "Realtek PHY RTL8201", { 0x0000, 0x8200 }, LAN }, | ||
329 | { NULL, } | ||
330 | }; | ||
331 | |||
332 | static const struct { | ||
333 | const char *name; | ||
334 | u8 version; /* depends on docs */ | ||
335 | u32 RxConfigMask; /* clear the bits supported by this chip */ | ||
336 | } sis_chip_info[] = { | ||
337 | { DRV_NAME, 0x00, 0xff7e1880, }, | ||
338 | }; | ||
339 | |||
340 | static struct pci_device_id sis190_pci_tbl[] __devinitdata = { | ||
341 | { PCI_DEVICE(PCI_VENDOR_ID_SI, 0x0190), 0, 0, 0 }, | ||
342 | { 0, }, | ||
343 | }; | ||
344 | |||
345 | MODULE_DEVICE_TABLE(pci, sis190_pci_tbl); | ||
346 | |||
347 | static int rx_copybreak = 200; | ||
348 | |||
349 | static struct { | ||
350 | u32 msg_enable; | ||
351 | } debug = { -1 }; | ||
352 | |||
353 | MODULE_DESCRIPTION("SiS sis190 Gigabit Ethernet driver"); | ||
354 | module_param(rx_copybreak, int, 0); | ||
355 | MODULE_PARM_DESC(rx_copybreak, "Copy breakpoint for copy-only-tiny-frames"); | ||
356 | module_param_named(debug, debug.msg_enable, int, 0); | ||
357 | MODULE_PARM_DESC(debug, "Debug verbosity level (0=none, ..., 16=all)"); | ||
358 | MODULE_AUTHOR("K.M. Liu <kmliu@sis.com>, Ueimor <romieu@fr.zoreil.com>"); | ||
359 | MODULE_VERSION(DRV_VERSION); | ||
360 | MODULE_LICENSE("GPL"); | ||
361 | |||
362 | static const u32 sis190_intr_mask = | ||
363 | RxQEmpty | RxQInt | TxQ1Int | TxQ0Int | RxHalt | TxHalt; | ||
364 | |||
365 | /* | ||
366 | * Maximum number of multicast addresses to filter (vs. Rx-all-multicast). | ||
367 | * The chips use a 64 element hash table based on the Ethernet CRC. | ||
368 | */ | ||
369 | static int multicast_filter_limit = 32; | ||
370 | |||
371 | static void __mdio_cmd(void __iomem *ioaddr, u32 ctl) | ||
372 | { | ||
373 | unsigned int i; | ||
374 | |||
375 | SIS_W32(GMIIControl, ctl); | ||
376 | |||
377 | msleep(1); | ||
378 | |||
379 | for (i = 0; i < 100; i++) { | ||
380 | if (!(SIS_R32(GMIIControl) & EhnMIInotDone)) | ||
381 | break; | ||
382 | msleep(1); | ||
383 | } | ||
384 | |||
385 | if (i > 99) | ||
386 | printk(KERN_ERR PFX "PHY command failed!\n"); | ||
387 | } | ||
388 | |||
389 | static void mdio_write(void __iomem *ioaddr, int phy_id, int reg, int val) | ||
390 | { | ||
391 | __mdio_cmd(ioaddr, EhnMIIreq | EhnMIIwrite | | ||
392 | (((u32) reg) << EhnMIIregShift) | (phy_id << EhnMIIpmdShift) | | ||
393 | (((u32) val) << EhnMIIdataShift)); | ||
394 | } | ||
395 | |||
396 | static int mdio_read(void __iomem *ioaddr, int phy_id, int reg) | ||
397 | { | ||
398 | __mdio_cmd(ioaddr, EhnMIIreq | EhnMIIread | | ||
399 | (((u32) reg) << EhnMIIregShift) | (phy_id << EhnMIIpmdShift)); | ||
400 | |||
401 | return (u16) (SIS_R32(GMIIControl) >> EhnMIIdataShift); | ||
402 | } | ||
403 | |||
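mdio_write() above packs the opcode, register number, PHY address, and data into a single 32-bit GMIIControl command using the EhnMII* shifts. A standalone sketch of the packing, with the constants copied from the definitions earlier in this file:

    #include <stdio.h>
    #include <stdint.h>

    #define EhnMIIwrite     0x0020
    #define EhnMIIreq       0x0010
    #define EhnMIIdataShift 16
    #define EhnMIIpmdShift  6
    #define EhnMIIregShift  11

    int main(void)
    {
        uint32_t ctl = EhnMIIreq | EhnMIIwrite |
                       (2u << EhnMIIregShift) |    /* reg 2 */
                       (1u << EhnMIIpmdShift) |    /* phy_id 1 */
                       (0x1234u << EhnMIIdataShift);

        printf("GMIIControl = 0x%08X\n", ctl);
        return 0;
    }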
404 | static void __mdio_write(struct net_device *dev, int phy_id, int reg, int val) | ||
405 | { | ||
406 | struct sis190_private *tp = netdev_priv(dev); | ||
407 | |||
408 | mdio_write(tp->mmio_addr, phy_id, reg, val); | ||
409 | } | ||
410 | |||
411 | static int __mdio_read(struct net_device *dev, int phy_id, int reg) | ||
412 | { | ||
413 | struct sis190_private *tp = netdev_priv(dev); | ||
414 | |||
415 | return mdio_read(tp->mmio_addr, phy_id, reg); | ||
416 | } | ||
417 | |||
418 | static u16 mdio_read_latched(void __iomem *ioaddr, int phy_id, int reg) | ||
419 | { | ||
420 | mdio_read(ioaddr, phy_id, reg); | ||
421 | return mdio_read(ioaddr, phy_id, reg); | ||
422 | } | ||
423 | |||
424 | static u16 __devinit sis190_read_eeprom(void __iomem *ioaddr, u32 reg) | ||
425 | { | ||
426 | u16 data = 0xffff; | ||
427 | unsigned int i; | ||
428 | |||
429 | if (!(SIS_R32(ROMControl) & 0x0002)) | ||
430 | return 0; | ||
431 | |||
432 | SIS_W32(ROMInterface, EEREQ | EEROP | (reg << 10)); | ||
433 | |||
434 | for (i = 0; i < 200; i++) { | ||
435 | if (!(SIS_R32(ROMInterface) & EEREQ)) { | ||
436 | data = (SIS_R32(ROMInterface) & 0xffff0000) >> 16; | ||
437 | break; | ||
438 | } | ||
439 | msleep(1); | ||
440 | } | ||
441 | |||
442 | return data; | ||
443 | } | ||
444 | |||
445 | static void sis190_irq_mask_and_ack(void __iomem *ioaddr) | ||
446 | { | ||
447 | SIS_W32(IntrMask, 0x00); | ||
448 | SIS_W32(IntrStatus, 0xffffffff); | ||
449 | SIS_PCI_COMMIT(); | ||
450 | } | ||
451 | |||
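sis190_irq_mask_and_ack() ends with SIS_PCI_COMMIT(), a dummy read of IntrControl. PCI MMIO writes are posted, so the read-back forces the mask and ack writes to reach the chip before the caller proceeds. A self-contained mock of the pattern (the register window here is a plain array, standing in for real MMIO):

    #include <stdio.h>
    #include <stdint.h>

    static volatile uint32_t mmio[0x80 / 4];  /* mock register window */

    #define SIS_W32(reg, val) (mmio[(reg) / 4] = (val))
    #define SIS_R32(reg)      (mmio[(reg) / 4])
    #define SIS_PCI_COMMIT()  SIS_R32(0x28)   /* IntrControl read-back */

    int main(void)
    {
        SIS_W32(0x24, 0x00);        /* IntrMask: mask everything */
        SIS_W32(0x20, 0xffffffff);  /* IntrStatus: ack everything */
        SIS_PCI_COMMIT();           /* flushes posted writes on real HW */
        printf("masked and acked\n");
        return 0;
    }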
452 | static void sis190_asic_down(void __iomem *ioaddr) | ||
453 | { | ||
454 | /* Stop the chip's Tx and Rx DMA processes. */ | ||
455 | |||
456 | SIS_W32(TxControl, 0x1a00); | ||
457 | SIS_W32(RxControl, 0x1a00); | ||
458 | |||
459 | sis190_irq_mask_and_ack(ioaddr); | ||
460 | } | ||
461 | |||
462 | static void sis190_mark_as_last_descriptor(struct RxDesc *desc) | ||
463 | { | ||
464 | desc->size |= cpu_to_le32(RingEnd); | ||
465 | } | ||
466 | |||
467 | static inline void sis190_give_to_asic(struct RxDesc *desc, u32 rx_buf_sz) | ||
468 | { | ||
469 | u32 eor = le32_to_cpu(desc->size) & RingEnd; | ||
470 | |||
471 | desc->PSize = 0x0; | ||
472 | desc->size = cpu_to_le32((rx_buf_sz & RX_BUF_MASK) | eor); | ||
473 | wmb(); | ||
474 | desc->status = cpu_to_le32(OWNbit | INTbit); | ||
475 | } | ||
476 | |||
477 | static inline void sis190_map_to_asic(struct RxDesc *desc, dma_addr_t mapping, | ||
478 | u32 rx_buf_sz) | ||
479 | { | ||
480 | desc->addr = cpu_to_le32(mapping); | ||
481 | sis190_give_to_asic(desc, rx_buf_sz); | ||
482 | } | ||
483 | |||
484 | static inline void sis190_make_unusable_by_asic(struct RxDesc *desc) | ||
485 | { | ||
486 | desc->PSize = 0x0; | ||
487 | desc->addr = 0xdeadbeef; | ||
488 | desc->size &= cpu_to_le32(RingEnd); | ||
489 | wmb(); | ||
490 | desc->status = 0x0; | ||
491 | } | ||
492 | |||
493 | static int sis190_alloc_rx_skb(struct pci_dev *pdev, struct sk_buff **sk_buff, | ||
494 | struct RxDesc *desc, u32 rx_buf_sz) | ||
495 | { | ||
496 | struct sk_buff *skb; | ||
497 | dma_addr_t mapping; | ||
498 | int ret = 0; | ||
499 | |||
500 | skb = dev_alloc_skb(rx_buf_sz); | ||
501 | if (!skb) | ||
502 | goto err_out; | ||
503 | |||
504 | *sk_buff = skb; | ||
505 | |||
506 | mapping = pci_map_single(pdev, skb->data, rx_buf_sz, | ||
507 | PCI_DMA_FROMDEVICE); | ||
508 | |||
509 | sis190_map_to_asic(desc, mapping, rx_buf_sz); | ||
510 | out: | ||
511 | return ret; | ||
512 | |||
513 | err_out: | ||
514 | ret = -ENOMEM; | ||
515 | sis190_make_unusable_by_asic(desc); | ||
516 | goto out; | ||
517 | } | ||
518 | |||
519 | static u32 sis190_rx_fill(struct sis190_private *tp, struct net_device *dev, | ||
520 | u32 start, u32 end) | ||
521 | { | ||
522 | u32 cur; | ||
523 | |||
524 | for (cur = start; cur < end; cur++) { | ||
525 | int ret, i = cur % NUM_RX_DESC; | ||
526 | |||
527 | if (tp->Rx_skbuff[i]) | ||
528 | continue; | ||
529 | |||
530 | ret = sis190_alloc_rx_skb(tp->pci_dev, tp->Rx_skbuff + i, | ||
531 | tp->RxDescRing + i, tp->rx_buf_sz); | ||
532 | if (ret < 0) | ||
533 | break; | ||
534 | } | ||
535 | return cur - start; | ||
536 | } | ||
537 | |||
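sis190_rx_fill() walks the half-open window [start, end) of free-running ring indices and maps each one onto a physical slot with % NUM_RX_DESC. A small demo of the wrap-around indexing:

    #include <stdio.h>

    #define NUM_RX_DESC 64

    int main(void)
    {
        unsigned int dirty_rx = 62, cur_rx = 66;  /* window spans the wrap */
        unsigned int cur;

        for (cur = dirty_rx; cur < cur_rx; cur++)
            printf("refill ring slot %u\n", cur % NUM_RX_DESC);  /* 62 63 0 1 */
        return 0;
    }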
538 | static inline int sis190_try_rx_copy(struct sk_buff **sk_buff, int pkt_size, | ||
539 | struct RxDesc *desc, int rx_buf_sz) | ||
540 | { | ||
541 | int ret = -1; | ||
542 | |||
543 | if (pkt_size < rx_copybreak) { | ||
544 | struct sk_buff *skb; | ||
545 | |||
546 | skb = dev_alloc_skb(pkt_size + NET_IP_ALIGN); | ||
547 | if (skb) { | ||
548 | skb_reserve(skb, NET_IP_ALIGN); | ||
549 | eth_copy_and_sum(skb, sk_buff[0]->data, pkt_size, 0); | ||
550 | *sk_buff = skb; | ||
551 | sis190_give_to_asic(desc, rx_buf_sz); | ||
552 | ret = 0; | ||
553 | } | ||
554 | } | ||
555 | return ret; | ||
556 | } | ||
557 | |||
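sis190_try_rx_copy() implements the usual rx_copybreak trade-off: packets below the threshold are copied into a small freshly allocated skb so the large DMA buffer can be handed straight back to the NIC. A toy sketch of the decision logic:

    #include <stdio.h>

    static int rx_copybreak = 200;  /* module parameter default from above */

    static void receive(int pkt_size)
    {
        if (pkt_size < rx_copybreak)
            printf("%d bytes: copy out, recycle the DMA buffer\n", pkt_size);
        else
            printf("%d bytes: pass the buffer up, map a new one\n", pkt_size);
    }

    int main(void)
    {
        receive(64);    /* small: copied */
        receive(1500);  /* large: buffer handed up */
        return 0;
    }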
558 | static inline int sis190_rx_pkt_err(u32 status, struct net_device_stats *stats) | ||
559 | { | ||
560 | #define ErrMask (OVRUN | SHORT | LIMIT | MIIER | NIBON | COLON | ABORT) | ||
561 | |||
562 | if ((status & CRCOK) && !(status & ErrMask)) | ||
563 | return 0; | ||
564 | |||
565 | if (!(status & CRCOK)) | ||
566 | stats->rx_crc_errors++; | ||
567 | else if (status & OVRUN) | ||
568 | stats->rx_over_errors++; | ||
569 | else if (status & (SHORT | LIMIT)) | ||
570 | stats->rx_length_errors++; | ||
571 | else if (status & (MIIER | NIBON | COLON)) | ||
572 | stats->rx_frame_errors++; | ||
573 | |||
574 | stats->rx_errors++; | ||
575 | return -1; | ||
576 | } | ||
577 | |||
578 | static int sis190_rx_interrupt(struct net_device *dev, | ||
579 | struct sis190_private *tp, void __iomem *ioaddr) | ||
580 | { | ||
581 | struct net_device_stats *stats = &tp->stats; | ||
582 | u32 rx_left, cur_rx = tp->cur_rx; | ||
583 | u32 delta, count; | ||
584 | |||
585 | rx_left = NUM_RX_DESC + tp->dirty_rx - cur_rx; | ||
586 | rx_left = sis190_rx_quota(rx_left, (u32) dev->quota); | ||
587 | |||
588 | for (; rx_left > 0; rx_left--, cur_rx++) { | ||
589 | unsigned int entry = cur_rx % NUM_RX_DESC; | ||
590 | struct RxDesc *desc = tp->RxDescRing + entry; | ||
591 | u32 status; | ||
592 | |||
593 | if (le32_to_cpu(desc->status) & OWNbit) | ||
594 | break; | ||
595 | |||
596 | status = le32_to_cpu(desc->PSize); | ||
597 | |||
598 | // net_intr(tp, KERN_INFO "%s: Rx PSize = %08x.\n", dev->name, | ||
599 | // status); | ||
600 | |||
601 | if (sis190_rx_pkt_err(status, stats) < 0) | ||
602 | sis190_give_to_asic(desc, tp->rx_buf_sz); | ||
603 | else { | ||
604 | struct sk_buff *skb = tp->Rx_skbuff[entry]; | ||
605 | int pkt_size = (status & RxSizeMask) - 4; | ||
606 | void (*pci_action)(struct pci_dev *, dma_addr_t, | ||
607 | size_t, int) = pci_dma_sync_single_for_device; | ||
608 | |||
609 | if (unlikely(pkt_size > tp->rx_buf_sz)) { | ||
610 | net_intr(tp, KERN_INFO | ||
611 | "%s: (frag) status = %08x.\n", | ||
612 | dev->name, status); | ||
613 | stats->rx_dropped++; | ||
614 | stats->rx_length_errors++; | ||
615 | sis190_give_to_asic(desc, tp->rx_buf_sz); | ||
616 | continue; | ||
617 | } | ||
618 | |||
619 | pci_dma_sync_single_for_cpu(tp->pci_dev, | ||
620 | le32_to_cpu(desc->addr), tp->rx_buf_sz, | ||
621 | PCI_DMA_FROMDEVICE); | ||
622 | |||
623 | if (sis190_try_rx_copy(&skb, pkt_size, desc, | ||
624 | tp->rx_buf_sz)) { | ||
625 | pci_action = pci_unmap_single; | ||
626 | tp->Rx_skbuff[entry] = NULL; | ||
627 | sis190_make_unusable_by_asic(desc); | ||
628 | } | ||
629 | |||
630 | pci_action(tp->pci_dev, le32_to_cpu(desc->addr), | ||
631 | tp->rx_buf_sz, PCI_DMA_FROMDEVICE); | ||
632 | |||
633 | skb->dev = dev; | ||
634 | skb_put(skb, pkt_size); | ||
635 | skb->protocol = eth_type_trans(skb, dev); | ||
636 | |||
637 | sis190_rx_skb(skb); | ||
638 | |||
639 | dev->last_rx = jiffies; | ||
640 | stats->rx_packets++; | ||
641 | stats->rx_bytes += pkt_size; | ||
642 | if ((status & BCAST) == MCAST) | ||
643 | stats->multicast++; | ||
644 | } | ||
645 | } | ||
646 | count = cur_rx - tp->cur_rx; | ||
647 | tp->cur_rx = cur_rx; | ||
648 | |||
649 | delta = sis190_rx_fill(tp, dev, tp->dirty_rx, tp->cur_rx); | ||
650 | if (!delta && count && netif_msg_intr(tp)) | ||
651 | printk(KERN_INFO "%s: no Rx buffer allocated.\n", dev->name); | ||
652 | tp->dirty_rx += delta; | ||
653 | |||
654 | if (((tp->dirty_rx + NUM_RX_DESC) == tp->cur_rx) && netif_msg_intr(tp)) | ||
655 | printk(KERN_EMERG "%s: Rx buffers exhausted.\n", dev->name); | ||
656 | |||
657 | return count; | ||
658 | } | ||
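/*
 * The pci_action function pointer in sis190_rx_interrupt() gives a
 * copybreak-style receive path: by default the DMA buffer is re-armed for
 * the device, but once sis190_try_rx_copy() passes the original skb up the
 * stack the action is swapped to pci_unmap_single() so the mapping is torn
 * down instead.  A minimal sketch of the pattern follows; RX_COPY_THRESHOLD
 * and the function name are illustrative, not taken from the driver.
 */
static int rx_copybreak_sketch(struct pci_dev *pdev, struct sk_buff **skbp,
			       dma_addr_t dma, int pkt_size, int buf_sz)
{
	/* Default action: hand the buffer back to the NIC. */
	void (*pci_action)(struct pci_dev *, dma_addr_t, size_t, int) =
		pci_dma_sync_single_for_device;

	pci_dma_sync_single_for_cpu(pdev, dma, buf_sz, PCI_DMA_FROMDEVICE);

	if (pkt_size >= RX_COPY_THRESHOLD) {
		/* Big packet: the mapped skb itself goes upstream, so
		 * unmap it and leave the ring slot empty for refill. */
		pci_action = pci_unmap_single;
		*skbp = NULL;
	}
	pci_action(pdev, dma, buf_sz, PCI_DMA_FROMDEVICE);

	return *skbp == NULL;
}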
659 | |||
660 | static void sis190_unmap_tx_skb(struct pci_dev *pdev, struct sk_buff *skb, | ||
661 | struct TxDesc *desc) | ||
662 | { | ||
663 | unsigned int len; | ||
664 | |||
665 | len = skb->len < ETH_ZLEN ? ETH_ZLEN : skb->len; | ||
666 | |||
667 | pci_unmap_single(pdev, le32_to_cpu(desc->addr), len, PCI_DMA_TODEVICE); | ||
668 | |||
669 | memset(desc, 0x00, sizeof(*desc)); | ||
670 | } | ||
671 | |||
672 | static void sis190_tx_interrupt(struct net_device *dev, | ||
673 | struct sis190_private *tp, void __iomem *ioaddr) | ||
674 | { | ||
675 | u32 pending, dirty_tx = tp->dirty_tx; | ||
676 | 	/* | ||
677 | 	 * Snapshot the stopped state here: queueing must not be | ||
678 | 	 * re-enabled too early (hint: preempt and unclocked smp systems). | ||
679 | 	 */ | ||
680 | unsigned int queue_stopped; | ||
681 | |||
682 | smp_rmb(); | ||
683 | pending = tp->cur_tx - dirty_tx; | ||
684 | queue_stopped = (pending == NUM_TX_DESC); | ||
685 | |||
686 | for (; pending; pending--, dirty_tx++) { | ||
687 | unsigned int entry = dirty_tx % NUM_TX_DESC; | ||
688 | struct TxDesc *txd = tp->TxDescRing + entry; | ||
689 | struct sk_buff *skb; | ||
690 | |||
691 | if (le32_to_cpu(txd->status) & OWNbit) | ||
692 | break; | ||
693 | |||
694 | skb = tp->Tx_skbuff[entry]; | ||
695 | |||
696 | tp->stats.tx_packets++; | ||
697 | tp->stats.tx_bytes += skb->len; | ||
698 | |||
699 | sis190_unmap_tx_skb(tp->pci_dev, skb, txd); | ||
700 | tp->Tx_skbuff[entry] = NULL; | ||
701 | dev_kfree_skb_irq(skb); | ||
702 | } | ||
703 | |||
704 | if (tp->dirty_tx != dirty_tx) { | ||
705 | tp->dirty_tx = dirty_tx; | ||
706 | smp_wmb(); | ||
707 | if (queue_stopped) | ||
708 | netif_wake_queue(dev); | ||
709 | } | ||
710 | } | ||
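/*
 * The queue_stopped snapshot above pairs with sis190_start_xmit(): each
 * side publishes its counter with smp_wmb() and re-reads the peer's
 * counter after smp_rmb().  Skeleton of the protocol (a sketch, not
 * driver code):
 *
 *	xmit side				completion side
 *	---------				---------------
 *	write descriptor;			smp_rmb();
 *	tp->cur_tx++;				pending = cur_tx - dirty_tx;
 *	smp_wmb();				reap 'pending' slots;
 *	if (ring full) {			tp->dirty_tx = dirty_tx;
 *		netif_stop_queue(dev);		smp_wmb();
 *		smp_rmb();			if (queue_stopped)
 *		if (tp->dirty_tx moved)			netif_wake_queue(dev);
 *			netif_wake_queue(dev);
 *	}
 */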
711 | |||
712 | /* | ||
713 | * The interrupt handler does all of the Rx thread work and cleans up after | ||
714 | * the Tx thread. | ||
715 | */ | ||
716 | static irqreturn_t sis190_interrupt(int irq, void *__dev, struct pt_regs *regs) | ||
717 | { | ||
718 | struct net_device *dev = __dev; | ||
719 | struct sis190_private *tp = netdev_priv(dev); | ||
720 | void __iomem *ioaddr = tp->mmio_addr; | ||
721 | unsigned int handled = 0; | ||
722 | u32 status; | ||
723 | |||
724 | status = SIS_R32(IntrStatus); | ||
725 | |||
726 | if ((status == 0xffffffff) || !status) | ||
727 | goto out; | ||
728 | |||
729 | handled = 1; | ||
730 | |||
731 | if (unlikely(!netif_running(dev))) { | ||
732 | sis190_asic_down(ioaddr); | ||
733 | goto out; | ||
734 | } | ||
735 | |||
736 | SIS_W32(IntrStatus, status); | ||
737 | |||
738 | // net_intr(tp, KERN_INFO "%s: status = %08x.\n", dev->name, status); | ||
739 | |||
740 | if (status & LinkChange) { | ||
741 | net_intr(tp, KERN_INFO "%s: link change.\n", dev->name); | ||
742 | schedule_work(&tp->phy_task); | ||
743 | } | ||
744 | |||
745 | if (status & RxQInt) | ||
746 | sis190_rx_interrupt(dev, tp, ioaddr); | ||
747 | |||
748 | if (status & TxQ0Int) | ||
749 | sis190_tx_interrupt(dev, tp, ioaddr); | ||
750 | out: | ||
751 | return IRQ_RETVAL(handled); | ||
752 | } | ||
753 | |||
754 | #ifdef CONFIG_NET_POLL_CONTROLLER | ||
755 | static void sis190_netpoll(struct net_device *dev) | ||
756 | { | ||
757 | struct sis190_private *tp = netdev_priv(dev); | ||
758 | struct pci_dev *pdev = tp->pci_dev; | ||
759 | |||
760 | disable_irq(pdev->irq); | ||
761 | sis190_interrupt(pdev->irq, dev, NULL); | ||
762 | enable_irq(pdev->irq); | ||
763 | } | ||
764 | #endif | ||
765 | |||
766 | static void sis190_free_rx_skb(struct sis190_private *tp, | ||
767 | struct sk_buff **sk_buff, struct RxDesc *desc) | ||
768 | { | ||
769 | struct pci_dev *pdev = tp->pci_dev; | ||
770 | |||
771 | pci_unmap_single(pdev, le32_to_cpu(desc->addr), tp->rx_buf_sz, | ||
772 | PCI_DMA_FROMDEVICE); | ||
773 | dev_kfree_skb(*sk_buff); | ||
774 | *sk_buff = NULL; | ||
775 | sis190_make_unusable_by_asic(desc); | ||
776 | } | ||
777 | |||
778 | static void sis190_rx_clear(struct sis190_private *tp) | ||
779 | { | ||
780 | unsigned int i; | ||
781 | |||
782 | for (i = 0; i < NUM_RX_DESC; i++) { | ||
783 | if (!tp->Rx_skbuff[i]) | ||
784 | continue; | ||
785 | sis190_free_rx_skb(tp, tp->Rx_skbuff + i, tp->RxDescRing + i); | ||
786 | } | ||
787 | } | ||
788 | |||
789 | static void sis190_init_ring_indexes(struct sis190_private *tp) | ||
790 | { | ||
791 | tp->dirty_tx = tp->dirty_rx = tp->cur_tx = tp->cur_rx = 0; | ||
792 | } | ||
793 | |||
794 | static int sis190_init_ring(struct net_device *dev) | ||
795 | { | ||
796 | struct sis190_private *tp = netdev_priv(dev); | ||
797 | |||
798 | sis190_init_ring_indexes(tp); | ||
799 | |||
800 | memset(tp->Tx_skbuff, 0x0, NUM_TX_DESC * sizeof(struct sk_buff *)); | ||
801 | memset(tp->Rx_skbuff, 0x0, NUM_RX_DESC * sizeof(struct sk_buff *)); | ||
802 | |||
803 | if (sis190_rx_fill(tp, dev, 0, NUM_RX_DESC) != NUM_RX_DESC) | ||
804 | goto err_rx_clear; | ||
805 | |||
806 | sis190_mark_as_last_descriptor(tp->RxDescRing + NUM_RX_DESC - 1); | ||
807 | |||
808 | return 0; | ||
809 | |||
810 | err_rx_clear: | ||
811 | sis190_rx_clear(tp); | ||
812 | return -ENOMEM; | ||
813 | } | ||
814 | |||
815 | static void sis190_set_rx_mode(struct net_device *dev) | ||
816 | { | ||
817 | struct sis190_private *tp = netdev_priv(dev); | ||
818 | void __iomem *ioaddr = tp->mmio_addr; | ||
819 | unsigned long flags; | ||
820 | u32 mc_filter[2]; /* Multicast hash filter */ | ||
821 | u16 rx_mode; | ||
822 | |||
823 | if (dev->flags & IFF_PROMISC) { | ||
824 | /* Unconditionally log net taps. */ | ||
825 | net_drv(tp, KERN_NOTICE "%s: Promiscuous mode enabled.\n", | ||
826 | dev->name); | ||
827 | rx_mode = | ||
828 | AcceptBroadcast | AcceptMulticast | AcceptMyPhys | | ||
829 | AcceptAllPhys; | ||
830 | mc_filter[1] = mc_filter[0] = 0xffffffff; | ||
831 | } else if ((dev->mc_count > multicast_filter_limit) || | ||
832 | (dev->flags & IFF_ALLMULTI)) { | ||
833 | /* Too many to filter perfectly -- accept all multicasts. */ | ||
834 | rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys; | ||
835 | mc_filter[1] = mc_filter[0] = 0xffffffff; | ||
836 | } else { | ||
837 | struct dev_mc_list *mclist; | ||
838 | unsigned int i; | ||
839 | |||
840 | rx_mode = AcceptBroadcast | AcceptMyPhys; | ||
841 | mc_filter[1] = mc_filter[0] = 0; | ||
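		/*
		 * ether_crc() >> 26 keeps the CRC's top six bits, selecting
		 * one of 64 hash buckets: bit_nr >> 5 picks the 32-bit
		 * filter word, bit_nr & 31 the bit within it.
		 */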
842 | for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count; | ||
843 | i++, mclist = mclist->next) { | ||
844 | int bit_nr = | ||
845 | ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26; | ||
846 | mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31); | ||
847 | rx_mode |= AcceptMulticast; | ||
848 | } | ||
849 | } | ||
850 | |||
851 | spin_lock_irqsave(&tp->lock, flags); | ||
852 | |||
853 | SIS_W16(RxMacControl, rx_mode | 0x2); | ||
854 | SIS_W32(RxHashTable, mc_filter[0]); | ||
855 | SIS_W32(RxHashTable + 4, mc_filter[1]); | ||
856 | |||
857 | spin_unlock_irqrestore(&tp->lock, flags); | ||
858 | } | ||
859 | |||
860 | static void sis190_soft_reset(void __iomem *ioaddr) | ||
861 | { | ||
862 | SIS_W32(IntrControl, 0x8000); | ||
863 | SIS_PCI_COMMIT(); | ||
864 | msleep(1); | ||
865 | SIS_W32(IntrControl, 0x0); | ||
866 | sis190_asic_down(ioaddr); | ||
867 | msleep(1); | ||
868 | } | ||
869 | |||
870 | static void sis190_hw_start(struct net_device *dev) | ||
871 | { | ||
872 | struct sis190_private *tp = netdev_priv(dev); | ||
873 | void __iomem *ioaddr = tp->mmio_addr; | ||
874 | |||
875 | sis190_soft_reset(ioaddr); | ||
876 | |||
877 | SIS_W32(TxDescStartAddr, tp->tx_dma); | ||
878 | SIS_W32(RxDescStartAddr, tp->rx_dma); | ||
879 | |||
880 | SIS_W32(IntrStatus, 0xffffffff); | ||
881 | SIS_W32(IntrMask, 0x0); | ||
882 | /* | ||
883 | * Default is 100Mbps. | ||
884 | * A bit strange: 100Mbps is 0x1801 elsewhere -- FR 2005/06/09 | ||
885 | */ | ||
886 | SIS_W16(StationControl, 0x1901); | ||
887 | SIS_W32(GMIIControl, 0x0); | ||
888 | SIS_W32(TxMacControl, 0x60); | ||
889 | SIS_W16(RxMacControl, 0x02); | ||
890 | SIS_W32(RxHashTable, 0x0); | ||
891 | SIS_W32(0x6c, 0x0); | ||
892 | SIS_W32(RxWolCtrl, 0x0); | ||
893 | SIS_W32(RxWolData, 0x0); | ||
894 | |||
895 | SIS_PCI_COMMIT(); | ||
896 | |||
897 | sis190_set_rx_mode(dev); | ||
898 | |||
899 | /* Enable all known interrupts by setting the interrupt mask. */ | ||
900 | SIS_W32(IntrMask, sis190_intr_mask); | ||
901 | |||
902 | SIS_W32(TxControl, 0x1a00 | CmdTxEnb); | ||
903 | SIS_W32(RxControl, 0x1a1d); | ||
904 | |||
905 | netif_start_queue(dev); | ||
906 | } | ||
907 | |||
908 | static void sis190_phy_task(void * data) | ||
909 | { | ||
910 | struct net_device *dev = data; | ||
911 | struct sis190_private *tp = netdev_priv(dev); | ||
912 | void __iomem *ioaddr = tp->mmio_addr; | ||
913 | int phy_id = tp->mii_if.phy_id; | ||
914 | u16 val; | ||
915 | |||
916 | rtnl_lock(); | ||
917 | |||
918 | val = mdio_read(ioaddr, phy_id, MII_BMCR); | ||
919 | if (val & BMCR_RESET) { | ||
920 | // FIXME: needlessly high ? -- FR 02/07/2005 | ||
921 | mod_timer(&tp->timer, jiffies + HZ/10); | ||
922 | } else if (!(mdio_read_latched(ioaddr, phy_id, MII_BMSR) & | ||
923 | BMSR_ANEGCOMPLETE)) { | ||
924 | net_link(tp, KERN_WARNING "%s: PHY reset until link up.\n", | ||
925 | dev->name); | ||
926 | mdio_write(ioaddr, phy_id, MII_BMCR, val | BMCR_RESET); | ||
927 | mod_timer(&tp->timer, jiffies + SIS190_PHY_TIMEOUT); | ||
928 | } else { | ||
929 | /* Rejoice ! */ | ||
930 | struct { | ||
931 | int val; | ||
932 | const char *msg; | ||
933 | u16 ctl; | ||
934 | } reg31[] = { | ||
935 | { LPA_1000XFULL | LPA_SLCT, | ||
936 | "1000 Mbps Full Duplex", | ||
937 | 0x01 | _1000bpsF }, | ||
938 | { LPA_1000XHALF | LPA_SLCT, | ||
939 | "1000 Mbps Half Duplex", | ||
940 | 0x01 | _1000bpsH }, | ||
941 | { LPA_100FULL, | ||
942 | "100 Mbps Full Duplex", | ||
943 | 0x01 | _100bpsF }, | ||
944 | { LPA_100HALF, | ||
945 | "100 Mbps Half Duplex", | ||
946 | 0x01 | _100bpsH }, | ||
947 | { LPA_10FULL, | ||
948 | "10 Mbps Full Duplex", | ||
949 | 0x01 | _10bpsF }, | ||
950 | { LPA_10HALF, | ||
951 | "10 Mbps Half Duplex", | ||
952 | 0x01 | _10bpsH }, | ||
953 | { 0, "unknown", 0x0000 } | ||
954 | }, *p; | ||
955 | u16 adv; | ||
956 | |||
957 | val = mdio_read(ioaddr, phy_id, 0x1f); | ||
958 | net_link(tp, KERN_INFO "%s: mii ext = %04x.\n", dev->name, val); | ||
959 | |||
960 | val = mdio_read(ioaddr, phy_id, MII_LPA); | ||
961 | adv = mdio_read(ioaddr, phy_id, MII_ADVERTISE); | ||
962 | net_link(tp, KERN_INFO "%s: mii lpa = %04x adv = %04x.\n", | ||
963 | dev->name, val, adv); | ||
964 | |||
965 | val &= adv; | ||
966 | |||
967 | for (p = reg31; p->ctl; p++) { | ||
968 | if ((val & p->val) == p->val) | ||
969 | break; | ||
970 | } | ||
971 | if (p->ctl) | ||
972 | SIS_W16(StationControl, p->ctl); | ||
973 | net_link(tp, KERN_INFO "%s: link on %s mode.\n", dev->name, | ||
974 | p->msg); | ||
975 | netif_carrier_on(dev); | ||
976 | } | ||
977 | |||
978 | rtnl_unlock(); | ||
979 | } | ||
980 | |||
981 | static void sis190_phy_timer(unsigned long __opaque) | ||
982 | { | ||
983 | struct net_device *dev = (struct net_device *)__opaque; | ||
984 | struct sis190_private *tp = netdev_priv(dev); | ||
985 | |||
986 | if (likely(netif_running(dev))) | ||
987 | schedule_work(&tp->phy_task); | ||
988 | } | ||
989 | |||
990 | static inline void sis190_delete_timer(struct net_device *dev) | ||
991 | { | ||
992 | struct sis190_private *tp = netdev_priv(dev); | ||
993 | |||
994 | del_timer_sync(&tp->timer); | ||
995 | } | ||
996 | |||
997 | static inline void sis190_request_timer(struct net_device *dev) | ||
998 | { | ||
999 | struct sis190_private *tp = netdev_priv(dev); | ||
1000 | struct timer_list *timer = &tp->timer; | ||
1001 | |||
1002 | init_timer(timer); | ||
1003 | timer->expires = jiffies + SIS190_PHY_TIMEOUT; | ||
1004 | timer->data = (unsigned long)dev; | ||
1005 | timer->function = sis190_phy_timer; | ||
1006 | add_timer(timer); | ||
1007 | } | ||
1008 | |||
1009 | static void sis190_set_rxbufsize(struct sis190_private *tp, | ||
1010 | struct net_device *dev) | ||
1011 | { | ||
1012 | unsigned int mtu = dev->mtu; | ||
1013 | |||
1014 | tp->rx_buf_sz = (mtu > RX_BUF_SIZE) ? mtu + ETH_HLEN + 8 : RX_BUF_SIZE; | ||
1015 | /* RxDesc->size has a licence to kill the lower bits */ | ||
1016 | if (tp->rx_buf_sz & 0x07) { | ||
1017 | tp->rx_buf_sz += 8; | ||
1018 | tp->rx_buf_sz &= RX_BUF_MASK; | ||
1019 | } | ||
1020 | } | ||
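/*
 * Worked example of the rounding in sis190_set_rxbufsize(), assuming
 * RX_BUF_MASK simply clears the low three bits: an MTU of 7000 gives
 * 7000 + 14 + 8 = 7022 bytes; 7022 & 0x07 is non-zero, so the size is
 * bumped to 7030 and masked down to 7024, the next multiple of 8 above
 * the requested length.
 */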
1021 | |||
1022 | static int sis190_open(struct net_device *dev) | ||
1023 | { | ||
1024 | struct sis190_private *tp = netdev_priv(dev); | ||
1025 | struct pci_dev *pdev = tp->pci_dev; | ||
1026 | int rc = -ENOMEM; | ||
1027 | |||
1028 | sis190_set_rxbufsize(tp, dev); | ||
1029 | |||
1030 | /* | ||
1031 | * Rx and Tx descriptors need 256 bytes alignment. | ||
1032 | * pci_alloc_consistent() guarantees a stronger alignment. | ||
1033 | */ | ||
1034 | tp->TxDescRing = pci_alloc_consistent(pdev, TX_RING_BYTES, &tp->tx_dma); | ||
1035 | if (!tp->TxDescRing) | ||
1036 | goto out; | ||
1037 | |||
1038 | tp->RxDescRing = pci_alloc_consistent(pdev, RX_RING_BYTES, &tp->rx_dma); | ||
1039 | if (!tp->RxDescRing) | ||
1040 | goto err_free_tx_0; | ||
1041 | |||
1042 | rc = sis190_init_ring(dev); | ||
1043 | if (rc < 0) | ||
1044 | goto err_free_rx_1; | ||
1045 | |||
1046 | INIT_WORK(&tp->phy_task, sis190_phy_task, dev); | ||
1047 | |||
1048 | sis190_request_timer(dev); | ||
1049 | |||
1050 | rc = request_irq(dev->irq, sis190_interrupt, SA_SHIRQ, dev->name, dev); | ||
1051 | if (rc < 0) | ||
1052 | goto err_release_timer_2; | ||
1053 | |||
1054 | sis190_hw_start(dev); | ||
1055 | out: | ||
1056 | return rc; | ||
1057 | |||
1058 | err_release_timer_2: | ||
1059 | sis190_delete_timer(dev); | ||
1060 | sis190_rx_clear(tp); | ||
1061 | err_free_rx_1: | ||
1062 | pci_free_consistent(tp->pci_dev, RX_RING_BYTES, tp->RxDescRing, | ||
1063 | tp->rx_dma); | ||
1064 | err_free_tx_0: | ||
1065 | pci_free_consistent(tp->pci_dev, TX_RING_BYTES, tp->TxDescRing, | ||
1066 | tp->tx_dma); | ||
1067 | goto out; | ||
1068 | } | ||
1069 | |||
1070 | static void sis190_tx_clear(struct sis190_private *tp) | ||
1071 | { | ||
1072 | unsigned int i; | ||
1073 | |||
1074 | for (i = 0; i < NUM_TX_DESC; i++) { | ||
1075 | struct sk_buff *skb = tp->Tx_skbuff[i]; | ||
1076 | |||
1077 | if (!skb) | ||
1078 | continue; | ||
1079 | |||
1080 | sis190_unmap_tx_skb(tp->pci_dev, skb, tp->TxDescRing + i); | ||
1081 | tp->Tx_skbuff[i] = NULL; | ||
1082 | dev_kfree_skb(skb); | ||
1083 | |||
1084 | tp->stats.tx_dropped++; | ||
1085 | } | ||
1086 | tp->cur_tx = tp->dirty_tx = 0; | ||
1087 | } | ||
1088 | |||
1089 | static void sis190_down(struct net_device *dev) | ||
1090 | { | ||
1091 | struct sis190_private *tp = netdev_priv(dev); | ||
1092 | void __iomem *ioaddr = tp->mmio_addr; | ||
1093 | unsigned int poll_locked = 0; | ||
1094 | |||
1095 | sis190_delete_timer(dev); | ||
1096 | |||
1097 | netif_stop_queue(dev); | ||
1098 | |||
1099 | flush_scheduled_work(); | ||
1100 | |||
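	/*
	 * Loop until IntrMask reads back zero, apparently to guard
	 * against an interrupt racing on another CPU re-arming the mask
	 * between the asic_down write and the synchronize calls.
	 */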
1101 | do { | ||
1102 | spin_lock_irq(&tp->lock); | ||
1103 | |||
1104 | sis190_asic_down(ioaddr); | ||
1105 | |||
1106 | spin_unlock_irq(&tp->lock); | ||
1107 | |||
1108 | synchronize_irq(dev->irq); | ||
1109 | |||
1110 | if (!poll_locked) { | ||
1111 | netif_poll_disable(dev); | ||
1112 | poll_locked++; | ||
1113 | } | ||
1114 | |||
1115 | synchronize_sched(); | ||
1116 | |||
1117 | } while (SIS_R32(IntrMask)); | ||
1118 | |||
1119 | sis190_tx_clear(tp); | ||
1120 | sis190_rx_clear(tp); | ||
1121 | } | ||
1122 | |||
1123 | static int sis190_close(struct net_device *dev) | ||
1124 | { | ||
1125 | struct sis190_private *tp = netdev_priv(dev); | ||
1126 | struct pci_dev *pdev = tp->pci_dev; | ||
1127 | |||
1128 | sis190_down(dev); | ||
1129 | |||
1130 | free_irq(dev->irq, dev); | ||
1131 | |||
1132 | netif_poll_enable(dev); | ||
1133 | |||
1134 | pci_free_consistent(pdev, TX_RING_BYTES, tp->TxDescRing, tp->tx_dma); | ||
1135 | pci_free_consistent(pdev, RX_RING_BYTES, tp->RxDescRing, tp->rx_dma); | ||
1136 | |||
1137 | tp->TxDescRing = NULL; | ||
1138 | tp->RxDescRing = NULL; | ||
1139 | |||
1140 | return 0; | ||
1141 | } | ||
1142 | |||
1143 | static int sis190_start_xmit(struct sk_buff *skb, struct net_device *dev) | ||
1144 | { | ||
1145 | struct sis190_private *tp = netdev_priv(dev); | ||
1146 | void __iomem *ioaddr = tp->mmio_addr; | ||
1147 | u32 len, entry, dirty_tx; | ||
1148 | struct TxDesc *desc; | ||
1149 | dma_addr_t mapping; | ||
1150 | |||
1151 | if (unlikely(skb->len < ETH_ZLEN)) { | ||
1152 | skb = skb_padto(skb, ETH_ZLEN); | ||
1153 | if (!skb) { | ||
1154 | tp->stats.tx_dropped++; | ||
1155 | goto out; | ||
1156 | } | ||
1157 | len = ETH_ZLEN; | ||
1158 | } else { | ||
1159 | len = skb->len; | ||
1160 | } | ||
1161 | |||
1162 | entry = tp->cur_tx % NUM_TX_DESC; | ||
1163 | desc = tp->TxDescRing + entry; | ||
1164 | |||
1165 | if (unlikely(le32_to_cpu(desc->status) & OWNbit)) { | ||
1166 | netif_stop_queue(dev); | ||
1167 | net_tx_err(tp, KERN_ERR PFX | ||
1168 | "%s: BUG! Tx Ring full when queue awake!\n", | ||
1169 | dev->name); | ||
1170 | return NETDEV_TX_BUSY; | ||
1171 | } | ||
1172 | |||
1173 | mapping = pci_map_single(tp->pci_dev, skb->data, len, PCI_DMA_TODEVICE); | ||
1174 | |||
1175 | tp->Tx_skbuff[entry] = skb; | ||
1176 | |||
1177 | desc->PSize = cpu_to_le32(len); | ||
1178 | desc->addr = cpu_to_le32(mapping); | ||
1179 | |||
1180 | desc->size = cpu_to_le32(len); | ||
1181 | if (entry == (NUM_TX_DESC - 1)) | ||
1182 | desc->size |= cpu_to_le32(RingEnd); | ||
1183 | |||
1184 | wmb(); | ||
1185 | |||
1186 | desc->status = cpu_to_le32(OWNbit | INTbit | DEFbit | CRCbit | PADbit); | ||
1187 | |||
1188 | tp->cur_tx++; | ||
1189 | |||
1190 | smp_wmb(); | ||
1191 | |||
1192 | SIS_W32(TxControl, 0x1a00 | CmdReset | CmdTxEnb); | ||
1193 | |||
1194 | dev->trans_start = jiffies; | ||
1195 | |||
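	/*
	 * Stop the queue first, then re-read dirty_tx: if the completion
	 * path freed slots in between, wake the queue back up here (the
	 * smp_rmb() pairs with the smp_wmb() in sis190_tx_interrupt).
	 */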
1196 | dirty_tx = tp->dirty_tx; | ||
1197 | if ((tp->cur_tx - NUM_TX_DESC) == dirty_tx) { | ||
1198 | netif_stop_queue(dev); | ||
1199 | smp_rmb(); | ||
1200 | if (dirty_tx != tp->dirty_tx) | ||
1201 | netif_wake_queue(dev); | ||
1202 | } | ||
1203 | out: | ||
1204 | return NETDEV_TX_OK; | ||
1205 | } | ||
1206 | |||
1207 | static struct net_device_stats *sis190_get_stats(struct net_device *dev) | ||
1208 | { | ||
1209 | struct sis190_private *tp = netdev_priv(dev); | ||
1210 | |||
1211 | return &tp->stats; | ||
1212 | } | ||
1213 | |||
1214 | static void sis190_free_phy(struct list_head *first_phy) | ||
1215 | { | ||
1216 | struct sis190_phy *cur, *next; | ||
1217 | |||
1218 | list_for_each_entry_safe(cur, next, first_phy, list) { | ||
1219 | kfree(cur); | ||
1220 | } | ||
1221 | } | ||
1222 | |||
1223 | /** | ||
1224 | * sis190_default_phy - Select default PHY for sis190 mac. | ||
1225 | * @dev: the net device to probe for | ||
1226 | * | ||
1227 | * Select the first detected PHY with link up as default. | ||
1228 | * If none has link up, select the PHY whose type is HOME as default. | ||
1229 | * If HOME doesn't exist, select LAN. | ||
1230 | */ | ||
1231 | static u16 sis190_default_phy(struct net_device *dev) | ||
1232 | { | ||
1233 | struct sis190_phy *phy, *phy_home, *phy_default, *phy_lan; | ||
1234 | struct sis190_private *tp = netdev_priv(dev); | ||
1235 | struct mii_if_info *mii_if = &tp->mii_if; | ||
1236 | void __iomem *ioaddr = tp->mmio_addr; | ||
1237 | u16 status; | ||
1238 | |||
1239 | phy_home = phy_default = phy_lan = NULL; | ||
1240 | |||
1241 | list_for_each_entry(phy, &tp->first_phy, list) { | ||
1242 | status = mdio_read_latched(ioaddr, phy->phy_id, MII_BMSR); | ||
1243 | |||
1244 | 		// Link is up, no default PHY selected yet, and not a ghost PHY. | ||
1245 | if ((status & BMSR_LSTATUS) && | ||
1246 | !phy_default && | ||
1247 | (phy->type != UNKNOWN)) { | ||
1248 | phy_default = phy; | ||
1249 | } else { | ||
1250 | status = mdio_read(ioaddr, phy->phy_id, MII_BMCR); | ||
1251 | mdio_write(ioaddr, phy->phy_id, MII_BMCR, | ||
1252 | status | BMCR_ANENABLE | BMCR_ISOLATE); | ||
1253 | if (phy->type == HOME) | ||
1254 | phy_home = phy; | ||
1255 | else if (phy->type == LAN) | ||
1256 | phy_lan = phy; | ||
1257 | } | ||
1258 | } | ||
1259 | |||
1260 | if (!phy_default) { | ||
1261 | if (phy_home) | ||
1262 | phy_default = phy_home; | ||
1263 | else if (phy_lan) | ||
1264 | phy_default = phy_lan; | ||
1265 | else | ||
1266 | 			phy_default = list_entry(tp->first_phy.next, | ||
1267 | 						 struct sis190_phy, list); | ||
1268 | } | ||
1269 | |||
1270 | if (mii_if->phy_id != phy_default->phy_id) { | ||
1271 | mii_if->phy_id = phy_default->phy_id; | ||
1272 | net_probe(tp, KERN_INFO | ||
1273 | "%s: Using transceiver at address %d as default.\n", | ||
1274 | pci_name(tp->pci_dev), mii_if->phy_id); | ||
1275 | } | ||
1276 | |||
1277 | status = mdio_read(ioaddr, mii_if->phy_id, MII_BMCR); | ||
1278 | status &= (~BMCR_ISOLATE); | ||
1279 | |||
1280 | mdio_write(ioaddr, mii_if->phy_id, MII_BMCR, status); | ||
1281 | status = mdio_read_latched(ioaddr, mii_if->phy_id, MII_BMSR); | ||
1282 | |||
1283 | return status; | ||
1284 | } | ||
1285 | |||
1286 | static void sis190_init_phy(struct net_device *dev, struct sis190_private *tp, | ||
1287 | struct sis190_phy *phy, unsigned int phy_id, | ||
1288 | u16 mii_status) | ||
1289 | { | ||
1290 | void __iomem *ioaddr = tp->mmio_addr; | ||
1291 | struct mii_chip_info *p; | ||
1292 | |||
1293 | INIT_LIST_HEAD(&phy->list); | ||
1294 | phy->status = mii_status; | ||
1295 | phy->phy_id = phy_id; | ||
1296 | |||
1297 | phy->id[0] = mdio_read(ioaddr, phy_id, MII_PHYSID1); | ||
1298 | phy->id[1] = mdio_read(ioaddr, phy_id, MII_PHYSID2); | ||
1299 | |||
1300 | for (p = mii_chip_table; p->type; p++) { | ||
1301 | if ((p->id[0] == phy->id[0]) && | ||
1302 | (p->id[1] == (phy->id[1] & 0xfff0))) { | ||
1303 | break; | ||
1304 | } | ||
1305 | } | ||
1306 | |||
1307 | if (p->id[1]) { | ||
1308 | phy->type = (p->type == MIX) ? | ||
1309 | ((mii_status & (BMSR_100FULL | BMSR_100HALF)) ? | ||
1310 | LAN : HOME) : p->type; | ||
1311 | } else | ||
1312 | phy->type = UNKNOWN; | ||
1313 | |||
1314 | net_probe(tp, KERN_INFO "%s: %s transceiver at address %d.\n", | ||
1315 | pci_name(tp->pci_dev), | ||
1316 | (phy->type == UNKNOWN) ? "Unknown PHY" : p->name, phy_id); | ||
1317 | } | ||
1318 | |||
1319 | /** | ||
1320 | * sis190_mii_probe - Probe MII PHY for sis190 | ||
1321 | * @dev: the net device to probe for | ||
1322 | * | ||
1323 | * Search the 32 possible MII PHY addresses. | ||
1324 | * Identify and set the current PHY if one is found, | ||
1325 | * return an error otherwise. | ||
1326 | */ | ||
1327 | static int __devinit sis190_mii_probe(struct net_device *dev) | ||
1328 | { | ||
1329 | struct sis190_private *tp = netdev_priv(dev); | ||
1330 | struct mii_if_info *mii_if = &tp->mii_if; | ||
1331 | void __iomem *ioaddr = tp->mmio_addr; | ||
1332 | int phy_id; | ||
1333 | int rc = 0; | ||
1334 | |||
1335 | INIT_LIST_HEAD(&tp->first_phy); | ||
1336 | |||
1337 | for (phy_id = 0; phy_id < PHY_MAX_ADDR; phy_id++) { | ||
1338 | struct sis190_phy *phy; | ||
1339 | u16 status; | ||
1340 | |||
1341 | status = mdio_read_latched(ioaddr, phy_id, MII_BMSR); | ||
1342 | |||
1343 | // Try next mii if the current one is not accessible. | ||
1344 | if (status == 0xffff || status == 0x0000) | ||
1345 | continue; | ||
1346 | |||
1347 | phy = kmalloc(sizeof(*phy), GFP_KERNEL); | ||
1348 | if (!phy) { | ||
1349 | sis190_free_phy(&tp->first_phy); | ||
1350 | rc = -ENOMEM; | ||
1351 | goto out; | ||
1352 | } | ||
1353 | |||
1354 | sis190_init_phy(dev, tp, phy, phy_id, status); | ||
1355 | |||
1356 | 		list_add(&phy->list, &tp->first_phy); | ||
1357 | } | ||
1358 | |||
1359 | if (list_empty(&tp->first_phy)) { | ||
1360 | net_probe(tp, KERN_INFO "%s: No MII transceivers found!\n", | ||
1361 | pci_name(tp->pci_dev)); | ||
1362 | rc = -EIO; | ||
1363 | goto out; | ||
1364 | } | ||
1365 | |||
1366 | /* Select default PHY for mac */ | ||
1367 | sis190_default_phy(dev); | ||
1368 | |||
1369 | mii_if->dev = dev; | ||
1370 | mii_if->mdio_read = __mdio_read; | ||
1371 | mii_if->mdio_write = __mdio_write; | ||
1372 | mii_if->phy_id_mask = PHY_ID_ANY; | ||
1373 | mii_if->reg_num_mask = MII_REG_ANY; | ||
1374 | out: | ||
1375 | return rc; | ||
1376 | } | ||
1377 | |||
1378 | static void __devexit sis190_mii_remove(struct net_device *dev) | ||
1379 | { | ||
1380 | struct sis190_private *tp = netdev_priv(dev); | ||
1381 | |||
1382 | sis190_free_phy(&tp->first_phy); | ||
1383 | } | ||
1384 | |||
1385 | static void sis190_release_board(struct pci_dev *pdev) | ||
1386 | { | ||
1387 | struct net_device *dev = pci_get_drvdata(pdev); | ||
1388 | struct sis190_private *tp = netdev_priv(dev); | ||
1389 | |||
1390 | iounmap(tp->mmio_addr); | ||
1391 | pci_release_regions(pdev); | ||
1392 | pci_disable_device(pdev); | ||
1393 | free_netdev(dev); | ||
1394 | } | ||
1395 | |||
1396 | static struct net_device * __devinit sis190_init_board(struct pci_dev *pdev) | ||
1397 | { | ||
1398 | struct sis190_private *tp; | ||
1399 | struct net_device *dev; | ||
1400 | void __iomem *ioaddr; | ||
1401 | int rc; | ||
1402 | |||
1403 | dev = alloc_etherdev(sizeof(*tp)); | ||
1404 | if (!dev) { | ||
1405 | net_drv(&debug, KERN_ERR PFX "unable to alloc new ethernet\n"); | ||
1406 | rc = -ENOMEM; | ||
1407 | goto err_out_0; | ||
1408 | } | ||
1409 | |||
1410 | SET_MODULE_OWNER(dev); | ||
1411 | SET_NETDEV_DEV(dev, &pdev->dev); | ||
1412 | |||
1413 | tp = netdev_priv(dev); | ||
1414 | tp->msg_enable = netif_msg_init(debug.msg_enable, SIS190_MSG_DEFAULT); | ||
1415 | |||
1416 | rc = pci_enable_device(pdev); | ||
1417 | if (rc < 0) { | ||
1418 | net_probe(tp, KERN_ERR "%s: enable failure\n", pci_name(pdev)); | ||
1419 | goto err_free_dev_1; | ||
1420 | } | ||
1421 | |||
1422 | rc = -ENODEV; | ||
1423 | |||
1424 | if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) { | ||
1425 | net_probe(tp, KERN_ERR "%s: region #0 is no MMIO resource.\n", | ||
1426 | pci_name(pdev)); | ||
1427 | goto err_pci_disable_2; | ||
1428 | } | ||
1429 | if (pci_resource_len(pdev, 0) < SIS190_REGS_SIZE) { | ||
1430 | net_probe(tp, KERN_ERR "%s: invalid PCI region size(s).\n", | ||
1431 | pci_name(pdev)); | ||
1432 | goto err_pci_disable_2; | ||
1433 | } | ||
1434 | |||
1435 | rc = pci_request_regions(pdev, DRV_NAME); | ||
1436 | if (rc < 0) { | ||
1437 | net_probe(tp, KERN_ERR PFX "%s: could not request regions.\n", | ||
1438 | pci_name(pdev)); | ||
1439 | goto err_pci_disable_2; | ||
1440 | } | ||
1441 | |||
1442 | rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK); | ||
1443 | if (rc < 0) { | ||
1444 | net_probe(tp, KERN_ERR "%s: DMA configuration failed.\n", | ||
1445 | pci_name(pdev)); | ||
1446 | goto err_free_res_3; | ||
1447 | } | ||
1448 | |||
1449 | pci_set_master(pdev); | ||
1450 | |||
1451 | ioaddr = ioremap(pci_resource_start(pdev, 0), SIS190_REGS_SIZE); | ||
1452 | if (!ioaddr) { | ||
1453 | net_probe(tp, KERN_ERR "%s: cannot remap MMIO, aborting\n", | ||
1454 | pci_name(pdev)); | ||
1455 | rc = -EIO; | ||
1456 | goto err_free_res_3; | ||
1457 | } | ||
1458 | |||
1459 | tp->pci_dev = pdev; | ||
1460 | tp->mmio_addr = ioaddr; | ||
1461 | |||
1462 | sis190_irq_mask_and_ack(ioaddr); | ||
1463 | |||
1464 | sis190_soft_reset(ioaddr); | ||
1465 | out: | ||
1466 | return dev; | ||
1467 | |||
1468 | err_free_res_3: | ||
1469 | pci_release_regions(pdev); | ||
1470 | err_pci_disable_2: | ||
1471 | pci_disable_device(pdev); | ||
1472 | err_free_dev_1: | ||
1473 | free_netdev(dev); | ||
1474 | err_out_0: | ||
1475 | dev = ERR_PTR(rc); | ||
1476 | goto out; | ||
1477 | } | ||
1478 | |||
1479 | static void sis190_tx_timeout(struct net_device *dev) | ||
1480 | { | ||
1481 | struct sis190_private *tp = netdev_priv(dev); | ||
1482 | void __iomem *ioaddr = tp->mmio_addr; | ||
1483 | u8 tmp8; | ||
1484 | |||
1485 | /* Disable Tx, if not already */ | ||
1486 | tmp8 = SIS_R8(TxControl); | ||
1487 | if (tmp8 & CmdTxEnb) | ||
1488 | SIS_W8(TxControl, tmp8 & ~CmdTxEnb); | ||
1489 | |||
1490 | |||
1491 | net_tx_err(tp, KERN_INFO "%s: Transmit timeout, status %08x %08x.\n", | ||
1492 | dev->name, SIS_R32(TxControl), SIS_R32(TxSts)); | ||
1493 | |||
1494 | /* Disable interrupts by clearing the interrupt mask. */ | ||
1495 | SIS_W32(IntrMask, 0x0000); | ||
1496 | |||
1497 | /* Stop a shared interrupt from scavenging while we are. */ | ||
1498 | spin_lock_irq(&tp->lock); | ||
1499 | sis190_tx_clear(tp); | ||
1500 | spin_unlock_irq(&tp->lock); | ||
1501 | |||
1502 | /* ...and finally, reset everything. */ | ||
1503 | sis190_hw_start(dev); | ||
1504 | |||
1505 | netif_wake_queue(dev); | ||
1506 | } | ||
1507 | |||
1508 | static int __devinit sis190_get_mac_addr_from_eeprom(struct pci_dev *pdev, | ||
1509 | struct net_device *dev) | ||
1510 | { | ||
1511 | struct sis190_private *tp = netdev_priv(dev); | ||
1512 | void __iomem *ioaddr = tp->mmio_addr; | ||
1513 | u16 sig; | ||
1514 | int i; | ||
1515 | |||
1516 | net_probe(tp, KERN_INFO "%s: Read MAC address from EEPROM\n", | ||
1517 | pci_name(pdev)); | ||
1518 | |||
1519 | /* Check to see if there is a sane EEPROM */ | ||
1520 | sig = (u16) sis190_read_eeprom(ioaddr, EEPROMSignature); | ||
1521 | |||
1522 | if ((sig == 0xffff) || (sig == 0x0000)) { | ||
1523 | 		net_probe(tp, KERN_INFO "%s: EEPROM read error %x.\n", | ||
1524 | pci_name(pdev), sig); | ||
1525 | return -EIO; | ||
1526 | } | ||
1527 | |||
1528 | /* Get MAC address from EEPROM */ | ||
1529 | for (i = 0; i < MAC_ADDR_LEN / 2; i++) { | ||
1530 | __le16 w = sis190_read_eeprom(ioaddr, EEPROMMACAddr + i); | ||
1531 | |||
1532 | 		((u16 *)dev->dev_addr)[i] = le16_to_cpu(w); | ||
1533 | } | ||
1534 | |||
1535 | return 0; | ||
1536 | } | ||
1537 | |||
1538 | /** | ||
1539 | * sis190_get_mac_addr_from_apc - Get MAC address for SiS965 model | ||
1540 | * @pdev: PCI device | ||
1541 | * @dev: network device to get address for | ||
1542 | * | ||
1543 | * The SiS965 model stores its MAC address in APC CMOS RAM, | ||
1544 | * which is accessed through the ISA bridge. | ||
1545 | * The MAC address is read into @dev->dev_addr. | ||
1546 | */ | ||
1547 | static int __devinit sis190_get_mac_addr_from_apc(struct pci_dev *pdev, | ||
1548 | struct net_device *dev) | ||
1549 | { | ||
1550 | struct sis190_private *tp = netdev_priv(dev); | ||
1551 | struct pci_dev *isa_bridge; | ||
1552 | u8 reg, tmp8; | ||
1553 | int i; | ||
1554 | |||
1555 | net_probe(tp, KERN_INFO "%s: Read MAC address from APC.\n", | ||
1556 | pci_name(pdev)); | ||
1557 | |||
1558 | isa_bridge = pci_get_device(PCI_VENDOR_ID_SI, 0x0965, NULL); | ||
1559 | if (!isa_bridge) { | ||
1560 | net_probe(tp, KERN_INFO "%s: Can not find ISA bridge.\n", | ||
1561 | pci_name(pdev)); | ||
1562 | return -EIO; | ||
1563 | } | ||
1564 | |||
1565 | /* Enable port 78h & 79h to access APC Registers. */ | ||
1566 | pci_read_config_byte(isa_bridge, 0x48, &tmp8); | ||
1567 | reg = (tmp8 & ~0x02); | ||
1568 | pci_write_config_byte(isa_bridge, 0x48, reg); | ||
1569 | udelay(50); | ||
1570 | 	pci_read_config_byte(isa_bridge, 0x48, &reg); | ||
1571 | |||
1572 | for (i = 0; i < MAC_ADDR_LEN; i++) { | ||
1573 | outb(0x9 + i, 0x78); | ||
1574 | dev->dev_addr[i] = inb(0x79); | ||
1575 | } | ||
1576 | |||
1577 | outb(0x12, 0x78); | ||
1578 | reg = inb(0x79); | ||
1579 | |||
1580 | /* Restore the value to ISA Bridge */ | ||
1581 | pci_write_config_byte(isa_bridge, 0x48, tmp8); | ||
1582 | pci_dev_put(isa_bridge); | ||
1583 | |||
1584 | return 0; | ||
1585 | } | ||
1586 | |||
1587 | /** | ||
1588 | * sis190_init_rxfilter - Initialize the Rx filter | ||
1589 | * @dev: network device to initialize | ||
1590 | * | ||
1591 | * Set receive filter address to our MAC address | ||
1592 | * and enable packet filtering. | ||
1593 | */ | ||
1594 | static inline void sis190_init_rxfilter(struct net_device *dev) | ||
1595 | { | ||
1596 | struct sis190_private *tp = netdev_priv(dev); | ||
1597 | void __iomem *ioaddr = tp->mmio_addr; | ||
1598 | u16 ctl; | ||
1599 | int i; | ||
1600 | |||
1601 | ctl = SIS_R16(RxMacControl); | ||
1602 | /* | ||
1603 | * Disable packet filtering before setting filter. | ||
1604 | * Note: SiS's driver writes 32 bits but RxMacControl is 16 bits | ||
1605 | * only and followed by RxMacAddr (6 bytes). Strange. -- FR | ||
1606 | */ | ||
1607 | SIS_W16(RxMacControl, ctl & ~0x0f00); | ||
1608 | |||
1609 | for (i = 0; i < MAC_ADDR_LEN; i++) | ||
1610 | SIS_W8(RxMacAddr + i, dev->dev_addr[i]); | ||
1611 | |||
1612 | SIS_W16(RxMacControl, ctl); | ||
1613 | SIS_PCI_COMMIT(); | ||
1614 | } | ||
1615 | |||
1616 | static int sis190_get_mac_addr(struct pci_dev *pdev, struct net_device *dev) | ||
1617 | { | ||
1618 | u8 from; | ||
1619 | |||
1620 | pci_read_config_byte(pdev, 0x73, &from); | ||
1621 | |||
1622 | return (from & 0x00000001) ? | ||
1623 | sis190_get_mac_addr_from_apc(pdev, dev) : | ||
1624 | sis190_get_mac_addr_from_eeprom(pdev, dev); | ||
1625 | } | ||
1626 | |||
1627 | static void sis190_set_speed_auto(struct net_device *dev) | ||
1628 | { | ||
1629 | struct sis190_private *tp = netdev_priv(dev); | ||
1630 | void __iomem *ioaddr = tp->mmio_addr; | ||
1631 | int phy_id = tp->mii_if.phy_id; | ||
1632 | int val; | ||
1633 | |||
1634 | net_link(tp, KERN_INFO "%s: Enabling Auto-negotiation.\n", dev->name); | ||
1635 | |||
1636 | val = mdio_read(ioaddr, phy_id, MII_ADVERTISE); | ||
1637 | |||
1638 | 	// Enable 10/100 Full/Half Mode, leave MII_ADVERTISE bits 4:0 | ||
1639 | // unchanged. | ||
1640 | mdio_write(ioaddr, phy_id, MII_ADVERTISE, (val & ADVERTISE_SLCT) | | ||
1641 | ADVERTISE_100FULL | ADVERTISE_10FULL | | ||
1642 | ADVERTISE_100HALF | ADVERTISE_10HALF); | ||
1643 | |||
1644 | // Enable 1000 Full Mode. | ||
1645 | mdio_write(ioaddr, phy_id, MII_CTRL1000, ADVERTISE_1000FULL); | ||
1646 | |||
1647 | // Enable auto-negotiation and restart auto-negotiation. | ||
1648 | mdio_write(ioaddr, phy_id, MII_BMCR, | ||
1649 | BMCR_ANENABLE | BMCR_ANRESTART | BMCR_RESET); | ||
1650 | } | ||
1651 | |||
1652 | static int sis190_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) | ||
1653 | { | ||
1654 | struct sis190_private *tp = netdev_priv(dev); | ||
1655 | |||
1656 | return mii_ethtool_gset(&tp->mii_if, cmd); | ||
1657 | } | ||
1658 | |||
1659 | static int sis190_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) | ||
1660 | { | ||
1661 | struct sis190_private *tp = netdev_priv(dev); | ||
1662 | |||
1663 | return mii_ethtool_sset(&tp->mii_if, cmd); | ||
1664 | } | ||
1665 | |||
1666 | static void sis190_get_drvinfo(struct net_device *dev, | ||
1667 | struct ethtool_drvinfo *info) | ||
1668 | { | ||
1669 | struct sis190_private *tp = netdev_priv(dev); | ||
1670 | |||
1671 | strcpy(info->driver, DRV_NAME); | ||
1672 | strcpy(info->version, DRV_VERSION); | ||
1673 | strcpy(info->bus_info, pci_name(tp->pci_dev)); | ||
1674 | } | ||
1675 | |||
1676 | static int sis190_get_regs_len(struct net_device *dev) | ||
1677 | { | ||
1678 | return SIS190_REGS_SIZE; | ||
1679 | } | ||
1680 | |||
1681 | static void sis190_get_regs(struct net_device *dev, struct ethtool_regs *regs, | ||
1682 | void *p) | ||
1683 | { | ||
1684 | struct sis190_private *tp = netdev_priv(dev); | ||
1685 | unsigned long flags; | ||
1686 | |||
1687 | if (regs->len > SIS190_REGS_SIZE) | ||
1688 | regs->len = SIS190_REGS_SIZE; | ||
1689 | |||
1690 | spin_lock_irqsave(&tp->lock, flags); | ||
1691 | memcpy_fromio(p, tp->mmio_addr, regs->len); | ||
1692 | spin_unlock_irqrestore(&tp->lock, flags); | ||
1693 | } | ||
1694 | |||
1695 | static int sis190_nway_reset(struct net_device *dev) | ||
1696 | { | ||
1697 | struct sis190_private *tp = netdev_priv(dev); | ||
1698 | |||
1699 | return mii_nway_restart(&tp->mii_if); | ||
1700 | } | ||
1701 | |||
1702 | static u32 sis190_get_msglevel(struct net_device *dev) | ||
1703 | { | ||
1704 | struct sis190_private *tp = netdev_priv(dev); | ||
1705 | |||
1706 | return tp->msg_enable; | ||
1707 | } | ||
1708 | |||
1709 | static void sis190_set_msglevel(struct net_device *dev, u32 value) | ||
1710 | { | ||
1711 | struct sis190_private *tp = netdev_priv(dev); | ||
1712 | |||
1713 | tp->msg_enable = value; | ||
1714 | } | ||
1715 | |||
1716 | static struct ethtool_ops sis190_ethtool_ops = { | ||
1717 | .get_settings = sis190_get_settings, | ||
1718 | .set_settings = sis190_set_settings, | ||
1719 | .get_drvinfo = sis190_get_drvinfo, | ||
1720 | .get_regs_len = sis190_get_regs_len, | ||
1721 | .get_regs = sis190_get_regs, | ||
1722 | .get_link = ethtool_op_get_link, | ||
1723 | .get_msglevel = sis190_get_msglevel, | ||
1724 | .set_msglevel = sis190_set_msglevel, | ||
1725 | .nway_reset = sis190_nway_reset, | ||
1726 | }; | ||
1727 | |||
1728 | static int sis190_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) | ||
1729 | { | ||
1730 | struct sis190_private *tp = netdev_priv(dev); | ||
1731 | |||
1732 | return !netif_running(dev) ? -EINVAL : | ||
1733 | generic_mii_ioctl(&tp->mii_if, if_mii(ifr), cmd, NULL); | ||
1734 | } | ||
1735 | |||
1736 | static int __devinit sis190_init_one(struct pci_dev *pdev, | ||
1737 | const struct pci_device_id *ent) | ||
1738 | { | ||
1739 | static int printed_version = 0; | ||
1740 | struct sis190_private *tp; | ||
1741 | struct net_device *dev; | ||
1742 | void __iomem *ioaddr; | ||
1743 | int rc; | ||
1744 | |||
1745 | if (!printed_version) { | ||
1746 | net_drv(&debug, KERN_INFO SIS190_DRIVER_NAME " loaded.\n"); | ||
1747 | printed_version = 1; | ||
1748 | } | ||
1749 | |||
1750 | dev = sis190_init_board(pdev); | ||
1751 | if (IS_ERR(dev)) { | ||
1752 | rc = PTR_ERR(dev); | ||
1753 | goto out; | ||
1754 | } | ||
1755 | |||
1756 | tp = netdev_priv(dev); | ||
1757 | ioaddr = tp->mmio_addr; | ||
1758 | |||
1759 | rc = sis190_get_mac_addr(pdev, dev); | ||
1760 | if (rc < 0) | ||
1761 | goto err_release_board; | ||
1762 | |||
1763 | sis190_init_rxfilter(dev); | ||
1764 | |||
1765 | INIT_WORK(&tp->phy_task, sis190_phy_task, dev); | ||
1766 | |||
1767 | dev->open = sis190_open; | ||
1768 | dev->stop = sis190_close; | ||
1769 | dev->do_ioctl = sis190_ioctl; | ||
1770 | dev->get_stats = sis190_get_stats; | ||
1771 | dev->tx_timeout = sis190_tx_timeout; | ||
1772 | dev->watchdog_timeo = SIS190_TX_TIMEOUT; | ||
1773 | dev->hard_start_xmit = sis190_start_xmit; | ||
1774 | #ifdef CONFIG_NET_POLL_CONTROLLER | ||
1775 | dev->poll_controller = sis190_netpoll; | ||
1776 | #endif | ||
1777 | dev->set_multicast_list = sis190_set_rx_mode; | ||
1778 | SET_ETHTOOL_OPS(dev, &sis190_ethtool_ops); | ||
1779 | dev->irq = pdev->irq; | ||
1780 | dev->base_addr = (unsigned long) 0xdead; | ||
1781 | |||
1782 | spin_lock_init(&tp->lock); | ||
1783 | |||
1784 | rc = sis190_mii_probe(dev); | ||
1785 | if (rc < 0) | ||
1786 | goto err_release_board; | ||
1787 | |||
1788 | rc = register_netdev(dev); | ||
1789 | if (rc < 0) | ||
1790 | goto err_remove_mii; | ||
1791 | |||
1792 | pci_set_drvdata(pdev, dev); | ||
1793 | |||
1794 | net_probe(tp, KERN_INFO "%s: %s at %p (IRQ: %d), " | ||
1795 | "%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x\n", | ||
1796 | pci_name(pdev), sis_chip_info[ent->driver_data].name, | ||
1797 | ioaddr, dev->irq, | ||
1798 | dev->dev_addr[0], dev->dev_addr[1], | ||
1799 | dev->dev_addr[2], dev->dev_addr[3], | ||
1800 | dev->dev_addr[4], dev->dev_addr[5]); | ||
1801 | |||
1802 | netif_carrier_off(dev); | ||
1803 | |||
1804 | sis190_set_speed_auto(dev); | ||
1805 | out: | ||
1806 | return rc; | ||
1807 | |||
1808 | err_remove_mii: | ||
1809 | sis190_mii_remove(dev); | ||
1810 | err_release_board: | ||
1811 | sis190_release_board(pdev); | ||
1812 | goto out; | ||
1813 | } | ||
1814 | |||
1815 | static void __devexit sis190_remove_one(struct pci_dev *pdev) | ||
1816 | { | ||
1817 | struct net_device *dev = pci_get_drvdata(pdev); | ||
1818 | |||
1819 | sis190_mii_remove(dev); | ||
1820 | unregister_netdev(dev); | ||
1821 | sis190_release_board(pdev); | ||
1822 | pci_set_drvdata(pdev, NULL); | ||
1823 | } | ||
1824 | |||
1825 | static struct pci_driver sis190_pci_driver = { | ||
1826 | .name = DRV_NAME, | ||
1827 | .id_table = sis190_pci_tbl, | ||
1828 | .probe = sis190_init_one, | ||
1829 | .remove = __devexit_p(sis190_remove_one), | ||
1830 | }; | ||
1831 | |||
1832 | static int __init sis190_init_module(void) | ||
1833 | { | ||
1834 | return pci_module_init(&sis190_pci_driver); | ||
1835 | } | ||
1836 | |||
1837 | static void __exit sis190_cleanup_module(void) | ||
1838 | { | ||
1839 | pci_unregister_driver(&sis190_pci_driver); | ||
1840 | } | ||
1841 | |||
1842 | module_init(sis190_init_module); | ||
1843 | module_exit(sis190_cleanup_module); | ||
diff --git a/drivers/net/tulip/Kconfig b/drivers/net/tulip/Kconfig index e2cdaf876201..8c9634a98c11 100644 --- a/drivers/net/tulip/Kconfig +++ b/drivers/net/tulip/Kconfig | |||
@@ -135,6 +135,18 @@ config DM9102 | |||
135 | <file:Documentation/networking/net-modules.txt>. The module will | 135 | <file:Documentation/networking/net-modules.txt>. The module will |
136 | be called dmfe. | 136 | be called dmfe. |
137 | 137 | ||
138 | config ULI526X | ||
139 | tristate "ULi M526x controller support" | ||
140 | depends on NET_TULIP && PCI | ||
141 | select CRC32 | ||
142 | ---help--- | ||
143 | This driver is for ULi M5261/M5263 10/100M Ethernet Controller | ||
144 | (<http://www.uli.com.tw/>). | ||
145 | |||
146 | To compile this driver as a module, choose M here and read | ||
147 | <file:Documentation/networking/net-modules.txt>. The module will | ||
148 | be called uli526x. | ||
149 | |||
138 | config PCMCIA_XIRCOM | 150 | config PCMCIA_XIRCOM |
139 | tristate "Xircom CardBus support (new driver)" | 151 | tristate "Xircom CardBus support (new driver)" |
140 | depends on NET_TULIP && CARDBUS | 152 | depends on NET_TULIP && CARDBUS |
diff --git a/drivers/net/tulip/Makefile b/drivers/net/tulip/Makefile index 8bb9b4683979..451090d6fcca 100644 --- a/drivers/net/tulip/Makefile +++ b/drivers/net/tulip/Makefile | |||
@@ -9,6 +9,7 @@ obj-$(CONFIG_WINBOND_840) += winbond-840.o | |||
9 | obj-$(CONFIG_DE2104X) += de2104x.o | 9 | obj-$(CONFIG_DE2104X) += de2104x.o |
10 | obj-$(CONFIG_TULIP) += tulip.o | 10 | obj-$(CONFIG_TULIP) += tulip.o |
11 | obj-$(CONFIG_DE4X5) += de4x5.o | 11 | obj-$(CONFIG_DE4X5) += de4x5.o |
12 | obj-$(CONFIG_ULI526X) += uli526x.o | ||
12 | 13 | ||
13 | # Declare multi-part drivers. | 14 | # Declare multi-part drivers. |
14 | 15 | ||
diff --git a/drivers/net/tulip/media.c b/drivers/net/tulip/media.c index e26c31f944bf..f53396fe79c9 100644 --- a/drivers/net/tulip/media.c +++ b/drivers/net/tulip/media.c | |||
@@ -81,25 +81,6 @@ int tulip_mdio_read(struct net_device *dev, int phy_id, int location) | |||
81 | return retval & 0xffff; | 81 | return retval & 0xffff; |
82 | } | 82 | } |
83 | 83 | ||
84 | if(tp->chip_id == ULI526X && tp->revision >= 0x40) { | ||
85 | int value; | ||
86 | int i = 1000; | ||
87 | |||
88 | value = ioread32(ioaddr + CSR9); | ||
89 | iowrite32(value & 0xFFEFFFFF, ioaddr + CSR9); | ||
90 | |||
91 | value = (phy_id << 21) | (location << 16) | 0x08000000; | ||
92 | iowrite32(value, ioaddr + CSR10); | ||
93 | |||
94 | while(--i > 0) { | ||
95 | mdio_delay(); | ||
96 | if(ioread32(ioaddr + CSR10) & 0x10000000) | ||
97 | break; | ||
98 | } | ||
99 | retval = ioread32(ioaddr + CSR10); | ||
100 | spin_unlock_irqrestore(&tp->mii_lock, flags); | ||
101 | return retval & 0xFFFF; | ||
102 | } | ||
103 | /* Establish sync by sending at least 32 logic ones. */ | 84 | /* Establish sync by sending at least 32 logic ones. */ |
104 | for (i = 32; i >= 0; i--) { | 85 | for (i = 32; i >= 0; i--) { |
105 | iowrite32(MDIO_ENB | MDIO_DATA_WRITE1, mdio_addr); | 86 | iowrite32(MDIO_ENB | MDIO_DATA_WRITE1, mdio_addr); |
@@ -159,23 +140,6 @@ void tulip_mdio_write(struct net_device *dev, int phy_id, int location, int val) | |||
159 | spin_unlock_irqrestore(&tp->mii_lock, flags); | 140 | spin_unlock_irqrestore(&tp->mii_lock, flags); |
160 | return; | 141 | return; |
161 | } | 142 | } |
162 | if (tp->chip_id == ULI526X && tp->revision >= 0x40) { | ||
163 | int value; | ||
164 | int i = 1000; | ||
165 | |||
166 | value = ioread32(ioaddr + CSR9); | ||
167 | iowrite32(value & 0xFFEFFFFF, ioaddr + CSR9); | ||
168 | |||
169 | value = (phy_id << 21) | (location << 16) | 0x04000000 | (val & 0xFFFF); | ||
170 | iowrite32(value, ioaddr + CSR10); | ||
171 | |||
172 | while(--i > 0) { | ||
173 | if (ioread32(ioaddr + CSR10) & 0x10000000) | ||
174 | break; | ||
175 | } | ||
176 | spin_unlock_irqrestore(&tp->mii_lock, flags); | ||
177 | return; | ||
178 | } | ||
179 | 143 | ||
180 | /* Establish sync by sending 32 logic ones. */ | 144 | /* Establish sync by sending 32 logic ones. */ |
181 | for (i = 32; i >= 0; i--) { | 145 | for (i = 32; i >= 0; i--) { |
diff --git a/drivers/net/tulip/timer.c b/drivers/net/tulip/timer.c index 691568283553..e058a9fbfe88 100644 --- a/drivers/net/tulip/timer.c +++ b/drivers/net/tulip/timer.c | |||
@@ -39,7 +39,6 @@ void tulip_timer(unsigned long data) | |||
39 | case MX98713: | 39 | case MX98713: |
40 | case COMPEX9881: | 40 | case COMPEX9881: |
41 | case DM910X: | 41 | case DM910X: |
42 | case ULI526X: | ||
43 | default: { | 42 | default: { |
44 | struct medialeaf *mleaf; | 43 | struct medialeaf *mleaf; |
45 | unsigned char *p; | 44 | unsigned char *p; |
diff --git a/drivers/net/tulip/tulip.h b/drivers/net/tulip/tulip.h index 20346d847d9e..05d2d96f7be2 100644 --- a/drivers/net/tulip/tulip.h +++ b/drivers/net/tulip/tulip.h | |||
@@ -88,7 +88,6 @@ enum chips { | |||
88 | I21145, | 88 | I21145, |
89 | DM910X, | 89 | DM910X, |
90 | CONEXANT, | 90 | CONEXANT, |
91 | ULI526X | ||
92 | }; | 91 | }; |
93 | 92 | ||
94 | 93 | ||
@@ -482,11 +481,8 @@ static inline void tulip_stop_rxtx(struct tulip_private *tp) | |||
482 | 481 | ||
483 | static inline void tulip_restart_rxtx(struct tulip_private *tp) | 482 | static inline void tulip_restart_rxtx(struct tulip_private *tp) |
484 | { | 483 | { |
485 | if(!(tp->chip_id == ULI526X && | 484 | tulip_stop_rxtx(tp); |
486 | (tp->revision == 0x40 || tp->revision == 0x50))) { | 485 | udelay(5); |
487 | tulip_stop_rxtx(tp); | ||
488 | udelay(5); | ||
489 | } | ||
490 | tulip_start_rxtx(tp); | 486 | tulip_start_rxtx(tp); |
491 | } | 487 | } |
492 | 488 | ||
diff --git a/drivers/net/tulip/tulip_core.c b/drivers/net/tulip/tulip_core.c index d45d8f56e5b4..05da5bea564c 100644 --- a/drivers/net/tulip/tulip_core.c +++ b/drivers/net/tulip/tulip_core.c | |||
@@ -199,9 +199,6 @@ struct tulip_chip_table tulip_tbl[] = { | |||
199 | { "Conexant LANfinity", 256, 0x0001ebef, | 199 | { "Conexant LANfinity", 256, 0x0001ebef, |
200 | HAS_MII | HAS_ACPI, tulip_timer }, | 200 | HAS_MII | HAS_ACPI, tulip_timer }, |
201 | 201 | ||
202 | /* ULi526X */ | ||
203 | { "ULi M5261/M5263", 128, 0x0001ebef, | ||
204 | HAS_MII | HAS_MEDIA_TABLE | CSR12_IN_SROM | HAS_ACPI, tulip_timer }, | ||
205 | }; | 202 | }; |
206 | 203 | ||
207 | 204 | ||
@@ -239,8 +236,6 @@ static struct pci_device_id tulip_pci_tbl[] = { | |||
239 | { 0x1737, 0xAB09, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET }, | 236 | { 0x1737, 0xAB09, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET }, |
240 | { 0x1737, 0xAB08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET }, | 237 | { 0x1737, 0xAB08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET }, |
241 | { 0x17B3, 0xAB08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET }, | 238 | { 0x17B3, 0xAB08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET }, |
242 | { 0x10b9, 0x5261, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ULI526X }, /* ALi 1563 integrated ethernet */ | ||
243 | { 0x10b9, 0x5263, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ULI526X }, /* ALi 1563 integrated ethernet */ | ||
244 | { 0x10b7, 0x9300, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET }, /* 3Com 3CSOHO100B-TX */ | 239 | { 0x10b7, 0x9300, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET }, /* 3Com 3CSOHO100B-TX */ |
245 | { 0x14ea, 0xab08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET }, /* Planex FNW-3602-TX */ | 240 | { 0x14ea, 0xab08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET }, /* Planex FNW-3602-TX */ |
246 | { } /* terminate list */ | 241 | { } /* terminate list */ |
@@ -522,7 +517,7 @@ static void tulip_tx_timeout(struct net_device *dev) | |||
522 | dev->name); | 517 | dev->name); |
523 | } else if (tp->chip_id == DC21140 || tp->chip_id == DC21142 | 518 | } else if (tp->chip_id == DC21140 || tp->chip_id == DC21142 |
524 | || tp->chip_id == MX98713 || tp->chip_id == COMPEX9881 | 519 | || tp->chip_id == MX98713 || tp->chip_id == COMPEX9881 |
525 | || tp->chip_id == DM910X || tp->chip_id == ULI526X) { | 520 | || tp->chip_id == DM910X) { |
526 | printk(KERN_WARNING "%s: 21140 transmit timed out, status %8.8x, " | 521 | printk(KERN_WARNING "%s: 21140 transmit timed out, status %8.8x, " |
527 | "SIA %8.8x %8.8x %8.8x %8.8x, resetting...\n", | 522 | "SIA %8.8x %8.8x %8.8x %8.8x, resetting...\n", |
528 | dev->name, ioread32(ioaddr + CSR5), ioread32(ioaddr + CSR12), | 523 | dev->name, ioread32(ioaddr + CSR5), ioread32(ioaddr + CSR12), |
@@ -1103,18 +1098,16 @@ static void set_rx_mode(struct net_device *dev) | |||
1103 | entry = tp->cur_tx++ % TX_RING_SIZE; | 1098 | entry = tp->cur_tx++ % TX_RING_SIZE; |
1104 | 1099 | ||
1105 | if (entry != 0) { | 1100 | if (entry != 0) { |
1106 | /* Avoid a chip errata by prefixing a dummy entry. Don't do | 1101 | /* Avoid a chip errata by prefixing a dummy entry. */ |
1107 | this on the ULI526X as it triggers a different problem */ | 1102 | tp->tx_buffers[entry].skb = NULL; |
1108 | if (!(tp->chip_id == ULI526X && (tp->revision == 0x40 || tp->revision == 0x50))) { | 1103 | tp->tx_buffers[entry].mapping = 0; |
1109 | tp->tx_buffers[entry].skb = NULL; | 1104 | tp->tx_ring[entry].length = |
1110 | tp->tx_buffers[entry].mapping = 0; | 1105 | (entry == TX_RING_SIZE-1) ? cpu_to_le32(DESC_RING_WRAP) : 0; |
1111 | tp->tx_ring[entry].length = | 1106 | tp->tx_ring[entry].buffer1 = 0; |
1112 | (entry == TX_RING_SIZE-1) ? cpu_to_le32(DESC_RING_WRAP) : 0; | 1107 | /* Must set DescOwned later to avoid race with chip */ |
1113 | tp->tx_ring[entry].buffer1 = 0; | 1108 | dummy = entry; |
1114 | /* Must set DescOwned later to avoid race with chip */ | 1109 | entry = tp->cur_tx++ % TX_RING_SIZE; |
1115 | dummy = entry; | 1110 | |
1116 | entry = tp->cur_tx++ % TX_RING_SIZE; | ||
1117 | } | ||
1118 | } | 1111 | } |
1119 | 1112 | ||
1120 | tp->tx_buffers[entry].skb = NULL; | 1113 | tp->tx_buffers[entry].skb = NULL; |
@@ -1235,10 +1228,6 @@ static int tulip_uli_dm_quirk(struct pci_dev *pdev) | |||
1235 | { | 1228 | { |
1236 | if (pdev->vendor == 0x1282 && pdev->device == 0x9102) | 1229 | if (pdev->vendor == 0x1282 && pdev->device == 0x9102) |
1237 | return 1; | 1230 | return 1; |
1238 | if (pdev->vendor == 0x10b9 && pdev->device == 0x5261) | ||
1239 | return 1; | ||
1240 | if (pdev->vendor == 0x10b9 && pdev->device == 0x5263) | ||
1241 | return 1; | ||
1242 | return 0; | 1231 | return 0; |
1243 | } | 1232 | } |
1244 | 1233 | ||
@@ -1680,7 +1669,6 @@ static int __devinit tulip_init_one (struct pci_dev *pdev, | |||
1680 | switch (chip_idx) { | 1669 | switch (chip_idx) { |
1681 | case DC21140: | 1670 | case DC21140: |
1682 | case DM910X: | 1671 | case DM910X: |
1683 | case ULI526X: | ||
1684 | default: | 1672 | default: |
1685 | if (tp->mtable) | 1673 | if (tp->mtable) |
1686 | iowrite32(tp->mtable->csr12dir | 0x100, ioaddr + CSR12); | 1674 | iowrite32(tp->mtable->csr12dir | 0x100, ioaddr + CSR12); |
diff --git a/drivers/net/tulip/uli526x.c b/drivers/net/tulip/uli526x.c new file mode 100644 index 000000000000..5ae22b7bc5ca --- /dev/null +++ b/drivers/net/tulip/uli526x.c | |||
@@ -0,0 +1,1749 @@ | |||
1 | /* | ||
2 | This program is free software; you can redistribute it and/or | ||
3 | modify it under the terms of the GNU General Public License | ||
4 | as published by the Free Software Foundation; either version 2 | ||
5 | of the License, or (at your option) any later version. | ||
6 | |||
7 | This program is distributed in the hope that it will be useful, | ||
8 | but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
9 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
10 | GNU General Public License for more details. | ||
11 | |||
12 | |||
13 | */ | ||
14 | |||
15 | #define DRV_NAME "uli526x" | ||
16 | #define DRV_VERSION "0.9.3" | ||
17 | #define DRV_RELDATE "2005-7-29" | ||
18 | |||
19 | #include <linux/module.h> | ||
20 | |||
21 | #include <linux/kernel.h> | ||
22 | #include <linux/string.h> | ||
23 | #include <linux/timer.h> | ||
24 | #include <linux/ptrace.h> | ||
25 | #include <linux/errno.h> | ||
26 | #include <linux/ioport.h> | ||
27 | #include <linux/slab.h> | ||
28 | #include <linux/interrupt.h> | ||
29 | #include <linux/pci.h> | ||
30 | #include <linux/init.h> | ||
31 | #include <linux/netdevice.h> | ||
32 | #include <linux/etherdevice.h> | ||
33 | #include <linux/ethtool.h> | ||
34 | #include <linux/skbuff.h> | ||
35 | #include <linux/delay.h> | ||
36 | #include <linux/spinlock.h> | ||
37 | |||
38 | #include <asm/processor.h> | ||
39 | #include <asm/bitops.h> | ||
40 | #include <asm/io.h> | ||
41 | #include <asm/dma.h> | ||
42 | #include <asm/uaccess.h> | ||
43 | |||
44 | |||
45 | /* Board/System/Debug information/definition ---------------- */ | ||
46 | #define PCI_ULI5261_ID 0x526110B9 /* ULi M5261 ID */ | ||
47 | #define PCI_ULI5263_ID 0x526310B9 /* ULi M5263 ID */ | ||
48 | |||
49 | #define ULI526X_IO_SIZE 0x100 | ||
50 | #define TX_DESC_CNT 0x20 /* Allocated Tx descriptors */ | ||
51 | #define RX_DESC_CNT 0x30 /* Allocated Rx descriptors */ | ||
52 | #define TX_FREE_DESC_CNT (TX_DESC_CNT - 2) /* Max TX packet count */ | ||
53 | #define TX_WAKE_DESC_CNT (TX_DESC_CNT - 3) /* TX wakeup count */ | ||
54 | #define DESC_ALL_CNT (TX_DESC_CNT + RX_DESC_CNT) | ||
55 | #define TX_BUF_ALLOC 0x600 | ||
56 | #define RX_ALLOC_SIZE 0x620 | ||
57 | #define ULI526X_RESET 1 | ||
58 | #define CR0_DEFAULT 0 | ||
59 | #define CR6_DEFAULT 0x22200000 | ||
60 | #define CR7_DEFAULT 0x180c1 | ||
61 | #define CR15_DEFAULT 0x06 /* TxJabber RxWatchdog */ | ||
62 | #define TDES0_ERR_MASK 0x4302 /* TXJT, LC, EC, FUE */ | ||
63 | #define MAX_PACKET_SIZE 1514 | ||
64 | #define ULI5261_MAX_MULTICAST 14 | ||
65 | #define RX_COPY_SIZE 100 | ||
66 | #define MAX_CHECK_PACKET 0x8000 | ||
67 | |||
68 | #define ULI526X_10MHF 0 | ||
69 | #define ULI526X_100MHF 1 | ||
70 | #define ULI526X_10MFD 4 | ||
71 | #define ULI526X_100MFD 5 | ||
72 | #define ULI526X_AUTO 8 | ||
73 | |||
74 | #define ULI526X_TXTH_72 0x400000 /* TX TH 72 byte */ | ||
75 | #define ULI526X_TXTH_96 0x404000 /* TX TH 96 byte */ | ||
76 | #define ULI526X_TXTH_128 0x0000 /* TX TH 128 byte */ | ||
77 | #define ULI526X_TXTH_256 0x4000 /* TX TH 256 byte */ | ||
78 | #define ULI526X_TXTH_512 0x8000 /* TX TH 512 byte */ | ||
79 | #define ULI526X_TXTH_1K 0xC000 /* TX TH 1K byte */ | ||
80 | |||
81 | #define ULI526X_TIMER_WUT (jiffies + HZ * 1) /* timer wakeup time: 1 second */ | ||
82 | #define ULI526X_TX_TIMEOUT ((16*HZ)/2) /* tx packet time-out: 8 seconds */ | ||
83 | #define ULI526X_TX_KICK (4*HZ/2) /* tx packet kick-out: 2 seconds */ | ||
84 | |||
85 | #define ULI526X_DBUG(dbug_now, msg, value) do { if (uli526x_debug || (dbug_now)) printk(KERN_ERR DRV_NAME ": %s %lx\n", (msg), (long) (value)); } while (0) | ||
86 | |||
87 | #define SHOW_MEDIA_TYPE(mode) printk(KERN_ERR DRV_NAME ": Change speed to %s Mbps %s duplex\n", mode & 1 ? "100" : "10", mode & 4 ? "full" : "half") | ||
88 | |||
89 | |||
90 | /* CR9 definition: SROM/MII */ | ||
91 | #define CR9_SROM_READ 0x4800 | ||
92 | #define CR9_SRCS 0x1 | ||
93 | #define CR9_SRCLK 0x2 | ||
94 | #define CR9_CRDOUT 0x8 | ||
95 | #define SROM_DATA_0 0x0 | ||
96 | #define SROM_DATA_1 0x4 | ||
97 | #define PHY_DATA_1 0x20000 | ||
98 | #define PHY_DATA_0 0x00000 | ||
99 | #define MDCLKH 0x10000 | ||
100 | |||
101 | #define PHY_POWER_DOWN 0x800 | ||
102 | |||
103 | #define SROM_V41_CODE 0x14 | ||
104 | |||
105 | #define SROM_CLK_WRITE(data, ioaddr) do { \ | ||
106 | 	outl((data)|CR9_SROM_READ|CR9_SRCS, ioaddr); \ | ||
107 | 	udelay(5); \ | ||
108 | 	outl((data)|CR9_SROM_READ|CR9_SRCS|CR9_SRCLK, ioaddr); \ | ||
109 | 	udelay(5); \ | ||
110 | 	outl((data)|CR9_SROM_READ|CR9_SRCS, ioaddr); \ | ||
111 | 	udelay(5); } while (0) | ||
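/*
 * The do { } while (0) wrapper above keeps the three clock phases glued
 * together as a single statement.  Sketch of the hazard it avoids
 * (hypothetical caller, not from the driver):
 *
 *	if (bit)
 *		SROM_CLK_WRITE(SROM_DATA_1, ioaddr);
 *	else
 *		SROM_CLK_WRITE(SROM_DATA_0, ioaddr);
 *
 * Unwrapped, the stray statements would strand the else (a compile
 * error), and an unbraced if would guard only the first outl().
 */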
112 | |||
113 | /* Structure/enum declaration ------------------------------- */ | ||
114 | struct tx_desc { | ||
115 | u32 tdes0, tdes1, tdes2, tdes3; /* Data for the card */ | ||
116 | char *tx_buf_ptr; /* Data for us */ | ||
117 | struct tx_desc *next_tx_desc; | ||
118 | } __attribute__(( aligned(32) )); | ||
119 | |||
120 | struct rx_desc { | ||
121 | u32 rdes0, rdes1, rdes2, rdes3; /* Data for the card */ | ||
122 | struct sk_buff *rx_skb_ptr; /* Data for us */ | ||
123 | struct rx_desc *next_rx_desc; | ||
124 | } __attribute__(( aligned(32) )); | ||
125 | |||
126 | struct uli526x_board_info { | ||
127 | u32 chip_id; /* Chip vendor/Device ID */ | ||
128 | struct net_device *next_dev; /* next device */ | ||
129 | struct pci_dev *pdev; /* PCI device */ | ||
130 | spinlock_t lock; | ||
131 | |||
132 | long ioaddr; /* I/O base address */ | ||
133 | u32 cr0_data; | ||
134 | u32 cr5_data; | ||
135 | u32 cr6_data; | ||
136 | u32 cr7_data; | ||
137 | u32 cr15_data; | ||
138 | |||
139 | /* pointer for memory physical address */ | ||
140 | dma_addr_t buf_pool_dma_ptr; /* Tx buffer pool memory */ | ||
141 | dma_addr_t buf_pool_dma_start; /* Tx buffer pool align dword */ | ||
142 | dma_addr_t desc_pool_dma_ptr; /* descriptor pool memory */ | ||
143 | dma_addr_t first_tx_desc_dma; | ||
144 | dma_addr_t first_rx_desc_dma; | ||
145 | |||
146 | /* descriptor pointer */ | ||
147 | unsigned char *buf_pool_ptr; /* Tx buffer pool memory */ | ||
148 | unsigned char *buf_pool_start; /* Tx buffer pool align dword */ | ||
149 | unsigned char *desc_pool_ptr; /* descriptor pool memory */ | ||
150 | struct tx_desc *first_tx_desc; | ||
151 | struct tx_desc *tx_insert_ptr; | ||
152 | struct tx_desc *tx_remove_ptr; | ||
153 | struct rx_desc *first_rx_desc; | ||
154 | struct rx_desc *rx_insert_ptr; | ||
155 | struct rx_desc *rx_ready_ptr; /* next Rx descriptor to process */ | ||
156 | unsigned long tx_packet_cnt; /* in-flight Tx packet count */ | ||
157 | unsigned long rx_avail_cnt; /* available rx descriptor count */ | ||
158 | unsigned long interval_rx_cnt; /* rx packets received in one timer interval */ | ||
159 | |||
160 | u16 dbug_cnt; | ||
161 | u16 NIC_capability; /* NIC media capability */ | ||
162 | u16 PHY_reg4; /* Saved Phyxcer register 4 value */ | ||
163 | |||
164 | u8 media_mode; /* user-specified media mode */ | ||
165 | u8 op_mode; /* actual working media mode */ | ||
166 | u8 phy_addr; | ||
167 | u8 link_failed; /* link failure flag */ | ||
168 | u8 wait_reset; /* Hardware failed, need to reset */ | ||
169 | struct timer_list timer; | ||
170 | |||
171 | /* System defined statistic counter */ | ||
172 | struct net_device_stats stats; | ||
173 | |||
174 | /* Driver defined statistic counter */ | ||
175 | unsigned long tx_fifo_underrun; | ||
176 | unsigned long tx_loss_carrier; | ||
177 | unsigned long tx_no_carrier; | ||
178 | unsigned long tx_late_collision; | ||
179 | unsigned long tx_excessive_collision; | ||
180 | unsigned long tx_jabber_timeout; | ||
181 | unsigned long reset_count; | ||
182 | unsigned long reset_cr8; | ||
183 | unsigned long reset_fatal; | ||
184 | unsigned long reset_TXtimeout; | ||
185 | |||
186 | /* NIC SROM data */ | ||
187 | unsigned char srom[128]; | ||
188 | u8 init; | ||
189 | }; | ||
190 | |||
191 | enum uli526x_offsets { | ||
192 | DCR0 = 0x00, DCR1 = 0x08, DCR2 = 0x10, DCR3 = 0x18, DCR4 = 0x20, | ||
193 | DCR5 = 0x28, DCR6 = 0x30, DCR7 = 0x38, DCR8 = 0x40, DCR9 = 0x48, | ||
194 | DCR10 = 0x50, DCR11 = 0x58, DCR12 = 0x60, DCR13 = 0x68, DCR14 = 0x70, | ||
195 | DCR15 = 0x78 | ||
196 | }; | ||
197 | |||
198 | enum uli526x_CR6_bits { | ||
199 | CR6_RXSC = 0x2, CR6_PBF = 0x8, CR6_PM = 0x40, CR6_PAM = 0x80, | ||
200 | CR6_FDM = 0x200, CR6_TXSC = 0x2000, CR6_STI = 0x100000, | ||
201 | CR6_SFT = 0x200000, CR6_RXA = 0x40000000, CR6_NO_PURGE = 0x20000000 | ||
202 | }; | ||
203 | |||
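CR6 is the chip's main operation-mode register; the driver composes it from CR6_DEFAULT, the user's cr6set override, and the bits above. As a hedged illustration (editorial, using update_cr6() defined later in this file and an ioaddr assumed to be the I/O base), entering promiscuous mode amounts to:

    /* Sketch: pass every frame, including bad ones, as uli526x_set_filter_mode() does */
    u32 cr6 = CR6_DEFAULT | CR6_PM | CR6_PBF;
    update_cr6(cr6, ioaddr);    /* write DCR6 and let it settle */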
204 | /* Global variable declaration ----------------------------- */ | ||
205 | static int __devinitdata printed_version; | ||
206 | static char version[] __devinitdata = | ||
207 | KERN_INFO DRV_NAME ": ULi M5261/M5263 net driver, version " | ||
208 | DRV_VERSION " (" DRV_RELDATE ")\n"; | ||
209 | |||
210 | static int uli526x_debug; | ||
211 | static unsigned char uli526x_media_mode = ULI526X_AUTO; | ||
212 | static u32 uli526x_cr6_user_set; | ||
213 | |||
214 | /* For module input parameter */ | ||
215 | static int debug; | ||
216 | static u32 cr6set; | ||
217 | static unsigned char mode = 8; | ||
218 | |||
219 | /* function declaration ------------------------------------- */ | ||
220 | static int uli526x_open(struct net_device *); | ||
221 | static int uli526x_start_xmit(struct sk_buff *, struct net_device *); | ||
222 | static int uli526x_stop(struct net_device *); | ||
223 | static struct net_device_stats * uli526x_get_stats(struct net_device *); | ||
224 | static void uli526x_set_filter_mode(struct net_device *); | ||
225 | static struct ethtool_ops netdev_ethtool_ops; | ||
226 | static u16 read_srom_word(long, int); | ||
227 | static irqreturn_t uli526x_interrupt(int, void *, struct pt_regs *); | ||
228 | static void uli526x_descriptor_init(struct uli526x_board_info *, unsigned long); | ||
229 | static void allocate_rx_buffer(struct uli526x_board_info *); | ||
230 | static void update_cr6(u32, unsigned long); | ||
231 | static void send_filter_frame(struct net_device *, int); | ||
232 | static u16 phy_read(unsigned long, u8, u8, u32); | ||
233 | static u16 phy_readby_cr10(unsigned long, u8, u8); | ||
234 | static void phy_write(unsigned long, u8, u8, u16, u32); | ||
235 | static void phy_writeby_cr10(unsigned long, u8, u8, u16); | ||
236 | static void phy_write_1bit(unsigned long, u32, u32); | ||
237 | static u16 phy_read_1bit(unsigned long, u32); | ||
238 | static u8 uli526x_sense_speed(struct uli526x_board_info *); | ||
239 | static void uli526x_process_mode(struct uli526x_board_info *); | ||
240 | static void uli526x_timer(unsigned long); | ||
241 | static void uli526x_rx_packet(struct net_device *, struct uli526x_board_info *); | ||
242 | static void uli526x_free_tx_pkt(struct net_device *, struct uli526x_board_info *); | ||
243 | static void uli526x_reuse_skb(struct uli526x_board_info *, struct sk_buff *); | ||
244 | static void uli526x_dynamic_reset(struct net_device *); | ||
245 | static void uli526x_free_rxbuffer(struct uli526x_board_info *); | ||
246 | static void uli526x_init(struct net_device *); | ||
247 | static void uli526x_set_phyxcer(struct uli526x_board_info *); | ||
248 | |||
249 | /* ULI526X network board routine ---------------------------- */ | ||
250 | |||
251 | /* | ||
252 | * Search ULI526X board, allocate space and register it | ||
253 | */ | ||
254 | |||
255 | static int __devinit uli526x_init_one (struct pci_dev *pdev, | ||
256 | const struct pci_device_id *ent) | ||
257 | { | ||
258 | struct uli526x_board_info *db; /* board information structure */ | ||
259 | struct net_device *dev; | ||
260 | int i, err; | ||
261 | |||
262 | ULI526X_DBUG(0, "uli526x_init_one()", 0); | ||
263 | |||
264 | if (!printed_version++) | ||
265 | printk(version); | ||
266 | |||
267 | /* Init network device */ | ||
268 | dev = alloc_etherdev(sizeof(*db)); | ||
269 | if (dev == NULL) | ||
270 | return -ENOMEM; | ||
271 | SET_MODULE_OWNER(dev); | ||
272 | SET_NETDEV_DEV(dev, &pdev->dev); | ||
273 | |||
274 | if (pci_set_dma_mask(pdev, DMA_32BIT_MASK)) { | ||
275 | printk(KERN_WARNING DRV_NAME ": 32-bit PCI DMA not available.\n"); | ||
276 | err = -ENODEV; | ||
277 | goto err_out_free; | ||
278 | } | ||
279 | |||
280 | /* Enable Master/IO access, Disable memory access */ | ||
281 | err = pci_enable_device(pdev); | ||
282 | if (err) | ||
283 | goto err_out_free; | ||
284 | |||
285 | if (!pci_resource_start(pdev, 0)) { | ||
286 | printk(KERN_ERR DRV_NAME ": I/O base is zero\n"); | ||
287 | err = -ENODEV; | ||
288 | goto err_out_disable; | ||
289 | } | ||
290 | |||
291 | if (pci_resource_len(pdev, 0) < (ULI526X_IO_SIZE) ) { | ||
292 | printk(KERN_ERR DRV_NAME ": Allocated I/O size too small\n"); | ||
293 | err = -ENODEV; | ||
294 | goto err_out_disable; | ||
295 | } | ||
296 | |||
297 | if (pci_request_regions(pdev, DRV_NAME)) { | ||
298 | printk(KERN_ERR DRV_NAME ": Failed to request PCI regions\n"); | ||
299 | err = -ENODEV; | ||
300 | goto err_out_disable; | ||
301 | } | ||
302 | |||
303 | /* Init system & device */ | ||
304 | db = netdev_priv(dev); | ||
305 | |||
306 | /* Allocate Tx/Rx descriptor memory */ | ||
307 | db->desc_pool_ptr = pci_alloc_consistent(pdev, sizeof(struct tx_desc) * DESC_ALL_CNT + 0x20, &db->desc_pool_dma_ptr); | ||
308 | if(db->desc_pool_ptr == NULL) | ||
309 | { | ||
310 | err = -ENOMEM; | ||
311 | goto err_out_nomem; | ||
312 | } | ||
313 | db->buf_pool_ptr = pci_alloc_consistent(pdev, TX_BUF_ALLOC * TX_DESC_CNT + 4, &db->buf_pool_dma_ptr); | ||
314 | if(db->buf_pool_ptr == NULL) | ||
315 | { | ||
316 | err = -ENOMEM; | ||
317 | goto err_out_nomem; | ||
318 | } | ||
319 | |||
320 | db->first_tx_desc = (struct tx_desc *) db->desc_pool_ptr; | ||
321 | db->first_tx_desc_dma = db->desc_pool_dma_ptr; | ||
322 | db->buf_pool_start = db->buf_pool_ptr; | ||
323 | db->buf_pool_dma_start = db->buf_pool_dma_ptr; | ||
324 | |||
325 | db->chip_id = ent->driver_data; | ||
326 | db->ioaddr = pci_resource_start(pdev, 0); | ||
327 | |||
328 | db->pdev = pdev; | ||
329 | db->init = 1; | ||
330 | |||
331 | dev->base_addr = db->ioaddr; | ||
332 | dev->irq = pdev->irq; | ||
333 | pci_set_drvdata(pdev, dev); | ||
334 | |||
335 | /* Register some necessary functions */ | ||
336 | dev->open = &uli526x_open; | ||
337 | dev->hard_start_xmit = &uli526x_start_xmit; | ||
338 | dev->stop = &uli526x_stop; | ||
339 | dev->get_stats = &uli526x_get_stats; | ||
340 | dev->set_multicast_list = &uli526x_set_filter_mode; | ||
341 | dev->ethtool_ops = &netdev_ethtool_ops; | ||
342 | spin_lock_init(&db->lock); | ||
343 | |||
344 | |||
345 | /* Read 64 words of SROM data */ | ||
346 | for (i = 0; i < 64; i++) | ||
347 | ((u16 *) db->srom)[i] = cpu_to_le16(read_srom_word(db->ioaddr, i)); | ||
348 | |||
349 | /* Set Node address */ | ||
350 | if(((u16 *) db->srom)[0] == 0xffff || ((u16 *) db->srom)[0] == 0) /* SROM absent, so read MAC address from ID Table */ | ||
351 | { | ||
352 | outl(0x10000, db->ioaddr + DCR0); //Diagnosis mode | ||
353 | outl(0x1c0, db->ioaddr + DCR13); //Reset diagnostic pointer port | ||
354 | outl(0, db->ioaddr + DCR14); //Clear reset port | ||
355 | outl(0x10, db->ioaddr + DCR14); //Reset ID Table pointer | ||
356 | outl(0, db->ioaddr + DCR14); //Clear reset port | ||
357 | outl(0, db->ioaddr + DCR13); //Clear CR13 | ||
358 | outl(0x1b0, db->ioaddr + DCR13); //Select ID Table access port | ||
359 | //Read MAC address from CR14 | ||
360 | for (i = 0; i < 6; i++) | ||
361 | dev->dev_addr[i] = inl(db->ioaddr + DCR14); | ||
362 | //Read end | ||
363 | outl(0, db->ioaddr + DCR13); //Clear CR13 | ||
364 | outl(0, db->ioaddr + DCR0); //Clear CR0 | ||
365 | udelay(10); | ||
366 | } | ||
367 | else /*Exist SROM*/ | ||
368 | { | ||
369 | for (i = 0; i < 6; i++) | ||
370 | dev->dev_addr[i] = db->srom[20 + i]; | ||
371 | } | ||
372 | err = register_netdev (dev); | ||
373 | if (err) | ||
374 | goto err_out_res; | ||
375 | |||
376 | printk(KERN_INFO "%s: ULi M%04lx at pci%s,",dev->name,ent->driver_data >> 16,pci_name(pdev)); | ||
377 | |||
378 | for (i = 0; i < 6; i++) | ||
379 | printk("%c%02x", i ? ':' : ' ', dev->dev_addr[i]); | ||
380 | printk(", irq %d.\n", dev->irq); | ||
381 | |||
382 | pci_set_master(pdev); | ||
383 | |||
384 | return 0; | ||
385 | |||
386 | err_out_res: | ||
387 | err_out_nomem: | ||
388 | if(db->desc_pool_ptr) | ||
389 | pci_free_consistent(pdev, sizeof(struct tx_desc) * DESC_ALL_CNT + 0x20, | ||
390 | db->desc_pool_ptr, db->desc_pool_dma_ptr); | ||
391 | |||
392 | if(db->buf_pool_ptr != NULL) | ||
393 | pci_free_consistent(pdev, TX_BUF_ALLOC * TX_DESC_CNT + 4, | ||
394 | db->buf_pool_ptr, db->buf_pool_dma_ptr); | ||
395 | pci_release_regions(pdev); /* regions are held in both of these error paths */ | ||
396 | err_out_disable: | ||
397 | pci_disable_device(pdev); | ||
398 | err_out_free: | ||
399 | pci_set_drvdata(pdev, NULL); | ||
400 | free_netdev(dev); | ||
401 | |||
402 | return err; | ||
403 | } | ||
404 | |||
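uli526x_init_one() above follows the kernel's goto-unwind convention: each acquisition gets a label, and a failure jumps to the label that releases everything acquired so far, in reverse order. A stripped-down editorial sketch of the shape (acquire_a/acquire_b/release_a are hypothetical stand-ins for calls like pci_enable_device() and pci_request_regions()):

    static int example_probe(void)
    {
        int err;

        err = acquire_a();
        if (err)
            return err;

        err = acquire_b();
        if (err)
            goto err_release_a;    /* undo only what succeeded */

        return 0;

    err_release_a:
        release_a();
        return err;
    }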
405 | |||
406 | static void __devexit uli526x_remove_one (struct pci_dev *pdev) | ||
407 | { | ||
408 | struct net_device *dev = pci_get_drvdata(pdev); | ||
409 | struct uli526x_board_info *db = netdev_priv(dev); | ||
410 | |||
411 | ULI526X_DBUG(0, "uli526x_remove_one()", 0); | ||
412 | |||
413 | pci_free_consistent(db->pdev, sizeof(struct tx_desc) * | ||
414 | DESC_ALL_CNT + 0x20, db->desc_pool_ptr, | ||
415 | db->desc_pool_dma_ptr); | ||
416 | pci_free_consistent(db->pdev, TX_BUF_ALLOC * TX_DESC_CNT + 4, | ||
417 | db->buf_pool_ptr, db->buf_pool_dma_ptr); | ||
418 | unregister_netdev(dev); | ||
419 | pci_release_regions(pdev); | ||
420 | free_netdev(dev); /* free board information */ | ||
421 | pci_set_drvdata(pdev, NULL); | ||
422 | pci_disable_device(pdev); | ||
423 | ULI526X_DBUG(0, "uli526x_remove_one() exit", 0); | ||
424 | } | ||
425 | |||
426 | |||
427 | /* | ||
428 | * Open the interface. | ||
429 | * The interface is opened whenever "ifconfig" activates it. | ||
430 | */ | ||
431 | |||
432 | static int uli526x_open(struct net_device *dev) | ||
433 | { | ||
434 | int ret; | ||
435 | struct uli526x_board_info *db = netdev_priv(dev); | ||
436 | |||
437 | ULI526X_DBUG(0, "uli526x_open", 0); | ||
438 | |||
439 | ret = request_irq(dev->irq, &uli526x_interrupt, SA_SHIRQ, dev->name, dev); | ||
440 | if (ret) | ||
441 | return ret; | ||
442 | |||
443 | /* system variable init */ | ||
444 | db->cr6_data = CR6_DEFAULT | uli526x_cr6_user_set; | ||
445 | db->tx_packet_cnt = 0; | ||
446 | db->rx_avail_cnt = 0; | ||
447 | db->link_failed = 1; | ||
448 | netif_carrier_off(dev); | ||
449 | db->wait_reset = 0; | ||
450 | |||
451 | db->NIC_capability = 0xf; /* All capability*/ | ||
452 | db->PHY_reg4 = 0x1e0; | ||
453 | |||
454 | /* CR6 operation mode decision */ | ||
455 | db->cr6_data |= ULI526X_TXTH_256; | ||
456 | db->cr0_data = CR0_DEFAULT; | ||
457 | |||
458 | /* Initialize ULI526X board */ | ||
459 | uli526x_init(dev); | ||
460 | |||
461 | /* Activate the system interface */ | ||
462 | netif_wake_queue(dev); | ||
463 | |||
464 | /* Set and activate the timer process */ | ||
465 | init_timer(&db->timer); | ||
466 | db->timer.expires = ULI526X_TIMER_WUT + HZ * 2; | ||
467 | db->timer.data = (unsigned long)dev; | ||
468 | db->timer.function = &uli526x_timer; | ||
469 | add_timer(&db->timer); | ||
470 | |||
471 | return 0; | ||
472 | } | ||
473 | |||
474 | |||
475 | /* Initialize ULI526X board | ||
476 | * Reset ULI526X board | ||
477 | * Initialize TX/Rx descriptor chain structure | ||
478 | * Send the set-up frame | ||
479 | * Enable Tx/Rx machine | ||
480 | */ | ||
481 | |||
482 | static void uli526x_init(struct net_device *dev) | ||
483 | { | ||
484 | struct uli526x_board_info *db = netdev_priv(dev); | ||
485 | unsigned long ioaddr = db->ioaddr; | ||
486 | u8 phy_tmp; | ||
487 | u16 phy_value; | ||
488 | u16 phy_reg_reset; | ||
489 | |||
490 | ULI526X_DBUG(0, "uli526x_init()", 0); | ||
491 | |||
492 | /* Reset M526x MAC controller */ | ||
493 | outl(ULI526X_RESET, ioaddr + DCR0); /* RESET MAC */ | ||
494 | udelay(100); | ||
495 | outl(db->cr0_data, ioaddr + DCR0); | ||
496 | udelay(5); | ||
497 | |||
498 | /* Phy addr: on some boards the M5261/M5263 PHY address != 1 */ | ||
499 | db->phy_addr = 1; | ||
500 | for (phy_tmp = 0; phy_tmp < 32; phy_tmp++) | ||
501 | { | ||
502 | phy_value = phy_read(db->ioaddr, phy_tmp, 3, db->chip_id); /* read PHY ID2 register */ | ||
503 | if (phy_value != 0xffff && phy_value != 0) | ||
504 | { | ||
505 | db->phy_addr = phy_tmp; | ||
506 | break; | ||
507 | } | ||
508 | } | ||
509 | if (phy_tmp == 32) | ||
510 | printk(KERN_WARNING DRV_NAME ": Cannot find the PHY address!\n"); | ||
511 | /* Parse SROM and media mode */ | ||
512 | db->media_mode = uli526x_media_mode; | ||
513 | |||
514 | /* Phyxcer capability setting */ | ||
515 | phy_reg_reset = phy_read(db->ioaddr, db->phy_addr, 0, db->chip_id); | ||
516 | phy_reg_reset = (phy_reg_reset | 0x8000); | ||
517 | phy_write(db->ioaddr, db->phy_addr, 0, phy_reg_reset, db->chip_id); | ||
518 | udelay(500); | ||
519 | |||
520 | /* Process Phyxcer Media Mode */ | ||
521 | uli526x_set_phyxcer(db); | ||
522 | |||
523 | /* Media Mode Process */ | ||
524 | if ( !(db->media_mode & ULI526X_AUTO) ) | ||
525 | db->op_mode = db->media_mode; /* Force Mode */ | ||
526 | |||
527 | /* Initialize Transmit/Receive descriptors and CR3/4 */ | ||
528 | uli526x_descriptor_init(db, ioaddr); | ||
529 | |||
530 | /* Init CR6 to program M526X operation */ | ||
531 | update_cr6(db->cr6_data, ioaddr); | ||
532 | |||
533 | /* Send setup frame */ | ||
534 | send_filter_frame(dev, dev->mc_count); /* M5261/M5263 */ | ||
535 | |||
536 | /* Init CR7, interrupt active bit */ | ||
537 | db->cr7_data = CR7_DEFAULT; | ||
538 | outl(db->cr7_data, ioaddr + DCR7); | ||
539 | |||
540 | /* Init CR15, Tx jabber and Rx watchdog timer */ | ||
541 | outl(db->cr15_data, ioaddr + DCR15); | ||
542 | |||
543 | /* Enable ULI526X Tx/Rx function */ | ||
544 | db->cr6_data |= CR6_RXSC | CR6_TXSC; | ||
545 | update_cr6(db->cr6_data, ioaddr); | ||
546 | } | ||
547 | |||
548 | |||
549 | /* | ||
550 | * Hardware start transmission. | ||
551 | * Send a packet to media from the upper layer. | ||
552 | */ | ||
553 | |||
554 | static int uli526x_start_xmit(struct sk_buff *skb, struct net_device *dev) | ||
555 | { | ||
556 | struct uli526x_board_info *db = netdev_priv(dev); | ||
557 | struct tx_desc *txptr; | ||
558 | unsigned long flags; | ||
559 | |||
560 | ULI526X_DBUG(0, "uli526x_start_xmit", 0); | ||
561 | |||
562 | /* Resource flag check */ | ||
563 | netif_stop_queue(dev); | ||
564 | |||
565 | /* Too large packet check */ | ||
566 | if (skb->len > MAX_PACKET_SIZE) { | ||
567 | printk(KERN_ERR DRV_NAME ": big packet = %d\n", (u16)skb->len); | ||
568 | dev_kfree_skb(skb); | ||
569 | return 0; | ||
570 | } | ||
571 | |||
572 | spin_lock_irqsave(&db->lock, flags); | ||
573 | |||
574 | /* No Tx resource check; this should never happen normally */ | ||
575 | if (db->tx_packet_cnt >= TX_FREE_DESC_CNT) { | ||
576 | spin_unlock_irqrestore(&db->lock, flags); | ||
577 | printk(KERN_ERR DRV_NAME ": No Tx resource %ld\n", db->tx_packet_cnt); | ||
578 | return 1; | ||
579 | } | ||
580 | |||
581 | /* Disable NIC interrupt */ | ||
582 | outl(0, dev->base_addr + DCR7); | ||
583 | |||
584 | /* transmit this packet */ | ||
585 | txptr = db->tx_insert_ptr; | ||
586 | memcpy(txptr->tx_buf_ptr, skb->data, skb->len); | ||
587 | txptr->tdes1 = cpu_to_le32(0xe1000000 | skb->len); | ||
588 | |||
589 | /* Point to next transmit free descriptor */ | ||
590 | db->tx_insert_ptr = txptr->next_tx_desc; | ||
591 | |||
592 | /* Transmit Packet Process */ | ||
593 | if ( (db->tx_packet_cnt < TX_DESC_CNT) ) { | ||
594 | txptr->tdes0 = cpu_to_le32(0x80000000); /* Set owner bit */ | ||
595 | db->tx_packet_cnt++; /* Ready to send */ | ||
596 | outl(0x1, dev->base_addr + DCR1); /* Issue Tx polling */ | ||
597 | dev->trans_start = jiffies; /* saved time stamp */ | ||
598 | } | ||
599 | |||
600 | /* Tx resource check */ | ||
601 | if ( db->tx_packet_cnt < TX_FREE_DESC_CNT ) | ||
602 | netif_wake_queue(dev); | ||
603 | |||
604 | /* Restore CR7 to enable interrupt */ | ||
605 | spin_unlock_irqrestore(&db->lock, flags); | ||
606 | outl(db->cr7_data, dev->base_addr + DCR7); | ||
607 | |||
608 | /* free this SKB */ | ||
609 | dev_kfree_skb(skb); | ||
610 | |||
611 | return 0; | ||
612 | } | ||
613 | |||
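A design note on the transmit path above: the driver copies each skb into one of the preallocated 0x600-byte bounce buffers (TX_BUF_ALLOC) wired to the descriptors at init time, then frees the skb immediately. That avoids a per-packet pci_map_single() call at the cost of one memcpy per packet, a reasonable trade for a 100 Mbps part; it also explains why MAX_PACKET_SIZE is checked up front, since every frame must fit the fixed buffer.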
614 | |||
615 | /* | ||
616 | * Stop the interface. | ||
617 | * The interface is stopped when it is brought down. | ||
618 | */ | ||
619 | |||
620 | static int uli526x_stop(struct net_device *dev) | ||
621 | { | ||
622 | struct uli526x_board_info *db = netdev_priv(dev); | ||
623 | unsigned long ioaddr = dev->base_addr; | ||
624 | |||
625 | ULI526X_DBUG(0, "uli526x_stop", 0); | ||
626 | |||
627 | /* disable system */ | ||
628 | netif_stop_queue(dev); | ||
629 | |||
630 | /* Delete the timer */ | ||
631 | del_timer_sync(&db->timer); | ||
632 | |||
633 | /* Reset & stop ULI526X board */ | ||
634 | outl(ULI526X_RESET, ioaddr + DCR0); | ||
635 | udelay(5); | ||
636 | phy_write(db->ioaddr, db->phy_addr, 0, 0x8000, db->chip_id); | ||
637 | |||
638 | /* free interrupt */ | ||
639 | free_irq(dev->irq, dev); | ||
640 | |||
641 | /* free allocated rx buffer */ | ||
642 | uli526x_free_rxbuffer(db); | ||
643 | |||
644 | #if 0 | ||
645 | /* show statistic counter */ | ||
646 | printk(DRV_NAME ": FU:%lx EC:%lx LC:%lx NC:%lx LOC:%lx TXJT:%lx RESET:%lx RCR8:%lx FAL:%lx TT:%lx\n", | ||
647 | db->tx_fifo_underrun, db->tx_excessive_collision, | ||
648 | db->tx_late_collision, db->tx_no_carrier, db->tx_loss_carrier, | ||
649 | db->tx_jabber_timeout, db->reset_count, db->reset_cr8, | ||
650 | db->reset_fatal, db->reset_TXtimeout); | ||
651 | #endif | ||
652 | |||
653 | return 0; | ||
654 | } | ||
655 | |||
656 | |||
657 | /* | ||
658 | * M5261/M5263 interrupt handler | ||
659 | * receive the packet to upper layer, free the transmitted packet | ||
660 | */ | ||
661 | |||
662 | static irqreturn_t uli526x_interrupt(int irq, void *dev_id, struct pt_regs *regs) | ||
663 | { | ||
664 | struct net_device *dev = dev_id; | ||
665 | struct uli526x_board_info *db = netdev_priv(dev); | ||
666 | unsigned long ioaddr; | ||
667 | unsigned long flags; | ||
668 | |||
669 | if (!dev) { | ||
670 | ULI526X_DBUG(1, "uli526x_interrupt() without DEVICE arg", 0); | ||
671 | return IRQ_NONE; | ||
672 | } | ||
673 | ioaddr = dev->base_addr; /* dereference only after the NULL check */ | ||
674 | spin_lock_irqsave(&db->lock, flags); | ||
675 | outl(0, ioaddr + DCR7); | ||
676 | |||
677 | /* Got ULI526X status */ | ||
678 | db->cr5_data = inl(ioaddr + DCR5); | ||
679 | outl(db->cr5_data, ioaddr + DCR5); | ||
680 | if ( !(db->cr5_data & 0x180c1) ) { | ||
681 | spin_unlock_irqrestore(&db->lock, flags); | ||
682 | outl(db->cr7_data, ioaddr + DCR7); | ||
683 | return IRQ_HANDLED; | ||
684 | } | ||
685 | |||
686 | /* Check system status */ | ||
687 | if (db->cr5_data & 0x2000) { | ||
688 | /* system bus error happen */ | ||
689 | ULI526X_DBUG(1, "System bus error happen. CR5=", db->cr5_data); | ||
690 | db->reset_fatal++; | ||
691 | db->wait_reset = 1; /* Need to RESET */ | ||
692 | spin_unlock_irqrestore(&db->lock, flags); | ||
693 | return IRQ_HANDLED; | ||
694 | } | ||
695 | |||
696 | /* Received the coming packet */ | ||
697 | if ( (db->cr5_data & 0x40) && db->rx_avail_cnt ) | ||
698 | uli526x_rx_packet(dev, db); | ||
699 | |||
700 | /* reallocate rx descriptor buffer */ | ||
701 | if (db->rx_avail_cnt<RX_DESC_CNT) | ||
702 | allocate_rx_buffer(db); | ||
703 | |||
704 | /* Free the transmitted descriptor */ | ||
705 | if ( db->cr5_data & 0x01) | ||
706 | uli526x_free_tx_pkt(dev, db); | ||
707 | |||
708 | /* Restore CR7 to enable interrupt mask */ | ||
709 | outl(db->cr7_data, ioaddr + DCR7); | ||
710 | |||
711 | spin_unlock_irqrestore(&db->lock, flags); | ||
712 | return IRQ_HANDLED; | ||
713 | } | ||
714 | |||
715 | |||
716 | /* | ||
717 | * Free TX resource after TX complete | ||
718 | */ | ||
719 | |||
720 | static void uli526x_free_tx_pkt(struct net_device *dev, struct uli526x_board_info * db) | ||
721 | { | ||
722 | struct tx_desc *txptr; | ||
723 | u32 tdes0; | ||
724 | |||
725 | txptr = db->tx_remove_ptr; | ||
726 | while(db->tx_packet_cnt) { | ||
727 | tdes0 = le32_to_cpu(txptr->tdes0); | ||
728 | /* printk(DRV_NAME ": tdes0=%x\n", tdes0); */ | ||
729 | if (tdes0 & 0x80000000) | ||
730 | break; | ||
731 | |||
732 | /* A packet sent completed */ | ||
733 | db->tx_packet_cnt--; | ||
734 | db->stats.tx_packets++; | ||
735 | |||
736 | /* Transmit statistic counter */ | ||
737 | if ( tdes0 != 0x7fffffff ) { | ||
738 | /* printk(DRV_NAME ": tdes0=%x\n", tdes0); */ | ||
739 | db->stats.collisions += (tdes0 >> 3) & 0xf; | ||
740 | db->stats.tx_bytes += le32_to_cpu(txptr->tdes1) & 0x7ff; | ||
741 | if (tdes0 & TDES0_ERR_MASK) { | ||
742 | db->stats.tx_errors++; | ||
743 | if (tdes0 & 0x0002) { /* UnderRun */ | ||
744 | db->tx_fifo_underrun++; | ||
745 | if ( !(db->cr6_data & CR6_SFT) ) { | ||
746 | db->cr6_data = db->cr6_data | CR6_SFT; | ||
747 | update_cr6(db->cr6_data, db->ioaddr); | ||
748 | } | ||
749 | } | ||
750 | if (tdes0 & 0x0100) | ||
751 | db->tx_excessive_collision++; | ||
752 | if (tdes0 & 0x0200) | ||
753 | db->tx_late_collision++; | ||
754 | if (tdes0 & 0x0400) | ||
755 | db->tx_no_carrier++; | ||
756 | if (tdes0 & 0x0800) | ||
757 | db->tx_loss_carrier++; | ||
758 | if (tdes0 & 0x4000) | ||
759 | db->tx_jabber_timeout++; | ||
760 | } | ||
761 | } | ||
762 | |||
763 | txptr = txptr->next_tx_desc; | ||
764 | }/* End of while */ | ||
765 | |||
766 | /* Update TX remove pointer to next */ | ||
767 | db->tx_remove_ptr = txptr; | ||
768 | |||
769 | /* Resource available check */ | ||
770 | if ( db->tx_packet_cnt < TX_WAKE_DESC_CNT ) | ||
771 | netif_wake_queue(dev); /* Active upper layer, send again */ | ||
772 | } | ||
773 | |||
774 | |||
775 | /* | ||
776 | * Receive the incoming packet and pass it to the upper layer | ||
777 | */ | ||
778 | |||
779 | static void uli526x_rx_packet(struct net_device *dev, struct uli526x_board_info * db) | ||
780 | { | ||
781 | struct rx_desc *rxptr; | ||
782 | struct sk_buff *skb; | ||
783 | int rxlen; | ||
784 | u32 rdes0; | ||
785 | |||
786 | rxptr = db->rx_ready_ptr; | ||
787 | |||
788 | while(db->rx_avail_cnt) { | ||
789 | rdes0 = le32_to_cpu(rxptr->rdes0); | ||
790 | if (rdes0 & 0x80000000) /* packet owner check */ | ||
791 | { | ||
792 | break; | ||
793 | } | ||
794 | |||
795 | db->rx_avail_cnt--; | ||
796 | db->interval_rx_cnt++; | ||
797 | |||
798 | pci_unmap_single(db->pdev, le32_to_cpu(rxptr->rdes2), RX_ALLOC_SIZE, PCI_DMA_FROMDEVICE); | ||
799 | if ( (rdes0 & 0x300) != 0x300) { | ||
800 | /* A packet without First/Last flag */ | ||
801 | /* reuse this SKB */ | ||
802 | ULI526X_DBUG(0, "Reuse SK buffer, rdes0", rdes0); | ||
803 | uli526x_reuse_skb(db, rxptr->rx_skb_ptr); | ||
804 | } else { | ||
805 | /* A packet with First/Last flag */ | ||
806 | rxlen = ( (rdes0 >> 16) & 0x3fff) - 4; | ||
807 | |||
808 | /* error summary bit check */ | ||
809 | if (rdes0 & 0x8000) { | ||
810 | /* This is an error packet */ | ||
811 | //printk(DRV_NAME ": rdes0: %lx\n", rdes0); | ||
812 | db->stats.rx_errors++; | ||
813 | if (rdes0 & 1) | ||
814 | db->stats.rx_fifo_errors++; | ||
815 | if (rdes0 & 2) | ||
816 | db->stats.rx_crc_errors++; | ||
817 | if (rdes0 & 0x80) | ||
818 | db->stats.rx_length_errors++; | ||
819 | } | ||
820 | |||
821 | if ( !(rdes0 & 0x8000) || | ||
822 | ((db->cr6_data & CR6_PM) && (rxlen>6)) ) { | ||
823 | skb = rxptr->rx_skb_ptr; | ||
824 | |||
825 | /* Good packet, send to upper layer */ | ||
826 | /* Short packets are copied into a new SKB */ | ||
827 | if ( (rxlen < RX_COPY_SIZE) && | ||
828 | ( (skb = dev_alloc_skb(rxlen + 2) ) | ||
829 | != NULL) ) { | ||
830 | /* size less than COPY_SIZE, allocate a rxlen SKB */ | ||
831 | skb->dev = dev; | ||
832 | skb_reserve(skb, 2); /* 16byte align */ | ||
833 | memcpy(skb_put(skb, rxlen), rxptr->rx_skb_ptr->tail, rxlen); | ||
834 | uli526x_reuse_skb(db, rxptr->rx_skb_ptr); | ||
835 | } else { | ||
836 | skb->dev = dev; | ||
837 | skb_put(skb, rxlen); | ||
838 | } | ||
839 | skb->protocol = eth_type_trans(skb, dev); | ||
840 | netif_rx(skb); | ||
841 | dev->last_rx = jiffies; | ||
842 | db->stats.rx_packets++; | ||
843 | db->stats.rx_bytes += rxlen; | ||
844 | |||
845 | } else { | ||
846 | /* Reuse SKB buffer when the packet is error */ | ||
847 | ULI526X_DBUG(0, "Reuse SK buffer, rdes0", rdes0); | ||
848 | uli526x_reuse_skb(db, rxptr->rx_skb_ptr); | ||
849 | } | ||
850 | } | ||
851 | |||
852 | rxptr = rxptr->next_rx_desc; | ||
853 | } | ||
854 | |||
855 | db->rx_ready_ptr = rxptr; | ||
856 | } | ||
857 | |||
858 | |||
859 | /* | ||
860 | * Get statistics from driver. | ||
861 | */ | ||
862 | |||
863 | static struct net_device_stats * uli526x_get_stats(struct net_device *dev) | ||
864 | { | ||
865 | struct uli526x_board_info *db = netdev_priv(dev); | ||
866 | |||
867 | ULI526X_DBUG(0, "uli526x_get_stats", 0); | ||
868 | return &db->stats; | ||
869 | } | ||
870 | |||
871 | |||
872 | /* | ||
873 | * Set ULI526X multicast address | ||
874 | */ | ||
875 | |||
876 | static void uli526x_set_filter_mode(struct net_device * dev) | ||
877 | { | ||
878 | struct uli526x_board_info *db = netdev_priv(dev); | ||
879 | unsigned long flags; | ||
880 | |||
881 | ULI526X_DBUG(0, "uli526x_set_filter_mode()", 0); | ||
882 | spin_lock_irqsave(&db->lock, flags); | ||
883 | |||
884 | if (dev->flags & IFF_PROMISC) { | ||
885 | ULI526X_DBUG(0, "Enable PROM Mode", 0); | ||
886 | db->cr6_data |= CR6_PM | CR6_PBF; | ||
887 | update_cr6(db->cr6_data, db->ioaddr); | ||
888 | spin_unlock_irqrestore(&db->lock, flags); | ||
889 | return; | ||
890 | } | ||
891 | |||
892 | if (dev->flags & IFF_ALLMULTI || dev->mc_count > ULI5261_MAX_MULTICAST) { | ||
893 | ULI526X_DBUG(0, "Pass all multicast address", dev->mc_count); | ||
894 | db->cr6_data &= ~(CR6_PM | CR6_PBF); | ||
895 | db->cr6_data |= CR6_PAM; | ||
896 | spin_unlock_irqrestore(&db->lock, flags); | ||
897 | return; | ||
898 | } | ||
899 | |||
900 | ULI526X_DBUG(0, "Set multicast address", dev->mc_count); | ||
901 | send_filter_frame(dev, dev->mc_count); /* M5261/M5263 */ | ||
902 | spin_unlock_irqrestore(&db->lock, flags); | ||
903 | } | ||
904 | |||
905 | static void | ||
906 | ULi_ethtool_gset(struct uli526x_board_info *db, struct ethtool_cmd *ecmd) | ||
907 | { | ||
908 | ecmd->supported = (SUPPORTED_10baseT_Half | | ||
909 | SUPPORTED_10baseT_Full | | ||
910 | SUPPORTED_100baseT_Half | | ||
911 | SUPPORTED_100baseT_Full | | ||
912 | SUPPORTED_Autoneg | | ||
913 | SUPPORTED_MII); | ||
914 | |||
915 | ecmd->advertising = (ADVERTISED_10baseT_Half | | ||
916 | ADVERTISED_10baseT_Full | | ||
917 | ADVERTISED_100baseT_Half | | ||
918 | ADVERTISED_100baseT_Full | | ||
919 | ADVERTISED_Autoneg | | ||
920 | ADVERTISED_MII); | ||
921 | |||
922 | |||
923 | ecmd->port = PORT_MII; | ||
924 | ecmd->phy_address = db->phy_addr; | ||
925 | |||
926 | ecmd->transceiver = XCVR_EXTERNAL; | ||
927 | |||
928 | ecmd->speed = 10; | ||
929 | ecmd->duplex = DUPLEX_HALF; | ||
930 | |||
931 | if(db->op_mode==ULI526X_100MHF || db->op_mode==ULI526X_100MFD) | ||
932 | { | ||
933 | ecmd->speed = 100; | ||
934 | } | ||
935 | if(db->op_mode==ULI526X_10MFD || db->op_mode==ULI526X_100MFD) | ||
936 | { | ||
937 | ecmd->duplex = DUPLEX_FULL; | ||
938 | } | ||
939 | if(db->link_failed) | ||
940 | { | ||
941 | ecmd->speed = -1; | ||
942 | ecmd->duplex = -1; | ||
943 | } | ||
944 | |||
945 | if (db->media_mode & ULI526X_AUTO) | ||
946 | { | ||
947 | ecmd->autoneg = AUTONEG_ENABLE; | ||
948 | } | ||
949 | } | ||
950 | |||
951 | static void netdev_get_drvinfo(struct net_device *dev, | ||
952 | struct ethtool_drvinfo *info) | ||
953 | { | ||
954 | struct uli526x_board_info *np = netdev_priv(dev); | ||
955 | |||
956 | strcpy(info->driver, DRV_NAME); | ||
957 | strcpy(info->version, DRV_VERSION); | ||
958 | if (np->pdev) | ||
959 | strcpy(info->bus_info, pci_name(np->pdev)); | ||
960 | else | ||
961 | sprintf(info->bus_info, "EISA 0x%lx %d", | ||
962 | dev->base_addr, dev->irq); | ||
963 | } | ||
964 | |||
965 | static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) | ||
966 | { | ||
967 | struct uli526x_board_info *np = netdev_priv(dev); | ||
968 | ULi_ethtool_gset(np, cmd); | ||
969 | |||
970 | return 0; | ||
971 | } | ||
972 | |||
973 | static u32 netdev_get_link(struct net_device *dev) | ||
974 | { | ||
975 | struct uli526x_board_info *np = netdev_priv(dev); | ||
976 | |||
977 | if (np->link_failed) | ||
978 | return 0; | ||
979 | return 1; | ||
980 | } | ||
981 | |||
982 | static void uli526x_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) | ||
983 | { | ||
984 | wol->supported = WAKE_PHY | WAKE_MAGIC; | ||
985 | wol->wolopts = 0; | ||
986 | } | ||
987 | |||
988 | static struct ethtool_ops netdev_ethtool_ops = { | ||
989 | .get_drvinfo = netdev_get_drvinfo, | ||
990 | .get_settings = netdev_get_settings, | ||
991 | .get_link = netdev_get_link, | ||
992 | .get_wol = uli526x_get_wol, | ||
993 | }; | ||
994 | |||
995 | /* | ||
996 | * A periodic timer routine | ||
997 | * Dynamic media sense, allocate Rx buffer... | ||
998 | */ | ||
999 | |||
1000 | static void uli526x_timer(unsigned long data) | ||
1001 | { | ||
1002 | u32 tmp_cr8; | ||
1003 | unsigned char tmp_cr12=0; | ||
1004 | struct net_device *dev = (struct net_device *) data; | ||
1005 | struct uli526x_board_info *db = netdev_priv(dev); | ||
1006 | unsigned long flags; | ||
1007 | u8 TmpSpeed=10; | ||
1008 | |||
1009 | //ULI526X_DBUG(0, "uli526x_timer()", 0); | ||
1010 | spin_lock_irqsave(&db->lock, flags); | ||
1011 | |||
1012 | |||
1013 | /* Dynamic reset ULI526X : system error or transmit time-out */ | ||
1014 | tmp_cr8 = inl(db->ioaddr + DCR8); | ||
1015 | if ( (db->interval_rx_cnt==0) && (tmp_cr8) ) { | ||
1016 | db->reset_cr8++; | ||
1017 | db->wait_reset = 1; | ||
1018 | } | ||
1019 | db->interval_rx_cnt = 0; | ||
1020 | |||
1021 | /* TX polling kick monitor */ | ||
1022 | if ( db->tx_packet_cnt && | ||
1023 | time_after(jiffies, dev->trans_start + ULI526X_TX_KICK) ) { | ||
1024 | outl(0x1, dev->base_addr + DCR1); // Tx polling again | ||
1025 | |||
1026 | // TX Timeout | ||
1027 | if ( time_after(jiffies, dev->trans_start + ULI526X_TX_TIMEOUT) ) { | ||
1028 | db->reset_TXtimeout++; | ||
1029 | db->wait_reset = 1; | ||
1030 | printk(KERN_WARNING "%s: Tx timeout - resetting\n", | ||
1031 | dev->name); | ||
1032 | } | ||
1033 | } | ||
1034 | |||
1035 | if (db->wait_reset) { | ||
1036 | ULI526X_DBUG(0, "Dynamic Reset device", db->tx_packet_cnt); | ||
1037 | db->reset_count++; | ||
1038 | uli526x_dynamic_reset(dev); | ||
1039 | db->timer.expires = ULI526X_TIMER_WUT; | ||
1040 | add_timer(&db->timer); | ||
1041 | spin_unlock_irqrestore(&db->lock, flags); | ||
1042 | return; | ||
1043 | } | ||
1044 | |||
1045 | /* Link status check, Dynamic media type change */ | ||
1046 | if((phy_read(db->ioaddr, db->phy_addr, 5, db->chip_id) & 0x01e0)!=0) | ||
1047 | tmp_cr12 = 3; | ||
1048 | |||
1049 | if ( !(tmp_cr12 & 0x3) && !db->link_failed ) { | ||
1050 | /* Link Failed */ | ||
1051 | ULI526X_DBUG(0, "Link Failed", tmp_cr12); | ||
1052 | netif_carrier_off(dev); | ||
1053 | printk(KERN_INFO "uli526x: %s NIC Link is Down\n",dev->name); | ||
1054 | db->link_failed = 1; | ||
1055 | |||
1056 | /* For Force 10/100M Half/Full mode: Enable Auto-Nego mode */ | ||
1057 | /* Not needed in AUTO mode */ | ||
1058 | if ( !(db->media_mode & 0x8) ) | ||
1059 | phy_write(db->ioaddr, db->phy_addr, 0, 0x1000, db->chip_id); | ||
1060 | |||
1061 | /* AUTO mode, if INT phyxcer link failed, select EXT device */ | ||
1062 | if (db->media_mode & ULI526X_AUTO) { | ||
1063 | db->cr6_data&=~0x00000200; /* bit9=0, HD mode */ | ||
1064 | update_cr6(db->cr6_data, db->ioaddr); | ||
1065 | } | ||
1066 | } else | ||
1067 | if ((tmp_cr12 & 0x3) && db->link_failed) { | ||
1068 | ULI526X_DBUG(0, "Link link OK", tmp_cr12); | ||
1069 | db->link_failed = 0; | ||
1070 | |||
1071 | /* Auto Sense Speed */ | ||
1072 | if ( (db->media_mode & ULI526X_AUTO) && | ||
1073 | uli526x_sense_speed(db) ) | ||
1074 | db->link_failed = 1; | ||
1075 | uli526x_process_mode(db); | ||
1076 | |||
1077 | if(db->link_failed==0) | ||
1078 | { | ||
1079 | if(db->op_mode==ULI526X_100MHF || db->op_mode==ULI526X_100MFD) | ||
1080 | { | ||
1081 | TmpSpeed = 100; | ||
1082 | } | ||
1083 | if(db->op_mode==ULI526X_10MFD || db->op_mode==ULI526X_100MFD) | ||
1084 | { | ||
1085 | printk(KERN_INFO "uli526x: %s NIC Link is Up %d Mbps Full duplex\n",dev->name,TmpSpeed); | ||
1086 | } | ||
1087 | else | ||
1088 | { | ||
1089 | printk(KERN_INFO "uli526x: %s NIC Link is Up %d Mbps Half duplex\n",dev->name,TmpSpeed); | ||
1090 | } | ||
1091 | netif_carrier_on(dev); | ||
1092 | } | ||
1093 | /* SHOW_MEDIA_TYPE(db->op_mode); */ | ||
1094 | } | ||
1095 | else if(!(tmp_cr12 & 0x3) && db->link_failed) | ||
1096 | { | ||
1097 | if(db->init==1) | ||
1098 | { | ||
1099 | printk(KERN_INFO "uli526x: %s NIC Link is Down\n",dev->name); | ||
1100 | netif_carrier_off(dev); | ||
1101 | } | ||
1102 | } | ||
1103 | db->init=0; | ||
1104 | |||
1105 | /* Timer active again */ | ||
1106 | db->timer.expires = ULI526X_TIMER_WUT; | ||
1107 | add_timer(&db->timer); | ||
1108 | spin_unlock_irqrestore(&db->lock, flags); | ||
1109 | } | ||
1110 | |||
1111 | |||
1112 | /* | ||
1113 | * Dynamic reset the ULI526X board | ||
1114 | * Stop ULI526X board | ||
1115 | * Free Tx/Rx allocated memory | ||
1116 | * Reset ULI526X board | ||
1117 | * Re-initialize ULI526X board | ||
1118 | */ | ||
1119 | |||
1120 | static void uli526x_dynamic_reset(struct net_device *dev) | ||
1121 | { | ||
1122 | struct uli526x_board_info *db = netdev_priv(dev); | ||
1123 | |||
1124 | ULI526X_DBUG(0, "uli526x_dynamic_reset()", 0); | ||
1125 | |||
1126 | /* Stop the MAC controller */ | ||
1127 | db->cr6_data &= ~(CR6_RXSC | CR6_TXSC); /* Disable Tx/Rx */ | ||
1128 | update_cr6(db->cr6_data, dev->base_addr); | ||
1129 | outl(0, dev->base_addr + DCR7); /* Disable Interrupt */ | ||
1130 | outl(inl(dev->base_addr + DCR5), dev->base_addr + DCR5); | ||
1131 | |||
1132 | /* Disable upper layer interface */ | ||
1133 | netif_stop_queue(dev); | ||
1134 | |||
1135 | /* Free Rx Allocate buffer */ | ||
1136 | uli526x_free_rxbuffer(db); | ||
1137 | |||
1138 | /* system variable init */ | ||
1139 | db->tx_packet_cnt = 0; | ||
1140 | db->rx_avail_cnt = 0; | ||
1141 | db->link_failed = 1; | ||
1142 | db->init=1; | ||
1143 | db->wait_reset = 0; | ||
1144 | |||
1145 | /* Re-initialize ULI526X board */ | ||
1146 | uli526x_init(dev); | ||
1147 | |||
1148 | /* Restart upper layer interface */ | ||
1149 | netif_wake_queue(dev); | ||
1150 | } | ||
1151 | |||
1152 | |||
1153 | /* | ||
1154 | * Free all allocated rx buffers | ||
1155 | */ | ||
1156 | |||
1157 | static void uli526x_free_rxbuffer(struct uli526x_board_info * db) | ||
1158 | { | ||
1159 | ULI526X_DBUG(0, "uli526x_free_rxbuffer()", 0); | ||
1160 | |||
1161 | /* free allocated rx buffer */ | ||
1162 | while (db->rx_avail_cnt) { | ||
1163 | dev_kfree_skb(db->rx_ready_ptr->rx_skb_ptr); | ||
1164 | db->rx_ready_ptr = db->rx_ready_ptr->next_rx_desc; | ||
1165 | db->rx_avail_cnt--; | ||
1166 | } | ||
1167 | } | ||
1168 | |||
1169 | |||
1170 | /* | ||
1171 | * Reuse the SK buffer | ||
1172 | */ | ||
1173 | |||
1174 | static void uli526x_reuse_skb(struct uli526x_board_info *db, struct sk_buff * skb) | ||
1175 | { | ||
1176 | struct rx_desc *rxptr = db->rx_insert_ptr; | ||
1177 | |||
1178 | if (!(rxptr->rdes0 & cpu_to_le32(0x80000000))) { | ||
1179 | rxptr->rx_skb_ptr = skb; | ||
1180 | rxptr->rdes2 = cpu_to_le32( pci_map_single(db->pdev, skb->tail, RX_ALLOC_SIZE, PCI_DMA_FROMDEVICE) ); | ||
1181 | wmb(); /* make rdes2 visible before handing ownership to the NIC */ | ||
1182 | rxptr->rdes0 = cpu_to_le32(0x80000000); | ||
1183 | db->rx_avail_cnt++; | ||
1184 | db->rx_insert_ptr = rxptr->next_rx_desc; | ||
1185 | } else | ||
1186 | ULI526X_DBUG(0, "SK Buffer reuse method error", db->rx_avail_cnt); | ||
1187 | } | ||
1188 | |||
1189 | |||
1190 | /* | ||
1191 | * Initialize Transmit/Receive descriptors | ||
1192 | * using a chained structure, and allocate Tx/Rx buffers | ||
1193 | */ | ||
1194 | |||
1195 | static void uli526x_descriptor_init(struct uli526x_board_info *db, unsigned long ioaddr) | ||
1196 | { | ||
1197 | struct tx_desc *tmp_tx; | ||
1198 | struct rx_desc *tmp_rx; | ||
1199 | unsigned char *tmp_buf; | ||
1200 | dma_addr_t tmp_tx_dma, tmp_rx_dma; | ||
1201 | dma_addr_t tmp_buf_dma; | ||
1202 | int i; | ||
1203 | |||
1204 | ULI526X_DBUG(0, "uli526x_descriptor_init()", 0); | ||
1205 | |||
1206 | /* tx descriptor start pointer */ | ||
1207 | db->tx_insert_ptr = db->first_tx_desc; | ||
1208 | db->tx_remove_ptr = db->first_tx_desc; | ||
1209 | outl(db->first_tx_desc_dma, ioaddr + DCR4); /* TX DESC address */ | ||
1210 | |||
1211 | /* rx descriptor start pointer */ | ||
1212 | db->first_rx_desc = (void *)db->first_tx_desc + sizeof(struct tx_desc) * TX_DESC_CNT; | ||
1213 | db->first_rx_desc_dma = db->first_tx_desc_dma + sizeof(struct tx_desc) * TX_DESC_CNT; | ||
1214 | db->rx_insert_ptr = db->first_rx_desc; | ||
1215 | db->rx_ready_ptr = db->first_rx_desc; | ||
1216 | outl(db->first_rx_desc_dma, ioaddr + DCR3); /* RX DESC address */ | ||
1217 | |||
1218 | /* Init Transmit chain */ | ||
1219 | tmp_buf = db->buf_pool_start; | ||
1220 | tmp_buf_dma = db->buf_pool_dma_start; | ||
1221 | tmp_tx_dma = db->first_tx_desc_dma; | ||
1222 | for (tmp_tx = db->first_tx_desc, i = 0; i < TX_DESC_CNT; i++, tmp_tx++) { | ||
1223 | tmp_tx->tx_buf_ptr = tmp_buf; | ||
1224 | tmp_tx->tdes0 = cpu_to_le32(0); | ||
1225 | tmp_tx->tdes1 = cpu_to_le32(0x81000000); /* IC, chain */ | ||
1226 | tmp_tx->tdes2 = cpu_to_le32(tmp_buf_dma); | ||
1227 | tmp_tx_dma += sizeof(struct tx_desc); | ||
1228 | tmp_tx->tdes3 = cpu_to_le32(tmp_tx_dma); | ||
1229 | tmp_tx->next_tx_desc = tmp_tx + 1; | ||
1230 | tmp_buf = tmp_buf + TX_BUF_ALLOC; | ||
1231 | tmp_buf_dma = tmp_buf_dma + TX_BUF_ALLOC; | ||
1232 | } | ||
1233 | (--tmp_tx)->tdes3 = cpu_to_le32(db->first_tx_desc_dma); | ||
1234 | tmp_tx->next_tx_desc = db->first_tx_desc; | ||
1235 | |||
1236 | /* Init Receive descriptor chain */ | ||
1237 | tmp_rx_dma=db->first_rx_desc_dma; | ||
1238 | for (tmp_rx = db->first_rx_desc, i = 0; i < RX_DESC_CNT; i++, tmp_rx++) { | ||
1239 | tmp_rx->rdes0 = cpu_to_le32(0); | ||
1240 | tmp_rx->rdes1 = cpu_to_le32(0x01000600); | ||
1241 | tmp_rx_dma += sizeof(struct rx_desc); | ||
1242 | tmp_rx->rdes3 = cpu_to_le32(tmp_rx_dma); | ||
1243 | tmp_rx->next_rx_desc = tmp_rx + 1; | ||
1244 | } | ||
1245 | (--tmp_rx)->rdes3 = cpu_to_le32(db->first_rx_desc_dma); | ||
1246 | tmp_rx->next_rx_desc = db->first_rx_desc; | ||
1247 | |||
1248 | /* pre-allocate Rx buffer */ | ||
1249 | allocate_rx_buffer(db); | ||
1250 | } | ||
1251 | |||
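Note how uli526x_descriptor_init() closes both chains: the last descriptor's next pointer and its tdes3/rdes3 DMA link both point back at the first descriptor, so the CPU-side and NIC-side views wrap without bounds checks. An editorial sanity-check sketch using the same structures and fields as above:

    struct tx_desc *last = db->first_tx_desc + TX_DESC_CNT - 1;

    /* CPU-side chain wraps */
    BUG_ON(last->next_tx_desc != db->first_tx_desc);
    /* NIC-side chain wraps: the last tdes3 holds the DMA address of the first descriptor */
    BUG_ON(le32_to_cpu(last->tdes3) != db->first_tx_desc_dma);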
1252 | |||
1253 | /* | ||
1254 | * Update CR6 value | ||
1255 | * First stop the ULI526X, then write the new value and restart | ||
1256 | */ | ||
1257 | |||
1258 | static void update_cr6(u32 cr6_data, unsigned long ioaddr) | ||
1259 | { | ||
1260 | |||
1261 | outl(cr6_data, ioaddr + DCR6); | ||
1262 | udelay(5); | ||
1263 | } | ||
1264 | |||
1265 | |||
1266 | /* | ||
1267 | * Send a setup frame for M5261/M5263 | ||
1268 | * This setup frame initializes the ULI526X address filter mode | ||
1269 | */ | ||
1270 | |||
1271 | static void send_filter_frame(struct net_device *dev, int mc_cnt) | ||
1272 | { | ||
1273 | struct uli526x_board_info *db = netdev_priv(dev); | ||
1274 | struct dev_mc_list *mcptr; | ||
1275 | struct tx_desc *txptr; | ||
1276 | u16 * addrptr; | ||
1277 | u32 * suptr; | ||
1278 | int i; | ||
1279 | |||
1280 | ULI526X_DBUG(0, "send_filter_frame()", 0); | ||
1281 | |||
1282 | txptr = db->tx_insert_ptr; | ||
1283 | suptr = (u32 *) txptr->tx_buf_ptr; | ||
1284 | |||
1285 | /* Node address */ | ||
1286 | addrptr = (u16 *) dev->dev_addr; | ||
1287 | *suptr++ = addrptr[0]; | ||
1288 | *suptr++ = addrptr[1]; | ||
1289 | *suptr++ = addrptr[2]; | ||
1290 | |||
1291 | /* broadcast address */ | ||
1292 | *suptr++ = 0xffff; | ||
1293 | *suptr++ = 0xffff; | ||
1294 | *suptr++ = 0xffff; | ||
1295 | |||
1296 | /* fill in the multicast addresses */ | ||
1297 | for (mcptr = dev->mc_list, i = 0; i < mc_cnt; i++, mcptr = mcptr->next) { | ||
1298 | addrptr = (u16 *) mcptr->dmi_addr; | ||
1299 | *suptr++ = addrptr[0]; | ||
1300 | *suptr++ = addrptr[1]; | ||
1301 | *suptr++ = addrptr[2]; | ||
1302 | } | ||
1303 | |||
1304 | for (; i<14; i++) { | ||
1305 | *suptr++ = 0xffff; | ||
1306 | *suptr++ = 0xffff; | ||
1307 | *suptr++ = 0xffff; | ||
1308 | } | ||
1309 | |||
1310 | /* prepare the setup frame */ | ||
1311 | db->tx_insert_ptr = txptr->next_tx_desc; | ||
1312 | txptr->tdes1 = cpu_to_le32(0x890000c0); | ||
1313 | |||
1314 | /* Resource Check and Send the setup packet */ | ||
1315 | if (db->tx_packet_cnt < TX_DESC_CNT) { | ||
1316 | /* Tx descriptor available */ | ||
1317 | db->tx_packet_cnt++; | ||
1318 | txptr->tdes0 = cpu_to_le32(0x80000000); | ||
1319 | update_cr6(db->cr6_data | 0x2000, dev->base_addr); | ||
1320 | outl(0x1, dev->base_addr + DCR1); /* Issue Tx polling */ | ||
1321 | update_cr6(db->cr6_data, dev->base_addr); | ||
1322 | dev->trans_start = jiffies; | ||
1323 | } else | ||
1324 | printk(KERN_ERR DRV_NAME ": No Tx resource - Send_filter_frame!\n"); | ||
1325 | } | ||
1326 | |||
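The setup frame built above is 192 bytes: sixteen perfect-filter slots of three 32-bit words, each word carrying two address bytes in its low half (slot 0 is the station address, slot 1 the broadcast address, the rest multicast entries padded out with ff:ff:ff:ff:ff:ff, and tdes1's 0xc0 byte count matches). A hedged sketch of filling one slot, mirroring the pointer walk in send_filter_frame() (the helper name is hypothetical):

    /* Hypothetical helper: copy one 6-byte address into a filter slot */
    static u32 *fill_filter_slot(u32 *suptr, const u8 *addr)
    {
        const u16 *addrptr = (const u16 *)addr;

        *suptr++ = addrptr[0];    /* bytes 0-1 */
        *suptr++ = addrptr[1];    /* bytes 2-3 */
        *suptr++ = addrptr[2];    /* bytes 4-5 */
        return suptr;
    }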
1327 | |||
1328 | /* | ||
1329 | * Allocate rx buffer, | ||
1330 | * filling as many Rx descriptors as possible | ||
1331 | */ | ||
1332 | |||
1333 | static void allocate_rx_buffer(struct uli526x_board_info *db) | ||
1334 | { | ||
1335 | struct rx_desc *rxptr; | ||
1336 | struct sk_buff *skb; | ||
1337 | |||
1338 | rxptr = db->rx_insert_ptr; | ||
1339 | |||
1340 | while(db->rx_avail_cnt < RX_DESC_CNT) { | ||
1341 | if ( ( skb = dev_alloc_skb(RX_ALLOC_SIZE) ) == NULL ) | ||
1342 | break; | ||
1343 | rxptr->rx_skb_ptr = skb; /* FIXME (?) */ | ||
1344 | rxptr->rdes2 = cpu_to_le32( pci_map_single(db->pdev, skb->tail, RX_ALLOC_SIZE, PCI_DMA_FROMDEVICE) ); | ||
1345 | wmb(); /* make rdes2 visible before handing ownership to the NIC */ | ||
1346 | rxptr->rdes0 = cpu_to_le32(0x80000000); | ||
1347 | rxptr = rxptr->next_rx_desc; | ||
1348 | db->rx_avail_cnt++; | ||
1349 | } | ||
1350 | |||
1351 | db->rx_insert_ptr = rxptr; | ||
1352 | } | ||
1353 | |||
1354 | |||
1355 | /* | ||
1356 | * Read one word data from the serial ROM | ||
1357 | */ | ||
1358 | |||
1359 | static u16 read_srom_word(long ioaddr, int offset) | ||
1360 | { | ||
1361 | int i; | ||
1362 | u16 srom_data = 0; | ||
1363 | long cr9_ioaddr = ioaddr + DCR9; | ||
1364 | |||
1365 | outl(CR9_SROM_READ, cr9_ioaddr); | ||
1366 | outl(CR9_SROM_READ | CR9_SRCS, cr9_ioaddr); | ||
1367 | |||
1368 | /* Send the Read Command 110b */ | ||
1369 | SROM_CLK_WRITE(SROM_DATA_1, cr9_ioaddr); | ||
1370 | SROM_CLK_WRITE(SROM_DATA_1, cr9_ioaddr); | ||
1371 | SROM_CLK_WRITE(SROM_DATA_0, cr9_ioaddr); | ||
1372 | |||
1373 | /* Send the offset */ | ||
1374 | for (i = 5; i >= 0; i--) { | ||
1375 | srom_data = (offset & (1 << i)) ? SROM_DATA_1 : SROM_DATA_0; | ||
1376 | SROM_CLK_WRITE(srom_data, cr9_ioaddr); | ||
1377 | } | ||
1378 | |||
1379 | outl(CR9_SROM_READ | CR9_SRCS, cr9_ioaddr); | ||
1380 | |||
1381 | for (i = 16; i > 0; i--) { | ||
1382 | outl(CR9_SROM_READ | CR9_SRCS | CR9_SRCLK, cr9_ioaddr); | ||
1383 | udelay(5); | ||
1384 | srom_data = (srom_data << 1) | ((inl(cr9_ioaddr) & CR9_CRDOUT) ? 1 : 0); | ||
1385 | outl(CR9_SROM_READ | CR9_SRCS, cr9_ioaddr); | ||
1386 | udelay(5); | ||
1387 | } | ||
1388 | |||
1389 | outl(CR9_SROM_READ, cr9_ioaddr); | ||
1390 | return srom_data; | ||
1391 | } | ||
1392 | |||
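read_srom_word() above implements a 93C46-style READ cycle: chip select, the three command bits 1-1-0, a 6-bit word address, then 16 data bits clocked back MSB first. A hedged usage sketch, fetching the station address the way the probe routine does (SROM bytes 20-25, i.e. words 10-12; ioaddr is assumed to be the chip's I/O base):

    u16 srom_words[3];
    u8 mac[6];
    int w;

    for (w = 0; w < 3; w++)
        srom_words[w] = cpu_to_le16(read_srom_word(ioaddr, 10 + w));
    memcpy(mac, srom_words, 6);    /* bytes land in wire order */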
1393 | |||
1394 | /* | ||
1395 | * Auto sense the media mode | ||
1396 | */ | ||
1397 | |||
1398 | static u8 uli526x_sense_speed(struct uli526x_board_info * db) | ||
1399 | { | ||
1400 | u8 ErrFlag = 0; | ||
1401 | u16 phy_mode; | ||
1402 | |||
1403 | phy_mode = phy_read(db->ioaddr, db->phy_addr, 1, db->chip_id); /* BMSR is latched; read twice */ | ||
1404 | phy_mode = phy_read(db->ioaddr, db->phy_addr, 1, db->chip_id); | ||
1405 | |||
1406 | if ( (phy_mode & 0x24) == 0x24 ) { | ||
1407 | |||
1408 | phy_mode = ((phy_read(db->ioaddr, db->phy_addr, 5, db->chip_id) & 0x01e0)<<7); | ||
1409 | if(phy_mode&0x8000) | ||
1410 | phy_mode = 0x8000; | ||
1411 | else if(phy_mode&0x4000) | ||
1412 | phy_mode = 0x4000; | ||
1413 | else if(phy_mode&0x2000) | ||
1414 | phy_mode = 0x2000; | ||
1415 | else | ||
1416 | phy_mode = 0x1000; | ||
1417 | |||
1418 | /* printk(DRV_NAME ": Phy_mode %x ",phy_mode); */ | ||
1419 | switch (phy_mode) { | ||
1420 | case 0x1000: db->op_mode = ULI526X_10MHF; break; | ||
1421 | case 0x2000: db->op_mode = ULI526X_10MFD; break; | ||
1422 | case 0x4000: db->op_mode = ULI526X_100MHF; break; | ||
1423 | case 0x8000: db->op_mode = ULI526X_100MFD; break; | ||
1424 | default: db->op_mode = ULI526X_10MHF; ErrFlag = 1; break; | ||
1425 | } | ||
1426 | } else { | ||
1427 | db->op_mode = ULI526X_10MHF; | ||
1428 | ULI526X_DBUG(0, "Link Failed :", phy_mode); | ||
1429 | ErrFlag = 1; | ||
1430 | } | ||
1431 | |||
1432 | return ErrFlag; | ||
1433 | } | ||
1434 | |||
1435 | |||
1436 | /* | ||
1437 | * Set 10/100 phyxcer capability | ||
1438 | * AUTO mode : phyxcer register4 is NIC capability | ||
1439 | * Force mode: phyxcer register4 is the force media | ||
1440 | */ | ||
1441 | |||
1442 | static void uli526x_set_phyxcer(struct uli526x_board_info *db) | ||
1443 | { | ||
1444 | u16 phy_reg; | ||
1445 | |||
1446 | /* Phyxcer capability setting */ | ||
1447 | phy_reg = phy_read(db->ioaddr, db->phy_addr, 4, db->chip_id) & ~0x01e0; | ||
1448 | |||
1449 | if (db->media_mode & ULI526X_AUTO) { | ||
1450 | /* AUTO Mode */ | ||
1451 | phy_reg |= db->PHY_reg4; | ||
1452 | } else { | ||
1453 | /* Force Mode */ | ||
1454 | switch(db->media_mode) { | ||
1455 | case ULI526X_10MHF: phy_reg |= 0x20; break; | ||
1456 | case ULI526X_10MFD: phy_reg |= 0x40; break; | ||
1457 | case ULI526X_100MHF: phy_reg |= 0x80; break; | ||
1458 | case ULI526X_100MFD: phy_reg |= 0x100; break; | ||
1459 | } | ||
1460 | |||
1461 | } | ||
1462 | |||
1463 | /* Write new capability to Phyxcer Reg4 */ | ||
1464 | if ( !(phy_reg & 0x01e0)) { | ||
1465 | phy_reg|=db->PHY_reg4; | ||
1466 | db->media_mode|=ULI526X_AUTO; | ||
1467 | } | ||
1468 | phy_write(db->ioaddr, db->phy_addr, 4, phy_reg, db->chip_id); | ||
1469 | |||
1470 | /* Restart Auto-Negotiation */ | ||
1471 | phy_write(db->ioaddr, db->phy_addr, 0, 0x1200, db->chip_id); | ||
1472 | udelay(50); | ||
1473 | } | ||
1474 | |||
1475 | |||
1476 | /* | ||
1477 | * Process op-mode | ||
1478 | * AUTO mode : PHY controller in Auto-negotiation Mode | ||
1479 | * Force mode: PHY controller in force mode with HUB | ||
1480 | * N-way force capability with SWITCH | ||
1481 | */ | ||
1482 | |||
1483 | static void uli526x_process_mode(struct uli526x_board_info *db) | ||
1484 | { | ||
1485 | u16 phy_reg; | ||
1486 | |||
1487 | /* Full Duplex Mode Check */ | ||
1488 | if (db->op_mode & 0x4) | ||
1489 | db->cr6_data |= CR6_FDM; /* Set Full Duplex Bit */ | ||
1490 | else | ||
1491 | db->cr6_data &= ~CR6_FDM; /* Clear Full Duplex Bit */ | ||
1492 | |||
1493 | update_cr6(db->cr6_data, db->ioaddr); | ||
1494 | |||
1495 | /* Needed for 10/100M phyxcer force mode */ | ||
1496 | if ( !(db->media_mode & 0x8)) { | ||
1497 | /* Force Mode */ | ||
1498 | phy_reg = phy_read(db->ioaddr, db->phy_addr, 6, db->chip_id); | ||
1499 | if ( !(phy_reg & 0x1) ) { | ||
1500 | /* partner without N-Way capability */ | ||
1501 | phy_reg = 0x0; | ||
1502 | switch(db->op_mode) { | ||
1503 | case ULI526X_10MHF: phy_reg = 0x0; break; | ||
1504 | case ULI526X_10MFD: phy_reg = 0x100; break; | ||
1505 | case ULI526X_100MHF: phy_reg = 0x2000; break; | ||
1506 | case ULI526X_100MFD: phy_reg = 0x2100; break; | ||
1507 | } | ||
1508 | phy_write(db->ioaddr, db->phy_addr, 0, phy_reg, db->chip_id); | ||
1509 | phy_write(db->ioaddr, db->phy_addr, 0, phy_reg, db->chip_id); | ||
1510 | } | ||
1511 | } | ||
1512 | } | ||
1513 | |||
1514 | |||
1515 | /* | ||
1516 | * Write a word to Phy register | ||
1517 | */ | ||
1518 | |||
1519 | static void phy_write(unsigned long iobase, u8 phy_addr, u8 offset, u16 phy_data, u32 chip_id) | ||
1520 | { | ||
1521 | u16 i; | ||
1522 | unsigned long ioaddr; | ||
1523 | |||
1524 | if(chip_id == PCI_ULI5263_ID) | ||
1525 | { | ||
1526 | phy_writeby_cr10(iobase, phy_addr, offset, phy_data); | ||
1527 | return; | ||
1528 | } | ||
1529 | /* M5261/M5263 Chip */ | ||
1530 | ioaddr = iobase + DCR9; | ||
1531 | |||
1532 | /* Send 35 synchronization clocks to the PHY controller */ | ||
1533 | for (i = 0; i < 35; i++) | ||
1534 | phy_write_1bit(ioaddr, PHY_DATA_1, chip_id); | ||
1535 | |||
1536 | /* Send start command(01) to Phy */ | ||
1537 | phy_write_1bit(ioaddr, PHY_DATA_0, chip_id); | ||
1538 | phy_write_1bit(ioaddr, PHY_DATA_1, chip_id); | ||
1539 | |||
1540 | /* Send write command(01) to Phy */ | ||
1541 | phy_write_1bit(ioaddr, PHY_DATA_0, chip_id); | ||
1542 | phy_write_1bit(ioaddr, PHY_DATA_1, chip_id); | ||
1543 | |||
1544 | /* Send Phy address */ | ||
1545 | for (i = 0x10; i > 0; i = i >> 1) | ||
1546 | phy_write_1bit(ioaddr, phy_addr & i ? PHY_DATA_1 : PHY_DATA_0, chip_id); | ||
1547 | |||
1548 | /* Send register address */ | ||
1549 | for (i = 0x10; i > 0; i = i >> 1) | ||
1550 | phy_write_1bit(ioaddr, offset & i ? PHY_DATA_1 : PHY_DATA_0, chip_id); | ||
1551 | |||
1552 | /* write transition */ | ||
1553 | phy_write_1bit(ioaddr, PHY_DATA_1, chip_id); | ||
1554 | phy_write_1bit(ioaddr, PHY_DATA_0, chip_id); | ||
1555 | |||
1556 | /* Write a word data to PHY controller */ | ||
1557 | for ( i = 0x8000; i > 0; i >>= 1) | ||
1558 | phy_write_1bit(ioaddr, phy_data & i ? PHY_DATA_1 : PHY_DATA_0, chip_id); | ||
1559 | |||
1560 | } | ||
1561 | |||
1562 | |||
1563 | /* | ||
1564 | * Read a word data from phy register | ||
1565 | */ | ||
1566 | |||
1567 | static u16 phy_read(unsigned long iobase, u8 phy_addr, u8 offset, u32 chip_id) | ||
1568 | { | ||
1569 | int i; | ||
1570 | u16 phy_data; | ||
1571 | unsigned long ioaddr; | ||
1572 | |||
1573 | if(chip_id == PCI_ULI5263_ID) | ||
1574 | return phy_readby_cr10(iobase, phy_addr, offset); | ||
1575 | /* M5261/M5263 Chip */ | ||
1576 | ioaddr = iobase + DCR9; | ||
1577 | |||
1578 | /* Send 35 synchronization clocks to the PHY controller */ | ||
1579 | for (i = 0; i < 35; i++) | ||
1580 | phy_write_1bit(ioaddr, PHY_DATA_1, chip_id); | ||
1581 | |||
1582 | /* Send start command(01) to Phy */ | ||
1583 | phy_write_1bit(ioaddr, PHY_DATA_0, chip_id); | ||
1584 | phy_write_1bit(ioaddr, PHY_DATA_1, chip_id); | ||
1585 | |||
1586 | /* Send read command(10) to Phy */ | ||
1587 | phy_write_1bit(ioaddr, PHY_DATA_1, chip_id); | ||
1588 | phy_write_1bit(ioaddr, PHY_DATA_0, chip_id); | ||
1589 | |||
1590 | /* Send Phy address */ | ||
1591 | for (i = 0x10; i > 0; i = i >> 1) | ||
1592 | phy_write_1bit(ioaddr, phy_addr & i ? PHY_DATA_1 : PHY_DATA_0, chip_id); | ||
1593 | |||
1594 | /* Send register address */ | ||
1595 | for (i = 0x10; i > 0; i = i >> 1) | ||
1596 | phy_write_1bit(ioaddr, offset & i ? PHY_DATA_1 : PHY_DATA_0, chip_id); | ||
1597 | |||
1598 | /* Skip transition state */ | ||
1599 | phy_read_1bit(ioaddr, chip_id); | ||
1600 | |||
1601 | /* read 16bit data */ | ||
1602 | for (phy_data = 0, i = 0; i < 16; i++) { | ||
1603 | phy_data <<= 1; | ||
1604 | phy_data |= phy_read_1bit(ioaddr, chip_id); | ||
1605 | } | ||
1606 | |||
1607 | return phy_data; | ||
1608 | } | ||
1609 | |||
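For reference, the bit-banged frames in phy_write() and phy_read() above follow the standard MDIO (IEEE 802.3 clause 22) layout: a preamble of 1-bits, start bits 01, a 2-bit opcode (01 write, 10 read), the 5-bit PHY address, the 5-bit register address, a turnaround slot, then 16 data bits MSB first. On the M5263 the same access goes through the CR10 register window instead, as the two *by_cr10 helpers below show.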
1610 | static u16 phy_readby_cr10(unsigned long iobase, u8 phy_addr, u8 offset) | ||
1611 | { | ||
1612 | unsigned long ioaddr,cr10_value; | ||
1613 | |||
1614 | ioaddr = iobase + DCR10; | ||
1615 | cr10_value = phy_addr; | ||
1616 | cr10_value = (cr10_value<<5) + offset; | ||
1617 | cr10_value = (cr10_value<<16) + 0x08000000; | ||
1618 | outl(cr10_value,ioaddr); | ||
1619 | udelay(1); | ||
1620 | while(1) | ||
1621 | { | ||
1622 | cr10_value = inl(ioaddr); | ||
1623 | if(cr10_value&0x10000000) | ||
1624 | break; | ||
1625 | } | ||
1626 | return (cr10_value&0x0ffff); | ||
1627 | } | ||
1628 | |||
1629 | static void phy_writeby_cr10(unsigned long iobase, u8 phy_addr, u8 offset, u16 phy_data) | ||
1630 | { | ||
1631 | unsigned long ioaddr,cr10_value; | ||
1632 | |||
1633 | ioaddr = iobase + DCR10; | ||
1634 | cr10_value = phy_addr; | ||
1635 | cr10_value = (cr10_value<<5) + offset; | ||
1636 | cr10_value = (cr10_value<<16) + 0x04000000 + phy_data; | ||
1637 | outl(cr10_value,ioaddr); | ||
1638 | udelay(1); | ||
1639 | } | ||
1640 | /* | ||
1641 | * Write one bit data to Phy Controller | ||
1642 | */ | ||
1643 | |||
1644 | static void phy_write_1bit(unsigned long ioaddr, u32 phy_data, u32 chip_id) | ||
1645 | { | ||
1646 | outl(phy_data , ioaddr); /* MII Clock Low */ | ||
1647 | udelay(1); | ||
1648 | outl(phy_data | MDCLKH, ioaddr); /* MII Clock High */ | ||
1649 | udelay(1); | ||
1650 | outl(phy_data , ioaddr); /* MII Clock Low */ | ||
1651 | udelay(1); | ||
1652 | } | ||
1653 | |||
1654 | |||
1655 | /* | ||
1656 | * Read one bit phy data from PHY controller | ||
1657 | */ | ||
1658 | |||
1659 | static u16 phy_read_1bit(unsigned long ioaddr, u32 chip_id) | ||
1660 | { | ||
1661 | u16 phy_data; | ||
1662 | |||
1663 | outl(0x50000 , ioaddr); | ||
1664 | udelay(1); | ||
1665 | phy_data = ( inl(ioaddr) >> 19 ) & 0x1; | ||
1666 | outl(0x40000 , ioaddr); | ||
1667 | udelay(1); | ||
1668 | |||
1669 | return phy_data; | ||
1670 | } | ||
1671 | |||
1672 | |||
1673 | static struct pci_device_id uli526x_pci_tbl[] = { | ||
1674 | { 0x10B9, 0x5261, PCI_ANY_ID, PCI_ANY_ID, 0, 0, PCI_ULI5261_ID }, | ||
1675 | { 0x10B9, 0x5263, PCI_ANY_ID, PCI_ANY_ID, 0, 0, PCI_ULI5263_ID }, | ||
1676 | { 0, } | ||
1677 | }; | ||
1678 | MODULE_DEVICE_TABLE(pci, uli526x_pci_tbl); | ||
1679 | |||
1680 | |||
1681 | static struct pci_driver uli526x_driver = { | ||
1682 | .name = "uli526x", | ||
1683 | .id_table = uli526x_pci_tbl, | ||
1684 | .probe = uli526x_init_one, | ||
1685 | .remove = __devexit_p(uli526x_remove_one), | ||
1686 | }; | ||
1687 | |||
1688 | MODULE_AUTHOR("Peer Chen, peer.chen@uli.com.tw"); | ||
1689 | MODULE_DESCRIPTION("ULi M5261/M5263 fast ethernet driver"); | ||
1690 | MODULE_LICENSE("GPL"); | ||
1691 | |||
1692 | MODULE_PARM(debug, "i"); | ||
1693 | MODULE_PARM(mode, "i"); | ||
1694 | MODULE_PARM(cr6set, "i"); | ||
1695 | MODULE_PARM_DESC(debug, "ULi M5261/M5263 enable debugging (0-1)"); | ||
1696 | MODULE_PARM_DESC(mode, "ULi M5261/M5263: Bit 0: 10/100Mbps, bit 2: duplex, bit 8: HomePNA"); | ||
1697 | |||
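As a usage example (matching the switch in uli526x_init_module() below): loading with `insmod uli526x mode=5` forces 100 Mbps full duplex, `mode=0` forces 10 Mbps half duplex, and any other value, or omitting the parameter, leaves the driver in auto-negotiation; `debug=1` enables the ULI526X_DBUG traces.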
1698 | /* Description: | ||
1699 | * When the user loads the module with insmod, the system invokes | ||
1700 | * init_module() to register the services. | ||
1701 | */ | ||
1702 | |||
1703 | static int __init uli526x_init_module(void) | ||
1704 | { | ||
1705 | int rc; | ||
1706 | |||
1707 | printk(version); | ||
1708 | printed_version = 1; | ||
1709 | |||
1710 | ULI526X_DBUG(0, "init_module() ", debug); | ||
1711 | |||
1712 | if (debug) | ||
1713 | uli526x_debug = debug; /* set debug flag */ | ||
1714 | if (cr6set) | ||
1715 | uli526x_cr6_user_set = cr6set; | ||
1716 | |||
1717 | switch (mode) { | ||
1718 | case ULI526X_10MHF: | ||
1719 | case ULI526X_100MHF: | ||
1720 | case ULI526X_10MFD: | ||
1721 | case ULI526X_100MFD: | ||
1722 | uli526x_media_mode = mode; | ||
1723 | break; | ||
1724 | default: uli526x_media_mode = ULI526X_AUTO; | ||
1725 | break; | ||
1726 | } | ||
1727 | |||
1728 | rc = pci_module_init(&uli526x_driver); | ||
1729 | if (rc < 0) | ||
1730 | return rc; | ||
1731 | |||
1732 | return 0; | ||
1733 | } | ||
1734 | |||
1735 | |||
1736 | /* | ||
1737 | * Description: | ||
1738 | * When the user removes the module with rmmod, the kernel invokes | ||
1739 | * the cleanup routine to unregister all registered services. | ||
1740 | */ | ||
1741 | |||
1742 | static void __exit uli526x_cleanup_module(void) | ||
1743 | { | ||
1744 | ULI526X_DBUG(0, "uli526x_cleanup_module() ", debug); | ||
1745 | pci_unregister_driver(&uli526x_driver); | ||
1746 | } | ||
1747 | |||
1748 | module_init(uli526x_init_module); | ||
1749 | module_exit(uli526x_cleanup_module); | ||
diff --git a/drivers/pci/setup-bus.c b/drivers/pci/setup-bus.c index 6d864c502a1f..6b0e6464eb39 100644 --- a/drivers/pci/setup-bus.c +++ b/drivers/pci/setup-bus.c | |||
@@ -40,7 +40,7 @@ | |||
40 | * FIXME: IO should be max 256 bytes. However, since we may | 40 | * FIXME: IO should be max 256 bytes. However, since we may |
41 | * have a P2P bridge below a cardbus bridge, we need 4K. | 41 | * have a P2P bridge below a cardbus bridge, we need 4K. |
42 | */ | 42 | */ |
43 | #define CARDBUS_IO_SIZE (256) | 43 | #define CARDBUS_IO_SIZE (4*1024) |
44 | #define CARDBUS_MEM_SIZE (32*1024*1024) | 44 | #define CARDBUS_MEM_SIZE (32*1024*1024) |
45 | 45 | ||
46 | static void __devinit | 46 | static void __devinit |
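The 4K figure follows from the PCI-to-PCI bridge programming model: a
bridge's I/O Base and I/O Limit registers encode only address bits 15:12,
so an I/O window that may have to contain a P2P bridge is decoded in
4K-aligned, 4K-granular chunks and cannot usefully be smaller. A sketch
of the resulting rounding (names illustrative, not from setup-bus.c):

	#define P2P_IO_GRAN 0x1000UL	/* 4K I/O window granularity */

	static unsigned long p2p_io_window(unsigned long size)
	{
		/* round the requested size up to the bridge granularity */
		return (size + P2P_IO_GRAN - 1) & ~(P2P_IO_GRAN - 1);
	}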
diff --git a/drivers/scsi/ahci.c b/drivers/scsi/ahci.c index 179c95c878ac..31065261de8e 100644 --- a/drivers/scsi/ahci.c +++ b/drivers/scsi/ahci.c | |||
@@ -189,7 +189,6 @@ static void ahci_irq_clear(struct ata_port *ap); | |||
189 | static void ahci_eng_timeout(struct ata_port *ap); | 189 | static void ahci_eng_timeout(struct ata_port *ap); |
190 | static int ahci_port_start(struct ata_port *ap); | 190 | static int ahci_port_start(struct ata_port *ap); |
191 | static void ahci_port_stop(struct ata_port *ap); | 191 | static void ahci_port_stop(struct ata_port *ap); |
192 | static void ahci_host_stop(struct ata_host_set *host_set); | ||
193 | static void ahci_tf_read(struct ata_port *ap, struct ata_taskfile *tf); | 192 | static void ahci_tf_read(struct ata_port *ap, struct ata_taskfile *tf); |
194 | static void ahci_qc_prep(struct ata_queued_cmd *qc); | 193 | static void ahci_qc_prep(struct ata_queued_cmd *qc); |
195 | static u8 ahci_check_status(struct ata_port *ap); | 194 | static u8 ahci_check_status(struct ata_port *ap); |
@@ -242,7 +241,6 @@ static struct ata_port_operations ahci_ops = { | |||
242 | 241 | ||
243 | .port_start = ahci_port_start, | 242 | .port_start = ahci_port_start, |
244 | .port_stop = ahci_port_stop, | 243 | .port_stop = ahci_port_stop, |
245 | .host_stop = ahci_host_stop, | ||
246 | }; | 244 | }; |
247 | 245 | ||
248 | static struct ata_port_info ahci_port_info[] = { | 246 | static struct ata_port_info ahci_port_info[] = { |
@@ -296,17 +294,9 @@ static inline unsigned long ahci_port_base_ul (unsigned long base, unsigned int | |||
296 | return base + 0x100 + (port * 0x80); | 294 | return base + 0x100 + (port * 0x80); |
297 | } | 295 | } |
298 | 296 | ||
299 | static inline void *ahci_port_base (void *base, unsigned int port) | 297 | static inline void __iomem *ahci_port_base (void __iomem *base, unsigned int port) |
300 | { | 298 | { |
301 | return (void *) ahci_port_base_ul((unsigned long)base, port); | 299 | return (void __iomem *) ahci_port_base_ul((unsigned long)base, port); |
302 | } | ||
303 | |||
304 | static void ahci_host_stop(struct ata_host_set *host_set) | ||
305 | { | ||
306 | struct ahci_host_priv *hpriv = host_set->private_data; | ||
307 | kfree(hpriv); | ||
308 | |||
309 | ata_host_stop(host_set); | ||
310 | } | 300 | } |
311 | 301 | ||
312 | static int ahci_port_start(struct ata_port *ap) | 302 | static int ahci_port_start(struct ata_port *ap) |
@@ -314,8 +304,9 @@ static int ahci_port_start(struct ata_port *ap) | |||
314 | struct device *dev = ap->host_set->dev; | 304 | struct device *dev = ap->host_set->dev; |
315 | struct ahci_host_priv *hpriv = ap->host_set->private_data; | 305 | struct ahci_host_priv *hpriv = ap->host_set->private_data; |
316 | struct ahci_port_priv *pp; | 306 | struct ahci_port_priv *pp; |
317 | void *mem, *mmio = ap->host_set->mmio_base; | 307 | void __iomem *mmio = ap->host_set->mmio_base; |
318 | void *port_mmio = ahci_port_base(mmio, ap->port_no); | 308 | void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no); |
309 | void *mem; | ||
319 | dma_addr_t mem_dma; | 310 | dma_addr_t mem_dma; |
320 | 311 | ||
321 | pp = kmalloc(sizeof(*pp), GFP_KERNEL); | 312 | pp = kmalloc(sizeof(*pp), GFP_KERNEL); |
@@ -383,8 +374,8 @@ static void ahci_port_stop(struct ata_port *ap) | |||
383 | { | 374 | { |
384 | struct device *dev = ap->host_set->dev; | 375 | struct device *dev = ap->host_set->dev; |
385 | struct ahci_port_priv *pp = ap->private_data; | 376 | struct ahci_port_priv *pp = ap->private_data; |
386 | void *mmio = ap->host_set->mmio_base; | 377 | void __iomem *mmio = ap->host_set->mmio_base; |
387 | void *port_mmio = ahci_port_base(mmio, ap->port_no); | 378 | void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no); |
388 | u32 tmp; | 379 | u32 tmp; |
389 | 380 | ||
390 | tmp = readl(port_mmio + PORT_CMD); | 381 | tmp = readl(port_mmio + PORT_CMD); |
@@ -546,8 +537,8 @@ static void ahci_qc_prep(struct ata_queued_cmd *qc) | |||
546 | 537 | ||
547 | static void ahci_intr_error(struct ata_port *ap, u32 irq_stat) | 538 | static void ahci_intr_error(struct ata_port *ap, u32 irq_stat) |
548 | { | 539 | { |
549 | void *mmio = ap->host_set->mmio_base; | 540 | void __iomem *mmio = ap->host_set->mmio_base; |
550 | void *port_mmio = ahci_port_base(mmio, ap->port_no); | 541 | void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no); |
551 | u32 tmp; | 542 | u32 tmp; |
552 | int work; | 543 | int work; |
553 | 544 | ||
@@ -595,8 +586,8 @@ static void ahci_intr_error(struct ata_port *ap, u32 irq_stat) | |||
595 | static void ahci_eng_timeout(struct ata_port *ap) | 586 | static void ahci_eng_timeout(struct ata_port *ap) |
596 | { | 587 | { |
597 | struct ata_host_set *host_set = ap->host_set; | 588 | struct ata_host_set *host_set = ap->host_set; |
598 | void *mmio = host_set->mmio_base; | 589 | void __iomem *mmio = host_set->mmio_base; |
599 | void *port_mmio = ahci_port_base(mmio, ap->port_no); | 590 | void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no); |
600 | struct ata_queued_cmd *qc; | 591 | struct ata_queued_cmd *qc; |
601 | unsigned long flags; | 592 | unsigned long flags; |
602 | 593 | ||
@@ -626,8 +617,8 @@ static void ahci_eng_timeout(struct ata_port *ap) | |||
626 | 617 | ||
627 | static inline int ahci_host_intr(struct ata_port *ap, struct ata_queued_cmd *qc) | 618 | static inline int ahci_host_intr(struct ata_port *ap, struct ata_queued_cmd *qc) |
628 | { | 619 | { |
629 | void *mmio = ap->host_set->mmio_base; | 620 | void __iomem *mmio = ap->host_set->mmio_base; |
630 | void *port_mmio = ahci_port_base(mmio, ap->port_no); | 621 | void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no); |
631 | u32 status, serr, ci; | 622 | u32 status, serr, ci; |
632 | 623 | ||
633 | serr = readl(port_mmio + PORT_SCR_ERR); | 624 | serr = readl(port_mmio + PORT_SCR_ERR); |
@@ -663,7 +654,7 @@ static irqreturn_t ahci_interrupt (int irq, void *dev_instance, struct pt_regs * | |||
663 | struct ata_host_set *host_set = dev_instance; | 654 | struct ata_host_set *host_set = dev_instance; |
664 | struct ahci_host_priv *hpriv; | 655 | struct ahci_host_priv *hpriv; |
665 | unsigned int i, handled = 0; | 656 | unsigned int i, handled = 0; |
666 | void *mmio; | 657 | void __iomem *mmio; |
667 | u32 irq_stat, irq_ack = 0; | 658 | u32 irq_stat, irq_ack = 0; |
668 | 659 | ||
669 | VPRINTK("ENTER\n"); | 660 | VPRINTK("ENTER\n"); |
@@ -709,7 +700,7 @@ static irqreturn_t ahci_interrupt (int irq, void *dev_instance, struct pt_regs * | |||
709 | static int ahci_qc_issue(struct ata_queued_cmd *qc) | 700 | static int ahci_qc_issue(struct ata_queued_cmd *qc) |
710 | { | 701 | { |
711 | struct ata_port *ap = qc->ap; | 702 | struct ata_port *ap = qc->ap; |
712 | void *port_mmio = (void *) ap->ioaddr.cmd_addr; | 703 | void __iomem *port_mmio = (void __iomem *) ap->ioaddr.cmd_addr; |
713 | 704 | ||
714 | writel(1, port_mmio + PORT_CMD_ISSUE); | 705 | writel(1, port_mmio + PORT_CMD_ISSUE); |
715 | readl(port_mmio + PORT_CMD_ISSUE); /* flush */ | 706 | readl(port_mmio + PORT_CMD_ISSUE); /* flush */ |
@@ -894,7 +885,7 @@ static void ahci_print_info(struct ata_probe_ent *probe_ent) | |||
894 | { | 885 | { |
895 | struct ahci_host_priv *hpriv = probe_ent->private_data; | 886 | struct ahci_host_priv *hpriv = probe_ent->private_data; |
896 | struct pci_dev *pdev = to_pci_dev(probe_ent->dev); | 887 | struct pci_dev *pdev = to_pci_dev(probe_ent->dev); |
897 | void *mmio = probe_ent->mmio_base; | 888 | void __iomem *mmio = probe_ent->mmio_base; |
898 | u32 vers, cap, impl, speed; | 889 | u32 vers, cap, impl, speed; |
899 | const char *speed_s; | 890 | const char *speed_s; |
900 | u16 cc; | 891 | u16 cc; |
@@ -967,7 +958,7 @@ static int ahci_init_one (struct pci_dev *pdev, const struct pci_device_id *ent) | |||
967 | struct ata_probe_ent *probe_ent = NULL; | 958 | struct ata_probe_ent *probe_ent = NULL; |
968 | struct ahci_host_priv *hpriv; | 959 | struct ahci_host_priv *hpriv; |
969 | unsigned long base; | 960 | unsigned long base; |
970 | void *mmio_base; | 961 | void __iomem *mmio_base; |
971 | unsigned int board_idx = (unsigned int) ent->driver_data; | 962 | unsigned int board_idx = (unsigned int) ent->driver_data; |
972 | int have_msi, pci_dev_busy = 0; | 963 | int have_msi, pci_dev_busy = 0; |
973 | int rc; | 964 | int rc; |
@@ -1004,8 +995,7 @@ static int ahci_init_one (struct pci_dev *pdev, const struct pci_device_id *ent) | |||
1004 | probe_ent->dev = pci_dev_to_dev(pdev); | 995 | probe_ent->dev = pci_dev_to_dev(pdev); |
1005 | INIT_LIST_HEAD(&probe_ent->node); | 996 | INIT_LIST_HEAD(&probe_ent->node); |
1006 | 997 | ||
1007 | mmio_base = ioremap(pci_resource_start(pdev, AHCI_PCI_BAR), | 998 | mmio_base = pci_iomap(pdev, AHCI_PCI_BAR, 0); |
1008 | pci_resource_len(pdev, AHCI_PCI_BAR)); | ||
1009 | if (mmio_base == NULL) { | 999 | if (mmio_base == NULL) { |
1010 | rc = -ENOMEM; | 1000 | rc = -ENOMEM; |
1011 | goto err_out_free_ent; | 1001 | goto err_out_free_ent; |
@@ -1049,7 +1039,7 @@ static int ahci_init_one (struct pci_dev *pdev, const struct pci_device_id *ent) | |||
1049 | err_out_hpriv: | 1039 | err_out_hpriv: |
1050 | kfree(hpriv); | 1040 | kfree(hpriv); |
1051 | err_out_iounmap: | 1041 | err_out_iounmap: |
1052 | iounmap(mmio_base); | 1042 | pci_iounmap(pdev, mmio_base); |
1053 | err_out_free_ent: | 1043 | err_out_free_ent: |
1054 | kfree(probe_ent); | 1044 | kfree(probe_ent); |
1055 | err_out_msi: | 1045 | err_out_msi: |
@@ -1089,7 +1079,8 @@ static void ahci_remove_one (struct pci_dev *pdev) | |||
1089 | scsi_host_put(ap->host); | 1079 | scsi_host_put(ap->host); |
1090 | } | 1080 | } |
1091 | 1081 | ||
1092 | host_set->ops->host_stop(host_set); | 1082 | kfree(hpriv); |
1083 | pci_iounmap(pdev, host_set->mmio_base); | ||
1093 | kfree(host_set); | 1084 | kfree(host_set); |
1094 | 1085 | ||
1095 | if (have_msi) | 1086 | if (have_msi) |
@@ -1106,7 +1097,6 @@ static int __init ahci_init(void) | |||
1106 | return pci_module_init(&ahci_pci_driver); | 1097 | return pci_module_init(&ahci_pci_driver); |
1107 | } | 1098 | } |
1108 | 1099 | ||
1109 | |||
1110 | static void __exit ahci_exit(void) | 1100 | static void __exit ahci_exit(void) |
1111 | { | 1101 | { |
1112 | pci_unregister_driver(&ahci_pci_driver); | 1102 | pci_unregister_driver(&ahci_pci_driver); |
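The ahci changes above are the template for the rest of this series: the
open-coded ioremap() of pci_resource_start()/pci_resource_len() becomes
pci_iomap(), which looks up the BAR itself and returns a void __iomem *
that sparse can type-check, and every iounmap() becomes the matching
pci_iounmap(). A minimal sketch of the pattern (BAR_NR and REG are
placeholders):

	void __iomem *mmio;

	mmio = pci_iomap(pdev, BAR_NR, 0);	/* maxlen 0: map the whole BAR */
	if (!mmio)
		return -ENOMEM;

	writel(val, mmio + REG);
	(void) readl(mmio + REG);		/* flush the posted write */

	pci_iounmap(pdev, mmio);		/* pairs with pci_iomap() */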
diff --git a/drivers/scsi/ata_piix.c b/drivers/scsi/ata_piix.c index fb28c1261848..deec0cef88d9 100644 --- a/drivers/scsi/ata_piix.c +++ b/drivers/scsi/ata_piix.c | |||
@@ -583,8 +583,7 @@ static void pci_enable_intx(struct pci_dev *pdev) | |||
583 | #define AHCI_ENABLE (1 << 31) | 583 | #define AHCI_ENABLE (1 << 31) |
584 | static int piix_disable_ahci(struct pci_dev *pdev) | 584 | static int piix_disable_ahci(struct pci_dev *pdev) |
585 | { | 585 | { |
586 | void *mmio; | 586 | void __iomem *mmio; |
587 | unsigned long addr; | ||
588 | u32 tmp; | 587 | u32 tmp; |
589 | int rc = 0; | 588 | int rc = 0; |
590 | 589 | ||
@@ -592,11 +591,11 @@ static int piix_disable_ahci(struct pci_dev *pdev) | |||
592 | * works because this device is usually set up by BIOS. | 591 | * works because this device is usually set up by BIOS. |
593 | */ | 592 | */ |
594 | 593 | ||
595 | addr = pci_resource_start(pdev, AHCI_PCI_BAR); | 594 | if (!pci_resource_start(pdev, AHCI_PCI_BAR) || |
596 | if (!addr || !pci_resource_len(pdev, AHCI_PCI_BAR)) | 595 | !pci_resource_len(pdev, AHCI_PCI_BAR)) |
597 | return 0; | 596 | return 0; |
598 | 597 | ||
599 | mmio = ioremap(addr, 64); | 598 | mmio = pci_iomap(pdev, AHCI_PCI_BAR, 64); |
600 | if (!mmio) | 599 | if (!mmio) |
601 | return -ENOMEM; | 600 | return -ENOMEM; |
602 | 601 | ||
@@ -610,7 +609,7 @@ static int piix_disable_ahci(struct pci_dev *pdev) | |||
610 | rc = -EIO; | 609 | rc = -EIO; |
611 | } | 610 | } |
612 | 611 | ||
613 | iounmap(mmio); | 612 | pci_iounmap(pdev, mmio); |
614 | return rc; | 613 | return rc; |
615 | } | 614 | } |
616 | 615 | ||
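One detail worth flagging in the piix hunk above: pci_iomap()'s third
argument is a maximum length, so passing 64 maps only the first 64 bytes
of the BAR, enough for the AHCI global-control register this probe pokes,
rather than the whole resource:

	/* sketch: map just the first 64 bytes of the BAR */
	void __iomem *mmio = pci_iomap(pdev, AHCI_PCI_BAR, 64);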
diff --git a/drivers/scsi/libata-core.c b/drivers/scsi/libata-core.c index d824938d05c9..9fb9814525a3 100644 --- a/drivers/scsi/libata-core.c +++ b/drivers/scsi/libata-core.c | |||
@@ -4204,6 +4204,15 @@ ata_probe_ent_alloc(struct device *dev, struct ata_port_info *port) | |||
4204 | 4204 | ||
4205 | 4205 | ||
4206 | 4206 | ||
4207 | #ifdef CONFIG_PCI | ||
4208 | |||
4209 | void ata_pci_host_stop (struct ata_host_set *host_set) | ||
4210 | { | ||
4211 | struct pci_dev *pdev = to_pci_dev(host_set->dev); | ||
4212 | |||
4213 | pci_iounmap(pdev, host_set->mmio_base); | ||
4214 | } | ||
4215 | |||
4207 | /** | 4216 | /** |
4208 | * ata_pci_init_native_mode - Initialize native-mode driver | 4217 | * ata_pci_init_native_mode - Initialize native-mode driver |
4209 | * @pdev: pci device to be initialized | 4218 | * @pdev: pci device to be initialized |
@@ -4216,7 +4225,6 @@ ata_probe_ent_alloc(struct device *dev, struct ata_port_info *port) | |||
4216 | * ata_probe_ent structure should then be freed with kfree(). | 4225 | * ata_probe_ent structure should then be freed with kfree(). |
4217 | */ | 4226 | */ |
4218 | 4227 | ||
4219 | #ifdef CONFIG_PCI | ||
4220 | struct ata_probe_ent * | 4228 | struct ata_probe_ent * |
4221 | ata_pci_init_native_mode(struct pci_dev *pdev, struct ata_port_info **port) | 4229 | ata_pci_init_native_mode(struct pci_dev *pdev, struct ata_port_info **port) |
4222 | { | 4230 | { |
@@ -4599,6 +4607,7 @@ EXPORT_SYMBOL_GPL(ata_scsi_simulate); | |||
4599 | 4607 | ||
4600 | #ifdef CONFIG_PCI | 4608 | #ifdef CONFIG_PCI |
4601 | EXPORT_SYMBOL_GPL(pci_test_config_bits); | 4609 | EXPORT_SYMBOL_GPL(pci_test_config_bits); |
4610 | EXPORT_SYMBOL_GPL(ata_pci_host_stop); | ||
4602 | EXPORT_SYMBOL_GPL(ata_pci_init_native_mode); | 4611 | EXPORT_SYMBOL_GPL(ata_pci_init_native_mode); |
4603 | EXPORT_SYMBOL_GPL(ata_pci_init_one); | 4612 | EXPORT_SYMBOL_GPL(ata_pci_init_one); |
4604 | EXPORT_SYMBOL_GPL(ata_pci_remove_one); | 4613 | EXPORT_SYMBOL_GPL(ata_pci_remove_one); |
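The new ata_pci_host_stop() helper composes naturally with driver-private
teardown; the remaining hunks in this series either install it directly
as .host_stop or layer cleanup on top of it, roughly as in this sketch
(foo_host_stop is an illustrative name):

	static void foo_host_stop(struct ata_host_set *host_set)
	{
		kfree(host_set->private_data);	/* driver-private teardown */
		ata_pci_host_stop(host_set);	/* then unmap the BAR */
	}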
diff --git a/drivers/scsi/sata_nv.c b/drivers/scsi/sata_nv.c index 03d9bc6e69df..a1d62dee3be6 100644 --- a/drivers/scsi/sata_nv.c +++ b/drivers/scsi/sata_nv.c | |||
@@ -351,6 +351,7 @@ static void nv_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val) | |||
351 | static void nv_host_stop (struct ata_host_set *host_set) | 351 | static void nv_host_stop (struct ata_host_set *host_set) |
352 | { | 352 | { |
353 | struct nv_host *host = host_set->private_data; | 353 | struct nv_host *host = host_set->private_data; |
354 | struct pci_dev *pdev = to_pci_dev(host_set->dev); | ||
354 | 355 | ||
355 | // Disable hotplug event interrupts. | 356 | // Disable hotplug event interrupts. |
356 | if (host->host_desc->disable_hotplug) | 357 | if (host->host_desc->disable_hotplug) |
@@ -358,7 +359,8 @@ static void nv_host_stop (struct ata_host_set *host_set) | |||
358 | 359 | ||
359 | kfree(host); | 360 | kfree(host); |
360 | 361 | ||
361 | ata_host_stop(host_set); | 362 | if (host_set->mmio_base) |
363 | pci_iounmap(pdev, host_set->mmio_base); | ||
362 | } | 364 | } |
363 | 365 | ||
364 | static int nv_init_one (struct pci_dev *pdev, const struct pci_device_id *ent) | 366 | static int nv_init_one (struct pci_dev *pdev, const struct pci_device_id *ent) |
@@ -420,8 +422,7 @@ static int nv_init_one (struct pci_dev *pdev, const struct pci_device_id *ent) | |||
420 | if (host->host_flags & NV_HOST_FLAGS_SCR_MMIO) { | 422 | if (host->host_flags & NV_HOST_FLAGS_SCR_MMIO) { |
421 | unsigned long base; | 423 | unsigned long base; |
422 | 424 | ||
423 | probe_ent->mmio_base = ioremap(pci_resource_start(pdev, 5), | 425 | probe_ent->mmio_base = pci_iomap(pdev, 5, 0); |
424 | pci_resource_len(pdev, 5)); | ||
425 | if (probe_ent->mmio_base == NULL) { | 426 | if (probe_ent->mmio_base == NULL) { |
426 | rc = -EIO; | 427 | rc = -EIO; |
427 | goto err_out_free_host; | 428 | goto err_out_free_host; |
@@ -457,7 +458,7 @@ static int nv_init_one (struct pci_dev *pdev, const struct pci_device_id *ent) | |||
457 | 458 | ||
458 | err_out_iounmap: | 459 | err_out_iounmap: |
459 | if (host->host_flags & NV_HOST_FLAGS_SCR_MMIO) | 460 | if (host->host_flags & NV_HOST_FLAGS_SCR_MMIO) |
460 | iounmap(probe_ent->mmio_base); | 461 | pci_iounmap(pdev, probe_ent->mmio_base); |
461 | err_out_free_host: | 462 | err_out_free_host: |
462 | kfree(host); | 463 | kfree(host); |
463 | err_out_free_ent: | 464 | err_out_free_ent: |
diff --git a/drivers/scsi/sata_promise.c b/drivers/scsi/sata_promise.c index 7c4f6ecc1cc9..538ad727bd2e 100644 --- a/drivers/scsi/sata_promise.c +++ b/drivers/scsi/sata_promise.c | |||
@@ -92,6 +92,7 @@ static void pdc_exec_command_mmio(struct ata_port *ap, struct ata_taskfile *tf); | |||
92 | static void pdc_irq_clear(struct ata_port *ap); | 92 | static void pdc_irq_clear(struct ata_port *ap); |
93 | static int pdc_qc_issue_prot(struct ata_queued_cmd *qc); | 93 | static int pdc_qc_issue_prot(struct ata_queued_cmd *qc); |
94 | 94 | ||
95 | |||
95 | static Scsi_Host_Template pdc_ata_sht = { | 96 | static Scsi_Host_Template pdc_ata_sht = { |
96 | .module = THIS_MODULE, | 97 | .module = THIS_MODULE, |
97 | .name = DRV_NAME, | 98 | .name = DRV_NAME, |
@@ -132,7 +133,7 @@ static struct ata_port_operations pdc_sata_ops = { | |||
132 | .scr_write = pdc_sata_scr_write, | 133 | .scr_write = pdc_sata_scr_write, |
133 | .port_start = pdc_port_start, | 134 | .port_start = pdc_port_start, |
134 | .port_stop = pdc_port_stop, | 135 | .port_stop = pdc_port_stop, |
135 | .host_stop = ata_host_stop, | 136 | .host_stop = ata_pci_host_stop, |
136 | }; | 137 | }; |
137 | 138 | ||
138 | static struct ata_port_operations pdc_pata_ops = { | 139 | static struct ata_port_operations pdc_pata_ops = { |
@@ -153,7 +154,7 @@ static struct ata_port_operations pdc_pata_ops = { | |||
153 | 154 | ||
154 | .port_start = pdc_port_start, | 155 | .port_start = pdc_port_start, |
155 | .port_stop = pdc_port_stop, | 156 | .port_stop = pdc_port_stop, |
156 | .host_stop = ata_host_stop, | 157 | .host_stop = ata_pci_host_stop, |
157 | }; | 158 | }; |
158 | 159 | ||
159 | static struct ata_port_info pdc_port_info[] = { | 160 | static struct ata_port_info pdc_port_info[] = { |
@@ -282,7 +283,7 @@ static void pdc_port_stop(struct ata_port *ap) | |||
282 | 283 | ||
283 | static void pdc_reset_port(struct ata_port *ap) | 284 | static void pdc_reset_port(struct ata_port *ap) |
284 | { | 285 | { |
285 | void *mmio = (void *) ap->ioaddr.cmd_addr + PDC_CTLSTAT; | 286 | void __iomem *mmio = (void __iomem *) ap->ioaddr.cmd_addr + PDC_CTLSTAT; |
286 | unsigned int i; | 287 | unsigned int i; |
287 | u32 tmp; | 288 | u32 tmp; |
288 | 289 | ||
@@ -418,7 +419,7 @@ static inline unsigned int pdc_host_intr( struct ata_port *ap, | |||
418 | u8 status; | 419 | u8 status; |
419 | unsigned int handled = 0, have_err = 0; | 420 | unsigned int handled = 0, have_err = 0; |
420 | u32 tmp; | 421 | u32 tmp; |
421 | void *mmio = (void *) ap->ioaddr.cmd_addr + PDC_GLOBAL_CTL; | 422 | void __iomem *mmio = (void __iomem *) ap->ioaddr.cmd_addr + PDC_GLOBAL_CTL; |
422 | 423 | ||
423 | tmp = readl(mmio); | 424 | tmp = readl(mmio); |
424 | if (tmp & PDC_ERR_MASK) { | 425 | if (tmp & PDC_ERR_MASK) { |
@@ -447,7 +448,7 @@ static inline unsigned int pdc_host_intr( struct ata_port *ap, | |||
447 | static void pdc_irq_clear(struct ata_port *ap) | 448 | static void pdc_irq_clear(struct ata_port *ap) |
448 | { | 449 | { |
449 | struct ata_host_set *host_set = ap->host_set; | 450 | struct ata_host_set *host_set = ap->host_set; |
450 | void *mmio = host_set->mmio_base; | 451 | void __iomem *mmio = host_set->mmio_base; |
451 | 452 | ||
452 | readl(mmio + PDC_INT_SEQMASK); | 453 | readl(mmio + PDC_INT_SEQMASK); |
453 | } | 454 | } |
@@ -459,7 +460,7 @@ static irqreturn_t pdc_interrupt (int irq, void *dev_instance, struct pt_regs *r | |||
459 | u32 mask = 0; | 460 | u32 mask = 0; |
460 | unsigned int i, tmp; | 461 | unsigned int i, tmp; |
461 | unsigned int handled = 0; | 462 | unsigned int handled = 0; |
462 | void *mmio_base; | 463 | void __iomem *mmio_base; |
463 | 464 | ||
464 | VPRINTK("ENTER\n"); | 465 | VPRINTK("ENTER\n"); |
465 | 466 | ||
@@ -581,7 +582,7 @@ static void pdc_ata_setup_port(struct ata_ioports *port, unsigned long base) | |||
581 | 582 | ||
582 | static void pdc_host_init(unsigned int chip_id, struct ata_probe_ent *pe) | 583 | static void pdc_host_init(unsigned int chip_id, struct ata_probe_ent *pe) |
583 | { | 584 | { |
584 | void *mmio = pe->mmio_base; | 585 | void __iomem *mmio = pe->mmio_base; |
585 | u32 tmp; | 586 | u32 tmp; |
586 | 587 | ||
587 | /* | 588 | /* |
@@ -624,7 +625,7 @@ static int pdc_ata_init_one (struct pci_dev *pdev, const struct pci_device_id *e | |||
624 | static int printed_version; | 625 | static int printed_version; |
625 | struct ata_probe_ent *probe_ent = NULL; | 626 | struct ata_probe_ent *probe_ent = NULL; |
626 | unsigned long base; | 627 | unsigned long base; |
627 | void *mmio_base; | 628 | void __iomem *mmio_base; |
628 | unsigned int board_idx = (unsigned int) ent->driver_data; | 629 | unsigned int board_idx = (unsigned int) ent->driver_data; |
629 | int pci_dev_busy = 0; | 630 | int pci_dev_busy = 0; |
630 | int rc; | 631 | int rc; |
@@ -663,8 +664,7 @@ static int pdc_ata_init_one (struct pci_dev *pdev, const struct pci_device_id *e | |||
663 | probe_ent->dev = pci_dev_to_dev(pdev); | 664 | probe_ent->dev = pci_dev_to_dev(pdev); |
664 | INIT_LIST_HEAD(&probe_ent->node); | 665 | INIT_LIST_HEAD(&probe_ent->node); |
665 | 666 | ||
666 | mmio_base = ioremap(pci_resource_start(pdev, 3), | 667 | mmio_base = pci_iomap(pdev, 3, 0); |
667 | pci_resource_len(pdev, 3)); | ||
668 | if (mmio_base == NULL) { | 668 | if (mmio_base == NULL) { |
669 | rc = -ENOMEM; | 669 | rc = -ENOMEM; |
670 | goto err_out_free_ent; | 670 | goto err_out_free_ent; |
diff --git a/drivers/scsi/sata_qstor.c b/drivers/scsi/sata_qstor.c index 9c99ab433bd3..029c2482e127 100644 --- a/drivers/scsi/sata_qstor.c +++ b/drivers/scsi/sata_qstor.c | |||
@@ -538,11 +538,12 @@ static void qs_port_stop(struct ata_port *ap) | |||
538 | static void qs_host_stop(struct ata_host_set *host_set) | 538 | static void qs_host_stop(struct ata_host_set *host_set) |
539 | { | 539 | { |
540 | void __iomem *mmio_base = host_set->mmio_base; | 540 | void __iomem *mmio_base = host_set->mmio_base; |
541 | struct pci_dev *pdev = to_pci_dev(host_set->dev); | ||
541 | 542 | ||
542 | writeb(0, mmio_base + QS_HCT_CTRL); /* disable host interrupts */ | 543 | writeb(0, mmio_base + QS_HCT_CTRL); /* disable host interrupts */ |
543 | writeb(QS_CNFG3_GSRST, mmio_base + QS_HCF_CNFG3); /* global reset */ | 544 | writeb(QS_CNFG3_GSRST, mmio_base + QS_HCF_CNFG3); /* global reset */ |
544 | 545 | ||
545 | ata_host_stop(host_set); | 546 | pci_iounmap(pdev, mmio_base); |
546 | } | 547 | } |
547 | 548 | ||
548 | static void qs_host_init(unsigned int chip_id, struct ata_probe_ent *pe) | 549 | static void qs_host_init(unsigned int chip_id, struct ata_probe_ent *pe) |
@@ -646,8 +647,7 @@ static int qs_ata_init_one(struct pci_dev *pdev, | |||
646 | goto err_out_regions; | 647 | goto err_out_regions; |
647 | } | 648 | } |
648 | 649 | ||
649 | mmio_base = ioremap(pci_resource_start(pdev, 4), | 650 | mmio_base = pci_iomap(pdev, 4, 0); |
650 | pci_resource_len(pdev, 4)); | ||
651 | if (mmio_base == NULL) { | 651 | if (mmio_base == NULL) { |
652 | rc = -ENOMEM; | 652 | rc = -ENOMEM; |
653 | goto err_out_regions; | 653 | goto err_out_regions; |
@@ -697,7 +697,7 @@ static int qs_ata_init_one(struct pci_dev *pdev, | |||
697 | return 0; | 697 | return 0; |
698 | 698 | ||
699 | err_out_iounmap: | 699 | err_out_iounmap: |
700 | iounmap(mmio_base); | 700 | pci_iounmap(pdev, mmio_base); |
701 | err_out_regions: | 701 | err_out_regions: |
702 | pci_release_regions(pdev); | 702 | pci_release_regions(pdev); |
703 | err_out: | 703 | err_out: |
diff --git a/drivers/scsi/sata_sil.c b/drivers/scsi/sata_sil.c index 71d49548f0a3..ba98a175ee3a 100644 --- a/drivers/scsi/sata_sil.c +++ b/drivers/scsi/sata_sil.c | |||
@@ -86,6 +86,7 @@ static u32 sil_scr_read (struct ata_port *ap, unsigned int sc_reg); | |||
86 | static void sil_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val); | 86 | static void sil_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val); |
87 | static void sil_post_set_mode (struct ata_port *ap); | 87 | static void sil_post_set_mode (struct ata_port *ap); |
88 | 88 | ||
89 | |||
89 | static struct pci_device_id sil_pci_tbl[] = { | 90 | static struct pci_device_id sil_pci_tbl[] = { |
90 | { 0x1095, 0x3112, PCI_ANY_ID, PCI_ANY_ID, 0, 0, sil_3112_m15w }, | 91 | { 0x1095, 0x3112, PCI_ANY_ID, PCI_ANY_ID, 0, 0, sil_3112_m15w }, |
91 | { 0x1095, 0x0240, PCI_ANY_ID, PCI_ANY_ID, 0, 0, sil_3112_m15w }, | 92 | { 0x1095, 0x0240, PCI_ANY_ID, PCI_ANY_ID, 0, 0, sil_3112_m15w }, |
@@ -172,7 +173,7 @@ static struct ata_port_operations sil_ops = { | |||
172 | .scr_write = sil_scr_write, | 173 | .scr_write = sil_scr_write, |
173 | .port_start = ata_port_start, | 174 | .port_start = ata_port_start, |
174 | .port_stop = ata_port_stop, | 175 | .port_stop = ata_port_stop, |
175 | .host_stop = ata_host_stop, | 176 | .host_stop = ata_pci_host_stop, |
176 | }; | 177 | }; |
177 | 178 | ||
178 | static struct ata_port_info sil_port_info[] = { | 179 | static struct ata_port_info sil_port_info[] = { |
@@ -231,6 +232,7 @@ MODULE_LICENSE("GPL"); | |||
231 | MODULE_DEVICE_TABLE(pci, sil_pci_tbl); | 232 | MODULE_DEVICE_TABLE(pci, sil_pci_tbl); |
232 | MODULE_VERSION(DRV_VERSION); | 233 | MODULE_VERSION(DRV_VERSION); |
233 | 234 | ||
235 | |||
234 | static unsigned char sil_get_device_cache_line(struct pci_dev *pdev) | 236 | static unsigned char sil_get_device_cache_line(struct pci_dev *pdev) |
235 | { | 237 | { |
236 | u8 cache_line = 0; | 238 | u8 cache_line = 0; |
@@ -242,7 +244,8 @@ static void sil_post_set_mode (struct ata_port *ap) | |||
242 | { | 244 | { |
243 | struct ata_host_set *host_set = ap->host_set; | 245 | struct ata_host_set *host_set = ap->host_set; |
244 | struct ata_device *dev; | 246 | struct ata_device *dev; |
245 | void *addr = host_set->mmio_base + sil_port[ap->port_no].xfer_mode; | 247 | void __iomem *addr = |
248 | host_set->mmio_base + sil_port[ap->port_no].xfer_mode; | ||
246 | u32 tmp, dev_mode[2]; | 249 | u32 tmp, dev_mode[2]; |
247 | unsigned int i; | 250 | unsigned int i; |
248 | 251 | ||
@@ -375,7 +378,7 @@ static int sil_init_one (struct pci_dev *pdev, const struct pci_device_id *ent) | |||
375 | static int printed_version; | 378 | static int printed_version; |
376 | struct ata_probe_ent *probe_ent = NULL; | 379 | struct ata_probe_ent *probe_ent = NULL; |
377 | unsigned long base; | 380 | unsigned long base; |
378 | void *mmio_base; | 381 | void __iomem *mmio_base; |
379 | int rc; | 382 | int rc; |
380 | unsigned int i; | 383 | unsigned int i; |
381 | int pci_dev_busy = 0; | 384 | int pci_dev_busy = 0; |
@@ -425,8 +428,7 @@ static int sil_init_one (struct pci_dev *pdev, const struct pci_device_id *ent) | |||
425 | probe_ent->irq_flags = SA_SHIRQ; | 428 | probe_ent->irq_flags = SA_SHIRQ; |
426 | probe_ent->host_flags = sil_port_info[ent->driver_data].host_flags; | 429 | probe_ent->host_flags = sil_port_info[ent->driver_data].host_flags; |
427 | 430 | ||
428 | mmio_base = ioremap(pci_resource_start(pdev, 5), | 431 | mmio_base = pci_iomap(pdev, 5, 0); |
429 | pci_resource_len(pdev, 5)); | ||
430 | if (mmio_base == NULL) { | 432 | if (mmio_base == NULL) { |
431 | rc = -ENOMEM; | 433 | rc = -ENOMEM; |
432 | goto err_out_free_ent; | 434 | goto err_out_free_ent; |
diff --git a/drivers/scsi/sata_svw.c b/drivers/scsi/sata_svw.c index 19d3bb3b0fb6..d89d968bedac 100644 --- a/drivers/scsi/sata_svw.c +++ b/drivers/scsi/sata_svw.c | |||
@@ -318,7 +318,7 @@ static struct ata_port_operations k2_sata_ops = { | |||
318 | .scr_write = k2_sata_scr_write, | 318 | .scr_write = k2_sata_scr_write, |
319 | .port_start = ata_port_start, | 319 | .port_start = ata_port_start, |
320 | .port_stop = ata_port_stop, | 320 | .port_stop = ata_port_stop, |
321 | .host_stop = ata_host_stop, | 321 | .host_stop = ata_pci_host_stop, |
322 | }; | 322 | }; |
323 | 323 | ||
324 | static void k2_sata_setup_port(struct ata_ioports *port, unsigned long base) | 324 | static void k2_sata_setup_port(struct ata_ioports *port, unsigned long base) |
@@ -346,7 +346,7 @@ static int k2_sata_init_one (struct pci_dev *pdev, const struct pci_device_id *e | |||
346 | static int printed_version; | 346 | static int printed_version; |
347 | struct ata_probe_ent *probe_ent = NULL; | 347 | struct ata_probe_ent *probe_ent = NULL; |
348 | unsigned long base; | 348 | unsigned long base; |
349 | void *mmio_base; | 349 | void __iomem *mmio_base; |
350 | int pci_dev_busy = 0; | 350 | int pci_dev_busy = 0; |
351 | int rc; | 351 | int rc; |
352 | int i; | 352 | int i; |
@@ -392,8 +392,7 @@ static int k2_sata_init_one (struct pci_dev *pdev, const struct pci_device_id *e | |||
392 | probe_ent->dev = pci_dev_to_dev(pdev); | 392 | probe_ent->dev = pci_dev_to_dev(pdev); |
393 | INIT_LIST_HEAD(&probe_ent->node); | 393 | INIT_LIST_HEAD(&probe_ent->node); |
394 | 394 | ||
395 | mmio_base = ioremap(pci_resource_start(pdev, 5), | 395 | mmio_base = pci_iomap(pdev, 5, 0); |
396 | pci_resource_len(pdev, 5)); | ||
397 | if (mmio_base == NULL) { | 396 | if (mmio_base == NULL) { |
398 | rc = -ENOMEM; | 397 | rc = -ENOMEM; |
399 | goto err_out_free_ent; | 398 | goto err_out_free_ent; |
diff --git a/drivers/scsi/sata_sx4.c b/drivers/scsi/sata_sx4.c index c72fcc46f0fa..540a85191172 100644 --- a/drivers/scsi/sata_sx4.c +++ b/drivers/scsi/sata_sx4.c | |||
@@ -245,13 +245,14 @@ static struct pci_driver pdc_sata_pci_driver = { | |||
245 | 245 | ||
246 | static void pdc20621_host_stop(struct ata_host_set *host_set) | 246 | static void pdc20621_host_stop(struct ata_host_set *host_set) |
247 | { | 247 | { |
248 | struct pci_dev *pdev = to_pci_dev(host_set->dev); | ||
248 | struct pdc_host_priv *hpriv = host_set->private_data; | 249 | struct pdc_host_priv *hpriv = host_set->private_data; |
249 | void *dimm_mmio = hpriv->dimm_mmio; | 250 | void *dimm_mmio = hpriv->dimm_mmio; |
250 | 251 | ||
251 | iounmap(dimm_mmio); | 252 | pci_iounmap(pdev, dimm_mmio); |
252 | kfree(hpriv); | 253 | kfree(hpriv); |
253 | 254 | ||
254 | ata_host_stop(host_set); | 255 | pci_iounmap(pdev, host_set->mmio_base); |
255 | } | 256 | } |
256 | 257 | ||
257 | static int pdc_port_start(struct ata_port *ap) | 258 | static int pdc_port_start(struct ata_port *ap) |
@@ -451,9 +452,9 @@ static void pdc20621_dma_prep(struct ata_queued_cmd *qc) | |||
451 | struct scatterlist *sg = qc->sg; | 452 | struct scatterlist *sg = qc->sg; |
452 | struct ata_port *ap = qc->ap; | 453 | struct ata_port *ap = qc->ap; |
453 | struct pdc_port_priv *pp = ap->private_data; | 454 | struct pdc_port_priv *pp = ap->private_data; |
454 | void *mmio = ap->host_set->mmio_base; | 455 | void __iomem *mmio = ap->host_set->mmio_base; |
455 | struct pdc_host_priv *hpriv = ap->host_set->private_data; | 456 | struct pdc_host_priv *hpriv = ap->host_set->private_data; |
456 | void *dimm_mmio = hpriv->dimm_mmio; | 457 | void __iomem *dimm_mmio = hpriv->dimm_mmio; |
457 | unsigned int portno = ap->port_no; | 458 | unsigned int portno = ap->port_no; |
458 | unsigned int i, last, idx, total_len = 0, sgt_len; | 459 | unsigned int i, last, idx, total_len = 0, sgt_len; |
459 | u32 *buf = (u32 *) &pp->dimm_buf[PDC_DIMM_HEADER_SZ]; | 460 | u32 *buf = (u32 *) &pp->dimm_buf[PDC_DIMM_HEADER_SZ]; |
@@ -513,9 +514,9 @@ static void pdc20621_nodata_prep(struct ata_queued_cmd *qc) | |||
513 | { | 514 | { |
514 | struct ata_port *ap = qc->ap; | 515 | struct ata_port *ap = qc->ap; |
515 | struct pdc_port_priv *pp = ap->private_data; | 516 | struct pdc_port_priv *pp = ap->private_data; |
516 | void *mmio = ap->host_set->mmio_base; | 517 | void __iomem *mmio = ap->host_set->mmio_base; |
517 | struct pdc_host_priv *hpriv = ap->host_set->private_data; | 518 | struct pdc_host_priv *hpriv = ap->host_set->private_data; |
518 | void *dimm_mmio = hpriv->dimm_mmio; | 519 | void __iomem *dimm_mmio = hpriv->dimm_mmio; |
519 | unsigned int portno = ap->port_no; | 520 | unsigned int portno = ap->port_no; |
520 | unsigned int i; | 521 | unsigned int i; |
521 | 522 | ||
@@ -565,7 +566,7 @@ static void __pdc20621_push_hdma(struct ata_queued_cmd *qc, | |||
565 | { | 566 | { |
566 | struct ata_port *ap = qc->ap; | 567 | struct ata_port *ap = qc->ap; |
567 | struct ata_host_set *host_set = ap->host_set; | 568 | struct ata_host_set *host_set = ap->host_set; |
568 | void *mmio = host_set->mmio_base; | 569 | void __iomem *mmio = host_set->mmio_base; |
569 | 570 | ||
570 | /* hard-code chip #0 */ | 571 | /* hard-code chip #0 */ |
571 | mmio += PDC_CHIP0_OFS; | 572 | mmio += PDC_CHIP0_OFS; |
@@ -639,7 +640,7 @@ static void pdc20621_packet_start(struct ata_queued_cmd *qc) | |||
639 | struct ata_port *ap = qc->ap; | 640 | struct ata_port *ap = qc->ap; |
640 | struct ata_host_set *host_set = ap->host_set; | 641 | struct ata_host_set *host_set = ap->host_set; |
641 | unsigned int port_no = ap->port_no; | 642 | unsigned int port_no = ap->port_no; |
642 | void *mmio = host_set->mmio_base; | 643 | void __iomem *mmio = host_set->mmio_base; |
643 | unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE); | 644 | unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE); |
644 | u8 seq = (u8) (port_no + 1); | 645 | u8 seq = (u8) (port_no + 1); |
645 | unsigned int port_ofs; | 646 | unsigned int port_ofs; |
@@ -699,7 +700,7 @@ static int pdc20621_qc_issue_prot(struct ata_queued_cmd *qc) | |||
699 | static inline unsigned int pdc20621_host_intr( struct ata_port *ap, | 700 | static inline unsigned int pdc20621_host_intr( struct ata_port *ap, |
700 | struct ata_queued_cmd *qc, | 701 | struct ata_queued_cmd *qc, |
701 | unsigned int doing_hdma, | 702 | unsigned int doing_hdma, |
702 | void *mmio) | 703 | void __iomem *mmio) |
703 | { | 704 | { |
704 | unsigned int port_no = ap->port_no; | 705 | unsigned int port_no = ap->port_no; |
705 | unsigned int port_ofs = | 706 | unsigned int port_ofs = |
@@ -778,7 +779,7 @@ static inline unsigned int pdc20621_host_intr( struct ata_port *ap, | |||
778 | static void pdc20621_irq_clear(struct ata_port *ap) | 779 | static void pdc20621_irq_clear(struct ata_port *ap) |
779 | { | 780 | { |
780 | struct ata_host_set *host_set = ap->host_set; | 781 | struct ata_host_set *host_set = ap->host_set; |
781 | void *mmio = host_set->mmio_base; | 782 | void __iomem *mmio = host_set->mmio_base; |
782 | 783 | ||
783 | mmio += PDC_CHIP0_OFS; | 784 | mmio += PDC_CHIP0_OFS; |
784 | 785 | ||
@@ -792,7 +793,7 @@ static irqreturn_t pdc20621_interrupt (int irq, void *dev_instance, struct pt_re | |||
792 | u32 mask = 0; | 793 | u32 mask = 0; |
793 | unsigned int i, tmp, port_no; | 794 | unsigned int i, tmp, port_no; |
794 | unsigned int handled = 0; | 795 | unsigned int handled = 0; |
795 | void *mmio_base; | 796 | void __iomem *mmio_base; |
796 | 797 | ||
797 | VPRINTK("ENTER\n"); | 798 | VPRINTK("ENTER\n"); |
798 | 799 | ||
@@ -940,9 +941,9 @@ static void pdc20621_get_from_dimm(struct ata_probe_ent *pe, void *psource, | |||
940 | u16 idx; | 941 | u16 idx; |
941 | u8 page_mask; | 942 | u8 page_mask; |
942 | long dist; | 943 | long dist; |
943 | void *mmio = pe->mmio_base; | 944 | void __iomem *mmio = pe->mmio_base; |
944 | struct pdc_host_priv *hpriv = pe->private_data; | 945 | struct pdc_host_priv *hpriv = pe->private_data; |
945 | void *dimm_mmio = hpriv->dimm_mmio; | 946 | void __iomem *dimm_mmio = hpriv->dimm_mmio; |
946 | 947 | ||
947 | /* hard-code chip #0 */ | 948 | /* hard-code chip #0 */ |
948 | mmio += PDC_CHIP0_OFS; | 949 | mmio += PDC_CHIP0_OFS; |
@@ -996,9 +997,9 @@ static void pdc20621_put_to_dimm(struct ata_probe_ent *pe, void *psource, | |||
996 | u16 idx; | 997 | u16 idx; |
997 | u8 page_mask; | 998 | u8 page_mask; |
998 | long dist; | 999 | long dist; |
999 | void *mmio = pe->mmio_base; | 1000 | void __iomem *mmio = pe->mmio_base; |
1000 | struct pdc_host_priv *hpriv = pe->private_data; | 1001 | struct pdc_host_priv *hpriv = pe->private_data; |
1001 | void *dimm_mmio = hpriv->dimm_mmio; | 1002 | void __iomem *dimm_mmio = hpriv->dimm_mmio; |
1002 | 1003 | ||
1003 | /* hard-code chip #0 */ | 1004 | /* hard-code chip #0 */ |
1004 | mmio += PDC_CHIP0_OFS; | 1005 | mmio += PDC_CHIP0_OFS; |
@@ -1044,7 +1045,7 @@ static void pdc20621_put_to_dimm(struct ata_probe_ent *pe, void *psource, | |||
1044 | static unsigned int pdc20621_i2c_read(struct ata_probe_ent *pe, u32 device, | 1045 | static unsigned int pdc20621_i2c_read(struct ata_probe_ent *pe, u32 device, |
1045 | u32 subaddr, u32 *pdata) | 1046 | u32 subaddr, u32 *pdata) |
1046 | { | 1047 | { |
1047 | void *mmio = pe->mmio_base; | 1048 | void __iomem *mmio = pe->mmio_base; |
1048 | u32 i2creg = 0; | 1049 | u32 i2creg = 0; |
1049 | u32 status; | 1050 | u32 status; |
1050 | u32 count =0; | 1051 | u32 count =0; |
@@ -1103,7 +1104,7 @@ static int pdc20621_prog_dimm0(struct ata_probe_ent *pe) | |||
1103 | u32 data = 0; | 1104 | u32 data = 0; |
1104 | int size, i; | 1105 | int size, i; |
1105 | u8 bdimmsize; | 1106 | u8 bdimmsize; |
1106 | void *mmio = pe->mmio_base; | 1107 | void __iomem *mmio = pe->mmio_base; |
1107 | static const struct { | 1108 | static const struct { |
1108 | unsigned int reg; | 1109 | unsigned int reg; |
1109 | unsigned int ofs; | 1110 | unsigned int ofs; |
@@ -1166,7 +1167,7 @@ static unsigned int pdc20621_prog_dimm_global(struct ata_probe_ent *pe) | |||
1166 | { | 1167 | { |
1167 | u32 data, spd0; | 1168 | u32 data, spd0; |
1168 | int error, i; | 1169 | int error, i; |
1169 | void *mmio = pe->mmio_base; | 1170 | void __iomem *mmio = pe->mmio_base; |
1170 | 1171 | ||
1171 | /* hard-code chip #0 */ | 1172 | /* hard-code chip #0 */ |
1172 | mmio += PDC_CHIP0_OFS; | 1173 | mmio += PDC_CHIP0_OFS; |
@@ -1220,7 +1221,7 @@ static unsigned int pdc20621_dimm_init(struct ata_probe_ent *pe) | |||
1220 | u32 ticks=0; | 1221 | u32 ticks=0; |
1221 | u32 clock=0; | 1222 | u32 clock=0; |
1222 | u32 fparam=0; | 1223 | u32 fparam=0; |
1223 | void *mmio = pe->mmio_base; | 1224 | void __iomem *mmio = pe->mmio_base; |
1224 | 1225 | ||
1225 | /* hard-code chip #0 */ | 1226 | /* hard-code chip #0 */ |
1226 | mmio += PDC_CHIP0_OFS; | 1227 | mmio += PDC_CHIP0_OFS; |
@@ -1344,7 +1345,7 @@ static unsigned int pdc20621_dimm_init(struct ata_probe_ent *pe) | |||
1344 | static void pdc_20621_init(struct ata_probe_ent *pe) | 1345 | static void pdc_20621_init(struct ata_probe_ent *pe) |
1345 | { | 1346 | { |
1346 | u32 tmp; | 1347 | u32 tmp; |
1347 | void *mmio = pe->mmio_base; | 1348 | void __iomem *mmio = pe->mmio_base; |
1348 | 1349 | ||
1349 | /* hard-code chip #0 */ | 1350 | /* hard-code chip #0 */ |
1350 | mmio += PDC_CHIP0_OFS; | 1351 | mmio += PDC_CHIP0_OFS; |
@@ -1377,7 +1378,8 @@ static int pdc_sata_init_one (struct pci_dev *pdev, const struct pci_device_id * | |||
1377 | static int printed_version; | 1378 | static int printed_version; |
1378 | struct ata_probe_ent *probe_ent = NULL; | 1379 | struct ata_probe_ent *probe_ent = NULL; |
1379 | unsigned long base; | 1380 | unsigned long base; |
1380 | void *mmio_base, *dimm_mmio = NULL; | 1381 | void __iomem *mmio_base; |
1382 | void __iomem *dimm_mmio = NULL; | ||
1381 | struct pdc_host_priv *hpriv = NULL; | 1383 | struct pdc_host_priv *hpriv = NULL; |
1382 | unsigned int board_idx = (unsigned int) ent->driver_data; | 1384 | unsigned int board_idx = (unsigned int) ent->driver_data; |
1383 | int pci_dev_busy = 0; | 1385 | int pci_dev_busy = 0; |
@@ -1417,8 +1419,7 @@ static int pdc_sata_init_one (struct pci_dev *pdev, const struct pci_device_id * | |||
1417 | probe_ent->dev = pci_dev_to_dev(pdev); | 1419 | probe_ent->dev = pci_dev_to_dev(pdev); |
1418 | INIT_LIST_HEAD(&probe_ent->node); | 1420 | INIT_LIST_HEAD(&probe_ent->node); |
1419 | 1421 | ||
1420 | mmio_base = ioremap(pci_resource_start(pdev, 3), | 1422 | mmio_base = pci_iomap(pdev, 3, 0); |
1421 | pci_resource_len(pdev, 3)); | ||
1422 | if (mmio_base == NULL) { | 1423 | if (mmio_base == NULL) { |
1423 | rc = -ENOMEM; | 1424 | rc = -ENOMEM; |
1424 | goto err_out_free_ent; | 1425 | goto err_out_free_ent; |
@@ -1432,8 +1433,7 @@ static int pdc_sata_init_one (struct pci_dev *pdev, const struct pci_device_id * | |||
1432 | } | 1433 | } |
1433 | memset(hpriv, 0, sizeof(*hpriv)); | 1434 | memset(hpriv, 0, sizeof(*hpriv)); |
1434 | 1435 | ||
1435 | dimm_mmio = ioremap(pci_resource_start(pdev, 4), | 1436 | dimm_mmio = pci_iomap(pdev, 4, 0); |
1436 | pci_resource_len(pdev, 4)); | ||
1437 | if (!dimm_mmio) { | 1437 | if (!dimm_mmio) { |
1438 | kfree(hpriv); | 1438 | kfree(hpriv); |
1439 | rc = -ENOMEM; | 1439 | rc = -ENOMEM; |
@@ -1480,9 +1480,9 @@ static int pdc_sata_init_one (struct pci_dev *pdev, const struct pci_device_id * | |||
1480 | 1480 | ||
1481 | err_out_iounmap_dimm: /* only get to this label if 20621 */ | 1481 | err_out_iounmap_dimm: /* only get to this label if 20621 */ |
1482 | kfree(hpriv); | 1482 | kfree(hpriv); |
1483 | iounmap(dimm_mmio); | 1483 | pci_iounmap(pdev, dimm_mmio); |
1484 | err_out_iounmap: | 1484 | err_out_iounmap: |
1485 | iounmap(mmio_base); | 1485 | pci_iounmap(pdev, mmio_base); |
1486 | err_out_free_ent: | 1486 | err_out_free_ent: |
1487 | kfree(probe_ent); | 1487 | kfree(probe_ent); |
1488 | err_out_regions: | 1488 | err_out_regions: |
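sata_sx4 is the multi-mapping case: the chip maps both its register BAR
and a separate DIMM window, and after this change each mapping gets its
own pci_iounmap() in pdc20621_host_stop(). A hedged sketch of that
teardown ordering (two_bar_* names are illustrative):

	struct two_bar_priv { void __iomem *second_mmio; };	/* illustrative */

	static void two_bar_host_stop(struct ata_host_set *host_set)
	{
		struct pci_dev *pdev = to_pci_dev(host_set->dev);
		struct two_bar_priv *hpriv = host_set->private_data;

		pci_iounmap(pdev, hpriv->second_mmio);	/* extra window first */
		kfree(hpriv);
		pci_iounmap(pdev, host_set->mmio_base);	/* then the main BAR */
	}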
diff --git a/drivers/scsi/sata_vsc.c b/drivers/scsi/sata_vsc.c index 3985f344da4d..cf94e0158a8d 100644 --- a/drivers/scsi/sata_vsc.c +++ b/drivers/scsi/sata_vsc.c | |||
@@ -252,7 +252,7 @@ static struct ata_port_operations vsc_sata_ops = { | |||
252 | .scr_write = vsc_sata_scr_write, | 252 | .scr_write = vsc_sata_scr_write, |
253 | .port_start = ata_port_start, | 253 | .port_start = ata_port_start, |
254 | .port_stop = ata_port_stop, | 254 | .port_stop = ata_port_stop, |
255 | .host_stop = ata_host_stop, | 255 | .host_stop = ata_pci_host_stop, |
256 | }; | 256 | }; |
257 | 257 | ||
258 | static void __devinit vsc_sata_setup_port(struct ata_ioports *port, unsigned long base) | 258 | static void __devinit vsc_sata_setup_port(struct ata_ioports *port, unsigned long base) |
@@ -326,8 +326,7 @@ static int __devinit vsc_sata_init_one (struct pci_dev *pdev, const struct pci_d | |||
326 | probe_ent->dev = pci_dev_to_dev(pdev); | 326 | probe_ent->dev = pci_dev_to_dev(pdev); |
327 | INIT_LIST_HEAD(&probe_ent->node); | 327 | INIT_LIST_HEAD(&probe_ent->node); |
328 | 328 | ||
329 | mmio_base = ioremap(pci_resource_start(pdev, 0), | 329 | mmio_base = pci_iomap(pdev, 0, 0); |
330 | pci_resource_len(pdev, 0)); | ||
331 | if (mmio_base == NULL) { | 330 | if (mmio_base == NULL) { |
332 | rc = -ENOMEM; | 331 | rc = -ENOMEM; |
333 | goto err_out_free_ent; | 332 | goto err_out_free_ent; |