diff options
272 files changed, 4464 insertions, 6165 deletions
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt index 2c85c0692b01..f084af0cb8e0 100644 --- a/Documentation/kernel-parameters.txt +++ b/Documentation/kernel-parameters.txt | |||
| @@ -2629,8 +2629,10 @@ and is between 256 and 4096 characters. It is defined in the file | |||
| 2629 | aux-ide-disks -- unplug non-primary-master IDE devices | 2629 | aux-ide-disks -- unplug non-primary-master IDE devices |
| 2630 | nics -- unplug network devices | 2630 | nics -- unplug network devices |
| 2631 | all -- unplug all emulated devices (NICs and IDE disks) | 2631 | all -- unplug all emulated devices (NICs and IDE disks) |
| 2632 | ignore -- continue loading the Xen platform PCI driver even | 2632 | unnecessary -- unplugging emulated devices is |
| 2633 | if the version check failed | 2633 | unnecessary even if the host did not respond to |
| 2634 | the unplug protocol | ||
| 2635 | never -- do not unplug even if version check succeeds | ||
| 2634 | 2636 | ||
| 2635 | xirc2ps_cs= [NET,PCMCIA] | 2637 | xirc2ps_cs= [NET,PCMCIA] |
| 2636 | Format: | 2638 | Format: |
diff --git a/MAINTAINERS b/MAINTAINERS index 433f35385756..a1df54b0af79 100644 --- a/MAINTAINERS +++ b/MAINTAINERS | |||
| @@ -454,6 +454,17 @@ L: linux-rdma@vger.kernel.org | |||
| 454 | S: Maintained | 454 | S: Maintained |
| 455 | F: drivers/infiniband/hw/amso1100/ | 455 | F: drivers/infiniband/hw/amso1100/ |
| 456 | 456 | ||
| 457 | ANALOG DEVICES INC ASOC DRIVERS | ||
| 458 | L: uclinux-dist-devel@blackfin.uclinux.org | ||
| 459 | L: alsa-devel@alsa-project.org (moderated for non-subscribers) | ||
| 460 | W: http://blackfin.uclinux.org/ | ||
| 461 | S: Supported | ||
| 462 | F: sound/soc/blackfin/* | ||
| 463 | F: sound/soc/codecs/ad1* | ||
| 464 | F: sound/soc/codecs/adau* | ||
| 465 | F: sound/soc/codecs/adav* | ||
| 466 | F: sound/soc/codecs/ssm* | ||
| 467 | |||
| 457 | AOA (Apple Onboard Audio) ALSA DRIVER | 468 | AOA (Apple Onboard Audio) ALSA DRIVER |
| 458 | M: Johannes Berg <johannes@sipsolutions.net> | 469 | M: Johannes Berg <johannes@sipsolutions.net> |
| 459 | L: linuxppc-dev@lists.ozlabs.org | 470 | L: linuxppc-dev@lists.ozlabs.org |
| @@ -1408,8 +1408,8 @@ checkstack: | |||
| 1408 | $(OBJDUMP) -d vmlinux $$(find . -name '*.ko') | \ | 1408 | $(OBJDUMP) -d vmlinux $$(find . -name '*.ko') | \ |
| 1409 | $(PERL) $(src)/scripts/checkstack.pl $(CHECKSTACK_ARCH) | 1409 | $(PERL) $(src)/scripts/checkstack.pl $(CHECKSTACK_ARCH) |
| 1410 | 1410 | ||
| 1411 | kernelrelease: include/config/kernel.release | 1411 | kernelrelease: |
| 1412 | @echo $(KERNELRELEASE) | 1412 | @echo "$(KERNELVERSION)$$($(CONFIG_SHELL) $(srctree)/scripts/setlocalversion $(srctree))" |
| 1413 | 1413 | ||
| 1414 | kernelversion: | 1414 | kernelversion: |
| 1415 | @echo $(KERNELVERSION) | 1415 | @echo $(KERNELVERSION) |
diff --git a/arch/arm/mach-imx/mach-cpuimx27.c b/arch/arm/mach-imx/mach-cpuimx27.c index 575ff1ae85a7..339150ab0ea5 100644 --- a/arch/arm/mach-imx/mach-cpuimx27.c +++ b/arch/arm/mach-imx/mach-cpuimx27.c | |||
| @@ -279,13 +279,13 @@ static void __init eukrea_cpuimx27_init(void) | |||
| 279 | #if defined(CONFIG_USB_ULPI) | 279 | #if defined(CONFIG_USB_ULPI) |
| 280 | if (otg_mode_host) { | 280 | if (otg_mode_host) { |
| 281 | otg_pdata.otg = otg_ulpi_create(&mxc_ulpi_access_ops, | 281 | otg_pdata.otg = otg_ulpi_create(&mxc_ulpi_access_ops, |
| 282 | USB_OTG_DRV_VBUS | USB_OTG_DRV_VBUS_EXT); | 282 | ULPI_OTG_DRVVBUS | ULPI_OTG_DRVVBUS_EXT); |
| 283 | 283 | ||
| 284 | mxc_register_device(&mxc_otg_host, &otg_pdata); | 284 | mxc_register_device(&mxc_otg_host, &otg_pdata); |
| 285 | } | 285 | } |
| 286 | 286 | ||
| 287 | usbh2_pdata.otg = otg_ulpi_create(&mxc_ulpi_access_ops, | 287 | usbh2_pdata.otg = otg_ulpi_create(&mxc_ulpi_access_ops, |
| 288 | USB_OTG_DRV_VBUS | USB_OTG_DRV_VBUS_EXT); | 288 | ULPI_OTG_DRVVBUS | ULPI_OTG_DRVVBUS_EXT); |
| 289 | 289 | ||
| 290 | mxc_register_device(&mxc_usbh2, &usbh2_pdata); | 290 | mxc_register_device(&mxc_usbh2, &usbh2_pdata); |
| 291 | #endif | 291 | #endif |
diff --git a/arch/arm/mach-imx/mach-pca100.c b/arch/arm/mach-imx/mach-pca100.c index a389d1148f18..23c9e1f37b9c 100644 --- a/arch/arm/mach-imx/mach-pca100.c +++ b/arch/arm/mach-imx/mach-pca100.c | |||
| @@ -419,13 +419,13 @@ static void __init pca100_init(void) | |||
| 419 | #if defined(CONFIG_USB_ULPI) | 419 | #if defined(CONFIG_USB_ULPI) |
| 420 | if (otg_mode_host) { | 420 | if (otg_mode_host) { |
| 421 | otg_pdata.otg = otg_ulpi_create(&mxc_ulpi_access_ops, | 421 | otg_pdata.otg = otg_ulpi_create(&mxc_ulpi_access_ops, |
| 422 | USB_OTG_DRV_VBUS | USB_OTG_DRV_VBUS_EXT); | 422 | ULPI_OTG_DRVVBUS | ULPI_OTG_DRVVBUS_EXT); |
| 423 | 423 | ||
| 424 | mxc_register_device(&mxc_otg_host, &otg_pdata); | 424 | mxc_register_device(&mxc_otg_host, &otg_pdata); |
| 425 | } | 425 | } |
| 426 | 426 | ||
| 427 | usbh2_pdata.otg = otg_ulpi_create(&mxc_ulpi_access_ops, | 427 | usbh2_pdata.otg = otg_ulpi_create(&mxc_ulpi_access_ops, |
| 428 | USB_OTG_DRV_VBUS | USB_OTG_DRV_VBUS_EXT); | 428 | ULPI_OTG_DRVVBUS | ULPI_OTG_DRVVBUS_EXT); |
| 429 | 429 | ||
| 430 | mxc_register_device(&mxc_usbh2, &usbh2_pdata); | 430 | mxc_register_device(&mxc_usbh2, &usbh2_pdata); |
| 431 | #endif | 431 | #endif |
diff --git a/arch/arm/mach-mx25/mach-cpuimx25.c b/arch/arm/mach-mx25/mach-cpuimx25.c index 56b2e26d23b4..a5f0174290b4 100644 --- a/arch/arm/mach-mx25/mach-cpuimx25.c +++ b/arch/arm/mach-mx25/mach-cpuimx25.c | |||
| @@ -138,7 +138,7 @@ static void __init eukrea_cpuimx25_init(void) | |||
| 138 | #if defined(CONFIG_USB_ULPI) | 138 | #if defined(CONFIG_USB_ULPI) |
| 139 | if (otg_mode_host) { | 139 | if (otg_mode_host) { |
| 140 | otg_pdata.otg = otg_ulpi_create(&mxc_ulpi_access_ops, | 140 | otg_pdata.otg = otg_ulpi_create(&mxc_ulpi_access_ops, |
| 141 | USB_OTG_DRV_VBUS | USB_OTG_DRV_VBUS_EXT); | 141 | ULPI_OTG_DRVVBUS | ULPI_OTG_DRVVBUS_EXT); |
| 142 | 142 | ||
| 143 | mxc_register_device(&mxc_otg, &otg_pdata); | 143 | mxc_register_device(&mxc_otg, &otg_pdata); |
| 144 | } | 144 | } |
diff --git a/arch/arm/mach-mx3/mach-cpuimx35.c b/arch/arm/mach-mx3/mach-cpuimx35.c index 63f970f340a2..9770a6a973be 100644 --- a/arch/arm/mach-mx3/mach-cpuimx35.c +++ b/arch/arm/mach-mx3/mach-cpuimx35.c | |||
| @@ -192,7 +192,7 @@ static void __init mxc_board_init(void) | |||
| 192 | #if defined(CONFIG_USB_ULPI) | 192 | #if defined(CONFIG_USB_ULPI) |
| 193 | if (otg_mode_host) { | 193 | if (otg_mode_host) { |
| 194 | otg_pdata.otg = otg_ulpi_create(&mxc_ulpi_access_ops, | 194 | otg_pdata.otg = otg_ulpi_create(&mxc_ulpi_access_ops, |
| 195 | USB_OTG_DRV_VBUS | USB_OTG_DRV_VBUS_EXT); | 195 | ULPI_OTG_DRVVBUS | ULPI_OTG_DRVVBUS_EXT); |
| 196 | 196 | ||
| 197 | mxc_register_device(&mxc_otg_host, &otg_pdata); | 197 | mxc_register_device(&mxc_otg_host, &otg_pdata); |
| 198 | } | 198 | } |
diff --git a/arch/blackfin/include/asm/bitops.h b/arch/blackfin/include/asm/bitops.h index d5872cd967ab..3f7ef4d97791 100644 --- a/arch/blackfin/include/asm/bitops.h +++ b/arch/blackfin/include/asm/bitops.h | |||
| @@ -22,7 +22,9 @@ | |||
| 22 | 22 | ||
| 23 | #include <asm-generic/bitops/sched.h> | 23 | #include <asm-generic/bitops/sched.h> |
| 24 | #include <asm-generic/bitops/ffs.h> | 24 | #include <asm-generic/bitops/ffs.h> |
| 25 | #include <asm-generic/bitops/const_hweight.h> | ||
| 25 | #include <asm-generic/bitops/lock.h> | 26 | #include <asm-generic/bitops/lock.h> |
| 27 | |||
| 26 | #include <asm-generic/bitops/ext2-non-atomic.h> | 28 | #include <asm-generic/bitops/ext2-non-atomic.h> |
| 27 | #include <asm-generic/bitops/ext2-atomic.h> | 29 | #include <asm-generic/bitops/ext2-atomic.h> |
| 28 | #include <asm-generic/bitops/minix.h> | 30 | #include <asm-generic/bitops/minix.h> |
| @@ -115,7 +117,7 @@ static inline int test_and_change_bit(int nr, volatile unsigned long *addr) | |||
| 115 | * of bits set) of a N-bit word | 117 | * of bits set) of a N-bit word |
| 116 | */ | 118 | */ |
| 117 | 119 | ||
| 118 | static inline unsigned int hweight32(unsigned int w) | 120 | static inline unsigned int __arch_hweight32(unsigned int w) |
| 119 | { | 121 | { |
| 120 | unsigned int res; | 122 | unsigned int res; |
| 121 | 123 | ||
| @@ -125,19 +127,20 @@ static inline unsigned int hweight32(unsigned int w) | |||
| 125 | return res; | 127 | return res; |
| 126 | } | 128 | } |
| 127 | 129 | ||
| 128 | static inline unsigned int hweight64(__u64 w) | 130 | static inline unsigned int __arch_hweight64(__u64 w) |
| 129 | { | 131 | { |
| 130 | return hweight32((unsigned int)(w >> 32)) + hweight32((unsigned int)w); | 132 | return __arch_hweight32((unsigned int)(w >> 32)) + |
| 133 | __arch_hweight32((unsigned int)w); | ||
| 131 | } | 134 | } |
| 132 | 135 | ||
| 133 | static inline unsigned int hweight16(unsigned int w) | 136 | static inline unsigned int __arch_hweight16(unsigned int w) |
| 134 | { | 137 | { |
| 135 | return hweight32(w & 0xffff); | 138 | return __arch_hweight32(w & 0xffff); |
| 136 | } | 139 | } |
| 137 | 140 | ||
| 138 | static inline unsigned int hweight8(unsigned int w) | 141 | static inline unsigned int __arch_hweight8(unsigned int w) |
| 139 | { | 142 | { |
| 140 | return hweight32(w & 0xff); | 143 | return __arch_hweight32(w & 0xff); |
| 141 | } | 144 | } |
| 142 | 145 | ||
| 143 | #endif /* _BLACKFIN_BITOPS_H */ | 146 | #endif /* _BLACKFIN_BITOPS_H */ |
diff --git a/arch/blackfin/include/asm/unistd.h b/arch/blackfin/include/asm/unistd.h index 22886cbdae7a..14fcd254b185 100644 --- a/arch/blackfin/include/asm/unistd.h +++ b/arch/blackfin/include/asm/unistd.h | |||
| @@ -389,8 +389,11 @@ | |||
| 389 | #define __NR_rt_tgsigqueueinfo 368 | 389 | #define __NR_rt_tgsigqueueinfo 368 |
| 390 | #define __NR_perf_event_open 369 | 390 | #define __NR_perf_event_open 369 |
| 391 | #define __NR_recvmmsg 370 | 391 | #define __NR_recvmmsg 370 |
| 392 | #define __NR_fanotify_init 371 | ||
| 393 | #define __NR_fanotify_mark 372 | ||
| 394 | #define __NR_prlimit64 373 | ||
| 392 | 395 | ||
| 393 | #define __NR_syscall 371 | 396 | #define __NR_syscall 374 |
| 394 | #define NR_syscalls __NR_syscall | 397 | #define NR_syscalls __NR_syscall |
| 395 | 398 | ||
| 396 | /* Old optional stuff no one actually uses */ | 399 | /* Old optional stuff no one actually uses */ |
diff --git a/arch/blackfin/mach-common/entry.S b/arch/blackfin/mach-common/entry.S index a5847f5d67c7..af1bffa21dc1 100644 --- a/arch/blackfin/mach-common/entry.S +++ b/arch/blackfin/mach-common/entry.S | |||
| @@ -1628,6 +1628,9 @@ ENTRY(_sys_call_table) | |||
| 1628 | .long _sys_rt_tgsigqueueinfo | 1628 | .long _sys_rt_tgsigqueueinfo |
| 1629 | .long _sys_perf_event_open | 1629 | .long _sys_perf_event_open |
| 1630 | .long _sys_recvmmsg /* 370 */ | 1630 | .long _sys_recvmmsg /* 370 */ |
| 1631 | .long _sys_fanotify_init | ||
| 1632 | .long _sys_fanotify_mark | ||
| 1633 | .long _sys_prlimit64 | ||
| 1631 | 1634 | ||
| 1632 | .rept NR_syscalls-(.-_sys_call_table)/4 | 1635 | .rept NR_syscalls-(.-_sys_call_table)/4 |
| 1633 | .long _sys_ni_syscall | 1636 | .long _sys_ni_syscall |
diff --git a/arch/mn10300/mm/dma-alloc.c b/arch/mn10300/mm/dma-alloc.c index 4e34880bea03..159acb02cfd4 100644 --- a/arch/mn10300/mm/dma-alloc.c +++ b/arch/mn10300/mm/dma-alloc.c | |||
| @@ -25,7 +25,8 @@ void *dma_alloc_coherent(struct device *dev, size_t size, | |||
| 25 | unsigned long addr; | 25 | unsigned long addr; |
| 26 | void *ret; | 26 | void *ret; |
| 27 | 27 | ||
| 28 | printk("dma_alloc_coherent(%s,%zu,,%x)\n", dev_name(dev), size, gfp); | 28 | pr_debug("dma_alloc_coherent(%s,%zu,%x)\n", |
| 29 | dev ? dev_name(dev) : "?", size, gfp); | ||
| 29 | 30 | ||
| 30 | if (0xbe000000 - pci_sram_allocated >= size) { | 31 | if (0xbe000000 - pci_sram_allocated >= size) { |
| 31 | size = (size + 255) & ~255; | 32 | size = (size + 255) & ~255; |
diff --git a/arch/powerpc/Makefile b/arch/powerpc/Makefile index e3ea151c9597..b7212b619c52 100644 --- a/arch/powerpc/Makefile +++ b/arch/powerpc/Makefile | |||
| @@ -164,7 +164,7 @@ drivers-$(CONFIG_OPROFILE) += arch/powerpc/oprofile/ | |||
| 164 | all: zImage | 164 | all: zImage |
| 165 | 165 | ||
| 166 | # With make 3.82 we cannot mix normal and wildcard targets | 166 | # With make 3.82 we cannot mix normal and wildcard targets |
| 167 | BOOT_TARGETS1 := zImage zImage.initrd uImaged | 167 | BOOT_TARGETS1 := zImage zImage.initrd uImage |
| 168 | BOOT_TARGETS2 := zImage% dtbImage% treeImage.% cuImage.% simpleImage.% | 168 | BOOT_TARGETS2 := zImage% dtbImage% treeImage.% cuImage.% simpleImage.% |
| 169 | 169 | ||
| 170 | PHONY += $(BOOT_TARGETS1) $(BOOT_TARGETS2) | 170 | PHONY += $(BOOT_TARGETS1) $(BOOT_TARGETS2) |
diff --git a/arch/powerpc/boot/dts/canyonlands.dts b/arch/powerpc/boot/dts/canyonlands.dts index 5806ef0b860b..a30370396250 100644 --- a/arch/powerpc/boot/dts/canyonlands.dts +++ b/arch/powerpc/boot/dts/canyonlands.dts | |||
| @@ -163,6 +163,14 @@ | |||
| 163 | interrupts = <0x1e 4>; | 163 | interrupts = <0x1e 4>; |
| 164 | }; | 164 | }; |
| 165 | 165 | ||
| 166 | SATA0: sata@bffd1000 { | ||
| 167 | compatible = "amcc,sata-460ex"; | ||
| 168 | reg = <4 0xbffd1000 0x800 4 0xbffd0800 0x400>; | ||
| 169 | interrupt-parent = <&UIC3>; | ||
| 170 | interrupts = <0x0 0x4 /* SATA */ | ||
| 171 | 0x5 0x4>; /* AHBDMA */ | ||
| 172 | }; | ||
| 173 | |||
| 166 | POB0: opb { | 174 | POB0: opb { |
| 167 | compatible = "ibm,opb-460ex", "ibm,opb"; | 175 | compatible = "ibm,opb-460ex", "ibm,opb"; |
| 168 | #address-cells = <1>; | 176 | #address-cells = <1>; |
diff --git a/arch/powerpc/include/asm/mmu-hash64.h b/arch/powerpc/include/asm/mmu-hash64.h index 0e398cfee2c8..acac35d5b382 100644 --- a/arch/powerpc/include/asm/mmu-hash64.h +++ b/arch/powerpc/include/asm/mmu-hash64.h | |||
| @@ -433,7 +433,7 @@ typedef struct { | |||
| 433 | * with. However gcc is not clever enough to compute the | 433 | * with. However gcc is not clever enough to compute the |
| 434 | * modulus (2^n-1) without a second multiply. | 434 | * modulus (2^n-1) without a second multiply. |
| 435 | */ | 435 | */ |
| 436 | #define vsid_scrample(protovsid, size) \ | 436 | #define vsid_scramble(protovsid, size) \ |
| 437 | ((((protovsid) * VSID_MULTIPLIER_##size) % VSID_MODULUS_##size)) | 437 | ((((protovsid) * VSID_MULTIPLIER_##size) % VSID_MODULUS_##size)) |
| 438 | 438 | ||
| 439 | #else /* 1 */ | 439 | #else /* 1 */ |
diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h index d8be016d2ede..ff0005eec7dd 100644 --- a/arch/powerpc/include/asm/reg.h +++ b/arch/powerpc/include/asm/reg.h | |||
| @@ -951,7 +951,14 @@ | |||
| 951 | #ifdef CONFIG_PPC64 | 951 | #ifdef CONFIG_PPC64 |
| 952 | 952 | ||
| 953 | extern void ppc64_runlatch_on(void); | 953 | extern void ppc64_runlatch_on(void); |
| 954 | extern void ppc64_runlatch_off(void); | 954 | extern void __ppc64_runlatch_off(void); |
| 955 | |||
| 956 | #define ppc64_runlatch_off() \ | ||
| 957 | do { \ | ||
| 958 | if (cpu_has_feature(CPU_FTR_CTRL) && \ | ||
| 959 | test_thread_flag(TIF_RUNLATCH)) \ | ||
| 960 | __ppc64_runlatch_off(); \ | ||
| 961 | } while (0) | ||
| 955 | 962 | ||
| 956 | extern unsigned long scom970_read(unsigned int address); | 963 | extern unsigned long scom970_read(unsigned int address); |
| 957 | extern void scom970_write(unsigned int address, unsigned long value); | 964 | extern void scom970_write(unsigned int address, unsigned long value); |
diff --git a/arch/powerpc/include/asm/rwsem.h b/arch/powerpc/include/asm/rwsem.h index 24cd9281ec37..8447d89fbe72 100644 --- a/arch/powerpc/include/asm/rwsem.h +++ b/arch/powerpc/include/asm/rwsem.h | |||
| @@ -21,15 +21,20 @@ | |||
| 21 | /* | 21 | /* |
| 22 | * the semaphore definition | 22 | * the semaphore definition |
| 23 | */ | 23 | */ |
| 24 | struct rw_semaphore { | 24 | #ifdef CONFIG_PPC64 |
| 25 | /* XXX this should be able to be an atomic_t -- paulus */ | 25 | # define RWSEM_ACTIVE_MASK 0xffffffffL |
| 26 | signed int count; | 26 | #else |
| 27 | #define RWSEM_UNLOCKED_VALUE 0x00000000 | 27 | # define RWSEM_ACTIVE_MASK 0x0000ffffL |
| 28 | #define RWSEM_ACTIVE_BIAS 0x00000001 | 28 | #endif |
| 29 | #define RWSEM_ACTIVE_MASK 0x0000ffff | 29 | |
| 30 | #define RWSEM_WAITING_BIAS (-0x00010000) | 30 | #define RWSEM_UNLOCKED_VALUE 0x00000000L |
| 31 | #define RWSEM_ACTIVE_BIAS 0x00000001L | ||
| 32 | #define RWSEM_WAITING_BIAS (-RWSEM_ACTIVE_MASK-1) | ||
| 31 | #define RWSEM_ACTIVE_READ_BIAS RWSEM_ACTIVE_BIAS | 33 | #define RWSEM_ACTIVE_READ_BIAS RWSEM_ACTIVE_BIAS |
| 32 | #define RWSEM_ACTIVE_WRITE_BIAS (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS) | 34 | #define RWSEM_ACTIVE_WRITE_BIAS (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS) |
| 35 | |||
| 36 | struct rw_semaphore { | ||
| 37 | long count; | ||
| 33 | spinlock_t wait_lock; | 38 | spinlock_t wait_lock; |
| 34 | struct list_head wait_list; | 39 | struct list_head wait_list; |
| 35 | #ifdef CONFIG_DEBUG_LOCK_ALLOC | 40 | #ifdef CONFIG_DEBUG_LOCK_ALLOC |
| @@ -43,9 +48,13 @@ struct rw_semaphore { | |||
| 43 | # define __RWSEM_DEP_MAP_INIT(lockname) | 48 | # define __RWSEM_DEP_MAP_INIT(lockname) |
| 44 | #endif | 49 | #endif |
| 45 | 50 | ||
| 46 | #define __RWSEM_INITIALIZER(name) \ | 51 | #define __RWSEM_INITIALIZER(name) \ |
| 47 | { RWSEM_UNLOCKED_VALUE, __SPIN_LOCK_UNLOCKED((name).wait_lock), \ | 52 | { \ |
| 48 | LIST_HEAD_INIT((name).wait_list) __RWSEM_DEP_MAP_INIT(name) } | 53 | RWSEM_UNLOCKED_VALUE, \ |
| 54 | __SPIN_LOCK_UNLOCKED((name).wait_lock), \ | ||
| 55 | LIST_HEAD_INIT((name).wait_list) \ | ||
| 56 | __RWSEM_DEP_MAP_INIT(name) \ | ||
| 57 | } | ||
| 49 | 58 | ||
| 50 | #define DECLARE_RWSEM(name) \ | 59 | #define DECLARE_RWSEM(name) \ |
| 51 | struct rw_semaphore name = __RWSEM_INITIALIZER(name) | 60 | struct rw_semaphore name = __RWSEM_INITIALIZER(name) |
| @@ -70,13 +79,13 @@ extern void __init_rwsem(struct rw_semaphore *sem, const char *name, | |||
| 70 | */ | 79 | */ |
| 71 | static inline void __down_read(struct rw_semaphore *sem) | 80 | static inline void __down_read(struct rw_semaphore *sem) |
| 72 | { | 81 | { |
| 73 | if (unlikely(atomic_inc_return((atomic_t *)(&sem->count)) <= 0)) | 82 | if (unlikely(atomic_long_inc_return((atomic_long_t *)&sem->count) <= 0)) |
| 74 | rwsem_down_read_failed(sem); | 83 | rwsem_down_read_failed(sem); |
| 75 | } | 84 | } |
| 76 | 85 | ||
| 77 | static inline int __down_read_trylock(struct rw_semaphore *sem) | 86 | static inline int __down_read_trylock(struct rw_semaphore *sem) |
| 78 | { | 87 | { |
| 79 | int tmp; | 88 | long tmp; |
| 80 | 89 | ||
| 81 | while ((tmp = sem->count) >= 0) { | 90 | while ((tmp = sem->count) >= 0) { |
| 82 | if (tmp == cmpxchg(&sem->count, tmp, | 91 | if (tmp == cmpxchg(&sem->count, tmp, |
| @@ -92,10 +101,10 @@ static inline int __down_read_trylock(struct rw_semaphore *sem) | |||
| 92 | */ | 101 | */ |
| 93 | static inline void __down_write_nested(struct rw_semaphore *sem, int subclass) | 102 | static inline void __down_write_nested(struct rw_semaphore *sem, int subclass) |
| 94 | { | 103 | { |
| 95 | int tmp; | 104 | long tmp; |
| 96 | 105 | ||
| 97 | tmp = atomic_add_return(RWSEM_ACTIVE_WRITE_BIAS, | 106 | tmp = atomic_long_add_return(RWSEM_ACTIVE_WRITE_BIAS, |
| 98 | (atomic_t *)(&sem->count)); | 107 | (atomic_long_t *)&sem->count); |
| 99 | if (unlikely(tmp != RWSEM_ACTIVE_WRITE_BIAS)) | 108 | if (unlikely(tmp != RWSEM_ACTIVE_WRITE_BIAS)) |
| 100 | rwsem_down_write_failed(sem); | 109 | rwsem_down_write_failed(sem); |
| 101 | } | 110 | } |
| @@ -107,7 +116,7 @@ static inline void __down_write(struct rw_semaphore *sem) | |||
| 107 | 116 | ||
| 108 | static inline int __down_write_trylock(struct rw_semaphore *sem) | 117 | static inline int __down_write_trylock(struct rw_semaphore *sem) |
| 109 | { | 118 | { |
| 110 | int tmp; | 119 | long tmp; |
| 111 | 120 | ||
| 112 | tmp = cmpxchg(&sem->count, RWSEM_UNLOCKED_VALUE, | 121 | tmp = cmpxchg(&sem->count, RWSEM_UNLOCKED_VALUE, |
| 113 | RWSEM_ACTIVE_WRITE_BIAS); | 122 | RWSEM_ACTIVE_WRITE_BIAS); |
| @@ -119,9 +128,9 @@ static inline int __down_write_trylock(struct rw_semaphore *sem) | |||
| 119 | */ | 128 | */ |
| 120 | static inline void __up_read(struct rw_semaphore *sem) | 129 | static inline void __up_read(struct rw_semaphore *sem) |
| 121 | { | 130 | { |
| 122 | int tmp; | 131 | long tmp; |
| 123 | 132 | ||
| 124 | tmp = atomic_dec_return((atomic_t *)(&sem->count)); | 133 | tmp = atomic_long_dec_return((atomic_long_t *)&sem->count); |
| 125 | if (unlikely(tmp < -1 && (tmp & RWSEM_ACTIVE_MASK) == 0)) | 134 | if (unlikely(tmp < -1 && (tmp & RWSEM_ACTIVE_MASK) == 0)) |
| 126 | rwsem_wake(sem); | 135 | rwsem_wake(sem); |
| 127 | } | 136 | } |
| @@ -131,17 +140,17 @@ static inline void __up_read(struct rw_semaphore *sem) | |||
| 131 | */ | 140 | */ |
| 132 | static inline void __up_write(struct rw_semaphore *sem) | 141 | static inline void __up_write(struct rw_semaphore *sem) |
| 133 | { | 142 | { |
| 134 | if (unlikely(atomic_sub_return(RWSEM_ACTIVE_WRITE_BIAS, | 143 | if (unlikely(atomic_long_sub_return(RWSEM_ACTIVE_WRITE_BIAS, |
| 135 | (atomic_t *)(&sem->count)) < 0)) | 144 | (atomic_long_t *)&sem->count) < 0)) |
| 136 | rwsem_wake(sem); | 145 | rwsem_wake(sem); |
| 137 | } | 146 | } |
| 138 | 147 | ||
| 139 | /* | 148 | /* |
| 140 | * implement atomic add functionality | 149 | * implement atomic add functionality |
| 141 | */ | 150 | */ |
| 142 | static inline void rwsem_atomic_add(int delta, struct rw_semaphore *sem) | 151 | static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem) |
| 143 | { | 152 | { |
| 144 | atomic_add(delta, (atomic_t *)(&sem->count)); | 153 | atomic_long_add(delta, (atomic_long_t *)&sem->count); |
| 145 | } | 154 | } |
| 146 | 155 | ||
| 147 | /* | 156 | /* |
| @@ -149,9 +158,10 @@ static inline void rwsem_atomic_add(int delta, struct rw_semaphore *sem) | |||
| 149 | */ | 158 | */ |
| 150 | static inline void __downgrade_write(struct rw_semaphore *sem) | 159 | static inline void __downgrade_write(struct rw_semaphore *sem) |
| 151 | { | 160 | { |
| 152 | int tmp; | 161 | long tmp; |
| 153 | 162 | ||
| 154 | tmp = atomic_add_return(-RWSEM_WAITING_BIAS, (atomic_t *)(&sem->count)); | 163 | tmp = atomic_long_add_return(-RWSEM_WAITING_BIAS, |
| 164 | (atomic_long_t *)&sem->count); | ||
| 155 | if (tmp < 0) | 165 | if (tmp < 0) |
| 156 | rwsem_downgrade_wake(sem); | 166 | rwsem_downgrade_wake(sem); |
| 157 | } | 167 | } |
| @@ -159,14 +169,14 @@ static inline void __downgrade_write(struct rw_semaphore *sem) | |||
| 159 | /* | 169 | /* |
| 160 | * implement exchange and add functionality | 170 | * implement exchange and add functionality |
| 161 | */ | 171 | */ |
| 162 | static inline int rwsem_atomic_update(int delta, struct rw_semaphore *sem) | 172 | static inline long rwsem_atomic_update(long delta, struct rw_semaphore *sem) |
| 163 | { | 173 | { |
| 164 | return atomic_add_return(delta, (atomic_t *)(&sem->count)); | 174 | return atomic_long_add_return(delta, (atomic_long_t *)&sem->count); |
| 165 | } | 175 | } |
| 166 | 176 | ||
| 167 | static inline int rwsem_is_locked(struct rw_semaphore *sem) | 177 | static inline int rwsem_is_locked(struct rw_semaphore *sem) |
| 168 | { | 178 | { |
| 169 | return (sem->count != 0); | 179 | return sem->count != 0; |
| 170 | } | 180 | } |
| 171 | 181 | ||
| 172 | #endif /* __KERNEL__ */ | 182 | #endif /* __KERNEL__ */ |
diff --git a/arch/powerpc/include/asm/systbl.h b/arch/powerpc/include/asm/systbl.h index a5ee345b6a5c..3d212669a130 100644 --- a/arch/powerpc/include/asm/systbl.h +++ b/arch/powerpc/include/asm/systbl.h | |||
| @@ -326,3 +326,6 @@ SYSCALL_SPU(perf_event_open) | |||
| 326 | COMPAT_SYS_SPU(preadv) | 326 | COMPAT_SYS_SPU(preadv) |
| 327 | COMPAT_SYS_SPU(pwritev) | 327 | COMPAT_SYS_SPU(pwritev) |
| 328 | COMPAT_SYS(rt_tgsigqueueinfo) | 328 | COMPAT_SYS(rt_tgsigqueueinfo) |
| 329 | SYSCALL(fanotify_init) | ||
| 330 | COMPAT_SYS(fanotify_mark) | ||
| 331 | SYSCALL_SPU(prlimit64) | ||
diff --git a/arch/powerpc/include/asm/unistd.h b/arch/powerpc/include/asm/unistd.h index f0a10266e7f7..597e6f9d094a 100644 --- a/arch/powerpc/include/asm/unistd.h +++ b/arch/powerpc/include/asm/unistd.h | |||
| @@ -345,10 +345,13 @@ | |||
| 345 | #define __NR_preadv 320 | 345 | #define __NR_preadv 320 |
| 346 | #define __NR_pwritev 321 | 346 | #define __NR_pwritev 321 |
| 347 | #define __NR_rt_tgsigqueueinfo 322 | 347 | #define __NR_rt_tgsigqueueinfo 322 |
| 348 | #define __NR_fanotify_init 323 | ||
| 349 | #define __NR_fanotify_mark 324 | ||
| 350 | #define __NR_prlimit64 325 | ||
| 348 | 351 | ||
| 349 | #ifdef __KERNEL__ | 352 | #ifdef __KERNEL__ |
| 350 | 353 | ||
| 351 | #define __NR_syscalls 323 | 354 | #define __NR_syscalls 326 |
| 352 | 355 | ||
| 353 | #define __NR__exit __NR_exit | 356 | #define __NR__exit __NR_exit |
| 354 | #define NR_syscalls __NR_syscalls | 357 | #define NR_syscalls __NR_syscalls |
diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c index 65e2b4e10f97..1f9123f412ec 100644 --- a/arch/powerpc/kernel/cputable.c +++ b/arch/powerpc/kernel/cputable.c | |||
| @@ -1826,7 +1826,6 @@ static struct cpu_spec __initdata cpu_specs[] = { | |||
| 1826 | .cpu_features = CPU_FTRS_47X, | 1826 | .cpu_features = CPU_FTRS_47X, |
| 1827 | .cpu_user_features = COMMON_USER_BOOKE | | 1827 | .cpu_user_features = COMMON_USER_BOOKE | |
| 1828 | PPC_FEATURE_HAS_FPU, | 1828 | PPC_FEATURE_HAS_FPU, |
| 1829 | .cpu_user_features = COMMON_USER_BOOKE, | ||
| 1830 | .mmu_features = MMU_FTR_TYPE_47x | | 1829 | .mmu_features = MMU_FTR_TYPE_47x | |
| 1831 | MMU_FTR_USE_TLBIVAX_BCAST | MMU_FTR_LOCK_BCAST_INVAL, | 1830 | MMU_FTR_USE_TLBIVAX_BCAST | MMU_FTR_LOCK_BCAST_INVAL, |
| 1832 | .icache_bsize = 32, | 1831 | .icache_bsize = 32, |
diff --git a/arch/powerpc/kernel/crash.c b/arch/powerpc/kernel/crash.c index 417f7b05a9ce..4457382f8667 100644 --- a/arch/powerpc/kernel/crash.c +++ b/arch/powerpc/kernel/crash.c | |||
| @@ -402,6 +402,18 @@ void default_machine_crash_shutdown(struct pt_regs *regs) | |||
| 402 | */ | 402 | */ |
| 403 | hard_irq_disable(); | 403 | hard_irq_disable(); |
| 404 | 404 | ||
| 405 | /* | ||
| 406 | * Make a note of crashing cpu. Will be used in machine_kexec | ||
| 407 | * such that another IPI will not be sent. | ||
| 408 | */ | ||
| 409 | crashing_cpu = smp_processor_id(); | ||
| 410 | crash_save_cpu(regs, crashing_cpu); | ||
| 411 | crash_kexec_prepare_cpus(crashing_cpu); | ||
| 412 | cpu_set(crashing_cpu, cpus_in_crash); | ||
| 413 | #if defined(CONFIG_PPC_STD_MMU_64) && defined(CONFIG_SMP) | ||
| 414 | crash_kexec_wait_realmode(crashing_cpu); | ||
| 415 | #endif | ||
| 416 | |||
| 405 | for_each_irq(i) { | 417 | for_each_irq(i) { |
| 406 | struct irq_desc *desc = irq_to_desc(i); | 418 | struct irq_desc *desc = irq_to_desc(i); |
| 407 | 419 | ||
| @@ -438,18 +450,8 @@ void default_machine_crash_shutdown(struct pt_regs *regs) | |||
| 438 | crash_shutdown_cpu = -1; | 450 | crash_shutdown_cpu = -1; |
| 439 | __debugger_fault_handler = old_handler; | 451 | __debugger_fault_handler = old_handler; |
| 440 | 452 | ||
| 441 | /* | ||
| 442 | * Make a note of crashing cpu. Will be used in machine_kexec | ||
| 443 | * such that another IPI will not be sent. | ||
| 444 | */ | ||
| 445 | crashing_cpu = smp_processor_id(); | ||
| 446 | crash_save_cpu(regs, crashing_cpu); | ||
| 447 | crash_kexec_prepare_cpus(crashing_cpu); | ||
| 448 | cpu_set(crashing_cpu, cpus_in_crash); | ||
| 449 | crash_kexec_stop_spus(); | 453 | crash_kexec_stop_spus(); |
| 450 | #if defined(CONFIG_PPC_STD_MMU_64) && defined(CONFIG_SMP) | 454 | |
| 451 | crash_kexec_wait_realmode(crashing_cpu); | ||
| 452 | #endif | ||
| 453 | if (ppc_md.kexec_cpu_down) | 455 | if (ppc_md.kexec_cpu_down) |
| 454 | ppc_md.kexec_cpu_down(1, 0); | 456 | ppc_md.kexec_cpu_down(1, 0); |
| 455 | } | 457 | } |
diff --git a/arch/powerpc/kernel/head_44x.S b/arch/powerpc/kernel/head_44x.S index 5ab484ef06a7..562305b40a8e 100644 --- a/arch/powerpc/kernel/head_44x.S +++ b/arch/powerpc/kernel/head_44x.S | |||
| @@ -113,6 +113,10 @@ _ENTRY(_start); | |||
| 113 | stw r5, 0(r4) /* Save abatron_pteptrs at a fixed location */ | 113 | stw r5, 0(r4) /* Save abatron_pteptrs at a fixed location */ |
| 114 | stw r6, 0(r5) | 114 | stw r6, 0(r5) |
| 115 | 115 | ||
| 116 | /* Clear the Machine Check Syndrome Register */ | ||
| 117 | li r0,0 | ||
| 118 | mtspr SPRN_MCSR,r0 | ||
| 119 | |||
| 116 | /* Let's move on */ | 120 | /* Let's move on */ |
| 117 | lis r4,start_kernel@h | 121 | lis r4,start_kernel@h |
| 118 | ori r4,r4,start_kernel@l | 122 | ori r4,r4,start_kernel@l |
diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S index 844a44b64472..4d6681dce816 100644 --- a/arch/powerpc/kernel/head_64.S +++ b/arch/powerpc/kernel/head_64.S | |||
| @@ -572,9 +572,6 @@ __secondary_start: | |||
| 572 | /* Set thread priority to MEDIUM */ | 572 | /* Set thread priority to MEDIUM */ |
| 573 | HMT_MEDIUM | 573 | HMT_MEDIUM |
| 574 | 574 | ||
| 575 | /* Do early setup for that CPU (stab, slb, hash table pointer) */ | ||
| 576 | bl .early_setup_secondary | ||
| 577 | |||
| 578 | /* Initialize the kernel stack. Just a repeat for iSeries. */ | 575 | /* Initialize the kernel stack. Just a repeat for iSeries. */ |
| 579 | LOAD_REG_ADDR(r3, current_set) | 576 | LOAD_REG_ADDR(r3, current_set) |
| 580 | sldi r28,r24,3 /* get current_set[cpu#] */ | 577 | sldi r28,r24,3 /* get current_set[cpu#] */ |
| @@ -582,6 +579,9 @@ __secondary_start: | |||
| 582 | addi r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD | 579 | addi r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD |
| 583 | std r1,PACAKSAVE(r13) | 580 | std r1,PACAKSAVE(r13) |
| 584 | 581 | ||
| 582 | /* Do early setup for that CPU (stab, slb, hash table pointer) */ | ||
| 583 | bl .early_setup_secondary | ||
| 584 | |||
| 585 | /* Clear backchain so we get nice backtraces */ | 585 | /* Clear backchain so we get nice backtraces */ |
| 586 | li r7,0 | 586 | li r7,0 |
| 587 | mtlr r7 | 587 | mtlr r7 |
diff --git a/arch/powerpc/kernel/idle.c b/arch/powerpc/kernel/idle.c index 049dda60e475..39a2baa6ad58 100644 --- a/arch/powerpc/kernel/idle.c +++ b/arch/powerpc/kernel/idle.c | |||
| @@ -94,9 +94,9 @@ void cpu_idle(void) | |||
| 94 | HMT_medium(); | 94 | HMT_medium(); |
| 95 | ppc64_runlatch_on(); | 95 | ppc64_runlatch_on(); |
| 96 | tick_nohz_restart_sched_tick(); | 96 | tick_nohz_restart_sched_tick(); |
| 97 | preempt_enable_no_resched(); | ||
| 97 | if (cpu_should_die()) | 98 | if (cpu_should_die()) |
| 98 | cpu_die(); | 99 | cpu_die(); |
| 99 | preempt_enable_no_resched(); | ||
| 100 | schedule(); | 100 | schedule(); |
| 101 | preempt_disable(); | 101 | preempt_disable(); |
| 102 | } | 102 | } |
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c index d3ce67cf03be..4a65386995d7 100644 --- a/arch/powerpc/kernel/irq.c +++ b/arch/powerpc/kernel/irq.c | |||
| @@ -67,6 +67,7 @@ | |||
| 67 | #include <asm/machdep.h> | 67 | #include <asm/machdep.h> |
| 68 | #include <asm/udbg.h> | 68 | #include <asm/udbg.h> |
| 69 | #include <asm/dbell.h> | 69 | #include <asm/dbell.h> |
| 70 | #include <asm/smp.h> | ||
| 70 | 71 | ||
| 71 | #ifdef CONFIG_PPC64 | 72 | #ifdef CONFIG_PPC64 |
| 72 | #include <asm/paca.h> | 73 | #include <asm/paca.h> |
| @@ -446,22 +447,23 @@ struct thread_info *mcheckirq_ctx[NR_CPUS] __read_mostly; | |||
| 446 | void exc_lvl_ctx_init(void) | 447 | void exc_lvl_ctx_init(void) |
| 447 | { | 448 | { |
| 448 | struct thread_info *tp; | 449 | struct thread_info *tp; |
| 449 | int i; | 450 | int i, hw_cpu; |
| 450 | 451 | ||
| 451 | for_each_possible_cpu(i) { | 452 | for_each_possible_cpu(i) { |
| 452 | memset((void *)critirq_ctx[i], 0, THREAD_SIZE); | 453 | hw_cpu = get_hard_smp_processor_id(i); |
| 453 | tp = critirq_ctx[i]; | 454 | memset((void *)critirq_ctx[hw_cpu], 0, THREAD_SIZE); |
| 455 | tp = critirq_ctx[hw_cpu]; | ||
| 454 | tp->cpu = i; | 456 | tp->cpu = i; |
| 455 | tp->preempt_count = 0; | 457 | tp->preempt_count = 0; |
| 456 | 458 | ||
| 457 | #ifdef CONFIG_BOOKE | 459 | #ifdef CONFIG_BOOKE |
| 458 | memset((void *)dbgirq_ctx[i], 0, THREAD_SIZE); | 460 | memset((void *)dbgirq_ctx[hw_cpu], 0, THREAD_SIZE); |
| 459 | tp = dbgirq_ctx[i]; | 461 | tp = dbgirq_ctx[hw_cpu]; |
| 460 | tp->cpu = i; | 462 | tp->cpu = i; |
| 461 | tp->preempt_count = 0; | 463 | tp->preempt_count = 0; |
| 462 | 464 | ||
| 463 | memset((void *)mcheckirq_ctx[i], 0, THREAD_SIZE); | 465 | memset((void *)mcheckirq_ctx[hw_cpu], 0, THREAD_SIZE); |
| 464 | tp = mcheckirq_ctx[i]; | 466 | tp = mcheckirq_ctx[hw_cpu]; |
| 465 | tp->cpu = i; | 467 | tp->cpu = i; |
| 466 | tp->preempt_count = HARDIRQ_OFFSET; | 468 | tp->preempt_count = HARDIRQ_OFFSET; |
| 467 | #endif | 469 | #endif |
diff --git a/arch/powerpc/kernel/pci_of_scan.c b/arch/powerpc/kernel/pci_of_scan.c index 6ddb795f83e8..e751506323b4 100644 --- a/arch/powerpc/kernel/pci_of_scan.c +++ b/arch/powerpc/kernel/pci_of_scan.c | |||
| @@ -336,7 +336,7 @@ static void __devinit __of_scan_bus(struct device_node *node, | |||
| 336 | if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE || | 336 | if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE || |
| 337 | dev->hdr_type == PCI_HEADER_TYPE_CARDBUS) { | 337 | dev->hdr_type == PCI_HEADER_TYPE_CARDBUS) { |
| 338 | struct device_node *child = pci_device_to_OF_node(dev); | 338 | struct device_node *child = pci_device_to_OF_node(dev); |
| 339 | if (dev) | 339 | if (child) |
| 340 | of_scan_pci_bridge(child, dev); | 340 | of_scan_pci_bridge(child, dev); |
| 341 | } | 341 | } |
| 342 | } | 342 | } |
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c index 91356ffda2ca..b1c648a36b03 100644 --- a/arch/powerpc/kernel/process.c +++ b/arch/powerpc/kernel/process.c | |||
| @@ -728,7 +728,7 @@ int copy_thread(unsigned long clone_flags, unsigned long usp, | |||
| 728 | p->thread.regs = childregs; | 728 | p->thread.regs = childregs; |
| 729 | if (clone_flags & CLONE_SETTLS) { | 729 | if (clone_flags & CLONE_SETTLS) { |
| 730 | #ifdef CONFIG_PPC64 | 730 | #ifdef CONFIG_PPC64 |
| 731 | if (!test_thread_flag(TIF_32BIT)) | 731 | if (!is_32bit_task()) |
| 732 | childregs->gpr[13] = childregs->gpr[6]; | 732 | childregs->gpr[13] = childregs->gpr[6]; |
| 733 | else | 733 | else |
| 734 | #endif | 734 | #endif |
| @@ -823,7 +823,7 @@ void start_thread(struct pt_regs *regs, unsigned long start, unsigned long sp) | |||
| 823 | regs->nip = start; | 823 | regs->nip = start; |
| 824 | regs->msr = MSR_USER; | 824 | regs->msr = MSR_USER; |
| 825 | #else | 825 | #else |
| 826 | if (!test_thread_flag(TIF_32BIT)) { | 826 | if (!is_32bit_task()) { |
| 827 | unsigned long entry, toc; | 827 | unsigned long entry, toc; |
| 828 | 828 | ||
| 829 | /* start is a relocated pointer to the function descriptor for | 829 | /* start is a relocated pointer to the function descriptor for |
| @@ -995,7 +995,7 @@ int sys_clone(unsigned long clone_flags, unsigned long usp, | |||
| 995 | if (usp == 0) | 995 | if (usp == 0) |
| 996 | usp = regs->gpr[1]; /* stack pointer for child */ | 996 | usp = regs->gpr[1]; /* stack pointer for child */ |
| 997 | #ifdef CONFIG_PPC64 | 997 | #ifdef CONFIG_PPC64 |
| 998 | if (test_thread_flag(TIF_32BIT)) { | 998 | if (is_32bit_task()) { |
| 999 | parent_tidp = TRUNC_PTR(parent_tidp); | 999 | parent_tidp = TRUNC_PTR(parent_tidp); |
| 1000 | child_tidp = TRUNC_PTR(child_tidp); | 1000 | child_tidp = TRUNC_PTR(child_tidp); |
| 1001 | } | 1001 | } |
| @@ -1199,19 +1199,17 @@ void ppc64_runlatch_on(void) | |||
| 1199 | } | 1199 | } |
| 1200 | } | 1200 | } |
| 1201 | 1201 | ||
| 1202 | void ppc64_runlatch_off(void) | 1202 | void __ppc64_runlatch_off(void) |
| 1203 | { | 1203 | { |
| 1204 | unsigned long ctrl; | 1204 | unsigned long ctrl; |
| 1205 | 1205 | ||
| 1206 | if (cpu_has_feature(CPU_FTR_CTRL) && test_thread_flag(TIF_RUNLATCH)) { | 1206 | HMT_medium(); |
| 1207 | HMT_medium(); | ||
| 1208 | 1207 | ||
| 1209 | clear_thread_flag(TIF_RUNLATCH); | 1208 | clear_thread_flag(TIF_RUNLATCH); |
| 1210 | 1209 | ||
| 1211 | ctrl = mfspr(SPRN_CTRLF); | 1210 | ctrl = mfspr(SPRN_CTRLF); |
| 1212 | ctrl &= ~CTRL_RUNLATCH; | 1211 | ctrl &= ~CTRL_RUNLATCH; |
| 1213 | mtspr(SPRN_CTRLT, ctrl); | 1212 | mtspr(SPRN_CTRLT, ctrl); |
| 1214 | } | ||
| 1215 | } | 1213 | } |
| 1216 | #endif | 1214 | #endif |
| 1217 | 1215 | ||
diff --git a/arch/powerpc/kernel/setup_32.c b/arch/powerpc/kernel/setup_32.c index a10ffc85ada7..93666f9cabf1 100644 --- a/arch/powerpc/kernel/setup_32.c +++ b/arch/powerpc/kernel/setup_32.c | |||
| @@ -258,17 +258,18 @@ static void __init irqstack_early_init(void) | |||
| 258 | #if defined(CONFIG_BOOKE) || defined(CONFIG_40x) | 258 | #if defined(CONFIG_BOOKE) || defined(CONFIG_40x) |
| 259 | static void __init exc_lvl_early_init(void) | 259 | static void __init exc_lvl_early_init(void) |
| 260 | { | 260 | { |
| 261 | unsigned int i; | 261 | unsigned int i, hw_cpu; |
| 262 | 262 | ||
| 263 | /* interrupt stacks must be in lowmem, we get that for free on ppc32 | 263 | /* interrupt stacks must be in lowmem, we get that for free on ppc32 |
| 264 | * as the memblock is limited to lowmem by MEMBLOCK_REAL_LIMIT */ | 264 | * as the memblock is limited to lowmem by MEMBLOCK_REAL_LIMIT */ |
| 265 | for_each_possible_cpu(i) { | 265 | for_each_possible_cpu(i) { |
| 266 | critirq_ctx[i] = (struct thread_info *) | 266 | hw_cpu = get_hard_smp_processor_id(i); |
| 267 | critirq_ctx[hw_cpu] = (struct thread_info *) | ||
| 267 | __va(memblock_alloc(THREAD_SIZE, THREAD_SIZE)); | 268 | __va(memblock_alloc(THREAD_SIZE, THREAD_SIZE)); |
| 268 | #ifdef CONFIG_BOOKE | 269 | #ifdef CONFIG_BOOKE |
| 269 | dbgirq_ctx[i] = (struct thread_info *) | 270 | dbgirq_ctx[hw_cpu] = (struct thread_info *) |
| 270 | __va(memblock_alloc(THREAD_SIZE, THREAD_SIZE)); | 271 | __va(memblock_alloc(THREAD_SIZE, THREAD_SIZE)); |
| 271 | mcheckirq_ctx[i] = (struct thread_info *) | 272 | mcheckirq_ctx[hw_cpu] = (struct thread_info *) |
| 272 | __va(memblock_alloc(THREAD_SIZE, THREAD_SIZE)); | 273 | __va(memblock_alloc(THREAD_SIZE, THREAD_SIZE)); |
| 273 | #endif | 274 | #endif |
| 274 | } | 275 | } |
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c index 1bee4b68fa45..e72690ec9b87 100644 --- a/arch/powerpc/kernel/setup_64.c +++ b/arch/powerpc/kernel/setup_64.c | |||
| @@ -95,7 +95,7 @@ int ucache_bsize; | |||
| 95 | 95 | ||
| 96 | #ifdef CONFIG_SMP | 96 | #ifdef CONFIG_SMP |
| 97 | 97 | ||
| 98 | static int smt_enabled_cmdline; | 98 | static char *smt_enabled_cmdline; |
| 99 | 99 | ||
| 100 | /* Look for ibm,smt-enabled OF option */ | 100 | /* Look for ibm,smt-enabled OF option */ |
| 101 | static void check_smt_enabled(void) | 101 | static void check_smt_enabled(void) |
| @@ -103,37 +103,46 @@ static void check_smt_enabled(void) | |||
| 103 | struct device_node *dn; | 103 | struct device_node *dn; |
| 104 | const char *smt_option; | 104 | const char *smt_option; |
| 105 | 105 | ||
| 106 | /* Allow the command line to overrule the OF option */ | 106 | /* Default to enabling all threads */ |
| 107 | if (smt_enabled_cmdline) | 107 | smt_enabled_at_boot = threads_per_core; |
| 108 | return; | ||
| 109 | |||
| 110 | dn = of_find_node_by_path("/options"); | ||
| 111 | |||
| 112 | if (dn) { | ||
| 113 | smt_option = of_get_property(dn, "ibm,smt-enabled", NULL); | ||
| 114 | 108 | ||
| 115 | if (smt_option) { | 109 | /* Allow the command line to overrule the OF option */ |
| 116 | if (!strcmp(smt_option, "on")) | 110 | if (smt_enabled_cmdline) { |
| 117 | smt_enabled_at_boot = 1; | 111 | if (!strcmp(smt_enabled_cmdline, "on")) |
| 118 | else if (!strcmp(smt_option, "off")) | 112 | smt_enabled_at_boot = threads_per_core; |
| 119 | smt_enabled_at_boot = 0; | 113 | else if (!strcmp(smt_enabled_cmdline, "off")) |
| 120 | } | 114 | smt_enabled_at_boot = 0; |
| 121 | } | 115 | else { |
| 116 | long smt; | ||
| 117 | int rc; | ||
| 118 | |||
| 119 | rc = strict_strtol(smt_enabled_cmdline, 10, &smt); | ||
| 120 | if (!rc) | ||
| 121 | smt_enabled_at_boot = | ||
| 122 | min(threads_per_core, (int)smt); | ||
| 123 | } | ||
| 124 | } else { | ||
| 125 | dn = of_find_node_by_path("/options"); | ||
| 126 | if (dn) { | ||
| 127 | smt_option = of_get_property(dn, "ibm,smt-enabled", | ||
| 128 | NULL); | ||
| 129 | |||
| 130 | if (smt_option) { | ||
| 131 | if (!strcmp(smt_option, "on")) | ||
| 132 | smt_enabled_at_boot = threads_per_core; | ||
| 133 | else if (!strcmp(smt_option, "off")) | ||
| 134 | smt_enabled_at_boot = 0; | ||
| 135 | } | ||
| 136 | |||
| 137 | of_node_put(dn); | ||
| 138 | } | ||
| 139 | } | ||
| 122 | } | 140 | } |
| 123 | 141 | ||
| 124 | /* Look for smt-enabled= cmdline option */ | 142 | /* Look for smt-enabled= cmdline option */ |
| 125 | static int __init early_smt_enabled(char *p) | 143 | static int __init early_smt_enabled(char *p) |
| 126 | { | 144 | { |
| 127 | smt_enabled_cmdline = 1; | 145 | smt_enabled_cmdline = p; |
| 128 | |||
| 129 | if (!p) | ||
| 130 | return 0; | ||
| 131 | |||
| 132 | if (!strcmp(p, "on") || !strcmp(p, "1")) | ||
| 133 | smt_enabled_at_boot = 1; | ||
| 134 | else if (!strcmp(p, "off") || !strcmp(p, "0")) | ||
| 135 | smt_enabled_at_boot = 0; | ||
| 136 | |||
| 137 | return 0; | 146 | return 0; |
| 138 | } | 147 | } |
| 139 | early_param("smt-enabled", early_smt_enabled); | 148 | early_param("smt-enabled", early_smt_enabled); |
| @@ -380,8 +389,8 @@ void __init setup_system(void) | |||
| 380 | */ | 389 | */ |
| 381 | xmon_setup(); | 390 | xmon_setup(); |
| 382 | 391 | ||
| 383 | check_smt_enabled(); | ||
| 384 | smp_setup_cpu_maps(); | 392 | smp_setup_cpu_maps(); |
| 393 | check_smt_enabled(); | ||
| 385 | 394 | ||
| 386 | #ifdef CONFIG_SMP | 395 | #ifdef CONFIG_SMP |
| 387 | /* Release secondary cpus out of their spinloops at 0x60 now that | 396 | /* Release secondary cpus out of their spinloops at 0x60 now that |
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c index a61b3ddd7bb3..0008bc58e826 100644 --- a/arch/powerpc/kernel/smp.c +++ b/arch/powerpc/kernel/smp.c | |||
| @@ -427,11 +427,11 @@ int __cpuinit __cpu_up(unsigned int cpu) | |||
| 427 | #endif | 427 | #endif |
| 428 | 428 | ||
| 429 | if (!cpu_callin_map[cpu]) { | 429 | if (!cpu_callin_map[cpu]) { |
| 430 | printk("Processor %u is stuck.\n", cpu); | 430 | printk(KERN_ERR "Processor %u is stuck.\n", cpu); |
| 431 | return -ENOENT; | 431 | return -ENOENT; |
| 432 | } | 432 | } |
| 433 | 433 | ||
| 434 | printk("Processor %u found.\n", cpu); | 434 | DBG("Processor %u found.\n", cpu); |
| 435 | 435 | ||
| 436 | if (smp_ops->give_timebase) | 436 | if (smp_ops->give_timebase) |
| 437 | smp_ops->give_timebase(); | 437 | smp_ops->give_timebase(); |
diff --git a/arch/powerpc/kernel/sys_ppc32.c b/arch/powerpc/kernel/sys_ppc32.c index 20fd701a686a..b1b6043a56c4 100644 --- a/arch/powerpc/kernel/sys_ppc32.c +++ b/arch/powerpc/kernel/sys_ppc32.c | |||
| @@ -616,3 +616,11 @@ asmlinkage long compat_sys_sync_file_range2(int fd, unsigned int flags, | |||
| 616 | 616 | ||
| 617 | return sys_sync_file_range(fd, offset, nbytes, flags); | 617 | return sys_sync_file_range(fd, offset, nbytes, flags); |
| 618 | } | 618 | } |
| 619 | |||
| 620 | asmlinkage long compat_sys_fanotify_mark(int fanotify_fd, unsigned int flags, | ||
| 621 | unsigned mask_hi, unsigned mask_lo, | ||
| 622 | int dfd, const char __user *pathname) | ||
| 623 | { | ||
| 624 | u64 mask = ((u64)mask_hi << 32) | mask_lo; | ||
| 625 | return sys_fanotify_mark(fanotify_fd, flags, mask, dfd, pathname); | ||
| 626 | } | ||
diff --git a/arch/powerpc/kernel/vio.c b/arch/powerpc/kernel/vio.c index 00b9436f7652..fa3469ddaef8 100644 --- a/arch/powerpc/kernel/vio.c +++ b/arch/powerpc/kernel/vio.c | |||
| @@ -1059,7 +1059,7 @@ static struct iommu_table *vio_build_iommu_table(struct vio_dev *dev) | |||
| 1059 | if (!dma_window) | 1059 | if (!dma_window) |
| 1060 | return NULL; | 1060 | return NULL; |
| 1061 | 1061 | ||
| 1062 | tbl = kmalloc(sizeof(*tbl), GFP_KERNEL); | 1062 | tbl = kzalloc(sizeof(*tbl), GFP_KERNEL); |
| 1063 | if (tbl == NULL) | 1063 | if (tbl == NULL) |
| 1064 | return NULL; | 1064 | return NULL; |
| 1065 | 1065 | ||
| @@ -1072,6 +1072,7 @@ static struct iommu_table *vio_build_iommu_table(struct vio_dev *dev) | |||
| 1072 | tbl->it_offset = offset >> IOMMU_PAGE_SHIFT; | 1072 | tbl->it_offset = offset >> IOMMU_PAGE_SHIFT; |
| 1073 | tbl->it_busno = 0; | 1073 | tbl->it_busno = 0; |
| 1074 | tbl->it_type = TCE_VB; | 1074 | tbl->it_type = TCE_VB; |
| 1075 | tbl->it_blocksize = 16; | ||
| 1075 | 1076 | ||
| 1076 | return iommu_init_table(tbl, -1); | 1077 | return iommu_init_table(tbl, -1); |
| 1077 | } | 1078 | } |
diff --git a/arch/powerpc/mm/init_64.c b/arch/powerpc/mm/init_64.c index 71f1415e2472..ace85fa74b29 100644 --- a/arch/powerpc/mm/init_64.c +++ b/arch/powerpc/mm/init_64.c | |||
| @@ -79,7 +79,9 @@ | |||
| 79 | #endif /* CONFIG_PPC_STD_MMU_64 */ | 79 | #endif /* CONFIG_PPC_STD_MMU_64 */ |
| 80 | 80 | ||
| 81 | phys_addr_t memstart_addr = ~0; | 81 | phys_addr_t memstart_addr = ~0; |
| 82 | EXPORT_SYMBOL_GPL(memstart_addr); | ||
| 82 | phys_addr_t kernstart_addr; | 83 | phys_addr_t kernstart_addr; |
| 84 | EXPORT_SYMBOL_GPL(kernstart_addr); | ||
| 83 | 85 | ||
| 84 | void free_initmem(void) | 86 | void free_initmem(void) |
| 85 | { | 87 | { |
diff --git a/arch/powerpc/mm/tlb_nohash_low.S b/arch/powerpc/mm/tlb_nohash_low.S index cfa768203d08..b9d9fed8f36e 100644 --- a/arch/powerpc/mm/tlb_nohash_low.S +++ b/arch/powerpc/mm/tlb_nohash_low.S | |||
| @@ -200,6 +200,7 @@ _GLOBAL(_tlbivax_bcast) | |||
| 200 | rlwimi r5,r4,0,16,31 | 200 | rlwimi r5,r4,0,16,31 |
| 201 | wrteei 0 | 201 | wrteei 0 |
| 202 | mtspr SPRN_MMUCR,r5 | 202 | mtspr SPRN_MMUCR,r5 |
| 203 | isync | ||
| 203 | /* tlbivax 0,r3 - use .long to avoid binutils deps */ | 204 | /* tlbivax 0,r3 - use .long to avoid binutils deps */ |
| 204 | .long 0x7c000624 | (r3 << 11) | 205 | .long 0x7c000624 | (r3 << 11) |
| 205 | isync | 206 | isync |
diff --git a/arch/powerpc/platforms/Kconfig b/arch/powerpc/platforms/Kconfig index d1663db7810f..81c9208025fa 100644 --- a/arch/powerpc/platforms/Kconfig +++ b/arch/powerpc/platforms/Kconfig | |||
| @@ -106,8 +106,7 @@ config MMIO_NVRAM | |||
| 106 | 106 | ||
| 107 | config MPIC_U3_HT_IRQS | 107 | config MPIC_U3_HT_IRQS |
| 108 | bool | 108 | bool |
| 109 | depends on PPC_MAPLE | 109 | default n |
| 110 | default y | ||
| 111 | 110 | ||
| 112 | config MPIC_BROKEN_REGREAD | 111 | config MPIC_BROKEN_REGREAD |
| 113 | bool | 112 | bool |
diff --git a/arch/powerpc/platforms/cell/iommu.c b/arch/powerpc/platforms/cell/iommu.c index 58b13ce3847e..26a067122a54 100644 --- a/arch/powerpc/platforms/cell/iommu.c +++ b/arch/powerpc/platforms/cell/iommu.c | |||
| @@ -477,7 +477,7 @@ cell_iommu_setup_window(struct cbe_iommu *iommu, struct device_node *np, | |||
| 477 | 477 | ||
| 478 | ioid = cell_iommu_get_ioid(np); | 478 | ioid = cell_iommu_get_ioid(np); |
| 479 | 479 | ||
| 480 | window = kmalloc_node(sizeof(*window), GFP_KERNEL, iommu->nid); | 480 | window = kzalloc_node(sizeof(*window), GFP_KERNEL, iommu->nid); |
| 481 | BUG_ON(window == NULL); | 481 | BUG_ON(window == NULL); |
| 482 | 482 | ||
| 483 | window->offset = offset; | 483 | window->offset = offset; |
diff --git a/arch/powerpc/platforms/iseries/iommu.c b/arch/powerpc/platforms/iseries/iommu.c index ce61cea0afb5..d8b76335bd13 100644 --- a/arch/powerpc/platforms/iseries/iommu.c +++ b/arch/powerpc/platforms/iseries/iommu.c | |||
| @@ -184,7 +184,7 @@ static void pci_dma_dev_setup_iseries(struct pci_dev *pdev) | |||
| 184 | 184 | ||
| 185 | BUG_ON(lsn == NULL); | 185 | BUG_ON(lsn == NULL); |
| 186 | 186 | ||
| 187 | tbl = kmalloc(sizeof(struct iommu_table), GFP_KERNEL); | 187 | tbl = kzalloc(sizeof(struct iommu_table), GFP_KERNEL); |
| 188 | 188 | ||
| 189 | iommu_table_getparms_iSeries(pdn->busno, *lsn, 0, tbl); | 189 | iommu_table_getparms_iSeries(pdn->busno, *lsn, 0, tbl); |
| 190 | 190 | ||
diff --git a/arch/powerpc/platforms/powermac/feature.c b/arch/powerpc/platforms/powermac/feature.c index 39df6ab1735a..df423993f175 100644 --- a/arch/powerpc/platforms/powermac/feature.c +++ b/arch/powerpc/platforms/powermac/feature.c | |||
| @@ -2873,12 +2873,11 @@ set_initial_features(void) | |||
| 2873 | 2873 | ||
| 2874 | /* Switch airport off */ | 2874 | /* Switch airport off */ |
| 2875 | for_each_node_by_name(np, "radio") { | 2875 | for_each_node_by_name(np, "radio") { |
| 2876 | if (np && np->parent == macio_chips[0].of_node) { | 2876 | if (np->parent == macio_chips[0].of_node) { |
| 2877 | macio_chips[0].flags |= MACIO_FLAG_AIRPORT_ON; | 2877 | macio_chips[0].flags |= MACIO_FLAG_AIRPORT_ON; |
| 2878 | core99_airport_enable(np, 0, 0); | 2878 | core99_airport_enable(np, 0, 0); |
| 2879 | } | 2879 | } |
| 2880 | } | 2880 | } |
| 2881 | of_node_put(np); | ||
| 2882 | } | 2881 | } |
| 2883 | 2882 | ||
| 2884 | /* On all machines that support sound PM, switch sound off */ | 2883 | /* On all machines that support sound PM, switch sound off */ |
diff --git a/arch/powerpc/platforms/powermac/pci.c b/arch/powerpc/platforms/powermac/pci.c index ab2027cdf893..3bc075c788ef 100644 --- a/arch/powerpc/platforms/powermac/pci.c +++ b/arch/powerpc/platforms/powermac/pci.c | |||
| @@ -1155,13 +1155,11 @@ void __init pmac_pcibios_after_init(void) | |||
| 1155 | pmac_call_feature(PMAC_FTR_1394_CABLE_POWER, nd, 0, 0); | 1155 | pmac_call_feature(PMAC_FTR_1394_CABLE_POWER, nd, 0, 0); |
| 1156 | } | 1156 | } |
| 1157 | } | 1157 | } |
| 1158 | of_node_put(nd); | ||
| 1159 | for_each_node_by_name(nd, "ethernet") { | 1158 | for_each_node_by_name(nd, "ethernet") { |
| 1160 | if (nd->parent && of_device_is_compatible(nd, "gmac") | 1159 | if (nd->parent && of_device_is_compatible(nd, "gmac") |
| 1161 | && of_device_is_compatible(nd->parent, "uni-north")) | 1160 | && of_device_is_compatible(nd->parent, "uni-north")) |
| 1162 | pmac_call_feature(PMAC_FTR_GMAC_ENABLE, nd, 0, 0); | 1161 | pmac_call_feature(PMAC_FTR_GMAC_ENABLE, nd, 0, 0); |
| 1163 | } | 1162 | } |
| 1164 | of_node_put(nd); | ||
| 1165 | } | 1163 | } |
| 1166 | 1164 | ||
| 1167 | void pmac_pci_fixup_cardbus(struct pci_dev* dev) | 1165 | void pmac_pci_fixup_cardbus(struct pci_dev* dev) |
diff --git a/arch/powerpc/platforms/pseries/iommu.c b/arch/powerpc/platforms/pseries/iommu.c index 395848e30c52..a77bcaed80af 100644 --- a/arch/powerpc/platforms/pseries/iommu.c +++ b/arch/powerpc/platforms/pseries/iommu.c | |||
| @@ -403,7 +403,7 @@ static void pci_dma_bus_setup_pSeries(struct pci_bus *bus) | |||
| 403 | pci->phb->dma_window_size = 0x8000000ul; | 403 | pci->phb->dma_window_size = 0x8000000ul; |
| 404 | pci->phb->dma_window_base_cur = 0x8000000ul; | 404 | pci->phb->dma_window_base_cur = 0x8000000ul; |
| 405 | 405 | ||
| 406 | tbl = kmalloc_node(sizeof(struct iommu_table), GFP_KERNEL, | 406 | tbl = kzalloc_node(sizeof(struct iommu_table), GFP_KERNEL, |
| 407 | pci->phb->node); | 407 | pci->phb->node); |
| 408 | 408 | ||
| 409 | iommu_table_setparms(pci->phb, dn, tbl); | 409 | iommu_table_setparms(pci->phb, dn, tbl); |
| @@ -448,7 +448,7 @@ static void pci_dma_bus_setup_pSeriesLP(struct pci_bus *bus) | |||
| 448 | pdn->full_name, ppci->iommu_table); | 448 | pdn->full_name, ppci->iommu_table); |
| 449 | 449 | ||
| 450 | if (!ppci->iommu_table) { | 450 | if (!ppci->iommu_table) { |
| 451 | tbl = kmalloc_node(sizeof(struct iommu_table), GFP_KERNEL, | 451 | tbl = kzalloc_node(sizeof(struct iommu_table), GFP_KERNEL, |
| 452 | ppci->phb->node); | 452 | ppci->phb->node); |
| 453 | iommu_table_setparms_lpar(ppci->phb, pdn, tbl, dma_window, | 453 | iommu_table_setparms_lpar(ppci->phb, pdn, tbl, dma_window, |
| 454 | bus->number); | 454 | bus->number); |
| @@ -478,7 +478,7 @@ static void pci_dma_dev_setup_pSeries(struct pci_dev *dev) | |||
| 478 | struct pci_controller *phb = PCI_DN(dn)->phb; | 478 | struct pci_controller *phb = PCI_DN(dn)->phb; |
| 479 | 479 | ||
| 480 | pr_debug(" --> first child, no bridge. Allocating iommu table.\n"); | 480 | pr_debug(" --> first child, no bridge. Allocating iommu table.\n"); |
| 481 | tbl = kmalloc_node(sizeof(struct iommu_table), GFP_KERNEL, | 481 | tbl = kzalloc_node(sizeof(struct iommu_table), GFP_KERNEL, |
| 482 | phb->node); | 482 | phb->node); |
| 483 | iommu_table_setparms(phb, dn, tbl); | 483 | iommu_table_setparms(phb, dn, tbl); |
| 484 | PCI_DN(dn)->iommu_table = iommu_init_table(tbl, phb->node); | 484 | PCI_DN(dn)->iommu_table = iommu_init_table(tbl, phb->node); |
| @@ -544,7 +544,7 @@ static void pci_dma_dev_setup_pSeriesLP(struct pci_dev *dev) | |||
| 544 | 544 | ||
| 545 | pci = PCI_DN(pdn); | 545 | pci = PCI_DN(pdn); |
| 546 | if (!pci->iommu_table) { | 546 | if (!pci->iommu_table) { |
| 547 | tbl = kmalloc_node(sizeof(struct iommu_table), GFP_KERNEL, | 547 | tbl = kzalloc_node(sizeof(struct iommu_table), GFP_KERNEL, |
| 548 | pci->phb->node); | 548 | pci->phb->node); |
| 549 | iommu_table_setparms_lpar(pci->phb, pdn, tbl, dma_window, | 549 | iommu_table_setparms_lpar(pci->phb, pdn, tbl, dma_window, |
| 550 | pci->phb->bus->number); | 550 | pci->phb->bus->number); |
diff --git a/arch/powerpc/platforms/pseries/smp.c b/arch/powerpc/platforms/pseries/smp.c index 3b1bf61c45be..0317cce877c6 100644 --- a/arch/powerpc/platforms/pseries/smp.c +++ b/arch/powerpc/platforms/pseries/smp.c | |||
| @@ -182,10 +182,13 @@ static int smp_pSeries_cpu_bootable(unsigned int nr) | |||
| 182 | /* Special case - we inhibit secondary thread startup | 182 | /* Special case - we inhibit secondary thread startup |
| 183 | * during boot if the user requests it. | 183 | * during boot if the user requests it. |
| 184 | */ | 184 | */ |
| 185 | if (system_state < SYSTEM_RUNNING && | 185 | if (system_state < SYSTEM_RUNNING && cpu_has_feature(CPU_FTR_SMT)) { |
| 186 | cpu_has_feature(CPU_FTR_SMT) && | 186 | if (!smt_enabled_at_boot && cpu_thread_in_core(nr) != 0) |
| 187 | !smt_enabled_at_boot && cpu_thread_in_core(nr) != 0) | 187 | return 0; |
| 188 | return 0; | 188 | if (smt_enabled_at_boot |
| 189 | && cpu_thread_in_core(nr) >= smt_enabled_at_boot) | ||
| 190 | return 0; | ||
| 191 | } | ||
| 189 | 192 | ||
| 190 | return 1; | 193 | return 1; |
| 191 | } | 194 | } |
diff --git a/arch/powerpc/platforms/pseries/xics.c b/arch/powerpc/platforms/pseries/xics.c index 5b22b07c8f67..93834b0d8272 100644 --- a/arch/powerpc/platforms/pseries/xics.c +++ b/arch/powerpc/platforms/pseries/xics.c | |||
| @@ -928,8 +928,10 @@ void xics_migrate_irqs_away(void) | |||
| 928 | if (xics_status[0] != hw_cpu) | 928 | if (xics_status[0] != hw_cpu) |
| 929 | goto unlock; | 929 | goto unlock; |
| 930 | 930 | ||
| 931 | printk(KERN_WARNING "IRQ %u affinity broken off cpu %u\n", | 931 | /* This is expected during cpu offline. */ |
| 932 | virq, cpu); | 932 | if (cpu_online(cpu)) |
| 933 | printk(KERN_WARNING "IRQ %u affinity broken off cpu %u\n", | ||
| 934 | virq, cpu); | ||
| 933 | 935 | ||
| 934 | /* Reset affinity to all cpus */ | 936 | /* Reset affinity to all cpus */ |
| 935 | cpumask_setall(irq_to_desc(virq)->affinity); | 937 | cpumask_setall(irq_to_desc(virq)->affinity); |
diff --git a/arch/s390/include/asm/hugetlb.h b/arch/s390/include/asm/hugetlb.h index 670a1d1745d2..bb8343d157bc 100644 --- a/arch/s390/include/asm/hugetlb.h +++ b/arch/s390/include/asm/hugetlb.h | |||
| @@ -97,6 +97,7 @@ static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm, | |||
| 97 | { | 97 | { |
| 98 | pte_t pte = huge_ptep_get(ptep); | 98 | pte_t pte = huge_ptep_get(ptep); |
| 99 | 99 | ||
| 100 | mm->context.flush_mm = 1; | ||
| 100 | pmd_clear((pmd_t *) ptep); | 101 | pmd_clear((pmd_t *) ptep); |
| 101 | return pte; | 102 | return pte; |
| 102 | } | 103 | } |
| @@ -167,7 +168,8 @@ static inline void huge_ptep_invalidate(struct mm_struct *mm, | |||
| 167 | ({ \ | 168 | ({ \ |
| 168 | pte_t __pte = huge_ptep_get(__ptep); \ | 169 | pte_t __pte = huge_ptep_get(__ptep); \ |
| 169 | if (pte_write(__pte)) { \ | 170 | if (pte_write(__pte)) { \ |
| 170 | if (atomic_read(&(__mm)->mm_users) > 1 || \ | 171 | (__mm)->context.flush_mm = 1; \ |
| 172 | if (atomic_read(&(__mm)->context.attach_count) > 1 || \ | ||
| 171 | (__mm) != current->active_mm) \ | 173 | (__mm) != current->active_mm) \ |
| 172 | huge_ptep_invalidate(__mm, __addr, __ptep); \ | 174 | huge_ptep_invalidate(__mm, __addr, __ptep); \ |
| 173 | set_huge_pte_at(__mm, __addr, __ptep, \ | 175 | set_huge_pte_at(__mm, __addr, __ptep, \ |
diff --git a/arch/s390/include/asm/mmu.h b/arch/s390/include/asm/mmu.h index 99e3409102b9..78522cdefdd4 100644 --- a/arch/s390/include/asm/mmu.h +++ b/arch/s390/include/asm/mmu.h | |||
| @@ -2,6 +2,8 @@ | |||
| 2 | #define __MMU_H | 2 | #define __MMU_H |
| 3 | 3 | ||
| 4 | typedef struct { | 4 | typedef struct { |
| 5 | atomic_t attach_count; | ||
| 6 | unsigned int flush_mm; | ||
| 5 | spinlock_t list_lock; | 7 | spinlock_t list_lock; |
| 6 | struct list_head crst_list; | 8 | struct list_head crst_list; |
| 7 | struct list_head pgtable_list; | 9 | struct list_head pgtable_list; |
diff --git a/arch/s390/include/asm/mmu_context.h b/arch/s390/include/asm/mmu_context.h index 976e273988c2..a6f0e7cc9cde 100644 --- a/arch/s390/include/asm/mmu_context.h +++ b/arch/s390/include/asm/mmu_context.h | |||
| @@ -11,11 +11,14 @@ | |||
| 11 | 11 | ||
| 12 | #include <asm/pgalloc.h> | 12 | #include <asm/pgalloc.h> |
| 13 | #include <asm/uaccess.h> | 13 | #include <asm/uaccess.h> |
| 14 | #include <asm/tlbflush.h> | ||
| 14 | #include <asm-generic/mm_hooks.h> | 15 | #include <asm-generic/mm_hooks.h> |
| 15 | 16 | ||
| 16 | static inline int init_new_context(struct task_struct *tsk, | 17 | static inline int init_new_context(struct task_struct *tsk, |
| 17 | struct mm_struct *mm) | 18 | struct mm_struct *mm) |
| 18 | { | 19 | { |
| 20 | atomic_set(&mm->context.attach_count, 0); | ||
| 21 | mm->context.flush_mm = 0; | ||
| 19 | mm->context.asce_bits = _ASCE_TABLE_LENGTH | _ASCE_USER_BITS; | 22 | mm->context.asce_bits = _ASCE_TABLE_LENGTH | _ASCE_USER_BITS; |
| 20 | #ifdef CONFIG_64BIT | 23 | #ifdef CONFIG_64BIT |
| 21 | mm->context.asce_bits |= _ASCE_TYPE_REGION3; | 24 | mm->context.asce_bits |= _ASCE_TYPE_REGION3; |
| @@ -76,6 +79,12 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, | |||
| 76 | { | 79 | { |
| 77 | cpumask_set_cpu(smp_processor_id(), mm_cpumask(next)); | 80 | cpumask_set_cpu(smp_processor_id(), mm_cpumask(next)); |
| 78 | update_mm(next, tsk); | 81 | update_mm(next, tsk); |
| 82 | atomic_dec(&prev->context.attach_count); | ||
| 83 | WARN_ON(atomic_read(&prev->context.attach_count) < 0); | ||
| 84 | atomic_inc(&next->context.attach_count); | ||
| 85 | /* Check for TLBs not flushed yet */ | ||
| 86 | if (next->context.flush_mm) | ||
| 87 | __tlb_flush_mm(next); | ||
| 79 | } | 88 | } |
| 80 | 89 | ||
| 81 | #define enter_lazy_tlb(mm,tsk) do { } while (0) | 90 | #define enter_lazy_tlb(mm,tsk) do { } while (0) |
diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h index 89a504c3f12e..3157441ee1da 100644 --- a/arch/s390/include/asm/pgtable.h +++ b/arch/s390/include/asm/pgtable.h | |||
| @@ -880,7 +880,8 @@ static inline void ptep_invalidate(struct mm_struct *mm, | |||
| 880 | #define ptep_get_and_clear(__mm, __address, __ptep) \ | 880 | #define ptep_get_and_clear(__mm, __address, __ptep) \ |
| 881 | ({ \ | 881 | ({ \ |
| 882 | pte_t __pte = *(__ptep); \ | 882 | pte_t __pte = *(__ptep); \ |
| 883 | if (atomic_read(&(__mm)->mm_users) > 1 || \ | 883 | (__mm)->context.flush_mm = 1; \ |
| 884 | if (atomic_read(&(__mm)->context.attach_count) > 1 || \ | ||
| 884 | (__mm) != current->active_mm) \ | 885 | (__mm) != current->active_mm) \ |
| 885 | ptep_invalidate(__mm, __address, __ptep); \ | 886 | ptep_invalidate(__mm, __address, __ptep); \ |
| 886 | else \ | 887 | else \ |
| @@ -923,7 +924,8 @@ static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm, | |||
| 923 | ({ \ | 924 | ({ \ |
| 924 | pte_t __pte = *(__ptep); \ | 925 | pte_t __pte = *(__ptep); \ |
| 925 | if (pte_write(__pte)) { \ | 926 | if (pte_write(__pte)) { \ |
| 926 | if (atomic_read(&(__mm)->mm_users) > 1 || \ | 927 | (__mm)->context.flush_mm = 1; \ |
| 928 | if (atomic_read(&(__mm)->context.attach_count) > 1 || \ | ||
| 927 | (__mm) != current->active_mm) \ | 929 | (__mm) != current->active_mm) \ |
| 928 | ptep_invalidate(__mm, __addr, __ptep); \ | 930 | ptep_invalidate(__mm, __addr, __ptep); \ |
| 929 | set_pte_at(__mm, __addr, __ptep, pte_wrprotect(__pte)); \ | 931 | set_pte_at(__mm, __addr, __ptep, pte_wrprotect(__pte)); \ |
diff --git a/arch/s390/include/asm/tlb.h b/arch/s390/include/asm/tlb.h index 81150b053689..fd1c00d08bf5 100644 --- a/arch/s390/include/asm/tlb.h +++ b/arch/s390/include/asm/tlb.h | |||
| @@ -50,8 +50,7 @@ static inline struct mmu_gather *tlb_gather_mmu(struct mm_struct *mm, | |||
| 50 | struct mmu_gather *tlb = &get_cpu_var(mmu_gathers); | 50 | struct mmu_gather *tlb = &get_cpu_var(mmu_gathers); |
| 51 | 51 | ||
| 52 | tlb->mm = mm; | 52 | tlb->mm = mm; |
| 53 | tlb->fullmm = full_mm_flush || (num_online_cpus() == 1) || | 53 | tlb->fullmm = full_mm_flush; |
| 54 | (atomic_read(&mm->mm_users) <= 1 && mm == current->active_mm); | ||
| 55 | tlb->nr_ptes = 0; | 54 | tlb->nr_ptes = 0; |
| 56 | tlb->nr_pxds = TLB_NR_PTRS; | 55 | tlb->nr_pxds = TLB_NR_PTRS; |
| 57 | if (tlb->fullmm) | 56 | if (tlb->fullmm) |
diff --git a/arch/s390/include/asm/tlbflush.h b/arch/s390/include/asm/tlbflush.h index 304cffa623e1..29d5d6d4becc 100644 --- a/arch/s390/include/asm/tlbflush.h +++ b/arch/s390/include/asm/tlbflush.h | |||
| @@ -94,8 +94,12 @@ static inline void __tlb_flush_mm(struct mm_struct * mm) | |||
| 94 | 94 | ||
| 95 | static inline void __tlb_flush_mm_cond(struct mm_struct * mm) | 95 | static inline void __tlb_flush_mm_cond(struct mm_struct * mm) |
| 96 | { | 96 | { |
| 97 | if (atomic_read(&mm->mm_users) <= 1 && mm == current->active_mm) | 97 | spin_lock(&mm->page_table_lock); |
| 98 | if (mm->context.flush_mm) { | ||
| 98 | __tlb_flush_mm(mm); | 99 | __tlb_flush_mm(mm); |
| 100 | mm->context.flush_mm = 0; | ||
| 101 | } | ||
| 102 | spin_unlock(&mm->page_table_lock); | ||
| 99 | } | 103 | } |
| 100 | 104 | ||
| 101 | /* | 105 | /* |
diff --git a/arch/s390/kernel/entry.h b/arch/s390/kernel/entry.h index 403fb430a896..ff579b6bde06 100644 --- a/arch/s390/kernel/entry.h +++ b/arch/s390/kernel/entry.h | |||
| @@ -42,8 +42,8 @@ long sys_clone(unsigned long newsp, unsigned long clone_flags, | |||
| 42 | int __user *parent_tidptr, int __user *child_tidptr); | 42 | int __user *parent_tidptr, int __user *child_tidptr); |
| 43 | long sys_vfork(void); | 43 | long sys_vfork(void); |
| 44 | void execve_tail(void); | 44 | void execve_tail(void); |
| 45 | long sys_execve(const char __user *name, char __user * __user *argv, | 45 | long sys_execve(const char __user *name, const char __user *const __user *argv, |
| 46 | char __user * __user *envp); | 46 | const char __user *const __user *envp); |
| 47 | long sys_sigsuspend(int history0, int history1, old_sigset_t mask); | 47 | long sys_sigsuspend(int history0, int history1, old_sigset_t mask); |
| 48 | long sys_sigaction(int sig, const struct old_sigaction __user *act, | 48 | long sys_sigaction(int sig, const struct old_sigaction __user *act, |
| 49 | struct old_sigaction __user *oact); | 49 | struct old_sigaction __user *oact); |
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c index 541053ed234e..8127ebd59c4d 100644 --- a/arch/s390/kernel/smp.c +++ b/arch/s390/kernel/smp.c | |||
| @@ -583,6 +583,7 @@ int __cpuinit __cpu_up(unsigned int cpu) | |||
| 583 | sf->gprs[9] = (unsigned long) sf; | 583 | sf->gprs[9] = (unsigned long) sf; |
| 584 | cpu_lowcore->save_area[15] = (unsigned long) sf; | 584 | cpu_lowcore->save_area[15] = (unsigned long) sf; |
| 585 | __ctl_store(cpu_lowcore->cregs_save_area, 0, 15); | 585 | __ctl_store(cpu_lowcore->cregs_save_area, 0, 15); |
| 586 | atomic_inc(&init_mm.context.attach_count); | ||
| 586 | asm volatile( | 587 | asm volatile( |
| 587 | " stam 0,15,0(%0)" | 588 | " stam 0,15,0(%0)" |
| 588 | : : "a" (&cpu_lowcore->access_regs_save_area) : "memory"); | 589 | : : "a" (&cpu_lowcore->access_regs_save_area) : "memory"); |
| @@ -659,6 +660,7 @@ void __cpu_die(unsigned int cpu) | |||
| 659 | while (sigp_p(0, cpu, sigp_set_prefix) == sigp_busy) | 660 | while (sigp_p(0, cpu, sigp_set_prefix) == sigp_busy) |
| 660 | udelay(10); | 661 | udelay(10); |
| 661 | smp_free_lowcore(cpu); | 662 | smp_free_lowcore(cpu); |
| 663 | atomic_dec(&init_mm.context.attach_count); | ||
| 662 | pr_info("Processor %d stopped\n", cpu); | 664 | pr_info("Processor %d stopped\n", cpu); |
| 663 | } | 665 | } |
| 664 | 666 | ||
diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c index acc91c75bc94..30eb6d02ddb8 100644 --- a/arch/s390/mm/init.c +++ b/arch/s390/mm/init.c | |||
| @@ -74,6 +74,8 @@ void __init paging_init(void) | |||
| 74 | __ctl_load(S390_lowcore.kernel_asce, 13, 13); | 74 | __ctl_load(S390_lowcore.kernel_asce, 13, 13); |
| 75 | __raw_local_irq_ssm(ssm_mask); | 75 | __raw_local_irq_ssm(ssm_mask); |
| 76 | 76 | ||
| 77 | atomic_set(&init_mm.context.attach_count, 1); | ||
| 78 | |||
| 77 | sparse_memory_present_with_active_regions(MAX_NUMNODES); | 79 | sparse_memory_present_with_active_regions(MAX_NUMNODES); |
| 78 | sparse_init(); | 80 | sparse_init(); |
| 79 | memset(max_zone_pfns, 0, sizeof(max_zone_pfns)); | 81 | memset(max_zone_pfns, 0, sizeof(max_zone_pfns)); |
diff --git a/arch/sparc/include/asm/atomic_64.h b/arch/sparc/include/asm/atomic_64.h index f0c74227c737..bdb2ff880bdd 100644 --- a/arch/sparc/include/asm/atomic_64.h +++ b/arch/sparc/include/asm/atomic_64.h | |||
| @@ -20,14 +20,14 @@ | |||
| 20 | #define atomic64_set(v, i) (((v)->counter) = i) | 20 | #define atomic64_set(v, i) (((v)->counter) = i) |
| 21 | 21 | ||
| 22 | extern void atomic_add(int, atomic_t *); | 22 | extern void atomic_add(int, atomic_t *); |
| 23 | extern void atomic64_add(int, atomic64_t *); | 23 | extern void atomic64_add(long, atomic64_t *); |
| 24 | extern void atomic_sub(int, atomic_t *); | 24 | extern void atomic_sub(int, atomic_t *); |
| 25 | extern void atomic64_sub(int, atomic64_t *); | 25 | extern void atomic64_sub(long, atomic64_t *); |
| 26 | 26 | ||
| 27 | extern int atomic_add_ret(int, atomic_t *); | 27 | extern int atomic_add_ret(int, atomic_t *); |
| 28 | extern long atomic64_add_ret(int, atomic64_t *); | 28 | extern long atomic64_add_ret(long, atomic64_t *); |
| 29 | extern int atomic_sub_ret(int, atomic_t *); | 29 | extern int atomic_sub_ret(int, atomic_t *); |
| 30 | extern long atomic64_sub_ret(int, atomic64_t *); | 30 | extern long atomic64_sub_ret(long, atomic64_t *); |
| 31 | 31 | ||
| 32 | #define atomic_dec_return(v) atomic_sub_ret(1, v) | 32 | #define atomic_dec_return(v) atomic_sub_ret(1, v) |
| 33 | #define atomic64_dec_return(v) atomic64_sub_ret(1, v) | 33 | #define atomic64_dec_return(v) atomic64_sub_ret(1, v) |
diff --git a/arch/sparc/include/asm/backoff.h b/arch/sparc/include/asm/backoff.h index fa1fdf67e350..db3af0d30fb1 100644 --- a/arch/sparc/include/asm/backoff.h +++ b/arch/sparc/include/asm/backoff.h | |||
| @@ -8,6 +8,9 @@ | |||
| 8 | #define BACKOFF_SETUP(reg) \ | 8 | #define BACKOFF_SETUP(reg) \ |
| 9 | mov 1, reg | 9 | mov 1, reg |
| 10 | 10 | ||
| 11 | #define BACKOFF_LABEL(spin_label, continue_label) \ | ||
| 12 | spin_label | ||
| 13 | |||
| 11 | #define BACKOFF_SPIN(reg, tmp, label) \ | 14 | #define BACKOFF_SPIN(reg, tmp, label) \ |
| 12 | mov reg, tmp; \ | 15 | mov reg, tmp; \ |
| 13 | 88: brnz,pt tmp, 88b; \ | 16 | 88: brnz,pt tmp, 88b; \ |
| @@ -22,9 +25,11 @@ | |||
| 22 | #else | 25 | #else |
| 23 | 26 | ||
| 24 | #define BACKOFF_SETUP(reg) | 27 | #define BACKOFF_SETUP(reg) |
| 25 | #define BACKOFF_SPIN(reg, tmp, label) \ | 28 | |
| 26 | ba,pt %xcc, label; \ | 29 | #define BACKOFF_LABEL(spin_label, continue_label) \ |
| 27 | nop; | 30 | continue_label |
| 31 | |||
| 32 | #define BACKOFF_SPIN(reg, tmp, label) | ||
| 28 | 33 | ||
| 29 | #endif | 34 | #endif |
| 30 | 35 | ||
diff --git a/arch/sparc/include/asm/oplib_64.h b/arch/sparc/include/asm/oplib_64.h index a5db0317b5fb..3e0b2d62303d 100644 --- a/arch/sparc/include/asm/oplib_64.h +++ b/arch/sparc/include/asm/oplib_64.h | |||
| @@ -185,9 +185,8 @@ extern int prom_getunumber(int syndrome_code, | |||
| 185 | char *buf, int buflen); | 185 | char *buf, int buflen); |
| 186 | 186 | ||
| 187 | /* Retain physical memory to the caller across soft resets. */ | 187 | /* Retain physical memory to the caller across soft resets. */ |
| 188 | extern unsigned long prom_retain(const char *name, | 188 | extern int prom_retain(const char *name, unsigned long size, |
| 189 | unsigned long pa_low, unsigned long pa_high, | 189 | unsigned long align, unsigned long *paddr); |
| 190 | long size, long align); | ||
| 191 | 190 | ||
| 192 | /* Load explicit I/D TLB entries into the calling processor. */ | 191 | /* Load explicit I/D TLB entries into the calling processor. */ |
| 193 | extern long prom_itlb_load(unsigned long index, | 192 | extern long prom_itlb_load(unsigned long index, |
| @@ -287,26 +286,6 @@ extern void prom_sun4v_guest_soft_state(void); | |||
| 287 | extern int prom_ihandle2path(int handle, char *buffer, int bufsize); | 286 | extern int prom_ihandle2path(int handle, char *buffer, int bufsize); |
| 288 | 287 | ||
| 289 | /* Client interface level routines. */ | 288 | /* Client interface level routines. */ |
| 290 | extern long p1275_cmd(const char *, long, ...); | 289 | extern void p1275_cmd_direct(unsigned long *); |
| 291 | |||
| 292 | #if 0 | ||
| 293 | #define P1275_SIZE(x) ((((long)((x) / 32)) << 32) | (x)) | ||
| 294 | #else | ||
| 295 | #define P1275_SIZE(x) x | ||
| 296 | #endif | ||
| 297 | |||
| 298 | /* We support at most 16 input and 1 output argument */ | ||
| 299 | #define P1275_ARG_NUMBER 0 | ||
| 300 | #define P1275_ARG_IN_STRING 1 | ||
| 301 | #define P1275_ARG_OUT_BUF 2 | ||
| 302 | #define P1275_ARG_OUT_32B 3 | ||
| 303 | #define P1275_ARG_IN_FUNCTION 4 | ||
| 304 | #define P1275_ARG_IN_BUF 5 | ||
| 305 | #define P1275_ARG_IN_64B 6 | ||
| 306 | |||
| 307 | #define P1275_IN(x) ((x) & 0xf) | ||
| 308 | #define P1275_OUT(x) (((x) << 4) & 0xf0) | ||
| 309 | #define P1275_INOUT(i,o) (P1275_IN(i)|P1275_OUT(o)) | ||
| 310 | #define P1275_ARG(n,x) ((x) << ((n)*3 + 8)) | ||
| 311 | 290 | ||
| 312 | #endif /* !(__SPARC64_OPLIB_H) */ | 291 | #endif /* !(__SPARC64_OPLIB_H) */ |
diff --git a/arch/sparc/include/asm/rwsem-const.h b/arch/sparc/include/asm/rwsem-const.h deleted file mode 100644 index e4c61a18bb28..000000000000 --- a/arch/sparc/include/asm/rwsem-const.h +++ /dev/null | |||
| @@ -1,12 +0,0 @@ | |||
| 1 | /* rwsem-const.h: RW semaphore counter constants. */ | ||
| 2 | #ifndef _SPARC64_RWSEM_CONST_H | ||
| 3 | #define _SPARC64_RWSEM_CONST_H | ||
| 4 | |||
| 5 | #define RWSEM_UNLOCKED_VALUE 0x00000000 | ||
| 6 | #define RWSEM_ACTIVE_BIAS 0x00000001 | ||
| 7 | #define RWSEM_ACTIVE_MASK 0x0000ffff | ||
| 8 | #define RWSEM_WAITING_BIAS (-0x00010000) | ||
| 9 | #define RWSEM_ACTIVE_READ_BIAS RWSEM_ACTIVE_BIAS | ||
| 10 | #define RWSEM_ACTIVE_WRITE_BIAS (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS) | ||
| 11 | |||
| 12 | #endif /* _SPARC64_RWSEM_CONST_H */ | ||
diff --git a/arch/sparc/include/asm/rwsem.h b/arch/sparc/include/asm/rwsem.h index 6e5621006f85..a2b4302869bc 100644 --- a/arch/sparc/include/asm/rwsem.h +++ b/arch/sparc/include/asm/rwsem.h | |||
| @@ -15,16 +15,21 @@ | |||
| 15 | 15 | ||
| 16 | #include <linux/list.h> | 16 | #include <linux/list.h> |
| 17 | #include <linux/spinlock.h> | 17 | #include <linux/spinlock.h> |
| 18 | #include <asm/rwsem-const.h> | ||
| 19 | 18 | ||
| 20 | struct rwsem_waiter; | 19 | struct rwsem_waiter; |
| 21 | 20 | ||
| 22 | struct rw_semaphore { | 21 | struct rw_semaphore { |
| 23 | signed int count; | 22 | signed long count; |
| 24 | spinlock_t wait_lock; | 23 | #define RWSEM_UNLOCKED_VALUE 0x00000000L |
| 25 | struct list_head wait_list; | 24 | #define RWSEM_ACTIVE_BIAS 0x00000001L |
| 25 | #define RWSEM_ACTIVE_MASK 0xffffffffL | ||
| 26 | #define RWSEM_WAITING_BIAS (-RWSEM_ACTIVE_MASK-1) | ||
| 27 | #define RWSEM_ACTIVE_READ_BIAS RWSEM_ACTIVE_BIAS | ||
| 28 | #define RWSEM_ACTIVE_WRITE_BIAS (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS) | ||
| 29 | spinlock_t wait_lock; | ||
| 30 | struct list_head wait_list; | ||
| 26 | #ifdef CONFIG_DEBUG_LOCK_ALLOC | 31 | #ifdef CONFIG_DEBUG_LOCK_ALLOC |
| 27 | struct lockdep_map dep_map; | 32 | struct lockdep_map dep_map; |
| 28 | #endif | 33 | #endif |
| 29 | }; | 34 | }; |
| 30 | 35 | ||
| @@ -41,6 +46,11 @@ struct rw_semaphore { | |||
| 41 | #define DECLARE_RWSEM(name) \ | 46 | #define DECLARE_RWSEM(name) \ |
| 42 | struct rw_semaphore name = __RWSEM_INITIALIZER(name) | 47 | struct rw_semaphore name = __RWSEM_INITIALIZER(name) |
| 43 | 48 | ||
| 49 | extern struct rw_semaphore *rwsem_down_read_failed(struct rw_semaphore *sem); | ||
| 50 | extern struct rw_semaphore *rwsem_down_write_failed(struct rw_semaphore *sem); | ||
| 51 | extern struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem); | ||
| 52 | extern struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem); | ||
| 53 | |||
| 44 | extern void __init_rwsem(struct rw_semaphore *sem, const char *name, | 54 | extern void __init_rwsem(struct rw_semaphore *sem, const char *name, |
| 45 | struct lock_class_key *key); | 55 | struct lock_class_key *key); |
| 46 | 56 | ||
| @@ -51,27 +61,103 @@ do { \ | |||
| 51 | __init_rwsem((sem), #sem, &__key); \ | 61 | __init_rwsem((sem), #sem, &__key); \ |
| 52 | } while (0) | 62 | } while (0) |
| 53 | 63 | ||
| 54 | extern void __down_read(struct rw_semaphore *sem); | 64 | /* |
| 55 | extern int __down_read_trylock(struct rw_semaphore *sem); | 65 | * lock for reading |
| 56 | extern void __down_write(struct rw_semaphore *sem); | 66 | */ |
| 57 | extern int __down_write_trylock(struct rw_semaphore *sem); | 67 | static inline void __down_read(struct rw_semaphore *sem) |
| 58 | extern void __up_read(struct rw_semaphore *sem); | 68 | { |
| 59 | extern void __up_write(struct rw_semaphore *sem); | 69 | if (unlikely(atomic64_inc_return((atomic64_t *)(&sem->count)) <= 0L)) |
| 60 | extern void __downgrade_write(struct rw_semaphore *sem); | 70 | rwsem_down_read_failed(sem); |
| 71 | } | ||
| 72 | |||
| 73 | static inline int __down_read_trylock(struct rw_semaphore *sem) | ||
| 74 | { | ||
| 75 | long tmp; | ||
| 76 | |||
| 77 | while ((tmp = sem->count) >= 0L) { | ||
| 78 | if (tmp == cmpxchg(&sem->count, tmp, | ||
| 79 | tmp + RWSEM_ACTIVE_READ_BIAS)) { | ||
| 80 | return 1; | ||
| 81 | } | ||
| 82 | } | ||
| 83 | return 0; | ||
| 84 | } | ||
| 61 | 85 | ||
| 86 | /* | ||
| 87 | * lock for writing | ||
| 88 | */ | ||
| 62 | static inline void __down_write_nested(struct rw_semaphore *sem, int subclass) | 89 | static inline void __down_write_nested(struct rw_semaphore *sem, int subclass) |
| 63 | { | 90 | { |
| 64 | __down_write(sem); | 91 | long tmp; |
| 92 | |||
| 93 | tmp = atomic64_add_return(RWSEM_ACTIVE_WRITE_BIAS, | ||
| 94 | (atomic64_t *)(&sem->count)); | ||
| 95 | if (unlikely(tmp != RWSEM_ACTIVE_WRITE_BIAS)) | ||
| 96 | rwsem_down_write_failed(sem); | ||
| 65 | } | 97 | } |
| 66 | 98 | ||
| 67 | static inline int rwsem_atomic_update(int delta, struct rw_semaphore *sem) | 99 | static inline void __down_write(struct rw_semaphore *sem) |
| 68 | { | 100 | { |
| 69 | return atomic_add_return(delta, (atomic_t *)(&sem->count)); | 101 | __down_write_nested(sem, 0); |
| 102 | } | ||
| 103 | |||
| 104 | static inline int __down_write_trylock(struct rw_semaphore *sem) | ||
| 105 | { | ||
| 106 | long tmp; | ||
| 107 | |||
| 108 | tmp = cmpxchg(&sem->count, RWSEM_UNLOCKED_VALUE, | ||
| 109 | RWSEM_ACTIVE_WRITE_BIAS); | ||
| 110 | return tmp == RWSEM_UNLOCKED_VALUE; | ||
| 70 | } | 111 | } |
| 71 | 112 | ||
| 72 | static inline void rwsem_atomic_add(int delta, struct rw_semaphore *sem) | 113 | /* |
| 114 | * unlock after reading | ||
| 115 | */ | ||
| 116 | static inline void __up_read(struct rw_semaphore *sem) | ||
| 117 | { | ||
| 118 | long tmp; | ||
| 119 | |||
| 120 | tmp = atomic64_dec_return((atomic64_t *)(&sem->count)); | ||
| 121 | if (unlikely(tmp < -1L && (tmp & RWSEM_ACTIVE_MASK) == 0L)) | ||
| 122 | rwsem_wake(sem); | ||
| 123 | } | ||
| 124 | |||
| 125 | /* | ||
| 126 | * unlock after writing | ||
| 127 | */ | ||
| 128 | static inline void __up_write(struct rw_semaphore *sem) | ||
| 129 | { | ||
| 130 | if (unlikely(atomic64_sub_return(RWSEM_ACTIVE_WRITE_BIAS, | ||
| 131 | (atomic64_t *)(&sem->count)) < 0L)) | ||
| 132 | rwsem_wake(sem); | ||
| 133 | } | ||
| 134 | |||
| 135 | /* | ||
| 136 | * implement atomic add functionality | ||
| 137 | */ | ||
| 138 | static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem) | ||
| 139 | { | ||
| 140 | atomic64_add(delta, (atomic64_t *)(&sem->count)); | ||
| 141 | } | ||
| 142 | |||
| 143 | /* | ||
| 144 | * downgrade write lock to read lock | ||
| 145 | */ | ||
| 146 | static inline void __downgrade_write(struct rw_semaphore *sem) | ||
| 147 | { | ||
| 148 | long tmp; | ||
| 149 | |||
| 150 | tmp = atomic64_add_return(-RWSEM_WAITING_BIAS, (atomic64_t *)(&sem->count)); | ||
| 151 | if (tmp < 0L) | ||
| 152 | rwsem_downgrade_wake(sem); | ||
| 153 | } | ||
| 154 | |||
| 155 | /* | ||
| 156 | * implement exchange and add functionality | ||
| 157 | */ | ||
| 158 | static inline long rwsem_atomic_update(long delta, struct rw_semaphore *sem) | ||
| 73 | { | 159 | { |
| 74 | atomic_add(delta, (atomic_t *)(&sem->count)); | 160 | return atomic64_add_return(delta, (atomic64_t *)(&sem->count)); |
| 75 | } | 161 | } |
| 76 | 162 | ||
| 77 | static inline int rwsem_is_locked(struct rw_semaphore *sem) | 163 | static inline int rwsem_is_locked(struct rw_semaphore *sem) |
diff --git a/arch/sparc/include/asm/system_64.h b/arch/sparc/include/asm/system_64.h index d24cfe16afc1..e3b65d8cf41b 100644 --- a/arch/sparc/include/asm/system_64.h +++ b/arch/sparc/include/asm/system_64.h | |||
| @@ -106,6 +106,7 @@ do { __asm__ __volatile__("ba,pt %%xcc, 1f\n\t" \ | |||
| 106 | */ | 106 | */ |
| 107 | #define write_pic(__p) \ | 107 | #define write_pic(__p) \ |
| 108 | __asm__ __volatile__("ba,pt %%xcc, 99f\n\t" \ | 108 | __asm__ __volatile__("ba,pt %%xcc, 99f\n\t" \ |
| 109 | " nop\n\t" \ | ||
| 109 | ".align 64\n" \ | 110 | ".align 64\n" \ |
| 110 | "99:wr %0, 0x0, %%pic\n\t" \ | 111 | "99:wr %0, 0x0, %%pic\n\t" \ |
| 111 | "rd %%pic, %%g0" : : "r" (__p)) | 112 | "rd %%pic, %%g0" : : "r" (__p)) |
diff --git a/arch/sparc/lib/Makefile b/arch/sparc/lib/Makefile index c4b5e03af115..846d1c4374ea 100644 --- a/arch/sparc/lib/Makefile +++ b/arch/sparc/lib/Makefile | |||
| @@ -15,7 +15,7 @@ lib-$(CONFIG_SPARC32) += divdi3.o udivdi3.o | |||
| 15 | lib-$(CONFIG_SPARC32) += copy_user.o locks.o | 15 | lib-$(CONFIG_SPARC32) += copy_user.o locks.o |
| 16 | lib-y += atomic_$(BITS).o | 16 | lib-y += atomic_$(BITS).o |
| 17 | lib-$(CONFIG_SPARC32) += lshrdi3.o ashldi3.o | 17 | lib-$(CONFIG_SPARC32) += lshrdi3.o ashldi3.o |
| 18 | lib-y += rwsem_$(BITS).o | 18 | lib-$(CONFIG_SPARC32) += rwsem_32.o |
| 19 | lib-$(CONFIG_SPARC32) += muldi3.o bitext.o cmpdi2.o | 19 | lib-$(CONFIG_SPARC32) += muldi3.o bitext.o cmpdi2.o |
| 20 | 20 | ||
| 21 | lib-$(CONFIG_SPARC64) += copy_page.o clear_page.o bzero.o | 21 | lib-$(CONFIG_SPARC64) += copy_page.o clear_page.o bzero.o |
diff --git a/arch/sparc/lib/atomic_64.S b/arch/sparc/lib/atomic_64.S index 0268210ca168..59186e0fcf39 100644 --- a/arch/sparc/lib/atomic_64.S +++ b/arch/sparc/lib/atomic_64.S | |||
| @@ -21,7 +21,7 @@ atomic_add: /* %o0 = increment, %o1 = atomic_ptr */ | |||
| 21 | add %g1, %o0, %g7 | 21 | add %g1, %o0, %g7 |
| 22 | cas [%o1], %g1, %g7 | 22 | cas [%o1], %g1, %g7 |
| 23 | cmp %g1, %g7 | 23 | cmp %g1, %g7 |
| 24 | bne,pn %icc, 2f | 24 | bne,pn %icc, BACKOFF_LABEL(2f, 1b) |
| 25 | nop | 25 | nop |
| 26 | retl | 26 | retl |
| 27 | nop | 27 | nop |
| @@ -36,7 +36,7 @@ atomic_sub: /* %o0 = decrement, %o1 = atomic_ptr */ | |||
| 36 | sub %g1, %o0, %g7 | 36 | sub %g1, %o0, %g7 |
| 37 | cas [%o1], %g1, %g7 | 37 | cas [%o1], %g1, %g7 |
| 38 | cmp %g1, %g7 | 38 | cmp %g1, %g7 |
| 39 | bne,pn %icc, 2f | 39 | bne,pn %icc, BACKOFF_LABEL(2f, 1b) |
| 40 | nop | 40 | nop |
| 41 | retl | 41 | retl |
| 42 | nop | 42 | nop |
| @@ -51,11 +51,10 @@ atomic_add_ret: /* %o0 = increment, %o1 = atomic_ptr */ | |||
| 51 | add %g1, %o0, %g7 | 51 | add %g1, %o0, %g7 |
| 52 | cas [%o1], %g1, %g7 | 52 | cas [%o1], %g1, %g7 |
| 53 | cmp %g1, %g7 | 53 | cmp %g1, %g7 |
| 54 | bne,pn %icc, 2f | 54 | bne,pn %icc, BACKOFF_LABEL(2f, 1b) |
| 55 | add %g7, %o0, %g7 | 55 | add %g1, %o0, %g1 |
| 56 | sra %g7, 0, %o0 | ||
| 57 | retl | 56 | retl |
| 58 | nop | 57 | sra %g1, 0, %o0 |
| 59 | 2: BACKOFF_SPIN(%o2, %o3, 1b) | 58 | 2: BACKOFF_SPIN(%o2, %o3, 1b) |
| 60 | .size atomic_add_ret, .-atomic_add_ret | 59 | .size atomic_add_ret, .-atomic_add_ret |
| 61 | 60 | ||
| @@ -67,11 +66,10 @@ atomic_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */ | |||
| 67 | sub %g1, %o0, %g7 | 66 | sub %g1, %o0, %g7 |
| 68 | cas [%o1], %g1, %g7 | 67 | cas [%o1], %g1, %g7 |
| 69 | cmp %g1, %g7 | 68 | cmp %g1, %g7 |
| 70 | bne,pn %icc, 2f | 69 | bne,pn %icc, BACKOFF_LABEL(2f, 1b) |
| 71 | sub %g7, %o0, %g7 | 70 | sub %g1, %o0, %g1 |
| 72 | sra %g7, 0, %o0 | ||
| 73 | retl | 71 | retl |
| 74 | nop | 72 | sra %g1, 0, %o0 |
| 75 | 2: BACKOFF_SPIN(%o2, %o3, 1b) | 73 | 2: BACKOFF_SPIN(%o2, %o3, 1b) |
| 76 | .size atomic_sub_ret, .-atomic_sub_ret | 74 | .size atomic_sub_ret, .-atomic_sub_ret |
| 77 | 75 | ||
| @@ -83,7 +81,7 @@ atomic64_add: /* %o0 = increment, %o1 = atomic_ptr */ | |||
| 83 | add %g1, %o0, %g7 | 81 | add %g1, %o0, %g7 |
| 84 | casx [%o1], %g1, %g7 | 82 | casx [%o1], %g1, %g7 |
| 85 | cmp %g1, %g7 | 83 | cmp %g1, %g7 |
| 86 | bne,pn %xcc, 2f | 84 | bne,pn %xcc, BACKOFF_LABEL(2f, 1b) |
| 87 | nop | 85 | nop |
| 88 | retl | 86 | retl |
| 89 | nop | 87 | nop |
| @@ -98,7 +96,7 @@ atomic64_sub: /* %o0 = decrement, %o1 = atomic_ptr */ | |||
| 98 | sub %g1, %o0, %g7 | 96 | sub %g1, %o0, %g7 |
| 99 | casx [%o1], %g1, %g7 | 97 | casx [%o1], %g1, %g7 |
| 100 | cmp %g1, %g7 | 98 | cmp %g1, %g7 |
| 101 | bne,pn %xcc, 2f | 99 | bne,pn %xcc, BACKOFF_LABEL(2f, 1b) |
| 102 | nop | 100 | nop |
| 103 | retl | 101 | retl |
| 104 | nop | 102 | nop |
| @@ -113,11 +111,10 @@ atomic64_add_ret: /* %o0 = increment, %o1 = atomic_ptr */ | |||
| 113 | add %g1, %o0, %g7 | 111 | add %g1, %o0, %g7 |
| 114 | casx [%o1], %g1, %g7 | 112 | casx [%o1], %g1, %g7 |
| 115 | cmp %g1, %g7 | 113 | cmp %g1, %g7 |
| 116 | bne,pn %xcc, 2f | 114 | bne,pn %xcc, BACKOFF_LABEL(2f, 1b) |
| 117 | add %g7, %o0, %g7 | ||
| 118 | mov %g7, %o0 | ||
| 119 | retl | ||
| 120 | nop | 115 | nop |
| 116 | retl | ||
| 117 | add %g1, %o0, %o0 | ||
| 121 | 2: BACKOFF_SPIN(%o2, %o3, 1b) | 118 | 2: BACKOFF_SPIN(%o2, %o3, 1b) |
| 122 | .size atomic64_add_ret, .-atomic64_add_ret | 119 | .size atomic64_add_ret, .-atomic64_add_ret |
| 123 | 120 | ||
| @@ -129,10 +126,9 @@ atomic64_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */ | |||
| 129 | sub %g1, %o0, %g7 | 126 | sub %g1, %o0, %g7 |
| 130 | casx [%o1], %g1, %g7 | 127 | casx [%o1], %g1, %g7 |
| 131 | cmp %g1, %g7 | 128 | cmp %g1, %g7 |
| 132 | bne,pn %xcc, 2f | 129 | bne,pn %xcc, BACKOFF_LABEL(2f, 1b) |
| 133 | sub %g7, %o0, %g7 | ||
| 134 | mov %g7, %o0 | ||
| 135 | retl | ||
| 136 | nop | 130 | nop |
| 131 | retl | ||
| 132 | sub %g1, %o0, %o0 | ||
| 137 | 2: BACKOFF_SPIN(%o2, %o3, 1b) | 133 | 2: BACKOFF_SPIN(%o2, %o3, 1b) |
| 138 | .size atomic64_sub_ret, .-atomic64_sub_ret | 134 | .size atomic64_sub_ret, .-atomic64_sub_ret |
diff --git a/arch/sparc/lib/bitops.S b/arch/sparc/lib/bitops.S index 2b7228cb8c22..3dc61d5537c0 100644 --- a/arch/sparc/lib/bitops.S +++ b/arch/sparc/lib/bitops.S | |||
| @@ -22,7 +22,7 @@ test_and_set_bit: /* %o0=nr, %o1=addr */ | |||
| 22 | or %g7, %o2, %g1 | 22 | or %g7, %o2, %g1 |
| 23 | casx [%o1], %g7, %g1 | 23 | casx [%o1], %g7, %g1 |
| 24 | cmp %g7, %g1 | 24 | cmp %g7, %g1 |
| 25 | bne,pn %xcc, 2f | 25 | bne,pn %xcc, BACKOFF_LABEL(2f, 1b) |
| 26 | and %g7, %o2, %g2 | 26 | and %g7, %o2, %g2 |
| 27 | clr %o0 | 27 | clr %o0 |
| 28 | movrne %g2, 1, %o0 | 28 | movrne %g2, 1, %o0 |
| @@ -45,7 +45,7 @@ test_and_clear_bit: /* %o0=nr, %o1=addr */ | |||
| 45 | andn %g7, %o2, %g1 | 45 | andn %g7, %o2, %g1 |
| 46 | casx [%o1], %g7, %g1 | 46 | casx [%o1], %g7, %g1 |
| 47 | cmp %g7, %g1 | 47 | cmp %g7, %g1 |
| 48 | bne,pn %xcc, 2f | 48 | bne,pn %xcc, BACKOFF_LABEL(2f, 1b) |
| 49 | and %g7, %o2, %g2 | 49 | and %g7, %o2, %g2 |
| 50 | clr %o0 | 50 | clr %o0 |
| 51 | movrne %g2, 1, %o0 | 51 | movrne %g2, 1, %o0 |
| @@ -68,7 +68,7 @@ test_and_change_bit: /* %o0=nr, %o1=addr */ | |||
| 68 | xor %g7, %o2, %g1 | 68 | xor %g7, %o2, %g1 |
| 69 | casx [%o1], %g7, %g1 | 69 | casx [%o1], %g7, %g1 |
| 70 | cmp %g7, %g1 | 70 | cmp %g7, %g1 |
| 71 | bne,pn %xcc, 2f | 71 | bne,pn %xcc, BACKOFF_LABEL(2f, 1b) |
| 72 | and %g7, %o2, %g2 | 72 | and %g7, %o2, %g2 |
| 73 | clr %o0 | 73 | clr %o0 |
| 74 | movrne %g2, 1, %o0 | 74 | movrne %g2, 1, %o0 |
| @@ -91,7 +91,7 @@ set_bit: /* %o0=nr, %o1=addr */ | |||
| 91 | or %g7, %o2, %g1 | 91 | or %g7, %o2, %g1 |
| 92 | casx [%o1], %g7, %g1 | 92 | casx [%o1], %g7, %g1 |
| 93 | cmp %g7, %g1 | 93 | cmp %g7, %g1 |
| 94 | bne,pn %xcc, 2f | 94 | bne,pn %xcc, BACKOFF_LABEL(2f, 1b) |
| 95 | nop | 95 | nop |
| 96 | retl | 96 | retl |
| 97 | nop | 97 | nop |
| @@ -112,7 +112,7 @@ clear_bit: /* %o0=nr, %o1=addr */ | |||
| 112 | andn %g7, %o2, %g1 | 112 | andn %g7, %o2, %g1 |
| 113 | casx [%o1], %g7, %g1 | 113 | casx [%o1], %g7, %g1 |
| 114 | cmp %g7, %g1 | 114 | cmp %g7, %g1 |
| 115 | bne,pn %xcc, 2f | 115 | bne,pn %xcc, BACKOFF_LABEL(2f, 1b) |
| 116 | nop | 116 | nop |
| 117 | retl | 117 | retl |
| 118 | nop | 118 | nop |
| @@ -133,7 +133,7 @@ change_bit: /* %o0=nr, %o1=addr */ | |||
| 133 | xor %g7, %o2, %g1 | 133 | xor %g7, %o2, %g1 |
| 134 | casx [%o1], %g7, %g1 | 134 | casx [%o1], %g7, %g1 |
| 135 | cmp %g7, %g1 | 135 | cmp %g7, %g1 |
| 136 | bne,pn %xcc, 2f | 136 | bne,pn %xcc, BACKOFF_LABEL(2f, 1b) |
| 137 | nop | 137 | nop |
| 138 | retl | 138 | retl |
| 139 | nop | 139 | nop |
diff --git a/arch/sparc/lib/rwsem_64.S b/arch/sparc/lib/rwsem_64.S deleted file mode 100644 index 91a7d29a79d5..000000000000 --- a/arch/sparc/lib/rwsem_64.S +++ /dev/null | |||
| @@ -1,163 +0,0 @@ | |||
| 1 | /* rwsem.S: RW semaphore assembler. | ||
| 2 | * | ||
| 3 | * Written by David S. Miller (davem@redhat.com), 2001. | ||
| 4 | * Derived from asm-i386/rwsem.h | ||
| 5 | */ | ||
| 6 | |||
| 7 | #include <asm/rwsem-const.h> | ||
| 8 | |||
| 9 | .section .sched.text, "ax" | ||
| 10 | |||
| 11 | .globl __down_read | ||
| 12 | __down_read: | ||
| 13 | 1: lduw [%o0], %g1 | ||
| 14 | add %g1, 1, %g7 | ||
| 15 | cas [%o0], %g1, %g7 | ||
| 16 | cmp %g1, %g7 | ||
| 17 | bne,pn %icc, 1b | ||
| 18 | add %g7, 1, %g7 | ||
| 19 | cmp %g7, 0 | ||
| 20 | bl,pn %icc, 3f | ||
| 21 | nop | ||
| 22 | 2: | ||
| 23 | retl | ||
| 24 | nop | ||
| 25 | 3: | ||
| 26 | save %sp, -192, %sp | ||
| 27 | call rwsem_down_read_failed | ||
| 28 | mov %i0, %o0 | ||
| 29 | ret | ||
| 30 | restore | ||
| 31 | .size __down_read, .-__down_read | ||
| 32 | |||
| 33 | .globl __down_read_trylock | ||
| 34 | __down_read_trylock: | ||
| 35 | 1: lduw [%o0], %g1 | ||
| 36 | add %g1, 1, %g7 | ||
| 37 | cmp %g7, 0 | ||
| 38 | bl,pn %icc, 2f | ||
| 39 | mov 0, %o1 | ||
| 40 | cas [%o0], %g1, %g7 | ||
| 41 | cmp %g1, %g7 | ||
| 42 | bne,pn %icc, 1b | ||
| 43 | mov 1, %o1 | ||
| 44 | 2: retl | ||
| 45 | mov %o1, %o0 | ||
| 46 | .size __down_read_trylock, .-__down_read_trylock | ||
| 47 | |||
| 48 | .globl __down_write | ||
| 49 | __down_write: | ||
| 50 | sethi %hi(RWSEM_ACTIVE_WRITE_BIAS), %g1 | ||
| 51 | or %g1, %lo(RWSEM_ACTIVE_WRITE_BIAS), %g1 | ||
| 52 | 1: | ||
| 53 | lduw [%o0], %g3 | ||
| 54 | add %g3, %g1, %g7 | ||
| 55 | cas [%o0], %g3, %g7 | ||
| 56 | cmp %g3, %g7 | ||
| 57 | bne,pn %icc, 1b | ||
| 58 | cmp %g7, 0 | ||
| 59 | bne,pn %icc, 3f | ||
| 60 | nop | ||
| 61 | 2: retl | ||
| 62 | nop | ||
| 63 | 3: | ||
| 64 | save %sp, -192, %sp | ||
| 65 | call rwsem_down_write_failed | ||
| 66 | mov %i0, %o0 | ||
| 67 | ret | ||
| 68 | restore | ||
| 69 | .size __down_write, .-__down_write | ||
| 70 | |||
| 71 | .globl __down_write_trylock | ||
| 72 | __down_write_trylock: | ||
| 73 | sethi %hi(RWSEM_ACTIVE_WRITE_BIAS), %g1 | ||
| 74 | or %g1, %lo(RWSEM_ACTIVE_WRITE_BIAS), %g1 | ||
| 75 | 1: | ||
| 76 | lduw [%o0], %g3 | ||
| 77 | cmp %g3, 0 | ||
| 78 | bne,pn %icc, 2f | ||
| 79 | mov 0, %o1 | ||
| 80 | add %g3, %g1, %g7 | ||
| 81 | cas [%o0], %g3, %g7 | ||
| 82 | cmp %g3, %g7 | ||
| 83 | bne,pn %icc, 1b | ||
| 84 | mov 1, %o1 | ||
| 85 | 2: retl | ||
| 86 | mov %o1, %o0 | ||
| 87 | .size __down_write_trylock, .-__down_write_trylock | ||
| 88 | |||
| 89 | .globl __up_read | ||
| 90 | __up_read: | ||
| 91 | 1: | ||
| 92 | lduw [%o0], %g1 | ||
| 93 | sub %g1, 1, %g7 | ||
| 94 | cas [%o0], %g1, %g7 | ||
| 95 | cmp %g1, %g7 | ||
| 96 | bne,pn %icc, 1b | ||
| 97 | cmp %g7, 0 | ||
| 98 | bl,pn %icc, 3f | ||
| 99 | nop | ||
| 100 | 2: retl | ||
| 101 | nop | ||
| 102 | 3: sethi %hi(RWSEM_ACTIVE_MASK), %g1 | ||
| 103 | sub %g7, 1, %g7 | ||
| 104 | or %g1, %lo(RWSEM_ACTIVE_MASK), %g1 | ||
| 105 | andcc %g7, %g1, %g0 | ||
| 106 | bne,pn %icc, 2b | ||
| 107 | nop | ||
| 108 | save %sp, -192, %sp | ||
| 109 | call rwsem_wake | ||
| 110 | mov %i0, %o0 | ||
| 111 | ret | ||
| 112 | restore | ||
| 113 | .size __up_read, .-__up_read | ||
| 114 | |||
| 115 | .globl __up_write | ||
| 116 | __up_write: | ||
| 117 | sethi %hi(RWSEM_ACTIVE_WRITE_BIAS), %g1 | ||
| 118 | or %g1, %lo(RWSEM_ACTIVE_WRITE_BIAS), %g1 | ||
| 119 | 1: | ||
| 120 | lduw [%o0], %g3 | ||
| 121 | sub %g3, %g1, %g7 | ||
| 122 | cas [%o0], %g3, %g7 | ||
| 123 | cmp %g3, %g7 | ||
| 124 | bne,pn %icc, 1b | ||
| 125 | sub %g7, %g1, %g7 | ||
| 126 | cmp %g7, 0 | ||
| 127 | bl,pn %icc, 3f | ||
| 128 | nop | ||
| 129 | 2: | ||
| 130 | retl | ||
| 131 | nop | ||
| 132 | 3: | ||
| 133 | save %sp, -192, %sp | ||
| 134 | call rwsem_wake | ||
| 135 | mov %i0, %o0 | ||
| 136 | ret | ||
| 137 | restore | ||
| 138 | .size __up_write, .-__up_write | ||
| 139 | |||
| 140 | .globl __downgrade_write | ||
| 141 | __downgrade_write: | ||
| 142 | sethi %hi(RWSEM_WAITING_BIAS), %g1 | ||
| 143 | or %g1, %lo(RWSEM_WAITING_BIAS), %g1 | ||
| 144 | 1: | ||
| 145 | lduw [%o0], %g3 | ||
| 146 | sub %g3, %g1, %g7 | ||
| 147 | cas [%o0], %g3, %g7 | ||
| 148 | cmp %g3, %g7 | ||
| 149 | bne,pn %icc, 1b | ||
| 150 | sub %g7, %g1, %g7 | ||
| 151 | cmp %g7, 0 | ||
| 152 | bl,pn %icc, 3f | ||
| 153 | nop | ||
| 154 | 2: | ||
| 155 | retl | ||
| 156 | nop | ||
| 157 | 3: | ||
| 158 | save %sp, -192, %sp | ||
| 159 | call rwsem_downgrade_wake | ||
| 160 | mov %i0, %o0 | ||
| 161 | ret | ||
| 162 | restore | ||
| 163 | .size __downgrade_write, .-__downgrade_write | ||
diff --git a/arch/sparc/prom/cif.S b/arch/sparc/prom/cif.S index 5f27ad779c0c..9c86b4b7d429 100644 --- a/arch/sparc/prom/cif.S +++ b/arch/sparc/prom/cif.S | |||
| @@ -9,18 +9,18 @@ | |||
| 9 | #include <asm/thread_info.h> | 9 | #include <asm/thread_info.h> |
| 10 | 10 | ||
| 11 | .text | 11 | .text |
| 12 | .globl prom_cif_interface | 12 | .globl prom_cif_direct |
| 13 | prom_cif_interface: | 13 | prom_cif_direct: |
| 14 | sethi %hi(p1275buf), %o0 | 14 | sethi %hi(p1275buf), %o1 |
| 15 | or %o0, %lo(p1275buf), %o0 | 15 | or %o1, %lo(p1275buf), %o1 |
| 16 | ldx [%o0 + 0x010], %o1 ! prom_cif_stack | 16 | ldx [%o1 + 0x0010], %o2 ! prom_cif_stack |
| 17 | save %o1, -192, %sp | 17 | save %o2, -192, %sp |
| 18 | ldx [%i0 + 0x008], %l2 ! prom_cif_handler | 18 | ldx [%i1 + 0x0008], %l2 ! prom_cif_handler |
| 19 | mov %g4, %l0 | 19 | mov %g4, %l0 |
| 20 | mov %g5, %l1 | 20 | mov %g5, %l1 |
| 21 | mov %g6, %l3 | 21 | mov %g6, %l3 |
| 22 | call %l2 | 22 | call %l2 |
| 23 | add %i0, 0x018, %o0 ! prom_args | 23 | mov %i0, %o0 ! prom_args |
| 24 | mov %l0, %g4 | 24 | mov %l0, %g4 |
| 25 | mov %l1, %g5 | 25 | mov %l1, %g5 |
| 26 | mov %l3, %g6 | 26 | mov %l3, %g6 |
diff --git a/arch/sparc/prom/console_64.c b/arch/sparc/prom/console_64.c index f55d58a8a156..10322dc2f557 100644 --- a/arch/sparc/prom/console_64.c +++ b/arch/sparc/prom/console_64.c | |||
| @@ -21,14 +21,22 @@ extern int prom_stdin, prom_stdout; | |||
| 21 | inline int | 21 | inline int |
| 22 | prom_nbgetchar(void) | 22 | prom_nbgetchar(void) |
| 23 | { | 23 | { |
| 24 | unsigned long args[7]; | ||
| 24 | char inc; | 25 | char inc; |
| 25 | 26 | ||
| 26 | if (p1275_cmd("read", P1275_ARG(1,P1275_ARG_OUT_BUF)| | 27 | args[0] = (unsigned long) "read"; |
| 27 | P1275_INOUT(3,1), | 28 | args[1] = 3; |
| 28 | prom_stdin, &inc, P1275_SIZE(1)) == 1) | 29 | args[2] = 1; |
| 30 | args[3] = (unsigned int) prom_stdin; | ||
| 31 | args[4] = (unsigned long) &inc; | ||
| 32 | args[5] = 1; | ||
| 33 | args[6] = (unsigned long) -1; | ||
| 34 | |||
| 35 | p1275_cmd_direct(args); | ||
| 36 | |||
| 37 | if (args[6] == 1) | ||
| 29 | return inc; | 38 | return inc; |
| 30 | else | 39 | return -1; |
| 31 | return -1; | ||
| 32 | } | 40 | } |
| 33 | 41 | ||
| 34 | /* Non blocking put character to console device, returns -1 if | 42 | /* Non blocking put character to console device, returns -1 if |
| @@ -37,12 +45,22 @@ prom_nbgetchar(void) | |||
| 37 | inline int | 45 | inline int |
| 38 | prom_nbputchar(char c) | 46 | prom_nbputchar(char c) |
| 39 | { | 47 | { |
| 48 | unsigned long args[7]; | ||
| 40 | char outc; | 49 | char outc; |
| 41 | 50 | ||
| 42 | outc = c; | 51 | outc = c; |
| 43 | if (p1275_cmd("write", P1275_ARG(1,P1275_ARG_IN_BUF)| | 52 | |
| 44 | P1275_INOUT(3,1), | 53 | args[0] = (unsigned long) "write"; |
| 45 | prom_stdout, &outc, P1275_SIZE(1)) == 1) | 54 | args[1] = 3; |
| 55 | args[2] = 1; | ||
| 56 | args[3] = (unsigned int) prom_stdout; | ||
| 57 | args[4] = (unsigned long) &outc; | ||
| 58 | args[5] = 1; | ||
| 59 | args[6] = (unsigned long) -1; | ||
| 60 | |||
| 61 | p1275_cmd_direct(args); | ||
| 62 | |||
| 63 | if (args[6] == 1) | ||
| 46 | return 0; | 64 | return 0; |
| 47 | else | 65 | else |
| 48 | return -1; | 66 | return -1; |
| @@ -67,7 +85,15 @@ prom_putchar(char c) | |||
| 67 | void | 85 | void |
| 68 | prom_puts(const char *s, int len) | 86 | prom_puts(const char *s, int len) |
| 69 | { | 87 | { |
| 70 | p1275_cmd("write", P1275_ARG(1,P1275_ARG_IN_BUF)| | 88 | unsigned long args[7]; |
| 71 | P1275_INOUT(3,1), | 89 | |
| 72 | prom_stdout, s, P1275_SIZE(len)); | 90 | args[0] = (unsigned long) "write"; |
| 91 | args[1] = 3; | ||
| 92 | args[2] = 1; | ||
| 93 | args[3] = (unsigned int) prom_stdout; | ||
| 94 | args[4] = (unsigned long) s; | ||
| 95 | args[5] = len; | ||
| 96 | args[6] = (unsigned long) -1; | ||
| 97 | |||
| 98 | p1275_cmd_direct(args); | ||
| 73 | } | 99 | } |
diff --git a/arch/sparc/prom/devops_64.c b/arch/sparc/prom/devops_64.c index 9dbd803e46e1..a017119e7ef1 100644 --- a/arch/sparc/prom/devops_64.c +++ b/arch/sparc/prom/devops_64.c | |||
| @@ -18,16 +18,32 @@ | |||
| 18 | int | 18 | int |
| 19 | prom_devopen(const char *dstr) | 19 | prom_devopen(const char *dstr) |
| 20 | { | 20 | { |
| 21 | return p1275_cmd ("open", P1275_ARG(0,P1275_ARG_IN_STRING)| | 21 | unsigned long args[5]; |
| 22 | P1275_INOUT(1,1), | 22 | |
| 23 | dstr); | 23 | args[0] = (unsigned long) "open"; |
| 24 | args[1] = 1; | ||
| 25 | args[2] = 1; | ||
| 26 | args[3] = (unsigned long) dstr; | ||
| 27 | args[4] = (unsigned long) -1; | ||
| 28 | |||
| 29 | p1275_cmd_direct(args); | ||
| 30 | |||
| 31 | return (int) args[4]; | ||
| 24 | } | 32 | } |
| 25 | 33 | ||
| 26 | /* Close the device described by device handle 'dhandle'. */ | 34 | /* Close the device described by device handle 'dhandle'. */ |
| 27 | int | 35 | int |
| 28 | prom_devclose(int dhandle) | 36 | prom_devclose(int dhandle) |
| 29 | { | 37 | { |
| 30 | p1275_cmd ("close", P1275_INOUT(1,0), dhandle); | 38 | unsigned long args[4]; |
| 39 | |||
| 40 | args[0] = (unsigned long) "close"; | ||
| 41 | args[1] = 1; | ||
| 42 | args[2] = 0; | ||
| 43 | args[3] = (unsigned int) dhandle; | ||
| 44 | |||
| 45 | p1275_cmd_direct(args); | ||
| 46 | |||
| 31 | return 0; | 47 | return 0; |
| 32 | } | 48 | } |
| 33 | 49 | ||
| @@ -37,5 +53,15 @@ prom_devclose(int dhandle) | |||
| 37 | void | 53 | void |
| 38 | prom_seek(int dhandle, unsigned int seekhi, unsigned int seeklo) | 54 | prom_seek(int dhandle, unsigned int seekhi, unsigned int seeklo) |
| 39 | { | 55 | { |
| 40 | p1275_cmd ("seek", P1275_INOUT(3,1), dhandle, seekhi, seeklo); | 56 | unsigned long args[7]; |
| 57 | |||
| 58 | args[0] = (unsigned long) "seek"; | ||
| 59 | args[1] = 3; | ||
| 60 | args[2] = 1; | ||
| 61 | args[3] = (unsigned int) dhandle; | ||
| 62 | args[4] = seekhi; | ||
| 63 | args[5] = seeklo; | ||
| 64 | args[6] = (unsigned long) -1; | ||
| 65 | |||
| 66 | p1275_cmd_direct(args); | ||
| 41 | } | 67 | } |
diff --git a/arch/sparc/prom/misc_64.c b/arch/sparc/prom/misc_64.c index 39fc6af21b7c..6cb1581d6aef 100644 --- a/arch/sparc/prom/misc_64.c +++ b/arch/sparc/prom/misc_64.c | |||
| @@ -20,10 +20,17 @@ | |||
| 20 | 20 | ||
| 21 | int prom_service_exists(const char *service_name) | 21 | int prom_service_exists(const char *service_name) |
| 22 | { | 22 | { |
| 23 | int err = p1275_cmd("test", P1275_ARG(0, P1275_ARG_IN_STRING) | | 23 | unsigned long args[5]; |
| 24 | P1275_INOUT(1, 1), service_name); | ||
| 25 | 24 | ||
| 26 | if (err) | 25 | args[0] = (unsigned long) "test"; |
| 26 | args[1] = 1; | ||
| 27 | args[2] = 1; | ||
| 28 | args[3] = (unsigned long) service_name; | ||
| 29 | args[4] = (unsigned long) -1; | ||
| 30 | |||
| 31 | p1275_cmd_direct(args); | ||
| 32 | |||
| 33 | if (args[4]) | ||
| 27 | return 0; | 34 | return 0; |
| 28 | return 1; | 35 | return 1; |
| 29 | } | 36 | } |
| @@ -31,30 +38,47 @@ int prom_service_exists(const char *service_name) | |||
| 31 | void prom_sun4v_guest_soft_state(void) | 38 | void prom_sun4v_guest_soft_state(void) |
| 32 | { | 39 | { |
| 33 | const char *svc = "SUNW,soft-state-supported"; | 40 | const char *svc = "SUNW,soft-state-supported"; |
| 41 | unsigned long args[3]; | ||
| 34 | 42 | ||
| 35 | if (!prom_service_exists(svc)) | 43 | if (!prom_service_exists(svc)) |
| 36 | return; | 44 | return; |
| 37 | p1275_cmd(svc, P1275_INOUT(0, 0)); | 45 | args[0] = (unsigned long) svc; |
| 46 | args[1] = 0; | ||
| 47 | args[2] = 0; | ||
| 48 | p1275_cmd_direct(args); | ||
| 38 | } | 49 | } |
| 39 | 50 | ||
| 40 | /* Reset and reboot the machine with the command 'bcommand'. */ | 51 | /* Reset and reboot the machine with the command 'bcommand'. */ |
| 41 | void prom_reboot(const char *bcommand) | 52 | void prom_reboot(const char *bcommand) |
| 42 | { | 53 | { |
| 54 | unsigned long args[4]; | ||
| 55 | |||
| 43 | #ifdef CONFIG_SUN_LDOMS | 56 | #ifdef CONFIG_SUN_LDOMS |
| 44 | if (ldom_domaining_enabled) | 57 | if (ldom_domaining_enabled) |
| 45 | ldom_reboot(bcommand); | 58 | ldom_reboot(bcommand); |
| 46 | #endif | 59 | #endif |
| 47 | p1275_cmd("boot", P1275_ARG(0, P1275_ARG_IN_STRING) | | 60 | args[0] = (unsigned long) "boot"; |
| 48 | P1275_INOUT(1, 0), bcommand); | 61 | args[1] = 1; |
| 62 | args[2] = 0; | ||
| 63 | args[3] = (unsigned long) bcommand; | ||
| 64 | |||
| 65 | p1275_cmd_direct(args); | ||
| 49 | } | 66 | } |
| 50 | 67 | ||
| 51 | /* Forth evaluate the expression contained in 'fstring'. */ | 68 | /* Forth evaluate the expression contained in 'fstring'. */ |
| 52 | void prom_feval(const char *fstring) | 69 | void prom_feval(const char *fstring) |
| 53 | { | 70 | { |
| 71 | unsigned long args[5]; | ||
| 72 | |||
| 54 | if (!fstring || fstring[0] == 0) | 73 | if (!fstring || fstring[0] == 0) |
| 55 | return; | 74 | return; |
| 56 | p1275_cmd("interpret", P1275_ARG(0, P1275_ARG_IN_STRING) | | 75 | args[0] = (unsigned long) "interpret"; |
| 57 | P1275_INOUT(1, 1), fstring); | 76 | args[1] = 1; |
| 77 | args[2] = 1; | ||
| 78 | args[3] = (unsigned long) fstring; | ||
| 79 | args[4] = (unsigned long) -1; | ||
| 80 | |||
| 81 | p1275_cmd_direct(args); | ||
| 58 | } | 82 | } |
| 59 | EXPORT_SYMBOL(prom_feval); | 83 | EXPORT_SYMBOL(prom_feval); |
| 60 | 84 | ||
| @@ -68,6 +92,7 @@ extern void smp_release(void); | |||
| 68 | */ | 92 | */ |
| 69 | void prom_cmdline(void) | 93 | void prom_cmdline(void) |
| 70 | { | 94 | { |
| 95 | unsigned long args[3]; | ||
| 71 | unsigned long flags; | 96 | unsigned long flags; |
| 72 | 97 | ||
| 73 | local_irq_save(flags); | 98 | local_irq_save(flags); |
| @@ -76,7 +101,11 @@ void prom_cmdline(void) | |||
| 76 | smp_capture(); | 101 | smp_capture(); |
| 77 | #endif | 102 | #endif |
| 78 | 103 | ||
| 79 | p1275_cmd("enter", P1275_INOUT(0, 0)); | 104 | args[0] = (unsigned long) "enter"; |
| 105 | args[1] = 0; | ||
| 106 | args[2] = 0; | ||
| 107 | |||
| 108 | p1275_cmd_direct(args); | ||
| 80 | 109 | ||
| 81 | #ifdef CONFIG_SMP | 110 | #ifdef CONFIG_SMP |
| 82 | smp_release(); | 111 | smp_release(); |
| @@ -90,22 +119,32 @@ void prom_cmdline(void) | |||
| 90 | */ | 119 | */ |
| 91 | void notrace prom_halt(void) | 120 | void notrace prom_halt(void) |
| 92 | { | 121 | { |
| 122 | unsigned long args[3]; | ||
| 123 | |||
| 93 | #ifdef CONFIG_SUN_LDOMS | 124 | #ifdef CONFIG_SUN_LDOMS |
| 94 | if (ldom_domaining_enabled) | 125 | if (ldom_domaining_enabled) |
| 95 | ldom_power_off(); | 126 | ldom_power_off(); |
| 96 | #endif | 127 | #endif |
| 97 | again: | 128 | again: |
| 98 | p1275_cmd("exit", P1275_INOUT(0, 0)); | 129 | args[0] = (unsigned long) "exit"; |
| 130 | args[1] = 0; | ||
| 131 | args[2] = 0; | ||
| 132 | p1275_cmd_direct(args); | ||
| 99 | goto again; /* PROM is out to get me -DaveM */ | 133 | goto again; /* PROM is out to get me -DaveM */ |
| 100 | } | 134 | } |
| 101 | 135 | ||
| 102 | void prom_halt_power_off(void) | 136 | void prom_halt_power_off(void) |
| 103 | { | 137 | { |
| 138 | unsigned long args[3]; | ||
| 139 | |||
| 104 | #ifdef CONFIG_SUN_LDOMS | 140 | #ifdef CONFIG_SUN_LDOMS |
| 105 | if (ldom_domaining_enabled) | 141 | if (ldom_domaining_enabled) |
| 106 | ldom_power_off(); | 142 | ldom_power_off(); |
| 107 | #endif | 143 | #endif |
| 108 | p1275_cmd("SUNW,power-off", P1275_INOUT(0, 0)); | 144 | args[0] = (unsigned long) "SUNW,power-off"; |
| 145 | args[1] = 0; | ||
| 146 | args[2] = 0; | ||
| 147 | p1275_cmd_direct(args); | ||
| 109 | 148 | ||
| 110 | /* if nothing else helps, we just halt */ | 149 | /* if nothing else helps, we just halt */ |
| 111 | prom_halt(); | 150 | prom_halt(); |
| @@ -114,10 +153,15 @@ void prom_halt_power_off(void) | |||
| 114 | /* Set prom sync handler to call function 'funcp'. */ | 153 | /* Set prom sync handler to call function 'funcp'. */ |
| 115 | void prom_setcallback(callback_func_t funcp) | 154 | void prom_setcallback(callback_func_t funcp) |
| 116 | { | 155 | { |
| 156 | unsigned long args[5]; | ||
| 117 | if (!funcp) | 157 | if (!funcp) |
| 118 | return; | 158 | return; |
| 119 | p1275_cmd("set-callback", P1275_ARG(0, P1275_ARG_IN_FUNCTION) | | 159 | args[0] = (unsigned long) "set-callback"; |
| 120 | P1275_INOUT(1, 1), funcp); | 160 | args[1] = 1; |
| 161 | args[2] = 1; | ||
| 162 | args[3] = (unsigned long) funcp; | ||
| 163 | args[4] = (unsigned long) -1; | ||
| 164 | p1275_cmd_direct(args); | ||
| 121 | } | 165 | } |
| 122 | 166 | ||
| 123 | /* Get the idprom and stuff it into buffer 'idbuf'. Returns the | 167 | /* Get the idprom and stuff it into buffer 'idbuf'. Returns the |
| @@ -173,57 +217,61 @@ static int prom_get_memory_ihandle(void) | |||
| 173 | } | 217 | } |
| 174 | 218 | ||
| 175 | /* Load explicit I/D TLB entries. */ | 219 | /* Load explicit I/D TLB entries. */ |
| 220 | static long tlb_load(const char *type, unsigned long index, | ||
| 221 | unsigned long tte_data, unsigned long vaddr) | ||
| 222 | { | ||
| 223 | unsigned long args[9]; | ||
| 224 | |||
| 225 | args[0] = (unsigned long) prom_callmethod_name; | ||
| 226 | args[1] = 5; | ||
| 227 | args[2] = 1; | ||
| 228 | args[3] = (unsigned long) type; | ||
| 229 | args[4] = (unsigned int) prom_get_mmu_ihandle(); | ||
| 230 | args[5] = vaddr; | ||
| 231 | args[6] = tte_data; | ||
| 232 | args[7] = index; | ||
| 233 | args[8] = (unsigned long) -1; | ||
| 234 | |||
| 235 | p1275_cmd_direct(args); | ||
| 236 | |||
| 237 | return (long) args[8]; | ||
| 238 | } | ||
| 239 | |||
| 176 | long prom_itlb_load(unsigned long index, | 240 | long prom_itlb_load(unsigned long index, |
| 177 | unsigned long tte_data, | 241 | unsigned long tte_data, |
| 178 | unsigned long vaddr) | 242 | unsigned long vaddr) |
| 179 | { | 243 | { |
| 180 | return p1275_cmd(prom_callmethod_name, | 244 | return tlb_load("SUNW,itlb-load", index, tte_data, vaddr); |
| 181 | (P1275_ARG(0, P1275_ARG_IN_STRING) | | ||
| 182 | P1275_ARG(2, P1275_ARG_IN_64B) | | ||
| 183 | P1275_ARG(3, P1275_ARG_IN_64B) | | ||
| 184 | P1275_INOUT(5, 1)), | ||
| 185 | "SUNW,itlb-load", | ||
| 186 | prom_get_mmu_ihandle(), | ||
| 187 | /* And then our actual args are pushed backwards. */ | ||
| 188 | vaddr, | ||
| 189 | tte_data, | ||
| 190 | index); | ||
| 191 | } | 245 | } |
| 192 | 246 | ||
| 193 | long prom_dtlb_load(unsigned long index, | 247 | long prom_dtlb_load(unsigned long index, |
| 194 | unsigned long tte_data, | 248 | unsigned long tte_data, |
| 195 | unsigned long vaddr) | 249 | unsigned long vaddr) |
| 196 | { | 250 | { |
| 197 | return p1275_cmd(prom_callmethod_name, | 251 | return tlb_load("SUNW,dtlb-load", index, tte_data, vaddr); |
| 198 | (P1275_ARG(0, P1275_ARG_IN_STRING) | | ||
| 199 | P1275_ARG(2, P1275_ARG_IN_64B) | | ||
| 200 | P1275_ARG(3, P1275_ARG_IN_64B) | | ||
| 201 | P1275_INOUT(5, 1)), | ||
| 202 | "SUNW,dtlb-load", | ||
| 203 | prom_get_mmu_ihandle(), | ||
| 204 | /* And then our actual args are pushed backwards. */ | ||
| 205 | vaddr, | ||
| 206 | tte_data, | ||
| 207 | index); | ||
| 208 | } | 252 | } |
| 209 | 253 | ||
| 210 | int prom_map(int mode, unsigned long size, | 254 | int prom_map(int mode, unsigned long size, |
| 211 | unsigned long vaddr, unsigned long paddr) | 255 | unsigned long vaddr, unsigned long paddr) |
| 212 | { | 256 | { |
| 213 | int ret = p1275_cmd(prom_callmethod_name, | 257 | unsigned long args[11]; |
| 214 | (P1275_ARG(0, P1275_ARG_IN_STRING) | | 258 | int ret; |
| 215 | P1275_ARG(3, P1275_ARG_IN_64B) | | 259 | |
| 216 | P1275_ARG(4, P1275_ARG_IN_64B) | | 260 | args[0] = (unsigned long) prom_callmethod_name; |
| 217 | P1275_ARG(6, P1275_ARG_IN_64B) | | 261 | args[1] = 7; |
| 218 | P1275_INOUT(7, 1)), | 262 | args[2] = 1; |
| 219 | prom_map_name, | 263 | args[3] = (unsigned long) prom_map_name; |
| 220 | prom_get_mmu_ihandle(), | 264 | args[4] = (unsigned int) prom_get_mmu_ihandle(); |
| 221 | mode, | 265 | args[5] = (unsigned int) mode; |
| 222 | size, | 266 | args[6] = size; |
| 223 | vaddr, | 267 | args[7] = vaddr; |
| 224 | 0, | 268 | args[8] = 0; |
| 225 | paddr); | 269 | args[9] = paddr; |
| 226 | 270 | args[10] = (unsigned long) -1; | |
| 271 | |||
| 272 | p1275_cmd_direct(args); | ||
| 273 | |||
| 274 | ret = (int) args[10]; | ||
| 227 | if (ret == 0) | 275 | if (ret == 0) |
| 228 | ret = -1; | 276 | ret = -1; |
| 229 | return ret; | 277 | return ret; |
| @@ -231,40 +279,51 @@ int prom_map(int mode, unsigned long size, | |||
| 231 | 279 | ||
| 232 | void prom_unmap(unsigned long size, unsigned long vaddr) | 280 | void prom_unmap(unsigned long size, unsigned long vaddr) |
| 233 | { | 281 | { |
| 234 | p1275_cmd(prom_callmethod_name, | 282 | unsigned long args[7]; |
| 235 | (P1275_ARG(0, P1275_ARG_IN_STRING) | | 283 | |
| 236 | P1275_ARG(2, P1275_ARG_IN_64B) | | 284 | args[0] = (unsigned long) prom_callmethod_name; |
| 237 | P1275_ARG(3, P1275_ARG_IN_64B) | | 285 | args[1] = 4; |
| 238 | P1275_INOUT(4, 0)), | 286 | args[2] = 0; |
| 239 | prom_unmap_name, | 287 | args[3] = (unsigned long) prom_unmap_name; |
| 240 | prom_get_mmu_ihandle(), | 288 | args[4] = (unsigned int) prom_get_mmu_ihandle(); |
| 241 | size, | 289 | args[5] = size; |
| 242 | vaddr); | 290 | args[6] = vaddr; |
| 291 | |||
| 292 | p1275_cmd_direct(args); | ||
| 243 | } | 293 | } |
| 244 | 294 | ||
| 245 | /* Set aside physical memory which is not touched or modified | 295 | /* Set aside physical memory which is not touched or modified |
| 246 | * across soft resets. | 296 | * across soft resets. |
| 247 | */ | 297 | */ |
| 248 | unsigned long prom_retain(const char *name, | 298 | int prom_retain(const char *name, unsigned long size, |
| 249 | unsigned long pa_low, unsigned long pa_high, | 299 | unsigned long align, unsigned long *paddr) |
| 250 | long size, long align) | ||
| 251 | { | 300 | { |
| 252 | /* XXX I don't think we return multiple values correctly. | 301 | unsigned long args[11]; |
| 253 | * XXX OBP supposedly returns pa_low/pa_high here, how does | 302 | |
| 254 | * XXX it work? | 303 | args[0] = (unsigned long) prom_callmethod_name; |
| 304 | args[1] = 5; | ||
| 305 | args[2] = 3; | ||
| 306 | args[3] = (unsigned long) "SUNW,retain"; | ||
| 307 | args[4] = (unsigned int) prom_get_memory_ihandle(); | ||
| 308 | args[5] = align; | ||
| 309 | args[6] = size; | ||
| 310 | args[7] = (unsigned long) name; | ||
| 311 | args[8] = (unsigned long) -1; | ||
| 312 | args[9] = (unsigned long) -1; | ||
| 313 | args[10] = (unsigned long) -1; | ||
| 314 | |||
| 315 | p1275_cmd_direct(args); | ||
| 316 | |||
| 317 | if (args[8]) | ||
| 318 | return (int) args[8]; | ||
| 319 | |||
| 320 | /* Next we get "phys_high" then "phys_low". On 64-bit | ||
| 321 | * the phys_high cell is don't care since the phys_low | ||
| 322 | * cell has the full value. | ||
| 255 | */ | 323 | */ |
| 324 | *paddr = args[10]; | ||
| 256 | 325 | ||
| 257 | /* If align is zero, the pa_low/pa_high args are passed, | 326 | return 0; |
| 258 | * else they are not. | ||
| 259 | */ | ||
| 260 | if (align == 0) | ||
| 261 | return p1275_cmd("SUNW,retain", | ||
| 262 | (P1275_ARG(0, P1275_ARG_IN_BUF) | P1275_INOUT(5, 2)), | ||
| 263 | name, pa_low, pa_high, size, align); | ||
| 264 | else | ||
| 265 | return p1275_cmd("SUNW,retain", | ||
| 266 | (P1275_ARG(0, P1275_ARG_IN_BUF) | P1275_INOUT(3, 2)), | ||
| 267 | name, size, align); | ||
| 268 | } | 327 | } |
| 269 | 328 | ||
| 270 | /* Get "Unumber" string for the SIMM at the given | 329 | /* Get "Unumber" string for the SIMM at the given |
| @@ -277,62 +336,129 @@ int prom_getunumber(int syndrome_code, | |||
| 277 | unsigned long phys_addr, | 336 | unsigned long phys_addr, |
| 278 | char *buf, int buflen) | 337 | char *buf, int buflen) |
| 279 | { | 338 | { |
| 280 | return p1275_cmd(prom_callmethod_name, | 339 | unsigned long args[12]; |
| 281 | (P1275_ARG(0, P1275_ARG_IN_STRING) | | 340 | |
| 282 | P1275_ARG(3, P1275_ARG_OUT_BUF) | | 341 | args[0] = (unsigned long) prom_callmethod_name; |
| 283 | P1275_ARG(6, P1275_ARG_IN_64B) | | 342 | args[1] = 7; |
| 284 | P1275_INOUT(8, 2)), | 343 | args[2] = 2; |
| 285 | "SUNW,get-unumber", prom_get_memory_ihandle(), | 344 | args[3] = (unsigned long) "SUNW,get-unumber"; |
| 286 | buflen, buf, P1275_SIZE(buflen), | 345 | args[4] = (unsigned int) prom_get_memory_ihandle(); |
| 287 | 0, phys_addr, syndrome_code); | 346 | args[5] = buflen; |
| 347 | args[6] = (unsigned long) buf; | ||
| 348 | args[7] = 0; | ||
| 349 | args[8] = phys_addr; | ||
| 350 | args[9] = (unsigned int) syndrome_code; | ||
| 351 | args[10] = (unsigned long) -1; | ||
| 352 | args[11] = (unsigned long) -1; | ||
| 353 | |||
| 354 | p1275_cmd_direct(args); | ||
| 355 | |||
| 356 | return (int) args[10]; | ||
| 288 | } | 357 | } |
| 289 | 358 | ||
| 290 | /* Power management extensions. */ | 359 | /* Power management extensions. */ |
| 291 | void prom_sleepself(void) | 360 | void prom_sleepself(void) |
| 292 | { | 361 | { |
| 293 | p1275_cmd("SUNW,sleep-self", P1275_INOUT(0, 0)); | 362 | unsigned long args[3]; |
| 363 | |||
| 364 | args[0] = (unsigned long) "SUNW,sleep-self"; | ||
| 365 | args[1] = 0; | ||
| 366 | args[2] = 0; | ||
| 367 | p1275_cmd_direct(args); | ||
| 294 | } | 368 | } |
| 295 | 369 | ||
| 296 | int prom_sleepsystem(void) | 370 | int prom_sleepsystem(void) |
| 297 | { | 371 | { |
| 298 | return p1275_cmd("SUNW,sleep-system", P1275_INOUT(0, 1)); | 372 | unsigned long args[4]; |
| 373 | |||
| 374 | args[0] = (unsigned long) "SUNW,sleep-system"; | ||
| 375 | args[1] = 0; | ||
| 376 | args[2] = 1; | ||
| 377 | args[3] = (unsigned long) -1; | ||
| 378 | p1275_cmd_direct(args); | ||
| 379 | |||
| 380 | return (int) args[3]; | ||
| 299 | } | 381 | } |
| 300 | 382 | ||
| 301 | int prom_wakeupsystem(void) | 383 | int prom_wakeupsystem(void) |
| 302 | { | 384 | { |
| 303 | return p1275_cmd("SUNW,wakeup-system", P1275_INOUT(0, 1)); | 385 | unsigned long args[4]; |
| 386 | |||
| 387 | args[0] = (unsigned long) "SUNW,wakeup-system"; | ||
| 388 | args[1] = 0; | ||
| 389 | args[2] = 1; | ||
| 390 | args[3] = (unsigned long) -1; | ||
| 391 | p1275_cmd_direct(args); | ||
| 392 | |||
| 393 | return (int) args[3]; | ||
| 304 | } | 394 | } |
| 305 | 395 | ||
| 306 | #ifdef CONFIG_SMP | 396 | #ifdef CONFIG_SMP |
| 307 | void prom_startcpu(int cpunode, unsigned long pc, unsigned long arg) | 397 | void prom_startcpu(int cpunode, unsigned long pc, unsigned long arg) |
| 308 | { | 398 | { |
| 309 | p1275_cmd("SUNW,start-cpu", P1275_INOUT(3, 0), cpunode, pc, arg); | 399 | unsigned long args[6]; |
| 400 | |||
| 401 | args[0] = (unsigned long) "SUNW,start-cpu"; | ||
| 402 | args[1] = 3; | ||
| 403 | args[2] = 0; | ||
| 404 | args[3] = (unsigned int) cpunode; | ||
| 405 | args[4] = pc; | ||
| 406 | args[5] = arg; | ||
| 407 | p1275_cmd_direct(args); | ||
| 310 | } | 408 | } |
| 311 | 409 | ||
| 312 | void prom_startcpu_cpuid(int cpuid, unsigned long pc, unsigned long arg) | 410 | void prom_startcpu_cpuid(int cpuid, unsigned long pc, unsigned long arg) |
| 313 | { | 411 | { |
| 314 | p1275_cmd("SUNW,start-cpu-by-cpuid", P1275_INOUT(3, 0), | 412 | unsigned long args[6]; |
| 315 | cpuid, pc, arg); | 413 | |
| 414 | args[0] = (unsigned long) "SUNW,start-cpu-by-cpuid"; | ||
| 415 | args[1] = 3; | ||
| 416 | args[2] = 0; | ||
| 417 | args[3] = (unsigned int) cpuid; | ||
| 418 | args[4] = pc; | ||
| 419 | args[5] = arg; | ||
| 420 | p1275_cmd_direct(args); | ||
| 316 | } | 421 | } |
| 317 | 422 | ||
| 318 | void prom_stopcpu_cpuid(int cpuid) | 423 | void prom_stopcpu_cpuid(int cpuid) |
| 319 | { | 424 | { |
| 320 | p1275_cmd("SUNW,stop-cpu-by-cpuid", P1275_INOUT(1, 0), | 425 | unsigned long args[4]; |
| 321 | cpuid); | 426 | |
| 427 | args[0] = (unsigned long) "SUNW,stop-cpu-by-cpuid"; | ||
| 428 | args[1] = 1; | ||
| 429 | args[2] = 0; | ||
| 430 | args[3] = (unsigned int) cpuid; | ||
| 431 | p1275_cmd_direct(args); | ||
| 322 | } | 432 | } |
| 323 | 433 | ||
| 324 | void prom_stopself(void) | 434 | void prom_stopself(void) |
| 325 | { | 435 | { |
| 326 | p1275_cmd("SUNW,stop-self", P1275_INOUT(0, 0)); | 436 | unsigned long args[3]; |
| 437 | |||
| 438 | args[0] = (unsigned long) "SUNW,stop-self"; | ||
| 439 | args[1] = 0; | ||
| 440 | args[2] = 0; | ||
| 441 | p1275_cmd_direct(args); | ||
| 327 | } | 442 | } |
| 328 | 443 | ||
| 329 | void prom_idleself(void) | 444 | void prom_idleself(void) |
| 330 | { | 445 | { |
| 331 | p1275_cmd("SUNW,idle-self", P1275_INOUT(0, 0)); | 446 | unsigned long args[3]; |
| 447 | |||
| 448 | args[0] = (unsigned long) "SUNW,idle-self"; | ||
| 449 | args[1] = 0; | ||
| 450 | args[2] = 0; | ||
| 451 | p1275_cmd_direct(args); | ||
| 332 | } | 452 | } |
| 333 | 453 | ||
| 334 | void prom_resumecpu(int cpunode) | 454 | void prom_resumecpu(int cpunode) |
| 335 | { | 455 | { |
| 336 | p1275_cmd("SUNW,resume-cpu", P1275_INOUT(1, 0), cpunode); | 456 | unsigned long args[4]; |
| 457 | |||
| 458 | args[0] = (unsigned long) "SUNW,resume-cpu"; | ||
| 459 | args[1] = 1; | ||
| 460 | args[2] = 0; | ||
| 461 | args[3] = (unsigned int) cpunode; | ||
| 462 | p1275_cmd_direct(args); | ||
| 337 | } | 463 | } |
| 338 | #endif | 464 | #endif |
diff --git a/arch/sparc/prom/p1275.c b/arch/sparc/prom/p1275.c index 2d8b70d397f1..fa6e4e219b9c 100644 --- a/arch/sparc/prom/p1275.c +++ b/arch/sparc/prom/p1275.c | |||
| @@ -22,13 +22,11 @@ struct { | |||
| 22 | long prom_callback; /* 0x00 */ | 22 | long prom_callback; /* 0x00 */ |
| 23 | void (*prom_cif_handler)(long *); /* 0x08 */ | 23 | void (*prom_cif_handler)(long *); /* 0x08 */ |
| 24 | unsigned long prom_cif_stack; /* 0x10 */ | 24 | unsigned long prom_cif_stack; /* 0x10 */ |
| 25 | unsigned long prom_args [23]; /* 0x18 */ | ||
| 26 | char prom_buffer [3000]; | ||
| 27 | } p1275buf; | 25 | } p1275buf; |
| 28 | 26 | ||
| 29 | extern void prom_world(int); | 27 | extern void prom_world(int); |
| 30 | 28 | ||
| 31 | extern void prom_cif_interface(void); | 29 | extern void prom_cif_direct(unsigned long *args); |
| 32 | extern void prom_cif_callback(void); | 30 | extern void prom_cif_callback(void); |
| 33 | 31 | ||
| 34 | /* | 32 | /* |
| @@ -36,114 +34,20 @@ extern void prom_cif_callback(void); | |||
| 36 | */ | 34 | */ |
| 37 | DEFINE_RAW_SPINLOCK(prom_entry_lock); | 35 | DEFINE_RAW_SPINLOCK(prom_entry_lock); |
| 38 | 36 | ||
| 39 | long p1275_cmd(const char *service, long fmt, ...) | 37 | void p1275_cmd_direct(unsigned long *args) |
| 40 | { | 38 | { |
| 41 | char *p, *q; | ||
| 42 | unsigned long flags; | 39 | unsigned long flags; |
| 43 | int nargs, nrets, i; | ||
| 44 | va_list list; | ||
| 45 | long attrs, x; | ||
| 46 | |||
| 47 | p = p1275buf.prom_buffer; | ||
| 48 | 40 | ||
| 49 | raw_local_save_flags(flags); | 41 | raw_local_save_flags(flags); |
| 50 | raw_local_irq_restore(PIL_NMI); | 42 | raw_local_irq_restore(PIL_NMI); |
| 51 | raw_spin_lock(&prom_entry_lock); | 43 | raw_spin_lock(&prom_entry_lock); |
| 52 | 44 | ||
| 53 | p1275buf.prom_args[0] = (unsigned long)p; /* service */ | ||
| 54 | strcpy (p, service); | ||
| 55 | p = (char *)(((long)(strchr (p, 0) + 8)) & ~7); | ||
| 56 | p1275buf.prom_args[1] = nargs = (fmt & 0x0f); /* nargs */ | ||
| 57 | p1275buf.prom_args[2] = nrets = ((fmt & 0xf0) >> 4); /* nrets */ | ||
| 58 | attrs = fmt >> 8; | ||
| 59 | va_start(list, fmt); | ||
| 60 | for (i = 0; i < nargs; i++, attrs >>= 3) { | ||
| 61 | switch (attrs & 0x7) { | ||
| 62 | case P1275_ARG_NUMBER: | ||
| 63 | p1275buf.prom_args[i + 3] = | ||
| 64 | (unsigned)va_arg(list, long); | ||
| 65 | break; | ||
| 66 | case P1275_ARG_IN_64B: | ||
| 67 | p1275buf.prom_args[i + 3] = | ||
| 68 | va_arg(list, unsigned long); | ||
| 69 | break; | ||
| 70 | case P1275_ARG_IN_STRING: | ||
| 71 | strcpy (p, va_arg(list, char *)); | ||
| 72 | p1275buf.prom_args[i + 3] = (unsigned long)p; | ||
| 73 | p = (char *)(((long)(strchr (p, 0) + 8)) & ~7); | ||
| 74 | break; | ||
| 75 | case P1275_ARG_OUT_BUF: | ||
| 76 | (void) va_arg(list, char *); | ||
| 77 | p1275buf.prom_args[i + 3] = (unsigned long)p; | ||
| 78 | x = va_arg(list, long); | ||
| 79 | i++; attrs >>= 3; | ||
| 80 | p = (char *)(((long)(p + (int)x + 7)) & ~7); | ||
| 81 | p1275buf.prom_args[i + 3] = x; | ||
| 82 | break; | ||
| 83 | case P1275_ARG_IN_BUF: | ||
| 84 | q = va_arg(list, char *); | ||
| 85 | p1275buf.prom_args[i + 3] = (unsigned long)p; | ||
| 86 | x = va_arg(list, long); | ||
| 87 | i++; attrs >>= 3; | ||
| 88 | memcpy (p, q, (int)x); | ||
| 89 | p = (char *)(((long)(p + (int)x + 7)) & ~7); | ||
| 90 | p1275buf.prom_args[i + 3] = x; | ||
| 91 | break; | ||
| 92 | case P1275_ARG_OUT_32B: | ||
| 93 | (void) va_arg(list, char *); | ||
| 94 | p1275buf.prom_args[i + 3] = (unsigned long)p; | ||
| 95 | p += 32; | ||
| 96 | break; | ||
| 97 | case P1275_ARG_IN_FUNCTION: | ||
| 98 | p1275buf.prom_args[i + 3] = | ||
| 99 | (unsigned long)prom_cif_callback; | ||
| 100 | p1275buf.prom_callback = va_arg(list, long); | ||
| 101 | break; | ||
| 102 | } | ||
| 103 | } | ||
| 104 | va_end(list); | ||
| 105 | |||
| 106 | prom_world(1); | 45 | prom_world(1); |
| 107 | prom_cif_interface(); | 46 | prom_cif_direct(args); |
| 108 | prom_world(0); | 47 | prom_world(0); |
| 109 | 48 | ||
| 110 | attrs = fmt >> 8; | ||
| 111 | va_start(list, fmt); | ||
| 112 | for (i = 0; i < nargs; i++, attrs >>= 3) { | ||
| 113 | switch (attrs & 0x7) { | ||
| 114 | case P1275_ARG_NUMBER: | ||
| 115 | (void) va_arg(list, long); | ||
| 116 | break; | ||
| 117 | case P1275_ARG_IN_STRING: | ||
| 118 | (void) va_arg(list, char *); | ||
| 119 | break; | ||
| 120 | case P1275_ARG_IN_FUNCTION: | ||
| 121 | (void) va_arg(list, long); | ||
| 122 | break; | ||
| 123 | case P1275_ARG_IN_BUF: | ||
| 124 | (void) va_arg(list, char *); | ||
| 125 | (void) va_arg(list, long); | ||
| 126 | i++; attrs >>= 3; | ||
| 127 | break; | ||
| 128 | case P1275_ARG_OUT_BUF: | ||
| 129 | p = va_arg(list, char *); | ||
| 130 | x = va_arg(list, long); | ||
| 131 | memcpy (p, (char *)(p1275buf.prom_args[i + 3]), (int)x); | ||
| 132 | i++; attrs >>= 3; | ||
| 133 | break; | ||
| 134 | case P1275_ARG_OUT_32B: | ||
| 135 | p = va_arg(list, char *); | ||
| 136 | memcpy (p, (char *)(p1275buf.prom_args[i + 3]), 32); | ||
| 137 | break; | ||
| 138 | } | ||
| 139 | } | ||
| 140 | va_end(list); | ||
| 141 | x = p1275buf.prom_args [nargs + 3]; | ||
| 142 | |||
| 143 | raw_spin_unlock(&prom_entry_lock); | 49 | raw_spin_unlock(&prom_entry_lock); |
| 144 | raw_local_irq_restore(flags); | 50 | raw_local_irq_restore(flags); |
| 145 | |||
| 146 | return x; | ||
| 147 | } | 51 | } |
| 148 | 52 | ||
| 149 | void prom_cif_init(void *cif_handler, void *cif_stack) | 53 | void prom_cif_init(void *cif_handler, void *cif_stack) |
diff --git a/arch/sparc/prom/tree_64.c b/arch/sparc/prom/tree_64.c index 3c0d2dd9f693..9d3f9137a43a 100644 --- a/arch/sparc/prom/tree_64.c +++ b/arch/sparc/prom/tree_64.c | |||
| @@ -16,22 +16,39 @@ | |||
| 16 | #include <asm/oplib.h> | 16 | #include <asm/oplib.h> |
| 17 | #include <asm/ldc.h> | 17 | #include <asm/ldc.h> |
| 18 | 18 | ||
| 19 | static int prom_node_to_node(const char *type, int node) | ||
| 20 | { | ||
| 21 | unsigned long args[5]; | ||
| 22 | |||
| 23 | args[0] = (unsigned long) type; | ||
| 24 | args[1] = 1; | ||
| 25 | args[2] = 1; | ||
| 26 | args[3] = (unsigned int) node; | ||
| 27 | args[4] = (unsigned long) -1; | ||
| 28 | |||
| 29 | p1275_cmd_direct(args); | ||
| 30 | |||
| 31 | return (int) args[4]; | ||
| 32 | } | ||
| 33 | |||
| 19 | /* Return the child of node 'node' or zero if this node has no | 34 |
| 20 | * direct descendant. | 35 |
| 21 | */ | 36 | */ |
| 22 | inline int __prom_getchild(int node) | 37 | inline int __prom_getchild(int node) |
| 23 | { | 38 | { |
| 24 | return p1275_cmd ("child", P1275_INOUT(1, 1), node); | 39 | return prom_node_to_node("child", node); |
| 25 | } | 40 | } |
| 26 | 41 | ||
| 27 | inline int prom_getchild(int node) | 42 | inline int prom_getchild(int node) |
| 28 | { | 43 | { |
| 29 | int cnode; | 44 | int cnode; |
| 30 | 45 | ||
| 31 | if(node == -1) return 0; | 46 | if (node == -1) |
| 47 | return 0; | ||
| 32 | cnode = __prom_getchild(node); | 48 | cnode = __prom_getchild(node); |
| 33 | if(cnode == -1) return 0; | 49 | if (cnode == -1) |
| 34 | return (int)cnode; | 50 | return 0; |
| 51 | return cnode; | ||
| 35 | } | 52 | } |
| 36 | EXPORT_SYMBOL(prom_getchild); | 53 | EXPORT_SYMBOL(prom_getchild); |
| 37 | 54 | ||
| @@ -39,10 +56,12 @@ inline int prom_getparent(int node) | |||
| 39 | { | 56 | { |
| 40 | int cnode; | 57 | int cnode; |
| 41 | 58 | ||
| 42 | if(node == -1) return 0; | 59 | if (node == -1) |
| 43 | cnode = p1275_cmd ("parent", P1275_INOUT(1, 1), node); | 60 | return 0; |
| 44 | if(cnode == -1) return 0; | 61 | cnode = prom_node_to_node("parent", node); |
| 45 | return (int)cnode; | 62 | if (cnode == -1) |
| 63 | return 0; | ||
| 64 | return cnode; | ||
| 46 | } | 65 | } |
| 47 | 66 | ||
| 48 | /* Return the next sibling of node 'node' or zero if no more siblings | 67 | /* Return the next sibling of node 'node' or zero if no more siblings |
| @@ -50,7 +69,7 @@ inline int prom_getparent(int node) | |||
| 50 | */ | 69 | */ |
| 51 | inline int __prom_getsibling(int node) | 70 | inline int __prom_getsibling(int node) |
| 52 | { | 71 | { |
| 53 | return p1275_cmd(prom_peer_name, P1275_INOUT(1, 1), node); | 72 | return prom_node_to_node(prom_peer_name, node); |
| 54 | } | 73 | } |
| 55 | 74 | ||
| 56 | inline int prom_getsibling(int node) | 75 | inline int prom_getsibling(int node) |
| @@ -72,11 +91,21 @@ EXPORT_SYMBOL(prom_getsibling); | |||
| 72 | */ | 91 | */ |
| 73 | inline int prom_getproplen(int node, const char *prop) | 92 | inline int prom_getproplen(int node, const char *prop) |
| 74 | { | 93 | { |
| 75 | if((!node) || (!prop)) return -1; | 94 | unsigned long args[6]; |
| 76 | return p1275_cmd ("getproplen", | 95 | |
| 77 | P1275_ARG(1,P1275_ARG_IN_STRING)| | 96 | if (!node || !prop) |
| 78 | P1275_INOUT(2, 1), | 97 | return -1; |
| 79 | node, prop); | 98 | |
| 99 | args[0] = (unsigned long) "getproplen"; | ||
| 100 | args[1] = 2; | ||
| 101 | args[2] = 1; | ||
| 102 | args[3] = (unsigned int) node; | ||
| 103 | args[4] = (unsigned long) prop; | ||
| 104 | args[5] = (unsigned long) -1; | ||
| 105 | |||
| 106 | p1275_cmd_direct(args); | ||
| 107 | |||
| 108 | return (int) args[5]; | ||
| 80 | } | 109 | } |
| 81 | EXPORT_SYMBOL(prom_getproplen); | 110 | EXPORT_SYMBOL(prom_getproplen); |
| 82 | 111 | ||
| @@ -87,19 +116,25 @@ EXPORT_SYMBOL(prom_getproplen); | |||
| 87 | inline int prom_getproperty(int node, const char *prop, | 116 | inline int prom_getproperty(int node, const char *prop, |
| 88 | char *buffer, int bufsize) | 117 | char *buffer, int bufsize) |
| 89 | { | 118 | { |
| 119 | unsigned long args[8]; | ||
| 90 | int plen; | 120 | int plen; |
| 91 | 121 | ||
| 92 | plen = prom_getproplen(node, prop); | 122 | plen = prom_getproplen(node, prop); |
| 93 | if ((plen > bufsize) || (plen == 0) || (plen == -1)) { | 123 | if ((plen > bufsize) || (plen == 0) || (plen == -1)) |
| 94 | return -1; | 124 | return -1; |
| 95 | } else { | 125 | |
| 96 | /* Ok, things seem all right. */ | 126 | args[0] = (unsigned long) prom_getprop_name; |
| 97 | return p1275_cmd(prom_getprop_name, | 127 | args[1] = 4; |
| 98 | P1275_ARG(1,P1275_ARG_IN_STRING)| | 128 | args[2] = 1; |
| 99 | P1275_ARG(2,P1275_ARG_OUT_BUF)| | 129 | args[3] = (unsigned int) node; |
| 100 | P1275_INOUT(4, 1), | 130 | args[4] = (unsigned long) prop; |
| 101 | node, prop, buffer, P1275_SIZE(plen)); | 131 | args[5] = (unsigned long) buffer; |
| 102 | } | 132 | args[6] = bufsize; |
| 133 | args[7] = (unsigned long) -1; | ||
| 134 | |||
| 135 | p1275_cmd_direct(args); | ||
| 136 | |||
| 137 | return (int) args[7]; | ||
| 103 | } | 138 | } |
| 104 | EXPORT_SYMBOL(prom_getproperty); | 139 | EXPORT_SYMBOL(prom_getproperty); |
| 105 | 140 | ||
| @@ -110,7 +145,7 @@ inline int prom_getint(int node, const char *prop) | |||
| 110 | { | 145 | { |
| 111 | int intprop; | 146 | int intprop; |
| 112 | 147 | ||
| 113 | if(prom_getproperty(node, prop, (char *) &intprop, sizeof(int)) != -1) | 148 | if (prom_getproperty(node, prop, (char *) &intprop, sizeof(int)) != -1) |
| 114 | return intprop; | 149 | return intprop; |
| 115 | 150 | ||
| 116 | return -1; | 151 | return -1; |
| @@ -126,7 +161,8 @@ int prom_getintdefault(int node, const char *property, int deflt) | |||
| 126 | int retval; | 161 | int retval; |
| 127 | 162 | ||
| 128 | retval = prom_getint(node, property); | 163 | retval = prom_getint(node, property); |
| 129 | if(retval == -1) return deflt; | 164 | if (retval == -1) |
| 165 | return deflt; | ||
| 130 | 166 | ||
| 131 | return retval; | 167 | return retval; |
| 132 | } | 168 | } |
| @@ -138,7 +174,8 @@ int prom_getbool(int node, const char *prop) | |||
| 138 | int retval; | 174 | int retval; |
| 139 | 175 | ||
| 140 | retval = prom_getproplen(node, prop); | 176 | retval = prom_getproplen(node, prop); |
| 141 | if(retval == -1) return 0; | 177 | if (retval == -1) |
| 178 | return 0; | ||
| 142 | return 1; | 179 | return 1; |
| 143 | } | 180 | } |
| 144 | EXPORT_SYMBOL(prom_getbool); | 181 | EXPORT_SYMBOL(prom_getbool); |
| @@ -152,7 +189,8 @@ void prom_getstring(int node, const char *prop, char *user_buf, int ubuf_size) | |||
| 152 | int len; | 189 | int len; |
| 153 | 190 | ||
| 154 | len = prom_getproperty(node, prop, user_buf, ubuf_size); | 191 | len = prom_getproperty(node, prop, user_buf, ubuf_size); |
| 155 | if(len != -1) return; | 192 | if (len != -1) |
| 193 | return; | ||
| 156 | user_buf[0] = 0; | 194 | user_buf[0] = 0; |
| 157 | } | 195 | } |
| 158 | EXPORT_SYMBOL(prom_getstring); | 196 | EXPORT_SYMBOL(prom_getstring); |
| @@ -164,7 +202,8 @@ int prom_nodematch(int node, const char *name) | |||
| 164 | { | 202 | { |
| 165 | char namebuf[128]; | 203 | char namebuf[128]; |
| 166 | prom_getproperty(node, "name", namebuf, sizeof(namebuf)); | 204 | prom_getproperty(node, "name", namebuf, sizeof(namebuf)); |
| 167 | if(strcmp(namebuf, name) == 0) return 1; | 205 | if (strcmp(namebuf, name) == 0) |
| 206 | return 1; | ||
| 168 | return 0; | 207 | return 0; |
| 169 | } | 208 | } |
| 170 | 209 | ||
| @@ -190,16 +229,29 @@ int prom_searchsiblings(int node_start, const char *nodename) | |||
| 190 | } | 229 | } |
| 191 | EXPORT_SYMBOL(prom_searchsiblings); | 230 | EXPORT_SYMBOL(prom_searchsiblings); |
| 192 | 231 | ||
| 232 | static const char *prom_nextprop_name = "nextprop"; | ||
| 233 | |||
| 193 | /* Return the first property type for node 'node'. | 234 | /* Return the first property type for node 'node'. |
| 194 | * buffer should be at least 32B in length | 235 | * buffer should be at least 32B in length |
| 195 | */ | 236 | */ |
| 196 | inline char *prom_firstprop(int node, char *buffer) | 237 | inline char *prom_firstprop(int node, char *buffer) |
| 197 | { | 238 | { |
| 239 | unsigned long args[7]; | ||
| 240 | |||
| 198 | *buffer = 0; | 241 | *buffer = 0; |
| 199 | if(node == -1) return buffer; | 242 | if (node == -1) |
| 200 | p1275_cmd ("nextprop", P1275_ARG(2,P1275_ARG_OUT_32B)| | 243 | return buffer; |
| 201 | P1275_INOUT(3, 0), | 244 | |
| 202 | node, (char *) 0x0, buffer); | 245 | args[0] = (unsigned long) prom_nextprop_name; |
| 246 | args[1] = 3; | ||
| 247 | args[2] = 1; | ||
| 248 | args[3] = (unsigned int) node; | ||
| 249 | args[4] = 0; | ||
| 250 | args[5] = (unsigned long) buffer; | ||
| 251 | args[6] = (unsigned long) -1; | ||
| 252 | |||
| 253 | p1275_cmd_direct(args); | ||
| 254 | |||
| 203 | return buffer; | 255 | return buffer; |
| 204 | } | 256 | } |
| 205 | EXPORT_SYMBOL(prom_firstprop); | 257 | EXPORT_SYMBOL(prom_firstprop); |
| @@ -210,9 +262,10 @@ EXPORT_SYMBOL(prom_firstprop); | |||
| 210 | */ | 262 | */ |
| 211 | inline char *prom_nextprop(int node, const char *oprop, char *buffer) | 263 | inline char *prom_nextprop(int node, const char *oprop, char *buffer) |
| 212 | { | 264 | { |
| 265 | unsigned long args[7]; | ||
| 213 | char buf[32]; | 266 | char buf[32]; |
| 214 | 267 | ||
| 215 | if(node == -1) { | 268 | if (node == -1) { |
| 216 | *buffer = 0; | 269 | *buffer = 0; |
| 217 | return buffer; | 270 | return buffer; |
| 218 | } | 271 | } |
| @@ -220,10 +273,17 @@ inline char *prom_nextprop(int node, const char *oprop, char *buffer) | |||
| 220 | strcpy (buf, oprop); | 273 | strcpy (buf, oprop); |
| 221 | oprop = buf; | 274 | oprop = buf; |
| 222 | } | 275 | } |
| 223 | p1275_cmd ("nextprop", P1275_ARG(1,P1275_ARG_IN_STRING)| | 276 | |
| 224 | P1275_ARG(2,P1275_ARG_OUT_32B)| | 277 | args[0] = (unsigned long) prom_nextprop_name; |
| 225 | P1275_INOUT(3, 0), | 278 | args[1] = 3; |
| 226 | node, oprop, buffer); | 279 | args[2] = 1; |
| 280 | args[3] = (unsigned int) node; | ||
| 281 | args[4] = (unsigned long) oprop; | ||
| 282 | args[5] = (unsigned long) buffer; | ||
| 283 | args[6] = (unsigned long) -1; | ||
| 284 | |||
| 285 | p1275_cmd_direct(args); | ||
| 286 | |||
| 227 | return buffer; | 287 | return buffer; |
| 228 | } | 288 | } |
| 229 | EXPORT_SYMBOL(prom_nextprop); | 289 | EXPORT_SYMBOL(prom_nextprop); |
| @@ -231,12 +291,19 @@ EXPORT_SYMBOL(prom_nextprop); | |||
| 231 | int | 291 | int |
| 232 | prom_finddevice(const char *name) | 292 | prom_finddevice(const char *name) |
| 233 | { | 293 | { |
| 294 | unsigned long args[5]; | ||
| 295 | |||
| 234 | if (!name) | 296 | if (!name) |
| 235 | return 0; | 297 | return 0; |
| 236 | return p1275_cmd(prom_finddev_name, | 298 | args[0] = (unsigned long) "finddevice"; |
| 237 | P1275_ARG(0,P1275_ARG_IN_STRING)| | 299 | args[1] = 1; |
| 238 | P1275_INOUT(1, 1), | 300 | args[2] = 1; |
| 239 | name); | 301 | args[3] = (unsigned long) name; |
| 302 | args[4] = (unsigned long) -1; | ||
| 303 | |||
| 304 | p1275_cmd_direct(args); | ||
| 305 | |||
| 306 | return (int) args[4]; | ||
| 240 | } | 307 | } |
| 241 | EXPORT_SYMBOL(prom_finddevice); | 308 | EXPORT_SYMBOL(prom_finddevice); |
| 242 | 309 | ||
| @@ -247,7 +314,7 @@ int prom_node_has_property(int node, const char *prop) | |||
| 247 | *buf = 0; | 314 | *buf = 0; |
| 248 | do { | 315 | do { |
| 249 | prom_nextprop(node, buf, buf); | 316 | prom_nextprop(node, buf, buf); |
| 250 | if(!strcmp(buf, prop)) | 317 | if (!strcmp(buf, prop)) |
| 251 | return 1; | 318 | return 1; |
| 252 | } while (*buf); | 319 | } while (*buf); |
| 253 | return 0; | 320 | return 0; |
| @@ -260,6 +327,8 @@ EXPORT_SYMBOL(prom_node_has_property); | |||
| 260 | int | 327 | int |
| 261 | prom_setprop(int node, const char *pname, char *value, int size) | 328 | prom_setprop(int node, const char *pname, char *value, int size) |
| 262 | { | 329 | { |
| 330 | unsigned long args[8]; | ||
| 331 | |||
| 263 | if (size == 0) | 332 | if (size == 0) |
| 264 | return 0; | 333 | return 0; |
| 265 | if ((pname == 0) || (value == 0)) | 334 | if ((pname == 0) || (value == 0)) |
| @@ -271,19 +340,37 @@ prom_setprop(int node, const char *pname, char *value, int size) | |||
| 271 | return 0; | 340 | return 0; |
| 272 | } | 341 | } |
| 273 | #endif | 342 | #endif |
| 274 | return p1275_cmd ("setprop", P1275_ARG(1,P1275_ARG_IN_STRING)| | 343 | args[0] = (unsigned long) "setprop"; |
| 275 | P1275_ARG(2,P1275_ARG_IN_BUF)| | 344 | args[1] = 4; |
| 276 | P1275_INOUT(4, 1), | 345 | args[2] = 1; |
| 277 | node, pname, value, P1275_SIZE(size)); | 346 | args[3] = (unsigned int) node; |
| 347 | args[4] = (unsigned long) pname; | ||
| 348 | args[5] = (unsigned long) value; | ||
| 349 | args[6] = size; | ||
| 350 | args[7] = (unsigned long) -1; | ||
| 351 | |||
| 352 | p1275_cmd_direct(args); | ||
| 353 | |||
| 354 | return (int) args[7]; | ||
| 278 | } | 355 | } |
| 279 | EXPORT_SYMBOL(prom_setprop); | 356 | EXPORT_SYMBOL(prom_setprop); |
| 280 | 357 | ||
| 281 | inline int prom_inst2pkg(int inst) | 358 | inline int prom_inst2pkg(int inst) |
| 282 | { | 359 | { |
| 360 | unsigned long args[5]; | ||
| 283 | int node; | 361 | int node; |
| 284 | 362 | ||
| 285 | node = p1275_cmd ("instance-to-package", P1275_INOUT(1, 1), inst); | 363 | args[0] = (unsigned long) "instance-to-package"; |
| 286 | if (node == -1) return 0; | 364 | args[1] = 1; |
| 365 | args[2] = 1; | ||
| 366 | args[3] = (unsigned int) inst; | ||
| 367 | args[4] = (unsigned long) -1; | ||
| 368 | |||
| 369 | p1275_cmd_direct(args); | ||
| 370 | |||
| 371 | node = (int) args[4]; | ||
| 372 | if (node == -1) | ||
| 373 | return 0; | ||
| 287 | return node; | 374 | return node; |
| 288 | } | 375 | } |
| 289 | 376 | ||
| @@ -296,17 +383,28 @@ prom_pathtoinode(const char *path) | |||
| 296 | int node, inst; | 383 | int node, inst; |
| 297 | 384 | ||
| 298 | inst = prom_devopen (path); | 385 | inst = prom_devopen (path); |
| 299 | if (inst == 0) return 0; | 386 | if (inst == 0) |
| 300 | node = prom_inst2pkg (inst); | 387 | return 0; |
| 301 | prom_devclose (inst); | 388 | node = prom_inst2pkg(inst); |
| 302 | if (node == -1) return 0; | 389 | prom_devclose(inst); |
| 390 | if (node == -1) | ||
| 391 | return 0; | ||
| 303 | return node; | 392 | return node; |
| 304 | } | 393 | } |
| 305 | 394 | ||
| 306 | int prom_ihandle2path(int handle, char *buffer, int bufsize) | 395 | int prom_ihandle2path(int handle, char *buffer, int bufsize) |
| 307 | { | 396 | { |
| 308 | return p1275_cmd("instance-to-path", | 397 | unsigned long args[7]; |
| 309 | P1275_ARG(1,P1275_ARG_OUT_BUF)| | 398 | |
| 310 | P1275_INOUT(3, 1), | 399 | args[0] = (unsigned long) "instance-to-path"; |
| 311 | handle, buffer, P1275_SIZE(bufsize)); | 400 | args[1] = 3; |
| 401 | args[2] = 1; | ||
| 402 | args[3] = (unsigned int) handle; | ||
| 403 | args[4] = (unsigned long) buffer; | ||
| 404 | args[5] = bufsize; | ||
| 405 | args[6] = (unsigned long) -1; | ||
| 406 | |||
| 407 | p1275_cmd_direct(args); | ||
| 408 | |||
| 409 | return (int) args[6]; | ||
| 312 | } | 410 | } |
diff --git a/arch/x86/xen/platform-pci-unplug.c b/arch/x86/xen/platform-pci-unplug.c index 554c002a1e1a..0f456386cce5 100644 --- a/arch/x86/xen/platform-pci-unplug.c +++ b/arch/x86/xen/platform-pci-unplug.c | |||
| @@ -72,13 +72,17 @@ void __init xen_unplug_emulated_devices(void) | |||
| 72 | { | 72 | { |
| 73 | int r; | 73 | int r; |
| 74 | 74 | ||
| 75 | /* user explicitly requested no unplug */ | ||
| 76 | if (xen_emul_unplug & XEN_UNPLUG_NEVER) | ||
| 77 | return; | ||
| 75 | /* check the version of the xen platform PCI device */ | 78 | /* check the version of the xen platform PCI device */ |
| 76 | r = check_platform_magic(); | 79 | r = check_platform_magic(); |
| 77 | /* If the version matches enable the Xen platform PCI driver. | 80 | /* If the version matches enable the Xen platform PCI driver. |
| 78 | * Also enable the Xen platform PCI driver if the version is really old | 81 | * Also enable the Xen platform PCI driver if the host does |
| 79 | * and the user told us to ignore it. */ | 82 | * not support the unplug protocol (XEN_PLATFORM_ERR_MAGIC) |
| 83 | * but the user told us that unplugging is unnecessary. */ | ||
| 80 | if (r && !(r == XEN_PLATFORM_ERR_MAGIC && | 84 | if (r && !(r == XEN_PLATFORM_ERR_MAGIC && |
| 81 | (xen_emul_unplug & XEN_UNPLUG_IGNORE))) | 85 | (xen_emul_unplug & XEN_UNPLUG_UNNECESSARY))) |
| 82 | return; | 86 | return; |
| 83 | /* Set the default value of xen_emul_unplug depending on whether or | 87 | /* Set the default value of xen_emul_unplug depending on whether or |
| 84 | * not the Xen PV frontends and the Xen platform PCI driver have | 88 | * not the Xen PV frontends and the Xen platform PCI driver have |
| @@ -99,7 +103,7 @@ void __init xen_unplug_emulated_devices(void) | |||
| 99 | } | 103 | } |
| 100 | } | 104 | } |
| 101 | /* Now unplug the emulated devices */ | 105 | /* Now unplug the emulated devices */ |
| 102 | if (!(xen_emul_unplug & XEN_UNPLUG_IGNORE)) | 106 | if (!(xen_emul_unplug & XEN_UNPLUG_UNNECESSARY)) |
| 103 | outw(xen_emul_unplug, XEN_IOPORT_UNPLUG); | 107 | outw(xen_emul_unplug, XEN_IOPORT_UNPLUG); |
| 104 | xen_platform_pci_unplug = xen_emul_unplug; | 108 | xen_platform_pci_unplug = xen_emul_unplug; |
| 105 | } | 109 | } |
| @@ -125,8 +129,10 @@ static int __init parse_xen_emul_unplug(char *arg) | |||
| 125 | xen_emul_unplug |= XEN_UNPLUG_AUX_IDE_DISKS; | 129 | xen_emul_unplug |= XEN_UNPLUG_AUX_IDE_DISKS; |
| 126 | else if (!strncmp(p, "nics", l)) | 130 | else if (!strncmp(p, "nics", l)) |
| 127 | xen_emul_unplug |= XEN_UNPLUG_ALL_NICS; | 131 | xen_emul_unplug |= XEN_UNPLUG_ALL_NICS; |
| 128 | else if (!strncmp(p, "ignore", l)) | 132 | else if (!strncmp(p, "unnecessary", l)) |
| 129 | xen_emul_unplug |= XEN_UNPLUG_IGNORE; | 133 | xen_emul_unplug |= XEN_UNPLUG_UNNECESSARY; |
| 134 | else if (!strncmp(p, "never", l)) | ||
| 135 | xen_emul_unplug |= XEN_UNPLUG_NEVER; | ||
| 130 | else | 136 | else |
| 131 | printk(KERN_WARNING "unrecognised option '%s' " | 137 | printk(KERN_WARNING "unrecognised option '%s' " |
| 132 | "in parameter 'xen_emul_unplug'\n", p); | 138 | "in parameter 'xen_emul_unplug'\n", p); |
diff --git a/drivers/base/firmware_class.c b/drivers/base/firmware_class.c index c8a44f5e0584..40af43ebd92d 100644 --- a/drivers/base/firmware_class.c +++ b/drivers/base/firmware_class.c | |||
| @@ -568,7 +568,7 @@ static int _request_firmware(const struct firmware **firmware_p, | |||
| 568 | out: | 568 | out: |
| 569 | if (retval) { | 569 | if (retval) { |
| 570 | release_firmware(firmware); | 570 | release_firmware(firmware); |
| 571 | firmware_p = NULL; | 571 | *firmware_p = NULL; |
| 572 | } | 572 | } |
| 573 | 573 | ||
| 574 | return retval; | 574 | return retval; |
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c index ac1b682edecb..ab735a605cf3 100644 --- a/drivers/block/xen-blkfront.c +++ b/drivers/block/xen-blkfront.c | |||
| @@ -834,7 +834,7 @@ static int blkfront_probe(struct xenbus_device *dev, | |||
| 834 | char *type; | 834 | char *type; |
| 835 | int len; | 835 | int len; |
| 836 | /* no unplug has been done: do not hook devices != xen vbds */ | 836 | /* no unplug has been done: do not hook devices != xen vbds */ |
| 837 | if (xen_platform_pci_unplug & XEN_UNPLUG_IGNORE) { | 837 | if (xen_platform_pci_unplug & XEN_UNPLUG_UNNECESSARY) { |
| 838 | int major; | 838 | int major; |
| 839 | 839 | ||
| 840 | if (!VDEV_IS_EXTENDED(vdevice)) | 840 | if (!VDEV_IS_EXTENDED(vdevice)) |
diff --git a/drivers/char/ip2/ip2main.c b/drivers/char/ip2/ip2main.c index 07f3ea38b582..d4b71e8d0d23 100644 --- a/drivers/char/ip2/ip2main.c +++ b/drivers/char/ip2/ip2main.c | |||
| @@ -1650,7 +1650,7 @@ ip2_close( PTTY tty, struct file *pFile ) | |||
| 1650 | /* disable DSS reporting */ | 1650 | /* disable DSS reporting */ |
| 1651 | i2QueueCommands(PTYPE_INLINE, pCh, 100, 4, | 1651 | i2QueueCommands(PTYPE_INLINE, pCh, 100, 4, |
| 1652 | CMD_DCD_NREP, CMD_CTS_NREP, CMD_DSR_NREP, CMD_RI_NREP); | 1652 | CMD_DCD_NREP, CMD_CTS_NREP, CMD_DSR_NREP, CMD_RI_NREP); |
| 1653 | if ( !tty || (tty->termios->c_cflag & HUPCL) ) { | 1653 | if (tty->termios->c_cflag & HUPCL) { |
| 1654 | i2QueueCommands(PTYPE_INLINE, pCh, 100, 2, CMD_RTSDN, CMD_DTRDN); | 1654 | i2QueueCommands(PTYPE_INLINE, pCh, 100, 2, CMD_RTSDN, CMD_DTRDN); |
| 1655 | pCh->dataSetOut &= ~(I2_DTR | I2_RTS); | 1655 | pCh->dataSetOut &= ~(I2_DTR | I2_RTS); |
| 1656 | i2QueueCommands( PTYPE_INLINE, pCh, 100, 1, CMD_PAUSE(25)); | 1656 | i2QueueCommands( PTYPE_INLINE, pCh, 100, 1, CMD_PAUSE(25)); |
| @@ -2930,6 +2930,8 @@ ip2_ipl_ioctl (struct file *pFile, UINT cmd, ULONG arg ) | |||
| 2930 | if ( pCh ) | 2930 | if ( pCh ) |
| 2931 | { | 2931 | { |
| 2932 | rc = copy_to_user(argp, pCh, sizeof(i2ChanStr)); | 2932 | rc = copy_to_user(argp, pCh, sizeof(i2ChanStr)); |
| 2933 | if (rc) | ||
| 2934 | rc = -EFAULT; | ||
| 2933 | } else { | 2935 | } else { |
| 2934 | rc = -ENODEV; | 2936 | rc = -ENODEV; |
| 2935 | } | 2937 | } |
diff --git a/drivers/char/rocket.c b/drivers/char/rocket.c index 79c3bc69165a..7c79d243acc9 100644 --- a/drivers/char/rocket.c +++ b/drivers/char/rocket.c | |||
| @@ -1244,6 +1244,7 @@ static int set_config(struct tty_struct *tty, struct r_port *info, | |||
| 1244 | } | 1244 | } |
| 1245 | info->flags = ((info->flags & ~ROCKET_USR_MASK) | (new_serial.flags & ROCKET_USR_MASK)); | 1245 | info->flags = ((info->flags & ~ROCKET_USR_MASK) | (new_serial.flags & ROCKET_USR_MASK)); |
| 1246 | configure_r_port(tty, info, NULL); | 1246 | configure_r_port(tty, info, NULL); |
| 1247 | mutex_unlock(&info->port.mutex); | ||
| 1247 | return 0; | 1248 | return 0; |
| 1248 | } | 1249 | } |
| 1249 | 1250 | ||
diff --git a/drivers/char/synclink_gt.c b/drivers/char/synclink_gt.c index fef80cfcab5c..e63b830c86cc 100644 --- a/drivers/char/synclink_gt.c +++ b/drivers/char/synclink_gt.c | |||
| @@ -691,8 +691,10 @@ static int open(struct tty_struct *tty, struct file *filp) | |||
| 691 | if (info->port.count == 1) { | 691 | if (info->port.count == 1) { |
| 692 | /* 1st open on this device, init hardware */ | 692 | /* 1st open on this device, init hardware */ |
| 693 | retval = startup(info); | 693 | retval = startup(info); |
| 694 | if (retval < 0) | 694 | if (retval < 0) { |
| 695 | mutex_unlock(&info->port.mutex); | ||
| 695 | goto cleanup; | 696 | goto cleanup; |
| 697 | } | ||
| 696 | } | 698 | } |
| 697 | mutex_unlock(&info->port.mutex); | 699 | mutex_unlock(&info->port.mutex); |
| 698 | retval = block_til_ready(tty, filp, info); | 700 | retval = block_til_ready(tty, filp, info); |
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c index 90288ec7c284..84da748555bc 100644 --- a/drivers/gpu/drm/drm_drv.c +++ b/drivers/gpu/drm/drm_drv.c | |||
| @@ -55,6 +55,9 @@ | |||
| 55 | static int drm_version(struct drm_device *dev, void *data, | 55 | static int drm_version(struct drm_device *dev, void *data, |
| 56 | struct drm_file *file_priv); | 56 | struct drm_file *file_priv); |
| 57 | 57 | ||
| 58 | #define DRM_IOCTL_DEF(ioctl, _func, _flags) \ | ||
| 59 | [DRM_IOCTL_NR(ioctl)] = {.cmd = ioctl, .func = _func, .flags = _flags, .cmd_drv = 0} | ||
| 60 | |||
| 58 | /** Ioctl table */ | 61 | /** Ioctl table */ |
| 59 | static struct drm_ioctl_desc drm_ioctls[] = { | 62 | static struct drm_ioctl_desc drm_ioctls[] = { |
| 60 | DRM_IOCTL_DEF(DRM_IOCTL_VERSION, drm_version, 0), | 63 | DRM_IOCTL_DEF(DRM_IOCTL_VERSION, drm_version, 0), |
| @@ -421,6 +424,7 @@ long drm_ioctl(struct file *filp, | |||
| 421 | int retcode = -EINVAL; | 424 | int retcode = -EINVAL; |
| 422 | char stack_kdata[128]; | 425 | char stack_kdata[128]; |
| 423 | char *kdata = NULL; | 426 | char *kdata = NULL; |
| 427 | unsigned int usize, asize; | ||
| 424 | 428 | ||
| 425 | dev = file_priv->minor->dev; | 429 | dev = file_priv->minor->dev; |
| 426 | atomic_inc(&dev->ioctl_count); | 430 | atomic_inc(&dev->ioctl_count); |
| @@ -436,11 +440,18 @@ long drm_ioctl(struct file *filp, | |||
| 436 | ((nr < DRM_COMMAND_BASE) || (nr >= DRM_COMMAND_END))) | 440 | ((nr < DRM_COMMAND_BASE) || (nr >= DRM_COMMAND_END))) |
| 437 | goto err_i1; | 441 | goto err_i1; |
| 438 | if ((nr >= DRM_COMMAND_BASE) && (nr < DRM_COMMAND_END) && | 442 | if ((nr >= DRM_COMMAND_BASE) && (nr < DRM_COMMAND_END) && |
| 439 | (nr < DRM_COMMAND_BASE + dev->driver->num_ioctls)) | 443 | (nr < DRM_COMMAND_BASE + dev->driver->num_ioctls)) { |
| 444 | u32 drv_size; | ||
| 440 | ioctl = &dev->driver->ioctls[nr - DRM_COMMAND_BASE]; | 445 | ioctl = &dev->driver->ioctls[nr - DRM_COMMAND_BASE]; |
| 446 | drv_size = _IOC_SIZE(ioctl->cmd_drv); | ||
| 447 | usize = asize = _IOC_SIZE(cmd); | ||
| 448 | if (drv_size > asize) | ||
| 449 | asize = drv_size; | ||
| 450 | } | ||
| 441 | else if ((nr >= DRM_COMMAND_END) || (nr < DRM_COMMAND_BASE)) { | 451 | else if ((nr >= DRM_COMMAND_END) || (nr < DRM_COMMAND_BASE)) { |
| 442 | ioctl = &drm_ioctls[nr]; | 452 | ioctl = &drm_ioctls[nr]; |
| 443 | cmd = ioctl->cmd; | 453 | cmd = ioctl->cmd; |
| 454 | usize = asize = _IOC_SIZE(cmd); | ||
| 444 | } else | 455 | } else |
| 445 | goto err_i1; | 456 | goto err_i1; |
| 446 | 457 | ||
| @@ -460,10 +471,10 @@ long drm_ioctl(struct file *filp, | |||
| 460 | retcode = -EACCES; | 471 | retcode = -EACCES; |
| 461 | } else { | 472 | } else { |
| 462 | if (cmd & (IOC_IN | IOC_OUT)) { | 473 | if (cmd & (IOC_IN | IOC_OUT)) { |
| 463 | if (_IOC_SIZE(cmd) <= sizeof(stack_kdata)) { | 474 | if (asize <= sizeof(stack_kdata)) { |
| 464 | kdata = stack_kdata; | 475 | kdata = stack_kdata; |
| 465 | } else { | 476 | } else { |
| 466 | kdata = kmalloc(_IOC_SIZE(cmd), GFP_KERNEL); | 477 | kdata = kmalloc(asize, GFP_KERNEL); |
| 467 | if (!kdata) { | 478 | if (!kdata) { |
| 468 | retcode = -ENOMEM; | 479 | retcode = -ENOMEM; |
| 469 | goto err_i1; | 480 | goto err_i1; |
| @@ -473,11 +484,13 @@ long drm_ioctl(struct file *filp, | |||
| 473 | 484 | ||
| 474 | if (cmd & IOC_IN) { | 485 | if (cmd & IOC_IN) { |
| 475 | if (copy_from_user(kdata, (void __user *)arg, | 486 | if (copy_from_user(kdata, (void __user *)arg, |
| 476 | _IOC_SIZE(cmd)) != 0) { | 487 | usize) != 0) { |
| 477 | retcode = -EFAULT; | 488 | retcode = -EFAULT; |
| 478 | goto err_i1; | 489 | goto err_i1; |
| 479 | } | 490 | } |
| 480 | } | 491 | } else |
| 492 | memset(kdata, 0, usize); | ||
| 493 | |||
| 481 | if (ioctl->flags & DRM_UNLOCKED) | 494 | if (ioctl->flags & DRM_UNLOCKED) |
| 482 | retcode = func(dev, kdata, file_priv); | 495 | retcode = func(dev, kdata, file_priv); |
| 483 | else { | 496 | else { |
| @@ -488,7 +501,7 @@ long drm_ioctl(struct file *filp, | |||
| 488 | 501 | ||
| 489 | if (cmd & IOC_OUT) { | 502 | if (cmd & IOC_OUT) { |
| 490 | if (copy_to_user((void __user *)arg, kdata, | 503 | if (copy_to_user((void __user *)arg, kdata, |
| 491 | _IOC_SIZE(cmd)) != 0) | 504 | usize) != 0) |
| 492 | retcode = -EFAULT; | 505 | retcode = -EFAULT; |
| 493 | } | 506 | } |
| 494 | } | 507 | } |
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c index de82e201d682..8dd7e6f86bb3 100644 --- a/drivers/gpu/drm/drm_fb_helper.c +++ b/drivers/gpu/drm/drm_fb_helper.c | |||
| @@ -94,10 +94,11 @@ static bool drm_fb_helper_connector_parse_command_line(struct drm_fb_helper_conn | |||
| 94 | int i; | 94 | int i; |
| 95 | enum drm_connector_force force = DRM_FORCE_UNSPECIFIED; | 95 | enum drm_connector_force force = DRM_FORCE_UNSPECIFIED; |
| 96 | struct drm_fb_helper_cmdline_mode *cmdline_mode; | 96 | struct drm_fb_helper_cmdline_mode *cmdline_mode; |
| 97 | struct drm_connector *connector = fb_helper_conn->connector; | 97 | struct drm_connector *connector; |
| 98 | 98 | ||
| 99 | if (!fb_helper_conn) | 99 | if (!fb_helper_conn) |
| 100 | return false; | 100 | return false; |
| 101 | connector = fb_helper_conn->connector; | ||
| 101 | 102 | ||
| 102 | cmdline_mode = &fb_helper_conn->cmdline_mode; | 103 | cmdline_mode = &fb_helper_conn->cmdline_mode; |
| 103 | if (!mode_option) | 104 | if (!mode_option) |
diff --git a/drivers/gpu/drm/drm_vm.c b/drivers/gpu/drm/drm_vm.c index 3778360eceea..fda67468e603 100644 --- a/drivers/gpu/drm/drm_vm.c +++ b/drivers/gpu/drm/drm_vm.c | |||
| @@ -138,7 +138,7 @@ static int drm_do_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf) | |||
| 138 | break; | 138 | break; |
| 139 | } | 139 | } |
| 140 | 140 | ||
| 141 | if (!agpmem) | 141 | if (&agpmem->head == &dev->agp->memory) |
| 142 | goto vm_fault_error; | 142 | goto vm_fault_error; |
| 143 | 143 | ||
| 144 | /* | 144 | /* |
diff --git a/drivers/gpu/drm/i810/i810_dma.c b/drivers/gpu/drm/i810/i810_dma.c index 0e6c131313d9..61b4caf220fa 100644 --- a/drivers/gpu/drm/i810/i810_dma.c +++ b/drivers/gpu/drm/i810/i810_dma.c | |||
| @@ -1255,21 +1255,21 @@ long i810_ioctl(struct file *file, unsigned int cmd, unsigned long arg) | |||
| 1255 | } | 1255 | } |
| 1256 | 1256 | ||
| 1257 | struct drm_ioctl_desc i810_ioctls[] = { | 1257 | struct drm_ioctl_desc i810_ioctls[] = { |
| 1258 | DRM_IOCTL_DEF(DRM_I810_INIT, i810_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED), | 1258 | DRM_IOCTL_DEF_DRV(I810_INIT, i810_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED), |
| 1259 | DRM_IOCTL_DEF(DRM_I810_VERTEX, i810_dma_vertex, DRM_AUTH|DRM_UNLOCKED), | 1259 | DRM_IOCTL_DEF_DRV(I810_VERTEX, i810_dma_vertex, DRM_AUTH|DRM_UNLOCKED), |
| 1260 | DRM_IOCTL_DEF(DRM_I810_CLEAR, i810_clear_bufs, DRM_AUTH|DRM_UNLOCKED), | 1260 | DRM_IOCTL_DEF_DRV(I810_CLEAR, i810_clear_bufs, DRM_AUTH|DRM_UNLOCKED), |
| 1261 | DRM_IOCTL_DEF(DRM_I810_FLUSH, i810_flush_ioctl, DRM_AUTH|DRM_UNLOCKED), | 1261 | DRM_IOCTL_DEF_DRV(I810_FLUSH, i810_flush_ioctl, DRM_AUTH|DRM_UNLOCKED), |
| 1262 | DRM_IOCTL_DEF(DRM_I810_GETAGE, i810_getage, DRM_AUTH|DRM_UNLOCKED), | 1262 | DRM_IOCTL_DEF_DRV(I810_GETAGE, i810_getage, DRM_AUTH|DRM_UNLOCKED), |
| 1263 | DRM_IOCTL_DEF(DRM_I810_GETBUF, i810_getbuf, DRM_AUTH|DRM_UNLOCKED), | 1263 | DRM_IOCTL_DEF_DRV(I810_GETBUF, i810_getbuf, DRM_AUTH|DRM_UNLOCKED), |
| 1264 | DRM_IOCTL_DEF(DRM_I810_SWAP, i810_swap_bufs, DRM_AUTH|DRM_UNLOCKED), | 1264 | DRM_IOCTL_DEF_DRV(I810_SWAP, i810_swap_bufs, DRM_AUTH|DRM_UNLOCKED), |
| 1265 | DRM_IOCTL_DEF(DRM_I810_COPY, i810_copybuf, DRM_AUTH|DRM_UNLOCKED), | 1265 | DRM_IOCTL_DEF_DRV(I810_COPY, i810_copybuf, DRM_AUTH|DRM_UNLOCKED), |
| 1266 | DRM_IOCTL_DEF(DRM_I810_DOCOPY, i810_docopy, DRM_AUTH|DRM_UNLOCKED), | 1266 | DRM_IOCTL_DEF_DRV(I810_DOCOPY, i810_docopy, DRM_AUTH|DRM_UNLOCKED), |
| 1267 | DRM_IOCTL_DEF(DRM_I810_OV0INFO, i810_ov0_info, DRM_AUTH|DRM_UNLOCKED), | 1267 | DRM_IOCTL_DEF_DRV(I810_OV0INFO, i810_ov0_info, DRM_AUTH|DRM_UNLOCKED), |
| 1268 | DRM_IOCTL_DEF(DRM_I810_FSTATUS, i810_fstatus, DRM_AUTH|DRM_UNLOCKED), | 1268 | DRM_IOCTL_DEF_DRV(I810_FSTATUS, i810_fstatus, DRM_AUTH|DRM_UNLOCKED), |
| 1269 | DRM_IOCTL_DEF(DRM_I810_OV0FLIP, i810_ov0_flip, DRM_AUTH|DRM_UNLOCKED), | 1269 | DRM_IOCTL_DEF_DRV(I810_OV0FLIP, i810_ov0_flip, DRM_AUTH|DRM_UNLOCKED), |
| 1270 | DRM_IOCTL_DEF(DRM_I810_MC, i810_dma_mc, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED), | 1270 | DRM_IOCTL_DEF_DRV(I810_MC, i810_dma_mc, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED), |
| 1271 | DRM_IOCTL_DEF(DRM_I810_RSTATUS, i810_rstatus, DRM_AUTH|DRM_UNLOCKED), | 1271 | DRM_IOCTL_DEF_DRV(I810_RSTATUS, i810_rstatus, DRM_AUTH|DRM_UNLOCKED), |
| 1272 | DRM_IOCTL_DEF(DRM_I810_FLIP, i810_flip_bufs, DRM_AUTH|DRM_UNLOCKED), | 1272 | DRM_IOCTL_DEF_DRV(I810_FLIP, i810_flip_bufs, DRM_AUTH|DRM_UNLOCKED), |
| 1273 | }; | 1273 | }; |
| 1274 | 1274 | ||
| 1275 | int i810_max_ioctl = DRM_ARRAY_SIZE(i810_ioctls); | 1275 | int i810_max_ioctl = DRM_ARRAY_SIZE(i810_ioctls); |
diff --git a/drivers/gpu/drm/i830/i830_dma.c b/drivers/gpu/drm/i830/i830_dma.c index 5168862c9227..671aa18415ac 100644 --- a/drivers/gpu/drm/i830/i830_dma.c +++ b/drivers/gpu/drm/i830/i830_dma.c | |||
| @@ -1524,20 +1524,20 @@ long i830_ioctl(struct file *file, unsigned int cmd, unsigned long arg) | |||
| 1524 | } | 1524 | } |
| 1525 | 1525 | ||
| 1526 | struct drm_ioctl_desc i830_ioctls[] = { | 1526 | struct drm_ioctl_desc i830_ioctls[] = { |
| 1527 | DRM_IOCTL_DEF(DRM_I830_INIT, i830_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED), | 1527 | DRM_IOCTL_DEF_DRV(I830_INIT, i830_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED), |
| 1528 | DRM_IOCTL_DEF(DRM_I830_VERTEX, i830_dma_vertex, DRM_AUTH|DRM_UNLOCKED), | 1528 | DRM_IOCTL_DEF_DRV(I830_VERTEX, i830_dma_vertex, DRM_AUTH|DRM_UNLOCKED), |
| 1529 | DRM_IOCTL_DEF(DRM_I830_CLEAR, i830_clear_bufs, DRM_AUTH|DRM_UNLOCKED), | 1529 | DRM_IOCTL_DEF_DRV(I830_CLEAR, i830_clear_bufs, DRM_AUTH|DRM_UNLOCKED), |
| 1530 | DRM_IOCTL_DEF(DRM_I830_FLUSH, i830_flush_ioctl, DRM_AUTH|DRM_UNLOCKED), | 1530 | DRM_IOCTL_DEF_DRV(I830_FLUSH, i830_flush_ioctl, DRM_AUTH|DRM_UNLOCKED), |
| 1531 | DRM_IOCTL_DEF(DRM_I830_GETAGE, i830_getage, DRM_AUTH|DRM_UNLOCKED), | 1531 | DRM_IOCTL_DEF_DRV(I830_GETAGE, i830_getage, DRM_AUTH|DRM_UNLOCKED), |
| 1532 | DRM_IOCTL_DEF(DRM_I830_GETBUF, i830_getbuf, DRM_AUTH|DRM_UNLOCKED), | 1532 | DRM_IOCTL_DEF_DRV(I830_GETBUF, i830_getbuf, DRM_AUTH|DRM_UNLOCKED), |
| 1533 | DRM_IOCTL_DEF(DRM_I830_SWAP, i830_swap_bufs, DRM_AUTH|DRM_UNLOCKED), | 1533 | DRM_IOCTL_DEF_DRV(I830_SWAP, i830_swap_bufs, DRM_AUTH|DRM_UNLOCKED), |
| 1534 | DRM_IOCTL_DEF(DRM_I830_COPY, i830_copybuf, DRM_AUTH|DRM_UNLOCKED), | 1534 | DRM_IOCTL_DEF_DRV(I830_COPY, i830_copybuf, DRM_AUTH|DRM_UNLOCKED), |
| 1535 | DRM_IOCTL_DEF(DRM_I830_DOCOPY, i830_docopy, DRM_AUTH|DRM_UNLOCKED), | 1535 | DRM_IOCTL_DEF_DRV(I830_DOCOPY, i830_docopy, DRM_AUTH|DRM_UNLOCKED), |
| 1536 | DRM_IOCTL_DEF(DRM_I830_FLIP, i830_flip_bufs, DRM_AUTH|DRM_UNLOCKED), | 1536 | DRM_IOCTL_DEF_DRV(I830_FLIP, i830_flip_bufs, DRM_AUTH|DRM_UNLOCKED), |
| 1537 | DRM_IOCTL_DEF(DRM_I830_IRQ_EMIT, i830_irq_emit, DRM_AUTH|DRM_UNLOCKED), | 1537 | DRM_IOCTL_DEF_DRV(I830_IRQ_EMIT, i830_irq_emit, DRM_AUTH|DRM_UNLOCKED), |
| 1538 | DRM_IOCTL_DEF(DRM_I830_IRQ_WAIT, i830_irq_wait, DRM_AUTH|DRM_UNLOCKED), | 1538 | DRM_IOCTL_DEF_DRV(I830_IRQ_WAIT, i830_irq_wait, DRM_AUTH|DRM_UNLOCKED), |
| 1539 | DRM_IOCTL_DEF(DRM_I830_GETPARAM, i830_getparam, DRM_AUTH|DRM_UNLOCKED), | 1539 | DRM_IOCTL_DEF_DRV(I830_GETPARAM, i830_getparam, DRM_AUTH|DRM_UNLOCKED), |
| 1540 | DRM_IOCTL_DEF(DRM_I830_SETPARAM, i830_setparam, DRM_AUTH|DRM_UNLOCKED), | 1540 | DRM_IOCTL_DEF_DRV(I830_SETPARAM, i830_setparam, DRM_AUTH|DRM_UNLOCKED), |
| 1541 | }; | 1541 | }; |
| 1542 | 1542 | ||
| 1543 | int i830_max_ioctl = DRM_ARRAY_SIZE(i830_ioctls); | 1543 | int i830_max_ioctl = DRM_ARRAY_SIZE(i830_ioctls); |
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c index 44af317731b6..a7ec93e62f81 100644 --- a/drivers/gpu/drm/i915/i915_dma.c +++ b/drivers/gpu/drm/i915/i915_dma.c | |||
| @@ -2367,46 +2367,46 @@ void i915_driver_postclose(struct drm_device *dev, struct drm_file *file_priv) | |||
| 2367 | } | 2367 | } |
| 2368 | 2368 | ||
| 2369 | struct drm_ioctl_desc i915_ioctls[] = { | 2369 | struct drm_ioctl_desc i915_ioctls[] = { |
| 2370 | DRM_IOCTL_DEF(DRM_I915_INIT, i915_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), | 2370 | DRM_IOCTL_DEF_DRV(I915_INIT, i915_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), |
| 2371 | DRM_IOCTL_DEF(DRM_I915_FLUSH, i915_flush_ioctl, DRM_AUTH), | 2371 | DRM_IOCTL_DEF_DRV(I915_FLUSH, i915_flush_ioctl, DRM_AUTH), |
| 2372 | DRM_IOCTL_DEF(DRM_I915_FLIP, i915_flip_bufs, DRM_AUTH), | 2372 | DRM_IOCTL_DEF_DRV(I915_FLIP, i915_flip_bufs, DRM_AUTH), |
| 2373 | DRM_IOCTL_DEF(DRM_I915_BATCHBUFFER, i915_batchbuffer, DRM_AUTH), | 2373 | DRM_IOCTL_DEF_DRV(I915_BATCHBUFFER, i915_batchbuffer, DRM_AUTH), |
| 2374 | DRM_IOCTL_DEF(DRM_I915_IRQ_EMIT, i915_irq_emit, DRM_AUTH), | 2374 | DRM_IOCTL_DEF_DRV(I915_IRQ_EMIT, i915_irq_emit, DRM_AUTH), |
| 2375 | DRM_IOCTL_DEF(DRM_I915_IRQ_WAIT, i915_irq_wait, DRM_AUTH), | 2375 | DRM_IOCTL_DEF_DRV(I915_IRQ_WAIT, i915_irq_wait, DRM_AUTH), |
| 2376 | DRM_IOCTL_DEF(DRM_I915_GETPARAM, i915_getparam, DRM_AUTH), | 2376 | DRM_IOCTL_DEF_DRV(I915_GETPARAM, i915_getparam, DRM_AUTH), |
| 2377 | DRM_IOCTL_DEF(DRM_I915_SETPARAM, i915_setparam, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), | 2377 | DRM_IOCTL_DEF_DRV(I915_SETPARAM, i915_setparam, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), |
| 2378 | DRM_IOCTL_DEF(DRM_I915_ALLOC, i915_mem_alloc, DRM_AUTH), | 2378 | DRM_IOCTL_DEF_DRV(I915_ALLOC, i915_mem_alloc, DRM_AUTH), |
| 2379 | DRM_IOCTL_DEF(DRM_I915_FREE, i915_mem_free, DRM_AUTH), | 2379 | DRM_IOCTL_DEF_DRV(I915_FREE, i915_mem_free, DRM_AUTH), |
| 2380 | DRM_IOCTL_DEF(DRM_I915_INIT_HEAP, i915_mem_init_heap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), | 2380 | DRM_IOCTL_DEF_DRV(I915_INIT_HEAP, i915_mem_init_heap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), |
| 2381 | DRM_IOCTL_DEF(DRM_I915_CMDBUFFER, i915_cmdbuffer, DRM_AUTH), | 2381 | DRM_IOCTL_DEF_DRV(I915_CMDBUFFER, i915_cmdbuffer, DRM_AUTH), |
| 2382 | DRM_IOCTL_DEF(DRM_I915_DESTROY_HEAP, i915_mem_destroy_heap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY ), | 2382 | DRM_IOCTL_DEF_DRV(I915_DESTROY_HEAP, i915_mem_destroy_heap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), |
| 2383 | DRM_IOCTL_DEF(DRM_I915_SET_VBLANK_PIPE, i915_vblank_pipe_set, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY ), | 2383 | DRM_IOCTL_DEF_DRV(I915_SET_VBLANK_PIPE, i915_vblank_pipe_set, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), |
| 2384 | DRM_IOCTL_DEF(DRM_I915_GET_VBLANK_PIPE, i915_vblank_pipe_get, DRM_AUTH ), | 2384 | DRM_IOCTL_DEF_DRV(I915_GET_VBLANK_PIPE, i915_vblank_pipe_get, DRM_AUTH), |
| 2385 | DRM_IOCTL_DEF(DRM_I915_VBLANK_SWAP, i915_vblank_swap, DRM_AUTH), | 2385 | DRM_IOCTL_DEF_DRV(I915_VBLANK_SWAP, i915_vblank_swap, DRM_AUTH), |
| 2386 | DRM_IOCTL_DEF(DRM_I915_HWS_ADDR, i915_set_status_page, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), | 2386 | DRM_IOCTL_DEF_DRV(I915_HWS_ADDR, i915_set_status_page, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), |
| 2387 | DRM_IOCTL_DEF(DRM_I915_GEM_INIT, i915_gem_init_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED), | 2387 | DRM_IOCTL_DEF_DRV(I915_GEM_INIT, i915_gem_init_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED), |
| 2388 | DRM_IOCTL_DEF(DRM_I915_GEM_EXECBUFFER, i915_gem_execbuffer, DRM_AUTH|DRM_UNLOCKED), | 2388 | DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER, i915_gem_execbuffer, DRM_AUTH|DRM_UNLOCKED), |
| 2389 | DRM_IOCTL_DEF(DRM_I915_GEM_EXECBUFFER2, i915_gem_execbuffer2, DRM_AUTH|DRM_UNLOCKED), | 2389 | DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER2, i915_gem_execbuffer2, DRM_AUTH|DRM_UNLOCKED), |
| 2390 | DRM_IOCTL_DEF(DRM_I915_GEM_PIN, i915_gem_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED), | 2390 | DRM_IOCTL_DEF_DRV(I915_GEM_PIN, i915_gem_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED), |
| 2391 | DRM_IOCTL_DEF(DRM_I915_GEM_UNPIN, i915_gem_unpin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED), | 2391 | DRM_IOCTL_DEF_DRV(I915_GEM_UNPIN, i915_gem_unpin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED), |
| 2392 | DRM_IOCTL_DEF(DRM_I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_AUTH|DRM_UNLOCKED), | 2392 | DRM_IOCTL_DEF_DRV(I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_AUTH|DRM_UNLOCKED), |
| 2393 | DRM_IOCTL_DEF(DRM_I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_AUTH|DRM_UNLOCKED), | 2393 | DRM_IOCTL_DEF_DRV(I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_AUTH|DRM_UNLOCKED), |
| 2394 | DRM_IOCTL_DEF(DRM_I915_GEM_ENTERVT, i915_gem_entervt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED), | 2394 | DRM_IOCTL_DEF_DRV(I915_GEM_ENTERVT, i915_gem_entervt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED), |
| 2395 | DRM_IOCTL_DEF(DRM_I915_GEM_LEAVEVT, i915_gem_leavevt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED), | 2395 | DRM_IOCTL_DEF_DRV(I915_GEM_LEAVEVT, i915_gem_leavevt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED), |
| 2396 | DRM_IOCTL_DEF(DRM_I915_GEM_CREATE, i915_gem_create_ioctl, DRM_UNLOCKED), | 2396 | DRM_IOCTL_DEF_DRV(I915_GEM_CREATE, i915_gem_create_ioctl, DRM_UNLOCKED), |
| 2397 | DRM_IOCTL_DEF(DRM_I915_GEM_PREAD, i915_gem_pread_ioctl, DRM_UNLOCKED), | 2397 | DRM_IOCTL_DEF_DRV(I915_GEM_PREAD, i915_gem_pread_ioctl, DRM_UNLOCKED), |
| 2398 | DRM_IOCTL_DEF(DRM_I915_GEM_PWRITE, i915_gem_pwrite_ioctl, DRM_UNLOCKED), | 2398 | DRM_IOCTL_DEF_DRV(I915_GEM_PWRITE, i915_gem_pwrite_ioctl, DRM_UNLOCKED), |
| 2399 | DRM_IOCTL_DEF(DRM_I915_GEM_MMAP, i915_gem_mmap_ioctl, DRM_UNLOCKED), | 2399 | DRM_IOCTL_DEF_DRV(I915_GEM_MMAP, i915_gem_mmap_ioctl, DRM_UNLOCKED), |
| 2400 | DRM_IOCTL_DEF(DRM_I915_GEM_MMAP_GTT, i915_gem_mmap_gtt_ioctl, DRM_UNLOCKED), | 2400 | DRM_IOCTL_DEF_DRV(I915_GEM_MMAP_GTT, i915_gem_mmap_gtt_ioctl, DRM_UNLOCKED), |
| 2401 | DRM_IOCTL_DEF(DRM_I915_GEM_SET_DOMAIN, i915_gem_set_domain_ioctl, DRM_UNLOCKED), | 2401 | DRM_IOCTL_DEF_DRV(I915_GEM_SET_DOMAIN, i915_gem_set_domain_ioctl, DRM_UNLOCKED), |
| 2402 | DRM_IOCTL_DEF(DRM_I915_GEM_SW_FINISH, i915_gem_sw_finish_ioctl, DRM_UNLOCKED), | 2402 | DRM_IOCTL_DEF_DRV(I915_GEM_SW_FINISH, i915_gem_sw_finish_ioctl, DRM_UNLOCKED), |
| 2403 | DRM_IOCTL_DEF(DRM_I915_GEM_SET_TILING, i915_gem_set_tiling, DRM_UNLOCKED), | 2403 | DRM_IOCTL_DEF_DRV(I915_GEM_SET_TILING, i915_gem_set_tiling, DRM_UNLOCKED), |
| 2404 | DRM_IOCTL_DEF(DRM_I915_GEM_GET_TILING, i915_gem_get_tiling, DRM_UNLOCKED), | 2404 | DRM_IOCTL_DEF_DRV(I915_GEM_GET_TILING, i915_gem_get_tiling, DRM_UNLOCKED), |
| 2405 | DRM_IOCTL_DEF(DRM_I915_GEM_GET_APERTURE, i915_gem_get_aperture_ioctl, DRM_UNLOCKED), | 2405 | DRM_IOCTL_DEF_DRV(I915_GEM_GET_APERTURE, i915_gem_get_aperture_ioctl, DRM_UNLOCKED), |
| 2406 | DRM_IOCTL_DEF(DRM_I915_GET_PIPE_FROM_CRTC_ID, intel_get_pipe_from_crtc_id, DRM_UNLOCKED), | 2406 | DRM_IOCTL_DEF_DRV(I915_GET_PIPE_FROM_CRTC_ID, intel_get_pipe_from_crtc_id, DRM_UNLOCKED), |
| 2407 | DRM_IOCTL_DEF(DRM_I915_GEM_MADVISE, i915_gem_madvise_ioctl, DRM_UNLOCKED), | 2407 | DRM_IOCTL_DEF_DRV(I915_GEM_MADVISE, i915_gem_madvise_ioctl, DRM_UNLOCKED), |
| 2408 | DRM_IOCTL_DEF(DRM_I915_OVERLAY_PUT_IMAGE, intel_overlay_put_image, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), | 2408 | DRM_IOCTL_DEF_DRV(I915_OVERLAY_PUT_IMAGE, intel_overlay_put_image, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), |
| 2409 | DRM_IOCTL_DEF(DRM_I915_OVERLAY_ATTRS, intel_overlay_attrs, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), | 2409 | DRM_IOCTL_DEF_DRV(I915_OVERLAY_ATTRS, intel_overlay_attrs, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), |
| 2410 | }; | 2410 | }; |
| 2411 | 2411 | ||
| 2412 | int i915_max_ioctl = DRM_ARRAY_SIZE(i915_ioctls); | 2412 | int i915_max_ioctl = DRM_ARRAY_SIZE(i915_ioctls); |
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 23157e1de3be..11a3394f5fe1 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c | |||
| @@ -992,7 +992,7 @@ void intel_wait_for_vblank(struct drm_device *dev, int pipe) | |||
| 992 | 992 | ||
| 993 | /* Wait for vblank interrupt bit to set */ | 993 | /* Wait for vblank interrupt bit to set */ |
| 994 | if (wait_for((I915_READ(pipestat_reg) & | 994 | if (wait_for((I915_READ(pipestat_reg) & |
| 995 | PIPE_VBLANK_INTERRUPT_STATUS) == 0, | 995 | PIPE_VBLANK_INTERRUPT_STATUS), |
| 996 | 50, 0)) | 996 | 50, 0)) |
| 997 | DRM_DEBUG_KMS("vblank wait timed out\n"); | 997 | DRM_DEBUG_KMS("vblank wait timed out\n"); |
| 998 | } | 998 | } |
diff --git a/drivers/gpu/drm/mga/mga_state.c b/drivers/gpu/drm/mga/mga_state.c index fff82045c427..9ce2827f8c00 100644 --- a/drivers/gpu/drm/mga/mga_state.c +++ b/drivers/gpu/drm/mga/mga_state.c | |||
| @@ -1085,19 +1085,19 @@ file_priv) | |||
| 1085 | } | 1085 | } |
| 1086 | 1086 | ||
| 1087 | struct drm_ioctl_desc mga_ioctls[] = { | 1087 | struct drm_ioctl_desc mga_ioctls[] = { |
| 1088 | DRM_IOCTL_DEF(DRM_MGA_INIT, mga_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), | 1088 | DRM_IOCTL_DEF_DRV(MGA_INIT, mga_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), |
| 1089 | DRM_IOCTL_DEF(DRM_MGA_FLUSH, mga_dma_flush, DRM_AUTH), | 1089 | DRM_IOCTL_DEF_DRV(MGA_FLUSH, mga_dma_flush, DRM_AUTH), |
| 1090 | DRM_IOCTL_DEF(DRM_MGA_RESET, mga_dma_reset, DRM_AUTH), | 1090 | DRM_IOCTL_DEF_DRV(MGA_RESET, mga_dma_reset, DRM_AUTH), |
| 1091 | DRM_IOCTL_DEF(DRM_MGA_SWAP, mga_dma_swap, DRM_AUTH), | 1091 | DRM_IOCTL_DEF_DRV(MGA_SWAP, mga_dma_swap, DRM_AUTH), |
| 1092 | DRM_IOCTL_DEF(DRM_MGA_CLEAR, mga_dma_clear, DRM_AUTH), | 1092 | DRM_IOCTL_DEF_DRV(MGA_CLEAR, mga_dma_clear, DRM_AUTH), |
| 1093 | DRM_IOCTL_DEF(DRM_MGA_VERTEX, mga_dma_vertex, DRM_AUTH), | 1093 | DRM_IOCTL_DEF_DRV(MGA_VERTEX, mga_dma_vertex, DRM_AUTH), |
| 1094 | DRM_IOCTL_DEF(DRM_MGA_INDICES, mga_dma_indices, DRM_AUTH), | 1094 | DRM_IOCTL_DEF_DRV(MGA_INDICES, mga_dma_indices, DRM_AUTH), |
| 1095 | DRM_IOCTL_DEF(DRM_MGA_ILOAD, mga_dma_iload, DRM_AUTH), | 1095 | DRM_IOCTL_DEF_DRV(MGA_ILOAD, mga_dma_iload, DRM_AUTH), |
| 1096 | DRM_IOCTL_DEF(DRM_MGA_BLIT, mga_dma_blit, DRM_AUTH), | 1096 | DRM_IOCTL_DEF_DRV(MGA_BLIT, mga_dma_blit, DRM_AUTH), |
| 1097 | DRM_IOCTL_DEF(DRM_MGA_GETPARAM, mga_getparam, DRM_AUTH), | 1097 | DRM_IOCTL_DEF_DRV(MGA_GETPARAM, mga_getparam, DRM_AUTH), |
| 1098 | DRM_IOCTL_DEF(DRM_MGA_SET_FENCE, mga_set_fence, DRM_AUTH), | 1098 | DRM_IOCTL_DEF_DRV(MGA_SET_FENCE, mga_set_fence, DRM_AUTH), |
| 1099 | DRM_IOCTL_DEF(DRM_MGA_WAIT_FENCE, mga_wait_fence, DRM_AUTH), | 1099 | DRM_IOCTL_DEF_DRV(MGA_WAIT_FENCE, mga_wait_fence, DRM_AUTH), |
| 1100 | DRM_IOCTL_DEF(DRM_MGA_DMA_BOOTSTRAP, mga_dma_bootstrap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), | 1100 | DRM_IOCTL_DEF_DRV(MGA_DMA_BOOTSTRAP, mga_dma_bootstrap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), |
| 1101 | }; | 1101 | }; |
| 1102 | 1102 | ||
| 1103 | int mga_max_ioctl = DRM_ARRAY_SIZE(mga_ioctls); | 1103 | int mga_max_ioctl = DRM_ARRAY_SIZE(mga_ioctls); |
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c index 0b69a9628c95..e4f33a4edea1 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bios.c +++ b/drivers/gpu/drm/nouveau/nouveau_bios.c | |||
| @@ -2166,7 +2166,7 @@ peek_fb(struct drm_device *dev, struct io_mapping *fb, | |||
| 2166 | uint32_t val = 0; | 2166 | uint32_t val = 0; |
| 2167 | 2167 | ||
| 2168 | if (off < pci_resource_len(dev->pdev, 1)) { | 2168 | if (off < pci_resource_len(dev->pdev, 1)) { |
| 2169 | uint32_t __iomem *p = | 2169 | uint8_t __iomem *p = |
| 2170 | io_mapping_map_atomic_wc(fb, off & PAGE_MASK, KM_USER0); | 2170 | io_mapping_map_atomic_wc(fb, off & PAGE_MASK, KM_USER0); |
| 2171 | 2171 | ||
| 2172 | val = ioread32(p + (off & ~PAGE_MASK)); | 2172 | val = ioread32(p + (off & ~PAGE_MASK)); |
| @@ -2182,7 +2182,7 @@ poke_fb(struct drm_device *dev, struct io_mapping *fb, | |||
| 2182 | uint32_t off, uint32_t val) | 2182 | uint32_t off, uint32_t val) |
| 2183 | { | 2183 | { |
| 2184 | if (off < pci_resource_len(dev->pdev, 1)) { | 2184 | if (off < pci_resource_len(dev->pdev, 1)) { |
| 2185 | uint32_t __iomem *p = | 2185 | uint8_t __iomem *p = |
| 2186 | io_mapping_map_atomic_wc(fb, off & PAGE_MASK, KM_USER0); | 2186 | io_mapping_map_atomic_wc(fb, off & PAGE_MASK, KM_USER0); |
| 2187 | 2187 | ||
| 2188 | iowrite32(val, p + (off & ~PAGE_MASK)); | 2188 | iowrite32(val, p + (off & ~PAGE_MASK)); |
| @@ -4587,7 +4587,7 @@ nouveau_bios_run_display_table(struct drm_device *dev, struct dcb_entry *dcbent, | |||
| 4587 | return 1; | 4587 | return 1; |
| 4588 | } | 4588 | } |
| 4589 | 4589 | ||
| 4590 | NV_TRACE(dev, "0x%04X: parsing output script 0\n", script); | 4590 | NV_DEBUG_KMS(dev, "0x%04X: parsing output script 0\n", script); |
| 4591 | nouveau_bios_run_init_table(dev, script, dcbent); | 4591 | nouveau_bios_run_init_table(dev, script, dcbent); |
| 4592 | } else | 4592 | } else |
| 4593 | if (pxclk == -1) { | 4593 | if (pxclk == -1) { |
| @@ -4597,7 +4597,7 @@ nouveau_bios_run_display_table(struct drm_device *dev, struct dcb_entry *dcbent, | |||
| 4597 | return 1; | 4597 | return 1; |
| 4598 | } | 4598 | } |
| 4599 | 4599 | ||
| 4600 | NV_TRACE(dev, "0x%04X: parsing output script 1\n", script); | 4600 | NV_DEBUG_KMS(dev, "0x%04X: parsing output script 1\n", script); |
| 4601 | nouveau_bios_run_init_table(dev, script, dcbent); | 4601 | nouveau_bios_run_init_table(dev, script, dcbent); |
| 4602 | } else | 4602 | } else |
| 4603 | if (pxclk == -2) { | 4603 | if (pxclk == -2) { |
| @@ -4610,7 +4610,7 @@ nouveau_bios_run_display_table(struct drm_device *dev, struct dcb_entry *dcbent, | |||
| 4610 | return 1; | 4610 | return 1; |
| 4611 | } | 4611 | } |
| 4612 | 4612 | ||
| 4613 | NV_TRACE(dev, "0x%04X: parsing output script 2\n", script); | 4613 | NV_DEBUG_KMS(dev, "0x%04X: parsing output script 2\n", script); |
| 4614 | nouveau_bios_run_init_table(dev, script, dcbent); | 4614 | nouveau_bios_run_init_table(dev, script, dcbent); |
| 4615 | } else | 4615 | } else |
| 4616 | if (pxclk > 0) { | 4616 | if (pxclk > 0) { |
| @@ -4622,7 +4622,7 @@ nouveau_bios_run_display_table(struct drm_device *dev, struct dcb_entry *dcbent, | |||
| 4622 | return 1; | 4622 | return 1; |
| 4623 | } | 4623 | } |
| 4624 | 4624 | ||
| 4625 | NV_TRACE(dev, "0x%04X: parsing clock script 0\n", script); | 4625 | NV_DEBUG_KMS(dev, "0x%04X: parsing clock script 0\n", script); |
| 4626 | nouveau_bios_run_init_table(dev, script, dcbent); | 4626 | nouveau_bios_run_init_table(dev, script, dcbent); |
| 4627 | } else | 4627 | } else |
| 4628 | if (pxclk < 0) { | 4628 | if (pxclk < 0) { |
| @@ -4634,7 +4634,7 @@ nouveau_bios_run_display_table(struct drm_device *dev, struct dcb_entry *dcbent, | |||
| 4634 | return 1; | 4634 | return 1; |
| 4635 | } | 4635 | } |
| 4636 | 4636 | ||
| 4637 | NV_TRACE(dev, "0x%04X: parsing clock script 1\n", script); | 4637 | NV_DEBUG_KMS(dev, "0x%04X: parsing clock script 1\n", script); |
| 4638 | nouveau_bios_run_init_table(dev, script, dcbent); | 4638 | nouveau_bios_run_init_table(dev, script, dcbent); |
| 4639 | } | 4639 | } |
| 4640 | 4640 | ||
| @@ -5357,19 +5357,17 @@ static int parse_bit_tmds_tbl_entry(struct drm_device *dev, struct nvbios *bios, | |||
| 5357 | } | 5357 | } |
| 5358 | 5358 | ||
| 5359 | tmdstableptr = ROM16(bios->data[bitentry->offset]); | 5359 | tmdstableptr = ROM16(bios->data[bitentry->offset]); |
| 5360 | 5360 | if (!tmdstableptr) { | |
| 5361 | if (tmdstableptr == 0x0) { | ||
| 5362 | NV_ERROR(dev, "Pointer to TMDS table invalid\n"); | 5361 | NV_ERROR(dev, "Pointer to TMDS table invalid\n"); |
| 5363 | return -EINVAL; | 5362 | return -EINVAL; |
| 5364 | } | 5363 | } |
| 5365 | 5364 | ||
| 5365 | NV_INFO(dev, "TMDS table version %d.%d\n", | ||
| 5366 | bios->data[tmdstableptr] >> 4, bios->data[tmdstableptr] & 0xf); | ||
| 5367 | |||
| 5366 | /* nv50+ has v2.0, but we don't parse it atm */ | 5368 | /* nv50+ has v2.0, but we don't parse it atm */ |
| 5367 | if (bios->data[tmdstableptr] != 0x11) { | 5369 | if (bios->data[tmdstableptr] != 0x11) |
| 5368 | NV_WARN(dev, | ||
| 5369 | "TMDS table revision %d.%d not currently supported\n", | ||
| 5370 | bios->data[tmdstableptr] >> 4, bios->data[tmdstableptr] & 0xf); | ||
| 5371 | return -ENOSYS; | 5370 | return -ENOSYS; |
| 5372 | } | ||
| 5373 | 5371 | ||
| 5374 | /* | 5372 | /* |
| 5375 | * These two scripts are odd: they don't seem to get run even when | 5373 | * These two scripts are odd: they don't seem to get run even when |
| @@ -5809,6 +5807,22 @@ parse_dcb_gpio_table(struct nvbios *bios) | |||
| 5809 | gpio->line = tvdac_gpio[1] >> 4; | 5807 | gpio->line = tvdac_gpio[1] >> 4; |
| 5810 | gpio->invert = tvdac_gpio[0] & 2; | 5808 | gpio->invert = tvdac_gpio[0] & 2; |
| 5811 | } | 5809 | } |
| 5810 | } else { | ||
| 5811 | /* | ||
| 5812 | * No systematic way to store GPIO info on pre-v2.2 | ||
| 5813 | * DCBs, try to match the PCI device IDs. | ||
| 5814 | */ | ||
| 5815 | |||
| 5816 | /* Apple iMac G4 NV18 */ | ||
| 5817 | if (dev->pdev->device == 0x0189 && | ||
| 5818 | dev->pdev->subsystem_vendor == 0x10de && | ||
| 5819 | dev->pdev->subsystem_device == 0x0010) { | ||
| 5820 | struct dcb_gpio_entry *gpio = new_gpio_entry(bios); | ||
| 5821 | |||
| 5822 | gpio->tag = DCB_GPIO_TVDAC0; | ||
| 5823 | gpio->line = 4; | ||
| 5824 | } | ||
| 5825 | |||
| 5812 | } | 5826 | } |
| 5813 | 5827 | ||
| 5814 | if (!gpio_table_ptr) | 5828 | if (!gpio_table_ptr) |
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c index 84f85183d041..f6f44779d82f 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bo.c +++ b/drivers/gpu/drm/nouveau/nouveau_bo.c | |||
| @@ -36,6 +36,21 @@ | |||
| 36 | #include <linux/log2.h> | 36 | #include <linux/log2.h> |
| 37 | #include <linux/slab.h> | 37 | #include <linux/slab.h> |
| 38 | 38 | ||
| 39 | int | ||
| 40 | nouveau_bo_sync_gpu(struct nouveau_bo *nvbo, struct nouveau_channel *chan) | ||
| 41 | { | ||
| 42 | struct nouveau_fence *prev_fence = nvbo->bo.sync_obj; | ||
| 43 | int ret; | ||
| 44 | |||
| 45 | if (!prev_fence || nouveau_fence_channel(prev_fence) == chan) | ||
| 46 | return 0; | ||
| 47 | |||
| 48 | spin_lock(&nvbo->bo.lock); | ||
| 49 | ret = ttm_bo_wait(&nvbo->bo, false, false, false); | ||
| 50 | spin_unlock(&nvbo->bo.lock); | ||
| 51 | return ret; | ||
| 52 | } | ||
| 53 | |||
| 39 | static void | 54 | static void |
| 40 | nouveau_bo_del_ttm(struct ttm_buffer_object *bo) | 55 | nouveau_bo_del_ttm(struct ttm_buffer_object *bo) |
| 41 | { | 56 | { |
diff --git a/drivers/gpu/drm/nouveau/nouveau_channel.c b/drivers/gpu/drm/nouveau/nouveau_channel.c index 90fdcda332be..0480f064f2c1 100644 --- a/drivers/gpu/drm/nouveau/nouveau_channel.c +++ b/drivers/gpu/drm/nouveau/nouveau_channel.c | |||
| @@ -426,18 +426,18 @@ nouveau_ioctl_fifo_free(struct drm_device *dev, void *data, | |||
| 426 | ***********************************/ | 426 | ***********************************/ |
| 427 | 427 | ||
| 428 | struct drm_ioctl_desc nouveau_ioctls[] = { | 428 | struct drm_ioctl_desc nouveau_ioctls[] = { |
| 429 | DRM_IOCTL_DEF(DRM_NOUVEAU_GETPARAM, nouveau_ioctl_getparam, DRM_AUTH), | 429 | DRM_IOCTL_DEF_DRV(NOUVEAU_GETPARAM, nouveau_ioctl_getparam, DRM_AUTH), |
| 430 | DRM_IOCTL_DEF(DRM_NOUVEAU_SETPARAM, nouveau_ioctl_setparam, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), | 430 | DRM_IOCTL_DEF_DRV(NOUVEAU_SETPARAM, nouveau_ioctl_setparam, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), |
| 431 | DRM_IOCTL_DEF(DRM_NOUVEAU_CHANNEL_ALLOC, nouveau_ioctl_fifo_alloc, DRM_AUTH), | 431 | DRM_IOCTL_DEF_DRV(NOUVEAU_CHANNEL_ALLOC, nouveau_ioctl_fifo_alloc, DRM_AUTH), |
| 432 | DRM_IOCTL_DEF(DRM_NOUVEAU_CHANNEL_FREE, nouveau_ioctl_fifo_free, DRM_AUTH), | 432 | DRM_IOCTL_DEF_DRV(NOUVEAU_CHANNEL_FREE, nouveau_ioctl_fifo_free, DRM_AUTH), |
| 433 | DRM_IOCTL_DEF(DRM_NOUVEAU_GROBJ_ALLOC, nouveau_ioctl_grobj_alloc, DRM_AUTH), | 433 | DRM_IOCTL_DEF_DRV(NOUVEAU_GROBJ_ALLOC, nouveau_ioctl_grobj_alloc, DRM_AUTH), |
| 434 | DRM_IOCTL_DEF(DRM_NOUVEAU_NOTIFIEROBJ_ALLOC, nouveau_ioctl_notifier_alloc, DRM_AUTH), | 434 | DRM_IOCTL_DEF_DRV(NOUVEAU_NOTIFIEROBJ_ALLOC, nouveau_ioctl_notifier_alloc, DRM_AUTH), |
| 435 | DRM_IOCTL_DEF(DRM_NOUVEAU_GPUOBJ_FREE, nouveau_ioctl_gpuobj_free, DRM_AUTH), | 435 | DRM_IOCTL_DEF_DRV(NOUVEAU_GPUOBJ_FREE, nouveau_ioctl_gpuobj_free, DRM_AUTH), |
| 436 | DRM_IOCTL_DEF(DRM_NOUVEAU_GEM_NEW, nouveau_gem_ioctl_new, DRM_AUTH), | 436 | DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_NEW, nouveau_gem_ioctl_new, DRM_AUTH), |
| 437 | DRM_IOCTL_DEF(DRM_NOUVEAU_GEM_PUSHBUF, nouveau_gem_ioctl_pushbuf, DRM_AUTH), | 437 | DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_PUSHBUF, nouveau_gem_ioctl_pushbuf, DRM_AUTH), |
| 438 | DRM_IOCTL_DEF(DRM_NOUVEAU_GEM_CPU_PREP, nouveau_gem_ioctl_cpu_prep, DRM_AUTH), | 438 | DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_CPU_PREP, nouveau_gem_ioctl_cpu_prep, DRM_AUTH), |
| 439 | DRM_IOCTL_DEF(DRM_NOUVEAU_GEM_CPU_FINI, nouveau_gem_ioctl_cpu_fini, DRM_AUTH), | 439 | DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_CPU_FINI, nouveau_gem_ioctl_cpu_fini, DRM_AUTH), |
| 440 | DRM_IOCTL_DEF(DRM_NOUVEAU_GEM_INFO, nouveau_gem_ioctl_info, DRM_AUTH), | 440 | DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_INFO, nouveau_gem_ioctl_info, DRM_AUTH), |
| 441 | }; | 441 | }; |
| 442 | 442 | ||
| 443 | int nouveau_max_ioctl = DRM_ARRAY_SIZE(nouveau_ioctls); | 443 | int nouveau_max_ioctl = DRM_ARRAY_SIZE(nouveau_ioctls); |
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c index b1b22baf1428..a1473fff06ac 100644 --- a/drivers/gpu/drm/nouveau/nouveau_connector.c +++ b/drivers/gpu/drm/nouveau/nouveau_connector.c | |||
| @@ -104,7 +104,7 @@ nouveau_connector_ddc_detect(struct drm_connector *connector, | |||
| 104 | int i; | 104 | int i; |
| 105 | 105 | ||
| 106 | for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) { | 106 | for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) { |
| 107 | struct nouveau_i2c_chan *i2c; | 107 | struct nouveau_i2c_chan *i2c = NULL; |
| 108 | struct nouveau_encoder *nv_encoder; | 108 | struct nouveau_encoder *nv_encoder; |
| 109 | struct drm_mode_object *obj; | 109 | struct drm_mode_object *obj; |
| 110 | int id; | 110 | int id; |
| @@ -117,7 +117,9 @@ nouveau_connector_ddc_detect(struct drm_connector *connector, | |||
| 117 | if (!obj) | 117 | if (!obj) |
| 118 | continue; | 118 | continue; |
| 119 | nv_encoder = nouveau_encoder(obj_to_encoder(obj)); | 119 | nv_encoder = nouveau_encoder(obj_to_encoder(obj)); |
| 120 | i2c = nouveau_i2c_find(dev, nv_encoder->dcb->i2c_index); | 120 | |
| 121 | if (nv_encoder->dcb->i2c_index < 0xf) | ||
| 122 | i2c = nouveau_i2c_find(dev, nv_encoder->dcb->i2c_index); | ||
| 121 | 123 | ||
| 122 | if (i2c && nouveau_probe_i2c_addr(i2c, 0x50)) { | 124 | if (i2c && nouveau_probe_i2c_addr(i2c, 0x50)) { |
| 123 | *pnv_encoder = nv_encoder; | 125 | *pnv_encoder = nv_encoder; |
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h index e424bf74d706..1e093a069b7b 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drv.h +++ b/drivers/gpu/drm/nouveau/nouveau_drv.h | |||
| @@ -1165,6 +1165,7 @@ extern u16 nouveau_bo_rd16(struct nouveau_bo *nvbo, unsigned index); | |||
| 1165 | extern void nouveau_bo_wr16(struct nouveau_bo *nvbo, unsigned index, u16 val); | 1165 | extern void nouveau_bo_wr16(struct nouveau_bo *nvbo, unsigned index, u16 val); |
| 1166 | extern u32 nouveau_bo_rd32(struct nouveau_bo *nvbo, unsigned index); | 1166 | extern u32 nouveau_bo_rd32(struct nouveau_bo *nvbo, unsigned index); |
| 1167 | extern void nouveau_bo_wr32(struct nouveau_bo *nvbo, unsigned index, u32 val); | 1167 | extern void nouveau_bo_wr32(struct nouveau_bo *nvbo, unsigned index, u32 val); |
| 1168 | extern int nouveau_bo_sync_gpu(struct nouveau_bo *, struct nouveau_channel *); | ||
| 1168 | 1169 | ||
| 1169 | /* nouveau_fence.c */ | 1170 | /* nouveau_fence.c */ |
| 1170 | struct nouveau_fence; | 1171 | struct nouveau_fence; |
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c index 0f417ac1b696..79fc5ffff226 100644 --- a/drivers/gpu/drm/nouveau/nouveau_gem.c +++ b/drivers/gpu/drm/nouveau/nouveau_gem.c | |||
| @@ -361,16 +361,11 @@ validate_list(struct nouveau_channel *chan, struct list_head *list, | |||
| 361 | 361 | ||
| 362 | list_for_each_entry(nvbo, list, entry) { | 362 | list_for_each_entry(nvbo, list, entry) { |
| 363 | struct drm_nouveau_gem_pushbuf_bo *b = &pbbo[nvbo->pbbo_index]; | 363 | struct drm_nouveau_gem_pushbuf_bo *b = &pbbo[nvbo->pbbo_index]; |
| 364 | struct nouveau_fence *prev_fence = nvbo->bo.sync_obj; | ||
| 365 | 364 | ||
| 366 | if (prev_fence && nouveau_fence_channel(prev_fence) != chan) { | 365 | ret = nouveau_bo_sync_gpu(nvbo, chan); |
| 367 | spin_lock(&nvbo->bo.lock); | 366 | if (unlikely(ret)) { |
| 368 | ret = ttm_bo_wait(&nvbo->bo, false, false, false); | 367 | NV_ERROR(dev, "fail pre-validate sync\n"); |
| 369 | spin_unlock(&nvbo->bo.lock); | 368 | return ret; |
| 370 | if (unlikely(ret)) { | ||
| 371 | NV_ERROR(dev, "fail wait other chan\n"); | ||
| 372 | return ret; | ||
| 373 | } | ||
| 374 | } | 369 | } |
| 375 | 370 | ||
| 376 | ret = nouveau_gem_set_domain(nvbo->gem, b->read_domains, | 371 | ret = nouveau_gem_set_domain(nvbo->gem, b->read_domains, |
| @@ -381,7 +376,7 @@ validate_list(struct nouveau_channel *chan, struct list_head *list, | |||
| 381 | return ret; | 376 | return ret; |
| 382 | } | 377 | } |
| 383 | 378 | ||
| 384 | nvbo->channel = chan; | 379 | nvbo->channel = (b->read_domains & (1 << 31)) ? NULL : chan; |
| 385 | ret = ttm_bo_validate(&nvbo->bo, &nvbo->placement, | 380 | ret = ttm_bo_validate(&nvbo->bo, &nvbo->placement, |
| 386 | false, false, false); | 381 | false, false, false); |
| 387 | nvbo->channel = NULL; | 382 | nvbo->channel = NULL; |
| @@ -390,6 +385,12 @@ validate_list(struct nouveau_channel *chan, struct list_head *list, | |||
| 390 | return ret; | 385 | return ret; |
| 391 | } | 386 | } |
| 392 | 387 | ||
| 388 | ret = nouveau_bo_sync_gpu(nvbo, chan); | ||
| 389 | if (unlikely(ret)) { | ||
| 390 | NV_ERROR(dev, "fail post-validate sync\n"); | ||
| 391 | return ret; | ||
| 392 | } | ||
| 393 | |||
| 393 | if (nvbo->bo.offset == b->presumed.offset && | 394 | if (nvbo->bo.offset == b->presumed.offset && |
| 394 | ((nvbo->bo.mem.mem_type == TTM_PL_VRAM && | 395 | ((nvbo->bo.mem.mem_type == TTM_PL_VRAM && |
| 395 | b->presumed.domain & NOUVEAU_GEM_DOMAIN_VRAM) || | 396 | b->presumed.domain & NOUVEAU_GEM_DOMAIN_VRAM) || |
| @@ -615,6 +616,21 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data, | |||
| 615 | 616 | ||
| 616 | mutex_lock(&dev->struct_mutex); | 617 | mutex_lock(&dev->struct_mutex); |
| 617 | 618 | ||
| 619 | /* Mark push buffers as being used on PFIFO, the validation code | ||
| 620 | * will then make sure that if the pushbuf bo moves, that they | ||
| 621 | * happen on the kernel channel, which will in turn cause a sync | ||
| 622 | * to happen before we try and submit the push buffer. | ||
| 623 | */ | ||
| 624 | for (i = 0; i < req->nr_push; i++) { | ||
| 625 | if (push[i].bo_index >= req->nr_buffers) { | ||
| 626 | NV_ERROR(dev, "push %d buffer not in list\n", i); | ||
| 627 | ret = -EINVAL; | ||
| 628 | goto out; | ||
| 629 | } | ||
| 630 | |||
| 631 | bo[push[i].bo_index].read_domains |= (1 << 31); | ||
| 632 | } | ||
| 633 | |||
| 618 | /* Validate buffer list */ | 634 | /* Validate buffer list */ |
| 619 | ret = nouveau_gem_pushbuf_validate(chan, file_priv, bo, req->buffers, | 635 | ret = nouveau_gem_pushbuf_validate(chan, file_priv, bo, req->buffers, |
| 620 | req->nr_buffers, &op, &do_reloc); | 636 | req->nr_buffers, &op, &do_reloc); |
diff --git a/drivers/gpu/drm/nouveau/nouveau_i2c.c b/drivers/gpu/drm/nouveau/nouveau_i2c.c index 0bd407ca3d42..84614858728b 100644 --- a/drivers/gpu/drm/nouveau/nouveau_i2c.c +++ b/drivers/gpu/drm/nouveau/nouveau_i2c.c | |||
| @@ -163,7 +163,7 @@ nouveau_i2c_init(struct drm_device *dev, struct dcb_i2c_entry *entry, int index) | |||
| 163 | if (entry->chan) | 163 | if (entry->chan) |
| 164 | return -EEXIST; | 164 | return -EEXIST; |
| 165 | 165 | ||
| 166 | if (dev_priv->card_type == NV_C0 && entry->read >= NV50_I2C_PORTS) { | 166 | if (dev_priv->card_type >= NV_50 && entry->read >= NV50_I2C_PORTS) { |
| 167 | NV_ERROR(dev, "unknown i2c port %d\n", entry->read); | 167 | NV_ERROR(dev, "unknown i2c port %d\n", entry->read); |
| 168 | return -EINVAL; | 168 | return -EINVAL; |
| 169 | } | 169 | } |
diff --git a/drivers/gpu/drm/nouveau/nouveau_sgdma.c b/drivers/gpu/drm/nouveau/nouveau_sgdma.c index 491767fe4fcf..6b9187d7f67d 100644 --- a/drivers/gpu/drm/nouveau/nouveau_sgdma.c +++ b/drivers/gpu/drm/nouveau/nouveau_sgdma.c | |||
| @@ -214,6 +214,7 @@ int | |||
| 214 | nouveau_sgdma_init(struct drm_device *dev) | 214 | nouveau_sgdma_init(struct drm_device *dev) |
| 215 | { | 215 | { |
| 216 | struct drm_nouveau_private *dev_priv = dev->dev_private; | 216 | struct drm_nouveau_private *dev_priv = dev->dev_private; |
| 217 | struct pci_dev *pdev = dev->pdev; | ||
| 217 | struct nouveau_gpuobj *gpuobj = NULL; | 218 | struct nouveau_gpuobj *gpuobj = NULL; |
| 218 | uint32_t aper_size, obj_size; | 219 | uint32_t aper_size, obj_size; |
| 219 | int i, ret; | 220 | int i, ret; |
| @@ -239,10 +240,19 @@ nouveau_sgdma_init(struct drm_device *dev) | |||
| 239 | 240 | ||
| 240 | dev_priv->gart_info.sg_dummy_page = | 241 | dev_priv->gart_info.sg_dummy_page = |
| 241 | alloc_page(GFP_KERNEL|__GFP_DMA32); | 242 | alloc_page(GFP_KERNEL|__GFP_DMA32); |
| 243 | if (!dev_priv->gart_info.sg_dummy_page) { | ||
| 244 | nouveau_gpuobj_del(dev, &gpuobj); | ||
| 245 | return -ENOMEM; | ||
| 246 | } | ||
| 247 | |||
| 242 | set_bit(PG_locked, &dev_priv->gart_info.sg_dummy_page->flags); | 248 | set_bit(PG_locked, &dev_priv->gart_info.sg_dummy_page->flags); |
| 243 | dev_priv->gart_info.sg_dummy_bus = | 249 | dev_priv->gart_info.sg_dummy_bus = |
| 244 | pci_map_page(dev->pdev, dev_priv->gart_info.sg_dummy_page, 0, | 250 | pci_map_page(pdev, dev_priv->gart_info.sg_dummy_page, 0, |
| 245 | PAGE_SIZE, PCI_DMA_BIDIRECTIONAL); | 251 | PAGE_SIZE, PCI_DMA_BIDIRECTIONAL); |
| 252 | if (pci_dma_mapping_error(pdev, dev_priv->gart_info.sg_dummy_bus)) { | ||
| 253 | nouveau_gpuobj_del(dev, &gpuobj); | ||
| 254 | return -EFAULT; | ||
| 255 | } | ||
| 246 | 256 | ||
| 247 | if (dev_priv->card_type < NV_50) { | 257 | if (dev_priv->card_type < NV_50) { |
| 248 | /* Maybe use NV_DMA_TARGET_AGP for PCIE? NVIDIA do this, and | 258 | /* Maybe use NV_DMA_TARGET_AGP for PCIE? NVIDIA do this, and |
diff --git a/drivers/gpu/drm/nouveau/nv17_tv.c b/drivers/gpu/drm/nouveau/nv17_tv.c index 44fefb0c7083..eefa5c856932 100644 --- a/drivers/gpu/drm/nouveau/nv17_tv.c +++ b/drivers/gpu/drm/nouveau/nv17_tv.c | |||
| @@ -129,6 +129,14 @@ get_tv_detect_quirks(struct drm_device *dev, uint32_t *pin_mask) | |||
| 129 | return false; | 129 | return false; |
| 130 | } | 130 | } |
| 131 | 131 | ||
| 132 | /* MSI nForce2 IGP */ | ||
| 133 | if (dev->pdev->device == 0x01f0 && | ||
| 134 | dev->pdev->subsystem_vendor == 0x1462 && | ||
| 135 | dev->pdev->subsystem_device == 0x5710) { | ||
| 136 | *pin_mask = 0xc; | ||
| 137 | return false; | ||
| 138 | } | ||
| 139 | |||
| 132 | return true; | 140 | return true; |
| 133 | } | 141 | } |
| 134 | 142 | ||
diff --git a/drivers/gpu/drm/nouveau/nv50_instmem.c b/drivers/gpu/drm/nouveau/nv50_instmem.c index 37c7b48ab24a..c95bf9b681dd 100644 --- a/drivers/gpu/drm/nouveau/nv50_instmem.c +++ b/drivers/gpu/drm/nouveau/nv50_instmem.c | |||
| @@ -278,7 +278,7 @@ nv50_instmem_init(struct drm_device *dev) | |||
| 278 | /*XXX: incorrect, but needed to make hash func "work" */ | 278 | /*XXX: incorrect, but needed to make hash func "work" */ |
| 279 | dev_priv->ramht_offset = 0x10000; | 279 | dev_priv->ramht_offset = 0x10000; |
| 280 | dev_priv->ramht_bits = 9; | 280 | dev_priv->ramht_bits = 9; |
| 281 | dev_priv->ramht_size = (1 << dev_priv->ramht_bits); | 281 | dev_priv->ramht_size = (1 << dev_priv->ramht_bits) * 8; |
| 282 | return 0; | 282 | return 0; |
| 283 | } | 283 | } |
| 284 | 284 | ||
diff --git a/drivers/gpu/drm/nouveau/nvc0_instmem.c b/drivers/gpu/drm/nouveau/nvc0_instmem.c index 3ab3cdc42173..6b451f864783 100644 --- a/drivers/gpu/drm/nouveau/nvc0_instmem.c +++ b/drivers/gpu/drm/nouveau/nvc0_instmem.c | |||
| @@ -142,14 +142,16 @@ int | |||
| 142 | nvc0_instmem_suspend(struct drm_device *dev) | 142 | nvc0_instmem_suspend(struct drm_device *dev) |
| 143 | { | 143 | { |
| 144 | struct drm_nouveau_private *dev_priv = dev->dev_private; | 144 | struct drm_nouveau_private *dev_priv = dev->dev_private; |
| 145 | u32 *buf; | ||
| 145 | int i; | 146 | int i; |
| 146 | 147 | ||
| 147 | dev_priv->susres.ramin_copy = vmalloc(65536); | 148 | dev_priv->susres.ramin_copy = vmalloc(65536); |
| 148 | if (!dev_priv->susres.ramin_copy) | 149 | if (!dev_priv->susres.ramin_copy) |
| 149 | return -ENOMEM; | 150 | return -ENOMEM; |
| 151 | buf = dev_priv->susres.ramin_copy; | ||
| 150 | 152 | ||
| 151 | for (i = 0x700000; i < 0x710000; i += 4) | 153 | for (i = 0; i < 65536; i += 4) |
| 152 | dev_priv->susres.ramin_copy[i/4] = nv_rd32(dev, i); | 154 | buf[i/4] = nv_rd32(dev, NV04_PRAMIN + i); |
| 153 | return 0; | 155 | return 0; |
| 154 | } | 156 | } |
| 155 | 157 | ||
| @@ -157,14 +159,15 @@ void | |||
| 157 | nvc0_instmem_resume(struct drm_device *dev) | 159 | nvc0_instmem_resume(struct drm_device *dev) |
| 158 | { | 160 | { |
| 159 | struct drm_nouveau_private *dev_priv = dev->dev_private; | 161 | struct drm_nouveau_private *dev_priv = dev->dev_private; |
| 162 | u32 *buf = dev_priv->susres.ramin_copy; | ||
| 160 | u64 chan; | 163 | u64 chan; |
| 161 | int i; | 164 | int i; |
| 162 | 165 | ||
| 163 | chan = dev_priv->vram_size - dev_priv->ramin_rsvd_vram; | 166 | chan = dev_priv->vram_size - dev_priv->ramin_rsvd_vram; |
| 164 | nv_wr32(dev, 0x001700, chan >> 16); | 167 | nv_wr32(dev, 0x001700, chan >> 16); |
| 165 | 168 | ||
| 166 | for (i = 0x700000; i < 0x710000; i += 4) | 169 | for (i = 0; i < 65536; i += 4) |
| 167 | nv_wr32(dev, i, dev_priv->susres.ramin_copy[i/4]); | 170 | nv_wr32(dev, NV04_PRAMIN + i, buf[i/4]); |
| 168 | vfree(dev_priv->susres.ramin_copy); | 171 | vfree(dev_priv->susres.ramin_copy); |
| 169 | dev_priv->susres.ramin_copy = NULL; | 172 | dev_priv->susres.ramin_copy = NULL; |
| 170 | 173 | ||
| @@ -221,7 +224,7 @@ nvc0_instmem_init(struct drm_device *dev) | |||
| 221 | /*XXX: incorrect, but needed to make hash func "work" */ | 224 | /*XXX: incorrect, but needed to make hash func "work" */ |
| 222 | dev_priv->ramht_offset = 0x10000; | 225 | dev_priv->ramht_offset = 0x10000; |
| 223 | dev_priv->ramht_bits = 9; | 226 | dev_priv->ramht_bits = 9; |
| 224 | dev_priv->ramht_size = (1 << dev_priv->ramht_bits); | 227 | dev_priv->ramht_size = (1 << dev_priv->ramht_bits) * 8; |
| 225 | return 0; | 228 | return 0; |
| 226 | } | 229 | } |
| 227 | 230 | ||
diff --git a/drivers/gpu/drm/r128/r128_state.c b/drivers/gpu/drm/r128/r128_state.c index 077af1f2f9b4..a9e33ce65918 100644 --- a/drivers/gpu/drm/r128/r128_state.c +++ b/drivers/gpu/drm/r128/r128_state.c | |||
| @@ -1639,30 +1639,29 @@ void r128_driver_preclose(struct drm_device *dev, struct drm_file *file_priv) | |||
| 1639 | r128_do_cleanup_pageflip(dev); | 1639 | r128_do_cleanup_pageflip(dev); |
| 1640 | } | 1640 | } |
| 1641 | } | 1641 | } |
| 1642 | |||
| 1643 | void r128_driver_lastclose(struct drm_device *dev) | 1642 | void r128_driver_lastclose(struct drm_device *dev) |
| 1644 | { | 1643 | { |
| 1645 | r128_do_cleanup_cce(dev); | 1644 | r128_do_cleanup_cce(dev); |
| 1646 | } | 1645 | } |
| 1647 | 1646 | ||
| 1648 | struct drm_ioctl_desc r128_ioctls[] = { | 1647 | struct drm_ioctl_desc r128_ioctls[] = { |
| 1649 | DRM_IOCTL_DEF(DRM_R128_INIT, r128_cce_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), | 1648 | DRM_IOCTL_DEF_DRV(R128_INIT, r128_cce_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), |
| 1650 | DRM_IOCTL_DEF(DRM_R128_CCE_START, r128_cce_start, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), | 1649 | DRM_IOCTL_DEF_DRV(R128_CCE_START, r128_cce_start, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), |
| 1651 | DRM_IOCTL_DEF(DRM_R128_CCE_STOP, r128_cce_stop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), | 1650 | DRM_IOCTL_DEF_DRV(R128_CCE_STOP, r128_cce_stop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), |
| 1652 | DRM_IOCTL_DEF(DRM_R128_CCE_RESET, r128_cce_reset, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), | 1651 | DRM_IOCTL_DEF_DRV(R128_CCE_RESET, r128_cce_reset, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), |
| 1653 | DRM_IOCTL_DEF(DRM_R128_CCE_IDLE, r128_cce_idle, DRM_AUTH), | 1652 | DRM_IOCTL_DEF_DRV(R128_CCE_IDLE, r128_cce_idle, DRM_AUTH), |
| 1654 | DRM_IOCTL_DEF(DRM_R128_RESET, r128_engine_reset, DRM_AUTH), | 1653 | DRM_IOCTL_DEF_DRV(R128_RESET, r128_engine_reset, DRM_AUTH), |
| 1655 | DRM_IOCTL_DEF(DRM_R128_FULLSCREEN, r128_fullscreen, DRM_AUTH), | 1654 | DRM_IOCTL_DEF_DRV(R128_FULLSCREEN, r128_fullscreen, DRM_AUTH), |
| 1656 | DRM_IOCTL_DEF(DRM_R128_SWAP, r128_cce_swap, DRM_AUTH), | 1655 | DRM_IOCTL_DEF_DRV(R128_SWAP, r128_cce_swap, DRM_AUTH), |
| 1657 | DRM_IOCTL_DEF(DRM_R128_FLIP, r128_cce_flip, DRM_AUTH), | 1656 | DRM_IOCTL_DEF_DRV(R128_FLIP, r128_cce_flip, DRM_AUTH), |
| 1658 | DRM_IOCTL_DEF(DRM_R128_CLEAR, r128_cce_clear, DRM_AUTH), | 1657 | DRM_IOCTL_DEF_DRV(R128_CLEAR, r128_cce_clear, DRM_AUTH), |
| 1659 | DRM_IOCTL_DEF(DRM_R128_VERTEX, r128_cce_vertex, DRM_AUTH), | 1658 | DRM_IOCTL_DEF_DRV(R128_VERTEX, r128_cce_vertex, DRM_AUTH), |
| 1660 | DRM_IOCTL_DEF(DRM_R128_INDICES, r128_cce_indices, DRM_AUTH), | 1659 | DRM_IOCTL_DEF_DRV(R128_INDICES, r128_cce_indices, DRM_AUTH), |
| 1661 | DRM_IOCTL_DEF(DRM_R128_BLIT, r128_cce_blit, DRM_AUTH), | 1660 | DRM_IOCTL_DEF_DRV(R128_BLIT, r128_cce_blit, DRM_AUTH), |
| 1662 | DRM_IOCTL_DEF(DRM_R128_DEPTH, r128_cce_depth, DRM_AUTH), | 1661 | DRM_IOCTL_DEF_DRV(R128_DEPTH, r128_cce_depth, DRM_AUTH), |
| 1663 | DRM_IOCTL_DEF(DRM_R128_STIPPLE, r128_cce_stipple, DRM_AUTH), | 1662 | DRM_IOCTL_DEF_DRV(R128_STIPPLE, r128_cce_stipple, DRM_AUTH), |
| 1664 | DRM_IOCTL_DEF(DRM_R128_INDIRECT, r128_cce_indirect, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), | 1663 | DRM_IOCTL_DEF_DRV(R128_INDIRECT, r128_cce_indirect, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), |
| 1665 | DRM_IOCTL_DEF(DRM_R128_GETPARAM, r128_getparam, DRM_AUTH), | 1664 | DRM_IOCTL_DEF_DRV(R128_GETPARAM, r128_getparam, DRM_AUTH), |
| 1666 | }; | 1665 | }; |
| 1667 | 1666 | ||
| 1668 | int r128_max_ioctl = DRM_ARRAY_SIZE(r128_ioctls); | 1667 | int r128_max_ioctl = DRM_ARRAY_SIZE(r128_ioctls); |
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c index 12ad512bd3d3..577239a24fd5 100644 --- a/drivers/gpu/drm/radeon/atombios_crtc.c +++ b/drivers/gpu/drm/radeon/atombios_crtc.c | |||
| @@ -471,6 +471,8 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc, | |||
| 471 | struct radeon_encoder *radeon_encoder = NULL; | 471 | struct radeon_encoder *radeon_encoder = NULL; |
| 472 | u32 adjusted_clock = mode->clock; | 472 | u32 adjusted_clock = mode->clock; |
| 473 | int encoder_mode = 0; | 473 | int encoder_mode = 0; |
| 474 | u32 dp_clock = mode->clock; | ||
| 475 | int bpc = 8; | ||
| 474 | 476 | ||
| 475 | /* reset the pll flags */ | 477 | /* reset the pll flags */ |
| 476 | pll->flags = 0; | 478 | pll->flags = 0; |
| @@ -513,6 +515,17 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc, | |||
| 513 | if (encoder->crtc == crtc) { | 515 | if (encoder->crtc == crtc) { |
| 514 | radeon_encoder = to_radeon_encoder(encoder); | 516 | radeon_encoder = to_radeon_encoder(encoder); |
| 515 | encoder_mode = atombios_get_encoder_mode(encoder); | 517 | encoder_mode = atombios_get_encoder_mode(encoder); |
| 518 | if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT | ATOM_DEVICE_DFP_SUPPORT)) { | ||
| 519 | struct drm_connector *connector = radeon_get_connector_for_encoder(encoder); | ||
| 520 | if (connector) { | ||
| 521 | struct radeon_connector *radeon_connector = to_radeon_connector(connector); | ||
| 522 | struct radeon_connector_atom_dig *dig_connector = | ||
| 523 | radeon_connector->con_priv; | ||
| 524 | |||
| 525 | dp_clock = dig_connector->dp_clock; | ||
| 526 | } | ||
| 527 | } | ||
| 528 | |||
| 516 | if (ASIC_IS_AVIVO(rdev)) { | 529 | if (ASIC_IS_AVIVO(rdev)) { |
| 517 | /* DVO wants 2x pixel clock if the DVO chip is in 12 bit mode */ | 530 | /* DVO wants 2x pixel clock if the DVO chip is in 12 bit mode */ |
| 518 | if (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1) | 531 | if (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1) |
| @@ -555,6 +568,14 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc, | |||
| 555 | args.v1.usPixelClock = cpu_to_le16(mode->clock / 10); | 568 | args.v1.usPixelClock = cpu_to_le16(mode->clock / 10); |
| 556 | args.v1.ucTransmitterID = radeon_encoder->encoder_id; | 569 | args.v1.ucTransmitterID = radeon_encoder->encoder_id; |
| 557 | args.v1.ucEncodeMode = encoder_mode; | 570 | args.v1.ucEncodeMode = encoder_mode; |
| 571 | if (encoder_mode == ATOM_ENCODER_MODE_DP) { | ||
| 572 | /* may want to enable SS on DP eventually */ | ||
| 573 | /* args.v1.ucConfig |= | ||
| 574 | ADJUST_DISPLAY_CONFIG_SS_ENABLE;*/ | ||
| 575 | } else if (encoder_mode == ATOM_ENCODER_MODE_LVDS) { | ||
| 576 | args.v1.ucConfig |= | ||
| 577 | ADJUST_DISPLAY_CONFIG_SS_ENABLE; | ||
| 578 | } | ||
| 558 | 579 | ||
| 559 | atom_execute_table(rdev->mode_info.atom_context, | 580 | atom_execute_table(rdev->mode_info.atom_context, |
| 560 | index, (uint32_t *)&args); | 581 | index, (uint32_t *)&args); |
| @@ -568,10 +589,20 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc, | |||
| 568 | if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) { | 589 | if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) { |
| 569 | struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; | 590 | struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; |
| 570 | 591 | ||
| 571 | if (encoder_mode == ATOM_ENCODER_MODE_DP) | 592 | if (encoder_mode == ATOM_ENCODER_MODE_DP) { |
| 593 | /* may want to enable SS on DP/eDP eventually */ | ||
| 594 | /*args.v3.sInput.ucDispPllConfig |= | ||
| 595 | DISPPLL_CONFIG_SS_ENABLE;*/ | ||
| 572 | args.v3.sInput.ucDispPllConfig |= | 596 | args.v3.sInput.ucDispPllConfig |= |
| 573 | DISPPLL_CONFIG_COHERENT_MODE; | 597 | DISPPLL_CONFIG_COHERENT_MODE; |
| 574 | else { | 598 | /* 16200 or 27000 */ |
| 599 | args.v3.sInput.usPixelClock = cpu_to_le16(dp_clock / 10); | ||
| 600 | } else { | ||
| 601 | if (encoder_mode == ATOM_ENCODER_MODE_HDMI) { | ||
| 602 | /* deep color support */ | ||
| 603 | args.v3.sInput.usPixelClock = | ||
| 604 | cpu_to_le16((mode->clock * bpc / 8) / 10); | ||
| 605 | } | ||
| 575 | if (dig->coherent_mode) | 606 | if (dig->coherent_mode) |
| 576 | args.v3.sInput.ucDispPllConfig |= | 607 | args.v3.sInput.ucDispPllConfig |= |
| 577 | DISPPLL_CONFIG_COHERENT_MODE; | 608 | DISPPLL_CONFIG_COHERENT_MODE; |
| @@ -580,13 +611,19 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc, | |||
| 580 | DISPPLL_CONFIG_DUAL_LINK; | 611 | DISPPLL_CONFIG_DUAL_LINK; |
| 581 | } | 612 | } |
| 582 | } else if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) { | 613 | } else if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) { |
| 583 | /* may want to enable SS on DP/eDP eventually */ | 614 | if (encoder_mode == ATOM_ENCODER_MODE_DP) { |
| 584 | /*args.v3.sInput.ucDispPllConfig |= | 615 | /* may want to enable SS on DP/eDP eventually */ |
| 585 | DISPPLL_CONFIG_SS_ENABLE;*/ | 616 | /*args.v3.sInput.ucDispPllConfig |= |
| 586 | if (encoder_mode == ATOM_ENCODER_MODE_DP) | 617 | DISPPLL_CONFIG_SS_ENABLE;*/ |
| 587 | args.v3.sInput.ucDispPllConfig |= | 618 | args.v3.sInput.ucDispPllConfig |= |
| 588 | DISPPLL_CONFIG_COHERENT_MODE; | 619 | DISPPLL_CONFIG_COHERENT_MODE; |
| 589 | else { | 620 | /* 16200 or 27000 */ |
| 621 | args.v3.sInput.usPixelClock = cpu_to_le16(dp_clock / 10); | ||
| 622 | } else if (encoder_mode == ATOM_ENCODER_MODE_LVDS) { | ||
| 623 | /* want to enable SS on LVDS eventually */ | ||
| 624 | /*args.v3.sInput.ucDispPllConfig |= | ||
| 625 | DISPPLL_CONFIG_SS_ENABLE;*/ | ||
| 626 | } else { | ||
| 590 | if (mode->clock > 165000) | 627 | if (mode->clock > 165000) |
| 591 | args.v3.sInput.ucDispPllConfig |= | 628 | args.v3.sInput.ucDispPllConfig |= |
| 592 | DISPPLL_CONFIG_DUAL_LINK; | 629 | DISPPLL_CONFIG_DUAL_LINK; |
diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c index 36e0d4b545e6..4e7778d44b8d 100644 --- a/drivers/gpu/drm/radeon/atombios_dp.c +++ b/drivers/gpu/drm/radeon/atombios_dp.c | |||
| @@ -610,7 +610,7 @@ void dp_link_train(struct drm_encoder *encoder, | |||
| 610 | enc_id |= ATOM_DP_CONFIG_DIG2_ENCODER; | 610 | enc_id |= ATOM_DP_CONFIG_DIG2_ENCODER; |
| 611 | else | 611 | else |
| 612 | enc_id |= ATOM_DP_CONFIG_DIG1_ENCODER; | 612 | enc_id |= ATOM_DP_CONFIG_DIG1_ENCODER; |
| 613 | if (dig_connector->linkb) | 613 | if (dig->linkb) |
| 614 | enc_id |= ATOM_DP_CONFIG_LINK_B; | 614 | enc_id |= ATOM_DP_CONFIG_LINK_B; |
| 615 | else | 615 | else |
| 616 | enc_id |= ATOM_DP_CONFIG_LINK_A; | 616 | enc_id |= ATOM_DP_CONFIG_LINK_A; |
diff --git a/drivers/gpu/drm/radeon/radeon_agp.c b/drivers/gpu/drm/radeon/radeon_agp.c index f40dfb77f9b1..bd2f33e5c91a 100644 --- a/drivers/gpu/drm/radeon/radeon_agp.c +++ b/drivers/gpu/drm/radeon/radeon_agp.c | |||
| @@ -156,7 +156,13 @@ int radeon_agp_init(struct radeon_device *rdev) | |||
| 156 | } | 156 | } |
| 157 | 157 | ||
| 158 | mode.mode = info.mode; | 158 | mode.mode = info.mode; |
| 159 | agp_status = (RREG32(RADEON_AGP_STATUS) | RADEON_AGPv3_MODE) & mode.mode; | 159 | /* chips with the agp to pcie bridge don't have the AGP_STATUS register |
| 160 | * Just use the whatever mode the host sets up. | ||
| 161 | */ | ||
| 162 | if (rdev->family <= CHIP_RV350) | ||
| 163 | agp_status = (RREG32(RADEON_AGP_STATUS) | RADEON_AGPv3_MODE) & mode.mode; | ||
| 164 | else | ||
| 165 | agp_status = mode.mode; | ||
| 160 | is_v3 = !!(agp_status & RADEON_AGPv3_MODE); | 166 | is_v3 = !!(agp_status & RADEON_AGPv3_MODE); |
| 161 | 167 | ||
| 162 | if (is_v3) { | 168 | if (is_v3) { |
diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c index 646f96f97c77..a21bf88e8c2d 100644 --- a/drivers/gpu/drm/radeon/radeon_asic.c +++ b/drivers/gpu/drm/radeon/radeon_asic.c | |||
| @@ -733,6 +733,7 @@ static struct radeon_asic evergreen_asic = { | |||
| 733 | .set_engine_clock = &radeon_atom_set_engine_clock, | 733 | .set_engine_clock = &radeon_atom_set_engine_clock, |
| 734 | .get_memory_clock = &radeon_atom_get_memory_clock, | 734 | .get_memory_clock = &radeon_atom_get_memory_clock, |
| 735 | .set_memory_clock = &radeon_atom_set_memory_clock, | 735 | .set_memory_clock = &radeon_atom_set_memory_clock, |
| 736 | .get_pcie_lanes = NULL, | ||
| 736 | .set_pcie_lanes = NULL, | 737 | .set_pcie_lanes = NULL, |
| 737 | .set_clock_gating = NULL, | 738 | .set_clock_gating = NULL, |
| 738 | .set_surface_reg = r600_set_surface_reg, | 739 | .set_surface_reg = r600_set_surface_reg, |
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c index 6d30868744ee..61141981880d 100644 --- a/drivers/gpu/drm/radeon/radeon_atombios.c +++ b/drivers/gpu/drm/radeon/radeon_atombios.c | |||
| @@ -32,11 +32,11 @@ | |||
| 32 | 32 | ||
| 33 | /* from radeon_encoder.c */ | 33 | /* from radeon_encoder.c */ |
| 34 | extern uint32_t | 34 | extern uint32_t |
| 35 | radeon_get_encoder_id(struct drm_device *dev, uint32_t supported_device, | 35 | radeon_get_encoder_enum(struct drm_device *dev, uint32_t supported_device, |
| 36 | uint8_t dac); | 36 | uint8_t dac); |
| 37 | extern void radeon_link_encoder_connector(struct drm_device *dev); | 37 | extern void radeon_link_encoder_connector(struct drm_device *dev); |
| 38 | extern void | 38 | extern void |
| 39 | radeon_add_atom_encoder(struct drm_device *dev, uint32_t encoder_id, | 39 | radeon_add_atom_encoder(struct drm_device *dev, uint32_t encoder_enum, |
| 40 | uint32_t supported_device); | 40 | uint32_t supported_device); |
| 41 | 41 | ||
| 42 | /* from radeon_connector.c */ | 42 | /* from radeon_connector.c */ |
| @@ -46,14 +46,14 @@ radeon_add_atom_connector(struct drm_device *dev, | |||
| 46 | uint32_t supported_device, | 46 | uint32_t supported_device, |
| 47 | int connector_type, | 47 | int connector_type, |
| 48 | struct radeon_i2c_bus_rec *i2c_bus, | 48 | struct radeon_i2c_bus_rec *i2c_bus, |
| 49 | bool linkb, uint32_t igp_lane_info, | 49 | uint32_t igp_lane_info, |
| 50 | uint16_t connector_object_id, | 50 | uint16_t connector_object_id, |
| 51 | struct radeon_hpd *hpd, | 51 | struct radeon_hpd *hpd, |
| 52 | struct radeon_router *router); | 52 | struct radeon_router *router); |
| 53 | 53 | ||
| 54 | /* from radeon_legacy_encoder.c */ | 54 | /* from radeon_legacy_encoder.c */ |
| 55 | extern void | 55 | extern void |
| 56 | radeon_add_legacy_encoder(struct drm_device *dev, uint32_t encoder_id, | 56 | radeon_add_legacy_encoder(struct drm_device *dev, uint32_t encoder_enum, |
| 57 | uint32_t supported_device); | 57 | uint32_t supported_device); |
| 58 | 58 | ||
| 59 | union atom_supported_devices { | 59 | union atom_supported_devices { |
| @@ -226,6 +226,8 @@ static struct radeon_hpd radeon_atom_get_hpd_info_from_gpio(struct radeon_device | |||
| 226 | struct radeon_hpd hpd; | 226 | struct radeon_hpd hpd; |
| 227 | u32 reg; | 227 | u32 reg; |
| 228 | 228 | ||
| 229 | memset(&hpd, 0, sizeof(struct radeon_hpd)); | ||
| 230 | |||
| 229 | if (ASIC_IS_DCE4(rdev)) | 231 | if (ASIC_IS_DCE4(rdev)) |
| 230 | reg = EVERGREEN_DC_GPIO_HPD_A; | 232 | reg = EVERGREEN_DC_GPIO_HPD_A; |
| 231 | else | 233 | else |
| @@ -477,7 +479,6 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev) | |||
| 477 | int i, j, k, path_size, device_support; | 479 | int i, j, k, path_size, device_support; |
| 478 | int connector_type; | 480 | int connector_type; |
| 479 | u16 igp_lane_info, conn_id, connector_object_id; | 481 | u16 igp_lane_info, conn_id, connector_object_id; |
| 480 | bool linkb; | ||
| 481 | struct radeon_i2c_bus_rec ddc_bus; | 482 | struct radeon_i2c_bus_rec ddc_bus; |
| 482 | struct radeon_router router; | 483 | struct radeon_router router; |
| 483 | struct radeon_gpio_rec gpio; | 484 | struct radeon_gpio_rec gpio; |
| @@ -510,7 +511,7 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev) | |||
| 510 | addr += path_size; | 511 | addr += path_size; |
| 511 | path = (ATOM_DISPLAY_OBJECT_PATH *) addr; | 512 | path = (ATOM_DISPLAY_OBJECT_PATH *) addr; |
| 512 | path_size += le16_to_cpu(path->usSize); | 513 | path_size += le16_to_cpu(path->usSize); |
| 513 | linkb = false; | 514 | |
| 514 | if (device_support & le16_to_cpu(path->usDeviceTag)) { | 515 | if (device_support & le16_to_cpu(path->usDeviceTag)) { |
| 515 | uint8_t con_obj_id, con_obj_num, con_obj_type; | 516 | uint8_t con_obj_id, con_obj_num, con_obj_type; |
| 516 | 517 | ||
| @@ -601,13 +602,10 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev) | |||
| 601 | OBJECT_TYPE_MASK) >> OBJECT_TYPE_SHIFT; | 602 | OBJECT_TYPE_MASK) >> OBJECT_TYPE_SHIFT; |
| 602 | 603 | ||
| 603 | if (grph_obj_type == GRAPH_OBJECT_TYPE_ENCODER) { | 604 | if (grph_obj_type == GRAPH_OBJECT_TYPE_ENCODER) { |
| 604 | if (grph_obj_num == 2) | 605 | u16 encoder_obj = le16_to_cpu(path->usGraphicObjIds[j]); |
| 605 | linkb = true; | ||
| 606 | else | ||
| 607 | linkb = false; | ||
| 608 | 606 | ||
| 609 | radeon_add_atom_encoder(dev, | 607 | radeon_add_atom_encoder(dev, |
| 610 | grph_obj_id, | 608 | encoder_obj, |
| 611 | le16_to_cpu | 609 | le16_to_cpu |
| 612 | (path-> | 610 | (path-> |
| 613 | usDeviceTag)); | 611 | usDeviceTag)); |
| @@ -744,7 +742,7 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev) | |||
| 744 | le16_to_cpu(path-> | 742 | le16_to_cpu(path-> |
| 745 | usDeviceTag), | 743 | usDeviceTag), |
| 746 | connector_type, &ddc_bus, | 744 | connector_type, &ddc_bus, |
| 747 | linkb, igp_lane_info, | 745 | igp_lane_info, |
| 748 | connector_object_id, | 746 | connector_object_id, |
| 749 | &hpd, | 747 | &hpd, |
| 750 | &router); | 748 | &router); |
| @@ -933,13 +931,13 @@ bool radeon_get_atom_connector_info_from_supported_devices_table(struct | |||
| 933 | 931 | ||
| 934 | if (ASIC_IS_AVIVO(rdev) || radeon_r4xx_atom) | 932 | if (ASIC_IS_AVIVO(rdev) || radeon_r4xx_atom) |
| 935 | radeon_add_atom_encoder(dev, | 933 | radeon_add_atom_encoder(dev, |
| 936 | radeon_get_encoder_id(dev, | 934 | radeon_get_encoder_enum(dev, |
| 937 | (1 << i), | 935 | (1 << i), |
| 938 | dac), | 936 | dac), |
| 939 | (1 << i)); | 937 | (1 << i)); |
| 940 | else | 938 | else |
| 941 | radeon_add_legacy_encoder(dev, | 939 | radeon_add_legacy_encoder(dev, |
| 942 | radeon_get_encoder_id(dev, | 940 | radeon_get_encoder_enum(dev, |
| 943 | (1 << i), | 941 | (1 << i), |
| 944 | dac), | 942 | dac), |
| 945 | (1 << i)); | 943 | (1 << i)); |
| @@ -996,7 +994,7 @@ bool radeon_get_atom_connector_info_from_supported_devices_table(struct | |||
| 996 | bios_connectors[i]. | 994 | bios_connectors[i]. |
| 997 | connector_type, | 995 | connector_type, |
| 998 | &bios_connectors[i].ddc_bus, | 996 | &bios_connectors[i].ddc_bus, |
| 999 | false, 0, | 997 | 0, |
| 1000 | connector_object_id, | 998 | connector_object_id, |
| 1001 | &bios_connectors[i].hpd, | 999 | &bios_connectors[i].hpd, |
| 1002 | &router); | 1000 | &router); |
| @@ -1183,7 +1181,7 @@ bool radeon_atombios_sideport_present(struct radeon_device *rdev) | |||
| 1183 | return true; | 1181 | return true; |
| 1184 | break; | 1182 | break; |
| 1185 | case 2: | 1183 | case 2: |
| 1186 | if (igp_info->info_2.ucMemoryType & 0x0f) | 1184 | if (igp_info->info_2.ulBootUpSidePortClock) |
| 1187 | return true; | 1185 | return true; |
| 1188 | break; | 1186 | break; |
| 1189 | default: | 1187 | default: |
| @@ -1305,6 +1303,7 @@ struct radeon_encoder_atom_dig *radeon_atombios_get_lvds_info(struct | |||
| 1305 | union lvds_info *lvds_info; | 1303 | union lvds_info *lvds_info; |
| 1306 | uint8_t frev, crev; | 1304 | uint8_t frev, crev; |
| 1307 | struct radeon_encoder_atom_dig *lvds = NULL; | 1305 | struct radeon_encoder_atom_dig *lvds = NULL; |
| 1306 | int encoder_enum = (encoder->encoder_enum & ENUM_ID_MASK) >> ENUM_ID_SHIFT; | ||
| 1308 | 1307 | ||
| 1309 | if (atom_parse_data_header(mode_info->atom_context, index, NULL, | 1308 | if (atom_parse_data_header(mode_info->atom_context, index, NULL, |
| 1310 | &frev, &crev, &data_offset)) { | 1309 | &frev, &crev, &data_offset)) { |
| @@ -1368,6 +1367,12 @@ struct radeon_encoder_atom_dig *radeon_atombios_get_lvds_info(struct | |||
| 1368 | } | 1367 | } |
| 1369 | 1368 | ||
| 1370 | encoder->native_mode = lvds->native_mode; | 1369 | encoder->native_mode = lvds->native_mode; |
| 1370 | |||
| 1371 | if (encoder_enum == 2) | ||
| 1372 | lvds->linkb = true; | ||
| 1373 | else | ||
| 1374 | lvds->linkb = false; | ||
| 1375 | |||
| 1371 | } | 1376 | } |
| 1372 | return lvds; | 1377 | return lvds; |
| 1373 | } | 1378 | } |
diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c index 885dcfac1838..bd74e428bd14 100644 --- a/drivers/gpu/drm/radeon/radeon_combios.c +++ b/drivers/gpu/drm/radeon/radeon_combios.c | |||
| @@ -39,8 +39,8 @@ | |||
| 39 | 39 | ||
| 40 | /* from radeon_encoder.c */ | 40 | /* from radeon_encoder.c */ |
| 41 | extern uint32_t | 41 | extern uint32_t |
| 42 | radeon_get_encoder_id(struct drm_device *dev, uint32_t supported_device, | 42 | radeon_get_encoder_enum(struct drm_device *dev, uint32_t supported_device, |
| 43 | uint8_t dac); | 43 | uint8_t dac); |
| 44 | extern void radeon_link_encoder_connector(struct drm_device *dev); | 44 | extern void radeon_link_encoder_connector(struct drm_device *dev); |
| 45 | 45 | ||
| 46 | /* from radeon_connector.c */ | 46 | /* from radeon_connector.c */ |
| @@ -55,7 +55,7 @@ radeon_add_legacy_connector(struct drm_device *dev, | |||
| 55 | 55 | ||
| 56 | /* from radeon_legacy_encoder.c */ | 56 | /* from radeon_legacy_encoder.c */ |
| 57 | extern void | 57 | extern void |
| 58 | radeon_add_legacy_encoder(struct drm_device *dev, uint32_t encoder_id, | 58 | radeon_add_legacy_encoder(struct drm_device *dev, uint32_t encoder_enum, |
| 59 | uint32_t supported_device); | 59 | uint32_t supported_device); |
| 60 | 60 | ||
| 61 | /* old legacy ATI BIOS routines */ | 61 | /* old legacy ATI BIOS routines */ |
| @@ -1505,7 +1505,7 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev) | |||
| 1505 | ddc_i2c = combios_setup_i2c_bus(rdev, DDC_VGA, 0, 0); | 1505 | ddc_i2c = combios_setup_i2c_bus(rdev, DDC_VGA, 0, 0); |
| 1506 | hpd.hpd = RADEON_HPD_NONE; | 1506 | hpd.hpd = RADEON_HPD_NONE; |
| 1507 | radeon_add_legacy_encoder(dev, | 1507 | radeon_add_legacy_encoder(dev, |
| 1508 | radeon_get_encoder_id(dev, | 1508 | radeon_get_encoder_enum(dev, |
| 1509 | ATOM_DEVICE_CRT1_SUPPORT, | 1509 | ATOM_DEVICE_CRT1_SUPPORT, |
| 1510 | 1), | 1510 | 1), |
| 1511 | ATOM_DEVICE_CRT1_SUPPORT); | 1511 | ATOM_DEVICE_CRT1_SUPPORT); |
| @@ -1520,7 +1520,7 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev) | |||
| 1520 | ddc_i2c = combios_setup_i2c_bus(rdev, DDC_NONE_DETECTED, 0, 0); | 1520 | ddc_i2c = combios_setup_i2c_bus(rdev, DDC_NONE_DETECTED, 0, 0); |
| 1521 | hpd.hpd = RADEON_HPD_NONE; | 1521 | hpd.hpd = RADEON_HPD_NONE; |
| 1522 | radeon_add_legacy_encoder(dev, | 1522 | radeon_add_legacy_encoder(dev, |
| 1523 | radeon_get_encoder_id(dev, | 1523 | radeon_get_encoder_enum(dev, |
| 1524 | ATOM_DEVICE_LCD1_SUPPORT, | 1524 | ATOM_DEVICE_LCD1_SUPPORT, |
| 1525 | 0), | 1525 | 0), |
| 1526 | ATOM_DEVICE_LCD1_SUPPORT); | 1526 | ATOM_DEVICE_LCD1_SUPPORT); |
| @@ -1535,7 +1535,7 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev) | |||
| 1535 | ddc_i2c = combios_setup_i2c_bus(rdev, DDC_VGA, 0, 0); | 1535 | ddc_i2c = combios_setup_i2c_bus(rdev, DDC_VGA, 0, 0); |
| 1536 | hpd.hpd = RADEON_HPD_NONE; | 1536 | hpd.hpd = RADEON_HPD_NONE; |
| 1537 | radeon_add_legacy_encoder(dev, | 1537 | radeon_add_legacy_encoder(dev, |
| 1538 | radeon_get_encoder_id(dev, | 1538 | radeon_get_encoder_enum(dev, |
| 1539 | ATOM_DEVICE_CRT1_SUPPORT, | 1539 | ATOM_DEVICE_CRT1_SUPPORT, |
| 1540 | 1), | 1540 | 1), |
| 1541 | ATOM_DEVICE_CRT1_SUPPORT); | 1541 | ATOM_DEVICE_CRT1_SUPPORT); |
| @@ -1550,12 +1550,12 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev) | |||
| 1550 | ddc_i2c = combios_setup_i2c_bus(rdev, DDC_DVI, 0, 0); | 1550 | ddc_i2c = combios_setup_i2c_bus(rdev, DDC_DVI, 0, 0); |
| 1551 | hpd.hpd = RADEON_HPD_1; | 1551 | hpd.hpd = RADEON_HPD_1; |
| 1552 | radeon_add_legacy_encoder(dev, | 1552 | radeon_add_legacy_encoder(dev, |
| 1553 | radeon_get_encoder_id(dev, | 1553 | radeon_get_encoder_enum(dev, |
| 1554 | ATOM_DEVICE_DFP1_SUPPORT, | 1554 | ATOM_DEVICE_DFP1_SUPPORT, |
| 1555 | 0), | 1555 | 0), |
| 1556 | ATOM_DEVICE_DFP1_SUPPORT); | 1556 | ATOM_DEVICE_DFP1_SUPPORT); |
| 1557 | radeon_add_legacy_encoder(dev, | 1557 | radeon_add_legacy_encoder(dev, |
| 1558 | radeon_get_encoder_id(dev, | 1558 | radeon_get_encoder_enum(dev, |
| 1559 | ATOM_DEVICE_CRT2_SUPPORT, | 1559 | ATOM_DEVICE_CRT2_SUPPORT, |
| 1560 | 2), | 1560 | 2), |
| 1561 | ATOM_DEVICE_CRT2_SUPPORT); | 1561 | ATOM_DEVICE_CRT2_SUPPORT); |
| @@ -1571,7 +1571,7 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev) | |||
| 1571 | ddc_i2c = combios_setup_i2c_bus(rdev, DDC_VGA, 0, 0); | 1571 | ddc_i2c = combios_setup_i2c_bus(rdev, DDC_VGA, 0, 0); |
| 1572 | hpd.hpd = RADEON_HPD_NONE; | 1572 | hpd.hpd = RADEON_HPD_NONE; |
| 1573 | radeon_add_legacy_encoder(dev, | 1573 | radeon_add_legacy_encoder(dev, |
| 1574 | radeon_get_encoder_id(dev, | 1574 | radeon_get_encoder_enum(dev, |
| 1575 | ATOM_DEVICE_CRT1_SUPPORT, | 1575 | ATOM_DEVICE_CRT1_SUPPORT, |
| 1576 | 1), | 1576 | 1), |
| 1577 | ATOM_DEVICE_CRT1_SUPPORT); | 1577 | ATOM_DEVICE_CRT1_SUPPORT); |
| @@ -1588,7 +1588,7 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev) | |||
| 1588 | ddc_i2c.valid = false; | 1588 | ddc_i2c.valid = false; |
| 1589 | hpd.hpd = RADEON_HPD_NONE; | 1589 | hpd.hpd = RADEON_HPD_NONE; |
| 1590 | radeon_add_legacy_encoder(dev, | 1590 | radeon_add_legacy_encoder(dev, |
| 1591 | radeon_get_encoder_id(dev, | 1591 | radeon_get_encoder_enum(dev, |
| 1592 | ATOM_DEVICE_TV1_SUPPORT, | 1592 | ATOM_DEVICE_TV1_SUPPORT, |
| 1593 | 2), | 1593 | 2), |
| 1594 | ATOM_DEVICE_TV1_SUPPORT); | 1594 | ATOM_DEVICE_TV1_SUPPORT); |
| @@ -1607,7 +1607,7 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev) | |||
| 1607 | ddc_i2c = combios_setup_i2c_bus(rdev, DDC_DVI, 0, 0); | 1607 | ddc_i2c = combios_setup_i2c_bus(rdev, DDC_DVI, 0, 0); |
| 1608 | hpd.hpd = RADEON_HPD_NONE; | 1608 | hpd.hpd = RADEON_HPD_NONE; |
| 1609 | radeon_add_legacy_encoder(dev, | 1609 | radeon_add_legacy_encoder(dev, |
| 1610 | radeon_get_encoder_id(dev, | 1610 | radeon_get_encoder_enum(dev, |
| 1611 | ATOM_DEVICE_LCD1_SUPPORT, | 1611 | ATOM_DEVICE_LCD1_SUPPORT, |
| 1612 | 0), | 1612 | 0), |
| 1613 | ATOM_DEVICE_LCD1_SUPPORT); | 1613 | ATOM_DEVICE_LCD1_SUPPORT); |
| @@ -1619,7 +1619,7 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev) | |||
| 1619 | ddc_i2c = combios_setup_i2c_bus(rdev, DDC_VGA, 0, 0); | 1619 | ddc_i2c = combios_setup_i2c_bus(rdev, DDC_VGA, 0, 0); |
| 1620 | hpd.hpd = RADEON_HPD_NONE; | 1620 | hpd.hpd = RADEON_HPD_NONE; |
| 1621 | radeon_add_legacy_encoder(dev, | 1621 | radeon_add_legacy_encoder(dev, |
| 1622 | radeon_get_encoder_id(dev, | 1622 | radeon_get_encoder_enum(dev, |
| 1623 | ATOM_DEVICE_CRT2_SUPPORT, | 1623 | ATOM_DEVICE_CRT2_SUPPORT, |
| 1624 | 2), | 1624 | 2), |
| 1625 | ATOM_DEVICE_CRT2_SUPPORT); | 1625 | ATOM_DEVICE_CRT2_SUPPORT); |
| @@ -1631,7 +1631,7 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev) | |||
| 1631 | ddc_i2c.valid = false; | 1631 | ddc_i2c.valid = false; |
| 1632 | hpd.hpd = RADEON_HPD_NONE; | 1632 | hpd.hpd = RADEON_HPD_NONE; |
| 1633 | radeon_add_legacy_encoder(dev, | 1633 | radeon_add_legacy_encoder(dev, |
| 1634 | radeon_get_encoder_id(dev, | 1634 | radeon_get_encoder_enum(dev, |
| 1635 | ATOM_DEVICE_TV1_SUPPORT, | 1635 | ATOM_DEVICE_TV1_SUPPORT, |
| 1636 | 2), | 1636 | 2), |
| 1637 | ATOM_DEVICE_TV1_SUPPORT); | 1637 | ATOM_DEVICE_TV1_SUPPORT); |
| @@ -1648,7 +1648,7 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev) | |||
| 1648 | ddc_i2c = combios_setup_i2c_bus(rdev, DDC_DVI, 0, 0); | 1648 | ddc_i2c = combios_setup_i2c_bus(rdev, DDC_DVI, 0, 0); |
| 1649 | hpd.hpd = RADEON_HPD_NONE; | 1649 | hpd.hpd = RADEON_HPD_NONE; |
| 1650 | radeon_add_legacy_encoder(dev, | 1650 | radeon_add_legacy_encoder(dev, |
| 1651 | radeon_get_encoder_id(dev, | 1651 | radeon_get_encoder_enum(dev, |
| 1652 | ATOM_DEVICE_LCD1_SUPPORT, | 1652 | ATOM_DEVICE_LCD1_SUPPORT, |
| 1653 | 0), | 1653 | 0), |
| 1654 | ATOM_DEVICE_LCD1_SUPPORT); | 1654 | ATOM_DEVICE_LCD1_SUPPORT); |
| @@ -1660,12 +1660,12 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev) | |||
| 1660 | ddc_i2c = combios_setup_i2c_bus(rdev, DDC_VGA, 0, 0); | 1660 | ddc_i2c = combios_setup_i2c_bus(rdev, DDC_VGA, 0, 0); |
| 1661 | hpd.hpd = RADEON_HPD_2; /* ??? */ | 1661 | hpd.hpd = RADEON_HPD_2; /* ??? */ |
| 1662 | radeon_add_legacy_encoder(dev, | 1662 | radeon_add_legacy_encoder(dev, |
| 1663 | radeon_get_encoder_id(dev, | 1663 | radeon_get_encoder_enum(dev, |
| 1664 | ATOM_DEVICE_DFP2_SUPPORT, | 1664 | ATOM_DEVICE_DFP2_SUPPORT, |
| 1665 | 0), | 1665 | 0), |
| 1666 | ATOM_DEVICE_DFP2_SUPPORT); | 1666 | ATOM_DEVICE_DFP2_SUPPORT); |
| 1667 | radeon_add_legacy_encoder(dev, | 1667 | radeon_add_legacy_encoder(dev, |
| 1668 | radeon_get_encoder_id(dev, | 1668 | radeon_get_encoder_enum(dev, |
| 1669 | ATOM_DEVICE_CRT1_SUPPORT, | 1669 | ATOM_DEVICE_CRT1_SUPPORT, |
| 1670 | 1), | 1670 | 1), |
| 1671 | ATOM_DEVICE_CRT1_SUPPORT); | 1671 | ATOM_DEVICE_CRT1_SUPPORT); |
| @@ -1680,7 +1680,7 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev) | |||
| 1680 | ddc_i2c.valid = false; | 1680 | ddc_i2c.valid = false; |
| 1681 | hpd.hpd = RADEON_HPD_NONE; | 1681 | hpd.hpd = RADEON_HPD_NONE; |
| 1682 | radeon_add_legacy_encoder(dev, | 1682 | radeon_add_legacy_encoder(dev, |
| 1683 | radeon_get_encoder_id(dev, | 1683 | radeon_get_encoder_enum(dev, |
| 1684 | ATOM_DEVICE_TV1_SUPPORT, | 1684 | ATOM_DEVICE_TV1_SUPPORT, |
| 1685 | 2), | 1685 | 2), |
| 1686 | ATOM_DEVICE_TV1_SUPPORT); | 1686 | ATOM_DEVICE_TV1_SUPPORT); |
| @@ -1697,7 +1697,7 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev) | |||
| 1697 | ddc_i2c = combios_setup_i2c_bus(rdev, DDC_DVI, 0, 0); | 1697 | ddc_i2c = combios_setup_i2c_bus(rdev, DDC_DVI, 0, 0); |
| 1698 | hpd.hpd = RADEON_HPD_NONE; | 1698 | hpd.hpd = RADEON_HPD_NONE; |
| 1699 | radeon_add_legacy_encoder(dev, | 1699 | radeon_add_legacy_encoder(dev, |
| 1700 | radeon_get_encoder_id(dev, | 1700 | radeon_get_encoder_enum(dev, |
| 1701 | ATOM_DEVICE_LCD1_SUPPORT, | 1701 | ATOM_DEVICE_LCD1_SUPPORT, |
| 1702 | 0), | 1702 | 0), |
| 1703 | ATOM_DEVICE_LCD1_SUPPORT); | 1703 | ATOM_DEVICE_LCD1_SUPPORT); |
| @@ -1709,12 +1709,12 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev) | |||
| 1709 | ddc_i2c = combios_setup_i2c_bus(rdev, DDC_VGA, 0, 0); | 1709 | ddc_i2c = combios_setup_i2c_bus(rdev, DDC_VGA, 0, 0); |
| 1710 | hpd.hpd = RADEON_HPD_1; /* ??? */ | 1710 | hpd.hpd = RADEON_HPD_1; /* ??? */ |
| 1711 | radeon_add_legacy_encoder(dev, | 1711 | radeon_add_legacy_encoder(dev, |
| 1712 | radeon_get_encoder_id(dev, | 1712 | radeon_get_encoder_enum(dev, |
| 1713 | ATOM_DEVICE_DFP1_SUPPORT, | 1713 | ATOM_DEVICE_DFP1_SUPPORT, |
| 1714 | 0), | 1714 | 0), |
| 1715 | ATOM_DEVICE_DFP1_SUPPORT); | 1715 | ATOM_DEVICE_DFP1_SUPPORT); |
| 1716 | radeon_add_legacy_encoder(dev, | 1716 | radeon_add_legacy_encoder(dev, |
| 1717 | radeon_get_encoder_id(dev, | 1717 | radeon_get_encoder_enum(dev, |
| 1718 | ATOM_DEVICE_CRT1_SUPPORT, | 1718 | ATOM_DEVICE_CRT1_SUPPORT, |
| 1719 | 1), | 1719 | 1), |
| 1720 | ATOM_DEVICE_CRT1_SUPPORT); | 1720 | ATOM_DEVICE_CRT1_SUPPORT); |
| @@ -1728,7 +1728,7 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev) | |||
| 1728 | ddc_i2c.valid = false; | 1728 | ddc_i2c.valid = false; |
| 1729 | hpd.hpd = RADEON_HPD_NONE; | 1729 | hpd.hpd = RADEON_HPD_NONE; |
| 1730 | radeon_add_legacy_encoder(dev, | 1730 | radeon_add_legacy_encoder(dev, |
| 1731 | radeon_get_encoder_id(dev, | 1731 | radeon_get_encoder_enum(dev, |
| 1732 | ATOM_DEVICE_TV1_SUPPORT, | 1732 | ATOM_DEVICE_TV1_SUPPORT, |
| 1733 | 2), | 1733 | 2), |
| 1734 | ATOM_DEVICE_TV1_SUPPORT); | 1734 | ATOM_DEVICE_TV1_SUPPORT); |
| @@ -1745,7 +1745,7 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev) | |||
| 1745 | ddc_i2c = combios_setup_i2c_bus(rdev, DDC_DVI, 0, 0); | 1745 | ddc_i2c = combios_setup_i2c_bus(rdev, DDC_DVI, 0, 0); |
| 1746 | hpd.hpd = RADEON_HPD_NONE; | 1746 | hpd.hpd = RADEON_HPD_NONE; |
| 1747 | radeon_add_legacy_encoder(dev, | 1747 | radeon_add_legacy_encoder(dev, |
| 1748 | radeon_get_encoder_id(dev, | 1748 | radeon_get_encoder_enum(dev, |
| 1749 | ATOM_DEVICE_LCD1_SUPPORT, | 1749 | ATOM_DEVICE_LCD1_SUPPORT, |
| 1750 | 0), | 1750 | 0), |
| 1751 | ATOM_DEVICE_LCD1_SUPPORT); | 1751 | ATOM_DEVICE_LCD1_SUPPORT); |
| @@ -1757,7 +1757,7 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev) | |||
| 1757 | ddc_i2c = combios_setup_i2c_bus(rdev, DDC_VGA, 0, 0); | 1757 | ddc_i2c = combios_setup_i2c_bus(rdev, DDC_VGA, 0, 0); |
| 1758 | hpd.hpd = RADEON_HPD_NONE; | 1758 | hpd.hpd = RADEON_HPD_NONE; |
| 1759 | radeon_add_legacy_encoder(dev, | 1759 | radeon_add_legacy_encoder(dev, |
| 1760 | radeon_get_encoder_id(dev, | 1760 | radeon_get_encoder_enum(dev, |
| 1761 | ATOM_DEVICE_CRT1_SUPPORT, | 1761 | ATOM_DEVICE_CRT1_SUPPORT, |
| 1762 | 1), | 1762 | 1), |
| 1763 | ATOM_DEVICE_CRT1_SUPPORT); | 1763 | ATOM_DEVICE_CRT1_SUPPORT); |
| @@ -1769,7 +1769,7 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev) | |||
| 1769 | ddc_i2c.valid = false; | 1769 | ddc_i2c.valid = false; |
| 1770 | hpd.hpd = RADEON_HPD_NONE; | 1770 | hpd.hpd = RADEON_HPD_NONE; |
| 1771 | radeon_add_legacy_encoder(dev, | 1771 | radeon_add_legacy_encoder(dev, |
| 1772 | radeon_get_encoder_id(dev, | 1772 | radeon_get_encoder_enum(dev, |
| 1773 | ATOM_DEVICE_TV1_SUPPORT, | 1773 | ATOM_DEVICE_TV1_SUPPORT, |
| 1774 | 2), | 1774 | 2), |
| 1775 | ATOM_DEVICE_TV1_SUPPORT); | 1775 | ATOM_DEVICE_TV1_SUPPORT); |
| @@ -1786,12 +1786,12 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev) | |||
| 1786 | ddc_i2c = combios_setup_i2c_bus(rdev, DDC_CRT2, 0, 0); | 1786 | ddc_i2c = combios_setup_i2c_bus(rdev, DDC_CRT2, 0, 0); |
| 1787 | hpd.hpd = RADEON_HPD_2; /* ??? */ | 1787 | hpd.hpd = RADEON_HPD_2; /* ??? */ |
| 1788 | radeon_add_legacy_encoder(dev, | 1788 | radeon_add_legacy_encoder(dev, |
| 1789 | radeon_get_encoder_id(dev, | 1789 | radeon_get_encoder_enum(dev, |
| 1790 | ATOM_DEVICE_DFP2_SUPPORT, | 1790 | ATOM_DEVICE_DFP2_SUPPORT, |
| 1791 | 0), | 1791 | 0), |
| 1792 | ATOM_DEVICE_DFP2_SUPPORT); | 1792 | ATOM_DEVICE_DFP2_SUPPORT); |
| 1793 | radeon_add_legacy_encoder(dev, | 1793 | radeon_add_legacy_encoder(dev, |
| 1794 | radeon_get_encoder_id(dev, | 1794 | radeon_get_encoder_enum(dev, |
| 1795 | ATOM_DEVICE_CRT2_SUPPORT, | 1795 | ATOM_DEVICE_CRT2_SUPPORT, |
| 1796 | 2), | 1796 | 2), |
| 1797 | ATOM_DEVICE_CRT2_SUPPORT); | 1797 | ATOM_DEVICE_CRT2_SUPPORT); |
| @@ -1806,7 +1806,7 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev) | |||
| 1806 | ddc_i2c.valid = false; | 1806 | ddc_i2c.valid = false; |
| 1807 | hpd.hpd = RADEON_HPD_NONE; | 1807 | hpd.hpd = RADEON_HPD_NONE; |
| 1808 | radeon_add_legacy_encoder(dev, | 1808 | radeon_add_legacy_encoder(dev, |
| 1809 | radeon_get_encoder_id(dev, | 1809 | radeon_get_encoder_enum(dev, |
| 1810 | ATOM_DEVICE_TV1_SUPPORT, | 1810 | ATOM_DEVICE_TV1_SUPPORT, |
| 1811 | 2), | 1811 | 2), |
| 1812 | ATOM_DEVICE_TV1_SUPPORT); | 1812 | ATOM_DEVICE_TV1_SUPPORT); |
| @@ -1823,12 +1823,12 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev) | |||
| 1823 | ddc_i2c = combios_setup_i2c_bus(rdev, DDC_CRT2, 0, 0); | 1823 | ddc_i2c = combios_setup_i2c_bus(rdev, DDC_CRT2, 0, 0); |
| 1824 | hpd.hpd = RADEON_HPD_1; /* ??? */ | 1824 | hpd.hpd = RADEON_HPD_1; /* ??? */ |
| 1825 | radeon_add_legacy_encoder(dev, | 1825 | radeon_add_legacy_encoder(dev, |
| 1826 | radeon_get_encoder_id(dev, | 1826 | radeon_get_encoder_enum(dev, |
| 1827 | ATOM_DEVICE_DFP1_SUPPORT, | 1827 | ATOM_DEVICE_DFP1_SUPPORT, |
| 1828 | 0), | 1828 | 0), |
| 1829 | ATOM_DEVICE_DFP1_SUPPORT); | 1829 | ATOM_DEVICE_DFP1_SUPPORT); |
| 1830 | radeon_add_legacy_encoder(dev, | 1830 | radeon_add_legacy_encoder(dev, |
| 1831 | radeon_get_encoder_id(dev, | 1831 | radeon_get_encoder_enum(dev, |
| 1832 | ATOM_DEVICE_CRT2_SUPPORT, | 1832 | ATOM_DEVICE_CRT2_SUPPORT, |
| 1833 | 2), | 1833 | 2), |
| 1834 | ATOM_DEVICE_CRT2_SUPPORT); | 1834 | ATOM_DEVICE_CRT2_SUPPORT); |
| @@ -1842,7 +1842,7 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev) | |||
| 1842 | ddc_i2c.valid = false; | 1842 | ddc_i2c.valid = false; |
| 1843 | hpd.hpd = RADEON_HPD_NONE; | 1843 | hpd.hpd = RADEON_HPD_NONE; |
| 1844 | radeon_add_legacy_encoder(dev, | 1844 | radeon_add_legacy_encoder(dev, |
| 1845 | radeon_get_encoder_id(dev, | 1845 | radeon_get_encoder_enum(dev, |
| 1846 | ATOM_DEVICE_TV1_SUPPORT, | 1846 | ATOM_DEVICE_TV1_SUPPORT, |
| 1847 | 2), | 1847 | 2), |
| 1848 | ATOM_DEVICE_TV1_SUPPORT); | 1848 | ATOM_DEVICE_TV1_SUPPORT); |
| @@ -1859,7 +1859,7 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev) | |||
| 1859 | ddc_i2c = combios_setup_i2c_bus(rdev, DDC_MONID, 0, 0); | 1859 | ddc_i2c = combios_setup_i2c_bus(rdev, DDC_MONID, 0, 0); |
| 1860 | hpd.hpd = RADEON_HPD_1; /* ??? */ | 1860 | hpd.hpd = RADEON_HPD_1; /* ??? */ |
| 1861 | radeon_add_legacy_encoder(dev, | 1861 | radeon_add_legacy_encoder(dev, |
| 1862 | radeon_get_encoder_id(dev, | 1862 | radeon_get_encoder_enum(dev, |
| 1863 | ATOM_DEVICE_DFP1_SUPPORT, | 1863 | ATOM_DEVICE_DFP1_SUPPORT, |
| 1864 | 0), | 1864 | 0), |
| 1865 | ATOM_DEVICE_DFP1_SUPPORT); | 1865 | ATOM_DEVICE_DFP1_SUPPORT); |
| @@ -1871,7 +1871,7 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev) | |||
| 1871 | ddc_i2c = combios_setup_i2c_bus(rdev, DDC_DVI, 0, 0); | 1871 | ddc_i2c = combios_setup_i2c_bus(rdev, DDC_DVI, 0, 0); |
| 1872 | hpd.hpd = RADEON_HPD_NONE; | 1872 | hpd.hpd = RADEON_HPD_NONE; |
| 1873 | radeon_add_legacy_encoder(dev, | 1873 | radeon_add_legacy_encoder(dev, |
| 1874 | radeon_get_encoder_id(dev, | 1874 | radeon_get_encoder_enum(dev, |
| 1875 | ATOM_DEVICE_CRT2_SUPPORT, | 1875 | ATOM_DEVICE_CRT2_SUPPORT, |
| 1876 | 2), | 1876 | 2), |
| 1877 | ATOM_DEVICE_CRT2_SUPPORT); | 1877 | ATOM_DEVICE_CRT2_SUPPORT); |
| @@ -1883,7 +1883,7 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev) | |||
| 1883 | ddc_i2c.valid = false; | 1883 | ddc_i2c.valid = false; |
| 1884 | hpd.hpd = RADEON_HPD_NONE; | 1884 | hpd.hpd = RADEON_HPD_NONE; |
| 1885 | radeon_add_legacy_encoder(dev, | 1885 | radeon_add_legacy_encoder(dev, |
| 1886 | radeon_get_encoder_id(dev, | 1886 | radeon_get_encoder_enum(dev, |
| 1887 | ATOM_DEVICE_TV1_SUPPORT, | 1887 | ATOM_DEVICE_TV1_SUPPORT, |
| 1888 | 2), | 1888 | 2), |
| 1889 | ATOM_DEVICE_TV1_SUPPORT); | 1889 | ATOM_DEVICE_TV1_SUPPORT); |
| @@ -1900,7 +1900,7 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev) | |||
| 1900 | ddc_i2c = combios_setup_i2c_bus(rdev, DDC_VGA, 0, 0); | 1900 | ddc_i2c = combios_setup_i2c_bus(rdev, DDC_VGA, 0, 0); |
| 1901 | hpd.hpd = RADEON_HPD_NONE; | 1901 | hpd.hpd = RADEON_HPD_NONE; |
| 1902 | radeon_add_legacy_encoder(dev, | 1902 | radeon_add_legacy_encoder(dev, |
| 1903 | radeon_get_encoder_id(dev, | 1903 | radeon_get_encoder_enum(dev, |
| 1904 | ATOM_DEVICE_CRT1_SUPPORT, | 1904 | ATOM_DEVICE_CRT1_SUPPORT, |
| 1905 | 1), | 1905 | 1), |
| 1906 | ATOM_DEVICE_CRT1_SUPPORT); | 1906 | ATOM_DEVICE_CRT1_SUPPORT); |
| @@ -1912,7 +1912,7 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev) | |||
| 1912 | ddc_i2c = combios_setup_i2c_bus(rdev, DDC_CRT2, 0, 0); | 1912 | ddc_i2c = combios_setup_i2c_bus(rdev, DDC_CRT2, 0, 0); |
| 1913 | hpd.hpd = RADEON_HPD_NONE; | 1913 | hpd.hpd = RADEON_HPD_NONE; |
| 1914 | radeon_add_legacy_encoder(dev, | 1914 | radeon_add_legacy_encoder(dev, |
| 1915 | radeon_get_encoder_id(dev, | 1915 | radeon_get_encoder_enum(dev, |
| 1916 | ATOM_DEVICE_CRT2_SUPPORT, | 1916 | ATOM_DEVICE_CRT2_SUPPORT, |
| 1917 | 2), | 1917 | 2), |
| 1918 | ATOM_DEVICE_CRT2_SUPPORT); | 1918 | ATOM_DEVICE_CRT2_SUPPORT); |
| @@ -1924,7 +1924,7 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev) | |||
| 1924 | ddc_i2c.valid = false; | 1924 | ddc_i2c.valid = false; |
| 1925 | hpd.hpd = RADEON_HPD_NONE; | 1925 | hpd.hpd = RADEON_HPD_NONE; |
| 1926 | radeon_add_legacy_encoder(dev, | 1926 | radeon_add_legacy_encoder(dev, |
| 1927 | radeon_get_encoder_id(dev, | 1927 | radeon_get_encoder_enum(dev, |
| 1928 | ATOM_DEVICE_TV1_SUPPORT, | 1928 | ATOM_DEVICE_TV1_SUPPORT, |
| 1929 | 2), | 1929 | 2), |
| 1930 | ATOM_DEVICE_TV1_SUPPORT); | 1930 | ATOM_DEVICE_TV1_SUPPORT); |
| @@ -1941,7 +1941,7 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev) | |||
| 1941 | ddc_i2c = combios_setup_i2c_bus(rdev, DDC_VGA, 0, 0); | 1941 | ddc_i2c = combios_setup_i2c_bus(rdev, DDC_VGA, 0, 0); |
| 1942 | hpd.hpd = RADEON_HPD_NONE; | 1942 | hpd.hpd = RADEON_HPD_NONE; |
| 1943 | radeon_add_legacy_encoder(dev, | 1943 | radeon_add_legacy_encoder(dev, |
| 1944 | radeon_get_encoder_id(dev, | 1944 | radeon_get_encoder_enum(dev, |
| 1945 | ATOM_DEVICE_CRT1_SUPPORT, | 1945 | ATOM_DEVICE_CRT1_SUPPORT, |
| 1946 | 1), | 1946 | 1), |
| 1947 | ATOM_DEVICE_CRT1_SUPPORT); | 1947 | ATOM_DEVICE_CRT1_SUPPORT); |
| @@ -1952,7 +1952,7 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev) | |||
| 1952 | ddc_i2c = combios_setup_i2c_bus(rdev, DDC_CRT2, 0, 0); | 1952 | ddc_i2c = combios_setup_i2c_bus(rdev, DDC_CRT2, 0, 0); |
| 1953 | hpd.hpd = RADEON_HPD_NONE; | 1953 | hpd.hpd = RADEON_HPD_NONE; |
| 1954 | radeon_add_legacy_encoder(dev, | 1954 | radeon_add_legacy_encoder(dev, |
| 1955 | radeon_get_encoder_id(dev, | 1955 | radeon_get_encoder_enum(dev, |
| 1956 | ATOM_DEVICE_CRT2_SUPPORT, | 1956 | ATOM_DEVICE_CRT2_SUPPORT, |
| 1957 | 2), | 1957 | 2), |
| 1958 | ATOM_DEVICE_CRT2_SUPPORT); | 1958 | ATOM_DEVICE_CRT2_SUPPORT); |
| @@ -2109,7 +2109,7 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev) | |||
| 2109 | else | 2109 | else |
| 2110 | devices = ATOM_DEVICE_DFP1_SUPPORT; | 2110 | devices = ATOM_DEVICE_DFP1_SUPPORT; |
| 2111 | radeon_add_legacy_encoder(dev, | 2111 | radeon_add_legacy_encoder(dev, |
| 2112 | radeon_get_encoder_id | 2112 | radeon_get_encoder_enum |
| 2113 | (dev, devices, 0), | 2113 | (dev, devices, 0), |
| 2114 | devices); | 2114 | devices); |
| 2115 | radeon_add_legacy_connector(dev, i, devices, | 2115 | radeon_add_legacy_connector(dev, i, devices, |
| @@ -2123,7 +2123,7 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev) | |||
| 2123 | if (tmp & 0x1) { | 2123 | if (tmp & 0x1) { |
| 2124 | devices = ATOM_DEVICE_CRT2_SUPPORT; | 2124 | devices = ATOM_DEVICE_CRT2_SUPPORT; |
| 2125 | radeon_add_legacy_encoder(dev, | 2125 | radeon_add_legacy_encoder(dev, |
| 2126 | radeon_get_encoder_id | 2126 | radeon_get_encoder_enum |
| 2127 | (dev, | 2127 | (dev, |
| 2128 | ATOM_DEVICE_CRT2_SUPPORT, | 2128 | ATOM_DEVICE_CRT2_SUPPORT, |
| 2129 | 2), | 2129 | 2), |
| @@ -2131,7 +2131,7 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev) | |||
| 2131 | } else { | 2131 | } else { |
| 2132 | devices = ATOM_DEVICE_CRT1_SUPPORT; | 2132 | devices = ATOM_DEVICE_CRT1_SUPPORT; |
| 2133 | radeon_add_legacy_encoder(dev, | 2133 | radeon_add_legacy_encoder(dev, |
| 2134 | radeon_get_encoder_id | 2134 | radeon_get_encoder_enum |
| 2135 | (dev, | 2135 | (dev, |
| 2136 | ATOM_DEVICE_CRT1_SUPPORT, | 2136 | ATOM_DEVICE_CRT1_SUPPORT, |
| 2137 | 1), | 2137 | 1), |
| @@ -2151,7 +2151,7 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev) | |||
| 2151 | if (tmp & 0x1) { | 2151 | if (tmp & 0x1) { |
| 2152 | devices |= ATOM_DEVICE_CRT2_SUPPORT; | 2152 | devices |= ATOM_DEVICE_CRT2_SUPPORT; |
| 2153 | radeon_add_legacy_encoder(dev, | 2153 | radeon_add_legacy_encoder(dev, |
| 2154 | radeon_get_encoder_id | 2154 | radeon_get_encoder_enum |
| 2155 | (dev, | 2155 | (dev, |
| 2156 | ATOM_DEVICE_CRT2_SUPPORT, | 2156 | ATOM_DEVICE_CRT2_SUPPORT, |
| 2157 | 2), | 2157 | 2), |
| @@ -2159,7 +2159,7 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev) | |||
| 2159 | } else { | 2159 | } else { |
| 2160 | devices |= ATOM_DEVICE_CRT1_SUPPORT; | 2160 | devices |= ATOM_DEVICE_CRT1_SUPPORT; |
| 2161 | radeon_add_legacy_encoder(dev, | 2161 | radeon_add_legacy_encoder(dev, |
| 2162 | radeon_get_encoder_id | 2162 | radeon_get_encoder_enum |
| 2163 | (dev, | 2163 | (dev, |
| 2164 | ATOM_DEVICE_CRT1_SUPPORT, | 2164 | ATOM_DEVICE_CRT1_SUPPORT, |
| 2165 | 1), | 2165 | 1), |
| @@ -2168,7 +2168,7 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev) | |||
| 2168 | if ((tmp >> 4) & 0x1) { | 2168 | if ((tmp >> 4) & 0x1) { |
| 2169 | devices |= ATOM_DEVICE_DFP2_SUPPORT; | 2169 | devices |= ATOM_DEVICE_DFP2_SUPPORT; |
| 2170 | radeon_add_legacy_encoder(dev, | 2170 | radeon_add_legacy_encoder(dev, |
| 2171 | radeon_get_encoder_id | 2171 | radeon_get_encoder_enum |
| 2172 | (dev, | 2172 | (dev, |
| 2173 | ATOM_DEVICE_DFP2_SUPPORT, | 2173 | ATOM_DEVICE_DFP2_SUPPORT, |
| 2174 | 0), | 2174 | 0), |
| @@ -2177,7 +2177,7 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev) | |||
| 2177 | } else { | 2177 | } else { |
| 2178 | devices |= ATOM_DEVICE_DFP1_SUPPORT; | 2178 | devices |= ATOM_DEVICE_DFP1_SUPPORT; |
| 2179 | radeon_add_legacy_encoder(dev, | 2179 | radeon_add_legacy_encoder(dev, |
| 2180 | radeon_get_encoder_id | 2180 | radeon_get_encoder_enum |
| 2181 | (dev, | 2181 | (dev, |
| 2182 | ATOM_DEVICE_DFP1_SUPPORT, | 2182 | ATOM_DEVICE_DFP1_SUPPORT, |
| 2183 | 0), | 2183 | 0), |
| @@ -2202,7 +2202,7 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev) | |||
| 2202 | connector_object_id = CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I; | 2202 | connector_object_id = CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I; |
| 2203 | } | 2203 | } |
| 2204 | radeon_add_legacy_encoder(dev, | 2204 | radeon_add_legacy_encoder(dev, |
| 2205 | radeon_get_encoder_id | 2205 | radeon_get_encoder_enum |
| 2206 | (dev, devices, 0), | 2206 | (dev, devices, 0), |
| 2207 | devices); | 2207 | devices); |
| 2208 | radeon_add_legacy_connector(dev, i, devices, | 2208 | radeon_add_legacy_connector(dev, i, devices, |
| @@ -2215,7 +2215,7 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev) | |||
| 2215 | case CONNECTOR_CTV_LEGACY: | 2215 | case CONNECTOR_CTV_LEGACY: |
| 2216 | case CONNECTOR_STV_LEGACY: | 2216 | case CONNECTOR_STV_LEGACY: |
| 2217 | radeon_add_legacy_encoder(dev, | 2217 | radeon_add_legacy_encoder(dev, |
| 2218 | radeon_get_encoder_id | 2218 | radeon_get_encoder_enum |
| 2219 | (dev, | 2219 | (dev, |
| 2220 | ATOM_DEVICE_TV1_SUPPORT, | 2220 | ATOM_DEVICE_TV1_SUPPORT, |
| 2221 | 2), | 2221 | 2), |
| @@ -2242,12 +2242,12 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev) | |||
| 2242 | DRM_DEBUG_KMS("Found DFP table, assuming DVI connector\n"); | 2242 | DRM_DEBUG_KMS("Found DFP table, assuming DVI connector\n"); |
| 2243 | 2243 | ||
| 2244 | radeon_add_legacy_encoder(dev, | 2244 | radeon_add_legacy_encoder(dev, |
| 2245 | radeon_get_encoder_id(dev, | 2245 | radeon_get_encoder_enum(dev, |
| 2246 | ATOM_DEVICE_CRT1_SUPPORT, | 2246 | ATOM_DEVICE_CRT1_SUPPORT, |
| 2247 | 1), | 2247 | 1), |
| 2248 | ATOM_DEVICE_CRT1_SUPPORT); | 2248 | ATOM_DEVICE_CRT1_SUPPORT); |
| 2249 | radeon_add_legacy_encoder(dev, | 2249 | radeon_add_legacy_encoder(dev, |
| 2250 | radeon_get_encoder_id(dev, | 2250 | radeon_get_encoder_enum(dev, |
| 2251 | ATOM_DEVICE_DFP1_SUPPORT, | 2251 | ATOM_DEVICE_DFP1_SUPPORT, |
| 2252 | 0), | 2252 | 0), |
| 2253 | ATOM_DEVICE_DFP1_SUPPORT); | 2253 | ATOM_DEVICE_DFP1_SUPPORT); |
| @@ -2268,7 +2268,7 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev) | |||
| 2268 | DRM_DEBUG_KMS("Found CRT table, assuming VGA connector\n"); | 2268 | DRM_DEBUG_KMS("Found CRT table, assuming VGA connector\n"); |
| 2269 | if (crt_info) { | 2269 | if (crt_info) { |
| 2270 | radeon_add_legacy_encoder(dev, | 2270 | radeon_add_legacy_encoder(dev, |
| 2271 | radeon_get_encoder_id(dev, | 2271 | radeon_get_encoder_enum(dev, |
| 2272 | ATOM_DEVICE_CRT1_SUPPORT, | 2272 | ATOM_DEVICE_CRT1_SUPPORT, |
| 2273 | 1), | 2273 | 1), |
| 2274 | ATOM_DEVICE_CRT1_SUPPORT); | 2274 | ATOM_DEVICE_CRT1_SUPPORT); |
| @@ -2297,7 +2297,7 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev) | |||
| 2297 | COMBIOS_LCD_DDC_INFO_TABLE); | 2297 | COMBIOS_LCD_DDC_INFO_TABLE); |
| 2298 | 2298 | ||
| 2299 | radeon_add_legacy_encoder(dev, | 2299 | radeon_add_legacy_encoder(dev, |
| 2300 | radeon_get_encoder_id(dev, | 2300 | radeon_get_encoder_enum(dev, |
| 2301 | ATOM_DEVICE_LCD1_SUPPORT, | 2301 | ATOM_DEVICE_LCD1_SUPPORT, |
| 2302 | 0), | 2302 | 0), |
| 2303 | ATOM_DEVICE_LCD1_SUPPORT); | 2303 | ATOM_DEVICE_LCD1_SUPPORT); |
| @@ -2351,7 +2351,7 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev) | |||
| 2351 | hpd.hpd = RADEON_HPD_NONE; | 2351 | hpd.hpd = RADEON_HPD_NONE; |
| 2352 | ddc_i2c.valid = false; | 2352 | ddc_i2c.valid = false; |
| 2353 | radeon_add_legacy_encoder(dev, | 2353 | radeon_add_legacy_encoder(dev, |
| 2354 | radeon_get_encoder_id | 2354 | radeon_get_encoder_enum |
| 2355 | (dev, | 2355 | (dev, |
| 2356 | ATOM_DEVICE_TV1_SUPPORT, | 2356 | ATOM_DEVICE_TV1_SUPPORT, |
| 2357 | 2), | 2357 | 2), |
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c index 47c4b276d30c..31a09cd279ab 100644 --- a/drivers/gpu/drm/radeon/radeon_connectors.c +++ b/drivers/gpu/drm/radeon/radeon_connectors.c | |||
| @@ -977,24 +977,25 @@ static enum drm_connector_status radeon_dp_detect(struct drm_connector *connecto | |||
| 977 | struct radeon_connector *radeon_connector = to_radeon_connector(connector); | 977 | struct radeon_connector *radeon_connector = to_radeon_connector(connector); |
| 978 | enum drm_connector_status ret = connector_status_disconnected; | 978 | enum drm_connector_status ret = connector_status_disconnected; |
| 979 | struct radeon_connector_atom_dig *radeon_dig_connector = radeon_connector->con_priv; | 979 | struct radeon_connector_atom_dig *radeon_dig_connector = radeon_connector->con_priv; |
| 980 | u8 sink_type; | ||
| 981 | 980 | ||
| 982 | if (radeon_connector->edid) { | 981 | if (radeon_connector->edid) { |
| 983 | kfree(radeon_connector->edid); | 982 | kfree(radeon_connector->edid); |
| 984 | radeon_connector->edid = NULL; | 983 | radeon_connector->edid = NULL; |
| 985 | } | 984 | } |
| 986 | 985 | ||
| 987 | sink_type = radeon_dp_getsinktype(radeon_connector); | 986 | if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) { |
| 988 | if ((sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) || | 987 | /* eDP is always DP */ |
| 989 | (sink_type == CONNECTOR_OBJECT_ID_eDP)) { | 988 | radeon_dig_connector->dp_sink_type = CONNECTOR_OBJECT_ID_DISPLAYPORT; |
| 990 | if (radeon_dp_getdpcd(radeon_connector)) { | 989 | if (radeon_dp_getdpcd(radeon_connector)) |
| 991 | radeon_dig_connector->dp_sink_type = sink_type; | ||
| 992 | ret = connector_status_connected; | 990 | ret = connector_status_connected; |
| 993 | } | ||
| 994 | } else { | 991 | } else { |
| 995 | if (radeon_ddc_probe(radeon_connector)) { | 992 | radeon_dig_connector->dp_sink_type = radeon_dp_getsinktype(radeon_connector); |
| 996 | radeon_dig_connector->dp_sink_type = sink_type; | 993 | if (radeon_dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) { |
| 997 | ret = connector_status_connected; | 994 | if (radeon_dp_getdpcd(radeon_connector)) |
| 995 | ret = connector_status_connected; | ||
| 996 | } else { | ||
| 997 | if (radeon_ddc_probe(radeon_connector)) | ||
| 998 | ret = connector_status_connected; | ||
| 998 | } | 999 | } |
| 999 | } | 1000 | } |
| 1000 | 1001 | ||
| @@ -1037,7 +1038,6 @@ radeon_add_atom_connector(struct drm_device *dev, | |||
| 1037 | uint32_t supported_device, | 1038 | uint32_t supported_device, |
| 1038 | int connector_type, | 1039 | int connector_type, |
| 1039 | struct radeon_i2c_bus_rec *i2c_bus, | 1040 | struct radeon_i2c_bus_rec *i2c_bus, |
| 1040 | bool linkb, | ||
| 1041 | uint32_t igp_lane_info, | 1041 | uint32_t igp_lane_info, |
| 1042 | uint16_t connector_object_id, | 1042 | uint16_t connector_object_id, |
| 1043 | struct radeon_hpd *hpd, | 1043 | struct radeon_hpd *hpd, |
| @@ -1128,7 +1128,6 @@ radeon_add_atom_connector(struct drm_device *dev, | |||
| 1128 | radeon_dig_connector = kzalloc(sizeof(struct radeon_connector_atom_dig), GFP_KERNEL); | 1128 | radeon_dig_connector = kzalloc(sizeof(struct radeon_connector_atom_dig), GFP_KERNEL); |
| 1129 | if (!radeon_dig_connector) | 1129 | if (!radeon_dig_connector) |
| 1130 | goto failed; | 1130 | goto failed; |
| 1131 | radeon_dig_connector->linkb = linkb; | ||
| 1132 | radeon_dig_connector->igp_lane_info = igp_lane_info; | 1131 | radeon_dig_connector->igp_lane_info = igp_lane_info; |
| 1133 | radeon_connector->con_priv = radeon_dig_connector; | 1132 | radeon_connector->con_priv = radeon_dig_connector; |
| 1134 | drm_connector_init(dev, &radeon_connector->base, &radeon_dvi_connector_funcs, connector_type); | 1133 | drm_connector_init(dev, &radeon_connector->base, &radeon_dvi_connector_funcs, connector_type); |
| @@ -1158,7 +1157,6 @@ radeon_add_atom_connector(struct drm_device *dev, | |||
| 1158 | radeon_dig_connector = kzalloc(sizeof(struct radeon_connector_atom_dig), GFP_KERNEL); | 1157 | radeon_dig_connector = kzalloc(sizeof(struct radeon_connector_atom_dig), GFP_KERNEL); |
| 1159 | if (!radeon_dig_connector) | 1158 | if (!radeon_dig_connector) |
| 1160 | goto failed; | 1159 | goto failed; |
| 1161 | radeon_dig_connector->linkb = linkb; | ||
| 1162 | radeon_dig_connector->igp_lane_info = igp_lane_info; | 1160 | radeon_dig_connector->igp_lane_info = igp_lane_info; |
| 1163 | radeon_connector->con_priv = radeon_dig_connector; | 1161 | radeon_connector->con_priv = radeon_dig_connector; |
| 1164 | drm_connector_init(dev, &radeon_connector->base, &radeon_dvi_connector_funcs, connector_type); | 1162 | drm_connector_init(dev, &radeon_connector->base, &radeon_dvi_connector_funcs, connector_type); |
| @@ -1182,7 +1180,6 @@ radeon_add_atom_connector(struct drm_device *dev, | |||
| 1182 | radeon_dig_connector = kzalloc(sizeof(struct radeon_connector_atom_dig), GFP_KERNEL); | 1180 | radeon_dig_connector = kzalloc(sizeof(struct radeon_connector_atom_dig), GFP_KERNEL); |
| 1183 | if (!radeon_dig_connector) | 1181 | if (!radeon_dig_connector) |
| 1184 | goto failed; | 1182 | goto failed; |
| 1185 | radeon_dig_connector->linkb = linkb; | ||
| 1186 | radeon_dig_connector->igp_lane_info = igp_lane_info; | 1183 | radeon_dig_connector->igp_lane_info = igp_lane_info; |
| 1187 | radeon_connector->con_priv = radeon_dig_connector; | 1184 | radeon_connector->con_priv = radeon_dig_connector; |
| 1188 | drm_connector_init(dev, &radeon_connector->base, &radeon_dp_connector_funcs, connector_type); | 1185 | drm_connector_init(dev, &radeon_connector->base, &radeon_dp_connector_funcs, connector_type); |
| @@ -1229,7 +1226,6 @@ radeon_add_atom_connector(struct drm_device *dev, | |||
| 1229 | radeon_dig_connector = kzalloc(sizeof(struct radeon_connector_atom_dig), GFP_KERNEL); | 1226 | radeon_dig_connector = kzalloc(sizeof(struct radeon_connector_atom_dig), GFP_KERNEL); |
| 1230 | if (!radeon_dig_connector) | 1227 | if (!radeon_dig_connector) |
| 1231 | goto failed; | 1228 | goto failed; |
| 1232 | radeon_dig_connector->linkb = linkb; | ||
| 1233 | radeon_dig_connector->igp_lane_info = igp_lane_info; | 1229 | radeon_dig_connector->igp_lane_info = igp_lane_info; |
| 1234 | radeon_connector->con_priv = radeon_dig_connector; | 1230 | radeon_connector->con_priv = radeon_dig_connector; |
| 1235 | drm_connector_init(dev, &radeon_connector->base, &radeon_lvds_connector_funcs, connector_type); | 1231 | drm_connector_init(dev, &radeon_connector->base, &radeon_lvds_connector_funcs, connector_type); |
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c index 4f7a170d1566..69b3c2291e92 100644 --- a/drivers/gpu/drm/radeon/radeon_device.c +++ b/drivers/gpu/drm/radeon/radeon_device.c | |||
| @@ -199,7 +199,7 @@ void radeon_vram_location(struct radeon_device *rdev, struct radeon_mc *mc, u64 | |||
| 199 | mc->mc_vram_size = mc->aper_size; | 199 | mc->mc_vram_size = mc->aper_size; |
| 200 | } | 200 | } |
| 201 | mc->vram_end = mc->vram_start + mc->mc_vram_size - 1; | 201 | mc->vram_end = mc->vram_start + mc->mc_vram_size - 1; |
| 202 | if (rdev->flags & RADEON_IS_AGP && mc->vram_end > mc->gtt_start && mc->vram_end <= mc->gtt_end) { | 202 | if (rdev->flags & RADEON_IS_AGP && mc->vram_end > mc->gtt_start && mc->vram_start <= mc->gtt_end) { |
| 203 | dev_warn(rdev->dev, "limiting VRAM to PCI aperture size\n"); | 203 | dev_warn(rdev->dev, "limiting VRAM to PCI aperture size\n"); |
| 204 | mc->real_vram_size = mc->aper_size; | 204 | mc->real_vram_size = mc->aper_size; |
| 205 | mc->mc_vram_size = mc->aper_size; | 205 | mc->mc_vram_size = mc->aper_size; |
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c index 5764f4d3b4f1..6dd434ad2429 100644 --- a/drivers/gpu/drm/radeon/radeon_display.c +++ b/drivers/gpu/drm/radeon/radeon_display.c | |||
| @@ -1094,6 +1094,18 @@ void radeon_modeset_fini(struct radeon_device *rdev) | |||
| 1094 | radeon_i2c_fini(rdev); | 1094 | radeon_i2c_fini(rdev); |
| 1095 | } | 1095 | } |
| 1096 | 1096 | ||
| 1097 | static bool is_hdtv_mode(struct drm_display_mode *mode) | ||
| 1098 | { | ||
| 1099 | /* try and guess if this is a tv or a monitor */ | ||
| 1100 | if ((mode->vdisplay == 480 && mode->hdisplay == 720) || /* 480p */ | ||
| 1101 | (mode->vdisplay == 576) || /* 576p */ | ||
| 1102 | (mode->vdisplay == 720) || /* 720p */ | ||
| 1103 | (mode->vdisplay == 1080)) /* 1080p */ | ||
| 1104 | return true; | ||
| 1105 | else | ||
| 1106 | return false; | ||
| 1107 | } | ||
| 1108 | |||
| 1097 | bool radeon_crtc_scaling_mode_fixup(struct drm_crtc *crtc, | 1109 | bool radeon_crtc_scaling_mode_fixup(struct drm_crtc *crtc, |
| 1098 | struct drm_display_mode *mode, | 1110 | struct drm_display_mode *mode, |
| 1099 | struct drm_display_mode *adjusted_mode) | 1111 | struct drm_display_mode *adjusted_mode) |
| @@ -1141,7 +1153,8 @@ bool radeon_crtc_scaling_mode_fixup(struct drm_crtc *crtc, | |||
| 1141 | if (ASIC_IS_AVIVO(rdev) && | 1153 | if (ASIC_IS_AVIVO(rdev) && |
| 1142 | ((radeon_encoder->underscan_type == UNDERSCAN_ON) || | 1154 | ((radeon_encoder->underscan_type == UNDERSCAN_ON) || |
| 1143 | ((radeon_encoder->underscan_type == UNDERSCAN_AUTO) && | 1155 | ((radeon_encoder->underscan_type == UNDERSCAN_AUTO) && |
| 1144 | drm_detect_hdmi_monitor(radeon_connector->edid)))) { | 1156 | drm_detect_hdmi_monitor(radeon_connector->edid) && |
| 1157 | is_hdtv_mode(mode)))) { | ||
| 1145 | radeon_crtc->h_border = (mode->hdisplay >> 5) + 16; | 1158 | radeon_crtc->h_border = (mode->hdisplay >> 5) + 16; |
| 1146 | radeon_crtc->v_border = (mode->vdisplay >> 5) + 16; | 1159 | radeon_crtc->v_border = (mode->vdisplay >> 5) + 16; |
| 1147 | radeon_crtc->rmx_type = RMX_FULL; | 1160 | radeon_crtc->rmx_type = RMX_FULL; |
diff --git a/drivers/gpu/drm/radeon/radeon_encoders.c b/drivers/gpu/drm/radeon/radeon_encoders.c index 263c8098d7dd..2c293e8304d6 100644 --- a/drivers/gpu/drm/radeon/radeon_encoders.c +++ b/drivers/gpu/drm/radeon/radeon_encoders.c | |||
| @@ -81,7 +81,7 @@ void radeon_setup_encoder_clones(struct drm_device *dev) | |||
| 81 | } | 81 | } |
| 82 | 82 | ||
| 83 | uint32_t | 83 | uint32_t |
| 84 | radeon_get_encoder_id(struct drm_device *dev, uint32_t supported_device, uint8_t dac) | 84 | radeon_get_encoder_enum(struct drm_device *dev, uint32_t supported_device, uint8_t dac) |
| 85 | { | 85 | { |
| 86 | struct radeon_device *rdev = dev->dev_private; | 86 | struct radeon_device *rdev = dev->dev_private; |
| 87 | uint32_t ret = 0; | 87 | uint32_t ret = 0; |
| @@ -97,59 +97,59 @@ radeon_get_encoder_id(struct drm_device *dev, uint32_t supported_device, uint8_t | |||
| 97 | if ((rdev->family == CHIP_RS300) || | 97 | if ((rdev->family == CHIP_RS300) || |
| 98 | (rdev->family == CHIP_RS400) || | 98 | (rdev->family == CHIP_RS400) || |
| 99 | (rdev->family == CHIP_RS480)) | 99 | (rdev->family == CHIP_RS480)) |
| 100 | ret = ENCODER_OBJECT_ID_INTERNAL_DAC2; | 100 | ret = ENCODER_INTERNAL_DAC2_ENUM_ID1; |
| 101 | else if (ASIC_IS_AVIVO(rdev)) | 101 | else if (ASIC_IS_AVIVO(rdev)) |
| 102 | ret = ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1; | 102 | ret = ENCODER_INTERNAL_KLDSCP_DAC1_ENUM_ID1; |
| 103 | else | 103 | else |
| 104 | ret = ENCODER_OBJECT_ID_INTERNAL_DAC1; | 104 | ret = ENCODER_INTERNAL_DAC1_ENUM_ID1; |
| 105 | break; | 105 | break; |
| 106 | case 2: /* dac b */ | 106 | case 2: /* dac b */ |
| 107 | if (ASIC_IS_AVIVO(rdev)) | 107 | if (ASIC_IS_AVIVO(rdev)) |
| 108 | ret = ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2; | 108 | ret = ENCODER_INTERNAL_KLDSCP_DAC2_ENUM_ID1; |
| 109 | else { | 109 | else { |
| 110 | /*if (rdev->family == CHIP_R200) | 110 | /*if (rdev->family == CHIP_R200) |
| 111 | ret = ENCODER_OBJECT_ID_INTERNAL_DVO1; | 111 | ret = ENCODER_INTERNAL_DVO1_ENUM_ID1; |
| 112 | else*/ | 112 | else*/ |
| 113 | ret = ENCODER_OBJECT_ID_INTERNAL_DAC2; | 113 | ret = ENCODER_INTERNAL_DAC2_ENUM_ID1; |
| 114 | } | 114 | } |
| 115 | break; | 115 | break; |
| 116 | case 3: /* external dac */ | 116 | case 3: /* external dac */ |
| 117 | if (ASIC_IS_AVIVO(rdev)) | 117 | if (ASIC_IS_AVIVO(rdev)) |
| 118 | ret = ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1; | 118 | ret = ENCODER_INTERNAL_KLDSCP_DVO1_ENUM_ID1; |
| 119 | else | 119 | else |
| 120 | ret = ENCODER_OBJECT_ID_INTERNAL_DVO1; | 120 | ret = ENCODER_INTERNAL_DVO1_ENUM_ID1; |
| 121 | break; | 121 | break; |
| 122 | } | 122 | } |
| 123 | break; | 123 | break; |
| 124 | case ATOM_DEVICE_LCD1_SUPPORT: | 124 | case ATOM_DEVICE_LCD1_SUPPORT: |
| 125 | if (ASIC_IS_AVIVO(rdev)) | 125 | if (ASIC_IS_AVIVO(rdev)) |
| 126 | ret = ENCODER_OBJECT_ID_INTERNAL_LVTM1; | 126 | ret = ENCODER_INTERNAL_LVTM1_ENUM_ID1; |
| 127 | else | 127 | else |
| 128 | ret = ENCODER_OBJECT_ID_INTERNAL_LVDS; | 128 | ret = ENCODER_INTERNAL_LVDS_ENUM_ID1; |
| 129 | break; | 129 | break; |
| 130 | case ATOM_DEVICE_DFP1_SUPPORT: | 130 | case ATOM_DEVICE_DFP1_SUPPORT: |
| 131 | if ((rdev->family == CHIP_RS300) || | 131 | if ((rdev->family == CHIP_RS300) || |
| 132 | (rdev->family == CHIP_RS400) || | 132 | (rdev->family == CHIP_RS400) || |
| 133 | (rdev->family == CHIP_RS480)) | 133 | (rdev->family == CHIP_RS480)) |
| 134 | ret = ENCODER_OBJECT_ID_INTERNAL_DVO1; | 134 | ret = ENCODER_INTERNAL_DVO1_ENUM_ID1; |
| 135 | else if (ASIC_IS_AVIVO(rdev)) | 135 | else if (ASIC_IS_AVIVO(rdev)) |
| 136 | ret = ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1; | 136 | ret = ENCODER_INTERNAL_KLDSCP_TMDS1_ENUM_ID1; |
| 137 | else | 137 | else |
| 138 | ret = ENCODER_OBJECT_ID_INTERNAL_TMDS1; | 138 | ret = ENCODER_INTERNAL_TMDS1_ENUM_ID1; |
| 139 | break; | 139 | break; |
| 140 | case ATOM_DEVICE_LCD2_SUPPORT: | 140 | case ATOM_DEVICE_LCD2_SUPPORT: |
| 141 | case ATOM_DEVICE_DFP2_SUPPORT: | 141 | case ATOM_DEVICE_DFP2_SUPPORT: |
| 142 | if ((rdev->family == CHIP_RS600) || | 142 | if ((rdev->family == CHIP_RS600) || |
| 143 | (rdev->family == CHIP_RS690) || | 143 | (rdev->family == CHIP_RS690) || |
| 144 | (rdev->family == CHIP_RS740)) | 144 | (rdev->family == CHIP_RS740)) |
| 145 | ret = ENCODER_OBJECT_ID_INTERNAL_DDI; | 145 | ret = ENCODER_INTERNAL_DDI_ENUM_ID1; |
| 146 | else if (ASIC_IS_AVIVO(rdev)) | 146 | else if (ASIC_IS_AVIVO(rdev)) |
| 147 | ret = ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1; | 147 | ret = ENCODER_INTERNAL_KLDSCP_DVO1_ENUM_ID1; |
| 148 | else | 148 | else |
| 149 | ret = ENCODER_OBJECT_ID_INTERNAL_DVO1; | 149 | ret = ENCODER_INTERNAL_DVO1_ENUM_ID1; |
| 150 | break; | 150 | break; |
| 151 | case ATOM_DEVICE_DFP3_SUPPORT: | 151 | case ATOM_DEVICE_DFP3_SUPPORT: |
| 152 | ret = ENCODER_OBJECT_ID_INTERNAL_LVTM1; | 152 | ret = ENCODER_INTERNAL_LVTM1_ENUM_ID1; |
| 153 | break; | 153 | break; |
| 154 | } | 154 | } |
| 155 | 155 | ||
| @@ -228,32 +228,6 @@ radeon_get_connector_for_encoder(struct drm_encoder *encoder) | |||
| 228 | return NULL; | 228 | return NULL; |
| 229 | } | 229 | } |
| 230 | 230 | ||
| 231 | static struct radeon_connector_atom_dig * | ||
| 232 | radeon_get_atom_connector_priv_from_encoder(struct drm_encoder *encoder) | ||
| 233 | { | ||
| 234 | struct drm_device *dev = encoder->dev; | ||
| 235 | struct radeon_device *rdev = dev->dev_private; | ||
| 236 | struct drm_connector *connector; | ||
| 237 | struct radeon_connector *radeon_connector; | ||
| 238 | struct radeon_connector_atom_dig *dig_connector; | ||
| 239 | |||
| 240 | if (!rdev->is_atom_bios) | ||
| 241 | return NULL; | ||
| 242 | |||
| 243 | connector = radeon_get_connector_for_encoder(encoder); | ||
| 244 | if (!connector) | ||
| 245 | return NULL; | ||
| 246 | |||
| 247 | radeon_connector = to_radeon_connector(connector); | ||
| 248 | |||
| 249 | if (!radeon_connector->con_priv) | ||
| 250 | return NULL; | ||
| 251 | |||
| 252 | dig_connector = radeon_connector->con_priv; | ||
| 253 | |||
| 254 | return dig_connector; | ||
| 255 | } | ||
| 256 | |||
| 257 | void radeon_panel_mode_fixup(struct drm_encoder *encoder, | 231 | void radeon_panel_mode_fixup(struct drm_encoder *encoder, |
| 258 | struct drm_display_mode *adjusted_mode) | 232 | struct drm_display_mode *adjusted_mode) |
| 259 | { | 233 | { |
| @@ -512,14 +486,12 @@ atombios_digital_setup(struct drm_encoder *encoder, int action) | |||
| 512 | struct radeon_device *rdev = dev->dev_private; | 486 | struct radeon_device *rdev = dev->dev_private; |
| 513 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); | 487 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); |
| 514 | struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; | 488 | struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; |
| 515 | struct radeon_connector_atom_dig *dig_connector = | ||
| 516 | radeon_get_atom_connector_priv_from_encoder(encoder); | ||
| 517 | union lvds_encoder_control args; | 489 | union lvds_encoder_control args; |
| 518 | int index = 0; | 490 | int index = 0; |
| 519 | int hdmi_detected = 0; | 491 | int hdmi_detected = 0; |
| 520 | uint8_t frev, crev; | 492 | uint8_t frev, crev; |
| 521 | 493 | ||
| 522 | if (!dig || !dig_connector) | 494 | if (!dig) |
| 523 | return; | 495 | return; |
| 524 | 496 | ||
| 525 | if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI) | 497 | if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI) |
| @@ -562,7 +534,7 @@ atombios_digital_setup(struct drm_encoder *encoder, int action) | |||
| 562 | if (dig->lvds_misc & ATOM_PANEL_MISC_888RGB) | 534 | if (dig->lvds_misc & ATOM_PANEL_MISC_888RGB) |
| 563 | args.v1.ucMisc |= (1 << 1); | 535 | args.v1.ucMisc |= (1 << 1); |
| 564 | } else { | 536 | } else { |
| 565 | if (dig_connector->linkb) | 537 | if (dig->linkb) |
| 566 | args.v1.ucMisc |= PANEL_ENCODER_MISC_TMDS_LINKB; | 538 | args.v1.ucMisc |= PANEL_ENCODER_MISC_TMDS_LINKB; |
| 567 | if (radeon_encoder->pixel_clock > 165000) | 539 | if (radeon_encoder->pixel_clock > 165000) |
| 568 | args.v1.ucMisc |= PANEL_ENCODER_MISC_DUAL; | 540 | args.v1.ucMisc |= PANEL_ENCODER_MISC_DUAL; |
| @@ -601,7 +573,7 @@ atombios_digital_setup(struct drm_encoder *encoder, int action) | |||
| 601 | args.v2.ucTemporal |= PANEL_ENCODER_TEMPORAL_LEVEL_4; | 573 | args.v2.ucTemporal |= PANEL_ENCODER_TEMPORAL_LEVEL_4; |
| 602 | } | 574 | } |
| 603 | } else { | 575 | } else { |
| 604 | if (dig_connector->linkb) | 576 | if (dig->linkb) |
| 605 | args.v2.ucMisc |= PANEL_ENCODER_MISC_TMDS_LINKB; | 577 | args.v2.ucMisc |= PANEL_ENCODER_MISC_TMDS_LINKB; |
| 606 | if (radeon_encoder->pixel_clock > 165000) | 578 | if (radeon_encoder->pixel_clock > 165000) |
| 607 | args.v2.ucMisc |= PANEL_ENCODER_MISC_DUAL; | 579 | args.v2.ucMisc |= PANEL_ENCODER_MISC_DUAL; |
| @@ -623,6 +595,8 @@ atombios_digital_setup(struct drm_encoder *encoder, int action) | |||
| 623 | int | 595 | int |
| 624 | atombios_get_encoder_mode(struct drm_encoder *encoder) | 596 | atombios_get_encoder_mode(struct drm_encoder *encoder) |
| 625 | { | 597 | { |
| 598 | struct drm_device *dev = encoder->dev; | ||
| 599 | struct radeon_device *rdev = dev->dev_private; | ||
| 626 | struct drm_connector *connector; | 600 | struct drm_connector *connector; |
| 627 | struct radeon_connector *radeon_connector; | 601 | struct radeon_connector *radeon_connector; |
| 628 | struct radeon_connector_atom_dig *dig_connector; | 602 | struct radeon_connector_atom_dig *dig_connector; |
| @@ -636,9 +610,13 @@ atombios_get_encoder_mode(struct drm_encoder *encoder) | |||
| 636 | switch (connector->connector_type) { | 610 | switch (connector->connector_type) { |
| 637 | case DRM_MODE_CONNECTOR_DVII: | 611 | case DRM_MODE_CONNECTOR_DVII: |
| 638 | case DRM_MODE_CONNECTOR_HDMIB: /* HDMI-B is basically DL-DVI; analog works fine */ | 612 | case DRM_MODE_CONNECTOR_HDMIB: /* HDMI-B is basically DL-DVI; analog works fine */ |
| 639 | if (drm_detect_hdmi_monitor(radeon_connector->edid)) | 613 | if (drm_detect_hdmi_monitor(radeon_connector->edid)) { |
| 640 | return ATOM_ENCODER_MODE_HDMI; | 614 | /* fix me */ |
| 641 | else if (radeon_connector->use_digital) | 615 | if (ASIC_IS_DCE4(rdev)) |
| 616 | return ATOM_ENCODER_MODE_DVI; | ||
| 617 | else | ||
| 618 | return ATOM_ENCODER_MODE_HDMI; | ||
| 619 | } else if (radeon_connector->use_digital) | ||
| 642 | return ATOM_ENCODER_MODE_DVI; | 620 | return ATOM_ENCODER_MODE_DVI; |
| 643 | else | 621 | else |
| 644 | return ATOM_ENCODER_MODE_CRT; | 622 | return ATOM_ENCODER_MODE_CRT; |
| @@ -646,9 +624,13 @@ atombios_get_encoder_mode(struct drm_encoder *encoder) | |||
| 646 | case DRM_MODE_CONNECTOR_DVID: | 624 | case DRM_MODE_CONNECTOR_DVID: |
| 647 | case DRM_MODE_CONNECTOR_HDMIA: | 625 | case DRM_MODE_CONNECTOR_HDMIA: |
| 648 | default: | 626 | default: |
| 649 | if (drm_detect_hdmi_monitor(radeon_connector->edid)) | 627 | if (drm_detect_hdmi_monitor(radeon_connector->edid)) { |
| 650 | return ATOM_ENCODER_MODE_HDMI; | 628 | /* fix me */ |
| 651 | else | 629 | if (ASIC_IS_DCE4(rdev)) |
| 630 | return ATOM_ENCODER_MODE_DVI; | ||
| 631 | else | ||
| 632 | return ATOM_ENCODER_MODE_HDMI; | ||
| 633 | } else | ||
| 652 | return ATOM_ENCODER_MODE_DVI; | 634 | return ATOM_ENCODER_MODE_DVI; |
| 653 | break; | 635 | break; |
| 654 | case DRM_MODE_CONNECTOR_LVDS: | 636 | case DRM_MODE_CONNECTOR_LVDS: |
| @@ -660,9 +642,13 @@ atombios_get_encoder_mode(struct drm_encoder *encoder) | |||
| 660 | if ((dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) || | 642 | if ((dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) || |
| 661 | (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP)) | 643 | (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP)) |
| 662 | return ATOM_ENCODER_MODE_DP; | 644 | return ATOM_ENCODER_MODE_DP; |
| 663 | else if (drm_detect_hdmi_monitor(radeon_connector->edid)) | 645 | else if (drm_detect_hdmi_monitor(radeon_connector->edid)) { |
| 664 | return ATOM_ENCODER_MODE_HDMI; | 646 | /* fix me */ |
| 665 | else | 647 | if (ASIC_IS_DCE4(rdev)) |
| 648 | return ATOM_ENCODER_MODE_DVI; | ||
| 649 | else | ||
| 650 | return ATOM_ENCODER_MODE_HDMI; | ||
| 651 | } else | ||
| 666 | return ATOM_ENCODER_MODE_DVI; | 652 | return ATOM_ENCODER_MODE_DVI; |
| 667 | break; | 653 | break; |
| 668 | case DRM_MODE_CONNECTOR_DVIA: | 654 | case DRM_MODE_CONNECTOR_DVIA: |
| @@ -729,13 +715,24 @@ atombios_dig_encoder_setup(struct drm_encoder *encoder, int action) | |||
| 729 | struct radeon_device *rdev = dev->dev_private; | 715 | struct radeon_device *rdev = dev->dev_private; |
| 730 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); | 716 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); |
| 731 | struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; | 717 | struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; |
| 732 | struct radeon_connector_atom_dig *dig_connector = | 718 | struct drm_connector *connector = radeon_get_connector_for_encoder(encoder); |
| 733 | radeon_get_atom_connector_priv_from_encoder(encoder); | ||
| 734 | union dig_encoder_control args; | 719 | union dig_encoder_control args; |
| 735 | int index = 0; | 720 | int index = 0; |
| 736 | uint8_t frev, crev; | 721 | uint8_t frev, crev; |
| 722 | int dp_clock = 0; | ||
| 723 | int dp_lane_count = 0; | ||
| 724 | |||
| 725 | if (connector) { | ||
| 726 | struct radeon_connector *radeon_connector = to_radeon_connector(connector); | ||
| 727 | struct radeon_connector_atom_dig *dig_connector = | ||
| 728 | radeon_connector->con_priv; | ||
| 737 | 729 | ||
| 738 | if (!dig || !dig_connector) | 730 | dp_clock = dig_connector->dp_clock; |
| 731 | dp_lane_count = dig_connector->dp_lane_count; | ||
| 732 | } | ||
| 733 | |||
| 734 | /* no dig encoder assigned */ | ||
| 735 | if (dig->dig_encoder == -1) | ||
| 739 | return; | 736 | return; |
| 740 | 737 | ||
| 741 | memset(&args, 0, sizeof(args)); | 738 | memset(&args, 0, sizeof(args)); |
| @@ -757,9 +754,9 @@ atombios_dig_encoder_setup(struct drm_encoder *encoder, int action) | |||
| 757 | args.v1.ucEncoderMode = atombios_get_encoder_mode(encoder); | 754 | args.v1.ucEncoderMode = atombios_get_encoder_mode(encoder); |
| 758 | 755 | ||
| 759 | if (args.v1.ucEncoderMode == ATOM_ENCODER_MODE_DP) { | 756 | if (args.v1.ucEncoderMode == ATOM_ENCODER_MODE_DP) { |
| 760 | if (dig_connector->dp_clock == 270000) | 757 | if (dp_clock == 270000) |
| 761 | args.v1.ucConfig |= ATOM_ENCODER_CONFIG_DPLINKRATE_2_70GHZ; | 758 | args.v1.ucConfig |= ATOM_ENCODER_CONFIG_DPLINKRATE_2_70GHZ; |
| 762 | args.v1.ucLaneNum = dig_connector->dp_lane_count; | 759 | args.v1.ucLaneNum = dp_lane_count; |
| 763 | } else if (radeon_encoder->pixel_clock > 165000) | 760 | } else if (radeon_encoder->pixel_clock > 165000) |
| 764 | args.v1.ucLaneNum = 8; | 761 | args.v1.ucLaneNum = 8; |
| 765 | else | 762 | else |
| @@ -781,7 +778,7 @@ atombios_dig_encoder_setup(struct drm_encoder *encoder, int action) | |||
| 781 | args.v1.ucConfig = ATOM_ENCODER_CONFIG_V2_TRANSMITTER3; | 778 | args.v1.ucConfig = ATOM_ENCODER_CONFIG_V2_TRANSMITTER3; |
| 782 | break; | 779 | break; |
| 783 | } | 780 | } |
| 784 | if (dig_connector->linkb) | 781 | if (dig->linkb) |
| 785 | args.v1.ucConfig |= ATOM_ENCODER_CONFIG_LINKB; | 782 | args.v1.ucConfig |= ATOM_ENCODER_CONFIG_LINKB; |
| 786 | else | 783 | else |
| 787 | args.v1.ucConfig |= ATOM_ENCODER_CONFIG_LINKA; | 784 | args.v1.ucConfig |= ATOM_ENCODER_CONFIG_LINKA; |
| @@ -804,38 +801,47 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t | |||
| 804 | struct radeon_device *rdev = dev->dev_private; | 801 | struct radeon_device *rdev = dev->dev_private; |
| 805 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); | 802 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); |
| 806 | struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; | 803 | struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; |
| 807 | struct radeon_connector_atom_dig *dig_connector = | 804 | struct drm_connector *connector = radeon_get_connector_for_encoder(encoder); |
| 808 | radeon_get_atom_connector_priv_from_encoder(encoder); | ||
| 809 | struct drm_connector *connector; | ||
| 810 | struct radeon_connector *radeon_connector; | ||
| 811 | union dig_transmitter_control args; | 805 | union dig_transmitter_control args; |
| 812 | int index = 0; | 806 | int index = 0; |
| 813 | uint8_t frev, crev; | 807 | uint8_t frev, crev; |
| 814 | bool is_dp = false; | 808 | bool is_dp = false; |
| 815 | int pll_id = 0; | 809 | int pll_id = 0; |
| 810 | int dp_clock = 0; | ||
| 811 | int dp_lane_count = 0; | ||
| 812 | int connector_object_id = 0; | ||
| 813 | int igp_lane_info = 0; | ||
| 816 | 814 | ||
| 817 | if (!dig || !dig_connector) | 815 | if (connector) { |
| 818 | return; | 816 | struct radeon_connector *radeon_connector = to_radeon_connector(connector); |
| 817 | struct radeon_connector_atom_dig *dig_connector = | ||
| 818 | radeon_connector->con_priv; | ||
| 819 | 819 | ||
| 820 | connector = radeon_get_connector_for_encoder(encoder); | 820 | dp_clock = dig_connector->dp_clock; |
| 821 | radeon_connector = to_radeon_connector(connector); | 821 | dp_lane_count = dig_connector->dp_lane_count; |
| 822 | connector_object_id = | ||
| 823 | (radeon_connector->connector_object_id & OBJECT_ID_MASK) >> OBJECT_ID_SHIFT; | ||
| 824 | igp_lane_info = dig_connector->igp_lane_info; | ||
| 825 | } | ||
| 826 | |||
| 827 | /* no dig encoder assigned */ | ||
| 828 | if (dig->dig_encoder == -1) | ||
| 829 | return; | ||
| 822 | 830 | ||
| 823 | if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_DP) | 831 | if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_DP) |
| 824 | is_dp = true; | 832 | is_dp = true; |
| 825 | 833 | ||
| 826 | memset(&args, 0, sizeof(args)); | 834 | memset(&args, 0, sizeof(args)); |
| 827 | 835 | ||
| 828 | if (ASIC_IS_DCE32(rdev) || ASIC_IS_DCE4(rdev)) | 836 | switch (radeon_encoder->encoder_id) { |
| 837 | case ENCODER_OBJECT_ID_INTERNAL_UNIPHY: | ||
| 838 | case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1: | ||
| 839 | case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2: | ||
| 829 | index = GetIndexIntoMasterTable(COMMAND, UNIPHYTransmitterControl); | 840 | index = GetIndexIntoMasterTable(COMMAND, UNIPHYTransmitterControl); |
| 830 | else { | 841 | break; |
| 831 | switch (radeon_encoder->encoder_id) { | 842 | case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA: |
| 832 | case ENCODER_OBJECT_ID_INTERNAL_UNIPHY: | 843 | index = GetIndexIntoMasterTable(COMMAND, LVTMATransmitterControl); |
| 833 | index = GetIndexIntoMasterTable(COMMAND, DIG1TransmitterControl); | 844 | break; |
| 834 | break; | ||
| 835 | case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA: | ||
| 836 | index = GetIndexIntoMasterTable(COMMAND, DIG2TransmitterControl); | ||
| 837 | break; | ||
| 838 | } | ||
| 839 | } | 845 | } |
| 840 | 846 | ||
| 841 | if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev)) | 847 | if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev)) |
| @@ -843,14 +849,14 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t | |||
| 843 | 849 | ||
| 844 | args.v1.ucAction = action; | 850 | args.v1.ucAction = action; |
| 845 | if (action == ATOM_TRANSMITTER_ACTION_INIT) { | 851 | if (action == ATOM_TRANSMITTER_ACTION_INIT) { |
| 846 | args.v1.usInitInfo = radeon_connector->connector_object_id; | 852 | args.v1.usInitInfo = connector_object_id; |
| 847 | } else if (action == ATOM_TRANSMITTER_ACTION_SETUP_VSEMPH) { | 853 | } else if (action == ATOM_TRANSMITTER_ACTION_SETUP_VSEMPH) { |
| 848 | args.v1.asMode.ucLaneSel = lane_num; | 854 | args.v1.asMode.ucLaneSel = lane_num; |
| 849 | args.v1.asMode.ucLaneSet = lane_set; | 855 | args.v1.asMode.ucLaneSet = lane_set; |
| 850 | } else { | 856 | } else { |
| 851 | if (is_dp) | 857 | if (is_dp) |
| 852 | args.v1.usPixelClock = | 858 | args.v1.usPixelClock = |
| 853 | cpu_to_le16(dig_connector->dp_clock / 10); | 859 | cpu_to_le16(dp_clock / 10); |
| 854 | else if (radeon_encoder->pixel_clock > 165000) | 860 | else if (radeon_encoder->pixel_clock > 165000) |
| 855 | args.v1.usPixelClock = cpu_to_le16((radeon_encoder->pixel_clock / 2) / 10); | 861 | args.v1.usPixelClock = cpu_to_le16((radeon_encoder->pixel_clock / 2) / 10); |
| 856 | else | 862 | else |
| @@ -858,13 +864,13 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t | |||
| 858 | } | 864 | } |
| 859 | if (ASIC_IS_DCE4(rdev)) { | 865 | if (ASIC_IS_DCE4(rdev)) { |
| 860 | if (is_dp) | 866 | if (is_dp) |
| 861 | args.v3.ucLaneNum = dig_connector->dp_lane_count; | 867 | args.v3.ucLaneNum = dp_lane_count; |
| 862 | else if (radeon_encoder->pixel_clock > 165000) | 868 | else if (radeon_encoder->pixel_clock > 165000) |
| 863 | args.v3.ucLaneNum = 8; | 869 | args.v3.ucLaneNum = 8; |
| 864 | else | 870 | else |
| 865 | args.v3.ucLaneNum = 4; | 871 | args.v3.ucLaneNum = 4; |
| 866 | 872 | ||
| 867 | if (dig_connector->linkb) { | 873 | if (dig->linkb) { |
| 868 | args.v3.acConfig.ucLinkSel = 1; | 874 | args.v3.acConfig.ucLinkSel = 1; |
| 869 | args.v3.acConfig.ucEncoderSel = 1; | 875 | args.v3.acConfig.ucEncoderSel = 1; |
| 870 | } | 876 | } |
| @@ -904,7 +910,7 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t | |||
| 904 | } | 910 | } |
| 905 | } else if (ASIC_IS_DCE32(rdev)) { | 911 | } else if (ASIC_IS_DCE32(rdev)) { |
| 906 | args.v2.acConfig.ucEncoderSel = dig->dig_encoder; | 912 | args.v2.acConfig.ucEncoderSel = dig->dig_encoder; |
| 907 | if (dig_connector->linkb) | 913 | if (dig->linkb) |
| 908 | args.v2.acConfig.ucLinkSel = 1; | 914 | args.v2.acConfig.ucLinkSel = 1; |
| 909 | 915 | ||
| 910 | switch (radeon_encoder->encoder_id) { | 916 | switch (radeon_encoder->encoder_id) { |
| @@ -938,23 +944,23 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t | |||
| 938 | if ((rdev->flags & RADEON_IS_IGP) && | 944 | if ((rdev->flags & RADEON_IS_IGP) && |
| 939 | (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_UNIPHY)) { | 945 | (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_UNIPHY)) { |
| 940 | if (is_dp || (radeon_encoder->pixel_clock <= 165000)) { | 946 | if (is_dp || (radeon_encoder->pixel_clock <= 165000)) { |
| 941 | if (dig_connector->igp_lane_info & 0x1) | 947 | if (igp_lane_info & 0x1) |
| 942 | args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_0_3; | 948 | args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_0_3; |
| 943 | else if (dig_connector->igp_lane_info & 0x2) | 949 | else if (igp_lane_info & 0x2) |
| 944 | args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_4_7; | 950 | args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_4_7; |
| 945 | else if (dig_connector->igp_lane_info & 0x4) | 951 | else if (igp_lane_info & 0x4) |
| 946 | args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_8_11; | 952 | args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_8_11; |
| 947 | else if (dig_connector->igp_lane_info & 0x8) | 953 | else if (igp_lane_info & 0x8) |
| 948 | args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_12_15; | 954 | args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_12_15; |
| 949 | } else { | 955 | } else { |
| 950 | if (dig_connector->igp_lane_info & 0x3) | 956 | if (igp_lane_info & 0x3) |
| 951 | args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_0_7; | 957 | args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_0_7; |
| 952 | else if (dig_connector->igp_lane_info & 0xc) | 958 | else if (igp_lane_info & 0xc) |
| 953 | args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_8_15; | 959 | args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_8_15; |
| 954 | } | 960 | } |
| 955 | } | 961 | } |
| 956 | 962 | ||
| 957 | if (dig_connector->linkb) | 963 | if (dig->linkb) |
| 958 | args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LINKB; | 964 | args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LINKB; |
| 959 | else | 965 | else |
| 960 | args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LINKA; | 966 | args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LINKA; |
| @@ -1072,8 +1078,7 @@ radeon_atom_encoder_dpms(struct drm_encoder *encoder, int mode) | |||
| 1072 | if (is_dig) { | 1078 | if (is_dig) { |
| 1073 | switch (mode) { | 1079 | switch (mode) { |
| 1074 | case DRM_MODE_DPMS_ON: | 1080 | case DRM_MODE_DPMS_ON: |
| 1075 | if (!ASIC_IS_DCE4(rdev)) | 1081 | atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE_OUTPUT, 0, 0); |
| 1076 | atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE_OUTPUT, 0, 0); | ||
| 1077 | if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_DP) { | 1082 | if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_DP) { |
| 1078 | struct drm_connector *connector = radeon_get_connector_for_encoder(encoder); | 1083 | struct drm_connector *connector = radeon_get_connector_for_encoder(encoder); |
| 1079 | 1084 | ||
| @@ -1085,8 +1090,7 @@ radeon_atom_encoder_dpms(struct drm_encoder *encoder, int mode) | |||
| 1085 | case DRM_MODE_DPMS_STANDBY: | 1090 | case DRM_MODE_DPMS_STANDBY: |
| 1086 | case DRM_MODE_DPMS_SUSPEND: | 1091 | case DRM_MODE_DPMS_SUSPEND: |
| 1087 | case DRM_MODE_DPMS_OFF: | 1092 | case DRM_MODE_DPMS_OFF: |
| 1088 | if (!ASIC_IS_DCE4(rdev)) | 1093 | atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE_OUTPUT, 0, 0); |
| 1089 | atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE_OUTPUT, 0, 0); | ||
| 1090 | if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_DP) { | 1094 | if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_DP) { |
| 1091 | if (ASIC_IS_DCE4(rdev)) | 1095 | if (ASIC_IS_DCE4(rdev)) |
| 1092 | atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_VIDEO_OFF); | 1096 | atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_VIDEO_OFF); |
| @@ -1290,24 +1294,22 @@ static int radeon_atom_pick_dig_encoder(struct drm_encoder *encoder) | |||
| 1290 | uint32_t dig_enc_in_use = 0; | 1294 | uint32_t dig_enc_in_use = 0; |
| 1291 | 1295 | ||
| 1292 | if (ASIC_IS_DCE4(rdev)) { | 1296 | if (ASIC_IS_DCE4(rdev)) { |
| 1293 | struct radeon_connector_atom_dig *dig_connector = | 1297 | dig = radeon_encoder->enc_priv; |
| 1294 | radeon_get_atom_connector_priv_from_encoder(encoder); | ||
| 1295 | |||
| 1296 | switch (radeon_encoder->encoder_id) { | 1298 | switch (radeon_encoder->encoder_id) { |
| 1297 | case ENCODER_OBJECT_ID_INTERNAL_UNIPHY: | 1299 | case ENCODER_OBJECT_ID_INTERNAL_UNIPHY: |
| 1298 | if (dig_connector->linkb) | 1300 | if (dig->linkb) |
| 1299 | return 1; | 1301 | return 1; |
| 1300 | else | 1302 | else |
| 1301 | return 0; | 1303 | return 0; |
| 1302 | break; | 1304 | break; |
| 1303 | case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1: | 1305 | case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1: |
| 1304 | if (dig_connector->linkb) | 1306 | if (dig->linkb) |
| 1305 | return 3; | 1307 | return 3; |
| 1306 | else | 1308 | else |
| 1307 | return 2; | 1309 | return 2; |
| 1308 | break; | 1310 | break; |
| 1309 | case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2: | 1311 | case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2: |
| 1310 | if (dig_connector->linkb) | 1312 | if (dig->linkb) |
| 1311 | return 5; | 1313 | return 5; |
| 1312 | else | 1314 | else |
| 1313 | return 4; | 1315 | return 4; |
| @@ -1641,6 +1643,7 @@ radeon_atombios_set_dac_info(struct radeon_encoder *radeon_encoder) | |||
| 1641 | struct radeon_encoder_atom_dig * | 1643 | struct radeon_encoder_atom_dig * |
| 1642 | radeon_atombios_set_dig_info(struct radeon_encoder *radeon_encoder) | 1644 | radeon_atombios_set_dig_info(struct radeon_encoder *radeon_encoder) |
| 1643 | { | 1645 | { |
| 1646 | int encoder_enum = (radeon_encoder->encoder_enum & ENUM_ID_MASK) >> ENUM_ID_SHIFT; | ||
| 1644 | struct radeon_encoder_atom_dig *dig = kzalloc(sizeof(struct radeon_encoder_atom_dig), GFP_KERNEL); | 1647 | struct radeon_encoder_atom_dig *dig = kzalloc(sizeof(struct radeon_encoder_atom_dig), GFP_KERNEL); |
| 1645 | 1648 | ||
| 1646 | if (!dig) | 1649 | if (!dig) |
| @@ -1650,11 +1653,16 @@ radeon_atombios_set_dig_info(struct radeon_encoder *radeon_encoder) | |||
| 1650 | dig->coherent_mode = true; | 1653 | dig->coherent_mode = true; |
| 1651 | dig->dig_encoder = -1; | 1654 | dig->dig_encoder = -1; |
| 1652 | 1655 | ||
| 1656 | if (encoder_enum == 2) | ||
| 1657 | dig->linkb = true; | ||
| 1658 | else | ||
| 1659 | dig->linkb = false; | ||
| 1660 | |||
| 1653 | return dig; | 1661 | return dig; |
| 1654 | } | 1662 | } |
| 1655 | 1663 | ||
| 1656 | void | 1664 | void |
| 1657 | radeon_add_atom_encoder(struct drm_device *dev, uint32_t encoder_id, uint32_t supported_device) | 1665 | radeon_add_atom_encoder(struct drm_device *dev, uint32_t encoder_enum, uint32_t supported_device) |
| 1658 | { | 1666 | { |
| 1659 | struct radeon_device *rdev = dev->dev_private; | 1667 | struct radeon_device *rdev = dev->dev_private; |
| 1660 | struct drm_encoder *encoder; | 1668 | struct drm_encoder *encoder; |
| @@ -1663,7 +1671,7 @@ radeon_add_atom_encoder(struct drm_device *dev, uint32_t encoder_id, uint32_t su | |||
| 1663 | /* see if we already added it */ | 1671 | /* see if we already added it */ |
| 1664 | list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { | 1672 | list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { |
| 1665 | radeon_encoder = to_radeon_encoder(encoder); | 1673 | radeon_encoder = to_radeon_encoder(encoder); |
| 1666 | if (radeon_encoder->encoder_id == encoder_id) { | 1674 | if (radeon_encoder->encoder_enum == encoder_enum) { |
| 1667 | radeon_encoder->devices |= supported_device; | 1675 | radeon_encoder->devices |= supported_device; |
| 1668 | return; | 1676 | return; |
| 1669 | } | 1677 | } |
| @@ -1691,7 +1699,8 @@ radeon_add_atom_encoder(struct drm_device *dev, uint32_t encoder_id, uint32_t su | |||
| 1691 | 1699 | ||
| 1692 | radeon_encoder->enc_priv = NULL; | 1700 | radeon_encoder->enc_priv = NULL; |
| 1693 | 1701 | ||
| 1694 | radeon_encoder->encoder_id = encoder_id; | 1702 | radeon_encoder->encoder_enum = encoder_enum; |
| 1703 | radeon_encoder->encoder_id = (encoder_enum & OBJECT_ID_MASK) >> OBJECT_ID_SHIFT; | ||
| 1695 | radeon_encoder->devices = supported_device; | 1704 | radeon_encoder->devices = supported_device; |
| 1696 | radeon_encoder->rmx_type = RMX_OFF; | 1705 | radeon_encoder->rmx_type = RMX_OFF; |
| 1697 | radeon_encoder->underscan_type = UNDERSCAN_OFF; | 1706 | radeon_encoder->underscan_type = UNDERSCAN_OFF; |
diff --git a/drivers/gpu/drm/radeon/radeon_fb.c b/drivers/gpu/drm/radeon/radeon_fb.c index dbf86962bdd1..c74a8b20d941 100644 --- a/drivers/gpu/drm/radeon/radeon_fb.c +++ b/drivers/gpu/drm/radeon/radeon_fb.c | |||
| @@ -118,7 +118,7 @@ static int radeonfb_create_pinned_object(struct radeon_fbdev *rfbdev, | |||
| 118 | aligned_size = ALIGN(size, PAGE_SIZE); | 118 | aligned_size = ALIGN(size, PAGE_SIZE); |
| 119 | ret = radeon_gem_object_create(rdev, aligned_size, 0, | 119 | ret = radeon_gem_object_create(rdev, aligned_size, 0, |
| 120 | RADEON_GEM_DOMAIN_VRAM, | 120 | RADEON_GEM_DOMAIN_VRAM, |
| 121 | false, ttm_bo_type_kernel, | 121 | false, true, |
| 122 | &gobj); | 122 | &gobj); |
| 123 | if (ret) { | 123 | if (ret) { |
| 124 | printk(KERN_ERR "failed to allocate framebuffer (%d)\n", | 124 | printk(KERN_ERR "failed to allocate framebuffer (%d)\n", |
diff --git a/drivers/gpu/drm/radeon/radeon_i2c.c b/drivers/gpu/drm/radeon/radeon_i2c.c index bfd2ce5f5372..0416804d8f30 100644 --- a/drivers/gpu/drm/radeon/radeon_i2c.c +++ b/drivers/gpu/drm/radeon/radeon_i2c.c | |||
| @@ -99,6 +99,13 @@ static void radeon_i2c_do_lock(struct radeon_i2c_chan *i2c, int lock_state) | |||
| 99 | } | 99 | } |
| 100 | } | 100 | } |
| 101 | 101 | ||
| 102 | /* switch the pads to ddc mode */ | ||
| 103 | if (ASIC_IS_DCE3(rdev) && rec->hw_capable) { | ||
| 104 | temp = RREG32(rec->mask_clk_reg); | ||
| 105 | temp &= ~(1 << 16); | ||
| 106 | WREG32(rec->mask_clk_reg, temp); | ||
| 107 | } | ||
| 108 | |||
| 102 | /* clear the output pin values */ | 109 | /* clear the output pin values */ |
| 103 | temp = RREG32(rec->a_clk_reg) & ~rec->a_clk_mask; | 110 | temp = RREG32(rec->a_clk_reg) & ~rec->a_clk_mask; |
| 104 | WREG32(rec->a_clk_reg, temp); | 111 | WREG32(rec->a_clk_reg, temp); |
diff --git a/drivers/gpu/drm/radeon/radeon_irq_kms.c b/drivers/gpu/drm/radeon/radeon_irq_kms.c index 059bfa4098d7..a108c7ed14f5 100644 --- a/drivers/gpu/drm/radeon/radeon_irq_kms.c +++ b/drivers/gpu/drm/radeon/radeon_irq_kms.c | |||
| @@ -121,11 +121,12 @@ int radeon_irq_kms_init(struct radeon_device *rdev) | |||
| 121 | * chips. Disable MSI on them for now. | 121 | * chips. Disable MSI on them for now. |
| 122 | */ | 122 | */ |
| 123 | if ((rdev->family >= CHIP_RV380) && | 123 | if ((rdev->family >= CHIP_RV380) && |
| 124 | (!(rdev->flags & RADEON_IS_IGP))) { | 124 | (!(rdev->flags & RADEON_IS_IGP)) && |
| 125 | (!(rdev->flags & RADEON_IS_AGP))) { | ||
| 125 | int ret = pci_enable_msi(rdev->pdev); | 126 | int ret = pci_enable_msi(rdev->pdev); |
| 126 | if (!ret) { | 127 | if (!ret) { |
| 127 | rdev->msi_enabled = 1; | 128 | rdev->msi_enabled = 1; |
| 128 | DRM_INFO("radeon: using MSI.\n"); | 129 | dev_info(rdev->dev, "radeon: using MSI.\n"); |
| 129 | } | 130 | } |
| 130 | } | 131 | } |
| 131 | rdev->irq.installed = true; | 132 | rdev->irq.installed = true; |
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c index b1c8ace5f080..5eee3c41d124 100644 --- a/drivers/gpu/drm/radeon/radeon_kms.c +++ b/drivers/gpu/drm/radeon/radeon_kms.c | |||
| @@ -161,6 +161,7 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) | |||
| 161 | DRM_DEBUG_KMS("tiling config is r6xx+ only!\n"); | 161 | DRM_DEBUG_KMS("tiling config is r6xx+ only!\n"); |
| 162 | return -EINVAL; | 162 | return -EINVAL; |
| 163 | } | 163 | } |
| 164 | break; | ||
| 164 | case RADEON_INFO_WANT_HYPERZ: | 165 | case RADEON_INFO_WANT_HYPERZ: |
| 165 | /* The "value" here is both an input and output parameter. | 166 | /* The "value" here is both an input and output parameter. |
| 166 | * If the input value is 1, filp requests hyper-z access. | 167 | * If the input value is 1, filp requests hyper-z access. |
| @@ -323,45 +324,45 @@ KMS_INVALID_IOCTL(radeon_surface_free_kms) | |||
| 323 | 324 | ||
| 324 | 325 | ||
| 325 | struct drm_ioctl_desc radeon_ioctls_kms[] = { | 326 | struct drm_ioctl_desc radeon_ioctls_kms[] = { |
| 326 | DRM_IOCTL_DEF(DRM_RADEON_CP_INIT, radeon_cp_init_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), | 327 | DRM_IOCTL_DEF_DRV(RADEON_CP_INIT, radeon_cp_init_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), |
| 327 | DRM_IOCTL_DEF(DRM_RADEON_CP_START, radeon_cp_start_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), | 328 | DRM_IOCTL_DEF_DRV(RADEON_CP_START, radeon_cp_start_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), |
| 328 | DRM_IOCTL_DEF(DRM_RADEON_CP_STOP, radeon_cp_stop_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), | 329 | DRM_IOCTL_DEF_DRV(RADEON_CP_STOP, radeon_cp_stop_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), |
| 329 | DRM_IOCTL_DEF(DRM_RADEON_CP_RESET, radeon_cp_reset_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), | 330 | DRM_IOCTL_DEF_DRV(RADEON_CP_RESET, radeon_cp_reset_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), |
| 330 | DRM_IOCTL_DEF(DRM_RADEON_CP_IDLE, radeon_cp_idle_kms, DRM_AUTH), | 331 | DRM_IOCTL_DEF_DRV(RADEON_CP_IDLE, radeon_cp_idle_kms, DRM_AUTH), |
| 331 | DRM_IOCTL_DEF(DRM_RADEON_CP_RESUME, radeon_cp_resume_kms, DRM_AUTH), | 332 | DRM_IOCTL_DEF_DRV(RADEON_CP_RESUME, radeon_cp_resume_kms, DRM_AUTH), |
| 332 | DRM_IOCTL_DEF(DRM_RADEON_RESET, radeon_engine_reset_kms, DRM_AUTH), | 333 | DRM_IOCTL_DEF_DRV(RADEON_RESET, radeon_engine_reset_kms, DRM_AUTH), |
| 333 | DRM_IOCTL_DEF(DRM_RADEON_FULLSCREEN, radeon_fullscreen_kms, DRM_AUTH), | 334 | DRM_IOCTL_DEF_DRV(RADEON_FULLSCREEN, radeon_fullscreen_kms, DRM_AUTH), |
| 334 | DRM_IOCTL_DEF(DRM_RADEON_SWAP, radeon_cp_swap_kms, DRM_AUTH), | 335 | DRM_IOCTL_DEF_DRV(RADEON_SWAP, radeon_cp_swap_kms, DRM_AUTH), |
| 335 | DRM_IOCTL_DEF(DRM_RADEON_CLEAR, radeon_cp_clear_kms, DRM_AUTH), | 336 | DRM_IOCTL_DEF_DRV(RADEON_CLEAR, radeon_cp_clear_kms, DRM_AUTH), |
| 336 | DRM_IOCTL_DEF(DRM_RADEON_VERTEX, radeon_cp_vertex_kms, DRM_AUTH), | 337 | DRM_IOCTL_DEF_DRV(RADEON_VERTEX, radeon_cp_vertex_kms, DRM_AUTH), |
| 337 | DRM_IOCTL_DEF(DRM_RADEON_INDICES, radeon_cp_indices_kms, DRM_AUTH), | 338 | DRM_IOCTL_DEF_DRV(RADEON_INDICES, radeon_cp_indices_kms, DRM_AUTH), |
| 338 | DRM_IOCTL_DEF(DRM_RADEON_TEXTURE, radeon_cp_texture_kms, DRM_AUTH), | 339 | DRM_IOCTL_DEF_DRV(RADEON_TEXTURE, radeon_cp_texture_kms, DRM_AUTH), |
| 339 | DRM_IOCTL_DEF(DRM_RADEON_STIPPLE, radeon_cp_stipple_kms, DRM_AUTH), | 340 | DRM_IOCTL_DEF_DRV(RADEON_STIPPLE, radeon_cp_stipple_kms, DRM_AUTH), |
| 340 | DRM_IOCTL_DEF(DRM_RADEON_INDIRECT, radeon_cp_indirect_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), | 341 | DRM_IOCTL_DEF_DRV(RADEON_INDIRECT, radeon_cp_indirect_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), |
| 341 | DRM_IOCTL_DEF(DRM_RADEON_VERTEX2, radeon_cp_vertex2_kms, DRM_AUTH), | 342 | DRM_IOCTL_DEF_DRV(RADEON_VERTEX2, radeon_cp_vertex2_kms, DRM_AUTH), |
| 342 | DRM_IOCTL_DEF(DRM_RADEON_CMDBUF, radeon_cp_cmdbuf_kms, DRM_AUTH), | 343 | DRM_IOCTL_DEF_DRV(RADEON_CMDBUF, radeon_cp_cmdbuf_kms, DRM_AUTH), |
| 343 | DRM_IOCTL_DEF(DRM_RADEON_GETPARAM, radeon_cp_getparam_kms, DRM_AUTH), | 344 | DRM_IOCTL_DEF_DRV(RADEON_GETPARAM, radeon_cp_getparam_kms, DRM_AUTH), |
| 344 | DRM_IOCTL_DEF(DRM_RADEON_FLIP, radeon_cp_flip_kms, DRM_AUTH), | 345 | DRM_IOCTL_DEF_DRV(RADEON_FLIP, radeon_cp_flip_kms, DRM_AUTH), |
| 345 | DRM_IOCTL_DEF(DRM_RADEON_ALLOC, radeon_mem_alloc_kms, DRM_AUTH), | 346 | DRM_IOCTL_DEF_DRV(RADEON_ALLOC, radeon_mem_alloc_kms, DRM_AUTH), |
| 346 | DRM_IOCTL_DEF(DRM_RADEON_FREE, radeon_mem_free_kms, DRM_AUTH), | 347 | DRM_IOCTL_DEF_DRV(RADEON_FREE, radeon_mem_free_kms, DRM_AUTH), |
| 347 | DRM_IOCTL_DEF(DRM_RADEON_INIT_HEAP, radeon_mem_init_heap_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), | 348 | DRM_IOCTL_DEF_DRV(RADEON_INIT_HEAP, radeon_mem_init_heap_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), |
| 348 | DRM_IOCTL_DEF(DRM_RADEON_IRQ_EMIT, radeon_irq_emit_kms, DRM_AUTH), | 349 | DRM_IOCTL_DEF_DRV(RADEON_IRQ_EMIT, radeon_irq_emit_kms, DRM_AUTH), |
| 349 | DRM_IOCTL_DEF(DRM_RADEON_IRQ_WAIT, radeon_irq_wait_kms, DRM_AUTH), | 350 | DRM_IOCTL_DEF_DRV(RADEON_IRQ_WAIT, radeon_irq_wait_kms, DRM_AUTH), |
| 350 | DRM_IOCTL_DEF(DRM_RADEON_SETPARAM, radeon_cp_setparam_kms, DRM_AUTH), | 351 | DRM_IOCTL_DEF_DRV(RADEON_SETPARAM, radeon_cp_setparam_kms, DRM_AUTH), |
| 351 | DRM_IOCTL_DEF(DRM_RADEON_SURF_ALLOC, radeon_surface_alloc_kms, DRM_AUTH), | 352 | DRM_IOCTL_DEF_DRV(RADEON_SURF_ALLOC, radeon_surface_alloc_kms, DRM_AUTH), |
| 352 | DRM_IOCTL_DEF(DRM_RADEON_SURF_FREE, radeon_surface_free_kms, DRM_AUTH), | 353 | DRM_IOCTL_DEF_DRV(RADEON_SURF_FREE, radeon_surface_free_kms, DRM_AUTH), |
| 353 | /* KMS */ | 354 | /* KMS */ |
| 354 | DRM_IOCTL_DEF(DRM_RADEON_GEM_INFO, radeon_gem_info_ioctl, DRM_AUTH|DRM_UNLOCKED), | 355 | DRM_IOCTL_DEF_DRV(RADEON_GEM_INFO, radeon_gem_info_ioctl, DRM_AUTH|DRM_UNLOCKED), |
| 355 | DRM_IOCTL_DEF(DRM_RADEON_GEM_CREATE, radeon_gem_create_ioctl, DRM_AUTH|DRM_UNLOCKED), | 356 | DRM_IOCTL_DEF_DRV(RADEON_GEM_CREATE, radeon_gem_create_ioctl, DRM_AUTH|DRM_UNLOCKED), |
| 356 | DRM_IOCTL_DEF(DRM_RADEON_GEM_MMAP, radeon_gem_mmap_ioctl, DRM_AUTH|DRM_UNLOCKED), | 357 | DRM_IOCTL_DEF_DRV(RADEON_GEM_MMAP, radeon_gem_mmap_ioctl, DRM_AUTH|DRM_UNLOCKED), |
| 357 | DRM_IOCTL_DEF(DRM_RADEON_GEM_SET_DOMAIN, radeon_gem_set_domain_ioctl, DRM_AUTH|DRM_UNLOCKED), | 358 | DRM_IOCTL_DEF_DRV(RADEON_GEM_SET_DOMAIN, radeon_gem_set_domain_ioctl, DRM_AUTH|DRM_UNLOCKED), |
| 358 | DRM_IOCTL_DEF(DRM_RADEON_GEM_PREAD, radeon_gem_pread_ioctl, DRM_AUTH|DRM_UNLOCKED), | 359 | DRM_IOCTL_DEF_DRV(RADEON_GEM_PREAD, radeon_gem_pread_ioctl, DRM_AUTH|DRM_UNLOCKED), |
| 359 | DRM_IOCTL_DEF(DRM_RADEON_GEM_PWRITE, radeon_gem_pwrite_ioctl, DRM_AUTH|DRM_UNLOCKED), | 360 | DRM_IOCTL_DEF_DRV(RADEON_GEM_PWRITE, radeon_gem_pwrite_ioctl, DRM_AUTH|DRM_UNLOCKED), |
| 360 | DRM_IOCTL_DEF(DRM_RADEON_GEM_WAIT_IDLE, radeon_gem_wait_idle_ioctl, DRM_AUTH|DRM_UNLOCKED), | 361 | DRM_IOCTL_DEF_DRV(RADEON_GEM_WAIT_IDLE, radeon_gem_wait_idle_ioctl, DRM_AUTH|DRM_UNLOCKED), |
| 361 | DRM_IOCTL_DEF(DRM_RADEON_CS, radeon_cs_ioctl, DRM_AUTH|DRM_UNLOCKED), | 362 | DRM_IOCTL_DEF_DRV(RADEON_CS, radeon_cs_ioctl, DRM_AUTH|DRM_UNLOCKED), |
| 362 | DRM_IOCTL_DEF(DRM_RADEON_INFO, radeon_info_ioctl, DRM_AUTH|DRM_UNLOCKED), | 363 | DRM_IOCTL_DEF_DRV(RADEON_INFO, radeon_info_ioctl, DRM_AUTH|DRM_UNLOCKED), |
| 363 | DRM_IOCTL_DEF(DRM_RADEON_GEM_SET_TILING, radeon_gem_set_tiling_ioctl, DRM_AUTH|DRM_UNLOCKED), | 364 | DRM_IOCTL_DEF_DRV(RADEON_GEM_SET_TILING, radeon_gem_set_tiling_ioctl, DRM_AUTH|DRM_UNLOCKED), |
| 364 | DRM_IOCTL_DEF(DRM_RADEON_GEM_GET_TILING, radeon_gem_get_tiling_ioctl, DRM_AUTH|DRM_UNLOCKED), | 365 | DRM_IOCTL_DEF_DRV(RADEON_GEM_GET_TILING, radeon_gem_get_tiling_ioctl, DRM_AUTH|DRM_UNLOCKED), |
| 365 | DRM_IOCTL_DEF(DRM_RADEON_GEM_BUSY, radeon_gem_busy_ioctl, DRM_AUTH|DRM_UNLOCKED), | 366 | DRM_IOCTL_DEF_DRV(RADEON_GEM_BUSY, radeon_gem_busy_ioctl, DRM_AUTH|DRM_UNLOCKED), |
| 366 | }; | 367 | }; |
| 367 | int radeon_max_kms_ioctl = DRM_ARRAY_SIZE(radeon_ioctls_kms); | 368 | int radeon_max_kms_ioctl = DRM_ARRAY_SIZE(radeon_ioctls_kms); |
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c index 989df519a1e4..305049afde15 100644 --- a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c +++ b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c | |||
| @@ -272,7 +272,7 @@ static uint8_t radeon_compute_pll_gain(uint16_t ref_freq, uint16_t ref_div, | |||
| 272 | if (!ref_div) | 272 | if (!ref_div) |
| 273 | return 1; | 273 | return 1; |
| 274 | 274 | ||
| 275 | vcoFreq = ((unsigned)ref_freq & fb_div) / ref_div; | 275 | vcoFreq = ((unsigned)ref_freq * fb_div) / ref_div; |
| 276 | 276 | ||
| 277 | /* | 277 | /* |
| 278 | * This is horribly crude: the VCO frequency range is divided into | 278 | * This is horribly crude: the VCO frequency range is divided into |
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c index b8149cbc0c70..0b8397000f4c 100644 --- a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c +++ b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c | |||
| @@ -1345,7 +1345,7 @@ static struct radeon_encoder_ext_tmds *radeon_legacy_get_ext_tmds_info(struct ra | |||
| 1345 | } | 1345 | } |
| 1346 | 1346 | ||
| 1347 | void | 1347 | void |
| 1348 | radeon_add_legacy_encoder(struct drm_device *dev, uint32_t encoder_id, uint32_t supported_device) | 1348 | radeon_add_legacy_encoder(struct drm_device *dev, uint32_t encoder_enum, uint32_t supported_device) |
| 1349 | { | 1349 | { |
| 1350 | struct radeon_device *rdev = dev->dev_private; | 1350 | struct radeon_device *rdev = dev->dev_private; |
| 1351 | struct drm_encoder *encoder; | 1351 | struct drm_encoder *encoder; |
| @@ -1354,7 +1354,7 @@ radeon_add_legacy_encoder(struct drm_device *dev, uint32_t encoder_id, uint32_t | |||
| 1354 | /* see if we already added it */ | 1354 | /* see if we already added it */ |
| 1355 | list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { | 1355 | list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { |
| 1356 | radeon_encoder = to_radeon_encoder(encoder); | 1356 | radeon_encoder = to_radeon_encoder(encoder); |
| 1357 | if (radeon_encoder->encoder_id == encoder_id) { | 1357 | if (radeon_encoder->encoder_enum == encoder_enum) { |
| 1358 | radeon_encoder->devices |= supported_device; | 1358 | radeon_encoder->devices |= supported_device; |
| 1359 | return; | 1359 | return; |
| 1360 | } | 1360 | } |
| @@ -1374,7 +1374,8 @@ radeon_add_legacy_encoder(struct drm_device *dev, uint32_t encoder_id, uint32_t | |||
| 1374 | 1374 | ||
| 1375 | radeon_encoder->enc_priv = NULL; | 1375 | radeon_encoder->enc_priv = NULL; |
| 1376 | 1376 | ||
| 1377 | radeon_encoder->encoder_id = encoder_id; | 1377 | radeon_encoder->encoder_enum = encoder_enum; |
| 1378 | radeon_encoder->encoder_id = (encoder_enum & OBJECT_ID_MASK) >> OBJECT_ID_SHIFT; | ||
| 1378 | radeon_encoder->devices = supported_device; | 1379 | radeon_encoder->devices = supported_device; |
| 1379 | radeon_encoder->rmx_type = RMX_OFF; | 1380 | radeon_encoder->rmx_type = RMX_OFF; |
| 1380 | 1381 | ||
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h index 5bbc086b9267..8f93e2b4b0c8 100644 --- a/drivers/gpu/drm/radeon/radeon_mode.h +++ b/drivers/gpu/drm/radeon/radeon_mode.h | |||
| @@ -342,6 +342,7 @@ struct radeon_atom_ss { | |||
| 342 | }; | 342 | }; |
| 343 | 343 | ||
| 344 | struct radeon_encoder_atom_dig { | 344 | struct radeon_encoder_atom_dig { |
| 345 | bool linkb; | ||
| 345 | /* atom dig */ | 346 | /* atom dig */ |
| 346 | bool coherent_mode; | 347 | bool coherent_mode; |
| 347 | int dig_encoder; /* -1 disabled, 0 DIGA, 1 DIGB */ | 348 | int dig_encoder; /* -1 disabled, 0 DIGA, 1 DIGB */ |
| @@ -360,6 +361,7 @@ struct radeon_encoder_atom_dac { | |||
| 360 | 361 | ||
| 361 | struct radeon_encoder { | 362 | struct radeon_encoder { |
| 362 | struct drm_encoder base; | 363 | struct drm_encoder base; |
| 364 | uint32_t encoder_enum; | ||
| 363 | uint32_t encoder_id; | 365 | uint32_t encoder_id; |
| 364 | uint32_t devices; | 366 | uint32_t devices; |
| 365 | uint32_t active_device; | 367 | uint32_t active_device; |
| @@ -378,7 +380,6 @@ struct radeon_encoder { | |||
| 378 | 380 | ||
| 379 | struct radeon_connector_atom_dig { | 381 | struct radeon_connector_atom_dig { |
| 380 | uint32_t igp_lane_info; | 382 | uint32_t igp_lane_info; |
| 381 | bool linkb; | ||
| 382 | /* displayport */ | 383 | /* displayport */ |
| 383 | struct radeon_i2c_chan *dp_i2c_bus; | 384 | struct radeon_i2c_chan *dp_i2c_bus; |
| 384 | u8 dpcd[8]; | 385 | u8 dpcd[8]; |
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c index 58038f5cab38..477ba673e1b4 100644 --- a/drivers/gpu/drm/radeon/radeon_pm.c +++ b/drivers/gpu/drm/radeon/radeon_pm.c | |||
| @@ -226,6 +226,11 @@ static void radeon_pm_set_clocks(struct radeon_device *rdev) | |||
| 226 | { | 226 | { |
| 227 | int i; | 227 | int i; |
| 228 | 228 | ||
| 229 | /* no need to take locks, etc. if nothing's going to change */ | ||
| 230 | if ((rdev->pm.requested_clock_mode_index == rdev->pm.current_clock_mode_index) && | ||
| 231 | (rdev->pm.requested_power_state_index == rdev->pm.current_power_state_index)) | ||
| 232 | return; | ||
| 233 | |||
| 229 | mutex_lock(&rdev->ddev->struct_mutex); | 234 | mutex_lock(&rdev->ddev->struct_mutex); |
| 230 | mutex_lock(&rdev->vram_mutex); | 235 | mutex_lock(&rdev->vram_mutex); |
| 231 | mutex_lock(&rdev->cp.mutex); | 236 | mutex_lock(&rdev->cp.mutex); |
diff --git a/drivers/gpu/drm/radeon/radeon_state.c b/drivers/gpu/drm/radeon/radeon_state.c index b3ba44c0a818..4ae5a3d1074e 100644 --- a/drivers/gpu/drm/radeon/radeon_state.c +++ b/drivers/gpu/drm/radeon/radeon_state.c | |||
| @@ -3228,34 +3228,34 @@ void radeon_driver_postclose(struct drm_device *dev, struct drm_file *file_priv) | |||
| 3228 | } | 3228 | } |
| 3229 | 3229 | ||
| 3230 | struct drm_ioctl_desc radeon_ioctls[] = { | 3230 | struct drm_ioctl_desc radeon_ioctls[] = { |
| 3231 | DRM_IOCTL_DEF(DRM_RADEON_CP_INIT, radeon_cp_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), | 3231 | DRM_IOCTL_DEF_DRV(RADEON_CP_INIT, radeon_cp_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), |
| 3232 | DRM_IOCTL_DEF(DRM_RADEON_CP_START, radeon_cp_start, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), | 3232 | DRM_IOCTL_DEF_DRV(RADEON_CP_START, radeon_cp_start, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), |
| 3233 | DRM_IOCTL_DEF(DRM_RADEON_CP_STOP, radeon_cp_stop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), | 3233 | DRM_IOCTL_DEF_DRV(RADEON_CP_STOP, radeon_cp_stop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), |
| 3234 | DRM_IOCTL_DEF(DRM_RADEON_CP_RESET, radeon_cp_reset, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), | 3234 | DRM_IOCTL_DEF_DRV(RADEON_CP_RESET, radeon_cp_reset, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), |
| 3235 | DRM_IOCTL_DEF(DRM_RADEON_CP_IDLE, radeon_cp_idle, DRM_AUTH), | 3235 | DRM_IOCTL_DEF_DRV(RADEON_CP_IDLE, radeon_cp_idle, DRM_AUTH), |
| 3236 | DRM_IOCTL_DEF(DRM_RADEON_CP_RESUME, radeon_cp_resume, DRM_AUTH), | 3236 | DRM_IOCTL_DEF_DRV(RADEON_CP_RESUME, radeon_cp_resume, DRM_AUTH), |
| 3237 | DRM_IOCTL_DEF(DRM_RADEON_RESET, radeon_engine_reset, DRM_AUTH), | 3237 | DRM_IOCTL_DEF_DRV(RADEON_RESET, radeon_engine_reset, DRM_AUTH), |
| 3238 | DRM_IOCTL_DEF(DRM_RADEON_FULLSCREEN, radeon_fullscreen, DRM_AUTH), | 3238 | DRM_IOCTL_DEF_DRV(RADEON_FULLSCREEN, radeon_fullscreen, DRM_AUTH), |
| 3239 | DRM_IOCTL_DEF(DRM_RADEON_SWAP, radeon_cp_swap, DRM_AUTH), | 3239 | DRM_IOCTL_DEF_DRV(RADEON_SWAP, radeon_cp_swap, DRM_AUTH), |
| 3240 | DRM_IOCTL_DEF(DRM_RADEON_CLEAR, radeon_cp_clear, DRM_AUTH), | 3240 | DRM_IOCTL_DEF_DRV(RADEON_CLEAR, radeon_cp_clear, DRM_AUTH), |
| 3241 | DRM_IOCTL_DEF(DRM_RADEON_VERTEX, radeon_cp_vertex, DRM_AUTH), | 3241 | DRM_IOCTL_DEF_DRV(RADEON_VERTEX, radeon_cp_vertex, DRM_AUTH), |
| 3242 | DRM_IOCTL_DEF(DRM_RADEON_INDICES, radeon_cp_indices, DRM_AUTH), | 3242 | DRM_IOCTL_DEF_DRV(RADEON_INDICES, radeon_cp_indices, DRM_AUTH), |
| 3243 | DRM_IOCTL_DEF(DRM_RADEON_TEXTURE, radeon_cp_texture, DRM_AUTH), | 3243 | DRM_IOCTL_DEF_DRV(RADEON_TEXTURE, radeon_cp_texture, DRM_AUTH), |
| 3244 | DRM_IOCTL_DEF(DRM_RADEON_STIPPLE, radeon_cp_stipple, DRM_AUTH), | 3244 | DRM_IOCTL_DEF_DRV(RADEON_STIPPLE, radeon_cp_stipple, DRM_AUTH), |
| 3245 | DRM_IOCTL_DEF(DRM_RADEON_INDIRECT, radeon_cp_indirect, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), | 3245 | DRM_IOCTL_DEF_DRV(RADEON_INDIRECT, radeon_cp_indirect, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), |
| 3246 | DRM_IOCTL_DEF(DRM_RADEON_VERTEX2, radeon_cp_vertex2, DRM_AUTH), | 3246 | DRM_IOCTL_DEF_DRV(RADEON_VERTEX2, radeon_cp_vertex2, DRM_AUTH), |
| 3247 | DRM_IOCTL_DEF(DRM_RADEON_CMDBUF, radeon_cp_cmdbuf, DRM_AUTH), | 3247 | DRM_IOCTL_DEF_DRV(RADEON_CMDBUF, radeon_cp_cmdbuf, DRM_AUTH), |
| 3248 | DRM_IOCTL_DEF(DRM_RADEON_GETPARAM, radeon_cp_getparam, DRM_AUTH), | 3248 | DRM_IOCTL_DEF_DRV(RADEON_GETPARAM, radeon_cp_getparam, DRM_AUTH), |
| 3249 | DRM_IOCTL_DEF(DRM_RADEON_FLIP, radeon_cp_flip, DRM_AUTH), | 3249 | DRM_IOCTL_DEF_DRV(RADEON_FLIP, radeon_cp_flip, DRM_AUTH), |
| 3250 | DRM_IOCTL_DEF(DRM_RADEON_ALLOC, radeon_mem_alloc, DRM_AUTH), | 3250 | DRM_IOCTL_DEF_DRV(RADEON_ALLOC, radeon_mem_alloc, DRM_AUTH), |
| 3251 | DRM_IOCTL_DEF(DRM_RADEON_FREE, radeon_mem_free, DRM_AUTH), | 3251 | DRM_IOCTL_DEF_DRV(RADEON_FREE, radeon_mem_free, DRM_AUTH), |
| 3252 | DRM_IOCTL_DEF(DRM_RADEON_INIT_HEAP, radeon_mem_init_heap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), | 3252 | DRM_IOCTL_DEF_DRV(RADEON_INIT_HEAP, radeon_mem_init_heap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), |
| 3253 | DRM_IOCTL_DEF(DRM_RADEON_IRQ_EMIT, radeon_irq_emit, DRM_AUTH), | 3253 | DRM_IOCTL_DEF_DRV(RADEON_IRQ_EMIT, radeon_irq_emit, DRM_AUTH), |
| 3254 | DRM_IOCTL_DEF(DRM_RADEON_IRQ_WAIT, radeon_irq_wait, DRM_AUTH), | 3254 | DRM_IOCTL_DEF_DRV(RADEON_IRQ_WAIT, radeon_irq_wait, DRM_AUTH), |
| 3255 | DRM_IOCTL_DEF(DRM_RADEON_SETPARAM, radeon_cp_setparam, DRM_AUTH), | 3255 | DRM_IOCTL_DEF_DRV(RADEON_SETPARAM, radeon_cp_setparam, DRM_AUTH), |
| 3256 | DRM_IOCTL_DEF(DRM_RADEON_SURF_ALLOC, radeon_surface_alloc, DRM_AUTH), | 3256 | DRM_IOCTL_DEF_DRV(RADEON_SURF_ALLOC, radeon_surface_alloc, DRM_AUTH), |
| 3257 | DRM_IOCTL_DEF(DRM_RADEON_SURF_FREE, radeon_surface_free, DRM_AUTH), | 3257 | DRM_IOCTL_DEF_DRV(RADEON_SURF_FREE, radeon_surface_free, DRM_AUTH), |
| 3258 | DRM_IOCTL_DEF(DRM_RADEON_CS, r600_cs_legacy_ioctl, DRM_AUTH) | 3258 | DRM_IOCTL_DEF_DRV(RADEON_CS, r600_cs_legacy_ioctl, DRM_AUTH) |
| 3259 | }; | 3259 | }; |
| 3260 | 3260 | ||
| 3261 | int radeon_max_ioctl = DRM_ARRAY_SIZE(radeon_ioctls); | 3261 | int radeon_max_ioctl = DRM_ARRAY_SIZE(radeon_ioctls); |
diff --git a/drivers/gpu/drm/savage/savage_bci.c b/drivers/gpu/drm/savage/savage_bci.c index 976dc8d25280..bf5f83ea14fe 100644 --- a/drivers/gpu/drm/savage/savage_bci.c +++ b/drivers/gpu/drm/savage/savage_bci.c | |||
| @@ -1082,10 +1082,10 @@ void savage_reclaim_buffers(struct drm_device *dev, struct drm_file *file_priv) | |||
| 1082 | } | 1082 | } |
| 1083 | 1083 | ||
| 1084 | struct drm_ioctl_desc savage_ioctls[] = { | 1084 | struct drm_ioctl_desc savage_ioctls[] = { |
| 1085 | DRM_IOCTL_DEF(DRM_SAVAGE_BCI_INIT, savage_bci_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), | 1085 | DRM_IOCTL_DEF_DRV(SAVAGE_BCI_INIT, savage_bci_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), |
| 1086 | DRM_IOCTL_DEF(DRM_SAVAGE_BCI_CMDBUF, savage_bci_cmdbuf, DRM_AUTH), | 1086 | DRM_IOCTL_DEF_DRV(SAVAGE_BCI_CMDBUF, savage_bci_cmdbuf, DRM_AUTH), |
| 1087 | DRM_IOCTL_DEF(DRM_SAVAGE_BCI_EVENT_EMIT, savage_bci_event_emit, DRM_AUTH), | 1087 | DRM_IOCTL_DEF_DRV(SAVAGE_BCI_EVENT_EMIT, savage_bci_event_emit, DRM_AUTH), |
| 1088 | DRM_IOCTL_DEF(DRM_SAVAGE_BCI_EVENT_WAIT, savage_bci_event_wait, DRM_AUTH), | 1088 | DRM_IOCTL_DEF_DRV(SAVAGE_BCI_EVENT_WAIT, savage_bci_event_wait, DRM_AUTH), |
| 1089 | }; | 1089 | }; |
| 1090 | 1090 | ||
| 1091 | int savage_max_ioctl = DRM_ARRAY_SIZE(savage_ioctls); | 1091 | int savage_max_ioctl = DRM_ARRAY_SIZE(savage_ioctls); |
diff --git a/drivers/gpu/drm/sis/sis_mm.c b/drivers/gpu/drm/sis/sis_mm.c index 07d0f2979cac..7fe2b63412ce 100644 --- a/drivers/gpu/drm/sis/sis_mm.c +++ b/drivers/gpu/drm/sis/sis_mm.c | |||
| @@ -320,12 +320,12 @@ void sis_reclaim_buffers_locked(struct drm_device *dev, | |||
| 320 | } | 320 | } |
| 321 | 321 | ||
| 322 | struct drm_ioctl_desc sis_ioctls[] = { | 322 | struct drm_ioctl_desc sis_ioctls[] = { |
| 323 | DRM_IOCTL_DEF(DRM_SIS_FB_ALLOC, sis_fb_alloc, DRM_AUTH), | 323 | DRM_IOCTL_DEF_DRV(SIS_FB_ALLOC, sis_fb_alloc, DRM_AUTH), |
| 324 | DRM_IOCTL_DEF(DRM_SIS_FB_FREE, sis_drm_free, DRM_AUTH), | 324 | DRM_IOCTL_DEF_DRV(SIS_FB_FREE, sis_drm_free, DRM_AUTH), |
| 325 | DRM_IOCTL_DEF(DRM_SIS_AGP_INIT, sis_ioctl_agp_init, DRM_AUTH | DRM_MASTER | DRM_ROOT_ONLY), | 325 | DRM_IOCTL_DEF_DRV(SIS_AGP_INIT, sis_ioctl_agp_init, DRM_AUTH | DRM_MASTER | DRM_ROOT_ONLY), |
| 326 | DRM_IOCTL_DEF(DRM_SIS_AGP_ALLOC, sis_ioctl_agp_alloc, DRM_AUTH), | 326 | DRM_IOCTL_DEF_DRV(SIS_AGP_ALLOC, sis_ioctl_agp_alloc, DRM_AUTH), |
| 327 | DRM_IOCTL_DEF(DRM_SIS_AGP_FREE, sis_drm_free, DRM_AUTH), | 327 | DRM_IOCTL_DEF_DRV(SIS_AGP_FREE, sis_drm_free, DRM_AUTH), |
| 328 | DRM_IOCTL_DEF(DRM_SIS_FB_INIT, sis_fb_init, DRM_AUTH | DRM_MASTER | DRM_ROOT_ONLY), | 328 | DRM_IOCTL_DEF_DRV(SIS_FB_INIT, sis_fb_init, DRM_AUTH | DRM_MASTER | DRM_ROOT_ONLY), |
| 329 | }; | 329 | }; |
| 330 | 330 | ||
| 331 | int sis_max_ioctl = DRM_ARRAY_SIZE(sis_ioctls); | 331 | int sis_max_ioctl = DRM_ARRAY_SIZE(sis_ioctls); |
diff --git a/drivers/gpu/drm/via/via_dma.c b/drivers/gpu/drm/via/via_dma.c index 68dda74a50ae..cc0ffa9abd00 100644 --- a/drivers/gpu/drm/via/via_dma.c +++ b/drivers/gpu/drm/via/via_dma.c | |||
| @@ -722,20 +722,20 @@ static int via_cmdbuf_size(struct drm_device *dev, void *data, struct drm_file * | |||
| 722 | } | 722 | } |
| 723 | 723 | ||
| 724 | struct drm_ioctl_desc via_ioctls[] = { | 724 | struct drm_ioctl_desc via_ioctls[] = { |
| 725 | DRM_IOCTL_DEF(DRM_VIA_ALLOCMEM, via_mem_alloc, DRM_AUTH), | 725 | DRM_IOCTL_DEF_DRV(VIA_ALLOCMEM, via_mem_alloc, DRM_AUTH), |
| 726 | DRM_IOCTL_DEF(DRM_VIA_FREEMEM, via_mem_free, DRM_AUTH), | 726 | DRM_IOCTL_DEF_DRV(VIA_FREEMEM, via_mem_free, DRM_AUTH), |
| 727 | DRM_IOCTL_DEF(DRM_VIA_AGP_INIT, via_agp_init, DRM_AUTH|DRM_MASTER), | 727 | DRM_IOCTL_DEF_DRV(VIA_AGP_INIT, via_agp_init, DRM_AUTH|DRM_MASTER), |
| 728 | DRM_IOCTL_DEF(DRM_VIA_FB_INIT, via_fb_init, DRM_AUTH|DRM_MASTER), | 728 | DRM_IOCTL_DEF_DRV(VIA_FB_INIT, via_fb_init, DRM_AUTH|DRM_MASTER), |
| 729 | DRM_IOCTL_DEF(DRM_VIA_MAP_INIT, via_map_init, DRM_AUTH|DRM_MASTER), | 729 | DRM_IOCTL_DEF_DRV(VIA_MAP_INIT, via_map_init, DRM_AUTH|DRM_MASTER), |
| 730 | DRM_IOCTL_DEF(DRM_VIA_DEC_FUTEX, via_decoder_futex, DRM_AUTH), | 730 | DRM_IOCTL_DEF_DRV(VIA_DEC_FUTEX, via_decoder_futex, DRM_AUTH), |
| 731 | DRM_IOCTL_DEF(DRM_VIA_DMA_INIT, via_dma_init, DRM_AUTH), | 731 | DRM_IOCTL_DEF_DRV(VIA_DMA_INIT, via_dma_init, DRM_AUTH), |
| 732 | DRM_IOCTL_DEF(DRM_VIA_CMDBUFFER, via_cmdbuffer, DRM_AUTH), | 732 | DRM_IOCTL_DEF_DRV(VIA_CMDBUFFER, via_cmdbuffer, DRM_AUTH), |
| 733 | DRM_IOCTL_DEF(DRM_VIA_FLUSH, via_flush_ioctl, DRM_AUTH), | 733 | DRM_IOCTL_DEF_DRV(VIA_FLUSH, via_flush_ioctl, DRM_AUTH), |
| 734 | DRM_IOCTL_DEF(DRM_VIA_PCICMD, via_pci_cmdbuffer, DRM_AUTH), | 734 | DRM_IOCTL_DEF_DRV(VIA_PCICMD, via_pci_cmdbuffer, DRM_AUTH), |
| 735 | DRM_IOCTL_DEF(DRM_VIA_CMDBUF_SIZE, via_cmdbuf_size, DRM_AUTH), | 735 | DRM_IOCTL_DEF_DRV(VIA_CMDBUF_SIZE, via_cmdbuf_size, DRM_AUTH), |
| 736 | DRM_IOCTL_DEF(DRM_VIA_WAIT_IRQ, via_wait_irq, DRM_AUTH), | 736 | DRM_IOCTL_DEF_DRV(VIA_WAIT_IRQ, via_wait_irq, DRM_AUTH), |
| 737 | DRM_IOCTL_DEF(DRM_VIA_DMA_BLIT, via_dma_blit, DRM_AUTH), | 737 | DRM_IOCTL_DEF_DRV(VIA_DMA_BLIT, via_dma_blit, DRM_AUTH), |
| 738 | DRM_IOCTL_DEF(DRM_VIA_BLIT_SYNC, via_dma_blit_sync, DRM_AUTH) | 738 | DRM_IOCTL_DEF_DRV(VIA_BLIT_SYNC, via_dma_blit_sync, DRM_AUTH) |
| 739 | }; | 739 | }; |
| 740 | 740 | ||
| 741 | int via_max_ioctl = DRM_ARRAY_SIZE(via_ioctls); | 741 | int via_max_ioctl = DRM_ARRAY_SIZE(via_ioctls); |
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c index 9dd395b90216..72ec2e2b6e97 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c | |||
| @@ -99,47 +99,47 @@ | |||
| 99 | */ | 99 | */ |
| 100 | 100 | ||
| 101 | #define VMW_IOCTL_DEF(ioctl, func, flags) \ | 101 | #define VMW_IOCTL_DEF(ioctl, func, flags) \ |
| 102 | [DRM_IOCTL_NR(ioctl) - DRM_COMMAND_BASE] = {ioctl, flags, func} | 102 | [DRM_IOCTL_NR(DRM_IOCTL_##ioctl) - DRM_COMMAND_BASE] = {DRM_##ioctl, flags, func, DRM_IOCTL_##ioctl} |
| 103 | 103 | ||
| 104 | /** | 104 | /** |
| 105 | * Ioctl definitions. | 105 | * Ioctl definitions. |
| 106 | */ | 106 | */ |
| 107 | 107 | ||
| 108 | static struct drm_ioctl_desc vmw_ioctls[] = { | 108 | static struct drm_ioctl_desc vmw_ioctls[] = { |
| 109 | VMW_IOCTL_DEF(DRM_IOCTL_VMW_GET_PARAM, vmw_getparam_ioctl, | 109 | VMW_IOCTL_DEF(VMW_GET_PARAM, vmw_getparam_ioctl, |
| 110 | DRM_AUTH | DRM_UNLOCKED), | 110 | DRM_AUTH | DRM_UNLOCKED), |
| 111 | VMW_IOCTL_DEF(DRM_IOCTL_VMW_ALLOC_DMABUF, vmw_dmabuf_alloc_ioctl, | 111 | VMW_IOCTL_DEF(VMW_ALLOC_DMABUF, vmw_dmabuf_alloc_ioctl, |
| 112 | DRM_AUTH | DRM_UNLOCKED), | 112 | DRM_AUTH | DRM_UNLOCKED), |
| 113 | VMW_IOCTL_DEF(DRM_IOCTL_VMW_UNREF_DMABUF, vmw_dmabuf_unref_ioctl, | 113 | VMW_IOCTL_DEF(VMW_UNREF_DMABUF, vmw_dmabuf_unref_ioctl, |
| 114 | DRM_AUTH | DRM_UNLOCKED), | 114 | DRM_AUTH | DRM_UNLOCKED), |
| 115 | VMW_IOCTL_DEF(DRM_IOCTL_VMW_CURSOR_BYPASS, | 115 | VMW_IOCTL_DEF(VMW_CURSOR_BYPASS, |
| 116 | vmw_kms_cursor_bypass_ioctl, | 116 | vmw_kms_cursor_bypass_ioctl, |
| 117 | DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED), | 117 | DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED), |
| 118 | 118 | ||
| 119 | VMW_IOCTL_DEF(DRM_IOCTL_VMW_CONTROL_STREAM, vmw_overlay_ioctl, | 119 | VMW_IOCTL_DEF(VMW_CONTROL_STREAM, vmw_overlay_ioctl, |
| 120 | DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED), | 120 | DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED), |
| 121 | VMW_IOCTL_DEF(DRM_IOCTL_VMW_CLAIM_STREAM, vmw_stream_claim_ioctl, | 121 | VMW_IOCTL_DEF(VMW_CLAIM_STREAM, vmw_stream_claim_ioctl, |
| 122 | DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED), | 122 | DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED), |
| 123 | VMW_IOCTL_DEF(DRM_IOCTL_VMW_UNREF_STREAM, vmw_stream_unref_ioctl, | 123 | VMW_IOCTL_DEF(VMW_UNREF_STREAM, vmw_stream_unref_ioctl, |
| 124 | DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED), | 124 | DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED), |
| 125 | 125 | ||
| 126 | VMW_IOCTL_DEF(DRM_IOCTL_VMW_CREATE_CONTEXT, vmw_context_define_ioctl, | 126 | VMW_IOCTL_DEF(VMW_CREATE_CONTEXT, vmw_context_define_ioctl, |
| 127 | DRM_AUTH | DRM_UNLOCKED), | 127 | DRM_AUTH | DRM_UNLOCKED), |
| 128 | VMW_IOCTL_DEF(DRM_IOCTL_VMW_UNREF_CONTEXT, vmw_context_destroy_ioctl, | 128 | VMW_IOCTL_DEF(VMW_UNREF_CONTEXT, vmw_context_destroy_ioctl, |
| 129 | DRM_AUTH | DRM_UNLOCKED), | 129 | DRM_AUTH | DRM_UNLOCKED), |
| 130 | VMW_IOCTL_DEF(DRM_IOCTL_VMW_CREATE_SURFACE, vmw_surface_define_ioctl, | 130 | VMW_IOCTL_DEF(VMW_CREATE_SURFACE, vmw_surface_define_ioctl, |
| 131 | DRM_AUTH | DRM_UNLOCKED), | 131 | DRM_AUTH | DRM_UNLOCKED), |
| 132 | VMW_IOCTL_DEF(DRM_IOCTL_VMW_UNREF_SURFACE, vmw_surface_destroy_ioctl, | 132 | VMW_IOCTL_DEF(VMW_UNREF_SURFACE, vmw_surface_destroy_ioctl, |
| 133 | DRM_AUTH | DRM_UNLOCKED), | 133 | DRM_AUTH | DRM_UNLOCKED), |
| 134 | VMW_IOCTL_DEF(DRM_IOCTL_VMW_REF_SURFACE, vmw_surface_reference_ioctl, | 134 | VMW_IOCTL_DEF(VMW_REF_SURFACE, vmw_surface_reference_ioctl, |
| 135 | DRM_AUTH | DRM_UNLOCKED), | 135 | DRM_AUTH | DRM_UNLOCKED), |
| 136 | VMW_IOCTL_DEF(DRM_IOCTL_VMW_EXECBUF, vmw_execbuf_ioctl, | 136 | VMW_IOCTL_DEF(VMW_EXECBUF, vmw_execbuf_ioctl, |
| 137 | DRM_AUTH | DRM_UNLOCKED), | 137 | DRM_AUTH | DRM_UNLOCKED), |
| 138 | VMW_IOCTL_DEF(DRM_IOCTL_VMW_FIFO_DEBUG, vmw_fifo_debug_ioctl, | 138 | VMW_IOCTL_DEF(VMW_FIFO_DEBUG, vmw_fifo_debug_ioctl, |
| 139 | DRM_AUTH | DRM_ROOT_ONLY | DRM_MASTER | DRM_UNLOCKED), | 139 | DRM_AUTH | DRM_ROOT_ONLY | DRM_MASTER | DRM_UNLOCKED), |
| 140 | VMW_IOCTL_DEF(DRM_IOCTL_VMW_FENCE_WAIT, vmw_fence_wait_ioctl, | 140 | VMW_IOCTL_DEF(VMW_FENCE_WAIT, vmw_fence_wait_ioctl, |
| 141 | DRM_AUTH | DRM_UNLOCKED), | 141 | DRM_AUTH | DRM_UNLOCKED), |
| 142 | VMW_IOCTL_DEF(DRM_IOCTL_VMW_UPDATE_LAYOUT, vmw_kms_update_layout_ioctl, | 142 | VMW_IOCTL_DEF(VMW_UPDATE_LAYOUT, vmw_kms_update_layout_ioctl, |
| 143 | DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED) | 143 | DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED) |
| 144 | }; | 144 | }; |
| 145 | 145 | ||
diff --git a/drivers/isdn/hardware/avm/Kconfig b/drivers/isdn/hardware/avm/Kconfig index 5dbcbe3a54a6..b99b906ea9b1 100644 --- a/drivers/isdn/hardware/avm/Kconfig +++ b/drivers/isdn/hardware/avm/Kconfig | |||
| @@ -36,12 +36,13 @@ config ISDN_DRV_AVMB1_T1ISA | |||
| 36 | 36 | ||
| 37 | config ISDN_DRV_AVMB1_B1PCMCIA | 37 | config ISDN_DRV_AVMB1_B1PCMCIA |
| 38 | tristate "AVM B1/M1/M2 PCMCIA support" | 38 | tristate "AVM B1/M1/M2 PCMCIA support" |
| 39 | depends on PCMCIA | ||
| 39 | help | 40 | help |
| 40 | Enable support for the PCMCIA version of the AVM B1 card. | 41 | Enable support for the PCMCIA version of the AVM B1 card. |
| 41 | 42 | ||
| 42 | config ISDN_DRV_AVMB1_AVM_CS | 43 | config ISDN_DRV_AVMB1_AVM_CS |
| 43 | tristate "AVM B1/M1/M2 PCMCIA cs module" | 44 | tristate "AVM B1/M1/M2 PCMCIA cs module" |
| 44 | depends on ISDN_DRV_AVMB1_B1PCMCIA && PCMCIA | 45 | depends on ISDN_DRV_AVMB1_B1PCMCIA |
| 45 | help | 46 | help |
| 46 | Enable the PCMCIA client driver for the AVM B1/M1/M2 | 47 | Enable the PCMCIA client driver for the AVM B1/M1/M2 |
| 47 | PCMCIA cards. | 48 | PCMCIA cards. |
diff --git a/drivers/macintosh/via-pmu.c b/drivers/macintosh/via-pmu.c index 35bc2737412f..2d17e76066bd 100644 --- a/drivers/macintosh/via-pmu.c +++ b/drivers/macintosh/via-pmu.c | |||
| @@ -45,6 +45,7 @@ | |||
| 45 | #include <linux/syscalls.h> | 45 | #include <linux/syscalls.h> |
| 46 | #include <linux/suspend.h> | 46 | #include <linux/suspend.h> |
| 47 | #include <linux/cpu.h> | 47 | #include <linux/cpu.h> |
| 48 | #include <linux/compat.h> | ||
| 48 | #include <asm/prom.h> | 49 | #include <asm/prom.h> |
| 49 | #include <asm/machdep.h> | 50 | #include <asm/machdep.h> |
| 50 | #include <asm/io.h> | 51 | #include <asm/io.h> |
| @@ -2349,11 +2350,52 @@ static long pmu_unlocked_ioctl(struct file *filp, | |||
| 2349 | return ret; | 2350 | return ret; |
| 2350 | } | 2351 | } |
| 2351 | 2352 | ||
| 2353 | #ifdef CONFIG_COMPAT | ||
| 2354 | #define PMU_IOC_GET_BACKLIGHT32 _IOR('B', 1, compat_size_t) | ||
| 2355 | #define PMU_IOC_SET_BACKLIGHT32 _IOW('B', 2, compat_size_t) | ||
| 2356 | #define PMU_IOC_GET_MODEL32 _IOR('B', 3, compat_size_t) | ||
| 2357 | #define PMU_IOC_HAS_ADB32 _IOR('B', 4, compat_size_t) | ||
| 2358 | #define PMU_IOC_CAN_SLEEP32 _IOR('B', 5, compat_size_t) | ||
| 2359 | #define PMU_IOC_GRAB_BACKLIGHT32 _IOR('B', 6, compat_size_t) | ||
| 2360 | |||
| 2361 | static long compat_pmu_ioctl (struct file *filp, u_int cmd, u_long arg) | ||
| 2362 | { | ||
| 2363 | switch (cmd) { | ||
| 2364 | case PMU_IOC_SLEEP: | ||
| 2365 | break; | ||
| 2366 | case PMU_IOC_GET_BACKLIGHT32: | ||
| 2367 | cmd = PMU_IOC_GET_BACKLIGHT; | ||
| 2368 | break; | ||
| 2369 | case PMU_IOC_SET_BACKLIGHT32: | ||
| 2370 | cmd = PMU_IOC_SET_BACKLIGHT; | ||
| 2371 | break; | ||
| 2372 | case PMU_IOC_GET_MODEL32: | ||
| 2373 | cmd = PMU_IOC_GET_MODEL; | ||
| 2374 | break; | ||
| 2375 | case PMU_IOC_HAS_ADB32: | ||
| 2376 | cmd = PMU_IOC_HAS_ADB; | ||
| 2377 | break; | ||
| 2378 | case PMU_IOC_CAN_SLEEP32: | ||
| 2379 | cmd = PMU_IOC_CAN_SLEEP; | ||
| 2380 | break; | ||
| 2381 | case PMU_IOC_GRAB_BACKLIGHT32: | ||
| 2382 | cmd = PMU_IOC_GRAB_BACKLIGHT; | ||
| 2383 | break; | ||
| 2384 | default: | ||
| 2385 | return -ENOIOCTLCMD; | ||
| 2386 | } | ||
| 2387 | return pmu_unlocked_ioctl(filp, cmd, (unsigned long)compat_ptr(arg)); | ||
| 2388 | } | ||
| 2389 | #endif | ||
| 2390 | |||
| 2352 | static const struct file_operations pmu_device_fops = { | 2391 | static const struct file_operations pmu_device_fops = { |
| 2353 | .read = pmu_read, | 2392 | .read = pmu_read, |
| 2354 | .write = pmu_write, | 2393 | .write = pmu_write, |
| 2355 | .poll = pmu_fpoll, | 2394 | .poll = pmu_fpoll, |
| 2356 | .unlocked_ioctl = pmu_unlocked_ioctl, | 2395 | .unlocked_ioctl = pmu_unlocked_ioctl, |
| 2396 | #ifdef CONFIG_COMPAT | ||
| 2397 | .compat_ioctl = compat_pmu_ioctl, | ||
| 2398 | #endif | ||
| 2357 | .open = pmu_open, | 2399 | .open = pmu_open, |
| 2358 | .release = pmu_release, | 2400 | .release = pmu_release, |
| 2359 | }; | 2401 | }; |
diff --git a/drivers/media/dvb/mantis/Kconfig b/drivers/media/dvb/mantis/Kconfig index decdeda840d0..fd0830ed10d8 100644 --- a/drivers/media/dvb/mantis/Kconfig +++ b/drivers/media/dvb/mantis/Kconfig | |||
| @@ -1,6 +1,6 @@ | |||
| 1 | config MANTIS_CORE | 1 | config MANTIS_CORE |
| 2 | tristate "Mantis/Hopper PCI bridge based devices" | 2 | tristate "Mantis/Hopper PCI bridge based devices" |
| 3 | depends on PCI && I2C && INPUT | 3 | depends on PCI && I2C && INPUT && IR_CORE |
| 4 | 4 | ||
| 5 | help | 5 | help |
| 6 | Support for PCI cards based on the Mantis and Hopper PCi bridge. | 6 | Support for PCI cards based on the Mantis and Hopper PCi bridge. |
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig index 5a6895320b48..2cc81a54cbf3 100644 --- a/drivers/net/Kconfig +++ b/drivers/net/Kconfig | |||
| @@ -928,6 +928,16 @@ config SMC91X | |||
| 928 | The module will be called smc91x. If you want to compile it as a | 928 | The module will be called smc91x. If you want to compile it as a |
| 929 | module, say M here and read <file:Documentation/kbuild/modules.txt>. | 929 | module, say M here and read <file:Documentation/kbuild/modules.txt>. |
| 930 | 930 | ||
| 931 | config PXA168_ETH | ||
| 932 | tristate "Marvell pxa168 ethernet support" | ||
| 933 | depends on CPU_PXA168 | ||
| 934 | select PHYLIB | ||
| 935 | help | ||
| 936 | This driver supports the pxa168 Ethernet ports. | ||
| 937 | |||
| 938 | To compile this driver as a module, choose M here. The module | ||
| 939 | will be called pxa168_eth. | ||
| 940 | |||
| 931 | config NET_NETX | 941 | config NET_NETX |
| 932 | tristate "NetX Ethernet support" | 942 | tristate "NetX Ethernet support" |
| 933 | select MII | 943 | select MII |
diff --git a/drivers/net/Makefile b/drivers/net/Makefile index 56e8c27f77ce..3e8f150c4b14 100644 --- a/drivers/net/Makefile +++ b/drivers/net/Makefile | |||
| @@ -244,6 +244,7 @@ obj-$(CONFIG_MYRI10GE) += myri10ge/ | |||
| 244 | obj-$(CONFIG_SMC91X) += smc91x.o | 244 | obj-$(CONFIG_SMC91X) += smc91x.o |
| 245 | obj-$(CONFIG_SMC911X) += smc911x.o | 245 | obj-$(CONFIG_SMC911X) += smc911x.o |
| 246 | obj-$(CONFIG_SMSC911X) += smsc911x.o | 246 | obj-$(CONFIG_SMSC911X) += smsc911x.o |
| 247 | obj-$(CONFIG_PXA168_ETH) += pxa168_eth.o | ||
| 247 | obj-$(CONFIG_BFIN_MAC) += bfin_mac.o | 248 | obj-$(CONFIG_BFIN_MAC) += bfin_mac.o |
| 248 | obj-$(CONFIG_DM9000) += dm9000.o | 249 | obj-$(CONFIG_DM9000) += dm9000.o |
| 249 | obj-$(CONFIG_PASEMI_MAC) += pasemi_mac_driver.o | 250 | obj-$(CONFIG_PASEMI_MAC) += pasemi_mac_driver.o |
diff --git a/drivers/net/bnx2x/bnx2x.h b/drivers/net/bnx2x/bnx2x.h index 53af9c93e75c..0c2d96ed561c 100644 --- a/drivers/net/bnx2x/bnx2x.h +++ b/drivers/net/bnx2x/bnx2x.h | |||
| @@ -20,8 +20,8 @@ | |||
| 20 | * (you will need to reboot afterwards) */ | 20 | * (you will need to reboot afterwards) */ |
| 21 | /* #define BNX2X_STOP_ON_ERROR */ | 21 | /* #define BNX2X_STOP_ON_ERROR */ |
| 22 | 22 | ||
| 23 | #define DRV_MODULE_VERSION "1.52.53-3" | 23 | #define DRV_MODULE_VERSION "1.52.53-4" |
| 24 | #define DRV_MODULE_RELDATE "2010/18/04" | 24 | #define DRV_MODULE_RELDATE "2010/16/08" |
| 25 | #define BNX2X_BC_VER 0x040200 | 25 | #define BNX2X_BC_VER 0x040200 |
| 26 | 26 | ||
| 27 | #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE) | 27 | #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE) |
diff --git a/drivers/net/bnx2x/bnx2x_main.c b/drivers/net/bnx2x/bnx2x_main.c index b4ec2b02a465..f8c3f08e4ce7 100644 --- a/drivers/net/bnx2x/bnx2x_main.c +++ b/drivers/net/bnx2x/bnx2x_main.c | |||
| @@ -4328,10 +4328,12 @@ static int bnx2x_init_port(struct bnx2x *bp) | |||
| 4328 | val |= aeu_gpio_mask; | 4328 | val |= aeu_gpio_mask; |
| 4329 | REG_WR(bp, offset, val); | 4329 | REG_WR(bp, offset, val); |
| 4330 | } | 4330 | } |
| 4331 | bp->port.need_hw_lock = 1; | ||
| 4331 | break; | 4332 | break; |
| 4332 | 4333 | ||
| 4333 | case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101: | ||
| 4334 | case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727: | 4334 | case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727: |
| 4335 | bp->port.need_hw_lock = 1; | ||
| 4336 | case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101: | ||
| 4335 | /* add SPIO 5 to group 0 */ | 4337 | /* add SPIO 5 to group 0 */ |
| 4336 | { | 4338 | { |
| 4337 | u32 reg_addr = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 : | 4339 | u32 reg_addr = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 : |
| @@ -4341,7 +4343,10 @@ static int bnx2x_init_port(struct bnx2x *bp) | |||
| 4341 | REG_WR(bp, reg_addr, val); | 4343 | REG_WR(bp, reg_addr, val); |
| 4342 | } | 4344 | } |
| 4343 | break; | 4345 | break; |
| 4344 | 4346 | case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072: | |
| 4347 | case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073: | ||
| 4348 | bp->port.need_hw_lock = 1; | ||
| 4349 | break; | ||
| 4345 | default: | 4350 | default: |
| 4346 | break; | 4351 | break; |
| 4347 | } | 4352 | } |
diff --git a/drivers/net/e1000e/82571.c b/drivers/net/e1000e/82571.c index a4a0d2b6eb1c..d3d4a57e2450 100644 --- a/drivers/net/e1000e/82571.c +++ b/drivers/net/e1000e/82571.c | |||
| @@ -936,12 +936,14 @@ static s32 e1000_reset_hw_82571(struct e1000_hw *hw) | |||
| 936 | ew32(IMC, 0xffffffff); | 936 | ew32(IMC, 0xffffffff); |
| 937 | icr = er32(ICR); | 937 | icr = er32(ICR); |
| 938 | 938 | ||
| 939 | /* Install any alternate MAC address into RAR0 */ | 939 | if (hw->mac.type == e1000_82571) { |
| 940 | ret_val = e1000_check_alt_mac_addr_generic(hw); | 940 | /* Install any alternate MAC address into RAR0 */ |
| 941 | if (ret_val) | 941 | ret_val = e1000_check_alt_mac_addr_generic(hw); |
| 942 | return ret_val; | 942 | if (ret_val) |
| 943 | return ret_val; | ||
| 943 | 944 | ||
| 944 | e1000e_set_laa_state_82571(hw, true); | 945 | e1000e_set_laa_state_82571(hw, true); |
| 946 | } | ||
| 945 | 947 | ||
| 946 | /* Reinitialize the 82571 serdes link state machine */ | 948 | /* Reinitialize the 82571 serdes link state machine */ |
| 947 | if (hw->phy.media_type == e1000_media_type_internal_serdes) | 949 | if (hw->phy.media_type == e1000_media_type_internal_serdes) |
| @@ -1618,14 +1620,16 @@ static s32 e1000_read_mac_addr_82571(struct e1000_hw *hw) | |||
| 1618 | { | 1620 | { |
| 1619 | s32 ret_val = 0; | 1621 | s32 ret_val = 0; |
| 1620 | 1622 | ||
| 1621 | /* | 1623 | if (hw->mac.type == e1000_82571) { |
| 1622 | * If there's an alternate MAC address place it in RAR0 | 1624 | /* |
| 1623 | * so that it will override the Si installed default perm | 1625 | * If there's an alternate MAC address place it in RAR0 |
| 1624 | * address. | 1626 | * so that it will override the Si installed default perm |
| 1625 | */ | 1627 | * address. |
| 1626 | ret_val = e1000_check_alt_mac_addr_generic(hw); | 1628 | */ |
| 1627 | if (ret_val) | 1629 | ret_val = e1000_check_alt_mac_addr_generic(hw); |
| 1628 | goto out; | 1630 | if (ret_val) |
| 1631 | goto out; | ||
| 1632 | } | ||
| 1629 | 1633 | ||
| 1630 | ret_val = e1000_read_mac_addr_generic(hw); | 1634 | ret_val = e1000_read_mac_addr_generic(hw); |
| 1631 | 1635 | ||
| @@ -1833,6 +1837,7 @@ struct e1000_info e1000_82573_info = { | |||
| 1833 | | FLAG_HAS_SMART_POWER_DOWN | 1837 | | FLAG_HAS_SMART_POWER_DOWN |
| 1834 | | FLAG_HAS_AMT | 1838 | | FLAG_HAS_AMT |
| 1835 | | FLAG_HAS_SWSM_ON_LOAD, | 1839 | | FLAG_HAS_SWSM_ON_LOAD, |
| 1840 | .flags2 = FLAG2_DISABLE_ASPM_L1, | ||
| 1836 | .pba = 20, | 1841 | .pba = 20, |
| 1837 | .max_hw_frame_size = ETH_FRAME_LEN + ETH_FCS_LEN, | 1842 | .max_hw_frame_size = ETH_FRAME_LEN + ETH_FCS_LEN, |
| 1838 | .get_variants = e1000_get_variants_82571, | 1843 | .get_variants = e1000_get_variants_82571, |
diff --git a/drivers/net/e1000e/defines.h b/drivers/net/e1000e/defines.h index 307a72f483ee..93b3bedae8d2 100644 --- a/drivers/net/e1000e/defines.h +++ b/drivers/net/e1000e/defines.h | |||
| @@ -621,6 +621,7 @@ | |||
| 621 | #define E1000_FLASH_UPDATES 2000 | 621 | #define E1000_FLASH_UPDATES 2000 |
| 622 | 622 | ||
| 623 | /* NVM Word Offsets */ | 623 | /* NVM Word Offsets */ |
| 624 | #define NVM_COMPAT 0x0003 | ||
| 624 | #define NVM_ID_LED_SETTINGS 0x0004 | 625 | #define NVM_ID_LED_SETTINGS 0x0004 |
| 625 | #define NVM_INIT_CONTROL2_REG 0x000F | 626 | #define NVM_INIT_CONTROL2_REG 0x000F |
| 626 | #define NVM_INIT_CONTROL3_PORT_B 0x0014 | 627 | #define NVM_INIT_CONTROL3_PORT_B 0x0014 |
| @@ -643,6 +644,9 @@ | |||
| 643 | /* Mask bits for fields in Word 0x1a of the NVM */ | 644 | /* Mask bits for fields in Word 0x1a of the NVM */ |
| 644 | #define NVM_WORD1A_ASPM_MASK 0x000C | 645 | #define NVM_WORD1A_ASPM_MASK 0x000C |
| 645 | 646 | ||
| 647 | /* Mask bits for fields in Word 0x03 of the EEPROM */ | ||
| 648 | #define NVM_COMPAT_LOM 0x0800 | ||
| 649 | |||
| 646 | /* For checksumming, the sum of all words in the NVM should equal 0xBABA. */ | 650 | /* For checksumming, the sum of all words in the NVM should equal 0xBABA. */ |
| 647 | #define NVM_SUM 0xBABA | 651 | #define NVM_SUM 0xBABA |
| 648 | 652 | ||
diff --git a/drivers/net/e1000e/lib.c b/drivers/net/e1000e/lib.c index df4a27922931..0fd4eb5ac5fb 100644 --- a/drivers/net/e1000e/lib.c +++ b/drivers/net/e1000e/lib.c | |||
| @@ -183,6 +183,16 @@ s32 e1000_check_alt_mac_addr_generic(struct e1000_hw *hw) | |||
| 183 | u16 offset, nvm_alt_mac_addr_offset, nvm_data; | 183 | u16 offset, nvm_alt_mac_addr_offset, nvm_data; |
| 184 | u8 alt_mac_addr[ETH_ALEN]; | 184 | u8 alt_mac_addr[ETH_ALEN]; |
| 185 | 185 | ||
| 186 | ret_val = e1000_read_nvm(hw, NVM_COMPAT, 1, &nvm_data); | ||
| 187 | if (ret_val) | ||
| 188 | goto out; | ||
| 189 | |||
| 190 | /* Check for LOM (vs. NIC) or one of two valid mezzanine cards */ | ||
| 191 | if (!((nvm_data & NVM_COMPAT_LOM) || | ||
| 192 | (hw->adapter->pdev->device == E1000_DEV_ID_82571EB_SERDES_DUAL) || | ||
| 193 | (hw->adapter->pdev->device == E1000_DEV_ID_82571EB_SERDES_QUAD))) | ||
| 194 | goto out; | ||
| 195 | |||
| 186 | ret_val = e1000_read_nvm(hw, NVM_ALT_MAC_ADDR_PTR, 1, | 196 | ret_val = e1000_read_nvm(hw, NVM_ALT_MAC_ADDR_PTR, 1, |
| 187 | &nvm_alt_mac_addr_offset); | 197 | &nvm_alt_mac_addr_offset); |
| 188 | if (ret_val) { | 198 | if (ret_val) { |
diff --git a/drivers/net/ehea/ehea.h b/drivers/net/ehea/ehea.h index 99a929964e3c..1846623c6ae6 100644 --- a/drivers/net/ehea/ehea.h +++ b/drivers/net/ehea/ehea.h | |||
| @@ -40,7 +40,7 @@ | |||
| 40 | #include <asm/io.h> | 40 | #include <asm/io.h> |
| 41 | 41 | ||
| 42 | #define DRV_NAME "ehea" | 42 | #define DRV_NAME "ehea" |
| 43 | #define DRV_VERSION "EHEA_0105" | 43 | #define DRV_VERSION "EHEA_0106" |
| 44 | 44 | ||
| 45 | /* eHEA capability flags */ | 45 | /* eHEA capability flags */ |
| 46 | #define DLPAR_PORT_ADD_REM 1 | 46 | #define DLPAR_PORT_ADD_REM 1 |
| @@ -400,6 +400,7 @@ struct ehea_port_res { | |||
| 400 | u32 poll_counter; | 400 | u32 poll_counter; |
| 401 | struct net_lro_mgr lro_mgr; | 401 | struct net_lro_mgr lro_mgr; |
| 402 | struct net_lro_desc lro_desc[MAX_LRO_DESCRIPTORS]; | 402 | struct net_lro_desc lro_desc[MAX_LRO_DESCRIPTORS]; |
| 403 | int sq_restart_flag; | ||
| 403 | }; | 404 | }; |
| 404 | 405 | ||
| 405 | 406 | ||
diff --git a/drivers/net/ehea/ehea_main.c b/drivers/net/ehea/ehea_main.c index 897719b49f96..a333b42111b8 100644 --- a/drivers/net/ehea/ehea_main.c +++ b/drivers/net/ehea/ehea_main.c | |||
| @@ -776,6 +776,53 @@ static int ehea_proc_rwqes(struct net_device *dev, | |||
| 776 | return processed; | 776 | return processed; |
| 777 | } | 777 | } |
| 778 | 778 | ||
| 779 | #define SWQE_RESTART_CHECK 0xdeadbeaff00d0000ull | ||
| 780 | |||
| 781 | static void reset_sq_restart_flag(struct ehea_port *port) | ||
| 782 | { | ||
| 783 | int i; | ||
| 784 | |||
| 785 | for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) { | ||
| 786 | struct ehea_port_res *pr = &port->port_res[i]; | ||
| 787 | pr->sq_restart_flag = 0; | ||
| 788 | } | ||
| 789 | } | ||
| 790 | |||
| 791 | static void check_sqs(struct ehea_port *port) | ||
| 792 | { | ||
| 793 | struct ehea_swqe *swqe; | ||
| 794 | int swqe_index; | ||
| 795 | int i, k; | ||
| 796 | |||
| 797 | for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) { | ||
| 798 | struct ehea_port_res *pr = &port->port_res[i]; | ||
| 799 | k = 0; | ||
| 800 | swqe = ehea_get_swqe(pr->qp, &swqe_index); | ||
| 801 | memset(swqe, 0, SWQE_HEADER_SIZE); | ||
| 802 | atomic_dec(&pr->swqe_avail); | ||
| 803 | |||
| 804 | swqe->tx_control |= EHEA_SWQE_PURGE; | ||
| 805 | swqe->wr_id = SWQE_RESTART_CHECK; | ||
| 806 | swqe->tx_control |= EHEA_SWQE_SIGNALLED_COMPLETION; | ||
| 807 | swqe->tx_control |= EHEA_SWQE_IMM_DATA_PRESENT; | ||
| 808 | swqe->immediate_data_length = 80; | ||
| 809 | |||
| 810 | ehea_post_swqe(pr->qp, swqe); | ||
| 811 | |||
| 812 | while (pr->sq_restart_flag == 0) { | ||
| 813 | msleep(5); | ||
| 814 | if (++k == 100) { | ||
| 815 | ehea_error("HW/SW queues out of sync"); | ||
| 816 | ehea_schedule_port_reset(pr->port); | ||
| 817 | return; | ||
| 818 | } | ||
| 819 | } | ||
| 820 | } | ||
| 821 | |||
| 822 | return; | ||
| 823 | } | ||
| 824 | |||
| 825 | |||
| 779 | static struct ehea_cqe *ehea_proc_cqes(struct ehea_port_res *pr, int my_quota) | 826 | static struct ehea_cqe *ehea_proc_cqes(struct ehea_port_res *pr, int my_quota) |
| 780 | { | 827 | { |
| 781 | struct sk_buff *skb; | 828 | struct sk_buff *skb; |
| @@ -793,6 +840,13 @@ static struct ehea_cqe *ehea_proc_cqes(struct ehea_port_res *pr, int my_quota) | |||
| 793 | 840 | ||
| 794 | cqe_counter++; | 841 | cqe_counter++; |
| 795 | rmb(); | 842 | rmb(); |
| 843 | |||
| 844 | if (cqe->wr_id == SWQE_RESTART_CHECK) { | ||
| 845 | pr->sq_restart_flag = 1; | ||
| 846 | swqe_av++; | ||
| 847 | break; | ||
| 848 | } | ||
| 849 | |||
| 796 | if (cqe->status & EHEA_CQE_STAT_ERR_MASK) { | 850 | if (cqe->status & EHEA_CQE_STAT_ERR_MASK) { |
| 797 | ehea_error("Bad send completion status=0x%04X", | 851 | ehea_error("Bad send completion status=0x%04X", |
| 798 | cqe->status); | 852 | cqe->status); |
| @@ -2675,8 +2729,10 @@ static void ehea_flush_sq(struct ehea_port *port) | |||
| 2675 | int k = 0; | 2729 | int k = 0; |
| 2676 | while (atomic_read(&pr->swqe_avail) < swqe_max) { | 2730 | while (atomic_read(&pr->swqe_avail) < swqe_max) { |
| 2677 | msleep(5); | 2731 | msleep(5); |
| 2678 | if (++k == 20) | 2732 | if (++k == 20) { |
| 2733 | ehea_error("WARNING: sq not flushed completely"); | ||
| 2679 | break; | 2734 | break; |
| 2735 | } | ||
| 2680 | } | 2736 | } |
| 2681 | } | 2737 | } |
| 2682 | } | 2738 | } |
| @@ -2917,6 +2973,7 @@ static void ehea_rereg_mrs(struct work_struct *work) | |||
| 2917 | port_napi_disable(port); | 2973 | port_napi_disable(port); |
| 2918 | mutex_unlock(&port->port_lock); | 2974 | mutex_unlock(&port->port_lock); |
| 2919 | } | 2975 | } |
| 2976 | reset_sq_restart_flag(port); | ||
| 2920 | } | 2977 | } |
| 2921 | 2978 | ||
| 2922 | /* Unregister old memory region */ | 2979 | /* Unregister old memory region */ |
| @@ -2951,6 +3008,7 @@ static void ehea_rereg_mrs(struct work_struct *work) | |||
| 2951 | mutex_lock(&port->port_lock); | 3008 | mutex_lock(&port->port_lock); |
| 2952 | port_napi_enable(port); | 3009 | port_napi_enable(port); |
| 2953 | ret = ehea_restart_qps(dev); | 3010 | ret = ehea_restart_qps(dev); |
| 3011 | check_sqs(port); | ||
| 2954 | if (!ret) | 3012 | if (!ret) |
| 2955 | netif_wake_queue(dev); | 3013 | netif_wake_queue(dev); |
| 2956 | mutex_unlock(&port->port_lock); | 3014 | mutex_unlock(&port->port_lock); |
diff --git a/drivers/net/ibmveth.c b/drivers/net/ibmveth.c index 2602852cc55a..4734c939ad03 100644 --- a/drivers/net/ibmveth.c +++ b/drivers/net/ibmveth.c | |||
| @@ -1113,7 +1113,8 @@ static int ibmveth_change_mtu(struct net_device *dev, int new_mtu) | |||
| 1113 | struct ibmveth_adapter *adapter = netdev_priv(dev); | 1113 | struct ibmveth_adapter *adapter = netdev_priv(dev); |
| 1114 | struct vio_dev *viodev = adapter->vdev; | 1114 | struct vio_dev *viodev = adapter->vdev; |
| 1115 | int new_mtu_oh = new_mtu + IBMVETH_BUFF_OH; | 1115 | int new_mtu_oh = new_mtu + IBMVETH_BUFF_OH; |
| 1116 | int i; | 1116 | int i, rc; |
| 1117 | int need_restart = 0; | ||
| 1117 | 1118 | ||
| 1118 | if (new_mtu < IBMVETH_MAX_MTU) | 1119 | if (new_mtu < IBMVETH_MAX_MTU) |
| 1119 | return -EINVAL; | 1120 | return -EINVAL; |
| @@ -1127,35 +1128,32 @@ static int ibmveth_change_mtu(struct net_device *dev, int new_mtu) | |||
| 1127 | 1128 | ||
| 1128 | /* Deactivate all the buffer pools so that the next loop can activate | 1129 | /* Deactivate all the buffer pools so that the next loop can activate |
| 1129 | only the buffer pools necessary to hold the new MTU */ | 1130 | only the buffer pools necessary to hold the new MTU */ |
| 1130 | for (i = 0; i < IbmVethNumBufferPools; i++) | 1131 | if (netif_running(adapter->netdev)) { |
| 1131 | if (adapter->rx_buff_pool[i].active) { | 1132 | need_restart = 1; |
| 1132 | ibmveth_free_buffer_pool(adapter, | 1133 | adapter->pool_config = 1; |
| 1133 | &adapter->rx_buff_pool[i]); | 1134 | ibmveth_close(adapter->netdev); |
| 1134 | adapter->rx_buff_pool[i].active = 0; | 1135 | adapter->pool_config = 0; |
| 1135 | } | 1136 | } |
| 1136 | 1137 | ||
| 1137 | /* Look for an active buffer pool that can hold the new MTU */ | 1138 | /* Look for an active buffer pool that can hold the new MTU */ |
| 1138 | for(i = 0; i<IbmVethNumBufferPools; i++) { | 1139 | for(i = 0; i<IbmVethNumBufferPools; i++) { |
| 1139 | adapter->rx_buff_pool[i].active = 1; | 1140 | adapter->rx_buff_pool[i].active = 1; |
| 1140 | 1141 | ||
| 1141 | if (new_mtu_oh < adapter->rx_buff_pool[i].buff_size) { | 1142 | if (new_mtu_oh < adapter->rx_buff_pool[i].buff_size) { |
| 1142 | if (netif_running(adapter->netdev)) { | ||
| 1143 | adapter->pool_config = 1; | ||
| 1144 | ibmveth_close(adapter->netdev); | ||
| 1145 | adapter->pool_config = 0; | ||
| 1146 | dev->mtu = new_mtu; | ||
| 1147 | vio_cmo_set_dev_desired(viodev, | ||
| 1148 | ibmveth_get_desired_dma | ||
| 1149 | (viodev)); | ||
| 1150 | return ibmveth_open(adapter->netdev); | ||
| 1151 | } | ||
| 1152 | dev->mtu = new_mtu; | 1143 | dev->mtu = new_mtu; |
| 1153 | vio_cmo_set_dev_desired(viodev, | 1144 | vio_cmo_set_dev_desired(viodev, |
| 1154 | ibmveth_get_desired_dma | 1145 | ibmveth_get_desired_dma |
| 1155 | (viodev)); | 1146 | (viodev)); |
| 1147 | if (need_restart) { | ||
| 1148 | return ibmveth_open(adapter->netdev); | ||
| 1149 | } | ||
| 1156 | return 0; | 1150 | return 0; |
| 1157 | } | 1151 | } |
| 1158 | } | 1152 | } |
| 1153 | |||
| 1154 | if (need_restart && (rc = ibmveth_open(adapter->netdev))) | ||
| 1155 | return rc; | ||
| 1156 | |||
| 1159 | return -EINVAL; | 1157 | return -EINVAL; |
| 1160 | } | 1158 | } |
| 1161 | 1159 | ||
diff --git a/drivers/net/ll_temac_main.c b/drivers/net/ll_temac_main.c index c7b624711f5e..bdf2149e5296 100644 --- a/drivers/net/ll_temac_main.c +++ b/drivers/net/ll_temac_main.c | |||
| @@ -902,8 +902,8 @@ temac_poll_controller(struct net_device *ndev) | |||
| 902 | disable_irq(lp->tx_irq); | 902 | disable_irq(lp->tx_irq); |
| 903 | disable_irq(lp->rx_irq); | 903 | disable_irq(lp->rx_irq); |
| 904 | 904 | ||
| 905 | ll_temac_rx_irq(lp->tx_irq, lp); | 905 | ll_temac_rx_irq(lp->tx_irq, ndev); |
| 906 | ll_temac_tx_irq(lp->rx_irq, lp); | 906 | ll_temac_tx_irq(lp->rx_irq, ndev); |
| 907 | 907 | ||
| 908 | enable_irq(lp->tx_irq); | 908 | enable_irq(lp->tx_irq); |
| 909 | enable_irq(lp->rx_irq); | 909 | enable_irq(lp->rx_irq); |
diff --git a/drivers/net/netxen/netxen_nic.h b/drivers/net/netxen/netxen_nic.h index ffa1b9ce1cc5..6dca3574e355 100644 --- a/drivers/net/netxen/netxen_nic.h +++ b/drivers/net/netxen/netxen_nic.h | |||
| @@ -53,8 +53,8 @@ | |||
| 53 | 53 | ||
| 54 | #define _NETXEN_NIC_LINUX_MAJOR 4 | 54 | #define _NETXEN_NIC_LINUX_MAJOR 4 |
| 55 | #define _NETXEN_NIC_LINUX_MINOR 0 | 55 | #define _NETXEN_NIC_LINUX_MINOR 0 |
| 56 | #define _NETXEN_NIC_LINUX_SUBVERSION 73 | 56 | #define _NETXEN_NIC_LINUX_SUBVERSION 74 |
| 57 | #define NETXEN_NIC_LINUX_VERSIONID "4.0.73" | 57 | #define NETXEN_NIC_LINUX_VERSIONID "4.0.74" |
| 58 | 58 | ||
| 59 | #define NETXEN_VERSION_CODE(a, b, c) (((a) << 24) + ((b) << 16) + (c)) | 59 | #define NETXEN_VERSION_CODE(a, b, c) (((a) << 24) + ((b) << 16) + (c)) |
| 60 | #define _major(v) (((v) >> 24) & 0xff) | 60 | #define _major(v) (((v) >> 24) & 0xff) |
diff --git a/drivers/net/netxen/netxen_nic_init.c b/drivers/net/netxen/netxen_nic_init.c index c865dda2adf1..cabae7bb1fc6 100644 --- a/drivers/net/netxen/netxen_nic_init.c +++ b/drivers/net/netxen/netxen_nic_init.c | |||
| @@ -1805,8 +1805,6 @@ netxen_post_rx_buffers(struct netxen_adapter *adapter, u32 ringid, | |||
| 1805 | netxen_ctx_msg msg = 0; | 1805 | netxen_ctx_msg msg = 0; |
| 1806 | struct list_head *head; | 1806 | struct list_head *head; |
| 1807 | 1807 | ||
| 1808 | spin_lock(&rds_ring->lock); | ||
| 1809 | |||
| 1810 | producer = rds_ring->producer; | 1808 | producer = rds_ring->producer; |
| 1811 | 1809 | ||
| 1812 | head = &rds_ring->free_list; | 1810 | head = &rds_ring->free_list; |
| @@ -1853,8 +1851,6 @@ netxen_post_rx_buffers(struct netxen_adapter *adapter, u32 ringid, | |||
| 1853 | NETXEN_RCV_PRODUCER_OFFSET), msg); | 1851 | NETXEN_RCV_PRODUCER_OFFSET), msg); |
| 1854 | } | 1852 | } |
| 1855 | } | 1853 | } |
| 1856 | |||
| 1857 | spin_unlock(&rds_ring->lock); | ||
| 1858 | } | 1854 | } |
| 1859 | 1855 | ||
| 1860 | static void | 1856 | static void |
diff --git a/drivers/net/netxen/netxen_nic_main.c b/drivers/net/netxen/netxen_nic_main.c index fd86e18604e6..cb30df106a2c 100644 --- a/drivers/net/netxen/netxen_nic_main.c +++ b/drivers/net/netxen/netxen_nic_main.c | |||
| @@ -2032,8 +2032,6 @@ struct net_device_stats *netxen_nic_get_stats(struct net_device *netdev) | |||
| 2032 | struct netxen_adapter *adapter = netdev_priv(netdev); | 2032 | struct netxen_adapter *adapter = netdev_priv(netdev); |
| 2033 | struct net_device_stats *stats = &netdev->stats; | 2033 | struct net_device_stats *stats = &netdev->stats; |
| 2034 | 2034 | ||
| 2035 | memset(stats, 0, sizeof(*stats)); | ||
| 2036 | |||
| 2037 | stats->rx_packets = adapter->stats.rx_pkts + adapter->stats.lro_pkts; | 2035 | stats->rx_packets = adapter->stats.rx_pkts + adapter->stats.lro_pkts; |
| 2038 | stats->tx_packets = adapter->stats.xmitfinished; | 2036 | stats->tx_packets = adapter->stats.xmitfinished; |
| 2039 | stats->rx_bytes = adapter->stats.rxbytes; | 2037 | stats->rx_bytes = adapter->stats.rxbytes; |
diff --git a/drivers/net/pxa168_eth.c b/drivers/net/pxa168_eth.c new file mode 100644 index 000000000000..ecc64d750cce --- /dev/null +++ b/drivers/net/pxa168_eth.c | |||
| @@ -0,0 +1,1666 @@ | |||
| 1 | /* | ||
| 2 | * PXA168 ethernet driver. | ||
| 3 | * Most of the code is derived from mv643xx ethernet driver. | ||
| 4 | * | ||
| 5 | * Copyright (C) 2010 Marvell International Ltd. | ||
| 6 | * Sachin Sanap <ssanap@marvell.com> | ||
| 7 | * Philip Rakity <prakity@marvell.com> | ||
| 8 | * Mark Brown <markb@marvell.com> | ||
| 9 | * | ||
| 10 | * This program is free software; you can redistribute it and/or | ||
| 11 | * modify it under the terms of the GNU General Public License | ||
| 12 | * as published by the Free Software Foundation; either version 2 | ||
| 13 | * of the License, or (at your option) any later version. | ||
| 14 | * | ||
| 15 | * This program is distributed in the hope that it will be useful, | ||
| 16 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 17 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
| 18 | * GNU General Public License for more details. | ||
| 19 | * | ||
| 20 | * You should have received a copy of the GNU General Public License | ||
| 21 | * along with this program; if not, write to the Free Software | ||
| 22 | * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. | ||
| 23 | */ | ||
| 24 | |||
| 25 | #include <linux/init.h> | ||
| 26 | #include <linux/dma-mapping.h> | ||
| 27 | #include <linux/in.h> | ||
| 28 | #include <linux/ip.h> | ||
| 29 | #include <linux/tcp.h> | ||
| 30 | #include <linux/udp.h> | ||
| 31 | #include <linux/etherdevice.h> | ||
| 32 | #include <linux/bitops.h> | ||
| 33 | #include <linux/delay.h> | ||
| 34 | #include <linux/ethtool.h> | ||
| 35 | #include <linux/platform_device.h> | ||
| 36 | #include <linux/module.h> | ||
| 37 | #include <linux/kernel.h> | ||
| 38 | #include <linux/workqueue.h> | ||
| 39 | #include <linux/clk.h> | ||
| 40 | #include <linux/phy.h> | ||
| 41 | #include <linux/io.h> | ||
| 42 | #include <linux/types.h> | ||
| 43 | #include <asm/pgtable.h> | ||
| 44 | #include <asm/system.h> | ||
| 45 | #include <linux/delay.h> | ||
| 46 | #include <linux/dma-mapping.h> | ||
| 47 | #include <asm/cacheflush.h> | ||
| 48 | #include <linux/pxa168_eth.h> | ||
| 49 | |||
| 50 | #define DRIVER_NAME "pxa168-eth" | ||
| 51 | #define DRIVER_VERSION "0.3" | ||
| 52 | |||
| 53 | /* | ||
| 54 | * Registers | ||
| 55 | */ | ||
| 56 | |||
| 57 | #define PHY_ADDRESS 0x0000 | ||
| 58 | #define SMI 0x0010 | ||
| 59 | #define PORT_CONFIG 0x0400 | ||
| 60 | #define PORT_CONFIG_EXT 0x0408 | ||
| 61 | #define PORT_COMMAND 0x0410 | ||
| 62 | #define PORT_STATUS 0x0418 | ||
| 63 | #define HTPR 0x0428 | ||
| 64 | #define SDMA_CONFIG 0x0440 | ||
| 65 | #define SDMA_CMD 0x0448 | ||
| 66 | #define INT_CAUSE 0x0450 | ||
| 67 | #define INT_W_CLEAR 0x0454 | ||
| 68 | #define INT_MASK 0x0458 | ||
| 69 | #define ETH_F_RX_DESC_0 0x0480 | ||
| 70 | #define ETH_C_RX_DESC_0 0x04A0 | ||
| 71 | #define ETH_C_TX_DESC_1 0x04E4 | ||
| 72 | |||
| 73 | /* smi register */ | ||
| 74 | #define SMI_BUSY (1 << 28) /* 0 - Write, 1 - Read */ | ||
| 75 | #define SMI_R_VALID (1 << 27) /* 0 - Write, 1 - Read */ | ||
| 76 | #define SMI_OP_W (0 << 26) /* Write operation */ | ||
| 77 | #define SMI_OP_R (1 << 26) /* Read operation */ | ||
| 78 | |||
| 79 | #define PHY_WAIT_ITERATIONS 10 | ||
| 80 | |||
| 81 | #define PXA168_ETH_PHY_ADDR_DEFAULT 0 | ||
| 82 | /* RX & TX descriptor command */ | ||
| 83 | #define BUF_OWNED_BY_DMA (1 << 31) | ||
| 84 | |||
| 85 | /* RX descriptor status */ | ||
| 86 | #define RX_EN_INT (1 << 23) | ||
| 87 | #define RX_FIRST_DESC (1 << 17) | ||
| 88 | #define RX_LAST_DESC (1 << 16) | ||
| 89 | #define RX_ERROR (1 << 15) | ||
| 90 | |||
| 91 | /* TX descriptor command */ | ||
| 92 | #define TX_EN_INT (1 << 23) | ||
| 93 | #define TX_GEN_CRC (1 << 22) | ||
| 94 | #define TX_ZERO_PADDING (1 << 18) | ||
| 95 | #define TX_FIRST_DESC (1 << 17) | ||
| 96 | #define TX_LAST_DESC (1 << 16) | ||
| 97 | #define TX_ERROR (1 << 15) | ||
| 98 | |||
| 99 | /* SDMA_CMD */ | ||
| 100 | #define SDMA_CMD_AT (1 << 31) | ||
| 101 | #define SDMA_CMD_TXDL (1 << 24) | ||
| 102 | #define SDMA_CMD_TXDH (1 << 23) | ||
| 103 | #define SDMA_CMD_AR (1 << 15) | ||
| 104 | #define SDMA_CMD_ERD (1 << 7) | ||
| 105 | |||
| 106 | /* Bit definitions of the Port Config Reg */ | ||
| 107 | #define PCR_HS (1 << 12) | ||
| 108 | #define PCR_EN (1 << 7) | ||
| 109 | #define PCR_PM (1 << 0) | ||
| 110 | |||
| 111 | /* Bit definitions of the Port Config Extend Reg */ | ||
| 112 | #define PCXR_2BSM (1 << 28) | ||
| 113 | #define PCXR_DSCP_EN (1 << 21) | ||
| 114 | #define PCXR_MFL_1518 (0 << 14) | ||
| 115 | #define PCXR_MFL_1536 (1 << 14) | ||
| 116 | #define PCXR_MFL_2048 (2 << 14) | ||
| 117 | #define PCXR_MFL_64K (3 << 14) | ||
| 118 | #define PCXR_FLP (1 << 11) | ||
| 119 | #define PCXR_PRIO_TX_OFF 3 | ||
| 120 | #define PCXR_TX_HIGH_PRI (7 << PCXR_PRIO_TX_OFF) | ||
| 121 | |||
| 122 | /* Bit definitions of the SDMA Config Reg */ | ||
| 123 | #define SDCR_BSZ_OFF 12 | ||
| 124 | #define SDCR_BSZ8 (3 << SDCR_BSZ_OFF) | ||
| 125 | #define SDCR_BSZ4 (2 << SDCR_BSZ_OFF) | ||
| 126 | #define SDCR_BSZ2 (1 << SDCR_BSZ_OFF) | ||
| 127 | #define SDCR_BSZ1 (0 << SDCR_BSZ_OFF) | ||
| 128 | #define SDCR_BLMR (1 << 6) | ||
| 129 | #define SDCR_BLMT (1 << 7) | ||
| 130 | #define SDCR_RIFB (1 << 9) | ||
| 131 | #define SDCR_RC_OFF 2 | ||
| 132 | #define SDCR_RC_MAX_RETRANS (0xf << SDCR_RC_OFF) | ||
| 133 | |||
| 134 | /* | ||
| 135 | * Bit definitions of the Interrupt Cause Reg | ||
| 136 | * and Interrupt MASK Reg is the same | ||
| 137 | */ | ||
| 138 | #define ICR_RXBUF (1 << 0) | ||
| 139 | #define ICR_TXBUF_H (1 << 2) | ||
| 140 | #define ICR_TXBUF_L (1 << 3) | ||
| 141 | #define ICR_TXEND_H (1 << 6) | ||
| 142 | #define ICR_TXEND_L (1 << 7) | ||
| 143 | #define ICR_RXERR (1 << 8) | ||
| 144 | #define ICR_TXERR_H (1 << 10) | ||
| 145 | #define ICR_TXERR_L (1 << 11) | ||
| 146 | #define ICR_TX_UDR (1 << 13) | ||
| 147 | #define ICR_MII_CH (1 << 28) | ||
| 148 | |||
| 149 | #define ALL_INTS (ICR_TXBUF_H | ICR_TXBUF_L | ICR_TX_UDR |\ | ||
| 150 | ICR_TXERR_H | ICR_TXERR_L |\ | ||
| 151 | ICR_TXEND_H | ICR_TXEND_L |\ | ||
| 152 | ICR_RXBUF | ICR_RXERR | ICR_MII_CH) | ||
| 153 | |||
| 154 | #define ETH_HW_IP_ALIGN 2 /* hw aligns IP header */ | ||
| 155 | |||
| 156 | #define NUM_RX_DESCS 64 | ||
| 157 | #define NUM_TX_DESCS 64 | ||
| 158 | |||
| 159 | #define HASH_ADD 0 | ||
| 160 | #define HASH_DELETE 1 | ||
| 161 | #define HASH_ADDR_TABLE_SIZE 0x4000 /* 16K (1/2K address - PCR_HS == 1) */ | ||
| 162 | #define HOP_NUMBER 12 | ||
| 163 | |||
| 164 | /* Bit definitions for Port status */ | ||
| 165 | #define PORT_SPEED_100 (1 << 0) | ||
| 166 | #define FULL_DUPLEX (1 << 1) | ||
| 167 | #define FLOW_CONTROL_ENABLED (1 << 2) | ||
| 168 | #define LINK_UP (1 << 3) | ||
| 169 | |||
| 170 | /* Bit definitions for work to be done */ | ||
| 171 | #define WORK_LINK (1 << 0) | ||
| 172 | #define WORK_TX_DONE (1 << 1) | ||
| 173 | |||
| 174 | /* | ||
| 175 | * Misc definitions. | ||
| 176 | */ | ||
| 177 | #define SKB_DMA_REALIGN ((PAGE_SIZE - NET_SKB_PAD) % SMP_CACHE_BYTES) | ||
| 178 | |||
| 179 | struct rx_desc { | ||
| 180 | u32 cmd_sts; /* Descriptor command status */ | ||
| 181 | u16 byte_cnt; /* Descriptor buffer byte count */ | ||
| 182 | u16 buf_size; /* Buffer size */ | ||
| 183 | u32 buf_ptr; /* Descriptor buffer pointer */ | ||
| 184 | u32 next_desc_ptr; /* Next descriptor pointer */ | ||
| 185 | }; | ||
| 186 | |||
| 187 | struct tx_desc { | ||
| 188 | u32 cmd_sts; /* Command/status field */ | ||
| 189 | u16 reserved; | ||
| 190 | u16 byte_cnt; /* buffer byte count */ | ||
| 191 | u32 buf_ptr; /* pointer to buffer for this descriptor */ | ||
| 192 | u32 next_desc_ptr; /* Pointer to next descriptor */ | ||
| 193 | }; | ||
| 194 | |||
| 195 | struct pxa168_eth_private { | ||
| 196 | int port_num; /* User Ethernet port number */ | ||
| 197 | |||
| 198 | int rx_resource_err; /* Rx ring resource error flag */ | ||
| 199 | |||
| 200 | /* Next available and first returning Rx resource */ | ||
| 201 | int rx_curr_desc_q, rx_used_desc_q; | ||
| 202 | |||
| 203 | /* Next available and first returning Tx resource */ | ||
| 204 | int tx_curr_desc_q, tx_used_desc_q; | ||
| 205 | |||
| 206 | struct rx_desc *p_rx_desc_area; | ||
| 207 | dma_addr_t rx_desc_dma; | ||
| 208 | int rx_desc_area_size; | ||
| 209 | struct sk_buff **rx_skb; | ||
| 210 | |||
| 211 | struct tx_desc *p_tx_desc_area; | ||
| 212 | dma_addr_t tx_desc_dma; | ||
| 213 | int tx_desc_area_size; | ||
| 214 | struct sk_buff **tx_skb; | ||
| 215 | |||
| 216 | struct work_struct tx_timeout_task; | ||
| 217 | |||
| 218 | struct net_device *dev; | ||
| 219 | struct napi_struct napi; | ||
| 220 | u8 work_todo; | ||
| 221 | int skb_size; | ||
| 222 | |||
| 223 | struct net_device_stats stats; | ||
| 224 | /* Size of Tx Ring per queue */ | ||
| 225 | int tx_ring_size; | ||
| 226 | /* Number of tx descriptors in use */ | ||
| 227 | int tx_desc_count; | ||
| 228 | /* Size of Rx Ring per queue */ | ||
| 229 | int rx_ring_size; | ||
| 230 | /* Number of rx descriptors in use */ | ||
| 231 | int rx_desc_count; | ||
| 232 | |||
| 233 | /* | ||
| 234 | * Used in case RX Ring is empty, which can occur when | ||
| 235 | * system does not have resources (skb's) | ||
| 236 | */ | ||
| 237 | struct timer_list timeout; | ||
| 238 | struct mii_bus *smi_bus; | ||
| 239 | struct phy_device *phy; | ||
| 240 | |||
| 241 | /* clock */ | ||
| 242 | struct clk *clk; | ||
| 243 | struct pxa168_eth_platform_data *pd; | ||
| 244 | /* | ||
| 245 | * Ethernet controller base address. | ||
| 246 | */ | ||
| 247 | void __iomem *base; | ||
| 248 | |||
| 249 | /* Pointer to the hardware address filter table */ | ||
| 250 | void *htpr; | ||
| 251 | dma_addr_t htpr_dma; | ||
| 252 | }; | ||
| 253 | |||
| 254 | struct addr_table_entry { | ||
| 255 | __le32 lo; | ||
| 256 | __le32 hi; | ||
| 257 | }; | ||
| 258 | |||
| 259 | /* Bit fields of a Hash Table Entry */ | ||
| 260 | enum hash_table_entry { | ||
| 261 | HASH_ENTRY_VALID = 1, | ||
| 262 | SKIP = 2, | ||
| 263 | HASH_ENTRY_RECEIVE_DISCARD = 4, | ||
| 264 | HASH_ENTRY_RECEIVE_DISCARD_BIT = 2 | ||
| 265 | }; | ||
| 266 | |||
| 267 | static int pxa168_get_settings(struct net_device *dev, struct ethtool_cmd *cmd); | ||
| 268 | static int pxa168_set_settings(struct net_device *dev, struct ethtool_cmd *cmd); | ||
| 269 | static int pxa168_init_hw(struct pxa168_eth_private *pep); | ||
| 270 | static void eth_port_reset(struct net_device *dev); | ||
| 271 | static void eth_port_start(struct net_device *dev); | ||
| 272 | static int pxa168_eth_open(struct net_device *dev); | ||
| 273 | static int pxa168_eth_stop(struct net_device *dev); | ||
| 274 | static int ethernet_phy_setup(struct net_device *dev); | ||
| 275 | |||
| 276 | static inline u32 rdl(struct pxa168_eth_private *pep, int offset) | ||
| 277 | { | ||
| 278 | return readl(pep->base + offset); | ||
| 279 | } | ||
| 280 | |||
| 281 | static inline void wrl(struct pxa168_eth_private *pep, int offset, u32 data) | ||
| 282 | { | ||
| 283 | writel(data, pep->base + offset); | ||
| 284 | } | ||
| 285 | |||
| 286 | static void abort_dma(struct pxa168_eth_private *pep) | ||
| 287 | { | ||
| 288 | int delay; | ||
| 289 | int max_retries = 40; | ||
| 290 | |||
| 291 | do { | ||
| 292 | wrl(pep, SDMA_CMD, SDMA_CMD_AR | SDMA_CMD_AT); | ||
| 293 | udelay(100); | ||
| 294 | |||
| 295 | delay = 10; | ||
| 296 | while ((rdl(pep, SDMA_CMD) & (SDMA_CMD_AR | SDMA_CMD_AT)) | ||
| 297 | && delay-- > 0) { | ||
| 298 | udelay(10); | ||
| 299 | } | ||
| 300 | } while (max_retries-- > 0 && delay <= 0); | ||
| 301 | |||
| 302 | if (max_retries <= 0) | ||
| 303 | printk(KERN_ERR "%s : DMA Stuck\n", __func__); | ||
| 304 | } | ||
| 305 | |||
| 306 | static int ethernet_phy_get(struct pxa168_eth_private *pep) | ||
| 307 | { | ||
| 308 | unsigned int reg_data; | ||
| 309 | |||
| 310 | reg_data = rdl(pep, PHY_ADDRESS); | ||
| 311 | |||
| 312 | return (reg_data >> (5 * pep->port_num)) & 0x1f; | ||
| 313 | } | ||
| 314 | |||
| 315 | static void ethernet_phy_set_addr(struct pxa168_eth_private *pep, int phy_addr) | ||
| 316 | { | ||
| 317 | u32 reg_data; | ||
| 318 | int addr_shift = 5 * pep->port_num; | ||
| 319 | |||
| 320 | reg_data = rdl(pep, PHY_ADDRESS); | ||
| 321 | reg_data &= ~(0x1f << addr_shift); | ||
| 322 | reg_data |= (phy_addr & 0x1f) << addr_shift; | ||
| 323 | wrl(pep, PHY_ADDRESS, reg_data); | ||
| 324 | } | ||
| 325 | |||
| 326 | static void ethernet_phy_reset(struct pxa168_eth_private *pep) | ||
| 327 | { | ||
| 328 | int data; | ||
| 329 | |||
| 330 | data = phy_read(pep->phy, MII_BMCR); | ||
| 331 | if (data < 0) | ||
| 332 | return; | ||
| 333 | |||
| 334 | data |= BMCR_RESET; | ||
| 335 | if (phy_write(pep->phy, MII_BMCR, data) < 0) | ||
| 336 | return; | ||
| 337 | |||
| 338 | do { | ||
| 339 | data = phy_read(pep->phy, MII_BMCR); | ||
| 340 | } while (data >= 0 && data & BMCR_RESET); | ||
| 341 | } | ||
| 342 | |||
/*
 * rxq_refill - replenish the RX descriptor ring with fresh skbs
 * @dev: network device
 *
 * Allocates skbs until the ring is full or allocation fails, maps each
 * buffer for DMA and hands the descriptor back to the hardware.  If the
 * ring could not be populated at all, a timer retries 1/10 s later.
 */
static void rxq_refill(struct net_device *dev)
{
	struct pxa168_eth_private *pep = netdev_priv(dev);
	struct sk_buff *skb;
	struct rx_desc *p_used_rx_desc;
	int used_rx_desc;

	while (pep->rx_desc_count < pep->rx_ring_size) {
		int size;

		skb = dev_alloc_skb(pep->skb_size);
		if (!skb)
			break;
		/* Re-align skb->data to a cache line when NET_SKB_PAD is
		 * smaller than one (see pxa168_eth_recalc_skb_size()).
		 */
		if (SKB_DMA_REALIGN)
			skb_reserve(skb, SKB_DMA_REALIGN);
		pep->rx_desc_count++;
		/* Get 'used' Rx descriptor */
		used_rx_desc = pep->rx_used_desc_q;
		p_used_rx_desc = &pep->p_rx_desc_area[used_rx_desc];
		size = skb->end - skb->data;
		p_used_rx_desc->buf_ptr = dma_map_single(NULL,
							 skb->data,
							 size,
							 DMA_FROM_DEVICE);
		p_used_rx_desc->buf_size = size;
		pep->rx_skb[used_rx_desc] = skb;

		/* Return the descriptor to DMA ownership */
		wmb();
		p_used_rx_desc->cmd_sts = BUF_OWNED_BY_DMA | RX_EN_INT;
		wmb();

		/* Move the used descriptor pointer to the next descriptor */
		pep->rx_used_desc_q = (used_rx_desc + 1) % pep->rx_ring_size;

		/* Any Rx return cancels the Rx resource error status */
		pep->rx_resource_err = 0;

		/* NOTE(review): reserved AFTER mapping, so the mapped
		 * length also covers the 2-byte alignment prefix area.
		 */
		skb_reserve(skb, ETH_HW_IP_ALIGN);
	}

	/*
	 * If RX ring is empty of SKB, set a timer to try allocating
	 * again at a later time.
	 */
	if (pep->rx_desc_count == 0) {
		pep->timeout.expires = jiffies + (HZ / 10);
		add_timer(&pep->timeout);
	}
}
| 393 | |||
| 394 | static inline void rxq_refill_timer_wrapper(unsigned long data) | ||
| 395 | { | ||
| 396 | struct pxa168_eth_private *pep = (void *)data; | ||
| 397 | napi_schedule(&pep->napi); | ||
| 398 | } | ||
| 399 | |||
/*
 * flip_8_bits - mirror the bit order inside each nibble of a byte
 *
 * Bits 3..0 become bits 0..3 and bits 7..4 become bits 4..7, i.e. the
 * two 4-bit halves are individually reversed.
 */
static inline u8 flip_8_bits(u8 x)
{
	u8 lo = ((x & 0x01) << 3) | ((x & 0x02) << 1) |
		((x & 0x04) >> 1) | ((x & 0x08) >> 3);
	u8 hi = ((x & 0x10) << 3) | ((x & 0x20) << 1) |
		((x & 0x40) >> 1) | ((x & 0x80) >> 3);

	return lo | hi;
}
| 407 | |||
/* Swap the high and low nibbles of every byte of a 6-byte MAC address. */
static void nibble_swap_every_byte(unsigned char *mac_addr)
{
	int i;

	for (i = 0; i < ETH_ALEN; i++) {
		unsigned char b = mac_addr[i];

		mac_addr[i] = (unsigned char)((b << 4) | (b >> 4));
	}
}
| 416 | |||
| 417 | static void inverse_every_nibble(unsigned char *mac_addr) | ||
| 418 | { | ||
| 419 | int i; | ||
| 420 | for (i = 0; i < ETH_ALEN; i++) | ||
| 421 | mac_addr[i] = flip_8_bits(mac_addr[i]); | ||
| 422 | } | ||
| 423 | |||
/*
 * ----------------------------------------------------------------------------
 * hash_function - compute the address table index for a MAC address
 * @mac_addr_orig: 6-byte MAC address (not modified; a local copy is used)
 *
 * Swaps the nibbles of every byte, mirrors the bits inside each nibble,
 * then XOR-folds the bytes into an 11-bit index (0..0x7ff) matching the
 * hardware's 1/2kB hash table layout.
 * Returns the calculated entry index.
 */
static u32 hash_function(unsigned char *mac_addr_orig)
{
	u32 hash_result;
	u32 addr0;
	u32 addr1;
	u32 addr2;
	u32 addr3;
	unsigned char mac_addr[ETH_ALEN];

	/* Make a copy of MAC address since we are going to perform bit
	 * operations on it
	 */
	memcpy(mac_addr, mac_addr_orig, ETH_ALEN);

	nibble_swap_every_byte(mac_addr);
	inverse_every_nibble(mac_addr);

	addr0 = (mac_addr[5] >> 2) & 0x3f;
	addr1 = (mac_addr[5] & 0x03) | (((mac_addr[4] & 0x7f)) << 2);
	addr2 = ((mac_addr[4] & 0x80) >> 7) | mac_addr[3] << 1;
	addr3 = (mac_addr[2] & 0xff) | ((mac_addr[1] & 1) << 8);

	/* Fold and keep 11 bits: the table has 0x800 entries. */
	hash_result = (addr0 << 9) | (addr1 ^ addr2 ^ addr3);
	hash_result = hash_result & 0x07ff;
	return hash_result;
}
| 458 | |||
/*
 * ----------------------------------------------------------------------------
 * add_del_hash_entry - add or delete an address table entry
 * @pep:      driver private data (owns the hash table at pep->htpr)
 * @mac_addr: MAC address of the entry
 * @rd:       0 = discard packet upon match, 1 = receive packet upon match
 * @skip:     if 1, mark this entry as skipped.  Used when deleting an entry
 *            that is part of a chain in the hash table: the entry cannot
 *            simply be cleared without breaking the chain, so the tables
 *            need to be defragmented from time to time.
 * @del:      non-zero to delete the entry, zero to add it
 *
 * Returns 0 on success, -ENOSPC when adding and all HOP_NUMBER candidate
 * slots are occupied by other addresses.
 */
static int add_del_hash_entry(struct pxa168_eth_private *pep,
			      unsigned char *mac_addr,
			      u32 rd, u32 skip, int del)
{
	struct addr_table_entry *entry, *start;
	u32 new_high;
	u32 new_low;
	u32 i;

	/* Pack the MAC nibbles plus the control bits into the two
	 * 32-bit halves of a hardware table entry.
	 */
	new_low = (((mac_addr[1] >> 4) & 0xf) << 15)
	    | (((mac_addr[1] >> 0) & 0xf) << 11)
	    | (((mac_addr[0] >> 4) & 0xf) << 7)
	    | (((mac_addr[0] >> 0) & 0xf) << 3)
	    | (((mac_addr[3] >> 4) & 0x1) << 31)
	    | (((mac_addr[3] >> 0) & 0xf) << 27)
	    | (((mac_addr[2] >> 4) & 0xf) << 23)
	    | (((mac_addr[2] >> 0) & 0xf) << 19)
	    | (skip << SKIP) | (rd << HASH_ENTRY_RECEIVE_DISCARD_BIT)
	    | HASH_ENTRY_VALID;

	new_high = (((mac_addr[5] >> 4) & 0xf) << 15)
	    | (((mac_addr[5] >> 0) & 0xf) << 11)
	    | (((mac_addr[4] >> 4) & 0xf) << 7)
	    | (((mac_addr[4] >> 0) & 0xf) << 3)
	    | (((mac_addr[3] >> 5) & 0x7) << 0);

	/*
	 * Pick the appropriate table, start scanning for free/reusable
	 * entries at the index obtained by hashing the specified MAC address
	 */
	start = (struct addr_table_entry *)(pep->htpr);
	entry = start + hash_function(mac_addr);
	for (i = 0; i < HOP_NUMBER; i++) {
		if (!(le32_to_cpu(entry->lo) & HASH_ENTRY_VALID)) {
			break;
		} else {
			/* if same address put in same position */
			if (((le32_to_cpu(entry->lo) & 0xfffffff8) ==
			     (new_low & 0xfffffff8)) &&
			    (le32_to_cpu(entry->hi) == new_high)) {
				break;
			}
		}
		/* Linear probe with wraparound at the table end. */
		if (entry == start + 0x7ff)
			entry = start;
		else
			entry++;
	}

	/* NOTE(review): with '&&' this bails out of a delete only when
	 * BOTH halves mismatch; '||' (entry differs in either half)
	 * looks intended -- confirm against hardware docs before
	 * changing.
	 */
	if (((le32_to_cpu(entry->lo) & 0xfffffff8) != (new_low & 0xfffffff8)) &&
	    (le32_to_cpu(entry->hi) != new_high) && del)
		return 0;

	if (i == HOP_NUMBER) {
		if (!del) {
			printk(KERN_INFO "%s: table section is full, need to "
			       "move to 16kB implementation?\n",
			       __FILE__);
			return -ENOSPC;
		} else
			return 0;
	}

	/*
	 * Update the selected entry
	 */
	if (del) {
		entry->hi = 0;
		entry->lo = 0;
	} else {
		entry->hi = cpu_to_le32(new_high);
		entry->lo = cpu_to_le32(new_low);
	}

	return 0;
}
| 552 | |||
| 553 | /* | ||
| 554 | * ---------------------------------------------------------------------------- | ||
| 555 | * Create an addressTable entry from MAC address info | ||
| 556 | * found in the specifed net_device struct | ||
| 557 | * | ||
| 558 | * Input : pointer to ethernet interface network device structure | ||
| 559 | * Output : N/A | ||
| 560 | */ | ||
| 561 | static void update_hash_table_mac_address(struct pxa168_eth_private *pep, | ||
| 562 | unsigned char *oaddr, | ||
| 563 | unsigned char *addr) | ||
| 564 | { | ||
| 565 | /* Delete old entry */ | ||
| 566 | if (oaddr) | ||
| 567 | add_del_hash_entry(pep, oaddr, 1, 0, HASH_DELETE); | ||
| 568 | /* Add new entry */ | ||
| 569 | add_del_hash_entry(pep, addr, 1, 0, HASH_ADD); | ||
| 570 | } | ||
| 571 | |||
/*
 * init_hash_table - allocate (once) and reset the address hash table
 * @pep: driver private data
 *
 * The table is allocated coherently on first use and its DMA address is
 * programmed into the HTPR register on every call.
 * Returns 0 on success, -ENOMEM if the coherent allocation fails.
 */
static int init_hash_table(struct pxa168_eth_private *pep)
{
	/*
	 * Hardware expects CPU to build a hash table based on a predefined
	 * hash function and populate it based on hardware address. The
	 * location of the hash table is identified by 32-bit pointer stored
	 * in HTPR internal register. Two possible sizes exists for the hash
	 * table 8kB (256kB of DRAM required (4 x 64 kB banks)) and 1/2kB
	 * (16kB of DRAM required (4 x 4 kB banks)).We currently only support
	 * 1/2kB.
	 */
	/* TODO: Add support for 8kB hash table and alternative hash
	 * function.Driver can dynamically switch to them if the 1/2kB hash
	 * table is full.
	 */
	if (pep->htpr == NULL) {
		pep->htpr = dma_alloc_coherent(pep->dev->dev.parent,
					       HASH_ADDR_TABLE_SIZE,
					       &pep->htpr_dma, GFP_KERNEL);
		if (pep->htpr == NULL)
			return -ENOMEM;
	}
	/* Always start from a clean table and point the hardware at it. */
	memset(pep->htpr, 0, HASH_ADDR_TABLE_SIZE);
	wrl(pep, HTPR, pep->htpr_dma);
	return 0;
}
| 598 | |||
| 599 | static void pxa168_eth_set_rx_mode(struct net_device *dev) | ||
| 600 | { | ||
| 601 | struct pxa168_eth_private *pep = netdev_priv(dev); | ||
| 602 | struct netdev_hw_addr *ha; | ||
| 603 | u32 val; | ||
| 604 | |||
| 605 | val = rdl(pep, PORT_CONFIG); | ||
| 606 | if (dev->flags & IFF_PROMISC) | ||
| 607 | val |= PCR_PM; | ||
| 608 | else | ||
| 609 | val &= ~PCR_PM; | ||
| 610 | wrl(pep, PORT_CONFIG, val); | ||
| 611 | |||
| 612 | /* | ||
| 613 | * Remove the old list of MAC address and add dev->addr | ||
| 614 | * and multicast address. | ||
| 615 | */ | ||
| 616 | memset(pep->htpr, 0, HASH_ADDR_TABLE_SIZE); | ||
| 617 | update_hash_table_mac_address(pep, NULL, dev->dev_addr); | ||
| 618 | |||
| 619 | netdev_for_each_mc_addr(ha, dev) | ||
| 620 | update_hash_table_mac_address(pep, NULL, ha->addr); | ||
| 621 | } | ||
| 622 | |||
| 623 | static int pxa168_eth_set_mac_address(struct net_device *dev, void *addr) | ||
| 624 | { | ||
| 625 | struct sockaddr *sa = addr; | ||
| 626 | struct pxa168_eth_private *pep = netdev_priv(dev); | ||
| 627 | unsigned char oldMac[ETH_ALEN]; | ||
| 628 | |||
| 629 | if (!is_valid_ether_addr(sa->sa_data)) | ||
| 630 | return -EINVAL; | ||
| 631 | memcpy(oldMac, dev->dev_addr, ETH_ALEN); | ||
| 632 | memcpy(dev->dev_addr, sa->sa_data, ETH_ALEN); | ||
| 633 | netif_addr_lock_bh(dev); | ||
| 634 | update_hash_table_mac_address(pep, oldMac, dev->dev_addr); | ||
| 635 | netif_addr_unlock_bh(dev); | ||
| 636 | return 0; | ||
| 637 | } | ||
| 638 | |||
/*
 * eth_port_start - bring the MAC port up and start the RX DMA engine
 * @dev: network device
 *
 * Optionally resets the PHY (restoring the ethtool settings around the
 * reset), points the controller at the current TX/RX descriptors,
 * clears and unmasks interrupts, enables the port and kicks RX DMA.
 */
static void eth_port_start(struct net_device *dev)
{
	unsigned int val = 0;
	struct pxa168_eth_private *pep = netdev_priv(dev);
	int tx_curr_desc, rx_curr_desc;

	/* Perform PHY reset, if there is a PHY. */
	if (pep->phy != NULL) {
		struct ethtool_cmd cmd;

		/* Preserve the negotiated settings across the reset. */
		pxa168_get_settings(pep->dev, &cmd);
		ethernet_phy_reset(pep);
		pxa168_set_settings(pep->dev, &cmd);
	}

	/* Assignment of Tx CTRP of given queue */
	tx_curr_desc = pep->tx_curr_desc_q;
	wrl(pep, ETH_C_TX_DESC_1,
	    (u32) ((struct tx_desc *)pep->tx_desc_dma + tx_curr_desc));

	/* Assignment of Rx CRDP of given queue */
	rx_curr_desc = pep->rx_curr_desc_q;
	wrl(pep, ETH_C_RX_DESC_0,
	    (u32) ((struct rx_desc *)pep->rx_desc_dma + rx_curr_desc));

	wrl(pep, ETH_F_RX_DESC_0,
	    (u32) ((struct rx_desc *)pep->rx_desc_dma + rx_curr_desc));

	/* Clear all interrupts */
	wrl(pep, INT_CAUSE, 0);

	/* Enable all interrupts for receive, transmit and error. */
	wrl(pep, INT_MASK, ALL_INTS);

	val = rdl(pep, PORT_CONFIG);
	val |= PCR_EN;
	wrl(pep, PORT_CONFIG, val);

	/* Start RX DMA engine */
	val = rdl(pep, SDMA_CMD);
	val |= SDMA_CMD_ERD;
	wrl(pep, SDMA_CMD, val);
}
| 682 | |||
/*
 * eth_port_reset - quiesce the MAC port
 * @dev: network device
 *
 * Masks and clears all interrupts, aborts DMA and disables the port.
 */
static void eth_port_reset(struct net_device *dev)
{
	struct pxa168_eth_private *pep = netdev_priv(dev);
	unsigned int val = 0;

	/* Stop all interrupts for receive, transmit and error. */
	wrl(pep, INT_MASK, 0);

	/* Clear all interrupts */
	wrl(pep, INT_CAUSE, 0);

	/* Stop RX DMA */
	val = rdl(pep, SDMA_CMD);
	val &= ~SDMA_CMD_ERD;	/* abort dma command */
	/* NOTE(review): the masked value is never written back; the
	 * actual stop is performed by abort_dma() below.
	 */

	/* Abort any transmit and receive operations and put DMA
	 * in idle state.
	 */
	abort_dma(pep);

	/* Disable port */
	val = rdl(pep, PORT_CONFIG);
	val &= ~PCR_EN;
	wrl(pep, PORT_CONFIG, val);
}
| 708 | |||
/*
 * txq_reclaim - Free the tx desc data for completed descriptors
 * @dev:   network device
 * @force: if non-zero, frees uncompleted (still DMA-owned) descriptors too
 *
 * Runs under netif_tx_lock.  Returns the number of descriptors freed,
 * or -1 when the first descriptor examined was still owned by the DMA
 * engine (nothing could be reclaimed on this call).
 */
static int txq_reclaim(struct net_device *dev, int force)
{
	struct pxa168_eth_private *pep = netdev_priv(dev);
	struct tx_desc *desc;
	u32 cmd_sts;
	struct sk_buff *skb;
	int tx_index;
	dma_addr_t addr;
	int count;
	int released = 0;

	netif_tx_lock(dev);

	pep->work_todo &= ~WORK_TX_DONE;
	while (pep->tx_desc_count > 0) {
		tx_index = pep->tx_used_desc_q;
		desc = &pep->p_tx_desc_area[tx_index];
		cmd_sts = desc->cmd_sts;
		if (!force && (cmd_sts & BUF_OWNED_BY_DMA)) {
			/* Still in flight: report progress so far, or -1
			 * if nothing was reclaimed yet.
			 */
			if (released > 0) {
				goto txq_reclaim_end;
			} else {
				released = -1;
				goto txq_reclaim_end;
			}
		}
		pep->tx_used_desc_q = (tx_index + 1) % pep->tx_ring_size;
		pep->tx_desc_count--;
		addr = desc->buf_ptr;
		count = desc->byte_cnt;
		skb = pep->tx_skb[tx_index];
		if (skb)
			pep->tx_skb[tx_index] = NULL;

		if (cmd_sts & TX_ERROR) {
			if (net_ratelimit())
				printk(KERN_ERR "%s: Error in TX\n", dev->name);
			dev->stats.tx_errors++;
		}
		dma_unmap_single(NULL, addr, count, DMA_TO_DEVICE);
		if (skb)
			dev_kfree_skb_irq(skb);
		released++;
	}
txq_reclaim_end:
	netif_tx_unlock(dev);
	return released;
}
| 761 | |||
| 762 | static void pxa168_eth_tx_timeout(struct net_device *dev) | ||
| 763 | { | ||
| 764 | struct pxa168_eth_private *pep = netdev_priv(dev); | ||
| 765 | |||
| 766 | printk(KERN_INFO "%s: TX timeout desc_count %d\n", | ||
| 767 | dev->name, pep->tx_desc_count); | ||
| 768 | |||
| 769 | schedule_work(&pep->tx_timeout_task); | ||
| 770 | } | ||
| 771 | |||
| 772 | static void pxa168_eth_tx_timeout_task(struct work_struct *work) | ||
| 773 | { | ||
| 774 | struct pxa168_eth_private *pep = container_of(work, | ||
| 775 | struct pxa168_eth_private, | ||
| 776 | tx_timeout_task); | ||
| 777 | struct net_device *dev = pep->dev; | ||
| 778 | pxa168_eth_stop(dev); | ||
| 779 | pxa168_eth_open(dev); | ||
| 780 | } | ||
| 781 | |||
/*
 * rxq_process - NAPI receive processing
 * @dev:    network device
 * @budget: maximum number of packets to process
 *
 * Walks the RX ring, hands completed packets to the stack, drops
 * fragmented or errored frames, and finally refills the ring.
 * Returns the number of descriptors consumed.
 */
static int rxq_process(struct net_device *dev, int budget)
{
	struct pxa168_eth_private *pep = netdev_priv(dev);
	struct net_device_stats *stats = &dev->stats;
	unsigned int received_packets = 0;
	struct sk_buff *skb;

	while (budget-- > 0) {
		int rx_next_curr_desc, rx_curr_desc, rx_used_desc;
		struct rx_desc *rx_desc;
		unsigned int cmd_sts;

		/* Do not process Rx ring in case of Rx ring resource error */
		if (pep->rx_resource_err)
			break;
		rx_curr_desc = pep->rx_curr_desc_q;
		rx_used_desc = pep->rx_used_desc_q;
		rx_desc = &pep->p_rx_desc_area[rx_curr_desc];
		cmd_sts = rx_desc->cmd_sts;
		rmb();
		/* Descriptor still owned by hardware: nothing more to do. */
		if (cmd_sts & (BUF_OWNED_BY_DMA))
			break;
		skb = pep->rx_skb[rx_curr_desc];
		pep->rx_skb[rx_curr_desc] = NULL;

		rx_next_curr_desc = (rx_curr_desc + 1) % pep->rx_ring_size;
		pep->rx_curr_desc_q = rx_next_curr_desc;

		/* Rx descriptors exhausted. */
		/* Set the Rx ring resource error flag */
		if (rx_next_curr_desc == rx_used_desc)
			pep->rx_resource_err = 1;
		pep->rx_desc_count--;
		dma_unmap_single(NULL, rx_desc->buf_ptr,
				 rx_desc->buf_size,
				 DMA_FROM_DEVICE);
		received_packets++;
		/*
		 * Update statistics.
		 * Note byte count includes 4 byte CRC count
		 */
		stats->rx_packets++;
		stats->rx_bytes += rx_desc->byte_cnt;
		/*
		 * In case we received a packet without first / last bits on OR
		 * the error summary bit is on, the packet needs to be dropped.
		 */
		if (((cmd_sts & (RX_FIRST_DESC | RX_LAST_DESC)) !=
		     (RX_FIRST_DESC | RX_LAST_DESC))
		    || (cmd_sts & RX_ERROR)) {

			stats->rx_dropped++;
			if ((cmd_sts & (RX_FIRST_DESC | RX_LAST_DESC)) !=
			    (RX_FIRST_DESC | RX_LAST_DESC)) {
				if (net_ratelimit())
					printk(KERN_ERR
					       "%s: Rx pkt on multiple desc\n",
					       dev->name);
			}
			if (cmd_sts & RX_ERROR)
				stats->rx_errors++;
			dev_kfree_skb_irq(skb);
		} else {
			/*
			 * The -4 is for the CRC in the trailer of the
			 * received packet
			 */
			skb_put(skb, rx_desc->byte_cnt - 4);
			skb->protocol = eth_type_trans(skb, dev);
			netif_receive_skb(skb);
		}
		dev->last_rx = jiffies;
	}
	/* Fill RX ring with skb's */
	rxq_refill(dev);
	return received_packets;
}
| 859 | |||
| 860 | static int pxa168_eth_collect_events(struct pxa168_eth_private *pep, | ||
| 861 | struct net_device *dev) | ||
| 862 | { | ||
| 863 | u32 icr; | ||
| 864 | int ret = 0; | ||
| 865 | |||
| 866 | icr = rdl(pep, INT_CAUSE); | ||
| 867 | if (icr == 0) | ||
| 868 | return IRQ_NONE; | ||
| 869 | |||
| 870 | wrl(pep, INT_CAUSE, ~icr); | ||
| 871 | if (icr & (ICR_TXBUF_H | ICR_TXBUF_L)) { | ||
| 872 | pep->work_todo |= WORK_TX_DONE; | ||
| 873 | ret = 1; | ||
| 874 | } | ||
| 875 | if (icr & ICR_RXBUF) | ||
| 876 | ret = 1; | ||
| 877 | if (icr & ICR_MII_CH) { | ||
| 878 | pep->work_todo |= WORK_LINK; | ||
| 879 | ret = 1; | ||
| 880 | } | ||
| 881 | return ret; | ||
| 882 | } | ||
| 883 | |||
| 884 | static void handle_link_event(struct pxa168_eth_private *pep) | ||
| 885 | { | ||
| 886 | struct net_device *dev = pep->dev; | ||
| 887 | u32 port_status; | ||
| 888 | int speed; | ||
| 889 | int duplex; | ||
| 890 | int fc; | ||
| 891 | |||
| 892 | port_status = rdl(pep, PORT_STATUS); | ||
| 893 | if (!(port_status & LINK_UP)) { | ||
| 894 | if (netif_carrier_ok(dev)) { | ||
| 895 | printk(KERN_INFO "%s: link down\n", dev->name); | ||
| 896 | netif_carrier_off(dev); | ||
| 897 | txq_reclaim(dev, 1); | ||
| 898 | } | ||
| 899 | return; | ||
| 900 | } | ||
| 901 | if (port_status & PORT_SPEED_100) | ||
| 902 | speed = 100; | ||
| 903 | else | ||
| 904 | speed = 10; | ||
| 905 | |||
| 906 | duplex = (port_status & FULL_DUPLEX) ? 1 : 0; | ||
| 907 | fc = (port_status & FLOW_CONTROL_ENABLED) ? 1 : 0; | ||
| 908 | printk(KERN_INFO "%s: link up, %d Mb/s, %s duplex, " | ||
| 909 | "flow control %sabled\n", dev->name, | ||
| 910 | speed, duplex ? "full" : "half", fc ? "en" : "dis"); | ||
| 911 | if (!netif_carrier_ok(dev)) | ||
| 912 | netif_carrier_on(dev); | ||
| 913 | } | ||
| 914 | |||
| 915 | static irqreturn_t pxa168_eth_int_handler(int irq, void *dev_id) | ||
| 916 | { | ||
| 917 | struct net_device *dev = (struct net_device *)dev_id; | ||
| 918 | struct pxa168_eth_private *pep = netdev_priv(dev); | ||
| 919 | |||
| 920 | if (unlikely(!pxa168_eth_collect_events(pep, dev))) | ||
| 921 | return IRQ_NONE; | ||
| 922 | /* Disable interrupts */ | ||
| 923 | wrl(pep, INT_MASK, 0); | ||
| 924 | napi_schedule(&pep->napi); | ||
| 925 | return IRQ_HANDLED; | ||
| 926 | } | ||
| 927 | |||
| 928 | static void pxa168_eth_recalc_skb_size(struct pxa168_eth_private *pep) | ||
| 929 | { | ||
| 930 | int skb_size; | ||
| 931 | |||
| 932 | /* | ||
| 933 | * Reserve 2+14 bytes for an ethernet header (the hardware | ||
| 934 | * automatically prepends 2 bytes of dummy data to each | ||
| 935 | * received packet), 16 bytes for up to four VLAN tags, and | ||
| 936 | * 4 bytes for the trailing FCS -- 36 bytes total. | ||
| 937 | */ | ||
| 938 | skb_size = pep->dev->mtu + 36; | ||
| 939 | |||
| 940 | /* | ||
| 941 | * Make sure that the skb size is a multiple of 8 bytes, as | ||
| 942 | * the lower three bits of the receive descriptor's buffer | ||
| 943 | * size field are ignored by the hardware. | ||
| 944 | */ | ||
| 945 | pep->skb_size = (skb_size + 7) & ~7; | ||
| 946 | |||
| 947 | /* | ||
| 948 | * If NET_SKB_PAD is smaller than a cache line, | ||
| 949 | * netdev_alloc_skb() will cause skb->data to be misaligned | ||
| 950 | * to a cache line boundary. If this is the case, include | ||
| 951 | * some extra space to allow re-aligning the data area. | ||
| 952 | */ | ||
| 953 | pep->skb_size += SKB_DMA_REALIGN; | ||
| 954 | |||
| 955 | } | ||
| 956 | |||
| 957 | static int set_port_config_ext(struct pxa168_eth_private *pep) | ||
| 958 | { | ||
| 959 | int skb_size; | ||
| 960 | |||
| 961 | pxa168_eth_recalc_skb_size(pep); | ||
| 962 | if (pep->skb_size <= 1518) | ||
| 963 | skb_size = PCXR_MFL_1518; | ||
| 964 | else if (pep->skb_size <= 1536) | ||
| 965 | skb_size = PCXR_MFL_1536; | ||
| 966 | else if (pep->skb_size <= 2048) | ||
| 967 | skb_size = PCXR_MFL_2048; | ||
| 968 | else | ||
| 969 | skb_size = PCXR_MFL_64K; | ||
| 970 | |||
| 971 | /* Extended Port Configuration */ | ||
| 972 | wrl(pep, | ||
| 973 | PORT_CONFIG_EXT, PCXR_2BSM | /* Two byte prefix aligns IP hdr */ | ||
| 974 | PCXR_DSCP_EN | /* Enable DSCP in IP */ | ||
| 975 | skb_size | PCXR_FLP | /* do not force link pass */ | ||
| 976 | PCXR_TX_HIGH_PRI); /* Transmit - high priority queue */ | ||
| 977 | |||
| 978 | return 0; | ||
| 979 | } | ||
| 980 | |||
/*
 * pxa168_init_hw - one-time controller initialisation
 * @pep: driver private data
 *
 * Masks and clears interrupts, aborts DMA, sets up the address hash
 * table and programs the SDMA and port configuration registers.
 * Returns 0 on success or the error from init_hash_table().
 */
static int pxa168_init_hw(struct pxa168_eth_private *pep)
{
	int err = 0;

	/* Disable interrupts */
	wrl(pep, INT_MASK, 0);
	wrl(pep, INT_CAUSE, 0);
	/* Write to ICR to clear interrupts. */
	wrl(pep, INT_W_CLEAR, 0);
	/* Abort any transmit and receive operations and put DMA
	 * in idle state.
	 */
	abort_dma(pep);
	/* Initialize address hash table */
	err = init_hash_table(pep);
	if (err)
		return err;
	/* SDMA configuration */
	wrl(pep, SDMA_CONFIG, SDCR_BSZ8 |	/* Burst size = 32 bytes */
	    SDCR_RIFB |				/* Rx interrupt on frame */
	    SDCR_BLMT |				/* Little endian transmit */
	    SDCR_BLMR |				/* Little endian receive */
	    SDCR_RC_MAX_RETRANS);		/* Max retransmit count */
	/* Port Configuration */
	wrl(pep, PORT_CONFIG, PCR_HS);		/* Hash size is 1/2kb */
	set_port_config_ext(pep);

	return err;
}
| 1010 | |||
/*
 * rxq_init - allocate the RX skb array and the RX descriptor ring
 * @dev: network device
 *
 * Allocates the skb pointer array and a coherent descriptor area, links
 * the descriptors into a circular list via next_desc_ptr and resets the
 * ring indices.  Returns 0 on success, -ENOMEM on allocation failure.
 */
static int rxq_init(struct net_device *dev)
{
	struct pxa168_eth_private *pep = netdev_priv(dev);
	struct rx_desc *p_rx_desc;
	int size = 0, i = 0;
	int rx_desc_num = pep->rx_ring_size;

	/* Allocate RX skb rings */
	pep->rx_skb = kmalloc(sizeof(*pep->rx_skb) * pep->rx_ring_size,
			      GFP_KERNEL);
	if (!pep->rx_skb) {
		printk(KERN_ERR "%s: Cannot alloc RX skb ring\n", dev->name);
		return -ENOMEM;
	}
	/* Allocate RX ring */
	pep->rx_desc_count = 0;
	size = pep->rx_ring_size * sizeof(struct rx_desc);
	pep->rx_desc_area_size = size;
	pep->p_rx_desc_area = dma_alloc_coherent(pep->dev->dev.parent, size,
						 &pep->rx_desc_dma, GFP_KERNEL);
	if (!pep->p_rx_desc_area) {
		printk(KERN_ERR "%s: Cannot alloc RX ring (size %d bytes)\n",
		       dev->name, size);
		goto out;
	}
	memset((void *)pep->p_rx_desc_area, 0, size);
	/* initialize the next_desc_ptr links in the Rx descriptors ring */
	p_rx_desc = (struct rx_desc *)pep->p_rx_desc_area;
	for (i = 0; i < rx_desc_num; i++) {
		p_rx_desc[i].next_desc_ptr = pep->rx_desc_dma +
		    ((i + 1) % rx_desc_num) * sizeof(struct rx_desc);
	}
	/* Save Rx desc pointer to driver struct. */
	pep->rx_curr_desc_q = 0;
	pep->rx_used_desc_q = 0;
	pep->rx_desc_area_size = rx_desc_num * sizeof(struct rx_desc);
	return 0;
out:
	/* NOTE(review): pep->rx_skb dangles after this kfree(); safe only
	 * because callers treat the whole init as failed.
	 */
	kfree(pep->rx_skb);
	return -ENOMEM;
}
| 1052 | |||
| 1053 | static void rxq_deinit(struct net_device *dev) | ||
| 1054 | { | ||
| 1055 | struct pxa168_eth_private *pep = netdev_priv(dev); | ||
| 1056 | int curr; | ||
| 1057 | |||
| 1058 | /* Free preallocated skb's on RX rings */ | ||
| 1059 | for (curr = 0; pep->rx_desc_count && curr < pep->rx_ring_size; curr++) { | ||
| 1060 | if (pep->rx_skb[curr]) { | ||
| 1061 | dev_kfree_skb(pep->rx_skb[curr]); | ||
| 1062 | pep->rx_desc_count--; | ||
| 1063 | } | ||
| 1064 | } | ||
| 1065 | if (pep->rx_desc_count) | ||
| 1066 | printk(KERN_ERR | ||
| 1067 | "Error in freeing Rx Ring. %d skb's still\n", | ||
| 1068 | pep->rx_desc_count); | ||
| 1069 | /* Free RX ring */ | ||
| 1070 | if (pep->p_rx_desc_area) | ||
| 1071 | dma_free_coherent(pep->dev->dev.parent, pep->rx_desc_area_size, | ||
| 1072 | pep->p_rx_desc_area, pep->rx_desc_dma); | ||
| 1073 | kfree(pep->rx_skb); | ||
| 1074 | } | ||
| 1075 | |||
/*
 * txq_init - allocate the TX skb array and the TX descriptor ring
 * @dev: network device
 *
 * Mirrors rxq_init() for the transmit side: skb pointer array, coherent
 * descriptor area, circular next_desc_ptr links, reset ring indices.
 * Returns 0 on success, -ENOMEM on allocation failure.
 */
static int txq_init(struct net_device *dev)
{
	struct pxa168_eth_private *pep = netdev_priv(dev);
	struct tx_desc *p_tx_desc;
	int size = 0, i = 0;
	int tx_desc_num = pep->tx_ring_size;

	pep->tx_skb = kmalloc(sizeof(*pep->tx_skb) * pep->tx_ring_size,
			      GFP_KERNEL);
	if (!pep->tx_skb) {
		printk(KERN_ERR "%s: Cannot alloc TX skb ring\n", dev->name);
		return -ENOMEM;
	}
	/* Allocate TX ring */
	pep->tx_desc_count = 0;
	size = pep->tx_ring_size * sizeof(struct tx_desc);
	pep->tx_desc_area_size = size;
	pep->p_tx_desc_area = dma_alloc_coherent(pep->dev->dev.parent, size,
						 &pep->tx_desc_dma, GFP_KERNEL);
	if (!pep->p_tx_desc_area) {
		printk(KERN_ERR "%s: Cannot allocate Tx Ring (size %d bytes)\n",
		       dev->name, size);
		goto out;
	}
	memset((void *)pep->p_tx_desc_area, 0, pep->tx_desc_area_size);
	/* Initialize the next_desc_ptr links in the Tx descriptors ring */
	p_tx_desc = (struct tx_desc *)pep->p_tx_desc_area;
	for (i = 0; i < tx_desc_num; i++) {
		p_tx_desc[i].next_desc_ptr = pep->tx_desc_dma +
		    ((i + 1) % tx_desc_num) * sizeof(struct tx_desc);
	}
	pep->tx_curr_desc_q = 0;
	pep->tx_used_desc_q = 0;
	pep->tx_desc_area_size = tx_desc_num * sizeof(struct tx_desc);
	return 0;
out:
	/* NOTE(review): pep->tx_skb dangles after this kfree(); safe only
	 * because callers treat the whole init as failed.
	 */
	kfree(pep->tx_skb);
	return -ENOMEM;
}
| 1115 | |||
| 1116 | static void txq_deinit(struct net_device *dev) | ||
| 1117 | { | ||
| 1118 | struct pxa168_eth_private *pep = netdev_priv(dev); | ||
| 1119 | |||
| 1120 | /* Free outstanding skb's on TX ring */ | ||
| 1121 | txq_reclaim(dev, 1); | ||
| 1122 | BUG_ON(pep->tx_used_desc_q != pep->tx_curr_desc_q); | ||
| 1123 | /* Free TX ring */ | ||
| 1124 | if (pep->p_tx_desc_area) | ||
| 1125 | dma_free_coherent(pep->dev->dev.parent, pep->tx_desc_area_size, | ||
| 1126 | pep->p_tx_desc_area, pep->tx_desc_dma); | ||
| 1127 | kfree(pep->tx_skb); | ||
| 1128 | } | ||
| 1129 | |||
| 1130 | static int pxa168_eth_open(struct net_device *dev) | ||
| 1131 | { | ||
| 1132 | struct pxa168_eth_private *pep = netdev_priv(dev); | ||
| 1133 | int err; | ||
| 1134 | |||
| 1135 | err = request_irq(dev->irq, pxa168_eth_int_handler, | ||
| 1136 | IRQF_DISABLED, dev->name, dev); | ||
| 1137 | if (err) { | ||
| 1138 | dev_printk(KERN_ERR, &dev->dev, "can't assign irq\n"); | ||
| 1139 | return -EAGAIN; | ||
| 1140 | } | ||
| 1141 | pep->rx_resource_err = 0; | ||
| 1142 | err = rxq_init(dev); | ||
| 1143 | if (err != 0) | ||
| 1144 | goto out_free_irq; | ||
| 1145 | err = txq_init(dev); | ||
| 1146 | if (err != 0) | ||
| 1147 | goto out_free_rx_skb; | ||
| 1148 | pep->rx_used_desc_q = 0; | ||
| 1149 | pep->rx_curr_desc_q = 0; | ||
| 1150 | |||
| 1151 | /* Fill RX ring with skb's */ | ||
| 1152 | rxq_refill(dev); | ||
| 1153 | pep->rx_used_desc_q = 0; | ||
| 1154 | pep->rx_curr_desc_q = 0; | ||
| 1155 | netif_carrier_off(dev); | ||
| 1156 | eth_port_start(dev); | ||
| 1157 | napi_enable(&pep->napi); | ||
| 1158 | return 0; | ||
| 1159 | out_free_rx_skb: | ||
| 1160 | rxq_deinit(dev); | ||
| 1161 | out_free_irq: | ||
| 1162 | free_irq(dev->irq, dev); | ||
| 1163 | return err; | ||
| 1164 | } | ||
| 1165 | |||
/*
 * pxa168_eth_stop - ndo_stop handler
 * @dev: network device
 *
 * Resets the port, masks and clears interrupts, disables NAPI, cancels
 * the RX refill timer, drops the carrier and frees the IRQ and both
 * descriptor rings.  Always returns 0.
 */
static int pxa168_eth_stop(struct net_device *dev)
{
	struct pxa168_eth_private *pep = netdev_priv(dev);
	/* Quiesce the port first so no new events arrive. */
	eth_port_reset(dev);

	/* Disable interrupts */
	wrl(pep, INT_MASK, 0);
	wrl(pep, INT_CAUSE, 0);
	/* Write to ICR to clear interrupts. */
	wrl(pep, INT_W_CLEAR, 0);
	napi_disable(&pep->napi);
	del_timer_sync(&pep->timeout);
	netif_carrier_off(dev);
	free_irq(dev->irq, dev);
	rxq_deinit(dev);
	txq_deinit(dev);

	return 0;
}
| 1185 | |||
| 1186 | static int pxa168_eth_change_mtu(struct net_device *dev, int mtu) | ||
| 1187 | { | ||
| 1188 | int retval; | ||
| 1189 | struct pxa168_eth_private *pep = netdev_priv(dev); | ||
| 1190 | |||
| 1191 | if ((mtu > 9500) || (mtu < 68)) | ||
| 1192 | return -EINVAL; | ||
| 1193 | |||
| 1194 | dev->mtu = mtu; | ||
| 1195 | retval = set_port_config_ext(pep); | ||
| 1196 | |||
| 1197 | if (!netif_running(dev)) | ||
| 1198 | return 0; | ||
| 1199 | |||
| 1200 | /* | ||
| 1201 | * Stop and then re-open the interface. This will allocate RX | ||
| 1202 | * skbs of the new MTU. | ||
| 1203 | * There is a possible danger that the open will not succeed, | ||
| 1204 | * due to memory being full. | ||
| 1205 | */ | ||
| 1206 | pxa168_eth_stop(dev); | ||
| 1207 | if (pxa168_eth_open(dev)) { | ||
| 1208 | dev_printk(KERN_ERR, &dev->dev, | ||
| 1209 | "fatal error on re-opening device after " | ||
| 1210 | "MTU change\n"); | ||
| 1211 | } | ||
| 1212 | |||
| 1213 | return 0; | ||
| 1214 | } | ||
| 1215 | |||
| 1216 | static int eth_alloc_tx_desc_index(struct pxa168_eth_private *pep) | ||
| 1217 | { | ||
| 1218 | int tx_desc_curr; | ||
| 1219 | |||
| 1220 | tx_desc_curr = pep->tx_curr_desc_q; | ||
| 1221 | pep->tx_curr_desc_q = (tx_desc_curr + 1) % pep->tx_ring_size; | ||
| 1222 | BUG_ON(pep->tx_curr_desc_q == pep->tx_used_desc_q); | ||
| 1223 | pep->tx_desc_count++; | ||
| 1224 | |||
| 1225 | return tx_desc_curr; | ||
| 1226 | } | ||
| 1227 | |||
/*
 * NAPI poll handler: handle a deferred link event, reclaim completed TX
 * descriptors, process up to @budget RX frames, and re-enable interrupts
 * once the RX ring is drained.  Returns the number of RX frames handled.
 */
static int pxa168_rx_poll(struct napi_struct *napi, int budget)
{
	struct pxa168_eth_private *pep =
	    container_of(napi, struct pxa168_eth_private, napi);
	struct net_device *dev = pep->dev;
	int work_done = 0;

	/* Link change was flagged by the ISR and deferred to poll context. */
	if (unlikely(pep->work_todo & WORK_LINK)) {
		pep->work_todo &= ~(WORK_LINK);
		handle_link_event(pep);
	}
	/*
	 * We call txq_reclaim every time since in NAPI interrupts are disabled
	 * and due to this we miss the TX_DONE interrupt, which is not updated
	 * in the interrupt status register.
	 */
	txq_reclaim(dev, 0);
	if (netif_queue_stopped(dev)
	    && pep->tx_ring_size - pep->tx_desc_count > 1) {
		/* TX reclaim freed descriptors; let the stack transmit again. */
		netif_wake_queue(dev);
	}
	work_done = rxq_process(dev, budget);
	if (work_done < budget) {
		/* RX ring drained: leave polling mode and unmask interrupts. */
		napi_complete(napi);
		wrl(pep, INT_MASK, ALL_INTS);
	}

	return work_done;
}
| 1257 | |||
/*
 * ndo_start_xmit: place one skb on the TX ring and kick the DMA engine.
 * Always returns NETDEV_TX_OK; the skb is freed later by txq_reclaim()
 * after the hardware clears BUF_OWNED_BY_DMA.
 */
static int pxa168_eth_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct pxa168_eth_private *pep = netdev_priv(dev);
	struct net_device_stats *stats = &dev->stats;
	struct tx_desc *desc;
	int tx_index;
	int length;

	tx_index = eth_alloc_tx_desc_index(pep);
	desc = &pep->p_tx_desc_area[tx_index];
	length = skb->len;
	pep->tx_skb[tx_index] = skb;
	desc->byte_cnt = length;
	/* NOTE(review): the mapping result is not checked and a NULL device
	 * is passed to dma_map_single() — confirm against the DMA API. */
	desc->buf_ptr = dma_map_single(NULL, skb->data, length, DMA_TO_DEVICE);
	wmb();
	/* Descriptor fields must reach memory before ownership is handed to
	 * the DMA engine — hence the barriers around cmd_sts. */
	desc->cmd_sts = BUF_OWNED_BY_DMA | TX_GEN_CRC | TX_FIRST_DESC |
			TX_ZERO_PADDING | TX_LAST_DESC | TX_EN_INT;
	wmb();
	/* Start/continue the TX DMA on this descriptor. */
	wrl(pep, SDMA_CMD, SDMA_CMD_TXDH | SDMA_CMD_ERD);

	stats->tx_bytes += skb->len;
	stats->tx_packets++;
	dev->trans_start = jiffies;
	if (pep->tx_ring_size - pep->tx_desc_count <= 1) {
		/* We handled the current skb, but now we are out of space.*/
		netif_stop_queue(dev);
	}

	return NETDEV_TX_OK;
}
| 1288 | |||
| 1289 | static int smi_wait_ready(struct pxa168_eth_private *pep) | ||
| 1290 | { | ||
| 1291 | int i = 0; | ||
| 1292 | |||
| 1293 | /* wait for the SMI register to become available */ | ||
| 1294 | for (i = 0; rdl(pep, SMI) & SMI_BUSY; i++) { | ||
| 1295 | if (i == PHY_WAIT_ITERATIONS) | ||
| 1296 | return -ETIMEDOUT; | ||
| 1297 | msleep(10); | ||
| 1298 | } | ||
| 1299 | |||
| 1300 | return 0; | ||
| 1301 | } | ||
| 1302 | |||
| 1303 | static int pxa168_smi_read(struct mii_bus *bus, int phy_addr, int regnum) | ||
| 1304 | { | ||
| 1305 | struct pxa168_eth_private *pep = bus->priv; | ||
| 1306 | int i = 0; | ||
| 1307 | int val; | ||
| 1308 | |||
| 1309 | if (smi_wait_ready(pep)) { | ||
| 1310 | printk(KERN_WARNING "pxa168_eth: SMI bus busy timeout\n"); | ||
| 1311 | return -ETIMEDOUT; | ||
| 1312 | } | ||
| 1313 | wrl(pep, SMI, (phy_addr << 16) | (regnum << 21) | SMI_OP_R); | ||
| 1314 | /* now wait for the data to be valid */ | ||
| 1315 | for (i = 0; !((val = rdl(pep, SMI)) & SMI_R_VALID); i++) { | ||
| 1316 | if (i == PHY_WAIT_ITERATIONS) { | ||
| 1317 | printk(KERN_WARNING | ||
| 1318 | "pxa168_eth: SMI bus read not valid\n"); | ||
| 1319 | return -ENODEV; | ||
| 1320 | } | ||
| 1321 | msleep(10); | ||
| 1322 | } | ||
| 1323 | |||
| 1324 | return val & 0xffff; | ||
| 1325 | } | ||
| 1326 | |||
| 1327 | static int pxa168_smi_write(struct mii_bus *bus, int phy_addr, int regnum, | ||
| 1328 | u16 value) | ||
| 1329 | { | ||
| 1330 | struct pxa168_eth_private *pep = bus->priv; | ||
| 1331 | |||
| 1332 | if (smi_wait_ready(pep)) { | ||
| 1333 | printk(KERN_WARNING "pxa168_eth: SMI bus busy timeout\n"); | ||
| 1334 | return -ETIMEDOUT; | ||
| 1335 | } | ||
| 1336 | |||
| 1337 | wrl(pep, SMI, (phy_addr << 16) | (regnum << 21) | | ||
| 1338 | SMI_OP_W | (value & 0xffff)); | ||
| 1339 | |||
| 1340 | if (smi_wait_ready(pep)) { | ||
| 1341 | printk(KERN_ERR "pxa168_eth: SMI bus busy timeout\n"); | ||
| 1342 | return -ETIMEDOUT; | ||
| 1343 | } | ||
| 1344 | |||
| 1345 | return 0; | ||
| 1346 | } | ||
| 1347 | |||
| 1348 | static int pxa168_eth_do_ioctl(struct net_device *dev, struct ifreq *ifr, | ||
| 1349 | int cmd) | ||
| 1350 | { | ||
| 1351 | struct pxa168_eth_private *pep = netdev_priv(dev); | ||
| 1352 | if (pep->phy != NULL) | ||
| 1353 | return phy_mii_ioctl(pep->phy, if_mii(ifr), cmd); | ||
| 1354 | |||
| 1355 | return -EOPNOTSUPP; | ||
| 1356 | } | ||
| 1357 | |||
| 1358 | static struct phy_device *phy_scan(struct pxa168_eth_private *pep, int phy_addr) | ||
| 1359 | { | ||
| 1360 | struct mii_bus *bus = pep->smi_bus; | ||
| 1361 | struct phy_device *phydev; | ||
| 1362 | int start; | ||
| 1363 | int num; | ||
| 1364 | int i; | ||
| 1365 | |||
| 1366 | if (phy_addr == PXA168_ETH_PHY_ADDR_DEFAULT) { | ||
| 1367 | /* Scan entire range */ | ||
| 1368 | start = ethernet_phy_get(pep); | ||
| 1369 | num = 32; | ||
| 1370 | } else { | ||
| 1371 | /* Use phy addr specific to platform */ | ||
| 1372 | start = phy_addr & 0x1f; | ||
| 1373 | num = 1; | ||
| 1374 | } | ||
| 1375 | phydev = NULL; | ||
| 1376 | for (i = 0; i < num; i++) { | ||
| 1377 | int addr = (start + i) & 0x1f; | ||
| 1378 | if (bus->phy_map[addr] == NULL) | ||
| 1379 | mdiobus_scan(bus, addr); | ||
| 1380 | |||
| 1381 | if (phydev == NULL) { | ||
| 1382 | phydev = bus->phy_map[addr]; | ||
| 1383 | if (phydev != NULL) | ||
| 1384 | ethernet_phy_set_addr(pep, addr); | ||
| 1385 | } | ||
| 1386 | } | ||
| 1387 | |||
| 1388 | return phydev; | ||
| 1389 | } | ||
| 1390 | |||
| 1391 | static void phy_init(struct pxa168_eth_private *pep, int speed, int duplex) | ||
| 1392 | { | ||
| 1393 | struct phy_device *phy = pep->phy; | ||
| 1394 | ethernet_phy_reset(pep); | ||
| 1395 | |||
| 1396 | phy_attach(pep->dev, dev_name(&phy->dev), 0, PHY_INTERFACE_MODE_MII); | ||
| 1397 | |||
| 1398 | if (speed == 0) { | ||
| 1399 | phy->autoneg = AUTONEG_ENABLE; | ||
| 1400 | phy->speed = 0; | ||
| 1401 | phy->duplex = 0; | ||
| 1402 | phy->supported &= PHY_BASIC_FEATURES; | ||
| 1403 | phy->advertising = phy->supported | ADVERTISED_Autoneg; | ||
| 1404 | } else { | ||
| 1405 | phy->autoneg = AUTONEG_DISABLE; | ||
| 1406 | phy->advertising = 0; | ||
| 1407 | phy->speed = speed; | ||
| 1408 | phy->duplex = duplex; | ||
| 1409 | } | ||
| 1410 | phy_start_aneg(phy); | ||
| 1411 | } | ||
| 1412 | |||
| 1413 | static int ethernet_phy_setup(struct net_device *dev) | ||
| 1414 | { | ||
| 1415 | struct pxa168_eth_private *pep = netdev_priv(dev); | ||
| 1416 | |||
| 1417 | if (pep->pd != NULL) { | ||
| 1418 | if (pep->pd->init) | ||
| 1419 | pep->pd->init(); | ||
| 1420 | } | ||
| 1421 | pep->phy = phy_scan(pep, pep->pd->phy_addr & 0x1f); | ||
| 1422 | if (pep->phy != NULL) | ||
| 1423 | phy_init(pep, pep->pd->speed, pep->pd->duplex); | ||
| 1424 | update_hash_table_mac_address(pep, NULL, dev->dev_addr); | ||
| 1425 | |||
| 1426 | return 0; | ||
| 1427 | } | ||
| 1428 | |||
| 1429 | static int pxa168_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) | ||
| 1430 | { | ||
| 1431 | struct pxa168_eth_private *pep = netdev_priv(dev); | ||
| 1432 | int err; | ||
| 1433 | |||
| 1434 | err = phy_read_status(pep->phy); | ||
| 1435 | if (err == 0) | ||
| 1436 | err = phy_ethtool_gset(pep->phy, cmd); | ||
| 1437 | |||
| 1438 | return err; | ||
| 1439 | } | ||
| 1440 | |||
| 1441 | static int pxa168_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) | ||
| 1442 | { | ||
| 1443 | struct pxa168_eth_private *pep = netdev_priv(dev); | ||
| 1444 | |||
| 1445 | return phy_ethtool_sset(pep->phy, cmd); | ||
| 1446 | } | ||
| 1447 | |||
| 1448 | static void pxa168_get_drvinfo(struct net_device *dev, | ||
| 1449 | struct ethtool_drvinfo *info) | ||
| 1450 | { | ||
| 1451 | strncpy(info->driver, DRIVER_NAME, 32); | ||
| 1452 | strncpy(info->version, DRIVER_VERSION, 32); | ||
| 1453 | strncpy(info->fw_version, "N/A", 32); | ||
| 1454 | strncpy(info->bus_info, "N/A", 32); | ||
| 1455 | } | ||
| 1456 | |||
| 1457 | static u32 pxa168_get_link(struct net_device *dev) | ||
| 1458 | { | ||
| 1459 | return !!netif_carrier_ok(dev); | ||
| 1460 | } | ||
| 1461 | |||
/* ethtool operations: link settings via phylib plus basic driver info. */
static const struct ethtool_ops pxa168_ethtool_ops = {
	.get_settings = pxa168_get_settings,
	.set_settings = pxa168_set_settings,
	.get_drvinfo = pxa168_get_drvinfo,
	.get_link = pxa168_get_link,
};
| 1468 | |||
/* Network device callbacks wired into the net core. */
static const struct net_device_ops pxa168_eth_netdev_ops = {
	.ndo_open = pxa168_eth_open,
	.ndo_stop = pxa168_eth_stop,
	.ndo_start_xmit = pxa168_eth_start_xmit,
	.ndo_set_rx_mode = pxa168_eth_set_rx_mode,
	.ndo_set_mac_address = pxa168_eth_set_mac_address,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_do_ioctl = pxa168_eth_do_ioctl,
	.ndo_change_mtu = pxa168_eth_change_mtu,
	.ndo_tx_timeout = pxa168_eth_tx_timeout,
};
| 1480 | |||
| 1481 | static int pxa168_eth_probe(struct platform_device *pdev) | ||
| 1482 | { | ||
| 1483 | struct pxa168_eth_private *pep = NULL; | ||
| 1484 | struct net_device *dev = NULL; | ||
| 1485 | struct resource *res; | ||
| 1486 | struct clk *clk; | ||
| 1487 | int err; | ||
| 1488 | |||
| 1489 | printk(KERN_NOTICE "PXA168 10/100 Ethernet Driver\n"); | ||
| 1490 | |||
| 1491 | clk = clk_get(&pdev->dev, "MFUCLK"); | ||
| 1492 | if (IS_ERR(clk)) { | ||
| 1493 | printk(KERN_ERR "%s: Fast Ethernet failed to get clock\n", | ||
| 1494 | DRIVER_NAME); | ||
| 1495 | return -ENODEV; | ||
| 1496 | } | ||
| 1497 | clk_enable(clk); | ||
| 1498 | |||
| 1499 | dev = alloc_etherdev(sizeof(struct pxa168_eth_private)); | ||
| 1500 | if (!dev) { | ||
| 1501 | err = -ENOMEM; | ||
| 1502 | goto out; | ||
| 1503 | } | ||
| 1504 | |||
| 1505 | platform_set_drvdata(pdev, dev); | ||
| 1506 | pep = netdev_priv(dev); | ||
| 1507 | pep->dev = dev; | ||
| 1508 | pep->clk = clk; | ||
| 1509 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
| 1510 | if (res == NULL) { | ||
| 1511 | err = -ENODEV; | ||
| 1512 | goto out; | ||
| 1513 | } | ||
| 1514 | pep->base = ioremap(res->start, res->end - res->start + 1); | ||
| 1515 | if (pep->base == NULL) { | ||
| 1516 | err = -ENOMEM; | ||
| 1517 | goto out; | ||
| 1518 | } | ||
| 1519 | res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); | ||
| 1520 | BUG_ON(!res); | ||
| 1521 | dev->irq = res->start; | ||
| 1522 | dev->netdev_ops = &pxa168_eth_netdev_ops; | ||
| 1523 | dev->watchdog_timeo = 2 * HZ; | ||
| 1524 | dev->base_addr = 0; | ||
| 1525 | SET_ETHTOOL_OPS(dev, &pxa168_ethtool_ops); | ||
| 1526 | |||
| 1527 | INIT_WORK(&pep->tx_timeout_task, pxa168_eth_tx_timeout_task); | ||
| 1528 | |||
| 1529 | printk(KERN_INFO "%s:Using random mac address\n", DRIVER_NAME); | ||
| 1530 | random_ether_addr(dev->dev_addr); | ||
| 1531 | |||
| 1532 | pep->pd = pdev->dev.platform_data; | ||
| 1533 | pep->rx_ring_size = NUM_RX_DESCS; | ||
| 1534 | if (pep->pd->rx_queue_size) | ||
| 1535 | pep->rx_ring_size = pep->pd->rx_queue_size; | ||
| 1536 | |||
| 1537 | pep->tx_ring_size = NUM_TX_DESCS; | ||
| 1538 | if (pep->pd->tx_queue_size) | ||
| 1539 | pep->tx_ring_size = pep->pd->tx_queue_size; | ||
| 1540 | |||
| 1541 | pep->port_num = pep->pd->port_number; | ||
| 1542 | /* Hardware supports only 3 ports */ | ||
| 1543 | BUG_ON(pep->port_num > 2); | ||
| 1544 | netif_napi_add(dev, &pep->napi, pxa168_rx_poll, pep->rx_ring_size); | ||
| 1545 | |||
| 1546 | memset(&pep->timeout, 0, sizeof(struct timer_list)); | ||
| 1547 | init_timer(&pep->timeout); | ||
| 1548 | pep->timeout.function = rxq_refill_timer_wrapper; | ||
| 1549 | pep->timeout.data = (unsigned long)pep; | ||
| 1550 | |||
| 1551 | pep->smi_bus = mdiobus_alloc(); | ||
| 1552 | if (pep->smi_bus == NULL) { | ||
| 1553 | err = -ENOMEM; | ||
| 1554 | goto out; | ||
| 1555 | } | ||
| 1556 | pep->smi_bus->priv = pep; | ||
| 1557 | pep->smi_bus->name = "pxa168_eth smi"; | ||
| 1558 | pep->smi_bus->read = pxa168_smi_read; | ||
| 1559 | pep->smi_bus->write = pxa168_smi_write; | ||
| 1560 | snprintf(pep->smi_bus->id, MII_BUS_ID_SIZE, "%d", pdev->id); | ||
| 1561 | pep->smi_bus->parent = &pdev->dev; | ||
| 1562 | pep->smi_bus->phy_mask = 0xffffffff; | ||
| 1563 | if (mdiobus_register(pep->smi_bus) < 0) { | ||
| 1564 | err = -ENOMEM; | ||
| 1565 | goto out; | ||
| 1566 | } | ||
| 1567 | pxa168_init_hw(pep); | ||
| 1568 | err = ethernet_phy_setup(dev); | ||
| 1569 | if (err) | ||
| 1570 | goto out; | ||
| 1571 | SET_NETDEV_DEV(dev, &pdev->dev); | ||
| 1572 | err = register_netdev(dev); | ||
| 1573 | if (err) | ||
| 1574 | goto out; | ||
| 1575 | return 0; | ||
| 1576 | out: | ||
| 1577 | if (pep->clk) { | ||
| 1578 | clk_disable(pep->clk); | ||
| 1579 | clk_put(pep->clk); | ||
| 1580 | pep->clk = NULL; | ||
| 1581 | } | ||
| 1582 | if (pep->base) { | ||
| 1583 | iounmap(pep->base); | ||
| 1584 | pep->base = NULL; | ||
| 1585 | } | ||
| 1586 | if (dev) | ||
| 1587 | free_netdev(dev); | ||
| 1588 | return err; | ||
| 1589 | } | ||
| 1590 | |||
| 1591 | static int pxa168_eth_remove(struct platform_device *pdev) | ||
| 1592 | { | ||
| 1593 | struct net_device *dev = platform_get_drvdata(pdev); | ||
| 1594 | struct pxa168_eth_private *pep = netdev_priv(dev); | ||
| 1595 | |||
| 1596 | if (pep->htpr) { | ||
| 1597 | dma_free_coherent(pep->dev->dev.parent, HASH_ADDR_TABLE_SIZE, | ||
| 1598 | pep->htpr, pep->htpr_dma); | ||
| 1599 | pep->htpr = NULL; | ||
| 1600 | } | ||
| 1601 | if (pep->clk) { | ||
| 1602 | clk_disable(pep->clk); | ||
| 1603 | clk_put(pep->clk); | ||
| 1604 | pep->clk = NULL; | ||
| 1605 | } | ||
| 1606 | if (pep->phy != NULL) | ||
| 1607 | phy_detach(pep->phy); | ||
| 1608 | |||
| 1609 | iounmap(pep->base); | ||
| 1610 | pep->base = NULL; | ||
| 1611 | unregister_netdev(dev); | ||
| 1612 | flush_scheduled_work(); | ||
| 1613 | free_netdev(dev); | ||
| 1614 | platform_set_drvdata(pdev, NULL); | ||
| 1615 | return 0; | ||
| 1616 | } | ||
| 1617 | |||
/* Platform shutdown: quiesce the port so DMA is idle across reboot. */
static void pxa168_eth_shutdown(struct platform_device *pdev)
{
	eth_port_reset(platform_get_drvdata(pdev));
}
| 1623 | |||
#ifdef CONFIG_PM
/* Power management is not implemented; return -ENOSYS so the PM core
 * reports the failure instead of silently suspending a live device. */
static int pxa168_eth_resume(struct platform_device *pdev)
{
	return -ENOSYS;
}

static int pxa168_eth_suspend(struct platform_device *pdev, pm_message_t state)
{
	return -ENOSYS;
}

#else
/* No CONFIG_PM: leave the driver's suspend/resume hooks unset. */
#define pxa168_eth_resume NULL
#define pxa168_eth_suspend NULL
#endif
| 1639 | |||
/* Platform driver glue; matched against platform devices by DRIVER_NAME. */
static struct platform_driver pxa168_eth_driver = {
	.probe = pxa168_eth_probe,
	.remove = pxa168_eth_remove,
	.shutdown = pxa168_eth_shutdown,
	.resume = pxa168_eth_resume,
	.suspend = pxa168_eth_suspend,
	.driver = {
		.name = DRIVER_NAME,
	},
};
| 1650 | |||
/* Module entry point: register the platform driver. */
static int __init pxa168_init_module(void)
{
	return platform_driver_register(&pxa168_eth_driver);
}
| 1655 | |||
/* Module exit point: unregister the platform driver. */
static void __exit pxa168_cleanup_module(void)
{
	platform_driver_unregister(&pxa168_eth_driver);
}
| 1660 | |||
module_init(pxa168_init_module);
module_exit(pxa168_cleanup_module);

/* Module metadata; the alias lets udev autoload on the platform device. */
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Ethernet driver for Marvell PXA168");
MODULE_ALIAS("platform:pxa168_eth");
diff --git a/drivers/net/qlcnic/qlcnic_main.c b/drivers/net/qlcnic/qlcnic_main.c index bf6d87adda4f..213e3656d953 100644 --- a/drivers/net/qlcnic/qlcnic_main.c +++ b/drivers/net/qlcnic/qlcnic_main.c | |||
| @@ -1983,8 +1983,6 @@ static struct net_device_stats *qlcnic_get_stats(struct net_device *netdev) | |||
| 1983 | struct qlcnic_adapter *adapter = netdev_priv(netdev); | 1983 | struct qlcnic_adapter *adapter = netdev_priv(netdev); |
| 1984 | struct net_device_stats *stats = &netdev->stats; | 1984 | struct net_device_stats *stats = &netdev->stats; |
| 1985 | 1985 | ||
| 1986 | memset(stats, 0, sizeof(*stats)); | ||
| 1987 | |||
| 1988 | stats->rx_packets = adapter->stats.rx_pkts + adapter->stats.lro_pkts; | 1986 | stats->rx_packets = adapter->stats.rx_pkts + adapter->stats.lro_pkts; |
| 1989 | stats->tx_packets = adapter->stats.xmitfinished; | 1987 | stats->tx_packets = adapter->stats.xmitfinished; |
| 1990 | stats->rx_bytes = adapter->stats.rxbytes + adapter->stats.lrobytes; | 1988 | stats->rx_bytes = adapter->stats.rxbytes + adapter->stats.lrobytes; |
diff --git a/drivers/net/sh_eth.c b/drivers/net/sh_eth.c index f5a9eb1df593..79fd02bc69fd 100644 --- a/drivers/net/sh_eth.c +++ b/drivers/net/sh_eth.c | |||
| @@ -1437,7 +1437,7 @@ static const struct net_device_ops sh_eth_netdev_ops = { | |||
| 1437 | 1437 | ||
| 1438 | static int sh_eth_drv_probe(struct platform_device *pdev) | 1438 | static int sh_eth_drv_probe(struct platform_device *pdev) |
| 1439 | { | 1439 | { |
| 1440 | int ret, i, devno = 0; | 1440 | int ret, devno = 0; |
| 1441 | struct resource *res; | 1441 | struct resource *res; |
| 1442 | struct net_device *ndev = NULL; | 1442 | struct net_device *ndev = NULL; |
| 1443 | struct sh_eth_private *mdp; | 1443 | struct sh_eth_private *mdp; |
diff --git a/drivers/net/usb/ipheth.c b/drivers/net/usb/ipheth.c index 08e7b6abacdd..8ed30fa35d0a 100644 --- a/drivers/net/usb/ipheth.c +++ b/drivers/net/usb/ipheth.c | |||
| @@ -58,6 +58,7 @@ | |||
| 58 | #define USB_PRODUCT_IPHONE 0x1290 | 58 | #define USB_PRODUCT_IPHONE 0x1290 |
| 59 | #define USB_PRODUCT_IPHONE_3G 0x1292 | 59 | #define USB_PRODUCT_IPHONE_3G 0x1292 |
| 60 | #define USB_PRODUCT_IPHONE_3GS 0x1294 | 60 | #define USB_PRODUCT_IPHONE_3GS 0x1294 |
| 61 | #define USB_PRODUCT_IPHONE_4 0x1297 | ||
| 61 | 62 | ||
| 62 | #define IPHETH_USBINTF_CLASS 255 | 63 | #define IPHETH_USBINTF_CLASS 255 |
| 63 | #define IPHETH_USBINTF_SUBCLASS 253 | 64 | #define IPHETH_USBINTF_SUBCLASS 253 |
| @@ -92,6 +93,10 @@ static struct usb_device_id ipheth_table[] = { | |||
| 92 | USB_VENDOR_APPLE, USB_PRODUCT_IPHONE_3GS, | 93 | USB_VENDOR_APPLE, USB_PRODUCT_IPHONE_3GS, |
| 93 | IPHETH_USBINTF_CLASS, IPHETH_USBINTF_SUBCLASS, | 94 | IPHETH_USBINTF_CLASS, IPHETH_USBINTF_SUBCLASS, |
| 94 | IPHETH_USBINTF_PROTO) }, | 95 | IPHETH_USBINTF_PROTO) }, |
| 96 | { USB_DEVICE_AND_INTERFACE_INFO( | ||
| 97 | USB_VENDOR_APPLE, USB_PRODUCT_IPHONE_4, | ||
| 98 | IPHETH_USBINTF_CLASS, IPHETH_USBINTF_SUBCLASS, | ||
| 99 | IPHETH_USBINTF_PROTO) }, | ||
| 95 | { } | 100 | { } |
| 96 | }; | 101 | }; |
| 97 | MODULE_DEVICE_TABLE(usb, ipheth_table); | 102 | MODULE_DEVICE_TABLE(usb, ipheth_table); |
diff --git a/drivers/net/wireless/adm8211.c b/drivers/net/wireless/adm8211.c index a105087af963..f9aa1bc0a947 100644 --- a/drivers/net/wireless/adm8211.c +++ b/drivers/net/wireless/adm8211.c | |||
| @@ -732,7 +732,7 @@ static int adm8211_rf_set_channel(struct ieee80211_hw *dev, unsigned int chan) | |||
| 732 | 732 | ||
| 733 | /* Nothing to do for ADMtek BBP */ | 733 | /* Nothing to do for ADMtek BBP */ |
| 734 | } else if (priv->bbp_type != ADM8211_TYPE_ADMTEK) | 734 | } else if (priv->bbp_type != ADM8211_TYPE_ADMTEK) |
| 735 | wiphy_debug(dev->wiphy, "unsupported bbp type %d\n", | 735 | wiphy_debug(dev->wiphy, "unsupported BBP type %d\n", |
| 736 | priv->bbp_type); | 736 | priv->bbp_type); |
| 737 | 737 | ||
| 738 | ADM8211_RESTORE(); | 738 | ADM8211_RESTORE(); |
| @@ -1032,7 +1032,7 @@ static int adm8211_hw_init_bbp(struct ieee80211_hw *dev) | |||
| 1032 | break; | 1032 | break; |
| 1033 | } | 1033 | } |
| 1034 | } else | 1034 | } else |
| 1035 | wiphy_debug(dev->wiphy, "unsupported bbp %d\n", priv->bbp_type); | 1035 | wiphy_debug(dev->wiphy, "unsupported BBP %d\n", priv->bbp_type); |
| 1036 | 1036 | ||
| 1037 | ADM8211_CSR_WRITE(SYNRF, 0); | 1037 | ADM8211_CSR_WRITE(SYNRF, 0); |
| 1038 | 1038 | ||
| @@ -1525,7 +1525,7 @@ static int adm8211_start(struct ieee80211_hw *dev) | |||
| 1525 | retval = request_irq(priv->pdev->irq, adm8211_interrupt, | 1525 | retval = request_irq(priv->pdev->irq, adm8211_interrupt, |
| 1526 | IRQF_SHARED, "adm8211", dev); | 1526 | IRQF_SHARED, "adm8211", dev); |
| 1527 | if (retval) { | 1527 | if (retval) { |
| 1528 | wiphy_err(dev->wiphy, "failed to register irq handler\n"); | 1528 | wiphy_err(dev->wiphy, "failed to register IRQ handler\n"); |
| 1529 | goto fail; | 1529 | goto fail; |
| 1530 | } | 1530 | } |
| 1531 | 1531 | ||
| @@ -1902,7 +1902,7 @@ static int __devinit adm8211_probe(struct pci_dev *pdev, | |||
| 1902 | goto err_free_eeprom; | 1902 | goto err_free_eeprom; |
| 1903 | } | 1903 | } |
| 1904 | 1904 | ||
| 1905 | wiphy_info(dev->wiphy, "hwaddr %pm, rev 0x%02x\n", | 1905 | wiphy_info(dev->wiphy, "hwaddr %pM, Rev 0x%02x\n", |
| 1906 | dev->wiphy->perm_addr, pdev->revision); | 1906 | dev->wiphy->perm_addr, pdev->revision); |
| 1907 | 1907 | ||
| 1908 | return 0; | 1908 | return 0; |
diff --git a/drivers/net/wireless/at76c50x-usb.c b/drivers/net/wireless/at76c50x-usb.c index d5140a87f073..1128fa8c9ed5 100644 --- a/drivers/net/wireless/at76c50x-usb.c +++ b/drivers/net/wireless/at76c50x-usb.c | |||
| @@ -655,7 +655,7 @@ static int at76_get_hw_config(struct at76_priv *priv) | |||
| 655 | exit: | 655 | exit: |
| 656 | kfree(hwcfg); | 656 | kfree(hwcfg); |
| 657 | if (ret < 0) | 657 | if (ret < 0) |
| 658 | wiphy_err(priv->hw->wiphy, "cannot get hw config (error %d)\n", | 658 | wiphy_err(priv->hw->wiphy, "cannot get HW Config (error %d)\n", |
| 659 | ret); | 659 | ret); |
| 660 | 660 | ||
| 661 | return ret; | 661 | return ret; |
| @@ -960,7 +960,7 @@ static void at76_dump_mib_mac_addr(struct at76_priv *priv) | |||
| 960 | sizeof(struct mib_mac_addr)); | 960 | sizeof(struct mib_mac_addr)); |
| 961 | if (ret < 0) { | 961 | if (ret < 0) { |
| 962 | wiphy_err(priv->hw->wiphy, | 962 | wiphy_err(priv->hw->wiphy, |
| 963 | "at76_get_mib (mac_addr) failed: %d\n", ret); | 963 | "at76_get_mib (MAC_ADDR) failed: %d\n", ret); |
| 964 | goto exit; | 964 | goto exit; |
| 965 | } | 965 | } |
| 966 | 966 | ||
| @@ -989,7 +989,7 @@ static void at76_dump_mib_mac_wep(struct at76_priv *priv) | |||
| 989 | sizeof(struct mib_mac_wep)); | 989 | sizeof(struct mib_mac_wep)); |
| 990 | if (ret < 0) { | 990 | if (ret < 0) { |
| 991 | wiphy_err(priv->hw->wiphy, | 991 | wiphy_err(priv->hw->wiphy, |
| 992 | "at76_get_mib (mac_wep) failed: %d\n", ret); | 992 | "at76_get_mib (MAC_WEP) failed: %d\n", ret); |
| 993 | goto exit; | 993 | goto exit; |
| 994 | } | 994 | } |
| 995 | 995 | ||
| @@ -1026,7 +1026,7 @@ static void at76_dump_mib_mac_mgmt(struct at76_priv *priv) | |||
| 1026 | sizeof(struct mib_mac_mgmt)); | 1026 | sizeof(struct mib_mac_mgmt)); |
| 1027 | if (ret < 0) { | 1027 | if (ret < 0) { |
| 1028 | wiphy_err(priv->hw->wiphy, | 1028 | wiphy_err(priv->hw->wiphy, |
| 1029 | "at76_get_mib (mac_mgmt) failed: %d\n", ret); | 1029 | "at76_get_mib (MAC_MGMT) failed: %d\n", ret); |
| 1030 | goto exit; | 1030 | goto exit; |
| 1031 | } | 1031 | } |
| 1032 | 1032 | ||
| @@ -1062,7 +1062,7 @@ static void at76_dump_mib_mac(struct at76_priv *priv) | |||
| 1062 | ret = at76_get_mib(priv->udev, MIB_MAC, m, sizeof(struct mib_mac)); | 1062 | ret = at76_get_mib(priv->udev, MIB_MAC, m, sizeof(struct mib_mac)); |
| 1063 | if (ret < 0) { | 1063 | if (ret < 0) { |
| 1064 | wiphy_err(priv->hw->wiphy, | 1064 | wiphy_err(priv->hw->wiphy, |
| 1065 | "at76_get_mib (mac) failed: %d\n", ret); | 1065 | "at76_get_mib (MAC) failed: %d\n", ret); |
| 1066 | goto exit; | 1066 | goto exit; |
| 1067 | } | 1067 | } |
| 1068 | 1068 | ||
| @@ -1099,7 +1099,7 @@ static void at76_dump_mib_phy(struct at76_priv *priv) | |||
| 1099 | ret = at76_get_mib(priv->udev, MIB_PHY, m, sizeof(struct mib_phy)); | 1099 | ret = at76_get_mib(priv->udev, MIB_PHY, m, sizeof(struct mib_phy)); |
| 1100 | if (ret < 0) { | 1100 | if (ret < 0) { |
| 1101 | wiphy_err(priv->hw->wiphy, | 1101 | wiphy_err(priv->hw->wiphy, |
| 1102 | "at76_get_mib (phy) failed: %d\n", ret); | 1102 | "at76_get_mib (PHY) failed: %d\n", ret); |
| 1103 | goto exit; | 1103 | goto exit; |
| 1104 | } | 1104 | } |
| 1105 | 1105 | ||
| @@ -1132,7 +1132,7 @@ static void at76_dump_mib_local(struct at76_priv *priv) | |||
| 1132 | ret = at76_get_mib(priv->udev, MIB_LOCAL, m, sizeof(struct mib_local)); | 1132 | ret = at76_get_mib(priv->udev, MIB_LOCAL, m, sizeof(struct mib_local)); |
| 1133 | if (ret < 0) { | 1133 | if (ret < 0) { |
| 1134 | wiphy_err(priv->hw->wiphy, | 1134 | wiphy_err(priv->hw->wiphy, |
| 1135 | "at76_get_mib (local) failed: %d\n", ret); | 1135 | "at76_get_mib (LOCAL) failed: %d\n", ret); |
| 1136 | goto exit; | 1136 | goto exit; |
| 1137 | } | 1137 | } |
| 1138 | 1138 | ||
| @@ -1158,7 +1158,7 @@ static void at76_dump_mib_mdomain(struct at76_priv *priv) | |||
| 1158 | sizeof(struct mib_mdomain)); | 1158 | sizeof(struct mib_mdomain)); |
| 1159 | if (ret < 0) { | 1159 | if (ret < 0) { |
| 1160 | wiphy_err(priv->hw->wiphy, | 1160 | wiphy_err(priv->hw->wiphy, |
| 1161 | "at76_get_mib (mdomain) failed: %d\n", ret); | 1161 | "at76_get_mib (MDOMAIN) failed: %d\n", ret); |
| 1162 | goto exit; | 1162 | goto exit; |
| 1163 | } | 1163 | } |
| 1164 | 1164 | ||
| @@ -1229,7 +1229,7 @@ static int at76_submit_rx_urb(struct at76_priv *priv) | |||
| 1229 | struct sk_buff *skb = priv->rx_skb; | 1229 | struct sk_buff *skb = priv->rx_skb; |
| 1230 | 1230 | ||
| 1231 | if (!priv->rx_urb) { | 1231 | if (!priv->rx_urb) { |
| 1232 | wiphy_err(priv->hw->wiphy, "%s: priv->rx_urb is null\n", | 1232 | wiphy_err(priv->hw->wiphy, "%s: priv->rx_urb is NULL\n", |
| 1233 | __func__); | 1233 | __func__); |
| 1234 | return -EFAULT; | 1234 | return -EFAULT; |
| 1235 | } | 1235 | } |
| @@ -1792,7 +1792,7 @@ static int at76_mac80211_tx(struct ieee80211_hw *hw, struct sk_buff *skb) | |||
| 1792 | wiphy_err(priv->hw->wiphy, "error in tx submit urb: %d\n", ret); | 1792 | wiphy_err(priv->hw->wiphy, "error in tx submit urb: %d\n", ret); |
| 1793 | if (ret == -EINVAL) | 1793 | if (ret == -EINVAL) |
| 1794 | wiphy_err(priv->hw->wiphy, | 1794 | wiphy_err(priv->hw->wiphy, |
| 1795 | "-einval: tx urb %p hcpriv %p complete %p\n", | 1795 | "-EINVAL: tx urb %p hcpriv %p complete %p\n", |
| 1796 | priv->tx_urb, | 1796 | priv->tx_urb, |
| 1797 | priv->tx_urb->hcpriv, priv->tx_urb->complete); | 1797 | priv->tx_urb->hcpriv, priv->tx_urb->complete); |
| 1798 | } | 1798 | } |
| @@ -2310,7 +2310,7 @@ static int at76_init_new_device(struct at76_priv *priv, | |||
| 2310 | 2310 | ||
| 2311 | priv->mac80211_registered = 1; | 2311 | priv->mac80211_registered = 1; |
| 2312 | 2312 | ||
| 2313 | wiphy_info(priv->hw->wiphy, "usb %s, mac %pm, firmware %d.%d.%d-%d\n", | 2313 | wiphy_info(priv->hw->wiphy, "USB %s, MAC %pM, firmware %d.%d.%d-%d\n", |
| 2314 | dev_name(&interface->dev), priv->mac_addr, | 2314 | dev_name(&interface->dev), priv->mac_addr, |
| 2315 | priv->fw_version.major, priv->fw_version.minor, | 2315 | priv->fw_version.major, priv->fw_version.minor, |
| 2316 | priv->fw_version.patch, priv->fw_version.build); | 2316 | priv->fw_version.patch, priv->fw_version.build); |
diff --git a/drivers/net/wireless/ath/ar9170/main.c b/drivers/net/wireless/ath/ar9170/main.c index c67b05f3bcbd..debfb0fbc7c5 100644 --- a/drivers/net/wireless/ath/ar9170/main.c +++ b/drivers/net/wireless/ath/ar9170/main.c | |||
| @@ -245,7 +245,7 @@ static void __ar9170_dump_txstats(struct ar9170 *ar) | |||
| 245 | { | 245 | { |
| 246 | int i; | 246 | int i; |
| 247 | 247 | ||
| 248 | wiphy_debug(ar->hw->wiphy, "qos queue stats\n"); | 248 | wiphy_debug(ar->hw->wiphy, "QoS queue stats\n"); |
| 249 | 249 | ||
| 250 | for (i = 0; i < __AR9170_NUM_TXQ; i++) | 250 | for (i = 0; i < __AR9170_NUM_TXQ; i++) |
| 251 | wiphy_debug(ar->hw->wiphy, | 251 | wiphy_debug(ar->hw->wiphy, |
| @@ -387,7 +387,7 @@ static struct sk_buff *ar9170_get_queued_skb(struct ar9170 *ar, | |||
| 387 | if (mac && compare_ether_addr(ieee80211_get_DA(hdr), mac)) { | 387 | if (mac && compare_ether_addr(ieee80211_get_DA(hdr), mac)) { |
| 388 | #ifdef AR9170_QUEUE_DEBUG | 388 | #ifdef AR9170_QUEUE_DEBUG |
| 389 | wiphy_debug(ar->hw->wiphy, | 389 | wiphy_debug(ar->hw->wiphy, |
| 390 | "skip frame => da %pm != %pm\n", | 390 | "skip frame => DA %pM != %pM\n", |
| 391 | mac, ieee80211_get_DA(hdr)); | 391 | mac, ieee80211_get_DA(hdr)); |
| 392 | ar9170_print_txheader(ar, skb); | 392 | ar9170_print_txheader(ar, skb); |
| 393 | #endif /* AR9170_QUEUE_DEBUG */ | 393 | #endif /* AR9170_QUEUE_DEBUG */ |
diff --git a/drivers/net/wireless/ipw2x00/ipw2100.c b/drivers/net/wireless/ipw2x00/ipw2100.c index 1189dbb6e2a6..996e9d7d7586 100644 --- a/drivers/net/wireless/ipw2x00/ipw2100.c +++ b/drivers/net/wireless/ipw2x00/ipw2100.c | |||
| @@ -2723,14 +2723,6 @@ static void __ipw2100_rx_process(struct ipw2100_priv *priv) | |||
| 2723 | 2723 | ||
| 2724 | packet = &priv->rx_buffers[i]; | 2724 | packet = &priv->rx_buffers[i]; |
| 2725 | 2725 | ||
| 2726 | /* Sync the DMA for the STATUS buffer so CPU is sure to get | ||
| 2727 | * the correct values */ | ||
| 2728 | pci_dma_sync_single_for_cpu(priv->pci_dev, | ||
| 2729 | sq->nic + | ||
| 2730 | sizeof(struct ipw2100_status) * i, | ||
| 2731 | sizeof(struct ipw2100_status), | ||
| 2732 | PCI_DMA_FROMDEVICE); | ||
| 2733 | |||
| 2734 | /* Sync the DMA for the RX buffer so CPU is sure to get | 2726 | /* Sync the DMA for the RX buffer so CPU is sure to get |
| 2735 | * the correct values */ | 2727 | * the correct values */ |
| 2736 | pci_dma_sync_single_for_cpu(priv->pci_dev, packet->dma_addr, | 2728 | pci_dma_sync_single_for_cpu(priv->pci_dev, packet->dma_addr, |
diff --git a/drivers/net/wireless/iwlwifi/iwl-1000.c b/drivers/net/wireless/iwlwifi/iwl-1000.c index fec026212326..0b779a41a142 100644 --- a/drivers/net/wireless/iwlwifi/iwl-1000.c +++ b/drivers/net/wireless/iwlwifi/iwl-1000.c | |||
| @@ -265,7 +265,7 @@ struct iwl_cfg iwl1000_bgn_cfg = { | |||
| 265 | .support_ct_kill_exit = true, | 265 | .support_ct_kill_exit = true, |
| 266 | .plcp_delta_threshold = IWL_MAX_PLCP_ERR_EXT_LONG_THRESHOLD_DEF, | 266 | .plcp_delta_threshold = IWL_MAX_PLCP_ERR_EXT_LONG_THRESHOLD_DEF, |
| 267 | .chain_noise_scale = 1000, | 267 | .chain_noise_scale = 1000, |
| 268 | .monitor_recover_period = IWL_MONITORING_PERIOD, | 268 | .monitor_recover_period = IWL_DEF_MONITORING_PERIOD, |
| 269 | .max_event_log_size = 128, | 269 | .max_event_log_size = 128, |
| 270 | .ucode_tracing = true, | 270 | .ucode_tracing = true, |
| 271 | .sensitivity_calib_by_driver = true, | 271 | .sensitivity_calib_by_driver = true, |
| @@ -297,7 +297,7 @@ struct iwl_cfg iwl1000_bg_cfg = { | |||
| 297 | .support_ct_kill_exit = true, | 297 | .support_ct_kill_exit = true, |
| 298 | .plcp_delta_threshold = IWL_MAX_PLCP_ERR_EXT_LONG_THRESHOLD_DEF, | 298 | .plcp_delta_threshold = IWL_MAX_PLCP_ERR_EXT_LONG_THRESHOLD_DEF, |
| 299 | .chain_noise_scale = 1000, | 299 | .chain_noise_scale = 1000, |
| 300 | .monitor_recover_period = IWL_MONITORING_PERIOD, | 300 | .monitor_recover_period = IWL_DEF_MONITORING_PERIOD, |
| 301 | .max_event_log_size = 128, | 301 | .max_event_log_size = 128, |
| 302 | .ucode_tracing = true, | 302 | .ucode_tracing = true, |
| 303 | .sensitivity_calib_by_driver = true, | 303 | .sensitivity_calib_by_driver = true, |
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945.c b/drivers/net/wireless/iwlwifi/iwl-3945.c index 6950a783913b..8ccfcd08218d 100644 --- a/drivers/net/wireless/iwlwifi/iwl-3945.c +++ b/drivers/net/wireless/iwlwifi/iwl-3945.c | |||
| @@ -2731,7 +2731,7 @@ static struct iwl_cfg iwl3945_bg_cfg = { | |||
| 2731 | .led_compensation = 64, | 2731 | .led_compensation = 64, |
| 2732 | .broken_powersave = true, | 2732 | .broken_powersave = true, |
| 2733 | .plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF, | 2733 | .plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF, |
| 2734 | .monitor_recover_period = IWL_MONITORING_PERIOD, | 2734 | .monitor_recover_period = IWL_DEF_MONITORING_PERIOD, |
| 2735 | .max_event_log_size = 512, | 2735 | .max_event_log_size = 512, |
| 2736 | .tx_power_by_driver = true, | 2736 | .tx_power_by_driver = true, |
| 2737 | }; | 2737 | }; |
| @@ -2752,7 +2752,7 @@ static struct iwl_cfg iwl3945_abg_cfg = { | |||
| 2752 | .led_compensation = 64, | 2752 | .led_compensation = 64, |
| 2753 | .broken_powersave = true, | 2753 | .broken_powersave = true, |
| 2754 | .plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF, | 2754 | .plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF, |
| 2755 | .monitor_recover_period = IWL_MONITORING_PERIOD, | 2755 | .monitor_recover_period = IWL_DEF_MONITORING_PERIOD, |
| 2756 | .max_event_log_size = 512, | 2756 | .max_event_log_size = 512, |
| 2757 | .tx_power_by_driver = true, | 2757 | .tx_power_by_driver = true, |
| 2758 | }; | 2758 | }; |
diff --git a/drivers/net/wireless/iwlwifi/iwl-4965.c b/drivers/net/wireless/iwlwifi/iwl-4965.c index d6da356608fa..d92b72909233 100644 --- a/drivers/net/wireless/iwlwifi/iwl-4965.c +++ b/drivers/net/wireless/iwlwifi/iwl-4965.c | |||
| @@ -2322,7 +2322,7 @@ struct iwl_cfg iwl4965_agn_cfg = { | |||
| 2322 | .led_compensation = 61, | 2322 | .led_compensation = 61, |
| 2323 | .chain_noise_num_beacons = IWL4965_CAL_NUM_BEACONS, | 2323 | .chain_noise_num_beacons = IWL4965_CAL_NUM_BEACONS, |
| 2324 | .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF, | 2324 | .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF, |
| 2325 | .monitor_recover_period = IWL_MONITORING_PERIOD, | 2325 | .monitor_recover_period = IWL_DEF_MONITORING_PERIOD, |
| 2326 | .temperature_kelvin = true, | 2326 | .temperature_kelvin = true, |
| 2327 | .max_event_log_size = 512, | 2327 | .max_event_log_size = 512, |
| 2328 | .tx_power_by_driver = true, | 2328 | .tx_power_by_driver = true, |
diff --git a/drivers/net/wireless/iwlwifi/iwl-5000.c b/drivers/net/wireless/iwlwifi/iwl-5000.c index aacf3770f075..48bdcd8d2e94 100644 --- a/drivers/net/wireless/iwlwifi/iwl-5000.c +++ b/drivers/net/wireless/iwlwifi/iwl-5000.c | |||
| @@ -510,7 +510,7 @@ struct iwl_cfg iwl5300_agn_cfg = { | |||
| 510 | .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS, | 510 | .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS, |
| 511 | .plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF, | 511 | .plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF, |
| 512 | .chain_noise_scale = 1000, | 512 | .chain_noise_scale = 1000, |
| 513 | .monitor_recover_period = IWL_MONITORING_PERIOD, | 513 | .monitor_recover_period = IWL_LONG_MONITORING_PERIOD, |
| 514 | .max_event_log_size = 512, | 514 | .max_event_log_size = 512, |
| 515 | .ucode_tracing = true, | 515 | .ucode_tracing = true, |
| 516 | .sensitivity_calib_by_driver = true, | 516 | .sensitivity_calib_by_driver = true, |
| @@ -541,7 +541,7 @@ struct iwl_cfg iwl5100_bgn_cfg = { | |||
| 541 | .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS, | 541 | .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS, |
| 542 | .plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF, | 542 | .plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF, |
| 543 | .chain_noise_scale = 1000, | 543 | .chain_noise_scale = 1000, |
| 544 | .monitor_recover_period = IWL_MONITORING_PERIOD, | 544 | .monitor_recover_period = IWL_LONG_MONITORING_PERIOD, |
| 545 | .max_event_log_size = 512, | 545 | .max_event_log_size = 512, |
| 546 | .ucode_tracing = true, | 546 | .ucode_tracing = true, |
| 547 | .sensitivity_calib_by_driver = true, | 547 | .sensitivity_calib_by_driver = true, |
| @@ -570,7 +570,7 @@ struct iwl_cfg iwl5100_abg_cfg = { | |||
| 570 | .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS, | 570 | .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS, |
| 571 | .plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF, | 571 | .plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF, |
| 572 | .chain_noise_scale = 1000, | 572 | .chain_noise_scale = 1000, |
| 573 | .monitor_recover_period = IWL_MONITORING_PERIOD, | 573 | .monitor_recover_period = IWL_LONG_MONITORING_PERIOD, |
| 574 | .max_event_log_size = 512, | 574 | .max_event_log_size = 512, |
| 575 | .ucode_tracing = true, | 575 | .ucode_tracing = true, |
| 576 | .sensitivity_calib_by_driver = true, | 576 | .sensitivity_calib_by_driver = true, |
| @@ -601,7 +601,7 @@ struct iwl_cfg iwl5100_agn_cfg = { | |||
| 601 | .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS, | 601 | .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS, |
| 602 | .plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF, | 602 | .plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF, |
| 603 | .chain_noise_scale = 1000, | 603 | .chain_noise_scale = 1000, |
| 604 | .monitor_recover_period = IWL_MONITORING_PERIOD, | 604 | .monitor_recover_period = IWL_LONG_MONITORING_PERIOD, |
| 605 | .max_event_log_size = 512, | 605 | .max_event_log_size = 512, |
| 606 | .ucode_tracing = true, | 606 | .ucode_tracing = true, |
| 607 | .sensitivity_calib_by_driver = true, | 607 | .sensitivity_calib_by_driver = true, |
| @@ -632,7 +632,7 @@ struct iwl_cfg iwl5350_agn_cfg = { | |||
| 632 | .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS, | 632 | .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS, |
| 633 | .plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF, | 633 | .plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF, |
| 634 | .chain_noise_scale = 1000, | 634 | .chain_noise_scale = 1000, |
| 635 | .monitor_recover_period = IWL_MONITORING_PERIOD, | 635 | .monitor_recover_period = IWL_LONG_MONITORING_PERIOD, |
| 636 | .max_event_log_size = 512, | 636 | .max_event_log_size = 512, |
| 637 | .ucode_tracing = true, | 637 | .ucode_tracing = true, |
| 638 | .sensitivity_calib_by_driver = true, | 638 | .sensitivity_calib_by_driver = true, |
| @@ -663,7 +663,7 @@ struct iwl_cfg iwl5150_agn_cfg = { | |||
| 663 | .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS, | 663 | .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS, |
| 664 | .plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF, | 664 | .plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF, |
| 665 | .chain_noise_scale = 1000, | 665 | .chain_noise_scale = 1000, |
| 666 | .monitor_recover_period = IWL_MONITORING_PERIOD, | 666 | .monitor_recover_period = IWL_LONG_MONITORING_PERIOD, |
| 667 | .max_event_log_size = 512, | 667 | .max_event_log_size = 512, |
| 668 | .ucode_tracing = true, | 668 | .ucode_tracing = true, |
| 669 | .sensitivity_calib_by_driver = true, | 669 | .sensitivity_calib_by_driver = true, |
| @@ -693,7 +693,7 @@ struct iwl_cfg iwl5150_abg_cfg = { | |||
| 693 | .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS, | 693 | .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS, |
| 694 | .plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF, | 694 | .plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF, |
| 695 | .chain_noise_scale = 1000, | 695 | .chain_noise_scale = 1000, |
| 696 | .monitor_recover_period = IWL_MONITORING_PERIOD, | 696 | .monitor_recover_period = IWL_LONG_MONITORING_PERIOD, |
| 697 | .max_event_log_size = 512, | 697 | .max_event_log_size = 512, |
| 698 | .ucode_tracing = true, | 698 | .ucode_tracing = true, |
| 699 | .sensitivity_calib_by_driver = true, | 699 | .sensitivity_calib_by_driver = true, |
diff --git a/drivers/net/wireless/iwlwifi/iwl-6000.c b/drivers/net/wireless/iwlwifi/iwl-6000.c index af4fd50f3405..cee06b968de8 100644 --- a/drivers/net/wireless/iwlwifi/iwl-6000.c +++ b/drivers/net/wireless/iwlwifi/iwl-6000.c | |||
| @@ -388,7 +388,7 @@ struct iwl_cfg iwl6000g2a_2agn_cfg = { | |||
| 388 | .support_ct_kill_exit = true, | 388 | .support_ct_kill_exit = true, |
| 389 | .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF, | 389 | .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF, |
| 390 | .chain_noise_scale = 1000, | 390 | .chain_noise_scale = 1000, |
| 391 | .monitor_recover_period = IWL_MONITORING_PERIOD, | 391 | .monitor_recover_period = IWL_DEF_MONITORING_PERIOD, |
| 392 | .max_event_log_size = 512, | 392 | .max_event_log_size = 512, |
| 393 | .ucode_tracing = true, | 393 | .ucode_tracing = true, |
| 394 | .sensitivity_calib_by_driver = true, | 394 | .sensitivity_calib_by_driver = true, |
| @@ -424,7 +424,7 @@ struct iwl_cfg iwl6000g2a_2abg_cfg = { | |||
| 424 | .support_ct_kill_exit = true, | 424 | .support_ct_kill_exit = true, |
| 425 | .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF, | 425 | .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF, |
| 426 | .chain_noise_scale = 1000, | 426 | .chain_noise_scale = 1000, |
| 427 | .monitor_recover_period = IWL_MONITORING_PERIOD, | 427 | .monitor_recover_period = IWL_DEF_MONITORING_PERIOD, |
| 428 | .max_event_log_size = 512, | 428 | .max_event_log_size = 512, |
| 429 | .sensitivity_calib_by_driver = true, | 429 | .sensitivity_calib_by_driver = true, |
| 430 | .chain_noise_calib_by_driver = true, | 430 | .chain_noise_calib_by_driver = true, |
| @@ -459,7 +459,7 @@ struct iwl_cfg iwl6000g2a_2bg_cfg = { | |||
| 459 | .support_ct_kill_exit = true, | 459 | .support_ct_kill_exit = true, |
| 460 | .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF, | 460 | .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF, |
| 461 | .chain_noise_scale = 1000, | 461 | .chain_noise_scale = 1000, |
| 462 | .monitor_recover_period = IWL_MONITORING_PERIOD, | 462 | .monitor_recover_period = IWL_DEF_MONITORING_PERIOD, |
| 463 | .max_event_log_size = 512, | 463 | .max_event_log_size = 512, |
| 464 | .sensitivity_calib_by_driver = true, | 464 | .sensitivity_calib_by_driver = true, |
| 465 | .chain_noise_calib_by_driver = true, | 465 | .chain_noise_calib_by_driver = true, |
| @@ -496,7 +496,7 @@ struct iwl_cfg iwl6000g2b_2agn_cfg = { | |||
| 496 | .support_ct_kill_exit = true, | 496 | .support_ct_kill_exit = true, |
| 497 | .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF, | 497 | .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF, |
| 498 | .chain_noise_scale = 1000, | 498 | .chain_noise_scale = 1000, |
| 499 | .monitor_recover_period = IWL_MONITORING_PERIOD, | 499 | .monitor_recover_period = IWL_LONG_MONITORING_PERIOD, |
| 500 | .max_event_log_size = 512, | 500 | .max_event_log_size = 512, |
| 501 | .sensitivity_calib_by_driver = true, | 501 | .sensitivity_calib_by_driver = true, |
| 502 | .chain_noise_calib_by_driver = true, | 502 | .chain_noise_calib_by_driver = true, |
| @@ -532,7 +532,7 @@ struct iwl_cfg iwl6000g2b_2abg_cfg = { | |||
| 532 | .support_ct_kill_exit = true, | 532 | .support_ct_kill_exit = true, |
| 533 | .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF, | 533 | .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF, |
| 534 | .chain_noise_scale = 1000, | 534 | .chain_noise_scale = 1000, |
| 535 | .monitor_recover_period = IWL_MONITORING_PERIOD, | 535 | .monitor_recover_period = IWL_LONG_MONITORING_PERIOD, |
| 536 | .max_event_log_size = 512, | 536 | .max_event_log_size = 512, |
| 537 | .sensitivity_calib_by_driver = true, | 537 | .sensitivity_calib_by_driver = true, |
| 538 | .chain_noise_calib_by_driver = true, | 538 | .chain_noise_calib_by_driver = true, |
| @@ -570,7 +570,7 @@ struct iwl_cfg iwl6000g2b_2bgn_cfg = { | |||
| 570 | .support_ct_kill_exit = true, | 570 | .support_ct_kill_exit = true, |
| 571 | .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF, | 571 | .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF, |
| 572 | .chain_noise_scale = 1000, | 572 | .chain_noise_scale = 1000, |
| 573 | .monitor_recover_period = IWL_MONITORING_PERIOD, | 573 | .monitor_recover_period = IWL_LONG_MONITORING_PERIOD, |
| 574 | .max_event_log_size = 512, | 574 | .max_event_log_size = 512, |
| 575 | .sensitivity_calib_by_driver = true, | 575 | .sensitivity_calib_by_driver = true, |
| 576 | .chain_noise_calib_by_driver = true, | 576 | .chain_noise_calib_by_driver = true, |
| @@ -606,7 +606,7 @@ struct iwl_cfg iwl6000g2b_2bg_cfg = { | |||
| 606 | .support_ct_kill_exit = true, | 606 | .support_ct_kill_exit = true, |
| 607 | .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF, | 607 | .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF, |
| 608 | .chain_noise_scale = 1000, | 608 | .chain_noise_scale = 1000, |
| 609 | .monitor_recover_period = IWL_MONITORING_PERIOD, | 609 | .monitor_recover_period = IWL_LONG_MONITORING_PERIOD, |
| 610 | .max_event_log_size = 512, | 610 | .max_event_log_size = 512, |
| 611 | .sensitivity_calib_by_driver = true, | 611 | .sensitivity_calib_by_driver = true, |
| 612 | .chain_noise_calib_by_driver = true, | 612 | .chain_noise_calib_by_driver = true, |
| @@ -644,7 +644,7 @@ struct iwl_cfg iwl6000g2b_bgn_cfg = { | |||
| 644 | .support_ct_kill_exit = true, | 644 | .support_ct_kill_exit = true, |
| 645 | .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF, | 645 | .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF, |
| 646 | .chain_noise_scale = 1000, | 646 | .chain_noise_scale = 1000, |
| 647 | .monitor_recover_period = IWL_MONITORING_PERIOD, | 647 | .monitor_recover_period = IWL_LONG_MONITORING_PERIOD, |
| 648 | .max_event_log_size = 512, | 648 | .max_event_log_size = 512, |
| 649 | .sensitivity_calib_by_driver = true, | 649 | .sensitivity_calib_by_driver = true, |
| 650 | .chain_noise_calib_by_driver = true, | 650 | .chain_noise_calib_by_driver = true, |
| @@ -680,7 +680,7 @@ struct iwl_cfg iwl6000g2b_bg_cfg = { | |||
| 680 | .support_ct_kill_exit = true, | 680 | .support_ct_kill_exit = true, |
| 681 | .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF, | 681 | .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF, |
| 682 | .chain_noise_scale = 1000, | 682 | .chain_noise_scale = 1000, |
| 683 | .monitor_recover_period = IWL_MONITORING_PERIOD, | 683 | .monitor_recover_period = IWL_LONG_MONITORING_PERIOD, |
| 684 | .max_event_log_size = 512, | 684 | .max_event_log_size = 512, |
| 685 | .sensitivity_calib_by_driver = true, | 685 | .sensitivity_calib_by_driver = true, |
| 686 | .chain_noise_calib_by_driver = true, | 686 | .chain_noise_calib_by_driver = true, |
| @@ -721,7 +721,7 @@ struct iwl_cfg iwl6000i_2agn_cfg = { | |||
| 721 | .support_ct_kill_exit = true, | 721 | .support_ct_kill_exit = true, |
| 722 | .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF, | 722 | .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF, |
| 723 | .chain_noise_scale = 1000, | 723 | .chain_noise_scale = 1000, |
| 724 | .monitor_recover_period = IWL_MONITORING_PERIOD, | 724 | .monitor_recover_period = IWL_DEF_MONITORING_PERIOD, |
| 725 | .max_event_log_size = 1024, | 725 | .max_event_log_size = 1024, |
| 726 | .ucode_tracing = true, | 726 | .ucode_tracing = true, |
| 727 | .sensitivity_calib_by_driver = true, | 727 | .sensitivity_calib_by_driver = true, |
| @@ -756,7 +756,7 @@ struct iwl_cfg iwl6000i_2abg_cfg = { | |||
| 756 | .support_ct_kill_exit = true, | 756 | .support_ct_kill_exit = true, |
| 757 | .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF, | 757 | .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF, |
| 758 | .chain_noise_scale = 1000, | 758 | .chain_noise_scale = 1000, |
| 759 | .monitor_recover_period = IWL_MONITORING_PERIOD, | 759 | .monitor_recover_period = IWL_DEF_MONITORING_PERIOD, |
| 760 | .max_event_log_size = 1024, | 760 | .max_event_log_size = 1024, |
| 761 | .ucode_tracing = true, | 761 | .ucode_tracing = true, |
| 762 | .sensitivity_calib_by_driver = true, | 762 | .sensitivity_calib_by_driver = true, |
| @@ -791,7 +791,7 @@ struct iwl_cfg iwl6000i_2bg_cfg = { | |||
| 791 | .support_ct_kill_exit = true, | 791 | .support_ct_kill_exit = true, |
| 792 | .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF, | 792 | .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF, |
| 793 | .chain_noise_scale = 1000, | 793 | .chain_noise_scale = 1000, |
| 794 | .monitor_recover_period = IWL_MONITORING_PERIOD, | 794 | .monitor_recover_period = IWL_DEF_MONITORING_PERIOD, |
| 795 | .max_event_log_size = 1024, | 795 | .max_event_log_size = 1024, |
| 796 | .ucode_tracing = true, | 796 | .ucode_tracing = true, |
| 797 | .sensitivity_calib_by_driver = true, | 797 | .sensitivity_calib_by_driver = true, |
| @@ -828,7 +828,7 @@ struct iwl_cfg iwl6050_2agn_cfg = { | |||
| 828 | .support_ct_kill_exit = true, | 828 | .support_ct_kill_exit = true, |
| 829 | .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF, | 829 | .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF, |
| 830 | .chain_noise_scale = 1500, | 830 | .chain_noise_scale = 1500, |
| 831 | .monitor_recover_period = IWL_MONITORING_PERIOD, | 831 | .monitor_recover_period = IWL_DEF_MONITORING_PERIOD, |
| 832 | .max_event_log_size = 1024, | 832 | .max_event_log_size = 1024, |
| 833 | .ucode_tracing = true, | 833 | .ucode_tracing = true, |
| 834 | .sensitivity_calib_by_driver = true, | 834 | .sensitivity_calib_by_driver = true, |
| @@ -866,7 +866,7 @@ struct iwl_cfg iwl6050g2_bgn_cfg = { | |||
| 866 | .support_ct_kill_exit = true, | 866 | .support_ct_kill_exit = true, |
| 867 | .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF, | 867 | .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF, |
| 868 | .chain_noise_scale = 1500, | 868 | .chain_noise_scale = 1500, |
| 869 | .monitor_recover_period = IWL_MONITORING_PERIOD, | 869 | .monitor_recover_period = IWL_DEF_MONITORING_PERIOD, |
| 870 | .max_event_log_size = 1024, | 870 | .max_event_log_size = 1024, |
| 871 | .ucode_tracing = true, | 871 | .ucode_tracing = true, |
| 872 | .sensitivity_calib_by_driver = true, | 872 | .sensitivity_calib_by_driver = true, |
| @@ -902,7 +902,7 @@ struct iwl_cfg iwl6050_2abg_cfg = { | |||
| 902 | .support_ct_kill_exit = true, | 902 | .support_ct_kill_exit = true, |
| 903 | .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF, | 903 | .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF, |
| 904 | .chain_noise_scale = 1500, | 904 | .chain_noise_scale = 1500, |
| 905 | .monitor_recover_period = IWL_MONITORING_PERIOD, | 905 | .monitor_recover_period = IWL_DEF_MONITORING_PERIOD, |
| 906 | .max_event_log_size = 1024, | 906 | .max_event_log_size = 1024, |
| 907 | .ucode_tracing = true, | 907 | .ucode_tracing = true, |
| 908 | .sensitivity_calib_by_driver = true, | 908 | .sensitivity_calib_by_driver = true, |
| @@ -940,7 +940,7 @@ struct iwl_cfg iwl6000_3agn_cfg = { | |||
| 940 | .support_ct_kill_exit = true, | 940 | .support_ct_kill_exit = true, |
| 941 | .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF, | 941 | .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF, |
| 942 | .chain_noise_scale = 1000, | 942 | .chain_noise_scale = 1000, |
| 943 | .monitor_recover_period = IWL_MONITORING_PERIOD, | 943 | .monitor_recover_period = IWL_DEF_MONITORING_PERIOD, |
| 944 | .max_event_log_size = 1024, | 944 | .max_event_log_size = 1024, |
| 945 | .ucode_tracing = true, | 945 | .ucode_tracing = true, |
| 946 | .sensitivity_calib_by_driver = true, | 946 | .sensitivity_calib_by_driver = true, |
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.c b/drivers/net/wireless/iwlwifi/iwl-agn.c index c1882fd8345d..10d7b9b7f064 100644 --- a/drivers/net/wireless/iwlwifi/iwl-agn.c +++ b/drivers/net/wireless/iwlwifi/iwl-agn.c | |||
| @@ -3667,6 +3667,49 @@ out_exit: | |||
| 3667 | IWL_DEBUG_MAC80211(priv, "leave\n"); | 3667 | IWL_DEBUG_MAC80211(priv, "leave\n"); |
| 3668 | } | 3668 | } |
| 3669 | 3669 | ||
| 3670 | static void iwlagn_configure_filter(struct ieee80211_hw *hw, | ||
| 3671 | unsigned int changed_flags, | ||
| 3672 | unsigned int *total_flags, | ||
| 3673 | u64 multicast) | ||
| 3674 | { | ||
| 3675 | struct iwl_priv *priv = hw->priv; | ||
| 3676 | __le32 filter_or = 0, filter_nand = 0; | ||
| 3677 | |||
| 3678 | #define CHK(test, flag) do { \ | ||
| 3679 | if (*total_flags & (test)) \ | ||
| 3680 | filter_or |= (flag); \ | ||
| 3681 | else \ | ||
| 3682 | filter_nand |= (flag); \ | ||
| 3683 | } while (0) | ||
| 3684 | |||
| 3685 | IWL_DEBUG_MAC80211(priv, "Enter: changed: 0x%x, total: 0x%x\n", | ||
| 3686 | changed_flags, *total_flags); | ||
| 3687 | |||
| 3688 | CHK(FIF_OTHER_BSS | FIF_PROMISC_IN_BSS, RXON_FILTER_PROMISC_MSK); | ||
| 3689 | CHK(FIF_CONTROL, RXON_FILTER_CTL2HOST_MSK); | ||
| 3690 | CHK(FIF_BCN_PRBRESP_PROMISC, RXON_FILTER_BCON_AWARE_MSK); | ||
| 3691 | |||
| 3692 | #undef CHK | ||
| 3693 | |||
| 3694 | mutex_lock(&priv->mutex); | ||
| 3695 | |||
| 3696 | priv->staging_rxon.filter_flags &= ~filter_nand; | ||
| 3697 | priv->staging_rxon.filter_flags |= filter_or; | ||
| 3698 | |||
| 3699 | iwlcore_commit_rxon(priv); | ||
| 3700 | |||
| 3701 | mutex_unlock(&priv->mutex); | ||
| 3702 | |||
| 3703 | /* | ||
| 3704 | * Receiving all multicast frames is always enabled by the | ||
| 3705 | * default flags setup in iwl_connection_init_rx_config() | ||
| 3706 | * since we currently do not support programming multicast | ||
| 3707 | * filters into the device. | ||
| 3708 | */ | ||
| 3709 | *total_flags &= FIF_OTHER_BSS | FIF_ALLMULTI | FIF_PROMISC_IN_BSS | | ||
| 3710 | FIF_BCN_PRBRESP_PROMISC | FIF_CONTROL; | ||
| 3711 | } | ||
| 3712 | |||
| 3670 | static void iwl_mac_flush(struct ieee80211_hw *hw, bool drop) | 3713 | static void iwl_mac_flush(struct ieee80211_hw *hw, bool drop) |
| 3671 | { | 3714 | { |
| 3672 | struct iwl_priv *priv = hw->priv; | 3715 | struct iwl_priv *priv = hw->priv; |
| @@ -3867,7 +3910,7 @@ static struct ieee80211_ops iwl_hw_ops = { | |||
| 3867 | .add_interface = iwl_mac_add_interface, | 3910 | .add_interface = iwl_mac_add_interface, |
| 3868 | .remove_interface = iwl_mac_remove_interface, | 3911 | .remove_interface = iwl_mac_remove_interface, |
| 3869 | .config = iwl_mac_config, | 3912 | .config = iwl_mac_config, |
| 3870 | .configure_filter = iwl_configure_filter, | 3913 | .configure_filter = iwlagn_configure_filter, |
| 3871 | .set_key = iwl_mac_set_key, | 3914 | .set_key = iwl_mac_set_key, |
| 3872 | .update_tkip_key = iwl_mac_update_tkip_key, | 3915 | .update_tkip_key = iwl_mac_update_tkip_key, |
| 3873 | .conf_tx = iwl_mac_conf_tx, | 3916 | .conf_tx = iwl_mac_conf_tx, |
diff --git a/drivers/net/wireless/iwlwifi/iwl-core.c b/drivers/net/wireless/iwlwifi/iwl-core.c index 2c03c6e20a72..07dbc2796448 100644 --- a/drivers/net/wireless/iwlwifi/iwl-core.c +++ b/drivers/net/wireless/iwlwifi/iwl-core.c | |||
| @@ -1328,51 +1328,6 @@ out: | |||
| 1328 | EXPORT_SYMBOL(iwl_apm_init); | 1328 | EXPORT_SYMBOL(iwl_apm_init); |
| 1329 | 1329 | ||
| 1330 | 1330 | ||
| 1331 | |||
| 1332 | void iwl_configure_filter(struct ieee80211_hw *hw, | ||
| 1333 | unsigned int changed_flags, | ||
| 1334 | unsigned int *total_flags, | ||
| 1335 | u64 multicast) | ||
| 1336 | { | ||
| 1337 | struct iwl_priv *priv = hw->priv; | ||
| 1338 | __le32 filter_or = 0, filter_nand = 0; | ||
| 1339 | |||
| 1340 | #define CHK(test, flag) do { \ | ||
| 1341 | if (*total_flags & (test)) \ | ||
| 1342 | filter_or |= (flag); \ | ||
| 1343 | else \ | ||
| 1344 | filter_nand |= (flag); \ | ||
| 1345 | } while (0) | ||
| 1346 | |||
| 1347 | IWL_DEBUG_MAC80211(priv, "Enter: changed: 0x%x, total: 0x%x\n", | ||
| 1348 | changed_flags, *total_flags); | ||
| 1349 | |||
| 1350 | CHK(FIF_OTHER_BSS | FIF_PROMISC_IN_BSS, RXON_FILTER_PROMISC_MSK); | ||
| 1351 | CHK(FIF_CONTROL, RXON_FILTER_CTL2HOST_MSK); | ||
| 1352 | CHK(FIF_BCN_PRBRESP_PROMISC, RXON_FILTER_BCON_AWARE_MSK); | ||
| 1353 | |||
| 1354 | #undef CHK | ||
| 1355 | |||
| 1356 | mutex_lock(&priv->mutex); | ||
| 1357 | |||
| 1358 | priv->staging_rxon.filter_flags &= ~filter_nand; | ||
| 1359 | priv->staging_rxon.filter_flags |= filter_or; | ||
| 1360 | |||
| 1361 | iwlcore_commit_rxon(priv); | ||
| 1362 | |||
| 1363 | mutex_unlock(&priv->mutex); | ||
| 1364 | |||
| 1365 | /* | ||
| 1366 | * Receiving all multicast frames is always enabled by the | ||
| 1367 | * default flags setup in iwl_connection_init_rx_config() | ||
| 1368 | * since we currently do not support programming multicast | ||
| 1369 | * filters into the device. | ||
| 1370 | */ | ||
| 1371 | *total_flags &= FIF_OTHER_BSS | FIF_ALLMULTI | FIF_PROMISC_IN_BSS | | ||
| 1372 | FIF_BCN_PRBRESP_PROMISC | FIF_CONTROL; | ||
| 1373 | } | ||
| 1374 | EXPORT_SYMBOL(iwl_configure_filter); | ||
| 1375 | |||
| 1376 | int iwl_set_hw_params(struct iwl_priv *priv) | 1331 | int iwl_set_hw_params(struct iwl_priv *priv) |
| 1377 | { | 1332 | { |
| 1378 | priv->hw_params.max_rxq_size = RX_QUEUE_SIZE; | 1333 | priv->hw_params.max_rxq_size = RX_QUEUE_SIZE; |
diff --git a/drivers/net/wireless/iwlwifi/iwl-core.h b/drivers/net/wireless/iwlwifi/iwl-core.h index 4a71dfb10a15..5e6ee3da6bbf 100644 --- a/drivers/net/wireless/iwlwifi/iwl-core.h +++ b/drivers/net/wireless/iwlwifi/iwl-core.h | |||
| @@ -372,9 +372,6 @@ int iwl_set_decrypted_flag(struct iwl_priv *priv, | |||
| 372 | u32 decrypt_res, | 372 | u32 decrypt_res, |
| 373 | struct ieee80211_rx_status *stats); | 373 | struct ieee80211_rx_status *stats); |
| 374 | void iwl_irq_handle_error(struct iwl_priv *priv); | 374 | void iwl_irq_handle_error(struct iwl_priv *priv); |
| 375 | void iwl_configure_filter(struct ieee80211_hw *hw, | ||
| 376 | unsigned int changed_flags, | ||
| 377 | unsigned int *total_flags, u64 multicast); | ||
| 378 | int iwl_set_hw_params(struct iwl_priv *priv); | 375 | int iwl_set_hw_params(struct iwl_priv *priv); |
| 379 | void iwl_post_associate(struct iwl_priv *priv, struct ieee80211_vif *vif); | 376 | void iwl_post_associate(struct iwl_priv *priv, struct ieee80211_vif *vif); |
| 380 | void iwl_bss_info_changed(struct ieee80211_hw *hw, | 377 | void iwl_bss_info_changed(struct ieee80211_hw *hw, |
diff --git a/drivers/net/wireless/iwlwifi/iwl-dev.h b/drivers/net/wireless/iwlwifi/iwl-dev.h index f35bcad56e36..2e97cd2fa98a 100644 --- a/drivers/net/wireless/iwlwifi/iwl-dev.h +++ b/drivers/net/wireless/iwlwifi/iwl-dev.h | |||
| @@ -1049,7 +1049,8 @@ struct iwl_event_log { | |||
| 1049 | #define IWL_DELAY_NEXT_FORCE_FW_RELOAD (HZ*5) | 1049 | #define IWL_DELAY_NEXT_FORCE_FW_RELOAD (HZ*5) |
| 1050 | 1050 | ||
| 1051 | /* timer constants use to monitor and recover stuck tx queues in mSecs */ | 1051 | /* timer constants use to monitor and recover stuck tx queues in mSecs */ |
| 1052 | #define IWL_MONITORING_PERIOD (1000) | 1052 | #define IWL_DEF_MONITORING_PERIOD (1000) |
| 1053 | #define IWL_LONG_MONITORING_PERIOD (5000) | ||
| 1053 | #define IWL_ONE_HUNDRED_MSECS (100) | 1054 | #define IWL_ONE_HUNDRED_MSECS (100) |
| 1054 | #define IWL_SIXTY_SECS (60000) | 1055 | #define IWL_SIXTY_SECS (60000) |
| 1055 | 1056 | ||
diff --git a/drivers/net/wireless/iwlwifi/iwl3945-base.c b/drivers/net/wireless/iwlwifi/iwl3945-base.c index 70c4b8fba0ee..59a308b02f95 100644 --- a/drivers/net/wireless/iwlwifi/iwl3945-base.c +++ b/drivers/net/wireless/iwlwifi/iwl3945-base.c | |||
| @@ -3391,6 +3391,55 @@ static int iwl3945_mac_sta_add(struct ieee80211_hw *hw, | |||
| 3391 | 3391 | ||
| 3392 | return 0; | 3392 | return 0; |
| 3393 | } | 3393 | } |
| 3394 | |||
| 3395 | static void iwl3945_configure_filter(struct ieee80211_hw *hw, | ||
| 3396 | unsigned int changed_flags, | ||
| 3397 | unsigned int *total_flags, | ||
| 3398 | u64 multicast) | ||
| 3399 | { | ||
| 3400 | struct iwl_priv *priv = hw->priv; | ||
| 3401 | __le32 filter_or = 0, filter_nand = 0; | ||
| 3402 | |||
| 3403 | #define CHK(test, flag) do { \ | ||
| 3404 | if (*total_flags & (test)) \ | ||
| 3405 | filter_or |= (flag); \ | ||
| 3406 | else \ | ||
| 3407 | filter_nand |= (flag); \ | ||
| 3408 | } while (0) | ||
| 3409 | |||
| 3410 | IWL_DEBUG_MAC80211(priv, "Enter: changed: 0x%x, total: 0x%x\n", | ||
| 3411 | changed_flags, *total_flags); | ||
| 3412 | |||
| 3413 | CHK(FIF_OTHER_BSS | FIF_PROMISC_IN_BSS, RXON_FILTER_PROMISC_MSK); | ||
| 3414 | CHK(FIF_CONTROL, RXON_FILTER_CTL2HOST_MSK); | ||
| 3415 | CHK(FIF_BCN_PRBRESP_PROMISC, RXON_FILTER_BCON_AWARE_MSK); | ||
| 3416 | |||
| 3417 | #undef CHK | ||
| 3418 | |||
| 3419 | mutex_lock(&priv->mutex); | ||
| 3420 | |||
| 3421 | priv->staging_rxon.filter_flags &= ~filter_nand; | ||
| 3422 | priv->staging_rxon.filter_flags |= filter_or; | ||
| 3423 | |||
| 3424 | /* | ||
| 3425 | * Committing directly here breaks for some reason, | ||
| 3426 | * but we'll eventually commit the filter flags | ||
| 3427 | * change anyway. | ||
| 3428 | */ | ||
| 3429 | |||
| 3430 | mutex_unlock(&priv->mutex); | ||
| 3431 | |||
| 3432 | /* | ||
| 3433 | * Receiving all multicast frames is always enabled by the | ||
| 3434 | * default flags setup in iwl_connection_init_rx_config() | ||
| 3435 | * since we currently do not support programming multicast | ||
| 3436 | * filters into the device. | ||
| 3437 | */ | ||
| 3438 | *total_flags &= FIF_OTHER_BSS | FIF_ALLMULTI | FIF_PROMISC_IN_BSS | | ||
| 3439 | FIF_BCN_PRBRESP_PROMISC | FIF_CONTROL; | ||
| 3440 | } | ||
| 3441 | |||
| 3442 | |||
| 3394 | /***************************************************************************** | 3443 | /***************************************************************************** |
| 3395 | * | 3444 | * |
| 3396 | * sysfs attributes | 3445 | * sysfs attributes |
| @@ -3796,7 +3845,7 @@ static struct ieee80211_ops iwl3945_hw_ops = { | |||
| 3796 | .add_interface = iwl_mac_add_interface, | 3845 | .add_interface = iwl_mac_add_interface, |
| 3797 | .remove_interface = iwl_mac_remove_interface, | 3846 | .remove_interface = iwl_mac_remove_interface, |
| 3798 | .config = iwl_mac_config, | 3847 | .config = iwl_mac_config, |
| 3799 | .configure_filter = iwl_configure_filter, | 3848 | .configure_filter = iwl3945_configure_filter, |
| 3800 | .set_key = iwl3945_mac_set_key, | 3849 | .set_key = iwl3945_mac_set_key, |
| 3801 | .conf_tx = iwl_mac_conf_tx, | 3850 | .conf_tx = iwl_mac_conf_tx, |
| 3802 | .reset_tsf = iwl_mac_reset_tsf, | 3851 | .reset_tsf = iwl_mac_reset_tsf, |
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c index 01ad7f77383a..86fa8abdd66f 100644 --- a/drivers/net/wireless/mac80211_hwsim.c +++ b/drivers/net/wireless/mac80211_hwsim.c | |||
| @@ -486,7 +486,7 @@ static bool mac80211_hwsim_tx_frame(struct ieee80211_hw *hw, | |||
| 486 | struct ieee80211_rx_status rx_status; | 486 | struct ieee80211_rx_status rx_status; |
| 487 | 487 | ||
| 488 | if (data->idle) { | 488 | if (data->idle) { |
| 489 | wiphy_debug(hw->wiphy, "trying to tx when idle - reject\n"); | 489 | wiphy_debug(hw->wiphy, "Trying to TX when idle - reject\n"); |
| 490 | return false; | 490 | return false; |
| 491 | } | 491 | } |
| 492 | 492 | ||
diff --git a/drivers/net/wireless/mwl8k.c b/drivers/net/wireless/mwl8k.c index d761ed2d8af4..f152a25be59f 100644 --- a/drivers/net/wireless/mwl8k.c +++ b/drivers/net/wireless/mwl8k.c | |||
| @@ -910,14 +910,14 @@ static int mwl8k_rxq_init(struct ieee80211_hw *hw, int index) | |||
| 910 | 910 | ||
| 911 | rxq->rxd = pci_alloc_consistent(priv->pdev, size, &rxq->rxd_dma); | 911 | rxq->rxd = pci_alloc_consistent(priv->pdev, size, &rxq->rxd_dma); |
| 912 | if (rxq->rxd == NULL) { | 912 | if (rxq->rxd == NULL) { |
| 913 | wiphy_err(hw->wiphy, "failed to alloc rx descriptors\n"); | 913 | wiphy_err(hw->wiphy, "failed to alloc RX descriptors\n"); |
| 914 | return -ENOMEM; | 914 | return -ENOMEM; |
| 915 | } | 915 | } |
| 916 | memset(rxq->rxd, 0, size); | 916 | memset(rxq->rxd, 0, size); |
| 917 | 917 | ||
| 918 | rxq->buf = kmalloc(MWL8K_RX_DESCS * sizeof(*rxq->buf), GFP_KERNEL); | 918 | rxq->buf = kmalloc(MWL8K_RX_DESCS * sizeof(*rxq->buf), GFP_KERNEL); |
| 919 | if (rxq->buf == NULL) { | 919 | if (rxq->buf == NULL) { |
| 920 | wiphy_err(hw->wiphy, "failed to alloc rx skbuff list\n"); | 920 | wiphy_err(hw->wiphy, "failed to alloc RX skbuff list\n"); |
| 921 | pci_free_consistent(priv->pdev, size, rxq->rxd, rxq->rxd_dma); | 921 | pci_free_consistent(priv->pdev, size, rxq->rxd, rxq->rxd_dma); |
| 922 | return -ENOMEM; | 922 | return -ENOMEM; |
| 923 | } | 923 | } |
| @@ -1145,14 +1145,14 @@ static int mwl8k_txq_init(struct ieee80211_hw *hw, int index) | |||
| 1145 | 1145 | ||
| 1146 | txq->txd = pci_alloc_consistent(priv->pdev, size, &txq->txd_dma); | 1146 | txq->txd = pci_alloc_consistent(priv->pdev, size, &txq->txd_dma); |
| 1147 | if (txq->txd == NULL) { | 1147 | if (txq->txd == NULL) { |
| 1148 | wiphy_err(hw->wiphy, "failed to alloc tx descriptors\n"); | 1148 | wiphy_err(hw->wiphy, "failed to alloc TX descriptors\n"); |
| 1149 | return -ENOMEM; | 1149 | return -ENOMEM; |
| 1150 | } | 1150 | } |
| 1151 | memset(txq->txd, 0, size); | 1151 | memset(txq->txd, 0, size); |
| 1152 | 1152 | ||
| 1153 | txq->skb = kmalloc(MWL8K_TX_DESCS * sizeof(*txq->skb), GFP_KERNEL); | 1153 | txq->skb = kmalloc(MWL8K_TX_DESCS * sizeof(*txq->skb), GFP_KERNEL); |
| 1154 | if (txq->skb == NULL) { | 1154 | if (txq->skb == NULL) { |
| 1155 | wiphy_err(hw->wiphy, "failed to alloc tx skbuff list\n"); | 1155 | wiphy_err(hw->wiphy, "failed to alloc TX skbuff list\n"); |
| 1156 | pci_free_consistent(priv->pdev, size, txq->txd, txq->txd_dma); | 1156 | pci_free_consistent(priv->pdev, size, txq->txd, txq->txd_dma); |
| 1157 | return -ENOMEM; | 1157 | return -ENOMEM; |
| 1158 | } | 1158 | } |
| @@ -1573,7 +1573,7 @@ static int mwl8k_post_cmd(struct ieee80211_hw *hw, struct mwl8k_cmd_pkt *cmd) | |||
| 1573 | PCI_DMA_BIDIRECTIONAL); | 1573 | PCI_DMA_BIDIRECTIONAL); |
| 1574 | 1574 | ||
| 1575 | if (!timeout) { | 1575 | if (!timeout) { |
| 1576 | wiphy_err(hw->wiphy, "command %s timeout after %u ms\n", | 1576 | wiphy_err(hw->wiphy, "Command %s timeout after %u ms\n", |
| 1577 | mwl8k_cmd_name(cmd->code, buf, sizeof(buf)), | 1577 | mwl8k_cmd_name(cmd->code, buf, sizeof(buf)), |
| 1578 | MWL8K_CMD_TIMEOUT_MS); | 1578 | MWL8K_CMD_TIMEOUT_MS); |
| 1579 | rc = -ETIMEDOUT; | 1579 | rc = -ETIMEDOUT; |
| @@ -1584,11 +1584,11 @@ static int mwl8k_post_cmd(struct ieee80211_hw *hw, struct mwl8k_cmd_pkt *cmd) | |||
| 1584 | 1584 | ||
| 1585 | rc = cmd->result ? -EINVAL : 0; | 1585 | rc = cmd->result ? -EINVAL : 0; |
| 1586 | if (rc) | 1586 | if (rc) |
| 1587 | wiphy_err(hw->wiphy, "command %s error 0x%x\n", | 1587 | wiphy_err(hw->wiphy, "Command %s error 0x%x\n", |
| 1588 | mwl8k_cmd_name(cmd->code, buf, sizeof(buf)), | 1588 | mwl8k_cmd_name(cmd->code, buf, sizeof(buf)), |
| 1589 | le16_to_cpu(cmd->result)); | 1589 | le16_to_cpu(cmd->result)); |
| 1590 | else if (ms > 2000) | 1590 | else if (ms > 2000) |
| 1591 | wiphy_notice(hw->wiphy, "command %s took %d ms\n", | 1591 | wiphy_notice(hw->wiphy, "Command %s took %d ms\n", |
| 1592 | mwl8k_cmd_name(cmd->code, | 1592 | mwl8k_cmd_name(cmd->code, |
| 1593 | buf, sizeof(buf)), | 1593 | buf, sizeof(buf)), |
| 1594 | ms); | 1594 | ms); |
| @@ -3210,7 +3210,7 @@ static int mwl8k_start(struct ieee80211_hw *hw) | |||
| 3210 | rc = request_irq(priv->pdev->irq, mwl8k_interrupt, | 3210 | rc = request_irq(priv->pdev->irq, mwl8k_interrupt, |
| 3211 | IRQF_SHARED, MWL8K_NAME, hw); | 3211 | IRQF_SHARED, MWL8K_NAME, hw); |
| 3212 | if (rc) { | 3212 | if (rc) { |
| 3213 | wiphy_err(hw->wiphy, "failed to register irq handler\n"); | 3213 | wiphy_err(hw->wiphy, "failed to register IRQ handler\n"); |
| 3214 | return -EIO; | 3214 | return -EIO; |
| 3215 | } | 3215 | } |
| 3216 | 3216 | ||
| @@ -3926,7 +3926,7 @@ static int __devinit mwl8k_probe(struct pci_dev *pdev, | |||
| 3926 | 3926 | ||
| 3927 | priv->sram = pci_iomap(pdev, 0, 0x10000); | 3927 | priv->sram = pci_iomap(pdev, 0, 0x10000); |
| 3928 | if (priv->sram == NULL) { | 3928 | if (priv->sram == NULL) { |
| 3929 | wiphy_err(hw->wiphy, "cannot map device sram\n"); | 3929 | wiphy_err(hw->wiphy, "Cannot map device SRAM\n"); |
| 3930 | goto err_iounmap; | 3930 | goto err_iounmap; |
| 3931 | } | 3931 | } |
| 3932 | 3932 | ||
| @@ -3938,7 +3938,7 @@ static int __devinit mwl8k_probe(struct pci_dev *pdev, | |||
| 3938 | if (priv->regs == NULL) { | 3938 | if (priv->regs == NULL) { |
| 3939 | priv->regs = pci_iomap(pdev, 2, 0x10000); | 3939 | priv->regs = pci_iomap(pdev, 2, 0x10000); |
| 3940 | if (priv->regs == NULL) { | 3940 | if (priv->regs == NULL) { |
| 3941 | wiphy_err(hw->wiphy, "cannot map device registers\n"); | 3941 | wiphy_err(hw->wiphy, "Cannot map device registers\n"); |
| 3942 | goto err_iounmap; | 3942 | goto err_iounmap; |
| 3943 | } | 3943 | } |
| 3944 | } | 3944 | } |
| @@ -3950,14 +3950,14 @@ static int __devinit mwl8k_probe(struct pci_dev *pdev, | |||
| 3950 | /* Ask userland hotplug daemon for the device firmware */ | 3950 | /* Ask userland hotplug daemon for the device firmware */ |
| 3951 | rc = mwl8k_request_firmware(priv); | 3951 | rc = mwl8k_request_firmware(priv); |
| 3952 | if (rc) { | 3952 | if (rc) { |
| 3953 | wiphy_err(hw->wiphy, "firmware files not found\n"); | 3953 | wiphy_err(hw->wiphy, "Firmware files not found\n"); |
| 3954 | goto err_stop_firmware; | 3954 | goto err_stop_firmware; |
| 3955 | } | 3955 | } |
| 3956 | 3956 | ||
| 3957 | /* Load firmware into hardware */ | 3957 | /* Load firmware into hardware */ |
| 3958 | rc = mwl8k_load_firmware(hw); | 3958 | rc = mwl8k_load_firmware(hw); |
| 3959 | if (rc) { | 3959 | if (rc) { |
| 3960 | wiphy_err(hw->wiphy, "cannot start firmware\n"); | 3960 | wiphy_err(hw->wiphy, "Cannot start firmware\n"); |
| 3961 | goto err_stop_firmware; | 3961 | goto err_stop_firmware; |
| 3962 | } | 3962 | } |
| 3963 | 3963 | ||
| @@ -4047,7 +4047,7 @@ static int __devinit mwl8k_probe(struct pci_dev *pdev, | |||
| 4047 | rc = request_irq(priv->pdev->irq, mwl8k_interrupt, | 4047 | rc = request_irq(priv->pdev->irq, mwl8k_interrupt, |
| 4048 | IRQF_SHARED, MWL8K_NAME, hw); | 4048 | IRQF_SHARED, MWL8K_NAME, hw); |
| 4049 | if (rc) { | 4049 | if (rc) { |
| 4050 | wiphy_err(hw->wiphy, "failed to register irq handler\n"); | 4050 | wiphy_err(hw->wiphy, "failed to register IRQ handler\n"); |
| 4051 | goto err_free_queues; | 4051 | goto err_free_queues; |
| 4052 | } | 4052 | } |
| 4053 | 4053 | ||
| @@ -4067,7 +4067,7 @@ static int __devinit mwl8k_probe(struct pci_dev *pdev, | |||
| 4067 | rc = mwl8k_cmd_get_hw_spec_sta(hw); | 4067 | rc = mwl8k_cmd_get_hw_spec_sta(hw); |
| 4068 | } | 4068 | } |
| 4069 | if (rc) { | 4069 | if (rc) { |
| 4070 | wiphy_err(hw->wiphy, "cannot initialise firmware\n"); | 4070 | wiphy_err(hw->wiphy, "Cannot initialise firmware\n"); |
| 4071 | goto err_free_irq; | 4071 | goto err_free_irq; |
| 4072 | } | 4072 | } |
| 4073 | 4073 | ||
| @@ -4081,14 +4081,14 @@ static int __devinit mwl8k_probe(struct pci_dev *pdev, | |||
| 4081 | /* Turn radio off */ | 4081 | /* Turn radio off */ |
| 4082 | rc = mwl8k_cmd_radio_disable(hw); | 4082 | rc = mwl8k_cmd_radio_disable(hw); |
| 4083 | if (rc) { | 4083 | if (rc) { |
| 4084 | wiphy_err(hw->wiphy, "cannot disable\n"); | 4084 | wiphy_err(hw->wiphy, "Cannot disable\n"); |
| 4085 | goto err_free_irq; | 4085 | goto err_free_irq; |
| 4086 | } | 4086 | } |
| 4087 | 4087 | ||
| 4088 | /* Clear MAC address */ | 4088 | /* Clear MAC address */ |
| 4089 | rc = mwl8k_cmd_set_mac_addr(hw, NULL, "\x00\x00\x00\x00\x00\x00"); | 4089 | rc = mwl8k_cmd_set_mac_addr(hw, NULL, "\x00\x00\x00\x00\x00\x00"); |
| 4090 | if (rc) { | 4090 | if (rc) { |
| 4091 | wiphy_err(hw->wiphy, "cannot clear mac address\n"); | 4091 | wiphy_err(hw->wiphy, "Cannot clear MAC address\n"); |
| 4092 | goto err_free_irq; | 4092 | goto err_free_irq; |
| 4093 | } | 4093 | } |
| 4094 | 4094 | ||
| @@ -4098,7 +4098,7 @@ static int __devinit mwl8k_probe(struct pci_dev *pdev, | |||
| 4098 | 4098 | ||
| 4099 | rc = ieee80211_register_hw(hw); | 4099 | rc = ieee80211_register_hw(hw); |
| 4100 | if (rc) { | 4100 | if (rc) { |
| 4101 | wiphy_err(hw->wiphy, "cannot register device\n"); | 4101 | wiphy_err(hw->wiphy, "Cannot register device\n"); |
| 4102 | goto err_free_queues; | 4102 | goto err_free_queues; |
| 4103 | } | 4103 | } |
| 4104 | 4104 | ||
diff --git a/drivers/net/wireless/p54/eeprom.c b/drivers/net/wireless/p54/eeprom.c index d687cb7f2a59..78347041ec40 100644 --- a/drivers/net/wireless/p54/eeprom.c +++ b/drivers/net/wireless/p54/eeprom.c | |||
| @@ -167,7 +167,7 @@ static int p54_generate_band(struct ieee80211_hw *dev, | |||
| 167 | } | 167 | } |
| 168 | 168 | ||
| 169 | if (j == 0) { | 169 | if (j == 0) { |
| 170 | wiphy_err(dev->wiphy, "disabling totally damaged %d GHz band\n", | 170 | wiphy_err(dev->wiphy, "Disabling totally damaged %d GHz band\n", |
| 171 | (band == IEEE80211_BAND_2GHZ) ? 2 : 5); | 171 | (band == IEEE80211_BAND_2GHZ) ? 2 : 5); |
| 172 | 172 | ||
| 173 | ret = -ENODATA; | 173 | ret = -ENODATA; |
| @@ -695,12 +695,12 @@ int p54_parse_eeprom(struct ieee80211_hw *dev, void *eeprom, int len) | |||
| 695 | u8 perm_addr[ETH_ALEN]; | 695 | u8 perm_addr[ETH_ALEN]; |
| 696 | 696 | ||
| 697 | wiphy_warn(dev->wiphy, | 697 | wiphy_warn(dev->wiphy, |
| 698 | "invalid hwaddr! using randomly generated mac addr\n"); | 698 | "Invalid hwaddr! Using randomly generated MAC addr\n"); |
| 699 | random_ether_addr(perm_addr); | 699 | random_ether_addr(perm_addr); |
| 700 | SET_IEEE80211_PERM_ADDR(dev, perm_addr); | 700 | SET_IEEE80211_PERM_ADDR(dev, perm_addr); |
| 701 | } | 701 | } |
| 702 | 702 | ||
| 703 | wiphy_info(dev->wiphy, "hwaddr %pm, mac:isl38%02x rf:%s\n", | 703 | wiphy_info(dev->wiphy, "hwaddr %pM, MAC:isl38%02x RF:%s\n", |
| 704 | dev->wiphy->perm_addr, priv->version, | 704 | dev->wiphy->perm_addr, priv->version, |
| 705 | p54_rf_chips[priv->rxhw]); | 705 | p54_rf_chips[priv->rxhw]); |
| 706 | 706 | ||
diff --git a/drivers/net/wireless/p54/fwio.c b/drivers/net/wireless/p54/fwio.c index 47006bca4852..15b20c29a604 100644 --- a/drivers/net/wireless/p54/fwio.c +++ b/drivers/net/wireless/p54/fwio.c | |||
| @@ -125,7 +125,7 @@ int p54_parse_firmware(struct ieee80211_hw *dev, const struct firmware *fw) | |||
| 125 | 125 | ||
| 126 | if (fw_version) | 126 | if (fw_version) |
| 127 | wiphy_info(priv->hw->wiphy, | 127 | wiphy_info(priv->hw->wiphy, |
| 128 | "fw rev %s - softmac protocol %x.%x\n", | 128 | "FW rev %s - Softmac protocol %x.%x\n", |
| 129 | fw_version, priv->fw_var >> 8, priv->fw_var & 0xff); | 129 | fw_version, priv->fw_var >> 8, priv->fw_var & 0xff); |
| 130 | 130 | ||
| 131 | if (priv->fw_var < 0x500) | 131 | if (priv->fw_var < 0x500) |
diff --git a/drivers/net/wireless/p54/led.c b/drivers/net/wireless/p54/led.c index ea91f5cce6b3..3837e1eec5f4 100644 --- a/drivers/net/wireless/p54/led.c +++ b/drivers/net/wireless/p54/led.c | |||
| @@ -58,7 +58,7 @@ static void p54_update_leds(struct work_struct *work) | |||
| 58 | err = p54_set_leds(priv); | 58 | err = p54_set_leds(priv); |
| 59 | if (err && net_ratelimit()) | 59 | if (err && net_ratelimit()) |
| 60 | wiphy_err(priv->hw->wiphy, | 60 | wiphy_err(priv->hw->wiphy, |
| 61 | "failed to update leds (%d).\n", err); | 61 | "failed to update LEDs (%d).\n", err); |
| 62 | 62 | ||
| 63 | if (rerun) | 63 | if (rerun) |
| 64 | ieee80211_queue_delayed_work(priv->hw, &priv->led_work, | 64 | ieee80211_queue_delayed_work(priv->hw, &priv->led_work, |
| @@ -103,7 +103,7 @@ static int p54_register_led(struct p54_common *priv, | |||
| 103 | err = led_classdev_register(wiphy_dev(priv->hw->wiphy), &led->led_dev); | 103 | err = led_classdev_register(wiphy_dev(priv->hw->wiphy), &led->led_dev); |
| 104 | if (err) | 104 | if (err) |
| 105 | wiphy_err(priv->hw->wiphy, | 105 | wiphy_err(priv->hw->wiphy, |
| 106 | "failed to register %s led.\n", name); | 106 | "Failed to register %s LED.\n", name); |
| 107 | else | 107 | else |
| 108 | led->registered = 1; | 108 | led->registered = 1; |
| 109 | 109 | ||
diff --git a/drivers/net/wireless/p54/p54pci.c b/drivers/net/wireless/p54/p54pci.c index 822f8dc26e9c..1eacba4daa5b 100644 --- a/drivers/net/wireless/p54/p54pci.c +++ b/drivers/net/wireless/p54/p54pci.c | |||
| @@ -466,7 +466,7 @@ static int p54p_open(struct ieee80211_hw *dev) | |||
| 466 | P54P_READ(dev_int); | 466 | P54P_READ(dev_int); |
| 467 | 467 | ||
| 468 | if (!wait_for_completion_interruptible_timeout(&priv->boot_comp, HZ)) { | 468 | if (!wait_for_completion_interruptible_timeout(&priv->boot_comp, HZ)) { |
| 469 | wiphy_err(dev->wiphy, "cannot boot firmware!\n"); | 469 | wiphy_err(dev->wiphy, "Cannot boot firmware!\n"); |
| 470 | p54p_stop(dev); | 470 | p54p_stop(dev); |
| 471 | return -ETIMEDOUT; | 471 | return -ETIMEDOUT; |
| 472 | } | 472 | } |
diff --git a/drivers/net/wireless/p54/txrx.c b/drivers/net/wireless/p54/txrx.c index 427b46f558ed..173aec3d6e7e 100644 --- a/drivers/net/wireless/p54/txrx.c +++ b/drivers/net/wireless/p54/txrx.c | |||
| @@ -540,7 +540,7 @@ static void p54_rx_trap(struct p54_common *priv, struct sk_buff *skb) | |||
| 540 | case P54_TRAP_BEACON_TX: | 540 | case P54_TRAP_BEACON_TX: |
| 541 | break; | 541 | break; |
| 542 | case P54_TRAP_RADAR: | 542 | case P54_TRAP_RADAR: |
| 543 | wiphy_info(priv->hw->wiphy, "radar (freq:%d mhz)\n", freq); | 543 | wiphy_info(priv->hw->wiphy, "radar (freq:%d MHz)\n", freq); |
| 544 | break; | 544 | break; |
| 545 | case P54_TRAP_NO_BEACON: | 545 | case P54_TRAP_NO_BEACON: |
| 546 | if (priv->vif) | 546 | if (priv->vif) |
diff --git a/drivers/net/wireless/rtl818x/rtl8180_dev.c b/drivers/net/wireless/rtl818x/rtl8180_dev.c index b50c39aaec05..30107ce78dfb 100644 --- a/drivers/net/wireless/rtl818x/rtl8180_dev.c +++ b/drivers/net/wireless/rtl818x/rtl8180_dev.c | |||
| @@ -445,7 +445,7 @@ static int rtl8180_init_rx_ring(struct ieee80211_hw *dev) | |||
| 445 | &priv->rx_ring_dma); | 445 | &priv->rx_ring_dma); |
| 446 | 446 | ||
| 447 | if (!priv->rx_ring || (unsigned long)priv->rx_ring & 0xFF) { | 447 | if (!priv->rx_ring || (unsigned long)priv->rx_ring & 0xFF) { |
| 448 | wiphy_err(dev->wiphy, "cannot allocate rx ring\n"); | 448 | wiphy_err(dev->wiphy, "Cannot allocate RX ring\n"); |
| 449 | return -ENOMEM; | 449 | return -ENOMEM; |
| 450 | } | 450 | } |
| 451 | 451 | ||
| @@ -502,7 +502,7 @@ static int rtl8180_init_tx_ring(struct ieee80211_hw *dev, | |||
| 502 | 502 | ||
| 503 | ring = pci_alloc_consistent(priv->pdev, sizeof(*ring) * entries, &dma); | 503 | ring = pci_alloc_consistent(priv->pdev, sizeof(*ring) * entries, &dma); |
| 504 | if (!ring || (unsigned long)ring & 0xFF) { | 504 | if (!ring || (unsigned long)ring & 0xFF) { |
| 505 | wiphy_err(dev->wiphy, "cannot allocate tx ring (prio = %d)\n", | 505 | wiphy_err(dev->wiphy, "Cannot allocate TX ring (prio = %d)\n", |
| 506 | prio); | 506 | prio); |
| 507 | return -ENOMEM; | 507 | return -ENOMEM; |
| 508 | } | 508 | } |
| @@ -568,7 +568,7 @@ static int rtl8180_start(struct ieee80211_hw *dev) | |||
| 568 | ret = request_irq(priv->pdev->irq, rtl8180_interrupt, | 568 | ret = request_irq(priv->pdev->irq, rtl8180_interrupt, |
| 569 | IRQF_SHARED, KBUILD_MODNAME, dev); | 569 | IRQF_SHARED, KBUILD_MODNAME, dev); |
| 570 | if (ret) { | 570 | if (ret) { |
| 571 | wiphy_err(dev->wiphy, "failed to register irq handler\n"); | 571 | wiphy_err(dev->wiphy, "failed to register IRQ handler\n"); |
| 572 | goto err_free_rings; | 572 | goto err_free_rings; |
| 573 | } | 573 | } |
| 574 | 574 | ||
diff --git a/drivers/net/wireless/rtl818x/rtl8187_dev.c b/drivers/net/wireless/rtl818x/rtl8187_dev.c index 5738a55c1b06..98e0351c1dd6 100644 --- a/drivers/net/wireless/rtl818x/rtl8187_dev.c +++ b/drivers/net/wireless/rtl818x/rtl8187_dev.c | |||
| @@ -573,7 +573,7 @@ static int rtl8187_cmd_reset(struct ieee80211_hw *dev) | |||
| 573 | } while (--i); | 573 | } while (--i); |
| 574 | 574 | ||
| 575 | if (!i) { | 575 | if (!i) { |
| 576 | wiphy_err(dev->wiphy, "reset timeout!\n"); | 576 | wiphy_err(dev->wiphy, "Reset timeout!\n"); |
| 577 | return -ETIMEDOUT; | 577 | return -ETIMEDOUT; |
| 578 | } | 578 | } |
| 579 | 579 | ||
| @@ -1526,7 +1526,7 @@ static int __devinit rtl8187_probe(struct usb_interface *intf, | |||
| 1526 | mutex_init(&priv->conf_mutex); | 1526 | mutex_init(&priv->conf_mutex); |
| 1527 | skb_queue_head_init(&priv->b_tx_status.queue); | 1527 | skb_queue_head_init(&priv->b_tx_status.queue); |
| 1528 | 1528 | ||
| 1529 | wiphy_info(dev->wiphy, "hwaddr %pm, %s v%d + %s, rfkill mask %d\n", | 1529 | wiphy_info(dev->wiphy, "hwaddr %pM, %s V%d + %s, rfkill mask %d\n", |
| 1530 | mac_addr, chip_name, priv->asic_rev, priv->rf->name, | 1530 | mac_addr, chip_name, priv->asic_rev, priv->rf->name, |
| 1531 | priv->rfkill_mask); | 1531 | priv->rfkill_mask); |
| 1532 | 1532 | ||
diff --git a/drivers/net/wireless/rtl818x/rtl8187_rtl8225.c b/drivers/net/wireless/rtl818x/rtl8187_rtl8225.c index fd96f9112322..97eebdcf7eb9 100644 --- a/drivers/net/wireless/rtl818x/rtl8187_rtl8225.c +++ b/drivers/net/wireless/rtl818x/rtl8187_rtl8225.c | |||
| @@ -366,7 +366,7 @@ static void rtl8225_rf_init(struct ieee80211_hw *dev) | |||
| 366 | rtl8225_write(dev, 0x02, 0x044d); | 366 | rtl8225_write(dev, 0x02, 0x044d); |
| 367 | msleep(100); | 367 | msleep(100); |
| 368 | if (!(rtl8225_read(dev, 6) & (1 << 7))) | 368 | if (!(rtl8225_read(dev, 6) & (1 << 7))) |
| 369 | wiphy_warn(dev->wiphy, "rf calibration failed! %x\n", | 369 | wiphy_warn(dev->wiphy, "RF Calibration Failed! %x\n", |
| 370 | rtl8225_read(dev, 6)); | 370 | rtl8225_read(dev, 6)); |
| 371 | } | 371 | } |
| 372 | 372 | ||
| @@ -735,7 +735,7 @@ static void rtl8225z2_rf_init(struct ieee80211_hw *dev) | |||
| 735 | rtl8225_write(dev, 0x02, 0x044D); | 735 | rtl8225_write(dev, 0x02, 0x044D); |
| 736 | msleep(100); | 736 | msleep(100); |
| 737 | if (!(rtl8225_read(dev, 6) & (1 << 7))) | 737 | if (!(rtl8225_read(dev, 6) & (1 << 7))) |
| 738 | wiphy_warn(dev->wiphy, "rf calibration failed! %x\n", | 738 | wiphy_warn(dev->wiphy, "RF Calibration Failed! %x\n", |
| 739 | rtl8225_read(dev, 6)); | 739 | rtl8225_read(dev, 6)); |
| 740 | } | 740 | } |
| 741 | 741 | ||
diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig index 044f430f3b43..cff7cc2c1f02 100644 --- a/drivers/platform/x86/Kconfig +++ b/drivers/platform/x86/Kconfig | |||
| @@ -486,10 +486,12 @@ config TOPSTAR_LAPTOP | |||
| 486 | config ACPI_TOSHIBA | 486 | config ACPI_TOSHIBA |
| 487 | tristate "Toshiba Laptop Extras" | 487 | tristate "Toshiba Laptop Extras" |
| 488 | depends on ACPI | 488 | depends on ACPI |
| 489 | depends on LEDS_CLASS | ||
| 490 | depends on NEW_LEDS | ||
| 491 | depends on BACKLIGHT_CLASS_DEVICE | ||
| 489 | depends on INPUT | 492 | depends on INPUT |
| 490 | depends on RFKILL || RFKILL = n | 493 | depends on RFKILL || RFKILL = n |
| 491 | select INPUT_POLLDEV | 494 | select INPUT_POLLDEV |
| 492 | select BACKLIGHT_CLASS_DEVICE | ||
| 493 | ---help--- | 495 | ---help--- |
| 494 | This driver adds support for access to certain system settings | 496 | This driver adds support for access to certain system settings |
| 495 | on "legacy free" Toshiba laptops. These laptops can be recognized by | 497 | on "legacy free" Toshiba laptops. These laptops can be recognized by |
diff --git a/drivers/platform/x86/hp-wmi.c b/drivers/platform/x86/hp-wmi.c index f15516374987..c1741142a4cb 100644 --- a/drivers/platform/x86/hp-wmi.c +++ b/drivers/platform/x86/hp-wmi.c | |||
| @@ -79,12 +79,13 @@ struct bios_args { | |||
| 79 | u32 command; | 79 | u32 command; |
| 80 | u32 commandtype; | 80 | u32 commandtype; |
| 81 | u32 datasize; | 81 | u32 datasize; |
| 82 | char *data; | 82 | u32 data; |
| 83 | }; | 83 | }; |
| 84 | 84 | ||
| 85 | struct bios_return { | 85 | struct bios_return { |
| 86 | u32 sigpass; | 86 | u32 sigpass; |
| 87 | u32 return_code; | 87 | u32 return_code; |
| 88 | u32 value; | ||
| 88 | }; | 89 | }; |
| 89 | 90 | ||
| 90 | struct key_entry { | 91 | struct key_entry { |
| @@ -148,7 +149,7 @@ static struct platform_driver hp_wmi_driver = { | |||
| 148 | * buffer = kzalloc(128, GFP_KERNEL); | 149 | * buffer = kzalloc(128, GFP_KERNEL); |
| 149 | * ret = hp_wmi_perform_query(0x7, 0, buffer, 128) | 150 | * ret = hp_wmi_perform_query(0x7, 0, buffer, 128) |
| 150 | */ | 151 | */ |
| 151 | static int hp_wmi_perform_query(int query, int write, char *buffer, | 152 | static int hp_wmi_perform_query(int query, int write, u32 *buffer, |
| 152 | int buffersize) | 153 | int buffersize) |
| 153 | { | 154 | { |
| 154 | struct bios_return bios_return; | 155 | struct bios_return bios_return; |
| @@ -159,7 +160,7 @@ static int hp_wmi_perform_query(int query, int write, char *buffer, | |||
| 159 | .command = write ? 0x2 : 0x1, | 160 | .command = write ? 0x2 : 0x1, |
| 160 | .commandtype = query, | 161 | .commandtype = query, |
| 161 | .datasize = buffersize, | 162 | .datasize = buffersize, |
| 162 | .data = buffer, | 163 | .data = *buffer, |
| 163 | }; | 164 | }; |
| 164 | struct acpi_buffer input = { sizeof(struct bios_args), &args }; | 165 | struct acpi_buffer input = { sizeof(struct bios_args), &args }; |
| 165 | struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL }; | 166 | struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL }; |
| @@ -177,29 +178,14 @@ static int hp_wmi_perform_query(int query, int write, char *buffer, | |||
| 177 | 178 | ||
| 178 | bios_return = *((struct bios_return *)obj->buffer.pointer); | 179 | bios_return = *((struct bios_return *)obj->buffer.pointer); |
| 179 | 180 | ||
| 180 | if (bios_return.return_code) { | 181 | memcpy(buffer, &bios_return.value, sizeof(bios_return.value)); |
| 181 | printk(KERN_WARNING PREFIX "Query %d returned %d\n", query, | ||
| 182 | bios_return.return_code); | ||
| 183 | kfree(obj); | ||
| 184 | return bios_return.return_code; | ||
| 185 | } | ||
| 186 | if (obj->buffer.length - sizeof(bios_return) > buffersize) { | ||
| 187 | kfree(obj); | ||
| 188 | return -EINVAL; | ||
| 189 | } | ||
| 190 | |||
| 191 | memset(buffer, 0, buffersize); | ||
| 192 | memcpy(buffer, | ||
| 193 | ((char *)obj->buffer.pointer) + sizeof(struct bios_return), | ||
| 194 | obj->buffer.length - sizeof(bios_return)); | ||
| 195 | kfree(obj); | ||
| 196 | return 0; | 182 | return 0; |
| 197 | } | 183 | } |
| 198 | 184 | ||
| 199 | static int hp_wmi_display_state(void) | 185 | static int hp_wmi_display_state(void) |
| 200 | { | 186 | { |
| 201 | int state; | 187 | int state = 0; |
| 202 | int ret = hp_wmi_perform_query(HPWMI_DISPLAY_QUERY, 0, (char *)&state, | 188 | int ret = hp_wmi_perform_query(HPWMI_DISPLAY_QUERY, 0, &state, |
| 203 | sizeof(state)); | 189 | sizeof(state)); |
| 204 | if (ret) | 190 | if (ret) |
| 205 | return -EINVAL; | 191 | return -EINVAL; |
| @@ -208,8 +194,8 @@ static int hp_wmi_display_state(void) | |||
| 208 | 194 | ||
| 209 | static int hp_wmi_hddtemp_state(void) | 195 | static int hp_wmi_hddtemp_state(void) |
| 210 | { | 196 | { |
| 211 | int state; | 197 | int state = 0; |
| 212 | int ret = hp_wmi_perform_query(HPWMI_HDDTEMP_QUERY, 0, (char *)&state, | 198 | int ret = hp_wmi_perform_query(HPWMI_HDDTEMP_QUERY, 0, &state, |
| 213 | sizeof(state)); | 199 | sizeof(state)); |
| 214 | if (ret) | 200 | if (ret) |
| 215 | return -EINVAL; | 201 | return -EINVAL; |
| @@ -218,8 +204,8 @@ static int hp_wmi_hddtemp_state(void) | |||
| 218 | 204 | ||
| 219 | static int hp_wmi_als_state(void) | 205 | static int hp_wmi_als_state(void) |
| 220 | { | 206 | { |
| 221 | int state; | 207 | int state = 0; |
| 222 | int ret = hp_wmi_perform_query(HPWMI_ALS_QUERY, 0, (char *)&state, | 208 | int ret = hp_wmi_perform_query(HPWMI_ALS_QUERY, 0, &state, |
| 223 | sizeof(state)); | 209 | sizeof(state)); |
| 224 | if (ret) | 210 | if (ret) |
| 225 | return -EINVAL; | 211 | return -EINVAL; |
| @@ -228,8 +214,8 @@ static int hp_wmi_als_state(void) | |||
| 228 | 214 | ||
| 229 | static int hp_wmi_dock_state(void) | 215 | static int hp_wmi_dock_state(void) |
| 230 | { | 216 | { |
| 231 | int state; | 217 | int state = 0; |
| 232 | int ret = hp_wmi_perform_query(HPWMI_HARDWARE_QUERY, 0, (char *)&state, | 218 | int ret = hp_wmi_perform_query(HPWMI_HARDWARE_QUERY, 0, &state, |
| 233 | sizeof(state)); | 219 | sizeof(state)); |
| 234 | 220 | ||
| 235 | if (ret) | 221 | if (ret) |
| @@ -240,8 +226,8 @@ static int hp_wmi_dock_state(void) | |||
| 240 | 226 | ||
| 241 | static int hp_wmi_tablet_state(void) | 227 | static int hp_wmi_tablet_state(void) |
| 242 | { | 228 | { |
| 243 | int state; | 229 | int state = 0; |
| 244 | int ret = hp_wmi_perform_query(HPWMI_HARDWARE_QUERY, 0, (char *)&state, | 230 | int ret = hp_wmi_perform_query(HPWMI_HARDWARE_QUERY, 0, &state, |
| 245 | sizeof(state)); | 231 | sizeof(state)); |
| 246 | if (ret) | 232 | if (ret) |
| 247 | return ret; | 233 | return ret; |
| @@ -256,7 +242,7 @@ static int hp_wmi_set_block(void *data, bool blocked) | |||
| 256 | int ret; | 242 | int ret; |
| 257 | 243 | ||
| 258 | ret = hp_wmi_perform_query(HPWMI_WIRELESS_QUERY, 1, | 244 | ret = hp_wmi_perform_query(HPWMI_WIRELESS_QUERY, 1, |
| 259 | (char *)&query, sizeof(query)); | 245 | &query, sizeof(query)); |
| 260 | if (ret) | 246 | if (ret) |
| 261 | return -EINVAL; | 247 | return -EINVAL; |
| 262 | return 0; | 248 | return 0; |
| @@ -268,10 +254,10 @@ static const struct rfkill_ops hp_wmi_rfkill_ops = { | |||
| 268 | 254 | ||
| 269 | static bool hp_wmi_get_sw_state(enum hp_wmi_radio r) | 255 | static bool hp_wmi_get_sw_state(enum hp_wmi_radio r) |
| 270 | { | 256 | { |
| 271 | int wireless; | 257 | int wireless = 0; |
| 272 | int mask; | 258 | int mask; |
| 273 | hp_wmi_perform_query(HPWMI_WIRELESS_QUERY, 0, | 259 | hp_wmi_perform_query(HPWMI_WIRELESS_QUERY, 0, |
| 274 | (char *)&wireless, sizeof(wireless)); | 260 | &wireless, sizeof(wireless)); |
| 275 | /* TBD: Pass error */ | 261 | /* TBD: Pass error */ |
| 276 | 262 | ||
| 277 | mask = 0x200 << (r * 8); | 263 | mask = 0x200 << (r * 8); |
| @@ -284,10 +270,10 @@ static bool hp_wmi_get_sw_state(enum hp_wmi_radio r) | |||
| 284 | 270 | ||
| 285 | static bool hp_wmi_get_hw_state(enum hp_wmi_radio r) | 271 | static bool hp_wmi_get_hw_state(enum hp_wmi_radio r) |
| 286 | { | 272 | { |
| 287 | int wireless; | 273 | int wireless = 0; |
| 288 | int mask; | 274 | int mask; |
| 289 | hp_wmi_perform_query(HPWMI_WIRELESS_QUERY, 0, | 275 | hp_wmi_perform_query(HPWMI_WIRELESS_QUERY, 0, |
| 290 | (char *)&wireless, sizeof(wireless)); | 276 | &wireless, sizeof(wireless)); |
| 291 | /* TBD: Pass error */ | 277 | /* TBD: Pass error */ |
| 292 | 278 | ||
| 293 | mask = 0x800 << (r * 8); | 279 | mask = 0x800 << (r * 8); |
| @@ -347,7 +333,7 @@ static ssize_t set_als(struct device *dev, struct device_attribute *attr, | |||
| 347 | const char *buf, size_t count) | 333 | const char *buf, size_t count) |
| 348 | { | 334 | { |
| 349 | u32 tmp = simple_strtoul(buf, NULL, 10); | 335 | u32 tmp = simple_strtoul(buf, NULL, 10); |
| 350 | int ret = hp_wmi_perform_query(HPWMI_ALS_QUERY, 1, (char *)&tmp, | 336 | int ret = hp_wmi_perform_query(HPWMI_ALS_QUERY, 1, &tmp, |
| 351 | sizeof(tmp)); | 337 | sizeof(tmp)); |
| 352 | if (ret) | 338 | if (ret) |
| 353 | return -EINVAL; | 339 | return -EINVAL; |
| @@ -421,7 +407,7 @@ static void hp_wmi_notify(u32 value, void *context) | |||
| 421 | static struct key_entry *key; | 407 | static struct key_entry *key; |
| 422 | union acpi_object *obj; | 408 | union acpi_object *obj; |
| 423 | u32 event_id, event_data; | 409 | u32 event_id, event_data; |
| 424 | int key_code, ret; | 410 | int key_code = 0, ret; |
| 425 | u32 *location; | 411 | u32 *location; |
| 426 | acpi_status status; | 412 | acpi_status status; |
| 427 | 413 | ||
| @@ -475,7 +461,7 @@ static void hp_wmi_notify(u32 value, void *context) | |||
| 475 | break; | 461 | break; |
| 476 | case HPWMI_BEZEL_BUTTON: | 462 | case HPWMI_BEZEL_BUTTON: |
| 477 | ret = hp_wmi_perform_query(HPWMI_HOTKEY_QUERY, 0, | 463 | ret = hp_wmi_perform_query(HPWMI_HOTKEY_QUERY, 0, |
| 478 | (char *)&key_code, | 464 | &key_code, |
| 479 | sizeof(key_code)); | 465 | sizeof(key_code)); |
| 480 | if (ret) | 466 | if (ret) |
| 481 | break; | 467 | break; |
| @@ -578,9 +564,9 @@ static void cleanup_sysfs(struct platform_device *device) | |||
| 578 | static int __devinit hp_wmi_bios_setup(struct platform_device *device) | 564 | static int __devinit hp_wmi_bios_setup(struct platform_device *device) |
| 579 | { | 565 | { |
| 580 | int err; | 566 | int err; |
| 581 | int wireless; | 567 | int wireless = 0; |
| 582 | 568 | ||
| 583 | err = hp_wmi_perform_query(HPWMI_WIRELESS_QUERY, 0, (char *)&wireless, | 569 | err = hp_wmi_perform_query(HPWMI_WIRELESS_QUERY, 0, &wireless, |
| 584 | sizeof(wireless)); | 570 | sizeof(wireless)); |
| 585 | if (err) | 571 | if (err) |
| 586 | return err; | 572 | return err; |
diff --git a/drivers/platform/x86/intel_rar_register.c b/drivers/platform/x86/intel_rar_register.c index 73f8e6d72669..2b11a33325e6 100644 --- a/drivers/platform/x86/intel_rar_register.c +++ b/drivers/platform/x86/intel_rar_register.c | |||
| @@ -145,7 +145,7 @@ static void free_rar_device(struct rar_device *rar) | |||
| 145 | */ | 145 | */ |
| 146 | static struct rar_device *_rar_to_device(int rar, int *off) | 146 | static struct rar_device *_rar_to_device(int rar, int *off) |
| 147 | { | 147 | { |
| 148 | if (rar >= 0 && rar <= 3) { | 148 | if (rar >= 0 && rar < MRST_NUM_RAR) { |
| 149 | *off = rar; | 149 | *off = rar; |
| 150 | return &my_rar_device; | 150 | return &my_rar_device; |
| 151 | } | 151 | } |
diff --git a/drivers/platform/x86/intel_scu_ipc.c b/drivers/platform/x86/intel_scu_ipc.c index 943f9084dcb1..6abe18e638e9 100644 --- a/drivers/platform/x86/intel_scu_ipc.c +++ b/drivers/platform/x86/intel_scu_ipc.c | |||
| @@ -487,7 +487,7 @@ int intel_scu_ipc_i2c_cntrl(u32 addr, u32 *data) | |||
| 487 | mdelay(1); | 487 | mdelay(1); |
| 488 | *data = readl(ipcdev.i2c_base + I2C_DATA_ADDR); | 488 | *data = readl(ipcdev.i2c_base + I2C_DATA_ADDR); |
| 489 | } else if (cmd == IPC_I2C_WRITE) { | 489 | } else if (cmd == IPC_I2C_WRITE) { |
| 490 | writel(addr, ipcdev.i2c_base + I2C_DATA_ADDR); | 490 | writel(*data, ipcdev.i2c_base + I2C_DATA_ADDR); |
| 491 | mdelay(1); | 491 | mdelay(1); |
| 492 | writel(addr, ipcdev.i2c_base + IPC_I2C_CNTRL_ADDR); | 492 | writel(addr, ipcdev.i2c_base + IPC_I2C_CNTRL_ADDR); |
| 493 | } else { | 493 | } else { |
diff --git a/drivers/serial/68328serial.c b/drivers/serial/68328serial.c index 7356a56ac458..be0ebce36e54 100644 --- a/drivers/serial/68328serial.c +++ b/drivers/serial/68328serial.c | |||
| @@ -869,7 +869,9 @@ static int get_serial_info(struct m68k_serial * info, | |||
| 869 | tmp.close_delay = info->close_delay; | 869 | tmp.close_delay = info->close_delay; |
| 870 | tmp.closing_wait = info->closing_wait; | 870 | tmp.closing_wait = info->closing_wait; |
| 871 | tmp.custom_divisor = info->custom_divisor; | 871 | tmp.custom_divisor = info->custom_divisor; |
| 872 | copy_to_user(retinfo,&tmp,sizeof(*retinfo)); | 872 | if (copy_to_user(retinfo, &tmp, sizeof(*retinfo))) |
| 873 | return -EFAULT; | ||
| 874 | |||
| 873 | return 0; | 875 | return 0; |
| 874 | } | 876 | } |
| 875 | 877 | ||
| @@ -882,7 +884,8 @@ static int set_serial_info(struct m68k_serial * info, | |||
| 882 | 884 | ||
| 883 | if (!new_info) | 885 | if (!new_info) |
| 884 | return -EFAULT; | 886 | return -EFAULT; |
| 885 | copy_from_user(&new_serial,new_info,sizeof(new_serial)); | 887 | if (copy_from_user(&new_serial, new_info, sizeof(new_serial))) |
| 888 | return -EFAULT; | ||
| 886 | old_info = *info; | 889 | old_info = *info; |
| 887 | 890 | ||
| 888 | if (!capable(CAP_SYS_ADMIN)) { | 891 | if (!capable(CAP_SYS_ADMIN)) { |
| @@ -943,8 +946,7 @@ static int get_lsr_info(struct m68k_serial * info, unsigned int *value) | |||
| 943 | status = 0; | 946 | status = 0; |
| 944 | #endif | 947 | #endif |
| 945 | local_irq_restore(flags); | 948 | local_irq_restore(flags); |
| 946 | put_user(status,value); | 949 | return put_user(status, value); |
| 947 | return 0; | ||
| 948 | } | 950 | } |
| 949 | 951 | ||
| 950 | /* | 952 | /* |
| @@ -999,27 +1001,18 @@ static int rs_ioctl(struct tty_struct *tty, struct file * file, | |||
| 999 | send_break(info, arg ? arg*(100) : 250); | 1001 | send_break(info, arg ? arg*(100) : 250); |
| 1000 | return 0; | 1002 | return 0; |
| 1001 | case TIOCGSERIAL: | 1003 | case TIOCGSERIAL: |
| 1002 | if (access_ok(VERIFY_WRITE, (void *) arg, | 1004 | return get_serial_info(info, |
| 1003 | sizeof(struct serial_struct))) | 1005 | (struct serial_struct *) arg); |
| 1004 | return get_serial_info(info, | ||
| 1005 | (struct serial_struct *) arg); | ||
| 1006 | return -EFAULT; | ||
| 1007 | case TIOCSSERIAL: | 1006 | case TIOCSSERIAL: |
| 1008 | return set_serial_info(info, | 1007 | return set_serial_info(info, |
| 1009 | (struct serial_struct *) arg); | 1008 | (struct serial_struct *) arg); |
| 1010 | case TIOCSERGETLSR: /* Get line status register */ | 1009 | case TIOCSERGETLSR: /* Get line status register */ |
| 1011 | if (access_ok(VERIFY_WRITE, (void *) arg, | 1010 | return get_lsr_info(info, (unsigned int *) arg); |
| 1012 | sizeof(unsigned int))) | ||
| 1013 | return get_lsr_info(info, (unsigned int *) arg); | ||
| 1014 | return -EFAULT; | ||
| 1015 | case TIOCSERGSTRUCT: | 1011 | case TIOCSERGSTRUCT: |
| 1016 | if (!access_ok(VERIFY_WRITE, (void *) arg, | 1012 | if (copy_to_user((struct m68k_serial *) arg, |
| 1017 | sizeof(struct m68k_serial))) | 1013 | info, sizeof(struct m68k_serial))) |
| 1018 | return -EFAULT; | 1014 | return -EFAULT; |
| 1019 | copy_to_user((struct m68k_serial *) arg, | ||
| 1020 | info, sizeof(struct m68k_serial)); | ||
| 1021 | return 0; | 1015 | return 0; |
| 1022 | |||
| 1023 | default: | 1016 | default: |
| 1024 | return -ENOIOCTLCMD; | 1017 | return -ENOIOCTLCMD; |
| 1025 | } | 1018 | } |
diff --git a/drivers/serial/8250_early.c b/drivers/serial/8250_early.c index b745792ec25a..eaafb98debed 100644 --- a/drivers/serial/8250_early.c +++ b/drivers/serial/8250_early.c | |||
| @@ -203,13 +203,13 @@ static int __init parse_options(struct early_serial8250_device *device, | |||
| 203 | 203 | ||
| 204 | if (mmio || mmio32) | 204 | if (mmio || mmio32) |
| 205 | printk(KERN_INFO | 205 | printk(KERN_INFO |
| 206 | "Early serial console at MMIO%s 0x%llu (options '%s')\n", | 206 | "Early serial console at MMIO%s 0x%llx (options '%s')\n", |
| 207 | mmio32 ? "32" : "", | 207 | mmio32 ? "32" : "", |
| 208 | (unsigned long long)port->mapbase, | 208 | (unsigned long long)port->mapbase, |
| 209 | device->options); | 209 | device->options); |
| 210 | else | 210 | else |
| 211 | printk(KERN_INFO | 211 | printk(KERN_INFO |
| 212 | "Early serial console at I/O port 0x%lu (options '%s')\n", | 212 | "Early serial console at I/O port 0x%lx (options '%s')\n", |
| 213 | port->iobase, | 213 | port->iobase, |
| 214 | device->options); | 214 | device->options); |
| 215 | 215 | ||
diff --git a/drivers/staging/Kconfig b/drivers/staging/Kconfig index 4a7a7a7f11b6..335311a98fdc 100644 --- a/drivers/staging/Kconfig +++ b/drivers/staging/Kconfig | |||
| @@ -113,8 +113,6 @@ source "drivers/staging/vme/Kconfig" | |||
| 113 | 113 | ||
| 114 | source "drivers/staging/memrar/Kconfig" | 114 | source "drivers/staging/memrar/Kconfig" |
| 115 | 115 | ||
| 116 | source "drivers/staging/sep/Kconfig" | ||
| 117 | |||
| 118 | source "drivers/staging/iio/Kconfig" | 116 | source "drivers/staging/iio/Kconfig" |
| 119 | 117 | ||
| 120 | source "drivers/staging/zram/Kconfig" | 118 | source "drivers/staging/zram/Kconfig" |
diff --git a/drivers/staging/Makefile b/drivers/staging/Makefile index ca5c03eb3ce3..e3f1e1b6095e 100644 --- a/drivers/staging/Makefile +++ b/drivers/staging/Makefile | |||
| @@ -38,7 +38,6 @@ obj-$(CONFIG_FB_UDL) += udlfb/ | |||
| 38 | obj-$(CONFIG_HYPERV) += hv/ | 38 | obj-$(CONFIG_HYPERV) += hv/ |
| 39 | obj-$(CONFIG_VME_BUS) += vme/ | 39 | obj-$(CONFIG_VME_BUS) += vme/ |
| 40 | obj-$(CONFIG_MRST_RAR_HANDLER) += memrar/ | 40 | obj-$(CONFIG_MRST_RAR_HANDLER) += memrar/ |
| 41 | obj-$(CONFIG_DX_SEP) += sep/ | ||
| 42 | obj-$(CONFIG_IIO) += iio/ | 41 | obj-$(CONFIG_IIO) += iio/ |
| 43 | obj-$(CONFIG_ZRAM) += zram/ | 42 | obj-$(CONFIG_ZRAM) += zram/ |
| 44 | obj-$(CONFIG_WLAGS49_H2) += wlags49_h2/ | 43 | obj-$(CONFIG_WLAGS49_H2) += wlags49_h2/ |
diff --git a/drivers/staging/batman-adv/bat_sysfs.c b/drivers/staging/batman-adv/bat_sysfs.c index b4a8d5eb64fa..05ca15a6c9f8 100644 --- a/drivers/staging/batman-adv/bat_sysfs.c +++ b/drivers/staging/batman-adv/bat_sysfs.c | |||
| @@ -267,6 +267,10 @@ static ssize_t store_log_level(struct kobject *kobj, struct attribute *attr, | |||
| 267 | if (atomic_read(&bat_priv->log_level) == log_level_tmp) | 267 | if (atomic_read(&bat_priv->log_level) == log_level_tmp) |
| 268 | return count; | 268 | return count; |
| 269 | 269 | ||
| 270 | bat_info(net_dev, "Changing log level from: %i to: %li\n", | ||
| 271 | atomic_read(&bat_priv->log_level), | ||
| 272 | log_level_tmp); | ||
| 273 | |||
| 270 | atomic_set(&bat_priv->log_level, (unsigned)log_level_tmp); | 274 | atomic_set(&bat_priv->log_level, (unsigned)log_level_tmp); |
| 271 | return count; | 275 | return count; |
| 272 | } | 276 | } |
diff --git a/drivers/staging/batman-adv/hard-interface.c b/drivers/staging/batman-adv/hard-interface.c index 92c216a56885..baa8b05b9e8d 100644 --- a/drivers/staging/batman-adv/hard-interface.c +++ b/drivers/staging/batman-adv/hard-interface.c | |||
| @@ -129,6 +129,9 @@ static bool hardif_is_iface_up(struct batman_if *batman_if) | |||
| 129 | 129 | ||
| 130 | static void update_mac_addresses(struct batman_if *batman_if) | 130 | static void update_mac_addresses(struct batman_if *batman_if) |
| 131 | { | 131 | { |
| 132 | if (!batman_if || !batman_if->packet_buff) | ||
| 133 | return; | ||
| 134 | |||
| 132 | addr_to_string(batman_if->addr_str, batman_if->net_dev->dev_addr); | 135 | addr_to_string(batman_if->addr_str, batman_if->net_dev->dev_addr); |
| 133 | 136 | ||
| 134 | memcpy(((struct batman_packet *)(batman_if->packet_buff))->orig, | 137 | memcpy(((struct batman_packet *)(batman_if->packet_buff))->orig, |
| @@ -194,8 +197,6 @@ static void hardif_activate_interface(struct net_device *net_dev, | |||
| 194 | if (batman_if->if_status != IF_INACTIVE) | 197 | if (batman_if->if_status != IF_INACTIVE) |
| 195 | return; | 198 | return; |
| 196 | 199 | ||
| 197 | dev_hold(batman_if->net_dev); | ||
| 198 | |||
| 199 | update_mac_addresses(batman_if); | 200 | update_mac_addresses(batman_if); |
| 200 | batman_if->if_status = IF_TO_BE_ACTIVATED; | 201 | batman_if->if_status = IF_TO_BE_ACTIVATED; |
| 201 | 202 | ||
| @@ -222,8 +223,6 @@ static void hardif_deactivate_interface(struct net_device *net_dev, | |||
| 222 | (batman_if->if_status != IF_TO_BE_ACTIVATED)) | 223 | (batman_if->if_status != IF_TO_BE_ACTIVATED)) |
| 223 | return; | 224 | return; |
| 224 | 225 | ||
| 225 | dev_put(batman_if->net_dev); | ||
| 226 | |||
| 227 | batman_if->if_status = IF_INACTIVE; | 226 | batman_if->if_status = IF_INACTIVE; |
| 228 | 227 | ||
| 229 | bat_info(net_dev, "Interface deactivated: %s\n", batman_if->dev); | 228 | bat_info(net_dev, "Interface deactivated: %s\n", batman_if->dev); |
| @@ -318,11 +317,13 @@ static struct batman_if *hardif_add_interface(struct net_device *net_dev) | |||
| 318 | if (ret != 1) | 317 | if (ret != 1) |
| 319 | goto out; | 318 | goto out; |
| 320 | 319 | ||
| 320 | dev_hold(net_dev); | ||
| 321 | |||
| 321 | batman_if = kmalloc(sizeof(struct batman_if), GFP_ATOMIC); | 322 | batman_if = kmalloc(sizeof(struct batman_if), GFP_ATOMIC); |
| 322 | if (!batman_if) { | 323 | if (!batman_if) { |
| 323 | pr_err("Can't add interface (%s): out of memory\n", | 324 | pr_err("Can't add interface (%s): out of memory\n", |
| 324 | net_dev->name); | 325 | net_dev->name); |
| 325 | goto out; | 326 | goto release_dev; |
| 326 | } | 327 | } |
| 327 | 328 | ||
| 328 | batman_if->dev = kstrdup(net_dev->name, GFP_ATOMIC); | 329 | batman_if->dev = kstrdup(net_dev->name, GFP_ATOMIC); |
| @@ -336,6 +337,7 @@ static struct batman_if *hardif_add_interface(struct net_device *net_dev) | |||
| 336 | batman_if->if_num = -1; | 337 | batman_if->if_num = -1; |
| 337 | batman_if->net_dev = net_dev; | 338 | batman_if->net_dev = net_dev; |
| 338 | batman_if->if_status = IF_NOT_IN_USE; | 339 | batman_if->if_status = IF_NOT_IN_USE; |
| 340 | batman_if->packet_buff = NULL; | ||
| 339 | INIT_LIST_HEAD(&batman_if->list); | 341 | INIT_LIST_HEAD(&batman_if->list); |
| 340 | 342 | ||
| 341 | check_known_mac_addr(batman_if->net_dev->dev_addr); | 343 | check_known_mac_addr(batman_if->net_dev->dev_addr); |
| @@ -346,6 +348,8 @@ free_dev: | |||
| 346 | kfree(batman_if->dev); | 348 | kfree(batman_if->dev); |
| 347 | free_if: | 349 | free_if: |
| 348 | kfree(batman_if); | 350 | kfree(batman_if); |
| 351 | release_dev: | ||
| 352 | dev_put(net_dev); | ||
| 349 | out: | 353 | out: |
| 350 | return NULL; | 354 | return NULL; |
| 351 | } | 355 | } |
| @@ -374,6 +378,7 @@ static void hardif_remove_interface(struct batman_if *batman_if) | |||
| 374 | batman_if->if_status = IF_TO_BE_REMOVED; | 378 | batman_if->if_status = IF_TO_BE_REMOVED; |
| 375 | list_del_rcu(&batman_if->list); | 379 | list_del_rcu(&batman_if->list); |
| 376 | sysfs_del_hardif(&batman_if->hardif_obj); | 380 | sysfs_del_hardif(&batman_if->hardif_obj); |
| 381 | dev_put(batman_if->net_dev); | ||
| 377 | call_rcu(&batman_if->rcu, hardif_free_interface); | 382 | call_rcu(&batman_if->rcu, hardif_free_interface); |
| 378 | } | 383 | } |
| 379 | 384 | ||
| @@ -393,15 +398,13 @@ static int hard_if_event(struct notifier_block *this, | |||
| 393 | /* FIXME: each batman_if will be attached to a softif */ | 398 | /* FIXME: each batman_if will be attached to a softif */ |
| 394 | struct bat_priv *bat_priv = netdev_priv(soft_device); | 399 | struct bat_priv *bat_priv = netdev_priv(soft_device); |
| 395 | 400 | ||
| 396 | if (!batman_if) | 401 | if (!batman_if && event == NETDEV_REGISTER) |
| 397 | batman_if = hardif_add_interface(net_dev); | 402 | batman_if = hardif_add_interface(net_dev); |
| 398 | 403 | ||
| 399 | if (!batman_if) | 404 | if (!batman_if) |
| 400 | goto out; | 405 | goto out; |
| 401 | 406 | ||
| 402 | switch (event) { | 407 | switch (event) { |
| 403 | case NETDEV_REGISTER: | ||
| 404 | break; | ||
| 405 | case NETDEV_UP: | 408 | case NETDEV_UP: |
| 406 | hardif_activate_interface(soft_device, bat_priv, batman_if); | 409 | hardif_activate_interface(soft_device, bat_priv, batman_if); |
| 407 | break; | 410 | break; |
| @@ -442,8 +445,6 @@ int batman_skb_recv(struct sk_buff *skb, struct net_device *dev, | |||
| 442 | struct bat_priv *bat_priv = netdev_priv(soft_device); | 445 | struct bat_priv *bat_priv = netdev_priv(soft_device); |
| 443 | struct batman_packet *batman_packet; | 446 | struct batman_packet *batman_packet; |
| 444 | struct batman_if *batman_if; | 447 | struct batman_if *batman_if; |
| 445 | struct net_device_stats *stats; | ||
| 446 | struct rtnl_link_stats64 temp; | ||
| 447 | int ret; | 448 | int ret; |
| 448 | 449 | ||
| 449 | skb = skb_share_check(skb, GFP_ATOMIC); | 450 | skb = skb_share_check(skb, GFP_ATOMIC); |
| @@ -479,12 +480,6 @@ int batman_skb_recv(struct sk_buff *skb, struct net_device *dev, | |||
| 479 | if (batman_if->if_status != IF_ACTIVE) | 480 | if (batman_if->if_status != IF_ACTIVE) |
| 480 | goto err_free; | 481 | goto err_free; |
| 481 | 482 | ||
| 482 | stats = (struct net_device_stats *)dev_get_stats(skb->dev, &temp); | ||
| 483 | if (stats) { | ||
| 484 | stats->rx_packets++; | ||
| 485 | stats->rx_bytes += skb->len; | ||
| 486 | } | ||
| 487 | |||
| 488 | batman_packet = (struct batman_packet *)skb->data; | 483 | batman_packet = (struct batman_packet *)skb->data; |
| 489 | 484 | ||
| 490 | if (batman_packet->version != COMPAT_VERSION) { | 485 | if (batman_packet->version != COMPAT_VERSION) { |
diff --git a/drivers/staging/batman-adv/icmp_socket.c b/drivers/staging/batman-adv/icmp_socket.c index fc3d32c12729..3ae7dd2d2d4d 100644 --- a/drivers/staging/batman-adv/icmp_socket.c +++ b/drivers/staging/batman-adv/icmp_socket.c | |||
| @@ -67,6 +67,7 @@ static int bat_socket_open(struct inode *inode, struct file *file) | |||
| 67 | INIT_LIST_HEAD(&socket_client->queue_list); | 67 | INIT_LIST_HEAD(&socket_client->queue_list); |
| 68 | socket_client->queue_len = 0; | 68 | socket_client->queue_len = 0; |
| 69 | socket_client->index = i; | 69 | socket_client->index = i; |
| 70 | socket_client->bat_priv = inode->i_private; | ||
| 70 | spin_lock_init(&socket_client->lock); | 71 | spin_lock_init(&socket_client->lock); |
| 71 | init_waitqueue_head(&socket_client->queue_wait); | 72 | init_waitqueue_head(&socket_client->queue_wait); |
| 72 | 73 | ||
| @@ -151,9 +152,8 @@ static ssize_t bat_socket_read(struct file *file, char __user *buf, | |||
| 151 | static ssize_t bat_socket_write(struct file *file, const char __user *buff, | 152 | static ssize_t bat_socket_write(struct file *file, const char __user *buff, |
| 152 | size_t len, loff_t *off) | 153 | size_t len, loff_t *off) |
| 153 | { | 154 | { |
| 154 | /* FIXME: each orig_node->batman_if will be attached to a softif */ | ||
| 155 | struct bat_priv *bat_priv = netdev_priv(soft_device); | ||
| 156 | struct socket_client *socket_client = file->private_data; | 155 | struct socket_client *socket_client = file->private_data; |
| 156 | struct bat_priv *bat_priv = socket_client->bat_priv; | ||
| 157 | struct icmp_packet_rr icmp_packet; | 157 | struct icmp_packet_rr icmp_packet; |
| 158 | struct orig_node *orig_node; | 158 | struct orig_node *orig_node; |
| 159 | struct batman_if *batman_if; | 159 | struct batman_if *batman_if; |
| @@ -168,6 +168,9 @@ static ssize_t bat_socket_write(struct file *file, const char __user *buff, | |||
| 168 | return -EINVAL; | 168 | return -EINVAL; |
| 169 | } | 169 | } |
| 170 | 170 | ||
| 171 | if (!bat_priv->primary_if) | ||
| 172 | return -EFAULT; | ||
| 173 | |||
| 171 | if (len >= sizeof(struct icmp_packet_rr)) | 174 | if (len >= sizeof(struct icmp_packet_rr)) |
| 172 | packet_len = sizeof(struct icmp_packet_rr); | 175 | packet_len = sizeof(struct icmp_packet_rr); |
| 173 | 176 | ||
| @@ -223,7 +226,8 @@ static ssize_t bat_socket_write(struct file *file, const char __user *buff, | |||
| 223 | if (batman_if->if_status != IF_ACTIVE) | 226 | if (batman_if->if_status != IF_ACTIVE) |
| 224 | goto dst_unreach; | 227 | goto dst_unreach; |
| 225 | 228 | ||
| 226 | memcpy(icmp_packet.orig, batman_if->net_dev->dev_addr, ETH_ALEN); | 229 | memcpy(icmp_packet.orig, |
| 230 | bat_priv->primary_if->net_dev->dev_addr, ETH_ALEN); | ||
| 227 | 231 | ||
| 228 | if (packet_len == sizeof(struct icmp_packet_rr)) | 232 | if (packet_len == sizeof(struct icmp_packet_rr)) |
| 229 | memcpy(icmp_packet.rr, batman_if->net_dev->dev_addr, ETH_ALEN); | 233 | memcpy(icmp_packet.rr, batman_if->net_dev->dev_addr, ETH_ALEN); |
| @@ -271,7 +275,7 @@ int bat_socket_setup(struct bat_priv *bat_priv) | |||
| 271 | goto err; | 275 | goto err; |
| 272 | 276 | ||
| 273 | d = debugfs_create_file(ICMP_SOCKET, S_IFREG | S_IWUSR | S_IRUSR, | 277 | d = debugfs_create_file(ICMP_SOCKET, S_IFREG | S_IWUSR | S_IRUSR, |
| 274 | bat_priv->debug_dir, NULL, &fops); | 278 | bat_priv->debug_dir, bat_priv, &fops); |
| 275 | if (d) | 279 | if (d) |
| 276 | goto err; | 280 | goto err; |
| 277 | 281 | ||
diff --git a/drivers/staging/batman-adv/main.c b/drivers/staging/batman-adv/main.c index 2686019fe4e1..ef7c20ae7979 100644 --- a/drivers/staging/batman-adv/main.c +++ b/drivers/staging/batman-adv/main.c | |||
| @@ -250,10 +250,13 @@ int choose_orig(void *data, int32_t size) | |||
| 250 | int is_my_mac(uint8_t *addr) | 250 | int is_my_mac(uint8_t *addr) |
| 251 | { | 251 | { |
| 252 | struct batman_if *batman_if; | 252 | struct batman_if *batman_if; |
| 253 | |||
| 253 | rcu_read_lock(); | 254 | rcu_read_lock(); |
| 254 | list_for_each_entry_rcu(batman_if, &if_list, list) { | 255 | list_for_each_entry_rcu(batman_if, &if_list, list) { |
| 255 | if ((batman_if->net_dev) && | 256 | if (batman_if->if_status != IF_ACTIVE) |
| 256 | (compare_orig(batman_if->net_dev->dev_addr, addr))) { | 257 | continue; |
| 258 | |||
| 259 | if (compare_orig(batman_if->net_dev->dev_addr, addr)) { | ||
| 257 | rcu_read_unlock(); | 260 | rcu_read_unlock(); |
| 258 | return 1; | 261 | return 1; |
| 259 | } | 262 | } |
diff --git a/drivers/staging/batman-adv/originator.c b/drivers/staging/batman-adv/originator.c index 28bb627ffa13..de5a8c1a8104 100644 --- a/drivers/staging/batman-adv/originator.c +++ b/drivers/staging/batman-adv/originator.c | |||
| @@ -391,11 +391,12 @@ static int orig_node_add_if(struct orig_node *orig_node, int max_if_num) | |||
| 391 | int orig_hash_add_if(struct batman_if *batman_if, int max_if_num) | 391 | int orig_hash_add_if(struct batman_if *batman_if, int max_if_num) |
| 392 | { | 392 | { |
| 393 | struct orig_node *orig_node; | 393 | struct orig_node *orig_node; |
| 394 | unsigned long flags; | ||
| 394 | HASHIT(hashit); | 395 | HASHIT(hashit); |
| 395 | 396 | ||
| 396 | /* resize all orig nodes because orig_node->bcast_own(_sum) depend on | 397 | /* resize all orig nodes because orig_node->bcast_own(_sum) depend on |
| 397 | * if_num */ | 398 | * if_num */ |
| 398 | spin_lock(&orig_hash_lock); | 399 | spin_lock_irqsave(&orig_hash_lock, flags); |
| 399 | 400 | ||
| 400 | while (hash_iterate(orig_hash, &hashit)) { | 401 | while (hash_iterate(orig_hash, &hashit)) { |
| 401 | orig_node = hashit.bucket->data; | 402 | orig_node = hashit.bucket->data; |
| @@ -404,11 +405,11 @@ int orig_hash_add_if(struct batman_if *batman_if, int max_if_num) | |||
| 404 | goto err; | 405 | goto err; |
| 405 | } | 406 | } |
| 406 | 407 | ||
| 407 | spin_unlock(&orig_hash_lock); | 408 | spin_unlock_irqrestore(&orig_hash_lock, flags); |
| 408 | return 0; | 409 | return 0; |
| 409 | 410 | ||
| 410 | err: | 411 | err: |
| 411 | spin_unlock(&orig_hash_lock); | 412 | spin_unlock_irqrestore(&orig_hash_lock, flags); |
| 412 | return -ENOMEM; | 413 | return -ENOMEM; |
| 413 | } | 414 | } |
| 414 | 415 | ||
| @@ -468,12 +469,13 @@ int orig_hash_del_if(struct batman_if *batman_if, int max_if_num) | |||
| 468 | { | 469 | { |
| 469 | struct batman_if *batman_if_tmp; | 470 | struct batman_if *batman_if_tmp; |
| 470 | struct orig_node *orig_node; | 471 | struct orig_node *orig_node; |
| 472 | unsigned long flags; | ||
| 471 | HASHIT(hashit); | 473 | HASHIT(hashit); |
| 472 | int ret; | 474 | int ret; |
| 473 | 475 | ||
| 474 | /* resize all orig nodes because orig_node->bcast_own(_sum) depend on | 476 | /* resize all orig nodes because orig_node->bcast_own(_sum) depend on |
| 475 | * if_num */ | 477 | * if_num */ |
| 476 | spin_lock(&orig_hash_lock); | 478 | spin_lock_irqsave(&orig_hash_lock, flags); |
| 477 | 479 | ||
| 478 | while (hash_iterate(orig_hash, &hashit)) { | 480 | while (hash_iterate(orig_hash, &hashit)) { |
| 479 | orig_node = hashit.bucket->data; | 481 | orig_node = hashit.bucket->data; |
| @@ -500,10 +502,10 @@ int orig_hash_del_if(struct batman_if *batman_if, int max_if_num) | |||
| 500 | rcu_read_unlock(); | 502 | rcu_read_unlock(); |
| 501 | 503 | ||
| 502 | batman_if->if_num = -1; | 504 | batman_if->if_num = -1; |
| 503 | spin_unlock(&orig_hash_lock); | 505 | spin_unlock_irqrestore(&orig_hash_lock, flags); |
| 504 | return 0; | 506 | return 0; |
| 505 | 507 | ||
| 506 | err: | 508 | err: |
| 507 | spin_unlock(&orig_hash_lock); | 509 | spin_unlock_irqrestore(&orig_hash_lock, flags); |
| 508 | return -ENOMEM; | 510 | return -ENOMEM; |
| 509 | } | 511 | } |
diff --git a/drivers/staging/batman-adv/routing.c b/drivers/staging/batman-adv/routing.c index 066cc9149bf1..032195e6de94 100644 --- a/drivers/staging/batman-adv/routing.c +++ b/drivers/staging/batman-adv/routing.c | |||
| @@ -783,6 +783,8 @@ int recv_bat_packet(struct sk_buff *skb, | |||
| 783 | 783 | ||
| 784 | static int recv_my_icmp_packet(struct sk_buff *skb, size_t icmp_len) | 784 | static int recv_my_icmp_packet(struct sk_buff *skb, size_t icmp_len) |
| 785 | { | 785 | { |
| 786 | /* FIXME: each batman_if will be attached to a softif */ | ||
| 787 | struct bat_priv *bat_priv = netdev_priv(soft_device); | ||
| 786 | struct orig_node *orig_node; | 788 | struct orig_node *orig_node; |
| 787 | struct icmp_packet_rr *icmp_packet; | 789 | struct icmp_packet_rr *icmp_packet; |
| 788 | struct ethhdr *ethhdr; | 790 | struct ethhdr *ethhdr; |
| @@ -801,6 +803,9 @@ static int recv_my_icmp_packet(struct sk_buff *skb, size_t icmp_len) | |||
| 801 | return NET_RX_DROP; | 803 | return NET_RX_DROP; |
| 802 | } | 804 | } |
| 803 | 805 | ||
| 806 | if (!bat_priv->primary_if) | ||
| 807 | return NET_RX_DROP; | ||
| 808 | |||
| 804 | /* answer echo request (ping) */ | 809 | /* answer echo request (ping) */ |
| 805 | /* get routing information */ | 810 | /* get routing information */ |
| 806 | spin_lock_irqsave(&orig_hash_lock, flags); | 811 | spin_lock_irqsave(&orig_hash_lock, flags); |
| @@ -830,7 +835,8 @@ static int recv_my_icmp_packet(struct sk_buff *skb, size_t icmp_len) | |||
| 830 | } | 835 | } |
| 831 | 836 | ||
| 832 | memcpy(icmp_packet->dst, icmp_packet->orig, ETH_ALEN); | 837 | memcpy(icmp_packet->dst, icmp_packet->orig, ETH_ALEN); |
| 833 | memcpy(icmp_packet->orig, ethhdr->h_dest, ETH_ALEN); | 838 | memcpy(icmp_packet->orig, |
| 839 | bat_priv->primary_if->net_dev->dev_addr, ETH_ALEN); | ||
| 834 | icmp_packet->msg_type = ECHO_REPLY; | 840 | icmp_packet->msg_type = ECHO_REPLY; |
| 835 | icmp_packet->ttl = TTL; | 841 | icmp_packet->ttl = TTL; |
| 836 | 842 | ||
| @@ -845,6 +851,8 @@ static int recv_my_icmp_packet(struct sk_buff *skb, size_t icmp_len) | |||
| 845 | 851 | ||
| 846 | static int recv_icmp_ttl_exceeded(struct sk_buff *skb, size_t icmp_len) | 852 | static int recv_icmp_ttl_exceeded(struct sk_buff *skb, size_t icmp_len) |
| 847 | { | 853 | { |
| 854 | /* FIXME: each batman_if will be attached to a softif */ | ||
| 855 | struct bat_priv *bat_priv = netdev_priv(soft_device); | ||
| 848 | struct orig_node *orig_node; | 856 | struct orig_node *orig_node; |
| 849 | struct icmp_packet *icmp_packet; | 857 | struct icmp_packet *icmp_packet; |
| 850 | struct ethhdr *ethhdr; | 858 | struct ethhdr *ethhdr; |
| @@ -865,6 +873,9 @@ static int recv_icmp_ttl_exceeded(struct sk_buff *skb, size_t icmp_len) | |||
| 865 | return NET_RX_DROP; | 873 | return NET_RX_DROP; |
| 866 | } | 874 | } |
| 867 | 875 | ||
| 876 | if (!bat_priv->primary_if) | ||
| 877 | return NET_RX_DROP; | ||
| 878 | |||
| 868 | /* get routing information */ | 879 | /* get routing information */ |
| 869 | spin_lock_irqsave(&orig_hash_lock, flags); | 880 | spin_lock_irqsave(&orig_hash_lock, flags); |
| 870 | orig_node = ((struct orig_node *) | 881 | orig_node = ((struct orig_node *) |
| @@ -892,7 +903,8 @@ static int recv_icmp_ttl_exceeded(struct sk_buff *skb, size_t icmp_len) | |||
| 892 | } | 903 | } |
| 893 | 904 | ||
| 894 | memcpy(icmp_packet->dst, icmp_packet->orig, ETH_ALEN); | 905 | memcpy(icmp_packet->dst, icmp_packet->orig, ETH_ALEN); |
| 895 | memcpy(icmp_packet->orig, ethhdr->h_dest, ETH_ALEN); | 906 | memcpy(icmp_packet->orig, |
| 907 | bat_priv->primary_if->net_dev->dev_addr, ETH_ALEN); | ||
| 896 | icmp_packet->msg_type = TTL_EXCEEDED; | 908 | icmp_packet->msg_type = TTL_EXCEEDED; |
| 897 | icmp_packet->ttl = TTL; | 909 | icmp_packet->ttl = TTL; |
| 898 | 910 | ||
diff --git a/drivers/staging/batman-adv/types.h b/drivers/staging/batman-adv/types.h index 21d0717afb09..9aa9d369c752 100644 --- a/drivers/staging/batman-adv/types.h +++ b/drivers/staging/batman-adv/types.h | |||
| @@ -126,6 +126,7 @@ struct socket_client { | |||
| 126 | unsigned char index; | 126 | unsigned char index; |
| 127 | spinlock_t lock; | 127 | spinlock_t lock; |
| 128 | wait_queue_head_t queue_wait; | 128 | wait_queue_head_t queue_wait; |
| 129 | struct bat_priv *bat_priv; | ||
| 129 | }; | 130 | }; |
| 130 | 131 | ||
| 131 | struct socket_packet { | 132 | struct socket_packet { |
diff --git a/drivers/staging/sep/Kconfig b/drivers/staging/sep/Kconfig deleted file mode 100644 index 0a9c39c7f2bd..000000000000 --- a/drivers/staging/sep/Kconfig +++ /dev/null | |||
| @@ -1,10 +0,0 @@ | |||
| 1 | config DX_SEP | ||
| 2 | tristate "Discretix SEP driver" | ||
| 3 | # depends on MRST | ||
| 4 | depends on RAR_REGISTER && PCI | ||
| 5 | default y | ||
| 6 | help | ||
| 7 | Discretix SEP driver | ||
| 8 | |||
| 9 | If unsure say M. The compiled module will be | ||
| 10 | called sep_driver.ko | ||
diff --git a/drivers/staging/sep/Makefile b/drivers/staging/sep/Makefile deleted file mode 100644 index 628d5f919414..000000000000 --- a/drivers/staging/sep/Makefile +++ /dev/null | |||
| @@ -1,2 +0,0 @@ | |||
| 1 | obj-$(CONFIG_DX_SEP) := sep_driver.o | ||
| 2 | |||
diff --git a/drivers/staging/sep/TODO b/drivers/staging/sep/TODO deleted file mode 100644 index ff0e931dab64..000000000000 --- a/drivers/staging/sep/TODO +++ /dev/null | |||
| @@ -1,8 +0,0 @@ | |||
| 1 | Todo's so far (from Alan Cox) | ||
| 2 | - Fix firmware loading | ||
| 3 | - Get firmware into firmware git tree | ||
| 4 | - Review and tidy each algorithm function | ||
| 5 | - Check whether it can be plugged into any of the kernel crypto API | ||
| 6 | interfaces | ||
| 7 | - Do something about the magic shared memory interface and replace it | ||
| 8 | with something saner (in Linux terms) | ||
diff --git a/drivers/staging/sep/sep_dev.h b/drivers/staging/sep/sep_dev.h deleted file mode 100644 index 9200524bb64d..000000000000 --- a/drivers/staging/sep/sep_dev.h +++ /dev/null | |||
| @@ -1,110 +0,0 @@ | |||
| 1 | #ifndef __SEP_DEV_H__ | ||
| 2 | #define __SEP_DEV_H__ | ||
| 3 | |||
| 4 | /* | ||
| 5 | * | ||
| 6 | * sep_dev.h - Security Processor Device Structures | ||
| 7 | * | ||
| 8 | * Copyright(c) 2009 Intel Corporation. All rights reserved. | ||
| 9 | * Copyright(c) 2009 Discretix. All rights reserved. | ||
| 10 | * | ||
| 11 | * This program is free software; you can redistribute it and/or modify it | ||
| 12 | * under the terms of the GNU General Public License as published by the Free | ||
| 13 | * Software Foundation; either version 2 of the License, or (at your option) | ||
| 14 | * any later version. | ||
| 15 | * | ||
| 16 | * This program is distributed in the hope that it will be useful, but WITHOUT | ||
| 17 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
| 18 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
| 19 | * more details. | ||
| 20 | * | ||
| 21 | * You should have received a copy of the GNU General Public License along with | ||
| 22 | * this program; if not, write to the Free Software Foundation, Inc., 59 | ||
| 23 | * Temple Place - Suite 330, Boston, MA 02111-1307, USA. | ||
| 24 | * | ||
| 25 | * CONTACTS: | ||
| 26 | * | ||
| 27 | * Alan Cox alan@linux.intel.com | ||
| 28 | * | ||
| 29 | */ | ||
| 30 | |||
| 31 | struct sep_device { | ||
| 32 | /* pointer to pci dev */ | ||
| 33 | struct pci_dev *pdev; | ||
| 34 | |||
| 35 | unsigned long in_use; | ||
| 36 | |||
| 37 | /* address of the shared memory allocated during init for SEP driver | ||
| 38 | (coherent alloc) */ | ||
| 39 | void *shared_addr; | ||
| 40 | /* the physical address of the shared area */ | ||
| 41 | dma_addr_t shared_bus; | ||
| 42 | |||
| 43 | /* restricted access region (coherent alloc) */ | ||
| 44 | dma_addr_t rar_bus; | ||
| 45 | void *rar_addr; | ||
| 46 | /* firmware regions: cache is at rar_addr */ | ||
| 47 | unsigned long cache_size; | ||
| 48 | |||
| 49 | /* follows the cache */ | ||
| 50 | dma_addr_t resident_bus; | ||
| 51 | unsigned long resident_size; | ||
| 52 | void *resident_addr; | ||
| 53 | |||
| 54 | /* start address of the access to the SEP registers from driver */ | ||
| 55 | void __iomem *reg_addr; | ||
| 56 | /* transaction counter that coordinates the transactions between SEP and HOST */ | ||
| 57 | unsigned long send_ct; | ||
| 58 | /* counter for the messages from sep */ | ||
| 59 | unsigned long reply_ct; | ||
| 60 | /* counter for the number of bytes allocated in the pool for the current | ||
| 61 | transaction */ | ||
| 62 | unsigned long data_pool_bytes_allocated; | ||
| 63 | |||
| 64 | /* array of pointers to the pages that represent input data for the synchronic | ||
| 65 | DMA action */ | ||
| 66 | struct page **in_page_array; | ||
| 67 | |||
| 68 | /* array of pointers to the pages that represent out data for the synchronic | ||
| 69 | DMA action */ | ||
| 70 | struct page **out_page_array; | ||
| 71 | |||
| 72 | /* number of pages in the sep_in_page_array */ | ||
| 73 | unsigned long in_num_pages; | ||
| 74 | |||
| 75 | /* number of pages in the sep_out_page_array */ | ||
| 76 | unsigned long out_num_pages; | ||
| 77 | |||
| 78 | /* global data for every flow */ | ||
| 79 | struct sep_flow_context_t flows[SEP_DRIVER_NUM_FLOWS]; | ||
| 80 | |||
| 81 | /* pointer to the workqueue that handles the flow done interrupts */ | ||
| 82 | struct workqueue_struct *flow_wq; | ||
| 83 | |||
| 84 | }; | ||
| 85 | |||
| 86 | static struct sep_device *sep_dev; | ||
| 87 | |||
| 88 | static inline void sep_write_reg(struct sep_device *dev, int reg, u32 value) | ||
| 89 | { | ||
| 90 | void __iomem *addr = dev->reg_addr + reg; | ||
| 91 | writel(value, addr); | ||
| 92 | } | ||
| 93 | |||
| 94 | static inline u32 sep_read_reg(struct sep_device *dev, int reg) | ||
| 95 | { | ||
| 96 | void __iomem *addr = dev->reg_addr + reg; | ||
| 97 | return readl(addr); | ||
| 98 | } | ||
| 99 | |||
| 100 | /* wait for SRAM write complete(indirect write */ | ||
| 101 | static inline void sep_wait_sram_write(struct sep_device *dev) | ||
| 102 | { | ||
| 103 | u32 reg_val; | ||
| 104 | do | ||
| 105 | reg_val = sep_read_reg(dev, HW_SRAM_DATA_READY_REG_ADDR); | ||
| 106 | while (!(reg_val & 1)); | ||
| 107 | } | ||
| 108 | |||
| 109 | |||
| 110 | #endif | ||
diff --git a/drivers/staging/sep/sep_driver.c b/drivers/staging/sep/sep_driver.c deleted file mode 100644 index ecbde3467b1b..000000000000 --- a/drivers/staging/sep/sep_driver.c +++ /dev/null | |||
| @@ -1,2742 +0,0 @@ | |||
| 1 | /* | ||
| 2 | * | ||
| 3 | * sep_driver.c - Security Processor Driver main group of functions | ||
| 4 | * | ||
| 5 | * Copyright(c) 2009 Intel Corporation. All rights reserved. | ||
| 6 | * Copyright(c) 2009 Discretix. All rights reserved. | ||
| 7 | * | ||
| 8 | * This program is free software; you can redistribute it and/or modify it | ||
| 9 | * under the terms of the GNU General Public License as published by the Free | ||
| 10 | * Software Foundation; either version 2 of the License, or (at your option) | ||
| 11 | * any later version. | ||
| 12 | * | ||
| 13 | * This program is distributed in the hope that it will be useful, but WITHOUT | ||
| 14 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
| 15 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
| 16 | * more details. | ||
| 17 | * | ||
| 18 | * You should have received a copy of the GNU General Public License along with | ||
| 19 | * this program; if not, write to the Free Software Foundation, Inc., 59 | ||
| 20 | * Temple Place - Suite 330, Boston, MA 02111-1307, USA. | ||
| 21 | * | ||
| 22 | * CONTACTS: | ||
| 23 | * | ||
| 24 | * Mark Allyn mark.a.allyn@intel.com | ||
| 25 | * | ||
| 26 | * CHANGES: | ||
| 27 | * | ||
| 28 | * 2009.06.26 Initial publish | ||
| 29 | * | ||
| 30 | */ | ||
| 31 | |||
| 32 | #include <linux/init.h> | ||
| 33 | #include <linux/module.h> | ||
| 34 | #include <linux/fs.h> | ||
| 35 | #include <linux/cdev.h> | ||
| 36 | #include <linux/kdev_t.h> | ||
| 37 | #include <linux/mutex.h> | ||
| 38 | #include <linux/sched.h> | ||
| 39 | #include <linux/mm.h> | ||
| 40 | #include <linux/poll.h> | ||
| 41 | #include <linux/wait.h> | ||
| 42 | #include <linux/pci.h> | ||
| 43 | #include <linux/firmware.h> | ||
| 44 | #include <linux/slab.h> | ||
| 45 | #include <asm/ioctl.h> | ||
| 46 | #include <linux/ioport.h> | ||
| 47 | #include <asm/io.h> | ||
| 48 | #include <linux/interrupt.h> | ||
| 49 | #include <linux/pagemap.h> | ||
| 50 | #include <asm/cacheflush.h> | ||
| 51 | #include "sep_driver_hw_defs.h" | ||
| 52 | #include "sep_driver_config.h" | ||
| 53 | #include "sep_driver_api.h" | ||
| 54 | #include "sep_dev.h" | ||
| 55 | |||
| 56 | #if SEP_DRIVER_ARM_DEBUG_MODE | ||
| 57 | |||
#define CRYS_SEP_ROM_length			0x4000
#define CRYS_SEP_ROM_start_address		0x8000C000UL
#define CRYS_SEP_ROM_start_address_offset	0xC000UL
#define SEP_ROM_BANK_register			0x80008420UL
#define SEP_ROM_BANK_register_offset		0x8420UL

/*
 * THESE 2 definitions are specific to the board - must be
 * defined during integration
 *
 * NOTE(review): this macro was previously defined twice, first as
 * 0x82000000 and then as 0xFF0D0000, which triggers a macro
 * redefinition diagnostic; the first value was dead, so only the
 * board-specific value is kept.
 */
#define SEP_RAR_IO_MEM_REGION_START_ADDRESS	0xFF0D0000

/* 2M size */
| 72 | |||
| 73 | static void sep_load_rom_code(struct sep_device *sep) | ||
| 74 | { | ||
| 75 | /* Index variables */ | ||
| 76 | unsigned long i, k, j; | ||
| 77 | u32 reg; | ||
| 78 | u32 error; | ||
| 79 | u32 warning; | ||
| 80 | |||
| 81 | /* Loading ROM from SEP_ROM_image.h file */ | ||
| 82 | k = sizeof(CRYS_SEP_ROM); | ||
| 83 | |||
| 84 | edbg("SEP Driver: DX_CC_TST_SepRomLoader start\n"); | ||
| 85 | |||
| 86 | edbg("SEP Driver: k is %lu\n", k); | ||
| 87 | edbg("SEP Driver: sep->reg_addr is %p\n", sep->reg_addr); | ||
| 88 | edbg("SEP Driver: CRYS_SEP_ROM_start_address_offset is %p\n", CRYS_SEP_ROM_start_address_offset); | ||
| 89 | |||
| 90 | for (i = 0; i < 4; i++) { | ||
| 91 | /* write bank */ | ||
| 92 | sep_write_reg(sep, SEP_ROM_BANK_register_offset, i); | ||
| 93 | |||
| 94 | for (j = 0; j < CRYS_SEP_ROM_length / 4; j++) { | ||
| 95 | sep_write_reg(sep, CRYS_SEP_ROM_start_address_offset + 4 * j, CRYS_SEP_ROM[i * 0x1000 + j]); | ||
| 96 | |||
| 97 | k = k - 4; | ||
| 98 | |||
| 99 | if (k == 0) { | ||
| 100 | j = CRYS_SEP_ROM_length; | ||
| 101 | i = 4; | ||
| 102 | } | ||
| 103 | } | ||
| 104 | } | ||
| 105 | |||
| 106 | /* reset the SEP */ | ||
| 107 | sep_write_reg(sep, HW_HOST_SEP_SW_RST_REG_ADDR, 0x1); | ||
| 108 | |||
| 109 | /* poll for SEP ROM boot finish */ | ||
| 110 | do | ||
| 111 | reg = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR3_REG_ADDR); | ||
| 112 | while (!reg); | ||
| 113 | |||
| 114 | edbg("SEP Driver: ROM polling ended\n"); | ||
| 115 | |||
| 116 | switch (reg) { | ||
| 117 | case 0x1: | ||
| 118 | /* fatal error - read erro status from GPRO */ | ||
| 119 | error = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR0_REG_ADDR); | ||
| 120 | edbg("SEP Driver: ROM polling case 1\n"); | ||
| 121 | break; | ||
| 122 | case 0x4: | ||
| 123 | /* Cold boot ended successfully */ | ||
| 124 | case 0x8: | ||
| 125 | /* Warmboot ended successfully */ | ||
| 126 | case 0x10: | ||
| 127 | /* ColdWarm boot ended successfully */ | ||
| 128 | error = 0; | ||
| 129 | case 0x2: | ||
| 130 | /* Boot First Phase ended */ | ||
| 131 | warning = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR0_REG_ADDR); | ||
| 132 | case 0x20: | ||
| 133 | edbg("SEP Driver: ROM polling case %d\n", reg); | ||
| 134 | break; | ||
| 135 | } | ||
| 136 | |||
| 137 | } | ||
| 138 | |||
| 139 | #else | ||
| 140 | static void sep_load_rom_code(struct sep_device *sep) { } | ||
| 141 | #endif /* SEP_DRIVER_ARM_DEBUG_MODE */ | ||
| 142 | |||
| 143 | |||
| 144 | |||
| 145 | /*---------------------------------------- | ||
| 146 | DEFINES | ||
| 147 | -----------------------------------------*/ | ||
| 148 | |||
| 149 | #define BASE_ADDRESS_FOR_SYSTEM 0xfffc0000 | ||
| 150 | #define SEP_RAR_IO_MEM_REGION_SIZE 0x40000 | ||
| 151 | |||
| 152 | /*-------------------------------------------- | ||
| 153 | GLOBAL variables | ||
| 154 | --------------------------------------------*/ | ||
| 155 | |||
| 156 | /* debug messages level */ | ||
| 157 | static int debug; | ||
| 158 | module_param(debug, int , 0); | ||
| 159 | MODULE_PARM_DESC(debug, "Flag to enable SEP debug messages"); | ||
| 160 | |||
| 161 | /* Keep this a single static object for now to keep the conversion easy */ | ||
| 162 | |||
| 163 | static struct sep_device sep_instance; | ||
| 164 | static struct sep_device *sep_dev = &sep_instance; | ||
| 165 | |||
| 166 | /* | ||
| 167 | mutex for the access to the internals of the sep driver | ||
| 168 | */ | ||
| 169 | static DEFINE_MUTEX(sep_mutex); | ||
| 170 | |||
| 171 | |||
| 172 | /* wait queue head (event) of the driver */ | ||
| 173 | static DECLARE_WAIT_QUEUE_HEAD(sep_event); | ||
| 174 | |||
| 175 | /** | ||
| 176 | * sep_load_firmware - copy firmware cache/resident | ||
| 177 | * @sep: device we are loading | ||
| 178 | * | ||
| 179 | * This functions copies the cache and resident from their source | ||
| 180 | * location into destination shared memory. | ||
| 181 | */ | ||
| 182 | |||
| 183 | static int sep_load_firmware(struct sep_device *sep) | ||
| 184 | { | ||
| 185 | const struct firmware *fw; | ||
| 186 | char *cache_name = "sep/cache.image.bin"; | ||
| 187 | char *res_name = "sep/resident.image.bin"; | ||
| 188 | int error; | ||
| 189 | |||
| 190 | edbg("SEP Driver:rar_virtual is %p\n", sep->rar_addr); | ||
| 191 | edbg("SEP Driver:rar_bus is %08llx\n", (unsigned long long)sep->rar_bus); | ||
| 192 | |||
| 193 | /* load cache */ | ||
| 194 | error = request_firmware(&fw, cache_name, &sep->pdev->dev); | ||
| 195 | if (error) { | ||
| 196 | edbg("SEP Driver:cant request cache fw\n"); | ||
| 197 | return error; | ||
| 198 | } | ||
| 199 | edbg("SEP Driver:cache %08Zx@%p\n", fw->size, (void *) fw->data); | ||
| 200 | |||
| 201 | memcpy(sep->rar_addr, (void *)fw->data, fw->size); | ||
| 202 | sep->cache_size = fw->size; | ||
| 203 | release_firmware(fw); | ||
| 204 | |||
| 205 | sep->resident_bus = sep->rar_bus + sep->cache_size; | ||
| 206 | sep->resident_addr = sep->rar_addr + sep->cache_size; | ||
| 207 | |||
| 208 | /* load resident */ | ||
| 209 | error = request_firmware(&fw, res_name, &sep->pdev->dev); | ||
| 210 | if (error) { | ||
| 211 | edbg("SEP Driver:cant request res fw\n"); | ||
| 212 | return error; | ||
| 213 | } | ||
| 214 | edbg("sep: res %08Zx@%p\n", fw->size, (void *)fw->data); | ||
| 215 | |||
| 216 | memcpy(sep->resident_addr, (void *) fw->data, fw->size); | ||
| 217 | sep->resident_size = fw->size; | ||
| 218 | release_firmware(fw); | ||
| 219 | |||
| 220 | edbg("sep: resident v %p b %08llx cache v %p b %08llx\n", | ||
| 221 | sep->resident_addr, (unsigned long long)sep->resident_bus, | ||
| 222 | sep->rar_addr, (unsigned long long)sep->rar_bus); | ||
| 223 | return 0; | ||
| 224 | } | ||
| 225 | |||
| 226 | MODULE_FIRMWARE("sep/cache.image.bin"); | ||
| 227 | MODULE_FIRMWARE("sep/resident.image.bin"); | ||
| 228 | |||
| 229 | /** | ||
| 230 | * sep_map_and_alloc_shared_area - allocate shared block | ||
| 231 | * @sep: security processor | ||
| 232 | * @size: size of shared area | ||
| 233 | * | ||
| 234 | * Allocate a shared buffer in host memory that can be used by both the | ||
| 235 | * kernel and also the hardware interface via DMA. | ||
| 236 | */ | ||
| 237 | |||
| 238 | static int sep_map_and_alloc_shared_area(struct sep_device *sep, | ||
| 239 | unsigned long size) | ||
| 240 | { | ||
| 241 | /* shared_addr = ioremap_nocache(0xda00000,shared_area_size); */ | ||
| 242 | sep->shared_addr = dma_alloc_coherent(&sep->pdev->dev, size, | ||
| 243 | &sep->shared_bus, GFP_KERNEL); | ||
| 244 | |||
| 245 | if (!sep->shared_addr) { | ||
| 246 | edbg("sep_driver :shared memory dma_alloc_coherent failed\n"); | ||
| 247 | return -ENOMEM; | ||
| 248 | } | ||
| 249 | /* set the bus address of the shared area */ | ||
| 250 | edbg("sep: shared_addr %ld bytes @%p (bus %08llx)\n", | ||
| 251 | size, sep->shared_addr, (unsigned long long)sep->shared_bus); | ||
| 252 | return 0; | ||
| 253 | } | ||
| 254 | |||
| 255 | /** | ||
| 256 | * sep_unmap_and_free_shared_area - free shared block | ||
| 257 | * @sep: security processor | ||
| 258 | * | ||
| 259 | * Free the shared area allocated to the security processor. The | ||
| 260 | * processor must have finished with this and any final posted | ||
| 261 | * writes cleared before we do so. | ||
| 262 | */ | ||
| 263 | static void sep_unmap_and_free_shared_area(struct sep_device *sep, int size) | ||
| 264 | { | ||
| 265 | dma_free_coherent(&sep->pdev->dev, size, | ||
| 266 | sep->shared_addr, sep->shared_bus); | ||
| 267 | } | ||
| 268 | |||
| 269 | /** | ||
| 270 | * sep_shared_virt_to_bus - convert bus/virt addresses | ||
| 271 | * | ||
| 272 | * Returns the bus address inside the shared area according | ||
| 273 | * to the virtual address. | ||
| 274 | */ | ||
| 275 | |||
| 276 | static dma_addr_t sep_shared_virt_to_bus(struct sep_device *sep, | ||
| 277 | void *virt_address) | ||
| 278 | { | ||
| 279 | dma_addr_t pa = sep->shared_bus + (virt_address - sep->shared_addr); | ||
| 280 | edbg("sep: virt to bus b %08llx v %p\n", (unsigned long long) pa, | ||
| 281 | virt_address); | ||
| 282 | return pa; | ||
| 283 | } | ||
| 284 | |||
| 285 | /** | ||
| 286 | * sep_shared_bus_to_virt - convert bus/virt addresses | ||
| 287 | * | ||
| 288 | * Returns virtual address inside the shared area according | ||
| 289 | * to the bus address. | ||
| 290 | */ | ||
| 291 | |||
| 292 | static void *sep_shared_bus_to_virt(struct sep_device *sep, | ||
| 293 | dma_addr_t bus_address) | ||
| 294 | { | ||
| 295 | return sep->shared_addr + (bus_address - sep->shared_bus); | ||
| 296 | } | ||
| 297 | |||
| 298 | |||
| 299 | /** | ||
| 300 | * sep_try_open - attempt to open a SEP device | ||
| 301 | * @sep: device to attempt to open | ||
| 302 | * | ||
| 303 | * Atomically attempt to get ownership of a SEP device. | ||
| 304 | * Returns 1 if the device was opened, 0 on failure. | ||
| 305 | */ | ||
| 306 | |||
| 307 | static int sep_try_open(struct sep_device *sep) | ||
| 308 | { | ||
| 309 | if (!test_and_set_bit(0, &sep->in_use)) | ||
| 310 | return 1; | ||
| 311 | return 0; | ||
| 312 | } | ||
| 313 | |||
| 314 | /** | ||
| 315 | * sep_open - device open method | ||
| 316 | * @inode: inode of sep device | ||
| 317 | * @filp: file handle to sep device | ||
| 318 | * | ||
| 319 | * Open method for the SEP device. Called when userspace opens | ||
| 320 | * the SEP device node. Must also release the memory data pool | ||
| 321 | * allocations. | ||
| 322 | * | ||
| 323 | * Returns zero on success otherwise an error code. | ||
| 324 | */ | ||
| 325 | |||
| 326 | static int sep_open(struct inode *inode, struct file *filp) | ||
| 327 | { | ||
| 328 | if (sep_dev == NULL) | ||
| 329 | return -ENODEV; | ||
| 330 | |||
| 331 | /* check the blocking mode */ | ||
| 332 | if (filp->f_flags & O_NDELAY) { | ||
| 333 | if (sep_try_open(sep_dev) == 0) | ||
| 334 | return -EAGAIN; | ||
| 335 | } else | ||
| 336 | if (wait_event_interruptible(sep_event, sep_try_open(sep_dev)) < 0) | ||
| 337 | return -EINTR; | ||
| 338 | |||
| 339 | /* Bind to the device, we only have one which makes it easy */ | ||
| 340 | filp->private_data = sep_dev; | ||
| 341 | /* release data pool allocations */ | ||
| 342 | sep_dev->data_pool_bytes_allocated = 0; | ||
| 343 | return 0; | ||
| 344 | } | ||
| 345 | |||
| 346 | |||
| 347 | /** | ||
| 348 | * sep_release - close a SEP device | ||
| 349 | * @inode: inode of SEP device | ||
| 350 | * @filp: file handle being closed | ||
| 351 | * | ||
| 352 | * Called on the final close of a SEP device. As the open protects against | ||
| 353 | * multiple simultaenous opens that means this method is called when the | ||
| 354 | * final reference to the open handle is dropped. | ||
| 355 | */ | ||
| 356 | |||
| 357 | static int sep_release(struct inode *inode, struct file *filp) | ||
| 358 | { | ||
| 359 | struct sep_device *sep = filp->private_data; | ||
| 360 | #if 0 /*!SEP_DRIVER_POLLING_MODE */ | ||
| 361 | /* close IMR */ | ||
| 362 | sep_write_reg(sep, HW_HOST_IMR_REG_ADDR, 0x7FFF); | ||
| 363 | /* release IRQ line */ | ||
| 364 | free_irq(SEP_DIRVER_IRQ_NUM, sep); | ||
| 365 | |||
| 366 | #endif | ||
| 367 | /* Ensure any blocked open progresses */ | ||
| 368 | clear_bit(0, &sep->in_use); | ||
| 369 | wake_up(&sep_event); | ||
| 370 | return 0; | ||
| 371 | } | ||
| 372 | |||
| 373 | /*--------------------------------------------------------------- | ||
| 374 | map function - this functions maps the message shared area | ||
| 375 | -----------------------------------------------------------------*/ | ||
| 376 | static int sep_mmap(struct file *filp, struct vm_area_struct *vma) | ||
| 377 | { | ||
| 378 | dma_addr_t bus_addr; | ||
| 379 | struct sep_device *sep = filp->private_data; | ||
| 380 | |||
| 381 | dbg("-------->SEP Driver: mmap start\n"); | ||
| 382 | |||
| 383 | /* check that the size of the mapped range is as the size of the message | ||
| 384 | shared area */ | ||
| 385 | if ((vma->vm_end - vma->vm_start) > SEP_DRIVER_MMMAP_AREA_SIZE) { | ||
| 386 | edbg("SEP Driver mmap requested size is more than allowed\n"); | ||
| 387 | printk(KERN_WARNING "SEP Driver mmap requested size is more than allowed\n"); | ||
| 388 | printk(KERN_WARNING "SEP Driver vma->vm_end is %08lx\n", vma->vm_end); | ||
| 389 | printk(KERN_WARNING "SEP Driver vma->vm_end is %08lx\n", vma->vm_start); | ||
| 390 | return -EAGAIN; | ||
| 391 | } | ||
| 392 | |||
| 393 | edbg("SEP Driver:sep->shared_addr is %p\n", sep->shared_addr); | ||
| 394 | |||
| 395 | /* get bus address */ | ||
| 396 | bus_addr = sep->shared_bus; | ||
| 397 | |||
| 398 | edbg("SEP Driver: phys_addr is %08llx\n", (unsigned long long)bus_addr); | ||
| 399 | |||
| 400 | if (remap_pfn_range(vma, vma->vm_start, bus_addr >> PAGE_SHIFT, vma->vm_end - vma->vm_start, vma->vm_page_prot)) { | ||
| 401 | edbg("SEP Driver remap_page_range failed\n"); | ||
| 402 | printk(KERN_WARNING "SEP Driver remap_page_range failed\n"); | ||
| 403 | return -EAGAIN; | ||
| 404 | } | ||
| 405 | |||
| 406 | dbg("SEP Driver:<-------- mmap end\n"); | ||
| 407 | |||
| 408 | return 0; | ||
| 409 | } | ||
| 410 | |||
| 411 | |||
| 412 | /*----------------------------------------------- | ||
| 413 | poll function | ||
| 414 | *----------------------------------------------*/ | ||
| 415 | static unsigned int sep_poll(struct file *filp, poll_table * wait) | ||
| 416 | { | ||
| 417 | unsigned long count; | ||
| 418 | unsigned int mask = 0; | ||
| 419 | unsigned long retval = 0; /* flow id */ | ||
| 420 | struct sep_device *sep = filp->private_data; | ||
| 421 | |||
| 422 | dbg("---------->SEP Driver poll: start\n"); | ||
| 423 | |||
| 424 | |||
| 425 | #if SEP_DRIVER_POLLING_MODE | ||
| 426 | |||
| 427 | while (sep->send_ct != (retval & 0x7FFFFFFF)) { | ||
| 428 | retval = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR2_REG_ADDR); | ||
| 429 | |||
| 430 | for (count = 0; count < 10 * 4; count += 4) | ||
| 431 | edbg("Poll Debug Word %lu of the message is %lu\n", count, *((unsigned long *) (sep->shared_addr + SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES + count))); | ||
| 432 | } | ||
| 433 | |||
| 434 | sep->reply_ct++; | ||
| 435 | #else | ||
| 436 | /* add the event to the polling wait table */ | ||
| 437 | poll_wait(filp, &sep_event, wait); | ||
| 438 | |||
| 439 | #endif | ||
| 440 | |||
| 441 | edbg("sep->send_ct is %lu\n", sep->send_ct); | ||
| 442 | edbg("sep->reply_ct is %lu\n", sep->reply_ct); | ||
| 443 | |||
| 444 | /* check if the data is ready */ | ||
| 445 | if (sep->send_ct == sep->reply_ct) { | ||
| 446 | for (count = 0; count < 12 * 4; count += 4) | ||
| 447 | edbg("Sep Mesg Word %lu of the message is %lu\n", count, *((unsigned long *) (sep->shared_addr + count))); | ||
| 448 | |||
| 449 | for (count = 0; count < 10 * 4; count += 4) | ||
| 450 | edbg("Debug Data Word %lu of the message is %lu\n", count, *((unsigned long *) (sep->shared_addr + 0x1800 + count))); | ||
| 451 | |||
| 452 | retval = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR2_REG_ADDR); | ||
| 453 | edbg("retval is %lu\n", retval); | ||
| 454 | /* check if the this is sep reply or request */ | ||
| 455 | if (retval >> 31) { | ||
| 456 | edbg("SEP Driver: sep request in\n"); | ||
| 457 | /* request */ | ||
| 458 | mask |= POLLOUT | POLLWRNORM; | ||
| 459 | } else { | ||
| 460 | edbg("SEP Driver: sep reply in\n"); | ||
| 461 | mask |= POLLIN | POLLRDNORM; | ||
| 462 | } | ||
| 463 | } | ||
| 464 | dbg("SEP Driver:<-------- poll exit\n"); | ||
| 465 | return mask; | ||
| 466 | } | ||
| 467 | |||
| 468 | /** | ||
| 469 | * sep_time_address - address in SEP memory of time | ||
| 470 | * @sep: SEP device we want the address from | ||
| 471 | * | ||
| 472 | * Return the address of the two dwords in memory used for time | ||
| 473 | * setting. | ||
| 474 | */ | ||
| 475 | |||
| 476 | static u32 *sep_time_address(struct sep_device *sep) | ||
| 477 | { | ||
| 478 | return sep->shared_addr + SEP_DRIVER_SYSTEM_TIME_MEMORY_OFFSET_IN_BYTES; | ||
| 479 | } | ||
| 480 | |||
| 481 | /** | ||
| 482 | * sep_set_time - set the SEP time | ||
| 483 | * @sep: the SEP we are setting the time for | ||
| 484 | * | ||
| 485 | * Calculates time and sets it at the predefined address. | ||
| 486 | * Called with the sep mutex held. | ||
| 487 | */ | ||
| 488 | static unsigned long sep_set_time(struct sep_device *sep) | ||
| 489 | { | ||
| 490 | struct timeval time; | ||
| 491 | u32 *time_addr; /* address of time as seen by the kernel */ | ||
| 492 | |||
| 493 | |||
| 494 | dbg("sep:sep_set_time start\n"); | ||
| 495 | |||
| 496 | do_gettimeofday(&time); | ||
| 497 | |||
| 498 | /* set value in the SYSTEM MEMORY offset */ | ||
| 499 | time_addr = sep_time_address(sep); | ||
| 500 | |||
| 501 | time_addr[0] = SEP_TIME_VAL_TOKEN; | ||
| 502 | time_addr[1] = time.tv_sec; | ||
| 503 | |||
| 504 | edbg("SEP Driver:time.tv_sec is %lu\n", time.tv_sec); | ||
| 505 | edbg("SEP Driver:time_addr is %p\n", time_addr); | ||
| 506 | edbg("SEP Driver:sep->shared_addr is %p\n", sep->shared_addr); | ||
| 507 | |||
| 508 | return time.tv_sec; | ||
| 509 | } | ||
| 510 | |||
| 511 | /** | ||
| 512 | * sep_dump_message - dump the message that is pending | ||
| 513 | * @sep: sep device | ||
| 514 | * | ||
| 515 | * Dump out the message pending in the shared message area | ||
| 516 | */ | ||
| 517 | |||
| 518 | static void sep_dump_message(struct sep_device *sep) | ||
| 519 | { | ||
| 520 | int count; | ||
| 521 | for (count = 0; count < 12 * 4; count += 4) | ||
| 522 | edbg("Word %d of the message is %u\n", count, *((u32 *) (sep->shared_addr + count))); | ||
| 523 | } | ||
| 524 | |||
| 525 | /** | ||
| 526 | * sep_send_command_handler - kick off a command | ||
| 527 | * @sep: sep being signalled | ||
| 528 | * | ||
| 529 | * This function raises interrupt to SEP that signals that is has a new | ||
| 530 | * command from the host | ||
| 531 | */ | ||
| 532 | |||
| 533 | static void sep_send_command_handler(struct sep_device *sep) | ||
| 534 | { | ||
| 535 | dbg("sep:sep_send_command_handler start\n"); | ||
| 536 | |||
| 537 | mutex_lock(&sep_mutex); | ||
| 538 | sep_set_time(sep); | ||
| 539 | |||
| 540 | /* FIXME: flush cache */ | ||
| 541 | flush_cache_all(); | ||
| 542 | |||
| 543 | sep_dump_message(sep); | ||
| 544 | /* update counter */ | ||
| 545 | sep->send_ct++; | ||
| 546 | /* send interrupt to SEP */ | ||
| 547 | sep_write_reg(sep, HW_HOST_HOST_SEP_GPR0_REG_ADDR, 0x2); | ||
| 548 | dbg("SEP Driver:<-------- sep_send_command_handler end\n"); | ||
| 549 | mutex_unlock(&sep_mutex); | ||
| 550 | return; | ||
| 551 | } | ||
| 552 | |||
| 553 | /** | ||
| 554 | * sep_send_reply_command_handler - kick off a command reply | ||
| 555 | * @sep: sep being signalled | ||
| 556 | * | ||
| 557 | * This function raises interrupt to SEP that signals that is has a new | ||
| 558 | * command from the host | ||
| 559 | */ | ||
| 560 | |||
| 561 | static void sep_send_reply_command_handler(struct sep_device *sep) | ||
| 562 | { | ||
| 563 | dbg("sep:sep_send_reply_command_handler start\n"); | ||
| 564 | |||
| 565 | /* flash cache */ | ||
| 566 | flush_cache_all(); | ||
| 567 | |||
| 568 | sep_dump_message(sep); | ||
| 569 | |||
| 570 | mutex_lock(&sep_mutex); | ||
| 571 | sep->send_ct++; /* update counter */ | ||
| 572 | /* send the interrupt to SEP */ | ||
| 573 | sep_write_reg(sep, HW_HOST_HOST_SEP_GPR2_REG_ADDR, sep->send_ct); | ||
| 574 | /* update both counters */ | ||
| 575 | sep->send_ct++; | ||
| 576 | sep->reply_ct++; | ||
| 577 | mutex_unlock(&sep_mutex); | ||
| 578 | dbg("sep: sep_send_reply_command_handler end\n"); | ||
| 579 | } | ||
| 580 | |||
| 581 | /* | ||
| 582 | This function handles the allocate data pool memory request | ||
| 583 | This function returns calculates the bus address of the | ||
| 584 | allocated memory, and the offset of this area from the mapped address. | ||
| 585 | Therefore, the FVOs in user space can calculate the exact virtual | ||
| 586 | address of this allocated memory | ||
| 587 | */ | ||
| 588 | static int sep_allocate_data_pool_memory_handler(struct sep_device *sep, | ||
| 589 | unsigned long arg) | ||
| 590 | { | ||
| 591 | int error; | ||
| 592 | struct sep_driver_alloc_t command_args; | ||
| 593 | |||
| 594 | dbg("SEP Driver:--------> sep_allocate_data_pool_memory_handler start\n"); | ||
| 595 | |||
| 596 | error = copy_from_user(&command_args, (void *) arg, sizeof(struct sep_driver_alloc_t)); | ||
| 597 | if (error) { | ||
| 598 | error = -EFAULT; | ||
| 599 | goto end_function; | ||
| 600 | } | ||
| 601 | |||
| 602 | /* allocate memory */ | ||
| 603 | if ((sep->data_pool_bytes_allocated + command_args.num_bytes) > SEP_DRIVER_DATA_POOL_SHARED_AREA_SIZE_IN_BYTES) { | ||
| 604 | error = -ENOMEM; | ||
| 605 | goto end_function; | ||
| 606 | } | ||
| 607 | |||
| 608 | /* set the virtual and bus address */ | ||
| 609 | command_args.offset = SEP_DRIVER_DATA_POOL_AREA_OFFSET_IN_BYTES + sep->data_pool_bytes_allocated; | ||
| 610 | command_args.phys_address = sep->shared_bus + SEP_DRIVER_DATA_POOL_AREA_OFFSET_IN_BYTES + sep->data_pool_bytes_allocated; | ||
| 611 | |||
| 612 | /* write the memory back to the user space */ | ||
| 613 | error = copy_to_user((void *) arg, (void *) &command_args, sizeof(struct sep_driver_alloc_t)); | ||
| 614 | if (error) { | ||
| 615 | error = -EFAULT; | ||
| 616 | goto end_function; | ||
| 617 | } | ||
| 618 | |||
| 619 | /* set the allocation */ | ||
| 620 | sep->data_pool_bytes_allocated += command_args.num_bytes; | ||
| 621 | |||
| 622 | end_function: | ||
| 623 | dbg("SEP Driver:<-------- sep_allocate_data_pool_memory_handler end\n"); | ||
| 624 | return error; | ||
| 625 | } | ||
| 626 | |||
| 627 | /* | ||
| 628 | This function handles write into allocated data pool command | ||
| 629 | */ | ||
| 630 | static int sep_write_into_data_pool_handler(struct sep_device *sep, unsigned long arg) | ||
| 631 | { | ||
| 632 | int error; | ||
| 633 | void *virt_address; | ||
| 634 | unsigned long va; | ||
| 635 | unsigned long app_in_address; | ||
| 636 | unsigned long num_bytes; | ||
| 637 | void *data_pool_area_addr; | ||
| 638 | |||
| 639 | dbg("SEP Driver:--------> sep_write_into_data_pool_handler start\n"); | ||
| 640 | |||
| 641 | /* get the application address */ | ||
| 642 | error = get_user(app_in_address, &(((struct sep_driver_write_t *) arg)->app_address)); | ||
| 643 | if (error) | ||
| 644 | goto end_function; | ||
| 645 | |||
| 646 | /* get the virtual kernel address address */ | ||
| 647 | error = get_user(va, &(((struct sep_driver_write_t *) arg)->datapool_address)); | ||
| 648 | if (error) | ||
| 649 | goto end_function; | ||
| 650 | virt_address = (void *)va; | ||
| 651 | |||
| 652 | /* get the number of bytes */ | ||
| 653 | error = get_user(num_bytes, &(((struct sep_driver_write_t *) arg)->num_bytes)); | ||
| 654 | if (error) | ||
| 655 | goto end_function; | ||
| 656 | |||
| 657 | /* calculate the start of the data pool */ | ||
| 658 | data_pool_area_addr = sep->shared_addr + SEP_DRIVER_DATA_POOL_AREA_OFFSET_IN_BYTES; | ||
| 659 | |||
| 660 | |||
| 661 | /* check that the range of the virtual kernel address is correct */ | ||
| 662 | if (virt_address < data_pool_area_addr || virt_address > (data_pool_area_addr + SEP_DRIVER_DATA_POOL_SHARED_AREA_SIZE_IN_BYTES)) { | ||
| 663 | error = -EINVAL; | ||
| 664 | goto end_function; | ||
| 665 | } | ||
| 666 | /* copy the application data */ | ||
| 667 | error = copy_from_user(virt_address, (void *) app_in_address, num_bytes); | ||
| 668 | if (error) | ||
| 669 | error = -EFAULT; | ||
| 670 | end_function: | ||
| 671 | dbg("SEP Driver:<-------- sep_write_into_data_pool_handler end\n"); | ||
| 672 | return error; | ||
| 673 | } | ||
| 674 | |||
| 675 | /* | ||
| 676 | this function handles the read from data pool command | ||
| 677 | */ | ||
| 678 | static int sep_read_from_data_pool_handler(struct sep_device *sep, unsigned long arg) | ||
| 679 | { | ||
| 680 | int error; | ||
| 681 | /* virtual address of dest application buffer */ | ||
| 682 | unsigned long app_out_address; | ||
| 683 | /* virtual address of the data pool */ | ||
| 684 | unsigned long va; | ||
| 685 | void *virt_address; | ||
| 686 | unsigned long num_bytes; | ||
| 687 | void *data_pool_area_addr; | ||
| 688 | |||
| 689 | dbg("SEP Driver:--------> sep_read_from_data_pool_handler start\n"); | ||
| 690 | |||
| 691 | /* get the application address */ | ||
| 692 | error = get_user(app_out_address, &(((struct sep_driver_write_t *) arg)->app_address)); | ||
| 693 | if (error) | ||
| 694 | goto end_function; | ||
| 695 | |||
| 696 | /* get the virtual kernel address address */ | ||
| 697 | error = get_user(va, &(((struct sep_driver_write_t *) arg)->datapool_address)); | ||
| 698 | if (error) | ||
| 699 | goto end_function; | ||
| 700 | virt_address = (void *)va; | ||
| 701 | |||
| 702 | /* get the number of bytes */ | ||
| 703 | error = get_user(num_bytes, &(((struct sep_driver_write_t *) arg)->num_bytes)); | ||
| 704 | if (error) | ||
| 705 | goto end_function; | ||
| 706 | |||
| 707 | /* calculate the start of the data pool */ | ||
| 708 | data_pool_area_addr = sep->shared_addr + SEP_DRIVER_DATA_POOL_AREA_OFFSET_IN_BYTES; | ||
| 709 | |||
| 710 | /* FIXME: These are incomplete all over the driver: what about + len | ||
| 711 | and when doing that also overflows */ | ||
| 712 | /* check that the range of the virtual kernel address is correct */ | ||
| 713 | if (virt_address < data_pool_area_addr || virt_address > data_pool_area_addr + SEP_DRIVER_DATA_POOL_SHARED_AREA_SIZE_IN_BYTES) { | ||
| 714 | error = -EINVAL; | ||
| 715 | goto end_function; | ||
| 716 | } | ||
| 717 | |||
| 718 | /* copy the application data */ | ||
| 719 | error = copy_to_user((void *) app_out_address, virt_address, num_bytes); | ||
| 720 | if (error) | ||
| 721 | error = -EFAULT; | ||
| 722 | end_function: | ||
| 723 | dbg("SEP Driver:<-------- sep_read_from_data_pool_handler end\n"); | ||
| 724 | return error; | ||
| 725 | } | ||
| 726 | |||
| 727 | /* | ||
| 728 | This function releases all the application virtual buffer physical pages, | ||
| 729 | that were previously locked | ||
| 730 | */ | ||
| 731 | static int sep_free_dma_pages(struct page **page_array_ptr, unsigned long num_pages, unsigned long dirtyFlag) | ||
| 732 | { | ||
| 733 | unsigned long count; | ||
| 734 | |||
| 735 | if (dirtyFlag) { | ||
| 736 | for (count = 0; count < num_pages; count++) { | ||
| 737 | /* the out array was written, therefore the data was changed */ | ||
| 738 | if (!PageReserved(page_array_ptr[count])) | ||
| 739 | SetPageDirty(page_array_ptr[count]); | ||
| 740 | page_cache_release(page_array_ptr[count]); | ||
| 741 | } | ||
| 742 | } else { | ||
| 743 | /* free in pages - the data was only read, therefore no update was done | ||
| 744 | on those pages */ | ||
| 745 | for (count = 0; count < num_pages; count++) | ||
| 746 | page_cache_release(page_array_ptr[count]); | ||
| 747 | } | ||
| 748 | |||
| 749 | if (page_array_ptr) | ||
| 750 | /* free the array */ | ||
| 751 | kfree(page_array_ptr); | ||
| 752 | |||
| 753 | return 0; | ||
| 754 | } | ||
| 755 | |||
| 756 | /* | ||
| 757 | This function locks all the physical pages of the kernel virtual buffer | ||
| 758 | and construct a basic lli array, where each entry holds the physical | ||
| 759 | page address and the size that application data holds in this physical pages | ||
| 760 | */ | ||
| 761 | static int sep_lock_kernel_pages(struct sep_device *sep, | ||
| 762 | unsigned long kernel_virt_addr, | ||
| 763 | unsigned long data_size, | ||
| 764 | unsigned long *num_pages_ptr, | ||
| 765 | struct sep_lli_entry_t **lli_array_ptr, | ||
| 766 | struct page ***page_array_ptr) | ||
| 767 | { | ||
| 768 | int error = 0; | ||
| 769 | /* the the page of the end address of the user space buffer */ | ||
| 770 | unsigned long end_page; | ||
| 771 | /* the page of the start address of the user space buffer */ | ||
| 772 | unsigned long start_page; | ||
| 773 | /* the range in pages */ | ||
| 774 | unsigned long num_pages; | ||
| 775 | struct sep_lli_entry_t *lli_array; | ||
| 776 | /* next kernel address to map */ | ||
| 777 | unsigned long next_kernel_address; | ||
| 778 | unsigned long count; | ||
| 779 | |||
| 780 | dbg("SEP Driver:--------> sep_lock_kernel_pages start\n"); | ||
| 781 | |||
| 782 | /* set start and end pages and num pages */ | ||
| 783 | end_page = (kernel_virt_addr + data_size - 1) >> PAGE_SHIFT; | ||
| 784 | start_page = kernel_virt_addr >> PAGE_SHIFT; | ||
| 785 | num_pages = end_page - start_page + 1; | ||
| 786 | |||
| 787 | edbg("SEP Driver: kernel_virt_addr is %08lx\n", kernel_virt_addr); | ||
| 788 | edbg("SEP Driver: data_size is %lu\n", data_size); | ||
| 789 | edbg("SEP Driver: start_page is %lx\n", start_page); | ||
| 790 | edbg("SEP Driver: end_page is %lx\n", end_page); | ||
| 791 | edbg("SEP Driver: num_pages is %lu\n", num_pages); | ||
| 792 | |||
| 793 | lli_array = kmalloc(sizeof(struct sep_lli_entry_t) * num_pages, GFP_ATOMIC); | ||
| 794 | if (!lli_array) { | ||
| 795 | edbg("SEP Driver: kmalloc for lli_array failed\n"); | ||
| 796 | error = -ENOMEM; | ||
| 797 | goto end_function; | ||
| 798 | } | ||
| 799 | |||
| 800 | /* set the start address of the first page - app data may start not at | ||
| 801 | the beginning of the page */ | ||
| 802 | lli_array[0].physical_address = (unsigned long) virt_to_phys((unsigned long *) kernel_virt_addr); | ||
| 803 | |||
| 804 | /* check that not all the data is in the first page only */ | ||
| 805 | if ((PAGE_SIZE - (kernel_virt_addr & (~PAGE_MASK))) >= data_size) | ||
| 806 | lli_array[0].block_size = data_size; | ||
| 807 | else | ||
| 808 | lli_array[0].block_size = PAGE_SIZE - (kernel_virt_addr & (~PAGE_MASK)); | ||
| 809 | |||
| 810 | /* debug print */ | ||
| 811 | dbg("lli_array[0].physical_address is %08lx, lli_array[0].block_size is %lu\n", lli_array[0].physical_address, lli_array[0].block_size); | ||
| 812 | |||
| 813 | /* advance the address to the start of the next page */ | ||
| 814 | next_kernel_address = (kernel_virt_addr & PAGE_MASK) + PAGE_SIZE; | ||
| 815 | |||
| 816 | /* go from the second page to the prev before last */ | ||
| 817 | for (count = 1; count < (num_pages - 1); count++) { | ||
| 818 | lli_array[count].physical_address = (unsigned long) virt_to_phys((unsigned long *) next_kernel_address); | ||
| 819 | lli_array[count].block_size = PAGE_SIZE; | ||
| 820 | |||
| 821 | edbg("lli_array[%lu].physical_address is %08lx, lli_array[%lu].block_size is %lu\n", count, lli_array[count].physical_address, count, lli_array[count].block_size); | ||
| 822 | next_kernel_address += PAGE_SIZE; | ||
| 823 | } | ||
| 824 | |||
| 825 | /* if more then 1 pages locked - then update for the last page size needed */ | ||
| 826 | if (num_pages > 1) { | ||
| 827 | /* update the address of the last page */ | ||
| 828 | lli_array[count].physical_address = (unsigned long) virt_to_phys((unsigned long *) next_kernel_address); | ||
| 829 | |||
| 830 | /* set the size of the last page */ | ||
| 831 | lli_array[count].block_size = (kernel_virt_addr + data_size) & (~PAGE_MASK); | ||
| 832 | |||
| 833 | if (lli_array[count].block_size == 0) { | ||
| 834 | dbg("app_virt_addr is %08lx\n", kernel_virt_addr); | ||
| 835 | dbg("data_size is %lu\n", data_size); | ||
| 836 | while (1); | ||
| 837 | } | ||
| 838 | |||
| 839 | edbg("lli_array[%lu].physical_address is %08lx, lli_array[%lu].block_size is %lu\n", count, lli_array[count].physical_address, count, lli_array[count].block_size); | ||
| 840 | } | ||
| 841 | /* set output params */ | ||
| 842 | *lli_array_ptr = lli_array; | ||
| 843 | *num_pages_ptr = num_pages; | ||
| 844 | *page_array_ptr = 0; | ||
| 845 | end_function: | ||
| 846 | dbg("SEP Driver:<-------- sep_lock_kernel_pages end\n"); | ||
| 847 | return 0; | ||
| 848 | } | ||
| 849 | |||
/*
  This function locks all the physical pages of the application's virtual
  buffer and constructs a basic lli array, where each entry holds the
  physical page address and the size of the application data held in that
  physical page
*/
| 855 | static int sep_lock_user_pages(struct sep_device *sep, | ||
| 856 | unsigned long app_virt_addr, | ||
| 857 | unsigned long data_size, | ||
| 858 | unsigned long *num_pages_ptr, | ||
| 859 | struct sep_lli_entry_t **lli_array_ptr, | ||
| 860 | struct page ***page_array_ptr) | ||
| 861 | { | ||
| 862 | int error = 0; | ||
| 863 | /* the the page of the end address of the user space buffer */ | ||
| 864 | unsigned long end_page; | ||
| 865 | /* the page of the start address of the user space buffer */ | ||
| 866 | unsigned long start_page; | ||
| 867 | /* the range in pages */ | ||
| 868 | unsigned long num_pages; | ||
| 869 | struct page **page_array; | ||
| 870 | struct sep_lli_entry_t *lli_array; | ||
| 871 | unsigned long count; | ||
| 872 | int result; | ||
| 873 | |||
| 874 | dbg("SEP Driver:--------> sep_lock_user_pages start\n"); | ||
| 875 | |||
| 876 | /* set start and end pages and num pages */ | ||
| 877 | end_page = (app_virt_addr + data_size - 1) >> PAGE_SHIFT; | ||
| 878 | start_page = app_virt_addr >> PAGE_SHIFT; | ||
| 879 | num_pages = end_page - start_page + 1; | ||
| 880 | |||
| 881 | edbg("SEP Driver: app_virt_addr is %08lx\n", app_virt_addr); | ||
| 882 | edbg("SEP Driver: data_size is %lu\n", data_size); | ||
| 883 | edbg("SEP Driver: start_page is %lu\n", start_page); | ||
| 884 | edbg("SEP Driver: end_page is %lu\n", end_page); | ||
| 885 | edbg("SEP Driver: num_pages is %lu\n", num_pages); | ||
| 886 | |||
| 887 | /* allocate array of pages structure pointers */ | ||
| 888 | page_array = kmalloc(sizeof(struct page *) * num_pages, GFP_ATOMIC); | ||
| 889 | if (!page_array) { | ||
| 890 | edbg("SEP Driver: kmalloc for page_array failed\n"); | ||
| 891 | |||
| 892 | error = -ENOMEM; | ||
| 893 | goto end_function; | ||
| 894 | } | ||
| 895 | |||
| 896 | lli_array = kmalloc(sizeof(struct sep_lli_entry_t) * num_pages, GFP_ATOMIC); | ||
| 897 | if (!lli_array) { | ||
| 898 | edbg("SEP Driver: kmalloc for lli_array failed\n"); | ||
| 899 | |||
| 900 | error = -ENOMEM; | ||
| 901 | goto end_function_with_error1; | ||
| 902 | } | ||
| 903 | |||
| 904 | /* convert the application virtual address into a set of physical */ | ||
| 905 | down_read(¤t->mm->mmap_sem); | ||
| 906 | result = get_user_pages(current, current->mm, app_virt_addr, num_pages, 1, 0, page_array, 0); | ||
| 907 | up_read(¤t->mm->mmap_sem); | ||
| 908 | |||
| 909 | /* check the number of pages locked - if not all then exit with error */ | ||
| 910 | if (result != num_pages) { | ||
| 911 | dbg("SEP Driver: not all pages locked by get_user_pages\n"); | ||
| 912 | |||
| 913 | error = -ENOMEM; | ||
| 914 | goto end_function_with_error2; | ||
| 915 | } | ||
| 916 | |||
| 917 | /* flush the cache */ | ||
| 918 | for (count = 0; count < num_pages; count++) | ||
| 919 | flush_dcache_page(page_array[count]); | ||
| 920 | |||
| 921 | /* set the start address of the first page - app data may start not at | ||
| 922 | the beginning of the page */ | ||
| 923 | lli_array[0].physical_address = ((unsigned long) page_to_phys(page_array[0])) + (app_virt_addr & (~PAGE_MASK)); | ||
| 924 | |||
| 925 | /* check that not all the data is in the first page only */ | ||
| 926 | if ((PAGE_SIZE - (app_virt_addr & (~PAGE_MASK))) >= data_size) | ||
| 927 | lli_array[0].block_size = data_size; | ||
| 928 | else | ||
| 929 | lli_array[0].block_size = PAGE_SIZE - (app_virt_addr & (~PAGE_MASK)); | ||
| 930 | |||
| 931 | /* debug print */ | ||
| 932 | dbg("lli_array[0].physical_address is %08lx, lli_array[0].block_size is %lu\n", lli_array[0].physical_address, lli_array[0].block_size); | ||
| 933 | |||
| 934 | /* go from the second page to the prev before last */ | ||
| 935 | for (count = 1; count < (num_pages - 1); count++) { | ||
| 936 | lli_array[count].physical_address = (unsigned long) page_to_phys(page_array[count]); | ||
| 937 | lli_array[count].block_size = PAGE_SIZE; | ||
| 938 | |||
| 939 | edbg("lli_array[%lu].physical_address is %08lx, lli_array[%lu].block_size is %lu\n", count, lli_array[count].physical_address, count, lli_array[count].block_size); | ||
| 940 | } | ||
| 941 | |||
| 942 | /* if more then 1 pages locked - then update for the last page size needed */ | ||
| 943 | if (num_pages > 1) { | ||
| 944 | /* update the address of the last page */ | ||
| 945 | lli_array[count].physical_address = (unsigned long) page_to_phys(page_array[count]); | ||
| 946 | |||
| 947 | /* set the size of the last page */ | ||
| 948 | lli_array[count].block_size = (app_virt_addr + data_size) & (~PAGE_MASK); | ||
| 949 | |||
| 950 | if (lli_array[count].block_size == 0) { | ||
| 951 | dbg("app_virt_addr is %08lx\n", app_virt_addr); | ||
| 952 | dbg("data_size is %lu\n", data_size); | ||
| 953 | while (1); | ||
| 954 | } | ||
| 955 | edbg("lli_array[%lu].physical_address is %08lx, lli_array[%lu].block_size is %lu\n", | ||
| 956 | count, lli_array[count].physical_address, | ||
| 957 | count, lli_array[count].block_size); | ||
| 958 | } | ||
| 959 | |||
| 960 | /* set output params */ | ||
| 961 | *lli_array_ptr = lli_array; | ||
| 962 | *num_pages_ptr = num_pages; | ||
| 963 | *page_array_ptr = page_array; | ||
| 964 | goto end_function; | ||
| 965 | |||
| 966 | end_function_with_error2: | ||
| 967 | /* release the cache */ | ||
| 968 | for (count = 0; count < num_pages; count++) | ||
| 969 | page_cache_release(page_array[count]); | ||
| 970 | kfree(lli_array); | ||
| 971 | end_function_with_error1: | ||
| 972 | kfree(page_array); | ||
| 973 | end_function: | ||
| 974 | dbg("SEP Driver:<-------- sep_lock_user_pages end\n"); | ||
| 975 | return 0; | ||
| 976 | } | ||
| 977 | |||
| 978 | |||
/*
  this function calculates the size of data that can be inserted into
  the lli table from this array; the condition is that either the table
  is full (all entries are filled), or there are no more entries in the
  lli array
*/
| 984 | static unsigned long sep_calculate_lli_table_max_size(struct sep_lli_entry_t *lli_in_array_ptr, unsigned long num_array_entries) | ||
| 985 | { | ||
| 986 | unsigned long table_data_size = 0; | ||
| 987 | unsigned long counter; | ||
| 988 | |||
| 989 | /* calculate the data in the out lli table if till we fill the whole | ||
| 990 | table or till the data has ended */ | ||
| 991 | for (counter = 0; (counter < (SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP - 1)) && (counter < num_array_entries); counter++) | ||
| 992 | table_data_size += lli_in_array_ptr[counter].block_size; | ||
| 993 | return table_data_size; | ||
| 994 | } | ||
| 995 | |||
/*
  this function builds one lli table from the lli_array according to
  the given size of data
*/
static void sep_build_lli_table(struct sep_lli_entry_t *lli_array_ptr, struct sep_lli_entry_t *lli_table_ptr, unsigned long *num_processed_entries_ptr, unsigned long *num_table_entries_ptr, unsigned long table_data_size)
{
	/* amount of data copied into the table so far */
	unsigned long curr_table_data_size;
	/* counter of lli array entry */
	unsigned long array_counter;

	dbg("SEP Driver:--------> sep_build_lli_table start\n");

	/* init current table data size and lli array entry counter;
	   the entry count starts at 1 to account for the trailing info
	   entry appended after the loop */
	curr_table_data_size = 0;
	array_counter = 0;
	*num_table_entries_ptr = 1;

	edbg("SEP Driver:table_data_size is %lu\n", table_data_size);

	/* fill the table till table size reaches the needed amount */
	while (curr_table_data_size < table_data_size) {
		/* update the number of entries in table */
		(*num_table_entries_ptr)++;

		/* copy the next array entry into the table */
		lli_table_ptr->physical_address = lli_array_ptr[array_counter].physical_address;
		lli_table_ptr->block_size = lli_array_ptr[array_counter].block_size;
		curr_table_data_size += lli_table_ptr->block_size;

		edbg("SEP Driver:lli_table_ptr is %08lx\n", (unsigned long) lli_table_ptr);
		edbg("SEP Driver:lli_table_ptr->physical_address is %08lx\n", lli_table_ptr->physical_address);
		edbg("SEP Driver:lli_table_ptr->block_size is %lu\n", lli_table_ptr->block_size);

		/* check for overflow of the table data: the last copied entry
		   overshot the requested size, so split it - trim the table
		   entry to the exact remainder and leave the unconsumed tail
		   in the array for the next table */
		if (curr_table_data_size > table_data_size) {
			edbg("SEP Driver:curr_table_data_size > table_data_size\n");

			/* update the size of block in the table */
			lli_table_ptr->block_size -= (curr_table_data_size - table_data_size);

			/* update the physical address in the lli array
			   (advance past the portion consumed by this table) */
			lli_array_ptr[array_counter].physical_address += lli_table_ptr->block_size;

			/* update the block size left in the lli array;
			   note array_counter is NOT advanced - the entry is
			   only partially consumed */
			lli_array_ptr[array_counter].block_size = (curr_table_data_size - table_data_size);
		} else
			/* advance to the next entry in the lli_array */
			array_counter++;

		edbg("SEP Driver:lli_table_ptr->physical_address is %08lx\n", lli_table_ptr->physical_address);
		edbg("SEP Driver:lli_table_ptr->block_size is %lu\n", lli_table_ptr->block_size);

		/* move to the next entry in table */
		lli_table_ptr++;
	}

	/* set the info entry to default: 0xffffffff marks "no next table";
	   a later table, if built, overwrites these fields to chain to it */
	lli_table_ptr->physical_address = 0xffffffff;
	lli_table_ptr->block_size = 0;

	edbg("SEP Driver:lli_table_ptr is %08lx\n", (unsigned long) lli_table_ptr);
	edbg("SEP Driver:lli_table_ptr->physical_address is %08lx\n", lli_table_ptr->physical_address);
	edbg("SEP Driver:lli_table_ptr->block_size is %lu\n", lli_table_ptr->block_size);

	/* set the output parameter: how many array entries were fully
	   consumed by this table */
	*num_processed_entries_ptr += array_counter;

	edbg("SEP Driver:*num_processed_entries_ptr is %lu\n", *num_processed_entries_ptr);
	dbg("SEP Driver:<-------- sep_build_lli_table end\n");
	return;
}
| 1066 | |||
/*
  this function walks the chain of previously created lli tables and
  prints all their data
*/
static void sep_debug_print_lli_tables(struct sep_device *sep, struct sep_lli_entry_t *lli_table_ptr, unsigned long num_table_entries, unsigned long table_data_size)
{
	/* index of the table currently being printed (1-based) */
	unsigned long table_count;
	/* index of the entry within the current table */
	unsigned long entries_count;

	dbg("SEP Driver:--------> sep_debug_print_lli_tables start\n");

	table_count = 1;
	/* 0xffffffff in the chain pointer marks the end of the table list */
	while ((unsigned long) lli_table_ptr != 0xffffffff) {
		edbg("SEP Driver: lli table %08lx, table_data_size is %lu\n", table_count, table_data_size);
		edbg("SEP Driver: num_table_entries is %lu\n", num_table_entries);

		/* print entries of the table (without info entry) */
		for (entries_count = 0; entries_count < num_table_entries; entries_count++, lli_table_ptr++) {
			edbg("SEP Driver:lli_table_ptr address is %08lx\n", (unsigned long) lli_table_ptr);
			edbg("SEP Driver:phys address is %08lx block size is %lu\n", lli_table_ptr->physical_address, lli_table_ptr->block_size);
		}

		/* point to the info entry (the loop above walked one past it) */
		lli_table_ptr--;

		edbg("SEP Driver:phys lli_table_ptr->block_size is %lu\n", lli_table_ptr->block_size);
		edbg("SEP Driver:phys lli_table_ptr->physical_address is %08lx\n", lli_table_ptr->physical_address);


		/* the info entry packs the next table's data size in the low
		   24 bits of block_size and its entry count in the high 8
		   bits; physical_address holds the next table's bus address */
		table_data_size = lli_table_ptr->block_size & 0xffffff;
		num_table_entries = (lli_table_ptr->block_size >> 24) & 0xff;
		lli_table_ptr = (struct sep_lli_entry_t *)
		    (lli_table_ptr->physical_address);

		edbg("SEP Driver:phys table_data_size is %lu num_table_entries is %lu lli_table_ptr is%lu\n", table_data_size, num_table_entries, (unsigned long) lli_table_ptr);

		/* translate the bus address to a kernel virtual pointer,
		   unless this was the end-of-chain marker */
		if ((unsigned long) lli_table_ptr != 0xffffffff)
			lli_table_ptr = (struct sep_lli_entry_t *) sep_shared_bus_to_virt(sep, (unsigned long) lli_table_ptr);

		table_count++;
	}
	dbg("SEP Driver:<-------- sep_debug_print_lli_tables end\n");
}
| 1110 | |||
| 1111 | |||
/*
  This function prepares only the input DMA table for synchronous
  symmetric operations (HASH)
*/
static int sep_prepare_input_dma_table(struct sep_device *sep,
				       unsigned long app_virt_addr,
				       unsigned long data_size,
				       unsigned long block_size,
				       unsigned long *lli_table_ptr,
				       unsigned long *num_entries_ptr,
				       unsigned long *table_data_size_ptr,
				       bool isKernelVirtualAddress)
{
	/* pointer to the info entry of the table - the last entry */
	struct sep_lli_entry_t *info_entry_ptr;
	/* array of pointers to page */
	struct sep_lli_entry_t *lli_array_ptr;
	/* points to the first entry to be processed in the lli_in_array */
	unsigned long current_entry;
	/* num entries in the virtual buffer */
	unsigned long sep_lli_entries;
	/* lli table pointer */
	struct sep_lli_entry_t *in_lli_table_ptr;
	/* the total data in one table */
	unsigned long table_data_size;
	/* number of entries in lli table */
	unsigned long num_entries_in_table;
	/* next table address */
	void *lli_table_alloc_addr;
	unsigned long result;

	dbg("SEP Driver:--------> sep_prepare_input_dma_table start\n");

	edbg("SEP Driver:data_size is %lu\n", data_size);
	edbg("SEP Driver:block_size is %lu\n", block_size);

	/* initialize the pages pointers */
	sep->in_page_array = 0;
	sep->in_num_pages = 0;

	if (data_size == 0) {
		/* special case - create a 2-entry table with zero data:
		   one dummy data entry plus the end-of-chain info entry */
		in_lli_table_ptr = (struct sep_lli_entry_t *) (sep->shared_addr + SEP_DRIVER_SYNCHRONIC_DMA_TABLES_AREA_OFFSET_IN_BYTES);
		/* FIXME: Should the entry below not be for _bus */
		in_lli_table_ptr->physical_address = (unsigned long)sep->shared_addr + SEP_DRIVER_SYNCHRONIC_DMA_TABLES_AREA_OFFSET_IN_BYTES;
		in_lli_table_ptr->block_size = 0;

		in_lli_table_ptr++;
		in_lli_table_ptr->physical_address = 0xFFFFFFFF;
		in_lli_table_ptr->block_size = 0;

		*lli_table_ptr = sep->shared_bus + SEP_DRIVER_SYNCHRONIC_DMA_TABLES_AREA_OFFSET_IN_BYTES;
		*num_entries_ptr = 2;
		*table_data_size_ptr = 0;

		goto end_function;
	}

	/* check if the pages are in Kernel Virtual Address layout */
	if (isKernelVirtualAddress == true)
		/* lock the pages of the kernel buffer and translate them to pages */
		result = sep_lock_kernel_pages(sep, app_virt_addr, data_size, &sep->in_num_pages, &lli_array_ptr, &sep->in_page_array);
	else
		/* lock the pages of the user buffer and translate them to pages */
		result = sep_lock_user_pages(sep, app_virt_addr, data_size, &sep->in_num_pages, &lli_array_ptr, &sep->in_page_array);

	/* NOTE(review): this early return skips the trailing dbg end
	   message printed by the end_function label on all other paths */
	if (result)
		return result;

	edbg("SEP Driver:output sep->in_num_pages is %lu\n", sep->in_num_pages);

	current_entry = 0;
	info_entry_ptr = 0;
	sep_lli_entries = sep->in_num_pages;

	/* initiate to point after the message area */
	lli_table_alloc_addr = sep->shared_addr + SEP_DRIVER_SYNCHRONIC_DMA_TABLES_AREA_OFFSET_IN_BYTES;

	/* loop till all the entries in the in array are processed */
	while (current_entry < sep_lli_entries) {
		/* set the new input and output tables */
		in_lli_table_ptr = (struct sep_lli_entry_t *) lli_table_alloc_addr;

		/* reserve space for a full table before building it;
		   NOTE(review): no bounds check against the shared area
		   size is visible here - confirm the area is large enough */
		lli_table_alloc_addr += sizeof(struct sep_lli_entry_t) * SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP;

		/* calculate the maximum size of data for input table */
		table_data_size = sep_calculate_lli_table_max_size(&lli_array_ptr[current_entry], (sep_lli_entries - current_entry));

		/* now calculate the table size so that it will be a multiple
		   of the block size */
		table_data_size = (table_data_size / block_size) * block_size;

		edbg("SEP Driver:output table_data_size is %lu\n", table_data_size);

		/* construct input lli table */
		sep_build_lli_table(&lli_array_ptr[current_entry], in_lli_table_ptr, &current_entry, &num_entries_in_table, table_data_size);

		if (info_entry_ptr == 0) {
			/* first table built: set the output parameters to
			   physical (bus) addresses */
			*lli_table_ptr = sep_shared_virt_to_bus(sep, in_lli_table_ptr);
			*num_entries_ptr = num_entries_in_table;
			*table_data_size_ptr = table_data_size;

			edbg("SEP Driver:output lli_table_in_ptr is %08lx\n", *lli_table_ptr);
		} else {
			/* update the info entry of the previous in table to
			   chain to this one (entry count in high 8 bits,
			   data size in low 24 bits) */
			info_entry_ptr->physical_address = sep_shared_virt_to_bus(sep, in_lli_table_ptr);
			info_entry_ptr->block_size = ((num_entries_in_table) << 24) | (table_data_size);
		}

		/* save the pointer to the info entry of the current tables */
		info_entry_ptr = in_lli_table_ptr + num_entries_in_table - 1;
	}

	/* print input tables */
	sep_debug_print_lli_tables(sep, (struct sep_lli_entry_t *)
				   sep_shared_bus_to_virt(sep, *lli_table_ptr), *num_entries_ptr, *table_data_size_ptr);

	/* free the array of the pages (the pages themselves stay pinned,
	   tracked via sep->in_page_array) */
	kfree(lli_array_ptr);
end_function:
	dbg("SEP Driver:<-------- sep_prepare_input_dma_table end\n");
	return 0;

}
| 1236 | |||
/*
  This function creates the input and output dma tables for
  symmetric operations (AES/DES) according to the block size from LLI arrays
*/
static int sep_construct_dma_tables_from_lli(struct sep_device *sep,
					     struct sep_lli_entry_t *lli_in_array,
					     unsigned long sep_in_lli_entries,
					     struct sep_lli_entry_t *lli_out_array,
					     unsigned long sep_out_lli_entries,
					     unsigned long block_size, unsigned long *lli_table_in_ptr, unsigned long *lli_table_out_ptr, unsigned long *in_num_entries_ptr, unsigned long *out_num_entries_ptr, unsigned long *table_data_size_ptr)
{
	/* points to the area where next lli table can be allocated: keep void *
	   as there is pointer scaling to fix otherwise */
	void *lli_table_alloc_addr;
	/* input lli table */
	struct sep_lli_entry_t *in_lli_table_ptr;
	/* output lli table */
	struct sep_lli_entry_t *out_lli_table_ptr;
	/* pointer to the info entry of the table - the last entry */
	struct sep_lli_entry_t *info_in_entry_ptr;
	/* pointer to the info entry of the table - the last entry */
	struct sep_lli_entry_t *info_out_entry_ptr;
	/* points to the first entry to be processed in the lli_in_array */
	unsigned long current_in_entry;
	/* points to the first entry to be processed in the lli_out_array */
	unsigned long current_out_entry;
	/* max size of the input table */
	unsigned long in_table_data_size;
	/* max size of the output table */
	unsigned long out_table_data_size;
	/* flag that signifies if this is the first pair of tables built from the arrays */
	unsigned long first_table_flag;
	/* the data size that should be in table */
	unsigned long table_data_size;
	/* number of entries in the input table */
	unsigned long num_entries_in_table;
	/* number of entries in the output table */
	unsigned long num_entries_out_table;

	dbg("SEP Driver:--------> sep_construct_dma_tables_from_lli start\n");

	/* initiate to point after the message area */
	lli_table_alloc_addr = sep->shared_addr + SEP_DRIVER_SYNCHRONIC_DMA_TABLES_AREA_OFFSET_IN_BYTES;

	current_in_entry = 0;
	current_out_entry = 0;
	first_table_flag = 1;
	info_in_entry_ptr = 0;
	info_out_entry_ptr = 0;

	/* loop till all the entries in the in array are processed */
	while (current_in_entry < sep_in_lli_entries) {
		/* carve the next input table out of the shared area */
		in_lli_table_ptr = (struct sep_lli_entry_t *) lli_table_alloc_addr;

		lli_table_alloc_addr += sizeof(struct sep_lli_entry_t) * SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP;

		/* carve the matching output table right after it */
		out_lli_table_ptr = (struct sep_lli_entry_t *) lli_table_alloc_addr;

		lli_table_alloc_addr += sizeof(struct sep_lli_entry_t) * SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP;

		/* calculate the maximum size of data for input table */
		in_table_data_size = sep_calculate_lli_table_max_size(&lli_in_array[current_in_entry], (sep_in_lli_entries - current_in_entry));

		/* calculate the maximum size of data for output table */
		out_table_data_size = sep_calculate_lli_table_max_size(&lli_out_array[current_out_entry], (sep_out_lli_entries - current_out_entry));

		edbg("SEP Driver:in_table_data_size is %lu\n", in_table_data_size);
		edbg("SEP Driver:out_table_data_size is %lu\n", out_table_data_size);

		/* use the smaller of the two so input and output tables
		   always describe the same amount of data */
		table_data_size = in_table_data_size;
		if (table_data_size > out_table_data_size)
			table_data_size = out_table_data_size;

		/* now calculate the table size so that it will be a multiple
		   of the block size */
		table_data_size = (table_data_size / block_size) * block_size;

		dbg("SEP Driver:table_data_size is %lu\n", table_data_size);

		/* construct input lli table */
		sep_build_lli_table(&lli_in_array[current_in_entry], in_lli_table_ptr, &current_in_entry, &num_entries_in_table, table_data_size);

		/* construct output lli table */
		sep_build_lli_table(&lli_out_array[current_out_entry], out_lli_table_ptr, &current_out_entry, &num_entries_out_table, table_data_size);

		/* if info entry is null - this is the first table built */
		if (info_in_entry_ptr == 0) {
			/* set the output parameters to physical (bus) addresses */
			*lli_table_in_ptr = sep_shared_virt_to_bus(sep, in_lli_table_ptr);
			*in_num_entries_ptr = num_entries_in_table;
			*lli_table_out_ptr = sep_shared_virt_to_bus(sep, out_lli_table_ptr);
			*out_num_entries_ptr = num_entries_out_table;
			*table_data_size_ptr = table_data_size;

			edbg("SEP Driver:output lli_table_in_ptr is %08lx\n", *lli_table_in_ptr);
			edbg("SEP Driver:output lli_table_out_ptr is %08lx\n", *lli_table_out_ptr);
		} else {
			/* update the info entry of the previous in table to
			   chain to this one (entry count in high 8 bits,
			   data size in low 24 bits) */
			info_in_entry_ptr->physical_address = sep_shared_virt_to_bus(sep, in_lli_table_ptr);
			info_in_entry_ptr->block_size = ((num_entries_in_table) << 24) | (table_data_size);

			/* update the info entry of the previous out table */
			info_out_entry_ptr->physical_address = sep_shared_virt_to_bus(sep, out_lli_table_ptr);
			info_out_entry_ptr->block_size = ((num_entries_out_table) << 24) | (table_data_size);
		}

		/* save the pointer to the info entry of the current tables */
		info_in_entry_ptr = in_lli_table_ptr + num_entries_in_table - 1;
		info_out_entry_ptr = out_lli_table_ptr + num_entries_out_table - 1;

		edbg("SEP Driver:output num_entries_out_table is %lu\n", (unsigned long) num_entries_out_table);
		edbg("SEP Driver:output info_in_entry_ptr is %lu\n", (unsigned long) info_in_entry_ptr);
		edbg("SEP Driver:output info_out_entry_ptr is %lu\n", (unsigned long) info_out_entry_ptr);
	}

	/* print input tables */
	sep_debug_print_lli_tables(sep, (struct sep_lli_entry_t *)
				   sep_shared_bus_to_virt(sep, *lli_table_in_ptr), *in_num_entries_ptr, *table_data_size_ptr);
	/* print output tables */
	sep_debug_print_lli_tables(sep, (struct sep_lli_entry_t *)
				   sep_shared_bus_to_virt(sep, *lli_table_out_ptr), *out_num_entries_ptr, *table_data_size_ptr);
	dbg("SEP Driver:<-------- sep_construct_dma_tables_from_lli end\n");
	return 0;
}
| 1363 | |||
| 1364 | |||
/*
  This function builds input and output DMA tables for synchronous
  symmetric operations (AES, DES). It also checks that each table
  is of the modular block size
*/
static int sep_prepare_input_output_dma_table(struct sep_device *sep,
					      unsigned long app_virt_in_addr,
					      unsigned long app_virt_out_addr,
					      unsigned long data_size,
					      unsigned long block_size,
					      unsigned long *lli_table_in_ptr, unsigned long *lli_table_out_ptr, unsigned long *in_num_entries_ptr, unsigned long *out_num_entries_ptr, unsigned long *table_data_size_ptr, bool isKernelVirtualAddress)
{
	/* lli array for the input buffer pages */
	struct sep_lli_entry_t *lli_in_array;
	/* lli array for the output buffer pages */
	struct sep_lli_entry_t *lli_out_array;
	int result = 0;

	dbg("SEP Driver:--------> sep_prepare_input_output_dma_table start\n");

	/* initialize the pages pointers */
	sep->in_page_array = 0;
	sep->out_page_array = 0;

	/* lock the input buffer, choosing kernel- or user-space pinning
	   based on the address layout flag */
	if (isKernelVirtualAddress == true) {
		/* lock the pages of the kernel buffer and translate them to pages */
		result = sep_lock_kernel_pages(sep, app_virt_in_addr, data_size, &sep->in_num_pages, &lli_in_array, &sep->in_page_array);
		if (result) {
			edbg("SEP Driver: sep_lock_kernel_pages for input virtual buffer failed\n");
			goto end_function;
		}
	} else {
		/* lock the pages of the user buffer and translate them to pages */
		result = sep_lock_user_pages(sep, app_virt_in_addr, data_size, &sep->in_num_pages, &lli_in_array, &sep->in_page_array);
		if (result) {
			edbg("SEP Driver: sep_lock_user_pages for input virtual buffer failed\n");
			goto end_function;
		}
	}

	/* lock the output buffer the same way; on failure fall through to
	   end_function_with_error1, which frees lli_in_array.
	   NOTE(review): the input pages pinned above are not released on
	   this error path here - presumably sep_free_dma_table_data_handler
	   does that via sep->in_page_array; confirm */
	if (isKernelVirtualAddress == true) {
		result = sep_lock_kernel_pages(sep, app_virt_out_addr, data_size, &sep->out_num_pages, &lli_out_array, &sep->out_page_array);
		if (result) {
			edbg("SEP Driver: sep_lock_kernel_pages for output virtual buffer failed\n");
			goto end_function_with_error1;
		}
	} else {
		result = sep_lock_user_pages(sep, app_virt_out_addr, data_size, &sep->out_num_pages, &lli_out_array, &sep->out_page_array);
		if (result) {
			edbg("SEP Driver: sep_lock_user_pages for output virtual buffer failed\n");
			goto end_function_with_error1;
		}
	}
	edbg("sep->in_num_pages is %lu\n", sep->in_num_pages);
	edbg("sep->out_num_pages is %lu\n", sep->out_num_pages);
	edbg("SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP is %x\n", SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP);


	/* call the function that creates tables from the lli arrays */
	result = sep_construct_dma_tables_from_lli(sep, lli_in_array, sep->in_num_pages, lli_out_array, sep->out_num_pages, block_size, lli_table_in_ptr, lli_table_out_ptr, in_num_entries_ptr, out_num_entries_ptr, table_data_size_ptr);
	if (result) {
		edbg("SEP Driver: sep_construct_dma_tables_from_lli failed\n");
		goto end_function_with_error2;
	}

	/* fall through - free the lli entry arrays (freed on both the
	   success and the error paths below) */
	dbg("in_num_entries_ptr is %08lx\n", *in_num_entries_ptr);
	dbg("out_num_entries_ptr is %08lx\n", *out_num_entries_ptr);
	dbg("table_data_size_ptr is %08lx\n", *table_data_size_ptr);
end_function_with_error2:
	kfree(lli_out_array);
end_function_with_error1:
	kfree(lli_in_array);
end_function:
	dbg("SEP Driver:<-------- sep_prepare_input_output_dma_table end result = %d\n", (int) result);
	return result;

}
| 1444 | |||
/*
  this function handles the request for creation of the DMA tables
  for the synchronous symmetric operations (AES, DES)
*/
| 1449 | static int sep_create_sync_dma_tables_handler(struct sep_device *sep, | ||
| 1450 | unsigned long arg) | ||
| 1451 | { | ||
| 1452 | int error; | ||
| 1453 | /* command arguments */ | ||
| 1454 | struct sep_driver_build_sync_table_t command_args; | ||
| 1455 | |||
| 1456 | dbg("SEP Driver:--------> sep_create_sync_dma_tables_handler start\n"); | ||
| 1457 | |||
| 1458 | error = copy_from_user(&command_args, (void *) arg, sizeof(struct sep_driver_build_sync_table_t)); | ||
| 1459 | if (error) { | ||
| 1460 | error = -EFAULT; | ||
| 1461 | goto end_function; | ||
| 1462 | } | ||
| 1463 | |||
| 1464 | edbg("app_in_address is %08lx\n", command_args.app_in_address); | ||
| 1465 | edbg("app_out_address is %08lx\n", command_args.app_out_address); | ||
| 1466 | edbg("data_size is %lu\n", command_args.data_in_size); | ||
| 1467 | edbg("block_size is %lu\n", command_args.block_size); | ||
| 1468 | |||
| 1469 | /* check if we need to build only input table or input/output */ | ||
| 1470 | if (command_args.app_out_address) | ||
| 1471 | /* prepare input and output tables */ | ||
| 1472 | error = sep_prepare_input_output_dma_table(sep, | ||
| 1473 | command_args.app_in_address, | ||
| 1474 | command_args.app_out_address, | ||
| 1475 | command_args.data_in_size, | ||
| 1476 | command_args.block_size, | ||
| 1477 | &command_args.in_table_address, | ||
| 1478 | &command_args.out_table_address, &command_args.in_table_num_entries, &command_args.out_table_num_entries, &command_args.table_data_size, command_args.isKernelVirtualAddress); | ||
| 1479 | else | ||
| 1480 | /* prepare input tables */ | ||
| 1481 | error = sep_prepare_input_dma_table(sep, | ||
| 1482 | command_args.app_in_address, | ||
| 1483 | command_args.data_in_size, command_args.block_size, &command_args.in_table_address, &command_args.in_table_num_entries, &command_args.table_data_size, command_args.isKernelVirtualAddress); | ||
| 1484 | |||
| 1485 | if (error) | ||
| 1486 | goto end_function; | ||
| 1487 | /* copy to user */ | ||
| 1488 | if (copy_to_user((void *) arg, (void *) &command_args, sizeof(struct sep_driver_build_sync_table_t))) | ||
| 1489 | error = -EFAULT; | ||
| 1490 | end_function: | ||
| 1491 | dbg("SEP Driver:<-------- sep_create_sync_dma_tables_handler end\n"); | ||
| 1492 | return error; | ||
| 1493 | } | ||
| 1494 | |||
/*
  this function handles the request for freeing the DMA table used for
  synchronous operations
*/
| 1498 | static int sep_free_dma_table_data_handler(struct sep_device *sep) | ||
| 1499 | { | ||
| 1500 | dbg("SEP Driver:--------> sep_free_dma_table_data_handler start\n"); | ||
| 1501 | |||
| 1502 | /* free input pages array */ | ||
| 1503 | sep_free_dma_pages(sep->in_page_array, sep->in_num_pages, 0); | ||
| 1504 | |||
| 1505 | /* free output pages array if needed */ | ||
| 1506 | if (sep->out_page_array) | ||
| 1507 | sep_free_dma_pages(sep->out_page_array, sep->out_num_pages, 1); | ||
| 1508 | |||
| 1509 | /* reset all the values */ | ||
| 1510 | sep->in_page_array = 0; | ||
| 1511 | sep->out_page_array = 0; | ||
| 1512 | sep->in_num_pages = 0; | ||
| 1513 | sep->out_num_pages = 0; | ||
| 1514 | dbg("SEP Driver:<-------- sep_free_dma_table_data_handler end\n"); | ||
| 1515 | return 0; | ||
| 1516 | } | ||
| 1517 | |||
/*
  this function finds space for the new flow dma table
*/
| 1521 | static int sep_find_free_flow_dma_table_space(struct sep_device *sep, | ||
| 1522 | unsigned long **table_address_ptr) | ||
| 1523 | { | ||
| 1524 | int error = 0; | ||
| 1525 | /* pointer to the id field of the flow dma table */ | ||
| 1526 | unsigned long *start_table_ptr; | ||
| 1527 | /* Do not make start_addr unsigned long * unless fixing the offset | ||
| 1528 | computations ! */ | ||
| 1529 | void *flow_dma_area_start_addr; | ||
| 1530 | unsigned long *flow_dma_area_end_addr; | ||
| 1531 | /* maximum table size in words */ | ||
| 1532 | unsigned long table_size_in_words; | ||
| 1533 | |||
| 1534 | /* find the start address of the flow DMA table area */ | ||
| 1535 | flow_dma_area_start_addr = sep->shared_addr + SEP_DRIVER_FLOW_DMA_TABLES_AREA_OFFSET_IN_BYTES; | ||
| 1536 | |||
| 1537 | /* set end address of the flow table area */ | ||
| 1538 | flow_dma_area_end_addr = flow_dma_area_start_addr + SEP_DRIVER_FLOW_DMA_TABLES_AREA_SIZE_IN_BYTES; | ||
| 1539 | |||
| 1540 | /* set table size in words */ | ||
| 1541 | table_size_in_words = SEP_DRIVER_MAX_FLOW_NUM_ENTRIES_IN_TABLE * (sizeof(struct sep_lli_entry_t) / sizeof(long)) + 2; | ||
| 1542 | |||
| 1543 | /* set the pointer to the start address of DMA area */ | ||
| 1544 | start_table_ptr = flow_dma_area_start_addr; | ||
| 1545 | |||
| 1546 | /* find the space for the next table */ | ||
| 1547 | while (((*start_table_ptr & 0x7FFFFFFF) != 0) && start_table_ptr < flow_dma_area_end_addr) | ||
| 1548 | start_table_ptr += table_size_in_words; | ||
| 1549 | |||
| 1550 | /* check if we reached the end of floa tables area */ | ||
| 1551 | if (start_table_ptr >= flow_dma_area_end_addr) | ||
| 1552 | error = -1; | ||
| 1553 | else | ||
| 1554 | *table_address_ptr = start_table_ptr; | ||
| 1555 | |||
| 1556 | return error; | ||
| 1557 | } | ||
| 1558 | |||
| 1559 | /* | ||
| 1560 | This function creates one DMA table for flow and returns its data, | ||
| 1561 | and pointer to its info entry | ||
| 1562 | */ | ||
/*
  This function creates one DMA table for flow and returns its data,
  and pointer to its info entry.

  Slot layout in the shared flow area (sized by
  sep_find_free_flow_dma_table_space as MAX_ENTRIES * entry + 2 words):
    word 0: number of locked pages - also serves as the "taken" mark
    word 1: pointer to the struct page * array for those pages
    then the LLI entries, terminated by one info entry

  @table_data receives the table's address / (entries|size) descriptor;
  @info_entry_ptr receives a pointer to the trailing info entry so a
  later table can be chained onto this one.

  NOTE(review): @flow_data_ptr is accepted but never used here.
*/
static int sep_prepare_one_flow_dma_table(struct sep_device *sep,
					unsigned long virt_buff_addr,
					unsigned long virt_buff_size,
					struct sep_lli_entry_t *table_data,
					struct sep_lli_entry_t **info_entry_ptr,
					struct sep_flow_context_t *flow_data_ptr,
					bool isKernelVirtualAddress)
{
	int error;
	/* the range in pages */
	unsigned long lli_array_size;
	struct sep_lli_entry_t *lli_array;
	struct sep_lli_entry_t *flow_dma_table_entry_ptr;
	unsigned long *start_dma_table_ptr;
	/* total table data counter */
	unsigned long dma_table_data_count;
	/* pointer that will keep the pointer to the pages of the virtual buffer */
	struct page **page_array_ptr;
	unsigned long entry_count;

	/* find the space for the new table */
	error = sep_find_free_flow_dma_table_space(sep, &start_dma_table_ptr);
	if (error)
		goto end_function;

	/* check if the pages are in Kernel Virtual Address layout */
	if (isKernelVirtualAddress == true)
		/* lock kernel buffer in the memory */
		error = sep_lock_kernel_pages(sep, virt_buff_addr, virt_buff_size, &lli_array_size, &lli_array, &page_array_ptr);
	else
		/* lock user buffer in the memory */
		error = sep_lock_user_pages(sep, virt_buff_addr, virt_buff_size, &lli_array_size, &lli_array, &page_array_ptr);

	if (error)
		goto end_function;

	/* write the page count into word 0 - a non-zero id word marks this
	   slot as taken (see the scan in sep_find_free_flow_dma_table_space) */
	*start_dma_table_ptr = lli_array_size;

	/* point to the place of the pages pointers of the table */
	start_dma_table_ptr++;

	/* word 1: the struct page * array for later freeing */
	*start_dma_table_ptr = (unsigned long) page_array_ptr;

	/* set the pointer to the first entry */
	flow_dma_table_entry_ptr = (struct sep_lli_entry_t *) (++start_dma_table_ptr);

	/* copy the LLI entries into the table, summing the total data size */
	for (dma_table_data_count = entry_count = 0; entry_count < lli_array_size; entry_count++) {
		flow_dma_table_entry_ptr->physical_address = lli_array[entry_count].physical_address;

		flow_dma_table_entry_ptr->block_size = lli_array[entry_count].block_size;

		/* set the total data of a table */
		dma_table_data_count += lli_array[entry_count].block_size;

		flow_dma_table_entry_ptr++;
	}

	/* set the physical address (NOTE(review): virt_to_phys on a pointer
	   into the shared area - assumes the area is directly mapped lowmem;
	   confirm) */
	table_data->physical_address = virt_to_phys(start_dma_table_ptr);

	/* block_size packs the entry count (+1 for the info entry) in the
	   high bits and the total data size in the low bits */
	table_data->block_size = ((lli_array_size + 1) << SEP_NUM_ENTRIES_OFFSET_IN_BITS) | (dma_table_data_count);

	/* terminate the table with the info entry; 0xffffffff means "no next
	   table" until a later table is chained here */
	flow_dma_table_entry_ptr->physical_address = 0xffffffff;
	flow_dma_table_entry_ptr->block_size = 0;

	/* set the pointer to info entry */
	*info_entry_ptr = flow_dma_table_entry_ptr;

	/* the lli entry array was only needed to build the table - free it */
	kfree(lli_array);
end_function:
	return error;
}
| 1642 | |||
| 1643 | |||
| 1644 | |||
| 1645 | /* | ||
| 1646 | This function creates a list of tables for flow and returns the data for | ||
| 1647 | the first and last tables of the list | ||
| 1648 | */ | ||
/*
  This function creates a list of tables for flow and returns the data for
  the first and last tables of the list.

  @first_buff_addr is expected to reference an array of
  @num_virtual_buffers (address, size) pairs; one DMA table is built per
  buffer.

  NOTE(review): get_user(virt_buff_addr, &first_buff_addr) reads through
  the address of the *local* variable first_buff_addr rather than
  treating its value as a __user pointer, and "first_buff_addr++"
  advances the value by 1, not by sizeof(unsigned long). This looks
  broken - confirm against the user-space ABI before relying on it.
*/
static int sep_prepare_flow_dma_tables(struct sep_device *sep,
					unsigned long num_virtual_buffers,
					unsigned long first_buff_addr, struct sep_flow_context_t *flow_data_ptr, struct sep_lli_entry_t *first_table_data_ptr, struct sep_lli_entry_t *last_table_data_ptr, bool isKernelVirtualAddress)
{
	int error;
	unsigned long virt_buff_addr;
	unsigned long virt_buff_size;
	struct sep_lli_entry_t table_data;
	struct sep_lli_entry_t *info_entry_ptr;
	struct sep_lli_entry_t *prev_info_entry_ptr;
	unsigned long i;

	/* init vars */
	error = 0;
	prev_info_entry_ptr = 0;

	/* init the first table to default */
	table_data.physical_address = 0xffffffff;
	first_table_data_ptr->physical_address = 0xffffffff;
	table_data.block_size = 0;

	for (i = 0; i < num_virtual_buffers; i++) {
		/* get the virtual buffer address */
		error = get_user(virt_buff_addr, &first_buff_addr);
		if (error)
			goto end_function;

		/* get the virtual buffer size */
		first_buff_addr++;
		error = get_user(virt_buff_size, &first_buff_addr);
		if (error)
			goto end_function;

		/* advance the address to point to the next pair of address|size */
		first_buff_addr++;

		/* now prepare the one flow LLI table from the data */
		error = sep_prepare_one_flow_dma_table(sep, virt_buff_addr, virt_buff_size, &table_data, &info_entry_ptr, flow_data_ptr, isKernelVirtualAddress);
		if (error)
			goto end_function;

		if (i == 0) {
			/* if this is the first table - save it to return to the user
			   application */
			*first_table_data_ptr = table_data;

			/* set the pointer to info entry */
			prev_info_entry_ptr = info_entry_ptr;
		} else {
			/* not first table - the previous table info entry should
			   be updated.
			   NOTE(review): only block_size is updated; the previous
			   info entry's physical_address still holds 0xffffffff, so
			   the chain walked by sep_deallocated_flow_tables never
			   advances to this table - confirm intended */
			prev_info_entry_ptr->block_size = (0x1 << SEP_INT_FLAG_OFFSET_IN_BITS) | (table_data.block_size);

			/* set the pointer to info entry */
			prev_info_entry_ptr = info_entry_ptr;
		}
	}

	/* set the last table data */
	*last_table_data_ptr = table_data;
end_function:
	return error;
}
| 1712 | |||
/*
  this function goes over all the flow tables connected to the given
  table and deallocates them
*/
/*
  this function goes over all the flow tables connected to the given
  table and deallocates them: for each table in the chain it frees the
  pages that were locked for it, then follows the table's info entry to
  the next table. The walk stops at an id word of 0xffffffff.

  NOTE(review): physical_address values are cast straight to virtual
  pointers (no phys_to_virt) - this only works if the tables live in an
  identity-mapped region; confirm.
*/
static void sep_deallocated_flow_tables(struct sep_lli_entry_t *first_table_ptr)
{
	/* pointer to the first LLI entry of the current table */
	unsigned long *table_ptr;
	/* number of entries in the current table */
	unsigned long num_entries;
	unsigned long num_pages;
	struct page **pages_ptr;
	/* info (last) entry of the current table */
	struct sep_lli_entry_t *info_entry_ptr;

	/* set the pointer to the first table */
	table_ptr = (unsigned long *) first_table_ptr->physical_address;

	/* set the num of entries */
	num_entries = (first_table_ptr->block_size >> SEP_NUM_ENTRIES_OFFSET_IN_BITS)
	    & SEP_NUM_ENTRIES_MASK;

	/* go over all the connected tables */
	while (*table_ptr != 0xffffffff) {
		/* page count: the word two slots before the first entry,
		   written by sep_prepare_one_flow_dma_table */
		num_pages = *(table_ptr - 2);

		/* the struct page * array is the word just before the entries */
		pages_ptr = (struct page **) (*(table_ptr - 1));

		/* free the pages */
		sep_free_dma_pages(pages_ptr, num_pages, 1);

		/* goto to the info entry */
		info_entry_ptr = ((struct sep_lli_entry_t *) table_ptr) + (num_entries - 1);

		/* follow the chain to the next table */
		table_ptr = (unsigned long *) info_entry_ptr->physical_address;
		num_entries = (info_entry_ptr->block_size >> SEP_NUM_ENTRIES_OFFSET_IN_BITS) & SEP_NUM_ENTRIES_MASK;
	}

	return;
}
| 1755 | |||
| 1756 | /** | ||
| 1757 | * sep_find_flow_context - find a flow | ||
| 1758 | * @sep: the SEP we are working with | ||
| 1759 | * @flow_id: flow identifier | ||
| 1760 | * | ||
| 1761 | * Returns a pointer the matching flow, or NULL if the flow does not | ||
| 1762 | * exist. | ||
| 1763 | */ | ||
| 1764 | |||
| 1765 | static struct sep_flow_context_t *sep_find_flow_context(struct sep_device *sep, | ||
| 1766 | unsigned long flow_id) | ||
| 1767 | { | ||
| 1768 | int count; | ||
| 1769 | /* | ||
| 1770 | * always search for flow with id default first - in case we | ||
| 1771 | * already started working on the flow there can be no situation | ||
| 1772 | * when 2 flows are with default flag | ||
| 1773 | */ | ||
| 1774 | for (count = 0; count < SEP_DRIVER_NUM_FLOWS; count++) { | ||
| 1775 | if (sep->flows[count].flow_id == flow_id) | ||
| 1776 | return &sep->flows[count]; | ||
| 1777 | } | ||
| 1778 | return NULL; | ||
| 1779 | } | ||
| 1780 | |||
| 1781 | |||
| 1782 | /* | ||
| 1783 | this function handles the request to create the DMA tables for flow | ||
| 1784 | */ | ||
| 1785 | static int sep_create_flow_dma_tables_handler(struct sep_device *sep, | ||
| 1786 | unsigned long arg) | ||
| 1787 | { | ||
| 1788 | int error = -ENOENT; | ||
| 1789 | struct sep_driver_build_flow_table_t command_args; | ||
| 1790 | /* first table - output */ | ||
| 1791 | struct sep_lli_entry_t first_table_data; | ||
| 1792 | /* dma table data */ | ||
| 1793 | struct sep_lli_entry_t last_table_data; | ||
| 1794 | /* pointer to the info entry of the previuos DMA table */ | ||
| 1795 | struct sep_lli_entry_t *prev_info_entry_ptr; | ||
| 1796 | /* pointer to the flow data strucutre */ | ||
| 1797 | struct sep_flow_context_t *flow_context_ptr; | ||
| 1798 | |||
| 1799 | dbg("SEP Driver:--------> sep_create_flow_dma_tables_handler start\n"); | ||
| 1800 | |||
| 1801 | /* init variables */ | ||
| 1802 | prev_info_entry_ptr = 0; | ||
| 1803 | first_table_data.physical_address = 0xffffffff; | ||
| 1804 | |||
| 1805 | /* find the free structure for flow data */ | ||
| 1806 | error = -EINVAL; | ||
| 1807 | flow_context_ptr = sep_find_flow_context(sep, SEP_FREE_FLOW_ID); | ||
| 1808 | if (flow_context_ptr == NULL) | ||
| 1809 | goto end_function; | ||
| 1810 | |||
| 1811 | error = copy_from_user(&command_args, (void *) arg, sizeof(struct sep_driver_build_flow_table_t)); | ||
| 1812 | if (error) { | ||
| 1813 | error = -EFAULT; | ||
| 1814 | goto end_function; | ||
| 1815 | } | ||
| 1816 | |||
| 1817 | /* create flow tables */ | ||
| 1818 | error = sep_prepare_flow_dma_tables(sep, command_args.num_virtual_buffers, command_args.virt_buff_data_addr, flow_context_ptr, &first_table_data, &last_table_data, command_args.isKernelVirtualAddress); | ||
| 1819 | if (error) | ||
| 1820 | goto end_function_with_error; | ||
| 1821 | |||
| 1822 | /* check if flow is static */ | ||
| 1823 | if (!command_args.flow_type) | ||
| 1824 | /* point the info entry of the last to the info entry of the first */ | ||
| 1825 | last_table_data = first_table_data; | ||
| 1826 | |||
| 1827 | /* set output params */ | ||
| 1828 | command_args.first_table_addr = first_table_data.physical_address; | ||
| 1829 | command_args.first_table_num_entries = ((first_table_data.block_size >> SEP_NUM_ENTRIES_OFFSET_IN_BITS) & SEP_NUM_ENTRIES_MASK); | ||
| 1830 | command_args.first_table_data_size = (first_table_data.block_size & SEP_TABLE_DATA_SIZE_MASK); | ||
| 1831 | |||
| 1832 | /* send the parameters to user application */ | ||
| 1833 | error = copy_to_user((void *) arg, &command_args, sizeof(struct sep_driver_build_flow_table_t)); | ||
| 1834 | if (error) { | ||
| 1835 | error = -EFAULT; | ||
| 1836 | goto end_function_with_error; | ||
| 1837 | } | ||
| 1838 | |||
| 1839 | /* all the flow created - update the flow entry with temp id */ | ||
| 1840 | flow_context_ptr->flow_id = SEP_TEMP_FLOW_ID; | ||
| 1841 | |||
| 1842 | /* set the processing tables data in the context */ | ||
| 1843 | if (command_args.input_output_flag == SEP_DRIVER_IN_FLAG) | ||
| 1844 | flow_context_ptr->input_tables_in_process = first_table_data; | ||
| 1845 | else | ||
| 1846 | flow_context_ptr->output_tables_in_process = first_table_data; | ||
| 1847 | |||
| 1848 | goto end_function; | ||
| 1849 | |||
| 1850 | end_function_with_error: | ||
| 1851 | /* free the allocated tables */ | ||
| 1852 | sep_deallocated_flow_tables(&first_table_data); | ||
| 1853 | end_function: | ||
| 1854 | dbg("SEP Driver:<-------- sep_create_flow_dma_tables_handler end\n"); | ||
| 1855 | return error; | ||
| 1856 | } | ||
| 1857 | |||
/*
  this function handles adding tables to an existing flow
*/
| 1861 | static int sep_add_flow_tables_handler(struct sep_device *sep, unsigned long arg) | ||
| 1862 | { | ||
| 1863 | int error; | ||
| 1864 | unsigned long num_entries; | ||
| 1865 | struct sep_driver_add_flow_table_t command_args; | ||
| 1866 | struct sep_flow_context_t *flow_context_ptr; | ||
| 1867 | /* first dma table data */ | ||
| 1868 | struct sep_lli_entry_t first_table_data; | ||
| 1869 | /* last dma table data */ | ||
| 1870 | struct sep_lli_entry_t last_table_data; | ||
| 1871 | /* pointer to the info entry of the current DMA table */ | ||
| 1872 | struct sep_lli_entry_t *info_entry_ptr; | ||
| 1873 | |||
| 1874 | dbg("SEP Driver:--------> sep_add_flow_tables_handler start\n"); | ||
| 1875 | |||
| 1876 | /* get input parameters */ | ||
| 1877 | error = copy_from_user(&command_args, (void *) arg, sizeof(struct sep_driver_add_flow_table_t)); | ||
| 1878 | if (error) { | ||
| 1879 | error = -EFAULT; | ||
| 1880 | goto end_function; | ||
| 1881 | } | ||
| 1882 | |||
| 1883 | /* find the flow structure for the flow id */ | ||
| 1884 | flow_context_ptr = sep_find_flow_context(sep, command_args.flow_id); | ||
| 1885 | if (flow_context_ptr == NULL) | ||
| 1886 | goto end_function; | ||
| 1887 | |||
| 1888 | /* prepare the flow dma tables */ | ||
| 1889 | error = sep_prepare_flow_dma_tables(sep, command_args.num_virtual_buffers, command_args.virt_buff_data_addr, flow_context_ptr, &first_table_data, &last_table_data, command_args.isKernelVirtualAddress); | ||
| 1890 | if (error) | ||
| 1891 | goto end_function_with_error; | ||
| 1892 | |||
| 1893 | /* now check if there is already an existing add table for this flow */ | ||
| 1894 | if (command_args.inputOutputFlag == SEP_DRIVER_IN_FLAG) { | ||
| 1895 | /* this buffer was for input buffers */ | ||
| 1896 | if (flow_context_ptr->input_tables_flag) { | ||
| 1897 | /* add table already exists - add the new tables to the end | ||
| 1898 | of the previous */ | ||
| 1899 | num_entries = (flow_context_ptr->last_input_table.block_size >> SEP_NUM_ENTRIES_OFFSET_IN_BITS) & SEP_NUM_ENTRIES_MASK; | ||
| 1900 | |||
| 1901 | info_entry_ptr = (struct sep_lli_entry_t *) | ||
| 1902 | (flow_context_ptr->last_input_table.physical_address + (sizeof(struct sep_lli_entry_t) * (num_entries - 1))); | ||
| 1903 | |||
| 1904 | /* connect to list of tables */ | ||
| 1905 | *info_entry_ptr = first_table_data; | ||
| 1906 | |||
| 1907 | /* set the first table data */ | ||
| 1908 | first_table_data = flow_context_ptr->first_input_table; | ||
| 1909 | } else { | ||
| 1910 | /* set the input flag */ | ||
| 1911 | flow_context_ptr->input_tables_flag = 1; | ||
| 1912 | |||
| 1913 | /* set the first table data */ | ||
| 1914 | flow_context_ptr->first_input_table = first_table_data; | ||
| 1915 | } | ||
| 1916 | /* set the last table data */ | ||
| 1917 | flow_context_ptr->last_input_table = last_table_data; | ||
| 1918 | } else { /* this is output tables */ | ||
| 1919 | |||
| 1920 | /* this buffer was for input buffers */ | ||
| 1921 | if (flow_context_ptr->output_tables_flag) { | ||
| 1922 | /* add table already exists - add the new tables to | ||
| 1923 | the end of the previous */ | ||
| 1924 | num_entries = (flow_context_ptr->last_output_table.block_size >> SEP_NUM_ENTRIES_OFFSET_IN_BITS) & SEP_NUM_ENTRIES_MASK; | ||
| 1925 | |||
| 1926 | info_entry_ptr = (struct sep_lli_entry_t *) | ||
| 1927 | (flow_context_ptr->last_output_table.physical_address + (sizeof(struct sep_lli_entry_t) * (num_entries - 1))); | ||
| 1928 | |||
| 1929 | /* connect to list of tables */ | ||
| 1930 | *info_entry_ptr = first_table_data; | ||
| 1931 | |||
| 1932 | /* set the first table data */ | ||
| 1933 | first_table_data = flow_context_ptr->first_output_table; | ||
| 1934 | } else { | ||
| 1935 | /* set the input flag */ | ||
| 1936 | flow_context_ptr->output_tables_flag = 1; | ||
| 1937 | |||
| 1938 | /* set the first table data */ | ||
| 1939 | flow_context_ptr->first_output_table = first_table_data; | ||
| 1940 | } | ||
| 1941 | /* set the last table data */ | ||
| 1942 | flow_context_ptr->last_output_table = last_table_data; | ||
| 1943 | } | ||
| 1944 | |||
| 1945 | /* set output params */ | ||
| 1946 | command_args.first_table_addr = first_table_data.physical_address; | ||
| 1947 | command_args.first_table_num_entries = ((first_table_data.block_size >> SEP_NUM_ENTRIES_OFFSET_IN_BITS) & SEP_NUM_ENTRIES_MASK); | ||
| 1948 | command_args.first_table_data_size = (first_table_data.block_size & SEP_TABLE_DATA_SIZE_MASK); | ||
| 1949 | |||
| 1950 | /* send the parameters to user application */ | ||
| 1951 | error = copy_to_user((void *) arg, &command_args, sizeof(struct sep_driver_add_flow_table_t)); | ||
| 1952 | if (error) | ||
| 1953 | error = -EFAULT; | ||
| 1954 | end_function_with_error: | ||
| 1955 | /* free the allocated tables */ | ||
| 1956 | sep_deallocated_flow_tables(&first_table_data); | ||
| 1957 | end_function: | ||
| 1958 | dbg("SEP Driver:<-------- sep_add_flow_tables_handler end\n"); | ||
| 1959 | return error; | ||
| 1960 | } | ||
| 1961 | |||
/*
  this function adds the flow add message to the specified flow
*/
| 1965 | static int sep_add_flow_tables_message_handler(struct sep_device *sep, unsigned long arg) | ||
| 1966 | { | ||
| 1967 | int error; | ||
| 1968 | struct sep_driver_add_message_t command_args; | ||
| 1969 | struct sep_flow_context_t *flow_context_ptr; | ||
| 1970 | |||
| 1971 | dbg("SEP Driver:--------> sep_add_flow_tables_message_handler start\n"); | ||
| 1972 | |||
| 1973 | error = copy_from_user(&command_args, (void *) arg, sizeof(struct sep_driver_add_message_t)); | ||
| 1974 | if (error) { | ||
| 1975 | error = -EFAULT; | ||
| 1976 | goto end_function; | ||
| 1977 | } | ||
| 1978 | |||
| 1979 | /* check input */ | ||
| 1980 | if (command_args.message_size_in_bytes > SEP_MAX_ADD_MESSAGE_LENGTH_IN_BYTES) { | ||
| 1981 | error = -ENOMEM; | ||
| 1982 | goto end_function; | ||
| 1983 | } | ||
| 1984 | |||
| 1985 | /* find the flow context */ | ||
| 1986 | flow_context_ptr = sep_find_flow_context(sep, command_args.flow_id); | ||
| 1987 | if (flow_context_ptr == NULL) | ||
| 1988 | goto end_function; | ||
| 1989 | |||
| 1990 | /* copy the message into context */ | ||
| 1991 | flow_context_ptr->message_size_in_bytes = command_args.message_size_in_bytes; | ||
| 1992 | error = copy_from_user(flow_context_ptr->message, (void *) command_args.message_address, command_args.message_size_in_bytes); | ||
| 1993 | if (error) | ||
| 1994 | error = -EFAULT; | ||
| 1995 | end_function: | ||
| 1996 | dbg("SEP Driver:<-------- sep_add_flow_tables_message_handler end\n"); | ||
| 1997 | return error; | ||
| 1998 | } | ||
| 1999 | |||
| 2000 | |||
| 2001 | /* | ||
| 2002 | this function returns the bus and virtual addresses of the static pool | ||
| 2003 | */ | ||
| 2004 | static int sep_get_static_pool_addr_handler(struct sep_device *sep, unsigned long arg) | ||
| 2005 | { | ||
| 2006 | int error; | ||
| 2007 | struct sep_driver_static_pool_addr_t command_args; | ||
| 2008 | |||
| 2009 | dbg("SEP Driver:--------> sep_get_static_pool_addr_handler start\n"); | ||
| 2010 | |||
| 2011 | /*prepare the output parameters in the struct */ | ||
| 2012 | command_args.physical_static_address = sep->shared_bus + SEP_DRIVER_STATIC_AREA_OFFSET_IN_BYTES; | ||
| 2013 | command_args.virtual_static_address = (unsigned long)sep->shared_addr + SEP_DRIVER_STATIC_AREA_OFFSET_IN_BYTES; | ||
| 2014 | |||
| 2015 | edbg("SEP Driver:bus_static_address is %08lx, virtual_static_address %08lx\n", command_args.physical_static_address, command_args.virtual_static_address); | ||
| 2016 | |||
| 2017 | /* send the parameters to user application */ | ||
| 2018 | error = copy_to_user((void *) arg, &command_args, sizeof(struct sep_driver_static_pool_addr_t)); | ||
| 2019 | if (error) | ||
| 2020 | error = -EFAULT; | ||
| 2021 | dbg("SEP Driver:<-------- sep_get_static_pool_addr_handler end\n"); | ||
| 2022 | return error; | ||
| 2023 | } | ||
| 2024 | |||
/*
  this function gets the offset of the given bus address from the start
  of the mapped area
*/
| 2029 | static int sep_get_physical_mapped_offset_handler(struct sep_device *sep, unsigned long arg) | ||
| 2030 | { | ||
| 2031 | int error; | ||
| 2032 | struct sep_driver_get_mapped_offset_t command_args; | ||
| 2033 | |||
| 2034 | dbg("SEP Driver:--------> sep_get_physical_mapped_offset_handler start\n"); | ||
| 2035 | |||
| 2036 | error = copy_from_user(&command_args, (void *) arg, sizeof(struct sep_driver_get_mapped_offset_t)); | ||
| 2037 | if (error) { | ||
| 2038 | error = -EFAULT; | ||
| 2039 | goto end_function; | ||
| 2040 | } | ||
| 2041 | |||
| 2042 | if (command_args.physical_address < sep->shared_bus) { | ||
| 2043 | error = -EINVAL; | ||
| 2044 | goto end_function; | ||
| 2045 | } | ||
| 2046 | |||
| 2047 | /*prepare the output parameters in the struct */ | ||
| 2048 | command_args.offset = command_args.physical_address - sep->shared_bus; | ||
| 2049 | |||
| 2050 | edbg("SEP Driver:bus_address is %08lx, offset is %lu\n", command_args.physical_address, command_args.offset); | ||
| 2051 | |||
| 2052 | /* send the parameters to user application */ | ||
| 2053 | error = copy_to_user((void *) arg, &command_args, sizeof(struct sep_driver_get_mapped_offset_t)); | ||
| 2054 | if (error) | ||
| 2055 | error = -EFAULT; | ||
| 2056 | end_function: | ||
| 2057 | dbg("SEP Driver:<-------- sep_get_physical_mapped_offset_handler end\n"); | ||
| 2058 | return error; | ||
| 2059 | } | ||
| 2060 | |||
| 2061 | |||
/*
  this function implements the start command: it busy-polls GPR3 for a
  message from the SEP and, on the fatal-error indication (0x1), returns
  the error status read from GPR0
*/
/*
  this function starts the SEP: it busy-waits until GPR3 becomes
  non-zero; a value of 0x1 indicates a fatal error, in which case the
  error status is read from GPR0 and returned, otherwise 0 is returned.

  NOTE(review): the polling loop has neither a timeout nor cpu_relax(),
  so a non-responding SEP hangs this CPU. Also `error` is unsigned long
  while the function returns int, truncating the GPR0 value on 64-bit -
  confirm intended.
*/
static int sep_start_handler(struct sep_device *sep)
{
	unsigned long reg_val;
	unsigned long error = 0;

	dbg("SEP Driver:--------> sep_start_handler start\n");

	/* wait in polling for message from SEP */
	do
		reg_val = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR3_REG_ADDR);
	while (!reg_val);

	/* check the value */
	if (reg_val == 0x1)
		/* fatal error - read error status from GPR0 */
		error = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR0_REG_ADDR);
	dbg("SEP Driver:<-------- sep_start_handler end\n");
	return error;
}
| 2084 | |||
| 2085 | /* | ||
| 2086 | this function handles the request for SEP initialization | ||
| 2087 | */ | ||
| 2088 | static int sep_init_handler(struct sep_device *sep, unsigned long arg) | ||
| 2089 | { | ||
| 2090 | unsigned long message_word; | ||
| 2091 | unsigned long *message_ptr; | ||
| 2092 | struct sep_driver_init_t command_args; | ||
| 2093 | unsigned long counter; | ||
| 2094 | unsigned long error; | ||
| 2095 | unsigned long reg_val; | ||
| 2096 | |||
| 2097 | dbg("SEP Driver:--------> sep_init_handler start\n"); | ||
| 2098 | error = 0; | ||
| 2099 | |||
| 2100 | error = copy_from_user(&command_args, (void *) arg, sizeof(struct sep_driver_init_t)); | ||
| 2101 | if (error) { | ||
| 2102 | error = -EFAULT; | ||
| 2103 | goto end_function; | ||
| 2104 | } | ||
| 2105 | dbg("SEP Driver:--------> sep_init_handler - finished copy_from_user\n"); | ||
| 2106 | |||
| 2107 | /* PATCH - configure the DMA to single -burst instead of multi-burst */ | ||
| 2108 | /*sep_configure_dma_burst(); */ | ||
| 2109 | |||
| 2110 | dbg("SEP Driver:--------> sep_init_handler - finished sep_configure_dma_burst \n"); | ||
| 2111 | |||
| 2112 | message_ptr = (unsigned long *) command_args.message_addr; | ||
| 2113 | |||
| 2114 | /* set the base address of the SRAM */ | ||
| 2115 | sep_write_reg(sep, HW_SRAM_ADDR_REG_ADDR, HW_CC_SRAM_BASE_ADDRESS); | ||
| 2116 | |||
| 2117 | for (counter = 0; counter < command_args.message_size_in_words; counter++, message_ptr++) { | ||
| 2118 | get_user(message_word, message_ptr); | ||
| 2119 | /* write data to SRAM */ | ||
| 2120 | sep_write_reg(sep, HW_SRAM_DATA_REG_ADDR, message_word); | ||
| 2121 | edbg("SEP Driver:message_word is %lu\n", message_word); | ||
| 2122 | /* wait for write complete */ | ||
| 2123 | sep_wait_sram_write(sep); | ||
| 2124 | } | ||
| 2125 | dbg("SEP Driver:--------> sep_init_handler - finished getting messages from user space\n"); | ||
| 2126 | /* signal SEP */ | ||
| 2127 | sep_write_reg(sep, HW_HOST_HOST_SEP_GPR0_REG_ADDR, 0x1); | ||
| 2128 | |||
| 2129 | do | ||
| 2130 | reg_val = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR3_REG_ADDR); | ||
| 2131 | while (!(reg_val & 0xFFFFFFFD)); | ||
| 2132 | |||
| 2133 | dbg("SEP Driver:--------> sep_init_handler - finished waiting for reg_val & 0xFFFFFFFD \n"); | ||
| 2134 | |||
| 2135 | /* check the value */ | ||
| 2136 | if (reg_val == 0x1) { | ||
| 2137 | edbg("SEP Driver:init failed\n"); | ||
| 2138 | |||
| 2139 | error = sep_read_reg(sep, 0x8060); | ||
| 2140 | edbg("SEP Driver:sw monitor is %lu\n", error); | ||
| 2141 | |||
| 2142 | /* fatal error - read erro status from GPRO */ | ||
| 2143 | error = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR0_REG_ADDR); | ||
| 2144 | edbg("SEP Driver:error is %lu\n", error); | ||
| 2145 | } | ||
| 2146 | end_function: | ||
| 2147 | dbg("SEP Driver:<-------- sep_init_handler end\n"); | ||
| 2148 | return error; | ||
| 2149 | |||
| 2150 | } | ||
| 2151 | |||
/*
  this function handles the request for cache and resident reallocation
*/
| 2155 | static int sep_realloc_cache_resident_handler(struct sep_device *sep, | ||
| 2156 | unsigned long arg) | ||
| 2157 | { | ||
| 2158 | struct sep_driver_realloc_cache_resident_t command_args; | ||
| 2159 | int error; | ||
| 2160 | |||
| 2161 | /* copy cache and resident to the their intended locations */ | ||
| 2162 | error = sep_load_firmware(sep); | ||
| 2163 | if (error) | ||
| 2164 | return error; | ||
| 2165 | |||
| 2166 | command_args.new_base_addr = sep->shared_bus; | ||
| 2167 | |||
| 2168 | /* find the new base address according to the lowest address between | ||
| 2169 | cache, resident and shared area */ | ||
| 2170 | if (sep->resident_bus < command_args.new_base_addr) | ||
| 2171 | command_args.new_base_addr = sep->resident_bus; | ||
| 2172 | if (sep->rar_bus < command_args.new_base_addr) | ||
| 2173 | command_args.new_base_addr = sep->rar_bus; | ||
| 2174 | |||
| 2175 | /* set the return parameters */ | ||
| 2176 | command_args.new_cache_addr = sep->rar_bus; | ||
| 2177 | command_args.new_resident_addr = sep->resident_bus; | ||
| 2178 | |||
| 2179 | /* set the new shared area */ | ||
| 2180 | command_args.new_shared_area_addr = sep->shared_bus; | ||
| 2181 | |||
| 2182 | edbg("SEP Driver:command_args.new_shared_addr is %08llx\n", command_args.new_shared_area_addr); | ||
| 2183 | edbg("SEP Driver:command_args.new_base_addr is %08llx\n", command_args.new_base_addr); | ||
| 2184 | edbg("SEP Driver:command_args.new_resident_addr is %08llx\n", command_args.new_resident_addr); | ||
| 2185 | edbg("SEP Driver:command_args.new_rar_addr is %08llx\n", command_args.new_cache_addr); | ||
| 2186 | |||
| 2187 | /* return to user */ | ||
| 2188 | if (copy_to_user((void *) arg, &command_args, sizeof(struct sep_driver_realloc_cache_resident_t))) | ||
| 2189 | return -EFAULT; | ||
| 2190 | return 0; | ||
| 2191 | } | ||
| 2192 | |||
| 2193 | /** | ||
| 2194 | * sep_get_time_handler - time request from user space | ||
| 2195 | * @sep: sep we are to set the time for | ||
| 2196 | * @arg: pointer to user space arg buffer | ||
| 2197 | * | ||
| 2198 | * This function reports back the time and the address in the SEP | ||
| 2199 | * shared buffer at which it has been placed. (Do we really need this!!!) | ||
| 2200 | */ | ||
| 2201 | |||
| 2202 | static int sep_get_time_handler(struct sep_device *sep, unsigned long arg) | ||
| 2203 | { | ||
| 2204 | struct sep_driver_get_time_t command_args; | ||
| 2205 | |||
| 2206 | mutex_lock(&sep_mutex); | ||
| 2207 | command_args.time_value = sep_set_time(sep); | ||
| 2208 | command_args.time_physical_address = (unsigned long)sep_time_address(sep); | ||
| 2209 | mutex_unlock(&sep_mutex); | ||
| 2210 | if (copy_to_user((void __user *)arg, | ||
| 2211 | &command_args, sizeof(struct sep_driver_get_time_t))) | ||
| 2212 | return -EFAULT; | ||
| 2213 | return 0; | ||
| 2214 | |||
| 2215 | } | ||
| 2216 | |||
/*
  This API handles the end transaction request.
  Note: @arg is currently unused, and the whole interrupt/mutex teardown
  below is compiled out, so today this is effectively a no-op that
  always returns 0.
*/
static int sep_end_transaction_handler(struct sep_device *sep, unsigned long arg)
{
	dbg("SEP Driver:--------> sep_end_transaction_handler start\n");

#if 0 /*!SEP_DRIVER_POLLING_MODE */
	/* close IMR */
	sep_write_reg(sep, HW_HOST_IMR_REG_ADDR, 0x7FFF);

	/* release IRQ line */
	free_irq(SEP_DIRVER_IRQ_NUM, sep);

	/* release the sep mutex (old comment wrongly said "lock") */
	mutex_unlock(&sep_mutex);
#endif

	dbg("SEP Driver:<-------- sep_end_transaction_handler end\n");

	return 0;
}
| 2239 | |||
| 2240 | |||
| 2241 | /** | ||
| 2242 | * sep_set_flow_id_handler - handle flow setting | ||
| 2243 | * @sep: the SEP we are configuring | ||
| 2244 | * @flow_id: the flow we are setting | ||
| 2245 | * | ||
| 2246 | * This function handler the set flow id command | ||
| 2247 | */ | ||
| 2248 | static int sep_set_flow_id_handler(struct sep_device *sep, | ||
| 2249 | unsigned long flow_id) | ||
| 2250 | { | ||
| 2251 | int error = 0; | ||
| 2252 | struct sep_flow_context_t *flow_data_ptr; | ||
| 2253 | |||
| 2254 | /* find the flow data structure that was just used for creating new flow | ||
| 2255 | - its id should be default */ | ||
| 2256 | |||
| 2257 | mutex_lock(&sep_mutex); | ||
| 2258 | flow_data_ptr = sep_find_flow_context(sep, SEP_TEMP_FLOW_ID); | ||
| 2259 | if (flow_data_ptr) | ||
| 2260 | flow_data_ptr->flow_id = flow_id; /* set flow id */ | ||
| 2261 | else | ||
| 2262 | error = -EINVAL; | ||
| 2263 | mutex_unlock(&sep_mutex); | ||
| 2264 | return error; | ||
| 2265 | } | ||
| 2266 | |||
| 2267 | static long sep_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) | ||
| 2268 | { | ||
| 2269 | int error = 0; | ||
| 2270 | struct sep_device *sep = filp->private_data; | ||
| 2271 | |||
| 2272 | dbg("------------>SEP Driver: ioctl start\n"); | ||
| 2273 | |||
| 2274 | edbg("SEP Driver: cmd is %x\n", cmd); | ||
| 2275 | |||
| 2276 | switch (cmd) { | ||
| 2277 | case SEP_IOCSENDSEPCOMMAND: | ||
| 2278 | /* send command to SEP */ | ||
| 2279 | sep_send_command_handler(sep); | ||
| 2280 | edbg("SEP Driver: after sep_send_command_handler\n"); | ||
| 2281 | break; | ||
| 2282 | case SEP_IOCSENDSEPRPLYCOMMAND: | ||
| 2283 | /* send reply command to SEP */ | ||
| 2284 | sep_send_reply_command_handler(sep); | ||
| 2285 | break; | ||
| 2286 | case SEP_IOCALLOCDATAPOLL: | ||
| 2287 | /* allocate data pool */ | ||
| 2288 | error = sep_allocate_data_pool_memory_handler(sep, arg); | ||
| 2289 | break; | ||
| 2290 | case SEP_IOCWRITEDATAPOLL: | ||
| 2291 | /* write data into memory pool */ | ||
| 2292 | error = sep_write_into_data_pool_handler(sep, arg); | ||
| 2293 | break; | ||
| 2294 | case SEP_IOCREADDATAPOLL: | ||
| 2295 | /* read data from data pool into application memory */ | ||
| 2296 | error = sep_read_from_data_pool_handler(sep, arg); | ||
| 2297 | break; | ||
| 2298 | case SEP_IOCCREATESYMDMATABLE: | ||
| 2299 | /* create dma table for synhronic operation */ | ||
| 2300 | error = sep_create_sync_dma_tables_handler(sep, arg); | ||
| 2301 | break; | ||
| 2302 | case SEP_IOCCREATEFLOWDMATABLE: | ||
| 2303 | /* create flow dma tables */ | ||
| 2304 | error = sep_create_flow_dma_tables_handler(sep, arg); | ||
| 2305 | break; | ||
| 2306 | case SEP_IOCFREEDMATABLEDATA: | ||
| 2307 | /* free the pages */ | ||
| 2308 | error = sep_free_dma_table_data_handler(sep); | ||
| 2309 | break; | ||
| 2310 | case SEP_IOCSETFLOWID: | ||
| 2311 | /* set flow id */ | ||
| 2312 | error = sep_set_flow_id_handler(sep, (unsigned long)arg); | ||
| 2313 | break; | ||
| 2314 | case SEP_IOCADDFLOWTABLE: | ||
| 2315 | /* add tables to the dynamic flow */ | ||
| 2316 | error = sep_add_flow_tables_handler(sep, arg); | ||
| 2317 | break; | ||
| 2318 | case SEP_IOCADDFLOWMESSAGE: | ||
| 2319 | /* add message of add tables to flow */ | ||
| 2320 | error = sep_add_flow_tables_message_handler(sep, arg); | ||
| 2321 | break; | ||
| 2322 | case SEP_IOCSEPSTART: | ||
| 2323 | /* start command to sep */ | ||
| 2324 | error = sep_start_handler(sep); | ||
| 2325 | break; | ||
| 2326 | case SEP_IOCSEPINIT: | ||
| 2327 | /* init command to sep */ | ||
| 2328 | error = sep_init_handler(sep, arg); | ||
| 2329 | break; | ||
| 2330 | case SEP_IOCGETSTATICPOOLADDR: | ||
| 2331 | /* get the physical and virtual addresses of the static pool */ | ||
| 2332 | error = sep_get_static_pool_addr_handler(sep, arg); | ||
| 2333 | break; | ||
| 2334 | case SEP_IOCENDTRANSACTION: | ||
| 2335 | error = sep_end_transaction_handler(sep, arg); | ||
| 2336 | break; | ||
| 2337 | case SEP_IOCREALLOCCACHERES: | ||
| 2338 | error = sep_realloc_cache_resident_handler(sep, arg); | ||
| 2339 | break; | ||
| 2340 | case SEP_IOCGETMAPPEDADDROFFSET: | ||
| 2341 | error = sep_get_physical_mapped_offset_handler(sep, arg); | ||
| 2342 | break; | ||
| 2343 | case SEP_IOCGETIME: | ||
| 2344 | error = sep_get_time_handler(sep, arg); | ||
| 2345 | break; | ||
| 2346 | default: | ||
| 2347 | error = -ENOTTY; | ||
| 2348 | break; | ||
| 2349 | } | ||
| 2350 | dbg("SEP Driver:<-------- ioctl end\n"); | ||
| 2351 | return error; | ||
| 2352 | } | ||
| 2353 | |||
| 2354 | |||
| 2355 | |||
| 2356 | #if !SEP_DRIVER_POLLING_MODE | ||
| 2357 | |||
| 2358 | /* handler for flow done interrupt */ | ||
| 2359 | |||
| 2360 | static void sep_flow_done_handler(struct work_struct *work) | ||
| 2361 | { | ||
| 2362 | struct sep_flow_context_t *flow_data_ptr; | ||
| 2363 | |||
| 2364 | /* obtain the mutex */ | ||
| 2365 | mutex_lock(&sep_mutex); | ||
| 2366 | |||
| 2367 | /* get the pointer to context */ | ||
| 2368 | flow_data_ptr = (struct sep_flow_context_t *) work; | ||
| 2369 | |||
| 2370 | /* free all the current input tables in sep */ | ||
| 2371 | sep_deallocated_flow_tables(&flow_data_ptr->input_tables_in_process); | ||
| 2372 | |||
| 2373 | /* free all the current tables output tables in SEP (if needed) */ | ||
| 2374 | if (flow_data_ptr->output_tables_in_process.physical_address != 0xffffffff) | ||
| 2375 | sep_deallocated_flow_tables(&flow_data_ptr->output_tables_in_process); | ||
| 2376 | |||
| 2377 | /* check if we have additional tables to be sent to SEP only input | ||
| 2378 | flag may be checked */ | ||
| 2379 | if (flow_data_ptr->input_tables_flag) { | ||
| 2380 | /* copy the message to the shared RAM and signal SEP */ | ||
| 2381 | memcpy((void *) flow_data_ptr->message, (void *) sep->shared_addr, flow_data_ptr->message_size_in_bytes); | ||
| 2382 | |||
| 2383 | sep_write_reg(sep, HW_HOST_HOST_SEP_GPR2_REG_ADDR, 0x2); | ||
| 2384 | } | ||
| 2385 | mutex_unlock(&sep_mutex); | ||
| 2386 | } | ||
/*
  interrupt handler function

  Reads the IRR register and distinguishes three cases: a flow-done
  interrupt (currently disabled - the test is hardwired to 0), a reply
  interrupt from SEP (bit 13), or an interrupt that is not ours
  (returns IRQ_NONE without touching ICR).
*/
static irqreturn_t sep_inthandler(int irq, void *dev_id)
{
	irqreturn_t int_error;
	unsigned long reg_val;
	unsigned long flow_id;
	struct sep_flow_context_t *flow_context_ptr;
	struct sep_device *sep = dev_id;

	int_error = IRQ_HANDLED;

	/* read the IRR register to check if this is SEP interrupt */
	reg_val = sep_read_reg(sep, HW_HOST_IRR_REG_ADDR);
	edbg("SEP Interrupt - reg is %08lx\n", reg_val);

	/* check if this is the flow interrupt (dead code: condition is 0) */
	if (0 /*reg_val & (0x1 << 11) */ ) {
		/* read GPRO to find out the which flow is done.
		   NOTE(review): this actually re-reads IRR, not a GPR
		   register - looks wrong, but the branch is unreachable */
		flow_id = sep_read_reg(sep, HW_HOST_IRR_REG_ADDR);

		/* find the contex of the flow (flow id in the top nibble) */
		flow_context_ptr = sep_find_flow_context(sep, flow_id >> 28);
		if (flow_context_ptr == NULL)
			goto end_function_with_error;

		/* queue the work */
		INIT_WORK(&flow_context_ptr->flow_wq, sep_flow_done_handler);
		queue_work(sep->flow_wq, &flow_context_ptr->flow_wq);

	} else {
		/* check if this is reply interrupt from SEP */
		if (reg_val & (0x1 << 13)) {
			/* update the counter of reply messages */
			sep->reply_ct++;
			/* wake up the waiting process */
			wake_up(&sep_event);
		} else {
			/* not our interrupt - do not clear ICR */
			int_error = IRQ_NONE;
			goto end_function;
		}
	}
end_function_with_error:
	/* clear the interrupt (both handled paths fall through to here) */
	sep_write_reg(sep, HW_HOST_ICR_REG_ADDR, reg_val);
end_function:
	return int_error;
}
| 2436 | |||
| 2437 | #endif | ||
| 2438 | |||
| 2439 | |||
| 2440 | |||
| 2441 | #if 0 | ||
| 2442 | |||
| 2443 | static void sep_wait_busy(struct sep_device *sep) | ||
| 2444 | { | ||
| 2445 | u32 reg; | ||
| 2446 | |||
| 2447 | do { | ||
| 2448 | reg = sep_read_reg(sep, HW_HOST_SEP_BUSY_REG_ADDR); | ||
| 2449 | } while (reg); | ||
| 2450 | } | ||
| 2451 | |||
/*
  PATCH for configuring the DMA to single burst instead of multi-burst.
  The register-access handshake with SEP (GPR0 = 0x2 to request access,
  0x0 to release, with busy-waits in between) is order sensitive; do
  not reorder these writes.
*/
static void sep_configure_dma_burst(struct sep_device *sep)
{
#define HW_AHB_RD_WR_BURSTS_REG_ADDR 0x0E10UL

	dbg("SEP Driver:<-------- sep_configure_dma_burst start \n");

	/* request access to registers from SEP */
	sep_write_reg(sep, HW_HOST_HOST_SEP_GPR0_REG_ADDR, 0x2);

	dbg("SEP Driver:<-------- sep_configure_dma_burst finished request access to registers from SEP (write reg) \n");

	/* wait until SEP is no longer busy before touching its registers */
	sep_wait_busy(sep);

	dbg("SEP Driver:<-------- sep_configure_dma_burst finished request access to registers from SEP (while(revVal) wait loop) \n");

	/* set the DMA burst register to single burst */
	sep_write_reg(sep, HW_AHB_RD_WR_BURSTS_REG_ADDR, 0x0UL);

	/* release the sep busy */
	sep_write_reg(sep, HW_HOST_HOST_SEP_GPR0_REG_ADDR, 0x0UL);
	sep_wait_busy(sep);

	dbg("SEP Driver:<-------- sep_configure_dma_burst done \n");

}
| 2480 | |||
| 2481 | #endif | ||
| 2482 | |||
| 2483 | /* | ||
| 2484 | Function that is activated on the successful probe of the SEP device | ||
| 2485 | */ | ||
| 2486 | static int __devinit sep_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | ||
| 2487 | { | ||
| 2488 | int error = 0; | ||
| 2489 | struct sep_device *sep; | ||
| 2490 | int counter; | ||
| 2491 | int size; /* size of memory for allocation */ | ||
| 2492 | |||
| 2493 | edbg("Sep pci probe starting\n"); | ||
| 2494 | if (sep_dev != NULL) { | ||
| 2495 | dev_warn(&pdev->dev, "only one SEP supported.\n"); | ||
| 2496 | return -EBUSY; | ||
| 2497 | } | ||
| 2498 | |||
| 2499 | /* enable the device */ | ||
| 2500 | error = pci_enable_device(pdev); | ||
| 2501 | if (error) { | ||
| 2502 | edbg("error enabling pci device\n"); | ||
| 2503 | goto end_function; | ||
| 2504 | } | ||
| 2505 | |||
| 2506 | /* set the pci dev pointer */ | ||
| 2507 | sep_dev = &sep_instance; | ||
| 2508 | sep = &sep_instance; | ||
| 2509 | |||
| 2510 | edbg("sep->shared_addr = %p\n", sep->shared_addr); | ||
| 2511 | /* transaction counter that coordinates the transactions between SEP | ||
| 2512 | and HOST */ | ||
| 2513 | sep->send_ct = 0; | ||
| 2514 | /* counter for the messages from sep */ | ||
| 2515 | sep->reply_ct = 0; | ||
| 2516 | /* counter for the number of bytes allocated in the pool | ||
| 2517 | for the current transaction */ | ||
| 2518 | sep->data_pool_bytes_allocated = 0; | ||
| 2519 | |||
| 2520 | /* calculate the total size for allocation */ | ||
| 2521 | size = SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES + | ||
| 2522 | SEP_DRIVER_SYNCHRONIC_DMA_TABLES_AREA_SIZE_IN_BYTES + SEP_DRIVER_DATA_POOL_SHARED_AREA_SIZE_IN_BYTES + SEP_DRIVER_FLOW_DMA_TABLES_AREA_SIZE_IN_BYTES + SEP_DRIVER_STATIC_AREA_SIZE_IN_BYTES + SEP_DRIVER_SYSTEM_DATA_MEMORY_SIZE_IN_BYTES; | ||
| 2523 | |||
| 2524 | /* allocate the shared area */ | ||
| 2525 | if (sep_map_and_alloc_shared_area(sep, size)) { | ||
| 2526 | error = -ENOMEM; | ||
| 2527 | /* allocation failed */ | ||
| 2528 | goto end_function_error; | ||
| 2529 | } | ||
| 2530 | /* now set the memory regions */ | ||
| 2531 | #if (SEP_DRIVER_RECONFIG_MESSAGE_AREA == 1) | ||
| 2532 | /* Note: this test section will need moving before it could ever | ||
| 2533 | work as the registers are not yet mapped ! */ | ||
| 2534 | /* send the new SHARED MESSAGE AREA to the SEP */ | ||
| 2535 | sep_write_reg(sep, HW_HOST_HOST_SEP_GPR1_REG_ADDR, sep->shared_bus); | ||
| 2536 | |||
| 2537 | /* poll for SEP response */ | ||
| 2538 | retval = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR1_REG_ADDR); | ||
| 2539 | while (retval != 0xffffffff && retval != sep->shared_bus) | ||
| 2540 | retval = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR1_REG_ADDR); | ||
| 2541 | |||
| 2542 | /* check the return value (register) */ | ||
| 2543 | if (retval != sep->shared_bus) { | ||
| 2544 | error = -ENOMEM; | ||
| 2545 | goto end_function_deallocate_sep_shared_area; | ||
| 2546 | } | ||
| 2547 | #endif | ||
| 2548 | /* init the flow contextes */ | ||
| 2549 | for (counter = 0; counter < SEP_DRIVER_NUM_FLOWS; counter++) | ||
| 2550 | sep->flows[counter].flow_id = SEP_FREE_FLOW_ID; | ||
| 2551 | |||
| 2552 | sep->flow_wq = create_singlethread_workqueue("sepflowwq"); | ||
| 2553 | if (sep->flow_wq == NULL) { | ||
| 2554 | error = -ENOMEM; | ||
| 2555 | edbg("sep_driver:flow queue creation failed\n"); | ||
| 2556 | goto end_function_deallocate_sep_shared_area; | ||
| 2557 | } | ||
| 2558 | edbg("SEP Driver: create flow workqueue \n"); | ||
| 2559 | sep->pdev = pci_dev_get(pdev); | ||
| 2560 | |||
| 2561 | sep->reg_addr = pci_ioremap_bar(pdev, 0); | ||
| 2562 | if (!sep->reg_addr) { | ||
| 2563 | edbg("sep: ioremap of registers failed.\n"); | ||
| 2564 | goto end_function_deallocate_sep_shared_area; | ||
| 2565 | } | ||
| 2566 | edbg("SEP Driver:reg_addr is %p\n", sep->reg_addr); | ||
| 2567 | |||
| 2568 | /* load the rom code */ | ||
| 2569 | sep_load_rom_code(sep); | ||
| 2570 | |||
| 2571 | /* set up system base address and shared memory location */ | ||
| 2572 | sep->rar_addr = dma_alloc_coherent(&sep->pdev->dev, | ||
| 2573 | 2 * SEP_RAR_IO_MEM_REGION_SIZE, | ||
| 2574 | &sep->rar_bus, GFP_KERNEL); | ||
| 2575 | |||
| 2576 | if (!sep->rar_addr) { | ||
| 2577 | edbg("SEP Driver:can't allocate rar\n"); | ||
| 2578 | goto end_function_uniomap; | ||
| 2579 | } | ||
| 2580 | |||
| 2581 | |||
| 2582 | edbg("SEP Driver:rar_bus is %08llx\n", (unsigned long long)sep->rar_bus); | ||
| 2583 | edbg("SEP Driver:rar_virtual is %p\n", sep->rar_addr); | ||
| 2584 | |||
| 2585 | #if !SEP_DRIVER_POLLING_MODE | ||
| 2586 | |||
| 2587 | edbg("SEP Driver: about to write IMR and ICR REG_ADDR\n"); | ||
| 2588 | |||
| 2589 | /* clear ICR register */ | ||
| 2590 | sep_write_reg(sep, HW_HOST_ICR_REG_ADDR, 0xFFFFFFFF); | ||
| 2591 | |||
| 2592 | /* set the IMR register - open only GPR 2 */ | ||
| 2593 | sep_write_reg(sep, HW_HOST_IMR_REG_ADDR, (~(0x1 << 13))); | ||
| 2594 | |||
| 2595 | edbg("SEP Driver: about to call request_irq\n"); | ||
| 2596 | /* get the interrupt line */ | ||
| 2597 | error = request_irq(pdev->irq, sep_inthandler, IRQF_SHARED, "sep_driver", sep); | ||
| 2598 | if (error) | ||
| 2599 | goto end_function_free_res; | ||
| 2600 | return 0; | ||
| 2601 | edbg("SEP Driver: about to write IMR REG_ADDR"); | ||
| 2602 | |||
| 2603 | /* set the IMR register - open only GPR 2 */ | ||
| 2604 | sep_write_reg(sep, HW_HOST_IMR_REG_ADDR, (~(0x1 << 13))); | ||
| 2605 | |||
| 2606 | end_function_free_res: | ||
| 2607 | dma_free_coherent(&sep->pdev->dev, 2 * SEP_RAR_IO_MEM_REGION_SIZE, | ||
| 2608 | sep->rar_addr, sep->rar_bus); | ||
| 2609 | #endif /* SEP_DRIVER_POLLING_MODE */ | ||
| 2610 | end_function_uniomap: | ||
| 2611 | iounmap(sep->reg_addr); | ||
| 2612 | end_function_deallocate_sep_shared_area: | ||
| 2613 | /* de-allocate shared area */ | ||
| 2614 | sep_unmap_and_free_shared_area(sep, size); | ||
| 2615 | end_function_error: | ||
| 2616 | sep_dev = NULL; | ||
| 2617 | end_function: | ||
| 2618 | return error; | ||
| 2619 | } | ||
| 2620 | |||
/* PCI ids this driver binds to (Intel SEP, device 0x080c);
   zero-terminated as required by the PCI core */
static const struct pci_device_id sep_pci_id_tbl[] = {
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x080c)},
	{0}
};

/* export the id table so hotplug/modprobe can match the device */
MODULE_DEVICE_TABLE(pci, sep_pci_id_tbl);
| 2627 | |||
/* field for registering driver to PCI device; no .remove is provided,
   so the device cannot be cleanly unbound (see FIXME below) */
static struct pci_driver sep_pci_driver = {
	.name = "sep_sec_driver",
	.id_table = sep_pci_id_tbl,
	.probe = sep_probe
	/* FIXME: remove handler */
};
| 2635 | |||
| 2636 | /* major and minor device numbers */ | ||
| 2637 | static dev_t sep_devno; | ||
| 2638 | |||
| 2639 | /* the files operations structure of the driver */ | ||
| 2640 | static struct file_operations sep_file_operations = { | ||
| 2641 | .owner = THIS_MODULE, | ||
| 2642 | .unlocked_ioctl = sep_ioctl, | ||
| 2643 | .poll = sep_poll, | ||
| 2644 | .open = sep_open, | ||
| 2645 | .release = sep_release, | ||
| 2646 | .mmap = sep_mmap, | ||
| 2647 | }; | ||
| 2648 | |||
| 2649 | |||
| 2650 | /* cdev struct of the driver */ | ||
| 2651 | static struct cdev sep_cdev; | ||
| 2652 | |||
| 2653 | /* | ||
| 2654 | this function registers the driver to the file system | ||
| 2655 | */ | ||
| 2656 | static int sep_register_driver_to_fs(void) | ||
| 2657 | { | ||
| 2658 | int ret_val = alloc_chrdev_region(&sep_devno, 0, 1, "sep_sec_driver"); | ||
| 2659 | if (ret_val) { | ||
| 2660 | edbg("sep: major number allocation failed, retval is %d\n", | ||
| 2661 | ret_val); | ||
| 2662 | return ret_val; | ||
| 2663 | } | ||
| 2664 | /* init cdev */ | ||
| 2665 | cdev_init(&sep_cdev, &sep_file_operations); | ||
| 2666 | sep_cdev.owner = THIS_MODULE; | ||
| 2667 | |||
| 2668 | /* register the driver with the kernel */ | ||
| 2669 | ret_val = cdev_add(&sep_cdev, sep_devno, 1); | ||
| 2670 | if (ret_val) { | ||
| 2671 | edbg("sep_driver:cdev_add failed, retval is %d\n", ret_val); | ||
| 2672 | /* unregister dev numbers */ | ||
| 2673 | unregister_chrdev_region(sep_devno, 1); | ||
| 2674 | } | ||
| 2675 | return ret_val; | ||
| 2676 | } | ||
| 2677 | |||
| 2678 | |||
| 2679 | /*-------------------------------------------------------------- | ||
| 2680 | init function | ||
| 2681 | ----------------------------------------------------------------*/ | ||
| 2682 | static int __init sep_init(void) | ||
| 2683 | { | ||
| 2684 | int ret_val = 0; | ||
| 2685 | dbg("SEP Driver:-------->Init start\n"); | ||
| 2686 | /* FIXME: Probe can occur before we are ready to survive a probe */ | ||
| 2687 | ret_val = pci_register_driver(&sep_pci_driver); | ||
| 2688 | if (ret_val) { | ||
| 2689 | edbg("sep_driver:sep_driver_to_device failed, ret_val is %d\n", ret_val); | ||
| 2690 | goto end_function_unregister_from_fs; | ||
| 2691 | } | ||
| 2692 | /* register driver to fs */ | ||
| 2693 | ret_val = sep_register_driver_to_fs(); | ||
| 2694 | if (ret_val) | ||
| 2695 | goto end_function_unregister_pci; | ||
| 2696 | goto end_function; | ||
| 2697 | end_function_unregister_pci: | ||
| 2698 | pci_unregister_driver(&sep_pci_driver); | ||
| 2699 | end_function_unregister_from_fs: | ||
| 2700 | /* unregister from fs */ | ||
| 2701 | cdev_del(&sep_cdev); | ||
| 2702 | /* unregister dev numbers */ | ||
| 2703 | unregister_chrdev_region(sep_devno, 1); | ||
| 2704 | end_function: | ||
| 2705 | dbg("SEP Driver:<-------- Init end\n"); | ||
| 2706 | return ret_val; | ||
| 2707 | } | ||
| 2708 | |||
| 2709 | |||
| 2710 | /*------------------------------------------------------------- | ||
| 2711 | exit function | ||
| 2712 | --------------------------------------------------------------*/ | ||
| 2713 | static void __exit sep_exit(void) | ||
| 2714 | { | ||
| 2715 | int size; | ||
| 2716 | |||
| 2717 | dbg("SEP Driver:--------> Exit start\n"); | ||
| 2718 | |||
| 2719 | /* unregister from fs */ | ||
| 2720 | cdev_del(&sep_cdev); | ||
| 2721 | /* unregister dev numbers */ | ||
| 2722 | unregister_chrdev_region(sep_devno, 1); | ||
| 2723 | /* calculate the total size for de-allocation */ | ||
| 2724 | size = SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES + | ||
| 2725 | SEP_DRIVER_SYNCHRONIC_DMA_TABLES_AREA_SIZE_IN_BYTES + SEP_DRIVER_DATA_POOL_SHARED_AREA_SIZE_IN_BYTES + SEP_DRIVER_FLOW_DMA_TABLES_AREA_SIZE_IN_BYTES + SEP_DRIVER_STATIC_AREA_SIZE_IN_BYTES + SEP_DRIVER_SYSTEM_DATA_MEMORY_SIZE_IN_BYTES; | ||
| 2726 | /* FIXME: We need to do this in the unload for the device */ | ||
| 2727 | /* free shared area */ | ||
| 2728 | if (sep_dev) { | ||
| 2729 | sep_unmap_and_free_shared_area(sep_dev, size); | ||
| 2730 | edbg("SEP Driver: free pages SEP SHARED AREA \n"); | ||
| 2731 | iounmap((void *) sep_dev->reg_addr); | ||
| 2732 | edbg("SEP Driver: iounmap \n"); | ||
| 2733 | } | ||
| 2734 | edbg("SEP Driver: release_mem_region \n"); | ||
| 2735 | dbg("SEP Driver:<-------- Exit end\n"); | ||
| 2736 | } | ||
| 2737 | |||
| 2738 | |||
/* module entry/exit points and license declaration */
module_init(sep_init);
module_exit(sep_exit);

MODULE_LICENSE("GPL");
diff --git a/drivers/staging/sep/sep_driver_api.h b/drivers/staging/sep/sep_driver_api.h deleted file mode 100644 index 7ef16da7c4ef..000000000000 --- a/drivers/staging/sep/sep_driver_api.h +++ /dev/null | |||
| @@ -1,425 +0,0 @@ | |||
| 1 | /* | ||
| 2 | * | ||
| 3 | * sep_driver_api.h - Security Processor Driver api definitions | ||
| 4 | * | ||
| 5 | * Copyright(c) 2009 Intel Corporation. All rights reserved. | ||
| 6 | * Copyright(c) 2009 Discretix. All rights reserved. | ||
| 7 | * | ||
| 8 | * This program is free software; you can redistribute it and/or modify it | ||
| 9 | * under the terms of the GNU General Public License as published by the Free | ||
| 10 | * Software Foundation; either version 2 of the License, or (at your option) | ||
| 11 | * any later version. | ||
| 12 | * | ||
| 13 | * This program is distributed in the hope that it will be useful, but WITHOUT | ||
| 14 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
| 15 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
| 16 | * more details. | ||
| 17 | * | ||
| 18 | * You should have received a copy of the GNU General Public License along with | ||
| 19 | * this program; if not, write to the Free Software Foundation, Inc., 59 | ||
| 20 | * Temple Place - Suite 330, Boston, MA 02111-1307, USA. | ||
| 21 | * | ||
| 22 | * CONTACTS: | ||
| 23 | * | ||
| 24 | * Mark Allyn mark.a.allyn@intel.com | ||
| 25 | * | ||
| 26 | * CHANGES: | ||
| 27 | * | ||
| 28 | * 2009.06.26 Initial publish | ||
| 29 | * | ||
| 30 | */ | ||
| 31 | |||
#ifndef __SEP_DRIVER_API_H__
#define __SEP_DRIVER_API_H__



/*----------------------------------------------------------------
  IOCTL command defines

  Note: command numbers 14 and 18 are unused - presumably retired
  commands; keep the remaining numbers stable for user space ABI.
  -----------------------------------------------------------------*/

/* magic number 1 of the sep IOCTL command */
#define SEP_IOC_MAGIC_NUMBER 's'

/* sends interrupt to sep that message is ready */
#define SEP_IOCSENDSEPCOMMAND _IO(SEP_IOC_MAGIC_NUMBER , 0)

/* sends interrupt to sep that reply message is ready */
#define SEP_IOCSENDSEPRPLYCOMMAND _IO(SEP_IOC_MAGIC_NUMBER , 1)

/* allocate memory in data pool */
#define SEP_IOCALLOCDATAPOLL _IO(SEP_IOC_MAGIC_NUMBER , 2)

/* write to pre-allocated memory in data pool */
#define SEP_IOCWRITEDATAPOLL _IO(SEP_IOC_MAGIC_NUMBER , 3)

/* read from pre-allocated memory in data pool */
#define SEP_IOCREADDATAPOLL _IO(SEP_IOC_MAGIC_NUMBER , 4)

/* create sym dma lli tables */
#define SEP_IOCCREATESYMDMATABLE _IO(SEP_IOC_MAGIC_NUMBER , 5)

/* create flow dma lli tables */
#define SEP_IOCCREATEFLOWDMATABLE _IO(SEP_IOC_MAGIC_NUMBER , 6)

/* free dynamic data allocated during table creation */
#define SEP_IOCFREEDMATABLEDATA _IO(SEP_IOC_MAGIC_NUMBER , 7)

/* get the static pool area addresses (physical and virtual) */
#define SEP_IOCGETSTATICPOOLADDR _IO(SEP_IOC_MAGIC_NUMBER , 8)

/* set flow id command */
#define SEP_IOCSETFLOWID _IO(SEP_IOC_MAGIC_NUMBER , 9)

/* add tables to the dynamic flow */
#define SEP_IOCADDFLOWTABLE _IO(SEP_IOC_MAGIC_NUMBER , 10)

/* add flow add tables message */
#define SEP_IOCADDFLOWMESSAGE _IO(SEP_IOC_MAGIC_NUMBER , 11)

/* start sep command */
#define SEP_IOCSEPSTART _IO(SEP_IOC_MAGIC_NUMBER , 12)

/* init sep command */
#define SEP_IOCSEPINIT _IO(SEP_IOC_MAGIC_NUMBER , 13)

/* end transaction command */
#define SEP_IOCENDTRANSACTION _IO(SEP_IOC_MAGIC_NUMBER , 15)

/* reallocate cache and resident */
#define SEP_IOCREALLOCCACHERES _IO(SEP_IOC_MAGIC_NUMBER , 16)

/* get the offset of the address starting from the beginning of the map area */
#define SEP_IOCGETMAPPEDADDROFFSET _IO(SEP_IOC_MAGIC_NUMBER , 17)

/* get time address and value */
#define SEP_IOCGETIME _IO(SEP_IOC_MAGIC_NUMBER , 19)

/*-------------------------------------------
  TYPEDEFS
  ----------------------------------------------*/
| 101 | |||
/*
  init command struct (presumably consumed by the SEP_IOCSEPINIT
  handler - verify against sep_init_handler in the driver)
*/
struct sep_driver_init_t {
	/* start of the 1G of the host memory address that SEP can access */
	unsigned long message_addr;

	/* size of the init message in words (old comment said "start
	   address of resident", which does not match the field name) */
	unsigned long message_size_in_words;

};
| 113 | |||
| 114 | |||
/*
  realloc cache resident command - reply structure filled in by
  sep_realloc_cache_resident_handler() for SEP_IOCREALLOCCACHERES
*/
struct sep_driver_realloc_cache_resident_t {
	/* new cache address */
	u64 new_cache_addr;
	/* new resident address */
	u64 new_resident_addr;
	/* new shared area address (old comment was a stale copy/paste of
	   "new resident address") */
	u64 new_shared_area_addr;
	/* new base address: lowest of the shared, resident and cache
	   bus addresses */
	u64 new_base_addr;
};
| 128 | |||
/* data pool allocation request/response */
struct sep_driver_alloc_t {
	/* virtual address of allocated space
	   NOTE(review): the name says "offset" - confirm whether this is
	   an address or an offset into the pool */
	unsigned long offset;

	/* physical address of allocated space */
	unsigned long phys_address;

	/* number of bytes to allocate */
	unsigned long num_bytes;
};
| 139 | |||
/*
  write-to-data-pool request: copy num_bytes from the application
  buffer into the data pool
*/
struct sep_driver_write_t {
	/* application space address (source) */
	unsigned long app_address;

	/* address of the data pool (destination) */
	unsigned long datapool_address;

	/* number of bytes to write */
	unsigned long num_bytes;
};
| 152 | |||
/*
  read-from-data-pool request: copy num_bytes from the data pool into
  the application buffer
*/
struct sep_driver_read_t {
	/* application space address (destination) */
	unsigned long app_address;

	/* address of the data pool (source) */
	unsigned long datapool_address;

	/* number of bytes to read */
	unsigned long num_bytes;
};
| 165 | |||
/*
  request/response for building the DMA tables of a synchronous
  operation; the input fields describe the user buffers, the table
  fields are filled in by the driver
*/
struct sep_driver_build_sync_table_t {
	/* address value of the data in */
	unsigned long app_in_address;

	/* size of data in */
	unsigned long data_in_size;

	/* address of the data out */
	unsigned long app_out_address;

	/* the size of the block of the operation - if needed,
	   every table will be modulo this parameter */
	unsigned long block_size;

	/* the physical address of the first input DMA table */
	unsigned long in_table_address;

	/* number of entries in the first input DMA table */
	unsigned long in_table_num_entries;

	/* the physical address of the first output DMA table */
	unsigned long out_table_address;

	/* number of entries in the first output DMA table */
	unsigned long out_table_num_entries;

	/* data in the first input table */
	unsigned long table_data_size;

	/* distinct user/kernel layout: true when the buffer addresses are
	   kernel virtual addresses rather than user-space ones */
	bool isKernelVirtualAddress;

};
| 201 | |||
/*
  request/response for building the DMA tables of a flow; the first
  three fields describe the flow and its buffers, the table fields are
  filled in by the driver
*/
struct sep_driver_build_flow_table_t {
	/* flow type */
	unsigned long flow_type;

	/* flag for input output */
	unsigned long input_output_flag;

	/* address value of the data in */
	unsigned long virt_buff_data_addr;

	/* size of data in (number of virtual buffers) */
	unsigned long num_virtual_buffers;

	/* the physical address of the first input DMA table */
	unsigned long first_table_addr;

	/* number of entries in the first input DMA table */
	unsigned long first_table_num_entries;

	/* data in the first input table */
	unsigned long first_table_data_size;

	/* distinct user/kernel layout: true when the buffer addresses are
	   kernel virtual addresses rather than user-space ones */
	bool isKernelVirtualAddress;
};
| 229 | |||
| 230 | |||
| 231 | struct sep_driver_add_flow_table_t { | ||
| 232 | /* flow id */ | ||
| 233 | unsigned long flow_id; | ||
| 234 | |||
| 235 | /* flag for input output */ | ||
| 236 | unsigned long inputOutputFlag; | ||
| 237 | |||
| 238 | /* address value of the data in */ | ||
| 239 | unsigned long virt_buff_data_addr; | ||
| 240 | |||
| 241 | /* size of data in */ | ||
| 242 | unsigned long num_virtual_buffers; | ||
| 243 | |||
| 244 | /* address of the first table */ | ||
| 245 | unsigned long first_table_addr; | ||
| 246 | |||
| 247 | /* number of entries in the first table */ | ||
| 248 | unsigned long first_table_num_entries; | ||
| 249 | |||
| 250 | /* data size of the first table */ | ||
| 251 | unsigned long first_table_data_size; | ||
| 252 | |||
| 253 | /* distinct user/kernel layout */ | ||
| 254 | bool isKernelVirtualAddress; | ||
| 255 | |||
| 256 | }; | ||
| 257 | |||
| 258 | /* | ||
| 259 | command struct for set flow id | ||
| 260 | */ | ||
| 261 | struct sep_driver_set_flow_id_t { | ||
| 262 | /* flow id to set */ | ||
| 263 | unsigned long flow_id; | ||
| 264 | }; | ||
| 265 | |||
| 266 | |||
| 267 | /* command struct for add tables message */ | ||
| 268 | struct sep_driver_add_message_t { | ||
| 269 | /* flow id to set */ | ||
| 270 | unsigned long flow_id; | ||
| 271 | |||
| 272 | /* message size in bytes */ | ||
| 273 | unsigned long message_size_in_bytes; | ||
| 274 | |||
| 275 | /* address of the message */ | ||
| 276 | unsigned long message_address; | ||
| 277 | }; | ||
| 278 | |||
| 279 | /* command struct for static pool addresses */ | ||
| 280 | struct sep_driver_static_pool_addr_t { | ||
| 281 | /* physical address of the static pool */ | ||
| 282 | unsigned long physical_static_address; | ||
| 283 | |||
| 284 | /* virtual address of the static pool */ | ||
| 285 | unsigned long virtual_static_address; | ||
| 286 | }; | ||
| 287 | |||
| 288 | /* command struct for getiing offset of the physical address from | ||
| 289 | the start of the mapped area */ | ||
| 290 | struct sep_driver_get_mapped_offset_t { | ||
| 291 | /* physical address of the static pool */ | ||
| 292 | unsigned long physical_address; | ||
| 293 | |||
| 294 | /* virtual address of the static pool */ | ||
| 295 | unsigned long offset; | ||
| 296 | }; | ||
| 297 | |||
| 298 | /* command struct for getting time value and address */ | ||
| 299 | struct sep_driver_get_time_t { | ||
| 300 | /* physical address of stored time */ | ||
| 301 | unsigned long time_physical_address; | ||
| 302 | |||
| 303 | /* value of the stored time */ | ||
| 304 | unsigned long time_value; | ||
| 305 | }; | ||
| 306 | |||
| 307 | |||
| 308 | /* | ||
| 309 | structure that represent one entry in the DMA LLI table | ||
| 310 | */ | ||
| 311 | struct sep_lli_entry_t { | ||
| 312 | /* physical address */ | ||
| 313 | unsigned long physical_address; | ||
| 314 | |||
| 315 | /* block size */ | ||
| 316 | unsigned long block_size; | ||
| 317 | }; | ||
| 318 | |||
| 319 | /* | ||
| 320 | structure that reperesents data needed for lli table construction | ||
| 321 | */ | ||
| 322 | struct sep_lli_prepare_table_data_t { | ||
| 323 | /* pointer to the memory where the first lli entry to be built */ | ||
| 324 | struct sep_lli_entry_t *lli_entry_ptr; | ||
| 325 | |||
| 326 | /* pointer to the array of lli entries from which the table is to be built */ | ||
| 327 | struct sep_lli_entry_t *lli_array_ptr; | ||
| 328 | |||
| 329 | /* number of elements in lli array */ | ||
| 330 | int lli_array_size; | ||
| 331 | |||
| 332 | /* number of entries in the created table */ | ||
| 333 | int num_table_entries; | ||
| 334 | |||
| 335 | /* number of array entries processed during table creation */ | ||
| 336 | int num_array_entries_processed; | ||
| 337 | |||
| 338 | /* the totatl data size in the created table */ | ||
| 339 | int lli_table_total_data_size; | ||
| 340 | }; | ||
| 341 | |||
| 342 | /* | ||
| 343 | structure that represent tone table - it is not used in code, jkust | ||
| 344 | to show what table looks like | ||
| 345 | */ | ||
| 346 | struct sep_lli_table_t { | ||
| 347 | /* number of pages mapped in this tables. If 0 - means that the table | ||
| 348 | is not defined (used as a valid flag) */ | ||
| 349 | unsigned long num_pages; | ||
| 350 | /* | ||
| 351 | pointer to array of page pointers that represent the mapping of the | ||
| 352 | virtual buffer defined by the table to the physical memory. If this | ||
| 353 | pointer is NULL, it means that the table is not defined | ||
| 354 | (used as a valid flag) | ||
| 355 | */ | ||
| 356 | struct page **table_page_array_ptr; | ||
| 357 | |||
| 358 | /* maximum flow entries in table */ | ||
| 359 | struct sep_lli_entry_t lli_entries[SEP_DRIVER_MAX_FLOW_NUM_ENTRIES_IN_TABLE]; | ||
| 360 | }; | ||
| 361 | |||
| 362 | |||
| 363 | /* | ||
| 364 | structure for keeping the mapping of the virtual buffer into physical pages | ||
| 365 | */ | ||
| 366 | struct sep_flow_buffer_data { | ||
| 367 | /* pointer to the array of page structs pointers to the pages of the | ||
| 368 | virtual buffer */ | ||
| 369 | struct page **page_array_ptr; | ||
| 370 | |||
| 371 | /* number of pages taken by the virtual buffer */ | ||
| 372 | unsigned long num_pages; | ||
| 373 | |||
| 374 | /* this flag signals if this page_array is the last one among many that were | ||
| 375 | sent in one setting to SEP */ | ||
| 376 | unsigned long last_page_array_flag; | ||
| 377 | }; | ||
| 378 | |||
| 379 | /* | ||
| 380 | struct that keeps all the data for one flow | ||
| 381 | */ | ||
| 382 | struct sep_flow_context_t { | ||
| 383 | /* | ||
| 384 | work struct for handling the flow done interrupt in the workqueue | ||
| 385 | this structure must be in the first place, since it will be used | ||
| 386 | forcasting to the containing flow context | ||
| 387 | */ | ||
| 388 | struct work_struct flow_wq; | ||
| 389 | |||
| 390 | /* flow id */ | ||
| 391 | unsigned long flow_id; | ||
| 392 | |||
| 393 | /* additional input tables exists */ | ||
| 394 | unsigned long input_tables_flag; | ||
| 395 | |||
| 396 | /* additional output tables exists */ | ||
| 397 | unsigned long output_tables_flag; | ||
| 398 | |||
| 399 | /* data of the first input file */ | ||
| 400 | struct sep_lli_entry_t first_input_table; | ||
| 401 | |||
| 402 | /* data of the first output table */ | ||
| 403 | struct sep_lli_entry_t first_output_table; | ||
| 404 | |||
| 405 | /* last input table data */ | ||
| 406 | struct sep_lli_entry_t last_input_table; | ||
| 407 | |||
| 408 | /* last output table data */ | ||
| 409 | struct sep_lli_entry_t last_output_table; | ||
| 410 | |||
| 411 | /* first list of table */ | ||
| 412 | struct sep_lli_entry_t input_tables_in_process; | ||
| 413 | |||
| 414 | /* output table in process (in sep) */ | ||
| 415 | struct sep_lli_entry_t output_tables_in_process; | ||
| 416 | |||
| 417 | /* size of messages in bytes */ | ||
| 418 | unsigned long message_size_in_bytes; | ||
| 419 | |||
| 420 | /* message */ | ||
| 421 | unsigned char message[SEP_MAX_ADD_MESSAGE_LENGTH_IN_BYTES]; | ||
| 422 | }; | ||
| 423 | |||
| 424 | |||
| 425 | #endif | ||
diff --git a/drivers/staging/sep/sep_driver_config.h b/drivers/staging/sep/sep_driver_config.h deleted file mode 100644 index 6008fe5eca09..000000000000 --- a/drivers/staging/sep/sep_driver_config.h +++ /dev/null | |||
| @@ -1,225 +0,0 @@ | |||
| 1 | /* | ||
| 2 | * | ||
| 3 | * sep_driver_config.h - Security Processor Driver configuration | ||
| 4 | * | ||
| 5 | * Copyright(c) 2009 Intel Corporation. All rights reserved. | ||
| 6 | * Copyright(c) 2009 Discretix. All rights reserved. | ||
| 7 | * | ||
| 8 | * This program is free software; you can redistribute it and/or modify it | ||
| 9 | * under the terms of the GNU General Public License as published by the Free | ||
| 10 | * Software Foundation; either version 2 of the License, or (at your option) | ||
| 11 | * any later version. | ||
| 12 | * | ||
| 13 | * This program is distributed in the hope that it will be useful, but WITHOUT | ||
| 14 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
| 15 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
| 16 | * more details. | ||
| 17 | * | ||
| 18 | * You should have received a copy of the GNU General Public License along with | ||
| 19 | * this program; if not, write to the Free Software Foundation, Inc., 59 | ||
| 20 | * Temple Place - Suite 330, Boston, MA 02111-1307, USA. | ||
| 21 | * | ||
| 22 | * CONTACTS: | ||
| 23 | * | ||
| 24 | * Mark Allyn mark.a.allyn@intel.com | ||
| 25 | * | ||
| 26 | * CHANGES: | ||
| 27 | * | ||
| 28 | * 2009.06.26 Initial publish | ||
| 29 | * | ||
| 30 | */ | ||
| 31 | |||
| 32 | #ifndef __SEP_DRIVER_CONFIG_H__ | ||
| 33 | #define __SEP_DRIVER_CONFIG_H__ | ||
| 34 | |||
| 35 | |||
| 36 | /*-------------------------------------- | ||
| 37 | DRIVER CONFIGURATION FLAGS | ||
| 38 | -------------------------------------*/ | ||
| 39 | |||
| 40 | /* if flag is on , then the driver is running in polling and | ||
| 41 | not interrupt mode */ | ||
| 42 | #define SEP_DRIVER_POLLING_MODE 1 | ||
| 43 | |||
| 44 | /* flag which defines if the shared area address should be | ||
| 45 | reconfiged (send to SEP anew) during init of the driver */ | ||
| 46 | #define SEP_DRIVER_RECONFIG_MESSAGE_AREA 0 | ||
| 47 | |||
| 48 | /* the mode for running on the ARM1172 Evaluation platform (flag is 1) */ | ||
| 49 | #define SEP_DRIVER_ARM_DEBUG_MODE 0 | ||
| 50 | |||
| 51 | /*------------------------------------------- | ||
| 52 | INTERNAL DATA CONFIGURATION | ||
| 53 | -------------------------------------------*/ | ||
| 54 | |||
| 55 | /* flag for the input array */ | ||
| 56 | #define SEP_DRIVER_IN_FLAG 0 | ||
| 57 | |||
| 58 | /* flag for output array */ | ||
| 59 | #define SEP_DRIVER_OUT_FLAG 1 | ||
| 60 | |||
| 61 | /* maximum number of entries in one LLI tables */ | ||
| 62 | #define SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP 8 | ||
| 63 | |||
| 64 | |||
| 65 | /*-------------------------------------------------------- | ||
| 66 | SHARED AREA memory total size is 36K | ||
| 67 | it is divided is following: | ||
| 68 | |||
| 69 | SHARED_MESSAGE_AREA 8K } | ||
| 70 | } | ||
| 71 | STATIC_POOL_AREA 4K } MAPPED AREA ( 24 K) | ||
| 72 | } | ||
| 73 | DATA_POOL_AREA 12K } | ||
| 74 | |||
| 75 | SYNCHRONIC_DMA_TABLES_AREA 5K | ||
| 76 | |||
| 77 | FLOW_DMA_TABLES_AREA 4K | ||
| 78 | |||
| 79 | SYSTEM_MEMORY_AREA 3k | ||
| 80 | |||
| 81 | SYSTEM_MEMORY total size is 3k | ||
| 82 | it is divided as following: | ||
| 83 | |||
| 84 | TIME_MEMORY_AREA 8B | ||
| 85 | -----------------------------------------------------------*/ | ||
| 86 | |||
| 87 | |||
| 88 | |||
| 89 | /* | ||
| 90 | the maximum length of the message - the rest of the message shared | ||
| 91 | area will be dedicated to the dma lli tables | ||
| 92 | */ | ||
| 93 | #define SEP_DRIVER_MAX_MESSAGE_SIZE_IN_BYTES (8 * 1024) | ||
| 94 | |||
| 95 | /* the size of the message shared area in pages */ | ||
| 96 | #define SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES (8 * 1024) | ||
| 97 | |||
| 98 | /* the size of the data pool static area in pages */ | ||
| 99 | #define SEP_DRIVER_STATIC_AREA_SIZE_IN_BYTES (4 * 1024) | ||
| 100 | |||
| 101 | /* the size of the data pool shared area size in pages */ | ||
| 102 | #define SEP_DRIVER_DATA_POOL_SHARED_AREA_SIZE_IN_BYTES (12 * 1024) | ||
| 103 | |||
| 104 | /* the size of the message shared area in pages */ | ||
| 105 | #define SEP_DRIVER_SYNCHRONIC_DMA_TABLES_AREA_SIZE_IN_BYTES (1024 * 5) | ||
| 106 | |||
| 107 | |||
| 108 | /* the size of the data pool shared area size in pages */ | ||
| 109 | #define SEP_DRIVER_FLOW_DMA_TABLES_AREA_SIZE_IN_BYTES (1024 * 4) | ||
| 110 | |||
| 111 | /* system data (time, caller id etc') pool */ | ||
| 112 | #define SEP_DRIVER_SYSTEM_DATA_MEMORY_SIZE_IN_BYTES 100 | ||
| 113 | |||
| 114 | |||
| 115 | /* area size that is mapped - we map the MESSAGE AREA, STATIC POOL and | ||
| 116 | DATA POOL areas. area must be module 4k */ | ||
| 117 | #define SEP_DRIVER_MMMAP_AREA_SIZE (1024 * 24) | ||
| 118 | |||
| 119 | |||
| 120 | /*----------------------------------------------- | ||
| 121 | offsets of the areas starting from the shared area start address | ||
| 122 | */ | ||
| 123 | |||
| 124 | /* message area offset */ | ||
| 125 | #define SEP_DRIVER_MESSAGE_AREA_OFFSET_IN_BYTES 0 | ||
| 126 | |||
| 127 | /* static pool area offset */ | ||
| 128 | #define SEP_DRIVER_STATIC_AREA_OFFSET_IN_BYTES \ | ||
| 129 | (SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES) | ||
| 130 | |||
| 131 | /* data pool area offset */ | ||
| 132 | #define SEP_DRIVER_DATA_POOL_AREA_OFFSET_IN_BYTES \ | ||
| 133 | (SEP_DRIVER_STATIC_AREA_OFFSET_IN_BYTES + \ | ||
| 134 | SEP_DRIVER_STATIC_AREA_SIZE_IN_BYTES) | ||
| 135 | |||
| 136 | /* synhronic dma tables area offset */ | ||
| 137 | #define SEP_DRIVER_SYNCHRONIC_DMA_TABLES_AREA_OFFSET_IN_BYTES \ | ||
| 138 | (SEP_DRIVER_DATA_POOL_AREA_OFFSET_IN_BYTES + \ | ||
| 139 | SEP_DRIVER_DATA_POOL_SHARED_AREA_SIZE_IN_BYTES) | ||
| 140 | |||
| 141 | /* sep driver flow dma tables area offset */ | ||
| 142 | #define SEP_DRIVER_FLOW_DMA_TABLES_AREA_OFFSET_IN_BYTES \ | ||
| 143 | (SEP_DRIVER_SYNCHRONIC_DMA_TABLES_AREA_OFFSET_IN_BYTES + \ | ||
| 144 | SEP_DRIVER_SYNCHRONIC_DMA_TABLES_AREA_SIZE_IN_BYTES) | ||
| 145 | |||
| 146 | /* system memory offset in bytes */ | ||
| 147 | #define SEP_DRIVER_SYSTEM_DATA_MEMORY_OFFSET_IN_BYTES \ | ||
| 148 | (SEP_DRIVER_FLOW_DMA_TABLES_AREA_OFFSET_IN_BYTES + \ | ||
| 149 | SEP_DRIVER_FLOW_DMA_TABLES_AREA_SIZE_IN_BYTES) | ||
| 150 | |||
| 151 | /* offset of the time area */ | ||
| 152 | #define SEP_DRIVER_SYSTEM_TIME_MEMORY_OFFSET_IN_BYTES \ | ||
| 153 | (SEP_DRIVER_SYSTEM_DATA_MEMORY_OFFSET_IN_BYTES) | ||
| 154 | |||
| 155 | |||
| 156 | |||
| 157 | /* start physical address of the SEP registers memory in HOST */ | ||
| 158 | #define SEP_IO_MEM_REGION_START_ADDRESS 0x80000000 | ||
| 159 | |||
| 160 | /* size of the SEP registers memory region in HOST (for now 100 registers) */ | ||
| 161 | #define SEP_IO_MEM_REGION_SIZE (2 * 0x100000) | ||
| 162 | |||
| 163 | /* define the number of IRQ for SEP interrupts */ | ||
| 164 | #define SEP_DIRVER_IRQ_NUM 1 | ||
| 165 | |||
| 166 | /* maximum number of add buffers */ | ||
| 167 | #define SEP_MAX_NUM_ADD_BUFFERS 100 | ||
| 168 | |||
| 169 | /* number of flows */ | ||
| 170 | #define SEP_DRIVER_NUM_FLOWS 4 | ||
| 171 | |||
| 172 | /* maximum number of entries in flow table */ | ||
| 173 | #define SEP_DRIVER_MAX_FLOW_NUM_ENTRIES_IN_TABLE 25 | ||
| 174 | |||
| 175 | /* offset of the num entries in the block length entry of the LLI */ | ||
| 176 | #define SEP_NUM_ENTRIES_OFFSET_IN_BITS 24 | ||
| 177 | |||
| 178 | /* offset of the interrupt flag in the block length entry of the LLI */ | ||
| 179 | #define SEP_INT_FLAG_OFFSET_IN_BITS 31 | ||
| 180 | |||
| 181 | /* mask for extracting data size from LLI */ | ||
| 182 | #define SEP_TABLE_DATA_SIZE_MASK 0xFFFFFF | ||
| 183 | |||
| 184 | /* mask for entries after being shifted left */ | ||
| 185 | #define SEP_NUM_ENTRIES_MASK 0x7F | ||
| 186 | |||
| 187 | /* default flow id */ | ||
| 188 | #define SEP_FREE_FLOW_ID 0xFFFFFFFF | ||
| 189 | |||
| 190 | /* temp flow id used during cretiong of new flow until receiving | ||
| 191 | real flow id from sep */ | ||
| 192 | #define SEP_TEMP_FLOW_ID (SEP_DRIVER_NUM_FLOWS + 1) | ||
| 193 | |||
| 194 | /* maximum add buffers message length in bytes */ | ||
| 195 | #define SEP_MAX_ADD_MESSAGE_LENGTH_IN_BYTES (7 * 4) | ||
| 196 | |||
| 197 | /* maximum number of concurrent virtual buffers */ | ||
| 198 | #define SEP_MAX_VIRT_BUFFERS_CONCURRENT 100 | ||
| 199 | |||
| 200 | /* the token that defines the start of time address */ | ||
| 201 | #define SEP_TIME_VAL_TOKEN 0x12345678 | ||
| 202 | |||
| 203 | /* DEBUG LEVEL MASKS */ | ||
| 204 | #define SEP_DEBUG_LEVEL_BASIC 0x1 | ||
| 205 | |||
| 206 | #define SEP_DEBUG_LEVEL_EXTENDED 0x4 | ||
| 207 | |||
| 208 | |||
| 209 | /* Debug helpers */ | ||
| 210 | |||
| 211 | #define dbg(fmt, args...) \ | ||
| 212 | do {\ | ||
| 213 | if (debug & SEP_DEBUG_LEVEL_BASIC) \ | ||
| 214 | printk(KERN_DEBUG fmt, ##args); \ | ||
| 215 | } while(0); | ||
| 216 | |||
| 217 | #define edbg(fmt, args...) \ | ||
| 218 | do { \ | ||
| 219 | if (debug & SEP_DEBUG_LEVEL_EXTENDED) \ | ||
| 220 | printk(KERN_DEBUG fmt, ##args); \ | ||
| 221 | } while(0); | ||
| 222 | |||
| 223 | |||
| 224 | |||
| 225 | #endif | ||
diff --git a/drivers/staging/sep/sep_driver_hw_defs.h b/drivers/staging/sep/sep_driver_hw_defs.h deleted file mode 100644 index ea6abd8a14b4..000000000000 --- a/drivers/staging/sep/sep_driver_hw_defs.h +++ /dev/null | |||
| @@ -1,232 +0,0 @@ | |||
| 1 | /* | ||
| 2 | * | ||
| 3 | * sep_driver_hw_defs.h - Security Processor Driver hardware definitions | ||
| 4 | * | ||
| 5 | * Copyright(c) 2009 Intel Corporation. All rights reserved. | ||
| 6 | * Copyright(c) 2009 Discretix. All rights reserved. | ||
| 7 | * | ||
| 8 | * This program is free software; you can redistribute it and/or modify it | ||
| 9 | * under the terms of the GNU General Public License as published by the Free | ||
| 10 | * Software Foundation; either version 2 of the License, or (at your option) | ||
| 11 | * any later version. | ||
| 12 | * | ||
| 13 | * This program is distributed in the hope that it will be useful, but WITHOUT | ||
| 14 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
| 15 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
| 16 | * more details. | ||
| 17 | * | ||
| 18 | * You should have received a copy of the GNU General Public License along with | ||
| 19 | * this program; if not, write to the Free Software Foundation, Inc., 59 | ||
| 20 | * Temple Place - Suite 330, Boston, MA 02111-1307, USA. | ||
| 21 | * | ||
| 22 | * CONTACTS: | ||
| 23 | * | ||
| 24 | * Mark Allyn mark.a.allyn@intel.com | ||
| 25 | * | ||
| 26 | * CHANGES: | ||
| 27 | * | ||
| 28 | * 2009.06.26 Initial publish | ||
| 29 | * | ||
| 30 | */ | ||
| 31 | |||
| 32 | #ifndef SEP_DRIVER_HW_DEFS__H | ||
| 33 | #define SEP_DRIVER_HW_DEFS__H | ||
| 34 | |||
| 35 | /*--------------------------------------------------------------------------*/ | ||
| 36 | /* Abstract: HW Registers Defines. */ | ||
| 37 | /* */ | ||
| 38 | /* Note: This file was automatically created !!! */ | ||
| 39 | /* DO NOT EDIT THIS FILE !!! */ | ||
| 40 | /*--------------------------------------------------------------------------*/ | ||
| 41 | |||
| 42 | |||
| 43 | /* cf registers */ | ||
| 44 | #define HW_R0B_ADDR_0_REG_ADDR 0x0000UL | ||
| 45 | #define HW_R0B_ADDR_1_REG_ADDR 0x0004UL | ||
| 46 | #define HW_R0B_ADDR_2_REG_ADDR 0x0008UL | ||
| 47 | #define HW_R0B_ADDR_3_REG_ADDR 0x000cUL | ||
| 48 | #define HW_R0B_ADDR_4_REG_ADDR 0x0010UL | ||
| 49 | #define HW_R0B_ADDR_5_REG_ADDR 0x0014UL | ||
| 50 | #define HW_R0B_ADDR_6_REG_ADDR 0x0018UL | ||
| 51 | #define HW_R0B_ADDR_7_REG_ADDR 0x001cUL | ||
| 52 | #define HW_R0B_ADDR_8_REG_ADDR 0x0020UL | ||
| 53 | #define HW_R2B_ADDR_0_REG_ADDR 0x0080UL | ||
| 54 | #define HW_R2B_ADDR_1_REG_ADDR 0x0084UL | ||
| 55 | #define HW_R2B_ADDR_2_REG_ADDR 0x0088UL | ||
| 56 | #define HW_R2B_ADDR_3_REG_ADDR 0x008cUL | ||
| 57 | #define HW_R2B_ADDR_4_REG_ADDR 0x0090UL | ||
| 58 | #define HW_R2B_ADDR_5_REG_ADDR 0x0094UL | ||
| 59 | #define HW_R2B_ADDR_6_REG_ADDR 0x0098UL | ||
| 60 | #define HW_R2B_ADDR_7_REG_ADDR 0x009cUL | ||
| 61 | #define HW_R2B_ADDR_8_REG_ADDR 0x00a0UL | ||
| 62 | #define HW_R3B_REG_ADDR 0x00C0UL | ||
| 63 | #define HW_R4B_REG_ADDR 0x0100UL | ||
| 64 | #define HW_CSA_ADDR_0_REG_ADDR 0x0140UL | ||
| 65 | #define HW_CSA_ADDR_1_REG_ADDR 0x0144UL | ||
| 66 | #define HW_CSA_ADDR_2_REG_ADDR 0x0148UL | ||
| 67 | #define HW_CSA_ADDR_3_REG_ADDR 0x014cUL | ||
| 68 | #define HW_CSA_ADDR_4_REG_ADDR 0x0150UL | ||
| 69 | #define HW_CSA_ADDR_5_REG_ADDR 0x0154UL | ||
| 70 | #define HW_CSA_ADDR_6_REG_ADDR 0x0158UL | ||
| 71 | #define HW_CSA_ADDR_7_REG_ADDR 0x015cUL | ||
| 72 | #define HW_CSA_ADDR_8_REG_ADDR 0x0160UL | ||
| 73 | #define HW_CSA_REG_ADDR 0x0140UL | ||
| 74 | #define HW_SINB_REG_ADDR 0x0180UL | ||
| 75 | #define HW_SOUTB_REG_ADDR 0x0184UL | ||
| 76 | #define HW_PKI_CONTROL_REG_ADDR 0x01C0UL | ||
| 77 | #define HW_PKI_STATUS_REG_ADDR 0x01C4UL | ||
| 78 | #define HW_PKI_BUSY_REG_ADDR 0x01C8UL | ||
| 79 | #define HW_PKI_A_1025_REG_ADDR 0x01CCUL | ||
| 80 | #define HW_PKI_SDMA_CTL_REG_ADDR 0x01D0UL | ||
| 81 | #define HW_PKI_SDMA_OFFSET_REG_ADDR 0x01D4UL | ||
| 82 | #define HW_PKI_SDMA_POINTERS_REG_ADDR 0x01D8UL | ||
| 83 | #define HW_PKI_SDMA_DLENG_REG_ADDR 0x01DCUL | ||
| 84 | #define HW_PKI_SDMA_EXP_POINTERS_REG_ADDR 0x01E0UL | ||
| 85 | #define HW_PKI_SDMA_RES_POINTERS_REG_ADDR 0x01E4UL | ||
| 86 | #define HW_PKI_CLR_REG_ADDR 0x01E8UL | ||
| 87 | #define HW_PKI_SDMA_BUSY_REG_ADDR 0x01E8UL | ||
| 88 | #define HW_PKI_SDMA_FIRST_EXP_N_REG_ADDR 0x01ECUL | ||
| 89 | #define HW_PKI_SDMA_MUL_BY1_REG_ADDR 0x01F0UL | ||
| 90 | #define HW_PKI_SDMA_RMUL_SEL_REG_ADDR 0x01F4UL | ||
| 91 | #define HW_DES_KEY_0_REG_ADDR 0x0208UL | ||
| 92 | #define HW_DES_KEY_1_REG_ADDR 0x020CUL | ||
| 93 | #define HW_DES_KEY_2_REG_ADDR 0x0210UL | ||
| 94 | #define HW_DES_KEY_3_REG_ADDR 0x0214UL | ||
| 95 | #define HW_DES_KEY_4_REG_ADDR 0x0218UL | ||
| 96 | #define HW_DES_KEY_5_REG_ADDR 0x021CUL | ||
| 97 | #define HW_DES_CONTROL_0_REG_ADDR 0x0220UL | ||
| 98 | #define HW_DES_CONTROL_1_REG_ADDR 0x0224UL | ||
| 99 | #define HW_DES_IV_0_REG_ADDR 0x0228UL | ||
| 100 | #define HW_DES_IV_1_REG_ADDR 0x022CUL | ||
| 101 | #define HW_AES_KEY_0_ADDR_0_REG_ADDR 0x0400UL | ||
| 102 | #define HW_AES_KEY_0_ADDR_1_REG_ADDR 0x0404UL | ||
| 103 | #define HW_AES_KEY_0_ADDR_2_REG_ADDR 0x0408UL | ||
| 104 | #define HW_AES_KEY_0_ADDR_3_REG_ADDR 0x040cUL | ||
| 105 | #define HW_AES_KEY_0_ADDR_4_REG_ADDR 0x0410UL | ||
| 106 | #define HW_AES_KEY_0_ADDR_5_REG_ADDR 0x0414UL | ||
| 107 | #define HW_AES_KEY_0_ADDR_6_REG_ADDR 0x0418UL | ||
| 108 | #define HW_AES_KEY_0_ADDR_7_REG_ADDR 0x041cUL | ||
| 109 | #define HW_AES_KEY_0_REG_ADDR 0x0400UL | ||
| 110 | #define HW_AES_IV_0_ADDR_0_REG_ADDR 0x0440UL | ||
| 111 | #define HW_AES_IV_0_ADDR_1_REG_ADDR 0x0444UL | ||
| 112 | #define HW_AES_IV_0_ADDR_2_REG_ADDR 0x0448UL | ||
| 113 | #define HW_AES_IV_0_ADDR_3_REG_ADDR 0x044cUL | ||
| 114 | #define HW_AES_IV_0_REG_ADDR 0x0440UL | ||
| 115 | #define HW_AES_CTR1_ADDR_0_REG_ADDR 0x0460UL | ||
| 116 | #define HW_AES_CTR1_ADDR_1_REG_ADDR 0x0464UL | ||
| 117 | #define HW_AES_CTR1_ADDR_2_REG_ADDR 0x0468UL | ||
| 118 | #define HW_AES_CTR1_ADDR_3_REG_ADDR 0x046cUL | ||
| 119 | #define HW_AES_CTR1_REG_ADDR 0x0460UL | ||
| 120 | #define HW_AES_SK_REG_ADDR 0x0478UL | ||
| 121 | #define HW_AES_MAC_OK_REG_ADDR 0x0480UL | ||
| 122 | #define HW_AES_PREV_IV_0_ADDR_0_REG_ADDR 0x0490UL | ||
| 123 | #define HW_AES_PREV_IV_0_ADDR_1_REG_ADDR 0x0494UL | ||
| 124 | #define HW_AES_PREV_IV_0_ADDR_2_REG_ADDR 0x0498UL | ||
| 125 | #define HW_AES_PREV_IV_0_ADDR_3_REG_ADDR 0x049cUL | ||
| 126 | #define HW_AES_PREV_IV_0_REG_ADDR 0x0490UL | ||
| 127 | #define HW_AES_CONTROL_REG_ADDR 0x04C0UL | ||
| 128 | #define HW_HASH_H0_REG_ADDR 0x0640UL | ||
| 129 | #define HW_HASH_H1_REG_ADDR 0x0644UL | ||
| 130 | #define HW_HASH_H2_REG_ADDR 0x0648UL | ||
| 131 | #define HW_HASH_H3_REG_ADDR 0x064CUL | ||
| 132 | #define HW_HASH_H4_REG_ADDR 0x0650UL | ||
| 133 | #define HW_HASH_H5_REG_ADDR 0x0654UL | ||
| 134 | #define HW_HASH_H6_REG_ADDR 0x0658UL | ||
| 135 | #define HW_HASH_H7_REG_ADDR 0x065CUL | ||
| 136 | #define HW_HASH_H8_REG_ADDR 0x0660UL | ||
| 137 | #define HW_HASH_H9_REG_ADDR 0x0664UL | ||
| 138 | #define HW_HASH_H10_REG_ADDR 0x0668UL | ||
| 139 | #define HW_HASH_H11_REG_ADDR 0x066CUL | ||
| 140 | #define HW_HASH_H12_REG_ADDR 0x0670UL | ||
| 141 | #define HW_HASH_H13_REG_ADDR 0x0674UL | ||
| 142 | #define HW_HASH_H14_REG_ADDR 0x0678UL | ||
| 143 | #define HW_HASH_H15_REG_ADDR 0x067CUL | ||
| 144 | #define HW_HASH_CONTROL_REG_ADDR 0x07C0UL | ||
| 145 | #define HW_HASH_PAD_EN_REG_ADDR 0x07C4UL | ||
| 146 | #define HW_HASH_PAD_CFG_REG_ADDR 0x07C8UL | ||
| 147 | #define HW_HASH_CUR_LEN_0_REG_ADDR 0x07CCUL | ||
| 148 | #define HW_HASH_CUR_LEN_1_REG_ADDR 0x07D0UL | ||
| 149 | #define HW_HASH_CUR_LEN_2_REG_ADDR 0x07D4UL | ||
| 150 | #define HW_HASH_CUR_LEN_3_REG_ADDR 0x07D8UL | ||
| 151 | #define HW_HASH_PARAM_REG_ADDR 0x07DCUL | ||
| 152 | #define HW_HASH_INT_BUSY_REG_ADDR 0x07E0UL | ||
| 153 | #define HW_HASH_SW_RESET_REG_ADDR 0x07E4UL | ||
| 154 | #define HW_HASH_ENDIANESS_REG_ADDR 0x07E8UL | ||
| 155 | #define HW_HASH_DATA_REG_ADDR 0x07ECUL | ||
| 156 | #define HW_DRNG_CONTROL_REG_ADDR 0x0800UL | ||
| 157 | #define HW_DRNG_VALID_REG_ADDR 0x0804UL | ||
| 158 | #define HW_DRNG_DATA_REG_ADDR 0x0808UL | ||
| 159 | #define HW_RND_SRC_EN_REG_ADDR 0x080CUL | ||
| 160 | #define HW_AES_CLK_ENABLE_REG_ADDR 0x0810UL | ||
| 161 | #define HW_DES_CLK_ENABLE_REG_ADDR 0x0814UL | ||
| 162 | #define HW_HASH_CLK_ENABLE_REG_ADDR 0x0818UL | ||
| 163 | #define HW_PKI_CLK_ENABLE_REG_ADDR 0x081CUL | ||
| 164 | #define HW_CLK_STATUS_REG_ADDR 0x0824UL | ||
| 165 | #define HW_CLK_ENABLE_REG_ADDR 0x0828UL | ||
| 166 | #define HW_DRNG_SAMPLE_REG_ADDR 0x0850UL | ||
| 167 | #define HW_RND_SRC_CTL_REG_ADDR 0x0858UL | ||
| 168 | #define HW_CRYPTO_CTL_REG_ADDR 0x0900UL | ||
| 169 | #define HW_CRYPTO_STATUS_REG_ADDR 0x090CUL | ||
| 170 | #define HW_CRYPTO_BUSY_REG_ADDR 0x0910UL | ||
| 171 | #define HW_AES_BUSY_REG_ADDR 0x0914UL | ||
| 172 | #define HW_DES_BUSY_REG_ADDR 0x0918UL | ||
| 173 | #define HW_HASH_BUSY_REG_ADDR 0x091CUL | ||
| 174 | #define HW_CONTENT_REG_ADDR 0x0924UL | ||
| 175 | #define HW_VERSION_REG_ADDR 0x0928UL | ||
| 176 | #define HW_CONTEXT_ID_REG_ADDR 0x0930UL | ||
| 177 | #define HW_DIN_BUFFER_REG_ADDR 0x0C00UL | ||
| 178 | #define HW_DIN_MEM_DMA_BUSY_REG_ADDR 0x0c20UL | ||
| 179 | #define HW_SRC_LLI_MEM_ADDR_REG_ADDR 0x0c24UL | ||
| 180 | #define HW_SRC_LLI_WORD0_REG_ADDR 0x0C28UL | ||
| 181 | #define HW_SRC_LLI_WORD1_REG_ADDR 0x0C2CUL | ||
| 182 | #define HW_SRAM_SRC_ADDR_REG_ADDR 0x0c30UL | ||
| 183 | #define HW_DIN_SRAM_BYTES_LEN_REG_ADDR 0x0c34UL | ||
| 184 | #define HW_DIN_SRAM_DMA_BUSY_REG_ADDR 0x0C38UL | ||
| 185 | #define HW_WRITE_ALIGN_REG_ADDR 0x0C3CUL | ||
| 186 | #define HW_OLD_DATA_REG_ADDR 0x0C48UL | ||
| 187 | #define HW_WRITE_ALIGN_LAST_REG_ADDR 0x0C4CUL | ||
| 188 | #define HW_DOUT_BUFFER_REG_ADDR 0x0C00UL | ||
| 189 | #define HW_DST_LLI_WORD0_REG_ADDR 0x0D28UL | ||
| 190 | #define HW_DST_LLI_WORD1_REG_ADDR 0x0D2CUL | ||
| 191 | #define HW_DST_LLI_MEM_ADDR_REG_ADDR 0x0D24UL | ||
| 192 | #define HW_DOUT_MEM_DMA_BUSY_REG_ADDR 0x0D20UL | ||
| 193 | #define HW_SRAM_DEST_ADDR_REG_ADDR 0x0D30UL | ||
| 194 | #define HW_DOUT_SRAM_BYTES_LEN_REG_ADDR 0x0D34UL | ||
| 195 | #define HW_DOUT_SRAM_DMA_BUSY_REG_ADDR 0x0D38UL | ||
| 196 | #define HW_READ_ALIGN_REG_ADDR 0x0D3CUL | ||
| 197 | #define HW_READ_LAST_DATA_REG_ADDR 0x0D44UL | ||
| 198 | #define HW_RC4_THRU_CPU_REG_ADDR 0x0D4CUL | ||
| 199 | #define HW_AHB_SINGLE_REG_ADDR 0x0E00UL | ||
| 200 | #define HW_SRAM_DATA_REG_ADDR 0x0F00UL | ||
| 201 | #define HW_SRAM_ADDR_REG_ADDR 0x0F04UL | ||
| 202 | #define HW_SRAM_DATA_READY_REG_ADDR 0x0F08UL | ||
| 203 | #define HW_HOST_IRR_REG_ADDR 0x0A00UL | ||
| 204 | #define HW_HOST_IMR_REG_ADDR 0x0A04UL | ||
| 205 | #define HW_HOST_ICR_REG_ADDR 0x0A08UL | ||
| 206 | #define HW_HOST_SEP_SRAM_THRESHOLD_REG_ADDR 0x0A10UL | ||
| 207 | #define HW_HOST_SEP_BUSY_REG_ADDR 0x0A14UL | ||
| 208 | #define HW_HOST_SEP_LCS_REG_ADDR 0x0A18UL | ||
| 209 | #define HW_HOST_CC_SW_RST_REG_ADDR 0x0A40UL | ||
| 210 | #define HW_HOST_SEP_SW_RST_REG_ADDR 0x0A44UL | ||
| 211 | #define HW_HOST_FLOW_DMA_SW_INT0_REG_ADDR 0x0A80UL | ||
| 212 | #define HW_HOST_FLOW_DMA_SW_INT1_REG_ADDR 0x0A84UL | ||
| 213 | #define HW_HOST_FLOW_DMA_SW_INT2_REG_ADDR 0x0A88UL | ||
| 214 | #define HW_HOST_FLOW_DMA_SW_INT3_REG_ADDR 0x0A8cUL | ||
| 215 | #define HW_HOST_FLOW_DMA_SW_INT4_REG_ADDR 0x0A90UL | ||
| 216 | #define HW_HOST_FLOW_DMA_SW_INT5_REG_ADDR 0x0A94UL | ||
| 217 | #define HW_HOST_FLOW_DMA_SW_INT6_REG_ADDR 0x0A98UL | ||
| 218 | #define HW_HOST_FLOW_DMA_SW_INT7_REG_ADDR 0x0A9cUL | ||
| 219 | #define HW_HOST_SEP_HOST_GPR0_REG_ADDR 0x0B00UL | ||
| 220 | #define HW_HOST_SEP_HOST_GPR1_REG_ADDR 0x0B04UL | ||
| 221 | #define HW_HOST_SEP_HOST_GPR2_REG_ADDR 0x0B08UL | ||
| 222 | #define HW_HOST_SEP_HOST_GPR3_REG_ADDR 0x0B0CUL | ||
| 223 | #define HW_HOST_HOST_SEP_GPR0_REG_ADDR 0x0B80UL | ||
| 224 | #define HW_HOST_HOST_SEP_GPR1_REG_ADDR 0x0B84UL | ||
| 225 | #define HW_HOST_HOST_SEP_GPR2_REG_ADDR 0x0B88UL | ||
| 226 | #define HW_HOST_HOST_SEP_GPR3_REG_ADDR 0x0B8CUL | ||
| 227 | #define HW_HOST_HOST_ENDIAN_REG_ADDR 0x0B90UL | ||
| 228 | #define HW_HOST_HOST_COMM_CLK_EN_REG_ADDR 0x0B94UL | ||
| 229 | #define HW_CLR_SRAM_BUSY_REG_REG_ADDR 0x0F0CUL | ||
| 230 | #define HW_CC_SRAM_BASE_ADDRESS 0x5800UL | ||
| 231 | |||
| 232 | #endif /* ifndef HW_DEFS */ | ||
diff --git a/drivers/staging/spectra/ffsport.c b/drivers/staging/spectra/ffsport.c index d0c5c97eda3e..44a7fbe7eccd 100644 --- a/drivers/staging/spectra/ffsport.c +++ b/drivers/staging/spectra/ffsport.c | |||
| @@ -27,6 +27,7 @@ | |||
| 27 | #include <linux/kthread.h> | 27 | #include <linux/kthread.h> |
| 28 | #include <linux/log2.h> | 28 | #include <linux/log2.h> |
| 29 | #include <linux/init.h> | 29 | #include <linux/init.h> |
| 30 | #include <linux/smp_lock.h> | ||
| 30 | 31 | ||
| 31 | /**** Helper functions used for Div, Remainder operation on u64 ****/ | 32 | /**** Helper functions used for Div, Remainder operation on u64 ****/ |
| 32 | 33 | ||
| @@ -113,7 +114,6 @@ u64 GLOB_u64_Remainder(u64 addr, u32 divisor_type) | |||
| 113 | 114 | ||
| 114 | #define GLOB_SBD_NAME "nd" | 115 | #define GLOB_SBD_NAME "nd" |
| 115 | #define GLOB_SBD_IRQ_NUM (29) | 116 | #define GLOB_SBD_IRQ_NUM (29) |
| 116 | #define GLOB_VERSION "driver version 20091110" | ||
| 117 | 117 | ||
| 118 | #define GLOB_SBD_IOCTL_GC (0x7701) | 118 | #define GLOB_SBD_IOCTL_GC (0x7701) |
| 119 | #define GLOB_SBD_IOCTL_WL (0x7702) | 119 | #define GLOB_SBD_IOCTL_WL (0x7702) |
| @@ -272,13 +272,6 @@ static int get_res_blk_num_os(void) | |||
| 272 | return res_blks; | 272 | return res_blks; |
| 273 | } | 273 | } |
| 274 | 274 | ||
| 275 | static void SBD_prepare_flush(struct request_queue *q, struct request *rq) | ||
| 276 | { | ||
| 277 | rq->cmd_type = REQ_TYPE_LINUX_BLOCK; | ||
| 278 | /* rq->timeout = 5 * HZ; */ | ||
| 279 | rq->cmd[0] = REQ_LB_OP_FLUSH; | ||
| 280 | } | ||
| 281 | |||
| 282 | /* Transfer a full request. */ | 275 | /* Transfer a full request. */ |
| 283 | static int do_transfer(struct spectra_nand_dev *tr, struct request *req) | 276 | static int do_transfer(struct spectra_nand_dev *tr, struct request *req) |
| 284 | { | 277 | { |
| @@ -296,8 +289,7 @@ static int do_transfer(struct spectra_nand_dev *tr, struct request *req) | |||
| 296 | IdentifyDeviceData.PagesPerBlock * | 289 | IdentifyDeviceData.PagesPerBlock * |
| 297 | res_blks_os; | 290 | res_blks_os; |
| 298 | 291 | ||
| 299 | if (req->cmd_type == REQ_TYPE_LINUX_BLOCK && | 292 | if (req->cmd_type & REQ_FLUSH) { |
| 300 | req->cmd[0] == REQ_LB_OP_FLUSH) { | ||
| 301 | if (force_flush_cache()) /* Fail to flush cache */ | 293 | if (force_flush_cache()) /* Fail to flush cache */ |
| 302 | return -EIO; | 294 | return -EIO; |
| 303 | else | 295 | else |
| @@ -597,11 +589,23 @@ int GLOB_SBD_ioctl(struct block_device *bdev, fmode_t mode, | |||
| 597 | return -ENOTTY; | 589 | return -ENOTTY; |
| 598 | } | 590 | } |
| 599 | 591 | ||
| 592 | int GLOB_SBD_unlocked_ioctl(struct block_device *bdev, fmode_t mode, | ||
| 593 | unsigned int cmd, unsigned long arg) | ||
| 594 | { | ||
| 595 | int ret; | ||
| 596 | |||
| 597 | lock_kernel(); | ||
| 598 | ret = GLOB_SBD_ioctl(bdev, mode, cmd, arg); | ||
| 599 | unlock_kernel(); | ||
| 600 | |||
| 601 | return ret; | ||
| 602 | } | ||
| 603 | |||
| 600 | static struct block_device_operations GLOB_SBD_ops = { | 604 | static struct block_device_operations GLOB_SBD_ops = { |
| 601 | .owner = THIS_MODULE, | 605 | .owner = THIS_MODULE, |
| 602 | .open = GLOB_SBD_open, | 606 | .open = GLOB_SBD_open, |
| 603 | .release = GLOB_SBD_release, | 607 | .release = GLOB_SBD_release, |
| 604 | .locked_ioctl = GLOB_SBD_ioctl, | 608 | .ioctl = GLOB_SBD_unlocked_ioctl, |
| 605 | .getgeo = GLOB_SBD_getgeo, | 609 | .getgeo = GLOB_SBD_getgeo, |
| 606 | }; | 610 | }; |
| 607 | 611 | ||
| @@ -650,8 +654,7 @@ static int SBD_setup_device(struct spectra_nand_dev *dev, int which) | |||
| 650 | /* Here we force report 512 byte hardware sector size to Kernel */ | 654 | /* Here we force report 512 byte hardware sector size to Kernel */ |
| 651 | blk_queue_logical_block_size(dev->queue, 512); | 655 | blk_queue_logical_block_size(dev->queue, 512); |
| 652 | 656 | ||
| 653 | blk_queue_ordered(dev->queue, QUEUE_ORDERED_DRAIN_FLUSH, | 657 | blk_queue_ordered(dev->queue, QUEUE_ORDERED_DRAIN_FLUSH); |
| 654 | SBD_prepare_flush); | ||
| 655 | 658 | ||
| 656 | dev->thread = kthread_run(spectra_trans_thread, dev, "nand_thd"); | 659 | dev->thread = kthread_run(spectra_trans_thread, dev, "nand_thd"); |
| 657 | if (IS_ERR(dev->thread)) { | 660 | if (IS_ERR(dev->thread)) { |
diff --git a/drivers/staging/spectra/flash.c b/drivers/staging/spectra/flash.c index 134aa5166a8d..9b5218b6ada8 100644 --- a/drivers/staging/spectra/flash.c +++ b/drivers/staging/spectra/flash.c | |||
| @@ -61,7 +61,6 @@ static void FTL_Cache_Read_Page(u8 *pData, u64 dwPageAddr, | |||
| 61 | static void FTL_Cache_Write_Page(u8 *pData, u64 dwPageAddr, | 61 | static void FTL_Cache_Write_Page(u8 *pData, u64 dwPageAddr, |
| 62 | u8 cache_blk, u16 flag); | 62 | u8 cache_blk, u16 flag); |
| 63 | static int FTL_Cache_Write(void); | 63 | static int FTL_Cache_Write(void); |
| 64 | static int FTL_Cache_Write_Back(u8 *pData, u64 blk_addr); | ||
| 65 | static void FTL_Calculate_LRU(void); | 64 | static void FTL_Calculate_LRU(void); |
| 66 | static u32 FTL_Get_Block_Index(u32 wBlockNum); | 65 | static u32 FTL_Get_Block_Index(u32 wBlockNum); |
| 67 | 66 | ||
| @@ -86,8 +85,6 @@ static u32 FTL_Replace_MWBlock(void); | |||
| 86 | static int FTL_Replace_Block(u64 blk_addr); | 85 | static int FTL_Replace_Block(u64 blk_addr); |
| 87 | static int FTL_Adjust_Relative_Erase_Count(u32 Index_of_MAX); | 86 | static int FTL_Adjust_Relative_Erase_Count(u32 Index_of_MAX); |
| 88 | 87 | ||
| 89 | static int FTL_Flash_Error_Handle(u8 *pData, u64 old_page_addr, u64 blk_addr); | ||
| 90 | |||
| 91 | struct device_info_tag DeviceInfo; | 88 | struct device_info_tag DeviceInfo; |
| 92 | struct flash_cache_tag Cache; | 89 | struct flash_cache_tag Cache; |
| 93 | static struct spectra_l2_cache_info cache_l2; | 90 | static struct spectra_l2_cache_info cache_l2; |
| @@ -775,7 +772,7 @@ static void dump_cache_l2_table(void) | |||
| 775 | { | 772 | { |
| 776 | struct list_head *p; | 773 | struct list_head *p; |
| 777 | struct spectra_l2_cache_list *pnd; | 774 | struct spectra_l2_cache_list *pnd; |
| 778 | int n, i; | 775 | int n; |
| 779 | 776 | ||
| 780 | n = 0; | 777 | n = 0; |
| 781 | list_for_each(p, &cache_l2.table.list) { | 778 | list_for_each(p, &cache_l2.table.list) { |
| @@ -1538,79 +1535,6 @@ static int FTL_Cache_Write_All(u8 *pData, u64 blk_addr) | |||
| 1538 | } | 1535 | } |
| 1539 | 1536 | ||
| 1540 | /*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&& | 1537 | /*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&& |
| 1541 | * Function: FTL_Cache_Update_Block | ||
| 1542 | * Inputs: pointer to buffer,page address,block address | ||
| 1543 | * Outputs: PASS=0 / FAIL=1 | ||
| 1544 | * Description: It updates the cache | ||
| 1545 | *&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/ | ||
| 1546 | static int FTL_Cache_Update_Block(u8 *pData, | ||
| 1547 | u64 old_page_addr, u64 blk_addr) | ||
| 1548 | { | ||
| 1549 | int i, j; | ||
| 1550 | u8 *buf = pData; | ||
| 1551 | int wResult = PASS; | ||
| 1552 | int wFoundInCache; | ||
| 1553 | u64 page_addr; | ||
| 1554 | u64 addr; | ||
| 1555 | u64 old_blk_addr; | ||
| 1556 | u16 page_offset; | ||
| 1557 | |||
| 1558 | nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n", | ||
| 1559 | __FILE__, __LINE__, __func__); | ||
| 1560 | |||
| 1561 | old_blk_addr = (u64)(old_page_addr >> | ||
| 1562 | DeviceInfo.nBitsInBlockDataSize) * DeviceInfo.wBlockDataSize; | ||
| 1563 | page_offset = (u16)(GLOB_u64_Remainder(old_page_addr, 2) >> | ||
| 1564 | DeviceInfo.nBitsInPageDataSize); | ||
| 1565 | |||
| 1566 | for (i = 0; i < DeviceInfo.wPagesPerBlock; i += Cache.pages_per_item) { | ||
| 1567 | page_addr = old_blk_addr + i * DeviceInfo.wPageDataSize; | ||
| 1568 | if (i != page_offset) { | ||
| 1569 | wFoundInCache = FAIL; | ||
| 1570 | for (j = 0; j < CACHE_ITEM_NUM; j++) { | ||
| 1571 | addr = Cache.array[j].address; | ||
| 1572 | addr = FTL_Get_Physical_Block_Addr(addr) + | ||
| 1573 | GLOB_u64_Remainder(addr, 2); | ||
| 1574 | if ((addr >= page_addr) && addr < | ||
| 1575 | (page_addr + Cache.cache_item_size)) { | ||
| 1576 | wFoundInCache = PASS; | ||
| 1577 | buf = Cache.array[j].buf; | ||
| 1578 | Cache.array[j].changed = SET; | ||
| 1579 | #if CMD_DMA | ||
| 1580 | #if RESTORE_CACHE_ON_CDMA_CHAIN_FAILURE | ||
| 1581 | int_cache[ftl_cmd_cnt].item = j; | ||
| 1582 | int_cache[ftl_cmd_cnt].cache.address = | ||
| 1583 | Cache.array[j].address; | ||
| 1584 | int_cache[ftl_cmd_cnt].cache.changed = | ||
| 1585 | Cache.array[j].changed; | ||
| 1586 | #endif | ||
| 1587 | #endif | ||
| 1588 | break; | ||
| 1589 | } | ||
| 1590 | } | ||
| 1591 | if (FAIL == wFoundInCache) { | ||
| 1592 | if (ERR == FTL_Cache_Read_All(g_pTempBuf, | ||
| 1593 | page_addr)) { | ||
| 1594 | wResult = FAIL; | ||
| 1595 | break; | ||
| 1596 | } | ||
| 1597 | buf = g_pTempBuf; | ||
| 1598 | } | ||
| 1599 | } else { | ||
| 1600 | buf = pData; | ||
| 1601 | } | ||
| 1602 | |||
| 1603 | if (FAIL == FTL_Cache_Write_All(buf, | ||
| 1604 | blk_addr + (page_addr - old_blk_addr))) { | ||
| 1605 | wResult = FAIL; | ||
| 1606 | break; | ||
| 1607 | } | ||
| 1608 | } | ||
| 1609 | |||
| 1610 | return wResult; | ||
| 1611 | } | ||
| 1612 | |||
| 1613 | /*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&& | ||
| 1614 | * Function: FTL_Copy_Block | 1538 | * Function: FTL_Copy_Block |
| 1615 | * Inputs: source block address | 1539 | * Inputs: source block address |
| 1616 | * Destination block address | 1540 | * Destination block address |
| @@ -1698,7 +1622,7 @@ static int get_l2_cache_blks(void) | |||
| 1698 | static int erase_l2_cache_blocks(void) | 1622 | static int erase_l2_cache_blocks(void) |
| 1699 | { | 1623 | { |
| 1700 | int i, ret = PASS; | 1624 | int i, ret = PASS; |
| 1701 | u32 pblk, lblk; | 1625 | u32 pblk, lblk = BAD_BLOCK; |
| 1702 | u64 addr; | 1626 | u64 addr; |
| 1703 | u32 *pbt = (u32 *)g_pBlockTable; | 1627 | u32 *pbt = (u32 *)g_pBlockTable; |
| 1704 | 1628 | ||
| @@ -2004,87 +1928,6 @@ static int search_l2_cache(u8 *buf, u64 logical_addr) | |||
| 2004 | return ret; | 1928 | return ret; |
| 2005 | } | 1929 | } |
| 2006 | 1930 | ||
| 2007 | /*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&& | ||
| 2008 | * Function: FTL_Cache_Write_Back | ||
| 2009 | * Inputs: pointer to data cached in sys memory | ||
| 2010 | * address of free block in flash | ||
| 2011 | * Outputs: PASS=0 / FAIL=1 | ||
| 2012 | * Description: writes all the pages of Cache Block to flash | ||
| 2013 | * | ||
| 2014 | *&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/ | ||
| 2015 | static int FTL_Cache_Write_Back(u8 *pData, u64 blk_addr) | ||
| 2016 | { | ||
| 2017 | int i, j, iErase; | ||
| 2018 | u64 old_page_addr, addr, phy_addr; | ||
| 2019 | u32 *pbt = (u32 *)g_pBlockTable; | ||
| 2020 | u32 lba; | ||
| 2021 | |||
| 2022 | nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n", | ||
| 2023 | __FILE__, __LINE__, __func__); | ||
| 2024 | |||
| 2025 | old_page_addr = FTL_Get_Physical_Block_Addr(blk_addr) + | ||
| 2026 | GLOB_u64_Remainder(blk_addr, 2); | ||
| 2027 | |||
| 2028 | iErase = (FAIL == FTL_Replace_Block(blk_addr)) ? PASS : FAIL; | ||
| 2029 | |||
| 2030 | pbt[BLK_FROM_ADDR(blk_addr)] &= (~SPARE_BLOCK); | ||
| 2031 | |||
| 2032 | #if CMD_DMA | ||
| 2033 | p_BTableChangesDelta = (struct BTableChangesDelta *)g_pBTDelta_Free; | ||
| 2034 | g_pBTDelta_Free += sizeof(struct BTableChangesDelta); | ||
| 2035 | |||
| 2036 | p_BTableChangesDelta->ftl_cmd_cnt = ftl_cmd_cnt; | ||
| 2037 | p_BTableChangesDelta->BT_Index = (u32)(blk_addr >> | ||
| 2038 | DeviceInfo.nBitsInBlockDataSize); | ||
| 2039 | p_BTableChangesDelta->BT_Entry_Value = | ||
| 2040 | pbt[(u32)(blk_addr >> DeviceInfo.nBitsInBlockDataSize)]; | ||
| 2041 | p_BTableChangesDelta->ValidFields = 0x0C; | ||
| 2042 | #endif | ||
| 2043 | |||
| 2044 | if (IN_PROGRESS_BLOCK_TABLE != g_cBlockTableStatus) { | ||
| 2045 | g_cBlockTableStatus = IN_PROGRESS_BLOCK_TABLE; | ||
| 2046 | FTL_Write_IN_Progress_Block_Table_Page(); | ||
| 2047 | } | ||
| 2048 | |||
| 2049 | for (i = 0; i < RETRY_TIMES; i++) { | ||
| 2050 | if (PASS == iErase) { | ||
| 2051 | phy_addr = FTL_Get_Physical_Block_Addr(blk_addr); | ||
| 2052 | if (FAIL == GLOB_FTL_Block_Erase(phy_addr)) { | ||
| 2053 | lba = BLK_FROM_ADDR(blk_addr); | ||
| 2054 | MARK_BLOCK_AS_BAD(pbt[lba]); | ||
| 2055 | i = RETRY_TIMES; | ||
| 2056 | break; | ||
| 2057 | } | ||
| 2058 | } | ||
| 2059 | |||
| 2060 | for (j = 0; j < CACHE_ITEM_NUM; j++) { | ||
| 2061 | addr = Cache.array[j].address; | ||
| 2062 | if ((addr <= blk_addr) && | ||
| 2063 | ((addr + Cache.cache_item_size) > blk_addr)) | ||
| 2064 | cache_block_to_write = j; | ||
| 2065 | } | ||
| 2066 | |||
| 2067 | phy_addr = FTL_Get_Physical_Block_Addr(blk_addr); | ||
| 2068 | if (PASS == FTL_Cache_Update_Block(pData, | ||
| 2069 | old_page_addr, phy_addr)) { | ||
| 2070 | cache_block_to_write = UNHIT_CACHE_ITEM; | ||
| 2071 | break; | ||
| 2072 | } else { | ||
| 2073 | iErase = PASS; | ||
| 2074 | } | ||
| 2075 | } | ||
| 2076 | |||
| 2077 | if (i >= RETRY_TIMES) { | ||
| 2078 | if (ERR == FTL_Flash_Error_Handle(pData, | ||
| 2079 | old_page_addr, blk_addr)) | ||
| 2080 | return ERR; | ||
| 2081 | else | ||
| 2082 | return FAIL; | ||
| 2083 | } | ||
| 2084 | |||
| 2085 | return PASS; | ||
| 2086 | } | ||
| 2087 | |||
| 2088 | /*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&& | 1931 | /*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&& |
| 2089 | * Function: FTL_Cache_Write_Page | 1932 | * Function: FTL_Cache_Write_Page |
| 2090 | * Inputs: Pointer to buffer, page address, cache block number | 1933 | * Inputs: Pointer to buffer, page address, cache block number |
| @@ -2370,159 +2213,6 @@ static int FTL_Write_Block_Table(int wForce) | |||
| 2370 | return 1; | 2213 | return 1; |
| 2371 | } | 2214 | } |
| 2372 | 2215 | ||
| 2373 | /****************************************************************** | ||
| 2374 | * Function: GLOB_FTL_Flash_Format | ||
| 2375 | * Inputs: none | ||
| 2376 | * Outputs: PASS | ||
| 2377 | * Description: The block table stores bad block info, including MDF+ | ||
| 2378 | * blocks gone bad over the ages. Therefore, if we have a | ||
| 2379 | * block table in place, then use it to scan for bad blocks | ||
| 2380 | * If not, then scan for MDF. | ||
| 2381 | * Now, a block table will only be found if spectra was already | ||
| 2382 | * being used. For a fresh flash, we'll go thru scanning for | ||
| 2383 | * MDF. If spectra was being used, then there is a chance that | ||
| 2384 | * the MDF has been corrupted. Spectra avoids writing to the | ||
| 2385 | * first 2 bytes of the spare area to all pages in a block. This | ||
| 2386 | * covers all known flash devices. However, since flash | ||
| 2387 | * manufacturers have no standard of where the MDF is stored, | ||
| 2388 | * this cannot guarantee that the MDF is protected for future | ||
| 2389 | * devices too. The initial scanning for the block table assures | ||
| 2390 | * this. It is ok even if the block table is outdated, as all | ||
| 2391 | * we're looking for are bad block markers. | ||
| 2392 | * Use this when mounting a file system or starting a | ||
| 2393 | * new flash. | ||
| 2394 | * | ||
| 2395 | *********************************************************************/ | ||
| 2396 | static int FTL_Format_Flash(u8 valid_block_table) | ||
| 2397 | { | ||
| 2398 | u32 i, j; | ||
| 2399 | u32 *pbt = (u32 *)g_pBlockTable; | ||
| 2400 | u32 tempNode; | ||
| 2401 | int ret; | ||
| 2402 | |||
| 2403 | #if CMD_DMA | ||
| 2404 | u32 *pbtStartingCopy = (u32 *)g_pBTStartingCopy; | ||
| 2405 | if (ftl_cmd_cnt) | ||
| 2406 | return FAIL; | ||
| 2407 | #endif | ||
| 2408 | |||
| 2409 | if (FAIL == FTL_Check_Block_Table(FAIL)) | ||
| 2410 | valid_block_table = 0; | ||
| 2411 | |||
| 2412 | if (valid_block_table) { | ||
| 2413 | u8 switched = 1; | ||
| 2414 | u32 block, k; | ||
| 2415 | |||
| 2416 | k = DeviceInfo.wSpectraStartBlock; | ||
| 2417 | while (switched && (k < DeviceInfo.wSpectraEndBlock)) { | ||
| 2418 | switched = 0; | ||
| 2419 | k++; | ||
| 2420 | for (j = DeviceInfo.wSpectraStartBlock, i = 0; | ||
| 2421 | j <= DeviceInfo.wSpectraEndBlock; | ||
| 2422 | j++, i++) { | ||
| 2423 | block = (pbt[i] & ~BAD_BLOCK) - | ||
| 2424 | DeviceInfo.wSpectraStartBlock; | ||
| 2425 | if (block != i) { | ||
| 2426 | switched = 1; | ||
| 2427 | tempNode = pbt[i]; | ||
| 2428 | pbt[i] = pbt[block]; | ||
| 2429 | pbt[block] = tempNode; | ||
| 2430 | } | ||
| 2431 | } | ||
| 2432 | } | ||
| 2433 | if ((k == DeviceInfo.wSpectraEndBlock) && switched) | ||
| 2434 | valid_block_table = 0; | ||
| 2435 | } | ||
| 2436 | |||
| 2437 | if (!valid_block_table) { | ||
| 2438 | memset(g_pBlockTable, 0, | ||
| 2439 | DeviceInfo.wDataBlockNum * sizeof(u32)); | ||
| 2440 | memset(g_pWearCounter, 0, | ||
| 2441 | DeviceInfo.wDataBlockNum * sizeof(u8)); | ||
| 2442 | if (DeviceInfo.MLCDevice) | ||
| 2443 | memset(g_pReadCounter, 0, | ||
| 2444 | DeviceInfo.wDataBlockNum * sizeof(u16)); | ||
| 2445 | #if CMD_DMA | ||
| 2446 | memset(g_pBTStartingCopy, 0, | ||
| 2447 | DeviceInfo.wDataBlockNum * sizeof(u32)); | ||
| 2448 | memset(g_pWearCounterCopy, 0, | ||
| 2449 | DeviceInfo.wDataBlockNum * sizeof(u8)); | ||
| 2450 | if (DeviceInfo.MLCDevice) | ||
| 2451 | memset(g_pReadCounterCopy, 0, | ||
| 2452 | DeviceInfo.wDataBlockNum * sizeof(u16)); | ||
| 2453 | #endif | ||
| 2454 | for (j = DeviceInfo.wSpectraStartBlock, i = 0; | ||
| 2455 | j <= DeviceInfo.wSpectraEndBlock; | ||
| 2456 | j++, i++) { | ||
| 2457 | if (GLOB_LLD_Get_Bad_Block((u32)j)) | ||
| 2458 | pbt[i] = (u32)(BAD_BLOCK | j); | ||
| 2459 | } | ||
| 2460 | } | ||
| 2461 | |||
| 2462 | nand_dbg_print(NAND_DBG_WARN, "Erasing all blocks in the NAND\n"); | ||
| 2463 | |||
| 2464 | for (j = DeviceInfo.wSpectraStartBlock, i = 0; | ||
| 2465 | j <= DeviceInfo.wSpectraEndBlock; | ||
| 2466 | j++, i++) { | ||
| 2467 | if ((pbt[i] & BAD_BLOCK) != BAD_BLOCK) { | ||
| 2468 | ret = GLOB_LLD_Erase_Block(j); | ||
| 2469 | if (FAIL == ret) { | ||
| 2470 | pbt[i] = (u32)(j); | ||
| 2471 | MARK_BLOCK_AS_BAD(pbt[i]); | ||
| 2472 | nand_dbg_print(NAND_DBG_WARN, | ||
| 2473 | "NAND Program fail in %s, Line %d, " | ||
| 2474 | "Function: %s, new Bad Block %d generated!\n", | ||
| 2475 | __FILE__, __LINE__, __func__, (int)j); | ||
| 2476 | } else { | ||
| 2477 | pbt[i] = (u32)(SPARE_BLOCK | j); | ||
| 2478 | } | ||
| 2479 | } | ||
| 2480 | #if CMD_DMA | ||
| 2481 | pbtStartingCopy[i] = pbt[i]; | ||
| 2482 | #endif | ||
| 2483 | } | ||
| 2484 | |||
| 2485 | g_wBlockTableOffset = 0; | ||
| 2486 | for (i = 0; (i <= (DeviceInfo.wSpectraEndBlock - | ||
| 2487 | DeviceInfo.wSpectraStartBlock)) | ||
| 2488 | && ((pbt[i] & BAD_BLOCK) == BAD_BLOCK); i++) | ||
| 2489 | ; | ||
| 2490 | if (i > (DeviceInfo.wSpectraEndBlock - DeviceInfo.wSpectraStartBlock)) { | ||
| 2491 | printk(KERN_ERR "All blocks bad!\n"); | ||
| 2492 | return FAIL; | ||
| 2493 | } else { | ||
| 2494 | g_wBlockTableIndex = pbt[i] & ~BAD_BLOCK; | ||
| 2495 | if (i != BLOCK_TABLE_INDEX) { | ||
| 2496 | tempNode = pbt[i]; | ||
| 2497 | pbt[i] = pbt[BLOCK_TABLE_INDEX]; | ||
| 2498 | pbt[BLOCK_TABLE_INDEX] = tempNode; | ||
| 2499 | } | ||
| 2500 | } | ||
| 2501 | pbt[BLOCK_TABLE_INDEX] &= (~SPARE_BLOCK); | ||
| 2502 | |||
| 2503 | #if CMD_DMA | ||
| 2504 | pbtStartingCopy[BLOCK_TABLE_INDEX] &= (~SPARE_BLOCK); | ||
| 2505 | #endif | ||
| 2506 | |||
| 2507 | g_cBlockTableStatus = IN_PROGRESS_BLOCK_TABLE; | ||
| 2508 | memset(g_pBTBlocks, 0xFF, | ||
| 2509 | (1 + LAST_BT_ID - FIRST_BT_ID) * sizeof(u32)); | ||
| 2510 | g_pBTBlocks[FIRST_BT_ID-FIRST_BT_ID] = g_wBlockTableIndex; | ||
| 2511 | FTL_Write_Block_Table(FAIL); | ||
| 2512 | |||
| 2513 | for (i = 0; i < CACHE_ITEM_NUM; i++) { | ||
| 2514 | Cache.array[i].address = NAND_CACHE_INIT_ADDR; | ||
| 2515 | Cache.array[i].use_cnt = 0; | ||
| 2516 | Cache.array[i].changed = CLEAR; | ||
| 2517 | } | ||
| 2518 | |||
| 2519 | #if (RESTORE_CACHE_ON_CDMA_CHAIN_FAILURE && CMD_DMA) | ||
| 2520 | memcpy((void *)&cache_start_copy, (void *)&Cache, | ||
| 2521 | sizeof(struct flash_cache_tag)); | ||
| 2522 | #endif | ||
| 2523 | return PASS; | ||
| 2524 | } | ||
| 2525 | |||
| 2526 | static int force_format_nand(void) | 2216 | static int force_format_nand(void) |
| 2527 | { | 2217 | { |
| 2528 | u32 i; | 2218 | u32 i; |
| @@ -3031,112 +2721,6 @@ static int FTL_Read_Block_Table(void) | |||
| 3031 | return wResult; | 2721 | return wResult; |
| 3032 | } | 2722 | } |
| 3033 | 2723 | ||
| 3034 | |||
| 3035 | /*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&& | ||
| 3036 | * Function: FTL_Flash_Error_Handle | ||
| 3037 | * Inputs: Pointer to data | ||
| 3038 | * Page address | ||
| 3039 | * Block address | ||
| 3040 | * Outputs: PASS=0 / FAIL=1 | ||
| 3041 | * Description: It handles any error occured during Spectra operation | ||
| 3042 | *&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/ | ||
| 3043 | static int FTL_Flash_Error_Handle(u8 *pData, u64 old_page_addr, | ||
| 3044 | u64 blk_addr) | ||
| 3045 | { | ||
| 3046 | u32 i; | ||
| 3047 | int j; | ||
| 3048 | u32 tmp_node, blk_node = BLK_FROM_ADDR(blk_addr); | ||
| 3049 | u64 phy_addr; | ||
| 3050 | int wErase = FAIL; | ||
| 3051 | int wResult = FAIL; | ||
| 3052 | u32 *pbt = (u32 *)g_pBlockTable; | ||
| 3053 | |||
| 3054 | nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n", | ||
| 3055 | __FILE__, __LINE__, __func__); | ||
| 3056 | |||
| 3057 | if (ERR == GLOB_FTL_Garbage_Collection()) | ||
| 3058 | return ERR; | ||
| 3059 | |||
| 3060 | do { | ||
| 3061 | for (i = DeviceInfo.wSpectraEndBlock - | ||
| 3062 | DeviceInfo.wSpectraStartBlock; | ||
| 3063 | i > 0; i--) { | ||
| 3064 | if (IS_SPARE_BLOCK(i)) { | ||
| 3065 | tmp_node = (u32)(BAD_BLOCK | | ||
| 3066 | pbt[blk_node]); | ||
| 3067 | pbt[blk_node] = (u32)(pbt[i] & | ||
| 3068 | (~SPARE_BLOCK)); | ||
| 3069 | pbt[i] = tmp_node; | ||
| 3070 | #if CMD_DMA | ||
| 3071 | p_BTableChangesDelta = | ||
| 3072 | (struct BTableChangesDelta *) | ||
| 3073 | g_pBTDelta_Free; | ||
| 3074 | g_pBTDelta_Free += | ||
| 3075 | sizeof(struct BTableChangesDelta); | ||
| 3076 | |||
| 3077 | p_BTableChangesDelta->ftl_cmd_cnt = | ||
| 3078 | ftl_cmd_cnt; | ||
| 3079 | p_BTableChangesDelta->BT_Index = | ||
| 3080 | blk_node; | ||
| 3081 | p_BTableChangesDelta->BT_Entry_Value = | ||
| 3082 | pbt[blk_node]; | ||
| 3083 | p_BTableChangesDelta->ValidFields = 0x0C; | ||
| 3084 | |||
| 3085 | p_BTableChangesDelta = | ||
| 3086 | (struct BTableChangesDelta *) | ||
| 3087 | g_pBTDelta_Free; | ||
| 3088 | g_pBTDelta_Free += | ||
| 3089 | sizeof(struct BTableChangesDelta); | ||
| 3090 | |||
| 3091 | p_BTableChangesDelta->ftl_cmd_cnt = | ||
| 3092 | ftl_cmd_cnt; | ||
| 3093 | p_BTableChangesDelta->BT_Index = i; | ||
| 3094 | p_BTableChangesDelta->BT_Entry_Value = pbt[i]; | ||
| 3095 | p_BTableChangesDelta->ValidFields = 0x0C; | ||
| 3096 | #endif | ||
| 3097 | wResult = PASS; | ||
| 3098 | break; | ||
| 3099 | } | ||
| 3100 | } | ||
| 3101 | |||
| 3102 | if (FAIL == wResult) { | ||
| 3103 | if (FAIL == GLOB_FTL_Garbage_Collection()) | ||
| 3104 | break; | ||
| 3105 | else | ||
| 3106 | continue; | ||
| 3107 | } | ||
| 3108 | |||
| 3109 | if (IN_PROGRESS_BLOCK_TABLE != g_cBlockTableStatus) { | ||
| 3110 | g_cBlockTableStatus = IN_PROGRESS_BLOCK_TABLE; | ||
| 3111 | FTL_Write_IN_Progress_Block_Table_Page(); | ||
| 3112 | } | ||
| 3113 | |||
| 3114 | phy_addr = FTL_Get_Physical_Block_Addr(blk_addr); | ||
| 3115 | |||
| 3116 | for (j = 0; j < RETRY_TIMES; j++) { | ||
| 3117 | if (PASS == wErase) { | ||
| 3118 | if (FAIL == GLOB_FTL_Block_Erase(phy_addr)) { | ||
| 3119 | MARK_BLOCK_AS_BAD(pbt[blk_node]); | ||
| 3120 | break; | ||
| 3121 | } | ||
| 3122 | } | ||
| 3123 | if (PASS == FTL_Cache_Update_Block(pData, | ||
| 3124 | old_page_addr, | ||
| 3125 | phy_addr)) { | ||
| 3126 | wResult = PASS; | ||
| 3127 | break; | ||
| 3128 | } else { | ||
| 3129 | wResult = FAIL; | ||
| 3130 | wErase = PASS; | ||
| 3131 | } | ||
| 3132 | } | ||
| 3133 | } while (FAIL == wResult); | ||
| 3134 | |||
| 3135 | FTL_Write_Block_Table(FAIL); | ||
| 3136 | |||
| 3137 | return wResult; | ||
| 3138 | } | ||
| 3139 | |||
| 3140 | /*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&& | 2724 | /*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&& |
| 3141 | * Function: FTL_Get_Page_Num | 2725 | * Function: FTL_Get_Page_Num |
| 3142 | * Inputs: Size in bytes | 2726 | * Inputs: Size in bytes |
diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c index e483f80822d2..1160c55de7f2 100644 --- a/drivers/usb/gadget/composite.c +++ b/drivers/usb/gadget/composite.c | |||
| @@ -723,12 +723,12 @@ int usb_string_ids_tab(struct usb_composite_dev *cdev, struct usb_string *str) | |||
| 723 | 723 | ||
| 724 | /** | 724 | /** |
| 725 | * usb_string_ids_n() - allocate unused string IDs in batch | 725 | * usb_string_ids_n() - allocate unused string IDs in batch |
| 726 | * @cdev: the device whose string descriptor IDs are being allocated | 726 | * @c: the device whose string descriptor IDs are being allocated |
| 727 | * @n: number of string IDs to allocate | 727 | * @n: number of string IDs to allocate |
| 728 | * Context: single threaded during gadget setup | 728 | * Context: single threaded during gadget setup |
| 729 | * | 729 | * |
| 730 | * Returns the first requested ID. This ID and next @n-1 IDs are now | 730 | * Returns the first requested ID. This ID and next @n-1 IDs are now |
| 731 | * valid IDs. At least providind that @n is non zore because if it | 731 | * valid IDs. At least provided that @n is non-zero because if it |
| 732 | * is, returns last requested ID which is now very useful information. | 732 | * is, returns last requested ID which is now very useful information. |
| 733 | * | 733 | * |
| 734 | * @usb_string_ids_n() is called from bind() callbacks to allocate | 734 | * @usb_string_ids_n() is called from bind() callbacks to allocate |
diff --git a/drivers/usb/gadget/m66592-udc.c b/drivers/usb/gadget/m66592-udc.c index 166bf71fd348..e03058fe23cb 100644 --- a/drivers/usb/gadget/m66592-udc.c +++ b/drivers/usb/gadget/m66592-udc.c | |||
| @@ -1609,6 +1609,7 @@ static int __init m66592_probe(struct platform_device *pdev) | |||
| 1609 | /* initialize ucd */ | 1609 | /* initialize ucd */ |
| 1610 | m66592 = kzalloc(sizeof(struct m66592), GFP_KERNEL); | 1610 | m66592 = kzalloc(sizeof(struct m66592), GFP_KERNEL); |
| 1611 | if (m66592 == NULL) { | 1611 | if (m66592 == NULL) { |
| 1612 | ret = -ENOMEM; | ||
| 1612 | pr_err("kzalloc error\n"); | 1613 | pr_err("kzalloc error\n"); |
| 1613 | goto clean_up; | 1614 | goto clean_up; |
| 1614 | } | 1615 | } |
diff --git a/drivers/usb/gadget/r8a66597-udc.c b/drivers/usb/gadget/r8a66597-udc.c index 70a817842755..2456ccd9965e 100644 --- a/drivers/usb/gadget/r8a66597-udc.c +++ b/drivers/usb/gadget/r8a66597-udc.c | |||
| @@ -1557,6 +1557,7 @@ static int __init r8a66597_probe(struct platform_device *pdev) | |||
| 1557 | /* initialize ucd */ | 1557 | /* initialize ucd */ |
| 1558 | r8a66597 = kzalloc(sizeof(struct r8a66597), GFP_KERNEL); | 1558 | r8a66597 = kzalloc(sizeof(struct r8a66597), GFP_KERNEL); |
| 1559 | if (r8a66597 == NULL) { | 1559 | if (r8a66597 == NULL) { |
| 1560 | ret = -ENOMEM; | ||
| 1560 | printk(KERN_ERR "kzalloc error\n"); | 1561 | printk(KERN_ERR "kzalloc error\n"); |
| 1561 | goto clean_up; | 1562 | goto clean_up; |
| 1562 | } | 1563 | } |
diff --git a/drivers/usb/gadget/uvc_v4l2.c b/drivers/usb/gadget/uvc_v4l2.c index 2dcffdac86d2..5e807f083bc8 100644 --- a/drivers/usb/gadget/uvc_v4l2.c +++ b/drivers/usb/gadget/uvc_v4l2.c | |||
| @@ -94,7 +94,7 @@ uvc_v4l2_set_format(struct uvc_video *video, struct v4l2_format *fmt) | |||
| 94 | break; | 94 | break; |
| 95 | } | 95 | } |
| 96 | 96 | ||
| 97 | if (format == NULL || format->fcc != fmt->fmt.pix.pixelformat) { | 97 | if (i == ARRAY_SIZE(uvc_formats)) { |
| 98 | printk(KERN_INFO "Unsupported format 0x%08x.\n", | 98 | printk(KERN_INFO "Unsupported format 0x%08x.\n", |
| 99 | fmt->fmt.pix.pixelformat); | 99 | fmt->fmt.pix.pixelformat); |
| 100 | return -EINVAL; | 100 | return -EINVAL; |
diff --git a/drivers/usb/host/isp1760-hcd.c b/drivers/usb/host/isp1760-hcd.c index d1a3dfc9a408..bdba8c5d844a 100644 --- a/drivers/usb/host/isp1760-hcd.c +++ b/drivers/usb/host/isp1760-hcd.c | |||
| @@ -829,6 +829,7 @@ static void enqueue_an_ATL_packet(struct usb_hcd *hcd, struct isp1760_qh *qh, | |||
| 829 | * almost immediately. With ISP1761, this register requires a delay of | 829 | * almost immediately. With ISP1761, this register requires a delay of |
| 830 | * 195ns between a write and subsequent read (see section 15.1.1.3). | 830 | * 195ns between a write and subsequent read (see section 15.1.1.3). |
| 831 | */ | 831 | */ |
| 832 | mmiowb(); | ||
| 832 | ndelay(195); | 833 | ndelay(195); |
| 833 | skip_map = isp1760_readl(hcd->regs + HC_ATL_PTD_SKIPMAP_REG); | 834 | skip_map = isp1760_readl(hcd->regs + HC_ATL_PTD_SKIPMAP_REG); |
| 834 | 835 | ||
| @@ -870,6 +871,7 @@ static void enqueue_an_INT_packet(struct usb_hcd *hcd, struct isp1760_qh *qh, | |||
| 870 | * almost immediately. With ISP1761, this register requires a delay of | 871 | * almost immediately. With ISP1761, this register requires a delay of |
| 871 | * 195ns between a write and subsequent read (see section 15.1.1.3). | 872 | * 195ns between a write and subsequent read (see section 15.1.1.3). |
| 872 | */ | 873 | */ |
| 874 | mmiowb(); | ||
| 873 | ndelay(195); | 875 | ndelay(195); |
| 874 | skip_map = isp1760_readl(hcd->regs + HC_INT_PTD_SKIPMAP_REG); | 876 | skip_map = isp1760_readl(hcd->regs + HC_INT_PTD_SKIPMAP_REG); |
| 875 | 877 | ||
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c index bc3f4f427065..48e60d166ff0 100644 --- a/drivers/usb/host/xhci-ring.c +++ b/drivers/usb/host/xhci-ring.c | |||
| @@ -131,7 +131,7 @@ static void next_trb(struct xhci_hcd *xhci, | |||
| 131 | *seg = (*seg)->next; | 131 | *seg = (*seg)->next; |
| 132 | *trb = ((*seg)->trbs); | 132 | *trb = ((*seg)->trbs); |
| 133 | } else { | 133 | } else { |
| 134 | *trb = (*trb)++; | 134 | (*trb)++; |
| 135 | } | 135 | } |
| 136 | } | 136 | } |
| 137 | 137 | ||
| @@ -1551,6 +1551,10 @@ static int process_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td, | |||
| 1551 | /* calc actual length */ | 1551 | /* calc actual length */ |
| 1552 | if (ep->skip) { | 1552 | if (ep->skip) { |
| 1553 | td->urb->iso_frame_desc[idx].actual_length = 0; | 1553 | td->urb->iso_frame_desc[idx].actual_length = 0; |
| 1554 | /* Update ring dequeue pointer */ | ||
| 1555 | while (ep_ring->dequeue != td->last_trb) | ||
| 1556 | inc_deq(xhci, ep_ring, false); | ||
| 1557 | inc_deq(xhci, ep_ring, false); | ||
| 1554 | return finish_td(xhci, td, event_trb, event, ep, status, true); | 1558 | return finish_td(xhci, td, event_trb, event, ep, status, true); |
| 1555 | } | 1559 | } |
| 1556 | 1560 | ||
diff --git a/drivers/usb/misc/adutux.c b/drivers/usb/misc/adutux.c index d240de097c62..801324af9470 100644 --- a/drivers/usb/misc/adutux.c +++ b/drivers/usb/misc/adutux.c | |||
| @@ -439,7 +439,7 @@ static ssize_t adu_read(struct file *file, __user char *buffer, size_t count, | |||
| 439 | /* drain secondary buffer */ | 439 | /* drain secondary buffer */ |
| 440 | int amount = bytes_to_read < data_in_secondary ? bytes_to_read : data_in_secondary; | 440 | int amount = bytes_to_read < data_in_secondary ? bytes_to_read : data_in_secondary; |
| 441 | i = copy_to_user(buffer, dev->read_buffer_secondary+dev->secondary_head, amount); | 441 | i = copy_to_user(buffer, dev->read_buffer_secondary+dev->secondary_head, amount); |
| 442 | if (i < 0) { | 442 | if (i) { |
| 443 | retval = -EFAULT; | 443 | retval = -EFAULT; |
| 444 | goto exit; | 444 | goto exit; |
| 445 | } | 445 | } |
diff --git a/drivers/usb/misc/iowarrior.c b/drivers/usb/misc/iowarrior.c index 2de49c8887c5..bc88c79875a1 100644 --- a/drivers/usb/misc/iowarrior.c +++ b/drivers/usb/misc/iowarrior.c | |||
| @@ -542,7 +542,7 @@ static long iowarrior_ioctl(struct file *file, unsigned int cmd, | |||
| 542 | retval = io_res; | 542 | retval = io_res; |
| 543 | else { | 543 | else { |
| 544 | io_res = copy_to_user(user_buffer, buffer, dev->report_size); | 544 | io_res = copy_to_user(user_buffer, buffer, dev->report_size); |
| 545 | if (io_res < 0) | 545 | if (io_res) |
| 546 | retval = -EFAULT; | 546 | retval = -EFAULT; |
| 547 | } | 547 | } |
| 548 | break; | 548 | break; |
| @@ -574,7 +574,7 @@ static long iowarrior_ioctl(struct file *file, unsigned int cmd, | |||
| 574 | } | 574 | } |
| 575 | io_res = copy_to_user((struct iowarrior_info __user *)arg, &info, | 575 | io_res = copy_to_user((struct iowarrior_info __user *)arg, &info, |
| 576 | sizeof(struct iowarrior_info)); | 576 | sizeof(struct iowarrior_info)); |
| 577 | if (io_res < 0) | 577 | if (io_res) |
| 578 | retval = -EFAULT; | 578 | retval = -EFAULT; |
| 579 | break; | 579 | break; |
| 580 | } | 580 | } |
diff --git a/drivers/usb/otg/twl4030-usb.c b/drivers/usb/otg/twl4030-usb.c index 0e8888588d4e..05aaac1c3861 100644 --- a/drivers/usb/otg/twl4030-usb.c +++ b/drivers/usb/otg/twl4030-usb.c | |||
| @@ -550,6 +550,7 @@ static int __devinit twl4030_usb_probe(struct platform_device *pdev) | |||
| 550 | struct twl4030_usb_data *pdata = pdev->dev.platform_data; | 550 | struct twl4030_usb_data *pdata = pdev->dev.platform_data; |
| 551 | struct twl4030_usb *twl; | 551 | struct twl4030_usb *twl; |
| 552 | int status, err; | 552 | int status, err; |
| 553 | u8 pwr; | ||
| 553 | 554 | ||
| 554 | if (!pdata) { | 555 | if (!pdata) { |
| 555 | dev_dbg(&pdev->dev, "platform_data not available\n"); | 556 | dev_dbg(&pdev->dev, "platform_data not available\n"); |
| @@ -568,7 +569,10 @@ static int __devinit twl4030_usb_probe(struct platform_device *pdev) | |||
| 568 | twl->otg.set_peripheral = twl4030_set_peripheral; | 569 | twl->otg.set_peripheral = twl4030_set_peripheral; |
| 569 | twl->otg.set_suspend = twl4030_set_suspend; | 570 | twl->otg.set_suspend = twl4030_set_suspend; |
| 570 | twl->usb_mode = pdata->usb_mode; | 571 | twl->usb_mode = pdata->usb_mode; |
| 571 | twl->asleep = 1; | 572 | |
| 573 | pwr = twl4030_usb_read(twl, PHY_PWR_CTRL); | ||
| 574 | |||
| 575 | twl->asleep = (pwr & PHY_PWR_PHYPWD); | ||
| 572 | 576 | ||
| 573 | /* init spinlock for workqueue */ | 577 | /* init spinlock for workqueue */ |
| 574 | spin_lock_init(&twl->lock); | 578 | spin_lock_init(&twl->lock); |
diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c index 2bef4415c19c..80bf8333bb03 100644 --- a/drivers/usb/serial/cp210x.c +++ b/drivers/usb/serial/cp210x.c | |||
| @@ -222,8 +222,8 @@ static struct usb_serial_driver cp210x_device = { | |||
| 222 | #define BITS_STOP_2 0x0002 | 222 | #define BITS_STOP_2 0x0002 |
| 223 | 223 | ||
| 224 | /* CP210X_SET_BREAK */ | 224 | /* CP210X_SET_BREAK */ |
| 225 | #define BREAK_ON 0x0000 | 225 | #define BREAK_ON 0x0001 |
| 226 | #define BREAK_OFF 0x0001 | 226 | #define BREAK_OFF 0x0000 |
| 227 | 227 | ||
| 228 | /* CP210X_(SET_MHS|GET_MDMSTS) */ | 228 | /* CP210X_(SET_MHS|GET_MDMSTS) */ |
| 229 | #define CONTROL_DTR 0x0001 | 229 | #define CONTROL_DTR 0x0001 |
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c index eb12d9b096b4..63ddb2f65cee 100644 --- a/drivers/usb/serial/ftdi_sio.c +++ b/drivers/usb/serial/ftdi_sio.c | |||
| @@ -180,6 +180,7 @@ static struct usb_device_id id_table_combined [] = { | |||
| 180 | { USB_DEVICE(INTERBIOMETRICS_VID, INTERBIOMETRICS_IOBOARD_PID) }, | 180 | { USB_DEVICE(INTERBIOMETRICS_VID, INTERBIOMETRICS_IOBOARD_PID) }, |
| 181 | { USB_DEVICE(INTERBIOMETRICS_VID, INTERBIOMETRICS_MINI_IOBOARD_PID) }, | 181 | { USB_DEVICE(INTERBIOMETRICS_VID, INTERBIOMETRICS_MINI_IOBOARD_PID) }, |
| 182 | { USB_DEVICE(FTDI_VID, FTDI_SPROG_II) }, | 182 | { USB_DEVICE(FTDI_VID, FTDI_SPROG_II) }, |
| 183 | { USB_DEVICE(FTDI_VID, FTDI_LENZ_LIUSB_PID) }, | ||
| 183 | { USB_DEVICE(FTDI_VID, FTDI_XF_632_PID) }, | 184 | { USB_DEVICE(FTDI_VID, FTDI_XF_632_PID) }, |
| 184 | { USB_DEVICE(FTDI_VID, FTDI_XF_634_PID) }, | 185 | { USB_DEVICE(FTDI_VID, FTDI_XF_634_PID) }, |
| 185 | { USB_DEVICE(FTDI_VID, FTDI_XF_547_PID) }, | 186 | { USB_DEVICE(FTDI_VID, FTDI_XF_547_PID) }, |
| @@ -750,6 +751,8 @@ static struct usb_device_id id_table_combined [] = { | |||
| 750 | { USB_DEVICE(FTDI_VID, XVERVE_SIGNALYZER_SH4_PID), | 751 | { USB_DEVICE(FTDI_VID, XVERVE_SIGNALYZER_SH4_PID), |
| 751 | .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk }, | 752 | .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk }, |
| 752 | { USB_DEVICE(FTDI_VID, SEGWAY_RMP200_PID) }, | 753 | { USB_DEVICE(FTDI_VID, SEGWAY_RMP200_PID) }, |
| 754 | { USB_DEVICE(IONICS_VID, IONICS_PLUGCOMPUTER_PID), | ||
| 755 | .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk }, | ||
| 753 | { }, /* Optional parameter entry */ | 756 | { }, /* Optional parameter entry */ |
| 754 | { } /* Terminating entry */ | 757 | { } /* Terminating entry */ |
| 755 | }; | 758 | }; |
| @@ -1376,7 +1379,7 @@ static void ftdi_set_max_packet_size(struct usb_serial_port *port) | |||
| 1376 | } | 1379 | } |
| 1377 | 1380 | ||
| 1378 | /* set max packet size based on descriptor */ | 1381 | /* set max packet size based on descriptor */ |
| 1379 | priv->max_packet_size = ep_desc->wMaxPacketSize; | 1382 | priv->max_packet_size = le16_to_cpu(ep_desc->wMaxPacketSize); |
| 1380 | 1383 | ||
| 1381 | dev_info(&udev->dev, "Setting MaxPacketSize %d\n", priv->max_packet_size); | 1384 | dev_info(&udev->dev, "Setting MaxPacketSize %d\n", priv->max_packet_size); |
| 1382 | } | 1385 | } |
diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h index 6e612c52e763..2e95857c9633 100644 --- a/drivers/usb/serial/ftdi_sio_ids.h +++ b/drivers/usb/serial/ftdi_sio_ids.h | |||
| @@ -110,6 +110,9 @@ | |||
| 110 | /* Propox devices */ | 110 | /* Propox devices */ |
| 111 | #define FTDI_PROPOX_JTAGCABLEII_PID 0xD738 | 111 | #define FTDI_PROPOX_JTAGCABLEII_PID 0xD738 |
| 112 | 112 | ||
| 113 | /* Lenz LI-USB Computer Interface. */ | ||
| 114 | #define FTDI_LENZ_LIUSB_PID 0xD780 | ||
| 115 | |||
| 113 | /* | 116 | /* |
| 114 | * Xsens Technologies BV products (http://www.xsens.com). | 117 | * Xsens Technologies BV products (http://www.xsens.com). |
| 115 | */ | 118 | */ |
| @@ -989,6 +992,12 @@ | |||
| 989 | #define ALTI2_N3_PID 0x6001 /* Neptune 3 */ | 992 | #define ALTI2_N3_PID 0x6001 /* Neptune 3 */ |
| 990 | 993 | ||
| 991 | /* | 994 | /* |
| 995 | * Ionics PlugComputer | ||
| 996 | */ | ||
| 997 | #define IONICS_VID 0x1c0c | ||
| 998 | #define IONICS_PLUGCOMPUTER_PID 0x0102 | ||
| 999 | |||
| 1000 | /* | ||
| 992 | * Dresden Elektronik Sensor Terminal Board | 1001 | * Dresden Elektronik Sensor Terminal Board |
| 993 | */ | 1002 | */ |
| 994 | #define DE_VID 0x1cf1 /* Vendor ID */ | 1003 | #define DE_VID 0x1cf1 /* Vendor ID */ |
diff --git a/drivers/usb/serial/generic.c b/drivers/usb/serial/generic.c index ca92f67747cc..0b1a13384c6d 100644 --- a/drivers/usb/serial/generic.c +++ b/drivers/usb/serial/generic.c | |||
| @@ -518,6 +518,7 @@ void usb_serial_generic_disconnect(struct usb_serial *serial) | |||
| 518 | for (i = 0; i < serial->num_ports; ++i) | 518 | for (i = 0; i < serial->num_ports; ++i) |
| 519 | generic_cleanup(serial->port[i]); | 519 | generic_cleanup(serial->port[i]); |
| 520 | } | 520 | } |
| 521 | EXPORT_SYMBOL_GPL(usb_serial_generic_disconnect); | ||
| 521 | 522 | ||
| 522 | void usb_serial_generic_release(struct usb_serial *serial) | 523 | void usb_serial_generic_release(struct usb_serial *serial) |
| 523 | { | 524 | { |
diff --git a/drivers/usb/serial/io_ti.c b/drivers/usb/serial/io_ti.c index dc47f986df57..a7cfc5952937 100644 --- a/drivers/usb/serial/io_ti.c +++ b/drivers/usb/serial/io_ti.c | |||
| @@ -1151,7 +1151,7 @@ static int download_fw(struct edgeport_serial *serial) | |||
| 1151 | 1151 | ||
| 1152 | /* Check if we have an old version in the I2C and | 1152 | /* Check if we have an old version in the I2C and |
| 1153 | update if necessary */ | 1153 | update if necessary */ |
| 1154 | if (download_cur_ver != download_new_ver) { | 1154 | if (download_cur_ver < download_new_ver) { |
| 1155 | dbg("%s - Update I2C dld from %d.%d to %d.%d", | 1155 | dbg("%s - Update I2C dld from %d.%d to %d.%d", |
| 1156 | __func__, | 1156 | __func__, |
| 1157 | firmware_version->Ver_Major, | 1157 | firmware_version->Ver_Major, |
| @@ -1284,7 +1284,7 @@ static int download_fw(struct edgeport_serial *serial) | |||
| 1284 | kfree(header); | 1284 | kfree(header); |
| 1285 | kfree(rom_desc); | 1285 | kfree(rom_desc); |
| 1286 | kfree(ti_manuf_desc); | 1286 | kfree(ti_manuf_desc); |
| 1287 | return status; | 1287 | return -EINVAL; |
| 1288 | } | 1288 | } |
| 1289 | 1289 | ||
| 1290 | /* Update I2C with type 0xf2 record with correct | 1290 | /* Update I2C with type 0xf2 record with correct |
diff --git a/drivers/usb/serial/navman.c b/drivers/usb/serial/navman.c index a6b207c84917..1f00f243c26c 100644 --- a/drivers/usb/serial/navman.c +++ b/drivers/usb/serial/navman.c | |||
| @@ -25,6 +25,7 @@ static int debug; | |||
| 25 | 25 | ||
| 26 | static const struct usb_device_id id_table[] = { | 26 | static const struct usb_device_id id_table[] = { |
| 27 | { USB_DEVICE(0x0a99, 0x0001) }, /* Talon Technology device */ | 27 | { USB_DEVICE(0x0a99, 0x0001) }, /* Talon Technology device */ |
| 28 | { USB_DEVICE(0x0df7, 0x0900) }, /* Mobile Action i-gotU */ | ||
| 28 | { }, | 29 | { }, |
| 29 | }; | 30 | }; |
| 30 | MODULE_DEVICE_TABLE(usb, id_table); | 31 | MODULE_DEVICE_TABLE(usb, id_table); |
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c index 9fc6ea2c681f..adcbdb994de3 100644 --- a/drivers/usb/serial/option.c +++ b/drivers/usb/serial/option.c | |||
| @@ -365,6 +365,10 @@ static void option_instat_callback(struct urb *urb); | |||
| 365 | #define OLIVETTI_VENDOR_ID 0x0b3c | 365 | #define OLIVETTI_VENDOR_ID 0x0b3c |
| 366 | #define OLIVETTI_PRODUCT_OLICARD100 0xc000 | 366 | #define OLIVETTI_PRODUCT_OLICARD100 0xc000 |
| 367 | 367 | ||
| 368 | /* Celot products */ | ||
| 369 | #define CELOT_VENDOR_ID 0x211f | ||
| 370 | #define CELOT_PRODUCT_CT680M 0x6801 | ||
| 371 | |||
| 368 | /* some devices interfaces need special handling due to a number of reasons */ | 372 | /* some devices interfaces need special handling due to a number of reasons */ |
| 369 | enum option_blacklist_reason { | 373 | enum option_blacklist_reason { |
| 370 | OPTION_BLACKLIST_NONE = 0, | 374 | OPTION_BLACKLIST_NONE = 0, |
| @@ -887,10 +891,9 @@ static const struct usb_device_id option_ids[] = { | |||
| 887 | { USB_DEVICE(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_100F) }, | 891 | { USB_DEVICE(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_100F) }, |
| 888 | { USB_DEVICE(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_1011)}, | 892 | { USB_DEVICE(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_1011)}, |
| 889 | { USB_DEVICE(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_1012)}, | 893 | { USB_DEVICE(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_1012)}, |
| 890 | |||
| 891 | { USB_DEVICE(CINTERION_VENDOR_ID, 0x0047) }, | 894 | { USB_DEVICE(CINTERION_VENDOR_ID, 0x0047) }, |
| 892 | |||
| 893 | { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD100) }, | 895 | { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD100) }, |
| 896 | { USB_DEVICE(CELOT_VENDOR_ID, CELOT_PRODUCT_CT680M) }, /* CT-650 CDMA 450 1xEVDO modem */ | ||
| 894 | { } /* Terminating entry */ | 897 | { } /* Terminating entry */ |
| 895 | }; | 898 | }; |
| 896 | MODULE_DEVICE_TABLE(usb, option_ids); | 899 | MODULE_DEVICE_TABLE(usb, option_ids); |
diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c index 6b6001822279..c98f0fb675ba 100644 --- a/drivers/usb/serial/pl2303.c +++ b/drivers/usb/serial/pl2303.c | |||
| @@ -86,6 +86,7 @@ static const struct usb_device_id id_table[] = { | |||
| 86 | { USB_DEVICE(SUPERIAL_VENDOR_ID, SUPERIAL_PRODUCT_ID) }, | 86 | { USB_DEVICE(SUPERIAL_VENDOR_ID, SUPERIAL_PRODUCT_ID) }, |
| 87 | { USB_DEVICE(HP_VENDOR_ID, HP_LD220_PRODUCT_ID) }, | 87 | { USB_DEVICE(HP_VENDOR_ID, HP_LD220_PRODUCT_ID) }, |
| 88 | { USB_DEVICE(CRESSI_VENDOR_ID, CRESSI_EDY_PRODUCT_ID) }, | 88 | { USB_DEVICE(CRESSI_VENDOR_ID, CRESSI_EDY_PRODUCT_ID) }, |
| 89 | { USB_DEVICE(ZEAGLE_VENDOR_ID, ZEAGLE_N2ITION3_PRODUCT_ID) }, | ||
| 89 | { USB_DEVICE(SONY_VENDOR_ID, SONY_QN3USB_PRODUCT_ID) }, | 90 | { USB_DEVICE(SONY_VENDOR_ID, SONY_QN3USB_PRODUCT_ID) }, |
| 90 | { USB_DEVICE(SANWA_VENDOR_ID, SANWA_PRODUCT_ID) }, | 91 | { USB_DEVICE(SANWA_VENDOR_ID, SANWA_PRODUCT_ID) }, |
| 91 | { USB_DEVICE(ADLINK_VENDOR_ID, ADLINK_ND6530_PRODUCT_ID) }, | 92 | { USB_DEVICE(ADLINK_VENDOR_ID, ADLINK_ND6530_PRODUCT_ID) }, |
diff --git a/drivers/usb/serial/pl2303.h b/drivers/usb/serial/pl2303.h index a871645389dd..43eb9bdad422 100644 --- a/drivers/usb/serial/pl2303.h +++ b/drivers/usb/serial/pl2303.h | |||
| @@ -128,6 +128,10 @@ | |||
| 128 | #define CRESSI_VENDOR_ID 0x04b8 | 128 | #define CRESSI_VENDOR_ID 0x04b8 |
| 129 | #define CRESSI_EDY_PRODUCT_ID 0x0521 | 129 | #define CRESSI_EDY_PRODUCT_ID 0x0521 |
| 130 | 130 | ||
| 131 | /* Zeagle dive computer interface */ | ||
| 132 | #define ZEAGLE_VENDOR_ID 0x04b8 | ||
| 133 | #define ZEAGLE_N2ITION3_PRODUCT_ID 0x0522 | ||
| 134 | |||
| 131 | /* Sony, USB data cable for CMD-Jxx mobile phones */ | 135 | /* Sony, USB data cable for CMD-Jxx mobile phones */ |
| 132 | #define SONY_VENDOR_ID 0x054c | 136 | #define SONY_VENDOR_ID 0x054c |
| 133 | #define SONY_QN3USB_PRODUCT_ID 0x0437 | 137 | #define SONY_QN3USB_PRODUCT_ID 0x0437 |
diff --git a/drivers/usb/serial/ssu100.c b/drivers/usb/serial/ssu100.c index 6e82d4f54bc8..660c31f14999 100644 --- a/drivers/usb/serial/ssu100.c +++ b/drivers/usb/serial/ssu100.c | |||
| @@ -15,6 +15,7 @@ | |||
| 15 | #include <linux/serial.h> | 15 | #include <linux/serial.h> |
| 16 | #include <linux/usb.h> | 16 | #include <linux/usb.h> |
| 17 | #include <linux/usb/serial.h> | 17 | #include <linux/usb/serial.h> |
| 18 | #include <linux/serial_reg.h> | ||
| 18 | #include <linux/uaccess.h> | 19 | #include <linux/uaccess.h> |
| 19 | 20 | ||
| 20 | #define QT_OPEN_CLOSE_CHANNEL 0xca | 21 | #define QT_OPEN_CLOSE_CHANNEL 0xca |
| @@ -27,36 +28,11 @@ | |||
| 27 | #define QT_HW_FLOW_CONTROL_MASK 0xc5 | 28 | #define QT_HW_FLOW_CONTROL_MASK 0xc5 |
| 28 | #define QT_SW_FLOW_CONTROL_MASK 0xc6 | 29 | #define QT_SW_FLOW_CONTROL_MASK 0xc6 |
| 29 | 30 | ||
| 30 | #define MODEM_CTL_REGISTER 0x04 | ||
| 31 | #define MODEM_STATUS_REGISTER 0x06 | ||
| 32 | |||
| 33 | |||
| 34 | #define SERIAL_LSR_OE 0x02 | ||
| 35 | #define SERIAL_LSR_PE 0x04 | ||
| 36 | #define SERIAL_LSR_FE 0x08 | ||
| 37 | #define SERIAL_LSR_BI 0x10 | ||
| 38 | |||
| 39 | #define SERIAL_LSR_TEMT 0x40 | ||
| 40 | |||
| 41 | #define SERIAL_MCR_DTR 0x01 | ||
| 42 | #define SERIAL_MCR_RTS 0x02 | ||
| 43 | #define SERIAL_MCR_LOOP 0x10 | ||
| 44 | |||
| 45 | #define SERIAL_MSR_CTS 0x10 | ||
| 46 | #define SERIAL_MSR_CD 0x80 | ||
| 47 | #define SERIAL_MSR_RI 0x40 | ||
| 48 | #define SERIAL_MSR_DSR 0x20 | ||
| 49 | #define SERIAL_MSR_MASK 0xf0 | 31 | #define SERIAL_MSR_MASK 0xf0 |
| 50 | 32 | ||
| 51 | #define SERIAL_CRTSCTS ((SERIAL_MCR_RTS << 8) | SERIAL_MSR_CTS) | 33 | #define SERIAL_CRTSCTS ((UART_MCR_RTS << 8) | UART_MSR_CTS) |
| 52 | 34 | ||
| 53 | #define SERIAL_8_DATA 0x03 | 35 | #define SERIAL_EVEN_PARITY (UART_LCR_PARITY | UART_LCR_EPAR) |
| 54 | #define SERIAL_7_DATA 0x02 | ||
| 55 | #define SERIAL_6_DATA 0x01 | ||
| 56 | #define SERIAL_5_DATA 0x00 | ||
| 57 | |||
| 58 | #define SERIAL_ODD_PARITY 0X08 | ||
| 59 | #define SERIAL_EVEN_PARITY 0X18 | ||
| 60 | 36 | ||
| 61 | #define MAX_BAUD_RATE 460800 | 37 | #define MAX_BAUD_RATE 460800 |
| 62 | 38 | ||
| @@ -99,10 +75,12 @@ static struct usb_driver ssu100_driver = { | |||
| 99 | }; | 75 | }; |
| 100 | 76 | ||
| 101 | struct ssu100_port_private { | 77 | struct ssu100_port_private { |
| 78 | spinlock_t status_lock; | ||
| 102 | u8 shadowLSR; | 79 | u8 shadowLSR; |
| 103 | u8 shadowMSR; | 80 | u8 shadowMSR; |
| 104 | wait_queue_head_t delta_msr_wait; /* Used for TIOCMIWAIT */ | 81 | wait_queue_head_t delta_msr_wait; /* Used for TIOCMIWAIT */ |
| 105 | unsigned short max_packet_size; | 82 | unsigned short max_packet_size; |
| 83 | struct async_icount icount; | ||
| 106 | }; | 84 | }; |
| 107 | 85 | ||
| 108 | static void ssu100_release(struct usb_serial *serial) | 86 | static void ssu100_release(struct usb_serial *serial) |
| @@ -150,9 +128,10 @@ static inline int ssu100_getregister(struct usb_device *dev, | |||
| 150 | 128 | ||
| 151 | static inline int ssu100_setregister(struct usb_device *dev, | 129 | static inline int ssu100_setregister(struct usb_device *dev, |
| 152 | unsigned short uart, | 130 | unsigned short uart, |
| 131 | unsigned short reg, | ||
| 153 | u16 data) | 132 | u16 data) |
| 154 | { | 133 | { |
| 155 | u16 value = (data << 8) | MODEM_CTL_REGISTER; | 134 | u16 value = (data << 8) | reg; |
| 156 | 135 | ||
| 157 | return usb_control_msg(dev, usb_sndctrlpipe(dev, 0), | 136 | return usb_control_msg(dev, usb_sndctrlpipe(dev, 0), |
| 158 | QT_SET_GET_REGISTER, 0x40, value, uart, | 137 | QT_SET_GET_REGISTER, 0x40, value, uart, |
| @@ -178,11 +157,11 @@ static inline int update_mctrl(struct usb_device *dev, unsigned int set, | |||
| 178 | clear &= ~set; /* 'set' takes precedence over 'clear' */ | 157 | clear &= ~set; /* 'set' takes precedence over 'clear' */ |
| 179 | urb_value = 0; | 158 | urb_value = 0; |
| 180 | if (set & TIOCM_DTR) | 159 | if (set & TIOCM_DTR) |
| 181 | urb_value |= SERIAL_MCR_DTR; | 160 | urb_value |= UART_MCR_DTR; |
| 182 | if (set & TIOCM_RTS) | 161 | if (set & TIOCM_RTS) |
| 183 | urb_value |= SERIAL_MCR_RTS; | 162 | urb_value |= UART_MCR_RTS; |
| 184 | 163 | ||
| 185 | result = ssu100_setregister(dev, 0, urb_value); | 164 | result = ssu100_setregister(dev, 0, UART_MCR, urb_value); |
| 186 | if (result < 0) | 165 | if (result < 0) |
| 187 | dbg("%s Error from MODEM_CTRL urb", __func__); | 166 | dbg("%s Error from MODEM_CTRL urb", __func__); |
| 188 | 167 | ||
| @@ -264,24 +243,24 @@ static void ssu100_set_termios(struct tty_struct *tty, | |||
| 264 | 243 | ||
| 265 | if (cflag & PARENB) { | 244 | if (cflag & PARENB) { |
| 266 | if (cflag & PARODD) | 245 | if (cflag & PARODD) |
| 267 | urb_value |= SERIAL_ODD_PARITY; | 246 | urb_value |= UART_LCR_PARITY; |
| 268 | else | 247 | else |
| 269 | urb_value |= SERIAL_EVEN_PARITY; | 248 | urb_value |= SERIAL_EVEN_PARITY; |
| 270 | } | 249 | } |
| 271 | 250 | ||
| 272 | switch (cflag & CSIZE) { | 251 | switch (cflag & CSIZE) { |
| 273 | case CS5: | 252 | case CS5: |
| 274 | urb_value |= SERIAL_5_DATA; | 253 | urb_value |= UART_LCR_WLEN5; |
| 275 | break; | 254 | break; |
| 276 | case CS6: | 255 | case CS6: |
| 277 | urb_value |= SERIAL_6_DATA; | 256 | urb_value |= UART_LCR_WLEN6; |
| 278 | break; | 257 | break; |
| 279 | case CS7: | 258 | case CS7: |
| 280 | urb_value |= SERIAL_7_DATA; | 259 | urb_value |= UART_LCR_WLEN7; |
| 281 | break; | 260 | break; |
| 282 | default: | 261 | default: |
| 283 | case CS8: | 262 | case CS8: |
| 284 | urb_value |= SERIAL_8_DATA; | 263 | urb_value |= UART_LCR_WLEN8; |
| 285 | break; | 264 | break; |
| 286 | } | 265 | } |
| 287 | 266 | ||
| @@ -333,6 +312,7 @@ static int ssu100_open(struct tty_struct *tty, struct usb_serial_port *port) | |||
| 333 | struct ssu100_port_private *priv = usb_get_serial_port_data(port); | 312 | struct ssu100_port_private *priv = usb_get_serial_port_data(port); |
| 334 | u8 *data; | 313 | u8 *data; |
| 335 | int result; | 314 | int result; |
| 315 | unsigned long flags; | ||
| 336 | 316 | ||
| 337 | dbg("%s - port %d", __func__, port->number); | 317 | dbg("%s - port %d", __func__, port->number); |
| 338 | 318 | ||
| @@ -350,11 +330,10 @@ static int ssu100_open(struct tty_struct *tty, struct usb_serial_port *port) | |||
| 350 | return result; | 330 | return result; |
| 351 | } | 331 | } |
| 352 | 332 | ||
| 353 | priv->shadowLSR = data[0] & (SERIAL_LSR_OE | SERIAL_LSR_PE | | 333 | spin_lock_irqsave(&priv->status_lock, flags); |
| 354 | SERIAL_LSR_FE | SERIAL_LSR_BI); | 334 | priv->shadowLSR = data[0]; |
| 355 | 335 | priv->shadowMSR = data[1]; | |
| 356 | priv->shadowMSR = data[1] & (SERIAL_MSR_CTS | SERIAL_MSR_DSR | | 336 | spin_unlock_irqrestore(&priv->status_lock, flags); |
| 357 | SERIAL_MSR_RI | SERIAL_MSR_CD); | ||
| 358 | 337 | ||
| 359 | kfree(data); | 338 | kfree(data); |
| 360 | 339 | ||
| @@ -398,11 +377,51 @@ static int get_serial_info(struct usb_serial_port *port, | |||
| 398 | return 0; | 377 | return 0; |
| 399 | } | 378 | } |
| 400 | 379 | ||
| 380 | static int wait_modem_info(struct usb_serial_port *port, unsigned int arg) | ||
| 381 | { | ||
| 382 | struct ssu100_port_private *priv = usb_get_serial_port_data(port); | ||
| 383 | struct async_icount prev, cur; | ||
| 384 | unsigned long flags; | ||
| 385 | |||
| 386 | spin_lock_irqsave(&priv->status_lock, flags); | ||
| 387 | prev = priv->icount; | ||
| 388 | spin_unlock_irqrestore(&priv->status_lock, flags); | ||
| 389 | |||
| 390 | while (1) { | ||
| 391 | wait_event_interruptible(priv->delta_msr_wait, | ||
| 392 | ((priv->icount.rng != prev.rng) || | ||
| 393 | (priv->icount.dsr != prev.dsr) || | ||
| 394 | (priv->icount.dcd != prev.dcd) || | ||
| 395 | (priv->icount.cts != prev.cts))); | ||
| 396 | |||
| 397 | if (signal_pending(current)) | ||
| 398 | return -ERESTARTSYS; | ||
| 399 | |||
| 400 | spin_lock_irqsave(&priv->status_lock, flags); | ||
| 401 | cur = priv->icount; | ||
| 402 | spin_unlock_irqrestore(&priv->status_lock, flags); | ||
| 403 | |||
| 404 | if ((prev.rng == cur.rng) && | ||
| 405 | (prev.dsr == cur.dsr) && | ||
| 406 | (prev.dcd == cur.dcd) && | ||
| 407 | (prev.cts == cur.cts)) | ||
| 408 | return -EIO; | ||
| 409 | |||
| 410 | if ((arg & TIOCM_RNG && (prev.rng != cur.rng)) || | ||
| 411 | (arg & TIOCM_DSR && (prev.dsr != cur.dsr)) || | ||
| 412 | (arg & TIOCM_CD && (prev.dcd != cur.dcd)) || | ||
| 413 | (arg & TIOCM_CTS && (prev.cts != cur.cts))) | ||
| 414 | return 0; | ||
| 415 | } | ||
| 416 | return 0; | ||
| 417 | } | ||
| 418 | |||
| 401 | static int ssu100_ioctl(struct tty_struct *tty, struct file *file, | 419 | static int ssu100_ioctl(struct tty_struct *tty, struct file *file, |
| 402 | unsigned int cmd, unsigned long arg) | 420 | unsigned int cmd, unsigned long arg) |
| 403 | { | 421 | { |
| 404 | struct usb_serial_port *port = tty->driver_data; | 422 | struct usb_serial_port *port = tty->driver_data; |
| 405 | struct ssu100_port_private *priv = usb_get_serial_port_data(port); | 423 | struct ssu100_port_private *priv = usb_get_serial_port_data(port); |
| 424 | void __user *user_arg = (void __user *)arg; | ||
| 406 | 425 | ||
| 407 | dbg("%s cmd 0x%04x", __func__, cmd); | 426 | dbg("%s cmd 0x%04x", __func__, cmd); |
| 408 | 427 | ||
| @@ -412,28 +431,28 @@ static int ssu100_ioctl(struct tty_struct *tty, struct file *file, | |||
| 412 | (struct serial_struct __user *) arg); | 431 | (struct serial_struct __user *) arg); |
| 413 | 432 | ||
| 414 | case TIOCMIWAIT: | 433 | case TIOCMIWAIT: |
| 415 | while (priv != NULL) { | 434 | return wait_modem_info(port, arg); |
| 416 | u8 prevMSR = priv->shadowMSR & SERIAL_MSR_MASK; | 435 | |
| 417 | interruptible_sleep_on(&priv->delta_msr_wait); | 436 | case TIOCGICOUNT: |
| 418 | /* see if a signal did it */ | 437 | { |
| 419 | if (signal_pending(current)) | 438 | struct serial_icounter_struct icount; |
| 420 | return -ERESTARTSYS; | 439 | struct async_icount cnow = priv->icount; |
| 421 | else { | 440 | memset(&icount, 0, sizeof(icount)); |
| 422 | u8 diff = (priv->shadowMSR & SERIAL_MSR_MASK) ^ prevMSR; | 441 | icount.cts = cnow.cts; |
| 423 | if (!diff) | 442 | icount.dsr = cnow.dsr; |
| 424 | return -EIO; /* no change => error */ | 443 | icount.rng = cnow.rng; |
| 425 | 444 | icount.dcd = cnow.dcd; | |
| 426 | /* Return 0 if caller wanted to know about | 445 | icount.rx = cnow.rx; |
| 427 | these bits */ | 446 | icount.tx = cnow.tx; |
| 428 | 447 | icount.frame = cnow.frame; | |
| 429 | if (((arg & TIOCM_RNG) && (diff & SERIAL_MSR_RI)) || | 448 | icount.overrun = cnow.overrun; |
| 430 | ((arg & TIOCM_DSR) && (diff & SERIAL_MSR_DSR)) || | 449 | icount.parity = cnow.parity; |
| 431 | ((arg & TIOCM_CD) && (diff & SERIAL_MSR_CD)) || | 450 | icount.brk = cnow.brk; |
| 432 | ((arg & TIOCM_CTS) && (diff & SERIAL_MSR_CTS))) | 451 | icount.buf_overrun = cnow.buf_overrun; |
| 433 | return 0; | 452 | if (copy_to_user(user_arg, &icount, sizeof(icount))) |
| 434 | } | 453 | return -EFAULT; |
| 435 | } | ||
| 436 | return 0; | 454 | return 0; |
| 455 | } | ||
| 437 | 456 | ||
| 438 | default: | 457 | default: |
| 439 | break; | 458 | break; |
| @@ -455,6 +474,7 @@ static void ssu100_set_max_packet_size(struct usb_serial_port *port) | |||
| 455 | 474 | ||
| 456 | unsigned num_endpoints; | 475 | unsigned num_endpoints; |
| 457 | int i; | 476 | int i; |
| 477 | unsigned long flags; | ||
| 458 | 478 | ||
| 459 | num_endpoints = interface->cur_altsetting->desc.bNumEndpoints; | 479 | num_endpoints = interface->cur_altsetting->desc.bNumEndpoints; |
| 460 | dev_info(&udev->dev, "Number of endpoints %d\n", num_endpoints); | 480 | dev_info(&udev->dev, "Number of endpoints %d\n", num_endpoints); |
| @@ -466,7 +486,9 @@ static void ssu100_set_max_packet_size(struct usb_serial_port *port) | |||
| 466 | } | 486 | } |
| 467 | 487 | ||
| 468 | /* set max packet size based on descriptor */ | 488 | /* set max packet size based on descriptor */ |
| 489 | spin_lock_irqsave(&priv->status_lock, flags); | ||
| 469 | priv->max_packet_size = ep_desc->wMaxPacketSize; | 490 | priv->max_packet_size = ep_desc->wMaxPacketSize; |
| 491 | spin_unlock_irqrestore(&priv->status_lock, flags); | ||
| 470 | 492 | ||
| 471 | dev_info(&udev->dev, "Setting MaxPacketSize %d\n", priv->max_packet_size); | 493 | dev_info(&udev->dev, "Setting MaxPacketSize %d\n", priv->max_packet_size); |
| 472 | } | 494 | } |
| @@ -485,9 +507,9 @@ static int ssu100_attach(struct usb_serial *serial) | |||
| 485 | return -ENOMEM; | 507 | return -ENOMEM; |
| 486 | } | 508 | } |
| 487 | 509 | ||
| 510 | spin_lock_init(&priv->status_lock); | ||
| 488 | init_waitqueue_head(&priv->delta_msr_wait); | 511 | init_waitqueue_head(&priv->delta_msr_wait); |
| 489 | usb_set_serial_port_data(port, priv); | 512 | usb_set_serial_port_data(port, priv); |
| 490 | |||
| 491 | ssu100_set_max_packet_size(port); | 513 | ssu100_set_max_packet_size(port); |
| 492 | 514 | ||
| 493 | return ssu100_initdevice(serial->dev); | 515 | return ssu100_initdevice(serial->dev); |
| @@ -506,20 +528,20 @@ static int ssu100_tiocmget(struct tty_struct *tty, struct file *file) | |||
| 506 | if (!d) | 528 | if (!d) |
| 507 | return -ENOMEM; | 529 | return -ENOMEM; |
| 508 | 530 | ||
| 509 | r = ssu100_getregister(dev, 0, MODEM_CTL_REGISTER, d); | 531 | r = ssu100_getregister(dev, 0, UART_MCR, d); |
| 510 | if (r < 0) | 532 | if (r < 0) |
| 511 | goto mget_out; | 533 | goto mget_out; |
| 512 | 534 | ||
| 513 | r = ssu100_getregister(dev, 0, MODEM_STATUS_REGISTER, d+1); | 535 | r = ssu100_getregister(dev, 0, UART_MSR, d+1); |
| 514 | if (r < 0) | 536 | if (r < 0) |
| 515 | goto mget_out; | 537 | goto mget_out; |
| 516 | 538 | ||
| 517 | r = (d[0] & SERIAL_MCR_DTR ? TIOCM_DTR : 0) | | 539 | r = (d[0] & UART_MCR_DTR ? TIOCM_DTR : 0) | |
| 518 | (d[0] & SERIAL_MCR_RTS ? TIOCM_RTS : 0) | | 540 | (d[0] & UART_MCR_RTS ? TIOCM_RTS : 0) | |
| 519 | (d[1] & SERIAL_MSR_CTS ? TIOCM_CTS : 0) | | 541 | (d[1] & UART_MSR_CTS ? TIOCM_CTS : 0) | |
| 520 | (d[1] & SERIAL_MSR_CD ? TIOCM_CAR : 0) | | 542 | (d[1] & UART_MSR_DCD ? TIOCM_CAR : 0) | |
| 521 | (d[1] & SERIAL_MSR_RI ? TIOCM_RI : 0) | | 543 | (d[1] & UART_MSR_RI ? TIOCM_RI : 0) | |
| 522 | (d[1] & SERIAL_MSR_DSR ? TIOCM_DSR : 0); | 544 | (d[1] & UART_MSR_DSR ? TIOCM_DSR : 0); |
| 523 | 545 | ||
| 524 | mget_out: | 546 | mget_out: |
| 525 | kfree(d); | 547 | kfree(d); |
| @@ -546,7 +568,7 @@ static void ssu100_dtr_rts(struct usb_serial_port *port, int on) | |||
| 546 | if (!port->serial->disconnected) { | 568 | if (!port->serial->disconnected) { |
| 547 | /* Disable flow control */ | 569 | /* Disable flow control */ |
| 548 | if (!on && | 570 | if (!on && |
| 549 | ssu100_setregister(dev, 0, 0) < 0) | 571 | ssu100_setregister(dev, 0, UART_MCR, 0) < 0) |
| 550 | dev_err(&port->dev, "error from flowcontrol urb\n"); | 572 | dev_err(&port->dev, "error from flowcontrol urb\n"); |
| 551 | /* drop RTS and DTR */ | 573 | /* drop RTS and DTR */ |
| 552 | if (on) | 574 | if (on) |
| @@ -557,34 +579,88 @@ static void ssu100_dtr_rts(struct usb_serial_port *port, int on) | |||
| 557 | mutex_unlock(&port->serial->disc_mutex); | 579 | mutex_unlock(&port->serial->disc_mutex); |
| 558 | } | 580 | } |
| 559 | 581 | ||
| 582 | static void ssu100_update_msr(struct usb_serial_port *port, u8 msr) | ||
| 583 | { | ||
| 584 | struct ssu100_port_private *priv = usb_get_serial_port_data(port); | ||
| 585 | unsigned long flags; | ||
| 586 | |||
| 587 | spin_lock_irqsave(&priv->status_lock, flags); | ||
| 588 | priv->shadowMSR = msr; | ||
| 589 | spin_unlock_irqrestore(&priv->status_lock, flags); | ||
| 590 | |||
| 591 | if (msr & UART_MSR_ANY_DELTA) { | ||
| 592 | /* update input line counters */ | ||
| 593 | if (msr & UART_MSR_DCTS) | ||
| 594 | priv->icount.cts++; | ||
| 595 | if (msr & UART_MSR_DDSR) | ||
| 596 | priv->icount.dsr++; | ||
| 597 | if (msr & UART_MSR_DDCD) | ||
| 598 | priv->icount.dcd++; | ||
| 599 | if (msr & UART_MSR_TERI) | ||
| 600 | priv->icount.rng++; | ||
| 601 | wake_up_interruptible(&priv->delta_msr_wait); | ||
| 602 | } | ||
| 603 | } | ||
| 604 | |||
| 605 | static void ssu100_update_lsr(struct usb_serial_port *port, u8 lsr, | ||
| 606 | char *tty_flag) | ||
| 607 | { | ||
| 608 | struct ssu100_port_private *priv = usb_get_serial_port_data(port); | ||
| 609 | unsigned long flags; | ||
| 610 | |||
| 611 | spin_lock_irqsave(&priv->status_lock, flags); | ||
| 612 | priv->shadowLSR = lsr; | ||
| 613 | spin_unlock_irqrestore(&priv->status_lock, flags); | ||
| 614 | |||
| 615 | *tty_flag = TTY_NORMAL; | ||
| 616 | if (lsr & UART_LSR_BRK_ERROR_BITS) { | ||
| 617 | /* we always want to update icount, but we only want to | ||
| 618 | * update tty_flag for one case */ | ||
| 619 | if (lsr & UART_LSR_BI) { | ||
| 620 | priv->icount.brk++; | ||
| 621 | *tty_flag = TTY_BREAK; | ||
| 622 | usb_serial_handle_break(port); | ||
| 623 | } | ||
| 624 | if (lsr & UART_LSR_PE) { | ||
| 625 | priv->icount.parity++; | ||
| 626 | if (*tty_flag == TTY_NORMAL) | ||
| 627 | *tty_flag = TTY_PARITY; | ||
| 628 | } | ||
| 629 | if (lsr & UART_LSR_FE) { | ||
| 630 | priv->icount.frame++; | ||
| 631 | if (*tty_flag == TTY_NORMAL) | ||
| 632 | *tty_flag = TTY_FRAME; | ||
| 633 | } | ||
| 634 | if (lsr & UART_LSR_OE){ | ||
| 635 | priv->icount.overrun++; | ||
| 636 | if (*tty_flag == TTY_NORMAL) | ||
| 637 | *tty_flag = TTY_OVERRUN; | ||
| 638 | } | ||
| 639 | } | ||
| 640 | |||
| 641 | } | ||
| 642 | |||
| 560 | static int ssu100_process_packet(struct tty_struct *tty, | 643 | static int ssu100_process_packet(struct tty_struct *tty, |
| 561 | struct usb_serial_port *port, | 644 | struct usb_serial_port *port, |
| 562 | struct ssu100_port_private *priv, | 645 | struct ssu100_port_private *priv, |
| 563 | char *packet, int len) | 646 | char *packet, int len) |
| 564 | { | 647 | { |
| 565 | int i; | 648 | int i; |
| 566 | char flag; | 649 | char flag = TTY_NORMAL; |
| 567 | char *ch; | 650 | char *ch; |
| 568 | 651 | ||
| 569 | dbg("%s - port %d", __func__, port->number); | 652 | dbg("%s - port %d", __func__, port->number); |
| 570 | 653 | ||
| 571 | if (len < 4) { | 654 | if ((len >= 4) && |
| 572 | dbg("%s - malformed packet", __func__); | 655 | (packet[0] == 0x1b) && (packet[1] == 0x1b) && |
| 573 | return 0; | ||
| 574 | } | ||
| 575 | |||
| 576 | if ((packet[0] == 0x1b) && (packet[1] == 0x1b) && | ||
| 577 | ((packet[2] == 0x00) || (packet[2] == 0x01))) { | 656 | ((packet[2] == 0x00) || (packet[2] == 0x01))) { |
| 578 | if (packet[2] == 0x00) | 657 | if (packet[2] == 0x00) { |
| 579 | priv->shadowLSR = packet[3] & (SERIAL_LSR_OE | | 658 | ssu100_update_lsr(port, packet[3], &flag); |
| 580 | SERIAL_LSR_PE | | 659 | if (flag == TTY_OVERRUN) |
| 581 | SERIAL_LSR_FE | | 660 | tty_insert_flip_char(tty, 0, TTY_OVERRUN); |
| 582 | SERIAL_LSR_BI); | ||
| 583 | |||
| 584 | if (packet[2] == 0x01) { | ||
| 585 | priv->shadowMSR = packet[3]; | ||
| 586 | wake_up_interruptible(&priv->delta_msr_wait); | ||
| 587 | } | 661 | } |
| 662 | if (packet[2] == 0x01) | ||
| 663 | ssu100_update_msr(port, packet[3]); | ||
| 588 | 664 | ||
| 589 | len -= 4; | 665 | len -= 4; |
| 590 | ch = packet + 4; | 666 | ch = packet + 4; |
| @@ -631,7 +707,6 @@ static void ssu100_process_read_urb(struct urb *urb) | |||
| 631 | tty_kref_put(tty); | 707 | tty_kref_put(tty); |
| 632 | } | 708 | } |
| 633 | 709 | ||
| 634 | |||
| 635 | static struct usb_serial_driver ssu100_device = { | 710 | static struct usb_serial_driver ssu100_device = { |
| 636 | .driver = { | 711 | .driver = { |
| 637 | .owner = THIS_MODULE, | 712 | .owner = THIS_MODULE, |
| @@ -653,6 +728,7 @@ static struct usb_serial_driver ssu100_device = { | |||
| 653 | .tiocmset = ssu100_tiocmset, | 728 | .tiocmset = ssu100_tiocmset, |
| 654 | .ioctl = ssu100_ioctl, | 729 | .ioctl = ssu100_ioctl, |
| 655 | .set_termios = ssu100_set_termios, | 730 | .set_termios = ssu100_set_termios, |
| 731 | .disconnect = usb_serial_generic_disconnect, | ||
| 656 | }; | 732 | }; |
| 657 | 733 | ||
| 658 | static int __init ssu100_init(void) | 734 | static int __init ssu100_init(void) |
diff --git a/drivers/usb/serial/usb-serial.c b/drivers/usb/serial/usb-serial.c index 2a982e62963b..7a2177c79bde 100644 --- a/drivers/usb/serial/usb-serial.c +++ b/drivers/usb/serial/usb-serial.c | |||
| @@ -736,6 +736,7 @@ int usb_serial_probe(struct usb_interface *interface, | |||
| 736 | 736 | ||
| 737 | serial = create_serial(dev, interface, type); | 737 | serial = create_serial(dev, interface, type); |
| 738 | if (!serial) { | 738 | if (!serial) { |
| 739 | module_put(type->driver.owner); | ||
| 739 | dev_err(&interface->dev, "%s - out of memory\n", __func__); | 740 | dev_err(&interface->dev, "%s - out of memory\n", __func__); |
| 740 | return -ENOMEM; | 741 | return -ENOMEM; |
| 741 | } | 742 | } |
| @@ -746,11 +747,11 @@ int usb_serial_probe(struct usb_interface *interface, | |||
| 746 | 747 | ||
| 747 | id = get_iface_id(type, interface); | 748 | id = get_iface_id(type, interface); |
| 748 | retval = type->probe(serial, id); | 749 | retval = type->probe(serial, id); |
| 749 | module_put(type->driver.owner); | ||
| 750 | 750 | ||
| 751 | if (retval) { | 751 | if (retval) { |
| 752 | dbg("sub driver rejected device"); | 752 | dbg("sub driver rejected device"); |
| 753 | kfree(serial); | 753 | kfree(serial); |
| 754 | module_put(type->driver.owner); | ||
| 754 | return retval; | 755 | return retval; |
| 755 | } | 756 | } |
| 756 | } | 757 | } |
| @@ -822,6 +823,7 @@ int usb_serial_probe(struct usb_interface *interface, | |||
| 822 | if (num_bulk_in == 0 || num_bulk_out == 0) { | 823 | if (num_bulk_in == 0 || num_bulk_out == 0) { |
| 823 | dev_info(&interface->dev, "PL-2303 hack: descriptors matched but endpoints did not\n"); | 824 | dev_info(&interface->dev, "PL-2303 hack: descriptors matched but endpoints did not\n"); |
| 824 | kfree(serial); | 825 | kfree(serial); |
| 826 | module_put(type->driver.owner); | ||
| 825 | return -ENODEV; | 827 | return -ENODEV; |
| 826 | } | 828 | } |
| 827 | } | 829 | } |
| @@ -835,22 +837,15 @@ int usb_serial_probe(struct usb_interface *interface, | |||
| 835 | dev_err(&interface->dev, | 837 | dev_err(&interface->dev, |
| 836 | "Generic device with no bulk out, not allowed.\n"); | 838 | "Generic device with no bulk out, not allowed.\n"); |
| 837 | kfree(serial); | 839 | kfree(serial); |
| 840 | module_put(type->driver.owner); | ||
| 838 | return -EIO; | 841 | return -EIO; |
| 839 | } | 842 | } |
| 840 | } | 843 | } |
| 841 | #endif | 844 | #endif |
| 842 | if (!num_ports) { | 845 | if (!num_ports) { |
| 843 | /* if this device type has a calc_num_ports function, call it */ | 846 | /* if this device type has a calc_num_ports function, call it */ |
| 844 | if (type->calc_num_ports) { | 847 | if (type->calc_num_ports) |
| 845 | if (!try_module_get(type->driver.owner)) { | ||
| 846 | dev_err(&interface->dev, | ||
| 847 | "module get failed, exiting\n"); | ||
| 848 | kfree(serial); | ||
| 849 | return -EIO; | ||
| 850 | } | ||
| 851 | num_ports = type->calc_num_ports(serial); | 848 | num_ports = type->calc_num_ports(serial); |
| 852 | module_put(type->driver.owner); | ||
| 853 | } | ||
| 854 | if (!num_ports) | 849 | if (!num_ports) |
| 855 | num_ports = type->num_ports; | 850 | num_ports = type->num_ports; |
| 856 | } | 851 | } |
| @@ -1039,13 +1034,7 @@ int usb_serial_probe(struct usb_interface *interface, | |||
| 1039 | 1034 | ||
| 1040 | /* if this device type has an attach function, call it */ | 1035 | /* if this device type has an attach function, call it */ |
| 1041 | if (type->attach) { | 1036 | if (type->attach) { |
| 1042 | if (!try_module_get(type->driver.owner)) { | ||
| 1043 | dev_err(&interface->dev, | ||
| 1044 | "module get failed, exiting\n"); | ||
| 1045 | goto probe_error; | ||
| 1046 | } | ||
| 1047 | retval = type->attach(serial); | 1037 | retval = type->attach(serial); |
| 1048 | module_put(type->driver.owner); | ||
| 1049 | if (retval < 0) | 1038 | if (retval < 0) |
| 1050 | goto probe_error; | 1039 | goto probe_error; |
| 1051 | serial->attached = 1; | 1040 | serial->attached = 1; |
| @@ -1088,10 +1077,12 @@ int usb_serial_probe(struct usb_interface *interface, | |||
| 1088 | exit: | 1077 | exit: |
| 1089 | /* success */ | 1078 | /* success */ |
| 1090 | usb_set_intfdata(interface, serial); | 1079 | usb_set_intfdata(interface, serial); |
| 1080 | module_put(type->driver.owner); | ||
| 1091 | return 0; | 1081 | return 0; |
| 1092 | 1082 | ||
| 1093 | probe_error: | 1083 | probe_error: |
| 1094 | usb_serial_put(serial); | 1084 | usb_serial_put(serial); |
| 1085 | module_put(type->driver.owner); | ||
| 1095 | return -EIO; | 1086 | return -EIO; |
| 1096 | } | 1087 | } |
| 1097 | EXPORT_SYMBOL_GPL(usb_serial_probe); | 1088 | EXPORT_SYMBOL_GPL(usb_serial_probe); |
diff --git a/firmware/Makefile b/firmware/Makefile index b27f09f05d17..9c2d19452d0b 100644 --- a/firmware/Makefile +++ b/firmware/Makefile | |||
| @@ -142,7 +142,7 @@ fw-shipped-$(CONFIG_YAM) += yam/1200.bin yam/9600.bin | |||
| 142 | fw-shipped-all := $(fw-shipped-y) $(fw-shipped-m) $(fw-shipped-) | 142 | fw-shipped-all := $(fw-shipped-y) $(fw-shipped-m) $(fw-shipped-) |
| 143 | 143 | ||
| 144 | # Directories which we _might_ need to create, so we have a rule for them. | 144 | # Directories which we _might_ need to create, so we have a rule for them. |
| 145 | firmware-dirs := $(sort $(patsubst %,$(objtree)/$(obj)/%/,$(dir $(fw-external-y) $(fw-shipped-all)))) | 145 | firmware-dirs := $(sort $(addprefix $(objtree)/$(obj)/,$(dir $(fw-external-y) $(fw-shipped-all)))) |
| 146 | 146 | ||
| 147 | quiet_cmd_mkdir = MKDIR $(patsubst $(objtree)/%,%,$@) | 147 | quiet_cmd_mkdir = MKDIR $(patsubst $(objtree)/%,%,$@) |
| 148 | cmd_mkdir = mkdir -p $@ | 148 | cmd_mkdir = mkdir -p $@ |
diff --git a/fs/xfs/linux-2.6/xfs_aops.c b/fs/xfs/linux-2.6/xfs_aops.c index 15412fe15c3a..b552f816de15 100644 --- a/fs/xfs/linux-2.6/xfs_aops.c +++ b/fs/xfs/linux-2.6/xfs_aops.c | |||
| @@ -852,8 +852,8 @@ xfs_convert_page( | |||
| 852 | SetPageUptodate(page); | 852 | SetPageUptodate(page); |
| 853 | 853 | ||
| 854 | if (count) { | 854 | if (count) { |
| 855 | wbc->nr_to_write--; | 855 | if (--wbc->nr_to_write <= 0 && |
| 856 | if (wbc->nr_to_write <= 0) | 856 | wbc->sync_mode == WB_SYNC_NONE) |
| 857 | done = 1; | 857 | done = 1; |
| 858 | } | 858 | } |
| 859 | xfs_start_page_writeback(page, !page_dirty, count); | 859 | xfs_start_page_writeback(page, !page_dirty, count); |
| @@ -1068,7 +1068,7 @@ xfs_vm_writepage( | |||
| 1068 | * by themselves. | 1068 | * by themselves. |
| 1069 | */ | 1069 | */ |
| 1070 | if ((current->flags & (PF_MEMALLOC|PF_KSWAPD)) == PF_MEMALLOC) | 1070 | if ((current->flags & (PF_MEMALLOC|PF_KSWAPD)) == PF_MEMALLOC) |
| 1071 | goto out_fail; | 1071 | goto redirty; |
| 1072 | 1072 | ||
| 1073 | /* | 1073 | /* |
| 1074 | * We need a transaction if there are delalloc or unwritten buffers | 1074 | * We need a transaction if there are delalloc or unwritten buffers |
| @@ -1080,7 +1080,7 @@ xfs_vm_writepage( | |||
| 1080 | */ | 1080 | */ |
| 1081 | xfs_count_page_state(page, &delalloc, &unwritten); | 1081 | xfs_count_page_state(page, &delalloc, &unwritten); |
| 1082 | if ((current->flags & PF_FSTRANS) && (delalloc || unwritten)) | 1082 | if ((current->flags & PF_FSTRANS) && (delalloc || unwritten)) |
| 1083 | goto out_fail; | 1083 | goto redirty; |
| 1084 | 1084 | ||
| 1085 | /* Is this page beyond the end of the file? */ | 1085 | /* Is this page beyond the end of the file? */ |
| 1086 | offset = i_size_read(inode); | 1086 | offset = i_size_read(inode); |
| @@ -1245,12 +1245,15 @@ error: | |||
| 1245 | if (iohead) | 1245 | if (iohead) |
| 1246 | xfs_cancel_ioend(iohead); | 1246 | xfs_cancel_ioend(iohead); |
| 1247 | 1247 | ||
| 1248 | if (err == -EAGAIN) | ||
| 1249 | goto redirty; | ||
| 1250 | |||
| 1248 | xfs_aops_discard_page(page); | 1251 | xfs_aops_discard_page(page); |
| 1249 | ClearPageUptodate(page); | 1252 | ClearPageUptodate(page); |
| 1250 | unlock_page(page); | 1253 | unlock_page(page); |
| 1251 | return err; | 1254 | return err; |
| 1252 | 1255 | ||
| 1253 | out_fail: | 1256 | redirty: |
| 1254 | redirty_page_for_writepage(wbc, page); | 1257 | redirty_page_for_writepage(wbc, page); |
| 1255 | unlock_page(page); | 1258 | unlock_page(page); |
| 1256 | return 0; | 1259 | return 0; |
diff --git a/fs/xfs/linux-2.6/xfs_super.c b/fs/xfs/linux-2.6/xfs_super.c index 15c35b62ff14..a4e07974955b 100644 --- a/fs/xfs/linux-2.6/xfs_super.c +++ b/fs/xfs/linux-2.6/xfs_super.c | |||
| @@ -1226,6 +1226,7 @@ xfs_fs_statfs( | |||
| 1226 | struct xfs_inode *ip = XFS_I(dentry->d_inode); | 1226 | struct xfs_inode *ip = XFS_I(dentry->d_inode); |
| 1227 | __uint64_t fakeinos, id; | 1227 | __uint64_t fakeinos, id; |
| 1228 | xfs_extlen_t lsize; | 1228 | xfs_extlen_t lsize; |
| 1229 | __int64_t ffree; | ||
| 1229 | 1230 | ||
| 1230 | statp->f_type = XFS_SB_MAGIC; | 1231 | statp->f_type = XFS_SB_MAGIC; |
| 1231 | statp->f_namelen = MAXNAMELEN - 1; | 1232 | statp->f_namelen = MAXNAMELEN - 1; |
| @@ -1249,7 +1250,11 @@ xfs_fs_statfs( | |||
| 1249 | statp->f_files = min_t(typeof(statp->f_files), | 1250 | statp->f_files = min_t(typeof(statp->f_files), |
| 1250 | statp->f_files, | 1251 | statp->f_files, |
| 1251 | mp->m_maxicount); | 1252 | mp->m_maxicount); |
| 1252 | statp->f_ffree = statp->f_files - (sbp->sb_icount - sbp->sb_ifree); | 1253 | |
| 1254 | /* make sure statp->f_ffree does not underflow */ | ||
| 1255 | ffree = statp->f_files - (sbp->sb_icount - sbp->sb_ifree); | ||
| 1256 | statp->f_ffree = max_t(__int64_t, ffree, 0); | ||
| 1257 | |||
| 1253 | spin_unlock(&mp->m_sb_lock); | 1258 | spin_unlock(&mp->m_sb_lock); |
| 1254 | 1259 | ||
| 1255 | if ((ip->i_d.di_flags & XFS_DIFLAG_PROJINHERIT) || | 1260 | if ((ip->i_d.di_flags & XFS_DIFLAG_PROJINHERIT) || |
| @@ -1402,7 +1407,7 @@ xfs_fs_freeze( | |||
| 1402 | 1407 | ||
| 1403 | xfs_save_resvblks(mp); | 1408 | xfs_save_resvblks(mp); |
| 1404 | xfs_quiesce_attr(mp); | 1409 | xfs_quiesce_attr(mp); |
| 1405 | return -xfs_fs_log_dummy(mp); | 1410 | return -xfs_fs_log_dummy(mp, SYNC_WAIT); |
| 1406 | } | 1411 | } |
| 1407 | 1412 | ||
| 1408 | STATIC int | 1413 | STATIC int |
diff --git a/fs/xfs/linux-2.6/xfs_sync.c b/fs/xfs/linux-2.6/xfs_sync.c index dfcbd98d1599..d59c4a65d492 100644 --- a/fs/xfs/linux-2.6/xfs_sync.c +++ b/fs/xfs/linux-2.6/xfs_sync.c | |||
| @@ -34,6 +34,7 @@ | |||
| 34 | #include "xfs_inode_item.h" | 34 | #include "xfs_inode_item.h" |
| 35 | #include "xfs_quota.h" | 35 | #include "xfs_quota.h" |
| 36 | #include "xfs_trace.h" | 36 | #include "xfs_trace.h" |
| 37 | #include "xfs_fsops.h" | ||
| 37 | 38 | ||
| 38 | #include <linux/kthread.h> | 39 | #include <linux/kthread.h> |
| 39 | #include <linux/freezer.h> | 40 | #include <linux/freezer.h> |
| @@ -341,38 +342,6 @@ xfs_sync_attr( | |||
| 341 | } | 342 | } |
| 342 | 343 | ||
| 343 | STATIC int | 344 | STATIC int |
| 344 | xfs_commit_dummy_trans( | ||
| 345 | struct xfs_mount *mp, | ||
| 346 | uint flags) | ||
| 347 | { | ||
| 348 | struct xfs_inode *ip = mp->m_rootip; | ||
| 349 | struct xfs_trans *tp; | ||
| 350 | int error; | ||
| 351 | |||
| 352 | /* | ||
| 353 | * Put a dummy transaction in the log to tell recovery | ||
| 354 | * that all others are OK. | ||
| 355 | */ | ||
| 356 | tp = xfs_trans_alloc(mp, XFS_TRANS_DUMMY1); | ||
| 357 | error = xfs_trans_reserve(tp, 0, XFS_ICHANGE_LOG_RES(mp), 0, 0, 0); | ||
| 358 | if (error) { | ||
| 359 | xfs_trans_cancel(tp, 0); | ||
| 360 | return error; | ||
| 361 | } | ||
| 362 | |||
| 363 | xfs_ilock(ip, XFS_ILOCK_EXCL); | ||
| 364 | |||
| 365 | xfs_trans_ijoin(tp, ip); | ||
| 366 | xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE); | ||
| 367 | error = xfs_trans_commit(tp, 0); | ||
| 368 | xfs_iunlock(ip, XFS_ILOCK_EXCL); | ||
| 369 | |||
| 370 | /* the log force ensures this transaction is pushed to disk */ | ||
| 371 | xfs_log_force(mp, (flags & SYNC_WAIT) ? XFS_LOG_SYNC : 0); | ||
| 372 | return error; | ||
| 373 | } | ||
| 374 | |||
| 375 | STATIC int | ||
| 376 | xfs_sync_fsdata( | 345 | xfs_sync_fsdata( |
| 377 | struct xfs_mount *mp) | 346 | struct xfs_mount *mp) |
| 378 | { | 347 | { |
| @@ -432,7 +401,7 @@ xfs_quiesce_data( | |||
| 432 | 401 | ||
| 433 | /* mark the log as covered if needed */ | 402 | /* mark the log as covered if needed */ |
| 434 | if (xfs_log_need_covered(mp)) | 403 | if (xfs_log_need_covered(mp)) |
| 435 | error2 = xfs_commit_dummy_trans(mp, SYNC_WAIT); | 404 | error2 = xfs_fs_log_dummy(mp, SYNC_WAIT); |
| 436 | 405 | ||
| 437 | /* flush data-only devices */ | 406 | /* flush data-only devices */ |
| 438 | if (mp->m_rtdev_targp) | 407 | if (mp->m_rtdev_targp) |
| @@ -563,7 +532,7 @@ xfs_flush_inodes( | |||
| 563 | /* | 532 | /* |
| 564 | * Every sync period we need to unpin all items, reclaim inodes and sync | 533 | * Every sync period we need to unpin all items, reclaim inodes and sync |
| 565 | * disk quotas. We might need to cover the log to indicate that the | 534 | * disk quotas. We might need to cover the log to indicate that the |
| 566 | * filesystem is idle. | 535 | * filesystem is idle and not frozen. |
| 567 | */ | 536 | */ |
| 568 | STATIC void | 537 | STATIC void |
| 569 | xfs_sync_worker( | 538 | xfs_sync_worker( |
| @@ -577,8 +546,9 @@ xfs_sync_worker( | |||
| 577 | xfs_reclaim_inodes(mp, 0); | 546 | xfs_reclaim_inodes(mp, 0); |
| 578 | /* dgc: errors ignored here */ | 547 | /* dgc: errors ignored here */ |
| 579 | error = xfs_qm_sync(mp, SYNC_TRYLOCK); | 548 | error = xfs_qm_sync(mp, SYNC_TRYLOCK); |
| 580 | if (xfs_log_need_covered(mp)) | 549 | if (mp->m_super->s_frozen == SB_UNFROZEN && |
| 581 | error = xfs_commit_dummy_trans(mp, 0); | 550 | xfs_log_need_covered(mp)) |
| 551 | error = xfs_fs_log_dummy(mp, 0); | ||
| 582 | } | 552 | } |
| 583 | mp->m_sync_seq++; | 553 | mp->m_sync_seq++; |
| 584 | wake_up(&mp->m_wait_single_sync_task); | 554 | wake_up(&mp->m_wait_single_sync_task); |
diff --git a/fs/xfs/xfs_fsops.c b/fs/xfs/xfs_fsops.c index dbca5f5c37ba..43b1d5699335 100644 --- a/fs/xfs/xfs_fsops.c +++ b/fs/xfs/xfs_fsops.c | |||
| @@ -604,31 +604,36 @@ out: | |||
| 604 | return 0; | 604 | return 0; |
| 605 | } | 605 | } |
| 606 | 606 | ||
| 607 | /* | ||
| 608 | * Dump a transaction into the log that contains no real change. This is needed | ||
| 609 | * to be able to make the log dirty or stamp the current tail LSN into the log | ||
| 610 | * during the covering operation. | ||
| 611 | * | ||
| 612 | * We cannot use an inode here for this - that will push dirty state back up | ||
| 613 | * into the VFS and then periodic inode flushing will prevent log covering from | ||
| 614 | * making progress. Hence we log a field in the superblock instead. | ||
| 615 | */ | ||
| 607 | int | 616 | int |
| 608 | xfs_fs_log_dummy( | 617 | xfs_fs_log_dummy( |
| 609 | xfs_mount_t *mp) | 618 | xfs_mount_t *mp, |
| 619 | int flags) | ||
| 610 | { | 620 | { |
| 611 | xfs_trans_t *tp; | 621 | xfs_trans_t *tp; |
| 612 | xfs_inode_t *ip; | ||
| 613 | int error; | 622 | int error; |
| 614 | 623 | ||
| 615 | tp = _xfs_trans_alloc(mp, XFS_TRANS_DUMMY1, KM_SLEEP); | 624 | tp = _xfs_trans_alloc(mp, XFS_TRANS_DUMMY1, KM_SLEEP); |
| 616 | error = xfs_trans_reserve(tp, 0, XFS_ICHANGE_LOG_RES(mp), 0, 0, 0); | 625 | error = xfs_trans_reserve(tp, 0, mp->m_sb.sb_sectsize + 128, 0, 0, |
| 626 | XFS_DEFAULT_LOG_COUNT); | ||
| 617 | if (error) { | 627 | if (error) { |
| 618 | xfs_trans_cancel(tp, 0); | 628 | xfs_trans_cancel(tp, 0); |
| 619 | return error; | 629 | return error; |
| 620 | } | 630 | } |
| 621 | 631 | ||
| 622 | ip = mp->m_rootip; | 632 | /* log the UUID because it is an unchanging field */ |
| 623 | xfs_ilock(ip, XFS_ILOCK_EXCL); | 633 | xfs_mod_sb(tp, XFS_SB_UUID); |
| 624 | 634 | if (flags & SYNC_WAIT) | |
| 625 | xfs_trans_ijoin(tp, ip); | 635 | xfs_trans_set_sync(tp); |
| 626 | xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE); | 636 | return xfs_trans_commit(tp, 0); |
| 627 | xfs_trans_set_sync(tp); | ||
| 628 | error = xfs_trans_commit(tp, 0); | ||
| 629 | |||
| 630 | xfs_iunlock(ip, XFS_ILOCK_EXCL); | ||
| 631 | return error; | ||
| 632 | } | 637 | } |
| 633 | 638 | ||
| 634 | int | 639 | int |
diff --git a/fs/xfs/xfs_fsops.h b/fs/xfs/xfs_fsops.h index 88435e0a77c9..a786c5212c1e 100644 --- a/fs/xfs/xfs_fsops.h +++ b/fs/xfs/xfs_fsops.h | |||
| @@ -25,6 +25,6 @@ extern int xfs_fs_counts(xfs_mount_t *mp, xfs_fsop_counts_t *cnt); | |||
| 25 | extern int xfs_reserve_blocks(xfs_mount_t *mp, __uint64_t *inval, | 25 | extern int xfs_reserve_blocks(xfs_mount_t *mp, __uint64_t *inval, |
| 26 | xfs_fsop_resblks_t *outval); | 26 | xfs_fsop_resblks_t *outval); |
| 27 | extern int xfs_fs_goingdown(xfs_mount_t *mp, __uint32_t inflags); | 27 | extern int xfs_fs_goingdown(xfs_mount_t *mp, __uint32_t inflags); |
| 28 | extern int xfs_fs_log_dummy(xfs_mount_t *mp); | 28 | extern int xfs_fs_log_dummy(xfs_mount_t *mp, int flags); |
| 29 | 29 | ||
| 30 | #endif /* __XFS_FSOPS_H__ */ | 30 | #endif /* __XFS_FSOPS_H__ */ |
diff --git a/fs/xfs/xfs_ialloc.c b/fs/xfs/xfs_ialloc.c index abf80ae1e95b..5371d2dc360e 100644 --- a/fs/xfs/xfs_ialloc.c +++ b/fs/xfs/xfs_ialloc.c | |||
| @@ -1213,7 +1213,6 @@ xfs_imap_lookup( | |||
| 1213 | struct xfs_inobt_rec_incore rec; | 1213 | struct xfs_inobt_rec_incore rec; |
| 1214 | struct xfs_btree_cur *cur; | 1214 | struct xfs_btree_cur *cur; |
| 1215 | struct xfs_buf *agbp; | 1215 | struct xfs_buf *agbp; |
| 1216 | xfs_agino_t startino; | ||
| 1217 | int error; | 1216 | int error; |
| 1218 | int i; | 1217 | int i; |
| 1219 | 1218 | ||
| @@ -1227,13 +1226,13 @@ xfs_imap_lookup( | |||
| 1227 | } | 1226 | } |
| 1228 | 1227 | ||
| 1229 | /* | 1228 | /* |
| 1230 | * derive and lookup the exact inode record for the given agino. If the | 1229 | * Lookup the inode record for the given agino. If the record cannot be |
| 1231 | * record cannot be found, then it's an invalid inode number and we | 1230 | * found, then it's an invalid inode number and we should abort. Once |
| 1232 | * should abort. | 1231 | * we have a record, we need to ensure it contains the inode number |
| 1232 | * we are looking up. | ||
| 1233 | */ | 1233 | */ |
| 1234 | cur = xfs_inobt_init_cursor(mp, tp, agbp, agno); | 1234 | cur = xfs_inobt_init_cursor(mp, tp, agbp, agno); |
| 1235 | startino = agino & ~(XFS_IALLOC_INODES(mp) - 1); | 1235 | error = xfs_inobt_lookup(cur, agino, XFS_LOOKUP_LE, &i); |
| 1236 | error = xfs_inobt_lookup(cur, startino, XFS_LOOKUP_EQ, &i); | ||
| 1237 | if (!error) { | 1236 | if (!error) { |
| 1238 | if (i) | 1237 | if (i) |
| 1239 | error = xfs_inobt_get_rec(cur, &rec, &i); | 1238 | error = xfs_inobt_get_rec(cur, &rec, &i); |
| @@ -1246,6 +1245,11 @@ xfs_imap_lookup( | |||
| 1246 | if (error) | 1245 | if (error) |
| 1247 | return error; | 1246 | return error; |
| 1248 | 1247 | ||
| 1248 | /* check that the returned record contains the required inode */ | ||
| 1249 | if (rec.ir_startino > agino || | ||
| 1250 | rec.ir_startino + XFS_IALLOC_INODES(mp) <= agino) | ||
| 1251 | return EINVAL; | ||
| 1252 | |||
| 1249 | /* for untrusted inodes check it is allocated first */ | 1253 | /* for untrusted inodes check it is allocated first */ |
| 1250 | if ((flags & XFS_IGET_UNTRUSTED) && | 1254 | if ((flags & XFS_IGET_UNTRUSTED) && |
| 1251 | (rec.ir_free & XFS_INOBT_MASK(agino - rec.ir_startino))) | 1255 | (rec.ir_free & XFS_INOBT_MASK(agino - rec.ir_startino))) |
diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c index 68415cb4f23c..34798f391c49 100644 --- a/fs/xfs/xfs_inode.c +++ b/fs/xfs/xfs_inode.c | |||
| @@ -1914,6 +1914,11 @@ xfs_iunlink_remove( | |||
| 1914 | return 0; | 1914 | return 0; |
| 1915 | } | 1915 | } |
| 1916 | 1916 | ||
| 1917 | /* | ||
| 1918 | * A big issue when freeing the inode cluster is is that we _cannot_ skip any | ||
| 1919 | * inodes that are in memory - they all must be marked stale and attached to | ||
| 1920 | * the cluster buffer. | ||
| 1921 | */ | ||
| 1917 | STATIC void | 1922 | STATIC void |
| 1918 | xfs_ifree_cluster( | 1923 | xfs_ifree_cluster( |
| 1919 | xfs_inode_t *free_ip, | 1924 | xfs_inode_t *free_ip, |
| @@ -1945,8 +1950,6 @@ xfs_ifree_cluster( | |||
| 1945 | } | 1950 | } |
| 1946 | 1951 | ||
| 1947 | for (j = 0; j < nbufs; j++, inum += ninodes) { | 1952 | for (j = 0; j < nbufs; j++, inum += ninodes) { |
| 1948 | int found = 0; | ||
| 1949 | |||
| 1950 | blkno = XFS_AGB_TO_DADDR(mp, XFS_INO_TO_AGNO(mp, inum), | 1953 | blkno = XFS_AGB_TO_DADDR(mp, XFS_INO_TO_AGNO(mp, inum), |
| 1951 | XFS_INO_TO_AGBNO(mp, inum)); | 1954 | XFS_INO_TO_AGBNO(mp, inum)); |
| 1952 | 1955 | ||
| @@ -1965,7 +1968,9 @@ xfs_ifree_cluster( | |||
| 1965 | /* | 1968 | /* |
| 1966 | * Walk the inodes already attached to the buffer and mark them | 1969 | * Walk the inodes already attached to the buffer and mark them |
| 1967 | * stale. These will all have the flush locks held, so an | 1970 | * stale. These will all have the flush locks held, so an |
| 1968 | * in-memory inode walk can't lock them. | 1971 | * in-memory inode walk can't lock them. By marking them all |
| 1972 | * stale first, we will not attempt to lock them in the loop | ||
| 1973 | * below as the XFS_ISTALE flag will be set. | ||
| 1969 | */ | 1974 | */ |
| 1970 | lip = XFS_BUF_FSPRIVATE(bp, xfs_log_item_t *); | 1975 | lip = XFS_BUF_FSPRIVATE(bp, xfs_log_item_t *); |
| 1971 | while (lip) { | 1976 | while (lip) { |
| @@ -1977,11 +1982,11 @@ xfs_ifree_cluster( | |||
| 1977 | &iip->ili_flush_lsn, | 1982 | &iip->ili_flush_lsn, |
| 1978 | &iip->ili_item.li_lsn); | 1983 | &iip->ili_item.li_lsn); |
| 1979 | xfs_iflags_set(iip->ili_inode, XFS_ISTALE); | 1984 | xfs_iflags_set(iip->ili_inode, XFS_ISTALE); |
| 1980 | found++; | ||
| 1981 | } | 1985 | } |
| 1982 | lip = lip->li_bio_list; | 1986 | lip = lip->li_bio_list; |
| 1983 | } | 1987 | } |
| 1984 | 1988 | ||
| 1989 | |||
| 1985 | /* | 1990 | /* |
| 1986 | * For each inode in memory attempt to add it to the inode | 1991 | * For each inode in memory attempt to add it to the inode |
| 1987 | * buffer and set it up for being staled on buffer IO | 1992 | * buffer and set it up for being staled on buffer IO |
| @@ -1993,6 +1998,7 @@ xfs_ifree_cluster( | |||
| 1993 | * even trying to lock them. | 1998 | * even trying to lock them. |
| 1994 | */ | 1999 | */ |
| 1995 | for (i = 0; i < ninodes; i++) { | 2000 | for (i = 0; i < ninodes; i++) { |
| 2001 | retry: | ||
| 1996 | read_lock(&pag->pag_ici_lock); | 2002 | read_lock(&pag->pag_ici_lock); |
| 1997 | ip = radix_tree_lookup(&pag->pag_ici_root, | 2003 | ip = radix_tree_lookup(&pag->pag_ici_root, |
| 1998 | XFS_INO_TO_AGINO(mp, (inum + i))); | 2004 | XFS_INO_TO_AGINO(mp, (inum + i))); |
| @@ -2003,38 +2009,36 @@ xfs_ifree_cluster( | |||
| 2003 | continue; | 2009 | continue; |
| 2004 | } | 2010 | } |
| 2005 | 2011 | ||
| 2006 | /* don't try to lock/unlock the current inode */ | 2012 | /* |
| 2013 | * Don't try to lock/unlock the current inode, but we | ||
| 2014 | * _cannot_ skip the other inodes that we did not find | ||
| 2015 | * in the list attached to the buffer and are not | ||
| 2016 | * already marked stale. If we can't lock it, back off | ||
| 2017 | * and retry. | ||
| 2018 | */ | ||
| 2007 | if (ip != free_ip && | 2019 | if (ip != free_ip && |
| 2008 | !xfs_ilock_nowait(ip, XFS_ILOCK_EXCL)) { | 2020 | !xfs_ilock_nowait(ip, XFS_ILOCK_EXCL)) { |
| 2009 | read_unlock(&pag->pag_ici_lock); | 2021 | read_unlock(&pag->pag_ici_lock); |
| 2010 | continue; | 2022 | delay(1); |
| 2023 | goto retry; | ||
| 2011 | } | 2024 | } |
| 2012 | read_unlock(&pag->pag_ici_lock); | 2025 | read_unlock(&pag->pag_ici_lock); |
| 2013 | 2026 | ||
| 2014 | if (!xfs_iflock_nowait(ip)) { | 2027 | xfs_iflock(ip); |
| 2015 | if (ip != free_ip) | ||
| 2016 | xfs_iunlock(ip, XFS_ILOCK_EXCL); | ||
| 2017 | continue; | ||
| 2018 | } | ||
| 2019 | |||
| 2020 | xfs_iflags_set(ip, XFS_ISTALE); | 2028 | xfs_iflags_set(ip, XFS_ISTALE); |
| 2021 | if (xfs_inode_clean(ip)) { | ||
| 2022 | ASSERT(ip != free_ip); | ||
| 2023 | xfs_ifunlock(ip); | ||
| 2024 | xfs_iunlock(ip, XFS_ILOCK_EXCL); | ||
| 2025 | continue; | ||
| 2026 | } | ||
| 2027 | 2029 | ||
| 2030 | /* | ||
| 2031 | * we don't need to attach clean inodes or those only | ||
| 2032 | * with unlogged changes (which we throw away, anyway). | ||
| 2033 | */ | ||
| 2028 | iip = ip->i_itemp; | 2034 | iip = ip->i_itemp; |
| 2029 | if (!iip) { | 2035 | if (!iip || xfs_inode_clean(ip)) { |
| 2030 | /* inode with unlogged changes only */ | ||
| 2031 | ASSERT(ip != free_ip); | 2036 | ASSERT(ip != free_ip); |
| 2032 | ip->i_update_core = 0; | 2037 | ip->i_update_core = 0; |
| 2033 | xfs_ifunlock(ip); | 2038 | xfs_ifunlock(ip); |
| 2034 | xfs_iunlock(ip, XFS_ILOCK_EXCL); | 2039 | xfs_iunlock(ip, XFS_ILOCK_EXCL); |
| 2035 | continue; | 2040 | continue; |
| 2036 | } | 2041 | } |
| 2037 | found++; | ||
| 2038 | 2042 | ||
| 2039 | iip->ili_last_fields = iip->ili_format.ilf_fields; | 2043 | iip->ili_last_fields = iip->ili_format.ilf_fields; |
| 2040 | iip->ili_format.ilf_fields = 0; | 2044 | iip->ili_format.ilf_fields = 0; |
| @@ -2049,8 +2053,7 @@ xfs_ifree_cluster( | |||
| 2049 | xfs_iunlock(ip, XFS_ILOCK_EXCL); | 2053 | xfs_iunlock(ip, XFS_ILOCK_EXCL); |
| 2050 | } | 2054 | } |
| 2051 | 2055 | ||
| 2052 | if (found) | 2056 | xfs_trans_stale_inode_buf(tp, bp); |
| 2053 | xfs_trans_stale_inode_buf(tp, bp); | ||
| 2054 | xfs_trans_binval(tp, bp); | 2057 | xfs_trans_binval(tp, bp); |
| 2055 | } | 2058 | } |
| 2056 | 2059 | ||
diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c index 925d572bf0f4..33f718f92a48 100644 --- a/fs/xfs/xfs_log.c +++ b/fs/xfs/xfs_log.c | |||
| @@ -3015,7 +3015,8 @@ _xfs_log_force( | |||
| 3015 | 3015 | ||
| 3016 | XFS_STATS_INC(xs_log_force); | 3016 | XFS_STATS_INC(xs_log_force); |
| 3017 | 3017 | ||
| 3018 | xlog_cil_push(log, 1); | 3018 | if (log->l_cilp) |
| 3019 | xlog_cil_force(log); | ||
| 3019 | 3020 | ||
| 3020 | spin_lock(&log->l_icloglock); | 3021 | spin_lock(&log->l_icloglock); |
| 3021 | 3022 | ||
| @@ -3167,7 +3168,7 @@ _xfs_log_force_lsn( | |||
| 3167 | XFS_STATS_INC(xs_log_force); | 3168 | XFS_STATS_INC(xs_log_force); |
| 3168 | 3169 | ||
| 3169 | if (log->l_cilp) { | 3170 | if (log->l_cilp) { |
| 3170 | lsn = xlog_cil_push_lsn(log, lsn); | 3171 | lsn = xlog_cil_force_lsn(log, lsn); |
| 3171 | if (lsn == NULLCOMMITLSN) | 3172 | if (lsn == NULLCOMMITLSN) |
| 3172 | return 0; | 3173 | return 0; |
| 3173 | } | 3174 | } |
| @@ -3724,7 +3725,7 @@ xfs_log_force_umount( | |||
| 3724 | * call below. | 3725 | * call below. |
| 3725 | */ | 3726 | */ |
| 3726 | if (!logerror && (mp->m_flags & XFS_MOUNT_DELAYLOG)) | 3727 | if (!logerror && (mp->m_flags & XFS_MOUNT_DELAYLOG)) |
| 3727 | xlog_cil_push(log, 1); | 3728 | xlog_cil_force(log); |
| 3728 | 3729 | ||
| 3729 | /* | 3730 | /* |
| 3730 | * We must hold both the GRANT lock and the LOG lock, | 3731 | * We must hold both the GRANT lock and the LOG lock, |
diff --git a/fs/xfs/xfs_log_cil.c b/fs/xfs/xfs_log_cil.c index 31e4ea2d19ac..ed575fb4b495 100644 --- a/fs/xfs/xfs_log_cil.c +++ b/fs/xfs/xfs_log_cil.c | |||
| @@ -68,6 +68,7 @@ xlog_cil_init( | |||
| 68 | ctx->sequence = 1; | 68 | ctx->sequence = 1; |
| 69 | ctx->cil = cil; | 69 | ctx->cil = cil; |
| 70 | cil->xc_ctx = ctx; | 70 | cil->xc_ctx = ctx; |
| 71 | cil->xc_current_sequence = ctx->sequence; | ||
| 71 | 72 | ||
| 72 | cil->xc_log = log; | 73 | cil->xc_log = log; |
| 73 | log->l_cilp = cil; | 74 | log->l_cilp = cil; |
| @@ -269,15 +270,10 @@ xlog_cil_insert( | |||
| 269 | static void | 270 | static void |
| 270 | xlog_cil_format_items( | 271 | xlog_cil_format_items( |
| 271 | struct log *log, | 272 | struct log *log, |
| 272 | struct xfs_log_vec *log_vector, | 273 | struct xfs_log_vec *log_vector) |
| 273 | struct xlog_ticket *ticket, | ||
| 274 | xfs_lsn_t *start_lsn) | ||
| 275 | { | 274 | { |
| 276 | struct xfs_log_vec *lv; | 275 | struct xfs_log_vec *lv; |
| 277 | 276 | ||
| 278 | if (start_lsn) | ||
| 279 | *start_lsn = log->l_cilp->xc_ctx->sequence; | ||
| 280 | |||
| 281 | ASSERT(log_vector); | 277 | ASSERT(log_vector); |
| 282 | for (lv = log_vector; lv; lv = lv->lv_next) { | 278 | for (lv = log_vector; lv; lv = lv->lv_next) { |
| 283 | void *ptr; | 279 | void *ptr; |
| @@ -301,9 +297,24 @@ xlog_cil_format_items( | |||
| 301 | ptr += vec->i_len; | 297 | ptr += vec->i_len; |
| 302 | } | 298 | } |
| 303 | ASSERT(ptr == lv->lv_buf + lv->lv_buf_len); | 299 | ASSERT(ptr == lv->lv_buf + lv->lv_buf_len); |
| 300 | } | ||
| 301 | } | ||
| 302 | |||
| 303 | static void | ||
| 304 | xlog_cil_insert_items( | ||
| 305 | struct log *log, | ||
| 306 | struct xfs_log_vec *log_vector, | ||
| 307 | struct xlog_ticket *ticket, | ||
| 308 | xfs_lsn_t *start_lsn) | ||
| 309 | { | ||
| 310 | struct xfs_log_vec *lv; | ||
| 311 | |||
| 312 | if (start_lsn) | ||
| 313 | *start_lsn = log->l_cilp->xc_ctx->sequence; | ||
| 304 | 314 | ||
| 315 | ASSERT(log_vector); | ||
| 316 | for (lv = log_vector; lv; lv = lv->lv_next) | ||
| 305 | xlog_cil_insert(log, ticket, lv->lv_item, lv); | 317 | xlog_cil_insert(log, ticket, lv->lv_item, lv); |
| 306 | } | ||
| 307 | } | 318 | } |
| 308 | 319 | ||
| 309 | static void | 320 | static void |
| @@ -321,80 +332,6 @@ xlog_cil_free_logvec( | |||
| 321 | } | 332 | } |
| 322 | 333 | ||
| 323 | /* | 334 | /* |
| 324 | * Commit a transaction with the given vector to the Committed Item List. | ||
| 325 | * | ||
| 326 | * To do this, we need to format the item, pin it in memory if required and | ||
| 327 | * account for the space used by the transaction. Once we have done that we | ||
| 328 | * need to release the unused reservation for the transaction, attach the | ||
| 329 | * transaction to the checkpoint context so we carry the busy extents through | ||
| 330 | * to checkpoint completion, and then unlock all the items in the transaction. | ||
| 331 | * | ||
| 332 | * For more specific information about the order of operations in | ||
| 333 | * xfs_log_commit_cil() please refer to the comments in | ||
| 334 | * xfs_trans_commit_iclog(). | ||
| 335 | * | ||
| 336 | * Called with the context lock already held in read mode to lock out | ||
| 337 | * background commit, returns without it held once background commits are | ||
| 338 | * allowed again. | ||
| 339 | */ | ||
| 340 | int | ||
| 341 | xfs_log_commit_cil( | ||
| 342 | struct xfs_mount *mp, | ||
| 343 | struct xfs_trans *tp, | ||
| 344 | struct xfs_log_vec *log_vector, | ||
| 345 | xfs_lsn_t *commit_lsn, | ||
| 346 | int flags) | ||
| 347 | { | ||
| 348 | struct log *log = mp->m_log; | ||
| 349 | int log_flags = 0; | ||
| 350 | int push = 0; | ||
| 351 | |||
| 352 | if (flags & XFS_TRANS_RELEASE_LOG_RES) | ||
| 353 | log_flags = XFS_LOG_REL_PERM_RESERV; | ||
| 354 | |||
| 355 | if (XLOG_FORCED_SHUTDOWN(log)) { | ||
| 356 | xlog_cil_free_logvec(log_vector); | ||
| 357 | return XFS_ERROR(EIO); | ||
| 358 | } | ||
| 359 | |||
| 360 | /* lock out background commit */ | ||
| 361 | down_read(&log->l_cilp->xc_ctx_lock); | ||
| 362 | xlog_cil_format_items(log, log_vector, tp->t_ticket, commit_lsn); | ||
| 363 | |||
| 364 | /* check we didn't blow the reservation */ | ||
| 365 | if (tp->t_ticket->t_curr_res < 0) | ||
| 366 | xlog_print_tic_res(log->l_mp, tp->t_ticket); | ||
| 367 | |||
| 368 | /* attach the transaction to the CIL if it has any busy extents */ | ||
| 369 | if (!list_empty(&tp->t_busy)) { | ||
| 370 | spin_lock(&log->l_cilp->xc_cil_lock); | ||
| 371 | list_splice_init(&tp->t_busy, | ||
| 372 | &log->l_cilp->xc_ctx->busy_extents); | ||
| 373 | spin_unlock(&log->l_cilp->xc_cil_lock); | ||
| 374 | } | ||
| 375 | |||
| 376 | tp->t_commit_lsn = *commit_lsn; | ||
| 377 | xfs_log_done(mp, tp->t_ticket, NULL, log_flags); | ||
| 378 | xfs_trans_unreserve_and_mod_sb(tp); | ||
| 379 | |||
| 380 | /* check for background commit before unlock */ | ||
| 381 | if (log->l_cilp->xc_ctx->space_used > XLOG_CIL_SPACE_LIMIT(log)) | ||
| 382 | push = 1; | ||
| 383 | up_read(&log->l_cilp->xc_ctx_lock); | ||
| 384 | |||
| 385 | /* | ||
| 386 | * We need to push CIL every so often so we don't cache more than we | ||
| 387 | * can fit in the log. The limit really is that a checkpoint can't be | ||
| 388 | * more than half the log (the current checkpoint is not allowed to | ||
| 389 | * overwrite the previous checkpoint), but commit latency and memory | ||
| 390 | * usage limit this to a smaller size in most cases. | ||
| 391 | */ | ||
| 392 | if (push) | ||
| 393 | xlog_cil_push(log, 0); | ||
| 394 | return 0; | ||
| 395 | } | ||
| 396 | |||
| 397 | /* | ||
| 398 | * Mark all items committed and clear busy extents. We free the log vector | 335 | * Mark all items committed and clear busy extents. We free the log vector |
| 399 | * chains in a separate pass so that we unpin the log items as quickly as | 336 | * chains in a separate pass so that we unpin the log items as quickly as |
| 400 | * possible. | 337 | * possible. |
| @@ -427,13 +364,23 @@ xlog_cil_committed( | |||
| 427 | } | 364 | } |
| 428 | 365 | ||
| 429 | /* | 366 | /* |
| 430 | * Push the Committed Item List to the log. If the push_now flag is not set, | 367 | * Push the Committed Item List to the log. If @push_seq flag is zero, then it |
| 431 | * then it is a background flush and so we can chose to ignore it. | 368 | * is a background flush and so we can chose to ignore it. Otherwise, if the |
| 369 | * current sequence is the same as @push_seq we need to do a flush. If | ||
| 370 | * @push_seq is less than the current sequence, then it has already been | ||
| 371 | * flushed and we don't need to do anything - the caller will wait for it to | ||
| 372 | * complete if necessary. | ||
| 373 | * | ||
| 374 | * @push_seq is a value rather than a flag because that allows us to do an | ||
| 375 | * unlocked check of the sequence number for a match. Hence we can allows log | ||
| 376 | * forces to run racily and not issue pushes for the same sequence twice. If we | ||
| 377 | * get a race between multiple pushes for the same sequence they will block on | ||
| 378 | * the first one and then abort, hence avoiding needless pushes. | ||
| 432 | */ | 379 | */ |
| 433 | int | 380 | STATIC int |
| 434 | xlog_cil_push( | 381 | xlog_cil_push( |
| 435 | struct log *log, | 382 | struct log *log, |
| 436 | int push_now) | 383 | xfs_lsn_t push_seq) |
| 437 | { | 384 | { |
| 438 | struct xfs_cil *cil = log->l_cilp; | 385 | struct xfs_cil *cil = log->l_cilp; |
| 439 | struct xfs_log_vec *lv; | 386 | struct xfs_log_vec *lv; |
| @@ -453,12 +400,14 @@ xlog_cil_push( | |||
| 453 | if (!cil) | 400 | if (!cil) |
| 454 | return 0; | 401 | return 0; |
| 455 | 402 | ||
| 403 | ASSERT(!push_seq || push_seq <= cil->xc_ctx->sequence); | ||
| 404 | |||
| 456 | new_ctx = kmem_zalloc(sizeof(*new_ctx), KM_SLEEP|KM_NOFS); | 405 | new_ctx = kmem_zalloc(sizeof(*new_ctx), KM_SLEEP|KM_NOFS); |
| 457 | new_ctx->ticket = xlog_cil_ticket_alloc(log); | 406 | new_ctx->ticket = xlog_cil_ticket_alloc(log); |
| 458 | 407 | ||
| 459 | /* lock out transaction commit, but don't block on background push */ | 408 | /* lock out transaction commit, but don't block on background push */ |
| 460 | if (!down_write_trylock(&cil->xc_ctx_lock)) { | 409 | if (!down_write_trylock(&cil->xc_ctx_lock)) { |
| 461 | if (!push_now) | 410 | if (!push_seq) |
| 462 | goto out_free_ticket; | 411 | goto out_free_ticket; |
| 463 | down_write(&cil->xc_ctx_lock); | 412 | down_write(&cil->xc_ctx_lock); |
| 464 | } | 413 | } |
| @@ -469,7 +418,11 @@ xlog_cil_push( | |||
| 469 | goto out_skip; | 418 | goto out_skip; |
| 470 | 419 | ||
| 471 | /* check for spurious background flush */ | 420 | /* check for spurious background flush */ |
| 472 | if (!push_now && cil->xc_ctx->space_used < XLOG_CIL_SPACE_LIMIT(log)) | 421 | if (!push_seq && cil->xc_ctx->space_used < XLOG_CIL_SPACE_LIMIT(log)) |
| 422 | goto out_skip; | ||
| 423 | |||
| 424 | /* check for a previously pushed seqeunce */ | ||
| 425 | if (push_seq < cil->xc_ctx->sequence) | ||
| 473 | goto out_skip; | 426 | goto out_skip; |
| 474 | 427 | ||
| 475 | /* | 428 | /* |
| @@ -515,6 +468,13 @@ xlog_cil_push( | |||
| 515 | cil->xc_ctx = new_ctx; | 468 | cil->xc_ctx = new_ctx; |
| 516 | 469 | ||
| 517 | /* | 470 | /* |
| 471 | * mirror the new sequence into the cil structure so that we can do | ||
| 472 | * unlocked checks against the current sequence in log forces without | ||
| 473 | * risking deferencing a freed context pointer. | ||
| 474 | */ | ||
| 475 | cil->xc_current_sequence = new_ctx->sequence; | ||
| 476 | |||
| 477 | /* | ||
| 518 | * The switch is now done, so we can drop the context lock and move out | 478 | * The switch is now done, so we can drop the context lock and move out |
| 519 | * of a shared context. We can't just go straight to the commit record, | 479 | * of a shared context. We can't just go straight to the commit record, |
| 520 | * though - we need to synchronise with previous and future commits so | 480 | * though - we need to synchronise with previous and future commits so |
| @@ -626,6 +586,102 @@ out_abort: | |||
| 626 | } | 586 | } |
| 627 | 587 | ||
| 628 | /* | 588 | /* |
| 589 | * Commit a transaction with the given vector to the Committed Item List. | ||
| 590 | * | ||
| 591 | * To do this, we need to format the item, pin it in memory if required and | ||
| 592 | * account for the space used by the transaction. Once we have done that we | ||
| 593 | * need to release the unused reservation for the transaction, attach the | ||
| 594 | * transaction to the checkpoint context so we carry the busy extents through | ||
| 595 | * to checkpoint completion, and then unlock all the items in the transaction. | ||
| 596 | * | ||
| 597 | * For more specific information about the order of operations in | ||
| 598 | * xfs_log_commit_cil() please refer to the comments in | ||
| 599 | * xfs_trans_commit_iclog(). | ||
| 600 | * | ||
| 601 | * Called with the context lock already held in read mode to lock out | ||
| 602 | * background commit, returns without it held once background commits are | ||
| 603 | * allowed again. | ||
| 604 | */ | ||
| 605 | int | ||
| 606 | xfs_log_commit_cil( | ||
| 607 | struct xfs_mount *mp, | ||
| 608 | struct xfs_trans *tp, | ||
| 609 | struct xfs_log_vec *log_vector, | ||
| 610 | xfs_lsn_t *commit_lsn, | ||
| 611 | int flags) | ||
| 612 | { | ||
| 613 | struct log *log = mp->m_log; | ||
| 614 | int log_flags = 0; | ||
| 615 | int push = 0; | ||
| 616 | |||
| 617 | if (flags & XFS_TRANS_RELEASE_LOG_RES) | ||
| 618 | log_flags = XFS_LOG_REL_PERM_RESERV; | ||
| 619 | |||
| 620 | if (XLOG_FORCED_SHUTDOWN(log)) { | ||
| 621 | xlog_cil_free_logvec(log_vector); | ||
| 622 | return XFS_ERROR(EIO); | ||
| 623 | } | ||
| 624 | |||
| 625 | /* | ||
| 626 | * do all the hard work of formatting items (including memory | ||
| 627 | * allocation) outside the CIL context lock. This prevents stalling CIL | ||
| 628 | * pushes when we are low on memory and a transaction commit spends a | ||
| 629 | * lot of time in memory reclaim. | ||
| 630 | */ | ||
| 631 | xlog_cil_format_items(log, log_vector); | ||
| 632 | |||
| 633 | /* lock out background commit */ | ||
| 634 | down_read(&log->l_cilp->xc_ctx_lock); | ||
| 635 | xlog_cil_insert_items(log, log_vector, tp->t_ticket, commit_lsn); | ||
| 636 | |||
| 637 | /* check we didn't blow the reservation */ | ||
| 638 | if (tp->t_ticket->t_curr_res < 0) | ||
| 639 | xlog_print_tic_res(log->l_mp, tp->t_ticket); | ||
| 640 | |||
| 641 | /* attach the transaction to the CIL if it has any busy extents */ | ||
| 642 | if (!list_empty(&tp->t_busy)) { | ||
| 643 | spin_lock(&log->l_cilp->xc_cil_lock); | ||
| 644 | list_splice_init(&tp->t_busy, | ||
| 645 | &log->l_cilp->xc_ctx->busy_extents); | ||
| 646 | spin_unlock(&log->l_cilp->xc_cil_lock); | ||
| 647 | } | ||
| 648 | |||
| 649 | tp->t_commit_lsn = *commit_lsn; | ||
| 650 | xfs_log_done(mp, tp->t_ticket, NULL, log_flags); | ||
| 651 | xfs_trans_unreserve_and_mod_sb(tp); | ||
| 652 | |||
| 653 | /* | ||
| 654 | * Once all the items of the transaction have been copied to the CIL, | ||
| 655 | * the items can be unlocked and freed. | ||
| 656 | * | ||
| 657 | * This needs to be done before we drop the CIL context lock because we | ||
| 658 | * have to update state in the log items and unlock them before they go | ||
| 659 | * to disk. If we don't, then the CIL checkpoint can race with us and | ||
| 660 | * we can run checkpoint completion before we've updated and unlocked | ||
| 661 | * the log items. This affects (at least) processing of stale buffers, | ||
| 662 | * inodes and EFIs. | ||
| 663 | */ | ||
| 664 | xfs_trans_free_items(tp, *commit_lsn, 0); | ||
| 665 | |||
| 666 | /* check for background commit before unlock */ | ||
| 667 | if (log->l_cilp->xc_ctx->space_used > XLOG_CIL_SPACE_LIMIT(log)) | ||
| 668 | push = 1; | ||
| 669 | |||
| 670 | up_read(&log->l_cilp->xc_ctx_lock); | ||
| 671 | |||
| 672 | /* | ||
| 673 | * We need to push CIL every so often so we don't cache more than we | ||
| 674 | * can fit in the log. The limit really is that a checkpoint can't be | ||
| 675 | * more than half the log (the current checkpoint is not allowed to | ||
| 676 | * overwrite the previous checkpoint), but commit latency and memory | ||
| 677 | * usage limit this to a smaller size in most cases. | ||
| 678 | */ | ||
| 679 | if (push) | ||
| 680 | xlog_cil_push(log, 0); | ||
| 681 | return 0; | ||
| 682 | } | ||
| 683 | |||
| 684 | /* | ||
| 629 | * Conditionally push the CIL based on the sequence passed in. | 685 | * Conditionally push the CIL based on the sequence passed in. |
| 630 | * | 686 | * |
| 631 | * We only need to push if we haven't already pushed the sequence | 687 | * We only need to push if we haven't already pushed the sequence |
| @@ -639,39 +695,34 @@ out_abort: | |||
| 639 | * commit lsn is there. It'll be empty, so this is broken for now. | 695 | * commit lsn is there. It'll be empty, so this is broken for now. |
| 640 | */ | 696 | */ |
| 641 | xfs_lsn_t | 697 | xfs_lsn_t |
| 642 | xlog_cil_push_lsn( | 698 | xlog_cil_force_lsn( |
| 643 | struct log *log, | 699 | struct log *log, |
| 644 | xfs_lsn_t push_seq) | 700 | xfs_lsn_t sequence) |
| 645 | { | 701 | { |
| 646 | struct xfs_cil *cil = log->l_cilp; | 702 | struct xfs_cil *cil = log->l_cilp; |
| 647 | struct xfs_cil_ctx *ctx; | 703 | struct xfs_cil_ctx *ctx; |
| 648 | xfs_lsn_t commit_lsn = NULLCOMMITLSN; | 704 | xfs_lsn_t commit_lsn = NULLCOMMITLSN; |
| 649 | 705 | ||
| 650 | restart: | 706 | ASSERT(sequence <= cil->xc_current_sequence); |
| 651 | down_write(&cil->xc_ctx_lock); | 707 | |
| 652 | ASSERT(push_seq <= cil->xc_ctx->sequence); | 708 | /* |
| 653 | 709 | * check to see if we need to force out the current context. | |
| 654 | /* check to see if we need to force out the current context */ | 710 | * xlog_cil_push() handles racing pushes for the same sequence, |
| 655 | if (push_seq == cil->xc_ctx->sequence) { | 711 | * so no need to deal with it here. |
| 656 | up_write(&cil->xc_ctx_lock); | 712 | */ |
| 657 | xlog_cil_push(log, 1); | 713 | if (sequence == cil->xc_current_sequence) |
| 658 | goto restart; | 714 | xlog_cil_push(log, sequence); |
| 659 | } | ||
| 660 | 715 | ||
| 661 | /* | 716 | /* |
| 662 | * See if we can find a previous sequence still committing. | 717 | * See if we can find a previous sequence still committing. |
| 663 | * We can drop the flush lock as soon as we have the cil lock | ||
| 664 | * because we are now only comparing contexts protected by | ||
| 665 | * the cil lock. | ||
| 666 | * | ||
| 667 | * We need to wait for all previous sequence commits to complete | 718 | * We need to wait for all previous sequence commits to complete |
| 668 | * before allowing the force of push_seq to go ahead. Hence block | 719 | * before allowing the force of push_seq to go ahead. Hence block |
| 669 | * on commits for those as well. | 720 | * on commits for those as well. |
| 670 | */ | 721 | */ |
| 722 | restart: | ||
| 671 | spin_lock(&cil->xc_cil_lock); | 723 | spin_lock(&cil->xc_cil_lock); |
| 672 | up_write(&cil->xc_ctx_lock); | ||
| 673 | list_for_each_entry(ctx, &cil->xc_committing, committing) { | 724 | list_for_each_entry(ctx, &cil->xc_committing, committing) { |
| 674 | if (ctx->sequence > push_seq) | 725 | if (ctx->sequence > sequence) |
| 675 | continue; | 726 | continue; |
| 676 | if (!ctx->commit_lsn) { | 727 | if (!ctx->commit_lsn) { |
| 677 | /* | 728 | /* |
| @@ -681,7 +732,7 @@ restart: | |||
| 681 | sv_wait(&cil->xc_commit_wait, 0, &cil->xc_cil_lock, 0); | 732 | sv_wait(&cil->xc_commit_wait, 0, &cil->xc_cil_lock, 0); |
| 682 | goto restart; | 733 | goto restart; |
| 683 | } | 734 | } |
| 684 | if (ctx->sequence != push_seq) | 735 | if (ctx->sequence != sequence) |
| 685 | continue; | 736 | continue; |
| 686 | /* found it! */ | 737 | /* found it! */ |
| 687 | commit_lsn = ctx->commit_lsn; | 738 | commit_lsn = ctx->commit_lsn; |
diff --git a/fs/xfs/xfs_log_priv.h b/fs/xfs/xfs_log_priv.h index 8c072618965c..ced52b98b322 100644 --- a/fs/xfs/xfs_log_priv.h +++ b/fs/xfs/xfs_log_priv.h | |||
| @@ -422,6 +422,7 @@ struct xfs_cil { | |||
| 422 | struct rw_semaphore xc_ctx_lock; | 422 | struct rw_semaphore xc_ctx_lock; |
| 423 | struct list_head xc_committing; | 423 | struct list_head xc_committing; |
| 424 | sv_t xc_commit_wait; | 424 | sv_t xc_commit_wait; |
| 425 | xfs_lsn_t xc_current_sequence; | ||
| 425 | }; | 426 | }; |
| 426 | 427 | ||
| 427 | /* | 428 | /* |
| @@ -562,8 +563,16 @@ int xlog_cil_init(struct log *log); | |||
| 562 | void xlog_cil_init_post_recovery(struct log *log); | 563 | void xlog_cil_init_post_recovery(struct log *log); |
| 563 | void xlog_cil_destroy(struct log *log); | 564 | void xlog_cil_destroy(struct log *log); |
| 564 | 565 | ||
| 565 | int xlog_cil_push(struct log *log, int push_now); | 566 | /* |
| 566 | xfs_lsn_t xlog_cil_push_lsn(struct log *log, xfs_lsn_t push_sequence); | 567 | * CIL force routines |
| 568 | */ | ||
| 569 | xfs_lsn_t xlog_cil_force_lsn(struct log *log, xfs_lsn_t sequence); | ||
| 570 | |||
| 571 | static inline void | ||
| 572 | xlog_cil_force(struct log *log) | ||
| 573 | { | ||
| 574 | xlog_cil_force_lsn(log, log->l_cilp->xc_current_sequence); | ||
| 575 | } | ||
| 567 | 576 | ||
| 568 | /* | 577 | /* |
| 569 | * Unmount record type is used as a pseudo transaction type for the ticket. | 578 | * Unmount record type is used as a pseudo transaction type for the ticket. |
diff --git a/fs/xfs/xfs_trans.c b/fs/xfs/xfs_trans.c index fdca7416c754..1c47edaea0d2 100644 --- a/fs/xfs/xfs_trans.c +++ b/fs/xfs/xfs_trans.c | |||
| @@ -1167,7 +1167,7 @@ xfs_trans_del_item( | |||
| 1167 | * Unlock all of the items of a transaction and free all the descriptors | 1167 | * Unlock all of the items of a transaction and free all the descriptors |
| 1168 | * of that transaction. | 1168 | * of that transaction. |
| 1169 | */ | 1169 | */ |
| 1170 | STATIC void | 1170 | void |
| 1171 | xfs_trans_free_items( | 1171 | xfs_trans_free_items( |
| 1172 | struct xfs_trans *tp, | 1172 | struct xfs_trans *tp, |
| 1173 | xfs_lsn_t commit_lsn, | 1173 | xfs_lsn_t commit_lsn, |
| @@ -1653,9 +1653,6 @@ xfs_trans_commit_cil( | |||
| 1653 | return error; | 1653 | return error; |
| 1654 | 1654 | ||
| 1655 | current_restore_flags_nested(&tp->t_pflags, PF_FSTRANS); | 1655 | current_restore_flags_nested(&tp->t_pflags, PF_FSTRANS); |
| 1656 | |||
| 1657 | /* xfs_trans_free_items() unlocks them first */ | ||
| 1658 | xfs_trans_free_items(tp, *commit_lsn, 0); | ||
| 1659 | xfs_trans_free(tp); | 1656 | xfs_trans_free(tp); |
| 1660 | return 0; | 1657 | return 0; |
| 1661 | } | 1658 | } |
diff --git a/fs/xfs/xfs_trans_priv.h b/fs/xfs/xfs_trans_priv.h index e2d93d8ead7b..62da86c90de5 100644 --- a/fs/xfs/xfs_trans_priv.h +++ b/fs/xfs/xfs_trans_priv.h | |||
| @@ -25,7 +25,8 @@ struct xfs_trans; | |||
| 25 | 25 | ||
| 26 | void xfs_trans_add_item(struct xfs_trans *, struct xfs_log_item *); | 26 | void xfs_trans_add_item(struct xfs_trans *, struct xfs_log_item *); |
| 27 | void xfs_trans_del_item(struct xfs_log_item *); | 27 | void xfs_trans_del_item(struct xfs_log_item *); |
| 28 | 28 | void xfs_trans_free_items(struct xfs_trans *tp, xfs_lsn_t commit_lsn, | |
| 29 | int flags); | ||
| 29 | void xfs_trans_item_committed(struct xfs_log_item *lip, | 30 | void xfs_trans_item_committed(struct xfs_log_item *lip, |
| 30 | xfs_lsn_t commit_lsn, int aborted); | 31 | xfs_lsn_t commit_lsn, int aborted); |
| 31 | void xfs_trans_unreserve_and_mod_sb(struct xfs_trans *tp); | 32 | void xfs_trans_unreserve_and_mod_sb(struct xfs_trans *tp); |
diff --git a/include/drm/drmP.h b/include/drm/drmP.h index 2a512bc0d4ab..7809d230adee 100644 --- a/include/drm/drmP.h +++ b/include/drm/drmP.h | |||
| @@ -305,14 +305,16 @@ struct drm_ioctl_desc { | |||
| 305 | unsigned int cmd; | 305 | unsigned int cmd; |
| 306 | int flags; | 306 | int flags; |
| 307 | drm_ioctl_t *func; | 307 | drm_ioctl_t *func; |
| 308 | unsigned int cmd_drv; | ||
| 308 | }; | 309 | }; |
| 309 | 310 | ||
| 310 | /** | 311 | /** |
| 311 | * Creates a driver or general drm_ioctl_desc array entry for the given | 312 | * Creates a driver or general drm_ioctl_desc array entry for the given |
| 312 | * ioctl, for use by drm_ioctl(). | 313 | * ioctl, for use by drm_ioctl(). |
| 313 | */ | 314 | */ |
| 314 | #define DRM_IOCTL_DEF(ioctl, _func, _flags) \ | 315 | |
| 315 | [DRM_IOCTL_NR(ioctl)] = {.cmd = ioctl, .func = _func, .flags = _flags} | 316 | #define DRM_IOCTL_DEF_DRV(ioctl, _func, _flags) \ |
| 317 | [DRM_IOCTL_NR(DRM_##ioctl)] = {.cmd = DRM_##ioctl, .func = _func, .flags = _flags, .cmd_drv = DRM_IOCTL_##ioctl} | ||
| 316 | 318 | ||
| 317 | struct drm_magic_entry { | 319 | struct drm_magic_entry { |
| 318 | struct list_head head; | 320 | struct list_head head; |
diff --git a/include/drm/i830_drm.h b/include/drm/i830_drm.h index 4b00d2dd4f68..61315c29b8f3 100644 --- a/include/drm/i830_drm.h +++ b/include/drm/i830_drm.h | |||
| @@ -264,20 +264,20 @@ typedef struct _drm_i830_sarea { | |||
| 264 | #define DRM_I830_GETPARAM 0x0c | 264 | #define DRM_I830_GETPARAM 0x0c |
| 265 | #define DRM_I830_SETPARAM 0x0d | 265 | #define DRM_I830_SETPARAM 0x0d |
| 266 | 266 | ||
| 267 | #define DRM_IOCTL_I830_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_IOCTL_I830_INIT, drm_i830_init_t) | 267 | #define DRM_IOCTL_I830_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_I830_INIT, drm_i830_init_t) |
| 268 | #define DRM_IOCTL_I830_VERTEX DRM_IOW( DRM_COMMAND_BASE + DRM_IOCTL_I830_VERTEX, drm_i830_vertex_t) | 268 | #define DRM_IOCTL_I830_VERTEX DRM_IOW( DRM_COMMAND_BASE + DRM_I830_VERTEX, drm_i830_vertex_t) |
| 269 | #define DRM_IOCTL_I830_CLEAR DRM_IOW( DRM_COMMAND_BASE + DRM_IOCTL_I830_CLEAR, drm_i830_clear_t) | 269 | #define DRM_IOCTL_I830_CLEAR DRM_IOW( DRM_COMMAND_BASE + DRM_I830_CLEAR, drm_i830_clear_t) |
| 270 | #define DRM_IOCTL_I830_FLUSH DRM_IO ( DRM_COMMAND_BASE + DRM_IOCTL_I830_FLUSH) | 270 | #define DRM_IOCTL_I830_FLUSH DRM_IO ( DRM_COMMAND_BASE + DRM_I830_FLUSH) |
| 271 | #define DRM_IOCTL_I830_GETAGE DRM_IO ( DRM_COMMAND_BASE + DRM_IOCTL_I830_GETAGE) | 271 | #define DRM_IOCTL_I830_GETAGE DRM_IO ( DRM_COMMAND_BASE + DRM_I830_GETAGE) |
| 272 | #define DRM_IOCTL_I830_GETBUF DRM_IOWR(DRM_COMMAND_BASE + DRM_IOCTL_I830_GETBUF, drm_i830_dma_t) | 272 | #define DRM_IOCTL_I830_GETBUF DRM_IOWR(DRM_COMMAND_BASE + DRM_I830_GETBUF, drm_i830_dma_t) |
| 273 | #define DRM_IOCTL_I830_SWAP DRM_IO ( DRM_COMMAND_BASE + DRM_IOCTL_I830_SWAP) | 273 | #define DRM_IOCTL_I830_SWAP DRM_IO ( DRM_COMMAND_BASE + DRM_I830_SWAP) |
| 274 | #define DRM_IOCTL_I830_COPY DRM_IOW( DRM_COMMAND_BASE + DRM_IOCTL_I830_COPY, drm_i830_copy_t) | 274 | #define DRM_IOCTL_I830_COPY DRM_IOW( DRM_COMMAND_BASE + DRM_I830_COPY, drm_i830_copy_t) |
| 275 | #define DRM_IOCTL_I830_DOCOPY DRM_IO ( DRM_COMMAND_BASE + DRM_IOCTL_I830_DOCOPY) | 275 | #define DRM_IOCTL_I830_DOCOPY DRM_IO ( DRM_COMMAND_BASE + DRM_I830_DOCOPY) |
| 276 | #define DRM_IOCTL_I830_FLIP DRM_IO ( DRM_COMMAND_BASE + DRM_IOCTL_I830_FLIP) | 276 | #define DRM_IOCTL_I830_FLIP DRM_IO ( DRM_COMMAND_BASE + DRM_I830_FLIP) |
| 277 | #define DRM_IOCTL_I830_IRQ_EMIT DRM_IOWR(DRM_COMMAND_BASE + DRM_IOCTL_I830_IRQ_EMIT, drm_i830_irq_emit_t) | 277 | #define DRM_IOCTL_I830_IRQ_EMIT DRM_IOWR(DRM_COMMAND_BASE + DRM_I830_IRQ_EMIT, drm_i830_irq_emit_t) |
| 278 | #define DRM_IOCTL_I830_IRQ_WAIT DRM_IOW( DRM_COMMAND_BASE + DRM_IOCTL_I830_IRQ_WAIT, drm_i830_irq_wait_t) | 278 | #define DRM_IOCTL_I830_IRQ_WAIT DRM_IOW( DRM_COMMAND_BASE + DRM_I830_IRQ_WAIT, drm_i830_irq_wait_t) |
| 279 | #define DRM_IOCTL_I830_GETPARAM DRM_IOWR(DRM_COMMAND_BASE + DRM_IOCTL_I830_GETPARAM, drm_i830_getparam_t) | 279 | #define DRM_IOCTL_I830_GETPARAM DRM_IOWR(DRM_COMMAND_BASE + DRM_I830_GETPARAM, drm_i830_getparam_t) |
| 280 | #define DRM_IOCTL_I830_SETPARAM DRM_IOWR(DRM_COMMAND_BASE + DRM_IOCTL_I830_SETPARAM, drm_i830_setparam_t) | 280 | #define DRM_IOCTL_I830_SETPARAM DRM_IOWR(DRM_COMMAND_BASE + DRM_I830_SETPARAM, drm_i830_setparam_t) |
| 281 | 281 | ||
| 282 | typedef struct _drm_i830_clear { | 282 | typedef struct _drm_i830_clear { |
| 283 | int clear_color; | 283 | int clear_color; |
diff --git a/include/drm/i915_drm.h b/include/drm/i915_drm.h index 8f8b072c4c7b..e41c74facb6a 100644 --- a/include/drm/i915_drm.h +++ b/include/drm/i915_drm.h | |||
| @@ -215,6 +215,7 @@ typedef struct _drm_i915_sarea { | |||
| 215 | #define DRM_IOCTL_I915_SET_VBLANK_PIPE DRM_IOW( DRM_COMMAND_BASE + DRM_I915_SET_VBLANK_PIPE, drm_i915_vblank_pipe_t) | 215 | #define DRM_IOCTL_I915_SET_VBLANK_PIPE DRM_IOW( DRM_COMMAND_BASE + DRM_I915_SET_VBLANK_PIPE, drm_i915_vblank_pipe_t) |
| 216 | #define DRM_IOCTL_I915_GET_VBLANK_PIPE DRM_IOR( DRM_COMMAND_BASE + DRM_I915_GET_VBLANK_PIPE, drm_i915_vblank_pipe_t) | 216 | #define DRM_IOCTL_I915_GET_VBLANK_PIPE DRM_IOR( DRM_COMMAND_BASE + DRM_I915_GET_VBLANK_PIPE, drm_i915_vblank_pipe_t) |
| 217 | #define DRM_IOCTL_I915_VBLANK_SWAP DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_VBLANK_SWAP, drm_i915_vblank_swap_t) | 217 | #define DRM_IOCTL_I915_VBLANK_SWAP DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_VBLANK_SWAP, drm_i915_vblank_swap_t) |
| 218 | #define DRM_IOCTL_I915_HWS_ADDR DRM_IOW(DRM_COMMAND_BASE + DRM_I915_HWS_ADDR, struct drm_i915_gem_init) | ||
| 218 | #define DRM_IOCTL_I915_GEM_INIT DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_INIT, struct drm_i915_gem_init) | 219 | #define DRM_IOCTL_I915_GEM_INIT DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_INIT, struct drm_i915_gem_init) |
| 219 | #define DRM_IOCTL_I915_GEM_EXECBUFFER DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_EXECBUFFER, struct drm_i915_gem_execbuffer) | 220 | #define DRM_IOCTL_I915_GEM_EXECBUFFER DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_EXECBUFFER, struct drm_i915_gem_execbuffer) |
| 220 | #define DRM_IOCTL_I915_GEM_EXECBUFFER2 DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_EXECBUFFER2, struct drm_i915_gem_execbuffer2) | 221 | #define DRM_IOCTL_I915_GEM_EXECBUFFER2 DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_EXECBUFFER2, struct drm_i915_gem_execbuffer2) |
diff --git a/include/drm/mga_drm.h b/include/drm/mga_drm.h index 3ffbc4798afa..c16097f99be0 100644 --- a/include/drm/mga_drm.h +++ b/include/drm/mga_drm.h | |||
| @@ -248,7 +248,7 @@ typedef struct _drm_mga_sarea { | |||
| 248 | #define DRM_MGA_DMA_BOOTSTRAP 0x0c | 248 | #define DRM_MGA_DMA_BOOTSTRAP 0x0c |
| 249 | 249 | ||
| 250 | #define DRM_IOCTL_MGA_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_MGA_INIT, drm_mga_init_t) | 250 | #define DRM_IOCTL_MGA_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_MGA_INIT, drm_mga_init_t) |
| 251 | #define DRM_IOCTL_MGA_FLUSH DRM_IOW( DRM_COMMAND_BASE + DRM_MGA_FLUSH, drm_lock_t) | 251 | #define DRM_IOCTL_MGA_FLUSH DRM_IOW( DRM_COMMAND_BASE + DRM_MGA_FLUSH, struct drm_lock) |
| 252 | #define DRM_IOCTL_MGA_RESET DRM_IO( DRM_COMMAND_BASE + DRM_MGA_RESET) | 252 | #define DRM_IOCTL_MGA_RESET DRM_IO( DRM_COMMAND_BASE + DRM_MGA_RESET) |
| 253 | #define DRM_IOCTL_MGA_SWAP DRM_IO( DRM_COMMAND_BASE + DRM_MGA_SWAP) | 253 | #define DRM_IOCTL_MGA_SWAP DRM_IO( DRM_COMMAND_BASE + DRM_MGA_SWAP) |
| 254 | #define DRM_IOCTL_MGA_CLEAR DRM_IOW( DRM_COMMAND_BASE + DRM_MGA_CLEAR, drm_mga_clear_t) | 254 | #define DRM_IOCTL_MGA_CLEAR DRM_IOW( DRM_COMMAND_BASE + DRM_MGA_CLEAR, drm_mga_clear_t) |
diff --git a/include/drm/nouveau_drm.h b/include/drm/nouveau_drm.h index fe917dee723a..01a714119506 100644 --- a/include/drm/nouveau_drm.h +++ b/include/drm/nouveau_drm.h | |||
| @@ -197,4 +197,17 @@ struct drm_nouveau_sarea { | |||
| 197 | #define DRM_NOUVEAU_GEM_CPU_FINI 0x43 | 197 | #define DRM_NOUVEAU_GEM_CPU_FINI 0x43 |
| 198 | #define DRM_NOUVEAU_GEM_INFO 0x44 | 198 | #define DRM_NOUVEAU_GEM_INFO 0x44 |
| 199 | 199 | ||
| 200 | #define DRM_IOCTL_NOUVEAU_GETPARAM DRM_IOWR(DRM_COMMAND_BASE + DRM_NOUVEAU_GETPARAM, struct drm_nouveau_getparam) | ||
| 201 | #define DRM_IOCTL_NOUVEAU_SETPARAM DRM_IOWR(DRM_COMMAND_BASE + DRM_NOUVEAU_SETPARAM, struct drm_nouveau_setparam) | ||
| 202 | #define DRM_IOCTL_NOUVEAU_CHANNEL_ALLOC DRM_IOWR(DRM_COMMAND_BASE + DRM_NOUVEAU_CHANNEL_ALLOC, struct drm_nouveau_channel_alloc) | ||
| 203 | #define DRM_IOCTL_NOUVEAU_CHANNEL_FREE DRM_IOW (DRM_COMMAND_BASE + DRM_NOUVEAU_CHANNEL_FREE, struct drm_nouveau_channel_free) | ||
| 204 | #define DRM_IOCTL_NOUVEAU_GROBJ_ALLOC DRM_IOW (DRM_COMMAND_BASE + DRM_NOUVEAU_GROBJ_ALLOC, struct drm_nouveau_grobj_alloc) | ||
| 205 | #define DRM_IOCTL_NOUVEAU_NOTIFIEROBJ_ALLOC DRM_IOWR(DRM_COMMAND_BASE + DRM_NOUVEAU_NOTIFIEROBJ_ALLOC, struct drm_nouveau_notifierobj_alloc) | ||
| 206 | #define DRM_IOCTL_NOUVEAU_GPUOBJ_FREE DRM_IOW (DRM_COMMAND_BASE + DRM_NOUVEAU_GPUOBJ_FREE, struct drm_nouveau_gpuobj_free) | ||
| 207 | #define DRM_IOCTL_NOUVEAU_GEM_NEW DRM_IOWR(DRM_COMMAND_BASE + DRM_NOUVEAU_GEM_NEW, struct drm_nouveau_gem_new) | ||
| 208 | #define DRM_IOCTL_NOUVEAU_GEM_PUSHBUF DRM_IOWR(DRM_COMMAND_BASE + DRM_NOUVEAU_GEM_PUSHBUF, struct drm_nouveau_gem_pushbuf) | ||
| 209 | #define DRM_IOCTL_NOUVEAU_GEM_CPU_PREP DRM_IOW (DRM_COMMAND_BASE + DRM_NOUVEAU_GEM_CPU_PREP, struct drm_nouveau_gem_cpu_prep) | ||
| 210 | #define DRM_IOCTL_NOUVEAU_GEM_CPU_FINI DRM_IOW (DRM_COMMAND_BASE + DRM_NOUVEAU_GEM_CPU_FINI, struct drm_nouveau_gem_cpu_fini) | ||
| 211 | #define DRM_IOCTL_NOUVEAU_GEM_INFO DRM_IOWR(DRM_COMMAND_BASE + DRM_NOUVEAU_GEM_INFO, struct drm_nouveau_gem_info) | ||
| 212 | |||
| 200 | #endif /* __NOUVEAU_DRM_H__ */ | 213 | #endif /* __NOUVEAU_DRM_H__ */ |
diff --git a/include/drm/radeon_drm.h b/include/drm/radeon_drm.h index 0acaf8f91437..10f8b53bdd40 100644 --- a/include/drm/radeon_drm.h +++ b/include/drm/radeon_drm.h | |||
| @@ -547,8 +547,8 @@ typedef struct { | |||
| 547 | #define DRM_IOCTL_RADEON_GEM_WAIT_IDLE DRM_IOW(DRM_COMMAND_BASE + DRM_RADEON_GEM_WAIT_IDLE, struct drm_radeon_gem_wait_idle) | 547 | #define DRM_IOCTL_RADEON_GEM_WAIT_IDLE DRM_IOW(DRM_COMMAND_BASE + DRM_RADEON_GEM_WAIT_IDLE, struct drm_radeon_gem_wait_idle) |
| 548 | #define DRM_IOCTL_RADEON_CS DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_CS, struct drm_radeon_cs) | 548 | #define DRM_IOCTL_RADEON_CS DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_CS, struct drm_radeon_cs) |
| 549 | #define DRM_IOCTL_RADEON_INFO DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_INFO, struct drm_radeon_info) | 549 | #define DRM_IOCTL_RADEON_INFO DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_INFO, struct drm_radeon_info) |
| 550 | #define DRM_IOCTL_RADEON_SET_TILING DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_GEM_SET_TILING, struct drm_radeon_gem_set_tiling) | 550 | #define DRM_IOCTL_RADEON_GEM_SET_TILING DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_GEM_SET_TILING, struct drm_radeon_gem_set_tiling) |
| 551 | #define DRM_IOCTL_RADEON_GET_TILING DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_GEM_GET_TILING, struct drm_radeon_gem_get_tiling) | 551 | #define DRM_IOCTL_RADEON_GEM_GET_TILING DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_GEM_GET_TILING, struct drm_radeon_gem_get_tiling) |
| 552 | #define DRM_IOCTL_RADEON_GEM_BUSY DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_GEM_BUSY, struct drm_radeon_gem_busy) | 552 | #define DRM_IOCTL_RADEON_GEM_BUSY DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_GEM_BUSY, struct drm_radeon_gem_busy) |
| 553 | 553 | ||
| 554 | typedef struct drm_radeon_init { | 554 | typedef struct drm_radeon_init { |
diff --git a/include/drm/savage_drm.h b/include/drm/savage_drm.h index 8a576ef01821..4863cf6bf96f 100644 --- a/include/drm/savage_drm.h +++ b/include/drm/savage_drm.h | |||
| @@ -63,10 +63,10 @@ typedef struct _drm_savage_sarea { | |||
| 63 | #define DRM_SAVAGE_BCI_EVENT_EMIT 0x02 | 63 | #define DRM_SAVAGE_BCI_EVENT_EMIT 0x02 |
| 64 | #define DRM_SAVAGE_BCI_EVENT_WAIT 0x03 | 64 | #define DRM_SAVAGE_BCI_EVENT_WAIT 0x03 |
| 65 | 65 | ||
| 66 | #define DRM_IOCTL_SAVAGE_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_SAVAGE_BCI_INIT, drm_savage_init_t) | 66 | #define DRM_IOCTL_SAVAGE_BCI_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_SAVAGE_BCI_INIT, drm_savage_init_t) |
| 67 | #define DRM_IOCTL_SAVAGE_CMDBUF DRM_IOW( DRM_COMMAND_BASE + DRM_SAVAGE_BCI_CMDBUF, drm_savage_cmdbuf_t) | 67 | #define DRM_IOCTL_SAVAGE_BCI_CMDBUF DRM_IOW( DRM_COMMAND_BASE + DRM_SAVAGE_BCI_CMDBUF, drm_savage_cmdbuf_t) |
| 68 | #define DRM_IOCTL_SAVAGE_EVENT_EMIT DRM_IOWR(DRM_COMMAND_BASE + DRM_SAVAGE_BCI_EVENT_EMIT, drm_savage_event_emit_t) | 68 | #define DRM_IOCTL_SAVAGE_BCI_EVENT_EMIT DRM_IOWR(DRM_COMMAND_BASE + DRM_SAVAGE_BCI_EVENT_EMIT, drm_savage_event_emit_t) |
| 69 | #define DRM_IOCTL_SAVAGE_EVENT_WAIT DRM_IOW( DRM_COMMAND_BASE + DRM_SAVAGE_BCI_EVENT_WAIT, drm_savage_event_wait_t) | 69 | #define DRM_IOCTL_SAVAGE_BCI_EVENT_WAIT DRM_IOW( DRM_COMMAND_BASE + DRM_SAVAGE_BCI_EVENT_WAIT, drm_savage_event_wait_t) |
| 70 | 70 | ||
| 71 | #define SAVAGE_DMA_PCI 1 | 71 | #define SAVAGE_DMA_PCI 1 |
| 72 | #define SAVAGE_DMA_AGP 3 | 72 | #define SAVAGE_DMA_AGP 3 |
diff --git a/include/linux/if_ether.h b/include/linux/if_ether.h index c831467774d0..bed7a4682b90 100644 --- a/include/linux/if_ether.h +++ b/include/linux/if_ether.h | |||
| @@ -119,7 +119,7 @@ struct ethhdr { | |||
| 119 | unsigned char h_dest[ETH_ALEN]; /* destination eth addr */ | 119 | unsigned char h_dest[ETH_ALEN]; /* destination eth addr */ |
| 120 | unsigned char h_source[ETH_ALEN]; /* source ether addr */ | 120 | unsigned char h_source[ETH_ALEN]; /* source ether addr */ |
| 121 | __be16 h_proto; /* packet type ID field */ | 121 | __be16 h_proto; /* packet type ID field */ |
| 122 | } __packed; | 122 | } __attribute__((packed)); |
| 123 | 123 | ||
| 124 | #ifdef __KERNEL__ | 124 | #ifdef __KERNEL__ |
| 125 | #include <linux/skbuff.h> | 125 | #include <linux/skbuff.h> |
diff --git a/include/linux/if_fddi.h b/include/linux/if_fddi.h index 9947c39e62f6..e6dc11e7f9a5 100644 --- a/include/linux/if_fddi.h +++ b/include/linux/if_fddi.h | |||
| @@ -67,7 +67,7 @@ struct fddi_8022_1_hdr { | |||
| 67 | __u8 dsap; /* destination service access point */ | 67 | __u8 dsap; /* destination service access point */ |
| 68 | __u8 ssap; /* source service access point */ | 68 | __u8 ssap; /* source service access point */ |
| 69 | __u8 ctrl; /* control byte #1 */ | 69 | __u8 ctrl; /* control byte #1 */ |
| 70 | } __packed; | 70 | } __attribute__((packed)); |
| 71 | 71 | ||
| 72 | /* Define 802.2 Type 2 header */ | 72 | /* Define 802.2 Type 2 header */ |
| 73 | struct fddi_8022_2_hdr { | 73 | struct fddi_8022_2_hdr { |
| @@ -75,7 +75,7 @@ struct fddi_8022_2_hdr { | |||
| 75 | __u8 ssap; /* source service access point */ | 75 | __u8 ssap; /* source service access point */ |
| 76 | __u8 ctrl_1; /* control byte #1 */ | 76 | __u8 ctrl_1; /* control byte #1 */ |
| 77 | __u8 ctrl_2; /* control byte #2 */ | 77 | __u8 ctrl_2; /* control byte #2 */ |
| 78 | } __packed; | 78 | } __attribute__((packed)); |
| 79 | 79 | ||
| 80 | /* Define 802.2 SNAP header */ | 80 | /* Define 802.2 SNAP header */ |
| 81 | #define FDDI_K_OUI_LEN 3 | 81 | #define FDDI_K_OUI_LEN 3 |
| @@ -85,7 +85,7 @@ struct fddi_snap_hdr { | |||
| 85 | __u8 ctrl; /* always 0x03 */ | 85 | __u8 ctrl; /* always 0x03 */ |
| 86 | __u8 oui[FDDI_K_OUI_LEN]; /* organizational universal id */ | 86 | __u8 oui[FDDI_K_OUI_LEN]; /* organizational universal id */ |
| 87 | __be16 ethertype; /* packet type ID field */ | 87 | __be16 ethertype; /* packet type ID field */ |
| 88 | } __packed; | 88 | } __attribute__((packed)); |
| 89 | 89 | ||
| 90 | /* Define FDDI LLC frame header */ | 90 | /* Define FDDI LLC frame header */ |
| 91 | struct fddihdr { | 91 | struct fddihdr { |
| @@ -98,7 +98,7 @@ struct fddihdr { | |||
| 98 | struct fddi_8022_2_hdr llc_8022_2; | 98 | struct fddi_8022_2_hdr llc_8022_2; |
| 99 | struct fddi_snap_hdr llc_snap; | 99 | struct fddi_snap_hdr llc_snap; |
| 100 | } hdr; | 100 | } hdr; |
| 101 | } __packed; | 101 | } __attribute__((packed)); |
| 102 | 102 | ||
| 103 | #ifdef __KERNEL__ | 103 | #ifdef __KERNEL__ |
| 104 | #include <linux/netdevice.h> | 104 | #include <linux/netdevice.h> |
diff --git a/include/linux/if_hippi.h b/include/linux/if_hippi.h index 5fe5f307c6f5..cdc049f1829a 100644 --- a/include/linux/if_hippi.h +++ b/include/linux/if_hippi.h | |||
| @@ -104,7 +104,7 @@ struct hippi_fp_hdr { | |||
| 104 | __be32 fixed; | 104 | __be32 fixed; |
| 105 | #endif | 105 | #endif |
| 106 | __be32 d2_size; | 106 | __be32 d2_size; |
| 107 | } __packed; | 107 | } __attribute__((packed)); |
| 108 | 108 | ||
| 109 | struct hippi_le_hdr { | 109 | struct hippi_le_hdr { |
| 110 | #if defined (__BIG_ENDIAN_BITFIELD) | 110 | #if defined (__BIG_ENDIAN_BITFIELD) |
| @@ -129,7 +129,7 @@ struct hippi_le_hdr { | |||
| 129 | __u8 daddr[HIPPI_ALEN]; | 129 | __u8 daddr[HIPPI_ALEN]; |
| 130 | __u16 locally_administered; | 130 | __u16 locally_administered; |
| 131 | __u8 saddr[HIPPI_ALEN]; | 131 | __u8 saddr[HIPPI_ALEN]; |
| 132 | } __packed; | 132 | } __attribute__((packed)); |
| 133 | 133 | ||
| 134 | #define HIPPI_OUI_LEN 3 | 134 | #define HIPPI_OUI_LEN 3 |
| 135 | /* | 135 | /* |
| @@ -142,12 +142,12 @@ struct hippi_snap_hdr { | |||
| 142 | __u8 ctrl; /* always 0x03 */ | 142 | __u8 ctrl; /* always 0x03 */ |
| 143 | __u8 oui[HIPPI_OUI_LEN]; /* organizational universal id (zero)*/ | 143 | __u8 oui[HIPPI_OUI_LEN]; /* organizational universal id (zero)*/ |
| 144 | __be16 ethertype; /* packet type ID field */ | 144 | __be16 ethertype; /* packet type ID field */ |
| 145 | } __packed; | 145 | } __attribute__((packed)); |
| 146 | 146 | ||
| 147 | struct hippi_hdr { | 147 | struct hippi_hdr { |
| 148 | struct hippi_fp_hdr fp; | 148 | struct hippi_fp_hdr fp; |
| 149 | struct hippi_le_hdr le; | 149 | struct hippi_le_hdr le; |
| 150 | struct hippi_snap_hdr snap; | 150 | struct hippi_snap_hdr snap; |
| 151 | } __packed; | 151 | } __attribute__((packed)); |
| 152 | 152 | ||
| 153 | #endif /* _LINUX_IF_HIPPI_H */ | 153 | #endif /* _LINUX_IF_HIPPI_H */ |
diff --git a/include/linux/if_pppox.h b/include/linux/if_pppox.h index 1925e0c3f162..27741e05446f 100644 --- a/include/linux/if_pppox.h +++ b/include/linux/if_pppox.h | |||
| @@ -59,7 +59,7 @@ struct sockaddr_pppox { | |||
| 59 | union{ | 59 | union{ |
| 60 | struct pppoe_addr pppoe; | 60 | struct pppoe_addr pppoe; |
| 61 | }sa_addr; | 61 | }sa_addr; |
| 62 | } __packed; | 62 | } __attribute__((packed)); |
| 63 | 63 | ||
| 64 | /* The use of the above union isn't viable because the size of this | 64 | /* The use of the above union isn't viable because the size of this |
| 65 | * struct must stay fixed over time -- applications use sizeof(struct | 65 | * struct must stay fixed over time -- applications use sizeof(struct |
| @@ -70,7 +70,7 @@ struct sockaddr_pppol2tp { | |||
| 70 | sa_family_t sa_family; /* address family, AF_PPPOX */ | 70 | sa_family_t sa_family; /* address family, AF_PPPOX */ |
| 71 | unsigned int sa_protocol; /* protocol identifier */ | 71 | unsigned int sa_protocol; /* protocol identifier */ |
| 72 | struct pppol2tp_addr pppol2tp; | 72 | struct pppol2tp_addr pppol2tp; |
| 73 | } __packed; | 73 | } __attribute__((packed)); |
| 74 | 74 | ||
| 75 | /* The L2TPv3 protocol changes tunnel and session ids from 16 to 32 | 75 | /* The L2TPv3 protocol changes tunnel and session ids from 16 to 32 |
| 76 | * bits. So we need a different sockaddr structure. | 76 | * bits. So we need a different sockaddr structure. |
| @@ -79,7 +79,7 @@ struct sockaddr_pppol2tpv3 { | |||
| 79 | sa_family_t sa_family; /* address family, AF_PPPOX */ | 79 | sa_family_t sa_family; /* address family, AF_PPPOX */ |
| 80 | unsigned int sa_protocol; /* protocol identifier */ | 80 | unsigned int sa_protocol; /* protocol identifier */ |
| 81 | struct pppol2tpv3_addr pppol2tp; | 81 | struct pppol2tpv3_addr pppol2tp; |
| 82 | } __packed; | 82 | } __attribute__((packed)); |
| 83 | 83 | ||
| 84 | /********************************************************************* | 84 | /********************************************************************* |
| 85 | * | 85 | * |
| @@ -101,7 +101,7 @@ struct pppoe_tag { | |||
| 101 | __be16 tag_type; | 101 | __be16 tag_type; |
| 102 | __be16 tag_len; | 102 | __be16 tag_len; |
| 103 | char tag_data[0]; | 103 | char tag_data[0]; |
| 104 | } __attribute ((packed)); | 104 | } __attribute__ ((packed)); |
| 105 | 105 | ||
| 106 | /* Tag identifiers */ | 106 | /* Tag identifiers */ |
| 107 | #define PTT_EOL __cpu_to_be16(0x0000) | 107 | #define PTT_EOL __cpu_to_be16(0x0000) |
| @@ -129,7 +129,7 @@ struct pppoe_hdr { | |||
| 129 | __be16 sid; | 129 | __be16 sid; |
| 130 | __be16 length; | 130 | __be16 length; |
| 131 | struct pppoe_tag tag[0]; | 131 | struct pppoe_tag tag[0]; |
| 132 | } __packed; | 132 | } __attribute__((packed)); |
| 133 | 133 | ||
| 134 | /* Length of entire PPPoE + PPP header */ | 134 | /* Length of entire PPPoE + PPP header */ |
| 135 | #define PPPOE_SES_HLEN 8 | 135 | #define PPPOE_SES_HLEN 8 |
diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h index ab9e9e89e407..e62683ba88e6 100644 --- a/include/linux/ipv6.h +++ b/include/linux/ipv6.h | |||
| @@ -58,7 +58,7 @@ struct ipv6_opt_hdr { | |||
| 58 | /* | 58 | /* |
| 59 | * TLV encoded option data follows. | 59 | * TLV encoded option data follows. |
| 60 | */ | 60 | */ |
| 61 | } __packed; /* required for some archs */ | 61 | } __attribute__((packed)); /* required for some archs */ |
| 62 | 62 | ||
| 63 | #define ipv6_destopt_hdr ipv6_opt_hdr | 63 | #define ipv6_destopt_hdr ipv6_opt_hdr |
| 64 | #define ipv6_hopopt_hdr ipv6_opt_hdr | 64 | #define ipv6_hopopt_hdr ipv6_opt_hdr |
| @@ -99,7 +99,7 @@ struct ipv6_destopt_hao { | |||
| 99 | __u8 type; | 99 | __u8 type; |
| 100 | __u8 length; | 100 | __u8 length; |
| 101 | struct in6_addr addr; | 101 | struct in6_addr addr; |
| 102 | } __packed; | 102 | } __attribute__((packed)); |
| 103 | 103 | ||
| 104 | /* | 104 | /* |
| 105 | * IPv6 fixed header | 105 | * IPv6 fixed header |
diff --git a/include/linux/kobject.h b/include/linux/kobject.h index cf343a852534..7950a37a7146 100644 --- a/include/linux/kobject.h +++ b/include/linux/kobject.h | |||
| @@ -22,6 +22,7 @@ | |||
| 22 | #include <linux/compiler.h> | 22 | #include <linux/compiler.h> |
| 23 | #include <linux/spinlock.h> | 23 | #include <linux/spinlock.h> |
| 24 | #include <linux/kref.h> | 24 | #include <linux/kref.h> |
| 25 | #include <linux/kobject_ns.h> | ||
| 25 | #include <linux/kernel.h> | 26 | #include <linux/kernel.h> |
| 26 | #include <linux/wait.h> | 27 | #include <linux/wait.h> |
| 27 | #include <asm/atomic.h> | 28 | #include <asm/atomic.h> |
| @@ -136,42 +137,8 @@ struct kobj_attribute { | |||
| 136 | 137 | ||
| 137 | extern const struct sysfs_ops kobj_sysfs_ops; | 138 | extern const struct sysfs_ops kobj_sysfs_ops; |
| 138 | 139 | ||
| 139 | /* | ||
| 140 | * Namespace types which are used to tag kobjects and sysfs entries. | ||
| 141 | * Network namespace will likely be the first. | ||
| 142 | */ | ||
| 143 | enum kobj_ns_type { | ||
| 144 | KOBJ_NS_TYPE_NONE = 0, | ||
| 145 | KOBJ_NS_TYPE_NET, | ||
| 146 | KOBJ_NS_TYPES | ||
| 147 | }; | ||
| 148 | |||
| 149 | struct sock; | 140 | struct sock; |
| 150 | 141 | ||
| 151 | /* | ||
| 152 | * Callbacks so sysfs can determine namespaces | ||
| 153 | * @current_ns: return calling task's namespace | ||
| 154 | * @netlink_ns: return namespace to which a sock belongs (right?) | ||
| 155 | * @initial_ns: return the initial namespace (i.e. init_net_ns) | ||
| 156 | */ | ||
| 157 | struct kobj_ns_type_operations { | ||
| 158 | enum kobj_ns_type type; | ||
| 159 | const void *(*current_ns)(void); | ||
| 160 | const void *(*netlink_ns)(struct sock *sk); | ||
| 161 | const void *(*initial_ns)(void); | ||
| 162 | }; | ||
| 163 | |||
| 164 | int kobj_ns_type_register(const struct kobj_ns_type_operations *ops); | ||
| 165 | int kobj_ns_type_registered(enum kobj_ns_type type); | ||
| 166 | const struct kobj_ns_type_operations *kobj_child_ns_ops(struct kobject *parent); | ||
| 167 | const struct kobj_ns_type_operations *kobj_ns_ops(struct kobject *kobj); | ||
| 168 | |||
| 169 | const void *kobj_ns_current(enum kobj_ns_type type); | ||
| 170 | const void *kobj_ns_netlink(enum kobj_ns_type type, struct sock *sk); | ||
| 171 | const void *kobj_ns_initial(enum kobj_ns_type type); | ||
| 172 | void kobj_ns_exit(enum kobj_ns_type type, const void *ns); | ||
| 173 | |||
| 174 | |||
| 175 | /** | 142 | /** |
| 176 | * struct kset - a set of kobjects of a specific type, belonging to a specific subsystem. | 143 | * struct kset - a set of kobjects of a specific type, belonging to a specific subsystem. |
| 177 | * | 144 | * |
diff --git a/include/linux/kobject_ns.h b/include/linux/kobject_ns.h new file mode 100644 index 000000000000..82cb5bf461fb --- /dev/null +++ b/include/linux/kobject_ns.h | |||
| @@ -0,0 +1,56 @@ | |||
| 1 | /* Kernel object name space definitions | ||
| 2 | * | ||
| 3 | * Copyright (c) 2002-2003 Patrick Mochel | ||
| 4 | * Copyright (c) 2002-2003 Open Source Development Labs | ||
| 5 | * Copyright (c) 2006-2008 Greg Kroah-Hartman <greg@kroah.com> | ||
| 6 | * Copyright (c) 2006-2008 Novell Inc. | ||
| 7 | * | ||
| 8 | * Split from kobject.h by David Howells (dhowells@redhat.com) | ||
| 9 | * | ||
| 10 | * This file is released under the GPLv2. | ||
| 11 | * | ||
| 12 | * Please read Documentation/kobject.txt before using the kobject | ||
| 13 | * interface, ESPECIALLY the parts about reference counts and object | ||
| 14 | * destructors. | ||
| 15 | */ | ||
| 16 | |||
| 17 | #ifndef _LINUX_KOBJECT_NS_H | ||
| 18 | #define _LINUX_KOBJECT_NS_H | ||
| 19 | |||
| 20 | struct sock; | ||
| 21 | struct kobject; | ||
| 22 | |||
| 23 | /* | ||
| 24 | * Namespace types which are used to tag kobjects and sysfs entries. | ||
| 25 | * Network namespace will likely be the first. | ||
| 26 | */ | ||
| 27 | enum kobj_ns_type { | ||
| 28 | KOBJ_NS_TYPE_NONE = 0, | ||
| 29 | KOBJ_NS_TYPE_NET, | ||
| 30 | KOBJ_NS_TYPES | ||
| 31 | }; | ||
| 32 | |||
| 33 | /* | ||
| 34 | * Callbacks so sysfs can determine namespaces | ||
| 35 | * @current_ns: return calling task's namespace | ||
| 36 | * @netlink_ns: return namespace to which a sock belongs (right?) | ||
| 37 | * @initial_ns: return the initial namespace (i.e. init_net_ns) | ||
| 38 | */ | ||
| 39 | struct kobj_ns_type_operations { | ||
| 40 | enum kobj_ns_type type; | ||
| 41 | const void *(*current_ns)(void); | ||
| 42 | const void *(*netlink_ns)(struct sock *sk); | ||
| 43 | const void *(*initial_ns)(void); | ||
| 44 | }; | ||
| 45 | |||
| 46 | int kobj_ns_type_register(const struct kobj_ns_type_operations *ops); | ||
| 47 | int kobj_ns_type_registered(enum kobj_ns_type type); | ||
| 48 | const struct kobj_ns_type_operations *kobj_child_ns_ops(struct kobject *parent); | ||
| 49 | const struct kobj_ns_type_operations *kobj_ns_ops(struct kobject *kobj); | ||
| 50 | |||
| 51 | const void *kobj_ns_current(enum kobj_ns_type type); | ||
| 52 | const void *kobj_ns_netlink(enum kobj_ns_type type, struct sock *sk); | ||
| 53 | const void *kobj_ns_initial(enum kobj_ns_type type); | ||
| 54 | void kobj_ns_exit(enum kobj_ns_type type, const void *ns); | ||
| 55 | |||
| 56 | #endif /* _LINUX_KOBJECT_NS_H */ | ||
diff --git a/include/linux/mm.h b/include/linux/mm.h index 709f6728fc90..831c693416b2 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h | |||
| @@ -78,7 +78,11 @@ extern unsigned int kobjsize(const void *objp); | |||
| 78 | #define VM_MAYSHARE 0x00000080 | 78 | #define VM_MAYSHARE 0x00000080 |
| 79 | 79 | ||
| 80 | #define VM_GROWSDOWN 0x00000100 /* general info on the segment */ | 80 | #define VM_GROWSDOWN 0x00000100 /* general info on the segment */ |
| 81 | #if defined(CONFIG_STACK_GROWSUP) || defined(CONFIG_IA64) | ||
| 81 | #define VM_GROWSUP 0x00000200 | 82 | #define VM_GROWSUP 0x00000200 |
| 83 | #else | ||
| 84 | #define VM_GROWSUP 0x00000000 | ||
| 85 | #endif | ||
| 82 | #define VM_PFNMAP 0x00000400 /* Page-ranges managed without "struct page", just pure PFN */ | 86 | #define VM_PFNMAP 0x00000400 /* Page-ranges managed without "struct page", just pure PFN */ |
| 83 | #define VM_DENYWRITE 0x00000800 /* ETXTBSY on write attempts.. */ | 87 | #define VM_DENYWRITE 0x00000800 /* ETXTBSY on write attempts.. */ |
| 84 | 88 | ||
| @@ -1330,8 +1334,10 @@ unsigned long ra_submit(struct file_ra_state *ra, | |||
| 1330 | 1334 | ||
| 1331 | /* Do stack extension */ | 1335 | /* Do stack extension */ |
| 1332 | extern int expand_stack(struct vm_area_struct *vma, unsigned long address); | 1336 | extern int expand_stack(struct vm_area_struct *vma, unsigned long address); |
| 1333 | #ifdef CONFIG_IA64 | 1337 | #if VM_GROWSUP |
| 1334 | extern int expand_upwards(struct vm_area_struct *vma, unsigned long address); | 1338 | extern int expand_upwards(struct vm_area_struct *vma, unsigned long address); |
| 1339 | #else | ||
| 1340 | #define expand_upwards(vma, address) do { } while (0) | ||
| 1335 | #endif | 1341 | #endif |
| 1336 | extern int expand_stack_downwards(struct vm_area_struct *vma, | 1342 | extern int expand_stack_downwards(struct vm_area_struct *vma, |
| 1337 | unsigned long address); | 1343 | unsigned long address); |
diff --git a/include/linux/nbd.h b/include/linux/nbd.h index bb58854a8061..d146ca10c0f5 100644 --- a/include/linux/nbd.h +++ b/include/linux/nbd.h | |||
| @@ -88,7 +88,7 @@ struct nbd_request { | |||
| 88 | char handle[8]; | 88 | char handle[8]; |
| 89 | __be64 from; | 89 | __be64 from; |
| 90 | __be32 len; | 90 | __be32 len; |
| 91 | } __packed; | 91 | } __attribute__((packed)); |
| 92 | 92 | ||
| 93 | /* | 93 | /* |
| 94 | * This is the reply packet that nbd-server sends back to the client after | 94 | * This is the reply packet that nbd-server sends back to the client after |
diff --git a/include/linux/ncp.h b/include/linux/ncp.h index 3ace8370e61e..99f0adeeb3f3 100644 --- a/include/linux/ncp.h +++ b/include/linux/ncp.h | |||
| @@ -27,7 +27,7 @@ struct ncp_request_header { | |||
| 27 | __u8 conn_high; | 27 | __u8 conn_high; |
| 28 | __u8 function; | 28 | __u8 function; |
| 29 | __u8 data[0]; | 29 | __u8 data[0]; |
| 30 | } __packed; | 30 | } __attribute__((packed)); |
| 31 | 31 | ||
| 32 | #define NCP_REPLY (0x3333) | 32 | #define NCP_REPLY (0x3333) |
| 33 | #define NCP_WATCHDOG (0x3E3E) | 33 | #define NCP_WATCHDOG (0x3E3E) |
| @@ -42,7 +42,7 @@ struct ncp_reply_header { | |||
| 42 | __u8 completion_code; | 42 | __u8 completion_code; |
| 43 | __u8 connection_state; | 43 | __u8 connection_state; |
| 44 | __u8 data[0]; | 44 | __u8 data[0]; |
| 45 | } __packed; | 45 | } __attribute__((packed)); |
| 46 | 46 | ||
| 47 | #define NCP_VOLNAME_LEN (16) | 47 | #define NCP_VOLNAME_LEN (16) |
| 48 | #define NCP_NUMBER_OF_VOLUMES (256) | 48 | #define NCP_NUMBER_OF_VOLUMES (256) |
| @@ -158,7 +158,7 @@ struct nw_info_struct { | |||
| 158 | #ifdef __KERNEL__ | 158 | #ifdef __KERNEL__ |
| 159 | struct nw_nfs_info nfs; | 159 | struct nw_nfs_info nfs; |
| 160 | #endif | 160 | #endif |
| 161 | } __packed; | 161 | } __attribute__((packed)); |
| 162 | 162 | ||
| 163 | /* modify mask - use with MODIFY_DOS_INFO structure */ | 163 | /* modify mask - use with MODIFY_DOS_INFO structure */ |
| 164 | #define DM_ATTRIBUTES (cpu_to_le32(0x02)) | 164 | #define DM_ATTRIBUTES (cpu_to_le32(0x02)) |
| @@ -190,12 +190,12 @@ struct nw_modify_dos_info { | |||
| 190 | __u16 inheritanceGrantMask; | 190 | __u16 inheritanceGrantMask; |
| 191 | __u16 inheritanceRevokeMask; | 191 | __u16 inheritanceRevokeMask; |
| 192 | __u32 maximumSpace; | 192 | __u32 maximumSpace; |
| 193 | } __packed; | 193 | } __attribute__((packed)); |
| 194 | 194 | ||
| 195 | struct nw_search_sequence { | 195 | struct nw_search_sequence { |
| 196 | __u8 volNumber; | 196 | __u8 volNumber; |
| 197 | __u32 dirBase; | 197 | __u32 dirBase; |
| 198 | __u32 sequence; | 198 | __u32 sequence; |
| 199 | } __packed; | 199 | } __attribute__((packed)); |
| 200 | 200 | ||
| 201 | #endif /* _LINUX_NCP_H */ | 201 | #endif /* _LINUX_NCP_H */ |
diff --git a/include/linux/netfilter/xt_IDLETIMER.h b/include/linux/netfilter/xt_IDLETIMER.h index 3e1aa1be942e..208ae9387331 100644 --- a/include/linux/netfilter/xt_IDLETIMER.h +++ b/include/linux/netfilter/xt_IDLETIMER.h | |||
| @@ -39,7 +39,7 @@ struct idletimer_tg_info { | |||
| 39 | char label[MAX_IDLETIMER_LABEL_SIZE]; | 39 | char label[MAX_IDLETIMER_LABEL_SIZE]; |
| 40 | 40 | ||
| 41 | /* for kernel module internal use only */ | 41 | /* for kernel module internal use only */ |
| 42 | struct idletimer_tg *timer __attribute((aligned(8))); | 42 | struct idletimer_tg *timer __attribute__((aligned(8))); |
| 43 | }; | 43 | }; |
| 44 | 44 | ||
| 45 | #endif | 45 | #endif |
diff --git a/include/linux/netfilter/xt_ipvs.h b/include/linux/netfilter/xt_ipvs.h index 1167aeb7a347..eff34ac18808 100644 --- a/include/linux/netfilter/xt_ipvs.h +++ b/include/linux/netfilter/xt_ipvs.h | |||
| @@ -1,6 +1,8 @@ | |||
| 1 | #ifndef _XT_IPVS_H | 1 | #ifndef _XT_IPVS_H |
| 2 | #define _XT_IPVS_H | 2 | #define _XT_IPVS_H |
| 3 | 3 | ||
| 4 | #include <linux/types.h> | ||
| 5 | |||
| 4 | enum { | 6 | enum { |
| 5 | XT_IPVS_IPVS_PROPERTY = 1 << 0, /* all other options imply this one */ | 7 | XT_IPVS_IPVS_PROPERTY = 1 << 0, /* all other options imply this one */ |
| 6 | XT_IPVS_PROTO = 1 << 1, | 8 | XT_IPVS_PROTO = 1 << 1, |
diff --git a/include/linux/phonet.h b/include/linux/phonet.h index 24426c3d6b5a..76edadf046d3 100644 --- a/include/linux/phonet.h +++ b/include/linux/phonet.h | |||
| @@ -56,7 +56,7 @@ struct phonethdr { | |||
| 56 | __be16 pn_length; | 56 | __be16 pn_length; |
| 57 | __u8 pn_robj; | 57 | __u8 pn_robj; |
| 58 | __u8 pn_sobj; | 58 | __u8 pn_sobj; |
| 59 | } __packed; | 59 | } __attribute__((packed)); |
| 60 | 60 | ||
| 61 | /* Common Phonet payload header */ | 61 | /* Common Phonet payload header */ |
| 62 | struct phonetmsg { | 62 | struct phonetmsg { |
| @@ -98,7 +98,7 @@ struct sockaddr_pn { | |||
| 98 | __u8 spn_dev; | 98 | __u8 spn_dev; |
| 99 | __u8 spn_resource; | 99 | __u8 spn_resource; |
| 100 | __u8 spn_zero[sizeof(struct sockaddr) - sizeof(sa_family_t) - 3]; | 100 | __u8 spn_zero[sizeof(struct sockaddr) - sizeof(sa_family_t) - 3]; |
| 101 | } __packed; | 101 | } __attribute__((packed)); |
| 102 | 102 | ||
| 103 | /* Well known address */ | 103 | /* Well known address */ |
| 104 | #define PN_DEV_PC 0x10 | 104 | #define PN_DEV_PC 0x10 |
diff --git a/include/linux/pxa168_eth.h b/include/linux/pxa168_eth.h new file mode 100644 index 000000000000..18d75e795606 --- /dev/null +++ b/include/linux/pxa168_eth.h | |||
| @@ -0,0 +1,30 @@ | |||
| 1 | /* | ||
| 2 | *pxa168 ethernet platform device data definition file. | ||
| 3 | */ | ||
| 4 | #ifndef __LINUX_PXA168_ETH_H | ||
| 5 | #define __LINUX_PXA168_ETH_H | ||
| 6 | |||
| 7 | struct pxa168_eth_platform_data { | ||
| 8 | int port_number; | ||
| 9 | int phy_addr; | ||
| 10 | |||
| 11 | /* | ||
| 12 | * If speed is 0, then speed and duplex are autonegotiated. | ||
| 13 | */ | ||
| 14 | int speed; /* 0, SPEED_10, SPEED_100 */ | ||
| 15 | int duplex; /* DUPLEX_HALF or DUPLEX_FULL */ | ||
| 16 | |||
| 17 | /* | ||
| 18 | * Override default RX/TX queue sizes if nonzero. | ||
| 19 | */ | ||
| 20 | int rx_queue_size; | ||
| 21 | int tx_queue_size; | ||
| 22 | |||
| 23 | /* | ||
| 24 | * init callback is used for board specific initialization | ||
| 25 | * e.g on Aspenite its used to initialize the PHY transceiver. | ||
| 26 | */ | ||
| 27 | int (*init)(void); | ||
| 28 | }; | ||
| 29 | |||
| 30 | #endif /* __LINUX_PXA168_ETH_H */ | ||
diff --git a/include/linux/rfkill.h b/include/linux/rfkill.h index 4f82326eb294..08c32e4f261a 100644 --- a/include/linux/rfkill.h +++ b/include/linux/rfkill.h | |||
| @@ -81,7 +81,7 @@ struct rfkill_event { | |||
| 81 | __u8 type; | 81 | __u8 type; |
| 82 | __u8 op; | 82 | __u8 op; |
| 83 | __u8 soft, hard; | 83 | __u8 soft, hard; |
| 84 | } __packed; | 84 | } __attribute__((packed)); |
| 85 | 85 | ||
| 86 | /* | 86 | /* |
| 87 | * We are planning to be backward and forward compatible with changes | 87 | * We are planning to be backward and forward compatible with changes |
diff --git a/include/linux/sysfs.h b/include/linux/sysfs.h index 3c92121ba9af..96eb576d82fd 100644 --- a/include/linux/sysfs.h +++ b/include/linux/sysfs.h | |||
| @@ -16,6 +16,7 @@ | |||
| 16 | #include <linux/errno.h> | 16 | #include <linux/errno.h> |
| 17 | #include <linux/list.h> | 17 | #include <linux/list.h> |
| 18 | #include <linux/lockdep.h> | 18 | #include <linux/lockdep.h> |
| 19 | #include <linux/kobject_ns.h> | ||
| 19 | #include <asm/atomic.h> | 20 | #include <asm/atomic.h> |
| 20 | 21 | ||
| 21 | struct kobject; | 22 | struct kobject; |
diff --git a/include/linux/usb/composite.h b/include/linux/usb/composite.h index 890bc1472190..617068134ae8 100644 --- a/include/linux/usb/composite.h +++ b/include/linux/usb/composite.h | |||
| @@ -247,6 +247,7 @@ int usb_add_config(struct usb_composite_dev *, | |||
| 247 | * value; it should return zero on successful initialization. | 247 | * value; it should return zero on successful initialization. |
| 248 | * @unbind: Reverses @bind(); called as a side effect of unregistering | 248 | * @unbind: Reverses @bind(); called as a side effect of unregistering |
| 249 | * this driver. | 249 | * this driver. |
| 250 | * @disconnect: optional driver disconnect method | ||
| 250 | * @suspend: Notifies when the host stops sending USB traffic, | 251 | * @suspend: Notifies when the host stops sending USB traffic, |
| 251 | * after function notifications | 252 | * after function notifications |
| 252 | * @resume: Notifies configuration when the host restarts USB traffic, | 253 | * @resume: Notifies configuration when the host restarts USB traffic, |
diff --git a/include/trace/events/timer.h b/include/trace/events/timer.h index c624126a9c8a..425bcfe56c62 100644 --- a/include/trace/events/timer.h +++ b/include/trace/events/timer.h | |||
| @@ -81,14 +81,16 @@ TRACE_EVENT(timer_expire_entry, | |||
| 81 | TP_STRUCT__entry( | 81 | TP_STRUCT__entry( |
| 82 | __field( void *, timer ) | 82 | __field( void *, timer ) |
| 83 | __field( unsigned long, now ) | 83 | __field( unsigned long, now ) |
| 84 | __field( void *, function) | ||
| 84 | ), | 85 | ), |
| 85 | 86 | ||
| 86 | TP_fast_assign( | 87 | TP_fast_assign( |
| 87 | __entry->timer = timer; | 88 | __entry->timer = timer; |
| 88 | __entry->now = jiffies; | 89 | __entry->now = jiffies; |
| 90 | __entry->function = timer->function; | ||
| 89 | ), | 91 | ), |
| 90 | 92 | ||
| 91 | TP_printk("timer=%p now=%lu", __entry->timer, __entry->now) | 93 | TP_printk("timer=%p function=%pf now=%lu", __entry->timer, __entry->function,__entry->now) |
| 92 | ); | 94 | ); |
| 93 | 95 | ||
| 94 | /** | 96 | /** |
| @@ -200,14 +202,16 @@ TRACE_EVENT(hrtimer_expire_entry, | |||
| 200 | TP_STRUCT__entry( | 202 | TP_STRUCT__entry( |
| 201 | __field( void *, hrtimer ) | 203 | __field( void *, hrtimer ) |
| 202 | __field( s64, now ) | 204 | __field( s64, now ) |
| 205 | __field( void *, function) | ||
| 203 | ), | 206 | ), |
| 204 | 207 | ||
| 205 | TP_fast_assign( | 208 | TP_fast_assign( |
| 206 | __entry->hrtimer = hrtimer; | 209 | __entry->hrtimer = hrtimer; |
| 207 | __entry->now = now->tv64; | 210 | __entry->now = now->tv64; |
| 211 | __entry->function = hrtimer->function; | ||
| 208 | ), | 212 | ), |
| 209 | 213 | ||
| 210 | TP_printk("hrtimer=%p now=%llu", __entry->hrtimer, | 214 | TP_printk("hrtimer=%p function=%pf now=%llu", __entry->hrtimer, __entry->function, |
| 211 | (unsigned long long)ktime_to_ns((ktime_t) { .tv64 = __entry->now })) | 215 | (unsigned long long)ktime_to_ns((ktime_t) { .tv64 = __entry->now })) |
| 212 | ); | 216 | ); |
| 213 | 217 | ||
diff --git a/include/xen/platform_pci.h b/include/xen/platform_pci.h index ce9d671c636c..a785a3b0c8c7 100644 --- a/include/xen/platform_pci.h +++ b/include/xen/platform_pci.h | |||
| @@ -16,11 +16,15 @@ | |||
| 16 | #define XEN_IOPORT_PROTOVER (XEN_IOPORT_BASE + 2) /* 1 byte access (R) */ | 16 | #define XEN_IOPORT_PROTOVER (XEN_IOPORT_BASE + 2) /* 1 byte access (R) */ |
| 17 | #define XEN_IOPORT_PRODNUM (XEN_IOPORT_BASE + 2) /* 2 byte access (W) */ | 17 | #define XEN_IOPORT_PRODNUM (XEN_IOPORT_BASE + 2) /* 2 byte access (W) */ |
| 18 | 18 | ||
| 19 | #define XEN_UNPLUG_ALL_IDE_DISKS 1 | 19 | #define XEN_UNPLUG_ALL_IDE_DISKS (1<<0) |
| 20 | #define XEN_UNPLUG_ALL_NICS 2 | 20 | #define XEN_UNPLUG_ALL_NICS (1<<1) |
| 21 | #define XEN_UNPLUG_AUX_IDE_DISKS 4 | 21 | #define XEN_UNPLUG_AUX_IDE_DISKS (1<<2) |
| 22 | #define XEN_UNPLUG_ALL 7 | 22 | #define XEN_UNPLUG_ALL (XEN_UNPLUG_ALL_IDE_DISKS|\ |
| 23 | #define XEN_UNPLUG_IGNORE 8 | 23 | XEN_UNPLUG_ALL_NICS|\ |
| 24 | XEN_UNPLUG_AUX_IDE_DISKS) | ||
| 25 | |||
| 26 | #define XEN_UNPLUG_UNNECESSARY (1<<16) | ||
| 27 | #define XEN_UNPLUG_NEVER (1<<17) | ||
| 24 | 28 | ||
| 25 | static inline int xen_must_unplug_nics(void) { | 29 | static inline int xen_must_unplug_nics(void) { |
| 26 | #if (defined(CONFIG_XEN_NETDEV_FRONTEND) || \ | 30 | #if (defined(CONFIG_XEN_NETDEV_FRONTEND) || \ |
diff --git a/kernel/sched.c b/kernel/sched.c index 41541d79e3c8..09b574e7f4df 100644 --- a/kernel/sched.c +++ b/kernel/sched.c | |||
| @@ -3865,8 +3865,16 @@ int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner) | |||
| 3865 | /* | 3865 | /* |
| 3866 | * Owner changed, break to re-assess state. | 3866 | * Owner changed, break to re-assess state. |
| 3867 | */ | 3867 | */ |
| 3868 | if (lock->owner != owner) | 3868 | if (lock->owner != owner) { |
| 3869 | /* | ||
| 3870 | * If the lock has switched to a different owner, | ||
| 3871 | * we likely have heavy contention. Return 0 to quit | ||
| 3872 | * optimistic spinning and not contend further: | ||
| 3873 | */ | ||
| 3874 | if (lock->owner) | ||
| 3875 | return 0; | ||
| 3869 | break; | 3876 | break; |
| 3877 | } | ||
| 3870 | 3878 | ||
| 3871 | /* | 3879 | /* |
| 3872 | * Is that owner really running on that cpu? | 3880 | * Is that owner really running on that cpu? |
diff --git a/kernel/watchdog.c b/kernel/watchdog.c index 613bc1f04610..0d53c8e853b1 100644 --- a/kernel/watchdog.c +++ b/kernel/watchdog.c | |||
| @@ -206,6 +206,9 @@ void watchdog_overflow_callback(struct perf_event *event, int nmi, | |||
| 206 | struct perf_sample_data *data, | 206 | struct perf_sample_data *data, |
| 207 | struct pt_regs *regs) | 207 | struct pt_regs *regs) |
| 208 | { | 208 | { |
| 209 | /* Ensure the watchdog never gets throttled */ | ||
| 210 | event->hw.interrupts = 0; | ||
| 211 | |||
| 209 | if (__get_cpu_var(watchdog_nmi_touch) == true) { | 212 | if (__get_cpu_var(watchdog_nmi_touch) == true) { |
| 210 | __get_cpu_var(watchdog_nmi_touch) = false; | 213 | __get_cpu_var(watchdog_nmi_touch) = false; |
| 211 | return; | 214 | return; |
diff --git a/lib/kobject_uevent.c b/lib/kobject_uevent.c index b93579504dfa..70af0a7f97c0 100644 --- a/lib/kobject_uevent.c +++ b/lib/kobject_uevent.c | |||
| @@ -123,7 +123,7 @@ static int kobj_usermode_filter(struct kobject *kobj) | |||
| 123 | * @kobj: struct kobject that the action is happening to | 123 | * @kobj: struct kobject that the action is happening to |
| 124 | * @envp_ext: pointer to environmental data | 124 | * @envp_ext: pointer to environmental data |
| 125 | * | 125 | * |
| 126 | * Returns 0 if kobject_uevent() is completed with success or the | 126 | * Returns 0 if kobject_uevent_env() is completed with success or the |
| 127 | * corresponding error when it fails. | 127 | * corresponding error when it fails. |
| 128 | */ | 128 | */ |
| 129 | int kobject_uevent_env(struct kobject *kobj, enum kobject_action action, | 129 | int kobject_uevent_env(struct kobject *kobj, enum kobject_action action, |
| @@ -317,7 +317,7 @@ exit: | |||
| 317 | EXPORT_SYMBOL_GPL(kobject_uevent_env); | 317 | EXPORT_SYMBOL_GPL(kobject_uevent_env); |
| 318 | 318 | ||
| 319 | /** | 319 | /** |
| 320 | * kobject_uevent - notify userspace by ending an uevent | 320 | * kobject_uevent - notify userspace by sending an uevent |
| 321 | * | 321 | * |
| 322 | * @action: action that is happening | 322 | * @action: action that is happening |
| 323 | * @kobj: struct kobject that the action is happening to | 323 | * @kobj: struct kobject that the action is happening to |
diff --git a/lib/radix-tree.c b/lib/radix-tree.c index 5b7d4623f0b7..efd16fa80b1c 100644 --- a/lib/radix-tree.c +++ b/lib/radix-tree.c | |||
| @@ -174,14 +174,16 @@ static void radix_tree_node_rcu_free(struct rcu_head *head) | |||
| 174 | { | 174 | { |
| 175 | struct radix_tree_node *node = | 175 | struct radix_tree_node *node = |
| 176 | container_of(head, struct radix_tree_node, rcu_head); | 176 | container_of(head, struct radix_tree_node, rcu_head); |
| 177 | int i; | ||
| 177 | 178 | ||
| 178 | /* | 179 | /* |
| 179 | * must only free zeroed nodes into the slab. radix_tree_shrink | 180 | * must only free zeroed nodes into the slab. radix_tree_shrink |
| 180 | * can leave us with a non-NULL entry in the first slot, so clear | 181 | * can leave us with a non-NULL entry in the first slot, so clear |
| 181 | * that here to make sure. | 182 | * that here to make sure. |
| 182 | */ | 183 | */ |
| 183 | tag_clear(node, 0, 0); | 184 | for (i = 0; i < RADIX_TREE_MAX_TAGS; i++) |
| 184 | tag_clear(node, 1, 0); | 185 | tag_clear(node, i, 0); |
| 186 | |||
| 185 | node->slots[0] = NULL; | 187 | node->slots[0] = NULL; |
| 186 | node->count = 0; | 188 | node->count = 0; |
| 187 | 189 | ||
| @@ -623,6 +625,13 @@ EXPORT_SYMBOL(radix_tree_tag_get); | |||
| 623 | * also settag. The function stops either after tagging nr_to_tag items or | 625 | * also settag. The function stops either after tagging nr_to_tag items or |
| 624 | * after reaching last_index. | 626 | * after reaching last_index. |
| 625 | * | 627 | * |
| 628 | * The tags must be set from the leaf level only and propagated back up the | ||
| 629 | * path to the root. We must do this so that we resolve the full path before | ||
| 630 | * setting any tags on intermediate nodes. If we set tags as we descend, then | ||
| 631 | * we can get to the leaf node and find that the index that has the iftag | ||
| 632 | set is outside the range we are scanning. This results in dangling tags and | ||
| 633 | * can lead to problems with later tag operations (e.g. livelocks on lookups). | ||
| 634 | * | ||
| 626 | * The function returns number of leaves where the tag was set and sets | 635 | * The function returns number of leaves where the tag was set and sets |
| 627 | * *first_indexp to the first unscanned index. | 636 | * *first_indexp to the first unscanned index. |
| 628 | * WARNING! *first_indexp can wrap if last_index is ULONG_MAX. Caller must | 637 | * WARNING! *first_indexp can wrap if last_index is ULONG_MAX. Caller must |
| @@ -633,9 +642,13 @@ unsigned long radix_tree_range_tag_if_tagged(struct radix_tree_root *root, | |||
| 633 | unsigned long nr_to_tag, | 642 | unsigned long nr_to_tag, |
| 634 | unsigned int iftag, unsigned int settag) | 643 | unsigned int iftag, unsigned int settag) |
| 635 | { | 644 | { |
| 636 | unsigned int height = root->height, shift; | 645 | unsigned int height = root->height; |
| 637 | unsigned long tagged = 0, index = *first_indexp; | 646 | struct radix_tree_path path[height]; |
| 638 | struct radix_tree_node *open_slots[height], *slot; | 647 | struct radix_tree_path *pathp = path; |
| 648 | struct radix_tree_node *slot; | ||
| 649 | unsigned int shift; | ||
| 650 | unsigned long tagged = 0; | ||
| 651 | unsigned long index = *first_indexp; | ||
| 639 | 652 | ||
| 640 | last_index = min(last_index, radix_tree_maxindex(height)); | 653 | last_index = min(last_index, radix_tree_maxindex(height)); |
| 641 | if (index > last_index) | 654 | if (index > last_index) |
| @@ -655,6 +668,13 @@ unsigned long radix_tree_range_tag_if_tagged(struct radix_tree_root *root, | |||
| 655 | shift = (height - 1) * RADIX_TREE_MAP_SHIFT; | 668 | shift = (height - 1) * RADIX_TREE_MAP_SHIFT; |
| 656 | slot = radix_tree_indirect_to_ptr(root->rnode); | 669 | slot = radix_tree_indirect_to_ptr(root->rnode); |
| 657 | 670 | ||
| 671 | /* | ||
| 672 | * we fill the path from (root->height - 2) to 0, leaving the index at | ||
| 673 | * (root->height - 1) as a terminator. Zero the node in the terminator | ||
| 674 | * so that we can use this to end walk loops back up the path. | ||
| 675 | */ | ||
| 676 | path[height - 1].node = NULL; | ||
| 677 | |||
| 658 | for (;;) { | 678 | for (;;) { |
| 659 | int offset; | 679 | int offset; |
| 660 | 680 | ||
| @@ -663,17 +683,30 @@ unsigned long radix_tree_range_tag_if_tagged(struct radix_tree_root *root, | |||
| 663 | goto next; | 683 | goto next; |
| 664 | if (!tag_get(slot, iftag, offset)) | 684 | if (!tag_get(slot, iftag, offset)) |
| 665 | goto next; | 685 | goto next; |
| 686 | if (height > 1) { | ||
| 687 | /* Go down one level */ | ||
| 688 | height--; | ||
| 689 | shift -= RADIX_TREE_MAP_SHIFT; | ||
| 690 | path[height - 1].node = slot; | ||
| 691 | path[height - 1].offset = offset; | ||
| 692 | slot = slot->slots[offset]; | ||
| 693 | continue; | ||
| 694 | } | ||
| 695 | |||
| 696 | /* tag the leaf */ | ||
| 697 | tagged++; | ||
| 666 | tag_set(slot, settag, offset); | 698 | tag_set(slot, settag, offset); |
| 667 | if (height == 1) { | 699 | |
| 668 | tagged++; | 700 | /* walk back up the path tagging interior nodes */ |
| 669 | goto next; | 701 | pathp = &path[0]; |
| 702 | while (pathp->node) { | ||
| 703 | /* stop if we find a node with the tag already set */ | ||
| 704 | if (tag_get(pathp->node, settag, pathp->offset)) | ||
| 705 | break; | ||
| 706 | tag_set(pathp->node, settag, pathp->offset); | ||
| 707 | pathp++; | ||
| 670 | } | 708 | } |
| 671 | /* Go down one level */ | 709 | |
| 672 | height--; | ||
| 673 | shift -= RADIX_TREE_MAP_SHIFT; | ||
| 674 | open_slots[height] = slot; | ||
| 675 | slot = slot->slots[offset]; | ||
| 676 | continue; | ||
| 677 | next: | 710 | next: |
| 678 | /* Go to next item at level determined by 'shift' */ | 711 | /* Go to next item at level determined by 'shift' */ |
| 679 | index = ((index >> shift) + 1) << shift; | 712 | index = ((index >> shift) + 1) << shift; |
| @@ -688,7 +721,7 @@ next: | |||
| 688 | * last_index is guaranteed to be in the tree, what | 721 | * last_index is guaranteed to be in the tree, what |
| 689 | * we do below cannot wander astray. | 722 | * we do below cannot wander astray. |
| 690 | */ | 723 | */ |
| 691 | slot = open_slots[height]; | 724 | slot = path[height - 1].node; |
| 692 | height++; | 725 | height++; |
| 693 | shift += RADIX_TREE_MAP_SHIFT; | 726 | shift += RADIX_TREE_MAP_SHIFT; |
| 694 | } | 727 | } |
diff --git a/mm/memory.c b/mm/memory.c index 2ed2267439df..6b2ab1051851 100644 --- a/mm/memory.c +++ b/mm/memory.c | |||
| @@ -2760,11 +2760,9 @@ out_release: | |||
| 2760 | } | 2760 | } |
| 2761 | 2761 | ||
| 2762 | /* | 2762 | /* |
| 2763 | * This is like a special single-page "expand_downwards()", | 2763 | * This is like a special single-page "expand_{down|up}wards()", |
| 2764 | * except we must first make sure that 'address-PAGE_SIZE' | 2764 | * except we must first make sure that 'address{-|+}PAGE_SIZE' |
| 2765 | * doesn't hit another vma. | 2765 | * doesn't hit another vma. |
| 2766 | * | ||
| 2767 | * The "find_vma()" will do the right thing even if we wrap | ||
| 2768 | */ | 2766 | */ |
| 2769 | static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned long address) | 2767 | static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned long address) |
| 2770 | { | 2768 | { |
| @@ -2783,6 +2781,15 @@ static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned lo | |||
| 2783 | 2781 | ||
| 2784 | expand_stack(vma, address - PAGE_SIZE); | 2782 | expand_stack(vma, address - PAGE_SIZE); |
| 2785 | } | 2783 | } |
| 2784 | if ((vma->vm_flags & VM_GROWSUP) && address + PAGE_SIZE == vma->vm_end) { | ||
| 2785 | struct vm_area_struct *next = vma->vm_next; | ||
| 2786 | |||
| 2787 | /* As VM_GROWSDOWN but s/below/above/ */ | ||
| 2788 | if (next && next->vm_start == address + PAGE_SIZE) | ||
| 2789 | return next->vm_flags & VM_GROWSUP ? 0 : -ENOMEM; | ||
| 2790 | |||
| 2791 | expand_upwards(vma, address + PAGE_SIZE); | ||
| 2792 | } | ||
| 2786 | return 0; | 2793 | return 0; |
| 2787 | } | 2794 | } |
| 2788 | 2795 | ||
| @@ -1716,9 +1716,6 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns | |||
| 1716 | * PA-RISC uses this for its stack; IA64 for its Register Backing Store. | 1716 | * PA-RISC uses this for its stack; IA64 for its Register Backing Store. |
| 1717 | * vma is the last one with address > vma->vm_end. Have to extend vma. | 1717 | * vma is the last one with address > vma->vm_end. Have to extend vma. |
| 1718 | */ | 1718 | */ |
| 1719 | #ifndef CONFIG_IA64 | ||
| 1720 | static | ||
| 1721 | #endif | ||
| 1722 | int expand_upwards(struct vm_area_struct *vma, unsigned long address) | 1719 | int expand_upwards(struct vm_area_struct *vma, unsigned long address) |
| 1723 | { | 1720 | { |
| 1724 | int error; | 1721 | int error; |
diff --git a/mm/page-writeback.c b/mm/page-writeback.c index c09ef5219cbe..a803f5e33471 100644 --- a/mm/page-writeback.c +++ b/mm/page-writeback.c | |||
| @@ -985,22 +985,16 @@ continue_unlock: | |||
| 985 | } | 985 | } |
| 986 | } | 986 | } |
| 987 | 987 | ||
| 988 | if (wbc->nr_to_write > 0) { | 988 | /* |
| 989 | if (--wbc->nr_to_write == 0 && | 989 | * We stop writing back only if we are not doing |
| 990 | wbc->sync_mode == WB_SYNC_NONE) { | 990 | * integrity sync. In case of integrity sync we have to |
| 991 | /* | 991 | * keep going until we have written all the pages |
| 992 | * We stop writing back only if we are | 992 | * we tagged for writeback prior to entering this loop. |
| 993 | * not doing integrity sync. In case of | 993 | */ |
| 994 | * integrity sync we have to keep going | 994 | if (--wbc->nr_to_write <= 0 && |
| 995 | * because someone may be concurrently | 995 | wbc->sync_mode == WB_SYNC_NONE) { |
| 996 | * dirtying pages, and we might have | 996 | done = 1; |
| 997 | * synced a lot of newly appeared dirty | 997 | break; |
| 998 | * pages, but have not synced all of the | ||
| 999 | * old dirty pages. | ||
| 1000 | */ | ||
| 1001 | done = 1; | ||
| 1002 | break; | ||
| 1003 | } | ||
| 1004 | } | 998 | } |
| 1005 | } | 999 | } |
| 1006 | pagevec_release(&pvec); | 1000 | pagevec_release(&pvec); |
diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c index 3d59c9bf8feb..3bccdd12a264 100644 --- a/net/8021q/vlan_dev.c +++ b/net/8021q/vlan_dev.c | |||
| @@ -510,7 +510,8 @@ static int vlan_dev_open(struct net_device *dev) | |||
| 510 | if (vlan->flags & VLAN_FLAG_GVRP) | 510 | if (vlan->flags & VLAN_FLAG_GVRP) |
| 511 | vlan_gvrp_request_join(dev); | 511 | vlan_gvrp_request_join(dev); |
| 512 | 512 | ||
| 513 | netif_carrier_on(dev); | 513 | if (netif_carrier_ok(real_dev)) |
| 514 | netif_carrier_on(dev); | ||
| 514 | return 0; | 515 | return 0; |
| 515 | 516 | ||
| 516 | clear_allmulti: | 517 | clear_allmulti: |
diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c index 51d6c3167975..e8f4f9a57f12 100644 --- a/net/ipv4/netfilter/arp_tables.c +++ b/net/ipv4/netfilter/arp_tables.c | |||
| @@ -1420,6 +1420,9 @@ static int translate_compat_table(const char *name, | |||
| 1420 | if (ret != 0) | 1420 | if (ret != 0) |
| 1421 | break; | 1421 | break; |
| 1422 | ++i; | 1422 | ++i; |
| 1423 | if (strcmp(arpt_get_target(iter1)->u.user.name, | ||
| 1424 | XT_ERROR_TARGET) == 0) | ||
| 1425 | ++newinfo->stacksize; | ||
| 1423 | } | 1426 | } |
| 1424 | if (ret) { | 1427 | if (ret) { |
| 1425 | /* | 1428 | /* |
diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c index 97b64b22c412..d163f2e3b2e9 100644 --- a/net/ipv4/netfilter/ip_tables.c +++ b/net/ipv4/netfilter/ip_tables.c | |||
| @@ -1751,6 +1751,9 @@ translate_compat_table(struct net *net, | |||
| 1751 | if (ret != 0) | 1751 | if (ret != 0) |
| 1752 | break; | 1752 | break; |
| 1753 | ++i; | 1753 | ++i; |
| 1754 | if (strcmp(ipt_get_target(iter1)->u.user.name, | ||
| 1755 | XT_ERROR_TARGET) == 0) | ||
| 1756 | ++newinfo->stacksize; | ||
| 1754 | } | 1757 | } |
| 1755 | if (ret) { | 1758 | if (ret) { |
| 1756 | /* | 1759 | /* |
diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c index 29a7bca29e3f..8e754be92c24 100644 --- a/net/ipv6/netfilter/ip6_tables.c +++ b/net/ipv6/netfilter/ip6_tables.c | |||
| @@ -1766,6 +1766,9 @@ translate_compat_table(struct net *net, | |||
| 1766 | if (ret != 0) | 1766 | if (ret != 0) |
| 1767 | break; | 1767 | break; |
| 1768 | ++i; | 1768 | ++i; |
| 1769 | if (strcmp(ip6t_get_target(iter1)->u.user.name, | ||
| 1770 | XT_ERROR_TARGET) == 0) | ||
| 1771 | ++newinfo->stacksize; | ||
| 1769 | } | 1772 | } |
| 1770 | if (ret) { | 1773 | if (ret) { |
| 1771 | /* | 1774 | /* |
diff --git a/net/irda/irlan/irlan_eth.c b/net/irda/irlan/irlan_eth.c index 9616c32d1076..5bb8353105cc 100644 --- a/net/irda/irlan/irlan_eth.c +++ b/net/irda/irlan/irlan_eth.c | |||
| @@ -169,6 +169,7 @@ static netdev_tx_t irlan_eth_xmit(struct sk_buff *skb, | |||
| 169 | { | 169 | { |
| 170 | struct irlan_cb *self = netdev_priv(dev); | 170 | struct irlan_cb *self = netdev_priv(dev); |
| 171 | int ret; | 171 | int ret; |
| 172 | unsigned int len; | ||
| 172 | 173 | ||
| 173 | /* skb headroom large enough to contain all IrDA-headers? */ | 174 | /* skb headroom large enough to contain all IrDA-headers? */ |
| 174 | if ((skb_headroom(skb) < self->max_header_size) || (skb_shared(skb))) { | 175 | if ((skb_headroom(skb) < self->max_header_size) || (skb_shared(skb))) { |
| @@ -188,6 +189,7 @@ static netdev_tx_t irlan_eth_xmit(struct sk_buff *skb, | |||
| 188 | 189 | ||
| 189 | dev->trans_start = jiffies; | 190 | dev->trans_start = jiffies; |
| 190 | 191 | ||
| 192 | len = skb->len; | ||
| 191 | /* Now queue the packet in the transport layer */ | 193 | /* Now queue the packet in the transport layer */ |
| 192 | if (self->use_udata) | 194 | if (self->use_udata) |
| 193 | ret = irttp_udata_request(self->tsap_data, skb); | 195 | ret = irttp_udata_request(self->tsap_data, skb); |
| @@ -209,7 +211,7 @@ static netdev_tx_t irlan_eth_xmit(struct sk_buff *skb, | |||
| 209 | self->stats.tx_dropped++; | 211 | self->stats.tx_dropped++; |
| 210 | } else { | 212 | } else { |
| 211 | self->stats.tx_packets++; | 213 | self->stats.tx_packets++; |
| 212 | self->stats.tx_bytes += skb->len; | 214 | self->stats.tx_bytes += len; |
| 213 | } | 215 | } |
| 214 | 216 | ||
| 215 | return NETDEV_TX_OK; | 217 | return NETDEV_TX_OK; |
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c index 8648a9922aab..980fe4ad0016 100644 --- a/net/netlink/af_netlink.c +++ b/net/netlink/af_netlink.c | |||
| @@ -1406,7 +1406,7 @@ static int netlink_recvmsg(struct kiocb *kiocb, struct socket *sock, | |||
| 1406 | struct netlink_sock *nlk = nlk_sk(sk); | 1406 | struct netlink_sock *nlk = nlk_sk(sk); |
| 1407 | int noblock = flags&MSG_DONTWAIT; | 1407 | int noblock = flags&MSG_DONTWAIT; |
| 1408 | size_t copied; | 1408 | size_t copied; |
| 1409 | struct sk_buff *skb, *frag __maybe_unused = NULL; | 1409 | struct sk_buff *skb, *data_skb; |
| 1410 | int err; | 1410 | int err; |
| 1411 | 1411 | ||
| 1412 | if (flags&MSG_OOB) | 1412 | if (flags&MSG_OOB) |
| @@ -1418,45 +1418,35 @@ static int netlink_recvmsg(struct kiocb *kiocb, struct socket *sock, | |||
| 1418 | if (skb == NULL) | 1418 | if (skb == NULL) |
| 1419 | goto out; | 1419 | goto out; |
| 1420 | 1420 | ||
| 1421 | data_skb = skb; | ||
| 1422 | |||
| 1421 | #ifdef CONFIG_COMPAT_NETLINK_MESSAGES | 1423 | #ifdef CONFIG_COMPAT_NETLINK_MESSAGES |
| 1422 | if (unlikely(skb_shinfo(skb)->frag_list)) { | 1424 | if (unlikely(skb_shinfo(skb)->frag_list)) { |
| 1423 | bool need_compat = !!(flags & MSG_CMSG_COMPAT); | ||
| 1424 | |||
| 1425 | /* | 1425 | /* |
| 1426 | * If this skb has a frag_list, then here that means that | 1426 | * If this skb has a frag_list, then here that means that we |
| 1427 | * we will have to use the frag_list skb for compat tasks | 1427 | * will have to use the frag_list skb's data for compat tasks |
| 1428 | * and the regular skb for non-compat tasks. | 1428 | * and the regular skb's data for normal (non-compat) tasks. |
| 1429 | * | 1429 | * |
| 1430 | * The skb might (and likely will) be cloned, so we can't | 1430 | * If we need to send the compat skb, assign it to the |
| 1431 | * just reset frag_list and go on with things -- we need to | 1431 | * 'data_skb' variable so that it will be used below for data |
| 1432 | * keep that. For the compat case that's easy -- simply get | 1432 | * copying. We keep 'skb' for everything else, including |
| 1433 | * a reference to the compat skb and free the regular one | 1433 | * freeing both later. |
| 1434 | * including the frag. For the non-compat case, we need to | ||
| 1435 | * avoid sending the frag to the user -- so assign NULL but | ||
| 1436 | * restore it below before freeing the skb. | ||
| 1437 | */ | 1434 | */ |
| 1438 | if (need_compat) { | 1435 | if (flags & MSG_CMSG_COMPAT) |
| 1439 | struct sk_buff *compskb = skb_shinfo(skb)->frag_list; | 1436 | data_skb = skb_shinfo(skb)->frag_list; |
| 1440 | skb_get(compskb); | ||
| 1441 | kfree_skb(skb); | ||
| 1442 | skb = compskb; | ||
| 1443 | } else { | ||
| 1444 | frag = skb_shinfo(skb)->frag_list; | ||
| 1445 | skb_shinfo(skb)->frag_list = NULL; | ||
| 1446 | } | ||
| 1447 | } | 1437 | } |
| 1448 | #endif | 1438 | #endif |
| 1449 | 1439 | ||
| 1450 | msg->msg_namelen = 0; | 1440 | msg->msg_namelen = 0; |
| 1451 | 1441 | ||
| 1452 | copied = skb->len; | 1442 | copied = data_skb->len; |
| 1453 | if (len < copied) { | 1443 | if (len < copied) { |
| 1454 | msg->msg_flags |= MSG_TRUNC; | 1444 | msg->msg_flags |= MSG_TRUNC; |
| 1455 | copied = len; | 1445 | copied = len; |
| 1456 | } | 1446 | } |
| 1457 | 1447 | ||
| 1458 | skb_reset_transport_header(skb); | 1448 | skb_reset_transport_header(data_skb); |
| 1459 | err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied); | 1449 | err = skb_copy_datagram_iovec(data_skb, 0, msg->msg_iov, copied); |
| 1460 | 1450 | ||
| 1461 | if (msg->msg_name) { | 1451 | if (msg->msg_name) { |
| 1462 | struct sockaddr_nl *addr = (struct sockaddr_nl *)msg->msg_name; | 1452 | struct sockaddr_nl *addr = (struct sockaddr_nl *)msg->msg_name; |
| @@ -1476,11 +1466,7 @@ static int netlink_recvmsg(struct kiocb *kiocb, struct socket *sock, | |||
| 1476 | } | 1466 | } |
| 1477 | siocb->scm->creds = *NETLINK_CREDS(skb); | 1467 | siocb->scm->creds = *NETLINK_CREDS(skb); |
| 1478 | if (flags & MSG_TRUNC) | 1468 | if (flags & MSG_TRUNC) |
| 1479 | copied = skb->len; | 1469 | copied = data_skb->len; |
| 1480 | |||
| 1481 | #ifdef CONFIG_COMPAT_NETLINK_MESSAGES | ||
| 1482 | skb_shinfo(skb)->frag_list = frag; | ||
| 1483 | #endif | ||
| 1484 | 1470 | ||
| 1485 | skb_free_datagram(sk, skb); | 1471 | skb_free_datagram(sk, skb); |
| 1486 | 1472 | ||
diff --git a/net/rds/recv.c b/net/rds/recv.c index 795a00b7f2cb..c93588c2d553 100644 --- a/net/rds/recv.c +++ b/net/rds/recv.c | |||
| @@ -297,7 +297,7 @@ static int rds_still_queued(struct rds_sock *rs, struct rds_incoming *inc, | |||
| 297 | int rds_notify_queue_get(struct rds_sock *rs, struct msghdr *msghdr) | 297 | int rds_notify_queue_get(struct rds_sock *rs, struct msghdr *msghdr) |
| 298 | { | 298 | { |
| 299 | struct rds_notifier *notifier; | 299 | struct rds_notifier *notifier; |
| 300 | struct rds_rdma_notify cmsg; | 300 | struct rds_rdma_notify cmsg = { 0 }; /* fill holes with zero */ |
| 301 | unsigned int count = 0, max_messages = ~0U; | 301 | unsigned int count = 0, max_messages = ~0U; |
| 302 | unsigned long flags; | 302 | unsigned long flags; |
| 303 | LIST_HEAD(copy); | 303 | LIST_HEAD(copy); |
diff --git a/scripts/kconfig/confdata.c b/scripts/kconfig/confdata.c index c39327e60ea4..515253fe46cf 100644 --- a/scripts/kconfig/confdata.c +++ b/scripts/kconfig/confdata.c | |||
| @@ -497,7 +497,9 @@ int conf_write_defconfig(const char *filename) | |||
| 497 | /* | 497 | /* |
| 498 | * If symbol is a choice value and equals to the | 498 | * If symbol is a choice value and equals to the |
| 499 | * default for a choice - skip. | 499 | * default for a choice - skip. |
| 500 | * But only if value is bool and equal to "y" . | 500 | * But only if value is bool and equal to "y" and |
| 501 | * choice is not "optional". | ||
| 502 | * (If choice is "optional" then all values can be "n") | ||
| 501 | */ | 503 | */ |
| 502 | if (sym_is_choice_value(sym)) { | 504 | if (sym_is_choice_value(sym)) { |
| 503 | struct symbol *cs; | 505 | struct symbol *cs; |
| @@ -505,7 +507,7 @@ int conf_write_defconfig(const char *filename) | |||
| 505 | 507 | ||
| 506 | cs = prop_get_symbol(sym_get_choice_prop(sym)); | 508 | cs = prop_get_symbol(sym_get_choice_prop(sym)); |
| 507 | ds = sym_choice_default(cs); | 509 | ds = sym_choice_default(cs); |
| 508 | if (sym == ds) { | 510 | if (!sym_is_optional(cs) && sym == ds) { |
| 509 | if ((sym->type == S_BOOLEAN) && | 511 | if ((sym->type == S_BOOLEAN) && |
| 510 | sym_get_tristate_value(sym) == yes) | 512 | sym_get_tristate_value(sym) == yes) |
| 511 | goto next_menu; | 513 | goto next_menu; |
diff --git a/scripts/kconfig/symbol.c b/scripts/kconfig/symbol.c index e95718fea355..943712ca6c0a 100644 --- a/scripts/kconfig/symbol.c +++ b/scripts/kconfig/symbol.c | |||
| @@ -937,6 +937,8 @@ static void sym_check_print_recursive(struct symbol *last_sym) | |||
| 937 | sym = stack->sym; | 937 | sym = stack->sym; |
| 938 | next_sym = stack->next ? stack->next->sym : last_sym; | 938 | next_sym = stack->next ? stack->next->sym : last_sym; |
| 939 | prop = stack->prop; | 939 | prop = stack->prop; |
| 940 | if (prop == NULL) | ||
| 941 | prop = stack->sym->prop; | ||
| 940 | 942 | ||
| 941 | /* for choice values find the menu entry (used below) */ | 943 | /* for choice values find the menu entry (used below) */ |
| 942 | if (sym_is_choice(sym) || sym_is_choice_value(sym)) { | 944 | if (sym_is_choice(sym) || sym_is_choice_value(sym)) { |
diff --git a/scripts/mkmakefile b/scripts/mkmakefile index 67d59c7a18dc..5325423ceab4 100644 --- a/scripts/mkmakefile +++ b/scripts/mkmakefile | |||
| @@ -44,7 +44,9 @@ all: | |||
| 44 | 44 | ||
| 45 | Makefile:; | 45 | Makefile:; |
| 46 | 46 | ||
| 47 | \$(all) %/: all | 47 | \$(all): all |
| 48 | @: | 48 | @: |
| 49 | 49 | ||
| 50 | %/: all | ||
| 51 | @: | ||
| 50 | EOF | 52 | EOF |
diff --git a/scripts/setlocalversion b/scripts/setlocalversion index e90a91cc5185..057b6b3c5dfb 100755 --- a/scripts/setlocalversion +++ b/scripts/setlocalversion | |||
| @@ -43,7 +43,7 @@ scm_version() | |||
| 43 | fi | 43 | fi |
| 44 | 44 | ||
| 45 | # Check for git and a git repo. | 45 | # Check for git and a git repo. |
| 46 | if head=`git rev-parse --verify --short HEAD 2>/dev/null`; then | 46 | if test -d .git && head=`git rev-parse --verify --short HEAD 2>/dev/null`; then |
| 47 | 47 | ||
| 48 | # If we are at a tagged commit (like "v2.6.30-rc6"), we ignore | 48 | # If we are at a tagged commit (like "v2.6.30-rc6"), we ignore |
| 49 | # it, because this version is defined in the top level Makefile. | 49 | # it, because this version is defined in the top level Makefile. |
| @@ -85,7 +85,7 @@ scm_version() | |||
| 85 | fi | 85 | fi |
| 86 | 86 | ||
| 87 | # Check for mercurial and a mercurial repo. | 87 | # Check for mercurial and a mercurial repo. |
| 88 | if hgid=`hg id 2>/dev/null`; then | 88 | if test -d .hg && hgid=`hg id 2>/dev/null`; then |
| 89 | tag=`printf '%s' "$hgid" | cut -s -d' ' -f2` | 89 | tag=`printf '%s' "$hgid" | cut -s -d' ' -f2` |
| 90 | 90 | ||
| 91 | # Do we have an untagged version? | 91 | # Do we have an untagged version? |
diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c index dd8fb86c842b..3827092cc1d2 100644 --- a/sound/pci/hda/hda_codec.c +++ b/sound/pci/hda/hda_codec.c | |||
| @@ -589,6 +589,7 @@ int /*__devinit*/ snd_hda_bus_new(struct snd_card *card, | |||
| 589 | bus->ops = temp->ops; | 589 | bus->ops = temp->ops; |
| 590 | 590 | ||
| 591 | mutex_init(&bus->cmd_mutex); | 591 | mutex_init(&bus->cmd_mutex); |
| 592 | mutex_init(&bus->prepare_mutex); | ||
| 592 | INIT_LIST_HEAD(&bus->codec_list); | 593 | INIT_LIST_HEAD(&bus->codec_list); |
| 593 | 594 | ||
| 594 | snprintf(bus->workq_name, sizeof(bus->workq_name), | 595 | snprintf(bus->workq_name, sizeof(bus->workq_name), |
| @@ -1068,7 +1069,6 @@ int /*__devinit*/ snd_hda_codec_new(struct hda_bus *bus, | |||
| 1068 | codec->addr = codec_addr; | 1069 | codec->addr = codec_addr; |
| 1069 | mutex_init(&codec->spdif_mutex); | 1070 | mutex_init(&codec->spdif_mutex); |
| 1070 | mutex_init(&codec->control_mutex); | 1071 | mutex_init(&codec->control_mutex); |
| 1071 | mutex_init(&codec->prepare_mutex); | ||
| 1072 | init_hda_cache(&codec->amp_cache, sizeof(struct hda_amp_info)); | 1072 | init_hda_cache(&codec->amp_cache, sizeof(struct hda_amp_info)); |
| 1073 | init_hda_cache(&codec->cmd_cache, sizeof(struct hda_cache_head)); | 1073 | init_hda_cache(&codec->cmd_cache, sizeof(struct hda_cache_head)); |
| 1074 | snd_array_init(&codec->mixers, sizeof(struct hda_nid_item), 32); | 1074 | snd_array_init(&codec->mixers, sizeof(struct hda_nid_item), 32); |
| @@ -1213,6 +1213,7 @@ void snd_hda_codec_setup_stream(struct hda_codec *codec, hda_nid_t nid, | |||
| 1213 | u32 stream_tag, | 1213 | u32 stream_tag, |
| 1214 | int channel_id, int format) | 1214 | int channel_id, int format) |
| 1215 | { | 1215 | { |
| 1216 | struct hda_codec *c; | ||
| 1216 | struct hda_cvt_setup *p; | 1217 | struct hda_cvt_setup *p; |
| 1217 | unsigned int oldval, newval; | 1218 | unsigned int oldval, newval; |
| 1218 | int i; | 1219 | int i; |
| @@ -1253,10 +1254,12 @@ void snd_hda_codec_setup_stream(struct hda_codec *codec, hda_nid_t nid, | |||
| 1253 | p->dirty = 0; | 1254 | p->dirty = 0; |
| 1254 | 1255 | ||
| 1255 | /* make other inactive cvts with the same stream-tag dirty */ | 1256 | /* make other inactive cvts with the same stream-tag dirty */ |
| 1256 | for (i = 0; i < codec->cvt_setups.used; i++) { | 1257 | list_for_each_entry(c, &codec->bus->codec_list, list) { |
| 1257 | p = snd_array_elem(&codec->cvt_setups, i); | 1258 | for (i = 0; i < c->cvt_setups.used; i++) { |
| 1258 | if (!p->active && p->stream_tag == stream_tag) | 1259 | p = snd_array_elem(&c->cvt_setups, i); |
| 1259 | p->dirty = 1; | 1260 | if (!p->active && p->stream_tag == stream_tag) |
| 1261 | p->dirty = 1; | ||
| 1262 | } | ||
| 1260 | } | 1263 | } |
| 1261 | } | 1264 | } |
| 1262 | EXPORT_SYMBOL_HDA(snd_hda_codec_setup_stream); | 1265 | EXPORT_SYMBOL_HDA(snd_hda_codec_setup_stream); |
| @@ -1306,12 +1309,16 @@ static void really_cleanup_stream(struct hda_codec *codec, | |||
| 1306 | /* clean up the all conflicting obsolete streams */ | 1309 | /* clean up the all conflicting obsolete streams */ |
| 1307 | static void purify_inactive_streams(struct hda_codec *codec) | 1310 | static void purify_inactive_streams(struct hda_codec *codec) |
| 1308 | { | 1311 | { |
| 1312 | struct hda_codec *c; | ||
| 1309 | int i; | 1313 | int i; |
| 1310 | 1314 | ||
| 1311 | for (i = 0; i < codec->cvt_setups.used; i++) { | 1315 | list_for_each_entry(c, &codec->bus->codec_list, list) { |
| 1312 | struct hda_cvt_setup *p = snd_array_elem(&codec->cvt_setups, i); | 1316 | for (i = 0; i < c->cvt_setups.used; i++) { |
| 1313 | if (p->dirty) | 1317 | struct hda_cvt_setup *p; |
| 1314 | really_cleanup_stream(codec, p); | 1318 | p = snd_array_elem(&c->cvt_setups, i); |
| 1319 | if (p->dirty) | ||
| 1320 | really_cleanup_stream(c, p); | ||
| 1321 | } | ||
| 1315 | } | 1322 | } |
| 1316 | } | 1323 | } |
| 1317 | 1324 | ||
| @@ -3502,11 +3509,11 @@ int snd_hda_codec_prepare(struct hda_codec *codec, | |||
| 3502 | struct snd_pcm_substream *substream) | 3509 | struct snd_pcm_substream *substream) |
| 3503 | { | 3510 | { |
| 3504 | int ret; | 3511 | int ret; |
| 3505 | mutex_lock(&codec->prepare_mutex); | 3512 | mutex_lock(&codec->bus->prepare_mutex); |
| 3506 | ret = hinfo->ops.prepare(hinfo, codec, stream, format, substream); | 3513 | ret = hinfo->ops.prepare(hinfo, codec, stream, format, substream); |
| 3507 | if (ret >= 0) | 3514 | if (ret >= 0) |
| 3508 | purify_inactive_streams(codec); | 3515 | purify_inactive_streams(codec); |
| 3509 | mutex_unlock(&codec->prepare_mutex); | 3516 | mutex_unlock(&codec->bus->prepare_mutex); |
| 3510 | return ret; | 3517 | return ret; |
| 3511 | } | 3518 | } |
| 3512 | EXPORT_SYMBOL_HDA(snd_hda_codec_prepare); | 3519 | EXPORT_SYMBOL_HDA(snd_hda_codec_prepare); |
| @@ -3515,9 +3522,9 @@ void snd_hda_codec_cleanup(struct hda_codec *codec, | |||
| 3515 | struct hda_pcm_stream *hinfo, | 3522 | struct hda_pcm_stream *hinfo, |
| 3516 | struct snd_pcm_substream *substream) | 3523 | struct snd_pcm_substream *substream) |
| 3517 | { | 3524 | { |
| 3518 | mutex_lock(&codec->prepare_mutex); | 3525 | mutex_lock(&codec->bus->prepare_mutex); |
| 3519 | hinfo->ops.cleanup(hinfo, codec, substream); | 3526 | hinfo->ops.cleanup(hinfo, codec, substream); |
| 3520 | mutex_unlock(&codec->prepare_mutex); | 3527 | mutex_unlock(&codec->bus->prepare_mutex); |
| 3521 | } | 3528 | } |
| 3522 | EXPORT_SYMBOL_HDA(snd_hda_codec_cleanup); | 3529 | EXPORT_SYMBOL_HDA(snd_hda_codec_cleanup); |
| 3523 | 3530 | ||
diff --git a/sound/pci/hda/hda_codec.h b/sound/pci/hda/hda_codec.h index 4303353feda9..62c702240108 100644 --- a/sound/pci/hda/hda_codec.h +++ b/sound/pci/hda/hda_codec.h | |||
| @@ -648,6 +648,7 @@ struct hda_bus { | |||
| 648 | struct hda_codec *caddr_tbl[HDA_MAX_CODEC_ADDRESS + 1]; | 648 | struct hda_codec *caddr_tbl[HDA_MAX_CODEC_ADDRESS + 1]; |
| 649 | 649 | ||
| 650 | struct mutex cmd_mutex; | 650 | struct mutex cmd_mutex; |
| 651 | struct mutex prepare_mutex; | ||
| 651 | 652 | ||
| 652 | /* unsolicited event queue */ | 653 | /* unsolicited event queue */ |
| 653 | struct hda_bus_unsolicited *unsol; | 654 | struct hda_bus_unsolicited *unsol; |
| @@ -826,7 +827,6 @@ struct hda_codec { | |||
| 826 | 827 | ||
| 827 | struct mutex spdif_mutex; | 828 | struct mutex spdif_mutex; |
| 828 | struct mutex control_mutex; | 829 | struct mutex control_mutex; |
| 829 | struct mutex prepare_mutex; | ||
| 830 | unsigned int spdif_status; /* IEC958 status bits */ | 830 | unsigned int spdif_status; /* IEC958 status bits */ |
| 831 | unsigned short spdif_ctls; /* SPDIF control bits */ | 831 | unsigned short spdif_ctls; /* SPDIF control bits */ |
| 832 | unsigned int spdif_in_enable; /* SPDIF input enable? */ | 832 | unsigned int spdif_in_enable; /* SPDIF input enable? */ |
diff --git a/sound/pci/hda/hda_eld.c b/sound/pci/hda/hda_eld.c index 803b298f7411..26c3ade73583 100644 --- a/sound/pci/hda/hda_eld.c +++ b/sound/pci/hda/hda_eld.c | |||
| @@ -596,6 +596,8 @@ void snd_hda_eld_proc_free(struct hda_codec *codec, struct hdmi_eld *eld) | |||
| 596 | } | 596 | } |
| 597 | EXPORT_SYMBOL_HDA(snd_hda_eld_proc_free); | 597 | EXPORT_SYMBOL_HDA(snd_hda_eld_proc_free); |
| 598 | 598 | ||
| 599 | #endif /* CONFIG_PROC_FS */ | ||
| 600 | |||
| 599 | /* update PCM info based on ELD */ | 601 | /* update PCM info based on ELD */ |
| 600 | void hdmi_eld_update_pcm_info(struct hdmi_eld *eld, struct hda_pcm_stream *pcm, | 602 | void hdmi_eld_update_pcm_info(struct hdmi_eld *eld, struct hda_pcm_stream *pcm, |
| 601 | struct hda_pcm_stream *codec_pars) | 603 | struct hda_pcm_stream *codec_pars) |
| @@ -644,5 +646,3 @@ void hdmi_eld_update_pcm_info(struct hdmi_eld *eld, struct hda_pcm_stream *pcm, | |||
| 644 | pcm->maxbps = min(pcm->maxbps, codec_pars->maxbps); | 646 | pcm->maxbps = min(pcm->maxbps, codec_pars->maxbps); |
| 645 | } | 647 | } |
| 646 | EXPORT_SYMBOL_HDA(hdmi_eld_update_pcm_info); | 648 | EXPORT_SYMBOL_HDA(hdmi_eld_update_pcm_info); |
| 647 | |||
| 648 | #endif /* CONFIG_PROC_FS */ | ||
diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c index c424952a734e..5cdb80edbd7f 100644 --- a/sound/pci/hda/patch_conexant.c +++ b/sound/pci/hda/patch_conexant.c | |||
| @@ -3059,6 +3059,7 @@ static struct snd_pci_quirk cxt5066_cfg_tbl[] = { | |||
| 3059 | SND_PCI_QUIRK(0x17aa, 0x21b4, "Thinkpad Edge", CXT5066_IDEAPAD), | 3059 | SND_PCI_QUIRK(0x17aa, 0x21b4, "Thinkpad Edge", CXT5066_IDEAPAD), |
| 3060 | SND_PCI_QUIRK(0x17aa, 0x215e, "Lenovo Thinkpad", CXT5066_THINKPAD), | 3060 | SND_PCI_QUIRK(0x17aa, 0x215e, "Lenovo Thinkpad", CXT5066_THINKPAD), |
| 3061 | SND_PCI_QUIRK(0x17aa, 0x38af, "Lenovo G series", CXT5066_IDEAPAD), | 3061 | SND_PCI_QUIRK(0x17aa, 0x38af, "Lenovo G series", CXT5066_IDEAPAD), |
| 3062 | SND_PCI_QUIRK(0x17aa, 0x390a, "Lenovo S10-3t", CXT5066_IDEAPAD), | ||
| 3062 | SND_PCI_QUIRK(0x17aa, 0x3938, "Lenovo G series (AMD)", CXT5066_IDEAPAD), | 3063 | SND_PCI_QUIRK(0x17aa, 0x3938, "Lenovo G series (AMD)", CXT5066_IDEAPAD), |
| 3063 | SND_PCI_QUIRK(0x17aa, 0x3a0d, "ideapad", CXT5066_IDEAPAD), | 3064 | SND_PCI_QUIRK(0x17aa, 0x3a0d, "ideapad", CXT5066_IDEAPAD), |
| 3064 | {} | 3065 | {} |
diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c index 2bc0f07cf33f..afd6022a96a7 100644 --- a/sound/pci/hda/patch_hdmi.c +++ b/sound/pci/hda/patch_hdmi.c | |||
| @@ -707,8 +707,6 @@ static int hdmi_setup_stream(struct hda_codec *codec, hda_nid_t nid, | |||
| 707 | u32 stream_tag, int format) | 707 | u32 stream_tag, int format) |
| 708 | { | 708 | { |
| 709 | struct hdmi_spec *spec = codec->spec; | 709 | struct hdmi_spec *spec = codec->spec; |
| 710 | int tag; | ||
| 711 | int fmt; | ||
| 712 | int pinctl; | 710 | int pinctl; |
| 713 | int new_pinctl = 0; | 711 | int new_pinctl = 0; |
| 714 | int i; | 712 | int i; |
| @@ -745,24 +743,7 @@ static int hdmi_setup_stream(struct hda_codec *codec, hda_nid_t nid, | |||
| 745 | return -EINVAL; | 743 | return -EINVAL; |
| 746 | } | 744 | } |
| 747 | 745 | ||
| 748 | tag = snd_hda_codec_read(codec, nid, 0, AC_VERB_GET_CONV, 0) >> 4; | 746 | snd_hda_codec_setup_stream(codec, nid, stream_tag, 0, format); |
| 749 | fmt = snd_hda_codec_read(codec, nid, 0, AC_VERB_GET_STREAM_FORMAT, 0); | ||
| 750 | |||
| 751 | snd_printdd("hdmi_setup_stream: " | ||
| 752 | "NID=0x%x, %sstream=0x%x, %sformat=0x%x\n", | ||
| 753 | nid, | ||
| 754 | tag == stream_tag ? "" : "new-", | ||
| 755 | stream_tag, | ||
| 756 | fmt == format ? "" : "new-", | ||
| 757 | format); | ||
| 758 | |||
| 759 | if (tag != stream_tag) | ||
| 760 | snd_hda_codec_write(codec, nid, 0, | ||
| 761 | AC_VERB_SET_CHANNEL_STREAMID, | ||
| 762 | stream_tag << 4); | ||
| 763 | if (fmt != format) | ||
| 764 | snd_hda_codec_write(codec, nid, 0, | ||
| 765 | AC_VERB_SET_STREAM_FORMAT, format); | ||
| 766 | return 0; | 747 | return 0; |
| 767 | } | 748 | } |
| 768 | 749 | ||
diff --git a/sound/pci/hda/patch_intelhdmi.c b/sound/pci/hda/patch_intelhdmi.c index d382d3c81c0f..36a9b83a6174 100644 --- a/sound/pci/hda/patch_intelhdmi.c +++ b/sound/pci/hda/patch_intelhdmi.c | |||
| @@ -69,20 +69,12 @@ static int intel_hdmi_playback_pcm_prepare(struct hda_pcm_stream *hinfo, | |||
| 69 | return hdmi_setup_stream(codec, hinfo->nid, stream_tag, format); | 69 | return hdmi_setup_stream(codec, hinfo->nid, stream_tag, format); |
| 70 | } | 70 | } |
| 71 | 71 | ||
| 72 | static int intel_hdmi_playback_pcm_cleanup(struct hda_pcm_stream *hinfo, | ||
| 73 | struct hda_codec *codec, | ||
| 74 | struct snd_pcm_substream *substream) | ||
| 75 | { | ||
| 76 | return 0; | ||
| 77 | } | ||
| 78 | |||
| 79 | static struct hda_pcm_stream intel_hdmi_pcm_playback = { | 72 | static struct hda_pcm_stream intel_hdmi_pcm_playback = { |
| 80 | .substreams = 1, | 73 | .substreams = 1, |
| 81 | .channels_min = 2, | 74 | .channels_min = 2, |
| 82 | .ops = { | 75 | .ops = { |
| 83 | .open = hdmi_pcm_open, | 76 | .open = hdmi_pcm_open, |
| 84 | .prepare = intel_hdmi_playback_pcm_prepare, | 77 | .prepare = intel_hdmi_playback_pcm_prepare, |
| 85 | .cleanup = intel_hdmi_playback_pcm_cleanup, | ||
| 86 | }, | 78 | }, |
| 87 | }; | 79 | }; |
| 88 | 80 | ||
diff --git a/sound/pci/hda/patch_nvhdmi.c b/sound/pci/hda/patch_nvhdmi.c index f636870dc718..69b950d527c3 100644 --- a/sound/pci/hda/patch_nvhdmi.c +++ b/sound/pci/hda/patch_nvhdmi.c | |||
| @@ -326,13 +326,6 @@ static int nvhdmi_dig_playback_pcm_prepare_8ch(struct hda_pcm_stream *hinfo, | |||
| 326 | return 0; | 326 | return 0; |
| 327 | } | 327 | } |
| 328 | 328 | ||
| 329 | static int nvhdmi_playback_pcm_cleanup(struct hda_pcm_stream *hinfo, | ||
| 330 | struct hda_codec *codec, | ||
| 331 | struct snd_pcm_substream *substream) | ||
| 332 | { | ||
| 333 | return 0; | ||
| 334 | } | ||
| 335 | |||
| 336 | static int nvhdmi_dig_playback_pcm_prepare_2ch(struct hda_pcm_stream *hinfo, | 329 | static int nvhdmi_dig_playback_pcm_prepare_2ch(struct hda_pcm_stream *hinfo, |
| 337 | struct hda_codec *codec, | 330 | struct hda_codec *codec, |
| 338 | unsigned int stream_tag, | 331 | unsigned int stream_tag, |
| @@ -350,7 +343,6 @@ static struct hda_pcm_stream nvhdmi_pcm_digital_playback_8ch_89 = { | |||
| 350 | .ops = { | 343 | .ops = { |
| 351 | .open = hdmi_pcm_open, | 344 | .open = hdmi_pcm_open, |
| 352 | .prepare = nvhdmi_dig_playback_pcm_prepare_8ch_89, | 345 | .prepare = nvhdmi_dig_playback_pcm_prepare_8ch_89, |
| 353 | .cleanup = nvhdmi_playback_pcm_cleanup, | ||
| 354 | }, | 346 | }, |
| 355 | }; | 347 | }; |
| 356 | 348 | ||
diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c index f3f861bd1bf8..95148e58026c 100644 --- a/sound/pci/hda/patch_sigmatel.c +++ b/sound/pci/hda/patch_sigmatel.c | |||
| @@ -6303,6 +6303,21 @@ static struct hda_codec_preset snd_hda_preset_sigmatel[] = { | |||
| 6303 | { .id = 0x111d76b5, .name = "92HD71B6X", .patch = patch_stac92hd71bxx }, | 6303 | { .id = 0x111d76b5, .name = "92HD71B6X", .patch = patch_stac92hd71bxx }, |
| 6304 | { .id = 0x111d76b6, .name = "92HD71B5X", .patch = patch_stac92hd71bxx }, | 6304 | { .id = 0x111d76b6, .name = "92HD71B5X", .patch = patch_stac92hd71bxx }, |
| 6305 | { .id = 0x111d76b7, .name = "92HD71B5X", .patch = patch_stac92hd71bxx }, | 6305 | { .id = 0x111d76b7, .name = "92HD71B5X", .patch = patch_stac92hd71bxx }, |
| 6306 | { .id = 0x111d76c0, .name = "92HD89C3", .patch = patch_stac92hd73xx }, | ||
| 6307 | { .id = 0x111d76c1, .name = "92HD89C2", .patch = patch_stac92hd73xx }, | ||
| 6308 | { .id = 0x111d76c2, .name = "92HD89C1", .patch = patch_stac92hd73xx }, | ||
| 6309 | { .id = 0x111d76c3, .name = "92HD89B3", .patch = patch_stac92hd73xx }, | ||
| 6310 | { .id = 0x111d76c4, .name = "92HD89B2", .patch = patch_stac92hd73xx }, | ||
| 6311 | { .id = 0x111d76c5, .name = "92HD89B1", .patch = patch_stac92hd73xx }, | ||
| 6312 | { .id = 0x111d76c6, .name = "92HD89E3", .patch = patch_stac92hd73xx }, | ||
| 6313 | { .id = 0x111d76c7, .name = "92HD89E2", .patch = patch_stac92hd73xx }, | ||
| 6314 | { .id = 0x111d76c8, .name = "92HD89E1", .patch = patch_stac92hd73xx }, | ||
| 6315 | { .id = 0x111d76c9, .name = "92HD89D3", .patch = patch_stac92hd73xx }, | ||
| 6316 | { .id = 0x111d76ca, .name = "92HD89D2", .patch = patch_stac92hd73xx }, | ||
| 6317 | { .id = 0x111d76cb, .name = "92HD89D1", .patch = patch_stac92hd73xx }, | ||
| 6318 | { .id = 0x111d76cc, .name = "92HD89F3", .patch = patch_stac92hd73xx }, | ||
| 6319 | { .id = 0x111d76cd, .name = "92HD89F2", .patch = patch_stac92hd73xx }, | ||
| 6320 | { .id = 0x111d76ce, .name = "92HD89F1", .patch = patch_stac92hd73xx }, | ||
| 6306 | {} /* terminator */ | 6321 | {} /* terminator */ |
| 6307 | }; | 6322 | }; |
| 6308 | 6323 | ||
diff --git a/sound/pci/intel8x0.c b/sound/pci/intel8x0.c index 6433e65c9507..467749249576 100644 --- a/sound/pci/intel8x0.c +++ b/sound/pci/intel8x0.c | |||
| @@ -1776,6 +1776,12 @@ static struct ac97_quirk ac97_quirks[] __devinitdata = { | |||
| 1776 | }, | 1776 | }, |
| 1777 | { | 1777 | { |
| 1778 | .subvendor = 0x1014, | 1778 | .subvendor = 0x1014, |
| 1779 | .subdevice = 0x0534, | ||
| 1780 | .name = "ThinkPad X31", | ||
| 1781 | .type = AC97_TUNE_INV_EAPD | ||
| 1782 | }, | ||
| 1783 | { | ||
| 1784 | .subvendor = 0x1014, | ||
| 1779 | .subdevice = 0x1f00, | 1785 | .subdevice = 0x1f00, |
| 1780 | .name = "MS-9128", | 1786 | .name = "MS-9128", |
| 1781 | .type = AC97_TUNE_ALC_JACK | 1787 | .type = AC97_TUNE_ALC_JACK |
diff --git a/sound/soc/imx/imx-ssi.c b/sound/soc/imx/imx-ssi.c index a11daa1e905b..c81da05a4f11 100644 --- a/sound/soc/imx/imx-ssi.c +++ b/sound/soc/imx/imx-ssi.c | |||
| @@ -254,6 +254,9 @@ static int imx_ssi_hw_params(struct snd_pcm_substream *substream, | |||
| 254 | dma_data = &ssi->dma_params_rx; | 254 | dma_data = &ssi->dma_params_rx; |
| 255 | } | 255 | } |
| 256 | 256 | ||
| 257 | if (ssi->flags & IMX_SSI_SYN) | ||
| 258 | reg = SSI_STCCR; | ||
| 259 | |||
| 257 | snd_soc_dai_set_dma_data(cpu_dai, substream, dma_data); | 260 | snd_soc_dai_set_dma_data(cpu_dai, substream, dma_data); |
| 258 | 261 | ||
| 259 | sccr = readl(ssi->base + reg) & ~SSI_STCCR_WL_MASK; | 262 | sccr = readl(ssi->base + reg) & ~SSI_STCCR_WL_MASK; |
