48 files changed, 630 insertions, 530 deletions
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index 52c80c2a57f2..600eef3f3ac7 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -351,7 +351,7 @@ config SGI_IP27
 	select ARC64
 	select BOOT_ELF64
 	select DEFAULT_SGI_PARTITION
-	select DMA_IP27
+	select DMA_COHERENT
 	select SYS_HAS_EARLY_PRINTK
 	select HW_HAS_PCI
 	select NR_CPUS_DEFAULT_64
@@ -761,9 +761,6 @@ config CFE
 config DMA_COHERENT
 	bool

-config DMA_IP27
-	bool
-
 config DMA_NONCOHERENT
 	bool
 	select DMA_NEED_PCI_MAP_STATE
@@ -1368,7 +1365,7 @@ config CPU_SUPPORTS_64BIT_KERNEL
 #
 config HARDWARE_WATCHPOINTS
 	bool
-	default y if CPU_MIPS32 || CPU_MIPS64
+	default y if CPU_MIPSR1 || CPU_MIPSR2

 menu "Kernel type"

diff --git a/arch/mips/alchemy/common/time.c b/arch/mips/alchemy/common/time.c
index 32880146cbc1..6fd441d16af5 100644
--- a/arch/mips/alchemy/common/time.c
+++ b/arch/mips/alchemy/common/time.c
@@ -89,7 +89,7 @@ static struct clock_event_device au1x_rtcmatch2_clockdev = {
 	.irq = AU1000_RTC_MATCH2_INT,
 	.set_next_event = au1x_rtcmatch2_set_next_event,
 	.set_mode = au1x_rtcmatch2_set_mode,
-	.cpumask = CPU_MASK_ALL,
+	.cpumask = CPU_MASK_ALL_PTR,
 };

 static struct irqaction au1x_rtcmatch2_irqaction = {
diff --git a/arch/mips/cavium-octeon/setup.c b/arch/mips/cavium-octeon/setup.c
index e085feddb4a4..5f4e49ba4713 100644
--- a/arch/mips/cavium-octeon/setup.c
+++ b/arch/mips/cavium-octeon/setup.c
@@ -15,13 +15,11 @@
 #include <linux/serial.h>
 #include <linux/types.h>
 #include <linux/string.h>	/* for memset */
-#include <linux/serial.h>
 #include <linux/tty.h>
 #include <linux/time.h>
 #include <linux/platform_device.h>
 #include <linux/serial_core.h>
 #include <linux/serial_8250.h>
-#include <linux/string.h>

 #include <asm/processor.h>
 #include <asm/reboot.h>
diff --git a/arch/mips/configs/ip27_defconfig b/arch/mips/configs/ip27_defconfig
index 34ea319be94c..f2baea3039bb 100644
--- a/arch/mips/configs/ip27_defconfig
+++ b/arch/mips/configs/ip27_defconfig
@@ -53,7 +53,7 @@ CONFIG_GENERIC_TIME=y
 CONFIG_SCHED_NO_NO_OMIT_FRAME_POINTER=y
 CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y
 CONFIG_ARC=y
-CONFIG_DMA_IP27=y
+CONFIG_DMA_COHERENT=y
 CONFIG_EARLY_PRINTK=y
 CONFIG_SYS_HAS_EARLY_PRINTK=y
 # CONFIG_NO_IOPORT is not set
diff --git a/arch/mips/include/asm/atomic.h b/arch/mips/include/asm/atomic.h
index c996c3b4d074..1b332e15ab52 100644
--- a/arch/mips/include/asm/atomic.h
+++ b/arch/mips/include/asm/atomic.h
@@ -50,7 +50,7 @@
 static __inline__ void atomic_add(int i, atomic_t * v)
 {
 	if (cpu_has_llsc && R10000_LLSC_WAR) {
-		unsigned long temp;
+		int temp;

 		__asm__ __volatile__(
 		"	.set	mips3					\n"
@@ -62,7 +62,7 @@ static __inline__ void atomic_add(int i, atomic_t * v)
 		: "=&r" (temp), "=m" (v->counter)
 		: "Ir" (i), "m" (v->counter));
 	} else if (cpu_has_llsc) {
-		unsigned long temp;
+		int temp;

 		__asm__ __volatile__(
 		"	.set	mips3					\n"
@@ -95,7 +95,7 @@ static __inline__ void atomic_add(int i, atomic_t * v)
 static __inline__ void atomic_sub(int i, atomic_t * v)
 {
 	if (cpu_has_llsc && R10000_LLSC_WAR) {
-		unsigned long temp;
+		int temp;

 		__asm__ __volatile__(
 		"	.set	mips3					\n"
@@ -107,7 +107,7 @@ static __inline__ void atomic_sub(int i, atomic_t * v)
 		: "=&r" (temp), "=m" (v->counter)
 		: "Ir" (i), "m" (v->counter));
 	} else if (cpu_has_llsc) {
-		unsigned long temp;
+		int temp;

 		__asm__ __volatile__(
 		"	.set	mips3					\n"
@@ -135,12 +135,12 @@ static __inline__ void atomic_sub(int i, atomic_t * v)
  */
 static __inline__ int atomic_add_return(int i, atomic_t * v)
 {
-	unsigned long result;
+	int result;

 	smp_llsc_mb();

 	if (cpu_has_llsc && R10000_LLSC_WAR) {
-		unsigned long temp;
+		int temp;

 		__asm__ __volatile__(
 		"	.set	mips3					\n"
@@ -154,7 +154,7 @@ static __inline__ int atomic_add_return(int i, atomic_t * v)
 		: "Ir" (i), "m" (v->counter)
 		: "memory");
 	} else if (cpu_has_llsc) {
-		unsigned long temp;
+		int temp;

 		__asm__ __volatile__(
 		"	.set	mips3					\n"
@@ -187,12 +187,12 @@ static __inline__ int atomic_add_return(int i, atomic_t * v)

 static __inline__ int atomic_sub_return(int i, atomic_t * v)
 {
-	unsigned long result;
+	int result;

 	smp_llsc_mb();

 	if (cpu_has_llsc && R10000_LLSC_WAR) {
-		unsigned long temp;
+		int temp;

 		__asm__ __volatile__(
 		"	.set	mips3					\n"
@@ -206,7 +206,7 @@ static __inline__ int atomic_sub_return(int i, atomic_t * v)
 		: "Ir" (i), "m" (v->counter)
 		: "memory");
 	} else if (cpu_has_llsc) {
-		unsigned long temp;
+		int temp;

 		__asm__ __volatile__(
 		"	.set	mips3					\n"
@@ -247,12 +247,12 @@ static __inline__ int atomic_sub_return(int i, atomic_t * v)
  */
 static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
 {
-	unsigned long result;
+	int result;

 	smp_llsc_mb();

 	if (cpu_has_llsc && R10000_LLSC_WAR) {
-		unsigned long temp;
+		int temp;

 		__asm__ __volatile__(
 		"	.set	mips3					\n"
@@ -270,7 +270,7 @@ static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
 		: "Ir" (i), "m" (v->counter)
 		: "memory");
 	} else if (cpu_has_llsc) {
-		unsigned long temp;
+		int temp;

 		__asm__ __volatile__(
 		"	.set	mips3					\n"
@@ -429,7 +429,7 @@ static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
 static __inline__ void atomic64_add(long i, atomic64_t * v)
 {
 	if (cpu_has_llsc && R10000_LLSC_WAR) {
-		unsigned long temp;
+		long temp;

 		__asm__ __volatile__(
 		"	.set	mips3					\n"
@@ -441,7 +441,7 @@ static __inline__ void atomic64_add(long i, atomic64_t * v)
 		: "=&r" (temp), "=m" (v->counter)
 		: "Ir" (i), "m" (v->counter));
 	} else if (cpu_has_llsc) {
-		unsigned long temp;
+		long temp;

 		__asm__ __volatile__(
 		"	.set	mips3					\n"
@@ -474,7 +474,7 @@ static __inline__ void atomic64_add(long i, atomic64_t * v)
 static __inline__ void atomic64_sub(long i, atomic64_t * v)
 {
 	if (cpu_has_llsc && R10000_LLSC_WAR) {
-		unsigned long temp;
+		long temp;

 		__asm__ __volatile__(
 		"	.set	mips3					\n"
@@ -486,7 +486,7 @@ static __inline__ void atomic64_sub(long i, atomic64_t * v)
 		: "=&r" (temp), "=m" (v->counter)
 		: "Ir" (i), "m" (v->counter));
 	} else if (cpu_has_llsc) {
-		unsigned long temp;
+		long temp;

 		__asm__ __volatile__(
 		"	.set	mips3					\n"
@@ -514,12 +514,12 @@ static __inline__ void atomic64_sub(long i, atomic64_t * v)
  */
 static __inline__ long atomic64_add_return(long i, atomic64_t * v)
 {
-	unsigned long result;
+	long result;

 	smp_llsc_mb();

 	if (cpu_has_llsc && R10000_LLSC_WAR) {
-		unsigned long temp;
+		long temp;

 		__asm__ __volatile__(
 		"	.set	mips3					\n"
@@ -533,7 +533,7 @@ static __inline__ long atomic64_add_return(long i, atomic64_t * v)
 		: "Ir" (i), "m" (v->counter)
 		: "memory");
 	} else if (cpu_has_llsc) {
-		unsigned long temp;
+		long temp;

 		__asm__ __volatile__(
 		"	.set	mips3					\n"
@@ -566,12 +566,12 @@ static __inline__ long atomic64_add_return(long i, atomic64_t * v)

 static __inline__ long atomic64_sub_return(long i, atomic64_t * v)
 {
-	unsigned long result;
+	long result;

 	smp_llsc_mb();

 	if (cpu_has_llsc && R10000_LLSC_WAR) {
-		unsigned long temp;
+		long temp;

 		__asm__ __volatile__(
 		"	.set	mips3					\n"
@@ -585,7 +585,7 @@ static __inline__ long atomic64_sub_return(long i, atomic64_t * v)
 		: "Ir" (i), "m" (v->counter)
 		: "memory");
 	} else if (cpu_has_llsc) {
-		unsigned long temp;
+		long temp;

 		__asm__ __volatile__(
 		"	.set	mips3					\n"
@@ -626,12 +626,12 @@ static __inline__ long atomic64_sub_return(long i, atomic64_t * v)
  */
 static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v)
 {
-	unsigned long result;
+	long result;

 	smp_llsc_mb();

 	if (cpu_has_llsc && R10000_LLSC_WAR) {
-		unsigned long temp;
+		long temp;

 		__asm__ __volatile__(
 		"	.set	mips3					\n"
@@ -649,7 +649,7 @@ static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v)
 		: "Ir" (i), "m" (v->counter)
 		: "memory");
 	} else if (cpu_has_llsc) {
-		unsigned long temp;
+		long temp;

 		__asm__ __volatile__(
 		"	.set	mips3					\n"
diff --git a/arch/mips/include/asm/mach-rc32434/gpio.h b/arch/mips/include/asm/mach-rc32434/gpio.h
index b5cf6457305a..3cb50d17b62d 100644
--- a/arch/mips/include/asm/mach-rc32434/gpio.h
+++ b/arch/mips/include/asm/mach-rc32434/gpio.h
@@ -80,11 +80,8 @@ struct rb532_gpio_reg {
 /* Compact Flash GPIO pin */
 #define CF_GPIO_NUM	13

-extern void set_434_reg(unsigned reg_offs, unsigned bit, unsigned len, unsigned val);
-extern unsigned get_434_reg(unsigned reg_offs);
-extern void set_latch_u5(unsigned char or_mask, unsigned char nand_mask);
-extern unsigned char get_latch_u5(void);
 extern void rb532_gpio_set_ilevel(int bit, unsigned gpio);
 extern void rb532_gpio_set_istat(int bit, unsigned gpio);
+extern void rb532_gpio_set_func(unsigned gpio);

 #endif /* _RC32434_GPIO_H_ */
diff --git a/arch/mips/include/asm/mach-rc32434/irq.h b/arch/mips/include/asm/mach-rc32434/irq.h
index 56738d8ec4e2..023a5b100ed0 100644
--- a/arch/mips/include/asm/mach-rc32434/irq.h
+++ b/arch/mips/include/asm/mach-rc32434/irq.h
@@ -30,4 +30,7 @@
 #define ETH0_RX_OVR_IRQ	(GROUP3_IRQ_BASE + 9)
 #define ETH0_TX_UND_IRQ	(GROUP3_IRQ_BASE + 10)

+#define GPIO_MAPPED_IRQ_BASE	GROUP4_IRQ_BASE
+#define GPIO_MAPPED_IRQ_GROUP	4
+
 #endif /* __ASM_RC32434_IRQ_H */
diff --git a/arch/mips/include/asm/mach-rc32434/rb.h b/arch/mips/include/asm/mach-rc32434/rb.h
index f25a84916703..6dc5f8df1f3e 100644
--- a/arch/mips/include/asm/mach-rc32434/rb.h
+++ b/arch/mips/include/asm/mach-rc32434/rb.h
@@ -83,4 +83,7 @@ struct mpmc_device {
 	void __iomem *base;
 };

+extern void set_latch_u5(unsigned char or_mask, unsigned char nand_mask);
+extern unsigned char get_latch_u5(void);
+
 #endif /* __ASM_RC32434_RB_H */
diff --git a/arch/mips/include/asm/ptrace.h b/arch/mips/include/asm/ptrace.h
index 1f30d16d4669..ce47118e52b7 100644
--- a/arch/mips/include/asm/ptrace.h
+++ b/arch/mips/include/asm/ptrace.h
@@ -105,7 +105,7 @@ struct pt_watch_regs {
 	enum pt_watch_style style;
 	union {
 		struct mips32_watch_regs mips32;
-		struct mips32_watch_regs mips64;
+		struct mips64_watch_regs mips64;
 	};
 };

diff --git a/arch/mips/include/asm/termios.h b/arch/mips/include/asm/termios.h
index a275661fa7e1..8f77f774a2a0 100644
--- a/arch/mips/include/asm/termios.h
+++ b/arch/mips/include/asm/termios.h
@@ -9,6 +9,7 @@
 #ifndef _ASM_TERMIOS_H
 #define _ASM_TERMIOS_H

+#include <linux/errno.h>
 #include <asm/termbits.h>
 #include <asm/ioctls.h>

@@ -94,38 +95,81 @@ struct termio {
 /*
  * Translate a "termio" structure into a "termios". Ugh.
  */
-#define user_termio_to_kernel_termios(termios, termio) \
-({ \
-	unsigned short tmp; \
-	get_user(tmp, &(termio)->c_iflag); \
-	(termios)->c_iflag = (0xffff0000 & ((termios)->c_iflag)) | tmp; \
-	get_user(tmp, &(termio)->c_oflag); \
-	(termios)->c_oflag = (0xffff0000 & ((termios)->c_oflag)) | tmp; \
-	get_user(tmp, &(termio)->c_cflag); \
-	(termios)->c_cflag = (0xffff0000 & ((termios)->c_cflag)) | tmp; \
-	get_user(tmp, &(termio)->c_lflag); \
-	(termios)->c_lflag = (0xffff0000 & ((termios)->c_lflag)) | tmp; \
-	get_user((termios)->c_line, &(termio)->c_line); \
-	copy_from_user((termios)->c_cc, (termio)->c_cc, NCC); \
-})
+static inline int user_termio_to_kernel_termios(struct ktermios *termios,
+	struct termio __user *termio)
+{
+	unsigned short iflag, oflag, cflag, lflag;
+	unsigned int err;
+
+	if (!access_ok(VERIFY_READ, termio, sizeof(struct termio)))
+		return -EFAULT;
+
+	err = __get_user(iflag, &termio->c_iflag);
+	termios->c_iflag = (termios->c_iflag & 0xffff0000) | iflag;
+	err |=__get_user(oflag, &termio->c_oflag);
+	termios->c_oflag = (termios->c_oflag & 0xffff0000) | oflag;
+	err |=__get_user(cflag, &termio->c_cflag);
+	termios->c_cflag = (termios->c_cflag & 0xffff0000) | cflag;
+	err |=__get_user(lflag, &termio->c_lflag);
+	termios->c_lflag = (termios->c_lflag & 0xffff0000) | lflag;
+	err |=__get_user(termios->c_line, &termio->c_line);
+	if (err)
+		return -EFAULT;
+
+	if (__copy_from_user(termios->c_cc, termio->c_cc, NCC))
+		return -EFAULT;
+
+	return 0;
+}

 /*
  * Translate a "termios" structure into a "termio". Ugh.
  */
-#define kernel_termios_to_user_termio(termio, termios) \
-({ \
-	put_user((termios)->c_iflag, &(termio)->c_iflag); \
-	put_user((termios)->c_oflag, &(termio)->c_oflag); \
-	put_user((termios)->c_cflag, &(termio)->c_cflag); \
-	put_user((termios)->c_lflag, &(termio)->c_lflag); \
-	put_user((termios)->c_line, &(termio)->c_line); \
-	copy_to_user((termio)->c_cc, (termios)->c_cc, NCC); \
-})
-
-#define user_termios_to_kernel_termios(k, u) copy_from_user(k, u, sizeof(struct termios2))
-#define kernel_termios_to_user_termios(u, k) copy_to_user(u, k, sizeof(struct termios2))
-#define user_termios_to_kernel_termios_1(k, u) copy_from_user(k, u, sizeof(struct termios))
-#define kernel_termios_to_user_termios_1(u, k) copy_to_user(u, k, sizeof(struct termios))
+static inline int kernel_termios_to_user_termio(struct termio __user *termio,
+	struct ktermios *termios)
+{
+	int err;
+
+	if (!access_ok(VERIFY_WRITE, termio, sizeof(struct termio)))
+		return -EFAULT;
+
+	err = __put_user(termios->c_iflag, &termio->c_iflag);
+	err |= __put_user(termios->c_oflag, &termio->c_oflag);
+	err |= __put_user(termios->c_cflag, &termio->c_cflag);
+	err |= __put_user(termios->c_lflag, &termio->c_lflag);
+	err |= __put_user(termios->c_line, &termio->c_line);
+	if (err)
+		return -EFAULT;
+
+	if (__copy_to_user(termio->c_cc, termios->c_cc, NCC))
+		return -EFAULT;
+
+	return 0;
+}
+
+static inline int user_termios_to_kernel_termios(struct ktermios __user *k,
+	struct termios2 *u)
+{
+	return copy_from_user(k, u, sizeof(struct termios2)) ? -EFAULT : 0;
+}
+
+static inline int kernel_termios_to_user_termios(struct termios2 __user *u,
+	struct ktermios *k)
+{
+	return copy_to_user(u, k, sizeof(struct termios2)) ? -EFAULT : 0;
+}
+
+static inline int user_termios_to_kernel_termios_1(struct ktermios *k,
+	struct termios __user *u)
+{
+	return copy_from_user(k, u, sizeof(struct termios)) ? -EFAULT : 0;
+}
+
+static inline int kernel_termios_to_user_termios_1(struct termios __user *u,
+	struct ktermios *k)
+{
+	return copy_to_user(u, k, sizeof(struct termios)) ? -EFAULT : 0;
+}

 #endif /* defined(__KERNEL__) */

diff --git a/arch/mips/include/asm/txx9/tx4939.h b/arch/mips/include/asm/txx9/tx4939.h
index 88badb423010..964ef7ede268 100644
--- a/arch/mips/include/asm/txx9/tx4939.h
+++ b/arch/mips/include/asm/txx9/tx4939.h
@@ -541,5 +541,6 @@ void tx4939_irq_init(void);
 int tx4939_irq(void);
 void tx4939_mtd_init(int ch);
 void tx4939_ata_init(void);
+void tx4939_rtc_init(void);

 #endif /* __ASM_TXX9_TX4939_H */
diff --git a/arch/mips/kernel/genex.S b/arch/mips/kernel/genex.S
index fb6f73148df2..8882e5766f27 100644
--- a/arch/mips/kernel/genex.S
+++ b/arch/mips/kernel/genex.S
@@ -458,7 +458,11 @@ NESTED(nmi_handler, PT_SIZE, sp)
 	BUILD_HANDLER fpe fpe fpe silent	/* #15 */
 	BUILD_HANDLER mdmx mdmx sti silent	/* #22 */
 #ifdef CONFIG_HARDWARE_WATCHPOINTS
-	BUILD_HANDLER watch watch sti silent	/* #23 */
+	/*
+	 * For watch, interrupts will be enabled after the watch
+	 * registers are read.
+	 */
+	BUILD_HANDLER watch watch cli silent	/* #23 */
 #else
 	BUILD_HANDLER watch watch sti verbose	/* #23 */
 #endif
diff --git a/arch/mips/kernel/mips-mt-fpaff.c b/arch/mips/kernel/mips-mt-fpaff.c
index 5e77a3a21f98..42461310b185 100644
--- a/arch/mips/kernel/mips-mt-fpaff.c
+++ b/arch/mips/kernel/mips-mt-fpaff.c
@@ -79,7 +79,8 @@ asmlinkage long mipsmt_sys_sched_setaffinity(pid_t pid, unsigned int len,

 	euid = current_euid();
 	retval = -EPERM;
-	if (euid != p->euid && euid != p->uid && !capable(CAP_SYS_NICE)) {
+	if (euid != p->cred->euid && euid != p->cred->uid &&
+	    !capable(CAP_SYS_NICE)) {
 		read_unlock(&tasklist_lock);
 		goto out_unlock;
 	}
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index f6083c6bfaa4..b2d7041341b8 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -944,6 +944,9 @@ asmlinkage void do_mdmx(struct pt_regs *regs)
 	force_sig(SIGILL, current);
 }

+/*
+ * Called with interrupts disabled.
+ */
 asmlinkage void do_watch(struct pt_regs *regs)
 {
 	u32 cause;
@@ -963,9 +966,12 @@ asmlinkage void do_watch(struct pt_regs *regs)
 	 */
 	if (test_tsk_thread_flag(current, TIF_LOAD_WATCH)) {
 		mips_read_watch_registers();
+		local_irq_enable();
 		force_sig(SIGTRAP, current);
-	} else
+	} else {
 		mips_clear_watch_registers();
+		local_irq_enable();
+	}
 }

 asmlinkage void do_mcheck(struct pt_regs *regs)
@@ -1582,7 +1588,11 @@ void __init set_handler(unsigned long offset, void *addr, unsigned long size)
 static char panic_null_cerr[] __cpuinitdata =
 	"Trying to set NULL cache error exception handler";

-/* Install uncached CPU exception handler */
+/*
+ * Install uncached CPU exception handler.
+ * This is suitable only for the cache error exception which is the only
+ * exception handler that is being run uncached.
+ */
 void __cpuinit set_uncached_handler(unsigned long offset, void *addr,
 	unsigned long size)
 {
@@ -1593,7 +1603,7 @@ void __cpuinit set_uncached_handler(unsigned long offset, void *addr,
 	unsigned long uncached_ebase = TO_UNCAC(ebase);
 #endif
 	if (cpu_has_mips_r2)
-		ebase += (read_c0_ebase() & 0x3ffff000);
+		uncached_ebase += (read_c0_ebase() & 0x3ffff000);

 	if (!addr)
 		panic(panic_null_cerr);
diff --git a/arch/mips/lib/memcpy-inatomic.S b/arch/mips/lib/memcpy-inatomic.S
index 736d0fb56a94..68853a038d3f 100644
--- a/arch/mips/lib/memcpy-inatomic.S
+++ b/arch/mips/lib/memcpy-inatomic.S
@@ -21,7 +21,7 @@
  * end of memory on some systems.  It's also a seriously bad idea on non
  * dma-coherent systems.
  */
-#if !defined(CONFIG_DMA_COHERENT) || !defined(CONFIG_DMA_IP27)
+#ifdef CONFIG_DMA_NONCOHERENT
 #undef CONFIG_CPU_HAS_PREFETCH
 #endif
 #ifdef CONFIG_MIPS_MALTA
diff --git a/arch/mips/lib/memcpy.S b/arch/mips/lib/memcpy.S
index c06cccf60bec..56a1f85a1ce8 100644
--- a/arch/mips/lib/memcpy.S
+++ b/arch/mips/lib/memcpy.S
@@ -21,7 +21,7 @@
  * end of memory on some systems.  It's also a seriously bad idea on non
  * dma-coherent systems.
  */
-#if !defined(CONFIG_DMA_COHERENT) || !defined(CONFIG_DMA_IP27)
+#ifdef CONFIG_DMA_NONCOHERENT
 #undef CONFIG_CPU_HAS_PREFETCH
 #endif
 #ifdef CONFIG_MIPS_MALTA
diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c
index 6e99665ae860..c43f4b26a690 100644
--- a/arch/mips/mm/c-r4k.c
+++ b/arch/mips/mm/c-r4k.c
@@ -618,15 +618,35 @@ static void r4k_dma_cache_inv(unsigned long addr, unsigned long size)
 	if (cpu_has_inclusive_pcaches) {
 		if (size >= scache_size)
 			r4k_blast_scache();
-		else
+		else {
+			unsigned long lsize = cpu_scache_line_size();
+			unsigned long almask = ~(lsize - 1);
+
+			/*
+			 * There is no clearly documented alignment requirement
+			 * for the cache instruction on MIPS processors and
+			 * some processors, among them the RM5200 and RM7000
+			 * QED processors will throw an address error for cache
+			 * hit ops with insufficient alignment.  Solved by
+			 * aligning the address to cache line size.
+			 */
+			cache_op(Hit_Writeback_Inv_SD, addr & almask);
+			cache_op(Hit_Writeback_Inv_SD,
+				 (addr + size - 1) & almask);
 			blast_inv_scache_range(addr, addr + size);
+		}
 		return;
 	}

 	if (cpu_has_safe_index_cacheops && size >= dcache_size) {
 		r4k_blast_dcache();
 	} else {
+		unsigned long lsize = cpu_dcache_line_size();
+		unsigned long almask = ~(lsize - 1);
+
 		R4600_HIT_CACHEOP_WAR_IMPL;
+		cache_op(Hit_Writeback_Inv_D, addr & almask);
+		cache_op(Hit_Writeback_Inv_D, (addr + size - 1) & almask);
 		blast_inv_dcache_range(addr, addr + size);
 	}

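The alignment added above is plain mask arithmetic. As an editor's illustration (not part of the patch; the 32-byte line size and the address are made up), this small user-space C sketch prints the cache-line boundaries the two cache_op() calls would target:

    #include <stdio.h>

    int main(void)
    {
        /* assumed line size; the kernel reads it via cpu_scache_line_size() */
        unsigned long lsize = 32;
        unsigned long almask = ~(lsize - 1);
        unsigned long addr = 0x8010123a;    /* arbitrary unaligned address */
        unsigned long size = 100;

        printf("first line: %#lx\n", addr & almask);               /* 0x80101220 */
        printf("last line:  %#lx\n", (addr + size - 1) & almask);  /* 0x80101280 */
        return 0;
    }

Rounding both ends of the range down to a line boundary is what keeps the RM5200/RM7000 parts mentioned in the comment from raising an address error on the hit-type cache ops.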
diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c
index fa636fc6b7b9..55767ad9f00e 100644
--- a/arch/mips/mm/fault.c
+++ b/arch/mips/mm/fault.c
@@ -97,7 +97,6 @@ good_area:
 		goto bad_area;
 	}

-survive:
 	/*
 	 * If for any reason at all we couldn't handle the fault,
 	 * make sure we exit gracefully rather than endlessly redo
@@ -167,21 +166,13 @@ no_context:
 	       field, regs->regs[31]);
 	die("Oops", regs);

-	/*
-	 * We ran out of memory, or some other thing happened to us that made
-	 * us unable to handle the page fault gracefully.
-	 */
 out_of_memory:
-	up_read(&mm->mmap_sem);
-	if (is_global_init(tsk)) {
-		yield();
-		down_read(&mm->mmap_sem);
-		goto survive;
-	}
-	printk("VM: killing process %s\n", tsk->comm);
-	if (user_mode(regs))
-		do_group_exit(SIGKILL);
-	goto no_context;
+	/*
+	 * We ran out of memory, call the OOM killer, and return the userspace
+	 * (which will retry the fault, or kill us if we got oom-killed).
+	 */
+	pagefault_out_of_memory();
+	return;

 do_sigbus:
 	up_read(&mm->mmap_sem);
diff --git a/arch/mips/pci/pci-rc32434.c b/arch/mips/pci/pci-rc32434.c
index 1c2821e2f494..71f7d27b0d4c 100644
--- a/arch/mips/pci/pci-rc32434.c
+++ b/arch/mips/pci/pci-rc32434.c
@@ -205,6 +205,8 @@ static int __init rc32434_pcibridge_init(void)

 static int __init rc32434_pci_init(void)
 {
+	void __iomem *io_map_base;
+
 	pr_info("PCI: Initializing PCI\n");

 	ioport_resource.start = rc32434_res_pci_io1.start;
@@ -212,6 +214,15 @@ static int __init rc32434_pci_init(void)

 	rc32434_pcibridge_init();

+	io_map_base = ioremap(rc32434_res_pci_io1.start,
+		rc32434_res_pci_io1.end - rc32434_res_pci_io1.start + 1);
+
+	if (!io_map_base)
+		return -ENOMEM;
+
+	rc32434_controller.io_map_base =
+		(unsigned long)io_map_base - rc32434_res_pci_io1.start;
+
 	register_pci_controller(&rc32434_controller);
 	rc32434_sync();

diff --git a/arch/mips/rb532/devices.c b/arch/mips/rb532/devices.c
index c1c29181bd46..4a5f05b662ae 100644
--- a/arch/mips/rb532/devices.c
+++ b/arch/mips/rb532/devices.c
@@ -24,6 +24,7 @@
 #include <linux/mtd/partitions.h>
 #include <linux/gpio_keys.h>
 #include <linux/input.h>
+#include <linux/serial_8250.h>

 #include <asm/bootinfo.h>

@@ -39,6 +40,29 @@
 #define ETH0_RX_DMA_ADDR (DMA0_BASE_ADDR + 0 * DMA_CHAN_OFFSET)
 #define ETH0_TX_DMA_ADDR (DMA0_BASE_ADDR + 1 * DMA_CHAN_OFFSET)

+extern unsigned int idt_cpu_freq;
+
+static struct mpmc_device dev3;
+
+void set_latch_u5(unsigned char or_mask, unsigned char nand_mask)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&dev3.lock, flags);
+
+	dev3.state = (dev3.state | or_mask) & ~nand_mask;
+	writeb(dev3.state, dev3.base);
+
+	spin_unlock_irqrestore(&dev3.lock, flags);
+}
+EXPORT_SYMBOL(set_latch_u5);
+
+unsigned char get_latch_u5(void)
+{
+	return dev3.state;
+}
+EXPORT_SYMBOL(get_latch_u5);
+
 static struct resource korina_dev0_res[] = {
 	{
 		.name = "korina_regs",
@@ -86,7 +110,7 @@ static struct korina_device korina_dev0_data = {
 static struct platform_device korina_dev0 = {
 	.id = -1,
 	.name = "korina",
-	.dev.platform_data = &korina_dev0_data,
+	.dev.driver_data = &korina_dev0_data,
 	.resource = korina_dev0_res,
 	.num_resources = ARRAY_SIZE(korina_dev0_res),
 };
@@ -214,12 +238,32 @@ static struct platform_device rb532_wdt = {
 	.num_resources = ARRAY_SIZE(rb532_wdt_res),
 };

+static struct plat_serial8250_port rb532_uart_res[] = {
+	{
+		.membase = (char *)KSEG1ADDR(REGBASE + UART0BASE),
+		.irq = UART0_IRQ,
+		.regshift = 2,
+		.iotype = UPIO_MEM,
+		.flags = UPF_BOOT_AUTOCONF,
+	},
+	{
+		.flags = 0,
+	}
+};
+
+static struct platform_device rb532_uart = {
+	.name = "serial8250",
+	.id = PLAT8250_DEV_PLATFORM,
+	.dev.platform_data = &rb532_uart_res,
+};
+
 static struct platform_device *rb532_devs[] = {
 	&korina_dev0,
 	&nand_slot0,
 	&cf_slot0,
 	&rb532_led,
 	&rb532_button,
+	&rb532_uart,
 	&rb532_wdt
 };

@@ -291,9 +335,20 @@ static int __init plat_setup_devices(void)
 	nand_slot0_res[0].start = readl(IDT434_REG_BASE + DEV2BASE);
 	nand_slot0_res[0].end = nand_slot0_res[0].start + 0x1000;

+	/* Read and map device controller 3 */
+	dev3.base = ioremap_nocache(readl(IDT434_REG_BASE + DEV3BASE), 1);
+
+	if (!dev3.base) {
+		printk(KERN_ERR "rb532: cannot remap device controller 3\n");
+		return -ENXIO;
+	}
+
 	/* Initialise the NAND device */
 	rb532_nand_setup();

+	/* set the uart clock to the current cpu frequency */
+	rb532_uart_res[0].uartclk = idt_cpu_freq;
+
 	return platform_add_devices(rb532_devs, ARRAY_SIZE(rb532_devs));
 }

diff --git a/arch/mips/rb532/gpio.c b/arch/mips/rb532/gpio.c
index 0e84c8ab6a39..37de05d595e7 100644
--- a/arch/mips/rb532/gpio.c
+++ b/arch/mips/rb532/gpio.c
@@ -41,8 +41,6 @@ struct rb532_gpio_chip {
 	void __iomem *regbase;
 };

-struct mpmc_device dev3;
-
 static struct resource rb532_gpio_reg0_res[] = {
 	{
 		.name = "gpio_reg0",
@@ -52,61 +50,6 @@ static struct resource rb532_gpio_reg0_res[] = {
 	}
 };

-static struct resource rb532_dev3_ctl_res[] = {
-	{
-		.name = "dev3_ctl",
-		.start = REGBASE + DEV3BASE,
-		.end = REGBASE + DEV3BASE + sizeof(struct dev_reg) - 1,
-		.flags = IORESOURCE_MEM,
-	}
-};
-
-void set_434_reg(unsigned reg_offs, unsigned bit, unsigned len, unsigned val)
-{
-	unsigned long flags;
-	unsigned data;
-	unsigned i = 0;
-
-	spin_lock_irqsave(&dev3.lock, flags);
-
-	data = readl(IDT434_REG_BASE + reg_offs);
-	for (i = 0; i != len; ++i) {
-		if (val & (1 << i))
-			data |= (1 << (i + bit));
-		else
-			data &= ~(1 << (i + bit));
-	}
-	writel(data, (IDT434_REG_BASE + reg_offs));
-
-	spin_unlock_irqrestore(&dev3.lock, flags);
-}
-EXPORT_SYMBOL(set_434_reg);
-
-unsigned get_434_reg(unsigned reg_offs)
-{
-	return readl(IDT434_REG_BASE + reg_offs);
-}
-EXPORT_SYMBOL(get_434_reg);
-
-void set_latch_u5(unsigned char or_mask, unsigned char nand_mask)
-{
-	unsigned long flags;
-
-	spin_lock_irqsave(&dev3.lock, flags);
-
-	dev3.state = (dev3.state | or_mask) & ~nand_mask;
-	writel(dev3.state, &dev3.base);
-
-	spin_unlock_irqrestore(&dev3.lock, flags);
-}
-EXPORT_SYMBOL(set_latch_u5);
-
-unsigned char get_latch_u5(void)
-{
-	return dev3.state;
-}
-EXPORT_SYMBOL(get_latch_u5);
-
 /* rb532_set_bit - sanely set a bit
  *
  * bitval: new value for the bit
@@ -119,13 +62,11 @@ static inline void rb532_set_bit(unsigned bitval,
 	unsigned long flags;
 	u32 val;

-	bitval = !!bitval;              /* map parameter to {0,1} */
-
 	local_irq_save(flags);

 	val = readl(ioaddr);
-	val &= ~( ~bitval << offset );   /* unset bit if bitval == 0 */
-	val |= ( bitval << offset );     /* set bit if bitval == 1 */
+	val &= ~(!bitval << offset);   /* unset bit if bitval == 0 */
+	val |= (!!bitval << offset);   /* set bit if bitval == 1 */
 	writel(val, ioaddr);

 	local_irq_restore(flags);
@@ -171,8 +112,8 @@ static int rb532_gpio_direction_input(struct gpio_chip *chip, unsigned offset)

 	gpch = container_of(chip, struct rb532_gpio_chip, chip);

-	if (rb532_get_bit(offset, gpch->regbase + GPIOFUNC))
-		return 1;	/* alternate function, GPIOCFG is ignored */
+	/* disable alternate function in case it's set */
+	rb532_set_bit(0, offset, gpch->regbase + GPIOFUNC);

 	rb532_set_bit(0, offset, gpch->regbase + GPIOCFG);
 	return 0;
@@ -188,8 +129,8 @@ static int rb532_gpio_direction_output(struct gpio_chip *chip,

 	gpch = container_of(chip, struct rb532_gpio_chip, chip);

-	if (rb532_get_bit(offset, gpch->regbase + GPIOFUNC))
-		return 1;	/* alternate function, GPIOCFG is ignored */
+	/* disable alternate function in case it's set */
+	rb532_set_bit(0, offset, gpch->regbase + GPIOFUNC);

 	/* set the initial output value */
 	rb532_set_bit(value, offset, gpch->regbase + GPIOD);
@@ -233,10 +174,11 @@ EXPORT_SYMBOL(rb532_gpio_set_istat);
 /*
  * Configure GPIO alternate function
  */
-static void rb532_gpio_set_func(int bit, unsigned gpio)
+void rb532_gpio_set_func(unsigned gpio)
 {
-	rb532_set_bit(bit, gpio, rb532_gpio_chip->regbase + GPIOFUNC);
+	rb532_set_bit(1, gpio, rb532_gpio_chip->regbase + GPIOFUNC);
 }
+EXPORT_SYMBOL(rb532_gpio_set_func);

 int __init rb532_gpio_init(void)
 {
@@ -253,20 +195,6 @@ int __init rb532_gpio_init(void)
 	/* Register our GPIO chip */
 	gpiochip_add(&rb532_gpio_chip->chip);

-	r = rb532_dev3_ctl_res;
-	dev3.base = ioremap_nocache(r->start, r->end - r->start);
-
-	if (!dev3.base) {
-		printk(KERN_ERR "rb532: cannot remap device controller 3\n");
-		return -ENXIO;
-	}
-
-	/* configure CF_GPIO_NUM as CFRDY IRQ source */
-	rb532_gpio_set_func(0, CF_GPIO_NUM);
-	rb532_gpio_direction_input(&rb532_gpio_chip->chip, CF_GPIO_NUM);
-	rb532_gpio_set_ilevel(1, CF_GPIO_NUM);
-	rb532_gpio_set_istat(0, CF_GPIO_NUM);
-
 	return 0;
 }
 arch_initcall(rb532_gpio_init);
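The rb532_set_bit() change above drops the up-front normalisation of bitval and instead applies ! and !! directly in the masking expressions, so any non-zero bitval behaves as 1. A small stand-alone sketch of that read-modify-write idiom (editor's illustration with made-up values, not part of the patch):

    #include <stdio.h>

    static unsigned int set_bit_sane(unsigned int val, unsigned int bitval,
                                     unsigned int offset)
    {
        val &= ~(!bitval << offset);   /* clear the bit when bitval == 0 */
        val |= (!!bitval << offset);   /* set the bit when bitval != 0 */
        return val;
    }

    int main(void)
    {
        printf("%#x\n", set_bit_sane(0xf0, 4, 1));   /* 0xf2: bit 1 set */
        printf("%#x\n", set_bit_sane(0xf0, 0, 4));   /* 0xe0: bit 4 cleared */
        return 0;
    }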
diff --git a/arch/mips/rb532/irq.c b/arch/mips/rb532/irq.c
index 549b46d2fcee..53eeb5e7bc5b 100644
--- a/arch/mips/rb532/irq.c
+++ b/arch/mips/rb532/irq.c
@@ -46,6 +46,7 @@
 #include <asm/system.h>

 #include <asm/mach-rc32434/irq.h>
+#include <asm/mach-rc32434/gpio.h>

 struct intr_group {
 	u32 mask;	/* mask of valid bits in pending/mask registers */
@@ -150,6 +151,9 @@ static void rb532_disable_irq(unsigned int irq_nr)
 		mask |= intr_bit;
 		WRITE_MASK(addr, mask);

+		if (group == GPIO_MAPPED_IRQ_GROUP)
+			rb532_gpio_set_istat(0, irq_nr - GPIO_MAPPED_IRQ_BASE);
+
 		/*
 		 * if there are no more interrupts enabled in this
 		 * group, disable corresponding IP
@@ -165,12 +169,35 @@ static void rb532_mask_and_ack_irq(unsigned int irq_nr)
 	ack_local_irq(group_to_ip(irq_to_group(irq_nr)));
 }

+static int rb532_set_type(unsigned int irq_nr, unsigned type)
+{
+	int gpio = irq_nr - GPIO_MAPPED_IRQ_BASE;
+	int group = irq_to_group(irq_nr);
+
+	if (group != GPIO_MAPPED_IRQ_GROUP)
+		return (type == IRQ_TYPE_LEVEL_HIGH) ? 0 : -EINVAL;
+
+	switch (type) {
+	case IRQ_TYPE_LEVEL_HIGH:
+		rb532_gpio_set_ilevel(1, gpio);
+		break;
+	case IRQ_TYPE_LEVEL_LOW:
+		rb532_gpio_set_ilevel(0, gpio);
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
 static struct irq_chip rc32434_irq_type = {
 	.name = "RB532",
 	.ack = rb532_disable_irq,
 	.mask = rb532_disable_irq,
 	.mask_ack = rb532_mask_and_ack_irq,
 	.unmask = rb532_enable_irq,
+	.set_type = rb532_set_type,
 };

 void __init arch_init_irq(void)
diff --git a/arch/mips/rb532/serial.c b/arch/mips/rb532/serial.c
index 3e0d7ec3a579..00ed19f0bdb5 100644
--- a/arch/mips/rb532/serial.c
+++ b/arch/mips/rb532/serial.c
@@ -36,7 +36,7 @@
 extern unsigned int idt_cpu_freq;

 static struct uart_port rb532_uart = {
-	.type = PORT_16550A,
+	.flags = UPF_BOOT_AUTOCONF,
 	.line = 0,
 	.irq = UART0_IRQ,
 	.iotype = UPIO_MEM,
diff --git a/arch/mips/txx9/generic/setup_tx4939.c b/arch/mips/txx9/generic/setup_tx4939.c
index 6c0049a5bbc1..55440967b3a8 100644
--- a/arch/mips/txx9/generic/setup_tx4939.c
+++ b/arch/mips/txx9/generic/setup_tx4939.c
@@ -435,6 +435,28 @@ void __init tx4939_ata_init(void)
 	platform_device_register(&ata1_dev);
 }

+void __init tx4939_rtc_init(void)
+{
+	static struct resource res[] = {
+		{
+			.start = TX4939_RTC_REG & 0xfffffffffULL,
+			.end = (TX4939_RTC_REG & 0xfffffffffULL) + 0x100 - 1,
+			.flags = IORESOURCE_MEM,
+		}, {
+			.start = TXX9_IRQ_BASE + TX4939_IR_RTC,
+			.flags = IORESOURCE_IRQ,
+		},
+	};
+	static struct platform_device rtc_dev = {
+		.name = "tx4939rtc",
+		.id = -1,
+		.num_resources = ARRAY_SIZE(res),
+		.resource = res,
+	};
+
+	platform_device_register(&rtc_dev);
+}
+
 static void __init tx4939_stop_unused_modules(void)
 {
 	__u64 pcfg, rst = 0, ckd = 0;
diff --git a/arch/mips/txx9/rbtx4939/setup.c b/arch/mips/txx9/rbtx4939/setup.c
index 98fbd9391bf8..656603b85b71 100644
--- a/arch/mips/txx9/rbtx4939/setup.c
+++ b/arch/mips/txx9/rbtx4939/setup.c
@@ -336,6 +336,7 @@ static void __init rbtx4939_device_init(void)
 	rbtx4939_led_setup();
 	tx4939_wdt_init();
 	tx4939_ata_init();
+	tx4939_rtc_init();
 }

 static void __init rbtx4939_setup(void)
diff --git a/drivers/char/selection.c b/drivers/char/selection.c
index f29fbe9b8ed7..cb8ca5698963 100644
--- a/drivers/char/selection.c
+++ b/drivers/char/selection.c
@@ -268,7 +268,7 @@ int set_selection(const struct tiocl_selection __user *sel, struct tty_struct *t

 	/* Allocate a new buffer before freeing the old one ... */
 	multiplier = use_unicode ? 3 : 1;  /* chars can take up to 3 bytes */
-	bp = kmalloc((sel_end-sel_start)/2*multiplier+1, GFP_KERNEL);
+	bp = kmalloc(((sel_end-sel_start)/2+1)*multiplier, GFP_KERNEL);
 	if (!bp) {
 		printk(KERN_WARNING "selection: kmalloc() failed\n");
 		clear_selection();
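The kmalloc() change above moves the "+1" slack inside the multiplied term, so the spare slot is also sized for multi-byte characters. A quick worked comparison of the two formulas (editor's illustration, numbers made up):

    #include <stdio.h>

    int main(void)
    {
        unsigned int span = 10;        /* sel_end - sel_start, for example */
        unsigned int multiplier = 3;   /* up to 3 bytes per character */

        printf("old: %u bytes\n", span / 2 * multiplier + 1);    /* 16 */
        printf("new: %u bytes\n", (span / 2 + 1) * multiplier);  /* 18 */
        return 0;
    }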
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index 9da581452874..6915fb82d0b0 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -136,7 +136,7 @@ drm_gem_object_alloc(struct drm_device *dev, size_t size)
 	obj = kcalloc(1, sizeof(*obj), GFP_KERNEL);

 	obj->dev = dev;
-	obj->filp = shmem_file_setup("drm mm object", size, 0);
+	obj->filp = shmem_file_setup("drm mm object", size, VM_NORESERVE);
 	if (IS_ERR(obj->filp)) {
 		kfree(obj);
 		return NULL;
diff --git a/drivers/net/igb/e1000_82575.c b/drivers/net/igb/e1000_82575.c
index f5e2e7235fcb..13ca73f96ec6 100644
--- a/drivers/net/igb/e1000_82575.c
+++ b/drivers/net/igb/e1000_82575.c
@@ -699,11 +699,18 @@ static s32 igb_check_for_link_82575(struct e1000_hw *hw)

 	/* SGMII link check is done through the PCS register. */
 	if ((hw->phy.media_type != e1000_media_type_copper) ||
-	    (igb_sgmii_active_82575(hw)))
+	    (igb_sgmii_active_82575(hw))) {
 		ret_val = igb_get_pcs_speed_and_duplex_82575(hw, &speed,
 							     &duplex);
-	else
+		/*
+		 * Use this flag to determine if link needs to be checked or
+		 * not. If we have link clear the flag so that we do not
+		 * continue to check for link.
+		 */
+		hw->mac.get_link_status = !hw->mac.serdes_has_link;
+	} else {
 		ret_val = igb_check_for_copper_link(hw);
+	}

 	return ret_val;
 }
diff --git a/drivers/net/igb/igb.h b/drivers/net/igb/igb.h
index 5a27825cc48a..aebef8e48e76 100644
--- a/drivers/net/igb/igb.h
+++ b/drivers/net/igb/igb.h
@@ -300,11 +300,10 @@ struct igb_adapter {

 #define IGB_FLAG_HAS_MSI           (1 << 0)
 #define IGB_FLAG_MSI_ENABLE        (1 << 1)
-#define IGB_FLAG_HAS_DCA           (1 << 2)
-#define IGB_FLAG_DCA_ENABLED       (1 << 3)
-#define IGB_FLAG_IN_NETPOLL        (1 << 5)
-#define IGB_FLAG_QUAD_PORT_A       (1 << 6)
-#define IGB_FLAG_NEED_CTX_IDX      (1 << 7)
+#define IGB_FLAG_DCA_ENABLED       (1 << 2)
+#define IGB_FLAG_IN_NETPOLL        (1 << 3)
+#define IGB_FLAG_QUAD_PORT_A       (1 << 4)
+#define IGB_FLAG_NEED_CTX_IDX      (1 << 5)

 enum e1000_state_t {
 	__IGB_TESTING,
diff --git a/drivers/net/igb/igb_main.c b/drivers/net/igb/igb_main.c index b82b0fb2056c..a50db5398fa5 100644 --- a/drivers/net/igb/igb_main.c +++ b/drivers/net/igb/igb_main.c | |||
@@ -206,10 +206,11 @@ static int __init igb_init_module(void) | |||
206 | 206 | ||
207 | global_quad_port_a = 0; | 207 | global_quad_port_a = 0; |
208 | 208 | ||
209 | ret = pci_register_driver(&igb_driver); | ||
210 | #ifdef CONFIG_IGB_DCA | 209 | #ifdef CONFIG_IGB_DCA |
211 | dca_register_notify(&dca_notifier); | 210 | dca_register_notify(&dca_notifier); |
212 | #endif | 211 | #endif |
212 | |||
213 | ret = pci_register_driver(&igb_driver); | ||
213 | return ret; | 214 | return ret; |
214 | } | 215 | } |
215 | 216 | ||
@@ -1156,11 +1157,10 @@ static int __devinit igb_probe(struct pci_dev *pdev, | |||
1156 | 1157 | ||
1157 | /* set flags */ | 1158 | /* set flags */ |
1158 | switch (hw->mac.type) { | 1159 | switch (hw->mac.type) { |
1159 | case e1000_82576: | ||
1160 | case e1000_82575: | 1160 | case e1000_82575: |
1161 | adapter->flags |= IGB_FLAG_HAS_DCA; | ||
1162 | adapter->flags |= IGB_FLAG_NEED_CTX_IDX; | 1161 | adapter->flags |= IGB_FLAG_NEED_CTX_IDX; |
1163 | break; | 1162 | break; |
1163 | case e1000_82576: | ||
1164 | default: | 1164 | default: |
1165 | break; | 1165 | break; |
1166 | } | 1166 | } |
@@ -1310,8 +1310,7 @@ static int __devinit igb_probe(struct pci_dev *pdev, | |||
1310 | goto err_register; | 1310 | goto err_register; |
1311 | 1311 | ||
1312 | #ifdef CONFIG_IGB_DCA | 1312 | #ifdef CONFIG_IGB_DCA |
1313 | if ((adapter->flags & IGB_FLAG_HAS_DCA) && | 1313 | if (dca_add_requester(&pdev->dev) == 0) { |
1314 | (dca_add_requester(&pdev->dev) == 0)) { | ||
1315 | adapter->flags |= IGB_FLAG_DCA_ENABLED; | 1314 | adapter->flags |= IGB_FLAG_DCA_ENABLED; |
1316 | dev_info(&pdev->dev, "DCA enabled\n"); | 1315 | dev_info(&pdev->dev, "DCA enabled\n"); |
1317 | /* Always use CB2 mode, difference is masked | 1316 | /* Always use CB2 mode, difference is masked |
@@ -1835,11 +1834,11 @@ static void igb_setup_rctl(struct igb_adapter *adapter) | |||
1835 | rctl |= E1000_RCTL_SECRC; | 1834 | rctl |= E1000_RCTL_SECRC; |
1836 | 1835 | ||
1837 | /* | 1836 | /* |
1838 | * disable store bad packets, long packet enable, and clear size bits. | 1837 | * disable store bad packets and clear size bits. |
1839 | */ | 1838 | */ |
1840 | rctl &= ~(E1000_RCTL_SBP | E1000_RCTL_LPE | E1000_RCTL_SZ_256); | 1839 | rctl &= ~(E1000_RCTL_SBP | E1000_RCTL_SZ_256); |
1841 | 1840 | ||
1842 | if (adapter->netdev->mtu > ETH_DATA_LEN) | 1841 | /* enable LPE when to prevent packets larger than max_frame_size */ |
1843 | rctl |= E1000_RCTL_LPE; | 1842 | rctl |= E1000_RCTL_LPE; |
1844 | 1843 | ||
1845 | /* Setup buffer sizes */ | 1844 | /* Setup buffer sizes */ |
@@ -1865,7 +1864,7 @@ static void igb_setup_rctl(struct igb_adapter *adapter) | |||
1865 | */ | 1864 | */ |
1866 | /* allocations using alloc_page take too long for regular MTU | 1865 | /* allocations using alloc_page take too long for regular MTU |
1867 | * so only enable packet split for jumbo frames */ | 1866 | * so only enable packet split for jumbo frames */ |
1868 | if (rctl & E1000_RCTL_LPE) { | 1867 | if (adapter->netdev->mtu > ETH_DATA_LEN) { |
1869 | adapter->rx_ps_hdr_size = IGB_RXBUFFER_128; | 1868 | adapter->rx_ps_hdr_size = IGB_RXBUFFER_128; |
1870 | srrctl |= adapter->rx_ps_hdr_size << | 1869 | srrctl |= adapter->rx_ps_hdr_size << |
1871 | E1000_SRRCTL_BSIZEHDRSIZE_SHIFT; | 1870 | E1000_SRRCTL_BSIZEHDRSIZE_SHIFT; |
@@ -3473,19 +3472,16 @@ static int __igb_notify_dca(struct device *dev, void *data) | |||
3473 | struct e1000_hw *hw = &adapter->hw; | 3472 | struct e1000_hw *hw = &adapter->hw; |
3474 | unsigned long event = *(unsigned long *)data; | 3473 | unsigned long event = *(unsigned long *)data; |
3475 | 3474 | ||
3476 | if (!(adapter->flags & IGB_FLAG_HAS_DCA)) | ||
3477 | goto out; | ||
3478 | |||
3479 | switch (event) { | 3475 | switch (event) { |
3480 | case DCA_PROVIDER_ADD: | 3476 | case DCA_PROVIDER_ADD: |
3481 | /* if already enabled, don't do it again */ | 3477 | /* if already enabled, don't do it again */ |
3482 | if (adapter->flags & IGB_FLAG_DCA_ENABLED) | 3478 | if (adapter->flags & IGB_FLAG_DCA_ENABLED) |
3483 | break; | 3479 | break; |
3484 | adapter->flags |= IGB_FLAG_DCA_ENABLED; | ||
3485 | /* Always use CB2 mode, difference is masked | 3480 | /* Always use CB2 mode, difference is masked |
3486 | * in the CB driver. */ | 3481 | * in the CB driver. */ |
3487 | wr32(E1000_DCA_CTRL, 2); | 3482 | wr32(E1000_DCA_CTRL, 2); |
3488 | if (dca_add_requester(dev) == 0) { | 3483 | if (dca_add_requester(dev) == 0) { |
3484 | adapter->flags |= IGB_FLAG_DCA_ENABLED; | ||
3489 | dev_info(&adapter->pdev->dev, "DCA enabled\n"); | 3485 | dev_info(&adapter->pdev->dev, "DCA enabled\n"); |
3490 | igb_setup_dca(adapter); | 3486 | igb_setup_dca(adapter); |
3491 | break; | 3487 | break; |
@@ -3502,7 +3498,7 @@ static int __igb_notify_dca(struct device *dev, void *data) | |||
3502 | } | 3498 | } |
3503 | break; | 3499 | break; |
3504 | } | 3500 | } |
3505 | out: | 3501 | |
3506 | return 0; | 3502 | return 0; |
3507 | } | 3503 | } |
3508 | 3504 | ||
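
Two related cleanups in the igb hunks above: dca_register_notify() is now called before pci_register_driver(), so a DCA provider that is already loaded is seen before any adapter is probed, and IGB_FLAG_DCA_ENABLED is only set once dca_add_requester() actually succeeds, which makes the separate IGB_FLAG_HAS_DCA capability bit unnecessary. The user-space sketch below models only that second point; add_requester(), notify_dca() and the flag word are illustrative stand-ins, not the igb or DCA core APIs.

/* Sketch of "set the enabled flag only when the provider accepts us".
 * Everything here is illustrative; it is not the igb or DCA API. */
#include <stdio.h>
#include <stdbool.h>

#define FLAG_DCA_ENABLED  (1u << 2)

static bool provider_present;              /* toggled by the fake notifier below */

static int add_requester(void)
{
	return provider_present ? 0 : -19;     /* -ENODEV when no provider */
}

static void notify_dca(unsigned int *flags, bool provider_added)
{
	provider_present = provider_added;

	if (provider_added) {
		if (*flags & FLAG_DCA_ENABLED)     /* already on: nothing to do */
			return;
		if (add_requester() == 0) {        /* only now claim DCA works */
			*flags |= FLAG_DCA_ENABLED;
			printf("DCA enabled\n");
		}
	} else if (*flags & FLAG_DCA_ENABLED) {
		*flags &= ~FLAG_DCA_ENABLED;
		printf("DCA disabled\n");
	}
}

int main(void)
{
	unsigned int flags = 0;

	notify_dca(&flags, true);    /* provider loads: flag set after success */
	notify_dca(&flags, true);    /* duplicate add: ignored */
	notify_dca(&flags, false);   /* provider unloads: flag cleared */
	return 0;
}
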
diff --git a/drivers/net/sfc/efx.c b/drivers/net/sfc/efx.c index 7673fd92eaf5..ab0e09bf154d 100644 --- a/drivers/net/sfc/efx.c +++ b/drivers/net/sfc/efx.c | |||
@@ -676,9 +676,8 @@ static int efx_init_port(struct efx_nic *efx) | |||
676 | rc = efx->phy_op->init(efx); | 676 | rc = efx->phy_op->init(efx); |
677 | if (rc) | 677 | if (rc) |
678 | return rc; | 678 | return rc; |
679 | efx->phy_op->reconfigure(efx); | ||
680 | |||
681 | mutex_lock(&efx->mac_lock); | 679 | mutex_lock(&efx->mac_lock); |
680 | efx->phy_op->reconfigure(efx); | ||
682 | rc = falcon_switch_mac(efx); | 681 | rc = falcon_switch_mac(efx); |
683 | mutex_unlock(&efx->mac_lock); | 682 | mutex_unlock(&efx->mac_lock); |
684 | if (rc) | 683 | if (rc) |
@@ -686,7 +685,7 @@ static int efx_init_port(struct efx_nic *efx) | |||
686 | efx->mac_op->reconfigure(efx); | 685 | efx->mac_op->reconfigure(efx); |
687 | 686 | ||
688 | efx->port_initialized = true; | 687 | efx->port_initialized = true; |
689 | efx->stats_enabled = true; | 688 | efx_stats_enable(efx); |
690 | return 0; | 689 | return 0; |
691 | 690 | ||
692 | fail: | 691 | fail: |
@@ -735,6 +734,7 @@ static void efx_fini_port(struct efx_nic *efx) | |||
735 | if (!efx->port_initialized) | 734 | if (!efx->port_initialized) |
736 | return; | 735 | return; |
737 | 736 | ||
737 | efx_stats_disable(efx); | ||
738 | efx->phy_op->fini(efx); | 738 | efx->phy_op->fini(efx); |
739 | efx->port_initialized = false; | 739 | efx->port_initialized = false; |
740 | 740 | ||
@@ -1361,6 +1361,20 @@ static int efx_net_stop(struct net_device *net_dev) | |||
1361 | return 0; | 1361 | return 0; |
1362 | } | 1362 | } |
1363 | 1363 | ||
1364 | void efx_stats_disable(struct efx_nic *efx) | ||
1365 | { | ||
1366 | spin_lock(&efx->stats_lock); | ||
1367 | ++efx->stats_disable_count; | ||
1368 | spin_unlock(&efx->stats_lock); | ||
1369 | } | ||
1370 | |||
1371 | void efx_stats_enable(struct efx_nic *efx) | ||
1372 | { | ||
1373 | spin_lock(&efx->stats_lock); | ||
1374 | --efx->stats_disable_count; | ||
1375 | spin_unlock(&efx->stats_lock); | ||
1376 | } | ||
1377 | |||
1364 | /* Context: process, dev_base_lock or RTNL held, non-blocking. */ | 1378 | /* Context: process, dev_base_lock or RTNL held, non-blocking. */ |
1365 | static struct net_device_stats *efx_net_stats(struct net_device *net_dev) | 1379 | static struct net_device_stats *efx_net_stats(struct net_device *net_dev) |
1366 | { | 1380 | { |
@@ -1369,12 +1383,12 @@ static struct net_device_stats *efx_net_stats(struct net_device *net_dev) | |||
1369 | struct net_device_stats *stats = &net_dev->stats; | 1383 | struct net_device_stats *stats = &net_dev->stats; |
1370 | 1384 | ||
1371 | /* Update stats if possible, but do not wait if another thread | 1385 | /* Update stats if possible, but do not wait if another thread |
1372 | * is updating them (or resetting the NIC); slightly stale | 1386 | * is updating them or if MAC stats fetches are temporarily |
1373 | * stats are acceptable. | 1387 | * disabled; slightly stale stats are acceptable. |
1374 | */ | 1388 | */ |
1375 | if (!spin_trylock(&efx->stats_lock)) | 1389 | if (!spin_trylock(&efx->stats_lock)) |
1376 | return stats; | 1390 | return stats; |
1377 | if (efx->stats_enabled) { | 1391 | if (!efx->stats_disable_count) { |
1378 | efx->mac_op->update_stats(efx); | 1392 | efx->mac_op->update_stats(efx); |
1379 | falcon_update_nic_stats(efx); | 1393 | falcon_update_nic_stats(efx); |
1380 | } | 1394 | } |
@@ -1622,16 +1636,12 @@ static void efx_unregister_netdev(struct efx_nic *efx) | |||
1622 | 1636 | ||
1623 | /* Tears down the entire software state and most of the hardware state | 1637 | /* Tears down the entire software state and most of the hardware state |
1624 | * before reset. */ | 1638 | * before reset. */ |
1625 | void efx_reset_down(struct efx_nic *efx, struct ethtool_cmd *ecmd) | 1639 | void efx_reset_down(struct efx_nic *efx, enum reset_type method, |
1640 | struct ethtool_cmd *ecmd) | ||
1626 | { | 1641 | { |
1627 | EFX_ASSERT_RESET_SERIALISED(efx); | 1642 | EFX_ASSERT_RESET_SERIALISED(efx); |
1628 | 1643 | ||
1629 | /* The net_dev->get_stats handler is quite slow, and will fail | 1644 | efx_stats_disable(efx); |
1630 | * if a fetch is pending over reset. Serialise against it. */ | ||
1631 | spin_lock(&efx->stats_lock); | ||
1632 | efx->stats_enabled = false; | ||
1633 | spin_unlock(&efx->stats_lock); | ||
1634 | |||
1635 | efx_stop_all(efx); | 1645 | efx_stop_all(efx); |
1636 | mutex_lock(&efx->mac_lock); | 1646 | mutex_lock(&efx->mac_lock); |
1637 | mutex_lock(&efx->spi_lock); | 1647 | mutex_lock(&efx->spi_lock); |
@@ -1639,6 +1649,8 @@ void efx_reset_down(struct efx_nic *efx, struct ethtool_cmd *ecmd) | |||
1639 | efx->phy_op->get_settings(efx, ecmd); | 1649 | efx->phy_op->get_settings(efx, ecmd); |
1640 | 1650 | ||
1641 | efx_fini_channels(efx); | 1651 | efx_fini_channels(efx); |
1652 | if (efx->port_initialized && method != RESET_TYPE_INVISIBLE) | ||
1653 | efx->phy_op->fini(efx); | ||
1642 | } | 1654 | } |
1643 | 1655 | ||
1644 | /* This function will always ensure that the locks acquired in | 1656 | /* This function will always ensure that the locks acquired in |
@@ -1646,7 +1658,8 @@ void efx_reset_down(struct efx_nic *efx, struct ethtool_cmd *ecmd) | |||
1646 | * that we were unable to reinitialise the hardware, and the | 1658 | * that we were unable to reinitialise the hardware, and the |
1647 | * driver should be disabled. If ok is false, then the rx and tx | 1659 | * driver should be disabled. If ok is false, then the rx and tx |
1648 | * engines are not restarted, pending a RESET_DISABLE. */ | 1660 | * engines are not restarted, pending a RESET_DISABLE. */ |
1649 | int efx_reset_up(struct efx_nic *efx, struct ethtool_cmd *ecmd, bool ok) | 1661 | int efx_reset_up(struct efx_nic *efx, enum reset_type method, |
1662 | struct ethtool_cmd *ecmd, bool ok) | ||
1650 | { | 1663 | { |
1651 | int rc; | 1664 | int rc; |
1652 | 1665 | ||
@@ -1658,6 +1671,15 @@ int efx_reset_up(struct efx_nic *efx, struct ethtool_cmd *ecmd, bool ok) | |||
1658 | ok = false; | 1671 | ok = false; |
1659 | } | 1672 | } |
1660 | 1673 | ||
1674 | if (efx->port_initialized && method != RESET_TYPE_INVISIBLE) { | ||
1675 | if (ok) { | ||
1676 | rc = efx->phy_op->init(efx); | ||
1677 | if (rc) | ||
1678 | ok = false; | ||
1679 | } else | ||
1680 | efx->port_initialized = false; | ||
1681 | } | ||
1682 | |||
1661 | if (ok) { | 1683 | if (ok) { |
1662 | efx_init_channels(efx); | 1684 | efx_init_channels(efx); |
1663 | 1685 | ||
@@ -1670,7 +1692,7 @@ int efx_reset_up(struct efx_nic *efx, struct ethtool_cmd *ecmd, bool ok) | |||
1670 | 1692 | ||
1671 | if (ok) { | 1693 | if (ok) { |
1672 | efx_start_all(efx); | 1694 | efx_start_all(efx); |
1673 | efx->stats_enabled = true; | 1695 | efx_stats_enable(efx); |
1674 | } | 1696 | } |
1675 | return rc; | 1697 | return rc; |
1676 | } | 1698 | } |
@@ -1702,7 +1724,7 @@ static int efx_reset(struct efx_nic *efx) | |||
1702 | 1724 | ||
1703 | EFX_INFO(efx, "resetting (%d)\n", method); | 1725 | EFX_INFO(efx, "resetting (%d)\n", method); |
1704 | 1726 | ||
1705 | efx_reset_down(efx, &ecmd); | 1727 | efx_reset_down(efx, method, &ecmd); |
1706 | 1728 | ||
1707 | rc = falcon_reset_hw(efx, method); | 1729 | rc = falcon_reset_hw(efx, method); |
1708 | if (rc) { | 1730 | if (rc) { |
@@ -1721,10 +1743,10 @@ static int efx_reset(struct efx_nic *efx) | |||
1721 | 1743 | ||
1722 | /* Leave device stopped if necessary */ | 1744 | /* Leave device stopped if necessary */ |
1723 | if (method == RESET_TYPE_DISABLE) { | 1745 | if (method == RESET_TYPE_DISABLE) { |
1724 | efx_reset_up(efx, &ecmd, false); | 1746 | efx_reset_up(efx, method, &ecmd, false); |
1725 | rc = -EIO; | 1747 | rc = -EIO; |
1726 | } else { | 1748 | } else { |
1727 | rc = efx_reset_up(efx, &ecmd, true); | 1749 | rc = efx_reset_up(efx, method, &ecmd, true); |
1728 | } | 1750 | } |
1729 | 1751 | ||
1730 | out_disable: | 1752 | out_disable: |
@@ -1876,6 +1898,7 @@ static int efx_init_struct(struct efx_nic *efx, struct efx_nic_type *type, | |||
1876 | efx->rx_checksum_enabled = true; | 1898 | efx->rx_checksum_enabled = true; |
1877 | spin_lock_init(&efx->netif_stop_lock); | 1899 | spin_lock_init(&efx->netif_stop_lock); |
1878 | spin_lock_init(&efx->stats_lock); | 1900 | spin_lock_init(&efx->stats_lock); |
1901 | efx->stats_disable_count = 1; | ||
1879 | mutex_init(&efx->mac_lock); | 1902 | mutex_init(&efx->mac_lock); |
1880 | efx->mac_op = &efx_dummy_mac_operations; | 1903 | efx->mac_op = &efx_dummy_mac_operations; |
1881 | efx->phy_op = &efx_dummy_phy_operations; | 1904 | efx->phy_op = &efx_dummy_phy_operations; |
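
The new efx_stats_disable()/efx_stats_enable() helpers above replace the single stats_enabled flag with a nest count: each caller that needs MAC statistics fetches suspended (reset, MAC switch, FLASH_CFG_1 mode, TX FIFO drain) bumps the count, and fetches resume only when every caller has dropped it again. efx_init_struct() starts the count at 1 so stats stay off until the port is brought up, and efx_net_stats() additionally uses a trylock so a stats read never blocks. A minimal user-space sketch of just the counting pattern, with a pthread mutex standing in for stats_lock and made-up stats_disable()/stats_enable()/stats_poll() names:

/* Nested "disable count" pattern for suspending statistics fetches.
 * Nothing here is the sfc driver API; the mutex replaces the spinlock. */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t stats_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned int stats_disable_count = 1;   /* start disabled, as in efx_init_struct() */
static unsigned long fake_hw_counter;

static void stats_disable(void)
{
	pthread_mutex_lock(&stats_lock);
	++stats_disable_count;
	pthread_mutex_unlock(&stats_lock);
}

static void stats_enable(void)
{
	pthread_mutex_lock(&stats_lock);
	--stats_disable_count;
	pthread_mutex_unlock(&stats_lock);
}

/* Analogue of efx_net_stats(): skip the fetch while anyone has stats disabled. */
static unsigned long stats_poll(void)
{
	unsigned long snapshot;

	pthread_mutex_lock(&stats_lock);
	if (!stats_disable_count)
		fake_hw_counter += 100;            /* pretend to read the MAC */
	snapshot = fake_hw_counter;
	pthread_mutex_unlock(&stats_lock);
	return snapshot;
}

int main(void)
{
	stats_enable();                            /* port brought up: count 1 -> 0 */
	printf("up:         %lu\n", stats_poll());
	stats_disable();                           /* e.g. reset in progress */
	stats_disable();                           /* e.g. MAC switch nested inside it */
	printf("nested off: %lu\n", stats_poll()); /* unchanged */
	stats_enable();
	stats_enable();
	printf("re-enabled: %lu\n", stats_poll());
	return 0;
}
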
diff --git a/drivers/net/sfc/efx.h b/drivers/net/sfc/efx.h index 0dd7a532c78a..55d0f131b0e9 100644 --- a/drivers/net/sfc/efx.h +++ b/drivers/net/sfc/efx.h | |||
@@ -36,13 +36,16 @@ extern void efx_process_channel_now(struct efx_channel *channel); | |||
36 | extern void efx_flush_queues(struct efx_nic *efx); | 36 | extern void efx_flush_queues(struct efx_nic *efx); |
37 | 37 | ||
38 | /* Ports */ | 38 | /* Ports */ |
39 | extern void efx_stats_disable(struct efx_nic *efx); | ||
40 | extern void efx_stats_enable(struct efx_nic *efx); | ||
39 | extern void efx_reconfigure_port(struct efx_nic *efx); | 41 | extern void efx_reconfigure_port(struct efx_nic *efx); |
40 | extern void __efx_reconfigure_port(struct efx_nic *efx); | 42 | extern void __efx_reconfigure_port(struct efx_nic *efx); |
41 | 43 | ||
42 | /* Reset handling */ | 44 | /* Reset handling */ |
43 | extern void efx_reset_down(struct efx_nic *efx, struct ethtool_cmd *ecmd); | 45 | extern void efx_reset_down(struct efx_nic *efx, enum reset_type method, |
44 | extern int efx_reset_up(struct efx_nic *efx, struct ethtool_cmd *ecmd, | 46 | struct ethtool_cmd *ecmd); |
45 | bool ok); | 47 | extern int efx_reset_up(struct efx_nic *efx, enum reset_type method, |
48 | struct ethtool_cmd *ecmd, bool ok); | ||
46 | 49 | ||
47 | /* Global */ | 50 | /* Global */ |
48 | extern void efx_schedule_reset(struct efx_nic *efx, enum reset_type type); | 51 | extern void efx_schedule_reset(struct efx_nic *efx, enum reset_type type); |
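
efx_reset_down()/efx_reset_up() now take the reset method so the PHY is only torn down and re-initialised for resets that actually disturb it; RESET_TYPE_INVISIBLE (used by the self-test path further down) leaves the PHY alone. A rough user-space model of that bracket, with a cut-down reset_type enum and reset_down()/reset_up() stand-ins rather than the real driver entry points:

/* Cut-down model of the reset bracket with a "method" parameter. */
#include <stdbool.h>
#include <stdio.h>

enum reset_type { RESET_TYPE_INVISIBLE, RESET_TYPE_ALL, RESET_TYPE_DISABLE };

static bool port_initialized = true;
static bool phy_up = true;

static void reset_down(enum reset_type method)
{
	if (port_initialized && method != RESET_TYPE_INVISIBLE) {
		phy_up = false;                    /* stands in for phy_op->fini() */
		printf("phy fini\n");
	}
}

static int reset_up(enum reset_type method, bool ok)
{
	if (port_initialized && method != RESET_TYPE_INVISIBLE) {
		if (ok) {
			phy_up = true;             /* stands in for phy_op->init() */
			printf("phy init\n");
		} else {
			port_initialized = false;
		}
	}
	return ok ? 0 : -5;                        /* -EIO when left disabled */
}

int main(void)
{
	reset_down(RESET_TYPE_INVISIBLE);          /* self-test path: PHY untouched */
	reset_up(RESET_TYPE_INVISIBLE, true);
	reset_down(RESET_TYPE_ALL);                /* full reset: PHY cycled */
	reset_up(RESET_TYPE_ALL, true);
	return 0;
}
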
diff --git a/drivers/net/sfc/ethtool.c b/drivers/net/sfc/ethtool.c index 53d259e90187..7b5924c039b3 100644 --- a/drivers/net/sfc/ethtool.c +++ b/drivers/net/sfc/ethtool.c | |||
@@ -219,9 +219,6 @@ int efx_ethtool_set_settings(struct net_device *net_dev, | |||
219 | struct efx_nic *efx = netdev_priv(net_dev); | 219 | struct efx_nic *efx = netdev_priv(net_dev); |
220 | int rc; | 220 | int rc; |
221 | 221 | ||
222 | if (EFX_WORKAROUND_13963(efx) && !ecmd->autoneg) | ||
223 | return -EINVAL; | ||
224 | |||
225 | /* Falcon GMAC does not support 1000Mbps HD */ | 222 | /* Falcon GMAC does not support 1000Mbps HD */ |
226 | if (ecmd->speed == SPEED_1000 && ecmd->duplex != DUPLEX_FULL) { | 223 | if (ecmd->speed == SPEED_1000 && ecmd->duplex != DUPLEX_FULL) { |
227 | EFX_LOG(efx, "rejecting unsupported 1000Mbps HD" | 224 | EFX_LOG(efx, "rejecting unsupported 1000Mbps HD" |
diff --git a/drivers/net/sfc/falcon.c b/drivers/net/sfc/falcon.c index 5b9f2d9cc4ed..d5378e60fcdd 100644 --- a/drivers/net/sfc/falcon.c +++ b/drivers/net/sfc/falcon.c | |||
@@ -824,10 +824,6 @@ static void falcon_handle_rx_not_ok(struct efx_rx_queue *rx_queue, | |||
824 | rx_ev_pause_frm ? " [PAUSE]" : ""); | 824 | rx_ev_pause_frm ? " [PAUSE]" : ""); |
825 | } | 825 | } |
826 | #endif | 826 | #endif |
827 | |||
828 | if (unlikely(rx_ev_eth_crc_err && EFX_WORKAROUND_10750(efx) && | ||
829 | efx->phy_type == PHY_TYPE_SFX7101)) | ||
830 | tenxpress_crc_err(efx); | ||
831 | } | 827 | } |
832 | 828 | ||
833 | /* Handle receive events that are not in-order. */ | 829 | /* Handle receive events that are not in-order. */ |
@@ -1887,7 +1883,7 @@ static int falcon_reset_macs(struct efx_nic *efx) | |||
1887 | 1883 | ||
1888 | /* MAC stats will fail whilst the TX fifo is draining. Serialise | 1884 | /* MAC stats will fail whilst the TX fifo is draining. Serialise |
1889 | * the drain sequence with the statistics fetch */ | 1885 | * the drain sequence with the statistics fetch */ |
1890 | spin_lock(&efx->stats_lock); | 1886 | efx_stats_disable(efx); |
1891 | 1887 | ||
1892 | falcon_read(efx, ®, MAC0_CTRL_REG_KER); | 1888 | falcon_read(efx, ®, MAC0_CTRL_REG_KER); |
1893 | EFX_SET_OWORD_FIELD(reg, TXFIFO_DRAIN_EN_B0, 1); | 1889 | EFX_SET_OWORD_FIELD(reg, TXFIFO_DRAIN_EN_B0, 1); |
@@ -1917,7 +1913,7 @@ static int falcon_reset_macs(struct efx_nic *efx) | |||
1917 | udelay(10); | 1913 | udelay(10); |
1918 | } | 1914 | } |
1919 | 1915 | ||
1920 | spin_unlock(&efx->stats_lock); | 1916 | efx_stats_enable(efx); |
1921 | 1917 | ||
1922 | /* If we've reset the EM block and the link is up, then | 1918 | /* If we've reset the EM block and the link is up, then |
1923 | * we'll have to kick the XAUI link so the PHY can recover */ | 1919 | * we'll have to kick the XAUI link so the PHY can recover */ |
@@ -2277,6 +2273,10 @@ int falcon_switch_mac(struct efx_nic *efx) | |||
2277 | struct efx_mac_operations *old_mac_op = efx->mac_op; | 2273 | struct efx_mac_operations *old_mac_op = efx->mac_op; |
2278 | efx_oword_t nic_stat; | 2274 | efx_oword_t nic_stat; |
2279 | unsigned strap_val; | 2275 | unsigned strap_val; |
2276 | int rc = 0; | ||
2277 | |||
2278 | /* Don't try to fetch MAC stats while we're switching MACs */ | ||
2279 | efx_stats_disable(efx); | ||
2280 | 2280 | ||
2281 | /* Internal loopbacks override the phy speed setting */ | 2281 | /* Internal loopbacks override the phy speed setting */ |
2282 | if (efx->loopback_mode == LOOPBACK_GMAC) { | 2282 | if (efx->loopback_mode == LOOPBACK_GMAC) { |
@@ -2287,16 +2287,12 @@ int falcon_switch_mac(struct efx_nic *efx) | |||
2287 | efx->link_fd = true; | 2287 | efx->link_fd = true; |
2288 | } | 2288 | } |
2289 | 2289 | ||
2290 | WARN_ON(!mutex_is_locked(&efx->mac_lock)); | ||
2290 | efx->mac_op = (EFX_IS10G(efx) ? | 2291 | efx->mac_op = (EFX_IS10G(efx) ? |
2291 | &falcon_xmac_operations : &falcon_gmac_operations); | 2292 | &falcon_xmac_operations : &falcon_gmac_operations); |
2292 | if (old_mac_op == efx->mac_op) | ||
2293 | return 0; | ||
2294 | |||
2295 | WARN_ON(!mutex_is_locked(&efx->mac_lock)); | ||
2296 | |||
2297 | /* Not all macs support a mac-level link state */ | ||
2298 | efx->mac_up = true; | ||
2299 | 2293 | ||
2294 | /* Always push the NIC_STAT_REG setting even if the mac hasn't | ||
2295 | * changed, because this function is run post online reset */ | ||
2300 | falcon_read(efx, &nic_stat, NIC_STAT_REG); | 2296 | falcon_read(efx, &nic_stat, NIC_STAT_REG); |
2301 | strap_val = EFX_IS10G(efx) ? 5 : 3; | 2297 | strap_val = EFX_IS10G(efx) ? 5 : 3; |
2302 | if (falcon_rev(efx) >= FALCON_REV_B0) { | 2298 | if (falcon_rev(efx) >= FALCON_REV_B0) { |
@@ -2309,9 +2305,17 @@ int falcon_switch_mac(struct efx_nic *efx) | |||
2309 | BUG_ON(EFX_OWORD_FIELD(nic_stat, STRAP_PINS) != strap_val); | 2305 | BUG_ON(EFX_OWORD_FIELD(nic_stat, STRAP_PINS) != strap_val); |
2310 | } | 2306 | } |
2311 | 2307 | ||
2308 | if (old_mac_op == efx->mac_op) | ||
2309 | goto out; | ||
2312 | 2310 | ||
2313 | EFX_LOG(efx, "selected %cMAC\n", EFX_IS10G(efx) ? 'X' : 'G'); | 2311 | EFX_LOG(efx, "selected %cMAC\n", EFX_IS10G(efx) ? 'X' : 'G'); |
2314 | return falcon_reset_macs(efx); | 2312 | /* Not all macs support a mac-level link state */ |
2313 | efx->mac_up = true; | ||
2314 | |||
2315 | rc = falcon_reset_macs(efx); | ||
2316 | out: | ||
2317 | efx_stats_enable(efx); | ||
2318 | return rc; | ||
2315 | } | 2319 | } |
2316 | 2320 | ||
2317 | /* This call is responsible for hooking in the MAC and PHY operations */ | 2321 | /* This call is responsible for hooking in the MAC and PHY operations */ |
diff --git a/drivers/net/sfc/mdio_10g.c b/drivers/net/sfc/mdio_10g.c index f6a16428113d..f9e2f95c3b48 100644 --- a/drivers/net/sfc/mdio_10g.c +++ b/drivers/net/sfc/mdio_10g.c | |||
@@ -15,6 +15,7 @@ | |||
15 | #include "net_driver.h" | 15 | #include "net_driver.h" |
16 | #include "mdio_10g.h" | 16 | #include "mdio_10g.h" |
17 | #include "boards.h" | 17 | #include "boards.h" |
18 | #include "workarounds.h" | ||
18 | 19 | ||
19 | int mdio_clause45_reset_mmd(struct efx_nic *port, int mmd, | 20 | int mdio_clause45_reset_mmd(struct efx_nic *port, int mmd, |
20 | int spins, int spintime) | 21 | int spins, int spintime) |
@@ -179,17 +180,12 @@ bool mdio_clause45_links_ok(struct efx_nic *efx, unsigned int mmd_mask) | |||
179 | return false; | 180 | return false; |
180 | else if (efx_phy_mode_disabled(efx->phy_mode)) | 181 | else if (efx_phy_mode_disabled(efx->phy_mode)) |
181 | return false; | 182 | return false; |
182 | else if (efx->loopback_mode == LOOPBACK_PHYXS) { | 183 | else if (efx->loopback_mode == LOOPBACK_PHYXS) |
183 | mmd_mask &= ~(MDIO_MMDREG_DEVS_PHYXS | | 184 | mmd_mask &= ~(MDIO_MMDREG_DEVS_PHYXS | |
184 | MDIO_MMDREG_DEVS_PCS | | 185 | MDIO_MMDREG_DEVS_PCS | |
185 | MDIO_MMDREG_DEVS_PMAPMD | | 186 | MDIO_MMDREG_DEVS_PMAPMD | |
186 | MDIO_MMDREG_DEVS_AN); | 187 | MDIO_MMDREG_DEVS_AN); |
187 | if (!mmd_mask) { | 188 | else if (efx->loopback_mode == LOOPBACK_PCS) |
188 | reg = mdio_clause45_read(efx, phy_id, MDIO_MMD_PHYXS, | ||
189 | MDIO_PHYXS_STATUS2); | ||
190 | return !(reg & (1 << MDIO_PHYXS_STATUS2_RX_FAULT_LBN)); | ||
191 | } | ||
192 | } else if (efx->loopback_mode == LOOPBACK_PCS) | ||
193 | mmd_mask &= ~(MDIO_MMDREG_DEVS_PCS | | 189 | mmd_mask &= ~(MDIO_MMDREG_DEVS_PCS | |
194 | MDIO_MMDREG_DEVS_PMAPMD | | 190 | MDIO_MMDREG_DEVS_PMAPMD | |
195 | MDIO_MMDREG_DEVS_AN); | 191 | MDIO_MMDREG_DEVS_AN); |
@@ -197,6 +193,13 @@ bool mdio_clause45_links_ok(struct efx_nic *efx, unsigned int mmd_mask) | |||
197 | mmd_mask &= ~(MDIO_MMDREG_DEVS_PMAPMD | | 193 | mmd_mask &= ~(MDIO_MMDREG_DEVS_PMAPMD | |
198 | MDIO_MMDREG_DEVS_AN); | 194 | MDIO_MMDREG_DEVS_AN); |
199 | 195 | ||
196 | if (!mmd_mask) { | ||
197 | /* Use presence of XGMII faults in lieu of link state */ | ||
198 | reg = mdio_clause45_read(efx, phy_id, MDIO_MMD_PHYXS, | ||
199 | MDIO_PHYXS_STATUS2); | ||
200 | return !(reg & (1 << MDIO_PHYXS_STATUS2_RX_FAULT_LBN)); | ||
201 | } | ||
202 | |||
200 | while (mmd_mask) { | 203 | while (mmd_mask) { |
201 | if (mmd_mask & 1) { | 204 | if (mmd_mask & 1) { |
202 | /* Double reads because link state is latched, and a | 205 | /* Double reads because link state is latched, and a |
@@ -263,7 +266,7 @@ void mdio_clause45_set_mmds_lpower(struct efx_nic *efx, | |||
263 | } | 266 | } |
264 | } | 267 | } |
265 | 268 | ||
266 | static u32 mdio_clause45_get_an(struct efx_nic *efx, u16 addr, u32 xnp) | 269 | static u32 mdio_clause45_get_an(struct efx_nic *efx, u16 addr) |
267 | { | 270 | { |
268 | int phy_id = efx->mii.phy_id; | 271 | int phy_id = efx->mii.phy_id; |
269 | u32 result = 0; | 272 | u32 result = 0; |
@@ -278,9 +281,6 @@ static u32 mdio_clause45_get_an(struct efx_nic *efx, u16 addr, u32 xnp) | |||
278 | result |= ADVERTISED_100baseT_Half; | 281 | result |= ADVERTISED_100baseT_Half; |
279 | if (reg & ADVERTISE_100FULL) | 282 | if (reg & ADVERTISE_100FULL) |
280 | result |= ADVERTISED_100baseT_Full; | 283 | result |= ADVERTISED_100baseT_Full; |
281 | if (reg & LPA_RESV) | ||
282 | result |= xnp; | ||
283 | |||
284 | return result; | 284 | return result; |
285 | } | 285 | } |
286 | 286 | ||
@@ -310,7 +310,7 @@ void mdio_clause45_get_settings(struct efx_nic *efx, | |||
310 | */ | 310 | */ |
311 | void mdio_clause45_get_settings_ext(struct efx_nic *efx, | 311 | void mdio_clause45_get_settings_ext(struct efx_nic *efx, |
312 | struct ethtool_cmd *ecmd, | 312 | struct ethtool_cmd *ecmd, |
313 | u32 xnp, u32 xnp_lpa) | 313 | u32 npage_adv, u32 npage_lpa) |
314 | { | 314 | { |
315 | int phy_id = efx->mii.phy_id; | 315 | int phy_id = efx->mii.phy_id; |
316 | int reg; | 316 | int reg; |
@@ -361,8 +361,8 @@ void mdio_clause45_get_settings_ext(struct efx_nic *efx, | |||
361 | ecmd->autoneg = AUTONEG_ENABLE; | 361 | ecmd->autoneg = AUTONEG_ENABLE; |
362 | ecmd->advertising |= | 362 | ecmd->advertising |= |
363 | ADVERTISED_Autoneg | | 363 | ADVERTISED_Autoneg | |
364 | mdio_clause45_get_an(efx, | 364 | mdio_clause45_get_an(efx, MDIO_AN_ADVERTISE) | |
365 | MDIO_AN_ADVERTISE, xnp); | 365 | npage_adv; |
366 | } else | 366 | } else |
367 | ecmd->autoneg = AUTONEG_DISABLE; | 367 | ecmd->autoneg = AUTONEG_DISABLE; |
368 | } else | 368 | } else |
@@ -371,27 +371,30 @@ void mdio_clause45_get_settings_ext(struct efx_nic *efx, | |||
371 | if (ecmd->autoneg) { | 371 | if (ecmd->autoneg) { |
372 | /* If AN is complete, report best common mode, | 372 | /* If AN is complete, report best common mode, |
373 | * otherwise report best advertised mode. */ | 373 | * otherwise report best advertised mode. */ |
374 | u32 common = ecmd->advertising; | 374 | u32 modes = 0; |
375 | if (mdio_clause45_read(efx, phy_id, MDIO_MMD_AN, | 375 | if (mdio_clause45_read(efx, phy_id, MDIO_MMD_AN, |
376 | MDIO_MMDREG_STAT1) & | 376 | MDIO_MMDREG_STAT1) & |
377 | (1 << MDIO_AN_STATUS_AN_DONE_LBN)) { | 377 | (1 << MDIO_AN_STATUS_AN_DONE_LBN)) |
378 | common &= mdio_clause45_get_an(efx, MDIO_AN_LPA, | 378 | modes = (ecmd->advertising & |
379 | xnp_lpa); | 379 | (mdio_clause45_get_an(efx, MDIO_AN_LPA) | |
380 | } | 380 | npage_lpa)); |
381 | if (common & ADVERTISED_10000baseT_Full) { | 381 | if (modes == 0) |
382 | modes = ecmd->advertising; | ||
383 | |||
384 | if (modes & ADVERTISED_10000baseT_Full) { | ||
382 | ecmd->speed = SPEED_10000; | 385 | ecmd->speed = SPEED_10000; |
383 | ecmd->duplex = DUPLEX_FULL; | 386 | ecmd->duplex = DUPLEX_FULL; |
384 | } else if (common & (ADVERTISED_1000baseT_Full | | 387 | } else if (modes & (ADVERTISED_1000baseT_Full | |
385 | ADVERTISED_1000baseT_Half)) { | 388 | ADVERTISED_1000baseT_Half)) { |
386 | ecmd->speed = SPEED_1000; | 389 | ecmd->speed = SPEED_1000; |
387 | ecmd->duplex = !!(common & ADVERTISED_1000baseT_Full); | 390 | ecmd->duplex = !!(modes & ADVERTISED_1000baseT_Full); |
388 | } else if (common & (ADVERTISED_100baseT_Full | | 391 | } else if (modes & (ADVERTISED_100baseT_Full | |
389 | ADVERTISED_100baseT_Half)) { | 392 | ADVERTISED_100baseT_Half)) { |
390 | ecmd->speed = SPEED_100; | 393 | ecmd->speed = SPEED_100; |
391 | ecmd->duplex = !!(common & ADVERTISED_100baseT_Full); | 394 | ecmd->duplex = !!(modes & ADVERTISED_100baseT_Full); |
392 | } else { | 395 | } else { |
393 | ecmd->speed = SPEED_10; | 396 | ecmd->speed = SPEED_10; |
394 | ecmd->duplex = !!(common & ADVERTISED_10baseT_Full); | 397 | ecmd->duplex = !!(modes & ADVERTISED_10baseT_Full); |
395 | } | 398 | } |
396 | } else { | 399 | } else { |
397 | /* Report forced settings */ | 400 | /* Report forced settings */ |
@@ -415,7 +418,7 @@ int mdio_clause45_set_settings(struct efx_nic *efx, | |||
415 | int phy_id = efx->mii.phy_id; | 418 | int phy_id = efx->mii.phy_id; |
416 | struct ethtool_cmd prev; | 419 | struct ethtool_cmd prev; |
417 | u32 required; | 420 | u32 required; |
418 | int ctrl1_bits, reg; | 421 | int reg; |
419 | 422 | ||
420 | efx->phy_op->get_settings(efx, &prev); | 423 | efx->phy_op->get_settings(efx, &prev); |
421 | 424 | ||
@@ -430,99 +433,83 @@ int mdio_clause45_set_settings(struct efx_nic *efx, | |||
430 | if (prev.port != PORT_TP || ecmd->port != PORT_TP) | 433 | if (prev.port != PORT_TP || ecmd->port != PORT_TP) |
431 | return -EINVAL; | 434 | return -EINVAL; |
432 | 435 | ||
433 | /* Check that PHY supports these settings and work out the | 436 | /* Check that PHY supports these settings */ |
434 | * basic control bits */ | 437 | if (ecmd->autoneg) { |
435 | if (ecmd->duplex) { | 438 | required = SUPPORTED_Autoneg; |
439 | } else if (ecmd->duplex) { | ||
436 | switch (ecmd->speed) { | 440 | switch (ecmd->speed) { |
437 | case SPEED_10: | 441 | case SPEED_10: required = SUPPORTED_10baseT_Full; break; |
438 | ctrl1_bits = BMCR_FULLDPLX; | 442 | case SPEED_100: required = SUPPORTED_100baseT_Full; break; |
439 | required = SUPPORTED_10baseT_Full; | 443 | default: return -EINVAL; |
440 | break; | ||
441 | case SPEED_100: | ||
442 | ctrl1_bits = BMCR_SPEED100 | BMCR_FULLDPLX; | ||
443 | required = SUPPORTED_100baseT_Full; | ||
444 | break; | ||
445 | case SPEED_1000: | ||
446 | ctrl1_bits = BMCR_SPEED1000 | BMCR_FULLDPLX; | ||
447 | required = SUPPORTED_1000baseT_Full; | ||
448 | break; | ||
449 | case SPEED_10000: | ||
450 | ctrl1_bits = (BMCR_SPEED1000 | BMCR_SPEED100 | | ||
451 | BMCR_FULLDPLX); | ||
452 | required = SUPPORTED_10000baseT_Full; | ||
453 | break; | ||
454 | default: | ||
455 | return -EINVAL; | ||
456 | } | 444 | } |
457 | } else { | 445 | } else { |
458 | switch (ecmd->speed) { | 446 | switch (ecmd->speed) { |
459 | case SPEED_10: | 447 | case SPEED_10: required = SUPPORTED_10baseT_Half; break; |
460 | ctrl1_bits = 0; | 448 | case SPEED_100: required = SUPPORTED_100baseT_Half; break; |
461 | required = SUPPORTED_10baseT_Half; | 449 | default: return -EINVAL; |
462 | break; | ||
463 | case SPEED_100: | ||
464 | ctrl1_bits = BMCR_SPEED100; | ||
465 | required = SUPPORTED_100baseT_Half; | ||
466 | break; | ||
467 | case SPEED_1000: | ||
468 | ctrl1_bits = BMCR_SPEED1000; | ||
469 | required = SUPPORTED_1000baseT_Half; | ||
470 | break; | ||
471 | default: | ||
472 | return -EINVAL; | ||
473 | } | 450 | } |
474 | } | 451 | } |
475 | if (ecmd->autoneg) | ||
476 | required |= SUPPORTED_Autoneg; | ||
477 | required |= ecmd->advertising; | 452 | required |= ecmd->advertising; |
478 | if (required & ~prev.supported) | 453 | if (required & ~prev.supported) |
479 | return -EINVAL; | 454 | return -EINVAL; |
480 | 455 | ||
481 | /* Set the basic control bits */ | 456 | if (ecmd->autoneg) { |
482 | reg = mdio_clause45_read(efx, phy_id, MDIO_MMD_PMAPMD, | 457 | bool xnp = (ecmd->advertising & ADVERTISED_10000baseT_Full |
483 | MDIO_MMDREG_CTRL1); | 458 | || EFX_WORKAROUND_13204(efx)); |
484 | reg &= ~(BMCR_SPEED1000 | BMCR_SPEED100 | BMCR_FULLDPLX | 0x003c); | 459 | |
485 | reg |= ctrl1_bits; | 460 | /* Set up the base page */ |
486 | mdio_clause45_write(efx, phy_id, MDIO_MMD_PMAPMD, MDIO_MMDREG_CTRL1, | 461 | reg = ADVERTISE_CSMA; |
487 | reg); | 462 | if (ecmd->advertising & ADVERTISED_10baseT_Half) |
488 | 463 | reg |= ADVERTISE_10HALF; | |
489 | /* Set the AN registers */ | 464 | if (ecmd->advertising & ADVERTISED_10baseT_Full) |
490 | if (ecmd->autoneg != prev.autoneg || | 465 | reg |= ADVERTISE_10FULL; |
491 | ecmd->advertising != prev.advertising) { | 466 | if (ecmd->advertising & ADVERTISED_100baseT_Half) |
492 | bool xnp = false; | 467 | reg |= ADVERTISE_100HALF; |
493 | 468 | if (ecmd->advertising & ADVERTISED_100baseT_Full) | |
494 | if (efx->phy_op->set_xnp_advertise) | 469 | reg |= ADVERTISE_100FULL; |
495 | xnp = efx->phy_op->set_xnp_advertise(efx, | 470 | if (xnp) |
496 | ecmd->advertising); | 471 | reg |= ADVERTISE_RESV; |
497 | 472 | else if (ecmd->advertising & (ADVERTISED_1000baseT_Half | | |
498 | if (ecmd->autoneg) { | 473 | ADVERTISED_1000baseT_Full)) |
499 | reg = 0; | 474 | reg |= ADVERTISE_NPAGE; |
500 | if (ecmd->advertising & ADVERTISED_10baseT_Half) | 475 | reg |= efx_fc_advertise(efx->wanted_fc); |
501 | reg |= ADVERTISE_10HALF; | 476 | mdio_clause45_write(efx, phy_id, MDIO_MMD_AN, |
502 | if (ecmd->advertising & ADVERTISED_10baseT_Full) | 477 | MDIO_AN_ADVERTISE, reg); |
503 | reg |= ADVERTISE_10FULL; | 478 | |
504 | if (ecmd->advertising & ADVERTISED_100baseT_Half) | 479 | /* Set up the (extended) next page if necessary */ |
505 | reg |= ADVERTISE_100HALF; | 480 | if (efx->phy_op->set_npage_adv) |
506 | if (ecmd->advertising & ADVERTISED_100baseT_Full) | 481 | efx->phy_op->set_npage_adv(efx, ecmd->advertising); |
507 | reg |= ADVERTISE_100FULL; | ||
508 | if (xnp) | ||
509 | reg |= ADVERTISE_RESV; | ||
510 | mdio_clause45_write(efx, phy_id, MDIO_MMD_AN, | ||
511 | MDIO_AN_ADVERTISE, reg); | ||
512 | } | ||
513 | 482 | ||
483 | /* Enable and restart AN */ | ||
514 | reg = mdio_clause45_read(efx, phy_id, MDIO_MMD_AN, | 484 | reg = mdio_clause45_read(efx, phy_id, MDIO_MMD_AN, |
515 | MDIO_MMDREG_CTRL1); | 485 | MDIO_MMDREG_CTRL1); |
516 | if (ecmd->autoneg) | 486 | reg |= BMCR_ANENABLE; |
517 | reg |= BMCR_ANENABLE | BMCR_ANRESTART; | 487 | if (!(EFX_WORKAROUND_15195(efx) && |
518 | else | 488 | LOOPBACK_MASK(efx) & efx->phy_op->loopbacks)) |
519 | reg &= ~BMCR_ANENABLE; | 489 | reg |= BMCR_ANRESTART; |
520 | if (xnp) | 490 | if (xnp) |
521 | reg |= 1 << MDIO_AN_CTRL_XNP_LBN; | 491 | reg |= 1 << MDIO_AN_CTRL_XNP_LBN; |
522 | else | 492 | else |
523 | reg &= ~(1 << MDIO_AN_CTRL_XNP_LBN); | 493 | reg &= ~(1 << MDIO_AN_CTRL_XNP_LBN); |
524 | mdio_clause45_write(efx, phy_id, MDIO_MMD_AN, | 494 | mdio_clause45_write(efx, phy_id, MDIO_MMD_AN, |
525 | MDIO_MMDREG_CTRL1, reg); | 495 | MDIO_MMDREG_CTRL1, reg); |
496 | } else { | ||
497 | /* Disable AN */ | ||
498 | mdio_clause45_set_flag(efx, phy_id, MDIO_MMD_AN, | ||
499 | MDIO_MMDREG_CTRL1, | ||
500 | __ffs(BMCR_ANENABLE), false); | ||
501 | |||
502 | /* Set the basic control bits */ | ||
503 | reg = mdio_clause45_read(efx, phy_id, MDIO_MMD_PMAPMD, | ||
504 | MDIO_MMDREG_CTRL1); | ||
505 | reg &= ~(BMCR_SPEED1000 | BMCR_SPEED100 | BMCR_FULLDPLX | | ||
506 | 0x003c); | ||
507 | if (ecmd->speed == SPEED_100) | ||
508 | reg |= BMCR_SPEED100; | ||
509 | if (ecmd->duplex) | ||
510 | reg |= BMCR_FULLDPLX; | ||
511 | mdio_clause45_write(efx, phy_id, MDIO_MMD_PMAPMD, | ||
512 | MDIO_MMDREG_CTRL1, reg); | ||
526 | } | 513 | } |
527 | 514 | ||
528 | return 0; | 515 | return 0; |
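
The rewritten mdio_clause45_set_settings() above builds the AN base page directly from ecmd->advertising instead of forcing speed bits through the PMA/PMD control register, and signals 1G/10G abilities via next pages (ADVERTISE_NPAGE, or ADVERTISE_RESV when extended next pages are in use). The standalone sketch below reproduces only that base-page construction; the ADVERTISE_*/ADVERTISED_* constants are local copies of the standard mii.h/ethtool.h values so it compiles on its own, and flow-control and workaround handling are deliberately left out.

/* Standalone sketch: build a clause-45 AN base-page advertisement word
 * from an ethtool "advertising" mask.  Constants mirror mii.h/ethtool.h. */
#include <stdint.h>
#include <stdio.h>

#define ADVERTISED_10baseT_Half     (1u << 0)
#define ADVERTISED_10baseT_Full     (1u << 1)
#define ADVERTISED_100baseT_Half    (1u << 2)
#define ADVERTISED_100baseT_Full    (1u << 3)
#define ADVERTISED_1000baseT_Half   (1u << 4)
#define ADVERTISED_1000baseT_Full   (1u << 5)
#define ADVERTISED_10000baseT_Full  (1u << 12)

#define ADVERTISE_CSMA    0x0001
#define ADVERTISE_10HALF  0x0020
#define ADVERTISE_10FULL  0x0040
#define ADVERTISE_100HALF 0x0080
#define ADVERTISE_100FULL 0x0100
#define ADVERTISE_RESV    0x1000   /* reserved bit, used as in the xnp path above */
#define ADVERTISE_NPAGE   0x8000   /* next pages follow */

static uint16_t an_base_page(uint32_t advertising, int use_xnp)
{
	uint16_t reg = ADVERTISE_CSMA;             /* selector: IEEE 802.3 */

	if (advertising & ADVERTISED_10baseT_Half)
		reg |= ADVERTISE_10HALF;
	if (advertising & ADVERTISED_10baseT_Full)
		reg |= ADVERTISE_10FULL;
	if (advertising & ADVERTISED_100baseT_Half)
		reg |= ADVERTISE_100HALF;
	if (advertising & ADVERTISED_100baseT_Full)
		reg |= ADVERTISE_100FULL;

	/* 1G/10G abilities live in next pages, so flag their presence. */
	if (use_xnp)
		reg |= ADVERTISE_RESV;
	else if (advertising & (ADVERTISED_1000baseT_Half |
				ADVERTISED_1000baseT_Full))
		reg |= ADVERTISE_NPAGE;

	return reg;
}

int main(void)
{
	uint32_t adv = ADVERTISED_100baseT_Full | ADVERTISED_1000baseT_Full |
		       ADVERTISED_10000baseT_Full;

	printf("base page = 0x%04x\n", an_base_page(adv, 1));
	return 0;
}
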
diff --git a/drivers/net/sfc/mdio_10g.h b/drivers/net/sfc/mdio_10g.h index 09bf801d0569..8ba49773ce7e 100644 --- a/drivers/net/sfc/mdio_10g.h +++ b/drivers/net/sfc/mdio_10g.h | |||
@@ -155,7 +155,8 @@ | |||
155 | #define MDIO_AN_XNP 22 | 155 | #define MDIO_AN_XNP 22 |
156 | #define MDIO_AN_LPA_XNP 25 | 156 | #define MDIO_AN_LPA_XNP 25 |
157 | 157 | ||
158 | #define MDIO_AN_10GBT_ADVERTISE 32 | 158 | #define MDIO_AN_10GBT_CTRL 32 |
159 | #define MDIO_AN_10GBT_CTRL_ADV_10G_LBN 12 | ||
159 | #define MDIO_AN_10GBT_STATUS (33) | 160 | #define MDIO_AN_10GBT_STATUS (33) |
160 | #define MDIO_AN_10GBT_STATUS_MS_FLT_LBN (15) /* MASTER/SLAVE config fault */ | 161 | #define MDIO_AN_10GBT_STATUS_MS_FLT_LBN (15) /* MASTER/SLAVE config fault */ |
161 | #define MDIO_AN_10GBT_STATUS_MS_LBN (14) /* MASTER/SLAVE config */ | 162 | #define MDIO_AN_10GBT_STATUS_MS_LBN (14) /* MASTER/SLAVE config */ |
diff --git a/drivers/net/sfc/net_driver.h b/drivers/net/sfc/net_driver.h index 5f255f75754e..e019ad1fb9a0 100644 --- a/drivers/net/sfc/net_driver.h +++ b/drivers/net/sfc/net_driver.h | |||
@@ -566,7 +566,7 @@ struct efx_mac_operations { | |||
566 | * @poll: Poll for hardware state. Serialised by the mac_lock. | 566 | * @poll: Poll for hardware state. Serialised by the mac_lock. |
567 | * @get_settings: Get ethtool settings. Serialised by the mac_lock. | 567 | * @get_settings: Get ethtool settings. Serialised by the mac_lock. |
568 | * @set_settings: Set ethtool settings. Serialised by the mac_lock. | 568 | * @set_settings: Set ethtool settings. Serialised by the mac_lock. |
569 | * @set_xnp_advertise: Set abilities advertised in Extended Next Page | 569 | * @set_npage_adv: Set abilities advertised in (Extended) Next Page |
570 | * (only needed where AN bit is set in mmds) | 570 | * (only needed where AN bit is set in mmds) |
571 | * @num_tests: Number of PHY-specific tests/results | 571 | * @num_tests: Number of PHY-specific tests/results |
572 | * @test_names: Names of the tests/results | 572 | * @test_names: Names of the tests/results |
@@ -586,7 +586,7 @@ struct efx_phy_operations { | |||
586 | struct ethtool_cmd *ecmd); | 586 | struct ethtool_cmd *ecmd); |
587 | int (*set_settings) (struct efx_nic *efx, | 587 | int (*set_settings) (struct efx_nic *efx, |
588 | struct ethtool_cmd *ecmd); | 588 | struct ethtool_cmd *ecmd); |
589 | bool (*set_xnp_advertise) (struct efx_nic *efx, u32); | 589 | void (*set_npage_adv) (struct efx_nic *efx, u32); |
590 | u32 num_tests; | 590 | u32 num_tests; |
591 | const char *const *test_names; | 591 | const char *const *test_names; |
592 | int (*run_tests) (struct efx_nic *efx, int *results, unsigned flags); | 592 | int (*run_tests) (struct efx_nic *efx, int *results, unsigned flags); |
@@ -754,8 +754,7 @@ union efx_multicast_hash { | |||
754 | * &struct net_device_stats. | 754 | * &struct net_device_stats. |
755 | * @stats_buffer: DMA buffer for statistics | 755 | * @stats_buffer: DMA buffer for statistics |
756 | * @stats_lock: Statistics update lock. Serialises statistics fetches | 756 | * @stats_lock: Statistics update lock. Serialises statistics fetches |
757 | * @stats_enabled: Temporarily disable statistics fetches. | 757 | * @stats_disable_count: Nest count for disabling statistics fetches |
758 | * Serialised by @stats_lock | ||
759 | * @mac_op: MAC interface | 758 | * @mac_op: MAC interface |
760 | * @mac_address: Permanent MAC address | 759 | * @mac_address: Permanent MAC address |
761 | * @phy_type: PHY type | 760 | * @phy_type: PHY type |
@@ -837,7 +836,7 @@ struct efx_nic { | |||
837 | struct efx_mac_stats mac_stats; | 836 | struct efx_mac_stats mac_stats; |
838 | struct efx_buffer stats_buffer; | 837 | struct efx_buffer stats_buffer; |
839 | spinlock_t stats_lock; | 838 | spinlock_t stats_lock; |
840 | bool stats_enabled; | 839 | unsigned int stats_disable_count; |
841 | 840 | ||
842 | struct efx_mac_operations *mac_op; | 841 | struct efx_mac_operations *mac_op; |
843 | unsigned char mac_address[ETH_ALEN]; | 842 | unsigned char mac_address[ETH_ALEN]; |
diff --git a/drivers/net/sfc/phy.h b/drivers/net/sfc/phy.h index 58c493ef81bb..07e855c148bc 100644 --- a/drivers/net/sfc/phy.h +++ b/drivers/net/sfc/phy.h | |||
@@ -17,7 +17,6 @@ extern struct efx_phy_operations falcon_sfx7101_phy_ops; | |||
17 | extern struct efx_phy_operations falcon_sft9001_phy_ops; | 17 | extern struct efx_phy_operations falcon_sft9001_phy_ops; |
18 | 18 | ||
19 | extern void tenxpress_phy_blink(struct efx_nic *efx, bool blink); | 19 | extern void tenxpress_phy_blink(struct efx_nic *efx, bool blink); |
20 | extern void tenxpress_crc_err(struct efx_nic *efx); | ||
21 | 20 | ||
22 | /**************************************************************************** | 21 | /**************************************************************************** |
23 | * Exported functions from the driver for XFP optical PHYs | 22 | * Exported functions from the driver for XFP optical PHYs |
diff --git a/drivers/net/sfc/selftest.c b/drivers/net/sfc/selftest.c index dba0d64d50cd..0a598084c513 100644 --- a/drivers/net/sfc/selftest.c +++ b/drivers/net/sfc/selftest.c | |||
@@ -665,6 +665,7 @@ int efx_selftest(struct efx_nic *efx, struct efx_self_tests *tests, | |||
665 | { | 665 | { |
666 | enum efx_loopback_mode loopback_mode = efx->loopback_mode; | 666 | enum efx_loopback_mode loopback_mode = efx->loopback_mode; |
667 | int phy_mode = efx->phy_mode; | 667 | int phy_mode = efx->phy_mode; |
668 | enum reset_type reset_method = RESET_TYPE_INVISIBLE; | ||
668 | struct ethtool_cmd ecmd; | 669 | struct ethtool_cmd ecmd; |
669 | struct efx_channel *channel; | 670 | struct efx_channel *channel; |
670 | int rc_test = 0, rc_reset = 0, rc; | 671 | int rc_test = 0, rc_reset = 0, rc; |
@@ -718,21 +719,21 @@ int efx_selftest(struct efx_nic *efx, struct efx_self_tests *tests, | |||
718 | mutex_unlock(&efx->mac_lock); | 719 | mutex_unlock(&efx->mac_lock); |
719 | 720 | ||
720 | /* free up all consumers of SRAM (including all the queues) */ | 721 | /* free up all consumers of SRAM (including all the queues) */ |
721 | efx_reset_down(efx, &ecmd); | 722 | efx_reset_down(efx, reset_method, &ecmd); |
722 | 723 | ||
723 | rc = efx_test_chip(efx, tests); | 724 | rc = efx_test_chip(efx, tests); |
724 | if (rc && !rc_test) | 725 | if (rc && !rc_test) |
725 | rc_test = rc; | 726 | rc_test = rc; |
726 | 727 | ||
727 | /* reset the chip to recover from the register test */ | 728 | /* reset the chip to recover from the register test */ |
728 | rc_reset = falcon_reset_hw(efx, RESET_TYPE_ALL); | 729 | rc_reset = falcon_reset_hw(efx, reset_method); |
729 | 730 | ||
730 | /* Ensure that the phy is powered and out of loopback | 731 | /* Ensure that the phy is powered and out of loopback |
731 | * for the bist and loopback tests */ | 732 | * for the bist and loopback tests */ |
732 | efx->phy_mode &= ~PHY_MODE_LOW_POWER; | 733 | efx->phy_mode &= ~PHY_MODE_LOW_POWER; |
733 | efx->loopback_mode = LOOPBACK_NONE; | 734 | efx->loopback_mode = LOOPBACK_NONE; |
734 | 735 | ||
735 | rc = efx_reset_up(efx, &ecmd, rc_reset == 0); | 736 | rc = efx_reset_up(efx, reset_method, &ecmd, rc_reset == 0); |
736 | if (rc && !rc_reset) | 737 | if (rc && !rc_reset) |
737 | rc_reset = rc; | 738 | rc_reset = rc; |
738 | 739 | ||
diff --git a/drivers/net/sfc/sfe4001.c b/drivers/net/sfc/sfe4001.c index 16b80acb9992..cb25ae5b257a 100644 --- a/drivers/net/sfc/sfe4001.c +++ b/drivers/net/sfc/sfe4001.c | |||
@@ -186,19 +186,22 @@ static int sfn4111t_reset(struct efx_nic *efx) | |||
186 | { | 186 | { |
187 | efx_oword_t reg; | 187 | efx_oword_t reg; |
188 | 188 | ||
189 | /* GPIO pins are also used for I2C, so block that temporarily */ | 189 | /* GPIO 3 and the GPIO register are shared with I2C, so block that */ |
190 | mutex_lock(&efx->i2c_adap.bus_lock); | 190 | mutex_lock(&efx->i2c_adap.bus_lock); |
191 | 191 | ||
192 | /* Pull RST_N (GPIO 2) low then let it up again, setting the | ||
193 | * FLASH_CFG_1 strap (GPIO 3) appropriately. Only change the | ||
194 | * output enables; the output levels should always be 0 (low) | ||
195 | * and we rely on external pull-ups. */ | ||
192 | falcon_read(efx, ®, GPIO_CTL_REG_KER); | 196 | falcon_read(efx, ®, GPIO_CTL_REG_KER); |
193 | EFX_SET_OWORD_FIELD(reg, GPIO2_OEN, true); | 197 | EFX_SET_OWORD_FIELD(reg, GPIO2_OEN, true); |
194 | EFX_SET_OWORD_FIELD(reg, GPIO2_OUT, false); | ||
195 | falcon_write(efx, ®, GPIO_CTL_REG_KER); | 198 | falcon_write(efx, ®, GPIO_CTL_REG_KER); |
196 | msleep(1000); | 199 | msleep(1000); |
197 | EFX_SET_OWORD_FIELD(reg, GPIO2_OUT, true); | 200 | EFX_SET_OWORD_FIELD(reg, GPIO2_OEN, false); |
198 | EFX_SET_OWORD_FIELD(reg, GPIO3_OEN, true); | 201 | EFX_SET_OWORD_FIELD(reg, GPIO3_OEN, |
199 | EFX_SET_OWORD_FIELD(reg, GPIO3_OUT, | 202 | !!(efx->phy_mode & PHY_MODE_SPECIAL)); |
200 | !(efx->phy_mode & PHY_MODE_SPECIAL)); | ||
201 | falcon_write(efx, ®, GPIO_CTL_REG_KER); | 203 | falcon_write(efx, ®, GPIO_CTL_REG_KER); |
204 | msleep(1); | ||
202 | 205 | ||
203 | mutex_unlock(&efx->i2c_adap.bus_lock); | 206 | mutex_unlock(&efx->i2c_adap.bus_lock); |
204 | 207 | ||
@@ -232,12 +235,18 @@ static ssize_t set_phy_flash_cfg(struct device *dev, | |||
232 | } else if (efx->state != STATE_RUNNING || netif_running(efx->net_dev)) { | 235 | } else if (efx->state != STATE_RUNNING || netif_running(efx->net_dev)) { |
233 | err = -EBUSY; | 236 | err = -EBUSY; |
234 | } else { | 237 | } else { |
238 | /* Reset the PHY, reconfigure the MAC and enable/disable | ||
239 | * MAC stats accordingly. */ | ||
235 | efx->phy_mode = new_mode; | 240 | efx->phy_mode = new_mode; |
241 | if (new_mode & PHY_MODE_SPECIAL) | ||
242 | efx_stats_disable(efx); | ||
236 | if (efx->board_info.type == EFX_BOARD_SFE4001) | 243 | if (efx->board_info.type == EFX_BOARD_SFE4001) |
237 | err = sfe4001_poweron(efx); | 244 | err = sfe4001_poweron(efx); |
238 | else | 245 | else |
239 | err = sfn4111t_reset(efx); | 246 | err = sfn4111t_reset(efx); |
240 | efx_reconfigure_port(efx); | 247 | efx_reconfigure_port(efx); |
248 | if (!(new_mode & PHY_MODE_SPECIAL)) | ||
249 | efx_stats_enable(efx); | ||
241 | } | 250 | } |
242 | rtnl_unlock(); | 251 | rtnl_unlock(); |
243 | 252 | ||
@@ -326,6 +335,11 @@ int sfe4001_init(struct efx_nic *efx) | |||
326 | efx->board_info.monitor = sfe4001_check_hw; | 335 | efx->board_info.monitor = sfe4001_check_hw; |
327 | efx->board_info.fini = sfe4001_fini; | 336 | efx->board_info.fini = sfe4001_fini; |
328 | 337 | ||
338 | if (efx->phy_mode & PHY_MODE_SPECIAL) { | ||
339 | /* PHY won't generate a 156.25 MHz clock and MAC stats fetch | ||
340 | * will fail. */ | ||
341 | efx_stats_disable(efx); | ||
342 | } | ||
329 | rc = sfe4001_poweron(efx); | 343 | rc = sfe4001_poweron(efx); |
330 | if (rc) | 344 | if (rc) |
331 | goto fail_ioexp; | 345 | goto fail_ioexp; |
@@ -372,17 +386,25 @@ static void sfn4111t_fini(struct efx_nic *efx) | |||
372 | i2c_unregister_device(efx->board_info.hwmon_client); | 386 | i2c_unregister_device(efx->board_info.hwmon_client); |
373 | } | 387 | } |
374 | 388 | ||
375 | static struct i2c_board_info sfn4111t_hwmon_info = { | 389 | static struct i2c_board_info sfn4111t_a0_hwmon_info = { |
376 | I2C_BOARD_INFO("max6647", 0x4e), | 390 | I2C_BOARD_INFO("max6647", 0x4e), |
377 | .irq = -1, | 391 | .irq = -1, |
378 | }; | 392 | }; |
379 | 393 | ||
394 | static struct i2c_board_info sfn4111t_r5_hwmon_info = { | ||
395 | I2C_BOARD_INFO("max6646", 0x4d), | ||
396 | .irq = -1, | ||
397 | }; | ||
398 | |||
380 | int sfn4111t_init(struct efx_nic *efx) | 399 | int sfn4111t_init(struct efx_nic *efx) |
381 | { | 400 | { |
382 | int rc; | 401 | int rc; |
383 | 402 | ||
384 | efx->board_info.hwmon_client = | 403 | efx->board_info.hwmon_client = |
385 | i2c_new_device(&efx->i2c_adap, &sfn4111t_hwmon_info); | 404 | i2c_new_device(&efx->i2c_adap, |
405 | (efx->board_info.minor < 5) ? | ||
406 | &sfn4111t_a0_hwmon_info : | ||
407 | &sfn4111t_r5_hwmon_info); | ||
386 | if (!efx->board_info.hwmon_client) | 408 | if (!efx->board_info.hwmon_client) |
387 | return -EIO; | 409 | return -EIO; |
388 | 410 | ||
@@ -394,8 +416,10 @@ int sfn4111t_init(struct efx_nic *efx) | |||
394 | if (rc) | 416 | if (rc) |
395 | goto fail_hwmon; | 417 | goto fail_hwmon; |
396 | 418 | ||
397 | if (efx->phy_mode & PHY_MODE_SPECIAL) | 419 | if (efx->phy_mode & PHY_MODE_SPECIAL) { |
420 | efx_stats_disable(efx); | ||
398 | sfn4111t_reset(efx); | 421 | sfn4111t_reset(efx); |
422 | } | ||
399 | 423 | ||
400 | return 0; | 424 | return 0; |
401 | 425 | ||
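
The sfn4111t_reset() rewrite above drives RST_N and the FLASH_CFG_1 strap in an open-drain style: the output levels stay at 0 and only the output enables are toggled, so releasing a line means disabling its driver and letting the external pull-up bring it high. The sketch below spells out that sequence with hypothetical gpio_output_enable()/sleep_ms() helpers and printed side effects; it describes the idea, not the Falcon GPIO register interface.

/* Open-drain style reset-and-strap sequence, as a hypothetical sketch.
 * gpio_output_enable() and sleep_ms() are stand-ins, not real APIs:
 * enabling an output drives the pin low, disabling it lets the
 * external pull-up take the pin high. */
#include <stdbool.h>
#include <stdio.h>

static void gpio_output_enable(int pin, bool enable)
{
	printf("GPIO%d driver %s (pin %s)\n", pin,
	       enable ? "enabled" : "disabled",
	       enable ? "driven low" : "pulled high");
}

static void sleep_ms(unsigned int ms)
{
	printf("sleep %u ms\n", ms);
}

enum { GPIO_RST_N = 2, GPIO_FLASH_CFG_1 = 3 };

static void board_reset(bool flash_cfg_mode)
{
	/* Assert reset: enable the RST_N driver, which pulls the pin low. */
	gpio_output_enable(GPIO_RST_N, true);
	sleep_ms(1000);

	/* Choose the FLASH_CFG_1 strap (driven low only in special mode),
	 * then release RST_N and let the external pull-up raise it. */
	gpio_output_enable(GPIO_FLASH_CFG_1, flash_cfg_mode);
	gpio_output_enable(GPIO_RST_N, false);
	sleep_ms(1);
}

int main(void)
{
	board_reset(false);    /* normal boot */
	return 0;
}
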
diff --git a/drivers/net/sfc/tenxpress.c b/drivers/net/sfc/tenxpress.c index 9ecb77da9545..f0efd246962c 100644 --- a/drivers/net/sfc/tenxpress.c +++ b/drivers/net/sfc/tenxpress.c | |||
@@ -67,6 +67,8 @@ | |||
67 | #define PMA_PMD_EXT_CLK312_WIDTH 1 | 67 | #define PMA_PMD_EXT_CLK312_WIDTH 1 |
68 | #define PMA_PMD_EXT_LPOWER_LBN 12 | 68 | #define PMA_PMD_EXT_LPOWER_LBN 12 |
69 | #define PMA_PMD_EXT_LPOWER_WIDTH 1 | 69 | #define PMA_PMD_EXT_LPOWER_WIDTH 1 |
70 | #define PMA_PMD_EXT_ROBUST_LBN 14 | ||
71 | #define PMA_PMD_EXT_ROBUST_WIDTH 1 | ||
70 | #define PMA_PMD_EXT_SSR_LBN 15 | 72 | #define PMA_PMD_EXT_SSR_LBN 15 |
71 | #define PMA_PMD_EXT_SSR_WIDTH 1 | 73 | #define PMA_PMD_EXT_SSR_WIDTH 1 |
72 | 74 | ||
@@ -177,35 +179,24 @@ | |||
177 | #define C22EXT_STATUS_LINK_LBN 2 | 179 | #define C22EXT_STATUS_LINK_LBN 2 |
178 | #define C22EXT_STATUS_LINK_WIDTH 1 | 180 | #define C22EXT_STATUS_LINK_WIDTH 1 |
179 | 181 | ||
180 | #define C22EXT_MSTSLV_REG 49162 | 182 | #define C22EXT_MSTSLV_CTRL 49161 |
181 | #define C22EXT_MSTSLV_1000_HD_LBN 10 | 183 | #define C22EXT_MSTSLV_CTRL_ADV_1000_HD_LBN 8 |
182 | #define C22EXT_MSTSLV_1000_HD_WIDTH 1 | 184 | #define C22EXT_MSTSLV_CTRL_ADV_1000_FD_LBN 9 |
183 | #define C22EXT_MSTSLV_1000_FD_LBN 11 | 185 | |
184 | #define C22EXT_MSTSLV_1000_FD_WIDTH 1 | 186 | #define C22EXT_MSTSLV_STATUS 49162 |
187 | #define C22EXT_MSTSLV_STATUS_LP_1000_HD_LBN 10 | ||
188 | #define C22EXT_MSTSLV_STATUS_LP_1000_FD_LBN 11 | ||
185 | 189 | ||
186 | /* Time to wait between powering down the LNPGA and turning off the power | 190 | /* Time to wait between powering down the LNPGA and turning off the power |
187 | * rails */ | 191 | * rails */ |
188 | #define LNPGA_PDOWN_WAIT (HZ / 5) | 192 | #define LNPGA_PDOWN_WAIT (HZ / 5) |
189 | 193 | ||
190 | static int crc_error_reset_threshold = 100; | ||
191 | module_param(crc_error_reset_threshold, int, 0644); | ||
192 | MODULE_PARM_DESC(crc_error_reset_threshold, | ||
193 | "Max number of CRC errors before XAUI reset"); | ||
194 | |||
195 | struct tenxpress_phy_data { | 194 | struct tenxpress_phy_data { |
196 | enum efx_loopback_mode loopback_mode; | 195 | enum efx_loopback_mode loopback_mode; |
197 | atomic_t bad_crc_count; | ||
198 | enum efx_phy_mode phy_mode; | 196 | enum efx_phy_mode phy_mode; |
199 | int bad_lp_tries; | 197 | int bad_lp_tries; |
200 | }; | 198 | }; |
201 | 199 | ||
202 | void tenxpress_crc_err(struct efx_nic *efx) | ||
203 | { | ||
204 | struct tenxpress_phy_data *phy_data = efx->phy_data; | ||
205 | if (phy_data != NULL) | ||
206 | atomic_inc(&phy_data->bad_crc_count); | ||
207 | } | ||
208 | |||
209 | static ssize_t show_phy_short_reach(struct device *dev, | 200 | static ssize_t show_phy_short_reach(struct device *dev, |
210 | struct device_attribute *attr, char *buf) | 201 | struct device_attribute *attr, char *buf) |
211 | { | 202 | { |
@@ -284,7 +275,9 @@ static int tenxpress_init(struct efx_nic *efx) | |||
284 | PMA_PMD_XCONTROL_REG); | 275 | PMA_PMD_XCONTROL_REG); |
285 | reg |= ((1 << PMA_PMD_EXT_GMII_EN_LBN) | | 276 | reg |= ((1 << PMA_PMD_EXT_GMII_EN_LBN) | |
286 | (1 << PMA_PMD_EXT_CLK_OUT_LBN) | | 277 | (1 << PMA_PMD_EXT_CLK_OUT_LBN) | |
287 | (1 << PMA_PMD_EXT_CLK312_LBN)); | 278 | (1 << PMA_PMD_EXT_CLK312_LBN) | |
279 | (1 << PMA_PMD_EXT_ROBUST_LBN)); | ||
280 | |||
288 | mdio_clause45_write(efx, phy_id, MDIO_MMD_PMAPMD, | 281 | mdio_clause45_write(efx, phy_id, MDIO_MMD_PMAPMD, |
289 | PMA_PMD_XCONTROL_REG, reg); | 282 | PMA_PMD_XCONTROL_REG, reg); |
290 | mdio_clause45_set_flag(efx, phy_id, MDIO_MMD_C22EXT, | 283 | mdio_clause45_set_flag(efx, phy_id, MDIO_MMD_C22EXT, |
@@ -346,6 +339,7 @@ static int tenxpress_phy_init(struct efx_nic *efx) | |||
346 | rc = tenxpress_init(efx); | 339 | rc = tenxpress_init(efx); |
347 | if (rc < 0) | 340 | if (rc < 0) |
348 | goto fail; | 341 | goto fail; |
342 | mdio_clause45_set_pause(efx); | ||
349 | 343 | ||
350 | if (efx->phy_type == PHY_TYPE_SFT9001B) { | 344 | if (efx->phy_type == PHY_TYPE_SFT9001B) { |
351 | rc = device_create_file(&efx->pci_dev->dev, | 345 | rc = device_create_file(&efx->pci_dev->dev, |
@@ -376,8 +370,8 @@ static int tenxpress_special_reset(struct efx_nic *efx) | |||
376 | 370 | ||
377 | /* The XGMAC clock is driven from the SFC7101/SFT9001 312MHz clock, so | 371 | /* The XGMAC clock is driven from the SFC7101/SFT9001 312MHz clock, so |
378 | * a special software reset can glitch the XGMAC sufficiently for stats | 372 | * a special software reset can glitch the XGMAC sufficiently for stats |
379 | * requests to fail. Since we don't often special_reset, just lock. */ | 373 | * requests to fail. */ |
380 | spin_lock(&efx->stats_lock); | 374 | efx_stats_disable(efx); |
381 | 375 | ||
382 | /* Initiate reset */ | 376 | /* Initiate reset */ |
383 | reg = mdio_clause45_read(efx, efx->mii.phy_id, | 377 | reg = mdio_clause45_read(efx, efx->mii.phy_id, |
@@ -392,17 +386,17 @@ static int tenxpress_special_reset(struct efx_nic *efx) | |||
392 | rc = mdio_clause45_wait_reset_mmds(efx, | 386 | rc = mdio_clause45_wait_reset_mmds(efx, |
393 | TENXPRESS_REQUIRED_DEVS); | 387 | TENXPRESS_REQUIRED_DEVS); |
394 | if (rc < 0) | 388 | if (rc < 0) |
395 | goto unlock; | 389 | goto out; |
396 | 390 | ||
397 | /* Try and reconfigure the device */ | 391 | /* Try and reconfigure the device */ |
398 | rc = tenxpress_init(efx); | 392 | rc = tenxpress_init(efx); |
399 | if (rc < 0) | 393 | if (rc < 0) |
400 | goto unlock; | 394 | goto out; |
401 | 395 | ||
402 | /* Wait for the XGXS state machine to churn */ | 396 | /* Wait for the XGXS state machine to churn */ |
403 | mdelay(10); | 397 | mdelay(10); |
404 | unlock: | 398 | out: |
405 | spin_unlock(&efx->stats_lock); | 399 | efx_stats_enable(efx); |
406 | return rc; | 400 | return rc; |
407 | } | 401 | } |
408 | 402 | ||
@@ -520,7 +514,7 @@ static void tenxpress_phy_reconfigure(struct efx_nic *efx) | |||
520 | { | 514 | { |
521 | struct tenxpress_phy_data *phy_data = efx->phy_data; | 515 | struct tenxpress_phy_data *phy_data = efx->phy_data; |
522 | struct ethtool_cmd ecmd; | 516 | struct ethtool_cmd ecmd; |
523 | bool phy_mode_change, loop_reset, loop_toggle, loopback; | 517 | bool phy_mode_change, loop_reset; |
524 | 518 | ||
525 | if (efx->phy_mode & (PHY_MODE_OFF | PHY_MODE_SPECIAL)) { | 519 | if (efx->phy_mode & (PHY_MODE_OFF | PHY_MODE_SPECIAL)) { |
526 | phy_data->phy_mode = efx->phy_mode; | 520 | phy_data->phy_mode = efx->phy_mode; |
@@ -531,12 +525,10 @@ static void tenxpress_phy_reconfigure(struct efx_nic *efx) | |||
531 | 525 | ||
532 | phy_mode_change = (efx->phy_mode == PHY_MODE_NORMAL && | 526 | phy_mode_change = (efx->phy_mode == PHY_MODE_NORMAL && |
533 | phy_data->phy_mode != PHY_MODE_NORMAL); | 527 | phy_data->phy_mode != PHY_MODE_NORMAL); |
534 | loopback = LOOPBACK_MASK(efx) & efx->phy_op->loopbacks; | ||
535 | loop_toggle = LOOPBACK_CHANGED(phy_data, efx, efx->phy_op->loopbacks); | ||
536 | loop_reset = (LOOPBACK_OUT_OF(phy_data, efx, efx->phy_op->loopbacks) || | 528 | loop_reset = (LOOPBACK_OUT_OF(phy_data, efx, efx->phy_op->loopbacks) || |
537 | LOOPBACK_CHANGED(phy_data, efx, 1 << LOOPBACK_GPHY)); | 529 | LOOPBACK_CHANGED(phy_data, efx, 1 << LOOPBACK_GPHY)); |
538 | 530 | ||
539 | if (loop_reset || loop_toggle || loopback || phy_mode_change) { | 531 | if (loop_reset || phy_mode_change) { |
540 | int rc; | 532 | int rc; |
541 | 533 | ||
542 | efx->phy_op->get_settings(efx, &ecmd); | 534 | efx->phy_op->get_settings(efx, &ecmd); |
@@ -551,20 +543,6 @@ static void tenxpress_phy_reconfigure(struct efx_nic *efx) | |||
551 | falcon_reset_xaui(efx); | 543 | falcon_reset_xaui(efx); |
552 | } | 544 | } |
553 | 545 | ||
554 | if (efx->phy_type != PHY_TYPE_SFX7101) { | ||
555 | /* Only change autoneg once, on coming out or | ||
556 | * going into loopback */ | ||
557 | if (loop_toggle) | ||
558 | ecmd.autoneg = !loopback; | ||
559 | if (loopback) { | ||
560 | ecmd.duplex = DUPLEX_FULL; | ||
561 | if (efx->loopback_mode == LOOPBACK_GPHY) | ||
562 | ecmd.speed = SPEED_1000; | ||
563 | else | ||
564 | ecmd.speed = SPEED_10000; | ||
565 | } | ||
566 | } | ||
567 | |||
568 | rc = efx->phy_op->set_settings(efx, &ecmd); | 546 | rc = efx->phy_op->set_settings(efx, &ecmd); |
569 | WARN_ON(rc); | 547 | WARN_ON(rc); |
570 | } | 548 | } |
@@ -623,13 +601,6 @@ static void tenxpress_phy_poll(struct efx_nic *efx) | |||
623 | 601 | ||
624 | if (phy_data->phy_mode != PHY_MODE_NORMAL) | 602 | if (phy_data->phy_mode != PHY_MODE_NORMAL) |
625 | return; | 603 | return; |
626 | |||
627 | if (EFX_WORKAROUND_10750(efx) && | ||
628 | atomic_read(&phy_data->bad_crc_count) > crc_error_reset_threshold) { | ||
629 | EFX_ERR(efx, "Resetting XAUI due to too many CRC errors\n"); | ||
630 | falcon_reset_xaui(efx); | ||
631 | atomic_set(&phy_data->bad_crc_count, 0); | ||
632 | } | ||
633 | } | 604 | } |
634 | 605 | ||
635 | static void tenxpress_phy_fini(struct efx_nic *efx) | 606 | static void tenxpress_phy_fini(struct efx_nic *efx) |
@@ -772,107 +743,76 @@ reset: | |||
772 | return rc; | 743 | return rc; |
773 | } | 744 | } |
774 | 745 | ||
775 | static u32 tenxpress_get_xnp_lpa(struct efx_nic *efx) | 746 | static void |
747 | tenxpress_get_settings(struct efx_nic *efx, struct ethtool_cmd *ecmd) | ||
776 | { | 748 | { |
777 | int phy = efx->mii.phy_id; | 749 | int phy_id = efx->mii.phy_id; |
778 | u32 lpa = 0; | 750 | u32 adv = 0, lpa = 0; |
779 | int reg; | 751 | int reg; |
780 | 752 | ||
781 | if (efx->phy_type != PHY_TYPE_SFX7101) { | 753 | if (efx->phy_type != PHY_TYPE_SFX7101) { |
782 | reg = mdio_clause45_read(efx, phy, MDIO_MMD_C22EXT, | 754 | reg = mdio_clause45_read(efx, phy_id, MDIO_MMD_C22EXT, |
783 | C22EXT_MSTSLV_REG); | 755 | C22EXT_MSTSLV_CTRL); |
784 | if (reg & (1 << C22EXT_MSTSLV_1000_HD_LBN)) | 756 | if (reg & (1 << C22EXT_MSTSLV_CTRL_ADV_1000_FD_LBN)) |
757 | adv |= ADVERTISED_1000baseT_Full; | ||
758 | reg = mdio_clause45_read(efx, phy_id, MDIO_MMD_C22EXT, | ||
759 | C22EXT_MSTSLV_STATUS); | ||
760 | if (reg & (1 << C22EXT_MSTSLV_STATUS_LP_1000_HD_LBN)) | ||
785 | lpa |= ADVERTISED_1000baseT_Half; | 761 | lpa |= ADVERTISED_1000baseT_Half; |
786 | if (reg & (1 << C22EXT_MSTSLV_1000_FD_LBN)) | 762 | if (reg & (1 << C22EXT_MSTSLV_STATUS_LP_1000_FD_LBN)) |
787 | lpa |= ADVERTISED_1000baseT_Full; | 763 | lpa |= ADVERTISED_1000baseT_Full; |
788 | } | 764 | } |
789 | reg = mdio_clause45_read(efx, phy, MDIO_MMD_AN, MDIO_AN_10GBT_STATUS); | 765 | reg = mdio_clause45_read(efx, phy_id, MDIO_MMD_AN, |
766 | MDIO_AN_10GBT_CTRL); | ||
767 | if (reg & (1 << MDIO_AN_10GBT_CTRL_ADV_10G_LBN)) | ||
768 | adv |= ADVERTISED_10000baseT_Full; | ||
769 | reg = mdio_clause45_read(efx, phy_id, MDIO_MMD_AN, | ||
770 | MDIO_AN_10GBT_STATUS); | ||
790 | if (reg & (1 << MDIO_AN_10GBT_STATUS_LP_10G_LBN)) | 771 | if (reg & (1 << MDIO_AN_10GBT_STATUS_LP_10G_LBN)) |
791 | lpa |= ADVERTISED_10000baseT_Full; | 772 | lpa |= ADVERTISED_10000baseT_Full; |
792 | return lpa; | ||
793 | } | ||
794 | 773 | ||
795 | static void sfx7101_get_settings(struct efx_nic *efx, struct ethtool_cmd *ecmd) | 774 | mdio_clause45_get_settings_ext(efx, ecmd, adv, lpa); |
796 | { | 775 | |
797 | mdio_clause45_get_settings_ext(efx, ecmd, ADVERTISED_10000baseT_Full, | 776 | if (efx->phy_type != PHY_TYPE_SFX7101) |
798 | tenxpress_get_xnp_lpa(efx)); | 777 | ecmd->supported |= (SUPPORTED_100baseT_Full | |
799 | ecmd->supported |= SUPPORTED_10000baseT_Full; | 778 | SUPPORTED_1000baseT_Full); |
800 | ecmd->advertising |= ADVERTISED_10000baseT_Full; | 779 | |
780 | /* In loopback, the PHY automatically brings up the correct interface, | ||
781 | * but doesn't advertise the correct speed. So override it */ | ||
782 | if (efx->loopback_mode == LOOPBACK_GPHY) | ||
783 | ecmd->speed = SPEED_1000; | ||
784 | else if (LOOPBACK_MASK(efx) & efx->phy_op->loopbacks) | ||
785 | ecmd->speed = SPEED_10000; | ||
801 | } | 786 | } |
802 | 787 | ||
803 | static void sft9001_get_settings(struct efx_nic *efx, struct ethtool_cmd *ecmd) | 788 | static int tenxpress_set_settings(struct efx_nic *efx, struct ethtool_cmd *ecmd) |
804 | { | 789 | { |
805 | int phy_id = efx->mii.phy_id; | 790 | if (!ecmd->autoneg) |
806 | u32 xnp_adv = 0; | 791 | return -EINVAL; |
807 | int reg; | ||
808 | |||
809 | reg = mdio_clause45_read(efx, phy_id, MDIO_MMD_PMAPMD, | ||
810 | PMA_PMD_SPEED_ENABLE_REG); | ||
811 | if (EFX_WORKAROUND_13204(efx) && (reg & (1 << PMA_PMD_100TX_ADV_LBN))) | ||
812 | xnp_adv |= ADVERTISED_100baseT_Full; | ||
813 | if (reg & (1 << PMA_PMD_1000T_ADV_LBN)) | ||
814 | xnp_adv |= ADVERTISED_1000baseT_Full; | ||
815 | if (reg & (1 << PMA_PMD_10000T_ADV_LBN)) | ||
816 | xnp_adv |= ADVERTISED_10000baseT_Full; | ||
817 | |||
818 | mdio_clause45_get_settings_ext(efx, ecmd, xnp_adv, | ||
819 | tenxpress_get_xnp_lpa(efx)); | ||
820 | |||
821 | ecmd->supported |= (SUPPORTED_100baseT_Half | | ||
822 | SUPPORTED_100baseT_Full | | ||
823 | SUPPORTED_1000baseT_Full); | ||
824 | 792 | ||
825 | /* Use the vendor defined C22ext register for duplex settings */ | 793 | return mdio_clause45_set_settings(efx, ecmd); |
826 | if (ecmd->speed != SPEED_10000 && !ecmd->autoneg) { | ||
827 | reg = mdio_clause45_read(efx, phy_id, MDIO_MMD_C22EXT, | ||
828 | GPHY_XCONTROL_REG); | ||
829 | ecmd->duplex = (reg & (1 << GPHY_DUPLEX_LBN) ? | ||
830 | DUPLEX_FULL : DUPLEX_HALF); | ||
831 | } | ||
832 | } | 794 | } |
833 | 795 | ||
834 | static int sft9001_set_settings(struct efx_nic *efx, struct ethtool_cmd *ecmd) | 796 | static void sfx7101_set_npage_adv(struct efx_nic *efx, u32 advertising) |
835 | { | 797 | { |
836 | int phy_id = efx->mii.phy_id; | 798 | mdio_clause45_set_flag(efx, efx->mii.phy_id, MDIO_MMD_AN, |
837 | int rc; | 799 | MDIO_AN_10GBT_CTRL, |
838 | 800 | MDIO_AN_10GBT_CTRL_ADV_10G_LBN, | |
839 | rc = mdio_clause45_set_settings(efx, ecmd); | 801 | advertising & ADVERTISED_10000baseT_Full); |
840 | if (rc) | ||
841 | return rc; | ||
842 | |||
843 | if (ecmd->speed != SPEED_10000 && !ecmd->autoneg) | ||
844 | mdio_clause45_set_flag(efx, phy_id, MDIO_MMD_C22EXT, | ||
845 | GPHY_XCONTROL_REG, GPHY_DUPLEX_LBN, | ||
846 | ecmd->duplex == DUPLEX_FULL); | ||
847 | |||
848 | return rc; | ||
849 | } | 802 | } |
850 | 803 | ||
851 | static bool sft9001_set_xnp_advertise(struct efx_nic *efx, u32 advertising) | 804 | static void sft9001_set_npage_adv(struct efx_nic *efx, u32 advertising) |
852 | { | 805 | { |
853 | int phy = efx->mii.phy_id; | 806 | int phy_id = efx->mii.phy_id; |
854 | int reg = mdio_clause45_read(efx, phy, MDIO_MMD_PMAPMD, | 807 | |
855 | PMA_PMD_SPEED_ENABLE_REG); | 808 | mdio_clause45_set_flag(efx, phy_id, MDIO_MMD_C22EXT, |
856 | bool enabled; | 809 | C22EXT_MSTSLV_CTRL, |
857 | 810 | C22EXT_MSTSLV_CTRL_ADV_1000_FD_LBN, | |
858 | reg &= ~((1 << 2) | (1 << 3)); | 811 | advertising & ADVERTISED_1000baseT_Full); |
859 | if (EFX_WORKAROUND_13204(efx) && | 812 | mdio_clause45_set_flag(efx, phy_id, MDIO_MMD_AN, |
860 | (advertising & ADVERTISED_100baseT_Full)) | 813 | MDIO_AN_10GBT_CTRL, |
861 | reg |= 1 << PMA_PMD_100TX_ADV_LBN; | 814 | MDIO_AN_10GBT_CTRL_ADV_10G_LBN, |
862 | if (advertising & ADVERTISED_1000baseT_Full) | 815 | advertising & ADVERTISED_10000baseT_Full); |
863 | reg |= 1 << PMA_PMD_1000T_ADV_LBN; | ||
864 | if (advertising & ADVERTISED_10000baseT_Full) | ||
865 | reg |= 1 << PMA_PMD_10000T_ADV_LBN; | ||
866 | mdio_clause45_write(efx, phy, MDIO_MMD_PMAPMD, | ||
867 | PMA_PMD_SPEED_ENABLE_REG, reg); | ||
868 | |||
869 | enabled = (advertising & | ||
870 | (ADVERTISED_1000baseT_Half | | ||
871 | ADVERTISED_1000baseT_Full | | ||
872 | ADVERTISED_10000baseT_Full)); | ||
873 | if (EFX_WORKAROUND_13204(efx)) | ||
874 | enabled |= (advertising & ADVERTISED_100baseT_Full); | ||
875 | return enabled; | ||
876 | } | 816 | } |
877 | 817 | ||
878 | struct efx_phy_operations falcon_sfx7101_phy_ops = { | 818 | struct efx_phy_operations falcon_sfx7101_phy_ops = { |
@@ -882,8 +822,9 @@ struct efx_phy_operations falcon_sfx7101_phy_ops = { | |||
882 | .poll = tenxpress_phy_poll, | 822 | .poll = tenxpress_phy_poll, |
883 | .fini = tenxpress_phy_fini, | 823 | .fini = tenxpress_phy_fini, |
884 | .clear_interrupt = efx_port_dummy_op_void, | 824 | .clear_interrupt = efx_port_dummy_op_void, |
885 | .get_settings = sfx7101_get_settings, | 825 | .get_settings = tenxpress_get_settings, |
886 | .set_settings = mdio_clause45_set_settings, | 826 | .set_settings = tenxpress_set_settings, |
827 | .set_npage_adv = sfx7101_set_npage_adv, | ||
887 | .num_tests = ARRAY_SIZE(sfx7101_test_names), | 828 | .num_tests = ARRAY_SIZE(sfx7101_test_names), |
888 | .test_names = sfx7101_test_names, | 829 | .test_names = sfx7101_test_names, |
889 | .run_tests = sfx7101_run_tests, | 830 | .run_tests = sfx7101_run_tests, |
@@ -898,9 +839,9 @@ struct efx_phy_operations falcon_sft9001_phy_ops = { | |||
898 | .poll = tenxpress_phy_poll, | 839 | .poll = tenxpress_phy_poll, |
899 | .fini = tenxpress_phy_fini, | 840 | .fini = tenxpress_phy_fini, |
900 | .clear_interrupt = efx_port_dummy_op_void, | 841 | .clear_interrupt = efx_port_dummy_op_void, |
901 | .get_settings = sft9001_get_settings, | 842 | .get_settings = tenxpress_get_settings, |
902 | .set_settings = sft9001_set_settings, | 843 | .set_settings = tenxpress_set_settings, |
903 | .set_xnp_advertise = sft9001_set_xnp_advertise, | 844 | .set_npage_adv = sft9001_set_npage_adv, |
904 | .num_tests = ARRAY_SIZE(sft9001_test_names), | 845 | .num_tests = ARRAY_SIZE(sft9001_test_names), |
905 | .test_names = sft9001_test_names, | 846 | .test_names = sft9001_test_names, |
906 | .run_tests = sft9001_run_tests, | 847 | .run_tests = sft9001_run_tests, |
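The tenxpress.c hunks above collapse the separate sfx7101/sft9001 get_settings paths into a single tenxpress_get_settings() that reads both the local advertisement and the link-partner abilities over clause-45 MDIO, ORs them into ethtool masks, and hands the result to mdio_clause45_get_settings_ext(). The sketch below shows only that bit-folding step as stand-alone C; the register layout, bit positions and mask values are invented for illustration and are not the driver's MDIO map.

/* Stand-alone sketch of folding PHY register bits into ethtool-style
 * advertising masks, as tenxpress_get_settings() does for "adv" and "lpa".
 * All register/bit/mask values here are made up for the example. */
#include <stdio.h>

#define ADV_1000FULL   (1u << 0)   /* stands in for ADVERTISED_1000baseT_Full */

#define CTRL_ADV_1000_FD_LBN   9   /* hypothetical control-register bit */
#define STAT_LP_1000_FD_LBN   11   /* hypothetical status-register bit */

static unsigned int fold_adv(unsigned int ctrl_reg)
{
	unsigned int adv = 0;
	if (ctrl_reg & (1u << CTRL_ADV_1000_FD_LBN))
		adv |= ADV_1000FULL;
	return adv;
}

static unsigned int fold_lpa(unsigned int status_reg)
{
	unsigned int lpa = 0;
	if (status_reg & (1u << STAT_LP_1000_FD_LBN))
		lpa |= ADV_1000FULL;
	return lpa;
}

int main(void)
{
	unsigned int ctrl = 1u << CTRL_ADV_1000_FD_LBN;  /* pretend MDIO read */
	unsigned int stat = 0;                           /* link partner advertises nothing */
	printf("adv=%#x lpa=%#x\n", fold_adv(ctrl), fold_lpa(stat));
	return 0;
}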
diff --git a/drivers/net/sfc/workarounds.h b/drivers/net/sfc/workarounds.h index 82e03e1d7371..78de68f4a95b 100644 --- a/drivers/net/sfc/workarounds.h +++ b/drivers/net/sfc/workarounds.h | |||
@@ -18,8 +18,8 @@ | |||
18 | #define EFX_WORKAROUND_ALWAYS(efx) 1 | 18 | #define EFX_WORKAROUND_ALWAYS(efx) 1 |
19 | #define EFX_WORKAROUND_FALCON_A(efx) (falcon_rev(efx) <= FALCON_REV_A1) | 19 | #define EFX_WORKAROUND_FALCON_A(efx) (falcon_rev(efx) <= FALCON_REV_A1) |
20 | #define EFX_WORKAROUND_10G(efx) EFX_IS10G(efx) | 20 | #define EFX_WORKAROUND_10G(efx) EFX_IS10G(efx) |
21 | #define EFX_WORKAROUND_SFX7101(efx) ((efx)->phy_type == PHY_TYPE_SFX7101) | 21 | #define EFX_WORKAROUND_SFT9001(efx) ((efx)->phy_type == PHY_TYPE_SFT9001A || \ |
22 | #define EFX_WORKAROUND_SFT9001A(efx) ((efx)->phy_type == PHY_TYPE_SFT9001A) | 22 | (efx)->phy_type == PHY_TYPE_SFT9001B) |
23 | 23 | ||
24 | /* XAUI resets if link not detected */ | 24 | /* XAUI resets if link not detected */ |
25 | #define EFX_WORKAROUND_5147 EFX_WORKAROUND_ALWAYS | 25 | #define EFX_WORKAROUND_5147 EFX_WORKAROUND_ALWAYS |
@@ -29,8 +29,6 @@ | |||
29 | #define EFX_WORKAROUND_7884 EFX_WORKAROUND_10G | 29 | #define EFX_WORKAROUND_7884 EFX_WORKAROUND_10G |
30 | /* TX pkt parser problem with <= 16 byte TXes */ | 30 | /* TX pkt parser problem with <= 16 byte TXes */ |
31 | #define EFX_WORKAROUND_9141 EFX_WORKAROUND_ALWAYS | 31 | #define EFX_WORKAROUND_9141 EFX_WORKAROUND_ALWAYS |
32 | /* Low rate CRC errors require XAUI reset */ | ||
33 | #define EFX_WORKAROUND_10750 EFX_WORKAROUND_SFX7101 | ||
34 | /* TX_EV_PKT_ERR can be caused by a dangling TX descriptor | 32 | /* TX_EV_PKT_ERR can be caused by a dangling TX descriptor |
35 | * or a PCIe error (bug 11028) */ | 33 | * or a PCIe error (bug 11028) */ |
36 | #define EFX_WORKAROUND_10727 EFX_WORKAROUND_ALWAYS | 34 | #define EFX_WORKAROUND_10727 EFX_WORKAROUND_ALWAYS |
@@ -55,8 +53,8 @@ | |||
55 | #define EFX_WORKAROUND_8071 EFX_WORKAROUND_FALCON_A | 53 | #define EFX_WORKAROUND_8071 EFX_WORKAROUND_FALCON_A |
56 | 54 | ||
57 | /* Need to send XNP pages for 100BaseT */ | 55 | /* Need to send XNP pages for 100BaseT */ |
58 | #define EFX_WORKAROUND_13204 EFX_WORKAROUND_SFT9001A | 56 | #define EFX_WORKAROUND_13204 EFX_WORKAROUND_SFT9001 |
59 | /* Need to keep AN enabled */ | 57 | /* Don't restart AN in near-side loopback */ |
60 | #define EFX_WORKAROUND_13963 EFX_WORKAROUND_SFT9001A | 58 | #define EFX_WORKAROUND_15195 EFX_WORKAROUND_SFT9001 |
61 | 59 | ||
62 | #endif /* EFX_WORKAROUNDS_H */ | 60 | #endif /* EFX_WORKAROUNDS_H */ |
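workarounds.h keeps one predicate macro per erratum number, gated on PHY type or silicon revision, so call sites read like the EFX_WORKAROUND_13204(efx) test in the tenxpress.c hunk above. A tiny stand-alone illustration of the idiom, with invented names and an invented quirk:

/* The "workaround predicate" idiom in isolation; names are illustrative. */
#include <stdio.h>

struct board { int phy_type; };

#define PHY_TYPE_FOO_A  1
#define PHY_TYPE_FOO_B  2

/* one macro per erratum, so the bug number is visible at each call site */
#define WORKAROUND_13204(b) ((b)->phy_type == PHY_TYPE_FOO_A || \
			     (b)->phy_type == PHY_TYPE_FOO_B)

int main(void)
{
	struct board b = { .phy_type = PHY_TYPE_FOO_A };
	if (WORKAROUND_13204(&b))
		puts("advertise 100BaseT via XNP pages");
	return 0;
}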
diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c index 3668e81e474d..994703cc0db3 100644 --- a/drivers/net/sky2.c +++ b/drivers/net/sky2.c | |||
@@ -1403,9 +1403,6 @@ static int sky2_up(struct net_device *dev) | |||
1403 | 1403 | ||
1404 | } | 1404 | } |
1405 | 1405 | ||
1406 | if (netif_msg_ifup(sky2)) | ||
1407 | printk(KERN_INFO PFX "%s: enabling interface\n", dev->name); | ||
1408 | |||
1409 | netif_carrier_off(dev); | 1406 | netif_carrier_off(dev); |
1410 | 1407 | ||
1411 | /* must be power of 2 */ | 1408 | /* must be power of 2 */ |
@@ -1484,6 +1481,9 @@ static int sky2_up(struct net_device *dev) | |||
1484 | sky2_write32(hw, B0_IMSK, imask); | 1481 | sky2_write32(hw, B0_IMSK, imask); |
1485 | 1482 | ||
1486 | sky2_set_multicast(dev); | 1483 | sky2_set_multicast(dev); |
1484 | |||
1485 | if (netif_msg_ifup(sky2)) | ||
1486 | printk(KERN_INFO PFX "%s: enabling interface\n", dev->name); | ||
1487 | return 0; | 1487 | return 0; |
1488 | 1488 | ||
1489 | err_out: | 1489 | err_out: |
diff --git a/ipc/shm.c b/ipc/shm.c --- a/ipc/shm.c +++ b/ipc/shm.c | |||
@@ -368,14 +368,14 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params) | |||
368 | file = hugetlb_file_setup(name, size); | 368 | file = hugetlb_file_setup(name, size); |
369 | shp->mlock_user = current_user(); | 369 | shp->mlock_user = current_user(); |
370 | } else { | 370 | } else { |
371 | int acctflag = VM_ACCOUNT; | 371 | int acctflag = 0; |
372 | /* | 372 | /* |
373 | * Do not allow no accounting for OVERCOMMIT_NEVER, even | 373 | * Do not allow no accounting for OVERCOMMIT_NEVER, even |
374 | * if it's asked for. | 374 | * if it's asked for. |
375 | */ | 375 | */ |
376 | if ((shmflg & SHM_NORESERVE) && | 376 | if ((shmflg & SHM_NORESERVE) && |
377 | sysctl_overcommit_memory != OVERCOMMIT_NEVER) | 377 | sysctl_overcommit_memory != OVERCOMMIT_NEVER) |
378 | acctflag = 0; | 378 | acctflag = VM_NORESERVE; |
379 | file = shmem_file_setup(name, size, acctflag); | 379 | file = shmem_file_setup(name, size, acctflag); |
380 | } | 380 | } |
381 | error = PTR_ERR(file); | 381 | error = PTR_ERR(file); |
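In newseg(), the default flips: segments are now accounted unless the caller asked for SHM_NORESERVE and the overcommit policy permits it, in which case VM_NORESERVE (rather than a cleared VM_ACCOUNT) is what gets passed to shmem_file_setup(); the mm/shmem.c hunk further down then maps that onto the inode flags. A compilable model of the decision, with stand-in constants rather than the kernel's flag values:

/* Stand-alone model of the new acctflag logic in newseg(); the flag
 * values and the OVERCOMMIT_NEVER constant below are illustrative. */
#include <stdio.h>

#define SHM_NORESERVE_F   0x1000   /* stands in for SHM_NORESERVE */
#define VM_NORESERVE_F    0x0200   /* stands in for VM_NORESERVE */
#define OVERCOMMIT_NEVER  2

static int overcommit_policy;      /* would be sysctl_overcommit_memory */

static int acctflag_for(int shmflg)
{
	int acctflag = 0;              /* default: account the segment */

	if ((shmflg & SHM_NORESERVE_F) &&
	    overcommit_policy != OVERCOMMIT_NEVER)
		acctflag = VM_NORESERVE_F;  /* caller opted out, policy allows it */
	return acctflag;
}

int main(void)
{
	printf("plain:      %#x\n", acctflag_for(0));
	printf("noreserve:  %#x\n", acctflag_for(SHM_NORESERVE_F));
	overcommit_policy = OVERCOMMIT_NEVER;
	printf("never mode: %#x\n", acctflag_for(SHM_NORESERVE_F));
	return 0;
}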
diff --git a/mm/mmap.c b/mm/mmap.c --- a/mm/mmap.c +++ b/mm/mmap.c | |||
@@ -1090,6 +1090,15 @@ int vma_wants_writenotify(struct vm_area_struct *vma) | |||
1090 | mapping_cap_account_dirty(vma->vm_file->f_mapping); | 1090 | mapping_cap_account_dirty(vma->vm_file->f_mapping); |
1091 | } | 1091 | } |
1092 | 1092 | ||
1093 | /* | ||
1094 | * We account for memory if it's a private writeable mapping, | ||
1095 | * and VM_NORESERVE wasn't set. | ||
1096 | */ | ||
1097 | static inline int accountable_mapping(unsigned int vm_flags) | ||
1098 | { | ||
1099 | return (vm_flags & (VM_NORESERVE | VM_SHARED | VM_WRITE)) == VM_WRITE; | ||
1100 | } | ||
1101 | |||
1093 | unsigned long mmap_region(struct file *file, unsigned long addr, | 1102 | unsigned long mmap_region(struct file *file, unsigned long addr, |
1094 | unsigned long len, unsigned long flags, | 1103 | unsigned long len, unsigned long flags, |
1095 | unsigned int vm_flags, unsigned long pgoff, | 1104 | unsigned int vm_flags, unsigned long pgoff, |
@@ -1117,23 +1126,24 @@ munmap_back: | |||
1117 | if (!may_expand_vm(mm, len >> PAGE_SHIFT)) | 1126 | if (!may_expand_vm(mm, len >> PAGE_SHIFT)) |
1118 | return -ENOMEM; | 1127 | return -ENOMEM; |
1119 | 1128 | ||
1120 | if (flags & MAP_NORESERVE) | 1129 | /* |
1130 | * Set 'VM_NORESERVE' if we should not account for the | ||
1131 | * memory use of this mapping. We only honor MAP_NORESERVE | ||
1132 | * if we're allowed to overcommit memory. | ||
1133 | */ | ||
1134 | if ((flags & MAP_NORESERVE) && sysctl_overcommit_memory != OVERCOMMIT_NEVER) | ||
1135 | vm_flags |= VM_NORESERVE; | ||
1136 | if (!accountable) | ||
1121 | vm_flags |= VM_NORESERVE; | 1137 | vm_flags |= VM_NORESERVE; |
1122 | 1138 | ||
1123 | if (accountable && (!(flags & MAP_NORESERVE) || | 1139 | /* |
1124 | sysctl_overcommit_memory == OVERCOMMIT_NEVER)) { | 1140 | * Private writable mapping: check memory availability |
1125 | if (vm_flags & VM_SHARED) { | 1141 | */ |
1126 | /* Check memory availability in shmem_file_setup? */ | 1142 | if (accountable_mapping(vm_flags)) { |
1127 | vm_flags |= VM_ACCOUNT; | 1143 | charged = len >> PAGE_SHIFT; |
1128 | } else if (vm_flags & VM_WRITE) { | 1144 | if (security_vm_enough_memory(charged)) |
1129 | /* | 1145 | return -ENOMEM; |
1130 | * Private writable mapping: check memory availability | 1146 | vm_flags |= VM_ACCOUNT; |
1131 | */ | ||
1132 | charged = len >> PAGE_SHIFT; | ||
1133 | if (security_vm_enough_memory(charged)) | ||
1134 | return -ENOMEM; | ||
1135 | vm_flags |= VM_ACCOUNT; | ||
1136 | } | ||
1137 | } | 1147 | } |
1138 | 1148 | ||
1139 | /* | 1149 | /* |
@@ -1184,14 +1194,6 @@ munmap_back: | |||
1184 | goto free_vma; | 1194 | goto free_vma; |
1185 | } | 1195 | } |
1186 | 1196 | ||
1187 | /* We set VM_ACCOUNT in a shared mapping's vm_flags, to inform | ||
1188 | * shmem_zero_setup (perhaps called through /dev/zero's ->mmap) | ||
1189 | * that memory reservation must be checked; but that reservation | ||
1190 | * belongs to shared memory object, not to vma: so now clear it. | ||
1191 | */ | ||
1192 | if ((vm_flags & (VM_SHARED|VM_ACCOUNT)) == (VM_SHARED|VM_ACCOUNT)) | ||
1193 | vma->vm_flags &= ~VM_ACCOUNT; | ||
1194 | |||
1195 | /* Can addr have changed?? | 1197 | /* Can addr have changed?? |
1196 | * | 1198 | * |
1197 | * Answer: Yes, several device drivers can do it in their | 1199 | * Answer: Yes, several device drivers can do it in their |
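Taken together, the mm/mmap.c hunks replace the old VM_ACCOUNT bookkeeping (set on shared mappings, then cleared again after shmem_zero_setup) with two small rules: MAP_NORESERVE becomes VM_NORESERVE only when overcommit is allowed, and only private, writable, non-NORESERVE mappings are charged. The model below mirrors just that boolean logic; the flag bits and the OVERCOMMIT_NEVER value are stand-ins, not the kernel's.

/* Model of the new accounting decision in mmap_region(); the flag bits
 * are invented for the example, only the boolean logic mirrors the patch. */
#include <stdbool.h>
#include <stdio.h>

#define F_SHARED     0x1   /* stands in for VM_SHARED    */
#define F_WRITE      0x2   /* stands in for VM_WRITE     */
#define F_NORESERVE  0x4   /* stands in for VM_NORESERVE */

#define OVERCOMMIT_NEVER 2
static int overcommit;     /* would be sysctl_overcommit_memory */

/* honor MAP_NORESERVE only when overcommit is allowed */
static unsigned int apply_noreserve(unsigned int vm_flags, bool map_noreserve)
{
	if (map_noreserve && overcommit != OVERCOMMIT_NEVER)
		vm_flags |= F_NORESERVE;
	return vm_flags;
}

/* charge only private, writable, not-NORESERVE mappings */
static bool accountable_mapping(unsigned int vm_flags)
{
	return (vm_flags & (F_NORESERVE | F_SHARED | F_WRITE)) == F_WRITE;
}

int main(void)
{
	unsigned int priv_rw   = F_WRITE;
	unsigned int shared_rw = F_SHARED | F_WRITE;

	printf("private rw:             %d\n", accountable_mapping(priv_rw));
	printf("shared rw:              %d\n", accountable_mapping(shared_rw));
	printf("private rw + NORESERVE: %d\n",
	       accountable_mapping(apply_noreserve(priv_rw, true)));
	return 0;
}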
diff --git a/mm/shmem.c b/mm/shmem.c index 5d0de96c9789..19d566ccdeea 100644 --- a/mm/shmem.c +++ b/mm/shmem.c | |||
@@ -2628,7 +2628,7 @@ struct file *shmem_file_setup(char *name, loff_t size, unsigned long flags) | |||
2628 | goto close_file; | 2628 | goto close_file; |
2629 | 2629 | ||
2630 | #ifdef CONFIG_SHMEM | 2630 | #ifdef CONFIG_SHMEM |
2631 | SHMEM_I(inode)->flags = flags & VM_ACCOUNT; | 2631 | SHMEM_I(inode)->flags = (flags & VM_NORESERVE) ? 0 : VM_ACCOUNT; |
2632 | #endif | 2632 | #endif |
2633 | d_instantiate(dentry, inode); | 2633 | d_instantiate(dentry, inode); |
2634 | inode->i_size = size; | 2634 | inode->i_size = size; |
diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c index d19a84b79503..228be551e9c1 100644 --- a/net/ipv6/ip6mr.c +++ b/net/ipv6/ip6mr.c | |||
@@ -48,6 +48,7 @@ | |||
48 | #include <linux/pim.h> | 48 | #include <linux/pim.h> |
49 | #include <net/addrconf.h> | 49 | #include <net/addrconf.h> |
50 | #include <linux/netfilter_ipv6.h> | 50 | #include <linux/netfilter_ipv6.h> |
51 | #include <net/ip6_checksum.h> | ||
51 | 52 | ||
52 | /* Big lock, protecting vif table, mrt cache and mroute socket state. | 53 | /* Big lock, protecting vif table, mrt cache and mroute socket state. |
53 | Note that the changes are semaphored via rtnl_lock. | 54 | Note that the changes are semaphored via rtnl_lock. |
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c index 5f94db2f3e9e..9454d4ae46df 100644 --- a/net/packet/af_packet.c +++ b/net/packet/af_packet.c | |||
@@ -77,6 +77,7 @@ | |||
77 | #include <linux/poll.h> | 77 | #include <linux/poll.h> |
78 | #include <linux/module.h> | 78 | #include <linux/module.h> |
79 | #include <linux/init.h> | 79 | #include <linux/init.h> |
80 | #include <linux/mutex.h> | ||
80 | 81 | ||
81 | #ifdef CONFIG_INET | 82 | #ifdef CONFIG_INET |
82 | #include <net/inet_common.h> | 83 | #include <net/inet_common.h> |
@@ -175,6 +176,7 @@ struct packet_sock { | |||
175 | #endif | 176 | #endif |
176 | struct packet_type prot_hook; | 177 | struct packet_type prot_hook; |
177 | spinlock_t bind_lock; | 178 | spinlock_t bind_lock; |
179 | struct mutex pg_vec_lock; | ||
178 | unsigned int running:1, /* prot_hook is attached*/ | 180 | unsigned int running:1, /* prot_hook is attached*/ |
179 | auxdata:1, | 181 | auxdata:1, |
180 | origdev:1; | 182 | origdev:1; |
@@ -1069,6 +1071,7 @@ static int packet_create(struct net *net, struct socket *sock, int protocol) | |||
1069 | */ | 1071 | */ |
1070 | 1072 | ||
1071 | spin_lock_init(&po->bind_lock); | 1073 | spin_lock_init(&po->bind_lock); |
1074 | mutex_init(&po->pg_vec_lock); | ||
1072 | po->prot_hook.func = packet_rcv; | 1075 | po->prot_hook.func = packet_rcv; |
1073 | 1076 | ||
1074 | if (sock->type == SOCK_PACKET) | 1077 | if (sock->type == SOCK_PACKET) |
@@ -1865,6 +1868,7 @@ static int packet_set_ring(struct sock *sk, struct tpacket_req *req, int closing | |||
1865 | synchronize_net(); | 1868 | synchronize_net(); |
1866 | 1869 | ||
1867 | err = -EBUSY; | 1870 | err = -EBUSY; |
1871 | mutex_lock(&po->pg_vec_lock); | ||
1868 | if (closing || atomic_read(&po->mapped) == 0) { | 1872 | if (closing || atomic_read(&po->mapped) == 0) { |
1869 | err = 0; | 1873 | err = 0; |
1870 | #define XC(a, b) ({ __typeof__ ((a)) __t; __t = (a); (a) = (b); __t; }) | 1874 | #define XC(a, b) ({ __typeof__ ((a)) __t; __t = (a); (a) = (b); __t; }) |
@@ -1886,6 +1890,7 @@ static int packet_set_ring(struct sock *sk, struct tpacket_req *req, int closing | |||
1886 | if (atomic_read(&po->mapped)) | 1890 | if (atomic_read(&po->mapped)) |
1887 | printk(KERN_DEBUG "packet_mmap: vma is busy: %d\n", atomic_read(&po->mapped)); | 1891 | printk(KERN_DEBUG "packet_mmap: vma is busy: %d\n", atomic_read(&po->mapped)); |
1888 | } | 1892 | } |
1893 | mutex_unlock(&po->pg_vec_lock); | ||
1889 | 1894 | ||
1890 | spin_lock(&po->bind_lock); | 1895 | spin_lock(&po->bind_lock); |
1891 | if (was_running && !po->running) { | 1896 | if (was_running && !po->running) { |
@@ -1918,7 +1923,7 @@ static int packet_mmap(struct file *file, struct socket *sock, struct vm_area_st | |||
1918 | 1923 | ||
1919 | size = vma->vm_end - vma->vm_start; | 1924 | size = vma->vm_end - vma->vm_start; |
1920 | 1925 | ||
1921 | lock_sock(sk); | 1926 | mutex_lock(&po->pg_vec_lock); |
1922 | if (po->pg_vec == NULL) | 1927 | if (po->pg_vec == NULL) |
1923 | goto out; | 1928 | goto out; |
1924 | if (size != po->pg_vec_len*po->pg_vec_pages*PAGE_SIZE) | 1929 | if (size != po->pg_vec_len*po->pg_vec_pages*PAGE_SIZE) |
@@ -1941,7 +1946,7 @@ static int packet_mmap(struct file *file, struct socket *sock, struct vm_area_st | |||
1941 | err = 0; | 1946 | err = 0; |
1942 | 1947 | ||
1943 | out: | 1948 | out: |
1944 | release_sock(sk); | 1949 | mutex_unlock(&po->pg_vec_lock); |
1945 | return err; | 1950 | return err; |
1946 | } | 1951 | } |
1947 | #endif | 1952 | #endif |
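The af_packet.c hunks stop relying on lock_sock() to serialize the mmap path against ring reconfiguration: a dedicated pg_vec_lock mutex is initialized in packet_create() and taken in both packet_set_ring() and packet_mmap(). The pthread sketch below shows the same shape, one mutex guarding both the resize and the map paths of a shared buffer, with invented names and a plain heap buffer standing in for the kernel page vector.

/* Sketch of the pg_vec_lock pattern with POSIX threads: one mutex
 * serializes reconfiguration of a buffer against mapping it.
 * Names and the buffer itself are illustrative, not the kernel code. */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct ring {
	pthread_mutex_t lock;   /* plays the role of po->pg_vec_lock */
	size_t len;
	char *pages;
};

static int ring_resize(struct ring *r, size_t len)    /* ~packet_set_ring() */
{
	int err = 0;

	pthread_mutex_lock(&r->lock);
	free(r->pages);
	r->pages = len ? calloc(len, 1) : NULL;
	if (len && !r->pages)
		err = -1;
	r->len = err ? 0 : len;
	pthread_mutex_unlock(&r->lock);
	return err;
}

static int ring_map(struct ring *r, size_t want)       /* ~packet_mmap() */
{
	int err = -1;

	pthread_mutex_lock(&r->lock);
	if (r->pages && want == r->len)   /* requested size must match the ring */
		err = 0;
	pthread_mutex_unlock(&r->lock);
	return err;
}

int main(void)
{
	/* a real user would call these from different threads */
	static struct ring r = { .lock = PTHREAD_MUTEX_INITIALIZER };

	ring_resize(&r, 4096);
	printf("map 4096: %d, map 8192: %d\n",
	       ring_map(&r, 4096), ring_map(&r, 8192));
	free(r.pages);
	return 0;
}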