133 files changed, 957 insertions, 600 deletions
diff --git a/MAINTAINERS b/MAINTAINERS
index f216db847022..4afcfb4c892b 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -4049,6 +4049,12 @@ W: http://www.pharscape.org
 S: Maintained
 F: drivers/net/usb/hso.c
 
+HSR NETWORK PROTOCOL
+M: Arvid Brodin <arvid.brodin@alten.se>
+L: netdev@vger.kernel.org
+S: Maintained
+F: net/hsr/
+
 HTCPEN TOUCHSCREEN DRIVER
 M: Pau Oliva Fora <pof@eslack.org>
 L: linux-input@vger.kernel.org
@@ -5261,7 +5267,7 @@ S: Maintained
 F: Documentation/lockdep*.txt
 F: Documentation/lockstat.txt
 F: include/linux/lockdep.h
-F: kernel/lockdep*
+F: kernel/locking/
 
 LOGICAL DISK MANAGER SUPPORT (LDM, Windows 2000/XP/Vista Dynamic Disks)
 M: "Richard Russon (FlatCap)" <ldm@flatcap.org>
@@ -7385,7 +7391,6 @@ S: Maintained
 F: kernel/sched/
 F: include/linux/sched.h
 F: include/uapi/linux/sched.h
-F: kernel/wait.c
 F: include/linux/wait.h
 
 SCORE ARCHITECTURE
diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h
index be956dbf6bae..1571d126e9dd 100644
--- a/arch/arm/include/asm/pgtable.h
+++ b/arch/arm/include/asm/pgtable.h
@@ -61,7 +61,7 @@ extern void __pgd_error(const char *file, int line, pgd_t);
  * mapping to be mapped at. This is particularly important for
  * non-high vector CPUs.
  */
-#define FIRST_USER_ADDRESS	PAGE_SIZE
+#define FIRST_USER_ADDRESS	(PAGE_SIZE * 2)
 
 /*
  * Use TASK_SIZE as the ceiling argument for free_pgtables() and
diff --git a/arch/arm/kernel/machine_kexec.c b/arch/arm/kernel/machine_kexec.c
index 57221e349a7c..f0d180d8b29f 100644
--- a/arch/arm/kernel/machine_kexec.c
+++ b/arch/arm/kernel/machine_kexec.c
@@ -14,11 +14,12 @@
 #include <asm/pgalloc.h>
 #include <asm/mmu_context.h>
 #include <asm/cacheflush.h>
+#include <asm/fncpy.h>
 #include <asm/mach-types.h>
 #include <asm/smp_plat.h>
 #include <asm/system_misc.h>
 
-extern const unsigned char relocate_new_kernel[];
+extern void relocate_new_kernel(void);
 extern const unsigned int relocate_new_kernel_size;
 
 extern unsigned long kexec_start_address;
@@ -142,6 +143,8 @@ void machine_kexec(struct kimage *image)
 {
 	unsigned long page_list;
 	unsigned long reboot_code_buffer_phys;
+	unsigned long reboot_entry = (unsigned long)relocate_new_kernel;
+	unsigned long reboot_entry_phys;
 	void *reboot_code_buffer;
 
 	/*
@@ -168,16 +171,16 @@ void machine_kexec(struct kimage *image)
 
 
 	/* copy our kernel relocation code to the control code page */
-	memcpy(reboot_code_buffer,
-	       relocate_new_kernel, relocate_new_kernel_size);
+	reboot_entry = fncpy(reboot_code_buffer,
+			     reboot_entry,
+			     relocate_new_kernel_size);
+	reboot_entry_phys = (unsigned long)reboot_entry +
+		(reboot_code_buffer_phys - (unsigned long)reboot_code_buffer);
 
-
-	flush_icache_range((unsigned long) reboot_code_buffer,
-		(unsigned long) reboot_code_buffer + KEXEC_CONTROL_PAGE_SIZE);
 	printk(KERN_INFO "Bye!\n");
 
 	if (kexec_reinit)
 		kexec_reinit();
 
-	soft_restart(reboot_code_buffer_phys);
+	soft_restart(reboot_entry_phys);
 }
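The machine_kexec()/relocate_kernel changes above replace a raw memcpy() of the relocation stub with fncpy(), whose return value preserves the Thumb bit of the source function so the copied code is entered in the right instruction set (it also handles cache maintenance, which is why the explicit flush_icache_range() call goes away). A minimal sketch of the fncpy() pattern, assuming an 8-byte-aligned destination buffer; copy_stub() and stub_buf are illustrative names, not part of this patch:

#include <asm/fncpy.h>

extern void relocate_new_kernel(void);
extern const unsigned int relocate_new_kernel_size;

typedef void (*stub_fn_t)(void);

/* Copy the stub into stub_buf and return a pointer that is safe to call. */
static stub_fn_t copy_stub(void *stub_buf)
{
	return fncpy(stub_buf, &relocate_new_kernel, relocate_new_kernel_size);
}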
diff --git a/arch/arm/kernel/relocate_kernel.S b/arch/arm/kernel/relocate_kernel.S
index d0cdedf4864d..95858966d84e 100644
--- a/arch/arm/kernel/relocate_kernel.S
+++ b/arch/arm/kernel/relocate_kernel.S
@@ -2,10 +2,12 @@
  * relocate_kernel.S - put the kernel image in place to boot
  */
 
+#include <linux/linkage.h>
 #include <asm/kexec.h>
 
-.globl relocate_new_kernel
-relocate_new_kernel:
+	.align	3	/* not needed for this code, but keeps fncpy() happy */
+
+ENTRY(relocate_new_kernel)
 
 	ldr	r0,kexec_indirection_page
 	ldr	r1,kexec_start_address
@@ -79,6 +81,8 @@ kexec_mach_type:
 kexec_boot_atags:
 	.long	0x0
 
+ENDPROC(relocate_new_kernel)
+
 relocate_new_kernel_end:
 
 	.globl relocate_new_kernel_size
diff --git a/arch/arm/kernel/sigreturn_codes.S b/arch/arm/kernel/sigreturn_codes.S
index 3c5d0f2170fd..b84d0cb13682 100644
--- a/arch/arm/kernel/sigreturn_codes.S
+++ b/arch/arm/kernel/sigreturn_codes.S
@@ -30,6 +30,27 @@
  * snippets.
  */
 
+/*
+ * In CPU_THUMBONLY case kernel arm opcodes are not allowed.
+ * Note in this case codes skips those instructions but it uses .org
+ * directive to keep correct layout of sigreturn_codes array.
+ */
+#ifndef CONFIG_CPU_THUMBONLY
+#define ARM_OK(code...) code
+#else
+#define ARM_OK(code...)
+#endif
+
+	.macro arm_slot n
+	.org	sigreturn_codes + 12 * (\n)
+ARM_OK(	.arm	)
+	.endm
+
+	.macro thumb_slot n
+	.org	sigreturn_codes + 12 * (\n) + 8
+	.thumb
+	.endm
+
 #if __LINUX_ARM_ARCH__ <= 4
 	/*
 	 * Note we manually set minimally required arch that supports
@@ -45,26 +66,27 @@
 	.global sigreturn_codes
 	.type	sigreturn_codes, #object
 
-	.arm
+	.align
 
 sigreturn_codes:
 
 	/* ARM sigreturn syscall code snippet */
-	mov	r7, #(__NR_sigreturn - __NR_SYSCALL_BASE)
-	swi	#(__NR_sigreturn)|(__NR_OABI_SYSCALL_BASE)
+	arm_slot 0
+ARM_OK(	mov	r7, #(__NR_sigreturn - __NR_SYSCALL_BASE)	)
+ARM_OK(	swi	#(__NR_sigreturn)|(__NR_OABI_SYSCALL_BASE)	)
 
 	/* Thumb sigreturn syscall code snippet */
-	.thumb
+	thumb_slot 0
 	movs	r7, #(__NR_sigreturn - __NR_SYSCALL_BASE)
 	swi	#0
 
 	/* ARM sigreturn_rt syscall code snippet */
-	.arm
-	mov	r7, #(__NR_rt_sigreturn - __NR_SYSCALL_BASE)
-	swi	#(__NR_rt_sigreturn)|(__NR_OABI_SYSCALL_BASE)
+	arm_slot 1
+ARM_OK(	mov	r7, #(__NR_rt_sigreturn - __NR_SYSCALL_BASE)	)
+ARM_OK(	swi	#(__NR_rt_sigreturn)|(__NR_OABI_SYSCALL_BASE)	)
 
 	/* Thumb sigreturn_rt syscall code snippet */
-	.thumb
+	thumb_slot 1
 	movs	r7, #(__NR_rt_sigreturn - __NR_SYSCALL_BASE)
 	swi	#0
 
@@ -74,7 +96,7 @@ sigreturn_codes:
 	 * it is thumb case or not, so we need additional
 	 * word after real last entry.
 	 */
-	.arm
+	arm_slot 2
 	.space	4
 
 	.size	sigreturn_codes, . - sigreturn_codes
diff --git a/arch/arm/lib/delay-loop.S b/arch/arm/lib/delay-loop.S
index 36b668d8e121..bc1033b897b4 100644
--- a/arch/arm/lib/delay-loop.S
+++ b/arch/arm/lib/delay-loop.S
@@ -40,6 +40,7 @@ ENTRY(__loop_const_udelay)			@ 0 <= r0 <= 0x7fffff06
 /*
  * loops = r0 * HZ * loops_per_jiffy / 1000000
  */
+	.align 3
 
 @ Delay routine
 ENTRY(__loop_delay)
diff --git a/arch/arm/mach-at91/at91rm9200_time.c b/arch/arm/mach-at91/at91rm9200_time.c
index f607deb40f4d..bc7b363a3083 100644
--- a/arch/arm/mach-at91/at91rm9200_time.c
+++ b/arch/arm/mach-at91/at91rm9200_time.c
@@ -174,7 +174,6 @@ clkevt32k_next_event(unsigned long delta, struct clock_event_device *dev)
 static struct clock_event_device clkevt = {
 	.name		= "at91_tick",
 	.features	= CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
-	.shift		= 32,
 	.rating		= 150,
 	.set_next_event	= clkevt32k_next_event,
 	.set_mode	= clkevt32k_mode,
@@ -265,11 +264,9 @@ void __init at91rm9200_timer_init(void)
 	at91_st_write(AT91_ST_RTMR, 1);
 
 	/* Setup timer clockevent, with minimum of two ticks (important!!) */
-	clkevt.mult = div_sc(AT91_SLOW_CLOCK, NSEC_PER_SEC, clkevt.shift);
-	clkevt.max_delta_ns = clockevent_delta2ns(AT91_ST_ALMV, &clkevt);
-	clkevt.min_delta_ns = clockevent_delta2ns(2, &clkevt) + 1;
 	clkevt.cpumask = cpumask_of(0);
-	clockevents_register_device(&clkevt);
+	clockevents_config_and_register(&clkevt, AT91_SLOW_CLOCK,
+					2, AT91_ST_ALMV);
 
 	/* register clocksource */
 	clocksource_register_hz(&clk32k, AT91_SLOW_CLOCK);
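The at91rm9200 hunk above is the stock conversion from hand-computed mult/shift and delta values to clockevents_config_and_register(), which derives them from the tick rate and the minimum/maximum programmable delta in ticks. A sketch of the same call for a hypothetical 32.768 kHz timer; my_clkevt, MY_SLOW_CLOCK and MY_MAX_DELTA are illustrative names, not from this patch:

#include <linux/clockchips.h>
#include <linux/cpumask.h>

#define MY_SLOW_CLOCK	32768		/* tick rate in Hz */
#define MY_MAX_DELTA	0x7fffffff	/* widest programmable delta, in ticks */

static struct clock_event_device my_clkevt = {
	.name		= "my_tick",
	.features	= CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
	.rating		= 150,
	/* .set_next_event and .set_mode callbacks omitted in this sketch */
};

static void my_timer_init(void)
{
	my_clkevt.cpumask = cpumask_of(0);
	/* Fills in mult, shift, min_delta_ns and max_delta_ns internally. */
	clockevents_config_and_register(&my_clkevt, MY_SLOW_CLOCK,
					2, MY_MAX_DELTA);
}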
diff --git a/arch/arm/mach-footbridge/common.c b/arch/arm/mach-footbridge/common.c
index 2739ca2c1334..e0091685fd48 100644
--- a/arch/arm/mach-footbridge/common.c
+++ b/arch/arm/mach-footbridge/common.c
@@ -15,6 +15,7 @@
 #include <linux/init.h>
 #include <linux/io.h>
 #include <linux/spinlock.h>
+#include <video/vga.h>
 
 #include <asm/pgtable.h>
 #include <asm/page.h>
@@ -196,6 +197,8 @@ void __init footbridge_map_io(void)
 		iotable_init(ebsa285_host_io_desc, ARRAY_SIZE(ebsa285_host_io_desc));
 		pci_map_io_early(__phys_to_pfn(DC21285_PCI_IO));
 	}
+
+	vga_base = PCIMEM_BASE;
 }
 
 void footbridge_restart(enum reboot_mode mode, const char *cmd)
diff --git a/arch/arm/mach-footbridge/dc21285.c b/arch/arm/mach-footbridge/dc21285.c
index 3490a24f969e..7c2fdae9a38b 100644
--- a/arch/arm/mach-footbridge/dc21285.c
+++ b/arch/arm/mach-footbridge/dc21285.c
@@ -18,7 +18,6 @@
 #include <linux/irq.h>
 #include <linux/io.h>
 #include <linux/spinlock.h>
-#include <video/vga.h>
 
 #include <asm/irq.h>
 #include <asm/mach/pci.h>
@@ -291,7 +290,6 @@ void __init dc21285_preinit(void)
 	int cfn_mode;
 
 	pcibios_min_mem = 0x81000000;
-	vga_base = PCIMEM_BASE;
 
 	mem_size = (unsigned int)high_memory - PAGE_OFFSET;
 	for (mem_mask = 0x00100000; mem_mask < 0x10000000; mem_mask <<= 1)
diff --git a/arch/arm/mach-footbridge/ebsa285.c b/arch/arm/mach-footbridge/ebsa285.c
index b08243500e2e..1a7235fb52ac 100644
--- a/arch/arm/mach-footbridge/ebsa285.c
+++ b/arch/arm/mach-footbridge/ebsa285.c
@@ -30,21 +30,24 @@ static const struct {
 	const char *name;
 	const char *trigger;
 } ebsa285_leds[] = {
-	{ "ebsa285:amber", "heartbeat", },
-	{ "ebsa285:green", "cpu0", },
+	{ "ebsa285:amber", "cpu0", },
+	{ "ebsa285:green", "heartbeat", },
 	{ "ebsa285:red",},
 };
 
+static unsigned char hw_led_state;
+
 static void ebsa285_led_set(struct led_classdev *cdev,
 		enum led_brightness b)
 {
 	struct ebsa285_led *led = container_of(cdev,
 		struct ebsa285_led, cdev);
 
-	if (b != LED_OFF)
-		*XBUS_LEDS |= led->mask;
+	if (b == LED_OFF)
+		hw_led_state |= led->mask;
 	else
-		*XBUS_LEDS &= ~led->mask;
+		hw_led_state &= ~led->mask;
+	*XBUS_LEDS = hw_led_state;
 }
 
 static enum led_brightness ebsa285_led_get(struct led_classdev *cdev)
@@ -52,18 +55,19 @@ static enum led_brightness ebsa285_led_get(struct led_classdev *cdev)
 	struct ebsa285_led *led = container_of(cdev,
 		struct ebsa285_led, cdev);
 
-	return (*XBUS_LEDS & led->mask) ? LED_FULL : LED_OFF;
+	return hw_led_state & led->mask ? LED_OFF : LED_FULL;
 }
 
 static int __init ebsa285_leds_init(void)
 {
 	int i;
 
-	if (machine_is_ebsa285())
+	if (!machine_is_ebsa285())
 		return -ENODEV;
 
-	/* 3 LEDS All ON */
-	*XBUS_LEDS |= XBUS_LED_AMBER | XBUS_LED_GREEN | XBUS_LED_RED;
+	/* 3 LEDS all off */
+	hw_led_state = XBUS_LED_AMBER | XBUS_LED_GREEN | XBUS_LED_RED;
+	*XBUS_LEDS = hw_led_state;
 
 	for (i = 0; i < ARRAY_SIZE(ebsa285_leds); i++) {
 		struct ebsa285_led *led;
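The ebsa285 LED rework above is a textbook shadow-register fix: the XBUS latch is effectively write-only and its bits are active-low, so the driver keeps the last value written in hw_led_state and derives both set and get from that copy. The same pattern stripped of the board specifics; led_latch, MY_LED_* and the active-low polarity are assumptions of this sketch:

#include <linux/types.h>

#define MY_LED_RED	0x01
#define MY_LED_GREEN	0x02

static volatile u8 *led_latch;		/* write-only output latch */
static u8 led_shadow;			/* last value written to it */

/* Active-low: a set bit means the LED is off. */
static void my_led_set(u8 mask, bool on)
{
	if (on)
		led_shadow &= ~mask;
	else
		led_shadow |= mask;
	*led_latch = led_shadow;
}

static bool my_led_get(u8 mask)
{
	return !(led_shadow & mask);	/* answer from the shadow, not the hardware */
}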
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 79f8b39801a8..f6b6bfa88ecf 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -9,6 +9,7 @@
  *
  *  DMA uncached mapping support.
  */
+#include <linux/bootmem.h>
 #include <linux/module.h>
 #include <linux/mm.h>
 #include <linux/gfp.h>
@@ -162,6 +163,8 @@ static u64 get_coherent_dma_mask(struct device *dev)
 	u64 mask = (u64)DMA_BIT_MASK(32);
 
 	if (dev) {
+		unsigned long max_dma_pfn;
+
 		mask = dev->coherent_dma_mask;
 
 		/*
@@ -173,6 +176,8 @@ static u64 get_coherent_dma_mask(struct device *dev)
 			return 0;
 		}
 
+		max_dma_pfn = min(max_pfn, arm_dma_pfn_limit);
+
 		/*
 		 * If the mask allows for more memory than we can address,
 		 * and we actually have that much memory, then fail the
@@ -180,7 +185,7 @@ static u64 get_coherent_dma_mask(struct device *dev)
 		 */
 		if (sizeof(mask) != sizeof(dma_addr_t) &&
 		    mask > (dma_addr_t)~0 &&
-		    dma_to_pfn(dev, ~0) > arm_dma_pfn_limit) {
+		    dma_to_pfn(dev, ~0) > max_dma_pfn) {
 			dev_warn(dev, "Coherent DMA mask %#llx is larger than dma_addr_t allows\n",
 				 mask);
 			dev_warn(dev, "Driver did not use or check the return value from dma_set_coherent_mask()?\n");
@@ -192,7 +197,7 @@ static u64 get_coherent_dma_mask(struct device *dev)
 		 * fits within the allowable addresses which we can
 		 * allocate.
 		 */
-		if (dma_to_pfn(dev, mask) < arm_dma_pfn_limit) {
+		if (dma_to_pfn(dev, mask) < max_dma_pfn) {
 			dev_warn(dev, "Coherent DMA mask %#llx (pfn %#lx-%#lx) covers a smaller range of system memory than the DMA zone pfn 0x0-%#lx\n",
 				 mask,
 				 dma_to_pfn(dev, 0), dma_to_pfn(dev, mask) + 1,
diff --git a/arch/arm/mm/mmap.c b/arch/arm/mm/mmap.c
index d27158c38eb0..5e85ed371364 100644
--- a/arch/arm/mm/mmap.c
+++ b/arch/arm/mm/mmap.c
@@ -146,7 +146,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
 
 	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
 	info.length = len;
-	info.low_limit = PAGE_SIZE;
+	info.low_limit = FIRST_USER_ADDRESS;
 	info.high_limit = mm->mmap_base;
 	info.align_mask = do_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
 	info.align_offset = pgoff << PAGE_SHIFT;
diff --git a/arch/arm/mm/pgd.c b/arch/arm/mm/pgd.c
index 0acb089d0f70..1046b373d1ae 100644
--- a/arch/arm/mm/pgd.c
+++ b/arch/arm/mm/pgd.c
@@ -87,7 +87,8 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
 		init_pud = pud_offset(init_pgd, 0);
 		init_pmd = pmd_offset(init_pud, 0);
 		init_pte = pte_offset_map(init_pmd, 0);
-		set_pte_ext(new_pte, *init_pte, 0);
+		set_pte_ext(new_pte + 0, init_pte[0], 0);
+		set_pte_ext(new_pte + 1, init_pte[1], 0);
 		pte_unmap(init_pte);
 		pte_unmap(new_pte);
 	}
diff --git a/arch/arm/xen/p2m.c b/arch/arm/xen/p2m.c
index 23732cdff551..b31ee1b275b0 100644
--- a/arch/arm/xen/p2m.c
+++ b/arch/arm/xen/p2m.c
@@ -25,8 +25,9 @@ struct xen_p2m_entry {
 	struct rb_node rbnode_phys;
 };
 
-rwlock_t p2m_lock;
+static rwlock_t p2m_lock;
 struct rb_root phys_to_mach = RB_ROOT;
+EXPORT_SYMBOL_GPL(phys_to_mach);
 static struct rb_root mach_to_phys = RB_ROOT;
 
 static int xen_add_phys_to_mach_entry(struct xen_p2m_entry *new)
@@ -200,7 +201,7 @@ bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn)
 }
 EXPORT_SYMBOL_GPL(__set_phys_to_machine);
 
-int p2m_init(void)
+static int p2m_init(void)
 {
 	rwlock_init(&p2m_lock);
 	return 0;
diff --git a/arch/s390/crypto/aes_s390.c b/arch/s390/crypto/aes_s390.c
index 4363528dc8fd..b3feabd39f31 100644
--- a/arch/s390/crypto/aes_s390.c
+++ b/arch/s390/crypto/aes_s390.c
@@ -55,8 +55,7 @@ struct pcc_param {
 
 struct s390_xts_ctx {
 	u8 key[32];
-	u8 xts_param[16];
-	struct pcc_param pcc;
+	u8 pcc_key[32];
 	long enc;
 	long dec;
 	int key_len;
@@ -591,7 +590,7 @@ static int xts_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
 		xts_ctx->enc = KM_XTS_128_ENCRYPT;
 		xts_ctx->dec = KM_XTS_128_DECRYPT;
 		memcpy(xts_ctx->key + 16, in_key, 16);
-		memcpy(xts_ctx->pcc.key + 16, in_key + 16, 16);
+		memcpy(xts_ctx->pcc_key + 16, in_key + 16, 16);
 		break;
 	case 48:
 		xts_ctx->enc = 0;
@@ -602,7 +601,7 @@ static int xts_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
 		xts_ctx->enc = KM_XTS_256_ENCRYPT;
 		xts_ctx->dec = KM_XTS_256_DECRYPT;
 		memcpy(xts_ctx->key, in_key, 32);
-		memcpy(xts_ctx->pcc.key, in_key + 32, 32);
+		memcpy(xts_ctx->pcc_key, in_key + 32, 32);
 		break;
 	default:
 		*flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
@@ -621,29 +620,33 @@ static int xts_aes_crypt(struct blkcipher_desc *desc, long func,
 	unsigned int nbytes = walk->nbytes;
 	unsigned int n;
 	u8 *in, *out;
-	void *param;
+	struct pcc_param pcc_param;
+	struct {
+		u8 key[32];
+		u8 init[16];
+	} xts_param;
 
 	if (!nbytes)
 		goto out;
 
-	memset(xts_ctx->pcc.block, 0, sizeof(xts_ctx->pcc.block));
-	memset(xts_ctx->pcc.bit, 0, sizeof(xts_ctx->pcc.bit));
-	memset(xts_ctx->pcc.xts, 0, sizeof(xts_ctx->pcc.xts));
-	memcpy(xts_ctx->pcc.tweak, walk->iv, sizeof(xts_ctx->pcc.tweak));
-	param = xts_ctx->pcc.key + offset;
-	ret = crypt_s390_pcc(func, param);
+	memset(pcc_param.block, 0, sizeof(pcc_param.block));
+	memset(pcc_param.bit, 0, sizeof(pcc_param.bit));
+	memset(pcc_param.xts, 0, sizeof(pcc_param.xts));
+	memcpy(pcc_param.tweak, walk->iv, sizeof(pcc_param.tweak));
+	memcpy(pcc_param.key, xts_ctx->pcc_key, 32);
+	ret = crypt_s390_pcc(func, &pcc_param.key[offset]);
 	if (ret < 0)
 		return -EIO;
 
-	memcpy(xts_ctx->xts_param, xts_ctx->pcc.xts, 16);
-	param = xts_ctx->key + offset;
+	memcpy(xts_param.key, xts_ctx->key, 32);
+	memcpy(xts_param.init, pcc_param.xts, 16);
 	do {
 		/* only use complete blocks */
 		n = nbytes & ~(AES_BLOCK_SIZE - 1);
 		out = walk->dst.virt.addr;
 		in = walk->src.virt.addr;
 
-		ret = crypt_s390_km(func, param, out, in, n);
+		ret = crypt_s390_km(func, &xts_param.key[offset], out, in, n);
 		if (ret < 0 || ret != n)
 			return -EIO;
 
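The s390 XTS rework above is a reentrancy fix: the PCC parameter block and the XTS key/tweak block used to live in the shared s390_xts_ctx, so two requests on the same tfm could overwrite each other's state; after the patch only the keys stay in the context and all per-request scratch data sits on the stack. Reduced to its shape; the struct names and do_one_request() are illustrative, not this driver's API:

#include <linux/string.h>
#include <linux/types.h>

struct my_tfm_ctx {		/* shared: written only at setkey time */
	u8 key[32];
	int key_len;
};

struct my_req_state {		/* per request: lives on the caller's stack */
	u8 key[32];
	u8 tweak[16];
};

static int do_one_request(struct my_tfm_ctx *ctx, const u8 *iv)
{
	struct my_req_state state;

	memcpy(state.key, ctx->key, sizeof(state.key));
	memcpy(state.tweak, iv, sizeof(state.tweak));
	/* ... hand &state to the crypto operation; ctx is never written ... */
	return 0;
}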
diff --git a/arch/um/Makefile b/arch/um/Makefile
index 48d92bbe62e9..36e658a4291c 100644
--- a/arch/um/Makefile
+++ b/arch/um/Makefile
@@ -33,12 +33,11 @@ MODE_INCLUDE	+= -I$(srctree)/$(ARCH_DIR)/include/shared/skas
 
 HEADER_ARCH 	:= $(SUBARCH)
 
-# Additional ARCH settings for x86
-ifeq ($(SUBARCH),i386)
-        HEADER_ARCH := x86
+ifneq ($(filter $(SUBARCH),x86 x86_64 i386),)
+	HEADER_ARCH := x86
 endif
-ifeq ($(SUBARCH),x86_64)
-        HEADER_ARCH := x86
+
+ifdef CONFIG_64BIT
 	KBUILD_CFLAGS += -mcmodel=large
 endif
 
diff --git a/arch/um/kernel/sysrq.c b/arch/um/kernel/sysrq.c
index 4d6fdf68edf3..799d7e413bf5 100644
--- a/arch/um/kernel/sysrq.c
+++ b/arch/um/kernel/sysrq.c
@@ -19,7 +19,7 @@ struct stack_frame {
 	unsigned long return_address;
 };
 
-static void print_stack_trace(unsigned long *sp, unsigned long bp)
+static void do_stack_trace(unsigned long *sp, unsigned long bp)
 {
 	int reliable;
 	unsigned long addr;
@@ -94,5 +94,5 @@ void show_stack(struct task_struct *task, unsigned long *stack)
 	}
 	printk(KERN_CONT "\n");
 
-	print_stack_trace(sp, bp);
+	do_stack_trace(sp, bp);
 }
diff --git a/arch/x86/include/asm/trace/irq_vectors.h b/arch/x86/include/asm/trace/irq_vectors.h
index 2874df24e7a4..4cab890007a7 100644
--- a/arch/x86/include/asm/trace/irq_vectors.h
+++ b/arch/x86/include/asm/trace/irq_vectors.h
@@ -72,6 +72,17 @@ DEFINE_IRQ_VECTOR_EVENT(x86_platform_ipi);
 DEFINE_IRQ_VECTOR_EVENT(irq_work);
 
 /*
+ * We must dis-allow sampling irq_work_exit() because perf event sampling
+ * itself can cause irq_work, which would lead to an infinite loop;
+ *
+ *  1) irq_work_exit happens
+ *  2) generates perf sample
+ *  3) generates irq_work
+ *  4) goto 1
+ */
+TRACE_EVENT_PERF_PERM(irq_work_exit, is_sampling_event(p_event) ? -EPERM : 0);
+
+/*
  * call_function - called when entering/exiting a call function interrupt
  * vector handler
  */
diff --git a/crypto/algif_hash.c b/crypto/algif_hash.c
index ef5356cd280a..850246206b12 100644
--- a/crypto/algif_hash.c
+++ b/crypto/algif_hash.c
@@ -114,6 +114,9 @@ static ssize_t hash_sendpage(struct socket *sock, struct page *page,
 	struct hash_ctx *ctx = ask->private;
 	int err;
 
+	if (flags & MSG_SENDPAGE_NOTLAST)
+		flags |= MSG_MORE;
+
 	lock_sock(sk);
 	sg_init_table(ctx->sgl.sg, 1);
 	sg_set_page(ctx->sgl.sg, page, size, offset);
diff --git a/crypto/algif_skcipher.c b/crypto/algif_skcipher.c
index 6a6dfc062d2a..a19c027b29bd 100644
--- a/crypto/algif_skcipher.c
+++ b/crypto/algif_skcipher.c
@@ -378,6 +378,9 @@ static ssize_t skcipher_sendpage(struct socket *sock, struct page *page,
 	struct skcipher_sg_list *sgl;
 	int err = -EINVAL;
 
+	if (flags & MSG_SENDPAGE_NOTLAST)
+		flags |= MSG_MORE;
+
 	lock_sock(sk);
 	if (!ctx->more && ctx->used)
 		goto unlock;
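The two algif hunks above are the same fix: the kernel splice path calls ->sendpage() with MSG_SENDPAGE_NOTLAST set on every page except the final one, so an implementation that only checks MSG_MORE would wrongly finalise the hash or cipher operation after the first page. Translating the flag up front keeps the rest of the function unchanged; a sketch of the shape, where my_sendpage() is illustrative but the flag names are real:

#include <linux/net.h>
#include <linux/socket.h>

static ssize_t my_sendpage(struct socket *sock, struct page *page,
			   int offset, size_t size, int flags)
{
	/* More pages follow: treat exactly like an explicit MSG_MORE. */
	if (flags & MSG_SENDPAGE_NOTLAST)
		flags |= MSG_MORE;

	/* ... queue the page; only finalise when MSG_MORE is clear ... */
	return size;
}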
diff --git a/crypto/authenc.c b/crypto/authenc.c
index 1875e7026e8f..e1223559d5df 100644
--- a/crypto/authenc.c
+++ b/crypto/authenc.c
@@ -380,9 +380,10 @@ static void crypto_authenc_encrypt_done(struct crypto_async_request *req,
 	if (!err) {
 		struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
 		struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc);
-		struct ablkcipher_request *abreq = aead_request_ctx(areq);
-		u8 *iv = (u8 *)(abreq + 1) +
-			 crypto_ablkcipher_reqsize(ctx->enc);
+		struct authenc_request_ctx *areq_ctx = aead_request_ctx(areq);
+		struct ablkcipher_request *abreq = (void *)(areq_ctx->tail
+							    + ctx->reqoff);
+		u8 *iv = (u8 *)abreq - crypto_ablkcipher_ivsize(ctx->enc);
 
 		err = crypto_authenc_genicv(areq, iv, 0);
 	}
diff --git a/crypto/ccm.c b/crypto/ccm.c
index 3e05499d183a..1df84217f7c9 100644
--- a/crypto/ccm.c
+++ b/crypto/ccm.c
@@ -271,7 +271,8 @@ static int crypto_ccm_auth(struct aead_request *req, struct scatterlist *plain,
 	}
 
 	/* compute plaintext into mac */
-	get_data_to_compute(cipher, pctx, plain, cryptlen);
+	if (cryptlen)
+		get_data_to_compute(cipher, pctx, plain, cryptlen);
 
 out:
 	return err;
diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c
index 1ab8258fcf56..001f07cdb828 100644
--- a/crypto/tcrypt.c
+++ b/crypto/tcrypt.c
@@ -1242,6 +1242,10 @@ static int do_test(int m)
 		ret += tcrypt_test("cmac(des3_ede)");
 		break;
 
+	case 155:
+		ret += tcrypt_test("authenc(hmac(sha1),cbc(aes))");
+		break;
+
 	case 200:
 		test_cipher_speed("ecb(aes)", ENCRYPT, sec, NULL, 0,
 				speed_template_16_24_32);
diff --git a/crypto/testmgr.c b/crypto/testmgr.c
index 432afc03e7c3..77955507f6f1 100644
--- a/crypto/testmgr.c
+++ b/crypto/testmgr.c
@@ -503,16 +503,16 @@ static int __test_aead(struct crypto_aead *tfm, int enc,
 			goto out;
 		}
 
-		sg_init_one(&sg[0], input,
-			    template[i].ilen + (enc ? authsize : 0));
-
 		if (diff_dst) {
 			output = xoutbuf[0];
 			output += align_offset;
+			sg_init_one(&sg[0], input, template[i].ilen);
 			sg_init_one(&sgout[0], output,
+				    template[i].rlen);
+		} else {
+			sg_init_one(&sg[0], input,
 				    template[i].ilen +
 					(enc ? authsize : 0));
-		} else {
 			output = input;
 		}
 
@@ -612,12 +612,6 @@ static int __test_aead(struct crypto_aead *tfm, int enc,
 			memcpy(q, template[i].input + temp,
 			       template[i].tap[k]);
 
-			n = template[i].tap[k];
-			if (k == template[i].np - 1 && enc)
-				n += authsize;
-			if (offset_in_page(q) + n < PAGE_SIZE)
-				q[n] = 0;
-
 			sg_set_buf(&sg[k], q, template[i].tap[k]);
 
 			if (diff_dst) {
@@ -625,13 +619,17 @@ static int __test_aead(struct crypto_aead *tfm, int enc,
 					  offset_in_page(IDX[k]);
 
 				memset(q, 0, template[i].tap[k]);
-				if (offset_in_page(q) + n < PAGE_SIZE)
-					q[n] = 0;
 
 				sg_set_buf(&sgout[k], q,
 					   template[i].tap[k]);
 			}
 
+			n = template[i].tap[k];
+			if (k == template[i].np - 1 && enc)
+				n += authsize;
+			if (offset_in_page(q) + n < PAGE_SIZE)
+				q[n] = 0;
+
 			temp += template[i].tap[k];
 		}
 
@@ -650,10 +648,10 @@ static int __test_aead(struct crypto_aead *tfm, int enc,
 				goto out;
 			}
 
-			sg[k - 1].length += authsize;
-
 			if (diff_dst)
 				sgout[k - 1].length += authsize;
+			else
+				sg[k - 1].length += authsize;
 		}
 
 		sg_init_table(asg, template[i].anp);
diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig
index bdb953e15d2a..5c07a56962db 100644
--- a/drivers/clocksource/Kconfig
+++ b/drivers/clocksource/Kconfig
@@ -87,6 +87,7 @@ config ARM_ARCH_TIMER
 config ARM_ARCH_TIMER_EVTSTREAM
 	bool "Support for ARM architected timer event stream generation"
 	default y if ARM_ARCH_TIMER
+	depends on ARM_ARCH_TIMER
 	help
 	  This option enables support for event stream generation based on
 	  the ARM architected timer. It is used for waking up CPUs executing
diff --git a/drivers/clocksource/sh_mtu2.c b/drivers/clocksource/sh_mtu2.c
index 4aac9ee0d0c0..3cf12834681e 100644
--- a/drivers/clocksource/sh_mtu2.c
+++ b/drivers/clocksource/sh_mtu2.c
@@ -313,8 +313,20 @@ static int sh_mtu2_setup(struct sh_mtu2_priv *p, struct platform_device *pdev)
 		goto err1;
 	}
 
-	return sh_mtu2_register(p, (char *)dev_name(&p->pdev->dev),
-				cfg->clockevent_rating);
+	ret = clk_prepare(p->clk);
+	if (ret < 0)
+		goto err2;
+
+	ret = sh_mtu2_register(p, (char *)dev_name(&p->pdev->dev),
+			       cfg->clockevent_rating);
+	if (ret < 0)
+		goto err3;
+
+	return 0;
+err3:
+	clk_unprepare(p->clk);
+err2:
+	clk_put(p->clk);
 err1:
 	iounmap(p->mapbase);
 err0:
diff --git a/drivers/clocksource/sh_tmu.c b/drivers/clocksource/sh_tmu.c
index 78b8dae49628..63557cda0a7d 100644
--- a/drivers/clocksource/sh_tmu.c
+++ b/drivers/clocksource/sh_tmu.c
@@ -472,12 +472,26 @@ static int sh_tmu_setup(struct sh_tmu_priv *p, struct platform_device *pdev)
 		ret = PTR_ERR(p->clk);
 		goto err1;
 	}
+
+	ret = clk_prepare(p->clk);
+	if (ret < 0)
+		goto err2;
+
 	p->cs_enabled = false;
 	p->enable_count = 0;
 
-	return sh_tmu_register(p, (char *)dev_name(&p->pdev->dev),
+	ret = sh_tmu_register(p, (char *)dev_name(&p->pdev->dev),
 			       cfg->clockevent_rating,
 			       cfg->clocksource_rating);
+	if (ret < 0)
+		goto err3;
+
+	return 0;
+
+err3:
+	clk_unprepare(p->clk);
+err2:
+	clk_put(p->clk);
 err1:
 	iounmap(p->mapbase);
 err0:
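Both sh_mtu2 and sh_tmu now call clk_prepare() and, with one more resource to release, grow the usual goto-based unwind ladder: each acquisition gets a matching label, and a failure jumps to the label that releases everything obtained so far, in reverse order. The idiom in isolation; my_setup() and my_register() are illustrative, while the clk_* calls are the real API:

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>

static int my_register(struct device *dev, struct clk *clk);	/* assumed elsewhere */

static int my_setup(struct device *dev)
{
	struct clk *clk;
	int ret;

	clk = clk_get(dev, NULL);
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	ret = clk_prepare(clk);
	if (ret < 0)
		goto err_put;

	ret = my_register(dev, clk);
	if (ret < 0)
		goto err_unprepare;

	return 0;

err_unprepare:
	clk_unprepare(clk);
err_put:
	clk_put(clk);
	return ret;
}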
diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c
index 4f44b71b9e24..4cf5dec826e1 100644
--- a/drivers/crypto/caam/caamalg.c
+++ b/drivers/crypto/caam/caamalg.c
@@ -818,7 +818,7 @@ static void aead_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
 		       ivsize, 1);
 	print_hex_dump(KERN_ERR, "dst    @"__stringify(__LINE__)": ",
 		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->dst),
-		       req->cryptlen, 1);
+		       req->cryptlen - ctx->authsize, 1);
 #endif
 
 	if (err) {
@@ -972,12 +972,9 @@ static void init_aead_job(u32 *sh_desc, dma_addr_t ptr,
 			  (edesc->src_nents ? : 1);
 		in_options = LDST_SGF;
 	}
-	if (encrypt)
-		append_seq_in_ptr(desc, src_dma, req->assoclen + ivsize +
-				  req->cryptlen - authsize, in_options);
-	else
-		append_seq_in_ptr(desc, src_dma, req->assoclen + ivsize +
-				  req->cryptlen, in_options);
+
+	append_seq_in_ptr(desc, src_dma, req->assoclen + ivsize + req->cryptlen,
+			  in_options);
 
 	if (likely(req->src == req->dst)) {
 		if (all_contig) {
@@ -998,7 +995,8 @@ static void init_aead_job(u32 *sh_desc, dma_addr_t ptr,
 		}
 	}
 	if (encrypt)
-		append_seq_out_ptr(desc, dst_dma, req->cryptlen, out_options);
+		append_seq_out_ptr(desc, dst_dma, req->cryptlen + authsize,
+				   out_options);
 	else
 		append_seq_out_ptr(desc, dst_dma, req->cryptlen - authsize,
 				   out_options);
@@ -1048,8 +1046,8 @@ static void init_aead_giv_job(u32 *sh_desc, dma_addr_t ptr,
 		sec4_sg_index += edesc->assoc_nents + 1 + edesc->src_nents;
 		in_options = LDST_SGF;
 	}
-	append_seq_in_ptr(desc, src_dma, req->assoclen + ivsize +
-			  req->cryptlen - authsize, in_options);
+	append_seq_in_ptr(desc, src_dma, req->assoclen + ivsize + req->cryptlen,
+			  in_options);
 
 	if (contig & GIV_DST_CONTIG) {
 		dst_dma = edesc->iv_dma;
@@ -1066,7 +1064,8 @@ static void init_aead_giv_job(u32 *sh_desc, dma_addr_t ptr,
 		}
 	}
 
-	append_seq_out_ptr(desc, dst_dma, ivsize + req->cryptlen, out_options);
+	append_seq_out_ptr(desc, dst_dma, ivsize + req->cryptlen + authsize,
+			   out_options);
 }
 
 /*
@@ -1130,7 +1129,8 @@ static void init_ablkcipher_job(u32 *sh_desc, dma_addr_t ptr,
  * allocate and map the aead extended descriptor
  */
 static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
-					   int desc_bytes, bool *all_contig_ptr)
+					   int desc_bytes, bool *all_contig_ptr,
+					   bool encrypt)
 {
 	struct crypto_aead *aead = crypto_aead_reqtfm(req);
 	struct caam_ctx *ctx = crypto_aead_ctx(aead);
@@ -1145,12 +1145,22 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
 	bool assoc_chained = false, src_chained = false, dst_chained = false;
 	int ivsize = crypto_aead_ivsize(aead);
 	int sec4_sg_index, sec4_sg_len = 0, sec4_sg_bytes;
+	unsigned int authsize = ctx->authsize;
 
 	assoc_nents = sg_count(req->assoc, req->assoclen, &assoc_chained);
-	src_nents = sg_count(req->src, req->cryptlen, &src_chained);
 
-	if (unlikely(req->dst != req->src))
-		dst_nents = sg_count(req->dst, req->cryptlen, &dst_chained);
+	if (unlikely(req->dst != req->src)) {
+		src_nents = sg_count(req->src, req->cryptlen, &src_chained);
+		dst_nents = sg_count(req->dst,
+				     req->cryptlen +
+					(encrypt ? authsize : (-authsize)),
+				     &dst_chained);
+	} else {
+		src_nents = sg_count(req->src,
+				     req->cryptlen +
+					(encrypt ? authsize : 0),
+				     &src_chained);
+	}
 
 	sgc = dma_map_sg_chained(jrdev, req->assoc, assoc_nents ? : 1,
 				 DMA_TO_DEVICE, assoc_chained);
@@ -1234,11 +1244,9 @@ static int aead_encrypt(struct aead_request *req)
 	u32 *desc;
 	int ret = 0;
 
-	req->cryptlen += ctx->authsize;
-
 	/* allocate extended descriptor */
 	edesc = aead_edesc_alloc(req, DESC_JOB_IO_LEN *
-				 CAAM_CMD_SZ, &all_contig);
+				 CAAM_CMD_SZ, &all_contig, true);
 	if (IS_ERR(edesc))
 		return PTR_ERR(edesc);
 
@@ -1275,7 +1283,7 @@ static int aead_decrypt(struct aead_request *req)
 
 	/* allocate extended descriptor */
 	edesc = aead_edesc_alloc(req, DESC_JOB_IO_LEN *
-				 CAAM_CMD_SZ, &all_contig);
+				 CAAM_CMD_SZ, &all_contig, false);
 	if (IS_ERR(edesc))
 		return PTR_ERR(edesc);
 
@@ -1332,7 +1340,8 @@ static struct aead_edesc *aead_giv_edesc_alloc(struct aead_givcrypt_request
 	src_nents = sg_count(req->src, req->cryptlen, &src_chained);
 
 	if (unlikely(req->dst != req->src))
-		dst_nents = sg_count(req->dst, req->cryptlen, &dst_chained);
+		dst_nents = sg_count(req->dst, req->cryptlen + ctx->authsize,
+				     &dst_chained);
 
 	sgc = dma_map_sg_chained(jrdev, req->assoc, assoc_nents ? : 1,
 				 DMA_TO_DEVICE, assoc_chained);
@@ -1426,8 +1435,6 @@ static int aead_givencrypt(struct aead_givcrypt_request *areq)
 	u32 *desc;
 	int ret = 0;
 
-	req->cryptlen += ctx->authsize;
-
 	/* allocate extended descriptor */
 	edesc = aead_giv_edesc_alloc(areq, DESC_JOB_IO_LEN *
 				     CAAM_CMD_SZ, &contig);
diff --git a/drivers/crypto/caam/jr.c b/drivers/crypto/caam/jr.c
index d23356d20e1c..1d80bd3636c5 100644
--- a/drivers/crypto/caam/jr.c
+++ b/drivers/crypto/caam/jr.c
@@ -6,6 +6,7 @@
  */
 
 #include <linux/of_irq.h>
+#include <linux/of_address.h>
 
 #include "compat.h"
 #include "regs.h"
diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c
index 905de4427e7c..b44f4ddc565c 100644
--- a/drivers/crypto/talitos.c
+++ b/drivers/crypto/talitos.c
@@ -790,7 +790,7 @@ static void ipsec_esp_unmap(struct device *dev,
 
 	if (edesc->assoc_chained)
 		talitos_unmap_sg_chain(dev, areq->assoc, DMA_TO_DEVICE);
-	else
+	else if (areq->assoclen)
 		/* assoc_nents counts also for IV in non-contiguous cases */
 		dma_unmap_sg(dev, areq->assoc,
 			     edesc->assoc_nents ? edesc->assoc_nents - 1 : 1,
@@ -973,7 +973,11 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
 		dma_sync_single_for_device(dev, edesc->dma_link_tbl,
 					   edesc->dma_len, DMA_BIDIRECTIONAL);
 	} else {
-		to_talitos_ptr(&desc->ptr[1], sg_dma_address(areq->assoc));
+		if (areq->assoclen)
+			to_talitos_ptr(&desc->ptr[1],
+				       sg_dma_address(areq->assoc));
+		else
+			to_talitos_ptr(&desc->ptr[1], edesc->iv_dma);
 		desc->ptr[1].j_extent = 0;
 	}
 
@@ -1108,7 +1112,8 @@ static struct talitos_edesc *talitos_edesc_alloc(struct device *dev,
 						 unsigned int authsize,
 						 unsigned int ivsize,
 						 int icv_stashing,
-						 u32 cryptoflags)
+						 u32 cryptoflags,
+						 bool encrypt)
 {
 	struct talitos_edesc *edesc;
 	int assoc_nents = 0, src_nents, dst_nents, alloc_len, dma_len;
@@ -1122,10 +1127,10 @@ static struct talitos_edesc *talitos_edesc_alloc(struct device *dev,
 		return ERR_PTR(-EINVAL);
 	}
 
-	if (iv)
+	if (ivsize)
 		iv_dma = dma_map_single(dev, iv, ivsize, DMA_TO_DEVICE);
 
-	if (assoc) {
+	if (assoclen) {
 		/*
 		 * Currently it is assumed that iv is provided whenever assoc
 		 * is.
@@ -1141,19 +1146,17 @@ static struct talitos_edesc *talitos_edesc_alloc(struct device *dev,
 		assoc_nents = assoc_nents ? assoc_nents + 1 : 2;
 	}
 
-	src_nents = sg_count(src, cryptlen + authsize, &src_chained);
-	src_nents = (src_nents == 1) ? 0 : src_nents;
-
-	if (!dst) {
-		dst_nents = 0;
-	} else {
-		if (dst == src) {
-			dst_nents = src_nents;
-		} else {
-			dst_nents = sg_count(dst, cryptlen + authsize,
-					     &dst_chained);
-			dst_nents = (dst_nents == 1) ? 0 : dst_nents;
-		}
+	if (!dst || dst == src) {
+		src_nents = sg_count(src, cryptlen + authsize, &src_chained);
+		src_nents = (src_nents == 1) ? 0 : src_nents;
+		dst_nents = dst ? src_nents : 0;
+	} else { /* dst && dst != src*/
+		src_nents = sg_count(src, cryptlen + (encrypt ? 0 : authsize),
+				     &src_chained);
+		src_nents = (src_nents == 1) ? 0 : src_nents;
+		dst_nents = sg_count(dst, cryptlen + (encrypt ? authsize : 0),
+				     &dst_chained);
+		dst_nents = (dst_nents == 1) ? 0 : dst_nents;
 	}
 
 	/*
@@ -1173,9 +1176,16 @@ static struct talitos_edesc *talitos_edesc_alloc(struct device *dev,
 
 	edesc = kmalloc(alloc_len, GFP_DMA | flags);
 	if (!edesc) {
-		talitos_unmap_sg_chain(dev, assoc, DMA_TO_DEVICE);
+		if (assoc_chained)
+			talitos_unmap_sg_chain(dev, assoc, DMA_TO_DEVICE);
+		else if (assoclen)
+			dma_unmap_sg(dev, assoc,
+				     assoc_nents ? assoc_nents - 1 : 1,
+				     DMA_TO_DEVICE);
+
 		if (iv_dma)
 			dma_unmap_single(dev, iv_dma, ivsize, DMA_TO_DEVICE);
+
 		dev_err(dev, "could not allocate edescriptor\n");
 		return ERR_PTR(-ENOMEM);
 	}
@@ -1197,7 +1207,7 @@ static struct talitos_edesc *talitos_edesc_alloc(struct device *dev,
 }
 
 static struct talitos_edesc *aead_edesc_alloc(struct aead_request *areq, u8 *iv,
-					      int icv_stashing)
+					      int icv_stashing, bool encrypt)
 {
 	struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
 	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
@@ -1206,7 +1216,7 @@ static struct talitos_edesc *aead_edesc_alloc(struct aead_request *areq, u8 *iv,
 	return talitos_edesc_alloc(ctx->dev, areq->assoc, areq->src, areq->dst,
 				   iv, areq->assoclen, areq->cryptlen,
 				   ctx->authsize, ivsize, icv_stashing,
-				   areq->base.flags);
+				   areq->base.flags, encrypt);
 }
 
 static int aead_encrypt(struct aead_request *req)
@@ -1216,7 +1226,7 @@ static int aead_encrypt(struct aead_request *req)
 	struct talitos_edesc *edesc;
 
 	/* allocate extended descriptor */
-	edesc = aead_edesc_alloc(req, req->iv, 0);
+	edesc = aead_edesc_alloc(req, req->iv, 0, true);
 	if (IS_ERR(edesc))
 		return PTR_ERR(edesc);
 
@@ -1239,7 +1249,7 @@ static int aead_decrypt(struct aead_request *req)
 	req->cryptlen -= authsize;
 
 	/* allocate extended descriptor */
-	edesc = aead_edesc_alloc(req, req->iv, 1);
+	edesc = aead_edesc_alloc(req, req->iv, 1, false);
 	if (IS_ERR(edesc))
 		return PTR_ERR(edesc);
 
@@ -1285,7 +1295,7 @@ static int aead_givencrypt(struct aead_givcrypt_request *req)
 	struct talitos_edesc *edesc;
 
 	/* allocate extended descriptor */
-	edesc = aead_edesc_alloc(areq, req->giv, 0);
+	edesc = aead_edesc_alloc(areq, req->giv, 0, true);
 	if (IS_ERR(edesc))
 		return PTR_ERR(edesc);
 
@@ -1441,7 +1451,7 @@ static int common_nonsnoop(struct talitos_edesc *edesc,
 }
 
 static struct talitos_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request *
-						    areq)
+						    areq, bool encrypt)
 {
 	struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
 	struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
@@ -1449,7 +1459,7 @@ static struct talitos_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request *
 
 	return talitos_edesc_alloc(ctx->dev, NULL, areq->src, areq->dst,
 				   areq->info, 0, areq->nbytes, 0, ivsize, 0,
-				   areq->base.flags);
+				   areq->base.flags, encrypt);
}
 
 static int ablkcipher_encrypt(struct ablkcipher_request *areq)
@@ -1459,7 +1469,7 @@ static int ablkcipher_encrypt(struct ablkcipher_request *areq)
 	struct talitos_edesc *edesc;
 
 	/* allocate extended descriptor */
-	edesc = ablkcipher_edesc_alloc(areq);
+	edesc = ablkcipher_edesc_alloc(areq, true);
 	if (IS_ERR(edesc))
 		return PTR_ERR(edesc);
 
@@ -1476,7 +1486,7 @@ static int ablkcipher_decrypt(struct ablkcipher_request *areq)
 	struct talitos_edesc *edesc;
 
 	/* allocate extended descriptor */
-	edesc = ablkcipher_edesc_alloc(areq);
+	edesc = ablkcipher_edesc_alloc(areq, false);
 	if (IS_ERR(edesc))
 		return PTR_ERR(edesc);
 
@@ -1628,7 +1638,7 @@ static struct talitos_edesc *ahash_edesc_alloc(struct ahash_request *areq,
 	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
 
 	return talitos_edesc_alloc(ctx->dev, NULL, req_ctx->psrc, NULL, NULL, 0,
				   nbytes, 0, 0, 0, areq->base.flags, false);
 }
 
 static int ahash_init(struct ahash_request *areq)
diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c index 9031171c141b..341c6016812d 100644 --- a/drivers/irqchip/irq-gic.c +++ b/drivers/irqchip/irq-gic.c | |||
@@ -957,12 +957,13 @@ void __init gic_init_bases(unsigned int gic_nr, int irq_start, | |||
957 | if (WARN_ON(!gic->domain)) | 957 | if (WARN_ON(!gic->domain)) |
958 | return; | 958 | return; |
959 | 959 | ||
960 | if (gic_nr == 0) { | ||
960 | #ifdef CONFIG_SMP | 961 | #ifdef CONFIG_SMP |
961 | set_smp_cross_call(gic_raise_softirq); | 962 | set_smp_cross_call(gic_raise_softirq); |
962 | register_cpu_notifier(&gic_cpu_notifier); | 963 | register_cpu_notifier(&gic_cpu_notifier); |
963 | #endif | 964 | #endif |
964 | 965 | set_handle_irq(gic_handle_irq); | |
965 | set_handle_irq(gic_handle_irq); | 966 | } |
966 | 967 | ||
967 | gic_chip.flags |= gic_arch_extn.flags; | 968 | gic_chip.flags |= gic_arch_extn.flags; |
968 | gic_dist_init(gic); | 969 | gic_dist_init(gic); |
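The hunk above makes the system-wide registrations conditional: only the primary GIC (gic_nr == 0) installs the SMP cross-call hook, the CPU hotplug notifier and the top-level IRQ handler, while per-instance distributor/CPU-interface setup still runs for every GIC. A minimal structural sketch of that pattern, assuming hypothetical foo_* helpers around the real set_smp_cross_call()/set_handle_irq() hooks:

	static void __init foo_init_bases(unsigned int foo_nr, struct foo_chip *chip)
	{
		/* Hooks that exist once per system: only instance 0 may claim them. */
		if (foo_nr == 0) {
	#ifdef CONFIG_SMP
			set_smp_cross_call(foo_raise_softirq);	/* IPIs go via chip 0 */
			register_cpu_notifier(&foo_cpu_notifier);
	#endif
			set_handle_irq(foo_handle_irq);		/* top-level entry */
		}

		/* Per-instance hardware setup still runs for every controller. */
		foo_dist_init(chip);
		foo_cpu_init(chip);
	}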
diff --git a/drivers/leds/leds-pwm.c b/drivers/leds/leds-pwm.c index 2848171b8576..b31d8e99c419 100644 --- a/drivers/leds/leds-pwm.c +++ b/drivers/leds/leds-pwm.c | |||
@@ -82,22 +82,12 @@ static inline size_t sizeof_pwm_leds_priv(int num_leds) | |||
82 | (sizeof(struct led_pwm_data) * num_leds); | 82 | (sizeof(struct led_pwm_data) * num_leds); |
83 | } | 83 | } |
84 | 84 | ||
85 | static struct led_pwm_priv *led_pwm_create_of(struct platform_device *pdev) | 85 | static int led_pwm_create_of(struct platform_device *pdev, |
86 | struct led_pwm_priv *priv) | ||
86 | { | 87 | { |
87 | struct device_node *node = pdev->dev.of_node; | 88 | struct device_node *node = pdev->dev.of_node; |
88 | struct device_node *child; | 89 | struct device_node *child; |
89 | struct led_pwm_priv *priv; | 90 | int ret; |
90 | int count, ret; | ||
91 | |||
92 | /* count LEDs in this device, so we know how much to allocate */ | ||
93 | count = of_get_child_count(node); | ||
94 | if (!count) | ||
95 | return NULL; | ||
96 | |||
97 | priv = devm_kzalloc(&pdev->dev, sizeof_pwm_leds_priv(count), | ||
98 | GFP_KERNEL); | ||
99 | if (!priv) | ||
100 | return NULL; | ||
101 | 91 | ||
102 | for_each_child_of_node(node, child) { | 92 | for_each_child_of_node(node, child) { |
103 | struct led_pwm_data *led_dat = &priv->leds[priv->num_leds]; | 93 | struct led_pwm_data *led_dat = &priv->leds[priv->num_leds]; |
@@ -109,6 +99,7 @@ static struct led_pwm_priv *led_pwm_create_of(struct platform_device *pdev) | |||
109 | if (IS_ERR(led_dat->pwm)) { | 99 | if (IS_ERR(led_dat->pwm)) { |
110 | dev_err(&pdev->dev, "unable to request PWM for %s\n", | 100 | dev_err(&pdev->dev, "unable to request PWM for %s\n", |
111 | led_dat->cdev.name); | 101 | led_dat->cdev.name); |
102 | ret = PTR_ERR(led_dat->pwm); | ||
112 | goto err; | 103 | goto err; |
113 | } | 104 | } |
114 | /* Get the period from PWM core when n*/ | 105 | /* Get the period from PWM core when n*/ |
@@ -137,28 +128,36 @@ static struct led_pwm_priv *led_pwm_create_of(struct platform_device *pdev) | |||
137 | priv->num_leds++; | 128 | priv->num_leds++; |
138 | } | 129 | } |
139 | 130 | ||
140 | return priv; | 131 | return 0; |
141 | err: | 132 | err: |
142 | while (priv->num_leds--) | 133 | while (priv->num_leds--) |
143 | led_classdev_unregister(&priv->leds[priv->num_leds].cdev); | 134 | led_classdev_unregister(&priv->leds[priv->num_leds].cdev); |
144 | 135 | ||
145 | return NULL; | 136 | return ret; |
146 | } | 137 | } |
147 | 138 | ||
148 | static int led_pwm_probe(struct platform_device *pdev) | 139 | static int led_pwm_probe(struct platform_device *pdev) |
149 | { | 140 | { |
150 | struct led_pwm_platform_data *pdata = dev_get_platdata(&pdev->dev); | 141 | struct led_pwm_platform_data *pdata = dev_get_platdata(&pdev->dev); |
151 | struct led_pwm_priv *priv; | 142 | struct led_pwm_priv *priv; |
152 | int i, ret = 0; | 143 | int count, i; |
144 | int ret = 0; | ||
145 | |||
146 | if (pdata) | ||
147 | count = pdata->num_leds; | ||
148 | else | ||
149 | count = of_get_child_count(pdev->dev.of_node); | ||
150 | |||
151 | if (!count) | ||
152 | return -EINVAL; | ||
153 | 153 | ||
154 | if (pdata && pdata->num_leds) { | 154 | priv = devm_kzalloc(&pdev->dev, sizeof_pwm_leds_priv(count), |
155 | priv = devm_kzalloc(&pdev->dev, | 155 | GFP_KERNEL); |
156 | sizeof_pwm_leds_priv(pdata->num_leds), | 156 | if (!priv) |
157 | GFP_KERNEL); | 157 | return -ENOMEM; |
158 | if (!priv) | ||
159 | return -ENOMEM; | ||
160 | 158 | ||
161 | for (i = 0; i < pdata->num_leds; i++) { | 159 | if (pdata) { |
160 | for (i = 0; i < count; i++) { | ||
162 | struct led_pwm *cur_led = &pdata->leds[i]; | 161 | struct led_pwm *cur_led = &pdata->leds[i]; |
163 | struct led_pwm_data *led_dat = &priv->leds[i]; | 162 | struct led_pwm_data *led_dat = &priv->leds[i]; |
164 | 163 | ||
@@ -188,11 +187,11 @@ static int led_pwm_probe(struct platform_device *pdev) | |||
188 | if (ret < 0) | 187 | if (ret < 0) |
189 | goto err; | 188 | goto err; |
190 | } | 189 | } |
191 | priv->num_leds = pdata->num_leds; | 190 | priv->num_leds = count; |
192 | } else { | 191 | } else { |
193 | priv = led_pwm_create_of(pdev); | 192 | ret = led_pwm_create_of(pdev, priv); |
194 | if (!priv) | 193 | if (ret) |
195 | return -ENODEV; | 194 | return ret; |
196 | } | 195 | } |
197 | 196 | ||
198 | platform_set_drvdata(pdev, priv); | 197 | platform_set_drvdata(pdev, priv); |
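The leds-pwm rework above moves counting and allocation out of the OF helper: probe determines the LED count from either platform data or of_get_child_count(), does a single devm_kzalloc(), and the DT path only fills the array in and returns an errno instead of NULL. A condensed sketch of that probe shape; the foo_* names and the two fill helpers are hypothetical, the counting/allocation calls are the real APIs:

	static int foo_probe(struct platform_device *pdev)
	{
		struct foo_platform_data *pdata = dev_get_platdata(&pdev->dev);
		struct foo_priv *priv;
		int count, ret;

		/* One counting step, whichever hardware description is present. */
		count = pdata ? pdata->num_leds : of_get_child_count(pdev->dev.of_node);
		if (!count)
			return -EINVAL;

		/* One allocation for both paths; devm_ frees it on detach. */
		priv = devm_kzalloc(&pdev->dev,
				    sizeof(*priv) + count * sizeof(priv->leds[0]),
				    GFP_KERNEL);
		if (!priv)
			return -ENOMEM;

		ret = pdata ? foo_fill_from_pdata(pdev, priv, pdata, count)
			    : foo_fill_from_of(pdev, priv);	/* 0 or -errno */
		if (ret)
			return ret;

		platform_set_drvdata(pdev, priv);
		return 0;
	}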
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index 4dd5ee2a34cc..36eab0c4fb33 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c | |||
@@ -4110,7 +4110,7 @@ static int bond_check_params(struct bond_params *params) | |||
4110 | if (!miimon) { | 4110 | if (!miimon) { |
4111 | pr_warning("Warning: miimon must be specified, otherwise bonding will not detect link failure, speed and duplex which are essential for 802.3ad operation\n"); | 4111 | pr_warning("Warning: miimon must be specified, otherwise bonding will not detect link failure, speed and duplex which are essential for 802.3ad operation\n"); |
4112 | pr_warning("Forcing miimon to 100msec\n"); | 4112 | pr_warning("Forcing miimon to 100msec\n"); |
4113 | miimon = 100; | 4113 | miimon = BOND_DEFAULT_MIIMON; |
4114 | } | 4114 | } |
4115 | } | 4115 | } |
4116 | 4116 | ||
@@ -4147,7 +4147,7 @@ static int bond_check_params(struct bond_params *params) | |||
4147 | if (!miimon) { | 4147 | if (!miimon) { |
4148 | pr_warning("Warning: miimon must be specified, otherwise bonding will not detect link failure and link speed which are essential for TLB/ALB load balancing\n"); | 4148 | pr_warning("Warning: miimon must be specified, otherwise bonding will not detect link failure and link speed which are essential for TLB/ALB load balancing\n"); |
4149 | pr_warning("Forcing miimon to 100msec\n"); | 4149 | pr_warning("Forcing miimon to 100msec\n"); |
4150 | miimon = 100; | 4150 | miimon = BOND_DEFAULT_MIIMON; |
4151 | } | 4151 | } |
4152 | } | 4152 | } |
4153 | 4153 | ||
diff --git a/drivers/net/bonding/bond_options.c b/drivers/net/bonding/bond_options.c index 9a5223c7b4d1..ea6f640782b7 100644 --- a/drivers/net/bonding/bond_options.c +++ b/drivers/net/bonding/bond_options.c | |||
@@ -45,10 +45,15 @@ int bond_option_mode_set(struct bonding *bond, int mode) | |||
45 | return -EPERM; | 45 | return -EPERM; |
46 | } | 46 | } |
47 | 47 | ||
48 | if (BOND_MODE_IS_LB(mode) && bond->params.arp_interval) { | 48 | if (BOND_NO_USES_ARP(mode) && bond->params.arp_interval) { |
49 | pr_err("%s: %s mode is incompatible with arp monitoring.\n", | 49 | pr_info("%s: %s mode is incompatible with arp monitoring, start mii monitoring\n", |
50 | bond->dev->name, bond_mode_tbl[mode].modename); | 50 | bond->dev->name, bond_mode_tbl[mode].modename); |
51 | return -EINVAL; | 51 | /* disable arp monitoring */ |
52 | bond->params.arp_interval = 0; | ||
53 | /* set miimon to default value */ | ||
54 | bond->params.miimon = BOND_DEFAULT_MIIMON; | ||
55 | pr_info("%s: Setting MII monitoring interval to %d.\n", | ||
56 | bond->dev->name, bond->params.miimon); | ||
52 | } | 57 | } |
53 | 58 | ||
54 | /* don't cache arp_validate between modes */ | 59 | /* don't cache arp_validate between modes */ |
diff --git a/drivers/net/bonding/bond_sysfs.c b/drivers/net/bonding/bond_sysfs.c index 0ec2a7e8c8a9..abf5e106edc5 100644 --- a/drivers/net/bonding/bond_sysfs.c +++ b/drivers/net/bonding/bond_sysfs.c | |||
@@ -523,9 +523,7 @@ static ssize_t bonding_store_arp_interval(struct device *d, | |||
523 | ret = -EINVAL; | 523 | ret = -EINVAL; |
524 | goto out; | 524 | goto out; |
525 | } | 525 | } |
526 | if (bond->params.mode == BOND_MODE_ALB || | 526 | if (BOND_NO_USES_ARP(bond->params.mode)) { |
527 | bond->params.mode == BOND_MODE_TLB || | ||
528 | bond->params.mode == BOND_MODE_8023AD) { | ||
529 | pr_info("%s: ARP monitoring cannot be used with ALB/TLB/802.3ad. Only MII monitoring is supported on %s.\n", | 527 | pr_info("%s: ARP monitoring cannot be used with ALB/TLB/802.3ad. Only MII monitoring is supported on %s.\n", |
530 | bond->dev->name, bond->dev->name); | 528 | bond->dev->name, bond->dev->name); |
531 | ret = -EINVAL; | 529 | ret = -EINVAL; |
diff --git a/drivers/net/bonding/bonding.h b/drivers/net/bonding/bonding.h index ca31286aa028..a9f4f9f4d8ce 100644 --- a/drivers/net/bonding/bonding.h +++ b/drivers/net/bonding/bonding.h | |||
@@ -35,6 +35,8 @@ | |||
35 | 35 | ||
36 | #define BOND_MAX_ARP_TARGETS 16 | 36 | #define BOND_MAX_ARP_TARGETS 16 |
37 | 37 | ||
38 | #define BOND_DEFAULT_MIIMON 100 | ||
39 | |||
38 | #define IS_UP(dev) \ | 40 | #define IS_UP(dev) \ |
39 | ((((dev)->flags & IFF_UP) == IFF_UP) && \ | 41 | ((((dev)->flags & IFF_UP) == IFF_UP) && \ |
40 | netif_running(dev) && \ | 42 | netif_running(dev) && \ |
@@ -55,6 +57,11 @@ | |||
55 | ((mode) == BOND_MODE_TLB) || \ | 57 | ((mode) == BOND_MODE_TLB) || \ |
56 | ((mode) == BOND_MODE_ALB)) | 58 | ((mode) == BOND_MODE_ALB)) |
57 | 59 | ||
60 | #define BOND_NO_USES_ARP(mode) \ | ||
61 | (((mode) == BOND_MODE_8023AD) || \ | ||
62 | ((mode) == BOND_MODE_TLB) || \ | ||
63 | ((mode) == BOND_MODE_ALB)) | ||
64 | |||
58 | #define TX_QUEUE_OVERRIDE(mode) \ | 65 | #define TX_QUEUE_OVERRIDE(mode) \ |
59 | (((mode) == BOND_MODE_ACTIVEBACKUP) || \ | 66 | (((mode) == BOND_MODE_ACTIVEBACKUP) || \ |
60 | ((mode) == BOND_MODE_ROUNDROBIN)) | 67 | ((mode) == BOND_MODE_ROUNDROBIN)) |
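BOND_NO_USES_ARP() gives bond_options.c and bond_sysfs.c one shared test for "ARP monitoring makes no sense in this mode" (802.3ad, TLB, ALB), and BOND_DEFAULT_MIIMON replaces the bare 100 previously scattered through bond_check_params(). The behavioural change in the mode-set hunk is that an incompatible arp_interval no longer fails with -EINVAL; the driver now switches the bond to MII monitoring itself. Roughly (a paraphrase of the hunk above, not additional driver code):

	if (BOND_NO_USES_ARP(mode) && bond->params.arp_interval) {
		/* ARP monitoring cannot work here: fall back to miimon
		 * instead of rejecting the requested mode.
		 */
		bond->params.arp_interval = 0;
		bond->params.miimon = BOND_DEFAULT_MIIMON;	/* 100 msec */
	}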
diff --git a/drivers/net/can/c_can/c_can.c b/drivers/net/can/c_can/c_can.c index e3fc07cf2f62..77061eebb034 100644 --- a/drivers/net/can/c_can/c_can.c +++ b/drivers/net/can/c_can/c_can.c | |||
@@ -712,22 +712,31 @@ static int c_can_set_mode(struct net_device *dev, enum can_mode mode) | |||
712 | return 0; | 712 | return 0; |
713 | } | 713 | } |
714 | 714 | ||
715 | static int c_can_get_berr_counter(const struct net_device *dev, | 715 | static int __c_can_get_berr_counter(const struct net_device *dev, |
716 | struct can_berr_counter *bec) | 716 | struct can_berr_counter *bec) |
717 | { | 717 | { |
718 | unsigned int reg_err_counter; | 718 | unsigned int reg_err_counter; |
719 | struct c_can_priv *priv = netdev_priv(dev); | 719 | struct c_can_priv *priv = netdev_priv(dev); |
720 | 720 | ||
721 | c_can_pm_runtime_get_sync(priv); | ||
722 | |||
723 | reg_err_counter = priv->read_reg(priv, C_CAN_ERR_CNT_REG); | 721 | reg_err_counter = priv->read_reg(priv, C_CAN_ERR_CNT_REG); |
724 | bec->rxerr = (reg_err_counter & ERR_CNT_REC_MASK) >> | 722 | bec->rxerr = (reg_err_counter & ERR_CNT_REC_MASK) >> |
725 | ERR_CNT_REC_SHIFT; | 723 | ERR_CNT_REC_SHIFT; |
726 | bec->txerr = reg_err_counter & ERR_CNT_TEC_MASK; | 724 | bec->txerr = reg_err_counter & ERR_CNT_TEC_MASK; |
727 | 725 | ||
726 | return 0; | ||
727 | } | ||
728 | |||
729 | static int c_can_get_berr_counter(const struct net_device *dev, | ||
730 | struct can_berr_counter *bec) | ||
731 | { | ||
732 | struct c_can_priv *priv = netdev_priv(dev); | ||
733 | int err; | ||
734 | |||
735 | c_can_pm_runtime_get_sync(priv); | ||
736 | err = __c_can_get_berr_counter(dev, bec); | ||
728 | c_can_pm_runtime_put_sync(priv); | 737 | c_can_pm_runtime_put_sync(priv); |
729 | 738 | ||
730 | return 0; | 739 | return err; |
731 | } | 740 | } |
732 | 741 | ||
733 | /* | 742 | /* |
@@ -754,6 +763,7 @@ static void c_can_do_tx(struct net_device *dev) | |||
754 | if (!(val & (1 << (msg_obj_no - 1)))) { | 763 | if (!(val & (1 << (msg_obj_no - 1)))) { |
755 | can_get_echo_skb(dev, | 764 | can_get_echo_skb(dev, |
756 | msg_obj_no - C_CAN_MSG_OBJ_TX_FIRST); | 765 | msg_obj_no - C_CAN_MSG_OBJ_TX_FIRST); |
766 | c_can_object_get(dev, 0, msg_obj_no, IF_COMM_ALL); | ||
757 | stats->tx_bytes += priv->read_reg(priv, | 767 | stats->tx_bytes += priv->read_reg(priv, |
758 | C_CAN_IFACE(MSGCTRL_REG, 0)) | 768 | C_CAN_IFACE(MSGCTRL_REG, 0)) |
759 | & IF_MCONT_DLC_MASK; | 769 | & IF_MCONT_DLC_MASK; |
@@ -872,7 +882,7 @@ static int c_can_handle_state_change(struct net_device *dev, | |||
872 | if (unlikely(!skb)) | 882 | if (unlikely(!skb)) |
873 | return 0; | 883 | return 0; |
874 | 884 | ||
875 | c_can_get_berr_counter(dev, &bec); | 885 | __c_can_get_berr_counter(dev, &bec); |
876 | reg_err_counter = priv->read_reg(priv, C_CAN_ERR_CNT_REG); | 886 | reg_err_counter = priv->read_reg(priv, C_CAN_ERR_CNT_REG); |
877 | rx_err_passive = (reg_err_counter & ERR_CNT_RP_MASK) >> | 887 | rx_err_passive = (reg_err_counter & ERR_CNT_RP_MASK) >> |
878 | ERR_CNT_RP_SHIFT; | 888 | ERR_CNT_RP_SHIFT; |
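The berr-counter split is the usual locked/unlocked pairing: __c_can_get_berr_counter() only reads C_CAN_ERR_CNT_REG, while the exported wrapper brackets it with the runtime-PM get/put, and paths that already hold the device awake (the state-change handler above) call the __ variant directly. A generic sketch of the pattern with hypothetical foo_* names; pm_runtime_get_sync()/pm_runtime_put_sync() and readl() are the real kernel APIs:

	/* Caller guarantees the device is powered, e.g. from the IRQ path. */
	static int __foo_get_counters(struct foo_priv *priv, struct foo_counters *c)
	{
		u32 reg = readl(priv->base + FOO_ERR_CNT_REG);	/* FOO_* is made up */

		c->rxerr = (reg >> 8) & 0xff;
		c->txerr = reg & 0xff;
		return 0;
	}

	/* Public entry point: wake the device, do the read, drop the reference. */
	static int foo_get_counters(struct foo_priv *priv, struct foo_counters *c)
	{
		int err;

		pm_runtime_get_sync(priv->dev);
		err = __foo_get_counters(priv, c);
		pm_runtime_put_sync(priv->dev);

		return err;
	}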
diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c index ae08cf129ebb..aaed97bee471 100644 --- a/drivers/net/can/flexcan.c +++ b/drivers/net/can/flexcan.c | |||
@@ -1020,13 +1020,13 @@ static int flexcan_probe(struct platform_device *pdev) | |||
1020 | dev_err(&pdev->dev, "no ipg clock defined\n"); | 1020 | dev_err(&pdev->dev, "no ipg clock defined\n"); |
1021 | return PTR_ERR(clk_ipg); | 1021 | return PTR_ERR(clk_ipg); |
1022 | } | 1022 | } |
1023 | clock_freq = clk_get_rate(clk_ipg); | ||
1024 | 1023 | ||
1025 | clk_per = devm_clk_get(&pdev->dev, "per"); | 1024 | clk_per = devm_clk_get(&pdev->dev, "per"); |
1026 | if (IS_ERR(clk_per)) { | 1025 | if (IS_ERR(clk_per)) { |
1027 | dev_err(&pdev->dev, "no per clock defined\n"); | 1026 | dev_err(&pdev->dev, "no per clock defined\n"); |
1028 | return PTR_ERR(clk_per); | 1027 | return PTR_ERR(clk_per); |
1029 | } | 1028 | } |
1029 | clock_freq = clk_get_rate(clk_per); | ||
1030 | } | 1030 | } |
1031 | 1031 | ||
1032 | mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 1032 | mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
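The flexcan fix matters because "ipg" is only the bus/register clock on i.MX; bit timing has to be derived from the "per" (peripheral) clock, so clock_freq is now taken from clk_per. When no explicit frequency has been set from the device tree, the probe path looks roughly like this (sketch; the clock names follow the flexcan binding, the surrounding variables are simplified):

	struct clk *clk_ipg, *clk_per;

	if (!clock_freq) {			/* no explicit clock-frequency given */
		clk_ipg = devm_clk_get(&pdev->dev, "ipg");
		if (IS_ERR(clk_ipg))
			return PTR_ERR(clk_ipg);

		clk_per = devm_clk_get(&pdev->dev, "per");
		if (IS_ERR(clk_per))
			return PTR_ERR(clk_per);

		/* The CAN bit timing runs off the peripheral clock. */
		clock_freq = clk_get_rate(clk_per);
	}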
diff --git a/drivers/net/can/sja1000/sja1000.c b/drivers/net/can/sja1000/sja1000.c index 7164a999f50f..f17c3018b7c7 100644 --- a/drivers/net/can/sja1000/sja1000.c +++ b/drivers/net/can/sja1000/sja1000.c | |||
@@ -494,20 +494,20 @@ irqreturn_t sja1000_interrupt(int irq, void *dev_id) | |||
494 | uint8_t isrc, status; | 494 | uint8_t isrc, status; |
495 | int n = 0; | 495 | int n = 0; |
496 | 496 | ||
497 | /* Shared interrupts and IRQ off? */ | ||
498 | if (priv->read_reg(priv, SJA1000_IER) == IRQ_OFF) | ||
499 | return IRQ_NONE; | ||
500 | |||
501 | if (priv->pre_irq) | 497 | if (priv->pre_irq) |
502 | priv->pre_irq(priv); | 498 | priv->pre_irq(priv); |
503 | 499 | ||
500 | /* Shared interrupts and IRQ off? */ | ||
501 | if (priv->read_reg(priv, SJA1000_IER) == IRQ_OFF) | ||
502 | goto out; | ||
503 | |||
504 | while ((isrc = priv->read_reg(priv, SJA1000_IR)) && | 504 | while ((isrc = priv->read_reg(priv, SJA1000_IR)) && |
505 | (n < SJA1000_MAX_IRQ)) { | 505 | (n < SJA1000_MAX_IRQ)) { |
506 | n++; | 506 | |
507 | status = priv->read_reg(priv, SJA1000_SR); | 507 | status = priv->read_reg(priv, SJA1000_SR); |
508 | /* check for absent controller due to hw unplug */ | 508 | /* check for absent controller due to hw unplug */ |
509 | if (status == 0xFF && sja1000_is_absent(priv)) | 509 | if (status == 0xFF && sja1000_is_absent(priv)) |
510 | return IRQ_NONE; | 510 | goto out; |
511 | 511 | ||
512 | if (isrc & IRQ_WUI) | 512 | if (isrc & IRQ_WUI) |
513 | netdev_warn(dev, "wakeup interrupt\n"); | 513 | netdev_warn(dev, "wakeup interrupt\n"); |
@@ -535,7 +535,7 @@ irqreturn_t sja1000_interrupt(int irq, void *dev_id) | |||
535 | status = priv->read_reg(priv, SJA1000_SR); | 535 | status = priv->read_reg(priv, SJA1000_SR); |
536 | /* check for absent controller */ | 536 | /* check for absent controller */ |
537 | if (status == 0xFF && sja1000_is_absent(priv)) | 537 | if (status == 0xFF && sja1000_is_absent(priv)) |
538 | return IRQ_NONE; | 538 | goto out; |
539 | } | 539 | } |
540 | } | 540 | } |
541 | if (isrc & (IRQ_DOI | IRQ_EI | IRQ_BEI | IRQ_EPI | IRQ_ALI)) { | 541 | if (isrc & (IRQ_DOI | IRQ_EI | IRQ_BEI | IRQ_EPI | IRQ_ALI)) { |
@@ -543,8 +543,9 @@ irqreturn_t sja1000_interrupt(int irq, void *dev_id) | |||
543 | if (sja1000_err(dev, isrc, status)) | 543 | if (sja1000_err(dev, isrc, status)) |
544 | break; | 544 | break; |
545 | } | 545 | } |
546 | n++; | ||
546 | } | 547 | } |
547 | 548 | out: | |
548 | if (priv->post_irq) | 549 | if (priv->post_irq) |
549 | priv->post_irq(priv); | 550 | priv->post_irq(priv); |
550 | 551 | ||
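Replacing the early "return IRQ_NONE" exits with "goto out" guarantees that priv->post_irq() runs whenever priv->pre_irq() did, which some carrier boards need to re-enable or acknowledge the interrupt line; moving n++ to the end of the loop also keeps the handled-interrupt count honest. A skeleton of that handler shape (hypothetical foo_* helpers; irqreturn_t and IRQ_RETVAL() are the real kernel interfaces):

	static irqreturn_t foo_interrupt(int irq, void *dev_id)
	{
		struct foo_priv *priv = dev_id;
		int n = 0;

		if (priv->pre_irq)
			priv->pre_irq(priv);

		/* Shared IRQ line and our interrupts are masked? Not ours. */
		if (!foo_irq_enabled(priv))
			goto out;

		while (foo_irq_pending(priv) && n < FOO_MAX_IRQ) {
			if (foo_controller_absent(priv))
				goto out;	/* hot-unplug: still run post_irq */
			foo_service_one(priv);
			n++;			/* count only fully handled rounds */
		}
	out:
		if (priv->post_irq)
			priv->post_irq(priv);

		return IRQ_RETVAL(n);
	}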
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c index a9e068423ba0..369b736dde05 100644 --- a/drivers/net/ethernet/broadcom/tg3.c +++ b/drivers/net/ethernet/broadcom/tg3.c | |||
@@ -10629,10 +10629,8 @@ static void tg3_sd_scan_scratchpad(struct tg3 *tp, struct tg3_ocir *ocir) | |||
10629 | static ssize_t tg3_show_temp(struct device *dev, | 10629 | static ssize_t tg3_show_temp(struct device *dev, |
10630 | struct device_attribute *devattr, char *buf) | 10630 | struct device_attribute *devattr, char *buf) |
10631 | { | 10631 | { |
10632 | struct pci_dev *pdev = to_pci_dev(dev); | ||
10633 | struct net_device *netdev = pci_get_drvdata(pdev); | ||
10634 | struct tg3 *tp = netdev_priv(netdev); | ||
10635 | struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); | 10632 | struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); |
10633 | struct tg3 *tp = dev_get_drvdata(dev); | ||
10636 | u32 temperature; | 10634 | u32 temperature; |
10637 | 10635 | ||
10638 | spin_lock_bh(&tp->lock); | 10636 | spin_lock_bh(&tp->lock); |
@@ -10650,29 +10648,25 @@ static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, tg3_show_temp, NULL, | |||
10650 | static SENSOR_DEVICE_ATTR(temp1_max, S_IRUGO, tg3_show_temp, NULL, | 10648 | static SENSOR_DEVICE_ATTR(temp1_max, S_IRUGO, tg3_show_temp, NULL, |
10651 | TG3_TEMP_MAX_OFFSET); | 10649 | TG3_TEMP_MAX_OFFSET); |
10652 | 10650 | ||
10653 | static struct attribute *tg3_attributes[] = { | 10651 | static struct attribute *tg3_attrs[] = { |
10654 | &sensor_dev_attr_temp1_input.dev_attr.attr, | 10652 | &sensor_dev_attr_temp1_input.dev_attr.attr, |
10655 | &sensor_dev_attr_temp1_crit.dev_attr.attr, | 10653 | &sensor_dev_attr_temp1_crit.dev_attr.attr, |
10656 | &sensor_dev_attr_temp1_max.dev_attr.attr, | 10654 | &sensor_dev_attr_temp1_max.dev_attr.attr, |
10657 | NULL | 10655 | NULL |
10658 | }; | 10656 | }; |
10659 | 10657 | ATTRIBUTE_GROUPS(tg3); | |
10660 | static const struct attribute_group tg3_group = { | ||
10661 | .attrs = tg3_attributes, | ||
10662 | }; | ||
10663 | 10658 | ||
10664 | static void tg3_hwmon_close(struct tg3 *tp) | 10659 | static void tg3_hwmon_close(struct tg3 *tp) |
10665 | { | 10660 | { |
10666 | if (tp->hwmon_dev) { | 10661 | if (tp->hwmon_dev) { |
10667 | hwmon_device_unregister(tp->hwmon_dev); | 10662 | hwmon_device_unregister(tp->hwmon_dev); |
10668 | tp->hwmon_dev = NULL; | 10663 | tp->hwmon_dev = NULL; |
10669 | sysfs_remove_group(&tp->pdev->dev.kobj, &tg3_group); | ||
10670 | } | 10664 | } |
10671 | } | 10665 | } |
10672 | 10666 | ||
10673 | static void tg3_hwmon_open(struct tg3 *tp) | 10667 | static void tg3_hwmon_open(struct tg3 *tp) |
10674 | { | 10668 | { |
10675 | int i, err; | 10669 | int i; |
10676 | u32 size = 0; | 10670 | u32 size = 0; |
10677 | struct pci_dev *pdev = tp->pdev; | 10671 | struct pci_dev *pdev = tp->pdev; |
10678 | struct tg3_ocir ocirs[TG3_SD_NUM_RECS]; | 10672 | struct tg3_ocir ocirs[TG3_SD_NUM_RECS]; |
@@ -10690,18 +10684,11 @@ static void tg3_hwmon_open(struct tg3 *tp) | |||
10690 | if (!size) | 10684 | if (!size) |
10691 | return; | 10685 | return; |
10692 | 10686 | ||
10693 | /* Register hwmon sysfs hooks */ | 10687 | tp->hwmon_dev = hwmon_device_register_with_groups(&pdev->dev, "tg3", |
10694 | err = sysfs_create_group(&pdev->dev.kobj, &tg3_group); | 10688 | tp, tg3_groups); |
10695 | if (err) { | ||
10696 | dev_err(&pdev->dev, "Cannot create sysfs group, aborting\n"); | ||
10697 | return; | ||
10698 | } | ||
10699 | |||
10700 | tp->hwmon_dev = hwmon_device_register(&pdev->dev); | ||
10701 | if (IS_ERR(tp->hwmon_dev)) { | 10689 | if (IS_ERR(tp->hwmon_dev)) { |
10702 | tp->hwmon_dev = NULL; | 10690 | tp->hwmon_dev = NULL; |
10703 | dev_err(&pdev->dev, "Cannot register hwmon device, aborting\n"); | 10691 | dev_err(&pdev->dev, "Cannot register hwmon device, aborting\n"); |
10704 | sysfs_remove_group(&pdev->dev.kobj, &tg3_group); | ||
10705 | } | 10692 | } |
10706 | } | 10693 | } |
10707 | 10694 | ||
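The tg3 hwmon change is the standard conversion to hwmon_device_register_with_groups(): a NULL-terminated attribute array wrapped by ATTRIBUTE_GROUPS() is handed to the core, which creates and removes the sysfs files itself, so the manual sysfs_create_group()/sysfs_remove_group() pair disappears and the show callback can use dev_get_drvdata() on the hwmon device instead of digging through the PCI device. A minimal sketch with a single temperature attribute; the foo_* names and foo_read_temp() are hypothetical, the hwmon/sysfs macros are the real ones:

	static ssize_t foo_show_temp(struct device *dev,
				     struct device_attribute *devattr, char *buf)
	{
		struct foo_priv *priv = dev_get_drvdata(dev);	/* drvdata passed below */

		return sprintf(buf, "%u\n", foo_read_temp(priv));
	}
	static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, foo_show_temp, NULL, 0);

	static struct attribute *foo_attrs[] = {
		&sensor_dev_attr_temp1_input.dev_attr.attr,
		NULL
	};
	ATTRIBUTE_GROUPS(foo);			/* defines foo_groups for us */

	static int foo_hwmon_init(struct foo_priv *priv, struct device *parent)
	{
		priv->hwmon_dev = hwmon_device_register_with_groups(parent, "foo",
								     priv, foo_groups);
		return PTR_ERR_OR_ZERO(priv->hwmon_dev);
	}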
diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h index f4825db5d179..5878df619b53 100644 --- a/drivers/net/ethernet/emulex/benet/be.h +++ b/drivers/net/ethernet/emulex/benet/be.h | |||
@@ -503,6 +503,7 @@ struct be_adapter { | |||
503 | }; | 503 | }; |
504 | 504 | ||
505 | #define be_physfn(adapter) (!adapter->virtfn) | 505 | #define be_physfn(adapter) (!adapter->virtfn) |
506 | #define be_virtfn(adapter) (adapter->virtfn) | ||
506 | #define sriov_enabled(adapter) (adapter->num_vfs > 0) | 507 | #define sriov_enabled(adapter) (adapter->num_vfs > 0) |
507 | #define sriov_want(adapter) (be_physfn(adapter) && \ | 508 | #define sriov_want(adapter) (be_physfn(adapter) && \ |
508 | (num_vfs || pci_num_vf(adapter->pdev))) | 509 | (num_vfs || pci_num_vf(adapter->pdev))) |
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c index dbcd5262c016..e0e8bc1ef14c 100644 --- a/drivers/net/ethernet/emulex/benet/be_cmds.c +++ b/drivers/net/ethernet/emulex/benet/be_cmds.c | |||
@@ -1032,6 +1032,13 @@ int be_cmd_cq_create(struct be_adapter *adapter, struct be_queue_info *cq, | |||
1032 | } else { | 1032 | } else { |
1033 | req->hdr.version = 2; | 1033 | req->hdr.version = 2; |
1034 | req->page_size = 1; /* 1 for 4K */ | 1034 | req->page_size = 1; /* 1 for 4K */ |
1035 | |||
1036 | /* coalesce-wm field in this cmd is not relevant to Lancer. | ||
1037 | * Lancer uses COMMON_MODIFY_CQ to set this field | ||
1038 | */ | ||
1039 | if (!lancer_chip(adapter)) | ||
1040 | AMAP_SET_BITS(struct amap_cq_context_v2, coalescwm, | ||
1041 | ctxt, coalesce_wm); | ||
1035 | AMAP_SET_BITS(struct amap_cq_context_v2, nodelay, ctxt, | 1042 | AMAP_SET_BITS(struct amap_cq_context_v2, nodelay, ctxt, |
1036 | no_delay); | 1043 | no_delay); |
1037 | AMAP_SET_BITS(struct amap_cq_context_v2, count, ctxt, | 1044 | AMAP_SET_BITS(struct amap_cq_context_v2, count, ctxt, |
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c index abde97471636..fee64bf10446 100644 --- a/drivers/net/ethernet/emulex/benet/be_main.c +++ b/drivers/net/ethernet/emulex/benet/be_main.c | |||
@@ -2658,8 +2658,8 @@ static int be_close(struct net_device *netdev) | |||
2658 | 2658 | ||
2659 | be_roce_dev_close(adapter); | 2659 | be_roce_dev_close(adapter); |
2660 | 2660 | ||
2661 | for_all_evt_queues(adapter, eqo, i) { | 2661 | if (adapter->flags & BE_FLAGS_NAPI_ENABLED) { |
2662 | if (adapter->flags & BE_FLAGS_NAPI_ENABLED) { | 2662 | for_all_evt_queues(adapter, eqo, i) { |
2663 | napi_disable(&eqo->napi); | 2663 | napi_disable(&eqo->napi); |
2664 | be_disable_busy_poll(eqo); | 2664 | be_disable_busy_poll(eqo); |
2665 | } | 2665 | } |
@@ -3253,12 +3253,10 @@ static int be_mac_setup(struct be_adapter *adapter) | |||
3253 | memcpy(mac, adapter->netdev->dev_addr, ETH_ALEN); | 3253 | memcpy(mac, adapter->netdev->dev_addr, ETH_ALEN); |
3254 | } | 3254 | } |
3255 | 3255 | ||
3256 | /* On BE3 VFs this cmd may fail due to lack of privilege. | 3256 | /* For BE3-R VFs, the PF programs the initial MAC address */ |
3257 | * Ignore the failure as in this case pmac_id is fetched | 3257 | if (!(BEx_chip(adapter) && be_virtfn(adapter))) |
3258 | * in the IFACE_CREATE cmd. | 3258 | be_cmd_pmac_add(adapter, mac, adapter->if_handle, |
3259 | */ | 3259 | &adapter->pmac_id[0], 0); |
3260 | be_cmd_pmac_add(adapter, mac, adapter->if_handle, | ||
3261 | &adapter->pmac_id[0], 0); | ||
3262 | return 0; | 3260 | return 0; |
3263 | } | 3261 | } |
3264 | 3262 | ||
@@ -4599,6 +4597,7 @@ static int be_suspend(struct pci_dev *pdev, pm_message_t state) | |||
4599 | if (adapter->wol) | 4597 | if (adapter->wol) |
4600 | be_setup_wol(adapter, true); | 4598 | be_setup_wol(adapter, true); |
4601 | 4599 | ||
4600 | be_intr_set(adapter, false); | ||
4602 | cancel_delayed_work_sync(&adapter->func_recovery_work); | 4601 | cancel_delayed_work_sync(&adapter->func_recovery_work); |
4603 | 4602 | ||
4604 | netif_device_detach(netdev); | 4603 | netif_device_detach(netdev); |
@@ -4634,6 +4633,7 @@ static int be_resume(struct pci_dev *pdev) | |||
4634 | if (status) | 4633 | if (status) |
4635 | return status; | 4634 | return status; |
4636 | 4635 | ||
4636 | be_intr_set(adapter, true); | ||
4637 | /* tell fw we're ready to fire cmds */ | 4637 | /* tell fw we're ready to fire cmds */ |
4638 | status = be_cmd_fw_init(adapter); | 4638 | status = be_cmd_fw_init(adapter); |
4639 | if (status) | 4639 | if (status) |
diff --git a/drivers/net/ethernet/intel/e1000/e1000.h b/drivers/net/ethernet/intel/e1000/e1000.h index 58c147271a36..f9313b36c887 100644 --- a/drivers/net/ethernet/intel/e1000/e1000.h +++ b/drivers/net/ethernet/intel/e1000/e1000.h | |||
@@ -83,6 +83,11 @@ struct e1000_adapter; | |||
83 | 83 | ||
84 | #define E1000_MAX_INTR 10 | 84 | #define E1000_MAX_INTR 10 |
85 | 85 | ||
86 | /* | ||
87 | * Count for polling __E1000_RESET condition every 10-20msec. | ||
88 | */ | ||
89 | #define E1000_CHECK_RESET_COUNT 50 | ||
90 | |||
86 | /* TX/RX descriptor defines */ | 91 | /* TX/RX descriptor defines */ |
87 | #define E1000_DEFAULT_TXD 256 | 92 | #define E1000_DEFAULT_TXD 256 |
88 | #define E1000_MAX_TXD 256 | 93 | #define E1000_MAX_TXD 256 |
@@ -312,8 +317,6 @@ struct e1000_adapter { | |||
312 | struct delayed_work watchdog_task; | 317 | struct delayed_work watchdog_task; |
313 | struct delayed_work fifo_stall_task; | 318 | struct delayed_work fifo_stall_task; |
314 | struct delayed_work phy_info_task; | 319 | struct delayed_work phy_info_task; |
315 | |||
316 | struct mutex mutex; | ||
317 | }; | 320 | }; |
318 | 321 | ||
319 | enum e1000_state_t { | 322 | enum e1000_state_t { |
diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c index e38622825fa7..46e6544ed1b7 100644 --- a/drivers/net/ethernet/intel/e1000/e1000_main.c +++ b/drivers/net/ethernet/intel/e1000/e1000_main.c | |||
@@ -494,13 +494,20 @@ static void e1000_down_and_stop(struct e1000_adapter *adapter) | |||
494 | { | 494 | { |
495 | set_bit(__E1000_DOWN, &adapter->flags); | 495 | set_bit(__E1000_DOWN, &adapter->flags); |
496 | 496 | ||
497 | /* Only kill reset task if adapter is not resetting */ | ||
498 | if (!test_bit(__E1000_RESETTING, &adapter->flags)) | ||
499 | cancel_work_sync(&adapter->reset_task); | ||
500 | |||
501 | cancel_delayed_work_sync(&adapter->watchdog_task); | 497 | cancel_delayed_work_sync(&adapter->watchdog_task); |
498 | |||
499 | /* | ||
500 | * Since the watchdog task can reschedule other tasks, we should cancel | ||
501 | * it first, otherwise we can run into the situation when a work is | ||
502 | * still running after the adapter has been turned down. | ||
503 | */ | ||
504 | |||
502 | cancel_delayed_work_sync(&adapter->phy_info_task); | 505 | cancel_delayed_work_sync(&adapter->phy_info_task); |
503 | cancel_delayed_work_sync(&adapter->fifo_stall_task); | 506 | cancel_delayed_work_sync(&adapter->fifo_stall_task); |
507 | |||
508 | /* Only kill reset task if adapter is not resetting */ | ||
509 | if (!test_bit(__E1000_RESETTING, &adapter->flags)) | ||
510 | cancel_work_sync(&adapter->reset_task); | ||
504 | } | 511 | } |
505 | 512 | ||
506 | void e1000_down(struct e1000_adapter *adapter) | 513 | void e1000_down(struct e1000_adapter *adapter) |
@@ -544,21 +551,8 @@ void e1000_down(struct e1000_adapter *adapter) | |||
544 | e1000_clean_all_rx_rings(adapter); | 551 | e1000_clean_all_rx_rings(adapter); |
545 | } | 552 | } |
546 | 553 | ||
547 | static void e1000_reinit_safe(struct e1000_adapter *adapter) | ||
548 | { | ||
549 | while (test_and_set_bit(__E1000_RESETTING, &adapter->flags)) | ||
550 | msleep(1); | ||
551 | mutex_lock(&adapter->mutex); | ||
552 | e1000_down(adapter); | ||
553 | e1000_up(adapter); | ||
554 | mutex_unlock(&adapter->mutex); | ||
555 | clear_bit(__E1000_RESETTING, &adapter->flags); | ||
556 | } | ||
557 | |||
558 | void e1000_reinit_locked(struct e1000_adapter *adapter) | 554 | void e1000_reinit_locked(struct e1000_adapter *adapter) |
559 | { | 555 | { |
560 | /* if rtnl_lock is not held the call path is bogus */ | ||
561 | ASSERT_RTNL(); | ||
562 | WARN_ON(in_interrupt()); | 556 | WARN_ON(in_interrupt()); |
563 | while (test_and_set_bit(__E1000_RESETTING, &adapter->flags)) | 557 | while (test_and_set_bit(__E1000_RESETTING, &adapter->flags)) |
564 | msleep(1); | 558 | msleep(1); |
@@ -1316,7 +1310,6 @@ static int e1000_sw_init(struct e1000_adapter *adapter) | |||
1316 | e1000_irq_disable(adapter); | 1310 | e1000_irq_disable(adapter); |
1317 | 1311 | ||
1318 | spin_lock_init(&adapter->stats_lock); | 1312 | spin_lock_init(&adapter->stats_lock); |
1319 | mutex_init(&adapter->mutex); | ||
1320 | 1313 | ||
1321 | set_bit(__E1000_DOWN, &adapter->flags); | 1314 | set_bit(__E1000_DOWN, &adapter->flags); |
1322 | 1315 | ||
@@ -1440,6 +1433,10 @@ static int e1000_close(struct net_device *netdev) | |||
1440 | { | 1433 | { |
1441 | struct e1000_adapter *adapter = netdev_priv(netdev); | 1434 | struct e1000_adapter *adapter = netdev_priv(netdev); |
1442 | struct e1000_hw *hw = &adapter->hw; | 1435 | struct e1000_hw *hw = &adapter->hw; |
1436 | int count = E1000_CHECK_RESET_COUNT; | ||
1437 | |||
1438 | while (test_bit(__E1000_RESETTING, &adapter->flags) && count--) | ||
1439 | usleep_range(10000, 20000); | ||
1443 | 1440 | ||
1444 | WARN_ON(test_bit(__E1000_RESETTING, &adapter->flags)); | 1441 | WARN_ON(test_bit(__E1000_RESETTING, &adapter->flags)); |
1445 | e1000_down(adapter); | 1442 | e1000_down(adapter); |
@@ -2325,11 +2322,8 @@ static void e1000_update_phy_info_task(struct work_struct *work) | |||
2325 | struct e1000_adapter *adapter = container_of(work, | 2322 | struct e1000_adapter *adapter = container_of(work, |
2326 | struct e1000_adapter, | 2323 | struct e1000_adapter, |
2327 | phy_info_task.work); | 2324 | phy_info_task.work); |
2328 | if (test_bit(__E1000_DOWN, &adapter->flags)) | 2325 | |
2329 | return; | ||
2330 | mutex_lock(&adapter->mutex); | ||
2331 | e1000_phy_get_info(&adapter->hw, &adapter->phy_info); | 2326 | e1000_phy_get_info(&adapter->hw, &adapter->phy_info); |
2332 | mutex_unlock(&adapter->mutex); | ||
2333 | } | 2327 | } |
2334 | 2328 | ||
2335 | /** | 2329 | /** |
@@ -2345,9 +2339,6 @@ static void e1000_82547_tx_fifo_stall_task(struct work_struct *work) | |||
2345 | struct net_device *netdev = adapter->netdev; | 2339 | struct net_device *netdev = adapter->netdev; |
2346 | u32 tctl; | 2340 | u32 tctl; |
2347 | 2341 | ||
2348 | if (test_bit(__E1000_DOWN, &adapter->flags)) | ||
2349 | return; | ||
2350 | mutex_lock(&adapter->mutex); | ||
2351 | if (atomic_read(&adapter->tx_fifo_stall)) { | 2342 | if (atomic_read(&adapter->tx_fifo_stall)) { |
2352 | if ((er32(TDT) == er32(TDH)) && | 2343 | if ((er32(TDT) == er32(TDH)) && |
2353 | (er32(TDFT) == er32(TDFH)) && | 2344 | (er32(TDFT) == er32(TDFH)) && |
@@ -2368,7 +2359,6 @@ static void e1000_82547_tx_fifo_stall_task(struct work_struct *work) | |||
2368 | schedule_delayed_work(&adapter->fifo_stall_task, 1); | 2359 | schedule_delayed_work(&adapter->fifo_stall_task, 1); |
2369 | } | 2360 | } |
2370 | } | 2361 | } |
2371 | mutex_unlock(&adapter->mutex); | ||
2372 | } | 2362 | } |
2373 | 2363 | ||
2374 | bool e1000_has_link(struct e1000_adapter *adapter) | 2364 | bool e1000_has_link(struct e1000_adapter *adapter) |
@@ -2422,10 +2412,6 @@ static void e1000_watchdog(struct work_struct *work) | |||
2422 | struct e1000_tx_ring *txdr = adapter->tx_ring; | 2412 | struct e1000_tx_ring *txdr = adapter->tx_ring; |
2423 | u32 link, tctl; | 2413 | u32 link, tctl; |
2424 | 2414 | ||
2425 | if (test_bit(__E1000_DOWN, &adapter->flags)) | ||
2426 | return; | ||
2427 | |||
2428 | mutex_lock(&adapter->mutex); | ||
2429 | link = e1000_has_link(adapter); | 2415 | link = e1000_has_link(adapter); |
2430 | if ((netif_carrier_ok(netdev)) && link) | 2416 | if ((netif_carrier_ok(netdev)) && link) |
2431 | goto link_up; | 2417 | goto link_up; |
@@ -2516,7 +2502,7 @@ link_up: | |||
2516 | adapter->tx_timeout_count++; | 2502 | adapter->tx_timeout_count++; |
2517 | schedule_work(&adapter->reset_task); | 2503 | schedule_work(&adapter->reset_task); |
2518 | /* exit immediately since reset is imminent */ | 2504 | /* exit immediately since reset is imminent */ |
2519 | goto unlock; | 2505 | return; |
2520 | } | 2506 | } |
2521 | } | 2507 | } |
2522 | 2508 | ||
@@ -2544,9 +2530,6 @@ link_up: | |||
2544 | /* Reschedule the task */ | 2530 | /* Reschedule the task */ |
2545 | if (!test_bit(__E1000_DOWN, &adapter->flags)) | 2531 | if (!test_bit(__E1000_DOWN, &adapter->flags)) |
2546 | schedule_delayed_work(&adapter->watchdog_task, 2 * HZ); | 2532 | schedule_delayed_work(&adapter->watchdog_task, 2 * HZ); |
2547 | |||
2548 | unlock: | ||
2549 | mutex_unlock(&adapter->mutex); | ||
2550 | } | 2533 | } |
2551 | 2534 | ||
2552 | enum latency_range { | 2535 | enum latency_range { |
@@ -3495,10 +3478,8 @@ static void e1000_reset_task(struct work_struct *work) | |||
3495 | struct e1000_adapter *adapter = | 3478 | struct e1000_adapter *adapter = |
3496 | container_of(work, struct e1000_adapter, reset_task); | 3479 | container_of(work, struct e1000_adapter, reset_task); |
3497 | 3480 | ||
3498 | if (test_bit(__E1000_DOWN, &adapter->flags)) | ||
3499 | return; | ||
3500 | e_err(drv, "Reset adapter\n"); | 3481 | e_err(drv, "Reset adapter\n"); |
3501 | e1000_reinit_safe(adapter); | 3482 | e1000_reinit_locked(adapter); |
3502 | } | 3483 | } |
3503 | 3484 | ||
3504 | /** | 3485 | /** |
@@ -4963,6 +4944,11 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake) | |||
4963 | netif_device_detach(netdev); | 4944 | netif_device_detach(netdev); |
4964 | 4945 | ||
4965 | if (netif_running(netdev)) { | 4946 | if (netif_running(netdev)) { |
4947 | int count = E1000_CHECK_RESET_COUNT; | ||
4948 | |||
4949 | while (test_bit(__E1000_RESETTING, &adapter->flags) && count--) | ||
4950 | usleep_range(10000, 20000); | ||
4951 | |||
4966 | WARN_ON(test_bit(__E1000_RESETTING, &adapter->flags)); | 4952 | WARN_ON(test_bit(__E1000_RESETTING, &adapter->flags)); |
4967 | e1000_down(adapter); | 4953 | e1000_down(adapter); |
4968 | } | 4954 | } |
diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c index b0f3666b1d7f..c3143da497c8 100644 --- a/drivers/net/ethernet/intel/igb/igb_ethtool.c +++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c | |||
@@ -2062,14 +2062,15 @@ static void igb_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) | |||
2062 | { | 2062 | { |
2063 | struct igb_adapter *adapter = netdev_priv(netdev); | 2063 | struct igb_adapter *adapter = netdev_priv(netdev); |
2064 | 2064 | ||
2065 | wol->supported = WAKE_UCAST | WAKE_MCAST | | ||
2066 | WAKE_BCAST | WAKE_MAGIC | | ||
2067 | WAKE_PHY; | ||
2068 | wol->wolopts = 0; | 2065 | wol->wolopts = 0; |
2069 | 2066 | ||
2070 | if (!(adapter->flags & IGB_FLAG_WOL_SUPPORTED)) | 2067 | if (!(adapter->flags & IGB_FLAG_WOL_SUPPORTED)) |
2071 | return; | 2068 | return; |
2072 | 2069 | ||
2070 | wol->supported = WAKE_UCAST | WAKE_MCAST | | ||
2071 | WAKE_BCAST | WAKE_MAGIC | | ||
2072 | WAKE_PHY; | ||
2073 | |||
2073 | /* apply any specific unsupported masks here */ | 2074 | /* apply any specific unsupported masks here */ |
2074 | switch (adapter->hw.device_id) { | 2075 | switch (adapter->hw.device_id) { |
2075 | default: | 2076 | default: |
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index 0c55079ebee3..cc06854296a3 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | |||
@@ -4251,8 +4251,8 @@ static void ixgbe_disable_fwd_ring(struct ixgbe_fwd_adapter *vadapter, | |||
4251 | rx_ring->l2_accel_priv = NULL; | 4251 | rx_ring->l2_accel_priv = NULL; |
4252 | } | 4252 | } |
4253 | 4253 | ||
4254 | int ixgbe_fwd_ring_down(struct net_device *vdev, | 4254 | static int ixgbe_fwd_ring_down(struct net_device *vdev, |
4255 | struct ixgbe_fwd_adapter *accel) | 4255 | struct ixgbe_fwd_adapter *accel) |
4256 | { | 4256 | { |
4257 | struct ixgbe_adapter *adapter = accel->real_adapter; | 4257 | struct ixgbe_adapter *adapter = accel->real_adapter; |
4258 | unsigned int rxbase = accel->rx_base_queue; | 4258 | unsigned int rxbase = accel->rx_base_queue; |
@@ -7986,10 +7986,9 @@ skip_sriov: | |||
7986 | NETIF_F_TSO | | 7986 | NETIF_F_TSO | |
7987 | NETIF_F_TSO6 | | 7987 | NETIF_F_TSO6 | |
7988 | NETIF_F_RXHASH | | 7988 | NETIF_F_RXHASH | |
7989 | NETIF_F_RXCSUM | | 7989 | NETIF_F_RXCSUM; |
7990 | NETIF_F_HW_L2FW_DOFFLOAD; | ||
7991 | 7990 | ||
7992 | netdev->hw_features = netdev->features; | 7991 | netdev->hw_features = netdev->features | NETIF_F_HW_L2FW_DOFFLOAD; |
7993 | 7992 | ||
7994 | switch (adapter->hw.mac.type) { | 7993 | switch (adapter->hw.mac.type) { |
7995 | case ixgbe_mac_82599EB: | 7994 | case ixgbe_mac_82599EB: |
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c index e4c676006be9..39217e5ff7dc 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c | |||
@@ -46,6 +46,7 @@ static bool ixgbe_get_i2c_data(u32 *i2cctl); | |||
46 | static void ixgbe_i2c_bus_clear(struct ixgbe_hw *hw); | 46 | static void ixgbe_i2c_bus_clear(struct ixgbe_hw *hw); |
47 | static enum ixgbe_phy_type ixgbe_get_phy_type_from_id(u32 phy_id); | 47 | static enum ixgbe_phy_type ixgbe_get_phy_type_from_id(u32 phy_id); |
48 | static s32 ixgbe_get_phy_id(struct ixgbe_hw *hw); | 48 | static s32 ixgbe_get_phy_id(struct ixgbe_hw *hw); |
49 | static s32 ixgbe_identify_qsfp_module_generic(struct ixgbe_hw *hw); | ||
49 | 50 | ||
50 | /** | 51 | /** |
51 | * ixgbe_identify_phy_generic - Get physical layer module | 52 | * ixgbe_identify_phy_generic - Get physical layer module |
@@ -1164,7 +1165,7 @@ err_read_i2c_eeprom: | |||
1164 | * | 1165 | * |
1165 | * Searches for and identifies the QSFP module and assigns appropriate PHY type | 1166 | * Searches for and identifies the QSFP module and assigns appropriate PHY type |
1166 | **/ | 1167 | **/ |
1167 | s32 ixgbe_identify_qsfp_module_generic(struct ixgbe_hw *hw) | 1168 | static s32 ixgbe_identify_qsfp_module_generic(struct ixgbe_hw *hw) |
1168 | { | 1169 | { |
1169 | struct ixgbe_adapter *adapter = hw->back; | 1170 | struct ixgbe_adapter *adapter = hw->back; |
1170 | s32 status = IXGBE_ERR_PHY_ADDR_INVALID; | 1171 | s32 status = IXGBE_ERR_PHY_ADDR_INVALID; |
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h index aae900a256da..fffcbdd2bf0e 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h | |||
@@ -145,7 +145,6 @@ s32 ixgbe_get_phy_firmware_version_generic(struct ixgbe_hw *hw, | |||
145 | s32 ixgbe_reset_phy_nl(struct ixgbe_hw *hw); | 145 | s32 ixgbe_reset_phy_nl(struct ixgbe_hw *hw); |
146 | s32 ixgbe_identify_module_generic(struct ixgbe_hw *hw); | 146 | s32 ixgbe_identify_module_generic(struct ixgbe_hw *hw); |
147 | s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw); | 147 | s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw); |
148 | s32 ixgbe_identify_qsfp_module_generic(struct ixgbe_hw *hw); | ||
149 | s32 ixgbe_get_sfp_init_sequence_offsets(struct ixgbe_hw *hw, | 148 | s32 ixgbe_get_sfp_init_sequence_offsets(struct ixgbe_hw *hw, |
150 | u16 *list_offset, | 149 | u16 *list_offset, |
151 | u16 *data_offset); | 150 | u16 *data_offset); |
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_selftest.c b/drivers/net/ethernet/mellanox/mlx4/en_selftest.c index 40626690e8a8..c11d063473e5 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_selftest.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_selftest.c | |||
@@ -140,7 +140,6 @@ void mlx4_en_ex_selftest(struct net_device *dev, u32 *flags, u64 *buf) | |||
140 | { | 140 | { |
141 | struct mlx4_en_priv *priv = netdev_priv(dev); | 141 | struct mlx4_en_priv *priv = netdev_priv(dev); |
142 | struct mlx4_en_dev *mdev = priv->mdev; | 142 | struct mlx4_en_dev *mdev = priv->mdev; |
143 | struct mlx4_en_tx_ring *tx_ring; | ||
144 | int i, carrier_ok; | 143 | int i, carrier_ok; |
145 | 144 | ||
146 | memset(buf, 0, sizeof(u64) * MLX4_EN_NUM_SELF_TEST); | 145 | memset(buf, 0, sizeof(u64) * MLX4_EN_NUM_SELF_TEST); |
@@ -150,16 +149,10 @@ void mlx4_en_ex_selftest(struct net_device *dev, u32 *flags, u64 *buf) | |||
150 | carrier_ok = netif_carrier_ok(dev); | 149 | carrier_ok = netif_carrier_ok(dev); |
151 | 150 | ||
152 | netif_carrier_off(dev); | 151 | netif_carrier_off(dev); |
153 | retry_tx: | ||
154 | /* Wait until all tx queues are empty. | 152 | /* Wait until all tx queues are empty. |
155 | * there should not be any additional incoming traffic | 153 | * there should not be any additional incoming traffic |
156 | * since we turned the carrier off */ | 154 | * since we turned the carrier off */ |
157 | msleep(200); | 155 | msleep(200); |
158 | for (i = 0; i < priv->tx_ring_num && carrier_ok; i++) { | ||
159 | tx_ring = priv->tx_ring[i]; | ||
160 | if (tx_ring->prod != (tx_ring->cons + tx_ring->last_nr_txbb)) | ||
161 | goto retry_tx; | ||
162 | } | ||
163 | 156 | ||
164 | if (priv->mdev->dev->caps.flags & | 157 | if (priv->mdev->dev->caps.flags & |
165 | MLX4_DEV_CAP_FLAG_UC_LOOPBACK) { | 158 | MLX4_DEV_CAP_FLAG_UC_LOOPBACK) { |
diff --git a/drivers/net/ethernet/realtek/8139cp.c b/drivers/net/ethernet/realtek/8139cp.c index f2a2128165dd..737c1a881f78 100644 --- a/drivers/net/ethernet/realtek/8139cp.c +++ b/drivers/net/ethernet/realtek/8139cp.c | |||
@@ -678,9 +678,6 @@ static void cp_tx (struct cp_private *cp) | |||
678 | le32_to_cpu(txd->opts1) & 0xffff, | 678 | le32_to_cpu(txd->opts1) & 0xffff, |
679 | PCI_DMA_TODEVICE); | 679 | PCI_DMA_TODEVICE); |
680 | 680 | ||
681 | bytes_compl += skb->len; | ||
682 | pkts_compl++; | ||
683 | |||
684 | if (status & LastFrag) { | 681 | if (status & LastFrag) { |
685 | if (status & (TxError | TxFIFOUnder)) { | 682 | if (status & (TxError | TxFIFOUnder)) { |
686 | netif_dbg(cp, tx_err, cp->dev, | 683 | netif_dbg(cp, tx_err, cp->dev, |
@@ -702,6 +699,8 @@ static void cp_tx (struct cp_private *cp) | |||
702 | netif_dbg(cp, tx_done, cp->dev, | 699 | netif_dbg(cp, tx_done, cp->dev, |
703 | "tx done, slot %d\n", tx_tail); | 700 | "tx done, slot %d\n", tx_tail); |
704 | } | 701 | } |
702 | bytes_compl += skb->len; | ||
703 | pkts_compl++; | ||
705 | dev_kfree_skb_irq(skb); | 704 | dev_kfree_skb_irq(skb); |
706 | } | 705 | } |
707 | 706 | ||
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c index 799387570766..c737f0ea5de7 100644 --- a/drivers/net/ethernet/realtek/r8169.c +++ b/drivers/net/ethernet/realtek/r8169.c | |||
@@ -3465,6 +3465,11 @@ static void rtl8168g_1_hw_phy_config(struct rtl8169_private *tp) | |||
3465 | rtl_writephy(tp, 0x14, 0x9065); | 3465 | rtl_writephy(tp, 0x14, 0x9065); |
3466 | rtl_writephy(tp, 0x14, 0x1065); | 3466 | rtl_writephy(tp, 0x14, 0x1065); |
3467 | 3467 | ||
3468 | /* Check ALDPS bit, disable it if enabled */ | ||
3469 | rtl_writephy(tp, 0x1f, 0x0a43); | ||
3470 | if (rtl_readphy(tp, 0x10) & 0x0004) | ||
3471 | rtl_w1w0_phy(tp, 0x10, 0x0000, 0x0004); | ||
3472 | |||
3468 | rtl_writephy(tp, 0x1f, 0x0000); | 3473 | rtl_writephy(tp, 0x1f, 0x0000); |
3469 | } | 3474 | } |
3470 | 3475 | ||
diff --git a/drivers/net/ethernet/sfc/mcdi.h b/drivers/net/ethernet/sfc/mcdi.h index 656a3277c2b2..15816cacb548 100644 --- a/drivers/net/ethernet/sfc/mcdi.h +++ b/drivers/net/ethernet/sfc/mcdi.h | |||
@@ -75,6 +75,8 @@ struct efx_mcdi_mon { | |||
75 | unsigned long last_update; | 75 | unsigned long last_update; |
76 | struct device *device; | 76 | struct device *device; |
77 | struct efx_mcdi_mon_attribute *attrs; | 77 | struct efx_mcdi_mon_attribute *attrs; |
78 | struct attribute_group group; | ||
79 | const struct attribute_group *groups[2]; | ||
78 | unsigned int n_attrs; | 80 | unsigned int n_attrs; |
79 | }; | 81 | }; |
80 | 82 | ||
diff --git a/drivers/net/ethernet/sfc/mcdi_mon.c b/drivers/net/ethernet/sfc/mcdi_mon.c index 4cc5d95b2a5a..d72ad4fc3617 100644 --- a/drivers/net/ethernet/sfc/mcdi_mon.c +++ b/drivers/net/ethernet/sfc/mcdi_mon.c | |||
@@ -139,17 +139,10 @@ static int efx_mcdi_mon_update(struct efx_nic *efx) | |||
139 | return rc; | 139 | return rc; |
140 | } | 140 | } |
141 | 141 | ||
142 | static ssize_t efx_mcdi_mon_show_name(struct device *dev, | ||
143 | struct device_attribute *attr, | ||
144 | char *buf) | ||
145 | { | ||
146 | return sprintf(buf, "%s\n", KBUILD_MODNAME); | ||
147 | } | ||
148 | |||
149 | static int efx_mcdi_mon_get_entry(struct device *dev, unsigned int index, | 142 | static int efx_mcdi_mon_get_entry(struct device *dev, unsigned int index, |
150 | efx_dword_t *entry) | 143 | efx_dword_t *entry) |
151 | { | 144 | { |
152 | struct efx_nic *efx = dev_get_drvdata(dev); | 145 | struct efx_nic *efx = dev_get_drvdata(dev->parent); |
153 | struct efx_mcdi_mon *hwmon = efx_mcdi_mon(efx); | 146 | struct efx_mcdi_mon *hwmon = efx_mcdi_mon(efx); |
154 | int rc; | 147 | int rc; |
155 | 148 | ||
@@ -263,7 +256,7 @@ static ssize_t efx_mcdi_mon_show_label(struct device *dev, | |||
263 | efx_mcdi_sensor_type[mon_attr->type].label); | 256 | efx_mcdi_sensor_type[mon_attr->type].label); |
264 | } | 257 | } |
265 | 258 | ||
266 | static int | 259 | static void |
267 | efx_mcdi_mon_add_attr(struct efx_nic *efx, const char *name, | 260 | efx_mcdi_mon_add_attr(struct efx_nic *efx, const char *name, |
268 | ssize_t (*reader)(struct device *, | 261 | ssize_t (*reader)(struct device *, |
269 | struct device_attribute *, char *), | 262 | struct device_attribute *, char *), |
@@ -272,7 +265,6 @@ efx_mcdi_mon_add_attr(struct efx_nic *efx, const char *name, | |||
272 | { | 265 | { |
273 | struct efx_mcdi_mon *hwmon = efx_mcdi_mon(efx); | 266 | struct efx_mcdi_mon *hwmon = efx_mcdi_mon(efx); |
274 | struct efx_mcdi_mon_attribute *attr = &hwmon->attrs[hwmon->n_attrs]; | 267 | struct efx_mcdi_mon_attribute *attr = &hwmon->attrs[hwmon->n_attrs]; |
275 | int rc; | ||
276 | 268 | ||
277 | strlcpy(attr->name, name, sizeof(attr->name)); | 269 | strlcpy(attr->name, name, sizeof(attr->name)); |
278 | attr->index = index; | 270 | attr->index = index; |
@@ -286,10 +278,7 @@ efx_mcdi_mon_add_attr(struct efx_nic *efx, const char *name, | |||
286 | attr->dev_attr.attr.name = attr->name; | 278 | attr->dev_attr.attr.name = attr->name; |
287 | attr->dev_attr.attr.mode = S_IRUGO; | 279 | attr->dev_attr.attr.mode = S_IRUGO; |
288 | attr->dev_attr.show = reader; | 280 | attr->dev_attr.show = reader; |
289 | rc = device_create_file(&efx->pci_dev->dev, &attr->dev_attr); | 281 | hwmon->group.attrs[hwmon->n_attrs++] = &attr->dev_attr.attr; |
290 | if (rc == 0) | ||
291 | ++hwmon->n_attrs; | ||
292 | return rc; | ||
293 | } | 282 | } |
294 | 283 | ||
295 | int efx_mcdi_mon_probe(struct efx_nic *efx) | 284 | int efx_mcdi_mon_probe(struct efx_nic *efx) |
@@ -338,26 +327,22 @@ int efx_mcdi_mon_probe(struct efx_nic *efx) | |||
338 | efx_mcdi_mon_update(efx); | 327 | efx_mcdi_mon_update(efx); |
339 | 328 | ||
340 | /* Allocate space for the maximum possible number of | 329 | /* Allocate space for the maximum possible number of |
341 | * attributes for this set of sensors: name of the driver plus | 330 | * attributes for this set of sensors: |
342 | * value, min, max, crit, alarm and label for each sensor. | 331 | * value, min, max, crit, alarm and label for each sensor. |
343 | */ | 332 | */ |
344 | n_attrs = 1 + 6 * n_sensors; | 333 | n_attrs = 6 * n_sensors; |
345 | hwmon->attrs = kcalloc(n_attrs, sizeof(*hwmon->attrs), GFP_KERNEL); | 334 | hwmon->attrs = kcalloc(n_attrs, sizeof(*hwmon->attrs), GFP_KERNEL); |
346 | if (!hwmon->attrs) { | 335 | if (!hwmon->attrs) { |
347 | rc = -ENOMEM; | 336 | rc = -ENOMEM; |
348 | goto fail; | 337 | goto fail; |
349 | } | 338 | } |
350 | 339 | hwmon->group.attrs = kcalloc(n_attrs + 1, sizeof(struct attribute *), | |
351 | hwmon->device = hwmon_device_register(&efx->pci_dev->dev); | 340 | GFP_KERNEL); |
352 | if (IS_ERR(hwmon->device)) { | 341 | if (!hwmon->group.attrs) { |
353 | rc = PTR_ERR(hwmon->device); | 342 | rc = -ENOMEM; |
354 | goto fail; | 343 | goto fail; |
355 | } | 344 | } |
356 | 345 | ||
357 | rc = efx_mcdi_mon_add_attr(efx, "name", efx_mcdi_mon_show_name, 0, 0, 0); | ||
358 | if (rc) | ||
359 | goto fail; | ||
360 | |||
361 | for (i = 0, j = -1, type = -1; ; i++) { | 346 | for (i = 0, j = -1, type = -1; ; i++) { |
362 | enum efx_hwmon_type hwmon_type; | 347 | enum efx_hwmon_type hwmon_type; |
363 | const char *hwmon_prefix; | 348 | const char *hwmon_prefix; |
@@ -372,7 +357,7 @@ int efx_mcdi_mon_probe(struct efx_nic *efx) | |||
372 | page = type / 32; | 357 | page = type / 32; |
373 | j = -1; | 358 | j = -1; |
374 | if (page == n_pages) | 359 | if (page == n_pages) |
375 | return 0; | 360 | goto hwmon_register; |
376 | 361 | ||
377 | MCDI_SET_DWORD(inbuf, SENSOR_INFO_EXT_IN_PAGE, | 362 | MCDI_SET_DWORD(inbuf, SENSOR_INFO_EXT_IN_PAGE, |
378 | page); | 363 | page); |
@@ -453,28 +438,22 @@ int efx_mcdi_mon_probe(struct efx_nic *efx) | |||
453 | if (min1 != max1) { | 438 | if (min1 != max1) { |
454 | snprintf(name, sizeof(name), "%s%u_input", | 439 | snprintf(name, sizeof(name), "%s%u_input", |
455 | hwmon_prefix, hwmon_index); | 440 | hwmon_prefix, hwmon_index); |
456 | rc = efx_mcdi_mon_add_attr( | 441 | efx_mcdi_mon_add_attr( |
457 | efx, name, efx_mcdi_mon_show_value, i, type, 0); | 442 | efx, name, efx_mcdi_mon_show_value, i, type, 0); |
458 | if (rc) | ||
459 | goto fail; | ||
460 | 443 | ||
461 | if (hwmon_type != EFX_HWMON_POWER) { | 444 | if (hwmon_type != EFX_HWMON_POWER) { |
462 | snprintf(name, sizeof(name), "%s%u_min", | 445 | snprintf(name, sizeof(name), "%s%u_min", |
463 | hwmon_prefix, hwmon_index); | 446 | hwmon_prefix, hwmon_index); |
464 | rc = efx_mcdi_mon_add_attr( | 447 | efx_mcdi_mon_add_attr( |
465 | efx, name, efx_mcdi_mon_show_limit, | 448 | efx, name, efx_mcdi_mon_show_limit, |
466 | i, type, min1); | 449 | i, type, min1); |
467 | if (rc) | ||
468 | goto fail; | ||
469 | } | 450 | } |
470 | 451 | ||
471 | snprintf(name, sizeof(name), "%s%u_max", | 452 | snprintf(name, sizeof(name), "%s%u_max", |
472 | hwmon_prefix, hwmon_index); | 453 | hwmon_prefix, hwmon_index); |
473 | rc = efx_mcdi_mon_add_attr( | 454 | efx_mcdi_mon_add_attr( |
474 | efx, name, efx_mcdi_mon_show_limit, | 455 | efx, name, efx_mcdi_mon_show_limit, |
475 | i, type, max1); | 456 | i, type, max1); |
476 | if (rc) | ||
477 | goto fail; | ||
478 | 457 | ||
479 | if (min2 != max2) { | 458 | if (min2 != max2) { |
480 | /* Assume max2 is critical value. | 459 | /* Assume max2 is critical value. |
@@ -482,32 +461,38 @@ int efx_mcdi_mon_probe(struct efx_nic *efx) | |||
482 | */ | 461 | */ |
483 | snprintf(name, sizeof(name), "%s%u_crit", | 462 | snprintf(name, sizeof(name), "%s%u_crit", |
484 | hwmon_prefix, hwmon_index); | 463 | hwmon_prefix, hwmon_index); |
485 | rc = efx_mcdi_mon_add_attr( | 464 | efx_mcdi_mon_add_attr( |
486 | efx, name, efx_mcdi_mon_show_limit, | 465 | efx, name, efx_mcdi_mon_show_limit, |
487 | i, type, max2); | 466 | i, type, max2); |
488 | if (rc) | ||
489 | goto fail; | ||
490 | } | 467 | } |
491 | } | 468 | } |
492 | 469 | ||
493 | snprintf(name, sizeof(name), "%s%u_alarm", | 470 | snprintf(name, sizeof(name), "%s%u_alarm", |
494 | hwmon_prefix, hwmon_index); | 471 | hwmon_prefix, hwmon_index); |
495 | rc = efx_mcdi_mon_add_attr( | 472 | efx_mcdi_mon_add_attr( |
496 | efx, name, efx_mcdi_mon_show_alarm, i, type, 0); | 473 | efx, name, efx_mcdi_mon_show_alarm, i, type, 0); |
497 | if (rc) | ||
498 | goto fail; | ||
499 | 474 | ||
500 | if (type < ARRAY_SIZE(efx_mcdi_sensor_type) && | 475 | if (type < ARRAY_SIZE(efx_mcdi_sensor_type) && |
501 | efx_mcdi_sensor_type[type].label) { | 476 | efx_mcdi_sensor_type[type].label) { |
502 | snprintf(name, sizeof(name), "%s%u_label", | 477 | snprintf(name, sizeof(name), "%s%u_label", |
503 | hwmon_prefix, hwmon_index); | 478 | hwmon_prefix, hwmon_index); |
504 | rc = efx_mcdi_mon_add_attr( | 479 | efx_mcdi_mon_add_attr( |
505 | efx, name, efx_mcdi_mon_show_label, i, type, 0); | 480 | efx, name, efx_mcdi_mon_show_label, i, type, 0); |
506 | if (rc) | ||
507 | goto fail; | ||
508 | } | 481 | } |
509 | } | 482 | } |
510 | 483 | ||
484 | hwmon_register: | ||
485 | hwmon->groups[0] = &hwmon->group; | ||
486 | hwmon->device = hwmon_device_register_with_groups(&efx->pci_dev->dev, | ||
487 | KBUILD_MODNAME, NULL, | ||
488 | hwmon->groups); | ||
489 | if (IS_ERR(hwmon->device)) { | ||
490 | rc = PTR_ERR(hwmon->device); | ||
491 | goto fail; | ||
492 | } | ||
493 | |||
494 | return 0; | ||
495 | |||
511 | fail: | 496 | fail: |
512 | efx_mcdi_mon_remove(efx); | 497 | efx_mcdi_mon_remove(efx); |
513 | return rc; | 498 | return rc; |
@@ -516,14 +501,11 @@ fail: | |||
516 | void efx_mcdi_mon_remove(struct efx_nic *efx) | 501 | void efx_mcdi_mon_remove(struct efx_nic *efx) |
517 | { | 502 | { |
518 | struct efx_mcdi_mon *hwmon = efx_mcdi_mon(efx); | 503 | struct efx_mcdi_mon *hwmon = efx_mcdi_mon(efx); |
519 | unsigned int i; | ||
520 | 504 | ||
521 | for (i = 0; i < hwmon->n_attrs; i++) | ||
522 | device_remove_file(&efx->pci_dev->dev, | ||
523 | &hwmon->attrs[i].dev_attr); | ||
524 | kfree(hwmon->attrs); | ||
525 | if (hwmon->device) | 505 | if (hwmon->device) |
526 | hwmon_device_unregister(hwmon->device); | 506 | hwmon_device_unregister(hwmon->device); |
507 | kfree(hwmon->attrs); | ||
508 | kfree(hwmon->group.attrs); | ||
527 | efx_nic_free_buffer(efx, &hwmon->dma_buf); | 509 | efx_nic_free_buffer(efx, &hwmon->dma_buf); |
528 | } | 510 | } |
529 | 511 | ||
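The sfc conversion makes the same hwmon-with-groups move but with a dynamically sized attribute set: efx_mcdi_mon_add_attr() now just appends to hwmon->group.attrs, the group goes into a NULL-terminated groups[] array, and one hwmon_device_register_with_groups() call publishes everything. The explicit "name" attribute is dropped because the core derives it from the name argument, and with NULL drvdata the show callbacks read the efx pointer from dev->parent, as the earlier hunk shows. A trimmed sketch of the dynamic-group shape; foo_add_sensor_attrs() and parent_dev are hypothetical stand-ins:

	/* One slot per attribute plus the terminating NULL entry. */
	hwmon->group.attrs = kcalloc(n_attrs + 1, sizeof(struct attribute *),
				     GFP_KERNEL);
	if (!hwmon->group.attrs)
		return -ENOMEM;

	for (i = 0; i < n_sensors; i++)
		foo_add_sensor_attrs(hwmon, i);		/* fills group.attrs[] */

	hwmon->groups[0] = &hwmon->group;		/* groups[1] stays NULL */
	hwmon->device = hwmon_device_register_with_groups(parent_dev,
							   KBUILD_MODNAME,
							   NULL, hwmon->groups);
	if (IS_ERR(hwmon->device))
		return PTR_ERR(hwmon->device);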
diff --git a/drivers/net/ethernet/smsc/smc91x.h b/drivers/net/ethernet/smsc/smc91x.h index c9d4c872e81d..749654b976bc 100644 --- a/drivers/net/ethernet/smsc/smc91x.h +++ b/drivers/net/ethernet/smsc/smc91x.h | |||
@@ -46,7 +46,8 @@ | |||
46 | defined(CONFIG_MACH_LITTLETON) ||\ | 46 | defined(CONFIG_MACH_LITTLETON) ||\ |
47 | defined(CONFIG_MACH_ZYLONITE2) ||\ | 47 | defined(CONFIG_MACH_ZYLONITE2) ||\ |
48 | defined(CONFIG_ARCH_VIPER) ||\ | 48 | defined(CONFIG_ARCH_VIPER) ||\ |
49 | defined(CONFIG_MACH_STARGATE2) | 49 | defined(CONFIG_MACH_STARGATE2) ||\ |
50 | defined(CONFIG_ARCH_VERSATILE) | ||
50 | 51 | ||
51 | #include <asm/mach-types.h> | 52 | #include <asm/mach-types.h> |
52 | 53 | ||
@@ -154,6 +155,8 @@ static inline void SMC_outw(u16 val, void __iomem *ioaddr, int reg) | |||
154 | #define SMC_outl(v, a, r) writel(v, (a) + (r)) | 155 | #define SMC_outl(v, a, r) writel(v, (a) + (r)) |
155 | #define SMC_insl(a, r, p, l) readsl((a) + (r), p, l) | 156 | #define SMC_insl(a, r, p, l) readsl((a) + (r), p, l) |
156 | #define SMC_outsl(a, r, p, l) writesl((a) + (r), p, l) | 157 | #define SMC_outsl(a, r, p, l) writesl((a) + (r), p, l) |
158 | #define SMC_insw(a, r, p, l) readsw((a) + (r), p, l) | ||
159 | #define SMC_outsw(a, r, p, l) writesw((a) + (r), p, l) | ||
157 | #define SMC_IRQ_FLAGS (-1) /* from resource */ | 160 | #define SMC_IRQ_FLAGS (-1) /* from resource */ |
158 | 161 | ||
159 | /* We actually can't write halfwords properly if not word aligned */ | 162 | /* We actually can't write halfwords properly if not word aligned */ |
@@ -206,23 +209,6 @@ SMC_outw(u16 val, void __iomem *ioaddr, int reg) | |||
206 | #define RPC_LSA_DEFAULT RPC_LED_TX_RX | 209 | #define RPC_LSA_DEFAULT RPC_LED_TX_RX |
207 | #define RPC_LSB_DEFAULT RPC_LED_100_10 | 210 | #define RPC_LSB_DEFAULT RPC_LED_100_10 |
208 | 211 | ||
209 | #elif defined(CONFIG_ARCH_VERSATILE) | ||
210 | |||
211 | #define SMC_CAN_USE_8BIT 1 | ||
212 | #define SMC_CAN_USE_16BIT 1 | ||
213 | #define SMC_CAN_USE_32BIT 1 | ||
214 | #define SMC_NOWAIT 1 | ||
215 | |||
216 | #define SMC_inb(a, r) readb((a) + (r)) | ||
217 | #define SMC_inw(a, r) readw((a) + (r)) | ||
218 | #define SMC_inl(a, r) readl((a) + (r)) | ||
219 | #define SMC_outb(v, a, r) writeb(v, (a) + (r)) | ||
220 | #define SMC_outw(v, a, r) writew(v, (a) + (r)) | ||
221 | #define SMC_outl(v, a, r) writel(v, (a) + (r)) | ||
222 | #define SMC_insl(a, r, p, l) readsl((a) + (r), p, l) | ||
223 | #define SMC_outsl(a, r, p, l) writesl((a) + (r), p, l) | ||
224 | #define SMC_IRQ_FLAGS (-1) /* from resource */ | ||
225 | |||
226 | #elif defined(CONFIG_MN10300) | 212 | #elif defined(CONFIG_MN10300) |
227 | 213 | ||
228 | /* | 214 | /* |
diff --git a/drivers/net/ethernet/via/via-velocity.c b/drivers/net/ethernet/via/via-velocity.c index d022bf936572..ad61d26a44f3 100644 --- a/drivers/net/ethernet/via/via-velocity.c +++ b/drivers/net/ethernet/via/via-velocity.c | |||
@@ -2172,16 +2172,13 @@ static int velocity_poll(struct napi_struct *napi, int budget) | |||
2172 | unsigned int rx_done; | 2172 | unsigned int rx_done; |
2173 | unsigned long flags; | 2173 | unsigned long flags; |
2174 | 2174 | ||
2175 | spin_lock_irqsave(&vptr->lock, flags); | ||
2176 | /* | 2175 | /* |
2177 | * Do rx and tx twice for performance (taken from the VIA | 2176 | * Do rx and tx twice for performance (taken from the VIA |
2178 | * out-of-tree driver). | 2177 | * out-of-tree driver). |
2179 | */ | 2178 | */ |
2180 | rx_done = velocity_rx_srv(vptr, budget / 2); | 2179 | rx_done = velocity_rx_srv(vptr, budget); |
2181 | velocity_tx_srv(vptr); | 2180 | spin_lock_irqsave(&vptr->lock, flags); |
2182 | rx_done += velocity_rx_srv(vptr, budget - rx_done); | ||
2183 | velocity_tx_srv(vptr); | 2181 | velocity_tx_srv(vptr); |
2184 | |||
2185 | /* If budget not fully consumed, exit the polling mode */ | 2182 | /* If budget not fully consumed, exit the polling mode */ |
2186 | if (rx_done < budget) { | 2183 | if (rx_done < budget) { |
2187 | napi_complete(napi); | 2184 | napi_complete(napi); |
@@ -2342,6 +2339,8 @@ static int velocity_change_mtu(struct net_device *dev, int new_mtu) | |||
2342 | if (ret < 0) | 2339 | if (ret < 0) |
2343 | goto out_free_tmp_vptr_1; | 2340 | goto out_free_tmp_vptr_1; |
2344 | 2341 | ||
2342 | napi_disable(&vptr->napi); | ||
2343 | |||
2345 | spin_lock_irqsave(&vptr->lock, flags); | 2344 | spin_lock_irqsave(&vptr->lock, flags); |
2346 | 2345 | ||
2347 | netif_stop_queue(dev); | 2346 | netif_stop_queue(dev); |
@@ -2362,6 +2361,8 @@ static int velocity_change_mtu(struct net_device *dev, int new_mtu) | |||
2362 | 2361 | ||
2363 | velocity_give_many_rx_descs(vptr); | 2362 | velocity_give_many_rx_descs(vptr); |
2364 | 2363 | ||
2364 | napi_enable(&vptr->napi); | ||
2365 | |||
2365 | mac_enable_int(vptr->mac_regs); | 2366 | mac_enable_int(vptr->mac_regs); |
2366 | netif_start_queue(dev); | 2367 | netif_start_queue(dev); |
2367 | 2368 | ||
diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c index dc76670c2f2a..9093004f9b63 100644 --- a/drivers/net/macvtap.c +++ b/drivers/net/macvtap.c | |||
@@ -744,7 +744,7 @@ err: | |||
744 | rcu_read_lock(); | 744 | rcu_read_lock(); |
745 | vlan = rcu_dereference(q->vlan); | 745 | vlan = rcu_dereference(q->vlan); |
746 | if (vlan) | 746 | if (vlan) |
747 | vlan->dev->stats.tx_dropped++; | 747 | this_cpu_inc(vlan->pcpu_stats->tx_dropped); |
748 | rcu_read_unlock(); | 748 | rcu_read_unlock(); |
749 | 749 | ||
750 | return err; | 750 | return err; |
@@ -767,7 +767,6 @@ static ssize_t macvtap_put_user(struct macvtap_queue *q, | |||
767 | const struct sk_buff *skb, | 767 | const struct sk_buff *skb, |
768 | const struct iovec *iv, int len) | 768 | const struct iovec *iv, int len) |
769 | { | 769 | { |
770 | struct macvlan_dev *vlan; | ||
771 | int ret; | 770 | int ret; |
772 | int vnet_hdr_len = 0; | 771 | int vnet_hdr_len = 0; |
773 | int vlan_offset = 0; | 772 | int vlan_offset = 0; |
@@ -821,15 +820,6 @@ static ssize_t macvtap_put_user(struct macvtap_queue *q, | |||
821 | copied += len; | 820 | copied += len; |
822 | 821 | ||
823 | done: | 822 | done: |
824 | rcu_read_lock(); | ||
825 | vlan = rcu_dereference(q->vlan); | ||
826 | if (vlan) { | ||
827 | preempt_disable(); | ||
828 | macvlan_count_rx(vlan, copied - vnet_hdr_len, ret == 0, 0); | ||
829 | preempt_enable(); | ||
830 | } | ||
831 | rcu_read_unlock(); | ||
832 | |||
833 | return ret ? ret : copied; | 823 | return ret ? ret : copied; |
834 | } | 824 | } |
835 | 825 | ||
diff --git a/drivers/net/phy/vitesse.c b/drivers/net/phy/vitesse.c index 508e4359338b..14372c65a7e8 100644 --- a/drivers/net/phy/vitesse.c +++ b/drivers/net/phy/vitesse.c | |||
@@ -64,6 +64,7 @@ | |||
64 | 64 | ||
65 | #define PHY_ID_VSC8234 0x000fc620 | 65 | #define PHY_ID_VSC8234 0x000fc620 |
66 | #define PHY_ID_VSC8244 0x000fc6c0 | 66 | #define PHY_ID_VSC8244 0x000fc6c0 |
67 | #define PHY_ID_VSC8514 0x00070670 | ||
67 | #define PHY_ID_VSC8574 0x000704a0 | 68 | #define PHY_ID_VSC8574 0x000704a0 |
68 | #define PHY_ID_VSC8662 0x00070660 | 69 | #define PHY_ID_VSC8662 0x00070660 |
69 | #define PHY_ID_VSC8221 0x000fc550 | 70 | #define PHY_ID_VSC8221 0x000fc550 |
@@ -131,6 +132,7 @@ static int vsc82xx_config_intr(struct phy_device *phydev) | |||
131 | err = phy_write(phydev, MII_VSC8244_IMASK, | 132 | err = phy_write(phydev, MII_VSC8244_IMASK, |
132 | (phydev->drv->phy_id == PHY_ID_VSC8234 || | 133 | (phydev->drv->phy_id == PHY_ID_VSC8234 || |
133 | phydev->drv->phy_id == PHY_ID_VSC8244 || | 134 | phydev->drv->phy_id == PHY_ID_VSC8244 || |
135 | phydev->drv->phy_id == PHY_ID_VSC8514 || | ||
134 | phydev->drv->phy_id == PHY_ID_VSC8574) ? | 136 | phydev->drv->phy_id == PHY_ID_VSC8574) ? |
135 | MII_VSC8244_IMASK_MASK : | 137 | MII_VSC8244_IMASK_MASK : |
136 | MII_VSC8221_IMASK_MASK); | 138 | MII_VSC8221_IMASK_MASK); |
@@ -246,6 +248,18 @@ static struct phy_driver vsc82xx_driver[] = { | |||
246 | .config_intr = &vsc82xx_config_intr, | 248 | .config_intr = &vsc82xx_config_intr, |
247 | .driver = { .owner = THIS_MODULE,}, | 249 | .driver = { .owner = THIS_MODULE,}, |
248 | }, { | 250 | }, { |
251 | .phy_id = PHY_ID_VSC8514, | ||
252 | .name = "Vitesse VSC8514", | ||
253 | .phy_id_mask = 0x000ffff0, | ||
254 | .features = PHY_GBIT_FEATURES, | ||
255 | .flags = PHY_HAS_INTERRUPT, | ||
256 | .config_init = &vsc824x_config_init, | ||
257 | .config_aneg = &vsc82x4_config_aneg, | ||
258 | .read_status = &genphy_read_status, | ||
259 | .ack_interrupt = &vsc824x_ack_interrupt, | ||
260 | .config_intr = &vsc82xx_config_intr, | ||
261 | .driver = { .owner = THIS_MODULE,}, | ||
262 | }, { | ||
249 | .phy_id = PHY_ID_VSC8574, | 263 | .phy_id = PHY_ID_VSC8574, |
250 | .name = "Vitesse VSC8574", | 264 | .name = "Vitesse VSC8574", |
251 | .phy_id_mask = 0x000ffff0, | 265 | .phy_id_mask = 0x000ffff0, |
@@ -315,6 +329,7 @@ module_exit(vsc82xx_exit); | |||
315 | static struct mdio_device_id __maybe_unused vitesse_tbl[] = { | 329 | static struct mdio_device_id __maybe_unused vitesse_tbl[] = { |
316 | { PHY_ID_VSC8234, 0x000ffff0 }, | 330 | { PHY_ID_VSC8234, 0x000ffff0 }, |
317 | { PHY_ID_VSC8244, 0x000fffc0 }, | 331 | { PHY_ID_VSC8244, 0x000fffc0 }, |
332 | { PHY_ID_VSC8514, 0x000ffff0 }, | ||
318 | { PHY_ID_VSC8574, 0x000ffff0 }, | 333 | { PHY_ID_VSC8574, 0x000ffff0 }, |
319 | { PHY_ID_VSC8662, 0x000ffff0 }, | 334 | { PHY_ID_VSC8662, 0x000ffff0 }, |
320 | { PHY_ID_VSC8221, 0x000ffff0 }, | 335 | { PHY_ID_VSC8221, 0x000ffff0 }, |
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c index 34b0de09d881..736050d6b451 100644 --- a/drivers/net/team/team.c +++ b/drivers/net/team/team.c | |||
@@ -1366,6 +1366,8 @@ static int team_user_linkup_option_get(struct team *team, | |||
1366 | return 0; | 1366 | return 0; |
1367 | } | 1367 | } |
1368 | 1368 | ||
1369 | static void __team_carrier_check(struct team *team); | ||
1370 | |||
1369 | static int team_user_linkup_option_set(struct team *team, | 1371 | static int team_user_linkup_option_set(struct team *team, |
1370 | struct team_gsetter_ctx *ctx) | 1372 | struct team_gsetter_ctx *ctx) |
1371 | { | 1373 | { |
@@ -1373,6 +1375,7 @@ static int team_user_linkup_option_set(struct team *team, | |||
1373 | 1375 | ||
1374 | port->user.linkup = ctx->data.bool_val; | 1376 | port->user.linkup = ctx->data.bool_val; |
1375 | team_refresh_port_linkup(port); | 1377 | team_refresh_port_linkup(port); |
1378 | __team_carrier_check(port->team); | ||
1376 | return 0; | 1379 | return 0; |
1377 | } | 1380 | } |
1378 | 1381 | ||
@@ -1392,6 +1395,7 @@ static int team_user_linkup_en_option_set(struct team *team, | |||
1392 | 1395 | ||
1393 | port->user.linkup_enabled = ctx->data.bool_val; | 1396 | port->user.linkup_enabled = ctx->data.bool_val; |
1394 | team_refresh_port_linkup(port); | 1397 | team_refresh_port_linkup(port); |
1398 | __team_carrier_check(port->team); | ||
1395 | return 0; | 1399 | return 0; |
1396 | } | 1400 | } |
1397 | 1401 | ||
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index 7bab4de658a9..916241d16c67 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c | |||
@@ -299,35 +299,76 @@ static struct sk_buff *page_to_skb(struct receive_queue *rq, | |||
299 | return skb; | 299 | return skb; |
300 | } | 300 | } |
301 | 301 | ||
302 | static int receive_mergeable(struct receive_queue *rq, struct sk_buff *head_skb) | 302 | static struct sk_buff *receive_small(void *buf, unsigned int len) |
303 | { | 303 | { |
304 | struct skb_vnet_hdr *hdr = skb_vnet_hdr(head_skb); | 304 | struct sk_buff * skb = buf; |
305 | |||
306 | len -= sizeof(struct virtio_net_hdr); | ||
307 | skb_trim(skb, len); | ||
308 | |||
309 | return skb; | ||
310 | } | ||
311 | |||
312 | static struct sk_buff *receive_big(struct net_device *dev, | ||
313 | struct receive_queue *rq, | ||
314 | void *buf, | ||
315 | unsigned int len) | ||
316 | { | ||
317 | struct page *page = buf; | ||
318 | struct sk_buff *skb = page_to_skb(rq, page, 0, len, PAGE_SIZE); | ||
319 | |||
320 | if (unlikely(!skb)) | ||
321 | goto err; | ||
322 | |||
323 | return skb; | ||
324 | |||
325 | err: | ||
326 | dev->stats.rx_dropped++; | ||
327 | give_pages(rq, page); | ||
328 | return NULL; | ||
329 | } | ||
330 | |||
331 | static struct sk_buff *receive_mergeable(struct net_device *dev, | ||
332 | struct receive_queue *rq, | ||
333 | void *buf, | ||
334 | unsigned int len) | ||
335 | { | ||
336 | struct skb_vnet_hdr *hdr = buf; | ||
337 | int num_buf = hdr->mhdr.num_buffers; | ||
338 | struct page *page = virt_to_head_page(buf); | ||
339 | int offset = buf - page_address(page); | ||
340 | struct sk_buff *head_skb = page_to_skb(rq, page, offset, len, | ||
341 | MERGE_BUFFER_LEN); | ||
305 | struct sk_buff *curr_skb = head_skb; | 342 | struct sk_buff *curr_skb = head_skb; |
306 | char *buf; | ||
307 | struct page *page; | ||
308 | int num_buf, len, offset; | ||
309 | 343 | ||
310 | num_buf = hdr->mhdr.num_buffers; | 344 | if (unlikely(!curr_skb)) |
345 | goto err_skb; | ||
346 | |||
311 | while (--num_buf) { | 347 | while (--num_buf) { |
312 | int num_skb_frags = skb_shinfo(curr_skb)->nr_frags; | 348 | int num_skb_frags; |
349 | |||
313 | buf = virtqueue_get_buf(rq->vq, &len); | 350 | buf = virtqueue_get_buf(rq->vq, &len); |
314 | if (unlikely(!buf)) { | 351 | if (unlikely(!buf)) { |
315 | pr_debug("%s: rx error: %d buffers missing\n", | 352 | pr_debug("%s: rx error: %d buffers out of %d missing\n", |
316 | head_skb->dev->name, hdr->mhdr.num_buffers); | 353 | dev->name, num_buf, hdr->mhdr.num_buffers); |
317 | head_skb->dev->stats.rx_length_errors++; | 354 | dev->stats.rx_length_errors++; |
318 | return -EINVAL; | 355 | goto err_buf; |
319 | } | 356 | } |
320 | if (unlikely(len > MERGE_BUFFER_LEN)) { | 357 | if (unlikely(len > MERGE_BUFFER_LEN)) { |
321 | pr_debug("%s: rx error: merge buffer too long\n", | 358 | pr_debug("%s: rx error: merge buffer too long\n", |
322 | head_skb->dev->name); | 359 | dev->name); |
323 | len = MERGE_BUFFER_LEN; | 360 | len = MERGE_BUFFER_LEN; |
324 | } | 361 | } |
362 | |||
363 | page = virt_to_head_page(buf); | ||
364 | --rq->num; | ||
365 | |||
366 | num_skb_frags = skb_shinfo(curr_skb)->nr_frags; | ||
325 | if (unlikely(num_skb_frags == MAX_SKB_FRAGS)) { | 367 | if (unlikely(num_skb_frags == MAX_SKB_FRAGS)) { |
326 | struct sk_buff *nskb = alloc_skb(0, GFP_ATOMIC); | 368 | struct sk_buff *nskb = alloc_skb(0, GFP_ATOMIC); |
327 | if (unlikely(!nskb)) { | 369 | |
328 | head_skb->dev->stats.rx_dropped++; | 370 | if (unlikely(!nskb)) |
329 | return -ENOMEM; | 371 | goto err_skb; |
330 | } | ||
331 | if (curr_skb == head_skb) | 372 | if (curr_skb == head_skb) |
332 | skb_shinfo(curr_skb)->frag_list = nskb; | 373 | skb_shinfo(curr_skb)->frag_list = nskb; |
333 | else | 374 | else |
@@ -341,8 +382,7 @@ static int receive_mergeable(struct receive_queue *rq, struct sk_buff *head_skb) | |||
341 | head_skb->len += len; | 382 | head_skb->len += len; |
342 | head_skb->truesize += MERGE_BUFFER_LEN; | 383 | head_skb->truesize += MERGE_BUFFER_LEN; |
343 | } | 384 | } |
344 | page = virt_to_head_page(buf); | 385 | offset = buf - page_address(page); |
345 | offset = buf - (char *)page_address(page); | ||
346 | if (skb_can_coalesce(curr_skb, num_skb_frags, page, offset)) { | 386 | if (skb_can_coalesce(curr_skb, num_skb_frags, page, offset)) { |
347 | put_page(page); | 387 | put_page(page); |
348 | skb_coalesce_rx_frag(curr_skb, num_skb_frags - 1, | 388 | skb_coalesce_rx_frag(curr_skb, num_skb_frags - 1, |
@@ -351,9 +391,28 @@ static int receive_mergeable(struct receive_queue *rq, struct sk_buff *head_skb) | |||
351 | skb_add_rx_frag(curr_skb, num_skb_frags, page, | 391 | skb_add_rx_frag(curr_skb, num_skb_frags, page, |
352 | offset, len, MERGE_BUFFER_LEN); | 392 | offset, len, MERGE_BUFFER_LEN); |
353 | } | 393 | } |
394 | } | ||
395 | |||
396 | return head_skb; | ||
397 | |||
398 | err_skb: | ||
399 | put_page(page); | ||
400 | while (--num_buf) { | ||
401 | buf = virtqueue_get_buf(rq->vq, &len); | ||
402 | if (unlikely(!buf)) { | ||
403 | pr_debug("%s: rx error: %d buffers missing\n", | ||
404 | dev->name, num_buf); | ||
405 | dev->stats.rx_length_errors++; | ||
406 | break; | ||
407 | } | ||
408 | page = virt_to_head_page(buf); | ||
409 | put_page(page); | ||
354 | --rq->num; | 410 | --rq->num; |
355 | } | 411 | } |
356 | return 0; | 412 | err_buf: |
413 | dev->stats.rx_dropped++; | ||
414 | dev_kfree_skb(head_skb); | ||
415 | return NULL; | ||
357 | } | 416 | } |
358 | 417 | ||
359 | static void receive_buf(struct receive_queue *rq, void *buf, unsigned int len) | 418 | static void receive_buf(struct receive_queue *rq, void *buf, unsigned int len) |
@@ -362,7 +421,6 @@ static void receive_buf(struct receive_queue *rq, void *buf, unsigned int len) | |||
362 | struct net_device *dev = vi->dev; | 421 | struct net_device *dev = vi->dev; |
363 | struct virtnet_stats *stats = this_cpu_ptr(vi->stats); | 422 | struct virtnet_stats *stats = this_cpu_ptr(vi->stats); |
364 | struct sk_buff *skb; | 423 | struct sk_buff *skb; |
365 | struct page *page; | ||
366 | struct skb_vnet_hdr *hdr; | 424 | struct skb_vnet_hdr *hdr; |
367 | 425 | ||
368 | if (unlikely(len < sizeof(struct virtio_net_hdr) + ETH_HLEN)) { | 426 | if (unlikely(len < sizeof(struct virtio_net_hdr) + ETH_HLEN)) { |
@@ -377,33 +435,15 @@ static void receive_buf(struct receive_queue *rq, void *buf, unsigned int len) | |||
377 | return; | 435 | return; |
378 | } | 436 | } |
379 | 437 | ||
380 | if (!vi->mergeable_rx_bufs && !vi->big_packets) { | 438 | if (vi->mergeable_rx_bufs) |
381 | skb = buf; | 439 | skb = receive_mergeable(dev, rq, buf, len); |
382 | len -= sizeof(struct virtio_net_hdr); | 440 | else if (vi->big_packets) |
383 | skb_trim(skb, len); | 441 | skb = receive_big(dev, rq, buf, len); |
384 | } else if (vi->mergeable_rx_bufs) { | 442 | else |
385 | struct page *page = virt_to_head_page(buf); | 443 | skb = receive_small(buf, len); |
386 | skb = page_to_skb(rq, page, | 444 | |
387 | (char *)buf - (char *)page_address(page), | 445 | if (unlikely(!skb)) |
388 | len, MERGE_BUFFER_LEN); | 446 | return; |
389 | if (unlikely(!skb)) { | ||
390 | dev->stats.rx_dropped++; | ||
391 | put_page(page); | ||
392 | return; | ||
393 | } | ||
394 | if (receive_mergeable(rq, skb)) { | ||
395 | dev_kfree_skb(skb); | ||
396 | return; | ||
397 | } | ||
398 | } else { | ||
399 | page = buf; | ||
400 | skb = page_to_skb(rq, page, 0, len, PAGE_SIZE); | ||
401 | if (unlikely(!skb)) { | ||
402 | dev->stats.rx_dropped++; | ||
403 | give_pages(rq, page); | ||
404 | return; | ||
405 | } | ||
406 | } | ||
407 | 447 | ||
408 | hdr = skb_vnet_hdr(skb); | 448 | hdr = skb_vnet_hdr(skb); |
409 | 449 | ||
@@ -1084,7 +1124,7 @@ static void virtnet_set_rx_mode(struct net_device *dev) | |||
1084 | if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MAC, | 1124 | if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MAC, |
1085 | VIRTIO_NET_CTRL_MAC_TABLE_SET, | 1125 | VIRTIO_NET_CTRL_MAC_TABLE_SET, |
1086 | sg, NULL)) | 1126 | sg, NULL)) |
1087 | dev_warn(&dev->dev, "Failed to set MAC fitler table.\n"); | 1127 | dev_warn(&dev->dev, "Failed to set MAC filter table.\n"); |
1088 | 1128 | ||
1089 | kfree(buf); | 1129 | kfree(buf); |
1090 | } | 1130 | } |
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c index 919b6509455c..64f0e0d18b81 100644 --- a/drivers/net/xen-netback/netback.c +++ b/drivers/net/xen-netback/netback.c | |||
@@ -39,6 +39,7 @@ | |||
39 | #include <linux/udp.h> | 39 | #include <linux/udp.h> |
40 | 40 | ||
41 | #include <net/tcp.h> | 41 | #include <net/tcp.h> |
42 | #include <net/ip6_checksum.h> | ||
42 | 43 | ||
43 | #include <xen/xen.h> | 44 | #include <xen/xen.h> |
44 | #include <xen/events.h> | 45 | #include <xen/events.h> |
diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c index bd6f743d87a7..892ea6161376 100644 --- a/drivers/scsi/pmcraid.c +++ b/drivers/scsi/pmcraid.c | |||
@@ -1404,11 +1404,22 @@ enum { | |||
1404 | }; | 1404 | }; |
1405 | #define PMCRAID_AEN_CMD_MAX (__PMCRAID_AEN_CMD_MAX - 1) | 1405 | #define PMCRAID_AEN_CMD_MAX (__PMCRAID_AEN_CMD_MAX - 1) |
1406 | 1406 | ||
1407 | static struct genl_multicast_group pmcraid_mcgrps[] = { | ||
1408 | { .name = "events", /* not really used - see ID discussion below */ }, | ||
1409 | }; | ||
1410 | |||
1407 | static struct genl_family pmcraid_event_family = { | 1411 | static struct genl_family pmcraid_event_family = { |
1408 | .id = GENL_ID_GENERATE, | 1412 | /* |
1413 | * Due to prior multicast group abuse (the code having assumed that | ||
1414 | * the family ID can be used as a multicast group ID) we need to | ||
1415 | * statically allocate a family (and thus group) ID. | ||
1416 | */ | ||
1417 | .id = GENL_ID_PMCRAID, | ||
1409 | .name = "pmcraid", | 1418 | .name = "pmcraid", |
1410 | .version = 1, | 1419 | .version = 1, |
1411 | .maxattr = PMCRAID_AEN_ATTR_MAX | 1420 | .maxattr = PMCRAID_AEN_ATTR_MAX, |
1421 | .mcgrps = pmcraid_mcgrps, | ||
1422 | .n_mcgrps = ARRAY_SIZE(pmcraid_mcgrps), | ||
1412 | }; | 1423 | }; |
1413 | 1424 | ||
1414 | /** | 1425 | /** |
@@ -1511,9 +1522,8 @@ static int pmcraid_notify_aen( | |||
1511 | return result; | 1522 | return result; |
1512 | } | 1523 | } |
1513 | 1524 | ||
1514 | result = | 1525 | result = genlmsg_multicast(&pmcraid_event_family, skb, |
1515 | genlmsg_multicast(&pmcraid_event_family, skb, 0, | 1526 | 0, 0, GFP_ATOMIC); |
1516 | pmcraid_event_family.id, GFP_ATOMIC); | ||
1517 | 1527 | ||
1518 | /* If there are no listeners, genlmsg_multicast may return non-zero | 1528 | /* If there are no listeners, genlmsg_multicast may return non-zero |
1519 | * value. | 1529 | * value. |
diff --git a/drivers/spi/spi-bcm2835.c b/drivers/spi/spi-bcm2835.c index 3ed666fe840a..9025edd7dc45 100644 --- a/drivers/spi/spi-bcm2835.c +++ b/drivers/spi/spi-bcm2835.c | |||
@@ -377,7 +377,7 @@ out_master_put: | |||
377 | 377 | ||
378 | static int bcm2835_spi_remove(struct platform_device *pdev) | 378 | static int bcm2835_spi_remove(struct platform_device *pdev) |
379 | { | 379 | { |
380 | struct spi_master *master = spi_master_get(platform_get_drvdata(pdev)); | 380 | struct spi_master *master = platform_get_drvdata(pdev); |
381 | struct bcm2835_spi *bs = spi_master_get_devdata(master); | 381 | struct bcm2835_spi *bs = spi_master_get_devdata(master); |
382 | 382 | ||
383 | free_irq(bs->irq, master); | 383 | free_irq(bs->irq, master); |
diff --git a/drivers/spi/spi-bcm63xx.c b/drivers/spi/spi-bcm63xx.c index 80d56b214eb5..469ecd876358 100644 --- a/drivers/spi/spi-bcm63xx.c +++ b/drivers/spi/spi-bcm63xx.c | |||
@@ -435,7 +435,7 @@ out: | |||
435 | 435 | ||
436 | static int bcm63xx_spi_remove(struct platform_device *pdev) | 436 | static int bcm63xx_spi_remove(struct platform_device *pdev) |
437 | { | 437 | { |
438 | struct spi_master *master = spi_master_get(platform_get_drvdata(pdev)); | 438 | struct spi_master *master = platform_get_drvdata(pdev); |
439 | struct bcm63xx_spi *bs = spi_master_get_devdata(master); | 439 | struct bcm63xx_spi *bs = spi_master_get_devdata(master); |
440 | 440 | ||
441 | /* reset spi block */ | 441 | /* reset spi block */ |
diff --git a/drivers/spi/spi-mpc512x-psc.c b/drivers/spi/spi-mpc512x-psc.c index 9602bbd8d7ea..87676587d783 100644 --- a/drivers/spi/spi-mpc512x-psc.c +++ b/drivers/spi/spi-mpc512x-psc.c | |||
@@ -557,7 +557,7 @@ free_master: | |||
557 | 557 | ||
558 | static int mpc512x_psc_spi_do_remove(struct device *dev) | 558 | static int mpc512x_psc_spi_do_remove(struct device *dev) |
559 | { | 559 | { |
560 | struct spi_master *master = spi_master_get(dev_get_drvdata(dev)); | 560 | struct spi_master *master = dev_get_drvdata(dev); |
561 | struct mpc512x_psc_spi *mps = spi_master_get_devdata(master); | 561 | struct mpc512x_psc_spi *mps = spi_master_get_devdata(master); |
562 | 562 | ||
563 | clk_disable_unprepare(mps->clk_mclk); | 563 | clk_disable_unprepare(mps->clk_mclk); |
diff --git a/drivers/spi/spi-mxs.c b/drivers/spi/spi-mxs.c index 73afb56c08cc..3adebfa22e3d 100644 --- a/drivers/spi/spi-mxs.c +++ b/drivers/spi/spi-mxs.c | |||
@@ -565,7 +565,7 @@ static int mxs_spi_remove(struct platform_device *pdev) | |||
565 | struct mxs_spi *spi; | 565 | struct mxs_spi *spi; |
566 | struct mxs_ssp *ssp; | 566 | struct mxs_ssp *ssp; |
567 | 567 | ||
568 | master = spi_master_get(platform_get_drvdata(pdev)); | 568 | master = platform_get_drvdata(pdev); |
569 | spi = spi_master_get_devdata(master); | 569 | spi = spi_master_get_devdata(master); |
570 | ssp = &spi->ssp; | 570 | ssp = &spi->ssp; |
571 | 571 | ||
diff --git a/drivers/spi/spi-pxa2xx.c b/drivers/spi/spi-pxa2xx.c index cb0e1f1137ad..7765b1999537 100644 --- a/drivers/spi/spi-pxa2xx.c +++ b/drivers/spi/spi-pxa2xx.c | |||
@@ -1073,6 +1073,8 @@ pxa2xx_spi_acpi_get_pdata(struct platform_device *pdev) | |||
1073 | static struct acpi_device_id pxa2xx_spi_acpi_match[] = { | 1073 | static struct acpi_device_id pxa2xx_spi_acpi_match[] = { |
1074 | { "INT33C0", 0 }, | 1074 | { "INT33C0", 0 }, |
1075 | { "INT33C1", 0 }, | 1075 | { "INT33C1", 0 }, |
1076 | { "INT3430", 0 }, | ||
1077 | { "INT3431", 0 }, | ||
1076 | { "80860F0E", 0 }, | 1078 | { "80860F0E", 0 }, |
1077 | { }, | 1079 | { }, |
1078 | }; | 1080 | }; |
@@ -1291,6 +1293,9 @@ static int pxa2xx_spi_resume(struct device *dev) | |||
1291 | /* Enable the SSP clock */ | 1293 | /* Enable the SSP clock */ |
1292 | clk_prepare_enable(ssp->clk); | 1294 | clk_prepare_enable(ssp->clk); |
1293 | 1295 | ||
1296 | /* Restore LPSS private register bits */ | ||
1297 | lpss_ssp_setup(drv_data); | ||
1298 | |||
1294 | /* Start the queue running */ | 1299 | /* Start the queue running */ |
1295 | status = spi_master_resume(drv_data->master); | 1300 | status = spi_master_resume(drv_data->master); |
1296 | if (status != 0) { | 1301 | if (status != 0) { |
diff --git a/drivers/spi/spi-rspi.c b/drivers/spi/spi-rspi.c index 58449ad4ad0d..9e829cee7357 100644 --- a/drivers/spi/spi-rspi.c +++ b/drivers/spi/spi-rspi.c | |||
@@ -885,14 +885,13 @@ static void rspi_release_dma(struct rspi_data *rspi) | |||
885 | 885 | ||
886 | static int rspi_remove(struct platform_device *pdev) | 886 | static int rspi_remove(struct platform_device *pdev) |
887 | { | 887 | { |
888 | struct rspi_data *rspi = spi_master_get(platform_get_drvdata(pdev)); | 888 | struct rspi_data *rspi = platform_get_drvdata(pdev); |
889 | 889 | ||
890 | spi_unregister_master(rspi->master); | 890 | spi_unregister_master(rspi->master); |
891 | rspi_release_dma(rspi); | 891 | rspi_release_dma(rspi); |
892 | free_irq(platform_get_irq(pdev, 0), rspi); | 892 | free_irq(platform_get_irq(pdev, 0), rspi); |
893 | clk_put(rspi->clk); | 893 | clk_put(rspi->clk); |
894 | iounmap(rspi->addr); | 894 | iounmap(rspi->addr); |
895 | spi_master_put(rspi->master); | ||
896 | 895 | ||
897 | return 0; | 896 | return 0; |
898 | } | 897 | } |
diff --git a/drivers/spi/spi-ti-qspi.c b/drivers/spi/spi-ti-qspi.c index 0b71270fbf67..4396bd448540 100644 --- a/drivers/spi/spi-ti-qspi.c +++ b/drivers/spi/spi-ti-qspi.c | |||
@@ -161,7 +161,7 @@ static int ti_qspi_setup(struct spi_device *spi) | |||
161 | qspi->spi_max_frequency, clk_div); | 161 | qspi->spi_max_frequency, clk_div); |
162 | 162 | ||
163 | ret = pm_runtime_get_sync(qspi->dev); | 163 | ret = pm_runtime_get_sync(qspi->dev); |
164 | if (ret) { | 164 | if (ret < 0) { |
165 | dev_err(qspi->dev, "pm_runtime_get_sync() failed\n"); | 165 | dev_err(qspi->dev, "pm_runtime_get_sync() failed\n"); |
166 | return ret; | 166 | return ret; |
167 | } | 167 | } |
@@ -459,11 +459,10 @@ static int ti_qspi_probe(struct platform_device *pdev) | |||
459 | if (!of_property_read_u32(np, "num-cs", &num_cs)) | 459 | if (!of_property_read_u32(np, "num-cs", &num_cs)) |
460 | master->num_chipselect = num_cs; | 460 | master->num_chipselect = num_cs; |
461 | 461 | ||
462 | platform_set_drvdata(pdev, master); | ||
463 | |||
464 | qspi = spi_master_get_devdata(master); | 462 | qspi = spi_master_get_devdata(master); |
465 | qspi->master = master; | 463 | qspi->master = master; |
466 | qspi->dev = &pdev->dev; | 464 | qspi->dev = &pdev->dev; |
465 | platform_set_drvdata(pdev, qspi); | ||
467 | 466 | ||
468 | r = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 467 | r = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
469 | 468 | ||
@@ -517,10 +516,26 @@ free_master: | |||
517 | 516 | ||
518 | static int ti_qspi_remove(struct platform_device *pdev) | 517 | static int ti_qspi_remove(struct platform_device *pdev) |
519 | { | 518 | { |
520 | struct ti_qspi *qspi = platform_get_drvdata(pdev); | 519 | struct spi_master *master; |
520 | struct ti_qspi *qspi; | ||
521 | int ret; | ||
522 | |||
523 | master = platform_get_drvdata(pdev); | ||
524 | qspi = spi_master_get_devdata(master); | ||
525 | |||
526 | ret = pm_runtime_get_sync(qspi->dev); | ||
527 | if (ret < 0) { | ||
528 | dev_err(qspi->dev, "pm_runtime_get_sync() failed\n"); | ||
529 | return ret; | ||
530 | } | ||
521 | 531 | ||
522 | ti_qspi_write(qspi, QSPI_WC_INT_DISABLE, QSPI_INTR_ENABLE_CLEAR_REG); | 532 | ti_qspi_write(qspi, QSPI_WC_INT_DISABLE, QSPI_INTR_ENABLE_CLEAR_REG); |
523 | 533 | ||
534 | pm_runtime_put(qspi->dev); | ||
535 | pm_runtime_disable(&pdev->dev); | ||
536 | |||
537 | spi_unregister_master(master); | ||
538 | |||
524 | return 0; | 539 | return 0; |
525 | } | 540 | } |
526 | 541 | ||
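Both ti_qspi hunks above hinge on pm_runtime_get_sync() returning a negative errno on failure while possibly returning 1 when the device is already powered, so only ret < 0 signals an error. A minimal sketch of that pattern, assuming a hypothetical foo_runtime_get() helper that is not part of this driver:

#include <linux/pm_runtime.h>

static int foo_runtime_get(struct device *dev)
{
	int ret = pm_runtime_get_sync(dev);

	/* 0 and positive returns both mean the device is now active. */
	return ret < 0 ? ret : 0;
}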
diff --git a/drivers/spi/spi-txx9.c b/drivers/spi/spi-txx9.c index 637cce2b8bdd..18c9bb2b5f39 100644 --- a/drivers/spi/spi-txx9.c +++ b/drivers/spi/spi-txx9.c | |||
@@ -425,7 +425,7 @@ exit: | |||
425 | 425 | ||
426 | static int txx9spi_remove(struct platform_device *dev) | 426 | static int txx9spi_remove(struct platform_device *dev) |
427 | { | 427 | { |
428 | struct spi_master *master = spi_master_get(platform_get_drvdata(dev)); | 428 | struct spi_master *master = platform_get_drvdata(dev); |
429 | struct txx9spi *c = spi_master_get_devdata(master); | 429 | struct txx9spi *c = spi_master_get_devdata(master); |
430 | 430 | ||
431 | destroy_workqueue(c->workqueue); | 431 | destroy_workqueue(c->workqueue); |
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c index 18cc625d887f..349ebba4b199 100644 --- a/drivers/spi/spi.c +++ b/drivers/spi/spi.c | |||
@@ -1415,7 +1415,7 @@ int devm_spi_register_master(struct device *dev, struct spi_master *master) | |||
1415 | return -ENOMEM; | 1415 | return -ENOMEM; |
1416 | 1416 | ||
1417 | ret = spi_register_master(master); | 1417 | ret = spi_register_master(master); |
1418 | if (ret != 0) { | 1418 | if (!ret) { |
1419 | *ptr = master; | 1419 | *ptr = master; |
1420 | devres_add(dev, ptr); | 1420 | devres_add(dev, ptr); |
1421 | } else { | 1421 | } else { |
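The spi.c hunk above flips an inverted success test: devm_spi_register_master() was registering its cleanup action only when spi_register_master() had failed. A minimal probe sketch of how callers use it once the check reads !ret; the foo_spi names are hypothetical and not taken from any driver in this series:

#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/spi/spi.h>

static int foo_spi_probe(struct platform_device *pdev)
{
	struct spi_master *master;
	int ret;

	/* Allocate a master with no extra driver-private data. */
	master = spi_alloc_master(&pdev->dev, 0);
	if (!master)
		return -ENOMEM;

	/*
	 * With the corrected condition the devres entry exists only on
	 * success, so the core unregisters the master automatically at
	 * unbind time; on failure we just drop our reference.
	 */
	ret = devm_spi_register_master(&pdev->dev, master);
	if (ret)
		spi_master_put(master);

	return ret;
}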
diff --git a/drivers/uio/uio.c b/drivers/uio/uio.c index 67beb8444930..f7beb6eb40c7 100644 --- a/drivers/uio/uio.c +++ b/drivers/uio/uio.c | |||
@@ -653,6 +653,8 @@ static int uio_mmap_physical(struct vm_area_struct *vma) | |||
653 | return -EINVAL; | 653 | return -EINVAL; |
654 | mem = idev->info->mem + mi; | 654 | mem = idev->info->mem + mi; |
655 | 655 | ||
656 | if (mem->addr & ~PAGE_MASK) | ||
657 | return -ENODEV; | ||
656 | if (vma->vm_end - vma->vm_start > mem->size) | 658 | if (vma->vm_end - vma->vm_start > mem->size) |
657 | return -EINVAL; | 659 | return -EINVAL; |
658 | 660 | ||
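The added check in uio_mmap_physical() rejects regions whose physical address is not page aligned. A worked illustration, assuming 4 KiB pages (PAGE_MASK == ~0xfffUL) and a made-up address; the helper below only mirrors the new test and is not part of the driver:

#include <linux/mm.h>

/* A region at 0x40000100 leaves 0x100 in the low bits, so this returns
 * false and the mmap request would now fail with -ENODEV. */
static bool foo_uio_region_mappable(phys_addr_t addr)
{
	return (addr & ~PAGE_MASK) == 0;
}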
diff --git a/drivers/xen/grant-table.c b/drivers/xen/grant-table.c index 62ccf5424ba8..028387192b60 100644 --- a/drivers/xen/grant-table.c +++ b/drivers/xen/grant-table.c | |||
@@ -930,9 +930,10 @@ int gnttab_map_refs(struct gnttab_map_grant_ref *map_ops, | |||
930 | ret = m2p_add_override(mfn, pages[i], kmap_ops ? | 930 | ret = m2p_add_override(mfn, pages[i], kmap_ops ? |
931 | &kmap_ops[i] : NULL); | 931 | &kmap_ops[i] : NULL); |
932 | if (ret) | 932 | if (ret) |
933 | return ret; | 933 | goto out; |
934 | } | 934 | } |
935 | 935 | ||
936 | out: | ||
936 | if (lazy) | 937 | if (lazy) |
937 | arch_leave_lazy_mmu_mode(); | 938 | arch_leave_lazy_mmu_mode(); |
938 | 939 | ||
@@ -969,9 +970,10 @@ int gnttab_unmap_refs(struct gnttab_unmap_grant_ref *unmap_ops, | |||
969 | ret = m2p_remove_override(pages[i], kmap_ops ? | 970 | ret = m2p_remove_override(pages[i], kmap_ops ? |
970 | &kmap_ops[i] : NULL); | 971 | &kmap_ops[i] : NULL); |
971 | if (ret) | 972 | if (ret) |
972 | return ret; | 973 | goto out; |
973 | } | 974 | } |
974 | 975 | ||
976 | out: | ||
975 | if (lazy) | 977 | if (lazy) |
976 | arch_leave_lazy_mmu_mode(); | 978 | arch_leave_lazy_mmu_mode(); |
977 | 979 | ||
diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c index a224bc74b6b9..1eac0731c349 100644 --- a/drivers/xen/swiotlb-xen.c +++ b/drivers/xen/swiotlb-xen.c | |||
@@ -555,6 +555,11 @@ xen_swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl, | |||
555 | sg_dma_len(sgl) = 0; | 555 | sg_dma_len(sgl) = 0; |
556 | return 0; | 556 | return 0; |
557 | } | 557 | } |
558 | xen_dma_map_page(hwdev, pfn_to_page(map >> PAGE_SHIFT), | ||
559 | map & ~PAGE_MASK, | ||
560 | sg->length, | ||
561 | dir, | ||
562 | attrs); | ||
558 | sg->dma_address = xen_phys_to_bus(map); | 563 | sg->dma_address = xen_phys_to_bus(map); |
559 | } else { | 564 | } else { |
560 | /* we are not interested in the dma_addr returned by | 565 | /* we are not interested in the dma_addr returned by |
diff --git a/fs/pipe.c b/fs/pipe.c --- a/fs/pipe.c +++ b/fs/pipe.c | |||
@@ -726,11 +726,25 @@ pipe_poll(struct file *filp, poll_table *wait) | |||
726 | return mask; | 726 | return mask; |
727 | } | 727 | } |
728 | 728 | ||
729 | static void put_pipe_info(struct inode *inode, struct pipe_inode_info *pipe) | ||
730 | { | ||
731 | int kill = 0; | ||
732 | |||
733 | spin_lock(&inode->i_lock); | ||
734 | if (!--pipe->files) { | ||
735 | inode->i_pipe = NULL; | ||
736 | kill = 1; | ||
737 | } | ||
738 | spin_unlock(&inode->i_lock); | ||
739 | |||
740 | if (kill) | ||
741 | free_pipe_info(pipe); | ||
742 | } | ||
743 | |||
729 | static int | 744 | static int |
730 | pipe_release(struct inode *inode, struct file *file) | 745 | pipe_release(struct inode *inode, struct file *file) |
731 | { | 746 | { |
732 | struct pipe_inode_info *pipe = inode->i_pipe; | 747 | struct pipe_inode_info *pipe = file->private_data; |
733 | int kill = 0; | ||
734 | 748 | ||
735 | __pipe_lock(pipe); | 749 | __pipe_lock(pipe); |
736 | if (file->f_mode & FMODE_READ) | 750 | if (file->f_mode & FMODE_READ) |
@@ -743,17 +757,9 @@ pipe_release(struct inode *inode, struct file *file) | |||
743 | kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN); | 757 | kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN); |
744 | kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT); | 758 | kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT); |
745 | } | 759 | } |
746 | spin_lock(&inode->i_lock); | ||
747 | if (!--pipe->files) { | ||
748 | inode->i_pipe = NULL; | ||
749 | kill = 1; | ||
750 | } | ||
751 | spin_unlock(&inode->i_lock); | ||
752 | __pipe_unlock(pipe); | 760 | __pipe_unlock(pipe); |
753 | 761 | ||
754 | if (kill) | 762 | put_pipe_info(inode, pipe); |
755 | free_pipe_info(pipe); | ||
756 | |||
757 | return 0; | 763 | return 0; |
758 | } | 764 | } |
759 | 765 | ||
@@ -1014,7 +1020,6 @@ static int fifo_open(struct inode *inode, struct file *filp) | |||
1014 | { | 1020 | { |
1015 | struct pipe_inode_info *pipe; | 1021 | struct pipe_inode_info *pipe; |
1016 | bool is_pipe = inode->i_sb->s_magic == PIPEFS_MAGIC; | 1022 | bool is_pipe = inode->i_sb->s_magic == PIPEFS_MAGIC; |
1017 | int kill = 0; | ||
1018 | int ret; | 1023 | int ret; |
1019 | 1024 | ||
1020 | filp->f_version = 0; | 1025 | filp->f_version = 0; |
@@ -1130,15 +1135,9 @@ err_wr: | |||
1130 | goto err; | 1135 | goto err; |
1131 | 1136 | ||
1132 | err: | 1137 | err: |
1133 | spin_lock(&inode->i_lock); | ||
1134 | if (!--pipe->files) { | ||
1135 | inode->i_pipe = NULL; | ||
1136 | kill = 1; | ||
1137 | } | ||
1138 | spin_unlock(&inode->i_lock); | ||
1139 | __pipe_unlock(pipe); | 1138 | __pipe_unlock(pipe); |
1140 | if (kill) | 1139 | |
1141 | free_pipe_info(pipe); | 1140 | put_pipe_info(inode, pipe); |
1142 | return ret; | 1141 | return ret; |
1143 | } | 1142 | } |
1144 | 1143 | ||
diff --git a/fs/squashfs/file_direct.c b/fs/squashfs/file_direct.c index 2943b2bfae48..62a0de6632e1 100644 --- a/fs/squashfs/file_direct.c +++ b/fs/squashfs/file_direct.c | |||
@@ -84,6 +84,9 @@ int squashfs_readpage_block(struct page *target_page, u64 block, int bsize) | |||
84 | */ | 84 | */ |
85 | res = squashfs_read_cache(target_page, block, bsize, pages, | 85 | res = squashfs_read_cache(target_page, block, bsize, pages, |
86 | page); | 86 | page); |
87 | if (res < 0) | ||
88 | goto mark_errored; | ||
89 | |||
87 | goto out; | 90 | goto out; |
88 | } | 91 | } |
89 | 92 | ||
@@ -119,7 +122,7 @@ mark_errored: | |||
119 | * dealt with by the caller | 122 | * dealt with by the caller |
120 | */ | 123 | */ |
121 | for (i = 0; i < pages; i++) { | 124 | for (i = 0; i < pages; i++) { |
122 | if (page[i] == target_page) | 125 | if (page[i] == NULL || page[i] == target_page) |
123 | continue; | 126 | continue; |
124 | flush_dcache_page(page[i]); | 127 | flush_dcache_page(page[i]); |
125 | SetPageError(page[i]); | 128 | SetPageError(page[i]); |
diff --git a/include/crypto/scatterwalk.h b/include/crypto/scatterwalk.h index 13621cc8cf4c..64ebede184f1 100644 --- a/include/crypto/scatterwalk.h +++ b/include/crypto/scatterwalk.h | |||
@@ -36,6 +36,7 @@ static inline void scatterwalk_sg_chain(struct scatterlist *sg1, int num, | |||
36 | { | 36 | { |
37 | sg_set_page(&sg1[num - 1], (void *)sg2, 0, 0); | 37 | sg_set_page(&sg1[num - 1], (void *)sg2, 0, 0); |
38 | sg1[num - 1].page_link &= ~0x02; | 38 | sg1[num - 1].page_link &= ~0x02; |
39 | sg1[num - 1].page_link |= 0x01; | ||
39 | } | 40 | } |
40 | 41 | ||
41 | static inline struct scatterlist *scatterwalk_sg_next(struct scatterlist *sg) | 42 | static inline struct scatterlist *scatterwalk_sg_next(struct scatterlist *sg) |
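For context on the one-line fix above: in a scatterlist entry, bit 0 of page_link marks a chain pointer and bit 1 marks the end of the table, so setting 0x01 (and keeping 0x02 cleared) makes walkers follow the last slot of sg1 into sg2 rather than treat it as data. A rough parallel using the generic helper, shown only as a sketch of the same bit manipulation and not as what this header actually calls:

#include <linux/scatterlist.h>

/* sg_chain() likewise sets the chain bit and clears the end bit on the
 * final entry of sg1 so that iteration continues into sg2. */
static void foo_chain_tables(struct scatterlist *sg1, unsigned int num,
			     struct scatterlist *sg2)
{
	sg_chain(sg1, num, sg2);
}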
diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h index 9abbe630c456..8c9b7a1c4138 100644 --- a/include/linux/ftrace_event.h +++ b/include/linux/ftrace_event.h | |||
@@ -248,6 +248,9 @@ struct ftrace_event_call { | |||
248 | #ifdef CONFIG_PERF_EVENTS | 248 | #ifdef CONFIG_PERF_EVENTS |
249 | int perf_refcount; | 249 | int perf_refcount; |
250 | struct hlist_head __percpu *perf_events; | 250 | struct hlist_head __percpu *perf_events; |
251 | |||
252 | int (*perf_perm)(struct ftrace_event_call *, | ||
253 | struct perf_event *); | ||
251 | #endif | 254 | #endif |
252 | }; | 255 | }; |
253 | 256 | ||
@@ -317,6 +320,19 @@ struct ftrace_event_file { | |||
317 | } \ | 320 | } \ |
318 | early_initcall(trace_init_flags_##name); | 321 | early_initcall(trace_init_flags_##name); |
319 | 322 | ||
323 | #define __TRACE_EVENT_PERF_PERM(name, expr...) \ | ||
324 | static int perf_perm_##name(struct ftrace_event_call *tp_event, \ | ||
325 | struct perf_event *p_event) \ | ||
326 | { \ | ||
327 | return ({ expr; }); \ | ||
328 | } \ | ||
329 | static int __init trace_init_perf_perm_##name(void) \ | ||
330 | { \ | ||
331 | event_##name.perf_perm = &perf_perm_##name; \ | ||
332 | return 0; \ | ||
333 | } \ | ||
334 | early_initcall(trace_init_perf_perm_##name); | ||
335 | |||
320 | #define PERF_MAX_TRACE_SIZE 2048 | 336 | #define PERF_MAX_TRACE_SIZE 2048 |
321 | 337 | ||
322 | #define MAX_FILTER_STR_VAL 256 /* Should handle KSYM_SYMBOL_LEN */ | 338 | #define MAX_FILTER_STR_VAL 256 /* Should handle KSYM_SYMBOL_LEN */ |
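__TRACE_EVENT_PERF_PERM() above generates a per-event callback stored in ->perf_perm, which perf is expected to consult before attaching to the tracepoint (the caller is not part of this hunk); returning a negative errno refuses the attachment. A hypothetical usage sketch: the event name is invented, while p_event is the parameter name the macro itself provides and is_sampling_event() is the existing perf helper:

/* Allow counting perf events on foo_event but refuse sampling ones. */
TRACE_EVENT_PERF_PERM(foo_event,
	is_sampling_event(p_event) ? -EPERM : 0);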
diff --git a/include/linux/irqreturn.h b/include/linux/irqreturn.h index 714ba08dc092..e374e369fb2f 100644 --- a/include/linux/irqreturn.h +++ b/include/linux/irqreturn.h | |||
@@ -14,6 +14,6 @@ enum irqreturn { | |||
14 | }; | 14 | }; |
15 | 15 | ||
16 | typedef enum irqreturn irqreturn_t; | 16 | typedef enum irqreturn irqreturn_t; |
17 | #define IRQ_RETVAL(x) ((x) != IRQ_NONE) | 17 | #define IRQ_RETVAL(x) ((x) ? IRQ_HANDLED : IRQ_NONE) |
18 | 18 | ||
19 | #endif | 19 | #endif |
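IRQ_RETVAL() still maps a did-we-handle-anything flag onto irqreturn_t, but the new definition names IRQ_HANDLED and IRQ_NONE explicitly instead of leaning on their numeric values. A minimal, self-contained handler sketch with invented foo_* names:

#include <linux/interrupt.h>

/* Hypothetical status check standing in for a real register read. */
static bool foo_check_and_clear_irq(void *dev_id)
{
	return true;
}

static irqreturn_t foo_interrupt(int irq, void *dev_id)
{
	int handled = foo_check_and_clear_irq(dev_id);

	/* Nonzero maps to IRQ_HANDLED, zero to IRQ_NONE. */
	return IRQ_RETVAL(handled);
}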
diff --git a/include/linux/sched.h b/include/linux/sched.h index 7e35d4b9e14a..768b037dfacb 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h | |||
@@ -831,8 +831,6 @@ struct sched_domain { | |||
831 | unsigned int balance_interval; /* initialise to 1. units in ms. */ | 831 | unsigned int balance_interval; /* initialise to 1. units in ms. */ |
832 | unsigned int nr_balance_failed; /* initialise to 0 */ | 832 | unsigned int nr_balance_failed; /* initialise to 0 */ |
833 | 833 | ||
834 | u64 last_update; | ||
835 | |||
836 | /* idle_balance() stats */ | 834 | /* idle_balance() stats */ |
837 | u64 max_newidle_lb_cost; | 835 | u64 max_newidle_lb_cost; |
838 | unsigned long next_decay_max_lb_cost; | 836 | unsigned long next_decay_max_lb_cost; |
diff --git a/include/linux/tracepoint.h b/include/linux/tracepoint.h index ebeab360d851..f16dc0a40049 100644 --- a/include/linux/tracepoint.h +++ b/include/linux/tracepoint.h | |||
@@ -267,6 +267,8 @@ static inline void tracepoint_synchronize_unregister(void) | |||
267 | 267 | ||
268 | #define TRACE_EVENT_FLAGS(event, flag) | 268 | #define TRACE_EVENT_FLAGS(event, flag) |
269 | 269 | ||
270 | #define TRACE_EVENT_PERF_PERM(event, expr...) | ||
271 | |||
270 | #endif /* DECLARE_TRACE */ | 272 | #endif /* DECLARE_TRACE */ |
271 | 273 | ||
272 | #ifndef TRACE_EVENT | 274 | #ifndef TRACE_EVENT |
@@ -399,4 +401,6 @@ static inline void tracepoint_synchronize_unregister(void) | |||
399 | 401 | ||
400 | #define TRACE_EVENT_FLAGS(event, flag) | 402 | #define TRACE_EVENT_FLAGS(event, flag) |
401 | 403 | ||
404 | #define TRACE_EVENT_PERF_PERM(event, expr...) | ||
405 | |||
402 | #endif /* ifdef TRACE_EVENT (see note above) */ | 406 | #endif /* ifdef TRACE_EVENT (see note above) */ |
diff --git a/include/net/ip.h b/include/net/ip.h index 217bc5bfc6c6..5a25f36fe3a7 100644 --- a/include/net/ip.h +++ b/include/net/ip.h | |||
@@ -473,7 +473,7 @@ int compat_ip_getsockopt(struct sock *sk, int level, int optname, | |||
473 | int ip_ra_control(struct sock *sk, unsigned char on, | 473 | int ip_ra_control(struct sock *sk, unsigned char on, |
474 | void (*destructor)(struct sock *)); | 474 | void (*destructor)(struct sock *)); |
475 | 475 | ||
476 | int ip_recv_error(struct sock *sk, struct msghdr *msg, int len); | 476 | int ip_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len); |
477 | void ip_icmp_error(struct sock *sk, struct sk_buff *skb, int err, __be16 port, | 477 | void ip_icmp_error(struct sock *sk, struct sk_buff *skb, int err, __be16 port, |
478 | u32 info, u8 *payload); | 478 | u32 info, u8 *payload); |
479 | void ip_local_error(struct sock *sk, int err, __be32 daddr, __be16 dport, | 479 | void ip_local_error(struct sock *sk, int err, __be32 daddr, __be16 dport, |
diff --git a/include/net/ipv6.h b/include/net/ipv6.h index 2a5f668cd683..eb198acaac1d 100644 --- a/include/net/ipv6.h +++ b/include/net/ipv6.h | |||
@@ -776,8 +776,10 @@ int compat_ipv6_getsockopt(struct sock *sk, int level, int optname, | |||
776 | 776 | ||
777 | int ip6_datagram_connect(struct sock *sk, struct sockaddr *addr, int addr_len); | 777 | int ip6_datagram_connect(struct sock *sk, struct sockaddr *addr, int addr_len); |
778 | 778 | ||
779 | int ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len); | 779 | int ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len, |
780 | int ipv6_recv_rxpmtu(struct sock *sk, struct msghdr *msg, int len); | 780 | int *addr_len); |
781 | int ipv6_recv_rxpmtu(struct sock *sk, struct msghdr *msg, int len, | ||
782 | int *addr_len); | ||
781 | void ipv6_icmp_error(struct sock *sk, struct sk_buff *skb, int err, __be16 port, | 783 | void ipv6_icmp_error(struct sock *sk, struct sk_buff *skb, int err, __be16 port, |
782 | u32 info, u8 *payload); | 784 | u32 info, u8 *payload); |
783 | void ipv6_local_error(struct sock *sk, int err, struct flowi6 *fl6, u32 info); | 785 | void ipv6_local_error(struct sock *sk, int err, struct flowi6 *fl6, u32 info); |
diff --git a/include/net/ping.h b/include/net/ping.h index 3f67704f3747..90f48417b03d 100644 --- a/include/net/ping.h +++ b/include/net/ping.h | |||
@@ -31,7 +31,8 @@ | |||
31 | 31 | ||
32 | /* Compatibility glue so we can support IPv6 when it's compiled as a module */ | 32 | /* Compatibility glue so we can support IPv6 when it's compiled as a module */ |
33 | struct pingv6_ops { | 33 | struct pingv6_ops { |
34 | int (*ipv6_recv_error)(struct sock *sk, struct msghdr *msg, int len); | 34 | int (*ipv6_recv_error)(struct sock *sk, struct msghdr *msg, int len, |
35 | int *addr_len); | ||
35 | int (*ip6_datagram_recv_ctl)(struct sock *sk, struct msghdr *msg, | 36 | int (*ip6_datagram_recv_ctl)(struct sock *sk, struct msghdr *msg, |
36 | struct sk_buff *skb); | 37 | struct sk_buff *skb); |
37 | int (*icmpv6_err_convert)(u8 type, u8 code, int *err); | 38 | int (*icmpv6_err_convert)(u8 type, u8 code, int *err); |
diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h index 2174d8da0770..ea0ca5f6e629 100644 --- a/include/net/sctp/structs.h +++ b/include/net/sctp/structs.h | |||
@@ -629,6 +629,7 @@ struct sctp_chunk { | |||
629 | #define SCTP_NEED_FRTX 0x1 | 629 | #define SCTP_NEED_FRTX 0x1 |
630 | #define SCTP_DONT_FRTX 0x2 | 630 | #define SCTP_DONT_FRTX 0x2 |
631 | __u16 rtt_in_progress:1, /* This chunk used for RTT calc? */ | 631 | __u16 rtt_in_progress:1, /* This chunk used for RTT calc? */ |
632 | resent:1, /* Has this chunk ever been resent. */ | ||
632 | has_tsn:1, /* Does this chunk have a TSN yet? */ | 633 | has_tsn:1, /* Does this chunk have a TSN yet? */ |
633 | has_ssn:1, /* Does this chunk have a SSN yet? */ | 634 | has_ssn:1, /* Does this chunk have a SSN yet? */ |
634 | singleton:1, /* Only chunk in the packet? */ | 635 | singleton:1, /* Only chunk in the packet? */ |
diff --git a/include/trace/ftrace.h b/include/trace/ftrace.h index d17a35c6537e..5c38606613d8 100644 --- a/include/trace/ftrace.h +++ b/include/trace/ftrace.h | |||
@@ -90,6 +90,10 @@ | |||
90 | #define TRACE_EVENT_FLAGS(name, value) \ | 90 | #define TRACE_EVENT_FLAGS(name, value) \ |
91 | __TRACE_EVENT_FLAGS(name, value) | 91 | __TRACE_EVENT_FLAGS(name, value) |
92 | 92 | ||
93 | #undef TRACE_EVENT_PERF_PERM | ||
94 | #define TRACE_EVENT_PERF_PERM(name, expr...) \ | ||
95 | __TRACE_EVENT_PERF_PERM(name, expr) | ||
96 | |||
93 | #include TRACE_INCLUDE(TRACE_INCLUDE_FILE) | 97 | #include TRACE_INCLUDE(TRACE_INCLUDE_FILE) |
94 | 98 | ||
95 | 99 | ||
@@ -140,6 +144,9 @@ | |||
140 | #undef TRACE_EVENT_FLAGS | 144 | #undef TRACE_EVENT_FLAGS |
141 | #define TRACE_EVENT_FLAGS(event, flag) | 145 | #define TRACE_EVENT_FLAGS(event, flag) |
142 | 146 | ||
147 | #undef TRACE_EVENT_PERF_PERM | ||
148 | #define TRACE_EVENT_PERF_PERM(event, expr...) | ||
149 | |||
143 | #include TRACE_INCLUDE(TRACE_INCLUDE_FILE) | 150 | #include TRACE_INCLUDE(TRACE_INCLUDE_FILE) |
144 | 151 | ||
145 | /* | 152 | /* |
diff --git a/include/uapi/linux/genetlink.h b/include/uapi/linux/genetlink.h index 1af72d8228e0..c3363ba1ae05 100644 --- a/include/uapi/linux/genetlink.h +++ b/include/uapi/linux/genetlink.h | |||
@@ -28,6 +28,7 @@ struct genlmsghdr { | |||
28 | #define GENL_ID_GENERATE 0 | 28 | #define GENL_ID_GENERATE 0 |
29 | #define GENL_ID_CTRL NLMSG_MIN_TYPE | 29 | #define GENL_ID_CTRL NLMSG_MIN_TYPE |
30 | #define GENL_ID_VFS_DQUOT (NLMSG_MIN_TYPE + 1) | 30 | #define GENL_ID_VFS_DQUOT (NLMSG_MIN_TYPE + 1) |
31 | #define GENL_ID_PMCRAID (NLMSG_MIN_TYPE + 2) | ||
31 | 32 | ||
32 | /************************************************************************** | 33 | /************************************************************************** |
33 | * Controller | 34 | * Controller |
diff --git a/include/uapi/linux/if_link.h b/include/uapi/linux/if_link.h index b78566f59aba..6db460121f84 100644 --- a/include/uapi/linux/if_link.h +++ b/include/uapi/linux/if_link.h | |||
@@ -488,7 +488,9 @@ enum { | |||
488 | IFLA_HSR_UNSPEC, | 488 | IFLA_HSR_UNSPEC, |
489 | IFLA_HSR_SLAVE1, | 489 | IFLA_HSR_SLAVE1, |
490 | IFLA_HSR_SLAVE2, | 490 | IFLA_HSR_SLAVE2, |
491 | IFLA_HSR_MULTICAST_SPEC, | 491 | IFLA_HSR_MULTICAST_SPEC, /* Last byte of supervision addr */ |
492 | IFLA_HSR_SUPERVISION_ADDR, /* Supervision frame multicast addr */ | ||
493 | IFLA_HSR_SEQ_NR, | ||
492 | __IFLA_HSR_MAX, | 494 | __IFLA_HSR_MAX, |
493 | }; | 495 | }; |
494 | 496 | ||
diff --git a/include/uapi/linux/netlink_diag.h b/include/uapi/linux/netlink_diag.h index 4e31db4eea41..f2159d30d1f5 100644 --- a/include/uapi/linux/netlink_diag.h +++ b/include/uapi/linux/netlink_diag.h | |||
@@ -33,6 +33,7 @@ struct netlink_diag_ring { | |||
33 | }; | 33 | }; |
34 | 34 | ||
35 | enum { | 35 | enum { |
36 | /* NETLINK_DIAG_NONE, standard nl API requires this attribute! */ | ||
36 | NETLINK_DIAG_MEMINFO, | 37 | NETLINK_DIAG_MEMINFO, |
37 | NETLINK_DIAG_GROUPS, | 38 | NETLINK_DIAG_GROUPS, |
38 | NETLINK_DIAG_RX_RING, | 39 | NETLINK_DIAG_RX_RING, |
diff --git a/include/uapi/linux/packet_diag.h b/include/uapi/linux/packet_diag.h index b2cc0cd9c4d9..d08c63f3dd6f 100644 --- a/include/uapi/linux/packet_diag.h +++ b/include/uapi/linux/packet_diag.h | |||
@@ -29,6 +29,7 @@ struct packet_diag_msg { | |||
29 | }; | 29 | }; |
30 | 30 | ||
31 | enum { | 31 | enum { |
32 | /* PACKET_DIAG_NONE, standard nl API requires this attribute! */ | ||
32 | PACKET_DIAG_INFO, | 33 | PACKET_DIAG_INFO, |
33 | PACKET_DIAG_MCLIST, | 34 | PACKET_DIAG_MCLIST, |
34 | PACKET_DIAG_RX_RING, | 35 | PACKET_DIAG_RX_RING, |
diff --git a/include/uapi/linux/unix_diag.h b/include/uapi/linux/unix_diag.h index b9e2a6a7446f..1eb0b8dd1830 100644 --- a/include/uapi/linux/unix_diag.h +++ b/include/uapi/linux/unix_diag.h | |||
@@ -31,6 +31,7 @@ struct unix_diag_msg { | |||
31 | }; | 31 | }; |
32 | 32 | ||
33 | enum { | 33 | enum { |
34 | /* UNIX_DIAG_NONE, standard nl API requires this attribute! */ | ||
34 | UNIX_DIAG_NAME, | 35 | UNIX_DIAG_NAME, |
35 | UNIX_DIAG_VFS, | 36 | UNIX_DIAG_VFS, |
36 | UNIX_DIAG_PEER, | 37 | UNIX_DIAG_PEER, |
diff --git a/kernel/events/core.c b/kernel/events/core.c index d724e7757cd1..72348dc192c1 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c | |||
@@ -5680,11 +5680,6 @@ static void swevent_hlist_put(struct perf_event *event) | |||
5680 | { | 5680 | { |
5681 | int cpu; | 5681 | int cpu; |
5682 | 5682 | ||
5683 | if (event->cpu != -1) { | ||
5684 | swevent_hlist_put_cpu(event, event->cpu); | ||
5685 | return; | ||
5686 | } | ||
5687 | |||
5688 | for_each_possible_cpu(cpu) | 5683 | for_each_possible_cpu(cpu) |
5689 | swevent_hlist_put_cpu(event, cpu); | 5684 | swevent_hlist_put_cpu(event, cpu); |
5690 | } | 5685 | } |
@@ -5718,9 +5713,6 @@ static int swevent_hlist_get(struct perf_event *event) | |||
5718 | int err; | 5713 | int err; |
5719 | int cpu, failed_cpu; | 5714 | int cpu, failed_cpu; |
5720 | 5715 | ||
5721 | if (event->cpu != -1) | ||
5722 | return swevent_hlist_get_cpu(event, event->cpu); | ||
5723 | |||
5724 | get_online_cpus(); | 5716 | get_online_cpus(); |
5725 | for_each_possible_cpu(cpu) { | 5717 | for_each_possible_cpu(cpu) { |
5726 | err = swevent_hlist_get_cpu(event, cpu); | 5718 | err = swevent_hlist_get_cpu(event, cpu); |
diff --git a/kernel/irq/pm.c b/kernel/irq/pm.c index cb228bf21760..abcd6ca86cb7 100644 --- a/kernel/irq/pm.c +++ b/kernel/irq/pm.c | |||
@@ -50,7 +50,7 @@ static void resume_irqs(bool want_early) | |||
50 | bool is_early = desc->action && | 50 | bool is_early = desc->action && |
51 | desc->action->flags & IRQF_EARLY_RESUME; | 51 | desc->action->flags & IRQF_EARLY_RESUME; |
52 | 52 | ||
53 | if (is_early != want_early) | 53 | if (!is_early && want_early) |
54 | continue; | 54 | continue; |
55 | 55 | ||
56 | raw_spin_lock_irqsave(&desc->lock, flags); | 56 | raw_spin_lock_irqsave(&desc->lock, flags); |
diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h index 6abb03dff5c0..08a765232432 100644 --- a/kernel/rcu/tree_plugin.h +++ b/kernel/rcu/tree_plugin.h | |||
@@ -1632,7 +1632,7 @@ module_param(rcu_idle_gp_delay, int, 0644); | |||
1632 | static int rcu_idle_lazy_gp_delay = RCU_IDLE_LAZY_GP_DELAY; | 1632 | static int rcu_idle_lazy_gp_delay = RCU_IDLE_LAZY_GP_DELAY; |
1633 | module_param(rcu_idle_lazy_gp_delay, int, 0644); | 1633 | module_param(rcu_idle_lazy_gp_delay, int, 0644); |
1634 | 1634 | ||
1635 | extern int tick_nohz_enabled; | 1635 | extern int tick_nohz_active; |
1636 | 1636 | ||
1637 | /* | 1637 | /* |
1638 | * Try to advance callbacks for all flavors of RCU on the current CPU, but | 1638 | * Try to advance callbacks for all flavors of RCU on the current CPU, but |
@@ -1729,7 +1729,7 @@ static void rcu_prepare_for_idle(int cpu) | |||
1729 | int tne; | 1729 | int tne; |
1730 | 1730 | ||
1731 | /* Handle nohz enablement switches conservatively. */ | 1731 | /* Handle nohz enablement switches conservatively. */ |
1732 | tne = ACCESS_ONCE(tick_nohz_enabled); | 1732 | tne = ACCESS_ONCE(tick_nohz_active); |
1733 | if (tne != rdtp->tick_nohz_enabled_snap) { | 1733 | if (tne != rdtp->tick_nohz_enabled_snap) { |
1734 | if (rcu_cpu_has_callbacks(cpu, NULL)) | 1734 | if (rcu_cpu_has_callbacks(cpu, NULL)) |
1735 | invoke_rcu_core(); /* force nohz to see update. */ | 1735 | invoke_rcu_core(); /* force nohz to see update. */ |
diff --git a/kernel/sched/core.c b/kernel/sched/core.c index c1808606ee5f..e85cda20ab2b 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c | |||
@@ -2660,6 +2660,7 @@ asmlinkage void __sched notrace preempt_schedule(void) | |||
2660 | } while (need_resched()); | 2660 | } while (need_resched()); |
2661 | } | 2661 | } |
2662 | EXPORT_SYMBOL(preempt_schedule); | 2662 | EXPORT_SYMBOL(preempt_schedule); |
2663 | #endif /* CONFIG_PREEMPT */ | ||
2663 | 2664 | ||
2664 | /* | 2665 | /* |
2665 | * this is the entry point to schedule() from kernel preemption | 2666 | * this is the entry point to schedule() from kernel preemption |
@@ -2693,8 +2694,6 @@ asmlinkage void __sched preempt_schedule_irq(void) | |||
2693 | exception_exit(prev_state); | 2694 | exception_exit(prev_state); |
2694 | } | 2695 | } |
2695 | 2696 | ||
2696 | #endif /* CONFIG_PREEMPT */ | ||
2697 | |||
2698 | int default_wake_function(wait_queue_t *curr, unsigned mode, int wake_flags, | 2697 | int default_wake_function(wait_queue_t *curr, unsigned mode, int wake_flags, |
2699 | void *key) | 2698 | void *key) |
2700 | { | 2699 | { |
@@ -4762,7 +4761,7 @@ static void rq_attach_root(struct rq *rq, struct root_domain *rd) | |||
4762 | cpumask_clear_cpu(rq->cpu, old_rd->span); | 4761 | cpumask_clear_cpu(rq->cpu, old_rd->span); |
4763 | 4762 | ||
4764 | /* | 4763 | /* |
4765 | * If we dont want to free the old_rt yet then | 4764 | * If we dont want to free the old_rd yet then |
4766 | * set old_rd to NULL to skip the freeing later | 4765 | * set old_rd to NULL to skip the freeing later |
4767 | * in this function: | 4766 | * in this function: |
4768 | */ | 4767 | */ |
@@ -4910,8 +4909,9 @@ static void update_top_cache_domain(int cpu) | |||
4910 | if (sd) { | 4909 | if (sd) { |
4911 | id = cpumask_first(sched_domain_span(sd)); | 4910 | id = cpumask_first(sched_domain_span(sd)); |
4912 | size = cpumask_weight(sched_domain_span(sd)); | 4911 | size = cpumask_weight(sched_domain_span(sd)); |
4913 | rcu_assign_pointer(per_cpu(sd_busy, cpu), sd->parent); | 4912 | sd = sd->parent; /* sd_busy */ |
4914 | } | 4913 | } |
4914 | rcu_assign_pointer(per_cpu(sd_busy, cpu), sd); | ||
4915 | 4915 | ||
4916 | rcu_assign_pointer(per_cpu(sd_llc, cpu), sd); | 4916 | rcu_assign_pointer(per_cpu(sd_llc, cpu), sd); |
4917 | per_cpu(sd_llc_size, cpu) = size; | 4917 | per_cpu(sd_llc_size, cpu) = size; |
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index e8b652ebe027..fd773ade1a31 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c | |||
@@ -5379,10 +5379,31 @@ void update_group_power(struct sched_domain *sd, int cpu) | |||
5379 | */ | 5379 | */ |
5380 | 5380 | ||
5381 | for_each_cpu(cpu, sched_group_cpus(sdg)) { | 5381 | for_each_cpu(cpu, sched_group_cpus(sdg)) { |
5382 | struct sched_group *sg = cpu_rq(cpu)->sd->groups; | 5382 | struct sched_group_power *sgp; |
5383 | struct rq *rq = cpu_rq(cpu); | ||
5383 | 5384 | ||
5384 | power_orig += sg->sgp->power_orig; | 5385 | /* |
5385 | power += sg->sgp->power; | 5386 | * build_sched_domains() -> init_sched_groups_power() |
5387 | * gets here before we've attached the domains to the | ||
5388 | * runqueues. | ||
5389 | * | ||
5390 | * Use power_of(), which is set irrespective of domains | ||
5391 | * in update_cpu_power(). | ||
5392 | * | ||
5393 | * This avoids power/power_orig from being 0 and | ||
5394 | * causing divide-by-zero issues on boot. | ||
5395 | * | ||
5396 | * Runtime updates will correct power_orig. | ||
5397 | */ | ||
5398 | if (unlikely(!rq->sd)) { | ||
5399 | power_orig += power_of(cpu); | ||
5400 | power += power_of(cpu); | ||
5401 | continue; | ||
5402 | } | ||
5403 | |||
5404 | sgp = rq->sd->groups->sgp; | ||
5405 | power_orig += sgp->power_orig; | ||
5406 | power += sgp->power; | ||
5386 | } | 5407 | } |
5387 | } else { | 5408 | } else { |
5388 | /* | 5409 | /* |
diff --git a/kernel/time/tick-common.c b/kernel/time/tick-common.c index 64522ecdfe0e..162b03ab0ad2 100644 --- a/kernel/time/tick-common.c +++ b/kernel/time/tick-common.c | |||
@@ -33,6 +33,21 @@ DEFINE_PER_CPU(struct tick_device, tick_cpu_device); | |||
33 | */ | 33 | */ |
34 | ktime_t tick_next_period; | 34 | ktime_t tick_next_period; |
35 | ktime_t tick_period; | 35 | ktime_t tick_period; |
36 | |||
37 | /* | ||
38 | * tick_do_timer_cpu is a timer core internal variable which holds the CPU NR | ||
39 | * which is responsible for calling do_timer(), i.e. the timekeeping stuff. This | ||
40 | * variable has two functions: | ||
41 | * | ||
42 | * 1) Prevent a thundering herd issue of a gazillion of CPUs trying to grab the | ||
43 | * timekeeping lock all at once. Only the CPU which is assigned to do the | ||
44 | * update is handling it. | ||
45 | * | ||
46 | * 2) Hand off the duty in the NOHZ idle case by setting the value to | ||
47 | * TICK_DO_TIMER_NONE, i.e. a non existing CPU. So the next cpu which looks | ||
48 | * at it will take over and keep the time keeping alive. The handover | ||
49 | * procedure also covers cpu hotplug. | ||
50 | */ | ||
36 | int tick_do_timer_cpu __read_mostly = TICK_DO_TIMER_BOOT; | 51 | int tick_do_timer_cpu __read_mostly = TICK_DO_TIMER_BOOT; |
37 | 52 | ||
38 | /* | 53 | /* |
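As a rough illustration of the handoff the new comment describes, here is a stand-alone model in plain C: one designated CPU performs the periodic update, and when it goes NOHZ idle it writes a NONE value so that the next CPU to inspect the variable claims the duty. This is only a sketch of the idea; the real kernel code runs under its own locking and tick infrastructure, and the function names below are made up.

    #include <stdio.h>

    #define TICK_DO_TIMER_NONE  -1

    static int tick_do_timer_cpu;            /* which CPU runs do_timer() */

    /* Called from every CPU's tick handler. */
    static void tick_periodic(int cpu)
    {
            if (tick_do_timer_cpu == TICK_DO_TIMER_NONE)
                    tick_do_timer_cpu = cpu; /* nobody owns the duty: take it over */

            if (tick_do_timer_cpu == cpu)
                    printf("cpu%d advances jiffies/timekeeping\n", cpu);
            /* all other CPUs skip the update and never contend on the lock */
    }

    /* Called when the duty-owning CPU enters NOHZ idle and stops its tick. */
    static void tick_nohz_handover(int cpu)
    {
            if (tick_do_timer_cpu == cpu)
                    tick_do_timer_cpu = TICK_DO_TIMER_NONE;
    }

    int main(void)
    {
            tick_do_timer_cpu = 0;

            tick_periodic(0);        /* CPU 0 owns the duty */
            tick_periodic(1);        /* CPU 1 does nothing */
            tick_nohz_handover(0);   /* CPU 0 goes idle, drops the duty */
            tick_periodic(1);        /* CPU 1 sees NONE and takes over */
            return 0;
    }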
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c index 3612fc77f834..ea20f7d1ac2c 100644 --- a/kernel/time/tick-sched.c +++ b/kernel/time/tick-sched.c | |||
@@ -361,8 +361,8 @@ void __init tick_nohz_init(void) | |||
361 | /* | 361 | /* |
362 | * NO HZ enabled ? | 362 | * NO HZ enabled ? |
363 | */ | 363 | */ |
364 | int tick_nohz_enabled __read_mostly = 1; | 364 | static int tick_nohz_enabled __read_mostly = 1; |
365 | 365 | int tick_nohz_active __read_mostly; | |
366 | /* | 366 | /* |
367 | * Enable / Disable tickless mode | 367 | * Enable / Disable tickless mode |
368 | */ | 368 | */ |
@@ -465,7 +465,7 @@ u64 get_cpu_idle_time_us(int cpu, u64 *last_update_time) | |||
465 | struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu); | 465 | struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu); |
466 | ktime_t now, idle; | 466 | ktime_t now, idle; |
467 | 467 | ||
468 | if (!tick_nohz_enabled) | 468 | if (!tick_nohz_active) |
469 | return -1; | 469 | return -1; |
470 | 470 | ||
471 | now = ktime_get(); | 471 | now = ktime_get(); |
@@ -506,7 +506,7 @@ u64 get_cpu_iowait_time_us(int cpu, u64 *last_update_time) | |||
506 | struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu); | 506 | struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu); |
507 | ktime_t now, iowait; | 507 | ktime_t now, iowait; |
508 | 508 | ||
509 | if (!tick_nohz_enabled) | 509 | if (!tick_nohz_active) |
510 | return -1; | 510 | return -1; |
511 | 511 | ||
512 | now = ktime_get(); | 512 | now = ktime_get(); |
@@ -711,8 +711,10 @@ static bool can_stop_idle_tick(int cpu, struct tick_sched *ts) | |||
711 | return false; | 711 | return false; |
712 | } | 712 | } |
713 | 713 | ||
714 | if (unlikely(ts->nohz_mode == NOHZ_MODE_INACTIVE)) | 714 | if (unlikely(ts->nohz_mode == NOHZ_MODE_INACTIVE)) { |
715 | ts->sleep_length = (ktime_t) { .tv64 = NSEC_PER_SEC/HZ }; | ||
715 | return false; | 716 | return false; |
717 | } | ||
716 | 718 | ||
717 | if (need_resched()) | 719 | if (need_resched()) |
718 | return false; | 720 | return false; |
@@ -799,11 +801,6 @@ void tick_nohz_idle_enter(void) | |||
799 | local_irq_disable(); | 801 | local_irq_disable(); |
800 | 802 | ||
801 | ts = &__get_cpu_var(tick_cpu_sched); | 803 | ts = &__get_cpu_var(tick_cpu_sched); |
802 | /* | ||
803 | * set ts->inidle unconditionally. even if the system did not | ||
804 | * switch to nohz mode the cpu frequency governers rely on the | ||
805 | * update of the idle time accounting in tick_nohz_start_idle(). | ||
806 | */ | ||
807 | ts->inidle = 1; | 804 | ts->inidle = 1; |
808 | __tick_nohz_idle_enter(ts); | 805 | __tick_nohz_idle_enter(ts); |
809 | 806 | ||
@@ -973,7 +970,7 @@ static void tick_nohz_switch_to_nohz(void) | |||
973 | struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched); | 970 | struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched); |
974 | ktime_t next; | 971 | ktime_t next; |
975 | 972 | ||
976 | if (!tick_nohz_enabled) | 973 | if (!tick_nohz_active) |
977 | return; | 974 | return; |
978 | 975 | ||
979 | local_irq_disable(); | 976 | local_irq_disable(); |
@@ -981,7 +978,7 @@ static void tick_nohz_switch_to_nohz(void) | |||
981 | local_irq_enable(); | 978 | local_irq_enable(); |
982 | return; | 979 | return; |
983 | } | 980 | } |
984 | 981 | tick_nohz_active = 1; | |
985 | ts->nohz_mode = NOHZ_MODE_LOWRES; | 982 | ts->nohz_mode = NOHZ_MODE_LOWRES; |
986 | 983 | ||
987 | /* | 984 | /* |
@@ -1139,8 +1136,10 @@ void tick_setup_sched_timer(void) | |||
1139 | } | 1136 | } |
1140 | 1137 | ||
1141 | #ifdef CONFIG_NO_HZ_COMMON | 1138 | #ifdef CONFIG_NO_HZ_COMMON |
1142 | if (tick_nohz_enabled) | 1139 | if (tick_nohz_enabled) { |
1143 | ts->nohz_mode = NOHZ_MODE_HIGHRES; | 1140 | ts->nohz_mode = NOHZ_MODE_HIGHRES; |
1141 | tick_nohz_active = 1; | ||
1142 | } | ||
1144 | #endif | 1143 | #endif |
1145 | } | 1144 | } |
1146 | #endif /* HIGH_RES_TIMERS */ | 1145 | #endif /* HIGH_RES_TIMERS */ |
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c index 3abf53418b67..87b4f00284c9 100644 --- a/kernel/time/timekeeping.c +++ b/kernel/time/timekeeping.c | |||
@@ -1347,7 +1347,7 @@ static inline void old_vsyscall_fixup(struct timekeeper *tk) | |||
1347 | tk->xtime_nsec -= remainder; | 1347 | tk->xtime_nsec -= remainder; |
1348 | tk->xtime_nsec += 1ULL << tk->shift; | 1348 | tk->xtime_nsec += 1ULL << tk->shift; |
1349 | tk->ntp_error += remainder << tk->ntp_error_shift; | 1349 | tk->ntp_error += remainder << tk->ntp_error_shift; |
1350 | 1350 | tk->ntp_error -= (1ULL << tk->shift) << tk->ntp_error_shift; | |
1351 | } | 1351 | } |
1352 | #else | 1352 | #else |
1353 | #define old_vsyscall_fixup(tk) | 1353 | #define old_vsyscall_fixup(tk) |
diff --git a/kernel/timer.c b/kernel/timer.c index 6582b82fa966..accfd241b9e5 100644 --- a/kernel/timer.c +++ b/kernel/timer.c | |||
@@ -1518,9 +1518,8 @@ static int init_timers_cpu(int cpu) | |||
1518 | /* | 1518 | /* |
1519 | * The APs use this path later in boot | 1519 | * The APs use this path later in boot |
1520 | */ | 1520 | */ |
1521 | base = kmalloc_node(sizeof(*base), | 1521 | base = kzalloc_node(sizeof(*base), GFP_KERNEL, |
1522 | GFP_KERNEL | __GFP_ZERO, | 1522 | cpu_to_node(cpu)); |
1523 | cpu_to_node(cpu)); | ||
1524 | if (!base) | 1523 | if (!base) |
1525 | return -ENOMEM; | 1524 | return -ENOMEM; |
1526 | 1525 | ||
diff --git a/kernel/trace/trace_event_perf.c b/kernel/trace/trace_event_perf.c index 78e27e3b52ac..e854f420e033 100644 --- a/kernel/trace/trace_event_perf.c +++ b/kernel/trace/trace_event_perf.c | |||
@@ -24,6 +24,12 @@ static int total_ref_count; | |||
24 | static int perf_trace_event_perm(struct ftrace_event_call *tp_event, | 24 | static int perf_trace_event_perm(struct ftrace_event_call *tp_event, |
25 | struct perf_event *p_event) | 25 | struct perf_event *p_event) |
26 | { | 26 | { |
27 | if (tp_event->perf_perm) { | ||
28 | int ret = tp_event->perf_perm(tp_event, p_event); | ||
29 | if (ret) | ||
30 | return ret; | ||
31 | } | ||
32 | |||
27 | /* The ftrace function trace is allowed only for root. */ | 33 | /* The ftrace function trace is allowed only for root. */ |
28 | if (ftrace_event_is_function(tp_event) && | 34 | if (ftrace_event_is_function(tp_event) && |
29 | perf_paranoid_tracepoint_raw() && !capable(CAP_SYS_ADMIN)) | 35 | perf_paranoid_tracepoint_raw() && !capable(CAP_SYS_ADMIN)) |
@@ -173,7 +179,7 @@ static int perf_trace_event_init(struct ftrace_event_call *tp_event, | |||
173 | int perf_trace_init(struct perf_event *p_event) | 179 | int perf_trace_init(struct perf_event *p_event) |
174 | { | 180 | { |
175 | struct ftrace_event_call *tp_event; | 181 | struct ftrace_event_call *tp_event; |
176 | int event_id = p_event->attr.config; | 182 | u64 event_id = p_event->attr.config; |
177 | int ret = -EINVAL; | 183 | int ret = -EINVAL; |
178 | 184 | ||
179 | mutex_lock(&event_mutex); | 185 | mutex_lock(&event_mutex); |
diff --git a/net/compat.c b/net/compat.c index 618c6a8a911b..dd32e34c1e2c 100644 --- a/net/compat.c +++ b/net/compat.c | |||
@@ -72,7 +72,7 @@ int get_compat_msghdr(struct msghdr *kmsg, struct compat_msghdr __user *umsg) | |||
72 | __get_user(kmsg->msg_flags, &umsg->msg_flags)) | 72 | __get_user(kmsg->msg_flags, &umsg->msg_flags)) |
73 | return -EFAULT; | 73 | return -EFAULT; |
74 | if (kmsg->msg_namelen > sizeof(struct sockaddr_storage)) | 74 | if (kmsg->msg_namelen > sizeof(struct sockaddr_storage)) |
75 | return -EINVAL; | 75 | kmsg->msg_namelen = sizeof(struct sockaddr_storage); |
76 | kmsg->msg_name = compat_ptr(tmp1); | 76 | kmsg->msg_name = compat_ptr(tmp1); |
77 | kmsg->msg_iov = compat_ptr(tmp2); | 77 | kmsg->msg_iov = compat_ptr(tmp2); |
78 | kmsg->msg_control = compat_ptr(tmp3); | 78 | kmsg->msg_control = compat_ptr(tmp3); |
diff --git a/net/core/pktgen.c b/net/core/pktgen.c index 261357a66300..a797fff7f222 100644 --- a/net/core/pktgen.c +++ b/net/core/pktgen.c | |||
@@ -2527,6 +2527,8 @@ static int process_ipsec(struct pktgen_dev *pkt_dev, | |||
2527 | if (x) { | 2527 | if (x) { |
2528 | int ret; | 2528 | int ret; |
2529 | __u8 *eth; | 2529 | __u8 *eth; |
2530 | struct iphdr *iph; | ||
2531 | |||
2530 | nhead = x->props.header_len - skb_headroom(skb); | 2532 | nhead = x->props.header_len - skb_headroom(skb); |
2531 | if (nhead > 0) { | 2533 | if (nhead > 0) { |
2532 | ret = pskb_expand_head(skb, nhead, 0, GFP_ATOMIC); | 2534 | ret = pskb_expand_head(skb, nhead, 0, GFP_ATOMIC); |
@@ -2548,6 +2550,11 @@ static int process_ipsec(struct pktgen_dev *pkt_dev, | |||
2548 | eth = (__u8 *) skb_push(skb, ETH_HLEN); | 2550 | eth = (__u8 *) skb_push(skb, ETH_HLEN); |
2549 | memcpy(eth, pkt_dev->hh, 12); | 2551 | memcpy(eth, pkt_dev->hh, 12); |
2550 | *(u16 *) ð[12] = protocol; | 2552 | *(u16 *) ð[12] = protocol; |
2553 | |||
2554 | /* Update IPv4 header len as well as checksum value */ | ||
2555 | iph = ip_hdr(skb); | ||
2556 | iph->tot_len = htons(skb->len - ETH_HLEN); | ||
2557 | ip_send_check(iph); | ||
2551 | } | 2558 | } |
2552 | } | 2559 | } |
2553 | return 1; | 2560 | return 1; |
diff --git a/net/hsr/hsr_framereg.c b/net/hsr/hsr_framereg.c index 003f5bb3acd2..4bdab1521878 100644 --- a/net/hsr/hsr_framereg.c +++ b/net/hsr/hsr_framereg.c | |||
@@ -288,7 +288,8 @@ void hsr_addr_subst_dest(struct hsr_priv *hsr_priv, struct ethhdr *ethhdr, | |||
288 | static bool seq_nr_after(u16 a, u16 b) | 288 | static bool seq_nr_after(u16 a, u16 b) |
289 | { | 289 | { |
290 | /* Remove inconsistency where | 290 | /* Remove inconsistency where |
291 | * seq_nr_after(a, b) == seq_nr_before(a, b) */ | 291 | * seq_nr_after(a, b) == seq_nr_before(a, b) |
292 | */ | ||
292 | if ((int) b - a == 32768) | 293 | if ((int) b - a == 32768) |
293 | return false; | 294 | return false; |
294 | 295 | ||
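The seq_nr_after() tie-break above deals with 16-bit sequence-number wrap-around: with u16 arithmetic, a and b can be exactly half the number space apart, in which case "after" and "before" would otherwise both evaluate true. A stand-alone approximation of that serial-number comparison, not a copy of the HSR code, looks like this:

    #include <stdio.h>
    #include <stdint.h>
    #include <stdbool.h>

    /* true if a is "after" b in 16-bit serial-number arithmetic */
    static bool seq_nr_after(uint16_t a, uint16_t b)
    {
            /* exactly half the space apart: resolve the tie as "not after" */
            if ((int)b - (int)a == 32768)
                    return false;

            return (int16_t)(b - a) < 0;
    }

    int main(void)
    {
            printf("%d\n", seq_nr_after(10, 5));      /* 1: 10 is after 5          */
            printf("%d\n", seq_nr_after(2, 65530));   /* 1: a=2 wrapped past b     */
            printf("%d\n", seq_nr_after(5, 32773));   /* 0: half the space apart   */
            return 0;
    }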
diff --git a/net/hsr/hsr_netlink.c b/net/hsr/hsr_netlink.c index 5325af85eea6..01a5261ac7a5 100644 --- a/net/hsr/hsr_netlink.c +++ b/net/hsr/hsr_netlink.c | |||
@@ -23,6 +23,8 @@ static const struct nla_policy hsr_policy[IFLA_HSR_MAX + 1] = { | |||
23 | [IFLA_HSR_SLAVE1] = { .type = NLA_U32 }, | 23 | [IFLA_HSR_SLAVE1] = { .type = NLA_U32 }, |
24 | [IFLA_HSR_SLAVE2] = { .type = NLA_U32 }, | 24 | [IFLA_HSR_SLAVE2] = { .type = NLA_U32 }, |
25 | [IFLA_HSR_MULTICAST_SPEC] = { .type = NLA_U8 }, | 25 | [IFLA_HSR_MULTICAST_SPEC] = { .type = NLA_U8 }, |
26 | [IFLA_HSR_SUPERVISION_ADDR] = { .type = NLA_BINARY, .len = ETH_ALEN }, | ||
27 | [IFLA_HSR_SEQ_NR] = { .type = NLA_U16 }, | ||
26 | }; | 28 | }; |
27 | 29 | ||
28 | 30 | ||
@@ -59,6 +61,31 @@ static int hsr_newlink(struct net *src_net, struct net_device *dev, | |||
59 | return hsr_dev_finalize(dev, link, multicast_spec); | 61 | return hsr_dev_finalize(dev, link, multicast_spec); |
60 | } | 62 | } |
61 | 63 | ||
64 | static int hsr_fill_info(struct sk_buff *skb, const struct net_device *dev) | ||
65 | { | ||
66 | struct hsr_priv *hsr_priv; | ||
67 | |||
68 | hsr_priv = netdev_priv(dev); | ||
69 | |||
70 | if (hsr_priv->slave[0]) | ||
71 | if (nla_put_u32(skb, IFLA_HSR_SLAVE1, hsr_priv->slave[0]->ifindex)) | ||
72 | goto nla_put_failure; | ||
73 | |||
74 | if (hsr_priv->slave[1]) | ||
75 | if (nla_put_u32(skb, IFLA_HSR_SLAVE2, hsr_priv->slave[1]->ifindex)) | ||
76 | goto nla_put_failure; | ||
77 | |||
78 | if (nla_put(skb, IFLA_HSR_SUPERVISION_ADDR, ETH_ALEN, | ||
79 | hsr_priv->sup_multicast_addr) || | ||
80 | nla_put_u16(skb, IFLA_HSR_SEQ_NR, hsr_priv->sequence_nr)) | ||
81 | goto nla_put_failure; | ||
82 | |||
83 | return 0; | ||
84 | |||
85 | nla_put_failure: | ||
86 | return -EMSGSIZE; | ||
87 | } | ||
88 | |||
62 | static struct rtnl_link_ops hsr_link_ops __read_mostly = { | 89 | static struct rtnl_link_ops hsr_link_ops __read_mostly = { |
63 | .kind = "hsr", | 90 | .kind = "hsr", |
64 | .maxtype = IFLA_HSR_MAX, | 91 | .maxtype = IFLA_HSR_MAX, |
@@ -66,6 +93,7 @@ static struct rtnl_link_ops hsr_link_ops __read_mostly = { | |||
66 | .priv_size = sizeof(struct hsr_priv), | 93 | .priv_size = sizeof(struct hsr_priv), |
67 | .setup = hsr_dev_setup, | 94 | .setup = hsr_dev_setup, |
68 | .newlink = hsr_newlink, | 95 | .newlink = hsr_newlink, |
96 | .fill_info = hsr_fill_info, | ||
69 | }; | 97 | }; |
70 | 98 | ||
71 | 99 | ||
diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c index 3f858266fa7e..ddf32a6bc415 100644 --- a/net/ipv4/ip_sockglue.c +++ b/net/ipv4/ip_sockglue.c | |||
@@ -386,7 +386,7 @@ void ip_local_error(struct sock *sk, int err, __be32 daddr, __be16 port, u32 inf | |||
386 | /* | 386 | /* |
387 | * Handle MSG_ERRQUEUE | 387 | * Handle MSG_ERRQUEUE |
388 | */ | 388 | */ |
389 | int ip_recv_error(struct sock *sk, struct msghdr *msg, int len) | 389 | int ip_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len) |
390 | { | 390 | { |
391 | struct sock_exterr_skb *serr; | 391 | struct sock_exterr_skb *serr; |
392 | struct sk_buff *skb, *skb2; | 392 | struct sk_buff *skb, *skb2; |
@@ -423,6 +423,7 @@ int ip_recv_error(struct sock *sk, struct msghdr *msg, int len) | |||
423 | serr->addr_offset); | 423 | serr->addr_offset); |
424 | sin->sin_port = serr->port; | 424 | sin->sin_port = serr->port; |
425 | memset(&sin->sin_zero, 0, sizeof(sin->sin_zero)); | 425 | memset(&sin->sin_zero, 0, sizeof(sin->sin_zero)); |
426 | *addr_len = sizeof(*sin); | ||
426 | } | 427 | } |
427 | 428 | ||
428 | memcpy(&errhdr.ee, &serr->ee, sizeof(struct sock_extended_err)); | 429 | memcpy(&errhdr.ee, &serr->ee, sizeof(struct sock_extended_err)); |
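The extra addr_len output parameter matters because a MSG_ERRQUEUE read returns the offender's address in msg_name, and user space relies on msg_namelen being filled in. A hedged user-space sketch of that usage (IPv4 with IP_RECVERR), with error handling trimmed and no particular application assumed:

    #include <stdio.h>
    #include <string.h>
    #include <sys/socket.h>
    #include <netinet/in.h>

    int main(void)
    {
            int fd = socket(AF_INET, SOCK_DGRAM, 0);
            int on = 1;
            struct sockaddr_in addr;
            char control[512];
            struct msghdr msg;

            /* Ask the kernel to queue ICMP errors on the socket's error queue. */
            setsockopt(fd, IPPROTO_IP, IP_RECVERR, &on, sizeof(on));

            memset(&msg, 0, sizeof(msg));
            msg.msg_name = &addr;
            msg.msg_namelen = sizeof(addr);
            msg.msg_control = control;
            msg.msg_controllen = sizeof(control);

            /* After a send fails asynchronously, drain the error queue. */
            if (recvmsg(fd, &msg, MSG_ERRQUEUE | MSG_DONTWAIT) >= 0) {
                    /* ip_recv_error() fills msg_name and must report its length */
                    printf("offender address length: %u\n", msg.msg_namelen);
            }
            return 0;
    }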
diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c index 876c6ca2d8f9..242e7f4ed6f4 100644 --- a/net/ipv4/ping.c +++ b/net/ipv4/ping.c | |||
@@ -772,7 +772,7 @@ int ping_v4_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, | |||
772 | err = PTR_ERR(rt); | 772 | err = PTR_ERR(rt); |
773 | rt = NULL; | 773 | rt = NULL; |
774 | if (err == -ENETUNREACH) | 774 | if (err == -ENETUNREACH) |
775 | IP_INC_STATS_BH(net, IPSTATS_MIB_OUTNOROUTES); | 775 | IP_INC_STATS(net, IPSTATS_MIB_OUTNOROUTES); |
776 | goto out; | 776 | goto out; |
777 | } | 777 | } |
778 | 778 | ||
@@ -841,10 +841,11 @@ int ping_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, | |||
841 | 841 | ||
842 | if (flags & MSG_ERRQUEUE) { | 842 | if (flags & MSG_ERRQUEUE) { |
843 | if (family == AF_INET) { | 843 | if (family == AF_INET) { |
844 | return ip_recv_error(sk, msg, len); | 844 | return ip_recv_error(sk, msg, len, addr_len); |
845 | #if IS_ENABLED(CONFIG_IPV6) | 845 | #if IS_ENABLED(CONFIG_IPV6) |
846 | } else if (family == AF_INET6) { | 846 | } else if (family == AF_INET6) { |
847 | return pingv6_ops.ipv6_recv_error(sk, msg, len); | 847 | return pingv6_ops.ipv6_recv_error(sk, msg, len, |
848 | addr_len); | ||
848 | #endif | 849 | #endif |
849 | } | 850 | } |
850 | } | 851 | } |
diff --git a/net/ipv4/protocol.c b/net/ipv4/protocol.c index ce848461acbb..46d6a1c923a8 100644 --- a/net/ipv4/protocol.c +++ b/net/ipv4/protocol.c | |||
@@ -31,10 +31,6 @@ | |||
31 | const struct net_protocol __rcu *inet_protos[MAX_INET_PROTOS] __read_mostly; | 31 | const struct net_protocol __rcu *inet_protos[MAX_INET_PROTOS] __read_mostly; |
32 | const struct net_offload __rcu *inet_offloads[MAX_INET_PROTOS] __read_mostly; | 32 | const struct net_offload __rcu *inet_offloads[MAX_INET_PROTOS] __read_mostly; |
33 | 33 | ||
34 | /* | ||
35 | * Add a protocol handler to the hash tables | ||
36 | */ | ||
37 | |||
38 | int inet_add_protocol(const struct net_protocol *prot, unsigned char protocol) | 34 | int inet_add_protocol(const struct net_protocol *prot, unsigned char protocol) |
39 | { | 35 | { |
40 | if (!prot->netns_ok) { | 36 | if (!prot->netns_ok) { |
@@ -55,10 +51,6 @@ int inet_add_offload(const struct net_offload *prot, unsigned char protocol) | |||
55 | } | 51 | } |
56 | EXPORT_SYMBOL(inet_add_offload); | 52 | EXPORT_SYMBOL(inet_add_offload); |
57 | 53 | ||
58 | /* | ||
59 | * Remove a protocol from the hash tables. | ||
60 | */ | ||
61 | |||
62 | int inet_del_protocol(const struct net_protocol *prot, unsigned char protocol) | 54 | int inet_del_protocol(const struct net_protocol *prot, unsigned char protocol) |
63 | { | 55 | { |
64 | int ret; | 56 | int ret; |
diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c index 5cb8ddb505ee..23c3e5b5bb53 100644 --- a/net/ipv4/raw.c +++ b/net/ipv4/raw.c | |||
@@ -697,7 +697,7 @@ static int raw_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, | |||
697 | goto out; | 697 | goto out; |
698 | 698 | ||
699 | if (flags & MSG_ERRQUEUE) { | 699 | if (flags & MSG_ERRQUEUE) { |
700 | err = ip_recv_error(sk, msg, len); | 700 | err = ip_recv_error(sk, msg, len, addr_len); |
701 | goto out; | 701 | goto out; |
702 | } | 702 | } |
703 | 703 | ||
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index 59a6f8b90cd9..067213924751 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c | |||
@@ -177,7 +177,7 @@ int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) | |||
177 | if (IS_ERR(rt)) { | 177 | if (IS_ERR(rt)) { |
178 | err = PTR_ERR(rt); | 178 | err = PTR_ERR(rt); |
179 | if (err == -ENETUNREACH) | 179 | if (err == -ENETUNREACH) |
180 | IP_INC_STATS_BH(sock_net(sk), IPSTATS_MIB_OUTNOROUTES); | 180 | IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTNOROUTES); |
181 | return err; | 181 | return err; |
182 | } | 182 | } |
183 | 183 | ||
diff --git a/net/ipv4/tcp_memcontrol.c b/net/ipv4/tcp_memcontrol.c index 03e9154f7e68..269a89ecd2f4 100644 --- a/net/ipv4/tcp_memcontrol.c +++ b/net/ipv4/tcp_memcontrol.c | |||
@@ -60,7 +60,6 @@ EXPORT_SYMBOL(tcp_destroy_cgroup); | |||
60 | static int tcp_update_limit(struct mem_cgroup *memcg, u64 val) | 60 | static int tcp_update_limit(struct mem_cgroup *memcg, u64 val) |
61 | { | 61 | { |
62 | struct cg_proto *cg_proto; | 62 | struct cg_proto *cg_proto; |
63 | u64 old_lim; | ||
64 | int i; | 63 | int i; |
65 | int ret; | 64 | int ret; |
66 | 65 | ||
@@ -71,7 +70,6 @@ static int tcp_update_limit(struct mem_cgroup *memcg, u64 val) | |||
71 | if (val > RES_COUNTER_MAX) | 70 | if (val > RES_COUNTER_MAX) |
72 | val = RES_COUNTER_MAX; | 71 | val = RES_COUNTER_MAX; |
73 | 72 | ||
74 | old_lim = res_counter_read_u64(&cg_proto->memory_allocated, RES_LIMIT); | ||
75 | ret = res_counter_set_limit(&cg_proto->memory_allocated, val); | 73 | ret = res_counter_set_limit(&cg_proto->memory_allocated, val); |
76 | if (ret) | 74 | if (ret) |
77 | return ret; | 75 | return ret; |
diff --git a/net/ipv4/tcp_offload.c b/net/ipv4/tcp_offload.c index a2b68a108eae..05606353c7e7 100644 --- a/net/ipv4/tcp_offload.c +++ b/net/ipv4/tcp_offload.c | |||
@@ -274,33 +274,32 @@ static struct sk_buff **tcp4_gro_receive(struct sk_buff **head, struct sk_buff * | |||
274 | { | 274 | { |
275 | const struct iphdr *iph = skb_gro_network_header(skb); | 275 | const struct iphdr *iph = skb_gro_network_header(skb); |
276 | __wsum wsum; | 276 | __wsum wsum; |
277 | __sum16 sum; | 277 | |
278 | /* Don't bother verifying checksum if we're going to flush anyway. */ | ||
279 | if (NAPI_GRO_CB(skb)->flush) | ||
280 | goto skip_csum; | ||
281 | |||
282 | wsum = skb->csum; | ||
278 | 283 | ||
279 | switch (skb->ip_summed) { | 284 | switch (skb->ip_summed) { |
285 | case CHECKSUM_NONE: | ||
286 | wsum = skb_checksum(skb, skb_gro_offset(skb), skb_gro_len(skb), | ||
287 | 0); | ||
288 | |||
289 | /* fall through */ | ||
290 | |||
280 | case CHECKSUM_COMPLETE: | 291 | case CHECKSUM_COMPLETE: |
281 | if (!tcp_v4_check(skb_gro_len(skb), iph->saddr, iph->daddr, | 292 | if (!tcp_v4_check(skb_gro_len(skb), iph->saddr, iph->daddr, |
282 | skb->csum)) { | 293 | wsum)) { |
283 | skb->ip_summed = CHECKSUM_UNNECESSARY; | 294 | skb->ip_summed = CHECKSUM_UNNECESSARY; |
284 | break; | 295 | break; |
285 | } | 296 | } |
286 | flush: | 297 | |
287 | NAPI_GRO_CB(skb)->flush = 1; | 298 | NAPI_GRO_CB(skb)->flush = 1; |
288 | return NULL; | 299 | return NULL; |
289 | |||
290 | case CHECKSUM_NONE: | ||
291 | wsum = csum_tcpudp_nofold(iph->saddr, iph->daddr, | ||
292 | skb_gro_len(skb), IPPROTO_TCP, 0); | ||
293 | sum = csum_fold(skb_checksum(skb, | ||
294 | skb_gro_offset(skb), | ||
295 | skb_gro_len(skb), | ||
296 | wsum)); | ||
297 | if (sum) | ||
298 | goto flush; | ||
299 | |||
300 | skb->ip_summed = CHECKSUM_UNNECESSARY; | ||
301 | break; | ||
302 | } | 300 | } |
303 | 301 | ||
302 | skip_csum: | ||
304 | return tcp_gro_receive(head, skb); | 303 | return tcp_gro_receive(head, skb); |
305 | } | 304 | } |
306 | 305 | ||
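The rewritten switch folds the CHECKSUM_NONE case into the CHECKSUM_COMPLETE path: when the hardware supplied no checksum, one is computed over the GRO region first, and the same pseudo-header validation then runs for both cases. A rough user-space model of that fall-through shape follows; the checksum routine, validation helper and constants are stand-ins, not the kernel helpers.

    #include <stdio.h>
    #include <stdint.h>
    #include <stddef.h>

    enum { CHECKSUM_NONE, CHECKSUM_COMPLETE, CHECKSUM_UNNECESSARY };

    /* Stand-in for skb_checksum(): trivial byte sum over the payload. */
    static uint32_t fake_checksum(const uint8_t *data, size_t len)
    {
            uint32_t sum = 0;
            size_t i;

            for (i = 0; i < len; i++)
                    sum += data[i];
            return sum;
    }

    /* Stand-in for tcp_v4_check(): returns 0 when the checksum validates. */
    static int fake_validate(uint32_t wsum, uint32_t expected)
    {
            return wsum == expected ? 0 : 1;
    }

    static int gro_checksum(int ip_summed, const uint8_t *data, size_t len,
                            uint32_t hw_csum, uint32_t expected)
    {
            uint32_t wsum = hw_csum;

            switch (ip_summed) {
            case CHECKSUM_NONE:
                    /* no hardware checksum: compute it, then fall through */
                    wsum = fake_checksum(data, len);
                    /* fall through */
            case CHECKSUM_COMPLETE:
                    if (!fake_validate(wsum, expected))
                            return CHECKSUM_UNNECESSARY; /* verified once, reuse later */
                    return -1;                           /* bad checksum: flush */
            }
            return ip_summed;
    }

    int main(void)
    {
            uint8_t pkt[4] = { 1, 2, 3, 4 };

            printf("%d\n", gro_checksum(CHECKSUM_NONE, pkt, sizeof(pkt), 0, 10));
            printf("%d\n", gro_checksum(CHECKSUM_COMPLETE, pkt, sizeof(pkt), 10, 10));
            return 0;
    }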
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c index 5944d7d668dd..44f6a20fa29d 100644 --- a/net/ipv4/udp.c +++ b/net/ipv4/udp.c | |||
@@ -999,7 +999,7 @@ int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, | |||
999 | err = PTR_ERR(rt); | 999 | err = PTR_ERR(rt); |
1000 | rt = NULL; | 1000 | rt = NULL; |
1001 | if (err == -ENETUNREACH) | 1001 | if (err == -ENETUNREACH) |
1002 | IP_INC_STATS_BH(net, IPSTATS_MIB_OUTNOROUTES); | 1002 | IP_INC_STATS(net, IPSTATS_MIB_OUTNOROUTES); |
1003 | goto out; | 1003 | goto out; |
1004 | } | 1004 | } |
1005 | 1005 | ||
@@ -1098,6 +1098,9 @@ int udp_sendpage(struct sock *sk, struct page *page, int offset, | |||
1098 | struct udp_sock *up = udp_sk(sk); | 1098 | struct udp_sock *up = udp_sk(sk); |
1099 | int ret; | 1099 | int ret; |
1100 | 1100 | ||
1101 | if (flags & MSG_SENDPAGE_NOTLAST) | ||
1102 | flags |= MSG_MORE; | ||
1103 | |||
1101 | if (!up->pending) { | 1104 | if (!up->pending) { |
1102 | struct msghdr msg = { .msg_flags = flags|MSG_MORE }; | 1105 | struct msghdr msg = { .msg_flags = flags|MSG_MORE }; |
1103 | 1106 | ||
@@ -1236,7 +1239,7 @@ int udp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, | |||
1236 | bool slow; | 1239 | bool slow; |
1237 | 1240 | ||
1238 | if (flags & MSG_ERRQUEUE) | 1241 | if (flags & MSG_ERRQUEUE) |
1239 | return ip_recv_error(sk, msg, len); | 1242 | return ip_recv_error(sk, msg, len, addr_len); |
1240 | 1243 | ||
1241 | try_again: | 1244 | try_again: |
1242 | skb = __skb_recv_datagram(sk, flags | (noblock ? MSG_DONTWAIT : 0), | 1245 | skb = __skb_recv_datagram(sk, flags | (noblock ? MSG_DONTWAIT : 0), |
diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c index a454b0ff57c7..8dfe1f4d3c1a 100644 --- a/net/ipv6/datagram.c +++ b/net/ipv6/datagram.c | |||
@@ -318,7 +318,7 @@ void ipv6_local_rxpmtu(struct sock *sk, struct flowi6 *fl6, u32 mtu) | |||
318 | /* | 318 | /* |
319 | * Handle MSG_ERRQUEUE | 319 | * Handle MSG_ERRQUEUE |
320 | */ | 320 | */ |
321 | int ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len) | 321 | int ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len) |
322 | { | 322 | { |
323 | struct ipv6_pinfo *np = inet6_sk(sk); | 323 | struct ipv6_pinfo *np = inet6_sk(sk); |
324 | struct sock_exterr_skb *serr; | 324 | struct sock_exterr_skb *serr; |
@@ -369,6 +369,7 @@ int ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len) | |||
369 | &sin->sin6_addr); | 369 | &sin->sin6_addr); |
370 | sin->sin6_scope_id = 0; | 370 | sin->sin6_scope_id = 0; |
371 | } | 371 | } |
372 | *addr_len = sizeof(*sin); | ||
372 | } | 373 | } |
373 | 374 | ||
374 | memcpy(&errhdr.ee, &serr->ee, sizeof(struct sock_extended_err)); | 375 | memcpy(&errhdr.ee, &serr->ee, sizeof(struct sock_extended_err)); |
@@ -377,6 +378,7 @@ int ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len) | |||
377 | if (serr->ee.ee_origin != SO_EE_ORIGIN_LOCAL) { | 378 | if (serr->ee.ee_origin != SO_EE_ORIGIN_LOCAL) { |
378 | sin->sin6_family = AF_INET6; | 379 | sin->sin6_family = AF_INET6; |
379 | sin->sin6_flowinfo = 0; | 380 | sin->sin6_flowinfo = 0; |
381 | sin->sin6_port = 0; | ||
380 | if (skb->protocol == htons(ETH_P_IPV6)) { | 382 | if (skb->protocol == htons(ETH_P_IPV6)) { |
381 | sin->sin6_addr = ipv6_hdr(skb)->saddr; | 383 | sin->sin6_addr = ipv6_hdr(skb)->saddr; |
382 | if (np->rxopt.all) | 384 | if (np->rxopt.all) |
@@ -423,7 +425,8 @@ EXPORT_SYMBOL_GPL(ipv6_recv_error); | |||
423 | /* | 425 | /* |
424 | * Handle IPV6_RECVPATHMTU | 426 | * Handle IPV6_RECVPATHMTU |
425 | */ | 427 | */ |
426 | int ipv6_recv_rxpmtu(struct sock *sk, struct msghdr *msg, int len) | 428 | int ipv6_recv_rxpmtu(struct sock *sk, struct msghdr *msg, int len, |
429 | int *addr_len) | ||
427 | { | 430 | { |
428 | struct ipv6_pinfo *np = inet6_sk(sk); | 431 | struct ipv6_pinfo *np = inet6_sk(sk); |
429 | struct sk_buff *skb; | 432 | struct sk_buff *skb; |
@@ -457,6 +460,7 @@ int ipv6_recv_rxpmtu(struct sock *sk, struct msghdr *msg, int len) | |||
457 | sin->sin6_port = 0; | 460 | sin->sin6_port = 0; |
458 | sin->sin6_scope_id = mtu_info.ip6m_addr.sin6_scope_id; | 461 | sin->sin6_scope_id = mtu_info.ip6m_addr.sin6_scope_id; |
459 | sin->sin6_addr = mtu_info.ip6m_addr.sin6_addr; | 462 | sin->sin6_addr = mtu_info.ip6m_addr.sin6_addr; |
463 | *addr_len = sizeof(*sin); | ||
460 | } | 464 | } |
461 | 465 | ||
462 | put_cmsg(msg, SOL_IPV6, IPV6_PATHMTU, sizeof(mtu_info), &mtu_info); | 466 | put_cmsg(msg, SOL_IPV6, IPV6_PATHMTU, sizeof(mtu_info), &mtu_info); |
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c index 59df872e2f4d..4acdb63495db 100644 --- a/net/ipv6/ip6_output.c +++ b/net/ipv6/ip6_output.c | |||
@@ -116,8 +116,8 @@ static int ip6_finish_output2(struct sk_buff *skb) | |||
116 | } | 116 | } |
117 | rcu_read_unlock_bh(); | 117 | rcu_read_unlock_bh(); |
118 | 118 | ||
119 | IP6_INC_STATS_BH(dev_net(dst->dev), | 119 | IP6_INC_STATS(dev_net(dst->dev), |
120 | ip6_dst_idev(dst), IPSTATS_MIB_OUTNOROUTES); | 120 | ip6_dst_idev(dst), IPSTATS_MIB_OUTNOROUTES); |
121 | kfree_skb(skb); | 121 | kfree_skb(skb); |
122 | return -EINVAL; | 122 | return -EINVAL; |
123 | } | 123 | } |
diff --git a/net/ipv6/ping.c b/net/ipv6/ping.c index 8815e31a87fe..a83243c3d656 100644 --- a/net/ipv6/ping.c +++ b/net/ipv6/ping.c | |||
@@ -57,7 +57,8 @@ static struct inet_protosw pingv6_protosw = { | |||
57 | 57 | ||
58 | 58 | ||
59 | /* Compatibility glue so we can support IPv6 when it's compiled as a module */ | 59 | /* Compatibility glue so we can support IPv6 when it's compiled as a module */ |
60 | static int dummy_ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len) | 60 | static int dummy_ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len, |
61 | int *addr_len) | ||
61 | { | 62 | { |
62 | return -EAFNOSUPPORT; | 63 | return -EAFNOSUPPORT; |
63 | } | 64 | } |
diff --git a/net/ipv6/protocol.c b/net/ipv6/protocol.c index 22d1bd4670da..e048cf1bb6a2 100644 --- a/net/ipv6/protocol.c +++ b/net/ipv6/protocol.c | |||
@@ -36,10 +36,6 @@ int inet6_add_protocol(const struct inet6_protocol *prot, unsigned char protocol | |||
36 | } | 36 | } |
37 | EXPORT_SYMBOL(inet6_add_protocol); | 37 | EXPORT_SYMBOL(inet6_add_protocol); |
38 | 38 | ||
39 | /* | ||
40 | * Remove a protocol from the hash tables. | ||
41 | */ | ||
42 | |||
43 | int inet6_del_protocol(const struct inet6_protocol *prot, unsigned char protocol) | 39 | int inet6_del_protocol(const struct inet6_protocol *prot, unsigned char protocol) |
44 | { | 40 | { |
45 | int ret; | 41 | int ret; |
diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c index e24ff1df0401..7fb4e14c467f 100644 --- a/net/ipv6/raw.c +++ b/net/ipv6/raw.c | |||
@@ -466,10 +466,10 @@ static int rawv6_recvmsg(struct kiocb *iocb, struct sock *sk, | |||
466 | return -EOPNOTSUPP; | 466 | return -EOPNOTSUPP; |
467 | 467 | ||
468 | if (flags & MSG_ERRQUEUE) | 468 | if (flags & MSG_ERRQUEUE) |
469 | return ipv6_recv_error(sk, msg, len); | 469 | return ipv6_recv_error(sk, msg, len, addr_len); |
470 | 470 | ||
471 | if (np->rxpmtu && np->rxopt.bits.rxpmtu) | 471 | if (np->rxpmtu && np->rxopt.bits.rxpmtu) |
472 | return ipv6_recv_rxpmtu(sk, msg, len); | 472 | return ipv6_recv_rxpmtu(sk, msg, len, addr_len); |
473 | 473 | ||
474 | skb = skb_recv_datagram(sk, flags, noblock, &err); | 474 | skb = skb_recv_datagram(sk, flags, noblock, &err); |
475 | if (!skb) | 475 | if (!skb) |
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c index 1b4a4a953675..366fbba3359a 100644 --- a/net/ipv6/sit.c +++ b/net/ipv6/sit.c | |||
@@ -478,14 +478,44 @@ static void ipip6_tunnel_uninit(struct net_device *dev) | |||
478 | dev_put(dev); | 478 | dev_put(dev); |
479 | } | 479 | } |
480 | 480 | ||
481 | /* Generate icmpv6 with type/code ICMPV6_DEST_UNREACH/ICMPV6_ADDR_UNREACH | ||
482 | * if sufficient data bytes are available | ||
483 | */ | ||
484 | static int ipip6_err_gen_icmpv6_unreach(struct sk_buff *skb) | ||
485 | { | ||
486 | const struct iphdr *iph = (const struct iphdr *) skb->data; | ||
487 | struct rt6_info *rt; | ||
488 | struct sk_buff *skb2; | ||
489 | |||
490 | if (!pskb_may_pull(skb, iph->ihl * 4 + sizeof(struct ipv6hdr) + 8)) | ||
491 | return 1; | ||
492 | |||
493 | skb2 = skb_clone(skb, GFP_ATOMIC); | ||
494 | |||
495 | if (!skb2) | ||
496 | return 1; | ||
497 | |||
498 | skb_dst_drop(skb2); | ||
499 | skb_pull(skb2, iph->ihl * 4); | ||
500 | skb_reset_network_header(skb2); | ||
501 | |||
502 | rt = rt6_lookup(dev_net(skb->dev), &ipv6_hdr(skb2)->saddr, NULL, 0, 0); | ||
503 | |||
504 | if (rt && rt->dst.dev) | ||
505 | skb2->dev = rt->dst.dev; | ||
506 | |||
507 | icmpv6_send(skb2, ICMPV6_DEST_UNREACH, ICMPV6_ADDR_UNREACH, 0); | ||
508 | |||
509 | if (rt) | ||
510 | ip6_rt_put(rt); | ||
511 | |||
512 | kfree_skb(skb2); | ||
513 | |||
514 | return 0; | ||
515 | } | ||
481 | 516 | ||
482 | static int ipip6_err(struct sk_buff *skb, u32 info) | 517 | static int ipip6_err(struct sk_buff *skb, u32 info) |
483 | { | 518 | { |
484 | |||
485 | /* All the routers (except for Linux) return only | ||
486 | 8 bytes of packet payload. It means, that precise relaying of | ||
487 | ICMP in the real Internet is absolutely infeasible. | ||
488 | */ | ||
489 | const struct iphdr *iph = (const struct iphdr *)skb->data; | 519 | const struct iphdr *iph = (const struct iphdr *)skb->data; |
490 | const int type = icmp_hdr(skb)->type; | 520 | const int type = icmp_hdr(skb)->type; |
491 | const int code = icmp_hdr(skb)->code; | 521 | const int code = icmp_hdr(skb)->code; |
@@ -500,7 +530,6 @@ static int ipip6_err(struct sk_buff *skb, u32 info) | |||
500 | case ICMP_DEST_UNREACH: | 530 | case ICMP_DEST_UNREACH: |
501 | switch (code) { | 531 | switch (code) { |
502 | case ICMP_SR_FAILED: | 532 | case ICMP_SR_FAILED: |
503 | case ICMP_PORT_UNREACH: | ||
504 | /* Impossible event. */ | 533 | /* Impossible event. */ |
505 | return 0; | 534 | return 0; |
506 | default: | 535 | default: |
@@ -545,6 +574,9 @@ static int ipip6_err(struct sk_buff *skb, u32 info) | |||
545 | goto out; | 574 | goto out; |
546 | 575 | ||
547 | err = 0; | 576 | err = 0; |
577 | if (!ipip6_err_gen_icmpv6_unreach(skb)) | ||
578 | goto out; | ||
579 | |||
548 | if (t->parms.iph.ttl == 0 && type == ICMP_TIME_EXCEEDED) | 580 | if (t->parms.iph.ttl == 0 && type == ICMP_TIME_EXCEEDED) |
549 | goto out; | 581 | goto out; |
550 | 582 | ||
@@ -919,7 +951,7 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb, | |||
919 | if (!new_skb) { | 951 | if (!new_skb) { |
920 | ip_rt_put(rt); | 952 | ip_rt_put(rt); |
921 | dev->stats.tx_dropped++; | 953 | dev->stats.tx_dropped++; |
922 | dev_kfree_skb(skb); | 954 | kfree_skb(skb); |
923 | return NETDEV_TX_OK; | 955 | return NETDEV_TX_OK; |
924 | } | 956 | } |
925 | if (skb->sk) | 957 | if (skb->sk) |
@@ -945,7 +977,7 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb, | |||
945 | tx_error_icmp: | 977 | tx_error_icmp: |
946 | dst_link_failure(skb); | 978 | dst_link_failure(skb); |
947 | tx_error: | 979 | tx_error: |
948 | dev_kfree_skb(skb); | 980 | kfree_skb(skb); |
949 | out: | 981 | out: |
950 | dev->stats.tx_errors++; | 982 | dev->stats.tx_errors++; |
951 | return NETDEV_TX_OK; | 983 | return NETDEV_TX_OK; |
@@ -985,7 +1017,7 @@ static netdev_tx_t sit_tunnel_xmit(struct sk_buff *skb, | |||
985 | 1017 | ||
986 | tx_err: | 1018 | tx_err: |
987 | dev->stats.tx_errors++; | 1019 | dev->stats.tx_errors++; |
988 | dev_kfree_skb(skb); | 1020 | kfree_skb(skb); |
989 | return NETDEV_TX_OK; | 1021 | return NETDEV_TX_OK; |
990 | 1022 | ||
991 | } | 1023 | } |
diff --git a/net/ipv6/tcpv6_offload.c b/net/ipv6/tcpv6_offload.c index c1097c798900..6d18157dc32c 100644 --- a/net/ipv6/tcpv6_offload.c +++ b/net/ipv6/tcpv6_offload.c | |||
@@ -37,34 +37,32 @@ static struct sk_buff **tcp6_gro_receive(struct sk_buff **head, | |||
37 | { | 37 | { |
38 | const struct ipv6hdr *iph = skb_gro_network_header(skb); | 38 | const struct ipv6hdr *iph = skb_gro_network_header(skb); |
39 | __wsum wsum; | 39 | __wsum wsum; |
40 | __sum16 sum; | 40 | |
41 | /* Don't bother verifying checksum if we're going to flush anyway. */ | ||
42 | if (NAPI_GRO_CB(skb)->flush) | ||
43 | goto skip_csum; | ||
44 | |||
45 | wsum = skb->csum; | ||
41 | 46 | ||
42 | switch (skb->ip_summed) { | 47 | switch (skb->ip_summed) { |
48 | case CHECKSUM_NONE: | ||
49 | wsum = skb_checksum(skb, skb_gro_offset(skb), skb_gro_len(skb), | ||
50 | wsum); | ||
51 | |||
52 | /* fall through */ | ||
53 | |||
43 | case CHECKSUM_COMPLETE: | 54 | case CHECKSUM_COMPLETE: |
44 | if (!tcp_v6_check(skb_gro_len(skb), &iph->saddr, &iph->daddr, | 55 | if (!tcp_v6_check(skb_gro_len(skb), &iph->saddr, &iph->daddr, |
45 | skb->csum)) { | 56 | wsum)) { |
46 | skb->ip_summed = CHECKSUM_UNNECESSARY; | 57 | skb->ip_summed = CHECKSUM_UNNECESSARY; |
47 | break; | 58 | break; |
48 | } | 59 | } |
49 | flush: | 60 | |
50 | NAPI_GRO_CB(skb)->flush = 1; | 61 | NAPI_GRO_CB(skb)->flush = 1; |
51 | return NULL; | 62 | return NULL; |
52 | |||
53 | case CHECKSUM_NONE: | ||
54 | wsum = ~csum_unfold(csum_ipv6_magic(&iph->saddr, &iph->daddr, | ||
55 | skb_gro_len(skb), | ||
56 | IPPROTO_TCP, 0)); | ||
57 | sum = csum_fold(skb_checksum(skb, | ||
58 | skb_gro_offset(skb), | ||
59 | skb_gro_len(skb), | ||
60 | wsum)); | ||
61 | if (sum) | ||
62 | goto flush; | ||
63 | |||
64 | skb->ip_summed = CHECKSUM_UNNECESSARY; | ||
65 | break; | ||
66 | } | 63 | } |
67 | 64 | ||
65 | skip_csum: | ||
68 | return tcp_gro_receive(head, skb); | 66 | return tcp_gro_receive(head, skb); |
69 | } | 67 | } |
70 | 68 | ||
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c index 81eb8cf8389b..bcd5699313c3 100644 --- a/net/ipv6/udp.c +++ b/net/ipv6/udp.c | |||
@@ -393,10 +393,10 @@ int udpv6_recvmsg(struct kiocb *iocb, struct sock *sk, | |||
393 | bool slow; | 393 | bool slow; |
394 | 394 | ||
395 | if (flags & MSG_ERRQUEUE) | 395 | if (flags & MSG_ERRQUEUE) |
396 | return ipv6_recv_error(sk, msg, len); | 396 | return ipv6_recv_error(sk, msg, len, addr_len); |
397 | 397 | ||
398 | if (np->rxpmtu && np->rxopt.bits.rxpmtu) | 398 | if (np->rxpmtu && np->rxopt.bits.rxpmtu) |
399 | return ipv6_recv_rxpmtu(sk, msg, len); | 399 | return ipv6_recv_rxpmtu(sk, msg, len, addr_len); |
400 | 400 | ||
401 | try_again: | 401 | try_again: |
402 | skb = __skb_recv_datagram(sk, flags | (noblock ? MSG_DONTWAIT : 0), | 402 | skb = __skb_recv_datagram(sk, flags | (noblock ? MSG_DONTWAIT : 0), |
diff --git a/net/l2tp/l2tp_ip6.c b/net/l2tp/l2tp_ip6.c index cfd65304be60..d9b437e55007 100644 --- a/net/l2tp/l2tp_ip6.c +++ b/net/l2tp/l2tp_ip6.c | |||
@@ -665,7 +665,7 @@ static int l2tp_ip6_recvmsg(struct kiocb *iocb, struct sock *sk, | |||
665 | *addr_len = sizeof(*lsa); | 665 | *addr_len = sizeof(*lsa); |
666 | 666 | ||
667 | if (flags & MSG_ERRQUEUE) | 667 | if (flags & MSG_ERRQUEUE) |
668 | return ipv6_recv_error(sk, msg, len); | 668 | return ipv6_recv_error(sk, msg, len, addr_len); |
669 | 669 | ||
670 | skb = skb_recv_datagram(sk, flags, noblock, &err); | 670 | skb = skb_recv_datagram(sk, flags, noblock, &err); |
671 | if (!skb) | 671 | if (!skb) |
diff --git a/net/netlink/genetlink.c b/net/netlink/genetlink.c index 4518a57aa5fe..713671ae45af 100644 --- a/net/netlink/genetlink.c +++ b/net/netlink/genetlink.c | |||
@@ -74,9 +74,12 @@ static struct list_head family_ht[GENL_FAM_TAB_SIZE]; | |||
74 | * Bit 17 is marked as already used since the VFS quota code | 74 | * Bit 17 is marked as already used since the VFS quota code |
75 | * also abused this API and relied on family == group ID, we | 75 | * also abused this API and relied on family == group ID, we |
76 | * cater to that by giving it a static family and group ID. | 76 | * cater to that by giving it a static family and group ID. |
77 | * Bit 18 is marked as already used since the PMCRAID driver | ||
78 | * did the same thing as the VFS quota code (maybe copied?) | ||
77 | */ | 79 | */ |
78 | static unsigned long mc_group_start = 0x3 | BIT(GENL_ID_CTRL) | | 80 | static unsigned long mc_group_start = 0x3 | BIT(GENL_ID_CTRL) | |
79 | BIT(GENL_ID_VFS_DQUOT); | 81 | BIT(GENL_ID_VFS_DQUOT) | |
82 | BIT(GENL_ID_PMCRAID); | ||
80 | static unsigned long *mc_groups = &mc_group_start; | 83 | static unsigned long *mc_groups = &mc_group_start; |
81 | static unsigned long mc_groups_longs = 1; | 84 | static unsigned long mc_groups_longs = 1; |
82 | 85 | ||
@@ -139,6 +142,7 @@ static u16 genl_generate_id(void) | |||
139 | 142 | ||
140 | for (i = 0; i <= GENL_MAX_ID - GENL_MIN_ID; i++) { | 143 | for (i = 0; i <= GENL_MAX_ID - GENL_MIN_ID; i++) { |
141 | if (id_gen_idx != GENL_ID_VFS_DQUOT && | 144 | if (id_gen_idx != GENL_ID_VFS_DQUOT && |
145 | id_gen_idx != GENL_ID_PMCRAID && | ||
142 | !genl_family_find_byid(id_gen_idx)) | 146 | !genl_family_find_byid(id_gen_idx)) |
143 | return id_gen_idx; | 147 | return id_gen_idx; |
144 | if (++id_gen_idx > GENL_MAX_ID) | 148 | if (++id_gen_idx > GENL_MAX_ID) |
@@ -214,7 +218,7 @@ static int genl_validate_assign_mc_groups(struct genl_family *family) | |||
214 | { | 218 | { |
215 | int first_id; | 219 | int first_id; |
216 | int n_groups = family->n_mcgrps; | 220 | int n_groups = family->n_mcgrps; |
217 | int err, i; | 221 | int err = 0, i; |
218 | bool groups_allocated = false; | 222 | bool groups_allocated = false; |
219 | 223 | ||
220 | if (!n_groups) | 224 | if (!n_groups) |
@@ -236,9 +240,12 @@ static int genl_validate_assign_mc_groups(struct genl_family *family) | |||
236 | } else if (strcmp(family->name, "NET_DM") == 0) { | 240 | } else if (strcmp(family->name, "NET_DM") == 0) { |
237 | first_id = 1; | 241 | first_id = 1; |
238 | BUG_ON(n_groups != 1); | 242 | BUG_ON(n_groups != 1); |
239 | } else if (strcmp(family->name, "VFS_DQUOT") == 0) { | 243 | } else if (family->id == GENL_ID_VFS_DQUOT) { |
240 | first_id = GENL_ID_VFS_DQUOT; | 244 | first_id = GENL_ID_VFS_DQUOT; |
241 | BUG_ON(n_groups != 1); | 245 | BUG_ON(n_groups != 1); |
246 | } else if (family->id == GENL_ID_PMCRAID) { | ||
247 | first_id = GENL_ID_PMCRAID; | ||
248 | BUG_ON(n_groups != 1); | ||
242 | } else { | 249 | } else { |
243 | groups_allocated = true; | 250 | groups_allocated = true; |
244 | err = genl_allocate_reserve_groups(n_groups, &first_id); | 251 | err = genl_allocate_reserve_groups(n_groups, &first_id); |
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c index ac27c86ef6d1..ba2548bd85bf 100644 --- a/net/packet/af_packet.c +++ b/net/packet/af_packet.c | |||
@@ -439,9 +439,9 @@ static void prb_shutdown_retire_blk_timer(struct packet_sock *po, | |||
439 | 439 | ||
440 | pkc = tx_ring ? &po->tx_ring.prb_bdqc : &po->rx_ring.prb_bdqc; | 440 | pkc = tx_ring ? &po->tx_ring.prb_bdqc : &po->rx_ring.prb_bdqc; |
441 | 441 | ||
442 | spin_lock(&rb_queue->lock); | 442 | spin_lock_bh(&rb_queue->lock); |
443 | pkc->delete_blk_timer = 1; | 443 | pkc->delete_blk_timer = 1; |
444 | spin_unlock(&rb_queue->lock); | 444 | spin_unlock_bh(&rb_queue->lock); |
445 | 445 | ||
446 | prb_del_retire_blk_timer(pkc); | 446 | prb_del_retire_blk_timer(pkc); |
447 | } | 447 | } |
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c index 75c94e59a3bd..bccd52b36e97 100644 --- a/net/sched/sch_netem.c +++ b/net/sched/sch_netem.c | |||
@@ -215,10 +215,10 @@ static bool loss_4state(struct netem_sched_data *q) | |||
215 | if (rnd < clg->a4) { | 215 | if (rnd < clg->a4) { |
216 | clg->state = 4; | 216 | clg->state = 4; |
217 | return true; | 217 | return true; |
218 | } else if (clg->a4 < rnd && rnd < clg->a1) { | 218 | } else if (clg->a4 < rnd && rnd < clg->a1 + clg->a4) { |
219 | clg->state = 3; | 219 | clg->state = 3; |
220 | return true; | 220 | return true; |
221 | } else if (clg->a1 < rnd) | 221 | } else if (clg->a1 + clg->a4 < rnd) |
222 | clg->state = 1; | 222 | clg->state = 1; |
223 | 223 | ||
224 | break; | 224 | break; |
@@ -268,10 +268,11 @@ static bool loss_gilb_ell(struct netem_sched_data *q) | |||
268 | clg->state = 2; | 268 | clg->state = 2; |
269 | if (net_random() < clg->a4) | 269 | if (net_random() < clg->a4) |
270 | return true; | 270 | return true; |
271 | break; | ||
271 | case 2: | 272 | case 2: |
272 | if (net_random() < clg->a2) | 273 | if (net_random() < clg->a2) |
273 | clg->state = 1; | 274 | clg->state = 1; |
274 | if (clg->a3 > net_random()) | 275 | if (net_random() > clg->a3) |
275 | return true; | 276 | return true; |
276 | } | 277 | } |
277 | 278 | ||
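The loss_4state() change above adjusts how a single uniform random draw is partitioned between the two loss transitions out of the good state. Assuming a4 and a1 are the per-packet probabilities of an isolated loss (state 4) and of entering a burst-loss period (state 3), as the surrounding code suggests, the draw should fall into adjacent bands of width a4 and a1; comparing against a1 alone made the second band too narrow. A small stand-alone illustration with made-up probability values:

    #include <stdio.h>

    /* Classify one uniform draw in [0,1) into the state-1 transitions.
     * Band widths: [0, a4) -> state 4, [a4, a4 + a1) -> state 3, rest -> state 1.
     */
    static int next_state(double rnd, double a1, double a4)
    {
            if (rnd < a4)
                    return 4;               /* isolated loss in the good period */
            if (rnd < a1 + a4)
                    return 3;               /* enter a burst-loss period */
            return 1;                       /* stay in the good period */
    }

    int main(void)
    {
            double a1 = 0.01, a4 = 0.005;   /* hypothetical probabilities */
            double samples[] = { 0.004, 0.012, 0.5 };
            int i;

            for (i = 0; i < 3; i++)
                    printf("rnd=%.3f -> state %d\n",
                           samples[i], next_state(samples[i], a1, a4));
            return 0;
    }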
diff --git a/net/sched/sch_tbf.c b/net/sched/sch_tbf.c index 68f98595819c..a6090051c5db 100644 --- a/net/sched/sch_tbf.c +++ b/net/sched/sch_tbf.c | |||
@@ -21,6 +21,7 @@ | |||
21 | #include <net/netlink.h> | 21 | #include <net/netlink.h> |
22 | #include <net/sch_generic.h> | 22 | #include <net/sch_generic.h> |
23 | #include <net/pkt_sched.h> | 23 | #include <net/pkt_sched.h> |
24 | #include <net/tcp.h> | ||
24 | 25 | ||
25 | 26 | ||
26 | /* Simple Token Bucket Filter. | 27 | /* Simple Token Bucket Filter. |
@@ -117,6 +118,22 @@ struct tbf_sched_data { | |||
117 | }; | 118 | }; |
118 | 119 | ||
119 | 120 | ||
121 | /* | ||
122 | * Return the length of individual segments of a GSO packet, | ||
123 | * including all headers (MAC, IP, TCP/UDP) | ||
124 | */ | ||
125 | static unsigned int skb_gso_seglen(const struct sk_buff *skb) | ||
126 | { | ||
127 | unsigned int hdr_len = skb_transport_header(skb) - skb_mac_header(skb); | ||
128 | const struct skb_shared_info *shinfo = skb_shinfo(skb); | ||
129 | |||
130 | if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))) | ||
131 | hdr_len += tcp_hdrlen(skb); | ||
132 | else | ||
133 | hdr_len += sizeof(struct udphdr); | ||
134 | return hdr_len + shinfo->gso_size; | ||
135 | } | ||
136 | |||
120 | /* GSO packet is too big, segment it so that tbf can transmit | 137 | /* GSO packet is too big, segment it so that tbf can transmit |
121 | * each segment in time | 138 | * each segment in time |
122 | */ | 139 | */ |
@@ -136,12 +153,8 @@ static int tbf_segment(struct sk_buff *skb, struct Qdisc *sch) | |||
136 | while (segs) { | 153 | while (segs) { |
137 | nskb = segs->next; | 154 | nskb = segs->next; |
138 | segs->next = NULL; | 155 | segs->next = NULL; |
139 | if (likely(segs->len <= q->max_size)) { | 156 | qdisc_skb_cb(segs)->pkt_len = segs->len; |
140 | qdisc_skb_cb(segs)->pkt_len = segs->len; | 157 | ret = qdisc_enqueue(segs, q->qdisc); |
141 | ret = qdisc_enqueue(segs, q->qdisc); | ||
142 | } else { | ||
143 | ret = qdisc_reshape_fail(skb, sch); | ||
144 | } | ||
145 | if (ret != NET_XMIT_SUCCESS) { | 158 | if (ret != NET_XMIT_SUCCESS) { |
146 | if (net_xmit_drop_count(ret)) | 159 | if (net_xmit_drop_count(ret)) |
147 | sch->qstats.drops++; | 160 | sch->qstats.drops++; |
@@ -163,7 +176,7 @@ static int tbf_enqueue(struct sk_buff *skb, struct Qdisc *sch) | |||
163 | int ret; | 176 | int ret; |
164 | 177 | ||
165 | if (qdisc_pkt_len(skb) > q->max_size) { | 178 | if (qdisc_pkt_len(skb) > q->max_size) { |
166 | if (skb_is_gso(skb)) | 179 | if (skb_is_gso(skb) && skb_gso_seglen(skb) <= q->max_size) |
167 | return tbf_segment(skb, sch); | 180 | return tbf_segment(skb, sch); |
168 | return qdisc_reshape_fail(skb, sch); | 181 | return qdisc_reshape_fail(skb, sch); |
169 | } | 182 | } |
@@ -319,6 +332,11 @@ static int tbf_change(struct Qdisc *sch, struct nlattr *opt) | |||
319 | if (max_size < 0) | 332 | if (max_size < 0) |
320 | goto done; | 333 | goto done; |
321 | 334 | ||
335 | if (max_size < psched_mtu(qdisc_dev(sch))) | ||
336 | pr_warn_ratelimited("sch_tbf: burst %u is lower than device %s mtu (%u) !\n", | ||
337 | max_size, qdisc_dev(sch)->name, | ||
338 | psched_mtu(qdisc_dev(sch))); | ||
339 | |||
322 | if (q->qdisc != &noop_qdisc) { | 340 | if (q->qdisc != &noop_qdisc) { |
323 | err = fifo_set_limit(q->qdisc, qopt->limit); | 341 | err = fifo_set_limit(q->qdisc, qopt->limit); |
324 | if (err) | 342 | if (err) |
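The skb_gso_seglen() check in the tbf hunks means a GSO packet is only worth segmenting when each resulting segment will itself fit within the configured burst. As a rough worked example with made-up numbers: 14 bytes of MAC, 20 bytes of IP and 20 bytes of TCP headers plus a gso_size of 1448 give 1502-byte segments, so a burst (max_size) of at least 1502 allows segmentation while a smaller one still drops the packet. In C:

    #include <stdio.h>

    /* Per-segment length of a GSO packet: all headers plus one MSS worth
     * of payload. The numbers below are illustrative, not from a capture.
     */
    static unsigned int gso_seglen(unsigned int mac_hdr, unsigned int ip_hdr,
                                   unsigned int l4_hdr, unsigned int gso_size)
    {
            return mac_hdr + ip_hdr + l4_hdr + gso_size;
    }

    int main(void)
    {
            unsigned int seglen = gso_seglen(14, 20, 20, 1448);
            unsigned int max_size = 1514;   /* hypothetical TBF burst limit */

            printf("segment length %u: %s\n", seglen,
                   seglen <= max_size ? "segment and enqueue" : "reshape/drop");
            return 0;
    }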
diff --git a/net/sctp/output.c b/net/sctp/output.c index e650978daf27..0e2644d0a773 100644 --- a/net/sctp/output.c +++ b/net/sctp/output.c | |||
@@ -474,10 +474,11 @@ int sctp_packet_transmit(struct sctp_packet *packet) | |||
474 | * for a given destination transport address. | 474 | * for a given destination transport address. |
475 | */ | 475 | */ |
476 | 476 | ||
477 | if (!tp->rto_pending) { | 477 | if (!chunk->resent && !tp->rto_pending) { |
478 | chunk->rtt_in_progress = 1; | 478 | chunk->rtt_in_progress = 1; |
479 | tp->rto_pending = 1; | 479 | tp->rto_pending = 1; |
480 | } | 480 | } |
481 | |||
481 | has_data = 1; | 482 | has_data = 1; |
482 | } | 483 | } |
483 | 484 | ||
diff --git a/net/sctp/outqueue.c b/net/sctp/outqueue.c index 94df75877869..f51ba985a36e 100644 --- a/net/sctp/outqueue.c +++ b/net/sctp/outqueue.c | |||
@@ -446,6 +446,8 @@ void sctp_retransmit_mark(struct sctp_outq *q, | |||
446 | transport->rto_pending = 0; | 446 | transport->rto_pending = 0; |
447 | } | 447 | } |
448 | 448 | ||
449 | chunk->resent = 1; | ||
450 | |||
449 | /* Move the chunk to the retransmit queue. The chunks | 451 | /* Move the chunk to the retransmit queue. The chunks |
450 | * on the retransmit queue are always kept in order. | 452 | * on the retransmit queue are always kept in order. |
451 | */ | 453 | */ |
@@ -1375,6 +1377,7 @@ static void sctp_check_transmitted(struct sctp_outq *q, | |||
1375 | * instance). | 1377 | * instance). |
1376 | */ | 1378 | */ |
1377 | if (!tchunk->tsn_gap_acked && | 1379 | if (!tchunk->tsn_gap_acked && |
1380 | !tchunk->resent && | ||
1378 | tchunk->rtt_in_progress) { | 1381 | tchunk->rtt_in_progress) { |
1379 | tchunk->rtt_in_progress = 0; | 1382 | tchunk->rtt_in_progress = 0; |
1380 | rtt = jiffies - tchunk->sent_at; | 1383 | rtt = jiffies - tchunk->sent_at; |
@@ -1391,7 +1394,8 @@ static void sctp_check_transmitted(struct sctp_outq *q, | |||
1391 | */ | 1394 | */ |
1392 | if (!tchunk->tsn_gap_acked) { | 1395 | if (!tchunk->tsn_gap_acked) { |
1393 | tchunk->tsn_gap_acked = 1; | 1396 | tchunk->tsn_gap_acked = 1; |
1394 | *highest_new_tsn_in_sack = tsn; | 1397 | if (TSN_lt(*highest_new_tsn_in_sack, tsn)) |
1398 | *highest_new_tsn_in_sack = tsn; | ||
1395 | bytes_acked += sctp_data_size(tchunk); | 1399 | bytes_acked += sctp_data_size(tchunk); |
1396 | if (!tchunk->transport) | 1400 | if (!tchunk->transport) |
1397 | migrate_bytes += sctp_data_size(tchunk); | 1401 | migrate_bytes += sctp_data_size(tchunk); |
diff --git a/net/socket.c b/net/socket.c index 0b18693f2be6..e83c416708af 100644 --- a/net/socket.c +++ b/net/socket.c | |||
@@ -1973,7 +1973,7 @@ static int copy_msghdr_from_user(struct msghdr *kmsg, | |||
1973 | if (copy_from_user(kmsg, umsg, sizeof(struct msghdr))) | 1973 | if (copy_from_user(kmsg, umsg, sizeof(struct msghdr))) |
1974 | return -EFAULT; | 1974 | return -EFAULT; |
1975 | if (kmsg->msg_namelen > sizeof(struct sockaddr_storage)) | 1975 | if (kmsg->msg_namelen > sizeof(struct sockaddr_storage)) |
1976 | return -EINVAL; | 1976 | kmsg->msg_namelen = sizeof(struct sockaddr_storage); |
1977 | return 0; | 1977 | return 0; |
1978 | } | 1978 | } |
1979 | 1979 | ||
diff --git a/security/integrity/ima/ima_template.c b/security/integrity/ima/ima_template.c index 913e1927f916..635695f6a185 100644 --- a/security/integrity/ima/ima_template.c +++ b/security/integrity/ima/ima_template.c | |||
@@ -110,7 +110,7 @@ static int template_desc_init_fields(const char *template_fmt, | |||
110 | struct ima_template_field ***fields, | 110 | struct ima_template_field ***fields, |
111 | int *num_fields) | 111 | int *num_fields) |
112 | { | 112 | { |
113 | char *c, *template_fmt_copy; | 113 | char *c, *template_fmt_copy, *template_fmt_ptr; |
114 | int template_num_fields = template_fmt_size(template_fmt); | 114 | int template_num_fields = template_fmt_size(template_fmt); |
115 | int i, result = 0; | 115 | int i, result = 0; |
116 | 116 | ||
@@ -127,7 +127,9 @@ static int template_desc_init_fields(const char *template_fmt, | |||
127 | result = -ENOMEM; | 127 | result = -ENOMEM; |
128 | goto out; | 128 | goto out; |
129 | } | 129 | } |
130 | for (i = 0; (c = strsep(&template_fmt_copy, "|")) != NULL && | 130 | |
131 | template_fmt_ptr = template_fmt_copy; | ||
132 | for (i = 0; (c = strsep(&template_fmt_ptr, "|")) != NULL && | ||
131 | i < template_num_fields; i++) { | 133 | i < template_num_fields; i++) { |
132 | struct ima_template_field *f = lookup_template_field(c); | 134 | struct ima_template_field *f = lookup_template_field(c); |
133 | 135 | ||
diff --git a/tools/lib/traceevent/event-parse.c b/tools/lib/traceevent/event-parse.c index 0362d575de7d..217c82ee3665 100644 --- a/tools/lib/traceevent/event-parse.c +++ b/tools/lib/traceevent/event-parse.c | |||
@@ -1606,6 +1606,24 @@ process_arg(struct event_format *event, struct print_arg *arg, char **tok) | |||
1606 | static enum event_type | 1606 | static enum event_type |
1607 | process_op(struct event_format *event, struct print_arg *arg, char **tok); | 1607 | process_op(struct event_format *event, struct print_arg *arg, char **tok); |
1608 | 1608 | ||
1609 | /* | ||
1610 | * For __print_symbolic() and __print_flags(), we need to completely | ||
1611 | * evaluate the first argument, which defines what to print next. | ||
1612 | */ | ||
1613 | static enum event_type | ||
1614 | process_field_arg(struct event_format *event, struct print_arg *arg, char **tok) | ||
1615 | { | ||
1616 | enum event_type type; | ||
1617 | |||
1618 | type = process_arg(event, arg, tok); | ||
1619 | |||
1620 | while (type == EVENT_OP) { | ||
1621 | type = process_op(event, arg, tok); | ||
1622 | } | ||
1623 | |||
1624 | return type; | ||
1625 | } | ||
1626 | |||
1609 | static enum event_type | 1627 | static enum event_type |
1610 | process_cond(struct event_format *event, struct print_arg *top, char **tok) | 1628 | process_cond(struct event_format *event, struct print_arg *top, char **tok) |
1611 | { | 1629 | { |
@@ -2371,7 +2389,7 @@ process_flags(struct event_format *event, struct print_arg *arg, char **tok) | |||
2371 | goto out_free; | 2389 | goto out_free; |
2372 | } | 2390 | } |
2373 | 2391 | ||
2374 | type = process_arg(event, field, &token); | 2392 | type = process_field_arg(event, field, &token); |
2375 | 2393 | ||
2376 | /* Handle operations in the first argument */ | 2394 | /* Handle operations in the first argument */ |
2377 | while (type == EVENT_OP) | 2395 | while (type == EVENT_OP) |
@@ -2424,7 +2442,8 @@ process_symbols(struct event_format *event, struct print_arg *arg, char **tok) | |||
2424 | goto out_free; | 2442 | goto out_free; |
2425 | } | 2443 | } |
2426 | 2444 | ||
2427 | type = process_arg(event, field, &token); | 2445 | type = process_field_arg(event, field, &token); |
2446 | |||
2428 | if (test_type_token(type, token, EVENT_DELIM, ",")) | 2447 | if (test_type_token(type, token, EVENT_DELIM, ",")) |
2429 | goto out_free_field; | 2448 | goto out_free_field; |
2430 | 2449 | ||
@@ -3446,7 +3465,7 @@ eval_num_arg(void *data, int size, struct event_format *event, struct print_arg | |||
3446 | * is in the bottom half of the 32 bit field. | 3465 | * is in the bottom half of the 32 bit field. |
3447 | */ | 3466 | */ |
3448 | offset &= 0xffff; | 3467 | offset &= 0xffff; |
3449 | val = (unsigned long long)(data + offset); | 3468 | val = (unsigned long long)((unsigned long)data + offset); |
3450 | break; | 3469 | break; |
3451 | default: /* not sure what to do there */ | 3470 | default: /* not sure what to do there */ |
3452 | return 0; | 3471 | return 0; |
diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c index 369c03648f88..1cd035708931 100644 --- a/tools/perf/util/header.c +++ b/tools/perf/util/header.c | |||
@@ -2078,8 +2078,10 @@ static int process_group_desc(struct perf_file_section *section __maybe_unused, | |||
2078 | if (evsel->idx == (int) desc[i].leader_idx) { | 2078 | if (evsel->idx == (int) desc[i].leader_idx) { |
2079 | evsel->leader = evsel; | 2079 | evsel->leader = evsel; |
2080 | /* {anon_group} is a dummy name */ | 2080 | /* {anon_group} is a dummy name */ |
2081 | if (strcmp(desc[i].name, "{anon_group}")) | 2081 | if (strcmp(desc[i].name, "{anon_group}")) { |
2082 | evsel->group_name = desc[i].name; | 2082 | evsel->group_name = desc[i].name; |
2083 | desc[i].name = NULL; | ||
2084 | } | ||
2083 | evsel->nr_members = desc[i].nr_members; | 2085 | evsel->nr_members = desc[i].nr_members; |
2084 | 2086 | ||
2085 | if (i >= nr_groups || nr > 0) { | 2087 | if (i >= nr_groups || nr > 0) { |
@@ -2105,7 +2107,7 @@ static int process_group_desc(struct perf_file_section *section __maybe_unused, | |||
2105 | 2107 | ||
2106 | ret = 0; | 2108 | ret = 0; |
2107 | out_free: | 2109 | out_free: |
2108 | while ((int) --i >= 0) | 2110 | for (i = 0; i < nr_groups; i++) |
2109 | free(desc[i].name); | 2111 | free(desc[i].name); |
2110 | free(desc); | 2112 | free(desc); |
2111 | 2113 | ||
diff --git a/tools/perf/util/thread.c b/tools/perf/util/thread.c index cd8e2f592719..49eaf1d7d89d 100644 --- a/tools/perf/util/thread.c +++ b/tools/perf/util/thread.c | |||
@@ -70,14 +70,13 @@ int thread__set_comm(struct thread *thread, const char *str, u64 timestamp) | |||
70 | /* Override latest entry if it had no specific time coverage */ | 70 | /* Override latest entry if it had no specific time coverage */ |
71 | if (!curr->start) { | 71 | if (!curr->start) { |
72 | comm__override(curr, str, timestamp); | 72 | comm__override(curr, str, timestamp); |
73 | return 0; | 73 | } else { |
74 | new = comm__new(str, timestamp); | ||
75 | if (!new) | ||
76 | return -ENOMEM; | ||
77 | list_add(&new->list, &thread->comm_list); | ||
74 | } | 78 | } |
75 | 79 | ||
76 | new = comm__new(str, timestamp); | ||
77 | if (!new) | ||
78 | return -ENOMEM; | ||
79 | |||
80 | list_add(&new->list, &thread->comm_list); | ||
81 | thread->comm_set = true; | 80 | thread->comm_set = true; |
82 | 81 | ||
83 | return 0; | 82 | return 0; |