129 files changed, 1244 insertions, 984 deletions
diff --git a/Documentation/devicetree/bindings/net/apm-xgene-enet.txt b/Documentation/devicetree/bindings/net/apm-xgene-enet.txt
index cfcc52705ed8..6151999c5dca 100644
--- a/Documentation/devicetree/bindings/net/apm-xgene-enet.txt
+++ b/Documentation/devicetree/bindings/net/apm-xgene-enet.txt
@@ -4,7 +4,10 @@ Ethernet nodes are defined to describe on-chip ethernet interfaces in
 APM X-Gene SoC.

 Required properties for all the ethernet interfaces:
-- compatible: Should be "apm,xgene-enet"
+- compatible: Should state binding information from the following list,
+  - "apm,xgene-enet":      RGMII based 1G interface
+  - "apm,xgene1-sgenet":   SGMII based 1G interface
+  - "apm,xgene1-xgenet":   XFI based 10G interface
 - reg: Address and length of the register set for the device. It contains the
   information of registers in the same order as described by reg-names
 - reg-names: Should contain the register set names
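
For orientation, a minimal example node using one of the new compatible strings (a sketch only; the unit address, reg value and reg-names below are illustrative placeholders, not taken from this patch):

	ethernet@1f210000 {
		compatible = "apm,xgene1-sgenet";	/* SGMII based 1G interface */
		reg = <0x0 0x1f210000 0x0 0xd100>;	/* placeholder register range */
		reg-names = "enet_csr";
		status = "disabled";
	};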
diff --git a/MAINTAINERS b/MAINTAINERS
index 6239a305dff0..69cc89f7a9c9 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -2369,8 +2369,9 @@ F: arch/x86/include/asm/tce.h

 CAN NETWORK LAYER
 M: Oliver Hartkopp <socketcan@hartkopp.net>
+M: Marc Kleine-Budde <mkl@pengutronix.de>
 L: linux-can@vger.kernel.org
-W: http://gitorious.org/linux-can
+W: https://github.com/linux-can
 T: git git://git.kernel.org/pub/scm/linux/kernel/git/mkl/linux-can.git
 T: git git://git.kernel.org/pub/scm/linux/kernel/git/mkl/linux-can-next.git
 S: Maintained
@@ -2386,7 +2387,7 @@ CAN NETWORK DRIVERS
 M: Wolfgang Grandegger <wg@grandegger.com>
 M: Marc Kleine-Budde <mkl@pengutronix.de>
 L: linux-can@vger.kernel.org
-W: http://gitorious.org/linux-can
+W: https://github.com/linux-can
 T: git git://git.kernel.org/pub/scm/linux/kernel/git/mkl/linux-can.git
 T: git git://git.kernel.org/pub/scm/linux/kernel/git/mkl/linux-can-next.git
 S: Maintained
diff --git a/arch/arm/mach-pxa/idp.c b/arch/arm/mach-pxa/idp.c
index 7d8eab857a93..f6d02e4cbcda 100644
--- a/arch/arm/mach-pxa/idp.c
+++ b/arch/arm/mach-pxa/idp.c
@@ -36,6 +36,7 @@
 #include <linux/platform_data/video-pxafb.h>
 #include <mach/bitfield.h>
 #include <linux/platform_data/mmc-pxamci.h>
+#include <linux/smc91x.h>

 #include "generic.h"
 #include "devices.h"
diff --git a/arch/arm/mach-pxa/lpd270.c b/arch/arm/mach-pxa/lpd270.c
index 28da319d389f..eaee2c20b189 100644
--- a/arch/arm/mach-pxa/lpd270.c
+++ b/arch/arm/mach-pxa/lpd270.c
@@ -195,7 +195,7 @@ static struct resource smc91x_resources[] = {
 };

 struct smc91x_platdata smc91x_platdata = {
-	.flags = SMC91X_USE_16BIT | SMC91X_NOWAIT;
+	.flags = SMC91X_USE_16BIT | SMC91X_NOWAIT,
 };

 static struct platform_device smc91x_device = {
diff --git a/arch/arm/mach-sa1100/neponset.c b/arch/arm/mach-sa1100/neponset.c
index 7b0cd3172354..af868d258e66 100644
--- a/arch/arm/mach-sa1100/neponset.c
+++ b/arch/arm/mach-sa1100/neponset.c
@@ -268,8 +268,8 @@ static int neponset_probe(struct platform_device *dev)
 		.id = 0,
 		.res = smc91x_resources,
 		.num_res = ARRAY_SIZE(smc91x_resources),
-		.data = &smc91c_platdata,
-		.size_data = sizeof(smc91c_platdata),
+		.data = &smc91x_platdata,
+		.size_data = sizeof(smc91x_platdata),
 	};
 	int ret, irq;

diff --git a/arch/arm/mach-sa1100/pleb.c b/arch/arm/mach-sa1100/pleb.c
index 696fd0fe4806..1525d7b5f1b7 100644
--- a/arch/arm/mach-sa1100/pleb.c
+++ b/arch/arm/mach-sa1100/pleb.c
@@ -54,7 +54,7 @@ static struct platform_device smc91x_device = {
 	.num_resources = ARRAY_SIZE(smc91x_resources),
 	.resource = smc91x_resources,
 	.dev = {
-		.platform_data = &smc91c_platdata,
+		.platform_data = &smc91x_platdata,
 	},
 };

diff --git a/arch/arm64/boot/dts/apm/apm-storm.dtsi b/arch/arm64/boot/dts/apm/apm-storm.dtsi
index f1ad9c2ab2e9..a857794432d6 100644
--- a/arch/arm64/boot/dts/apm/apm-storm.dtsi
+++ b/arch/arm64/boot/dts/apm/apm-storm.dtsi
@@ -622,7 +622,7 @@
 		};

 		sgenet0: ethernet@1f210000 {
-			compatible = "apm,xgene-enet";
+			compatible = "apm,xgene1-sgenet";
 			status = "disabled";
 			reg = <0x0 0x1f210000 0x0 0xd100>,
 			      <0x0 0x1f200000 0x0 0Xc300>,
@@ -636,7 +636,7 @@
 		};

 		xgenet: ethernet@1f610000 {
-			compatible = "apm,xgene-enet";
+			compatible = "apm,xgene1-xgenet";
 			status = "disabled";
 			reg = <0x0 0x1f610000 0x0 0xd100>,
 			      <0x0 0x1f600000 0x0 0Xc300>,
diff --git a/arch/c6x/include/asm/pgtable.h b/arch/c6x/include/asm/pgtable.h
index 78d4483ba40c..ec4db6df5e0d 100644
--- a/arch/c6x/include/asm/pgtable.h
+++ b/arch/c6x/include/asm/pgtable.h
@@ -67,6 +67,11 @@ extern unsigned long empty_zero_page;
  */
 #define pgtable_cache_init()	do { } while (0)

+/*
+ * c6x is !MMU, so define the simpliest implementation
+ */
+#define pgprot_writecombine pgprot_noncached
+
 #include <asm-generic/pgtable.h>

 #endif /* _ASM_C6X_PGTABLE_H */
diff --git a/arch/microblaze/kernel/entry.S b/arch/microblaze/kernel/entry.S
index 0536bc021cc6..ef548510b951 100644
--- a/arch/microblaze/kernel/entry.S
+++ b/arch/microblaze/kernel/entry.S
@@ -348,8 +348,9 @@ C_ENTRY(_user_exception):
 	 * The LP register should point to the location where the called function
 	 * should return.  [note that MAKE_SYS_CALL uses label 1] */
 	/* See if the system call number is valid */
+	blti	r12, 5f
 	addi	r11, r12, -__NR_syscalls;
-	bgei	r11,5f;
+	bgei	r11, 5f;
 	/* Figure out which function to use for this system call. */
 	/* Note Microblaze barrel shift is optional, so don't rely on it */
 	add	r12, r12, r12;		/* convert num -> ptr */
@@ -375,7 +376,7 @@ C_ENTRY(_user_exception):

 	/* The syscall number is invalid, return an error. */
 5:
-	rtsd	r15, 8;		/* looks like a normal subroutine return */
+	braid	ret_from_trap
 	addi	r3, r0, -ENOSYS;

 /* Entry point used to return from a syscall/trap */
@@ -411,7 +412,7 @@ C_ENTRY(ret_from_trap):
 	bri	1b

 	/* Maybe handle a signal */
-5:	
+5:
 	andi	r11, r19, _TIF_SIGPENDING | _TIF_NOTIFY_RESUME;
 	beqi	r11, 4f;		/* Signals to handle, handle them */

diff --git a/arch/nios2/include/asm/ptrace.h b/arch/nios2/include/asm/ptrace.h
index 20fb1cf2dab6..642462144872 100644
--- a/arch/nios2/include/asm/ptrace.h
+++ b/arch/nios2/include/asm/ptrace.h
@@ -15,7 +15,54 @@

 #include <uapi/asm/ptrace.h>

+/* This struct defines the way the registers are stored on the
+   stack during a system call. */
+
 #ifndef __ASSEMBLY__
+struct pt_regs {
+	unsigned long  r8;	/* r8-r15 Caller-saved GP registers */
+	unsigned long  r9;
+	unsigned long  r10;
+	unsigned long  r11;
+	unsigned long  r12;
+	unsigned long  r13;
+	unsigned long  r14;
+	unsigned long  r15;
+	unsigned long  r1;	/* Assembler temporary */
+	unsigned long  r2;	/* Retval LS 32bits */
+	unsigned long  r3;	/* Retval MS 32bits */
+	unsigned long  r4;	/* r4-r7 Register arguments */
+	unsigned long  r5;
+	unsigned long  r6;
+	unsigned long  r7;
+	unsigned long  orig_r2;	/* Copy of r2 ?? */
+	unsigned long  ra;	/* Return address */
+	unsigned long  fp;	/* Frame pointer */
+	unsigned long  sp;	/* Stack pointer */
+	unsigned long  gp;	/* Global pointer */
+	unsigned long  estatus;
+	unsigned long  ea;	/* Exception return address (pc) */
+	unsigned long  orig_r7;
+};
+
+/*
+ * This is the extended stack used by signal handlers and the context
+ * switcher: it's pushed after the normal "struct pt_regs".
+ */
+struct switch_stack {
+	unsigned long  r16;	/* r16-r23 Callee-saved GP registers */
+	unsigned long  r17;
+	unsigned long  r18;
+	unsigned long  r19;
+	unsigned long  r20;
+	unsigned long  r21;
+	unsigned long  r22;
+	unsigned long  r23;
+	unsigned long  fp;
+	unsigned long  gp;
+	unsigned long  ra;
+};
+
 #define user_mode(regs)	(((regs)->estatus & ESTATUS_EU))

 #define instruction_pointer(regs)	((regs)->ra)
diff --git a/arch/nios2/include/asm/ucontext.h b/arch/nios2/include/asm/ucontext.h
deleted file mode 100644
index 2c87614b0f6e..000000000000
--- a/arch/nios2/include/asm/ucontext.h
+++ /dev/null
@@ -1,32 +0,0 @@
-/*
- * Copyright (C) 2010 Tobias Klauser <tklauser@distanz.ch>
- * Copyright (C) 2004 Microtronix Datacom Ltd
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- */
-
-#ifndef _ASM_NIOS2_UCONTEXT_H
-#define _ASM_NIOS2_UCONTEXT_H
-
-typedef int greg_t;
-#define NGREG 32
-typedef greg_t gregset_t[NGREG];
-
-struct mcontext {
-	int version;
-	gregset_t gregs;
-};
-
-#define MCONTEXT_VERSION 2
-
-struct ucontext {
-	unsigned long	  uc_flags;
-	struct ucontext  *uc_link;
-	stack_t		  uc_stack;
-	struct mcontext	  uc_mcontext;
-	sigset_t	  uc_sigmask;	/* mask last for extensibility */
-};
-
-#endif
diff --git a/arch/nios2/include/uapi/asm/Kbuild b/arch/nios2/include/uapi/asm/Kbuild
index 4f07ca3f8d10..376131194cc3 100644
--- a/arch/nios2/include/uapi/asm/Kbuild
+++ b/arch/nios2/include/uapi/asm/Kbuild
@@ -2,3 +2,5 @@ include include/uapi/asm-generic/Kbuild.asm

 header-y += elf.h
 header-y += ucontext.h
+
+generic-y += ucontext.h
diff --git a/arch/nios2/include/uapi/asm/elf.h b/arch/nios2/include/uapi/asm/elf.h
index a5b91ae5cf56..6f06d3b2949e 100644
--- a/arch/nios2/include/uapi/asm/elf.h
+++ b/arch/nios2/include/uapi/asm/elf.h
@@ -50,9 +50,7 @@

 typedef unsigned long elf_greg_t;

-#define ELF_NGREG	\
-	((sizeof(struct pt_regs) + sizeof(struct switch_stack)) /	\
-		sizeof(elf_greg_t))
+#define ELF_NGREG		49
 typedef elf_greg_t elf_gregset_t[ELF_NGREG];

 typedef unsigned long elf_fpregset_t;
diff --git a/arch/nios2/include/uapi/asm/ptrace.h b/arch/nios2/include/uapi/asm/ptrace.h
index e83a7c9d1c36..71a330597adf 100644
--- a/arch/nios2/include/uapi/asm/ptrace.h
+++ b/arch/nios2/include/uapi/asm/ptrace.h
@@ -67,53 +67,9 @@

 #define NUM_PTRACE_REG (PTR_TLBMISC + 1)

-/* this struct defines the way the registers are stored on the
-   stack during a system call.
-
-   There is a fake_regs in setup.c that has to match pt_regs.*/
-
-struct pt_regs {
-	unsigned long  r8;	/* r8-r15 Caller-saved GP registers */
-	unsigned long  r9;
-	unsigned long  r10;
-	unsigned long  r11;
-	unsigned long  r12;
-	unsigned long  r13;
-	unsigned long  r14;
-	unsigned long  r15;
-	unsigned long  r1;	/* Assembler temporary */
-	unsigned long  r2;	/* Retval LS 32bits */
-	unsigned long  r3;	/* Retval MS 32bits */
-	unsigned long  r4;	/* r4-r7 Register arguments */
-	unsigned long  r5;
-	unsigned long  r6;
-	unsigned long  r7;
-	unsigned long  orig_r2;	/* Copy of r2 ?? */
-	unsigned long  ra;	/* Return address */
-	unsigned long  fp;	/* Frame pointer */
-	unsigned long  sp;	/* Stack pointer */
-	unsigned long  gp;	/* Global pointer */
-	unsigned long  estatus;
-	unsigned long  ea;	/* Exception return address (pc) */
-	unsigned long  orig_r7;
-};
-
-/*
- * This is the extended stack used by signal handlers and the context
- * switcher: it's pushed after the normal "struct pt_regs".
- */
-struct switch_stack {
-	unsigned long  r16;	/* r16-r23 Callee-saved GP registers */
-	unsigned long  r17;
-	unsigned long  r18;
-	unsigned long  r19;
-	unsigned long  r20;
-	unsigned long  r21;
-	unsigned long  r22;
-	unsigned long  r23;
-	unsigned long  fp;
-	unsigned long  gp;
-	unsigned long  ra;
-};
+/* User structures for general purpose registers. */
+struct user_pt_regs {
+	__u32		regs[49];
+};

 #endif /* __ASSEMBLY__ */
diff --git a/arch/nios2/include/uapi/asm/sigcontext.h b/arch/nios2/include/uapi/asm/sigcontext.h
index 7b8bb41867d4..b67944a50927 100644
--- a/arch/nios2/include/uapi/asm/sigcontext.h
+++ b/arch/nios2/include/uapi/asm/sigcontext.h
@@ -15,14 +15,16 @@
  * details.
  */

-#ifndef _ASM_NIOS2_SIGCONTEXT_H
-#define _ASM_NIOS2_SIGCONTEXT_H
+#ifndef _UAPI__ASM_SIGCONTEXT_H
+#define _UAPI__ASM_SIGCONTEXT_H

-#include <asm/ptrace.h>
+#include <linux/types.h>
+
+#define MCONTEXT_VERSION 2

 struct sigcontext {
-	struct pt_regs regs;
-	unsigned long  sc_mask;	/* old sigmask */
+	int version;
+	unsigned long gregs[32];
 };

 #endif
diff --git a/arch/nios2/kernel/signal.c b/arch/nios2/kernel/signal.c
index 2d0ea25be171..dda41e4fe707 100644
--- a/arch/nios2/kernel/signal.c
+++ b/arch/nios2/kernel/signal.c
@@ -39,7 +39,7 @@ static inline int rt_restore_ucontext(struct pt_regs *regs,
 				      struct ucontext *uc, int *pr2)
 {
 	int temp;
-	greg_t *gregs = uc->uc_mcontext.gregs;
+	unsigned long *gregs = uc->uc_mcontext.gregs;
 	int err;

 	/* Always make any pending restarted system calls return -EINTR */
@@ -127,7 +127,7 @@ badframe:
 static inline int rt_setup_ucontext(struct ucontext *uc, struct pt_regs *regs)
 {
 	struct switch_stack *sw = (struct switch_stack *)regs - 1;
-	greg_t *gregs = uc->uc_mcontext.gregs;
+	unsigned long *gregs = uc->uc_mcontext.gregs;
 	int err = 0;

 	err |= __put_user(MCONTEXT_VERSION, &uc->uc_mcontext.version);
diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h
index d84559e31f32..f407bbf5ee94 100644
--- a/arch/s390/include/asm/kvm_host.h
+++ b/arch/s390/include/asm/kvm_host.h
@@ -515,15 +515,15 @@ struct s390_io_adapter {
 #define S390_ARCH_FAC_MASK_SIZE_U64 \
 	(S390_ARCH_FAC_MASK_SIZE_BYTE / sizeof(u64))

-struct s390_model_fac {
-	/* facilities used in SIE context */
-	__u64 sie[S390_ARCH_FAC_LIST_SIZE_U64];
-	/* subset enabled by kvm */
-	__u64 kvm[S390_ARCH_FAC_LIST_SIZE_U64];
+struct kvm_s390_fac {
+	/* facility list requested by guest */
+	__u64 list[S390_ARCH_FAC_LIST_SIZE_U64];
+	/* facility mask supported by kvm & hosting machine */
+	__u64 mask[S390_ARCH_FAC_LIST_SIZE_U64];
 };

 struct kvm_s390_cpu_model {
-	struct s390_model_fac *fac;
+	struct kvm_s390_fac *fac;
 	struct cpuid cpu_id;
 	unsigned short ibc;
 };
diff --git a/arch/s390/include/asm/mmu_context.h b/arch/s390/include/asm/mmu_context.h
index f49b71954654..8fb3802f8fad 100644
--- a/arch/s390/include/asm/mmu_context.h
+++ b/arch/s390/include/asm/mmu_context.h
@@ -62,6 +62,7 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
 {
 	int cpu = smp_processor_id();

+	S390_lowcore.user_asce = next->context.asce_bits | __pa(next->pgd);
 	if (prev == next)
 		return;
 	if (MACHINE_HAS_TLB_LC)
@@ -73,7 +74,6 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
 	atomic_dec(&prev->context.attach_count);
 	if (MACHINE_HAS_TLB_LC)
 		cpumask_clear_cpu(cpu, &prev->context.cpu_attach_mask);
-	S390_lowcore.user_asce = next->context.asce_bits | __pa(next->pgd);
 }

 #define finish_arch_post_lock_switch finish_arch_post_lock_switch
diff --git a/arch/s390/include/asm/page.h b/arch/s390/include/asm/page.h
index 7b2ac6e44166..53eacbd4f09b 100644
--- a/arch/s390/include/asm/page.h
+++ b/arch/s390/include/asm/page.h
@@ -37,16 +37,7 @@ static inline void storage_key_init_range(unsigned long start, unsigned long end
 #endif
 }

-static inline void clear_page(void *page)
-{
-	register unsigned long reg1 asm ("1") = 0;
-	register void *reg2 asm ("2") = page;
-	register unsigned long reg3 asm ("3") = 4096;
-	asm volatile(
-		"	mvcl	2,0"
-		: "+d" (reg2), "+d" (reg3) : "d" (reg1)
-		: "memory", "cc");
-}
+#define clear_page(page)	memset((page), 0, PAGE_SIZE)

 /*
  * copy_page uses the mvcl instruction with 0xb0 padding byte in order to
diff --git a/arch/s390/kernel/jump_label.c b/arch/s390/kernel/jump_label.c
index cb2d51e779df..830066f936c8 100644
--- a/arch/s390/kernel/jump_label.c
+++ b/arch/s390/kernel/jump_label.c
@@ -36,16 +36,20 @@ static void jump_label_make_branch(struct jump_entry *entry, struct insn *insn)
 	insn->offset = (entry->target - entry->code) >> 1;
 }

-static void jump_label_bug(struct jump_entry *entry, struct insn *insn)
+static void jump_label_bug(struct jump_entry *entry, struct insn *expected,
+			   struct insn *new)
 {
 	unsigned char *ipc = (unsigned char *)entry->code;
-	unsigned char *ipe = (unsigned char *)insn;
+	unsigned char *ipe = (unsigned char *)expected;
+	unsigned char *ipn = (unsigned char *)new;

 	pr_emerg("Jump label code mismatch at %pS [%p]\n", ipc, ipc);
 	pr_emerg("Found:    %02x %02x %02x %02x %02x %02x\n",
 		 ipc[0], ipc[1], ipc[2], ipc[3], ipc[4], ipc[5]);
 	pr_emerg("Expected: %02x %02x %02x %02x %02x %02x\n",
 		 ipe[0], ipe[1], ipe[2], ipe[3], ipe[4], ipe[5]);
+	pr_emerg("New:      %02x %02x %02x %02x %02x %02x\n",
+		 ipn[0], ipn[1], ipn[2], ipn[3], ipn[4], ipn[5]);
 	panic("Corrupted kernel text");
 }

@@ -69,10 +73,10 @@ static void __jump_label_transform(struct jump_entry *entry,
 	}
 	if (init) {
 		if (memcmp((void *)entry->code, &orignop, sizeof(orignop)))
-			jump_label_bug(entry, &old);
+			jump_label_bug(entry, &orignop, &new);
 	} else {
 		if (memcmp((void *)entry->code, &old, sizeof(old)))
-			jump_label_bug(entry, &old);
+			jump_label_bug(entry, &old, &new);
 	}
 	probe_kernel_write((void *)entry->code, &new, sizeof(new));
 }
diff --git a/arch/s390/kernel/module.c b/arch/s390/kernel/module.c
index 36154a2f1814..2ca95862e336 100644
--- a/arch/s390/kernel/module.c
+++ b/arch/s390/kernel/module.c
@@ -436,6 +436,7 @@ int module_finalize(const Elf_Ehdr *hdr,
 		    const Elf_Shdr *sechdrs,
 		    struct module *me)
 {
+	jump_label_apply_nops(me);
 	vfree(me->arch.syminfo);
 	me->arch.syminfo = NULL;
 	return 0;
diff --git a/arch/s390/kernel/processor.c b/arch/s390/kernel/processor.c
index 26108232fcaa..dc488e13b7e3 100644
--- a/arch/s390/kernel/processor.c
+++ b/arch/s390/kernel/processor.c
@@ -18,7 +18,7 @@

 static DEFINE_PER_CPU(struct cpuid, cpu_id);

-void cpu_relax(void)
+void notrace cpu_relax(void)
 {
 	if (!smp_cpu_mtid && MACHINE_HAS_DIAG44)
 		asm volatile("diag 0,0,0x44");
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index 0c3623927563..f6579cfde2df 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -522,7 +522,7 @@ static int kvm_s390_set_processor(struct kvm *kvm, struct kvm_device_attr *attr)
 		memcpy(&kvm->arch.model.cpu_id, &proc->cpuid,
 		       sizeof(struct cpuid));
 		kvm->arch.model.ibc = proc->ibc;
-		memcpy(kvm->arch.model.fac->kvm, proc->fac_list,
+		memcpy(kvm->arch.model.fac->list, proc->fac_list,
 		       S390_ARCH_FAC_LIST_SIZE_BYTE);
 	} else
 		ret = -EFAULT;
@@ -556,7 +556,7 @@ static int kvm_s390_get_processor(struct kvm *kvm, struct kvm_device_attr *attr)
 	}
 	memcpy(&proc->cpuid, &kvm->arch.model.cpu_id, sizeof(struct cpuid));
 	proc->ibc = kvm->arch.model.ibc;
-	memcpy(&proc->fac_list, kvm->arch.model.fac->kvm, S390_ARCH_FAC_LIST_SIZE_BYTE);
+	memcpy(&proc->fac_list, kvm->arch.model.fac->list, S390_ARCH_FAC_LIST_SIZE_BYTE);
 	if (copy_to_user((void __user *)attr->addr, proc, sizeof(*proc)))
 		ret = -EFAULT;
 	kfree(proc);
@@ -576,10 +576,10 @@ static int kvm_s390_get_machine(struct kvm *kvm, struct kvm_device_attr *attr)
 	}
 	get_cpu_id((struct cpuid *) &mach->cpuid);
 	mach->ibc = sclp_get_ibc();
-	memcpy(&mach->fac_mask, kvm_s390_fac_list_mask,
-	       kvm_s390_fac_list_mask_size() * sizeof(u64));
+	memcpy(&mach->fac_mask, kvm->arch.model.fac->mask,
+	       S390_ARCH_FAC_LIST_SIZE_BYTE);
 	memcpy((unsigned long *)&mach->fac_list, S390_lowcore.stfle_fac_list,
-	       S390_ARCH_FAC_LIST_SIZE_U64);
+	       S390_ARCH_FAC_LIST_SIZE_BYTE);
 	if (copy_to_user((void __user *)attr->addr, mach, sizeof(*mach)))
 		ret = -EFAULT;
 	kfree(mach);
@@ -778,15 +778,18 @@ long kvm_arch_vm_ioctl(struct file *filp,
 static int kvm_s390_query_ap_config(u8 *config)
 {
 	u32 fcn_code = 0x04000000UL;
-	u32 cc;
+	u32 cc = 0;

+	memset(config, 0, 128);
 	asm volatile(
 		"lgr 0,%1\n"
 		"lgr 2,%2\n"
 		".long 0xb2af0000\n"		/* PQAP(QCI) */
-		"ipm %0\n"
+		"0: ipm %0\n"
 		"srl %0,28\n"
-		: "=r" (cc)
+		"1:\n"
+		EX_TABLE(0b, 1b)
+		: "+r" (cc)
 		: "r" (fcn_code), "r" (config)
 		: "cc", "0", "2", "memory"
 	);
@@ -839,9 +842,13 @@ static int kvm_s390_crypto_init(struct kvm *kvm)

 	kvm_s390_set_crycb_format(kvm);

-	/* Disable AES/DEA protected key functions by default */
-	kvm->arch.crypto.aes_kw = 0;
-	kvm->arch.crypto.dea_kw = 0;
+	/* Enable AES/DEA protected key functions by default */
+	kvm->arch.crypto.aes_kw = 1;
+	kvm->arch.crypto.dea_kw = 1;
+	get_random_bytes(kvm->arch.crypto.crycb->aes_wrapping_key_mask,
+			 sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
+	get_random_bytes(kvm->arch.crypto.crycb->dea_wrapping_key_mask,
+			 sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));

 	return 0;
 }
@@ -886,40 +893,29 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
 	/*
 	 * The architectural maximum amount of facilities is 16 kbit. To store
 	 * this amount, 2 kbyte of memory is required. Thus we need a full
-	 * page to hold the active copy (arch.model.fac->sie) and the current
-	 * facilities set (arch.model.fac->kvm). Its address size has to be
+	 * page to hold the guest facility list (arch.model.fac->list) and the
+	 * facility mask (arch.model.fac->mask). Its address size has to be
 	 * 31 bits and word aligned.
 	 */
 	kvm->arch.model.fac =
-		(struct s390_model_fac *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
+		(struct kvm_s390_fac *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
 	if (!kvm->arch.model.fac)
 		goto out_nofac;

-	memcpy(kvm->arch.model.fac->kvm, S390_lowcore.stfle_fac_list,
-	       S390_ARCH_FAC_LIST_SIZE_U64);
-
-	/*
-	 * If this KVM host runs *not* in a LPAR, relax the facility bits
-	 * of the kvm facility mask by all missing facilities. This will allow
-	 * to determine the right CPU model by means of the remaining facilities.
-	 * Live guest migration must prohibit the migration of KVMs running in
-	 * a LPAR to non LPAR hosts.
-	 */
-	if (!MACHINE_IS_LPAR)
-		for (i = 0; i < kvm_s390_fac_list_mask_size(); i++)
-			kvm_s390_fac_list_mask[i] &= kvm->arch.model.fac->kvm[i];
-
-	/*
-	 * Apply the kvm facility mask to limit the kvm supported/tolerated
-	 * facility list.
-	 */
+	/* Populate the facility mask initially. */
+	memcpy(kvm->arch.model.fac->mask, S390_lowcore.stfle_fac_list,
+	       S390_ARCH_FAC_LIST_SIZE_BYTE);
 	for (i = 0; i < S390_ARCH_FAC_LIST_SIZE_U64; i++) {
 		if (i < kvm_s390_fac_list_mask_size())
-			kvm->arch.model.fac->kvm[i] &= kvm_s390_fac_list_mask[i];
+			kvm->arch.model.fac->mask[i] &= kvm_s390_fac_list_mask[i];
 		else
-			kvm->arch.model.fac->kvm[i] = 0UL;
+			kvm->arch.model.fac->mask[i] = 0UL;
 	}

+	/* Populate the facility list initially. */
+	memcpy(kvm->arch.model.fac->list, kvm->arch.model.fac->mask,
+	       S390_ARCH_FAC_LIST_SIZE_BYTE);
+
 	kvm_s390_get_cpu_id(&kvm->arch.model.cpu_id);
 	kvm->arch.model.ibc = sclp_get_ibc() & 0x0fff;

@@ -1165,8 +1161,6 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)

 	mutex_lock(&vcpu->kvm->lock);
 	vcpu->arch.cpu_id = vcpu->kvm->arch.model.cpu_id;
-	memcpy(vcpu->kvm->arch.model.fac->sie, vcpu->kvm->arch.model.fac->kvm,
-	       S390_ARCH_FAC_LIST_SIZE_BYTE);
 	vcpu->arch.sie_block->ibc = vcpu->kvm->arch.model.ibc;
 	mutex_unlock(&vcpu->kvm->lock);

@@ -1212,7 +1206,7 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
 		vcpu->arch.sie_block->scaol = (__u32)(__u64)kvm->arch.sca;
 		set_bit(63 - id, (unsigned long *) &kvm->arch.sca->mcn);
 	}
-	vcpu->arch.sie_block->fac = (int) (long) kvm->arch.model.fac->sie;
+	vcpu->arch.sie_block->fac = (int) (long) kvm->arch.model.fac->list;

 	spin_lock_init(&vcpu->arch.local_int.lock);
 	vcpu->arch.local_int.float_int = &kvm->arch.float_int;
diff --git a/arch/s390/kvm/kvm-s390.h b/arch/s390/kvm/kvm-s390.h
index 985c2114d7ef..c34109aa552d 100644
--- a/arch/s390/kvm/kvm-s390.h
+++ b/arch/s390/kvm/kvm-s390.h
@@ -128,7 +128,8 @@ static inline void kvm_s390_set_psw_cc(struct kvm_vcpu *vcpu, unsigned long cc)
 /* test availability of facility in a kvm intance */
 static inline int test_kvm_facility(struct kvm *kvm, unsigned long nr)
 {
-	return __test_facility(nr, kvm->arch.model.fac->kvm);
+	return __test_facility(nr, kvm->arch.model.fac->mask) &&
+		__test_facility(nr, kvm->arch.model.fac->list);
 }

 /* are cpu states controlled by user space */
diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c
index bdd9b5b17e03..351116939ea2 100644
--- a/arch/s390/kvm/priv.c
+++ b/arch/s390/kvm/priv.c
@@ -348,7 +348,7 @@ static int handle_stfl(struct kvm_vcpu *vcpu)
 	 * We need to shift the lower 32 facility bits (bit 0-31) from a u64
 	 * into a u32 memory representation. They will remain bits 0-31.
 	 */
-	fac = *vcpu->kvm->arch.model.fac->sie >> 32;
+	fac = *vcpu->kvm->arch.model.fac->list >> 32;
 	rc = write_guest_lc(vcpu, offsetof(struct _lowcore, stfl_fac_list),
 			    &fac, sizeof(fac));
 	if (rc)
diff --git a/arch/s390/pci/pci.c b/arch/s390/pci/pci.c
index 753a56731951..f0b85443e060 100644
--- a/arch/s390/pci/pci.c
+++ b/arch/s390/pci/pci.c
@@ -287,7 +287,7 @@ void __iomem *pci_iomap_range(struct pci_dev *pdev,
 	addr = ZPCI_IOMAP_ADDR_BASE | ((u64) idx << 48);
 	return (void __iomem *) addr + offset;
 }
-EXPORT_SYMBOL_GPL(pci_iomap_range);
+EXPORT_SYMBOL(pci_iomap_range);

 void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen)
 {
@@ -309,7 +309,7 @@ void pci_iounmap(struct pci_dev *pdev, void __iomem *addr)
 	}
 	spin_unlock(&zpci_iomap_lock);
 }
-EXPORT_SYMBOL_GPL(pci_iounmap);
+EXPORT_SYMBOL(pci_iounmap);

 static int pci_read(struct pci_bus *bus, unsigned int devfn, int where,
 		    int size, u32 *val)
@@ -483,9 +483,8 @@ void arch_teardown_msi_irqs(struct pci_dev *pdev)
 	airq_iv_free_bit(zpci_aisb_iv, zdev->aisb);
 }

-static void zpci_map_resources(struct zpci_dev *zdev)
+static void zpci_map_resources(struct pci_dev *pdev)
 {
-	struct pci_dev *pdev = zdev->pdev;
 	resource_size_t len;
 	int i;

@@ -499,9 +498,8 @@ static void zpci_map_resources(struct zpci_dev *zdev)
 	}
 }

-static void zpci_unmap_resources(struct zpci_dev *zdev)
+static void zpci_unmap_resources(struct pci_dev *pdev)
 {
-	struct pci_dev *pdev = zdev->pdev;
 	resource_size_t len;
 	int i;

@@ -651,7 +649,7 @@ int pcibios_add_device(struct pci_dev *pdev)

 	zdev->pdev = pdev;
 	pdev->dev.groups = zpci_attr_groups;
-	zpci_map_resources(zdev);
+	zpci_map_resources(pdev);

 	for (i = 0; i < PCI_BAR_COUNT; i++) {
 		res = &pdev->resource[i];
@@ -663,6 +661,11 @@ int pcibios_add_device(struct pci_dev *pdev)
 	return 0;
 }

+void pcibios_release_device(struct pci_dev *pdev)
+{
+	zpci_unmap_resources(pdev);
+}
+
 int pcibios_enable_device(struct pci_dev *pdev, int mask)
 {
 	struct zpci_dev *zdev = get_zdev(pdev);
@@ -670,7 +673,6 @@ int pcibios_enable_device(struct pci_dev *pdev, int mask)
 	zdev->pdev = pdev;
 	zpci_debug_init_device(zdev);
 	zpci_fmb_enable_device(zdev);
-	zpci_map_resources(zdev);

 	return pci_enable_resources(pdev, mask);
 }
@@ -679,7 +681,6 @@ void pcibios_disable_device(struct pci_dev *pdev)
 {
 	struct zpci_dev *zdev = get_zdev(pdev);

-	zpci_unmap_resources(zdev);
 	zpci_fmb_disable_device(zdev);
 	zpci_debug_exit_device(zdev);
 	zdev->pdev = NULL;
@@ -688,7 +689,8 @@ void pcibios_disable_device(struct pci_dev *pdev)
 #ifdef CONFIG_HIBERNATE_CALLBACKS
 static int zpci_restore(struct device *dev)
 {
-	struct zpci_dev *zdev = get_zdev(to_pci_dev(dev));
+	struct pci_dev *pdev = to_pci_dev(dev);
+	struct zpci_dev *zdev = get_zdev(pdev);
 	int ret = 0;

 	if (zdev->state != ZPCI_FN_STATE_ONLINE)
@@ -698,7 +700,7 @@ static int zpci_restore(struct device *dev)
 	if (ret)
 		goto out;

-	zpci_map_resources(zdev);
+	zpci_map_resources(pdev);
 	zpci_register_ioat(zdev, 0, zdev->start_dma + PAGE_OFFSET,
 			   zdev->start_dma + zdev->iommu_size - 1,
 			   (u64) zdev->dma_table);
@@ -709,12 +711,14 @@ out:

 static int zpci_freeze(struct device *dev)
 {
-	struct zpci_dev *zdev = get_zdev(to_pci_dev(dev));
+	struct pci_dev *pdev = to_pci_dev(dev);
+	struct zpci_dev *zdev = get_zdev(pdev);

 	if (zdev->state != ZPCI_FN_STATE_ONLINE)
 		return 0;

 	zpci_unregister_ioat(zdev, 0);
+	zpci_unmap_resources(pdev);
 	return clp_disable_fh(zdev);
 }

diff --git a/arch/s390/pci/pci_mmio.c b/arch/s390/pci/pci_mmio.c
index 8aa271b3d1ad..b1bb2b72302c 100644
--- a/arch/s390/pci/pci_mmio.c
+++ b/arch/s390/pci/pci_mmio.c
@@ -64,8 +64,7 @@ SYSCALL_DEFINE3(s390_pci_mmio_write, unsigned long, mmio_addr,
 	if (copy_from_user(buf, user_buffer, length))
 		goto out;

-	memcpy_toio(io_addr, buf, length);
-	ret = 0;
+	ret = zpci_memcpy_toio(io_addr, buf, length);
 out:
 	if (buf != local_buf)
 		kfree(buf);
@@ -98,16 +97,16 @@ SYSCALL_DEFINE3(s390_pci_mmio_read, unsigned long, mmio_addr,
 		goto out;
 	io_addr = (void __iomem *)((pfn << PAGE_SHIFT) | (mmio_addr & ~PAGE_MASK));

-	ret = -EFAULT;
-	if ((unsigned long) io_addr < ZPCI_IOMAP_ADDR_BASE)
+	if ((unsigned long) io_addr < ZPCI_IOMAP_ADDR_BASE) {
+		ret = -EFAULT;
 		goto out;
-
-	memcpy_fromio(buf, io_addr, length);
-
-	if (copy_to_user(user_buffer, buf, length))
+	}
+	ret = zpci_memcpy_fromio(buf, io_addr, length);
+	if (ret)
 		goto out;
+	if (copy_to_user(user_buffer, buf, length))
+		ret = -EFAULT;

-	ret = 0;
 out:
 	if (buf != local_buf)
 		kfree(buf);
diff --git a/drivers/ata/sata_fsl.c b/drivers/ata/sata_fsl.c
index f9054cd36a72..5389579c5120 100644
--- a/drivers/ata/sata_fsl.c
+++ b/drivers/ata/sata_fsl.c
@@ -869,6 +869,8 @@ try_offline_again:
 	 */
 	ata_msleep(ap, 1);

+	sata_set_spd(link);
+
 	/*
 	 * Now, bring the host controller online again, this can take time
 	 * as PHY reset and communication establishment, 1st D2H FIS and
diff --git a/drivers/char/tpm/tpm-chip.c b/drivers/char/tpm/tpm-chip.c
index 1d278ccd751f..e096e9cddb40 100644
--- a/drivers/char/tpm/tpm-chip.c
+++ b/drivers/char/tpm/tpm-chip.c
@@ -140,24 +140,24 @@ static int tpm_dev_add_device(struct tpm_chip *chip)
 {
 	int rc;

-	rc = device_add(&chip->dev);
+	rc = cdev_add(&chip->cdev, chip->dev.devt, 1);
 	if (rc) {
 		dev_err(&chip->dev,
-			"unable to device_register() %s, major %d, minor %d, err=%d\n",
+			"unable to cdev_add() %s, major %d, minor %d, err=%d\n",
 			chip->devname, MAJOR(chip->dev.devt),
 			MINOR(chip->dev.devt), rc);

+		device_unregister(&chip->dev);
 		return rc;
 	}

-	rc = cdev_add(&chip->cdev, chip->dev.devt, 1);
+	rc = device_add(&chip->dev);
 	if (rc) {
 		dev_err(&chip->dev,
-			"unable to cdev_add() %s, major %d, minor %d, err=%d\n",
+			"unable to device_register() %s, major %d, minor %d, err=%d\n",
 			chip->devname, MAJOR(chip->dev.devt),
 			MINOR(chip->dev.devt), rc);

-		device_unregister(&chip->dev);
 		return rc;
 	}

@@ -174,27 +174,17 @@ static void tpm_dev_del_device(struct tpm_chip *chip)
 * tpm_chip_register() - create a character device for the TPM chip
 * @chip: TPM chip to use.
 *
- * Creates a character device for the TPM chip and adds sysfs interfaces for
- * the device, PPI and TCPA. As the last step this function adds the
- * chip to the list of TPM chips available for use.
+ * Creates a character device for the TPM chip and adds sysfs attributes for
+ * the device. As the last step this function adds the chip to the list of TPM
+ * chips available for in-kernel use.
 *
- * NOTE: This function should be only called after the chip initialization
- * is complete.
- *
- * Called from tpm_<specific>.c probe function only for devices
- * the driver has determined it should claim.  Prior to calling
- * this function the specific probe function has called pci_enable_device
- * upon errant exit from this function specific probe function should call
- * pci_disable_device
+ * This function should be only called after the chip initialization is
+ * complete.
 */
 int tpm_chip_register(struct tpm_chip *chip)
 {
 	int rc;

-	rc = tpm_dev_add_device(chip);
-	if (rc)
-		return rc;
-
 	/* Populate sysfs for TPM1 devices. */
 	if (!(chip->flags & TPM_CHIP_FLAG_TPM2)) {
 		rc = tpm_sysfs_add_device(chip);
@@ -208,6 +198,10 @@ int tpm_chip_register(struct tpm_chip *chip)
 		chip->bios_dir = tpm_bios_log_setup(chip->devname);
 	}

+	rc = tpm_dev_add_device(chip);
+	if (rc)
+		return rc;
+
 	/* Make the chip available. */
 	spin_lock(&driver_lock);
 	list_add_rcu(&chip->list, &tpm_chip_list);
diff --git a/drivers/char/tpm/tpm_ibmvtpm.c b/drivers/char/tpm/tpm_ibmvtpm.c
index b1e53e3aece5..42ffa5e7a1e0 100644
--- a/drivers/char/tpm/tpm_ibmvtpm.c
+++ b/drivers/char/tpm/tpm_ibmvtpm.c
@@ -124,7 +124,7 @@ static int tpm_ibmvtpm_send(struct tpm_chip *chip, u8 *buf, size_t count)
 {
 	struct ibmvtpm_dev *ibmvtpm;
 	struct ibmvtpm_crq crq;
-	u64 *word = (u64 *) &crq;
+	__be64 *word = (__be64 *)&crq;
 	int rc;

 	ibmvtpm = (struct ibmvtpm_dev *)TPM_VPRIV(chip);
@@ -145,11 +145,11 @@ static int tpm_ibmvtpm_send(struct tpm_chip *chip, u8 *buf, size_t count)
 	memcpy((void *)ibmvtpm->rtce_buf, (void *)buf, count);
 	crq.valid = (u8)IBMVTPM_VALID_CMD;
 	crq.msg = (u8)VTPM_TPM_COMMAND;
-	crq.len = (u16)count;
-	crq.data = ibmvtpm->rtce_dma_handle;
+	crq.len = cpu_to_be16(count);
+	crq.data = cpu_to_be32(ibmvtpm->rtce_dma_handle);

-	rc = ibmvtpm_send_crq(ibmvtpm->vdev, cpu_to_be64(word[0]),
-			      cpu_to_be64(word[1]));
+	rc = ibmvtpm_send_crq(ibmvtpm->vdev, be64_to_cpu(word[0]),
+			      be64_to_cpu(word[1]));
 	if (rc != H_SUCCESS) {
 		dev_err(ibmvtpm->dev, "tpm_ibmvtpm_send failed rc=%d\n", rc);
 		rc = 0;
diff --git a/drivers/char/tpm/tpm_ibmvtpm.h b/drivers/char/tpm/tpm_ibmvtpm.h
index f595f14426bf..6af92890518f 100644
--- a/drivers/char/tpm/tpm_ibmvtpm.h
+++ b/drivers/char/tpm/tpm_ibmvtpm.h
@@ -22,9 +22,9 @@
 struct ibmvtpm_crq {
 	u8 valid;
 	u8 msg;
-	u16 len;
-	u32 data;
-	u64 reserved;
+	__be16 len;
+	__be32 data;
+	__be64 reserved;
 } __attribute__((packed, aligned(8)));

 struct ibmvtpm_crq_queue {
diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c
index 210cf4874cb7..edf274cabe81 100644
--- a/drivers/i2c/i2c-core.c
+++ b/drivers/i2c/i2c-core.c
@@ -679,9 +679,6 @@ static int i2c_device_remove(struct device *dev)
 		status = driver->remove(client);
 	}

-	if (dev->of_node)
-		irq_dispose_mapping(client->irq);
-
 	dev_pm_domain_detach(&client->dev, true);
 	return status;
 }
diff --git a/drivers/input/keyboard/tc3589x-keypad.c b/drivers/input/keyboard/tc3589x-keypad.c
index 8ff612d160b0..563932500ff1 100644
--- a/drivers/input/keyboard/tc3589x-keypad.c
+++ b/drivers/input/keyboard/tc3589x-keypad.c
@@ -411,9 +411,9 @@ static int tc3589x_keypad_probe(struct platform_device *pdev)

 	input_set_drvdata(input, keypad);

-	error = request_threaded_irq(irq, NULL,
-			tc3589x_keypad_irq, plat->irqtype,
-			"tc3589x-keypad", keypad);
+	error = request_threaded_irq(irq, NULL, tc3589x_keypad_irq,
+				     plat->irqtype | IRQF_ONESHOT,
+				     "tc3589x-keypad", keypad);
 	if (error < 0) {
 		dev_err(&pdev->dev,
 			"Could not allocate irq %d,error %d\n",
diff --git a/drivers/input/misc/mma8450.c b/drivers/input/misc/mma8450.c
index 59d4dcddf6de..98228773a111 100644
--- a/drivers/input/misc/mma8450.c
+++ b/drivers/input/misc/mma8450.c
@@ -187,6 +187,7 @@ static int mma8450_probe(struct i2c_client *c,
 	idev->private		= m;
 	idev->input->name	= MMA8450_DRV_NAME;
 	idev->input->id.bustype	= BUS_I2C;
+	idev->input->dev.parent	= &c->dev;
 	idev->poll		= mma8450_poll;
 	idev->poll_interval	= POLL_INTERVAL;
 	idev->poll_interval_max	= POLL_INTERVAL_MAX;
diff --git a/drivers/input/mouse/alps.c b/drivers/input/mouse/alps.c
index d28726a0ef85..1bd15ebc01f2 100644
--- a/drivers/input/mouse/alps.c
+++ b/drivers/input/mouse/alps.c
@@ -2605,8 +2605,10 @@ int alps_detect(struct psmouse *psmouse, bool set_properties)
 		return -ENOMEM;

 	error = alps_identify(psmouse, priv);
-	if (error)
+	if (error) {
+		kfree(priv);
 		return error;
+	}

 	if (set_properties) {
 		psmouse->vendor = "ALPS";
diff --git a/drivers/input/mouse/cyapa_gen3.c b/drivers/input/mouse/cyapa_gen3.c
index 77e9d70a986b..1e2291c378fe 100644
--- a/drivers/input/mouse/cyapa_gen3.c
+++ b/drivers/input/mouse/cyapa_gen3.c
@@ -20,7 +20,7 @@
 #include <linux/input/mt.h>
 #include <linux/module.h>
 #include <linux/slab.h>
-#include <linux/unaligned/access_ok.h>
+#include <asm/unaligned.h>
 #include "cyapa.h"


diff --git a/drivers/input/mouse/cyapa_gen5.c b/drivers/input/mouse/cyapa_gen5.c
index ddf5393a1180..5b611dd71e79 100644
--- a/drivers/input/mouse/cyapa_gen5.c
+++ b/drivers/input/mouse/cyapa_gen5.c
@@ -17,7 +17,7 @@
 #include <linux/mutex.h>
 #include <linux/completion.h>
 #include <linux/slab.h>
-#include <linux/unaligned/access_ok.h>
+#include <asm/unaligned.h>
 #include <linux/crc-itu-t.h>
 #include "cyapa.h"

@@ -1926,7 +1926,7 @@ static int cyapa_gen5_read_idac_data(struct cyapa *cyapa,
 			electrodes_tx = cyapa->electrodes_x;
 		max_element_cnt = ((cyapa->aligned_electrodes_rx + 7) &
 					~7u) * electrodes_tx;
-	} else if (idac_data_type == GEN5_RETRIEVE_SELF_CAP_PWC_DATA) {
+	} else {
 		offset = 2;
 		max_element_cnt = cyapa->electrodes_x +
 				cyapa->electrodes_y;
diff --git a/drivers/input/mouse/focaltech.c b/drivers/input/mouse/focaltech.c index 757f78a94aec..23d259416f2f 100644 --- a/drivers/input/mouse/focaltech.c +++ b/drivers/input/mouse/focaltech.c | |||
@@ -67,9 +67,6 @@ static void focaltech_reset(struct psmouse *psmouse) | |||
67 | 67 | ||
68 | #define FOC_MAX_FINGERS 5 | 68 | #define FOC_MAX_FINGERS 5 |
69 | 69 | ||
70 | #define FOC_MAX_X 2431 | ||
71 | #define FOC_MAX_Y 1663 | ||
72 | |||
73 | /* | 70 | /* |
74 | * Current state of a single finger on the touchpad. | 71 | * Current state of a single finger on the touchpad. |
75 | */ | 72 | */ |
@@ -129,9 +126,17 @@ static void focaltech_report_state(struct psmouse *psmouse) | |||
129 | input_mt_slot(dev, i); | 126 | input_mt_slot(dev, i); |
130 | input_mt_report_slot_state(dev, MT_TOOL_FINGER, active); | 127 | input_mt_report_slot_state(dev, MT_TOOL_FINGER, active); |
131 | if (active) { | 128 | if (active) { |
132 | input_report_abs(dev, ABS_MT_POSITION_X, finger->x); | 129 | unsigned int clamped_x, clamped_y; |
130 | /* | ||
131 | * The touchpad might report invalid data, so we clamp | ||
132 | * the resulting values so that we do not confuse | ||
133 | * userspace. | ||
134 | */ | ||
135 | clamped_x = clamp(finger->x, 0U, priv->x_max); | ||
136 | clamped_y = clamp(finger->y, 0U, priv->y_max); | ||
137 | input_report_abs(dev, ABS_MT_POSITION_X, clamped_x); | ||
133 | input_report_abs(dev, ABS_MT_POSITION_Y, | 138 | input_report_abs(dev, ABS_MT_POSITION_Y, |
134 | FOC_MAX_Y - finger->y); | 139 | priv->y_max - clamped_y); |
135 | } | 140 | } |
136 | } | 141 | } |
137 | input_mt_report_pointer_emulation(dev, true); | 142 | input_mt_report_pointer_emulation(dev, true); |
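
For context, the clamping added above is a plain bounded min/max on each reported coordinate before it is handed to input_report_abs(). A small stand-alone sketch; the x_max/y_max values below are invented stand-ins for whatever the driver reads from the hardware:

#include <stdio.h>

/* same idea as the kernel's clamp(): bound val to [lo, hi] */
static unsigned int clampu(unsigned int val, unsigned int lo, unsigned int hi)
{
    if (val < lo)
        return lo;
    if (val > hi)
        return hi;
    return val;
}

int main(void)
{
    unsigned int x_max = 2431, y_max = 1663;    /* assumed limits */
    unsigned int x = 5000, y = 1200;            /* bogus hardware report */

    unsigned int cx = clampu(x, 0, x_max);
    unsigned int cy = clampu(y, 0, y_max);

    /* the driver also flips Y: it reports y_max - clamped_y */
    printf("report x=%u y=%u\n", cx, y_max - cy);
    return 0;
}
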
@@ -180,16 +185,6 @@ static void focaltech_process_abs_packet(struct psmouse *psmouse, | |||
180 | 185 | ||
181 | state->pressed = (packet[0] >> 4) & 1; | 186 | state->pressed = (packet[0] >> 4) & 1; |
182 | 187 | ||
183 | /* | ||
184 | * packet[5] contains some kind of tool size in the most | ||
185 | * significant nibble. 0xff is a special value (latching) that | ||
186 | * signals a large contact area. | ||
187 | */ | ||
188 | if (packet[5] == 0xff) { | ||
189 | state->fingers[finger].valid = false; | ||
190 | return; | ||
191 | } | ||
192 | |||
193 | state->fingers[finger].x = ((packet[1] & 0xf) << 8) | packet[2]; | 188 | state->fingers[finger].x = ((packet[1] & 0xf) << 8) | packet[2]; |
194 | state->fingers[finger].y = (packet[3] << 8) | packet[4]; | 189 | state->fingers[finger].y = (packet[3] << 8) | packet[4]; |
195 | state->fingers[finger].valid = true; | 190 | state->fingers[finger].valid = true; |
@@ -381,6 +376,23 @@ static int focaltech_read_size(struct psmouse *psmouse) | |||
381 | 376 | ||
382 | return 0; | 377 | return 0; |
383 | } | 378 | } |
379 | |||
380 | void focaltech_set_resolution(struct psmouse *psmouse, unsigned int resolution) | ||
381 | { | ||
382 | /* not supported yet */ | ||
383 | } | ||
384 | |||
385 | static void focaltech_set_rate(struct psmouse *psmouse, unsigned int rate) | ||
386 | { | ||
387 | /* not supported yet */ | ||
388 | } | ||
389 | |||
390 | static void focaltech_set_scale(struct psmouse *psmouse, | ||
391 | enum psmouse_scale scale) | ||
392 | { | ||
393 | /* not supported yet */ | ||
394 | } | ||
395 | |||
384 | int focaltech_init(struct psmouse *psmouse) | 396 | int focaltech_init(struct psmouse *psmouse) |
385 | { | 397 | { |
386 | struct focaltech_data *priv; | 398 | struct focaltech_data *priv; |
@@ -415,6 +427,14 @@ int focaltech_init(struct psmouse *psmouse) | |||
415 | psmouse->cleanup = focaltech_reset; | 427 | psmouse->cleanup = focaltech_reset; |
416 | /* resync is not supported yet */ | 428 | /* resync is not supported yet */ |
417 | psmouse->resync_time = 0; | 429 | psmouse->resync_time = 0; |
430 | /* | ||
431 | * rate/resolution/scale changes are not supported yet, and | ||
432 | * the generic implementations of these functions seem to | ||
433 | * confuse some touchpads | ||
434 | */ | ||
435 | psmouse->set_resolution = focaltech_set_resolution; | ||
436 | psmouse->set_rate = focaltech_set_rate; | ||
437 | psmouse->set_scale = focaltech_set_scale; | ||
418 | 438 | ||
419 | return 0; | 439 | return 0; |
420 | 440 | ||
diff --git a/drivers/input/mouse/psmouse-base.c b/drivers/input/mouse/psmouse-base.c index 4ccd01d7a48d..8bc61237bc1b 100644 --- a/drivers/input/mouse/psmouse-base.c +++ b/drivers/input/mouse/psmouse-base.c | |||
@@ -454,6 +454,17 @@ static void psmouse_set_rate(struct psmouse *psmouse, unsigned int rate) | |||
454 | } | 454 | } |
455 | 455 | ||
456 | /* | 456 | /* |
457 | * Here we set the mouse scaling. | ||
458 | */ | ||
459 | |||
460 | static void psmouse_set_scale(struct psmouse *psmouse, enum psmouse_scale scale) | ||
461 | { | ||
462 | ps2_command(&psmouse->ps2dev, NULL, | ||
463 | scale == PSMOUSE_SCALE21 ? PSMOUSE_CMD_SETSCALE21 : | ||
464 | PSMOUSE_CMD_SETSCALE11); | ||
465 | } | ||
466 | |||
467 | /* | ||
457 | * psmouse_poll() - default poll handler. Everyone except for ALPS uses it. | 468 | * psmouse_poll() - default poll handler. Everyone except for ALPS uses it. |
458 | */ | 469 | */ |
459 | 470 | ||
@@ -689,6 +700,7 @@ static void psmouse_apply_defaults(struct psmouse *psmouse) | |||
689 | 700 | ||
690 | psmouse->set_rate = psmouse_set_rate; | 701 | psmouse->set_rate = psmouse_set_rate; |
691 | psmouse->set_resolution = psmouse_set_resolution; | 702 | psmouse->set_resolution = psmouse_set_resolution; |
703 | psmouse->set_scale = psmouse_set_scale; | ||
692 | psmouse->poll = psmouse_poll; | 704 | psmouse->poll = psmouse_poll; |
693 | psmouse->protocol_handler = psmouse_process_byte; | 705 | psmouse->protocol_handler = psmouse_process_byte; |
694 | psmouse->pktsize = 3; | 706 | psmouse->pktsize = 3; |
@@ -1160,7 +1172,7 @@ static void psmouse_initialize(struct psmouse *psmouse) | |||
1160 | if (psmouse_max_proto != PSMOUSE_PS2) { | 1172 | if (psmouse_max_proto != PSMOUSE_PS2) { |
1161 | psmouse->set_rate(psmouse, psmouse->rate); | 1173 | psmouse->set_rate(psmouse, psmouse->rate); |
1162 | psmouse->set_resolution(psmouse, psmouse->resolution); | 1174 | psmouse->set_resolution(psmouse, psmouse->resolution); |
1163 | ps2_command(&psmouse->ps2dev, NULL, PSMOUSE_CMD_SETSCALE11); | 1175 | psmouse->set_scale(psmouse, PSMOUSE_SCALE11); |
1164 | } | 1176 | } |
1165 | } | 1177 | } |
1166 | 1178 | ||
diff --git a/drivers/input/mouse/psmouse.h b/drivers/input/mouse/psmouse.h index c2ff137ecbdb..d02e1bdc9ae4 100644 --- a/drivers/input/mouse/psmouse.h +++ b/drivers/input/mouse/psmouse.h | |||
@@ -36,6 +36,11 @@ typedef enum { | |||
36 | PSMOUSE_FULL_PACKET | 36 | PSMOUSE_FULL_PACKET |
37 | } psmouse_ret_t; | 37 | } psmouse_ret_t; |
38 | 38 | ||
39 | enum psmouse_scale { | ||
40 | PSMOUSE_SCALE11, | ||
41 | PSMOUSE_SCALE21 | ||
42 | }; | ||
43 | |||
39 | struct psmouse { | 44 | struct psmouse { |
40 | void *private; | 45 | void *private; |
41 | struct input_dev *dev; | 46 | struct input_dev *dev; |
@@ -67,6 +72,7 @@ struct psmouse { | |||
67 | psmouse_ret_t (*protocol_handler)(struct psmouse *psmouse); | 72 | psmouse_ret_t (*protocol_handler)(struct psmouse *psmouse); |
68 | void (*set_rate)(struct psmouse *psmouse, unsigned int rate); | 73 | void (*set_rate)(struct psmouse *psmouse, unsigned int rate); |
69 | void (*set_resolution)(struct psmouse *psmouse, unsigned int resolution); | 74 | void (*set_resolution)(struct psmouse *psmouse, unsigned int resolution); |
75 | void (*set_scale)(struct psmouse *psmouse, enum psmouse_scale scale); | ||
70 | 76 | ||
71 | int (*reconnect)(struct psmouse *psmouse); | 77 | int (*reconnect)(struct psmouse *psmouse); |
72 | void (*disconnect)(struct psmouse *psmouse); | 78 | void (*disconnect)(struct psmouse *psmouse); |
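
Taken together, the psmouse-base.c and psmouse.h hunks route scale changes through a per-device set_scale() callback, defaulted to a helper that issues the PS/2 SETSCALE command, so protocol drivers such as focaltech above can override it with a no-op. A user-space sketch of that dispatch shape, with hypothetical names:

#include <stdio.h>

enum scale { SCALE11, SCALE21 };

struct mouse {
    const char *name;
    void (*set_scale)(struct mouse *m, enum scale s);
};

static void generic_set_scale(struct mouse *m, enum scale s)
{
    printf("%s: sending SETSCALE%s command\n", m->name,
           s == SCALE21 ? "21" : "11");
}

static void noop_set_scale(struct mouse *m, enum scale s)
{
    /* some touchpads are confused by the generic command: do nothing */
    (void)m;
    (void)s;
}

int main(void)
{
    struct mouse ps2 = { "generic", generic_set_scale };
    struct mouse focal = { "focaltech", noop_set_scale };

    ps2.set_scale(&ps2, SCALE11);    /* what initialization now goes through */
    focal.set_scale(&focal, SCALE11);
    return 0;
}
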
diff --git a/drivers/input/touchscreen/Kconfig b/drivers/input/touchscreen/Kconfig index 58917525126e..6261fd6d7c3c 100644 --- a/drivers/input/touchscreen/Kconfig +++ b/drivers/input/touchscreen/Kconfig | |||
@@ -943,6 +943,7 @@ config TOUCHSCREEN_SUN4I | |||
943 | tristate "Allwinner sun4i resistive touchscreen controller support" | 943 | tristate "Allwinner sun4i resistive touchscreen controller support" |
944 | depends on ARCH_SUNXI || COMPILE_TEST | 944 | depends on ARCH_SUNXI || COMPILE_TEST |
945 | depends on HWMON | 945 | depends on HWMON |
946 | depends on THERMAL || !THERMAL_OF | ||
946 | help | 947 | help |
947 | This selects support for the resistive touchscreen controller | 948 | This selects support for the resistive touchscreen controller |
948 | found on Allwinner sunxi SoCs. | 949 | found on Allwinner sunxi SoCs. |
diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig index baa0d9786f50..1ae4e547b419 100644 --- a/drivers/iommu/Kconfig +++ b/drivers/iommu/Kconfig | |||
@@ -23,6 +23,7 @@ config IOMMU_IO_PGTABLE | |||
23 | config IOMMU_IO_PGTABLE_LPAE | 23 | config IOMMU_IO_PGTABLE_LPAE |
24 | bool "ARMv7/v8 Long Descriptor Format" | 24 | bool "ARMv7/v8 Long Descriptor Format" |
25 | select IOMMU_IO_PGTABLE | 25 | select IOMMU_IO_PGTABLE |
26 | depends on ARM || ARM64 || COMPILE_TEST | ||
26 | help | 27 | help |
27 | Enable support for the ARM long descriptor pagetable format. | 28 | Enable support for the ARM long descriptor pagetable format. |
28 | This allocator supports 4K/2M/1G, 16K/32M and 64K/512M page | 29 | This allocator supports 4K/2M/1G, 16K/32M and 64K/512M page |
@@ -63,6 +64,7 @@ config MSM_IOMMU | |||
63 | bool "MSM IOMMU Support" | 64 | bool "MSM IOMMU Support" |
64 | depends on ARM | 65 | depends on ARM |
65 | depends on ARCH_MSM8X60 || ARCH_MSM8960 || COMPILE_TEST | 66 | depends on ARCH_MSM8X60 || ARCH_MSM8960 || COMPILE_TEST |
67 | depends on BROKEN | ||
66 | select IOMMU_API | 68 | select IOMMU_API |
67 | help | 69 | help |
68 | Support for the IOMMUs found on certain Qualcomm SOCs. | 70 | Support for the IOMMUs found on certain Qualcomm SOCs. |
diff --git a/drivers/iommu/exynos-iommu.c b/drivers/iommu/exynos-iommu.c index 7ce52737c7a1..dc14fec4ede1 100644 --- a/drivers/iommu/exynos-iommu.c +++ b/drivers/iommu/exynos-iommu.c | |||
@@ -1186,8 +1186,15 @@ static const struct iommu_ops exynos_iommu_ops = { | |||
1186 | 1186 | ||
1187 | static int __init exynos_iommu_init(void) | 1187 | static int __init exynos_iommu_init(void) |
1188 | { | 1188 | { |
1189 | struct device_node *np; | ||
1189 | int ret; | 1190 | int ret; |
1190 | 1191 | ||
1192 | np = of_find_matching_node(NULL, sysmmu_of_match); | ||
1193 | if (!np) | ||
1194 | return 0; | ||
1195 | |||
1196 | of_node_put(np); | ||
1197 | |||
1191 | lv2table_kmem_cache = kmem_cache_create("exynos-iommu-lv2table", | 1198 | lv2table_kmem_cache = kmem_cache_create("exynos-iommu-lv2table", |
1192 | LV2TABLE_SIZE, LV2TABLE_SIZE, 0, NULL); | 1199 | LV2TABLE_SIZE, LV2TABLE_SIZE, 0, NULL); |
1193 | if (!lv2table_kmem_cache) { | 1200 | if (!lv2table_kmem_cache) { |
diff --git a/drivers/iommu/io-pgtable-arm.c b/drivers/iommu/io-pgtable-arm.c index 5a500edf00cc..b610a8dee238 100644 --- a/drivers/iommu/io-pgtable-arm.c +++ b/drivers/iommu/io-pgtable-arm.c | |||
@@ -56,7 +56,8 @@ | |||
56 | ((((d)->levels - ((l) - ARM_LPAE_START_LVL(d) + 1)) \ | 56 | ((((d)->levels - ((l) - ARM_LPAE_START_LVL(d) + 1)) \ |
57 | * (d)->bits_per_level) + (d)->pg_shift) | 57 | * (d)->bits_per_level) + (d)->pg_shift) |
58 | 58 | ||
59 | #define ARM_LPAE_PAGES_PER_PGD(d) ((d)->pgd_size >> (d)->pg_shift) | 59 | #define ARM_LPAE_PAGES_PER_PGD(d) \ |
60 | DIV_ROUND_UP((d)->pgd_size, 1UL << (d)->pg_shift) | ||
60 | 61 | ||
61 | /* | 62 | /* |
62 | * Calculate the index at level l used to map virtual address a using the | 63 | * Calculate the index at level l used to map virtual address a using the |
@@ -66,7 +67,7 @@ | |||
66 | ((l) == ARM_LPAE_START_LVL(d) ? ilog2(ARM_LPAE_PAGES_PER_PGD(d)) : 0) | 67 | ((l) == ARM_LPAE_START_LVL(d) ? ilog2(ARM_LPAE_PAGES_PER_PGD(d)) : 0) |
67 | 68 | ||
68 | #define ARM_LPAE_LVL_IDX(a,l,d) \ | 69 | #define ARM_LPAE_LVL_IDX(a,l,d) \ |
69 | (((a) >> ARM_LPAE_LVL_SHIFT(l,d)) & \ | 70 | (((u64)(a) >> ARM_LPAE_LVL_SHIFT(l,d)) & \ |
70 | ((1 << ((d)->bits_per_level + ARM_LPAE_PGD_IDX(l,d))) - 1)) | 71 | ((1 << ((d)->bits_per_level + ARM_LPAE_PGD_IDX(l,d))) - 1)) |
71 | 72 | ||
72 | /* Calculate the block/page mapping size at level l for pagetable in d. */ | 73 | /* Calculate the block/page mapping size at level l for pagetable in d. */ |
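
Two subtle fixes in the io-pgtable-arm macros: the page count must round up, since the PGD can be smaller than one page and a plain right shift would then yield zero, and the address is widened to u64 before shifting so large IOVAs are not truncated on 32-bit builds. A quick stand-alone illustration with arbitrary sizes:

#include <stdio.h>
#include <stdint.h>

#define DIV_ROUND_UP(n, d)    (((n) + (d) - 1) / (d))

int main(void)
{
    unsigned long pgd_size = 64;    /* assumed: a PGD smaller than one page */
    unsigned long pg_shift = 12;    /* 4K granule */

    /* plain shift truncates to 0; DIV_ROUND_UP keeps at least one page */
    printf("shift: %lu, round-up: %lu\n",
           pgd_size >> pg_shift,
           DIV_ROUND_UP(pgd_size, 1UL << pg_shift));

    /* indexing an address above 4GB: the high bits are lost if the value
     * is narrowed to 32 bits before the shift, giving the wrong index */
    uint32_t iova32 = 0x80000000u;      /* truncated view of the address */
    uint64_t iova64 = 0x180000000ull;   /* full 64-bit address */
    printf("truncated idx: %llu, correct idx: %llu\n",
           (unsigned long long)(iova32 >> 30),
           (unsigned long long)(iova64 >> 30));
    return 0;
}
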
diff --git a/drivers/iommu/omap-iommu.c b/drivers/iommu/omap-iommu.c index f59f857b702e..a4ba851825c2 100644 --- a/drivers/iommu/omap-iommu.c +++ b/drivers/iommu/omap-iommu.c | |||
@@ -1376,6 +1376,13 @@ static int __init omap_iommu_init(void) | |||
1376 | struct kmem_cache *p; | 1376 | struct kmem_cache *p; |
1377 | const unsigned long flags = SLAB_HWCACHE_ALIGN; | 1377 | const unsigned long flags = SLAB_HWCACHE_ALIGN; |
1378 | size_t align = 1 << 10; /* L2 pagetable alignement */ | 1378 | size_t align = 1 << 10; /* L2 pagetable alignement */ |
1379 | struct device_node *np; | ||
1380 | |||
1381 | np = of_find_matching_node(NULL, omap_iommu_of_match); | ||
1382 | if (!np) | ||
1383 | return 0; | ||
1384 | |||
1385 | of_node_put(np); | ||
1379 | 1386 | ||
1380 | p = kmem_cache_create("iopte_cache", IOPTE_TABLE_SIZE, align, flags, | 1387 | p = kmem_cache_create("iopte_cache", IOPTE_TABLE_SIZE, align, flags, |
1381 | iopte_cachep_ctor); | 1388 | iopte_cachep_ctor); |
diff --git a/drivers/iommu/rockchip-iommu.c b/drivers/iommu/rockchip-iommu.c index 6a8b1ec4a48a..9f74fddcd304 100644 --- a/drivers/iommu/rockchip-iommu.c +++ b/drivers/iommu/rockchip-iommu.c | |||
@@ -1015,8 +1015,15 @@ static struct platform_driver rk_iommu_driver = { | |||
1015 | 1015 | ||
1016 | static int __init rk_iommu_init(void) | 1016 | static int __init rk_iommu_init(void) |
1017 | { | 1017 | { |
1018 | struct device_node *np; | ||
1018 | int ret; | 1019 | int ret; |
1019 | 1020 | ||
1021 | np = of_find_matching_node(NULL, rk_iommu_dt_ids); | ||
1022 | if (!np) | ||
1023 | return 0; | ||
1024 | |||
1025 | of_node_put(np); | ||
1026 | |||
1020 | ret = bus_set_iommu(&platform_bus_type, &rk_iommu_ops); | 1027 | ret = bus_set_iommu(&platform_bus_type, &rk_iommu_ops); |
1021 | if (ret) | 1028 | if (ret) |
1022 | return ret; | 1029 | return ret; |
diff --git a/drivers/mtd/nand/Kconfig b/drivers/mtd/nand/Kconfig index 5b76a173cd95..5897d8d8fa5a 100644 --- a/drivers/mtd/nand/Kconfig +++ b/drivers/mtd/nand/Kconfig | |||
@@ -526,6 +526,7 @@ config MTD_NAND_SUNXI | |||
526 | 526 | ||
527 | config MTD_NAND_HISI504 | 527 | config MTD_NAND_HISI504 |
528 | tristate "Support for NAND controller on Hisilicon SoC Hip04" | 528 | tristate "Support for NAND controller on Hisilicon SoC Hip04" |
529 | depends on HAS_DMA | ||
529 | help | 530 | help |
530 | Enables support for NAND controller on Hisilicon SoC Hip04. | 531 | Enables support for NAND controller on Hisilicon SoC Hip04. |
531 | 532 | ||
diff --git a/drivers/mtd/nand/pxa3xx_nand.c b/drivers/mtd/nand/pxa3xx_nand.c index 96b0b1d27df1..10b1f7a4fe50 100644 --- a/drivers/mtd/nand/pxa3xx_nand.c +++ b/drivers/mtd/nand/pxa3xx_nand.c | |||
@@ -480,6 +480,42 @@ static void disable_int(struct pxa3xx_nand_info *info, uint32_t int_mask) | |||
480 | nand_writel(info, NDCR, ndcr | int_mask); | 480 | nand_writel(info, NDCR, ndcr | int_mask); |
481 | } | 481 | } |
482 | 482 | ||
483 | static void drain_fifo(struct pxa3xx_nand_info *info, void *data, int len) | ||
484 | { | ||
485 | if (info->ecc_bch) { | ||
486 | int timeout; | ||
487 | |||
488 | /* | ||
489 | * According to the datasheet, when reading from NDDB | ||
490 | * with BCH enabled, after each 32-byte read, we | ||
491 | * have to make sure that the NDSR.RDDREQ bit is set. | ||
492 | * | ||
493 | * Drain the FIFO eight 32-bit reads at a time, and skip | ||
494 | * the polling on the last read. | ||
495 | */ | ||
496 | while (len > 8) { | ||
497 | __raw_readsl(info->mmio_base + NDDB, data, 8); | ||
498 | |||
499 | for (timeout = 0; | ||
500 | !(nand_readl(info, NDSR) & NDSR_RDDREQ); | ||
501 | timeout++) { | ||
502 | if (timeout >= 5) { | ||
503 | dev_err(&info->pdev->dev, | ||
504 | "Timeout on RDDREQ while draining the FIFO\n"); | ||
505 | return; | ||
506 | } | ||
507 | |||
508 | mdelay(1); | ||
509 | } | ||
510 | |||
511 | data += 32; | ||
512 | len -= 8; | ||
513 | } | ||
514 | } | ||
515 | |||
516 | __raw_readsl(info->mmio_base + NDDB, data, len); | ||
517 | } | ||
518 | |||
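
The new drain_fifo() reads the controller FIFO in bursts of eight 32-bit words, polling NDSR.RDDREQ between bursts with a bounded retry count, and reads the final short burst without polling. A simplified user-space model of that loop, using a fake FIFO and an always-ready status flag instead of the NAND registers:

#include <stdio.h>
#include <string.h>

#define BURST_WORDS    8

static unsigned int fifo[64];               /* pretend device FIFO */
static int fifo_ready(void) { return 1; }   /* NDSR.RDDREQ stand-in */

/* copy 'len' 32-bit words out of the FIFO, 8 at a time, polling in between */
static int drain_fifo(unsigned int *dst, int len)
{
    int src = 0;

    while (len > BURST_WORDS) {
        memcpy(dst, &fifo[src], BURST_WORDS * sizeof(*dst));
        dst += BURST_WORDS;
        src += BURST_WORDS;
        len -= BURST_WORDS;

        int timeout;
        for (timeout = 0; !fifo_ready(); timeout++) {
            if (timeout >= 5)
                return -1;    /* give up, like the dev_err() path */
            /* the driver mdelay(1)s here */
        }
    }
    memcpy(dst, &fifo[src], len * sizeof(*dst));    /* last, unpolled chunk */
    return 0;
}

int main(void)
{
    unsigned int buf[64];

    return drain_fifo(buf, 20) ? 1 : 0;
}
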
483 | static void handle_data_pio(struct pxa3xx_nand_info *info) | 519 | static void handle_data_pio(struct pxa3xx_nand_info *info) |
484 | { | 520 | { |
485 | unsigned int do_bytes = min(info->data_size, info->chunk_size); | 521 | unsigned int do_bytes = min(info->data_size, info->chunk_size); |
@@ -496,14 +532,14 @@ static void handle_data_pio(struct pxa3xx_nand_info *info) | |||
496 | DIV_ROUND_UP(info->oob_size, 4)); | 532 | DIV_ROUND_UP(info->oob_size, 4)); |
497 | break; | 533 | break; |
498 | case STATE_PIO_READING: | 534 | case STATE_PIO_READING: |
499 | __raw_readsl(info->mmio_base + NDDB, | 535 | drain_fifo(info, |
500 | info->data_buff + info->data_buff_pos, | 536 | info->data_buff + info->data_buff_pos, |
501 | DIV_ROUND_UP(do_bytes, 4)); | 537 | DIV_ROUND_UP(do_bytes, 4)); |
502 | 538 | ||
503 | if (info->oob_size > 0) | 539 | if (info->oob_size > 0) |
504 | __raw_readsl(info->mmio_base + NDDB, | 540 | drain_fifo(info, |
505 | info->oob_buff + info->oob_buff_pos, | 541 | info->oob_buff + info->oob_buff_pos, |
506 | DIV_ROUND_UP(info->oob_size, 4)); | 542 | DIV_ROUND_UP(info->oob_size, 4)); |
507 | break; | 543 | break; |
508 | default: | 544 | default: |
509 | dev_err(&info->pdev->dev, "%s: invalid state %d\n", __func__, | 545 | dev_err(&info->pdev->dev, "%s: invalid state %d\n", __func__, |
@@ -1572,6 +1608,8 @@ static int alloc_nand_resource(struct platform_device *pdev) | |||
1572 | int ret, irq, cs; | 1608 | int ret, irq, cs; |
1573 | 1609 | ||
1574 | pdata = dev_get_platdata(&pdev->dev); | 1610 | pdata = dev_get_platdata(&pdev->dev); |
1611 | if (pdata->num_cs <= 0) | ||
1612 | return -ENODEV; | ||
1575 | info = devm_kzalloc(&pdev->dev, sizeof(*info) + (sizeof(*mtd) + | 1613 | info = devm_kzalloc(&pdev->dev, sizeof(*info) + (sizeof(*mtd) + |
1576 | sizeof(*host)) * pdata->num_cs, GFP_KERNEL); | 1614 | sizeof(*host)) * pdata->num_cs, GFP_KERNEL); |
1577 | if (!info) | 1615 | if (!info) |
diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c index 3c82e02e3dae..b0f69248cb71 100644 --- a/drivers/net/can/dev.c +++ b/drivers/net/can/dev.c | |||
@@ -579,6 +579,10 @@ struct sk_buff *alloc_can_skb(struct net_device *dev, struct can_frame **cf) | |||
579 | skb->pkt_type = PACKET_BROADCAST; | 579 | skb->pkt_type = PACKET_BROADCAST; |
580 | skb->ip_summed = CHECKSUM_UNNECESSARY; | 580 | skb->ip_summed = CHECKSUM_UNNECESSARY; |
581 | 581 | ||
582 | skb_reset_mac_header(skb); | ||
583 | skb_reset_network_header(skb); | ||
584 | skb_reset_transport_header(skb); | ||
585 | |||
582 | can_skb_reserve(skb); | 586 | can_skb_reserve(skb); |
583 | can_skb_prv(skb)->ifindex = dev->ifindex; | 587 | can_skb_prv(skb)->ifindex = dev->ifindex; |
584 | 588 | ||
@@ -603,6 +607,10 @@ struct sk_buff *alloc_canfd_skb(struct net_device *dev, | |||
603 | skb->pkt_type = PACKET_BROADCAST; | 607 | skb->pkt_type = PACKET_BROADCAST; |
604 | skb->ip_summed = CHECKSUM_UNNECESSARY; | 608 | skb->ip_summed = CHECKSUM_UNNECESSARY; |
605 | 609 | ||
610 | skb_reset_mac_header(skb); | ||
611 | skb_reset_network_header(skb); | ||
612 | skb_reset_transport_header(skb); | ||
613 | |||
606 | can_skb_reserve(skb); | 614 | can_skb_reserve(skb); |
607 | can_skb_prv(skb)->ifindex = dev->ifindex; | 615 | can_skb_prv(skb)->ifindex = dev->ifindex; |
608 | 616 | ||
diff --git a/drivers/net/can/usb/kvaser_usb.c b/drivers/net/can/usb/kvaser_usb.c index 2928f7003041..a316fa4b91ab 100644 --- a/drivers/net/can/usb/kvaser_usb.c +++ b/drivers/net/can/usb/kvaser_usb.c | |||
@@ -14,6 +14,7 @@ | |||
14 | * Copyright (C) 2015 Valeo S.A. | 14 | * Copyright (C) 2015 Valeo S.A. |
15 | */ | 15 | */ |
16 | 16 | ||
17 | #include <linux/kernel.h> | ||
17 | #include <linux/completion.h> | 18 | #include <linux/completion.h> |
18 | #include <linux/module.h> | 19 | #include <linux/module.h> |
19 | #include <linux/netdevice.h> | 20 | #include <linux/netdevice.h> |
@@ -584,8 +585,15 @@ static int kvaser_usb_wait_msg(const struct kvaser_usb *dev, u8 id, | |||
584 | while (pos <= actual_len - MSG_HEADER_LEN) { | 585 | while (pos <= actual_len - MSG_HEADER_LEN) { |
585 | tmp = buf + pos; | 586 | tmp = buf + pos; |
586 | 587 | ||
587 | if (!tmp->len) | 588 | /* Handle messages crossing the USB endpoint max packet |
588 | break; | 589 | * size boundary. Check kvaser_usb_read_bulk_callback() |
590 | * for further details. | ||
591 | */ | ||
592 | if (tmp->len == 0) { | ||
593 | pos = round_up(pos, | ||
594 | dev->bulk_in->wMaxPacketSize); | ||
595 | continue; | ||
596 | } | ||
589 | 597 | ||
590 | if (pos + tmp->len > actual_len) { | 598 | if (pos + tmp->len > actual_len) { |
591 | dev_err(dev->udev->dev.parent, | 599 | dev_err(dev->udev->dev.parent, |
@@ -787,7 +795,6 @@ static int kvaser_usb_simple_msg_async(struct kvaser_usb_net_priv *priv, | |||
787 | netdev_err(netdev, "Error transmitting URB\n"); | 795 | netdev_err(netdev, "Error transmitting URB\n"); |
788 | usb_unanchor_urb(urb); | 796 | usb_unanchor_urb(urb); |
789 | usb_free_urb(urb); | 797 | usb_free_urb(urb); |
790 | kfree(buf); | ||
791 | return err; | 798 | return err; |
792 | } | 799 | } |
793 | 800 | ||
@@ -1317,8 +1324,19 @@ static void kvaser_usb_read_bulk_callback(struct urb *urb) | |||
1317 | while (pos <= urb->actual_length - MSG_HEADER_LEN) { | 1324 | while (pos <= urb->actual_length - MSG_HEADER_LEN) { |
1318 | msg = urb->transfer_buffer + pos; | 1325 | msg = urb->transfer_buffer + pos; |
1319 | 1326 | ||
1320 | if (!msg->len) | 1327 | /* The Kvaser firmware can only read and write messages that |
1321 | break; | 1328 | * do not cross the USB's endpoint wMaxPacketSize boundary. |
1329 | * If a follow-up command crosses such boundary, firmware puts | ||
1330 | * a placeholder zero-length command in its place then aligns | ||
1331 | * the real command to the next max packet size. | ||
1332 | * | ||
1333 | * Handle such cases or we're going to miss a significant | ||
1334 | * number of events in case of a heavy rx load on the bus. | ||
1335 | */ | ||
1336 | if (msg->len == 0) { | ||
1337 | pos = round_up(pos, dev->bulk_in->wMaxPacketSize); | ||
1338 | continue; | ||
1339 | } | ||
1322 | 1340 | ||
1323 | if (pos + msg->len > urb->actual_length) { | 1341 | if (pos + msg->len > urb->actual_length) { |
1324 | dev_err(dev->udev->dev.parent, "Format error\n"); | 1342 | dev_err(dev->udev->dev.parent, "Format error\n"); |
@@ -1326,7 +1344,6 @@ static void kvaser_usb_read_bulk_callback(struct urb *urb) | |||
1326 | } | 1344 | } |
1327 | 1345 | ||
1328 | kvaser_usb_handle_message(dev, msg); | 1346 | kvaser_usb_handle_message(dev, msg); |
1329 | |||
1330 | pos += msg->len; | 1347 | pos += msg->len; |
1331 | } | 1348 | } |
1332 | 1349 | ||
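
Both RX parsers now treat a zero-length command as firmware padding, meaning "skip ahead to the next wMaxPacketSize boundary", rather than as end of buffer. The skip itself is an ordinary power-of-two round-up. A small sketch of the scan loop over a fake buffer; the 64-byte max packet size and one-byte length field are assumptions for the example, not the real message layout:

#include <stdio.h>

#define MAX_PKT    64    /* assumed bulk-in wMaxPacketSize */
#define HDR_LEN    2

/* round 'x' up to the next multiple of 'to' (power of two), like round_up() */
static unsigned int round_up_to(unsigned int x, unsigned int to)
{
    return (x + to - 1) & ~(to - 1);
}

int main(void)
{
    /* byte 0 of each message is its total length; 0 = padding placeholder
     * (assumes the placeholder never sits exactly on a packet boundary) */
    unsigned char buf[128] = { 0 };
    buf[0]  = 60;    /* message #1, 60 bytes */
    buf[60] = 0;     /* placeholder: real message moved to offset 64 */
    buf[64] = 20;    /* message #2, 20 bytes */

    unsigned int pos = 0, actual_len = 128;

    while (pos <= actual_len - HDR_LEN) {
        unsigned int len = buf[pos];

        if (len == 0) {                  /* firmware padding, not EOF */
            pos = round_up_to(pos, MAX_PKT);
            continue;
        }
        if (pos + len > actual_len)
            break;                       /* format error */

        printf("message at %u, len %u\n", pos, len);
        pos += len;
    }
    return 0;
}
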
@@ -1615,8 +1632,7 @@ static netdev_tx_t kvaser_usb_start_xmit(struct sk_buff *skb, | |||
1615 | struct urb *urb; | 1632 | struct urb *urb; |
1616 | void *buf; | 1633 | void *buf; |
1617 | struct kvaser_msg *msg; | 1634 | struct kvaser_msg *msg; |
1618 | int i, err; | 1635 | int i, err, ret = NETDEV_TX_OK; |
1619 | int ret = NETDEV_TX_OK; | ||
1620 | u8 *msg_tx_can_flags = NULL; /* GCC */ | 1636 | u8 *msg_tx_can_flags = NULL; /* GCC */ |
1621 | 1637 | ||
1622 | if (can_dropped_invalid_skb(netdev, skb)) | 1638 | if (can_dropped_invalid_skb(netdev, skb)) |
@@ -1634,7 +1650,7 @@ static netdev_tx_t kvaser_usb_start_xmit(struct sk_buff *skb, | |||
1634 | if (!buf) { | 1650 | if (!buf) { |
1635 | stats->tx_dropped++; | 1651 | stats->tx_dropped++; |
1636 | dev_kfree_skb(skb); | 1652 | dev_kfree_skb(skb); |
1637 | goto nobufmem; | 1653 | goto freeurb; |
1638 | } | 1654 | } |
1639 | 1655 | ||
1640 | msg = buf; | 1656 | msg = buf; |
@@ -1681,8 +1697,10 @@ static netdev_tx_t kvaser_usb_start_xmit(struct sk_buff *skb, | |||
1681 | /* This should never happen; it implies a flow control bug */ | 1697 | /* This should never happen; it implies a flow control bug */ |
1682 | if (!context) { | 1698 | if (!context) { |
1683 | netdev_warn(netdev, "cannot find free context\n"); | 1699 | netdev_warn(netdev, "cannot find free context\n"); |
1700 | |||
1701 | kfree(buf); | ||
1684 | ret = NETDEV_TX_BUSY; | 1702 | ret = NETDEV_TX_BUSY; |
1685 | goto releasebuf; | 1703 | goto freeurb; |
1686 | } | 1704 | } |
1687 | 1705 | ||
1688 | context->priv = priv; | 1706 | context->priv = priv; |
@@ -1719,16 +1737,12 @@ static netdev_tx_t kvaser_usb_start_xmit(struct sk_buff *skb, | |||
1719 | else | 1737 | else |
1720 | netdev_warn(netdev, "Failed tx_urb %d\n", err); | 1738 | netdev_warn(netdev, "Failed tx_urb %d\n", err); |
1721 | 1739 | ||
1722 | goto releasebuf; | 1740 | goto freeurb; |
1723 | } | 1741 | } |
1724 | 1742 | ||
1725 | usb_free_urb(urb); | 1743 | ret = NETDEV_TX_OK; |
1726 | |||
1727 | return NETDEV_TX_OK; | ||
1728 | 1744 | ||
1729 | releasebuf: | 1745 | freeurb: |
1730 | kfree(buf); | ||
1731 | nobufmem: | ||
1732 | usb_free_urb(urb); | 1746 | usb_free_urb(urb); |
1733 | return ret; | 1747 | return ret; |
1734 | } | 1748 | } |
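
The transmit-path rework above collapses the releasebuf/nobufmem labels into a single freeurb label, so every exit frees the URB exactly once and the command buffer is freed only by the path that still owns it. The underlying idiom is the usual goto-based cleanup; a minimal sketch with a hypothetical send path:

#include <stdio.h>
#include <stdlib.h>

/* allocate two resources, unwind every failure through one label */
static int send_one(int fail_at)
{
    int ret = 0;
    void *urb = malloc(16);
    if (!urb)
        return -1;

    void *buf = malloc(64);
    if (!buf) {
        ret = -1;
        goto free_urb;        /* nothing else to undo yet */
    }

    if (fail_at == 1) {       /* e.g. no free TX context */
        free(buf);            /* we still own buf on this path */
        ret = -1;
        goto free_urb;
    }

    /* success: pretend ownership of buf moved on with the request */
    free(buf);

free_urb:
    free(urb);                /* every path releases the URB exactly once */
    return ret;
}

int main(void)
{
    printf("%d %d\n", send_one(0), send_one(1));
    return 0;
}
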
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_fd.c b/drivers/net/can/usb/peak_usb/pcan_usb_fd.c index 962c3f027383..0bac0f14edc3 100644 --- a/drivers/net/can/usb/peak_usb/pcan_usb_fd.c +++ b/drivers/net/can/usb/peak_usb/pcan_usb_fd.c | |||
@@ -879,6 +879,10 @@ static int pcan_usb_fd_init(struct peak_usb_device *dev) | |||
879 | 879 | ||
880 | pdev->usb_if = ppdev->usb_if; | 880 | pdev->usb_if = ppdev->usb_if; |
881 | pdev->cmd_buffer_addr = ppdev->cmd_buffer_addr; | 881 | pdev->cmd_buffer_addr = ppdev->cmd_buffer_addr; |
882 | |||
883 | /* do a copy of the ctrlmode[_supported] too */ | ||
884 | dev->can.ctrlmode = ppdev->dev.can.ctrlmode; | ||
885 | dev->can.ctrlmode_supported = ppdev->dev.can.ctrlmode_supported; | ||
882 | } | 886 | } |
883 | 887 | ||
884 | pdev->usb_if->dev[dev->ctrl_idx] = dev; | 888 | pdev->usb_if->dev[dev->ctrl_idx] = dev; |
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c index 869d97fcf781..b927021c6c40 100644 --- a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c +++ b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c | |||
@@ -593,7 +593,7 @@ static int xgene_enet_reset(struct xgene_enet_pdata *pdata) | |||
593 | if (!xgene_ring_mgr_init(pdata)) | 593 | if (!xgene_ring_mgr_init(pdata)) |
594 | return -ENODEV; | 594 | return -ENODEV; |
595 | 595 | ||
596 | if (!efi_enabled(EFI_BOOT)) { | 596 | if (pdata->clk) { |
597 | clk_prepare_enable(pdata->clk); | 597 | clk_prepare_enable(pdata->clk); |
598 | clk_disable_unprepare(pdata->clk); | 598 | clk_disable_unprepare(pdata->clk); |
599 | clk_prepare_enable(pdata->clk); | 599 | clk_prepare_enable(pdata->clk); |
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c index 4de62b210c85..635a83be7e5e 100644 --- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c +++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c | |||
@@ -1025,6 +1025,8 @@ static int xgene_enet_remove(struct platform_device *pdev) | |||
1025 | #ifdef CONFIG_ACPI | 1025 | #ifdef CONFIG_ACPI |
1026 | static const struct acpi_device_id xgene_enet_acpi_match[] = { | 1026 | static const struct acpi_device_id xgene_enet_acpi_match[] = { |
1027 | { "APMC0D05", }, | 1027 | { "APMC0D05", }, |
1028 | { "APMC0D30", }, | ||
1029 | { "APMC0D31", }, | ||
1028 | { } | 1030 | { } |
1029 | }; | 1031 | }; |
1030 | MODULE_DEVICE_TABLE(acpi, xgene_enet_acpi_match); | 1032 | MODULE_DEVICE_TABLE(acpi, xgene_enet_acpi_match); |
@@ -1033,6 +1035,8 @@ MODULE_DEVICE_TABLE(acpi, xgene_enet_acpi_match); | |||
1033 | #ifdef CONFIG_OF | 1035 | #ifdef CONFIG_OF |
1034 | static struct of_device_id xgene_enet_of_match[] = { | 1036 | static struct of_device_id xgene_enet_of_match[] = { |
1035 | {.compatible = "apm,xgene-enet",}, | 1037 | {.compatible = "apm,xgene-enet",}, |
1038 | {.compatible = "apm,xgene1-sgenet",}, | ||
1039 | {.compatible = "apm,xgene1-xgenet",}, | ||
1036 | {}, | 1040 | {}, |
1037 | }; | 1041 | }; |
1038 | 1042 | ||
diff --git a/drivers/net/ethernet/broadcom/bcm63xx_enet.c b/drivers/net/ethernet/broadcom/bcm63xx_enet.c index 21206d33b638..a7f2cc3e485e 100644 --- a/drivers/net/ethernet/broadcom/bcm63xx_enet.c +++ b/drivers/net/ethernet/broadcom/bcm63xx_enet.c | |||
@@ -486,7 +486,7 @@ static int bcm_enet_poll(struct napi_struct *napi, int budget) | |||
486 | { | 486 | { |
487 | struct bcm_enet_priv *priv; | 487 | struct bcm_enet_priv *priv; |
488 | struct net_device *dev; | 488 | struct net_device *dev; |
489 | int tx_work_done, rx_work_done; | 489 | int rx_work_done; |
490 | 490 | ||
491 | priv = container_of(napi, struct bcm_enet_priv, napi); | 491 | priv = container_of(napi, struct bcm_enet_priv, napi); |
492 | dev = priv->net_dev; | 492 | dev = priv->net_dev; |
@@ -498,14 +498,14 @@ static int bcm_enet_poll(struct napi_struct *napi, int budget) | |||
498 | ENETDMAC_IR, priv->tx_chan); | 498 | ENETDMAC_IR, priv->tx_chan); |
499 | 499 | ||
500 | /* reclaim sent skb */ | 500 | /* reclaim sent skb */ |
501 | tx_work_done = bcm_enet_tx_reclaim(dev, 0); | 501 | bcm_enet_tx_reclaim(dev, 0); |
502 | 502 | ||
503 | spin_lock(&priv->rx_lock); | 503 | spin_lock(&priv->rx_lock); |
504 | rx_work_done = bcm_enet_receive_queue(dev, budget); | 504 | rx_work_done = bcm_enet_receive_queue(dev, budget); |
505 | spin_unlock(&priv->rx_lock); | 505 | spin_unlock(&priv->rx_lock); |
506 | 506 | ||
507 | if (rx_work_done >= budget || tx_work_done > 0) { | 507 | if (rx_work_done >= budget) { |
508 | /* rx/tx queue is not yet empty/clean */ | 508 | /* rx queue is not yet empty/clean */ |
509 | return rx_work_done; | 509 | return rx_work_done; |
510 | } | 510 | } |
511 | 511 | ||
diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c index 676ffe093180..0469f72c6e7e 100644 --- a/drivers/net/ethernet/broadcom/bgmac.c +++ b/drivers/net/ethernet/broadcom/bgmac.c | |||
@@ -302,9 +302,6 @@ static int bgmac_dma_rx_skb_for_slot(struct bgmac *bgmac, | |||
302 | slot->skb = skb; | 302 | slot->skb = skb; |
303 | slot->dma_addr = dma_addr; | 303 | slot->dma_addr = dma_addr; |
304 | 304 | ||
305 | if (slot->dma_addr & 0xC0000000) | ||
306 | bgmac_warn(bgmac, "DMA address using 0xC0000000 bit(s), it may need translation trick\n"); | ||
307 | |||
308 | return 0; | 305 | return 0; |
309 | } | 306 | } |
310 | 307 | ||
@@ -505,8 +502,6 @@ static int bgmac_dma_alloc(struct bgmac *bgmac) | |||
505 | ring->mmio_base); | 502 | ring->mmio_base); |
506 | goto err_dma_free; | 503 | goto err_dma_free; |
507 | } | 504 | } |
508 | if (ring->dma_base & 0xC0000000) | ||
509 | bgmac_warn(bgmac, "DMA address using 0xC0000000 bit(s), it may need translation trick\n"); | ||
510 | 505 | ||
511 | ring->unaligned = bgmac_dma_unaligned(bgmac, ring, | 506 | ring->unaligned = bgmac_dma_unaligned(bgmac, ring, |
512 | BGMAC_DMA_RING_TX); | 507 | BGMAC_DMA_RING_TX); |
@@ -536,8 +531,6 @@ static int bgmac_dma_alloc(struct bgmac *bgmac) | |||
536 | err = -ENOMEM; | 531 | err = -ENOMEM; |
537 | goto err_dma_free; | 532 | goto err_dma_free; |
538 | } | 533 | } |
539 | if (ring->dma_base & 0xC0000000) | ||
540 | bgmac_warn(bgmac, "DMA address using 0xC0000000 bit(s), it may need translation trick\n"); | ||
541 | 534 | ||
542 | ring->unaligned = bgmac_dma_unaligned(bgmac, ring, | 535 | ring->unaligned = bgmac_dma_unaligned(bgmac, ring, |
543 | BGMAC_DMA_RING_RX); | 536 | BGMAC_DMA_RING_RX); |
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c index 7155e1d2c208..bef750a09027 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c | |||
@@ -12722,6 +12722,9 @@ static int bnx2x_init_dev(struct bnx2x *bp, struct pci_dev *pdev, | |||
12722 | pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, | 12722 | pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, |
12723 | PCICFG_VENDOR_ID_OFFSET); | 12723 | PCICFG_VENDOR_ID_OFFSET); |
12724 | 12724 | ||
12725 | /* Set PCIe reset type to fundamental for EEH recovery */ | ||
12726 | pdev->needs_freset = 1; | ||
12727 | |||
12725 | /* AER (Advanced Error reporting) configuration */ | 12728 | /* AER (Advanced Error reporting) configuration */ |
12726 | rc = pci_enable_pcie_error_reporting(pdev); | 12729 | rc = pci_enable_pcie_error_reporting(pdev); |
12727 | if (!rc) | 12730 | if (!rc) |
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c b/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c index 149a0d70c108..b97122926d3a 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c +++ b/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c | |||
@@ -73,15 +73,17 @@ int bcmgenet_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) | |||
73 | if (wol->wolopts & ~(WAKE_MAGIC | WAKE_MAGICSECURE)) | 73 | if (wol->wolopts & ~(WAKE_MAGIC | WAKE_MAGICSECURE)) |
74 | return -EINVAL; | 74 | return -EINVAL; |
75 | 75 | ||
76 | reg = bcmgenet_umac_readl(priv, UMAC_MPD_CTRL); | ||
76 | if (wol->wolopts & WAKE_MAGICSECURE) { | 77 | if (wol->wolopts & WAKE_MAGICSECURE) { |
77 | bcmgenet_umac_writel(priv, get_unaligned_be16(&wol->sopass[0]), | 78 | bcmgenet_umac_writel(priv, get_unaligned_be16(&wol->sopass[0]), |
78 | UMAC_MPD_PW_MS); | 79 | UMAC_MPD_PW_MS); |
79 | bcmgenet_umac_writel(priv, get_unaligned_be32(&wol->sopass[2]), | 80 | bcmgenet_umac_writel(priv, get_unaligned_be32(&wol->sopass[2]), |
80 | UMAC_MPD_PW_LS); | 81 | UMAC_MPD_PW_LS); |
81 | reg = bcmgenet_umac_readl(priv, UMAC_MPD_CTRL); | ||
82 | reg |= MPD_PW_EN; | 82 | reg |= MPD_PW_EN; |
83 | bcmgenet_umac_writel(priv, reg, UMAC_MPD_CTRL); | 83 | } else { |
84 | reg &= ~MPD_PW_EN; | ||
84 | } | 85 | } |
86 | bcmgenet_umac_writel(priv, reg, UMAC_MPD_CTRL); | ||
85 | 87 | ||
86 | /* Flag the device and relevant IRQ as wakeup capable */ | 88 | /* Flag the device and relevant IRQ as wakeup capable */ |
87 | if (wol->wolopts) { | 89 | if (wol->wolopts) { |
diff --git a/drivers/net/ethernet/cadence/macb.c b/drivers/net/ethernet/cadence/macb.c index ad76b8e35a00..81d41539fcba 100644 --- a/drivers/net/ethernet/cadence/macb.c +++ b/drivers/net/ethernet/cadence/macb.c | |||
@@ -2113,17 +2113,17 @@ static const struct net_device_ops macb_netdev_ops = { | |||
2113 | }; | 2113 | }; |
2114 | 2114 | ||
2115 | #if defined(CONFIG_OF) | 2115 | #if defined(CONFIG_OF) |
2116 | static struct macb_config pc302gem_config = { | 2116 | static const struct macb_config pc302gem_config = { |
2117 | .caps = MACB_CAPS_SG_DISABLED | MACB_CAPS_GIGABIT_MODE_AVAILABLE, | 2117 | .caps = MACB_CAPS_SG_DISABLED | MACB_CAPS_GIGABIT_MODE_AVAILABLE, |
2118 | .dma_burst_length = 16, | 2118 | .dma_burst_length = 16, |
2119 | }; | 2119 | }; |
2120 | 2120 | ||
2121 | static struct macb_config sama5d3_config = { | 2121 | static const struct macb_config sama5d3_config = { |
2122 | .caps = MACB_CAPS_SG_DISABLED | MACB_CAPS_GIGABIT_MODE_AVAILABLE, | 2122 | .caps = MACB_CAPS_SG_DISABLED | MACB_CAPS_GIGABIT_MODE_AVAILABLE, |
2123 | .dma_burst_length = 16, | 2123 | .dma_burst_length = 16, |
2124 | }; | 2124 | }; |
2125 | 2125 | ||
2126 | static struct macb_config sama5d4_config = { | 2126 | static const struct macb_config sama5d4_config = { |
2127 | .caps = 0, | 2127 | .caps = 0, |
2128 | .dma_burst_length = 4, | 2128 | .dma_burst_length = 4, |
2129 | }; | 2129 | }; |
@@ -2154,7 +2154,7 @@ static void macb_configure_caps(struct macb *bp) | |||
2154 | if (bp->pdev->dev.of_node) { | 2154 | if (bp->pdev->dev.of_node) { |
2155 | match = of_match_node(macb_dt_ids, bp->pdev->dev.of_node); | 2155 | match = of_match_node(macb_dt_ids, bp->pdev->dev.of_node); |
2156 | if (match && match->data) { | 2156 | if (match && match->data) { |
2157 | config = (const struct macb_config *)match->data; | 2157 | config = match->data; |
2158 | 2158 | ||
2159 | bp->caps = config->caps; | 2159 | bp->caps = config->caps; |
2160 | /* | 2160 | /* |
diff --git a/drivers/net/ethernet/cadence/macb.h b/drivers/net/ethernet/cadence/macb.h index 31dc080f2437..ff85619a9732 100644 --- a/drivers/net/ethernet/cadence/macb.h +++ b/drivers/net/ethernet/cadence/macb.h | |||
@@ -351,7 +351,7 @@ | |||
351 | 351 | ||
352 | /* Bitfields in MID */ | 352 | /* Bitfields in MID */ |
353 | #define MACB_IDNUM_OFFSET 16 | 353 | #define MACB_IDNUM_OFFSET 16 |
354 | #define MACB_IDNUM_SIZE 16 | 354 | #define MACB_IDNUM_SIZE 12 |
355 | #define MACB_REV_OFFSET 0 | 355 | #define MACB_REV_OFFSET 0 |
356 | #define MACB_REV_SIZE 16 | 356 | #define MACB_REV_SIZE 16 |
357 | 357 | ||
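
MACB_IDNUM_SIZE shrinks from 16 to 12 bits, so the module ID is extracted with a narrower mask, presumably because the top bits of that half of MID do not belong to the ID on some chips. The OFFSET/SIZE macros boil down to a shift and mask, for example:

#include <stdio.h>

/* same shape as the MACB bitfield helpers: extract 'size' bits at 'offset' */
static unsigned int bf_ext(unsigned int reg, unsigned int offset, unsigned int size)
{
    return (reg >> offset) & ((1u << size) - 1);
}

int main(void)
{
    unsigned int mid = 0x10020119;    /* hypothetical MID register value */

    /* a 16-bit field picks up bits that are not part of the ID ... */
    printf("16-bit IDNUM: %#x\n", bf_ext(mid, 16, 16));
    /* ... the 12-bit field yields the ID alone */
    printf("12-bit IDNUM: %#x\n", bf_ext(mid, 16, 12));
    return 0;
}
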
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index 9bb6220663b2..99492b7e3713 100644 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c | |||
@@ -1597,7 +1597,7 @@ fec_enet_interrupt(int irq, void *dev_id) | |||
1597 | writel(int_events, fep->hwp + FEC_IEVENT); | 1597 | writel(int_events, fep->hwp + FEC_IEVENT); |
1598 | fec_enet_collect_events(fep, int_events); | 1598 | fec_enet_collect_events(fep, int_events); |
1599 | 1599 | ||
1600 | if (fep->work_tx || fep->work_rx) { | 1600 | if ((fep->work_tx || fep->work_rx) && fep->link) { |
1601 | ret = IRQ_HANDLED; | 1601 | ret = IRQ_HANDLED; |
1602 | 1602 | ||
1603 | if (napi_schedule_prep(&fep->napi)) { | 1603 | if (napi_schedule_prep(&fep->napi)) { |
@@ -3383,7 +3383,6 @@ fec_drv_remove(struct platform_device *pdev) | |||
3383 | regulator_disable(fep->reg_phy); | 3383 | regulator_disable(fep->reg_phy); |
3384 | if (fep->ptp_clock) | 3384 | if (fep->ptp_clock) |
3385 | ptp_clock_unregister(fep->ptp_clock); | 3385 | ptp_clock_unregister(fep->ptp_clock); |
3386 | fec_enet_clk_enable(ndev, false); | ||
3387 | of_node_put(fep->phy_node); | 3386 | of_node_put(fep->phy_node); |
3388 | free_netdev(ndev); | 3387 | free_netdev(ndev); |
3389 | 3388 | ||
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c index 178e54028d10..7bf3682cdf47 100644 --- a/drivers/net/ethernet/freescale/gianfar.c +++ b/drivers/net/ethernet/freescale/gianfar.c | |||
@@ -747,6 +747,18 @@ static int gfar_parse_group(struct device_node *np, | |||
747 | return 0; | 747 | return 0; |
748 | } | 748 | } |
749 | 749 | ||
750 | static int gfar_of_group_count(struct device_node *np) | ||
751 | { | ||
752 | struct device_node *child; | ||
753 | int num = 0; | ||
754 | |||
755 | for_each_available_child_of_node(np, child) | ||
756 | if (!of_node_cmp(child->name, "queue-group")) | ||
757 | num++; | ||
758 | |||
759 | return num; | ||
760 | } | ||
761 | |||
750 | static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev) | 762 | static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev) |
751 | { | 763 | { |
752 | const char *model; | 764 | const char *model; |
@@ -784,7 +796,7 @@ static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev) | |||
784 | num_rx_qs = 1; | 796 | num_rx_qs = 1; |
785 | } else { /* MQ_MG_MODE */ | 797 | } else { /* MQ_MG_MODE */ |
786 | /* get the actual number of supported groups */ | 798 | /* get the actual number of supported groups */ |
787 | unsigned int num_grps = of_get_available_child_count(np); | 799 | unsigned int num_grps = gfar_of_group_count(np); |
788 | 800 | ||
789 | if (num_grps == 0 || num_grps > MAXGROUPS) { | 801 | if (num_grps == 0 || num_grps > MAXGROUPS) { |
790 | dev_err(&ofdev->dev, "Invalid # of int groups(%d)\n", | 802 | dev_err(&ofdev->dev, "Invalid # of int groups(%d)\n", |
@@ -851,7 +863,10 @@ static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev) | |||
851 | 863 | ||
852 | /* Parse and initialize group specific information */ | 864 | /* Parse and initialize group specific information */ |
853 | if (priv->mode == MQ_MG_MODE) { | 865 | if (priv->mode == MQ_MG_MODE) { |
854 | for_each_child_of_node(np, child) { | 866 | for_each_available_child_of_node(np, child) { |
867 | if (of_node_cmp(child->name, "queue-group")) | ||
868 | continue; | ||
869 | |||
855 | err = gfar_parse_group(child, priv, model); | 870 | err = gfar_parse_group(child, priv, model); |
856 | if (err) | 871 | if (err) |
857 | goto err_grp_init; | 872 | goto err_grp_init; |
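
gianfar now counts and parses only child nodes actually named "queue-group", rather than every available child, which presumably miscounted when other subnodes sit under the ethernet node. The same filter-while-iterating shape in plain C over a fake child list:

#include <stdio.h>
#include <string.h>

struct node { const char *name; int available; };

static int count_queue_groups(const struct node *child, int n)
{
    int num = 0;

    for (int i = 0; i < n; i++)
        if (child[i].available && !strcmp(child[i].name, "queue-group"))
            num++;
    return num;
}

int main(void)
{
    /* a hypothetical ethernet node: two queue groups plus an mdio child */
    struct node children[] = {
        { "queue-group", 1 },
        { "queue-group", 1 },
        { "mdio", 1 },
    };
    int n = sizeof(children) / sizeof(children[0]);

    printf("all children: %d, queue groups: %d\n",
           n, count_queue_groups(children, n));
    return 0;
}
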
diff --git a/drivers/net/ethernet/smsc/smc91x.c b/drivers/net/ethernet/smsc/smc91x.c index 209ee1b27f8d..5d093dc0f5f5 100644 --- a/drivers/net/ethernet/smsc/smc91x.c +++ b/drivers/net/ethernet/smsc/smc91x.c | |||
@@ -92,6 +92,7 @@ static const char version[] = | |||
92 | #include "smc91x.h" | 92 | #include "smc91x.h" |
93 | 93 | ||
94 | #if defined(CONFIG_ASSABET_NEPONSET) | 94 | #if defined(CONFIG_ASSABET_NEPONSET) |
95 | #include <mach/assabet.h> | ||
95 | #include <mach/neponset.h> | 96 | #include <mach/neponset.h> |
96 | #endif | 97 | #endif |
97 | 98 | ||
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c index fb846ebba1d9..f9b42f11950f 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c | |||
@@ -272,6 +272,37 @@ static int stmmac_pltfr_probe(struct platform_device *pdev) | |||
272 | struct stmmac_priv *priv = NULL; | 272 | struct stmmac_priv *priv = NULL; |
273 | struct plat_stmmacenet_data *plat_dat = NULL; | 273 | struct plat_stmmacenet_data *plat_dat = NULL; |
274 | const char *mac = NULL; | 274 | const char *mac = NULL; |
275 | int irq, wol_irq, lpi_irq; | ||
276 | |||
277 | /* Get IRQ information early so that we can ask for a deferred | ||
278 | * probe, if needed, before we go too far with resource allocation. | ||
279 | */ | ||
280 | irq = platform_get_irq_byname(pdev, "macirq"); | ||
281 | if (irq < 0) { | ||
282 | if (irq != -EPROBE_DEFER) { | ||
283 | dev_err(dev, | ||
284 | "MAC IRQ configuration information not found\n"); | ||
285 | } | ||
286 | return irq; | ||
287 | } | ||
288 | |||
289 | /* On some platforms e.g. SPEAr the wake up irq differs from the mac irq | ||
290 | * The external wake up irq can be passed through the platform code | ||
291 | * named as "eth_wake_irq" | ||
292 | * | ||
293 | * In case the wake up interrupt is not passed from the platform | ||
294 | * so the driver will continue to use the mac irq (ndev->irq) | ||
295 | */ | ||
296 | wol_irq = platform_get_irq_byname(pdev, "eth_wake_irq"); | ||
297 | if (wol_irq < 0) { | ||
298 | if (wol_irq == -EPROBE_DEFER) | ||
299 | return -EPROBE_DEFER; | ||
300 | wol_irq = irq; | ||
301 | } | ||
302 | |||
303 | lpi_irq = platform_get_irq_byname(pdev, "eth_lpi"); | ||
304 | if (lpi_irq == -EPROBE_DEFER) | ||
305 | return -EPROBE_DEFER; | ||
275 | 306 | ||
276 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 307 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
277 | addr = devm_ioremap_resource(dev, res); | 308 | addr = devm_ioremap_resource(dev, res); |
@@ -323,39 +354,15 @@ static int stmmac_pltfr_probe(struct platform_device *pdev) | |||
323 | return PTR_ERR(priv); | 354 | return PTR_ERR(priv); |
324 | } | 355 | } |
325 | 356 | ||
357 | /* Copy IRQ values to the priv structure, which is now available */ | ||
358 | priv->dev->irq = irq; | ||
359 | priv->wol_irq = wol_irq; | ||
360 | priv->lpi_irq = lpi_irq; | ||
361 | |||
326 | /* Get MAC address if available (DT) */ | 362 | /* Get MAC address if available (DT) */ |
327 | if (mac) | 363 | if (mac) |
328 | memcpy(priv->dev->dev_addr, mac, ETH_ALEN); | 364 | memcpy(priv->dev->dev_addr, mac, ETH_ALEN); |
329 | 365 | ||
330 | /* Get the MAC information */ | ||
331 | priv->dev->irq = platform_get_irq_byname(pdev, "macirq"); | ||
332 | if (priv->dev->irq < 0) { | ||
333 | if (priv->dev->irq != -EPROBE_DEFER) { | ||
334 | netdev_err(priv->dev, | ||
335 | "MAC IRQ configuration information not found\n"); | ||
336 | } | ||
337 | return priv->dev->irq; | ||
338 | } | ||
339 | |||
340 | /* | ||
341 | * On some platforms e.g. SPEAr the wake up irq differs from the mac irq | ||
342 | * The external wake up irq can be passed through the platform code | ||
343 | * named as "eth_wake_irq" | ||
344 | * | ||
345 | * In case the wake up interrupt is not passed from the platform | ||
346 | * so the driver will continue to use the mac irq (ndev->irq) | ||
347 | */ | ||
348 | priv->wol_irq = platform_get_irq_byname(pdev, "eth_wake_irq"); | ||
349 | if (priv->wol_irq < 0) { | ||
350 | if (priv->wol_irq == -EPROBE_DEFER) | ||
351 | return -EPROBE_DEFER; | ||
352 | priv->wol_irq = priv->dev->irq; | ||
353 | } | ||
354 | |||
355 | priv->lpi_irq = platform_get_irq_byname(pdev, "eth_lpi"); | ||
356 | if (priv->lpi_irq == -EPROBE_DEFER) | ||
357 | return -EPROBE_DEFER; | ||
358 | |||
359 | platform_set_drvdata(pdev, priv->dev); | 366 | platform_set_drvdata(pdev, priv->dev); |
360 | 367 | ||
361 | pr_debug("STMMAC platform driver registration completed"); | 368 | pr_debug("STMMAC platform driver registration completed"); |
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c index f1ee71e22241..7d394846afc2 100644 --- a/drivers/net/team/team.c +++ b/drivers/net/team/team.c | |||
@@ -1730,11 +1730,11 @@ static int team_set_mac_address(struct net_device *dev, void *p) | |||
1730 | if (dev->type == ARPHRD_ETHER && !is_valid_ether_addr(addr->sa_data)) | 1730 | if (dev->type == ARPHRD_ETHER && !is_valid_ether_addr(addr->sa_data)) |
1731 | return -EADDRNOTAVAIL; | 1731 | return -EADDRNOTAVAIL; |
1732 | memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); | 1732 | memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); |
1733 | rcu_read_lock(); | 1733 | mutex_lock(&team->lock); |
1734 | list_for_each_entry_rcu(port, &team->port_list, list) | 1734 | list_for_each_entry(port, &team->port_list, list) |
1735 | if (team->ops.port_change_dev_addr) | 1735 | if (team->ops.port_change_dev_addr) |
1736 | team->ops.port_change_dev_addr(team, port); | 1736 | team->ops.port_change_dev_addr(team, port); |
1737 | rcu_read_unlock(); | 1737 | mutex_unlock(&team->lock); |
1738 | return 0; | 1738 | return 0; |
1739 | } | 1739 | } |
1740 | 1740 | ||
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c index f38227afe099..3aa8648080c8 100644 --- a/drivers/net/xen-netback/interface.c +++ b/drivers/net/xen-netback/interface.c | |||
@@ -340,12 +340,11 @@ static void xenvif_get_ethtool_stats(struct net_device *dev, | |||
340 | unsigned int num_queues = vif->num_queues; | 340 | unsigned int num_queues = vif->num_queues; |
341 | int i; | 341 | int i; |
342 | unsigned int queue_index; | 342 | unsigned int queue_index; |
343 | struct xenvif_stats *vif_stats; | ||
344 | 343 | ||
345 | for (i = 0; i < ARRAY_SIZE(xenvif_stats); i++) { | 344 | for (i = 0; i < ARRAY_SIZE(xenvif_stats); i++) { |
346 | unsigned long accum = 0; | 345 | unsigned long accum = 0; |
347 | for (queue_index = 0; queue_index < num_queues; ++queue_index) { | 346 | for (queue_index = 0; queue_index < num_queues; ++queue_index) { |
348 | vif_stats = &vif->queues[queue_index].stats; | 347 | void *vif_stats = &vif->queues[queue_index].stats; |
349 | accum += *(unsigned long *)(vif_stats + xenvif_stats[i].offset); | 348 | accum += *(unsigned long *)(vif_stats + xenvif_stats[i].offset); |
350 | } | 349 | } |
351 | data[i] = accum; | 350 | data[i] = accum; |
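
The xenvif statistics fix is about pointer arithmetic: adding a byte offset to a struct xenvif_stats * advances by whole structs, so the accumulation read far past the intended field; declaring the base as void * (byte-sized arithmetic as the GCC extension the kernel relies on) or char * makes the offset behave as intended. Illustration:

#include <stdio.h>
#include <stddef.h>

struct stats {
    unsigned long rx_bytes;
    unsigned long rx_packets;
    unsigned long tx_bytes;
};

int main(void)
{
    struct stats s = { 111, 222, 333 };
    size_t off = offsetof(struct stats, tx_bytes);

    /* struct-pointer arithmetic scales the offset by sizeof(struct stats):
     * 'p + off' would step off * sizeof(*p) bytes, well past the field */
    printf("struct-scaled step: %zu bytes, intended step: %zu bytes\n",
           off * sizeof(struct stats), off);

    /* byte-wise arithmetic lands on the right field */
    unsigned long v = *(unsigned long *)((char *)&s + off);
    printf("tx_bytes read via byte offset: %lu\n", v);
    return 0;
}
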
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c index c4d68d768408..cab9f5257f57 100644 --- a/drivers/net/xen-netback/netback.c +++ b/drivers/net/xen-netback/netback.c | |||
@@ -1349,7 +1349,7 @@ static int xenvif_handle_frag_list(struct xenvif_queue *queue, struct sk_buff *s | |||
1349 | { | 1349 | { |
1350 | unsigned int offset = skb_headlen(skb); | 1350 | unsigned int offset = skb_headlen(skb); |
1351 | skb_frag_t frags[MAX_SKB_FRAGS]; | 1351 | skb_frag_t frags[MAX_SKB_FRAGS]; |
1352 | int i; | 1352 | int i, f; |
1353 | struct ubuf_info *uarg; | 1353 | struct ubuf_info *uarg; |
1354 | struct sk_buff *nskb = skb_shinfo(skb)->frag_list; | 1354 | struct sk_buff *nskb = skb_shinfo(skb)->frag_list; |
1355 | 1355 | ||
@@ -1389,23 +1389,25 @@ static int xenvif_handle_frag_list(struct xenvif_queue *queue, struct sk_buff *s | |||
1389 | frags[i].page_offset = 0; | 1389 | frags[i].page_offset = 0; |
1390 | skb_frag_size_set(&frags[i], len); | 1390 | skb_frag_size_set(&frags[i], len); |
1391 | } | 1391 | } |
1392 | /* swap out with old one */ | ||
1393 | memcpy(skb_shinfo(skb)->frags, | ||
1394 | frags, | ||
1395 | i * sizeof(skb_frag_t)); | ||
1396 | skb_shinfo(skb)->nr_frags = i; | ||
1397 | skb->truesize += i * PAGE_SIZE; | ||
1398 | 1392 | ||
1399 | /* remove traces of mapped pages and frag_list */ | 1393 | /* Copied all the bits from the frag list -- free it. */ |
1400 | skb_frag_list_init(skb); | 1394 | skb_frag_list_init(skb); |
1395 | xenvif_skb_zerocopy_prepare(queue, nskb); | ||
1396 | kfree_skb(nskb); | ||
1397 | |||
1398 | /* Release all the original (foreign) frags. */ | ||
1399 | for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) | ||
1400 | skb_frag_unref(skb, f); | ||
1401 | uarg = skb_shinfo(skb)->destructor_arg; | 1401 | uarg = skb_shinfo(skb)->destructor_arg; |
1402 | /* increase inflight counter to offset decrement in callback */ | 1402 | /* increase inflight counter to offset decrement in callback */ |
1403 | atomic_inc(&queue->inflight_packets); | 1403 | atomic_inc(&queue->inflight_packets); |
1404 | uarg->callback(uarg, true); | 1404 | uarg->callback(uarg, true); |
1405 | skb_shinfo(skb)->destructor_arg = NULL; | 1405 | skb_shinfo(skb)->destructor_arg = NULL; |
1406 | 1406 | ||
1407 | xenvif_skb_zerocopy_prepare(queue, nskb); | 1407 | /* Fill the skb with the new (local) frags. */ |
1408 | kfree_skb(nskb); | 1408 | memcpy(skb_shinfo(skb)->frags, frags, i * sizeof(skb_frag_t)); |
1409 | skb_shinfo(skb)->nr_frags = i; | ||
1410 | skb->truesize += i * PAGE_SIZE; | ||
1409 | 1411 | ||
1410 | return 0; | 1412 | return 0; |
1411 | } | 1413 | } |
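
The reordering matters because the function previously copied the new local frags over skb_shinfo(skb)->frags without first dropping the references that array still held on the original foreign frags. The general rule: release whatever an array still points at before you memcpy over it. A tiny illustration with heap buffers standing in for pages:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define NFRAGS 3

int main(void)
{
    void *old[NFRAGS], *new[NFRAGS];

    for (int i = 0; i < NFRAGS; i++) {
        old[i] = malloc(32);    /* "foreign" pages currently referenced */
        new[i] = malloc(32);    /* freshly copied local pages */
    }

    /* release the old references first ... */
    for (int i = 0; i < NFRAGS; i++)
        free(old[i]);

    /* ... then swap in the new array; doing this first would leak old[] */
    memcpy(old, new, sizeof(new));

    for (int i = 0; i < NFRAGS; i++)
        free(old[i]);
    printf("no leak\n");
    return 0;
}
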
diff --git a/drivers/pci/host/pci-xgene.c b/drivers/pci/host/pci-xgene.c index aab55474dd0d..ee082c0366ec 100644 --- a/drivers/pci/host/pci-xgene.c +++ b/drivers/pci/host/pci-xgene.c | |||
@@ -127,7 +127,7 @@ static bool xgene_pcie_hide_rc_bars(struct pci_bus *bus, int offset) | |||
127 | return false; | 127 | return false; |
128 | } | 128 | } |
129 | 129 | ||
130 | static int xgene_pcie_map_bus(struct pci_bus *bus, unsigned int devfn, | 130 | static void __iomem *xgene_pcie_map_bus(struct pci_bus *bus, unsigned int devfn, |
131 | int offset) | 131 | int offset) |
132 | { | 132 | { |
133 | struct xgene_pcie_port *port = bus->sysdata; | 133 | struct xgene_pcie_port *port = bus->sysdata; |
@@ -137,7 +137,7 @@ static int xgene_pcie_map_bus(struct pci_bus *bus, unsigned int devfn, | |||
137 | return NULL; | 137 | return NULL; |
138 | 138 | ||
139 | xgene_pcie_set_rtdid_reg(bus, devfn); | 139 | xgene_pcie_set_rtdid_reg(bus, devfn); |
140 | return xgene_pcie_get_cfg_base(bus); | 140 | return xgene_pcie_get_cfg_base(bus) + offset; |
141 | } | 141 | } |
142 | 142 | ||
143 | static struct pci_ops xgene_pcie_ops = { | 143 | static struct pci_ops xgene_pcie_ops = { |
diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c index aa012fb3834b..312f23a8429c 100644 --- a/drivers/pci/pci-sysfs.c +++ b/drivers/pci/pci-sysfs.c | |||
@@ -521,7 +521,8 @@ static ssize_t driver_override_store(struct device *dev, | |||
521 | struct pci_dev *pdev = to_pci_dev(dev); | 521 | struct pci_dev *pdev = to_pci_dev(dev); |
522 | char *driver_override, *old = pdev->driver_override, *cp; | 522 | char *driver_override, *old = pdev->driver_override, *cp; |
523 | 523 | ||
524 | if (count > PATH_MAX) | 524 | /* We need to keep extra room for a newline */ |
525 | if (count >= (PAGE_SIZE - 1)) | ||
525 | return -EINVAL; | 526 | return -EINVAL; |
526 | 527 | ||
527 | driver_override = kstrndup(buf, count, GFP_KERNEL); | 528 | driver_override = kstrndup(buf, count, GFP_KERNEL); |
@@ -549,7 +550,7 @@ static ssize_t driver_override_show(struct device *dev, | |||
549 | { | 550 | { |
550 | struct pci_dev *pdev = to_pci_dev(dev); | 551 | struct pci_dev *pdev = to_pci_dev(dev); |
551 | 552 | ||
552 | return sprintf(buf, "%s\n", pdev->driver_override); | 553 | return snprintf(buf, PAGE_SIZE, "%s\n", pdev->driver_override); |
553 | } | 554 | } |
554 | static DEVICE_ATTR_RW(driver_override); | 555 | static DEVICE_ATTR_RW(driver_override); |
555 | 556 | ||
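
Two sysfs bounds fixes: the store side now rejects anything that could not be echoed back within one page (PAGE_SIZE minus room for the trailing newline), and the show side uses snprintf() so the read can never overrun the page buffer either. A user-space sketch of the same bounds, with PAGE_SIZE assumed to be 4096:

#include <stdio.h>
#include <string.h>

#define PAGE_SIZE 4096

static char page[PAGE_SIZE];

static int store(const char *buf, size_t count)
{
    /* keep room for the '\n' that show() appends */
    if (count >= PAGE_SIZE - 1)
        return -1;              /* -EINVAL in the driver */
    memcpy(page, buf, count);
    page[count] = '\0';
    return 0;
}

static int show(char *out)
{
    /* bounded formatting: never writes more than PAGE_SIZE bytes */
    return snprintf(out, PAGE_SIZE, "%s\n", page);
}

int main(void)
{
    char out[PAGE_SIZE];

    if (store("vfio-pci", strlen("vfio-pci")) == 0)
        printf("%d bytes: %s", show(out), out);
    return 0;
}
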
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c index b899947d839d..1245dca79009 100644 --- a/drivers/regulator/core.c +++ b/drivers/regulator/core.c | |||
@@ -3444,13 +3444,6 @@ static umode_t regulator_attr_is_visible(struct kobject *kobj, | |||
3444 | if (attr == &dev_attr_requested_microamps.attr) | 3444 | if (attr == &dev_attr_requested_microamps.attr) |
3445 | return rdev->desc->type == REGULATOR_CURRENT ? mode : 0; | 3445 | return rdev->desc->type == REGULATOR_CURRENT ? mode : 0; |
3446 | 3446 | ||
3447 | /* all the other attributes exist to support constraints; | ||
3448 | * don't show them if there are no constraints, or if the | ||
3449 | * relevant supporting methods are missing. | ||
3450 | */ | ||
3451 | if (!rdev->constraints) | ||
3452 | return 0; | ||
3453 | |||
3454 | /* constraints need specific supporting methods */ | 3447 | /* constraints need specific supporting methods */ |
3455 | if (attr == &dev_attr_min_microvolts.attr || | 3448 | if (attr == &dev_attr_min_microvolts.attr || |
3456 | attr == &dev_attr_max_microvolts.attr) | 3449 | attr == &dev_attr_max_microvolts.attr) |
diff --git a/drivers/regulator/da9210-regulator.c b/drivers/regulator/da9210-regulator.c index bc6100103f7f..f0489cb9018b 100644 --- a/drivers/regulator/da9210-regulator.c +++ b/drivers/regulator/da9210-regulator.c | |||
@@ -152,6 +152,15 @@ static int da9210_i2c_probe(struct i2c_client *i2c, | |||
152 | config.regmap = chip->regmap; | 152 | config.regmap = chip->regmap; |
153 | config.of_node = dev->of_node; | 153 | config.of_node = dev->of_node; |
154 | 154 | ||
155 | /* Mask all interrupt sources to deassert interrupt line */ | ||
156 | error = regmap_write(chip->regmap, DA9210_REG_MASK_A, ~0); | ||
157 | if (!error) | ||
158 | error = regmap_write(chip->regmap, DA9210_REG_MASK_B, ~0); | ||
159 | if (error) { | ||
160 | dev_err(&i2c->dev, "Failed to write to mask reg: %d\n", error); | ||
161 | return error; | ||
162 | } | ||
163 | |||
155 | rdev = devm_regulator_register(&i2c->dev, &da9210_reg, &config); | 164 | rdev = devm_regulator_register(&i2c->dev, &da9210_reg, &config); |
156 | if (IS_ERR(rdev)) { | 165 | if (IS_ERR(rdev)) { |
157 | dev_err(&i2c->dev, "Failed to register DA9210 regulator\n"); | 166 | dev_err(&i2c->dev, "Failed to register DA9210 regulator\n"); |
diff --git a/drivers/regulator/rk808-regulator.c b/drivers/regulator/rk808-regulator.c index 1f93b752a81c..3fd44353cc80 100644 --- a/drivers/regulator/rk808-regulator.c +++ b/drivers/regulator/rk808-regulator.c | |||
@@ -235,6 +235,7 @@ static const struct regulator_desc rk808_reg[] = { | |||
235 | .vsel_mask = RK808_LDO_VSEL_MASK, | 235 | .vsel_mask = RK808_LDO_VSEL_MASK, |
236 | .enable_reg = RK808_LDO_EN_REG, | 236 | .enable_reg = RK808_LDO_EN_REG, |
237 | .enable_mask = BIT(0), | 237 | .enable_mask = BIT(0), |
238 | .enable_time = 400, | ||
238 | .owner = THIS_MODULE, | 239 | .owner = THIS_MODULE, |
239 | }, { | 240 | }, { |
240 | .name = "LDO_REG2", | 241 | .name = "LDO_REG2", |
@@ -249,6 +250,7 @@ static const struct regulator_desc rk808_reg[] = { | |||
249 | .vsel_mask = RK808_LDO_VSEL_MASK, | 250 | .vsel_mask = RK808_LDO_VSEL_MASK, |
250 | .enable_reg = RK808_LDO_EN_REG, | 251 | .enable_reg = RK808_LDO_EN_REG, |
251 | .enable_mask = BIT(1), | 252 | .enable_mask = BIT(1), |
253 | .enable_time = 400, | ||
252 | .owner = THIS_MODULE, | 254 | .owner = THIS_MODULE, |
253 | }, { | 255 | }, { |
254 | .name = "LDO_REG3", | 256 | .name = "LDO_REG3", |
@@ -263,6 +265,7 @@ static const struct regulator_desc rk808_reg[] = { | |||
263 | .vsel_mask = RK808_BUCK4_VSEL_MASK, | 265 | .vsel_mask = RK808_BUCK4_VSEL_MASK, |
264 | .enable_reg = RK808_LDO_EN_REG, | 266 | .enable_reg = RK808_LDO_EN_REG, |
265 | .enable_mask = BIT(2), | 267 | .enable_mask = BIT(2), |
268 | .enable_time = 400, | ||
266 | .owner = THIS_MODULE, | 269 | .owner = THIS_MODULE, |
267 | }, { | 270 | }, { |
268 | .name = "LDO_REG4", | 271 | .name = "LDO_REG4", |
@@ -277,6 +280,7 @@ static const struct regulator_desc rk808_reg[] = { | |||
277 | .vsel_mask = RK808_LDO_VSEL_MASK, | 280 | .vsel_mask = RK808_LDO_VSEL_MASK, |
278 | .enable_reg = RK808_LDO_EN_REG, | 281 | .enable_reg = RK808_LDO_EN_REG, |
279 | .enable_mask = BIT(3), | 282 | .enable_mask = BIT(3), |
283 | .enable_time = 400, | ||
280 | .owner = THIS_MODULE, | 284 | .owner = THIS_MODULE, |
281 | }, { | 285 | }, { |
282 | .name = "LDO_REG5", | 286 | .name = "LDO_REG5", |
@@ -291,6 +295,7 @@ static const struct regulator_desc rk808_reg[] = { | |||
291 | .vsel_mask = RK808_LDO_VSEL_MASK, | 295 | .vsel_mask = RK808_LDO_VSEL_MASK, |
292 | .enable_reg = RK808_LDO_EN_REG, | 296 | .enable_reg = RK808_LDO_EN_REG, |
293 | .enable_mask = BIT(4), | 297 | .enable_mask = BIT(4), |
298 | .enable_time = 400, | ||
294 | .owner = THIS_MODULE, | 299 | .owner = THIS_MODULE, |
295 | }, { | 300 | }, { |
296 | .name = "LDO_REG6", | 301 | .name = "LDO_REG6", |
@@ -305,6 +310,7 @@ static const struct regulator_desc rk808_reg[] = { | |||
305 | .vsel_mask = RK808_LDO_VSEL_MASK, | 310 | .vsel_mask = RK808_LDO_VSEL_MASK, |
306 | .enable_reg = RK808_LDO_EN_REG, | 311 | .enable_reg = RK808_LDO_EN_REG, |
307 | .enable_mask = BIT(5), | 312 | .enable_mask = BIT(5), |
313 | .enable_time = 400, | ||
308 | .owner = THIS_MODULE, | 314 | .owner = THIS_MODULE, |
309 | }, { | 315 | }, { |
310 | .name = "LDO_REG7", | 316 | .name = "LDO_REG7", |
@@ -319,6 +325,7 @@ static const struct regulator_desc rk808_reg[] = { | |||
319 | .vsel_mask = RK808_LDO_VSEL_MASK, | 325 | .vsel_mask = RK808_LDO_VSEL_MASK, |
320 | .enable_reg = RK808_LDO_EN_REG, | 326 | .enable_reg = RK808_LDO_EN_REG, |
321 | .enable_mask = BIT(6), | 327 | .enable_mask = BIT(6), |
328 | .enable_time = 400, | ||
322 | .owner = THIS_MODULE, | 329 | .owner = THIS_MODULE, |
323 | }, { | 330 | }, { |
324 | .name = "LDO_REG8", | 331 | .name = "LDO_REG8", |
@@ -333,6 +340,7 @@ static const struct regulator_desc rk808_reg[] = { | |||
333 | .vsel_mask = RK808_LDO_VSEL_MASK, | 340 | .vsel_mask = RK808_LDO_VSEL_MASK, |
334 | .enable_reg = RK808_LDO_EN_REG, | 341 | .enable_reg = RK808_LDO_EN_REG, |
335 | .enable_mask = BIT(7), | 342 | .enable_mask = BIT(7), |
343 | .enable_time = 400, | ||
336 | .owner = THIS_MODULE, | 344 | .owner = THIS_MODULE, |
337 | }, { | 345 | }, { |
338 | .name = "SWITCH_REG1", | 346 | .name = "SWITCH_REG1", |
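Editor's note: the .enable_time = 400 entries added above advertise, in microseconds, how long each rk808 LDO needs after its enable bit is set before the output can be trusted. The following is a minimal user-space sketch of how such a field is typically consumed by a framework; the function and struct names are invented for illustration, not the regulator core's API.

/* Sketch (assumption-laden model): honor an enable_time settling delay. */
#include <stdio.h>
#include <unistd.h>

struct reg_desc {
	const char *name;
	unsigned int enable_time_us;   /* settling time after the enable bit is set */
};

static int model_enable(const struct reg_desc *d)
{
	/* 1) set the enable bit (real register write omitted in this model) */
	printf("enable %s\n", d->name);
	/* 2) wait the advertised ramp-up time before callers may rely on the rail */
	usleep(d->enable_time_us);
	return 0;
}

int main(void)
{
	struct reg_desc ldo1 = { .name = "LDO_REG1", .enable_time_us = 400 };
	return model_enable(&ldo1);
}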
diff --git a/drivers/rtc/rtc-s3c.c b/drivers/rtc/rtc-s3c.c index 4241eeab3386..f4cf6851fae9 100644 --- a/drivers/rtc/rtc-s3c.c +++ b/drivers/rtc/rtc-s3c.c | |||
@@ -849,6 +849,7 @@ static struct s3c_rtc_data const s3c2443_rtc_data = { | |||
849 | 849 | ||
850 | static struct s3c_rtc_data const s3c6410_rtc_data = { | 850 | static struct s3c_rtc_data const s3c6410_rtc_data = { |
851 | .max_user_freq = 32768, | 851 | .max_user_freq = 32768, |
852 | .needs_src_clk = true, | ||
852 | .irq_handler = s3c6410_rtc_irq, | 853 | .irq_handler = s3c6410_rtc_irq, |
853 | .set_freq = s3c6410_rtc_setfreq, | 854 | .set_freq = s3c6410_rtc_setfreq, |
854 | .enable_tick = s3c6410_rtc_enable_tick, | 855 | .enable_tick = s3c6410_rtc_enable_tick, |
diff --git a/drivers/s390/block/dcssblk.c b/drivers/s390/block/dcssblk.c index 96128cb009f3..da212813f2d5 100644 --- a/drivers/s390/block/dcssblk.c +++ b/drivers/s390/block/dcssblk.c | |||
@@ -547,7 +547,7 @@ dcssblk_add_store(struct device *dev, struct device_attribute *attr, const char | |||
547 | * parse input | 547 | * parse input |
548 | */ | 548 | */ |
549 | num_of_segments = 0; | 549 | num_of_segments = 0; |
550 | for (i = 0; ((buf[i] != '\0') && (buf[i] != '\n') && i < count); i++) { | 550 | for (i = 0; (i < count && (buf[i] != '\0') && (buf[i] != '\n')); i++) { |
551 | for (j = i; (buf[j] != ':') && | 551 | for (j = i; (buf[j] != ':') && |
552 | (buf[j] != '\0') && | 552 | (buf[j] != '\0') && |
553 | (buf[j] != '\n') && | 553 | (buf[j] != '\n') && |
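Editor's note: the dcssblk change above moves the i < count test in front of the buf[i] dereferences, so the parser can no longer read one byte past the buffer before the bound is checked. A standalone illustration of the same ordering rule follows; the buffer contents and token separator are made up for the example.

/* Sketch: check the index before dereferencing, never after. */
#include <stddef.h>
#include <stdio.h>

static size_t count_separators(const char *buf, size_t count)
{
	size_t i, n = 0;

	/* bound first, then the character tests that dereference buf[i] */
	for (i = 0; i < count && buf[i] != '\0' && buf[i] != '\n'; i++)
		if (buf[i] == ':')
			n++;
	return n;
}

int main(void)
{
	char input[] = "seg1:seg2:seg3";
	printf("%zu separators\n", count_separators(input, sizeof(input) - 1));
	return 0;
}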
diff --git a/drivers/s390/block/scm_blk_cluster.c b/drivers/s390/block/scm_blk_cluster.c index 09db45296eed..7497ddde2dd6 100644 --- a/drivers/s390/block/scm_blk_cluster.c +++ b/drivers/s390/block/scm_blk_cluster.c | |||
@@ -92,7 +92,7 @@ bool scm_reserve_cluster(struct scm_request *scmrq) | |||
92 | add = 0; | 92 | add = 0; |
93 | continue; | 93 | continue; |
94 | } | 94 | } |
95 | for (pos = 0; pos <= iter->aob->request.msb_count; pos++) { | 95 | for (pos = 0; pos < iter->aob->request.msb_count; pos++) { |
96 | if (clusters_intersect(req, iter->request[pos]) && | 96 | if (clusters_intersect(req, iter->request[pos]) && |
97 | (rq_data_dir(req) == WRITE || | 97 | (rq_data_dir(req) == WRITE || |
98 | rq_data_dir(iter->request[pos]) == WRITE)) { | 98 | rq_data_dir(iter->request[pos]) == WRITE)) { |
diff --git a/drivers/spi/spi-atmel.c b/drivers/spi/spi-atmel.c index 9af7841f2e8c..06de34001c66 100644 --- a/drivers/spi/spi-atmel.c +++ b/drivers/spi/spi-atmel.c | |||
@@ -764,17 +764,17 @@ static void atmel_spi_pdc_next_xfer(struct spi_master *master, | |||
764 | (unsigned long long)xfer->rx_dma); | 764 | (unsigned long long)xfer->rx_dma); |
765 | } | 765 | } |
766 | 766 | ||
767 | /* REVISIT: We're waiting for ENDRX before we start the next | 767 | /* REVISIT: We're waiting for RXBUFF before we start the next |
768 | * transfer because we need to handle some difficult timing | 768 | * transfer because we need to handle some difficult timing |
769 | * issues otherwise. If we wait for ENDTX in one transfer and | 769 | * issues otherwise. If we wait for TXBUFE in one transfer and |
770 | * then starts waiting for ENDRX in the next, it's difficult | 770 | * then starts waiting for RXBUFF in the next, it's difficult |
771 | * to tell the difference between the ENDRX interrupt we're | 771 | * to tell the difference between the RXBUFF interrupt we're |
772 | * actually waiting for and the ENDRX interrupt of the | 772 | * actually waiting for and the RXBUFF interrupt of the |
773 | * previous transfer. | 773 | * previous transfer. |
774 | * | 774 | * |
775 | * It should be doable, though. Just not now... | 775 | * It should be doable, though. Just not now... |
776 | */ | 776 | */ |
777 | spi_writel(as, IER, SPI_BIT(ENDRX) | SPI_BIT(OVRES)); | 777 | spi_writel(as, IER, SPI_BIT(RXBUFF) | SPI_BIT(OVRES)); |
778 | spi_writel(as, PTCR, SPI_BIT(TXTEN) | SPI_BIT(RXTEN)); | 778 | spi_writel(as, PTCR, SPI_BIT(TXTEN) | SPI_BIT(RXTEN)); |
779 | } | 779 | } |
780 | 780 | ||
diff --git a/drivers/spi/spi-dw-mid.c b/drivers/spi/spi-dw-mid.c index a0197fd4e95c..3ce39d10fafb 100644 --- a/drivers/spi/spi-dw-mid.c +++ b/drivers/spi/spi-dw-mid.c | |||
@@ -139,6 +139,9 @@ static struct dma_async_tx_descriptor *dw_spi_dma_prepare_tx(struct dw_spi *dws) | |||
139 | 1, | 139 | 1, |
140 | DMA_MEM_TO_DEV, | 140 | DMA_MEM_TO_DEV, |
141 | DMA_PREP_INTERRUPT | DMA_CTRL_ACK); | 141 | DMA_PREP_INTERRUPT | DMA_CTRL_ACK); |
142 | if (!txdesc) | ||
143 | return NULL; | ||
144 | |||
142 | txdesc->callback = dw_spi_dma_tx_done; | 145 | txdesc->callback = dw_spi_dma_tx_done; |
143 | txdesc->callback_param = dws; | 146 | txdesc->callback_param = dws; |
144 | 147 | ||
@@ -184,6 +187,9 @@ static struct dma_async_tx_descriptor *dw_spi_dma_prepare_rx(struct dw_spi *dws) | |||
184 | 1, | 187 | 1, |
185 | DMA_DEV_TO_MEM, | 188 | DMA_DEV_TO_MEM, |
186 | DMA_PREP_INTERRUPT | DMA_CTRL_ACK); | 189 | DMA_PREP_INTERRUPT | DMA_CTRL_ACK); |
190 | if (!rxdesc) | ||
191 | return NULL; | ||
192 | |||
187 | rxdesc->callback = dw_spi_dma_rx_done; | 193 | rxdesc->callback = dw_spi_dma_rx_done; |
188 | rxdesc->callback_param = dws; | 194 | rxdesc->callback_param = dws; |
189 | 195 | ||
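Editor's note: dmaengine prepare calls such as the one above may return NULL when no descriptor is available, so the added checks keep the driver from writing callback fields through a NULL pointer. Below is a user-space model of the same guard; the prepare/descriptor names are invented and stand in for the dmaengine API.

/* Sketch: never touch a descriptor a prepare step may have failed to produce. */
#include <stdio.h>
#include <stdlib.h>

struct desc {
	void (*callback)(void *);
	void *callback_param;
};

static struct desc *prepare_desc(int fail)
{
	return fail ? NULL : calloc(1, sizeof(struct desc));
}

static void done(void *p) { printf("done %p\n", p); }

static struct desc *prepare_tx(int fail)
{
	struct desc *d = prepare_desc(fail);

	if (!d)                     /* the guard added by the patch */
		return NULL;

	d->callback = done;         /* safe: d is known to be valid here */
	d->callback_param = NULL;
	return d;
}

int main(void)
{
	struct desc *d = prepare_tx(0);

	if (d) {
		d->callback(d);
		free(d);
	}
	return prepare_tx(1) == NULL ? 0 : 1;
}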
diff --git a/drivers/spi/spi-dw-pci.c b/drivers/spi/spi-dw-pci.c index 5ba331047cbe..6d331e0db331 100644 --- a/drivers/spi/spi-dw-pci.c +++ b/drivers/spi/spi-dw-pci.c | |||
@@ -36,13 +36,13 @@ struct spi_pci_desc { | |||
36 | 36 | ||
37 | static struct spi_pci_desc spi_pci_mid_desc_1 = { | 37 | static struct spi_pci_desc spi_pci_mid_desc_1 = { |
38 | .setup = dw_spi_mid_init, | 38 | .setup = dw_spi_mid_init, |
39 | .num_cs = 32, | 39 | .num_cs = 5, |
40 | .bus_num = 0, | 40 | .bus_num = 0, |
41 | }; | 41 | }; |
42 | 42 | ||
43 | static struct spi_pci_desc spi_pci_mid_desc_2 = { | 43 | static struct spi_pci_desc spi_pci_mid_desc_2 = { |
44 | .setup = dw_spi_mid_init, | 44 | .setup = dw_spi_mid_init, |
45 | .num_cs = 4, | 45 | .num_cs = 2, |
46 | .bus_num = 1, | 46 | .bus_num = 1, |
47 | }; | 47 | }; |
48 | 48 | ||
diff --git a/drivers/spi/spi-dw.c b/drivers/spi/spi-dw.c index 5a97a62b298a..4847afba89f4 100644 --- a/drivers/spi/spi-dw.c +++ b/drivers/spi/spi-dw.c | |||
@@ -621,14 +621,14 @@ static void spi_hw_init(struct device *dev, struct dw_spi *dws) | |||
621 | if (!dws->fifo_len) { | 621 | if (!dws->fifo_len) { |
622 | u32 fifo; | 622 | u32 fifo; |
623 | 623 | ||
624 | for (fifo = 2; fifo <= 256; fifo++) { | 624 | for (fifo = 1; fifo < 256; fifo++) { |
625 | dw_writew(dws, DW_SPI_TXFLTR, fifo); | 625 | dw_writew(dws, DW_SPI_TXFLTR, fifo); |
626 | if (fifo != dw_readw(dws, DW_SPI_TXFLTR)) | 626 | if (fifo != dw_readw(dws, DW_SPI_TXFLTR)) |
627 | break; | 627 | break; |
628 | } | 628 | } |
629 | dw_writew(dws, DW_SPI_TXFLTR, 0); | 629 | dw_writew(dws, DW_SPI_TXFLTR, 0); |
630 | 630 | ||
631 | dws->fifo_len = (fifo == 2) ? 0 : fifo - 1; | 631 | dws->fifo_len = (fifo == 1) ? 0 : fifo; |
632 | dev_dbg(dev, "Detected FIFO size: %u bytes\n", dws->fifo_len); | 632 | dev_dbg(dev, "Detected FIFO size: %u bytes\n", dws->fifo_len); |
633 | } | 633 | } |
634 | } | 634 | } |
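Editor's note: the FIFO probe above writes candidate thresholds into TXFLTR and stops at the first value the register refuses to hold; starting at 1 and reporting fifo itself (instead of fifo - 1) keeps an N-entry FIFO from being detected as N-1. The standalone model below runs the same probe against a simulated register; the assumption that out-of-range writes are simply ignored is part of the model, not a statement about the real silicon.

/* Sketch: probe FIFO depth via a threshold register that only latches
 * values smaller than the true depth (simulated here as FIFO_DEPTH). */
#include <stdio.h>

#define FIFO_DEPTH 16          /* the value the probe should recover */

static unsigned int txfltr;     /* simulated threshold register */

static void reg_write(unsigned int v)
{
	if (v < FIFO_DEPTH)     /* model: hardware ignores out-of-range thresholds */
		txfltr = v;
}

static unsigned int reg_read(void) { return txfltr; }

int main(void)
{
	unsigned int fifo, fifo_len;

	for (fifo = 1; fifo < 256; fifo++) {
		reg_write(fifo);
		if (fifo != reg_read())
			break;                  /* first rejected value == depth */
	}
	reg_write(0);

	fifo_len = (fifo == 1) ? 0 : fifo;
	printf("detected FIFO size: %u\n", fifo_len);   /* prints 16 */
	return fifo_len == FIFO_DEPTH ? 0 : 1;
}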
diff --git a/drivers/spi/spi-img-spfi.c b/drivers/spi/spi-img-spfi.c index c01567d53581..e649bc7d4c08 100644 --- a/drivers/spi/spi-img-spfi.c +++ b/drivers/spi/spi-img-spfi.c | |||
@@ -459,6 +459,13 @@ static int img_spfi_transfer_one(struct spi_master *master, | |||
459 | unsigned long flags; | 459 | unsigned long flags; |
460 | int ret; | 460 | int ret; |
461 | 461 | ||
462 | if (xfer->len > SPFI_TRANSACTION_TSIZE_MASK) { | ||
463 | dev_err(spfi->dev, | ||
464 | "Transfer length (%d) is greater than the max supported (%d)", | ||
465 | xfer->len, SPFI_TRANSACTION_TSIZE_MASK); | ||
466 | return -EINVAL; | ||
467 | } | ||
468 | |||
462 | /* | 469 | /* |
463 | * Stop all DMA and reset the controller if the previous transaction | 470 | * Stop all DMA and reset the controller if the previous transaction |
464 | * timed-out and never completed it's DMA. | 471 | * timed-out and never completed it's DMA. |
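Editor's note: the img-spfi hunk above rejects transfers whose length cannot be represented in the transaction register's size field instead of letting the value be truncated when written. The same validation in isolation, with an illustrative 16-bit mask rather than the driver's actual SPFI_TRANSACTION_TSIZE_MASK value:

/* Sketch: reject a length the hardware field cannot represent. */
#include <stdio.h>
#include <errno.h>

#define TSIZE_MASK 0xffff       /* illustrative: 16 bits of transfer size */

static int check_xfer_len(unsigned int len)
{
	if (len > TSIZE_MASK) {
		fprintf(stderr,
			"transfer length (%u) is greater than the max supported (%u)\n",
			len, TSIZE_MASK);
		return -EINVAL;
	}
	return 0;
}

int main(void)
{
	return (check_xfer_len(70000) == -EINVAL &&
		check_xfer_len(4096) == 0) ? 0 : 1;
}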
diff --git a/drivers/spi/spi-pl022.c b/drivers/spi/spi-pl022.c index 89ca162801da..ee513a85296b 100644 --- a/drivers/spi/spi-pl022.c +++ b/drivers/spi/spi-pl022.c | |||
@@ -534,12 +534,12 @@ static void giveback(struct pl022 *pl022) | |||
534 | pl022->cur_msg = NULL; | 534 | pl022->cur_msg = NULL; |
535 | pl022->cur_transfer = NULL; | 535 | pl022->cur_transfer = NULL; |
536 | pl022->cur_chip = NULL; | 536 | pl022->cur_chip = NULL; |
537 | spi_finalize_current_message(pl022->master); | ||
538 | 537 | ||
539 | /* disable the SPI/SSP operation */ | 538 | /* disable the SPI/SSP operation */ |
540 | writew((readw(SSP_CR1(pl022->virtbase)) & | 539 | writew((readw(SSP_CR1(pl022->virtbase)) & |
541 | (~SSP_CR1_MASK_SSE)), SSP_CR1(pl022->virtbase)); | 540 | (~SSP_CR1_MASK_SSE)), SSP_CR1(pl022->virtbase)); |
542 | 541 | ||
542 | spi_finalize_current_message(pl022->master); | ||
543 | } | 543 | } |
544 | 544 | ||
545 | /** | 545 | /** |
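Editor's note: moving spi_finalize_current_message() after the SSE disable means the core cannot start the next message while the previous transfer's hardware block is still enabled. A condensed model of that ordering rule follows; the function names are invented for the sketch and do not mirror the pl022 driver.

/* Sketch: quiesce the hardware before telling the framework the message is done. */
#include <stdio.h>

static int controller_enabled = 1;

static void finalize_message(void)
{
	/* the framework may immediately start the next message from here */
	printf("next message may start, controller_enabled=%d\n", controller_enabled);
}

static void giveback(void)
{
	controller_enabled = 0;     /* disable the SPI/SSP block first ... */
	finalize_message();         /* ... only then hand control back */
}

int main(void)
{
	giveback();
	return 0;
}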
diff --git a/drivers/spi/spi-ti-qspi.c b/drivers/spi/spi-ti-qspi.c index 884a716e50cb..5c0616870358 100644 --- a/drivers/spi/spi-ti-qspi.c +++ b/drivers/spi/spi-ti-qspi.c | |||
@@ -101,6 +101,7 @@ struct ti_qspi { | |||
101 | #define QSPI_FLEN(n) ((n - 1) << 0) | 101 | #define QSPI_FLEN(n) ((n - 1) << 0) |
102 | 102 | ||
103 | /* STATUS REGISTER */ | 103 | /* STATUS REGISTER */ |
104 | #define BUSY 0x01 | ||
104 | #define WC 0x02 | 105 | #define WC 0x02 |
105 | 106 | ||
106 | /* INTERRUPT REGISTER */ | 107 | /* INTERRUPT REGISTER */ |
@@ -199,6 +200,21 @@ static void ti_qspi_restore_ctx(struct ti_qspi *qspi) | |||
199 | ti_qspi_write(qspi, ctx_reg->clkctrl, QSPI_SPI_CLOCK_CNTRL_REG); | 200 | ti_qspi_write(qspi, ctx_reg->clkctrl, QSPI_SPI_CLOCK_CNTRL_REG); |
200 | } | 201 | } |
201 | 202 | ||
203 | static inline u32 qspi_is_busy(struct ti_qspi *qspi) | ||
204 | { | ||
205 | u32 stat; | ||
206 | unsigned long timeout = jiffies + QSPI_COMPLETION_TIMEOUT; | ||
207 | |||
208 | stat = ti_qspi_read(qspi, QSPI_SPI_STATUS_REG); | ||
209 | while ((stat & BUSY) && time_after(timeout, jiffies)) { | ||
210 | cpu_relax(); | ||
211 | stat = ti_qspi_read(qspi, QSPI_SPI_STATUS_REG); | ||
212 | } | ||
213 | |||
214 | WARN(stat & BUSY, "qspi busy\n"); | ||
215 | return stat & BUSY; | ||
216 | } | ||
217 | |||
202 | static int qspi_write_msg(struct ti_qspi *qspi, struct spi_transfer *t) | 218 | static int qspi_write_msg(struct ti_qspi *qspi, struct spi_transfer *t) |
203 | { | 219 | { |
204 | int wlen, count; | 220 | int wlen, count; |
@@ -211,6 +227,9 @@ static int qspi_write_msg(struct ti_qspi *qspi, struct spi_transfer *t) | |||
211 | wlen = t->bits_per_word >> 3; /* in bytes */ | 227 | wlen = t->bits_per_word >> 3; /* in bytes */ |
212 | 228 | ||
213 | while (count) { | 229 | while (count) { |
230 | if (qspi_is_busy(qspi)) | ||
231 | return -EBUSY; | ||
232 | |||
214 | switch (wlen) { | 233 | switch (wlen) { |
215 | case 1: | 234 | case 1: |
216 | dev_dbg(qspi->dev, "tx cmd %08x dc %08x data %02x\n", | 235 | dev_dbg(qspi->dev, "tx cmd %08x dc %08x data %02x\n", |
@@ -266,6 +285,9 @@ static int qspi_read_msg(struct ti_qspi *qspi, struct spi_transfer *t) | |||
266 | 285 | ||
267 | while (count) { | 286 | while (count) { |
268 | dev_dbg(qspi->dev, "rx cmd %08x dc %08x\n", cmd, qspi->dc); | 287 | dev_dbg(qspi->dev, "rx cmd %08x dc %08x\n", cmd, qspi->dc); |
288 | if (qspi_is_busy(qspi)) | ||
289 | return -EBUSY; | ||
290 | |||
269 | ti_qspi_write(qspi, cmd, QSPI_SPI_CMD_REG); | 291 | ti_qspi_write(qspi, cmd, QSPI_SPI_CMD_REG); |
270 | if (!wait_for_completion_timeout(&qspi->transfer_complete, | 292 | if (!wait_for_completion_timeout(&qspi->transfer_complete, |
271 | QSPI_COMPLETION_TIMEOUT)) { | 293 | QSPI_COMPLETION_TIMEOUT)) { |
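Editor's note: qspi_is_busy() above spins on the status register with cpu_relax() until BUSY clears or a jiffies deadline passes, and the write/read loops now refuse to issue a new word while the previous command is still in flight. The user-space model below shows the deadline-bounded poll; the simulated status source stands in for the real QSPI_SPI_STATUS_REG and its timing is arbitrary.

/* Sketch: poll a BUSY flag with a deadline instead of waiting forever. */
#include <stdio.h>
#include <time.h>

#define BUSY 0x01

static double now(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return ts.tv_sec + ts.tv_nsec / 1e9;
}

/* stand-in for reading the status register: busy for the first ~2 ms */
static unsigned int read_status(double start)
{
	return (now() - start < 0.002) ? BUSY : 0;
}

static int wait_not_busy(double timeout_s)
{
	double start = now(), deadline = start + timeout_s;
	unsigned int stat = read_status(start);

	while ((stat & BUSY) && now() < deadline)
		stat = read_status(start);      /* cpu_relax() in the kernel version */

	return stat & BUSY ? -1 : 0;            /* non-zero: still busy, give up */
}

int main(void)
{
	printf("busy wait result: %d\n", wait_not_busy(0.1));
	return 0;
}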
diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c index af98b096af2f..175c9956cbe3 100644 --- a/drivers/usb/gadget/function/f_fs.c +++ b/drivers/usb/gadget/function/f_fs.c | |||
@@ -144,10 +144,9 @@ struct ffs_io_data { | |||
144 | bool read; | 144 | bool read; |
145 | 145 | ||
146 | struct kiocb *kiocb; | 146 | struct kiocb *kiocb; |
147 | const struct iovec *iovec; | 147 | struct iov_iter data; |
148 | unsigned long nr_segs; | 148 | const void *to_free; |
149 | char __user *buf; | 149 | char *buf; |
150 | size_t len; | ||
151 | 150 | ||
152 | struct mm_struct *mm; | 151 | struct mm_struct *mm; |
153 | struct work_struct work; | 152 | struct work_struct work; |
@@ -649,29 +648,10 @@ static void ffs_user_copy_worker(struct work_struct *work) | |||
649 | io_data->req->actual; | 648 | io_data->req->actual; |
650 | 649 | ||
651 | if (io_data->read && ret > 0) { | 650 | if (io_data->read && ret > 0) { |
652 | int i; | ||
653 | size_t pos = 0; | ||
654 | |||
655 | /* | ||
656 | * Since req->length may be bigger than io_data->len (after | ||
657 | * being rounded up to maxpacketsize), we may end up with more | ||
658 | * data then user space has space for. | ||
659 | */ | ||
660 | ret = min_t(int, ret, io_data->len); | ||
661 | |||
662 | use_mm(io_data->mm); | 651 | use_mm(io_data->mm); |
663 | for (i = 0; i < io_data->nr_segs; i++) { | 652 | ret = copy_to_iter(io_data->buf, ret, &io_data->data); |
664 | size_t len = min_t(size_t, ret - pos, | 653 | if (iov_iter_count(&io_data->data)) |
665 | io_data->iovec[i].iov_len); | 654 | ret = -EFAULT; |
666 | if (!len) | ||
667 | break; | ||
668 | if (unlikely(copy_to_user(io_data->iovec[i].iov_base, | ||
669 | &io_data->buf[pos], len))) { | ||
670 | ret = -EFAULT; | ||
671 | break; | ||
672 | } | ||
673 | pos += len; | ||
674 | } | ||
675 | unuse_mm(io_data->mm); | 655 | unuse_mm(io_data->mm); |
676 | } | 656 | } |
677 | 657 | ||
@@ -684,7 +664,7 @@ static void ffs_user_copy_worker(struct work_struct *work) | |||
684 | 664 | ||
685 | io_data->kiocb->private = NULL; | 665 | io_data->kiocb->private = NULL; |
686 | if (io_data->read) | 666 | if (io_data->read) |
687 | kfree(io_data->iovec); | 667 | kfree(io_data->to_free); |
688 | kfree(io_data->buf); | 668 | kfree(io_data->buf); |
689 | kfree(io_data); | 669 | kfree(io_data); |
690 | } | 670 | } |
@@ -743,6 +723,7 @@ static ssize_t ffs_epfile_io(struct file *file, struct ffs_io_data *io_data) | |||
743 | * before the waiting completes, so do not assign to 'gadget' earlier | 723 | * before the waiting completes, so do not assign to 'gadget' earlier |
744 | */ | 724 | */ |
745 | struct usb_gadget *gadget = epfile->ffs->gadget; | 725 | struct usb_gadget *gadget = epfile->ffs->gadget; |
726 | size_t copied; | ||
746 | 727 | ||
747 | spin_lock_irq(&epfile->ffs->eps_lock); | 728 | spin_lock_irq(&epfile->ffs->eps_lock); |
748 | /* In the meantime, endpoint got disabled or changed. */ | 729 | /* In the meantime, endpoint got disabled or changed. */ |
@@ -750,34 +731,21 @@ static ssize_t ffs_epfile_io(struct file *file, struct ffs_io_data *io_data) | |||
750 | spin_unlock_irq(&epfile->ffs->eps_lock); | 731 | spin_unlock_irq(&epfile->ffs->eps_lock); |
751 | return -ESHUTDOWN; | 732 | return -ESHUTDOWN; |
752 | } | 733 | } |
734 | data_len = iov_iter_count(&io_data->data); | ||
753 | /* | 735 | /* |
754 | * Controller may require buffer size to be aligned to | 736 | * Controller may require buffer size to be aligned to |
755 | * maxpacketsize of an out endpoint. | 737 | * maxpacketsize of an out endpoint. |
756 | */ | 738 | */ |
757 | data_len = io_data->read ? | 739 | if (io_data->read) |
758 | usb_ep_align_maybe(gadget, ep->ep, io_data->len) : | 740 | data_len = usb_ep_align_maybe(gadget, ep->ep, data_len); |
759 | io_data->len; | ||
760 | spin_unlock_irq(&epfile->ffs->eps_lock); | 741 | spin_unlock_irq(&epfile->ffs->eps_lock); |
761 | 742 | ||
762 | data = kmalloc(data_len, GFP_KERNEL); | 743 | data = kmalloc(data_len, GFP_KERNEL); |
763 | if (unlikely(!data)) | 744 | if (unlikely(!data)) |
764 | return -ENOMEM; | 745 | return -ENOMEM; |
765 | if (io_data->aio && !io_data->read) { | 746 | if (!io_data->read) { |
766 | int i; | 747 | copied = copy_from_iter(data, data_len, &io_data->data); |
767 | size_t pos = 0; | 748 | if (copied != data_len) { |
768 | for (i = 0; i < io_data->nr_segs; i++) { | ||
769 | if (unlikely(copy_from_user(&data[pos], | ||
770 | io_data->iovec[i].iov_base, | ||
771 | io_data->iovec[i].iov_len))) { | ||
772 | ret = -EFAULT; | ||
773 | goto error; | ||
774 | } | ||
775 | pos += io_data->iovec[i].iov_len; | ||
776 | } | ||
777 | } else { | ||
778 | if (!io_data->read && | ||
779 | unlikely(__copy_from_user(data, io_data->buf, | ||
780 | io_data->len))) { | ||
781 | ret = -EFAULT; | 749 | ret = -EFAULT; |
782 | goto error; | 750 | goto error; |
783 | } | 751 | } |
@@ -876,10 +844,8 @@ static ssize_t ffs_epfile_io(struct file *file, struct ffs_io_data *io_data) | |||
876 | */ | 844 | */ |
877 | ret = ep->status; | 845 | ret = ep->status; |
878 | if (io_data->read && ret > 0) { | 846 | if (io_data->read && ret > 0) { |
879 | ret = min_t(size_t, ret, io_data->len); | 847 | ret = copy_to_iter(data, ret, &io_data->data); |
880 | 848 | if (unlikely(iov_iter_count(&io_data->data))) | |
881 | if (unlikely(copy_to_user(io_data->buf, | ||
882 | data, ret))) | ||
883 | ret = -EFAULT; | 849 | ret = -EFAULT; |
884 | } | 850 | } |
885 | } | 851 | } |
@@ -898,37 +864,6 @@ error: | |||
898 | return ret; | 864 | return ret; |
899 | } | 865 | } |
900 | 866 | ||
901 | static ssize_t | ||
902 | ffs_epfile_write(struct file *file, const char __user *buf, size_t len, | ||
903 | loff_t *ptr) | ||
904 | { | ||
905 | struct ffs_io_data io_data; | ||
906 | |||
907 | ENTER(); | ||
908 | |||
909 | io_data.aio = false; | ||
910 | io_data.read = false; | ||
911 | io_data.buf = (char * __user)buf; | ||
912 | io_data.len = len; | ||
913 | |||
914 | return ffs_epfile_io(file, &io_data); | ||
915 | } | ||
916 | |||
917 | static ssize_t | ||
918 | ffs_epfile_read(struct file *file, char __user *buf, size_t len, loff_t *ptr) | ||
919 | { | ||
920 | struct ffs_io_data io_data; | ||
921 | |||
922 | ENTER(); | ||
923 | |||
924 | io_data.aio = false; | ||
925 | io_data.read = true; | ||
926 | io_data.buf = buf; | ||
927 | io_data.len = len; | ||
928 | |||
929 | return ffs_epfile_io(file, &io_data); | ||
930 | } | ||
931 | |||
932 | static int | 867 | static int |
933 | ffs_epfile_open(struct inode *inode, struct file *file) | 868 | ffs_epfile_open(struct inode *inode, struct file *file) |
934 | { | 869 | { |
@@ -965,67 +900,86 @@ static int ffs_aio_cancel(struct kiocb *kiocb) | |||
965 | return value; | 900 | return value; |
966 | } | 901 | } |
967 | 902 | ||
968 | static ssize_t ffs_epfile_aio_write(struct kiocb *kiocb, | 903 | static ssize_t ffs_epfile_write_iter(struct kiocb *kiocb, struct iov_iter *from) |
969 | const struct iovec *iovec, | ||
970 | unsigned long nr_segs, loff_t loff) | ||
971 | { | 904 | { |
972 | struct ffs_io_data *io_data; | 905 | struct ffs_io_data io_data, *p = &io_data; |
906 | ssize_t res; | ||
973 | 907 | ||
974 | ENTER(); | 908 | ENTER(); |
975 | 909 | ||
976 | io_data = kmalloc(sizeof(*io_data), GFP_KERNEL); | 910 | if (!is_sync_kiocb(kiocb)) { |
977 | if (unlikely(!io_data)) | 911 | p = kmalloc(sizeof(io_data), GFP_KERNEL); |
978 | return -ENOMEM; | 912 | if (unlikely(!p)) |
913 | return -ENOMEM; | ||
914 | p->aio = true; | ||
915 | } else { | ||
916 | p->aio = false; | ||
917 | } | ||
979 | 918 | ||
980 | io_data->aio = true; | 919 | p->read = false; |
981 | io_data->read = false; | 920 | p->kiocb = kiocb; |
982 | io_data->kiocb = kiocb; | 921 | p->data = *from; |
983 | io_data->iovec = iovec; | 922 | p->mm = current->mm; |
984 | io_data->nr_segs = nr_segs; | ||
985 | io_data->len = kiocb->ki_nbytes; | ||
986 | io_data->mm = current->mm; | ||
987 | 923 | ||
988 | kiocb->private = io_data; | 924 | kiocb->private = p; |
989 | 925 | ||
990 | kiocb_set_cancel_fn(kiocb, ffs_aio_cancel); | 926 | kiocb_set_cancel_fn(kiocb, ffs_aio_cancel); |
991 | 927 | ||
992 | return ffs_epfile_io(kiocb->ki_filp, io_data); | 928 | res = ffs_epfile_io(kiocb->ki_filp, p); |
929 | if (res == -EIOCBQUEUED) | ||
930 | return res; | ||
931 | if (p->aio) | ||
932 | kfree(p); | ||
933 | else | ||
934 | *from = p->data; | ||
935 | return res; | ||
993 | } | 936 | } |
994 | 937 | ||
995 | static ssize_t ffs_epfile_aio_read(struct kiocb *kiocb, | 938 | static ssize_t ffs_epfile_read_iter(struct kiocb *kiocb, struct iov_iter *to) |
996 | const struct iovec *iovec, | ||
997 | unsigned long nr_segs, loff_t loff) | ||
998 | { | 939 | { |
999 | struct ffs_io_data *io_data; | 940 | struct ffs_io_data io_data, *p = &io_data; |
1000 | struct iovec *iovec_copy; | 941 | ssize_t res; |
1001 | 942 | ||
1002 | ENTER(); | 943 | ENTER(); |
1003 | 944 | ||
1004 | iovec_copy = kmalloc_array(nr_segs, sizeof(*iovec_copy), GFP_KERNEL); | 945 | if (!is_sync_kiocb(kiocb)) { |
1005 | if (unlikely(!iovec_copy)) | 946 | p = kmalloc(sizeof(io_data), GFP_KERNEL); |
1006 | return -ENOMEM; | 947 | if (unlikely(!p)) |
1007 | 948 | return -ENOMEM; | |
1008 | memcpy(iovec_copy, iovec, sizeof(struct iovec)*nr_segs); | 949 | p->aio = true; |
1009 | 950 | } else { | |
1010 | io_data = kmalloc(sizeof(*io_data), GFP_KERNEL); | 951 | p->aio = false; |
1011 | if (unlikely(!io_data)) { | ||
1012 | kfree(iovec_copy); | ||
1013 | return -ENOMEM; | ||
1014 | } | 952 | } |
1015 | 953 | ||
1016 | io_data->aio = true; | 954 | p->read = true; |
1017 | io_data->read = true; | 955 | p->kiocb = kiocb; |
1018 | io_data->kiocb = kiocb; | 956 | if (p->aio) { |
1019 | io_data->iovec = iovec_copy; | 957 | p->to_free = dup_iter(&p->data, to, GFP_KERNEL); |
1020 | io_data->nr_segs = nr_segs; | 958 | if (!p->to_free) { |
1021 | io_data->len = kiocb->ki_nbytes; | 959 | kfree(p); |
1022 | io_data->mm = current->mm; | 960 | return -ENOMEM; |
961 | } | ||
962 | } else { | ||
963 | p->data = *to; | ||
964 | p->to_free = NULL; | ||
965 | } | ||
966 | p->mm = current->mm; | ||
1023 | 967 | ||
1024 | kiocb->private = io_data; | 968 | kiocb->private = p; |
1025 | 969 | ||
1026 | kiocb_set_cancel_fn(kiocb, ffs_aio_cancel); | 970 | kiocb_set_cancel_fn(kiocb, ffs_aio_cancel); |
1027 | 971 | ||
1028 | return ffs_epfile_io(kiocb->ki_filp, io_data); | 972 | res = ffs_epfile_io(kiocb->ki_filp, p); |
973 | if (res == -EIOCBQUEUED) | ||
974 | return res; | ||
975 | |||
976 | if (p->aio) { | ||
977 | kfree(p->to_free); | ||
978 | kfree(p); | ||
979 | } else { | ||
980 | *to = p->data; | ||
981 | } | ||
982 | return res; | ||
1029 | } | 983 | } |
1030 | 984 | ||
1031 | static int | 985 | static int |
@@ -1105,10 +1059,10 @@ static const struct file_operations ffs_epfile_operations = { | |||
1105 | .llseek = no_llseek, | 1059 | .llseek = no_llseek, |
1106 | 1060 | ||
1107 | .open = ffs_epfile_open, | 1061 | .open = ffs_epfile_open, |
1108 | .write = ffs_epfile_write, | 1062 | .write = new_sync_write, |
1109 | .read = ffs_epfile_read, | 1063 | .read = new_sync_read, |
1110 | .aio_write = ffs_epfile_aio_write, | 1064 | .write_iter = ffs_epfile_write_iter, |
1111 | .aio_read = ffs_epfile_aio_read, | 1065 | .read_iter = ffs_epfile_read_iter, |
1112 | .release = ffs_epfile_release, | 1066 | .release = ffs_epfile_release, |
1113 | .unlocked_ioctl = ffs_epfile_ioctl, | 1067 | .unlocked_ioctl = ffs_epfile_ioctl, |
1114 | }; | 1068 | }; |
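Editor's note: the f_fs conversion above replaces hand-rolled walks over struct iovec arrays with iov_iter helpers: copy_to_iter()/copy_from_iter() advance the iterator and report how much was actually copied, and dup_iter() clones the iterator plus its segment array so an asynchronous request can keep using it after the submitter returns. The user-space model below reproduces what the removed per-segment loop was doing, i.e. the work the helper now hides; the function name is invented, not a kernel API.

/* Sketch: scatter one kernel buffer across an array of user segments --
 * the manual loop that copy_to_iter() replaces in the patch. */
#include <stdio.h>
#include <string.h>
#include <sys/uio.h>

static size_t copy_to_segments(const char *src, size_t len,
			       const struct iovec *iov, int nr_segs)
{
	size_t pos = 0;
	int i;

	for (i = 0; i < nr_segs && pos < len; i++) {
		size_t chunk = len - pos;

		if (chunk > iov[i].iov_len)
			chunk = iov[i].iov_len;
		memcpy(iov[i].iov_base, src + pos, chunk);
		pos += chunk;
	}
	return pos;             /* bytes copied; < len means the segments ran out */
}

int main(void)
{
	char a[4], b[8];
	struct iovec iov[2] = { { a, sizeof(a) }, { b, sizeof(b) } };
	const char data[] = "hello, gadget";
	size_t copied = copy_to_segments(data, sizeof(data) - 1, iov, 2);

	printf("copied %zu of %zu bytes\n", copied, sizeof(data) - 1);
	return 0;
}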
diff --git a/drivers/usb/gadget/legacy/inode.c b/drivers/usb/gadget/legacy/inode.c index db49ec4c748e..200f9a584064 100644 --- a/drivers/usb/gadget/legacy/inode.c +++ b/drivers/usb/gadget/legacy/inode.c | |||
@@ -74,6 +74,8 @@ MODULE_DESCRIPTION (DRIVER_DESC); | |||
74 | MODULE_AUTHOR ("David Brownell"); | 74 | MODULE_AUTHOR ("David Brownell"); |
75 | MODULE_LICENSE ("GPL"); | 75 | MODULE_LICENSE ("GPL"); |
76 | 76 | ||
77 | static int ep_open(struct inode *, struct file *); | ||
78 | |||
77 | 79 | ||
78 | /*----------------------------------------------------------------------*/ | 80 | /*----------------------------------------------------------------------*/ |
79 | 81 | ||
@@ -283,14 +285,15 @@ static void epio_complete (struct usb_ep *ep, struct usb_request *req) | |||
283 | * still need dev->lock to use epdata->ep. | 285 | * still need dev->lock to use epdata->ep. |
284 | */ | 286 | */ |
285 | static int | 287 | static int |
286 | get_ready_ep (unsigned f_flags, struct ep_data *epdata) | 288 | get_ready_ep (unsigned f_flags, struct ep_data *epdata, bool is_write) |
287 | { | 289 | { |
288 | int val; | 290 | int val; |
289 | 291 | ||
290 | if (f_flags & O_NONBLOCK) { | 292 | if (f_flags & O_NONBLOCK) { |
291 | if (!mutex_trylock(&epdata->lock)) | 293 | if (!mutex_trylock(&epdata->lock)) |
292 | goto nonblock; | 294 | goto nonblock; |
293 | if (epdata->state != STATE_EP_ENABLED) { | 295 | if (epdata->state != STATE_EP_ENABLED && |
296 | (!is_write || epdata->state != STATE_EP_READY)) { | ||
294 | mutex_unlock(&epdata->lock); | 297 | mutex_unlock(&epdata->lock); |
295 | nonblock: | 298 | nonblock: |
296 | val = -EAGAIN; | 299 | val = -EAGAIN; |
@@ -305,18 +308,20 @@ nonblock: | |||
305 | 308 | ||
306 | switch (epdata->state) { | 309 | switch (epdata->state) { |
307 | case STATE_EP_ENABLED: | 310 | case STATE_EP_ENABLED: |
311 | return 0; | ||
312 | case STATE_EP_READY: /* not configured yet */ | ||
313 | if (is_write) | ||
314 | return 0; | ||
315 | // FALLTHRU | ||
316 | case STATE_EP_UNBOUND: /* clean disconnect */ | ||
308 | break; | 317 | break; |
309 | // case STATE_EP_DISABLED: /* "can't happen" */ | 318 | // case STATE_EP_DISABLED: /* "can't happen" */ |
310 | // case STATE_EP_READY: /* "can't happen" */ | ||
311 | default: /* error! */ | 319 | default: /* error! */ |
312 | pr_debug ("%s: ep %p not available, state %d\n", | 320 | pr_debug ("%s: ep %p not available, state %d\n", |
313 | shortname, epdata, epdata->state); | 321 | shortname, epdata, epdata->state); |
314 | // FALLTHROUGH | ||
315 | case STATE_EP_UNBOUND: /* clean disconnect */ | ||
316 | val = -ENODEV; | ||
317 | mutex_unlock(&epdata->lock); | ||
318 | } | 322 | } |
319 | return val; | 323 | mutex_unlock(&epdata->lock); |
324 | return -ENODEV; | ||
320 | } | 325 | } |
321 | 326 | ||
322 | static ssize_t | 327 | static ssize_t |
@@ -363,97 +368,6 @@ ep_io (struct ep_data *epdata, void *buf, unsigned len) | |||
363 | return value; | 368 | return value; |
364 | } | 369 | } |
365 | 370 | ||
366 | |||
367 | /* handle a synchronous OUT bulk/intr/iso transfer */ | ||
368 | static ssize_t | ||
369 | ep_read (struct file *fd, char __user *buf, size_t len, loff_t *ptr) | ||
370 | { | ||
371 | struct ep_data *data = fd->private_data; | ||
372 | void *kbuf; | ||
373 | ssize_t value; | ||
374 | |||
375 | if ((value = get_ready_ep (fd->f_flags, data)) < 0) | ||
376 | return value; | ||
377 | |||
378 | /* halt any endpoint by doing a "wrong direction" i/o call */ | ||
379 | if (usb_endpoint_dir_in(&data->desc)) { | ||
380 | if (usb_endpoint_xfer_isoc(&data->desc)) { | ||
381 | mutex_unlock(&data->lock); | ||
382 | return -EINVAL; | ||
383 | } | ||
384 | DBG (data->dev, "%s halt\n", data->name); | ||
385 | spin_lock_irq (&data->dev->lock); | ||
386 | if (likely (data->ep != NULL)) | ||
387 | usb_ep_set_halt (data->ep); | ||
388 | spin_unlock_irq (&data->dev->lock); | ||
389 | mutex_unlock(&data->lock); | ||
390 | return -EBADMSG; | ||
391 | } | ||
392 | |||
393 | /* FIXME readahead for O_NONBLOCK and poll(); careful with ZLPs */ | ||
394 | |||
395 | value = -ENOMEM; | ||
396 | kbuf = kmalloc (len, GFP_KERNEL); | ||
397 | if (unlikely (!kbuf)) | ||
398 | goto free1; | ||
399 | |||
400 | value = ep_io (data, kbuf, len); | ||
401 | VDEBUG (data->dev, "%s read %zu OUT, status %d\n", | ||
402 | data->name, len, (int) value); | ||
403 | if (value >= 0 && copy_to_user (buf, kbuf, value)) | ||
404 | value = -EFAULT; | ||
405 | |||
406 | free1: | ||
407 | mutex_unlock(&data->lock); | ||
408 | kfree (kbuf); | ||
409 | return value; | ||
410 | } | ||
411 | |||
412 | /* handle a synchronous IN bulk/intr/iso transfer */ | ||
413 | static ssize_t | ||
414 | ep_write (struct file *fd, const char __user *buf, size_t len, loff_t *ptr) | ||
415 | { | ||
416 | struct ep_data *data = fd->private_data; | ||
417 | void *kbuf; | ||
418 | ssize_t value; | ||
419 | |||
420 | if ((value = get_ready_ep (fd->f_flags, data)) < 0) | ||
421 | return value; | ||
422 | |||
423 | /* halt any endpoint by doing a "wrong direction" i/o call */ | ||
424 | if (!usb_endpoint_dir_in(&data->desc)) { | ||
425 | if (usb_endpoint_xfer_isoc(&data->desc)) { | ||
426 | mutex_unlock(&data->lock); | ||
427 | return -EINVAL; | ||
428 | } | ||
429 | DBG (data->dev, "%s halt\n", data->name); | ||
430 | spin_lock_irq (&data->dev->lock); | ||
431 | if (likely (data->ep != NULL)) | ||
432 | usb_ep_set_halt (data->ep); | ||
433 | spin_unlock_irq (&data->dev->lock); | ||
434 | mutex_unlock(&data->lock); | ||
435 | return -EBADMSG; | ||
436 | } | ||
437 | |||
438 | /* FIXME writebehind for O_NONBLOCK and poll(), qlen = 1 */ | ||
439 | |||
440 | value = -ENOMEM; | ||
441 | kbuf = memdup_user(buf, len); | ||
442 | if (IS_ERR(kbuf)) { | ||
443 | value = PTR_ERR(kbuf); | ||
444 | kbuf = NULL; | ||
445 | goto free1; | ||
446 | } | ||
447 | |||
448 | value = ep_io (data, kbuf, len); | ||
449 | VDEBUG (data->dev, "%s write %zu IN, status %d\n", | ||
450 | data->name, len, (int) value); | ||
451 | free1: | ||
452 | mutex_unlock(&data->lock); | ||
453 | kfree (kbuf); | ||
454 | return value; | ||
455 | } | ||
456 | |||
457 | static int | 371 | static int |
458 | ep_release (struct inode *inode, struct file *fd) | 372 | ep_release (struct inode *inode, struct file *fd) |
459 | { | 373 | { |
@@ -481,7 +395,7 @@ static long ep_ioctl(struct file *fd, unsigned code, unsigned long value) | |||
481 | struct ep_data *data = fd->private_data; | 395 | struct ep_data *data = fd->private_data; |
482 | int status; | 396 | int status; |
483 | 397 | ||
484 | if ((status = get_ready_ep (fd->f_flags, data)) < 0) | 398 | if ((status = get_ready_ep (fd->f_flags, data, false)) < 0) |
485 | return status; | 399 | return status; |
486 | 400 | ||
487 | spin_lock_irq (&data->dev->lock); | 401 | spin_lock_irq (&data->dev->lock); |
@@ -517,8 +431,8 @@ struct kiocb_priv { | |||
517 | struct mm_struct *mm; | 431 | struct mm_struct *mm; |
518 | struct work_struct work; | 432 | struct work_struct work; |
519 | void *buf; | 433 | void *buf; |
520 | const struct iovec *iv; | 434 | struct iov_iter to; |
521 | unsigned long nr_segs; | 435 | const void *to_free; |
522 | unsigned actual; | 436 | unsigned actual; |
523 | }; | 437 | }; |
524 | 438 | ||
@@ -541,35 +455,6 @@ static int ep_aio_cancel(struct kiocb *iocb) | |||
541 | return value; | 455 | return value; |
542 | } | 456 | } |
543 | 457 | ||
544 | static ssize_t ep_copy_to_user(struct kiocb_priv *priv) | ||
545 | { | ||
546 | ssize_t len, total; | ||
547 | void *to_copy; | ||
548 | int i; | ||
549 | |||
550 | /* copy stuff into user buffers */ | ||
551 | total = priv->actual; | ||
552 | len = 0; | ||
553 | to_copy = priv->buf; | ||
554 | for (i=0; i < priv->nr_segs; i++) { | ||
555 | ssize_t this = min((ssize_t)(priv->iv[i].iov_len), total); | ||
556 | |||
557 | if (copy_to_user(priv->iv[i].iov_base, to_copy, this)) { | ||
558 | if (len == 0) | ||
559 | len = -EFAULT; | ||
560 | break; | ||
561 | } | ||
562 | |||
563 | total -= this; | ||
564 | len += this; | ||
565 | to_copy += this; | ||
566 | if (total == 0) | ||
567 | break; | ||
568 | } | ||
569 | |||
570 | return len; | ||
571 | } | ||
572 | |||
573 | static void ep_user_copy_worker(struct work_struct *work) | 458 | static void ep_user_copy_worker(struct work_struct *work) |
574 | { | 459 | { |
575 | struct kiocb_priv *priv = container_of(work, struct kiocb_priv, work); | 460 | struct kiocb_priv *priv = container_of(work, struct kiocb_priv, work); |
@@ -578,13 +463,16 @@ static void ep_user_copy_worker(struct work_struct *work) | |||
578 | size_t ret; | 463 | size_t ret; |
579 | 464 | ||
580 | use_mm(mm); | 465 | use_mm(mm); |
581 | ret = ep_copy_to_user(priv); | 466 | ret = copy_to_iter(priv->buf, priv->actual, &priv->to); |
582 | unuse_mm(mm); | 467 | unuse_mm(mm); |
468 | if (!ret) | ||
469 | ret = -EFAULT; | ||
583 | 470 | ||
584 | /* completing the iocb can drop the ctx and mm, don't touch mm after */ | 471 | /* completing the iocb can drop the ctx and mm, don't touch mm after */ |
585 | aio_complete(iocb, ret, ret); | 472 | aio_complete(iocb, ret, ret); |
586 | 473 | ||
587 | kfree(priv->buf); | 474 | kfree(priv->buf); |
475 | kfree(priv->to_free); | ||
588 | kfree(priv); | 476 | kfree(priv); |
589 | } | 477 | } |
590 | 478 | ||
@@ -603,8 +491,9 @@ static void ep_aio_complete(struct usb_ep *ep, struct usb_request *req) | |||
603 | * don't need to copy anything to userspace, so we can | 491 | * don't need to copy anything to userspace, so we can |
604 | * complete the aio request immediately. | 492 | * complete the aio request immediately. |
605 | */ | 493 | */ |
606 | if (priv->iv == NULL || unlikely(req->actual == 0)) { | 494 | if (priv->to_free == NULL || unlikely(req->actual == 0)) { |
607 | kfree(req->buf); | 495 | kfree(req->buf); |
496 | kfree(priv->to_free); | ||
608 | kfree(priv); | 497 | kfree(priv); |
609 | iocb->private = NULL; | 498 | iocb->private = NULL; |
610 | /* aio_complete() reports bytes-transferred _and_ faults */ | 499 | /* aio_complete() reports bytes-transferred _and_ faults */ |
@@ -618,6 +507,7 @@ static void ep_aio_complete(struct usb_ep *ep, struct usb_request *req) | |||
618 | 507 | ||
619 | priv->buf = req->buf; | 508 | priv->buf = req->buf; |
620 | priv->actual = req->actual; | 509 | priv->actual = req->actual; |
510 | INIT_WORK(&priv->work, ep_user_copy_worker); | ||
621 | schedule_work(&priv->work); | 511 | schedule_work(&priv->work); |
622 | } | 512 | } |
623 | spin_unlock(&epdata->dev->lock); | 513 | spin_unlock(&epdata->dev->lock); |
@@ -626,38 +516,17 @@ static void ep_aio_complete(struct usb_ep *ep, struct usb_request *req) | |||
626 | put_ep(epdata); | 516 | put_ep(epdata); |
627 | } | 517 | } |
628 | 518 | ||
629 | static ssize_t | 519 | static ssize_t ep_aio(struct kiocb *iocb, |
630 | ep_aio_rwtail( | 520 | struct kiocb_priv *priv, |
631 | struct kiocb *iocb, | 521 | struct ep_data *epdata, |
632 | char *buf, | 522 | char *buf, |
633 | size_t len, | 523 | size_t len) |
634 | struct ep_data *epdata, | ||
635 | const struct iovec *iv, | ||
636 | unsigned long nr_segs | ||
637 | ) | ||
638 | { | 524 | { |
639 | struct kiocb_priv *priv; | 525 | struct usb_request *req; |
640 | struct usb_request *req; | 526 | ssize_t value; |
641 | ssize_t value; | ||
642 | 527 | ||
643 | priv = kmalloc(sizeof *priv, GFP_KERNEL); | ||
644 | if (!priv) { | ||
645 | value = -ENOMEM; | ||
646 | fail: | ||
647 | kfree(buf); | ||
648 | return value; | ||
649 | } | ||
650 | iocb->private = priv; | 528 | iocb->private = priv; |
651 | priv->iocb = iocb; | 529 | priv->iocb = iocb; |
652 | priv->iv = iv; | ||
653 | priv->nr_segs = nr_segs; | ||
654 | INIT_WORK(&priv->work, ep_user_copy_worker); | ||
655 | |||
656 | value = get_ready_ep(iocb->ki_filp->f_flags, epdata); | ||
657 | if (unlikely(value < 0)) { | ||
658 | kfree(priv); | ||
659 | goto fail; | ||
660 | } | ||
661 | 530 | ||
662 | kiocb_set_cancel_fn(iocb, ep_aio_cancel); | 531 | kiocb_set_cancel_fn(iocb, ep_aio_cancel); |
663 | get_ep(epdata); | 532 | get_ep(epdata); |
@@ -669,75 +538,154 @@ fail: | |||
669 | * allocate or submit those if the host disconnected. | 538 | * allocate or submit those if the host disconnected. |
670 | */ | 539 | */ |
671 | spin_lock_irq(&epdata->dev->lock); | 540 | spin_lock_irq(&epdata->dev->lock); |
672 | if (likely(epdata->ep)) { | 541 | value = -ENODEV; |
673 | req = usb_ep_alloc_request(epdata->ep, GFP_ATOMIC); | 542 | if (unlikely(!epdata->ep)) |

674 | if (likely(req)) { | 543 | goto fail; |
675 | priv->req = req; | ||
676 | req->buf = buf; | ||
677 | req->length = len; | ||
678 | req->complete = ep_aio_complete; | ||
679 | req->context = iocb; | ||
680 | value = usb_ep_queue(epdata->ep, req, GFP_ATOMIC); | ||
681 | if (unlikely(0 != value)) | ||
682 | usb_ep_free_request(epdata->ep, req); | ||
683 | } else | ||
684 | value = -EAGAIN; | ||
685 | } else | ||
686 | value = -ENODEV; | ||
687 | spin_unlock_irq(&epdata->dev->lock); | ||
688 | 544 | ||
689 | mutex_unlock(&epdata->lock); | 545 | req = usb_ep_alloc_request(epdata->ep, GFP_ATOMIC); |
546 | value = -ENOMEM; | ||
547 | if (unlikely(!req)) | ||
548 | goto fail; | ||
690 | 549 | ||
691 | if (unlikely(value)) { | 550 | priv->req = req; |
692 | kfree(priv); | 551 | req->buf = buf; |
693 | put_ep(epdata); | 552 | req->length = len; |
694 | } else | 553 | req->complete = ep_aio_complete; |
695 | value = -EIOCBQUEUED; | 554 | req->context = iocb; |
555 | value = usb_ep_queue(epdata->ep, req, GFP_ATOMIC); | ||
556 | if (unlikely(0 != value)) { | ||
557 | usb_ep_free_request(epdata->ep, req); | ||
558 | goto fail; | ||
559 | } | ||
560 | spin_unlock_irq(&epdata->dev->lock); | ||
561 | return -EIOCBQUEUED; | ||
562 | |||
563 | fail: | ||
564 | spin_unlock_irq(&epdata->dev->lock); | ||
565 | kfree(priv->to_free); | ||
566 | kfree(priv); | ||
567 | put_ep(epdata); | ||
696 | return value; | 568 | return value; |
697 | } | 569 | } |
698 | 570 | ||
699 | static ssize_t | 571 | static ssize_t |
700 | ep_aio_read(struct kiocb *iocb, const struct iovec *iov, | 572 | ep_read_iter(struct kiocb *iocb, struct iov_iter *to) |
701 | unsigned long nr_segs, loff_t o) | ||
702 | { | 573 | { |
703 | struct ep_data *epdata = iocb->ki_filp->private_data; | 574 | struct file *file = iocb->ki_filp; |
704 | char *buf; | 575 | struct ep_data *epdata = file->private_data; |
576 | size_t len = iov_iter_count(to); | ||
577 | ssize_t value; | ||
578 | char *buf; | ||
705 | 579 | ||
706 | if (unlikely(usb_endpoint_dir_in(&epdata->desc))) | 580 | if ((value = get_ready_ep(file->f_flags, epdata, false)) < 0) |
707 | return -EINVAL; | 581 | return value; |
708 | 582 | ||
709 | buf = kmalloc(iocb->ki_nbytes, GFP_KERNEL); | 583 | /* halt any endpoint by doing a "wrong direction" i/o call */ |
710 | if (unlikely(!buf)) | 584 | if (usb_endpoint_dir_in(&epdata->desc)) { |
711 | return -ENOMEM; | 585 | if (usb_endpoint_xfer_isoc(&epdata->desc) || |
586 | !is_sync_kiocb(iocb)) { | ||
587 | mutex_unlock(&epdata->lock); | ||
588 | return -EINVAL; | ||
589 | } | ||
590 | DBG (epdata->dev, "%s halt\n", epdata->name); | ||
591 | spin_lock_irq(&epdata->dev->lock); | ||
592 | if (likely(epdata->ep != NULL)) | ||
593 | usb_ep_set_halt(epdata->ep); | ||
594 | spin_unlock_irq(&epdata->dev->lock); | ||
595 | mutex_unlock(&epdata->lock); | ||
596 | return -EBADMSG; | ||
597 | } | ||
712 | 598 | ||
713 | return ep_aio_rwtail(iocb, buf, iocb->ki_nbytes, epdata, iov, nr_segs); | 599 | buf = kmalloc(len, GFP_KERNEL); |
600 | if (unlikely(!buf)) { | ||
601 | mutex_unlock(&epdata->lock); | ||
602 | return -ENOMEM; | ||
603 | } | ||
604 | if (is_sync_kiocb(iocb)) { | ||
605 | value = ep_io(epdata, buf, len); | ||
606 | if (value >= 0 && copy_to_iter(buf, value, to)) | ||
607 | value = -EFAULT; | ||
608 | } else { | ||
609 | struct kiocb_priv *priv = kzalloc(sizeof *priv, GFP_KERNEL); | ||
610 | value = -ENOMEM; | ||
611 | if (!priv) | ||
612 | goto fail; | ||
613 | priv->to_free = dup_iter(&priv->to, to, GFP_KERNEL); | ||
614 | if (!priv->to_free) { | ||
615 | kfree(priv); | ||
616 | goto fail; | ||
617 | } | ||
618 | value = ep_aio(iocb, priv, epdata, buf, len); | ||
619 | if (value == -EIOCBQUEUED) | ||
620 | buf = NULL; | ||
621 | } | ||
622 | fail: | ||
623 | kfree(buf); | ||
624 | mutex_unlock(&epdata->lock); | ||
625 | return value; | ||
714 | } | 626 | } |
715 | 627 | ||
628 | static ssize_t ep_config(struct ep_data *, const char *, size_t); | ||
629 | |||
716 | static ssize_t | 630 | static ssize_t |
717 | ep_aio_write(struct kiocb *iocb, const struct iovec *iov, | 631 | ep_write_iter(struct kiocb *iocb, struct iov_iter *from) |
718 | unsigned long nr_segs, loff_t o) | ||
719 | { | 632 | { |
720 | struct ep_data *epdata = iocb->ki_filp->private_data; | 633 | struct file *file = iocb->ki_filp; |
721 | char *buf; | 634 | struct ep_data *epdata = file->private_data; |
722 | size_t len = 0; | 635 | size_t len = iov_iter_count(from); |
723 | int i = 0; | 636 | bool configured; |
637 | ssize_t value; | ||
638 | char *buf; | ||
639 | |||
640 | if ((value = get_ready_ep(file->f_flags, epdata, true)) < 0) | ||
641 | return value; | ||
724 | 642 | ||
725 | if (unlikely(!usb_endpoint_dir_in(&epdata->desc))) | 643 | configured = epdata->state == STATE_EP_ENABLED; |
726 | return -EINVAL; | ||
727 | 644 | ||
728 | buf = kmalloc(iocb->ki_nbytes, GFP_KERNEL); | 645 | /* halt any endpoint by doing a "wrong direction" i/o call */ |
729 | if (unlikely(!buf)) | 646 | if (configured && !usb_endpoint_dir_in(&epdata->desc)) { |
647 | if (usb_endpoint_xfer_isoc(&epdata->desc) || | ||
648 | !is_sync_kiocb(iocb)) { | ||
649 | mutex_unlock(&epdata->lock); | ||
650 | return -EINVAL; | ||
651 | } | ||
652 | DBG (epdata->dev, "%s halt\n", epdata->name); | ||
653 | spin_lock_irq(&epdata->dev->lock); | ||
654 | if (likely(epdata->ep != NULL)) | ||
655 | usb_ep_set_halt(epdata->ep); | ||
656 | spin_unlock_irq(&epdata->dev->lock); | ||
657 | mutex_unlock(&epdata->lock); | ||
658 | return -EBADMSG; | ||
659 | } | ||
660 | |||
661 | buf = kmalloc(len, GFP_KERNEL); | ||
662 | if (unlikely(!buf)) { | ||
663 | mutex_unlock(&epdata->lock); | ||
730 | return -ENOMEM; | 664 | return -ENOMEM; |
665 | } | ||
731 | 666 | ||
732 | for (i=0; i < nr_segs; i++) { | 667 | if (unlikely(copy_from_iter(buf, len, from) != len)) { |
733 | if (unlikely(copy_from_user(&buf[len], iov[i].iov_base, | 668 | value = -EFAULT; |
734 | iov[i].iov_len) != 0)) { | 669 | goto out; |
735 | kfree(buf); | 670 | } |
736 | return -EFAULT; | 671 | |
672 | if (unlikely(!configured)) { | ||
673 | value = ep_config(epdata, buf, len); | ||
674 | } else if (is_sync_kiocb(iocb)) { | ||
675 | value = ep_io(epdata, buf, len); | ||
676 | } else { | ||
677 | struct kiocb_priv *priv = kzalloc(sizeof *priv, GFP_KERNEL); | ||
678 | value = -ENOMEM; | ||
679 | if (priv) { | ||
680 | value = ep_aio(iocb, priv, epdata, buf, len); | ||
681 | if (value == -EIOCBQUEUED) | ||
682 | buf = NULL; | ||
737 | } | 683 | } |
738 | len += iov[i].iov_len; | ||
739 | } | 684 | } |
740 | return ep_aio_rwtail(iocb, buf, len, epdata, NULL, 0); | 685 | out: |
686 | kfree(buf); | ||
687 | mutex_unlock(&epdata->lock); | ||
688 | return value; | ||
741 | } | 689 | } |
742 | 690 | ||
743 | /*----------------------------------------------------------------------*/ | 691 | /*----------------------------------------------------------------------*/ |
@@ -745,15 +693,15 @@ ep_aio_write(struct kiocb *iocb, const struct iovec *iov, | |||
745 | /* used after endpoint configuration */ | 693 | /* used after endpoint configuration */ |
746 | static const struct file_operations ep_io_operations = { | 694 | static const struct file_operations ep_io_operations = { |
747 | .owner = THIS_MODULE, | 695 | .owner = THIS_MODULE, |
748 | .llseek = no_llseek, | ||
749 | 696 | ||
750 | .read = ep_read, | 697 | .open = ep_open, |
751 | .write = ep_write, | ||
752 | .unlocked_ioctl = ep_ioctl, | ||
753 | .release = ep_release, | 698 | .release = ep_release, |
754 | 699 | .llseek = no_llseek, | |
755 | .aio_read = ep_aio_read, | 700 | .read = new_sync_read, |
756 | .aio_write = ep_aio_write, | 701 | .write = new_sync_write, |
702 | .unlocked_ioctl = ep_ioctl, | ||
703 | .read_iter = ep_read_iter, | ||
704 | .write_iter = ep_write_iter, | ||
757 | }; | 705 | }; |
758 | 706 | ||
759 | /* ENDPOINT INITIALIZATION | 707 | /* ENDPOINT INITIALIZATION |
@@ -770,17 +718,12 @@ static const struct file_operations ep_io_operations = { | |||
770 | * speed descriptor, then optional high speed descriptor. | 718 | * speed descriptor, then optional high speed descriptor. |
771 | */ | 719 | */ |
772 | static ssize_t | 720 | static ssize_t |
773 | ep_config (struct file *fd, const char __user *buf, size_t len, loff_t *ptr) | 721 | ep_config (struct ep_data *data, const char *buf, size_t len) |
774 | { | 722 | { |
775 | struct ep_data *data = fd->private_data; | ||
776 | struct usb_ep *ep; | 723 | struct usb_ep *ep; |
777 | u32 tag; | 724 | u32 tag; |
778 | int value, length = len; | 725 | int value, length = len; |
779 | 726 | ||
780 | value = mutex_lock_interruptible(&data->lock); | ||
781 | if (value < 0) | ||
782 | return value; | ||
783 | |||
784 | if (data->state != STATE_EP_READY) { | 727 | if (data->state != STATE_EP_READY) { |
785 | value = -EL2HLT; | 728 | value = -EL2HLT; |
786 | goto fail; | 729 | goto fail; |
@@ -791,9 +734,7 @@ ep_config (struct file *fd, const char __user *buf, size_t len, loff_t *ptr) | |||
791 | goto fail0; | 734 | goto fail0; |
792 | 735 | ||
793 | /* we might need to change message format someday */ | 736 | /* we might need to change message format someday */ |
794 | if (copy_from_user (&tag, buf, 4)) { | 737 | memcpy(&tag, buf, 4); |
795 | goto fail1; | ||
796 | } | ||
797 | if (tag != 1) { | 738 | if (tag != 1) { |
798 | DBG(data->dev, "config %s, bad tag %d\n", data->name, tag); | 739 | DBG(data->dev, "config %s, bad tag %d\n", data->name, tag); |
799 | goto fail0; | 740 | goto fail0; |
@@ -806,19 +747,15 @@ ep_config (struct file *fd, const char __user *buf, size_t len, loff_t *ptr) | |||
806 | */ | 747 | */ |
807 | 748 | ||
808 | /* full/low speed descriptor, then high speed */ | 749 | /* full/low speed descriptor, then high speed */ |
809 | if (copy_from_user (&data->desc, buf, USB_DT_ENDPOINT_SIZE)) { | 750 | memcpy(&data->desc, buf, USB_DT_ENDPOINT_SIZE); |
810 | goto fail1; | ||
811 | } | ||
812 | if (data->desc.bLength != USB_DT_ENDPOINT_SIZE | 751 | if (data->desc.bLength != USB_DT_ENDPOINT_SIZE |
813 | || data->desc.bDescriptorType != USB_DT_ENDPOINT) | 752 | || data->desc.bDescriptorType != USB_DT_ENDPOINT) |
814 | goto fail0; | 753 | goto fail0; |
815 | if (len != USB_DT_ENDPOINT_SIZE) { | 754 | if (len != USB_DT_ENDPOINT_SIZE) { |
816 | if (len != 2 * USB_DT_ENDPOINT_SIZE) | 755 | if (len != 2 * USB_DT_ENDPOINT_SIZE) |
817 | goto fail0; | 756 | goto fail0; |
818 | if (copy_from_user (&data->hs_desc, buf + USB_DT_ENDPOINT_SIZE, | 757 | memcpy(&data->hs_desc, buf + USB_DT_ENDPOINT_SIZE, |
819 | USB_DT_ENDPOINT_SIZE)) { | 758 | USB_DT_ENDPOINT_SIZE); |
820 | goto fail1; | ||
821 | } | ||
822 | if (data->hs_desc.bLength != USB_DT_ENDPOINT_SIZE | 759 | if (data->hs_desc.bLength != USB_DT_ENDPOINT_SIZE |
823 | || data->hs_desc.bDescriptorType | 760 | || data->hs_desc.bDescriptorType |
824 | != USB_DT_ENDPOINT) { | 761 | != USB_DT_ENDPOINT) { |
@@ -840,24 +777,20 @@ ep_config (struct file *fd, const char __user *buf, size_t len, loff_t *ptr) | |||
840 | case USB_SPEED_LOW: | 777 | case USB_SPEED_LOW: |
841 | case USB_SPEED_FULL: | 778 | case USB_SPEED_FULL: |
842 | ep->desc = &data->desc; | 779 | ep->desc = &data->desc; |
843 | value = usb_ep_enable(ep); | ||
844 | if (value == 0) | ||
845 | data->state = STATE_EP_ENABLED; | ||
846 | break; | 780 | break; |
847 | case USB_SPEED_HIGH: | 781 | case USB_SPEED_HIGH: |
848 | /* fails if caller didn't provide that descriptor... */ | 782 | /* fails if caller didn't provide that descriptor... */ |
849 | ep->desc = &data->hs_desc; | 783 | ep->desc = &data->hs_desc; |
850 | value = usb_ep_enable(ep); | ||
851 | if (value == 0) | ||
852 | data->state = STATE_EP_ENABLED; | ||
853 | break; | 784 | break; |
854 | default: | 785 | default: |
855 | DBG(data->dev, "unconnected, %s init abandoned\n", | 786 | DBG(data->dev, "unconnected, %s init abandoned\n", |
856 | data->name); | 787 | data->name); |
857 | value = -EINVAL; | 788 | value = -EINVAL; |
789 | goto gone; | ||
858 | } | 790 | } |
791 | value = usb_ep_enable(ep); | ||
859 | if (value == 0) { | 792 | if (value == 0) { |
860 | fd->f_op = &ep_io_operations; | 793 | data->state = STATE_EP_ENABLED; |
861 | value = length; | 794 | value = length; |
862 | } | 795 | } |
863 | gone: | 796 | gone: |
@@ -867,14 +800,10 @@ fail: | |||
867 | data->desc.bDescriptorType = 0; | 800 | data->desc.bDescriptorType = 0; |
868 | data->hs_desc.bDescriptorType = 0; | 801 | data->hs_desc.bDescriptorType = 0; |
869 | } | 802 | } |
870 | mutex_unlock(&data->lock); | ||
871 | return value; | 803 | return value; |
872 | fail0: | 804 | fail0: |
873 | value = -EINVAL; | 805 | value = -EINVAL; |
874 | goto fail; | 806 | goto fail; |
875 | fail1: | ||
876 | value = -EFAULT; | ||
877 | goto fail; | ||
878 | } | 807 | } |
879 | 808 | ||
880 | static int | 809 | static int |
@@ -902,15 +831,6 @@ ep_open (struct inode *inode, struct file *fd) | |||
902 | return value; | 831 | return value; |
903 | } | 832 | } |
904 | 833 | ||
905 | /* used before endpoint configuration */ | ||
906 | static const struct file_operations ep_config_operations = { | ||
907 | .llseek = no_llseek, | ||
908 | |||
909 | .open = ep_open, | ||
910 | .write = ep_config, | ||
911 | .release = ep_release, | ||
912 | }; | ||
913 | |||
914 | /*----------------------------------------------------------------------*/ | 834 | /*----------------------------------------------------------------------*/ |
915 | 835 | ||
916 | /* EP0 IMPLEMENTATION can be partly in userspace. | 836 | /* EP0 IMPLEMENTATION can be partly in userspace. |
@@ -989,6 +909,10 @@ ep0_read (struct file *fd, char __user *buf, size_t len, loff_t *ptr) | |||
989 | enum ep0_state state; | 909 | enum ep0_state state; |
990 | 910 | ||
991 | spin_lock_irq (&dev->lock); | 911 | spin_lock_irq (&dev->lock); |
912 | if (dev->state <= STATE_DEV_OPENED) { | ||
913 | retval = -EINVAL; | ||
914 | goto done; | ||
915 | } | ||
992 | 916 | ||
993 | /* report fd mode change before acting on it */ | 917 | /* report fd mode change before acting on it */ |
994 | if (dev->setup_abort) { | 918 | if (dev->setup_abort) { |
@@ -1187,8 +1111,6 @@ ep0_write (struct file *fd, const char __user *buf, size_t len, loff_t *ptr) | |||
1187 | struct dev_data *dev = fd->private_data; | 1111 | struct dev_data *dev = fd->private_data; |
1188 | ssize_t retval = -ESRCH; | 1112 | ssize_t retval = -ESRCH; |
1189 | 1113 | ||
1190 | spin_lock_irq (&dev->lock); | ||
1191 | |||
1192 | /* report fd mode change before acting on it */ | 1114 | /* report fd mode change before acting on it */ |
1193 | if (dev->setup_abort) { | 1115 | if (dev->setup_abort) { |
1194 | dev->setup_abort = 0; | 1116 | dev->setup_abort = 0; |
@@ -1234,7 +1156,6 @@ ep0_write (struct file *fd, const char __user *buf, size_t len, loff_t *ptr) | |||
1234 | } else | 1156 | } else |
1235 | DBG (dev, "fail %s, state %d\n", __func__, dev->state); | 1157 | DBG (dev, "fail %s, state %d\n", __func__, dev->state); |
1236 | 1158 | ||
1237 | spin_unlock_irq (&dev->lock); | ||
1238 | return retval; | 1159 | return retval; |
1239 | } | 1160 | } |
1240 | 1161 | ||
@@ -1281,6 +1202,9 @@ ep0_poll (struct file *fd, poll_table *wait) | |||
1281 | struct dev_data *dev = fd->private_data; | 1202 | struct dev_data *dev = fd->private_data; |
1282 | int mask = 0; | 1203 | int mask = 0; |
1283 | 1204 | ||
1205 | if (dev->state <= STATE_DEV_OPENED) | ||
1206 | return DEFAULT_POLLMASK; | ||
1207 | |||
1284 | poll_wait(fd, &dev->wait, wait); | 1208 | poll_wait(fd, &dev->wait, wait); |
1285 | 1209 | ||
1286 | spin_lock_irq (&dev->lock); | 1210 | spin_lock_irq (&dev->lock); |
@@ -1316,19 +1240,6 @@ static long dev_ioctl (struct file *fd, unsigned code, unsigned long value) | |||
1316 | return ret; | 1240 | return ret; |
1317 | } | 1241 | } |
1318 | 1242 | ||
1319 | /* used after device configuration */ | ||
1320 | static const struct file_operations ep0_io_operations = { | ||
1321 | .owner = THIS_MODULE, | ||
1322 | .llseek = no_llseek, | ||
1323 | |||
1324 | .read = ep0_read, | ||
1325 | .write = ep0_write, | ||
1326 | .fasync = ep0_fasync, | ||
1327 | .poll = ep0_poll, | ||
1328 | .unlocked_ioctl = dev_ioctl, | ||
1329 | .release = dev_release, | ||
1330 | }; | ||
1331 | |||
1332 | /*----------------------------------------------------------------------*/ | 1243 | /*----------------------------------------------------------------------*/ |
1333 | 1244 | ||
1334 | /* The in-kernel gadget driver handles most ep0 issues, in particular | 1245 | /* The in-kernel gadget driver handles most ep0 issues, in particular |
@@ -1650,7 +1561,7 @@ static int activate_ep_files (struct dev_data *dev) | |||
1650 | goto enomem1; | 1561 | goto enomem1; |
1651 | 1562 | ||
1652 | data->dentry = gadgetfs_create_file (dev->sb, data->name, | 1563 | data->dentry = gadgetfs_create_file (dev->sb, data->name, |
1653 | data, &ep_config_operations); | 1564 | data, &ep_io_operations); |
1654 | if (!data->dentry) | 1565 | if (!data->dentry) |
1655 | goto enomem2; | 1566 | goto enomem2; |
1656 | list_add_tail (&data->epfiles, &dev->epfiles); | 1567 | list_add_tail (&data->epfiles, &dev->epfiles); |
@@ -1852,6 +1763,14 @@ dev_config (struct file *fd, const char __user *buf, size_t len, loff_t *ptr) | |||
1852 | u32 tag; | 1763 | u32 tag; |
1853 | char *kbuf; | 1764 | char *kbuf; |
1854 | 1765 | ||
1766 | spin_lock_irq(&dev->lock); | ||
1767 | if (dev->state > STATE_DEV_OPENED) { | ||
1768 | value = ep0_write(fd, buf, len, ptr); | ||
1769 | spin_unlock_irq(&dev->lock); | ||
1770 | return value; | ||
1771 | } | ||
1772 | spin_unlock_irq(&dev->lock); | ||
1773 | |||
1855 | if (len < (USB_DT_CONFIG_SIZE + USB_DT_DEVICE_SIZE + 4)) | 1774 | if (len < (USB_DT_CONFIG_SIZE + USB_DT_DEVICE_SIZE + 4)) |
1856 | return -EINVAL; | 1775 | return -EINVAL; |
1857 | 1776 | ||
@@ -1925,7 +1844,6 @@ dev_config (struct file *fd, const char __user *buf, size_t len, loff_t *ptr) | |||
1925 | * on, they can work ... except in cleanup paths that | 1844 | * on, they can work ... except in cleanup paths that |
1926 | * kick in after the ep0 descriptor is closed. | 1845 | * kick in after the ep0 descriptor is closed. |
1927 | */ | 1846 | */ |
1928 | fd->f_op = &ep0_io_operations; | ||
1929 | value = len; | 1847 | value = len; |
1930 | } | 1848 | } |
1931 | return value; | 1849 | return value; |
@@ -1956,12 +1874,14 @@ dev_open (struct inode *inode, struct file *fd) | |||
1956 | return value; | 1874 | return value; |
1957 | } | 1875 | } |
1958 | 1876 | ||
1959 | static const struct file_operations dev_init_operations = { | 1877 | static const struct file_operations ep0_operations = { |
1960 | .llseek = no_llseek, | 1878 | .llseek = no_llseek, |
1961 | 1879 | ||
1962 | .open = dev_open, | 1880 | .open = dev_open, |
1881 | .read = ep0_read, | ||
1963 | .write = dev_config, | 1882 | .write = dev_config, |
1964 | .fasync = ep0_fasync, | 1883 | .fasync = ep0_fasync, |
1884 | .poll = ep0_poll, | ||
1965 | .unlocked_ioctl = dev_ioctl, | 1885 | .unlocked_ioctl = dev_ioctl, |
1966 | .release = dev_release, | 1886 | .release = dev_release, |
1967 | }; | 1887 | }; |
@@ -2077,7 +1997,7 @@ gadgetfs_fill_super (struct super_block *sb, void *opts, int silent) | |||
2077 | goto Enomem; | 1997 | goto Enomem; |
2078 | 1998 | ||
2079 | dev->sb = sb; | 1999 | dev->sb = sb; |
2080 | dev->dentry = gadgetfs_create_file(sb, CHIP, dev, &dev_init_operations); | 2000 | dev->dentry = gadgetfs_create_file(sb, CHIP, dev, &ep0_operations); |
2081 | if (!dev->dentry) { | 2001 | if (!dev->dentry) { |
2082 | put_dev(dev); | 2002 | put_dev(dev); |
2083 | goto Enomem; | 2003 | goto Enomem; |
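Editor's note: in the new ep_read_iter()/ep_write_iter() above, is_sync_kiocb() decides whether the I/O completes in place or gets queued: the synchronous path copies through the caller's iterator directly, while the AIO path first duplicates the iterator's segment list with dup_iter(), because the caller's iovec array is gone by the time the completion worker runs. The compact user-space model below shows that duplication rule; every name in it is illustrative and not part of the gadgetfs code.

/* Sketch: deferred completion must not keep pointers into the submitter's
 * stack -- clone the segment array up front, as dup_iter() does. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/uio.h>

struct deferred_io {
	struct iovec *segs;     /* private copy, safe to use after the caller returns */
	int nr_segs;
};

static struct deferred_io *queue_read(const struct iovec *iov, int nr_segs)
{
	struct deferred_io *io = malloc(sizeof(*io));

	if (!io)
		return NULL;
	io->segs = malloc(nr_segs * sizeof(*iov));
	if (!io->segs) {
		free(io);
		return NULL;
	}
	memcpy(io->segs, iov, nr_segs * sizeof(*iov));   /* the dup_iter() step */
	io->nr_segs = nr_segs;
	return io;
}

int main(void)
{
	char buf[16];
	struct iovec on_stack = { buf, sizeof(buf) };    /* dies with the caller */
	struct deferred_io *io = queue_read(&on_stack, 1);

	if (!io)
		return 1;
	printf("queued %d segment(s), first of %zu bytes\n",
	       io->nr_segs, io->segs[0].iov_len);
	free(io->segs);
	free(io);
	return 0;
}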
diff --git a/drivers/video/fbdev/amba-clcd.c b/drivers/video/fbdev/amba-clcd.c index 32c0b6b28097..9362424c2340 100644 --- a/drivers/video/fbdev/amba-clcd.c +++ b/drivers/video/fbdev/amba-clcd.c | |||
@@ -599,6 +599,9 @@ static int clcdfb_of_get_mode(struct device *dev, struct device_node *endpoint, | |||
599 | 599 | ||
600 | len = clcdfb_snprintf_mode(NULL, 0, mode); | 600 | len = clcdfb_snprintf_mode(NULL, 0, mode); |
601 | name = devm_kzalloc(dev, len + 1, GFP_KERNEL); | 601 | name = devm_kzalloc(dev, len + 1, GFP_KERNEL); |
602 | if (!name) | ||
603 | return -ENOMEM; | ||
604 | |||
602 | clcdfb_snprintf_mode(name, len + 1, mode); | 605 | clcdfb_snprintf_mode(name, len + 1, mode); |
603 | mode->name = name; | 606 | mode->name = name; |
604 | 607 | ||
diff --git a/drivers/video/fbdev/core/fbmon.c b/drivers/video/fbdev/core/fbmon.c index 95338593ebf4..868facdec638 100644 --- a/drivers/video/fbdev/core/fbmon.c +++ b/drivers/video/fbdev/core/fbmon.c | |||
@@ -624,9 +624,6 @@ static struct fb_videomode *fb_create_modedb(unsigned char *edid, int *dbsize, | |||
624 | int num = 0, i, first = 1; | 624 | int num = 0, i, first = 1; |
625 | int ver, rev; | 625 | int ver, rev; |
626 | 626 | ||
627 | ver = edid[EDID_STRUCT_VERSION]; | ||
628 | rev = edid[EDID_STRUCT_REVISION]; | ||
629 | |||
630 | mode = kzalloc(50 * sizeof(struct fb_videomode), GFP_KERNEL); | 627 | mode = kzalloc(50 * sizeof(struct fb_videomode), GFP_KERNEL); |
631 | if (mode == NULL) | 628 | if (mode == NULL) |
632 | return NULL; | 629 | return NULL; |
@@ -637,6 +634,9 @@ static struct fb_videomode *fb_create_modedb(unsigned char *edid, int *dbsize, | |||
637 | return NULL; | 634 | return NULL; |
638 | } | 635 | } |
639 | 636 | ||
637 | ver = edid[EDID_STRUCT_VERSION]; | ||
638 | rev = edid[EDID_STRUCT_REVISION]; | ||
639 | |||
640 | *dbsize = 0; | 640 | *dbsize = 0; |
641 | 641 | ||
642 | DPRINTK(" Detailed Timings\n"); | 642 | DPRINTK(" Detailed Timings\n"); |
diff --git a/drivers/video/fbdev/omap2/dss/display-sysfs.c b/drivers/video/fbdev/omap2/dss/display-sysfs.c index 5a2095a98ed8..12186557a9d4 100644 --- a/drivers/video/fbdev/omap2/dss/display-sysfs.c +++ b/drivers/video/fbdev/omap2/dss/display-sysfs.c | |||
@@ -28,44 +28,22 @@ | |||
28 | #include <video/omapdss.h> | 28 | #include <video/omapdss.h> |
29 | #include "dss.h" | 29 | #include "dss.h" |
30 | 30 | ||
31 | static struct omap_dss_device *to_dss_device_sysfs(struct device *dev) | 31 | static ssize_t display_name_show(struct omap_dss_device *dssdev, char *buf) |
32 | { | 32 | { |
33 | struct omap_dss_device *dssdev = NULL; | ||
34 | |||
35 | for_each_dss_dev(dssdev) { | ||
36 | if (dssdev->dev == dev) { | ||
37 | omap_dss_put_device(dssdev); | ||
38 | return dssdev; | ||
39 | } | ||
40 | } | ||
41 | |||
42 | return NULL; | ||
43 | } | ||
44 | |||
45 | static ssize_t display_name_show(struct device *dev, | ||
46 | struct device_attribute *attr, char *buf) | ||
47 | { | ||
48 | struct omap_dss_device *dssdev = to_dss_device_sysfs(dev); | ||
49 | |||
50 | return snprintf(buf, PAGE_SIZE, "%s\n", | 33 | return snprintf(buf, PAGE_SIZE, "%s\n", |
51 | dssdev->name ? | 34 | dssdev->name ? |
52 | dssdev->name : ""); | 35 | dssdev->name : ""); |
53 | } | 36 | } |
54 | 37 | ||
55 | static ssize_t display_enabled_show(struct device *dev, | 38 | static ssize_t display_enabled_show(struct omap_dss_device *dssdev, char *buf) |
56 | struct device_attribute *attr, char *buf) | ||
57 | { | 39 | { |
58 | struct omap_dss_device *dssdev = to_dss_device_sysfs(dev); | ||
59 | |||
60 | return snprintf(buf, PAGE_SIZE, "%d\n", | 40 | return snprintf(buf, PAGE_SIZE, "%d\n", |
61 | omapdss_device_is_enabled(dssdev)); | 41 | omapdss_device_is_enabled(dssdev)); |
62 | } | 42 | } |
63 | 43 | ||
64 | static ssize_t display_enabled_store(struct device *dev, | 44 | static ssize_t display_enabled_store(struct omap_dss_device *dssdev, |
65 | struct device_attribute *attr, | ||
66 | const char *buf, size_t size) | 45 | const char *buf, size_t size) |
67 | { | 46 | { |
68 | struct omap_dss_device *dssdev = to_dss_device_sysfs(dev); | ||
69 | int r; | 47 | int r; |
70 | bool enable; | 48 | bool enable; |
71 | 49 | ||
@@ -90,19 +68,16 @@ static ssize_t display_enabled_store(struct device *dev, | |||
90 | return size; | 68 | return size; |
91 | } | 69 | } |
92 | 70 | ||
93 | static ssize_t display_tear_show(struct device *dev, | 71 | static ssize_t display_tear_show(struct omap_dss_device *dssdev, char *buf) |
94 | struct device_attribute *attr, char *buf) | ||
95 | { | 72 | { |
96 | struct omap_dss_device *dssdev = to_dss_device_sysfs(dev); | ||
97 | return snprintf(buf, PAGE_SIZE, "%d\n", | 73 | return snprintf(buf, PAGE_SIZE, "%d\n", |
98 | dssdev->driver->get_te ? | 74 | dssdev->driver->get_te ? |
99 | dssdev->driver->get_te(dssdev) : 0); | 75 | dssdev->driver->get_te(dssdev) : 0); |
100 | } | 76 | } |
101 | 77 | ||
102 | static ssize_t display_tear_store(struct device *dev, | 78 | static ssize_t display_tear_store(struct omap_dss_device *dssdev, |
103 | struct device_attribute *attr, const char *buf, size_t size) | 79 | const char *buf, size_t size) |
104 | { | 80 | { |
105 | struct omap_dss_device *dssdev = to_dss_device_sysfs(dev); | ||
106 | int r; | 81 | int r; |
107 | bool te; | 82 | bool te; |
108 | 83 | ||
@@ -120,10 +95,8 @@ static ssize_t display_tear_store(struct device *dev, | |||
120 | return size; | 95 | return size; |
121 | } | 96 | } |
122 | 97 | ||
123 | static ssize_t display_timings_show(struct device *dev, | 98 | static ssize_t display_timings_show(struct omap_dss_device *dssdev, char *buf) |
124 | struct device_attribute *attr, char *buf) | ||
125 | { | 99 | { |
126 | struct omap_dss_device *dssdev = to_dss_device_sysfs(dev); | ||
127 | struct omap_video_timings t; | 100 | struct omap_video_timings t; |
128 | 101 | ||
129 | if (!dssdev->driver->get_timings) | 102 | if (!dssdev->driver->get_timings) |
@@ -137,10 +110,9 @@ static ssize_t display_timings_show(struct device *dev, | |||
137 | t.y_res, t.vfp, t.vbp, t.vsw); | 110 | t.y_res, t.vfp, t.vbp, t.vsw); |
138 | } | 111 | } |
139 | 112 | ||
140 | static ssize_t display_timings_store(struct device *dev, | 113 | static ssize_t display_timings_store(struct omap_dss_device *dssdev, |
141 | struct device_attribute *attr, const char *buf, size_t size) | 114 | const char *buf, size_t size) |
142 | { | 115 | { |
143 | struct omap_dss_device *dssdev = to_dss_device_sysfs(dev); | ||
144 | struct omap_video_timings t = dssdev->panel.timings; | 116 | struct omap_video_timings t = dssdev->panel.timings; |
145 | int r, found; | 117 | int r, found; |
146 | 118 | ||
@@ -176,10 +148,8 @@ static ssize_t display_timings_store(struct device *dev, | |||
176 | return size; | 148 | return size; |
177 | } | 149 | } |
178 | 150 | ||
179 | static ssize_t display_rotate_show(struct device *dev, | 151 | static ssize_t display_rotate_show(struct omap_dss_device *dssdev, char *buf) |
180 | struct device_attribute *attr, char *buf) | ||
181 | { | 152 | { |
182 | struct omap_dss_device *dssdev = to_dss_device_sysfs(dev); | ||
183 | int rotate; | 153 | int rotate; |
184 | if (!dssdev->driver->get_rotate) | 154 | if (!dssdev->driver->get_rotate) |
185 | return -ENOENT; | 155 | return -ENOENT; |
@@ -187,10 +157,9 @@ static ssize_t display_rotate_show(struct device *dev, | |||
187 | return snprintf(buf, PAGE_SIZE, "%u\n", rotate); | 157 | return snprintf(buf, PAGE_SIZE, "%u\n", rotate); |
188 | } | 158 | } |
189 | 159 | ||
190 | static ssize_t display_rotate_store(struct device *dev, | 160 | static ssize_t display_rotate_store(struct omap_dss_device *dssdev, |
191 | struct device_attribute *attr, const char *buf, size_t size) | 161 | const char *buf, size_t size) |
192 | { | 162 | { |
193 | struct omap_dss_device *dssdev = to_dss_device_sysfs(dev); | ||
194 | int rot, r; | 163 | int rot, r; |
195 | 164 | ||
196 | if (!dssdev->driver->set_rotate || !dssdev->driver->get_rotate) | 165 | if (!dssdev->driver->set_rotate || !dssdev->driver->get_rotate) |
@@ -207,10 +176,8 @@ static ssize_t display_rotate_store(struct device *dev, | |||
207 | return size; | 176 | return size; |
208 | } | 177 | } |
209 | 178 | ||
210 | static ssize_t display_mirror_show(struct device *dev, | 179 | static ssize_t display_mirror_show(struct omap_dss_device *dssdev, char *buf) |
211 | struct device_attribute *attr, char *buf) | ||
212 | { | 180 | { |
213 | struct omap_dss_device *dssdev = to_dss_device_sysfs(dev); | ||
214 | int mirror; | 181 | int mirror; |
215 | if (!dssdev->driver->get_mirror) | 182 | if (!dssdev->driver->get_mirror) |
216 | return -ENOENT; | 183 | return -ENOENT; |
@@ -218,10 +185,9 @@ static ssize_t display_mirror_show(struct device *dev, | |||
218 | return snprintf(buf, PAGE_SIZE, "%u\n", mirror); | 185 | return snprintf(buf, PAGE_SIZE, "%u\n", mirror); |
219 | } | 186 | } |
220 | 187 | ||
221 | static ssize_t display_mirror_store(struct device *dev, | 188 | static ssize_t display_mirror_store(struct omap_dss_device *dssdev, |
222 | struct device_attribute *attr, const char *buf, size_t size) | 189 | const char *buf, size_t size) |
223 | { | 190 | { |
224 | struct omap_dss_device *dssdev = to_dss_device_sysfs(dev); | ||
225 | int r; | 191 | int r; |
226 | bool mirror; | 192 | bool mirror; |
227 | 193 | ||
@@ -239,10 +205,8 @@ static ssize_t display_mirror_store(struct device *dev, | |||
239 | return size; | 205 | return size; |
240 | } | 206 | } |
241 | 207 | ||
242 | static ssize_t display_wss_show(struct device *dev, | 208 | static ssize_t display_wss_show(struct omap_dss_device *dssdev, char *buf) |
243 | struct device_attribute *attr, char *buf) | ||
244 | { | 209 | { |
245 | struct omap_dss_device *dssdev = to_dss_device_sysfs(dev); | ||
246 | unsigned int wss; | 210 | unsigned int wss; |
247 | 211 | ||
248 | if (!dssdev->driver->get_wss) | 212 | if (!dssdev->driver->get_wss) |
@@ -253,10 +217,9 @@ static ssize_t display_wss_show(struct device *dev, | |||
253 | return snprintf(buf, PAGE_SIZE, "0x%05x\n", wss); | 217 | return snprintf(buf, PAGE_SIZE, "0x%05x\n", wss); |
254 | } | 218 | } |
255 | 219 | ||
256 | static ssize_t display_wss_store(struct device *dev, | 220 | static ssize_t display_wss_store(struct omap_dss_device *dssdev, |
257 | struct device_attribute *attr, const char *buf, size_t size) | 221 | const char *buf, size_t size) |
258 | { | 222 | { |
259 | struct omap_dss_device *dssdev = to_dss_device_sysfs(dev); | ||
260 | u32 wss; | 223 | u32 wss; |
261 | int r; | 224 | int r; |
262 | 225 | ||
@@ -277,50 +240,94 @@ static ssize_t display_wss_store(struct device *dev, | |||
277 | return size; | 240 | return size; |
278 | } | 241 | } |
279 | 242 | ||
280 | static DEVICE_ATTR(display_name, S_IRUGO, display_name_show, NULL); | 243 | struct display_attribute { |
281 | static DEVICE_ATTR(enabled, S_IRUGO|S_IWUSR, | 244 | struct attribute attr; |
245 | ssize_t (*show)(struct omap_dss_device *, char *); | ||
246 | ssize_t (*store)(struct omap_dss_device *, const char *, size_t); | ||
247 | }; | ||
248 | |||
249 | #define DISPLAY_ATTR(_name, _mode, _show, _store) \ | ||
250 | struct display_attribute display_attr_##_name = \ | ||
251 | __ATTR(_name, _mode, _show, _store) | ||
252 | |||
253 | static DISPLAY_ATTR(name, S_IRUGO, display_name_show, NULL); | ||
254 | static DISPLAY_ATTR(display_name, S_IRUGO, display_name_show, NULL); | ||
255 | static DISPLAY_ATTR(enabled, S_IRUGO|S_IWUSR, | ||
282 | display_enabled_show, display_enabled_store); | 256 | display_enabled_show, display_enabled_store); |
283 | static DEVICE_ATTR(tear_elim, S_IRUGO|S_IWUSR, | 257 | static DISPLAY_ATTR(tear_elim, S_IRUGO|S_IWUSR, |
284 | display_tear_show, display_tear_store); | 258 | display_tear_show, display_tear_store); |
285 | static DEVICE_ATTR(timings, S_IRUGO|S_IWUSR, | 259 | static DISPLAY_ATTR(timings, S_IRUGO|S_IWUSR, |
286 | display_timings_show, display_timings_store); | 260 | display_timings_show, display_timings_store); |
287 | static DEVICE_ATTR(rotate, S_IRUGO|S_IWUSR, | 261 | static DISPLAY_ATTR(rotate, S_IRUGO|S_IWUSR, |
288 | display_rotate_show, display_rotate_store); | 262 | display_rotate_show, display_rotate_store); |
289 | static DEVICE_ATTR(mirror, S_IRUGO|S_IWUSR, | 263 | static DISPLAY_ATTR(mirror, S_IRUGO|S_IWUSR, |
290 | display_mirror_show, display_mirror_store); | 264 | display_mirror_show, display_mirror_store); |
291 | static DEVICE_ATTR(wss, S_IRUGO|S_IWUSR, | 265 | static DISPLAY_ATTR(wss, S_IRUGO|S_IWUSR, |
292 | display_wss_show, display_wss_store); | 266 | display_wss_show, display_wss_store); |
293 | 267 | ||
294 | static const struct attribute *display_sysfs_attrs[] = { | 268 | static struct attribute *display_sysfs_attrs[] = { |
295 | &dev_attr_display_name.attr, | 269 | &display_attr_name.attr, |
296 | &dev_attr_enabled.attr, | 270 | &display_attr_display_name.attr, |
297 | &dev_attr_tear_elim.attr, | 271 | &display_attr_enabled.attr, |
298 | &dev_attr_timings.attr, | 272 | &display_attr_tear_elim.attr, |
299 | &dev_attr_rotate.attr, | 273 | &display_attr_timings.attr, |
300 | &dev_attr_mirror.attr, | 274 | &display_attr_rotate.attr, |
301 | &dev_attr_wss.attr, | 275 | &display_attr_mirror.attr, |
276 | &display_attr_wss.attr, | ||
302 | NULL | 277 | NULL |
303 | }; | 278 | }; |
304 | 279 | ||
280 | static ssize_t display_attr_show(struct kobject *kobj, struct attribute *attr, | ||
281 | char *buf) | ||
282 | { | ||
283 | struct omap_dss_device *dssdev; | ||
284 | struct display_attribute *display_attr; | ||
285 | |||
286 | dssdev = container_of(kobj, struct omap_dss_device, kobj); | ||
287 | display_attr = container_of(attr, struct display_attribute, attr); | ||
288 | |||
289 | if (!display_attr->show) | ||
290 | return -ENOENT; | ||
291 | |||
292 | return display_attr->show(dssdev, buf); | ||
293 | } | ||
294 | |||
295 | static ssize_t display_attr_store(struct kobject *kobj, struct attribute *attr, | ||
296 | const char *buf, size_t size) | ||
297 | { | ||
298 | struct omap_dss_device *dssdev; | ||
299 | struct display_attribute *display_attr; | ||
300 | |||
301 | dssdev = container_of(kobj, struct omap_dss_device, kobj); | ||
302 | display_attr = container_of(attr, struct display_attribute, attr); | ||
303 | |||
304 | if (!display_attr->store) | ||
305 | return -ENOENT; | ||
306 | |||
307 | return display_attr->store(dssdev, buf, size); | ||
308 | } | ||
309 | |||
310 | static const struct sysfs_ops display_sysfs_ops = { | ||
311 | .show = display_attr_show, | ||
312 | .store = display_attr_store, | ||
313 | }; | ||
314 | |||
315 | static struct kobj_type display_ktype = { | ||
316 | .sysfs_ops = &display_sysfs_ops, | ||
317 | .default_attrs = display_sysfs_attrs, | ||
318 | }; | ||
319 | |||
305 | int display_init_sysfs(struct platform_device *pdev) | 320 | int display_init_sysfs(struct platform_device *pdev) |
306 | { | 321 | { |
307 | struct omap_dss_device *dssdev = NULL; | 322 | struct omap_dss_device *dssdev = NULL; |
308 | int r; | 323 | int r; |
309 | 324 | ||
310 | for_each_dss_dev(dssdev) { | 325 | for_each_dss_dev(dssdev) { |
311 | struct kobject *kobj = &dssdev->dev->kobj; | 326 | r = kobject_init_and_add(&dssdev->kobj, &display_ktype, |
312 | 327 | &pdev->dev.kobj, dssdev->alias); | |
313 | r = sysfs_create_files(kobj, display_sysfs_attrs); | ||
314 | if (r) { | 328 | if (r) { |
315 | DSSERR("failed to create sysfs files\n"); | 329 | DSSERR("failed to create sysfs files\n"); |
316 | goto err; | 330 | omap_dss_put_device(dssdev); |
317 | } | ||
318 | |||
319 | r = sysfs_create_link(&pdev->dev.kobj, kobj, dssdev->alias); | ||
320 | if (r) { | ||
321 | sysfs_remove_files(kobj, display_sysfs_attrs); | ||
322 | |||
323 | DSSERR("failed to create sysfs display link\n"); | ||
324 | goto err; | 331 | goto err; |
325 | } | 332 | } |
326 | } | 333 | } |
@@ -338,8 +345,12 @@ void display_uninit_sysfs(struct platform_device *pdev) | |||
338 | struct omap_dss_device *dssdev = NULL; | 345 | struct omap_dss_device *dssdev = NULL; |
339 | 346 | ||
340 | for_each_dss_dev(dssdev) { | 347 | for_each_dss_dev(dssdev) { |
341 | sysfs_remove_link(&pdev->dev.kobj, dssdev->alias); | 348 | if (kobject_name(&dssdev->kobj) == NULL) |
342 | sysfs_remove_files(&dssdev->dev->kobj, | 349 | continue; |
343 | display_sysfs_attrs); | 350 | |
351 | kobject_del(&dssdev->kobj); | ||
352 | kobject_put(&dssdev->kobj); | ||
353 | |||
354 | memset(&dssdev->kobj, 0, sizeof(dssdev->kobj)); | ||
344 | } | 355 | } |
345 | } | 356 | } |
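Aside: the display-sysfs rework above replaces per-device attributes with a kobj_type whose sysfs_ops recover the owning object via container_of(). A self-contained userspace sketch of that dispatch pattern, with names invented for illustration (this is not the omapdss code):

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct attribute {
	const char *name;
};

struct display {
	const char *label;
	struct attribute kobj_like;	/* embedded member, like dssdev->kobj */
};

/* generic callback sees only the embedded member, recovers the owner */
static void show(struct attribute *attr)
{
	struct display *d = container_of(attr, struct display, kobj_like);

	printf("%s: attribute %s\n", d->label, attr->name);
}

int main(void)
{
	struct display d = { .label = "lcd0", .kobj_like = { .name = "enabled" } };

	show(&d.kobj_like);	/* prints "lcd0: attribute enabled" */
	return 0;
}

The design point is that the attribute callbacks no longer need a struct device lookup at all; the kobject embedded in the dss device is enough to reach it.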
diff --git a/fs/nilfs2/segment.c b/fs/nilfs2/segment.c index 469086b9f99b..0c3f303baf32 100644 --- a/fs/nilfs2/segment.c +++ b/fs/nilfs2/segment.c | |||
@@ -1907,6 +1907,7 @@ static void nilfs_segctor_drop_written_files(struct nilfs_sc_info *sci, | |||
1907 | struct the_nilfs *nilfs) | 1907 | struct the_nilfs *nilfs) |
1908 | { | 1908 | { |
1909 | struct nilfs_inode_info *ii, *n; | 1909 | struct nilfs_inode_info *ii, *n; |
1910 | int during_mount = !(sci->sc_super->s_flags & MS_ACTIVE); | ||
1910 | int defer_iput = false; | 1911 | int defer_iput = false; |
1911 | 1912 | ||
1912 | spin_lock(&nilfs->ns_inode_lock); | 1913 | spin_lock(&nilfs->ns_inode_lock); |
@@ -1919,10 +1920,10 @@ static void nilfs_segctor_drop_written_files(struct nilfs_sc_info *sci, | |||
1919 | brelse(ii->i_bh); | 1920 | brelse(ii->i_bh); |
1920 | ii->i_bh = NULL; | 1921 | ii->i_bh = NULL; |
1921 | list_del_init(&ii->i_dirty); | 1922 | list_del_init(&ii->i_dirty); |
1922 | if (!ii->vfs_inode.i_nlink) { | 1923 | if (!ii->vfs_inode.i_nlink || during_mount) { |
1923 | /* | 1924 | /* |
1924 | * Defer calling iput() to avoid a deadlock | 1925 | * Defer calling iput() to avoid deadlocks if |
1925 | * over I_SYNC flag for inodes with i_nlink == 0 | 1926 | * i_nlink == 0 or mount is not yet finished. |
1926 | */ | 1927 | */ |
1927 | list_add_tail(&ii->i_dirty, &sci->sc_iput_queue); | 1928 | list_add_tail(&ii->i_dirty, &sci->sc_iput_queue); |
1928 | defer_iput = true; | 1929 | defer_iput = true; |
diff --git a/fs/notify/fanotify/fanotify.c b/fs/notify/fanotify/fanotify.c index 9a66ff79ff27..d2f97ecca6a5 100644 --- a/fs/notify/fanotify/fanotify.c +++ b/fs/notify/fanotify/fanotify.c | |||
@@ -143,7 +143,8 @@ static bool fanotify_should_send_event(struct fsnotify_mark *inode_mark, | |||
143 | !(marks_mask & FS_ISDIR & ~marks_ignored_mask)) | 143 | !(marks_mask & FS_ISDIR & ~marks_ignored_mask)) |
144 | return false; | 144 | return false; |
145 | 145 | ||
146 | if (event_mask & marks_mask & ~marks_ignored_mask) | 146 | if (event_mask & FAN_ALL_OUTGOING_EVENTS & marks_mask & |
147 | ~marks_ignored_mask) | ||
147 | return true; | 148 | return true; |
148 | 149 | ||
149 | return false; | 150 | return false; |
diff --git a/fs/ocfs2/ocfs2.h b/fs/ocfs2/ocfs2.h index 8490c64d34fe..460c6c37e683 100644 --- a/fs/ocfs2/ocfs2.h +++ b/fs/ocfs2/ocfs2.h | |||
@@ -502,7 +502,7 @@ static inline int ocfs2_writes_unwritten_extents(struct ocfs2_super *osb) | |||
502 | 502 | ||
503 | static inline int ocfs2_supports_append_dio(struct ocfs2_super *osb) | 503 | static inline int ocfs2_supports_append_dio(struct ocfs2_super *osb) |
504 | { | 504 | { |
505 | if (osb->s_feature_ro_compat & OCFS2_FEATURE_RO_COMPAT_APPEND_DIO) | 505 | if (osb->s_feature_incompat & OCFS2_FEATURE_INCOMPAT_APPEND_DIO) |
506 | return 1; | 506 | return 1; |
507 | return 0; | 507 | return 0; |
508 | } | 508 | } |
diff --git a/fs/ocfs2/ocfs2_fs.h b/fs/ocfs2/ocfs2_fs.h index 20e37a3ed26f..db64ce2d4667 100644 --- a/fs/ocfs2/ocfs2_fs.h +++ b/fs/ocfs2/ocfs2_fs.h | |||
@@ -102,11 +102,11 @@ | |||
102 | | OCFS2_FEATURE_INCOMPAT_INDEXED_DIRS \ | 102 | | OCFS2_FEATURE_INCOMPAT_INDEXED_DIRS \ |
103 | | OCFS2_FEATURE_INCOMPAT_REFCOUNT_TREE \ | 103 | | OCFS2_FEATURE_INCOMPAT_REFCOUNT_TREE \ |
104 | | OCFS2_FEATURE_INCOMPAT_DISCONTIG_BG \ | 104 | | OCFS2_FEATURE_INCOMPAT_DISCONTIG_BG \ |
105 | | OCFS2_FEATURE_INCOMPAT_CLUSTERINFO) | 105 | | OCFS2_FEATURE_INCOMPAT_CLUSTERINFO \ |
106 | | OCFS2_FEATURE_INCOMPAT_APPEND_DIO) | ||
106 | #define OCFS2_FEATURE_RO_COMPAT_SUPP (OCFS2_FEATURE_RO_COMPAT_UNWRITTEN \ | 107 | #define OCFS2_FEATURE_RO_COMPAT_SUPP (OCFS2_FEATURE_RO_COMPAT_UNWRITTEN \ |
107 | | OCFS2_FEATURE_RO_COMPAT_USRQUOTA \ | 108 | | OCFS2_FEATURE_RO_COMPAT_USRQUOTA \ |
108 | | OCFS2_FEATURE_RO_COMPAT_GRPQUOTA \ | 109 | | OCFS2_FEATURE_RO_COMPAT_GRPQUOTA) |
109 | | OCFS2_FEATURE_RO_COMPAT_APPEND_DIO) | ||
110 | 110 | ||
111 | /* | 111 | /* |
112 | * Heartbeat-only devices are missing journals and other files. The | 112 | * Heartbeat-only devices are missing journals and other files. The |
@@ -179,6 +179,11 @@ | |||
179 | #define OCFS2_FEATURE_INCOMPAT_CLUSTERINFO 0x4000 | 179 | #define OCFS2_FEATURE_INCOMPAT_CLUSTERINFO 0x4000 |
180 | 180 | ||
181 | /* | 181 | /* |
182 | * Append Direct IO support | ||
183 | */ | ||
184 | #define OCFS2_FEATURE_INCOMPAT_APPEND_DIO 0x8000 | ||
185 | |||
186 | /* | ||
182 | * backup superblock flag is used to indicate that this volume | 187 | * backup superblock flag is used to indicate that this volume |
183 | * has backup superblocks. | 188 | * has backup superblocks. |
184 | */ | 189 | */ |
@@ -200,10 +205,6 @@ | |||
200 | #define OCFS2_FEATURE_RO_COMPAT_USRQUOTA 0x0002 | 205 | #define OCFS2_FEATURE_RO_COMPAT_USRQUOTA 0x0002 |
201 | #define OCFS2_FEATURE_RO_COMPAT_GRPQUOTA 0x0004 | 206 | #define OCFS2_FEATURE_RO_COMPAT_GRPQUOTA 0x0004 |
202 | 207 | ||
203 | /* | ||
204 | * Append Direct IO support | ||
205 | */ | ||
206 | #define OCFS2_FEATURE_RO_COMPAT_APPEND_DIO 0x0008 | ||
207 | 208 | ||
208 | /* The byte offset of the first backup block will be 1G. | 209 | /* The byte offset of the first backup block will be 1G. |
209 | * The following will be 4G, 16G, 64G, 256G and 1T. | 210 | * The following will be 4G, 16G, 64G, 256G and 1T. |
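Aside: the two ocfs2 hunks above move APPEND_DIO from the ro-compat word to the incompat word and into the matching supported mask. A hedged sketch of how such feature-bit checks typically work, with invented values rather than the real ocfs2 constants:

#include <stdint.h>
#include <stdio.h>

#define FEAT_INCOMPAT_APPEND_DIO	0x8000u
#define FEAT_INCOMPAT_SUPP		(0x4000u | FEAT_INCOMPAT_APPEND_DIO)

static int supports_append_dio(uint32_t incompat_flags)
{
	return (incompat_flags & FEAT_INCOMPAT_APPEND_DIO) != 0;
}

int main(void)
{
	uint32_t on_disk = FEAT_INCOMPAT_APPEND_DIO;

	/* any incompat bit outside the supported mask blocks the mount */
	if (on_disk & ~FEAT_INCOMPAT_SUPP)
		printf("unknown incompat features, refuse mount\n");
	else
		printf("append dio: %s\n",
		       supports_append_dio(on_disk) ? "yes" : "no");
	return 0;
}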
diff --git a/include/linux/kasan.h b/include/linux/kasan.h index 72ba725ddf9c..5bb074431eb0 100644 --- a/include/linux/kasan.h +++ b/include/linux/kasan.h | |||
@@ -5,6 +5,7 @@ | |||
5 | 5 | ||
6 | struct kmem_cache; | 6 | struct kmem_cache; |
7 | struct page; | 7 | struct page; |
8 | struct vm_struct; | ||
8 | 9 | ||
9 | #ifdef CONFIG_KASAN | 10 | #ifdef CONFIG_KASAN |
10 | 11 | ||
@@ -49,15 +50,11 @@ void kasan_krealloc(const void *object, size_t new_size); | |||
49 | void kasan_slab_alloc(struct kmem_cache *s, void *object); | 50 | void kasan_slab_alloc(struct kmem_cache *s, void *object); |
50 | void kasan_slab_free(struct kmem_cache *s, void *object); | 51 | void kasan_slab_free(struct kmem_cache *s, void *object); |
51 | 52 | ||
52 | #define MODULE_ALIGN (PAGE_SIZE << KASAN_SHADOW_SCALE_SHIFT) | ||
53 | |||
54 | int kasan_module_alloc(void *addr, size_t size); | 53 | int kasan_module_alloc(void *addr, size_t size); |
55 | void kasan_module_free(void *addr); | 54 | void kasan_free_shadow(const struct vm_struct *vm); |
56 | 55 | ||
57 | #else /* CONFIG_KASAN */ | 56 | #else /* CONFIG_KASAN */ |
58 | 57 | ||
59 | #define MODULE_ALIGN 1 | ||
60 | |||
61 | static inline void kasan_unpoison_shadow(const void *address, size_t size) {} | 58 | static inline void kasan_unpoison_shadow(const void *address, size_t size) {} |
62 | 59 | ||
63 | static inline void kasan_enable_current(void) {} | 60 | static inline void kasan_enable_current(void) {} |
@@ -82,7 +79,7 @@ static inline void kasan_slab_alloc(struct kmem_cache *s, void *object) {} | |||
82 | static inline void kasan_slab_free(struct kmem_cache *s, void *object) {} | 79 | static inline void kasan_slab_free(struct kmem_cache *s, void *object) {} |
83 | 80 | ||
84 | static inline int kasan_module_alloc(void *addr, size_t size) { return 0; } | 81 | static inline int kasan_module_alloc(void *addr, size_t size) { return 0; } |
85 | static inline void kasan_module_free(void *addr) {} | 82 | static inline void kasan_free_shadow(const struct vm_struct *vm) {} |
86 | 83 | ||
87 | #endif /* CONFIG_KASAN */ | 84 | #endif /* CONFIG_KASAN */ |
88 | 85 | ||
diff --git a/include/linux/moduleloader.h b/include/linux/moduleloader.h index f7556261fe3c..4d0cb9bba93e 100644 --- a/include/linux/moduleloader.h +++ b/include/linux/moduleloader.h | |||
@@ -84,4 +84,12 @@ void module_arch_cleanup(struct module *mod); | |||
84 | 84 | ||
85 | /* Any cleanup before freeing mod->module_init */ | 85 | /* Any cleanup before freeing mod->module_init */ |
86 | void module_arch_freeing_init(struct module *mod); | 86 | void module_arch_freeing_init(struct module *mod); |
87 | |||
88 | #ifdef CONFIG_KASAN | ||
89 | #include <linux/kasan.h> | ||
90 | #define MODULE_ALIGN (PAGE_SIZE << KASAN_SHADOW_SCALE_SHIFT) | ||
91 | #else | ||
92 | #define MODULE_ALIGN PAGE_SIZE | ||
93 | #endif | ||
94 | |||
87 | #endif | 95 | #endif |
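Aside: a small sketch of the MODULE_ALIGN arithmetic introduced above, assuming a 4 KiB page and the usual KASAN shadow scale shift of 3; both values and the WITH_KASAN switch are assumptions for illustration:

#include <stdio.h>

#define PAGE_SIZE			4096UL
#define KASAN_SHADOW_SCALE_SHIFT	3

#ifdef WITH_KASAN
# define MODULE_ALIGN	(PAGE_SIZE << KASAN_SHADOW_SCALE_SHIFT)
#else
# define MODULE_ALIGN	PAGE_SIZE
#endif

int main(void)
{
	/* 4096 without KASAN, 32768 with it (one shadow byte per 8 bytes) */
	printf("MODULE_ALIGN = %lu\n", MODULE_ALIGN);
	return 0;
}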
diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h index ed9489d893a4..856d34dde79b 100644 --- a/include/linux/spi/spi.h +++ b/include/linux/spi/spi.h | |||
@@ -649,7 +649,7 @@ struct spi_transfer { | |||
649 | * sequence completes. On some systems, many such sequences can execute as | 649 | * sequence completes. On some systems, many such sequences can execute as |
650 | * as single programmed DMA transfer. On all systems, these messages are | 650 | * as single programmed DMA transfer. On all systems, these messages are |
651 | * queued, and might complete after transactions to other devices. Messages | 651 | * queued, and might complete after transactions to other devices. Messages |
652 | * sent to a given spi_device are alway executed in FIFO order. | 652 | * sent to a given spi_device are always executed in FIFO order. |
653 | * | 653 | * |
654 | * The code that submits an spi_message (and its spi_transfers) | 654 | * The code that submits an spi_message (and its spi_transfers) |
655 | * to the lower layers is responsible for managing its memory. | 655 | * to the lower layers is responsible for managing its memory. |
diff --git a/include/linux/uio.h b/include/linux/uio.h index 07a022641996..71880299ed48 100644 --- a/include/linux/uio.h +++ b/include/linux/uio.h | |||
@@ -98,6 +98,8 @@ ssize_t iov_iter_get_pages_alloc(struct iov_iter *i, struct page ***pages, | |||
98 | size_t maxsize, size_t *start); | 98 | size_t maxsize, size_t *start); |
99 | int iov_iter_npages(const struct iov_iter *i, int maxpages); | 99 | int iov_iter_npages(const struct iov_iter *i, int maxpages); |
100 | 100 | ||
101 | const void *dup_iter(struct iov_iter *new, struct iov_iter *old, gfp_t flags); | ||
102 | |||
101 | static inline size_t iov_iter_count(struct iov_iter *i) | 103 | static inline size_t iov_iter_count(struct iov_iter *i) |
102 | { | 104 | { |
103 | return i->count; | 105 | return i->count; |
diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h index 7d7acb35603d..0ec598381f97 100644 --- a/include/linux/vmalloc.h +++ b/include/linux/vmalloc.h | |||
@@ -17,6 +17,7 @@ struct vm_area_struct; /* vma defining user mapping in mm_types.h */ | |||
17 | #define VM_VPAGES 0x00000010 /* buffer for pages was vmalloc'ed */ | 17 | #define VM_VPAGES 0x00000010 /* buffer for pages was vmalloc'ed */ |
18 | #define VM_UNINITIALIZED 0x00000020 /* vm_struct is not fully initialized */ | 18 | #define VM_UNINITIALIZED 0x00000020 /* vm_struct is not fully initialized */ |
19 | #define VM_NO_GUARD 0x00000040 /* don't add guard page */ | 19 | #define VM_NO_GUARD 0x00000040 /* don't add guard page */ |
20 | #define VM_KASAN 0x00000080 /* has allocated kasan shadow memory */ | ||
20 | /* bits [20..32] reserved for arch specific ioremap internals */ | 21 | /* bits [20..32] reserved for arch specific ioremap internals */ |
21 | 22 | ||
22 | /* | 23 | /* |
diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h index 74db135f9957..f597846ff605 100644 --- a/include/linux/workqueue.h +++ b/include/linux/workqueue.h | |||
@@ -70,7 +70,8 @@ enum { | |||
70 | /* data contains off-queue information when !WORK_STRUCT_PWQ */ | 70 | /* data contains off-queue information when !WORK_STRUCT_PWQ */ |
71 | WORK_OFFQ_FLAG_BASE = WORK_STRUCT_COLOR_SHIFT, | 71 | WORK_OFFQ_FLAG_BASE = WORK_STRUCT_COLOR_SHIFT, |
72 | 72 | ||
73 | WORK_OFFQ_CANCELING = (1 << WORK_OFFQ_FLAG_BASE), | 73 | __WORK_OFFQ_CANCELING = WORK_OFFQ_FLAG_BASE, |
74 | WORK_OFFQ_CANCELING = (1 << __WORK_OFFQ_CANCELING), | ||
74 | 75 | ||
75 | /* | 76 | /* |
76 | * When a work item is off queue, its high bits point to the last | 77 | * When a work item is off queue, its high bits point to the last |
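Aside: the workqueue.h hunk above splits the canceling flag into a bit position and a bit mask. A trivial sketch of that enum idiom, with invented names:

#include <stdio.h>

enum {
	FLAG_BASE = 4,
	FLAG_CANCELING_BIT = FLAG_BASE,			/* bit position */
	FLAG_CANCELING = 1 << FLAG_CANCELING_BIT,	/* bit mask     */
};

int main(void)
{
	unsigned long data = FLAG_CANCELING;

	printf("bit %d set: %s\n", FLAG_CANCELING_BIT,
	       (data >> FLAG_CANCELING_BIT) & 1 ? "yes" : "no");
	return 0;
}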
diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h index 9eaaa7884586..decb9a095ae7 100644 --- a/include/net/netfilter/nf_tables.h +++ b/include/net/netfilter/nf_tables.h | |||
@@ -119,6 +119,22 @@ int nft_validate_data_load(const struct nft_ctx *ctx, enum nft_registers reg, | |||
119 | const struct nft_data *data, | 119 | const struct nft_data *data, |
120 | enum nft_data_types type); | 120 | enum nft_data_types type); |
121 | 121 | ||
122 | |||
123 | /** | ||
124 | * struct nft_userdata - user defined data associated with an object | ||
125 | * | ||
126 | * @len: length of the data | ||
127 | * @data: content | ||
128 | * | ||
129 | * The presence of user data is indicated in an object specific fashion, | ||
130 | * so a length of zero can't occur and the value "len" indicates data | ||
131 | * of length len + 1. | ||
132 | */ | ||
133 | struct nft_userdata { | ||
134 | u8 len; | ||
135 | unsigned char data[0]; | ||
136 | }; | ||
137 | |||
122 | /** | 138 | /** |
123 | * struct nft_set_elem - generic representation of set elements | 139 | * struct nft_set_elem - generic representation of set elements |
124 | * | 140 | * |
@@ -380,7 +396,7 @@ static inline void *nft_expr_priv(const struct nft_expr *expr) | |||
380 | * @handle: rule handle | 396 | * @handle: rule handle |
381 | * @genmask: generation mask | 397 | * @genmask: generation mask |
382 | * @dlen: length of expression data | 398 | * @dlen: length of expression data |
383 | * @ulen: length of user data (used for comments) | 399 | * @udata: user data is appended to the rule |
384 | * @data: expression data | 400 | * @data: expression data |
385 | */ | 401 | */ |
386 | struct nft_rule { | 402 | struct nft_rule { |
@@ -388,7 +404,7 @@ struct nft_rule { | |||
388 | u64 handle:42, | 404 | u64 handle:42, |
389 | genmask:2, | 405 | genmask:2, |
390 | dlen:12, | 406 | dlen:12, |
391 | ulen:8; | 407 | udata:1; |
392 | unsigned char data[] | 408 | unsigned char data[] |
393 | __attribute__((aligned(__alignof__(struct nft_expr)))); | 409 | __attribute__((aligned(__alignof__(struct nft_expr)))); |
394 | }; | 410 | }; |
@@ -476,7 +492,7 @@ static inline struct nft_expr *nft_expr_last(const struct nft_rule *rule) | |||
476 | return (struct nft_expr *)&rule->data[rule->dlen]; | 492 | return (struct nft_expr *)&rule->data[rule->dlen]; |
477 | } | 493 | } |
478 | 494 | ||
479 | static inline void *nft_userdata(const struct nft_rule *rule) | 495 | static inline struct nft_userdata *nft_userdata(const struct nft_rule *rule) |
480 | { | 496 | { |
481 | return (void *)&rule->data[rule->dlen]; | 497 | return (void *)&rule->data[rule->dlen]; |
482 | } | 498 | } |
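Aside: the nft_userdata comment above documents a "stored length minus one" encoding, so the full 1..256 byte range fits in a u8. A hedged userspace sketch of that convention (not the nf_tables code, names invented):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct userdata {
	uint8_t len;		/* stored as real length - 1 */
	unsigned char data[];
};

static struct userdata *userdata_alloc(const void *src, size_t size)
{
	struct userdata *u;

	if (size < 1 || size > 256)	/* zero-length blobs are never stored */
		return NULL;
	u = malloc(sizeof(*u) + size);
	if (!u)
		return NULL;
	u->len = (uint8_t)(size - 1);
	memcpy(u->data, src, size);
	return u;
}

int main(void)
{
	struct userdata *u = userdata_alloc("comment", sizeof("comment"));

	if (u) {
		printf("stored %u bytes (len field = %u)\n",
		       (unsigned)u->len + 1, (unsigned)u->len);
		free(u);
	}
	return 0;
}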
diff --git a/include/video/omapdss.h b/include/video/omapdss.h index 60de61fea8e3..c8ed15daad02 100644 --- a/include/video/omapdss.h +++ b/include/video/omapdss.h | |||
@@ -689,6 +689,7 @@ struct omapdss_dsi_ops { | |||
689 | }; | 689 | }; |
690 | 690 | ||
691 | struct omap_dss_device { | 691 | struct omap_dss_device { |
692 | struct kobject kobj; | ||
692 | struct device *dev; | 693 | struct device *dev; |
693 | 694 | ||
694 | struct module *owner; | 695 | struct module *owner; |
diff --git a/kernel/cpuset.c b/kernel/cpuset.c index 1d1fe9361d29..fc7f4748d34a 100644 --- a/kernel/cpuset.c +++ b/kernel/cpuset.c | |||
@@ -548,9 +548,6 @@ static void update_domain_attr_tree(struct sched_domain_attr *dattr, | |||
548 | 548 | ||
549 | rcu_read_lock(); | 549 | rcu_read_lock(); |
550 | cpuset_for_each_descendant_pre(cp, pos_css, root_cs) { | 550 | cpuset_for_each_descendant_pre(cp, pos_css, root_cs) { |
551 | if (cp == root_cs) | ||
552 | continue; | ||
553 | |||
554 | /* skip the whole subtree if @cp doesn't have any CPU */ | 551 | /* skip the whole subtree if @cp doesn't have any CPU */ |
555 | if (cpumask_empty(cp->cpus_allowed)) { | 552 | if (cpumask_empty(cp->cpus_allowed)) { |
556 | pos_css = css_rightmost_descendant(pos_css); | 553 | pos_css = css_rightmost_descendant(pos_css); |
@@ -873,7 +870,7 @@ static void update_cpumasks_hier(struct cpuset *cs, struct cpumask *new_cpus) | |||
873 | * If it becomes empty, inherit the effective mask of the | 870 | * If it becomes empty, inherit the effective mask of the |
874 | * parent, which is guaranteed to have some CPUs. | 871 | * parent, which is guaranteed to have some CPUs. |
875 | */ | 872 | */ |
876 | if (cpumask_empty(new_cpus)) | 873 | if (cgroup_on_dfl(cp->css.cgroup) && cpumask_empty(new_cpus)) |
877 | cpumask_copy(new_cpus, parent->effective_cpus); | 874 | cpumask_copy(new_cpus, parent->effective_cpus); |
878 | 875 | ||
879 | /* Skip the whole subtree if the cpumask remains the same. */ | 876 | /* Skip the whole subtree if the cpumask remains the same. */ |
@@ -1129,7 +1126,7 @@ static void update_nodemasks_hier(struct cpuset *cs, nodemask_t *new_mems) | |||
1129 | * If it becomes empty, inherit the effective mask of the | 1126 | * If it becomes empty, inherit the effective mask of the |
1130 | * parent, which is guaranteed to have some MEMs. | 1127 | * parent, which is guaranteed to have some MEMs. |
1131 | */ | 1128 | */ |
1132 | if (nodes_empty(*new_mems)) | 1129 | if (cgroup_on_dfl(cp->css.cgroup) && nodes_empty(*new_mems)) |
1133 | *new_mems = parent->effective_mems; | 1130 | *new_mems = parent->effective_mems; |
1134 | 1131 | ||
1135 | /* Skip the whole subtree if the nodemask remains the same. */ | 1132 | /* Skip the whole subtree if the nodemask remains the same. */ |
@@ -1979,7 +1976,9 @@ static int cpuset_css_online(struct cgroup_subsys_state *css) | |||
1979 | 1976 | ||
1980 | spin_lock_irq(&callback_lock); | 1977 | spin_lock_irq(&callback_lock); |
1981 | cs->mems_allowed = parent->mems_allowed; | 1978 | cs->mems_allowed = parent->mems_allowed; |
1979 | cs->effective_mems = parent->mems_allowed; | ||
1982 | cpumask_copy(cs->cpus_allowed, parent->cpus_allowed); | 1980 | cpumask_copy(cs->cpus_allowed, parent->cpus_allowed); |
1981 | cpumask_copy(cs->effective_cpus, parent->cpus_allowed); | ||
1983 | spin_unlock_irq(&callback_lock); | 1982 | spin_unlock_irq(&callback_lock); |
1984 | out_unlock: | 1983 | out_unlock: |
1985 | mutex_unlock(&cpuset_mutex); | 1984 | mutex_unlock(&cpuset_mutex); |
diff --git a/kernel/module.c b/kernel/module.c index cc93cf68653c..b3d634ed06c9 100644 --- a/kernel/module.c +++ b/kernel/module.c | |||
@@ -56,7 +56,6 @@ | |||
56 | #include <linux/async.h> | 56 | #include <linux/async.h> |
57 | #include <linux/percpu.h> | 57 | #include <linux/percpu.h> |
58 | #include <linux/kmemleak.h> | 58 | #include <linux/kmemleak.h> |
59 | #include <linux/kasan.h> | ||
60 | #include <linux/jump_label.h> | 59 | #include <linux/jump_label.h> |
61 | #include <linux/pfn.h> | 60 | #include <linux/pfn.h> |
62 | #include <linux/bsearch.h> | 61 | #include <linux/bsearch.h> |
@@ -1814,7 +1813,6 @@ static void unset_module_init_ro_nx(struct module *mod) { } | |||
1814 | void __weak module_memfree(void *module_region) | 1813 | void __weak module_memfree(void *module_region) |
1815 | { | 1814 | { |
1816 | vfree(module_region); | 1815 | vfree(module_region); |
1817 | kasan_module_free(module_region); | ||
1818 | } | 1816 | } |
1819 | 1817 | ||
1820 | void __weak module_arch_cleanup(struct module *mod) | 1818 | void __weak module_arch_cleanup(struct module *mod) |
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c index 45e5cb143d17..4f228024055b 100644 --- a/kernel/trace/ftrace.c +++ b/kernel/trace/ftrace.c | |||
@@ -1059,6 +1059,12 @@ static __init void ftrace_profile_debugfs(struct dentry *d_tracer) | |||
1059 | 1059 | ||
1060 | static struct pid * const ftrace_swapper_pid = &init_struct_pid; | 1060 | static struct pid * const ftrace_swapper_pid = &init_struct_pid; |
1061 | 1061 | ||
1062 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER | ||
1063 | static int ftrace_graph_active; | ||
1064 | #else | ||
1065 | # define ftrace_graph_active 0 | ||
1066 | #endif | ||
1067 | |||
1062 | #ifdef CONFIG_DYNAMIC_FTRACE | 1068 | #ifdef CONFIG_DYNAMIC_FTRACE |
1063 | 1069 | ||
1064 | static struct ftrace_ops *removed_ops; | 1070 | static struct ftrace_ops *removed_ops; |
@@ -2041,8 +2047,12 @@ static int ftrace_check_record(struct dyn_ftrace *rec, int enable, int update) | |||
2041 | if (!ftrace_rec_count(rec)) | 2047 | if (!ftrace_rec_count(rec)) |
2042 | rec->flags = 0; | 2048 | rec->flags = 0; |
2043 | else | 2049 | else |
2044 | /* Just disable the record (keep REGS state) */ | 2050 | /* |
2045 | rec->flags &= ~FTRACE_FL_ENABLED; | 2051 | * Just disable the record, but keep the ops TRAMP |
2052 | * and REGS states. The _EN flags must be disabled though. | ||
2053 | */ | ||
2054 | rec->flags &= ~(FTRACE_FL_ENABLED | FTRACE_FL_TRAMP_EN | | ||
2055 | FTRACE_FL_REGS_EN); | ||
2046 | } | 2056 | } |
2047 | 2057 | ||
2048 | return FTRACE_UPDATE_MAKE_NOP; | 2058 | return FTRACE_UPDATE_MAKE_NOP; |
@@ -2688,24 +2698,36 @@ static int ftrace_shutdown(struct ftrace_ops *ops, int command) | |||
2688 | 2698 | ||
2689 | static void ftrace_startup_sysctl(void) | 2699 | static void ftrace_startup_sysctl(void) |
2690 | { | 2700 | { |
2701 | int command; | ||
2702 | |||
2691 | if (unlikely(ftrace_disabled)) | 2703 | if (unlikely(ftrace_disabled)) |
2692 | return; | 2704 | return; |
2693 | 2705 | ||
2694 | /* Force update next time */ | 2706 | /* Force update next time */ |
2695 | saved_ftrace_func = NULL; | 2707 | saved_ftrace_func = NULL; |
2696 | /* ftrace_start_up is true if we want ftrace running */ | 2708 | /* ftrace_start_up is true if we want ftrace running */ |
2697 | if (ftrace_start_up) | 2709 | if (ftrace_start_up) { |
2698 | ftrace_run_update_code(FTRACE_UPDATE_CALLS); | 2710 | command = FTRACE_UPDATE_CALLS; |
2711 | if (ftrace_graph_active) | ||
2712 | command |= FTRACE_START_FUNC_RET; | ||
2713 | ftrace_startup_enable(command); | ||
2714 | } | ||
2699 | } | 2715 | } |
2700 | 2716 | ||
2701 | static void ftrace_shutdown_sysctl(void) | 2717 | static void ftrace_shutdown_sysctl(void) |
2702 | { | 2718 | { |
2719 | int command; | ||
2720 | |||
2703 | if (unlikely(ftrace_disabled)) | 2721 | if (unlikely(ftrace_disabled)) |
2704 | return; | 2722 | return; |
2705 | 2723 | ||
2706 | /* ftrace_start_up is true if ftrace is running */ | 2724 | /* ftrace_start_up is true if ftrace is running */ |
2707 | if (ftrace_start_up) | 2725 | if (ftrace_start_up) { |
2708 | ftrace_run_update_code(FTRACE_DISABLE_CALLS); | 2726 | command = FTRACE_DISABLE_CALLS; |
2727 | if (ftrace_graph_active) | ||
2728 | command |= FTRACE_STOP_FUNC_RET; | ||
2729 | ftrace_run_update_code(command); | ||
2730 | } | ||
2709 | } | 2731 | } |
2710 | 2732 | ||
2711 | static cycle_t ftrace_update_time; | 2733 | static cycle_t ftrace_update_time; |
@@ -5558,12 +5580,12 @@ ftrace_enable_sysctl(struct ctl_table *table, int write, | |||
5558 | 5580 | ||
5559 | if (ftrace_enabled) { | 5581 | if (ftrace_enabled) { |
5560 | 5582 | ||
5561 | ftrace_startup_sysctl(); | ||
5562 | |||
5563 | /* we are starting ftrace again */ | 5583 | /* we are starting ftrace again */ |
5564 | if (ftrace_ops_list != &ftrace_list_end) | 5584 | if (ftrace_ops_list != &ftrace_list_end) |
5565 | update_ftrace_function(); | 5585 | update_ftrace_function(); |
5566 | 5586 | ||
5587 | ftrace_startup_sysctl(); | ||
5588 | |||
5567 | } else { | 5589 | } else { |
5568 | /* stopping ftrace calls (just send to ftrace_stub) */ | 5590 | /* stopping ftrace calls (just send to ftrace_stub) */ |
5569 | ftrace_trace_function = ftrace_stub; | 5591 | ftrace_trace_function = ftrace_stub; |
@@ -5590,8 +5612,6 @@ static struct ftrace_ops graph_ops = { | |||
5590 | ASSIGN_OPS_HASH(graph_ops, &global_ops.local_hash) | 5612 | ASSIGN_OPS_HASH(graph_ops, &global_ops.local_hash) |
5591 | }; | 5613 | }; |
5592 | 5614 | ||
5593 | static int ftrace_graph_active; | ||
5594 | |||
5595 | int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace) | 5615 | int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace) |
5596 | { | 5616 | { |
5597 | return 0; | 5617 | return 0; |
diff --git a/kernel/workqueue.c b/kernel/workqueue.c index f28849394791..41ff75b478c6 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c | |||
@@ -2728,19 +2728,57 @@ bool flush_work(struct work_struct *work) | |||
2728 | } | 2728 | } |
2729 | EXPORT_SYMBOL_GPL(flush_work); | 2729 | EXPORT_SYMBOL_GPL(flush_work); |
2730 | 2730 | ||
2731 | struct cwt_wait { | ||
2732 | wait_queue_t wait; | ||
2733 | struct work_struct *work; | ||
2734 | }; | ||
2735 | |||
2736 | static int cwt_wakefn(wait_queue_t *wait, unsigned mode, int sync, void *key) | ||
2737 | { | ||
2738 | struct cwt_wait *cwait = container_of(wait, struct cwt_wait, wait); | ||
2739 | |||
2740 | if (cwait->work != key) | ||
2741 | return 0; | ||
2742 | return autoremove_wake_function(wait, mode, sync, key); | ||
2743 | } | ||
2744 | |||
2731 | static bool __cancel_work_timer(struct work_struct *work, bool is_dwork) | 2745 | static bool __cancel_work_timer(struct work_struct *work, bool is_dwork) |
2732 | { | 2746 | { |
2747 | static DECLARE_WAIT_QUEUE_HEAD(cancel_waitq); | ||
2733 | unsigned long flags; | 2748 | unsigned long flags; |
2734 | int ret; | 2749 | int ret; |
2735 | 2750 | ||
2736 | do { | 2751 | do { |
2737 | ret = try_to_grab_pending(work, is_dwork, &flags); | 2752 | ret = try_to_grab_pending(work, is_dwork, &flags); |
2738 | /* | 2753 | /* |
2739 | * If someone else is canceling, wait for the same event it | 2754 | * If someone else is already canceling, wait for it to |
2740 | * would be waiting for before retrying. | 2755 | * finish. flush_work() doesn't work for PREEMPT_NONE |
2756 | * because we may get scheduled between @work's completion | ||
2757 | * and the other canceling task resuming and clearing | ||
2758 | * CANCELING - flush_work() will return false immediately | ||
2759 | * as @work is no longer busy, try_to_grab_pending() will | ||
2760 | * return -ENOENT as @work is still being canceled and the | ||
2761 | * other canceling task won't be able to clear CANCELING as | ||
2762 | * we're hogging the CPU. | ||
2763 | * | ||
2764 | * Let's wait for completion using a waitqueue. As this | ||
2765 | * may lead to the thundering herd problem, use a custom | ||
2766 | * wake function which matches @work along with exclusive | ||
2767 | * wait and wakeup. | ||
2741 | */ | 2768 | */ |
2742 | if (unlikely(ret == -ENOENT)) | 2769 | if (unlikely(ret == -ENOENT)) { |
2743 | flush_work(work); | 2770 | struct cwt_wait cwait; |
2771 | |||
2772 | init_wait(&cwait.wait); | ||
2773 | cwait.wait.func = cwt_wakefn; | ||
2774 | cwait.work = work; | ||
2775 | |||
2776 | prepare_to_wait_exclusive(&cancel_waitq, &cwait.wait, | ||
2777 | TASK_UNINTERRUPTIBLE); | ||
2778 | if (work_is_canceling(work)) | ||
2779 | schedule(); | ||
2780 | finish_wait(&cancel_waitq, &cwait.wait); | ||
2781 | } | ||
2744 | } while (unlikely(ret < 0)); | 2782 | } while (unlikely(ret < 0)); |
2745 | 2783 | ||
2746 | /* tell other tasks trying to grab @work to back off */ | 2784 | /* tell other tasks trying to grab @work to back off */ |
@@ -2749,6 +2787,16 @@ static bool __cancel_work_timer(struct work_struct *work, bool is_dwork) | |||
2749 | 2787 | ||
2750 | flush_work(work); | 2788 | flush_work(work); |
2751 | clear_work_data(work); | 2789 | clear_work_data(work); |
2790 | |||
2791 | /* | ||
2792 | * Paired with prepare_to_wait() above so that either | ||
2793 | * waitqueue_active() is visible here or !work_is_canceling() is | ||
2794 | * visible there. | ||
2795 | */ | ||
2796 | smp_mb(); | ||
2797 | if (waitqueue_active(&cancel_waitq)) | ||
2798 | __wake_up(&cancel_waitq, TASK_NORMAL, 1, work); | ||
2799 | |||
2752 | return ret; | 2800 | return ret; |
2753 | } | 2801 | } |
2754 | 2802 | ||
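Aside: the __cancel_work_timer() hunk above waits on a shared waitqueue with a wake function that only fires for the waiter whose work item matches. A much-simplified userspace sketch of that key-matching, exclusive wakeup idea (no real blocking, invented names):

#include <stdbool.h>
#include <stdio.h>

struct waiter {
	const void *key;	/* the work item this waiter is cancelling */
	bool woken;
};

static int wake_matching(struct waiter *w, size_t n, const void *key)
{
	/* exclusive wakeup: stop after the first matching waiter */
	for (size_t i = 0; i < n; i++) {
		if (!w[i].woken && w[i].key == key) {
			w[i].woken = true;
			return 1;
		}
	}
	return 0;
}

int main(void)
{
	int work_a, work_b;
	struct waiter waiters[] = { { &work_a, false }, { &work_b, false } };

	wake_matching(waiters, 2, &work_b);
	printf("waiter on work_a woken: %d\n", waiters[0].woken);
	printf("waiter on work_b woken: %d\n", waiters[1].woken);
	return 0;
}

The per-waiter key is what avoids the thundering-herd problem the comment describes: cancellers of unrelated work items sharing the same waitqueue are never woken.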
diff --git a/lib/Makefile b/lib/Makefile index 87eb3bffc283..58f74d2dd396 100644 --- a/lib/Makefile +++ b/lib/Makefile | |||
@@ -24,7 +24,7 @@ obj-y += lockref.o | |||
24 | 24 | ||
25 | obj-y += bcd.o div64.o sort.o parser.o halfmd4.o debug_locks.o random32.o \ | 25 | obj-y += bcd.o div64.o sort.o parser.o halfmd4.o debug_locks.o random32.o \ |
26 | bust_spinlocks.o kasprintf.o bitmap.o scatterlist.o \ | 26 | bust_spinlocks.o kasprintf.o bitmap.o scatterlist.o \ |
27 | gcd.o lcm.o list_sort.o uuid.o flex_array.o clz_ctz.o \ | 27 | gcd.o lcm.o list_sort.o uuid.o flex_array.o iov_iter.o clz_ctz.o \ |
28 | bsearch.o find_last_bit.o find_next_bit.o llist.o memweight.o kfifo.o \ | 28 | bsearch.o find_last_bit.o find_next_bit.o llist.o memweight.o kfifo.o \ |
29 | percpu-refcount.o percpu_ida.o rhashtable.o reciprocal_div.o | 29 | percpu-refcount.o percpu_ida.o rhashtable.o reciprocal_div.o |
30 | obj-y += string_helpers.o | 30 | obj-y += string_helpers.o |
diff --git a/mm/iov_iter.c b/lib/iov_iter.c index 827732047da1..9d96e283520c 100644 --- a/mm/iov_iter.c +++ b/lib/iov_iter.c | |||
@@ -751,3 +751,18 @@ int iov_iter_npages(const struct iov_iter *i, int maxpages) | |||
751 | return npages; | 751 | return npages; |
752 | } | 752 | } |
753 | EXPORT_SYMBOL(iov_iter_npages); | 753 | EXPORT_SYMBOL(iov_iter_npages); |
754 | |||
755 | const void *dup_iter(struct iov_iter *new, struct iov_iter *old, gfp_t flags) | ||
756 | { | ||
757 | *new = *old; | ||
758 | if (new->type & ITER_BVEC) | ||
759 | return new->bvec = kmemdup(new->bvec, | ||
760 | new->nr_segs * sizeof(struct bio_vec), | ||
761 | flags); | ||
762 | else | ||
763 | /* iovec and kvec have identical layout */ | ||
764 | return new->iov = kmemdup(new->iov, | ||
765 | new->nr_segs * sizeof(struct iovec), | ||
766 | flags); | ||
767 | } | ||
768 | EXPORT_SYMBOL(dup_iter); | ||
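Aside: dup_iter() above clones an iterator head by value and deep-copies its segment array with kmemdup(). A userspace sketch of the same idea using malloc/memcpy on a struct iovec array (illustrative struct, not the kernel iov_iter):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/uio.h>

struct iter {
	const struct iovec *iov;
	unsigned long nr_segs;
};

static const void *dup_iter(struct iter *new, const struct iter *old)
{
	struct iovec *copy;

	*new = *old;				/* copy counts, offsets, ... */
	copy = malloc(old->nr_segs * sizeof(*copy));
	if (copy)
		memcpy(copy, old->iov, old->nr_segs * sizeof(*copy));
	new->iov = copy;
	return copy;				/* caller frees this pointer */
}

int main(void)
{
	char buf[8];
	struct iovec segs[1] = { { buf, sizeof(buf) } };
	struct iter orig = { segs, 1 }, clone;

	if (dup_iter(&clone, &orig))
		printf("clone has %lu segment(s)\n", clone.nr_segs);
	free((void *)clone.iov);
	return 0;
}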
diff --git a/lib/seq_buf.c b/lib/seq_buf.c index 88c0854bd752..5c94e1012a91 100644 --- a/lib/seq_buf.c +++ b/lib/seq_buf.c | |||
@@ -61,7 +61,7 @@ int seq_buf_vprintf(struct seq_buf *s, const char *fmt, va_list args) | |||
61 | 61 | ||
62 | if (s->len < s->size) { | 62 | if (s->len < s->size) { |
63 | len = vsnprintf(s->buffer + s->len, s->size - s->len, fmt, args); | 63 | len = vsnprintf(s->buffer + s->len, s->size - s->len, fmt, args); |
64 | if (seq_buf_can_fit(s, len)) { | 64 | if (s->len + len < s->size) { |
65 | s->len += len; | 65 | s->len += len; |
66 | return 0; | 66 | return 0; |
67 | } | 67 | } |
@@ -118,7 +118,7 @@ int seq_buf_bprintf(struct seq_buf *s, const char *fmt, const u32 *binary) | |||
118 | 118 | ||
119 | if (s->len < s->size) { | 119 | if (s->len < s->size) { |
120 | ret = bstr_printf(s->buffer + s->len, len, fmt, binary); | 120 | ret = bstr_printf(s->buffer + s->len, len, fmt, binary); |
121 | if (seq_buf_can_fit(s, ret)) { | 121 | if (s->len + ret < s->size) { |
122 | s->len += ret; | 122 | s->len += ret; |
123 | return 0; | 123 | return 0; |
124 | } | 124 | } |
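Aside: the seq_buf hunks revert to comparing the would-be output length against the remaining space directly. A small userspace reminder of the snprintf semantics that check relies on:

#include <stdio.h>

int main(void)
{
	char buf[16];
	size_t used = 0;
	/* snprintf returns the length the output *would* have had */
	int would_be = snprintf(buf + used, sizeof(buf) - used,
				"%s", "a fairly long message");

	if (used + (size_t)would_be < sizeof(buf))
		used += (size_t)would_be;
	else
		printf("overflowed: needed %d, had %zu\n",
		       would_be, sizeof(buf) - used);
	return 0;
}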
diff --git a/mm/Makefile b/mm/Makefile index 3c1caa2693bd..15dbe9903c27 100644 --- a/mm/Makefile +++ b/mm/Makefile | |||
@@ -21,7 +21,7 @@ obj-y := filemap.o mempool.o oom_kill.o \ | |||
21 | mm_init.o mmu_context.o percpu.o slab_common.o \ | 21 | mm_init.o mmu_context.o percpu.o slab_common.o \ |
22 | compaction.o vmacache.o \ | 22 | compaction.o vmacache.o \ |
23 | interval_tree.o list_lru.o workingset.o \ | 23 | interval_tree.o list_lru.o workingset.o \ |
24 | iov_iter.o debug.o $(mmu-y) | 24 | debug.o $(mmu-y) |
25 | 25 | ||
26 | obj-y += init-mm.o | 26 | obj-y += init-mm.o |
27 | 27 | ||
diff --git a/mm/cma.c b/mm/cma.c --- a/mm/cma.c +++ b/mm/cma.c | |||
@@ -64,15 +64,17 @@ static unsigned long cma_bitmap_aligned_mask(struct cma *cma, int align_order) | |||
64 | return (1UL << (align_order - cma->order_per_bit)) - 1; | 64 | return (1UL << (align_order - cma->order_per_bit)) - 1; |
65 | } | 65 | } |
66 | 66 | ||
67 | /* | ||
68 | * Find a PFN aligned to the specified order and return an offset represented in | ||
69 | * order_per_bits. | ||
70 | */ | ||
67 | static unsigned long cma_bitmap_aligned_offset(struct cma *cma, int align_order) | 71 | static unsigned long cma_bitmap_aligned_offset(struct cma *cma, int align_order) |
68 | { | 72 | { |
69 | unsigned int alignment; | ||
70 | |||
71 | if (align_order <= cma->order_per_bit) | 73 | if (align_order <= cma->order_per_bit) |
72 | return 0; | 74 | return 0; |
73 | alignment = 1UL << (align_order - cma->order_per_bit); | 75 | |
74 | return ALIGN(cma->base_pfn, alignment) - | 76 | return (ALIGN(cma->base_pfn, (1UL << align_order)) |
75 | (cma->base_pfn >> cma->order_per_bit); | 77 | - cma->base_pfn) >> cma->order_per_bit; |
76 | } | 78 | } |
77 | 79 | ||
78 | static unsigned long cma_bitmap_maxno(struct cma *cma) | 80 | static unsigned long cma_bitmap_maxno(struct cma *cma) |
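Aside: a worked example of the corrected cma_bitmap_aligned_offset() arithmetic above, with made-up sample values; the base PFN is kept aligned to order_per_bit so the final shift is exact:

#include <stdio.h>

#define ALIGN(x, a)	(((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	unsigned long base_pfn = 12034;	/* even, i.e. aligned to order_per_bit */
	unsigned int order_per_bit = 1;	/* one bitmap bit covers 2 pages       */
	unsigned int align_order = 4;	/* request 16-page alignment           */

	/* align the PFN first, then convert the distance to bitmap units */
	unsigned long offset = (ALIGN(base_pfn, 1UL << align_order) - base_pfn)
				>> order_per_bit;

	printf("first aligned PFN is %lu bit(s) into the bitmap\n", offset);
	return 0;
}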
diff --git a/mm/huge_memory.c b/mm/huge_memory.c index fc00c8cb5a82..626e93db28ba 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c | |||
@@ -1295,8 +1295,13 @@ int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma, | |||
1295 | * Avoid grouping on DSO/COW pages in specific and RO pages | 1295 | * Avoid grouping on DSO/COW pages in specific and RO pages |
1296 | * in general, RO pages shouldn't hurt as much anyway since | 1296 | * in general, RO pages shouldn't hurt as much anyway since |
1297 | * they can be in shared cache state. | 1297 | * they can be in shared cache state. |
1298 | * | ||
1299 | * FIXME! This checks "pmd_dirty()" as an approximation of | ||
1300 | * "is this a read-only page", since checking "pmd_write()" | ||
1301 | * is even more broken. We haven't actually turned this into | ||
1302 | * a writable page, so pmd_write() will always be false. | ||
1298 | */ | 1303 | */ |
1299 | if (!pmd_write(pmd)) | 1304 | if (!pmd_dirty(pmd)) |
1300 | flags |= TNF_NO_GROUP; | 1305 | flags |= TNF_NO_GROUP; |
1301 | 1306 | ||
1302 | /* | 1307 | /* |
@@ -1482,6 +1487,7 @@ int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd, | |||
1482 | 1487 | ||
1483 | if (__pmd_trans_huge_lock(pmd, vma, &ptl) == 1) { | 1488 | if (__pmd_trans_huge_lock(pmd, vma, &ptl) == 1) { |
1484 | pmd_t entry; | 1489 | pmd_t entry; |
1490 | ret = 1; | ||
1485 | 1491 | ||
1486 | /* | 1492 | /* |
1487 | * Avoid trapping faults against the zero page. The read-only | 1493 | * Avoid trapping faults against the zero page. The read-only |
@@ -1490,11 +1496,10 @@ int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd, | |||
1490 | */ | 1496 | */ |
1491 | if (prot_numa && is_huge_zero_pmd(*pmd)) { | 1497 | if (prot_numa && is_huge_zero_pmd(*pmd)) { |
1492 | spin_unlock(ptl); | 1498 | spin_unlock(ptl); |
1493 | return 0; | 1499 | return ret; |
1494 | } | 1500 | } |
1495 | 1501 | ||
1496 | if (!prot_numa || !pmd_protnone(*pmd)) { | 1502 | if (!prot_numa || !pmd_protnone(*pmd)) { |
1497 | ret = 1; | ||
1498 | entry = pmdp_get_and_clear_notify(mm, addr, pmd); | 1503 | entry = pmdp_get_and_clear_notify(mm, addr, pmd); |
1499 | entry = pmd_modify(entry, newprot); | 1504 | entry = pmd_modify(entry, newprot); |
1500 | ret = HPAGE_PMD_NR; | 1505 | ret = HPAGE_PMD_NR; |
diff --git a/mm/hugetlb.c b/mm/hugetlb.c index 0a9ac6c26832..c41b2a0ee273 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c | |||
@@ -917,7 +917,6 @@ static void prep_compound_gigantic_page(struct page *page, unsigned long order) | |||
917 | __SetPageHead(page); | 917 | __SetPageHead(page); |
918 | __ClearPageReserved(page); | 918 | __ClearPageReserved(page); |
919 | for (i = 1; i < nr_pages; i++, p = mem_map_next(p, page, i)) { | 919 | for (i = 1; i < nr_pages; i++, p = mem_map_next(p, page, i)) { |
920 | __SetPageTail(p); | ||
921 | /* | 920 | /* |
922 | * For gigantic hugepages allocated through bootmem at | 921 | * For gigantic hugepages allocated through bootmem at |
923 | * boot, it's safer to be consistent with the not-gigantic | 922 | * boot, it's safer to be consistent with the not-gigantic |
@@ -933,6 +932,9 @@ static void prep_compound_gigantic_page(struct page *page, unsigned long order) | |||
933 | __ClearPageReserved(p); | 932 | __ClearPageReserved(p); |
934 | set_page_count(p, 0); | 933 | set_page_count(p, 0); |
935 | p->first_page = page; | 934 | p->first_page = page; |
935 | /* Make sure p->first_page is always valid for PageTail() */ | ||
936 | smp_wmb(); | ||
937 | __SetPageTail(p); | ||
936 | } | 938 | } |
937 | } | 939 | } |
938 | 940 | ||
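Aside: the hugetlb hunk publishes p->first_page before setting the tail flag, separated by smp_wmb(). A userspace sketch of that publish/observe ordering using C11 release/acquire in place of the kernel barriers (illustrative only):

#include <stdatomic.h>
#include <stdio.h>

struct tail_page {
	void *first_page;	/* payload, published first   */
	atomic_int is_tail;	/* flag, published second     */
};

static void publish(struct tail_page *p, void *head)
{
	p->first_page = head;
	/* release: the payload above is visible before the flag flips */
	atomic_store_explicit(&p->is_tail, 1, memory_order_release);
}

static void *read_head(struct tail_page *p)
{
	if (atomic_load_explicit(&p->is_tail, memory_order_acquire))
		return p->first_page;	/* guaranteed to see the payload */
	return NULL;
}

int main(void)
{
	static int head;
	struct tail_page p = { .first_page = NULL };

	publish(&p, &head);
	printf("head seen: %s\n", read_head(&p) ? "yes" : "no");
	return 0;
}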
diff --git a/mm/kasan/kasan.c b/mm/kasan/kasan.c index 78fee632a7ee..936d81661c47 100644 --- a/mm/kasan/kasan.c +++ b/mm/kasan/kasan.c | |||
@@ -29,6 +29,7 @@ | |||
29 | #include <linux/stacktrace.h> | 29 | #include <linux/stacktrace.h> |
30 | #include <linux/string.h> | 30 | #include <linux/string.h> |
31 | #include <linux/types.h> | 31 | #include <linux/types.h> |
32 | #include <linux/vmalloc.h> | ||
32 | #include <linux/kasan.h> | 33 | #include <linux/kasan.h> |
33 | 34 | ||
34 | #include "kasan.h" | 35 | #include "kasan.h" |
@@ -414,12 +415,19 @@ int kasan_module_alloc(void *addr, size_t size) | |||
414 | GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, | 415 | GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, |
415 | PAGE_KERNEL, VM_NO_GUARD, NUMA_NO_NODE, | 416 | PAGE_KERNEL, VM_NO_GUARD, NUMA_NO_NODE, |
416 | __builtin_return_address(0)); | 417 | __builtin_return_address(0)); |
417 | return ret ? 0 : -ENOMEM; | 418 | |
419 | if (ret) { | ||
420 | find_vm_area(addr)->flags |= VM_KASAN; | ||
421 | return 0; | ||
422 | } | ||
423 | |||
424 | return -ENOMEM; | ||
418 | } | 425 | } |
419 | 426 | ||
420 | void kasan_module_free(void *addr) | 427 | void kasan_free_shadow(const struct vm_struct *vm) |
421 | { | 428 | { |
422 | vfree(kasan_mem_to_shadow(addr)); | 429 | if (vm->flags & VM_KASAN) |
430 | vfree(kasan_mem_to_shadow(vm->addr)); | ||
423 | } | 431 | } |
424 | 432 | ||
425 | static void register_global(struct kasan_global *global) | 433 | static void register_global(struct kasan_global *global) |
diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 9fe07692eaad..b34ef4a32a3b 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c | |||
@@ -5232,7 +5232,9 @@ static void mem_cgroup_bind(struct cgroup_subsys_state *root_css) | |||
5232 | * on for the root memcg is enough. | 5232 | * on for the root memcg is enough. |
5233 | */ | 5233 | */ |
5234 | if (cgroup_on_dfl(root_css->cgroup)) | 5234 | if (cgroup_on_dfl(root_css->cgroup)) |
5235 | mem_cgroup_from_css(root_css)->use_hierarchy = true; | 5235 | root_mem_cgroup->use_hierarchy = true; |
5236 | else | ||
5237 | root_mem_cgroup->use_hierarchy = false; | ||
5236 | } | 5238 | } |
5237 | 5239 | ||
5238 | static u64 memory_current_read(struct cgroup_subsys_state *css, | 5240 | static u64 memory_current_read(struct cgroup_subsys_state *css, |
diff --git a/mm/memory.c b/mm/memory.c index 8068893697bb..411144f977b1 100644 --- a/mm/memory.c +++ b/mm/memory.c | |||
@@ -3072,8 +3072,13 @@ static int do_numa_page(struct mm_struct *mm, struct vm_area_struct *vma, | |||
3072 | * Avoid grouping on DSO/COW pages in specific and RO pages | 3072 | * Avoid grouping on DSO/COW pages in specific and RO pages |
3073 | * in general, RO pages shouldn't hurt as much anyway since | 3073 | * in general, RO pages shouldn't hurt as much anyway since |
3074 | * they can be in shared cache state. | 3074 | * they can be in shared cache state. |
3075 | * | ||
3076 | * FIXME! This checks "pmd_dirty()" as an approximation of | ||
3077 | * "is this a read-only page", since checking "pmd_write()" | ||
3078 | * is even more broken. We haven't actually turned this into | ||
3079 | * a writable page, so pmd_write() will always be false. | ||
3075 | */ | 3080 | */ |
3076 | if (!pte_write(pte)) | 3081 | if (!pte_dirty(pte)) |
3077 | flags |= TNF_NO_GROUP; | 3082 | flags |= TNF_NO_GROUP; |
3078 | 3083 | ||
3079 | /* | 3084 | /* |
diff --git a/mm/mlock.c b/mm/mlock.c index 73cf0987088c..8a54cd214925 100644 --- a/mm/mlock.c +++ b/mm/mlock.c | |||
@@ -26,10 +26,10 @@ | |||
26 | 26 | ||
27 | int can_do_mlock(void) | 27 | int can_do_mlock(void) |
28 | { | 28 | { |
29 | if (capable(CAP_IPC_LOCK)) | ||
30 | return 1; | ||
31 | if (rlimit(RLIMIT_MEMLOCK) != 0) | 29 | if (rlimit(RLIMIT_MEMLOCK) != 0) |
32 | return 1; | 30 | return 1; |
31 | if (capable(CAP_IPC_LOCK)) | ||
32 | return 1; | ||
33 | return 0; | 33 | return 0; |
34 | } | 34 | } |
35 | EXPORT_SYMBOL(can_do_mlock); | 35 | EXPORT_SYMBOL(can_do_mlock); |
diff --git a/mm/nommu.c b/mm/nommu.c index 3e67e7538ecf..3fba2dc97c44 100644 --- a/mm/nommu.c +++ b/mm/nommu.c | |||
@@ -62,6 +62,7 @@ void *high_memory; | |||
62 | EXPORT_SYMBOL(high_memory); | 62 | EXPORT_SYMBOL(high_memory); |
63 | struct page *mem_map; | 63 | struct page *mem_map; |
64 | unsigned long max_mapnr; | 64 | unsigned long max_mapnr; |
65 | EXPORT_SYMBOL(max_mapnr); | ||
65 | unsigned long highest_memmap_pfn; | 66 | unsigned long highest_memmap_pfn; |
66 | struct percpu_counter vm_committed_as; | 67 | struct percpu_counter vm_committed_as; |
67 | int sysctl_overcommit_memory = OVERCOMMIT_GUESS; /* heuristic overcommit */ | 68 | int sysctl_overcommit_memory = OVERCOMMIT_GUESS; /* heuristic overcommit */ |
diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 7abfa70cdc1a..40e29429e7b0 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c | |||
@@ -2373,7 +2373,8 @@ __alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order, | |||
2373 | goto out; | 2373 | goto out; |
2374 | } | 2374 | } |
2375 | /* Exhausted what can be done so it's blamo time */ | 2375 | /* Exhausted what can be done so it's blamo time */ |
2376 | if (out_of_memory(ac->zonelist, gfp_mask, order, ac->nodemask, false)) | 2376 | if (out_of_memory(ac->zonelist, gfp_mask, order, ac->nodemask, false) |
2377 | || WARN_ON_ONCE(gfp_mask & __GFP_NOFAIL)) | ||
2377 | *did_some_progress = 1; | 2378 | *did_some_progress = 1; |
2378 | out: | 2379 | out: |
2379 | oom_zonelist_unlock(ac->zonelist, gfp_mask); | 2380 | oom_zonelist_unlock(ac->zonelist, gfp_mask); |
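
The mm/page_alloc.c hunk makes the OOM path report progress whenever the caller passed __GFP_NOFAIL, even if out_of_memory() did nothing, so a nofail allocation keeps retrying instead of returning NULL; the WARN_ON_ONCE flags the dubious combination. A rough sketch of that retry contract, with an invented try_alloc() in place of the real allocator and an illustrative flag value:

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdio.h>

    #define FAKE_GFP_NOFAIL 0x1u   /* illustrative flag value only */

    /* Invented placeholder: pretend the first few attempts find no memory. */
    static void *try_alloc(unsigned int gfp, int attempt)
    {
        static char fake_page;
        (void)gfp;
        return attempt < 3 ? NULL : &fake_page;
    }

    static void *alloc_with_oom_path(unsigned int gfp)
    {
        for (int attempt = 0; ; attempt++) {
            void *page = try_alloc(gfp, attempt);
            bool progress;

            if (page)
                return page;

            /* OOM path: claim progress either because the OOM killer ran, or
             * because the caller demanded "no failure" and so must retry. */
            progress = false /* out_of_memory(...) */ || (gfp & FAKE_GFP_NOFAIL);
            if (!progress)
                return NULL;   /* caller tolerates failure */
        }
    }

    int main(void)
    {
        printf("nofail result: %p\n", alloc_with_oom_path(FAKE_GFP_NOFAIL));
        printf("plain  result: %p\n", alloc_with_oom_path(0));
        return 0;
    }
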
diff --git a/mm/vmalloc.c b/mm/vmalloc.c index 35b25e1340ca..49abccf29a29 100644 --- a/mm/vmalloc.c +++ b/mm/vmalloc.c | |||
@@ -1418,6 +1418,7 @@ struct vm_struct *remove_vm_area(const void *addr) | |||
1418 | spin_unlock(&vmap_area_lock); | 1418 | spin_unlock(&vmap_area_lock); |
1419 | 1419 | ||
1420 | vmap_debug_free_range(va->va_start, va->va_end); | 1420 | vmap_debug_free_range(va->va_start, va->va_end); |
1421 | kasan_free_shadow(vm); | ||
1421 | free_unmap_vmap_area(va); | 1422 | free_unmap_vmap_area(va); |
1422 | vm->size -= PAGE_SIZE; | 1423 | vm->size -= PAGE_SIZE; |
1423 | 1424 | ||
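
The added kasan_free_shadow() call releases the shadow memory that KASAN keeps for a vmalloc area once the area itself is torn down. KASAN maintains one shadow byte per eight bytes of address space, so the shadow for any region is a fixed linear transform of its address; a small sketch of that mapping (the shadow offset is arch-specific and only illustrative here):

    #include <stdint.h>
    #include <stdio.h>

    #define SHADOW_SCALE_SHIFT 3                       /* 1 shadow byte per 8 bytes */
    #define SHADOW_OFFSET      0xdffffc0000000000ULL   /* arch-specific; illustrative */

    static uint64_t mem_to_shadow(uint64_t addr)
    {
        return (addr >> SHADOW_SCALE_SHIFT) + SHADOW_OFFSET;
    }

    int main(void)
    {
        uint64_t start = 0xffffc90000000000ULL;   /* a typical x86-64 vmalloc address */
        uint64_t end   = start + (1u << 20);      /* 1 MiB area */

        /* Once the vmalloc area is gone, the shadow bytes for
         * [mem_to_shadow(start), mem_to_shadow(end)) can be released too. */
        printf("shadow range: %#llx - %#llx\n",
               (unsigned long long)mem_to_shadow(start),
               (unsigned long long)mem_to_shadow(end));
        return 0;
    }
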
diff --git a/net/can/af_can.c b/net/can/af_can.c index 66e08040ced7..32d710eaf1fc 100644 --- a/net/can/af_can.c +++ b/net/can/af_can.c | |||
@@ -259,6 +259,9 @@ int can_send(struct sk_buff *skb, int loop) | |||
259 | goto inval_skb; | 259 | goto inval_skb; |
260 | } | 260 | } |
261 | 261 | ||
262 | skb->ip_summed = CHECKSUM_UNNECESSARY; | ||
263 | |||
264 | skb_reset_mac_header(skb); | ||
262 | skb_reset_network_header(skb); | 265 | skb_reset_network_header(skb); |
263 | skb_reset_transport_header(skb); | 266 | skb_reset_transport_header(skb); |
264 | 267 | ||
diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c index 2c8d98e728c0..145a50c4d566 100644 --- a/net/ipv4/ip_fragment.c +++ b/net/ipv4/ip_fragment.c | |||
@@ -659,27 +659,30 @@ EXPORT_SYMBOL(ip_defrag); | |||
659 | struct sk_buff *ip_check_defrag(struct sk_buff *skb, u32 user) | 659 | struct sk_buff *ip_check_defrag(struct sk_buff *skb, u32 user) |
660 | { | 660 | { |
661 | struct iphdr iph; | 661 | struct iphdr iph; |
662 | int netoff; | ||
662 | u32 len; | 663 | u32 len; |
663 | 664 | ||
664 | if (skb->protocol != htons(ETH_P_IP)) | 665 | if (skb->protocol != htons(ETH_P_IP)) |
665 | return skb; | 666 | return skb; |
666 | 667 | ||
667 | if (skb_copy_bits(skb, 0, &iph, sizeof(iph)) < 0) | 668 | netoff = skb_network_offset(skb); |
669 | |||
670 | if (skb_copy_bits(skb, netoff, &iph, sizeof(iph)) < 0) | ||
668 | return skb; | 671 | return skb; |
669 | 672 | ||
670 | if (iph.ihl < 5 || iph.version != 4) | 673 | if (iph.ihl < 5 || iph.version != 4) |
671 | return skb; | 674 | return skb; |
672 | 675 | ||
673 | len = ntohs(iph.tot_len); | 676 | len = ntohs(iph.tot_len); |
674 | if (skb->len < len || len < (iph.ihl * 4)) | 677 | if (skb->len < netoff + len || len < (iph.ihl * 4)) |
675 | return skb; | 678 | return skb; |
676 | 679 | ||
677 | if (ip_is_fragment(&iph)) { | 680 | if (ip_is_fragment(&iph)) { |
678 | skb = skb_share_check(skb, GFP_ATOMIC); | 681 | skb = skb_share_check(skb, GFP_ATOMIC); |
679 | if (skb) { | 682 | if (skb) { |
680 | if (!pskb_may_pull(skb, iph.ihl*4)) | 683 | if (!pskb_may_pull(skb, netoff + iph.ihl * 4)) |
681 | return skb; | 684 | return skb; |
682 | if (pskb_trim_rcsum(skb, len)) | 685 | if (pskb_trim_rcsum(skb, netoff + len)) |
683 | return skb; | 686 | return skb; |
684 | memset(IPCB(skb), 0, sizeof(struct inet_skb_parm)); | 687 | memset(IPCB(skb), 0, sizeof(struct inet_skb_parm)); |
685 | if (ip_defrag(skb, user)) | 688 | if (ip_defrag(skb, user)) |
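
The ip_check_defrag() change adds skb_network_offset() to every offset and length test, which matters for callers whose buffers still carry a link-layer header in front of the IP header (a packet-socket path, for instance): lengths must be compared against netoff + tot_len, and pulls and trims must cover the leading bytes too. A hedged sketch of the corrected bounds arithmetic on plain integers:

    #include <stdbool.h>
    #include <stdio.h>

    /* Simplified view of the checks in ip_check_defrag(): lengths are measured
     * from the start of the buffer, the IP header begins at 'netoff'. */
    static bool frag_candidate_ok(unsigned int skb_len, unsigned int netoff,
                                  unsigned int tot_len, unsigned int ihl_words)
    {
        unsigned int hdr_len = ihl_words * 4;

        if (ihl_words < 5)                  /* malformed header length */
            return false;
        if (skb_len < netoff + tot_len)     /* IP packet overruns the buffer */
            return false;
        if (tot_len < hdr_len)              /* total length shorter than header */
            return false;
        return true;
    }

    int main(void)
    {
        /* 14 bytes of Ethernet header precede the IP header; the old checks,
         * which ignored netoff, would have accepted the second case. */
        printf("%d\n", frag_candidate_ok(14 + 1500, 14, 1500, 5));   /* 1: fits  */
        printf("%d\n", frag_candidate_ok(1500, 14, 1500, 5));        /* 0: short */
        return 0;
    }
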
diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c index 31d8c71986b4..5cd99271d3a6 100644 --- a/net/ipv4/ip_sockglue.c +++ b/net/ipv4/ip_sockglue.c | |||
@@ -432,17 +432,32 @@ void ip_local_error(struct sock *sk, int err, __be32 daddr, __be16 port, u32 inf | |||
432 | kfree_skb(skb); | 432 | kfree_skb(skb); |
433 | } | 433 | } |
434 | 434 | ||
435 | static bool ipv4_pktinfo_prepare_errqueue(const struct sock *sk, | 435 | /* IPv4 supports cmsg on all imcp errors and some timestamps |
436 | const struct sk_buff *skb, | 436 | * |
437 | int ee_origin) | 437 | * Timestamp code paths do not initialize the fields expected by cmsg: |
438 | * the PKTINFO fields in skb->cb[]. Fill those in here. | ||
439 | */ | ||
440 | static bool ipv4_datagram_support_cmsg(const struct sock *sk, | ||
441 | struct sk_buff *skb, | ||
442 | int ee_origin) | ||
438 | { | 443 | { |
439 | struct in_pktinfo *info = PKTINFO_SKB_CB(skb); | 444 | struct in_pktinfo *info; |
445 | |||
446 | if (ee_origin == SO_EE_ORIGIN_ICMP) | ||
447 | return true; | ||
440 | 448 | ||
441 | if ((ee_origin != SO_EE_ORIGIN_TIMESTAMPING) || | 449 | if (ee_origin == SO_EE_ORIGIN_LOCAL) |
442 | (!(sk->sk_tsflags & SOF_TIMESTAMPING_OPT_CMSG)) || | 450 | return false; |
451 | |||
452 | /* Support IP_PKTINFO on tstamp packets if requested, to correlate | ||
453 | * timestamp with egress dev. Not possible for packets without dev | ||
454 | * or without payload (SOF_TIMESTAMPING_OPT_TSONLY). | ||
455 | */ | ||
456 | if ((!(sk->sk_tsflags & SOF_TIMESTAMPING_OPT_CMSG)) || | ||
443 | (!skb->dev)) | 457 | (!skb->dev)) |
444 | return false; | 458 | return false; |
445 | 459 | ||
460 | info = PKTINFO_SKB_CB(skb); | ||
446 | info->ipi_spec_dst.s_addr = ip_hdr(skb)->saddr; | 461 | info->ipi_spec_dst.s_addr = ip_hdr(skb)->saddr; |
447 | info->ipi_ifindex = skb->dev->ifindex; | 462 | info->ipi_ifindex = skb->dev->ifindex; |
448 | return true; | 463 | return true; |
@@ -483,7 +498,7 @@ int ip_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len) | |||
483 | 498 | ||
484 | serr = SKB_EXT_ERR(skb); | 499 | serr = SKB_EXT_ERR(skb); |
485 | 500 | ||
486 | if (sin && skb->len) { | 501 | if (sin && serr->port) { |
487 | sin->sin_family = AF_INET; | 502 | sin->sin_family = AF_INET; |
488 | sin->sin_addr.s_addr = *(__be32 *)(skb_network_header(skb) + | 503 | sin->sin_addr.s_addr = *(__be32 *)(skb_network_header(skb) + |
489 | serr->addr_offset); | 504 | serr->addr_offset); |
@@ -496,9 +511,7 @@ int ip_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len) | |||
496 | sin = &errhdr.offender; | 511 | sin = &errhdr.offender; |
497 | memset(sin, 0, sizeof(*sin)); | 512 | memset(sin, 0, sizeof(*sin)); |
498 | 513 | ||
499 | if (skb->len && | 514 | if (ipv4_datagram_support_cmsg(sk, skb, serr->ee.ee_origin)) { |
500 | (serr->ee.ee_origin == SO_EE_ORIGIN_ICMP || | ||
501 | ipv4_pktinfo_prepare_errqueue(sk, skb, serr->ee.ee_origin))) { | ||
502 | sin->sin_family = AF_INET; | 515 | sin->sin_family = AF_INET; |
503 | sin->sin_addr.s_addr = ip_hdr(skb)->saddr; | 516 | sin->sin_addr.s_addr = ip_hdr(skb)->saddr; |
504 | if (inet_sk(sk)->cmsg_flags) | 517 | if (inet_sk(sk)->cmsg_flags) |
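
With ipv4_datagram_support_cmsg() in place, an application that sets SOF_TIMESTAMPING_OPT_CMSG can read IP_PKTINFO ancillary data together with transmit timestamps from the error queue and correlate each timestamp with the egress interface. A rough userspace sketch of that pattern; error handling is trimmed and the flag combination is only an example:

    #define _GNU_SOURCE
    #include <sys/socket.h>
    #include <sys/uio.h>
    #include <netinet/in.h>
    #include <linux/net_tstamp.h>
    #include <stdio.h>
    #include <string.h>

    #ifndef SO_TIMESTAMPING
    #define SO_TIMESTAMPING 37   /* fallback for older libc headers */
    #endif

    int main(void)
    {
        int fd = socket(AF_INET, SOCK_DGRAM, 0);
        int ts = SOF_TIMESTAMPING_TX_SOFTWARE | SOF_TIMESTAMPING_SOFTWARE |
                 SOF_TIMESTAMPING_OPT_CMSG;
        int on = 1;

        setsockopt(fd, SOL_SOCKET, SO_TIMESTAMPING, &ts, sizeof(ts));
        setsockopt(fd, IPPROTO_IP, IP_PKTINFO, &on, sizeof(on));

        /* ... sendto() a datagram here, then poll the error queue ... */

        char data[256], ctrl[512];
        struct iovec iov = { .iov_base = data, .iov_len = sizeof(data) };
        struct msghdr msg = {
            .msg_iov = &iov, .msg_iovlen = 1,
            .msg_control = ctrl, .msg_controllen = sizeof(ctrl),
        };

        if (recvmsg(fd, &msg, MSG_ERRQUEUE) >= 0) {
            for (struct cmsghdr *c = CMSG_FIRSTHDR(&msg); c;
                 c = CMSG_NXTHDR(&msg, c)) {
                if (c->cmsg_level == IPPROTO_IP && c->cmsg_type == IP_PKTINFO) {
                    struct in_pktinfo info;

                    memcpy(&info, CMSG_DATA(c), sizeof(info));
                    printf("timestamped skb left via ifindex %d\n",
                           info.ipi_ifindex);
                }
            }
        }
        return 0;
    }
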
diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c index e9f66e1cda50..208d5439e59b 100644 --- a/net/ipv4/ping.c +++ b/net/ipv4/ping.c | |||
@@ -259,6 +259,9 @@ int ping_init_sock(struct sock *sk) | |||
259 | kgid_t low, high; | 259 | kgid_t low, high; |
260 | int ret = 0; | 260 | int ret = 0; |
261 | 261 | ||
262 | if (sk->sk_family == AF_INET6) | ||
263 | sk->sk_ipv6only = 1; | ||
264 | |||
262 | inet_get_ping_group_range_net(net, &low, &high); | 265 | inet_get_ping_group_range_net(net, &low, &high); |
263 | if (gid_lte(low, group) && gid_lte(group, high)) | 266 | if (gid_lte(low, group) && gid_lte(group, high)) |
264 | return 0; | 267 | return 0; |
@@ -305,6 +308,11 @@ static int ping_check_bind_addr(struct sock *sk, struct inet_sock *isk, | |||
305 | if (addr_len < sizeof(*addr)) | 308 | if (addr_len < sizeof(*addr)) |
306 | return -EINVAL; | 309 | return -EINVAL; |
307 | 310 | ||
311 | if (addr->sin_family != AF_INET && | ||
312 | !(addr->sin_family == AF_UNSPEC && | ||
313 | addr->sin_addr.s_addr == htonl(INADDR_ANY))) | ||
314 | return -EAFNOSUPPORT; | ||
315 | |||
308 | pr_debug("ping_check_bind_addr(sk=%p,addr=%pI4,port=%d)\n", | 316 | pr_debug("ping_check_bind_addr(sk=%p,addr=%pI4,port=%d)\n", |
309 | sk, &addr->sin_addr.s_addr, ntohs(addr->sin_port)); | 317 | sk, &addr->sin_addr.s_addr, ntohs(addr->sin_port)); |
310 | 318 | ||
@@ -330,7 +338,7 @@ static int ping_check_bind_addr(struct sock *sk, struct inet_sock *isk, | |||
330 | return -EINVAL; | 338 | return -EINVAL; |
331 | 339 | ||
332 | if (addr->sin6_family != AF_INET6) | 340 | if (addr->sin6_family != AF_INET6) |
333 | return -EINVAL; | 341 | return -EAFNOSUPPORT; |
334 | 342 | ||
335 | pr_debug("ping_check_bind_addr(sk=%p,addr=%pI6c,port=%d)\n", | 343 | pr_debug("ping_check_bind_addr(sk=%p,addr=%pI6c,port=%d)\n", |
336 | sk, addr->sin6_addr.s6_addr, ntohs(addr->sin6_port)); | 344 | sk, addr->sin6_addr.s6_addr, ntohs(addr->sin6_port)); |
@@ -716,7 +724,7 @@ static int ping_v4_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *m | |||
716 | if (msg->msg_namelen < sizeof(*usin)) | 724 | if (msg->msg_namelen < sizeof(*usin)) |
717 | return -EINVAL; | 725 | return -EINVAL; |
718 | if (usin->sin_family != AF_INET) | 726 | if (usin->sin_family != AF_INET) |
719 | return -EINVAL; | 727 | return -EAFNOSUPPORT; |
720 | daddr = usin->sin_addr.s_addr; | 728 | daddr = usin->sin_addr.s_addr; |
721 | /* no remote port */ | 729 | /* no remote port */ |
722 | } else { | 730 | } else { |
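
After the ping.c changes, handing a sockaddr of the wrong family to bind(), connect() or sendmsg() on an unprivileged ICMP datagram socket fails with EAFNOSUPPORT instead of EINVAL (AF_UNSPEC with INADDR_ANY remains acceptable for bind). A small probe program, assuming the net.ipv4.ping_group_range sysctl admits the calling group:

    #include <errno.h>
    #include <netinet/in.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/socket.h>

    int main(void)
    {
        int fd = socket(AF_INET, SOCK_DGRAM, IPPROTO_ICMP);

        if (fd < 0) {
            perror("socket (check net.ipv4.ping_group_range)");
            return 1;
        }

        /* Deliberately hand an AF_INET6 address to an AF_INET ping socket. */
        struct sockaddr_in6 wrong = { .sin6_family = AF_INET6 };

        if (bind(fd, (struct sockaddr *)&wrong, sizeof(wrong)) < 0)
            printf("bind: %s (EAFNOSUPPORT expected; EINVAL before the fix)\n",
                   strerror(errno));
        return 0;
    }
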
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index 9d72a0fcd928..995a2259bcfc 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c | |||
@@ -835,17 +835,13 @@ static unsigned int tcp_xmit_size_goal(struct sock *sk, u32 mss_now, | |||
835 | int large_allowed) | 835 | int large_allowed) |
836 | { | 836 | { |
837 | struct tcp_sock *tp = tcp_sk(sk); | 837 | struct tcp_sock *tp = tcp_sk(sk); |
838 | u32 new_size_goal, size_goal, hlen; | 838 | u32 new_size_goal, size_goal; |
839 | 839 | ||
840 | if (!large_allowed || !sk_can_gso(sk)) | 840 | if (!large_allowed || !sk_can_gso(sk)) |
841 | return mss_now; | 841 | return mss_now; |
842 | 842 | ||
843 | /* Maybe we should/could use sk->sk_prot->max_header here ? */ | 843 | /* Note : tcp_tso_autosize() will eventually split this later */ |
844 | hlen = inet_csk(sk)->icsk_af_ops->net_header_len + | 844 | new_size_goal = sk->sk_gso_max_size - 1 - MAX_TCP_HEADER; |
845 | inet_csk(sk)->icsk_ext_hdr_len + | ||
846 | tp->tcp_header_len; | ||
847 | |||
848 | new_size_goal = sk->sk_gso_max_size - 1 - hlen; | ||
849 | new_size_goal = tcp_bound_to_half_wnd(tp, new_size_goal); | 845 | new_size_goal = tcp_bound_to_half_wnd(tp, new_size_goal); |
850 | 846 | ||
851 | /* We try hard to avoid divides here */ | 847 | /* We try hard to avoid divides here */ |
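
The tcp_xmit_size_goal() hunk stops summing the per-connection header lengths and simply subtracts the conservative MAX_TCP_HEADER from the GSO limit, relying on tcp_tso_autosize() and TSO splitting to trim the result later. A quick arithmetic sketch of the size-goal computation and the segment rounding that follows in that function; the constants are illustrative:

    #include <stdio.h>

    #define FAKE_MAX_TCP_HEADER 320   /* illustrative; the real value is config-dependent */

    int main(void)
    {
        unsigned int gso_max_size = 65536;   /* typical sk->sk_gso_max_size */
        unsigned int mss = 1448;

        /* New scheme: one conservative constant instead of summing the exact
         * network/extension/TCP header lengths of this connection. */
        unsigned int new_size_goal = gso_max_size - 1 - FAKE_MAX_TCP_HEADER;

        /* The rest of tcp_xmit_size_goal() rounds this down to whole segments. */
        unsigned int segs = new_size_goal / mss;
        unsigned int size_goal = segs * mss;

        printf("size goal: %u bytes (%u segments of %u)\n", size_goal, segs, mss);
        return 0;
    }
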
diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c index c215be70cac0..ace8daca5c83 100644 --- a/net/ipv6/datagram.c +++ b/net/ipv6/datagram.c | |||
@@ -325,14 +325,34 @@ void ipv6_local_rxpmtu(struct sock *sk, struct flowi6 *fl6, u32 mtu) | |||
325 | kfree_skb(skb); | 325 | kfree_skb(skb); |
326 | } | 326 | } |
327 | 327 | ||
328 | static void ip6_datagram_prepare_pktinfo_errqueue(struct sk_buff *skb) | 328 | /* IPv6 supports cmsg on all origins aside from SO_EE_ORIGIN_LOCAL. |
329 | * | ||
330 | * At one point, excluding local errors was a quick test to identify icmp/icmp6 | ||
331 | * errors. This is no longer true, but the test remained, so the v6 stack, | ||
332 | * unlike v4, also honors cmsg requests on all wifi and timestamp errors. | ||
333 | * | ||
334 | * Timestamp code paths do not initialize the fields expected by cmsg: | ||
335 | * the PKTINFO fields in skb->cb[]. Fill those in here. | ||
336 | */ | ||
337 | static bool ip6_datagram_support_cmsg(struct sk_buff *skb, | ||
338 | struct sock_exterr_skb *serr) | ||
329 | { | 339 | { |
330 | int ifindex = skb->dev ? skb->dev->ifindex : -1; | 340 | if (serr->ee.ee_origin == SO_EE_ORIGIN_ICMP || |
341 | serr->ee.ee_origin == SO_EE_ORIGIN_ICMP6) | ||
342 | return true; | ||
343 | |||
344 | if (serr->ee.ee_origin == SO_EE_ORIGIN_LOCAL) | ||
345 | return false; | ||
346 | |||
347 | if (!skb->dev) | ||
348 | return false; | ||
331 | 349 | ||
332 | if (skb->protocol == htons(ETH_P_IPV6)) | 350 | if (skb->protocol == htons(ETH_P_IPV6)) |
333 | IP6CB(skb)->iif = ifindex; | 351 | IP6CB(skb)->iif = skb->dev->ifindex; |
334 | else | 352 | else |
335 | PKTINFO_SKB_CB(skb)->ipi_ifindex = ifindex; | 353 | PKTINFO_SKB_CB(skb)->ipi_ifindex = skb->dev->ifindex; |
354 | |||
355 | return true; | ||
336 | } | 356 | } |
337 | 357 | ||
338 | /* | 358 | /* |
@@ -369,7 +389,7 @@ int ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len) | |||
369 | 389 | ||
370 | serr = SKB_EXT_ERR(skb); | 390 | serr = SKB_EXT_ERR(skb); |
371 | 391 | ||
372 | if (sin && skb->len) { | 392 | if (sin && serr->port) { |
373 | const unsigned char *nh = skb_network_header(skb); | 393 | const unsigned char *nh = skb_network_header(skb); |
374 | sin->sin6_family = AF_INET6; | 394 | sin->sin6_family = AF_INET6; |
375 | sin->sin6_flowinfo = 0; | 395 | sin->sin6_flowinfo = 0; |
@@ -394,14 +414,11 @@ int ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len) | |||
394 | memcpy(&errhdr.ee, &serr->ee, sizeof(struct sock_extended_err)); | 414 | memcpy(&errhdr.ee, &serr->ee, sizeof(struct sock_extended_err)); |
395 | sin = &errhdr.offender; | 415 | sin = &errhdr.offender; |
396 | memset(sin, 0, sizeof(*sin)); | 416 | memset(sin, 0, sizeof(*sin)); |
397 | if (serr->ee.ee_origin != SO_EE_ORIGIN_LOCAL && skb->len) { | 417 | |
418 | if (ip6_datagram_support_cmsg(skb, serr)) { | ||
398 | sin->sin6_family = AF_INET6; | 419 | sin->sin6_family = AF_INET6; |
399 | if (np->rxopt.all) { | 420 | if (np->rxopt.all) |
400 | if (serr->ee.ee_origin != SO_EE_ORIGIN_ICMP && | ||
401 | serr->ee.ee_origin != SO_EE_ORIGIN_ICMP6) | ||
402 | ip6_datagram_prepare_pktinfo_errqueue(skb); | ||
403 | ip6_datagram_recv_common_ctl(sk, msg, skb); | 421 | ip6_datagram_recv_common_ctl(sk, msg, skb); |
404 | } | ||
405 | if (skb->protocol == htons(ETH_P_IPV6)) { | 422 | if (skb->protocol == htons(ETH_P_IPV6)) { |
406 | sin->sin6_addr = ipv6_hdr(skb)->saddr; | 423 | sin->sin6_addr = ipv6_hdr(skb)->saddr; |
407 | if (np->rxopt.all) | 424 | if (np->rxopt.all) |
diff --git a/net/ipv6/ping.c b/net/ipv6/ping.c index bd46f736f61d..a2dfff6ff227 100644 --- a/net/ipv6/ping.c +++ b/net/ipv6/ping.c | |||
@@ -102,9 +102,10 @@ int ping_v6_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, | |||
102 | 102 | ||
103 | if (msg->msg_name) { | 103 | if (msg->msg_name) { |
104 | DECLARE_SOCKADDR(struct sockaddr_in6 *, u, msg->msg_name); | 104 | DECLARE_SOCKADDR(struct sockaddr_in6 *, u, msg->msg_name); |
105 | if (msg->msg_namelen < sizeof(struct sockaddr_in6) || | 105 | if (msg->msg_namelen < sizeof(*u)) |
106 | u->sin6_family != AF_INET6) { | ||
107 | return -EINVAL; | 106 | return -EINVAL; |
107 | if (u->sin6_family != AF_INET6) { | ||
108 | return -EAFNOSUPPORT; | ||
108 | } | 109 | } |
109 | if (sk->sk_bound_dev_if && | 110 | if (sk->sk_bound_dev_if && |
110 | sk->sk_bound_dev_if != u->sin6_scope_id) { | 111 | sk->sk_bound_dev_if != u->sin6_scope_id) { |
diff --git a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c index c47ffd7a0a70..d93ceeb3ef04 100644 --- a/net/netfilter/ipvs/ip_vs_sync.c +++ b/net/netfilter/ipvs/ip_vs_sync.c | |||
@@ -896,6 +896,8 @@ static void ip_vs_proc_conn(struct net *net, struct ip_vs_conn_param *param, | |||
896 | IP_VS_DBG(2, "BACKUP, add new conn. failed\n"); | 896 | IP_VS_DBG(2, "BACKUP, add new conn. failed\n"); |
897 | return; | 897 | return; |
898 | } | 898 | } |
899 | if (!(flags & IP_VS_CONN_F_TEMPLATE)) | ||
900 | kfree(param->pe_data); | ||
899 | } | 901 | } |
900 | 902 | ||
901 | if (opt) | 903 | if (opt) |
@@ -1169,6 +1171,7 @@ static inline int ip_vs_proc_sync_conn(struct net *net, __u8 *p, __u8 *msg_end) | |||
1169 | (opt_flags & IPVS_OPT_F_SEQ_DATA ? &opt : NULL) | 1171 | (opt_flags & IPVS_OPT_F_SEQ_DATA ? &opt : NULL) |
1170 | ); | 1172 | ); |
1171 | #endif | 1173 | #endif |
1174 | ip_vs_pe_put(param.pe); | ||
1172 | return 0; | 1175 | return 0; |
1173 | /* Error exit */ | 1176 | /* Error exit */ |
1174 | out: | 1177 | out: |
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c index 199fd0f27b0e..6ab777912237 100644 --- a/net/netfilter/nf_tables_api.c +++ b/net/netfilter/nf_tables_api.c | |||
@@ -227,7 +227,7 @@ nft_rule_deactivate_next(struct net *net, struct nft_rule *rule) | |||
227 | 227 | ||
228 | static inline void nft_rule_clear(struct net *net, struct nft_rule *rule) | 228 | static inline void nft_rule_clear(struct net *net, struct nft_rule *rule) |
229 | { | 229 | { |
230 | rule->genmask = 0; | 230 | rule->genmask &= ~(1 << gencursor_next(net)); |
231 | } | 231 | } |
232 | 232 | ||
233 | static int | 233 | static int |
@@ -1711,9 +1711,12 @@ static int nf_tables_fill_rule_info(struct sk_buff *skb, struct net *net, | |||
1711 | } | 1711 | } |
1712 | nla_nest_end(skb, list); | 1712 | nla_nest_end(skb, list); |
1713 | 1713 | ||
1714 | if (rule->ulen && | 1714 | if (rule->udata) { |
1715 | nla_put(skb, NFTA_RULE_USERDATA, rule->ulen, nft_userdata(rule))) | 1715 | struct nft_userdata *udata = nft_userdata(rule); |
1716 | goto nla_put_failure; | 1716 | if (nla_put(skb, NFTA_RULE_USERDATA, udata->len + 1, |
1717 | udata->data) < 0) | ||
1718 | goto nla_put_failure; | ||
1719 | } | ||
1717 | 1720 | ||
1718 | nlmsg_end(skb, nlh); | 1721 | nlmsg_end(skb, nlh); |
1719 | return 0; | 1722 | return 0; |
@@ -1896,11 +1899,12 @@ static int nf_tables_newrule(struct sock *nlsk, struct sk_buff *skb, | |||
1896 | struct nft_table *table; | 1899 | struct nft_table *table; |
1897 | struct nft_chain *chain; | 1900 | struct nft_chain *chain; |
1898 | struct nft_rule *rule, *old_rule = NULL; | 1901 | struct nft_rule *rule, *old_rule = NULL; |
1902 | struct nft_userdata *udata; | ||
1899 | struct nft_trans *trans = NULL; | 1903 | struct nft_trans *trans = NULL; |
1900 | struct nft_expr *expr; | 1904 | struct nft_expr *expr; |
1901 | struct nft_ctx ctx; | 1905 | struct nft_ctx ctx; |
1902 | struct nlattr *tmp; | 1906 | struct nlattr *tmp; |
1903 | unsigned int size, i, n, ulen = 0; | 1907 | unsigned int size, i, n, ulen = 0, usize = 0; |
1904 | int err, rem; | 1908 | int err, rem; |
1905 | bool create; | 1909 | bool create; |
1906 | u64 handle, pos_handle; | 1910 | u64 handle, pos_handle; |
@@ -1968,12 +1972,19 @@ static int nf_tables_newrule(struct sock *nlsk, struct sk_buff *skb, | |||
1968 | n++; | 1972 | n++; |
1969 | } | 1973 | } |
1970 | } | 1974 | } |
1975 | /* Check for overflow of dlen field */ | ||
1976 | err = -EFBIG; | ||
1977 | if (size >= 1 << 12) | ||
1978 | goto err1; | ||
1971 | 1979 | ||
1972 | if (nla[NFTA_RULE_USERDATA]) | 1980 | if (nla[NFTA_RULE_USERDATA]) { |
1973 | ulen = nla_len(nla[NFTA_RULE_USERDATA]); | 1981 | ulen = nla_len(nla[NFTA_RULE_USERDATA]); |
1982 | if (ulen > 0) | ||
1983 | usize = sizeof(struct nft_userdata) + ulen; | ||
1984 | } | ||
1974 | 1985 | ||
1975 | err = -ENOMEM; | 1986 | err = -ENOMEM; |
1976 | rule = kzalloc(sizeof(*rule) + size + ulen, GFP_KERNEL); | 1987 | rule = kzalloc(sizeof(*rule) + size + usize, GFP_KERNEL); |
1977 | if (rule == NULL) | 1988 | if (rule == NULL) |
1978 | goto err1; | 1989 | goto err1; |
1979 | 1990 | ||
@@ -1981,10 +1992,13 @@ static int nf_tables_newrule(struct sock *nlsk, struct sk_buff *skb, | |||
1981 | 1992 | ||
1982 | rule->handle = handle; | 1993 | rule->handle = handle; |
1983 | rule->dlen = size; | 1994 | rule->dlen = size; |
1984 | rule->ulen = ulen; | 1995 | rule->udata = ulen ? 1 : 0; |
1985 | 1996 | ||
1986 | if (ulen) | 1997 | if (ulen) { |
1987 | nla_memcpy(nft_userdata(rule), nla[NFTA_RULE_USERDATA], ulen); | 1998 | udata = nft_userdata(rule); |
1999 | udata->len = ulen - 1; | ||
2000 | nla_memcpy(udata->data, nla[NFTA_RULE_USERDATA], ulen); | ||
2001 | } | ||
1988 | 2002 | ||
1989 | expr = nft_expr_first(rule); | 2003 | expr = nft_expr_first(rule); |
1990 | for (i = 0; i < n; i++) { | 2004 | for (i = 0; i < n; i++) { |
@@ -2031,12 +2045,6 @@ static int nf_tables_newrule(struct sock *nlsk, struct sk_buff *skb, | |||
2031 | 2045 | ||
2032 | err3: | 2046 | err3: |
2033 | list_del_rcu(&rule->list); | 2047 | list_del_rcu(&rule->list); |
2034 | if (trans) { | ||
2035 | list_del_rcu(&nft_trans_rule(trans)->list); | ||
2036 | nft_rule_clear(net, nft_trans_rule(trans)); | ||
2037 | nft_trans_destroy(trans); | ||
2038 | chain->use++; | ||
2039 | } | ||
2040 | err2: | 2048 | err2: |
2041 | nf_tables_rule_destroy(&ctx, rule); | 2049 | nf_tables_rule_destroy(&ctx, rule); |
2042 | err1: | 2050 | err1: |
@@ -3612,12 +3620,11 @@ static int nf_tables_commit(struct sk_buff *skb) | |||
3612 | &te->elem, | 3620 | &te->elem, |
3613 | NFT_MSG_DELSETELEM, 0); | 3621 | NFT_MSG_DELSETELEM, 0); |
3614 | te->set->ops->get(te->set, &te->elem); | 3622 | te->set->ops->get(te->set, &te->elem); |
3615 | te->set->ops->remove(te->set, &te->elem); | ||
3616 | nft_data_uninit(&te->elem.key, NFT_DATA_VALUE); | 3623 | nft_data_uninit(&te->elem.key, NFT_DATA_VALUE); |
3617 | if (te->elem.flags & NFT_SET_MAP) { | 3624 | if (te->set->flags & NFT_SET_MAP && |
3618 | nft_data_uninit(&te->elem.data, | 3625 | !(te->elem.flags & NFT_SET_ELEM_INTERVAL_END)) |
3619 | te->set->dtype); | 3626 | nft_data_uninit(&te->elem.data, te->set->dtype); |
3620 | } | 3627 | te->set->ops->remove(te->set, &te->elem); |
3621 | nft_trans_destroy(trans); | 3628 | nft_trans_destroy(trans); |
3622 | break; | 3629 | break; |
3623 | } | 3630 | } |
@@ -3658,7 +3665,7 @@ static int nf_tables_abort(struct sk_buff *skb) | |||
3658 | { | 3665 | { |
3659 | struct net *net = sock_net(skb->sk); | 3666 | struct net *net = sock_net(skb->sk); |
3660 | struct nft_trans *trans, *next; | 3667 | struct nft_trans *trans, *next; |
3661 | struct nft_set *set; | 3668 | struct nft_trans_elem *te; |
3662 | 3669 | ||
3663 | list_for_each_entry_safe(trans, next, &net->nft.commit_list, list) { | 3670 | list_for_each_entry_safe(trans, next, &net->nft.commit_list, list) { |
3664 | switch (trans->msg_type) { | 3671 | switch (trans->msg_type) { |
@@ -3719,9 +3726,13 @@ static int nf_tables_abort(struct sk_buff *skb) | |||
3719 | break; | 3726 | break; |
3720 | case NFT_MSG_NEWSETELEM: | 3727 | case NFT_MSG_NEWSETELEM: |
3721 | nft_trans_elem_set(trans)->nelems--; | 3728 | nft_trans_elem_set(trans)->nelems--; |
3722 | set = nft_trans_elem_set(trans); | 3729 | te = (struct nft_trans_elem *)trans->data; |
3723 | set->ops->get(set, &nft_trans_elem(trans)); | 3730 | te->set->ops->get(te->set, &te->elem); |
3724 | set->ops->remove(set, &nft_trans_elem(trans)); | 3731 | nft_data_uninit(&te->elem.key, NFT_DATA_VALUE); |
3732 | if (te->set->flags & NFT_SET_MAP && | ||
3733 | !(te->elem.flags & NFT_SET_ELEM_INTERVAL_END)) | ||
3734 | nft_data_uninit(&te->elem.data, te->set->dtype); | ||
3735 | te->set->ops->remove(te->set, &te->elem); | ||
3725 | nft_trans_destroy(trans); | 3736 | nft_trans_destroy(trans); |
3726 | break; | 3737 | break; |
3727 | case NFT_MSG_DELSETELEM: | 3738 | case NFT_MSG_DELSETELEM: |
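
The nf_tables changes drop the old rule->ulen field in favour of a single udata presence bit plus a small struct nft_userdata header stored after the expressions, which is presumably why the patch stores the length as ulen - 1 (a one-byte field can then describe up to 256 bytes) and dumps udata->len + 1 over netlink. A hedged sketch of the assumed layout and an accessor mirroring nft_userdata(); the field widths here are guesses that merely echo the patch:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* Assumed shapes that mirror the patch, not the real nf_tables headers. */
    struct nft_userdata {
        unsigned char len;      /* stored as (ulen - 1) */
        unsigned char data[];
    };

    struct nft_rule {
        unsigned int  dlen:12;  /* expression data length, hence the 1 << 12 check */
        unsigned int  udata:1;  /* user data present? */
        unsigned char data[];   /* expressions, then the optional nft_userdata */
    };

    static struct nft_userdata *rule_userdata(const struct nft_rule *rule)
    {
        return (struct nft_userdata *)&rule->data[rule->dlen];
    }

    int main(void)
    {
        const char *comment = "accept established";
        size_t ulen = strlen(comment) + 1;
        size_t dlen = 64;       /* pretend expression payload */
        struct nft_rule *rule;
        struct nft_userdata *udata;

        rule = calloc(1, sizeof(*rule) + dlen + sizeof(*udata) + ulen);
        rule->dlen = dlen;
        rule->udata = 1;

        udata = rule_userdata(rule);
        udata->len = ulen - 1;
        memcpy(udata->data, comment, ulen);

        printf("user data (%d bytes): %s\n", udata->len + 1, (char *)udata->data);
        free(rule);
        return 0;
    }
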
diff --git a/net/netfilter/nft_compat.c b/net/netfilter/nft_compat.c index 1279cd85663e..213584cf04b3 100644 --- a/net/netfilter/nft_compat.c +++ b/net/netfilter/nft_compat.c | |||
@@ -123,7 +123,7 @@ static void | |||
123 | nft_target_set_tgchk_param(struct xt_tgchk_param *par, | 123 | nft_target_set_tgchk_param(struct xt_tgchk_param *par, |
124 | const struct nft_ctx *ctx, | 124 | const struct nft_ctx *ctx, |
125 | struct xt_target *target, void *info, | 125 | struct xt_target *target, void *info, |
126 | union nft_entry *entry, u8 proto, bool inv) | 126 | union nft_entry *entry, u16 proto, bool inv) |
127 | { | 127 | { |
128 | par->net = ctx->net; | 128 | par->net = ctx->net; |
129 | par->table = ctx->table->name; | 129 | par->table = ctx->table->name; |
@@ -137,7 +137,7 @@ nft_target_set_tgchk_param(struct xt_tgchk_param *par, | |||
137 | entry->e6.ipv6.invflags = inv ? IP6T_INV_PROTO : 0; | 137 | entry->e6.ipv6.invflags = inv ? IP6T_INV_PROTO : 0; |
138 | break; | 138 | break; |
139 | case NFPROTO_BRIDGE: | 139 | case NFPROTO_BRIDGE: |
140 | entry->ebt.ethproto = proto; | 140 | entry->ebt.ethproto = (__force __be16)proto; |
141 | entry->ebt.invflags = inv ? EBT_IPROTO : 0; | 141 | entry->ebt.invflags = inv ? EBT_IPROTO : 0; |
142 | break; | 142 | break; |
143 | } | 143 | } |
@@ -171,7 +171,7 @@ static const struct nla_policy nft_rule_compat_policy[NFTA_RULE_COMPAT_MAX + 1] | |||
171 | [NFTA_RULE_COMPAT_FLAGS] = { .type = NLA_U32 }, | 171 | [NFTA_RULE_COMPAT_FLAGS] = { .type = NLA_U32 }, |
172 | }; | 172 | }; |
173 | 173 | ||
174 | static int nft_parse_compat(const struct nlattr *attr, u8 *proto, bool *inv) | 174 | static int nft_parse_compat(const struct nlattr *attr, u16 *proto, bool *inv) |
175 | { | 175 | { |
176 | struct nlattr *tb[NFTA_RULE_COMPAT_MAX+1]; | 176 | struct nlattr *tb[NFTA_RULE_COMPAT_MAX+1]; |
177 | u32 flags; | 177 | u32 flags; |
@@ -203,7 +203,7 @@ nft_target_init(const struct nft_ctx *ctx, const struct nft_expr *expr, | |||
203 | struct xt_target *target = expr->ops->data; | 203 | struct xt_target *target = expr->ops->data; |
204 | struct xt_tgchk_param par; | 204 | struct xt_tgchk_param par; |
205 | size_t size = XT_ALIGN(nla_len(tb[NFTA_TARGET_INFO])); | 205 | size_t size = XT_ALIGN(nla_len(tb[NFTA_TARGET_INFO])); |
206 | u8 proto = 0; | 206 | u16 proto = 0; |
207 | bool inv = false; | 207 | bool inv = false; |
208 | union nft_entry e = {}; | 208 | union nft_entry e = {}; |
209 | int ret; | 209 | int ret; |
@@ -334,7 +334,7 @@ static const struct nla_policy nft_match_policy[NFTA_MATCH_MAX + 1] = { | |||
334 | static void | 334 | static void |
335 | nft_match_set_mtchk_param(struct xt_mtchk_param *par, const struct nft_ctx *ctx, | 335 | nft_match_set_mtchk_param(struct xt_mtchk_param *par, const struct nft_ctx *ctx, |
336 | struct xt_match *match, void *info, | 336 | struct xt_match *match, void *info, |
337 | union nft_entry *entry, u8 proto, bool inv) | 337 | union nft_entry *entry, u16 proto, bool inv) |
338 | { | 338 | { |
339 | par->net = ctx->net; | 339 | par->net = ctx->net; |
340 | par->table = ctx->table->name; | 340 | par->table = ctx->table->name; |
@@ -348,7 +348,7 @@ nft_match_set_mtchk_param(struct xt_mtchk_param *par, const struct nft_ctx *ctx, | |||
348 | entry->e6.ipv6.invflags = inv ? IP6T_INV_PROTO : 0; | 348 | entry->e6.ipv6.invflags = inv ? IP6T_INV_PROTO : 0; |
349 | break; | 349 | break; |
350 | case NFPROTO_BRIDGE: | 350 | case NFPROTO_BRIDGE: |
351 | entry->ebt.ethproto = proto; | 351 | entry->ebt.ethproto = (__force __be16)proto; |
352 | entry->ebt.invflags = inv ? EBT_IPROTO : 0; | 352 | entry->ebt.invflags = inv ? EBT_IPROTO : 0; |
353 | break; | 353 | break; |
354 | } | 354 | } |
@@ -385,7 +385,7 @@ nft_match_init(const struct nft_ctx *ctx, const struct nft_expr *expr, | |||
385 | struct xt_match *match = expr->ops->data; | 385 | struct xt_match *match = expr->ops->data; |
386 | struct xt_mtchk_param par; | 386 | struct xt_mtchk_param par; |
387 | size_t size = XT_ALIGN(nla_len(tb[NFTA_MATCH_INFO])); | 387 | size_t size = XT_ALIGN(nla_len(tb[NFTA_MATCH_INFO])); |
388 | u8 proto = 0; | 388 | u16 proto = 0; |
389 | bool inv = false; | 389 | bool inv = false; |
390 | union nft_entry e = {}; | 390 | union nft_entry e = {}; |
391 | int ret; | 391 | int ret; |
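
The u8 to u16 widening in nft_compat matters because for NFPROTO_BRIDGE the value carried here is an Ethertype such as ETH_P_IP (0x0800), which cannot be represented in eight bits and was silently truncated to zero before. A two-line demonstration of the truncation:

    #include <stdint.h>
    #include <stdio.h>

    #define ETH_P_IP 0x0800   /* as in <linux/if_ether.h> */

    int main(void)
    {
        uint8_t  as_u8  = (uint8_t)ETH_P_IP;   /* the old field width: truncates to 0 */
        uint16_t as_u16 = ETH_P_IP;            /* wide enough to keep 0x0800 */

        printf("u8: %#x, u16: %#x\n", as_u8, as_u16);
        return 0;
    }
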
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c index 5bf1e968a728..f8db7064d81c 100644 --- a/net/packet/af_packet.c +++ b/net/packet/af_packet.c | |||
@@ -3123,11 +3123,18 @@ static int packet_dev_mc(struct net_device *dev, struct packet_mclist *i, | |||
3123 | return 0; | 3123 | return 0; |
3124 | } | 3124 | } |
3125 | 3125 | ||
3126 | static void packet_dev_mclist(struct net_device *dev, struct packet_mclist *i, int what) | 3126 | static void packet_dev_mclist_delete(struct net_device *dev, |
3127 | struct packet_mclist **mlp) | ||
3127 | { | 3128 | { |
3128 | for ( ; i; i = i->next) { | 3129 | struct packet_mclist *ml; |
3129 | if (i->ifindex == dev->ifindex) | 3130 | |
3130 | packet_dev_mc(dev, i, what); | 3131 | while ((ml = *mlp) != NULL) { |
3132 | if (ml->ifindex == dev->ifindex) { | ||
3133 | packet_dev_mc(dev, ml, -1); | ||
3134 | *mlp = ml->next; | ||
3135 | kfree(ml); | ||
3136 | } else | ||
3137 | mlp = &ml->next; | ||
3131 | } | 3138 | } |
3132 | } | 3139 | } |
3133 | 3140 | ||
@@ -3204,12 +3211,11 @@ static int packet_mc_drop(struct sock *sk, struct packet_mreq_max *mreq) | |||
3204 | packet_dev_mc(dev, ml, -1); | 3211 | packet_dev_mc(dev, ml, -1); |
3205 | kfree(ml); | 3212 | kfree(ml); |
3206 | } | 3213 | } |
3207 | rtnl_unlock(); | 3214 | break; |
3208 | return 0; | ||
3209 | } | 3215 | } |
3210 | } | 3216 | } |
3211 | rtnl_unlock(); | 3217 | rtnl_unlock(); |
3212 | return -EADDRNOTAVAIL; | 3218 | return 0; |
3213 | } | 3219 | } |
3214 | 3220 | ||
3215 | static void packet_flush_mclist(struct sock *sk) | 3221 | static void packet_flush_mclist(struct sock *sk) |
@@ -3559,7 +3565,7 @@ static int packet_notifier(struct notifier_block *this, | |||
3559 | switch (msg) { | 3565 | switch (msg) { |
3560 | case NETDEV_UNREGISTER: | 3566 | case NETDEV_UNREGISTER: |
3561 | if (po->mclist) | 3567 | if (po->mclist) |
3562 | packet_dev_mclist(dev, po->mclist, -1); | 3568 | packet_dev_mclist_delete(dev, &po->mclist); |
3563 | /* fallthrough */ | 3569 | /* fallthrough */ |
3564 | 3570 | ||
3565 | case NETDEV_DOWN: | 3571 | case NETDEV_DOWN: |
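
packet_dev_mclist_delete() walks the socket's multicast list through a pointer-to-pointer so that matching entries can be unlinked and freed on NETDEV_UNREGISTER; the old helper only dropped the device reference and left the stale entry chained. A generic sketch of that unlink-while-walking idiom:

    #include <stdio.h>
    #include <stdlib.h>

    struct node {
        int ifindex;
        struct node *next;
    };

    /* Remove and free every node whose ifindex matches, keeping the rest linked. */
    static void delete_matching(struct node **headp, int ifindex)
    {
        struct node *n;

        while ((n = *headp) != NULL) {
            if (n->ifindex == ifindex) {
                *headp = n->next;   /* unlink before freeing */
                free(n);
            } else {
                headp = &n->next;   /* advance only when keeping the node */
            }
        }
    }

    static struct node *push(struct node *head, int ifindex)
    {
        struct node *n = malloc(sizeof(*n));

        n->ifindex = ifindex;
        n->next = head;
        return n;
    }

    int main(void)
    {
        struct node *head = push(push(push(NULL, 2), 3), 2);

        delete_matching(&head, 2);
        for (struct node *n = head; n; n = n->next)
            printf("kept ifindex %d\n", n->ifindex);
        return 0;
    }
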
diff --git a/net/rxrpc/ar-error.c b/net/rxrpc/ar-error.c index 5394b6be46ec..0610efa83d72 100644 --- a/net/rxrpc/ar-error.c +++ b/net/rxrpc/ar-error.c | |||
@@ -42,7 +42,8 @@ void rxrpc_UDP_error_report(struct sock *sk) | |||
42 | _leave("UDP socket errqueue empty"); | 42 | _leave("UDP socket errqueue empty"); |
43 | return; | 43 | return; |
44 | } | 44 | } |
45 | if (!skb->len) { | 45 | serr = SKB_EXT_ERR(skb); |
46 | if (!skb->len && serr->ee.ee_origin == SO_EE_ORIGIN_TIMESTAMPING) { | ||
46 | _leave("UDP empty message"); | 47 | _leave("UDP empty message"); |
47 | kfree_skb(skb); | 48 | kfree_skb(skb); |
48 | return; | 49 | return; |
@@ -50,7 +51,6 @@ void rxrpc_UDP_error_report(struct sock *sk) | |||
50 | 51 | ||
51 | rxrpc_new_skb(skb); | 52 | rxrpc_new_skb(skb); |
52 | 53 | ||
53 | serr = SKB_EXT_ERR(skb); | ||
54 | addr = *(__be32 *)(skb_network_header(skb) + serr->addr_offset); | 54 | addr = *(__be32 *)(skb_network_header(skb) + serr->addr_offset); |
55 | port = serr->port; | 55 | port = serr->port; |
56 | 56 | ||
diff --git a/net/tipc/link.c b/net/tipc/link.c index a4cf364316de..14f09b3cb87c 100644 --- a/net/tipc/link.c +++ b/net/tipc/link.c | |||
@@ -464,10 +464,11 @@ void tipc_link_reset(struct tipc_link *l_ptr) | |||
464 | /* Clean up all queues, except inputq: */ | 464 | /* Clean up all queues, except inputq: */ |
465 | __skb_queue_purge(&l_ptr->outqueue); | 465 | __skb_queue_purge(&l_ptr->outqueue); |
466 | __skb_queue_purge(&l_ptr->deferred_queue); | 466 | __skb_queue_purge(&l_ptr->deferred_queue); |
467 | skb_queue_splice_init(&l_ptr->wakeupq, &l_ptr->inputq); | 467 | if (!owner->inputq) |
468 | if (!skb_queue_empty(&l_ptr->inputq)) | 468 | owner->inputq = &l_ptr->inputq; |
469 | skb_queue_splice_init(&l_ptr->wakeupq, owner->inputq); | ||
470 | if (!skb_queue_empty(owner->inputq)) | ||
469 | owner->action_flags |= TIPC_MSG_EVT; | 471 | owner->action_flags |= TIPC_MSG_EVT; |
470 | owner->inputq = &l_ptr->inputq; | ||
471 | l_ptr->next_out = NULL; | 472 | l_ptr->next_out = NULL; |
472 | l_ptr->unacked_window = 0; | 473 | l_ptr->unacked_window = 0; |
473 | l_ptr->checkpoint = 1; | 474 | l_ptr->checkpoint = 1; |