289 files changed, 2532 insertions, 1624 deletions
diff --git a/Documentation/Changes b/Documentation/Changes
index 6d8863004858..f447f0516f07 100644
--- a/Documentation/Changes
+++ b/Documentation/Changes
@@ -43,7 +43,7 @@ o udev 081 # udevd --version
 o grub 0.93 # grub --version || grub-install --version
 o mcelog 0.6 # mcelog --version
 o iptables 1.4.2 # iptables -V
-o openssl & libcrypto 1.0.1k # openssl version
+o openssl & libcrypto 1.0.0 # openssl version


 Kernel compilation
diff --git a/Documentation/devicetree/bindings/input/cypress,cyapa.txt b/Documentation/devicetree/bindings/input/cypress,cyapa.txt
index 635a3b036630..8d91ba9ff2fd 100644
--- a/Documentation/devicetree/bindings/input/cypress,cyapa.txt
+++ b/Documentation/devicetree/bindings/input/cypress,cyapa.txt
@@ -25,7 +25,7 @@ Example:
 	/* Cypress Gen3 touchpad */
 	touchpad@67 {
 		compatible = "cypress,cyapa";
-		reg = <0x24>;
+		reg = <0x67>;
 		interrupt-parent = <&gpio>;
 		interrupts = <2 IRQ_TYPE_EDGE_FALLING>;	/* GPIO 2 */
 		wakeup-source;
diff --git a/Documentation/devicetree/bindings/interrupt-controller/qca,ath79-misc-intc.txt b/Documentation/devicetree/bindings/interrupt-controller/qca,ath79-misc-intc.txt
index 391717a68f3b..ec96b1f01478 100644
--- a/Documentation/devicetree/bindings/interrupt-controller/qca,ath79-misc-intc.txt
+++ b/Documentation/devicetree/bindings/interrupt-controller/qca,ath79-misc-intc.txt
@@ -4,8 +4,8 @@ The MISC interrupt controller is a secondary controller for lower priority
 interrupt.

 Required Properties:
-- compatible: has to be "qca,<soctype>-cpu-intc", "qca,ar7100-misc-intc"
-  as fallback
+- compatible: has to be "qca,<soctype>-cpu-intc", "qca,ar7100-misc-intc" or
+  "qca,<soctype>-cpu-intc", "qca,ar7240-misc-intc"
 - reg: Base address and size of the controllers memory area
 - interrupt-parent: phandle of the parent interrupt controller.
 - interrupts: Interrupt specifier for the controllers interrupt.
@@ -13,6 +13,9 @@ Required Properties:
 - #interrupt-cells : Specifies the number of cells needed to encode interrupt
   source, should be 1

+Compatible fallback depends on the SoC. Use ar7100 for ar71xx and ar913x,
+use ar7240 for all other SoCs.
+
 Please refer to interrupts.txt in this directory for details of the common
 Interrupt Controllers bindings used by client devices.

@@ -28,3 +31,16 @@ Example:
 		interrupt-controller;
 		#interrupt-cells = <1>;
 	};
+
+Another example:
+
+	interrupt-controller@18060010 {
+		compatible = "qca,ar9331-misc-intc", qca,ar7240-misc-intc";
+		reg = <0x18060010 0x4>;
+
+		interrupt-parent = <&cpuintc>;
+		interrupts = <6>;
+
+		interrupt-controller;
+		#interrupt-cells = <1>;
+	};
diff --git a/Documentation/input/multi-touch-protocol.txt b/Documentation/input/multi-touch-protocol.txt
index b85d000faeb4..c51f1146f3bd 100644
--- a/Documentation/input/multi-touch-protocol.txt
+++ b/Documentation/input/multi-touch-protocol.txt
@@ -361,7 +361,7 @@ For win8 devices with both T and C coordinates, the position mapping is
    ABS_MT_POSITION_X := T_X
    ABS_MT_POSITION_Y := T_Y
    ABS_MT_TOOL_X := C_X
-   ABS_MT_TOOL_X := C_Y
+   ABS_MT_TOOL_Y := C_Y

 Unfortunately, there is not enough information to specify both the touching
 ellipse and the tool ellipse, so one has to resort to approximations. One
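A minimal sketch of the corrected mapping in driver code, assuming the standard MT slots helpers and a single contact whose T and C coordinates have already been decoded into the made-up variables t_x, t_y, c_x and c_y (the function and variable names are illustrative, not part of the patch):

/* Illustrative only: report one win8-style contact, mapping the touching
 * ellipse (T) to ABS_MT_POSITION_* and the tool ellipse (C) to ABS_MT_TOOL_*.
 */
#include <linux/input/mt.h>

static void report_contact(struct input_dev *input, int slot,
			   int t_x, int t_y, int c_x, int c_y)
{
	input_mt_slot(input, slot);
	input_mt_report_slot_state(input, MT_TOOL_FINGER, true);
	input_report_abs(input, ABS_MT_POSITION_X, t_x);
	input_report_abs(input, ABS_MT_POSITION_Y, t_y);
	input_report_abs(input, ABS_MT_TOOL_X, c_x);
	input_report_abs(input, ABS_MT_TOOL_Y, c_y);
	input_mt_sync_frame(input);
	input_sync(input);
}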
diff --git a/Documentation/power/pci.txt b/Documentation/power/pci.txt
index 62328d76b55b..b0e911e0e8f5 100644
--- a/Documentation/power/pci.txt
+++ b/Documentation/power/pci.txt
@@ -979,20 +979,45 @@ every time right after the runtime_resume() callback has returned
 (alternatively, the runtime_suspend() callback will have to check if the
 device should really be suspended and return -EAGAIN if that is not the case).

-The runtime PM of PCI devices is disabled by default. It is also blocked by
-pci_pm_init() that runs the pm_runtime_forbid() helper function. If a PCI
-driver implements the runtime PM callbacks and intends to use the runtime PM
-framework provided by the PM core and the PCI subsystem, it should enable this
-feature by executing the pm_runtime_enable() helper function. However, the
-driver should not call the pm_runtime_allow() helper function unblocking
-the runtime PM of the device. Instead, it should allow user space or some
-platform-specific code to do that (user space can do it via sysfs), although
-once it has called pm_runtime_enable(), it must be prepared to handle the
+The runtime PM of PCI devices is enabled by default by the PCI core. PCI
+device drivers do not need to enable it and should not attempt to do so.
+However, it is blocked by pci_pm_init() that runs the pm_runtime_forbid()
+helper function. In addition to that, the runtime PM usage counter of
+each PCI device is incremented by local_pci_probe() before executing the
+probe callback provided by the device's driver.
+
+If a PCI driver implements the runtime PM callbacks and intends to use the
+runtime PM framework provided by the PM core and the PCI subsystem, it needs
+to decrement the device's runtime PM usage counter in its probe callback
+function. If it doesn't do that, the counter will always be different from
+zero for the device and it will never be runtime-suspended. The simplest
+way to do that is by calling pm_runtime_put_noidle(), but if the driver
+wants to schedule an autosuspend right away, for example, it may call
+pm_runtime_put_autosuspend() instead for this purpose. Generally, it
+just needs to call a function that decrements the device's usage counter
+from its probe routine to make runtime PM work for the device.
+
+It is important to remember that the driver's runtime_suspend() callback
+may be executed right after the usage counter has been decremented, because
+user space may already have caused the pm_runtime_allow() helper function
+unblocking the runtime PM of the device to run via sysfs, so the driver must
+be prepared to cope with that.
+
+The driver itself should not call pm_runtime_allow(), though. Instead, it
+should let user space or some platform-specific code do that (user space can
+do it via sysfs as stated above), but it must be prepared to handle the
 runtime PM of the device correctly as soon as pm_runtime_allow() is called
-(which may happen at any time). [It also is possible that user space causes
-pm_runtime_allow() to be called via sysfs before the driver is loaded, so in
-fact the driver has to be prepared to handle the runtime PM of the device as
-soon as it calls pm_runtime_enable().]
+(which may happen at any time, even before the driver is loaded).
+
+When the driver's remove callback runs, it has to balance the decrementation
+of the device's runtime PM usage counter at the probe time. For this reason,
+if it has decremented the counter in its probe callback, it must run
+pm_runtime_get_noresume() in its remove callback. [Since the core carries
+out a runtime resume of the device and bumps up the device's usage counter
+before running the driver's remove callback, the runtime PM of the device
+is effectively disabled for the duration of the remove execution and all
+runtime PM helper functions incrementing the device's usage counter are
+then effectively equivalent to pm_runtime_get_noresume().]

 The runtime PM framework works by processing requests to suspend or resume
 devices, or to check if they are idle (in which cases it is reasonable to
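A minimal sketch of the probe/remove balancing described above, for a hypothetical "foo" PCI driver (the device IDs, names and setup helper are placeholders; only the pm_runtime_*() calls and the struct pci_driver fields reflect the real interfaces):

/* Hypothetical example only: drop the usage-counter reference taken by
 * local_pci_probe() in .probe, and balance it again in .remove. */
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/pm_runtime.h>

static int foo_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	int ret;

	ret = pcim_enable_device(pdev);		/* device-specific setup elided */
	if (ret)
		return ret;

	/* Let the device runtime-suspend once user space runs pm_runtime_allow(). */
	pm_runtime_put_noidle(&pdev->dev);
	return 0;
}

static void foo_remove(struct pci_dev *pdev)
{
	/* Balance the pm_runtime_put_noidle() issued in foo_probe(). */
	pm_runtime_get_noresume(&pdev->dev);
}

static const struct pci_device_id foo_ids[] = {
	{ PCI_DEVICE(0x1234, 0x5678) },		/* placeholder vendor/device */
	{ }
};
MODULE_DEVICE_TABLE(pci, foo_ids);

static struct pci_driver foo_driver = {
	.name		= "foo",
	.id_table	= foo_ids,
	.probe		= foo_probe,
	.remove		= foo_remove,
	/* .driver.pm would supply the runtime_suspend()/runtime_resume() callbacks */
};
module_pci_driver(foo_driver);
MODULE_LICENSE("GPL");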
diff --git a/Documentation/ptp/testptp.c b/Documentation/ptp/testptp.c
index 2bc8abc57fa0..6c6247aaa7b9 100644
--- a/Documentation/ptp/testptp.c
+++ b/Documentation/ptp/testptp.c
@@ -18,6 +18,7 @@
  * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  */
 #define _GNU_SOURCE
+#define __SANE_USERSPACE_TYPES__ /* For PPC64, to get LL64 types */
 #include <errno.h>
 #include <fcntl.h>
 #include <inttypes.h>
diff --git a/MAINTAINERS b/MAINTAINERS
index 9f6685f6c5a9..797236befd27 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -5957,7 +5957,7 @@ F:	virt/kvm/
 KERNEL VIRTUAL MACHINE (KVM) FOR AMD-V
 M:	Joerg Roedel <joro@8bytes.org>
 L:	kvm@vger.kernel.org
-W:	http://kvm.qumranet.com
+W:	http://www.linux-kvm.org/
 S:	Maintained
 F:	arch/x86/include/asm/svm.h
 F:	arch/x86/kvm/svm.c
@@ -5965,7 +5965,7 @@ F:	arch/x86/kvm/svm.c
 KERNEL VIRTUAL MACHINE (KVM) FOR POWERPC
 M:	Alexander Graf <agraf@suse.com>
 L:	kvm-ppc@vger.kernel.org
-W:	http://kvm.qumranet.com
+W:	http://www.linux-kvm.org/
 T:	git git://github.com/agraf/linux-2.6.git
 S:	Supported
 F:	arch/powerpc/include/asm/kvm*
diff --git a/Makefile b/Makefile
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 4
 PATCHLEVEL = 3
 SUBLEVEL = 0
-EXTRAVERSION = -rc3
+EXTRAVERSION = -rc4
 NAME = Hurr durr I'ma sheep

 # *DOCUMENTATION*
diff --git a/arch/arc/include/asm/Kbuild b/arch/arc/include/asm/Kbuild
index 7611b10a2d23..0b10ef2a4372 100644
--- a/arch/arc/include/asm/Kbuild
+++ b/arch/arc/include/asm/Kbuild
@@ -48,4 +48,5 @@ generic-y += types.h
 generic-y += ucontext.h
 generic-y += user.h
 generic-y += vga.h
+generic-y += word-at-a-time.h
 generic-y += xor.h
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index b0329be95cb1..26b066690593 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -79,7 +79,7 @@ extern void __pgd_error(const char *file, int line, unsigned long val);
 #define PAGE_S2 __pgprot(PROT_DEFAULT | PTE_S2_MEMATTR(MT_S2_NORMAL) | PTE_S2_RDONLY)
 #define PAGE_S2_DEVICE __pgprot(PROT_DEFAULT | PTE_S2_MEMATTR(MT_S2_DEVICE_nGnRE) | PTE_S2_RDONLY | PTE_UXN)

-#define PAGE_NONE __pgprot(((_PAGE_DEFAULT) & ~PTE_TYPE_MASK) | PTE_PROT_NONE | PTE_PXN | PTE_UXN)
+#define PAGE_NONE __pgprot(((_PAGE_DEFAULT) & ~PTE_VALID) | PTE_PROT_NONE | PTE_PXN | PTE_UXN)
 #define PAGE_SHARED __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN | PTE_WRITE)
 #define PAGE_SHARED_EXEC __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_WRITE)
 #define PAGE_COPY __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN)
@@ -496,7 +496,7 @@ static inline pud_t *pud_offset(pgd_t *pgd, unsigned long addr)
 static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
 {
 	const pteval_t mask = PTE_USER | PTE_PXN | PTE_UXN | PTE_RDONLY |
-			      PTE_PROT_NONE | PTE_WRITE | PTE_TYPE_MASK;
+			      PTE_PROT_NONE | PTE_VALID | PTE_WRITE;
 	/* preserve the hardware dirty information */
 	if (pte_hw_dirty(pte))
 		pte = pte_mkdirty(pte);
diff --git a/arch/arm64/kernel/efi.c b/arch/arm64/kernel/efi.c
index e8ca6eaedd02..13671a9cf016 100644
--- a/arch/arm64/kernel/efi.c
+++ b/arch/arm64/kernel/efi.c
@@ -258,7 +258,8 @@ static bool __init efi_virtmap_init(void)
 		 */
 		if (!is_normal_ram(md))
 			prot = __pgprot(PROT_DEVICE_nGnRE);
-		else if (md->type == EFI_RUNTIME_SERVICES_CODE)
+		else if (md->type == EFI_RUNTIME_SERVICES_CODE ||
+			 !PAGE_ALIGNED(md->phys_addr))
 			prot = PAGE_KERNEL_EXEC;
 		else
 			prot = PAGE_KERNEL;
diff --git a/arch/arm64/kernel/entry-ftrace.S b/arch/arm64/kernel/entry-ftrace.S
index 08cafc518b9a..0f03a8fe2314 100644
--- a/arch/arm64/kernel/entry-ftrace.S
+++ b/arch/arm64/kernel/entry-ftrace.S
@@ -178,6 +178,24 @@ ENTRY(ftrace_stub)
 ENDPROC(ftrace_stub)

 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
+	/* save return value regs*/
+	.macro save_return_regs
+	sub sp, sp, #64
+	stp x0, x1, [sp]
+	stp x2, x3, [sp, #16]
+	stp x4, x5, [sp, #32]
+	stp x6, x7, [sp, #48]
+	.endm
+
+	/* restore return value regs*/
+	.macro restore_return_regs
+	ldp x0, x1, [sp]
+	ldp x2, x3, [sp, #16]
+	ldp x4, x5, [sp, #32]
+	ldp x6, x7, [sp, #48]
+	add sp, sp, #64
+	.endm
+
 /*
  * void ftrace_graph_caller(void)
  *
@@ -204,11 +222,11 @@ ENDPROC(ftrace_graph_caller)
  * only when CONFIG_HAVE_FUNCTION_GRAPH_FP_TEST is enabled.
  */
 ENTRY(return_to_handler)
-	str x0, [sp, #-16]!
+	save_return_regs
 	mov x0, x29			// parent's fp
 	bl ftrace_return_to_handler	// addr = ftrace_return_to_hander(fp);
 	mov x30, x0			// restore the original return address
-	ldr x0, [sp], #16
+	restore_return_regs
 	ret
 END(return_to_handler)
 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
diff --git a/arch/avr32/include/asm/Kbuild b/arch/avr32/include/asm/Kbuild
index f61f2dd67464..241b9b9729d8 100644
--- a/arch/avr32/include/asm/Kbuild
+++ b/arch/avr32/include/asm/Kbuild
@@ -20,4 +20,5 @@ generic-y += sections.h
 generic-y += topology.h
 generic-y += trace_clock.h
 generic-y += vga.h
+generic-y += word-at-a-time.h
 generic-y += xor.h
diff --git a/arch/blackfin/include/asm/Kbuild b/arch/blackfin/include/asm/Kbuild
index 61cd1e786a14..91d49c0a3118 100644
--- a/arch/blackfin/include/asm/Kbuild
+++ b/arch/blackfin/include/asm/Kbuild
@@ -46,4 +46,5 @@ generic-y += types.h
 generic-y += ucontext.h
 generic-y += unaligned.h
 generic-y += user.h
+generic-y += word-at-a-time.h
 generic-y += xor.h
diff --git a/arch/c6x/include/asm/Kbuild b/arch/c6x/include/asm/Kbuild
index f17c4dc6050c..945544ec603e 100644
--- a/arch/c6x/include/asm/Kbuild
+++ b/arch/c6x/include/asm/Kbuild
@@ -59,4 +59,5 @@ generic-y += types.h
 generic-y += ucontext.h
 generic-y += user.h
 generic-y += vga.h
+generic-y += word-at-a-time.h
 generic-y += xor.h
diff --git a/arch/cris/include/asm/Kbuild b/arch/cris/include/asm/Kbuild
index b7f68192d15b..1778805f6380 100644
--- a/arch/cris/include/asm/Kbuild
+++ b/arch/cris/include/asm/Kbuild
@@ -43,4 +43,5 @@ generic-y += topology.h
 generic-y += trace_clock.h
 generic-y += types.h
 generic-y += vga.h
+generic-y += word-at-a-time.h
 generic-y += xor.h
diff --git a/arch/frv/include/asm/Kbuild b/arch/frv/include/asm/Kbuild
index 8e47b832cc76..1fa084cf1a43 100644
--- a/arch/frv/include/asm/Kbuild
+++ b/arch/frv/include/asm/Kbuild
@@ -7,3 +7,4 @@ generic-y += mcs_spinlock.h
 generic-y += mm-arch-hooks.h
 generic-y += preempt.h
 generic-y += trace_clock.h
+generic-y += word-at-a-time.h
diff --git a/arch/hexagon/include/asm/Kbuild b/arch/hexagon/include/asm/Kbuild
index daee37bd0999..db8ddabc6bd2 100644
--- a/arch/hexagon/include/asm/Kbuild
+++ b/arch/hexagon/include/asm/Kbuild
@@ -58,4 +58,5 @@ generic-y += types.h
 generic-y += ucontext.h
 generic-y += unaligned.h
 generic-y += vga.h
+generic-y += word-at-a-time.h
 generic-y += xor.h
diff --git a/arch/ia64/include/asm/Kbuild b/arch/ia64/include/asm/Kbuild
index 9de3ba12f6b9..502a91d8dbbd 100644
--- a/arch/ia64/include/asm/Kbuild
+++ b/arch/ia64/include/asm/Kbuild
@@ -8,3 +8,4 @@ generic-y += mm-arch-hooks.h
 generic-y += preempt.h
 generic-y += trace_clock.h
 generic-y += vtime.h
+generic-y += word-at-a-time.h
diff --git a/arch/m32r/include/asm/Kbuild b/arch/m32r/include/asm/Kbuild
index e0eb704ca1fa..fd104bd221ce 100644
--- a/arch/m32r/include/asm/Kbuild
+++ b/arch/m32r/include/asm/Kbuild
@@ -9,3 +9,4 @@ generic-y += module.h
 generic-y += preempt.h
 generic-y += sections.h
 generic-y += trace_clock.h
+generic-y += word-at-a-time.h
diff --git a/arch/m68k/configs/amiga_defconfig b/arch/m68k/configs/amiga_defconfig
index 0b6b40d37b95..5b4ec541ba7c 100644
--- a/arch/m68k/configs/amiga_defconfig
+++ b/arch/m68k/configs/amiga_defconfig
@@ -10,6 +10,7 @@ CONFIG_LOG_BUF_SHIFT=16
 # CONFIG_PID_NS is not set
 # CONFIG_NET_NS is not set
 CONFIG_BLK_DEV_INITRD=y
+CONFIG_USERFAULTFD=y
 CONFIG_SLAB=y
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
@@ -57,7 +58,6 @@ CONFIG_NET_IPGRE_DEMUX=m
 CONFIG_NET_IPGRE=m
 CONFIG_NET_IPVTI=m
 CONFIG_NET_FOU_IP_TUNNELS=y
-CONFIG_GENEVE_CORE=m
 CONFIG_INET_AH=m
 CONFIG_INET_ESP=m
 CONFIG_INET_IPCOMP=m
@@ -67,10 +67,12 @@ CONFIG_INET_XFRM_MODE_BEET=m
 # CONFIG_INET_LRO is not set
 CONFIG_INET_DIAG=m
 CONFIG_INET_UDP_DIAG=m
+CONFIG_IPV6=m
 CONFIG_IPV6_ROUTER_PREF=y
 CONFIG_INET6_AH=m
 CONFIG_INET6_ESP=m
 CONFIG_INET6_IPCOMP=m
+CONFIG_IPV6_ILA=m
 CONFIG_IPV6_VTI=m
 CONFIG_IPV6_GRE=m
 CONFIG_NETFILTER=y
@@ -179,6 +181,7 @@ CONFIG_IP_SET_HASH_NETIFACE=m
 CONFIG_IP_SET_LIST_SET=m
 CONFIG_NF_CONNTRACK_IPV4=m
 CONFIG_NFT_CHAIN_ROUTE_IPV4=m
+CONFIG_NFT_DUP_IPV4=m
 CONFIG_NF_TABLES_ARP=m
 CONFIG_NF_LOG_ARP=m
 CONFIG_NFT_CHAIN_NAT_IPV4=m
@@ -206,6 +209,7 @@ CONFIG_IP_NF_ARPFILTER=m
 CONFIG_IP_NF_ARP_MANGLE=m
 CONFIG_NF_CONNTRACK_IPV6=m
 CONFIG_NFT_CHAIN_ROUTE_IPV6=m
+CONFIG_NFT_DUP_IPV6=m
 CONFIG_NFT_CHAIN_NAT_IPV6=m
 CONFIG_NFT_MASQ_IPV6=m
 CONFIG_NFT_REDIR_IPV6=m
@@ -271,6 +275,7 @@ CONFIG_NETLINK_DIAG=m
 CONFIG_MPLS=y
 CONFIG_NET_MPLS_GSO=m
 CONFIG_MPLS_ROUTING=m
+CONFIG_MPLS_IPTUNNEL=m
 # CONFIG_WIRELESS is not set
 # CONFIG_UEVENT_HELPER is not set
 CONFIG_DEVTMPFS=y
@@ -370,6 +375,7 @@ CONFIG_ZORRO8390=y
 # CONFIG_NET_VENDOR_SEEQ is not set
 # CONFIG_NET_VENDOR_SMSC is not set
 # CONFIG_NET_VENDOR_STMICRO is not set
+# CONFIG_NET_VENDOR_SYNOPSYS is not set
 # CONFIG_NET_VENDOR_VIA is not set
 # CONFIG_NET_VENDOR_WIZNET is not set
 CONFIG_PPP=m
@@ -537,6 +543,7 @@ CONFIG_TEST_USER_COPY=m
 CONFIG_TEST_BPF=m
 CONFIG_TEST_FIRMWARE=m
 CONFIG_TEST_UDELAY=m
+CONFIG_TEST_STATIC_KEYS=m
 CONFIG_EARLY_PRINTK=y
 CONFIG_ENCRYPTED_KEYS=m
 CONFIG_CRYPTO_RSA=m
diff --git a/arch/m68k/configs/apollo_defconfig b/arch/m68k/configs/apollo_defconfig
index eeb3a8991fc4..6e5198e2c124 100644
--- a/arch/m68k/configs/apollo_defconfig
+++ b/arch/m68k/configs/apollo_defconfig
@@ -10,6 +10,7 @@ CONFIG_LOG_BUF_SHIFT=16
 # CONFIG_PID_NS is not set
 # CONFIG_NET_NS is not set
 CONFIG_BLK_DEV_INITRD=y
+CONFIG_USERFAULTFD=y
 CONFIG_SLAB=y
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
@@ -55,7 +56,6 @@ CONFIG_NET_IPGRE_DEMUX=m
 CONFIG_NET_IPGRE=m
 CONFIG_NET_IPVTI=m
 CONFIG_NET_FOU_IP_TUNNELS=y
-CONFIG_GENEVE_CORE=m
 CONFIG_INET_AH=m
 CONFIG_INET_ESP=m
 CONFIG_INET_IPCOMP=m
@@ -65,10 +65,12 @@ CONFIG_INET_XFRM_MODE_BEET=m
 # CONFIG_INET_LRO is not set
 CONFIG_INET_DIAG=m
 CONFIG_INET_UDP_DIAG=m
+CONFIG_IPV6=m
 CONFIG_IPV6_ROUTER_PREF=y
 CONFIG_INET6_AH=m
 CONFIG_INET6_ESP=m
 CONFIG_INET6_IPCOMP=m
+CONFIG_IPV6_ILA=m
 CONFIG_IPV6_VTI=m
 CONFIG_IPV6_GRE=m
 CONFIG_NETFILTER=y
@@ -177,6 +179,7 @@ CONFIG_IP_SET_HASH_NETIFACE=m
 CONFIG_IP_SET_LIST_SET=m
 CONFIG_NF_CONNTRACK_IPV4=m
 CONFIG_NFT_CHAIN_ROUTE_IPV4=m
+CONFIG_NFT_DUP_IPV4=m
 CONFIG_NF_TABLES_ARP=m
 CONFIG_NF_LOG_ARP=m
 CONFIG_NFT_CHAIN_NAT_IPV4=m
@@ -204,6 +207,7 @@ CONFIG_IP_NF_ARPFILTER=m
 CONFIG_IP_NF_ARP_MANGLE=m
 CONFIG_NF_CONNTRACK_IPV6=m
 CONFIG_NFT_CHAIN_ROUTE_IPV6=m
+CONFIG_NFT_DUP_IPV6=m
 CONFIG_NFT_CHAIN_NAT_IPV6=m
 CONFIG_NFT_MASQ_IPV6=m
 CONFIG_NFT_REDIR_IPV6=m
@@ -269,6 +273,7 @@ CONFIG_NETLINK_DIAG=m
 CONFIG_MPLS=y
 CONFIG_NET_MPLS_GSO=m
 CONFIG_MPLS_ROUTING=m
+CONFIG_MPLS_IPTUNNEL=m
 # CONFIG_WIRELESS is not set
 # CONFIG_UEVENT_HELPER is not set
 CONFIG_DEVTMPFS=y
@@ -344,6 +349,7 @@ CONFIG_VETH=m
 # CONFIG_NET_VENDOR_SAMSUNG is not set
 # CONFIG_NET_VENDOR_SEEQ is not set
 # CONFIG_NET_VENDOR_STMICRO is not set
+# CONFIG_NET_VENDOR_SYNOPSYS is not set
 # CONFIG_NET_VENDOR_VIA is not set
 # CONFIG_NET_VENDOR_WIZNET is not set
 CONFIG_PPP=m
@@ -495,6 +501,7 @@ CONFIG_TEST_USER_COPY=m
 CONFIG_TEST_BPF=m
 CONFIG_TEST_FIRMWARE=m
 CONFIG_TEST_UDELAY=m
+CONFIG_TEST_STATIC_KEYS=m
 CONFIG_EARLY_PRINTK=y
 CONFIG_ENCRYPTED_KEYS=m
 CONFIG_CRYPTO_RSA=m
diff --git a/arch/m68k/configs/atari_defconfig b/arch/m68k/configs/atari_defconfig
index 3a7006654ce9..f75600b0ca23 100644
--- a/arch/m68k/configs/atari_defconfig
+++ b/arch/m68k/configs/atari_defconfig
@@ -10,6 +10,7 @@ CONFIG_LOG_BUF_SHIFT=16
 # CONFIG_PID_NS is not set
 # CONFIG_NET_NS is not set
 CONFIG_BLK_DEV_INITRD=y
+CONFIG_USERFAULTFD=y
 CONFIG_SLAB=y
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
@@ -55,7 +56,6 @@ CONFIG_NET_IPGRE_DEMUX=m
 CONFIG_NET_IPGRE=m
 CONFIG_NET_IPVTI=m
 CONFIG_NET_FOU_IP_TUNNELS=y
-CONFIG_GENEVE_CORE=m
 CONFIG_INET_AH=m
 CONFIG_INET_ESP=m
 CONFIG_INET_IPCOMP=m
@@ -65,10 +65,12 @@ CONFIG_INET_XFRM_MODE_BEET=m
 # CONFIG_INET_LRO is not set
 CONFIG_INET_DIAG=m
 CONFIG_INET_UDP_DIAG=m
+CONFIG_IPV6=m
 CONFIG_IPV6_ROUTER_PREF=y
 CONFIG_INET6_AH=m
 CONFIG_INET6_ESP=m
 CONFIG_INET6_IPCOMP=m
+CONFIG_IPV6_ILA=m
 CONFIG_IPV6_VTI=m
 CONFIG_IPV6_GRE=m
 CONFIG_NETFILTER=y
@@ -177,6 +179,7 @@ CONFIG_IP_SET_HASH_NETIFACE=m
 CONFIG_IP_SET_LIST_SET=m
 CONFIG_NF_CONNTRACK_IPV4=m
 CONFIG_NFT_CHAIN_ROUTE_IPV4=m
+CONFIG_NFT_DUP_IPV4=m
 CONFIG_NF_TABLES_ARP=m
 CONFIG_NF_LOG_ARP=m
 CONFIG_NFT_CHAIN_NAT_IPV4=m
@@ -204,6 +207,7 @@ CONFIG_IP_NF_ARPFILTER=m
 CONFIG_IP_NF_ARP_MANGLE=m
 CONFIG_NF_CONNTRACK_IPV6=m
 CONFIG_NFT_CHAIN_ROUTE_IPV6=m
+CONFIG_NFT_DUP_IPV6=m
 CONFIG_NFT_CHAIN_NAT_IPV6=m
 CONFIG_NFT_MASQ_IPV6=m
 CONFIG_NFT_REDIR_IPV6=m
@@ -269,6 +273,7 @@ CONFIG_NETLINK_DIAG=m
 CONFIG_MPLS=y
 CONFIG_NET_MPLS_GSO=m
 CONFIG_MPLS_ROUTING=m
+CONFIG_MPLS_IPTUNNEL=m
 # CONFIG_WIRELESS is not set
 # CONFIG_UEVENT_HELPER is not set
 CONFIG_DEVTMPFS=y
@@ -355,6 +360,7 @@ CONFIG_NE2000=y
 # CONFIG_NET_VENDOR_SEEQ is not set
 CONFIG_SMC91X=y
 # CONFIG_NET_VENDOR_STMICRO is not set
+# CONFIG_NET_VENDOR_SYNOPSYS is not set
 # CONFIG_NET_VENDOR_VIA is not set
 # CONFIG_NET_VENDOR_WIZNET is not set
 CONFIG_PPP=m
@@ -517,6 +523,7 @@ CONFIG_TEST_USER_COPY=m
 CONFIG_TEST_BPF=m
 CONFIG_TEST_FIRMWARE=m
 CONFIG_TEST_UDELAY=m
+CONFIG_TEST_STATIC_KEYS=m
 CONFIG_EARLY_PRINTK=y
 CONFIG_ENCRYPTED_KEYS=m
 CONFIG_CRYPTO_RSA=m
diff --git a/arch/m68k/configs/bvme6000_defconfig b/arch/m68k/configs/bvme6000_defconfig
index 0586b323a673..a42d91c389a6 100644
--- a/arch/m68k/configs/bvme6000_defconfig
+++ b/arch/m68k/configs/bvme6000_defconfig
@@ -10,6 +10,7 @@ CONFIG_LOG_BUF_SHIFT=16
 # CONFIG_PID_NS is not set
 # CONFIG_NET_NS is not set
 CONFIG_BLK_DEV_INITRD=y
+CONFIG_USERFAULTFD=y
 CONFIG_SLAB=y
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
@@ -53,7 +54,6 @@ CONFIG_NET_IPGRE_DEMUX=m
 CONFIG_NET_IPGRE=m
 CONFIG_NET_IPVTI=m
 CONFIG_NET_FOU_IP_TUNNELS=y
-CONFIG_GENEVE_CORE=m
 CONFIG_INET_AH=m
 CONFIG_INET_ESP=m
 CONFIG_INET_IPCOMP=m
@@ -63,10 +63,12 @@ CONFIG_INET_XFRM_MODE_BEET=m
 # CONFIG_INET_LRO is not set
 CONFIG_INET_DIAG=m
 CONFIG_INET_UDP_DIAG=m
+CONFIG_IPV6=m
 CONFIG_IPV6_ROUTER_PREF=y
 CONFIG_INET6_AH=m
 CONFIG_INET6_ESP=m
 CONFIG_INET6_IPCOMP=m
+CONFIG_IPV6_ILA=m
 CONFIG_IPV6_VTI=m
 CONFIG_IPV6_GRE=m
 CONFIG_NETFILTER=y
@@ -175,6 +177,7 @@ CONFIG_IP_SET_HASH_NETIFACE=m
 CONFIG_IP_SET_LIST_SET=m
 CONFIG_NF_CONNTRACK_IPV4=m
 CONFIG_NFT_CHAIN_ROUTE_IPV4=m
+CONFIG_NFT_DUP_IPV4=m
 CONFIG_NF_TABLES_ARP=m
 CONFIG_NF_LOG_ARP=m
 CONFIG_NFT_CHAIN_NAT_IPV4=m
@@ -202,6 +205,7 @@ CONFIG_IP_NF_ARPFILTER=m
 CONFIG_IP_NF_ARP_MANGLE=m
 CONFIG_NF_CONNTRACK_IPV6=m
 CONFIG_NFT_CHAIN_ROUTE_IPV6=m
+CONFIG_NFT_DUP_IPV6=m
 CONFIG_NFT_CHAIN_NAT_IPV6=m
 CONFIG_NFT_MASQ_IPV6=m
 CONFIG_NFT_REDIR_IPV6=m
@@ -267,6 +271,7 @@ CONFIG_NETLINK_DIAG=m
 CONFIG_MPLS=y
 CONFIG_NET_MPLS_GSO=m
 CONFIG_MPLS_ROUTING=m
+CONFIG_MPLS_IPTUNNEL=m
 # CONFIG_WIRELESS is not set
 # CONFIG_UEVENT_HELPER is not set
 CONFIG_DEVTMPFS=y
@@ -343,6 +348,7 @@ CONFIG_BVME6000_NET=y
 # CONFIG_NET_VENDOR_SAMSUNG is not set
 # CONFIG_NET_VENDOR_SEEQ is not set
 # CONFIG_NET_VENDOR_STMICRO is not set
+# CONFIG_NET_VENDOR_SYNOPSYS is not set
 # CONFIG_NET_VENDOR_VIA is not set
 # CONFIG_NET_VENDOR_WIZNET is not set
 CONFIG_PPP=m
@@ -488,6 +494,7 @@ CONFIG_TEST_USER_COPY=m
 CONFIG_TEST_BPF=m
 CONFIG_TEST_FIRMWARE=m
 CONFIG_TEST_UDELAY=m
+CONFIG_TEST_STATIC_KEYS=m
 CONFIG_EARLY_PRINTK=y
 CONFIG_ENCRYPTED_KEYS=m
 CONFIG_CRYPTO_RSA=m
diff --git a/arch/m68k/configs/hp300_defconfig b/arch/m68k/configs/hp300_defconfig
index ad1dbce07aa4..77f4a11083e9 100644
--- a/arch/m68k/configs/hp300_defconfig
+++ b/arch/m68k/configs/hp300_defconfig
@@ -10,6 +10,7 @@ CONFIG_LOG_BUF_SHIFT=16
 # CONFIG_PID_NS is not set
 # CONFIG_NET_NS is not set
 CONFIG_BLK_DEV_INITRD=y
+CONFIG_USERFAULTFD=y
 CONFIG_SLAB=y
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
@@ -55,7 +56,6 @@ CONFIG_NET_IPGRE_DEMUX=m
 CONFIG_NET_IPGRE=m
 CONFIG_NET_IPVTI=m
 CONFIG_NET_FOU_IP_TUNNELS=y
-CONFIG_GENEVE_CORE=m
 CONFIG_INET_AH=m
 CONFIG_INET_ESP=m
 CONFIG_INET_IPCOMP=m
@@ -65,10 +65,12 @@ CONFIG_INET_XFRM_MODE_BEET=m
 # CONFIG_INET_LRO is not set
 CONFIG_INET_DIAG=m
 CONFIG_INET_UDP_DIAG=m
+CONFIG_IPV6=m
 CONFIG_IPV6_ROUTER_PREF=y
 CONFIG_INET6_AH=m
 CONFIG_INET6_ESP=m
 CONFIG_INET6_IPCOMP=m
+CONFIG_IPV6_ILA=m
 CONFIG_IPV6_VTI=m
 CONFIG_IPV6_GRE=m
 CONFIG_NETFILTER=y
@@ -177,6 +179,7 @@ CONFIG_IP_SET_HASH_NETIFACE=m
 CONFIG_IP_SET_LIST_SET=m
 CONFIG_NF_CONNTRACK_IPV4=m
 CONFIG_NFT_CHAIN_ROUTE_IPV4=m
+CONFIG_NFT_DUP_IPV4=m
 CONFIG_NF_TABLES_ARP=m
 CONFIG_NF_LOG_ARP=m
 CONFIG_NFT_CHAIN_NAT_IPV4=m
@@ -204,6 +207,7 @@ CONFIG_IP_NF_ARPFILTER=m
 CONFIG_IP_NF_ARP_MANGLE=m
 CONFIG_NF_CONNTRACK_IPV6=m
 CONFIG_NFT_CHAIN_ROUTE_IPV6=m
+CONFIG_NFT_DUP_IPV6=m
 CONFIG_NFT_CHAIN_NAT_IPV6=m
 CONFIG_NFT_MASQ_IPV6=m
 CONFIG_NFT_REDIR_IPV6=m
@@ -269,6 +273,7 @@ CONFIG_NETLINK_DIAG=m
 CONFIG_MPLS=y
 CONFIG_NET_MPLS_GSO=m
 CONFIG_MPLS_ROUTING=m
+CONFIG_MPLS_IPTUNNEL=m
 # CONFIG_WIRELESS is not set
 # CONFIG_UEVENT_HELPER is not set
 CONFIG_DEVTMPFS=y
@@ -345,6 +350,7 @@ CONFIG_HPLANCE=y
 # CONFIG_NET_VENDOR_SAMSUNG is not set
 # CONFIG_NET_VENDOR_SEEQ is not set
 # CONFIG_NET_VENDOR_STMICRO is not set
+# CONFIG_NET_VENDOR_SYNOPSYS is not set
 # CONFIG_NET_VENDOR_VIA is not set
 # CONFIG_NET_VENDOR_WIZNET is not set
 CONFIG_PPP=m
@@ -497,6 +503,7 @@ CONFIG_TEST_USER_COPY=m
 CONFIG_TEST_BPF=m
 CONFIG_TEST_FIRMWARE=m
 CONFIG_TEST_UDELAY=m
+CONFIG_TEST_STATIC_KEYS=m
 CONFIG_EARLY_PRINTK=y
 CONFIG_ENCRYPTED_KEYS=m
 CONFIG_CRYPTO_RSA=m
diff --git a/arch/m68k/configs/mac_defconfig b/arch/m68k/configs/mac_defconfig
index b44acacaecf4..5a329f77329b 100644
--- a/arch/m68k/configs/mac_defconfig
+++ b/arch/m68k/configs/mac_defconfig
@@ -10,6 +10,7 @@ CONFIG_LOG_BUF_SHIFT=16
 # CONFIG_PID_NS is not set
 # CONFIG_NET_NS is not set
 CONFIG_BLK_DEV_INITRD=y
+CONFIG_USERFAULTFD=y
 CONFIG_SLAB=y
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
@@ -54,7 +55,6 @@ CONFIG_NET_IPGRE_DEMUX=m
 CONFIG_NET_IPGRE=m
 CONFIG_NET_IPVTI=m
 CONFIG_NET_FOU_IP_TUNNELS=y
-CONFIG_GENEVE_CORE=m
 CONFIG_INET_AH=m
 CONFIG_INET_ESP=m
 CONFIG_INET_IPCOMP=m
@@ -64,10 +64,12 @@ CONFIG_INET_XFRM_MODE_BEET=m
 # CONFIG_INET_LRO is not set
 CONFIG_INET_DIAG=m
 CONFIG_INET_UDP_DIAG=m
+CONFIG_IPV6=m
 CONFIG_IPV6_ROUTER_PREF=y
 CONFIG_INET6_AH=m
 CONFIG_INET6_ESP=m
 CONFIG_INET6_IPCOMP=m
+CONFIG_IPV6_ILA=m
 CONFIG_IPV6_VTI=m
 CONFIG_IPV6_GRE=m
 CONFIG_NETFILTER=y
@@ -176,6 +178,7 @@ CONFIG_IP_SET_HASH_NETIFACE=m
 CONFIG_IP_SET_LIST_SET=m
 CONFIG_NF_CONNTRACK_IPV4=m
 CONFIG_NFT_CHAIN_ROUTE_IPV4=m
+CONFIG_NFT_DUP_IPV4=m
 CONFIG_NF_TABLES_ARP=m
 CONFIG_NF_LOG_ARP=m
 CONFIG_NFT_CHAIN_NAT_IPV4=m
@@ -203,6 +206,7 @@ CONFIG_IP_NF_ARPFILTER=m
 CONFIG_IP_NF_ARP_MANGLE=m
 CONFIG_NF_CONNTRACK_IPV6=m
 CONFIG_NFT_CHAIN_ROUTE_IPV6=m
+CONFIG_NFT_DUP_IPV6=m
 CONFIG_NFT_CHAIN_NAT_IPV6=m
 CONFIG_NFT_MASQ_IPV6=m
 CONFIG_NFT_REDIR_IPV6=m
@@ -271,6 +275,7 @@ CONFIG_NETLINK_DIAG=m
 CONFIG_MPLS=y
 CONFIG_NET_MPLS_GSO=m
 CONFIG_MPLS_ROUTING=m
+CONFIG_MPLS_IPTUNNEL=m
 # CONFIG_WIRELESS is not set
 # CONFIG_UEVENT_HELPER is not set
 CONFIG_DEVTMPFS=y
@@ -364,6 +369,7 @@ CONFIG_MAC8390=y
 # CONFIG_NET_VENDOR_SEEQ is not set
 # CONFIG_NET_VENDOR_SMSC is not set
 # CONFIG_NET_VENDOR_STMICRO is not set
+# CONFIG_NET_VENDOR_SYNOPSYS is not set
 # CONFIG_NET_VENDOR_VIA is not set
 # CONFIG_NET_VENDOR_WIZNET is not set
 CONFIG_PPP=m
@@ -519,6 +525,7 @@ CONFIG_TEST_USER_COPY=m
 CONFIG_TEST_BPF=m
 CONFIG_TEST_FIRMWARE=m
 CONFIG_TEST_UDELAY=m
+CONFIG_TEST_STATIC_KEYS=m
 CONFIG_EARLY_PRINTK=y
 CONFIG_ENCRYPTED_KEYS=m
 CONFIG_CRYPTO_RSA=m
diff --git a/arch/m68k/configs/multi_defconfig b/arch/m68k/configs/multi_defconfig
index 8afca3753db1..83c80d2030ec 100644
--- a/arch/m68k/configs/multi_defconfig
+++ b/arch/m68k/configs/multi_defconfig
@@ -10,6 +10,7 @@ CONFIG_LOG_BUF_SHIFT=16
 # CONFIG_PID_NS is not set
 # CONFIG_NET_NS is not set
 CONFIG_BLK_DEV_INITRD=y
+CONFIG_USERFAULTFD=y
 CONFIG_SLAB=y
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
@@ -64,7 +65,6 @@ CONFIG_NET_IPGRE_DEMUX=m
 CONFIG_NET_IPGRE=m
 CONFIG_NET_IPVTI=m
 CONFIG_NET_FOU_IP_TUNNELS=y
-CONFIG_GENEVE_CORE=m
 CONFIG_INET_AH=m
 CONFIG_INET_ESP=m
 CONFIG_INET_IPCOMP=m
@@ -74,10 +74,12 @@ CONFIG_INET_XFRM_MODE_BEET=m
 # CONFIG_INET_LRO is not set
 CONFIG_INET_DIAG=m
 CONFIG_INET_UDP_DIAG=m
+CONFIG_IPV6=m
 CONFIG_IPV6_ROUTER_PREF=y
 CONFIG_INET6_AH=m
 CONFIG_INET6_ESP=m
 CONFIG_INET6_IPCOMP=m
+CONFIG_IPV6_ILA=m
 CONFIG_IPV6_VTI=m
 CONFIG_IPV6_GRE=m
 CONFIG_NETFILTER=y
@@ -186,6 +188,7 @@ CONFIG_IP_SET_HASH_NETIFACE=m
 CONFIG_IP_SET_LIST_SET=m
 CONFIG_NF_CONNTRACK_IPV4=m
 CONFIG_NFT_CHAIN_ROUTE_IPV4=m
+CONFIG_NFT_DUP_IPV4=m
 CONFIG_NF_TABLES_ARP=m
 CONFIG_NF_LOG_ARP=m
 CONFIG_NFT_CHAIN_NAT_IPV4=m
@@ -213,6 +216,7 @@ CONFIG_IP_NF_ARPFILTER=m
 CONFIG_IP_NF_ARP_MANGLE=m
 CONFIG_NF_CONNTRACK_IPV6=m
 CONFIG_NFT_CHAIN_ROUTE_IPV6=m
+CONFIG_NFT_DUP_IPV6=m
 CONFIG_NFT_CHAIN_NAT_IPV6=m
 CONFIG_NFT_MASQ_IPV6=m
 CONFIG_NFT_REDIR_IPV6=m
@@ -281,6 +285,7 @@ CONFIG_NETLINK_DIAG=m
 CONFIG_MPLS=y
 CONFIG_NET_MPLS_GSO=m
 CONFIG_MPLS_ROUTING=m
+CONFIG_MPLS_IPTUNNEL=m
 # CONFIG_WIRELESS is not set
 # CONFIG_UEVENT_HELPER is not set
 CONFIG_DEVTMPFS=y
@@ -410,6 +415,7 @@ CONFIG_ZORRO8390=y
 # CONFIG_NET_VENDOR_SEEQ is not set
 CONFIG_SMC91X=y
 # CONFIG_NET_VENDOR_STMICRO is not set
+# CONFIG_NET_VENDOR_SYNOPSYS is not set
 # CONFIG_NET_VENDOR_VIA is not set
 # CONFIG_NET_VENDOR_WIZNET is not set
 CONFIG_PLIP=m
@@ -599,6 +605,7 @@ CONFIG_TEST_USER_COPY=m
 CONFIG_TEST_BPF=m
 CONFIG_TEST_FIRMWARE=m
 CONFIG_TEST_UDELAY=m
+CONFIG_TEST_STATIC_KEYS=m
 CONFIG_EARLY_PRINTK=y
 CONFIG_ENCRYPTED_KEYS=m
 CONFIG_CRYPTO_RSA=m
diff --git a/arch/m68k/configs/mvme147_defconfig b/arch/m68k/configs/mvme147_defconfig
index ef00875994d9..6cb42c3bf5a2 100644
--- a/arch/m68k/configs/mvme147_defconfig
+++ b/arch/m68k/configs/mvme147_defconfig
@@ -10,6 +10,7 @@ CONFIG_LOG_BUF_SHIFT=16
 # CONFIG_PID_NS is not set
 # CONFIG_NET_NS is not set
 CONFIG_BLK_DEV_INITRD=y
+CONFIG_USERFAULTFD=y
 CONFIG_SLAB=y
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
@@ -52,7 +53,6 @@ CONFIG_NET_IPGRE_DEMUX=m
 CONFIG_NET_IPGRE=m
 CONFIG_NET_IPVTI=m
 CONFIG_NET_FOU_IP_TUNNELS=y
-CONFIG_GENEVE_CORE=m
 CONFIG_INET_AH=m
 CONFIG_INET_ESP=m
 CONFIG_INET_IPCOMP=m
@@ -62,10 +62,12 @@ CONFIG_INET_XFRM_MODE_BEET=m
 # CONFIG_INET_LRO is not set
 CONFIG_INET_DIAG=m
 CONFIG_INET_UDP_DIAG=m
+CONFIG_IPV6=m
 CONFIG_IPV6_ROUTER_PREF=y
 CONFIG_INET6_AH=m
 CONFIG_INET6_ESP=m
 CONFIG_INET6_IPCOMP=m
+CONFIG_IPV6_ILA=m
 CONFIG_IPV6_VTI=m
 CONFIG_IPV6_GRE=m
 CONFIG_NETFILTER=y
@@ -174,6 +176,7 @@ CONFIG_IP_SET_HASH_NETIFACE=m
 CONFIG_IP_SET_LIST_SET=m
 CONFIG_NF_CONNTRACK_IPV4=m
 CONFIG_NFT_CHAIN_ROUTE_IPV4=m
+CONFIG_NFT_DUP_IPV4=m
 CONFIG_NF_TABLES_ARP=m
 CONFIG_NF_LOG_ARP=m
 CONFIG_NFT_CHAIN_NAT_IPV4=m
@@ -201,6 +204,7 @@ CONFIG_IP_NF_ARPFILTER=m
 CONFIG_IP_NF_ARP_MANGLE=m
 CONFIG_NF_CONNTRACK_IPV6=m
 CONFIG_NFT_CHAIN_ROUTE_IPV6=m
+CONFIG_NFT_DUP_IPV6=m
 CONFIG_NFT_CHAIN_NAT_IPV6=m
 CONFIG_NFT_MASQ_IPV6=m
 CONFIG_NFT_REDIR_IPV6=m
@@ -266,6 +270,7 @@ CONFIG_NETLINK_DIAG=m
 CONFIG_MPLS=y
 CONFIG_NET_MPLS_GSO=m
 CONFIG_MPLS_ROUTING=m
+CONFIG_MPLS_IPTUNNEL=m
 # CONFIG_WIRELESS is not set
 # CONFIG_UEVENT_HELPER is not set
 CONFIG_DEVTMPFS=y
@@ -343,6 +348,7 @@ CONFIG_MVME147_NET=y
 # CONFIG_NET_VENDOR_SAMSUNG is not set
 # CONFIG_NET_VENDOR_SEEQ is not set
 # CONFIG_NET_VENDOR_STMICRO is not set
+# CONFIG_NET_VENDOR_SYNOPSYS is not set
 # CONFIG_NET_VENDOR_VIA is not set
 # CONFIG_NET_VENDOR_WIZNET is not set
 CONFIG_PPP=m
@@ -488,6 +494,7 @@ CONFIG_TEST_USER_COPY=m
 CONFIG_TEST_BPF=m
 CONFIG_TEST_FIRMWARE=m
 CONFIG_TEST_UDELAY=m
+CONFIG_TEST_STATIC_KEYS=m
 CONFIG_EARLY_PRINTK=y
 CONFIG_ENCRYPTED_KEYS=m
 CONFIG_CRYPTO_RSA=m
diff --git a/arch/m68k/configs/mvme16x_defconfig b/arch/m68k/configs/mvme16x_defconfig
index 387c2bd90ff1..c7508c30330c 100644
--- a/arch/m68k/configs/mvme16x_defconfig
+++ b/arch/m68k/configs/mvme16x_defconfig
@@ -10,6 +10,7 @@ CONFIG_LOG_BUF_SHIFT=16
 # CONFIG_PID_NS is not set
 # CONFIG_NET_NS is not set
 CONFIG_BLK_DEV_INITRD=y
+CONFIG_USERFAULTFD=y
 CONFIG_SLAB=y
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
@@ -53,7 +54,6 @@ CONFIG_NET_IPGRE_DEMUX=m
 CONFIG_NET_IPGRE=m
 CONFIG_NET_IPVTI=m
 CONFIG_NET_FOU_IP_TUNNELS=y
-CONFIG_GENEVE_CORE=m
 CONFIG_INET_AH=m
 CONFIG_INET_ESP=m
 CONFIG_INET_IPCOMP=m
@@ -63,10 +63,12 @@ CONFIG_INET_XFRM_MODE_BEET=m
 # CONFIG_INET_LRO is not set
 CONFIG_INET_DIAG=m
 CONFIG_INET_UDP_DIAG=m
+CONFIG_IPV6=m
 CONFIG_IPV6_ROUTER_PREF=y
 CONFIG_INET6_AH=m
 CONFIG_INET6_ESP=m
 CONFIG_INET6_IPCOMP=m
+CONFIG_IPV6_ILA=m
 CONFIG_IPV6_VTI=m
 CONFIG_IPV6_GRE=m
 CONFIG_NETFILTER=y
@@ -175,6 +177,7 @@ CONFIG_IP_SET_HASH_NETIFACE=m
 CONFIG_IP_SET_LIST_SET=m
 CONFIG_NF_CONNTRACK_IPV4=m
 CONFIG_NFT_CHAIN_ROUTE_IPV4=m
+CONFIG_NFT_DUP_IPV4=m
 CONFIG_NF_TABLES_ARP=m
 CONFIG_NF_LOG_ARP=m
 CONFIG_NFT_CHAIN_NAT_IPV4=m
@@ -202,6 +205,7 @@ CONFIG_IP_NF_ARPFILTER=m
 CONFIG_IP_NF_ARP_MANGLE=m
 CONFIG_NF_CONNTRACK_IPV6=m
 CONFIG_NFT_CHAIN_ROUTE_IPV6=m
+CONFIG_NFT_DUP_IPV6=m
 CONFIG_NFT_CHAIN_NAT_IPV6=m
 CONFIG_NFT_MASQ_IPV6=m
 CONFIG_NFT_REDIR_IPV6=m
@@ -267,6 +271,7 @@ CONFIG_NETLINK_DIAG=m
 CONFIG_MPLS=y
 CONFIG_NET_MPLS_GSO=m
 CONFIG_MPLS_ROUTING=m
+CONFIG_MPLS_IPTUNNEL=m
 # CONFIG_WIRELESS is not set
 # CONFIG_UEVENT_HELPER is not set
 CONFIG_DEVTMPFS=y
@@ -343,6 +348,7 @@ CONFIG_MVME16x_NET=y
 # CONFIG_NET_VENDOR_SAMSUNG is not set
 # CONFIG_NET_VENDOR_SEEQ is not set
 # CONFIG_NET_VENDOR_STMICRO is not set
+# CONFIG_NET_VENDOR_SYNOPSYS is not set
 # CONFIG_NET_VENDOR_VIA is not set
 # CONFIG_NET_VENDOR_WIZNET is not set
 CONFIG_PPP=m
@@ -488,6 +494,7 @@ CONFIG_TEST_USER_COPY=m
 CONFIG_TEST_BPF=m
 CONFIG_TEST_FIRMWARE=m
 CONFIG_TEST_UDELAY=m
+CONFIG_TEST_STATIC_KEYS=m
 CONFIG_EARLY_PRINTK=y
 CONFIG_ENCRYPTED_KEYS=m
 CONFIG_CRYPTO_RSA=m
diff --git a/arch/m68k/configs/q40_defconfig b/arch/m68k/configs/q40_defconfig index 35355c1bc714..64b71664a303 100644 --- a/arch/m68k/configs/q40_defconfig +++ b/arch/m68k/configs/q40_defconfig | |||
@@ -10,6 +10,7 @@ CONFIG_LOG_BUF_SHIFT=16 | |||
10 | # CONFIG_PID_NS is not set | 10 | # CONFIG_PID_NS is not set |
11 | # CONFIG_NET_NS is not set | 11 | # CONFIG_NET_NS is not set |
12 | CONFIG_BLK_DEV_INITRD=y | 12 | CONFIG_BLK_DEV_INITRD=y |
13 | CONFIG_USERFAULTFD=y | ||
13 | CONFIG_SLAB=y | 14 | CONFIG_SLAB=y |
14 | CONFIG_MODULES=y | 15 | CONFIG_MODULES=y |
15 | CONFIG_MODULE_UNLOAD=y | 16 | CONFIG_MODULE_UNLOAD=y |
@@ -53,7 +54,6 @@ CONFIG_NET_IPGRE_DEMUX=m | |||
53 | CONFIG_NET_IPGRE=m | 54 | CONFIG_NET_IPGRE=m |
54 | CONFIG_NET_IPVTI=m | 55 | CONFIG_NET_IPVTI=m |
55 | CONFIG_NET_FOU_IP_TUNNELS=y | 56 | CONFIG_NET_FOU_IP_TUNNELS=y |
56 | CONFIG_GENEVE_CORE=m | ||
57 | CONFIG_INET_AH=m | 57 | CONFIG_INET_AH=m |
58 | CONFIG_INET_ESP=m | 58 | CONFIG_INET_ESP=m |
59 | CONFIG_INET_IPCOMP=m | 59 | CONFIG_INET_IPCOMP=m |
@@ -63,10 +63,12 @@ CONFIG_INET_XFRM_MODE_BEET=m | |||
63 | # CONFIG_INET_LRO is not set | 63 | # CONFIG_INET_LRO is not set |
64 | CONFIG_INET_DIAG=m | 64 | CONFIG_INET_DIAG=m |
65 | CONFIG_INET_UDP_DIAG=m | 65 | CONFIG_INET_UDP_DIAG=m |
66 | CONFIG_IPV6=m | ||
66 | CONFIG_IPV6_ROUTER_PREF=y | 67 | CONFIG_IPV6_ROUTER_PREF=y |
67 | CONFIG_INET6_AH=m | 68 | CONFIG_INET6_AH=m |
68 | CONFIG_INET6_ESP=m | 69 | CONFIG_INET6_ESP=m |
69 | CONFIG_INET6_IPCOMP=m | 70 | CONFIG_INET6_IPCOMP=m |
71 | CONFIG_IPV6_ILA=m | ||
70 | CONFIG_IPV6_VTI=m | 72 | CONFIG_IPV6_VTI=m |
71 | CONFIG_IPV6_GRE=m | 73 | CONFIG_IPV6_GRE=m |
72 | CONFIG_NETFILTER=y | 74 | CONFIG_NETFILTER=y |
@@ -175,6 +177,7 @@ CONFIG_IP_SET_HASH_NETIFACE=m | |||
175 | CONFIG_IP_SET_LIST_SET=m | 177 | CONFIG_IP_SET_LIST_SET=m |
176 | CONFIG_NF_CONNTRACK_IPV4=m | 178 | CONFIG_NF_CONNTRACK_IPV4=m |
177 | CONFIG_NFT_CHAIN_ROUTE_IPV4=m | 179 | CONFIG_NFT_CHAIN_ROUTE_IPV4=m |
180 | CONFIG_NFT_DUP_IPV4=m | ||
178 | CONFIG_NF_TABLES_ARP=m | 181 | CONFIG_NF_TABLES_ARP=m |
179 | CONFIG_NF_LOG_ARP=m | 182 | CONFIG_NF_LOG_ARP=m |
180 | CONFIG_NFT_CHAIN_NAT_IPV4=m | 183 | CONFIG_NFT_CHAIN_NAT_IPV4=m |
@@ -202,6 +205,7 @@ CONFIG_IP_NF_ARPFILTER=m | |||
202 | CONFIG_IP_NF_ARP_MANGLE=m | 205 | CONFIG_IP_NF_ARP_MANGLE=m |
203 | CONFIG_NF_CONNTRACK_IPV6=m | 206 | CONFIG_NF_CONNTRACK_IPV6=m |
204 | CONFIG_NFT_CHAIN_ROUTE_IPV6=m | 207 | CONFIG_NFT_CHAIN_ROUTE_IPV6=m |
208 | CONFIG_NFT_DUP_IPV6=m | ||
205 | CONFIG_NFT_CHAIN_NAT_IPV6=m | 209 | CONFIG_NFT_CHAIN_NAT_IPV6=m |
206 | CONFIG_NFT_MASQ_IPV6=m | 210 | CONFIG_NFT_MASQ_IPV6=m |
207 | CONFIG_NFT_REDIR_IPV6=m | 211 | CONFIG_NFT_REDIR_IPV6=m |
@@ -267,6 +271,7 @@ CONFIG_NETLINK_DIAG=m | |||
267 | CONFIG_MPLS=y | 271 | CONFIG_MPLS=y |
268 | CONFIG_NET_MPLS_GSO=m | 272 | CONFIG_NET_MPLS_GSO=m |
269 | CONFIG_MPLS_ROUTING=m | 273 | CONFIG_MPLS_ROUTING=m |
274 | CONFIG_MPLS_IPTUNNEL=m | ||
270 | # CONFIG_WIRELESS is not set | 275 | # CONFIG_WIRELESS is not set |
271 | # CONFIG_UEVENT_HELPER is not set | 276 | # CONFIG_UEVENT_HELPER is not set |
272 | CONFIG_DEVTMPFS=y | 277 | CONFIG_DEVTMPFS=y |
@@ -354,6 +359,7 @@ CONFIG_NE2000=y | |||
354 | # CONFIG_NET_VENDOR_SEEQ is not set | 359 | # CONFIG_NET_VENDOR_SEEQ is not set |
355 | # CONFIG_NET_VENDOR_SMSC is not set | 360 | # CONFIG_NET_VENDOR_SMSC is not set |
356 | # CONFIG_NET_VENDOR_STMICRO is not set | 361 | # CONFIG_NET_VENDOR_STMICRO is not set |
362 | # CONFIG_NET_VENDOR_SYNOPSYS is not set | ||
357 | # CONFIG_NET_VENDOR_VIA is not set | 363 | # CONFIG_NET_VENDOR_VIA is not set |
358 | # CONFIG_NET_VENDOR_WIZNET is not set | 364 | # CONFIG_NET_VENDOR_WIZNET is not set |
359 | CONFIG_PLIP=m | 365 | CONFIG_PLIP=m |
@@ -510,6 +516,7 @@ CONFIG_TEST_USER_COPY=m | |||
510 | CONFIG_TEST_BPF=m | 516 | CONFIG_TEST_BPF=m |
511 | CONFIG_TEST_FIRMWARE=m | 517 | CONFIG_TEST_FIRMWARE=m |
512 | CONFIG_TEST_UDELAY=m | 518 | CONFIG_TEST_UDELAY=m |
519 | CONFIG_TEST_STATIC_KEYS=m | ||
513 | CONFIG_EARLY_PRINTK=y | 520 | CONFIG_EARLY_PRINTK=y |
514 | CONFIG_ENCRYPTED_KEYS=m | 521 | CONFIG_ENCRYPTED_KEYS=m |
515 | CONFIG_CRYPTO_RSA=m | 522 | CONFIG_CRYPTO_RSA=m |
diff --git a/arch/m68k/configs/sun3_defconfig b/arch/m68k/configs/sun3_defconfig index 8442d267b877..9a4cab78a2ea 100644 --- a/arch/m68k/configs/sun3_defconfig +++ b/arch/m68k/configs/sun3_defconfig | |||
@@ -10,6 +10,7 @@ CONFIG_LOG_BUF_SHIFT=16 | |||
10 | # CONFIG_PID_NS is not set | 10 | # CONFIG_PID_NS is not set |
11 | # CONFIG_NET_NS is not set | 11 | # CONFIG_NET_NS is not set |
12 | CONFIG_BLK_DEV_INITRD=y | 12 | CONFIG_BLK_DEV_INITRD=y |
13 | CONFIG_USERFAULTFD=y | ||
13 | CONFIG_SLAB=y | 14 | CONFIG_SLAB=y |
14 | CONFIG_MODULES=y | 15 | CONFIG_MODULES=y |
15 | CONFIG_MODULE_UNLOAD=y | 16 | CONFIG_MODULE_UNLOAD=y |
@@ -50,7 +51,6 @@ CONFIG_NET_IPGRE_DEMUX=m | |||
50 | CONFIG_NET_IPGRE=m | 51 | CONFIG_NET_IPGRE=m |
51 | CONFIG_NET_IPVTI=m | 52 | CONFIG_NET_IPVTI=m |
52 | CONFIG_NET_FOU_IP_TUNNELS=y | 53 | CONFIG_NET_FOU_IP_TUNNELS=y |
53 | CONFIG_GENEVE_CORE=m | ||
54 | CONFIG_INET_AH=m | 54 | CONFIG_INET_AH=m |
55 | CONFIG_INET_ESP=m | 55 | CONFIG_INET_ESP=m |
56 | CONFIG_INET_IPCOMP=m | 56 | CONFIG_INET_IPCOMP=m |
@@ -60,10 +60,12 @@ CONFIG_INET_XFRM_MODE_BEET=m | |||
60 | # CONFIG_INET_LRO is not set | 60 | # CONFIG_INET_LRO is not set |
61 | CONFIG_INET_DIAG=m | 61 | CONFIG_INET_DIAG=m |
62 | CONFIG_INET_UDP_DIAG=m | 62 | CONFIG_INET_UDP_DIAG=m |
63 | CONFIG_IPV6=m | ||
63 | CONFIG_IPV6_ROUTER_PREF=y | 64 | CONFIG_IPV6_ROUTER_PREF=y |
64 | CONFIG_INET6_AH=m | 65 | CONFIG_INET6_AH=m |
65 | CONFIG_INET6_ESP=m | 66 | CONFIG_INET6_ESP=m |
66 | CONFIG_INET6_IPCOMP=m | 67 | CONFIG_INET6_IPCOMP=m |
68 | CONFIG_IPV6_ILA=m | ||
67 | CONFIG_IPV6_VTI=m | 69 | CONFIG_IPV6_VTI=m |
68 | CONFIG_IPV6_GRE=m | 70 | CONFIG_IPV6_GRE=m |
69 | CONFIG_NETFILTER=y | 71 | CONFIG_NETFILTER=y |
@@ -172,6 +174,7 @@ CONFIG_IP_SET_HASH_NETIFACE=m | |||
172 | CONFIG_IP_SET_LIST_SET=m | 174 | CONFIG_IP_SET_LIST_SET=m |
173 | CONFIG_NF_CONNTRACK_IPV4=m | 175 | CONFIG_NF_CONNTRACK_IPV4=m |
174 | CONFIG_NFT_CHAIN_ROUTE_IPV4=m | 176 | CONFIG_NFT_CHAIN_ROUTE_IPV4=m |
177 | CONFIG_NFT_DUP_IPV4=m | ||
175 | CONFIG_NF_TABLES_ARP=m | 178 | CONFIG_NF_TABLES_ARP=m |
176 | CONFIG_NF_LOG_ARP=m | 179 | CONFIG_NF_LOG_ARP=m |
177 | CONFIG_NFT_CHAIN_NAT_IPV4=m | 180 | CONFIG_NFT_CHAIN_NAT_IPV4=m |
@@ -199,6 +202,7 @@ CONFIG_IP_NF_ARPFILTER=m | |||
199 | CONFIG_IP_NF_ARP_MANGLE=m | 202 | CONFIG_IP_NF_ARP_MANGLE=m |
200 | CONFIG_NF_CONNTRACK_IPV6=m | 203 | CONFIG_NF_CONNTRACK_IPV6=m |
201 | CONFIG_NFT_CHAIN_ROUTE_IPV6=m | 204 | CONFIG_NFT_CHAIN_ROUTE_IPV6=m |
205 | CONFIG_NFT_DUP_IPV6=m | ||
202 | CONFIG_NFT_CHAIN_NAT_IPV6=m | 206 | CONFIG_NFT_CHAIN_NAT_IPV6=m |
203 | CONFIG_NFT_MASQ_IPV6=m | 207 | CONFIG_NFT_MASQ_IPV6=m |
204 | CONFIG_NFT_REDIR_IPV6=m | 208 | CONFIG_NFT_REDIR_IPV6=m |
@@ -264,6 +268,7 @@ CONFIG_NETLINK_DIAG=m | |||
264 | CONFIG_MPLS=y | 268 | CONFIG_MPLS=y |
265 | CONFIG_NET_MPLS_GSO=m | 269 | CONFIG_NET_MPLS_GSO=m |
266 | CONFIG_MPLS_ROUTING=m | 270 | CONFIG_MPLS_ROUTING=m |
271 | CONFIG_MPLS_IPTUNNEL=m | ||
267 | # CONFIG_WIRELESS is not set | 272 | # CONFIG_WIRELESS is not set |
268 | # CONFIG_UEVENT_HELPER is not set | 273 | # CONFIG_UEVENT_HELPER is not set |
269 | CONFIG_DEVTMPFS=y | 274 | CONFIG_DEVTMPFS=y |
@@ -341,6 +346,7 @@ CONFIG_SUN3_82586=y | |||
341 | # CONFIG_NET_VENDOR_SEEQ is not set | 346 | # CONFIG_NET_VENDOR_SEEQ is not set |
342 | # CONFIG_NET_VENDOR_STMICRO is not set | 347 | # CONFIG_NET_VENDOR_STMICRO is not set |
343 | # CONFIG_NET_VENDOR_SUN is not set | 348 | # CONFIG_NET_VENDOR_SUN is not set |
349 | # CONFIG_NET_VENDOR_SYNOPSYS is not set | ||
344 | # CONFIG_NET_VENDOR_VIA is not set | 350 | # CONFIG_NET_VENDOR_VIA is not set |
345 | # CONFIG_NET_VENDOR_WIZNET is not set | 351 | # CONFIG_NET_VENDOR_WIZNET is not set |
346 | CONFIG_PPP=m | 352 | CONFIG_PPP=m |
@@ -489,6 +495,7 @@ CONFIG_TEST_USER_COPY=m | |||
489 | CONFIG_TEST_BPF=m | 495 | CONFIG_TEST_BPF=m |
490 | CONFIG_TEST_FIRMWARE=m | 496 | CONFIG_TEST_FIRMWARE=m |
491 | CONFIG_TEST_UDELAY=m | 497 | CONFIG_TEST_UDELAY=m |
498 | CONFIG_TEST_STATIC_KEYS=m | ||
492 | CONFIG_ENCRYPTED_KEYS=m | 499 | CONFIG_ENCRYPTED_KEYS=m |
493 | CONFIG_CRYPTO_RSA=m | 500 | CONFIG_CRYPTO_RSA=m |
494 | CONFIG_CRYPTO_MANAGER=y | 501 | CONFIG_CRYPTO_MANAGER=y |
diff --git a/arch/m68k/configs/sun3x_defconfig b/arch/m68k/configs/sun3x_defconfig index 0e1b542e1555..1a2eaac13dbd 100644 --- a/arch/m68k/configs/sun3x_defconfig +++ b/arch/m68k/configs/sun3x_defconfig | |||
@@ -10,6 +10,7 @@ CONFIG_LOG_BUF_SHIFT=16 | |||
10 | # CONFIG_PID_NS is not set | 10 | # CONFIG_PID_NS is not set |
11 | # CONFIG_NET_NS is not set | 11 | # CONFIG_NET_NS is not set |
12 | CONFIG_BLK_DEV_INITRD=y | 12 | CONFIG_BLK_DEV_INITRD=y |
13 | CONFIG_USERFAULTFD=y | ||
13 | CONFIG_SLAB=y | 14 | CONFIG_SLAB=y |
14 | CONFIG_MODULES=y | 15 | CONFIG_MODULES=y |
15 | CONFIG_MODULE_UNLOAD=y | 16 | CONFIG_MODULE_UNLOAD=y |
@@ -50,7 +51,6 @@ CONFIG_NET_IPGRE_DEMUX=m | |||
50 | CONFIG_NET_IPGRE=m | 51 | CONFIG_NET_IPGRE=m |
51 | CONFIG_NET_IPVTI=m | 52 | CONFIG_NET_IPVTI=m |
52 | CONFIG_NET_FOU_IP_TUNNELS=y | 53 | CONFIG_NET_FOU_IP_TUNNELS=y |
53 | CONFIG_GENEVE_CORE=m | ||
54 | CONFIG_INET_AH=m | 54 | CONFIG_INET_AH=m |
55 | CONFIG_INET_ESP=m | 55 | CONFIG_INET_ESP=m |
56 | CONFIG_INET_IPCOMP=m | 56 | CONFIG_INET_IPCOMP=m |
@@ -60,10 +60,12 @@ CONFIG_INET_XFRM_MODE_BEET=m | |||
60 | # CONFIG_INET_LRO is not set | 60 | # CONFIG_INET_LRO is not set |
61 | CONFIG_INET_DIAG=m | 61 | CONFIG_INET_DIAG=m |
62 | CONFIG_INET_UDP_DIAG=m | 62 | CONFIG_INET_UDP_DIAG=m |
63 | CONFIG_IPV6=m | ||
63 | CONFIG_IPV6_ROUTER_PREF=y | 64 | CONFIG_IPV6_ROUTER_PREF=y |
64 | CONFIG_INET6_AH=m | 65 | CONFIG_INET6_AH=m |
65 | CONFIG_INET6_ESP=m | 66 | CONFIG_INET6_ESP=m |
66 | CONFIG_INET6_IPCOMP=m | 67 | CONFIG_INET6_IPCOMP=m |
68 | CONFIG_IPV6_ILA=m | ||
67 | CONFIG_IPV6_VTI=m | 69 | CONFIG_IPV6_VTI=m |
68 | CONFIG_IPV6_GRE=m | 70 | CONFIG_IPV6_GRE=m |
69 | CONFIG_NETFILTER=y | 71 | CONFIG_NETFILTER=y |
@@ -172,6 +174,7 @@ CONFIG_IP_SET_HASH_NETIFACE=m | |||
172 | CONFIG_IP_SET_LIST_SET=m | 174 | CONFIG_IP_SET_LIST_SET=m |
173 | CONFIG_NF_CONNTRACK_IPV4=m | 175 | CONFIG_NF_CONNTRACK_IPV4=m |
174 | CONFIG_NFT_CHAIN_ROUTE_IPV4=m | 176 | CONFIG_NFT_CHAIN_ROUTE_IPV4=m |
177 | CONFIG_NFT_DUP_IPV4=m | ||
175 | CONFIG_NF_TABLES_ARP=m | 178 | CONFIG_NF_TABLES_ARP=m |
176 | CONFIG_NF_LOG_ARP=m | 179 | CONFIG_NF_LOG_ARP=m |
177 | CONFIG_NFT_CHAIN_NAT_IPV4=m | 180 | CONFIG_NFT_CHAIN_NAT_IPV4=m |
@@ -199,6 +202,7 @@ CONFIG_IP_NF_ARPFILTER=m | |||
199 | CONFIG_IP_NF_ARP_MANGLE=m | 202 | CONFIG_IP_NF_ARP_MANGLE=m |
200 | CONFIG_NF_CONNTRACK_IPV6=m | 203 | CONFIG_NF_CONNTRACK_IPV6=m |
201 | CONFIG_NFT_CHAIN_ROUTE_IPV6=m | 204 | CONFIG_NFT_CHAIN_ROUTE_IPV6=m |
205 | CONFIG_NFT_DUP_IPV6=m | ||
202 | CONFIG_NFT_CHAIN_NAT_IPV6=m | 206 | CONFIG_NFT_CHAIN_NAT_IPV6=m |
203 | CONFIG_NFT_MASQ_IPV6=m | 207 | CONFIG_NFT_MASQ_IPV6=m |
204 | CONFIG_NFT_REDIR_IPV6=m | 208 | CONFIG_NFT_REDIR_IPV6=m |
@@ -264,6 +268,7 @@ CONFIG_NETLINK_DIAG=m | |||
264 | CONFIG_MPLS=y | 268 | CONFIG_MPLS=y |
265 | CONFIG_NET_MPLS_GSO=m | 269 | CONFIG_NET_MPLS_GSO=m |
266 | CONFIG_MPLS_ROUTING=m | 270 | CONFIG_MPLS_ROUTING=m |
271 | CONFIG_MPLS_IPTUNNEL=m | ||
267 | # CONFIG_WIRELESS is not set | 272 | # CONFIG_WIRELESS is not set |
268 | # CONFIG_UEVENT_HELPER is not set | 273 | # CONFIG_UEVENT_HELPER is not set |
269 | CONFIG_DEVTMPFS=y | 274 | CONFIG_DEVTMPFS=y |
@@ -341,6 +346,7 @@ CONFIG_SUN3LANCE=y | |||
341 | # CONFIG_NET_VENDOR_SAMSUNG is not set | 346 | # CONFIG_NET_VENDOR_SAMSUNG is not set |
342 | # CONFIG_NET_VENDOR_SEEQ is not set | 347 | # CONFIG_NET_VENDOR_SEEQ is not set |
343 | # CONFIG_NET_VENDOR_STMICRO is not set | 348 | # CONFIG_NET_VENDOR_STMICRO is not set |
349 | # CONFIG_NET_VENDOR_SYNOPSYS is not set | ||
344 | # CONFIG_NET_VENDOR_VIA is not set | 350 | # CONFIG_NET_VENDOR_VIA is not set |
345 | # CONFIG_NET_VENDOR_WIZNET is not set | 351 | # CONFIG_NET_VENDOR_WIZNET is not set |
346 | CONFIG_PPP=m | 352 | CONFIG_PPP=m |
@@ -489,6 +495,7 @@ CONFIG_TEST_USER_COPY=m | |||
489 | CONFIG_TEST_BPF=m | 495 | CONFIG_TEST_BPF=m |
490 | CONFIG_TEST_FIRMWARE=m | 496 | CONFIG_TEST_FIRMWARE=m |
491 | CONFIG_TEST_UDELAY=m | 497 | CONFIG_TEST_UDELAY=m |
498 | CONFIG_TEST_STATIC_KEYS=m | ||
492 | CONFIG_EARLY_PRINTK=y | 499 | CONFIG_EARLY_PRINTK=y |
493 | CONFIG_ENCRYPTED_KEYS=m | 500 | CONFIG_ENCRYPTED_KEYS=m |
494 | CONFIG_CRYPTO_RSA=m | 501 | CONFIG_CRYPTO_RSA=m |
diff --git a/arch/m68k/include/asm/linkage.h b/arch/m68k/include/asm/linkage.h index 5a822bb790f7..066e74f666ae 100644 --- a/arch/m68k/include/asm/linkage.h +++ b/arch/m68k/include/asm/linkage.h | |||
@@ -4,4 +4,34 @@ | |||
4 | #define __ALIGN .align 4 | 4 | #define __ALIGN .align 4 |
5 | #define __ALIGN_STR ".align 4" | 5 | #define __ALIGN_STR ".align 4" |
6 | 6 | ||
7 | /* | ||
8 | * Make sure the compiler doesn't do anything stupid with the | ||
9 | * arguments on the stack - they are owned by the *caller*, not | ||
10 | * the callee. This just fools gcc into not spilling into them, | ||
11 | * and keeps it from doing tailcall recursion and/or using the | ||
12 | * stack slots for temporaries, since they are live and "used" | ||
13 | * all the way to the end of the function. | ||
14 | */ | ||
15 | #define asmlinkage_protect(n, ret, args...) \ | ||
16 | __asmlinkage_protect##n(ret, ##args) | ||
17 | #define __asmlinkage_protect_n(ret, args...) \ | ||
18 | __asm__ __volatile__ ("" : "=r" (ret) : "0" (ret), ##args) | ||
19 | #define __asmlinkage_protect0(ret) \ | ||
20 | __asmlinkage_protect_n(ret) | ||
21 | #define __asmlinkage_protect1(ret, arg1) \ | ||
22 | __asmlinkage_protect_n(ret, "m" (arg1)) | ||
23 | #define __asmlinkage_protect2(ret, arg1, arg2) \ | ||
24 | __asmlinkage_protect_n(ret, "m" (arg1), "m" (arg2)) | ||
25 | #define __asmlinkage_protect3(ret, arg1, arg2, arg3) \ | ||
26 | __asmlinkage_protect_n(ret, "m" (arg1), "m" (arg2), "m" (arg3)) | ||
27 | #define __asmlinkage_protect4(ret, arg1, arg2, arg3, arg4) \ | ||
28 | __asmlinkage_protect_n(ret, "m" (arg1), "m" (arg2), "m" (arg3), \ | ||
29 | "m" (arg4)) | ||
30 | #define __asmlinkage_protect5(ret, arg1, arg2, arg3, arg4, arg5) \ | ||
31 | __asmlinkage_protect_n(ret, "m" (arg1), "m" (arg2), "m" (arg3), \ | ||
32 | "m" (arg4), "m" (arg5)) | ||
33 | #define __asmlinkage_protect6(ret, arg1, arg2, arg3, arg4, arg5, arg6) \ | ||
34 | __asmlinkage_protect_n(ret, "m" (arg1), "m" (arg2), "m" (arg3), \ | ||
35 | "m" (arg4), "m" (arg5), "m" (arg6)) | ||
36 | |||
7 | #endif | 37 | #endif |
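The asmlinkage_protect() macros added above keep GCC from reusing the caller-owned argument slots of an asmlinkage function: each argument is fed into an empty inline-asm statement as an "m" input, so the slots stay live until the function returns and cannot be clobbered by spills or tail-call optimisation. A minimal sketch of how the macro is typically invoked at the end of a stack-argument function; sys_example() and do_example() are hypothetical names, not part of the patch:

```c
#include <linux/linkage.h>

extern long do_example(int fd, unsigned long len);	/* hypothetical helper */

asmlinkage long sys_example(int fd, unsigned long len)	/* hypothetical syscall */
{
	long ret = do_example(fd, len);

	/* expands to:
	 *   asm volatile("" : "=r"(ret) : "0"(ret), "m"(fd), "m"(len));
	 * which keeps the stack slots of fd and len "used" until return.
	 */
	asmlinkage_protect(2, ret, fd, len);
	return ret;
}
```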
diff --git a/arch/m68k/include/asm/unistd.h b/arch/m68k/include/asm/unistd.h index 244e0dbe45db..0793a7f17417 100644 --- a/arch/m68k/include/asm/unistd.h +++ b/arch/m68k/include/asm/unistd.h | |||
@@ -4,7 +4,7 @@ | |||
4 | #include <uapi/asm/unistd.h> | 4 | #include <uapi/asm/unistd.h> |
5 | 5 | ||
6 | 6 | ||
7 | #define NR_syscalls 356 | 7 | #define NR_syscalls 375 |
8 | 8 | ||
9 | #define __ARCH_WANT_OLD_READDIR | 9 | #define __ARCH_WANT_OLD_READDIR |
10 | #define __ARCH_WANT_OLD_STAT | 10 | #define __ARCH_WANT_OLD_STAT |
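A quick check on the new count: the table previously covered syscalls 0-355 (356 entries), and the additions below occupy numbers 356-374, i.e. 17 direct socket-family calls plus userfaultfd and membarrier, so NR_syscalls = 356 + 19 = 375.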
diff --git a/arch/m68k/include/uapi/asm/unistd.h b/arch/m68k/include/uapi/asm/unistd.h index 61fb6cb9d2ae..5e6fae6c275f 100644 --- a/arch/m68k/include/uapi/asm/unistd.h +++ b/arch/m68k/include/uapi/asm/unistd.h | |||
@@ -361,5 +361,24 @@ | |||
361 | #define __NR_memfd_create 353 | 361 | #define __NR_memfd_create 353 |
362 | #define __NR_bpf 354 | 362 | #define __NR_bpf 354 |
363 | #define __NR_execveat 355 | 363 | #define __NR_execveat 355 |
364 | #define __NR_socket 356 | ||
365 | #define __NR_socketpair 357 | ||
366 | #define __NR_bind 358 | ||
367 | #define __NR_connect 359 | ||
368 | #define __NR_listen 360 | ||
369 | #define __NR_accept4 361 | ||
370 | #define __NR_getsockopt 362 | ||
371 | #define __NR_setsockopt 363 | ||
372 | #define __NR_getsockname 364 | ||
373 | #define __NR_getpeername 365 | ||
374 | #define __NR_sendto 366 | ||
375 | #define __NR_sendmsg 367 | ||
376 | #define __NR_recvfrom 368 | ||
377 | #define __NR_recvmsg 369 | ||
378 | #define __NR_shutdown 370 | ||
379 | #define __NR_recvmmsg 371 | ||
380 | #define __NR_sendmmsg 372 | ||
381 | #define __NR_userfaultfd 373 | ||
382 | #define __NR_membarrier 374 | ||
364 | 383 | ||
365 | #endif /* _UAPI_ASM_M68K_UNISTD_H_ */ | 384 | #endif /* _UAPI_ASM_M68K_UNISTD_H_ */ |
diff --git a/arch/m68k/kernel/syscalltable.S b/arch/m68k/kernel/syscalltable.S index a0ec4303f2c8..5dd0e80042f5 100644 --- a/arch/m68k/kernel/syscalltable.S +++ b/arch/m68k/kernel/syscalltable.S | |||
@@ -376,4 +376,22 @@ ENTRY(sys_call_table) | |||
376 | .long sys_memfd_create | 376 | .long sys_memfd_create |
377 | .long sys_bpf | 377 | .long sys_bpf |
378 | .long sys_execveat /* 355 */ | 378 | .long sys_execveat /* 355 */ |
379 | 379 | .long sys_socket | |
380 | .long sys_socketpair | ||
381 | .long sys_bind | ||
382 | .long sys_connect | ||
383 | .long sys_listen /* 360 */ | ||
384 | .long sys_accept4 | ||
385 | .long sys_getsockopt | ||
386 | .long sys_setsockopt | ||
387 | .long sys_getsockname | ||
388 | .long sys_getpeername /* 365 */ | ||
389 | .long sys_sendto | ||
390 | .long sys_sendmsg | ||
391 | .long sys_recvfrom | ||
392 | .long sys_recvmsg | ||
393 | .long sys_shutdown /* 370 */ | ||
394 | .long sys_recvmmsg | ||
395 | .long sys_sendmmsg | ||
396 | .long sys_userfaultfd | ||
397 | .long sys_membarrier | ||
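With both the numbers and the table entries in place, the new calls are reachable from m68k userspace through syscall(2) even before the C library provides wrappers. A hedged example exercising the direct socket entry point; the fallback definition of __NR_socket assumes the m68k numbering introduced above, and on kernels without this patch the call fails with ENOSYS:

```c
/* Hedged userspace sketch: probe the newly wired direct socket syscall. */
#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>

#ifndef __NR_socket
#define __NR_socket 356			/* m68k number added by this patch */
#endif

int main(void)
{
	long fd = syscall(__NR_socket, 2 /* AF_INET */, 2 /* SOCK_DGRAM */, 0);

	if (fd < 0)
		printf("socket syscall: %s\n", strerror(errno));
	else
		printf("socket syscall returned fd %ld\n", fd);
	return 0;
}
```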
diff --git a/arch/metag/include/asm/Kbuild b/arch/metag/include/asm/Kbuild index df31353fd200..29acb89daaaa 100644 --- a/arch/metag/include/asm/Kbuild +++ b/arch/metag/include/asm/Kbuild | |||
@@ -54,4 +54,5 @@ generic-y += ucontext.h | |||
54 | generic-y += unaligned.h | 54 | generic-y += unaligned.h |
55 | generic-y += user.h | 55 | generic-y += user.h |
56 | generic-y += vga.h | 56 | generic-y += vga.h |
57 | generic-y += word-at-a-time.h | ||
57 | generic-y += xor.h | 58 | generic-y += xor.h |
diff --git a/arch/microblaze/include/asm/Kbuild b/arch/microblaze/include/asm/Kbuild index 2f222f355c4b..b0ae88c9fed9 100644 --- a/arch/microblaze/include/asm/Kbuild +++ b/arch/microblaze/include/asm/Kbuild | |||
@@ -10,3 +10,4 @@ generic-y += mm-arch-hooks.h | |||
10 | generic-y += preempt.h | 10 | generic-y += preempt.h |
11 | generic-y += syscalls.h | 11 | generic-y += syscalls.h |
12 | generic-y += trace_clock.h | 12 | generic-y += trace_clock.h |
13 | generic-y += word-at-a-time.h | ||
diff --git a/arch/mips/ath79/irq.c b/arch/mips/ath79/irq.c index 15ecb4831e12..eeb3953ed8ac 100644 --- a/arch/mips/ath79/irq.c +++ b/arch/mips/ath79/irq.c | |||
@@ -293,8 +293,26 @@ static int __init ath79_misc_intc_of_init( | |||
293 | 293 | ||
294 | return 0; | 294 | return 0; |
295 | } | 295 | } |
296 | IRQCHIP_DECLARE(ath79_misc_intc, "qca,ar7100-misc-intc", | 296 | |
297 | ath79_misc_intc_of_init); | 297 | static int __init ar7100_misc_intc_of_init( |
298 | struct device_node *node, struct device_node *parent) | ||
299 | { | ||
300 | ath79_misc_irq_chip.irq_mask_ack = ar71xx_misc_irq_mask; | ||
301 | return ath79_misc_intc_of_init(node, parent); | ||
302 | } | ||
303 | |||
304 | IRQCHIP_DECLARE(ar7100_misc_intc, "qca,ar7100-misc-intc", | ||
305 | ar7100_misc_intc_of_init); | ||
306 | |||
307 | static int __init ar7240_misc_intc_of_init( | ||
308 | struct device_node *node, struct device_node *parent) | ||
309 | { | ||
310 | ath79_misc_irq_chip.irq_ack = ar724x_misc_irq_ack; | ||
311 | return ath79_misc_intc_of_init(node, parent); | ||
312 | } | ||
313 | |||
314 | IRQCHIP_DECLARE(ar7240_misc_intc, "qca,ar7240-misc-intc", | ||
315 | ar7240_misc_intc_of_init); | ||
298 | 316 | ||
299 | static int __init ar79_cpu_intc_of_init( | 317 | static int __init ar79_cpu_intc_of_init( |
300 | struct device_node *node, struct device_node *parent) | 318 | struct device_node *node, struct device_node *parent) |
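The split into two IRQCHIP_DECLARE() entries mirrors the binding change: ar71xx/ar913x-class MISC blocks have no dedicated acknowledge register, so acking is done by masking (irq_mask_ack), while ar724x-class blocks clear the pending bit in a status register (irq_ack). A hedged sketch of what the two callbacks wired up above typically look like; the MISC_INT_ENABLE and MISC_INT_STATUS offsets are placeholders, not values from the patch:

```c
#include <linux/io.h>
#include <linux/irq.h>

/* Placeholder register offsets, for illustration only. */
#define MISC_INT_STATUS		0x00
#define MISC_INT_ENABLE		0x04

static void ar7100_style_mask_ack(struct irq_data *d)
{
	void __iomem *base = irq_data_get_irq_chip_data(d);
	u32 t = __raw_readl(base + MISC_INT_ENABLE);

	/* no dedicated ack register: acking means masking the source */
	__raw_writel(t & ~BIT(d->hwirq), base + MISC_INT_ENABLE);
}

static void ar7240_style_ack(struct irq_data *d)
{
	void __iomem *base = irq_data_get_irq_chip_data(d);
	u32 t = __raw_readl(base + MISC_INT_STATUS);

	/* ar724x and later: clear the pending bit explicitly */
	__raw_writel(t & ~BIT(d->hwirq), base + MISC_INT_STATUS);
}
```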
diff --git a/arch/mips/cavium-octeon/setup.c b/arch/mips/cavium-octeon/setup.c index 89a628455bc2..bd634259eab9 100644 --- a/arch/mips/cavium-octeon/setup.c +++ b/arch/mips/cavium-octeon/setup.c | |||
@@ -933,7 +933,7 @@ void __init plat_mem_setup(void) | |||
933 | while ((boot_mem_map.nr_map < BOOT_MEM_MAP_MAX) | 933 | while ((boot_mem_map.nr_map < BOOT_MEM_MAP_MAX) |
934 | && (total < MAX_MEMORY)) { | 934 | && (total < MAX_MEMORY)) { |
935 | memory = cvmx_bootmem_phy_alloc(mem_alloc_size, | 935 | memory = cvmx_bootmem_phy_alloc(mem_alloc_size, |
936 | __pa_symbol(&__init_end), -1, | 936 | __pa_symbol(&_end), -1, |
937 | 0x100000, | 937 | 0x100000, |
938 | CVMX_BOOTMEM_FLAG_NO_LOCKING); | 938 | CVMX_BOOTMEM_FLAG_NO_LOCKING); |
939 | if (memory >= 0) { | 939 | if (memory >= 0) { |
diff --git a/arch/mips/include/asm/Kbuild b/arch/mips/include/asm/Kbuild index 40ec4ca3f946..c7fe4d01e79c 100644 --- a/arch/mips/include/asm/Kbuild +++ b/arch/mips/include/asm/Kbuild | |||
@@ -17,4 +17,5 @@ generic-y += segment.h | |||
17 | generic-y += serial.h | 17 | generic-y += serial.h |
18 | generic-y += trace_clock.h | 18 | generic-y += trace_clock.h |
19 | generic-y += user.h | 19 | generic-y += user.h |
20 | generic-y += word-at-a-time.h | ||
20 | generic-y += xor.h | 21 | generic-y += xor.h |
diff --git a/arch/mips/include/asm/cpu-features.h b/arch/mips/include/asm/cpu-features.h index 9801ac982655..fe67f12ac239 100644 --- a/arch/mips/include/asm/cpu-features.h +++ b/arch/mips/include/asm/cpu-features.h | |||
@@ -20,6 +20,9 @@ | |||
20 | #ifndef cpu_has_tlb | 20 | #ifndef cpu_has_tlb |
21 | #define cpu_has_tlb (cpu_data[0].options & MIPS_CPU_TLB) | 21 | #define cpu_has_tlb (cpu_data[0].options & MIPS_CPU_TLB) |
22 | #endif | 22 | #endif |
23 | #ifndef cpu_has_ftlb | ||
24 | #define cpu_has_ftlb (cpu_data[0].options & MIPS_CPU_FTLB) | ||
25 | #endif | ||
23 | #ifndef cpu_has_tlbinv | 26 | #ifndef cpu_has_tlbinv |
24 | #define cpu_has_tlbinv (cpu_data[0].options & MIPS_CPU_TLBINV) | 27 | #define cpu_has_tlbinv (cpu_data[0].options & MIPS_CPU_TLBINV) |
25 | #endif | 28 | #endif |
diff --git a/arch/mips/include/asm/cpu.h b/arch/mips/include/asm/cpu.h index cd89e9855775..82ad15f11049 100644 --- a/arch/mips/include/asm/cpu.h +++ b/arch/mips/include/asm/cpu.h | |||
@@ -385,6 +385,7 @@ enum cpu_type_enum { | |||
385 | #define MIPS_CPU_CDMM 0x4000000000ull /* CPU has Common Device Memory Map */ | 385 | #define MIPS_CPU_CDMM 0x4000000000ull /* CPU has Common Device Memory Map */ |
386 | #define MIPS_CPU_BP_GHIST 0x8000000000ull /* R12K+ Branch Prediction Global History */ | 386 | #define MIPS_CPU_BP_GHIST 0x8000000000ull /* R12K+ Branch Prediction Global History */ |
387 | #define MIPS_CPU_SP 0x10000000000ull /* Small (1KB) page support */ | 387 | #define MIPS_CPU_SP 0x10000000000ull /* Small (1KB) page support */ |
388 | #define MIPS_CPU_FTLB 0x20000000000ull /* CPU has Fixed-page-size TLB */ | ||
388 | 389 | ||
389 | /* | 390 | /* |
390 | * CPU ASE encodings | 391 | * CPU ASE encodings |
diff --git a/arch/mips/include/asm/maar.h b/arch/mips/include/asm/maar.h index b02891f9caaf..21d9607c80d7 100644 --- a/arch/mips/include/asm/maar.h +++ b/arch/mips/include/asm/maar.h | |||
@@ -66,6 +66,15 @@ static inline void write_maar_pair(unsigned idx, phys_addr_t lower, | |||
66 | } | 66 | } |
67 | 67 | ||
68 | /** | 68 | /** |
69 | * maar_init() - initialise MAARs | ||
70 | * | ||
71 | * Performs initialisation of MAARs for the current CPU, making use of the | ||
72 | * platform's implementation of platform_maar_init where necessary and | ||
73 | * duplicating the setup it provides on secondary CPUs. | ||
74 | */ | ||
75 | extern void maar_init(void); | ||
76 | |||
77 | /** | ||
69 | * struct maar_config - MAAR configuration data | 78 | * struct maar_config - MAAR configuration data |
70 | * @lower: The lowest address that the MAAR pair will affect. Must be | 79 | * @lower: The lowest address that the MAAR pair will affect. Must be |
71 | * aligned to a 2^16 byte boundary. | 80 | * aligned to a 2^16 byte boundary. |
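Exporting maar_init() lets the secondary-CPU bring-up path (see the smp.c hunk further below) reapply the MAAR configuration the boot CPU recorded. For reference, a hedged in-kernel sketch of programming one pair with the write_maar_pair() accessor visible in the context above; the 256 MB range is an arbitrary example, chosen to satisfy the alignment rules quoted in the struct maar_config description:

```c
/*
 * Hedged sketch: mark one physical range via MAAR pair 0.  lower must be
 * 64 KB aligned and upper must end one byte before a 64 KB boundary.
 */
#include <linux/types.h>
#include <asm/maar.h>

static void example_maar_setup(void)
{
	phys_addr_t lower = 0x00000000;
	phys_addr_t upper = 0x10000000 - 1;	/* inclusive upper bound */

	write_maar_pair(0, lower, upper);
}
```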
diff --git a/arch/mips/include/asm/mips-cm.h b/arch/mips/include/asm/mips-cm.h index d75b75e78ebb..1f1927ab4269 100644 --- a/arch/mips/include/asm/mips-cm.h +++ b/arch/mips/include/asm/mips-cm.h | |||
@@ -194,6 +194,7 @@ BUILD_CM_RW(reg3_mask, MIPS_CM_GCB_OFS + 0xc8) | |||
194 | BUILD_CM_R_(gic_status, MIPS_CM_GCB_OFS + 0xd0) | 194 | BUILD_CM_R_(gic_status, MIPS_CM_GCB_OFS + 0xd0) |
195 | BUILD_CM_R_(cpc_status, MIPS_CM_GCB_OFS + 0xf0) | 195 | BUILD_CM_R_(cpc_status, MIPS_CM_GCB_OFS + 0xf0) |
196 | BUILD_CM_RW(l2_config, MIPS_CM_GCB_OFS + 0x130) | 196 | BUILD_CM_RW(l2_config, MIPS_CM_GCB_OFS + 0x130) |
197 | BUILD_CM_RW(sys_config2, MIPS_CM_GCB_OFS + 0x150) | ||
197 | 198 | ||
198 | /* Core Local & Core Other register accessor functions */ | 199 | /* Core Local & Core Other register accessor functions */ |
199 | BUILD_CM_Cx_RW(reset_release, 0x00) | 200 | BUILD_CM_Cx_RW(reset_release, 0x00) |
@@ -316,6 +317,10 @@ BUILD_CM_Cx_R_(tcid_8_priority, 0x80) | |||
316 | #define CM_GCR_L2_CONFIG_ASSOC_SHF 0 | 317 | #define CM_GCR_L2_CONFIG_ASSOC_SHF 0 |
317 | #define CM_GCR_L2_CONFIG_ASSOC_MSK (_ULCAST_(0xff) << 0) | 318 | #define CM_GCR_L2_CONFIG_ASSOC_MSK (_ULCAST_(0xff) << 0) |
318 | 319 | ||
320 | /* GCR_SYS_CONFIG2 register fields */ | ||
321 | #define CM_GCR_SYS_CONFIG2_MAXVPW_SHF 0 | ||
322 | #define CM_GCR_SYS_CONFIG2_MAXVPW_MSK (_ULCAST_(0xf) << 0) | ||
323 | |||
319 | /* GCR_Cx_COHERENCE register fields */ | 324 | /* GCR_Cx_COHERENCE register fields */ |
320 | #define CM_GCR_Cx_COHERENCE_COHDOMAINEN_SHF 0 | 325 | #define CM_GCR_Cx_COHERENCE_COHDOMAINEN_SHF 0 |
321 | #define CM_GCR_Cx_COHERENCE_COHDOMAINEN_MSK (_ULCAST_(0xff) << 0) | 326 | #define CM_GCR_Cx_COHERENCE_COHDOMAINEN_MSK (_ULCAST_(0xff) << 0) |
@@ -405,4 +410,38 @@ static inline int mips_cm_revision(void) | |||
405 | return read_gcr_rev(); | 410 | return read_gcr_rev(); |
406 | } | 411 | } |
407 | 412 | ||
413 | /** | ||
414 | * mips_cm_max_vp_width() - return the width in bits of VP indices | ||
415 | * | ||
416 | * Return: the width, in bits, of VP indices in fields that combine core & VP | ||
417 | * indices. | ||
418 | */ | ||
419 | static inline unsigned int mips_cm_max_vp_width(void) | ||
420 | { | ||
421 | extern int smp_num_siblings; | ||
422 | |||
423 | if (mips_cm_revision() >= CM_REV_CM3) | ||
424 | return read_gcr_sys_config2() & CM_GCR_SYS_CONFIG2_MAXVPW_MSK; | ||
425 | |||
426 | return smp_num_siblings; | ||
427 | } | ||
428 | |||
429 | /** | ||
430 | * mips_cm_vp_id() - calculate the hardware VP ID for a CPU | ||
431 | * @cpu: the CPU whose VP ID to calculate | ||
432 | * | ||
433 | * Hardware such as the GIC uses identifiers for VPs which may not match the | ||
434 | * CPU numbers used by Linux. This function calculates the hardware VP | ||
435 | * identifier corresponding to a given CPU. | ||
436 | * | ||
437 | * Return: the VP ID for the CPU. | ||
438 | */ | ||
439 | static inline unsigned int mips_cm_vp_id(unsigned int cpu) | ||
440 | { | ||
441 | unsigned int core = cpu_data[cpu].core; | ||
442 | unsigned int vp = cpu_vpe_id(&cpu_data[cpu]); | ||
443 | |||
444 | return (core * mips_cm_max_vp_width()) + vp; | ||
445 | } | ||
446 | |||
408 | #endif /* __MIPS_ASM_MIPS_CM_H__ */ | 447 | #endif /* __MIPS_ASM_MIPS_CM_H__ */ |
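The two helpers give hardware blocks such as the GIC a linear VP numbering that may differ from Linux CPU numbers: the VP width comes from GCR_SYS_CONFIG2.MAXVPW on CM3 and later, otherwise from smp_num_siblings, and the ID is computed as core * width + vp. Worked example with an assumed width of 4: core 2, VP 1 maps to 2 * 4 + 1 = 9, and core 3, VP 0 maps to 12.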
diff --git a/arch/mips/include/asm/mipsregs.h b/arch/mips/include/asm/mipsregs.h index d3cd8eac81e3..c64781cf649f 100644 --- a/arch/mips/include/asm/mipsregs.h +++ b/arch/mips/include/asm/mipsregs.h | |||
@@ -487,6 +487,8 @@ | |||
487 | 487 | ||
488 | /* Bits specific to the MIPS32/64 PRA. */ | 488 | /* Bits specific to the MIPS32/64 PRA. */ |
489 | #define MIPS_CONF_MT (_ULCAST_(7) << 7) | 489 | #define MIPS_CONF_MT (_ULCAST_(7) << 7) |
490 | #define MIPS_CONF_MT_TLB (_ULCAST_(1) << 7) | ||
491 | #define MIPS_CONF_MT_FTLB (_ULCAST_(4) << 7) | ||
490 | #define MIPS_CONF_AR (_ULCAST_(7) << 10) | 492 | #define MIPS_CONF_AR (_ULCAST_(7) << 10) |
491 | #define MIPS_CONF_AT (_ULCAST_(3) << 13) | 493 | #define MIPS_CONF_AT (_ULCAST_(3) << 13) |
492 | #define MIPS_CONF_M (_ULCAST_(1) << 31) | 494 | #define MIPS_CONF_M (_ULCAST_(1) << 31) |
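For reference, Config.MT is the 3-bit MMU-type field at bit 7, so the masks above work out to MIPS_CONF_MT = 7 << 7 = 0x380, MIPS_CONF_MT_TLB = 1 << 7 = 0x80 (standard VTLB) and MIPS_CONF_MT_FTLB = 4 << 7 = 0x200 (dual VTLB and FTLB). These are the two encodings that decode_config0() below now compares against directly, instead of shifting the field down and testing for 1 or 4.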
diff --git a/arch/mips/include/uapi/asm/unistd.h b/arch/mips/include/uapi/asm/unistd.h index c03088f9f514..cfabadb135d9 100644 --- a/arch/mips/include/uapi/asm/unistd.h +++ b/arch/mips/include/uapi/asm/unistd.h | |||
@@ -377,16 +377,18 @@ | |||
377 | #define __NR_memfd_create (__NR_Linux + 354) | 377 | #define __NR_memfd_create (__NR_Linux + 354) |
378 | #define __NR_bpf (__NR_Linux + 355) | 378 | #define __NR_bpf (__NR_Linux + 355) |
379 | #define __NR_execveat (__NR_Linux + 356) | 379 | #define __NR_execveat (__NR_Linux + 356) |
380 | #define __NR_userfaultfd (__NR_Linux + 357) | ||
381 | #define __NR_membarrier (__NR_Linux + 358) | ||
380 | 382 | ||
381 | /* | 383 | /* |
382 | * Offset of the last Linux o32 flavoured syscall | 384 | * Offset of the last Linux o32 flavoured syscall |
383 | */ | 385 | */ |
384 | #define __NR_Linux_syscalls 356 | 386 | #define __NR_Linux_syscalls 358 |
385 | 387 | ||
386 | #endif /* _MIPS_SIM == _MIPS_SIM_ABI32 */ | 388 | #endif /* _MIPS_SIM == _MIPS_SIM_ABI32 */ |
387 | 389 | ||
388 | #define __NR_O32_Linux 4000 | 390 | #define __NR_O32_Linux 4000 |
389 | #define __NR_O32_Linux_syscalls 356 | 391 | #define __NR_O32_Linux_syscalls 358 |
390 | 392 | ||
391 | #if _MIPS_SIM == _MIPS_SIM_ABI64 | 393 | #if _MIPS_SIM == _MIPS_SIM_ABI64 |
392 | 394 | ||
@@ -711,16 +713,18 @@ | |||
711 | #define __NR_memfd_create (__NR_Linux + 314) | 713 | #define __NR_memfd_create (__NR_Linux + 314) |
712 | #define __NR_bpf (__NR_Linux + 315) | 714 | #define __NR_bpf (__NR_Linux + 315) |
713 | #define __NR_execveat (__NR_Linux + 316) | 715 | #define __NR_execveat (__NR_Linux + 316) |
716 | #define __NR_userfaultfd (__NR_Linux + 317) | ||
717 | #define __NR_membarrier (__NR_Linux + 318) | ||
714 | 718 | ||
715 | /* | 719 | /* |
716 | * Offset of the last Linux 64-bit flavoured syscall | 720 | * Offset of the last Linux 64-bit flavoured syscall |
717 | */ | 721 | */ |
718 | #define __NR_Linux_syscalls 316 | 722 | #define __NR_Linux_syscalls 318 |
719 | 723 | ||
720 | #endif /* _MIPS_SIM == _MIPS_SIM_ABI64 */ | 724 | #endif /* _MIPS_SIM == _MIPS_SIM_ABI64 */ |
721 | 725 | ||
722 | #define __NR_64_Linux 5000 | 726 | #define __NR_64_Linux 5000 |
723 | #define __NR_64_Linux_syscalls 316 | 727 | #define __NR_64_Linux_syscalls 318 |
724 | 728 | ||
725 | #if _MIPS_SIM == _MIPS_SIM_NABI32 | 729 | #if _MIPS_SIM == _MIPS_SIM_NABI32 |
726 | 730 | ||
@@ -1049,15 +1053,17 @@ | |||
1049 | #define __NR_memfd_create (__NR_Linux + 318) | 1053 | #define __NR_memfd_create (__NR_Linux + 318) |
1050 | #define __NR_bpf (__NR_Linux + 319) | 1054 | #define __NR_bpf (__NR_Linux + 319) |
1051 | #define __NR_execveat (__NR_Linux + 320) | 1055 | #define __NR_execveat (__NR_Linux + 320) |
1056 | #define __NR_userfaultfd (__NR_Linux + 321) | ||
1057 | #define __NR_membarrier (__NR_Linux + 322) | ||
1052 | 1058 | ||
1053 | /* | 1059 | /* |
1054 | * Offset of the last N32 flavoured syscall | 1060 | * Offset of the last N32 flavoured syscall |
1055 | */ | 1061 | */ |
1056 | #define __NR_Linux_syscalls 320 | 1062 | #define __NR_Linux_syscalls 322 |
1057 | 1063 | ||
1058 | #endif /* _MIPS_SIM == _MIPS_SIM_NABI32 */ | 1064 | #endif /* _MIPS_SIM == _MIPS_SIM_NABI32 */ |
1059 | 1065 | ||
1060 | #define __NR_N32_Linux 6000 | 1066 | #define __NR_N32_Linux 6000 |
1061 | #define __NR_N32_Linux_syscalls 320 | 1067 | #define __NR_N32_Linux_syscalls 322 |
1062 | 1068 | ||
1063 | #endif /* _UAPI_ASM_UNISTD_H */ | 1069 | #endif /* _UAPI_ASM_UNISTD_H */ |
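userfaultfd and membarrier get numbers on all three MIPS ABIs (o32, n64, n32). A hedged userspace probe for the latter; MEMBARRIER_CMD_QUERY is 0 and returns a bitmask of supported commands, and the sketch assumes libc headers that already define __NR_membarrier for the ABI in use:

```c
/* Hedged sketch: query membarrier support on a kernel with this patch. */
#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>

int main(void)
{
	long mask = syscall(__NR_membarrier, 0 /* MEMBARRIER_CMD_QUERY */, 0);

	if (mask < 0)
		perror("membarrier");
	else
		printf("supported membarrier commands: 0x%lx\n", (unsigned long)mask);
	return 0;
}
```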
diff --git a/arch/mips/jz4740/board-qi_lb60.c b/arch/mips/jz4740/board-qi_lb60.c index 4e62bf85d0b0..459cb017306c 100644 --- a/arch/mips/jz4740/board-qi_lb60.c +++ b/arch/mips/jz4740/board-qi_lb60.c | |||
@@ -26,6 +26,7 @@ | |||
26 | #include <linux/power/jz4740-battery.h> | 26 | #include <linux/power/jz4740-battery.h> |
27 | #include <linux/power/gpio-charger.h> | 27 | #include <linux/power/gpio-charger.h> |
28 | 28 | ||
29 | #include <asm/mach-jz4740/gpio.h> | ||
29 | #include <asm/mach-jz4740/jz4740_fb.h> | 30 | #include <asm/mach-jz4740/jz4740_fb.h> |
30 | #include <asm/mach-jz4740/jz4740_mmc.h> | 31 | #include <asm/mach-jz4740/jz4740_mmc.h> |
31 | #include <asm/mach-jz4740/jz4740_nand.h> | 32 | #include <asm/mach-jz4740/jz4740_nand.h> |
diff --git a/arch/mips/jz4740/gpio.c b/arch/mips/jz4740/gpio.c index a74e181058b0..8c6d76c9b2d6 100644 --- a/arch/mips/jz4740/gpio.c +++ b/arch/mips/jz4740/gpio.c | |||
@@ -28,6 +28,7 @@ | |||
28 | #include <linux/seq_file.h> | 28 | #include <linux/seq_file.h> |
29 | 29 | ||
30 | #include <asm/mach-jz4740/base.h> | 30 | #include <asm/mach-jz4740/base.h> |
31 | #include <asm/mach-jz4740/gpio.h> | ||
31 | 32 | ||
32 | #define JZ4740_GPIO_BASE_A (32*0) | 33 | #define JZ4740_GPIO_BASE_A (32*0) |
33 | #define JZ4740_GPIO_BASE_B (32*1) | 34 | #define JZ4740_GPIO_BASE_B (32*1) |
diff --git a/arch/mips/kernel/cps-vec.S b/arch/mips/kernel/cps-vec.S index 9f71c06aebf6..209ded16806b 100644 --- a/arch/mips/kernel/cps-vec.S +++ b/arch/mips/kernel/cps-vec.S | |||
@@ -39,6 +39,7 @@ | |||
39 | mfc0 \dest, CP0_CONFIG, 3 | 39 | mfc0 \dest, CP0_CONFIG, 3 |
40 | andi \dest, \dest, MIPS_CONF3_MT | 40 | andi \dest, \dest, MIPS_CONF3_MT |
41 | beqz \dest, \nomt | 41 | beqz \dest, \nomt |
42 | nop | ||
42 | .endm | 43 | .endm |
43 | 44 | ||
44 | .section .text.cps-vec | 45 | .section .text.cps-vec |
@@ -223,10 +224,9 @@ LEAF(excep_ejtag) | |||
223 | END(excep_ejtag) | 224 | END(excep_ejtag) |
224 | 225 | ||
225 | LEAF(mips_cps_core_init) | 226 | LEAF(mips_cps_core_init) |
226 | #ifdef CONFIG_MIPS_MT | 227 | #ifdef CONFIG_MIPS_MT_SMP |
227 | /* Check that the core implements the MT ASE */ | 228 | /* Check that the core implements the MT ASE */ |
228 | has_mt t0, 3f | 229 | has_mt t0, 3f |
229 | nop | ||
230 | 230 | ||
231 | .set push | 231 | .set push |
232 | .set mips64r2 | 232 | .set mips64r2 |
@@ -310,8 +310,9 @@ LEAF(mips_cps_boot_vpes) | |||
310 | PTR_ADDU t0, t0, t1 | 310 | PTR_ADDU t0, t0, t1 |
311 | 311 | ||
312 | /* Calculate this VPEs ID. If the core doesn't support MT use 0 */ | 312 | /* Calculate this VPEs ID. If the core doesn't support MT use 0 */ |
313 | li t9, 0 | ||
314 | #ifdef CONFIG_MIPS_MT_SMP | ||
313 | has_mt ta2, 1f | 315 | has_mt ta2, 1f |
314 | li t9, 0 | ||
315 | 316 | ||
316 | /* Find the number of VPEs present in the core */ | 317 | /* Find the number of VPEs present in the core */ |
317 | mfc0 t1, CP0_MVPCONF0 | 318 | mfc0 t1, CP0_MVPCONF0 |
@@ -330,6 +331,7 @@ LEAF(mips_cps_boot_vpes) | |||
330 | /* Retrieve the VPE ID from EBase.CPUNum */ | 331 | /* Retrieve the VPE ID from EBase.CPUNum */ |
331 | mfc0 t9, $15, 1 | 332 | mfc0 t9, $15, 1 |
332 | and t9, t9, t1 | 333 | and t9, t9, t1 |
334 | #endif | ||
333 | 335 | ||
334 | 1: /* Calculate a pointer to this VPEs struct vpe_boot_config */ | 336 | 1: /* Calculate a pointer to this VPEs struct vpe_boot_config */ |
335 | li t1, VPEBOOTCFG_SIZE | 337 | li t1, VPEBOOTCFG_SIZE |
@@ -337,7 +339,7 @@ LEAF(mips_cps_boot_vpes) | |||
337 | PTR_L ta3, COREBOOTCFG_VPECONFIG(t0) | 339 | PTR_L ta3, COREBOOTCFG_VPECONFIG(t0) |
338 | PTR_ADDU v0, v0, ta3 | 340 | PTR_ADDU v0, v0, ta3 |
339 | 341 | ||
340 | #ifdef CONFIG_MIPS_MT | 342 | #ifdef CONFIG_MIPS_MT_SMP |
341 | 343 | ||
342 | /* If the core doesn't support MT then return */ | 344 | /* If the core doesn't support MT then return */ |
343 | bnez ta2, 1f | 345 | bnez ta2, 1f |
@@ -451,7 +453,7 @@ LEAF(mips_cps_boot_vpes) | |||
451 | 453 | ||
452 | 2: .set pop | 454 | 2: .set pop |
453 | 455 | ||
454 | #endif /* CONFIG_MIPS_MT */ | 456 | #endif /* CONFIG_MIPS_MT_SMP */ |
455 | 457 | ||
456 | /* Return */ | 458 | /* Return */ |
457 | jr ra | 459 | jr ra |
diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c index 571a8e6ea5bd..09a51d091941 100644 --- a/arch/mips/kernel/cpu-probe.c +++ b/arch/mips/kernel/cpu-probe.c | |||
@@ -410,16 +410,18 @@ static int set_ftlb_enable(struct cpuinfo_mips *c, int enable) | |||
410 | static inline unsigned int decode_config0(struct cpuinfo_mips *c) | 410 | static inline unsigned int decode_config0(struct cpuinfo_mips *c) |
411 | { | 411 | { |
412 | unsigned int config0; | 412 | unsigned int config0; |
413 | int isa; | 413 | int isa, mt; |
414 | 414 | ||
415 | config0 = read_c0_config(); | 415 | config0 = read_c0_config(); |
416 | 416 | ||
417 | /* | 417 | /* |
418 | * Look for Standard TLB or Dual VTLB and FTLB | 418 | * Look for Standard TLB or Dual VTLB and FTLB |
419 | */ | 419 | */ |
420 | if ((((config0 & MIPS_CONF_MT) >> 7) == 1) || | 420 | mt = config0 & MIPS_CONF_MT; |
421 | (((config0 & MIPS_CONF_MT) >> 7) == 4)) | 421 | if (mt == MIPS_CONF_MT_TLB) |
422 | c->options |= MIPS_CPU_TLB; | 422 | c->options |= MIPS_CPU_TLB; |
423 | else if (mt == MIPS_CONF_MT_FTLB) | ||
424 | c->options |= MIPS_CPU_TLB | MIPS_CPU_FTLB; | ||
423 | 425 | ||
424 | isa = (config0 & MIPS_CONF_AT) >> 13; | 426 | isa = (config0 & MIPS_CONF_AT) >> 13; |
425 | switch (isa) { | 427 | switch (isa) { |
@@ -559,15 +561,18 @@ static inline unsigned int decode_config4(struct cpuinfo_mips *c) | |||
559 | if (cpu_has_tlb) { | 561 | if (cpu_has_tlb) { |
560 | if (((config4 & MIPS_CONF4_IE) >> 29) == 2) | 562 | if (((config4 & MIPS_CONF4_IE) >> 29) == 2) |
561 | c->options |= MIPS_CPU_TLBINV; | 563 | c->options |= MIPS_CPU_TLBINV; |
564 | |||
562 | /* | 565 | /* |
563 | * This is a bit ugly. R6 has dropped that field from | 566 | * R6 has dropped the MMUExtDef field from config4. |
564 | * config4 and the only valid configuration is VTLB+FTLB so | 567 | * On R6 the fields always describe the FTLB, and only if it is |
565 | * set a good value for mmuextdef for that case. | 568 | * present according to Config.MT. |
566 | */ | 569 | */ |
567 | if (cpu_has_mips_r6) | 570 | if (!cpu_has_mips_r6) |
571 | mmuextdef = config4 & MIPS_CONF4_MMUEXTDEF; | ||
572 | else if (cpu_has_ftlb) | ||
568 | mmuextdef = MIPS_CONF4_MMUEXTDEF_VTLBSIZEEXT; | 573 | mmuextdef = MIPS_CONF4_MMUEXTDEF_VTLBSIZEEXT; |
569 | else | 574 | else |
570 | mmuextdef = config4 & MIPS_CONF4_MMUEXTDEF; | 575 | mmuextdef = 0; |
571 | 576 | ||
572 | switch (mmuextdef) { | 577 | switch (mmuextdef) { |
573 | case MIPS_CONF4_MMUEXTDEF_MMUSIZEEXT: | 578 | case MIPS_CONF4_MMUEXTDEF_MMUSIZEEXT: |
diff --git a/arch/mips/kernel/octeon_switch.S b/arch/mips/kernel/octeon_switch.S index 423ae83af1fb..3375745b9198 100644 --- a/arch/mips/kernel/octeon_switch.S +++ b/arch/mips/kernel/octeon_switch.S | |||
@@ -18,7 +18,7 @@ | |||
18 | .set pop | 18 | .set pop |
19 | /* | 19 | /* |
20 | * task_struct *resume(task_struct *prev, task_struct *next, | 20 | * task_struct *resume(task_struct *prev, task_struct *next, |
21 | * struct thread_info *next_ti, int usedfpu) | 21 | * struct thread_info *next_ti) |
22 | */ | 22 | */ |
23 | .align 7 | 23 | .align 7 |
24 | LEAF(resume) | 24 | LEAF(resume) |
@@ -28,30 +28,6 @@ | |||
28 | cpu_save_nonscratch a0 | 28 | cpu_save_nonscratch a0 |
29 | LONG_S ra, THREAD_REG31(a0) | 29 | LONG_S ra, THREAD_REG31(a0) |
30 | 30 | ||
31 | /* | ||
32 | * check if we need to save FPU registers | ||
33 | */ | ||
34 | .set push | ||
35 | .set noreorder | ||
36 | beqz a3, 1f | ||
37 | PTR_L t3, TASK_THREAD_INFO(a0) | ||
38 | .set pop | ||
39 | |||
40 | /* | ||
41 | * clear saved user stack CU1 bit | ||
42 | */ | ||
43 | LONG_L t0, ST_OFF(t3) | ||
44 | li t1, ~ST0_CU1 | ||
45 | and t0, t0, t1 | ||
46 | LONG_S t0, ST_OFF(t3) | ||
47 | |||
48 | .set push | ||
49 | .set arch=mips64r2 | ||
50 | fpu_save_double a0 t0 t1 # c0_status passed in t0 | ||
51 | # clobbers t1 | ||
52 | .set pop | ||
53 | 1: | ||
54 | |||
55 | #if CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE > 0 | 31 | #if CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE > 0 |
56 | /* Check if we need to store CVMSEG state */ | 32 | /* Check if we need to store CVMSEG state */ |
57 | dmfc0 t0, $11,7 /* CvmMemCtl */ | 33 | dmfc0 t0, $11,7 /* CvmMemCtl */ |
diff --git a/arch/mips/kernel/r2300_switch.S b/arch/mips/kernel/r2300_switch.S index 5087a4b72e6b..ac27ef7d4d0e 100644 --- a/arch/mips/kernel/r2300_switch.S +++ b/arch/mips/kernel/r2300_switch.S | |||
@@ -31,18 +31,8 @@ | |||
31 | #define ST_OFF (_THREAD_SIZE - 32 - PT_SIZE + PT_STATUS) | 31 | #define ST_OFF (_THREAD_SIZE - 32 - PT_SIZE + PT_STATUS) |
32 | 32 | ||
33 | /* | 33 | /* |
34 | * FPU context is saved iff the process has used it's FPU in the current | ||
35 | * time slice as indicated by TIF_USEDFPU. In any case, the CU1 bit for user | ||
36 | * space STATUS register should be 0, so that a process *always* starts its | ||
37 | * userland with FPU disabled after each context switch. | ||
38 | * | ||
39 | * FPU will be enabled as soon as the process accesses FPU again, through | ||
40 | * do_cpu() trap. | ||
41 | */ | ||
42 | |||
43 | /* | ||
44 | * task_struct *resume(task_struct *prev, task_struct *next, | 34 | * task_struct *resume(task_struct *prev, task_struct *next, |
45 | * struct thread_info *next_ti, int usedfpu) | 35 | * struct thread_info *next_ti) |
46 | */ | 36 | */ |
47 | LEAF(resume) | 37 | LEAF(resume) |
48 | mfc0 t1, CP0_STATUS | 38 | mfc0 t1, CP0_STATUS |
@@ -50,22 +40,6 @@ LEAF(resume) | |||
50 | cpu_save_nonscratch a0 | 40 | cpu_save_nonscratch a0 |
51 | sw ra, THREAD_REG31(a0) | 41 | sw ra, THREAD_REG31(a0) |
52 | 42 | ||
53 | beqz a3, 1f | ||
54 | |||
55 | PTR_L t3, TASK_THREAD_INFO(a0) | ||
56 | |||
57 | /* | ||
58 | * clear saved user stack CU1 bit | ||
59 | */ | ||
60 | lw t0, ST_OFF(t3) | ||
61 | li t1, ~ST0_CU1 | ||
62 | and t0, t0, t1 | ||
63 | sw t0, ST_OFF(t3) | ||
64 | |||
65 | fpu_save_single a0, t0 # clobbers t0 | ||
66 | |||
67 | 1: | ||
68 | |||
69 | #if defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_SMP) | 43 | #if defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_SMP) |
70 | PTR_LA t8, __stack_chk_guard | 44 | PTR_LA t8, __stack_chk_guard |
71 | LONG_L t9, TASK_STACK_CANARY(a1) | 45 | LONG_L t9, TASK_STACK_CANARY(a1) |
diff --git a/arch/mips/kernel/scall32-o32.S b/arch/mips/kernel/scall32-o32.S index 4cc13508d967..65a74e4f0f45 100644 --- a/arch/mips/kernel/scall32-o32.S +++ b/arch/mips/kernel/scall32-o32.S | |||
@@ -36,16 +36,8 @@ NESTED(handle_sys, PT_SIZE, sp) | |||
36 | lw t1, PT_EPC(sp) # skip syscall on return | 36 | lw t1, PT_EPC(sp) # skip syscall on return |
37 | 37 | ||
38 | subu v0, v0, __NR_O32_Linux # check syscall number | 38 | subu v0, v0, __NR_O32_Linux # check syscall number |
39 | sltiu t0, v0, __NR_O32_Linux_syscalls + 1 | ||
40 | addiu t1, 4 # skip to next instruction | 39 | addiu t1, 4 # skip to next instruction |
41 | sw t1, PT_EPC(sp) | 40 | sw t1, PT_EPC(sp) |
42 | beqz t0, illegal_syscall | ||
43 | |||
44 | sll t0, v0, 2 | ||
45 | la t1, sys_call_table | ||
46 | addu t1, t0 | ||
47 | lw t2, (t1) # syscall routine | ||
48 | beqz t2, illegal_syscall | ||
49 | 41 | ||
50 | sw a3, PT_R26(sp) # save a3 for syscall restarting | 42 | sw a3, PT_R26(sp) # save a3 for syscall restarting |
51 | 43 | ||
@@ -96,6 +88,16 @@ loads_done: | |||
96 | li t1, _TIF_WORK_SYSCALL_ENTRY | 88 | li t1, _TIF_WORK_SYSCALL_ENTRY |
97 | and t0, t1 | 89 | and t0, t1 |
98 | bnez t0, syscall_trace_entry # -> yes | 90 | bnez t0, syscall_trace_entry # -> yes |
91 | syscall_common: | ||
92 | sltiu t0, v0, __NR_O32_Linux_syscalls + 1 | ||
93 | beqz t0, illegal_syscall | ||
94 | |||
95 | sll t0, v0, 2 | ||
96 | la t1, sys_call_table | ||
97 | addu t1, t0 | ||
98 | lw t2, (t1) # syscall routine | ||
99 | |||
100 | beqz t2, illegal_syscall | ||
99 | 101 | ||
100 | jalr t2 # Do The Real Thing (TM) | 102 | jalr t2 # Do The Real Thing (TM) |
101 | 103 | ||
@@ -116,7 +118,7 @@ o32_syscall_exit: | |||
116 | 118 | ||
117 | syscall_trace_entry: | 119 | syscall_trace_entry: |
118 | SAVE_STATIC | 120 | SAVE_STATIC |
119 | move s0, t2 | 121 | move s0, v0 |
120 | move a0, sp | 122 | move a0, sp |
121 | 123 | ||
122 | /* | 124 | /* |
@@ -129,27 +131,18 @@ syscall_trace_entry: | |||
129 | 131 | ||
130 | 1: jal syscall_trace_enter | 132 | 1: jal syscall_trace_enter |
131 | 133 | ||
132 | bltz v0, 2f # seccomp failed? Skip syscall | 134 | bltz v0, 1f # seccomp failed? Skip syscall |
135 | |||
136 | move v0, s0 # restore syscall | ||
133 | 137 | ||
134 | move t0, s0 | ||
135 | RESTORE_STATIC | 138 | RESTORE_STATIC |
136 | lw a0, PT_R4(sp) # Restore argument registers | 139 | lw a0, PT_R4(sp) # Restore argument registers |
137 | lw a1, PT_R5(sp) | 140 | lw a1, PT_R5(sp) |
138 | lw a2, PT_R6(sp) | 141 | lw a2, PT_R6(sp) |
139 | lw a3, PT_R7(sp) | 142 | lw a3, PT_R7(sp) |
140 | jalr t0 | 143 | j syscall_common |
141 | |||
142 | li t0, -EMAXERRNO - 1 # error? | ||
143 | sltu t0, t0, v0 | ||
144 | sw t0, PT_R7(sp) # set error flag | ||
145 | beqz t0, 1f | ||
146 | |||
147 | lw t1, PT_R2(sp) # syscall number | ||
148 | negu v0 # error | ||
149 | sw t1, PT_R0(sp) # save it for syscall restarting | ||
150 | 1: sw v0, PT_R2(sp) # result | ||
151 | 144 | ||
152 | 2: j syscall_exit | 145 | 1: j syscall_exit |
153 | 146 | ||
154 | /* ------------------------------------------------------------------------ */ | 147 | /* ------------------------------------------------------------------------ */ |
155 | 148 | ||
@@ -599,3 +592,5 @@ EXPORT(sys_call_table) | |||
599 | PTR sys_memfd_create | 592 | PTR sys_memfd_create |
600 | PTR sys_bpf /* 4355 */ | 593 | PTR sys_bpf /* 4355 */ |
601 | PTR sys_execveat | 594 | PTR sys_execveat |
595 | PTR sys_userfaultfd | ||
596 | PTR sys_membarrier | ||
diff --git a/arch/mips/kernel/scall64-64.S b/arch/mips/kernel/scall64-64.S index a6f6b762c47a..e732981cf99f 100644 --- a/arch/mips/kernel/scall64-64.S +++ b/arch/mips/kernel/scall64-64.S | |||
@@ -39,18 +39,11 @@ NESTED(handle_sys64, PT_SIZE, sp) | |||
39 | .set at | 39 | .set at |
40 | #endif | 40 | #endif |
41 | 41 | ||
42 | dsubu t0, v0, __NR_64_Linux # check syscall number | ||
43 | sltiu t0, t0, __NR_64_Linux_syscalls + 1 | ||
44 | #if !defined(CONFIG_MIPS32_O32) && !defined(CONFIG_MIPS32_N32) | 42 | #if !defined(CONFIG_MIPS32_O32) && !defined(CONFIG_MIPS32_N32) |
45 | ld t1, PT_EPC(sp) # skip syscall on return | 43 | ld t1, PT_EPC(sp) # skip syscall on return |
46 | daddiu t1, 4 # skip to next instruction | 44 | daddiu t1, 4 # skip to next instruction |
47 | sd t1, PT_EPC(sp) | 45 | sd t1, PT_EPC(sp) |
48 | #endif | 46 | #endif |
49 | beqz t0, illegal_syscall | ||
50 | |||
51 | dsll t0, v0, 3 # offset into table | ||
52 | ld t2, (sys_call_table - (__NR_64_Linux * 8))(t0) | ||
53 | # syscall routine | ||
54 | 47 | ||
55 | sd a3, PT_R26(sp) # save a3 for syscall restarting | 48 | sd a3, PT_R26(sp) # save a3 for syscall restarting |
56 | 49 | ||
@@ -59,6 +52,17 @@ NESTED(handle_sys64, PT_SIZE, sp) | |||
59 | and t0, t1, t0 | 52 | and t0, t1, t0 |
60 | bnez t0, syscall_trace_entry | 53 | bnez t0, syscall_trace_entry |
61 | 54 | ||
55 | syscall_common: | ||
56 | dsubu t2, v0, __NR_64_Linux | ||
57 | sltiu t0, t2, __NR_64_Linux_syscalls + 1 | ||
58 | beqz t0, illegal_syscall | ||
59 | |||
60 | dsll t0, t2, 3 # offset into table | ||
61 | dla t2, sys_call_table | ||
62 | daddu t0, t2, t0 | ||
63 | ld t2, (t0) # syscall routine | ||
64 | beqz t2, illegal_syscall | ||
65 | |||
62 | jalr t2 # Do The Real Thing (TM) | 66 | jalr t2 # Do The Real Thing (TM) |
63 | 67 | ||
64 | li t0, -EMAXERRNO - 1 # error? | 68 | li t0, -EMAXERRNO - 1 # error? |
@@ -78,14 +82,14 @@ n64_syscall_exit: | |||
78 | 82 | ||
79 | syscall_trace_entry: | 83 | syscall_trace_entry: |
80 | SAVE_STATIC | 84 | SAVE_STATIC |
81 | move s0, t2 | 85 | move s0, v0 |
82 | move a0, sp | 86 | move a0, sp |
83 | move a1, v0 | 87 | move a1, v0 |
84 | jal syscall_trace_enter | 88 | jal syscall_trace_enter |
85 | 89 | ||
86 | bltz v0, 2f # seccomp failed? Skip syscall | 90 | bltz v0, 1f # seccomp failed? Skip syscall |
87 | 91 | ||
88 | move t0, s0 | 92 | move v0, s0 |
89 | RESTORE_STATIC | 93 | RESTORE_STATIC |
90 | ld a0, PT_R4(sp) # Restore argument registers | 94 | ld a0, PT_R4(sp) # Restore argument registers |
91 | ld a1, PT_R5(sp) | 95 | ld a1, PT_R5(sp) |
@@ -93,19 +97,9 @@ syscall_trace_entry: | |||
93 | ld a3, PT_R7(sp) | 97 | ld a3, PT_R7(sp) |
94 | ld a4, PT_R8(sp) | 98 | ld a4, PT_R8(sp) |
95 | ld a5, PT_R9(sp) | 99 | ld a5, PT_R9(sp) |
96 | jalr t0 | 100 | j syscall_common |
97 | |||
98 | li t0, -EMAXERRNO - 1 # error? | ||
99 | sltu t0, t0, v0 | ||
100 | sd t0, PT_R7(sp) # set error flag | ||
101 | beqz t0, 1f | ||
102 | |||
103 | ld t1, PT_R2(sp) # syscall number | ||
104 | dnegu v0 # error | ||
105 | sd t1, PT_R0(sp) # save it for syscall restarting | ||
106 | 1: sd v0, PT_R2(sp) # result | ||
107 | 101 | ||
108 | 2: j syscall_exit | 102 | 1: j syscall_exit |
109 | 103 | ||
110 | illegal_syscall: | 104 | illegal_syscall: |
111 | /* This also isn't a 64-bit syscall, throw an error. */ | 105 | /* This also isn't a 64-bit syscall, throw an error. */ |
@@ -436,4 +430,6 @@ EXPORT(sys_call_table) | |||
436 | PTR sys_memfd_create | 430 | PTR sys_memfd_create |
437 | PTR sys_bpf /* 5315 */ | 431 | PTR sys_bpf /* 5315 */ |
438 | PTR sys_execveat | 432 | PTR sys_execveat |
433 | PTR sys_userfaultfd | ||
434 | PTR sys_membarrier | ||
439 | .size sys_call_table,.-sys_call_table | 435 | .size sys_call_table,.-sys_call_table |
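The traced path now saves the syscall number rather than the resolved handler, reloads the argument registers and jumps back to syscall_common, so the range check, table lookup and error epilogue exist only once. The lookup itself is plain pointer arithmetic: each table entry is 8 bytes, hence the dsll by 3. For example, __NR_membarrier = 5318 gives index 5318 - 5000 = 318 and byte offset 318 * 8 = 2544 into sys_call_table.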
diff --git a/arch/mips/kernel/scall64-n32.S b/arch/mips/kernel/scall64-n32.S index 4b2010654c46..c79484397584 100644 --- a/arch/mips/kernel/scall64-n32.S +++ b/arch/mips/kernel/scall64-n32.S | |||
@@ -52,6 +52,7 @@ NESTED(handle_sysn32, PT_SIZE, sp) | |||
52 | and t0, t1, t0 | 52 | and t0, t1, t0 |
53 | bnez t0, n32_syscall_trace_entry | 53 | bnez t0, n32_syscall_trace_entry |
54 | 54 | ||
55 | syscall_common: | ||
55 | jalr t2 # Do The Real Thing (TM) | 56 | jalr t2 # Do The Real Thing (TM) |
56 | 57 | ||
57 | li t0, -EMAXERRNO - 1 # error? | 58 | li t0, -EMAXERRNO - 1 # error? |
@@ -75,9 +76,9 @@ n32_syscall_trace_entry: | |||
75 | move a1, v0 | 76 | move a1, v0 |
76 | jal syscall_trace_enter | 77 | jal syscall_trace_enter |
77 | 78 | ||
78 | bltz v0, 2f # seccomp failed? Skip syscall | 79 | bltz v0, 1f # seccomp failed? Skip syscall |
79 | 80 | ||
80 | move t0, s0 | 81 | move t2, s0 |
81 | RESTORE_STATIC | 82 | RESTORE_STATIC |
82 | ld a0, PT_R4(sp) # Restore argument registers | 83 | ld a0, PT_R4(sp) # Restore argument registers |
83 | ld a1, PT_R5(sp) | 84 | ld a1, PT_R5(sp) |
@@ -85,19 +86,9 @@ n32_syscall_trace_entry: | |||
85 | ld a3, PT_R7(sp) | 86 | ld a3, PT_R7(sp) |
86 | ld a4, PT_R8(sp) | 87 | ld a4, PT_R8(sp) |
87 | ld a5, PT_R9(sp) | 88 | ld a5, PT_R9(sp) |
88 | jalr t0 | 89 | j syscall_common |
89 | 90 | ||
90 | li t0, -EMAXERRNO - 1 # error? | 91 | 1: j syscall_exit |
91 | sltu t0, t0, v0 | ||
92 | sd t0, PT_R7(sp) # set error flag | ||
93 | beqz t0, 1f | ||
94 | |||
95 | ld t1, PT_R2(sp) # syscall number | ||
96 | dnegu v0 # error | ||
97 | sd t1, PT_R0(sp) # save it for syscall restarting | ||
98 | 1: sd v0, PT_R2(sp) # result | ||
99 | |||
100 | 2: j syscall_exit | ||
101 | 92 | ||
102 | not_n32_scall: | 93 | not_n32_scall: |
103 | /* This is not an n32 compatibility syscall, pass it on to | 94 | /* This is not an n32 compatibility syscall, pass it on to |
@@ -429,4 +420,6 @@ EXPORT(sysn32_call_table) | |||
429 | PTR sys_memfd_create | 420 | PTR sys_memfd_create |
430 | PTR sys_bpf | 421 | PTR sys_bpf |
431 | PTR compat_sys_execveat /* 6320 */ | 422 | PTR compat_sys_execveat /* 6320 */ |
423 | PTR sys_userfaultfd | ||
424 | PTR sys_membarrier | ||
432 | .size sysn32_call_table,.-sysn32_call_table | 425 | .size sysn32_call_table,.-sysn32_call_table |
diff --git a/arch/mips/kernel/scall64-o32.S b/arch/mips/kernel/scall64-o32.S index f543ff4feef9..6369cfd390c6 100644 --- a/arch/mips/kernel/scall64-o32.S +++ b/arch/mips/kernel/scall64-o32.S | |||
@@ -87,6 +87,7 @@ loads_done: | |||
87 | and t0, t1, t0 | 87 | and t0, t1, t0 |
88 | bnez t0, trace_a_syscall | 88 | bnez t0, trace_a_syscall |
89 | 89 | ||
90 | syscall_common: | ||
90 | jalr t2 # Do The Real Thing (TM) | 91 | jalr t2 # Do The Real Thing (TM) |
91 | 92 | ||
92 | li t0, -EMAXERRNO - 1 # error? | 93 | li t0, -EMAXERRNO - 1 # error? |
@@ -130,9 +131,9 @@ trace_a_syscall: | |||
130 | 131 | ||
131 | 1: jal syscall_trace_enter | 132 | 1: jal syscall_trace_enter |
132 | 133 | ||
133 | bltz v0, 2f # seccomp failed? Skip syscall | 134 | bltz v0, 1f # seccomp failed? Skip syscall |
134 | 135 | ||
135 | move t0, s0 | 136 | move t2, s0 |
136 | RESTORE_STATIC | 137 | RESTORE_STATIC |
137 | ld a0, PT_R4(sp) # Restore argument registers | 138 | ld a0, PT_R4(sp) # Restore argument registers |
138 | ld a1, PT_R5(sp) | 139 | ld a1, PT_R5(sp) |
@@ -142,19 +143,9 @@ trace_a_syscall: | |||
142 | ld a5, PT_R9(sp) | 143 | ld a5, PT_R9(sp) |
143 | ld a6, PT_R10(sp) | 144 | ld a6, PT_R10(sp) |
144 | ld a7, PT_R11(sp) # For indirect syscalls | 145 | ld a7, PT_R11(sp) # For indirect syscalls |
145 | jalr t0 | 146 | j syscall_common |
146 | 147 | ||
147 | li t0, -EMAXERRNO - 1 # error? | 148 | 1: j syscall_exit |
148 | sltu t0, t0, v0 | ||
149 | sd t0, PT_R7(sp) # set error flag | ||
150 | beqz t0, 1f | ||
151 | |||
152 | ld t1, PT_R2(sp) # syscall number | ||
153 | dnegu v0 # error | ||
154 | sd t1, PT_R0(sp) # save it for syscall restarting | ||
155 | 1: sd v0, PT_R2(sp) # result | ||
156 | |||
157 | 2: j syscall_exit | ||
158 | 149 | ||
159 | /* ------------------------------------------------------------------------ */ | 150 | /* ------------------------------------------------------------------------ */ |
160 | 151 | ||
@@ -584,4 +575,6 @@ EXPORT(sys32_call_table) | |||
584 | PTR sys_memfd_create | 575 | PTR sys_memfd_create |
585 | PTR sys_bpf /* 4355 */ | 576 | PTR sys_bpf /* 4355 */ |
586 | PTR compat_sys_execveat | 577 | PTR compat_sys_execveat |
578 | PTR sys_userfaultfd | ||
579 | PTR sys_membarrier | ||
587 | .size sys32_call_table,.-sys32_call_table | 580 | .size sys32_call_table,.-sys32_call_table |
diff --git a/arch/mips/kernel/setup.c b/arch/mips/kernel/setup.c index 35b8316002f8..479515109e5b 100644 --- a/arch/mips/kernel/setup.c +++ b/arch/mips/kernel/setup.c | |||
@@ -338,7 +338,7 @@ static void __init bootmem_init(void) | |||
338 | if (end <= reserved_end) | 338 | if (end <= reserved_end) |
339 | continue; | 339 | continue; |
340 | #ifdef CONFIG_BLK_DEV_INITRD | 340 | #ifdef CONFIG_BLK_DEV_INITRD |
341 | /* mapstart should be after initrd_end */ | 341 | /* Skip zones before initrd and initrd itself */ |
342 | if (initrd_end && end <= (unsigned long)PFN_UP(__pa(initrd_end))) | 342 | if (initrd_end && end <= (unsigned long)PFN_UP(__pa(initrd_end))) |
343 | continue; | 343 | continue; |
344 | #endif | 344 | #endif |
@@ -371,6 +371,14 @@ static void __init bootmem_init(void) | |||
371 | max_low_pfn = PFN_DOWN(HIGHMEM_START); | 371 | max_low_pfn = PFN_DOWN(HIGHMEM_START); |
372 | } | 372 | } |
373 | 373 | ||
374 | #ifdef CONFIG_BLK_DEV_INITRD | ||
375 | /* | ||
376 | * mapstart should be after initrd_end | ||
377 | */ | ||
378 | if (initrd_end) | ||
379 | mapstart = max(mapstart, (unsigned long)PFN_UP(__pa(initrd_end))); | ||
380 | #endif | ||
381 | |||
374 | /* | 382 | /* |
375 | * Initialize the boot-time allocator with low memory only. | 383 | * Initialize the boot-time allocator with low memory only. |
376 | */ | 384 | */ |
diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c index a31896c33716..bd4385a8e6e8 100644 --- a/arch/mips/kernel/smp.c +++ b/arch/mips/kernel/smp.c | |||
@@ -42,6 +42,7 @@ | |||
42 | #include <asm/mmu_context.h> | 42 | #include <asm/mmu_context.h> |
43 | #include <asm/time.h> | 43 | #include <asm/time.h> |
44 | #include <asm/setup.h> | 44 | #include <asm/setup.h> |
45 | #include <asm/maar.h> | ||
45 | 46 | ||
46 | cpumask_t cpu_callin_map; /* Bitmask of started secondaries */ | 47 | cpumask_t cpu_callin_map; /* Bitmask of started secondaries */ |
47 | 48 | ||
@@ -157,6 +158,7 @@ asmlinkage void start_secondary(void) | |||
157 | mips_clockevent_init(); | 158 | mips_clockevent_init(); |
158 | mp_ops->init_secondary(); | 159 | mp_ops->init_secondary(); |
159 | cpu_report(); | 160 | cpu_report(); |
161 | maar_init(); | ||
160 | 162 | ||
161 | /* | 163 | /* |
162 | * XXX parity protection should be folded in here when it's converted | 164 | * XXX parity protection should be folded in here when it's converted |
diff --git a/arch/mips/loongson64/common/env.c b/arch/mips/loongson64/common/env.c index f6c44dd332e2..d6d07ad56180 100644 --- a/arch/mips/loongson64/common/env.c +++ b/arch/mips/loongson64/common/env.c | |||
@@ -64,6 +64,9 @@ void __init prom_init_env(void) | |||
64 | } | 64 | } |
65 | if (memsize == 0) | 65 | if (memsize == 0) |
66 | memsize = 256; | 66 | memsize = 256; |
67 | |||
68 | loongson_sysconf.nr_uarts = 1; | ||
69 | |||
67 | pr_info("memsize=%u, highmemsize=%u\n", memsize, highmemsize); | 70 | pr_info("memsize=%u, highmemsize=%u\n", memsize, highmemsize); |
68 | #else | 71 | #else |
69 | struct boot_params *boot_p; | 72 | struct boot_params *boot_p; |
diff --git a/arch/mips/mm/dma-default.c b/arch/mips/mm/dma-default.c index a914dc1cb6d1..d8117be729a2 100644 --- a/arch/mips/mm/dma-default.c +++ b/arch/mips/mm/dma-default.c | |||
@@ -100,7 +100,7 @@ static gfp_t massage_gfp_flags(const struct device *dev, gfp_t gfp) | |||
100 | else | 100 | else |
101 | #endif | 101 | #endif |
102 | #if defined(CONFIG_ZONE_DMA) && !defined(CONFIG_ZONE_DMA32) | 102 | #if defined(CONFIG_ZONE_DMA) && !defined(CONFIG_ZONE_DMA32) |
103 | if (dev->coherent_dma_mask < DMA_BIT_MASK(64)) | 103 | if (dev->coherent_dma_mask < DMA_BIT_MASK(sizeof(phys_addr_t) * 8)) |
104 | dma_flag = __GFP_DMA; | 104 | dma_flag = __GFP_DMA; |
105 | else | 105 | else |
106 | #endif | 106 | #endif |
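The dma-default.c change sizes the comparison mask to the width of phys_addr_t rather than hard-coding 64 bits. A minimal sketch of the effect, assuming a configuration where phys_addr_t is 32 bits wide (DMA_BIT_MASK is the standard helper from linux/dma-mapping.h):

    /* #define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL << (n)) - 1)) */

    if (dev->coherent_dma_mask < DMA_BIT_MASK(32))   /* i.e. < 0xffffffff */
            dma_flag = __GFP_DMA;

A device whose coherent mask already covers all 32 physical address bits no longer compares below the mask, whereas the old DMA_BIT_MASK(64) comparison pushed every such device into __GFP_DMA.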
diff --git a/arch/mips/mm/init.c b/arch/mips/mm/init.c index 66d0f49c5bec..8770e619185e 100644 --- a/arch/mips/mm/init.c +++ b/arch/mips/mm/init.c | |||
@@ -44,6 +44,7 @@ | |||
44 | #include <asm/pgalloc.h> | 44 | #include <asm/pgalloc.h> |
45 | #include <asm/tlb.h> | 45 | #include <asm/tlb.h> |
46 | #include <asm/fixmap.h> | 46 | #include <asm/fixmap.h> |
47 | #include <asm/maar.h> | ||
47 | 48 | ||
48 | /* | 49 | /* |
49 | * We have up to 8 empty zeroed pages so we can map one of the right colour | 50 | * We have up to 8 empty zeroed pages so we can map one of the right colour |
@@ -252,6 +253,119 @@ void __init fixrange_init(unsigned long start, unsigned long end, | |||
252 | #endif | 253 | #endif |
253 | } | 254 | } |
254 | 255 | ||
256 | unsigned __weak platform_maar_init(unsigned num_pairs) | ||
257 | { | ||
258 | struct maar_config cfg[BOOT_MEM_MAP_MAX]; | ||
259 | unsigned i, num_configured, num_cfg = 0; | ||
260 | phys_addr_t skip; | ||
261 | |||
262 | for (i = 0; i < boot_mem_map.nr_map; i++) { | ||
263 | switch (boot_mem_map.map[i].type) { | ||
264 | case BOOT_MEM_RAM: | ||
265 | case BOOT_MEM_INIT_RAM: | ||
266 | break; | ||
267 | default: | ||
268 | continue; | ||
269 | } | ||
270 | |||
271 | skip = 0x10000 - (boot_mem_map.map[i].addr & 0xffff); | ||
272 | |||
273 | cfg[num_cfg].lower = boot_mem_map.map[i].addr; | ||
274 | cfg[num_cfg].lower += skip; | ||
275 | |||
276 | cfg[num_cfg].upper = cfg[num_cfg].lower; | ||
277 | cfg[num_cfg].upper += boot_mem_map.map[i].size - 1; | ||
278 | cfg[num_cfg].upper -= skip; | ||
279 | |||
280 | cfg[num_cfg].attrs = MIPS_MAAR_S; | ||
281 | num_cfg++; | ||
282 | } | ||
283 | |||
284 | num_configured = maar_config(cfg, num_cfg, num_pairs); | ||
285 | if (num_configured < num_cfg) | ||
286 | pr_warn("Not enough MAAR pairs (%u) for all bootmem regions (%u)\n", | ||
287 | num_pairs, num_cfg); | ||
288 | |||
289 | return num_configured; | ||
290 | } | ||
291 | |||
292 | void maar_init(void) | ||
293 | { | ||
294 | unsigned num_maars, used, i; | ||
295 | phys_addr_t lower, upper, attr; | ||
296 | static struct { | ||
297 | struct maar_config cfgs[3]; | ||
298 | unsigned used; | ||
299 | } recorded = { { { 0 } }, 0 }; | ||
300 | |||
301 | if (!cpu_has_maar) | ||
302 | return; | ||
303 | |||
304 | /* Detect the number of MAARs */ | ||
305 | write_c0_maari(~0); | ||
306 | back_to_back_c0_hazard(); | ||
307 | num_maars = read_c0_maari() + 1; | ||
308 | |||
309 | /* MAARs should be in pairs */ | ||
310 | WARN_ON(num_maars % 2); | ||
311 | |||
312 | /* Set MAARs using values we recorded already */ | ||
313 | if (recorded.used) { | ||
314 | used = maar_config(recorded.cfgs, recorded.used, num_maars / 2); | ||
315 | BUG_ON(used != recorded.used); | ||
316 | } else { | ||
317 | /* Configure the required MAARs */ | ||
318 | used = platform_maar_init(num_maars / 2); | ||
319 | } | ||
320 | |||
321 | /* Disable any further MAARs */ | ||
322 | for (i = (used * 2); i < num_maars; i++) { | ||
323 | write_c0_maari(i); | ||
324 | back_to_back_c0_hazard(); | ||
325 | write_c0_maar(0); | ||
326 | back_to_back_c0_hazard(); | ||
327 | } | ||
328 | |||
329 | if (recorded.used) | ||
330 | return; | ||
331 | |||
332 | pr_info("MAAR configuration:\n"); | ||
333 | for (i = 0; i < num_maars; i += 2) { | ||
334 | write_c0_maari(i); | ||
335 | back_to_back_c0_hazard(); | ||
336 | upper = read_c0_maar(); | ||
337 | |||
338 | write_c0_maari(i + 1); | ||
339 | back_to_back_c0_hazard(); | ||
340 | lower = read_c0_maar(); | ||
341 | |||
342 | attr = lower & upper; | ||
343 | lower = (lower & MIPS_MAAR_ADDR) << 4; | ||
344 | upper = ((upper & MIPS_MAAR_ADDR) << 4) | 0xffff; | ||
345 | |||
346 | pr_info(" [%d]: ", i / 2); | ||
347 | if (!(attr & MIPS_MAAR_V)) { | ||
348 | pr_cont("disabled\n"); | ||
349 | continue; | ||
350 | } | ||
351 | |||
352 | pr_cont("%pa-%pa", &lower, &upper); | ||
353 | |||
354 | if (attr & MIPS_MAAR_S) | ||
355 | pr_cont(" speculate"); | ||
356 | |||
357 | pr_cont("\n"); | ||
358 | |||
359 | /* Record the setup for use on secondary CPUs */ | ||
360 | if (used <= ARRAY_SIZE(recorded.cfgs)) { | ||
361 | recorded.cfgs[recorded.used].lower = lower; | ||
362 | recorded.cfgs[recorded.used].upper = upper; | ||
363 | recorded.cfgs[recorded.used].attrs = attr; | ||
364 | recorded.used++; | ||
365 | } | ||
366 | } | ||
367 | } | ||
368 | |||
255 | #ifndef CONFIG_NEED_MULTIPLE_NODES | 369 | #ifndef CONFIG_NEED_MULTIPLE_NODES |
256 | int page_is_ram(unsigned long pagenr) | 370 | int page_is_ram(unsigned long pagenr) |
257 | { | 371 | { |
@@ -334,69 +448,6 @@ static inline void mem_init_free_highmem(void) | |||
334 | #endif | 448 | #endif |
335 | } | 449 | } |
336 | 450 | ||
337 | unsigned __weak platform_maar_init(unsigned num_pairs) | ||
338 | { | ||
339 | struct maar_config cfg[BOOT_MEM_MAP_MAX]; | ||
340 | unsigned i, num_configured, num_cfg = 0; | ||
341 | phys_addr_t skip; | ||
342 | |||
343 | for (i = 0; i < boot_mem_map.nr_map; i++) { | ||
344 | switch (boot_mem_map.map[i].type) { | ||
345 | case BOOT_MEM_RAM: | ||
346 | case BOOT_MEM_INIT_RAM: | ||
347 | break; | ||
348 | default: | ||
349 | continue; | ||
350 | } | ||
351 | |||
352 | skip = 0x10000 - (boot_mem_map.map[i].addr & 0xffff); | ||
353 | |||
354 | cfg[num_cfg].lower = boot_mem_map.map[i].addr; | ||
355 | cfg[num_cfg].lower += skip; | ||
356 | |||
357 | cfg[num_cfg].upper = cfg[num_cfg].lower; | ||
358 | cfg[num_cfg].upper += boot_mem_map.map[i].size - 1; | ||
359 | cfg[num_cfg].upper -= skip; | ||
360 | |||
361 | cfg[num_cfg].attrs = MIPS_MAAR_S; | ||
362 | num_cfg++; | ||
363 | } | ||
364 | |||
365 | num_configured = maar_config(cfg, num_cfg, num_pairs); | ||
366 | if (num_configured < num_cfg) | ||
367 | pr_warn("Not enough MAAR pairs (%u) for all bootmem regions (%u)\n", | ||
368 | num_pairs, num_cfg); | ||
369 | |||
370 | return num_configured; | ||
371 | } | ||
372 | |||
373 | static void maar_init(void) | ||
374 | { | ||
375 | unsigned num_maars, used, i; | ||
376 | |||
377 | if (!cpu_has_maar) | ||
378 | return; | ||
379 | |||
380 | /* Detect the number of MAARs */ | ||
381 | write_c0_maari(~0); | ||
382 | back_to_back_c0_hazard(); | ||
383 | num_maars = read_c0_maari() + 1; | ||
384 | |||
385 | /* MAARs should be in pairs */ | ||
386 | WARN_ON(num_maars % 2); | ||
387 | |||
388 | /* Configure the required MAARs */ | ||
389 | used = platform_maar_init(num_maars / 2); | ||
390 | |||
391 | /* Disable any further MAARs */ | ||
392 | for (i = (used * 2); i < num_maars; i++) { | ||
393 | write_c0_maari(i); | ||
394 | back_to_back_c0_hazard(); | ||
395 | write_c0_maar(0); | ||
396 | back_to_back_c0_hazard(); | ||
397 | } | ||
398 | } | ||
399 | |||
400 | void __init mem_init(void) | 451 | void __init mem_init(void) |
401 | { | 452 | { |
402 | #ifdef CONFIG_HIGHMEM | 453 | #ifdef CONFIG_HIGHMEM |
diff --git a/arch/mips/net/bpf_jit_asm.S b/arch/mips/net/bpf_jit_asm.S index e92726099be0..5d2e0c8d29c0 100644 --- a/arch/mips/net/bpf_jit_asm.S +++ b/arch/mips/net/bpf_jit_asm.S | |||
@@ -57,15 +57,28 @@ | |||
57 | 57 | ||
58 | LEAF(sk_load_word) | 58 | LEAF(sk_load_word) |
59 | is_offset_negative(word) | 59 | is_offset_negative(word) |
60 | .globl sk_load_word_positive | 60 | FEXPORT(sk_load_word_positive) |
61 | sk_load_word_positive: | ||
62 | is_offset_in_header(4, word) | 61 | is_offset_in_header(4, word) |
63 | /* Offset within header boundaries */ | 62 | /* Offset within header boundaries */ |
64 | PTR_ADDU t1, $r_skb_data, offset | 63 | PTR_ADDU t1, $r_skb_data, offset |
64 | .set reorder | ||
65 | lw $r_A, 0(t1) | 65 | lw $r_A, 0(t1) |
66 | .set noreorder | ||
66 | #ifdef CONFIG_CPU_LITTLE_ENDIAN | 67 | #ifdef CONFIG_CPU_LITTLE_ENDIAN |
68 | # if defined(__mips_isa_rev) && (__mips_isa_rev >= 2) | ||
67 | wsbh t0, $r_A | 69 | wsbh t0, $r_A |
68 | rotr $r_A, t0, 16 | 70 | rotr $r_A, t0, 16 |
71 | # else | ||
72 | sll t0, $r_A, 24 | ||
73 | srl t1, $r_A, 24 | ||
74 | srl t2, $r_A, 8 | ||
75 | or t0, t0, t1 | ||
76 | andi t2, t2, 0xff00 | ||
77 | andi t1, $r_A, 0xff00 | ||
78 | or t0, t0, t2 | ||
79 | sll t1, t1, 8 | ||
80 | or $r_A, t0, t1 | ||
81 | # endif | ||
69 | #endif | 82 | #endif |
70 | jr $r_ra | 83 | jr $r_ra |
71 | move $r_ret, zero | 84 | move $r_ret, zero |
@@ -73,15 +86,24 @@ sk_load_word_positive: | |||
73 | 86 | ||
74 | LEAF(sk_load_half) | 87 | LEAF(sk_load_half) |
75 | is_offset_negative(half) | 88 | is_offset_negative(half) |
76 | .globl sk_load_half_positive | 89 | FEXPORT(sk_load_half_positive) |
77 | sk_load_half_positive: | ||
78 | is_offset_in_header(2, half) | 90 | is_offset_in_header(2, half) |
79 | /* Offset within header boundaries */ | 91 | /* Offset within header boundaries */ |
80 | PTR_ADDU t1, $r_skb_data, offset | 92 | PTR_ADDU t1, $r_skb_data, offset |
93 | .set reorder | ||
81 | lh $r_A, 0(t1) | 94 | lh $r_A, 0(t1) |
95 | .set noreorder | ||
82 | #ifdef CONFIG_CPU_LITTLE_ENDIAN | 96 | #ifdef CONFIG_CPU_LITTLE_ENDIAN |
97 | # if defined(__mips_isa_rev) && (__mips_isa_rev >= 2) | ||
83 | wsbh t0, $r_A | 98 | wsbh t0, $r_A |
84 | seh $r_A, t0 | 99 | seh $r_A, t0 |
100 | # else | ||
101 | sll t0, $r_A, 24 | ||
102 | andi t1, $r_A, 0xff00 | ||
103 | sra t0, t0, 16 | ||
104 | srl t1, t1, 8 | ||
105 | or $r_A, t0, t1 | ||
106 | # endif | ||
85 | #endif | 107 | #endif |
86 | jr $r_ra | 108 | jr $r_ra |
87 | move $r_ret, zero | 109 | move $r_ret, zero |
@@ -89,8 +111,7 @@ sk_load_half_positive: | |||
89 | 111 | ||
90 | LEAF(sk_load_byte) | 112 | LEAF(sk_load_byte) |
91 | is_offset_negative(byte) | 113 | is_offset_negative(byte) |
92 | .globl sk_load_byte_positive | 114 | FEXPORT(sk_load_byte_positive) |
93 | sk_load_byte_positive: | ||
94 | is_offset_in_header(1, byte) | 115 | is_offset_in_header(1, byte) |
95 | /* Offset within header boundaries */ | 116 | /* Offset within header boundaries */ |
96 | PTR_ADDU t1, $r_skb_data, offset | 117 | PTR_ADDU t1, $r_skb_data, offset |
@@ -148,23 +169,47 @@ sk_load_byte_positive: | |||
148 | NESTED(bpf_slow_path_word, (6 * SZREG), $r_sp) | 169 | NESTED(bpf_slow_path_word, (6 * SZREG), $r_sp) |
149 | bpf_slow_path_common(4) | 170 | bpf_slow_path_common(4) |
150 | #ifdef CONFIG_CPU_LITTLE_ENDIAN | 171 | #ifdef CONFIG_CPU_LITTLE_ENDIAN |
172 | # if defined(__mips_isa_rev) && (__mips_isa_rev >= 2) | ||
151 | wsbh t0, $r_s0 | 173 | wsbh t0, $r_s0 |
152 | jr $r_ra | 174 | jr $r_ra |
153 | rotr $r_A, t0, 16 | 175 | rotr $r_A, t0, 16 |
154 | #endif | 176 | # else |
177 | sll t0, $r_s0, 24 | ||
178 | srl t1, $r_s0, 24 | ||
179 | srl t2, $r_s0, 8 | ||
180 | or t0, t0, t1 | ||
181 | andi t2, t2, 0xff00 | ||
182 | andi t1, $r_s0, 0xff00 | ||
183 | or t0, t0, t2 | ||
184 | sll t1, t1, 8 | ||
185 | jr $r_ra | ||
186 | or $r_A, t0, t1 | ||
187 | # endif | ||
188 | #else | ||
155 | jr $r_ra | 189 | jr $r_ra |
156 | move $r_A, $r_s0 | 190 | move $r_A, $r_s0 |
191 | #endif | ||
157 | 192 | ||
158 | END(bpf_slow_path_word) | 193 | END(bpf_slow_path_word) |
159 | 194 | ||
160 | NESTED(bpf_slow_path_half, (6 * SZREG), $r_sp) | 195 | NESTED(bpf_slow_path_half, (6 * SZREG), $r_sp) |
161 | bpf_slow_path_common(2) | 196 | bpf_slow_path_common(2) |
162 | #ifdef CONFIG_CPU_LITTLE_ENDIAN | 197 | #ifdef CONFIG_CPU_LITTLE_ENDIAN |
198 | # if defined(__mips_isa_rev) && (__mips_isa_rev >= 2) | ||
163 | jr $r_ra | 199 | jr $r_ra |
164 | wsbh $r_A, $r_s0 | 200 | wsbh $r_A, $r_s0 |
165 | #endif | 201 | # else |
202 | sll t0, $r_s0, 8 | ||
203 | andi t1, $r_s0, 0xff00 | ||
204 | andi t0, t0, 0xff00 | ||
205 | srl t1, t1, 8 | ||
206 | jr $r_ra | ||
207 | or $r_A, t0, t1 | ||
208 | # endif | ||
209 | #else | ||
166 | jr $r_ra | 210 | jr $r_ra |
167 | move $r_A, $r_s0 | 211 | move $r_A, $r_s0 |
212 | #endif | ||
168 | 213 | ||
169 | END(bpf_slow_path_half) | 214 | END(bpf_slow_path_half) |
170 | 215 | ||
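The bpf_jit_asm.S hunks guard the wsbh/rotr/seh byte-swap instructions, which only exist from MIPS32/MIPS64 R2 onward, and add shift-and-mask fallbacks for older ISAs. The 32-bit fallback sequence is equivalent to the following C sketch (the helper name is invented for illustration):

    /* Pre-R2 fallback: swap the four bytes of a 32-bit load. */
    static inline unsigned int swab32_sketch(unsigned int x)
    {
            return  (x << 24)                 /* byte 0 -> byte 3 */
                  | (x >> 24)                 /* byte 3 -> byte 0 */
                  | ((x >> 8) & 0x0000ff00)   /* byte 2 -> byte 1 */
                  | ((x & 0x0000ff00) << 8);  /* byte 1 -> byte 2 */
    }

The half-word variant follows the same pattern but uses an arithmetic shift (sra) so the swapped 16-bit value stays sign-extended, matching what wsbh followed by seh produces on R2 cores.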
diff --git a/arch/mn10300/include/asm/Kbuild b/arch/mn10300/include/asm/Kbuild index 6edb9ee6128e..1c8dd0f5cd5d 100644 --- a/arch/mn10300/include/asm/Kbuild +++ b/arch/mn10300/include/asm/Kbuild | |||
@@ -9,3 +9,4 @@ generic-y += mm-arch-hooks.h | |||
9 | generic-y += preempt.h | 9 | generic-y += preempt.h |
10 | generic-y += sections.h | 10 | generic-y += sections.h |
11 | generic-y += trace_clock.h | 11 | generic-y += trace_clock.h |
12 | generic-y += word-at-a-time.h | ||
diff --git a/arch/nios2/include/asm/Kbuild b/arch/nios2/include/asm/Kbuild index 914864eb5a25..d63330e88379 100644 --- a/arch/nios2/include/asm/Kbuild +++ b/arch/nios2/include/asm/Kbuild | |||
@@ -61,4 +61,5 @@ generic-y += types.h | |||
61 | generic-y += unaligned.h | 61 | generic-y += unaligned.h |
62 | generic-y += user.h | 62 | generic-y += user.h |
63 | generic-y += vga.h | 63 | generic-y += vga.h |
64 | generic-y += word-at-a-time.h | ||
64 | generic-y += xor.h | 65 | generic-y += xor.h |
diff --git a/arch/powerpc/include/asm/Kbuild b/arch/powerpc/include/asm/Kbuild index ab9f4e0ed4cf..ac1662956e0c 100644 --- a/arch/powerpc/include/asm/Kbuild +++ b/arch/powerpc/include/asm/Kbuild | |||
@@ -7,3 +7,4 @@ generic-y += mcs_spinlock.h | |||
7 | generic-y += preempt.h | 7 | generic-y += preempt.h |
8 | generic-y += rwsem.h | 8 | generic-y += rwsem.h |
9 | generic-y += vtime.h | 9 | generic-y += vtime.h |
10 | generic-y += word-at-a-time.h | ||
diff --git a/arch/s390/include/asm/Kbuild b/arch/s390/include/asm/Kbuild index 5ad26dd94d77..9043d2e1e2ae 100644 --- a/arch/s390/include/asm/Kbuild +++ b/arch/s390/include/asm/Kbuild | |||
@@ -6,3 +6,4 @@ generic-y += mcs_spinlock.h | |||
6 | generic-y += mm-arch-hooks.h | 6 | generic-y += mm-arch-hooks.h |
7 | generic-y += preempt.h | 7 | generic-y += preempt.h |
8 | generic-y += trace_clock.h | 8 | generic-y += trace_clock.h |
9 | generic-y += word-at-a-time.h | ||
diff --git a/arch/score/include/asm/Kbuild b/arch/score/include/asm/Kbuild index 92ffe397b893..a05218ff3fe4 100644 --- a/arch/score/include/asm/Kbuild +++ b/arch/score/include/asm/Kbuild | |||
@@ -13,3 +13,4 @@ generic-y += sections.h | |||
13 | generic-y += trace_clock.h | 13 | generic-y += trace_clock.h |
14 | generic-y += xor.h | 14 | generic-y += xor.h |
15 | generic-y += serial.h | 15 | generic-y += serial.h |
16 | generic-y += word-at-a-time.h | ||
diff --git a/arch/tile/gxio/mpipe.c b/arch/tile/gxio/mpipe.c index ee186e13dfe6..f102048d9c0e 100644 --- a/arch/tile/gxio/mpipe.c +++ b/arch/tile/gxio/mpipe.c | |||
@@ -19,6 +19,7 @@ | |||
19 | #include <linux/errno.h> | 19 | #include <linux/errno.h> |
20 | #include <linux/io.h> | 20 | #include <linux/io.h> |
21 | #include <linux/module.h> | 21 | #include <linux/module.h> |
22 | #include <linux/string.h> | ||
22 | 23 | ||
23 | #include <gxio/iorpc_globals.h> | 24 | #include <gxio/iorpc_globals.h> |
24 | #include <gxio/iorpc_mpipe.h> | 25 | #include <gxio/iorpc_mpipe.h> |
@@ -29,32 +30,6 @@ | |||
29 | /* HACK: Avoid pointless "shadow" warnings. */ | 30 | /* HACK: Avoid pointless "shadow" warnings. */ |
30 | #define link link_shadow | 31 | #define link link_shadow |
31 | 32 | ||
32 | /** | ||
33 | * strscpy - Copy a C-string into a sized buffer, but only if it fits | ||
34 | * @dest: Where to copy the string to | ||
35 | * @src: Where to copy the string from | ||
36 | * @size: size of destination buffer | ||
37 | * | ||
38 | * Use this routine to avoid copying too-long strings. | ||
39 | * The routine returns the total number of bytes copied | ||
40 | * (including the trailing NUL) or zero if the buffer wasn't | ||
41 | * big enough. To ensure that programmers pay attention | ||
42 | * to the return code, the destination has a single NUL | ||
43 | * written at the front (if size is non-zero) when the | ||
44 | * buffer is not big enough. | ||
45 | */ | ||
46 | static size_t strscpy(char *dest, const char *src, size_t size) | ||
47 | { | ||
48 | size_t len = strnlen(src, size) + 1; | ||
49 | if (len > size) { | ||
50 | if (size) | ||
51 | dest[0] = '\0'; | ||
52 | return 0; | ||
53 | } | ||
54 | memcpy(dest, src, len); | ||
55 | return len; | ||
56 | } | ||
57 | |||
58 | int gxio_mpipe_init(gxio_mpipe_context_t *context, unsigned int mpipe_index) | 33 | int gxio_mpipe_init(gxio_mpipe_context_t *context, unsigned int mpipe_index) |
59 | { | 34 | { |
60 | char file[32]; | 35 | char file[32]; |
@@ -540,7 +515,7 @@ int gxio_mpipe_link_instance(const char *link_name) | |||
540 | if (!context) | 515 | if (!context) |
541 | return GXIO_ERR_NO_DEVICE; | 516 | return GXIO_ERR_NO_DEVICE; |
542 | 517 | ||
543 | if (strscpy(name.name, link_name, sizeof(name.name)) == 0) | 518 | if (strscpy(name.name, link_name, sizeof(name.name)) < 0) |
544 | return GXIO_ERR_NO_DEVICE; | 519 | return GXIO_ERR_NO_DEVICE; |
545 | 520 | ||
546 | return gxio_mpipe_info_instance_aux(context, name); | 521 | return gxio_mpipe_info_instance_aux(context, name); |
@@ -559,7 +534,7 @@ int gxio_mpipe_link_enumerate_mac(int idx, char *link_name, uint8_t *link_mac) | |||
559 | 534 | ||
560 | rv = gxio_mpipe_info_enumerate_aux(context, idx, &name, &mac); | 535 | rv = gxio_mpipe_info_enumerate_aux(context, idx, &name, &mac); |
561 | if (rv >= 0) { | 536 | if (rv >= 0) { |
562 | if (strscpy(link_name, name.name, sizeof(name.name)) == 0) | 537 | if (strscpy(link_name, name.name, sizeof(name.name)) < 0) |
563 | return GXIO_ERR_INVAL_MEMORY_SIZE; | 538 | return GXIO_ERR_INVAL_MEMORY_SIZE; |
564 | memcpy(link_mac, mac.mac, sizeof(mac.mac)); | 539 | memcpy(link_mac, mac.mac, sizeof(mac.mac)); |
565 | } | 540 | } |
@@ -576,7 +551,7 @@ int gxio_mpipe_link_open(gxio_mpipe_link_t *link, | |||
576 | _gxio_mpipe_link_name_t name; | 551 | _gxio_mpipe_link_name_t name; |
577 | int rv; | 552 | int rv; |
578 | 553 | ||
579 | if (strscpy(name.name, link_name, sizeof(name.name)) == 0) | 554 | if (strscpy(name.name, link_name, sizeof(name.name)) < 0) |
580 | return GXIO_ERR_NO_DEVICE; | 555 | return GXIO_ERR_NO_DEVICE; |
581 | 556 | ||
582 | rv = gxio_mpipe_link_open_aux(context, name, flags); | 557 | rv = gxio_mpipe_link_open_aux(context, name, flags); |
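With strscpy() now coming from <linux/string.h> (hence the added include), the tile code drops its private copy, whose return convention differed: the local helper returned 0 when the source string did not fit, while the generic strscpy() returns the number of characters copied or -E2BIG on truncation. That is why the callers switch from `== 0` to `< 0`; roughly:

    /* Sketch of the updated check against the generic helper. */
    ssize_t n = strscpy(name.name, link_name, sizeof(name.name));
    if (n < 0)                      /* -E2BIG: link_name did not fit */
            return GXIO_ERR_NO_DEVICE;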
diff --git a/arch/tile/include/asm/Kbuild b/arch/tile/include/asm/Kbuild index ba35c41c71ff..0b6cacaad933 100644 --- a/arch/tile/include/asm/Kbuild +++ b/arch/tile/include/asm/Kbuild | |||
@@ -40,4 +40,5 @@ generic-y += termbits.h | |||
40 | generic-y += termios.h | 40 | generic-y += termios.h |
41 | generic-y += trace_clock.h | 41 | generic-y += trace_clock.h |
42 | generic-y += types.h | 42 | generic-y += types.h |
43 | generic-y += word-at-a-time.h | ||
43 | generic-y += xor.h | 44 | generic-y += xor.h |
diff --git a/arch/tile/kernel/usb.c b/arch/tile/kernel/usb.c index f0da5a237e94..9f1e05e12255 100644 --- a/arch/tile/kernel/usb.c +++ b/arch/tile/kernel/usb.c | |||
@@ -22,6 +22,7 @@ | |||
22 | #include <linux/platform_device.h> | 22 | #include <linux/platform_device.h> |
23 | #include <linux/usb/tilegx.h> | 23 | #include <linux/usb/tilegx.h> |
24 | #include <linux/init.h> | 24 | #include <linux/init.h> |
25 | #include <linux/module.h> | ||
25 | #include <linux/types.h> | 26 | #include <linux/types.h> |
26 | 27 | ||
27 | static u64 ehci_dmamask = DMA_BIT_MASK(32); | 28 | static u64 ehci_dmamask = DMA_BIT_MASK(32); |
diff --git a/arch/um/include/asm/Kbuild b/arch/um/include/asm/Kbuild index 149ec55f9c46..904f3ebf4220 100644 --- a/arch/um/include/asm/Kbuild +++ b/arch/um/include/asm/Kbuild | |||
@@ -25,4 +25,5 @@ generic-y += preempt.h | |||
25 | generic-y += switch_to.h | 25 | generic-y += switch_to.h |
26 | generic-y += topology.h | 26 | generic-y += topology.h |
27 | generic-y += trace_clock.h | 27 | generic-y += trace_clock.h |
28 | generic-y += word-at-a-time.h | ||
28 | generic-y += xor.h | 29 | generic-y += xor.h |
diff --git a/arch/unicore32/include/asm/Kbuild b/arch/unicore32/include/asm/Kbuild index 1fc7a286dc6f..256c45b3ae34 100644 --- a/arch/unicore32/include/asm/Kbuild +++ b/arch/unicore32/include/asm/Kbuild | |||
@@ -62,4 +62,5 @@ generic-y += ucontext.h | |||
62 | generic-y += unaligned.h | 62 | generic-y += unaligned.h |
63 | generic-y += user.h | 63 | generic-y += user.h |
64 | generic-y += vga.h | 64 | generic-y += vga.h |
65 | generic-y += word-at-a-time.h | ||
65 | generic-y += xor.h | 66 | generic-y += xor.h |
diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h index e6cf2ad350d1..9727b3b48bd1 100644 --- a/arch/x86/include/asm/cpufeature.h +++ b/arch/x86/include/asm/cpufeature.h | |||
@@ -193,7 +193,7 @@ | |||
193 | #define X86_FEATURE_HW_PSTATE ( 7*32+ 8) /* AMD HW-PState */ | 193 | #define X86_FEATURE_HW_PSTATE ( 7*32+ 8) /* AMD HW-PState */ |
194 | #define X86_FEATURE_PROC_FEEDBACK ( 7*32+ 9) /* AMD ProcFeedbackInterface */ | 194 | #define X86_FEATURE_PROC_FEEDBACK ( 7*32+ 9) /* AMD ProcFeedbackInterface */ |
195 | #define X86_FEATURE_HWP ( 7*32+ 10) /* "hwp" Intel HWP */ | 195 | #define X86_FEATURE_HWP ( 7*32+ 10) /* "hwp" Intel HWP */ |
196 | #define X86_FEATURE_HWP_NOITFY ( 7*32+ 11) /* Intel HWP_NOTIFY */ | 196 | #define X86_FEATURE_HWP_NOTIFY ( 7*32+ 11) /* Intel HWP_NOTIFY */ |
197 | #define X86_FEATURE_HWP_ACT_WINDOW ( 7*32+ 12) /* Intel HWP_ACT_WINDOW */ | 197 | #define X86_FEATURE_HWP_ACT_WINDOW ( 7*32+ 12) /* Intel HWP_ACT_WINDOW */ |
198 | #define X86_FEATURE_HWP_EPP ( 7*32+13) /* Intel HWP_EPP */ | 198 | #define X86_FEATURE_HWP_EPP ( 7*32+13) /* Intel HWP_EPP */ |
199 | #define X86_FEATURE_HWP_PKG_REQ ( 7*32+14) /* Intel HWP_PKG_REQ */ | 199 | #define X86_FEATURE_HWP_PKG_REQ ( 7*32+14) /* Intel HWP_PKG_REQ */ |
diff --git a/arch/x86/include/asm/efi.h b/arch/x86/include/asm/efi.h index ab5f1d447ef9..ae68be92f755 100644 --- a/arch/x86/include/asm/efi.h +++ b/arch/x86/include/asm/efi.h | |||
@@ -86,6 +86,7 @@ extern u64 asmlinkage efi_call(void *fp, ...); | |||
86 | extern void __iomem *__init efi_ioremap(unsigned long addr, unsigned long size, | 86 | extern void __iomem *__init efi_ioremap(unsigned long addr, unsigned long size, |
87 | u32 type, u64 attribute); | 87 | u32 type, u64 attribute); |
88 | 88 | ||
89 | #ifdef CONFIG_KASAN | ||
89 | /* | 90 | /* |
90 | * CONFIG_KASAN may redefine memset to __memset. __memset function is present | 91 | * CONFIG_KASAN may redefine memset to __memset. __memset function is present |
91 | * only in kernel binary. Since the EFI stub linked into a separate binary it | 92 | * only in kernel binary. Since the EFI stub linked into a separate binary it |
@@ -95,6 +96,7 @@ extern void __iomem *__init efi_ioremap(unsigned long addr, unsigned long size, | |||
95 | #undef memcpy | 96 | #undef memcpy |
96 | #undef memset | 97 | #undef memset |
97 | #undef memmove | 98 | #undef memmove |
99 | #endif | ||
98 | 100 | ||
99 | #endif /* CONFIG_X86_32 */ | 101 | #endif /* CONFIG_X86_32 */ |
100 | 102 | ||
diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h index b98b471a3b7e..b8c14bb7fc8f 100644 --- a/arch/x86/include/asm/msr-index.h +++ b/arch/x86/include/asm/msr-index.h | |||
@@ -141,6 +141,8 @@ | |||
141 | #define DEBUGCTLMSR_BTS_OFF_USR (1UL << 10) | 141 | #define DEBUGCTLMSR_BTS_OFF_USR (1UL << 10) |
142 | #define DEBUGCTLMSR_FREEZE_LBRS_ON_PMI (1UL << 11) | 142 | #define DEBUGCTLMSR_FREEZE_LBRS_ON_PMI (1UL << 11) |
143 | 143 | ||
144 | #define MSR_PEBS_FRONTEND 0x000003f7 | ||
145 | |||
144 | #define MSR_IA32_POWER_CTL 0x000001fc | 146 | #define MSR_IA32_POWER_CTL 0x000001fc |
145 | 147 | ||
146 | #define MSR_IA32_MC0_CTL 0x00000400 | 148 | #define MSR_IA32_MC0_CTL 0x00000400 |
diff --git a/arch/x86/include/asm/pvclock-abi.h b/arch/x86/include/asm/pvclock-abi.h index 655e07a48f6c..67f08230103a 100644 --- a/arch/x86/include/asm/pvclock-abi.h +++ b/arch/x86/include/asm/pvclock-abi.h | |||
@@ -41,6 +41,7 @@ struct pvclock_wall_clock { | |||
41 | 41 | ||
42 | #define PVCLOCK_TSC_STABLE_BIT (1 << 0) | 42 | #define PVCLOCK_TSC_STABLE_BIT (1 << 0) |
43 | #define PVCLOCK_GUEST_STOPPED (1 << 1) | 43 | #define PVCLOCK_GUEST_STOPPED (1 << 1) |
44 | /* PVCLOCK_COUNTS_FROM_ZERO broke ABI and can't be used anymore. */ | ||
44 | #define PVCLOCK_COUNTS_FROM_ZERO (1 << 2) | 45 | #define PVCLOCK_COUNTS_FROM_ZERO (1 << 2) |
45 | #endif /* __ASSEMBLY__ */ | 46 | #endif /* __ASSEMBLY__ */ |
46 | #endif /* _ASM_X86_PVCLOCK_ABI_H */ | 47 | #endif /* _ASM_X86_PVCLOCK_ABI_H */ |
diff --git a/arch/x86/include/uapi/asm/bitsperlong.h b/arch/x86/include/uapi/asm/bitsperlong.h index b0ae1c4dc791..217909b4d6f5 100644 --- a/arch/x86/include/uapi/asm/bitsperlong.h +++ b/arch/x86/include/uapi/asm/bitsperlong.h | |||
@@ -1,7 +1,7 @@ | |||
1 | #ifndef __ASM_X86_BITSPERLONG_H | 1 | #ifndef __ASM_X86_BITSPERLONG_H |
2 | #define __ASM_X86_BITSPERLONG_H | 2 | #define __ASM_X86_BITSPERLONG_H |
3 | 3 | ||
4 | #ifdef __x86_64__ | 4 | #if defined(__x86_64__) && !defined(__ILP32__) |
5 | # define __BITS_PER_LONG 64 | 5 | # define __BITS_PER_LONG 64 |
6 | #else | 6 | #else |
7 | # define __BITS_PER_LONG 32 | 7 | # define __BITS_PER_LONG 32 |
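Adding !defined(__ILP32__) makes this uapi header report the long width correctly for x32 userspace, which uses x86_64 instructions with ILP32 data sizes. How the guard resolves per ABI (illustrative summary, not additional code in the header):

    /*
     *   x86_64 native : __x86_64__ set, __ILP32__ unset -> __BITS_PER_LONG 64
     *   x32           : __x86_64__ set, __ILP32__ set   -> __BITS_PER_LONG 32
     *   i386          : __x86_64__ unset                -> __BITS_PER_LONG 32
     */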
diff --git a/arch/x86/kernel/cpu/mshyperv.c b/arch/x86/kernel/cpu/mshyperv.c index 381c8b9b3a33..20e242ea1bc4 100644 --- a/arch/x86/kernel/cpu/mshyperv.c +++ b/arch/x86/kernel/cpu/mshyperv.c | |||
@@ -34,11 +34,10 @@ | |||
34 | struct ms_hyperv_info ms_hyperv; | 34 | struct ms_hyperv_info ms_hyperv; |
35 | EXPORT_SYMBOL_GPL(ms_hyperv); | 35 | EXPORT_SYMBOL_GPL(ms_hyperv); |
36 | 36 | ||
37 | static void (*hv_kexec_handler)(void); | ||
38 | static void (*hv_crash_handler)(struct pt_regs *regs); | ||
39 | |||
40 | #if IS_ENABLED(CONFIG_HYPERV) | 37 | #if IS_ENABLED(CONFIG_HYPERV) |
41 | static void (*vmbus_handler)(void); | 38 | static void (*vmbus_handler)(void); |
39 | static void (*hv_kexec_handler)(void); | ||
40 | static void (*hv_crash_handler)(struct pt_regs *regs); | ||
42 | 41 | ||
43 | void hyperv_vector_handler(struct pt_regs *regs) | 42 | void hyperv_vector_handler(struct pt_regs *regs) |
44 | { | 43 | { |
@@ -96,8 +95,8 @@ void hv_remove_crash_handler(void) | |||
96 | hv_crash_handler = NULL; | 95 | hv_crash_handler = NULL; |
97 | } | 96 | } |
98 | EXPORT_SYMBOL_GPL(hv_remove_crash_handler); | 97 | EXPORT_SYMBOL_GPL(hv_remove_crash_handler); |
99 | #endif | ||
100 | 98 | ||
99 | #ifdef CONFIG_KEXEC_CORE | ||
101 | static void hv_machine_shutdown(void) | 100 | static void hv_machine_shutdown(void) |
102 | { | 101 | { |
103 | if (kexec_in_progress && hv_kexec_handler) | 102 | if (kexec_in_progress && hv_kexec_handler) |
@@ -111,7 +110,8 @@ static void hv_machine_crash_shutdown(struct pt_regs *regs) | |||
111 | hv_crash_handler(regs); | 110 | hv_crash_handler(regs); |
112 | native_machine_crash_shutdown(regs); | 111 | native_machine_crash_shutdown(regs); |
113 | } | 112 | } |
114 | 113 | #endif /* CONFIG_KEXEC_CORE */ | |
114 | #endif /* CONFIG_HYPERV */ | ||
115 | 115 | ||
116 | static uint32_t __init ms_hyperv_platform(void) | 116 | static uint32_t __init ms_hyperv_platform(void) |
117 | { | 117 | { |
@@ -186,8 +186,10 @@ static void __init ms_hyperv_init_platform(void) | |||
186 | no_timer_check = 1; | 186 | no_timer_check = 1; |
187 | #endif | 187 | #endif |
188 | 188 | ||
189 | #if IS_ENABLED(CONFIG_HYPERV) && defined(CONFIG_KEXEC_CORE) | ||
189 | machine_ops.shutdown = hv_machine_shutdown; | 190 | machine_ops.shutdown = hv_machine_shutdown; |
190 | machine_ops.crash_shutdown = hv_machine_crash_shutdown; | 191 | machine_ops.crash_shutdown = hv_machine_crash_shutdown; |
192 | #endif | ||
191 | mark_tsc_unstable("running on Hyper-V"); | 193 | mark_tsc_unstable("running on Hyper-V"); |
192 | } | 194 | } |
193 | 195 | ||
diff --git a/arch/x86/kernel/cpu/perf_event.h b/arch/x86/kernel/cpu/perf_event.h index 5edf6d868fc1..165be83a7fa4 100644 --- a/arch/x86/kernel/cpu/perf_event.h +++ b/arch/x86/kernel/cpu/perf_event.h | |||
@@ -47,6 +47,7 @@ enum extra_reg_type { | |||
47 | EXTRA_REG_RSP_1 = 1, /* offcore_response_1 */ | 47 | EXTRA_REG_RSP_1 = 1, /* offcore_response_1 */ |
48 | EXTRA_REG_LBR = 2, /* lbr_select */ | 48 | EXTRA_REG_LBR = 2, /* lbr_select */ |
49 | EXTRA_REG_LDLAT = 3, /* ld_lat_threshold */ | 49 | EXTRA_REG_LDLAT = 3, /* ld_lat_threshold */ |
50 | EXTRA_REG_FE = 4, /* fe_* */ | ||
50 | 51 | ||
51 | EXTRA_REG_MAX /* number of entries needed */ | 52 | EXTRA_REG_MAX /* number of entries needed */ |
52 | }; | 53 | }; |
diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c index 3fefebfbdf4b..f63360be2238 100644 --- a/arch/x86/kernel/cpu/perf_event_intel.c +++ b/arch/x86/kernel/cpu/perf_event_intel.c | |||
@@ -205,6 +205,11 @@ static struct extra_reg intel_skl_extra_regs[] __read_mostly = { | |||
205 | INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x3fffff8fffull, RSP_0), | 205 | INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x3fffff8fffull, RSP_0), |
206 | INTEL_UEVENT_EXTRA_REG(0x01bb, MSR_OFFCORE_RSP_1, 0x3fffff8fffull, RSP_1), | 206 | INTEL_UEVENT_EXTRA_REG(0x01bb, MSR_OFFCORE_RSP_1, 0x3fffff8fffull, RSP_1), |
207 | INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd), | 207 | INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd), |
208 | /* | ||
209 | * Note the low 8 bits eventsel code is not a continuous field, containing | ||
210 | * some #GPing bits. These are masked out. | ||
211 | */ | ||
212 | INTEL_UEVENT_EXTRA_REG(0x01c6, MSR_PEBS_FRONTEND, 0x7fff17, FE), | ||
208 | EVENT_EXTRA_END | 213 | EVENT_EXTRA_END |
209 | }; | 214 | }; |
210 | 215 | ||
@@ -250,7 +255,7 @@ struct event_constraint intel_bdw_event_constraints[] = { | |||
250 | FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */ | 255 | FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */ |
251 | FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */ | 256 | FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */ |
252 | INTEL_UEVENT_CONSTRAINT(0x148, 0x4), /* L1D_PEND_MISS.PENDING */ | 257 | INTEL_UEVENT_CONSTRAINT(0x148, 0x4), /* L1D_PEND_MISS.PENDING */ |
253 | INTEL_EVENT_CONSTRAINT(0xa3, 0x4), /* CYCLE_ACTIVITY.* */ | 258 | INTEL_UEVENT_CONSTRAINT(0x8a3, 0x4), /* CYCLE_ACTIVITY.CYCLES_L1D_MISS */ |
254 | EVENT_CONSTRAINT_END | 259 | EVENT_CONSTRAINT_END |
255 | }; | 260 | }; |
256 | 261 | ||
@@ -2891,6 +2896,8 @@ PMU_FORMAT_ATTR(offcore_rsp, "config1:0-63"); | |||
2891 | 2896 | ||
2892 | PMU_FORMAT_ATTR(ldlat, "config1:0-15"); | 2897 | PMU_FORMAT_ATTR(ldlat, "config1:0-15"); |
2893 | 2898 | ||
2899 | PMU_FORMAT_ATTR(frontend, "config1:0-23"); | ||
2900 | |||
2894 | static struct attribute *intel_arch3_formats_attr[] = { | 2901 | static struct attribute *intel_arch3_formats_attr[] = { |
2895 | &format_attr_event.attr, | 2902 | &format_attr_event.attr, |
2896 | &format_attr_umask.attr, | 2903 | &format_attr_umask.attr, |
@@ -2907,6 +2914,11 @@ static struct attribute *intel_arch3_formats_attr[] = { | |||
2907 | NULL, | 2914 | NULL, |
2908 | }; | 2915 | }; |
2909 | 2916 | ||
2917 | static struct attribute *skl_format_attr[] = { | ||
2918 | &format_attr_frontend.attr, | ||
2919 | NULL, | ||
2920 | }; | ||
2921 | |||
2910 | static __initconst const struct x86_pmu core_pmu = { | 2922 | static __initconst const struct x86_pmu core_pmu = { |
2911 | .name = "core", | 2923 | .name = "core", |
2912 | .handle_irq = x86_pmu_handle_irq, | 2924 | .handle_irq = x86_pmu_handle_irq, |
@@ -3516,7 +3528,8 @@ __init int intel_pmu_init(void) | |||
3516 | 3528 | ||
3517 | x86_pmu.hw_config = hsw_hw_config; | 3529 | x86_pmu.hw_config = hsw_hw_config; |
3518 | x86_pmu.get_event_constraints = hsw_get_event_constraints; | 3530 | x86_pmu.get_event_constraints = hsw_get_event_constraints; |
3519 | x86_pmu.cpu_events = hsw_events_attrs; | 3531 | x86_pmu.format_attrs = merge_attr(intel_arch3_formats_attr, |
3532 | skl_format_attr); | ||
3520 | WARN_ON(!x86_pmu.format_attrs); | 3533 | WARN_ON(!x86_pmu.format_attrs); |
3521 | x86_pmu.cpu_events = hsw_events_attrs; | 3534 | x86_pmu.cpu_events = hsw_events_attrs; |
3522 | pr_cont("Skylake events, "); | 3535 | pr_cont("Skylake events, "); |
diff --git a/arch/x86/kernel/cpu/perf_event_msr.c b/arch/x86/kernel/cpu/perf_event_msr.c index 086b12eae794..f32ac13934f2 100644 --- a/arch/x86/kernel/cpu/perf_event_msr.c +++ b/arch/x86/kernel/cpu/perf_event_msr.c | |||
@@ -10,12 +10,12 @@ enum perf_msr_id { | |||
10 | PERF_MSR_EVENT_MAX, | 10 | PERF_MSR_EVENT_MAX, |
11 | }; | 11 | }; |
12 | 12 | ||
13 | bool test_aperfmperf(int idx) | 13 | static bool test_aperfmperf(int idx) |
14 | { | 14 | { |
15 | return boot_cpu_has(X86_FEATURE_APERFMPERF); | 15 | return boot_cpu_has(X86_FEATURE_APERFMPERF); |
16 | } | 16 | } |
17 | 17 | ||
18 | bool test_intel(int idx) | 18 | static bool test_intel(int idx) |
19 | { | 19 | { |
20 | if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL || | 20 | if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL || |
21 | boot_cpu_data.x86 != 6) | 21 | boot_cpu_data.x86 != 6) |
diff --git a/arch/x86/kernel/cpu/scattered.c b/arch/x86/kernel/cpu/scattered.c index 3d423a101fae..608fb26c7254 100644 --- a/arch/x86/kernel/cpu/scattered.c +++ b/arch/x86/kernel/cpu/scattered.c | |||
@@ -37,7 +37,7 @@ void init_scattered_cpuid_features(struct cpuinfo_x86 *c) | |||
37 | { X86_FEATURE_PLN, CR_EAX, 4, 0x00000006, 0 }, | 37 | { X86_FEATURE_PLN, CR_EAX, 4, 0x00000006, 0 }, |
38 | { X86_FEATURE_PTS, CR_EAX, 6, 0x00000006, 0 }, | 38 | { X86_FEATURE_PTS, CR_EAX, 6, 0x00000006, 0 }, |
39 | { X86_FEATURE_HWP, CR_EAX, 7, 0x00000006, 0 }, | 39 | { X86_FEATURE_HWP, CR_EAX, 7, 0x00000006, 0 }, |
40 | { X86_FEATURE_HWP_NOITFY, CR_EAX, 8, 0x00000006, 0 }, | 40 | { X86_FEATURE_HWP_NOTIFY, CR_EAX, 8, 0x00000006, 0 }, |
41 | { X86_FEATURE_HWP_ACT_WINDOW, CR_EAX, 9, 0x00000006, 0 }, | 41 | { X86_FEATURE_HWP_ACT_WINDOW, CR_EAX, 9, 0x00000006, 0 }, |
42 | { X86_FEATURE_HWP_EPP, CR_EAX,10, 0x00000006, 0 }, | 42 | { X86_FEATURE_HWP_EPP, CR_EAX,10, 0x00000006, 0 }, |
43 | { X86_FEATURE_HWP_PKG_REQ, CR_EAX,11, 0x00000006, 0 }, | 43 | { X86_FEATURE_HWP_PKG_REQ, CR_EAX,11, 0x00000006, 0 }, |
diff --git a/arch/x86/kernel/crash.c b/arch/x86/kernel/crash.c index e068d6683dba..74ca2fe7a0b3 100644 --- a/arch/x86/kernel/crash.c +++ b/arch/x86/kernel/crash.c | |||
@@ -185,10 +185,9 @@ void native_machine_crash_shutdown(struct pt_regs *regs) | |||
185 | } | 185 | } |
186 | 186 | ||
187 | #ifdef CONFIG_KEXEC_FILE | 187 | #ifdef CONFIG_KEXEC_FILE |
188 | static int get_nr_ram_ranges_callback(unsigned long start_pfn, | 188 | static int get_nr_ram_ranges_callback(u64 start, u64 end, void *arg) |
189 | unsigned long nr_pfn, void *arg) | ||
190 | { | 189 | { |
191 | int *nr_ranges = arg; | 190 | unsigned int *nr_ranges = arg; |
192 | 191 | ||
193 | (*nr_ranges)++; | 192 | (*nr_ranges)++; |
194 | return 0; | 193 | return 0; |
@@ -214,7 +213,7 @@ static void fill_up_crash_elf_data(struct crash_elf_data *ced, | |||
214 | 213 | ||
215 | ced->image = image; | 214 | ced->image = image; |
216 | 215 | ||
217 | walk_system_ram_range(0, -1, &nr_ranges, | 216 | walk_system_ram_res(0, -1, &nr_ranges, |
218 | get_nr_ram_ranges_callback); | 217 | get_nr_ram_ranges_callback); |
219 | 218 | ||
220 | ced->max_nr_ranges = nr_ranges; | 219 | ced->max_nr_ranges = nr_ranges; |
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c index 6d0e62ae8516..39e585a554b7 100644 --- a/arch/x86/kernel/process.c +++ b/arch/x86/kernel/process.c | |||
@@ -506,3 +506,58 @@ unsigned long arch_randomize_brk(struct mm_struct *mm) | |||
506 | return randomize_range(mm->brk, range_end, 0) ? : mm->brk; | 506 | return randomize_range(mm->brk, range_end, 0) ? : mm->brk; |
507 | } | 507 | } |
508 | 508 | ||
509 | /* | ||
510 | * Called from fs/proc with a reference on @p to find the function | ||
511 | * which called into schedule(). This needs to be done carefully | ||
512 | * because the task might wake up and we might look at a stack | ||
513 | * changing under us. | ||
514 | */ | ||
515 | unsigned long get_wchan(struct task_struct *p) | ||
516 | { | ||
517 | unsigned long start, bottom, top, sp, fp, ip; | ||
518 | int count = 0; | ||
519 | |||
520 | if (!p || p == current || p->state == TASK_RUNNING) | ||
521 | return 0; | ||
522 | |||
523 | start = (unsigned long)task_stack_page(p); | ||
524 | if (!start) | ||
525 | return 0; | ||
526 | |||
527 | /* | ||
528 | * Layout of the stack page: | ||
529 | * | ||
530 | * ----------- topmax = start + THREAD_SIZE - sizeof(unsigned long) | ||
531 | * PADDING | ||
532 | * ----------- top = topmax - TOP_OF_KERNEL_STACK_PADDING | ||
533 | * stack | ||
534 | * ----------- bottom = start + sizeof(thread_info) | ||
535 | * thread_info | ||
536 | * ----------- start | ||
537 | * | ||
538 | * The tasks stack pointer points at the location where the | ||
539 | * framepointer is stored. The data on the stack is: | ||
540 | * ... IP FP ... IP FP | ||
541 | * | ||
542 | * We need to read FP and IP, so we need to adjust the upper | ||
543 | * bound by another unsigned long. | ||
544 | */ | ||
545 | top = start + THREAD_SIZE - TOP_OF_KERNEL_STACK_PADDING; | ||
546 | top -= 2 * sizeof(unsigned long); | ||
547 | bottom = start + sizeof(struct thread_info); | ||
548 | |||
549 | sp = READ_ONCE(p->thread.sp); | ||
550 | if (sp < bottom || sp > top) | ||
551 | return 0; | ||
552 | |||
553 | fp = READ_ONCE(*(unsigned long *)sp); | ||
554 | do { | ||
555 | if (fp < bottom || fp > top) | ||
556 | return 0; | ||
557 | ip = READ_ONCE(*(unsigned long *)(fp + sizeof(unsigned long))); | ||
558 | if (!in_sched_functions(ip)) | ||
559 | return ip; | ||
560 | fp = READ_ONCE(*(unsigned long *)fp); | ||
561 | } while (count++ < 16 && p->state != TASK_RUNNING); | ||
562 | return 0; | ||
563 | } | ||
diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c index c13df2c735f8..737527b40e5b 100644 --- a/arch/x86/kernel/process_32.c +++ b/arch/x86/kernel/process_32.c | |||
@@ -324,31 +324,3 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p) | |||
324 | 324 | ||
325 | return prev_p; | 325 | return prev_p; |
326 | } | 326 | } |
327 | |||
328 | #define top_esp (THREAD_SIZE - sizeof(unsigned long)) | ||
329 | #define top_ebp (THREAD_SIZE - 2*sizeof(unsigned long)) | ||
330 | |||
331 | unsigned long get_wchan(struct task_struct *p) | ||
332 | { | ||
333 | unsigned long bp, sp, ip; | ||
334 | unsigned long stack_page; | ||
335 | int count = 0; | ||
336 | if (!p || p == current || p->state == TASK_RUNNING) | ||
337 | return 0; | ||
338 | stack_page = (unsigned long)task_stack_page(p); | ||
339 | sp = p->thread.sp; | ||
340 | if (!stack_page || sp < stack_page || sp > top_esp+stack_page) | ||
341 | return 0; | ||
342 | /* include/asm-i386/system.h:switch_to() pushes bp last. */ | ||
343 | bp = *(unsigned long *) sp; | ||
344 | do { | ||
345 | if (bp < stack_page || bp > top_ebp+stack_page) | ||
346 | return 0; | ||
347 | ip = *(unsigned long *) (bp+4); | ||
348 | if (!in_sched_functions(ip)) | ||
349 | return ip; | ||
350 | bp = *(unsigned long *) bp; | ||
351 | } while (count++ < 16); | ||
352 | return 0; | ||
353 | } | ||
354 | |||
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c index 3c1bbcf12924..b35921a670b2 100644 --- a/arch/x86/kernel/process_64.c +++ b/arch/x86/kernel/process_64.c | |||
@@ -499,30 +499,6 @@ void set_personality_ia32(bool x32) | |||
499 | } | 499 | } |
500 | EXPORT_SYMBOL_GPL(set_personality_ia32); | 500 | EXPORT_SYMBOL_GPL(set_personality_ia32); |
501 | 501 | ||
502 | unsigned long get_wchan(struct task_struct *p) | ||
503 | { | ||
504 | unsigned long stack; | ||
505 | u64 fp, ip; | ||
506 | int count = 0; | ||
507 | |||
508 | if (!p || p == current || p->state == TASK_RUNNING) | ||
509 | return 0; | ||
510 | stack = (unsigned long)task_stack_page(p); | ||
511 | if (p->thread.sp < stack || p->thread.sp >= stack+THREAD_SIZE) | ||
512 | return 0; | ||
513 | fp = *(u64 *)(p->thread.sp); | ||
514 | do { | ||
515 | if (fp < (unsigned long)stack || | ||
516 | fp >= (unsigned long)stack+THREAD_SIZE) | ||
517 | return 0; | ||
518 | ip = *(u64 *)(fp+8); | ||
519 | if (!in_sched_functions(ip)) | ||
520 | return ip; | ||
521 | fp = *(u64 *)fp; | ||
522 | } while (count++ < 16); | ||
523 | return 0; | ||
524 | } | ||
525 | |||
526 | long do_arch_prctl(struct task_struct *task, int code, unsigned long addr) | 502 | long do_arch_prctl(struct task_struct *task, int code, unsigned long addr) |
527 | { | 503 | { |
528 | int ret = 0; | 504 | int ret = 0; |
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c index 94b7d15db3fc..2f9ed1ff0632 100644 --- a/arch/x86/kvm/svm.c +++ b/arch/x86/kvm/svm.c | |||
@@ -514,7 +514,7 @@ static void skip_emulated_instruction(struct kvm_vcpu *vcpu) | |||
514 | struct vcpu_svm *svm = to_svm(vcpu); | 514 | struct vcpu_svm *svm = to_svm(vcpu); |
515 | 515 | ||
516 | if (svm->vmcb->control.next_rip != 0) { | 516 | if (svm->vmcb->control.next_rip != 0) { |
517 | WARN_ON(!static_cpu_has(X86_FEATURE_NRIPS)); | 517 | WARN_ON_ONCE(!static_cpu_has(X86_FEATURE_NRIPS)); |
518 | svm->next_rip = svm->vmcb->control.next_rip; | 518 | svm->next_rip = svm->vmcb->control.next_rip; |
519 | } | 519 | } |
520 | 520 | ||
@@ -866,64 +866,6 @@ static void svm_disable_lbrv(struct vcpu_svm *svm) | |||
866 | set_msr_interception(msrpm, MSR_IA32_LASTINTTOIP, 0, 0); | 866 | set_msr_interception(msrpm, MSR_IA32_LASTINTTOIP, 0, 0); |
867 | } | 867 | } |
868 | 868 | ||
869 | #define MTRR_TYPE_UC_MINUS 7 | ||
870 | #define MTRR2PROTVAL_INVALID 0xff | ||
871 | |||
872 | static u8 mtrr2protval[8]; | ||
873 | |||
874 | static u8 fallback_mtrr_type(int mtrr) | ||
875 | { | ||
876 | /* | ||
877 | * WT and WP aren't always available in the host PAT. Treat | ||
878 | * them as UC and UC- respectively. Everything else should be | ||
879 | * there. | ||
880 | */ | ||
881 | switch (mtrr) | ||
882 | { | ||
883 | case MTRR_TYPE_WRTHROUGH: | ||
884 | return MTRR_TYPE_UNCACHABLE; | ||
885 | case MTRR_TYPE_WRPROT: | ||
886 | return MTRR_TYPE_UC_MINUS; | ||
887 | default: | ||
888 | BUG(); | ||
889 | } | ||
890 | } | ||
891 | |||
892 | static void build_mtrr2protval(void) | ||
893 | { | ||
894 | int i; | ||
895 | u64 pat; | ||
896 | |||
897 | for (i = 0; i < 8; i++) | ||
898 | mtrr2protval[i] = MTRR2PROTVAL_INVALID; | ||
899 | |||
900 | /* Ignore the invalid MTRR types. */ | ||
901 | mtrr2protval[2] = 0; | ||
902 | mtrr2protval[3] = 0; | ||
903 | |||
904 | /* | ||
905 | * Use host PAT value to figure out the mapping from guest MTRR | ||
906 | * values to nested page table PAT/PCD/PWT values. We do not | ||
907 | * want to change the host PAT value every time we enter the | ||
908 | * guest. | ||
909 | */ | ||
910 | rdmsrl(MSR_IA32_CR_PAT, pat); | ||
911 | for (i = 0; i < 8; i++) { | ||
912 | u8 mtrr = pat >> (8 * i); | ||
913 | |||
914 | if (mtrr2protval[mtrr] == MTRR2PROTVAL_INVALID) | ||
915 | mtrr2protval[mtrr] = __cm_idx2pte(i); | ||
916 | } | ||
917 | |||
918 | for (i = 0; i < 8; i++) { | ||
919 | if (mtrr2protval[i] == MTRR2PROTVAL_INVALID) { | ||
920 | u8 fallback = fallback_mtrr_type(i); | ||
921 | mtrr2protval[i] = mtrr2protval[fallback]; | ||
922 | BUG_ON(mtrr2protval[i] == MTRR2PROTVAL_INVALID); | ||
923 | } | ||
924 | } | ||
925 | } | ||
926 | |||
927 | static __init int svm_hardware_setup(void) | 869 | static __init int svm_hardware_setup(void) |
928 | { | 870 | { |
929 | int cpu; | 871 | int cpu; |
@@ -990,7 +932,6 @@ static __init int svm_hardware_setup(void) | |||
990 | } else | 932 | } else |
991 | kvm_disable_tdp(); | 933 | kvm_disable_tdp(); |
992 | 934 | ||
993 | build_mtrr2protval(); | ||
994 | return 0; | 935 | return 0; |
995 | 936 | ||
996 | err: | 937 | err: |
@@ -1145,43 +1086,6 @@ static u64 svm_compute_tsc_offset(struct kvm_vcpu *vcpu, u64 target_tsc) | |||
1145 | return target_tsc - tsc; | 1086 | return target_tsc - tsc; |
1146 | } | 1087 | } |
1147 | 1088 | ||
1148 | static void svm_set_guest_pat(struct vcpu_svm *svm, u64 *g_pat) | ||
1149 | { | ||
1150 | struct kvm_vcpu *vcpu = &svm->vcpu; | ||
1151 | |||
1152 | /* Unlike Intel, AMD takes the guest's CR0.CD into account. | ||
1153 | * | ||
1154 | * AMD does not have IPAT. To emulate it for the case of guests | ||
1155 | * with no assigned devices, just set everything to WB. If guests | ||
1156 | * have assigned devices, however, we cannot force WB for RAM | ||
1157 | * pages only, so use the guest PAT directly. | ||
1158 | */ | ||
1159 | if (!kvm_arch_has_assigned_device(vcpu->kvm)) | ||
1160 | *g_pat = 0x0606060606060606; | ||
1161 | else | ||
1162 | *g_pat = vcpu->arch.pat; | ||
1163 | } | ||
1164 | |||
1165 | static u64 svm_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio) | ||
1166 | { | ||
1167 | u8 mtrr; | ||
1168 | |||
1169 | /* | ||
1170 | * 1. MMIO: trust guest MTRR, so same as item 3. | ||
1171 | * 2. No passthrough: always map as WB, and force guest PAT to WB as well | ||
1172 | * 3. Passthrough: can't guarantee the result, try to trust guest. | ||
1173 | */ | ||
1174 | if (!is_mmio && !kvm_arch_has_assigned_device(vcpu->kvm)) | ||
1175 | return 0; | ||
1176 | |||
1177 | if (!kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_CD_NW_CLEARED) && | ||
1178 | kvm_read_cr0(vcpu) & X86_CR0_CD) | ||
1179 | return _PAGE_NOCACHE; | ||
1180 | |||
1181 | mtrr = kvm_mtrr_get_guest_memory_type(vcpu, gfn); | ||
1182 | return mtrr2protval[mtrr]; | ||
1183 | } | ||
1184 | |||
1185 | static void init_vmcb(struct vcpu_svm *svm, bool init_event) | 1089 | static void init_vmcb(struct vcpu_svm *svm, bool init_event) |
1186 | { | 1090 | { |
1187 | struct vmcb_control_area *control = &svm->vmcb->control; | 1091 | struct vmcb_control_area *control = &svm->vmcb->control; |
@@ -1278,7 +1182,6 @@ static void init_vmcb(struct vcpu_svm *svm, bool init_event) | |||
1278 | clr_cr_intercept(svm, INTERCEPT_CR3_READ); | 1182 | clr_cr_intercept(svm, INTERCEPT_CR3_READ); |
1279 | clr_cr_intercept(svm, INTERCEPT_CR3_WRITE); | 1183 | clr_cr_intercept(svm, INTERCEPT_CR3_WRITE); |
1280 | save->g_pat = svm->vcpu.arch.pat; | 1184 | save->g_pat = svm->vcpu.arch.pat; |
1281 | svm_set_guest_pat(svm, &save->g_pat); | ||
1282 | save->cr3 = 0; | 1185 | save->cr3 = 0; |
1283 | save->cr4 = 0; | 1186 | save->cr4 = 0; |
1284 | } | 1187 | } |
@@ -1673,10 +1576,13 @@ static void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0) | |||
1673 | 1576 | ||
1674 | if (!vcpu->fpu_active) | 1577 | if (!vcpu->fpu_active) |
1675 | cr0 |= X86_CR0_TS; | 1578 | cr0 |= X86_CR0_TS; |
1676 | 1579 | /* | |
1677 | /* These are emulated via page tables. */ | 1580 | * re-enable caching here because the QEMU bios |
1678 | cr0 &= ~(X86_CR0_CD | X86_CR0_NW); | 1581 | * does not do it - this results in some delay at |
1679 | 1582 | * reboot | |
1583 | */ | ||
1584 | if (kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_CD_NW_CLEARED)) | ||
1585 | cr0 &= ~(X86_CR0_CD | X86_CR0_NW); | ||
1680 | svm->vmcb->save.cr0 = cr0; | 1586 | svm->vmcb->save.cr0 = cr0; |
1681 | mark_dirty(svm->vmcb, VMCB_CR); | 1587 | mark_dirty(svm->vmcb, VMCB_CR); |
1682 | update_cr0_intercept(svm); | 1588 | update_cr0_intercept(svm); |
@@ -3351,16 +3257,6 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr) | |||
3351 | case MSR_VM_IGNNE: | 3257 | case MSR_VM_IGNNE: |
3352 | vcpu_unimpl(vcpu, "unimplemented wrmsr: 0x%x data 0x%llx\n", ecx, data); | 3258 | vcpu_unimpl(vcpu, "unimplemented wrmsr: 0x%x data 0x%llx\n", ecx, data); |
3353 | break; | 3259 | break; |
3354 | case MSR_IA32_CR_PAT: | ||
3355 | if (npt_enabled) { | ||
3356 | if (!kvm_mtrr_valid(vcpu, MSR_IA32_CR_PAT, data)) | ||
3357 | return 1; | ||
3358 | vcpu->arch.pat = data; | ||
3359 | svm_set_guest_pat(svm, &svm->vmcb->save.g_pat); | ||
3360 | mark_dirty(svm->vmcb, VMCB_NPT); | ||
3361 | break; | ||
3362 | } | ||
3363 | /* fall through */ | ||
3364 | default: | 3260 | default: |
3365 | return kvm_set_msr_common(vcpu, msr); | 3261 | return kvm_set_msr_common(vcpu, msr); |
3366 | } | 3262 | } |
@@ -4195,6 +4091,11 @@ static bool svm_has_high_real_mode_segbase(void) | |||
4195 | return true; | 4091 | return true; |
4196 | } | 4092 | } |
4197 | 4093 | ||
4094 | static u64 svm_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio) | ||
4095 | { | ||
4096 | return 0; | ||
4097 | } | ||
4098 | |||
4198 | static void svm_cpuid_update(struct kvm_vcpu *vcpu) | 4099 | static void svm_cpuid_update(struct kvm_vcpu *vcpu) |
4199 | { | 4100 | { |
4200 | } | 4101 | } |
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index 64076740251e..06ef4908ba61 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c | |||
@@ -8617,17 +8617,22 @@ static u64 vmx_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio) | |||
8617 | u64 ipat = 0; | 8617 | u64 ipat = 0; |
8618 | 8618 | ||
8619 | /* For VT-d and EPT combination | 8619 | /* For VT-d and EPT combination |
8620 | * 1. MMIO: guest may want to apply WC, trust it. | 8620 | * 1. MMIO: always map as UC |
8621 | * 2. EPT with VT-d: | 8621 | * 2. EPT with VT-d: |
8622 | * a. VT-d without snooping control feature: can't guarantee the | 8622 | * a. VT-d without snooping control feature: can't guarantee the |
8623 | * result, try to trust guest. So the same as item 1. | 8623 | * result, try to trust guest. |
8624 | * b. VT-d with snooping control feature: snooping control feature of | 8624 | * b. VT-d with snooping control feature: snooping control feature of |
8625 | * VT-d engine can guarantee the cache correctness. Just set it | 8625 | * VT-d engine can guarantee the cache correctness. Just set it |
8626 | * to WB to keep consistent with host. So the same as item 3. | 8626 | * to WB to keep consistent with host. So the same as item 3. |
8627 | * 3. EPT without VT-d: always map as WB and set IPAT=1 to keep | 8627 | * 3. EPT without VT-d: always map as WB and set IPAT=1 to keep |
8628 | * consistent with host MTRR | 8628 | * consistent with host MTRR |
8629 | */ | 8629 | */ |
8630 | if (!is_mmio && !kvm_arch_has_noncoherent_dma(vcpu->kvm)) { | 8630 | if (is_mmio) { |
8631 | cache = MTRR_TYPE_UNCACHABLE; | ||
8632 | goto exit; | ||
8633 | } | ||
8634 | |||
8635 | if (!kvm_arch_has_noncoherent_dma(vcpu->kvm)) { | ||
8631 | ipat = VMX_EPT_IPAT_BIT; | 8636 | ipat = VMX_EPT_IPAT_BIT; |
8632 | cache = MTRR_TYPE_WRBACK; | 8637 | cache = MTRR_TYPE_WRBACK; |
8633 | goto exit; | 8638 | goto exit; |
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 991466bf8dee..92511d4b7236 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c | |||
@@ -1708,8 +1708,6 @@ static int kvm_guest_time_update(struct kvm_vcpu *v) | |||
1708 | vcpu->pvclock_set_guest_stopped_request = false; | 1708 | vcpu->pvclock_set_guest_stopped_request = false; |
1709 | } | 1709 | } |
1710 | 1710 | ||
1711 | pvclock_flags |= PVCLOCK_COUNTS_FROM_ZERO; | ||
1712 | |||
1713 | /* If the host uses TSC clocksource, then it is stable */ | 1711 | /* If the host uses TSC clocksource, then it is stable */ |
1714 | if (use_master_clock) | 1712 | if (use_master_clock) |
1715 | pvclock_flags |= PVCLOCK_TSC_STABLE_BIT; | 1713 | pvclock_flags |= PVCLOCK_TSC_STABLE_BIT; |
@@ -2007,8 +2005,6 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info) | |||
2007 | &vcpu->requests); | 2005 | &vcpu->requests); |
2008 | 2006 | ||
2009 | ka->boot_vcpu_runs_old_kvmclock = tmp; | 2007 | ka->boot_vcpu_runs_old_kvmclock = tmp; |
2010 | |||
2011 | ka->kvmclock_offset = -get_kernel_ns(); | ||
2012 | } | 2008 | } |
2013 | 2009 | ||
2014 | vcpu->arch.time = data; | 2010 | vcpu->arch.time = data; |
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c index 30564e2752d3..df48430c279b 100644 --- a/arch/x86/mm/init_64.c +++ b/arch/x86/mm/init_64.c | |||
@@ -1132,7 +1132,7 @@ void mark_rodata_ro(void) | |||
1132 | * has been zapped already via cleanup_highmem(). | 1132 | * has been zapped already via cleanup_highmem(). |
1133 | */ | 1133 | */ |
1134 | all_end = roundup((unsigned long)_brk_end, PMD_SIZE); | 1134 | all_end = roundup((unsigned long)_brk_end, PMD_SIZE); |
1135 | set_memory_nx(rodata_start, (all_end - rodata_start) >> PAGE_SHIFT); | 1135 | set_memory_nx(text_end, (all_end - text_end) >> PAGE_SHIFT); |
1136 | 1136 | ||
1137 | rodata_test(); | 1137 | rodata_test(); |
1138 | 1138 | ||
diff --git a/arch/x86/platform/efi/efi.c b/arch/x86/platform/efi/efi.c index 1db84c0758b7..6a28ded74211 100644 --- a/arch/x86/platform/efi/efi.c +++ b/arch/x86/platform/efi/efi.c | |||
@@ -705,6 +705,70 @@ out: | |||
705 | } | 705 | } |
706 | 706 | ||
707 | /* | 707 | /* |
708 | * Iterate the EFI memory map in reverse order because the regions | ||
709 | * will be mapped top-down. The end result is the same as if we had | ||
710 | * mapped things forward, but doesn't require us to change the | ||
711 | * existing implementation of efi_map_region(). | ||
712 | */ | ||
713 | static inline void *efi_map_next_entry_reverse(void *entry) | ||
714 | { | ||
715 | /* Initial call */ | ||
716 | if (!entry) | ||
717 | return memmap.map_end - memmap.desc_size; | ||
718 | |||
719 | entry -= memmap.desc_size; | ||
720 | if (entry < memmap.map) | ||
721 | return NULL; | ||
722 | |||
723 | return entry; | ||
724 | } | ||
725 | |||
726 | /* | ||
727 | * efi_map_next_entry - Return the next EFI memory map descriptor | ||
728 | * @entry: Previous EFI memory map descriptor | ||
729 | * | ||
730 | * This is a helper function to iterate over the EFI memory map, which | ||
731 | * we do in different orders depending on the current configuration. | ||
732 | * | ||
733 | * To begin traversing the memory map @entry must be %NULL. | ||
734 | * | ||
735 | * Returns %NULL when we reach the end of the memory map. | ||
736 | */ | ||
737 | static void *efi_map_next_entry(void *entry) | ||
738 | { | ||
739 | if (!efi_enabled(EFI_OLD_MEMMAP) && efi_enabled(EFI_64BIT)) { | ||
740 | /* | ||
741 | * Starting in UEFI v2.5 the EFI_PROPERTIES_TABLE | ||
742 | * config table feature requires us to map all entries | ||
743 | * in the same order as they appear in the EFI memory | ||
744 | * map. That is to say, entry N must have a lower | ||
745 | * virtual address than entry N+1. This is because the | ||
746 | * firmware toolchain leaves relative references in | ||
747 | * the code/data sections, which are split and become | ||
748 | * separate EFI memory regions. Mapping things | ||
749 | * out-of-order leads to the firmware accessing | ||
750 | * unmapped addresses. | ||
751 | * | ||
752 | * Since we need to map things this way whether or not | ||
753 | * the kernel actually makes use of | ||
754 | * EFI_PROPERTIES_TABLE, let's just switch to this | ||
755 | * scheme by default for 64-bit. | ||
756 | */ | ||
757 | return efi_map_next_entry_reverse(entry); | ||
758 | } | ||
759 | |||
760 | /* Initial call */ | ||
761 | if (!entry) | ||
762 | return memmap.map; | ||
763 | |||
764 | entry += memmap.desc_size; | ||
765 | if (entry >= memmap.map_end) | ||
766 | return NULL; | ||
767 | |||
768 | return entry; | ||
769 | } | ||
770 | |||
771 | /* | ||
708 | * Map the efi memory ranges of the runtime services and update new_mmap with | 772 | * Map the efi memory ranges of the runtime services and update new_mmap with |
709 | * virtual addresses. | 773 | * virtual addresses. |
710 | */ | 774 | */ |
@@ -714,7 +778,8 @@ static void * __init efi_map_regions(int *count, int *pg_shift) | |||
714 | unsigned long left = 0; | 778 | unsigned long left = 0; |
715 | efi_memory_desc_t *md; | 779 | efi_memory_desc_t *md; |
716 | 780 | ||
717 | for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) { | 781 | p = NULL; |
782 | while ((p = efi_map_next_entry(p))) { | ||
718 | md = p; | 783 | md = p; |
719 | if (!(md->attribute & EFI_MEMORY_RUNTIME)) { | 784 | if (!(md->attribute & EFI_MEMORY_RUNTIME)) { |
720 | #ifdef CONFIG_X86_64 | 785 | #ifdef CONFIG_X86_64 |
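The reverse iteration introduced above is just pointer arithmetic over a flat buffer: start at map_end minus one descriptor, step back by desc_size each call, and stop once the pointer drops below map. A minimal standalone sketch of that walk (plain userspace C with invented demo_* names and a made-up 16-byte descriptor size; not the kernel implementation) is:

#include <stddef.h>
#include <stdio.h>

/* Stand-ins for memmap.map, memmap.map_end and memmap.desc_size. */
static unsigned char map[4 * 16];                    /* four fake descriptors */
static unsigned char * const map_end = map + sizeof(map);
static const size_t desc_size = 16;

/* Walk the descriptor array backwards; NULL starts the walk, NULL ends it. */
static void *demo_map_next_entry_reverse(void *entry)
{
	if (!entry)                              /* initial call */
		return map_end - desc_size;

	entry = (unsigned char *)entry - desc_size;
	if ((unsigned char *)entry < map)
		return NULL;

	return entry;
}

int main(void)
{
	void *p = NULL;
	size_t i;

	/* Tag each fake descriptor with its index so the order is visible. */
	for (i = 0; i < 4; i++)
		map[i * desc_size] = (unsigned char)i;

	while ((p = demo_map_next_entry_reverse(p)) != NULL)
		printf("descriptor %u\n", *(unsigned char *)p);

	return 0;                                /* prints 3, 2, 1, 0 */
}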
diff --git a/arch/xtensa/include/asm/Kbuild b/arch/xtensa/include/asm/Kbuild index 63c223dff5f1..b56855a1382a 100644 --- a/arch/xtensa/include/asm/Kbuild +++ b/arch/xtensa/include/asm/Kbuild | |||
@@ -28,4 +28,5 @@ generic-y += statfs.h | |||
28 | generic-y += termios.h | 28 | generic-y += termios.h |
29 | generic-y += topology.h | 29 | generic-y += topology.h |
30 | generic-y += trace_clock.h | 30 | generic-y += trace_clock.h |
31 | generic-y += word-at-a-time.h | ||
31 | generic-y += xor.h | 32 | generic-y += xor.h |
diff --git a/block/blk-mq-cpumap.c b/block/blk-mq-cpumap.c index 1e28ddb656b8..8764c241e5bb 100644 --- a/block/blk-mq-cpumap.c +++ b/block/blk-mq-cpumap.c | |||
@@ -31,7 +31,8 @@ static int get_first_sibling(unsigned int cpu) | |||
31 | return cpu; | 31 | return cpu; |
32 | } | 32 | } |
33 | 33 | ||
34 | int blk_mq_update_queue_map(unsigned int *map, unsigned int nr_queues) | 34 | int blk_mq_update_queue_map(unsigned int *map, unsigned int nr_queues, |
35 | const struct cpumask *online_mask) | ||
35 | { | 36 | { |
36 | unsigned int i, nr_cpus, nr_uniq_cpus, queue, first_sibling; | 37 | unsigned int i, nr_cpus, nr_uniq_cpus, queue, first_sibling; |
37 | cpumask_var_t cpus; | 38 | cpumask_var_t cpus; |
@@ -41,7 +42,7 @@ int blk_mq_update_queue_map(unsigned int *map, unsigned int nr_queues) | |||
41 | 42 | ||
42 | cpumask_clear(cpus); | 43 | cpumask_clear(cpus); |
43 | nr_cpus = nr_uniq_cpus = 0; | 44 | nr_cpus = nr_uniq_cpus = 0; |
44 | for_each_online_cpu(i) { | 45 | for_each_cpu(i, online_mask) { |
45 | nr_cpus++; | 46 | nr_cpus++; |
46 | first_sibling = get_first_sibling(i); | 47 | first_sibling = get_first_sibling(i); |
47 | if (!cpumask_test_cpu(first_sibling, cpus)) | 48 | if (!cpumask_test_cpu(first_sibling, cpus)) |
@@ -51,7 +52,7 @@ int blk_mq_update_queue_map(unsigned int *map, unsigned int nr_queues) | |||
51 | 52 | ||
52 | queue = 0; | 53 | queue = 0; |
53 | for_each_possible_cpu(i) { | 54 | for_each_possible_cpu(i) { |
54 | if (!cpu_online(i)) { | 55 | if (!cpumask_test_cpu(i, online_mask)) { |
55 | map[i] = 0; | 56 | map[i] = 0; |
56 | continue; | 57 | continue; |
57 | } | 58 | } |
@@ -95,7 +96,7 @@ unsigned int *blk_mq_make_queue_map(struct blk_mq_tag_set *set) | |||
95 | if (!map) | 96 | if (!map) |
96 | return NULL; | 97 | return NULL; |
97 | 98 | ||
98 | if (!blk_mq_update_queue_map(map, set->nr_hw_queues)) | 99 | if (!blk_mq_update_queue_map(map, set->nr_hw_queues, cpu_online_mask)) |
99 | return map; | 100 | return map; |
100 | 101 | ||
101 | kfree(map); | 102 | kfree(map); |
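The extra online_mask argument above lets callers decide which CPUs count as online when the CPU-to-queue map is rebuilt; CPUs outside the mask fall back to queue 0. A rough standalone sketch of that mapping rule (toy demo_* names, a plain byte array instead of struct cpumask, and none of the sibling-collapsing logic) is:

#include <stdio.h>

#define NR_CPUS 8

/*
 * Toy queue-map update: CPUs absent from online_mask all land on queue 0,
 * the remaining CPUs are spread round-robin over the hardware queues.
 */
static void demo_update_queue_map(unsigned int *map, unsigned int nr_queues,
				  const unsigned char *online_mask)
{
	unsigned int i, queue = 0;

	for (i = 0; i < NR_CPUS; i++) {
		if (!online_mask[i]) {
			map[i] = 0;
			continue;
		}
		map[i] = queue % nr_queues;
		queue++;
	}
}

int main(void)
{
	unsigned int map[NR_CPUS];
	/* Pretend CPUs 0-3 are online and 4-7 are offline. */
	unsigned char online[NR_CPUS] = { 1, 1, 1, 1, 0, 0, 0, 0 };
	unsigned int i;

	demo_update_queue_map(map, 2, online);
	for (i = 0; i < NR_CPUS; i++)
		printf("cpu %u -> hw queue %u\n", i, map[i]);
	return 0;
}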
diff --git a/block/blk-mq-sysfs.c b/block/blk-mq-sysfs.c index 279c5d674edf..788fffd9b409 100644 --- a/block/blk-mq-sysfs.c +++ b/block/blk-mq-sysfs.c | |||
@@ -229,8 +229,6 @@ static ssize_t blk_mq_hw_sysfs_cpus_show(struct blk_mq_hw_ctx *hctx, char *page) | |||
229 | unsigned int i, first = 1; | 229 | unsigned int i, first = 1; |
230 | ssize_t ret = 0; | 230 | ssize_t ret = 0; |
231 | 231 | ||
232 | blk_mq_disable_hotplug(); | ||
233 | |||
234 | for_each_cpu(i, hctx->cpumask) { | 232 | for_each_cpu(i, hctx->cpumask) { |
235 | if (first) | 233 | if (first) |
236 | ret += sprintf(ret + page, "%u", i); | 234 | ret += sprintf(ret + page, "%u", i); |
@@ -240,8 +238,6 @@ static ssize_t blk_mq_hw_sysfs_cpus_show(struct blk_mq_hw_ctx *hctx, char *page) | |||
240 | first = 0; | 238 | first = 0; |
241 | } | 239 | } |
242 | 240 | ||
243 | blk_mq_enable_hotplug(); | ||
244 | |||
245 | ret += sprintf(ret + page, "\n"); | 241 | ret += sprintf(ret + page, "\n"); |
246 | return ret; | 242 | return ret; |
247 | } | 243 | } |
@@ -343,7 +339,7 @@ static void blk_mq_unregister_hctx(struct blk_mq_hw_ctx *hctx) | |||
343 | struct blk_mq_ctx *ctx; | 339 | struct blk_mq_ctx *ctx; |
344 | int i; | 340 | int i; |
345 | 341 | ||
346 | if (!hctx->nr_ctx || !(hctx->flags & BLK_MQ_F_SYSFS_UP)) | 342 | if (!hctx->nr_ctx) |
347 | return; | 343 | return; |
348 | 344 | ||
349 | hctx_for_each_ctx(hctx, ctx, i) | 345 | hctx_for_each_ctx(hctx, ctx, i) |
@@ -358,7 +354,7 @@ static int blk_mq_register_hctx(struct blk_mq_hw_ctx *hctx) | |||
358 | struct blk_mq_ctx *ctx; | 354 | struct blk_mq_ctx *ctx; |
359 | int i, ret; | 355 | int i, ret; |
360 | 356 | ||
361 | if (!hctx->nr_ctx || !(hctx->flags & BLK_MQ_F_SYSFS_UP)) | 357 | if (!hctx->nr_ctx) |
362 | return 0; | 358 | return 0; |
363 | 359 | ||
364 | ret = kobject_add(&hctx->kobj, &q->mq_kobj, "%u", hctx->queue_num); | 360 | ret = kobject_add(&hctx->kobj, &q->mq_kobj, "%u", hctx->queue_num); |
@@ -381,6 +377,8 @@ void blk_mq_unregister_disk(struct gendisk *disk) | |||
381 | struct blk_mq_ctx *ctx; | 377 | struct blk_mq_ctx *ctx; |
382 | int i, j; | 378 | int i, j; |
383 | 379 | ||
380 | blk_mq_disable_hotplug(); | ||
381 | |||
384 | queue_for_each_hw_ctx(q, hctx, i) { | 382 | queue_for_each_hw_ctx(q, hctx, i) { |
385 | blk_mq_unregister_hctx(hctx); | 383 | blk_mq_unregister_hctx(hctx); |
386 | 384 | ||
@@ -395,6 +393,9 @@ void blk_mq_unregister_disk(struct gendisk *disk) | |||
395 | kobject_put(&q->mq_kobj); | 393 | kobject_put(&q->mq_kobj); |
396 | 394 | ||
397 | kobject_put(&disk_to_dev(disk)->kobj); | 395 | kobject_put(&disk_to_dev(disk)->kobj); |
396 | |||
397 | q->mq_sysfs_init_done = false; | ||
398 | blk_mq_enable_hotplug(); | ||
398 | } | 399 | } |
399 | 400 | ||
400 | static void blk_mq_sysfs_init(struct request_queue *q) | 401 | static void blk_mq_sysfs_init(struct request_queue *q) |
@@ -425,27 +426,30 @@ int blk_mq_register_disk(struct gendisk *disk) | |||
425 | struct blk_mq_hw_ctx *hctx; | 426 | struct blk_mq_hw_ctx *hctx; |
426 | int ret, i; | 427 | int ret, i; |
427 | 428 | ||
429 | blk_mq_disable_hotplug(); | ||
430 | |||
428 | blk_mq_sysfs_init(q); | 431 | blk_mq_sysfs_init(q); |
429 | 432 | ||
430 | ret = kobject_add(&q->mq_kobj, kobject_get(&dev->kobj), "%s", "mq"); | 433 | ret = kobject_add(&q->mq_kobj, kobject_get(&dev->kobj), "%s", "mq"); |
431 | if (ret < 0) | 434 | if (ret < 0) |
432 | return ret; | 435 | goto out; |
433 | 436 | ||
434 | kobject_uevent(&q->mq_kobj, KOBJ_ADD); | 437 | kobject_uevent(&q->mq_kobj, KOBJ_ADD); |
435 | 438 | ||
436 | queue_for_each_hw_ctx(q, hctx, i) { | 439 | queue_for_each_hw_ctx(q, hctx, i) { |
437 | hctx->flags |= BLK_MQ_F_SYSFS_UP; | ||
438 | ret = blk_mq_register_hctx(hctx); | 440 | ret = blk_mq_register_hctx(hctx); |
439 | if (ret) | 441 | if (ret) |
440 | break; | 442 | break; |
441 | } | 443 | } |
442 | 444 | ||
443 | if (ret) { | 445 | if (ret) |
444 | blk_mq_unregister_disk(disk); | 446 | blk_mq_unregister_disk(disk); |
445 | return ret; | 447 | else |
446 | } | 448 | q->mq_sysfs_init_done = true; |
449 | out: | ||
450 | blk_mq_enable_hotplug(); | ||
447 | 451 | ||
448 | return 0; | 452 | return ret; |
449 | } | 453 | } |
450 | EXPORT_SYMBOL_GPL(blk_mq_register_disk); | 454 | EXPORT_SYMBOL_GPL(blk_mq_register_disk); |
451 | 455 | ||
@@ -454,6 +458,9 @@ void blk_mq_sysfs_unregister(struct request_queue *q) | |||
454 | struct blk_mq_hw_ctx *hctx; | 458 | struct blk_mq_hw_ctx *hctx; |
455 | int i; | 459 | int i; |
456 | 460 | ||
461 | if (!q->mq_sysfs_init_done) | ||
462 | return; | ||
463 | |||
457 | queue_for_each_hw_ctx(q, hctx, i) | 464 | queue_for_each_hw_ctx(q, hctx, i) |
458 | blk_mq_unregister_hctx(hctx); | 465 | blk_mq_unregister_hctx(hctx); |
459 | } | 466 | } |
@@ -463,6 +470,9 @@ int blk_mq_sysfs_register(struct request_queue *q) | |||
463 | struct blk_mq_hw_ctx *hctx; | 470 | struct blk_mq_hw_ctx *hctx; |
464 | int i, ret = 0; | 471 | int i, ret = 0; |
465 | 472 | ||
473 | if (!q->mq_sysfs_init_done) | ||
474 | return ret; | ||
475 | |||
466 | queue_for_each_hw_ctx(q, hctx, i) { | 476 | queue_for_each_hw_ctx(q, hctx, i) { |
467 | ret = blk_mq_register_hctx(hctx); | 477 | ret = blk_mq_register_hctx(hctx); |
468 | if (ret) | 478 | if (ret) |
diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c index 9115c6d59948..ed96474d75cb 100644 --- a/block/blk-mq-tag.c +++ b/block/blk-mq-tag.c | |||
@@ -471,17 +471,30 @@ void blk_mq_all_tag_busy_iter(struct blk_mq_tags *tags, busy_tag_iter_fn *fn, | |||
471 | } | 471 | } |
472 | EXPORT_SYMBOL(blk_mq_all_tag_busy_iter); | 472 | EXPORT_SYMBOL(blk_mq_all_tag_busy_iter); |
473 | 473 | ||
474 | void blk_mq_tag_busy_iter(struct blk_mq_hw_ctx *hctx, busy_iter_fn *fn, | 474 | void blk_mq_queue_tag_busy_iter(struct request_queue *q, busy_iter_fn *fn, |
475 | void *priv) | 475 | void *priv) |
476 | { | 476 | { |
477 | struct blk_mq_tags *tags = hctx->tags; | 477 | struct blk_mq_hw_ctx *hctx; |
478 | int i; | ||
479 | |||
480 | |||
481 | queue_for_each_hw_ctx(q, hctx, i) { | ||
482 | struct blk_mq_tags *tags = hctx->tags; | ||
483 | |||
484 | /* | ||
485 | * If no software queues are currently mapped to this | ||
486 | * hardware queue, there's nothing to check | ||
487 | */ | ||
488 | if (!blk_mq_hw_queue_mapped(hctx)) | ||
489 | continue; | ||
490 | |||
491 | if (tags->nr_reserved_tags) | ||
492 | bt_for_each(hctx, &tags->breserved_tags, 0, fn, priv, true); | ||
493 | bt_for_each(hctx, &tags->bitmap_tags, tags->nr_reserved_tags, fn, priv, | ||
494 | false); | ||
495 | } | ||
478 | 496 | ||
479 | if (tags->nr_reserved_tags) | ||
480 | bt_for_each(hctx, &tags->breserved_tags, 0, fn, priv, true); | ||
481 | bt_for_each(hctx, &tags->bitmap_tags, tags->nr_reserved_tags, fn, priv, | ||
482 | false); | ||
483 | } | 497 | } |
484 | EXPORT_SYMBOL(blk_mq_tag_busy_iter); | ||
485 | 498 | ||
486 | static unsigned int bt_unused_tags(struct blk_mq_bitmap_tags *bt) | 499 | static unsigned int bt_unused_tags(struct blk_mq_bitmap_tags *bt) |
487 | { | 500 | { |
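The new blk_mq_queue_tag_busy_iter() walks every hardware context of a queue, skips contexts with no software queues mapped, and runs the callback over each busy tag. A simplified standalone model of that shape (invented demo_* types and fixed-size arrays rather than the real tag bitmaps) is:

#include <stdbool.h>
#include <stdio.h>

#define NR_HW_QUEUES 2
#define NR_TAGS      4

struct demo_hctx {
	bool mapped;                 /* stands in for blk_mq_hw_queue_mapped() */
	bool busy[NR_TAGS];          /* stands in for the tag bitmaps          */
};

typedef void (*demo_busy_fn)(unsigned int hwq, unsigned int tag, void *priv);

/* Walk every hardware queue of the "request queue", like the new helper. */
static void demo_queue_tag_busy_iter(struct demo_hctx *hctxs, demo_busy_fn fn,
				     void *priv)
{
	unsigned int i, tag;

	for (i = 0; i < NR_HW_QUEUES; i++) {
		if (!hctxs[i].mapped)        /* nothing to check on this hctx */
			continue;
		for (tag = 0; tag < NR_TAGS; tag++)
			if (hctxs[i].busy[tag])
				fn(i, tag, priv);
	}
}

static void demo_check_expired(unsigned int hwq, unsigned int tag, void *priv)
{
	(void)priv;
	printf("hw queue %u: tag %u is busy\n", hwq, tag);
}

int main(void)
{
	struct demo_hctx hctxs[NR_HW_QUEUES] = {
		{ .mapped = true,  .busy = { false, true, false, true } },
		{ .mapped = false, .busy = { true,  true, true,  true } },
	};

	demo_queue_tag_busy_iter(hctxs, demo_check_expired, NULL);
	return 0;
}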
diff --git a/block/blk-mq-tag.h b/block/blk-mq-tag.h index 9eb2cf4f01cb..d468a79f2c4a 100644 --- a/block/blk-mq-tag.h +++ b/block/blk-mq-tag.h | |||
@@ -58,6 +58,8 @@ extern ssize_t blk_mq_tag_sysfs_show(struct blk_mq_tags *tags, char *page); | |||
58 | extern void blk_mq_tag_init_last_tag(struct blk_mq_tags *tags, unsigned int *last_tag); | 58 | extern void blk_mq_tag_init_last_tag(struct blk_mq_tags *tags, unsigned int *last_tag); |
59 | extern int blk_mq_tag_update_depth(struct blk_mq_tags *tags, unsigned int depth); | 59 | extern int blk_mq_tag_update_depth(struct blk_mq_tags *tags, unsigned int depth); |
60 | extern void blk_mq_tag_wakeup_all(struct blk_mq_tags *tags, bool); | 60 | extern void blk_mq_tag_wakeup_all(struct blk_mq_tags *tags, bool); |
61 | void blk_mq_queue_tag_busy_iter(struct request_queue *q, busy_iter_fn *fn, | ||
62 | void *priv); | ||
61 | 63 | ||
62 | enum { | 64 | enum { |
63 | BLK_MQ_TAG_CACHE_MIN = 1, | 65 | BLK_MQ_TAG_CACHE_MIN = 1, |
diff --git a/block/blk-mq.c b/block/blk-mq.c index f2d67b4047a0..7785ae96267a 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c | |||
@@ -393,14 +393,16 @@ void __blk_mq_complete_request(struct request *rq) | |||
393 | * Ends all I/O on a request. It does not handle partial completions. | 393 | * Ends all I/O on a request. It does not handle partial completions. |
394 | * The actual completion happens out-of-order, through a IPI handler. | 394 | * The actual completion happens out-of-order, through a IPI handler. |
395 | **/ | 395 | **/ |
396 | void blk_mq_complete_request(struct request *rq) | 396 | void blk_mq_complete_request(struct request *rq, int error) |
397 | { | 397 | { |
398 | struct request_queue *q = rq->q; | 398 | struct request_queue *q = rq->q; |
399 | 399 | ||
400 | if (unlikely(blk_should_fake_timeout(q))) | 400 | if (unlikely(blk_should_fake_timeout(q))) |
401 | return; | 401 | return; |
402 | if (!blk_mark_rq_complete(rq)) | 402 | if (!blk_mark_rq_complete(rq)) { |
403 | rq->errors = error; | ||
403 | __blk_mq_complete_request(rq); | 404 | __blk_mq_complete_request(rq); |
405 | } | ||
404 | } | 406 | } |
405 | EXPORT_SYMBOL(blk_mq_complete_request); | 407 | EXPORT_SYMBOL(blk_mq_complete_request); |
406 | 408 | ||
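With the new signature, the error value travels with the completion call and is only stored once blk_mark_rq_complete() confirms this caller won the completion race. A small userspace model of that ordering (toy demo_* types, not the block layer itself) is:

#include <stdbool.h>
#include <stdio.h>

struct demo_request {
	bool complete;     /* stands in for the REQ_ATOM_COMPLETE bit */
	int  errors;
};

/* Returns true if the request was already marked complete by someone else. */
static bool demo_mark_rq_complete(struct demo_request *rq)
{
	bool was_complete = rq->complete;

	rq->complete = true;
	return was_complete;
}

/* New-style completion: the error is handed in and only stored by the winner. */
static void demo_complete_request(struct demo_request *rq, int error)
{
	if (!demo_mark_rq_complete(rq)) {
		rq->errors = error;
		printf("completing request, errors=%d\n", rq->errors);
	} else {
		printf("already completed, error %d dropped\n", error);
	}
}

int main(void)
{
	struct demo_request rq = { .complete = false, .errors = 0 };

	demo_complete_request(&rq, -5 /* -EIO */);
	demo_complete_request(&rq, 0);   /* second completion is ignored */
	return 0;
}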
@@ -616,10 +618,8 @@ static void blk_mq_check_expired(struct blk_mq_hw_ctx *hctx, | |||
616 | * If a request wasn't started before the queue was | 618 | * If a request wasn't started before the queue was |
617 | * marked dying, kill it here or it'll go unnoticed. | 619 | * marked dying, kill it here or it'll go unnoticed. |
618 | */ | 620 | */ |
619 | if (unlikely(blk_queue_dying(rq->q))) { | 621 | if (unlikely(blk_queue_dying(rq->q))) |
620 | rq->errors = -EIO; | 622 | blk_mq_complete_request(rq, -EIO); |
621 | blk_mq_complete_request(rq); | ||
622 | } | ||
623 | return; | 623 | return; |
624 | } | 624 | } |
625 | if (rq->cmd_flags & REQ_NO_TIMEOUT) | 625 | if (rq->cmd_flags & REQ_NO_TIMEOUT) |
@@ -641,24 +641,16 @@ static void blk_mq_rq_timer(unsigned long priv) | |||
641 | .next = 0, | 641 | .next = 0, |
642 | .next_set = 0, | 642 | .next_set = 0, |
643 | }; | 643 | }; |
644 | struct blk_mq_hw_ctx *hctx; | ||
645 | int i; | 644 | int i; |
646 | 645 | ||
647 | queue_for_each_hw_ctx(q, hctx, i) { | 646 | blk_mq_queue_tag_busy_iter(q, blk_mq_check_expired, &data); |
648 | /* | ||
649 | * If not software queues are currently mapped to this | ||
650 | * hardware queue, there's nothing to check | ||
651 | */ | ||
652 | if (!blk_mq_hw_queue_mapped(hctx)) | ||
653 | continue; | ||
654 | |||
655 | blk_mq_tag_busy_iter(hctx, blk_mq_check_expired, &data); | ||
656 | } | ||
657 | 647 | ||
658 | if (data.next_set) { | 648 | if (data.next_set) { |
659 | data.next = blk_rq_timeout(round_jiffies_up(data.next)); | 649 | data.next = blk_rq_timeout(round_jiffies_up(data.next)); |
660 | mod_timer(&q->timeout, data.next); | 650 | mod_timer(&q->timeout, data.next); |
661 | } else { | 651 | } else { |
652 | struct blk_mq_hw_ctx *hctx; | ||
653 | |||
662 | queue_for_each_hw_ctx(q, hctx, i) { | 654 | queue_for_each_hw_ctx(q, hctx, i) { |
663 | /* the hctx may be unmapped, so check it here */ | 655 | /* the hctx may be unmapped, so check it here */ |
664 | if (blk_mq_hw_queue_mapped(hctx)) | 656 | if (blk_mq_hw_queue_mapped(hctx)) |
@@ -1789,13 +1781,19 @@ static void blk_mq_init_cpu_queues(struct request_queue *q, | |||
1789 | } | 1781 | } |
1790 | } | 1782 | } |
1791 | 1783 | ||
1792 | static void blk_mq_map_swqueue(struct request_queue *q) | 1784 | static void blk_mq_map_swqueue(struct request_queue *q, |
1785 | const struct cpumask *online_mask) | ||
1793 | { | 1786 | { |
1794 | unsigned int i; | 1787 | unsigned int i; |
1795 | struct blk_mq_hw_ctx *hctx; | 1788 | struct blk_mq_hw_ctx *hctx; |
1796 | struct blk_mq_ctx *ctx; | 1789 | struct blk_mq_ctx *ctx; |
1797 | struct blk_mq_tag_set *set = q->tag_set; | 1790 | struct blk_mq_tag_set *set = q->tag_set; |
1798 | 1791 | ||
1792 | /* | ||
1792 | * Avoid others reading incomplete hctx->cpumask through sysfs | ||
1794 | */ | ||
1795 | mutex_lock(&q->sysfs_lock); | ||
1796 | |||
1799 | queue_for_each_hw_ctx(q, hctx, i) { | 1797 | queue_for_each_hw_ctx(q, hctx, i) { |
1800 | cpumask_clear(hctx->cpumask); | 1798 | cpumask_clear(hctx->cpumask); |
1801 | hctx->nr_ctx = 0; | 1799 | hctx->nr_ctx = 0; |
@@ -1806,16 +1804,17 @@ static void blk_mq_map_swqueue(struct request_queue *q) | |||
1806 | */ | 1804 | */ |
1807 | queue_for_each_ctx(q, ctx, i) { | 1805 | queue_for_each_ctx(q, ctx, i) { |
1808 | /* If the cpu isn't online, the cpu is mapped to first hctx */ | 1806 | /* If the cpu isn't online, the cpu is mapped to first hctx */ |
1809 | if (!cpu_online(i)) | 1807 | if (!cpumask_test_cpu(i, online_mask)) |
1810 | continue; | 1808 | continue; |
1811 | 1809 | ||
1812 | hctx = q->mq_ops->map_queue(q, i); | 1810 | hctx = q->mq_ops->map_queue(q, i); |
1813 | cpumask_set_cpu(i, hctx->cpumask); | 1811 | cpumask_set_cpu(i, hctx->cpumask); |
1814 | cpumask_set_cpu(i, hctx->tags->cpumask); | ||
1815 | ctx->index_hw = hctx->nr_ctx; | 1812 | ctx->index_hw = hctx->nr_ctx; |
1816 | hctx->ctxs[hctx->nr_ctx++] = ctx; | 1813 | hctx->ctxs[hctx->nr_ctx++] = ctx; |
1817 | } | 1814 | } |
1818 | 1815 | ||
1816 | mutex_unlock(&q->sysfs_lock); | ||
1817 | |||
1819 | queue_for_each_hw_ctx(q, hctx, i) { | 1818 | queue_for_each_hw_ctx(q, hctx, i) { |
1820 | struct blk_mq_ctxmap *map = &hctx->ctx_map; | 1819 | struct blk_mq_ctxmap *map = &hctx->ctx_map; |
1821 | 1820 | ||
@@ -1851,6 +1850,14 @@ static void blk_mq_map_swqueue(struct request_queue *q) | |||
1851 | hctx->next_cpu = cpumask_first(hctx->cpumask); | 1850 | hctx->next_cpu = cpumask_first(hctx->cpumask); |
1852 | hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH; | 1851 | hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH; |
1853 | } | 1852 | } |
1853 | |||
1854 | queue_for_each_ctx(q, ctx, i) { | ||
1855 | if (!cpumask_test_cpu(i, online_mask)) | ||
1856 | continue; | ||
1857 | |||
1858 | hctx = q->mq_ops->map_queue(q, i); | ||
1859 | cpumask_set_cpu(i, hctx->tags->cpumask); | ||
1860 | } | ||
1854 | } | 1861 | } |
1855 | 1862 | ||
1856 | static void blk_mq_update_tag_set_depth(struct blk_mq_tag_set *set) | 1863 | static void blk_mq_update_tag_set_depth(struct blk_mq_tag_set *set) |
@@ -1918,6 +1925,9 @@ void blk_mq_release(struct request_queue *q) | |||
1918 | kfree(hctx); | 1925 | kfree(hctx); |
1919 | } | 1926 | } |
1920 | 1927 | ||
1928 | kfree(q->mq_map); | ||
1929 | q->mq_map = NULL; | ||
1930 | |||
1921 | kfree(q->queue_hw_ctx); | 1931 | kfree(q->queue_hw_ctx); |
1922 | 1932 | ||
1923 | /* ctx kobj stays in queue_ctx */ | 1933 | /* ctx kobj stays in queue_ctx */ |
@@ -2027,13 +2037,15 @@ struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set, | |||
2027 | if (blk_mq_init_hw_queues(q, set)) | 2037 | if (blk_mq_init_hw_queues(q, set)) |
2028 | goto err_hctxs; | 2038 | goto err_hctxs; |
2029 | 2039 | ||
2040 | get_online_cpus(); | ||
2030 | mutex_lock(&all_q_mutex); | 2041 | mutex_lock(&all_q_mutex); |
2031 | list_add_tail(&q->all_q_node, &all_q_list); | ||
2032 | mutex_unlock(&all_q_mutex); | ||
2033 | 2042 | ||
2043 | list_add_tail(&q->all_q_node, &all_q_list); | ||
2034 | blk_mq_add_queue_tag_set(set, q); | 2044 | blk_mq_add_queue_tag_set(set, q); |
2045 | blk_mq_map_swqueue(q, cpu_online_mask); | ||
2035 | 2046 | ||
2036 | blk_mq_map_swqueue(q); | 2047 | mutex_unlock(&all_q_mutex); |
2048 | put_online_cpus(); | ||
2037 | 2049 | ||
2038 | return q; | 2050 | return q; |
2039 | 2051 | ||
@@ -2057,30 +2069,27 @@ void blk_mq_free_queue(struct request_queue *q) | |||
2057 | { | 2069 | { |
2058 | struct blk_mq_tag_set *set = q->tag_set; | 2070 | struct blk_mq_tag_set *set = q->tag_set; |
2059 | 2071 | ||
2072 | mutex_lock(&all_q_mutex); | ||
2073 | list_del_init(&q->all_q_node); | ||
2074 | mutex_unlock(&all_q_mutex); | ||
2075 | |||
2060 | blk_mq_del_queue_tag_set(q); | 2076 | blk_mq_del_queue_tag_set(q); |
2061 | 2077 | ||
2062 | blk_mq_exit_hw_queues(q, set, set->nr_hw_queues); | 2078 | blk_mq_exit_hw_queues(q, set, set->nr_hw_queues); |
2063 | blk_mq_free_hw_queues(q, set); | 2079 | blk_mq_free_hw_queues(q, set); |
2064 | 2080 | ||
2065 | percpu_ref_exit(&q->mq_usage_counter); | 2081 | percpu_ref_exit(&q->mq_usage_counter); |
2066 | |||
2067 | kfree(q->mq_map); | ||
2068 | |||
2069 | q->mq_map = NULL; | ||
2070 | |||
2071 | mutex_lock(&all_q_mutex); | ||
2072 | list_del_init(&q->all_q_node); | ||
2073 | mutex_unlock(&all_q_mutex); | ||
2074 | } | 2082 | } |
2075 | 2083 | ||
2076 | /* Basically redo blk_mq_init_queue with queue frozen */ | 2084 | /* Basically redo blk_mq_init_queue with queue frozen */ |
2077 | static void blk_mq_queue_reinit(struct request_queue *q) | 2085 | static void blk_mq_queue_reinit(struct request_queue *q, |
2086 | const struct cpumask *online_mask) | ||
2078 | { | 2087 | { |
2079 | WARN_ON_ONCE(!atomic_read(&q->mq_freeze_depth)); | 2088 | WARN_ON_ONCE(!atomic_read(&q->mq_freeze_depth)); |
2080 | 2089 | ||
2081 | blk_mq_sysfs_unregister(q); | 2090 | blk_mq_sysfs_unregister(q); |
2082 | 2091 | ||
2083 | blk_mq_update_queue_map(q->mq_map, q->nr_hw_queues); | 2092 | blk_mq_update_queue_map(q->mq_map, q->nr_hw_queues, online_mask); |
2084 | 2093 | ||
2085 | /* | 2094 | /* |
2086 | * redo blk_mq_init_cpu_queues and blk_mq_init_hw_queues. FIXME: maybe | 2095 | * redo blk_mq_init_cpu_queues and blk_mq_init_hw_queues. FIXME: maybe |
@@ -2088,7 +2097,7 @@ static void blk_mq_queue_reinit(struct request_queue *q) | |||
2088 | * involves free and re-allocate memory, worthy doing?) | 2097 | * involves free and re-allocate memory, worthy doing?) |
2089 | */ | 2098 | */ |
2090 | 2099 | ||
2091 | blk_mq_map_swqueue(q); | 2100 | blk_mq_map_swqueue(q, online_mask); |
2092 | 2101 | ||
2093 | blk_mq_sysfs_register(q); | 2102 | blk_mq_sysfs_register(q); |
2094 | } | 2103 | } |
@@ -2097,16 +2106,43 @@ static int blk_mq_queue_reinit_notify(struct notifier_block *nb, | |||
2097 | unsigned long action, void *hcpu) | 2106 | unsigned long action, void *hcpu) |
2098 | { | 2107 | { |
2099 | struct request_queue *q; | 2108 | struct request_queue *q; |
2109 | int cpu = (unsigned long)hcpu; | ||
2110 | /* | ||
2111 | * New online cpumask which is going to be set in this hotplug event. | ||
2112 | * Declare this cpumask as global as cpu-hotplug operation is invoked | ||
2113 | * one-by-one and dynamically allocating this could result in a failure. | ||
2114 | */ | ||
2115 | static struct cpumask online_new; | ||
2100 | 2116 | ||
2101 | /* | 2117 | /* |
2102 | * Before new mappings are established, hotadded cpu might already | 2118 | * Before hotadded cpu starts handling requests, new mappings must |
2103 | * start handling requests. This doesn't break anything as we map | 2119 | * be established. Otherwise, these requests in hw queue might |
2104 | * offline CPUs to first hardware queue. We will re-init the queue | 2120 | * never be dispatched. |
2105 | * below to get optimal settings. | 2121 | * |
2122 | * For example, there is a single hw queue (hctx) and two CPU queues | ||
2123 | * (ctx0 for CPU0, and ctx1 for CPU1). | ||
2124 | * | ||
2125 | * Now CPU1 is just onlined and a request is inserted into | ||
2126 | * ctx1->rq_list and set bit0 in pending bitmap as ctx1->index_hw is | ||
2127 | * still zero. | ||
2128 | * | ||
2129 | * And then while running hw queue, flush_busy_ctxs() finds bit0 is | ||
2130 | * set in pending bitmap and tries to retrieve requests in | ||
2131 | * hctx->ctxs[0]->rq_list. But hctx->ctxs[0] is a pointer to ctx0, | ||
2132 | * so the request in ctx1->rq_list is ignored. | ||
2106 | */ | 2133 | */ |
2107 | if (action != CPU_DEAD && action != CPU_DEAD_FROZEN && | 2134 | switch (action & ~CPU_TASKS_FROZEN) { |
2108 | action != CPU_ONLINE && action != CPU_ONLINE_FROZEN) | 2135 | case CPU_DEAD: |
2136 | case CPU_UP_CANCELED: | ||
2137 | cpumask_copy(&online_new, cpu_online_mask); | ||
2138 | break; | ||
2139 | case CPU_UP_PREPARE: | ||
2140 | cpumask_copy(&online_new, cpu_online_mask); | ||
2141 | cpumask_set_cpu(cpu, &online_new); | ||
2142 | break; | ||
2143 | default: | ||
2109 | return NOTIFY_OK; | 2144 | return NOTIFY_OK; |
2145 | } | ||
2110 | 2146 | ||
2111 | mutex_lock(&all_q_mutex); | 2147 | mutex_lock(&all_q_mutex); |
2112 | 2148 | ||
@@ -2130,7 +2166,7 @@ static int blk_mq_queue_reinit_notify(struct notifier_block *nb, | |||
2130 | } | 2166 | } |
2131 | 2167 | ||
2132 | list_for_each_entry(q, &all_q_list, all_q_node) | 2168 | list_for_each_entry(q, &all_q_list, all_q_node) |
2133 | blk_mq_queue_reinit(q); | 2169 | blk_mq_queue_reinit(q, &online_new); |
2134 | 2170 | ||
2135 | list_for_each_entry(q, &all_q_list, all_q_node) | 2171 | list_for_each_entry(q, &all_q_list, all_q_node) |
2136 | blk_mq_unfreeze_queue(q); | 2172 | blk_mq_unfreeze_queue(q); |
diff --git a/block/blk-mq.h b/block/blk-mq.h index 6a48c4c0d8a2..f4fea7964910 100644 --- a/block/blk-mq.h +++ b/block/blk-mq.h | |||
@@ -51,7 +51,8 @@ void blk_mq_disable_hotplug(void); | |||
51 | * CPU -> queue mappings | 51 | * CPU -> queue mappings |
52 | */ | 52 | */ |
53 | extern unsigned int *blk_mq_make_queue_map(struct blk_mq_tag_set *set); | 53 | extern unsigned int *blk_mq_make_queue_map(struct blk_mq_tag_set *set); |
54 | extern int blk_mq_update_queue_map(unsigned int *map, unsigned int nr_queues); | 54 | extern int blk_mq_update_queue_map(unsigned int *map, unsigned int nr_queues, |
55 | const struct cpumask *online_mask); | ||
55 | extern int blk_mq_hw_queue_to_node(unsigned int *map, unsigned int); | 56 | extern int blk_mq_hw_queue_to_node(unsigned int *map, unsigned int); |
56 | 57 | ||
57 | /* | 58 | /* |
diff --git a/crypto/asymmetric_keys/x509_public_key.c b/crypto/asymmetric_keys/x509_public_key.c index 6d88dd15c98d..197096632412 100644 --- a/crypto/asymmetric_keys/x509_public_key.c +++ b/crypto/asymmetric_keys/x509_public_key.c | |||
@@ -332,10 +332,6 @@ static int x509_key_preparse(struct key_preparsed_payload *prep) | |||
332 | srlen = cert->raw_serial_size; | 332 | srlen = cert->raw_serial_size; |
333 | q = cert->raw_serial; | 333 | q = cert->raw_serial; |
334 | } | 334 | } |
335 | if (srlen > 1 && *q == 0) { | ||
336 | srlen--; | ||
337 | q++; | ||
338 | } | ||
339 | 335 | ||
340 | ret = -ENOMEM; | 336 | ret = -ENOMEM; |
341 | desc = kmalloc(sulen + 2 + srlen * 2 + 1, GFP_KERNEL); | 337 | desc = kmalloc(sulen + 2 + srlen * 2 + 1, GFP_KERNEL); |
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c index 2614a839c60d..42c66b64c12c 100644 --- a/drivers/acpi/ec.c +++ b/drivers/acpi/ec.c | |||
@@ -1044,8 +1044,10 @@ static int acpi_ec_query(struct acpi_ec *ec, u8 *data) | |||
1044 | goto err_exit; | 1044 | goto err_exit; |
1045 | 1045 | ||
1046 | mutex_lock(&ec->mutex); | 1046 | mutex_lock(&ec->mutex); |
1047 | result = -ENODATA; | ||
1047 | list_for_each_entry(handler, &ec->list, node) { | 1048 | list_for_each_entry(handler, &ec->list, node) { |
1048 | if (value == handler->query_bit) { | 1049 | if (value == handler->query_bit) { |
1050 | result = 0; | ||
1049 | q->handler = acpi_ec_get_query_handler(handler); | 1051 | q->handler = acpi_ec_get_query_handler(handler); |
1050 | ec_dbg_evt("Query(0x%02x) scheduled", | 1052 | ec_dbg_evt("Query(0x%02x) scheduled", |
1051 | q->handler->query_bit); | 1053 | q->handler->query_bit); |
diff --git a/drivers/acpi/pci_irq.c b/drivers/acpi/pci_irq.c index 6da0f9beab19..c9336751e5e3 100644 --- a/drivers/acpi/pci_irq.c +++ b/drivers/acpi/pci_irq.c | |||
@@ -372,6 +372,7 @@ static int acpi_isa_register_gsi(struct pci_dev *dev) | |||
372 | 372 | ||
373 | /* Interrupt Line values above 0xF are forbidden */ | 373 | /* Interrupt Line values above 0xF are forbidden */ |
374 | if (dev->irq > 0 && (dev->irq <= 0xF) && | 374 | if (dev->irq > 0 && (dev->irq <= 0xF) && |
375 | acpi_isa_irq_available(dev->irq) && | ||
375 | (acpi_isa_irq_to_gsi(dev->irq, &dev_gsi) == 0)) { | 376 | (acpi_isa_irq_to_gsi(dev->irq, &dev_gsi) == 0)) { |
376 | dev_warn(&dev->dev, "PCI INT %c: no GSI - using ISA IRQ %d\n", | 377 | dev_warn(&dev->dev, "PCI INT %c: no GSI - using ISA IRQ %d\n", |
377 | pin_name(dev->pin), dev->irq); | 378 | pin_name(dev->pin), dev->irq); |
diff --git a/drivers/acpi/pci_link.c b/drivers/acpi/pci_link.c index 3b4ea98e3ea0..7c8408b946ca 100644 --- a/drivers/acpi/pci_link.c +++ b/drivers/acpi/pci_link.c | |||
@@ -498,8 +498,7 @@ int __init acpi_irq_penalty_init(void) | |||
498 | PIRQ_PENALTY_PCI_POSSIBLE; | 498 | PIRQ_PENALTY_PCI_POSSIBLE; |
499 | } | 499 | } |
500 | } | 500 | } |
501 | /* Add a penalty for the SCI */ | 501 | |
502 | acpi_irq_penalty[acpi_gbl_FADT.sci_interrupt] += PIRQ_PENALTY_PCI_USING; | ||
503 | return 0; | 502 | return 0; |
504 | } | 503 | } |
505 | 504 | ||
@@ -553,6 +552,13 @@ static int acpi_pci_link_allocate(struct acpi_pci_link *link) | |||
553 | irq = link->irq.possible[i]; | 552 | irq = link->irq.possible[i]; |
554 | } | 553 | } |
555 | } | 554 | } |
555 | if (acpi_irq_penalty[irq] >= PIRQ_PENALTY_ISA_ALWAYS) { | ||
556 | printk(KERN_ERR PREFIX "No IRQ available for %s [%s]. " | ||
557 | "Try pci=noacpi or acpi=off\n", | ||
558 | acpi_device_name(link->device), | ||
559 | acpi_device_bid(link->device)); | ||
560 | return -ENODEV; | ||
561 | } | ||
556 | 562 | ||
557 | /* Attempt to enable the link device at this IRQ. */ | 563 | /* Attempt to enable the link device at this IRQ. */ |
558 | if (acpi_pci_link_set(link, irq)) { | 564 | if (acpi_pci_link_set(link, irq)) { |
@@ -821,6 +827,12 @@ void acpi_penalize_isa_irq(int irq, int active) | |||
821 | } | 827 | } |
822 | } | 828 | } |
823 | 829 | ||
830 | bool acpi_isa_irq_available(int irq) | ||
831 | { | ||
832 | return irq >= 0 && (irq >= ARRAY_SIZE(acpi_irq_penalty) || | ||
833 | acpi_irq_penalty[irq] < PIRQ_PENALTY_ISA_ALWAYS); | ||
834 | } | ||
835 | |||
824 | /* | 836 | /* |
825 | * Penalize IRQ used by ACPI SCI. If ACPI SCI pin attributes conflict with | 837 | * Penalize IRQ used by ACPI SCI. If ACPI SCI pin attributes conflict with |
826 | * PCI IRQ attributes, mark ACPI SCI as ISA_ALWAYS so it won't be use for | 838 | * PCI IRQ attributes, mark ACPI SCI as ISA_ALWAYS so it won't be use for |
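The new acpi_isa_irq_available() helper added above amounts to a bounds check plus a penalty comparison against PIRQ_PENALTY_ISA_ALWAYS. A self-contained sketch of the same test (with an invented penalty table and threshold value, not the real ACPI tables) is:

#include <stdbool.h>
#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))
#define PENALTY_ISA_ALWAYS 100    /* illustrative threshold */

/* Illustrative per-IRQ penalties; IRQ 9 plays the always-taken SCI here. */
static int demo_irq_penalty[16] = {
	[9] = PENALTY_ISA_ALWAYS,
};

/*
 * An ISA IRQ is available if it is non-negative and either lies beyond the
 * penalty table or has not been penalised up to the ISA_ALWAYS level.
 */
static bool demo_isa_irq_available(int irq)
{
	return irq >= 0 && ((unsigned int)irq >= ARRAY_SIZE(demo_irq_penalty) ||
			    demo_irq_penalty[irq] < PENALTY_ISA_ALWAYS);
}

int main(void)
{
	printf("irq 5:  %s\n", demo_isa_irq_available(5)  ? "available" : "busy");
	printf("irq 9:  %s\n", demo_isa_irq_available(9)  ? "available" : "busy");
	printf("irq 20: %s\n", demo_isa_irq_available(20) ? "available" : "busy");
	return 0;
}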
diff --git a/drivers/base/power/opp.c b/drivers/base/power/opp.c index 28cd75c535b0..7ae7cd990fbf 100644 --- a/drivers/base/power/opp.c +++ b/drivers/base/power/opp.c | |||
@@ -892,10 +892,17 @@ static int opp_get_microvolt(struct dev_pm_opp *opp, struct device *dev) | |||
892 | u32 microvolt[3] = {0}; | 892 | u32 microvolt[3] = {0}; |
893 | int count, ret; | 893 | int count, ret; |
894 | 894 | ||
895 | count = of_property_count_u32_elems(opp->np, "opp-microvolt"); | 895 | /* Missing property isn't a problem, but an invalid entry is */ |
896 | if (!count) | 896 | if (!of_find_property(opp->np, "opp-microvolt", NULL)) |
897 | return 0; | 897 | return 0; |
898 | 898 | ||
899 | count = of_property_count_u32_elems(opp->np, "opp-microvolt"); | ||
900 | if (count < 0) { | ||
901 | dev_err(dev, "%s: Invalid opp-microvolt property (%d)\n", | ||
902 | __func__, count); | ||
903 | return count; | ||
904 | } | ||
905 | |||
899 | /* There can be one or three elements here */ | 906 | /* There can be one or three elements here */ |
900 | if (count != 1 && count != 3) { | 907 | if (count != 1 && count != 3) { |
901 | dev_err(dev, "%s: Invalid number of elements in opp-microvolt property (%d)\n", | 908 | dev_err(dev, "%s: Invalid number of elements in opp-microvolt property (%d)\n", |
@@ -1063,7 +1070,7 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_add); | |||
1063 | * share a common logic which is isolated here. | 1070 | * share a common logic which is isolated here. |
1064 | * | 1071 | * |
1065 | * Return: -EINVAL for bad pointers, -ENOMEM if no memory available for the | 1072 | * Return: -EINVAL for bad pointers, -ENOMEM if no memory available for the |
1066 | * copy operation, returns 0 if no modifcation was done OR modification was | 1073 | * copy operation, returns 0 if no modification was done OR modification was |
1067 | * successful. | 1074 | * successful. |
1068 | * | 1075 | * |
1069 | * Locking: The internal device_opp and opp structures are RCU protected. | 1076 | * Locking: The internal device_opp and opp structures are RCU protected. |
@@ -1151,7 +1158,7 @@ unlock: | |||
1151 | * mutex locking or synchronize_rcu() blocking calls cannot be used. | 1158 | * mutex locking or synchronize_rcu() blocking calls cannot be used. |
1152 | * | 1159 | * |
1153 | * Return: -EINVAL for bad pointers, -ENOMEM if no memory available for the | 1160 | * Return: -EINVAL for bad pointers, -ENOMEM if no memory available for the |
1154 | * copy operation, returns 0 if no modifcation was done OR modification was | 1161 | * copy operation, returns 0 if no modification was done OR modification was |
1155 | * successful. | 1162 | * successful. |
1156 | */ | 1163 | */ |
1157 | int dev_pm_opp_enable(struct device *dev, unsigned long freq) | 1164 | int dev_pm_opp_enable(struct device *dev, unsigned long freq) |
@@ -1177,7 +1184,7 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_enable); | |||
1177 | * mutex locking or synchronize_rcu() blocking calls cannot be used. | 1184 | * mutex locking or synchronize_rcu() blocking calls cannot be used. |
1178 | * | 1185 | * |
1179 | * Return: -EINVAL for bad pointers, -ENOMEM if no memory available for the | 1186 | * Return: -EINVAL for bad pointers, -ENOMEM if no memory available for the |
1180 | * copy operation, returns 0 if no modifcation was done OR modification was | 1187 | * copy operation, returns 0 if no modification was done OR modification was |
1181 | * successful. | 1188 | * successful. |
1182 | */ | 1189 | */ |
1183 | int dev_pm_opp_disable(struct device *dev, unsigned long freq) | 1190 | int dev_pm_opp_disable(struct device *dev, unsigned long freq) |
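Back in the opp_get_microvolt() hunk above, the point of the change is to treat a missing opp-microvolt property as harmless while turning a present-but-unparsable one into a hard error. Modelled outside the kernel (a NULL pointer stands in for the absent DT property, a negative count for a parse failure), the decision tree is:

#include <stdio.h>

/*
 * Toy model: "values" is NULL when the opp-microvolt property is missing,
 * and "count" is negative when the property exists but cannot be parsed.
 */
static int demo_get_microvolt(const unsigned int *values, int count)
{
	if (!values)                 /* missing property: not an error */
		return 0;

	if (count < 0) {             /* present but unparsable: propagate */
		fprintf(stderr, "invalid opp-microvolt property (%d)\n", count);
		return count;
	}

	if (count != 1 && count != 3) {   /* wrong element count */
		fprintf(stderr, "invalid number of elements (%d)\n", count);
		return -22;               /* -EINVAL */
	}

	printf("parsed %d microvolt value(s)\n", count);
	return 0;
}

int main(void)
{
	unsigned int one[1] = { 1100000 };
	unsigned int three[3] = { 1000000, 1100000, 1200000 };

	demo_get_microvolt(NULL, 0);      /* property missing -> OK    */
	demo_get_microvolt(one, 1);       /* single value      -> OK    */
	demo_get_microvolt(three, 3);     /* min/target/max    -> OK    */
	demo_get_microvolt(one, -22);     /* malformed         -> error */
	return 0;
}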
diff --git a/drivers/block/loop.c b/drivers/block/loop.c index f9889b6bc02c..674f800a3b57 100644 --- a/drivers/block/loop.c +++ b/drivers/block/loop.c | |||
@@ -1486,17 +1486,16 @@ static void loop_handle_cmd(struct loop_cmd *cmd) | |||
1486 | { | 1486 | { |
1487 | const bool write = cmd->rq->cmd_flags & REQ_WRITE; | 1487 | const bool write = cmd->rq->cmd_flags & REQ_WRITE; |
1488 | struct loop_device *lo = cmd->rq->q->queuedata; | 1488 | struct loop_device *lo = cmd->rq->q->queuedata; |
1489 | int ret = -EIO; | 1489 | int ret = 0; |
1490 | 1490 | ||
1491 | if (write && (lo->lo_flags & LO_FLAGS_READ_ONLY)) | 1491 | if (write && (lo->lo_flags & LO_FLAGS_READ_ONLY)) { |
1492 | ret = -EIO; | ||
1492 | goto failed; | 1493 | goto failed; |
1494 | } | ||
1493 | 1495 | ||
1494 | ret = do_req_filebacked(lo, cmd->rq); | 1496 | ret = do_req_filebacked(lo, cmd->rq); |
1495 | |||
1496 | failed: | 1497 | failed: |
1497 | if (ret) | 1498 | blk_mq_complete_request(cmd->rq, ret ? -EIO : 0); |
1498 | cmd->rq->errors = -EIO; | ||
1499 | blk_mq_complete_request(cmd->rq); | ||
1500 | } | 1499 | } |
1501 | 1500 | ||
1502 | static void loop_queue_write_work(struct work_struct *work) | 1501 | static void loop_queue_write_work(struct work_struct *work) |
diff --git a/drivers/block/null_blk.c b/drivers/block/null_blk.c index a295b98c6bae..1c9e4fe5aa44 100644 --- a/drivers/block/null_blk.c +++ b/drivers/block/null_blk.c | |||
@@ -289,7 +289,7 @@ static inline void null_handle_cmd(struct nullb_cmd *cmd) | |||
289 | case NULL_IRQ_SOFTIRQ: | 289 | case NULL_IRQ_SOFTIRQ: |
290 | switch (queue_mode) { | 290 | switch (queue_mode) { |
291 | case NULL_Q_MQ: | 291 | case NULL_Q_MQ: |
292 | blk_mq_complete_request(cmd->rq); | 292 | blk_mq_complete_request(cmd->rq, cmd->rq->errors); |
293 | break; | 293 | break; |
294 | case NULL_Q_RQ: | 294 | case NULL_Q_RQ: |
295 | blk_complete_request(cmd->rq); | 295 | blk_complete_request(cmd->rq); |
diff --git a/drivers/block/nvme-core.c b/drivers/block/nvme-core.c index b97fc3fe0916..6f04771f1019 100644 --- a/drivers/block/nvme-core.c +++ b/drivers/block/nvme-core.c | |||
@@ -618,16 +618,15 @@ static void req_completion(struct nvme_queue *nvmeq, void *ctx, | |||
618 | spin_unlock_irqrestore(req->q->queue_lock, flags); | 618 | spin_unlock_irqrestore(req->q->queue_lock, flags); |
619 | return; | 619 | return; |
620 | } | 620 | } |
621 | |||
621 | if (req->cmd_type == REQ_TYPE_DRV_PRIV) { | 622 | if (req->cmd_type == REQ_TYPE_DRV_PRIV) { |
622 | if (cmd_rq->ctx == CMD_CTX_CANCELLED) | 623 | if (cmd_rq->ctx == CMD_CTX_CANCELLED) |
623 | req->errors = -EINTR; | 624 | status = -EINTR; |
624 | else | ||
625 | req->errors = status; | ||
626 | } else { | 625 | } else { |
627 | req->errors = nvme_error_status(status); | 626 | status = nvme_error_status(status); |
628 | } | 627 | } |
629 | } else | 628 | } |
630 | req->errors = 0; | 629 | |
631 | if (req->cmd_type == REQ_TYPE_DRV_PRIV) { | 630 | if (req->cmd_type == REQ_TYPE_DRV_PRIV) { |
632 | u32 result = le32_to_cpup(&cqe->result); | 631 | u32 result = le32_to_cpup(&cqe->result); |
633 | req->special = (void *)(uintptr_t)result; | 632 | req->special = (void *)(uintptr_t)result; |
@@ -650,7 +649,7 @@ static void req_completion(struct nvme_queue *nvmeq, void *ctx, | |||
650 | } | 649 | } |
651 | nvme_free_iod(nvmeq->dev, iod); | 650 | nvme_free_iod(nvmeq->dev, iod); |
652 | 651 | ||
653 | blk_mq_complete_request(req); | 652 | blk_mq_complete_request(req, status); |
654 | } | 653 | } |
655 | 654 | ||
656 | /* length is in bytes. gfp flags indicates whether we may sleep. */ | 655 | /* length is in bytes. gfp flags indicates whether we may sleep. */ |
@@ -863,8 +862,7 @@ static int nvme_queue_rq(struct blk_mq_hw_ctx *hctx, | |||
863 | if (ns && ns->ms && !blk_integrity_rq(req)) { | 862 | if (ns && ns->ms && !blk_integrity_rq(req)) { |
864 | if (!(ns->pi_type && ns->ms == 8) && | 863 | if (!(ns->pi_type && ns->ms == 8) && |
865 | req->cmd_type != REQ_TYPE_DRV_PRIV) { | 864 | req->cmd_type != REQ_TYPE_DRV_PRIV) { |
866 | req->errors = -EFAULT; | 865 | blk_mq_complete_request(req, -EFAULT); |
867 | blk_mq_complete_request(req); | ||
868 | return BLK_MQ_RQ_QUEUE_OK; | 866 | return BLK_MQ_RQ_QUEUE_OK; |
869 | } | 867 | } |
870 | } | 868 | } |
@@ -2439,6 +2437,22 @@ static void nvme_scan_namespaces(struct nvme_dev *dev, unsigned nn) | |||
2439 | list_sort(NULL, &dev->namespaces, ns_cmp); | 2437 | list_sort(NULL, &dev->namespaces, ns_cmp); |
2440 | } | 2438 | } |
2441 | 2439 | ||
2440 | static void nvme_set_irq_hints(struct nvme_dev *dev) | ||
2441 | { | ||
2442 | struct nvme_queue *nvmeq; | ||
2443 | int i; | ||
2444 | |||
2445 | for (i = 0; i < dev->online_queues; i++) { | ||
2446 | nvmeq = dev->queues[i]; | ||
2447 | |||
2448 | if (!nvmeq->tags || !(*nvmeq->tags)) | ||
2449 | continue; | ||
2450 | |||
2451 | irq_set_affinity_hint(dev->entry[nvmeq->cq_vector].vector, | ||
2452 | blk_mq_tags_cpumask(*nvmeq->tags)); | ||
2453 | } | ||
2454 | } | ||
2455 | |||
2442 | static void nvme_dev_scan(struct work_struct *work) | 2456 | static void nvme_dev_scan(struct work_struct *work) |
2443 | { | 2457 | { |
2444 | struct nvme_dev *dev = container_of(work, struct nvme_dev, scan_work); | 2458 | struct nvme_dev *dev = container_of(work, struct nvme_dev, scan_work); |
@@ -2450,6 +2464,7 @@ static void nvme_dev_scan(struct work_struct *work) | |||
2450 | return; | 2464 | return; |
2451 | nvme_scan_namespaces(dev, le32_to_cpup(&ctrl->nn)); | 2465 | nvme_scan_namespaces(dev, le32_to_cpup(&ctrl->nn)); |
2452 | kfree(ctrl); | 2466 | kfree(ctrl); |
2467 | nvme_set_irq_hints(dev); | ||
2453 | } | 2468 | } |
2454 | 2469 | ||
2455 | /* | 2470 | /* |
@@ -2953,22 +2968,6 @@ static const struct file_operations nvme_dev_fops = { | |||
2953 | .compat_ioctl = nvme_dev_ioctl, | 2968 | .compat_ioctl = nvme_dev_ioctl, |
2954 | }; | 2969 | }; |
2955 | 2970 | ||
2956 | static void nvme_set_irq_hints(struct nvme_dev *dev) | ||
2957 | { | ||
2958 | struct nvme_queue *nvmeq; | ||
2959 | int i; | ||
2960 | |||
2961 | for (i = 0; i < dev->online_queues; i++) { | ||
2962 | nvmeq = dev->queues[i]; | ||
2963 | |||
2964 | if (!nvmeq->tags || !(*nvmeq->tags)) | ||
2965 | continue; | ||
2966 | |||
2967 | irq_set_affinity_hint(dev->entry[nvmeq->cq_vector].vector, | ||
2968 | blk_mq_tags_cpumask(*nvmeq->tags)); | ||
2969 | } | ||
2970 | } | ||
2971 | |||
2972 | static int nvme_dev_start(struct nvme_dev *dev) | 2971 | static int nvme_dev_start(struct nvme_dev *dev) |
2973 | { | 2972 | { |
2974 | int result; | 2973 | int result; |
@@ -3010,8 +3009,6 @@ static int nvme_dev_start(struct nvme_dev *dev) | |||
3010 | if (result) | 3009 | if (result) |
3011 | goto free_tags; | 3010 | goto free_tags; |
3012 | 3011 | ||
3013 | nvme_set_irq_hints(dev); | ||
3014 | |||
3015 | dev->event_limit = 1; | 3012 | dev->event_limit = 1; |
3016 | return result; | 3013 | return result; |
3017 | 3014 | ||
@@ -3062,7 +3059,6 @@ static int nvme_dev_resume(struct nvme_dev *dev) | |||
3062 | } else { | 3059 | } else { |
3063 | nvme_unfreeze_queues(dev); | 3060 | nvme_unfreeze_queues(dev); |
3064 | nvme_dev_add(dev); | 3061 | nvme_dev_add(dev); |
3065 | nvme_set_irq_hints(dev); | ||
3066 | } | 3062 | } |
3067 | return 0; | 3063 | return 0; |
3068 | } | 3064 | } |
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c index e93899cc6f60..6ca35495a5be 100644 --- a/drivers/block/virtio_blk.c +++ b/drivers/block/virtio_blk.c | |||
@@ -144,7 +144,7 @@ static void virtblk_done(struct virtqueue *vq) | |||
144 | do { | 144 | do { |
145 | virtqueue_disable_cb(vq); | 145 | virtqueue_disable_cb(vq); |
146 | while ((vbr = virtqueue_get_buf(vblk->vqs[qid].vq, &len)) != NULL) { | 146 | while ((vbr = virtqueue_get_buf(vblk->vqs[qid].vq, &len)) != NULL) { |
147 | blk_mq_complete_request(vbr->req); | 147 | blk_mq_complete_request(vbr->req, vbr->req->errors); |
148 | req_done = true; | 148 | req_done = true; |
149 | } | 149 | } |
150 | if (unlikely(virtqueue_is_broken(vq))) | 150 | if (unlikely(virtqueue_is_broken(vq))) |
diff --git a/drivers/block/xen-blkback/xenbus.c b/drivers/block/xen-blkback/xenbus.c index deb3f001791f..767657565de6 100644 --- a/drivers/block/xen-blkback/xenbus.c +++ b/drivers/block/xen-blkback/xenbus.c | |||
@@ -212,6 +212,9 @@ static int xen_blkif_map(struct xen_blkif *blkif, grant_ref_t *gref, | |||
212 | 212 | ||
213 | static int xen_blkif_disconnect(struct xen_blkif *blkif) | 213 | static int xen_blkif_disconnect(struct xen_blkif *blkif) |
214 | { | 214 | { |
215 | struct pending_req *req, *n; | ||
216 | int i = 0, j; | ||
217 | |||
215 | if (blkif->xenblkd) { | 218 | if (blkif->xenblkd) { |
216 | kthread_stop(blkif->xenblkd); | 219 | kthread_stop(blkif->xenblkd); |
217 | wake_up(&blkif->shutdown_wq); | 220 | wake_up(&blkif->shutdown_wq); |
@@ -238,13 +241,28 @@ static int xen_blkif_disconnect(struct xen_blkif *blkif) | |||
238 | /* Remove all persistent grants and the cache of ballooned pages. */ | 241 | /* Remove all persistent grants and the cache of ballooned pages. */ |
239 | xen_blkbk_free_caches(blkif); | 242 | xen_blkbk_free_caches(blkif); |
240 | 243 | ||
244 | /* Check that there is no request in use */ | ||
245 | list_for_each_entry_safe(req, n, &blkif->pending_free, free_list) { | ||
246 | list_del(&req->free_list); | ||
247 | |||
248 | for (j = 0; j < MAX_INDIRECT_SEGMENTS; j++) | ||
249 | kfree(req->segments[j]); | ||
250 | |||
251 | for (j = 0; j < MAX_INDIRECT_PAGES; j++) | ||
252 | kfree(req->indirect_pages[j]); | ||
253 | |||
254 | kfree(req); | ||
255 | i++; | ||
256 | } | ||
257 | |||
258 | WARN_ON(i != (XEN_BLKIF_REQS_PER_PAGE * blkif->nr_ring_pages)); | ||
259 | blkif->nr_ring_pages = 0; | ||
260 | |||
241 | return 0; | 261 | return 0; |
242 | } | 262 | } |
243 | 263 | ||
244 | static void xen_blkif_free(struct xen_blkif *blkif) | 264 | static void xen_blkif_free(struct xen_blkif *blkif) |
245 | { | 265 | { |
246 | struct pending_req *req, *n; | ||
247 | int i = 0, j; | ||
248 | 266 | ||
249 | xen_blkif_disconnect(blkif); | 267 | xen_blkif_disconnect(blkif); |
250 | xen_vbd_free(&blkif->vbd); | 268 | xen_vbd_free(&blkif->vbd); |
@@ -257,22 +275,6 @@ static void xen_blkif_free(struct xen_blkif *blkif) | |||
257 | BUG_ON(!list_empty(&blkif->free_pages)); | 275 | BUG_ON(!list_empty(&blkif->free_pages)); |
258 | BUG_ON(!RB_EMPTY_ROOT(&blkif->persistent_gnts)); | 276 | BUG_ON(!RB_EMPTY_ROOT(&blkif->persistent_gnts)); |
259 | 277 | ||
260 | /* Check that there is no request in use */ | ||
261 | list_for_each_entry_safe(req, n, &blkif->pending_free, free_list) { | ||
262 | list_del(&req->free_list); | ||
263 | |||
264 | for (j = 0; j < MAX_INDIRECT_SEGMENTS; j++) | ||
265 | kfree(req->segments[j]); | ||
266 | |||
267 | for (j = 0; j < MAX_INDIRECT_PAGES; j++) | ||
268 | kfree(req->indirect_pages[j]); | ||
269 | |||
270 | kfree(req); | ||
271 | i++; | ||
272 | } | ||
273 | |||
274 | WARN_ON(i != (XEN_BLKIF_REQS_PER_PAGE * blkif->nr_ring_pages)); | ||
275 | |||
276 | kmem_cache_free(xen_blkif_cachep, blkif); | 278 | kmem_cache_free(xen_blkif_cachep, blkif); |
277 | } | 279 | } |
278 | 280 | ||
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c index 0823a96902f8..611170896b8c 100644 --- a/drivers/block/xen-blkfront.c +++ b/drivers/block/xen-blkfront.c | |||
@@ -1142,6 +1142,7 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id) | |||
1142 | RING_IDX i, rp; | 1142 | RING_IDX i, rp; |
1143 | unsigned long flags; | 1143 | unsigned long flags; |
1144 | struct blkfront_info *info = (struct blkfront_info *)dev_id; | 1144 | struct blkfront_info *info = (struct blkfront_info *)dev_id; |
1145 | int error; | ||
1145 | 1146 | ||
1146 | spin_lock_irqsave(&info->io_lock, flags); | 1147 | spin_lock_irqsave(&info->io_lock, flags); |
1147 | 1148 | ||
@@ -1182,37 +1183,37 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id) | |||
1182 | continue; | 1183 | continue; |
1183 | } | 1184 | } |
1184 | 1185 | ||
1185 | req->errors = (bret->status == BLKIF_RSP_OKAY) ? 0 : -EIO; | 1186 | error = (bret->status == BLKIF_RSP_OKAY) ? 0 : -EIO; |
1186 | switch (bret->operation) { | 1187 | switch (bret->operation) { |
1187 | case BLKIF_OP_DISCARD: | 1188 | case BLKIF_OP_DISCARD: |
1188 | if (unlikely(bret->status == BLKIF_RSP_EOPNOTSUPP)) { | 1189 | if (unlikely(bret->status == BLKIF_RSP_EOPNOTSUPP)) { |
1189 | struct request_queue *rq = info->rq; | 1190 | struct request_queue *rq = info->rq; |
1190 | printk(KERN_WARNING "blkfront: %s: %s op failed\n", | 1191 | printk(KERN_WARNING "blkfront: %s: %s op failed\n", |
1191 | info->gd->disk_name, op_name(bret->operation)); | 1192 | info->gd->disk_name, op_name(bret->operation)); |
1192 | req->errors = -EOPNOTSUPP; | 1193 | error = -EOPNOTSUPP; |
1193 | info->feature_discard = 0; | 1194 | info->feature_discard = 0; |
1194 | info->feature_secdiscard = 0; | 1195 | info->feature_secdiscard = 0; |
1195 | queue_flag_clear(QUEUE_FLAG_DISCARD, rq); | 1196 | queue_flag_clear(QUEUE_FLAG_DISCARD, rq); |
1196 | queue_flag_clear(QUEUE_FLAG_SECDISCARD, rq); | 1197 | queue_flag_clear(QUEUE_FLAG_SECDISCARD, rq); |
1197 | } | 1198 | } |
1198 | blk_mq_complete_request(req); | 1199 | blk_mq_complete_request(req, error); |
1199 | break; | 1200 | break; |
1200 | case BLKIF_OP_FLUSH_DISKCACHE: | 1201 | case BLKIF_OP_FLUSH_DISKCACHE: |
1201 | case BLKIF_OP_WRITE_BARRIER: | 1202 | case BLKIF_OP_WRITE_BARRIER: |
1202 | if (unlikely(bret->status == BLKIF_RSP_EOPNOTSUPP)) { | 1203 | if (unlikely(bret->status == BLKIF_RSP_EOPNOTSUPP)) { |
1203 | printk(KERN_WARNING "blkfront: %s: %s op failed\n", | 1204 | printk(KERN_WARNING "blkfront: %s: %s op failed\n", |
1204 | info->gd->disk_name, op_name(bret->operation)); | 1205 | info->gd->disk_name, op_name(bret->operation)); |
1205 | req->errors = -EOPNOTSUPP; | 1206 | error = -EOPNOTSUPP; |
1206 | } | 1207 | } |
1207 | if (unlikely(bret->status == BLKIF_RSP_ERROR && | 1208 | if (unlikely(bret->status == BLKIF_RSP_ERROR && |
1208 | info->shadow[id].req.u.rw.nr_segments == 0)) { | 1209 | info->shadow[id].req.u.rw.nr_segments == 0)) { |
1209 | printk(KERN_WARNING "blkfront: %s: empty %s op failed\n", | 1210 | printk(KERN_WARNING "blkfront: %s: empty %s op failed\n", |
1210 | info->gd->disk_name, op_name(bret->operation)); | 1211 | info->gd->disk_name, op_name(bret->operation)); |
1211 | req->errors = -EOPNOTSUPP; | 1212 | error = -EOPNOTSUPP; |
1212 | } | 1213 | } |
1213 | if (unlikely(req->errors)) { | 1214 | if (unlikely(error)) { |
1214 | if (req->errors == -EOPNOTSUPP) | 1215 | if (error == -EOPNOTSUPP) |
1215 | req->errors = 0; | 1216 | error = 0; |
1216 | info->feature_flush = 0; | 1217 | info->feature_flush = 0; |
1217 | xlvbd_flush(info); | 1218 | xlvbd_flush(info); |
1218 | } | 1219 | } |
@@ -1223,7 +1224,7 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id) | |||
1223 | dev_dbg(&info->xbdev->dev, "Bad return from blkdev data " | 1224 | dev_dbg(&info->xbdev->dev, "Bad return from blkdev data " |
1224 | "request: %x\n", bret->status); | 1225 | "request: %x\n", bret->status); |
1225 | 1226 | ||
1226 | blk_mq_complete_request(req); | 1227 | blk_mq_complete_request(req, error); |
1227 | break; | 1228 | break; |
1228 | default: | 1229 | default: |
1229 | BUG(); | 1230 | BUG(); |
diff --git a/drivers/clocksource/rockchip_timer.c b/drivers/clocksource/rockchip_timer.c index bb2c2b050964..d3c1742ded1a 100644 --- a/drivers/clocksource/rockchip_timer.c +++ b/drivers/clocksource/rockchip_timer.c | |||
@@ -148,7 +148,7 @@ static void __init rk_timer_init(struct device_node *np) | |||
148 | bc_timer.freq = clk_get_rate(timer_clk); | 148 | bc_timer.freq = clk_get_rate(timer_clk); |
149 | 149 | ||
150 | irq = irq_of_parse_and_map(np, 0); | 150 | irq = irq_of_parse_and_map(np, 0); |
151 | if (irq == NO_IRQ) { | 151 | if (!irq) { |
152 | pr_err("Failed to map interrupts for '%s'\n", TIMER_NAME); | 152 | pr_err("Failed to map interrupts for '%s'\n", TIMER_NAME); |
153 | return; | 153 | return; |
154 | } | 154 | } |
diff --git a/drivers/clocksource/timer-keystone.c b/drivers/clocksource/timer-keystone.c index edacf3902e10..1cea08cf603e 100644 --- a/drivers/clocksource/timer-keystone.c +++ b/drivers/clocksource/timer-keystone.c | |||
@@ -152,7 +152,7 @@ static void __init keystone_timer_init(struct device_node *np) | |||
152 | int irq, error; | 152 | int irq, error; |
153 | 153 | ||
154 | irq = irq_of_parse_and_map(np, 0); | 154 | irq = irq_of_parse_and_map(np, 0); |
155 | if (irq == NO_IRQ) { | 155 | if (!irq) { |
156 | pr_err("%s: failed to map interrupts\n", __func__); | 156 | pr_err("%s: failed to map interrupts\n", __func__); |
157 | return; | 157 | return; |
158 | } | 158 | } |
diff --git a/drivers/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c index 798277227de7..cec1ee2d2f74 100644 --- a/drivers/cpufreq/acpi-cpufreq.c +++ b/drivers/cpufreq/acpi-cpufreq.c | |||
@@ -149,6 +149,9 @@ static ssize_t show_freqdomain_cpus(struct cpufreq_policy *policy, char *buf) | |||
149 | { | 149 | { |
150 | struct acpi_cpufreq_data *data = policy->driver_data; | 150 | struct acpi_cpufreq_data *data = policy->driver_data; |
151 | 151 | ||
152 | if (unlikely(!data)) | ||
153 | return -ENODEV; | ||
154 | |||
152 | return cpufreq_show_cpus(data->freqdomain_cpus, buf); | 155 | return cpufreq_show_cpus(data->freqdomain_cpus, buf); |
153 | } | 156 | } |
154 | 157 | ||
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c index ef5ed9470de9..25c4c15103a0 100644 --- a/drivers/cpufreq/cpufreq.c +++ b/drivers/cpufreq/cpufreq.c | |||
@@ -1436,8 +1436,10 @@ static void cpufreq_offline_finish(unsigned int cpu) | |||
1436 | * since this is a core component, and is essential for the | 1436 | * since this is a core component, and is essential for the |
1437 | * subsequent light-weight ->init() to succeed. | 1437 | * subsequent light-weight ->init() to succeed. |
1438 | */ | 1438 | */ |
1439 | if (cpufreq_driver->exit) | 1439 | if (cpufreq_driver->exit) { |
1440 | cpufreq_driver->exit(policy); | 1440 | cpufreq_driver->exit(policy); |
1441 | policy->freq_table = NULL; | ||
1442 | } | ||
1441 | } | 1443 | } |
1442 | 1444 | ||
1443 | /** | 1445 | /** |
diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c index a165b4bfd330..dd24375b76dd 100644 --- a/drivers/dma/at_xdmac.c +++ b/drivers/dma/at_xdmac.c | |||
@@ -455,6 +455,15 @@ static struct at_xdmac_desc *at_xdmac_alloc_desc(struct dma_chan *chan, | |||
455 | return desc; | 455 | return desc; |
456 | } | 456 | } |
457 | 457 | ||
458 | void at_xdmac_init_used_desc(struct at_xdmac_desc *desc) | ||
459 | { | ||
460 | memset(&desc->lld, 0, sizeof(desc->lld)); | ||
461 | INIT_LIST_HEAD(&desc->descs_list); | ||
462 | desc->direction = DMA_TRANS_NONE; | ||
463 | desc->xfer_size = 0; | ||
464 | desc->active_xfer = false; | ||
465 | } | ||
466 | |||
458 | /* Call must be protected by lock. */ | 467 | /* Call must be protected by lock. */ |
459 | static struct at_xdmac_desc *at_xdmac_get_desc(struct at_xdmac_chan *atchan) | 468 | static struct at_xdmac_desc *at_xdmac_get_desc(struct at_xdmac_chan *atchan) |
460 | { | 469 | { |
@@ -466,7 +475,7 @@ static struct at_xdmac_desc *at_xdmac_get_desc(struct at_xdmac_chan *atchan) | |||
466 | desc = list_first_entry(&atchan->free_descs_list, | 475 | desc = list_first_entry(&atchan->free_descs_list, |
467 | struct at_xdmac_desc, desc_node); | 476 | struct at_xdmac_desc, desc_node); |
468 | list_del(&desc->desc_node); | 477 | list_del(&desc->desc_node); |
469 | desc->active_xfer = false; | 478 | at_xdmac_init_used_desc(desc); |
470 | } | 479 | } |
471 | 480 | ||
472 | return desc; | 481 | return desc; |
@@ -875,14 +884,14 @@ at_xdmac_interleaved_queue_desc(struct dma_chan *chan, | |||
875 | 884 | ||
876 | if (xt->src_inc) { | 885 | if (xt->src_inc) { |
877 | if (xt->src_sgl) | 886 | if (xt->src_sgl) |
878 | chan_cc |= AT_XDMAC_CC_SAM_UBS_DS_AM; | 887 | chan_cc |= AT_XDMAC_CC_SAM_UBS_AM; |
879 | else | 888 | else |
880 | chan_cc |= AT_XDMAC_CC_SAM_INCREMENTED_AM; | 889 | chan_cc |= AT_XDMAC_CC_SAM_INCREMENTED_AM; |
881 | } | 890 | } |
882 | 891 | ||
883 | if (xt->dst_inc) { | 892 | if (xt->dst_inc) { |
884 | if (xt->dst_sgl) | 893 | if (xt->dst_sgl) |
885 | chan_cc |= AT_XDMAC_CC_DAM_UBS_DS_AM; | 894 | chan_cc |= AT_XDMAC_CC_DAM_UBS_AM; |
886 | else | 895 | else |
887 | chan_cc |= AT_XDMAC_CC_DAM_INCREMENTED_AM; | 896 | chan_cc |= AT_XDMAC_CC_DAM_INCREMENTED_AM; |
888 | } | 897 | } |
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c index 3ff284c8e3d5..09479d4be4db 100644 --- a/drivers/dma/dmaengine.c +++ b/drivers/dma/dmaengine.c | |||
@@ -554,10 +554,18 @@ struct dma_chan *dma_get_slave_channel(struct dma_chan *chan) | |||
554 | mutex_lock(&dma_list_mutex); | 554 | mutex_lock(&dma_list_mutex); |
555 | 555 | ||
556 | if (chan->client_count == 0) { | 556 | if (chan->client_count == 0) { |
557 | struct dma_device *device = chan->device; | ||
558 | |||
559 | dma_cap_set(DMA_PRIVATE, device->cap_mask); | ||
560 | device->privatecnt++; | ||
557 | err = dma_chan_get(chan); | 561 | err = dma_chan_get(chan); |
558 | if (err) | 562 | if (err) { |
559 | pr_debug("%s: failed to get %s: (%d)\n", | 563 | pr_debug("%s: failed to get %s: (%d)\n", |
560 | __func__, dma_chan_name(chan), err); | 564 | __func__, dma_chan_name(chan), err); |
565 | chan = NULL; | ||
566 | if (--device->privatecnt == 0) | ||
567 | dma_cap_clear(DMA_PRIVATE, device->cap_mask); | ||
568 | } | ||
561 | } else | 569 | } else |
562 | chan = NULL; | 570 | chan = NULL; |
563 | 571 | ||
diff --git a/drivers/dma/dw/core.c b/drivers/dma/dw/core.c index cf1c87fa1edd..bedce038c6e2 100644 --- a/drivers/dma/dw/core.c +++ b/drivers/dma/dw/core.c | |||
@@ -1591,7 +1591,6 @@ int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata) | |||
1591 | INIT_LIST_HEAD(&dw->dma.channels); | 1591 | INIT_LIST_HEAD(&dw->dma.channels); |
1592 | for (i = 0; i < nr_channels; i++) { | 1592 | for (i = 0; i < nr_channels; i++) { |
1593 | struct dw_dma_chan *dwc = &dw->chan[i]; | 1593 | struct dw_dma_chan *dwc = &dw->chan[i]; |
1594 | int r = nr_channels - i - 1; | ||
1595 | 1594 | ||
1596 | dwc->chan.device = &dw->dma; | 1595 | dwc->chan.device = &dw->dma; |
1597 | dma_cookie_init(&dwc->chan); | 1596 | dma_cookie_init(&dwc->chan); |
@@ -1603,7 +1602,7 @@ int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata) | |||
1603 | 1602 | ||
1604 | /* 7 is highest priority & 0 is lowest. */ | 1603 | /* 7 is highest priority & 0 is lowest. */ |
1605 | if (pdata->chan_priority == CHAN_PRIORITY_ASCENDING) | 1604 | if (pdata->chan_priority == CHAN_PRIORITY_ASCENDING) |
1606 | dwc->priority = r; | 1605 | dwc->priority = nr_channels - i - 1; |
1607 | else | 1606 | else |
1608 | dwc->priority = i; | 1607 | dwc->priority = i; |
1609 | 1608 | ||
@@ -1622,6 +1621,7 @@ int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata) | |||
1622 | /* Hardware configuration */ | 1621 | /* Hardware configuration */ |
1623 | if (autocfg) { | 1622 | if (autocfg) { |
1624 | unsigned int dwc_params; | 1623 | unsigned int dwc_params; |
1624 | unsigned int r = DW_DMA_MAX_NR_CHANNELS - i - 1; | ||
1625 | void __iomem *addr = chip->regs + r * sizeof(u32); | 1625 | void __iomem *addr = chip->regs + r * sizeof(u32); |
1626 | 1626 | ||
1627 | dwc_params = dma_read_byaddr(addr, DWC_PARAMS); | 1627 | dwc_params = dma_read_byaddr(addr, DWC_PARAMS); |
diff --git a/drivers/dma/idma64.c b/drivers/dma/idma64.c index 18c14e1f1414..48d6d9e94f67 100644 --- a/drivers/dma/idma64.c +++ b/drivers/dma/idma64.c | |||
@@ -355,23 +355,23 @@ static size_t idma64_active_desc_size(struct idma64_chan *idma64c) | |||
355 | struct idma64_desc *desc = idma64c->desc; | 355 | struct idma64_desc *desc = idma64c->desc; |
356 | struct idma64_hw_desc *hw; | 356 | struct idma64_hw_desc *hw; |
357 | size_t bytes = desc->length; | 357 | size_t bytes = desc->length; |
358 | u64 llp; | 358 | u64 llp = channel_readq(idma64c, LLP); |
359 | u32 ctlhi; | 359 | u32 ctlhi = channel_readl(idma64c, CTL_HI); |
360 | unsigned int i = 0; | 360 | unsigned int i = 0; |
361 | 361 | ||
362 | llp = channel_readq(idma64c, LLP); | ||
363 | do { | 362 | do { |
364 | hw = &desc->hw[i]; | 363 | hw = &desc->hw[i]; |
365 | } while ((hw->llp != llp) && (++i < desc->ndesc)); | 364 | if (hw->llp == llp) |
365 | break; | ||
366 | bytes -= hw->len; | ||
367 | } while (++i < desc->ndesc); | ||
366 | 368 | ||
367 | if (!i) | 369 | if (!i) |
368 | return bytes; | 370 | return bytes; |
369 | 371 | ||
370 | do { | 372 | /* The current chunk is not fully transferred yet */ |
371 | bytes -= desc->hw[--i].len; | 373 | bytes += desc->hw[--i].len; |
372 | } while (i); | ||
373 | 374 | ||
374 | ctlhi = channel_readl(idma64c, CTL_HI); | ||
375 | return bytes - IDMA64C_CTLH_BLOCK_TS(ctlhi); | 375 | return bytes - IDMA64C_CTLH_BLOCK_TS(ctlhi); |
376 | } | 376 | } |
377 | 377 | ||
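The reworked idma64 residue walk subtracts the chunks that completed before the descriptor identified by the hardware link pointer, adds the in-flight chunk back, and finally subtracts the bytes already moved within it. The sketch below mirrors that walk in plain C; struct hw_desc and active_residue() are invented stand-ins, not the driver's types.

#include <stddef.h>
#include <stdio.h>

/* Invented hardware descriptor: link pointer plus chunk length. */
struct hw_desc {
	unsigned long long llp;
	size_t len;
};

/*
 * Subtract the chunks that completed before the descriptor whose link
 * pointer matches the value read from the hardware, add the in-flight
 * chunk back, then subtract the bytes already moved within it.
 */
static size_t active_residue(const struct hw_desc *desc, unsigned int ndesc,
			     size_t total, unsigned long long llp,
			     size_t done_in_block)
{
	size_t bytes = total;
	unsigned int i = 0;

	do {
		if (desc[i].llp == llp)
			break;
		bytes -= desc[i].len;
	} while (++i < ndesc);

	if (!i)
		return bytes;

	/* The current chunk is not fully transferred yet. */
	bytes += desc[i - 1].len;
	return bytes - done_in_block;
}

int main(void)
{
	struct hw_desc chain[] = { { 0x10, 64 }, { 0x20, 64 }, { 0x30, 64 } };

	/* LLP matches the second descriptor, so the first chunk is still
	 * in flight with 16 of its 64 bytes already moved. */
	printf("residue: %zu\n", active_residue(chain, 3, 192, 0x20, 16));
	return 0;
}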
diff --git a/drivers/dma/pxa_dma.c b/drivers/dma/pxa_dma.c index 5cb61ce01036..fc4156afa070 100644 --- a/drivers/dma/pxa_dma.c +++ b/drivers/dma/pxa_dma.c | |||
@@ -473,8 +473,10 @@ static void pxad_free_phy(struct pxad_chan *chan) | |||
473 | return; | 473 | return; |
474 | 474 | ||
475 | /* clear the channel mapping in DRCMR */ | 475 | /* clear the channel mapping in DRCMR */ |
476 | reg = pxad_drcmr(chan->drcmr); | 476 | if (chan->drcmr <= DRCMR_CHLNUM) { |
477 | writel_relaxed(0, chan->phy->base + reg); | 477 | reg = pxad_drcmr(chan->drcmr); |
478 | writel_relaxed(0, chan->phy->base + reg); | ||
479 | } | ||
478 | 480 | ||
479 | spin_lock_irqsave(&pdev->phy_lock, flags); | 481 | spin_lock_irqsave(&pdev->phy_lock, flags); |
480 | for (i = 0; i < 32; i++) | 482 | for (i = 0; i < 32; i++) |
@@ -516,8 +518,10 @@ static void phy_enable(struct pxad_phy *phy, bool misaligned) | |||
516 | "%s(); phy=%p(%d) misaligned=%d\n", __func__, | 518 | "%s(); phy=%p(%d) misaligned=%d\n", __func__, |
517 | phy, phy->idx, misaligned); | 519 | phy, phy->idx, misaligned); |
518 | 520 | ||
519 | reg = pxad_drcmr(phy->vchan->drcmr); | 521 | if (phy->vchan->drcmr <= DRCMR_CHLNUM) { |
520 | writel_relaxed(DRCMR_MAPVLD | phy->idx, phy->base + reg); | 522 | reg = pxad_drcmr(phy->vchan->drcmr); |
523 | writel_relaxed(DRCMR_MAPVLD | phy->idx, phy->base + reg); | ||
524 | } | ||
521 | 525 | ||
522 | dalgn = phy_readl_relaxed(phy, DALGN); | 526 | dalgn = phy_readl_relaxed(phy, DALGN); |
523 | if (misaligned) | 527 | if (misaligned) |
@@ -887,6 +891,7 @@ pxad_tx_prep(struct virt_dma_chan *vc, struct virt_dma_desc *vd, | |||
887 | struct dma_async_tx_descriptor *tx; | 891 | struct dma_async_tx_descriptor *tx; |
888 | struct pxad_chan *chan = container_of(vc, struct pxad_chan, vc); | 892 | struct pxad_chan *chan = container_of(vc, struct pxad_chan, vc); |
889 | 893 | ||
894 | INIT_LIST_HEAD(&vd->node); | ||
890 | tx = vchan_tx_prep(vc, vd, tx_flags); | 895 | tx = vchan_tx_prep(vc, vd, tx_flags); |
891 | tx->tx_submit = pxad_tx_submit; | 896 | tx->tx_submit = pxad_tx_submit; |
892 | dev_dbg(&chan->vc.chan.dev->device, | 897 | dev_dbg(&chan->vc.chan.dev->device, |
@@ -910,14 +915,18 @@ static void pxad_get_config(struct pxad_chan *chan, | |||
910 | width = chan->cfg.src_addr_width; | 915 | width = chan->cfg.src_addr_width; |
911 | dev_addr = chan->cfg.src_addr; | 916 | dev_addr = chan->cfg.src_addr; |
912 | *dev_src = dev_addr; | 917 | *dev_src = dev_addr; |
913 | *dcmd |= PXA_DCMD_INCTRGADDR | PXA_DCMD_FLOWSRC; | 918 | *dcmd |= PXA_DCMD_INCTRGADDR; |
919 | if (chan->drcmr <= DRCMR_CHLNUM) | ||
920 | *dcmd |= PXA_DCMD_FLOWSRC; | ||
914 | } | 921 | } |
915 | if (dir == DMA_MEM_TO_DEV) { | 922 | if (dir == DMA_MEM_TO_DEV) { |
916 | maxburst = chan->cfg.dst_maxburst; | 923 | maxburst = chan->cfg.dst_maxburst; |
917 | width = chan->cfg.dst_addr_width; | 924 | width = chan->cfg.dst_addr_width; |
918 | dev_addr = chan->cfg.dst_addr; | 925 | dev_addr = chan->cfg.dst_addr; |
919 | *dev_dst = dev_addr; | 926 | *dev_dst = dev_addr; |
920 | *dcmd |= PXA_DCMD_INCSRCADDR | PXA_DCMD_FLOWTRG; | 927 | *dcmd |= PXA_DCMD_INCSRCADDR; |
928 | if (chan->drcmr <= DRCMR_CHLNUM) | ||
929 | *dcmd |= PXA_DCMD_FLOWTRG; | ||
921 | } | 930 | } |
922 | if (dir == DMA_MEM_TO_MEM) | 931 | if (dir == DMA_MEM_TO_MEM) |
923 | *dcmd |= PXA_DCMD_BURST32 | PXA_DCMD_INCTRGADDR | | 932 | *dcmd |= PXA_DCMD_BURST32 | PXA_DCMD_INCTRGADDR | |
@@ -1177,6 +1186,16 @@ static unsigned int pxad_residue(struct pxad_chan *chan, | |||
1177 | else | 1186 | else |
1178 | curr = phy_readl_relaxed(chan->phy, DTADR); | 1187 | curr = phy_readl_relaxed(chan->phy, DTADR); |
1179 | 1188 | ||
1189 | /* | ||
1190 | * curr has to be actually read before checking descriptor | ||
1191 | * completion, so that a curr inside a status updater | ||
1192 | * descriptor implies the following test returns true, and | ||
1193 | * preventing reordering of curr load and the test. | ||
1194 | */ | ||
1195 | rmb(); | ||
1196 | if (is_desc_completed(vd)) | ||
1197 | goto out; | ||
1198 | |||
1180 | for (i = 0; i < sw_desc->nb_desc - 1; i++) { | 1199 | for (i = 0; i < sw_desc->nb_desc - 1; i++) { |
1181 | hw_desc = sw_desc->hw_desc[i]; | 1200 | hw_desc = sw_desc->hw_desc[i]; |
1182 | if (sw_desc->hw_desc[0]->dcmd & PXA_DCMD_INCSRCADDR) | 1201 | if (sw_desc->hw_desc[0]->dcmd & PXA_DCMD_INCSRCADDR) |
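The rmb() added in the pxa_dma hunk orders the read of the progress register against the later completion check. A rough C11 userspace analogue is shown below, using atomic_thread_fence(memory_order_acquire) in place of the kernel barrier; the variable names are invented and the mapping to the driver's registers is only illustrative.

#include <stdatomic.h>
#include <stdbool.h>

static _Atomic unsigned long curr_addr;
static atomic_bool desc_completed;

static unsigned long read_progress(bool *completed)
{
	unsigned long curr = atomic_load_explicit(&curr_addr,
						  memory_order_relaxed);

	/* Keep the curr_addr load from being reordered after the
	 * completion test, mirroring the rmb() in the hunk above. */
	atomic_thread_fence(memory_order_acquire);

	*completed = atomic_load_explicit(&desc_completed,
					  memory_order_relaxed);
	return curr;
}

int main(void)
{
	bool done;
	unsigned long curr;

	atomic_store(&curr_addr, 0x1000UL);
	atomic_store(&desc_completed, true);

	curr = read_progress(&done);
	return (curr == 0x1000UL && done) ? 0 : 1;
}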
diff --git a/drivers/dma/sun4i-dma.c b/drivers/dma/sun4i-dma.c index a1a500d96ff2..1661d518224a 100644 --- a/drivers/dma/sun4i-dma.c +++ b/drivers/dma/sun4i-dma.c | |||
@@ -599,13 +599,13 @@ get_next_cyclic_promise(struct sun4i_dma_contract *contract) | |||
599 | static void sun4i_dma_free_contract(struct virt_dma_desc *vd) | 599 | static void sun4i_dma_free_contract(struct virt_dma_desc *vd) |
600 | { | 600 | { |
601 | struct sun4i_dma_contract *contract = to_sun4i_dma_contract(vd); | 601 | struct sun4i_dma_contract *contract = to_sun4i_dma_contract(vd); |
602 | struct sun4i_dma_promise *promise; | 602 | struct sun4i_dma_promise *promise, *tmp; |
603 | 603 | ||
604 | /* Free all the demands and completed demands */ | 604 | /* Free all the demands and completed demands */ |
605 | list_for_each_entry(promise, &contract->demands, list) | 605 | list_for_each_entry_safe(promise, tmp, &contract->demands, list) |
606 | kfree(promise); | 606 | kfree(promise); |
607 | 607 | ||
608 | list_for_each_entry(promise, &contract->completed_demands, list) | 608 | list_for_each_entry_safe(promise, tmp, &contract->completed_demands, list) |
609 | kfree(promise); | 609 | kfree(promise); |
610 | 610 | ||
611 | kfree(contract); | 611 | kfree(contract); |
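The sun4i-dma hunk switches to list_for_each_entry_safe() because the loop body frees the node the iterator is standing on. The same rule applies to any linked list; below is a minimal plain-C analogue with a hand-rolled singly linked list (struct promise is just an illustrative name).

#include <stdlib.h>

/* Invented list node; payload omitted. */
struct promise {
	struct promise *next;
};

/* Free every node: read 'next' before freeing the current node. */
static void free_all(struct promise *head)
{
	struct promise *p = head, *tmp;

	while (p) {
		tmp = p->next;		/* must be saved before free(p) */
		free(p);
		p = tmp;
	}
}

int main(void)
{
	struct promise *head = NULL;

	for (int i = 0; i < 3; i++) {
		struct promise *p = malloc(sizeof(*p));

		if (!p)
			break;
		p->next = head;
		head = p;
	}
	free_all(head);
	return 0;
}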
diff --git a/drivers/dma/xgene-dma.c b/drivers/dma/xgene-dma.c index b23e8d52d126..8d57b1b12e41 100644 --- a/drivers/dma/xgene-dma.c +++ b/drivers/dma/xgene-dma.c | |||
@@ -59,7 +59,6 @@ | |||
59 | #define XGENE_DMA_RING_MEM_RAM_SHUTDOWN 0xD070 | 59 | #define XGENE_DMA_RING_MEM_RAM_SHUTDOWN 0xD070 |
60 | #define XGENE_DMA_RING_BLK_MEM_RDY 0xD074 | 60 | #define XGENE_DMA_RING_BLK_MEM_RDY 0xD074 |
61 | #define XGENE_DMA_RING_BLK_MEM_RDY_VAL 0xFFFFFFFF | 61 | #define XGENE_DMA_RING_BLK_MEM_RDY_VAL 0xFFFFFFFF |
62 | #define XGENE_DMA_RING_DESC_CNT(v) (((v) & 0x0001FFFE) >> 1) | ||
63 | #define XGENE_DMA_RING_ID_GET(owner, num) (((owner) << 6) | (num)) | 62 | #define XGENE_DMA_RING_ID_GET(owner, num) (((owner) << 6) | (num)) |
64 | #define XGENE_DMA_RING_DST_ID(v) ((1 << 10) | (v)) | 63 | #define XGENE_DMA_RING_DST_ID(v) ((1 << 10) | (v)) |
65 | #define XGENE_DMA_RING_CMD_OFFSET 0x2C | 64 | #define XGENE_DMA_RING_CMD_OFFSET 0x2C |
@@ -379,14 +378,6 @@ static u8 xgene_dma_encode_xor_flyby(u32 src_cnt) | |||
379 | return flyby_type[src_cnt]; | 378 | return flyby_type[src_cnt]; |
380 | } | 379 | } |
381 | 380 | ||
382 | static u32 xgene_dma_ring_desc_cnt(struct xgene_dma_ring *ring) | ||
383 | { | ||
384 | u32 __iomem *cmd_base = ring->cmd_base; | ||
385 | u32 ring_state = ioread32(&cmd_base[1]); | ||
386 | |||
387 | return XGENE_DMA_RING_DESC_CNT(ring_state); | ||
388 | } | ||
389 | |||
390 | static void xgene_dma_set_src_buffer(__le64 *ext8, size_t *len, | 381 | static void xgene_dma_set_src_buffer(__le64 *ext8, size_t *len, |
391 | dma_addr_t *paddr) | 382 | dma_addr_t *paddr) |
392 | { | 383 | { |
@@ -659,15 +650,12 @@ static void xgene_dma_clean_running_descriptor(struct xgene_dma_chan *chan, | |||
659 | dma_pool_free(chan->desc_pool, desc, desc->tx.phys); | 650 | dma_pool_free(chan->desc_pool, desc, desc->tx.phys); |
660 | } | 651 | } |
661 | 652 | ||
662 | static int xgene_chan_xfer_request(struct xgene_dma_ring *ring, | 653 | static void xgene_chan_xfer_request(struct xgene_dma_chan *chan, |
663 | struct xgene_dma_desc_sw *desc_sw) | 654 | struct xgene_dma_desc_sw *desc_sw) |
664 | { | 655 | { |
656 | struct xgene_dma_ring *ring = &chan->tx_ring; | ||
665 | struct xgene_dma_desc_hw *desc_hw; | 657 | struct xgene_dma_desc_hw *desc_hw; |
666 | 658 | ||
667 | /* Check if can push more descriptor to hw for execution */ | ||
668 | if (xgene_dma_ring_desc_cnt(ring) > (ring->slots - 2)) | ||
669 | return -EBUSY; | ||
670 | |||
671 | /* Get hw descriptor from DMA tx ring */ | 659 | /* Get hw descriptor from DMA tx ring */ |
672 | desc_hw = &ring->desc_hw[ring->head]; | 660 | desc_hw = &ring->desc_hw[ring->head]; |
673 | 661 | ||
@@ -694,11 +682,13 @@ static int xgene_chan_xfer_request(struct xgene_dma_ring *ring, | |||
694 | memcpy(desc_hw, &desc_sw->desc2, sizeof(*desc_hw)); | 682 | memcpy(desc_hw, &desc_sw->desc2, sizeof(*desc_hw)); |
695 | } | 683 | } |
696 | 684 | ||
685 | /* Increment the pending transaction count */ | ||
686 | chan->pending += ((desc_sw->flags & | ||
687 | XGENE_DMA_FLAG_64B_DESC) ? 2 : 1); | ||
688 | |||
697 | /* Notify the hw that we have descriptor ready for execution */ | 689 | /* Notify the hw that we have descriptor ready for execution */ |
698 | iowrite32((desc_sw->flags & XGENE_DMA_FLAG_64B_DESC) ? | 690 | iowrite32((desc_sw->flags & XGENE_DMA_FLAG_64B_DESC) ? |
699 | 2 : 1, ring->cmd); | 691 | 2 : 1, ring->cmd); |
700 | |||
701 | return 0; | ||
702 | } | 692 | } |
703 | 693 | ||
704 | /** | 694 | /** |
@@ -710,7 +700,6 @@ static int xgene_chan_xfer_request(struct xgene_dma_ring *ring, | |||
710 | static void xgene_chan_xfer_ld_pending(struct xgene_dma_chan *chan) | 700 | static void xgene_chan_xfer_ld_pending(struct xgene_dma_chan *chan) |
711 | { | 701 | { |
712 | struct xgene_dma_desc_sw *desc_sw, *_desc_sw; | 702 | struct xgene_dma_desc_sw *desc_sw, *_desc_sw; |
713 | int ret; | ||
714 | 703 | ||
715 | /* | 704 | /* |
716 | * If the list of pending descriptors is empty, then we | 705 | * If the list of pending descriptors is empty, then we |
@@ -735,18 +724,13 @@ static void xgene_chan_xfer_ld_pending(struct xgene_dma_chan *chan) | |||
735 | if (chan->pending >= chan->max_outstanding) | 724 | if (chan->pending >= chan->max_outstanding) |
736 | return; | 725 | return; |
737 | 726 | ||
738 | ret = xgene_chan_xfer_request(&chan->tx_ring, desc_sw); | 727 | xgene_chan_xfer_request(chan, desc_sw); |
739 | if (ret) | ||
740 | return; | ||
741 | 728 | ||
742 | /* | 729 | /* |
743 | * Delete this element from ld pending queue and append it to | 730 | * Delete this element from ld pending queue and append it to |
744 | * ld running queue | 731 | * ld running queue |
745 | */ | 732 | */ |
746 | list_move_tail(&desc_sw->node, &chan->ld_running); | 733 | list_move_tail(&desc_sw->node, &chan->ld_running); |
747 | |||
748 | /* Increment the pending transaction count */ | ||
749 | chan->pending++; | ||
750 | } | 734 | } |
751 | } | 735 | } |
752 | 736 | ||
@@ -821,7 +805,8 @@ static void xgene_dma_cleanup_descriptors(struct xgene_dma_chan *chan) | |||
821 | * Decrement the pending transaction count | 805 | * Decrement the pending transaction count |
822 | * as we have processed one | 806 | * as we have processed one |
823 | */ | 807 | */ |
824 | chan->pending--; | 808 | chan->pending -= ((desc_sw->flags & |
809 | XGENE_DMA_FLAG_64B_DESC) ? 2 : 1); | ||
825 | 810 | ||
826 | /* | 811 | /* |
827 | * Delete this node from ld running queue and append it to | 812 | * Delete this node from ld running queue and append it to |
@@ -1421,15 +1406,18 @@ static int xgene_dma_create_ring_one(struct xgene_dma_chan *chan, | |||
1421 | struct xgene_dma_ring *ring, | 1406 | struct xgene_dma_ring *ring, |
1422 | enum xgene_dma_ring_cfgsize cfgsize) | 1407 | enum xgene_dma_ring_cfgsize cfgsize) |
1423 | { | 1408 | { |
1409 | int ret; | ||
1410 | |||
1424 | /* Setup DMA ring descriptor variables */ | 1411 | /* Setup DMA ring descriptor variables */ |
1425 | ring->pdma = chan->pdma; | 1412 | ring->pdma = chan->pdma; |
1426 | ring->cfgsize = cfgsize; | 1413 | ring->cfgsize = cfgsize; |
1427 | ring->num = chan->pdma->ring_num++; | 1414 | ring->num = chan->pdma->ring_num++; |
1428 | ring->id = XGENE_DMA_RING_ID_GET(ring->owner, ring->buf_num); | 1415 | ring->id = XGENE_DMA_RING_ID_GET(ring->owner, ring->buf_num); |
1429 | 1416 | ||
1430 | ring->size = xgene_dma_get_ring_size(chan, cfgsize); | 1417 | ret = xgene_dma_get_ring_size(chan, cfgsize); |
1431 | if (ring->size <= 0) | 1418 | if (ret <= 0) |
1432 | return ring->size; | 1419 | return ret; |
1420 | ring->size = ret; | ||
1433 | 1421 | ||
1434 | /* Allocate memory for DMA ring descriptor */ | 1422 | /* Allocate memory for DMA ring descriptor */ |
1435 | ring->desc_vaddr = dma_zalloc_coherent(chan->dev, ring->size, | 1423 | ring->desc_vaddr = dma_zalloc_coherent(chan->dev, ring->size, |
@@ -1482,7 +1470,7 @@ static int xgene_dma_create_chan_rings(struct xgene_dma_chan *chan) | |||
1482 | tx_ring->id, tx_ring->num, tx_ring->desc_vaddr); | 1470 | tx_ring->id, tx_ring->num, tx_ring->desc_vaddr); |
1483 | 1471 | ||
1484 | /* Set the max outstanding request possible to this channel */ | 1472 | /* Set the max outstanding request possible to this channel */ |
1485 | chan->max_outstanding = rx_ring->slots; | 1473 | chan->max_outstanding = tx_ring->slots; |
1486 | 1474 | ||
1487 | return ret; | 1475 | return ret; |
1488 | } | 1476 | } |
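Several of the xgene-dma changes above make the pending counter track ring slots rather than descriptors, since a 64-byte descriptor occupies two slots. A small sketch of that accounting, with invented names and a made-up flag value, follows.

#include <stdbool.h>
#include <stdio.h>

#define FLAG_64B_DESC 0x1	/* made-up flag value */

/* Channel bookkeeping expressed in ring slots, not descriptors. */
struct chan {
	unsigned int pending;
	unsigned int max_outstanding;
};

/* A descriptor occupies one slot, or two when the 64-byte flag is set. */
static unsigned int slots_for(unsigned int flags)
{
	return (flags & FLAG_64B_DESC) ? 2 : 1;
}

static bool submit(struct chan *c, unsigned int flags)
{
	if (c->pending >= c->max_outstanding)
		return false;			/* ring treated as full */
	c->pending += slots_for(flags);
	return true;
}

static void complete(struct chan *c, unsigned int flags)
{
	c->pending -= slots_for(flags);
}

int main(void)
{
	struct chan c = { .pending = 0, .max_outstanding = 4 };

	submit(&c, FLAG_64B_DESC);		/* takes 2 slots */
	submit(&c, 0);				/* takes 1 slot  */
	printf("pending slots: %u\n", c.pending);

	complete(&c, FLAG_64B_DESC);
	printf("pending slots: %u\n", c.pending);
	return 0;
}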
diff --git a/drivers/dma/zx296702_dma.c b/drivers/dma/zx296702_dma.c index 39915a6b7986..c017fcd8e07c 100644 --- a/drivers/dma/zx296702_dma.c +++ b/drivers/dma/zx296702_dma.c | |||
@@ -739,7 +739,7 @@ static struct dma_chan *zx_of_dma_simple_xlate(struct of_phandle_args *dma_spec, | |||
739 | struct dma_chan *chan; | 739 | struct dma_chan *chan; |
740 | struct zx_dma_chan *c; | 740 | struct zx_dma_chan *c; |
741 | 741 | ||
742 | if (request > d->dma_requests) | 742 | if (request >= d->dma_requests) |
743 | return NULL; | 743 | return NULL; |
744 | 744 | ||
745 | chan = dma_get_any_slave_channel(&d->slave); | 745 | chan = dma_get_any_slave_channel(&d->slave); |
diff --git a/drivers/firmware/efi/libstub/arm-stub.c b/drivers/firmware/efi/libstub/arm-stub.c index e29560e6b40b..950c87f5d279 100644 --- a/drivers/firmware/efi/libstub/arm-stub.c +++ b/drivers/firmware/efi/libstub/arm-stub.c | |||
@@ -13,6 +13,7 @@ | |||
13 | */ | 13 | */ |
14 | 14 | ||
15 | #include <linux/efi.h> | 15 | #include <linux/efi.h> |
16 | #include <linux/sort.h> | ||
16 | #include <asm/efi.h> | 17 | #include <asm/efi.h> |
17 | 18 | ||
18 | #include "efistub.h" | 19 | #include "efistub.h" |
@@ -305,6 +306,44 @@ fail: | |||
305 | */ | 306 | */ |
306 | #define EFI_RT_VIRTUAL_BASE 0x40000000 | 307 | #define EFI_RT_VIRTUAL_BASE 0x40000000 |
307 | 308 | ||
309 | static int cmp_mem_desc(const void *l, const void *r) | ||
310 | { | ||
311 | const efi_memory_desc_t *left = l, *right = r; | ||
312 | |||
313 | return (left->phys_addr > right->phys_addr) ? 1 : -1; | ||
314 | } | ||
315 | |||
316 | /* | ||
317 | * Returns whether region @left ends exactly where region @right starts, | ||
318 | * or false if either argument is NULL. | ||
319 | */ | ||
320 | static bool regions_are_adjacent(efi_memory_desc_t *left, | ||
321 | efi_memory_desc_t *right) | ||
322 | { | ||
323 | u64 left_end; | ||
324 | |||
325 | if (left == NULL || right == NULL) | ||
326 | return false; | ||
327 | |||
328 | left_end = left->phys_addr + left->num_pages * EFI_PAGE_SIZE; | ||
329 | |||
330 | return left_end == right->phys_addr; | ||
331 | } | ||
332 | |||
333 | /* | ||
334 | * Returns whether region @left and region @right have compatible memory type | ||
335 | * mapping attributes, and are both EFI_MEMORY_RUNTIME regions. | ||
336 | */ | ||
337 | static bool regions_have_compatible_memory_type_attrs(efi_memory_desc_t *left, | ||
338 | efi_memory_desc_t *right) | ||
339 | { | ||
340 | static const u64 mem_type_mask = EFI_MEMORY_WB | EFI_MEMORY_WT | | ||
341 | EFI_MEMORY_WC | EFI_MEMORY_UC | | ||
342 | EFI_MEMORY_RUNTIME; | ||
343 | |||
344 | return ((left->attribute ^ right->attribute) & mem_type_mask) == 0; | ||
345 | } | ||
346 | |||
308 | /* | 347 | /* |
309 | * efi_get_virtmap() - create a virtual mapping for the EFI memory map | 348 | * efi_get_virtmap() - create a virtual mapping for the EFI memory map |
310 | * | 349 | * |
@@ -317,33 +356,52 @@ void efi_get_virtmap(efi_memory_desc_t *memory_map, unsigned long map_size, | |||
317 | int *count) | 356 | int *count) |
318 | { | 357 | { |
319 | u64 efi_virt_base = EFI_RT_VIRTUAL_BASE; | 358 | u64 efi_virt_base = EFI_RT_VIRTUAL_BASE; |
320 | efi_memory_desc_t *out = runtime_map; | 359 | efi_memory_desc_t *in, *prev = NULL, *out = runtime_map; |
321 | int l; | 360 | int l; |
322 | 361 | ||
323 | for (l = 0; l < map_size; l += desc_size) { | 362 | /* |
324 | efi_memory_desc_t *in = (void *)memory_map + l; | 363 | * To work around potential issues with the Properties Table feature |
364 | * introduced in UEFI 2.5, which may split PE/COFF executable images | ||
365 | * in memory into several RuntimeServicesCode and RuntimeServicesData | ||
366 | * regions, we need to preserve the relative offsets between adjacent | ||
367 | * EFI_MEMORY_RUNTIME regions with the same memory type attributes. | ||
368 | * The easiest way to find adjacent regions is to sort the memory map | ||
369 | * before traversing it. | ||
370 | */ | ||
371 | sort(memory_map, map_size / desc_size, desc_size, cmp_mem_desc, NULL); | ||
372 | |||
373 | for (l = 0; l < map_size; l += desc_size, prev = in) { | ||
325 | u64 paddr, size; | 374 | u64 paddr, size; |
326 | 375 | ||
376 | in = (void *)memory_map + l; | ||
327 | if (!(in->attribute & EFI_MEMORY_RUNTIME)) | 377 | if (!(in->attribute & EFI_MEMORY_RUNTIME)) |
328 | continue; | 378 | continue; |
329 | 379 | ||
380 | paddr = in->phys_addr; | ||
381 | size = in->num_pages * EFI_PAGE_SIZE; | ||
382 | |||
330 | /* | 383 | /* |
331 | * Make the mapping compatible with 64k pages: this allows | 384 | * Make the mapping compatible with 64k pages: this allows |
332 | * a 4k page size kernel to kexec a 64k page size kernel and | 385 | * a 4k page size kernel to kexec a 64k page size kernel and |
333 | * vice versa. | 386 | * vice versa. |
334 | */ | 387 | */ |
335 | paddr = round_down(in->phys_addr, SZ_64K); | 388 | if (!regions_are_adjacent(prev, in) || |
336 | size = round_up(in->num_pages * EFI_PAGE_SIZE + | 389 | !regions_have_compatible_memory_type_attrs(prev, in)) { |
337 | in->phys_addr - paddr, SZ_64K); | 390 | |
338 | 391 | paddr = round_down(in->phys_addr, SZ_64K); | |
339 | /* | 392 | size += in->phys_addr - paddr; |
340 | * Avoid wasting memory on PTEs by choosing a virtual base that | 393 | |
341 | * is compatible with section mappings if this region has the | 394 | /* |
342 | * appropriate size and physical alignment. (Sections are 2 MB | 395 | * Avoid wasting memory on PTEs by choosing a virtual |
343 | * on 4k granule kernels) | 396 | * base that is compatible with section mappings if this |
344 | */ | 397 | * region has the appropriate size and physical |
345 | if (IS_ALIGNED(in->phys_addr, SZ_2M) && size >= SZ_2M) | 398 | * alignment. (Sections are 2 MB on 4k granule kernels) |
346 | efi_virt_base = round_up(efi_virt_base, SZ_2M); | 399 | */ |
400 | if (IS_ALIGNED(in->phys_addr, SZ_2M) && size >= SZ_2M) | ||
401 | efi_virt_base = round_up(efi_virt_base, SZ_2M); | ||
402 | else | ||
403 | efi_virt_base = round_up(efi_virt_base, SZ_64K); | ||
404 | } | ||
347 | 405 | ||
348 | in->virt_addr = efi_virt_base + in->phys_addr - paddr; | 406 | in->virt_addr = efi_virt_base + in->phys_addr - paddr; |
349 | efi_virt_base += size; | 407 | efi_virt_base += size; |
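The arm-stub change sorts the EFI memory map by physical address and then checks whether consecutive EFI_MEMORY_RUNTIME regions are adjacent, so that regions split by the UEFI 2.5 Properties Table keep their relative offsets. The userspace sketch below shows the same sort-then-check-adjacency step with qsort(); struct region and the sample addresses are invented, and the comparator is written as a full three-way compare rather than the two-way one in the patch.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define PAGE_SIZE 4096ULL

/* Trimmed-down stand-in for efi_memory_desc_t. */
struct region {
	uint64_t phys_addr;
	uint64_t num_pages;
};

/* Sort regions by ascending physical address (cf. cmp_mem_desc). */
static int cmp_region(const void *l, const void *r)
{
	const struct region *a = l, *b = r;

	return (a->phys_addr > b->phys_addr) - (a->phys_addr < b->phys_addr);
}

/* True when 'left' ends exactly where 'right' begins. */
static bool adjacent(const struct region *left, const struct region *right)
{
	return left->phys_addr + left->num_pages * PAGE_SIZE ==
	       right->phys_addr;
}

int main(void)
{
	struct region map[] = {
		{ 0x42000000, 16 },
		{ 0x40000000, 16 },
		{ 0x40010000, 32 },
	};
	size_t n = sizeof(map) / sizeof(map[0]);

	qsort(map, n, sizeof(map[0]), cmp_region);

	for (size_t i = 1; i < n; i++)
		printf("region %zu %s region %zu\n", i - 1,
		       adjacent(&map[i - 1], &map[i]) ? "abuts" : "is apart from",
		       i);
	return 0;
}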
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c index 1c3fc99c5465..8e995148f56e 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c | |||
@@ -208,44 +208,6 @@ static int amdgpu_cgs_alloc_gpu_mem(void *cgs_device, | |||
208 | return ret; | 208 | return ret; |
209 | } | 209 | } |
210 | 210 | ||
211 | static int amdgpu_cgs_import_gpu_mem(void *cgs_device, int dmabuf_fd, | ||
212 | cgs_handle_t *handle) | ||
213 | { | ||
214 | CGS_FUNC_ADEV; | ||
215 | int r; | ||
216 | uint32_t dma_handle; | ||
217 | struct drm_gem_object *obj; | ||
218 | struct amdgpu_bo *bo; | ||
219 | struct drm_device *dev = adev->ddev; | ||
220 | struct drm_file *file_priv = NULL, *priv; | ||
221 | |||
222 | mutex_lock(&dev->struct_mutex); | ||
223 | list_for_each_entry(priv, &dev->filelist, lhead) { | ||
224 | rcu_read_lock(); | ||
225 | if (priv->pid == get_pid(task_pid(current))) | ||
226 | file_priv = priv; | ||
227 | rcu_read_unlock(); | ||
228 | if (file_priv) | ||
229 | break; | ||
230 | } | ||
231 | mutex_unlock(&dev->struct_mutex); | ||
232 | r = dev->driver->prime_fd_to_handle(dev, | ||
233 | file_priv, dmabuf_fd, | ||
234 | &dma_handle); | ||
235 | spin_lock(&file_priv->table_lock); | ||
236 | |||
237 | /* Check if we currently have a reference on the object */ | ||
238 | obj = idr_find(&file_priv->object_idr, dma_handle); | ||
239 | if (obj == NULL) { | ||
240 | spin_unlock(&file_priv->table_lock); | ||
241 | return -EINVAL; | ||
242 | } | ||
243 | spin_unlock(&file_priv->table_lock); | ||
244 | bo = gem_to_amdgpu_bo(obj); | ||
245 | *handle = (cgs_handle_t)bo; | ||
246 | return 0; | ||
247 | } | ||
248 | |||
249 | static int amdgpu_cgs_free_gpu_mem(void *cgs_device, cgs_handle_t handle) | 211 | static int amdgpu_cgs_free_gpu_mem(void *cgs_device, cgs_handle_t handle) |
250 | { | 212 | { |
251 | struct amdgpu_bo *obj = (struct amdgpu_bo *)handle; | 213 | struct amdgpu_bo *obj = (struct amdgpu_bo *)handle; |
@@ -810,7 +772,6 @@ static const struct cgs_ops amdgpu_cgs_ops = { | |||
810 | }; | 772 | }; |
811 | 773 | ||
812 | static const struct cgs_os_ops amdgpu_cgs_os_ops = { | 774 | static const struct cgs_os_ops amdgpu_cgs_os_ops = { |
813 | amdgpu_cgs_import_gpu_mem, | ||
814 | amdgpu_cgs_add_irq_source, | 775 | amdgpu_cgs_add_irq_source, |
815 | amdgpu_cgs_irq_get, | 776 | amdgpu_cgs_irq_get, |
816 | amdgpu_cgs_irq_put | 777 | amdgpu_cgs_irq_put |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c index 749420f1ea6f..cb3c274edb0a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | |||
@@ -156,7 +156,8 @@ int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data) | |||
156 | uint64_t *chunk_array_user; | 156 | uint64_t *chunk_array_user; |
157 | uint64_t *chunk_array; | 157 | uint64_t *chunk_array; |
158 | struct amdgpu_fpriv *fpriv = p->filp->driver_priv; | 158 | struct amdgpu_fpriv *fpriv = p->filp->driver_priv; |
159 | unsigned size, i; | 159 | unsigned size; |
160 | int i; | ||
160 | int ret; | 161 | int ret; |
161 | 162 | ||
162 | if (cs->in.num_chunks == 0) | 163 | if (cs->in.num_chunks == 0) |
diff --git a/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c b/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c index cd6edc40c9cd..1e0bba29e167 100644 --- a/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c +++ b/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c | |||
@@ -1279,8 +1279,7 @@ amdgpu_atombios_encoder_setup_dig(struct drm_encoder *encoder, int action) | |||
1279 | amdgpu_atombios_encoder_setup_dig_encoder(encoder, ATOM_ENCODER_CMD_DP_VIDEO_ON, 0); | 1279 | amdgpu_atombios_encoder_setup_dig_encoder(encoder, ATOM_ENCODER_CMD_DP_VIDEO_ON, 0); |
1280 | } | 1280 | } |
1281 | if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) | 1281 | if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) |
1282 | amdgpu_atombios_encoder_setup_dig_transmitter(encoder, | 1282 | amdgpu_atombios_encoder_set_backlight_level(amdgpu_encoder, dig->backlight_level); |
1283 | ATOM_TRANSMITTER_ACTION_LCD_BLON, 0, 0); | ||
1284 | if (ext_encoder) | 1283 | if (ext_encoder) |
1285 | amdgpu_atombios_encoder_setup_external_encoder(encoder, ext_encoder, ATOM_ENABLE); | 1284 | amdgpu_atombios_encoder_setup_external_encoder(encoder, ext_encoder, ATOM_ENABLE); |
1286 | } else { | 1285 | } else { |
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c index 774528ab8704..fab5471d25d7 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c | |||
@@ -1262,6 +1262,12 @@ static int gmc_v7_0_process_interrupt(struct amdgpu_device *adev, | |||
1262 | addr = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_ADDR); | 1262 | addr = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_ADDR); |
1263 | status = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_STATUS); | 1263 | status = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_STATUS); |
1264 | mc_client = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_MCCLIENT); | 1264 | mc_client = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_MCCLIENT); |
1265 | /* reset addr and status */ | ||
1266 | WREG32_P(mmVM_CONTEXT1_CNTL2, 1, ~1); | ||
1267 | |||
1268 | if (!addr && !status) | ||
1269 | return 0; | ||
1270 | |||
1265 | dev_err(adev->dev, "GPU fault detected: %d 0x%08x\n", | 1271 | dev_err(adev->dev, "GPU fault detected: %d 0x%08x\n", |
1266 | entry->src_id, entry->src_data); | 1272 | entry->src_id, entry->src_data); |
1267 | dev_err(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n", | 1273 | dev_err(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n", |
@@ -1269,8 +1275,6 @@ static int gmc_v7_0_process_interrupt(struct amdgpu_device *adev, | |||
1269 | dev_err(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n", | 1275 | dev_err(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n", |
1270 | status); | 1276 | status); |
1271 | gmc_v7_0_vm_decode_fault(adev, status, addr, mc_client); | 1277 | gmc_v7_0_vm_decode_fault(adev, status, addr, mc_client); |
1272 | /* reset addr and status */ | ||
1273 | WREG32_P(mmVM_CONTEXT1_CNTL2, 1, ~1); | ||
1274 | 1278 | ||
1275 | return 0; | 1279 | return 0; |
1276 | } | 1280 | } |
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c index 9a07742620d0..7bc9e9fcf3d2 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c | |||
@@ -1262,6 +1262,12 @@ static int gmc_v8_0_process_interrupt(struct amdgpu_device *adev, | |||
1262 | addr = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_ADDR); | 1262 | addr = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_ADDR); |
1263 | status = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_STATUS); | 1263 | status = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_STATUS); |
1264 | mc_client = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_MCCLIENT); | 1264 | mc_client = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_MCCLIENT); |
1265 | /* reset addr and status */ | ||
1266 | WREG32_P(mmVM_CONTEXT1_CNTL2, 1, ~1); | ||
1267 | |||
1268 | if (!addr && !status) | ||
1269 | return 0; | ||
1270 | |||
1265 | dev_err(adev->dev, "GPU fault detected: %d 0x%08x\n", | 1271 | dev_err(adev->dev, "GPU fault detected: %d 0x%08x\n", |
1266 | entry->src_id, entry->src_data); | 1272 | entry->src_id, entry->src_data); |
1267 | dev_err(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n", | 1273 | dev_err(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n", |
@@ -1269,8 +1275,6 @@ static int gmc_v8_0_process_interrupt(struct amdgpu_device *adev, | |||
1269 | dev_err(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n", | 1275 | dev_err(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n", |
1270 | status); | 1276 | status); |
1271 | gmc_v8_0_vm_decode_fault(adev, status, addr, mc_client); | 1277 | gmc_v8_0_vm_decode_fault(adev, status, addr, mc_client); |
1272 | /* reset addr and status */ | ||
1273 | WREG32_P(mmVM_CONTEXT1_CNTL2, 1, ~1); | ||
1274 | 1278 | ||
1275 | return 0; | 1279 | return 0; |
1276 | } | 1280 | } |
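Both gmc_v7_0 and gmc_v8_0 now acknowledge the fault registers first and bail out early when both the address and status read back as zero, treating that as a spurious interrupt. A simplified sketch of that read-ack-then-filter order is below; struct fault_regs and its fields are invented for illustration.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Invented fault registers latched by an interrupt. */
struct fault_regs {
	uint32_t addr;
	uint32_t status;
	uint32_t ack;		/* write 1 to reset addr and status */
};

/* Read the latched values, acknowledge immediately, then skip the
 * expensive decode when nothing was actually latched. */
static int handle_fault(struct fault_regs *r)
{
	uint32_t addr = r->addr;
	uint32_t status = r->status;

	r->ack = 1;		/* reset addr and status */

	if (!addr && !status)
		return 0;	/* spurious: nothing to report */

	fprintf(stderr, "fault at 0x%08" PRIx32 ", status 0x%08" PRIx32 "\n",
		addr, status);
	return 1;
}

int main(void)
{
	struct fault_regs spurious = { 0, 0, 0 };
	struct fault_regs real = { 0x1000, 0xc, 0 };

	handle_fault(&spurious);	/* silently re-armed */
	handle_fault(&real);		/* decoded and reported */
	return 0;
}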
diff --git a/drivers/gpu/drm/amd/include/cgs_linux.h b/drivers/gpu/drm/amd/include/cgs_linux.h index 488642f08267..3b47ae313e36 100644 --- a/drivers/gpu/drm/amd/include/cgs_linux.h +++ b/drivers/gpu/drm/amd/include/cgs_linux.h | |||
@@ -27,19 +27,6 @@ | |||
27 | #include "cgs_common.h" | 27 | #include "cgs_common.h" |
28 | 28 | ||
29 | /** | 29 | /** |
30 | * cgs_import_gpu_mem() - Import dmabuf handle | ||
31 | * @cgs_device: opaque device handle | ||
32 | * @dmabuf_fd: DMABuf file descriptor | ||
33 | * @handle: memory handle (output) | ||
34 | * | ||
35 | * Must be called in the process context that dmabuf_fd belongs to. | ||
36 | * | ||
37 | * Return: 0 on success, -errno otherwise | ||
38 | */ | ||
39 | typedef int (*cgs_import_gpu_mem_t)(void *cgs_device, int dmabuf_fd, | ||
40 | cgs_handle_t *handle); | ||
41 | |||
42 | /** | ||
43 | * cgs_irq_source_set_func() - Callback for enabling/disabling interrupt sources | 30 | * cgs_irq_source_set_func() - Callback for enabling/disabling interrupt sources |
44 | * @private_data: private data provided to cgs_add_irq_source | 31 | * @private_data: private data provided to cgs_add_irq_source |
45 | * @src_id: interrupt source ID | 32 | * @src_id: interrupt source ID |
@@ -114,16 +101,12 @@ typedef int (*cgs_irq_get_t)(void *cgs_device, unsigned src_id, unsigned type); | |||
114 | typedef int (*cgs_irq_put_t)(void *cgs_device, unsigned src_id, unsigned type); | 101 | typedef int (*cgs_irq_put_t)(void *cgs_device, unsigned src_id, unsigned type); |
115 | 102 | ||
116 | struct cgs_os_ops { | 103 | struct cgs_os_ops { |
117 | cgs_import_gpu_mem_t import_gpu_mem; | ||
118 | |||
119 | /* IRQ handling */ | 104 | /* IRQ handling */ |
120 | cgs_add_irq_source_t add_irq_source; | 105 | cgs_add_irq_source_t add_irq_source; |
121 | cgs_irq_get_t irq_get; | 106 | cgs_irq_get_t irq_get; |
122 | cgs_irq_put_t irq_put; | 107 | cgs_irq_put_t irq_put; |
123 | }; | 108 | }; |
124 | 109 | ||
125 | #define cgs_import_gpu_mem(dev,dmabuf_fd,handle) \ | ||
126 | CGS_OS_CALL(import_gpu_mem,dev,dmabuf_fd,handle) | ||
127 | #define cgs_add_irq_source(dev,src_id,num_types,set,handler,private_data) \ | 110 | #define cgs_add_irq_source(dev,src_id,num_types,set,handler,private_data) \ |
128 | CGS_OS_CALL(add_irq_source,dev,src_id,num_types,set,handler, \ | 111 | CGS_OS_CALL(add_irq_source,dev,src_id,num_types,set,handler, \ |
129 | private_data) | 112 | private_data) |
diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c index e23df5fd3836..bf27a07dbce3 100644 --- a/drivers/gpu/drm/drm_dp_mst_topology.c +++ b/drivers/gpu/drm/drm_dp_mst_topology.c | |||
@@ -53,8 +53,8 @@ static int drm_dp_send_dpcd_write(struct drm_dp_mst_topology_mgr *mgr, | |||
53 | struct drm_dp_mst_port *port, | 53 | struct drm_dp_mst_port *port, |
54 | int offset, int size, u8 *bytes); | 54 | int offset, int size, u8 *bytes); |
55 | 55 | ||
56 | static int drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr, | 56 | static void drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr, |
57 | struct drm_dp_mst_branch *mstb); | 57 | struct drm_dp_mst_branch *mstb); |
58 | static int drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr, | 58 | static int drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr, |
59 | struct drm_dp_mst_branch *mstb, | 59 | struct drm_dp_mst_branch *mstb, |
60 | struct drm_dp_mst_port *port); | 60 | struct drm_dp_mst_port *port); |
@@ -804,8 +804,6 @@ static void drm_dp_destroy_mst_branch_device(struct kref *kref) | |||
804 | struct drm_dp_mst_port *port, *tmp; | 804 | struct drm_dp_mst_port *port, *tmp; |
805 | bool wake_tx = false; | 805 | bool wake_tx = false; |
806 | 806 | ||
807 | cancel_work_sync(&mstb->mgr->work); | ||
808 | |||
809 | /* | 807 | /* |
810 | * destroy all ports - don't need lock | 808 | * destroy all ports - don't need lock |
811 | * as there are no more references to the mst branch | 809 | * as there are no more references to the mst branch |
@@ -863,29 +861,33 @@ static void drm_dp_destroy_port(struct kref *kref) | |||
863 | { | 861 | { |
864 | struct drm_dp_mst_port *port = container_of(kref, struct drm_dp_mst_port, kref); | 862 | struct drm_dp_mst_port *port = container_of(kref, struct drm_dp_mst_port, kref); |
865 | struct drm_dp_mst_topology_mgr *mgr = port->mgr; | 863 | struct drm_dp_mst_topology_mgr *mgr = port->mgr; |
864 | |||
866 | if (!port->input) { | 865 | if (!port->input) { |
867 | port->vcpi.num_slots = 0; | 866 | port->vcpi.num_slots = 0; |
868 | 867 | ||
869 | kfree(port->cached_edid); | 868 | kfree(port->cached_edid); |
870 | 869 | ||
871 | /* we can't destroy the connector here, as | 870 | /* |
872 | we might be holding the mode_config.mutex | 871 | * The only time we don't have a connector |
873 | from an EDID retrieval */ | 872 | * on an output port is if the connector init |
873 | * fails. | ||
874 | */ | ||
874 | if (port->connector) { | 875 | if (port->connector) { |
876 | /* we can't destroy the connector here, as | ||
877 | * we might be holding the mode_config.mutex | ||
878 | * from an EDID retrieval */ | ||
879 | |||
875 | mutex_lock(&mgr->destroy_connector_lock); | 880 | mutex_lock(&mgr->destroy_connector_lock); |
876 | list_add(&port->next, &mgr->destroy_connector_list); | 881 | list_add(&port->next, &mgr->destroy_connector_list); |
877 | mutex_unlock(&mgr->destroy_connector_lock); | 882 | mutex_unlock(&mgr->destroy_connector_lock); |
878 | schedule_work(&mgr->destroy_connector_work); | 883 | schedule_work(&mgr->destroy_connector_work); |
879 | return; | 884 | return; |
880 | } | 885 | } |
886 | /* no need to clean up vcpi | ||
887 | * as if we have no connector we never setup a vcpi */ | ||
881 | drm_dp_port_teardown_pdt(port, port->pdt); | 888 | drm_dp_port_teardown_pdt(port, port->pdt); |
882 | |||
883 | if (!port->input && port->vcpi.vcpi > 0) | ||
884 | drm_dp_mst_put_payload_id(mgr, port->vcpi.vcpi); | ||
885 | } | 889 | } |
886 | kfree(port); | 890 | kfree(port); |
887 | |||
888 | (*mgr->cbs->hotplug)(mgr); | ||
889 | } | 891 | } |
890 | 892 | ||
891 | static void drm_dp_put_port(struct drm_dp_mst_port *port) | 893 | static void drm_dp_put_port(struct drm_dp_mst_port *port) |
@@ -1027,8 +1029,8 @@ static void drm_dp_check_port_guid(struct drm_dp_mst_branch *mstb, | |||
1027 | } | 1029 | } |
1028 | } | 1030 | } |
1029 | 1031 | ||
1030 | static void build_mst_prop_path(struct drm_dp_mst_port *port, | 1032 | static void build_mst_prop_path(const struct drm_dp_mst_branch *mstb, |
1031 | struct drm_dp_mst_branch *mstb, | 1033 | int pnum, |
1032 | char *proppath, | 1034 | char *proppath, |
1033 | size_t proppath_size) | 1035 | size_t proppath_size) |
1034 | { | 1036 | { |
@@ -1041,7 +1043,7 @@ static void build_mst_prop_path(struct drm_dp_mst_port *port, | |||
1041 | snprintf(temp, sizeof(temp), "-%d", port_num); | 1043 | snprintf(temp, sizeof(temp), "-%d", port_num); |
1042 | strlcat(proppath, temp, proppath_size); | 1044 | strlcat(proppath, temp, proppath_size); |
1043 | } | 1045 | } |
1044 | snprintf(temp, sizeof(temp), "-%d", port->port_num); | 1046 | snprintf(temp, sizeof(temp), "-%d", pnum); |
1045 | strlcat(proppath, temp, proppath_size); | 1047 | strlcat(proppath, temp, proppath_size); |
1046 | } | 1048 | } |
1047 | 1049 | ||
@@ -1105,22 +1107,32 @@ static void drm_dp_add_port(struct drm_dp_mst_branch *mstb, | |||
1105 | drm_dp_port_teardown_pdt(port, old_pdt); | 1107 | drm_dp_port_teardown_pdt(port, old_pdt); |
1106 | 1108 | ||
1107 | ret = drm_dp_port_setup_pdt(port); | 1109 | ret = drm_dp_port_setup_pdt(port); |
1108 | if (ret == true) { | 1110 | if (ret == true) |
1109 | drm_dp_send_link_address(mstb->mgr, port->mstb); | 1111 | drm_dp_send_link_address(mstb->mgr, port->mstb); |
1110 | port->mstb->link_address_sent = true; | ||
1111 | } | ||
1112 | } | 1112 | } |
1113 | 1113 | ||
1114 | if (created && !port->input) { | 1114 | if (created && !port->input) { |
1115 | char proppath[255]; | 1115 | char proppath[255]; |
1116 | build_mst_prop_path(port, mstb, proppath, sizeof(proppath)); | ||
1117 | port->connector = (*mstb->mgr->cbs->add_connector)(mstb->mgr, port, proppath); | ||
1118 | 1116 | ||
1119 | if (port->port_num >= 8) { | 1117 | build_mst_prop_path(mstb, port->port_num, proppath, sizeof(proppath)); |
1118 | port->connector = (*mstb->mgr->cbs->add_connector)(mstb->mgr, port, proppath); | ||
1119 | if (!port->connector) { | ||
1120 | /* remove it from the port list */ | ||
1121 | mutex_lock(&mstb->mgr->lock); | ||
1122 | list_del(&port->next); | ||
1123 | mutex_unlock(&mstb->mgr->lock); | ||
1124 | /* drop port list reference */ | ||
1125 | drm_dp_put_port(port); | ||
1126 | goto out; | ||
1127 | } | ||
1128 | if (port->port_num >= DP_MST_LOGICAL_PORT_0) { | ||
1120 | port->cached_edid = drm_get_edid(port->connector, &port->aux.ddc); | 1129 | port->cached_edid = drm_get_edid(port->connector, &port->aux.ddc); |
1130 | drm_mode_connector_set_tile_property(port->connector); | ||
1121 | } | 1131 | } |
1132 | (*mstb->mgr->cbs->register_connector)(port->connector); | ||
1122 | } | 1133 | } |
1123 | 1134 | ||
1135 | out: | ||
1124 | /* put reference to this port */ | 1136 | /* put reference to this port */ |
1125 | drm_dp_put_port(port); | 1137 | drm_dp_put_port(port); |
1126 | } | 1138 | } |
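The drm_dp_add_port hunk above adds an error path for a failed connector creation: the port is taken back off the list and the list's reference is dropped before bailing out. The sketch below shows the same unwind on a toy refcounted list; all names are invented and the single-threaded head unlink is a simplification.

#include <stdio.h>
#include <stdlib.h>

/* Toy refcounted port that can also sit on a singly linked list. */
struct port {
	int refs;
	struct port *next;
};

static void put_port(struct port *p)
{
	if (--p->refs == 0)
		free(p);
}

/*
 * Take a list reference and link the port in; if creating the
 * user-visible object fails, undo exactly those two steps, mirroring
 * the error path in the hunk above.  Single-threaded, so unlinking
 * from the head is safe here.
 */
static int register_port(struct port **head, struct port *p, int create_ok)
{
	p->refs++;			/* list reference */
	p->next = *head;
	*head = p;

	if (!create_ok) {
		*head = p->next;	/* remove it from the port list */
		put_port(p);		/* drop the port list reference */
		return -1;
	}
	return 0;
}

int main(void)
{
	struct port *head = NULL;
	struct port *p = calloc(1, sizeof(*p));

	if (!p)
		return 1;
	p->refs = 1;			/* caller's reference */

	if (register_port(&head, p, 0))
		fprintf(stderr, "create failed, port unlinked again\n");

	put_port(p);			/* caller drops its reference */
	return 0;
}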
@@ -1202,10 +1214,9 @@ static void drm_dp_check_and_send_link_address(struct drm_dp_mst_topology_mgr *m | |||
1202 | { | 1214 | { |
1203 | struct drm_dp_mst_port *port; | 1215 | struct drm_dp_mst_port *port; |
1204 | struct drm_dp_mst_branch *mstb_child; | 1216 | struct drm_dp_mst_branch *mstb_child; |
1205 | if (!mstb->link_address_sent) { | 1217 | if (!mstb->link_address_sent) |
1206 | drm_dp_send_link_address(mgr, mstb); | 1218 | drm_dp_send_link_address(mgr, mstb); |
1207 | mstb->link_address_sent = true; | 1219 | |
1208 | } | ||
1209 | list_for_each_entry(port, &mstb->ports, next) { | 1220 | list_for_each_entry(port, &mstb->ports, next) { |
1210 | if (port->input) | 1221 | if (port->input) |
1211 | continue; | 1222 | continue; |
@@ -1458,8 +1469,8 @@ static void drm_dp_queue_down_tx(struct drm_dp_mst_topology_mgr *mgr, | |||
1458 | mutex_unlock(&mgr->qlock); | 1469 | mutex_unlock(&mgr->qlock); |
1459 | } | 1470 | } |
1460 | 1471 | ||
1461 | static int drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr, | 1472 | static void drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr, |
1462 | struct drm_dp_mst_branch *mstb) | 1473 | struct drm_dp_mst_branch *mstb) |
1463 | { | 1474 | { |
1464 | int len; | 1475 | int len; |
1465 | struct drm_dp_sideband_msg_tx *txmsg; | 1476 | struct drm_dp_sideband_msg_tx *txmsg; |
@@ -1467,11 +1478,12 @@ static int drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr, | |||
1467 | 1478 | ||
1468 | txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL); | 1479 | txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL); |
1469 | if (!txmsg) | 1480 | if (!txmsg) |
1470 | return -ENOMEM; | 1481 | return; |
1471 | 1482 | ||
1472 | txmsg->dst = mstb; | 1483 | txmsg->dst = mstb; |
1473 | len = build_link_address(txmsg); | 1484 | len = build_link_address(txmsg); |
1474 | 1485 | ||
1486 | mstb->link_address_sent = true; | ||
1475 | drm_dp_queue_down_tx(mgr, txmsg); | 1487 | drm_dp_queue_down_tx(mgr, txmsg); |
1476 | 1488 | ||
1477 | ret = drm_dp_mst_wait_tx_reply(mstb, txmsg); | 1489 | ret = drm_dp_mst_wait_tx_reply(mstb, txmsg); |
@@ -1499,11 +1511,12 @@ static int drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr, | |||
1499 | } | 1511 | } |
1500 | (*mgr->cbs->hotplug)(mgr); | 1512 | (*mgr->cbs->hotplug)(mgr); |
1501 | } | 1513 | } |
1502 | } else | 1514 | } else { |
1515 | mstb->link_address_sent = false; | ||
1503 | DRM_DEBUG_KMS("link address failed %d\n", ret); | 1516 | DRM_DEBUG_KMS("link address failed %d\n", ret); |
1517 | } | ||
1504 | 1518 | ||
1505 | kfree(txmsg); | 1519 | kfree(txmsg); |
1506 | return 0; | ||
1507 | } | 1520 | } |
1508 | 1521 | ||
1509 | static int drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr, | 1522 | static int drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr, |
@@ -1978,6 +1991,8 @@ void drm_dp_mst_topology_mgr_suspend(struct drm_dp_mst_topology_mgr *mgr) | |||
1978 | drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL, | 1991 | drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL, |
1979 | DP_MST_EN | DP_UPSTREAM_IS_SRC); | 1992 | DP_MST_EN | DP_UPSTREAM_IS_SRC); |
1980 | mutex_unlock(&mgr->lock); | 1993 | mutex_unlock(&mgr->lock); |
1994 | flush_work(&mgr->work); | ||
1995 | flush_work(&mgr->destroy_connector_work); | ||
1981 | } | 1996 | } |
1982 | EXPORT_SYMBOL(drm_dp_mst_topology_mgr_suspend); | 1997 | EXPORT_SYMBOL(drm_dp_mst_topology_mgr_suspend); |
1983 | 1998 | ||
@@ -2263,10 +2278,10 @@ struct edid *drm_dp_mst_get_edid(struct drm_connector *connector, struct drm_dp_ | |||
2263 | 2278 | ||
2264 | if (port->cached_edid) | 2279 | if (port->cached_edid) |
2265 | edid = drm_edid_duplicate(port->cached_edid); | 2280 | edid = drm_edid_duplicate(port->cached_edid); |
2266 | else | 2281 | else { |
2267 | edid = drm_get_edid(connector, &port->aux.ddc); | 2282 | edid = drm_get_edid(connector, &port->aux.ddc); |
2268 | 2283 | drm_mode_connector_set_tile_property(connector); | |
2269 | drm_mode_connector_set_tile_property(connector); | 2284 | } |
2270 | drm_dp_put_port(port); | 2285 | drm_dp_put_port(port); |
2271 | return edid; | 2286 | return edid; |
2272 | } | 2287 | } |
@@ -2671,7 +2686,7 @@ static void drm_dp_destroy_connector_work(struct work_struct *work) | |||
2671 | { | 2686 | { |
2672 | struct drm_dp_mst_topology_mgr *mgr = container_of(work, struct drm_dp_mst_topology_mgr, destroy_connector_work); | 2687 | struct drm_dp_mst_topology_mgr *mgr = container_of(work, struct drm_dp_mst_topology_mgr, destroy_connector_work); |
2673 | struct drm_dp_mst_port *port; | 2688 | struct drm_dp_mst_port *port; |
2674 | 2689 | bool send_hotplug = false; | |
2675 | /* | 2690 | /* |
2676 | * Not a regular list traverse as we have to drop the destroy | 2691 | * Not a regular list traverse as we have to drop the destroy |
2677 | * connector lock before destroying the connector, to avoid AB->BA | 2692 | * connector lock before destroying the connector, to avoid AB->BA |
@@ -2694,7 +2709,10 @@ static void drm_dp_destroy_connector_work(struct work_struct *work) | |||
2694 | if (!port->input && port->vcpi.vcpi > 0) | 2709 | if (!port->input && port->vcpi.vcpi > 0) |
2695 | drm_dp_mst_put_payload_id(mgr, port->vcpi.vcpi); | 2710 | drm_dp_mst_put_payload_id(mgr, port->vcpi.vcpi); |
2696 | kfree(port); | 2711 | kfree(port); |
2712 | send_hotplug = true; | ||
2697 | } | 2713 | } |
2714 | if (send_hotplug) | ||
2715 | (*mgr->cbs->hotplug)(mgr); | ||
2698 | } | 2716 | } |
2699 | 2717 | ||
2700 | /** | 2718 | /** |
@@ -2747,6 +2765,7 @@ EXPORT_SYMBOL(drm_dp_mst_topology_mgr_init); | |||
2747 | */ | 2765 | */ |
2748 | void drm_dp_mst_topology_mgr_destroy(struct drm_dp_mst_topology_mgr *mgr) | 2766 | void drm_dp_mst_topology_mgr_destroy(struct drm_dp_mst_topology_mgr *mgr) |
2749 | { | 2767 | { |
2768 | flush_work(&mgr->work); | ||
2750 | flush_work(&mgr->destroy_connector_work); | 2769 | flush_work(&mgr->destroy_connector_work); |
2751 | mutex_lock(&mgr->payload_lock); | 2770 | mutex_lock(&mgr->payload_lock); |
2752 | kfree(mgr->payloads); | 2771 | kfree(mgr->payloads); |
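The destroy_connector_work hunk batches the hotplug notification: a flag is set while draining the list and the callback fires once at the end, only if something was actually destroyed. A trivial sketch of that batching follows (queue contents and messages are made up).

#include <stdbool.h>
#include <stdio.h>

/* Drain a queue of doomed items and notify once at the end, instead of
 * once per item (cf. the send_hotplug flag in the hunk above). */
static void drain_queue(const int *queue, int n)
{
	bool notify = false;

	for (int i = 0; i < n; i++) {
		printf("destroying item %d\n", queue[i]);
		notify = true;
	}

	if (notify)
		printf("one hotplug event for the whole batch\n");
}

int main(void)
{
	int queue[] = { 1, 2, 3 };

	drain_queue(queue, 3);
	drain_queue(NULL, 0);	/* empty batch: no notification at all */
	return 0;
}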
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c index 418d299f3b12..ca08c472311b 100644 --- a/drivers/gpu/drm/drm_fb_helper.c +++ b/drivers/gpu/drm/drm_fb_helper.c | |||
@@ -345,7 +345,11 @@ static bool restore_fbdev_mode(struct drm_fb_helper *fb_helper) | |||
345 | struct drm_crtc *crtc = mode_set->crtc; | 345 | struct drm_crtc *crtc = mode_set->crtc; |
346 | int ret; | 346 | int ret; |
347 | 347 | ||
348 | if (crtc->funcs->cursor_set) { | 348 | if (crtc->funcs->cursor_set2) { |
349 | ret = crtc->funcs->cursor_set2(crtc, NULL, 0, 0, 0, 0, 0); | ||
350 | if (ret) | ||
351 | error = true; | ||
352 | } else if (crtc->funcs->cursor_set) { | ||
349 | ret = crtc->funcs->cursor_set(crtc, NULL, 0, 0, 0); | 353 | ret = crtc->funcs->cursor_set(crtc, NULL, 0, 0, 0); |
350 | if (ret) | 354 | if (ret) |
351 | error = true; | 355 | error = true; |
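The fb-helper hunk prefers the cursor_set2 hook and only falls back to cursor_set when the newer hook is absent. The pattern, optional function pointers tried in order of preference, looks like the sketch below; struct crtc_ops here is a stand-in, not the DRM structure.

#include <stddef.h>

/* Ops table where newer hooks may or may not be provided. */
struct crtc_ops {
	int (*cursor_set2)(int handle, int hot_x, int hot_y);
	int (*cursor_set)(int handle);
};

/* Prefer the richer hook when present, fall back to the older one. */
static int clear_cursor(const struct crtc_ops *ops)
{
	if (ops->cursor_set2)
		return ops->cursor_set2(0, 0, 0);
	if (ops->cursor_set)
		return ops->cursor_set(0);
	return 0;	/* neither hook: nothing to do */
}

static int old_clear(int handle)
{
	(void)handle;
	return 0;
}

int main(void)
{
	struct crtc_ops ops = { .cursor_set2 = NULL, .cursor_set = old_clear };

	return clear_cursor(&ops);
}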
diff --git a/drivers/gpu/drm/drm_probe_helper.c b/drivers/gpu/drm/drm_probe_helper.c index d734780b31c0..a18164f2f6d2 100644 --- a/drivers/gpu/drm/drm_probe_helper.c +++ b/drivers/gpu/drm/drm_probe_helper.c | |||
@@ -94,7 +94,18 @@ static int drm_helper_probe_add_cmdline_mode(struct drm_connector *connector) | |||
94 | } | 94 | } |
95 | 95 | ||
96 | #define DRM_OUTPUT_POLL_PERIOD (10*HZ) | 96 | #define DRM_OUTPUT_POLL_PERIOD (10*HZ) |
97 | static void __drm_kms_helper_poll_enable(struct drm_device *dev) | 97 | /** |
98 | * drm_kms_helper_poll_enable_locked - re-enable output polling. | ||
99 | * @dev: drm_device | ||
100 | * | ||
101 | * This function re-enables the output polling work without | ||
102 | * locking the mode_config mutex. | ||
103 | * | ||
104 | * This is like drm_kms_helper_poll_enable() however it is to be | ||
105 | * called from a context where the mode_config mutex is locked | ||
106 | * already. | ||
107 | */ | ||
108 | void drm_kms_helper_poll_enable_locked(struct drm_device *dev) | ||
98 | { | 109 | { |
99 | bool poll = false; | 110 | bool poll = false; |
100 | struct drm_connector *connector; | 111 | struct drm_connector *connector; |
@@ -113,6 +124,8 @@ static void __drm_kms_helper_poll_enable(struct drm_device *dev) | |||
113 | if (poll) | 124 | if (poll) |
114 | schedule_delayed_work(&dev->mode_config.output_poll_work, DRM_OUTPUT_POLL_PERIOD); | 125 | schedule_delayed_work(&dev->mode_config.output_poll_work, DRM_OUTPUT_POLL_PERIOD); |
115 | } | 126 | } |
127 | EXPORT_SYMBOL(drm_kms_helper_poll_enable_locked); | ||
128 | |||
116 | 129 | ||
117 | static int drm_helper_probe_single_connector_modes_merge_bits(struct drm_connector *connector, | 130 | static int drm_helper_probe_single_connector_modes_merge_bits(struct drm_connector *connector, |
118 | uint32_t maxX, uint32_t maxY, bool merge_type_bits) | 131 | uint32_t maxX, uint32_t maxY, bool merge_type_bits) |
@@ -174,7 +187,7 @@ static int drm_helper_probe_single_connector_modes_merge_bits(struct drm_connect | |||
174 | 187 | ||
175 | /* Re-enable polling in case the global poll config changed. */ | 188 | /* Re-enable polling in case the global poll config changed. */ |
176 | if (drm_kms_helper_poll != dev->mode_config.poll_running) | 189 | if (drm_kms_helper_poll != dev->mode_config.poll_running) |
177 | __drm_kms_helper_poll_enable(dev); | 190 | drm_kms_helper_poll_enable_locked(dev); |
178 | 191 | ||
179 | dev->mode_config.poll_running = drm_kms_helper_poll; | 192 | dev->mode_config.poll_running = drm_kms_helper_poll; |
180 | 193 | ||
@@ -428,7 +441,7 @@ EXPORT_SYMBOL(drm_kms_helper_poll_disable); | |||
428 | void drm_kms_helper_poll_enable(struct drm_device *dev) | 441 | void drm_kms_helper_poll_enable(struct drm_device *dev) |
429 | { | 442 | { |
430 | mutex_lock(&dev->mode_config.mutex); | 443 | mutex_lock(&dev->mode_config.mutex); |
431 | __drm_kms_helper_poll_enable(dev); | 444 | drm_kms_helper_poll_enable_locked(dev); |
432 | mutex_unlock(&dev->mode_config.mutex); | 445 | mutex_unlock(&dev->mode_config.mutex); |
433 | } | 446 | } |
434 | EXPORT_SYMBOL(drm_kms_helper_poll_enable); | 447 | EXPORT_SYMBOL(drm_kms_helper_poll_enable); |
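The probe-helper change exports a *_locked variant for callers that already hold the mode_config mutex, while the public entry point keeps taking the lock itself. A pthread-based sketch of that split is below; the names and the single int of state are invented.

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t cfg_lock = PTHREAD_MUTEX_INITIALIZER;
static int poll_enabled;

/* Caller must already hold cfg_lock (the *_locked variant). */
static void poll_enable_locked(void)
{
	poll_enabled = 1;
}

/* Public entry point: takes the lock, then calls the _locked helper. */
static void poll_enable(void)
{
	pthread_mutex_lock(&cfg_lock);
	poll_enable_locked();
	pthread_mutex_unlock(&cfg_lock);
}

int main(void)
{
	poll_enable();				/* unlocked caller */

	pthread_mutex_lock(&cfg_lock);		/* caller that already holds it */
	poll_enable_locked();
	pthread_mutex_unlock(&cfg_lock);

	printf("poll enabled: %d\n", poll_enabled);
	return 0;
}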
diff --git a/drivers/gpu/drm/exynos/exynos7_drm_decon.c b/drivers/gpu/drm/exynos/exynos7_drm_decon.c index cbdb78ef3bac..e6cbaca821a4 100644 --- a/drivers/gpu/drm/exynos/exynos7_drm_decon.c +++ b/drivers/gpu/drm/exynos/exynos7_drm_decon.c | |||
@@ -37,7 +37,6 @@ | |||
37 | * DECON stands for Display and Enhancement controller. | 37 | * DECON stands for Display and Enhancement controller. |
38 | */ | 38 | */ |
39 | 39 | ||
40 | #define DECON_DEFAULT_FRAMERATE 60 | ||
41 | #define MIN_FB_WIDTH_FOR_16WORD_BURST 128 | 40 | #define MIN_FB_WIDTH_FOR_16WORD_BURST 128 |
42 | 41 | ||
43 | #define WINDOWS_NR 2 | 42 | #define WINDOWS_NR 2 |
@@ -165,16 +164,6 @@ static u32 decon_calc_clkdiv(struct decon_context *ctx, | |||
165 | return (clkdiv < 0x100) ? clkdiv : 0xff; | 164 | return (clkdiv < 0x100) ? clkdiv : 0xff; |
166 | } | 165 | } |
167 | 166 | ||
168 | static bool decon_mode_fixup(struct exynos_drm_crtc *crtc, | ||
169 | const struct drm_display_mode *mode, | ||
170 | struct drm_display_mode *adjusted_mode) | ||
171 | { | ||
172 | if (adjusted_mode->vrefresh == 0) | ||
173 | adjusted_mode->vrefresh = DECON_DEFAULT_FRAMERATE; | ||
174 | |||
175 | return true; | ||
176 | } | ||
177 | |||
178 | static void decon_commit(struct exynos_drm_crtc *crtc) | 167 | static void decon_commit(struct exynos_drm_crtc *crtc) |
179 | { | 168 | { |
180 | struct decon_context *ctx = crtc->ctx; | 169 | struct decon_context *ctx = crtc->ctx; |
@@ -637,7 +626,6 @@ static void decon_disable(struct exynos_drm_crtc *crtc) | |||
637 | static const struct exynos_drm_crtc_ops decon_crtc_ops = { | 626 | static const struct exynos_drm_crtc_ops decon_crtc_ops = { |
638 | .enable = decon_enable, | 627 | .enable = decon_enable, |
639 | .disable = decon_disable, | 628 | .disable = decon_disable, |
640 | .mode_fixup = decon_mode_fixup, | ||
641 | .commit = decon_commit, | 629 | .commit = decon_commit, |
642 | .enable_vblank = decon_enable_vblank, | 630 | .enable_vblank = decon_enable_vblank, |
643 | .disable_vblank = decon_disable_vblank, | 631 | .disable_vblank = decon_disable_vblank, |
diff --git a/drivers/gpu/drm/exynos/exynos_dp_core.c b/drivers/gpu/drm/exynos/exynos_dp_core.c index d66ade0efac8..124fb9a56f02 100644 --- a/drivers/gpu/drm/exynos/exynos_dp_core.c +++ b/drivers/gpu/drm/exynos/exynos_dp_core.c | |||
@@ -1383,28 +1383,6 @@ static int exynos_dp_remove(struct platform_device *pdev) | |||
1383 | return 0; | 1383 | return 0; |
1384 | } | 1384 | } |
1385 | 1385 | ||
1386 | #ifdef CONFIG_PM_SLEEP | ||
1387 | static int exynos_dp_suspend(struct device *dev) | ||
1388 | { | ||
1389 | struct exynos_dp_device *dp = dev_get_drvdata(dev); | ||
1390 | |||
1391 | exynos_dp_disable(&dp->encoder); | ||
1392 | return 0; | ||
1393 | } | ||
1394 | |||
1395 | static int exynos_dp_resume(struct device *dev) | ||
1396 | { | ||
1397 | struct exynos_dp_device *dp = dev_get_drvdata(dev); | ||
1398 | |||
1399 | exynos_dp_enable(&dp->encoder); | ||
1400 | return 0; | ||
1401 | } | ||
1402 | #endif | ||
1403 | |||
1404 | static const struct dev_pm_ops exynos_dp_pm_ops = { | ||
1405 | SET_SYSTEM_SLEEP_PM_OPS(exynos_dp_suspend, exynos_dp_resume) | ||
1406 | }; | ||
1407 | |||
1408 | static const struct of_device_id exynos_dp_match[] = { | 1386 | static const struct of_device_id exynos_dp_match[] = { |
1409 | { .compatible = "samsung,exynos5-dp" }, | 1387 | { .compatible = "samsung,exynos5-dp" }, |
1410 | {}, | 1388 | {}, |
@@ -1417,7 +1395,6 @@ struct platform_driver dp_driver = { | |||
1417 | .driver = { | 1395 | .driver = { |
1418 | .name = "exynos-dp", | 1396 | .name = "exynos-dp", |
1419 | .owner = THIS_MODULE, | 1397 | .owner = THIS_MODULE, |
1420 | .pm = &exynos_dp_pm_ops, | ||
1421 | .of_match_table = exynos_dp_match, | 1398 | .of_match_table = exynos_dp_match, |
1422 | }, | 1399 | }, |
1423 | }; | 1400 | }; |
diff --git a/drivers/gpu/drm/exynos/exynos_drm_core.c b/drivers/gpu/drm/exynos/exynos_drm_core.c index c68a6a2a9b57..7f55ba6771c6 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_core.c +++ b/drivers/gpu/drm/exynos/exynos_drm_core.c | |||
@@ -28,7 +28,6 @@ int exynos_drm_subdrv_register(struct exynos_drm_subdrv *subdrv) | |||
28 | 28 | ||
29 | return 0; | 29 | return 0; |
30 | } | 30 | } |
31 | EXPORT_SYMBOL_GPL(exynos_drm_subdrv_register); | ||
32 | 31 | ||
33 | int exynos_drm_subdrv_unregister(struct exynos_drm_subdrv *subdrv) | 32 | int exynos_drm_subdrv_unregister(struct exynos_drm_subdrv *subdrv) |
34 | { | 33 | { |
@@ -39,7 +38,6 @@ int exynos_drm_subdrv_unregister(struct exynos_drm_subdrv *subdrv) | |||
39 | 38 | ||
40 | return 0; | 39 | return 0; |
41 | } | 40 | } |
42 | EXPORT_SYMBOL_GPL(exynos_drm_subdrv_unregister); | ||
43 | 41 | ||
44 | int exynos_drm_device_subdrv_probe(struct drm_device *dev) | 42 | int exynos_drm_device_subdrv_probe(struct drm_device *dev) |
45 | { | 43 | { |
@@ -69,7 +67,6 @@ int exynos_drm_device_subdrv_probe(struct drm_device *dev) | |||
69 | 67 | ||
70 | return 0; | 68 | return 0; |
71 | } | 69 | } |
72 | EXPORT_SYMBOL_GPL(exynos_drm_device_subdrv_probe); | ||
73 | 70 | ||
74 | int exynos_drm_device_subdrv_remove(struct drm_device *dev) | 71 | int exynos_drm_device_subdrv_remove(struct drm_device *dev) |
75 | { | 72 | { |
@@ -87,7 +84,6 @@ int exynos_drm_device_subdrv_remove(struct drm_device *dev) | |||
87 | 84 | ||
88 | return 0; | 85 | return 0; |
89 | } | 86 | } |
90 | EXPORT_SYMBOL_GPL(exynos_drm_device_subdrv_remove); | ||
91 | 87 | ||
92 | int exynos_drm_subdrv_open(struct drm_device *dev, struct drm_file *file) | 88 | int exynos_drm_subdrv_open(struct drm_device *dev, struct drm_file *file) |
93 | { | 89 | { |
@@ -111,7 +107,6 @@ err: | |||
111 | } | 107 | } |
112 | return ret; | 108 | return ret; |
113 | } | 109 | } |
114 | EXPORT_SYMBOL_GPL(exynos_drm_subdrv_open); | ||
115 | 110 | ||
116 | void exynos_drm_subdrv_close(struct drm_device *dev, struct drm_file *file) | 111 | void exynos_drm_subdrv_close(struct drm_device *dev, struct drm_file *file) |
117 | { | 112 | { |
@@ -122,4 +117,3 @@ void exynos_drm_subdrv_close(struct drm_device *dev, struct drm_file *file) | |||
122 | subdrv->close(dev, subdrv->dev, file); | 117 | subdrv->close(dev, subdrv->dev, file); |
123 | } | 118 | } |
124 | } | 119 | } |
125 | EXPORT_SYMBOL_GPL(exynos_drm_subdrv_close); | ||
diff --git a/drivers/gpu/drm/exynos/exynos_drm_crtc.c b/drivers/gpu/drm/exynos/exynos_drm_crtc.c index 0872aa2f450f..ed28823d3b35 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_crtc.c +++ b/drivers/gpu/drm/exynos/exynos_drm_crtc.c | |||
@@ -41,20 +41,6 @@ static void exynos_drm_crtc_disable(struct drm_crtc *crtc) | |||
41 | exynos_crtc->ops->disable(exynos_crtc); | 41 | exynos_crtc->ops->disable(exynos_crtc); |
42 | } | 42 | } |
43 | 43 | ||
44 | static bool | ||
45 | exynos_drm_crtc_mode_fixup(struct drm_crtc *crtc, | ||
46 | const struct drm_display_mode *mode, | ||
47 | struct drm_display_mode *adjusted_mode) | ||
48 | { | ||
49 | struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc); | ||
50 | |||
51 | if (exynos_crtc->ops->mode_fixup) | ||
52 | return exynos_crtc->ops->mode_fixup(exynos_crtc, mode, | ||
53 | adjusted_mode); | ||
54 | |||
55 | return true; | ||
56 | } | ||
57 | |||
58 | static void | 44 | static void |
59 | exynos_drm_crtc_mode_set_nofb(struct drm_crtc *crtc) | 45 | exynos_drm_crtc_mode_set_nofb(struct drm_crtc *crtc) |
60 | { | 46 | { |
@@ -99,7 +85,6 @@ static void exynos_crtc_atomic_flush(struct drm_crtc *crtc, | |||
99 | static struct drm_crtc_helper_funcs exynos_crtc_helper_funcs = { | 85 | static struct drm_crtc_helper_funcs exynos_crtc_helper_funcs = { |
100 | .enable = exynos_drm_crtc_enable, | 86 | .enable = exynos_drm_crtc_enable, |
101 | .disable = exynos_drm_crtc_disable, | 87 | .disable = exynos_drm_crtc_disable, |
102 | .mode_fixup = exynos_drm_crtc_mode_fixup, | ||
103 | .mode_set_nofb = exynos_drm_crtc_mode_set_nofb, | 88 | .mode_set_nofb = exynos_drm_crtc_mode_set_nofb, |
104 | .atomic_begin = exynos_crtc_atomic_begin, | 89 | .atomic_begin = exynos_crtc_atomic_begin, |
105 | .atomic_flush = exynos_crtc_atomic_flush, | 90 | .atomic_flush = exynos_crtc_atomic_flush, |
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c index 831d2e4cacf9..ae9e6b2d3758 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_drv.c +++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c | |||
@@ -304,6 +304,7 @@ int exynos_atomic_commit(struct drm_device *dev, struct drm_atomic_state *state, | |||
304 | return 0; | 304 | return 0; |
305 | } | 305 | } |
306 | 306 | ||
307 | #ifdef CONFIG_PM_SLEEP | ||
307 | static int exynos_drm_suspend(struct drm_device *dev, pm_message_t state) | 308 | static int exynos_drm_suspend(struct drm_device *dev, pm_message_t state) |
308 | { | 309 | { |
309 | struct drm_connector *connector; | 310 | struct drm_connector *connector; |
@@ -340,6 +341,7 @@ static int exynos_drm_resume(struct drm_device *dev) | |||
340 | 341 | ||
341 | return 0; | 342 | return 0; |
342 | } | 343 | } |
344 | #endif | ||
343 | 345 | ||
344 | static int exynos_drm_open(struct drm_device *dev, struct drm_file *file) | 346 | static int exynos_drm_open(struct drm_device *dev, struct drm_file *file) |
345 | { | 347 | { |
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.h b/drivers/gpu/drm/exynos/exynos_drm_drv.h index b7ba21dfb696..6c717ba672db 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_drv.h +++ b/drivers/gpu/drm/exynos/exynos_drm_drv.h | |||
@@ -82,7 +82,6 @@ struct exynos_drm_plane { | |||
82 | * | 82 | * |
83 | * @enable: enable the device | 83 | * @enable: enable the device |
84 | * @disable: disable the device | 84 | * @disable: disable the device |
85 | * @mode_fixup: fix mode data before applying it | ||
86 | * @commit: set current hw specific display mode to hw. | 85 | * @commit: set current hw specific display mode to hw. |
87 | * @enable_vblank: specific driver callback for enabling vblank interrupt. | 86 | * @enable_vblank: specific driver callback for enabling vblank interrupt. |
88 | * @disable_vblank: specific driver callback for disabling vblank interrupt. | 87 | * @disable_vblank: specific driver callback for disabling vblank interrupt. |
@@ -103,9 +102,6 @@ struct exynos_drm_crtc; | |||
103 | struct exynos_drm_crtc_ops { | 102 | struct exynos_drm_crtc_ops { |
104 | void (*enable)(struct exynos_drm_crtc *crtc); | 103 | void (*enable)(struct exynos_drm_crtc *crtc); |
105 | void (*disable)(struct exynos_drm_crtc *crtc); | 104 | void (*disable)(struct exynos_drm_crtc *crtc); |
106 | bool (*mode_fixup)(struct exynos_drm_crtc *crtc, | ||
107 | const struct drm_display_mode *mode, | ||
108 | struct drm_display_mode *adjusted_mode); | ||
109 | void (*commit)(struct exynos_drm_crtc *crtc); | 105 | void (*commit)(struct exynos_drm_crtc *crtc); |
110 | int (*enable_vblank)(struct exynos_drm_crtc *crtc); | 106 | int (*enable_vblank)(struct exynos_drm_crtc *crtc); |
111 | void (*disable_vblank)(struct exynos_drm_crtc *crtc); | 107 | void (*disable_vblank)(struct exynos_drm_crtc *crtc); |
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimc.c b/drivers/gpu/drm/exynos/exynos_drm_fimc.c index 2a652359af64..dd3a5e6d58c8 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_fimc.c +++ b/drivers/gpu/drm/exynos/exynos_drm_fimc.c | |||
@@ -1206,23 +1206,6 @@ static struct exynos_drm_ipp_ops fimc_dst_ops = { | |||
1206 | .set_addr = fimc_dst_set_addr, | 1206 | .set_addr = fimc_dst_set_addr, |
1207 | }; | 1207 | }; |
1208 | 1208 | ||
1209 | static int fimc_clk_ctrl(struct fimc_context *ctx, bool enable) | ||
1210 | { | ||
1211 | DRM_DEBUG_KMS("enable[%d]\n", enable); | ||
1212 | |||
1213 | if (enable) { | ||
1214 | clk_prepare_enable(ctx->clocks[FIMC_CLK_GATE]); | ||
1215 | clk_prepare_enable(ctx->clocks[FIMC_CLK_WB_A]); | ||
1216 | ctx->suspended = false; | ||
1217 | } else { | ||
1218 | clk_disable_unprepare(ctx->clocks[FIMC_CLK_GATE]); | ||
1219 | clk_disable_unprepare(ctx->clocks[FIMC_CLK_WB_A]); | ||
1220 | ctx->suspended = true; | ||
1221 | } | ||
1222 | |||
1223 | return 0; | ||
1224 | } | ||
1225 | |||
1226 | static irqreturn_t fimc_irq_handler(int irq, void *dev_id) | 1209 | static irqreturn_t fimc_irq_handler(int irq, void *dev_id) |
1227 | { | 1210 | { |
1228 | struct fimc_context *ctx = dev_id; | 1211 | struct fimc_context *ctx = dev_id; |
@@ -1780,6 +1763,24 @@ static int fimc_remove(struct platform_device *pdev) | |||
1780 | return 0; | 1763 | return 0; |
1781 | } | 1764 | } |
1782 | 1765 | ||
1766 | #ifdef CONFIG_PM | ||
1767 | static int fimc_clk_ctrl(struct fimc_context *ctx, bool enable) | ||
1768 | { | ||
1769 | DRM_DEBUG_KMS("enable[%d]\n", enable); | ||
1770 | |||
1771 | if (enable) { | ||
1772 | clk_prepare_enable(ctx->clocks[FIMC_CLK_GATE]); | ||
1773 | clk_prepare_enable(ctx->clocks[FIMC_CLK_WB_A]); | ||
1774 | ctx->suspended = false; | ||
1775 | } else { | ||
1776 | clk_disable_unprepare(ctx->clocks[FIMC_CLK_GATE]); | ||
1777 | clk_disable_unprepare(ctx->clocks[FIMC_CLK_WB_A]); | ||
1778 | ctx->suspended = true; | ||
1779 | } | ||
1780 | |||
1781 | return 0; | ||
1782 | } | ||
1783 | |||
1783 | #ifdef CONFIG_PM_SLEEP | 1784 | #ifdef CONFIG_PM_SLEEP |
1784 | static int fimc_suspend(struct device *dev) | 1785 | static int fimc_suspend(struct device *dev) |
1785 | { | 1786 | { |
@@ -1806,7 +1807,6 @@ static int fimc_resume(struct device *dev) | |||
1806 | } | 1807 | } |
1807 | #endif | 1808 | #endif |
1808 | 1809 | ||
1809 | #ifdef CONFIG_PM | ||
1810 | static int fimc_runtime_suspend(struct device *dev) | 1810 | static int fimc_runtime_suspend(struct device *dev) |
1811 | { | 1811 | { |
1812 | struct fimc_context *ctx = get_fimc_context(dev); | 1812 | struct fimc_context *ctx = get_fimc_context(dev); |
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimd.c b/drivers/gpu/drm/exynos/exynos_drm_fimd.c index 750a9e6b9e8d..3d1aba67758b 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_fimd.c +++ b/drivers/gpu/drm/exynos/exynos_drm_fimd.c | |||
@@ -41,7 +41,6 @@ | |||
41 | * CPU Interface. | 41 | * CPU Interface. |
42 | */ | 42 | */ |
43 | 43 | ||
44 | #define FIMD_DEFAULT_FRAMERATE 60 | ||
45 | #define MIN_FB_WIDTH_FOR_16WORD_BURST 128 | 44 | #define MIN_FB_WIDTH_FOR_16WORD_BURST 128 |
46 | 45 | ||
47 | /* position control register for hardware window 0, 2 ~ 4.*/ | 46 | /* position control register for hardware window 0, 2 ~ 4.*/ |
@@ -377,16 +376,6 @@ static u32 fimd_calc_clkdiv(struct fimd_context *ctx, | |||
377 | return (clkdiv < 0x100) ? clkdiv : 0xff; | 376 | return (clkdiv < 0x100) ? clkdiv : 0xff; |
378 | } | 377 | } |
379 | 378 | ||
380 | static bool fimd_mode_fixup(struct exynos_drm_crtc *crtc, | ||
381 | const struct drm_display_mode *mode, | ||
382 | struct drm_display_mode *adjusted_mode) | ||
383 | { | ||
384 | if (adjusted_mode->vrefresh == 0) | ||
385 | adjusted_mode->vrefresh = FIMD_DEFAULT_FRAMERATE; | ||
386 | |||
387 | return true; | ||
388 | } | ||
389 | |||
390 | static void fimd_commit(struct exynos_drm_crtc *crtc) | 379 | static void fimd_commit(struct exynos_drm_crtc *crtc) |
391 | { | 380 | { |
392 | struct fimd_context *ctx = crtc->ctx; | 381 | struct fimd_context *ctx = crtc->ctx; |
@@ -882,13 +871,12 @@ static void fimd_dp_clock_enable(struct exynos_drm_crtc *crtc, bool enable) | |||
882 | return; | 871 | return; |
883 | 872 | ||
884 | val = enable ? DP_MIE_CLK_DP_ENABLE : DP_MIE_CLK_DISABLE; | 873 | val = enable ? DP_MIE_CLK_DP_ENABLE : DP_MIE_CLK_DISABLE; |
885 | writel(DP_MIE_CLK_DP_ENABLE, ctx->regs + DP_MIE_CLKCON); | 874 | writel(val, ctx->regs + DP_MIE_CLKCON); |
886 | } | 875 | } |
887 | 876 | ||
888 | static const struct exynos_drm_crtc_ops fimd_crtc_ops = { | 877 | static const struct exynos_drm_crtc_ops fimd_crtc_ops = { |
889 | .enable = fimd_enable, | 878 | .enable = fimd_enable, |
890 | .disable = fimd_disable, | 879 | .disable = fimd_disable, |
891 | .mode_fixup = fimd_mode_fixup, | ||
892 | .commit = fimd_commit, | 880 | .commit = fimd_commit, |
893 | .enable_vblank = fimd_enable_vblank, | 881 | .enable_vblank = fimd_enable_vblank, |
894 | .disable_vblank = fimd_disable_vblank, | 882 | .disable_vblank = fimd_disable_vblank, |
diff --git a/drivers/gpu/drm/exynos/exynos_drm_g2d.c b/drivers/gpu/drm/exynos/exynos_drm_g2d.c index 3734c34aed16..c17efdb238a6 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_g2d.c +++ b/drivers/gpu/drm/exynos/exynos_drm_g2d.c | |||
@@ -1059,7 +1059,6 @@ int exynos_g2d_get_ver_ioctl(struct drm_device *drm_dev, void *data, | |||
1059 | 1059 | ||
1060 | return 0; | 1060 | return 0; |
1061 | } | 1061 | } |
1062 | EXPORT_SYMBOL_GPL(exynos_g2d_get_ver_ioctl); | ||
1063 | 1062 | ||
1064 | int exynos_g2d_set_cmdlist_ioctl(struct drm_device *drm_dev, void *data, | 1063 | int exynos_g2d_set_cmdlist_ioctl(struct drm_device *drm_dev, void *data, |
1065 | struct drm_file *file) | 1064 | struct drm_file *file) |
@@ -1230,7 +1229,6 @@ err: | |||
1230 | g2d_put_cmdlist(g2d, node); | 1229 | g2d_put_cmdlist(g2d, node); |
1231 | return ret; | 1230 | return ret; |
1232 | } | 1231 | } |
1233 | EXPORT_SYMBOL_GPL(exynos_g2d_set_cmdlist_ioctl); | ||
1234 | 1232 | ||
1235 | int exynos_g2d_exec_ioctl(struct drm_device *drm_dev, void *data, | 1233 | int exynos_g2d_exec_ioctl(struct drm_device *drm_dev, void *data, |
1236 | struct drm_file *file) | 1234 | struct drm_file *file) |
@@ -1293,7 +1291,6 @@ int exynos_g2d_exec_ioctl(struct drm_device *drm_dev, void *data, | |||
1293 | out: | 1291 | out: |
1294 | return 0; | 1292 | return 0; |
1295 | } | 1293 | } |
1296 | EXPORT_SYMBOL_GPL(exynos_g2d_exec_ioctl); | ||
1297 | 1294 | ||
1298 | static int g2d_subdrv_probe(struct drm_device *drm_dev, struct device *dev) | 1295 | static int g2d_subdrv_probe(struct drm_device *drm_dev, struct device *dev) |
1299 | { | 1296 | { |
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.c b/drivers/gpu/drm/exynos/exynos_drm_gem.c index f12fbc36b120..407afedb6003 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_gem.c +++ b/drivers/gpu/drm/exynos/exynos_drm_gem.c | |||
@@ -56,39 +56,35 @@ static int exynos_drm_alloc_buf(struct exynos_drm_gem_obj *obj) | |||
56 | nr_pages = obj->size >> PAGE_SHIFT; | 56 | nr_pages = obj->size >> PAGE_SHIFT; |
57 | 57 | ||
58 | if (!is_drm_iommu_supported(dev)) { | 58 | if (!is_drm_iommu_supported(dev)) { |
59 | dma_addr_t start_addr; | ||
60 | unsigned int i = 0; | ||
61 | |||
62 | obj->pages = drm_calloc_large(nr_pages, sizeof(struct page *)); | 59 | obj->pages = drm_calloc_large(nr_pages, sizeof(struct page *)); |
63 | if (!obj->pages) { | 60 | if (!obj->pages) { |
64 | DRM_ERROR("failed to allocate pages.\n"); | 61 | DRM_ERROR("failed to allocate pages.\n"); |
65 | return -ENOMEM; | 62 | return -ENOMEM; |
66 | } | 63 | } |
64 | } | ||
67 | 65 | ||
68 | obj->cookie = dma_alloc_attrs(dev->dev, | 66 | obj->cookie = dma_alloc_attrs(dev->dev, obj->size, &obj->dma_addr, |
69 | obj->size, | 67 | GFP_KERNEL, &obj->dma_attrs); |
70 | &obj->dma_addr, GFP_KERNEL, | 68 | if (!obj->cookie) { |
71 | &obj->dma_attrs); | 69 | DRM_ERROR("failed to allocate buffer.\n"); |
72 | if (!obj->cookie) { | 70 | if (obj->pages) |
73 | DRM_ERROR("failed to allocate buffer.\n"); | ||
74 | drm_free_large(obj->pages); | 71 | drm_free_large(obj->pages); |
75 | return -ENOMEM; | 72 | return -ENOMEM; |
76 | } | 73 | } |
74 | |||
75 | if (obj->pages) { | ||
76 | dma_addr_t start_addr; | ||
77 | unsigned int i = 0; | ||
77 | 78 | ||
78 | start_addr = obj->dma_addr; | 79 | start_addr = obj->dma_addr; |
79 | while (i < nr_pages) { | 80 | while (i < nr_pages) { |
80 | obj->pages[i] = phys_to_page(start_addr); | 81 | obj->pages[i] = pfn_to_page(dma_to_pfn(dev->dev, |
82 | start_addr)); | ||
81 | start_addr += PAGE_SIZE; | 83 | start_addr += PAGE_SIZE; |
82 | i++; | 84 | i++; |
83 | } | 85 | } |
84 | } else { | 86 | } else { |
85 | obj->pages = dma_alloc_attrs(dev->dev, obj->size, | 87 | obj->pages = obj->cookie; |
86 | &obj->dma_addr, GFP_KERNEL, | ||
87 | &obj->dma_attrs); | ||
88 | if (!obj->pages) { | ||
89 | DRM_ERROR("failed to allocate buffer.\n"); | ||
90 | return -ENOMEM; | ||
91 | } | ||
92 | } | 88 | } |
93 | 89 | ||
94 | DRM_DEBUG_KMS("dma_addr(0x%lx), size(0x%lx)\n", | 90 | DRM_DEBUG_KMS("dma_addr(0x%lx), size(0x%lx)\n", |
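The rework in the hunk above makes dma_alloc_attrs() the single allocation call for both paths; only the way obj->pages is filled differs. Without an IOMMU the buffer is physically contiguous, so the pages array is rebuilt from the returned DMA address (via the ARM-specific dma_to_pfn() translation the patch itself uses); with an IOMMU the allocator's cookie already is the pages array. A condensed sketch of that flow follows — the struct, the plain dma_alloc_coherent() call and the simplified error handling are illustrative only, not the driver API:

#include <linux/dma-mapping.h>	/* dma_alloc_coherent(), dma_to_pfn() on ARM */
#include <linux/mm.h>		/* pfn_to_page() */
#include <linux/slab.h>

struct buf {
	void		*cookie;	/* value returned by the DMA allocator */
	dma_addr_t	dma_addr;
	struct page	**pages;
	unsigned long	size;
};

static int alloc_buf_sketch(struct device *dev, struct buf *b, bool has_iommu)
{
	unsigned long i, nr_pages = b->size >> PAGE_SHIFT;

	if (!has_iommu) {
		/* non-IOMMU: the pages array must be built by hand below */
		b->pages = kcalloc(nr_pages, sizeof(*b->pages), GFP_KERNEL);
		if (!b->pages)
			return -ENOMEM;
	}

	b->cookie = dma_alloc_coherent(dev, b->size, &b->dma_addr, GFP_KERNEL);
	if (!b->cookie) {
		kfree(b->pages);	/* kfree(NULL) is a no-op on the IOMMU path */
		return -ENOMEM;
	}

	if (b->pages) {
		/* contiguous case: derive each struct page from the bus address */
		for (i = 0; i < nr_pages; i++)
			b->pages[i] = pfn_to_page(dma_to_pfn(dev,
						  b->dma_addr + i * PAGE_SIZE));
	} else {
		/* IOMMU case: the allocator's cookie already is the pages array */
		b->pages = b->cookie;
	}

	return 0;
}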
@@ -110,15 +106,11 @@ static void exynos_drm_free_buf(struct exynos_drm_gem_obj *obj) | |||
110 | DRM_DEBUG_KMS("dma_addr(0x%lx), size(0x%lx)\n", | 106 | DRM_DEBUG_KMS("dma_addr(0x%lx), size(0x%lx)\n", |
111 | (unsigned long)obj->dma_addr, obj->size); | 107 | (unsigned long)obj->dma_addr, obj->size); |
112 | 108 | ||
113 | if (!is_drm_iommu_supported(dev)) { | 109 | dma_free_attrs(dev->dev, obj->size, obj->cookie, |
114 | dma_free_attrs(dev->dev, obj->size, obj->cookie, | 110 | (dma_addr_t)obj->dma_addr, &obj->dma_attrs); |
115 | (dma_addr_t)obj->dma_addr, &obj->dma_attrs); | ||
116 | drm_free_large(obj->pages); | ||
117 | } else | ||
118 | dma_free_attrs(dev->dev, obj->size, obj->pages, | ||
119 | (dma_addr_t)obj->dma_addr, &obj->dma_attrs); | ||
120 | 111 | ||
121 | obj->dma_addr = (dma_addr_t)NULL; | 112 | if (!is_drm_iommu_supported(dev)) |
113 | drm_free_large(obj->pages); | ||
122 | } | 114 | } |
123 | 115 | ||
124 | static int exynos_drm_gem_handle_create(struct drm_gem_object *obj, | 116 | static int exynos_drm_gem_handle_create(struct drm_gem_object *obj, |
@@ -156,18 +148,14 @@ void exynos_drm_gem_destroy(struct exynos_drm_gem_obj *exynos_gem_obj) | |||
156 | * once dmabuf's refcount becomes 0. | 148 | * once dmabuf's refcount becomes 0. |
157 | */ | 149 | */ |
158 | if (obj->import_attach) | 150 | if (obj->import_attach) |
159 | goto out; | 151 | drm_prime_gem_destroy(obj, exynos_gem_obj->sgt); |
160 | 152 | else | |
161 | exynos_drm_free_buf(exynos_gem_obj); | 153 | exynos_drm_free_buf(exynos_gem_obj); |
162 | |||
163 | out: | ||
164 | drm_gem_free_mmap_offset(obj); | ||
165 | 154 | ||
166 | /* release file pointer to gem object. */ | 155 | /* release file pointer to gem object. */ |
167 | drm_gem_object_release(obj); | 156 | drm_gem_object_release(obj); |
168 | 157 | ||
169 | kfree(exynos_gem_obj); | 158 | kfree(exynos_gem_obj); |
170 | exynos_gem_obj = NULL; | ||
171 | } | 159 | } |
172 | 160 | ||
173 | unsigned long exynos_drm_gem_get_size(struct drm_device *dev, | 161 | unsigned long exynos_drm_gem_get_size(struct drm_device *dev, |
@@ -190,8 +178,7 @@ unsigned long exynos_drm_gem_get_size(struct drm_device *dev, | |||
190 | return exynos_gem_obj->size; | 178 | return exynos_gem_obj->size; |
191 | } | 179 | } |
192 | 180 | ||
193 | 181 | static struct exynos_drm_gem_obj *exynos_drm_gem_init(struct drm_device *dev, | |
194 | struct exynos_drm_gem_obj *exynos_drm_gem_init(struct drm_device *dev, | ||
195 | unsigned long size) | 182 | unsigned long size) |
196 | { | 183 | { |
197 | struct exynos_drm_gem_obj *exynos_gem_obj; | 184 | struct exynos_drm_gem_obj *exynos_gem_obj; |
@@ -212,6 +199,13 @@ struct exynos_drm_gem_obj *exynos_drm_gem_init(struct drm_device *dev, | |||
212 | return ERR_PTR(ret); | 199 | return ERR_PTR(ret); |
213 | } | 200 | } |
214 | 201 | ||
202 | ret = drm_gem_create_mmap_offset(obj); | ||
203 | if (ret < 0) { | ||
204 | drm_gem_object_release(obj); | ||
205 | kfree(exynos_gem_obj); | ||
206 | return ERR_PTR(ret); | ||
207 | } | ||
208 | |||
215 | DRM_DEBUG_KMS("created file object = 0x%x\n", (unsigned int)obj->filp); | 209 | DRM_DEBUG_KMS("created file object = 0x%x\n", (unsigned int)obj->filp); |
216 | 210 | ||
217 | return exynos_gem_obj; | 211 | return exynos_gem_obj; |
@@ -313,7 +307,7 @@ void exynos_drm_gem_put_dma_addr(struct drm_device *dev, | |||
313 | drm_gem_object_unreference_unlocked(obj); | 307 | drm_gem_object_unreference_unlocked(obj); |
314 | } | 308 | } |
315 | 309 | ||
316 | int exynos_drm_gem_mmap_buffer(struct exynos_drm_gem_obj *exynos_gem_obj, | 310 | static int exynos_drm_gem_mmap_buffer(struct exynos_drm_gem_obj *exynos_gem_obj, |
317 | struct vm_area_struct *vma) | 311 | struct vm_area_struct *vma) |
318 | { | 312 | { |
319 | struct drm_device *drm_dev = exynos_gem_obj->base.dev; | 313 | struct drm_device *drm_dev = exynos_gem_obj->base.dev; |
@@ -342,7 +336,8 @@ int exynos_drm_gem_mmap_buffer(struct exynos_drm_gem_obj *exynos_gem_obj, | |||
342 | 336 | ||
343 | int exynos_drm_gem_get_ioctl(struct drm_device *dev, void *data, | 337 | int exynos_drm_gem_get_ioctl(struct drm_device *dev, void *data, |
344 | struct drm_file *file_priv) | 338 | struct drm_file *file_priv) |
345 | { struct exynos_drm_gem_obj *exynos_gem_obj; | 339 | { |
340 | struct exynos_drm_gem_obj *exynos_gem_obj; | ||
346 | struct drm_exynos_gem_info *args = data; | 341 | struct drm_exynos_gem_info *args = data; |
347 | struct drm_gem_object *obj; | 342 | struct drm_gem_object *obj; |
348 | 343 | ||
@@ -402,6 +397,7 @@ int exynos_drm_gem_dumb_create(struct drm_file *file_priv, | |||
402 | struct drm_mode_create_dumb *args) | 397 | struct drm_mode_create_dumb *args) |
403 | { | 398 | { |
404 | struct exynos_drm_gem_obj *exynos_gem_obj; | 399 | struct exynos_drm_gem_obj *exynos_gem_obj; |
400 | unsigned int flags; | ||
405 | int ret; | 401 | int ret; |
406 | 402 | ||
407 | /* | 403 | /* |
@@ -413,16 +409,12 @@ int exynos_drm_gem_dumb_create(struct drm_file *file_priv, | |||
413 | args->pitch = args->width * ((args->bpp + 7) / 8); | 409 | args->pitch = args->width * ((args->bpp + 7) / 8); |
414 | args->size = args->pitch * args->height; | 410 | args->size = args->pitch * args->height; |
415 | 411 | ||
416 | if (is_drm_iommu_supported(dev)) { | 412 | if (is_drm_iommu_supported(dev)) |
417 | exynos_gem_obj = exynos_drm_gem_create(dev, | 413 | flags = EXYNOS_BO_NONCONTIG | EXYNOS_BO_WC; |
418 | EXYNOS_BO_NONCONTIG | EXYNOS_BO_WC, | 414 | else |
419 | args->size); | 415 | flags = EXYNOS_BO_CONTIG | EXYNOS_BO_WC; |
420 | } else { | ||
421 | exynos_gem_obj = exynos_drm_gem_create(dev, | ||
422 | EXYNOS_BO_CONTIG | EXYNOS_BO_WC, | ||
423 | args->size); | ||
424 | } | ||
425 | 416 | ||
417 | exynos_gem_obj = exynos_drm_gem_create(dev, flags, args->size); | ||
426 | if (IS_ERR(exynos_gem_obj)) { | 418 | if (IS_ERR(exynos_gem_obj)) { |
427 | dev_warn(dev->dev, "FB allocation failed.\n"); | 419 | dev_warn(dev->dev, "FB allocation failed.\n"); |
428 | return PTR_ERR(exynos_gem_obj); | 420 | return PTR_ERR(exynos_gem_obj); |
@@ -460,14 +452,9 @@ int exynos_drm_gem_dumb_map_offset(struct drm_file *file_priv, | |||
460 | goto unlock; | 452 | goto unlock; |
461 | } | 453 | } |
462 | 454 | ||
463 | ret = drm_gem_create_mmap_offset(obj); | ||
464 | if (ret) | ||
465 | goto out; | ||
466 | |||
467 | *offset = drm_vma_node_offset_addr(&obj->vma_node); | 455 | *offset = drm_vma_node_offset_addr(&obj->vma_node); |
468 | DRM_DEBUG_KMS("offset = 0x%lx\n", (unsigned long)*offset); | 456 | DRM_DEBUG_KMS("offset = 0x%lx\n", (unsigned long)*offset); |
469 | 457 | ||
470 | out: | ||
471 | drm_gem_object_unreference(obj); | 458 | drm_gem_object_unreference(obj); |
472 | unlock: | 459 | unlock: |
473 | mutex_unlock(&dev->struct_mutex); | 460 | mutex_unlock(&dev->struct_mutex); |
@@ -543,7 +530,6 @@ int exynos_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma) | |||
543 | 530 | ||
544 | err_close_vm: | 531 | err_close_vm: |
545 | drm_gem_vm_close(vma); | 532 | drm_gem_vm_close(vma); |
546 | drm_gem_free_mmap_offset(obj); | ||
547 | 533 | ||
548 | return ret; | 534 | return ret; |
549 | } | 535 | } |
@@ -588,6 +574,8 @@ exynos_drm_gem_prime_import_sg_table(struct drm_device *dev, | |||
588 | if (ret < 0) | 574 | if (ret < 0) |
589 | goto err_free_large; | 575 | goto err_free_large; |
590 | 576 | ||
577 | exynos_gem_obj->sgt = sgt; | ||
578 | |||
591 | if (sgt->nents == 1) { | 579 | if (sgt->nents == 1) { |
592 | /* always physically continuous memory if sgt->nents is 1. */ | 580 | /* always physically continuous memory if sgt->nents is 1. */ |
593 | exynos_gem_obj->flags |= EXYNOS_BO_CONTIG; | 581 | exynos_gem_obj->flags |= EXYNOS_BO_CONTIG; |
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.h b/drivers/gpu/drm/exynos/exynos_drm_gem.h index cd62f8410d1e..b62d1007c0e0 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_gem.h +++ b/drivers/gpu/drm/exynos/exynos_drm_gem.h | |||
@@ -39,6 +39,7 @@ | |||
39 | * - this address could be physical address without IOMMU and | 39 | * - this address could be physical address without IOMMU and |
40 | * device address with IOMMU. | 40 | * device address with IOMMU. |
41 | * @pages: Array of backing pages. | 41 | * @pages: Array of backing pages. |
42 | * @sgt: Imported sg_table. | ||
42 | * | 43 | * |
43 | * P.S. this object would be transferred to user as kms_bo.handle so | 44 | * P.S. this object would be transferred to user as kms_bo.handle so |
44 | * user can access the buffer through kms_bo.handle. | 45 | * user can access the buffer through kms_bo.handle. |
@@ -52,6 +53,7 @@ struct exynos_drm_gem_obj { | |||
52 | dma_addr_t dma_addr; | 53 | dma_addr_t dma_addr; |
53 | struct dma_attrs dma_attrs; | 54 | struct dma_attrs dma_attrs; |
54 | struct page **pages; | 55 | struct page **pages; |
56 | struct sg_table *sgt; | ||
55 | }; | 57 | }; |
56 | 58 | ||
57 | struct page **exynos_gem_get_pages(struct drm_gem_object *obj, gfp_t gfpmask); | 59 | struct page **exynos_gem_get_pages(struct drm_gem_object *obj, gfp_t gfpmask); |
@@ -59,10 +61,6 @@ struct page **exynos_gem_get_pages(struct drm_gem_object *obj, gfp_t gfpmask); | |||
59 | /* destroy a buffer with gem object */ | 61 | /* destroy a buffer with gem object */ |
60 | void exynos_drm_gem_destroy(struct exynos_drm_gem_obj *exynos_gem_obj); | 62 | void exynos_drm_gem_destroy(struct exynos_drm_gem_obj *exynos_gem_obj); |
61 | 63 | ||
62 | /* create a private gem object and initialize it. */ | ||
63 | struct exynos_drm_gem_obj *exynos_drm_gem_init(struct drm_device *dev, | ||
64 | unsigned long size); | ||
65 | |||
66 | /* create a new buffer with gem object */ | 64 | /* create a new buffer with gem object */ |
67 | struct exynos_drm_gem_obj *exynos_drm_gem_create(struct drm_device *dev, | 65 | struct exynos_drm_gem_obj *exynos_drm_gem_create(struct drm_device *dev, |
68 | unsigned int flags, | 66 | unsigned int flags, |
diff --git a/drivers/gpu/drm/exynos/exynos_drm_rotator.c b/drivers/gpu/drm/exynos/exynos_drm_rotator.c index 425e70625388..2f5c118f4c8e 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_rotator.c +++ b/drivers/gpu/drm/exynos/exynos_drm_rotator.c | |||
@@ -786,6 +786,7 @@ static int rotator_remove(struct platform_device *pdev) | |||
786 | return 0; | 786 | return 0; |
787 | } | 787 | } |
788 | 788 | ||
789 | #ifdef CONFIG_PM | ||
789 | static int rotator_clk_crtl(struct rot_context *rot, bool enable) | 790 | static int rotator_clk_crtl(struct rot_context *rot, bool enable) |
790 | { | 791 | { |
791 | if (enable) { | 792 | if (enable) { |
@@ -822,7 +823,6 @@ static int rotator_resume(struct device *dev) | |||
822 | } | 823 | } |
823 | #endif | 824 | #endif |
824 | 825 | ||
825 | #ifdef CONFIG_PM | ||
826 | static int rotator_runtime_suspend(struct device *dev) | 826 | static int rotator_runtime_suspend(struct device *dev) |
827 | { | 827 | { |
828 | struct rot_context *rot = dev_get_drvdata(dev); | 828 | struct rot_context *rot = dev_get_drvdata(dev); |
diff --git a/drivers/gpu/drm/i915/intel_dp_mst.c b/drivers/gpu/drm/i915/intel_dp_mst.c index 3e4be5a3becd..6ade06888432 100644 --- a/drivers/gpu/drm/i915/intel_dp_mst.c +++ b/drivers/gpu/drm/i915/intel_dp_mst.c | |||
@@ -462,11 +462,17 @@ static struct drm_connector *intel_dp_add_mst_connector(struct drm_dp_mst_topolo | |||
462 | drm_object_attach_property(&connector->base, dev->mode_config.tile_property, 0); | 462 | drm_object_attach_property(&connector->base, dev->mode_config.tile_property, 0); |
463 | 463 | ||
464 | drm_mode_connector_set_path_property(connector, pathprop); | 464 | drm_mode_connector_set_path_property(connector, pathprop); |
465 | return connector; | ||
466 | } | ||
467 | |||
468 | static void intel_dp_register_mst_connector(struct drm_connector *connector) | ||
469 | { | ||
470 | struct intel_connector *intel_connector = to_intel_connector(connector); | ||
471 | struct drm_device *dev = connector->dev; | ||
465 | drm_modeset_lock_all(dev); | 472 | drm_modeset_lock_all(dev); |
466 | intel_connector_add_to_fbdev(intel_connector); | 473 | intel_connector_add_to_fbdev(intel_connector); |
467 | drm_modeset_unlock_all(dev); | 474 | drm_modeset_unlock_all(dev); |
468 | drm_connector_register(&intel_connector->base); | 475 | drm_connector_register(&intel_connector->base); |
469 | return connector; | ||
470 | } | 476 | } |
471 | 477 | ||
472 | static void intel_dp_destroy_mst_connector(struct drm_dp_mst_topology_mgr *mgr, | 478 | static void intel_dp_destroy_mst_connector(struct drm_dp_mst_topology_mgr *mgr, |
@@ -512,6 +518,7 @@ static void intel_dp_mst_hotplug(struct drm_dp_mst_topology_mgr *mgr) | |||
512 | 518 | ||
513 | static struct drm_dp_mst_topology_cbs mst_cbs = { | 519 | static struct drm_dp_mst_topology_cbs mst_cbs = { |
514 | .add_connector = intel_dp_add_mst_connector, | 520 | .add_connector = intel_dp_add_mst_connector, |
521 | .register_connector = intel_dp_register_mst_connector, | ||
515 | .destroy_connector = intel_dp_destroy_mst_connector, | 522 | .destroy_connector = intel_dp_destroy_mst_connector, |
516 | .hotplug = intel_dp_mst_hotplug, | 523 | .hotplug = intel_dp_mst_hotplug, |
517 | }; | 524 | }; |
diff --git a/drivers/gpu/drm/i915/intel_hotplug.c b/drivers/gpu/drm/i915/intel_hotplug.c index 53c0173a39fe..b17785719598 100644 --- a/drivers/gpu/drm/i915/intel_hotplug.c +++ b/drivers/gpu/drm/i915/intel_hotplug.c | |||
@@ -180,7 +180,7 @@ static void intel_hpd_irq_storm_disable(struct drm_i915_private *dev_priv) | |||
180 | 180 | ||
181 | /* Enable polling and queue hotplug re-enabling. */ | 181 | /* Enable polling and queue hotplug re-enabling. */ |
182 | if (hpd_disabled) { | 182 | if (hpd_disabled) { |
183 | drm_kms_helper_poll_enable(dev); | 183 | drm_kms_helper_poll_enable_locked(dev); |
184 | mod_delayed_work(system_wq, &dev_priv->hotplug.reenable_work, | 184 | mod_delayed_work(system_wq, &dev_priv->hotplug.reenable_work, |
185 | msecs_to_jiffies(HPD_STORM_REENABLE_DELAY)); | 185 | msecs_to_jiffies(HPD_STORM_REENABLE_DELAY)); |
186 | } | 186 | } |
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c index 72e0edd7bbde..7412caedcf7f 100644 --- a/drivers/gpu/drm/i915/intel_lrc.c +++ b/drivers/gpu/drm/i915/intel_lrc.c | |||
@@ -484,18 +484,18 @@ void intel_lrc_irq_handler(struct intel_engine_cs *ring) | |||
484 | status_pointer = I915_READ(RING_CONTEXT_STATUS_PTR(ring)); | 484 | status_pointer = I915_READ(RING_CONTEXT_STATUS_PTR(ring)); |
485 | 485 | ||
486 | read_pointer = ring->next_context_status_buffer; | 486 | read_pointer = ring->next_context_status_buffer; |
487 | write_pointer = status_pointer & 0x07; | 487 | write_pointer = status_pointer & GEN8_CSB_PTR_MASK; |
488 | if (read_pointer > write_pointer) | 488 | if (read_pointer > write_pointer) |
489 | write_pointer += 6; | 489 | write_pointer += GEN8_CSB_ENTRIES; |
490 | 490 | ||
491 | spin_lock(&ring->execlist_lock); | 491 | spin_lock(&ring->execlist_lock); |
492 | 492 | ||
493 | while (read_pointer < write_pointer) { | 493 | while (read_pointer < write_pointer) { |
494 | read_pointer++; | 494 | read_pointer++; |
495 | status = I915_READ(RING_CONTEXT_STATUS_BUF(ring) + | 495 | status = I915_READ(RING_CONTEXT_STATUS_BUF(ring) + |
496 | (read_pointer % 6) * 8); | 496 | (read_pointer % GEN8_CSB_ENTRIES) * 8); |
497 | status_id = I915_READ(RING_CONTEXT_STATUS_BUF(ring) + | 497 | status_id = I915_READ(RING_CONTEXT_STATUS_BUF(ring) + |
498 | (read_pointer % 6) * 8 + 4); | 498 | (read_pointer % GEN8_CSB_ENTRIES) * 8 + 4); |
499 | 499 | ||
500 | if (status & GEN8_CTX_STATUS_IDLE_ACTIVE) | 500 | if (status & GEN8_CTX_STATUS_IDLE_ACTIVE) |
501 | continue; | 501 | continue; |
@@ -521,10 +521,12 @@ void intel_lrc_irq_handler(struct intel_engine_cs *ring) | |||
521 | spin_unlock(&ring->execlist_lock); | 521 | spin_unlock(&ring->execlist_lock); |
522 | 522 | ||
523 | WARN(submit_contexts > 2, "More than two context complete events?\n"); | 523 | WARN(submit_contexts > 2, "More than two context complete events?\n"); |
524 | ring->next_context_status_buffer = write_pointer % 6; | 524 | ring->next_context_status_buffer = write_pointer % GEN8_CSB_ENTRIES; |
525 | 525 | ||
526 | I915_WRITE(RING_CONTEXT_STATUS_PTR(ring), | 526 | I915_WRITE(RING_CONTEXT_STATUS_PTR(ring), |
527 | _MASKED_FIELD(0x07 << 8, ((u32)ring->next_context_status_buffer & 0x07) << 8)); | 527 | _MASKED_FIELD(GEN8_CSB_PTR_MASK << 8, |
528 | ((u32)ring->next_context_status_buffer & | ||
529 | GEN8_CSB_PTR_MASK) << 8)); | ||
528 | } | 530 | } |
529 | 531 | ||
530 | static int execlists_context_queue(struct drm_i915_gem_request *request) | 532 | static int execlists_context_queue(struct drm_i915_gem_request *request) |
@@ -1422,6 +1424,7 @@ static int gen8_init_common_ring(struct intel_engine_cs *ring) | |||
1422 | { | 1424 | { |
1423 | struct drm_device *dev = ring->dev; | 1425 | struct drm_device *dev = ring->dev; |
1424 | struct drm_i915_private *dev_priv = dev->dev_private; | 1426 | struct drm_i915_private *dev_priv = dev->dev_private; |
1427 | u8 next_context_status_buffer_hw; | ||
1425 | 1428 | ||
1426 | I915_WRITE_IMR(ring, ~(ring->irq_enable_mask | ring->irq_keep_mask)); | 1429 | I915_WRITE_IMR(ring, ~(ring->irq_enable_mask | ring->irq_keep_mask)); |
1427 | I915_WRITE(RING_HWSTAM(ring->mmio_base), 0xffffffff); | 1430 | I915_WRITE(RING_HWSTAM(ring->mmio_base), 0xffffffff); |
@@ -1436,7 +1439,29 @@ static int gen8_init_common_ring(struct intel_engine_cs *ring) | |||
1436 | _MASKED_BIT_DISABLE(GFX_REPLAY_MODE) | | 1439 | _MASKED_BIT_DISABLE(GFX_REPLAY_MODE) | |
1437 | _MASKED_BIT_ENABLE(GFX_RUN_LIST_ENABLE)); | 1440 | _MASKED_BIT_ENABLE(GFX_RUN_LIST_ENABLE)); |
1438 | POSTING_READ(RING_MODE_GEN7(ring)); | 1441 | POSTING_READ(RING_MODE_GEN7(ring)); |
1439 | ring->next_context_status_buffer = 0; | 1442 | |
1443 | /* | ||
1444 | * Instead of resetting the Context Status Buffer (CSB) read pointer to | ||
1445 | * zero, we need to read the write pointer from hardware and use its | ||
1446 | * value because "this register is power context save restored". | ||
1447 | * Effectively, these states have been observed: | ||
1448 | * | ||
1449 | * | Suspend-to-idle (freeze) | Suspend-to-RAM (mem) | | ||
1450 | * BDW | CSB regs not reset | CSB regs reset | | ||
1451 | * CHT | CSB regs not reset | CSB regs not reset | | ||
1452 | */ | ||
1453 | next_context_status_buffer_hw = (I915_READ(RING_CONTEXT_STATUS_PTR(ring)) | ||
1454 | & GEN8_CSB_PTR_MASK); | ||
1455 | |||
1456 | /* | ||
1457 | * When the CSB registers are reset (also after power-up / gpu reset), | ||
1458 | * CSB write pointer is set to all 1's, which is not valid, use '5' in | ||
1459 | * this special case, so the first element read is CSB[0]. | ||
1460 | */ | ||
1461 | if (next_context_status_buffer_hw == GEN8_CSB_PTR_MASK) | ||
1462 | next_context_status_buffer_hw = (GEN8_CSB_ENTRIES - 1); | ||
1463 | |||
1464 | ring->next_context_status_buffer = next_context_status_buffer_hw; | ||
1440 | DRM_DEBUG_DRIVER("Execlists enabled for %s\n", ring->name); | 1465 | DRM_DEBUG_DRIVER("Execlists enabled for %s\n", ring->name); |
1441 | 1466 | ||
1442 | memset(&ring->hangcheck, 0, sizeof(ring->hangcheck)); | 1467 | memset(&ring->hangcheck, 0, sizeof(ring->hangcheck)); |
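The comment added above explains why the driver now reads the CSB write pointer back from hardware instead of assuming zero: the register is power-context save/restored on some parts and comes up as all 1's after a reset. The arithmetic is small enough to show on its own. In the sketch below, the two constants mirror the ones added to intel_lrc.h just after this hunk, and read_csb_ptr() is only a placeholder for the I915_READ of RING_CONTEXT_STATUS_PTR — this is an illustration of the masking and the all-ones special case, not the driver code itself:

#define CSB_ENTRIES  6
#define CSB_PTR_MASK 0x07

/* read_csb_ptr() stands in for I915_READ(RING_CONTEXT_STATUS_PTR(ring)). */
static unsigned int derive_next_csb(unsigned int (*read_csb_ptr)(void))
{
	unsigned int hw = read_csb_ptr() & CSB_PTR_MASK;

	/*
	 * After a CSB reset the write pointer reads back as all 1's (0x7),
	 * which is not a valid slot index for a 6-entry buffer. Start at 5
	 * so the first entry consumed is CSB[0] once the index wraps mod 6.
	 */
	if (hw == CSB_PTR_MASK)
		hw = CSB_ENTRIES - 1;

	return hw;
}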
diff --git a/drivers/gpu/drm/i915/intel_lrc.h b/drivers/gpu/drm/i915/intel_lrc.h index 64f89f9982a2..3c63bb32ad81 100644 --- a/drivers/gpu/drm/i915/intel_lrc.h +++ b/drivers/gpu/drm/i915/intel_lrc.h | |||
@@ -25,6 +25,8 @@ | |||
25 | #define _INTEL_LRC_H_ | 25 | #define _INTEL_LRC_H_ |
26 | 26 | ||
27 | #define GEN8_LR_CONTEXT_ALIGN 4096 | 27 | #define GEN8_LR_CONTEXT_ALIGN 4096 |
28 | #define GEN8_CSB_ENTRIES 6 | ||
29 | #define GEN8_CSB_PTR_MASK 0x07 | ||
28 | 30 | ||
29 | /* Execlists regs */ | 31 | /* Execlists regs */ |
30 | #define RING_ELSP(ring) ((ring)->mmio_base+0x230) | 32 | #define RING_ELSP(ring) ((ring)->mmio_base+0x230) |
diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c index af7fdb3bd663..7401cf90b0db 100644 --- a/drivers/gpu/drm/i915/intel_runtime_pm.c +++ b/drivers/gpu/drm/i915/intel_runtime_pm.c | |||
@@ -246,7 +246,8 @@ static void skl_power_well_post_enable(struct drm_i915_private *dev_priv, | |||
246 | } | 246 | } |
247 | 247 | ||
248 | if (power_well->data == SKL_DISP_PW_1) { | 248 | if (power_well->data == SKL_DISP_PW_1) { |
249 | intel_prepare_ddi(dev); | 249 | if (!dev_priv->power_domains.initializing) |
250 | intel_prepare_ddi(dev); | ||
250 | gen8_irq_power_well_post_enable(dev_priv, 1 << PIPE_A); | 251 | gen8_irq_power_well_post_enable(dev_priv, 1 << PIPE_A); |
251 | } | 252 | } |
252 | } | 253 | } |
diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c index dd845f82cc24..4649bd2ed340 100644 --- a/drivers/gpu/drm/qxl/qxl_display.c +++ b/drivers/gpu/drm/qxl/qxl_display.c | |||
@@ -618,7 +618,7 @@ static int qxl_crtc_mode_set(struct drm_crtc *crtc, | |||
618 | adjusted_mode->hdisplay, | 618 | adjusted_mode->hdisplay, |
619 | adjusted_mode->vdisplay); | 619 | adjusted_mode->vdisplay); |
620 | 620 | ||
621 | if (qcrtc->index == 0) | 621 | if (bo->is_primary == false) |
622 | recreate_primary = true; | 622 | recreate_primary = true; |
623 | 623 | ||
624 | if (bo->surf.stride * bo->surf.height > qdev->vram_size) { | 624 | if (bo->surf.stride * bo->surf.height > qdev->vram_size) { |
diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c index c3872598b85a..65adb9c72377 100644 --- a/drivers/gpu/drm/radeon/atombios_encoders.c +++ b/drivers/gpu/drm/radeon/atombios_encoders.c | |||
@@ -1624,8 +1624,9 @@ radeon_atom_encoder_dpms_avivo(struct drm_encoder *encoder, int mode) | |||
1624 | } else | 1624 | } else |
1625 | atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); | 1625 | atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); |
1626 | if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) { | 1626 | if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) { |
1627 | args.ucAction = ATOM_LCD_BLON; | 1627 | struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; |
1628 | atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); | 1628 | |
1629 | atombios_set_backlight_level(radeon_encoder, dig->backlight_level); | ||
1629 | } | 1630 | } |
1630 | break; | 1631 | break; |
1631 | case DRM_MODE_DPMS_STANDBY: | 1632 | case DRM_MODE_DPMS_STANDBY: |
@@ -1706,8 +1707,7 @@ radeon_atom_encoder_dpms_dig(struct drm_encoder *encoder, int mode) | |||
1706 | atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_VIDEO_ON, 0); | 1707 | atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_VIDEO_ON, 0); |
1707 | } | 1708 | } |
1708 | if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) | 1709 | if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) |
1709 | atombios_dig_transmitter_setup(encoder, | 1710 | atombios_set_backlight_level(radeon_encoder, dig->backlight_level); |
1710 | ATOM_TRANSMITTER_ACTION_LCD_BLON, 0, 0); | ||
1711 | if (ext_encoder) | 1711 | if (ext_encoder) |
1712 | atombios_external_encoder_setup(encoder, ext_encoder, ATOM_ENABLE); | 1712 | atombios_external_encoder_setup(encoder, ext_encoder, ATOM_ENABLE); |
1713 | break; | 1713 | break; |
diff --git a/drivers/gpu/drm/radeon/radeon_dp_mst.c b/drivers/gpu/drm/radeon/radeon_dp_mst.c index 5e09c061847f..6cddae44fa6e 100644 --- a/drivers/gpu/drm/radeon/radeon_dp_mst.c +++ b/drivers/gpu/drm/radeon/radeon_dp_mst.c | |||
@@ -265,7 +265,6 @@ static struct drm_connector *radeon_dp_add_mst_connector(struct drm_dp_mst_topol | |||
265 | { | 265 | { |
266 | struct radeon_connector *master = container_of(mgr, struct radeon_connector, mst_mgr); | 266 | struct radeon_connector *master = container_of(mgr, struct radeon_connector, mst_mgr); |
267 | struct drm_device *dev = master->base.dev; | 267 | struct drm_device *dev = master->base.dev; |
268 | struct radeon_device *rdev = dev->dev_private; | ||
269 | struct radeon_connector *radeon_connector; | 268 | struct radeon_connector *radeon_connector; |
270 | struct drm_connector *connector; | 269 | struct drm_connector *connector; |
271 | 270 | ||
@@ -286,12 +285,19 @@ static struct drm_connector *radeon_dp_add_mst_connector(struct drm_dp_mst_topol | |||
286 | drm_object_attach_property(&connector->base, dev->mode_config.path_property, 0); | 285 | drm_object_attach_property(&connector->base, dev->mode_config.path_property, 0); |
287 | drm_mode_connector_set_path_property(connector, pathprop); | 286 | drm_mode_connector_set_path_property(connector, pathprop); |
288 | 287 | ||
288 | return connector; | ||
289 | } | ||
290 | |||
291 | static void radeon_dp_register_mst_connector(struct drm_connector *connector) | ||
292 | { | ||
293 | struct drm_device *dev = connector->dev; | ||
294 | struct radeon_device *rdev = dev->dev_private; | ||
295 | |||
289 | drm_modeset_lock_all(dev); | 296 | drm_modeset_lock_all(dev); |
290 | radeon_fb_add_connector(rdev, connector); | 297 | radeon_fb_add_connector(rdev, connector); |
291 | drm_modeset_unlock_all(dev); | 298 | drm_modeset_unlock_all(dev); |
292 | 299 | ||
293 | drm_connector_register(connector); | 300 | drm_connector_register(connector); |
294 | return connector; | ||
295 | } | 301 | } |
296 | 302 | ||
297 | static void radeon_dp_destroy_mst_connector(struct drm_dp_mst_topology_mgr *mgr, | 303 | static void radeon_dp_destroy_mst_connector(struct drm_dp_mst_topology_mgr *mgr, |
@@ -324,6 +330,7 @@ static void radeon_dp_mst_hotplug(struct drm_dp_mst_topology_mgr *mgr) | |||
324 | 330 | ||
325 | struct drm_dp_mst_topology_cbs mst_cbs = { | 331 | struct drm_dp_mst_topology_cbs mst_cbs = { |
326 | .add_connector = radeon_dp_add_mst_connector, | 332 | .add_connector = radeon_dp_add_mst_connector, |
333 | .register_connector = radeon_dp_register_mst_connector, | ||
327 | .destroy_connector = radeon_dp_destroy_mst_connector, | 334 | .destroy_connector = radeon_dp_destroy_mst_connector, |
328 | .hotplug = radeon_dp_mst_hotplug, | 335 | .hotplug = radeon_dp_mst_hotplug, |
329 | }; | 336 | }; |
diff --git a/drivers/gpu/drm/radeon/radeon_fb.c b/drivers/gpu/drm/radeon/radeon_fb.c index 7214858ffcea..1aa657fe31cb 100644 --- a/drivers/gpu/drm/radeon/radeon_fb.c +++ b/drivers/gpu/drm/radeon/radeon_fb.c | |||
@@ -48,40 +48,10 @@ struct radeon_fbdev { | |||
48 | struct radeon_device *rdev; | 48 | struct radeon_device *rdev; |
49 | }; | 49 | }; |
50 | 50 | ||
51 | /** | ||
52 | * radeon_fb_helper_set_par - Hide cursor on CRTCs used by fbdev. | ||
53 | * | ||
54 | * @info: fbdev info | ||
55 | * | ||
56 | * This function hides the cursor on all CRTCs used by fbdev. | ||
57 | */ | ||
58 | static int radeon_fb_helper_set_par(struct fb_info *info) | ||
59 | { | ||
60 | int ret; | ||
61 | |||
62 | ret = drm_fb_helper_set_par(info); | ||
63 | |||
64 | /* XXX: with universal plane support fbdev will automatically disable | ||
65 | * all non-primary planes (including the cursor) | ||
66 | */ | ||
67 | if (ret == 0) { | ||
68 | struct drm_fb_helper *fb_helper = info->par; | ||
69 | int i; | ||
70 | |||
71 | for (i = 0; i < fb_helper->crtc_count; i++) { | ||
72 | struct drm_crtc *crtc = fb_helper->crtc_info[i].mode_set.crtc; | ||
73 | |||
74 | radeon_crtc_cursor_set2(crtc, NULL, 0, 0, 0, 0, 0); | ||
75 | } | ||
76 | } | ||
77 | |||
78 | return ret; | ||
79 | } | ||
80 | |||
81 | static struct fb_ops radeonfb_ops = { | 51 | static struct fb_ops radeonfb_ops = { |
82 | .owner = THIS_MODULE, | 52 | .owner = THIS_MODULE, |
83 | .fb_check_var = drm_fb_helper_check_var, | 53 | .fb_check_var = drm_fb_helper_check_var, |
84 | .fb_set_par = radeon_fb_helper_set_par, | 54 | .fb_set_par = drm_fb_helper_set_par, |
85 | .fb_fillrect = drm_fb_helper_cfb_fillrect, | 55 | .fb_fillrect = drm_fb_helper_cfb_fillrect, |
86 | .fb_copyarea = drm_fb_helper_cfb_copyarea, | 56 | .fb_copyarea = drm_fb_helper_cfb_copyarea, |
87 | .fb_imageblit = drm_fb_helper_cfb_imageblit, | 57 | .fb_imageblit = drm_fb_helper_cfb_imageblit, |
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c index 5ae8f921da2a..8a76821177a6 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c | |||
@@ -681,6 +681,14 @@ static bool vmw_cmdbuf_try_alloc(struct vmw_cmdbuf_man *man, | |||
681 | 0, 0, | 681 | 0, 0, |
682 | DRM_MM_SEARCH_DEFAULT, | 682 | DRM_MM_SEARCH_DEFAULT, |
683 | DRM_MM_CREATE_DEFAULT); | 683 | DRM_MM_CREATE_DEFAULT); |
684 | if (ret) { | ||
685 | (void) vmw_cmdbuf_man_process(man); | ||
686 | ret = drm_mm_insert_node_generic(&man->mm, info->node, | ||
687 | info->page_size, 0, 0, | ||
688 | DRM_MM_SEARCH_DEFAULT, | ||
689 | DRM_MM_CREATE_DEFAULT); | ||
690 | } | ||
691 | |||
684 | spin_unlock_bh(&man->lock); | 692 | spin_unlock_bh(&man->lock); |
685 | info->done = !ret; | 693 | info->done = !ret; |
686 | 694 | ||
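The hunk above adds a second allocation attempt after draining the command-buffer manager: if the first drm_mm_insert_node_generic() fails, processing pending command buffers may release space, so the call is retried once. A generic sketch of that try/reclaim/retry-once shape, with alloc() and reclaim() as placeholders for the vmwgfx calls rather than their real signatures:

/* Generic try/reclaim/retry-once pattern; alloc() and reclaim() stand in for
 * drm_mm_insert_node_generic() and vmw_cmdbuf_man_process() in the hunk above. */
static int alloc_with_reclaim(int (*alloc)(void *ctx), void (*reclaim)(void *ctx),
			      void *ctx)
{
	int ret = alloc(ctx);

	if (ret) {
		reclaim(ctx);		/* may free space held by finished work */
		ret = alloc(ctx);	/* one retry; still fails if truly full */
	}

	return ret;
}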
diff --git a/drivers/hwmon/abx500.c b/drivers/hwmon/abx500.c index 6cb89c0ebab6..1fd46859ed29 100644 --- a/drivers/hwmon/abx500.c +++ b/drivers/hwmon/abx500.c | |||
@@ -470,6 +470,7 @@ static const struct of_device_id abx500_temp_match[] = { | |||
470 | { .compatible = "stericsson,abx500-temp" }, | 470 | { .compatible = "stericsson,abx500-temp" }, |
471 | {}, | 471 | {}, |
472 | }; | 472 | }; |
473 | MODULE_DEVICE_TABLE(of, abx500_temp_match); | ||
473 | #endif | 474 | #endif |
474 | 475 | ||
475 | static struct platform_driver abx500_temp_driver = { | 476 | static struct platform_driver abx500_temp_driver = { |
diff --git a/drivers/hwmon/gpio-fan.c b/drivers/hwmon/gpio-fan.c index a3dae6d0082a..82de3deeb18a 100644 --- a/drivers/hwmon/gpio-fan.c +++ b/drivers/hwmon/gpio-fan.c | |||
@@ -539,6 +539,7 @@ static const struct of_device_id of_gpio_fan_match[] = { | |||
539 | { .compatible = "gpio-fan", }, | 539 | { .compatible = "gpio-fan", }, |
540 | {}, | 540 | {}, |
541 | }; | 541 | }; |
542 | MODULE_DEVICE_TABLE(of, of_gpio_fan_match); | ||
542 | #endif /* CONFIG_OF_GPIO */ | 543 | #endif /* CONFIG_OF_GPIO */ |
543 | 544 | ||
544 | static int gpio_fan_probe(struct platform_device *pdev) | 545 | static int gpio_fan_probe(struct platform_device *pdev) |
diff --git a/drivers/hwmon/pwm-fan.c b/drivers/hwmon/pwm-fan.c index 2d9a712699ff..3e23003f78b0 100644 --- a/drivers/hwmon/pwm-fan.c +++ b/drivers/hwmon/pwm-fan.c | |||
@@ -323,6 +323,7 @@ static const struct of_device_id of_pwm_fan_match[] = { | |||
323 | { .compatible = "pwm-fan", }, | 323 | { .compatible = "pwm-fan", }, |
324 | {}, | 324 | {}, |
325 | }; | 325 | }; |
326 | MODULE_DEVICE_TABLE(of, of_pwm_fan_match); | ||
326 | 327 | ||
327 | static struct platform_driver pwm_fan_driver = { | 328 | static struct platform_driver pwm_fan_driver = { |
328 | .probe = pwm_fan_probe, | 329 | .probe = pwm_fan_probe, |
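The three hwmon hunks above all add the same line. MODULE_DEVICE_TABLE(of, table) exports the OF match table into the module's alias section so udev/modprobe can autoload the driver when a matching device-tree node appears; without it the module only binds when loaded by hand. A minimal sketch of the pattern — the compatible string, driver name and empty probe are placeholders, not a real binding:

#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev)
{
	return 0;	/* placeholder probe */
}

static const struct of_device_id example_of_match[] = {
	{ .compatible = "vendor,example-fan" },	/* hypothetical compatible */
	{ }
};
MODULE_DEVICE_TABLE(of, example_of_match);	/* emits the aliases used for autoload */

static struct platform_driver example_driver = {
	.probe = example_probe,
	.driver = {
		.name = "example-fan",
		.of_match_table = example_of_match,
	},
};
module_platform_driver(example_driver);

MODULE_LICENSE("GPL");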
diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c index 3a3738fe016b..cd4510a63375 100644 --- a/drivers/idle/intel_idle.c +++ b/drivers/idle/intel_idle.c | |||
@@ -620,7 +620,7 @@ static struct cpuidle_state skl_cstates[] = { | |||
620 | .name = "C6-SKL", | 620 | .name = "C6-SKL", |
621 | .desc = "MWAIT 0x20", | 621 | .desc = "MWAIT 0x20", |
622 | .flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED, | 622 | .flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED, |
623 | .exit_latency = 75, | 623 | .exit_latency = 85, |
624 | .target_residency = 200, | 624 | .target_residency = 200, |
625 | .enter = &intel_idle, | 625 | .enter = &intel_idle, |
626 | .enter_freeze = intel_idle_freeze, }, | 626 | .enter_freeze = intel_idle_freeze, }, |
@@ -636,11 +636,19 @@ static struct cpuidle_state skl_cstates[] = { | |||
636 | .name = "C8-SKL", | 636 | .name = "C8-SKL", |
637 | .desc = "MWAIT 0x40", | 637 | .desc = "MWAIT 0x40", |
638 | .flags = MWAIT2flg(0x40) | CPUIDLE_FLAG_TLB_FLUSHED, | 638 | .flags = MWAIT2flg(0x40) | CPUIDLE_FLAG_TLB_FLUSHED, |
639 | .exit_latency = 174, | 639 | .exit_latency = 200, |
640 | .target_residency = 800, | 640 | .target_residency = 800, |
641 | .enter = &intel_idle, | 641 | .enter = &intel_idle, |
642 | .enter_freeze = intel_idle_freeze, }, | 642 | .enter_freeze = intel_idle_freeze, }, |
643 | { | 643 | { |
644 | .name = "C9-SKL", | ||
645 | .desc = "MWAIT 0x50", | ||
646 | .flags = MWAIT2flg(0x50) | CPUIDLE_FLAG_TLB_FLUSHED, | ||
647 | .exit_latency = 480, | ||
648 | .target_residency = 5000, | ||
649 | .enter = &intel_idle, | ||
650 | .enter_freeze = intel_idle_freeze, }, | ||
651 | { | ||
644 | .name = "C10-SKL", | 652 | .name = "C10-SKL", |
645 | .desc = "MWAIT 0x60", | 653 | .desc = "MWAIT 0x60", |
646 | .flags = MWAIT2flg(0x60) | CPUIDLE_FLAG_TLB_FLUSHED, | 654 | .flags = MWAIT2flg(0x60) | CPUIDLE_FLAG_TLB_FLUSHED, |
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c index 41d6911e244e..f1ccd40beae9 100644 --- a/drivers/infiniband/hw/mlx5/main.c +++ b/drivers/infiniband/hw/mlx5/main.c | |||
@@ -245,7 +245,6 @@ static int mlx5_ib_query_device(struct ib_device *ibdev, | |||
245 | props->device_cap_flags |= IB_DEVICE_BAD_QKEY_CNTR; | 245 | props->device_cap_flags |= IB_DEVICE_BAD_QKEY_CNTR; |
246 | if (MLX5_CAP_GEN(mdev, apm)) | 246 | if (MLX5_CAP_GEN(mdev, apm)) |
247 | props->device_cap_flags |= IB_DEVICE_AUTO_PATH_MIG; | 247 | props->device_cap_flags |= IB_DEVICE_AUTO_PATH_MIG; |
248 | props->device_cap_flags |= IB_DEVICE_LOCAL_DMA_LKEY; | ||
249 | if (MLX5_CAP_GEN(mdev, xrc)) | 248 | if (MLX5_CAP_GEN(mdev, xrc)) |
250 | props->device_cap_flags |= IB_DEVICE_XRC; | 249 | props->device_cap_flags |= IB_DEVICE_XRC; |
251 | props->device_cap_flags |= IB_DEVICE_MEM_MGT_EXTENSIONS; | 250 | props->device_cap_flags |= IB_DEVICE_MEM_MGT_EXTENSIONS; |
@@ -795,53 +794,6 @@ static int mlx5_ib_mmap(struct ib_ucontext *ibcontext, struct vm_area_struct *vm | |||
795 | return 0; | 794 | return 0; |
796 | } | 795 | } |
797 | 796 | ||
798 | static int alloc_pa_mkey(struct mlx5_ib_dev *dev, u32 *key, u32 pdn) | ||
799 | { | ||
800 | struct mlx5_create_mkey_mbox_in *in; | ||
801 | struct mlx5_mkey_seg *seg; | ||
802 | struct mlx5_core_mr mr; | ||
803 | int err; | ||
804 | |||
805 | in = kzalloc(sizeof(*in), GFP_KERNEL); | ||
806 | if (!in) | ||
807 | return -ENOMEM; | ||
808 | |||
809 | seg = &in->seg; | ||
810 | seg->flags = MLX5_PERM_LOCAL_READ | MLX5_ACCESS_MODE_PA; | ||
811 | seg->flags_pd = cpu_to_be32(pdn | MLX5_MKEY_LEN64); | ||
812 | seg->qpn_mkey7_0 = cpu_to_be32(0xffffff << 8); | ||
813 | seg->start_addr = 0; | ||
814 | |||
815 | err = mlx5_core_create_mkey(dev->mdev, &mr, in, sizeof(*in), | ||
816 | NULL, NULL, NULL); | ||
817 | if (err) { | ||
818 | mlx5_ib_warn(dev, "failed to create mkey, %d\n", err); | ||
819 | goto err_in; | ||
820 | } | ||
821 | |||
822 | kfree(in); | ||
823 | *key = mr.key; | ||
824 | |||
825 | return 0; | ||
826 | |||
827 | err_in: | ||
828 | kfree(in); | ||
829 | |||
830 | return err; | ||
831 | } | ||
832 | |||
833 | static void free_pa_mkey(struct mlx5_ib_dev *dev, u32 key) | ||
834 | { | ||
835 | struct mlx5_core_mr mr; | ||
836 | int err; | ||
837 | |||
838 | memset(&mr, 0, sizeof(mr)); | ||
839 | mr.key = key; | ||
840 | err = mlx5_core_destroy_mkey(dev->mdev, &mr); | ||
841 | if (err) | ||
842 | mlx5_ib_warn(dev, "failed to destroy mkey 0x%x\n", key); | ||
843 | } | ||
844 | |||
845 | static struct ib_pd *mlx5_ib_alloc_pd(struct ib_device *ibdev, | 797 | static struct ib_pd *mlx5_ib_alloc_pd(struct ib_device *ibdev, |
846 | struct ib_ucontext *context, | 798 | struct ib_ucontext *context, |
847 | struct ib_udata *udata) | 799 | struct ib_udata *udata) |
@@ -867,13 +819,6 @@ static struct ib_pd *mlx5_ib_alloc_pd(struct ib_device *ibdev, | |||
867 | kfree(pd); | 819 | kfree(pd); |
868 | return ERR_PTR(-EFAULT); | 820 | return ERR_PTR(-EFAULT); |
869 | } | 821 | } |
870 | } else { | ||
871 | err = alloc_pa_mkey(to_mdev(ibdev), &pd->pa_lkey, pd->pdn); | ||
872 | if (err) { | ||
873 | mlx5_core_dealloc_pd(to_mdev(ibdev)->mdev, pd->pdn); | ||
874 | kfree(pd); | ||
875 | return ERR_PTR(err); | ||
876 | } | ||
877 | } | 822 | } |
878 | 823 | ||
879 | return &pd->ibpd; | 824 | return &pd->ibpd; |
@@ -884,9 +829,6 @@ static int mlx5_ib_dealloc_pd(struct ib_pd *pd) | |||
884 | struct mlx5_ib_dev *mdev = to_mdev(pd->device); | 829 | struct mlx5_ib_dev *mdev = to_mdev(pd->device); |
885 | struct mlx5_ib_pd *mpd = to_mpd(pd); | 830 | struct mlx5_ib_pd *mpd = to_mpd(pd); |
886 | 831 | ||
887 | if (!pd->uobject) | ||
888 | free_pa_mkey(mdev, mpd->pa_lkey); | ||
889 | |||
890 | mlx5_core_dealloc_pd(mdev->mdev, mpd->pdn); | 832 | mlx5_core_dealloc_pd(mdev->mdev, mpd->pdn); |
891 | kfree(mpd); | 833 | kfree(mpd); |
892 | 834 | ||
@@ -1245,18 +1187,10 @@ static int create_dev_resources(struct mlx5_ib_resources *devr) | |||
1245 | struct ib_srq_init_attr attr; | 1187 | struct ib_srq_init_attr attr; |
1246 | struct mlx5_ib_dev *dev; | 1188 | struct mlx5_ib_dev *dev; |
1247 | struct ib_cq_init_attr cq_attr = {.cqe = 1}; | 1189 | struct ib_cq_init_attr cq_attr = {.cqe = 1}; |
1248 | u32 rsvd_lkey; | ||
1249 | int ret = 0; | 1190 | int ret = 0; |
1250 | 1191 | ||
1251 | dev = container_of(devr, struct mlx5_ib_dev, devr); | 1192 | dev = container_of(devr, struct mlx5_ib_dev, devr); |
1252 | 1193 | ||
1253 | ret = mlx5_core_query_special_context(dev->mdev, &rsvd_lkey); | ||
1254 | if (ret) { | ||
1255 | pr_err("Failed to query special context %d\n", ret); | ||
1256 | return ret; | ||
1257 | } | ||
1258 | dev->ib_dev.local_dma_lkey = rsvd_lkey; | ||
1259 | |||
1260 | devr->p0 = mlx5_ib_alloc_pd(&dev->ib_dev, NULL, NULL); | 1194 | devr->p0 = mlx5_ib_alloc_pd(&dev->ib_dev, NULL, NULL); |
1261 | if (IS_ERR(devr->p0)) { | 1195 | if (IS_ERR(devr->p0)) { |
1262 | ret = PTR_ERR(devr->p0); | 1196 | ret = PTR_ERR(devr->p0); |
@@ -1418,6 +1352,7 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev) | |||
1418 | strlcpy(dev->ib_dev.name, "mlx5_%d", IB_DEVICE_NAME_MAX); | 1352 | strlcpy(dev->ib_dev.name, "mlx5_%d", IB_DEVICE_NAME_MAX); |
1419 | dev->ib_dev.owner = THIS_MODULE; | 1353 | dev->ib_dev.owner = THIS_MODULE; |
1420 | dev->ib_dev.node_type = RDMA_NODE_IB_CA; | 1354 | dev->ib_dev.node_type = RDMA_NODE_IB_CA; |
1355 | dev->ib_dev.local_dma_lkey = 0 /* not supported for now */; | ||
1421 | dev->num_ports = MLX5_CAP_GEN(mdev, num_ports); | 1356 | dev->num_ports = MLX5_CAP_GEN(mdev, num_ports); |
1422 | dev->ib_dev.phys_port_cnt = dev->num_ports; | 1357 | dev->ib_dev.phys_port_cnt = dev->num_ports; |
1423 | dev->ib_dev.num_comp_vectors = | 1358 | dev->ib_dev.num_comp_vectors = |
diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h index bb8cda79e881..22123b79d550 100644 --- a/drivers/infiniband/hw/mlx5/mlx5_ib.h +++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h | |||
@@ -103,7 +103,6 @@ static inline struct mlx5_ib_ucontext *to_mucontext(struct ib_ucontext *ibuconte | |||
103 | struct mlx5_ib_pd { | 103 | struct mlx5_ib_pd { |
104 | struct ib_pd ibpd; | 104 | struct ib_pd ibpd; |
105 | u32 pdn; | 105 | u32 pdn; |
106 | u32 pa_lkey; | ||
107 | }; | 106 | }; |
108 | 107 | ||
109 | /* Use macros here so that don't have to duplicate | 108 | /* Use macros here so that don't have to duplicate |
@@ -213,7 +212,6 @@ struct mlx5_ib_qp { | |||
213 | int uuarn; | 212 | int uuarn; |
214 | 213 | ||
215 | int create_type; | 214 | int create_type; |
216 | u32 pa_lkey; | ||
217 | 215 | ||
218 | /* Store signature errors */ | 216 | /* Store signature errors */ |
219 | bool signature_en; | 217 | bool signature_en; |
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c index c745c6c5e10d..6f521a3418e8 100644 --- a/drivers/infiniband/hw/mlx5/qp.c +++ b/drivers/infiniband/hw/mlx5/qp.c | |||
@@ -925,8 +925,6 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd, | |||
925 | err = create_kernel_qp(dev, init_attr, qp, &in, &inlen); | 925 | err = create_kernel_qp(dev, init_attr, qp, &in, &inlen); |
926 | if (err) | 926 | if (err) |
927 | mlx5_ib_dbg(dev, "err %d\n", err); | 927 | mlx5_ib_dbg(dev, "err %d\n", err); |
928 | else | ||
929 | qp->pa_lkey = to_mpd(pd)->pa_lkey; | ||
930 | } | 928 | } |
931 | 929 | ||
932 | if (err) | 930 | if (err) |
@@ -2045,7 +2043,7 @@ static void set_frwr_pages(struct mlx5_wqe_data_seg *dseg, | |||
2045 | mfrpl->mapped_page_list[i] = cpu_to_be64(page_list[i] | perm); | 2043 | mfrpl->mapped_page_list[i] = cpu_to_be64(page_list[i] | perm); |
2046 | dseg->addr = cpu_to_be64(mfrpl->map); | 2044 | dseg->addr = cpu_to_be64(mfrpl->map); |
2047 | dseg->byte_count = cpu_to_be32(ALIGN(sizeof(u64) * wr->wr.fast_reg.page_list_len, 64)); | 2045 | dseg->byte_count = cpu_to_be32(ALIGN(sizeof(u64) * wr->wr.fast_reg.page_list_len, 64)); |
2048 | dseg->lkey = cpu_to_be32(pd->pa_lkey); | 2046 | dseg->lkey = cpu_to_be32(pd->ibpd.local_dma_lkey); |
2049 | } | 2047 | } |
2050 | 2048 | ||
2051 | static __be32 send_ieth(struct ib_send_wr *wr) | 2049 | static __be32 send_ieth(struct ib_send_wr *wr) |
diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h index ca2873698d75..4cd5428a2399 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib.h +++ b/drivers/infiniband/ulp/ipoib/ipoib.h | |||
@@ -80,7 +80,7 @@ enum { | |||
80 | IPOIB_NUM_WC = 4, | 80 | IPOIB_NUM_WC = 4, |
81 | 81 | ||
82 | IPOIB_MAX_PATH_REC_QUEUE = 3, | 82 | IPOIB_MAX_PATH_REC_QUEUE = 3, |
83 | IPOIB_MAX_MCAST_QUEUE = 3, | 83 | IPOIB_MAX_MCAST_QUEUE = 64, |
84 | 84 | ||
85 | IPOIB_FLAG_OPER_UP = 0, | 85 | IPOIB_FLAG_OPER_UP = 0, |
86 | IPOIB_FLAG_INITIALIZED = 1, | 86 | IPOIB_FLAG_INITIALIZED = 1, |
@@ -548,6 +548,8 @@ void ipoib_path_iter_read(struct ipoib_path_iter *iter, | |||
548 | 548 | ||
549 | int ipoib_mcast_attach(struct net_device *dev, u16 mlid, | 549 | int ipoib_mcast_attach(struct net_device *dev, u16 mlid, |
550 | union ib_gid *mgid, int set_qkey); | 550 | union ib_gid *mgid, int set_qkey); |
551 | int ipoib_mcast_leave(struct net_device *dev, struct ipoib_mcast *mcast); | ||
552 | struct ipoib_mcast *__ipoib_mcast_find(struct net_device *dev, void *mgid); | ||
551 | 553 | ||
552 | int ipoib_init_qp(struct net_device *dev); | 554 | int ipoib_init_qp(struct net_device *dev); |
553 | int ipoib_transport_dev_init(struct net_device *dev, struct ib_device *ca); | 555 | int ipoib_transport_dev_init(struct net_device *dev, struct ib_device *ca); |
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c index 36536ce5a3e2..f74316e679d2 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_main.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c | |||
@@ -1149,6 +1149,9 @@ static void __ipoib_reap_neigh(struct ipoib_dev_priv *priv) | |||
1149 | unsigned long dt; | 1149 | unsigned long dt; |
1150 | unsigned long flags; | 1150 | unsigned long flags; |
1151 | int i; | 1151 | int i; |
1152 | LIST_HEAD(remove_list); | ||
1153 | struct ipoib_mcast *mcast, *tmcast; | ||
1154 | struct net_device *dev = priv->dev; | ||
1152 | 1155 | ||
1153 | if (test_bit(IPOIB_STOP_NEIGH_GC, &priv->flags)) | 1156 | if (test_bit(IPOIB_STOP_NEIGH_GC, &priv->flags)) |
1154 | return; | 1157 | return; |
@@ -1176,6 +1179,19 @@ static void __ipoib_reap_neigh(struct ipoib_dev_priv *priv) | |||
1176 | lockdep_is_held(&priv->lock))) != NULL) { | 1179 | lockdep_is_held(&priv->lock))) != NULL) { |
1177 | /* was the neigh idle for two GC periods */ | 1180 | /* was the neigh idle for two GC periods */ |
1178 | if (time_after(neigh_obsolete, neigh->alive)) { | 1181 | if (time_after(neigh_obsolete, neigh->alive)) { |
1182 | u8 *mgid = neigh->daddr + 4; | ||
1183 | |||
1184 | /* Is this multicast ? */ | ||
1185 | if (*mgid == 0xff) { | ||
1186 | mcast = __ipoib_mcast_find(dev, mgid); | ||
1187 | |||
1188 | if (mcast && test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags)) { | ||
1189 | list_del(&mcast->list); | ||
1190 | rb_erase(&mcast->rb_node, &priv->multicast_tree); | ||
1191 | list_add_tail(&mcast->list, &remove_list); | ||
1192 | } | ||
1193 | } | ||
1194 | |||
1179 | rcu_assign_pointer(*np, | 1195 | rcu_assign_pointer(*np, |
1180 | rcu_dereference_protected(neigh->hnext, | 1196 | rcu_dereference_protected(neigh->hnext, |
1181 | lockdep_is_held(&priv->lock))); | 1197 | lockdep_is_held(&priv->lock))); |
@@ -1191,6 +1207,8 @@ static void __ipoib_reap_neigh(struct ipoib_dev_priv *priv) | |||
1191 | 1207 | ||
1192 | out_unlock: | 1208 | out_unlock: |
1193 | spin_unlock_irqrestore(&priv->lock, flags); | 1209 | spin_unlock_irqrestore(&priv->lock, flags); |
1210 | list_for_each_entry_safe(mcast, tmcast, &remove_list, list) | ||
1211 | ipoib_mcast_leave(dev, mcast); | ||
1194 | } | 1212 | } |
1195 | 1213 | ||
1196 | static void ipoib_reap_neigh(struct work_struct *work) | 1214 | static void ipoib_reap_neigh(struct work_struct *work) |
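The reaper change above collects expired send-only multicast entries on a local remove_list while priv->lock is held and only calls ipoib_mcast_leave() after the lock is dropped, since leaving a group can sleep. The "unlink under the lock, act after unlocking" shape, sketched with a generic list and a placeholder do_leave() instead of the IPoIB types:

#include <linux/list.h>
#include <linux/spinlock.h>

/* 'entry' and do_leave() stand in for ipoib_mcast and ipoib_mcast_leave(). */
struct entry {
	struct list_head list;
};

static void reap_sketch(spinlock_t *lock, struct list_head *active,
			void (*do_leave)(struct entry *))
{
	LIST_HEAD(remove_list);
	struct entry *e, *tmp;
	unsigned long flags;

	spin_lock_irqsave(lock, flags);
	list_for_each_entry_safe(e, tmp, active, list)
		list_move_tail(&e->list, &remove_list);	/* unlink only; no sleeping here */
	spin_unlock_irqrestore(lock, flags);

	/* Safe to sleep now: the entries are no longer reachable from 'active'. */
	list_for_each_entry_safe(e, tmp, &remove_list, list)
		do_leave(e);
}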
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c index 09a1748f9d13..136cbefe00f8 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c | |||
@@ -153,7 +153,7 @@ static struct ipoib_mcast *ipoib_mcast_alloc(struct net_device *dev, | |||
153 | return mcast; | 153 | return mcast; |
154 | } | 154 | } |
155 | 155 | ||
156 | static struct ipoib_mcast *__ipoib_mcast_find(struct net_device *dev, void *mgid) | 156 | struct ipoib_mcast *__ipoib_mcast_find(struct net_device *dev, void *mgid) |
157 | { | 157 | { |
158 | struct ipoib_dev_priv *priv = netdev_priv(dev); | 158 | struct ipoib_dev_priv *priv = netdev_priv(dev); |
159 | struct rb_node *n = priv->multicast_tree.rb_node; | 159 | struct rb_node *n = priv->multicast_tree.rb_node; |
@@ -508,17 +508,19 @@ static void ipoib_mcast_join(struct net_device *dev, struct ipoib_mcast *mcast) | |||
508 | rec.hop_limit = priv->broadcast->mcmember.hop_limit; | 508 | rec.hop_limit = priv->broadcast->mcmember.hop_limit; |
509 | 509 | ||
510 | /* | 510 | /* |
511 | * Historically Linux IPoIB has never properly supported SEND | 511 | * Send-only IB Multicast joins do not work at the core |
512 | * ONLY join. It emulated it by not providing all the required | 512 | * IB layer yet, so we can't use them here. However, |
513 | * attributes, which is enough to prevent group creation and | 513 | * we are emulating an Ethernet multicast send, which |
514 | * detect if there are full members or not. A major problem | 514 | * does not require a multicast subscription and will |
515 | * with supporting SEND ONLY is detecting when the group is | 515 | * still send properly. The most appropriate thing to |
516 | * auto-destroyed as IPoIB will cache the MLID.. | 516 | * do is to create the group if it doesn't exist as that |
517 | * most closely emulates the behavior, from a user space | ||
518 | * application perspecitive, of Ethernet multicast | ||
519 | * operation. For now, we do a full join, maybe later | ||
520 | * when the core IB layers support send only joins we | ||
521 | * will use them. | ||
517 | */ | 522 | */ |
518 | #if 1 | 523 | #if 0 |
519 | if (test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags)) | ||
520 | comp_mask &= ~IB_SA_MCMEMBER_REC_TRAFFIC_CLASS; | ||
521 | #else | ||
522 | if (test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags)) | 524 | if (test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags)) |
523 | rec.join_state = 4; | 525 | rec.join_state = 4; |
524 | #endif | 526 | #endif |
@@ -675,7 +677,7 @@ int ipoib_mcast_stop_thread(struct net_device *dev) | |||
675 | return 0; | 677 | return 0; |
676 | } | 678 | } |
677 | 679 | ||
678 | static int ipoib_mcast_leave(struct net_device *dev, struct ipoib_mcast *mcast) | 680 | int ipoib_mcast_leave(struct net_device *dev, struct ipoib_mcast *mcast) |
679 | { | 681 | { |
680 | struct ipoib_dev_priv *priv = netdev_priv(dev); | 682 | struct ipoib_dev_priv *priv = netdev_priv(dev); |
681 | int ret = 0; | 683 | int ret = 0; |
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.c b/drivers/infiniband/ulp/iser/iscsi_iser.c index 1ace5d83a4d7..f58ff96b6cbb 100644 --- a/drivers/infiniband/ulp/iser/iscsi_iser.c +++ b/drivers/infiniband/ulp/iser/iscsi_iser.c | |||
@@ -97,6 +97,11 @@ unsigned int iser_max_sectors = ISER_DEF_MAX_SECTORS; | |||
97 | module_param_named(max_sectors, iser_max_sectors, uint, S_IRUGO | S_IWUSR); | 97 | module_param_named(max_sectors, iser_max_sectors, uint, S_IRUGO | S_IWUSR); |
98 | MODULE_PARM_DESC(max_sectors, "Max number of sectors in a single scsi command (default:1024"); | 98 | MODULE_PARM_DESC(max_sectors, "Max number of sectors in a single scsi command (default:1024"); |
99 | 99 | ||
100 | bool iser_always_reg = true; | ||
101 | module_param_named(always_register, iser_always_reg, bool, S_IRUGO); | ||
102 | MODULE_PARM_DESC(always_register, | ||
103 | "Always register memory, even for continuous memory regions (default:true)"); | ||
104 | |||
100 | bool iser_pi_enable = false; | 105 | bool iser_pi_enable = false; |
101 | module_param_named(pi_enable, iser_pi_enable, bool, S_IRUGO); | 106 | module_param_named(pi_enable, iser_pi_enable, bool, S_IRUGO); |
102 | MODULE_PARM_DESC(pi_enable, "Enable T10-PI offload support (default:disabled)"); | 107 | MODULE_PARM_DESC(pi_enable, "Enable T10-PI offload support (default:disabled)"); |
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.h b/drivers/infiniband/ulp/iser/iscsi_iser.h index 86f6583485ef..a5edd6ede692 100644 --- a/drivers/infiniband/ulp/iser/iscsi_iser.h +++ b/drivers/infiniband/ulp/iser/iscsi_iser.h | |||
@@ -611,6 +611,7 @@ extern int iser_debug_level; | |||
611 | extern bool iser_pi_enable; | 611 | extern bool iser_pi_enable; |
612 | extern int iser_pi_guard; | 612 | extern int iser_pi_guard; |
613 | extern unsigned int iser_max_sectors; | 613 | extern unsigned int iser_max_sectors; |
614 | extern bool iser_always_reg; | ||
614 | 615 | ||
615 | int iser_assign_reg_ops(struct iser_device *device); | 616 | int iser_assign_reg_ops(struct iser_device *device); |
616 | 617 | ||
diff --git a/drivers/infiniband/ulp/iser/iser_memory.c b/drivers/infiniband/ulp/iser/iser_memory.c index 2493cc748db8..4c46d67d37a1 100644 --- a/drivers/infiniband/ulp/iser/iser_memory.c +++ b/drivers/infiniband/ulp/iser/iser_memory.c | |||
@@ -803,11 +803,12 @@ static int | |||
803 | iser_reg_prot_sg(struct iscsi_iser_task *task, | 803 | iser_reg_prot_sg(struct iscsi_iser_task *task, |
804 | struct iser_data_buf *mem, | 804 | struct iser_data_buf *mem, |
805 | struct iser_fr_desc *desc, | 805 | struct iser_fr_desc *desc, |
806 | bool use_dma_key, | ||
806 | struct iser_mem_reg *reg) | 807 | struct iser_mem_reg *reg) |
807 | { | 808 | { |
808 | struct iser_device *device = task->iser_conn->ib_conn.device; | 809 | struct iser_device *device = task->iser_conn->ib_conn.device; |
809 | 810 | ||
810 | if (mem->dma_nents == 1) | 811 | if (use_dma_key) |
811 | return iser_reg_dma(device, mem, reg); | 812 | return iser_reg_dma(device, mem, reg); |
812 | 813 | ||
813 | return device->reg_ops->reg_mem(task, mem, &desc->pi_ctx->rsc, reg); | 814 | return device->reg_ops->reg_mem(task, mem, &desc->pi_ctx->rsc, reg); |
@@ -817,11 +818,12 @@ static int | |||
817 | iser_reg_data_sg(struct iscsi_iser_task *task, | 818 | iser_reg_data_sg(struct iscsi_iser_task *task, |
818 | struct iser_data_buf *mem, | 819 | struct iser_data_buf *mem, |
819 | struct iser_fr_desc *desc, | 820 | struct iser_fr_desc *desc, |
821 | bool use_dma_key, | ||
820 | struct iser_mem_reg *reg) | 822 | struct iser_mem_reg *reg) |
821 | { | 823 | { |
822 | struct iser_device *device = task->iser_conn->ib_conn.device; | 824 | struct iser_device *device = task->iser_conn->ib_conn.device; |
823 | 825 | ||
824 | if (mem->dma_nents == 1) | 826 | if (use_dma_key) |
825 | return iser_reg_dma(device, mem, reg); | 827 | return iser_reg_dma(device, mem, reg); |
826 | 828 | ||
827 | return device->reg_ops->reg_mem(task, mem, &desc->rsc, reg); | 829 | return device->reg_ops->reg_mem(task, mem, &desc->rsc, reg); |
@@ -836,14 +838,17 @@ int iser_reg_rdma_mem(struct iscsi_iser_task *task, | |||
836 | struct iser_mem_reg *reg = &task->rdma_reg[dir]; | 838 | struct iser_mem_reg *reg = &task->rdma_reg[dir]; |
837 | struct iser_mem_reg *data_reg; | 839 | struct iser_mem_reg *data_reg; |
838 | struct iser_fr_desc *desc = NULL; | 840 | struct iser_fr_desc *desc = NULL; |
841 | bool use_dma_key; | ||
839 | int err; | 842 | int err; |
840 | 843 | ||
841 | err = iser_handle_unaligned_buf(task, mem, dir); | 844 | err = iser_handle_unaligned_buf(task, mem, dir); |
842 | if (unlikely(err)) | 845 | if (unlikely(err)) |
843 | return err; | 846 | return err; |
844 | 847 | ||
845 | if (mem->dma_nents != 1 || | 848 | use_dma_key = (mem->dma_nents == 1 && !iser_always_reg && |
846 | scsi_get_prot_op(task->sc) != SCSI_PROT_NORMAL) { | 849 | scsi_get_prot_op(task->sc) == SCSI_PROT_NORMAL); |
850 | |||
851 | if (!use_dma_key) { | ||
847 | desc = device->reg_ops->reg_desc_get(ib_conn); | 852 | desc = device->reg_ops->reg_desc_get(ib_conn); |
848 | reg->mem_h = desc; | 853 | reg->mem_h = desc; |
849 | } | 854 | } |
@@ -853,7 +858,7 @@ int iser_reg_rdma_mem(struct iscsi_iser_task *task, | |||
853 | else | 858 | else |
854 | data_reg = &task->desc.data_reg; | 859 | data_reg = &task->desc.data_reg; |
855 | 860 | ||
856 | err = iser_reg_data_sg(task, mem, desc, data_reg); | 861 | err = iser_reg_data_sg(task, mem, desc, use_dma_key, data_reg); |
857 | if (unlikely(err)) | 862 | if (unlikely(err)) |
858 | goto err_reg; | 863 | goto err_reg; |
859 | 864 | ||
@@ -866,7 +871,8 @@ int iser_reg_rdma_mem(struct iscsi_iser_task *task, | |||
866 | if (unlikely(err)) | 871 | if (unlikely(err)) |
867 | goto err_reg; | 872 | goto err_reg; |
868 | 873 | ||
869 | err = iser_reg_prot_sg(task, mem, desc, prot_reg); | 874 | err = iser_reg_prot_sg(task, mem, desc, |
875 | use_dma_key, prot_reg); | ||
870 | if (unlikely(err)) | 876 | if (unlikely(err)) |
871 | goto err_reg; | 877 | goto err_reg; |
872 | } | 878 | } |
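The iser_memory.c change above replaces the scattered mem->dma_nents == 1 tests with a single use_dma_key decision computed once in iser_reg_rdma_mem() and passed down. A compact sketch of that decision as a standalone predicate follows; the enum and types are simplified stand-ins for the SCSI protection-op API, not the real interface.

#include <stdbool.h>
#include <stdio.h>

enum prot_op { PROT_NORMAL, PROT_DIF };  /* stand-in for scsi_get_prot_op() */

static bool use_dma_key(int dma_nents, bool always_reg, enum prot_op op)
{
	/* Single contiguous mapping, the user did not force registration,
	 * and no protection information is attached to the command. */
	return dma_nents == 1 && !always_reg && op == PROT_NORMAL;
}

int main(void)
{
	printf("%d\n", use_dma_key(1, false, PROT_NORMAL)); /* 1: DMA key ok          */
	printf("%d\n", use_dma_key(1, true,  PROT_NORMAL)); /* 0: always_register set */
	printf("%d\n", use_dma_key(2, false, PROT_NORMAL)); /* 0: scattered buffer    */
	printf("%d\n", use_dma_key(1, false, PROT_DIF));    /* 0: T10-PI in use       */
	return 0;
}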
diff --git a/drivers/infiniband/ulp/iser/iser_verbs.c b/drivers/infiniband/ulp/iser/iser_verbs.c index ae70cc1463ac..85132d867bc8 100644 --- a/drivers/infiniband/ulp/iser/iser_verbs.c +++ b/drivers/infiniband/ulp/iser/iser_verbs.c | |||
@@ -133,11 +133,15 @@ static int iser_create_device_ib_res(struct iser_device *device) | |||
133 | (unsigned long)comp); | 133 | (unsigned long)comp); |
134 | } | 134 | } |
135 | 135 | ||
136 | device->mr = ib_get_dma_mr(device->pd, IB_ACCESS_LOCAL_WRITE | | 136 | if (!iser_always_reg) { |
137 | IB_ACCESS_REMOTE_WRITE | | 137 | int access = IB_ACCESS_LOCAL_WRITE | |
138 | IB_ACCESS_REMOTE_READ); | 138 | IB_ACCESS_REMOTE_WRITE | |
139 | if (IS_ERR(device->mr)) | 139 | IB_ACCESS_REMOTE_READ; |
140 | goto dma_mr_err; | 140 | |
141 | device->mr = ib_get_dma_mr(device->pd, access); | ||
142 | if (IS_ERR(device->mr)) | ||
143 | goto dma_mr_err; | ||
144 | } | ||
141 | 145 | ||
142 | INIT_IB_EVENT_HANDLER(&device->event_handler, device->ib_device, | 146 | INIT_IB_EVENT_HANDLER(&device->event_handler, device->ib_device, |
143 | iser_event_handler); | 147 | iser_event_handler); |
@@ -147,7 +151,8 @@ static int iser_create_device_ib_res(struct iser_device *device) | |||
147 | return 0; | 151 | return 0; |
148 | 152 | ||
149 | handler_err: | 153 | handler_err: |
150 | ib_dereg_mr(device->mr); | 154 | if (device->mr) |
155 | ib_dereg_mr(device->mr); | ||
151 | dma_mr_err: | 156 | dma_mr_err: |
152 | for (i = 0; i < device->comps_used; i++) | 157 | for (i = 0; i < device->comps_used; i++) |
153 | tasklet_kill(&device->comps[i].tasklet); | 158 | tasklet_kill(&device->comps[i].tasklet); |
@@ -173,7 +178,6 @@ comps_err: | |||
173 | static void iser_free_device_ib_res(struct iser_device *device) | 178 | static void iser_free_device_ib_res(struct iser_device *device) |
174 | { | 179 | { |
175 | int i; | 180 | int i; |
176 | BUG_ON(device->mr == NULL); | ||
177 | 181 | ||
178 | for (i = 0; i < device->comps_used; i++) { | 182 | for (i = 0; i < device->comps_used; i++) { |
179 | struct iser_comp *comp = &device->comps[i]; | 183 | struct iser_comp *comp = &device->comps[i]; |
@@ -184,7 +188,8 @@ static void iser_free_device_ib_res(struct iser_device *device) | |||
184 | } | 188 | } |
185 | 189 | ||
186 | (void)ib_unregister_event_handler(&device->event_handler); | 190 | (void)ib_unregister_event_handler(&device->event_handler); |
187 | (void)ib_dereg_mr(device->mr); | 191 | if (device->mr) |
192 | (void)ib_dereg_mr(device->mr); | ||
188 | ib_dealloc_pd(device->pd); | 193 | ib_dealloc_pd(device->pd); |
189 | 194 | ||
190 | kfree(device->comps); | 195 | kfree(device->comps); |
diff --git a/drivers/input/joystick/Kconfig b/drivers/input/joystick/Kconfig index 56eb471b5576..4215b5382092 100644 --- a/drivers/input/joystick/Kconfig +++ b/drivers/input/joystick/Kconfig | |||
@@ -196,6 +196,7 @@ config JOYSTICK_TWIDJOY | |||
196 | config JOYSTICK_ZHENHUA | 196 | config JOYSTICK_ZHENHUA |
197 | tristate "5-byte Zhenhua RC transmitter" | 197 | tristate "5-byte Zhenhua RC transmitter" |
198 | select SERIO | 198 | select SERIO |
199 | select BITREVERSE | ||
199 | help | 200 | help |
200 | Say Y here if you have a Zhen Hua PPM-4CH transmitter which is | 201 | Say Y here if you have a Zhen Hua PPM-4CH transmitter which is |
201 | supplied with a ready to fly micro electric indoor helicopters | 202 | supplied with a ready to fly micro electric indoor helicopters |
diff --git a/drivers/input/joystick/walkera0701.c b/drivers/input/joystick/walkera0701.c index b76ac580703c..a8bc2fe170dd 100644 --- a/drivers/input/joystick/walkera0701.c +++ b/drivers/input/joystick/walkera0701.c | |||
@@ -150,7 +150,7 @@ static void walkera0701_irq_handler(void *handler_data) | |||
150 | if (w->counter == 24) { /* full frame */ | 150 | if (w->counter == 24) { /* full frame */ |
151 | walkera0701_parse_frame(w); | 151 | walkera0701_parse_frame(w); |
152 | w->counter = NO_SYNC; | 152 | w->counter = NO_SYNC; |
153 | if (abs(pulse_time - SYNC_PULSE) < RESERVE) /* new frame sync */ | 153 | if (abs64(pulse_time - SYNC_PULSE) < RESERVE) /* new frame sync */ |
154 | w->counter = 0; | 154 | w->counter = 0; |
155 | } else { | 155 | } else { |
156 | if ((pulse_time > (ANALOG_MIN_PULSE - RESERVE) | 156 | if ((pulse_time > (ANALOG_MIN_PULSE - RESERVE) |
@@ -161,7 +161,7 @@ static void walkera0701_irq_handler(void *handler_data) | |||
161 | } else | 161 | } else |
162 | w->counter = NO_SYNC; | 162 | w->counter = NO_SYNC; |
163 | } | 163 | } |
164 | } else if (abs(pulse_time - SYNC_PULSE - BIN0_PULSE) < | 164 | } else if (abs64(pulse_time - SYNC_PULSE - BIN0_PULSE) < |
165 | RESERVE + BIN1_PULSE - BIN0_PULSE) /* frame sync .. */ | 165 | RESERVE + BIN1_PULSE - BIN0_PULSE) /* frame sync .. */ |
166 | w->counter = 0; | 166 | w->counter = 0; |
167 | 167 | ||
diff --git a/drivers/input/keyboard/omap4-keypad.c b/drivers/input/keyboard/omap4-keypad.c index b052afec9a11..6639b2b8528a 100644 --- a/drivers/input/keyboard/omap4-keypad.c +++ b/drivers/input/keyboard/omap4-keypad.c | |||
@@ -266,7 +266,7 @@ static int omap4_keypad_probe(struct platform_device *pdev) | |||
266 | 266 | ||
267 | error = omap4_keypad_parse_dt(&pdev->dev, keypad_data); | 267 | error = omap4_keypad_parse_dt(&pdev->dev, keypad_data); |
268 | if (error) | 268 | if (error) |
269 | return error; | 269 | goto err_free_keypad; |
270 | 270 | ||
271 | res = request_mem_region(res->start, resource_size(res), pdev->name); | 271 | res = request_mem_region(res->start, resource_size(res), pdev->name); |
272 | if (!res) { | 272 | if (!res) { |
diff --git a/drivers/input/misc/pm8941-pwrkey.c b/drivers/input/misc/pm8941-pwrkey.c index 867db8a91372..e317b75357a0 100644 --- a/drivers/input/misc/pm8941-pwrkey.c +++ b/drivers/input/misc/pm8941-pwrkey.c | |||
@@ -93,7 +93,7 @@ static int pm8941_reboot_notify(struct notifier_block *nb, | |||
93 | default: | 93 | default: |
94 | reset_type = PON_PS_HOLD_TYPE_HARD_RESET; | 94 | reset_type = PON_PS_HOLD_TYPE_HARD_RESET; |
95 | break; | 95 | break; |
96 | }; | 96 | } |
97 | 97 | ||
98 | error = regmap_update_bits(pwrkey->regmap, | 98 | error = regmap_update_bits(pwrkey->regmap, |
99 | pwrkey->baseaddr + PON_PS_HOLD_RST_CTL, | 99 | pwrkey->baseaddr + PON_PS_HOLD_RST_CTL, |
diff --git a/drivers/input/misc/uinput.c b/drivers/input/misc/uinput.c index 345df9b03aed..5adbcedcb81c 100644 --- a/drivers/input/misc/uinput.c +++ b/drivers/input/misc/uinput.c | |||
@@ -414,7 +414,7 @@ static int uinput_setup_device(struct uinput_device *udev, | |||
414 | dev->id.product = user_dev->id.product; | 414 | dev->id.product = user_dev->id.product; |
415 | dev->id.version = user_dev->id.version; | 415 | dev->id.version = user_dev->id.version; |
416 | 416 | ||
417 | for_each_set_bit(i, dev->absbit, ABS_CNT) { | 417 | for (i = 0; i < ABS_CNT; i++) { |
418 | input_abs_set_max(dev, i, user_dev->absmax[i]); | 418 | input_abs_set_max(dev, i, user_dev->absmax[i]); |
419 | input_abs_set_min(dev, i, user_dev->absmin[i]); | 419 | input_abs_set_min(dev, i, user_dev->absmin[i]); |
420 | input_abs_set_fuzz(dev, i, user_dev->absfuzz[i]); | 420 | input_abs_set_fuzz(dev, i, user_dev->absfuzz[i]); |
diff --git a/drivers/input/mouse/elan_i2c.h b/drivers/input/mouse/elan_i2c.h index 73670f2aebfd..c0ec26118732 100644 --- a/drivers/input/mouse/elan_i2c.h +++ b/drivers/input/mouse/elan_i2c.h | |||
@@ -60,7 +60,7 @@ struct elan_transport_ops { | |||
60 | int (*get_sm_version)(struct i2c_client *client, | 60 | int (*get_sm_version)(struct i2c_client *client, |
61 | u8* ic_type, u8 *version); | 61 | u8* ic_type, u8 *version); |
62 | int (*get_checksum)(struct i2c_client *client, bool iap, u16 *csum); | 62 | int (*get_checksum)(struct i2c_client *client, bool iap, u16 *csum); |
63 | int (*get_product_id)(struct i2c_client *client, u8 *id); | 63 | int (*get_product_id)(struct i2c_client *client, u16 *id); |
64 | 64 | ||
65 | int (*get_max)(struct i2c_client *client, | 65 | int (*get_max)(struct i2c_client *client, |
66 | unsigned int *max_x, unsigned int *max_y); | 66 | unsigned int *max_x, unsigned int *max_y); |
diff --git a/drivers/input/mouse/elan_i2c_core.c b/drivers/input/mouse/elan_i2c_core.c index fa945304b9a5..5e1665bbaa0b 100644 --- a/drivers/input/mouse/elan_i2c_core.c +++ b/drivers/input/mouse/elan_i2c_core.c | |||
@@ -40,7 +40,7 @@ | |||
40 | #include "elan_i2c.h" | 40 | #include "elan_i2c.h" |
41 | 41 | ||
42 | #define DRIVER_NAME "elan_i2c" | 42 | #define DRIVER_NAME "elan_i2c" |
43 | #define ELAN_DRIVER_VERSION "1.6.0" | 43 | #define ELAN_DRIVER_VERSION "1.6.1" |
44 | #define ETP_MAX_PRESSURE 255 | 44 | #define ETP_MAX_PRESSURE 255 |
45 | #define ETP_FWIDTH_REDUCE 90 | 45 | #define ETP_FWIDTH_REDUCE 90 |
46 | #define ETP_FINGER_WIDTH 15 | 46 | #define ETP_FINGER_WIDTH 15 |
@@ -76,7 +76,7 @@ struct elan_tp_data { | |||
76 | unsigned int x_res; | 76 | unsigned int x_res; |
77 | unsigned int y_res; | 77 | unsigned int y_res; |
78 | 78 | ||
79 | u8 product_id; | 79 | u16 product_id; |
80 | u8 fw_version; | 80 | u8 fw_version; |
81 | u8 sm_version; | 81 | u8 sm_version; |
82 | u8 iap_version; | 82 | u8 iap_version; |
@@ -98,15 +98,25 @@ static int elan_get_fwinfo(u8 iap_version, u16 *validpage_count, | |||
98 | u16 *signature_address) | 98 | u16 *signature_address) |
99 | { | 99 | { |
100 | switch (iap_version) { | 100 | switch (iap_version) { |
101 | case 0x00: | ||
102 | case 0x06: | ||
101 | case 0x08: | 103 | case 0x08: |
102 | *validpage_count = 512; | 104 | *validpage_count = 512; |
103 | break; | 105 | break; |
106 | case 0x03: | ||
107 | case 0x07: | ||
104 | case 0x09: | 108 | case 0x09: |
109 | case 0x0A: | ||
110 | case 0x0B: | ||
111 | case 0x0C: | ||
105 | *validpage_count = 768; | 112 | *validpage_count = 768; |
106 | break; | 113 | break; |
107 | case 0x0D: | 114 | case 0x0D: |
108 | *validpage_count = 896; | 115 | *validpage_count = 896; |
109 | break; | 116 | break; |
117 | case 0x0E: | ||
118 | *validpage_count = 640; | ||
119 | break; | ||
110 | default: | 120 | default: |
111 | /* unknown ic type clear value */ | 121 | /* unknown ic type clear value */ |
112 | *validpage_count = 0; | 122 | *validpage_count = 0; |
@@ -266,11 +276,10 @@ static int elan_query_device_info(struct elan_tp_data *data) | |||
266 | 276 | ||
267 | error = elan_get_fwinfo(data->iap_version, &data->fw_validpage_count, | 277 | error = elan_get_fwinfo(data->iap_version, &data->fw_validpage_count, |
268 | &data->fw_signature_address); | 278 | &data->fw_signature_address); |
269 | if (error) { | 279 | if (error) |
270 | dev_err(&data->client->dev, | 280 | dev_warn(&data->client->dev, |
271 | "unknown iap version %d\n", data->iap_version); | 281 | "unexpected iap version %#04x (ic type: %#04x), firmware update will not work\n", |
272 | return error; | 282 | data->iap_version, data->ic_type); |
273 | } | ||
274 | 283 | ||
275 | return 0; | 284 | return 0; |
276 | } | 285 | } |
@@ -486,6 +495,9 @@ static ssize_t elan_sysfs_update_fw(struct device *dev, | |||
486 | const u8 *fw_signature; | 495 | const u8 *fw_signature; |
487 | static const u8 signature[] = {0xAA, 0x55, 0xCC, 0x33, 0xFF, 0xFF}; | 496 | static const u8 signature[] = {0xAA, 0x55, 0xCC, 0x33, 0xFF, 0xFF}; |
488 | 497 | ||
498 | if (data->fw_validpage_count == 0) | ||
499 | return -EINVAL; | ||
500 | |||
489 | /* Look for a firmware with the product id appended. */ | 501 | /* Look for a firmware with the product id appended. */ |
490 | fw_name = kasprintf(GFP_KERNEL, ETP_FW_NAME, data->product_id); | 502 | fw_name = kasprintf(GFP_KERNEL, ETP_FW_NAME, data->product_id); |
491 | if (!fw_name) { | 503 | if (!fw_name) { |
diff --git a/drivers/input/mouse/elan_i2c_i2c.c b/drivers/input/mouse/elan_i2c_i2c.c index 683c840c9dd7..a679e56c44cd 100644 --- a/drivers/input/mouse/elan_i2c_i2c.c +++ b/drivers/input/mouse/elan_i2c_i2c.c | |||
@@ -276,7 +276,7 @@ static int elan_i2c_get_sm_version(struct i2c_client *client, | |||
276 | return 0; | 276 | return 0; |
277 | } | 277 | } |
278 | 278 | ||
279 | static int elan_i2c_get_product_id(struct i2c_client *client, u8 *id) | 279 | static int elan_i2c_get_product_id(struct i2c_client *client, u16 *id) |
280 | { | 280 | { |
281 | int error; | 281 | int error; |
282 | u8 val[3]; | 282 | u8 val[3]; |
@@ -287,7 +287,7 @@ static int elan_i2c_get_product_id(struct i2c_client *client, u8 *id) | |||
287 | return error; | 287 | return error; |
288 | } | 288 | } |
289 | 289 | ||
290 | *id = val[0]; | 290 | *id = le16_to_cpup((__le16 *)val); |
291 | return 0; | 291 | return 0; |
292 | } | 292 | } |
293 | 293 | ||
diff --git a/drivers/input/mouse/elan_i2c_smbus.c b/drivers/input/mouse/elan_i2c_smbus.c index ff36a366b2aa..cb6aecbc1dc2 100644 --- a/drivers/input/mouse/elan_i2c_smbus.c +++ b/drivers/input/mouse/elan_i2c_smbus.c | |||
@@ -183,7 +183,7 @@ static int elan_smbus_get_sm_version(struct i2c_client *client, | |||
183 | return 0; | 183 | return 0; |
184 | } | 184 | } |
185 | 185 | ||
186 | static int elan_smbus_get_product_id(struct i2c_client *client, u8 *id) | 186 | static int elan_smbus_get_product_id(struct i2c_client *client, u16 *id) |
187 | { | 187 | { |
188 | int error; | 188 | int error; |
189 | u8 val[3]; | 189 | u8 val[3]; |
@@ -195,7 +195,7 @@ static int elan_smbus_get_product_id(struct i2c_client *client, u8 *id) | |||
195 | return error; | 195 | return error; |
196 | } | 196 | } |
197 | 197 | ||
198 | *id = val[1]; | 198 | *id = be16_to_cpup((__be16 *)val); |
199 | return 0; | 199 | return 0; |
200 | } | 200 | } |
201 | 201 | ||
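Both Elan transport backends above widen the product id from u8 to u16, but they decode the two bytes in opposite byte orders: the I2C path uses le16_to_cpup(), the SMBus path be16_to_cpup(). A standalone sketch of that decoding with local helpers in place of the kernel accessors; the sample reply buffers are made up for illustration.

#include <stdint.h>
#include <stdio.h>

static uint16_t get_le16(const uint8_t *p)
{
	return (uint16_t)(p[0] | (p[1] << 8));   /* low byte first  */
}

static uint16_t get_be16(const uint8_t *p)
{
	return (uint16_t)((p[0] << 8) | p[1]);   /* high byte first */
}

int main(void)
{
	uint8_t i2c_val[3]   = { 0x2a, 0x01, 0x00 };  /* hypothetical replies */
	uint8_t smbus_val[3] = { 0x01, 0x2a, 0x00 };

	/* Both decode to the same 16-bit product id, 0x012a. */
	printf("i2c:   0x%04x\n", get_le16(i2c_val));
	printf("smbus: 0x%04x\n", get_be16(smbus_val));
	return 0;
}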
diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c index 994ae7886156..6025eb430c0a 100644 --- a/drivers/input/mouse/synaptics.c +++ b/drivers/input/mouse/synaptics.c | |||
@@ -519,18 +519,14 @@ static int synaptics_set_mode(struct psmouse *psmouse) | |||
519 | struct synaptics_data *priv = psmouse->private; | 519 | struct synaptics_data *priv = psmouse->private; |
520 | 520 | ||
521 | priv->mode = 0; | 521 | priv->mode = 0; |
522 | 522 | if (priv->absolute_mode) | |
523 | if (priv->absolute_mode) { | ||
524 | priv->mode |= SYN_BIT_ABSOLUTE_MODE; | 523 | priv->mode |= SYN_BIT_ABSOLUTE_MODE; |
525 | if (SYN_CAP_EXTENDED(priv->capabilities)) | 524 | if (priv->disable_gesture) |
526 | priv->mode |= SYN_BIT_W_MODE; | ||
527 | } | ||
528 | |||
529 | if (!SYN_MODE_WMODE(priv->mode) && priv->disable_gesture) | ||
530 | priv->mode |= SYN_BIT_DISABLE_GESTURE; | 525 | priv->mode |= SYN_BIT_DISABLE_GESTURE; |
531 | |||
532 | if (psmouse->rate >= 80) | 526 | if (psmouse->rate >= 80) |
533 | priv->mode |= SYN_BIT_HIGH_RATE; | 527 | priv->mode |= SYN_BIT_HIGH_RATE; |
528 | if (SYN_CAP_EXTENDED(priv->capabilities)) | ||
529 | priv->mode |= SYN_BIT_W_MODE; | ||
534 | 530 | ||
535 | if (synaptics_mode_cmd(psmouse, priv->mode)) | 531 | if (synaptics_mode_cmd(psmouse, priv->mode)) |
536 | return -1; | 532 | return -1; |
diff --git a/drivers/input/serio/libps2.c b/drivers/input/serio/libps2.c index 75516996db20..316f2c897101 100644 --- a/drivers/input/serio/libps2.c +++ b/drivers/input/serio/libps2.c | |||
@@ -212,12 +212,17 @@ int __ps2_command(struct ps2dev *ps2dev, unsigned char *param, int command) | |||
212 | * time before the ACK arrives. | 212 | * time before the ACK arrives. |
213 | */ | 213 | */ |
214 | if (ps2_sendbyte(ps2dev, command & 0xff, | 214 | if (ps2_sendbyte(ps2dev, command & 0xff, |
215 | command == PS2_CMD_RESET_BAT ? 1000 : 200)) | 215 | command == PS2_CMD_RESET_BAT ? 1000 : 200)) { |
216 | goto out; | 216 | serio_pause_rx(ps2dev->serio); |
217 | goto out_reset_flags; | ||
218 | } | ||
217 | 219 | ||
218 | for (i = 0; i < send; i++) | 220 | for (i = 0; i < send; i++) { |
219 | if (ps2_sendbyte(ps2dev, param[i], 200)) | 221 | if (ps2_sendbyte(ps2dev, param[i], 200)) { |
220 | goto out; | 222 | serio_pause_rx(ps2dev->serio); |
223 | goto out_reset_flags; | ||
224 | } | ||
225 | } | ||
221 | 226 | ||
222 | /* | 227 | /* |
223 | * The reset command takes a long time to execute. | 228 | * The reset command takes a long time to execute. |
@@ -234,17 +239,18 @@ int __ps2_command(struct ps2dev *ps2dev, unsigned char *param, int command) | |||
234 | !(ps2dev->flags & PS2_FLAG_CMD), timeout); | 239 | !(ps2dev->flags & PS2_FLAG_CMD), timeout); |
235 | } | 240 | } |
236 | 241 | ||
242 | serio_pause_rx(ps2dev->serio); | ||
243 | |||
237 | if (param) | 244 | if (param) |
238 | for (i = 0; i < receive; i++) | 245 | for (i = 0; i < receive; i++) |
239 | param[i] = ps2dev->cmdbuf[(receive - 1) - i]; | 246 | param[i] = ps2dev->cmdbuf[(receive - 1) - i]; |
240 | 247 | ||
241 | if (ps2dev->cmdcnt && (command != PS2_CMD_RESET_BAT || ps2dev->cmdcnt != 1)) | 248 | if (ps2dev->cmdcnt && (command != PS2_CMD_RESET_BAT || ps2dev->cmdcnt != 1)) |
242 | goto out; | 249 | goto out_reset_flags; |
243 | 250 | ||
244 | rc = 0; | 251 | rc = 0; |
245 | 252 | ||
246 | out: | 253 | out_reset_flags: |
247 | serio_pause_rx(ps2dev->serio); | ||
248 | ps2dev->flags = 0; | 254 | ps2dev->flags = 0; |
249 | serio_continue_rx(ps2dev->serio); | 255 | serio_continue_rx(ps2dev->serio); |
250 | 256 | ||
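The libps2.c hunks above rename the common exit label and make every early error exit pause the serio receive path before the shared flag reset, so the flags are only ever cleared with rx paused. The following is a small sketch of that goto-based single-exit shape; a plain mutex stands in for serio_pause_rx()/serio_continue_rx() and the function names are invented.

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t rx_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned int flags;

static int send_byte(int ok) { return ok ? 0 : -1; }

static int do_command(int send_ok, int reply_ok)
{
	int rc = -1;

	flags = 1;                              /* command in flight */

	if (send_byte(send_ok)) {
		pthread_mutex_lock(&rx_lock);   /* pause rx before touching flags */
		goto out_reset_flags;
	}

	pthread_mutex_lock(&rx_lock);           /* normal path pauses here too */
	if (reply_ok)
		rc = 0;

out_reset_flags:
	flags = 0;                              /* always done with rx paused */
	pthread_mutex_unlock(&rx_lock);
	return rc;
}

int main(void)
{
	printf("ok=%d send-fail=%d reply-fail=%d\n",
	       do_command(1, 1), do_command(0, 1), do_command(1, 0));
	return 0;
}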
diff --git a/drivers/input/serio/parkbd.c b/drivers/input/serio/parkbd.c index 26b45936f9fd..1e8cd6f1fe9e 100644 --- a/drivers/input/serio/parkbd.c +++ b/drivers/input/serio/parkbd.c | |||
@@ -194,6 +194,7 @@ static int __init parkbd_init(void) | |||
194 | parkbd_port = parkbd_allocate_serio(); | 194 | parkbd_port = parkbd_allocate_serio(); |
195 | if (!parkbd_port) { | 195 | if (!parkbd_port) { |
196 | parport_release(parkbd_dev); | 196 | parport_release(parkbd_dev); |
197 | parport_unregister_device(parkbd_dev); | ||
197 | return -ENOMEM; | 198 | return -ENOMEM; |
198 | } | 199 | } |
199 | 200 | ||
diff --git a/drivers/input/touchscreen/imx6ul_tsc.c b/drivers/input/touchscreen/imx6ul_tsc.c index ff0b75813daa..8275267eac25 100644 --- a/drivers/input/touchscreen/imx6ul_tsc.c +++ b/drivers/input/touchscreen/imx6ul_tsc.c | |||
@@ -94,7 +94,7 @@ struct imx6ul_tsc { | |||
94 | * TSC module need ADC to get the measure value. So | 94 | * TSC module need ADC to get the measure value. So |
95 | * before config TSC, we should initialize ADC module. | 95 | * before config TSC, we should initialize ADC module. |
96 | */ | 96 | */ |
97 | static void imx6ul_adc_init(struct imx6ul_tsc *tsc) | 97 | static int imx6ul_adc_init(struct imx6ul_tsc *tsc) |
98 | { | 98 | { |
99 | int adc_hc = 0; | 99 | int adc_hc = 0; |
100 | int adc_gc; | 100 | int adc_gc; |
@@ -122,17 +122,23 @@ static void imx6ul_adc_init(struct imx6ul_tsc *tsc) | |||
122 | 122 | ||
123 | timeout = wait_for_completion_timeout | 123 | timeout = wait_for_completion_timeout |
124 | (&tsc->completion, ADC_TIMEOUT); | 124 | (&tsc->completion, ADC_TIMEOUT); |
125 | if (timeout == 0) | 125 | if (timeout == 0) { |
126 | dev_err(tsc->dev, "Timeout for adc calibration\n"); | 126 | dev_err(tsc->dev, "Timeout for adc calibration\n"); |
127 | return -ETIMEDOUT; | ||
128 | } | ||
127 | 129 | ||
128 | adc_gs = readl(tsc->adc_regs + REG_ADC_GS); | 130 | adc_gs = readl(tsc->adc_regs + REG_ADC_GS); |
129 | if (adc_gs & ADC_CALF) | 131 | if (adc_gs & ADC_CALF) { |
130 | dev_err(tsc->dev, "ADC calibration failed\n"); | 132 | dev_err(tsc->dev, "ADC calibration failed\n"); |
133 | return -EINVAL; | ||
134 | } | ||
131 | 135 | ||
132 | /* TSC need the ADC work in hardware trigger */ | 136 | /* TSC need the ADC work in hardware trigger */ |
133 | adc_cfg = readl(tsc->adc_regs + REG_ADC_CFG); | 137 | adc_cfg = readl(tsc->adc_regs + REG_ADC_CFG); |
134 | adc_cfg |= ADC_HARDWARE_TRIGGER; | 138 | adc_cfg |= ADC_HARDWARE_TRIGGER; |
135 | writel(adc_cfg, tsc->adc_regs + REG_ADC_CFG); | 139 | writel(adc_cfg, tsc->adc_regs + REG_ADC_CFG); |
140 | |||
141 | return 0; | ||
136 | } | 142 | } |
137 | 143 | ||
138 | /* | 144 | /* |
@@ -188,11 +194,17 @@ static void imx6ul_tsc_set(struct imx6ul_tsc *tsc) | |||
188 | writel(start, tsc->tsc_regs + REG_TSC_FLOW_CONTROL); | 194 | writel(start, tsc->tsc_regs + REG_TSC_FLOW_CONTROL); |
189 | } | 195 | } |
190 | 196 | ||
191 | static void imx6ul_tsc_init(struct imx6ul_tsc *tsc) | 197 | static int imx6ul_tsc_init(struct imx6ul_tsc *tsc) |
192 | { | 198 | { |
193 | imx6ul_adc_init(tsc); | 199 | int err; |
200 | |||
201 | err = imx6ul_adc_init(tsc); | ||
202 | if (err) | ||
203 | return err; | ||
194 | imx6ul_tsc_channel_config(tsc); | 204 | imx6ul_tsc_channel_config(tsc); |
195 | imx6ul_tsc_set(tsc); | 205 | imx6ul_tsc_set(tsc); |
206 | |||
207 | return 0; | ||
196 | } | 208 | } |
197 | 209 | ||
198 | static void imx6ul_tsc_disable(struct imx6ul_tsc *tsc) | 210 | static void imx6ul_tsc_disable(struct imx6ul_tsc *tsc) |
@@ -311,9 +323,7 @@ static int imx6ul_tsc_open(struct input_dev *input_dev) | |||
311 | return err; | 323 | return err; |
312 | } | 324 | } |
313 | 325 | ||
314 | imx6ul_tsc_init(tsc); | 326 | return imx6ul_tsc_init(tsc); |
315 | |||
316 | return 0; | ||
317 | } | 327 | } |
318 | 328 | ||
319 | static void imx6ul_tsc_close(struct input_dev *input_dev) | 329 | static void imx6ul_tsc_close(struct input_dev *input_dev) |
@@ -337,7 +347,7 @@ static int imx6ul_tsc_probe(struct platform_device *pdev) | |||
337 | int tsc_irq; | 347 | int tsc_irq; |
338 | int adc_irq; | 348 | int adc_irq; |
339 | 349 | ||
340 | tsc = devm_kzalloc(&pdev->dev, sizeof(struct imx6ul_tsc), GFP_KERNEL); | 350 | tsc = devm_kzalloc(&pdev->dev, sizeof(*tsc), GFP_KERNEL); |
341 | if (!tsc) | 351 | if (!tsc) |
342 | return -ENOMEM; | 352 | return -ENOMEM; |
343 | 353 | ||
@@ -345,7 +355,7 @@ static int imx6ul_tsc_probe(struct platform_device *pdev) | |||
345 | if (!input_dev) | 355 | if (!input_dev) |
346 | return -ENOMEM; | 356 | return -ENOMEM; |
347 | 357 | ||
348 | input_dev->name = "iMX6UL TouchScreen Controller"; | 358 | input_dev->name = "iMX6UL Touchscreen Controller"; |
349 | input_dev->id.bustype = BUS_HOST; | 359 | input_dev->id.bustype = BUS_HOST; |
350 | 360 | ||
351 | input_dev->open = imx6ul_tsc_open; | 361 | input_dev->open = imx6ul_tsc_open; |
@@ -406,7 +416,7 @@ static int imx6ul_tsc_probe(struct platform_device *pdev) | |||
406 | } | 416 | } |
407 | 417 | ||
408 | adc_irq = platform_get_irq(pdev, 1); | 418 | adc_irq = platform_get_irq(pdev, 1); |
409 | if (adc_irq <= 0) { | 419 | if (adc_irq < 0) { |
410 | dev_err(&pdev->dev, "no adc irq resource?\n"); | 420 | dev_err(&pdev->dev, "no adc irq resource?\n"); |
411 | return adc_irq; | 421 | return adc_irq; |
412 | } | 422 | } |
@@ -491,7 +501,7 @@ static int __maybe_unused imx6ul_tsc_resume(struct device *dev) | |||
491 | goto out; | 501 | goto out; |
492 | } | 502 | } |
493 | 503 | ||
494 | imx6ul_tsc_init(tsc); | 504 | retval = imx6ul_tsc_init(tsc); |
495 | } | 505 | } |
496 | 506 | ||
497 | out: | 507 | out: |
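The imx6ul_tsc.c changes above turn the ADC/TSC init helpers from void into int, so a calibration timeout or failure now propagates out of open() and resume() instead of being logged and ignored. A minimal sketch of that void-to-int conversion pattern, using invented placeholder functions rather than the driver's API:

#include <errno.h>
#include <stdio.h>

static int adc_init(int calibration_ok)
{
	if (!calibration_ok)
		return -ETIMEDOUT;          /* previously only logged, now returned */
	return 0;
}

static int tsc_init(int calibration_ok)
{
	int err = adc_init(calibration_ok);

	if (err)
		return err;                 /* propagate instead of carrying on */
	/* channel config and start would follow here */
	return 0;
}

static int tsc_open(int calibration_ok)
{
	return tsc_init(calibration_ok);    /* open now reports init failures */
}

int main(void)
{
	printf("good hw: %d\n", tsc_open(1));   /* 0 */
	printf("bad hw:  %d\n", tsc_open(0));   /* negative errno */
	return 0;
}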
diff --git a/drivers/input/touchscreen/mms114.c b/drivers/input/touchscreen/mms114.c index 7cce87650fc8..1fafc9f57af6 100644 --- a/drivers/input/touchscreen/mms114.c +++ b/drivers/input/touchscreen/mms114.c | |||
@@ -394,12 +394,12 @@ static struct mms114_platform_data *mms114_parse_dt(struct device *dev) | |||
394 | if (of_property_read_u32(np, "x-size", &pdata->x_size)) { | 394 | if (of_property_read_u32(np, "x-size", &pdata->x_size)) { |
395 | dev_err(dev, "failed to get x-size property\n"); | 395 | dev_err(dev, "failed to get x-size property\n"); |
396 | return NULL; | 396 | return NULL; |
397 | }; | 397 | } |
398 | 398 | ||
399 | if (of_property_read_u32(np, "y-size", &pdata->y_size)) { | 399 | if (of_property_read_u32(np, "y-size", &pdata->y_size)) { |
400 | dev_err(dev, "failed to get y-size property\n"); | 400 | dev_err(dev, "failed to get y-size property\n"); |
401 | return NULL; | 401 | return NULL; |
402 | }; | 402 | } |
403 | 403 | ||
404 | of_property_read_u32(np, "contact-threshold", | 404 | of_property_read_u32(np, "contact-threshold", |
405 | &pdata->contact_threshold); | 405 | &pdata->contact_threshold); |
diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig index 4664c2a96c67..d9da766719c8 100644 --- a/drivers/iommu/Kconfig +++ b/drivers/iommu/Kconfig | |||
@@ -43,7 +43,7 @@ config IOMMU_IO_PGTABLE_LPAE_SELFTEST | |||
43 | endmenu | 43 | endmenu |
44 | 44 | ||
45 | config IOMMU_IOVA | 45 | config IOMMU_IOVA |
46 | bool | 46 | tristate |
47 | 47 | ||
48 | config OF_IOMMU | 48 | config OF_IOMMU |
49 | def_bool y | 49 | def_bool y |
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c index 2d7349a3ee14..041bc1810a86 100644 --- a/drivers/iommu/intel-iommu.c +++ b/drivers/iommu/intel-iommu.c | |||
@@ -3215,6 +3215,8 @@ static struct iova *intel_alloc_iova(struct device *dev, | |||
3215 | 3215 | ||
3216 | /* Restrict dma_mask to the width that the iommu can handle */ | 3216 | /* Restrict dma_mask to the width that the iommu can handle */ |
3217 | dma_mask = min_t(uint64_t, DOMAIN_MAX_ADDR(domain->gaw), dma_mask); | 3217 | dma_mask = min_t(uint64_t, DOMAIN_MAX_ADDR(domain->gaw), dma_mask); |
3218 | /* Ensure we reserve the whole size-aligned region */ | ||
3219 | nrpages = __roundup_pow_of_two(nrpages); | ||
3218 | 3220 | ||
3219 | if (!dmar_forcedac && dma_mask > DMA_BIT_MASK(32)) { | 3221 | if (!dmar_forcedac && dma_mask > DMA_BIT_MASK(32)) { |
3220 | /* | 3222 | /* |
@@ -3711,7 +3713,7 @@ static inline int iommu_devinfo_cache_init(void) | |||
3711 | static int __init iommu_init_mempool(void) | 3713 | static int __init iommu_init_mempool(void) |
3712 | { | 3714 | { |
3713 | int ret; | 3715 | int ret; |
3714 | ret = iommu_iova_cache_init(); | 3716 | ret = iova_cache_get(); |
3715 | if (ret) | 3717 | if (ret) |
3716 | return ret; | 3718 | return ret; |
3717 | 3719 | ||
@@ -3725,7 +3727,7 @@ static int __init iommu_init_mempool(void) | |||
3725 | 3727 | ||
3726 | kmem_cache_destroy(iommu_domain_cache); | 3728 | kmem_cache_destroy(iommu_domain_cache); |
3727 | domain_error: | 3729 | domain_error: |
3728 | iommu_iova_cache_destroy(); | 3730 | iova_cache_put(); |
3729 | 3731 | ||
3730 | return -ENOMEM; | 3732 | return -ENOMEM; |
3731 | } | 3733 | } |
@@ -3734,7 +3736,7 @@ static void __init iommu_exit_mempool(void) | |||
3734 | { | 3736 | { |
3735 | kmem_cache_destroy(iommu_devinfo_cache); | 3737 | kmem_cache_destroy(iommu_devinfo_cache); |
3736 | kmem_cache_destroy(iommu_domain_cache); | 3738 | kmem_cache_destroy(iommu_domain_cache); |
3737 | iommu_iova_cache_destroy(); | 3739 | iova_cache_put(); |
3738 | } | 3740 | } |
3739 | 3741 | ||
3740 | static void quirk_ioat_snb_local_iommu(struct pci_dev *pdev) | 3742 | static void quirk_ioat_snb_local_iommu(struct pci_dev *pdev) |
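The intel-iommu.c hunk above rounds nrpages up to a power of two before allocating because, per the iova.c change further down, the allocator now only aligns the start of a size-aligned allocation and no longer inflates the size itself. A small worked example of the arithmetic, assuming a hypothetical request of 5 pages:

#include <stdio.h>

static unsigned long roundup_pow_of_two(unsigned long n)
{
	unsigned long p = 1;

	while (p < n)
		p <<= 1;
	return p;
}

int main(void)
{
	unsigned long nrpages = 5;                      /* hypothetical request */
	unsigned long align = roundup_pow_of_two(nrpages);

	/* The allocator aligns the start to 'align' but reserves only what
	 * was asked for, so a raw request leaves a gap inside the window
	 * that a later allocation could reuse. */
	printf("aligned window: %lu pages, raw request: %lu, gap: %lu\n",
	       align, nrpages, align - nrpages);
	printf("rounded request: %lu, gap: %lu\n",
	       roundup_pow_of_two(nrpages), align - roundup_pow_of_two(nrpages));
	return 0;
}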
diff --git a/drivers/iommu/iova.c b/drivers/iommu/iova.c index b7c3d923f3e1..fa0adef32bd6 100644 --- a/drivers/iommu/iova.c +++ b/drivers/iommu/iova.c | |||
@@ -18,42 +18,9 @@ | |||
18 | */ | 18 | */ |
19 | 19 | ||
20 | #include <linux/iova.h> | 20 | #include <linux/iova.h> |
21 | #include <linux/module.h> | ||
21 | #include <linux/slab.h> | 22 | #include <linux/slab.h> |
22 | 23 | ||
23 | static struct kmem_cache *iommu_iova_cache; | ||
24 | |||
25 | int iommu_iova_cache_init(void) | ||
26 | { | ||
27 | int ret = 0; | ||
28 | |||
29 | iommu_iova_cache = kmem_cache_create("iommu_iova", | ||
30 | sizeof(struct iova), | ||
31 | 0, | ||
32 | SLAB_HWCACHE_ALIGN, | ||
33 | NULL); | ||
34 | if (!iommu_iova_cache) { | ||
35 | pr_err("Couldn't create iova cache\n"); | ||
36 | ret = -ENOMEM; | ||
37 | } | ||
38 | |||
39 | return ret; | ||
40 | } | ||
41 | |||
42 | void iommu_iova_cache_destroy(void) | ||
43 | { | ||
44 | kmem_cache_destroy(iommu_iova_cache); | ||
45 | } | ||
46 | |||
47 | struct iova *alloc_iova_mem(void) | ||
48 | { | ||
49 | return kmem_cache_alloc(iommu_iova_cache, GFP_ATOMIC); | ||
50 | } | ||
51 | |||
52 | void free_iova_mem(struct iova *iova) | ||
53 | { | ||
54 | kmem_cache_free(iommu_iova_cache, iova); | ||
55 | } | ||
56 | |||
57 | void | 24 | void |
58 | init_iova_domain(struct iova_domain *iovad, unsigned long granule, | 25 | init_iova_domain(struct iova_domain *iovad, unsigned long granule, |
59 | unsigned long start_pfn, unsigned long pfn_32bit) | 26 | unsigned long start_pfn, unsigned long pfn_32bit) |
@@ -72,6 +39,7 @@ init_iova_domain(struct iova_domain *iovad, unsigned long granule, | |||
72 | iovad->start_pfn = start_pfn; | 39 | iovad->start_pfn = start_pfn; |
73 | iovad->dma_32bit_pfn = pfn_32bit; | 40 | iovad->dma_32bit_pfn = pfn_32bit; |
74 | } | 41 | } |
42 | EXPORT_SYMBOL_GPL(init_iova_domain); | ||
75 | 43 | ||
76 | static struct rb_node * | 44 | static struct rb_node * |
77 | __get_cached_rbnode(struct iova_domain *iovad, unsigned long *limit_pfn) | 45 | __get_cached_rbnode(struct iova_domain *iovad, unsigned long *limit_pfn) |
@@ -120,19 +88,14 @@ __cached_rbnode_delete_update(struct iova_domain *iovad, struct iova *free) | |||
120 | } | 88 | } |
121 | } | 89 | } |
122 | 90 | ||
123 | /* Computes the padding size required, to make the | 91 | /* |
124 | * the start address naturally aligned on its size | 92 | * Computes the padding size required, to make the start address |
93 | * naturally aligned on the power-of-two order of its size | ||
125 | */ | 94 | */ |
126 | static int | 95 | static unsigned int |
127 | iova_get_pad_size(int size, unsigned int limit_pfn) | 96 | iova_get_pad_size(unsigned int size, unsigned int limit_pfn) |
128 | { | 97 | { |
129 | unsigned int pad_size = 0; | 98 | return (limit_pfn + 1 - size) & (__roundup_pow_of_two(size) - 1); |
130 | unsigned int order = ilog2(size); | ||
131 | |||
132 | if (order) | ||
133 | pad_size = (limit_pfn + 1) % (1 << order); | ||
134 | |||
135 | return pad_size; | ||
136 | } | 99 | } |
137 | 100 | ||
138 | static int __alloc_and_insert_iova_range(struct iova_domain *iovad, | 101 | static int __alloc_and_insert_iova_range(struct iova_domain *iovad, |
@@ -242,6 +205,57 @@ iova_insert_rbtree(struct rb_root *root, struct iova *iova) | |||
242 | rb_insert_color(&iova->node, root); | 205 | rb_insert_color(&iova->node, root); |
243 | } | 206 | } |
244 | 207 | ||
208 | static struct kmem_cache *iova_cache; | ||
209 | static unsigned int iova_cache_users; | ||
210 | static DEFINE_MUTEX(iova_cache_mutex); | ||
211 | |||
212 | struct iova *alloc_iova_mem(void) | ||
213 | { | ||
214 | return kmem_cache_alloc(iova_cache, GFP_ATOMIC); | ||
215 | } | ||
216 | EXPORT_SYMBOL(alloc_iova_mem); | ||
217 | |||
218 | void free_iova_mem(struct iova *iova) | ||
219 | { | ||
220 | kmem_cache_free(iova_cache, iova); | ||
221 | } | ||
222 | EXPORT_SYMBOL(free_iova_mem); | ||
223 | |||
224 | int iova_cache_get(void) | ||
225 | { | ||
226 | mutex_lock(&iova_cache_mutex); | ||
227 | if (!iova_cache_users) { | ||
228 | iova_cache = kmem_cache_create( | ||
229 | "iommu_iova", sizeof(struct iova), 0, | ||
230 | SLAB_HWCACHE_ALIGN, NULL); | ||
231 | if (!iova_cache) { | ||
232 | mutex_unlock(&iova_cache_mutex); | ||
233 | printk(KERN_ERR "Couldn't create iova cache\n"); | ||
234 | return -ENOMEM; | ||
235 | } | ||
236 | } | ||
237 | |||
238 | iova_cache_users++; | ||
239 | mutex_unlock(&iova_cache_mutex); | ||
240 | |||
241 | return 0; | ||
242 | } | ||
243 | EXPORT_SYMBOL_GPL(iova_cache_get); | ||
244 | |||
245 | void iova_cache_put(void) | ||
246 | { | ||
247 | mutex_lock(&iova_cache_mutex); | ||
248 | if (WARN_ON(!iova_cache_users)) { | ||
249 | mutex_unlock(&iova_cache_mutex); | ||
250 | return; | ||
251 | } | ||
252 | iova_cache_users--; | ||
253 | if (!iova_cache_users) | ||
254 | kmem_cache_destroy(iova_cache); | ||
255 | mutex_unlock(&iova_cache_mutex); | ||
256 | } | ||
257 | EXPORT_SYMBOL_GPL(iova_cache_put); | ||
258 | |||
245 | /** | 259 | /** |
246 | * alloc_iova - allocates an iova | 260 | * alloc_iova - allocates an iova |
247 | * @iovad: - iova domain in question | 261 | * @iovad: - iova domain in question |
@@ -265,12 +279,6 @@ alloc_iova(struct iova_domain *iovad, unsigned long size, | |||
265 | if (!new_iova) | 279 | if (!new_iova) |
266 | return NULL; | 280 | return NULL; |
267 | 281 | ||
268 | /* If size aligned is set then round the size to | ||
269 | * to next power of two. | ||
270 | */ | ||
271 | if (size_aligned) | ||
272 | size = __roundup_pow_of_two(size); | ||
273 | |||
274 | ret = __alloc_and_insert_iova_range(iovad, size, limit_pfn, | 282 | ret = __alloc_and_insert_iova_range(iovad, size, limit_pfn, |
275 | new_iova, size_aligned); | 283 | new_iova, size_aligned); |
276 | 284 | ||
@@ -281,6 +289,7 @@ alloc_iova(struct iova_domain *iovad, unsigned long size, | |||
281 | 289 | ||
282 | return new_iova; | 290 | return new_iova; |
283 | } | 291 | } |
292 | EXPORT_SYMBOL_GPL(alloc_iova); | ||
284 | 293 | ||
285 | /** | 294 | /** |
286 | * find_iova - find's an iova for a given pfn | 295 | * find_iova - find's an iova for a given pfn |
@@ -321,6 +330,7 @@ struct iova *find_iova(struct iova_domain *iovad, unsigned long pfn) | |||
321 | spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags); | 330 | spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags); |
322 | return NULL; | 331 | return NULL; |
323 | } | 332 | } |
333 | EXPORT_SYMBOL_GPL(find_iova); | ||
324 | 334 | ||
325 | /** | 335 | /** |
326 | * __free_iova - frees the given iova | 336 | * __free_iova - frees the given iova |
@@ -339,6 +349,7 @@ __free_iova(struct iova_domain *iovad, struct iova *iova) | |||
339 | spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags); | 349 | spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags); |
340 | free_iova_mem(iova); | 350 | free_iova_mem(iova); |
341 | } | 351 | } |
352 | EXPORT_SYMBOL_GPL(__free_iova); | ||
342 | 353 | ||
343 | /** | 354 | /** |
344 | * free_iova - finds and frees the iova for a given pfn | 355 | * free_iova - finds and frees the iova for a given pfn |
@@ -356,6 +367,7 @@ free_iova(struct iova_domain *iovad, unsigned long pfn) | |||
356 | __free_iova(iovad, iova); | 367 | __free_iova(iovad, iova); |
357 | 368 | ||
358 | } | 369 | } |
370 | EXPORT_SYMBOL_GPL(free_iova); | ||
359 | 371 | ||
360 | /** | 372 | /** |
361 | * put_iova_domain - destroys the iova doamin | 373 | * put_iova_domain - destroys the iova doamin |
@@ -378,6 +390,7 @@ void put_iova_domain(struct iova_domain *iovad) | |||
378 | } | 390 | } |
379 | spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags); | 391 | spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags); |
380 | } | 392 | } |
393 | EXPORT_SYMBOL_GPL(put_iova_domain); | ||
381 | 394 | ||
382 | static int | 395 | static int |
383 | __is_range_overlap(struct rb_node *node, | 396 | __is_range_overlap(struct rb_node *node, |
@@ -467,6 +480,7 @@ finish: | |||
467 | spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags); | 480 | spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags); |
468 | return iova; | 481 | return iova; |
469 | } | 482 | } |
483 | EXPORT_SYMBOL_GPL(reserve_iova); | ||
470 | 484 | ||
471 | /** | 485 | /** |
472 | * copy_reserved_iova - copies the reserved between domains | 486 | * copy_reserved_iova - copies the reserved between domains |
@@ -493,6 +507,7 @@ copy_reserved_iova(struct iova_domain *from, struct iova_domain *to) | |||
493 | } | 507 | } |
494 | spin_unlock_irqrestore(&from->iova_rbtree_lock, flags); | 508 | spin_unlock_irqrestore(&from->iova_rbtree_lock, flags); |
495 | } | 509 | } |
510 | EXPORT_SYMBOL_GPL(copy_reserved_iova); | ||
496 | 511 | ||
497 | struct iova * | 512 | struct iova * |
498 | split_and_remove_iova(struct iova_domain *iovad, struct iova *iova, | 513 | split_and_remove_iova(struct iova_domain *iovad, struct iova *iova, |
@@ -534,3 +549,6 @@ error: | |||
534 | free_iova_mem(prev); | 549 | free_iova_mem(prev); |
535 | return NULL; | 550 | return NULL; |
536 | } | 551 | } |
552 | |||
553 | MODULE_AUTHOR("Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>"); | ||
554 | MODULE_LICENSE("GPL"); | ||
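The iova.c rework above replaces the single-user iommu_iova_cache_init()/destroy() pair with reference-counted iova_cache_get()/iova_cache_put(), so multiple modules can share one kmem_cache and only the last user tears it down. A userspace sketch of that lifecycle follows; malloc/free stand in for the kmem_cache and a pthread mutex for iova_cache_mutex, so this only models the counting, not the kernel allocator.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

static void *shared_cache;
static unsigned int cache_users;
static pthread_mutex_t cache_mutex = PTHREAD_MUTEX_INITIALIZER;

static int cache_get(void)
{
	pthread_mutex_lock(&cache_mutex);
	if (!cache_users) {
		shared_cache = malloc(64);      /* first user: create the cache */
		if (!shared_cache) {
			pthread_mutex_unlock(&cache_mutex);
			return -1;
		}
	}
	cache_users++;
	pthread_mutex_unlock(&cache_mutex);
	return 0;
}

static void cache_put(void)
{
	pthread_mutex_lock(&cache_mutex);
	if (cache_users && !--cache_users) {    /* last user: tear it down */
		free(shared_cache);
		shared_cache = NULL;
	}
	pthread_mutex_unlock(&cache_mutex);
}

int main(void)
{
	cache_get();            /* e.g. the intel-iommu user */
	cache_get();            /* e.g. a second IOVA user   */
	cache_put();            /* cache stays alive         */
	cache_put();            /* now it is destroyed       */
	printf("cache %s\n", shared_cache ? "alive" : "destroyed");
	return 0;
}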
diff --git a/drivers/irqchip/irq-gic-v3-its-pci-msi.c b/drivers/irqchip/irq-gic-v3-its-pci-msi.c index cf351c637464..a7c8c9ffbafd 100644 --- a/drivers/irqchip/irq-gic-v3-its-pci-msi.c +++ b/drivers/irqchip/irq-gic-v3-its-pci-msi.c | |||
@@ -62,7 +62,7 @@ static int its_get_pci_alias(struct pci_dev *pdev, u16 alias, void *data) | |||
62 | 62 | ||
63 | dev_alias->dev_id = alias; | 63 | dev_alias->dev_id = alias; |
64 | if (pdev != dev_alias->pdev) | 64 | if (pdev != dev_alias->pdev) |
65 | dev_alias->count += its_pci_msi_vec_count(dev_alias->pdev); | 65 | dev_alias->count += its_pci_msi_vec_count(pdev); |
66 | 66 | ||
67 | return 0; | 67 | return 0; |
68 | } | 68 | } |
diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c index ac7ae2b3cb83..25ceae9f7348 100644 --- a/drivers/irqchip/irq-gic-v3-its.c +++ b/drivers/irqchip/irq-gic-v3-its.c | |||
@@ -719,6 +719,9 @@ static unsigned long *its_lpi_alloc_chunks(int nr_irqs, int *base, int *nr_ids) | |||
719 | out: | 719 | out: |
720 | spin_unlock(&lpi_lock); | 720 | spin_unlock(&lpi_lock); |
721 | 721 | ||
722 | if (!bitmap) | ||
723 | *base = *nr_ids = 0; | ||
724 | |||
722 | return bitmap; | 725 | return bitmap; |
723 | } | 726 | } |
724 | 727 | ||
diff --git a/drivers/irqchip/irq-mips-gic.c b/drivers/irqchip/irq-mips-gic.c index af2f16bb8a94..aeaa061f0dbf 100644 --- a/drivers/irqchip/irq-mips-gic.c +++ b/drivers/irqchip/irq-mips-gic.c | |||
@@ -320,6 +320,14 @@ static void gic_handle_shared_int(bool chained) | |||
320 | intrmask[i] = gic_read(intrmask_reg); | 320 | intrmask[i] = gic_read(intrmask_reg); |
321 | pending_reg += gic_reg_step; | 321 | pending_reg += gic_reg_step; |
322 | intrmask_reg += gic_reg_step; | 322 | intrmask_reg += gic_reg_step; |
323 | |||
324 | if (!config_enabled(CONFIG_64BIT) || mips_cm_is64) | ||
325 | continue; | ||
326 | |||
327 | pending[i] |= (u64)gic_read(pending_reg) << 32; | ||
328 | intrmask[i] |= (u64)gic_read(intrmask_reg) << 32; | ||
329 | pending_reg += gic_reg_step; | ||
330 | intrmask_reg += gic_reg_step; | ||
323 | } | 331 | } |
324 | 332 | ||
325 | bitmap_and(pending, pending, intrmask, gic_shared_intrs); | 333 | bitmap_and(pending, pending, intrmask, gic_shared_intrs); |
@@ -426,7 +434,7 @@ static int gic_set_affinity(struct irq_data *d, const struct cpumask *cpumask, | |||
426 | spin_lock_irqsave(&gic_lock, flags); | 434 | spin_lock_irqsave(&gic_lock, flags); |
427 | 435 | ||
428 | /* Re-route this IRQ */ | 436 | /* Re-route this IRQ */ |
429 | gic_map_to_vpe(irq, cpumask_first(&tmp)); | 437 | gic_map_to_vpe(irq, mips_cm_vp_id(cpumask_first(&tmp))); |
430 | 438 | ||
431 | /* Update the pcpu_masks */ | 439 | /* Update the pcpu_masks */ |
432 | for (i = 0; i < NR_CPUS; i++) | 440 | for (i = 0; i < NR_CPUS; i++) |
@@ -599,7 +607,7 @@ static __init void gic_ipi_init_one(unsigned int intr, int cpu, | |||
599 | GIC_SHARED_TO_HWIRQ(intr)); | 607 | GIC_SHARED_TO_HWIRQ(intr)); |
600 | int i; | 608 | int i; |
601 | 609 | ||
602 | gic_map_to_vpe(intr, cpu); | 610 | gic_map_to_vpe(intr, mips_cm_vp_id(cpu)); |
603 | for (i = 0; i < NR_CPUS; i++) | 611 | for (i = 0; i < NR_CPUS; i++) |
604 | clear_bit(intr, pcpu_masks[i].pcpu_mask); | 612 | clear_bit(intr, pcpu_masks[i].pcpu_mask); |
605 | set_bit(intr, pcpu_masks[cpu].pcpu_mask); | 613 | set_bit(intr, pcpu_masks[cpu].pcpu_mask); |
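The irq-mips-gic.c hunk above handles 32-bit-only CM register access by reading each 64-bit pending/intrmask word as two consecutive 32-bit registers and OR-ing the high half in. A standalone sketch of that assembly step; read32() and the register layout are invented for the example and do not reflect the real GIC map.

#include <stdint.h>
#include <stdio.h>

static const uint32_t regs[4] = { 0x000000ff, 0x00000001,   /* pending lo/hi */
				  0xffffffff, 0xffffffff }; /* mask lo/hi    */

static uint32_t read32(unsigned int idx)      /* stand-in for gic_read() */
{
	return regs[idx];
}

static uint64_t read_word64(unsigned int base, int reg_is_64bit)
{
	uint64_t val = read32(base);

	if (!reg_is_64bit)                    /* high half lives in the next register */
		val |= (uint64_t)read32(base + 1) << 32;
	return val;
}

int main(void)
{
	printf("pending = %016llx\n", (unsigned long long)read_word64(0, 0));
	printf("mask    = %016llx\n", (unsigned long long)read_word64(2, 0));
	return 0;
}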
diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c index e51de52eeb94..48b5890c28e3 100644 --- a/drivers/md/bitmap.c +++ b/drivers/md/bitmap.c | |||
@@ -1997,7 +1997,8 @@ int bitmap_resize(struct bitmap *bitmap, sector_t blocks, | |||
1997 | if (bitmap->mddev->bitmap_info.offset || bitmap->mddev->bitmap_info.file) | 1997 | if (bitmap->mddev->bitmap_info.offset || bitmap->mddev->bitmap_info.file) |
1998 | ret = bitmap_storage_alloc(&store, chunks, | 1998 | ret = bitmap_storage_alloc(&store, chunks, |
1999 | !bitmap->mddev->bitmap_info.external, | 1999 | !bitmap->mddev->bitmap_info.external, |
2000 | bitmap->cluster_slot); | 2000 | mddev_is_clustered(bitmap->mddev) |
2001 | ? bitmap->cluster_slot : 0); | ||
2001 | if (ret) | 2002 | if (ret) |
2002 | goto err; | 2003 | goto err; |
2003 | 2004 | ||
diff --git a/drivers/md/md.c b/drivers/md/md.c index 4f5ecbe94ccb..c702de18207a 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c | |||
@@ -5409,9 +5409,13 @@ static int md_set_readonly(struct mddev *mddev, struct block_device *bdev) | |||
5409 | * which will now never happen */ | 5409 | * which will now never happen */ |
5410 | wake_up_process(mddev->sync_thread->tsk); | 5410 | wake_up_process(mddev->sync_thread->tsk); |
5411 | 5411 | ||
5412 | if (mddev->external && test_bit(MD_CHANGE_PENDING, &mddev->flags)) | ||
5413 | return -EBUSY; | ||
5412 | mddev_unlock(mddev); | 5414 | mddev_unlock(mddev); |
5413 | wait_event(resync_wait, !test_bit(MD_RECOVERY_RUNNING, | 5415 | wait_event(resync_wait, !test_bit(MD_RECOVERY_RUNNING, |
5414 | &mddev->recovery)); | 5416 | &mddev->recovery)); |
5417 | wait_event(mddev->sb_wait, | ||
5418 | !test_bit(MD_CHANGE_PENDING, &mddev->flags)); | ||
5415 | mddev_lock_nointr(mddev); | 5419 | mddev_lock_nointr(mddev); |
5416 | 5420 | ||
5417 | mutex_lock(&mddev->open_mutex); | 5421 | mutex_lock(&mddev->open_mutex); |
@@ -8160,6 +8164,7 @@ void md_check_recovery(struct mddev *mddev) | |||
8160 | md_reap_sync_thread(mddev); | 8164 | md_reap_sync_thread(mddev); |
8161 | clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery); | 8165 | clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery); |
8162 | clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery); | 8166 | clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery); |
8167 | clear_bit(MD_CHANGE_PENDING, &mddev->flags); | ||
8163 | goto unlock; | 8168 | goto unlock; |
8164 | } | 8169 | } |
8165 | 8170 | ||
diff --git a/drivers/md/multipath.c b/drivers/md/multipath.c index d222522c52e0..d132f06afdd1 100644 --- a/drivers/md/multipath.c +++ b/drivers/md/multipath.c | |||
@@ -470,8 +470,7 @@ static int multipath_run (struct mddev *mddev) | |||
470 | return 0; | 470 | return 0; |
471 | 471 | ||
472 | out_free_conf: | 472 | out_free_conf: |
473 | if (conf->pool) | 473 | mempool_destroy(conf->pool); |
474 | mempool_destroy(conf->pool); | ||
475 | kfree(conf->multipaths); | 474 | kfree(conf->multipaths); |
476 | kfree(conf); | 475 | kfree(conf); |
477 | mddev->private = NULL; | 476 | mddev->private = NULL; |
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c index 63e619b2f44e..f8e5db0cb5aa 100644 --- a/drivers/md/raid0.c +++ b/drivers/md/raid0.c | |||
@@ -376,12 +376,6 @@ static int raid0_run(struct mddev *mddev) | |||
376 | struct md_rdev *rdev; | 376 | struct md_rdev *rdev; |
377 | bool discard_supported = false; | 377 | bool discard_supported = false; |
378 | 378 | ||
379 | rdev_for_each(rdev, mddev) { | ||
380 | disk_stack_limits(mddev->gendisk, rdev->bdev, | ||
381 | rdev->data_offset << 9); | ||
382 | if (blk_queue_discard(bdev_get_queue(rdev->bdev))) | ||
383 | discard_supported = true; | ||
384 | } | ||
385 | blk_queue_max_hw_sectors(mddev->queue, mddev->chunk_sectors); | 379 | blk_queue_max_hw_sectors(mddev->queue, mddev->chunk_sectors); |
386 | blk_queue_max_write_same_sectors(mddev->queue, mddev->chunk_sectors); | 380 | blk_queue_max_write_same_sectors(mddev->queue, mddev->chunk_sectors); |
387 | blk_queue_max_discard_sectors(mddev->queue, mddev->chunk_sectors); | 381 | blk_queue_max_discard_sectors(mddev->queue, mddev->chunk_sectors); |
@@ -390,6 +384,12 @@ static int raid0_run(struct mddev *mddev) | |||
390 | blk_queue_io_opt(mddev->queue, | 384 | blk_queue_io_opt(mddev->queue, |
391 | (mddev->chunk_sectors << 9) * mddev->raid_disks); | 385 | (mddev->chunk_sectors << 9) * mddev->raid_disks); |
392 | 386 | ||
387 | rdev_for_each(rdev, mddev) { | ||
388 | disk_stack_limits(mddev->gendisk, rdev->bdev, | ||
389 | rdev->data_offset << 9); | ||
390 | if (blk_queue_discard(bdev_get_queue(rdev->bdev))) | ||
391 | discard_supported = true; | ||
392 | } | ||
393 | if (!discard_supported) | 393 | if (!discard_supported) |
394 | queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, mddev->queue); | 394 | queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, mddev->queue); |
395 | else | 395 | else |
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c index 4517f06c41ba..049df6c4a8cc 100644 --- a/drivers/md/raid1.c +++ b/drivers/md/raid1.c | |||
@@ -881,8 +881,7 @@ static sector_t wait_barrier(struct r1conf *conf, struct bio *bio) | |||
881 | } | 881 | } |
882 | 882 | ||
883 | if (bio && bio_data_dir(bio) == WRITE) { | 883 | if (bio && bio_data_dir(bio) == WRITE) { |
884 | if (bio->bi_iter.bi_sector >= | 884 | if (bio->bi_iter.bi_sector >= conf->next_resync) { |
885 | conf->mddev->curr_resync_completed) { | ||
886 | if (conf->start_next_window == MaxSector) | 885 | if (conf->start_next_window == MaxSector) |
887 | conf->start_next_window = | 886 | conf->start_next_window = |
888 | conf->next_resync + | 887 | conf->next_resync + |
@@ -1516,7 +1515,7 @@ static void close_sync(struct r1conf *conf) | |||
1516 | conf->r1buf_pool = NULL; | 1515 | conf->r1buf_pool = NULL; |
1517 | 1516 | ||
1518 | spin_lock_irq(&conf->resync_lock); | 1517 | spin_lock_irq(&conf->resync_lock); |
1519 | conf->next_resync = 0; | 1518 | conf->next_resync = MaxSector - 2 * NEXT_NORMALIO_DISTANCE; |
1520 | conf->start_next_window = MaxSector; | 1519 | conf->start_next_window = MaxSector; |
1521 | conf->current_window_requests += | 1520 | conf->current_window_requests += |
1522 | conf->next_window_requests; | 1521 | conf->next_window_requests; |
@@ -2843,8 +2842,7 @@ static struct r1conf *setup_conf(struct mddev *mddev) | |||
2843 | 2842 | ||
2844 | abort: | 2843 | abort: |
2845 | if (conf) { | 2844 | if (conf) { |
2846 | if (conf->r1bio_pool) | 2845 | mempool_destroy(conf->r1bio_pool); |
2847 | mempool_destroy(conf->r1bio_pool); | ||
2848 | kfree(conf->mirrors); | 2846 | kfree(conf->mirrors); |
2849 | safe_put_page(conf->tmppage); | 2847 | safe_put_page(conf->tmppage); |
2850 | kfree(conf->poolinfo); | 2848 | kfree(conf->poolinfo); |
@@ -2946,8 +2944,7 @@ static void raid1_free(struct mddev *mddev, void *priv) | |||
2946 | { | 2944 | { |
2947 | struct r1conf *conf = priv; | 2945 | struct r1conf *conf = priv; |
2948 | 2946 | ||
2949 | if (conf->r1bio_pool) | 2947 | mempool_destroy(conf->r1bio_pool); |
2950 | mempool_destroy(conf->r1bio_pool); | ||
2951 | kfree(conf->mirrors); | 2948 | kfree(conf->mirrors); |
2952 | safe_put_page(conf->tmppage); | 2949 | safe_put_page(conf->tmppage); |
2953 | kfree(conf->poolinfo); | 2950 | kfree(conf->poolinfo); |
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c index 0fc33eb88855..7c99a4037715 100644 --- a/drivers/md/raid10.c +++ b/drivers/md/raid10.c | |||
@@ -3486,8 +3486,7 @@ static struct r10conf *setup_conf(struct mddev *mddev) | |||
3486 | printk(KERN_ERR "md/raid10:%s: couldn't allocate memory.\n", | 3486 | printk(KERN_ERR "md/raid10:%s: couldn't allocate memory.\n", |
3487 | mdname(mddev)); | 3487 | mdname(mddev)); |
3488 | if (conf) { | 3488 | if (conf) { |
3489 | if (conf->r10bio_pool) | 3489 | mempool_destroy(conf->r10bio_pool); |
3490 | mempool_destroy(conf->r10bio_pool); | ||
3491 | kfree(conf->mirrors); | 3490 | kfree(conf->mirrors); |
3492 | safe_put_page(conf->tmppage); | 3491 | safe_put_page(conf->tmppage); |
3493 | kfree(conf); | 3492 | kfree(conf); |
@@ -3682,8 +3681,7 @@ static int run(struct mddev *mddev) | |||
3682 | 3681 | ||
3683 | out_free_conf: | 3682 | out_free_conf: |
3684 | md_unregister_thread(&mddev->thread); | 3683 | md_unregister_thread(&mddev->thread); |
3685 | if (conf->r10bio_pool) | 3684 | mempool_destroy(conf->r10bio_pool); |
3686 | mempool_destroy(conf->r10bio_pool); | ||
3687 | safe_put_page(conf->tmppage); | 3685 | safe_put_page(conf->tmppage); |
3688 | kfree(conf->mirrors); | 3686 | kfree(conf->mirrors); |
3689 | kfree(conf); | 3687 | kfree(conf); |
@@ -3696,8 +3694,7 @@ static void raid10_free(struct mddev *mddev, void *priv) | |||
3696 | { | 3694 | { |
3697 | struct r10conf *conf = priv; | 3695 | struct r10conf *conf = priv; |
3698 | 3696 | ||
3699 | if (conf->r10bio_pool) | 3697 | mempool_destroy(conf->r10bio_pool); |
3700 | mempool_destroy(conf->r10bio_pool); | ||
3701 | safe_put_page(conf->tmppage); | 3698 | safe_put_page(conf->tmppage); |
3702 | kfree(conf->mirrors); | 3699 | kfree(conf->mirrors); |
3703 | kfree(conf->mirrors_old); | 3700 | kfree(conf->mirrors_old); |
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index 15ef2c641b2b..49bb8d3ff9be 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c | |||
@@ -2271,8 +2271,7 @@ static void shrink_stripes(struct r5conf *conf) | |||
2271 | drop_one_stripe(conf)) | 2271 | drop_one_stripe(conf)) |
2272 | ; | 2272 | ; |
2273 | 2273 | ||
2274 | if (conf->slab_cache) | 2274 | kmem_cache_destroy(conf->slab_cache); |
2275 | kmem_cache_destroy(conf->slab_cache); | ||
2276 | conf->slab_cache = NULL; | 2275 | conf->slab_cache = NULL; |
2277 | } | 2276 | } |
2278 | 2277 | ||
@@ -3150,6 +3149,8 @@ handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh, | |||
3150 | spin_unlock_irq(&sh->stripe_lock); | 3149 | spin_unlock_irq(&sh->stripe_lock); |
3151 | if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags)) | 3150 | if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags)) |
3152 | wake_up(&conf->wait_for_overlap); | 3151 | wake_up(&conf->wait_for_overlap); |
3152 | if (bi) | ||
3153 | s->to_read--; | ||
3153 | while (bi && bi->bi_iter.bi_sector < | 3154 | while (bi && bi->bi_iter.bi_sector < |
3154 | sh->dev[i].sector + STRIPE_SECTORS) { | 3155 | sh->dev[i].sector + STRIPE_SECTORS) { |
3155 | struct bio *nextbi = | 3156 | struct bio *nextbi = |
@@ -3169,6 +3170,8 @@ handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh, | |||
3169 | */ | 3170 | */ |
3170 | clear_bit(R5_LOCKED, &sh->dev[i].flags); | 3171 | clear_bit(R5_LOCKED, &sh->dev[i].flags); |
3171 | } | 3172 | } |
3173 | s->to_write = 0; | ||
3174 | s->written = 0; | ||
3172 | 3175 | ||
3173 | if (test_and_clear_bit(STRIPE_FULL_WRITE, &sh->state)) | 3176 | if (test_and_clear_bit(STRIPE_FULL_WRITE, &sh->state)) |
3174 | if (atomic_dec_and_test(&conf->pending_full_writes)) | 3177 | if (atomic_dec_and_test(&conf->pending_full_writes)) |
@@ -3300,7 +3303,7 @@ static int need_this_block(struct stripe_head *sh, struct stripe_head_state *s, | |||
3300 | */ | 3303 | */ |
3301 | return 0; | 3304 | return 0; |
3302 | 3305 | ||
3303 | for (i = 0; i < s->failed; i++) { | 3306 | for (i = 0; i < s->failed && i < 2; i++) { |
3304 | if (fdev[i]->towrite && | 3307 | if (fdev[i]->towrite && |
3305 | !test_bit(R5_UPTODATE, &fdev[i]->flags) && | 3308 | !test_bit(R5_UPTODATE, &fdev[i]->flags) && |
3306 | !test_bit(R5_OVERWRITE, &fdev[i]->flags)) | 3309 | !test_bit(R5_OVERWRITE, &fdev[i]->flags)) |
@@ -3324,7 +3327,7 @@ static int need_this_block(struct stripe_head *sh, struct stripe_head_state *s, | |||
3324 | sh->sector < sh->raid_conf->mddev->recovery_cp) | 3327 | sh->sector < sh->raid_conf->mddev->recovery_cp) |
3325 | /* reconstruct-write isn't being forced */ | 3328 | /* reconstruct-write isn't being forced */ |
3326 | return 0; | 3329 | return 0; |
3327 | for (i = 0; i < s->failed; i++) { | 3330 | for (i = 0; i < s->failed && i < 2; i++) { |
3328 | if (s->failed_num[i] != sh->pd_idx && | 3331 | if (s->failed_num[i] != sh->pd_idx && |
3329 | s->failed_num[i] != sh->qd_idx && | 3332 | s->failed_num[i] != sh->qd_idx && |
3330 | !test_bit(R5_UPTODATE, &fdev[i]->flags) && | 3333 | !test_bit(R5_UPTODATE, &fdev[i]->flags) && |
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c index 0520064dc33b..a3eb20bdcd97 100644 --- a/drivers/mmc/core/core.c +++ b/drivers/mmc/core/core.c | |||
@@ -134,9 +134,11 @@ void mmc_request_done(struct mmc_host *host, struct mmc_request *mrq) | |||
134 | int err = cmd->error; | 134 | int err = cmd->error; |
135 | 135 | ||
136 | /* Flag re-tuning needed on CRC errors */ | 136 | /* Flag re-tuning needed on CRC errors */ |
137 | if (err == -EILSEQ || (mrq->sbc && mrq->sbc->error == -EILSEQ) || | 137 | if ((cmd->opcode != MMC_SEND_TUNING_BLOCK && |
138 | cmd->opcode != MMC_SEND_TUNING_BLOCK_HS200) && | ||
139 | (err == -EILSEQ || (mrq->sbc && mrq->sbc->error == -EILSEQ) || | ||
138 | (mrq->data && mrq->data->error == -EILSEQ) || | 140 | (mrq->data && mrq->data->error == -EILSEQ) || |
139 | (mrq->stop && mrq->stop->error == -EILSEQ)) | 141 | (mrq->stop && mrq->stop->error == -EILSEQ))) |
140 | mmc_retune_needed(host); | 142 | mmc_retune_needed(host); |
141 | 143 | ||
142 | if (err && cmd->retries && mmc_host_is_spi(host)) { | 144 | if (err && cmd->retries && mmc_host_is_spi(host)) { |
diff --git a/drivers/mmc/core/host.c b/drivers/mmc/core/host.c index abd933b7029b..5466f25f0281 100644 --- a/drivers/mmc/core/host.c +++ b/drivers/mmc/core/host.c | |||
@@ -457,7 +457,7 @@ int mmc_of_parse(struct mmc_host *host) | |||
457 | 0, &cd_gpio_invert); | 457 | 0, &cd_gpio_invert); |
458 | if (!ret) | 458 | if (!ret) |
459 | dev_info(host->parent, "Got CD GPIO\n"); | 459 | dev_info(host->parent, "Got CD GPIO\n"); |
460 | else if (ret != -ENOENT) | 460 | else if (ret != -ENOENT && ret != -ENOSYS) |
461 | return ret; | 461 | return ret; |
462 | 462 | ||
463 | /* | 463 | /* |
@@ -481,7 +481,7 @@ int mmc_of_parse(struct mmc_host *host) | |||
481 | ret = mmc_gpiod_request_ro(host, "wp", 0, false, 0, &ro_gpio_invert); | 481 | ret = mmc_gpiod_request_ro(host, "wp", 0, false, 0, &ro_gpio_invert); |
482 | if (!ret) | 482 | if (!ret) |
483 | dev_info(host->parent, "Got WP GPIO\n"); | 483 | dev_info(host->parent, "Got WP GPIO\n"); |
484 | else if (ret != -ENOENT) | 484 | else if (ret != -ENOENT && ret != -ENOSYS) |
485 | return ret; | 485 | return ret; |
486 | 486 | ||
487 | if (of_property_read_bool(np, "disable-wp")) | 487 | if (of_property_read_bool(np, "disable-wp")) |
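The two mmc_of_parse() hunks above stop treating -ENOSYS as fatal: like -ENOENT, it just means the card-detect or write-protect GPIO is unavailable (typically because GPIOLIB support is compiled out), so the host should carry on without it. A small userspace sketch of that optional-resource error handling, using errno values as stand-ins for the real gpiod lookup:

#include <errno.h>
#include <stdio.h>

static int request_optional_gpio(int lookup_result)
{
	int ret = lookup_result;		/* simulate the gpiod lookup */

	if (!ret)
		printf("got GPIO\n");
	else if (ret != -ENOENT && ret != -ENOSYS)
		return ret;			/* a real error: propagate it */
	/* GPIO absent or unsupported: continue without it */
	return 0;
}

int main(void)
{
	printf("%d %d %d\n",
	       request_optional_gpio(0),
	       request_optional_gpio(-ENOSYS),	/* tolerated */
	       request_optional_gpio(-EINVAL));	/* propagated */
	return 0;
}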
diff --git a/drivers/mmc/host/pxamci.c b/drivers/mmc/host/pxamci.c index 1420f29628c7..8cadd74e8407 100644 --- a/drivers/mmc/host/pxamci.c +++ b/drivers/mmc/host/pxamci.c | |||
@@ -28,6 +28,7 @@ | |||
28 | #include <linux/clk.h> | 28 | #include <linux/clk.h> |
29 | #include <linux/err.h> | 29 | #include <linux/err.h> |
30 | #include <linux/mmc/host.h> | 30 | #include <linux/mmc/host.h> |
31 | #include <linux/mmc/slot-gpio.h> | ||
31 | #include <linux/io.h> | 32 | #include <linux/io.h> |
32 | #include <linux/regulator/consumer.h> | 33 | #include <linux/regulator/consumer.h> |
33 | #include <linux/gpio.h> | 34 | #include <linux/gpio.h> |
@@ -454,12 +455,8 @@ static int pxamci_get_ro(struct mmc_host *mmc) | |||
454 | { | 455 | { |
455 | struct pxamci_host *host = mmc_priv(mmc); | 456 | struct pxamci_host *host = mmc_priv(mmc); |
456 | 457 | ||
457 | if (host->pdata && gpio_is_valid(host->pdata->gpio_card_ro)) { | 458 | if (host->pdata && gpio_is_valid(host->pdata->gpio_card_ro)) |
458 | if (host->pdata->gpio_card_ro_invert) | 459 | return mmc_gpio_get_ro(mmc); |
459 | return !gpio_get_value(host->pdata->gpio_card_ro); | ||
460 | else | ||
461 | return gpio_get_value(host->pdata->gpio_card_ro); | ||
462 | } | ||
463 | if (host->pdata && host->pdata->get_ro) | 460 | if (host->pdata && host->pdata->get_ro) |
464 | return !!host->pdata->get_ro(mmc_dev(mmc)); | 461 | return !!host->pdata->get_ro(mmc_dev(mmc)); |
465 | /* | 462 | /* |
@@ -551,6 +548,7 @@ static void pxamci_enable_sdio_irq(struct mmc_host *host, int enable) | |||
551 | 548 | ||
552 | static const struct mmc_host_ops pxamci_ops = { | 549 | static const struct mmc_host_ops pxamci_ops = { |
553 | .request = pxamci_request, | 550 | .request = pxamci_request, |
551 | .get_cd = mmc_gpio_get_cd, | ||
554 | .get_ro = pxamci_get_ro, | 552 | .get_ro = pxamci_get_ro, |
555 | .set_ios = pxamci_set_ios, | 553 | .set_ios = pxamci_set_ios, |
556 | .enable_sdio_irq = pxamci_enable_sdio_irq, | 554 | .enable_sdio_irq = pxamci_enable_sdio_irq, |
@@ -790,37 +788,31 @@ static int pxamci_probe(struct platform_device *pdev) | |||
790 | gpio_power = host->pdata->gpio_power; | 788 | gpio_power = host->pdata->gpio_power; |
791 | } | 789 | } |
792 | if (gpio_is_valid(gpio_power)) { | 790 | if (gpio_is_valid(gpio_power)) { |
793 | ret = gpio_request(gpio_power, "mmc card power"); | 791 | ret = devm_gpio_request(&pdev->dev, gpio_power, |
792 | "mmc card power"); | ||
794 | if (ret) { | 793 | if (ret) { |
795 | dev_err(&pdev->dev, "Failed requesting gpio_power %d\n", gpio_power); | 794 | dev_err(&pdev->dev, "Failed requesting gpio_power %d\n", |
795 | gpio_power); | ||
796 | goto out; | 796 | goto out; |
797 | } | 797 | } |
798 | gpio_direction_output(gpio_power, | 798 | gpio_direction_output(gpio_power, |
799 | host->pdata->gpio_power_invert); | 799 | host->pdata->gpio_power_invert); |
800 | } | 800 | } |
801 | if (gpio_is_valid(gpio_ro)) { | 801 | if (gpio_is_valid(gpio_ro)) |
802 | ret = gpio_request(gpio_ro, "mmc card read only"); | 802 | ret = mmc_gpio_request_ro(mmc, gpio_ro); |
803 | if (ret) { | 803 | if (ret) { |
804 | dev_err(&pdev->dev, "Failed requesting gpio_ro %d\n", gpio_ro); | 804 | dev_err(&pdev->dev, "Failed requesting gpio_ro %d\n", gpio_ro); |
805 | goto err_gpio_ro; | 805 | goto out; |
806 | } | 806 | } else { |
807 | gpio_direction_input(gpio_ro); | 807 | mmc->caps |= host->pdata->gpio_card_ro_invert ? |
808 | MMC_CAP2_RO_ACTIVE_HIGH : 0; | ||
808 | } | 809 | } |
809 | if (gpio_is_valid(gpio_cd)) { | ||
810 | ret = gpio_request(gpio_cd, "mmc card detect"); | ||
811 | if (ret) { | ||
812 | dev_err(&pdev->dev, "Failed requesting gpio_cd %d\n", gpio_cd); | ||
813 | goto err_gpio_cd; | ||
814 | } | ||
815 | gpio_direction_input(gpio_cd); | ||
816 | 810 | ||
817 | ret = request_irq(gpio_to_irq(gpio_cd), pxamci_detect_irq, | 811 | if (gpio_is_valid(gpio_cd)) |
818 | IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING, | 812 | ret = mmc_gpio_request_cd(mmc, gpio_cd, 0); |
819 | "mmc card detect", mmc); | 813 | if (ret) { |
820 | if (ret) { | 814 | dev_err(&pdev->dev, "Failed requesting gpio_cd %d\n", gpio_cd); |
821 | dev_err(&pdev->dev, "failed to request card detect IRQ\n"); | 815 | goto out; |
822 | goto err_request_irq; | ||
823 | } | ||
824 | } | 816 | } |
825 | 817 | ||
826 | if (host->pdata && host->pdata->init) | 818 | if (host->pdata && host->pdata->init) |
@@ -835,13 +827,7 @@ static int pxamci_probe(struct platform_device *pdev) | |||
835 | 827 | ||
836 | return 0; | 828 | return 0; |
837 | 829 | ||
838 | err_request_irq: | 830 | out: |
839 | gpio_free(gpio_cd); | ||
840 | err_gpio_cd: | ||
841 | gpio_free(gpio_ro); | ||
842 | err_gpio_ro: | ||
843 | gpio_free(gpio_power); | ||
844 | out: | ||
845 | if (host) { | 831 | if (host) { |
846 | if (host->dma_chan_rx) | 832 | if (host->dma_chan_rx) |
847 | dma_release_channel(host->dma_chan_rx); | 833 | dma_release_channel(host->dma_chan_rx); |
@@ -873,14 +859,6 @@ static int pxamci_remove(struct platform_device *pdev) | |||
873 | gpio_ro = host->pdata->gpio_card_ro; | 859 | gpio_ro = host->pdata->gpio_card_ro; |
874 | gpio_power = host->pdata->gpio_power; | 860 | gpio_power = host->pdata->gpio_power; |
875 | } | 861 | } |
876 | if (gpio_is_valid(gpio_cd)) { | ||
877 | free_irq(gpio_to_irq(gpio_cd), mmc); | ||
878 | gpio_free(gpio_cd); | ||
879 | } | ||
880 | if (gpio_is_valid(gpio_ro)) | ||
881 | gpio_free(gpio_ro); | ||
882 | if (gpio_is_valid(gpio_power)) | ||
883 | gpio_free(gpio_power); | ||
884 | if (host->vcc) | 862 | if (host->vcc) |
885 | regulator_put(host->vcc); | 863 | regulator_put(host->vcc); |
886 | 864 | ||
diff --git a/drivers/mmc/host/sunxi-mmc.c b/drivers/mmc/host/sunxi-mmc.c index a7b7a6771598..b981b8552e43 100644 --- a/drivers/mmc/host/sunxi-mmc.c +++ b/drivers/mmc/host/sunxi-mmc.c | |||
@@ -210,6 +210,16 @@ | |||
210 | #define SDXC_IDMAC_DES0_CES BIT(30) /* card error summary */ | 210 | #define SDXC_IDMAC_DES0_CES BIT(30) /* card error summary */ |
211 | #define SDXC_IDMAC_DES0_OWN BIT(31) /* 1-idma owns it, 0-host owns it */ | 211 | #define SDXC_IDMAC_DES0_OWN BIT(31) /* 1-idma owns it, 0-host owns it */ |
212 | 212 | ||
213 | #define SDXC_CLK_400K 0 | ||
214 | #define SDXC_CLK_25M 1 | ||
215 | #define SDXC_CLK_50M 2 | ||
216 | #define SDXC_CLK_50M_DDR 3 | ||
217 | |||
218 | struct sunxi_mmc_clk_delay { | ||
219 | u32 output; | ||
220 | u32 sample; | ||
221 | }; | ||
222 | |||
213 | struct sunxi_idma_des { | 223 | struct sunxi_idma_des { |
214 | u32 config; | 224 | u32 config; |
215 | u32 buf_size; | 225 | u32 buf_size; |
@@ -229,6 +239,7 @@ struct sunxi_mmc_host { | |||
229 | struct clk *clk_mmc; | 239 | struct clk *clk_mmc; |
230 | struct clk *clk_sample; | 240 | struct clk *clk_sample; |
231 | struct clk *clk_output; | 241 | struct clk *clk_output; |
242 | const struct sunxi_mmc_clk_delay *clk_delays; | ||
232 | 243 | ||
233 | /* irq */ | 244 | /* irq */ |
234 | spinlock_t lock; | 245 | spinlock_t lock; |
@@ -654,25 +665,19 @@ static int sunxi_mmc_clk_set_rate(struct sunxi_mmc_host *host, | |||
654 | 665 | ||
655 | /* determine delays */ | 666 | /* determine delays */ |
656 | if (rate <= 400000) { | 667 | if (rate <= 400000) { |
657 | oclk_dly = 180; | 668 | oclk_dly = host->clk_delays[SDXC_CLK_400K].output; |
658 | sclk_dly = 42; | 669 | sclk_dly = host->clk_delays[SDXC_CLK_400K].sample; |
659 | } else if (rate <= 25000000) { | 670 | } else if (rate <= 25000000) { |
660 | oclk_dly = 180; | 671 | oclk_dly = host->clk_delays[SDXC_CLK_25M].output; |
661 | sclk_dly = 75; | 672 | sclk_dly = host->clk_delays[SDXC_CLK_25M].sample; |
662 | } else if (rate <= 50000000) { | 673 | } else if (rate <= 50000000) { |
663 | if (ios->timing == MMC_TIMING_UHS_DDR50) { | 674 | if (ios->timing == MMC_TIMING_UHS_DDR50) { |
664 | oclk_dly = 60; | 675 | oclk_dly = host->clk_delays[SDXC_CLK_50M_DDR].output; |
665 | sclk_dly = 120; | 676 | sclk_dly = host->clk_delays[SDXC_CLK_50M_DDR].sample; |
666 | } else { | 677 | } else { |
667 | oclk_dly = 90; | 678 | oclk_dly = host->clk_delays[SDXC_CLK_50M].output; |
668 | sclk_dly = 150; | 679 | sclk_dly = host->clk_delays[SDXC_CLK_50M].sample; |
669 | } | 680 | } |
670 | } else if (rate <= 100000000) { | ||
671 | oclk_dly = 6; | ||
672 | sclk_dly = 24; | ||
673 | } else if (rate <= 200000000) { | ||
674 | oclk_dly = 3; | ||
675 | sclk_dly = 12; | ||
676 | } else { | 681 | } else { |
677 | return -EINVAL; | 682 | return -EINVAL; |
678 | } | 683 | } |
@@ -871,6 +876,7 @@ static void sunxi_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq) | |||
871 | static const struct of_device_id sunxi_mmc_of_match[] = { | 876 | static const struct of_device_id sunxi_mmc_of_match[] = { |
872 | { .compatible = "allwinner,sun4i-a10-mmc", }, | 877 | { .compatible = "allwinner,sun4i-a10-mmc", }, |
873 | { .compatible = "allwinner,sun5i-a13-mmc", }, | 878 | { .compatible = "allwinner,sun5i-a13-mmc", }, |
879 | { .compatible = "allwinner,sun9i-a80-mmc", }, | ||
874 | { /* sentinel */ } | 880 | { /* sentinel */ } |
875 | }; | 881 | }; |
876 | MODULE_DEVICE_TABLE(of, sunxi_mmc_of_match); | 882 | MODULE_DEVICE_TABLE(of, sunxi_mmc_of_match); |
@@ -884,6 +890,20 @@ static struct mmc_host_ops sunxi_mmc_ops = { | |||
884 | .hw_reset = sunxi_mmc_hw_reset, | 890 | .hw_reset = sunxi_mmc_hw_reset, |
885 | }; | 891 | }; |
886 | 892 | ||
893 | static const struct sunxi_mmc_clk_delay sunxi_mmc_clk_delays[] = { | ||
894 | [SDXC_CLK_400K] = { .output = 180, .sample = 180 }, | ||
895 | [SDXC_CLK_25M] = { .output = 180, .sample = 75 }, | ||
896 | [SDXC_CLK_50M] = { .output = 90, .sample = 120 }, | ||
897 | [SDXC_CLK_50M_DDR] = { .output = 60, .sample = 120 }, | ||
898 | }; | ||
899 | |||
900 | static const struct sunxi_mmc_clk_delay sun9i_mmc_clk_delays[] = { | ||
901 | [SDXC_CLK_400K] = { .output = 180, .sample = 180 }, | ||
902 | [SDXC_CLK_25M] = { .output = 180, .sample = 75 }, | ||
903 | [SDXC_CLK_50M] = { .output = 150, .sample = 120 }, | ||
904 | [SDXC_CLK_50M_DDR] = { .output = 90, .sample = 120 }, | ||
905 | }; | ||
906 | |||
887 | static int sunxi_mmc_resource_request(struct sunxi_mmc_host *host, | 907 | static int sunxi_mmc_resource_request(struct sunxi_mmc_host *host, |
888 | struct platform_device *pdev) | 908 | struct platform_device *pdev) |
889 | { | 909 | { |
@@ -895,6 +915,11 @@ static int sunxi_mmc_resource_request(struct sunxi_mmc_host *host, | |||
895 | else | 915 | else |
896 | host->idma_des_size_bits = 16; | 916 | host->idma_des_size_bits = 16; |
897 | 917 | ||
918 | if (of_device_is_compatible(np, "allwinner,sun9i-a80-mmc")) | ||
919 | host->clk_delays = sun9i_mmc_clk_delays; | ||
920 | else | ||
921 | host->clk_delays = sunxi_mmc_clk_delays; | ||
922 | |||
898 | ret = mmc_regulator_get_supply(host->mmc); | 923 | ret = mmc_regulator_get_supply(host->mmc); |
899 | if (ret) { | 924 | if (ret) { |
900 | if (ret != -EPROBE_DEFER) | 925 | if (ret != -EPROBE_DEFER) |
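The sunxi-mmc changes above replace hard-coded clock delays with per-SoC tables selected once at probe time from the compatible string, which is what lets the sun9i-a80 variant reuse the same rate-setting code. A standalone sketch of that data-driven selection (the program is a stand-in; only the delay values mirror the tables in the diff):

#include <stdio.h>
#include <string.h>

enum clk_bucket { CLK_400K, CLK_25M, CLK_50M, CLK_50M_DDR, CLK_NR };

struct clk_delay { unsigned int output, sample; };

static const struct clk_delay sunxi_delays[CLK_NR] = {
	[CLK_400K]    = { 180, 180 },
	[CLK_25M]     = { 180,  75 },
	[CLK_50M]     = {  90, 120 },
	[CLK_50M_DDR] = {  60, 120 },
};

static const struct clk_delay sun9i_delays[CLK_NR] = {
	[CLK_400K]    = { 180, 180 },
	[CLK_25M]     = { 180,  75 },
	[CLK_50M]     = { 150, 120 },
	[CLK_50M_DDR] = {  90, 120 },
};

int main(void)
{
	const char *compatible = "allwinner,sun9i-a80-mmc";
	const struct clk_delay *delays =
		strstr(compatible, "sun9i") ? sun9i_delays : sunxi_delays;

	printf("50M: output=%u sample=%u\n",
	       delays[CLK_50M].output, delays[CLK_50M].sample);
	return 0;
}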
diff --git a/drivers/mtd/ubi/io.c b/drivers/mtd/ubi/io.c index 5bbd1f094f4e..1fc23e48fe8e 100644 --- a/drivers/mtd/ubi/io.c +++ b/drivers/mtd/ubi/io.c | |||
@@ -926,6 +926,11 @@ static int validate_vid_hdr(const struct ubi_device *ubi, | |||
926 | goto bad; | 926 | goto bad; |
927 | } | 927 | } |
928 | 928 | ||
929 | if (data_size > ubi->leb_size) { | ||
930 | ubi_err(ubi, "bad data_size"); | ||
931 | goto bad; | ||
932 | } | ||
933 | |||
929 | if (vol_type == UBI_VID_STATIC) { | 934 | if (vol_type == UBI_VID_STATIC) { |
930 | /* | 935 | /* |
931 | * Although from high-level point of view static volumes may | 936 | * Although from high-level point of view static volumes may |
diff --git a/drivers/mtd/ubi/vtbl.c b/drivers/mtd/ubi/vtbl.c index 80bdd5b88bac..d85c19762160 100644 --- a/drivers/mtd/ubi/vtbl.c +++ b/drivers/mtd/ubi/vtbl.c | |||
@@ -649,6 +649,7 @@ static int init_volumes(struct ubi_device *ubi, | |||
649 | if (ubi->corr_peb_count) | 649 | if (ubi->corr_peb_count) |
650 | ubi_err(ubi, "%d PEBs are corrupted and not used", | 650 | ubi_err(ubi, "%d PEBs are corrupted and not used", |
651 | ubi->corr_peb_count); | 651 | ubi->corr_peb_count); |
652 | return -ENOSPC; | ||
652 | } | 653 | } |
653 | ubi->rsvd_pebs += reserved_pebs; | 654 | ubi->rsvd_pebs += reserved_pebs; |
654 | ubi->avail_pebs -= reserved_pebs; | 655 | ubi->avail_pebs -= reserved_pebs; |
diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c index 275d9fb6fe5c..eb4489f9082f 100644 --- a/drivers/mtd/ubi/wl.c +++ b/drivers/mtd/ubi/wl.c | |||
@@ -1601,6 +1601,7 @@ int ubi_wl_init(struct ubi_device *ubi, struct ubi_attach_info *ai) | |||
1601 | if (ubi->corr_peb_count) | 1601 | if (ubi->corr_peb_count) |
1602 | ubi_err(ubi, "%d PEBs are corrupted and not used", | 1602 | ubi_err(ubi, "%d PEBs are corrupted and not used", |
1603 | ubi->corr_peb_count); | 1603 | ubi->corr_peb_count); |
1604 | err = -ENOSPC; | ||
1604 | goto out_free; | 1605 | goto out_free; |
1605 | } | 1606 | } |
1606 | ubi->avail_pebs -= reserved_pebs; | 1607 | ubi->avail_pebs -= reserved_pebs; |
diff --git a/drivers/net/dsa/mv88e6xxx.c b/drivers/net/dsa/mv88e6xxx.c index f8baa897d1a0..1f7dd927cc5e 100644 --- a/drivers/net/dsa/mv88e6xxx.c +++ b/drivers/net/dsa/mv88e6xxx.c | |||
@@ -2051,6 +2051,8 @@ static int mv88e6xxx_setup_port(struct dsa_switch *ds, int port) | |||
2051 | reg |= PORT_CONTROL_FRAME_ETHER_TYPE_DSA; | 2051 | reg |= PORT_CONTROL_FRAME_ETHER_TYPE_DSA; |
2052 | else | 2052 | else |
2053 | reg |= PORT_CONTROL_FRAME_MODE_DSA; | 2053 | reg |= PORT_CONTROL_FRAME_MODE_DSA; |
2054 | reg |= PORT_CONTROL_FORWARD_UNKNOWN | | ||
2055 | PORT_CONTROL_FORWARD_UNKNOWN_MC; | ||
2054 | } | 2056 | } |
2055 | 2057 | ||
2056 | if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) || | 2058 | if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) || |
diff --git a/drivers/net/ethernet/brocade/bna/bfa_ioc.c b/drivers/net/ethernet/brocade/bna/bfa_ioc.c index b7a0f7879de2..9e59663a6ead 100644 --- a/drivers/net/ethernet/brocade/bna/bfa_ioc.c +++ b/drivers/net/ethernet/brocade/bna/bfa_ioc.c | |||
@@ -1543,7 +1543,7 @@ bfa_flash_cmd_act_check(void __iomem *pci_bar) | |||
1543 | } | 1543 | } |
1544 | 1544 | ||
1545 | /* Flush FLI data fifo. */ | 1545 | /* Flush FLI data fifo. */ |
1546 | static u32 | 1546 | static int |
1547 | bfa_flash_fifo_flush(void __iomem *pci_bar) | 1547 | bfa_flash_fifo_flush(void __iomem *pci_bar) |
1548 | { | 1548 | { |
1549 | u32 i; | 1549 | u32 i; |
@@ -1573,11 +1573,11 @@ bfa_flash_fifo_flush(void __iomem *pci_bar) | |||
1573 | } | 1573 | } |
1574 | 1574 | ||
1575 | /* Read flash status. */ | 1575 | /* Read flash status. */ |
1576 | static u32 | 1576 | static int |
1577 | bfa_flash_status_read(void __iomem *pci_bar) | 1577 | bfa_flash_status_read(void __iomem *pci_bar) |
1578 | { | 1578 | { |
1579 | union bfa_flash_dev_status_reg dev_status; | 1579 | union bfa_flash_dev_status_reg dev_status; |
1580 | u32 status; | 1580 | int status; |
1581 | u32 ret_status; | 1581 | u32 ret_status; |
1582 | int i; | 1582 | int i; |
1583 | 1583 | ||
@@ -1611,11 +1611,11 @@ bfa_flash_status_read(void __iomem *pci_bar) | |||
1611 | } | 1611 | } |
1612 | 1612 | ||
1613 | /* Start flash read operation. */ | 1613 | /* Start flash read operation. */ |
1614 | static u32 | 1614 | static int |
1615 | bfa_flash_read_start(void __iomem *pci_bar, u32 offset, u32 len, | 1615 | bfa_flash_read_start(void __iomem *pci_bar, u32 offset, u32 len, |
1616 | char *buf) | 1616 | char *buf) |
1617 | { | 1617 | { |
1618 | u32 status; | 1618 | int status; |
1619 | 1619 | ||
1620 | /* len must be multiple of 4 and not exceeding fifo size */ | 1620 | /* len must be multiple of 4 and not exceeding fifo size */ |
1621 | if (len == 0 || len > BFA_FLASH_FIFO_SIZE || (len & 0x03) != 0) | 1621 | if (len == 0 || len > BFA_FLASH_FIFO_SIZE || (len & 0x03) != 0) |
@@ -1703,7 +1703,8 @@ static enum bfa_status | |||
1703 | bfa_flash_raw_read(void __iomem *pci_bar, u32 offset, char *buf, | 1703 | bfa_flash_raw_read(void __iomem *pci_bar, u32 offset, char *buf, |
1704 | u32 len) | 1704 | u32 len) |
1705 | { | 1705 | { |
1706 | u32 n, status; | 1706 | u32 n; |
1707 | int status; | ||
1707 | u32 off, l, s, residue, fifo_sz; | 1708 | u32 off, l, s, residue, fifo_sz; |
1708 | 1709 | ||
1709 | residue = len; | 1710 | residue = len; |
diff --git a/drivers/net/ethernet/hisilicon/hip04_eth.c b/drivers/net/ethernet/hisilicon/hip04_eth.c index cc2d8b4b18e3..253f8ed0537a 100644 --- a/drivers/net/ethernet/hisilicon/hip04_eth.c +++ b/drivers/net/ethernet/hisilicon/hip04_eth.c | |||
@@ -816,7 +816,7 @@ static int hip04_mac_probe(struct platform_device *pdev) | |||
816 | struct net_device *ndev; | 816 | struct net_device *ndev; |
817 | struct hip04_priv *priv; | 817 | struct hip04_priv *priv; |
818 | struct resource *res; | 818 | struct resource *res; |
819 | unsigned int irq; | 819 | int irq; |
820 | int ret; | 820 | int ret; |
821 | 821 | ||
822 | ndev = alloc_etherdev(sizeof(struct hip04_priv)); | 822 | ndev = alloc_etherdev(sizeof(struct hip04_priv)); |
diff --git a/drivers/net/ethernet/ibm/emac/core.h b/drivers/net/ethernet/ibm/emac/core.h index 28df37420da9..ac02c675c59c 100644 --- a/drivers/net/ethernet/ibm/emac/core.h +++ b/drivers/net/ethernet/ibm/emac/core.h | |||
@@ -460,8 +460,8 @@ struct emac_ethtool_regs_subhdr { | |||
460 | u32 index; | 460 | u32 index; |
461 | }; | 461 | }; |
462 | 462 | ||
463 | #define EMAC_ETHTOOL_REGS_VER 0 | 463 | #define EMAC_ETHTOOL_REGS_VER 3 |
464 | #define EMAC4_ETHTOOL_REGS_VER 1 | 464 | #define EMAC4_ETHTOOL_REGS_VER 4 |
465 | #define EMAC4SYNC_ETHTOOL_REGS_VER 2 | 465 | #define EMAC4SYNC_ETHTOOL_REGS_VER 5 |
466 | 466 | ||
467 | #endif /* __IBM_NEWEMAC_CORE_H */ | 467 | #endif /* __IBM_NEWEMAC_CORE_H */ |
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq.c b/drivers/net/ethernet/intel/i40e/i40e_adminq.c index 3e0d20037675..62488a67149d 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_adminq.c +++ b/drivers/net/ethernet/intel/i40e/i40e_adminq.c | |||
@@ -946,6 +946,13 @@ i40e_status i40e_clean_arq_element(struct i40e_hw *hw, | |||
946 | /* take the lock before we start messing with the ring */ | 946 | /* take the lock before we start messing with the ring */ |
947 | mutex_lock(&hw->aq.arq_mutex); | 947 | mutex_lock(&hw->aq.arq_mutex); |
948 | 948 | ||
949 | if (hw->aq.arq.count == 0) { | ||
950 | i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, | ||
951 | "AQRX: Admin queue not initialized.\n"); | ||
952 | ret_code = I40E_ERR_QUEUE_EMPTY; | ||
953 | goto clean_arq_element_err; | ||
954 | } | ||
955 | |||
949 | /* set next_to_use to head */ | 956 | /* set next_to_use to head */ |
950 | ntu = (rd32(hw, hw->aq.arq.head) & I40E_PF_ARQH_ARQH_MASK); | 957 | ntu = (rd32(hw, hw->aq.arq.head) & I40E_PF_ARQH_ARQH_MASK); |
951 | if (ntu == ntc) { | 958 | if (ntu == ntc) { |
@@ -1007,6 +1014,8 @@ clean_arq_element_out: | |||
1007 | /* Set pending if needed, unlock and return */ | 1014 | /* Set pending if needed, unlock and return */ |
1008 | if (pending != NULL) | 1015 | if (pending != NULL) |
1009 | *pending = (ntc > ntu ? hw->aq.arq.count : 0) + (ntu - ntc); | 1016 | *pending = (ntc > ntu ? hw->aq.arq.count : 0) + (ntu - ntc); |
1017 | |||
1018 | clean_arq_element_err: | ||
1010 | mutex_unlock(&hw->aq.arq_mutex); | 1019 | mutex_unlock(&hw->aq.arq_mutex); |
1011 | 1020 | ||
1012 | if (i40e_is_nvm_update_op(&e->desc)) { | 1021 | if (i40e_is_nvm_update_op(&e->desc)) { |
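The adminq hunk above (and its i40evf twin below) bails out early, under the mutex, when the receive queue was never initialized, and routes the failure through a label so the mutex is always released. A userspace stand-in for that lock/check/goto-unlock idiom (struct queue and the function are illustrative):

#include <pthread.h>
#include <stdio.h>

struct queue { pthread_mutex_t lock; unsigned int count; };

static int clean_one_element(struct queue *q)
{
	int ret = 0;

	pthread_mutex_lock(&q->lock);

	if (q->count == 0) {		/* queue not initialized */
		ret = -1;
		goto out_unlock;	/* single exit keeps the unlock unmissable */
	}

	/* ... normal ring processing would go here ... */

out_unlock:
	pthread_mutex_unlock(&q->lock);
	return ret;
}

int main(void)
{
	struct queue q = { PTHREAD_MUTEX_INITIALIZER, 0 };

	printf("ret=%d\n", clean_one_element(&q));	/* prints ret=-1 */
	return 0;
}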
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index 851c1a159be8..2fdf978ae6a5 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c | |||
@@ -2672,7 +2672,8 @@ static int i40e_configure_rx_ring(struct i40e_ring *ring) | |||
2672 | rx_ctx.lrxqthresh = 2; | 2672 | rx_ctx.lrxqthresh = 2; |
2673 | rx_ctx.crcstrip = 1; | 2673 | rx_ctx.crcstrip = 1; |
2674 | rx_ctx.l2tsel = 1; | 2674 | rx_ctx.l2tsel = 1; |
2675 | rx_ctx.showiv = 1; | 2675 | /* this controls whether VLAN is stripped from inner headers */ |
2676 | rx_ctx.showiv = 0; | ||
2676 | #ifdef I40E_FCOE | 2677 | #ifdef I40E_FCOE |
2677 | rx_ctx.fc_ena = (vsi->type == I40E_VSI_FCOE); | 2678 | rx_ctx.fc_ena = (vsi->type == I40E_VSI_FCOE); |
2678 | #endif | 2679 | #endif |
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_adminq.c b/drivers/net/ethernet/intel/i40evf/i40e_adminq.c index f08450b90774..929d47152bf2 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_adminq.c +++ b/drivers/net/ethernet/intel/i40evf/i40e_adminq.c | |||
@@ -887,6 +887,13 @@ i40e_status i40evf_clean_arq_element(struct i40e_hw *hw, | |||
887 | /* take the lock before we start messing with the ring */ | 887 | /* take the lock before we start messing with the ring */ |
888 | mutex_lock(&hw->aq.arq_mutex); | 888 | mutex_lock(&hw->aq.arq_mutex); |
889 | 889 | ||
890 | if (hw->aq.arq.count == 0) { | ||
891 | i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, | ||
892 | "AQRX: Admin queue not initialized.\n"); | ||
893 | ret_code = I40E_ERR_QUEUE_EMPTY; | ||
894 | goto clean_arq_element_err; | ||
895 | } | ||
896 | |||
890 | /* set next_to_use to head */ | 897 | /* set next_to_use to head */ |
891 | ntu = (rd32(hw, hw->aq.arq.head) & I40E_VF_ARQH1_ARQH_MASK); | 898 | ntu = (rd32(hw, hw->aq.arq.head) & I40E_VF_ARQH1_ARQH_MASK); |
892 | if (ntu == ntc) { | 899 | if (ntu == ntc) { |
@@ -948,6 +955,8 @@ clean_arq_element_out: | |||
948 | /* Set pending if needed, unlock and return */ | 955 | /* Set pending if needed, unlock and return */ |
949 | if (pending != NULL) | 956 | if (pending != NULL) |
950 | *pending = (ntc > ntu ? hw->aq.arq.count : 0) + (ntu - ntc); | 957 | *pending = (ntc > ntu ? hw->aq.arq.count : 0) + (ntu - ntc); |
958 | |||
959 | clean_arq_element_err: | ||
951 | mutex_unlock(&hw->aq.arq_mutex); | 960 | mutex_unlock(&hw->aq.arq_mutex); |
952 | 961 | ||
953 | return ret_code; | 962 | return ret_code; |
diff --git a/drivers/net/ethernet/mellanox/mlx4/mcg.c b/drivers/net/ethernet/mellanox/mlx4/mcg.c index bd9ea0d01aae..1d4e2e054647 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mcg.c +++ b/drivers/net/ethernet/mellanox/mlx4/mcg.c | |||
@@ -1184,10 +1184,11 @@ out: | |||
1184 | if (prot == MLX4_PROT_ETH) { | 1184 | if (prot == MLX4_PROT_ETH) { |
1185 | /* manage the steering entry for promisc mode */ | 1185 | /* manage the steering entry for promisc mode */ |
1186 | if (new_entry) | 1186 | if (new_entry) |
1187 | new_steering_entry(dev, port, steer, index, qp->qpn); | 1187 | err = new_steering_entry(dev, port, steer, |
1188 | index, qp->qpn); | ||
1188 | else | 1189 | else |
1189 | existing_steering_entry(dev, port, steer, | 1190 | err = existing_steering_entry(dev, port, steer, |
1190 | index, qp->qpn); | 1191 | index, qp->qpn); |
1191 | } | 1192 | } |
1192 | if (err && link && index != -1) { | 1193 | if (err && link && index != -1) { |
1193 | if (index < dev->caps.num_mgms) | 1194 | if (index < dev->caps.num_mgms) |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw.c b/drivers/net/ethernet/mellanox/mlx5/core/fw.c index aa0d5ffe92d8..9335e5ae18cc 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fw.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fw.c | |||
@@ -200,25 +200,3 @@ int mlx5_cmd_teardown_hca(struct mlx5_core_dev *dev) | |||
200 | 200 | ||
201 | return err; | 201 | return err; |
202 | } | 202 | } |
203 | |||
204 | int mlx5_core_query_special_context(struct mlx5_core_dev *dev, u32 *rsvd_lkey) | ||
205 | { | ||
206 | struct mlx5_cmd_query_special_contexts_mbox_in in; | ||
207 | struct mlx5_cmd_query_special_contexts_mbox_out out; | ||
208 | int err; | ||
209 | |||
210 | memset(&in, 0, sizeof(in)); | ||
211 | memset(&out, 0, sizeof(out)); | ||
212 | in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_SPECIAL_CONTEXTS); | ||
213 | err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out)); | ||
214 | if (err) | ||
215 | return err; | ||
216 | |||
217 | if (out.hdr.status) | ||
218 | err = mlx5_cmd_status_to_err(&out.hdr); | ||
219 | |||
220 | *rsvd_lkey = be32_to_cpu(out.resd_lkey); | ||
221 | |||
222 | return err; | ||
223 | } | ||
224 | EXPORT_SYMBOL(mlx5_core_query_special_context); | ||
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c index 2b32e0c5a0b4..b4f21232019a 100644 --- a/drivers/net/ethernet/realtek/r8169.c +++ b/drivers/net/ethernet/realtek/r8169.c | |||
@@ -6081,7 +6081,7 @@ static void rtl_hw_start_8168h_1(struct rtl8169_private *tp) | |||
6081 | { | 6081 | { |
6082 | void __iomem *ioaddr = tp->mmio_addr; | 6082 | void __iomem *ioaddr = tp->mmio_addr; |
6083 | struct pci_dev *pdev = tp->pci_dev; | 6083 | struct pci_dev *pdev = tp->pci_dev; |
6084 | u16 rg_saw_cnt; | 6084 | int rg_saw_cnt; |
6085 | u32 data; | 6085 | u32 data; |
6086 | static const struct ephy_info e_info_8168h_1[] = { | 6086 | static const struct ephy_info e_info_8168h_1[] = { |
6087 | { 0x1e, 0x0800, 0x0001 }, | 6087 | { 0x1e, 0x0800, 0x0001 }, |
diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c index dd652f2ae03d..108a3118ace7 100644 --- a/drivers/pci/pci-driver.c +++ b/drivers/pci/pci-driver.c | |||
@@ -299,9 +299,10 @@ static long local_pci_probe(void *_ddi) | |||
299 | * Unbound PCI devices are always put in D0, regardless of | 299 | * Unbound PCI devices are always put in D0, regardless of |
300 | * runtime PM status. During probe, the device is set to | 300 | * runtime PM status. During probe, the device is set to |
301 | * active and the usage count is incremented. If the driver | 301 | * active and the usage count is incremented. If the driver |
302 | * supports runtime PM, it should call pm_runtime_put_noidle() | 302 | * supports runtime PM, it should call pm_runtime_put_noidle(), |
303 | * in its probe routine and pm_runtime_get_noresume() in its | 303 | * or any other runtime PM helper function decrementing the usage |
304 | * remove routine. | 304 | * count, in its probe routine and pm_runtime_get_noresume() in |
305 | * its remove routine. | ||
305 | */ | 306 | */ |
306 | pm_runtime_get_sync(dev); | 307 | pm_runtime_get_sync(dev); |
307 | pci_dev->driver = pci_drv; | 308 | pci_dev->driver = pci_drv; |
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c index cbfc5990052b..126a48c6431e 100644 --- a/drivers/scsi/scsi_lib.c +++ b/drivers/scsi/scsi_lib.c | |||
@@ -1957,7 +1957,7 @@ static int scsi_mq_prep_fn(struct request *req) | |||
1957 | static void scsi_mq_done(struct scsi_cmnd *cmd) | 1957 | static void scsi_mq_done(struct scsi_cmnd *cmd) |
1958 | { | 1958 | { |
1959 | trace_scsi_dispatch_cmd_done(cmd); | 1959 | trace_scsi_dispatch_cmd_done(cmd); |
1960 | blk_mq_complete_request(cmd->request); | 1960 | blk_mq_complete_request(cmd->request, cmd->request->errors); |
1961 | } | 1961 | } |
1962 | 1962 | ||
1963 | static int scsi_queue_rq(struct blk_mq_hw_ctx *hctx, | 1963 | static int scsi_queue_rq(struct blk_mq_hw_ctx *hctx, |
diff --git a/drivers/thermal/power_allocator.c b/drivers/thermal/power_allocator.c index 7ff96270c933..e570ff084add 100644 --- a/drivers/thermal/power_allocator.c +++ b/drivers/thermal/power_allocator.c | |||
@@ -144,6 +144,16 @@ static void estimate_pid_constants(struct thermal_zone_device *tz, | |||
144 | switch_on_temp = 0; | 144 | switch_on_temp = 0; |
145 | 145 | ||
146 | temperature_threshold = control_temp - switch_on_temp; | 146 | temperature_threshold = control_temp - switch_on_temp; |
147 | /* | ||
148 | * estimate_pid_constants() tries to find appropriate default | ||
149 | * values for thermal zones that don't provide them. If a | ||
150 | * system integrator has configured a thermal zone with two | ||
151 | * passive trip points at the same temperature, that person | ||
152 | * hasn't put any effort to set up the thermal zone properly | ||
153 | * so just give up. | ||
154 | */ | ||
155 | if (!temperature_threshold) | ||
156 | return; | ||
147 | 157 | ||
148 | if (!tz->tzp->k_po || force) | 158 | if (!tz->tzp->k_po || force) |
149 | tz->tzp->k_po = int_to_frac(sustainable_power) / | 159 | tz->tzp->k_po = int_to_frac(sustainable_power) / |
diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig index c68edc16aa54..79e1aa1b0959 100644 --- a/drivers/watchdog/Kconfig +++ b/drivers/watchdog/Kconfig | |||
@@ -817,8 +817,9 @@ config ITCO_WDT | |||
817 | tristate "Intel TCO Timer/Watchdog" | 817 | tristate "Intel TCO Timer/Watchdog" |
818 | depends on (X86 || IA64) && PCI | 818 | depends on (X86 || IA64) && PCI |
819 | select WATCHDOG_CORE | 819 | select WATCHDOG_CORE |
820 | depends on I2C || I2C=n | ||
820 | select LPC_ICH if !EXPERT | 821 | select LPC_ICH if !EXPERT |
821 | select I2C_I801 if !EXPERT | 822 | select I2C_I801 if !EXPERT && I2C |
822 | ---help--- | 823 | ---help--- |
823 | Hardware driver for the intel TCO timer based watchdog devices. | 824 | Hardware driver for the intel TCO timer based watchdog devices. |
824 | These drivers are included in the Intel 82801 I/O Controller | 825 | These drivers are included in the Intel 82801 I/O Controller |
diff --git a/drivers/watchdog/bcm2835_wdt.c b/drivers/watchdog/bcm2835_wdt.c index 66c3e656a616..8a5ce5b5a0b6 100644 --- a/drivers/watchdog/bcm2835_wdt.c +++ b/drivers/watchdog/bcm2835_wdt.c | |||
@@ -36,6 +36,13 @@ | |||
36 | #define PM_RSTC_WRCFG_FULL_RESET 0x00000020 | 36 | #define PM_RSTC_WRCFG_FULL_RESET 0x00000020 |
37 | #define PM_RSTC_RESET 0x00000102 | 37 | #define PM_RSTC_RESET 0x00000102 |
38 | 38 | ||
39 | /* | ||
40 | * The Raspberry Pi firmware uses the RSTS register to know which partition ||
41 | * to boot from. The partition value is spread into bits 0, 2, 4, 6, 8, 10. ||
42 | * Partition 63 is a special partition used by the firmware to indicate halt. ||
43 | */ | ||
44 | #define PM_RSTS_RASPBERRYPI_HALT 0x555 | ||
45 | |||
39 | #define SECS_TO_WDOG_TICKS(x) ((x) << 16) | 46 | #define SECS_TO_WDOG_TICKS(x) ((x) << 16) |
40 | #define WDOG_TICKS_TO_SECS(x) ((x) >> 16) | 47 | #define WDOG_TICKS_TO_SECS(x) ((x) >> 16) |
41 | 48 | ||
@@ -151,8 +158,7 @@ static void bcm2835_power_off(void) | |||
151 | * hard reset. | 158 | * hard reset. |
152 | */ | 159 | */ |
153 | val = readl_relaxed(wdt->base + PM_RSTS); | 160 | val = readl_relaxed(wdt->base + PM_RSTS); |
154 | val &= PM_RSTC_WRCFG_CLR; | 161 | val |= PM_PASSWORD | PM_RSTS_RASPBERRYPI_HALT; |
155 | val |= PM_PASSWORD | PM_RSTS_HADWRH_SET; | ||
156 | writel_relaxed(val, wdt->base + PM_RSTS); | 162 | writel_relaxed(val, wdt->base + PM_RSTS); |
157 | 163 | ||
158 | /* Continue with normal reset mechanism */ | 164 | /* Continue with normal reset mechanism */ |
diff --git a/drivers/watchdog/gef_wdt.c b/drivers/watchdog/gef_wdt.c index cc1bdfc2ff71..006e2348022c 100644 --- a/drivers/watchdog/gef_wdt.c +++ b/drivers/watchdog/gef_wdt.c | |||
@@ -303,6 +303,7 @@ static const struct of_device_id gef_wdt_ids[] = { | |||
303 | }, | 303 | }, |
304 | {}, | 304 | {}, |
305 | }; | 305 | }; |
306 | MODULE_DEVICE_TABLE(of, gef_wdt_ids); | ||
306 | 307 | ||
307 | static struct platform_driver gef_wdt_driver = { | 308 | static struct platform_driver gef_wdt_driver = { |
308 | .driver = { | 309 | .driver = { |
diff --git a/drivers/watchdog/mena21_wdt.c b/drivers/watchdog/mena21_wdt.c index 69013007dc47..098fa9c34d6d 100644 --- a/drivers/watchdog/mena21_wdt.c +++ b/drivers/watchdog/mena21_wdt.c | |||
@@ -253,6 +253,7 @@ static const struct of_device_id a21_wdt_ids[] = { | |||
253 | { .compatible = "men,a021-wdt" }, | 253 | { .compatible = "men,a021-wdt" }, |
254 | { }, | 254 | { }, |
255 | }; | 255 | }; |
256 | MODULE_DEVICE_TABLE(of, a21_wdt_ids); | ||
256 | 257 | ||
257 | static struct platform_driver a21_wdt_driver = { | 258 | static struct platform_driver a21_wdt_driver = { |
258 | .probe = a21_wdt_probe, | 259 | .probe = a21_wdt_probe, |
diff --git a/drivers/watchdog/moxart_wdt.c b/drivers/watchdog/moxart_wdt.c index 2789da2c0515..60b0605bd7e6 100644 --- a/drivers/watchdog/moxart_wdt.c +++ b/drivers/watchdog/moxart_wdt.c | |||
@@ -168,6 +168,7 @@ static const struct of_device_id moxart_watchdog_match[] = { | |||
168 | { .compatible = "moxa,moxart-watchdog" }, | 168 | { .compatible = "moxa,moxart-watchdog" }, |
169 | { }, | 169 | { }, |
170 | }; | 170 | }; |
171 | MODULE_DEVICE_TABLE(of, moxart_watchdog_match); | ||
171 | 172 | ||
172 | static struct platform_driver moxart_wdt_driver = { | 173 | static struct platform_driver moxart_wdt_driver = { |
173 | .probe = moxart_wdt_probe, | 174 | .probe = moxart_wdt_probe, |
diff --git a/fs/dax.c b/fs/dax.c --- a/fs/dax.c +++ b/fs/dax.c | |||
@@ -569,8 +569,20 @@ int __dax_pmd_fault(struct vm_area_struct *vma, unsigned long address, | |||
569 | if (!buffer_size_valid(&bh) || bh.b_size < PMD_SIZE) | 569 | if (!buffer_size_valid(&bh) || bh.b_size < PMD_SIZE) |
570 | goto fallback; | 570 | goto fallback; |
571 | 571 | ||
572 | sector = bh.b_blocknr << (blkbits - 9); | ||
573 | |||
572 | if (buffer_unwritten(&bh) || buffer_new(&bh)) { | 574 | if (buffer_unwritten(&bh) || buffer_new(&bh)) { |
573 | int i; | 575 | int i; |
576 | |||
577 | length = bdev_direct_access(bh.b_bdev, sector, &kaddr, &pfn, | ||
578 | bh.b_size); | ||
579 | if (length < 0) { | ||
580 | result = VM_FAULT_SIGBUS; | ||
581 | goto out; | ||
582 | } | ||
583 | if ((length < PMD_SIZE) || (pfn & PG_PMD_COLOUR)) | ||
584 | goto fallback; | ||
585 | |||
574 | for (i = 0; i < PTRS_PER_PMD; i++) | 586 | for (i = 0; i < PTRS_PER_PMD; i++) |
575 | clear_pmem(kaddr + i * PAGE_SIZE, PAGE_SIZE); | 587 | clear_pmem(kaddr + i * PAGE_SIZE, PAGE_SIZE); |
576 | wmb_pmem(); | 588 | wmb_pmem(); |
@@ -623,7 +635,6 @@ int __dax_pmd_fault(struct vm_area_struct *vma, unsigned long address, | |||
623 | result = VM_FAULT_NOPAGE; | 635 | result = VM_FAULT_NOPAGE; |
624 | spin_unlock(ptl); | 636 | spin_unlock(ptl); |
625 | } else { | 637 | } else { |
626 | sector = bh.b_blocknr << (blkbits - 9); | ||
627 | length = bdev_direct_access(bh.b_bdev, sector, &kaddr, &pfn, | 638 | length = bdev_direct_access(bh.b_bdev, sector, &kaddr, &pfn, |
628 | bh.b_size); | 639 | bh.b_size); |
629 | if (length < 0) { | 640 | if (length < 0) { |
diff --git a/fs/ubifs/xattr.c b/fs/ubifs/xattr.c index 96f3448b6eb4..fd65b3f1923c 100644 --- a/fs/ubifs/xattr.c +++ b/fs/ubifs/xattr.c | |||
@@ -652,11 +652,8 @@ int ubifs_init_security(struct inode *dentry, struct inode *inode, | |||
652 | { | 652 | { |
653 | int err; | 653 | int err; |
654 | 654 | ||
655 | mutex_lock(&inode->i_mutex); | ||
656 | err = security_inode_init_security(inode, dentry, qstr, | 655 | err = security_inode_init_security(inode, dentry, qstr, |
657 | &init_xattrs, 0); | 656 | &init_xattrs, 0); |
658 | mutex_unlock(&inode->i_mutex); | ||
659 | |||
660 | if (err) { | 657 | if (err) { |
661 | struct ubifs_info *c = dentry->i_sb->s_fs_info; | 658 | struct ubifs_info *c = dentry->i_sb->s_fs_info; |
662 | ubifs_err(c, "cannot initialize security for inode %lu, error %d", | 659 | ubifs_err(c, "cannot initialize security for inode %lu, error %d", |
diff --git a/include/asm-generic/word-at-a-time.h b/include/asm-generic/word-at-a-time.h index 94f9ea8abcae..011dde083f23 100644 --- a/include/asm-generic/word-at-a-time.h +++ b/include/asm-generic/word-at-a-time.h | |||
@@ -1,15 +1,10 @@ | |||
1 | #ifndef _ASM_WORD_AT_A_TIME_H | 1 | #ifndef _ASM_WORD_AT_A_TIME_H |
2 | #define _ASM_WORD_AT_A_TIME_H | 2 | #define _ASM_WORD_AT_A_TIME_H |
3 | 3 | ||
4 | /* | ||
5 | * This says "generic", but it's actually big-endian only. | ||
6 | * Little-endian can use more efficient versions of these | ||
7 | * interfaces, see for example | ||
8 | * arch/x86/include/asm/word-at-a-time.h | ||
9 | * for those. | ||
10 | */ | ||
11 | |||
12 | #include <linux/kernel.h> | 4 | #include <linux/kernel.h> |
5 | #include <asm/byteorder.h> | ||
6 | |||
7 | #ifdef __BIG_ENDIAN | ||
13 | 8 | ||
14 | struct word_at_a_time { | 9 | struct word_at_a_time { |
15 | const unsigned long high_bits, low_bits; | 10 | const unsigned long high_bits, low_bits; |
@@ -53,4 +48,73 @@ static inline bool has_zero(unsigned long val, unsigned long *data, const struct | |||
53 | #define zero_bytemask(mask) (~1ul << __fls(mask)) | 48 | #define zero_bytemask(mask) (~1ul << __fls(mask)) |
54 | #endif | 49 | #endif |
55 | 50 | ||
51 | #else | ||
52 | |||
53 | /* | ||
54 | * The optimal byte mask counting is probably going to be something | ||
55 | * that is architecture-specific. If you have a reliably fast | ||
56 | * bit count instruction, that might be better than the multiply | ||
57 | * and shift, for example. | ||
58 | */ | ||
59 | struct word_at_a_time { | ||
60 | const unsigned long one_bits, high_bits; | ||
61 | }; | ||
62 | |||
63 | #define WORD_AT_A_TIME_CONSTANTS { REPEAT_BYTE(0x01), REPEAT_BYTE(0x80) } | ||
64 | |||
65 | #ifdef CONFIG_64BIT | ||
66 | |||
67 | /* | ||
68 | * Jan Achrenius on G+: microoptimized version of | ||
69 | * the simpler "(mask & ONEBYTES) * ONEBYTES >> 56" | ||
70 | * that works for the bytemasks without having to | ||
71 | * mask them first. | ||
72 | */ | ||
73 | static inline long count_masked_bytes(unsigned long mask) | ||
74 | { | ||
75 | return mask*0x0001020304050608ul >> 56; | ||
76 | } | ||
77 | |||
78 | #else /* 32-bit case */ | ||
79 | |||
80 | /* Carl Chatfield / Jan Achrenius G+ version for 32-bit */ | ||
81 | static inline long count_masked_bytes(long mask) | ||
82 | { | ||
83 | /* (000000 0000ff 00ffff ffffff) -> ( 1 1 2 3 ) */ | ||
84 | long a = (0x0ff0001+mask) >> 23; | ||
85 | /* Fix the 1 for 00 case */ | ||
86 | return a & mask; | ||
87 | } | ||
88 | |||
89 | #endif | ||
90 | |||
91 | /* Return nonzero if it has a zero */ | ||
92 | static inline unsigned long has_zero(unsigned long a, unsigned long *bits, const struct word_at_a_time *c) | ||
93 | { | ||
94 | unsigned long mask = ((a - c->one_bits) & ~a) & c->high_bits; | ||
95 | *bits = mask; | ||
96 | return mask; | ||
97 | } | ||
98 | |||
99 | static inline unsigned long prep_zero_mask(unsigned long a, unsigned long bits, const struct word_at_a_time *c) | ||
100 | { | ||
101 | return bits; | ||
102 | } | ||
103 | |||
104 | static inline unsigned long create_zero_mask(unsigned long bits) | ||
105 | { | ||
106 | bits = (bits - 1) & ~bits; | ||
107 | return bits >> 7; | ||
108 | } | ||
109 | |||
110 | /* The mask we created is directly usable as a bytemask */ | ||
111 | #define zero_bytemask(mask) (mask) | ||
112 | |||
113 | static inline unsigned long find_zero(unsigned long mask) | ||
114 | { | ||
115 | return count_masked_bytes(mask); | ||
116 | } | ||
117 | |||
118 | #endif /* __BIG_ENDIAN */ | ||
119 | |||
56 | #endif /* _ASM_WORD_AT_A_TIME_H */ | 120 | #endif /* _ASM_WORD_AT_A_TIME_H */ |
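The header above gains a little-endian implementation alongside the existing big-endian one: has_zero() flags any zero byte in a word, create_zero_mask() isolates everything below the lowest flag, and count_masked_bytes() turns that mask into a byte index with one multiply and shift. A self-contained demo of the 64-bit little-endian path (assumes a little-endian 64-bit host; not kernel code):

#include <stdio.h>
#include <string.h>

#define REPEAT_BYTE(x)	((~0ul / 0xff) * (x))	/* 0x0101... times x */

static unsigned long has_zero(unsigned long a, unsigned long *bits)
{
	unsigned long mask = ((a - REPEAT_BYTE(0x01)) & ~a) & REPEAT_BYTE(0x80);
	*bits = mask;			/* 0x80 flags; only the lowest one matters */
	return mask;
}

static unsigned long create_zero_mask(unsigned long bits)
{
	bits = (bits - 1) & ~bits;	/* keep bits below the first flag */
	return bits >> 7;		/* full byte mask up to that byte */
}

static unsigned long find_zero(unsigned long mask)
{
	return mask * 0x0001020304050608ul >> 56;	/* 64-bit variant */
}

int main(void)
{
	const char s[8] = "ab\0cdef";	/* first NUL at byte index 2 */
	unsigned long word, bits;

	memcpy(&word, s, sizeof(word));	/* little-endian load of 8 bytes */
	if (has_zero(word, &bits))
		printf("first zero byte at index %lu\n",
		       find_zero(create_zero_mask(bits)));
	return 0;
}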
diff --git a/include/drm/drm_crtc_helper.h b/include/drm/drm_crtc_helper.h index 2a747a91fded..3febb4b9fce9 100644 --- a/include/drm/drm_crtc_helper.h +++ b/include/drm/drm_crtc_helper.h | |||
@@ -240,5 +240,6 @@ extern void drm_kms_helper_hotplug_event(struct drm_device *dev); | |||
240 | 240 | ||
241 | extern void drm_kms_helper_poll_disable(struct drm_device *dev); | 241 | extern void drm_kms_helper_poll_disable(struct drm_device *dev); |
242 | extern void drm_kms_helper_poll_enable(struct drm_device *dev); | 242 | extern void drm_kms_helper_poll_enable(struct drm_device *dev); |
243 | extern void drm_kms_helper_poll_enable_locked(struct drm_device *dev); | ||
243 | 244 | ||
244 | #endif | 245 | #endif |
diff --git a/include/drm/drm_dp_helper.h b/include/drm/drm_dp_helper.h index 499e9f625aef..0212d139a480 100644 --- a/include/drm/drm_dp_helper.h +++ b/include/drm/drm_dp_helper.h | |||
@@ -568,6 +568,10 @@ | |||
568 | #define MODE_I2C_READ 4 | 568 | #define MODE_I2C_READ 4 |
569 | #define MODE_I2C_STOP 8 | 569 | #define MODE_I2C_STOP 8 |
570 | 570 | ||
571 | /* DP 1.2 MST PORTs - Section 2.5.1 v1.2a spec */ | ||
572 | #define DP_MST_PHYSICAL_PORT_0 0 | ||
573 | #define DP_MST_LOGICAL_PORT_0 8 | ||
574 | |||
571 | #define DP_LINK_STATUS_SIZE 6 | 575 | #define DP_LINK_STATUS_SIZE 6 |
572 | bool drm_dp_channel_eq_ok(const u8 link_status[DP_LINK_STATUS_SIZE], | 576 | bool drm_dp_channel_eq_ok(const u8 link_status[DP_LINK_STATUS_SIZE], |
573 | int lane_count); | 577 | int lane_count); |
diff --git a/include/drm/drm_dp_mst_helper.h b/include/drm/drm_dp_mst_helper.h index 86d0b25ed054..0f408b002d98 100644 --- a/include/drm/drm_dp_mst_helper.h +++ b/include/drm/drm_dp_mst_helper.h | |||
@@ -374,6 +374,7 @@ struct drm_dp_mst_topology_mgr; | |||
374 | struct drm_dp_mst_topology_cbs { | 374 | struct drm_dp_mst_topology_cbs { |
375 | /* create a connector for a port */ | 375 | /* create a connector for a port */ |
376 | struct drm_connector *(*add_connector)(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port, const char *path); | 376 | struct drm_connector *(*add_connector)(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port, const char *path); |
377 | void (*register_connector)(struct drm_connector *connector); | ||
377 | void (*destroy_connector)(struct drm_dp_mst_topology_mgr *mgr, | 378 | void (*destroy_connector)(struct drm_dp_mst_topology_mgr *mgr, |
378 | struct drm_connector *connector); | 379 | struct drm_connector *connector); |
379 | void (*hotplug)(struct drm_dp_mst_topology_mgr *mgr); | 380 | void (*hotplug)(struct drm_dp_mst_topology_mgr *mgr); |
diff --git a/include/linux/acpi.h b/include/linux/acpi.h index 7235c4851460..43856d19cf4d 100644 --- a/include/linux/acpi.h +++ b/include/linux/acpi.h | |||
@@ -217,6 +217,7 @@ struct pci_dev; | |||
217 | 217 | ||
218 | int acpi_pci_irq_enable (struct pci_dev *dev); | 218 | int acpi_pci_irq_enable (struct pci_dev *dev); |
219 | void acpi_penalize_isa_irq(int irq, int active); | 219 | void acpi_penalize_isa_irq(int irq, int active); |
220 | bool acpi_isa_irq_available(int irq); | ||
220 | void acpi_penalize_sci_irq(int irq, int trigger, int polarity); | 221 | void acpi_penalize_sci_irq(int irq, int trigger, int polarity); |
221 | void acpi_pci_irq_disable (struct pci_dev *dev); | 222 | void acpi_pci_irq_disable (struct pci_dev *dev); |
222 | 223 | ||
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h index 37d1602c4f7a..5e7d43ab61c0 100644 --- a/include/linux/blk-mq.h +++ b/include/linux/blk-mq.h | |||
@@ -145,7 +145,6 @@ enum { | |||
145 | BLK_MQ_F_SHOULD_MERGE = 1 << 0, | 145 | BLK_MQ_F_SHOULD_MERGE = 1 << 0, |
146 | BLK_MQ_F_TAG_SHARED = 1 << 1, | 146 | BLK_MQ_F_TAG_SHARED = 1 << 1, |
147 | BLK_MQ_F_SG_MERGE = 1 << 2, | 147 | BLK_MQ_F_SG_MERGE = 1 << 2, |
148 | BLK_MQ_F_SYSFS_UP = 1 << 3, | ||
149 | BLK_MQ_F_DEFER_ISSUE = 1 << 4, | 148 | BLK_MQ_F_DEFER_ISSUE = 1 << 4, |
150 | BLK_MQ_F_ALLOC_POLICY_START_BIT = 8, | 149 | BLK_MQ_F_ALLOC_POLICY_START_BIT = 8, |
151 | BLK_MQ_F_ALLOC_POLICY_BITS = 1, | 150 | BLK_MQ_F_ALLOC_POLICY_BITS = 1, |
@@ -215,7 +214,7 @@ void blk_mq_add_to_requeue_list(struct request *rq, bool at_head); | |||
215 | void blk_mq_cancel_requeue_work(struct request_queue *q); | 214 | void blk_mq_cancel_requeue_work(struct request_queue *q); |
216 | void blk_mq_kick_requeue_list(struct request_queue *q); | 215 | void blk_mq_kick_requeue_list(struct request_queue *q); |
217 | void blk_mq_abort_requeue_list(struct request_queue *q); | 216 | void blk_mq_abort_requeue_list(struct request_queue *q); |
218 | void blk_mq_complete_request(struct request *rq); | 217 | void blk_mq_complete_request(struct request *rq, int error); |
219 | 218 | ||
220 | void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx); | 219 | void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx); |
221 | void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx); | 220 | void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx); |
@@ -224,8 +223,6 @@ void blk_mq_start_hw_queues(struct request_queue *q); | |||
224 | void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async); | 223 | void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async); |
225 | void blk_mq_run_hw_queues(struct request_queue *q, bool async); | 224 | void blk_mq_run_hw_queues(struct request_queue *q, bool async); |
226 | void blk_mq_delay_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs); | 225 | void blk_mq_delay_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs); |
227 | void blk_mq_tag_busy_iter(struct blk_mq_hw_ctx *hctx, busy_iter_fn *fn, | ||
228 | void *priv); | ||
229 | void blk_mq_all_tag_busy_iter(struct blk_mq_tags *tags, busy_tag_iter_fn *fn, | 226 | void blk_mq_all_tag_busy_iter(struct blk_mq_tags *tags, busy_tag_iter_fn *fn, |
230 | void *priv); | 227 | void *priv); |
231 | void blk_mq_freeze_queue(struct request_queue *q); | 228 | void blk_mq_freeze_queue(struct request_queue *q); |
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 99da9ebc7377..19c2e947d4d1 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h | |||
@@ -456,6 +456,8 @@ struct request_queue { | |||
456 | struct blk_mq_tag_set *tag_set; | 456 | struct blk_mq_tag_set *tag_set; |
457 | struct list_head tag_set_list; | 457 | struct list_head tag_set_list; |
458 | struct bio_set *bio_split; | 458 | struct bio_set *bio_split; |
459 | |||
460 | bool mq_sysfs_init_done; | ||
459 | }; | 461 | }; |
460 | 462 | ||
461 | #define QUEUE_FLAG_QUEUED 1 /* uses generic tag queueing */ | 463 | #define QUEUE_FLAG_QUEUED 1 /* uses generic tag queueing */ |
diff --git a/include/linux/iova.h b/include/linux/iova.h index 3920a19d8194..92f7177db2ce 100644 --- a/include/linux/iova.h +++ b/include/linux/iova.h | |||
@@ -68,8 +68,8 @@ static inline unsigned long iova_pfn(struct iova_domain *iovad, dma_addr_t iova) | |||
68 | return iova >> iova_shift(iovad); | 68 | return iova >> iova_shift(iovad); |
69 | } | 69 | } |
70 | 70 | ||
71 | int iommu_iova_cache_init(void); | 71 | int iova_cache_get(void); |
72 | void iommu_iova_cache_destroy(void); | 72 | void iova_cache_put(void); |
73 | 73 | ||
74 | struct iova *alloc_iova_mem(void); | 74 | struct iova *alloc_iova_mem(void); |
75 | void free_iova_mem(struct iova *iova); | 75 | void free_iova_mem(struct iova *iova); |
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h index ad800e62cb7a..6452ff4c463f 100644 --- a/include/linux/memcontrol.h +++ b/include/linux/memcontrol.h | |||
@@ -242,7 +242,6 @@ struct mem_cgroup { | |||
242 | * percpu counter. | 242 | * percpu counter. |
243 | */ | 243 | */ |
244 | struct mem_cgroup_stat_cpu __percpu *stat; | 244 | struct mem_cgroup_stat_cpu __percpu *stat; |
245 | spinlock_t pcp_counter_lock; | ||
246 | 245 | ||
247 | #if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_INET) | 246 | #if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_INET) |
248 | struct cg_proto tcp_mem; | 247 | struct cg_proto tcp_mem; |
diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h index 8eb3b19af2a4..250b1ff8b48d 100644 --- a/include/linux/mlx5/device.h +++ b/include/linux/mlx5/device.h | |||
@@ -402,17 +402,6 @@ struct mlx5_cmd_teardown_hca_mbox_out { | |||
402 | u8 rsvd[8]; | 402 | u8 rsvd[8]; |
403 | }; | 403 | }; |
404 | 404 | ||
405 | struct mlx5_cmd_query_special_contexts_mbox_in { | ||
406 | struct mlx5_inbox_hdr hdr; | ||
407 | u8 rsvd[8]; | ||
408 | }; | ||
409 | |||
410 | struct mlx5_cmd_query_special_contexts_mbox_out { | ||
411 | struct mlx5_outbox_hdr hdr; | ||
412 | __be32 dump_fill_mkey; | ||
413 | __be32 resd_lkey; | ||
414 | }; | ||
415 | |||
416 | struct mlx5_cmd_layout { | 405 | struct mlx5_cmd_layout { |
417 | u8 type; | 406 | u8 type; |
418 | u8 rsvd0[3]; | 407 | u8 rsvd0[3]; |
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h index 27b53f9a24ad..8b6d6f2154a4 100644 --- a/include/linux/mlx5/driver.h +++ b/include/linux/mlx5/driver.h | |||
@@ -845,7 +845,6 @@ void *mlx5_get_protocol_dev(struct mlx5_core_dev *mdev, int protocol); | |||
845 | int mlx5_register_interface(struct mlx5_interface *intf); | 845 | int mlx5_register_interface(struct mlx5_interface *intf); |
846 | void mlx5_unregister_interface(struct mlx5_interface *intf); | 846 | void mlx5_unregister_interface(struct mlx5_interface *intf); |
847 | int mlx5_core_query_vendor_id(struct mlx5_core_dev *mdev, u32 *vendor_id); | 847 | int mlx5_core_query_vendor_id(struct mlx5_core_dev *mdev, u32 *vendor_id); |
848 | int mlx5_core_query_special_context(struct mlx5_core_dev *dev, u32 *rsvd_lkey); | ||
849 | 848 | ||
850 | struct mlx5_profile { | 849 | struct mlx5_profile { |
851 | u64 mask; | 850 | u64 mask; |
diff --git a/include/linux/mm.h b/include/linux/mm.h index 91c08f6f0dc9..80001de019ba 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h | |||
@@ -905,6 +905,27 @@ static inline void set_page_links(struct page *page, enum zone_type zone, | |||
905 | #endif | 905 | #endif |
906 | } | 906 | } |
907 | 907 | ||
908 | #ifdef CONFIG_MEMCG | ||
909 | static inline struct mem_cgroup *page_memcg(struct page *page) | ||
910 | { | ||
911 | return page->mem_cgroup; | ||
912 | } | ||
913 | |||
914 | static inline void set_page_memcg(struct page *page, struct mem_cgroup *memcg) | ||
915 | { | ||
916 | page->mem_cgroup = memcg; | ||
917 | } | ||
918 | #else | ||
919 | static inline struct mem_cgroup *page_memcg(struct page *page) | ||
920 | { | ||
921 | return NULL; | ||
922 | } | ||
923 | |||
924 | static inline void set_page_memcg(struct page *page, struct mem_cgroup *memcg) | ||
925 | { | ||
926 | } | ||
927 | #endif | ||
928 | |||
908 | /* | 929 | /* |
909 | * Some inline functions in vmstat.h depend on page_zone() | 930 | * Some inline functions in vmstat.h depend on page_zone() |
910 | */ | 931 | */ |
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h index ff476515f716..581abf848566 100644 --- a/include/linux/rcupdate.h +++ b/include/linux/rcupdate.h | |||
@@ -230,12 +230,11 @@ void __wait_rcu_gp(bool checktiny, int n, call_rcu_func_t *crcu_array, | |||
230 | struct rcu_synchronize *rs_array); | 230 | struct rcu_synchronize *rs_array); |
231 | 231 | ||
232 | #define _wait_rcu_gp(checktiny, ...) \ | 232 | #define _wait_rcu_gp(checktiny, ...) \ |
233 | do { \ | 233 | do { \ |
234 | call_rcu_func_t __crcu_array[] = { __VA_ARGS__ }; \ | 234 | call_rcu_func_t __crcu_array[] = { __VA_ARGS__ }; \ |
235 | const int __n = ARRAY_SIZE(__crcu_array); \ | 235 | struct rcu_synchronize __rs_array[ARRAY_SIZE(__crcu_array)]; \ |
236 | struct rcu_synchronize __rs_array[__n]; \ | 236 | __wait_rcu_gp(checktiny, ARRAY_SIZE(__crcu_array), \ |
237 | \ | 237 | __crcu_array, __rs_array); \ |
238 | __wait_rcu_gp(checktiny, __n, __crcu_array, __rs_array); \ | ||
239 | } while (0) | 238 | } while (0) |
240 | 239 | ||
241 | #define wait_rcu_gp(...) _wait_rcu_gp(false, __VA_ARGS__) | 240 | #define wait_rcu_gp(...) _wait_rcu_gp(false, __VA_ARGS__) |
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 2b0a30a6e31c..4398411236f1 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h | |||
@@ -2708,7 +2708,7 @@ static inline void skb_postpull_rcsum(struct sk_buff *skb, | |||
2708 | if (skb->ip_summed == CHECKSUM_COMPLETE) | 2708 | if (skb->ip_summed == CHECKSUM_COMPLETE) |
2709 | skb->csum = csum_sub(skb->csum, csum_partial(start, len, 0)); | 2709 | skb->csum = csum_sub(skb->csum, csum_partial(start, len, 0)); |
2710 | else if (skb->ip_summed == CHECKSUM_PARTIAL && | 2710 | else if (skb->ip_summed == CHECKSUM_PARTIAL && |
2711 | skb_checksum_start_offset(skb) <= len) | 2711 | skb_checksum_start_offset(skb) < 0) |
2712 | skb->ip_summed = CHECKSUM_NONE; | 2712 | skb->ip_summed = CHECKSUM_NONE; |
2713 | } | 2713 | } |
2714 | 2714 | ||
diff --git a/include/linux/string.h b/include/linux/string.h index a8d90db9c4b0..9ef7795e65e4 100644 --- a/include/linux/string.h +++ b/include/linux/string.h | |||
@@ -25,6 +25,9 @@ extern char * strncpy(char *,const char *, __kernel_size_t); | |||
25 | #ifndef __HAVE_ARCH_STRLCPY | 25 | #ifndef __HAVE_ARCH_STRLCPY |
26 | size_t strlcpy(char *, const char *, size_t); | 26 | size_t strlcpy(char *, const char *, size_t); |
27 | #endif | 27 | #endif |
28 | #ifndef __HAVE_ARCH_STRSCPY | ||
29 | ssize_t __must_check strscpy(char *, const char *, size_t); | ||
30 | #endif | ||
28 | #ifndef __HAVE_ARCH_STRCAT | 31 | #ifndef __HAVE_ARCH_STRCAT |
29 | extern char * strcat(char *, const char *); | 32 | extern char * strcat(char *, const char *); |
30 | #endif | 33 | #endif |
diff --git a/include/net/af_unix.h b/include/net/af_unix.h index 4a167b30a12f..cb1b9bbda332 100644 --- a/include/net/af_unix.h +++ b/include/net/af_unix.h | |||
@@ -63,7 +63,11 @@ struct unix_sock { | |||
63 | #define UNIX_GC_MAYBE_CYCLE 1 | 63 | #define UNIX_GC_MAYBE_CYCLE 1 |
64 | struct socket_wq peer_wq; | 64 | struct socket_wq peer_wq; |
65 | }; | 65 | }; |
66 | #define unix_sk(__sk) ((struct unix_sock *)__sk) | 66 | |
67 | static inline struct unix_sock *unix_sk(struct sock *sk) | ||
68 | { | ||
69 | return (struct unix_sock *)sk; | ||
70 | } | ||
67 | 71 | ||
68 | #define peer_wait peer_wq.wait | 72 | #define peer_wait peer_wq.wait |
69 | 73 | ||
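
A side note on the unix_sk() hunk above: replacing the cast macro with a static inline gains compile-time type checking of the argument. The following stand-alone sketch only illustrates that difference; the struct definitions here are toy placeholders, not the real kernel ones.

#include <stdio.h>

struct sock { int dummy; };
struct unix_sock { struct sock sk; int extra; };

/* Old style: a cast macro converts any pointer without complaint. */
#define unix_sk_macro(__sk) ((struct unix_sock *)(__sk))

/* New style: the compiler checks the argument really is a struct sock *. */
static inline struct unix_sock *unix_sk_inline(struct sock *sk)
{
        return (struct unix_sock *)sk;
}

int main(void)
{
        struct unix_sock u = { { 0 }, 42 };
        int not_a_sock = 0;

        printf("%d\n", unix_sk_macro(&u.sk)->extra);   /* fine with both */
        printf("%d\n", unix_sk_inline(&u.sk)->extra);

        (void)unix_sk_macro(&not_a_sock);              /* silently accepted */
        /* unix_sk_inline(&not_a_sock); would draw an incompatible-pointer warning */
        return 0;
}
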
diff --git a/include/uapi/linux/userfaultfd.h b/include/uapi/linux/userfaultfd.h index df0e09bb7dd5..9057d7af3ae1 100644 --- a/include/uapi/linux/userfaultfd.h +++ b/include/uapi/linux/userfaultfd.h | |||
@@ -11,8 +11,6 @@ | |||
11 | 11 | ||
12 | #include <linux/types.h> | 12 | #include <linux/types.h> |
13 | 13 | ||
14 | #include <linux/compiler.h> | ||
15 | |||
16 | #define UFFD_API ((__u64)0xAA) | 14 | #define UFFD_API ((__u64)0xAA) |
17 | /* | 15 | /* |
18 | * After implementing the respective features it will become: | 16 | * After implementing the respective features it will become: |
diff --git a/ipc/msg.c b/ipc/msg.c --- a/ipc/msg.c +++ b/ipc/msg.c | |||
@@ -137,13 +137,6 @@ static int newque(struct ipc_namespace *ns, struct ipc_params *params) | |||
137 | return retval; | 137 | return retval; |
138 | } | 138 | } |
139 | 139 | ||
140 | /* ipc_addid() locks msq upon success. */ | ||
141 | id = ipc_addid(&msg_ids(ns), &msq->q_perm, ns->msg_ctlmni); | ||
142 | if (id < 0) { | ||
143 | ipc_rcu_putref(msq, msg_rcu_free); | ||
144 | return id; | ||
145 | } | ||
146 | |||
147 | msq->q_stime = msq->q_rtime = 0; | 140 | msq->q_stime = msq->q_rtime = 0; |
148 | msq->q_ctime = get_seconds(); | 141 | msq->q_ctime = get_seconds(); |
149 | msq->q_cbytes = msq->q_qnum = 0; | 142 | msq->q_cbytes = msq->q_qnum = 0; |
@@ -153,6 +146,13 @@ static int newque(struct ipc_namespace *ns, struct ipc_params *params) | |||
153 | INIT_LIST_HEAD(&msq->q_receivers); | 146 | INIT_LIST_HEAD(&msq->q_receivers); |
154 | INIT_LIST_HEAD(&msq->q_senders); | 147 | INIT_LIST_HEAD(&msq->q_senders); |
155 | 148 | ||
149 | /* ipc_addid() locks msq upon success. */ | ||
150 | id = ipc_addid(&msg_ids(ns), &msq->q_perm, ns->msg_ctlmni); | ||
151 | if (id < 0) { | ||
152 | ipc_rcu_putref(msq, msg_rcu_free); | ||
153 | return id; | ||
154 | } | ||
155 | |||
156 | ipc_unlock_object(&msq->q_perm); | 156 | ipc_unlock_object(&msq->q_perm); |
157 | rcu_read_unlock(); | 157 | rcu_read_unlock(); |
158 | 158 | ||
diff --git a/ipc/shm.c b/ipc/shm.c --- a/ipc/shm.c +++ b/ipc/shm.c | |||
@@ -551,12 +551,6 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params) | |||
551 | if (IS_ERR(file)) | 551 | if (IS_ERR(file)) |
552 | goto no_file; | 552 | goto no_file; |
553 | 553 | ||
554 | id = ipc_addid(&shm_ids(ns), &shp->shm_perm, ns->shm_ctlmni); | ||
555 | if (id < 0) { | ||
556 | error = id; | ||
557 | goto no_id; | ||
558 | } | ||
559 | |||
560 | shp->shm_cprid = task_tgid_vnr(current); | 554 | shp->shm_cprid = task_tgid_vnr(current); |
561 | shp->shm_lprid = 0; | 555 | shp->shm_lprid = 0; |
562 | shp->shm_atim = shp->shm_dtim = 0; | 556 | shp->shm_atim = shp->shm_dtim = 0; |
@@ -565,6 +559,13 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params) | |||
565 | shp->shm_nattch = 0; | 559 | shp->shm_nattch = 0; |
566 | shp->shm_file = file; | 560 | shp->shm_file = file; |
567 | shp->shm_creator = current; | 561 | shp->shm_creator = current; |
562 | |||
563 | id = ipc_addid(&shm_ids(ns), &shp->shm_perm, ns->shm_ctlmni); | ||
564 | if (id < 0) { | ||
565 | error = id; | ||
566 | goto no_id; | ||
567 | } | ||
568 | |||
568 | list_add(&shp->shm_clist, ¤t->sysvshm.shm_clist); | 569 | list_add(&shp->shm_clist, ¤t->sysvshm.shm_clist); |
569 | 570 | ||
570 | /* | 571 | /* |
diff --git a/ipc/util.c b/ipc/util.c index be4230020a1f..0f401d94b7c6 100644 --- a/ipc/util.c +++ b/ipc/util.c | |||
@@ -237,6 +237,10 @@ int ipc_addid(struct ipc_ids *ids, struct kern_ipc_perm *new, int size) | |||
237 | rcu_read_lock(); | 237 | rcu_read_lock(); |
238 | spin_lock(&new->lock); | 238 | spin_lock(&new->lock); |
239 | 239 | ||
240 | current_euid_egid(&euid, &egid); | ||
241 | new->cuid = new->uid = euid; | ||
242 | new->gid = new->cgid = egid; | ||
243 | |||
240 | id = idr_alloc(&ids->ipcs_idr, new, | 244 | id = idr_alloc(&ids->ipcs_idr, new, |
241 | (next_id < 0) ? 0 : ipcid_to_idx(next_id), 0, | 245 | (next_id < 0) ? 0 : ipcid_to_idx(next_id), 0, |
242 | GFP_NOWAIT); | 246 | GFP_NOWAIT); |
@@ -249,10 +253,6 @@ int ipc_addid(struct ipc_ids *ids, struct kern_ipc_perm *new, int size) | |||
249 | 253 | ||
250 | ids->in_use++; | 254 | ids->in_use++; |
251 | 255 | ||
252 | current_euid_egid(&euid, &egid); | ||
253 | new->cuid = new->uid = euid; | ||
254 | new->gid = new->cgid = egid; | ||
255 | |||
256 | if (next_id < 0) { | 256 | if (next_id < 0) { |
257 | new->seq = ids->seq++; | 257 | new->seq = ids->seq++; |
258 | if (ids->seq > IPCID_SEQ_MAX) | 258 | if (ids->seq > IPCID_SEQ_MAX) |
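
The msg.c, shm.c and util.c hunks above all move field initialization (uid/gid, timestamps, list heads) ahead of ipc_addid()/idr_alloc(), the point where the object becomes visible to concurrent lookups. A minimal sketch of that publish-last ordering, with a toy array standing in for the ipc idr (everything below is illustrative, not kernel code):

#include <stdio.h>
#include <string.h>

struct object {
        int owner_uid;
        char name[16];
};

#define MAX_OBJS 8
static struct object *registry[MAX_OBJS];   /* toy stand-in for the idr */

/* Once the object is in the registry, concurrent lookups may use it,
 * so it must already be fully initialized. */
static int publish(struct object *obj)
{
        int id;

        for (id = 0; id < MAX_OBJS; id++) {
                if (!registry[id]) {
                        registry[id] = obj;
                        return id;
                }
        }
        return -1;   /* no free slot */
}

static int create_object(struct object *obj, int uid, const char *name)
{
        /* Initialize everything first ... */
        obj->owner_uid = uid;
        strncpy(obj->name, name, sizeof(obj->name) - 1);
        obj->name[sizeof(obj->name) - 1] = '\0';

        /* ... and only then make it visible. */
        return publish(obj);
}

int main(void)
{
        struct object o;
        int id = create_object(&o, 1000, "queue");

        printf("id=%d owner=%d\n", id, o.owner_uid);
        return 0;
}
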
diff --git a/kernel/events/core.c b/kernel/events/core.c index f548f69c4299..b11756f9b6dc 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c | |||
@@ -1243,11 +1243,7 @@ static inline void perf_event__state_init(struct perf_event *event) | |||
1243 | PERF_EVENT_STATE_INACTIVE; | 1243 | PERF_EVENT_STATE_INACTIVE; |
1244 | } | 1244 | } |
1245 | 1245 | ||
1246 | /* | 1246 | static void __perf_event_read_size(struct perf_event *event, int nr_siblings) |
1247 | * Called at perf_event creation and when events are attached/detached from a | ||
1248 | * group. | ||
1249 | */ | ||
1250 | static void perf_event__read_size(struct perf_event *event) | ||
1251 | { | 1247 | { |
1252 | int entry = sizeof(u64); /* value */ | 1248 | int entry = sizeof(u64); /* value */ |
1253 | int size = 0; | 1249 | int size = 0; |
@@ -1263,7 +1259,7 @@ static void perf_event__read_size(struct perf_event *event) | |||
1263 | entry += sizeof(u64); | 1259 | entry += sizeof(u64); |
1264 | 1260 | ||
1265 | if (event->attr.read_format & PERF_FORMAT_GROUP) { | 1261 | if (event->attr.read_format & PERF_FORMAT_GROUP) { |
1266 | nr += event->group_leader->nr_siblings; | 1262 | nr += nr_siblings; |
1267 | size += sizeof(u64); | 1263 | size += sizeof(u64); |
1268 | } | 1264 | } |
1269 | 1265 | ||
@@ -1271,14 +1267,11 @@ static void perf_event__read_size(struct perf_event *event) | |||
1271 | event->read_size = size; | 1267 | event->read_size = size; |
1272 | } | 1268 | } |
1273 | 1269 | ||
1274 | static void perf_event__header_size(struct perf_event *event) | 1270 | static void __perf_event_header_size(struct perf_event *event, u64 sample_type) |
1275 | { | 1271 | { |
1276 | struct perf_sample_data *data; | 1272 | struct perf_sample_data *data; |
1277 | u64 sample_type = event->attr.sample_type; | ||
1278 | u16 size = 0; | 1273 | u16 size = 0; |
1279 | 1274 | ||
1280 | perf_event__read_size(event); | ||
1281 | |||
1282 | if (sample_type & PERF_SAMPLE_IP) | 1275 | if (sample_type & PERF_SAMPLE_IP) |
1283 | size += sizeof(data->ip); | 1276 | size += sizeof(data->ip); |
1284 | 1277 | ||
@@ -1303,6 +1296,17 @@ static void perf_event__header_size(struct perf_event *event) | |||
1303 | event->header_size = size; | 1296 | event->header_size = size; |
1304 | } | 1297 | } |
1305 | 1298 | ||
1299 | /* | ||
1300 | * Called at perf_event creation and when events are attached/detached from a | ||
1301 | * group. | ||
1302 | */ | ||
1303 | static void perf_event__header_size(struct perf_event *event) | ||
1304 | { | ||
1305 | __perf_event_read_size(event, | ||
1306 | event->group_leader->nr_siblings); | ||
1307 | __perf_event_header_size(event, event->attr.sample_type); | ||
1308 | } | ||
1309 | |||
1306 | static void perf_event__id_header_size(struct perf_event *event) | 1310 | static void perf_event__id_header_size(struct perf_event *event) |
1307 | { | 1311 | { |
1308 | struct perf_sample_data *data; | 1312 | struct perf_sample_data *data; |
@@ -1330,6 +1334,27 @@ static void perf_event__id_header_size(struct perf_event *event) | |||
1330 | event->id_header_size = size; | 1334 | event->id_header_size = size; |
1331 | } | 1335 | } |
1332 | 1336 | ||
1337 | static bool perf_event_validate_size(struct perf_event *event) | ||
1338 | { | ||
1339 | /* | ||
1340 | * The values computed here will be over-written when we actually | ||
1341 | * attach the event. | ||
1342 | */ | ||
1343 | __perf_event_read_size(event, event->group_leader->nr_siblings + 1); | ||
1344 | __perf_event_header_size(event, event->attr.sample_type & ~PERF_SAMPLE_READ); | ||
1345 | perf_event__id_header_size(event); | ||
1346 | |||
1347 | /* | ||
1348 | * Sum the lot; should not exceed the 64k limit we have on records. | ||
1349 | * Conservative limit to allow for callchains and other variable fields. | ||
1350 | */ | ||
1351 | if (event->read_size + event->header_size + | ||
1352 | event->id_header_size + sizeof(struct perf_event_header) >= 16*1024) | ||
1353 | return false; | ||
1354 | |||
1355 | return true; | ||
1356 | } | ||
1357 | |||
1333 | static void perf_group_attach(struct perf_event *event) | 1358 | static void perf_group_attach(struct perf_event *event) |
1334 | { | 1359 | { |
1335 | struct perf_event *group_leader = event->group_leader, *pos; | 1360 | struct perf_event *group_leader = event->group_leader, *pos; |
@@ -8297,13 +8322,35 @@ SYSCALL_DEFINE5(perf_event_open, | |||
8297 | 8322 | ||
8298 | if (move_group) { | 8323 | if (move_group) { |
8299 | gctx = group_leader->ctx; | 8324 | gctx = group_leader->ctx; |
8325 | mutex_lock_double(&gctx->mutex, &ctx->mutex); | ||
8326 | } else { | ||
8327 | mutex_lock(&ctx->mutex); | ||
8328 | } | ||
8300 | 8329 | ||
8330 | if (!perf_event_validate_size(event)) { | ||
8331 | err = -E2BIG; | ||
8332 | goto err_locked; | ||
8333 | } | ||
8334 | |||
8335 | /* | ||
8336 | * Must be under the same ctx::mutex as perf_install_in_context(), | ||
8337 | * because we need to serialize with concurrent event creation. | ||
8338 | */ | ||
8339 | if (!exclusive_event_installable(event, ctx)) { | ||
8340 | /* exclusive and group stuff are assumed mutually exclusive */ | ||
8341 | WARN_ON_ONCE(move_group); | ||
8342 | |||
8343 | err = -EBUSY; | ||
8344 | goto err_locked; | ||
8345 | } | ||
8346 | |||
8347 | WARN_ON_ONCE(ctx->parent_ctx); | ||
8348 | |||
8349 | if (move_group) { | ||
8301 | /* | 8350 | /* |
8302 | * See perf_event_ctx_lock() for comments on the details | 8351 | * See perf_event_ctx_lock() for comments on the details |
8303 | * of swizzling perf_event::ctx. | 8352 | * of swizzling perf_event::ctx. |
8304 | */ | 8353 | */ |
8305 | mutex_lock_double(&gctx->mutex, &ctx->mutex); | ||
8306 | |||
8307 | perf_remove_from_context(group_leader, false); | 8354 | perf_remove_from_context(group_leader, false); |
8308 | 8355 | ||
8309 | list_for_each_entry(sibling, &group_leader->sibling_list, | 8356 | list_for_each_entry(sibling, &group_leader->sibling_list, |
@@ -8311,13 +8358,7 @@ SYSCALL_DEFINE5(perf_event_open, | |||
8311 | perf_remove_from_context(sibling, false); | 8358 | perf_remove_from_context(sibling, false); |
8312 | put_ctx(gctx); | 8359 | put_ctx(gctx); |
8313 | } | 8360 | } |
8314 | } else { | ||
8315 | mutex_lock(&ctx->mutex); | ||
8316 | } | ||
8317 | 8361 | ||
8318 | WARN_ON_ONCE(ctx->parent_ctx); | ||
8319 | |||
8320 | if (move_group) { | ||
8321 | /* | 8362 | /* |
8322 | * Wait for everybody to stop referencing the events through | 8363 | * Wait for everybody to stop referencing the events through |
8323 | * the old lists, before installing it on new lists. | 8364 | * the old lists, before installing it on new lists. |
@@ -8349,22 +8390,29 @@ SYSCALL_DEFINE5(perf_event_open, | |||
8349 | perf_event__state_init(group_leader); | 8390 | perf_event__state_init(group_leader); |
8350 | perf_install_in_context(ctx, group_leader, group_leader->cpu); | 8391 | perf_install_in_context(ctx, group_leader, group_leader->cpu); |
8351 | get_ctx(ctx); | 8392 | get_ctx(ctx); |
8352 | } | ||
8353 | 8393 | ||
8354 | if (!exclusive_event_installable(event, ctx)) { | 8394 | /* |
8355 | err = -EBUSY; | 8395 | * Now that all events are installed in @ctx, nothing |
8356 | mutex_unlock(&ctx->mutex); | 8396 | * references @gctx anymore, so drop the last reference we have |
8357 | fput(event_file); | 8397 | * on it. |
8358 | goto err_context; | 8398 | */ |
8399 | put_ctx(gctx); | ||
8359 | } | 8400 | } |
8360 | 8401 | ||
8402 | /* | ||
8403 | * Precalculate sample_data sizes; do while holding ctx::mutex such | ||
8404 | * that we're serialized against further additions and before | ||
8405 | * perf_install_in_context() which is the point the event is active and | ||
8406 | * can use these values. | ||
8407 | */ | ||
8408 | perf_event__header_size(event); | ||
8409 | perf_event__id_header_size(event); | ||
8410 | |||
8361 | perf_install_in_context(ctx, event, event->cpu); | 8411 | perf_install_in_context(ctx, event, event->cpu); |
8362 | perf_unpin_context(ctx); | 8412 | perf_unpin_context(ctx); |
8363 | 8413 | ||
8364 | if (move_group) { | 8414 | if (move_group) |
8365 | mutex_unlock(&gctx->mutex); | 8415 | mutex_unlock(&gctx->mutex); |
8366 | put_ctx(gctx); | ||
8367 | } | ||
8368 | mutex_unlock(&ctx->mutex); | 8416 | mutex_unlock(&ctx->mutex); |
8369 | 8417 | ||
8370 | put_online_cpus(); | 8418 | put_online_cpus(); |
@@ -8376,12 +8424,6 @@ SYSCALL_DEFINE5(perf_event_open, | |||
8376 | mutex_unlock(¤t->perf_event_mutex); | 8424 | mutex_unlock(¤t->perf_event_mutex); |
8377 | 8425 | ||
8378 | /* | 8426 | /* |
8379 | * Precalculate sample_data sizes | ||
8380 | */ | ||
8381 | perf_event__header_size(event); | ||
8382 | perf_event__id_header_size(event); | ||
8383 | |||
8384 | /* | ||
8385 | * Drop the reference on the group_event after placing the | 8427 | * Drop the reference on the group_event after placing the |
8386 | * new event on the sibling_list. This ensures destruction | 8428 | * new event on the sibling_list. This ensures destruction |
8387 | * of the group leader will find the pointer to itself in | 8429 | * of the group leader will find the pointer to itself in |
@@ -8391,6 +8433,12 @@ SYSCALL_DEFINE5(perf_event_open, | |||
8391 | fd_install(event_fd, event_file); | 8433 | fd_install(event_fd, event_file); |
8392 | return event_fd; | 8434 | return event_fd; |
8393 | 8435 | ||
8436 | err_locked: | ||
8437 | if (move_group) | ||
8438 | mutex_unlock(&gctx->mutex); | ||
8439 | mutex_unlock(&ctx->mutex); | ||
8440 | /* err_file: */ | ||
8441 | fput(event_file); | ||
8394 | err_context: | 8442 | err_context: |
8395 | perf_unpin_context(ctx); | 8443 | perf_unpin_context(ctx); |
8396 | put_ctx(ctx); | 8444 | put_ctx(ctx); |
diff --git a/kernel/irq/proc.c b/kernel/irq/proc.c index e3a8c9577ba6..a50ddc9417ff 100644 --- a/kernel/irq/proc.c +++ b/kernel/irq/proc.c | |||
@@ -12,6 +12,7 @@ | |||
12 | #include <linux/seq_file.h> | 12 | #include <linux/seq_file.h> |
13 | #include <linux/interrupt.h> | 13 | #include <linux/interrupt.h> |
14 | #include <linux/kernel_stat.h> | 14 | #include <linux/kernel_stat.h> |
15 | #include <linux/mutex.h> | ||
15 | 16 | ||
16 | #include "internals.h" | 17 | #include "internals.h" |
17 | 18 | ||
@@ -323,18 +324,29 @@ void register_handler_proc(unsigned int irq, struct irqaction *action) | |||
323 | 324 | ||
324 | void register_irq_proc(unsigned int irq, struct irq_desc *desc) | 325 | void register_irq_proc(unsigned int irq, struct irq_desc *desc) |
325 | { | 326 | { |
327 | static DEFINE_MUTEX(register_lock); | ||
326 | char name [MAX_NAMELEN]; | 328 | char name [MAX_NAMELEN]; |
327 | 329 | ||
328 | if (!root_irq_dir || (desc->irq_data.chip == &no_irq_chip) || desc->dir) | 330 | if (!root_irq_dir || (desc->irq_data.chip == &no_irq_chip)) |
329 | return; | 331 | return; |
330 | 332 | ||
333 | /* | ||
334 | * irq directories are registered only when a handler is | ||
335 | * added, not when the descriptor is created, so multiple | ||
336 | * tasks might try to register at the same time. | ||
337 | */ | ||
338 | mutex_lock(®ister_lock); | ||
339 | |||
340 | if (desc->dir) | ||
341 | goto out_unlock; | ||
342 | |||
331 | memset(name, 0, MAX_NAMELEN); | 343 | memset(name, 0, MAX_NAMELEN); |
332 | sprintf(name, "%d", irq); | 344 | sprintf(name, "%d", irq); |
333 | 345 | ||
334 | /* create /proc/irq/1234 */ | 346 | /* create /proc/irq/1234 */ |
335 | desc->dir = proc_mkdir(name, root_irq_dir); | 347 | desc->dir = proc_mkdir(name, root_irq_dir); |
336 | if (!desc->dir) | 348 | if (!desc->dir) |
337 | return; | 349 | goto out_unlock; |
338 | 350 | ||
339 | #ifdef CONFIG_SMP | 351 | #ifdef CONFIG_SMP |
340 | /* create /proc/irq/<irq>/smp_affinity */ | 352 | /* create /proc/irq/<irq>/smp_affinity */ |
@@ -355,6 +367,9 @@ void register_irq_proc(unsigned int irq, struct irq_desc *desc) | |||
355 | 367 | ||
356 | proc_create_data("spurious", 0444, desc->dir, | 368 | proc_create_data("spurious", 0444, desc->dir, |
357 | &irq_spurious_proc_fops, (void *)(long)irq); | 369 | &irq_spurious_proc_fops, (void *)(long)irq); |
370 | |||
371 | out_unlock: | ||
372 | mutex_unlock(®ister_lock); | ||
358 | } | 373 | } |
359 | 374 | ||
360 | void unregister_irq_proc(unsigned int irq, struct irq_desc *desc) | 375 | void unregister_irq_proc(unsigned int irq, struct irq_desc *desc) |
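
register_irq_proc() above takes a function-local mutex and re-checks desc->dir under it, because /proc/irq/NNN is created when a handler is added and two tasks can race to do that. A generic user-space sketch of the same check-under-lock pattern; the mutex and the "directory" below are simplified stand-ins, not the procfs API:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

static pthread_mutex_t register_lock = PTHREAD_MUTEX_INITIALIZER;
static void *dir;   /* stand-in for desc->dir */

static void register_once(void)
{
        /* Several callers may get here concurrently, so the existence
         * check has to be repeated under the lock. */
        pthread_mutex_lock(&register_lock);

        if (dir)                  /* someone else already registered */
                goto out_unlock;

        dir = malloc(1);          /* stand-in for proc_mkdir() */
        if (!dir)
                goto out_unlock;

        printf("registered\n");   /* child entries would be created here */

out_unlock:
        pthread_mutex_unlock(&register_lock);
}

int main(void)
{
        register_once();
        register_once();          /* second call is a no-op */
        free(dir);
        return 0;
}
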
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c index 8acfbf773e06..4e49cc4c9952 100644 --- a/kernel/locking/lockdep.c +++ b/kernel/locking/lockdep.c | |||
@@ -3068,7 +3068,7 @@ static int __lock_is_held(struct lockdep_map *lock); | |||
3068 | static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass, | 3068 | static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass, |
3069 | int trylock, int read, int check, int hardirqs_off, | 3069 | int trylock, int read, int check, int hardirqs_off, |
3070 | struct lockdep_map *nest_lock, unsigned long ip, | 3070 | struct lockdep_map *nest_lock, unsigned long ip, |
3071 | int references) | 3071 | int references, int pin_count) |
3072 | { | 3072 | { |
3073 | struct task_struct *curr = current; | 3073 | struct task_struct *curr = current; |
3074 | struct lock_class *class = NULL; | 3074 | struct lock_class *class = NULL; |
@@ -3157,7 +3157,7 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass, | |||
3157 | hlock->waittime_stamp = 0; | 3157 | hlock->waittime_stamp = 0; |
3158 | hlock->holdtime_stamp = lockstat_clock(); | 3158 | hlock->holdtime_stamp = lockstat_clock(); |
3159 | #endif | 3159 | #endif |
3160 | hlock->pin_count = 0; | 3160 | hlock->pin_count = pin_count; |
3161 | 3161 | ||
3162 | if (check && !mark_irqflags(curr, hlock)) | 3162 | if (check && !mark_irqflags(curr, hlock)) |
3163 | return 0; | 3163 | return 0; |
@@ -3343,7 +3343,7 @@ found_it: | |||
3343 | hlock_class(hlock)->subclass, hlock->trylock, | 3343 | hlock_class(hlock)->subclass, hlock->trylock, |
3344 | hlock->read, hlock->check, hlock->hardirqs_off, | 3344 | hlock->read, hlock->check, hlock->hardirqs_off, |
3345 | hlock->nest_lock, hlock->acquire_ip, | 3345 | hlock->nest_lock, hlock->acquire_ip, |
3346 | hlock->references)) | 3346 | hlock->references, hlock->pin_count)) |
3347 | return 0; | 3347 | return 0; |
3348 | } | 3348 | } |
3349 | 3349 | ||
@@ -3433,7 +3433,7 @@ found_it: | |||
3433 | hlock_class(hlock)->subclass, hlock->trylock, | 3433 | hlock_class(hlock)->subclass, hlock->trylock, |
3434 | hlock->read, hlock->check, hlock->hardirqs_off, | 3434 | hlock->read, hlock->check, hlock->hardirqs_off, |
3435 | hlock->nest_lock, hlock->acquire_ip, | 3435 | hlock->nest_lock, hlock->acquire_ip, |
3436 | hlock->references)) | 3436 | hlock->references, hlock->pin_count)) |
3437 | return 0; | 3437 | return 0; |
3438 | } | 3438 | } |
3439 | 3439 | ||
@@ -3583,7 +3583,7 @@ void lock_acquire(struct lockdep_map *lock, unsigned int subclass, | |||
3583 | current->lockdep_recursion = 1; | 3583 | current->lockdep_recursion = 1; |
3584 | trace_lock_acquire(lock, subclass, trylock, read, check, nest_lock, ip); | 3584 | trace_lock_acquire(lock, subclass, trylock, read, check, nest_lock, ip); |
3585 | __lock_acquire(lock, subclass, trylock, read, check, | 3585 | __lock_acquire(lock, subclass, trylock, read, check, |
3586 | irqs_disabled_flags(flags), nest_lock, ip, 0); | 3586 | irqs_disabled_flags(flags), nest_lock, ip, 0, 0); |
3587 | current->lockdep_recursion = 0; | 3587 | current->lockdep_recursion = 0; |
3588 | raw_local_irq_restore(flags); | 3588 | raw_local_irq_restore(flags); |
3589 | } | 3589 | } |
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c index 9f75f25cc5d9..775d36cc0050 100644 --- a/kernel/rcu/tree.c +++ b/kernel/rcu/tree.c | |||
@@ -3868,6 +3868,7 @@ static void rcu_init_new_rnp(struct rcu_node *rnp_leaf) | |||
3868 | static void __init | 3868 | static void __init |
3869 | rcu_boot_init_percpu_data(int cpu, struct rcu_state *rsp) | 3869 | rcu_boot_init_percpu_data(int cpu, struct rcu_state *rsp) |
3870 | { | 3870 | { |
3871 | static struct lock_class_key rcu_exp_sched_rdp_class; | ||
3871 | unsigned long flags; | 3872 | unsigned long flags; |
3872 | struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu); | 3873 | struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu); |
3873 | struct rcu_node *rnp = rcu_get_root(rsp); | 3874 | struct rcu_node *rnp = rcu_get_root(rsp); |
@@ -3883,6 +3884,10 @@ rcu_boot_init_percpu_data(int cpu, struct rcu_state *rsp) | |||
3883 | mutex_init(&rdp->exp_funnel_mutex); | 3884 | mutex_init(&rdp->exp_funnel_mutex); |
3884 | rcu_boot_init_nocb_percpu_data(rdp); | 3885 | rcu_boot_init_nocb_percpu_data(rdp); |
3885 | raw_spin_unlock_irqrestore(&rnp->lock, flags); | 3886 | raw_spin_unlock_irqrestore(&rnp->lock, flags); |
3887 | if (rsp == &rcu_sched_state) | ||
3888 | lockdep_set_class_and_name(&rdp->exp_funnel_mutex, | ||
3889 | &rcu_exp_sched_rdp_class, | ||
3890 | "rcu_data_exp_sched"); | ||
3886 | } | 3891 | } |
3887 | 3892 | ||
3888 | /* | 3893 | /* |
diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 2f9c92884817..615953141951 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c | |||
@@ -4934,7 +4934,15 @@ void init_idle(struct task_struct *idle, int cpu) | |||
4934 | idle->state = TASK_RUNNING; | 4934 | idle->state = TASK_RUNNING; |
4935 | idle->se.exec_start = sched_clock(); | 4935 | idle->se.exec_start = sched_clock(); |
4936 | 4936 | ||
4937 | do_set_cpus_allowed(idle, cpumask_of(cpu)); | 4937 | #ifdef CONFIG_SMP |
4938 | /* | ||
4939 | * It's possible that init_idle() gets called multiple times on a task, | ||
4940 | * in that case do_set_cpus_allowed() will not do the right thing. | ||
4941 | * | ||
4942 | * And since this is boot we can forgo the serialization. | ||
4943 | */ | ||
4944 | set_cpus_allowed_common(idle, cpumask_of(cpu)); | ||
4945 | #endif | ||
4938 | /* | 4946 | /* |
4939 | * We're having a chicken and egg problem, even though we are | 4947 | * We're having a chicken and egg problem, even though we are |
4940 | * holding rq->lock, the cpu isn't yet set to this cpu so the | 4948 | * holding rq->lock, the cpu isn't yet set to this cpu so the |
@@ -4951,7 +4959,7 @@ void init_idle(struct task_struct *idle, int cpu) | |||
4951 | 4959 | ||
4952 | rq->curr = rq->idle = idle; | 4960 | rq->curr = rq->idle = idle; |
4953 | idle->on_rq = TASK_ON_RQ_QUEUED; | 4961 | idle->on_rq = TASK_ON_RQ_QUEUED; |
4954 | #if defined(CONFIG_SMP) | 4962 | #ifdef CONFIG_SMP |
4955 | idle->on_cpu = 1; | 4963 | idle->on_cpu = 1; |
4956 | #endif | 4964 | #endif |
4957 | raw_spin_unlock(&rq->lock); | 4965 | raw_spin_unlock(&rq->lock); |
@@ -4966,7 +4974,7 @@ void init_idle(struct task_struct *idle, int cpu) | |||
4966 | idle->sched_class = &idle_sched_class; | 4974 | idle->sched_class = &idle_sched_class; |
4967 | ftrace_graph_init_idle_task(idle, cpu); | 4975 | ftrace_graph_init_idle_task(idle, cpu); |
4968 | vtime_init_idle(idle, cpu); | 4976 | vtime_init_idle(idle, cpu); |
4969 | #if defined(CONFIG_SMP) | 4977 | #ifdef CONFIG_SMP |
4970 | sprintf(idle->comm, "%s/%d", INIT_TASK_COMM, cpu); | 4978 | sprintf(idle->comm, "%s/%d", INIT_TASK_COMM, cpu); |
4971 | #endif | 4979 | #endif |
4972 | } | 4980 | } |
diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c index 841b72f720e8..3a38775b50c2 100644 --- a/kernel/time/clocksource.c +++ b/kernel/time/clocksource.c | |||
@@ -217,7 +217,7 @@ static void clocksource_watchdog(unsigned long data) | |||
217 | continue; | 217 | continue; |
218 | 218 | ||
219 | /* Check the deviation from the watchdog clocksource. */ | 219 | /* Check the deviation from the watchdog clocksource. */ |
220 | if ((abs(cs_nsec - wd_nsec) > WATCHDOG_THRESHOLD)) { | 220 | if (abs64(cs_nsec - wd_nsec) > WATCHDOG_THRESHOLD) { |
221 | pr_warn("timekeeping watchdog: Marking clocksource '%s' as unstable because the skew is too large:\n", | 221 | pr_warn("timekeeping watchdog: Marking clocksource '%s' as unstable because the skew is too large:\n", |
222 | cs->name); | 222 | cs->name); |
223 | pr_warn(" '%s' wd_now: %llx wd_last: %llx mask: %llx\n", | 223 | pr_warn(" '%s' wd_now: %llx wd_last: %llx mask: %llx\n", |
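
The clocksource hunk replaces abs() with abs64() because cs_nsec - wd_nsec is a 64-bit quantity; with the abs() of that era the value could be truncated on 32-bit builds and a large skew could go unnoticed. A user-space sketch of the truncation, with an explicit (int) cast standing in for the implicit narrowing and 62500000 ns used as a WATCHDOG_THRESHOLD-like value:

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <inttypes.h>

/* 64-bit absolute value, in the spirit of the kernel's abs64(). */
static int64_t abs64_demo(int64_t v)
{
        return v < 0 ? -v : v;
}

int main(void)
{
        int64_t skew = 0x200000000LL;   /* ~8.6 s of skew, in nanoseconds */
        int64_t threshold = 62500000;   /* threshold-like value, 1/16 s */

        /* abs() works on int; the high bits of the skew are simply lost. */
        printf("abs()   sees %d -> %s\n", abs((int)skew),
               abs((int)skew) > threshold ? "unstable" : "looks fine");

        /* The 64-bit version keeps the full magnitude. */
        printf("abs64() sees %" PRId64 " -> %s\n", abs64_demo(skew),
               abs64_demo(skew) > threshold ? "unstable" : "looks fine");
        return 0;
}
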
diff --git a/lib/string.c b/lib/string.c index 13d1e84ddb80..8dbb7b1eab50 100644 --- a/lib/string.c +++ b/lib/string.c | |||
@@ -27,6 +27,10 @@ | |||
27 | #include <linux/bug.h> | 27 | #include <linux/bug.h> |
28 | #include <linux/errno.h> | 28 | #include <linux/errno.h> |
29 | 29 | ||
30 | #include <asm/byteorder.h> | ||
31 | #include <asm/word-at-a-time.h> | ||
32 | #include <asm/page.h> | ||
33 | |||
30 | #ifndef __HAVE_ARCH_STRNCASECMP | 34 | #ifndef __HAVE_ARCH_STRNCASECMP |
31 | /** | 35 | /** |
32 | * strncasecmp - Case insensitive, length-limited string comparison | 36 | * strncasecmp - Case insensitive, length-limited string comparison |
@@ -146,6 +150,90 @@ size_t strlcpy(char *dest, const char *src, size_t size) | |||
146 | EXPORT_SYMBOL(strlcpy); | 150 | EXPORT_SYMBOL(strlcpy); |
147 | #endif | 151 | #endif |
148 | 152 | ||
153 | #ifndef __HAVE_ARCH_STRSCPY | ||
154 | /** | ||
155 | * strscpy - Copy a C-string into a sized buffer | ||
156 | * @dest: Where to copy the string to | ||
157 | * @src: Where to copy the string from | ||
158 | * @count: Size of destination buffer | ||
159 | * | ||
160 | * Copy the string, or as much of it as fits, into the dest buffer. | ||
161 | * The routine returns the number of characters copied (not including | ||
162 | * the trailing NUL) or -E2BIG if the destination buffer wasn't big enough. | ||
163 | * The behavior is undefined if the string buffers overlap. | ||
164 | * The destination buffer is always NUL terminated, unless it's zero-sized. | ||
165 | * | ||
166 | * Preferred to strlcpy() since the API doesn't require reading memory | ||
167 | * from the src string beyond the specified "count" bytes, and since | ||
168 | * the return value is easier to error-check than strlcpy()'s. | ||
169 | * In addition, the implementation is robust to the string changing out | ||
170 | * from underneath it, unlike the current strlcpy() implementation. | ||
171 | * | ||
172 | * Preferred to strncpy() since it always returns a valid string, and | ||
173 | * doesn't unnecessarily force the tail of the destination buffer to be | ||
174 | * zeroed. If the zeroing is desired, it's likely cleaner to use strscpy() | ||
175 | * with an overflow test, then just memset() the tail of the dest buffer. | ||
176 | */ | ||
177 | ssize_t strscpy(char *dest, const char *src, size_t count) | ||
178 | { | ||
179 | const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS; | ||
180 | size_t max = count; | ||
181 | long res = 0; | ||
182 | |||
183 | if (count == 0) | ||
184 | return -E2BIG; | ||
185 | |||
186 | #ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS | ||
187 | /* | ||
188 | * If src is unaligned, don't cross a page boundary, | ||
189 | * since we don't know if the next page is mapped. | ||
190 | */ | ||
191 | if ((long)src & (sizeof(long) - 1)) { | ||
192 | size_t limit = PAGE_SIZE - ((long)src & (PAGE_SIZE - 1)); | ||
193 | if (limit < max) | ||
194 | max = limit; | ||
195 | } | ||
196 | #else | ||
197 | /* If src or dest is unaligned, don't do word-at-a-time. */ | ||
198 | if (((long) dest | (long) src) & (sizeof(long) - 1)) | ||
199 | max = 0; | ||
200 | #endif | ||
201 | |||
202 | while (max >= sizeof(unsigned long)) { | ||
203 | unsigned long c, data; | ||
204 | |||
205 | c = *(unsigned long *)(src+res); | ||
206 | *(unsigned long *)(dest+res) = c; | ||
207 | if (has_zero(c, &data, &constants)) { | ||
208 | data = prep_zero_mask(c, data, &constants); | ||
209 | data = create_zero_mask(data); | ||
210 | return res + find_zero(data); | ||
211 | } | ||
212 | res += sizeof(unsigned long); | ||
213 | count -= sizeof(unsigned long); | ||
214 | max -= sizeof(unsigned long); | ||
215 | } | ||
216 | |||
217 | while (count) { | ||
218 | char c; | ||
219 | |||
220 | c = src[res]; | ||
221 | dest[res] = c; | ||
222 | if (!c) | ||
223 | return res; | ||
224 | res++; | ||
225 | count--; | ||
226 | } | ||
227 | |||
228 | /* Hit buffer length without finding a NUL; force NUL-termination. */ | ||
229 | if (res) | ||
230 | dest[res-1] = '\0'; | ||
231 | |||
232 | return -E2BIG; | ||
233 | } | ||
234 | EXPORT_SYMBOL(strscpy); | ||
235 | #endif | ||
236 | |||
149 | #ifndef __HAVE_ARCH_STRCAT | 237 | #ifndef __HAVE_ARCH_STRCAT |
150 | /** | 238 | /** |
151 | * strcat - Append one %NUL-terminated string to another | 239 | * strcat - Append one %NUL-terminated string to another |
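
For callers, the strscpy() return convention documented above means one signed check covers both outcomes: a non-negative value is the copied length, and -E2BIG means the destination was too small (but still NUL-terminated when its size is non-zero). A hypothetical caller might look like this; copy_name() is an illustration, not an existing kernel helper:

#include <linux/string.h>
#include <linux/errno.h>

/* Hypothetical helper: copy a NUL-terminated name into a fixed buffer. */
static int copy_name(char *dst, size_t dst_size, const char *src)
{
        ssize_t n = strscpy(dst, src, dst_size);

        if (n == -E2BIG)
                return -ENAMETOOLONG;   /* truncated: dst holds a shortened, terminated copy */

        /* n is the number of characters copied, excluding the trailing NUL. */
        return 0;
}
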
diff --git a/mm/dmapool.c b/mm/dmapool.c index 71a8998cd03a..312a716fa14c 100644 --- a/mm/dmapool.c +++ b/mm/dmapool.c | |||
@@ -394,7 +394,7 @@ static struct dma_page *pool_find_page(struct dma_pool *pool, dma_addr_t dma) | |||
394 | list_for_each_entry(page, &pool->page_list, page_list) { | 394 | list_for_each_entry(page, &pool->page_list, page_list) { |
395 | if (dma < page->dma) | 395 | if (dma < page->dma) |
396 | continue; | 396 | continue; |
397 | if (dma < (page->dma + pool->allocation)) | 397 | if ((dma - page->dma) < pool->allocation) |
398 | return page; | 398 | return page; |
399 | } | 399 | } |
400 | return NULL; | 400 | return NULL; |
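
The rewritten comparison in pool_find_page() matters when a pool page sits near the top of the dma_addr_t range: page->dma + pool->allocation can wrap, and a block that really lives in that page is then never matched. Since the loop already does "if (dma < page->dma) continue;", the subtraction form is safe and wrap-free. A small sketch of the failure, using 32-bit arithmetic for brevity (the real dma_addr_t width is configuration-dependent):

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

int main(void)
{
        uint32_t page_dma   = 0xfffff000u;   /* pool page mapped near the top */
        uint32_t allocation = 0x2000u;       /* pool->allocation */
        uint32_t dma        = 0xfffff800u;   /* block that lives inside the page */

        /* Old form: page_dma + allocation wraps to 0x1000, so the block
         * is (wrongly) reported as not belonging to this page. */
        bool old_check = dma < (uint32_t)(page_dma + allocation);

        /* New form: dma >= page_dma is guaranteed by the earlier check,
         * so comparing the offset against the allocation size is safe. */
        bool new_check = (dma - page_dma) < allocation;

        printf("old: %s, new: %s\n",
               old_check ? "found" : "missed (wrong)",
               new_check ? "found (correct)" : "missed");
        return 0;
}
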
diff --git a/mm/hugetlb.c b/mm/hugetlb.c index 999fb0aef8f1..9cc773483624 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c | |||
@@ -3202,6 +3202,14 @@ static void unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma, | |||
3202 | continue; | 3202 | continue; |
3203 | 3203 | ||
3204 | /* | 3204 | /* |
3205 | * Shared VMAs have their own reserves and do not affect | ||
3206 | * MAP_PRIVATE accounting but it is possible that a shared | ||
3207 | * VMA is using the same page so check and skip such VMAs. | ||
3208 | */ | ||
3209 | if (iter_vma->vm_flags & VM_MAYSHARE) | ||
3210 | continue; | ||
3211 | |||
3212 | /* | ||
3205 | * Unmap the page from other VMAs without their own reserves. | 3213 | * Unmap the page from other VMAs without their own reserves. |
3206 | * They get marked to be SIGKILLed if they fault in these | 3214 | * They get marked to be SIGKILLed if they fault in these |
3207 | * areas. This is because a future no-page fault on this VMA | 3215 | * areas. This is because a future no-page fault on this VMA |
diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 6ddaeba34e09..1fedbde68f59 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c | |||
@@ -644,12 +644,14 @@ mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_zone *mctz) | |||
644 | } | 644 | } |
645 | 645 | ||
646 | /* | 646 | /* |
647 | * Return page count for single (non recursive) @memcg. | ||
648 | * | ||
647 | * Implementation Note: reading percpu statistics for memcg. | 649 | * Implementation Note: reading percpu statistics for memcg. |
648 | * | 650 | * |
649 | * Both of vmstat[] and percpu_counter has threshold and do periodic | 651 | * Both of vmstat[] and percpu_counter has threshold and do periodic |
650 | * synchronization to implement "quick" read. There are trade-off between | 652 | * synchronization to implement "quick" read. There are trade-off between |
651 | * reading cost and precision of value. Then, we may have a chance to implement | 653 | * reading cost and precision of value. Then, we may have a chance to implement |
652 | * a periodic synchronizion of counter in memcg's counter. | 654 | * a periodic synchronization of counter in memcg's counter. |
653 | * | 655 | * |
654 | * But this _read() function is used for user interface now. The user accounts | 656 | * But this _read() function is used for user interface now. The user accounts |
655 | * memory usage by memory cgroup and he _always_ requires exact value because | 657 | * memory usage by memory cgroup and he _always_ requires exact value because |
@@ -659,17 +661,24 @@ mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_zone *mctz) | |||
659 | * | 661 | * |
660 | * If there are kernel internal actions which can make use of some not-exact | 662 | * If there are kernel internal actions which can make use of some not-exact |
661 | * value, and reading all cpu value can be performance bottleneck in some | 663 | * value, and reading all cpu value can be performance bottleneck in some |
662 | * common workload, threashold and synchonization as vmstat[] should be | 664 | * common workload, threshold and synchronization as vmstat[] should be |
663 | * implemented. | 665 | * implemented. |
664 | */ | 666 | */ |
665 | static long mem_cgroup_read_stat(struct mem_cgroup *memcg, | 667 | static unsigned long |
666 | enum mem_cgroup_stat_index idx) | 668 | mem_cgroup_read_stat(struct mem_cgroup *memcg, enum mem_cgroup_stat_index idx) |
667 | { | 669 | { |
668 | long val = 0; | 670 | long val = 0; |
669 | int cpu; | 671 | int cpu; |
670 | 672 | ||
673 | /* Per-cpu values can be negative, use a signed accumulator */ | ||
671 | for_each_possible_cpu(cpu) | 674 | for_each_possible_cpu(cpu) |
672 | val += per_cpu(memcg->stat->count[idx], cpu); | 675 | val += per_cpu(memcg->stat->count[idx], cpu); |
676 | /* | ||
677 | * Summing races with updates, so val may be negative. Avoid exposing | ||
678 | * transient negative values. | ||
679 | */ | ||
680 | if (val < 0) | ||
681 | val = 0; | ||
673 | return val; | 682 | return val; |
674 | } | 683 | } |
675 | 684 | ||
@@ -1254,7 +1263,7 @@ void mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p) | |||
1254 | for (i = 0; i < MEM_CGROUP_STAT_NSTATS; i++) { | 1263 | for (i = 0; i < MEM_CGROUP_STAT_NSTATS; i++) { |
1255 | if (i == MEM_CGROUP_STAT_SWAP && !do_swap_account) | 1264 | if (i == MEM_CGROUP_STAT_SWAP && !do_swap_account) |
1256 | continue; | 1265 | continue; |
1257 | pr_cont(" %s:%ldKB", mem_cgroup_stat_names[i], | 1266 | pr_cont(" %s:%luKB", mem_cgroup_stat_names[i], |
1258 | K(mem_cgroup_read_stat(iter, i))); | 1267 | K(mem_cgroup_read_stat(iter, i))); |
1259 | } | 1268 | } |
1260 | 1269 | ||
@@ -2819,14 +2828,11 @@ static unsigned long tree_stat(struct mem_cgroup *memcg, | |||
2819 | enum mem_cgroup_stat_index idx) | 2828 | enum mem_cgroup_stat_index idx) |
2820 | { | 2829 | { |
2821 | struct mem_cgroup *iter; | 2830 | struct mem_cgroup *iter; |
2822 | long val = 0; | 2831 | unsigned long val = 0; |
2823 | 2832 | ||
2824 | /* Per-cpu values can be negative, use a signed accumulator */ | ||
2825 | for_each_mem_cgroup_tree(iter, memcg) | 2833 | for_each_mem_cgroup_tree(iter, memcg) |
2826 | val += mem_cgroup_read_stat(iter, idx); | 2834 | val += mem_cgroup_read_stat(iter, idx); |
2827 | 2835 | ||
2828 | if (val < 0) /* race ? */ | ||
2829 | val = 0; | ||
2830 | return val; | 2836 | return val; |
2831 | } | 2837 | } |
2832 | 2838 | ||
@@ -3169,7 +3175,7 @@ static int memcg_stat_show(struct seq_file *m, void *v) | |||
3169 | for (i = 0; i < MEM_CGROUP_STAT_NSTATS; i++) { | 3175 | for (i = 0; i < MEM_CGROUP_STAT_NSTATS; i++) { |
3170 | if (i == MEM_CGROUP_STAT_SWAP && !do_swap_account) | 3176 | if (i == MEM_CGROUP_STAT_SWAP && !do_swap_account) |
3171 | continue; | 3177 | continue; |
3172 | seq_printf(m, "%s %ld\n", mem_cgroup_stat_names[i], | 3178 | seq_printf(m, "%s %lu\n", mem_cgroup_stat_names[i], |
3173 | mem_cgroup_read_stat(memcg, i) * PAGE_SIZE); | 3179 | mem_cgroup_read_stat(memcg, i) * PAGE_SIZE); |
3174 | } | 3180 | } |
3175 | 3181 | ||
@@ -3194,13 +3200,13 @@ static int memcg_stat_show(struct seq_file *m, void *v) | |||
3194 | (u64)memsw * PAGE_SIZE); | 3200 | (u64)memsw * PAGE_SIZE); |
3195 | 3201 | ||
3196 | for (i = 0; i < MEM_CGROUP_STAT_NSTATS; i++) { | 3202 | for (i = 0; i < MEM_CGROUP_STAT_NSTATS; i++) { |
3197 | long long val = 0; | 3203 | unsigned long long val = 0; |
3198 | 3204 | ||
3199 | if (i == MEM_CGROUP_STAT_SWAP && !do_swap_account) | 3205 | if (i == MEM_CGROUP_STAT_SWAP && !do_swap_account) |
3200 | continue; | 3206 | continue; |
3201 | for_each_mem_cgroup_tree(mi, memcg) | 3207 | for_each_mem_cgroup_tree(mi, memcg) |
3202 | val += mem_cgroup_read_stat(mi, i) * PAGE_SIZE; | 3208 | val += mem_cgroup_read_stat(mi, i) * PAGE_SIZE; |
3203 | seq_printf(m, "total_%s %lld\n", mem_cgroup_stat_names[i], val); | 3209 | seq_printf(m, "total_%s %llu\n", mem_cgroup_stat_names[i], val); |
3204 | } | 3210 | } |
3205 | 3211 | ||
3206 | for (i = 0; i < MEM_CGROUP_EVENTS_NSTATS; i++) { | 3212 | for (i = 0; i < MEM_CGROUP_EVENTS_NSTATS; i++) { |
@@ -4179,7 +4185,6 @@ static struct mem_cgroup *mem_cgroup_alloc(void) | |||
4179 | if (memcg_wb_domain_init(memcg, GFP_KERNEL)) | 4185 | if (memcg_wb_domain_init(memcg, GFP_KERNEL)) |
4180 | goto out_free_stat; | 4186 | goto out_free_stat; |
4181 | 4187 | ||
4182 | spin_lock_init(&memcg->pcp_counter_lock); | ||
4183 | return memcg; | 4188 | return memcg; |
4184 | 4189 | ||
4185 | out_free_stat: | 4190 | out_free_stat: |
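
The mem_cgroup_read_stat() hunk above folds the negative-value clamp into the accumulator itself: per-cpu deltas race with the summation, so the intermediate total can be transiently negative even though the real count is not, and callers now always see an unsigned, clamped value. A toy sketch of that pattern with made-up per-cpu data (not the real memcg counters):

#include <stdio.h>

#define NR_CPUS 4

/* Stand-in for per-cpu counters: racing updates can leave individual
 * entries negative even though the logical total is non-negative. */
static long percpu_count[NR_CPUS] = { 7, -3, 5, -2 };

static unsigned long read_stat(void)
{
        long val = 0;   /* signed accumulator, as in mem_cgroup_read_stat() */
        int cpu;

        for (cpu = 0; cpu < NR_CPUS; cpu++)
                val += percpu_count[cpu];

        /* Avoid exposing a transient negative value to readers. */
        if (val < 0)
                val = 0;

        return (unsigned long)val;
}

int main(void)
{
        printf("stat = %lu\n", read_stat());
        return 0;
}
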
diff --git a/mm/migrate.c b/mm/migrate.c index 7452a00bbb50..842ecd7aaf7f 100644 --- a/mm/migrate.c +++ b/mm/migrate.c | |||
@@ -740,6 +740,15 @@ static int move_to_new_page(struct page *newpage, struct page *page, | |||
740 | if (PageSwapBacked(page)) | 740 | if (PageSwapBacked(page)) |
741 | SetPageSwapBacked(newpage); | 741 | SetPageSwapBacked(newpage); |
742 | 742 | ||
743 | /* | ||
744 | * Indirectly called below, migrate_page_copy() copies PG_dirty and thus | ||
745 | * needs newpage's memcg set to transfer memcg dirty page accounting. | ||
746 | * So perform memcg migration in two steps: | ||
747 | * 1. set newpage->mem_cgroup (here) | ||
748 | * 2. clear page->mem_cgroup (below) | ||
749 | */ | ||
750 | set_page_memcg(newpage, page_memcg(page)); | ||
751 | |||
743 | mapping = page_mapping(page); | 752 | mapping = page_mapping(page); |
744 | if (!mapping) | 753 | if (!mapping) |
745 | rc = migrate_page(mapping, newpage, page, mode); | 754 | rc = migrate_page(mapping, newpage, page, mode); |
@@ -756,9 +765,10 @@ static int move_to_new_page(struct page *newpage, struct page *page, | |||
756 | rc = fallback_migrate_page(mapping, newpage, page, mode); | 765 | rc = fallback_migrate_page(mapping, newpage, page, mode); |
757 | 766 | ||
758 | if (rc != MIGRATEPAGE_SUCCESS) { | 767 | if (rc != MIGRATEPAGE_SUCCESS) { |
768 | set_page_memcg(newpage, NULL); | ||
759 | newpage->mapping = NULL; | 769 | newpage->mapping = NULL; |
760 | } else { | 770 | } else { |
761 | mem_cgroup_migrate(page, newpage, false); | 771 | set_page_memcg(page, NULL); |
762 | if (page_was_mapped) | 772 | if (page_was_mapped) |
763 | remove_migration_ptes(page, newpage); | 773 | remove_migration_ptes(page, newpage); |
764 | page->mapping = NULL; | 774 | page->mapping = NULL; |
diff --git a/mm/slab.c b/mm/slab.c --- a/mm/slab.c +++ b/mm/slab.c | |||
@@ -2190,9 +2190,16 @@ __kmem_cache_create (struct kmem_cache *cachep, unsigned long flags) | |||
2190 | size += BYTES_PER_WORD; | 2190 | size += BYTES_PER_WORD; |
2191 | } | 2191 | } |
2192 | #if FORCED_DEBUG && defined(CONFIG_DEBUG_PAGEALLOC) | 2192 | #if FORCED_DEBUG && defined(CONFIG_DEBUG_PAGEALLOC) |
2193 | if (size >= kmalloc_size(INDEX_NODE + 1) | 2193 | /* |
2194 | && cachep->object_size > cache_line_size() | 2194 | * To activate debug pagealloc, off-slab management is necessary |
2195 | && ALIGN(size, cachep->align) < PAGE_SIZE) { | 2195 | * requirement. In early phase of initialization, small sized slab |
2196 | * doesn't get initialized so it would not be possible. So, we need | ||
2197 | * to check size >= 256. It guarantees that all necessary small | ||
2198 | * sized slab is initialized in current slab initialization sequence. | ||
2199 | */ | ||
2200 | if (!slab_early_init && size >= kmalloc_size(INDEX_NODE) && | ||
2201 | size >= 256 && cachep->object_size > cache_line_size() && | ||
2202 | ALIGN(size, cachep->align) < PAGE_SIZE) { | ||
2196 | cachep->obj_offset += PAGE_SIZE - ALIGN(size, cachep->align); | 2203 | cachep->obj_offset += PAGE_SIZE - ALIGN(size, cachep->align); |
2197 | size = PAGE_SIZE; | 2204 | size = PAGE_SIZE; |
2198 | } | 2205 | } |
diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c index 805a95a48107..830f8a7c1cb1 100644 --- a/net/core/net-sysfs.c +++ b/net/core/net-sysfs.c | |||
@@ -31,7 +31,6 @@ | |||
31 | static const char fmt_hex[] = "%#x\n"; | 31 | static const char fmt_hex[] = "%#x\n"; |
32 | static const char fmt_long_hex[] = "%#lx\n"; | 32 | static const char fmt_long_hex[] = "%#lx\n"; |
33 | static const char fmt_dec[] = "%d\n"; | 33 | static const char fmt_dec[] = "%d\n"; |
34 | static const char fmt_udec[] = "%u\n"; | ||
35 | static const char fmt_ulong[] = "%lu\n"; | 34 | static const char fmt_ulong[] = "%lu\n"; |
36 | static const char fmt_u64[] = "%llu\n"; | 35 | static const char fmt_u64[] = "%llu\n"; |
37 | 36 | ||
@@ -202,7 +201,7 @@ static ssize_t speed_show(struct device *dev, | |||
202 | if (netif_running(netdev)) { | 201 | if (netif_running(netdev)) { |
203 | struct ethtool_cmd cmd; | 202 | struct ethtool_cmd cmd; |
204 | if (!__ethtool_get_settings(netdev, &cmd)) | 203 | if (!__ethtool_get_settings(netdev, &cmd)) |
205 | ret = sprintf(buf, fmt_udec, ethtool_cmd_speed(&cmd)); | 204 | ret = sprintf(buf, fmt_dec, ethtool_cmd_speed(&cmd)); |
206 | } | 205 | } |
207 | rtnl_unlock(); | 206 | rtnl_unlock(); |
208 | return ret; | 207 | return ret; |
diff --git a/net/core/skbuff.c b/net/core/skbuff.c index dad4dd37e2aa..fab4599ba8b2 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c | |||
@@ -2958,11 +2958,12 @@ EXPORT_SYMBOL_GPL(skb_append_pagefrags); | |||
2958 | */ | 2958 | */ |
2959 | unsigned char *skb_pull_rcsum(struct sk_buff *skb, unsigned int len) | 2959 | unsigned char *skb_pull_rcsum(struct sk_buff *skb, unsigned int len) |
2960 | { | 2960 | { |
2961 | unsigned char *data = skb->data; | ||
2962 | |||
2961 | BUG_ON(len > skb->len); | 2963 | BUG_ON(len > skb->len); |
2962 | skb->len -= len; | 2964 | __skb_pull(skb, len); |
2963 | BUG_ON(skb->len < skb->data_len); | 2965 | skb_postpull_rcsum(skb, data, len); |
2964 | skb_postpull_rcsum(skb, skb->data, len); | 2966 | return skb->data; |
2965 | return skb->data += len; | ||
2966 | } | 2967 | } |
2967 | EXPORT_SYMBOL_GPL(skb_pull_rcsum); | 2968 | EXPORT_SYMBOL_GPL(skb_pull_rcsum); |
2968 | 2969 | ||
diff --git a/net/dsa/slave.c b/net/dsa/slave.c index cce97385f743..7d91f4612ac0 100644 --- a/net/dsa/slave.c +++ b/net/dsa/slave.c | |||
@@ -458,12 +458,17 @@ static int dsa_slave_stp_update(struct net_device *dev, u8 state) | |||
458 | static int dsa_slave_port_attr_set(struct net_device *dev, | 458 | static int dsa_slave_port_attr_set(struct net_device *dev, |
459 | struct switchdev_attr *attr) | 459 | struct switchdev_attr *attr) |
460 | { | 460 | { |
461 | int ret = 0; | 461 | struct dsa_slave_priv *p = netdev_priv(dev); |
462 | struct dsa_switch *ds = p->parent; | ||
463 | int ret; | ||
462 | 464 | ||
463 | switch (attr->id) { | 465 | switch (attr->id) { |
464 | case SWITCHDEV_ATTR_PORT_STP_STATE: | 466 | case SWITCHDEV_ATTR_PORT_STP_STATE: |
465 | if (attr->trans == SWITCHDEV_TRANS_COMMIT) | 467 | if (attr->trans == SWITCHDEV_TRANS_PREPARE) |
466 | ret = dsa_slave_stp_update(dev, attr->u.stp_state); | 468 | ret = ds->drv->port_stp_update ? 0 : -EOPNOTSUPP; |
469 | else | ||
470 | ret = ds->drv->port_stp_update(ds, p->port, | ||
471 | attr->u.stp_state); | ||
467 | break; | 472 | break; |
468 | default: | 473 | default: |
469 | ret = -EOPNOTSUPP; | 474 | ret = -EOPNOTSUPP; |
diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c index 6fcbd215cdbc..690bcbc59f26 100644 --- a/net/ipv4/fib_frontend.c +++ b/net/ipv4/fib_frontend.c | |||
@@ -340,6 +340,7 @@ static int __fib_validate_source(struct sk_buff *skb, __be32 src, __be32 dst, | |||
340 | fl4.flowi4_tos = tos; | 340 | fl4.flowi4_tos = tos; |
341 | fl4.flowi4_scope = RT_SCOPE_UNIVERSE; | 341 | fl4.flowi4_scope = RT_SCOPE_UNIVERSE; |
342 | fl4.flowi4_tun_key.tun_id = 0; | 342 | fl4.flowi4_tun_key.tun_id = 0; |
343 | fl4.flowi4_flags = 0; | ||
343 | 344 | ||
344 | no_addr = idev->ifa_list == NULL; | 345 | no_addr = idev->ifa_list == NULL; |
345 | 346 | ||
diff --git a/net/ipv4/route.c b/net/ipv4/route.c index c6ad99ad0ffb..c81deb85acb4 100644 --- a/net/ipv4/route.c +++ b/net/ipv4/route.c | |||
@@ -1737,6 +1737,7 @@ static int ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr, | |||
1737 | fl4.flowi4_mark = skb->mark; | 1737 | fl4.flowi4_mark = skb->mark; |
1738 | fl4.flowi4_tos = tos; | 1738 | fl4.flowi4_tos = tos; |
1739 | fl4.flowi4_scope = RT_SCOPE_UNIVERSE; | 1739 | fl4.flowi4_scope = RT_SCOPE_UNIVERSE; |
1740 | fl4.flowi4_flags = 0; | ||
1740 | fl4.daddr = daddr; | 1741 | fl4.daddr = daddr; |
1741 | fl4.saddr = saddr; | 1742 | fl4.saddr = saddr; |
1742 | err = fib_lookup(net, &fl4, &res, 0); | 1743 | err = fib_lookup(net, &fl4, &res, 0); |
diff --git a/net/ipv6/route.c b/net/ipv6/route.c index f204089e854c..cb32ce250db0 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c | |||
@@ -1193,7 +1193,8 @@ struct dst_entry *ip6_route_output(struct net *net, const struct sock *sk, | |||
1193 | 1193 | ||
1194 | fl6->flowi6_iif = LOOPBACK_IFINDEX; | 1194 | fl6->flowi6_iif = LOOPBACK_IFINDEX; |
1195 | 1195 | ||
1196 | if ((sk && sk->sk_bound_dev_if) || rt6_need_strict(&fl6->daddr)) | 1196 | if ((sk && sk->sk_bound_dev_if) || rt6_need_strict(&fl6->daddr) || |
1197 | fl6->flowi6_oif) | ||
1197 | flags |= RT6_LOOKUP_F_IFACE; | 1198 | flags |= RT6_LOOKUP_F_IFACE; |
1198 | 1199 | ||
1199 | if (!ipv6_addr_any(&fl6->saddr)) | 1200 | if (!ipv6_addr_any(&fl6->saddr)) |
diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c index f6b090df3930..afca2eb4dfa7 100644 --- a/net/l2tp/l2tp_core.c +++ b/net/l2tp/l2tp_core.c | |||
@@ -1319,7 +1319,7 @@ static void l2tp_tunnel_del_work(struct work_struct *work) | |||
1319 | tunnel = container_of(work, struct l2tp_tunnel, del_work); | 1319 | tunnel = container_of(work, struct l2tp_tunnel, del_work); |
1320 | sk = l2tp_tunnel_sock_lookup(tunnel); | 1320 | sk = l2tp_tunnel_sock_lookup(tunnel); |
1321 | if (!sk) | 1321 | if (!sk) |
1322 | return; | 1322 | goto out; |
1323 | 1323 | ||
1324 | sock = sk->sk_socket; | 1324 | sock = sk->sk_socket; |
1325 | 1325 | ||
@@ -1341,6 +1341,8 @@ static void l2tp_tunnel_del_work(struct work_struct *work) | |||
1341 | } | 1341 | } |
1342 | 1342 | ||
1343 | l2tp_tunnel_sock_put(sk); | 1343 | l2tp_tunnel_sock_put(sk); |
1344 | out: | ||
1345 | l2tp_tunnel_dec_refcount(tunnel); | ||
1344 | } | 1346 | } |
1345 | 1347 | ||
1346 | /* Create a socket for the tunnel, if one isn't set up by | 1348 | /* Create a socket for the tunnel, if one isn't set up by |
@@ -1636,8 +1638,13 @@ EXPORT_SYMBOL_GPL(l2tp_tunnel_create); | |||
1636 | */ | 1638 | */ |
1637 | int l2tp_tunnel_delete(struct l2tp_tunnel *tunnel) | 1639 | int l2tp_tunnel_delete(struct l2tp_tunnel *tunnel) |
1638 | { | 1640 | { |
1641 | l2tp_tunnel_inc_refcount(tunnel); | ||
1639 | l2tp_tunnel_closeall(tunnel); | 1642 | l2tp_tunnel_closeall(tunnel); |
1640 | return (false == queue_work(l2tp_wq, &tunnel->del_work)); | 1643 | if (false == queue_work(l2tp_wq, &tunnel->del_work)) { |
1644 | l2tp_tunnel_dec_refcount(tunnel); | ||
1645 | return 1; | ||
1646 | } | ||
1647 | return 0; | ||
1641 | } | 1648 | } |
1642 | EXPORT_SYMBOL_GPL(l2tp_tunnel_delete); | 1649 | EXPORT_SYMBOL_GPL(l2tp_tunnel_delete); |
1643 | 1650 | ||
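
The l2tp change takes a tunnel reference before queueing the deferred delete and drops it either immediately when queue_work() reports the work was already pending, or at the end of the work function, so the tunnel cannot be freed while the work is in flight. A generic sketch of that pin-across-deferred-work pattern; the refcount and "workqueue" below are simplified stand-ins, not the l2tp API:

#include <stdbool.h>
#include <stdio.h>

struct tunnel {
        int refcount;
        bool work_pending;
};

static void tunnel_get(struct tunnel *t) { t->refcount++; }

static void tunnel_put(struct tunnel *t)
{
        if (--t->refcount == 0)
                printf("tunnel freed\n");
}

/* Stand-in for queue_work(): returns false if the work was already queued. */
static bool queue_delete_work(struct tunnel *t)
{
        if (t->work_pending)
                return false;
        t->work_pending = true;
        return true;
}

/* The deferred worker owns the reference taken at queue time. */
static void delete_work_fn(struct tunnel *t)
{
        /* ... tear the tunnel down ... */
        tunnel_put(t);                  /* pairs with the get in tunnel_delete() */
}

static int tunnel_delete(struct tunnel *t)
{
        tunnel_get(t);                  /* pin the tunnel for the worker */
        if (!queue_delete_work(t)) {
                tunnel_put(t);          /* work already pending: drop our pin */
                return 1;
        }
        return 0;
}

int main(void)
{
        struct tunnel t = { .refcount = 1, .work_pending = false };

        tunnel_delete(&t);              /* queues work, refcount now 2 */
        delete_work_fn(&t);             /* worker runs, drops its reference */
        tunnel_put(&t);                 /* drop the original reference */
        return 0;
}
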
diff --git a/net/sctp/associola.c b/net/sctp/associola.c index 197c3f59ecbf..b00f1f9611d6 100644 --- a/net/sctp/associola.c +++ b/net/sctp/associola.c | |||
@@ -1208,20 +1208,22 @@ void sctp_assoc_update(struct sctp_association *asoc, | |||
1208 | * within this document. | 1208 | * within this document. |
1209 | * | 1209 | * |
1210 | * Our basic strategy is to round-robin transports in priorities | 1210 | * Our basic strategy is to round-robin transports in priorities |
1211 | * according to sctp_state_prio_map[] e.g., if no such | 1211 | * according to sctp_trans_score() e.g., if no such |
1212 | * transport with state SCTP_ACTIVE exists, round-robin through | 1212 | * transport with state SCTP_ACTIVE exists, round-robin through |
1213 | * SCTP_UNKNOWN, etc. You get the picture. | 1213 | * SCTP_UNKNOWN, etc. You get the picture. |
1214 | */ | 1214 | */ |
1215 | static const u8 sctp_trans_state_to_prio_map[] = { | ||
1216 | [SCTP_ACTIVE] = 3, /* best case */ | ||
1217 | [SCTP_UNKNOWN] = 2, | ||
1218 | [SCTP_PF] = 1, | ||
1219 | [SCTP_INACTIVE] = 0, /* worst case */ | ||
1220 | }; | ||
1221 | |||
1222 | static u8 sctp_trans_score(const struct sctp_transport *trans) | 1215 | static u8 sctp_trans_score(const struct sctp_transport *trans) |
1223 | { | 1216 | { |
1224 | return sctp_trans_state_to_prio_map[trans->state]; | 1217 | switch (trans->state) { |
1218 | case SCTP_ACTIVE: | ||
1219 | return 3; /* best case */ | ||
1220 | case SCTP_UNKNOWN: | ||
1221 | return 2; | ||
1222 | case SCTP_PF: | ||
1223 | return 1; | ||
1224 | default: /* case SCTP_INACTIVE */ | ||
1225 | return 0; /* worst case */ | ||
1226 | } | ||
1225 | } | 1227 | } |
1226 | 1228 | ||
1227 | static struct sctp_transport *sctp_trans_elect_tie(struct sctp_transport *trans1, | 1229 | static struct sctp_transport *sctp_trans_elect_tie(struct sctp_transport *trans1, |
diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c index 35df1266bf07..6098d4c42fa9 100644 --- a/net/sctp/sm_sideeffect.c +++ b/net/sctp/sm_sideeffect.c | |||
@@ -244,12 +244,13 @@ void sctp_generate_t3_rtx_event(unsigned long peer) | |||
244 | int error; | 244 | int error; |
245 | struct sctp_transport *transport = (struct sctp_transport *) peer; | 245 | struct sctp_transport *transport = (struct sctp_transport *) peer; |
246 | struct sctp_association *asoc = transport->asoc; | 246 | struct sctp_association *asoc = transport->asoc; |
247 | struct net *net = sock_net(asoc->base.sk); | 247 | struct sock *sk = asoc->base.sk; |
248 | struct net *net = sock_net(sk); | ||
248 | 249 | ||
249 | /* Check whether a task is in the sock. */ | 250 | /* Check whether a task is in the sock. */ |
250 | 251 | ||
251 | bh_lock_sock(asoc->base.sk); | 252 | bh_lock_sock(sk); |
252 | if (sock_owned_by_user(asoc->base.sk)) { | 253 | if (sock_owned_by_user(sk)) { |
253 | pr_debug("%s: sock is busy\n", __func__); | 254 | pr_debug("%s: sock is busy\n", __func__); |
254 | 255 | ||
255 | /* Try again later. */ | 256 | /* Try again later. */ |
@@ -272,10 +273,10 @@ void sctp_generate_t3_rtx_event(unsigned long peer) | |||
272 | transport, GFP_ATOMIC); | 273 | transport, GFP_ATOMIC); |
273 | 274 | ||
274 | if (error) | 275 | if (error) |
275 | asoc->base.sk->sk_err = -error; | 276 | sk->sk_err = -error; |
276 | 277 | ||
277 | out_unlock: | 278 | out_unlock: |
278 | bh_unlock_sock(asoc->base.sk); | 279 | bh_unlock_sock(sk); |
279 | sctp_transport_put(transport); | 280 | sctp_transport_put(transport); |
280 | } | 281 | } |
281 | 282 | ||
@@ -285,11 +286,12 @@ out_unlock: | |||
285 | static void sctp_generate_timeout_event(struct sctp_association *asoc, | 286 | static void sctp_generate_timeout_event(struct sctp_association *asoc, |
286 | sctp_event_timeout_t timeout_type) | 287 | sctp_event_timeout_t timeout_type) |
287 | { | 288 | { |
288 | struct net *net = sock_net(asoc->base.sk); | 289 | struct sock *sk = asoc->base.sk; |
290 | struct net *net = sock_net(sk); | ||
289 | int error = 0; | 291 | int error = 0; |
290 | 292 | ||
291 | bh_lock_sock(asoc->base.sk); | 293 | bh_lock_sock(sk); |
292 | if (sock_owned_by_user(asoc->base.sk)) { | 294 | if (sock_owned_by_user(sk)) { |
293 | pr_debug("%s: sock is busy: timer %d\n", __func__, | 295 | pr_debug("%s: sock is busy: timer %d\n", __func__, |
294 | timeout_type); | 296 | timeout_type); |
295 | 297 | ||
@@ -312,10 +314,10 @@ static void sctp_generate_timeout_event(struct sctp_association *asoc, | |||
312 | (void *)timeout_type, GFP_ATOMIC); | 314 | (void *)timeout_type, GFP_ATOMIC); |
313 | 315 | ||
314 | if (error) | 316 | if (error) |
315 | asoc->base.sk->sk_err = -error; | 317 | sk->sk_err = -error; |
316 | 318 | ||
317 | out_unlock: | 319 | out_unlock: |
318 | bh_unlock_sock(asoc->base.sk); | 320 | bh_unlock_sock(sk); |
319 | sctp_association_put(asoc); | 321 | sctp_association_put(asoc); |
320 | } | 322 | } |
321 | 323 | ||
@@ -365,10 +367,11 @@ void sctp_generate_heartbeat_event(unsigned long data) | |||
365 | int error = 0; | 367 | int error = 0; |
366 | struct sctp_transport *transport = (struct sctp_transport *) data; | 368 | struct sctp_transport *transport = (struct sctp_transport *) data; |
367 | struct sctp_association *asoc = transport->asoc; | 369 | struct sctp_association *asoc = transport->asoc; |
368 | struct net *net = sock_net(asoc->base.sk); | 370 | struct sock *sk = asoc->base.sk; |
371 | struct net *net = sock_net(sk); | ||
369 | 372 | ||
370 | bh_lock_sock(asoc->base.sk); | 373 | bh_lock_sock(sk); |
371 | if (sock_owned_by_user(asoc->base.sk)) { | 374 | if (sock_owned_by_user(sk)) { |
372 | pr_debug("%s: sock is busy\n", __func__); | 375 | pr_debug("%s: sock is busy\n", __func__); |
373 | 376 | ||
374 | /* Try again later. */ | 377 | /* Try again later. */ |
@@ -388,11 +391,11 @@ void sctp_generate_heartbeat_event(unsigned long data) | |||
388 | asoc->state, asoc->ep, asoc, | 391 | asoc->state, asoc->ep, asoc, |
389 | transport, GFP_ATOMIC); | 392 | transport, GFP_ATOMIC); |
390 | 393 | ||
391 | if (error) | 394 | if (error) |
392 | asoc->base.sk->sk_err = -error; | 395 | sk->sk_err = -error; |
393 | 396 | ||
394 | out_unlock: | 397 | out_unlock: |
395 | bh_unlock_sock(asoc->base.sk); | 398 | bh_unlock_sock(sk); |
396 | sctp_transport_put(transport); | 399 | sctp_transport_put(transport); |
397 | } | 400 | } |
398 | 401 | ||
@@ -403,10 +406,11 @@ void sctp_generate_proto_unreach_event(unsigned long data) | |||
403 | { | 406 | { |
404 | struct sctp_transport *transport = (struct sctp_transport *) data; | 407 | struct sctp_transport *transport = (struct sctp_transport *) data; |
405 | struct sctp_association *asoc = transport->asoc; | 408 | struct sctp_association *asoc = transport->asoc; |
406 | struct net *net = sock_net(asoc->base.sk); | 409 | struct sock *sk = asoc->base.sk; |
410 | struct net *net = sock_net(sk); | ||
407 | 411 | ||
408 | bh_lock_sock(asoc->base.sk); | 412 | bh_lock_sock(sk); |
409 | if (sock_owned_by_user(asoc->base.sk)) { | 413 | if (sock_owned_by_user(sk)) { |
410 | pr_debug("%s: sock is busy\n", __func__); | 414 | pr_debug("%s: sock is busy\n", __func__); |
411 | 415 | ||
412 | /* Try again later. */ | 416 | /* Try again later. */ |
@@ -427,7 +431,7 @@ void sctp_generate_proto_unreach_event(unsigned long data) | |||
427 | asoc->state, asoc->ep, asoc, transport, GFP_ATOMIC); | 431 | asoc->state, asoc->ep, asoc, transport, GFP_ATOMIC); |
428 | 432 | ||
429 | out_unlock: | 433 | out_unlock: |
430 | bh_unlock_sock(asoc->base.sk); | 434 | bh_unlock_sock(sk); |
431 | sctp_association_put(asoc); | 435 | sctp_association_put(asoc); |
432 | } | 436 | } |
433 | 437 | ||
diff --git a/net/sunrpc/xprtrdma/fmr_ops.c b/net/sunrpc/xprtrdma/fmr_ops.c index cb25c89da623..f1e8dafbd507 100644 --- a/net/sunrpc/xprtrdma/fmr_ops.c +++ b/net/sunrpc/xprtrdma/fmr_ops.c | |||
@@ -39,25 +39,6 @@ static int | |||
39 | fmr_op_open(struct rpcrdma_ia *ia, struct rpcrdma_ep *ep, | 39 | fmr_op_open(struct rpcrdma_ia *ia, struct rpcrdma_ep *ep, |
40 | struct rpcrdma_create_data_internal *cdata) | 40 | struct rpcrdma_create_data_internal *cdata) |
41 | { | 41 | { |
42 | struct ib_device_attr *devattr = &ia->ri_devattr; | ||
43 | struct ib_mr *mr; | ||
44 | |||
45 | /* Obtain an lkey to use for the regbufs, which are | ||
46 | * protected from remote access. | ||
47 | */ | ||
48 | if (devattr->device_cap_flags & IB_DEVICE_LOCAL_DMA_LKEY) { | ||
49 | ia->ri_dma_lkey = ia->ri_device->local_dma_lkey; | ||
50 | } else { | ||
51 | mr = ib_get_dma_mr(ia->ri_pd, IB_ACCESS_LOCAL_WRITE); | ||
52 | if (IS_ERR(mr)) { | ||
53 | pr_err("%s: ib_get_dma_mr for failed with %lX\n", | ||
54 | __func__, PTR_ERR(mr)); | ||
55 | return -ENOMEM; | ||
56 | } | ||
57 | ia->ri_dma_lkey = ia->ri_dma_mr->lkey; | ||
58 | ia->ri_dma_mr = mr; | ||
59 | } | ||
60 | |||
61 | return 0; | 42 | return 0; |
62 | } | 43 | } |
63 | 44 | ||
diff --git a/net/sunrpc/xprtrdma/frwr_ops.c b/net/sunrpc/xprtrdma/frwr_ops.c index d6653f5d0830..5318951b3b53 100644 --- a/net/sunrpc/xprtrdma/frwr_ops.c +++ b/net/sunrpc/xprtrdma/frwr_ops.c | |||
@@ -189,11 +189,6 @@ frwr_op_open(struct rpcrdma_ia *ia, struct rpcrdma_ep *ep, | |||
189 | struct ib_device_attr *devattr = &ia->ri_devattr; | 189 | struct ib_device_attr *devattr = &ia->ri_devattr; |
190 | int depth, delta; | 190 | int depth, delta; |
191 | 191 | ||
192 | /* Obtain an lkey to use for the regbufs, which are | ||
193 | * protected from remote access. | ||
194 | */ | ||
195 | ia->ri_dma_lkey = ia->ri_device->local_dma_lkey; | ||
196 | |||
197 | ia->ri_max_frmr_depth = | 192 | ia->ri_max_frmr_depth = |
198 | min_t(unsigned int, RPCRDMA_MAX_DATA_SEGS, | 193 | min_t(unsigned int, RPCRDMA_MAX_DATA_SEGS, |
199 | devattr->max_fast_reg_page_list_len); | 194 | devattr->max_fast_reg_page_list_len); |
diff --git a/net/sunrpc/xprtrdma/physical_ops.c b/net/sunrpc/xprtrdma/physical_ops.c index 72cf8b15bbb4..617b76f22154 100644 --- a/net/sunrpc/xprtrdma/physical_ops.c +++ b/net/sunrpc/xprtrdma/physical_ops.c | |||
@@ -23,7 +23,6 @@ static int | |||
23 | physical_op_open(struct rpcrdma_ia *ia, struct rpcrdma_ep *ep, | 23 | physical_op_open(struct rpcrdma_ia *ia, struct rpcrdma_ep *ep, |
24 | struct rpcrdma_create_data_internal *cdata) | 24 | struct rpcrdma_create_data_internal *cdata) |
25 | { | 25 | { |
26 | struct ib_device_attr *devattr = &ia->ri_devattr; | ||
27 | struct ib_mr *mr; | 26 | struct ib_mr *mr; |
28 | 27 | ||
29 | /* Obtain an rkey to use for RPC data payloads. | 28 | /* Obtain an rkey to use for RPC data payloads. |
@@ -37,15 +36,8 @@ physical_op_open(struct rpcrdma_ia *ia, struct rpcrdma_ep *ep, | |||
37 | __func__, PTR_ERR(mr)); | 36 | __func__, PTR_ERR(mr)); |
38 | return -ENOMEM; | 37 | return -ENOMEM; |
39 | } | 38 | } |
40 | ia->ri_dma_mr = mr; | ||
41 | |||
42 | /* Obtain an lkey to use for regbufs. | ||
43 | */ | ||
44 | if (devattr->device_cap_flags & IB_DEVICE_LOCAL_DMA_LKEY) | ||
45 | ia->ri_dma_lkey = ia->ri_device->local_dma_lkey; | ||
46 | else | ||
47 | ia->ri_dma_lkey = ia->ri_dma_mr->lkey; | ||
48 | 39 | ||
40 | ia->ri_dma_mr = mr; | ||
49 | return 0; | 41 | return 0; |
50 | } | 42 | } |
51 | 43 | ||
diff --git a/net/sunrpc/xprtrdma/verbs.c b/net/sunrpc/xprtrdma/verbs.c index 682996779970..eb081ad05e33 100644 --- a/net/sunrpc/xprtrdma/verbs.c +++ b/net/sunrpc/xprtrdma/verbs.c | |||
@@ -1252,7 +1252,7 @@ rpcrdma_alloc_regbuf(struct rpcrdma_ia *ia, size_t size, gfp_t flags) | |||
1252 | goto out_free; | 1252 | goto out_free; |
1253 | 1253 | ||
1254 | iov->length = size; | 1254 | iov->length = size; |
1255 | iov->lkey = ia->ri_dma_lkey; | 1255 | iov->lkey = ia->ri_pd->local_dma_lkey; |
1256 | rb->rg_size = size; | 1256 | rb->rg_size = size; |
1257 | rb->rg_owner = NULL; | 1257 | rb->rg_owner = NULL; |
1258 | return rb; | 1258 | return rb; |
diff --git a/net/sunrpc/xprtrdma/xprt_rdma.h b/net/sunrpc/xprtrdma/xprt_rdma.h index 02512221b8bc..c09414e6f91b 100644 --- a/net/sunrpc/xprtrdma/xprt_rdma.h +++ b/net/sunrpc/xprtrdma/xprt_rdma.h | |||
@@ -65,7 +65,6 @@ struct rpcrdma_ia { | |||
65 | struct rdma_cm_id *ri_id; | 65 | struct rdma_cm_id *ri_id; |
66 | struct ib_pd *ri_pd; | 66 | struct ib_pd *ri_pd; |
67 | struct ib_mr *ri_dma_mr; | 67 | struct ib_mr *ri_dma_mr; |
68 | u32 ri_dma_lkey; | ||
69 | struct completion ri_done; | 68 | struct completion ri_done; |
70 | int ri_async_rc; | 69 | int ri_async_rc; |
71 | unsigned int ri_max_frmr_depth; | 70 | unsigned int ri_max_frmr_depth; |
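Editorial note: the xprtrdma hunks above all serve one change — the per-connection ri_dma_lkey field, together with the IB_DEVICE_LOCAL_DMA_LKEY capability check and the ib_get_dma_mr() fallback that populated it, is dropped, and regbufs now take their lkey straight from the protection domain. This assumes a core IB layer in which ib_alloc_pd() guarantees a usable pd->local_dma_lkey. A hedged sketch with a hypothetical helper (the field name matches the verbs.c hunk above):

    /* Hypothetical helper: with the lkey coming from the PD, the
     * memory-registration open paths need no per-device branching.
     */
    static u32 rpcrdma_local_lkey(struct rpcrdma_ia *ia)
    {
        return ia->ri_pd->local_dma_lkey;
    }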
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c index 03ee4d359f6a..ef31b40ad550 100644 --- a/net/unix/af_unix.c +++ b/net/unix/af_unix.c | |||
@@ -2179,8 +2179,21 @@ unlock: | |||
2179 | if (UNIXCB(skb).fp) | 2179 | if (UNIXCB(skb).fp) |
2180 | scm.fp = scm_fp_dup(UNIXCB(skb).fp); | 2180 | scm.fp = scm_fp_dup(UNIXCB(skb).fp); |
2181 | 2181 | ||
2182 | sk_peek_offset_fwd(sk, chunk); | 2182 | if (skip) { |
2183 | sk_peek_offset_fwd(sk, chunk); | ||
2184 | skip -= chunk; | ||
2185 | } | ||
2183 | 2186 | ||
2187 | if (UNIXCB(skb).fp) | ||
2188 | break; | ||
2189 | |||
2190 | last = skb; | ||
2191 | last_len = skb->len; | ||
2192 | unix_state_lock(sk); | ||
2193 | skb = skb_peek_next(skb, &sk->sk_receive_queue); | ||
2194 | if (skb) | ||
2195 | goto again; | ||
2196 | unix_state_unlock(sk); | ||
2184 | break; | 2197 | break; |
2185 | } | 2198 | } |
2186 | } while (size); | 2199 | } while (size); |
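Editorial note: the af_unix hunk changes the SOCK_STREAM MSG_PEEK path — instead of stopping after the first skb, the loop advances the peek offset, stops only at skbs carrying SCM file descriptors, and jumps back to the "again" label for the next queued skb, so a peek can return data spanning several skbs. A hedged userspace illustration of the behaviour this enables (plain socket API; error handling elided):

    #include <sys/socket.h>
    #include <unistd.h>

    /* Peek at whatever is queued without consuming it, then read it for real. */
    static ssize_t peek_then_read(int fd, char *buf, size_t len)
    {
        ssize_t peeked = recv(fd, buf, len, MSG_PEEK);
        if (peeked <= 0)
            return peeked;
        return read(fd, buf, (size_t)peeked);
    }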
diff --git a/samples/kprobes/jprobe_example.c b/samples/kprobes/jprobe_example.c index 9119ac6a8270..c285a3b8a9f1 100644 --- a/samples/kprobes/jprobe_example.c +++ b/samples/kprobes/jprobe_example.c | |||
@@ -1,13 +1,13 @@ | |||
1 | /* | 1 | /* |
2 | * Here's a sample kernel module showing the use of jprobes to dump | 2 | * Here's a sample kernel module showing the use of jprobes to dump |
3 | * the arguments of do_fork(). | 3 | * the arguments of _do_fork(). |
4 | * | 4 | * |
5 | * For more information on theory of operation of jprobes, see | 5 | * For more information on theory of operation of jprobes, see |
6 | * Documentation/kprobes.txt | 6 | * Documentation/kprobes.txt |
7 | * | 7 | * |
8 | * Build and insert the kernel module as done in the kprobe example. | 8 | * Build and insert the kernel module as done in the kprobe example. |
9 | * You will see the trace data in /var/log/messages and on the | 9 | * You will see the trace data in /var/log/messages and on the |
10 | * console whenever do_fork() is invoked to create a new process. | 10 | * console whenever _do_fork() is invoked to create a new process. |
11 | * (Some messages may be suppressed if syslogd is configured to | 11 | * (Some messages may be suppressed if syslogd is configured to |
12 | * eliminate duplicate messages.) | 12 | * eliminate duplicate messages.) |
13 | */ | 13 | */ |
@@ -17,13 +17,13 @@ | |||
17 | #include <linux/kprobes.h> | 17 | #include <linux/kprobes.h> |
18 | 18 | ||
19 | /* | 19 | /* |
20 | * Jumper probe for do_fork. | 20 | * Jumper probe for _do_fork. |
21 | * Mirror principle enables access to arguments of the probed routine | 21 | * Mirror principle enables access to arguments of the probed routine |
22 | * from the probe handler. | 22 | * from the probe handler. |
23 | */ | 23 | */ |
24 | 24 | ||
25 | /* Proxy routine having the same arguments as actual do_fork() routine */ | 25 | /* Proxy routine having the same arguments as actual _do_fork() routine */ |
26 | static long jdo_fork(unsigned long clone_flags, unsigned long stack_start, | 26 | static long j_do_fork(unsigned long clone_flags, unsigned long stack_start, |
27 | unsigned long stack_size, int __user *parent_tidptr, | 27 | unsigned long stack_size, int __user *parent_tidptr, |
28 | int __user *child_tidptr) | 28 | int __user *child_tidptr) |
29 | { | 29 | { |
@@ -36,9 +36,9 @@ static long jdo_fork(unsigned long clone_flags, unsigned long stack_start, | |||
36 | } | 36 | } |
37 | 37 | ||
38 | static struct jprobe my_jprobe = { | 38 | static struct jprobe my_jprobe = { |
39 | .entry = jdo_fork, | 39 | .entry = j_do_fork, |
40 | .kp = { | 40 | .kp = { |
41 | .symbol_name = "do_fork", | 41 | .symbol_name = "_do_fork", |
42 | }, | 42 | }, |
43 | }; | 43 | }; |
44 | 44 | ||
diff --git a/samples/kprobes/kprobe_example.c b/samples/kprobes/kprobe_example.c index 366db1a9fb65..727eb21c9c56 100644 --- a/samples/kprobes/kprobe_example.c +++ b/samples/kprobes/kprobe_example.c | |||
@@ -1,13 +1,13 @@ | |||
1 | /* | 1 | /* |
2 | * NOTE: This example works on x86 and powerpc. | 2 | * NOTE: This example works on x86 and powerpc. |
3 | * Here's a sample kernel module showing the use of kprobes to dump a | 3 | * Here's a sample kernel module showing the use of kprobes to dump a |
4 | * stack trace and selected registers when do_fork() is called. | 4 | * stack trace and selected registers when _do_fork() is called. |
5 | * | 5 | * |
6 | * For more information on theory of operation of kprobes, see | 6 | * For more information on theory of operation of kprobes, see |
7 | * Documentation/kprobes.txt | 7 | * Documentation/kprobes.txt |
8 | * | 8 | * |
9 | * You will see the trace data in /var/log/messages and on the console | 9 | * You will see the trace data in /var/log/messages and on the console |
10 | * whenever do_fork() is invoked to create a new process. | 10 | * whenever _do_fork() is invoked to create a new process. |
11 | */ | 11 | */ |
12 | 12 | ||
13 | #include <linux/kernel.h> | 13 | #include <linux/kernel.h> |
@@ -16,7 +16,7 @@ | |||
16 | 16 | ||
17 | /* For each probe you need to allocate a kprobe structure */ | 17 | /* For each probe you need to allocate a kprobe structure */ |
18 | static struct kprobe kp = { | 18 | static struct kprobe kp = { |
19 | .symbol_name = "do_fork", | 19 | .symbol_name = "_do_fork", |
20 | }; | 20 | }; |
21 | 21 | ||
22 | /* kprobe pre_handler: called just before the probed instruction is executed */ | 22 | /* kprobe pre_handler: called just before the probed instruction is executed */ |
diff --git a/samples/kprobes/kretprobe_example.c b/samples/kprobes/kretprobe_example.c index 1041b6731598..ebb1d1aed547 100644 --- a/samples/kprobes/kretprobe_example.c +++ b/samples/kprobes/kretprobe_example.c | |||
@@ -7,7 +7,7 @@ | |||
7 | * | 7 | * |
8 | * usage: insmod kretprobe_example.ko func=<func_name> | 8 | * usage: insmod kretprobe_example.ko func=<func_name> |
9 | * | 9 | * |
10 | * If no func_name is specified, do_fork is instrumented | 10 | * If no func_name is specified, _do_fork is instrumented |
11 | * | 11 | * |
12 | * For more information on theory of operation of kretprobes, see | 12 | * For more information on theory of operation of kretprobes, see |
13 | * Documentation/kprobes.txt | 13 | * Documentation/kprobes.txt |
@@ -25,7 +25,7 @@ | |||
25 | #include <linux/limits.h> | 25 | #include <linux/limits.h> |
26 | #include <linux/sched.h> | 26 | #include <linux/sched.h> |
27 | 27 | ||
28 | static char func_name[NAME_MAX] = "do_fork"; | 28 | static char func_name[NAME_MAX] = "_do_fork"; |
29 | module_param_string(func, func_name, NAME_MAX, S_IRUGO); | 29 | module_param_string(func, func_name, NAME_MAX, S_IRUGO); |
30 | MODULE_PARM_DESC(func, "Function to kretprobe; this module will report the" | 30 | MODULE_PARM_DESC(func, "Function to kretprobe; this module will report the" |
31 | " function's execution time"); | 31 | " function's execution time"); |
diff --git a/scripts/extract-cert.c b/scripts/extract-cert.c index 6ce5945a0b89..b071bf476fea 100644 --- a/scripts/extract-cert.c +++ b/scripts/extract-cert.c | |||
@@ -17,13 +17,9 @@ | |||
17 | #include <stdint.h> | 17 | #include <stdint.h> |
18 | #include <stdbool.h> | 18 | #include <stdbool.h> |
19 | #include <string.h> | 19 | #include <string.h> |
20 | #include <getopt.h> | ||
21 | #include <err.h> | 20 | #include <err.h> |
22 | #include <arpa/inet.h> | ||
23 | #include <openssl/bio.h> | 21 | #include <openssl/bio.h> |
24 | #include <openssl/evp.h> | ||
25 | #include <openssl/pem.h> | 22 | #include <openssl/pem.h> |
26 | #include <openssl/pkcs7.h> | ||
27 | #include <openssl/err.h> | 23 | #include <openssl/err.h> |
28 | #include <openssl/engine.h> | 24 | #include <openssl/engine.h> |
29 | 25 | ||
diff --git a/scripts/sign-file.c b/scripts/sign-file.c index c3899ca4811c..250a7a645033 100755 --- a/scripts/sign-file.c +++ b/scripts/sign-file.c | |||
@@ -20,13 +20,34 @@ | |||
20 | #include <getopt.h> | 20 | #include <getopt.h> |
21 | #include <err.h> | 21 | #include <err.h> |
22 | #include <arpa/inet.h> | 22 | #include <arpa/inet.h> |
23 | #include <openssl/opensslv.h> | ||
23 | #include <openssl/bio.h> | 24 | #include <openssl/bio.h> |
24 | #include <openssl/evp.h> | 25 | #include <openssl/evp.h> |
25 | #include <openssl/pem.h> | 26 | #include <openssl/pem.h> |
26 | #include <openssl/cms.h> | ||
27 | #include <openssl/err.h> | 27 | #include <openssl/err.h> |
28 | #include <openssl/engine.h> | 28 | #include <openssl/engine.h> |
29 | 29 | ||
30 | /* | ||
31 | * Use CMS if we have openssl-1.0.0 or newer available - otherwise we have to | ||
32 | * assume that it's not available and its header file is missing and that we | ||
33 | * should use PKCS#7 instead. Switching to the older PKCS#7 format restricts | ||
34 | * the options we have on specifying the X.509 certificate we want. | ||
35 | * | ||
36 | * Further, older versions of OpenSSL don't support manually adding signers to | ||
37 | * the PKCS#7 message, so we have to accept that we get a certificate included in | ||
38 | * the signature message. Nor do such older versions of OpenSSL support | ||
39 | * signing with anything other than SHA1 - so we're stuck with that if such is | ||
40 | * the case. | ||
41 | */ | ||
42 | #if OPENSSL_VERSION_NUMBER < 0x10000000L | ||
43 | #define USE_PKCS7 | ||
44 | #endif | ||
45 | #ifndef USE_PKCS7 | ||
46 | #include <openssl/cms.h> | ||
47 | #else | ||
48 | #include <openssl/pkcs7.h> | ||
49 | #endif | ||
50 | |||
30 | struct module_signature { | 51 | struct module_signature { |
31 | uint8_t algo; /* Public-key crypto algorithm [0] */ | 52 | uint8_t algo; /* Public-key crypto algorithm [0] */ |
32 | uint8_t hash; /* Digest algorithm [0] */ | 53 | uint8_t hash; /* Digest algorithm [0] */ |
@@ -110,30 +131,42 @@ int main(int argc, char **argv) | |||
110 | struct module_signature sig_info = { .id_type = PKEY_ID_PKCS7 }; | 131 | struct module_signature sig_info = { .id_type = PKEY_ID_PKCS7 }; |
111 | char *hash_algo = NULL; | 132 | char *hash_algo = NULL; |
112 | char *private_key_name, *x509_name, *module_name, *dest_name; | 133 | char *private_key_name, *x509_name, *module_name, *dest_name; |
113 | bool save_cms = false, replace_orig; | 134 | bool save_sig = false, replace_orig; |
114 | bool sign_only = false; | 135 | bool sign_only = false; |
115 | unsigned char buf[4096]; | 136 | unsigned char buf[4096]; |
116 | unsigned long module_size, cms_size; | 137 | unsigned long module_size, sig_size; |
117 | unsigned int use_keyid = 0, use_signed_attrs = CMS_NOATTR; | 138 | unsigned int use_signed_attrs; |
118 | const EVP_MD *digest_algo; | 139 | const EVP_MD *digest_algo; |
119 | EVP_PKEY *private_key; | 140 | EVP_PKEY *private_key; |
141 | #ifndef USE_PKCS7 | ||
120 | CMS_ContentInfo *cms; | 142 | CMS_ContentInfo *cms; |
143 | unsigned int use_keyid = 0; | ||
144 | #else | ||
145 | PKCS7 *pkcs7; | ||
146 | #endif | ||
121 | X509 *x509; | 147 | X509 *x509; |
122 | BIO *b, *bd = NULL, *bm; | 148 | BIO *b, *bd = NULL, *bm; |
123 | int opt, n; | 149 | int opt, n; |
124 | |||
125 | OpenSSL_add_all_algorithms(); | 150 | OpenSSL_add_all_algorithms(); |
126 | ERR_load_crypto_strings(); | 151 | ERR_load_crypto_strings(); |
127 | ERR_clear_error(); | 152 | ERR_clear_error(); |
128 | 153 | ||
129 | key_pass = getenv("KBUILD_SIGN_PIN"); | 154 | key_pass = getenv("KBUILD_SIGN_PIN"); |
130 | 155 | ||
156 | #ifndef USE_PKCS7 | ||
157 | use_signed_attrs = CMS_NOATTR; | ||
158 | #else | ||
159 | use_signed_attrs = PKCS7_NOATTR; | ||
160 | #endif | ||
161 | |||
131 | do { | 162 | do { |
132 | opt = getopt(argc, argv, "dpk"); | 163 | opt = getopt(argc, argv, "dpk"); |
133 | switch (opt) { | 164 | switch (opt) { |
134 | case 'p': save_cms = true; break; | 165 | case 'p': save_sig = true; break; |
135 | case 'd': sign_only = true; save_cms = true; break; | 166 | case 'd': sign_only = true; save_sig = true; break; |
167 | #ifndef USE_PKCS7 | ||
136 | case 'k': use_keyid = CMS_USE_KEYID; break; | 168 | case 'k': use_keyid = CMS_USE_KEYID; break; |
169 | #endif | ||
137 | case -1: break; | 170 | case -1: break; |
138 | default: format(); | 171 | default: format(); |
139 | } | 172 | } |
@@ -157,6 +190,14 @@ int main(int argc, char **argv) | |||
157 | replace_orig = true; | 190 | replace_orig = true; |
158 | } | 191 | } |
159 | 192 | ||
193 | #ifdef USE_PKCS7 | ||
194 | if (strcmp(hash_algo, "sha1") != 0) { | ||
195 | fprintf(stderr, "sign-file: %s only supports SHA1 signing\n", | ||
196 | OPENSSL_VERSION_TEXT); | ||
197 | exit(3); | ||
198 | } | ||
199 | #endif | ||
200 | |||
160 | /* Read the private key and the X.509 cert the PKCS#7 message | 201 | /* Read the private key and the X.509 cert the PKCS#7 message |
161 | * will point to. | 202 | * will point to. |
162 | */ | 203 | */ |
@@ -213,7 +254,8 @@ int main(int argc, char **argv) | |||
213 | bm = BIO_new_file(module_name, "rb"); | 254 | bm = BIO_new_file(module_name, "rb"); |
214 | ERR(!bm, "%s", module_name); | 255 | ERR(!bm, "%s", module_name); |
215 | 256 | ||
216 | /* Load the CMS message from the digest buffer. */ | 257 | #ifndef USE_PKCS7 |
258 | /* Load the signature message from the digest buffer. */ | ||
217 | cms = CMS_sign(NULL, NULL, NULL, NULL, | 259 | cms = CMS_sign(NULL, NULL, NULL, NULL, |
218 | CMS_NOCERTS | CMS_PARTIAL | CMS_BINARY | CMS_DETACHED | CMS_STREAM); | 260 | CMS_NOCERTS | CMS_PARTIAL | CMS_BINARY | CMS_DETACHED | CMS_STREAM); |
219 | ERR(!cms, "CMS_sign"); | 261 | ERR(!cms, "CMS_sign"); |
@@ -221,17 +263,31 @@ int main(int argc, char **argv) | |||
221 | ERR(!CMS_add1_signer(cms, x509, private_key, digest_algo, | 263 | ERR(!CMS_add1_signer(cms, x509, private_key, digest_algo, |
222 | CMS_NOCERTS | CMS_BINARY | CMS_NOSMIMECAP | | 264 | CMS_NOCERTS | CMS_BINARY | CMS_NOSMIMECAP | |
223 | use_keyid | use_signed_attrs), | 265 | use_keyid | use_signed_attrs), |
224 | "CMS_sign_add_signer"); | 266 | "CMS_add1_signer"); |
225 | ERR(CMS_final(cms, bm, NULL, CMS_NOCERTS | CMS_BINARY) < 0, | 267 | ERR(CMS_final(cms, bm, NULL, CMS_NOCERTS | CMS_BINARY) < 0, |
226 | "CMS_final"); | 268 | "CMS_final"); |
227 | 269 | ||
228 | if (save_cms) { | 270 | #else |
229 | char *cms_name; | 271 | pkcs7 = PKCS7_sign(x509, private_key, NULL, bm, |
272 | PKCS7_NOCERTS | PKCS7_BINARY | | ||
273 | PKCS7_DETACHED | use_signed_attrs); | ||
274 | ERR(!pkcs7, "PKCS7_sign"); | ||
275 | #endif | ||
230 | 276 | ||
231 | ERR(asprintf(&cms_name, "%s.p7s", module_name) < 0, "asprintf"); | 277 | if (save_sig) { |
232 | b = BIO_new_file(cms_name, "wb"); | 278 | char *sig_file_name; |
233 | ERR(!b, "%s", cms_name); | 279 | |
234 | ERR(i2d_CMS_bio_stream(b, cms, NULL, 0) < 0, "%s", cms_name); | 280 | ERR(asprintf(&sig_file_name, "%s.p7s", module_name) < 0, |
281 | "asprintf"); | ||
282 | b = BIO_new_file(sig_file_name, "wb"); | ||
283 | ERR(!b, "%s", sig_file_name); | ||
284 | #ifndef USE_PKCS7 | ||
285 | ERR(i2d_CMS_bio_stream(b, cms, NULL, 0) < 0, | ||
286 | "%s", sig_file_name); | ||
287 | #else | ||
288 | ERR(i2d_PKCS7_bio(b, pkcs7) < 0, | ||
289 | "%s", sig_file_name); | ||
290 | #endif | ||
235 | BIO_free(b); | 291 | BIO_free(b); |
236 | } | 292 | } |
237 | 293 | ||
@@ -247,9 +303,13 @@ int main(int argc, char **argv) | |||
247 | ERR(n < 0, "%s", module_name); | 303 | ERR(n < 0, "%s", module_name); |
248 | module_size = BIO_number_written(bd); | 304 | module_size = BIO_number_written(bd); |
249 | 305 | ||
306 | #ifndef USE_PKCS7 | ||
250 | ERR(i2d_CMS_bio_stream(bd, cms, NULL, 0) < 0, "%s", dest_name); | 307 | ERR(i2d_CMS_bio_stream(bd, cms, NULL, 0) < 0, "%s", dest_name); |
251 | cms_size = BIO_number_written(bd) - module_size; | 308 | #else |
252 | sig_info.sig_len = htonl(cms_size); | 309 | ERR(i2d_PKCS7_bio(bd, pkcs7) < 0, "%s", dest_name); |
310 | #endif | ||
311 | sig_size = BIO_number_written(bd) - module_size; | ||
312 | sig_info.sig_len = htonl(sig_size); | ||
253 | ERR(BIO_write(bd, &sig_info, sizeof(sig_info)) < 0, "%s", dest_name); | 313 | ERR(BIO_write(bd, &sig_info, sizeof(sig_info)) < 0, "%s", dest_name); |
254 | ERR(BIO_write(bd, magic_number, sizeof(magic_number) - 1) < 0, "%s", dest_name); | 314 | ERR(BIO_write(bd, magic_number, sizeof(magic_number) - 1) < 0, "%s", dest_name); |
255 | 315 | ||
diff --git a/security/keys/gc.c b/security/keys/gc.c index c7952375ac53..39eac1fd5706 100644 --- a/security/keys/gc.c +++ b/security/keys/gc.c | |||
@@ -134,6 +134,10 @@ static noinline void key_gc_unused_keys(struct list_head *keys) | |||
134 | kdebug("- %u", key->serial); | 134 | kdebug("- %u", key->serial); |
135 | key_check(key); | 135 | key_check(key); |
136 | 136 | ||
137 | /* Throw away the key data */ | ||
138 | if (key->type->destroy) | ||
139 | key->type->destroy(key); | ||
140 | |||
137 | security_key_free(key); | 141 | security_key_free(key); |
138 | 142 | ||
139 | /* deal with the user's key tracking and quota */ | 143 | /* deal with the user's key tracking and quota */ |
@@ -148,10 +152,6 @@ static noinline void key_gc_unused_keys(struct list_head *keys) | |||
148 | if (test_bit(KEY_FLAG_INSTANTIATED, &key->flags)) | 152 | if (test_bit(KEY_FLAG_INSTANTIATED, &key->flags)) |
149 | atomic_dec(&key->user->nikeys); | 153 | atomic_dec(&key->user->nikeys); |
150 | 154 | ||
151 | /* now throw away the key memory */ | ||
152 | if (key->type->destroy) | ||
153 | key->type->destroy(key); | ||
154 | |||
155 | key_user_put(key->user); | 155 | key_user_put(key->user); |
156 | 156 | ||
157 | kfree(key->description); | 157 | kfree(key->description); |
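Editorial note: the two gc.c hunks together just reorder per-key teardown — key->type->destroy() now runs before security_key_free() and the quota/accounting updates rather than after them. The resulting sequence for each key on the GC list, in sketch form:

    /* Per-key teardown order in key_gc_unused_keys() after this change. */
    key_check(key);
    if (key->type->destroy)
        key->type->destroy(key);   /* 1. type-specific payload torn down first */
    security_key_free(key);        /* 2. LSM state released                    */
    /* 3. user key/quota accounting adjusted, then key_user_put(key->user)     */
    kfree(key->description);       /* 4. remaining metadata freed              */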
diff --git a/tools/build/Makefile.feature b/tools/build/Makefile.feature index 2975632d51e2..c8fe6d177119 100644 --- a/tools/build/Makefile.feature +++ b/tools/build/Makefile.feature | |||
@@ -41,6 +41,7 @@ FEATURE_TESTS ?= \ | |||
41 | libelf-getphdrnum \ | 41 | libelf-getphdrnum \ |
42 | libelf-mmap \ | 42 | libelf-mmap \ |
43 | libnuma \ | 43 | libnuma \ |
44 | numa_num_possible_cpus \ | ||
44 | libperl \ | 45 | libperl \ |
45 | libpython \ | 46 | libpython \ |
46 | libpython-version \ | 47 | libpython-version \ |
@@ -51,7 +52,8 @@ FEATURE_TESTS ?= \ | |||
51 | timerfd \ | 52 | timerfd \ |
52 | libdw-dwarf-unwind \ | 53 | libdw-dwarf-unwind \ |
53 | zlib \ | 54 | zlib \ |
54 | lzma | 55 | lzma \ |
56 | get_cpuid | ||
55 | 57 | ||
56 | FEATURE_DISPLAY ?= \ | 58 | FEATURE_DISPLAY ?= \ |
57 | dwarf \ | 59 | dwarf \ |
@@ -61,13 +63,15 @@ FEATURE_DISPLAY ?= \ | |||
61 | libbfd \ | 63 | libbfd \ |
62 | libelf \ | 64 | libelf \ |
63 | libnuma \ | 65 | libnuma \ |
66 | numa_num_possible_cpus \ | ||
64 | libperl \ | 67 | libperl \ |
65 | libpython \ | 68 | libpython \ |
66 | libslang \ | 69 | libslang \ |
67 | libunwind \ | 70 | libunwind \ |
68 | libdw-dwarf-unwind \ | 71 | libdw-dwarf-unwind \ |
69 | zlib \ | 72 | zlib \ |
70 | lzma | 73 | lzma \ |
74 | get_cpuid | ||
71 | 75 | ||
72 | # Set FEATURE_CHECK_(C|LD)FLAGS-all for all FEATURE_TESTS features. | 76 | # Set FEATURE_CHECK_(C|LD)FLAGS-all for all FEATURE_TESTS features. |
73 | # If in the future we need per-feature checks/flags for features not | 77 | # If in the future we need per-feature checks/flags for features not |
diff --git a/tools/build/feature/Makefile b/tools/build/feature/Makefile index 74ca42093d70..e43a2971bf56 100644 --- a/tools/build/feature/Makefile +++ b/tools/build/feature/Makefile | |||
@@ -19,6 +19,7 @@ FILES= \ | |||
19 | test-libelf-getphdrnum.bin \ | 19 | test-libelf-getphdrnum.bin \ |
20 | test-libelf-mmap.bin \ | 20 | test-libelf-mmap.bin \ |
21 | test-libnuma.bin \ | 21 | test-libnuma.bin \ |
22 | test-numa_num_possible_cpus.bin \ | ||
22 | test-libperl.bin \ | 23 | test-libperl.bin \ |
23 | test-libpython.bin \ | 24 | test-libpython.bin \ |
24 | test-libpython-version.bin \ | 25 | test-libpython-version.bin \ |
@@ -34,7 +35,8 @@ FILES= \ | |||
34 | test-compile-x32.bin \ | 35 | test-compile-x32.bin \ |
35 | test-zlib.bin \ | 36 | test-zlib.bin \ |
36 | test-lzma.bin \ | 37 | test-lzma.bin \ |
37 | test-bpf.bin | 38 | test-bpf.bin \ |
39 | test-get_cpuid.bin | ||
38 | 40 | ||
39 | CC := $(CROSS_COMPILE)gcc -MD | 41 | CC := $(CROSS_COMPILE)gcc -MD |
40 | PKG_CONFIG := $(CROSS_COMPILE)pkg-config | 42 | PKG_CONFIG := $(CROSS_COMPILE)pkg-config |
@@ -87,6 +89,9 @@ test-libelf-getphdrnum.bin: | |||
87 | test-libnuma.bin: | 89 | test-libnuma.bin: |
88 | $(BUILD) -lnuma | 90 | $(BUILD) -lnuma |
89 | 91 | ||
92 | test-numa_num_possible_cpus.bin: | ||
93 | $(BUILD) -lnuma | ||
94 | |||
90 | test-libunwind.bin: | 95 | test-libunwind.bin: |
91 | $(BUILD) -lelf | 96 | $(BUILD) -lelf |
92 | 97 | ||
@@ -162,6 +167,9 @@ test-zlib.bin: | |||
162 | test-lzma.bin: | 167 | test-lzma.bin: |
163 | $(BUILD) -llzma | 168 | $(BUILD) -llzma |
164 | 169 | ||
170 | test-get_cpuid.bin: | ||
171 | $(BUILD) | ||
172 | |||
165 | test-bpf.bin: | 173 | test-bpf.bin: |
166 | $(BUILD) | 174 | $(BUILD) |
167 | 175 | ||
diff --git a/tools/build/feature/test-all.c b/tools/build/feature/test-all.c index 84689a67814a..33cf6f20bd4e 100644 --- a/tools/build/feature/test-all.c +++ b/tools/build/feature/test-all.c | |||
@@ -77,6 +77,10 @@ | |||
77 | # include "test-libnuma.c" | 77 | # include "test-libnuma.c" |
78 | #undef main | 78 | #undef main |
79 | 79 | ||
80 | #define main main_test_numa_num_possible_cpus | ||
81 | # include "test-numa_num_possible_cpus.c" | ||
82 | #undef main | ||
83 | |||
80 | #define main main_test_timerfd | 84 | #define main main_test_timerfd |
81 | # include "test-timerfd.c" | 85 | # include "test-timerfd.c" |
82 | #undef main | 86 | #undef main |
@@ -117,6 +121,10 @@ | |||
117 | # include "test-lzma.c" | 121 | # include "test-lzma.c" |
118 | #undef main | 122 | #undef main |
119 | 123 | ||
124 | #define main main_test_get_cpuid | ||
125 | # include "test-get_cpuid.c" | ||
126 | #undef main | ||
127 | |||
120 | int main(int argc, char *argv[]) | 128 | int main(int argc, char *argv[]) |
121 | { | 129 | { |
122 | main_test_libpython(); | 130 | main_test_libpython(); |
@@ -136,6 +144,7 @@ int main(int argc, char *argv[]) | |||
136 | main_test_libbfd(); | 144 | main_test_libbfd(); |
137 | main_test_backtrace(); | 145 | main_test_backtrace(); |
138 | main_test_libnuma(); | 146 | main_test_libnuma(); |
147 | main_test_numa_num_possible_cpus(); | ||
139 | main_test_timerfd(); | 148 | main_test_timerfd(); |
140 | main_test_stackprotector_all(); | 149 | main_test_stackprotector_all(); |
141 | main_test_libdw_dwarf_unwind(); | 150 | main_test_libdw_dwarf_unwind(); |
@@ -143,6 +152,7 @@ int main(int argc, char *argv[]) | |||
143 | main_test_zlib(); | 152 | main_test_zlib(); |
144 | main_test_pthread_attr_setaffinity_np(); | 153 | main_test_pthread_attr_setaffinity_np(); |
145 | main_test_lzma(); | 154 | main_test_lzma(); |
155 | main_test_get_cpuid(); | ||
146 | 156 | ||
147 | return 0; | 157 | return 0; |
148 | } | 158 | } |
diff --git a/tools/build/feature/test-get_cpuid.c b/tools/build/feature/test-get_cpuid.c new file mode 100644 index 000000000000..d7a2c407130d --- /dev/null +++ b/tools/build/feature/test-get_cpuid.c | |||
@@ -0,0 +1,7 @@ | |||
1 | #include <cpuid.h> | ||
2 | |||
3 | int main(void) | ||
4 | { | ||
5 | unsigned int eax = 0, ebx = 0, ecx = 0, edx = 0; | ||
6 | return __get_cpuid(0x15, &eax, &ebx, &ecx, &edx); | ||
7 | } | ||
diff --git a/tools/build/feature/test-numa_num_possible_cpus.c b/tools/build/feature/test-numa_num_possible_cpus.c new file mode 100644 index 000000000000..2606e94b0659 --- /dev/null +++ b/tools/build/feature/test-numa_num_possible_cpus.c | |||
@@ -0,0 +1,6 @@ | |||
1 | #include <numa.h> | ||
2 | |||
3 | int main(void) | ||
4 | { | ||
5 | return numa_num_possible_cpus(); | ||
6 | } | ||
diff --git a/tools/lib/traceevent/event-parse.c b/tools/lib/traceevent/event-parse.c index 4d885934b919..cf42b090477b 100644 --- a/tools/lib/traceevent/event-parse.c +++ b/tools/lib/traceevent/event-parse.c | |||
@@ -3795,7 +3795,7 @@ static void print_str_arg(struct trace_seq *s, void *data, int size, | |||
3795 | struct format_field *field; | 3795 | struct format_field *field; |
3796 | struct printk_map *printk; | 3796 | struct printk_map *printk; |
3797 | long long val, fval; | 3797 | long long val, fval; |
3798 | unsigned long addr; | 3798 | unsigned long long addr; |
3799 | char *str; | 3799 | char *str; |
3800 | unsigned char *hex; | 3800 | unsigned char *hex; |
3801 | int print; | 3801 | int print; |
@@ -3828,13 +3828,30 @@ static void print_str_arg(struct trace_seq *s, void *data, int size, | |||
3828 | */ | 3828 | */ |
3829 | if (!(field->flags & FIELD_IS_ARRAY) && | 3829 | if (!(field->flags & FIELD_IS_ARRAY) && |
3830 | field->size == pevent->long_size) { | 3830 | field->size == pevent->long_size) { |
3831 | addr = *(unsigned long *)(data + field->offset); | 3831 | |
3832 | /* Handle heterogeneous recording and processing | ||
3833 | * architectures | ||
3834 | * | ||
3835 | * CASE I: | ||
3836 | * Traces recorded on 32-bit devices (32-bit | ||
3837 | * addressing) and processed on 64-bit devices: | ||
3838 | * In this case, only 32 bits should be read. | ||
3839 | * | ||
3840 | * CASE II: | ||
3841 | * Traces recorded on 64 bit devices and processed | ||
3842 | * on 32-bit devices: | ||
3843 | * In this case, 64 bits must be read. | ||
3844 | */ | ||
3845 | addr = (pevent->long_size == 8) ? | ||
3846 | *(unsigned long long *)(data + field->offset) : | ||
3847 | (unsigned long long)*(unsigned int *)(data + field->offset); | ||
3848 | |||
3832 | /* Check if it matches a print format */ | 3849 | /* Check if it matches a print format */ |
3833 | printk = find_printk(pevent, addr); | 3850 | printk = find_printk(pevent, addr); |
3834 | if (printk) | 3851 | if (printk) |
3835 | trace_seq_puts(s, printk->printk); | 3852 | trace_seq_puts(s, printk->printk); |
3836 | else | 3853 | else |
3837 | trace_seq_printf(s, "%lx", addr); | 3854 | trace_seq_printf(s, "%llx", addr); |
3838 | break; | 3855 | break; |
3839 | } | 3856 | } |
3840 | str = malloc(len + 1); | 3857 | str = malloc(len + 1); |
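Editorial note: the event-parse.c change widens the scratch variable to unsigned long long and reads the recorded pointer according to the trace's word size rather than the host's, so 32-bit traces decode correctly on 64-bit hosts and vice versa. A hedged, stand-alone restatement of the read (the helper name is invented for illustration):

    /* Zero-extend a pointer-sized field using the *recorded* long size. */
    static unsigned long long read_traced_pointer(const void *data, int offset,
                                                  int recorded_long_size)
    {
        const char *p = (const char *)data + offset;

        if (recorded_long_size == 8)
            return *(const unsigned long long *)p;
        return (unsigned long long)*(const unsigned int *)p;
    }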
diff --git a/tools/perf/Documentation/intel-pt.txt b/tools/perf/Documentation/intel-pt.txt index 4a0501d7a3b4..c94c9de3173e 100644 --- a/tools/perf/Documentation/intel-pt.txt +++ b/tools/perf/Documentation/intel-pt.txt | |||
@@ -364,21 +364,6 @@ cyc_thresh Specifies how frequently CYC packets are produced - see cyc | |||
364 | 364 | ||
365 | CYC packets are not requested by default. | 365 | CYC packets are not requested by default. |
366 | 366 | ||
367 | no_force_psb This is a driver option and is not in the IA32_RTIT_CTL MSR. | ||
368 | |||
369 | It stops the driver resetting the byte count to zero whenever | ||
370 | enabling the trace (for example on context switches) which in | ||
371 | turn results in no PSB being forced. However some processors | ||
372 | will produce a PSB anyway. | ||
373 | |||
374 | In any case, there is still a PSB when the trace is enabled for | ||
375 | the first time. | ||
376 | |||
377 | no_force_psb can be used to slightly decrease the trace size but | ||
378 | may make it harder for the decoder to recover from errors. | ||
379 | |||
380 | no_force_psb is not selected by default. | ||
381 | |||
382 | 367 | ||
383 | new snapshot option | 368 | new snapshot option |
384 | ------------------- | 369 | ------------------- |
diff --git a/tools/perf/config/Makefile b/tools/perf/config/Makefile index 827557fc7511..38a08539f4bf 100644 --- a/tools/perf/config/Makefile +++ b/tools/perf/config/Makefile | |||
@@ -573,9 +573,14 @@ ifndef NO_LIBNUMA | |||
573 | msg := $(warning No numa.h found, disables 'perf bench numa mem' benchmark, please install numactl-devel/libnuma-devel/libnuma-dev); | 573 | msg := $(warning No numa.h found, disables 'perf bench numa mem' benchmark, please install numactl-devel/libnuma-devel/libnuma-dev); |
574 | NO_LIBNUMA := 1 | 574 | NO_LIBNUMA := 1 |
575 | else | 575 | else |
576 | CFLAGS += -DHAVE_LIBNUMA_SUPPORT | 576 | ifeq ($(feature-numa_num_possible_cpus), 0) |
577 | EXTLIBS += -lnuma | 577 | msg := $(warning Old numa library found, disables 'perf bench numa mem' benchmark, please install numactl-devel/libnuma-devel/libnuma-dev >= 2.0.8); |
578 | $(call detected,CONFIG_NUMA) | 578 | NO_LIBNUMA := 1 |
579 | else | ||
580 | CFLAGS += -DHAVE_LIBNUMA_SUPPORT | ||
581 | EXTLIBS += -lnuma | ||
582 | $(call detected,CONFIG_NUMA) | ||
583 | endif | ||
579 | endif | 584 | endif |
580 | endif | 585 | endif |
581 | 586 | ||
@@ -621,8 +626,13 @@ ifdef LIBBABELTRACE | |||
621 | endif | 626 | endif |
622 | 627 | ||
623 | ifndef NO_AUXTRACE | 628 | ifndef NO_AUXTRACE |
624 | $(call detected,CONFIG_AUXTRACE) | 629 | ifeq ($(feature-get_cpuid), 0) |
625 | CFLAGS += -DHAVE_AUXTRACE_SUPPORT | 630 | msg := $(warning Your gcc lacks the __get_cpuid() builtin, disables support for auxtrace/Intel PT, please install a newer gcc); |
631 | NO_AUXTRACE := 1 | ||
632 | else | ||
633 | $(call detected,CONFIG_AUXTRACE) | ||
634 | CFLAGS += -DHAVE_AUXTRACE_SUPPORT | ||
635 | endif | ||
626 | endif | 636 | endif |
627 | 637 | ||
628 | # Among the variables below, these: | 638 | # Among the variables below, these: |
diff --git a/tools/perf/util/probe-event.c b/tools/perf/util/probe-event.c index eb5f18b75402..c6f9af78f6f5 100644 --- a/tools/perf/util/probe-event.c +++ b/tools/perf/util/probe-event.c | |||
@@ -270,12 +270,13 @@ static int kernel_get_module_dso(const char *module, struct dso **pdso) | |||
270 | int ret = 0; | 270 | int ret = 0; |
271 | 271 | ||
272 | if (module) { | 272 | if (module) { |
273 | list_for_each_entry(dso, &host_machine->dsos.head, node) { | 273 | char module_name[128]; |
274 | if (!dso->kernel) | 274 | |
275 | continue; | 275 | snprintf(module_name, sizeof(module_name), "[%s]", module); |
276 | if (strncmp(dso->short_name + 1, module, | 276 | map = map_groups__find_by_name(&host_machine->kmaps, MAP__FUNCTION, module_name); |
277 | dso->short_name_len - 2) == 0) | 277 | if (map) { |
278 | goto found; | 278 | dso = map->dso; |
279 | goto found; | ||
279 | } | 280 | } |
280 | pr_debug("Failed to find module %s.\n", module); | 281 | pr_debug("Failed to find module %s.\n", module); |
281 | return -ENOENT; | 282 | return -ENOENT; |
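Editorial note: the probe-event.c hunk replaces the substring scan over every kernel DSO with a direct map lookup — the module's map is registered under a bracketed short name (e.g. "[ext4]" for module "ext4", which is what the snprintf above constructs), so asking the kernel map group for that name yields the DSO in one step. A hypothetical helper wrapping the lookup added above:

    static struct dso *find_module_dso(struct machine *host, const char *module)
    {
        char module_name[128];
        struct map *map;

        snprintf(module_name, sizeof(module_name), "[%s]", module);
        map = map_groups__find_by_name(&host->kmaps, MAP__FUNCTION, module_name);
        return map ? map->dso : NULL;
    }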
diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c index 8a4537ee9bc3..fc3f7c922f99 100644 --- a/tools/perf/util/session.c +++ b/tools/perf/util/session.c | |||
@@ -1580,7 +1580,10 @@ static int __perf_session__process_events(struct perf_session *session, | |||
1580 | file_offset = page_offset; | 1580 | file_offset = page_offset; |
1581 | head = data_offset - page_offset; | 1581 | head = data_offset - page_offset; |
1582 | 1582 | ||
1583 | if (data_size && (data_offset + data_size < file_size)) | 1583 | if (data_size == 0) |
1584 | goto out; | ||
1585 | |||
1586 | if (data_offset + data_size < file_size) | ||
1584 | file_size = data_offset + data_size; | 1587 | file_size = data_offset + data_size; |
1585 | 1588 | ||
1586 | ui_progress__init(&prog, file_size, "Processing events..."); | 1589 | ui_progress__init(&prog, file_size, "Processing events..."); |
diff --git a/tools/perf/util/stat.c b/tools/perf/util/stat.c index 415c359de465..2d065d065b67 100644 --- a/tools/perf/util/stat.c +++ b/tools/perf/util/stat.c | |||
@@ -196,7 +196,8 @@ static void zero_per_pkg(struct perf_evsel *counter) | |||
196 | memset(counter->per_pkg_mask, 0, MAX_NR_CPUS); | 196 | memset(counter->per_pkg_mask, 0, MAX_NR_CPUS); |
197 | } | 197 | } |
198 | 198 | ||
199 | static int check_per_pkg(struct perf_evsel *counter, int cpu, bool *skip) | 199 | static int check_per_pkg(struct perf_evsel *counter, |
200 | struct perf_counts_values *vals, int cpu, bool *skip) | ||
200 | { | 201 | { |
201 | unsigned long *mask = counter->per_pkg_mask; | 202 | unsigned long *mask = counter->per_pkg_mask; |
202 | struct cpu_map *cpus = perf_evsel__cpus(counter); | 203 | struct cpu_map *cpus = perf_evsel__cpus(counter); |
@@ -218,6 +219,17 @@ static int check_per_pkg(struct perf_evsel *counter, int cpu, bool *skip) | |||
218 | counter->per_pkg_mask = mask; | 219 | counter->per_pkg_mask = mask; |
219 | } | 220 | } |
220 | 221 | ||
222 | /* | ||
223 | * we do not consider an event that has not run as a good | ||
224 | * instance to mark a package as used (skip=1). Otherwise | ||
225 | * we may run into a situation where the first CPU in a package | ||
226 | * is not running anything, yet the second is, and this function | ||
227 | * would mark the package as used after the first CPU and would | ||
228 | * not read the values from the second CPU. | ||
229 | */ | ||
230 | if (!(vals->run && vals->ena)) | ||
231 | return 0; | ||
232 | |||
221 | s = cpu_map__get_socket(cpus, cpu); | 233 | s = cpu_map__get_socket(cpus, cpu); |
222 | if (s < 0) | 234 | if (s < 0) |
223 | return -1; | 235 | return -1; |
@@ -235,7 +247,7 @@ process_counter_values(struct perf_stat_config *config, struct perf_evsel *evsel | |||
235 | static struct perf_counts_values zero; | 247 | static struct perf_counts_values zero; |
236 | bool skip = false; | 248 | bool skip = false; |
237 | 249 | ||
238 | if (check_per_pkg(evsel, cpu, &skip)) { | 250 | if (check_per_pkg(evsel, count, cpu, &skip)) { |
239 | pr_err("failed to read per-pkg counter\n"); | 251 | pr_err("failed to read per-pkg counter\n"); |
240 | return -1; | 252 | return -1; |
241 | } | 253 | } |
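Editorial note: the stat.c change threads the raw counts into check_per_pkg() so that a CPU whose counter never ran (run == 0 or ena == 0) cannot mark its package as already read; otherwise an idle first CPU in a package would cause the running second CPU's values to be dropped, as the added comment explains. A tiny hypothetical helper expressing the gate:

    /* A counter reading may claim its package only if it actually ran. */
    static bool counts_usable(const struct perf_counts_values *vals)
    {
        return vals->run && vals->ena;
    }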
diff --git a/tools/perf/util/symbol-elf.c b/tools/perf/util/symbol-elf.c index 53bb5f59ec58..475d88d0a1c9 100644 --- a/tools/perf/util/symbol-elf.c +++ b/tools/perf/util/symbol-elf.c | |||
@@ -38,7 +38,7 @@ static inline char *bfd_demangle(void __maybe_unused *v, | |||
38 | #endif | 38 | #endif |
39 | 39 | ||
40 | #ifndef HAVE_ELF_GETPHDRNUM_SUPPORT | 40 | #ifndef HAVE_ELF_GETPHDRNUM_SUPPORT |
41 | int elf_getphdrnum(Elf *elf, size_t *dst) | 41 | static int elf_getphdrnum(Elf *elf, size_t *dst) |
42 | { | 42 | { |
43 | GElf_Ehdr gehdr; | 43 | GElf_Ehdr gehdr; |
44 | GElf_Ehdr *ehdr; | 44 | GElf_Ehdr *ehdr; |
@@ -1271,8 +1271,6 @@ out_close: | |||
1271 | static int kcore__init(struct kcore *kcore, char *filename, int elfclass, | 1271 | static int kcore__init(struct kcore *kcore, char *filename, int elfclass, |
1272 | bool temp) | 1272 | bool temp) |
1273 | { | 1273 | { |
1274 | GElf_Ehdr *ehdr; | ||
1275 | |||
1276 | kcore->elfclass = elfclass; | 1274 | kcore->elfclass = elfclass; |
1277 | 1275 | ||
1278 | if (temp) | 1276 | if (temp) |
@@ -1289,9 +1287,7 @@ static int kcore__init(struct kcore *kcore, char *filename, int elfclass, | |||
1289 | if (!gelf_newehdr(kcore->elf, elfclass)) | 1287 | if (!gelf_newehdr(kcore->elf, elfclass)) |
1290 | goto out_end; | 1288 | goto out_end; |
1291 | 1289 | ||
1292 | ehdr = gelf_getehdr(kcore->elf, &kcore->ehdr); | 1290 | memset(&kcore->ehdr, 0, sizeof(GElf_Ehdr)); |
1293 | if (!ehdr) | ||
1294 | goto out_end; | ||
1295 | 1291 | ||
1296 | return 0; | 1292 | return 0; |
1297 | 1293 | ||
@@ -1348,23 +1344,18 @@ static int kcore__copy_hdr(struct kcore *from, struct kcore *to, size_t count) | |||
1348 | static int kcore__add_phdr(struct kcore *kcore, int idx, off_t offset, | 1344 | static int kcore__add_phdr(struct kcore *kcore, int idx, off_t offset, |
1349 | u64 addr, u64 len) | 1345 | u64 addr, u64 len) |
1350 | { | 1346 | { |
1351 | GElf_Phdr gphdr; | 1347 | GElf_Phdr phdr = { |
1352 | GElf_Phdr *phdr; | 1348 | .p_type = PT_LOAD, |
1353 | 1349 | .p_flags = PF_R | PF_W | PF_X, | |
1354 | phdr = gelf_getphdr(kcore->elf, idx, &gphdr); | 1350 | .p_offset = offset, |
1355 | if (!phdr) | 1351 | .p_vaddr = addr, |
1356 | return -1; | 1352 | .p_paddr = 0, |
1357 | 1353 | .p_filesz = len, | |
1358 | phdr->p_type = PT_LOAD; | 1354 | .p_memsz = len, |
1359 | phdr->p_flags = PF_R | PF_W | PF_X; | 1355 | .p_align = page_size, |
1360 | phdr->p_offset = offset; | 1356 | }; |
1361 | phdr->p_vaddr = addr; | 1357 | |
1362 | phdr->p_paddr = 0; | 1358 | if (!gelf_update_phdr(kcore->elf, idx, &phdr)) |
1363 | phdr->p_filesz = len; | ||
1364 | phdr->p_memsz = len; | ||
1365 | phdr->p_align = page_size; | ||
1366 | |||
1367 | if (!gelf_update_phdr(kcore->elf, idx, phdr)) | ||
1368 | return -1; | 1359 | return -1; |
1369 | 1360 | ||
1370 | return 0; | 1361 | return 0; |
diff --git a/tools/perf/util/util.c b/tools/perf/util/util.c index 7acafb3c5592..c2cd9bf2348b 100644 --- a/tools/perf/util/util.c +++ b/tools/perf/util/util.c | |||
@@ -709,7 +709,7 @@ bool find_process(const char *name) | |||
709 | 709 | ||
710 | dir = opendir(procfs__mountpoint()); | 710 | dir = opendir(procfs__mountpoint()); |
711 | if (!dir) | 711 | if (!dir) |
712 | return -1; | 712 | return false; |
713 | 713 | ||
714 | /* Walk through the directory. */ | 714 | /* Walk through the directory. */ |
715 | while (ret && (d = readdir(dir)) != NULL) { | 715 | while (ret && (d = readdir(dir)) != NULL) { |
diff --git a/tools/power/x86/turbostat/turbostat.c b/tools/power/x86/turbostat/turbostat.c index 9655cb49c7cb..bde0ef1a63df 100644 --- a/tools/power/x86/turbostat/turbostat.c +++ b/tools/power/x86/turbostat/turbostat.c | |||
@@ -71,8 +71,11 @@ unsigned int extra_msr_offset32; | |||
71 | unsigned int extra_msr_offset64; | 71 | unsigned int extra_msr_offset64; |
72 | unsigned int extra_delta_offset32; | 72 | unsigned int extra_delta_offset32; |
73 | unsigned int extra_delta_offset64; | 73 | unsigned int extra_delta_offset64; |
74 | unsigned int aperf_mperf_multiplier = 1; | ||
74 | int do_smi; | 75 | int do_smi; |
75 | double bclk; | 76 | double bclk; |
77 | double base_hz; | ||
78 | double tsc_tweak = 1.0; | ||
76 | unsigned int show_pkg; | 79 | unsigned int show_pkg; |
77 | unsigned int show_core; | 80 | unsigned int show_core; |
78 | unsigned int show_cpu; | 81 | unsigned int show_cpu; |
@@ -502,7 +505,7 @@ int format_counters(struct thread_data *t, struct core_data *c, | |||
502 | /* %Busy */ | 505 | /* %Busy */ |
503 | if (has_aperf) { | 506 | if (has_aperf) { |
504 | if (!skip_c0) | 507 | if (!skip_c0) |
505 | outp += sprintf(outp, "%8.2f", 100.0 * t->mperf/t->tsc); | 508 | outp += sprintf(outp, "%8.2f", 100.0 * t->mperf/t->tsc/tsc_tweak); |
506 | else | 509 | else |
507 | outp += sprintf(outp, "********"); | 510 | outp += sprintf(outp, "********"); |
508 | } | 511 | } |
@@ -510,7 +513,7 @@ int format_counters(struct thread_data *t, struct core_data *c, | |||
510 | /* Bzy_MHz */ | 513 | /* Bzy_MHz */ |
511 | if (has_aperf) | 514 | if (has_aperf) |
512 | outp += sprintf(outp, "%8.0f", | 515 | outp += sprintf(outp, "%8.0f", |
513 | 1.0 * t->tsc / units * t->aperf / t->mperf / interval_float); | 516 | 1.0 * t->tsc * tsc_tweak / units * t->aperf / t->mperf / interval_float); |
514 | 517 | ||
515 | /* TSC_MHz */ | 518 | /* TSC_MHz */ |
516 | outp += sprintf(outp, "%8.0f", 1.0 * t->tsc/units/interval_float); | 519 | outp += sprintf(outp, "%8.0f", 1.0 * t->tsc/units/interval_float); |
@@ -984,6 +987,8 @@ int get_counters(struct thread_data *t, struct core_data *c, struct pkg_data *p) | |||
984 | return -3; | 987 | return -3; |
985 | if (get_msr(cpu, MSR_IA32_MPERF, &t->mperf)) | 988 | if (get_msr(cpu, MSR_IA32_MPERF, &t->mperf)) |
986 | return -4; | 989 | return -4; |
990 | t->aperf = t->aperf * aperf_mperf_multiplier; | ||
991 | t->mperf = t->mperf * aperf_mperf_multiplier; | ||
987 | } | 992 | } |
988 | 993 | ||
989 | if (do_smi) { | 994 | if (do_smi) { |
@@ -1149,6 +1154,19 @@ int slv_pkg_cstate_limits[16] = {PCL__0, PCL__1, PCLRSV, PCLRSV, PCL__4, PCLRSV, | |||
1149 | int amt_pkg_cstate_limits[16] = {PCL__0, PCL__1, PCL__2, PCLRSV, PCLRSV, PCLRSV, PCL__6, PCL__7, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV}; | 1154 | int amt_pkg_cstate_limits[16] = {PCL__0, PCL__1, PCL__2, PCLRSV, PCLRSV, PCLRSV, PCL__6, PCL__7, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV}; |
1150 | int phi_pkg_cstate_limits[16] = {PCL__0, PCL__2, PCL_6N, PCL_6R, PCLRSV, PCLRSV, PCLRSV, PCLUNL, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV}; | 1155 | int phi_pkg_cstate_limits[16] = {PCL__0, PCL__2, PCL_6N, PCL_6R, PCLRSV, PCLRSV, PCLRSV, PCLUNL, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV}; |
1151 | 1156 | ||
1157 | |||
1158 | static void | ||
1159 | calculate_tsc_tweak() | ||
1160 | { | ||
1161 | unsigned long long msr; | ||
1162 | unsigned int base_ratio; | ||
1163 | |||
1164 | get_msr(base_cpu, MSR_NHM_PLATFORM_INFO, &msr); | ||
1165 | base_ratio = (msr >> 8) & 0xFF; | ||
1166 | base_hz = base_ratio * bclk * 1000000; | ||
1167 | tsc_tweak = base_hz / tsc_hz; | ||
1168 | } | ||
1169 | |||
1152 | static void | 1170 | static void |
1153 | dump_nhm_platform_info(void) | 1171 | dump_nhm_platform_info(void) |
1154 | { | 1172 | { |
@@ -1926,8 +1944,6 @@ int has_config_tdp(unsigned int family, unsigned int model) | |||
1926 | 1944 | ||
1927 | switch (model) { | 1945 | switch (model) { |
1928 | case 0x3A: /* IVB */ | 1946 | case 0x3A: /* IVB */ |
1929 | case 0x3E: /* IVB Xeon */ | ||
1930 | |||
1931 | case 0x3C: /* HSW */ | 1947 | case 0x3C: /* HSW */ |
1932 | case 0x3F: /* HSX */ | 1948 | case 0x3F: /* HSX */ |
1933 | case 0x45: /* HSW */ | 1949 | case 0x45: /* HSW */ |
@@ -2543,6 +2559,13 @@ int is_knl(unsigned int family, unsigned int model) | |||
2543 | return 0; | 2559 | return 0; |
2544 | } | 2560 | } |
2545 | 2561 | ||
2562 | unsigned int get_aperf_mperf_multiplier(unsigned int family, unsigned int model) | ||
2563 | { | ||
2564 | if (is_knl(family, model)) | ||
2565 | return 1024; | ||
2566 | return 1; | ||
2567 | } | ||
2568 | |||
2546 | #define SLM_BCLK_FREQS 5 | 2569 | #define SLM_BCLK_FREQS 5 |
2547 | double slm_freq_table[SLM_BCLK_FREQS] = { 83.3, 100.0, 133.3, 116.7, 80.0}; | 2570 | double slm_freq_table[SLM_BCLK_FREQS] = { 83.3, 100.0, 133.3, 116.7, 80.0}; |
2548 | 2571 | ||
@@ -2744,6 +2767,9 @@ void process_cpuid() | |||
2744 | } | 2767 | } |
2745 | } | 2768 | } |
2746 | 2769 | ||
2770 | if (has_aperf) | ||
2771 | aperf_mperf_multiplier = get_aperf_mperf_multiplier(family, model); | ||
2772 | |||
2747 | do_nhm_platform_info = do_nhm_cstates = do_smi = probe_nhm_msrs(family, model); | 2773 | do_nhm_platform_info = do_nhm_cstates = do_smi = probe_nhm_msrs(family, model); |
2748 | do_snb_cstates = has_snb_msrs(family, model); | 2774 | do_snb_cstates = has_snb_msrs(family, model); |
2749 | do_pc2 = do_snb_cstates && (pkg_cstate_limit >= PCL__2); | 2775 | do_pc2 = do_snb_cstates && (pkg_cstate_limit >= PCL__2); |
@@ -2762,6 +2788,9 @@ void process_cpuid() | |||
2762 | if (debug) | 2788 | if (debug) |
2763 | dump_cstate_pstate_config_info(); | 2789 | dump_cstate_pstate_config_info(); |
2764 | 2790 | ||
2791 | if (has_skl_msrs(family, model)) | ||
2792 | calculate_tsc_tweak(); | ||
2793 | |||
2765 | return; | 2794 | return; |
2766 | } | 2795 | } |
2767 | 2796 | ||
@@ -3090,7 +3119,7 @@ int get_and_dump_counters(void) | |||
3090 | } | 3119 | } |
3091 | 3120 | ||
3092 | void print_version() { | 3121 | void print_version() { |
3093 | fprintf(stderr, "turbostat version 4.7 17-June, 2015" | 3122 | fprintf(stderr, "turbostat version 4.8 26-Sep, 2015" |
3094 | " - Len Brown <lenb@kernel.org>\n"); | 3123 | " - Len Brown <lenb@kernel.org>\n"); |
3095 | } | 3124 | } |
3096 | 3125 | ||
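Editorial note: the turbostat changes add two correction factors — APERF/MPERF readings are scaled by 1024 on KNL (get_aperf_mperf_multiplier() above), and on parts with the SKL MSRs the TSC-derived figures are rescaled by tsc_tweak = base_hz / tsc_hz, where base_hz is derived from MSR_NHM_PLATFORM_INFO. A hedged restatement of the two display formulas as plain functions (names invented; turbostat computes these inline in format_counters(), with "units" being its MHz scaling and interval_sec the measurement interval):

    /* %Busy column: non-idle fraction of the interval, TSC-corrected. */
    static double busy_percent(unsigned long long mperf, unsigned long long tsc,
                               double tsc_tweak)
    {
        return 100.0 * mperf / tsc / tsc_tweak;
    }

    /* Bzy_MHz column: average busy-clock frequency over the interval. */
    static double bzy_mhz(unsigned long long aperf, unsigned long long mperf,
                          unsigned long long tsc, double tsc_tweak,
                          double units, double interval_sec)
    {
        return 1.0 * tsc * tsc_tweak / units * aperf / mperf / interval_sec;
    }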