161 files changed, 1414 insertions, 680 deletions
diff --git a/Documentation/driver-api/fpga/fpga-mgr.rst b/Documentation/driver-api/fpga/fpga-mgr.rst
index 4b3825da48d9..82b6dbbd31cd 100644
--- a/Documentation/driver-api/fpga/fpga-mgr.rst
+++ b/Documentation/driver-api/fpga/fpga-mgr.rst
@@ -184,6 +184,11 @@ API for implementing a new FPGA Manager driver | |||
184 | API for programming an FPGA | 184 | API for programming an FPGA |
185 | --------------------------- | 185 | --------------------------- |
186 | 186 | ||
187 | FPGA Manager flags | ||
188 | |||
189 | .. kernel-doc:: include/linux/fpga/fpga-mgr.h | ||
190 | :doc: FPGA Manager flags | ||
191 | |||
187 | .. kernel-doc:: include/linux/fpga/fpga-mgr.h | 192 | .. kernel-doc:: include/linux/fpga/fpga-mgr.h |
188 | :functions: fpga_image_info | 193 | :functions: fpga_image_info |
189 | 194 | ||
diff --git a/MAINTAINERS b/MAINTAINERS
index fe223e606b9d..6d5161def3f3 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -9680,7 +9680,8 @@ MIPS/LOONGSON2 ARCHITECTURE | |||
9680 | M: Jiaxun Yang <jiaxun.yang@flygoat.com> | 9680 | M: Jiaxun Yang <jiaxun.yang@flygoat.com> |
9681 | L: linux-mips@linux-mips.org | 9681 | L: linux-mips@linux-mips.org |
9682 | S: Maintained | 9682 | S: Maintained |
9683 | F: arch/mips/loongson64/*{2e/2f}* | 9683 | F: arch/mips/loongson64/fuloong-2e/ |
9684 | F: arch/mips/loongson64/lemote-2f/ | ||
9684 | F: arch/mips/include/asm/mach-loongson64/ | 9685 | F: arch/mips/include/asm/mach-loongson64/ |
9685 | F: drivers/*/*loongson2* | 9686 | F: drivers/*/*loongson2* |
9686 | F: drivers/*/*/*loongson2* | 9687 | F: drivers/*/*/*loongson2* |
@@ -9887,7 +9888,7 @@ M: Peter Rosin <peda@axentia.se> | |||
9887 | S: Maintained | 9888 | S: Maintained |
9888 | F: Documentation/ABI/testing/sysfs-class-mux* | 9889 | F: Documentation/ABI/testing/sysfs-class-mux* |
9889 | F: Documentation/devicetree/bindings/mux/ | 9890 | F: Documentation/devicetree/bindings/mux/ |
9890 | F: include/linux/dt-bindings/mux/ | 9891 | F: include/dt-bindings/mux/ |
9891 | F: include/linux/mux/ | 9892 | F: include/linux/mux/ |
9892 | F: drivers/mux/ | 9893 | F: drivers/mux/ |
9893 | 9894 | ||
diff --git a/Makefile b/Makefile
@@ -2,7 +2,7 @@ | |||
2 | VERSION = 4 | 2 | VERSION = 4 |
3 | PATCHLEVEL = 19 | 3 | PATCHLEVEL = 19 |
4 | SUBLEVEL = 0 | 4 | SUBLEVEL = 0 |
5 | EXTRAVERSION = -rc6 | 5 | EXTRAVERSION = -rc7 |
6 | NAME = Merciless Moray | 6 | NAME = Merciless Moray |
7 | 7 | ||
8 | # *DOCUMENTATION* | 8 | # *DOCUMENTATION* |
@@ -483,13 +483,15 @@ endif | |||
483 | ifeq ($(cc-name),clang) | 483 | ifeq ($(cc-name),clang) |
484 | ifneq ($(CROSS_COMPILE),) | 484 | ifneq ($(CROSS_COMPILE),) |
485 | CLANG_TARGET := --target=$(notdir $(CROSS_COMPILE:%-=%)) | 485 | CLANG_TARGET := --target=$(notdir $(CROSS_COMPILE:%-=%)) |
486 | GCC_TOOLCHAIN := $(realpath $(dir $(shell which $(LD)))/..) | 486 | GCC_TOOLCHAIN_DIR := $(dir $(shell which $(LD))) |
487 | CLANG_PREFIX := --prefix=$(GCC_TOOLCHAIN_DIR) | ||
488 | GCC_TOOLCHAIN := $(realpath $(GCC_TOOLCHAIN_DIR)/..) | ||
487 | endif | 489 | endif |
488 | ifneq ($(GCC_TOOLCHAIN),) | 490 | ifneq ($(GCC_TOOLCHAIN),) |
489 | CLANG_GCC_TC := --gcc-toolchain=$(GCC_TOOLCHAIN) | 491 | CLANG_GCC_TC := --gcc-toolchain=$(GCC_TOOLCHAIN) |
490 | endif | 492 | endif |
491 | KBUILD_CFLAGS += $(CLANG_TARGET) $(CLANG_GCC_TC) | 493 | KBUILD_CFLAGS += $(CLANG_TARGET) $(CLANG_GCC_TC) $(CLANG_PREFIX) |
492 | KBUILD_AFLAGS += $(CLANG_TARGET) $(CLANG_GCC_TC) | 494 | KBUILD_AFLAGS += $(CLANG_TARGET) $(CLANG_GCC_TC) $(CLANG_PREFIX) |
493 | KBUILD_CFLAGS += $(call cc-option, -no-integrated-as) | 495 | KBUILD_CFLAGS += $(call cc-option, -no-integrated-as) |
494 | KBUILD_AFLAGS += $(call cc-option, -no-integrated-as) | 496 | KBUILD_AFLAGS += $(call cc-option, -no-integrated-as) |
495 | endif | 497 | endif |
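Note: --prefix= points clang at the directory holding the cross binutils (the same directory as $(LD)), so the driver can find the prefixed assembler and linker; --gcc-toolchain= alone only names the GCC installation one level above. For example (hypothetical paths), CROSS_COMPILE=aarch64-linux-gnu- with the toolchain in /opt/cross/bin would yield --target=aarch64-linux-gnu --prefix=/opt/cross/bin/ --gcc-toolchain=/opt/cross.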
diff --git a/arch/arc/Kconfig b/arch/arc/Kconfig
index b4441b0764d7..a045f3086047 100644
--- a/arch/arc/Kconfig
+++ b/arch/arc/Kconfig
@@ -149,7 +149,7 @@ config ARC_CPU_770 | |||
149 | Support for ARC770 core introduced with Rel 4.10 (Summer 2011) | 149 | Support for ARC770 core introduced with Rel 4.10 (Summer 2011) |
150 | This core has a bunch of cool new features: | 150 | This core has a bunch of cool new features: |
151 | -MMU-v3: Variable Page Sz (4k, 8k, 16k), bigger J-TLB (128x4) | 151 | -MMU-v3: Variable Page Sz (4k, 8k, 16k), bigger J-TLB (128x4) |
152 | Shared Address Spaces (for sharing TLB entires in MMU) | 152 | Shared Address Spaces (for sharing TLB entries in MMU) |
153 | -Caches: New Prog Model, Region Flush | 153 | -Caches: New Prog Model, Region Flush |
154 | -Insns: endian swap, load-locked/store-conditional, time-stamp-ctr | 154 | -Insns: endian swap, load-locked/store-conditional, time-stamp-ctr |
155 | 155 | ||
diff --git a/arch/arc/Makefile b/arch/arc/Makefile
index 99cce77ab98f..644815c0516e 100644
--- a/arch/arc/Makefile
+++ b/arch/arc/Makefile
@@ -6,33 +6,11 @@ | |||
6 | # published by the Free Software Foundation. | 6 | # published by the Free Software Foundation. |
7 | # | 7 | # |
8 | 8 | ||
9 | ifeq ($(CROSS_COMPILE),) | ||
10 | ifndef CONFIG_CPU_BIG_ENDIAN | ||
11 | CROSS_COMPILE := arc-linux- | ||
12 | else | ||
13 | CROSS_COMPILE := arceb-linux- | ||
14 | endif | ||
15 | endif | ||
16 | |||
17 | KBUILD_DEFCONFIG := nsim_700_defconfig | 9 | KBUILD_DEFCONFIG := nsim_700_defconfig |
18 | 10 | ||
19 | cflags-y += -fno-common -pipe -fno-builtin -mmedium-calls -D__linux__ | 11 | cflags-y += -fno-common -pipe -fno-builtin -mmedium-calls -D__linux__ |
20 | cflags-$(CONFIG_ISA_ARCOMPACT) += -mA7 | 12 | cflags-$(CONFIG_ISA_ARCOMPACT) += -mA7 |
21 | cflags-$(CONFIG_ISA_ARCV2) += -mcpu=archs | 13 | cflags-$(CONFIG_ISA_ARCV2) += -mcpu=hs38 |
22 | |||
23 | is_700 = $(shell $(CC) -dM -E - < /dev/null | grep -q "ARC700" && echo 1 || echo 0) | ||
24 | |||
25 | ifdef CONFIG_ISA_ARCOMPACT | ||
26 | ifeq ($(is_700), 0) | ||
27 | $(error Toolchain not configured for ARCompact builds) | ||
28 | endif | ||
29 | endif | ||
30 | |||
31 | ifdef CONFIG_ISA_ARCV2 | ||
32 | ifeq ($(is_700), 1) | ||
33 | $(error Toolchain not configured for ARCv2 builds) | ||
34 | endif | ||
35 | endif | ||
36 | 14 | ||
37 | ifdef CONFIG_ARC_CURR_IN_REG | 15 | ifdef CONFIG_ARC_CURR_IN_REG |
38 | # For a global register defintion, make sure it gets passed to every file | 16 | # For a global register defintion, make sure it gets passed to every file |
@@ -79,7 +57,7 @@ cflags-$(disable_small_data) += -mno-sdata -fcall-used-gp | |||
79 | cflags-$(CONFIG_CPU_BIG_ENDIAN) += -mbig-endian | 57 | cflags-$(CONFIG_CPU_BIG_ENDIAN) += -mbig-endian |
80 | ldflags-$(CONFIG_CPU_BIG_ENDIAN) += -EB | 58 | ldflags-$(CONFIG_CPU_BIG_ENDIAN) += -EB |
81 | 59 | ||
82 | LIBGCC := $(shell $(CC) $(cflags-y) --print-libgcc-file-name) | 60 | LIBGCC = $(shell $(CC) $(cflags-y) --print-libgcc-file-name) |
83 | 61 | ||
84 | # Modules with short calls might break for calls into builtin-kernel | 62 | # Modules with short calls might break for calls into builtin-kernel |
85 | KBUILD_CFLAGS_MODULE += -mlong-calls -mno-millicode | 63 | KBUILD_CFLAGS_MODULE += -mlong-calls -mno-millicode |
diff --git a/arch/arc/kernel/process.c b/arch/arc/kernel/process.c
index 4674541eba3f..8ce6e7235915 100644
--- a/arch/arc/kernel/process.c
+++ b/arch/arc/kernel/process.c
@@ -241,6 +241,26 @@ int copy_thread(unsigned long clone_flags, | |||
241 | task_thread_info(current)->thr_ptr; | 241 | task_thread_info(current)->thr_ptr; |
242 | } | 242 | } |
243 | 243 | ||
244 | |||
245 | /* | ||
246 | * setup usermode thread pointer #1: | ||
247 | * when child is picked by scheduler, __switch_to() uses @c_callee to | ||
248 | * populate usermode callee regs: this works (despite being in a kernel | ||
249 | * function) since special return path for child @ret_from_fork() | ||
250 | * ensures those regs are not clobbered all the way to RTIE to usermode | ||
251 | */ | ||
252 | c_callee->r25 = task_thread_info(p)->thr_ptr; | ||
253 | |||
254 | #ifdef CONFIG_ARC_CURR_IN_REG | ||
255 | /* | ||
256 | * setup usermode thread pointer #2: | ||
257 | * however for this special use of r25 in kernel, __switch_to() sets | ||
258 | * r25 for kernel needs and only in the final return path is usermode | ||
259 | * r25 setup, from pt_regs->user_r25. So set that up as well | ||
260 | */ | ||
261 | c_regs->user_r25 = c_callee->r25; | ||
262 | #endif | ||
263 | |||
244 | return 0; | 264 | return 0; |
245 | } | 265 | } |
246 | 266 | ||
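Note: on ARC the ABI uses r25 as the userspace thread pointer, and with CONFIG_ARC_CURR_IN_REG the kernel reserves the same register for `current`; that is why the TLS value has to be planted both in the callee-saved image restored by __switch_to() and in pt_regs->user_r25 for the final return to user mode.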
diff --git a/arch/arm/boot/dts/imx53-qsb-common.dtsi b/arch/arm/boot/dts/imx53-qsb-common.dtsi
index 7423d462d1e4..50dde84b72ed 100644
--- a/arch/arm/boot/dts/imx53-qsb-common.dtsi
+++ b/arch/arm/boot/dts/imx53-qsb-common.dtsi
@@ -123,6 +123,17 @@ | |||
123 | }; | 123 | }; |
124 | }; | 124 | }; |
125 | 125 | ||
126 | &cpu0 { | ||
127 | /* CPU rated to 1GHz, not 1.2GHz as per the default settings */ | ||
128 | operating-points = < | ||
129 | /* kHz uV */ | ||
130 | 166666 850000 | ||
131 | 400000 900000 | ||
132 | 800000 1050000 | ||
133 | 1000000 1200000 | ||
134 | >; | ||
135 | }; | ||
136 | |||
126 | &esdhc1 { | 137 | &esdhc1 { |
127 | pinctrl-names = "default"; | 138 | pinctrl-names = "default"; |
128 | pinctrl-0 = <&pinctrl_esdhc1>; | 139 | pinctrl-0 = <&pinctrl_esdhc1>; |
diff --git a/arch/arm/kernel/vmlinux.lds.h b/arch/arm/kernel/vmlinux.lds.h
index ae5fdff18406..8247bc15addc 100644
--- a/arch/arm/kernel/vmlinux.lds.h
+++ b/arch/arm/kernel/vmlinux.lds.h
@@ -49,6 +49,8 @@ | |||
49 | #define ARM_DISCARD \ | 49 | #define ARM_DISCARD \ |
50 | *(.ARM.exidx.exit.text) \ | 50 | *(.ARM.exidx.exit.text) \ |
51 | *(.ARM.extab.exit.text) \ | 51 | *(.ARM.extab.exit.text) \ |
52 | *(.ARM.exidx.text.exit) \ | ||
53 | *(.ARM.extab.text.exit) \ | ||
52 | ARM_CPU_DISCARD(*(.ARM.exidx.cpuexit.text)) \ | 54 | ARM_CPU_DISCARD(*(.ARM.exidx.cpuexit.text)) \ |
53 | ARM_CPU_DISCARD(*(.ARM.extab.cpuexit.text)) \ | 55 | ARM_CPU_DISCARD(*(.ARM.extab.cpuexit.text)) \ |
54 | ARM_EXIT_DISCARD(EXIT_TEXT) \ | 56 | ARM_EXIT_DISCARD(EXIT_TEXT) \ |
diff --git a/arch/mips/include/asm/processor.h b/arch/mips/include/asm/processor.h
index b2fa62922d88..49d6046ca1d0 100644
--- a/arch/mips/include/asm/processor.h
+++ b/arch/mips/include/asm/processor.h
@@ -13,6 +13,7 @@ | |||
13 | 13 | ||
14 | #include <linux/atomic.h> | 14 | #include <linux/atomic.h> |
15 | #include <linux/cpumask.h> | 15 | #include <linux/cpumask.h> |
16 | #include <linux/sizes.h> | ||
16 | #include <linux/threads.h> | 17 | #include <linux/threads.h> |
17 | 18 | ||
18 | #include <asm/cachectl.h> | 19 | #include <asm/cachectl.h> |
@@ -80,11 +81,10 @@ extern unsigned int vced_count, vcei_count; | |||
80 | 81 | ||
81 | #endif | 82 | #endif |
82 | 83 | ||
83 | /* | 84 | #define VDSO_RANDOMIZE_SIZE (TASK_IS_32BIT_ADDR ? SZ_1M : SZ_256M) |
84 | * One page above the stack is used for branch delay slot "emulation". | 85 | |
85 | * See dsemul.c for details. | 86 | extern unsigned long mips_stack_top(void); |
86 | */ | 87 | #define STACK_TOP mips_stack_top() |
87 | #define STACK_TOP ((TASK_SIZE & PAGE_MASK) - PAGE_SIZE) | ||
88 | 88 | ||
89 | /* | 89 | /* |
90 | * This decides where the kernel will search for a free chunk of vm | 90 | * This decides where the kernel will search for a free chunk of vm |
diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
index 8fc69891e117..d4f7fd4550e1 100644
--- a/arch/mips/kernel/process.c
+++ b/arch/mips/kernel/process.c
@@ -32,6 +32,7 @@ | |||
32 | #include <linux/nmi.h> | 32 | #include <linux/nmi.h> |
33 | #include <linux/cpu.h> | 33 | #include <linux/cpu.h> |
34 | 34 | ||
35 | #include <asm/abi.h> | ||
35 | #include <asm/asm.h> | 36 | #include <asm/asm.h> |
36 | #include <asm/bootinfo.h> | 37 | #include <asm/bootinfo.h> |
37 | #include <asm/cpu.h> | 38 | #include <asm/cpu.h> |
@@ -39,6 +40,7 @@ | |||
39 | #include <asm/dsp.h> | 40 | #include <asm/dsp.h> |
40 | #include <asm/fpu.h> | 41 | #include <asm/fpu.h> |
41 | #include <asm/irq.h> | 42 | #include <asm/irq.h> |
43 | #include <asm/mips-cps.h> | ||
42 | #include <asm/msa.h> | 44 | #include <asm/msa.h> |
43 | #include <asm/pgtable.h> | 45 | #include <asm/pgtable.h> |
44 | #include <asm/mipsregs.h> | 46 | #include <asm/mipsregs.h> |
@@ -645,6 +647,29 @@ out: | |||
645 | return pc; | 647 | return pc; |
646 | } | 648 | } |
647 | 649 | ||
650 | unsigned long mips_stack_top(void) | ||
651 | { | ||
652 | unsigned long top = TASK_SIZE & PAGE_MASK; | ||
653 | |||
654 | /* One page for branch delay slot "emulation" */ | ||
655 | top -= PAGE_SIZE; | ||
656 | |||
657 | /* Space for the VDSO, data page & GIC user page */ | ||
658 | top -= PAGE_ALIGN(current->thread.abi->vdso->size); | ||
659 | top -= PAGE_SIZE; | ||
660 | top -= mips_gic_present() ? PAGE_SIZE : 0; | ||
661 | |||
662 | /* Space for cache colour alignment */ | ||
663 | if (cpu_has_dc_aliases) | ||
664 | top -= shm_align_mask + 1; | ||
665 | |||
666 | /* Space to randomize the VDSO base */ | ||
667 | if (current->flags & PF_RANDOMIZE) | ||
668 | top -= VDSO_RANDOMIZE_SIZE; | ||
669 | |||
670 | return top; | ||
671 | } | ||
672 | |||
648 | /* | 673 | /* |
649 | * Don't forget that the stack pointer must be aligned on a 8 bytes | 674 | * Don't forget that the stack pointer must be aligned on a 8 bytes |
650 | * boundary for 32-bits ABI and 16 bytes for 64-bits ABI. | 675 | * boundary for 32-bits ABI and 16 bytes for 64-bits ABI. |
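Note: mips_stack_top() budgets, above the stack, everything that used to live in the fixed one-page gap below TASK_SIZE. A sketch of what the subtractions reserve (not kernel code):

	/*
	 * Space reserved between STACK_TOP and TASK_SIZE:
	 *   - one page for branch delay slot "emulation" (dsemul)
	 *   - the VDSO image, its data page, and a GIC user page when present
	 *   - cache-colour slack when the D-cache has aliases
	 *   - a VDSO_RANDOMIZE_SIZE window when the task has PF_RANDOMIZE
	 * vdso_base() further down hands part of this budget to
	 * get_unmapped_area() as a mapping hint.
	 */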
diff --git a/arch/mips/kernel/setup.c b/arch/mips/kernel/setup.c
index c71d1eb7da59..8aaaa42f91ed 100644
--- a/arch/mips/kernel/setup.c
+++ b/arch/mips/kernel/setup.c
@@ -846,6 +846,34 @@ static void __init arch_mem_init(char **cmdline_p) | |||
846 | struct memblock_region *reg; | 846 | struct memblock_region *reg; |
847 | extern void plat_mem_setup(void); | 847 | extern void plat_mem_setup(void); |
848 | 848 | ||
849 | /* | ||
850 | * Initialize boot_command_line to an innocuous but non-empty string in | ||
851 | * order to prevent early_init_dt_scan_chosen() from copying | ||
852 | * CONFIG_CMDLINE into it without our knowledge. We handle | ||
853 | * CONFIG_CMDLINE ourselves below & don't want to duplicate its | ||
854 | * content because repeating arguments can be problematic. | ||
855 | */ | ||
856 | strlcpy(boot_command_line, " ", COMMAND_LINE_SIZE); | ||
857 | |||
858 | /* call board setup routine */ | ||
859 | plat_mem_setup(); | ||
860 | |||
861 | /* | ||
862 | * Make sure all kernel memory is in the maps. The "UP" and | ||
863 | * "DOWN" are opposite for initdata since if it crosses over | ||
864 | * into another memory section you don't want that to be | ||
865 | * freed when the initdata is freed. | ||
866 | */ | ||
867 | arch_mem_addpart(PFN_DOWN(__pa_symbol(&_text)) << PAGE_SHIFT, | ||
868 | PFN_UP(__pa_symbol(&_edata)) << PAGE_SHIFT, | ||
869 | BOOT_MEM_RAM); | ||
870 | arch_mem_addpart(PFN_UP(__pa_symbol(&__init_begin)) << PAGE_SHIFT, | ||
871 | PFN_DOWN(__pa_symbol(&__init_end)) << PAGE_SHIFT, | ||
872 | BOOT_MEM_INIT_RAM); | ||
873 | |||
874 | pr_info("Determined physical RAM map:\n"); | ||
875 | print_memory_map(); | ||
876 | |||
849 | #if defined(CONFIG_CMDLINE_BOOL) && defined(CONFIG_CMDLINE_OVERRIDE) | 877 | #if defined(CONFIG_CMDLINE_BOOL) && defined(CONFIG_CMDLINE_OVERRIDE) |
850 | strlcpy(boot_command_line, builtin_cmdline, COMMAND_LINE_SIZE); | 878 | strlcpy(boot_command_line, builtin_cmdline, COMMAND_LINE_SIZE); |
851 | #else | 879 | #else |
@@ -873,26 +901,6 @@ static void __init arch_mem_init(char **cmdline_p) | |||
873 | } | 901 | } |
874 | #endif | 902 | #endif |
875 | #endif | 903 | #endif |
876 | |||
877 | /* call board setup routine */ | ||
878 | plat_mem_setup(); | ||
879 | |||
880 | /* | ||
881 | * Make sure all kernel memory is in the maps. The "UP" and | ||
882 | * "DOWN" are opposite for initdata since if it crosses over | ||
883 | * into another memory section you don't want that to be | ||
884 | * freed when the initdata is freed. | ||
885 | */ | ||
886 | arch_mem_addpart(PFN_DOWN(__pa_symbol(&_text)) << PAGE_SHIFT, | ||
887 | PFN_UP(__pa_symbol(&_edata)) << PAGE_SHIFT, | ||
888 | BOOT_MEM_RAM); | ||
889 | arch_mem_addpart(PFN_UP(__pa_symbol(&__init_begin)) << PAGE_SHIFT, | ||
890 | PFN_DOWN(__pa_symbol(&__init_end)) << PAGE_SHIFT, | ||
891 | BOOT_MEM_INIT_RAM); | ||
892 | |||
893 | pr_info("Determined physical RAM map:\n"); | ||
894 | print_memory_map(); | ||
895 | |||
896 | strlcpy(command_line, boot_command_line, COMMAND_LINE_SIZE); | 904 | strlcpy(command_line, boot_command_line, COMMAND_LINE_SIZE); |
897 | 905 | ||
898 | *cmdline_p = command_line; | 906 | *cmdline_p = command_line; |
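Note: plat_mem_setup() typically ends up calling early_init_dt_scan_chosen(), which would copy CONFIG_CMDLINE into an empty boot_command_line; priming the buffer with a single space and running the board setup before the CONFIG_CMDLINE handling keeps the built-in arguments from being concatenated twice when this code appends them itself.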
diff --git a/arch/mips/kernel/vdso.c b/arch/mips/kernel/vdso.c
index 8f845f6e5f42..48a9c6b90e07 100644
--- a/arch/mips/kernel/vdso.c
+++ b/arch/mips/kernel/vdso.c
@@ -15,6 +15,7 @@ | |||
15 | #include <linux/ioport.h> | 15 | #include <linux/ioport.h> |
16 | #include <linux/kernel.h> | 16 | #include <linux/kernel.h> |
17 | #include <linux/mm.h> | 17 | #include <linux/mm.h> |
18 | #include <linux/random.h> | ||
18 | #include <linux/sched.h> | 19 | #include <linux/sched.h> |
19 | #include <linux/slab.h> | 20 | #include <linux/slab.h> |
20 | #include <linux/timekeeper_internal.h> | 21 | #include <linux/timekeeper_internal.h> |
@@ -97,6 +98,21 @@ void update_vsyscall_tz(void) | |||
97 | } | 98 | } |
98 | } | 99 | } |
99 | 100 | ||
101 | static unsigned long vdso_base(void) | ||
102 | { | ||
103 | unsigned long base; | ||
104 | |||
105 | /* Skip the delay slot emulation page */ | ||
106 | base = STACK_TOP + PAGE_SIZE; | ||
107 | |||
108 | if (current->flags & PF_RANDOMIZE) { | ||
109 | base += get_random_int() & (VDSO_RANDOMIZE_SIZE - 1); | ||
110 | base = PAGE_ALIGN(base); | ||
111 | } | ||
112 | |||
113 | return base; | ||
114 | } | ||
115 | |||
100 | int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp) | 116 | int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp) |
101 | { | 117 | { |
102 | struct mips_vdso_image *image = current->thread.abi->vdso; | 118 | struct mips_vdso_image *image = current->thread.abi->vdso; |
@@ -137,7 +153,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp) | |||
137 | if (cpu_has_dc_aliases) | 153 | if (cpu_has_dc_aliases) |
138 | size += shm_align_mask + 1; | 154 | size += shm_align_mask + 1; |
139 | 155 | ||
140 | base = get_unmapped_area(NULL, 0, size, 0, 0); | 156 | base = get_unmapped_area(NULL, vdso_base(), size, 0, 0); |
141 | if (IS_ERR_VALUE(base)) { | 157 | if (IS_ERR_VALUE(base)) { |
142 | ret = base; | 158 | ret = base; |
143 | goto out; | 159 | goto out; |
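Note: with randomization enabled the VDSO hint passed to get_unmapped_area() becomes, roughly (sketch, not kernel code):

	/* base = PAGE_ALIGN(STACK_TOP + PAGE_SIZE
	 *                   + (get_random_int() & (VDSO_RANDOMIZE_SIZE - 1)));
	 * i.e. a page-aligned address up to 1 MiB (32-bit tasks) or 256 MiB
	 * (64-bit tasks) above the delay-slot page; it is only a hint, so
	 * get_unmapped_area() may still pick a different free range.
	 */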
diff --git a/arch/mips/lib/memset.S b/arch/mips/lib/memset.S
index 3a6f34ef5ffc..069acec3df9f 100644
--- a/arch/mips/lib/memset.S
+++ b/arch/mips/lib/memset.S
@@ -280,9 +280,11 @@ | |||
280 | * unset_bytes = end_addr - current_addr + 1 | 280 | * unset_bytes = end_addr - current_addr + 1 |
281 | * a2 = t1 - a0 + 1 | 281 | * a2 = t1 - a0 + 1 |
282 | */ | 282 | */ |
283 | .set reorder | ||
283 | PTR_SUBU a2, t1, a0 | 284 | PTR_SUBU a2, t1, a0 |
285 | PTR_ADDIU a2, 1 | ||
284 | jr ra | 286 | jr ra |
285 | PTR_ADDIU a2, 1 | 287 | .set noreorder |
286 | 288 | ||
287 | .endm | 289 | .endm |
288 | 290 | ||
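Note: under `.set reorder` the assembler schedules the fixup itself and fills the `jr ra` delay slot (with a nop if nothing else fits), instead of relying on the hand-placed PTR_ADDIU in the slot; `.set noreorder` is restored afterwards for the rest of the file.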
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index 913c5725cdb2..bb6ac471a784 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -1306,6 +1306,16 @@ void show_user_instructions(struct pt_regs *regs) | |||
1306 | 1306 | ||
1307 | pc = regs->nip - (instructions_to_print * 3 / 4 * sizeof(int)); | 1307 | pc = regs->nip - (instructions_to_print * 3 / 4 * sizeof(int)); |
1308 | 1308 | ||
1309 | /* | ||
1310 | * Make sure the NIP points at userspace, not kernel text/data or | ||
1311 | * elsewhere. | ||
1312 | */ | ||
1313 | if (!__access_ok(pc, instructions_to_print * sizeof(int), USER_DS)) { | ||
1314 | pr_info("%s[%d]: Bad NIP, not dumping instructions.\n", | ||
1315 | current->comm, current->pid); | ||
1316 | return; | ||
1317 | } | ||
1318 | |||
1309 | pr_info("%s[%d]: code: ", current->comm, current->pid); | 1319 | pr_info("%s[%d]: code: ", current->comm, current->pid); |
1310 | 1320 | ||
1311 | for (i = 0; i < instructions_to_print; i++) { | 1321 | for (i = 0; i < instructions_to_print; i++) { |
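Note: the __access_ok() test rejects any window that is not entirely in user space, so a kernel-space or otherwise bogus regs->nip can no longer make show_user_instructions() read and print kernel text in the log.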
diff --git a/arch/powerpc/lib/code-patching.c b/arch/powerpc/lib/code-patching.c
index 6ae2777c220d..5ffee298745f 100644
--- a/arch/powerpc/lib/code-patching.c
+++ b/arch/powerpc/lib/code-patching.c
@@ -28,12 +28,6 @@ static int __patch_instruction(unsigned int *exec_addr, unsigned int instr, | |||
28 | { | 28 | { |
29 | int err; | 29 | int err; |
30 | 30 | ||
31 | /* Make sure we aren't patching a freed init section */ | ||
32 | if (init_mem_is_free && init_section_contains(exec_addr, 4)) { | ||
33 | pr_debug("Skipping init section patching addr: 0x%px\n", exec_addr); | ||
34 | return 0; | ||
35 | } | ||
36 | |||
37 | __put_user_size(instr, patch_addr, 4, err); | 31 | __put_user_size(instr, patch_addr, 4, err); |
38 | if (err) | 32 | if (err) |
39 | return err; | 33 | return err; |
@@ -148,7 +142,7 @@ static inline int unmap_patch_area(unsigned long addr) | |||
148 | return 0; | 142 | return 0; |
149 | } | 143 | } |
150 | 144 | ||
151 | int patch_instruction(unsigned int *addr, unsigned int instr) | 145 | static int do_patch_instruction(unsigned int *addr, unsigned int instr) |
152 | { | 146 | { |
153 | int err; | 147 | int err; |
154 | unsigned int *patch_addr = NULL; | 148 | unsigned int *patch_addr = NULL; |
@@ -188,12 +182,22 @@ out: | |||
188 | } | 182 | } |
189 | #else /* !CONFIG_STRICT_KERNEL_RWX */ | 183 | #else /* !CONFIG_STRICT_KERNEL_RWX */ |
190 | 184 | ||
191 | int patch_instruction(unsigned int *addr, unsigned int instr) | 185 | static int do_patch_instruction(unsigned int *addr, unsigned int instr) |
192 | { | 186 | { |
193 | return raw_patch_instruction(addr, instr); | 187 | return raw_patch_instruction(addr, instr); |
194 | } | 188 | } |
195 | 189 | ||
196 | #endif /* CONFIG_STRICT_KERNEL_RWX */ | 190 | #endif /* CONFIG_STRICT_KERNEL_RWX */ |
191 | |||
192 | int patch_instruction(unsigned int *addr, unsigned int instr) | ||
193 | { | ||
194 | /* Make sure we aren't patching a freed init section */ | ||
195 | if (init_mem_is_free && init_section_contains(addr, 4)) { | ||
196 | pr_debug("Skipping init section patching addr: 0x%px\n", addr); | ||
197 | return 0; | ||
198 | } | ||
199 | return do_patch_instruction(addr, instr); | ||
200 | } | ||
197 | NOKPROBE_SYMBOL(patch_instruction); | 201 | NOKPROBE_SYMBOL(patch_instruction); |
198 | 202 | ||
199 | int patch_branch(unsigned int *addr, unsigned long target, int flags) | 203 | int patch_branch(unsigned int *addr, unsigned long target, int flags) |
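Note: the freed-init-section check now runs once in the common patch_instruction() entry point, ahead of both the STRICT_KERNEL_RWX path and the raw fallback, instead of deep inside __patch_instruction() after the temporary patching mapping has already been set up.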
diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
index 59d07bd5374a..055b211b7126 100644
--- a/arch/powerpc/mm/numa.c
+++ b/arch/powerpc/mm/numa.c
@@ -1217,9 +1217,10 @@ int find_and_online_cpu_nid(int cpu) | |||
1217 | * Need to ensure that NODE_DATA is initialized for a node from | 1217 | * Need to ensure that NODE_DATA is initialized for a node from |
1218 | * available memory (see memblock_alloc_try_nid). If unable to | 1218 | * available memory (see memblock_alloc_try_nid). If unable to |
1219 | * init the node, then default to nearest node that has memory | 1219 | * init the node, then default to nearest node that has memory |
1220 | * installed. | 1220 | * installed. Skip onlining a node if the subsystems are not |
1221 | * yet initialized. | ||
1221 | */ | 1222 | */ |
1222 | if (try_online_node(new_nid)) | 1223 | if (!topology_inited || try_online_node(new_nid)) |
1223 | new_nid = first_online_node; | 1224 | new_nid = first_online_node; |
1224 | #else | 1225 | #else |
1225 | /* | 1226 | /* |
diff --git a/arch/s390/include/asm/sclp.h b/arch/s390/include/asm/sclp.h
index 3cae9168f63c..e44a8d7959f5 100644
--- a/arch/s390/include/asm/sclp.h
+++ b/arch/s390/include/asm/sclp.h
@@ -108,7 +108,8 @@ int sclp_early_get_core_info(struct sclp_core_info *info); | |||
108 | void sclp_early_get_ipl_info(struct sclp_ipl_info *info); | 108 | void sclp_early_get_ipl_info(struct sclp_ipl_info *info); |
109 | void sclp_early_detect(void); | 109 | void sclp_early_detect(void); |
110 | void sclp_early_printk(const char *s); | 110 | void sclp_early_printk(const char *s); |
111 | void __sclp_early_printk(const char *s, unsigned int len); | 111 | void sclp_early_printk_force(const char *s); |
112 | void __sclp_early_printk(const char *s, unsigned int len, unsigned int force); | ||
112 | 113 | ||
113 | int _sclp_get_core_info(struct sclp_core_info *info); | 114 | int _sclp_get_core_info(struct sclp_core_info *info); |
114 | int sclp_core_configure(u8 core); | 115 | int sclp_core_configure(u8 core); |
diff --git a/arch/s390/kernel/early_printk.c b/arch/s390/kernel/early_printk.c
index 9431784d7796..40c1dfec944e 100644
--- a/arch/s390/kernel/early_printk.c
+++ b/arch/s390/kernel/early_printk.c
@@ -10,7 +10,7 @@ | |||
10 | 10 | ||
11 | static void sclp_early_write(struct console *con, const char *s, unsigned int len) | 11 | static void sclp_early_write(struct console *con, const char *s, unsigned int len) |
12 | { | 12 | { |
13 | __sclp_early_printk(s, len); | 13 | __sclp_early_printk(s, len, 0); |
14 | } | 14 | } |
15 | 15 | ||
16 | static struct console sclp_early_console = { | 16 | static struct console sclp_early_console = { |
diff --git a/arch/s390/kernel/swsusp.S b/arch/s390/kernel/swsusp.S
index a049a7b9d6e8..c1a080b11ae9 100644
--- a/arch/s390/kernel/swsusp.S
+++ b/arch/s390/kernel/swsusp.S
@@ -198,12 +198,10 @@ pgm_check_entry: | |||
198 | 198 | ||
199 | /* Suspend CPU not available -> panic */ | 199 | /* Suspend CPU not available -> panic */ |
200 | larl %r15,init_thread_union | 200 | larl %r15,init_thread_union |
201 | ahi %r15,1<<(PAGE_SHIFT+THREAD_SIZE_ORDER) | 201 | aghi %r15,1<<(PAGE_SHIFT+THREAD_SIZE_ORDER) |
202 | aghi %r15,-STACK_FRAME_OVERHEAD | ||
202 | larl %r2,.Lpanic_string | 203 | larl %r2,.Lpanic_string |
203 | lghi %r1,0 | 204 | brasl %r14,sclp_early_printk_force |
204 | sam31 | ||
205 | sigp %r1,%r0,SIGP_SET_ARCHITECTURE | ||
206 | brasl %r14,sclp_early_printk | ||
207 | larl %r3,.Ldisabled_wait_31 | 205 | larl %r3,.Ldisabled_wait_31 |
208 | lpsw 0(%r3) | 206 | lpsw 0(%r3) |
209 | 4: | 207 | 4: |
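Note: the panic path now reserves a proper STACK_FRAME_OVERHEAD below the init stack before the call, and judging from the new __sclp_early_printk(s, len, force) prototype, sclp_early_printk_force() asks for the message to be emitted even when early SCLP output would otherwise be suppressed.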
diff --git a/arch/sparc/kernel/auxio_64.c b/arch/sparc/kernel/auxio_64.c
index 4e8f56c3793c..cc42225c20f3 100644
--- a/arch/sparc/kernel/auxio_64.c
+++ b/arch/sparc/kernel/auxio_64.c
@@ -115,8 +115,8 @@ static int auxio_probe(struct platform_device *dev) | |||
115 | auxio_devtype = AUXIO_TYPE_SBUS; | 115 | auxio_devtype = AUXIO_TYPE_SBUS; |
116 | size = 1; | 116 | size = 1; |
117 | } else { | 117 | } else { |
118 | printk("auxio: Unknown parent bus type [%s]\n", | 118 | printk("auxio: Unknown parent bus type [%pOFn]\n", |
119 | dp->parent->name); | 119 | dp->parent); |
120 | return -ENODEV; | 120 | return -ENODEV; |
121 | } | 121 | } |
122 | auxio_register = of_ioremap(&dev->resource[0], 0, size, "auxio"); | 122 | auxio_register = of_ioremap(&dev->resource[0], 0, size, "auxio"); |
diff --git a/arch/sparc/kernel/kgdb_32.c b/arch/sparc/kernel/kgdb_32.c
index 5868fc333ea8..639c8e54530a 100644
--- a/arch/sparc/kernel/kgdb_32.c
+++ b/arch/sparc/kernel/kgdb_32.c
@@ -122,7 +122,7 @@ int kgdb_arch_handle_exception(int e_vector, int signo, int err_code, | |||
122 | linux_regs->pc = addr; | 122 | linux_regs->pc = addr; |
123 | linux_regs->npc = addr + 4; | 123 | linux_regs->npc = addr + 4; |
124 | } | 124 | } |
125 | /* fallthru */ | 125 | /* fall through */ |
126 | 126 | ||
127 | case 'D': | 127 | case 'D': |
128 | case 'k': | 128 | case 'k': |
diff --git a/arch/sparc/kernel/kgdb_64.c b/arch/sparc/kernel/kgdb_64.c
index d5f7dc6323d5..a68bbddbdba4 100644
--- a/arch/sparc/kernel/kgdb_64.c
+++ b/arch/sparc/kernel/kgdb_64.c
@@ -148,7 +148,7 @@ int kgdb_arch_handle_exception(int e_vector, int signo, int err_code, | |||
148 | linux_regs->tpc = addr; | 148 | linux_regs->tpc = addr; |
149 | linux_regs->tnpc = addr + 4; | 149 | linux_regs->tnpc = addr + 4; |
150 | } | 150 | } |
151 | /* fallthru */ | 151 | /* fall through */ |
152 | 152 | ||
153 | case 'D': | 153 | case 'D': |
154 | case 'k': | 154 | case 'k': |
diff --git a/arch/sparc/kernel/power.c b/arch/sparc/kernel/power.c
index 92627abce311..d941875dd718 100644
--- a/arch/sparc/kernel/power.c
+++ b/arch/sparc/kernel/power.c
@@ -41,8 +41,8 @@ static int power_probe(struct platform_device *op) | |||
41 | 41 | ||
42 | power_reg = of_ioremap(res, 0, 0x4, "power"); | 42 | power_reg = of_ioremap(res, 0, 0x4, "power"); |
43 | 43 | ||
44 | printk(KERN_INFO "%s: Control reg at %llx\n", | 44 | printk(KERN_INFO "%pOFn: Control reg at %llx\n", |
45 | op->dev.of_node->name, res->start); | 45 | op->dev.of_node, res->start); |
46 | 46 | ||
47 | if (has_button_interrupt(irq, op->dev.of_node)) { | 47 | if (has_button_interrupt(irq, op->dev.of_node)) { |
48 | if (request_irq(irq, | 48 | if (request_irq(irq, |
diff --git a/arch/sparc/kernel/prom_32.c b/arch/sparc/kernel/prom_32.c
index b51cbb9e87dc..17c87d29ff20 100644
--- a/arch/sparc/kernel/prom_32.c
+++ b/arch/sparc/kernel/prom_32.c
@@ -68,8 +68,8 @@ static void __init sparc32_path_component(struct device_node *dp, char *tmp_buf) | |||
68 | return; | 68 | return; |
69 | 69 | ||
70 | regs = rprop->value; | 70 | regs = rprop->value; |
71 | sprintf(tmp_buf, "%s@%x,%x", | 71 | sprintf(tmp_buf, "%pOFn@%x,%x", |
72 | dp->name, | 72 | dp, |
73 | regs->which_io, regs->phys_addr); | 73 | regs->which_io, regs->phys_addr); |
74 | } | 74 | } |
75 | 75 | ||
@@ -84,8 +84,8 @@ static void __init sbus_path_component(struct device_node *dp, char *tmp_buf) | |||
84 | return; | 84 | return; |
85 | 85 | ||
86 | regs = prop->value; | 86 | regs = prop->value; |
87 | sprintf(tmp_buf, "%s@%x,%x", | 87 | sprintf(tmp_buf, "%pOFn@%x,%x", |
88 | dp->name, | 88 | dp, |
89 | regs->which_io, | 89 | regs->which_io, |
90 | regs->phys_addr); | 90 | regs->phys_addr); |
91 | } | 91 | } |
@@ -104,13 +104,13 @@ static void __init pci_path_component(struct device_node *dp, char *tmp_buf) | |||
104 | regs = prop->value; | 104 | regs = prop->value; |
105 | devfn = (regs->phys_hi >> 8) & 0xff; | 105 | devfn = (regs->phys_hi >> 8) & 0xff; |
106 | if (devfn & 0x07) { | 106 | if (devfn & 0x07) { |
107 | sprintf(tmp_buf, "%s@%x,%x", | 107 | sprintf(tmp_buf, "%pOFn@%x,%x", |
108 | dp->name, | 108 | dp, |
109 | devfn >> 3, | 109 | devfn >> 3, |
110 | devfn & 0x07); | 110 | devfn & 0x07); |
111 | } else { | 111 | } else { |
112 | sprintf(tmp_buf, "%s@%x", | 112 | sprintf(tmp_buf, "%pOFn@%x", |
113 | dp->name, | 113 | dp, |
114 | devfn >> 3); | 114 | devfn >> 3); |
115 | } | 115 | } |
116 | } | 116 | } |
@@ -127,8 +127,8 @@ static void __init ebus_path_component(struct device_node *dp, char *tmp_buf) | |||
127 | 127 | ||
128 | regs = prop->value; | 128 | regs = prop->value; |
129 | 129 | ||
130 | sprintf(tmp_buf, "%s@%x,%x", | 130 | sprintf(tmp_buf, "%pOFn@%x,%x", |
131 | dp->name, | 131 | dp, |
132 | regs->which_io, regs->phys_addr); | 132 | regs->which_io, regs->phys_addr); |
133 | } | 133 | } |
134 | 134 | ||
@@ -167,8 +167,8 @@ static void __init ambapp_path_component(struct device_node *dp, char *tmp_buf) | |||
167 | return; | 167 | return; |
168 | device = prop->value; | 168 | device = prop->value; |
169 | 169 | ||
170 | sprintf(tmp_buf, "%s:%d:%d@%x,%x", | 170 | sprintf(tmp_buf, "%pOFn:%d:%d@%x,%x", |
171 | dp->name, *vendor, *device, | 171 | dp, *vendor, *device, |
172 | *intr, reg0); | 172 | *intr, reg0); |
173 | } | 173 | } |
174 | 174 | ||
@@ -201,7 +201,7 @@ char * __init build_path_component(struct device_node *dp) | |||
201 | tmp_buf[0] = '\0'; | 201 | tmp_buf[0] = '\0'; |
202 | __build_path_component(dp, tmp_buf); | 202 | __build_path_component(dp, tmp_buf); |
203 | if (tmp_buf[0] == '\0') | 203 | if (tmp_buf[0] == '\0') |
204 | strcpy(tmp_buf, dp->name); | 204 | snprintf(tmp_buf, sizeof(tmp_buf), "%pOFn", dp); |
205 | 205 | ||
206 | n = prom_early_alloc(strlen(tmp_buf) + 1); | 206 | n = prom_early_alloc(strlen(tmp_buf) + 1); |
207 | strcpy(n, tmp_buf); | 207 | strcpy(n, tmp_buf); |
diff --git a/arch/sparc/kernel/prom_64.c b/arch/sparc/kernel/prom_64.c
index baeaeed64993..6220411ce8fc 100644
--- a/arch/sparc/kernel/prom_64.c
+++ b/arch/sparc/kernel/prom_64.c
@@ -82,8 +82,8 @@ static void __init sun4v_path_component(struct device_node *dp, char *tmp_buf) | |||
82 | 82 | ||
83 | regs = rprop->value; | 83 | regs = rprop->value; |
84 | if (!of_node_is_root(dp->parent)) { | 84 | if (!of_node_is_root(dp->parent)) { |
85 | sprintf(tmp_buf, "%s@%x,%x", | 85 | sprintf(tmp_buf, "%pOFn@%x,%x", |
86 | dp->name, | 86 | dp, |
87 | (unsigned int) (regs->phys_addr >> 32UL), | 87 | (unsigned int) (regs->phys_addr >> 32UL), |
88 | (unsigned int) (regs->phys_addr & 0xffffffffUL)); | 88 | (unsigned int) (regs->phys_addr & 0xffffffffUL)); |
89 | return; | 89 | return; |
@@ -97,17 +97,17 @@ static void __init sun4v_path_component(struct device_node *dp, char *tmp_buf) | |||
97 | const char *prefix = (type == 0) ? "m" : "i"; | 97 | const char *prefix = (type == 0) ? "m" : "i"; |
98 | 98 | ||
99 | if (low_bits) | 99 | if (low_bits) |
100 | sprintf(tmp_buf, "%s@%s%x,%x", | 100 | sprintf(tmp_buf, "%pOFn@%s%x,%x", |
101 | dp->name, prefix, | 101 | dp, prefix, |
102 | high_bits, low_bits); | 102 | high_bits, low_bits); |
103 | else | 103 | else |
104 | sprintf(tmp_buf, "%s@%s%x", | 104 | sprintf(tmp_buf, "%pOFn@%s%x", |
105 | dp->name, | 105 | dp, |
106 | prefix, | 106 | prefix, |
107 | high_bits); | 107 | high_bits); |
108 | } else if (type == 12) { | 108 | } else if (type == 12) { |
109 | sprintf(tmp_buf, "%s@%x", | 109 | sprintf(tmp_buf, "%pOFn@%x", |
110 | dp->name, high_bits); | 110 | dp, high_bits); |
111 | } | 111 | } |
112 | } | 112 | } |
113 | 113 | ||
@@ -122,8 +122,8 @@ static void __init sun4u_path_component(struct device_node *dp, char *tmp_buf) | |||
122 | 122 | ||
123 | regs = prop->value; | 123 | regs = prop->value; |
124 | if (!of_node_is_root(dp->parent)) { | 124 | if (!of_node_is_root(dp->parent)) { |
125 | sprintf(tmp_buf, "%s@%x,%x", | 125 | sprintf(tmp_buf, "%pOFn@%x,%x", |
126 | dp->name, | 126 | dp, |
127 | (unsigned int) (regs->phys_addr >> 32UL), | 127 | (unsigned int) (regs->phys_addr >> 32UL), |
128 | (unsigned int) (regs->phys_addr & 0xffffffffUL)); | 128 | (unsigned int) (regs->phys_addr & 0xffffffffUL)); |
129 | return; | 129 | return; |
@@ -138,8 +138,8 @@ static void __init sun4u_path_component(struct device_node *dp, char *tmp_buf) | |||
138 | if (tlb_type >= cheetah) | 138 | if (tlb_type >= cheetah) |
139 | mask = 0x7fffff; | 139 | mask = 0x7fffff; |
140 | 140 | ||
141 | sprintf(tmp_buf, "%s@%x,%x", | 141 | sprintf(tmp_buf, "%pOFn@%x,%x", |
142 | dp->name, | 142 | dp, |
143 | *(u32 *)prop->value, | 143 | *(u32 *)prop->value, |
144 | (unsigned int) (regs->phys_addr & mask)); | 144 | (unsigned int) (regs->phys_addr & mask)); |
145 | } | 145 | } |
@@ -156,8 +156,8 @@ static void __init sbus_path_component(struct device_node *dp, char *tmp_buf) | |||
156 | return; | 156 | return; |
157 | 157 | ||
158 | regs = prop->value; | 158 | regs = prop->value; |
159 | sprintf(tmp_buf, "%s@%x,%x", | 159 | sprintf(tmp_buf, "%pOFn@%x,%x", |
160 | dp->name, | 160 | dp, |
161 | regs->which_io, | 161 | regs->which_io, |
162 | regs->phys_addr); | 162 | regs->phys_addr); |
163 | } | 163 | } |
@@ -176,13 +176,13 @@ static void __init pci_path_component(struct device_node *dp, char *tmp_buf) | |||
176 | regs = prop->value; | 176 | regs = prop->value; |
177 | devfn = (regs->phys_hi >> 8) & 0xff; | 177 | devfn = (regs->phys_hi >> 8) & 0xff; |
178 | if (devfn & 0x07) { | 178 | if (devfn & 0x07) { |
179 | sprintf(tmp_buf, "%s@%x,%x", | 179 | sprintf(tmp_buf, "%pOFn@%x,%x", |
180 | dp->name, | 180 | dp, |
181 | devfn >> 3, | 181 | devfn >> 3, |
182 | devfn & 0x07); | 182 | devfn & 0x07); |
183 | } else { | 183 | } else { |
184 | sprintf(tmp_buf, "%s@%x", | 184 | sprintf(tmp_buf, "%pOFn@%x", |
185 | dp->name, | 185 | dp, |
186 | devfn >> 3); | 186 | devfn >> 3); |
187 | } | 187 | } |
188 | } | 188 | } |
@@ -203,8 +203,8 @@ static void __init upa_path_component(struct device_node *dp, char *tmp_buf) | |||
203 | if (!prop) | 203 | if (!prop) |
204 | return; | 204 | return; |
205 | 205 | ||
206 | sprintf(tmp_buf, "%s@%x,%x", | 206 | sprintf(tmp_buf, "%pOFn@%x,%x", |
207 | dp->name, | 207 | dp, |
208 | *(u32 *) prop->value, | 208 | *(u32 *) prop->value, |
209 | (unsigned int) (regs->phys_addr & 0xffffffffUL)); | 209 | (unsigned int) (regs->phys_addr & 0xffffffffUL)); |
210 | } | 210 | } |
@@ -221,7 +221,7 @@ static void __init vdev_path_component(struct device_node *dp, char *tmp_buf) | |||
221 | 221 | ||
222 | regs = prop->value; | 222 | regs = prop->value; |
223 | 223 | ||
224 | sprintf(tmp_buf, "%s@%x", dp->name, *regs); | 224 | sprintf(tmp_buf, "%pOFn@%x", dp, *regs); |
225 | } | 225 | } |
226 | 226 | ||
227 | /* "name@addrhi,addrlo" */ | 227 | /* "name@addrhi,addrlo" */ |
@@ -236,8 +236,8 @@ static void __init ebus_path_component(struct device_node *dp, char *tmp_buf) | |||
236 | 236 | ||
237 | regs = prop->value; | 237 | regs = prop->value; |
238 | 238 | ||
239 | sprintf(tmp_buf, "%s@%x,%x", | 239 | sprintf(tmp_buf, "%pOFn@%x,%x", |
240 | dp->name, | 240 | dp, |
241 | (unsigned int) (regs->phys_addr >> 32UL), | 241 | (unsigned int) (regs->phys_addr >> 32UL), |
242 | (unsigned int) (regs->phys_addr & 0xffffffffUL)); | 242 | (unsigned int) (regs->phys_addr & 0xffffffffUL)); |
243 | } | 243 | } |
@@ -257,8 +257,8 @@ static void __init i2c_path_component(struct device_node *dp, char *tmp_buf) | |||
257 | /* This actually isn't right... should look at the #address-cells | 257 | /* This actually isn't right... should look at the #address-cells |
258 | * property of the i2c bus node etc. etc. | 258 | * property of the i2c bus node etc. etc. |
259 | */ | 259 | */ |
260 | sprintf(tmp_buf, "%s@%x,%x", | 260 | sprintf(tmp_buf, "%pOFn@%x,%x", |
261 | dp->name, regs[0], regs[1]); | 261 | dp, regs[0], regs[1]); |
262 | } | 262 | } |
263 | 263 | ||
264 | /* "name@reg0[,reg1]" */ | 264 | /* "name@reg0[,reg1]" */ |
@@ -274,11 +274,11 @@ static void __init usb_path_component(struct device_node *dp, char *tmp_buf) | |||
274 | regs = prop->value; | 274 | regs = prop->value; |
275 | 275 | ||
276 | if (prop->length == sizeof(u32) || regs[1] == 1) { | 276 | if (prop->length == sizeof(u32) || regs[1] == 1) { |
277 | sprintf(tmp_buf, "%s@%x", | 277 | sprintf(tmp_buf, "%pOFn@%x", |
278 | dp->name, regs[0]); | 278 | dp, regs[0]); |
279 | } else { | 279 | } else { |
280 | sprintf(tmp_buf, "%s@%x,%x", | 280 | sprintf(tmp_buf, "%pOFn@%x,%x", |
281 | dp->name, regs[0], regs[1]); | 281 | dp, regs[0], regs[1]); |
282 | } | 282 | } |
283 | } | 283 | } |
284 | 284 | ||
@@ -295,11 +295,11 @@ static void __init ieee1394_path_component(struct device_node *dp, char *tmp_buf | |||
295 | regs = prop->value; | 295 | regs = prop->value; |
296 | 296 | ||
297 | if (regs[2] || regs[3]) { | 297 | if (regs[2] || regs[3]) { |
298 | sprintf(tmp_buf, "%s@%08x%08x,%04x%08x", | 298 | sprintf(tmp_buf, "%pOFn@%08x%08x,%04x%08x", |
299 | dp->name, regs[0], regs[1], regs[2], regs[3]); | 299 | dp, regs[0], regs[1], regs[2], regs[3]); |
300 | } else { | 300 | } else { |
301 | sprintf(tmp_buf, "%s@%08x%08x", | 301 | sprintf(tmp_buf, "%pOFn@%08x%08x", |
302 | dp->name, regs[0], regs[1]); | 302 | dp, regs[0], regs[1]); |
303 | } | 303 | } |
304 | } | 304 | } |
305 | 305 | ||
@@ -361,7 +361,7 @@ char * __init build_path_component(struct device_node *dp) | |||
361 | tmp_buf[0] = '\0'; | 361 | tmp_buf[0] = '\0'; |
362 | __build_path_component(dp, tmp_buf); | 362 | __build_path_component(dp, tmp_buf); |
363 | if (tmp_buf[0] == '\0') | 363 | if (tmp_buf[0] == '\0') |
364 | strcpy(tmp_buf, dp->name); | 364 | snprintf(tmp_buf, sizeof(tmp_buf), "%pOFn", dp); |
365 | 365 | ||
366 | n = prom_early_alloc(strlen(tmp_buf) + 1); | 366 | n = prom_early_alloc(strlen(tmp_buf) + 1); |
367 | strcpy(n, tmp_buf); | 367 | strcpy(n, tmp_buf); |
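Note: %pOFn is the vsprintf format that prints a struct device_node's name, used throughout the sparc hunks above so the callers no longer dereference dp->name directly. A minimal sketch (illustrative only):

	pr_info("%s\n",    dp->name);   /* old: relies on the cached name pointer */
	pr_info("%pOFn\n", dp);         /* new: lets vsprintf fetch the node name */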
diff --git a/arch/sparc/kernel/viohs.c b/arch/sparc/kernel/viohs.c
index 635d67ffc9a3..7db5aabe9708 100644
--- a/arch/sparc/kernel/viohs.c
+++ b/arch/sparc/kernel/viohs.c
@@ -180,11 +180,17 @@ static int send_dreg(struct vio_driver_state *vio) | |||
180 | struct vio_dring_register pkt; | 180 | struct vio_dring_register pkt; |
181 | char all[sizeof(struct vio_dring_register) + | 181 | char all[sizeof(struct vio_dring_register) + |
182 | (sizeof(struct ldc_trans_cookie) * | 182 | (sizeof(struct ldc_trans_cookie) * |
183 | dr->ncookies)]; | 183 | VIO_MAX_RING_COOKIES)]; |
184 | } u; | 184 | } u; |
185 | size_t bytes = sizeof(struct vio_dring_register) + | ||
186 | (sizeof(struct ldc_trans_cookie) * | ||
187 | dr->ncookies); | ||
185 | int i; | 188 | int i; |
186 | 189 | ||
187 | memset(&u, 0, sizeof(u)); | 190 | if (WARN_ON(bytes > sizeof(u))) |
191 | return -EINVAL; | ||
192 | |||
193 | memset(&u, 0, bytes); | ||
188 | init_tag(&u.pkt.tag, VIO_TYPE_CTRL, VIO_SUBTYPE_INFO, VIO_DRING_REG); | 194 | init_tag(&u.pkt.tag, VIO_TYPE_CTRL, VIO_SUBTYPE_INFO, VIO_DRING_REG); |
189 | u.pkt.dring_ident = 0; | 195 | u.pkt.dring_ident = 0; |
190 | u.pkt.num_descr = dr->num_entries; | 196 | u.pkt.num_descr = dr->num_entries; |
@@ -206,7 +212,7 @@ static int send_dreg(struct vio_driver_state *vio) | |||
206 | (unsigned long long) u.pkt.cookies[i].cookie_size); | 212 | (unsigned long long) u.pkt.cookies[i].cookie_size); |
207 | } | 213 | } |
208 | 214 | ||
209 | return send_ctrl(vio, &u.pkt.tag, sizeof(u)); | 215 | return send_ctrl(vio, &u.pkt.tag, bytes); |
210 | } | 216 | } |
211 | 217 | ||
212 | static int send_rdx(struct vio_driver_state *vio) | 218 | static int send_rdx(struct vio_driver_state *vio) |
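Note: sizing the on-stack packet for VIO_MAX_RING_COOKIES removes the variable-length array, while the computed `bytes` plus the WARN_ON keep memset() and send_ctrl() from clearing or transmitting more than the cookies actually in use.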
diff --git a/arch/sparc/vdso/Makefile b/arch/sparc/vdso/Makefile
index dd0b5a92ffd0..dc85570d8839 100644
--- a/arch/sparc/vdso/Makefile
+++ b/arch/sparc/vdso/Makefile
@@ -31,23 +31,21 @@ obj-y += $(vdso_img_objs) | |||
31 | targets += $(vdso_img_cfiles) | 31 | targets += $(vdso_img_cfiles) |
32 | targets += $(vdso_img_sodbg) $(vdso_img-y:%=vdso%.so) | 32 | targets += $(vdso_img_sodbg) $(vdso_img-y:%=vdso%.so) |
33 | 33 | ||
34 | export CPPFLAGS_vdso.lds += -P -C | 34 | CPPFLAGS_vdso.lds += -P -C |
35 | 35 | ||
36 | VDSO_LDFLAGS_vdso.lds = -m64 -Wl,-soname=linux-vdso.so.1 \ | 36 | VDSO_LDFLAGS_vdso.lds = -m64 -Wl,-soname=linux-vdso.so.1 \ |
37 | -Wl,--no-undefined \ | 37 | -Wl,--no-undefined \ |
38 | -Wl,-z,max-page-size=8192 -Wl,-z,common-page-size=8192 \ | 38 | -Wl,-z,max-page-size=8192 -Wl,-z,common-page-size=8192 \ |
39 | $(DISABLE_LTO) | 39 | $(DISABLE_LTO) |
40 | 40 | ||
41 | $(obj)/vdso64.so.dbg: $(src)/vdso.lds $(vobjs) FORCE | 41 | $(obj)/vdso64.so.dbg: $(obj)/vdso.lds $(vobjs) FORCE |
42 | $(call if_changed,vdso) | 42 | $(call if_changed,vdso) |
43 | 43 | ||
44 | HOST_EXTRACFLAGS += -I$(srctree)/tools/include | 44 | HOST_EXTRACFLAGS += -I$(srctree)/tools/include |
45 | hostprogs-y += vdso2c | 45 | hostprogs-y += vdso2c |
46 | 46 | ||
47 | quiet_cmd_vdso2c = VDSO2C $@ | 47 | quiet_cmd_vdso2c = VDSO2C $@ |
48 | define cmd_vdso2c | 48 | cmd_vdso2c = $(obj)/vdso2c $< $(<:%.dbg=%) $@ |
49 | $(obj)/vdso2c $< $(<:%.dbg=%) $@ | ||
50 | endef | ||
51 | 49 | ||
52 | $(obj)/vdso-image-%.c: $(obj)/vdso%.so.dbg $(obj)/vdso%.so $(obj)/vdso2c FORCE | 50 | $(obj)/vdso-image-%.c: $(obj)/vdso%.so.dbg $(obj)/vdso%.so $(obj)/vdso2c FORCE |
53 | $(call if_changed,vdso2c) | 51 | $(call if_changed,vdso2c) |
diff --git a/arch/x86/kernel/cpu/intel_rdt.h b/arch/x86/kernel/cpu/intel_rdt.h
index 285eb3ec4200..3736f6dc9545 100644
--- a/arch/x86/kernel/cpu/intel_rdt.h
+++ b/arch/x86/kernel/cpu/intel_rdt.h
@@ -529,14 +529,14 @@ ssize_t rdtgroup_schemata_write(struct kernfs_open_file *of, | |||
529 | int rdtgroup_schemata_show(struct kernfs_open_file *of, | 529 | int rdtgroup_schemata_show(struct kernfs_open_file *of, |
530 | struct seq_file *s, void *v); | 530 | struct seq_file *s, void *v); |
531 | bool rdtgroup_cbm_overlaps(struct rdt_resource *r, struct rdt_domain *d, | 531 | bool rdtgroup_cbm_overlaps(struct rdt_resource *r, struct rdt_domain *d, |
532 | u32 _cbm, int closid, bool exclusive); | 532 | unsigned long cbm, int closid, bool exclusive); |
533 | unsigned int rdtgroup_cbm_to_size(struct rdt_resource *r, struct rdt_domain *d, | 533 | unsigned int rdtgroup_cbm_to_size(struct rdt_resource *r, struct rdt_domain *d, |
534 | u32 cbm); | 534 | unsigned long cbm); |
535 | enum rdtgrp_mode rdtgroup_mode_by_closid(int closid); | 535 | enum rdtgrp_mode rdtgroup_mode_by_closid(int closid); |
536 | int rdtgroup_tasks_assigned(struct rdtgroup *r); | 536 | int rdtgroup_tasks_assigned(struct rdtgroup *r); |
537 | int rdtgroup_locksetup_enter(struct rdtgroup *rdtgrp); | 537 | int rdtgroup_locksetup_enter(struct rdtgroup *rdtgrp); |
538 | int rdtgroup_locksetup_exit(struct rdtgroup *rdtgrp); | 538 | int rdtgroup_locksetup_exit(struct rdtgroup *rdtgrp); |
539 | bool rdtgroup_cbm_overlaps_pseudo_locked(struct rdt_domain *d, u32 _cbm); | 539 | bool rdtgroup_cbm_overlaps_pseudo_locked(struct rdt_domain *d, unsigned long cbm); |
540 | bool rdtgroup_pseudo_locked_in_hierarchy(struct rdt_domain *d); | 540 | bool rdtgroup_pseudo_locked_in_hierarchy(struct rdt_domain *d); |
541 | int rdt_pseudo_lock_init(void); | 541 | int rdt_pseudo_lock_init(void); |
542 | void rdt_pseudo_lock_release(void); | 542 | void rdt_pseudo_lock_release(void); |
diff --git a/arch/x86/kernel/cpu/intel_rdt_pseudo_lock.c b/arch/x86/kernel/cpu/intel_rdt_pseudo_lock.c
index 40f3903ae5d9..f8c260d522ca 100644
--- a/arch/x86/kernel/cpu/intel_rdt_pseudo_lock.c
+++ b/arch/x86/kernel/cpu/intel_rdt_pseudo_lock.c
@@ -797,25 +797,27 @@ int rdtgroup_locksetup_exit(struct rdtgroup *rdtgrp) | |||
797 | /** | 797 | /** |
798 | * rdtgroup_cbm_overlaps_pseudo_locked - Test if CBM or portion is pseudo-locked | 798 | * rdtgroup_cbm_overlaps_pseudo_locked - Test if CBM or portion is pseudo-locked |
799 | * @d: RDT domain | 799 | * @d: RDT domain |
800 | * @_cbm: CBM to test | 800 | * @cbm: CBM to test |
801 | * | 801 | * |
802 | * @d represents a cache instance and @_cbm a capacity bitmask that is | 802 | * @d represents a cache instance and @cbm a capacity bitmask that is |
803 | * considered for it. Determine if @_cbm overlaps with any existing | 803 | * considered for it. Determine if @cbm overlaps with any existing |
804 | * pseudo-locked region on @d. | 804 | * pseudo-locked region on @d. |
805 | * | 805 | * |
806 | * Return: true if @_cbm overlaps with pseudo-locked region on @d, false | 806 | * @cbm is unsigned long, even if only 32 bits are used, to make the |
807 | * bitmap functions work correctly. | ||
808 | * | ||
809 | * Return: true if @cbm overlaps with pseudo-locked region on @d, false | ||
807 | * otherwise. | 810 | * otherwise. |
808 | */ | 811 | */ |
809 | bool rdtgroup_cbm_overlaps_pseudo_locked(struct rdt_domain *d, u32 _cbm) | 812 | bool rdtgroup_cbm_overlaps_pseudo_locked(struct rdt_domain *d, unsigned long cbm) |
810 | { | 813 | { |
811 | unsigned long *cbm = (unsigned long *)&_cbm; | ||
812 | unsigned long *cbm_b; | ||
813 | unsigned int cbm_len; | 814 | unsigned int cbm_len; |
815 | unsigned long cbm_b; | ||
814 | 816 | ||
815 | if (d->plr) { | 817 | if (d->plr) { |
816 | cbm_len = d->plr->r->cache.cbm_len; | 818 | cbm_len = d->plr->r->cache.cbm_len; |
817 | cbm_b = (unsigned long *)&d->plr->cbm; | 819 | cbm_b = d->plr->cbm; |
818 | if (bitmap_intersects(cbm, cbm_b, cbm_len)) | 820 | if (bitmap_intersects(&cbm, &cbm_b, cbm_len)) |
819 | return true; | 821 | return true; |
820 | } | 822 | } |
821 | return false; | 823 | return false; |
diff --git a/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c b/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c
index 1b8e86a5d5e1..b140c68bc14b 100644
--- a/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c
+++ b/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c
@@ -975,33 +975,34 @@ static int rdtgroup_mode_show(struct kernfs_open_file *of, | |||
975 | * is false then overlaps with any resource group or hardware entities | 975 | * is false then overlaps with any resource group or hardware entities |
976 | * will be considered. | 976 | * will be considered. |
977 | * | 977 | * |
978 | * @cbm is unsigned long, even if only 32 bits are used, to make the | ||
979 | * bitmap functions work correctly. | ||
980 | * | ||
978 | * Return: false if CBM does not overlap, true if it does. | 981 | * Return: false if CBM does not overlap, true if it does. |
979 | */ | 982 | */ |
980 | bool rdtgroup_cbm_overlaps(struct rdt_resource *r, struct rdt_domain *d, | 983 | bool rdtgroup_cbm_overlaps(struct rdt_resource *r, struct rdt_domain *d, |
981 | u32 _cbm, int closid, bool exclusive) | 984 | unsigned long cbm, int closid, bool exclusive) |
982 | { | 985 | { |
983 | unsigned long *cbm = (unsigned long *)&_cbm; | ||
984 | unsigned long *ctrl_b; | ||
985 | enum rdtgrp_mode mode; | 986 | enum rdtgrp_mode mode; |
987 | unsigned long ctrl_b; | ||
986 | u32 *ctrl; | 988 | u32 *ctrl; |
987 | int i; | 989 | int i; |
988 | 990 | ||
989 | /* Check for any overlap with regions used by hardware directly */ | 991 | /* Check for any overlap with regions used by hardware directly */ |
990 | if (!exclusive) { | 992 | if (!exclusive) { |
991 | if (bitmap_intersects(cbm, | 993 | ctrl_b = r->cache.shareable_bits; |
992 | (unsigned long *)&r->cache.shareable_bits, | 994 | if (bitmap_intersects(&cbm, &ctrl_b, r->cache.cbm_len)) |
993 | r->cache.cbm_len)) | ||
994 | return true; | 995 | return true; |
995 | } | 996 | } |
996 | 997 | ||
997 | /* Check for overlap with other resource groups */ | 998 | /* Check for overlap with other resource groups */ |
998 | ctrl = d->ctrl_val; | 999 | ctrl = d->ctrl_val; |
999 | for (i = 0; i < closids_supported(); i++, ctrl++) { | 1000 | for (i = 0; i < closids_supported(); i++, ctrl++) { |
1000 | ctrl_b = (unsigned long *)ctrl; | 1001 | ctrl_b = *ctrl; |
1001 | mode = rdtgroup_mode_by_closid(i); | 1002 | mode = rdtgroup_mode_by_closid(i); |
1002 | if (closid_allocated(i) && i != closid && | 1003 | if (closid_allocated(i) && i != closid && |
1003 | mode != RDT_MODE_PSEUDO_LOCKSETUP) { | 1004 | mode != RDT_MODE_PSEUDO_LOCKSETUP) { |
1004 | if (bitmap_intersects(cbm, ctrl_b, r->cache.cbm_len)) { | 1005 | if (bitmap_intersects(&cbm, &ctrl_b, r->cache.cbm_len)) { |
1005 | if (exclusive) { | 1006 | if (exclusive) { |
1006 | if (mode == RDT_MODE_EXCLUSIVE) | 1007 | if (mode == RDT_MODE_EXCLUSIVE) |
1007 | return true; | 1008 | return true; |
@@ -1138,15 +1139,18 @@ out: | |||
1138 | * computed by first dividing the total cache size by the CBM length to | 1139 | * computed by first dividing the total cache size by the CBM length to |
1139 | * determine how many bytes each bit in the bitmask represents. The result | 1140 | * determine how many bytes each bit in the bitmask represents. The result |
1140 | * is multiplied with the number of bits set in the bitmask. | 1141 | * is multiplied with the number of bits set in the bitmask. |
1142 | * | ||
1143 | * @cbm is unsigned long, even if only 32 bits are used to make the | ||
1144 | * bitmap functions work correctly. | ||
1141 | */ | 1145 | */ |
1142 | unsigned int rdtgroup_cbm_to_size(struct rdt_resource *r, | 1146 | unsigned int rdtgroup_cbm_to_size(struct rdt_resource *r, |
1143 | struct rdt_domain *d, u32 cbm) | 1147 | struct rdt_domain *d, unsigned long cbm) |
1144 | { | 1148 | { |
1145 | struct cpu_cacheinfo *ci; | 1149 | struct cpu_cacheinfo *ci; |
1146 | unsigned int size = 0; | 1150 | unsigned int size = 0; |
1147 | int num_b, i; | 1151 | int num_b, i; |
1148 | 1152 | ||
1149 | num_b = bitmap_weight((unsigned long *)&cbm, r->cache.cbm_len); | 1153 | num_b = bitmap_weight(&cbm, r->cache.cbm_len); |
1150 | ci = get_cpu_cacheinfo(cpumask_any(&d->cpu_mask)); | 1154 | ci = get_cpu_cacheinfo(cpumask_any(&d->cpu_mask)); |
1151 | for (i = 0; i < ci->num_leaves; i++) { | 1155 | for (i = 0; i < ci->num_leaves; i++) { |
1152 | if (ci->info_list[i].level == r->cache_level) { | 1156 | if (ci->info_list[i].level == r->cache_level) { |
@@ -2353,6 +2357,7 @@ static int rdtgroup_init_alloc(struct rdtgroup *rdtgrp) | |||
2353 | u32 used_b = 0, unused_b = 0; | 2357 | u32 used_b = 0, unused_b = 0; |
2354 | u32 closid = rdtgrp->closid; | 2358 | u32 closid = rdtgrp->closid; |
2355 | struct rdt_resource *r; | 2359 | struct rdt_resource *r; |
2360 | unsigned long tmp_cbm; | ||
2356 | enum rdtgrp_mode mode; | 2361 | enum rdtgrp_mode mode; |
2357 | struct rdt_domain *d; | 2362 | struct rdt_domain *d; |
2358 | int i, ret; | 2363 | int i, ret; |
@@ -2390,9 +2395,14 @@ static int rdtgroup_init_alloc(struct rdtgroup *rdtgrp) | |||
2390 | * modify the CBM based on system availability. | 2395 | * modify the CBM based on system availability. |
2391 | */ | 2396 | */ |
2392 | cbm_ensure_valid(&d->new_ctrl, r); | 2397 | cbm_ensure_valid(&d->new_ctrl, r); |
2393 | if (bitmap_weight((unsigned long *) &d->new_ctrl, | 2398 | /* |
2394 | r->cache.cbm_len) < | 2399 | * Assign the u32 CBM to an unsigned long to ensure |
2395 | r->cache.min_cbm_bits) { | 2400 | * that bitmap_weight() does not access out-of-bound |
2401 | * memory. | ||
2402 | */ | ||
2403 | tmp_cbm = d->new_ctrl; | ||
2404 | if (bitmap_weight(&tmp_cbm, r->cache.cbm_len) < | ||
2405 | r->cache.min_cbm_bits) { | ||
2396 | rdt_last_cmd_printf("no space on %s:%d\n", | 2406 | rdt_last_cmd_printf("no space on %s:%d\n", |
2397 | r->name, d->id); | 2407 | r->name, d->id); |
2398 | return -ENOSPC; | 2408 | return -ENOSPC; |
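Note: the common theme in the three intel_rdt files above is that the bitmap_*() helpers operate on arrays of unsigned long, so casting the address of a u32 CBM made them read past the value on 64-bit kernels. A minimal sketch of the widen-by-value pattern now used (illustrative variable names, not kernel code):

	u32 hw_cbm = d->new_ctrl;            /* 32-bit value from the hardware */
	unsigned long tmp = hw_cbm;          /* widen on the stack             */
	if (bitmap_weight(&tmp, r->cache.cbm_len) < r->cache.min_cbm_bits)
		return -ENOSPC;              /* reads exactly one long         */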
diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
index 089e78c4effd..59274e2c1ac4 100644
--- a/arch/x86/mm/pgtable.c
+++ b/arch/x86/mm/pgtable.c
@@ -115,6 +115,8 @@ static inline void pgd_list_del(pgd_t *pgd) | |||
115 | 115 | ||
116 | #define UNSHARED_PTRS_PER_PGD \ | 116 | #define UNSHARED_PTRS_PER_PGD \ |
117 | (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD) | 117 | (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD) |
118 | #define MAX_UNSHARED_PTRS_PER_PGD \ | ||
119 | max_t(size_t, KERNEL_PGD_BOUNDARY, PTRS_PER_PGD) | ||
118 | 120 | ||
119 | 121 | ||
120 | static void pgd_set_mm(pgd_t *pgd, struct mm_struct *mm) | 122 | static void pgd_set_mm(pgd_t *pgd, struct mm_struct *mm) |
@@ -181,6 +183,7 @@ static void pgd_dtor(pgd_t *pgd) | |||
181 | * and initialize the kernel pmds here. | 183 | * and initialize the kernel pmds here. |
182 | */ | 184 | */ |
183 | #define PREALLOCATED_PMDS UNSHARED_PTRS_PER_PGD | 185 | #define PREALLOCATED_PMDS UNSHARED_PTRS_PER_PGD |
186 | #define MAX_PREALLOCATED_PMDS MAX_UNSHARED_PTRS_PER_PGD | ||
184 | 187 | ||
185 | /* | 188 | /* |
186 | * We allocate separate PMDs for the kernel part of the user page-table | 189 | * We allocate separate PMDs for the kernel part of the user page-table |
@@ -189,6 +192,7 @@ static void pgd_dtor(pgd_t *pgd) | |||
189 | */ | 192 | */ |
190 | #define PREALLOCATED_USER_PMDS (static_cpu_has(X86_FEATURE_PTI) ? \ | 193 | #define PREALLOCATED_USER_PMDS (static_cpu_has(X86_FEATURE_PTI) ? \ |
191 | KERNEL_PGD_PTRS : 0) | 194 | KERNEL_PGD_PTRS : 0) |
195 | #define MAX_PREALLOCATED_USER_PMDS KERNEL_PGD_PTRS | ||
192 | 196 | ||
193 | void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd) | 197 | void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd) |
194 | { | 198 | { |
@@ -210,7 +214,9 @@ void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd) | |||
210 | 214 | ||
211 | /* No need to prepopulate any pagetable entries in non-PAE modes. */ | 215 | /* No need to prepopulate any pagetable entries in non-PAE modes. */ |
212 | #define PREALLOCATED_PMDS 0 | 216 | #define PREALLOCATED_PMDS 0 |
217 | #define MAX_PREALLOCATED_PMDS 0 | ||
213 | #define PREALLOCATED_USER_PMDS 0 | 218 | #define PREALLOCATED_USER_PMDS 0 |
219 | #define MAX_PREALLOCATED_USER_PMDS 0 | ||
214 | #endif /* CONFIG_X86_PAE */ | 220 | #endif /* CONFIG_X86_PAE */ |
215 | 221 | ||
216 | static void free_pmds(struct mm_struct *mm, pmd_t *pmds[], int count) | 222 | static void free_pmds(struct mm_struct *mm, pmd_t *pmds[], int count) |
@@ -428,8 +434,8 @@ static inline void _pgd_free(pgd_t *pgd) | |||
428 | pgd_t *pgd_alloc(struct mm_struct *mm) | 434 | pgd_t *pgd_alloc(struct mm_struct *mm) |
429 | { | 435 | { |
430 | pgd_t *pgd; | 436 | pgd_t *pgd; |
431 | pmd_t *u_pmds[PREALLOCATED_USER_PMDS]; | 437 | pmd_t *u_pmds[MAX_PREALLOCATED_USER_PMDS]; |
432 | pmd_t *pmds[PREALLOCATED_PMDS]; | 438 | pmd_t *pmds[MAX_PREALLOCATED_PMDS]; |
433 | 439 | ||
434 | pgd = _pgd_alloc(); | 440 | pgd = _pgd_alloc(); |
435 | 441 | ||
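Note: PREALLOCATED_PMDS and PREALLOCATED_USER_PMDS expand to runtime expressions (SHARED_KERNEL_PMD, static_cpu_has(X86_FEATURE_PTI)), so using them as array bounds in pgd_alloc() produced variable-length arrays; the MAX_* counterparts are compile-time upper bounds, trading a few unused stack slots for fixed-size arrays.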
diff --git a/drivers/base/firmware_loader/main.c b/drivers/base/firmware_loader/main.c
index b3c0498ee433..8e9213b36e31 100644
--- a/drivers/base/firmware_loader/main.c
+++ b/drivers/base/firmware_loader/main.c
@@ -226,8 +226,11 @@ static int alloc_lookup_fw_priv(const char *fw_name, | |||
226 | } | 226 | } |
227 | 227 | ||
228 | tmp = __allocate_fw_priv(fw_name, fwc, dbuf, size); | 228 | tmp = __allocate_fw_priv(fw_name, fwc, dbuf, size); |
229 | if (tmp && !(opt_flags & FW_OPT_NOCACHE)) | 229 | if (tmp) { |
230 | list_add(&tmp->list, &fwc->head); | 230 | INIT_LIST_HEAD(&tmp->list); |
231 | if (!(opt_flags & FW_OPT_NOCACHE)) | ||
232 | list_add(&tmp->list, &fwc->head); | ||
233 | } | ||
231 | spin_unlock(&fwc->lock); | 234 | spin_unlock(&fwc->lock); |
232 | 235 | ||
233 | *fw_priv = tmp; | 236 | *fw_priv = tmp; |
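Note: the firmware loader fix above initializes fw_priv->list unconditionally and only links it into the cache when FW_OPT_NOCACHE is not set, so a later list_del() on an uncached entry sees a valid, self-pointing node instead of uninitialized memory. A hedged sketch of the idiom with illustrative types (struct item and do_cache are not the driver's real names):

    #include <linux/list.h>
    #include <linux/slab.h>

    struct item {
            struct list_head list;
    };

    static struct item *item_create(struct list_head *cache, bool do_cache)
    {
            struct item *it = kzalloc(sizeof(*it), GFP_KERNEL);

            if (!it)
                    return NULL;
            INIT_LIST_HEAD(&it->list);              /* valid even if never added to a list */
            if (do_cache)
                    list_add(&it->list, cache);
            return it;
    }

    static void item_destroy(struct item *it)
    {
            list_del(&it->list);                    /* safe whether or not it was linked */
            kfree(it);
    }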
diff --git a/drivers/bluetooth/hci_qca.c b/drivers/bluetooth/hci_qca.c index d98ed0442201..9f1392fc7105 100644 --- a/drivers/bluetooth/hci_qca.c +++ b/drivers/bluetooth/hci_qca.c | |||
@@ -1356,7 +1356,7 @@ static int qca_init_regulators(struct qca_power *qca, | |||
1356 | { | 1356 | { |
1357 | int i; | 1357 | int i; |
1358 | 1358 | ||
1359 | qca->vreg_bulk = devm_kzalloc(qca->dev, num_vregs * | 1359 | qca->vreg_bulk = devm_kcalloc(qca->dev, num_vregs, |
1360 | sizeof(struct regulator_bulk_data), | 1360 | sizeof(struct regulator_bulk_data), |
1361 | GFP_KERNEL); | 1361 | GFP_KERNEL); |
1362 | if (!qca->vreg_bulk) | 1362 | if (!qca->vreg_bulk) |
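Note: this hunk, like the safexcel, mtk_drm, dpu, npcm750, mt76 and pcie-cadence hunks further down, converts an open-coded `count * sizeof(elem)` allocation to the kcalloc-style devm helper, which returns NULL on multiplication overflow instead of silently allocating a short buffer. The shape of the conversion (names here are illustrative):

    struct regulator_bulk_data *bulk;

    /* overflow-checked: fails cleanly if num_vregs * sizeof(*bulk) would overflow */
    bulk = devm_kcalloc(dev, num_vregs, sizeof(*bulk), GFP_KERNEL);
    if (!bulk)
            return -ENOMEM;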
diff --git a/drivers/crypto/inside-secure/safexcel.c b/drivers/crypto/inside-secure/safexcel.c index 7e71043457a6..86c699c14f84 100644 --- a/drivers/crypto/inside-secure/safexcel.c +++ b/drivers/crypto/inside-secure/safexcel.c | |||
@@ -1044,7 +1044,8 @@ static int safexcel_probe(struct platform_device *pdev) | |||
1044 | 1044 | ||
1045 | safexcel_configure(priv); | 1045 | safexcel_configure(priv); |
1046 | 1046 | ||
1047 | priv->ring = devm_kzalloc(dev, priv->config.rings * sizeof(*priv->ring), | 1047 | priv->ring = devm_kcalloc(dev, priv->config.rings, |
1048 | sizeof(*priv->ring), | ||
1048 | GFP_KERNEL); | 1049 | GFP_KERNEL); |
1049 | if (!priv->ring) { | 1050 | if (!priv->ring) { |
1050 | ret = -ENOMEM; | 1051 | ret = -ENOMEM; |
@@ -1063,8 +1064,9 @@ static int safexcel_probe(struct platform_device *pdev) | |||
1063 | if (ret) | 1064 | if (ret) |
1064 | goto err_reg_clk; | 1065 | goto err_reg_clk; |
1065 | 1066 | ||
1066 | priv->ring[i].rdr_req = devm_kzalloc(dev, | 1067 | priv->ring[i].rdr_req = devm_kcalloc(dev, |
1067 | sizeof(priv->ring[i].rdr_req) * EIP197_DEFAULT_RING_SIZE, | 1068 | EIP197_DEFAULT_RING_SIZE, |
1069 | sizeof(priv->ring[i].rdr_req), | ||
1068 | GFP_KERNEL); | 1070 | GFP_KERNEL); |
1069 | if (!priv->ring[i].rdr_req) { | 1071 | if (!priv->ring[i].rdr_req) { |
1070 | ret = -ENOMEM; | 1072 | ret = -ENOMEM; |
diff --git a/drivers/fpga/dfl-fme-region.c b/drivers/fpga/dfl-fme-region.c index 0b7e19c27c6d..51a5ac2293a7 100644 --- a/drivers/fpga/dfl-fme-region.c +++ b/drivers/fpga/dfl-fme-region.c | |||
@@ -14,6 +14,7 @@ | |||
14 | */ | 14 | */ |
15 | 15 | ||
16 | #include <linux/module.h> | 16 | #include <linux/module.h> |
17 | #include <linux/fpga/fpga-mgr.h> | ||
17 | #include <linux/fpga/fpga-region.h> | 18 | #include <linux/fpga/fpga-region.h> |
18 | 19 | ||
19 | #include "dfl-fme-pr.h" | 20 | #include "dfl-fme-pr.h" |
@@ -66,9 +67,10 @@ eprobe_mgr_put: | |||
66 | static int fme_region_remove(struct platform_device *pdev) | 67 | static int fme_region_remove(struct platform_device *pdev) |
67 | { | 68 | { |
68 | struct fpga_region *region = dev_get_drvdata(&pdev->dev); | 69 | struct fpga_region *region = dev_get_drvdata(&pdev->dev); |
70 | struct fpga_manager *mgr = region->mgr; | ||
69 | 71 | ||
70 | fpga_region_unregister(region); | 72 | fpga_region_unregister(region); |
71 | fpga_mgr_put(region->mgr); | 73 | fpga_mgr_put(mgr); |
72 | 74 | ||
73 | return 0; | 75 | return 0; |
74 | } | 76 | } |
diff --git a/drivers/fpga/fpga-bridge.c b/drivers/fpga/fpga-bridge.c index 24b8f98b73ec..c983dac97501 100644 --- a/drivers/fpga/fpga-bridge.c +++ b/drivers/fpga/fpga-bridge.c | |||
@@ -125,7 +125,7 @@ static int fpga_bridge_dev_match(struct device *dev, const void *data) | |||
125 | * | 125 | * |
126 | * Given a device, get an exclusive reference to a fpga bridge. | 126 | * Given a device, get an exclusive reference to a fpga bridge. |
127 | * | 127 | * |
128 | * Return: fpga manager struct or IS_ERR() condition containing error code. | 128 | * Return: fpga bridge struct or IS_ERR() condition containing error code. |
129 | */ | 129 | */ |
130 | struct fpga_bridge *fpga_bridge_get(struct device *dev, | 130 | struct fpga_bridge *fpga_bridge_get(struct device *dev, |
131 | struct fpga_image_info *info) | 131 | struct fpga_image_info *info) |
diff --git a/drivers/fpga/of-fpga-region.c b/drivers/fpga/of-fpga-region.c index 35fabb8083fb..052a1342ab7e 100644 --- a/drivers/fpga/of-fpga-region.c +++ b/drivers/fpga/of-fpga-region.c | |||
@@ -437,9 +437,10 @@ eprobe_mgr_put: | |||
437 | static int of_fpga_region_remove(struct platform_device *pdev) | 437 | static int of_fpga_region_remove(struct platform_device *pdev) |
438 | { | 438 | { |
439 | struct fpga_region *region = platform_get_drvdata(pdev); | 439 | struct fpga_region *region = platform_get_drvdata(pdev); |
440 | struct fpga_manager *mgr = region->mgr; | ||
440 | 441 | ||
441 | fpga_region_unregister(region); | 442 | fpga_region_unregister(region); |
442 | fpga_mgr_put(region->mgr); | 443 | fpga_mgr_put(mgr); |
443 | 444 | ||
444 | return 0; | 445 | return 0; |
445 | } | 446 | } |
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c index a57300c1d649..25187403e3ac 100644 --- a/drivers/gpio/gpiolib.c +++ b/drivers/gpio/gpiolib.c | |||
@@ -1682,7 +1682,8 @@ static void gpiochip_set_cascaded_irqchip(struct gpio_chip *gpiochip, | |||
1682 | irq_set_chained_handler_and_data(parent_irq, parent_handler, | 1682 | irq_set_chained_handler_and_data(parent_irq, parent_handler, |
1683 | gpiochip); | 1683 | gpiochip); |
1684 | 1684 | ||
1685 | gpiochip->irq.parents = &parent_irq; | 1685 | gpiochip->irq.parent_irq = parent_irq; |
1686 | gpiochip->irq.parents = &gpiochip->irq.parent_irq; | ||
1686 | gpiochip->irq.num_parents = 1; | 1687 | gpiochip->irq.num_parents = 1; |
1687 | } | 1688 | } |
1688 | 1689 | ||
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c index 0b976dfd04df..92ecb9bf982c 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c +++ b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c | |||
@@ -600,7 +600,7 @@ int mtk_drm_crtc_create(struct drm_device *drm_dev, | |||
600 | } | 600 | } |
601 | 601 | ||
602 | mtk_crtc->layer_nr = mtk_ddp_comp_layer_nr(mtk_crtc->ddp_comp[0]); | 602 | mtk_crtc->layer_nr = mtk_ddp_comp_layer_nr(mtk_crtc->ddp_comp[0]); |
603 | mtk_crtc->planes = devm_kzalloc(dev, mtk_crtc->layer_nr * | 603 | mtk_crtc->planes = devm_kcalloc(dev, mtk_crtc->layer_nr, |
604 | sizeof(struct drm_plane), | 604 | sizeof(struct drm_plane), |
605 | GFP_KERNEL); | 605 | GFP_KERNEL); |
606 | 606 | ||
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_io_util.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_io_util.c index 790d39f816dc..b557687b1964 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_io_util.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_io_util.c | |||
@@ -153,8 +153,8 @@ int msm_dss_parse_clock(struct platform_device *pdev, | |||
153 | return 0; | 153 | return 0; |
154 | } | 154 | } |
155 | 155 | ||
156 | mp->clk_config = devm_kzalloc(&pdev->dev, | 156 | mp->clk_config = devm_kcalloc(&pdev->dev, |
157 | sizeof(struct dss_clk) * num_clk, | 157 | num_clk, sizeof(struct dss_clk), |
158 | GFP_KERNEL); | 158 | GFP_KERNEL); |
159 | if (!mp->clk_config) | 159 | if (!mp->clk_config) |
160 | return -ENOMEM; | 160 | return -ENOMEM; |
diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.c b/drivers/gpu/drm/nouveau/dispnv50/disp.c index 5691dfa1db6f..041e7daf8a33 100644 --- a/drivers/gpu/drm/nouveau/dispnv50/disp.c +++ b/drivers/gpu/drm/nouveau/dispnv50/disp.c | |||
@@ -900,9 +900,22 @@ static enum drm_connector_status | |||
900 | nv50_mstc_detect(struct drm_connector *connector, bool force) | 900 | nv50_mstc_detect(struct drm_connector *connector, bool force) |
901 | { | 901 | { |
902 | struct nv50_mstc *mstc = nv50_mstc(connector); | 902 | struct nv50_mstc *mstc = nv50_mstc(connector); |
903 | enum drm_connector_status conn_status; | ||
904 | int ret; | ||
905 | |||
903 | if (!mstc->port) | 906 | if (!mstc->port) |
904 | return connector_status_disconnected; | 907 | return connector_status_disconnected; |
905 | return drm_dp_mst_detect_port(connector, mstc->port->mgr, mstc->port); | 908 | |
909 | ret = pm_runtime_get_sync(connector->dev->dev); | ||
910 | if (ret < 0 && ret != -EACCES) | ||
911 | return connector_status_disconnected; | ||
912 | |||
913 | conn_status = drm_dp_mst_detect_port(connector, mstc->port->mgr, | ||
914 | mstc->port); | ||
915 | |||
916 | pm_runtime_mark_last_busy(connector->dev->dev); | ||
917 | pm_runtime_put_autosuspend(connector->dev->dev); | ||
918 | return conn_status; | ||
906 | } | 919 | } |
907 | 920 | ||
908 | static void | 921 | static void |
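Note: the nv50_mstc_detect() change brackets the MST port probe with a runtime-PM reference so the GPU is resumed while the topology is queried; an -EACCES return is tolerated since it commonly indicates runtime PM is simply disabled for the device. A sketch mirroring the flow (do_detect() stands in for the hardware access):

    ret = pm_runtime_get_sync(dev);
    if (ret < 0 && ret != -EACCES)
            return connector_status_disconnected;

    status = do_detect(dev);                /* hardware access while powered */

    pm_runtime_mark_last_busy(dev);
    pm_runtime_put_autosuspend(dev);
    return status;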
diff --git a/drivers/hv/connection.c b/drivers/hv/connection.c index ced041899456..f4d08c8ac7f8 100644 --- a/drivers/hv/connection.c +++ b/drivers/hv/connection.c | |||
@@ -76,6 +76,7 @@ static int vmbus_negotiate_version(struct vmbus_channel_msginfo *msginfo, | |||
76 | __u32 version) | 76 | __u32 version) |
77 | { | 77 | { |
78 | int ret = 0; | 78 | int ret = 0; |
79 | unsigned int cur_cpu; | ||
79 | struct vmbus_channel_initiate_contact *msg; | 80 | struct vmbus_channel_initiate_contact *msg; |
80 | unsigned long flags; | 81 | unsigned long flags; |
81 | 82 | ||
@@ -118,9 +119,10 @@ static int vmbus_negotiate_version(struct vmbus_channel_msginfo *msginfo, | |||
118 | * the CPU attempting to connect may not be CPU 0. | 119 | * the CPU attempting to connect may not be CPU 0. |
119 | */ | 120 | */ |
120 | if (version >= VERSION_WIN8_1) { | 121 | if (version >= VERSION_WIN8_1) { |
121 | msg->target_vcpu = | 122 | cur_cpu = get_cpu(); |
122 | hv_cpu_number_to_vp_number(smp_processor_id()); | 123 | msg->target_vcpu = hv_cpu_number_to_vp_number(cur_cpu); |
123 | vmbus_connection.connect_cpu = smp_processor_id(); | 124 | vmbus_connection.connect_cpu = cur_cpu; |
125 | put_cpu(); | ||
124 | } else { | 126 | } else { |
125 | msg->target_vcpu = 0; | 127 | msg->target_vcpu = 0; |
126 | vmbus_connection.connect_cpu = 0; | 128 | vmbus_connection.connect_cpu = 0; |
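Note: the vmbus hunk replaces two separate smp_processor_id() reads with a get_cpu()/put_cpu() pair, so preemption cannot migrate the task between obtaining the CPU number and recording it. The general idiom (use_cpu_number() is a placeholder for the two assignments):

    unsigned int cpu;

    cpu = get_cpu();                /* disables preemption, returns current CPU */
    use_cpu_number(cpu);            /* every user sees the same, still-valid id */
    put_cpu();                      /* re-enables preemption */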
diff --git a/drivers/hwmon/npcm750-pwm-fan.c b/drivers/hwmon/npcm750-pwm-fan.c index 8474d601aa63..b998f9fbed41 100644 --- a/drivers/hwmon/npcm750-pwm-fan.c +++ b/drivers/hwmon/npcm750-pwm-fan.c | |||
@@ -908,7 +908,7 @@ static int npcm7xx_en_pwm_fan(struct device *dev, | |||
908 | if (fan_cnt < 1) | 908 | if (fan_cnt < 1) |
909 | return -EINVAL; | 909 | return -EINVAL; |
910 | 910 | ||
911 | fan_ch = devm_kzalloc(dev, sizeof(*fan_ch) * fan_cnt, GFP_KERNEL); | 911 | fan_ch = devm_kcalloc(dev, fan_cnt, sizeof(*fan_ch), GFP_KERNEL); |
912 | if (!fan_ch) | 912 | if (!fan_ch) |
913 | return -ENOMEM; | 913 | return -ENOMEM; |
914 | 914 | ||
diff --git a/drivers/i2c/busses/i2c-designware-master.c b/drivers/i2c/busses/i2c-designware-master.c index 94d94b4a9a0d..18cc324f3ca9 100644 --- a/drivers/i2c/busses/i2c-designware-master.c +++ b/drivers/i2c/busses/i2c-designware-master.c | |||
@@ -34,11 +34,11 @@ static void i2c_dw_configure_fifo_master(struct dw_i2c_dev *dev) | |||
34 | 34 | ||
35 | static int i2c_dw_set_timings_master(struct dw_i2c_dev *dev) | 35 | static int i2c_dw_set_timings_master(struct dw_i2c_dev *dev) |
36 | { | 36 | { |
37 | u32 ic_clk = i2c_dw_clk_rate(dev); | ||
38 | const char *mode_str, *fp_str = ""; | 37 | const char *mode_str, *fp_str = ""; |
39 | u32 comp_param1; | 38 | u32 comp_param1; |
40 | u32 sda_falling_time, scl_falling_time; | 39 | u32 sda_falling_time, scl_falling_time; |
41 | struct i2c_timings *t = &dev->timings; | 40 | struct i2c_timings *t = &dev->timings; |
41 | u32 ic_clk; | ||
42 | int ret; | 42 | int ret; |
43 | 43 | ||
44 | ret = i2c_dw_acquire_lock(dev); | 44 | ret = i2c_dw_acquire_lock(dev); |
@@ -53,6 +53,7 @@ static int i2c_dw_set_timings_master(struct dw_i2c_dev *dev) | |||
53 | 53 | ||
54 | /* Calculate SCL timing parameters for standard mode if not set */ | 54 | /* Calculate SCL timing parameters for standard mode if not set */ |
55 | if (!dev->ss_hcnt || !dev->ss_lcnt) { | 55 | if (!dev->ss_hcnt || !dev->ss_lcnt) { |
56 | ic_clk = i2c_dw_clk_rate(dev); | ||
56 | dev->ss_hcnt = | 57 | dev->ss_hcnt = |
57 | i2c_dw_scl_hcnt(ic_clk, | 58 | i2c_dw_scl_hcnt(ic_clk, |
58 | 4000, /* tHD;STA = tHIGH = 4.0 us */ | 59 | 4000, /* tHD;STA = tHIGH = 4.0 us */ |
@@ -89,6 +90,7 @@ static int i2c_dw_set_timings_master(struct dw_i2c_dev *dev) | |||
89 | * needed also in high speed mode. | 90 | * needed also in high speed mode. |
90 | */ | 91 | */ |
91 | if (!dev->fs_hcnt || !dev->fs_lcnt) { | 92 | if (!dev->fs_hcnt || !dev->fs_lcnt) { |
93 | ic_clk = i2c_dw_clk_rate(dev); | ||
92 | dev->fs_hcnt = | 94 | dev->fs_hcnt = |
93 | i2c_dw_scl_hcnt(ic_clk, | 95 | i2c_dw_scl_hcnt(ic_clk, |
94 | 600, /* tHD;STA = tHIGH = 0.6 us */ | 96 | 600, /* tHD;STA = tHIGH = 0.6 us */ |
diff --git a/drivers/i2c/busses/i2c-isch.c b/drivers/i2c/busses/i2c-isch.c index 0cf1379f4e80..5c754bf659e2 100644 --- a/drivers/i2c/busses/i2c-isch.c +++ b/drivers/i2c/busses/i2c-isch.c | |||
@@ -164,7 +164,7 @@ static s32 sch_access(struct i2c_adapter *adap, u16 addr, | |||
164 | * run ~75 kHz instead which should do no harm. | 164 | * run ~75 kHz instead which should do no harm. |
165 | */ | 165 | */ |
166 | dev_notice(&sch_adapter.dev, | 166 | dev_notice(&sch_adapter.dev, |
167 | "Clock divider unitialized. Setting defaults\n"); | 167 | "Clock divider uninitialized. Setting defaults\n"); |
168 | outw(backbone_speed / (4 * 100), SMBHSTCLK); | 168 | outw(backbone_speed / (4 * 100), SMBHSTCLK); |
169 | } | 169 | } |
170 | 170 | ||
diff --git a/drivers/i2c/busses/i2c-qcom-geni.c b/drivers/i2c/busses/i2c-qcom-geni.c index 36732eb688a4..9f2eb02481d3 100644 --- a/drivers/i2c/busses/i2c-qcom-geni.c +++ b/drivers/i2c/busses/i2c-qcom-geni.c | |||
@@ -367,20 +367,26 @@ static int geni_i2c_rx_one_msg(struct geni_i2c_dev *gi2c, struct i2c_msg *msg, | |||
367 | dma_addr_t rx_dma; | 367 | dma_addr_t rx_dma; |
368 | enum geni_se_xfer_mode mode; | 368 | enum geni_se_xfer_mode mode; |
369 | unsigned long time_left = XFER_TIMEOUT; | 369 | unsigned long time_left = XFER_TIMEOUT; |
370 | void *dma_buf; | ||
370 | 371 | ||
371 | gi2c->cur = msg; | 372 | gi2c->cur = msg; |
372 | mode = msg->len > 32 ? GENI_SE_DMA : GENI_SE_FIFO; | 373 | mode = GENI_SE_FIFO; |
374 | dma_buf = i2c_get_dma_safe_msg_buf(msg, 32); | ||
375 | if (dma_buf) | ||
376 | mode = GENI_SE_DMA; | ||
377 | |||
373 | geni_se_select_mode(&gi2c->se, mode); | 378 | geni_se_select_mode(&gi2c->se, mode); |
374 | writel_relaxed(msg->len, gi2c->se.base + SE_I2C_RX_TRANS_LEN); | 379 | writel_relaxed(msg->len, gi2c->se.base + SE_I2C_RX_TRANS_LEN); |
375 | geni_se_setup_m_cmd(&gi2c->se, I2C_READ, m_param); | 380 | geni_se_setup_m_cmd(&gi2c->se, I2C_READ, m_param); |
376 | if (mode == GENI_SE_DMA) { | 381 | if (mode == GENI_SE_DMA) { |
377 | int ret; | 382 | int ret; |
378 | 383 | ||
379 | ret = geni_se_rx_dma_prep(&gi2c->se, msg->buf, msg->len, | 384 | ret = geni_se_rx_dma_prep(&gi2c->se, dma_buf, msg->len, |
380 | &rx_dma); | 385 | &rx_dma); |
381 | if (ret) { | 386 | if (ret) { |
382 | mode = GENI_SE_FIFO; | 387 | mode = GENI_SE_FIFO; |
383 | geni_se_select_mode(&gi2c->se, mode); | 388 | geni_se_select_mode(&gi2c->se, mode); |
389 | i2c_put_dma_safe_msg_buf(dma_buf, msg, false); | ||
384 | } | 390 | } |
385 | } | 391 | } |
386 | 392 | ||
@@ -393,6 +399,7 @@ static int geni_i2c_rx_one_msg(struct geni_i2c_dev *gi2c, struct i2c_msg *msg, | |||
393 | if (gi2c->err) | 399 | if (gi2c->err) |
394 | geni_i2c_rx_fsm_rst(gi2c); | 400 | geni_i2c_rx_fsm_rst(gi2c); |
395 | geni_se_rx_dma_unprep(&gi2c->se, rx_dma, msg->len); | 401 | geni_se_rx_dma_unprep(&gi2c->se, rx_dma, msg->len); |
402 | i2c_put_dma_safe_msg_buf(dma_buf, msg, !gi2c->err); | ||
396 | } | 403 | } |
397 | return gi2c->err; | 404 | return gi2c->err; |
398 | } | 405 | } |
@@ -403,20 +410,26 @@ static int geni_i2c_tx_one_msg(struct geni_i2c_dev *gi2c, struct i2c_msg *msg, | |||
403 | dma_addr_t tx_dma; | 410 | dma_addr_t tx_dma; |
404 | enum geni_se_xfer_mode mode; | 411 | enum geni_se_xfer_mode mode; |
405 | unsigned long time_left; | 412 | unsigned long time_left; |
413 | void *dma_buf; | ||
406 | 414 | ||
407 | gi2c->cur = msg; | 415 | gi2c->cur = msg; |
408 | mode = msg->len > 32 ? GENI_SE_DMA : GENI_SE_FIFO; | 416 | mode = GENI_SE_FIFO; |
417 | dma_buf = i2c_get_dma_safe_msg_buf(msg, 32); | ||
418 | if (dma_buf) | ||
419 | mode = GENI_SE_DMA; | ||
420 | |||
409 | geni_se_select_mode(&gi2c->se, mode); | 421 | geni_se_select_mode(&gi2c->se, mode); |
410 | writel_relaxed(msg->len, gi2c->se.base + SE_I2C_TX_TRANS_LEN); | 422 | writel_relaxed(msg->len, gi2c->se.base + SE_I2C_TX_TRANS_LEN); |
411 | geni_se_setup_m_cmd(&gi2c->se, I2C_WRITE, m_param); | 423 | geni_se_setup_m_cmd(&gi2c->se, I2C_WRITE, m_param); |
412 | if (mode == GENI_SE_DMA) { | 424 | if (mode == GENI_SE_DMA) { |
413 | int ret; | 425 | int ret; |
414 | 426 | ||
415 | ret = geni_se_tx_dma_prep(&gi2c->se, msg->buf, msg->len, | 427 | ret = geni_se_tx_dma_prep(&gi2c->se, dma_buf, msg->len, |
416 | &tx_dma); | 428 | &tx_dma); |
417 | if (ret) { | 429 | if (ret) { |
418 | mode = GENI_SE_FIFO; | 430 | mode = GENI_SE_FIFO; |
419 | geni_se_select_mode(&gi2c->se, mode); | 431 | geni_se_select_mode(&gi2c->se, mode); |
432 | i2c_put_dma_safe_msg_buf(dma_buf, msg, false); | ||
420 | } | 433 | } |
421 | } | 434 | } |
422 | 435 | ||
@@ -432,6 +445,7 @@ static int geni_i2c_tx_one_msg(struct geni_i2c_dev *gi2c, struct i2c_msg *msg, | |||
432 | if (gi2c->err) | 445 | if (gi2c->err) |
433 | geni_i2c_tx_fsm_rst(gi2c); | 446 | geni_i2c_tx_fsm_rst(gi2c); |
434 | geni_se_tx_dma_unprep(&gi2c->se, tx_dma, msg->len); | 447 | geni_se_tx_dma_unprep(&gi2c->se, tx_dma, msg->len); |
448 | i2c_put_dma_safe_msg_buf(dma_buf, msg, !gi2c->err); | ||
435 | } | 449 | } |
436 | return gi2c->err; | 450 | return gi2c->err; |
437 | } | 451 | } |
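Note: both geni transfer paths now obtain a DMA-safe bounce buffer from the I2C core instead of mapping msg->buf directly; the helper returns NULL for messages below the 32-byte threshold, which selects FIFO mode here. A sketch of the helper pairing (surrounding setup elided):

    void *dma_buf = i2c_get_dma_safe_msg_buf(msg, 32);     /* NULL -> use FIFO */

    if (dma_buf) {
            /* ... program the DMA engine with dma_buf and run the transfer ... */
            i2c_put_dma_safe_msg_buf(dma_buf, msg, !err);   /* copy back only if it succeeded */
    }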
diff --git a/drivers/i2c/busses/i2c-scmi.c b/drivers/i2c/busses/i2c-scmi.c index a01389b85f13..7e9a2bbf5ddc 100644 --- a/drivers/i2c/busses/i2c-scmi.c +++ b/drivers/i2c/busses/i2c-scmi.c | |||
@@ -152,6 +152,7 @@ acpi_smbus_cmi_access(struct i2c_adapter *adap, u16 addr, unsigned short flags, | |||
152 | mt_params[3].type = ACPI_TYPE_INTEGER; | 152 | mt_params[3].type = ACPI_TYPE_INTEGER; |
153 | mt_params[3].integer.value = len; | 153 | mt_params[3].integer.value = len; |
154 | mt_params[4].type = ACPI_TYPE_BUFFER; | 154 | mt_params[4].type = ACPI_TYPE_BUFFER; |
155 | mt_params[4].buffer.length = len; | ||
155 | mt_params[4].buffer.pointer = data->block + 1; | 156 | mt_params[4].buffer.pointer = data->block + 1; |
156 | } | 157 | } |
157 | break; | 158 | break; |
diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c index 9fb1d9cb9401..e22314837645 100644 --- a/drivers/infiniband/hw/mlx5/mr.c +++ b/drivers/infiniband/hw/mlx5/mr.c | |||
@@ -544,6 +544,9 @@ void mlx5_mr_cache_free(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr) | |||
544 | int shrink = 0; | 544 | int shrink = 0; |
545 | int c; | 545 | int c; |
546 | 546 | ||
547 | if (!mr->allocated_from_cache) | ||
548 | return; | ||
549 | |||
547 | c = order2idx(dev, mr->order); | 550 | c = order2idx(dev, mr->order); |
548 | if (c < 0 || c >= MAX_MR_CACHE_ENTRIES) { | 551 | if (c < 0 || c >= MAX_MR_CACHE_ENTRIES) { |
549 | mlx5_ib_warn(dev, "order %d, cache index %d\n", mr->order, c); | 552 | mlx5_ib_warn(dev, "order %d, cache index %d\n", mr->order, c); |
@@ -1647,18 +1650,19 @@ static void dereg_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr) | |||
1647 | umem = NULL; | 1650 | umem = NULL; |
1648 | } | 1651 | } |
1649 | #endif | 1652 | #endif |
1650 | |||
1651 | clean_mr(dev, mr); | 1653 | clean_mr(dev, mr); |
1652 | 1654 | ||
1655 | /* | ||
1656 | * We should unregister the DMA address from the HCA before | ||
1657 | * remove the DMA mapping. | ||
1658 | */ | ||
1659 | mlx5_mr_cache_free(dev, mr); | ||
1653 | if (umem) { | 1660 | if (umem) { |
1654 | ib_umem_release(umem); | 1661 | ib_umem_release(umem); |
1655 | atomic_sub(npages, &dev->mdev->priv.reg_pages); | 1662 | atomic_sub(npages, &dev->mdev->priv.reg_pages); |
1656 | } | 1663 | } |
1657 | |||
1658 | if (!mr->allocated_from_cache) | 1664 | if (!mr->allocated_from_cache) |
1659 | kfree(mr); | 1665 | kfree(mr); |
1660 | else | ||
1661 | mlx5_mr_cache_free(dev, mr); | ||
1662 | } | 1666 | } |
1663 | 1667 | ||
1664 | int mlx5_ib_dereg_mr(struct ib_mr *ibmr) | 1668 | int mlx5_ib_dereg_mr(struct ib_mr *ibmr) |
diff --git a/drivers/input/evdev.c b/drivers/input/evdev.c index 370206f987f9..f48369d6f3a0 100644 --- a/drivers/input/evdev.c +++ b/drivers/input/evdev.c | |||
@@ -564,6 +564,7 @@ static ssize_t evdev_write(struct file *file, const char __user *buffer, | |||
564 | 564 | ||
565 | input_inject_event(&evdev->handle, | 565 | input_inject_event(&evdev->handle, |
566 | event.type, event.code, event.value); | 566 | event.type, event.code, event.value); |
567 | cond_resched(); | ||
567 | } | 568 | } |
568 | 569 | ||
569 | out: | 570 | out: |
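Note: evdev_write(), like the uinput and mousedev write paths changed below, injects one event per loop iteration with no natural scheduling point, so a very large write from userspace could hog the CPU; cond_resched() adds a voluntary preemption point. The shape of the loop (inject_one_event() is illustrative):

    while (bytes + sizeof(ev) <= count) {
            if (copy_from_user(&ev, buffer + bytes, sizeof(ev)))
                    return -EFAULT;
            bytes += sizeof(ev);
            inject_one_event(&ev);          /* e.g. input_inject_event() */
            cond_resched();                 /* let the scheduler run if needed */
    }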
diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c index cd620e009bad..d4b9db487b16 100644 --- a/drivers/input/joystick/xpad.c +++ b/drivers/input/joystick/xpad.c | |||
@@ -231,6 +231,7 @@ static const struct xpad_device { | |||
231 | { 0x0e6f, 0x0246, "Rock Candy Gamepad for Xbox One 2015", 0, XTYPE_XBOXONE }, | 231 | { 0x0e6f, 0x0246, "Rock Candy Gamepad for Xbox One 2015", 0, XTYPE_XBOXONE }, |
232 | { 0x0e6f, 0x02ab, "PDP Controller for Xbox One", 0, XTYPE_XBOXONE }, | 232 | { 0x0e6f, 0x02ab, "PDP Controller for Xbox One", 0, XTYPE_XBOXONE }, |
233 | { 0x0e6f, 0x02a4, "PDP Wired Controller for Xbox One - Stealth Series", 0, XTYPE_XBOXONE }, | 233 | { 0x0e6f, 0x02a4, "PDP Wired Controller for Xbox One - Stealth Series", 0, XTYPE_XBOXONE }, |
234 | { 0x0e6f, 0x02a6, "PDP Wired Controller for Xbox One - Camo Series", 0, XTYPE_XBOXONE }, | ||
234 | { 0x0e6f, 0x0301, "Logic3 Controller", 0, XTYPE_XBOX360 }, | 235 | { 0x0e6f, 0x0301, "Logic3 Controller", 0, XTYPE_XBOX360 }, |
235 | { 0x0e6f, 0x0346, "Rock Candy Gamepad for Xbox One 2016", 0, XTYPE_XBOXONE }, | 236 | { 0x0e6f, 0x0346, "Rock Candy Gamepad for Xbox One 2016", 0, XTYPE_XBOXONE }, |
236 | { 0x0e6f, 0x0401, "Logic3 Controller", 0, XTYPE_XBOX360 }, | 237 | { 0x0e6f, 0x0401, "Logic3 Controller", 0, XTYPE_XBOX360 }, |
@@ -530,6 +531,8 @@ static const struct xboxone_init_packet xboxone_init_packets[] = { | |||
530 | XBOXONE_INIT_PKT(0x0e6f, 0x02ab, xboxone_pdp_init2), | 531 | XBOXONE_INIT_PKT(0x0e6f, 0x02ab, xboxone_pdp_init2), |
531 | XBOXONE_INIT_PKT(0x0e6f, 0x02a4, xboxone_pdp_init1), | 532 | XBOXONE_INIT_PKT(0x0e6f, 0x02a4, xboxone_pdp_init1), |
532 | XBOXONE_INIT_PKT(0x0e6f, 0x02a4, xboxone_pdp_init2), | 533 | XBOXONE_INIT_PKT(0x0e6f, 0x02a4, xboxone_pdp_init2), |
534 | XBOXONE_INIT_PKT(0x0e6f, 0x02a6, xboxone_pdp_init1), | ||
535 | XBOXONE_INIT_PKT(0x0e6f, 0x02a6, xboxone_pdp_init2), | ||
533 | XBOXONE_INIT_PKT(0x24c6, 0x541a, xboxone_rumblebegin_init), | 536 | XBOXONE_INIT_PKT(0x24c6, 0x541a, xboxone_rumblebegin_init), |
534 | XBOXONE_INIT_PKT(0x24c6, 0x542a, xboxone_rumblebegin_init), | 537 | XBOXONE_INIT_PKT(0x24c6, 0x542a, xboxone_rumblebegin_init), |
535 | XBOXONE_INIT_PKT(0x24c6, 0x543a, xboxone_rumblebegin_init), | 538 | XBOXONE_INIT_PKT(0x24c6, 0x543a, xboxone_rumblebegin_init), |
diff --git a/drivers/input/misc/uinput.c b/drivers/input/misc/uinput.c index eb14ddf69346..8ec483e8688b 100644 --- a/drivers/input/misc/uinput.c +++ b/drivers/input/misc/uinput.c | |||
@@ -598,6 +598,7 @@ static ssize_t uinput_inject_events(struct uinput_device *udev, | |||
598 | 598 | ||
599 | input_event(udev->dev, ev.type, ev.code, ev.value); | 599 | input_event(udev->dev, ev.type, ev.code, ev.value); |
600 | bytes += input_event_size(); | 600 | bytes += input_event_size(); |
601 | cond_resched(); | ||
601 | } | 602 | } |
602 | 603 | ||
603 | return bytes; | 604 | return bytes; |
diff --git a/drivers/input/mousedev.c b/drivers/input/mousedev.c index e08228061bcd..412fa71245af 100644 --- a/drivers/input/mousedev.c +++ b/drivers/input/mousedev.c | |||
@@ -707,6 +707,7 @@ static ssize_t mousedev_write(struct file *file, const char __user *buffer, | |||
707 | mousedev_generate_response(client, c); | 707 | mousedev_generate_response(client, c); |
708 | 708 | ||
709 | spin_unlock_irq(&client->packet_lock); | 709 | spin_unlock_irq(&client->packet_lock); |
710 | cond_resched(); | ||
710 | } | 711 | } |
711 | 712 | ||
712 | kill_fasync(&client->fasync, SIGIO, POLL_IN); | 713 | kill_fasync(&client->fasync, SIGIO, POLL_IN); |
diff --git a/drivers/input/serio/i8042.c b/drivers/input/serio/i8042.c index b8bc71569349..95a78ccbd847 100644 --- a/drivers/input/serio/i8042.c +++ b/drivers/input/serio/i8042.c | |||
@@ -1395,15 +1395,26 @@ static void __init i8042_register_ports(void) | |||
1395 | for (i = 0; i < I8042_NUM_PORTS; i++) { | 1395 | for (i = 0; i < I8042_NUM_PORTS; i++) { |
1396 | struct serio *serio = i8042_ports[i].serio; | 1396 | struct serio *serio = i8042_ports[i].serio; |
1397 | 1397 | ||
1398 | if (serio) { | 1398 | if (!serio) |
1399 | printk(KERN_INFO "serio: %s at %#lx,%#lx irq %d\n", | 1399 | continue; |
1400 | serio->name, | 1400 | |
1401 | (unsigned long) I8042_DATA_REG, | 1401 | printk(KERN_INFO "serio: %s at %#lx,%#lx irq %d\n", |
1402 | (unsigned long) I8042_COMMAND_REG, | 1402 | serio->name, |
1403 | i8042_ports[i].irq); | 1403 | (unsigned long) I8042_DATA_REG, |
1404 | serio_register_port(serio); | 1404 | (unsigned long) I8042_COMMAND_REG, |
1405 | device_set_wakeup_capable(&serio->dev, true); | 1405 | i8042_ports[i].irq); |
1406 | } | 1406 | serio_register_port(serio); |
1407 | device_set_wakeup_capable(&serio->dev, true); | ||
1408 | |||
1409 | /* | ||
1410 | * On platforms using suspend-to-idle, allow the keyboard to | ||
1411 | * wake up the system from sleep by enabling keyboard wakeups | ||
1412 | * by default. This is consistent with keyboard wakeup | ||
1413 | * behavior on many platforms using suspend-to-RAM (ACPI S3) | ||
1414 | * by default. | ||
1415 | */ | ||
1416 | if (pm_suspend_via_s2idle() && i == I8042_KBD_PORT_NO) | ||
1417 | device_set_wakeup_enable(&serio->dev, true); | ||
1407 | } | 1418 | } |
1408 | } | 1419 | } |
1409 | 1420 | ||
diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c index e13d991e9fb5..b29a8327eed1 100644 --- a/drivers/md/dm-cache-target.c +++ b/drivers/md/dm-cache-target.c | |||
@@ -3484,14 +3484,13 @@ static int __init dm_cache_init(void) | |||
3484 | int r; | 3484 | int r; |
3485 | 3485 | ||
3486 | migration_cache = KMEM_CACHE(dm_cache_migration, 0); | 3486 | migration_cache = KMEM_CACHE(dm_cache_migration, 0); |
3487 | if (!migration_cache) { | 3487 | if (!migration_cache) |
3488 | dm_unregister_target(&cache_target); | ||
3489 | return -ENOMEM; | 3488 | return -ENOMEM; |
3490 | } | ||
3491 | 3489 | ||
3492 | r = dm_register_target(&cache_target); | 3490 | r = dm_register_target(&cache_target); |
3493 | if (r) { | 3491 | if (r) { |
3494 | DMERR("cache target registration failed: %d", r); | 3492 | DMERR("cache target registration failed: %d", r); |
3493 | kmem_cache_destroy(migration_cache); | ||
3495 | return r; | 3494 | return r; |
3496 | } | 3495 | } |
3497 | 3496 | ||
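Note: the dm_cache_init() fix unwinds in the right order: it no longer unregisters a target that was never registered, and it frees the kmem cache when target registration fails. A generic sketch of that init/error-unwind shape (register_thing(), obj and target are placeholders):

    static struct kmem_cache *cache;

    static int __init mod_init(void)
    {
            int r;

            cache = KMEM_CACHE(obj, 0);
            if (!cache)
                    return -ENOMEM;

            r = register_thing(&target);
            if (r) {
                    kmem_cache_destroy(cache);      /* undo only what already succeeded */
                    return r;
            }
            return 0;
    }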
diff --git a/drivers/md/dm-flakey.c b/drivers/md/dm-flakey.c index 21d126a5078c..32aabe27b37c 100644 --- a/drivers/md/dm-flakey.c +++ b/drivers/md/dm-flakey.c | |||
@@ -467,7 +467,9 @@ static int flakey_iterate_devices(struct dm_target *ti, iterate_devices_callout_ | |||
467 | static struct target_type flakey_target = { | 467 | static struct target_type flakey_target = { |
468 | .name = "flakey", | 468 | .name = "flakey", |
469 | .version = {1, 5, 0}, | 469 | .version = {1, 5, 0}, |
470 | #ifdef CONFIG_BLK_DEV_ZONED | ||
470 | .features = DM_TARGET_ZONED_HM, | 471 | .features = DM_TARGET_ZONED_HM, |
472 | #endif | ||
471 | .module = THIS_MODULE, | 473 | .module = THIS_MODULE, |
472 | .ctr = flakey_ctr, | 474 | .ctr = flakey_ctr, |
473 | .dtr = flakey_dtr, | 475 | .dtr = flakey_dtr, |
diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c index 89ccb64342de..e1fa6baf4e8e 100644 --- a/drivers/md/dm-integrity.c +++ b/drivers/md/dm-integrity.c | |||
@@ -3462,7 +3462,8 @@ try_smaller_buffer: | |||
3462 | r = -ENOMEM; | 3462 | r = -ENOMEM; |
3463 | goto bad; | 3463 | goto bad; |
3464 | } | 3464 | } |
3465 | ic->recalc_tags = kvmalloc((RECALC_SECTORS >> ic->sb->log2_sectors_per_block) * ic->tag_size, GFP_KERNEL); | 3465 | ic->recalc_tags = kvmalloc_array(RECALC_SECTORS >> ic->sb->log2_sectors_per_block, |
3466 | ic->tag_size, GFP_KERNEL); | ||
3466 | if (!ic->recalc_tags) { | 3467 | if (!ic->recalc_tags) { |
3467 | ti->error = "Cannot allocate tags for recalculating"; | 3468 | ti->error = "Cannot allocate tags for recalculating"; |
3468 | r = -ENOMEM; | 3469 | r = -ENOMEM; |
diff --git a/drivers/md/dm-linear.c b/drivers/md/dm-linear.c index d10964d41fd7..2f7c44a006c4 100644 --- a/drivers/md/dm-linear.c +++ b/drivers/md/dm-linear.c | |||
@@ -102,6 +102,7 @@ static int linear_map(struct dm_target *ti, struct bio *bio) | |||
102 | return DM_MAPIO_REMAPPED; | 102 | return DM_MAPIO_REMAPPED; |
103 | } | 103 | } |
104 | 104 | ||
105 | #ifdef CONFIG_BLK_DEV_ZONED | ||
105 | static int linear_end_io(struct dm_target *ti, struct bio *bio, | 106 | static int linear_end_io(struct dm_target *ti, struct bio *bio, |
106 | blk_status_t *error) | 107 | blk_status_t *error) |
107 | { | 108 | { |
@@ -112,6 +113,7 @@ static int linear_end_io(struct dm_target *ti, struct bio *bio, | |||
112 | 113 | ||
113 | return DM_ENDIO_DONE; | 114 | return DM_ENDIO_DONE; |
114 | } | 115 | } |
116 | #endif | ||
115 | 117 | ||
116 | static void linear_status(struct dm_target *ti, status_type_t type, | 118 | static void linear_status(struct dm_target *ti, status_type_t type, |
117 | unsigned status_flags, char *result, unsigned maxlen) | 119 | unsigned status_flags, char *result, unsigned maxlen) |
@@ -208,12 +210,16 @@ static size_t linear_dax_copy_to_iter(struct dm_target *ti, pgoff_t pgoff, | |||
208 | static struct target_type linear_target = { | 210 | static struct target_type linear_target = { |
209 | .name = "linear", | 211 | .name = "linear", |
210 | .version = {1, 4, 0}, | 212 | .version = {1, 4, 0}, |
213 | #ifdef CONFIG_BLK_DEV_ZONED | ||
214 | .end_io = linear_end_io, | ||
211 | .features = DM_TARGET_PASSES_INTEGRITY | DM_TARGET_ZONED_HM, | 215 | .features = DM_TARGET_PASSES_INTEGRITY | DM_TARGET_ZONED_HM, |
216 | #else | ||
217 | .features = DM_TARGET_PASSES_INTEGRITY, | ||
218 | #endif | ||
212 | .module = THIS_MODULE, | 219 | .module = THIS_MODULE, |
213 | .ctr = linear_ctr, | 220 | .ctr = linear_ctr, |
214 | .dtr = linear_dtr, | 221 | .dtr = linear_dtr, |
215 | .map = linear_map, | 222 | .map = linear_map, |
216 | .end_io = linear_end_io, | ||
217 | .status = linear_status, | 223 | .status = linear_status, |
218 | .prepare_ioctl = linear_prepare_ioctl, | 224 | .prepare_ioctl = linear_prepare_ioctl, |
219 | .iterate_devices = linear_iterate_devices, | 225 | .iterate_devices = linear_iterate_devices, |
diff --git a/drivers/md/dm.c b/drivers/md/dm.c index 20f7e4ef5342..45abb54037fc 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c | |||
@@ -1155,12 +1155,14 @@ void dm_accept_partial_bio(struct bio *bio, unsigned n_sectors) | |||
1155 | EXPORT_SYMBOL_GPL(dm_accept_partial_bio); | 1155 | EXPORT_SYMBOL_GPL(dm_accept_partial_bio); |
1156 | 1156 | ||
1157 | /* | 1157 | /* |
1158 | * The zone descriptors obtained with a zone report indicate | 1158 | * The zone descriptors obtained with a zone report indicate zone positions |
1159 | * zone positions within the target device. The zone descriptors | 1159 | * within the target backing device, regardless of that device is a partition |
1160 | * must be remapped to match their position within the dm device. | 1160 | * and regardless of the target mapping start sector on the device or partition. |
1161 | * A target may call dm_remap_zone_report after completion of a | 1161 | * The zone descriptors start sector and write pointer position must be adjusted |
1162 | * REQ_OP_ZONE_REPORT bio to remap the zone descriptors obtained | 1162 | * to match their relative position within the dm device. |
1163 | * from the target device mapping to the dm device. | 1163 | * A target may call dm_remap_zone_report() after completion of a |
1164 | * REQ_OP_ZONE_REPORT bio to remap the zone descriptors obtained from the | ||
1165 | * backing device. | ||
1164 | */ | 1166 | */ |
1165 | void dm_remap_zone_report(struct dm_target *ti, struct bio *bio, sector_t start) | 1167 | void dm_remap_zone_report(struct dm_target *ti, struct bio *bio, sector_t start) |
1166 | { | 1168 | { |
@@ -1171,6 +1173,7 @@ void dm_remap_zone_report(struct dm_target *ti, struct bio *bio, sector_t start) | |||
1171 | struct blk_zone *zone; | 1173 | struct blk_zone *zone; |
1172 | unsigned int nr_rep = 0; | 1174 | unsigned int nr_rep = 0; |
1173 | unsigned int ofst; | 1175 | unsigned int ofst; |
1176 | sector_t part_offset; | ||
1174 | struct bio_vec bvec; | 1177 | struct bio_vec bvec; |
1175 | struct bvec_iter iter; | 1178 | struct bvec_iter iter; |
1176 | void *addr; | 1179 | void *addr; |
@@ -1179,6 +1182,15 @@ void dm_remap_zone_report(struct dm_target *ti, struct bio *bio, sector_t start) | |||
1179 | return; | 1182 | return; |
1180 | 1183 | ||
1181 | /* | 1184 | /* |
1185 | * bio sector was incremented by the request size on completion. Taking | ||
1186 | * into account the original request sector, the target start offset on | ||
1187 | * the backing device and the target mapping offset (ti->begin), the | ||
1188 | * start sector of the backing device. The partition offset is always 0 | ||
1189 | * if the target uses a whole device. | ||
1190 | */ | ||
1191 | part_offset = bio->bi_iter.bi_sector + ti->begin - (start + bio_end_sector(report_bio)); | ||
1192 | |||
1193 | /* | ||
1182 | * Remap the start sector of the reported zones. For sequential zones, | 1194 | * Remap the start sector of the reported zones. For sequential zones, |
1183 | * also remap the write pointer position. | 1195 | * also remap the write pointer position. |
1184 | */ | 1196 | */ |
@@ -1195,6 +1207,7 @@ void dm_remap_zone_report(struct dm_target *ti, struct bio *bio, sector_t start) | |||
1195 | /* Set zones start sector */ | 1207 | /* Set zones start sector */ |
1196 | while (hdr->nr_zones && ofst < bvec.bv_len) { | 1208 | while (hdr->nr_zones && ofst < bvec.bv_len) { |
1197 | zone = addr + ofst; | 1209 | zone = addr + ofst; |
1210 | zone->start -= part_offset; | ||
1198 | if (zone->start >= start + ti->len) { | 1211 | if (zone->start >= start + ti->len) { |
1199 | hdr->nr_zones = 0; | 1212 | hdr->nr_zones = 0; |
1200 | break; | 1213 | break; |
@@ -1206,7 +1219,7 @@ void dm_remap_zone_report(struct dm_target *ti, struct bio *bio, sector_t start) | |||
1206 | else if (zone->cond == BLK_ZONE_COND_EMPTY) | 1219 | else if (zone->cond == BLK_ZONE_COND_EMPTY) |
1207 | zone->wp = zone->start; | 1220 | zone->wp = zone->start; |
1208 | else | 1221 | else |
1209 | zone->wp = zone->wp + ti->begin - start; | 1222 | zone->wp = zone->wp + ti->begin - start - part_offset; |
1210 | } | 1223 | } |
1211 | ofst += sizeof(struct blk_zone); | 1224 | ofst += sizeof(struct blk_zone); |
1212 | hdr->nr_zones--; | 1225 | hdr->nr_zones--; |
diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c index a0b9102c4c6e..e201ccb3fda4 100644 --- a/drivers/mmc/core/block.c +++ b/drivers/mmc/core/block.c | |||
@@ -1371,6 +1371,16 @@ static void mmc_blk_data_prep(struct mmc_queue *mq, struct mmc_queue_req *mqrq, | |||
1371 | 1371 | ||
1372 | if (brq->data.blocks > 1) { | 1372 | if (brq->data.blocks > 1) { |
1373 | /* | 1373 | /* |
1374 | * Some SD cards in SPI mode return a CRC error or even lock up | ||
1375 | * completely when trying to read the last block using a | ||
1376 | * multiblock read command. | ||
1377 | */ | ||
1378 | if (mmc_host_is_spi(card->host) && (rq_data_dir(req) == READ) && | ||
1379 | (blk_rq_pos(req) + blk_rq_sectors(req) == | ||
1380 | get_capacity(md->disk))) | ||
1381 | brq->data.blocks--; | ||
1382 | |||
1383 | /* | ||
1374 | * After a read error, we redo the request one sector | 1384 | * After a read error, we redo the request one sector |
1375 | * at a time in order to accurately determine which | 1385 | * at a time in order to accurately determine which |
1376 | * sectors can be read successfully. | 1386 | * sectors can be read successfully. |
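Note: the mmc_blk_data_prep() hunk trims a multi-block SPI read that would end exactly at the card's last sector, because some SPI-mode cards return CRC errors or lock up on that access; the trimmed sector is then read with a single-block command. The end-of-device test it relies on:

    /* true when the request's final sector is also the device's final sector */
    if (blk_rq_pos(req) + blk_rq_sectors(req) == get_capacity(md->disk))
            brq->data.blocks--;     /* leave the last sector for a single-block read */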
diff --git a/drivers/mux/adgs1408.c b/drivers/mux/adgs1408.c index 0f7cf54e3234..89096f10f4c4 100644 --- a/drivers/mux/adgs1408.c +++ b/drivers/mux/adgs1408.c | |||
@@ -128,4 +128,4 @@ module_spi_driver(adgs1408_driver); | |||
128 | 128 | ||
129 | MODULE_AUTHOR("Mircea Caprioru <mircea.caprioru@analog.com>"); | 129 | MODULE_AUTHOR("Mircea Caprioru <mircea.caprioru@analog.com>"); |
130 | MODULE_DESCRIPTION("Analog Devices ADGS1408 MUX driver"); | 130 | MODULE_DESCRIPTION("Analog Devices ADGS1408 MUX driver"); |
131 | MODULE_LICENSE("GPL v2"); | 131 | MODULE_LICENSE("GPL"); |
diff --git a/drivers/net/ethernet/amazon/ena/ena_eth_com.c b/drivers/net/ethernet/amazon/ena/ena_eth_com.c index 6f8e15b9b3cf..f6c2d3855be8 100644 --- a/drivers/net/ethernet/amazon/ena/ena_eth_com.c +++ b/drivers/net/ethernet/amazon/ena/ena_eth_com.c | |||
@@ -349,11 +349,11 @@ static inline void ena_com_rx_set_flags(struct ena_com_rx_ctx *ena_rx_ctx, | |||
349 | (cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_L4_PROTO_IDX_MASK) >> | 349 | (cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_L4_PROTO_IDX_MASK) >> |
350 | ENA_ETH_IO_RX_CDESC_BASE_L4_PROTO_IDX_SHIFT; | 350 | ENA_ETH_IO_RX_CDESC_BASE_L4_PROTO_IDX_SHIFT; |
351 | ena_rx_ctx->l3_csum_err = | 351 | ena_rx_ctx->l3_csum_err = |
352 | (cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM_ERR_MASK) >> | 352 | !!((cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM_ERR_MASK) >> |
353 | ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM_ERR_SHIFT; | 353 | ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM_ERR_SHIFT); |
354 | ena_rx_ctx->l4_csum_err = | 354 | ena_rx_ctx->l4_csum_err = |
355 | (cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_ERR_MASK) >> | 355 | !!((cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_ERR_MASK) >> |
356 | ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_ERR_SHIFT; | 356 | ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_ERR_SHIFT); |
357 | ena_rx_ctx->l4_csum_checked = | 357 | ena_rx_ctx->l4_csum_checked = |
358 | !!((cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_CHECKED_MASK) >> | 358 | !!((cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_CHECKED_MASK) >> |
359 | ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_CHECKED_SHIFT); | 359 | ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_CHECKED_SHIFT); |
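Note: the ena_com_rx_set_flags() change makes the checksum-error fields explicit 0/1 values with a double negation, matching the style already used for l4_csum_checked. As a general idiom, !! collapses any non-zero masked value to exactly 1, which is what a boolean or 1-bit bitfield destination expects (the mask name below is illustrative):

    ctx->csum_err = !!(status & STATUS_CSUM_ERR_MASK);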
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c index 3494d4ac9932..284a0a612131 100644 --- a/drivers/net/ethernet/amazon/ena/ena_netdev.c +++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c | |||
@@ -1595,8 +1595,6 @@ static int ena_up_complete(struct ena_adapter *adapter) | |||
1595 | if (rc) | 1595 | if (rc) |
1596 | return rc; | 1596 | return rc; |
1597 | 1597 | ||
1598 | ena_init_napi(adapter); | ||
1599 | |||
1600 | ena_change_mtu(adapter->netdev, adapter->netdev->mtu); | 1598 | ena_change_mtu(adapter->netdev, adapter->netdev->mtu); |
1601 | 1599 | ||
1602 | ena_refill_all_rx_bufs(adapter); | 1600 | ena_refill_all_rx_bufs(adapter); |
@@ -1754,6 +1752,13 @@ static int ena_up(struct ena_adapter *adapter) | |||
1754 | 1752 | ||
1755 | ena_setup_io_intr(adapter); | 1753 | ena_setup_io_intr(adapter); |
1756 | 1754 | ||
1755 | /* napi poll functions should be initialized before running | ||
1756 | * request_irq(), to handle a rare condition where there is a pending | ||
1757 | * interrupt, causing the ISR to fire immediately while the poll | ||
1758 | * function wasn't set yet, causing a null dereference | ||
1759 | */ | ||
1760 | ena_init_napi(adapter); | ||
1761 | |||
1757 | rc = ena_request_io_irq(adapter); | 1762 | rc = ena_request_io_irq(adapter); |
1758 | if (rc) | 1763 | if (rc) |
1759 | goto err_req_irq; | 1764 | goto err_req_irq; |
@@ -2686,7 +2691,11 @@ err_disable_msix: | |||
2686 | ena_free_mgmnt_irq(adapter); | 2691 | ena_free_mgmnt_irq(adapter); |
2687 | ena_disable_msix(adapter); | 2692 | ena_disable_msix(adapter); |
2688 | err_device_destroy: | 2693 | err_device_destroy: |
2694 | ena_com_abort_admin_commands(ena_dev); | ||
2695 | ena_com_wait_for_abort_completion(ena_dev); | ||
2689 | ena_com_admin_destroy(ena_dev); | 2696 | ena_com_admin_destroy(ena_dev); |
2697 | ena_com_mmio_reg_read_request_destroy(ena_dev); | ||
2698 | ena_com_dev_reset(ena_dev, ENA_REGS_RESET_DRIVER_INVALID_STATE); | ||
2690 | err: | 2699 | err: |
2691 | clear_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags); | 2700 | clear_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags); |
2692 | clear_bit(ENA_FLAG_ONGOING_RESET, &adapter->flags); | 2701 | clear_bit(ENA_FLAG_ONGOING_RESET, &adapter->flags); |
@@ -3200,15 +3209,8 @@ err_rss_init: | |||
3200 | 3209 | ||
3201 | static void ena_release_bars(struct ena_com_dev *ena_dev, struct pci_dev *pdev) | 3210 | static void ena_release_bars(struct ena_com_dev *ena_dev, struct pci_dev *pdev) |
3202 | { | 3211 | { |
3203 | int release_bars; | 3212 | int release_bars = pci_select_bars(pdev, IORESOURCE_MEM) & ENA_BAR_MASK; |
3204 | |||
3205 | if (ena_dev->mem_bar) | ||
3206 | devm_iounmap(&pdev->dev, ena_dev->mem_bar); | ||
3207 | |||
3208 | if (ena_dev->reg_bar) | ||
3209 | devm_iounmap(&pdev->dev, ena_dev->reg_bar); | ||
3210 | 3213 | ||
3211 | release_bars = pci_select_bars(pdev, IORESOURCE_MEM) & ENA_BAR_MASK; | ||
3212 | pci_release_selected_regions(pdev, release_bars); | 3214 | pci_release_selected_regions(pdev, release_bars); |
3213 | } | 3215 | } |
3214 | 3216 | ||
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c index d2d59444f562..6a046030e873 100644 --- a/drivers/net/ethernet/mellanox/mlx4/main.c +++ b/drivers/net/ethernet/mellanox/mlx4/main.c | |||
@@ -260,47 +260,34 @@ static const struct devlink_param mlx4_devlink_params[] = { | |||
260 | NULL, NULL, NULL), | 260 | NULL, NULL, NULL), |
261 | }; | 261 | }; |
262 | 262 | ||
263 | static void mlx4_devlink_set_init_value(struct devlink *devlink, u32 param_id, | ||
264 | union devlink_param_value init_val) | ||
265 | { | ||
266 | struct mlx4_priv *priv = devlink_priv(devlink); | ||
267 | struct mlx4_dev *dev = &priv->dev; | ||
268 | int err; | ||
269 | |||
270 | err = devlink_param_driverinit_value_set(devlink, param_id, init_val); | ||
271 | if (err) | ||
272 | mlx4_warn(dev, | ||
273 | "devlink set parameter %u value failed (err = %d)", | ||
274 | param_id, err); | ||
275 | } | ||
276 | |||
277 | static void mlx4_devlink_set_params_init_values(struct devlink *devlink) | 263 | static void mlx4_devlink_set_params_init_values(struct devlink *devlink) |
278 | { | 264 | { |
279 | union devlink_param_value value; | 265 | union devlink_param_value value; |
280 | 266 | ||
281 | value.vbool = !!mlx4_internal_err_reset; | 267 | value.vbool = !!mlx4_internal_err_reset; |
282 | mlx4_devlink_set_init_value(devlink, | 268 | devlink_param_driverinit_value_set(devlink, |
283 | DEVLINK_PARAM_GENERIC_ID_INT_ERR_RESET, | 269 | DEVLINK_PARAM_GENERIC_ID_INT_ERR_RESET, |
284 | value); | 270 | value); |
285 | 271 | ||
286 | value.vu32 = 1UL << log_num_mac; | 272 | value.vu32 = 1UL << log_num_mac; |
287 | mlx4_devlink_set_init_value(devlink, | 273 | devlink_param_driverinit_value_set(devlink, |
288 | DEVLINK_PARAM_GENERIC_ID_MAX_MACS, value); | 274 | DEVLINK_PARAM_GENERIC_ID_MAX_MACS, |
275 | value); | ||
289 | 276 | ||
290 | value.vbool = enable_64b_cqe_eqe; | 277 | value.vbool = enable_64b_cqe_eqe; |
291 | mlx4_devlink_set_init_value(devlink, | 278 | devlink_param_driverinit_value_set(devlink, |
292 | MLX4_DEVLINK_PARAM_ID_ENABLE_64B_CQE_EQE, | 279 | MLX4_DEVLINK_PARAM_ID_ENABLE_64B_CQE_EQE, |
293 | value); | 280 | value); |
294 | 281 | ||
295 | value.vbool = enable_4k_uar; | 282 | value.vbool = enable_4k_uar; |
296 | mlx4_devlink_set_init_value(devlink, | 283 | devlink_param_driverinit_value_set(devlink, |
297 | MLX4_DEVLINK_PARAM_ID_ENABLE_4K_UAR, | 284 | MLX4_DEVLINK_PARAM_ID_ENABLE_4K_UAR, |
298 | value); | 285 | value); |
299 | 286 | ||
300 | value.vbool = false; | 287 | value.vbool = false; |
301 | mlx4_devlink_set_init_value(devlink, | 288 | devlink_param_driverinit_value_set(devlink, |
302 | DEVLINK_PARAM_GENERIC_ID_REGION_SNAPSHOT, | 289 | DEVLINK_PARAM_GENERIC_ID_REGION_SNAPSHOT, |
303 | value); | 290 | value); |
304 | } | 291 | } |
305 | 292 | ||
306 | static inline void mlx4_set_num_reserved_uars(struct mlx4_dev *dev, | 293 | static inline void mlx4_set_num_reserved_uars(struct mlx4_dev *dev, |
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c index 7d3f671e1bb3..b68e32186d67 100644 --- a/drivers/net/ethernet/realtek/r8169.c +++ b/drivers/net/ethernet/realtek/r8169.c | |||
@@ -4269,8 +4269,8 @@ static void rtl_init_rxcfg(struct rtl8169_private *tp) | |||
4269 | RTL_W32(tp, RxConfig, RX_FIFO_THRESH | RX_DMA_BURST); | 4269 | RTL_W32(tp, RxConfig, RX_FIFO_THRESH | RX_DMA_BURST); |
4270 | break; | 4270 | break; |
4271 | case RTL_GIGA_MAC_VER_18 ... RTL_GIGA_MAC_VER_24: | 4271 | case RTL_GIGA_MAC_VER_18 ... RTL_GIGA_MAC_VER_24: |
4272 | case RTL_GIGA_MAC_VER_34: | 4272 | case RTL_GIGA_MAC_VER_34 ... RTL_GIGA_MAC_VER_36: |
4273 | case RTL_GIGA_MAC_VER_35: | 4273 | case RTL_GIGA_MAC_VER_38: |
4274 | RTL_W32(tp, RxConfig, RX128_INT_EN | RX_MULTI_EN | RX_DMA_BURST); | 4274 | RTL_W32(tp, RxConfig, RX128_INT_EN | RX_MULTI_EN | RX_DMA_BURST); |
4275 | break; | 4275 | break; |
4276 | case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_51: | 4276 | case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_51: |
diff --git a/drivers/net/ethernet/socionext/netsec.c b/drivers/net/ethernet/socionext/netsec.c index 7aa5ebb6766c..4289ccb26e4e 100644 --- a/drivers/net/ethernet/socionext/netsec.c +++ b/drivers/net/ethernet/socionext/netsec.c | |||
@@ -735,8 +735,11 @@ static int netsec_process_rx(struct netsec_priv *priv, int budget) | |||
735 | u16 idx = dring->tail; | 735 | u16 idx = dring->tail; |
736 | struct netsec_de *de = dring->vaddr + (DESC_SZ * idx); | 736 | struct netsec_de *de = dring->vaddr + (DESC_SZ * idx); |
737 | 737 | ||
738 | if (de->attr & (1U << NETSEC_RX_PKT_OWN_FIELD)) | 738 | if (de->attr & (1U << NETSEC_RX_PKT_OWN_FIELD)) { |
739 | /* reading the register clears the irq */ | ||
740 | netsec_read(priv, NETSEC_REG_NRM_RX_PKTCNT); | ||
739 | break; | 741 | break; |
742 | } | ||
740 | 743 | ||
741 | /* This barrier is needed to keep us from reading | 744 | /* This barrier is needed to keep us from reading |
742 | * any other fields out of the netsec_de until we have | 745 | * any other fields out of the netsec_de until we have |
diff --git a/drivers/net/phy/sfp.c b/drivers/net/phy/sfp.c index 6e13b8832bc7..fd8bb998ae52 100644 --- a/drivers/net/phy/sfp.c +++ b/drivers/net/phy/sfp.c | |||
@@ -163,8 +163,6 @@ static const enum gpiod_flags gpio_flags[] = { | |||
163 | /* Give this long for the PHY to reset. */ | 163 | /* Give this long for the PHY to reset. */ |
164 | #define T_PHY_RESET_MS 50 | 164 | #define T_PHY_RESET_MS 50 |
165 | 165 | ||
166 | static DEFINE_MUTEX(sfp_mutex); | ||
167 | |||
168 | struct sff_data { | 166 | struct sff_data { |
169 | unsigned int gpios; | 167 | unsigned int gpios; |
170 | bool (*module_supported)(const struct sfp_eeprom_id *id); | 168 | bool (*module_supported)(const struct sfp_eeprom_id *id); |
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c index 533b6fb8d923..72a55b6b4211 100644 --- a/drivers/net/usb/qmi_wwan.c +++ b/drivers/net/usb/qmi_wwan.c | |||
@@ -1241,6 +1241,7 @@ static const struct usb_device_id products[] = { | |||
1241 | {QMI_FIXED_INTF(0x0b3c, 0xc00b, 4)}, /* Olivetti Olicard 500 */ | 1241 | {QMI_FIXED_INTF(0x0b3c, 0xc00b, 4)}, /* Olivetti Olicard 500 */ |
1242 | {QMI_FIXED_INTF(0x1e2d, 0x0060, 4)}, /* Cinterion PLxx */ | 1242 | {QMI_FIXED_INTF(0x1e2d, 0x0060, 4)}, /* Cinterion PLxx */ |
1243 | {QMI_FIXED_INTF(0x1e2d, 0x0053, 4)}, /* Cinterion PHxx,PXxx */ | 1243 | {QMI_FIXED_INTF(0x1e2d, 0x0053, 4)}, /* Cinterion PHxx,PXxx */ |
1244 | {QMI_FIXED_INTF(0x1e2d, 0x0063, 10)}, /* Cinterion ALASxx (1 RmNet) */ | ||
1244 | {QMI_FIXED_INTF(0x1e2d, 0x0082, 4)}, /* Cinterion PHxx,PXxx (2 RmNet) */ | 1245 | {QMI_FIXED_INTF(0x1e2d, 0x0082, 4)}, /* Cinterion PHxx,PXxx (2 RmNet) */ |
1245 | {QMI_FIXED_INTF(0x1e2d, 0x0082, 5)}, /* Cinterion PHxx,PXxx (2 RmNet) */ | 1246 | {QMI_FIXED_INTF(0x1e2d, 0x0082, 5)}, /* Cinterion PHxx,PXxx (2 RmNet) */ |
1246 | {QMI_FIXED_INTF(0x1e2d, 0x0083, 4)}, /* Cinterion PHxx,PXxx (1 RmNet + USB Audio)*/ | 1247 | {QMI_FIXED_INTF(0x1e2d, 0x0083, 4)}, /* Cinterion PHxx,PXxx (1 RmNet + USB Audio)*/ |
diff --git a/drivers/net/wireless/marvell/libertas/if_sdio.c b/drivers/net/wireless/marvell/libertas/if_sdio.c index 43743c26c071..39bf85d0ade0 100644 --- a/drivers/net/wireless/marvell/libertas/if_sdio.c +++ b/drivers/net/wireless/marvell/libertas/if_sdio.c | |||
@@ -1317,6 +1317,10 @@ static int if_sdio_suspend(struct device *dev) | |||
1317 | if (priv->wol_criteria == EHS_REMOVE_WAKEUP) { | 1317 | if (priv->wol_criteria == EHS_REMOVE_WAKEUP) { |
1318 | dev_info(dev, "Suspend without wake params -- powering down card\n"); | 1318 | dev_info(dev, "Suspend without wake params -- powering down card\n"); |
1319 | if (priv->fw_ready) { | 1319 | if (priv->fw_ready) { |
1320 | ret = lbs_suspend(priv); | ||
1321 | if (ret) | ||
1322 | return ret; | ||
1323 | |||
1320 | priv->power_up_on_resume = true; | 1324 | priv->power_up_on_resume = true; |
1321 | if_sdio_power_off(card); | 1325 | if_sdio_power_off(card); |
1322 | } | 1326 | } |
diff --git a/drivers/net/wireless/mediatek/mt76/usb.c b/drivers/net/wireless/mediatek/mt76/usb.c index 6b643ea701e3..6a255643c1f0 100644 --- a/drivers/net/wireless/mediatek/mt76/usb.c +++ b/drivers/net/wireless/mediatek/mt76/usb.c | |||
@@ -318,7 +318,7 @@ int mt76u_buf_alloc(struct mt76_dev *dev, struct mt76u_buf *buf, | |||
318 | if (!buf->urb) | 318 | if (!buf->urb) |
319 | return -ENOMEM; | 319 | return -ENOMEM; |
320 | 320 | ||
321 | buf->urb->sg = devm_kzalloc(dev->dev, nsgs * sizeof(*buf->urb->sg), | 321 | buf->urb->sg = devm_kcalloc(dev->dev, nsgs, sizeof(*buf->urb->sg), |
322 | gfp); | 322 | gfp); |
323 | if (!buf->urb->sg) | 323 | if (!buf->urb->sg) |
324 | return -ENOMEM; | 324 | return -ENOMEM; |
@@ -525,8 +525,8 @@ static int mt76u_alloc_rx(struct mt76_dev *dev) | |||
525 | 525 | ||
526 | spin_lock_init(&q->rx_page_lock); | 526 | spin_lock_init(&q->rx_page_lock); |
527 | spin_lock_init(&q->lock); | 527 | spin_lock_init(&q->lock); |
528 | q->entry = devm_kzalloc(dev->dev, | 528 | q->entry = devm_kcalloc(dev->dev, |
529 | MT_NUM_RX_ENTRIES * sizeof(*q->entry), | 529 | MT_NUM_RX_ENTRIES, sizeof(*q->entry), |
530 | GFP_KERNEL); | 530 | GFP_KERNEL); |
531 | if (!q->entry) | 531 | if (!q->entry) |
532 | return -ENOMEM; | 532 | return -ENOMEM; |
@@ -755,8 +755,8 @@ static int mt76u_alloc_tx(struct mt76_dev *dev) | |||
755 | INIT_LIST_HEAD(&q->swq); | 755 | INIT_LIST_HEAD(&q->swq); |
756 | q->hw_idx = mt76_ac_to_hwq(i); | 756 | q->hw_idx = mt76_ac_to_hwq(i); |
757 | 757 | ||
758 | q->entry = devm_kzalloc(dev->dev, | 758 | q->entry = devm_kcalloc(dev->dev, |
759 | MT_NUM_TX_ENTRIES * sizeof(*q->entry), | 759 | MT_NUM_TX_ENTRIES, sizeof(*q->entry), |
760 | GFP_KERNEL); | 760 | GFP_KERNEL); |
761 | if (!q->entry) | 761 | if (!q->entry) |
762 | return -ENOMEM; | 762 | return -ENOMEM; |
diff --git a/drivers/of/unittest.c b/drivers/of/unittest.c index 722537e14848..41b49716ac75 100644 --- a/drivers/of/unittest.c +++ b/drivers/of/unittest.c | |||
@@ -771,6 +771,9 @@ static void __init of_unittest_parse_interrupts(void) | |||
771 | struct of_phandle_args args; | 771 | struct of_phandle_args args; |
772 | int i, rc; | 772 | int i, rc; |
773 | 773 | ||
774 | if (of_irq_workarounds & OF_IMAP_OLDWORLD_MAC) | ||
775 | return; | ||
776 | |||
774 | np = of_find_node_by_path("/testcase-data/interrupts/interrupts0"); | 777 | np = of_find_node_by_path("/testcase-data/interrupts/interrupts0"); |
775 | if (!np) { | 778 | if (!np) { |
776 | pr_err("missing testcase data\n"); | 779 | pr_err("missing testcase data\n"); |
@@ -845,6 +848,9 @@ static void __init of_unittest_parse_interrupts_extended(void) | |||
845 | struct of_phandle_args args; | 848 | struct of_phandle_args args; |
846 | int i, rc; | 849 | int i, rc; |
847 | 850 | ||
851 | if (of_irq_workarounds & OF_IMAP_OLDWORLD_MAC) | ||
852 | return; | ||
853 | |||
848 | np = of_find_node_by_path("/testcase-data/interrupts/interrupts-extended0"); | 854 | np = of_find_node_by_path("/testcase-data/interrupts/interrupts-extended0"); |
849 | if (!np) { | 855 | if (!np) { |
850 | pr_err("missing testcase data\n"); | 856 | pr_err("missing testcase data\n"); |
@@ -1001,15 +1007,19 @@ static void __init of_unittest_platform_populate(void) | |||
1001 | pdev = of_find_device_by_node(np); | 1007 | pdev = of_find_device_by_node(np); |
1002 | unittest(pdev, "device 1 creation failed\n"); | 1008 | unittest(pdev, "device 1 creation failed\n"); |
1003 | 1009 | ||
1004 | irq = platform_get_irq(pdev, 0); | 1010 | if (!(of_irq_workarounds & OF_IMAP_OLDWORLD_MAC)) { |
1005 | unittest(irq == -EPROBE_DEFER, "device deferred probe failed - %d\n", irq); | 1011 | irq = platform_get_irq(pdev, 0); |
1012 | unittest(irq == -EPROBE_DEFER, | ||
1013 | "device deferred probe failed - %d\n", irq); | ||
1006 | 1014 | ||
1007 | /* Test that a parsing failure does not return -EPROBE_DEFER */ | 1015 | /* Test that a parsing failure does not return -EPROBE_DEFER */ |
1008 | np = of_find_node_by_path("/testcase-data/testcase-device2"); | 1016 | np = of_find_node_by_path("/testcase-data/testcase-device2"); |
1009 | pdev = of_find_device_by_node(np); | 1017 | pdev = of_find_device_by_node(np); |
1010 | unittest(pdev, "device 2 creation failed\n"); | 1018 | unittest(pdev, "device 2 creation failed\n"); |
1011 | irq = platform_get_irq(pdev, 0); | 1019 | irq = platform_get_irq(pdev, 0); |
1012 | unittest(irq < 0 && irq != -EPROBE_DEFER, "device parsing error failed - %d\n", irq); | 1020 | unittest(irq < 0 && irq != -EPROBE_DEFER, |
1021 | "device parsing error failed - %d\n", irq); | ||
1022 | } | ||
1013 | 1023 | ||
1014 | np = of_find_node_by_path("/testcase-data/platform-tests"); | 1024 | np = of_find_node_by_path("/testcase-data/platform-tests"); |
1015 | unittest(np, "No testcase data in device tree\n"); | 1025 | unittest(np, "No testcase data in device tree\n"); |
diff --git a/drivers/pci/controller/pcie-cadence.c b/drivers/pci/controller/pcie-cadence.c index 86f1b002c846..975bcdd6b5c0 100644 --- a/drivers/pci/controller/pcie-cadence.c +++ b/drivers/pci/controller/pcie-cadence.c | |||
@@ -180,11 +180,11 @@ int cdns_pcie_init_phy(struct device *dev, struct cdns_pcie *pcie) | |||
180 | return 0; | 180 | return 0; |
181 | } | 181 | } |
182 | 182 | ||
183 | phy = devm_kzalloc(dev, sizeof(*phy) * phy_count, GFP_KERNEL); | 183 | phy = devm_kcalloc(dev, phy_count, sizeof(*phy), GFP_KERNEL); |
184 | if (!phy) | 184 | if (!phy) |
185 | return -ENOMEM; | 185 | return -ENOMEM; |
186 | 186 | ||
187 | link = devm_kzalloc(dev, sizeof(*link) * phy_count, GFP_KERNEL); | 187 | link = devm_kcalloc(dev, phy_count, sizeof(*link), GFP_KERNEL); |
188 | if (!link) | 188 | if (!link) |
189 | return -ENOMEM; | 189 | return -ENOMEM; |
190 | 190 | ||
diff --git a/drivers/pinctrl/pinctrl-mcp23s08.c b/drivers/pinctrl/pinctrl-mcp23s08.c index 4a8a8efadefa..cf73a403d22d 100644 --- a/drivers/pinctrl/pinctrl-mcp23s08.c +++ b/drivers/pinctrl/pinctrl-mcp23s08.c | |||
@@ -636,6 +636,14 @@ static int mcp23s08_irq_setup(struct mcp23s08 *mcp) | |||
636 | return err; | 636 | return err; |
637 | } | 637 | } |
638 | 638 | ||
639 | return 0; | ||
640 | } | ||
641 | |||
642 | static int mcp23s08_irqchip_setup(struct mcp23s08 *mcp) | ||
643 | { | ||
644 | struct gpio_chip *chip = &mcp->chip; | ||
645 | int err; | ||
646 | |||
639 | err = gpiochip_irqchip_add_nested(chip, | 647 | err = gpiochip_irqchip_add_nested(chip, |
640 | &mcp23s08_irq_chip, | 648 | &mcp23s08_irq_chip, |
641 | 0, | 649 | 0, |
@@ -912,7 +920,7 @@ static int mcp23s08_probe_one(struct mcp23s08 *mcp, struct device *dev, | |||
912 | } | 920 | } |
913 | 921 | ||
914 | if (mcp->irq && mcp->irq_controller) { | 922 | if (mcp->irq && mcp->irq_controller) { |
915 | ret = mcp23s08_irq_setup(mcp); | 923 | ret = mcp23s08_irqchip_setup(mcp); |
916 | if (ret) | 924 | if (ret) |
917 | goto fail; | 925 | goto fail; |
918 | } | 926 | } |
@@ -944,6 +952,9 @@ static int mcp23s08_probe_one(struct mcp23s08 *mcp, struct device *dev, | |||
944 | goto fail; | 952 | goto fail; |
945 | } | 953 | } |
946 | 954 | ||
955 | if (mcp->irq) | ||
956 | ret = mcp23s08_irq_setup(mcp); | ||
957 | |||
947 | fail: | 958 | fail: |
948 | if (ret < 0) | 959 | if (ret < 0) |
949 | dev_dbg(dev, "can't setup chip %d, --> %d\n", addr, ret); | 960 | dev_dbg(dev, "can't setup chip %d, --> %d\n", addr, ret); |
diff --git a/drivers/platform/chrome/cros_ec_proto.c b/drivers/platform/chrome/cros_ec_proto.c index 398393ab5df8..b6fd4838f60f 100644 --- a/drivers/platform/chrome/cros_ec_proto.c +++ b/drivers/platform/chrome/cros_ec_proto.c | |||
@@ -520,7 +520,7 @@ static int get_next_event_xfer(struct cros_ec_device *ec_dev, | |||
520 | ret = cros_ec_cmd_xfer(ec_dev, msg); | 520 | ret = cros_ec_cmd_xfer(ec_dev, msg); |
521 | if (ret > 0) { | 521 | if (ret > 0) { |
522 | ec_dev->event_size = ret - 1; | 522 | ec_dev->event_size = ret - 1; |
523 | memcpy(&ec_dev->event_data, msg->data, ec_dev->event_size); | 523 | memcpy(&ec_dev->event_data, msg->data, ret); |
524 | } | 524 | } |
525 | 525 | ||
526 | return ret; | 526 | return ret; |
diff --git a/drivers/s390/char/sclp_early_core.c b/drivers/s390/char/sclp_early_core.c index eceba3858cef..2f61f5579aa5 100644 --- a/drivers/s390/char/sclp_early_core.c +++ b/drivers/s390/char/sclp_early_core.c | |||
@@ -210,11 +210,11 @@ static int sclp_early_setup(int disable, int *have_linemode, int *have_vt220) | |||
210 | * Output one or more lines of text on the SCLP console (VT220 and / | 210 | * Output one or more lines of text on the SCLP console (VT220 and / |
211 | * or line-mode). | 211 | * or line-mode). |
212 | */ | 212 | */ |
213 | void __sclp_early_printk(const char *str, unsigned int len) | 213 | void __sclp_early_printk(const char *str, unsigned int len, unsigned int force) |
214 | { | 214 | { |
215 | int have_linemode, have_vt220; | 215 | int have_linemode, have_vt220; |
216 | 216 | ||
217 | if (sclp_init_state != sclp_init_state_uninitialized) | 217 | if (!force && sclp_init_state != sclp_init_state_uninitialized) |
218 | return; | 218 | return; |
219 | if (sclp_early_setup(0, &have_linemode, &have_vt220) != 0) | 219 | if (sclp_early_setup(0, &have_linemode, &have_vt220) != 0) |
220 | return; | 220 | return; |
@@ -227,5 +227,10 @@ void __sclp_early_printk(const char *str, unsigned int len) | |||
227 | 227 | ||
228 | void sclp_early_printk(const char *str) | 228 | void sclp_early_printk(const char *str) |
229 | { | 229 | { |
230 | __sclp_early_printk(str, strlen(str)); | 230 | __sclp_early_printk(str, strlen(str), 0); |
231 | } | ||
232 | |||
233 | void sclp_early_printk_force(const char *str) | ||
234 | { | ||
235 | __sclp_early_printk(str, strlen(str), 1); | ||
231 | } | 236 | } |
diff --git a/drivers/s390/cio/vfio_ccw_cp.c b/drivers/s390/cio/vfio_ccw_cp.c index dbe7c7ac9ac8..fd77e46eb3b2 100644 --- a/drivers/s390/cio/vfio_ccw_cp.c +++ b/drivers/s390/cio/vfio_ccw_cp.c | |||
@@ -163,7 +163,7 @@ static bool pfn_array_table_iova_pinned(struct pfn_array_table *pat, | |||
163 | 163 | ||
164 | for (i = 0; i < pat->pat_nr; i++, pa++) | 164 | for (i = 0; i < pat->pat_nr; i++, pa++) |
165 | for (j = 0; j < pa->pa_nr; j++) | 165 | for (j = 0; j < pa->pa_nr; j++) |
166 | if (pa->pa_iova_pfn[i] == iova_pfn) | 166 | if (pa->pa_iova_pfn[j] == iova_pfn) |
167 | return true; | 167 | return true; |
168 | 168 | ||
169 | return false; | 169 | return false; |
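The one-character change above makes the inner loop use its own counter, pa_iova_pfn[j], rather than the outer i, so every pinned pfn in each array is actually compared. A toy standalone version of the same scan, with the bug noted in a comment (the array and helper are invented for illustration):

	#include <stdbool.h>
	#include <stdio.h>

	static bool contains(const int rows[][4], int nrows, int target)
	{
		for (int i = 0; i < nrows; i++)
			for (int j = 0; j < 4; j++)
				/* the old code effectively did rows[i][i] here,
				 * inspecting only one element per row */
				if (rows[i][j] == target)
					return true;
		return false;
	}

	int main(void)
	{
		const int rows[2][4] = { { 1, 2, 3, 4 }, { 5, 6, 7, 8 } };

		printf("7 present: %s\n", contains(rows, 2, 7) ? "yes" : "no");
		return 0;
	}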
diff --git a/drivers/s390/cio/vfio_ccw_drv.c b/drivers/s390/cio/vfio_ccw_drv.c index 770fa9cfc310..f47d16b5810b 100644 --- a/drivers/s390/cio/vfio_ccw_drv.c +++ b/drivers/s390/cio/vfio_ccw_drv.c | |||
@@ -22,6 +22,7 @@ | |||
22 | #include "vfio_ccw_private.h" | 22 | #include "vfio_ccw_private.h" |
23 | 23 | ||
24 | struct workqueue_struct *vfio_ccw_work_q; | 24 | struct workqueue_struct *vfio_ccw_work_q; |
25 | struct kmem_cache *vfio_ccw_io_region; | ||
25 | 26 | ||
26 | /* | 27 | /* |
27 | * Helpers | 28 | * Helpers |
@@ -79,7 +80,7 @@ static void vfio_ccw_sch_io_todo(struct work_struct *work) | |||
79 | cp_update_scsw(&private->cp, &irb->scsw); | 80 | cp_update_scsw(&private->cp, &irb->scsw); |
80 | cp_free(&private->cp); | 81 | cp_free(&private->cp); |
81 | } | 82 | } |
82 | memcpy(private->io_region.irb_area, irb, sizeof(*irb)); | 83 | memcpy(private->io_region->irb_area, irb, sizeof(*irb)); |
83 | 84 | ||
84 | if (private->io_trigger) | 85 | if (private->io_trigger) |
85 | eventfd_signal(private->io_trigger, 1); | 86 | eventfd_signal(private->io_trigger, 1); |
@@ -114,6 +115,14 @@ static int vfio_ccw_sch_probe(struct subchannel *sch) | |||
114 | private = kzalloc(sizeof(*private), GFP_KERNEL | GFP_DMA); | 115 | private = kzalloc(sizeof(*private), GFP_KERNEL | GFP_DMA); |
115 | if (!private) | 116 | if (!private) |
116 | return -ENOMEM; | 117 | return -ENOMEM; |
118 | |||
119 | private->io_region = kmem_cache_zalloc(vfio_ccw_io_region, | ||
120 | GFP_KERNEL | GFP_DMA); | ||
121 | if (!private->io_region) { | ||
122 | kfree(private); | ||
123 | return -ENOMEM; | ||
124 | } | ||
125 | |||
117 | private->sch = sch; | 126 | private->sch = sch; |
118 | dev_set_drvdata(&sch->dev, private); | 127 | dev_set_drvdata(&sch->dev, private); |
119 | 128 | ||
@@ -139,6 +148,7 @@ out_disable: | |||
139 | cio_disable_subchannel(sch); | 148 | cio_disable_subchannel(sch); |
140 | out_free: | 149 | out_free: |
141 | dev_set_drvdata(&sch->dev, NULL); | 150 | dev_set_drvdata(&sch->dev, NULL); |
151 | kmem_cache_free(vfio_ccw_io_region, private->io_region); | ||
142 | kfree(private); | 152 | kfree(private); |
143 | return ret; | 153 | return ret; |
144 | } | 154 | } |
@@ -153,6 +163,7 @@ static int vfio_ccw_sch_remove(struct subchannel *sch) | |||
153 | 163 | ||
154 | dev_set_drvdata(&sch->dev, NULL); | 164 | dev_set_drvdata(&sch->dev, NULL); |
155 | 165 | ||
166 | kmem_cache_free(vfio_ccw_io_region, private->io_region); | ||
156 | kfree(private); | 167 | kfree(private); |
157 | 168 | ||
158 | return 0; | 169 | return 0; |
@@ -232,10 +243,20 @@ static int __init vfio_ccw_sch_init(void) | |||
232 | if (!vfio_ccw_work_q) | 243 | if (!vfio_ccw_work_q) |
233 | return -ENOMEM; | 244 | return -ENOMEM; |
234 | 245 | ||
246 | vfio_ccw_io_region = kmem_cache_create_usercopy("vfio_ccw_io_region", | ||
247 | sizeof(struct ccw_io_region), 0, | ||
248 | SLAB_ACCOUNT, 0, | ||
249 | sizeof(struct ccw_io_region), NULL); | ||
250 | if (!vfio_ccw_io_region) { | ||
251 | destroy_workqueue(vfio_ccw_work_q); | ||
252 | return -ENOMEM; | ||
253 | } | ||
254 | |||
235 | isc_register(VFIO_CCW_ISC); | 255 | isc_register(VFIO_CCW_ISC); |
236 | ret = css_driver_register(&vfio_ccw_sch_driver); | 256 | ret = css_driver_register(&vfio_ccw_sch_driver); |
237 | if (ret) { | 257 | if (ret) { |
238 | isc_unregister(VFIO_CCW_ISC); | 258 | isc_unregister(VFIO_CCW_ISC); |
259 | kmem_cache_destroy(vfio_ccw_io_region); | ||
239 | destroy_workqueue(vfio_ccw_work_q); | 260 | destroy_workqueue(vfio_ccw_work_q); |
240 | } | 261 | } |
241 | 262 | ||
@@ -246,6 +267,7 @@ static void __exit vfio_ccw_sch_exit(void) | |||
246 | { | 267 | { |
247 | css_driver_unregister(&vfio_ccw_sch_driver); | 268 | css_driver_unregister(&vfio_ccw_sch_driver); |
248 | isc_unregister(VFIO_CCW_ISC); | 269 | isc_unregister(VFIO_CCW_ISC); |
270 | kmem_cache_destroy(vfio_ccw_io_region); | ||
249 | destroy_workqueue(vfio_ccw_work_q); | 271 | destroy_workqueue(vfio_ccw_work_q); |
250 | } | 272 | } |
251 | module_init(vfio_ccw_sch_init); | 273 | module_init(vfio_ccw_sch_init); |
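The vfio-ccw changes stop embedding the I/O region in the private structure and instead carve it from a dedicated slab cache created with kmem_cache_create_usercopy(), so only that whitelisted object may be copied to and from user space by the mdev read/write paths. A condensed kernel-style sketch of the cache lifecycle, using made-up names (my_region, my_cache); it is a shape reference, not a buildable module:

	#include <linux/module.h>
	#include <linux/slab.h>

	struct my_region {			/* object exchanged with user space */
		u32 ret_code;
		u8 data[64];
	};

	static struct kmem_cache *my_cache;

	static int __init my_init(void)
	{
		/* whitelist the whole object for copy_{to,from}_user() */
		my_cache = kmem_cache_create_usercopy("my_region",
						      sizeof(struct my_region), 0,
						      SLAB_ACCOUNT,
						      0, sizeof(struct my_region),
						      NULL);
		return my_cache ? 0 : -ENOMEM;
	}

	static void __exit my_exit(void)
	{
		kmem_cache_destroy(my_cache);
	}

	module_init(my_init);
	module_exit(my_exit);

	/* per-device allocation and release, as in probe()/remove() above */
	static struct my_region *my_region_alloc(void)
	{
		return kmem_cache_zalloc(my_cache, GFP_KERNEL | GFP_DMA);
	}

	static void my_region_free(struct my_region *r)
	{
		kmem_cache_free(my_cache, r);
	}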
diff --git a/drivers/s390/cio/vfio_ccw_fsm.c b/drivers/s390/cio/vfio_ccw_fsm.c index 797a82731159..f94aa01f9c36 100644 --- a/drivers/s390/cio/vfio_ccw_fsm.c +++ b/drivers/s390/cio/vfio_ccw_fsm.c | |||
@@ -93,13 +93,13 @@ static void fsm_io_error(struct vfio_ccw_private *private, | |||
93 | enum vfio_ccw_event event) | 93 | enum vfio_ccw_event event) |
94 | { | 94 | { |
95 | pr_err("vfio-ccw: FSM: I/O request from state:%d\n", private->state); | 95 | pr_err("vfio-ccw: FSM: I/O request from state:%d\n", private->state); |
96 | private->io_region.ret_code = -EIO; | 96 | private->io_region->ret_code = -EIO; |
97 | } | 97 | } |
98 | 98 | ||
99 | static void fsm_io_busy(struct vfio_ccw_private *private, | 99 | static void fsm_io_busy(struct vfio_ccw_private *private, |
100 | enum vfio_ccw_event event) | 100 | enum vfio_ccw_event event) |
101 | { | 101 | { |
102 | private->io_region.ret_code = -EBUSY; | 102 | private->io_region->ret_code = -EBUSY; |
103 | } | 103 | } |
104 | 104 | ||
105 | static void fsm_disabled_irq(struct vfio_ccw_private *private, | 105 | static void fsm_disabled_irq(struct vfio_ccw_private *private, |
@@ -126,7 +126,7 @@ static void fsm_io_request(struct vfio_ccw_private *private, | |||
126 | { | 126 | { |
127 | union orb *orb; | 127 | union orb *orb; |
128 | union scsw *scsw = &private->scsw; | 128 | union scsw *scsw = &private->scsw; |
129 | struct ccw_io_region *io_region = &private->io_region; | 129 | struct ccw_io_region *io_region = private->io_region; |
130 | struct mdev_device *mdev = private->mdev; | 130 | struct mdev_device *mdev = private->mdev; |
131 | char *errstr = "request"; | 131 | char *errstr = "request"; |
132 | 132 | ||
diff --git a/drivers/s390/cio/vfio_ccw_ops.c b/drivers/s390/cio/vfio_ccw_ops.c index 41eeb57d68a3..f673e106c041 100644 --- a/drivers/s390/cio/vfio_ccw_ops.c +++ b/drivers/s390/cio/vfio_ccw_ops.c | |||
@@ -174,7 +174,7 @@ static ssize_t vfio_ccw_mdev_read(struct mdev_device *mdev, | |||
174 | return -EINVAL; | 174 | return -EINVAL; |
175 | 175 | ||
176 | private = dev_get_drvdata(mdev_parent_dev(mdev)); | 176 | private = dev_get_drvdata(mdev_parent_dev(mdev)); |
177 | region = &private->io_region; | 177 | region = private->io_region; |
178 | if (copy_to_user(buf, (void *)region + *ppos, count)) | 178 | if (copy_to_user(buf, (void *)region + *ppos, count)) |
179 | return -EFAULT; | 179 | return -EFAULT; |
180 | 180 | ||
@@ -196,7 +196,7 @@ static ssize_t vfio_ccw_mdev_write(struct mdev_device *mdev, | |||
196 | if (private->state != VFIO_CCW_STATE_IDLE) | 196 | if (private->state != VFIO_CCW_STATE_IDLE) |
197 | return -EACCES; | 197 | return -EACCES; |
198 | 198 | ||
199 | region = &private->io_region; | 199 | region = private->io_region; |
200 | if (copy_from_user((void *)region + *ppos, buf, count)) | 200 | if (copy_from_user((void *)region + *ppos, buf, count)) |
201 | return -EFAULT; | 201 | return -EFAULT; |
202 | 202 | ||
diff --git a/drivers/s390/cio/vfio_ccw_private.h b/drivers/s390/cio/vfio_ccw_private.h index 78a66d96756b..078e46f9623d 100644 --- a/drivers/s390/cio/vfio_ccw_private.h +++ b/drivers/s390/cio/vfio_ccw_private.h | |||
@@ -41,7 +41,7 @@ struct vfio_ccw_private { | |||
41 | atomic_t avail; | 41 | atomic_t avail; |
42 | struct mdev_device *mdev; | 42 | struct mdev_device *mdev; |
43 | struct notifier_block nb; | 43 | struct notifier_block nb; |
44 | struct ccw_io_region io_region; | 44 | struct ccw_io_region *io_region; |
45 | 45 | ||
46 | struct channel_program cp; | 46 | struct channel_program cp; |
47 | struct irb irb; | 47 | struct irb irb; |
diff --git a/drivers/sbus/char/openprom.c b/drivers/sbus/char/openprom.c index 7b31f19ade83..050879a2ddef 100644 --- a/drivers/sbus/char/openprom.c +++ b/drivers/sbus/char/openprom.c | |||
@@ -715,22 +715,13 @@ static struct miscdevice openprom_dev = { | |||
715 | 715 | ||
716 | static int __init openprom_init(void) | 716 | static int __init openprom_init(void) |
717 | { | 717 | { |
718 | struct device_node *dp; | ||
719 | int err; | 718 | int err; |
720 | 719 | ||
721 | err = misc_register(&openprom_dev); | 720 | err = misc_register(&openprom_dev); |
722 | if (err) | 721 | if (err) |
723 | return err; | 722 | return err; |
724 | 723 | ||
725 | dp = of_find_node_by_path("/"); | 724 | options_node = of_get_child_by_name(of_find_node_by_path("/"), "options"); |
726 | dp = dp->child; | ||
727 | while (dp) { | ||
728 | if (!strcmp(dp->name, "options")) | ||
729 | break; | ||
730 | dp = dp->sibling; | ||
731 | } | ||
732 | options_node = dp; | ||
733 | |||
734 | if (!options_node) { | 725 | if (!options_node) { |
735 | misc_deregister(&openprom_dev); | 726 | misc_deregister(&openprom_dev); |
736 | return -EIO; | 727 | return -EIO; |
diff --git a/drivers/sbus/char/oradax.c b/drivers/sbus/char/oradax.c index 524f9ea62e52..6516bc3cb58b 100644 --- a/drivers/sbus/char/oradax.c +++ b/drivers/sbus/char/oradax.c | |||
@@ -689,8 +689,7 @@ static int dax_open(struct inode *inode, struct file *f) | |||
689 | alloc_error: | 689 | alloc_error: |
690 | kfree(ctx->ccb_buf); | 690 | kfree(ctx->ccb_buf); |
691 | done: | 691 | done: |
692 | if (ctx != NULL) | 692 | kfree(ctx); |
693 | kfree(ctx); | ||
694 | return -ENOMEM; | 693 | return -ENOMEM; |
695 | } | 694 | } |
696 | 695 | ||
diff --git a/drivers/scsi/qedi/qedi_main.c b/drivers/scsi/qedi/qedi_main.c index cc8e64dc65ad..e5bd035ebad0 100644 --- a/drivers/scsi/qedi/qedi_main.c +++ b/drivers/scsi/qedi/qedi_main.c | |||
@@ -2472,6 +2472,7 @@ static int __qedi_probe(struct pci_dev *pdev, int mode) | |||
2472 | /* start qedi context */ | 2472 | /* start qedi context */ |
2473 | spin_lock_init(&qedi->hba_lock); | 2473 | spin_lock_init(&qedi->hba_lock); |
2474 | spin_lock_init(&qedi->task_idx_lock); | 2474 | spin_lock_init(&qedi->task_idx_lock); |
2475 | mutex_init(&qedi->stats_lock); | ||
2475 | } | 2476 | } |
2476 | qedi_ops->ll2->register_cb_ops(qedi->cdev, &qedi_ll2_cb_ops, qedi); | 2477 | qedi_ops->ll2->register_cb_ops(qedi->cdev, &qedi_ll2_cb_ops, qedi); |
2477 | qedi_ops->ll2->start(qedi->cdev, ¶ms); | 2478 | qedi_ops->ll2->start(qedi->cdev, ¶ms); |
diff --git a/drivers/soc/fsl/qbman/bman_ccsr.c b/drivers/soc/fsl/qbman/bman_ccsr.c index 05c42235dd41..7c3cc968053c 100644 --- a/drivers/soc/fsl/qbman/bman_ccsr.c +++ b/drivers/soc/fsl/qbman/bman_ccsr.c | |||
@@ -120,6 +120,7 @@ static void bm_set_memory(u64 ba, u32 size) | |||
120 | */ | 120 | */ |
121 | static dma_addr_t fbpr_a; | 121 | static dma_addr_t fbpr_a; |
122 | static size_t fbpr_sz; | 122 | static size_t fbpr_sz; |
123 | static int __bman_probed; | ||
123 | 124 | ||
124 | static int bman_fbpr(struct reserved_mem *rmem) | 125 | static int bman_fbpr(struct reserved_mem *rmem) |
125 | { | 126 | { |
@@ -166,6 +167,12 @@ static irqreturn_t bman_isr(int irq, void *ptr) | |||
166 | return IRQ_HANDLED; | 167 | return IRQ_HANDLED; |
167 | } | 168 | } |
168 | 169 | ||
170 | int bman_is_probed(void) | ||
171 | { | ||
172 | return __bman_probed; | ||
173 | } | ||
174 | EXPORT_SYMBOL_GPL(bman_is_probed); | ||
175 | |||
169 | static int fsl_bman_probe(struct platform_device *pdev) | 176 | static int fsl_bman_probe(struct platform_device *pdev) |
170 | { | 177 | { |
171 | int ret, err_irq; | 178 | int ret, err_irq; |
@@ -175,6 +182,8 @@ static int fsl_bman_probe(struct platform_device *pdev) | |||
175 | u16 id, bm_pool_cnt; | 182 | u16 id, bm_pool_cnt; |
176 | u8 major, minor; | 183 | u8 major, minor; |
177 | 184 | ||
185 | __bman_probed = -1; | ||
186 | |||
178 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 187 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
179 | if (!res) { | 188 | if (!res) { |
180 | dev_err(dev, "Can't get %pOF property 'IORESOURCE_MEM'\n", | 189 | dev_err(dev, "Can't get %pOF property 'IORESOURCE_MEM'\n", |
@@ -255,6 +264,8 @@ static int fsl_bman_probe(struct platform_device *pdev) | |||
255 | return ret; | 264 | return ret; |
256 | } | 265 | } |
257 | 266 | ||
267 | __bman_probed = 1; | ||
268 | |||
258 | return 0; | 269 | return 0; |
259 | }; | 270 | }; |
260 | 271 | ||
diff --git a/drivers/soc/fsl/qbman/qman_ccsr.c b/drivers/soc/fsl/qbman/qman_ccsr.c index 79cba58387a5..6fd5fef5f39b 100644 --- a/drivers/soc/fsl/qbman/qman_ccsr.c +++ b/drivers/soc/fsl/qbman/qman_ccsr.c | |||
@@ -273,6 +273,7 @@ static const struct qman_error_info_mdata error_mdata[] = { | |||
273 | static u32 __iomem *qm_ccsr_start; | 273 | static u32 __iomem *qm_ccsr_start; |
274 | /* A SDQCR mask comprising all the available/visible pool channels */ | 274 | /* A SDQCR mask comprising all the available/visible pool channels */ |
275 | static u32 qm_pools_sdqcr; | 275 | static u32 qm_pools_sdqcr; |
276 | static int __qman_probed; | ||
276 | 277 | ||
277 | static inline u32 qm_ccsr_in(u32 offset) | 278 | static inline u32 qm_ccsr_in(u32 offset) |
278 | { | 279 | { |
@@ -686,6 +687,12 @@ static int qman_resource_init(struct device *dev) | |||
686 | return 0; | 687 | return 0; |
687 | } | 688 | } |
688 | 689 | ||
690 | int qman_is_probed(void) | ||
691 | { | ||
692 | return __qman_probed; | ||
693 | } | ||
694 | EXPORT_SYMBOL_GPL(qman_is_probed); | ||
695 | |||
689 | static int fsl_qman_probe(struct platform_device *pdev) | 696 | static int fsl_qman_probe(struct platform_device *pdev) |
690 | { | 697 | { |
691 | struct device *dev = &pdev->dev; | 698 | struct device *dev = &pdev->dev; |
@@ -695,6 +702,8 @@ static int fsl_qman_probe(struct platform_device *pdev) | |||
695 | u16 id; | 702 | u16 id; |
696 | u8 major, minor; | 703 | u8 major, minor; |
697 | 704 | ||
705 | __qman_probed = -1; | ||
706 | |||
698 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 707 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
699 | if (!res) { | 708 | if (!res) { |
700 | dev_err(dev, "Can't get %pOF property 'IORESOURCE_MEM'\n", | 709 | dev_err(dev, "Can't get %pOF property 'IORESOURCE_MEM'\n", |
@@ -828,6 +837,8 @@ static int fsl_qman_probe(struct platform_device *pdev) | |||
828 | if (ret) | 837 | if (ret) |
829 | return ret; | 838 | return ret; |
830 | 839 | ||
840 | __qman_probed = 1; | ||
841 | |||
831 | return 0; | 842 | return 0; |
832 | } | 843 | } |
833 | 844 | ||
diff --git a/drivers/soc/fsl/qbman/qman_portal.c b/drivers/soc/fsl/qbman/qman_portal.c index a120002b630e..3e9391d117c5 100644 --- a/drivers/soc/fsl/qbman/qman_portal.c +++ b/drivers/soc/fsl/qbman/qman_portal.c | |||
@@ -227,6 +227,14 @@ static int qman_portal_probe(struct platform_device *pdev) | |||
227 | int irq, cpu, err; | 227 | int irq, cpu, err; |
228 | u32 val; | 228 | u32 val; |
229 | 229 | ||
230 | err = qman_is_probed(); | ||
231 | if (!err) | ||
232 | return -EPROBE_DEFER; | ||
233 | if (err < 0) { | ||
234 | dev_err(&pdev->dev, "failing probe due to qman probe error\n"); | ||
235 | return -ENODEV; | ||
236 | } | ||
237 | |||
230 | pcfg = devm_kmalloc(dev, sizeof(*pcfg), GFP_KERNEL); | 238 | pcfg = devm_kmalloc(dev, sizeof(*pcfg), GFP_KERNEL); |
231 | if (!pcfg) | 239 | if (!pcfg) |
232 | return -ENOMEM; | 240 | return -ENOMEM; |
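fsl_qman_probe() now leaves a tri-state marker behind: 0 before the controller probe has run, -1 once it has started but not completed successfully, and 1 after success, exported through qman_is_probed() (bman gets the same treatment). The portal hunk above is the consumer side; sketched for some other hypothetical driver that depends on QMan, the same check looks like this:

	/* Kernel-style sketch only; my_probe() is an invented consumer. */
	static int my_probe(struct platform_device *pdev)
	{
		int err = qman_is_probed();

		if (!err)			/* controller not probed yet */
			return -EPROBE_DEFER;	/* try again later */
		if (err < 0) {			/* controller probe went wrong */
			dev_err(&pdev->dev, "qman probe failed, giving up\n");
			return -ENODEV;
		}

		/* QMan resources are safe to use from here on */
		return 0;
	}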
diff --git a/drivers/thunderbolt/icm.c b/drivers/thunderbolt/icm.c index e1e264a9a4c7..28fc4ce75edb 100644 --- a/drivers/thunderbolt/icm.c +++ b/drivers/thunderbolt/icm.c | |||
@@ -738,14 +738,6 @@ icm_fr_xdomain_connected(struct tb *tb, const struct icm_pkg_header *hdr) | |||
738 | u8 link, depth; | 738 | u8 link, depth; |
739 | u64 route; | 739 | u64 route; |
740 | 740 | ||
741 | /* | ||
742 | * After NVM upgrade adding root switch device fails because we | ||
743 | * initiated reset. During that time ICM might still send | ||
744 | * XDomain connected message which we ignore here. | ||
745 | */ | ||
746 | if (!tb->root_switch) | ||
747 | return; | ||
748 | |||
749 | link = pkg->link_info & ICM_LINK_INFO_LINK_MASK; | 741 | link = pkg->link_info & ICM_LINK_INFO_LINK_MASK; |
750 | depth = (pkg->link_info & ICM_LINK_INFO_DEPTH_MASK) >> | 742 | depth = (pkg->link_info & ICM_LINK_INFO_DEPTH_MASK) >> |
751 | ICM_LINK_INFO_DEPTH_SHIFT; | 743 | ICM_LINK_INFO_DEPTH_SHIFT; |
@@ -1037,14 +1029,6 @@ icm_tr_device_connected(struct tb *tb, const struct icm_pkg_header *hdr) | |||
1037 | if (pkg->hdr.packet_id) | 1029 | if (pkg->hdr.packet_id) |
1038 | return; | 1030 | return; |
1039 | 1031 | ||
1040 | /* | ||
1041 | * After NVM upgrade adding root switch device fails because we | ||
1042 | * initiated reset. During that time ICM might still send device | ||
1043 | * connected message which we ignore here. | ||
1044 | */ | ||
1045 | if (!tb->root_switch) | ||
1046 | return; | ||
1047 | |||
1048 | route = get_route(pkg->route_hi, pkg->route_lo); | 1032 | route = get_route(pkg->route_hi, pkg->route_lo); |
1049 | authorized = pkg->link_info & ICM_LINK_INFO_APPROVED; | 1033 | authorized = pkg->link_info & ICM_LINK_INFO_APPROVED; |
1050 | security_level = (pkg->hdr.flags & ICM_FLAGS_SLEVEL_MASK) >> | 1034 | security_level = (pkg->hdr.flags & ICM_FLAGS_SLEVEL_MASK) >> |
@@ -1408,19 +1392,26 @@ static void icm_handle_notification(struct work_struct *work) | |||
1408 | 1392 | ||
1409 | mutex_lock(&tb->lock); | 1393 | mutex_lock(&tb->lock); |
1410 | 1394 | ||
1411 | switch (n->pkg->code) { | 1395 | /* |
1412 | case ICM_EVENT_DEVICE_CONNECTED: | 1396 | * When the domain is stopped we flush its workqueue but before |
1413 | icm->device_connected(tb, n->pkg); | 1397 | * that the root switch is removed. In that case we should treat |
1414 | break; | 1398 | * the queued events as being canceled. |
1415 | case ICM_EVENT_DEVICE_DISCONNECTED: | 1399 | */ |
1416 | icm->device_disconnected(tb, n->pkg); | 1400 | if (tb->root_switch) { |
1417 | break; | 1401 | switch (n->pkg->code) { |
1418 | case ICM_EVENT_XDOMAIN_CONNECTED: | 1402 | case ICM_EVENT_DEVICE_CONNECTED: |
1419 | icm->xdomain_connected(tb, n->pkg); | 1403 | icm->device_connected(tb, n->pkg); |
1420 | break; | 1404 | break; |
1421 | case ICM_EVENT_XDOMAIN_DISCONNECTED: | 1405 | case ICM_EVENT_DEVICE_DISCONNECTED: |
1422 | icm->xdomain_disconnected(tb, n->pkg); | 1406 | icm->device_disconnected(tb, n->pkg); |
1423 | break; | 1407 | break; |
1408 | case ICM_EVENT_XDOMAIN_CONNECTED: | ||
1409 | icm->xdomain_connected(tb, n->pkg); | ||
1410 | break; | ||
1411 | case ICM_EVENT_XDOMAIN_DISCONNECTED: | ||
1412 | icm->xdomain_disconnected(tb, n->pkg); | ||
1413 | break; | ||
1414 | } | ||
1424 | } | 1415 | } |
1425 | 1416 | ||
1426 | mutex_unlock(&tb->lock); | 1417 | mutex_unlock(&tb->lock); |
diff --git a/drivers/thunderbolt/nhi.c b/drivers/thunderbolt/nhi.c index 88cff05a1808..5cd6bdfa068f 100644 --- a/drivers/thunderbolt/nhi.c +++ b/drivers/thunderbolt/nhi.c | |||
@@ -1191,5 +1191,5 @@ static void __exit nhi_unload(void) | |||
1191 | tb_domain_exit(); | 1191 | tb_domain_exit(); |
1192 | } | 1192 | } |
1193 | 1193 | ||
1194 | fs_initcall(nhi_init); | 1194 | rootfs_initcall(nhi_init); |
1195 | module_exit(nhi_unload); | 1195 | module_exit(nhi_unload); |
diff --git a/drivers/tty/serial/8250/8250_dw.c b/drivers/tty/serial/8250/8250_dw.c index fa8dcb470640..d31b975dd3fd 100644 --- a/drivers/tty/serial/8250/8250_dw.c +++ b/drivers/tty/serial/8250/8250_dw.c | |||
@@ -630,10 +630,6 @@ static int dw8250_probe(struct platform_device *pdev) | |||
630 | if (!data->skip_autocfg) | 630 | if (!data->skip_autocfg) |
631 | dw8250_setup_port(p); | 631 | dw8250_setup_port(p); |
632 | 632 | ||
633 | #ifdef CONFIG_PM | ||
634 | uart.capabilities |= UART_CAP_RPM; | ||
635 | #endif | ||
636 | |||
637 | /* If we have a valid fifosize, try hooking up DMA */ | 633 | /* If we have a valid fifosize, try hooking up DMA */ |
638 | if (p->fifosize) { | 634 | if (p->fifosize) { |
639 | data->dma.rxconf.src_maxburst = p->fifosize / 4; | 635 | data->dma.rxconf.src_maxburst = p->fifosize / 4; |
diff --git a/drivers/tty/serial/qcom_geni_serial.c b/drivers/tty/serial/qcom_geni_serial.c index 29ec34387246..1515074e18fb 100644 --- a/drivers/tty/serial/qcom_geni_serial.c +++ b/drivers/tty/serial/qcom_geni_serial.c | |||
@@ -868,8 +868,8 @@ static int qcom_geni_serial_port_setup(struct uart_port *uport) | |||
868 | geni_se_init(&port->se, port->rx_wm, port->rx_rfr); | 868 | geni_se_init(&port->se, port->rx_wm, port->rx_rfr); |
869 | geni_se_select_mode(&port->se, port->xfer_mode); | 869 | geni_se_select_mode(&port->se, port->xfer_mode); |
870 | if (!uart_console(uport)) { | 870 | if (!uart_console(uport)) { |
871 | port->rx_fifo = devm_kzalloc(uport->dev, | 871 | port->rx_fifo = devm_kcalloc(uport->dev, |
872 | port->rx_fifo_depth * sizeof(u32), GFP_KERNEL); | 872 | port->rx_fifo_depth, sizeof(u32), GFP_KERNEL); |
873 | if (!port->rx_fifo) | 873 | if (!port->rx_fifo) |
874 | return -ENOMEM; | 874 | return -ENOMEM; |
875 | } | 875 | } |
diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c index ac4424bf6b13..ab3f6e91853d 100644 --- a/drivers/tty/serial/sh-sci.c +++ b/drivers/tty/serial/sh-sci.c | |||
@@ -292,6 +292,33 @@ static const struct sci_port_params sci_port_params[SCIx_NR_REGTYPES] = { | |||
292 | }, | 292 | }, |
293 | 293 | ||
294 | /* | 294 | /* |
295 | * The "SCIFA" that is in RZ/T and RZ/A2. | ||
296 | * It looks like a normal SCIF with FIFO data, but with a | ||
297 | * compressed address space. Also, the break-out of interrupts | ||
298 | * is different: ERI/BRI, RXI, TXI, TEI, DRI. | ||
299 | */ | ||
300 | [SCIx_RZ_SCIFA_REGTYPE] = { | ||
301 | .regs = { | ||
302 | [SCSMR] = { 0x00, 16 }, | ||
303 | [SCBRR] = { 0x02, 8 }, | ||
304 | [SCSCR] = { 0x04, 16 }, | ||
305 | [SCxTDR] = { 0x06, 8 }, | ||
306 | [SCxSR] = { 0x08, 16 }, | ||
307 | [SCxRDR] = { 0x0A, 8 }, | ||
308 | [SCFCR] = { 0x0C, 16 }, | ||
309 | [SCFDR] = { 0x0E, 16 }, | ||
310 | [SCSPTR] = { 0x10, 16 }, | ||
311 | [SCLSR] = { 0x12, 16 }, | ||
312 | }, | ||
313 | .fifosize = 16, | ||
314 | .overrun_reg = SCLSR, | ||
315 | .overrun_mask = SCLSR_ORER, | ||
316 | .sampling_rate_mask = SCI_SR(32), | ||
317 | .error_mask = SCIF_DEFAULT_ERROR_MASK, | ||
318 | .error_clear = SCIF_ERROR_CLEAR, | ||
319 | }, | ||
320 | |||
321 | /* | ||
295 | * Common SH-3 SCIF definitions. | 322 | * Common SH-3 SCIF definitions. |
296 | */ | 323 | */ |
297 | [SCIx_SH3_SCIF_REGTYPE] = { | 324 | [SCIx_SH3_SCIF_REGTYPE] = { |
@@ -319,15 +346,15 @@ static const struct sci_port_params sci_port_params[SCIx_NR_REGTYPES] = { | |||
319 | [SCIx_SH4_SCIF_REGTYPE] = { | 346 | [SCIx_SH4_SCIF_REGTYPE] = { |
320 | .regs = { | 347 | .regs = { |
321 | [SCSMR] = { 0x00, 16 }, | 348 | [SCSMR] = { 0x00, 16 }, |
322 | [SCBRR] = { 0x02, 8 }, | 349 | [SCBRR] = { 0x04, 8 }, |
323 | [SCSCR] = { 0x04, 16 }, | 350 | [SCSCR] = { 0x08, 16 }, |
324 | [SCxTDR] = { 0x06, 8 }, | 351 | [SCxTDR] = { 0x0c, 8 }, |
325 | [SCxSR] = { 0x08, 16 }, | 352 | [SCxSR] = { 0x10, 16 }, |
326 | [SCxRDR] = { 0x0a, 8 }, | 353 | [SCxRDR] = { 0x14, 8 }, |
327 | [SCFCR] = { 0x0c, 16 }, | 354 | [SCFCR] = { 0x18, 16 }, |
328 | [SCFDR] = { 0x0e, 16 }, | 355 | [SCFDR] = { 0x1c, 16 }, |
329 | [SCSPTR] = { 0x10, 16 }, | 356 | [SCSPTR] = { 0x20, 16 }, |
330 | [SCLSR] = { 0x12, 16 }, | 357 | [SCLSR] = { 0x24, 16 }, |
331 | }, | 358 | }, |
332 | .fifosize = 16, | 359 | .fifosize = 16, |
333 | .overrun_reg = SCLSR, | 360 | .overrun_reg = SCLSR, |
@@ -2810,7 +2837,7 @@ static int sci_init_single(struct platform_device *dev, | |||
2810 | { | 2837 | { |
2811 | struct uart_port *port = &sci_port->port; | 2838 | struct uart_port *port = &sci_port->port; |
2812 | const struct resource *res; | 2839 | const struct resource *res; |
2813 | unsigned int i, regtype; | 2840 | unsigned int i; |
2814 | int ret; | 2841 | int ret; |
2815 | 2842 | ||
2816 | sci_port->cfg = p; | 2843 | sci_port->cfg = p; |
@@ -2847,7 +2874,6 @@ static int sci_init_single(struct platform_device *dev, | |||
2847 | if (unlikely(sci_port->params == NULL)) | 2874 | if (unlikely(sci_port->params == NULL)) |
2848 | return -EINVAL; | 2875 | return -EINVAL; |
2849 | 2876 | ||
2850 | regtype = sci_port->params - sci_port_params; | ||
2851 | switch (p->type) { | 2877 | switch (p->type) { |
2852 | case PORT_SCIFB: | 2878 | case PORT_SCIFB: |
2853 | sci_port->rx_trigger = 48; | 2879 | sci_port->rx_trigger = 48; |
@@ -2902,10 +2928,6 @@ static int sci_init_single(struct platform_device *dev, | |||
2902 | port->regshift = 1; | 2928 | port->regshift = 1; |
2903 | } | 2929 | } |
2904 | 2930 | ||
2905 | if (regtype == SCIx_SH4_SCIF_REGTYPE) | ||
2906 | if (sci_port->reg_size >= 0x20) | ||
2907 | port->regshift = 1; | ||
2908 | |||
2909 | /* | 2931 | /* |
2910 | * The UART port needs an IRQ value, so we peg this to the RX IRQ | 2932 | * The UART port needs an IRQ value, so we peg this to the RX IRQ |
2911 | * for the multi-IRQ ports, which is where we are primarily | 2933 | * for the multi-IRQ ports, which is where we are primarily |
@@ -3110,6 +3132,10 @@ static const struct of_device_id of_sci_match[] = { | |||
3110 | .compatible = "renesas,scif-r7s72100", | 3132 | .compatible = "renesas,scif-r7s72100", |
3111 | .data = SCI_OF_DATA(PORT_SCIF, SCIx_SH2_SCIF_FIFODATA_REGTYPE), | 3133 | .data = SCI_OF_DATA(PORT_SCIF, SCIx_SH2_SCIF_FIFODATA_REGTYPE), |
3112 | }, | 3134 | }, |
3135 | { | ||
3136 | .compatible = "renesas,scif-r7s9210", | ||
3137 | .data = SCI_OF_DATA(PORT_SCIF, SCIx_RZ_SCIFA_REGTYPE), | ||
3138 | }, | ||
3113 | /* Family-specific types */ | 3139 | /* Family-specific types */ |
3114 | { | 3140 | { |
3115 | .compatible = "renesas,rcar-gen1-scif", | 3141 | .compatible = "renesas,rcar-gen1-scif", |
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c index f9b40a9dc4d3..bc03b0a690b4 100644 --- a/drivers/usb/class/cdc-acm.c +++ b/drivers/usb/class/cdc-acm.c | |||
@@ -1514,6 +1514,7 @@ static void acm_disconnect(struct usb_interface *intf) | |||
1514 | { | 1514 | { |
1515 | struct acm *acm = usb_get_intfdata(intf); | 1515 | struct acm *acm = usb_get_intfdata(intf); |
1516 | struct tty_struct *tty; | 1516 | struct tty_struct *tty; |
1517 | int i; | ||
1517 | 1518 | ||
1518 | /* sibling interface is already cleaning up */ | 1519 | /* sibling interface is already cleaning up */ |
1519 | if (!acm) | 1520 | if (!acm) |
@@ -1544,6 +1545,11 @@ static void acm_disconnect(struct usb_interface *intf) | |||
1544 | 1545 | ||
1545 | tty_unregister_device(acm_tty_driver, acm->minor); | 1546 | tty_unregister_device(acm_tty_driver, acm->minor); |
1546 | 1547 | ||
1548 | usb_free_urb(acm->ctrlurb); | ||
1549 | for (i = 0; i < ACM_NW; i++) | ||
1550 | usb_free_urb(acm->wb[i].urb); | ||
1551 | for (i = 0; i < acm->rx_buflimit; i++) | ||
1552 | usb_free_urb(acm->read_urbs[i]); | ||
1547 | acm_write_buffers_free(acm); | 1553 | acm_write_buffers_free(acm); |
1548 | usb_free_coherent(acm->dev, acm->ctrlsize, acm->ctrl_buffer, acm->ctrl_dma); | 1554 | usb_free_coherent(acm->dev, acm->ctrlsize, acm->ctrl_buffer, acm->ctrl_dma); |
1549 | acm_read_buffers_free(acm); | 1555 | acm_read_buffers_free(acm); |
diff --git a/drivers/usb/host/xhci-mtk.c b/drivers/usb/host/xhci-mtk.c index 7334da9e9779..71d0d33c3286 100644 --- a/drivers/usb/host/xhci-mtk.c +++ b/drivers/usb/host/xhci-mtk.c | |||
@@ -642,10 +642,10 @@ static int __maybe_unused xhci_mtk_resume(struct device *dev) | |||
642 | xhci_mtk_host_enable(mtk); | 642 | xhci_mtk_host_enable(mtk); |
643 | 643 | ||
644 | xhci_dbg(xhci, "%s: restart port polling\n", __func__); | 644 | xhci_dbg(xhci, "%s: restart port polling\n", __func__); |
645 | set_bit(HCD_FLAG_POLL_RH, &hcd->flags); | ||
646 | usb_hcd_poll_rh_status(hcd); | ||
647 | set_bit(HCD_FLAG_POLL_RH, &xhci->shared_hcd->flags); | 645 | set_bit(HCD_FLAG_POLL_RH, &xhci->shared_hcd->flags); |
648 | usb_hcd_poll_rh_status(xhci->shared_hcd); | 646 | usb_hcd_poll_rh_status(xhci->shared_hcd); |
647 | set_bit(HCD_FLAG_POLL_RH, &hcd->flags); | ||
648 | usb_hcd_poll_rh_status(hcd); | ||
649 | return 0; | 649 | return 0; |
650 | } | 650 | } |
651 | 651 | ||
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c index 6372edf339d9..722860eb5a91 100644 --- a/drivers/usb/host/xhci-pci.c +++ b/drivers/usb/host/xhci-pci.c | |||
@@ -185,6 +185,8 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci) | |||
185 | } | 185 | } |
186 | if (pdev->vendor == PCI_VENDOR_ID_INTEL && | 186 | if (pdev->vendor == PCI_VENDOR_ID_INTEL && |
187 | (pdev->device == PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI || | 187 | (pdev->device == PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI || |
188 | pdev->device == PCI_DEVICE_ID_INTEL_SUNRISEPOINT_LP_XHCI || | ||
189 | pdev->device == PCI_DEVICE_ID_INTEL_SUNRISEPOINT_H_XHCI || | ||
188 | pdev->device == PCI_DEVICE_ID_INTEL_APL_XHCI || | 190 | pdev->device == PCI_DEVICE_ID_INTEL_APL_XHCI || |
189 | pdev->device == PCI_DEVICE_ID_INTEL_DNV_XHCI)) | 191 | pdev->device == PCI_DEVICE_ID_INTEL_DNV_XHCI)) |
190 | xhci->quirks |= XHCI_MISSING_CAS; | 192 | xhci->quirks |= XHCI_MISSING_CAS; |
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c index 0215b70c4efc..e72ad9f81c73 100644 --- a/drivers/usb/serial/option.c +++ b/drivers/usb/serial/option.c | |||
@@ -561,6 +561,9 @@ static void option_instat_callback(struct urb *urb); | |||
561 | /* Interface is reserved */ | 561 | /* Interface is reserved */ |
562 | #define RSVD(ifnum) ((BIT(ifnum) & 0xff) << 0) | 562 | #define RSVD(ifnum) ((BIT(ifnum) & 0xff) << 0) |
563 | 563 | ||
564 | /* Interface must have two endpoints */ | ||
565 | #define NUMEP2 BIT(16) | ||
566 | |||
564 | 567 | ||
565 | static const struct usb_device_id option_ids[] = { | 568 | static const struct usb_device_id option_ids[] = { |
566 | { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_COLT) }, | 569 | { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_COLT) }, |
@@ -1081,8 +1084,9 @@ static const struct usb_device_id option_ids[] = { | |||
1081 | .driver_info = RSVD(4) }, | 1084 | .driver_info = RSVD(4) }, |
1082 | { USB_DEVICE(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_BG96), | 1085 | { USB_DEVICE(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_BG96), |
1083 | .driver_info = RSVD(4) }, | 1086 | .driver_info = RSVD(4) }, |
1084 | { USB_DEVICE(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EP06), | 1087 | { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EP06, 0xff, 0xff, 0xff), |
1085 | .driver_info = RSVD(4) | RSVD(5) }, | 1088 | .driver_info = RSVD(1) | RSVD(2) | RSVD(3) | RSVD(4) | NUMEP2 }, |
1089 | { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EP06, 0xff, 0, 0) }, | ||
1086 | { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6001) }, | 1090 | { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6001) }, |
1087 | { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CMU_300) }, | 1091 | { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CMU_300) }, |
1088 | { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6003), | 1092 | { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6003), |
@@ -1999,6 +2003,13 @@ static int option_probe(struct usb_serial *serial, | |||
1999 | if (device_flags & RSVD(iface_desc->bInterfaceNumber)) | 2003 | if (device_flags & RSVD(iface_desc->bInterfaceNumber)) |
2000 | return -ENODEV; | 2004 | return -ENODEV; |
2001 | 2005 | ||
2006 | /* | ||
2007 | * Allow matching on bNumEndpoints for devices whose interface numbers | ||
2008 | * can change (e.g. Quectel EP06). | ||
2009 | */ | ||
2010 | if (device_flags & NUMEP2 && iface_desc->bNumEndpoints != 2) | ||
2011 | return -ENODEV; | ||
2012 | |||
2002 | /* Store the device flags so we can use them during attach. */ | 2013 | /* Store the device flags so we can use them during attach. */ |
2003 | usb_set_serial_data(serial, (void *)device_flags); | 2014 | usb_set_serial_data(serial, (void *)device_flags); |
2004 | 2015 | ||
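The option driver encodes per-device policy in the match entry's driver_info word: bits 0-7 form the reserved-interface mask built with RSVD(ifnum), and the new NUMEP2 bit (bit 16) means "only bind interfaces that have exactly two endpoints", which lets the Quectel EP06 be handled even when its interface layout changes. A small standalone sketch of the packing and of the probe-time checks (accept_interface() and the sample values are invented):

	#include <stdbool.h>
	#include <stdio.h>

	#define BIT(n)		(1UL << (n))
	#define RSVD(ifnum)	((BIT(ifnum) & 0xff) << 0)	/* interface reserved */
	#define NUMEP2		BIT(16)				/* need exactly 2 endpoints */

	static bool accept_interface(unsigned long device_flags,
				     unsigned int ifnum, unsigned int num_endpoints)
	{
		if (device_flags & RSVD(ifnum))
			return false;			/* interface is reserved */
		if ((device_flags & NUMEP2) && num_endpoints != 2)
			return false;			/* wrong endpoint count */
		return true;
	}

	int main(void)
	{
		unsigned long flags = RSVD(1) | RSVD(2) | RSVD(3) | RSVD(4) | NUMEP2;

		printf("if 0, 2 eps: %d\n", accept_interface(flags, 0, 2));	/* 1 */
		printf("if 0, 3 eps: %d\n", accept_interface(flags, 0, 3));	/* 0 */
		printf("if 4, 2 eps: %d\n", accept_interface(flags, 4, 2));	/* 0 */
		return 0;
	}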
diff --git a/drivers/usb/serial/usb-serial-simple.c b/drivers/usb/serial/usb-serial-simple.c index 40864c2bd9dc..4d0273508043 100644 --- a/drivers/usb/serial/usb-serial-simple.c +++ b/drivers/usb/serial/usb-serial-simple.c | |||
@@ -84,7 +84,8 @@ DEVICE(moto_modem, MOTO_IDS); | |||
84 | 84 | ||
85 | /* Motorola Tetra driver */ | 85 | /* Motorola Tetra driver */ |
86 | #define MOTOROLA_TETRA_IDS() \ | 86 | #define MOTOROLA_TETRA_IDS() \ |
87 | { USB_DEVICE(0x0cad, 0x9011) } /* Motorola Solutions TETRA PEI */ | 87 | { USB_DEVICE(0x0cad, 0x9011) }, /* Motorola Solutions TETRA PEI */ \ |
88 | { USB_DEVICE(0x0cad, 0x9012) } /* MTP6550 */ | ||
88 | DEVICE(motorola_tetra, MOTOROLA_TETRA_IDS); | 89 | DEVICE(motorola_tetra, MOTOROLA_TETRA_IDS); |
89 | 90 | ||
90 | /* Novatel Wireless GPS driver */ | 91 | /* Novatel Wireless GPS driver */ |
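The Tetra fix is a reminder of how these ID-list macros are built: every entry needs a trailing comma and every line but the last a backslash continuation, otherwise the expansion inside the device table is malformed and the driver no longer builds. A toy standalone equivalent (struct usb_id and EXAMPLE_IDS() are invented, not the kernel's types):

	#include <stdio.h>

	struct usb_id {
		unsigned short vid, pid;
	};

	#define EXAMPLE_IDS() \
		{ 0x0cad, 0x9011 }, /* TETRA PEI */ \
		{ 0x0cad, 0x9012 }  /* MTP6550 */

	/* zero entry terminates the table, mirroring the kernel's sentinel */
	static const struct usb_id ids[] = { EXAMPLE_IDS(), { 0, 0 } };

	int main(void)
	{
		for (int i = 0; ids[i].vid; i++)
			printf("%04x:%04x\n", ids[i].vid, ids[i].pid);
		return 0;
	}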
diff --git a/drivers/video/fbdev/aty/atyfb.h b/drivers/video/fbdev/aty/atyfb.h index 8235b285dbb2..d09bab3bf224 100644 --- a/drivers/video/fbdev/aty/atyfb.h +++ b/drivers/video/fbdev/aty/atyfb.h | |||
@@ -333,6 +333,8 @@ extern const struct aty_pll_ops aty_pll_ct; /* Integrated */ | |||
333 | extern void aty_set_pll_ct(const struct fb_info *info, const union aty_pll *pll); | 333 | extern void aty_set_pll_ct(const struct fb_info *info, const union aty_pll *pll); |
334 | extern u8 aty_ld_pll_ct(int offset, const struct atyfb_par *par); | 334 | extern u8 aty_ld_pll_ct(int offset, const struct atyfb_par *par); |
335 | 335 | ||
336 | extern const u8 aty_postdividers[8]; | ||
337 | |||
336 | 338 | ||
337 | /* | 339 | /* |
338 | * Hardware cursor support | 340 | * Hardware cursor support |
@@ -359,7 +361,6 @@ static inline void wait_for_idle(struct atyfb_par *par) | |||
359 | 361 | ||
360 | extern void aty_reset_engine(const struct atyfb_par *par); | 362 | extern void aty_reset_engine(const struct atyfb_par *par); |
361 | extern void aty_init_engine(struct atyfb_par *par, struct fb_info *info); | 363 | extern void aty_init_engine(struct atyfb_par *par, struct fb_info *info); |
362 | extern u8 aty_ld_pll_ct(int offset, const struct atyfb_par *par); | ||
363 | 364 | ||
364 | void atyfb_copyarea(struct fb_info *info, const struct fb_copyarea *area); | 365 | void atyfb_copyarea(struct fb_info *info, const struct fb_copyarea *area); |
365 | void atyfb_fillrect(struct fb_info *info, const struct fb_fillrect *rect); | 366 | void atyfb_fillrect(struct fb_info *info, const struct fb_fillrect *rect); |
diff --git a/drivers/video/fbdev/aty/atyfb_base.c b/drivers/video/fbdev/aty/atyfb_base.c index a9a8272f7a6e..05111e90f168 100644 --- a/drivers/video/fbdev/aty/atyfb_base.c +++ b/drivers/video/fbdev/aty/atyfb_base.c | |||
@@ -3087,17 +3087,18 @@ static int atyfb_setup_sparc(struct pci_dev *pdev, struct fb_info *info, | |||
3087 | /* | 3087 | /* |
3088 | * PLL Reference Divider M: | 3088 | * PLL Reference Divider M: |
3089 | */ | 3089 | */ |
3090 | M = pll_regs[2]; | 3090 | M = pll_regs[PLL_REF_DIV]; |
3091 | 3091 | ||
3092 | /* | 3092 | /* |
3093 | * PLL Feedback Divider N (Dependent on CLOCK_CNTL): | 3093 | * PLL Feedback Divider N (Dependent on CLOCK_CNTL): |
3094 | */ | 3094 | */ |
3095 | N = pll_regs[7 + (clock_cntl & 3)]; | 3095 | N = pll_regs[VCLK0_FB_DIV + (clock_cntl & 3)]; |
3096 | 3096 | ||
3097 | /* | 3097 | /* |
3098 | * PLL Post Divider P (Dependent on CLOCK_CNTL): | 3098 | * PLL Post Divider P (Dependent on CLOCK_CNTL): |
3099 | */ | 3099 | */ |
3100 | P = 1 << (pll_regs[6] >> ((clock_cntl & 3) << 1)); | 3100 | P = aty_postdividers[((pll_regs[VCLK_POST_DIV] >> ((clock_cntl & 3) << 1)) & 3) | |
3101 | ((pll_regs[PLL_EXT_CNTL] >> (2 + (clock_cntl & 3))) & 4)]; | ||
3101 | 3102 | ||
3102 | /* | 3103 | /* |
3103 | * PLL Divider Q: | 3104 | * PLL Divider Q: |
diff --git a/drivers/video/fbdev/aty/mach64_ct.c b/drivers/video/fbdev/aty/mach64_ct.c index 74a62aa193c0..f87cc81f4fa2 100644 --- a/drivers/video/fbdev/aty/mach64_ct.c +++ b/drivers/video/fbdev/aty/mach64_ct.c | |||
@@ -115,7 +115,7 @@ static void aty_st_pll_ct(int offset, u8 val, const struct atyfb_par *par) | |||
115 | */ | 115 | */ |
116 | 116 | ||
117 | #define Maximum_DSP_PRECISION 7 | 117 | #define Maximum_DSP_PRECISION 7 |
118 | static u8 postdividers[] = {1,2,4,8,3}; | 118 | const u8 aty_postdividers[8] = {1,2,4,8,3,5,6,12}; |
119 | 119 | ||
120 | static int aty_dsp_gt(const struct fb_info *info, u32 bpp, struct pll_ct *pll) | 120 | static int aty_dsp_gt(const struct fb_info *info, u32 bpp, struct pll_ct *pll) |
121 | { | 121 | { |
@@ -222,7 +222,7 @@ static int aty_valid_pll_ct(const struct fb_info *info, u32 vclk_per, struct pll | |||
222 | pll->vclk_post_div += (q < 64*8); | 222 | pll->vclk_post_div += (q < 64*8); |
223 | pll->vclk_post_div += (q < 32*8); | 223 | pll->vclk_post_div += (q < 32*8); |
224 | } | 224 | } |
225 | pll->vclk_post_div_real = postdividers[pll->vclk_post_div]; | 225 | pll->vclk_post_div_real = aty_postdividers[pll->vclk_post_div]; |
226 | // pll->vclk_post_div <<= 6; | 226 | // pll->vclk_post_div <<= 6; |
227 | pll->vclk_fb_div = q * pll->vclk_post_div_real / 8; | 227 | pll->vclk_fb_div = q * pll->vclk_post_div_real / 8; |
228 | pllvclk = (1000000 * 2 * pll->vclk_fb_div) / | 228 | pllvclk = (1000000 * 2 * pll->vclk_fb_div) / |
@@ -513,7 +513,7 @@ static int aty_init_pll_ct(const struct fb_info *info, union aty_pll *pll) | |||
513 | u8 mclk_fb_div, pll_ext_cntl; | 513 | u8 mclk_fb_div, pll_ext_cntl; |
514 | pll->ct.pll_ref_div = aty_ld_pll_ct(PLL_REF_DIV, par); | 514 | pll->ct.pll_ref_div = aty_ld_pll_ct(PLL_REF_DIV, par); |
515 | pll_ext_cntl = aty_ld_pll_ct(PLL_EXT_CNTL, par); | 515 | pll_ext_cntl = aty_ld_pll_ct(PLL_EXT_CNTL, par); |
516 | pll->ct.xclk_post_div_real = postdividers[pll_ext_cntl & 0x07]; | 516 | pll->ct.xclk_post_div_real = aty_postdividers[pll_ext_cntl & 0x07]; |
517 | mclk_fb_div = aty_ld_pll_ct(MCLK_FB_DIV, par); | 517 | mclk_fb_div = aty_ld_pll_ct(MCLK_FB_DIV, par); |
518 | if (pll_ext_cntl & PLL_MFB_TIMES_4_2B) | 518 | if (pll_ext_cntl & PLL_MFB_TIMES_4_2B) |
519 | mclk_fb_div <<= 1; | 519 | mclk_fb_div <<= 1; |
@@ -535,7 +535,7 @@ static int aty_init_pll_ct(const struct fb_info *info, union aty_pll *pll) | |||
535 | xpost_div += (q < 64*8); | 535 | xpost_div += (q < 64*8); |
536 | xpost_div += (q < 32*8); | 536 | xpost_div += (q < 32*8); |
537 | } | 537 | } |
538 | pll->ct.xclk_post_div_real = postdividers[xpost_div]; | 538 | pll->ct.xclk_post_div_real = aty_postdividers[xpost_div]; |
539 | pll->ct.mclk_fb_div = q * pll->ct.xclk_post_div_real / 8; | 539 | pll->ct.mclk_fb_div = q * pll->ct.xclk_post_div_real / 8; |
540 | 540 | ||
541 | #ifdef CONFIG_PPC | 541 | #ifdef CONFIG_PPC |
@@ -584,7 +584,7 @@ static int aty_init_pll_ct(const struct fb_info *info, union aty_pll *pll) | |||
584 | mpost_div += (q < 64*8); | 584 | mpost_div += (q < 64*8); |
585 | mpost_div += (q < 32*8); | 585 | mpost_div += (q < 32*8); |
586 | } | 586 | } |
587 | sclk_post_div_real = postdividers[mpost_div]; | 587 | sclk_post_div_real = aty_postdividers[mpost_div]; |
588 | pll->ct.sclk_fb_div = q * sclk_post_div_real / 8; | 588 | pll->ct.sclk_fb_div = q * sclk_post_div_real / 8; |
589 | pll->ct.spll_cntl2 = mpost_div << 4; | 589 | pll->ct.spll_cntl2 = mpost_div << 4; |
590 | #ifdef DEBUG | 590 | #ifdef DEBUG |
diff --git a/fs/afs/cell.c b/fs/afs/cell.c index f3d0bef16d78..6127f0fcd62c 100644 --- a/fs/afs/cell.c +++ b/fs/afs/cell.c | |||
@@ -514,6 +514,8 @@ static int afs_alloc_anon_key(struct afs_cell *cell) | |||
514 | */ | 514 | */ |
515 | static int afs_activate_cell(struct afs_net *net, struct afs_cell *cell) | 515 | static int afs_activate_cell(struct afs_net *net, struct afs_cell *cell) |
516 | { | 516 | { |
517 | struct hlist_node **p; | ||
518 | struct afs_cell *pcell; | ||
517 | int ret; | 519 | int ret; |
518 | 520 | ||
519 | if (!cell->anonymous_key) { | 521 | if (!cell->anonymous_key) { |
@@ -534,7 +536,18 @@ static int afs_activate_cell(struct afs_net *net, struct afs_cell *cell) | |||
534 | return ret; | 536 | return ret; |
535 | 537 | ||
536 | mutex_lock(&net->proc_cells_lock); | 538 | mutex_lock(&net->proc_cells_lock); |
537 | list_add_tail(&cell->proc_link, &net->proc_cells); | 539 | for (p = &net->proc_cells.first; *p; p = &(*p)->next) { |
540 | pcell = hlist_entry(*p, struct afs_cell, proc_link); | ||
541 | if (strcmp(cell->name, pcell->name) < 0) | ||
542 | break; | ||
543 | } | ||
544 | |||
545 | cell->proc_link.pprev = p; | ||
546 | cell->proc_link.next = *p; | ||
547 | rcu_assign_pointer(*p, &cell->proc_link.next); | ||
548 | if (cell->proc_link.next) | ||
549 | cell->proc_link.next->pprev = &cell->proc_link.next; | ||
550 | |||
538 | afs_dynroot_mkdir(net, cell); | 551 | afs_dynroot_mkdir(net, cell); |
539 | mutex_unlock(&net->proc_cells_lock); | 552 | mutex_unlock(&net->proc_cells_lock); |
540 | return 0; | 553 | return 0; |
@@ -550,7 +563,7 @@ static void afs_deactivate_cell(struct afs_net *net, struct afs_cell *cell) | |||
550 | afs_proc_cell_remove(cell); | 563 | afs_proc_cell_remove(cell); |
551 | 564 | ||
552 | mutex_lock(&net->proc_cells_lock); | 565 | mutex_lock(&net->proc_cells_lock); |
553 | list_del_init(&cell->proc_link); | 566 | hlist_del_rcu(&cell->proc_link); |
554 | afs_dynroot_rmdir(net, cell); | 567 | afs_dynroot_rmdir(net, cell); |
555 | mutex_unlock(&net->proc_cells_lock); | 568 | mutex_unlock(&net->proc_cells_lock); |
556 | 569 | ||
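afs_activate_cell() now keeps net->proc_cells sorted by cell name: it walks the hlist through a pointer to each node's next slot, stops at the first entry that sorts after the new cell, and splices the new node in with rcu_assign_pointer() so readers under RCU see a consistent list. The same walk-and-splice idiom, stripped down to a standalone singly linked list (struct node and insert_sorted() are invented for the illustration; no RCU here):

	#include <stdio.h>
	#include <string.h>

	struct node {
		const char *name;
		struct node *next;
	};

	/* Walking with a pointer to the "next" slot means inserting at the head
	 * needs no special case, just like the &net->proc_cells.first walk above. */
	static void insert_sorted(struct node **head, struct node *n)
	{
		struct node **p;

		for (p = head; *p; p = &(*p)->next)
			if (strcmp(n->name, (*p)->name) < 0)
				break;

		n->next = *p;
		*p = n;
	}

	int main(void)
	{
		struct node c = { "gamma.example.org", NULL };
		struct node a = { "alpha.example.org", NULL };
		struct node b = { "beta.example.org", NULL };
		struct node *head = NULL;

		insert_sorted(&head, &c);
		insert_sorted(&head, &a);
		insert_sorted(&head, &b);

		for (struct node *n = head; n; n = n->next)
			printf("%s\n", n->name);	/* alpha, beta, gamma */
		return 0;
	}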
diff --git a/fs/afs/dynroot.c b/fs/afs/dynroot.c index 1cde710a8013..f29c6dade7f6 100644 --- a/fs/afs/dynroot.c +++ b/fs/afs/dynroot.c | |||
@@ -265,7 +265,7 @@ int afs_dynroot_populate(struct super_block *sb) | |||
265 | return -ERESTARTSYS; | 265 | return -ERESTARTSYS; |
266 | 266 | ||
267 | net->dynroot_sb = sb; | 267 | net->dynroot_sb = sb; |
268 | list_for_each_entry(cell, &net->proc_cells, proc_link) { | 268 | hlist_for_each_entry(cell, &net->proc_cells, proc_link) { |
269 | ret = afs_dynroot_mkdir(net, cell); | 269 | ret = afs_dynroot_mkdir(net, cell); |
270 | if (ret < 0) | 270 | if (ret < 0) |
271 | goto error; | 271 | goto error; |
diff --git a/fs/afs/internal.h b/fs/afs/internal.h index 8ae4e2ebb99a..72de1f157d20 100644 --- a/fs/afs/internal.h +++ b/fs/afs/internal.h | |||
@@ -244,7 +244,7 @@ struct afs_net { | |||
244 | seqlock_t cells_lock; | 244 | seqlock_t cells_lock; |
245 | 245 | ||
246 | struct mutex proc_cells_lock; | 246 | struct mutex proc_cells_lock; |
247 | struct list_head proc_cells; | 247 | struct hlist_head proc_cells; |
248 | 248 | ||
249 | /* Known servers. Theoretically each fileserver can only be in one | 249 | /* Known servers. Theoretically each fileserver can only be in one |
250 | * cell, but in practice, people create aliases and subsets and there's | 250 | * cell, but in practice, people create aliases and subsets and there's |
@@ -322,7 +322,7 @@ struct afs_cell { | |||
322 | struct afs_net *net; | 322 | struct afs_net *net; |
323 | struct key *anonymous_key; /* anonymous user key for this cell */ | 323 | struct key *anonymous_key; /* anonymous user key for this cell */ |
324 | struct work_struct manager; /* Manager for init/deinit/dns */ | 324 | struct work_struct manager; /* Manager for init/deinit/dns */ |
325 | struct list_head proc_link; /* /proc cell list link */ | 325 | struct hlist_node proc_link; /* /proc cell list link */ |
326 | #ifdef CONFIG_AFS_FSCACHE | 326 | #ifdef CONFIG_AFS_FSCACHE |
327 | struct fscache_cookie *cache; /* caching cookie */ | 327 | struct fscache_cookie *cache; /* caching cookie */ |
328 | #endif | 328 | #endif |
diff --git a/fs/afs/main.c b/fs/afs/main.c index e84fe822a960..107427688edd 100644 --- a/fs/afs/main.c +++ b/fs/afs/main.c | |||
@@ -87,7 +87,7 @@ static int __net_init afs_net_init(struct net *net_ns) | |||
87 | timer_setup(&net->cells_timer, afs_cells_timer, 0); | 87 | timer_setup(&net->cells_timer, afs_cells_timer, 0); |
88 | 88 | ||
89 | mutex_init(&net->proc_cells_lock); | 89 | mutex_init(&net->proc_cells_lock); |
90 | INIT_LIST_HEAD(&net->proc_cells); | 90 | INIT_HLIST_HEAD(&net->proc_cells); |
91 | 91 | ||
92 | seqlock_init(&net->fs_lock); | 92 | seqlock_init(&net->fs_lock); |
93 | net->fs_servers = RB_ROOT; | 93 | net->fs_servers = RB_ROOT; |
diff --git a/fs/afs/proc.c b/fs/afs/proc.c index 476dcbb79713..9101f62707af 100644 --- a/fs/afs/proc.c +++ b/fs/afs/proc.c | |||
@@ -33,9 +33,8 @@ static inline struct afs_net *afs_seq2net_single(struct seq_file *m) | |||
33 | static int afs_proc_cells_show(struct seq_file *m, void *v) | 33 | static int afs_proc_cells_show(struct seq_file *m, void *v) |
34 | { | 34 | { |
35 | struct afs_cell *cell = list_entry(v, struct afs_cell, proc_link); | 35 | struct afs_cell *cell = list_entry(v, struct afs_cell, proc_link); |
36 | struct afs_net *net = afs_seq2net(m); | ||
37 | 36 | ||
38 | if (v == &net->proc_cells) { | 37 | if (v == SEQ_START_TOKEN) { |
39 | /* display header on line 1 */ | 38 | /* display header on line 1 */ |
40 | seq_puts(m, "USE NAME\n"); | 39 | seq_puts(m, "USE NAME\n"); |
41 | return 0; | 40 | return 0; |
@@ -50,12 +49,12 @@ static void *afs_proc_cells_start(struct seq_file *m, loff_t *_pos) | |||
50 | __acquires(rcu) | 49 | __acquires(rcu) |
51 | { | 50 | { |
52 | rcu_read_lock(); | 51 | rcu_read_lock(); |
53 | return seq_list_start_head(&afs_seq2net(m)->proc_cells, *_pos); | 52 | return seq_hlist_start_head_rcu(&afs_seq2net(m)->proc_cells, *_pos); |
54 | } | 53 | } |
55 | 54 | ||
56 | static void *afs_proc_cells_next(struct seq_file *m, void *v, loff_t *pos) | 55 | static void *afs_proc_cells_next(struct seq_file *m, void *v, loff_t *pos) |
57 | { | 56 | { |
58 | return seq_list_next(v, &afs_seq2net(m)->proc_cells, pos); | 57 | return seq_hlist_next_rcu(v, &afs_seq2net(m)->proc_cells, pos); |
59 | } | 58 | } |
60 | 59 | ||
61 | static void afs_proc_cells_stop(struct seq_file *m, void *v) | 60 | static void afs_proc_cells_stop(struct seq_file *m, void *v) |
diff --git a/fs/afs/server.c b/fs/afs/server.c index 1d329e6981d5..2f306c0cc4ee 100644 --- a/fs/afs/server.c +++ b/fs/afs/server.c | |||
@@ -199,9 +199,11 @@ static struct afs_server *afs_install_server(struct afs_net *net, | |||
199 | 199 | ||
200 | write_sequnlock(&net->fs_addr_lock); | 200 | write_sequnlock(&net->fs_addr_lock); |
201 | ret = 0; | 201 | ret = 0; |
202 | goto out; | ||
202 | 203 | ||
203 | exists: | 204 | exists: |
204 | afs_get_server(server); | 205 | afs_get_server(server); |
206 | out: | ||
205 | write_sequnlock(&net->fs_lock); | 207 | write_sequnlock(&net->fs_lock); |
206 | return server; | 208 | return server; |
207 | } | 209 | } |
diff --git a/fs/gfs2/bmap.c b/fs/gfs2/bmap.c index 03128ed1f34e..3c159a7f9a9e 100644 --- a/fs/gfs2/bmap.c +++ b/fs/gfs2/bmap.c | |||
@@ -975,6 +975,10 @@ static void gfs2_iomap_journaled_page_done(struct inode *inode, loff_t pos, | |||
975 | { | 975 | { |
976 | struct gfs2_inode *ip = GFS2_I(inode); | 976 | struct gfs2_inode *ip = GFS2_I(inode); |
977 | 977 | ||
978 | if (!page_has_buffers(page)) { | ||
979 | create_empty_buffers(page, inode->i_sb->s_blocksize, | ||
980 | (1 << BH_Dirty)|(1 << BH_Uptodate)); | ||
981 | } | ||
978 | gfs2_page_add_databufs(ip, page, offset_in_page(pos), copied); | 982 | gfs2_page_add_databufs(ip, page, offset_in_page(pos), copied); |
979 | } | 983 | } |
980 | 984 | ||
diff --git a/fs/xfs/xfs_reflink.c b/fs/xfs/xfs_reflink.c index 5289e22cb081..42ea7bab9144 100644 --- a/fs/xfs/xfs_reflink.c +++ b/fs/xfs/xfs_reflink.c | |||
@@ -1220,35 +1220,92 @@ retry: | |||
1220 | return 0; | 1220 | return 0; |
1221 | } | 1221 | } |
1222 | 1222 | ||
1223 | /* Unlock both inodes after they've been prepped for a range clone. */ | ||
1224 | STATIC void | ||
1225 | xfs_reflink_remap_unlock( | ||
1226 | struct file *file_in, | ||
1227 | struct file *file_out) | ||
1228 | { | ||
1229 | struct inode *inode_in = file_inode(file_in); | ||
1230 | struct xfs_inode *src = XFS_I(inode_in); | ||
1231 | struct inode *inode_out = file_inode(file_out); | ||
1232 | struct xfs_inode *dest = XFS_I(inode_out); | ||
1233 | bool same_inode = (inode_in == inode_out); | ||
1234 | |||
1235 | xfs_iunlock(dest, XFS_MMAPLOCK_EXCL); | ||
1236 | if (!same_inode) | ||
1237 | xfs_iunlock(src, XFS_MMAPLOCK_SHARED); | ||
1238 | inode_unlock(inode_out); | ||
1239 | if (!same_inode) | ||
1240 | inode_unlock_shared(inode_in); | ||
1241 | } | ||
1242 | |||
1223 | /* | 1243 | /* |
1224 | * Link a range of blocks from one file to another. | 1244 | * If we're reflinking to a point past the destination file's EOF, we must |
1245 | * zero any speculative post-EOF preallocations that sit between the old EOF | ||
1246 | * and the destination file offset. | ||
1225 | */ | 1247 | */ |
1226 | int | 1248 | static int |
1227 | xfs_reflink_remap_range( | 1249 | xfs_reflink_zero_posteof( |
1250 | struct xfs_inode *ip, | ||
1251 | loff_t pos) | ||
1252 | { | ||
1253 | loff_t isize = i_size_read(VFS_I(ip)); | ||
1254 | |||
1255 | if (pos <= isize) | ||
1256 | return 0; | ||
1257 | |||
1258 | trace_xfs_zero_eof(ip, isize, pos - isize); | ||
1259 | return iomap_zero_range(VFS_I(ip), isize, pos - isize, NULL, | ||
1260 | &xfs_iomap_ops); | ||
1261 | } | ||
1262 | |||
1263 | /* | ||
1264 | * Prepare two files for range cloning. Upon a successful return both inodes | ||
1265 | * will have the iolock and mmaplock held, the page cache of the out file will | ||
1266 | * be truncated, and any leases on the out file will have been broken. This | ||
1267 | * function borrows heavily from xfs_file_aio_write_checks. | ||
1268 | * | ||
1269 | * The VFS allows partial EOF blocks to "match" for dedupe even though it hasn't | ||
1270 | * checked that the bytes beyond EOF physically match. Hence we cannot use the | ||
1271 | * EOF block in the source dedupe range because it's not a complete block match, | ||
1272 | * and so can introduce corruption into the file that has its block replaced. | ||
1273 | * | ||
1274 | * In similar fashion, the VFS file cloning also allows partial EOF blocks to be | ||
1275 | * "block aligned" for the purposes of cloning entire files. However, if the | ||
1276 | * source file range includes the EOF block and it lands within the existing EOF | ||
1277 | * of the destination file, then we can expose stale data from beyond the source | ||
1278 | * file EOF in the destination file. | ||
1279 | * | ||
1280 | * XFS doesn't support partial block sharing, so in both cases we have to check | ||
1281 | * these cases ourselves. For dedupe, we can simply round the dedupe length | ||
1282 | * down to the previous whole block and ignore the partial EOF block. While this | ||
1283 | * means we can't dedupe the last block of a file, this is an acceptable | ||
1284 | * tradeoff for simplicity on implementation. | ||
1285 | * | ||
1286 | * For cloning, we want to share the partial EOF block if it is also the new EOF | ||
1287 | * block of the destination file. If the partial EOF block lies inside the | ||
1288 | * existing destination EOF, then we have to abort the clone to avoid exposing | ||
1289 | * stale data in the destination file. Hence we reject these clone attempts with | ||
1290 | * -EINVAL in this case. | ||
1291 | */ | ||
1292 | STATIC int | ||
1293 | xfs_reflink_remap_prep( | ||
1228 | struct file *file_in, | 1294 | struct file *file_in, |
1229 | loff_t pos_in, | 1295 | loff_t pos_in, |
1230 | struct file *file_out, | 1296 | struct file *file_out, |
1231 | loff_t pos_out, | 1297 | loff_t pos_out, |
1232 | u64 len, | 1298 | u64 *len, |
1233 | bool is_dedupe) | 1299 | bool is_dedupe) |
1234 | { | 1300 | { |
1235 | struct inode *inode_in = file_inode(file_in); | 1301 | struct inode *inode_in = file_inode(file_in); |
1236 | struct xfs_inode *src = XFS_I(inode_in); | 1302 | struct xfs_inode *src = XFS_I(inode_in); |
1237 | struct inode *inode_out = file_inode(file_out); | 1303 | struct inode *inode_out = file_inode(file_out); |
1238 | struct xfs_inode *dest = XFS_I(inode_out); | 1304 | struct xfs_inode *dest = XFS_I(inode_out); |
1239 | struct xfs_mount *mp = src->i_mount; | ||
1240 | bool same_inode = (inode_in == inode_out); | 1305 | bool same_inode = (inode_in == inode_out); |
1241 | xfs_fileoff_t sfsbno, dfsbno; | 1306 | u64 blkmask = i_blocksize(inode_in) - 1; |
1242 | xfs_filblks_t fsblen; | ||
1243 | xfs_extlen_t cowextsize; | ||
1244 | ssize_t ret; | 1307 | ssize_t ret; |
1245 | 1308 | ||
1246 | if (!xfs_sb_version_hasreflink(&mp->m_sb)) | ||
1247 | return -EOPNOTSUPP; | ||
1248 | |||
1249 | if (XFS_FORCED_SHUTDOWN(mp)) | ||
1250 | return -EIO; | ||
1251 | |||
1252 | /* Lock both files against IO */ | 1309 | /* Lock both files against IO */ |
1253 | ret = xfs_iolock_two_inodes_and_break_layout(inode_in, inode_out); | 1310 | ret = xfs_iolock_two_inodes_and_break_layout(inode_in, inode_out); |
1254 | if (ret) | 1311 | if (ret) |
@@ -1270,33 +1327,115 @@ xfs_reflink_remap_range( | |||
1270 | goto out_unlock; | 1327 | goto out_unlock; |
1271 | 1328 | ||
1272 | ret = vfs_clone_file_prep_inodes(inode_in, pos_in, inode_out, pos_out, | 1329 | ret = vfs_clone_file_prep_inodes(inode_in, pos_in, inode_out, pos_out, |
1273 | &len, is_dedupe); | 1330 | len, is_dedupe); |
1274 | if (ret <= 0) | 1331 | if (ret <= 0) |
1275 | goto out_unlock; | 1332 | goto out_unlock; |
1276 | 1333 | ||
1334 | /* | ||
1335 | * If the dedupe data matches, chop off the partial EOF block | ||
1336 | * from the source file so we don't try to dedupe the partial | ||
1337 | * EOF block. | ||
1338 | */ | ||
1339 | if (is_dedupe) { | ||
1340 | *len &= ~blkmask; | ||
1341 | } else if (*len & blkmask) { | ||
1342 | /* | ||
1343 | * The user is attempting to share a partial EOF block, | ||
1344 | * if it's inside the destination EOF then reject it. | ||
1345 | */ | ||
1346 | if (pos_out + *len < i_size_read(inode_out)) { | ||
1347 | ret = -EINVAL; | ||
1348 | goto out_unlock; | ||
1349 | } | ||
1350 | } | ||
1351 | |||
1277 | /* Attach dquots to dest inode before changing block map */ | 1352 | /* Attach dquots to dest inode before changing block map */ |
1278 | ret = xfs_qm_dqattach(dest); | 1353 | ret = xfs_qm_dqattach(dest); |
1279 | if (ret) | 1354 | if (ret) |
1280 | goto out_unlock; | 1355 | goto out_unlock; |
1281 | 1356 | ||
1282 | trace_xfs_reflink_remap_range(src, pos_in, len, dest, pos_out); | ||
1283 | |||
1284 | /* | 1357 | /* |
1285 | * Clear out post-eof preallocations because we don't have page cache | 1358 | * Zero existing post-eof speculative preallocations in the destination |
1286 | * backing the delayed allocations and they'll never get freed on | 1359 | * file. |
1287 | * their own. | ||
1288 | */ | 1360 | */ |
1289 | if (xfs_can_free_eofblocks(dest, true)) { | 1361 | ret = xfs_reflink_zero_posteof(dest, pos_out); |
1290 | ret = xfs_free_eofblocks(dest); | 1362 | if (ret) |
1291 | if (ret) | 1363 | goto out_unlock; |
1292 | goto out_unlock; | ||
1293 | } | ||
1294 | 1364 | ||
1295 | /* Set flags and remap blocks. */ | 1365 | /* Set flags and remap blocks. */ |
1296 | ret = xfs_reflink_set_inode_flag(src, dest); | 1366 | ret = xfs_reflink_set_inode_flag(src, dest); |
1297 | if (ret) | 1367 | if (ret) |
1298 | goto out_unlock; | 1368 | goto out_unlock; |
1299 | 1369 | ||
1370 | /* Zap any page cache for the destination file's range. */ | ||
1371 | truncate_inode_pages_range(&inode_out->i_data, pos_out, | ||
1372 | PAGE_ALIGN(pos_out + *len) - 1); | ||
1373 | |||
1374 | /* If we're altering the file contents... */ | ||
1375 | if (!is_dedupe) { | ||
1376 | /* | ||
1377 | * ...update the timestamps (which will grab the ilock again | ||
1378 | * from xfs_fs_dirty_inode, so we have to call it before we | ||
1379 | * take the ilock). | ||
1380 | */ | ||
1381 | if (!(file_out->f_mode & FMODE_NOCMTIME)) { | ||
1382 | ret = file_update_time(file_out); | ||
1383 | if (ret) | ||
1384 | goto out_unlock; | ||
1385 | } | ||
1386 | |||
1387 | /* | ||
1388 | * ...clear the security bits if the process is not being run | ||
1389 | * by root. This keeps people from modifying setuid and setgid | ||
1390 | * binaries. | ||
1391 | */ | ||
1392 | ret = file_remove_privs(file_out); | ||
1393 | if (ret) | ||
1394 | goto out_unlock; | ||
1395 | } | ||
1396 | |||
1397 | return 1; | ||
1398 | out_unlock: | ||
1399 | xfs_reflink_remap_unlock(file_in, file_out); | ||
1400 | return ret; | ||
1401 | } | ||
1402 | |||
1403 | /* | ||
1404 | * Link a range of blocks from one file to another. | ||
1405 | */ | ||
1406 | int | ||
1407 | xfs_reflink_remap_range( | ||
1408 | struct file *file_in, | ||
1409 | loff_t pos_in, | ||
1410 | struct file *file_out, | ||
1411 | loff_t pos_out, | ||
1412 | u64 len, | ||
1413 | bool is_dedupe) | ||
1414 | { | ||
1415 | struct inode *inode_in = file_inode(file_in); | ||
1416 | struct xfs_inode *src = XFS_I(inode_in); | ||
1417 | struct inode *inode_out = file_inode(file_out); | ||
1418 | struct xfs_inode *dest = XFS_I(inode_out); | ||
1419 | struct xfs_mount *mp = src->i_mount; | ||
1420 | xfs_fileoff_t sfsbno, dfsbno; | ||
1421 | xfs_filblks_t fsblen; | ||
1422 | xfs_extlen_t cowextsize; | ||
1423 | ssize_t ret; | ||
1424 | |||
1425 | if (!xfs_sb_version_hasreflink(&mp->m_sb)) | ||
1426 | return -EOPNOTSUPP; | ||
1427 | |||
1428 | if (XFS_FORCED_SHUTDOWN(mp)) | ||
1429 | return -EIO; | ||
1430 | |||
1431 | /* Prepare and then clone file data. */ | ||
1432 | ret = xfs_reflink_remap_prep(file_in, pos_in, file_out, pos_out, | ||
1433 | &len, is_dedupe); | ||
1434 | if (ret <= 0) | ||
1435 | return ret; | ||
1436 | |||
1437 | trace_xfs_reflink_remap_range(src, pos_in, len, dest, pos_out); | ||
1438 | |||
1300 | dfsbno = XFS_B_TO_FSBT(mp, pos_out); | 1439 | dfsbno = XFS_B_TO_FSBT(mp, pos_out); |
1301 | sfsbno = XFS_B_TO_FSBT(mp, pos_in); | 1440 | sfsbno = XFS_B_TO_FSBT(mp, pos_in); |
1302 | fsblen = XFS_B_TO_FSB(mp, len); | 1441 | fsblen = XFS_B_TO_FSB(mp, len); |
@@ -1305,10 +1444,6 @@ xfs_reflink_remap_range( | |||
1305 | if (ret) | 1444 | if (ret) |
1306 | goto out_unlock; | 1445 | goto out_unlock; |
1307 | 1446 | ||
1308 | /* Zap any page cache for the destination file's range. */ | ||
1309 | truncate_inode_pages_range(&inode_out->i_data, pos_out, | ||
1310 | PAGE_ALIGN(pos_out + len) - 1); | ||
1311 | |||
1312 | /* | 1447 | /* |
1313 | * Carry the cowextsize hint from src to dest if we're sharing the | 1448 | * Carry the cowextsize hint from src to dest if we're sharing the |
1314 | * entire source file to the entire destination file, the source file | 1449 | * entire source file to the entire destination file, the source file |
@@ -1325,12 +1460,7 @@ xfs_reflink_remap_range( | |||
1325 | is_dedupe); | 1460 | is_dedupe); |
1326 | 1461 | ||
1327 | out_unlock: | 1462 | out_unlock: |
1328 | xfs_iunlock(dest, XFS_MMAPLOCK_EXCL); | 1463 | xfs_reflink_remap_unlock(file_in, file_out); |
1329 | if (!same_inode) | ||
1330 | xfs_iunlock(src, XFS_MMAPLOCK_SHARED); | ||
1331 | inode_unlock(inode_out); | ||
1332 | if (!same_inode) | ||
1333 | inode_unlock_shared(inode_in); | ||
1334 | if (ret) | 1464 | if (ret) |
1335 | trace_xfs_reflink_remap_range_error(dest, ret, _RET_IP_); | 1465 | trace_xfs_reflink_remap_range_error(dest, ret, _RET_IP_); |
1336 | return ret; | 1466 | return ret; |
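The mask arithmetic introduced in xfs_reflink_remap_prep() is what rounds a dedupe request down to whole blocks and spots a partial EOF block in a clone request. A rough userspace sketch of the same arithmetic, assuming a 4 KiB block size; the values are made up for illustration:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        uint64_t blkmask = 4096 - 1;            /* i_blocksize(inode) - 1 */
        uint64_t len = 10000;                   /* request ends in a partial block */

        uint64_t dedupe_len = len & ~blkmask;   /* dedupe: drop the tail -> 8192 */
        uint64_t tail = len & blkmask;          /* clone: 1808 bytes past the boundary */

        printf("dedupe length %llu, partial tail %llu\n",
               (unsigned long long)dedupe_len, (unsigned long long)tail);
        return 0;
    }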
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h index 7b75ff6e2fce..d7701d466b60 100644 --- a/include/asm-generic/vmlinux.lds.h +++ b/include/asm-generic/vmlinux.lds.h | |||
@@ -68,7 +68,7 @@ | |||
68 | */ | 68 | */ |
69 | #ifdef CONFIG_LD_DEAD_CODE_DATA_ELIMINATION | 69 | #ifdef CONFIG_LD_DEAD_CODE_DATA_ELIMINATION |
70 | #define TEXT_MAIN .text .text.[0-9a-zA-Z_]* | 70 | #define TEXT_MAIN .text .text.[0-9a-zA-Z_]* |
71 | #define DATA_MAIN .data .data.[0-9a-zA-Z_]* | 71 | #define DATA_MAIN .data .data.[0-9a-zA-Z_]* .data..LPBX* |
72 | #define SDATA_MAIN .sdata .sdata.[0-9a-zA-Z_]* | 72 | #define SDATA_MAIN .sdata .sdata.[0-9a-zA-Z_]* |
73 | #define RODATA_MAIN .rodata .rodata.[0-9a-zA-Z_]* | 73 | #define RODATA_MAIN .rodata .rodata.[0-9a-zA-Z_]* |
74 | #define BSS_MAIN .bss .bss.[0-9a-zA-Z_]* | 74 | #define BSS_MAIN .bss .bss.[0-9a-zA-Z_]* |
@@ -613,8 +613,8 @@ | |||
613 | 613 | ||
614 | #define EXIT_DATA \ | 614 | #define EXIT_DATA \ |
615 | *(.exit.data .exit.data.*) \ | 615 | *(.exit.data .exit.data.*) \ |
616 | *(.fini_array) \ | 616 | *(.fini_array .fini_array.*) \ |
617 | *(.dtors) \ | 617 | *(.dtors .dtors.*) \ |
618 | MEM_DISCARD(exit.data*) \ | 618 | MEM_DISCARD(exit.data*) \ |
619 | MEM_DISCARD(exit.rodata*) | 619 | MEM_DISCARD(exit.rodata*) |
620 | 620 | ||
diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h index ff20b677fb9f..22254c1fe1c5 100644 --- a/include/linux/cgroup-defs.h +++ b/include/linux/cgroup-defs.h | |||
@@ -412,6 +412,7 @@ struct cgroup { | |||
412 | * specific task are charged to the dom_cgrp. | 412 | * specific task are charged to the dom_cgrp. |
413 | */ | 413 | */ |
414 | struct cgroup *dom_cgrp; | 414 | struct cgroup *dom_cgrp; |
415 | struct cgroup *old_dom_cgrp; /* used while enabling threaded */ | ||
415 | 416 | ||
416 | /* per-cpu recursive resource statistics */ | 417 | /* per-cpu recursive resource statistics */ |
417 | struct cgroup_rstat_cpu __percpu *rstat_cpu; | 418 | struct cgroup_rstat_cpu __percpu *rstat_cpu; |
diff --git a/include/linux/fpga/fpga-mgr.h b/include/linux/fpga/fpga-mgr.h index 8942e61f0028..8ab5df769923 100644 --- a/include/linux/fpga/fpga-mgr.h +++ b/include/linux/fpga/fpga-mgr.h | |||
@@ -53,12 +53,20 @@ enum fpga_mgr_states { | |||
53 | FPGA_MGR_STATE_OPERATING, | 53 | FPGA_MGR_STATE_OPERATING, |
54 | }; | 54 | }; |
55 | 55 | ||
56 | /* | 56 | /** |
57 | * FPGA Manager flags | 57 | * DOC: FPGA Manager flags |
58 | * FPGA_MGR_PARTIAL_RECONFIG: do partial reconfiguration if supported | 58 | * |
59 | * FPGA_MGR_EXTERNAL_CONFIG: FPGA has been configured prior to Linux booting | 59 | * Flags used in the &fpga_image_info->flags field |
60 | * FPGA_MGR_BITSTREAM_LSB_FIRST: SPI bitstream bit order is LSB first | 60 | * |
61 | * FPGA_MGR_COMPRESSED_BITSTREAM: FPGA bitstream is compressed | 61 | * %FPGA_MGR_PARTIAL_RECONFIG: do partial reconfiguration if supported |
62 | * | ||
63 | * %FPGA_MGR_EXTERNAL_CONFIG: FPGA has been configured prior to Linux booting | ||
64 | * | ||
65 | * %FPGA_MGR_ENCRYPTED_BITSTREAM: indicates bitstream is encrypted | ||
66 | * | ||
67 | * %FPGA_MGR_BITSTREAM_LSB_FIRST: SPI bitstream bit order is LSB first | ||
68 | * | ||
69 | * %FPGA_MGR_COMPRESSED_BITSTREAM: FPGA bitstream is compressed | ||
62 | */ | 70 | */ |
63 | #define FPGA_MGR_PARTIAL_RECONFIG BIT(0) | 71 | #define FPGA_MGR_PARTIAL_RECONFIG BIT(0) |
64 | #define FPGA_MGR_EXTERNAL_CONFIG BIT(1) | 72 | #define FPGA_MGR_EXTERNAL_CONFIG BIT(1) |
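These flags travel in fpga_image_info->flags, so a consumer driver sets them before handing the image to the manager. A minimal sketch, assuming the usual fpga_image_info_alloc()/fpga_mgr_load() flow; the firmware name and the function name are placeholders:

    #include <linux/device.h>
    #include <linux/slab.h>
    #include <linux/fpga/fpga-mgr.h>

    /* Hypothetical consumer: request partial reconfiguration from firmware. */
    static int example_partial_reconfig(struct device *dev, struct fpga_manager *mgr)
    {
        struct fpga_image_info *info;
        int ret;

        info = fpga_image_info_alloc(dev);
        if (!info)
            return -ENOMEM;

        info->flags |= FPGA_MGR_PARTIAL_RECONFIG;
        info->firmware_name = devm_kstrdup(dev, "overlay.bit", GFP_KERNEL);

        ret = fpga_mgr_lock(mgr);
        if (!ret) {
            ret = fpga_mgr_load(mgr, info);
            fpga_mgr_unlock(mgr);
        }
        fpga_image_info_free(info);
        return ret;
    }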
diff --git a/include/linux/gpio/driver.h b/include/linux/gpio/driver.h index 0ea328e71ec9..a4d5eb37744a 100644 --- a/include/linux/gpio/driver.h +++ b/include/linux/gpio/driver.h | |||
@@ -95,6 +95,13 @@ struct gpio_irq_chip { | |||
95 | unsigned int num_parents; | 95 | unsigned int num_parents; |
96 | 96 | ||
97 | /** | 97 | /** |
98 | * @parent_irq: | ||
99 | * | ||
100 | * For use by gpiochip_set_cascaded_irqchip() | ||
101 | */ | ||
102 | unsigned int parent_irq; | ||
103 | |||
104 | /** | ||
98 | * @parents: | 105 | * @parents: |
99 | * | 106 | * |
100 | * A list of interrupt parents of a GPIO chip. This is owned by the | 107 | * A list of interrupt parents of a GPIO chip. This is owned by the |
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h index 3f4c0b167333..d4b0c79d2924 100644 --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h | |||
@@ -668,10 +668,6 @@ typedef struct pglist_data { | |||
668 | wait_queue_head_t kcompactd_wait; | 668 | wait_queue_head_t kcompactd_wait; |
669 | struct task_struct *kcompactd; | 669 | struct task_struct *kcompactd; |
670 | #endif | 670 | #endif |
671 | #ifdef CONFIG_NUMA_BALANCING | ||
672 | /* Lock serializing the migrate rate limiting window */ | ||
673 | spinlock_t numabalancing_migrate_lock; | ||
674 | #endif | ||
675 | /* | 671 | /* |
676 | * This is a per-node reserve of pages that are not available | 672 | * This is a per-node reserve of pages that are not available |
677 | * to userspace allocations. | 673 | * to userspace allocations. |
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 76603ee136a8..22e4ef7bb701 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h | |||
@@ -2496,6 +2496,13 @@ struct netdev_notifier_info { | |||
2496 | struct netlink_ext_ack *extack; | 2496 | struct netlink_ext_ack *extack; |
2497 | }; | 2497 | }; |
2498 | 2498 | ||
2499 | struct netdev_notifier_info_ext { | ||
2500 | struct netdev_notifier_info info; /* must be first */ | ||
2501 | union { | ||
2502 | u32 mtu; | ||
2503 | } ext; | ||
2504 | }; | ||
2505 | |||
2499 | struct netdev_notifier_change_info { | 2506 | struct netdev_notifier_change_info { |
2500 | struct netdev_notifier_info info; /* must be first */ | 2507 | struct netdev_notifier_info info; /* must be first */ |
2501 | unsigned int flags_changed; | 2508 | unsigned int flags_changed; |
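Because the embedded netdev_notifier_info is the first member, NETDEV_CHANGEMTU listeners can cast the notifier argument to the extended structure and read the extra u32 supplied by the caller (the pre-change MTU in the dev_set_mtu_ext() hunk further down). A rough sketch of such a handler; the function name is a placeholder:

    #include <linux/netdevice.h>

    static int example_netdev_event(struct notifier_block *nb,
                                    unsigned long event, void *ptr)
    {
        struct net_device *dev = netdev_notifier_info_to_dev(ptr);

        if (event == NETDEV_CHANGEMTU) {
            struct netdev_notifier_info_ext *info_ext = ptr;

            /* ext.mtu carries the extra argument passed by the notifier caller */
            pr_debug("%s: notifier mtu arg %u, current mtu %u\n",
                     dev->name, info_ext->ext.mtu, dev->mtu);
        }
        return NOTIFY_DONE;
    }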
diff --git a/include/linux/serial_sci.h b/include/linux/serial_sci.h index c0e795d95477..1c89611e0e06 100644 --- a/include/linux/serial_sci.h +++ b/include/linux/serial_sci.h | |||
@@ -36,6 +36,7 @@ enum { | |||
36 | SCIx_SH4_SCIF_FIFODATA_REGTYPE, | 36 | SCIx_SH4_SCIF_FIFODATA_REGTYPE, |
37 | SCIx_SH7705_SCIF_REGTYPE, | 37 | SCIx_SH7705_SCIF_REGTYPE, |
38 | SCIx_HSCIF_REGTYPE, | 38 | SCIx_HSCIF_REGTYPE, |
39 | SCIx_RZ_SCIFA_REGTYPE, | ||
39 | 40 | ||
40 | SCIx_NR_REGTYPES, | 41 | SCIx_NR_REGTYPES, |
41 | }; | 42 | }; |
diff --git a/include/linux/suspend.h b/include/linux/suspend.h index 5a28ac9284f0..3f529ad9a9d2 100644 --- a/include/linux/suspend.h +++ b/include/linux/suspend.h | |||
@@ -251,6 +251,7 @@ static inline bool idle_should_enter_s2idle(void) | |||
251 | return unlikely(s2idle_state == S2IDLE_STATE_ENTER); | 251 | return unlikely(s2idle_state == S2IDLE_STATE_ENTER); |
252 | } | 252 | } |
253 | 253 | ||
254 | extern bool pm_suspend_via_s2idle(void); | ||
254 | extern void __init pm_states_init(void); | 255 | extern void __init pm_states_init(void); |
255 | extern void s2idle_set_ops(const struct platform_s2idle_ops *ops); | 256 | extern void s2idle_set_ops(const struct platform_s2idle_ops *ops); |
256 | extern void s2idle_wake(void); | 257 | extern void s2idle_wake(void); |
@@ -282,6 +283,7 @@ static inline void pm_set_suspend_via_firmware(void) {} | |||
282 | static inline void pm_set_resume_via_firmware(void) {} | 283 | static inline void pm_set_resume_via_firmware(void) {} |
283 | static inline bool pm_suspend_via_firmware(void) { return false; } | 284 | static inline bool pm_suspend_via_firmware(void) { return false; } |
284 | static inline bool pm_resume_via_firmware(void) { return false; } | 285 | static inline bool pm_resume_via_firmware(void) { return false; } |
286 | static inline bool pm_suspend_via_s2idle(void) { return false; } | ||
285 | 287 | ||
286 | static inline void suspend_set_ops(const struct platform_suspend_ops *ops) {} | 288 | static inline void suspend_set_ops(const struct platform_suspend_ops *ops) {} |
287 | static inline int pm_suspend(suspend_state_t state) { return -ENOSYS; } | 289 | static inline int pm_suspend(suspend_state_t state) { return -ENOSYS; } |
diff --git a/include/net/devlink.h b/include/net/devlink.h index 9a70755ad1c2..45db0c79462d 100644 --- a/include/net/devlink.h +++ b/include/net/devlink.h | |||
@@ -298,7 +298,7 @@ struct devlink_resource { | |||
298 | 298 | ||
299 | #define DEVLINK_RESOURCE_ID_PARENT_TOP 0 | 299 | #define DEVLINK_RESOURCE_ID_PARENT_TOP 0 |
300 | 300 | ||
301 | #define DEVLINK_PARAM_MAX_STRING_VALUE 32 | 301 | #define __DEVLINK_PARAM_MAX_STRING_VALUE 32 |
302 | enum devlink_param_type { | 302 | enum devlink_param_type { |
303 | DEVLINK_PARAM_TYPE_U8, | 303 | DEVLINK_PARAM_TYPE_U8, |
304 | DEVLINK_PARAM_TYPE_U16, | 304 | DEVLINK_PARAM_TYPE_U16, |
@@ -311,7 +311,7 @@ union devlink_param_value { | |||
311 | u8 vu8; | 311 | u8 vu8; |
312 | u16 vu16; | 312 | u16 vu16; |
313 | u32 vu32; | 313 | u32 vu32; |
314 | const char *vstr; | 314 | char vstr[__DEVLINK_PARAM_MAX_STRING_VALUE]; |
315 | bool vbool; | 315 | bool vbool; |
316 | }; | 316 | }; |
317 | 317 | ||
@@ -568,6 +568,8 @@ int devlink_param_driverinit_value_get(struct devlink *devlink, u32 param_id, | |||
568 | int devlink_param_driverinit_value_set(struct devlink *devlink, u32 param_id, | 568 | int devlink_param_driverinit_value_set(struct devlink *devlink, u32 param_id, |
569 | union devlink_param_value init_val); | 569 | union devlink_param_value init_val); |
570 | void devlink_param_value_changed(struct devlink *devlink, u32 param_id); | 570 | void devlink_param_value_changed(struct devlink *devlink, u32 param_id); |
571 | void devlink_param_value_str_fill(union devlink_param_value *dst_val, | ||
572 | const char *src); | ||
571 | struct devlink_region *devlink_region_create(struct devlink *devlink, | 573 | struct devlink_region *devlink_region_create(struct devlink *devlink, |
572 | const char *region_name, | 574 | const char *region_name, |
573 | u32 region_max_snapshots, | 575 | u32 region_max_snapshots, |
@@ -804,6 +806,12 @@ devlink_param_value_changed(struct devlink *devlink, u32 param_id) | |||
804 | { | 806 | { |
805 | } | 807 | } |
806 | 808 | ||
809 | static inline void | ||
810 | devlink_param_value_str_fill(union devlink_param_value *dst_val, | ||
811 | const char *src) | ||
812 | { | ||
813 | } | ||
814 | |||
807 | static inline struct devlink_region * | 815 | static inline struct devlink_region * |
808 | devlink_region_create(struct devlink *devlink, | 816 | devlink_region_create(struct devlink *devlink, |
809 | const char *region_name, | 817 | const char *region_name, |
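With vstr now an embedded buffer rather than a pointer, string values are copied into the union, and drivers fill driverinit defaults through the new helper. A minimal sketch, assuming a driver that has already registered a string parameter; the id and value are placeholders:

    #include <net/devlink.h>

    static int example_param_set_default(struct devlink *devlink, u32 param_id)
    {
        union devlink_param_value value;

        /* copies at most __DEVLINK_PARAM_MAX_STRING_VALUE - 1 characters */
        devlink_param_value_str_fill(&value, "default-profile");
        return devlink_param_driverinit_value_set(devlink, param_id, value);
    }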
diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h index 9846b79c9ee1..852e4ebf2209 100644 --- a/include/net/ip_fib.h +++ b/include/net/ip_fib.h | |||
@@ -395,6 +395,7 @@ int ip_fib_check_default(__be32 gw, struct net_device *dev); | |||
395 | int fib_sync_down_dev(struct net_device *dev, unsigned long event, bool force); | 395 | int fib_sync_down_dev(struct net_device *dev, unsigned long event, bool force); |
396 | int fib_sync_down_addr(struct net_device *dev, __be32 local); | 396 | int fib_sync_down_addr(struct net_device *dev, __be32 local); |
397 | int fib_sync_up(struct net_device *dev, unsigned int nh_flags); | 397 | int fib_sync_up(struct net_device *dev, unsigned int nh_flags); |
398 | void fib_sync_mtu(struct net_device *dev, u32 orig_mtu); | ||
398 | 399 | ||
399 | #ifdef CONFIG_IP_ROUTE_MULTIPATH | 400 | #ifdef CONFIG_IP_ROUTE_MULTIPATH |
400 | int fib_multipath_hash(const struct net *net, const struct flowi4 *fl4, | 401 | int fib_multipath_hash(const struct net *net, const struct flowi4 *fl4, |
diff --git a/include/soc/fsl/bman.h b/include/soc/fsl/bman.h index eaaf56df4086..5b99cb2ea5ef 100644 --- a/include/soc/fsl/bman.h +++ b/include/soc/fsl/bman.h | |||
@@ -126,4 +126,12 @@ int bman_release(struct bman_pool *pool, const struct bm_buffer *bufs, u8 num); | |||
126 | */ | 126 | */ |
127 | int bman_acquire(struct bman_pool *pool, struct bm_buffer *bufs, u8 num); | 127 | int bman_acquire(struct bman_pool *pool, struct bm_buffer *bufs, u8 num); |
128 | 128 | ||
129 | /** | ||
130 | * bman_is_probed - Check if bman is probed | ||
131 | * | ||
132 | * Returns 1 if the bman driver successfully probed, -1 if the bman driver | ||
133 | * failed to probe, or 0 if the bman driver has not been probed yet. | ||
134 | */ | ||
135 | int bman_is_probed(void); | ||
136 | |||
129 | #endif /* __FSL_BMAN_H */ | 137 | #endif /* __FSL_BMAN_H */ |
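The tri-state return value lets datapath drivers that depend on the buffer manager defer their own probe until it is ready. A rough sketch of that pattern; the function name is a placeholder:

    #include <linux/platform_device.h>
    #include <soc/fsl/bman.h>

    static int example_probe(struct platform_device *pdev)
    {
        int ret = bman_is_probed();

        if (!ret)
            return -EPROBE_DEFER;   /* bman has not probed yet, retry later */
        if (ret < 0)
            return -ENODEV;         /* bman probe failed, nothing to attach to */

        /* ... the driver's normal probe path ... */
        return 0;
    }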
diff --git a/include/soc/fsl/qman.h b/include/soc/fsl/qman.h index d4dfefdee6c1..597783b8a3a0 100644 --- a/include/soc/fsl/qman.h +++ b/include/soc/fsl/qman.h | |||
@@ -1186,4 +1186,12 @@ int qman_alloc_cgrid_range(u32 *result, u32 count); | |||
1186 | */ | 1186 | */ |
1187 | int qman_release_cgrid(u32 id); | 1187 | int qman_release_cgrid(u32 id); |
1188 | 1188 | ||
1189 | /** | ||
1190 | * qman_is_probed - Check if qman is probed | ||
1191 | * | ||
1192 | * Returns 1 if the qman driver successfully probed, -1 if the qman driver | ||
1193 | * failed to probe, or 0 if the qman driver has not been probed yet. | ||
1194 | */ | ||
1195 | int qman_is_probed(void); | ||
1196 | |||
1189 | #endif /* __FSL_QMAN_H */ | 1197 | #endif /* __FSL_QMAN_H */ |
diff --git a/include/trace/events/rxrpc.h b/include/trace/events/rxrpc.h index 837393fa897b..573d5b901fb1 100644 --- a/include/trace/events/rxrpc.h +++ b/include/trace/events/rxrpc.h | |||
@@ -931,6 +931,7 @@ TRACE_EVENT(rxrpc_tx_packet, | |||
931 | TP_fast_assign( | 931 | TP_fast_assign( |
932 | __entry->call = call_id; | 932 | __entry->call = call_id; |
933 | memcpy(&__entry->whdr, whdr, sizeof(__entry->whdr)); | 933 | memcpy(&__entry->whdr, whdr, sizeof(__entry->whdr)); |
934 | __entry->where = where; | ||
934 | ), | 935 | ), |
935 | 936 | ||
936 | TP_printk("c=%08x %08x:%08x:%08x:%04x %08x %08x %02x %02x %s %s", | 937 | TP_printk("c=%08x %08x:%08x:%08x:%04x %08x %08x %02x %02x %s %s", |
diff --git a/include/uapi/linux/smc_diag.h b/include/uapi/linux/smc_diag.h index ac9e8c96d9bd..8cb3a6fef553 100644 --- a/include/uapi/linux/smc_diag.h +++ b/include/uapi/linux/smc_diag.h | |||
@@ -18,14 +18,17 @@ struct smc_diag_req { | |||
18 | * on the internal clcsock, and more SMC-related socket data | 18 | * on the internal clcsock, and more SMC-related socket data |
19 | */ | 19 | */ |
20 | struct smc_diag_msg { | 20 | struct smc_diag_msg { |
21 | __u8 diag_family; | 21 | __u8 diag_family; |
22 | __u8 diag_state; | 22 | __u8 diag_state; |
23 | __u8 diag_mode; | 23 | union { |
24 | __u8 diag_shutdown; | 24 | __u8 diag_mode; |
25 | __u8 diag_fallback; /* the old name of the field */ | ||
26 | }; | ||
27 | __u8 diag_shutdown; | ||
25 | struct inet_diag_sockid id; | 28 | struct inet_diag_sockid id; |
26 | 29 | ||
27 | __u32 diag_uid; | 30 | __u32 diag_uid; |
28 | __u64 diag_inode; | 31 | __aligned_u64 diag_inode; |
29 | }; | 32 | }; |
30 | 33 | ||
31 | /* Mode of a connection */ | 34 | /* Mode of a connection */ |
@@ -99,11 +102,11 @@ struct smc_diag_fallback { | |||
99 | }; | 102 | }; |
100 | 103 | ||
101 | struct smcd_diag_dmbinfo { /* SMC-D Socket internals */ | 104 | struct smcd_diag_dmbinfo { /* SMC-D Socket internals */ |
102 | __u32 linkid; /* Link identifier */ | 105 | __u32 linkid; /* Link identifier */ |
103 | __u64 peer_gid; /* Peer GID */ | 106 | __aligned_u64 peer_gid; /* Peer GID */ |
104 | __u64 my_gid; /* My GID */ | 107 | __aligned_u64 my_gid; /* My GID */ |
105 | __u64 token; /* Token of DMB */ | 108 | __aligned_u64 token; /* Token of DMB */ |
106 | __u64 peer_token; /* Token of remote DMBE */ | 109 | __aligned_u64 peer_token; /* Token of remote DMBE */ |
107 | }; | 110 | }; |
108 | 111 | ||
109 | #endif /* _UAPI_SMC_DIAG_H_ */ | 112 | #endif /* _UAPI_SMC_DIAG_H_ */ |
diff --git a/include/uapi/linux/udp.h b/include/uapi/linux/udp.h index 09d00f8c442b..09502de447f5 100644 --- a/include/uapi/linux/udp.h +++ b/include/uapi/linux/udp.h | |||
@@ -40,5 +40,6 @@ struct udphdr { | |||
40 | #define UDP_ENCAP_L2TPINUDP 3 /* rfc2661 */ | 40 | #define UDP_ENCAP_L2TPINUDP 3 /* rfc2661 */ |
41 | #define UDP_ENCAP_GTP0 4 /* GSM TS 09.60 */ | 41 | #define UDP_ENCAP_GTP0 4 /* GSM TS 09.60 */ |
42 | #define UDP_ENCAP_GTP1U 5 /* 3GPP TS 29.060 */ | 42 | #define UDP_ENCAP_GTP1U 5 /* 3GPP TS 29.060 */ |
43 | #define UDP_ENCAP_RXRPC 6 | ||
43 | 44 | ||
44 | #endif /* _UAPI_LINUX_UDP_H */ | 45 | #endif /* _UAPI_LINUX_UDP_H */ |
diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c index aae10baf1902..4a3dae2a8283 100644 --- a/kernel/cgroup/cgroup.c +++ b/kernel/cgroup/cgroup.c | |||
@@ -2836,11 +2836,12 @@ restart: | |||
2836 | } | 2836 | } |
2837 | 2837 | ||
2838 | /** | 2838 | /** |
2839 | * cgroup_save_control - save control masks of a subtree | 2839 | * cgroup_save_control - save control masks and dom_cgrp of a subtree |
2840 | * @cgrp: root of the target subtree | 2840 | * @cgrp: root of the target subtree |
2841 | * | 2841 | * |
2842 | * Save ->subtree_control and ->subtree_ss_mask to the respective old_ | 2842 | * Save ->subtree_control, ->subtree_ss_mask and ->dom_cgrp to the |
2843 | * prefixed fields for @cgrp's subtree including @cgrp itself. | 2843 | * respective old_ prefixed fields for @cgrp's subtree including @cgrp |
2844 | * itself. | ||
2844 | */ | 2845 | */ |
2845 | static void cgroup_save_control(struct cgroup *cgrp) | 2846 | static void cgroup_save_control(struct cgroup *cgrp) |
2846 | { | 2847 | { |
@@ -2850,6 +2851,7 @@ static void cgroup_save_control(struct cgroup *cgrp) | |||
2850 | cgroup_for_each_live_descendant_pre(dsct, d_css, cgrp) { | 2851 | cgroup_for_each_live_descendant_pre(dsct, d_css, cgrp) { |
2851 | dsct->old_subtree_control = dsct->subtree_control; | 2852 | dsct->old_subtree_control = dsct->subtree_control; |
2852 | dsct->old_subtree_ss_mask = dsct->subtree_ss_mask; | 2853 | dsct->old_subtree_ss_mask = dsct->subtree_ss_mask; |
2854 | dsct->old_dom_cgrp = dsct->dom_cgrp; | ||
2853 | } | 2855 | } |
2854 | } | 2856 | } |
2855 | 2857 | ||
@@ -2875,11 +2877,12 @@ static void cgroup_propagate_control(struct cgroup *cgrp) | |||
2875 | } | 2877 | } |
2876 | 2878 | ||
2877 | /** | 2879 | /** |
2878 | * cgroup_restore_control - restore control masks of a subtree | 2880 | * cgroup_restore_control - restore control masks and dom_cgrp of a subtree |
2879 | * @cgrp: root of the target subtree | 2881 | * @cgrp: root of the target subtree |
2880 | * | 2882 | * |
2881 | * Restore ->subtree_control and ->subtree_ss_mask from the respective old_ | 2883 | * Restore ->subtree_control, ->subtree_ss_mask and ->dom_cgrp from the |
2882 | * prefixed fields for @cgrp's subtree including @cgrp itself. | 2884 | * respective old_ prefixed fields for @cgrp's subtree including @cgrp |
2885 | * itself. | ||
2883 | */ | 2886 | */ |
2884 | static void cgroup_restore_control(struct cgroup *cgrp) | 2887 | static void cgroup_restore_control(struct cgroup *cgrp) |
2885 | { | 2888 | { |
@@ -2889,6 +2892,7 @@ static void cgroup_restore_control(struct cgroup *cgrp) | |||
2889 | cgroup_for_each_live_descendant_post(dsct, d_css, cgrp) { | 2892 | cgroup_for_each_live_descendant_post(dsct, d_css, cgrp) { |
2890 | dsct->subtree_control = dsct->old_subtree_control; | 2893 | dsct->subtree_control = dsct->old_subtree_control; |
2891 | dsct->subtree_ss_mask = dsct->old_subtree_ss_mask; | 2894 | dsct->subtree_ss_mask = dsct->old_subtree_ss_mask; |
2895 | dsct->dom_cgrp = dsct->old_dom_cgrp; | ||
2892 | } | 2896 | } |
2893 | } | 2897 | } |
2894 | 2898 | ||
@@ -3196,6 +3200,8 @@ static int cgroup_enable_threaded(struct cgroup *cgrp) | |||
3196 | { | 3200 | { |
3197 | struct cgroup *parent = cgroup_parent(cgrp); | 3201 | struct cgroup *parent = cgroup_parent(cgrp); |
3198 | struct cgroup *dom_cgrp = parent->dom_cgrp; | 3202 | struct cgroup *dom_cgrp = parent->dom_cgrp; |
3203 | struct cgroup *dsct; | ||
3204 | struct cgroup_subsys_state *d_css; | ||
3199 | int ret; | 3205 | int ret; |
3200 | 3206 | ||
3201 | lockdep_assert_held(&cgroup_mutex); | 3207 | lockdep_assert_held(&cgroup_mutex); |
@@ -3225,12 +3231,13 @@ static int cgroup_enable_threaded(struct cgroup *cgrp) | |||
3225 | */ | 3231 | */ |
3226 | cgroup_save_control(cgrp); | 3232 | cgroup_save_control(cgrp); |
3227 | 3233 | ||
3228 | cgrp->dom_cgrp = dom_cgrp; | 3234 | cgroup_for_each_live_descendant_pre(dsct, d_css, cgrp) |
3235 | if (dsct == cgrp || cgroup_is_threaded(dsct)) | ||
3236 | dsct->dom_cgrp = dom_cgrp; | ||
3237 | |||
3229 | ret = cgroup_apply_control(cgrp); | 3238 | ret = cgroup_apply_control(cgrp); |
3230 | if (!ret) | 3239 | if (!ret) |
3231 | parent->nr_threaded_children++; | 3240 | parent->nr_threaded_children++; |
3232 | else | ||
3233 | cgrp->dom_cgrp = cgrp; | ||
3234 | 3241 | ||
3235 | cgroup_finalize_control(cgrp, ret); | 3242 | cgroup_finalize_control(cgrp, ret); |
3236 | return ret; | 3243 | return ret; |
diff --git a/kernel/power/suspend.c b/kernel/power/suspend.c index 5342f6fc022e..0bd595a0b610 100644 --- a/kernel/power/suspend.c +++ b/kernel/power/suspend.c | |||
@@ -63,6 +63,12 @@ static DECLARE_SWAIT_QUEUE_HEAD(s2idle_wait_head); | |||
63 | enum s2idle_states __read_mostly s2idle_state; | 63 | enum s2idle_states __read_mostly s2idle_state; |
64 | static DEFINE_RAW_SPINLOCK(s2idle_lock); | 64 | static DEFINE_RAW_SPINLOCK(s2idle_lock); |
65 | 65 | ||
66 | bool pm_suspend_via_s2idle(void) | ||
67 | { | ||
68 | return mem_sleep_current == PM_SUSPEND_TO_IDLE; | ||
69 | } | ||
70 | EXPORT_SYMBOL_GPL(pm_suspend_via_s2idle); | ||
71 | |||
66 | void s2idle_set_ops(const struct platform_s2idle_ops *ops) | 72 | void s2idle_set_ops(const struct platform_s2idle_ops *ops) |
67 | { | 73 | { |
68 | lock_system_sleep(); | 74 | lock_system_sleep(); |
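pm_suspend_via_s2idle() gives drivers a way to tell suspend-to-idle apart from deeper sleep states when they arm wakeup sources. A minimal sketch, assuming a typical dev_pm_ops suspend callback; both example_arm_* helpers are hypothetical:

    #include <linux/device.h>
    #include <linux/suspend.h>

    static int example_suspend(struct device *dev)
    {
        if (pm_suspend_via_s2idle()) {
            /* suspend-to-idle: the device stays powered, keep the IRQ armed */
            example_arm_irq_wakeup(dev);    /* hypothetical helper */
        } else {
            /* deeper states: switch to an out-of-band wakeup source */
            example_arm_pin_wakeup(dev);    /* hypothetical helper */
        }
        return 0;
    }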
diff --git a/lib/Makefile b/lib/Makefile index ca3f7ebb900d..423876446810 100644 --- a/lib/Makefile +++ b/lib/Makefile | |||
@@ -119,7 +119,6 @@ obj-$(CONFIG_ZLIB_INFLATE) += zlib_inflate/ | |||
119 | obj-$(CONFIG_ZLIB_DEFLATE) += zlib_deflate/ | 119 | obj-$(CONFIG_ZLIB_DEFLATE) += zlib_deflate/ |
120 | obj-$(CONFIG_REED_SOLOMON) += reed_solomon/ | 120 | obj-$(CONFIG_REED_SOLOMON) += reed_solomon/ |
121 | obj-$(CONFIG_BCH) += bch.o | 121 | obj-$(CONFIG_BCH) += bch.o |
122 | CFLAGS_bch.o := $(call cc-option,-Wframe-larger-than=4500) | ||
123 | obj-$(CONFIG_LZO_COMPRESS) += lzo/ | 122 | obj-$(CONFIG_LZO_COMPRESS) += lzo/ |
124 | obj-$(CONFIG_LZO_DECOMPRESS) += lzo/ | 123 | obj-$(CONFIG_LZO_DECOMPRESS) += lzo/ |
125 | obj-$(CONFIG_LZ4_COMPRESS) += lz4/ | 124 | obj-$(CONFIG_LZ4_COMPRESS) += lz4/ |
@@ -79,20 +79,19 @@ | |||
79 | #define GF_T(_p) (CONFIG_BCH_CONST_T) | 79 | #define GF_T(_p) (CONFIG_BCH_CONST_T) |
80 | #define GF_N(_p) ((1 << (CONFIG_BCH_CONST_M))-1) | 80 | #define GF_N(_p) ((1 << (CONFIG_BCH_CONST_M))-1) |
81 | #define BCH_MAX_M (CONFIG_BCH_CONST_M) | 81 | #define BCH_MAX_M (CONFIG_BCH_CONST_M) |
82 | #define BCH_MAX_T (CONFIG_BCH_CONST_T) | ||
82 | #else | 83 | #else |
83 | #define GF_M(_p) ((_p)->m) | 84 | #define GF_M(_p) ((_p)->m) |
84 | #define GF_T(_p) ((_p)->t) | 85 | #define GF_T(_p) ((_p)->t) |
85 | #define GF_N(_p) ((_p)->n) | 86 | #define GF_N(_p) ((_p)->n) |
86 | #define BCH_MAX_M 15 | 87 | #define BCH_MAX_M 15 /* 2KB */ |
88 | #define BCH_MAX_T 64 /* 64 bit correction */ | ||
87 | #endif | 89 | #endif |
88 | 90 | ||
89 | #define BCH_MAX_T (((1 << BCH_MAX_M) - 1) / BCH_MAX_M) | ||
90 | |||
91 | #define BCH_ECC_WORDS(_p) DIV_ROUND_UP(GF_M(_p)*GF_T(_p), 32) | 91 | #define BCH_ECC_WORDS(_p) DIV_ROUND_UP(GF_M(_p)*GF_T(_p), 32) |
92 | #define BCH_ECC_BYTES(_p) DIV_ROUND_UP(GF_M(_p)*GF_T(_p), 8) | 92 | #define BCH_ECC_BYTES(_p) DIV_ROUND_UP(GF_M(_p)*GF_T(_p), 8) |
93 | 93 | ||
94 | #define BCH_ECC_MAX_WORDS DIV_ROUND_UP(BCH_MAX_M * BCH_MAX_T, 32) | 94 | #define BCH_ECC_MAX_WORDS DIV_ROUND_UP(BCH_MAX_M * BCH_MAX_T, 32) |
95 | #define BCH_ECC_MAX_BYTES DIV_ROUND_UP(BCH_MAX_M * BCH_MAX_T, 8) | ||
96 | 95 | ||
97 | #ifndef dbg | 96 | #ifndef dbg |
98 | #define dbg(_fmt, args...) do {} while (0) | 97 | #define dbg(_fmt, args...) do {} while (0) |
@@ -202,6 +201,9 @@ void encode_bch(struct bch_control *bch, const uint8_t *data, | |||
202 | const uint32_t * const tab3 = tab2 + 256*(l+1); | 201 | const uint32_t * const tab3 = tab2 + 256*(l+1); |
203 | const uint32_t *pdata, *p0, *p1, *p2, *p3; | 202 | const uint32_t *pdata, *p0, *p1, *p2, *p3; |
204 | 203 | ||
204 | if (WARN_ON(r_bytes > sizeof(r))) | ||
205 | return; | ||
206 | |||
205 | if (ecc) { | 207 | if (ecc) { |
206 | /* load ecc parity bytes into internal 32-bit buffer */ | 208 | /* load ecc parity bytes into internal 32-bit buffer */ |
207 | load_ecc8(bch, bch->ecc_buf, ecc); | 209 | load_ecc8(bch, bch->ecc_buf, ecc); |
@@ -1285,6 +1287,13 @@ struct bch_control *init_bch(int m, int t, unsigned int prim_poly) | |||
1285 | */ | 1287 | */ |
1286 | goto fail; | 1288 | goto fail; |
1287 | 1289 | ||
1290 | if (t > BCH_MAX_T) | ||
1291 | /* | ||
1292 | * we can support larger than 64 bits if necessary, at the | ||
1293 | * cost of higher stack usage. | ||
1294 | */ | ||
1295 | goto fail; | ||
1296 | |||
1288 | /* sanity checks */ | 1297 | /* sanity checks */ |
1289 | if ((t < 1) || (m*t >= ((1 << m)-1))) | 1298 | if ((t < 1) || (m*t >= ((1 << m)-1))) |
1290 | /* invalid t value */ | 1299 | /* invalid t value */ |
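Since the parity buffers are now sized from BCH_MAX_T, init_bch() rejects correction strengths above 64 bits in the non-const configuration instead of growing the stack. A rough sketch of a caller handling that failure; the parameters are illustrative only:

    #include <linux/bch.h>
    #include <linux/printk.h>

    static struct bch_control *example_init(void)
    {
        /* m = 14 gives 16383-bit codewords; t = 96 now exceeds BCH_MAX_T */
        struct bch_control *bch = init_bch(14, 96, 0);

        if (!bch) {
            pr_warn("bch: t=96 rejected, falling back to t=64\n");
            bch = init_bch(14, 64, 0);
        }
        return bch;
    }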
diff --git a/lib/vsprintf.c b/lib/vsprintf.c index d5b3a3f95c01..812e59e13fe6 100644 --- a/lib/vsprintf.c +++ b/lib/vsprintf.c | |||
@@ -2794,7 +2794,7 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf) | |||
2794 | copy = end - str; | 2794 | copy = end - str; |
2795 | memcpy(str, args, copy); | 2795 | memcpy(str, args, copy); |
2796 | str += len; | 2796 | str += len; |
2797 | args += len; | 2797 | args += len + 1; |
2798 | } | 2798 | } |
2799 | } | 2799 | } |
2800 | if (process) | 2800 | if (process) |
diff --git a/lib/xz/xz_crc32.c b/lib/xz/xz_crc32.c index 25a5d87e2e4c..912aae5fa09e 100644 --- a/lib/xz/xz_crc32.c +++ b/lib/xz/xz_crc32.c | |||
@@ -15,7 +15,6 @@ | |||
15 | * but they are bigger and use more memory for the lookup table. | 15 | * but they are bigger and use more memory for the lookup table. |
16 | */ | 16 | */ |
17 | 17 | ||
18 | #include <linux/crc32poly.h> | ||
19 | #include "xz_private.h" | 18 | #include "xz_private.h" |
20 | 19 | ||
21 | /* | 20 | /* |
diff --git a/lib/xz/xz_private.h b/lib/xz/xz_private.h index 482b90f363fe..09360ebb510e 100644 --- a/lib/xz/xz_private.h +++ b/lib/xz/xz_private.h | |||
@@ -102,6 +102,10 @@ | |||
102 | # endif | 102 | # endif |
103 | #endif | 103 | #endif |
104 | 104 | ||
105 | #ifndef CRC32_POLY_LE | ||
106 | #define CRC32_POLY_LE 0xedb88320 | ||
107 | #endif | ||
108 | |||
105 | /* | 109 | /* |
106 | * Allocate memory for LZMA2 decoder. xz_dec_lzma2_reset() must be used | 110 | * Allocate memory for LZMA2 decoder. xz_dec_lzma2_reset() must be used |
107 | * before calling xz_dec_lzma2_run(). | 111 | * before calling xz_dec_lzma2_run(). |
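Dropping the crc32poly.h include keeps the pre-boot decompressor builds working, with the fallback definition above supplying the little-endian polynomial. For reference, the table construction that consumes it follows the standard reflected CRC-32 scheme; a plain C sketch, not the kernel's exact loop:

    #include <stdint.h>

    #define CRC32_POLY_LE 0xedb88320

    static uint32_t crc32_table[256];

    static void crc32_init_table(void)
    {
        uint32_t i, j, r;

        for (i = 0; i < 256; i++) {
            r = i;
            for (j = 0; j < 8; j++)
                r = (r & 1) ? (r >> 1) ^ CRC32_POLY_LE : r >> 1;
            crc32_table[i] = r;
        }
    }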
diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 706a738c0aee..e2ef1c17942f 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c | |||
@@ -6193,15 +6193,6 @@ static unsigned long __init calc_memmap_size(unsigned long spanned_pages, | |||
6193 | return PAGE_ALIGN(pages * sizeof(struct page)) >> PAGE_SHIFT; | 6193 | return PAGE_ALIGN(pages * sizeof(struct page)) >> PAGE_SHIFT; |
6194 | } | 6194 | } |
6195 | 6195 | ||
6196 | #ifdef CONFIG_NUMA_BALANCING | ||
6197 | static void pgdat_init_numabalancing(struct pglist_data *pgdat) | ||
6198 | { | ||
6199 | spin_lock_init(&pgdat->numabalancing_migrate_lock); | ||
6200 | } | ||
6201 | #else | ||
6202 | static void pgdat_init_numabalancing(struct pglist_data *pgdat) {} | ||
6203 | #endif | ||
6204 | |||
6205 | #ifdef CONFIG_TRANSPARENT_HUGEPAGE | 6196 | #ifdef CONFIG_TRANSPARENT_HUGEPAGE |
6206 | static void pgdat_init_split_queue(struct pglist_data *pgdat) | 6197 | static void pgdat_init_split_queue(struct pglist_data *pgdat) |
6207 | { | 6198 | { |
@@ -6226,7 +6217,6 @@ static void __meminit pgdat_init_internals(struct pglist_data *pgdat) | |||
6226 | { | 6217 | { |
6227 | pgdat_resize_init(pgdat); | 6218 | pgdat_resize_init(pgdat); |
6228 | 6219 | ||
6229 | pgdat_init_numabalancing(pgdat); | ||
6230 | pgdat_init_split_queue(pgdat); | 6220 | pgdat_init_split_queue(pgdat); |
6231 | pgdat_init_kcompactd(pgdat); | 6221 | pgdat_init_kcompactd(pgdat); |
6232 | 6222 | ||
diff --git a/mm/percpu.c b/mm/percpu.c index a749d4d96e3e..4b90682623e9 100644 --- a/mm/percpu.c +++ b/mm/percpu.c | |||
@@ -1212,6 +1212,7 @@ static void pcpu_free_chunk(struct pcpu_chunk *chunk) | |||
1212 | { | 1212 | { |
1213 | if (!chunk) | 1213 | if (!chunk) |
1214 | return; | 1214 | return; |
1215 | pcpu_mem_free(chunk->md_blocks); | ||
1215 | pcpu_mem_free(chunk->bound_map); | 1216 | pcpu_mem_free(chunk->bound_map); |
1216 | pcpu_mem_free(chunk->alloc_map); | 1217 | pcpu_mem_free(chunk->alloc_map); |
1217 | pcpu_mem_free(chunk); | 1218 | pcpu_mem_free(chunk); |
diff --git a/net/core/dev.c b/net/core/dev.c index 0b2d777e5b9e..a4d39b87b4e5 100644 --- a/net/core/dev.c +++ b/net/core/dev.c | |||
@@ -1752,6 +1752,28 @@ int call_netdevice_notifiers(unsigned long val, struct net_device *dev) | |||
1752 | } | 1752 | } |
1753 | EXPORT_SYMBOL(call_netdevice_notifiers); | 1753 | EXPORT_SYMBOL(call_netdevice_notifiers); |
1754 | 1754 | ||
1755 | /** | ||
1756 | * call_netdevice_notifiers_mtu - call all network notifier blocks | ||
1757 | * @val: value passed unmodified to notifier function | ||
1758 | * @dev: net_device pointer passed unmodified to notifier function | ||
1759 | * @arg: additional u32 argument passed to the notifier function | ||
1760 | * | ||
1761 | * Call all network notifier blocks. Parameters and return value | ||
1762 | * are as for raw_notifier_call_chain(). | ||
1763 | */ | ||
1764 | static int call_netdevice_notifiers_mtu(unsigned long val, | ||
1765 | struct net_device *dev, u32 arg) | ||
1766 | { | ||
1767 | struct netdev_notifier_info_ext info = { | ||
1768 | .info.dev = dev, | ||
1769 | .ext.mtu = arg, | ||
1770 | }; | ||
1771 | |||
1772 | BUILD_BUG_ON(offsetof(struct netdev_notifier_info_ext, info) != 0); | ||
1773 | |||
1774 | return call_netdevice_notifiers_info(val, &info.info); | ||
1775 | } | ||
1776 | |||
1755 | #ifdef CONFIG_NET_INGRESS | 1777 | #ifdef CONFIG_NET_INGRESS |
1756 | static DEFINE_STATIC_KEY_FALSE(ingress_needed_key); | 1778 | static DEFINE_STATIC_KEY_FALSE(ingress_needed_key); |
1757 | 1779 | ||
@@ -7575,14 +7597,16 @@ int dev_set_mtu_ext(struct net_device *dev, int new_mtu, | |||
7575 | err = __dev_set_mtu(dev, new_mtu); | 7597 | err = __dev_set_mtu(dev, new_mtu); |
7576 | 7598 | ||
7577 | if (!err) { | 7599 | if (!err) { |
7578 | err = call_netdevice_notifiers(NETDEV_CHANGEMTU, dev); | 7600 | err = call_netdevice_notifiers_mtu(NETDEV_CHANGEMTU, dev, |
7601 | orig_mtu); | ||
7579 | err = notifier_to_errno(err); | 7602 | err = notifier_to_errno(err); |
7580 | if (err) { | 7603 | if (err) { |
7581 | /* setting mtu back and notifying everyone again, | 7604 | /* setting mtu back and notifying everyone again, |
7582 | * so that they have a chance to revert changes. | 7605 | * so that they have a chance to revert changes. |
7583 | */ | 7606 | */ |
7584 | __dev_set_mtu(dev, orig_mtu); | 7607 | __dev_set_mtu(dev, orig_mtu); |
7585 | call_netdevice_notifiers(NETDEV_CHANGEMTU, dev); | 7608 | call_netdevice_notifiers_mtu(NETDEV_CHANGEMTU, dev, |
7609 | new_mtu); | ||
7586 | } | 7610 | } |
7587 | } | 7611 | } |
7588 | return err; | 7612 | return err; |
diff --git a/net/core/devlink.c b/net/core/devlink.c index 6dae81d65d5c..3a4b29a13d31 100644 --- a/net/core/devlink.c +++ b/net/core/devlink.c | |||
@@ -3012,6 +3012,8 @@ devlink_param_value_get_from_info(const struct devlink_param *param, | |||
3012 | struct genl_info *info, | 3012 | struct genl_info *info, |
3013 | union devlink_param_value *value) | 3013 | union devlink_param_value *value) |
3014 | { | 3014 | { |
3015 | int len; | ||
3016 | |||
3015 | if (param->type != DEVLINK_PARAM_TYPE_BOOL && | 3017 | if (param->type != DEVLINK_PARAM_TYPE_BOOL && |
3016 | !info->attrs[DEVLINK_ATTR_PARAM_VALUE_DATA]) | 3018 | !info->attrs[DEVLINK_ATTR_PARAM_VALUE_DATA]) |
3017 | return -EINVAL; | 3019 | return -EINVAL; |
@@ -3027,10 +3029,13 @@ devlink_param_value_get_from_info(const struct devlink_param *param, | |||
3027 | value->vu32 = nla_get_u32(info->attrs[DEVLINK_ATTR_PARAM_VALUE_DATA]); | 3029 | value->vu32 = nla_get_u32(info->attrs[DEVLINK_ATTR_PARAM_VALUE_DATA]); |
3028 | break; | 3030 | break; |
3029 | case DEVLINK_PARAM_TYPE_STRING: | 3031 | case DEVLINK_PARAM_TYPE_STRING: |
3030 | if (nla_len(info->attrs[DEVLINK_ATTR_PARAM_VALUE_DATA]) > | 3032 | len = strnlen(nla_data(info->attrs[DEVLINK_ATTR_PARAM_VALUE_DATA]), |
3031 | DEVLINK_PARAM_MAX_STRING_VALUE) | 3033 | nla_len(info->attrs[DEVLINK_ATTR_PARAM_VALUE_DATA])); |
3034 | if (len == nla_len(info->attrs[DEVLINK_ATTR_PARAM_VALUE_DATA]) || | ||
3035 | len >= __DEVLINK_PARAM_MAX_STRING_VALUE) | ||
3032 | return -EINVAL; | 3036 | return -EINVAL; |
3033 | value->vstr = nla_data(info->attrs[DEVLINK_ATTR_PARAM_VALUE_DATA]); | 3037 | strcpy(value->vstr, |
3038 | nla_data(info->attrs[DEVLINK_ATTR_PARAM_VALUE_DATA])); | ||
3034 | break; | 3039 | break; |
3035 | case DEVLINK_PARAM_TYPE_BOOL: | 3040 | case DEVLINK_PARAM_TYPE_BOOL: |
3036 | value->vbool = info->attrs[DEVLINK_ATTR_PARAM_VALUE_DATA] ? | 3041 | value->vbool = info->attrs[DEVLINK_ATTR_PARAM_VALUE_DATA] ? |
@@ -3117,7 +3122,10 @@ static int devlink_nl_cmd_param_set_doit(struct sk_buff *skb, | |||
3117 | return -EOPNOTSUPP; | 3122 | return -EOPNOTSUPP; |
3118 | 3123 | ||
3119 | if (cmode == DEVLINK_PARAM_CMODE_DRIVERINIT) { | 3124 | if (cmode == DEVLINK_PARAM_CMODE_DRIVERINIT) { |
3120 | param_item->driverinit_value = value; | 3125 | if (param->type == DEVLINK_PARAM_TYPE_STRING) |
3126 | strcpy(param_item->driverinit_value.vstr, value.vstr); | ||
3127 | else | ||
3128 | param_item->driverinit_value = value; | ||
3121 | param_item->driverinit_value_valid = true; | 3129 | param_item->driverinit_value_valid = true; |
3122 | } else { | 3130 | } else { |
3123 | if (!param->set) | 3131 | if (!param->set) |
@@ -4557,7 +4565,10 @@ int devlink_param_driverinit_value_get(struct devlink *devlink, u32 param_id, | |||
4557 | DEVLINK_PARAM_CMODE_DRIVERINIT)) | 4565 | DEVLINK_PARAM_CMODE_DRIVERINIT)) |
4558 | return -EOPNOTSUPP; | 4566 | return -EOPNOTSUPP; |
4559 | 4567 | ||
4560 | *init_val = param_item->driverinit_value; | 4568 | if (param_item->param->type == DEVLINK_PARAM_TYPE_STRING) |
4569 | strcpy(init_val->vstr, param_item->driverinit_value.vstr); | ||
4570 | else | ||
4571 | *init_val = param_item->driverinit_value; | ||
4561 | 4572 | ||
4562 | return 0; | 4573 | return 0; |
4563 | } | 4574 | } |
@@ -4588,7 +4599,10 @@ int devlink_param_driverinit_value_set(struct devlink *devlink, u32 param_id, | |||
4588 | DEVLINK_PARAM_CMODE_DRIVERINIT)) | 4599 | DEVLINK_PARAM_CMODE_DRIVERINIT)) |
4589 | return -EOPNOTSUPP; | 4600 | return -EOPNOTSUPP; |
4590 | 4601 | ||
4591 | param_item->driverinit_value = init_val; | 4602 | if (param_item->param->type == DEVLINK_PARAM_TYPE_STRING) |
4603 | strcpy(param_item->driverinit_value.vstr, init_val.vstr); | ||
4604 | else | ||
4605 | param_item->driverinit_value = init_val; | ||
4592 | param_item->driverinit_value_valid = true; | 4606 | param_item->driverinit_value_valid = true; |
4593 | 4607 | ||
4594 | devlink_param_notify(devlink, param_item, DEVLINK_CMD_PARAM_NEW); | 4608 | devlink_param_notify(devlink, param_item, DEVLINK_CMD_PARAM_NEW); |
@@ -4621,6 +4635,23 @@ void devlink_param_value_changed(struct devlink *devlink, u32 param_id) | |||
4621 | EXPORT_SYMBOL_GPL(devlink_param_value_changed); | 4635 | EXPORT_SYMBOL_GPL(devlink_param_value_changed); |
4622 | 4636 | ||
4623 | /** | 4637 | /** |
4638 | * devlink_param_value_str_fill - Safely fill up the string, preventing | ||
4639 | * overflow of the preallocated buffer | ||
4640 | * | ||
4641 | * @dst_val: destination devlink_param_value | ||
4642 | * @src: source buffer | ||
4643 | */ | ||
4644 | void devlink_param_value_str_fill(union devlink_param_value *dst_val, | ||
4645 | const char *src) | ||
4646 | { | ||
4647 | size_t len; | ||
4648 | |||
4649 | len = strlcpy(dst_val->vstr, src, __DEVLINK_PARAM_MAX_STRING_VALUE); | ||
4650 | WARN_ON(len >= __DEVLINK_PARAM_MAX_STRING_VALUE); | ||
4651 | } | ||
4652 | EXPORT_SYMBOL_GPL(devlink_param_value_str_fill); | ||
4653 | |||
4654 | /** | ||
4624 | * devlink_region_create - create a new address region | 4655 | * devlink_region_create - create a new address region |
4625 | * | 4656 | * |
4626 | * @devlink: devlink | 4657 | * @devlink: devlink |
diff --git a/net/core/skbuff.c b/net/core/skbuff.c index 0e937d3d85b5..54b961de9538 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c | |||
@@ -4394,14 +4394,16 @@ EXPORT_SYMBOL_GPL(skb_complete_wifi_ack); | |||
4394 | */ | 4394 | */ |
4395 | bool skb_partial_csum_set(struct sk_buff *skb, u16 start, u16 off) | 4395 | bool skb_partial_csum_set(struct sk_buff *skb, u16 start, u16 off) |
4396 | { | 4396 | { |
4397 | if (unlikely(start > skb_headlen(skb)) || | 4397 | u32 csum_end = (u32)start + (u32)off + sizeof(__sum16); |
4398 | unlikely((int)start + off > skb_headlen(skb) - 2)) { | 4398 | u32 csum_start = skb_headroom(skb) + (u32)start; |
4399 | net_warn_ratelimited("bad partial csum: csum=%u/%u len=%u\n", | 4399 | |
4400 | start, off, skb_headlen(skb)); | 4400 | if (unlikely(csum_start > U16_MAX || csum_end > skb_headlen(skb))) { |
4401 | net_warn_ratelimited("bad partial csum: csum=%u/%u headroom=%u headlen=%u\n", | ||
4402 | start, off, skb_headroom(skb), skb_headlen(skb)); | ||
4401 | return false; | 4403 | return false; |
4402 | } | 4404 | } |
4403 | skb->ip_summed = CHECKSUM_PARTIAL; | 4405 | skb->ip_summed = CHECKSUM_PARTIAL; |
4404 | skb->csum_start = skb_headroom(skb) + start; | 4406 | skb->csum_start = csum_start; |
4405 | skb->csum_offset = off; | 4407 | skb->csum_offset = off; |
4406 | skb_set_transport_header(skb, start); | 4408 | skb_set_transport_header(skb, start); |
4407 | return true; | 4409 | return true; |
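The point of widening the arithmetic to u32 is that the headroom plus the caller-supplied start can exceed what the 16-bit csum_start field holds, a case the old check never considered because it ignored the headroom. A small worked example with made-up numbers:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint16_t start = 200, off = 10;
        uint32_t headroom = 65400, headlen = 1000;

        uint32_t csum_end = (uint32_t)start + (uint32_t)off + 2;  /* 212 <= headlen: old check passes */
        uint32_t csum_start = headroom + (uint32_t)start;         /* 65600 > U16_MAX: new check rejects */

        printf("csum_end=%u csum_start=%u\n", csum_end, csum_start);
        return 0;
    }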
diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c index 038f511c73fa..0f1beceb47d5 100644 --- a/net/ipv4/fib_frontend.c +++ b/net/ipv4/fib_frontend.c | |||
@@ -1291,7 +1291,8 @@ static int fib_inetaddr_event(struct notifier_block *this, unsigned long event, | |||
1291 | static int fib_netdev_event(struct notifier_block *this, unsigned long event, void *ptr) | 1291 | static int fib_netdev_event(struct notifier_block *this, unsigned long event, void *ptr) |
1292 | { | 1292 | { |
1293 | struct net_device *dev = netdev_notifier_info_to_dev(ptr); | 1293 | struct net_device *dev = netdev_notifier_info_to_dev(ptr); |
1294 | struct netdev_notifier_changeupper_info *info; | 1294 | struct netdev_notifier_changeupper_info *upper_info = ptr; |
1295 | struct netdev_notifier_info_ext *info_ext = ptr; | ||
1295 | struct in_device *in_dev; | 1296 | struct in_device *in_dev; |
1296 | struct net *net = dev_net(dev); | 1297 | struct net *net = dev_net(dev); |
1297 | unsigned int flags; | 1298 | unsigned int flags; |
@@ -1326,16 +1327,19 @@ static int fib_netdev_event(struct notifier_block *this, unsigned long event, vo | |||
1326 | fib_sync_up(dev, RTNH_F_LINKDOWN); | 1327 | fib_sync_up(dev, RTNH_F_LINKDOWN); |
1327 | else | 1328 | else |
1328 | fib_sync_down_dev(dev, event, false); | 1329 | fib_sync_down_dev(dev, event, false); |
1329 | /* fall through */ | 1330 | rt_cache_flush(net); |
1331 | break; | ||
1330 | case NETDEV_CHANGEMTU: | 1332 | case NETDEV_CHANGEMTU: |
1333 | fib_sync_mtu(dev, info_ext->ext.mtu); | ||
1331 | rt_cache_flush(net); | 1334 | rt_cache_flush(net); |
1332 | break; | 1335 | break; |
1333 | case NETDEV_CHANGEUPPER: | 1336 | case NETDEV_CHANGEUPPER: |
1334 | info = ptr; | 1337 | upper_info = ptr; |
1335 | /* flush all routes if dev is linked to or unlinked from | 1338 | /* flush all routes if dev is linked to or unlinked from |
1336 | * an L3 master device (e.g., VRF) | 1339 | * an L3 master device (e.g., VRF) |
1337 | */ | 1340 | */ |
1338 | if (info->upper_dev && netif_is_l3_master(info->upper_dev)) | 1341 | if (upper_info->upper_dev && |
1342 | netif_is_l3_master(upper_info->upper_dev)) | ||
1339 | fib_disable_ip(dev, NETDEV_DOWN, true); | 1343 | fib_disable_ip(dev, NETDEV_DOWN, true); |
1340 | break; | 1344 | break; |
1341 | } | 1345 | } |
diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c index f8c7ec8171a8..b5c3937ca6ec 100644 --- a/net/ipv4/fib_semantics.c +++ b/net/ipv4/fib_semantics.c | |||
@@ -1457,6 +1457,56 @@ static int call_fib_nh_notifiers(struct fib_nh *fib_nh, | |||
1457 | return NOTIFY_DONE; | 1457 | return NOTIFY_DONE; |
1458 | } | 1458 | } |
1459 | 1459 | ||
1460 | /* Update the PMTU of exceptions when: | ||
1461 | * - the new MTU of the first hop becomes smaller than the PMTU | ||
1462 | * - the old MTU was the same as the PMTU, and it limited discovery of | ||
1463 | * larger MTUs on the path. With that limit raised, we can now | ||
1464 | * discover larger MTUs | ||
1465 | * A special case is locked exceptions, for which the PMTU is smaller | ||
1466 | * than the minimal accepted PMTU: | ||
1467 | * - if the new MTU is greater than the PMTU, don't make any change | ||
1468 | * - otherwise, unlock and set PMTU | ||
1469 | */ | ||
1470 | static void nh_update_mtu(struct fib_nh *nh, u32 new, u32 orig) | ||
1471 | { | ||
1472 | struct fnhe_hash_bucket *bucket; | ||
1473 | int i; | ||
1474 | |||
1475 | bucket = rcu_dereference_protected(nh->nh_exceptions, 1); | ||
1476 | if (!bucket) | ||
1477 | return; | ||
1478 | |||
1479 | for (i = 0; i < FNHE_HASH_SIZE; i++) { | ||
1480 | struct fib_nh_exception *fnhe; | ||
1481 | |||
1482 | for (fnhe = rcu_dereference_protected(bucket[i].chain, 1); | ||
1483 | fnhe; | ||
1484 | fnhe = rcu_dereference_protected(fnhe->fnhe_next, 1)) { | ||
1485 | if (fnhe->fnhe_mtu_locked) { | ||
1486 | if (new <= fnhe->fnhe_pmtu) { | ||
1487 | fnhe->fnhe_pmtu = new; | ||
1488 | fnhe->fnhe_mtu_locked = false; | ||
1489 | } | ||
1490 | } else if (new < fnhe->fnhe_pmtu || | ||
1491 | orig == fnhe->fnhe_pmtu) { | ||
1492 | fnhe->fnhe_pmtu = new; | ||
1493 | } | ||
1494 | } | ||
1495 | } | ||
1496 | } | ||
1497 | |||
1498 | void fib_sync_mtu(struct net_device *dev, u32 orig_mtu) | ||
1499 | { | ||
1500 | unsigned int hash = fib_devindex_hashfn(dev->ifindex); | ||
1501 | struct hlist_head *head = &fib_info_devhash[hash]; | ||
1502 | struct fib_nh *nh; | ||
1503 | |||
1504 | hlist_for_each_entry(nh, head, nh_hash) { | ||
1505 | if (nh->nh_dev == dev) | ||
1506 | nh_update_mtu(nh, dev->mtu, orig_mtu); | ||
1507 | } | ||
1508 | } | ||
1509 | |||
1460 | /* Event force Flags Description | 1510 | /* Event force Flags Description |
1461 | * NETDEV_CHANGE 0 LINKDOWN Carrier OFF, not for scope host | 1511 | * NETDEV_CHANGE 0 LINKDOWN Carrier OFF, not for scope host |
1462 | * NETDEV_DOWN 0 LINKDOWN|DEAD Link down, not for scope host | 1512 | * NETDEV_DOWN 0 LINKDOWN|DEAD Link down, not for scope host |
diff --git a/net/ipv4/route.c b/net/ipv4/route.c index f71d2395c428..c0a9d26c06ce 100644 --- a/net/ipv4/route.c +++ b/net/ipv4/route.c | |||
@@ -1001,21 +1001,22 @@ out: kfree_skb(skb); | |||
1001 | static void __ip_rt_update_pmtu(struct rtable *rt, struct flowi4 *fl4, u32 mtu) | 1001 | static void __ip_rt_update_pmtu(struct rtable *rt, struct flowi4 *fl4, u32 mtu) |
1002 | { | 1002 | { |
1003 | struct dst_entry *dst = &rt->dst; | 1003 | struct dst_entry *dst = &rt->dst; |
1004 | u32 old_mtu = ipv4_mtu(dst); | ||
1004 | struct fib_result res; | 1005 | struct fib_result res; |
1005 | bool lock = false; | 1006 | bool lock = false; |
1006 | 1007 | ||
1007 | if (ip_mtu_locked(dst)) | 1008 | if (ip_mtu_locked(dst)) |
1008 | return; | 1009 | return; |
1009 | 1010 | ||
1010 | if (ipv4_mtu(dst) < mtu) | 1011 | if (old_mtu < mtu) |
1011 | return; | 1012 | return; |
1012 | 1013 | ||
1013 | if (mtu < ip_rt_min_pmtu) { | 1014 | if (mtu < ip_rt_min_pmtu) { |
1014 | lock = true; | 1015 | lock = true; |
1015 | mtu = ip_rt_min_pmtu; | 1016 | mtu = min(old_mtu, ip_rt_min_pmtu); |
1016 | } | 1017 | } |
1017 | 1018 | ||
1018 | if (rt->rt_pmtu == mtu && | 1019 | if (rt->rt_pmtu == mtu && !lock && |
1019 | time_before(jiffies, dst->expires - ip_rt_mtu_expires / 2)) | 1020 | time_before(jiffies, dst->expires - ip_rt_mtu_expires / 2)) |
1020 | return; | 1021 | return; |
1021 | 1022 | ||
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c index 1bec2203d558..cf8252d05a01 100644 --- a/net/ipv4/udp.c +++ b/net/ipv4/udp.c | |||
@@ -1627,7 +1627,7 @@ busy_check: | |||
1627 | *err = error; | 1627 | *err = error; |
1628 | return NULL; | 1628 | return NULL; |
1629 | } | 1629 | } |
1630 | EXPORT_SYMBOL_GPL(__skb_recv_udp); | 1630 | EXPORT_SYMBOL(__skb_recv_udp); |
1631 | 1631 | ||
1632 | /* | 1632 | /* |
1633 | * This should be easy, if there is something there we | 1633 | * This should be easy, if there is something there we |
diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c index 9ba72d94d60f..0783af11b0b7 100644 --- a/net/ipv6/ip6_fib.c +++ b/net/ipv6/ip6_fib.c | |||
@@ -195,6 +195,8 @@ void fib6_info_destroy_rcu(struct rcu_head *head) | |||
195 | *ppcpu_rt = NULL; | 195 | *ppcpu_rt = NULL; |
196 | } | 196 | } |
197 | } | 197 | } |
198 | |||
199 | free_percpu(f6i->rt6i_pcpu); | ||
198 | } | 200 | } |
199 | 201 | ||
200 | lwtstate_put(f6i->fib6_nh.nh_lwtstate); | 202 | lwtstate_put(f6i->fib6_nh.nh_lwtstate); |
diff --git a/net/rds/send.c b/net/rds/send.c index 57b3d5a8b2db..fe785ee819dd 100644 --- a/net/rds/send.c +++ b/net/rds/send.c | |||
@@ -1007,7 +1007,8 @@ static int rds_cmsg_send(struct rds_sock *rs, struct rds_message *rm, | |||
1007 | return ret; | 1007 | return ret; |
1008 | } | 1008 | } |
1009 | 1009 | ||
1010 | static int rds_send_mprds_hash(struct rds_sock *rs, struct rds_connection *conn) | 1010 | static int rds_send_mprds_hash(struct rds_sock *rs, |
1011 | struct rds_connection *conn, int nonblock) | ||
1011 | { | 1012 | { |
1012 | int hash; | 1013 | int hash; |
1013 | 1014 | ||
@@ -1023,10 +1024,16 @@ static int rds_send_mprds_hash(struct rds_sock *rs, struct rds_connection *conn) | |||
1023 | * used. But if we are interrupted, we have to use the zero | 1024 | * used. But if we are interrupted, we have to use the zero |
1024 | * c_path in case the connection ends up being non-MP capable. | 1025 | * c_path in case the connection ends up being non-MP capable. |
1025 | */ | 1026 | */ |
1026 | if (conn->c_npaths == 0) | 1027 | if (conn->c_npaths == 0) { |
1028 | /* Cannot wait for the connection to be made, so just use | ||
1029 | * the base c_path. | ||
1030 | */ | ||
1031 | if (nonblock) | ||
1032 | return 0; | ||
1027 | if (wait_event_interruptible(conn->c_hs_waitq, | 1033 | if (wait_event_interruptible(conn->c_hs_waitq, |
1028 | conn->c_npaths != 0)) | 1034 | conn->c_npaths != 0)) |
1029 | hash = 0; | 1035 | hash = 0; |
1036 | } | ||
1030 | if (conn->c_npaths == 1) | 1037 | if (conn->c_npaths == 1) |
1031 | hash = 0; | 1038 | hash = 0; |
1032 | } | 1039 | } |
@@ -1256,7 +1263,7 @@ int rds_sendmsg(struct socket *sock, struct msghdr *msg, size_t payload_len) | |||
1256 | } | 1263 | } |
1257 | 1264 | ||
1258 | if (conn->c_trans->t_mp_capable) | 1265 | if (conn->c_trans->t_mp_capable) |
1259 | cpath = &conn->c_path[rds_send_mprds_hash(rs, conn)]; | 1266 | cpath = &conn->c_path[rds_send_mprds_hash(rs, conn, nonblock)]; |
1260 | else | 1267 | else |
1261 | cpath = &conn->c_path[0]; | 1268 | cpath = &conn->c_path[0]; |
1262 | 1269 | ||
diff --git a/net/rxrpc/ar-internal.h b/net/rxrpc/ar-internal.h index 76569c178915..8cee7644965c 100644 --- a/net/rxrpc/ar-internal.h +++ b/net/rxrpc/ar-internal.h | |||
@@ -302,6 +302,7 @@ struct rxrpc_peer { | |||
302 | 302 | ||
303 | /* calculated RTT cache */ | 303 | /* calculated RTT cache */ |
304 | #define RXRPC_RTT_CACHE_SIZE 32 | 304 | #define RXRPC_RTT_CACHE_SIZE 32 |
305 | spinlock_t rtt_input_lock; /* RTT lock for input routine */ | ||
305 | ktime_t rtt_last_req; /* Time of last RTT request */ | 306 | ktime_t rtt_last_req; /* Time of last RTT request */ |
306 | u64 rtt; /* Current RTT estimate (in nS) */ | 307 | u64 rtt; /* Current RTT estimate (in nS) */ |
307 | u64 rtt_sum; /* Sum of cache contents */ | 308 | u64 rtt_sum; /* Sum of cache contents */ |
@@ -442,17 +443,17 @@ struct rxrpc_connection { | |||
442 | spinlock_t state_lock; /* state-change lock */ | 443 | spinlock_t state_lock; /* state-change lock */ |
443 | enum rxrpc_conn_cache_state cache_state; | 444 | enum rxrpc_conn_cache_state cache_state; |
444 | enum rxrpc_conn_proto_state state; /* current state of connection */ | 445 | enum rxrpc_conn_proto_state state; /* current state of connection */ |
445 | u32 local_abort; /* local abort code */ | 446 | u32 abort_code; /* Abort code of connection abort */ |
446 | u32 remote_abort; /* remote abort code */ | ||
447 | int debug_id; /* debug ID for printks */ | 447 | int debug_id; /* debug ID for printks */ |
448 | atomic_t serial; /* packet serial number counter */ | 448 | atomic_t serial; /* packet serial number counter */ |
449 | unsigned int hi_serial; /* highest serial number received */ | 449 | unsigned int hi_serial; /* highest serial number received */ |
450 | u32 security_nonce; /* response re-use preventer */ | 450 | u32 security_nonce; /* response re-use preventer */ |
451 | u16 service_id; /* Service ID, possibly upgraded */ | 451 | u32 service_id; /* Service ID, possibly upgraded */ |
452 | u8 size_align; /* data size alignment (for security) */ | 452 | u8 size_align; /* data size alignment (for security) */ |
453 | u8 security_size; /* security header size */ | 453 | u8 security_size; /* security header size */ |
454 | u8 security_ix; /* security type */ | 454 | u8 security_ix; /* security type */ |
455 | u8 out_clientflag; /* RXRPC_CLIENT_INITIATED if we are client */ | 455 | u8 out_clientflag; /* RXRPC_CLIENT_INITIATED if we are client */ |
456 | short error; /* Local error code */ | ||
456 | }; | 457 | }; |
457 | 458 | ||
458 | static inline bool rxrpc_to_server(const struct rxrpc_skb_priv *sp) | 459 | static inline bool rxrpc_to_server(const struct rxrpc_skb_priv *sp) |
@@ -635,6 +636,8 @@ struct rxrpc_call { | |||
635 | bool tx_phase; /* T if transmission phase, F if receive phase */ | 636 | bool tx_phase; /* T if transmission phase, F if receive phase */ |
636 | u8 nr_jumbo_bad; /* Number of jumbo dups/exceeds-windows */ | 637 | u8 nr_jumbo_bad; /* Number of jumbo dups/exceeds-windows */ |
637 | 638 | ||
639 | spinlock_t input_lock; /* Lock for packet input to this call */ | ||
640 | |||
638 | /* receive-phase ACK management */ | 641 | /* receive-phase ACK management */ |
639 | u8 ackr_reason; /* reason to ACK */ | 642 | u8 ackr_reason; /* reason to ACK */ |
640 | u16 ackr_skew; /* skew on packet being ACK'd */ | 643 | u16 ackr_skew; /* skew on packet being ACK'd */ |
@@ -720,8 +723,6 @@ int rxrpc_service_prealloc(struct rxrpc_sock *, gfp_t); | |||
720 | void rxrpc_discard_prealloc(struct rxrpc_sock *); | 723 | void rxrpc_discard_prealloc(struct rxrpc_sock *); |
721 | struct rxrpc_call *rxrpc_new_incoming_call(struct rxrpc_local *, | 724 | struct rxrpc_call *rxrpc_new_incoming_call(struct rxrpc_local *, |
722 | struct rxrpc_sock *, | 725 | struct rxrpc_sock *, |
723 | struct rxrpc_peer *, | ||
724 | struct rxrpc_connection *, | ||
725 | struct sk_buff *); | 726 | struct sk_buff *); |
726 | void rxrpc_accept_incoming_calls(struct rxrpc_local *); | 727 | void rxrpc_accept_incoming_calls(struct rxrpc_local *); |
727 | struct rxrpc_call *rxrpc_accept_call(struct rxrpc_sock *, unsigned long, | 728 | struct rxrpc_call *rxrpc_accept_call(struct rxrpc_sock *, unsigned long, |
@@ -891,8 +892,9 @@ extern unsigned long rxrpc_conn_idle_client_fast_expiry; | |||
891 | extern struct idr rxrpc_client_conn_ids; | 892 | extern struct idr rxrpc_client_conn_ids; |
892 | 893 | ||
893 | void rxrpc_destroy_client_conn_ids(void); | 894 | void rxrpc_destroy_client_conn_ids(void); |
894 | int rxrpc_connect_call(struct rxrpc_call *, struct rxrpc_conn_parameters *, | 895 | int rxrpc_connect_call(struct rxrpc_sock *, struct rxrpc_call *, |
895 | struct sockaddr_rxrpc *, gfp_t); | 896 | struct rxrpc_conn_parameters *, struct sockaddr_rxrpc *, |
897 | gfp_t); | ||
896 | void rxrpc_expose_client_call(struct rxrpc_call *); | 898 | void rxrpc_expose_client_call(struct rxrpc_call *); |
897 | void rxrpc_disconnect_client_call(struct rxrpc_call *); | 899 | void rxrpc_disconnect_client_call(struct rxrpc_call *); |
898 | void rxrpc_put_client_conn(struct rxrpc_connection *); | 900 | void rxrpc_put_client_conn(struct rxrpc_connection *); |
@@ -965,7 +967,7 @@ void rxrpc_unpublish_service_conn(struct rxrpc_connection *); | |||
965 | /* | 967 | /* |
966 | * input.c | 968 | * input.c |
967 | */ | 969 | */ |
968 | void rxrpc_data_ready(struct sock *); | 970 | int rxrpc_input_packet(struct sock *, struct sk_buff *); |
969 | 971 | ||
970 | /* | 972 | /* |
971 | * insecure.c | 973 | * insecure.c |
@@ -1045,10 +1047,11 @@ void rxrpc_peer_keepalive_worker(struct work_struct *); | |||
1045 | */ | 1047 | */ |
1046 | struct rxrpc_peer *rxrpc_lookup_peer_rcu(struct rxrpc_local *, | 1048 | struct rxrpc_peer *rxrpc_lookup_peer_rcu(struct rxrpc_local *, |
1047 | const struct sockaddr_rxrpc *); | 1049 | const struct sockaddr_rxrpc *); |
1048 | struct rxrpc_peer *rxrpc_lookup_peer(struct rxrpc_local *, | 1050 | struct rxrpc_peer *rxrpc_lookup_peer(struct rxrpc_sock *, struct rxrpc_local *, |
1049 | struct sockaddr_rxrpc *, gfp_t); | 1051 | struct sockaddr_rxrpc *, gfp_t); |
1050 | struct rxrpc_peer *rxrpc_alloc_peer(struct rxrpc_local *, gfp_t); | 1052 | struct rxrpc_peer *rxrpc_alloc_peer(struct rxrpc_local *, gfp_t); |
1051 | void rxrpc_new_incoming_peer(struct rxrpc_local *, struct rxrpc_peer *); | 1053 | void rxrpc_new_incoming_peer(struct rxrpc_sock *, struct rxrpc_local *, |
1054 | struct rxrpc_peer *); | ||
1052 | void rxrpc_destroy_all_peers(struct rxrpc_net *); | 1055 | void rxrpc_destroy_all_peers(struct rxrpc_net *); |
1053 | struct rxrpc_peer *rxrpc_get_peer(struct rxrpc_peer *); | 1056 | struct rxrpc_peer *rxrpc_get_peer(struct rxrpc_peer *); |
1054 | struct rxrpc_peer *rxrpc_get_peer_maybe(struct rxrpc_peer *); | 1057 | struct rxrpc_peer *rxrpc_get_peer_maybe(struct rxrpc_peer *); |
diff --git a/net/rxrpc/call_accept.c b/net/rxrpc/call_accept.c index 8354cadbb839..e0d8ca03169a 100644 --- a/net/rxrpc/call_accept.c +++ b/net/rxrpc/call_accept.c | |||
@@ -287,7 +287,7 @@ static struct rxrpc_call *rxrpc_alloc_incoming_call(struct rxrpc_sock *rx, | |||
287 | (peer_tail + 1) & | 287 | (peer_tail + 1) & |
288 | (RXRPC_BACKLOG_MAX - 1)); | 288 | (RXRPC_BACKLOG_MAX - 1)); |
289 | 289 | ||
290 | rxrpc_new_incoming_peer(local, peer); | 290 | rxrpc_new_incoming_peer(rx, local, peer); |
291 | } | 291 | } |
292 | 292 | ||
293 | /* Now allocate and set up the connection */ | 293 | /* Now allocate and set up the connection */ |
@@ -333,11 +333,11 @@ static struct rxrpc_call *rxrpc_alloc_incoming_call(struct rxrpc_sock *rx, | |||
333 | */ | 333 | */ |
334 | struct rxrpc_call *rxrpc_new_incoming_call(struct rxrpc_local *local, | 334 | struct rxrpc_call *rxrpc_new_incoming_call(struct rxrpc_local *local, |
335 | struct rxrpc_sock *rx, | 335 | struct rxrpc_sock *rx, |
336 | struct rxrpc_peer *peer, | ||
337 | struct rxrpc_connection *conn, | ||
338 | struct sk_buff *skb) | 336 | struct sk_buff *skb) |
339 | { | 337 | { |
340 | struct rxrpc_skb_priv *sp = rxrpc_skb(skb); | 338 | struct rxrpc_skb_priv *sp = rxrpc_skb(skb); |
339 | struct rxrpc_connection *conn; | ||
340 | struct rxrpc_peer *peer; | ||
341 | struct rxrpc_call *call; | 341 | struct rxrpc_call *call; |
342 | 342 | ||
343 | _enter(""); | 343 | _enter(""); |
@@ -354,6 +354,13 @@ struct rxrpc_call *rxrpc_new_incoming_call(struct rxrpc_local *local, | |||
354 | goto out; | 354 | goto out; |
355 | } | 355 | } |
356 | 356 | ||
357 | /* The peer, connection and call may all have sprung into existence due | ||
358 | * to a duplicate packet being handled on another CPU in parallel, so | ||
359 | * we have to recheck the routing. However, we're now holding | ||
360 | * rx->incoming_lock, so the values should remain stable. | ||
361 | */ | ||
362 | conn = rxrpc_find_connection_rcu(local, skb, &peer); | ||
363 | |||
357 | call = rxrpc_alloc_incoming_call(rx, local, peer, conn, skb); | 364 | call = rxrpc_alloc_incoming_call(rx, local, peer, conn, skb); |
358 | if (!call) { | 365 | if (!call) { |
359 | skb->mark = RXRPC_SKB_MARK_REJECT_BUSY; | 366 | skb->mark = RXRPC_SKB_MARK_REJECT_BUSY; |
@@ -396,20 +403,22 @@ struct rxrpc_call *rxrpc_new_incoming_call(struct rxrpc_local *local, | |||
396 | 403 | ||
397 | case RXRPC_CONN_SERVICE: | 404 | case RXRPC_CONN_SERVICE: |
398 | write_lock(&call->state_lock); | 405 | write_lock(&call->state_lock); |
399 | if (rx->discard_new_call) | 406 | if (call->state < RXRPC_CALL_COMPLETE) { |
400 | call->state = RXRPC_CALL_SERVER_RECV_REQUEST; | 407 | if (rx->discard_new_call) |
401 | else | 408 | call->state = RXRPC_CALL_SERVER_RECV_REQUEST; |
402 | call->state = RXRPC_CALL_SERVER_ACCEPTING; | 409 | else |
410 | call->state = RXRPC_CALL_SERVER_ACCEPTING; | ||
411 | } | ||
403 | write_unlock(&call->state_lock); | 412 | write_unlock(&call->state_lock); |
404 | break; | 413 | break; |
405 | 414 | ||
406 | case RXRPC_CONN_REMOTELY_ABORTED: | 415 | case RXRPC_CONN_REMOTELY_ABORTED: |
407 | rxrpc_set_call_completion(call, RXRPC_CALL_REMOTELY_ABORTED, | 416 | rxrpc_set_call_completion(call, RXRPC_CALL_REMOTELY_ABORTED, |
408 | conn->remote_abort, -ECONNABORTED); | 417 | conn->abort_code, conn->error); |
409 | break; | 418 | break; |
410 | case RXRPC_CONN_LOCALLY_ABORTED: | 419 | case RXRPC_CONN_LOCALLY_ABORTED: |
411 | rxrpc_abort_call("CON", call, sp->hdr.seq, | 420 | rxrpc_abort_call("CON", call, sp->hdr.seq, |
412 | conn->local_abort, -ECONNABORTED); | 421 | conn->abort_code, conn->error); |
413 | break; | 422 | break; |
414 | default: | 423 | default: |
415 | BUG(); | 424 | BUG(); |
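
The comment added in rxrpc_new_incoming_call() describes a classic recheck: an earlier lockless lookup may have raced with a duplicate packet handled on another CPU, so the routing is repeated once rx->incoming_lock is held. A minimal sketch of that general shape follows; every name in it (lookup_rcu(), create_and_insert(), struct table/obj/key) is hypothetical and not an rxrpc API.

/* Sketch of the lookup-then-recheck-under-lock pattern, not the patch itself. */
struct obj *get_or_create(struct table *t, const struct key *k)
{
	struct obj *o;

	rcu_read_lock();
	o = lookup_rcu(t, k);		/* lockless fast path; may race with creators */
	rcu_read_unlock();
	if (o)
		return o;

	spin_lock(&t->lock);
	o = lookup_rcu(t, k);		/* recheck: another CPU may have created it */
	if (!o)
		o = create_and_insert(t, k);	/* safe: we hold the serializing lock */
	spin_unlock(&t->lock);
	return o;
}
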
diff --git a/net/rxrpc/call_object.c b/net/rxrpc/call_object.c index 799f75b6900d..8f1a8f85b1f9 100644 --- a/net/rxrpc/call_object.c +++ b/net/rxrpc/call_object.c | |||
@@ -138,6 +138,7 @@ struct rxrpc_call *rxrpc_alloc_call(struct rxrpc_sock *rx, gfp_t gfp, | |||
138 | init_waitqueue_head(&call->waitq); | 138 | init_waitqueue_head(&call->waitq); |
139 | spin_lock_init(&call->lock); | 139 | spin_lock_init(&call->lock); |
140 | spin_lock_init(&call->notify_lock); | 140 | spin_lock_init(&call->notify_lock); |
141 | spin_lock_init(&call->input_lock); | ||
141 | rwlock_init(&call->state_lock); | 142 | rwlock_init(&call->state_lock); |
142 | atomic_set(&call->usage, 1); | 143 | atomic_set(&call->usage, 1); |
143 | call->debug_id = debug_id; | 144 | call->debug_id = debug_id; |
@@ -287,7 +288,7 @@ struct rxrpc_call *rxrpc_new_client_call(struct rxrpc_sock *rx, | |||
287 | /* Set up or get a connection record and set the protocol parameters, | 288 | /* Set up or get a connection record and set the protocol parameters, |
288 | * including channel number and call ID. | 289 | * including channel number and call ID. |
289 | */ | 290 | */ |
290 | ret = rxrpc_connect_call(call, cp, srx, gfp); | 291 | ret = rxrpc_connect_call(rx, call, cp, srx, gfp); |
291 | if (ret < 0) | 292 | if (ret < 0) |
292 | goto error; | 293 | goto error; |
293 | 294 | ||
@@ -339,7 +340,7 @@ int rxrpc_retry_client_call(struct rxrpc_sock *rx, | |||
339 | /* Set up or get a connection record and set the protocol parameters, | 340 | /* Set up or get a connection record and set the protocol parameters, |
340 | * including channel number and call ID. | 341 | * including channel number and call ID. |
341 | */ | 342 | */ |
342 | ret = rxrpc_connect_call(call, cp, srx, gfp); | 343 | ret = rxrpc_connect_call(rx, call, cp, srx, gfp); |
343 | if (ret < 0) | 344 | if (ret < 0) |
344 | goto error; | 345 | goto error; |
345 | 346 | ||
diff --git a/net/rxrpc/conn_client.c b/net/rxrpc/conn_client.c index 8acf74fe24c0..521189f4b666 100644 --- a/net/rxrpc/conn_client.c +++ b/net/rxrpc/conn_client.c | |||
@@ -276,7 +276,8 @@ dont_reuse: | |||
276 | * If we return with a connection, the call will be on its waiting list. It's | 276 | * If we return with a connection, the call will be on its waiting list. It's |
277 | * left to the caller to assign a channel and wake up the call. | 277 | * left to the caller to assign a channel and wake up the call. |
278 | */ | 278 | */ |
279 | static int rxrpc_get_client_conn(struct rxrpc_call *call, | 279 | static int rxrpc_get_client_conn(struct rxrpc_sock *rx, |
280 | struct rxrpc_call *call, | ||
280 | struct rxrpc_conn_parameters *cp, | 281 | struct rxrpc_conn_parameters *cp, |
281 | struct sockaddr_rxrpc *srx, | 282 | struct sockaddr_rxrpc *srx, |
282 | gfp_t gfp) | 283 | gfp_t gfp) |
@@ -289,7 +290,7 @@ static int rxrpc_get_client_conn(struct rxrpc_call *call, | |||
289 | 290 | ||
290 | _enter("{%d,%lx},", call->debug_id, call->user_call_ID); | 291 | _enter("{%d,%lx},", call->debug_id, call->user_call_ID); |
291 | 292 | ||
292 | cp->peer = rxrpc_lookup_peer(cp->local, srx, gfp); | 293 | cp->peer = rxrpc_lookup_peer(rx, cp->local, srx, gfp); |
293 | if (!cp->peer) | 294 | if (!cp->peer) |
294 | goto error; | 295 | goto error; |
295 | 296 | ||
@@ -683,7 +684,8 @@ out: | |||
683 | * find a connection for a call | 684 | * find a connection for a call |
684 | * - called in process context with IRQs enabled | 685 | * - called in process context with IRQs enabled |
685 | */ | 686 | */ |
686 | int rxrpc_connect_call(struct rxrpc_call *call, | 687 | int rxrpc_connect_call(struct rxrpc_sock *rx, |
688 | struct rxrpc_call *call, | ||
687 | struct rxrpc_conn_parameters *cp, | 689 | struct rxrpc_conn_parameters *cp, |
688 | struct sockaddr_rxrpc *srx, | 690 | struct sockaddr_rxrpc *srx, |
689 | gfp_t gfp) | 691 | gfp_t gfp) |
@@ -696,7 +698,7 @@ int rxrpc_connect_call(struct rxrpc_call *call, | |||
696 | rxrpc_discard_expired_client_conns(&rxnet->client_conn_reaper); | 698 | rxrpc_discard_expired_client_conns(&rxnet->client_conn_reaper); |
697 | rxrpc_cull_active_client_conns(rxnet); | 699 | rxrpc_cull_active_client_conns(rxnet); |
698 | 700 | ||
699 | ret = rxrpc_get_client_conn(call, cp, srx, gfp); | 701 | ret = rxrpc_get_client_conn(rx, call, cp, srx, gfp); |
700 | if (ret < 0) | 702 | if (ret < 0) |
701 | goto out; | 703 | goto out; |
702 | 704 | ||
diff --git a/net/rxrpc/conn_event.c b/net/rxrpc/conn_event.c index 6df56ce68861..b6fca8ebb117 100644 --- a/net/rxrpc/conn_event.c +++ b/net/rxrpc/conn_event.c | |||
@@ -126,7 +126,7 @@ static void rxrpc_conn_retransmit_call(struct rxrpc_connection *conn, | |||
126 | 126 | ||
127 | switch (chan->last_type) { | 127 | switch (chan->last_type) { |
128 | case RXRPC_PACKET_TYPE_ABORT: | 128 | case RXRPC_PACKET_TYPE_ABORT: |
129 | _proto("Tx ABORT %%%u { %d } [re]", serial, conn->local_abort); | 129 | _proto("Tx ABORT %%%u { %d } [re]", serial, conn->abort_code); |
130 | break; | 130 | break; |
131 | case RXRPC_PACKET_TYPE_ACK: | 131 | case RXRPC_PACKET_TYPE_ACK: |
132 | trace_rxrpc_tx_ack(chan->call_debug_id, serial, | 132 | trace_rxrpc_tx_ack(chan->call_debug_id, serial, |
@@ -153,13 +153,12 @@ static void rxrpc_conn_retransmit_call(struct rxrpc_connection *conn, | |||
153 | * pass a connection-level abort onto all calls on that connection | 153 | * pass a connection-level abort onto all calls on that connection |
154 | */ | 154 | */ |
155 | static void rxrpc_abort_calls(struct rxrpc_connection *conn, | 155 | static void rxrpc_abort_calls(struct rxrpc_connection *conn, |
156 | enum rxrpc_call_completion compl, | 156 | enum rxrpc_call_completion compl) |
157 | u32 abort_code, int error) | ||
158 | { | 157 | { |
159 | struct rxrpc_call *call; | 158 | struct rxrpc_call *call; |
160 | int i; | 159 | int i; |
161 | 160 | ||
162 | _enter("{%d},%x", conn->debug_id, abort_code); | 161 | _enter("{%d},%x", conn->debug_id, conn->abort_code); |
163 | 162 | ||
164 | spin_lock(&conn->channel_lock); | 163 | spin_lock(&conn->channel_lock); |
165 | 164 | ||
@@ -172,9 +171,11 @@ static void rxrpc_abort_calls(struct rxrpc_connection *conn, | |||
172 | trace_rxrpc_abort(call->debug_id, | 171 | trace_rxrpc_abort(call->debug_id, |
173 | "CON", call->cid, | 172 | "CON", call->cid, |
174 | call->call_id, 0, | 173 | call->call_id, 0, |
175 | abort_code, error); | 174 | conn->abort_code, |
175 | conn->error); | ||
176 | if (rxrpc_set_call_completion(call, compl, | 176 | if (rxrpc_set_call_completion(call, compl, |
177 | abort_code, error)) | 177 | conn->abort_code, |
178 | conn->error)) | ||
178 | rxrpc_notify_socket(call); | 179 | rxrpc_notify_socket(call); |
179 | } | 180 | } |
180 | } | 181 | } |
@@ -207,10 +208,12 @@ static int rxrpc_abort_connection(struct rxrpc_connection *conn, | |||
207 | return 0; | 208 | return 0; |
208 | } | 209 | } |
209 | 210 | ||
211 | conn->error = error; | ||
212 | conn->abort_code = abort_code; | ||
210 | conn->state = RXRPC_CONN_LOCALLY_ABORTED; | 213 | conn->state = RXRPC_CONN_LOCALLY_ABORTED; |
211 | spin_unlock_bh(&conn->state_lock); | 214 | spin_unlock_bh(&conn->state_lock); |
212 | 215 | ||
213 | rxrpc_abort_calls(conn, RXRPC_CALL_LOCALLY_ABORTED, abort_code, error); | 216 | rxrpc_abort_calls(conn, RXRPC_CALL_LOCALLY_ABORTED); |
214 | 217 | ||
215 | msg.msg_name = &conn->params.peer->srx.transport; | 218 | msg.msg_name = &conn->params.peer->srx.transport; |
216 | msg.msg_namelen = conn->params.peer->srx.transport_len; | 219 | msg.msg_namelen = conn->params.peer->srx.transport_len; |
@@ -229,7 +232,7 @@ static int rxrpc_abort_connection(struct rxrpc_connection *conn, | |||
229 | whdr._rsvd = 0; | 232 | whdr._rsvd = 0; |
230 | whdr.serviceId = htons(conn->service_id); | 233 | whdr.serviceId = htons(conn->service_id); |
231 | 234 | ||
232 | word = htonl(conn->local_abort); | 235 | word = htonl(conn->abort_code); |
233 | 236 | ||
234 | iov[0].iov_base = &whdr; | 237 | iov[0].iov_base = &whdr; |
235 | iov[0].iov_len = sizeof(whdr); | 238 | iov[0].iov_len = sizeof(whdr); |
@@ -240,7 +243,7 @@ static int rxrpc_abort_connection(struct rxrpc_connection *conn, | |||
240 | 243 | ||
241 | serial = atomic_inc_return(&conn->serial); | 244 | serial = atomic_inc_return(&conn->serial); |
242 | whdr.serial = htonl(serial); | 245 | whdr.serial = htonl(serial); |
243 | _proto("Tx CONN ABORT %%%u { %d }", serial, conn->local_abort); | 246 | _proto("Tx CONN ABORT %%%u { %d }", serial, conn->abort_code); |
244 | 247 | ||
245 | ret = kernel_sendmsg(conn->params.local->socket, &msg, iov, 2, len); | 248 | ret = kernel_sendmsg(conn->params.local->socket, &msg, iov, 2, len); |
246 | if (ret < 0) { | 249 | if (ret < 0) { |
@@ -315,9 +318,10 @@ static int rxrpc_process_event(struct rxrpc_connection *conn, | |||
315 | abort_code = ntohl(wtmp); | 318 | abort_code = ntohl(wtmp); |
316 | _proto("Rx ABORT %%%u { ac=%d }", sp->hdr.serial, abort_code); | 319 | _proto("Rx ABORT %%%u { ac=%d }", sp->hdr.serial, abort_code); |
317 | 320 | ||
321 | conn->error = -ECONNABORTED; | ||
322 | conn->abort_code = abort_code; | ||
318 | conn->state = RXRPC_CONN_REMOTELY_ABORTED; | 323 | conn->state = RXRPC_CONN_REMOTELY_ABORTED; |
319 | rxrpc_abort_calls(conn, RXRPC_CALL_REMOTELY_ABORTED, | 324 | rxrpc_abort_calls(conn, RXRPC_CALL_REMOTELY_ABORTED); |
320 | abort_code, -ECONNABORTED); | ||
321 | return -ECONNABORTED; | 325 | return -ECONNABORTED; |
322 | 326 | ||
323 | case RXRPC_PACKET_TYPE_CHALLENGE: | 327 | case RXRPC_PACKET_TYPE_CHALLENGE: |
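
Storing conn->error and conn->abort_code on the connection before the state flips means rxrpc_abort_calls() and the incoming-call setup in call_accept.c can read one authoritative copy instead of having the values threaded through as parameters. A hedged sketch of the ordering this relies on; the field and function names below are illustrative, not the real rxrpc structures.

/* Illustrative only: publish the abort data, then the state that advertises it. */
static void conn_set_aborted(struct conn *c, int error, u32 abort_code)
{
	spin_lock_bh(&c->state_lock);
	c->error = error;		/* written first...                   */
	c->abort_code = abort_code;
	c->state = CONN_ABORTED;	/* ...then the state others test for  */
	spin_unlock_bh(&c->state_lock);

	abort_all_calls(c);		/* readers now take c->abort_code/c->error */
}
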
diff --git a/net/rxrpc/input.c b/net/rxrpc/input.c index 5b2626929822..9128aa0e40aa 100644 --- a/net/rxrpc/input.c +++ b/net/rxrpc/input.c | |||
@@ -216,10 +216,11 @@ static void rxrpc_send_ping(struct rxrpc_call *call, struct sk_buff *skb, | |||
216 | /* | 216 | /* |
217 | * Apply a hard ACK by advancing the Tx window. | 217 | * Apply a hard ACK by advancing the Tx window. |
218 | */ | 218 | */ |
219 | static void rxrpc_rotate_tx_window(struct rxrpc_call *call, rxrpc_seq_t to, | 219 | static bool rxrpc_rotate_tx_window(struct rxrpc_call *call, rxrpc_seq_t to, |
220 | struct rxrpc_ack_summary *summary) | 220 | struct rxrpc_ack_summary *summary) |
221 | { | 221 | { |
222 | struct sk_buff *skb, *list = NULL; | 222 | struct sk_buff *skb, *list = NULL; |
223 | bool rot_last = false; | ||
223 | int ix; | 224 | int ix; |
224 | u8 annotation; | 225 | u8 annotation; |
225 | 226 | ||
@@ -243,15 +244,17 @@ static void rxrpc_rotate_tx_window(struct rxrpc_call *call, rxrpc_seq_t to, | |||
243 | skb->next = list; | 244 | skb->next = list; |
244 | list = skb; | 245 | list = skb; |
245 | 246 | ||
246 | if (annotation & RXRPC_TX_ANNO_LAST) | 247 | if (annotation & RXRPC_TX_ANNO_LAST) { |
247 | set_bit(RXRPC_CALL_TX_LAST, &call->flags); | 248 | set_bit(RXRPC_CALL_TX_LAST, &call->flags); |
249 | rot_last = true; | ||
250 | } | ||
248 | if ((annotation & RXRPC_TX_ANNO_MASK) != RXRPC_TX_ANNO_ACK) | 251 | if ((annotation & RXRPC_TX_ANNO_MASK) != RXRPC_TX_ANNO_ACK) |
249 | summary->nr_rot_new_acks++; | 252 | summary->nr_rot_new_acks++; |
250 | } | 253 | } |
251 | 254 | ||
252 | spin_unlock(&call->lock); | 255 | spin_unlock(&call->lock); |
253 | 256 | ||
254 | trace_rxrpc_transmit(call, (test_bit(RXRPC_CALL_TX_LAST, &call->flags) ? | 257 | trace_rxrpc_transmit(call, (rot_last ? |
255 | rxrpc_transmit_rotate_last : | 258 | rxrpc_transmit_rotate_last : |
256 | rxrpc_transmit_rotate)); | 259 | rxrpc_transmit_rotate)); |
257 | wake_up(&call->waitq); | 260 | wake_up(&call->waitq); |
@@ -262,6 +265,8 @@ static void rxrpc_rotate_tx_window(struct rxrpc_call *call, rxrpc_seq_t to, | |||
262 | skb_mark_not_on_list(skb); | 265 | skb_mark_not_on_list(skb); |
263 | rxrpc_free_skb(skb, rxrpc_skb_tx_freed); | 266 | rxrpc_free_skb(skb, rxrpc_skb_tx_freed); |
264 | } | 267 | } |
268 | |||
269 | return rot_last; | ||
265 | } | 270 | } |
266 | 271 | ||
267 | /* | 272 | /* |
@@ -273,23 +278,26 @@ static void rxrpc_rotate_tx_window(struct rxrpc_call *call, rxrpc_seq_t to, | |||
273 | static bool rxrpc_end_tx_phase(struct rxrpc_call *call, bool reply_begun, | 278 | static bool rxrpc_end_tx_phase(struct rxrpc_call *call, bool reply_begun, |
274 | const char *abort_why) | 279 | const char *abort_why) |
275 | { | 280 | { |
281 | unsigned int state; | ||
276 | 282 | ||
277 | ASSERT(test_bit(RXRPC_CALL_TX_LAST, &call->flags)); | 283 | ASSERT(test_bit(RXRPC_CALL_TX_LAST, &call->flags)); |
278 | 284 | ||
279 | write_lock(&call->state_lock); | 285 | write_lock(&call->state_lock); |
280 | 286 | ||
281 | switch (call->state) { | 287 | state = call->state; |
288 | switch (state) { | ||
282 | case RXRPC_CALL_CLIENT_SEND_REQUEST: | 289 | case RXRPC_CALL_CLIENT_SEND_REQUEST: |
283 | case RXRPC_CALL_CLIENT_AWAIT_REPLY: | 290 | case RXRPC_CALL_CLIENT_AWAIT_REPLY: |
284 | if (reply_begun) | 291 | if (reply_begun) |
285 | call->state = RXRPC_CALL_CLIENT_RECV_REPLY; | 292 | call->state = state = RXRPC_CALL_CLIENT_RECV_REPLY; |
286 | else | 293 | else |
287 | call->state = RXRPC_CALL_CLIENT_AWAIT_REPLY; | 294 | call->state = state = RXRPC_CALL_CLIENT_AWAIT_REPLY; |
288 | break; | 295 | break; |
289 | 296 | ||
290 | case RXRPC_CALL_SERVER_AWAIT_ACK: | 297 | case RXRPC_CALL_SERVER_AWAIT_ACK: |
291 | __rxrpc_call_completed(call); | 298 | __rxrpc_call_completed(call); |
292 | rxrpc_notify_socket(call); | 299 | rxrpc_notify_socket(call); |
300 | state = call->state; | ||
293 | break; | 301 | break; |
294 | 302 | ||
295 | default: | 303 | default: |
@@ -297,11 +305,10 @@ static bool rxrpc_end_tx_phase(struct rxrpc_call *call, bool reply_begun, | |||
297 | } | 305 | } |
298 | 306 | ||
299 | write_unlock(&call->state_lock); | 307 | write_unlock(&call->state_lock); |
300 | if (call->state == RXRPC_CALL_CLIENT_AWAIT_REPLY) { | 308 | if (state == RXRPC_CALL_CLIENT_AWAIT_REPLY) |
301 | trace_rxrpc_transmit(call, rxrpc_transmit_await_reply); | 309 | trace_rxrpc_transmit(call, rxrpc_transmit_await_reply); |
302 | } else { | 310 | else |
303 | trace_rxrpc_transmit(call, rxrpc_transmit_end); | 311 | trace_rxrpc_transmit(call, rxrpc_transmit_end); |
304 | } | ||
305 | _leave(" = ok"); | 312 | _leave(" = ok"); |
306 | return true; | 313 | return true; |
307 | 314 | ||
@@ -332,11 +339,11 @@ static bool rxrpc_receiving_reply(struct rxrpc_call *call) | |||
332 | trace_rxrpc_timer(call, rxrpc_timer_init_for_reply, now); | 339 | trace_rxrpc_timer(call, rxrpc_timer_init_for_reply, now); |
333 | } | 340 | } |
334 | 341 | ||
335 | if (!test_bit(RXRPC_CALL_TX_LAST, &call->flags)) | ||
336 | rxrpc_rotate_tx_window(call, top, &summary); | ||
337 | if (!test_bit(RXRPC_CALL_TX_LAST, &call->flags)) { | 342 | if (!test_bit(RXRPC_CALL_TX_LAST, &call->flags)) { |
338 | rxrpc_proto_abort("TXL", call, top); | 343 | if (!rxrpc_rotate_tx_window(call, top, &summary)) { |
339 | return false; | 344 | rxrpc_proto_abort("TXL", call, top); |
345 | return false; | ||
346 | } | ||
340 | } | 347 | } |
341 | if (!rxrpc_end_tx_phase(call, true, "ETD")) | 348 | if (!rxrpc_end_tx_phase(call, true, "ETD")) |
342 | return false; | 349 | return false; |
@@ -452,13 +459,15 @@ static void rxrpc_input_data(struct rxrpc_call *call, struct sk_buff *skb, | |||
452 | } | 459 | } |
453 | } | 460 | } |
454 | 461 | ||
462 | spin_lock(&call->input_lock); | ||
463 | |||
455 | /* Received data implicitly ACKs all of the request packets we sent | 464 | /* Received data implicitly ACKs all of the request packets we sent |
456 | * when we're acting as a client. | 465 | * when we're acting as a client. |
457 | */ | 466 | */ |
458 | if ((state == RXRPC_CALL_CLIENT_SEND_REQUEST || | 467 | if ((state == RXRPC_CALL_CLIENT_SEND_REQUEST || |
459 | state == RXRPC_CALL_CLIENT_AWAIT_REPLY) && | 468 | state == RXRPC_CALL_CLIENT_AWAIT_REPLY) && |
460 | !rxrpc_receiving_reply(call)) | 469 | !rxrpc_receiving_reply(call)) |
461 | return; | 470 | goto unlock; |
462 | 471 | ||
463 | call->ackr_prev_seq = seq; | 472 | call->ackr_prev_seq = seq; |
464 | 473 | ||
@@ -488,12 +497,16 @@ next_subpacket: | |||
488 | 497 | ||
489 | if (flags & RXRPC_LAST_PACKET) { | 498 | if (flags & RXRPC_LAST_PACKET) { |
490 | if (test_bit(RXRPC_CALL_RX_LAST, &call->flags) && | 499 | if (test_bit(RXRPC_CALL_RX_LAST, &call->flags) && |
491 | seq != call->rx_top) | 500 | seq != call->rx_top) { |
492 | return rxrpc_proto_abort("LSN", call, seq); | 501 | rxrpc_proto_abort("LSN", call, seq); |
502 | goto unlock; | ||
503 | } | ||
493 | } else { | 504 | } else { |
494 | if (test_bit(RXRPC_CALL_RX_LAST, &call->flags) && | 505 | if (test_bit(RXRPC_CALL_RX_LAST, &call->flags) && |
495 | after_eq(seq, call->rx_top)) | 506 | after_eq(seq, call->rx_top)) { |
496 | return rxrpc_proto_abort("LSA", call, seq); | 507 | rxrpc_proto_abort("LSA", call, seq); |
508 | goto unlock; | ||
509 | } | ||
497 | } | 510 | } |
498 | 511 | ||
499 | trace_rxrpc_rx_data(call->debug_id, seq, serial, flags, annotation); | 512 | trace_rxrpc_rx_data(call->debug_id, seq, serial, flags, annotation); |
@@ -560,8 +573,10 @@ next_subpacket: | |||
560 | skip: | 573 | skip: |
561 | offset += len; | 574 | offset += len; |
562 | if (flags & RXRPC_JUMBO_PACKET) { | 575 | if (flags & RXRPC_JUMBO_PACKET) { |
563 | if (skb_copy_bits(skb, offset, &flags, 1) < 0) | 576 | if (skb_copy_bits(skb, offset, &flags, 1) < 0) { |
564 | return rxrpc_proto_abort("XJF", call, seq); | 577 | rxrpc_proto_abort("XJF", call, seq); |
578 | goto unlock; | ||
579 | } | ||
565 | offset += sizeof(struct rxrpc_jumbo_header); | 580 | offset += sizeof(struct rxrpc_jumbo_header); |
566 | seq++; | 581 | seq++; |
567 | serial++; | 582 | serial++; |
@@ -601,6 +616,9 @@ ack: | |||
601 | trace_rxrpc_notify_socket(call->debug_id, serial); | 616 | trace_rxrpc_notify_socket(call->debug_id, serial); |
602 | rxrpc_notify_socket(call); | 617 | rxrpc_notify_socket(call); |
603 | } | 618 | } |
619 | |||
620 | unlock: | ||
621 | spin_unlock(&call->input_lock); | ||
604 | _leave(" [queued]"); | 622 | _leave(" [queued]"); |
605 | } | 623 | } |
606 | 624 | ||
@@ -687,15 +705,14 @@ static void rxrpc_input_ping_response(struct rxrpc_call *call, | |||
687 | 705 | ||
688 | ping_time = call->ping_time; | 706 | ping_time = call->ping_time; |
689 | smp_rmb(); | 707 | smp_rmb(); |
690 | ping_serial = call->ping_serial; | 708 | ping_serial = READ_ONCE(call->ping_serial); |
691 | 709 | ||
692 | if (orig_serial == call->acks_lost_ping) | 710 | if (orig_serial == call->acks_lost_ping) |
693 | rxrpc_input_check_for_lost_ack(call); | 711 | rxrpc_input_check_for_lost_ack(call); |
694 | 712 | ||
695 | if (!test_bit(RXRPC_CALL_PINGING, &call->flags) || | 713 | if (before(orig_serial, ping_serial) || |
696 | before(orig_serial, ping_serial)) | 714 | !test_and_clear_bit(RXRPC_CALL_PINGING, &call->flags)) |
697 | return; | 715 | return; |
698 | clear_bit(RXRPC_CALL_PINGING, &call->flags); | ||
699 | if (after(orig_serial, ping_serial)) | 716 | if (after(orig_serial, ping_serial)) |
700 | return; | 717 | return; |
701 | 718 | ||
@@ -861,15 +878,32 @@ static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb, | |||
861 | rxrpc_propose_ack_respond_to_ack); | 878 | rxrpc_propose_ack_respond_to_ack); |
862 | } | 879 | } |
863 | 880 | ||
881 | /* Discard any out-of-order or duplicate ACKs. */ | ||
882 | if (before_eq(sp->hdr.serial, call->acks_latest)) | ||
883 | return; | ||
884 | |||
885 | buf.info.rxMTU = 0; | ||
864 | ioffset = offset + nr_acks + 3; | 886 | ioffset = offset + nr_acks + 3; |
865 | if (skb->len >= ioffset + sizeof(buf.info)) { | 887 | if (skb->len >= ioffset + sizeof(buf.info) && |
866 | if (skb_copy_bits(skb, ioffset, &buf.info, sizeof(buf.info)) < 0) | 888 | skb_copy_bits(skb, ioffset, &buf.info, sizeof(buf.info)) < 0) |
867 | return rxrpc_proto_abort("XAI", call, 0); | 889 | return rxrpc_proto_abort("XAI", call, 0); |
890 | |||
891 | spin_lock(&call->input_lock); | ||
892 | |||
893 | /* Discard any out-of-order or duplicate ACKs. */ | ||
894 | if (before_eq(sp->hdr.serial, call->acks_latest)) | ||
895 | goto out; | ||
896 | call->acks_latest_ts = skb->tstamp; | ||
897 | call->acks_latest = sp->hdr.serial; | ||
898 | |||
899 | /* Parse rwind and mtu sizes if provided. */ | ||
900 | if (buf.info.rxMTU) | ||
868 | rxrpc_input_ackinfo(call, skb, &buf.info); | 901 | rxrpc_input_ackinfo(call, skb, &buf.info); |
869 | } | ||
870 | 902 | ||
871 | if (first_soft_ack == 0) | 903 | if (first_soft_ack == 0) { |
872 | return rxrpc_proto_abort("AK0", call, 0); | 904 | rxrpc_proto_abort("AK0", call, 0); |
905 | goto out; | ||
906 | } | ||
873 | 907 | ||
874 | /* Ignore ACKs unless we are or have just been transmitting. */ | 908 | /* Ignore ACKs unless we are or have just been transmitting. */ |
875 | switch (READ_ONCE(call->state)) { | 909 | switch (READ_ONCE(call->state)) { |
@@ -879,39 +913,35 @@ static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb, | |||
879 | case RXRPC_CALL_SERVER_AWAIT_ACK: | 913 | case RXRPC_CALL_SERVER_AWAIT_ACK: |
880 | break; | 914 | break; |
881 | default: | 915 | default: |
882 | return; | 916 | goto out; |
883 | } | ||
884 | |||
885 | /* Discard any out-of-order or duplicate ACKs. */ | ||
886 | if (before_eq(sp->hdr.serial, call->acks_latest)) { | ||
887 | _debug("discard ACK %d <= %d", | ||
888 | sp->hdr.serial, call->acks_latest); | ||
889 | return; | ||
890 | } | 917 | } |
891 | call->acks_latest_ts = skb->tstamp; | ||
892 | call->acks_latest = sp->hdr.serial; | ||
893 | 918 | ||
894 | if (before(hard_ack, call->tx_hard_ack) || | 919 | if (before(hard_ack, call->tx_hard_ack) || |
895 | after(hard_ack, call->tx_top)) | 920 | after(hard_ack, call->tx_top)) { |
896 | return rxrpc_proto_abort("AKW", call, 0); | 921 | rxrpc_proto_abort("AKW", call, 0); |
897 | if (nr_acks > call->tx_top - hard_ack) | 922 | goto out; |
898 | return rxrpc_proto_abort("AKN", call, 0); | 923 | } |
924 | if (nr_acks > call->tx_top - hard_ack) { | ||
925 | rxrpc_proto_abort("AKN", call, 0); | ||
926 | goto out; | ||
927 | } | ||
899 | 928 | ||
900 | if (after(hard_ack, call->tx_hard_ack)) | 929 | if (after(hard_ack, call->tx_hard_ack)) { |
901 | rxrpc_rotate_tx_window(call, hard_ack, &summary); | 930 | if (rxrpc_rotate_tx_window(call, hard_ack, &summary)) { |
931 | rxrpc_end_tx_phase(call, false, "ETA"); | ||
932 | goto out; | ||
933 | } | ||
934 | } | ||
902 | 935 | ||
903 | if (nr_acks > 0) { | 936 | if (nr_acks > 0) { |
904 | if (skb_copy_bits(skb, offset, buf.acks, nr_acks) < 0) | 937 | if (skb_copy_bits(skb, offset, buf.acks, nr_acks) < 0) { |
905 | return rxrpc_proto_abort("XSA", call, 0); | 938 | rxrpc_proto_abort("XSA", call, 0); |
939 | goto out; | ||
940 | } | ||
906 | rxrpc_input_soft_acks(call, buf.acks, first_soft_ack, nr_acks, | 941 | rxrpc_input_soft_acks(call, buf.acks, first_soft_ack, nr_acks, |
907 | &summary); | 942 | &summary); |
908 | } | 943 | } |
909 | 944 | ||
910 | if (test_bit(RXRPC_CALL_TX_LAST, &call->flags)) { | ||
911 | rxrpc_end_tx_phase(call, false, "ETA"); | ||
912 | return; | ||
913 | } | ||
914 | |||
915 | if (call->rxtx_annotations[call->tx_top & RXRPC_RXTX_BUFF_MASK] & | 945 | if (call->rxtx_annotations[call->tx_top & RXRPC_RXTX_BUFF_MASK] & |
916 | RXRPC_TX_ANNO_LAST && | 946 | RXRPC_TX_ANNO_LAST && |
917 | summary.nr_acks == call->tx_top - hard_ack && | 947 | summary.nr_acks == call->tx_top - hard_ack && |
@@ -920,7 +950,9 @@ static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb, | |||
920 | false, true, | 950 | false, true, |
921 | rxrpc_propose_ack_ping_for_lost_reply); | 951 | rxrpc_propose_ack_ping_for_lost_reply); |
922 | 952 | ||
923 | return rxrpc_congestion_management(call, skb, &summary, acked_serial); | 953 | rxrpc_congestion_management(call, skb, &summary, acked_serial); |
954 | out: | ||
955 | spin_unlock(&call->input_lock); | ||
924 | } | 956 | } |
925 | 957 | ||
926 | /* | 958 | /* |
@@ -933,9 +965,12 @@ static void rxrpc_input_ackall(struct rxrpc_call *call, struct sk_buff *skb) | |||
933 | 965 | ||
934 | _proto("Rx ACKALL %%%u", sp->hdr.serial); | 966 | _proto("Rx ACKALL %%%u", sp->hdr.serial); |
935 | 967 | ||
936 | rxrpc_rotate_tx_window(call, call->tx_top, &summary); | 968 | spin_lock(&call->input_lock); |
937 | if (test_bit(RXRPC_CALL_TX_LAST, &call->flags)) | 969 | |
970 | if (rxrpc_rotate_tx_window(call, call->tx_top, &summary)) | ||
938 | rxrpc_end_tx_phase(call, false, "ETL"); | 971 | rxrpc_end_tx_phase(call, false, "ETL"); |
972 | |||
973 | spin_unlock(&call->input_lock); | ||
939 | } | 974 | } |
940 | 975 | ||
941 | /* | 976 | /* |
@@ -1018,18 +1053,19 @@ static void rxrpc_input_call_packet(struct rxrpc_call *call, | |||
1018 | } | 1053 | } |
1019 | 1054 | ||
1020 | /* | 1055 | /* |
1021 | * Handle a new call on a channel implicitly completing the preceding call on | 1056 | * Handle a new service call on a channel implicitly completing the preceding |
1022 | * that channel. | 1057 | * call on that channel. This does not apply to client conns. |
1023 | * | 1058 | * |
1024 | * TODO: If callNumber > call_id + 1, renegotiate security. | 1059 | * TODO: If callNumber > call_id + 1, renegotiate security. |
1025 | */ | 1060 | */ |
1026 | static void rxrpc_input_implicit_end_call(struct rxrpc_connection *conn, | 1061 | static void rxrpc_input_implicit_end_call(struct rxrpc_sock *rx, |
1062 | struct rxrpc_connection *conn, | ||
1027 | struct rxrpc_call *call) | 1063 | struct rxrpc_call *call) |
1028 | { | 1064 | { |
1029 | switch (READ_ONCE(call->state)) { | 1065 | switch (READ_ONCE(call->state)) { |
1030 | case RXRPC_CALL_SERVER_AWAIT_ACK: | 1066 | case RXRPC_CALL_SERVER_AWAIT_ACK: |
1031 | rxrpc_call_completed(call); | 1067 | rxrpc_call_completed(call); |
1032 | break; | 1068 | /* Fall through */ |
1033 | case RXRPC_CALL_COMPLETE: | 1069 | case RXRPC_CALL_COMPLETE: |
1034 | break; | 1070 | break; |
1035 | default: | 1071 | default: |
@@ -1037,11 +1073,13 @@ static void rxrpc_input_implicit_end_call(struct rxrpc_connection *conn, | |||
1037 | set_bit(RXRPC_CALL_EV_ABORT, &call->events); | 1073 | set_bit(RXRPC_CALL_EV_ABORT, &call->events); |
1038 | rxrpc_queue_call(call); | 1074 | rxrpc_queue_call(call); |
1039 | } | 1075 | } |
1076 | trace_rxrpc_improper_term(call); | ||
1040 | break; | 1077 | break; |
1041 | } | 1078 | } |
1042 | 1079 | ||
1043 | trace_rxrpc_improper_term(call); | 1080 | spin_lock(&rx->incoming_lock); |
1044 | __rxrpc_disconnect_call(conn, call); | 1081 | __rxrpc_disconnect_call(conn, call); |
1082 | spin_unlock(&rx->incoming_lock); | ||
1045 | rxrpc_notify_socket(call); | 1083 | rxrpc_notify_socket(call); |
1046 | } | 1084 | } |
1047 | 1085 | ||
@@ -1120,8 +1158,10 @@ int rxrpc_extract_header(struct rxrpc_skb_priv *sp, struct sk_buff *skb) | |||
1120 | * The socket is locked by the caller and this prevents the socket from being | 1158 | * The socket is locked by the caller and this prevents the socket from being |
1121 | * shut down and the local endpoint from going away, thus sk_user_data will not | 1159 | * shut down and the local endpoint from going away, thus sk_user_data will not |
1122 | * be cleared until this function returns. | 1160 | * be cleared until this function returns. |
1161 | * | ||
1162 | * Called with the RCU read lock held from the IP layer via UDP. | ||
1123 | */ | 1163 | */ |
1124 | void rxrpc_data_ready(struct sock *udp_sk) | 1164 | int rxrpc_input_packet(struct sock *udp_sk, struct sk_buff *skb) |
1125 | { | 1165 | { |
1126 | struct rxrpc_connection *conn; | 1166 | struct rxrpc_connection *conn; |
1127 | struct rxrpc_channel *chan; | 1167 | struct rxrpc_channel *chan; |
@@ -1130,38 +1170,17 @@ void rxrpc_data_ready(struct sock *udp_sk) | |||
1130 | struct rxrpc_local *local = udp_sk->sk_user_data; | 1170 | struct rxrpc_local *local = udp_sk->sk_user_data; |
1131 | struct rxrpc_peer *peer = NULL; | 1171 | struct rxrpc_peer *peer = NULL; |
1132 | struct rxrpc_sock *rx = NULL; | 1172 | struct rxrpc_sock *rx = NULL; |
1133 | struct sk_buff *skb; | ||
1134 | unsigned int channel; | 1173 | unsigned int channel; |
1135 | int ret, skew = 0; | 1174 | int skew = 0; |
1136 | 1175 | ||
1137 | _enter("%p", udp_sk); | 1176 | _enter("%p", udp_sk); |
1138 | 1177 | ||
1139 | ASSERT(!irqs_disabled()); | ||
1140 | |||
1141 | skb = skb_recv_udp(udp_sk, 0, 1, &ret); | ||
1142 | if (!skb) { | ||
1143 | if (ret == -EAGAIN) | ||
1144 | return; | ||
1145 | _debug("UDP socket error %d", ret); | ||
1146 | return; | ||
1147 | } | ||
1148 | |||
1149 | if (skb->tstamp == 0) | 1178 | if (skb->tstamp == 0) |
1150 | skb->tstamp = ktime_get_real(); | 1179 | skb->tstamp = ktime_get_real(); |
1151 | 1180 | ||
1152 | rxrpc_new_skb(skb, rxrpc_skb_rx_received); | 1181 | rxrpc_new_skb(skb, rxrpc_skb_rx_received); |
1153 | 1182 | ||
1154 | _net("recv skb %p", skb); | 1183 | skb_pull(skb, sizeof(struct udphdr)); |
1155 | |||
1156 | /* we'll probably need to checksum it (didn't call sock_recvmsg) */ | ||
1157 | if (skb_checksum_complete(skb)) { | ||
1158 | rxrpc_free_skb(skb, rxrpc_skb_rx_freed); | ||
1159 | __UDP_INC_STATS(&init_net, UDP_MIB_INERRORS, 0); | ||
1160 | _leave(" [CSUM failed]"); | ||
1161 | return; | ||
1162 | } | ||
1163 | |||
1164 | __UDP_INC_STATS(&init_net, UDP_MIB_INDATAGRAMS, 0); | ||
1165 | 1184 | ||
1166 | /* The UDP protocol already released all skb resources; | 1185 | /* The UDP protocol already released all skb resources; |
1167 | * we are free to add our own data there. | 1186 | * we are free to add our own data there. |
@@ -1177,10 +1196,12 @@ void rxrpc_data_ready(struct sock *udp_sk) | |||
1177 | if ((lose++ & 7) == 7) { | 1196 | if ((lose++ & 7) == 7) { |
1178 | trace_rxrpc_rx_lose(sp); | 1197 | trace_rxrpc_rx_lose(sp); |
1179 | rxrpc_free_skb(skb, rxrpc_skb_rx_lost); | 1198 | rxrpc_free_skb(skb, rxrpc_skb_rx_lost); |
1180 | return; | 1199 | return 0; |
1181 | } | 1200 | } |
1182 | } | 1201 | } |
1183 | 1202 | ||
1203 | if (skb->tstamp == 0) | ||
1204 | skb->tstamp = ktime_get_real(); | ||
1184 | trace_rxrpc_rx_packet(sp); | 1205 | trace_rxrpc_rx_packet(sp); |
1185 | 1206 | ||
1186 | switch (sp->hdr.type) { | 1207 | switch (sp->hdr.type) { |
@@ -1234,8 +1255,6 @@ void rxrpc_data_ready(struct sock *udp_sk) | |||
1234 | if (sp->hdr.serviceId == 0) | 1255 | if (sp->hdr.serviceId == 0) |
1235 | goto bad_message; | 1256 | goto bad_message; |
1236 | 1257 | ||
1237 | rcu_read_lock(); | ||
1238 | |||
1239 | if (rxrpc_to_server(sp)) { | 1258 | if (rxrpc_to_server(sp)) { |
1240 | /* Weed out packets to services we're not offering. Packets | 1259 | /* Weed out packets to services we're not offering. Packets |
1241 | * that would begin a call are explicitly rejected and the rest | 1260 | * that would begin a call are explicitly rejected and the rest |
@@ -1247,7 +1266,7 @@ void rxrpc_data_ready(struct sock *udp_sk) | |||
1247 | if (sp->hdr.type == RXRPC_PACKET_TYPE_DATA && | 1266 | if (sp->hdr.type == RXRPC_PACKET_TYPE_DATA && |
1248 | sp->hdr.seq == 1) | 1267 | sp->hdr.seq == 1) |
1249 | goto unsupported_service; | 1268 | goto unsupported_service; |
1250 | goto discard_unlock; | 1269 | goto discard; |
1251 | } | 1270 | } |
1252 | } | 1271 | } |
1253 | 1272 | ||
@@ -1257,17 +1276,23 @@ void rxrpc_data_ready(struct sock *udp_sk) | |||
1257 | goto wrong_security; | 1276 | goto wrong_security; |
1258 | 1277 | ||
1259 | if (sp->hdr.serviceId != conn->service_id) { | 1278 | if (sp->hdr.serviceId != conn->service_id) { |
1260 | if (!test_bit(RXRPC_CONN_PROBING_FOR_UPGRADE, &conn->flags) || | 1279 | int old_id; |
1261 | conn->service_id != conn->params.service_id) | 1280 | |
1281 | if (!test_bit(RXRPC_CONN_PROBING_FOR_UPGRADE, &conn->flags)) | ||
1282 | goto reupgrade; | ||
1283 | old_id = cmpxchg(&conn->service_id, conn->params.service_id, | ||
1284 | sp->hdr.serviceId); | ||
1285 | |||
1286 | if (old_id != conn->params.service_id && | ||
1287 | old_id != sp->hdr.serviceId) | ||
1262 | goto reupgrade; | 1288 | goto reupgrade; |
1263 | conn->service_id = sp->hdr.serviceId; | ||
1264 | } | 1289 | } |
1265 | 1290 | ||
1266 | if (sp->hdr.callNumber == 0) { | 1291 | if (sp->hdr.callNumber == 0) { |
1267 | /* Connection-level packet */ | 1292 | /* Connection-level packet */ |
1268 | _debug("CONN %p {%d}", conn, conn->debug_id); | 1293 | _debug("CONN %p {%d}", conn, conn->debug_id); |
1269 | rxrpc_post_packet_to_conn(conn, skb); | 1294 | rxrpc_post_packet_to_conn(conn, skb); |
1270 | goto out_unlock; | 1295 | goto out; |
1271 | } | 1296 | } |
1272 | 1297 | ||
1273 | /* Note the serial number skew here */ | 1298 | /* Note the serial number skew here */ |
@@ -1286,19 +1311,19 @@ void rxrpc_data_ready(struct sock *udp_sk) | |||
1286 | 1311 | ||
1287 | /* Ignore really old calls */ | 1312 | /* Ignore really old calls */ |
1288 | if (sp->hdr.callNumber < chan->last_call) | 1313 | if (sp->hdr.callNumber < chan->last_call) |
1289 | goto discard_unlock; | 1314 | goto discard; |
1290 | 1315 | ||
1291 | if (sp->hdr.callNumber == chan->last_call) { | 1316 | if (sp->hdr.callNumber == chan->last_call) { |
1292 | if (chan->call || | 1317 | if (chan->call || |
1293 | sp->hdr.type == RXRPC_PACKET_TYPE_ABORT) | 1318 | sp->hdr.type == RXRPC_PACKET_TYPE_ABORT) |
1294 | goto discard_unlock; | 1319 | goto discard; |
1295 | 1320 | ||
1296 | /* For the previous service call, if completed | 1321 | /* For the previous service call, if completed |
1297 | * successfully, we discard all further packets. | 1322 | * successfully, we discard all further packets. |
1298 | */ | 1323 | */ |
1299 | if (rxrpc_conn_is_service(conn) && | 1324 | if (rxrpc_conn_is_service(conn) && |
1300 | chan->last_type == RXRPC_PACKET_TYPE_ACK) | 1325 | chan->last_type == RXRPC_PACKET_TYPE_ACK) |
1301 | goto discard_unlock; | 1326 | goto discard; |
1302 | 1327 | ||
1303 | /* But otherwise we need to retransmit the final packet | 1328 | /* But otherwise we need to retransmit the final packet |
1304 | * from data cached in the connection record. | 1329 | * from data cached in the connection record. |
@@ -1309,18 +1334,16 @@ void rxrpc_data_ready(struct sock *udp_sk) | |||
1309 | sp->hdr.serial, | 1334 | sp->hdr.serial, |
1310 | sp->hdr.flags, 0); | 1335 | sp->hdr.flags, 0); |
1311 | rxrpc_post_packet_to_conn(conn, skb); | 1336 | rxrpc_post_packet_to_conn(conn, skb); |
1312 | goto out_unlock; | 1337 | goto out; |
1313 | } | 1338 | } |
1314 | 1339 | ||
1315 | call = rcu_dereference(chan->call); | 1340 | call = rcu_dereference(chan->call); |
1316 | 1341 | ||
1317 | if (sp->hdr.callNumber > chan->call_id) { | 1342 | if (sp->hdr.callNumber > chan->call_id) { |
1318 | if (rxrpc_to_client(sp)) { | 1343 | if (rxrpc_to_client(sp)) |
1319 | rcu_read_unlock(); | ||
1320 | goto reject_packet; | 1344 | goto reject_packet; |
1321 | } | ||
1322 | if (call) | 1345 | if (call) |
1323 | rxrpc_input_implicit_end_call(conn, call); | 1346 | rxrpc_input_implicit_end_call(rx, conn, call); |
1324 | call = NULL; | 1347 | call = NULL; |
1325 | } | 1348 | } |
1326 | 1349 | ||
@@ -1337,55 +1360,42 @@ void rxrpc_data_ready(struct sock *udp_sk) | |||
1337 | if (!call || atomic_read(&call->usage) == 0) { | 1360 | if (!call || atomic_read(&call->usage) == 0) { |
1338 | if (rxrpc_to_client(sp) || | 1361 | if (rxrpc_to_client(sp) || |
1339 | sp->hdr.type != RXRPC_PACKET_TYPE_DATA) | 1362 | sp->hdr.type != RXRPC_PACKET_TYPE_DATA) |
1340 | goto bad_message_unlock; | 1363 | goto bad_message; |
1341 | if (sp->hdr.seq != 1) | 1364 | if (sp->hdr.seq != 1) |
1342 | goto discard_unlock; | 1365 | goto discard; |
1343 | call = rxrpc_new_incoming_call(local, rx, peer, conn, skb); | 1366 | call = rxrpc_new_incoming_call(local, rx, skb); |
1344 | if (!call) { | 1367 | if (!call) |
1345 | rcu_read_unlock(); | ||
1346 | goto reject_packet; | 1368 | goto reject_packet; |
1347 | } | ||
1348 | rxrpc_send_ping(call, skb, skew); | 1369 | rxrpc_send_ping(call, skb, skew); |
1349 | mutex_unlock(&call->user_mutex); | 1370 | mutex_unlock(&call->user_mutex); |
1350 | } | 1371 | } |
1351 | 1372 | ||
1352 | rxrpc_input_call_packet(call, skb, skew); | 1373 | rxrpc_input_call_packet(call, skb, skew); |
1353 | goto discard_unlock; | 1374 | goto discard; |
1354 | 1375 | ||
1355 | discard_unlock: | ||
1356 | rcu_read_unlock(); | ||
1357 | discard: | 1376 | discard: |
1358 | rxrpc_free_skb(skb, rxrpc_skb_rx_freed); | 1377 | rxrpc_free_skb(skb, rxrpc_skb_rx_freed); |
1359 | out: | 1378 | out: |
1360 | trace_rxrpc_rx_done(0, 0); | 1379 | trace_rxrpc_rx_done(0, 0); |
1361 | return; | 1380 | return 0; |
1362 | |||
1363 | out_unlock: | ||
1364 | rcu_read_unlock(); | ||
1365 | goto out; | ||
1366 | 1381 | ||
1367 | wrong_security: | 1382 | wrong_security: |
1368 | rcu_read_unlock(); | ||
1369 | trace_rxrpc_abort(0, "SEC", sp->hdr.cid, sp->hdr.callNumber, sp->hdr.seq, | 1383 | trace_rxrpc_abort(0, "SEC", sp->hdr.cid, sp->hdr.callNumber, sp->hdr.seq, |
1370 | RXKADINCONSISTENCY, EBADMSG); | 1384 | RXKADINCONSISTENCY, EBADMSG); |
1371 | skb->priority = RXKADINCONSISTENCY; | 1385 | skb->priority = RXKADINCONSISTENCY; |
1372 | goto post_abort; | 1386 | goto post_abort; |
1373 | 1387 | ||
1374 | unsupported_service: | 1388 | unsupported_service: |
1375 | rcu_read_unlock(); | ||
1376 | trace_rxrpc_abort(0, "INV", sp->hdr.cid, sp->hdr.callNumber, sp->hdr.seq, | 1389 | trace_rxrpc_abort(0, "INV", sp->hdr.cid, sp->hdr.callNumber, sp->hdr.seq, |
1377 | RX_INVALID_OPERATION, EOPNOTSUPP); | 1390 | RX_INVALID_OPERATION, EOPNOTSUPP); |
1378 | skb->priority = RX_INVALID_OPERATION; | 1391 | skb->priority = RX_INVALID_OPERATION; |
1379 | goto post_abort; | 1392 | goto post_abort; |
1380 | 1393 | ||
1381 | reupgrade: | 1394 | reupgrade: |
1382 | rcu_read_unlock(); | ||
1383 | trace_rxrpc_abort(0, "UPG", sp->hdr.cid, sp->hdr.callNumber, sp->hdr.seq, | 1395 | trace_rxrpc_abort(0, "UPG", sp->hdr.cid, sp->hdr.callNumber, sp->hdr.seq, |
1384 | RX_PROTOCOL_ERROR, EBADMSG); | 1396 | RX_PROTOCOL_ERROR, EBADMSG); |
1385 | goto protocol_error; | 1397 | goto protocol_error; |
1386 | 1398 | ||
1387 | bad_message_unlock: | ||
1388 | rcu_read_unlock(); | ||
1389 | bad_message: | 1399 | bad_message: |
1390 | trace_rxrpc_abort(0, "BAD", sp->hdr.cid, sp->hdr.callNumber, sp->hdr.seq, | 1400 | trace_rxrpc_abort(0, "BAD", sp->hdr.cid, sp->hdr.callNumber, sp->hdr.seq, |
1391 | RX_PROTOCOL_ERROR, EBADMSG); | 1401 | RX_PROTOCOL_ERROR, EBADMSG); |
@@ -1397,4 +1407,5 @@ reject_packet: | |||
1397 | trace_rxrpc_rx_done(skb->mark, skb->priority); | 1407 | trace_rxrpc_rx_done(skb->mark, skb->priority); |
1398 | rxrpc_reject_packet(local, skb); | 1408 | rxrpc_reject_packet(local, skb); |
1399 | _leave(" [badmsg]"); | 1409 | _leave(" [badmsg]"); |
1410 | return 0; | ||
1400 | } | 1411 | } |
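
As I read the input.c rework, rxrpc_rotate_tx_window() now reports whether it retired the final Tx packet, and callers act on that return value instead of re-testing RXRPC_CALL_TX_LAST afterwards, which another context feeding packets in off the UDP receive path could have set in the meantime. Roughly, and with simplified hypothetical helpers:

/* Simplified sketch of the "report what this call rotated" idea. */
static bool rotate_tx_window(struct call *c, unsigned int to)
{
	bool rotated_last = false;

	spin_lock(&c->lock);
	while (c->tx_hard_ack != to) {
		c->tx_hard_ack++;
		if (annotation_is_last(c, c->tx_hard_ack))
			rotated_last = true;
	}
	spin_unlock(&c->lock);
	return rotated_last;
}

/* Caller decides on its own observation, not on a shared flag: */
if (after(hard_ack, c->tx_hard_ack) && rotate_tx_window(c, hard_ack))
	end_tx_phase(c);
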
diff --git a/net/rxrpc/local_object.c b/net/rxrpc/local_object.c index 94d234e9c685..cad0691c2bb4 100644 --- a/net/rxrpc/local_object.c +++ b/net/rxrpc/local_object.c | |||
@@ -19,6 +19,7 @@ | |||
19 | #include <linux/ip.h> | 19 | #include <linux/ip.h> |
20 | #include <linux/hashtable.h> | 20 | #include <linux/hashtable.h> |
21 | #include <net/sock.h> | 21 | #include <net/sock.h> |
22 | #include <net/udp.h> | ||
22 | #include <net/af_rxrpc.h> | 23 | #include <net/af_rxrpc.h> |
23 | #include "ar-internal.h" | 24 | #include "ar-internal.h" |
24 | 25 | ||
@@ -108,7 +109,7 @@ static struct rxrpc_local *rxrpc_alloc_local(struct rxrpc_net *rxnet, | |||
108 | */ | 109 | */ |
109 | static int rxrpc_open_socket(struct rxrpc_local *local, struct net *net) | 110 | static int rxrpc_open_socket(struct rxrpc_local *local, struct net *net) |
110 | { | 111 | { |
111 | struct sock *sock; | 112 | struct sock *usk; |
112 | int ret, opt; | 113 | int ret, opt; |
113 | 114 | ||
114 | _enter("%p{%d,%d}", | 115 | _enter("%p{%d,%d}", |
@@ -122,6 +123,28 @@ static int rxrpc_open_socket(struct rxrpc_local *local, struct net *net) | |||
122 | return ret; | 123 | return ret; |
123 | } | 124 | } |
124 | 125 | ||
126 | /* set the socket up */ | ||
127 | usk = local->socket->sk; | ||
128 | inet_sk(usk)->mc_loop = 0; | ||
129 | |||
130 | /* Enable CHECKSUM_UNNECESSARY to CHECKSUM_COMPLETE conversion */ | ||
131 | inet_inc_convert_csum(usk); | ||
132 | |||
133 | rcu_assign_sk_user_data(usk, local); | ||
134 | |||
135 | udp_sk(usk)->encap_type = UDP_ENCAP_RXRPC; | ||
136 | udp_sk(usk)->encap_rcv = rxrpc_input_packet; | ||
137 | udp_sk(usk)->encap_destroy = NULL; | ||
138 | udp_sk(usk)->gro_receive = NULL; | ||
139 | udp_sk(usk)->gro_complete = NULL; | ||
140 | |||
141 | udp_encap_enable(); | ||
142 | #if IS_ENABLED(CONFIG_IPV6) | ||
143 | if (local->srx.transport.family == AF_INET6) | ||
144 | udpv6_encap_enable(); | ||
145 | #endif | ||
146 | usk->sk_error_report = rxrpc_error_report; | ||
147 | |||
125 | /* if a local address was supplied then bind it */ | 148 | /* if a local address was supplied then bind it */ |
126 | if (local->srx.transport_len > sizeof(sa_family_t)) { | 149 | if (local->srx.transport_len > sizeof(sa_family_t)) { |
127 | _debug("bind"); | 150 | _debug("bind"); |
@@ -191,11 +214,6 @@ static int rxrpc_open_socket(struct rxrpc_local *local, struct net *net) | |||
191 | BUG(); | 214 | BUG(); |
192 | } | 215 | } |
193 | 216 | ||
194 | /* set the socket up */ | ||
195 | sock = local->socket->sk; | ||
196 | sock->sk_user_data = local; | ||
197 | sock->sk_data_ready = rxrpc_data_ready; | ||
198 | sock->sk_error_report = rxrpc_error_report; | ||
199 | _leave(" = 0"); | 217 | _leave(" = 0"); |
200 | return 0; | 218 | return 0; |
201 | 219 | ||
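
The local socket is now wired up as a UDP encapsulation socket, so packets are delivered straight to rxrpc_input_packet() from the UDP receive path rather than via sk_data_ready and a later skb_recv_udp(). A generic, hedged sketch of that kernel pattern follows; the "foo" names are made up, and, as I understand the encap_rcv contract, returning 0 tells UDP the skb was consumed, with the UDP header still on the skb when the hook runs (hence the skb_pull() seen in input.c above).

/* Generic sketch of a UDP encap receive hook; not the rxrpc code. */
static int foo_encap_rcv(struct sock *sk, struct sk_buff *skb)
{
	struct foo_local *local = sk->sk_user_data;

	__skb_pull(skb, sizeof(struct udphdr));	/* UDP header still present here */
	foo_handle_packet(local, skb);
	return 0;				/* 0: consumed, don't pass on to UDP proper */
}

static void foo_setup_encap(struct socket *sock, struct foo_local *local)
{
	struct sock *sk = sock->sk;

	rcu_assign_sk_user_data(sk, local);
	udp_sk(sk)->encap_type = 1;		/* a non-zero encap type; rxrpc defines its own */
	udp_sk(sk)->encap_rcv = foo_encap_rcv;
	udp_encap_enable();
}
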
diff --git a/net/rxrpc/peer_event.c b/net/rxrpc/peer_event.c index 81a7869325a6..7feb611c7258 100644 --- a/net/rxrpc/peer_event.c +++ b/net/rxrpc/peer_event.c | |||
@@ -303,6 +303,8 @@ void rxrpc_peer_add_rtt(struct rxrpc_call *call, enum rxrpc_rtt_rx_trace why, | |||
303 | if (rtt < 0) | 303 | if (rtt < 0) |
304 | return; | 304 | return; |
305 | 305 | ||
306 | spin_lock(&peer->rtt_input_lock); | ||
307 | |||
306 | /* Replace the oldest datum in the RTT buffer */ | 308 | /* Replace the oldest datum in the RTT buffer */ |
307 | sum -= peer->rtt_cache[cursor]; | 309 | sum -= peer->rtt_cache[cursor]; |
308 | sum += rtt; | 310 | sum += rtt; |
@@ -314,6 +316,8 @@ void rxrpc_peer_add_rtt(struct rxrpc_call *call, enum rxrpc_rtt_rx_trace why, | |||
314 | peer->rtt_usage = usage; | 316 | peer->rtt_usage = usage; |
315 | } | 317 | } |
316 | 318 | ||
319 | spin_unlock(&peer->rtt_input_lock); | ||
320 | |||
317 | /* Now recalculate the average */ | 321 | /* Now recalculate the average */ |
318 | if (usage == RXRPC_RTT_CACHE_SIZE) { | 322 | if (usage == RXRPC_RTT_CACHE_SIZE) { |
319 | avg = sum / RXRPC_RTT_CACHE_SIZE; | 323 | avg = sum / RXRPC_RTT_CACHE_SIZE; |
@@ -322,6 +326,7 @@ void rxrpc_peer_add_rtt(struct rxrpc_call *call, enum rxrpc_rtt_rx_trace why, | |||
322 | do_div(avg, usage); | 326 | do_div(avg, usage); |
323 | } | 327 | } |
324 | 328 | ||
329 | /* Don't need to update this under lock */ | ||
325 | peer->rtt = avg; | 330 | peer->rtt = avg; |
326 | trace_rxrpc_rtt_rx(call, why, send_serial, resp_serial, rtt, | 331 | trace_rxrpc_rtt_rx(call, why, send_serial, resp_serial, rtt, |
327 | usage, avg); | 332 | usage, avg); |
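
The RTT cache is a small ring of recent samples plus a running sum; the new rtt_input_lock serializes writers so two CPUs completing calls at once cannot corrupt the sum/cursor pair, while the derived average, written last, tolerates being updated outside the lock (per the comment above). A rough sketch of the same bookkeeping, with illustrative names and sizes:

/* Rough sketch of the locked rolling-RTT bookkeeping. */
#define RTT_CACHE_SIZE 8

struct rtt_stats {
	spinlock_t lock;
	u64 cache[RTT_CACHE_SIZE];
	u64 sum;
	unsigned int cursor;
	unsigned int usage;
	u64 avg;			/* read locklessly by consumers */
};

static void rtt_add_sample(struct rtt_stats *s, u64 rtt)
{
	u64 sum;
	unsigned int usage;

	spin_lock(&s->lock);
	s->sum -= s->cache[s->cursor];	/* retire the oldest sample */
	s->sum += rtt;
	s->cache[s->cursor] = rtt;
	s->cursor = (s->cursor + 1) % RTT_CACHE_SIZE;
	if (s->usage < RTT_CACHE_SIZE)
		s->usage++;
	sum = s->sum;
	usage = s->usage;
	spin_unlock(&s->lock);

	s->avg = div64_u64(sum, usage);	/* single u64 store; fine outside the lock */
}
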
diff --git a/net/rxrpc/peer_object.c b/net/rxrpc/peer_object.c index 01a9febfa367..5691b7d266ca 100644 --- a/net/rxrpc/peer_object.c +++ b/net/rxrpc/peer_object.c | |||
@@ -153,8 +153,10 @@ struct rxrpc_peer *rxrpc_lookup_peer_rcu(struct rxrpc_local *local, | |||
153 | * assess the MTU size for the network interface through which this peer is | 153 | * assess the MTU size for the network interface through which this peer is |
154 | * reached | 154 | * reached |
155 | */ | 155 | */ |
156 | static void rxrpc_assess_MTU_size(struct rxrpc_peer *peer) | 156 | static void rxrpc_assess_MTU_size(struct rxrpc_sock *rx, |
157 | struct rxrpc_peer *peer) | ||
157 | { | 158 | { |
159 | struct net *net = sock_net(&rx->sk); | ||
158 | struct dst_entry *dst; | 160 | struct dst_entry *dst; |
159 | struct rtable *rt; | 161 | struct rtable *rt; |
160 | struct flowi fl; | 162 | struct flowi fl; |
@@ -169,7 +171,7 @@ static void rxrpc_assess_MTU_size(struct rxrpc_peer *peer) | |||
169 | switch (peer->srx.transport.family) { | 171 | switch (peer->srx.transport.family) { |
170 | case AF_INET: | 172 | case AF_INET: |
171 | rt = ip_route_output_ports( | 173 | rt = ip_route_output_ports( |
172 | &init_net, fl4, NULL, | 174 | net, fl4, NULL, |
173 | peer->srx.transport.sin.sin_addr.s_addr, 0, | 175 | peer->srx.transport.sin.sin_addr.s_addr, 0, |
174 | htons(7000), htons(7001), IPPROTO_UDP, 0, 0); | 176 | htons(7000), htons(7001), IPPROTO_UDP, 0, 0); |
175 | if (IS_ERR(rt)) { | 177 | if (IS_ERR(rt)) { |
@@ -188,7 +190,7 @@ static void rxrpc_assess_MTU_size(struct rxrpc_peer *peer) | |||
188 | sizeof(struct in6_addr)); | 190 | sizeof(struct in6_addr)); |
189 | fl6->fl6_dport = htons(7001); | 191 | fl6->fl6_dport = htons(7001); |
190 | fl6->fl6_sport = htons(7000); | 192 | fl6->fl6_sport = htons(7000); |
191 | dst = ip6_route_output(&init_net, NULL, fl6); | 193 | dst = ip6_route_output(net, NULL, fl6); |
192 | if (dst->error) { | 194 | if (dst->error) { |
193 | _leave(" [route err %d]", dst->error); | 195 | _leave(" [route err %d]", dst->error); |
194 | return; | 196 | return; |
@@ -223,6 +225,7 @@ struct rxrpc_peer *rxrpc_alloc_peer(struct rxrpc_local *local, gfp_t gfp) | |||
223 | peer->service_conns = RB_ROOT; | 225 | peer->service_conns = RB_ROOT; |
224 | seqlock_init(&peer->service_conn_lock); | 226 | seqlock_init(&peer->service_conn_lock); |
225 | spin_lock_init(&peer->lock); | 227 | spin_lock_init(&peer->lock); |
228 | spin_lock_init(&peer->rtt_input_lock); | ||
226 | peer->debug_id = atomic_inc_return(&rxrpc_debug_id); | 229 | peer->debug_id = atomic_inc_return(&rxrpc_debug_id); |
227 | 230 | ||
228 | if (RXRPC_TX_SMSS > 2190) | 231 | if (RXRPC_TX_SMSS > 2190) |
@@ -240,10 +243,11 @@ struct rxrpc_peer *rxrpc_alloc_peer(struct rxrpc_local *local, gfp_t gfp) | |||
240 | /* | 243 | /* |
241 | * Initialise peer record. | 244 | * Initialise peer record. |
242 | */ | 245 | */ |
243 | static void rxrpc_init_peer(struct rxrpc_peer *peer, unsigned long hash_key) | 246 | static void rxrpc_init_peer(struct rxrpc_sock *rx, struct rxrpc_peer *peer, |
247 | unsigned long hash_key) | ||
244 | { | 248 | { |
245 | peer->hash_key = hash_key; | 249 | peer->hash_key = hash_key; |
246 | rxrpc_assess_MTU_size(peer); | 250 | rxrpc_assess_MTU_size(rx, peer); |
247 | peer->mtu = peer->if_mtu; | 251 | peer->mtu = peer->if_mtu; |
248 | peer->rtt_last_req = ktime_get_real(); | 252 | peer->rtt_last_req = ktime_get_real(); |
249 | 253 | ||
@@ -275,7 +279,8 @@ static void rxrpc_init_peer(struct rxrpc_peer *peer, unsigned long hash_key) | |||
275 | /* | 279 | /* |
276 | * Set up a new peer. | 280 | * Set up a new peer. |
277 | */ | 281 | */ |
278 | static struct rxrpc_peer *rxrpc_create_peer(struct rxrpc_local *local, | 282 | static struct rxrpc_peer *rxrpc_create_peer(struct rxrpc_sock *rx, |
283 | struct rxrpc_local *local, | ||
279 | struct sockaddr_rxrpc *srx, | 284 | struct sockaddr_rxrpc *srx, |
280 | unsigned long hash_key, | 285 | unsigned long hash_key, |
281 | gfp_t gfp) | 286 | gfp_t gfp) |
@@ -287,7 +292,7 @@ static struct rxrpc_peer *rxrpc_create_peer(struct rxrpc_local *local, | |||
287 | peer = rxrpc_alloc_peer(local, gfp); | 292 | peer = rxrpc_alloc_peer(local, gfp); |
288 | if (peer) { | 293 | if (peer) { |
289 | memcpy(&peer->srx, srx, sizeof(*srx)); | 294 | memcpy(&peer->srx, srx, sizeof(*srx)); |
290 | rxrpc_init_peer(peer, hash_key); | 295 | rxrpc_init_peer(rx, peer, hash_key); |
291 | } | 296 | } |
292 | 297 | ||
293 | _leave(" = %p", peer); | 298 | _leave(" = %p", peer); |
@@ -299,14 +304,15 @@ static struct rxrpc_peer *rxrpc_create_peer(struct rxrpc_local *local, | |||
299 | * since we've already done a search in the list from the non-reentrant context | 304 | * since we've already done a search in the list from the non-reentrant context |
300 | * (the data_ready handler) that is the only place we can add new peers. | 305 | * (the data_ready handler) that is the only place we can add new peers. |
301 | */ | 306 | */ |
302 | void rxrpc_new_incoming_peer(struct rxrpc_local *local, struct rxrpc_peer *peer) | 307 | void rxrpc_new_incoming_peer(struct rxrpc_sock *rx, struct rxrpc_local *local, |
308 | struct rxrpc_peer *peer) | ||
303 | { | 309 | { |
304 | struct rxrpc_net *rxnet = local->rxnet; | 310 | struct rxrpc_net *rxnet = local->rxnet; |
305 | unsigned long hash_key; | 311 | unsigned long hash_key; |
306 | 312 | ||
307 | hash_key = rxrpc_peer_hash_key(local, &peer->srx); | 313 | hash_key = rxrpc_peer_hash_key(local, &peer->srx); |
308 | peer->local = local; | 314 | peer->local = local; |
309 | rxrpc_init_peer(peer, hash_key); | 315 | rxrpc_init_peer(rx, peer, hash_key); |
310 | 316 | ||
311 | spin_lock(&rxnet->peer_hash_lock); | 317 | spin_lock(&rxnet->peer_hash_lock); |
312 | hash_add_rcu(rxnet->peer_hash, &peer->hash_link, hash_key); | 318 | hash_add_rcu(rxnet->peer_hash, &peer->hash_link, hash_key); |
@@ -317,7 +323,8 @@ void rxrpc_new_incoming_peer(struct rxrpc_local *local, struct rxrpc_peer *peer) | |||
317 | /* | 323 | /* |
318 | * obtain a remote transport endpoint for the specified address | 324 | * obtain a remote transport endpoint for the specified address |
319 | */ | 325 | */ |
320 | struct rxrpc_peer *rxrpc_lookup_peer(struct rxrpc_local *local, | 326 | struct rxrpc_peer *rxrpc_lookup_peer(struct rxrpc_sock *rx, |
327 | struct rxrpc_local *local, | ||
321 | struct sockaddr_rxrpc *srx, gfp_t gfp) | 328 | struct sockaddr_rxrpc *srx, gfp_t gfp) |
322 | { | 329 | { |
323 | struct rxrpc_peer *peer, *candidate; | 330 | struct rxrpc_peer *peer, *candidate; |
@@ -337,7 +344,7 @@ struct rxrpc_peer *rxrpc_lookup_peer(struct rxrpc_local *local, | |||
337 | /* The peer is not yet present in hash - create a candidate | 344 | /* The peer is not yet present in hash - create a candidate |
338 | * for a new record and then redo the search. | 345 | * for a new record and then redo the search. |
339 | */ | 346 | */ |
340 | candidate = rxrpc_create_peer(local, srx, hash_key, gfp); | 347 | candidate = rxrpc_create_peer(rx, local, srx, hash_key, gfp); |
341 | if (!candidate) { | 348 | if (!candidate) { |
342 | _leave(" = NULL [nomem]"); | 349 | _leave(" = NULL [nomem]"); |
343 | return NULL; | 350 | return NULL; |
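
Threading the socket down to rxrpc_assess_MTU_size() lets the route lookup use sock_net(&rx->sk) instead of the hard-coded init_net, so peers created inside a network namespace probe the correct routing table. A small hedged example of a namespace-aware IPv4 lookup; probe_path_mtu_v4() is an assumed helper, not the rxrpc function.

/* Sketch: derive the netns from the socket rather than assuming init_net. */
static u32 probe_path_mtu_v4(struct sock *sk, __be32 daddr)
{
	struct net *net = sock_net(sk);
	struct flowi4 fl4 = { .daddr = daddr };
	struct rtable *rt;
	u32 mtu = 0;

	rt = ip_route_output_key(net, &fl4);
	if (!IS_ERR(rt)) {
		mtu = dst_mtu(&rt->dst);
		ip_rt_put(rt);
	}
	return mtu;
}
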
diff --git a/net/sched/cls_u32.c b/net/sched/cls_u32.c index ac79a40a0392..4b28fd44576d 100644 --- a/net/sched/cls_u32.c +++ b/net/sched/cls_u32.c | |||
@@ -391,6 +391,7 @@ static int u32_init(struct tcf_proto *tp) | |||
391 | RCU_INIT_POINTER(root_ht->next, tp_c->hlist); | 391 | RCU_INIT_POINTER(root_ht->next, tp_c->hlist); |
392 | rcu_assign_pointer(tp_c->hlist, root_ht); | 392 | rcu_assign_pointer(tp_c->hlist, root_ht); |
393 | 393 | ||
394 | root_ht->refcnt++; | ||
394 | rcu_assign_pointer(tp->root, root_ht); | 395 | rcu_assign_pointer(tp->root, root_ht); |
395 | tp->data = tp_c; | 396 | tp->data = tp_c; |
396 | return 0; | 397 | return 0; |
@@ -606,7 +607,7 @@ static int u32_destroy_hnode(struct tcf_proto *tp, struct tc_u_hnode *ht, | |||
606 | struct tc_u_hnode __rcu **hn; | 607 | struct tc_u_hnode __rcu **hn; |
607 | struct tc_u_hnode *phn; | 608 | struct tc_u_hnode *phn; |
608 | 609 | ||
609 | WARN_ON(ht->refcnt); | 610 | WARN_ON(--ht->refcnt); |
610 | 611 | ||
611 | u32_clear_hnode(tp, ht, extack); | 612 | u32_clear_hnode(tp, ht, extack); |
612 | 613 | ||
@@ -634,7 +635,7 @@ static void u32_destroy(struct tcf_proto *tp, struct netlink_ext_ack *extack) | |||
634 | 635 | ||
635 | WARN_ON(root_ht == NULL); | 636 | WARN_ON(root_ht == NULL); |
636 | 637 | ||
637 | if (root_ht && --root_ht->refcnt == 0) | 638 | if (root_ht && --root_ht->refcnt == 1) |
638 | u32_destroy_hnode(tp, root_ht, extack); | 639 | u32_destroy_hnode(tp, root_ht, extack); |
639 | 640 | ||
640 | if (--tp_c->refcnt == 0) { | 641 | if (--tp_c->refcnt == 0) { |
@@ -679,7 +680,6 @@ static int u32_delete(struct tcf_proto *tp, void *arg, bool *last, | |||
679 | } | 680 | } |
680 | 681 | ||
681 | if (ht->refcnt == 1) { | 682 | if (ht->refcnt == 1) { |
682 | ht->refcnt--; | ||
683 | u32_destroy_hnode(tp, ht, extack); | 683 | u32_destroy_hnode(tp, ht, extack); |
684 | } else { | 684 | } else { |
685 | NL_SET_ERR_MSG_MOD(extack, "Can not delete in-use filter"); | 685 | NL_SET_ERR_MSG_MOD(extack, "Can not delete in-use filter"); |
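
The cls_u32 hunks rework the hnode reference counting so that tp->root holds its own reference on the root hnode: u32_init() takes the extra reference, u32_destroy() tears the root down once only the list reference remains (count of 1 after dropping tp->root's), the WARN_ON in u32_destroy_hnode() checks the post-decrement value, and u32_delete() no longer decrements before destroying. Below is a minimal sketch of the underlying idiom, one reference per pointer that can reach the object; it assumes nothing about cls_u32 beyond what the hunks show.

/* Illustrative refcount idiom: every holder that can still reach the
 * object keeps its own reference, so teardown order cannot free the
 * object while another path may still dereference it. */
#include <stdio.h>
#include <stdlib.h>

struct hnode {
    int refcnt;
};

static void hnode_get(struct hnode *h)
{
    h->refcnt++;
}

static void hnode_put(struct hnode *h)
{
    if (--h->refcnt == 0) {
        printf("freeing hnode\n");
        free(h);
    }
}

int main(void)
{
    struct hnode *h = calloc(1, sizeof(*h));

    if (!h)
        return 1;

    hnode_get(h);   /* reference held by the hash list      */
    hnode_get(h);   /* extra reference held by the root ptr */

    hnode_put(h);   /* root pointer dropped: object survives */
    hnode_put(h);   /* list reference dropped: freed now      */
    return 0;
}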
diff --git a/net/sched/sch_cake.c b/net/sched/sch_cake.c index dc539295ae65..b910cd5c56f7 100644 --- a/net/sched/sch_cake.c +++ b/net/sched/sch_cake.c | |||
@@ -2644,7 +2644,7 @@ static int cake_init(struct Qdisc *sch, struct nlattr *opt, | |||
2644 | for (i = 1; i <= CAKE_QUEUES; i++) | 2644 | for (i = 1; i <= CAKE_QUEUES; i++) |
2645 | quantum_div[i] = 65535 / i; | 2645 | quantum_div[i] = 65535 / i; |
2646 | 2646 | ||
2647 | q->tins = kvzalloc(CAKE_MAX_TINS * sizeof(struct cake_tin_data), | 2647 | q->tins = kvcalloc(CAKE_MAX_TINS, sizeof(struct cake_tin_data), |
2648 | GFP_KERNEL); | 2648 | GFP_KERNEL); |
2649 | if (!q->tins) | 2649 | if (!q->tins) |
2650 | goto nomem; | 2650 | goto nomem; |
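
cake_init() switches from kvzalloc(CAKE_MAX_TINS * sizeof(...)) to kvcalloc(CAKE_MAX_TINS, sizeof(...)), i.e. to an allocator form that checks the count * size multiplication for overflow rather than leaving it to the caller. Here is a small userspace sketch of the same idea, with calloc() and an explicit check standing in for the kernel's kvcalloc(); the values are only illustrative.

/* Illustrative only: prefer calloc-style array allocation so that an
 * overflowing count * size is rejected instead of silently wrapping
 * and returning a too-small buffer. */
#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

static void *alloc_array(size_t n, size_t size)
{
    if (size && n > SIZE_MAX / size) {   /* the check kvcalloc() does for us */
        errno = ENOMEM;
        return NULL;
    }
    return calloc(n, size);              /* calloc() checks again anyway */
}

int main(void)
{
    /* A sane request succeeds... */
    void *p = alloc_array(8, 128);
    printf("small array: %s\n", p ? "ok" : "failed");
    free(p);

    /* ...while an overflowing count * size is rejected. */
    p = alloc_array(SIZE_MAX / 2, 16);
    printf("overflowing array: %s\n", p ? "ok (bug!)" : "rejected");
    return 0;
}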
diff --git a/net/tipc/link.c b/net/tipc/link.c index fb886b525d95..f6552e4f4b43 100644 --- a/net/tipc/link.c +++ b/net/tipc/link.c | |||
@@ -477,6 +477,8 @@ bool tipc_link_create(struct net *net, char *if_name, int bearer_id, | |||
477 | l->in_session = false; | 477 | l->in_session = false; |
478 | l->bearer_id = bearer_id; | 478 | l->bearer_id = bearer_id; |
479 | l->tolerance = tolerance; | 479 | l->tolerance = tolerance; |
480 | if (bc_rcvlink) | ||
481 | bc_rcvlink->tolerance = tolerance; | ||
480 | l->net_plane = net_plane; | 482 | l->net_plane = net_plane; |
481 | l->advertised_mtu = mtu; | 483 | l->advertised_mtu = mtu; |
482 | l->mtu = mtu; | 484 | l->mtu = mtu; |
@@ -843,14 +845,21 @@ static void link_prepare_wakeup(struct tipc_link *l) | |||
843 | 845 | ||
844 | void tipc_link_reset(struct tipc_link *l) | 846 | void tipc_link_reset(struct tipc_link *l) |
845 | { | 847 | { |
848 | struct sk_buff_head list; | ||
849 | |||
850 | __skb_queue_head_init(&list); | ||
851 | |||
846 | l->in_session = false; | 852 | l->in_session = false; |
847 | l->session++; | 853 | l->session++; |
848 | l->mtu = l->advertised_mtu; | 854 | l->mtu = l->advertised_mtu; |
855 | |||
849 | spin_lock_bh(&l->wakeupq.lock); | 856 | spin_lock_bh(&l->wakeupq.lock); |
857 | skb_queue_splice_init(&l->wakeupq, &list); | ||
858 | spin_unlock_bh(&l->wakeupq.lock); | ||
859 | |||
850 | spin_lock_bh(&l->inputq->lock); | 860 | spin_lock_bh(&l->inputq->lock); |
851 | skb_queue_splice_init(&l->wakeupq, l->inputq); | 861 | skb_queue_splice_init(&list, l->inputq); |
852 | spin_unlock_bh(&l->inputq->lock); | 862 | spin_unlock_bh(&l->inputq->lock); |
853 | spin_unlock_bh(&l->wakeupq.lock); | ||
854 | 863 | ||
855 | __skb_queue_purge(&l->transmq); | 864 | __skb_queue_purge(&l->transmq); |
856 | __skb_queue_purge(&l->deferdq); | 865 | __skb_queue_purge(&l->deferdq); |
@@ -1031,7 +1040,7 @@ static int tipc_link_retrans(struct tipc_link *l, struct tipc_link *r, | |||
1031 | /* Detect repeated retransmit failures on same packet */ | 1040 | /* Detect repeated retransmit failures on same packet */ |
1032 | if (r->last_retransm != buf_seqno(skb)) { | 1041 | if (r->last_retransm != buf_seqno(skb)) { |
1033 | r->last_retransm = buf_seqno(skb); | 1042 | r->last_retransm = buf_seqno(skb); |
1034 | r->stale_limit = jiffies + msecs_to_jiffies(l->tolerance); | 1043 | r->stale_limit = jiffies + msecs_to_jiffies(r->tolerance); |
1035 | } else if (++r->stale_cnt > 99 && time_after(jiffies, r->stale_limit)) { | 1044 | } else if (++r->stale_cnt > 99 && time_after(jiffies, r->stale_limit)) { |
1036 | link_retransmit_failure(l, skb); | 1045 | link_retransmit_failure(l, skb); |
1037 | if (link_is_bc_sndlink(l)) | 1046 | if (link_is_bc_sndlink(l)) |
@@ -1576,9 +1585,10 @@ static int tipc_link_proto_rcv(struct tipc_link *l, struct sk_buff *skb, | |||
1576 | strncpy(if_name, data, TIPC_MAX_IF_NAME); | 1585 | strncpy(if_name, data, TIPC_MAX_IF_NAME); |
1577 | 1586 | ||
1578 | /* Update own tolerance if peer indicates a non-zero value */ | 1587 | /* Update own tolerance if peer indicates a non-zero value */ |
1579 | if (in_range(peers_tol, TIPC_MIN_LINK_TOL, TIPC_MAX_LINK_TOL)) | 1588 | if (in_range(peers_tol, TIPC_MIN_LINK_TOL, TIPC_MAX_LINK_TOL)) { |
1580 | l->tolerance = peers_tol; | 1589 | l->tolerance = peers_tol; |
1581 | 1590 | l->bc_rcvlink->tolerance = peers_tol; | |
1591 | } | ||
1582 | /* Update own priority if peer's priority is higher */ | 1592 | /* Update own priority if peer's priority is higher */ |
1583 | if (in_range(peers_prio, l->priority + 1, TIPC_MAX_LINK_PRI)) | 1593 | if (in_range(peers_prio, l->priority + 1, TIPC_MAX_LINK_PRI)) |
1584 | l->priority = peers_prio; | 1594 | l->priority = peers_prio; |
@@ -1604,9 +1614,10 @@ static int tipc_link_proto_rcv(struct tipc_link *l, struct sk_buff *skb, | |||
1604 | l->rcv_nxt_state = msg_seqno(hdr) + 1; | 1614 | l->rcv_nxt_state = msg_seqno(hdr) + 1; |
1605 | 1615 | ||
1606 | /* Update own tolerance if peer indicates a non-zero value */ | 1616 | /* Update own tolerance if peer indicates a non-zero value */ |
1607 | if (in_range(peers_tol, TIPC_MIN_LINK_TOL, TIPC_MAX_LINK_TOL)) | 1617 | if (in_range(peers_tol, TIPC_MIN_LINK_TOL, TIPC_MAX_LINK_TOL)) { |
1608 | l->tolerance = peers_tol; | 1618 | l->tolerance = peers_tol; |
1609 | 1619 | l->bc_rcvlink->tolerance = peers_tol; | |
1620 | } | ||
1610 | /* Update own prio if peer indicates a different value */ | 1621 | /* Update own prio if peer indicates a different value */ |
1611 | if ((peers_prio != l->priority) && | 1622 | if ((peers_prio != l->priority) && |
1612 | in_range(peers_prio, 1, TIPC_MAX_LINK_PRI)) { | 1623 | in_range(peers_prio, 1, TIPC_MAX_LINK_PRI)) { |
@@ -2223,6 +2234,8 @@ void tipc_link_set_tolerance(struct tipc_link *l, u32 tol, | |||
2223 | struct sk_buff_head *xmitq) | 2234 | struct sk_buff_head *xmitq) |
2224 | { | 2235 | { |
2225 | l->tolerance = tol; | 2236 | l->tolerance = tol; |
2237 | if (l->bc_rcvlink) | ||
2238 | l->bc_rcvlink->tolerance = tol; | ||
2226 | if (link_is_up(l)) | 2239 | if (link_is_up(l)) |
2227 | tipc_link_build_proto_msg(l, STATE_MSG, 0, 0, 0, tol, 0, xmitq); | 2240 | tipc_link_build_proto_msg(l, STATE_MSG, 0, 0, 0, tol, 0, xmitq); |
2228 | } | 2241 | } |
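
Two things happen in the tipc/link.c hunks: the broadcast receive link's tolerance is kept in sync with the configured/peer-advertised value (so the retransmit stale_limit can be computed from r->tolerance), and tipc_link_reset() stops nesting the wakeupq and inputq locks, instead splicing the wakeup queue into a local list under its own lock and only then appending that list to the input queue under the input queue's lock. The sketch below shows only that lock-splitting pattern, with pthread mutexes standing in for the kernel spinlocks and a plain array standing in for sk_buff_head; compile with -pthread.

/* Illustrative only: drain the source queue into a private list under
 * its own lock, release it, then append to the destination under the
 * destination's lock, so no two queue locks are ever held at once. */
#include <pthread.h>
#include <stdio.h>

#define QLEN 16

struct queue {
    pthread_mutex_t lock;
    int items[QLEN];
    int n;
};

static void queue_splice_init(struct queue *q, int *dst, int *dst_n)
{
    pthread_mutex_lock(&q->lock);
    for (int i = 0; i < q->n; i++)
        dst[(*dst_n)++] = q->items[i];
    q->n = 0;
    pthread_mutex_unlock(&q->lock);
}

static void queue_append(struct queue *q, const int *src, int src_n)
{
    pthread_mutex_lock(&q->lock);
    for (int i = 0; i < src_n; i++)
        q->items[q->n++] = src[i];
    pthread_mutex_unlock(&q->lock);
}

int main(void)
{
    struct queue wakeupq = { PTHREAD_MUTEX_INITIALIZER, {1, 2, 3}, 3 };
    struct queue inputq  = { PTHREAD_MUTEX_INITIALIZER, {9}, 1 };
    int tmp[QLEN], tmp_n = 0;

    /* Only one queue lock is held at any point in time. */
    queue_splice_init(&wakeupq, tmp, &tmp_n);
    queue_append(&inputq, tmp, tmp_n);

    printf("inputq now holds %d items\n", inputq.n);
    return 0;
}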
diff --git a/net/tipc/socket.c b/net/tipc/socket.c index db148c4a916a..de09f514428c 100644 --- a/net/tipc/socket.c +++ b/net/tipc/socket.c | |||
@@ -1198,6 +1198,7 @@ void tipc_sk_mcast_rcv(struct net *net, struct sk_buff_head *arrvq, | |||
1198 | * @skb: pointer to message buffer. | 1198 | * @skb: pointer to message buffer. |
1199 | */ | 1199 | */ |
1200 | static void tipc_sk_conn_proto_rcv(struct tipc_sock *tsk, struct sk_buff *skb, | 1200 | static void tipc_sk_conn_proto_rcv(struct tipc_sock *tsk, struct sk_buff *skb, |
1201 | struct sk_buff_head *inputq, | ||
1201 | struct sk_buff_head *xmitq) | 1202 | struct sk_buff_head *xmitq) |
1202 | { | 1203 | { |
1203 | struct tipc_msg *hdr = buf_msg(skb); | 1204 | struct tipc_msg *hdr = buf_msg(skb); |
@@ -1215,7 +1216,16 @@ static void tipc_sk_conn_proto_rcv(struct tipc_sock *tsk, struct sk_buff *skb, | |||
1215 | tipc_node_remove_conn(sock_net(sk), tsk_peer_node(tsk), | 1216 | tipc_node_remove_conn(sock_net(sk), tsk_peer_node(tsk), |
1216 | tsk_peer_port(tsk)); | 1217 | tsk_peer_port(tsk)); |
1217 | sk->sk_state_change(sk); | 1218 | sk->sk_state_change(sk); |
1218 | goto exit; | 1219 | |
1220 | /* State change is ignored if socket already awake, | ||
1221 | * - convert msg to abort msg and add to inqueue | ||
1222 | */ | ||
1223 | msg_set_user(hdr, TIPC_CRITICAL_IMPORTANCE); | ||
1224 | msg_set_type(hdr, TIPC_CONN_MSG); | ||
1225 | msg_set_size(hdr, BASIC_H_SIZE); | ||
1226 | msg_set_hdr_sz(hdr, BASIC_H_SIZE); | ||
1227 | __skb_queue_tail(inputq, skb); | ||
1228 | return; | ||
1219 | } | 1229 | } |
1220 | 1230 | ||
1221 | tsk->probe_unacked = false; | 1231 | tsk->probe_unacked = false; |
@@ -1943,7 +1953,7 @@ static void tipc_sk_proto_rcv(struct sock *sk, | |||
1943 | 1953 | ||
1944 | switch (msg_user(hdr)) { | 1954 | switch (msg_user(hdr)) { |
1945 | case CONN_MANAGER: | 1955 | case CONN_MANAGER: |
1946 | tipc_sk_conn_proto_rcv(tsk, skb, xmitq); | 1956 | tipc_sk_conn_proto_rcv(tsk, skb, inputq, xmitq); |
1947 | return; | 1957 | return; |
1948 | case SOCK_WAKEUP: | 1958 | case SOCK_WAKEUP: |
1949 | tipc_dest_del(&tsk->cong_links, msg_orignode(hdr), 0); | 1959 | tipc_dest_del(&tsk->cong_links, msg_orignode(hdr), 0); |
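
In tipc_sk_conn_proto_rcv(), a probe reply arriving for a mismatched peer is no longer just dropped after waking the socket: the buffer is rewritten into a connection abort message and queued on the input queue, so the failure is observed even when the socket was already awake and the state change would otherwise be ignored. The sketch below illustrates only that convert-and-requeue pattern; the message layout and error value are made up for the example and are not TIPC's.

/* Illustrative only: rather than dropping a message that arrives for a
 * stale connection, rewrite its header into an abort/error message and
 * put it on the receive path so the consumer is guaranteed to see it. */
#include <stdio.h>
#include <string.h>

enum msg_type { MSG_PROBE, MSG_DATA, MSG_ABORT };

struct msg {
    enum msg_type type;
    int err;
    char payload[32];
};

/* Receive queue, simplified to a fixed array. */
static struct msg inputq[8];
static int inputq_len;

static void convert_to_abort(struct msg *m, int err)
{
    m->type = MSG_ABORT;            /* reuse the buffer with a new meaning */
    m->err = err;
    memset(m->payload, 0, sizeof(m->payload));
}

static void deliver(const struct msg *m)
{
    inputq[inputq_len++] = *m;
}

int main(void)
{
    struct msg probe = { MSG_PROBE, 0, "probe from stale peer" };

    /* Peer/port mismatch detected: do not drop the message silently. */
    convert_to_abort(&probe, 104 /* illustrative error code */);
    deliver(&probe);

    printf("queued %d message(s), first type=%d err=%d\n",
           inputq_len, inputq[0].type, inputq[0].err);
    return 0;
}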
diff --git a/samples/Kconfig b/samples/Kconfig index bd133efc1a56..ad1ec7016d4c 100644 --- a/samples/Kconfig +++ b/samples/Kconfig | |||
@@ -1,5 +1,6 @@ | |||
1 | menuconfig SAMPLES | 1 | menuconfig SAMPLES |
2 | bool "Sample kernel code" | 2 | bool "Sample kernel code" |
3 | depends on !UML | ||
3 | help | 4 | help |
4 | You can build and test sample kernel code here. | 5 | You can build and test sample kernel code here. |
5 | 6 | ||
diff --git a/scripts/Makefile.build b/scripts/Makefile.build index 5a2d1c9578a0..54da4b070db3 100644 --- a/scripts/Makefile.build +++ b/scripts/Makefile.build | |||
@@ -219,7 +219,7 @@ else | |||
219 | sub_cmd_record_mcount = set -e ; perl $(srctree)/scripts/recordmcount.pl "$(ARCH)" \ | 219 | sub_cmd_record_mcount = set -e ; perl $(srctree)/scripts/recordmcount.pl "$(ARCH)" \ |
220 | "$(if $(CONFIG_CPU_BIG_ENDIAN),big,little)" \ | 220 | "$(if $(CONFIG_CPU_BIG_ENDIAN),big,little)" \ |
221 | "$(if $(CONFIG_64BIT),64,32)" \ | 221 | "$(if $(CONFIG_64BIT),64,32)" \ |
222 | "$(OBJDUMP)" "$(OBJCOPY)" "$(CC) $(KBUILD_CFLAGS)" \ | 222 | "$(OBJDUMP)" "$(OBJCOPY)" "$(CC) $(KBUILD_CPPFLAGS) $(KBUILD_CFLAGS)" \ |
223 | "$(LD) $(KBUILD_LDFLAGS)" "$(NM)" "$(RM)" "$(MV)" \ | 223 | "$(LD) $(KBUILD_LDFLAGS)" "$(NM)" "$(RM)" "$(MV)" \ |
224 | "$(if $(part-of-module),1,0)" "$(@)"; | 224 | "$(if $(part-of-module),1,0)" "$(@)"; |
225 | recordmcount_source := $(srctree)/scripts/recordmcount.pl | 225 | recordmcount_source := $(srctree)/scripts/recordmcount.pl |
diff --git a/tools/hv/hv_fcopy_daemon.c b/tools/hv/hv_fcopy_daemon.c index d78aed86af09..8ff8cb1a11f4 100644 --- a/tools/hv/hv_fcopy_daemon.c +++ b/tools/hv/hv_fcopy_daemon.c | |||
@@ -234,6 +234,7 @@ int main(int argc, char *argv[]) | |||
234 | break; | 234 | break; |
235 | 235 | ||
236 | default: | 236 | default: |
237 | error = HV_E_FAIL; | ||
237 | syslog(LOG_ERR, "Unknown operation: %d", | 238 | syslog(LOG_ERR, "Unknown operation: %d", |
238 | buffer.hdr.operation); | 239 | buffer.hdr.operation); |
239 | 240 | ||
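
The hv_fcopy_daemon fix sets error = HV_E_FAIL in the default: branch, so an unrecognised operation is reported back as a failure instead of whatever status the variable happened to hold. A tiny standalone sketch of the same pattern follows; the operation and error constants are illustrative, not the Hyper-V protocol values.

/* Illustrative only: make the default: branch of an operation switch set
 * an explicit failure code, so unknown requests are reported as errors
 * rather than inheriting a stale "success" status. */
#include <stdio.h>

#define OP_START 0
#define OP_DATA  1
#define E_FAIL   0x80004005u

static unsigned int handle_op(int op)
{
    unsigned int error = 0;

    switch (op) {
    case OP_START:
    case OP_DATA:
        /* ... real work would go here ... */
        break;
    default:
        error = E_FAIL;          /* do not fall through reporting success */
        fprintf(stderr, "unknown operation: %d\n", op);
        break;
    }
    return error;
}

int main(void)
{
    printf("known op   -> 0x%x\n", handle_op(OP_DATA));
    printf("unknown op -> 0x%x\n", handle_op(42));
    return 0;
}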
diff --git a/tools/perf/scripts/python/export-to-postgresql.py b/tools/perf/scripts/python/export-to-postgresql.py index efcaf6cac2eb..e46f51b17513 100644 --- a/tools/perf/scripts/python/export-to-postgresql.py +++ b/tools/perf/scripts/python/export-to-postgresql.py | |||
@@ -204,14 +204,23 @@ from ctypes import * | |||
204 | libpq = CDLL("libpq.so.5") | 204 | libpq = CDLL("libpq.so.5") |
205 | PQconnectdb = libpq.PQconnectdb | 205 | PQconnectdb = libpq.PQconnectdb |
206 | PQconnectdb.restype = c_void_p | 206 | PQconnectdb.restype = c_void_p |
207 | PQconnectdb.argtypes = [ c_char_p ] | ||
207 | PQfinish = libpq.PQfinish | 208 | PQfinish = libpq.PQfinish |
209 | PQfinish.argtypes = [ c_void_p ] | ||
208 | PQstatus = libpq.PQstatus | 210 | PQstatus = libpq.PQstatus |
211 | PQstatus.restype = c_int | ||
212 | PQstatus.argtypes = [ c_void_p ] | ||
209 | PQexec = libpq.PQexec | 213 | PQexec = libpq.PQexec |
210 | PQexec.restype = c_void_p | 214 | PQexec.restype = c_void_p |
215 | PQexec.argtypes = [ c_void_p, c_char_p ] | ||
211 | PQresultStatus = libpq.PQresultStatus | 216 | PQresultStatus = libpq.PQresultStatus |
217 | PQresultStatus.restype = c_int | ||
218 | PQresultStatus.argtypes = [ c_void_p ] | ||
212 | PQputCopyData = libpq.PQputCopyData | 219 | PQputCopyData = libpq.PQputCopyData |
220 | PQputCopyData.restype = c_int | ||
213 | PQputCopyData.argtypes = [ c_void_p, c_void_p, c_int ] | 221 | PQputCopyData.argtypes = [ c_void_p, c_void_p, c_int ] |
214 | PQputCopyEnd = libpq.PQputCopyEnd | 222 | PQputCopyEnd = libpq.PQputCopyEnd |
223 | PQputCopyEnd.restype = c_int | ||
215 | PQputCopyEnd.argtypes = [ c_void_p, c_void_p ] | 224 | PQputCopyEnd.argtypes = [ c_void_p, c_void_p ] |
216 | 225 | ||
217 | sys.path.append(os.environ['PERF_EXEC_PATH'] + \ | 226 | sys.path.append(os.environ['PERF_EXEC_PATH'] + \ |
diff --git a/tools/perf/scripts/python/export-to-sqlite.py b/tools/perf/scripts/python/export-to-sqlite.py index f827bf77e9d2..e4bb82c8aba9 100644 --- a/tools/perf/scripts/python/export-to-sqlite.py +++ b/tools/perf/scripts/python/export-to-sqlite.py | |||
@@ -440,7 +440,11 @@ def branch_type_table(*x): | |||
440 | 440 | ||
441 | def sample_table(*x): | 441 | def sample_table(*x): |
442 | if branches: | 442 | if branches: |
443 | bind_exec(sample_query, 18, x) | 443 | for xx in x[0:15]: |
444 | sample_query.addBindValue(str(xx)) | ||
445 | for xx in x[19:22]: | ||
446 | sample_query.addBindValue(str(xx)) | ||
447 | do_query_(sample_query) | ||
444 | else: | 448 | else: |
445 | bind_exec(sample_query, 22, x) | 449 | bind_exec(sample_query, 22, x) |
446 | 450 | ||
diff --git a/tools/perf/util/machine.c b/tools/perf/util/machine.c index c4acd2001db0..111ae858cbcb 100644 --- a/tools/perf/util/machine.c +++ b/tools/perf/util/machine.c | |||
@@ -2286,7 +2286,8 @@ static int append_inlines(struct callchain_cursor *cursor, | |||
2286 | if (!symbol_conf.inline_name || !map || !sym) | 2286 | if (!symbol_conf.inline_name || !map || !sym) |
2287 | return ret; | 2287 | return ret; |
2288 | 2288 | ||
2289 | addr = map__rip_2objdump(map, ip); | 2289 | addr = map__map_ip(map, ip); |
2290 | addr = map__rip_2objdump(map, addr); | ||
2290 | 2291 | ||
2291 | inline_node = inlines__tree_find(&map->dso->inlined_nodes, addr); | 2292 | inline_node = inlines__tree_find(&map->dso->inlined_nodes, addr); |
2292 | if (!inline_node) { | 2293 | if (!inline_node) { |
@@ -2312,7 +2313,7 @@ static int unwind_entry(struct unwind_entry *entry, void *arg) | |||
2312 | { | 2313 | { |
2313 | struct callchain_cursor *cursor = arg; | 2314 | struct callchain_cursor *cursor = arg; |
2314 | const char *srcline = NULL; | 2315 | const char *srcline = NULL; |
2315 | u64 addr; | 2316 | u64 addr = entry->ip; |
2316 | 2317 | ||
2317 | if (symbol_conf.hide_unresolved && entry->sym == NULL) | 2318 | if (symbol_conf.hide_unresolved && entry->sym == NULL) |
2318 | return 0; | 2319 | return 0; |
@@ -2324,7 +2325,8 @@ static int unwind_entry(struct unwind_entry *entry, void *arg) | |||
2324 | * Convert entry->ip from a virtual address to an offset in | 2325 | * Convert entry->ip from a virtual address to an offset in |
2325 | * its corresponding binary. | 2326 | * its corresponding binary. |
2326 | */ | 2327 | */ |
2327 | addr = map__map_ip(entry->map, entry->ip); | 2328 | if (entry->map) |
2329 | addr = map__map_ip(entry->map, entry->ip); | ||
2328 | 2330 | ||
2329 | srcline = callchain_srcline(entry->map, entry->sym, addr); | 2331 | srcline = callchain_srcline(entry->map, entry->sym, addr); |
2330 | return callchain_cursor_append(cursor, entry->ip, | 2332 | return callchain_cursor_append(cursor, entry->ip, |
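
The perf util/machine.c hunks fix two related problems: append_inlines() now maps the runtime ip into the dso's address space with map__map_ip() before converting it with map__rip_2objdump(), and unwind_entry() initialises addr from entry->ip and only remaps when entry->map is non-NULL, avoiding a NULL-pointer dereference. The sketch below mimics that two-step translation plus NULL-map fallback with made-up structures; it is not perf's implementation and the address arithmetic is only illustrative.

/* Illustrative only: translate a runtime ip into the mapped object's
 * address space first, then into an objdump-style address, and keep the
 * raw ip when there is no map to translate with. */
#include <stdint.h>
#include <stdio.h>

struct map {
    uint64_t start;   /* where the object is loaded at runtime          */
    uint64_t pgoff;   /* file offset of the mapping                      */
    uint64_t vaddr;   /* link-time address corresponding to that offset  */
};

/* runtime ip -> offset within the mapped object */
static uint64_t map_ip(const struct map *m, uint64_t ip)
{
    return ip - m->start + m->pgoff;
}

/* object offset -> address as objdump would print it */
static uint64_t rip_2objdump(const struct map *m, uint64_t off)
{
    return off - m->pgoff + m->vaddr;
}

static uint64_t resolve(const struct map *m, uint64_t ip)
{
    uint64_t addr = ip;             /* fall back to the raw ip if unmapped */

    if (m)
        addr = rip_2objdump(m, map_ip(m, ip));
    return addr;
}

int main(void)
{
    struct map m = { 0x7f0000000000ULL, 0x1000, 0x401000 };

    printf("mapped:   %#llx\n", (unsigned long long)resolve(&m, 0x7f0000002345ULL));
    printf("unmapped: %#llx\n", (unsigned long long)resolve(NULL, 0xdeadbeefULL));
    return 0;
}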
diff --git a/tools/perf/util/setup.py b/tools/perf/util/setup.py index 97efbcad076e..1942f6dd24f6 100644 --- a/tools/perf/util/setup.py +++ b/tools/perf/util/setup.py | |||
@@ -35,7 +35,7 @@ class install_lib(_install_lib): | |||
35 | 35 | ||
36 | cflags = getenv('CFLAGS', '').split() | 36 | cflags = getenv('CFLAGS', '').split() |
37 | # switch off several checks (need to be at the end of cflags list) | 37 | # switch off several checks (need to be at the end of cflags list) |
38 | cflags += ['-fno-strict-aliasing', '-Wno-write-strings', '-Wno-unused-parameter' ] | 38 | cflags += ['-fno-strict-aliasing', '-Wno-write-strings', '-Wno-unused-parameter', '-Wno-redundant-decls' ] |
39 | if cc != "clang": | 39 | if cc != "clang": |
40 | cflags += ['-Wno-cast-function-type' ] | 40 | cflags += ['-Wno-cast-function-type' ] |
41 | 41 | ||
diff --git a/tools/testing/selftests/net/rtnetlink.sh b/tools/testing/selftests/net/rtnetlink.sh index 08c341b49760..e101af52d1d6 100755 --- a/tools/testing/selftests/net/rtnetlink.sh +++ b/tools/testing/selftests/net/rtnetlink.sh | |||
@@ -1,4 +1,4 @@ | |||
1 | #!/bin/sh | 1 | #!/bin/bash |
2 | # | 2 | # |
3 | # This test is for checking rtnetlink callpaths, and get as much coverage as possible. | 3 | # This test is for checking rtnetlink callpaths, and get as much coverage as possible. |
4 | # | 4 | # |
diff --git a/tools/testing/selftests/net/udpgso_bench.sh b/tools/testing/selftests/net/udpgso_bench.sh index 850767befa47..99e537ab5ad9 100755 --- a/tools/testing/selftests/net/udpgso_bench.sh +++ b/tools/testing/selftests/net/udpgso_bench.sh | |||
@@ -1,4 +1,4 @@ | |||
1 | #!/bin/sh | 1 | #!/bin/bash |
2 | # SPDX-License-Identifier: GPL-2.0 | 2 | # SPDX-License-Identifier: GPL-2.0 |
3 | # | 3 | # |
4 | # Run a series of udpgso benchmarks | 4 | # Run a series of udpgso benchmarks |