diff options
192 files changed, 2087 insertions, 971 deletions
diff --git a/Documentation/DocBook/device-drivers.tmpl b/Documentation/DocBook/device-drivers.tmpl index de79efdad46c..8c68768ebee5 100644 --- a/Documentation/DocBook/device-drivers.tmpl +++ b/Documentation/DocBook/device-drivers.tmpl | |||
@@ -128,16 +128,44 @@ X!Edrivers/base/interface.c | |||
128 | !Edrivers/base/platform.c | 128 | !Edrivers/base/platform.c |
129 | !Edrivers/base/bus.c | 129 | !Edrivers/base/bus.c |
130 | </sect1> | 130 | </sect1> |
131 | <sect1><title>Device Drivers DMA Management</title> | 131 | <sect1> |
132 | <title>Buffer Sharing and Synchronization</title> | ||
133 | <para> | ||
134 | The dma-buf subsystem provides the framework for sharing buffers | ||
135 | for hardware (DMA) access across multiple device drivers and | ||
136 | subsystems, and for synchronizing asynchronous hardware access. | ||
137 | </para> | ||
138 | <para> | ||
139 | This is used, for example, by drm "prime" multi-GPU support, but | ||
140 | is of course not limited to GPU use cases. | ||
141 | </para> | ||
142 | <para> | ||
143 | The three main components of this are: (1) dma-buf, representing | ||
144 | a sg_table and exposed to userspace as a file descriptor to allow | ||
145 | passing between devices, (2) fence, which provides a mechanism | ||
146 | to signal when one device has finished access, and (3) reservation, | ||
147 | which manages the shared or exclusive fence(s) associated with | ||
148 | the buffer. | ||
149 | </para> | ||
150 | <sect2><title>dma-buf</title> | ||
132 | !Edrivers/dma-buf/dma-buf.c | 151 | !Edrivers/dma-buf/dma-buf.c |
152 | !Iinclude/linux/dma-buf.h | ||
153 | </sect2> | ||
154 | <sect2><title>reservation</title> | ||
155 | !Pdrivers/dma-buf/reservation.c Reservation Object Overview | ||
156 | !Edrivers/dma-buf/reservation.c | ||
157 | !Iinclude/linux/reservation.h | ||
158 | </sect2> | ||
159 | <sect2><title>fence</title> | ||
133 | !Edrivers/dma-buf/fence.c | 160 | !Edrivers/dma-buf/fence.c |
134 | !Edrivers/dma-buf/seqno-fence.c | ||
135 | !Iinclude/linux/fence.h | 161 | !Iinclude/linux/fence.h |
162 | !Edrivers/dma-buf/seqno-fence.c | ||
136 | !Iinclude/linux/seqno-fence.h | 163 | !Iinclude/linux/seqno-fence.h |
137 | !Edrivers/dma-buf/reservation.c | ||
138 | !Iinclude/linux/reservation.h | ||
139 | !Edrivers/dma-buf/sync_file.c | 164 | !Edrivers/dma-buf/sync_file.c |
140 | !Iinclude/linux/sync_file.h | 165 | !Iinclude/linux/sync_file.h |
166 | </sect2> | ||
167 | </sect1> | ||
168 | <sect1><title>Device Drivers DMA Management</title> | ||
141 | !Edrivers/base/dma-coherent.c | 169 | !Edrivers/base/dma-coherent.c |
142 | !Edrivers/base/dma-mapping.c | 170 | !Edrivers/base/dma-mapping.c |
143 | </sect1> | 171 | </sect1> |
diff --git a/Documentation/arm64/silicon-errata.txt b/Documentation/arm64/silicon-errata.txt index c6938e50e71f..4da60b463995 100644 --- a/Documentation/arm64/silicon-errata.txt +++ b/Documentation/arm64/silicon-errata.txt | |||
@@ -56,6 +56,7 @@ stable kernels. | |||
56 | | ARM | MMU-500 | #841119,#826419 | N/A | | 56 | | ARM | MMU-500 | #841119,#826419 | N/A | |
57 | | | | | | | 57 | | | | | | |
58 | | Cavium | ThunderX ITS | #22375, #24313 | CAVIUM_ERRATUM_22375 | | 58 | | Cavium | ThunderX ITS | #22375, #24313 | CAVIUM_ERRATUM_22375 | |
59 | | Cavium | ThunderX ITS | #23144 | CAVIUM_ERRATUM_23144 | | ||
59 | | Cavium | ThunderX GICv3 | #23154 | CAVIUM_ERRATUM_23154 | | 60 | | Cavium | ThunderX GICv3 | #23154 | CAVIUM_ERRATUM_23154 | |
60 | | Cavium | ThunderX Core | #27456 | CAVIUM_ERRATUM_27456 | | 61 | | Cavium | ThunderX Core | #27456 | CAVIUM_ERRATUM_27456 | |
61 | | Cavium | ThunderX SMMUv2 | #27704 | N/A | | 62 | | Cavium | ThunderX SMMUv2 | #27704 | N/A | |
diff --git a/Documentation/kdump/gdbmacros.txt b/Documentation/kdump/gdbmacros.txt index 35f6a982a0d5..220d0a80ca2c 100644 --- a/Documentation/kdump/gdbmacros.txt +++ b/Documentation/kdump/gdbmacros.txt | |||
@@ -170,21 +170,92 @@ document trapinfo | |||
170 | address the kernel panicked. | 170 | address the kernel panicked. |
171 | end | 171 | end |
172 | 172 | ||
173 | define dump_log_idx | ||
174 | set $idx = $arg0 | ||
175 | if ($argc > 1) | ||
176 | set $prev_flags = $arg1 | ||
177 | else | ||
178 | set $prev_flags = 0 | ||
179 | end | ||
180 | set $msg = ((struct printk_log *) (log_buf + $idx)) | ||
181 | set $prefix = 1 | ||
182 | set $newline = 1 | ||
183 | set $log = log_buf + $idx + sizeof(*$msg) | ||
173 | 184 | ||
174 | define dmesg | 185 | # prev & LOG_CONT && !(msg->flags & LOG_PREFIX) |
175 | set $i = 0 | 186 | if (($prev_flags & 8) && !($msg->flags & 4)) |
176 | set $end_idx = (log_end - 1) & (log_buf_len - 1) | 187 | set $prefix = 0 |
188 | end | ||
189 | |||
190 | # msg->flags & LOG_CONT | ||
191 | if ($msg->flags & 8) | ||
192 | # (prev & LOG_CONT && !(prev & LOG_NEWLINE)) | ||
193 | if (($prev_flags & 8) && !($prev_flags & 2)) | ||
194 | set $prefix = 0 | ||
195 | end | ||
196 | # (!(msg->flags & LOG_NEWLINE)) | ||
197 | if (!($msg->flags & 2)) | ||
198 | set $newline = 0 | ||
199 | end | ||
200 | end | ||
201 | |||
202 | if ($prefix) | ||
203 | printf "[%5lu.%06lu] ", $msg->ts_nsec / 1000000000, $msg->ts_nsec % 1000000000 | ||
204 | end | ||
205 | if ($msg->text_len != 0) | ||
206 | eval "printf \"%%%d.%ds\", $log", $msg->text_len, $msg->text_len | ||
207 | end | ||
208 | if ($newline) | ||
209 | printf "\n" | ||
210 | end | ||
211 | if ($msg->dict_len > 0) | ||
212 | set $dict = $log + $msg->text_len | ||
213 | set $idx = 0 | ||
214 | set $line = 1 | ||
215 | while ($idx < $msg->dict_len) | ||
216 | if ($line) | ||
217 | printf " " | ||
218 | set $line = 0 | ||
219 | end | ||
220 | set $c = $dict[$idx] | ||
221 | if ($c == '\0') | ||
222 | printf "\n" | ||
223 | set $line = 1 | ||
224 | else | ||
225 | if ($c < ' ' || $c >= 127 || $c == '\\') | ||
226 | printf "\\x%02x", $c | ||
227 | else | ||
228 | printf "%c", $c | ||
229 | end | ||
230 | end | ||
231 | set $idx = $idx + 1 | ||
232 | end | ||
233 | printf "\n" | ||
234 | end | ||
235 | end | ||
236 | document dump_log_idx | ||
237 | Dump a single log given its index in the log buffer. The first | ||
238 | parameter is the index into log_buf, the second is optional and | ||
239 | specifies the previous log buffer's flags, used for properly | ||
240 | formatting continued lines. | ||
241 | end | ||
177 | 242 | ||
178 | while ($i < logged_chars) | 243 | define dmesg |
179 | set $idx = (log_end - 1 - logged_chars + $i) & (log_buf_len - 1) | 244 | set $i = log_first_idx |
245 | set $end_idx = log_first_idx | ||
246 | set $prev_flags = 0 | ||
180 | 247 | ||
181 | if ($idx + 100 <= $end_idx) || \ | 248 | while (1) |
182 | ($end_idx <= $idx && $idx + 100 < log_buf_len) | 249 | set $msg = ((struct printk_log *) (log_buf + $i)) |
183 | printf "%.100s", &log_buf[$idx] | 250 | if ($msg->len == 0) |
184 | set $i = $i + 100 | 251 | set $i = 0 |
185 | else | 252 | else |
186 | printf "%c", log_buf[$idx] | 253 | dump_log_idx $i $prev_flags |
187 | set $i = $i + 1 | 254 | set $i = $i + $msg->len |
255 | set $prev_flags = $msg->flags | ||
256 | end | ||
257 | if ($i == $end_idx) | ||
258 | loop_break | ||
188 | end | 259 | end |
189 | end | 260 | end |
190 | end | 261 | end |
diff --git a/Documentation/networking/dsa/dsa.txt b/Documentation/networking/dsa/dsa.txt index 631b0f7ae16f..9d05ed7f7da5 100644 --- a/Documentation/networking/dsa/dsa.txt +++ b/Documentation/networking/dsa/dsa.txt | |||
@@ -369,8 +369,6 @@ does not allocate any driver private context space. | |||
369 | Switch configuration | 369 | Switch configuration |
370 | -------------------- | 370 | -------------------- |
371 | 371 | ||
372 | - priv_size: additional size needed by the switch driver for its private context | ||
373 | |||
374 | - tag_protocol: this is to indicate what kind of tagging protocol is supported, | 372 | - tag_protocol: this is to indicate what kind of tagging protocol is supported, |
375 | should be a valid value from the dsa_tag_protocol enum | 373 | should be a valid value from the dsa_tag_protocol enum |
376 | 374 | ||
@@ -416,11 +414,6 @@ PHY devices and link management | |||
416 | to the switch port MDIO registers. If unavailable return a negative error | 414 | to the switch port MDIO registers. If unavailable return a negative error |
417 | code. | 415 | code. |
418 | 416 | ||
419 | - poll_link: Function invoked by DSA to query the link state of the switch | ||
420 | builtin Ethernet PHYs, per port. This function is responsible for calling | ||
421 | netif_carrier_{on,off} when appropriate, and can be used to poll all ports in a | ||
422 | single call. Executes from workqueue context. | ||
423 | |||
424 | - adjust_link: Function invoked by the PHY library when a slave network device | 417 | - adjust_link: Function invoked by the PHY library when a slave network device |
425 | is attached to a PHY device. This function is responsible for appropriately | 418 | is attached to a PHY device. This function is responsible for appropriately |
426 | configuring the switch port link parameters: speed, duplex, pause based on | 419 | configuring the switch port link parameters: speed, duplex, pause based on |
@@ -542,6 +535,16 @@ Bridge layer | |||
542 | Bridge VLAN filtering | 535 | Bridge VLAN filtering |
543 | --------------------- | 536 | --------------------- |
544 | 537 | ||
538 | - port_vlan_filtering: bridge layer function invoked when the bridge gets | ||
539 | configured for turning on or off VLAN filtering. If nothing specific needs to | ||
540 | be done at the hardware level, this callback does not need to be implemented. | ||
541 | When VLAN filtering is turned on, the hardware must be programmed with | ||
542 | rejecting 802.1Q frames which have VLAN IDs outside of the programmed allowed | ||
543 | VLAN ID map/rules. If there is no PVID programmed into the switch port, | ||
544 | untagged frames must be rejected as well. When turned off the switch must | ||
545 | accept any 802.1Q frames irrespective of their VLAN ID, and untagged frames are | ||
546 | allowed. | ||
547 | |||
545 | - port_vlan_prepare: bridge layer function invoked when the bridge prepares the | 548 | - port_vlan_prepare: bridge layer function invoked when the bridge prepares the |
546 | configuration of a VLAN on the given port. If the operation is not supported | 549 | configuration of a VLAN on the given port. If the operation is not supported |
547 | by the hardware, this function should return -EOPNOTSUPP to inform the bridge | 550 | by the hardware, this function should return -EOPNOTSUPP to inform the bridge |
diff --git a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt index 6c7f365b1515..9ae929395b24 100644 --- a/Documentation/networking/ip-sysctl.txt +++ b/Documentation/networking/ip-sysctl.txt | |||
@@ -1036,15 +1036,17 @@ proxy_arp_pvlan - BOOLEAN | |||
1036 | 1036 | ||
1037 | shared_media - BOOLEAN | 1037 | shared_media - BOOLEAN |
1038 | Send(router) or accept(host) RFC1620 shared media redirects. | 1038 | Send(router) or accept(host) RFC1620 shared media redirects. |
1039 | Overrides ip_secure_redirects. | 1039 | Overrides secure_redirects. |
1040 | shared_media for the interface will be enabled if at least one of | 1040 | shared_media for the interface will be enabled if at least one of |
1041 | conf/{all,interface}/shared_media is set to TRUE, | 1041 | conf/{all,interface}/shared_media is set to TRUE, |
1042 | it will be disabled otherwise | 1042 | it will be disabled otherwise |
1043 | default TRUE | 1043 | default TRUE |
1044 | 1044 | ||
1045 | secure_redirects - BOOLEAN | 1045 | secure_redirects - BOOLEAN |
1046 | Accept ICMP redirect messages only for gateways, | 1046 | Accept ICMP redirect messages only to gateways listed in the |
1047 | listed in default gateway list. | 1047 | interface's current gateway list. Even if disabled, RFC1122 redirect |
1048 | rules still apply. | ||
1049 | Overridden by shared_media. | ||
1048 | secure_redirects for the interface will be enabled if at least one of | 1050 | secure_redirects for the interface will be enabled if at least one of |
1049 | conf/{all,interface}/secure_redirects is set to TRUE, | 1051 | conf/{all,interface}/secure_redirects is set to TRUE, |
1050 | it will be disabled otherwise | 1052 | it will be disabled otherwise |
diff --git a/MAINTAINERS b/MAINTAINERS index 7304d2e37a98..ed42cb65a19b 100644 --- a/MAINTAINERS +++ b/MAINTAINERS | |||
@@ -7989,6 +7989,7 @@ Q: http://patchwork.ozlabs.org/project/netdev/list/ | |||
7989 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/davem/net.git | 7989 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/davem/net.git |
7990 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next.git | 7990 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next.git |
7991 | S: Odd Fixes | 7991 | S: Odd Fixes |
7992 | F: Documentation/devicetree/bindings/net/ | ||
7992 | F: drivers/net/ | 7993 | F: drivers/net/ |
7993 | F: include/linux/if_* | 7994 | F: include/linux/if_* |
7994 | F: include/linux/netdevice.h | 7995 | F: include/linux/netdevice.h |
@@ -8944,6 +8945,7 @@ M: Linus Walleij <linus.walleij@linaro.org> | |||
8944 | L: linux-gpio@vger.kernel.org | 8945 | L: linux-gpio@vger.kernel.org |
8945 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/linusw/linux-pinctrl.git | 8946 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/linusw/linux-pinctrl.git |
8946 | S: Maintained | 8947 | S: Maintained |
8948 | F: Documentation/devicetree/bindings/pinctrl/ | ||
8947 | F: drivers/pinctrl/ | 8949 | F: drivers/pinctrl/ |
8948 | F: include/linux/pinctrl/ | 8950 | F: include/linux/pinctrl/ |
8949 | 8951 | ||
diff --git a/arch/arm/kernel/ptrace.c b/arch/arm/kernel/ptrace.c index ef9119f7462e..4d9375814b53 100644 --- a/arch/arm/kernel/ptrace.c +++ b/arch/arm/kernel/ptrace.c | |||
@@ -733,8 +733,8 @@ static int vfp_set(struct task_struct *target, | |||
733 | if (ret) | 733 | if (ret) |
734 | return ret; | 734 | return ret; |
735 | 735 | ||
736 | vfp_flush_hwstate(thread); | ||
737 | thread->vfpstate.hard = new_vfp; | 736 | thread->vfpstate.hard = new_vfp; |
737 | vfp_flush_hwstate(thread); | ||
738 | 738 | ||
739 | return 0; | 739 | return 0; |
740 | } | 740 | } |
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index 76747d92bc72..5a0a691d4220 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig | |||
@@ -113,6 +113,18 @@ config ARCH_PHYS_ADDR_T_64BIT | |||
113 | config MMU | 113 | config MMU |
114 | def_bool y | 114 | def_bool y |
115 | 115 | ||
116 | config ARM64_PAGE_SHIFT | ||
117 | int | ||
118 | default 16 if ARM64_64K_PAGES | ||
119 | default 14 if ARM64_16K_PAGES | ||
120 | default 12 | ||
121 | |||
122 | config ARM64_CONT_SHIFT | ||
123 | int | ||
124 | default 5 if ARM64_64K_PAGES | ||
125 | default 7 if ARM64_16K_PAGES | ||
126 | default 4 | ||
127 | |||
116 | config ARCH_MMAP_RND_BITS_MIN | 128 | config ARCH_MMAP_RND_BITS_MIN |
117 | default 14 if ARM64_64K_PAGES | 129 | default 14 if ARM64_64K_PAGES |
118 | default 16 if ARM64_16K_PAGES | 130 | default 16 if ARM64_16K_PAGES |
@@ -426,6 +438,15 @@ config CAVIUM_ERRATUM_22375 | |||
426 | 438 | ||
427 | If unsure, say Y. | 439 | If unsure, say Y. |
428 | 440 | ||
441 | config CAVIUM_ERRATUM_23144 | ||
442 | bool "Cavium erratum 23144: ITS SYNC hang on dual socket system" | ||
443 | depends on NUMA | ||
444 | default y | ||
445 | help | ||
446 | ITS SYNC command hang for cross node io and collections/cpu mapping. | ||
447 | |||
448 | If unsure, say Y. | ||
449 | |||
429 | config CAVIUM_ERRATUM_23154 | 450 | config CAVIUM_ERRATUM_23154 |
430 | bool "Cavium erratum 23154: Access to ICC_IAR1_EL1 is not sync'ed" | 451 | bool "Cavium erratum 23154: Access to ICC_IAR1_EL1 is not sync'ed" |
431 | default y | 452 | default y |
diff --git a/arch/arm64/Kconfig.debug b/arch/arm64/Kconfig.debug index 710fde4ad0f0..0cc758cdd0dc 100644 --- a/arch/arm64/Kconfig.debug +++ b/arch/arm64/Kconfig.debug | |||
@@ -12,7 +12,8 @@ config ARM64_PTDUMP | |||
12 | who are working in architecture specific areas of the kernel. | 12 | who are working in architecture specific areas of the kernel. |
13 | It is probably not a good idea to enable this feature in a production | 13 | It is probably not a good idea to enable this feature in a production |
14 | kernel. | 14 | kernel. |
15 | If in doubt, say "N" | 15 | |
16 | If in doubt, say N. | ||
16 | 17 | ||
17 | config PID_IN_CONTEXTIDR | 18 | config PID_IN_CONTEXTIDR |
18 | bool "Write the current PID to the CONTEXTIDR register" | 19 | bool "Write the current PID to the CONTEXTIDR register" |
@@ -38,15 +39,15 @@ config ARM64_RANDOMIZE_TEXT_OFFSET | |||
38 | value. | 39 | value. |
39 | 40 | ||
40 | config DEBUG_SET_MODULE_RONX | 41 | config DEBUG_SET_MODULE_RONX |
41 | bool "Set loadable kernel module data as NX and text as RO" | 42 | bool "Set loadable kernel module data as NX and text as RO" |
42 | depends on MODULES | 43 | depends on MODULES |
43 | help | 44 | default y |
44 | This option helps catch unintended modifications to loadable | 45 | help |
45 | kernel module's text and read-only data. It also prevents execution | 46 | Is this is set, kernel module text and rodata will be made read-only. |
46 | of module data. Such protection may interfere with run-time code | 47 | This is to help catch accidental or malicious attempts to change the |
47 | patching and dynamic kernel tracing - and they might also protect | 48 | kernel's executable code. |
48 | against certain classes of kernel exploits. | 49 | |
49 | If in doubt, say "N". | 50 | If in doubt, say Y. |
50 | 51 | ||
51 | config DEBUG_RODATA | 52 | config DEBUG_RODATA |
52 | bool "Make kernel text and rodata read-only" | 53 | bool "Make kernel text and rodata read-only" |
@@ -56,7 +57,7 @@ config DEBUG_RODATA | |||
56 | is to help catch accidental or malicious attempts to change the | 57 | is to help catch accidental or malicious attempts to change the |
57 | kernel's executable code. | 58 | kernel's executable code. |
58 | 59 | ||
59 | If in doubt, say Y | 60 | If in doubt, say Y. |
60 | 61 | ||
61 | config DEBUG_ALIGN_RODATA | 62 | config DEBUG_ALIGN_RODATA |
62 | depends on DEBUG_RODATA | 63 | depends on DEBUG_RODATA |
@@ -69,7 +70,7 @@ config DEBUG_ALIGN_RODATA | |||
69 | alignment and potentially wasted space. Turn on this option if | 70 | alignment and potentially wasted space. Turn on this option if |
70 | performance is more important than memory pressure. | 71 | performance is more important than memory pressure. |
71 | 72 | ||
72 | If in doubt, say N | 73 | If in doubt, say N. |
73 | 74 | ||
74 | source "drivers/hwtracing/coresight/Kconfig" | 75 | source "drivers/hwtracing/coresight/Kconfig" |
75 | 76 | ||
diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile index 354d75402ace..7085e322dc42 100644 --- a/arch/arm64/Makefile +++ b/arch/arm64/Makefile | |||
@@ -60,7 +60,9 @@ head-y := arch/arm64/kernel/head.o | |||
60 | 60 | ||
61 | # The byte offset of the kernel image in RAM from the start of RAM. | 61 | # The byte offset of the kernel image in RAM from the start of RAM. |
62 | ifeq ($(CONFIG_ARM64_RANDOMIZE_TEXT_OFFSET), y) | 62 | ifeq ($(CONFIG_ARM64_RANDOMIZE_TEXT_OFFSET), y) |
63 | TEXT_OFFSET := $(shell awk 'BEGIN {srand(); printf "0x%03x000\n", int(512 * rand())}') | 63 | TEXT_OFFSET := $(shell awk "BEGIN {srand(); printf \"0x%06x\n\", \ |
64 | int(2 * 1024 * 1024 / (2 ^ $(CONFIG_ARM64_PAGE_SHIFT)) * \ | ||
65 | rand()) * (2 ^ $(CONFIG_ARM64_PAGE_SHIFT))}") | ||
64 | else | 66 | else |
65 | TEXT_OFFSET := 0x00080000 | 67 | TEXT_OFFSET := 0x00080000 |
66 | endif | 68 | endif |
diff --git a/arch/arm64/include/asm/elf.h b/arch/arm64/include/asm/elf.h index 7a09c48c0475..579b6e654f2d 100644 --- a/arch/arm64/include/asm/elf.h +++ b/arch/arm64/include/asm/elf.h | |||
@@ -160,14 +160,14 @@ extern int arch_setup_additional_pages(struct linux_binprm *bprm, | |||
160 | #define STACK_RND_MASK (0x3ffff >> (PAGE_SHIFT - 12)) | 160 | #define STACK_RND_MASK (0x3ffff >> (PAGE_SHIFT - 12)) |
161 | #endif | 161 | #endif |
162 | 162 | ||
163 | #ifdef CONFIG_COMPAT | ||
164 | |||
165 | #ifdef __AARCH64EB__ | 163 | #ifdef __AARCH64EB__ |
166 | #define COMPAT_ELF_PLATFORM ("v8b") | 164 | #define COMPAT_ELF_PLATFORM ("v8b") |
167 | #else | 165 | #else |
168 | #define COMPAT_ELF_PLATFORM ("v8l") | 166 | #define COMPAT_ELF_PLATFORM ("v8l") |
169 | #endif | 167 | #endif |
170 | 168 | ||
169 | #ifdef CONFIG_COMPAT | ||
170 | |||
171 | #define COMPAT_ELF_ET_DYN_BASE (2 * TASK_SIZE_32 / 3) | 171 | #define COMPAT_ELF_ET_DYN_BASE (2 * TASK_SIZE_32 / 3) |
172 | 172 | ||
173 | /* AArch32 registers. */ | 173 | /* AArch32 registers. */ |
diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h index 72a3025bb583..31b73227b41f 100644 --- a/arch/arm64/include/asm/memory.h +++ b/arch/arm64/include/asm/memory.h | |||
@@ -55,8 +55,9 @@ | |||
55 | #define VMEMMAP_SIZE (UL(1) << (VA_BITS - PAGE_SHIFT - 1 + STRUCT_PAGE_MAX_SHIFT)) | 55 | #define VMEMMAP_SIZE (UL(1) << (VA_BITS - PAGE_SHIFT - 1 + STRUCT_PAGE_MAX_SHIFT)) |
56 | 56 | ||
57 | /* | 57 | /* |
58 | * PAGE_OFFSET - the virtual address of the start of the kernel image (top | 58 | * PAGE_OFFSET - the virtual address of the start of the linear map (top |
59 | * (VA_BITS - 1)) | 59 | * (VA_BITS - 1)) |
60 | * KIMAGE_VADDR - the virtual address of the start of the kernel image | ||
60 | * VA_BITS - the maximum number of bits for virtual addresses. | 61 | * VA_BITS - the maximum number of bits for virtual addresses. |
61 | * VA_START - the first kernel virtual address. | 62 | * VA_START - the first kernel virtual address. |
62 | * TASK_SIZE - the maximum size of a user space task. | 63 | * TASK_SIZE - the maximum size of a user space task. |
diff --git a/arch/arm64/include/asm/page.h b/arch/arm64/include/asm/page.h index 17b45f7d96d3..8472c6def5ef 100644 --- a/arch/arm64/include/asm/page.h +++ b/arch/arm64/include/asm/page.h | |||
@@ -23,16 +23,8 @@ | |||
23 | 23 | ||
24 | /* PAGE_SHIFT determines the page size */ | 24 | /* PAGE_SHIFT determines the page size */ |
25 | /* CONT_SHIFT determines the number of pages which can be tracked together */ | 25 | /* CONT_SHIFT determines the number of pages which can be tracked together */ |
26 | #ifdef CONFIG_ARM64_64K_PAGES | 26 | #define PAGE_SHIFT CONFIG_ARM64_PAGE_SHIFT |
27 | #define PAGE_SHIFT 16 | 27 | #define CONT_SHIFT CONFIG_ARM64_CONT_SHIFT |
28 | #define CONT_SHIFT 5 | ||
29 | #elif defined(CONFIG_ARM64_16K_PAGES) | ||
30 | #define PAGE_SHIFT 14 | ||
31 | #define CONT_SHIFT 7 | ||
32 | #else | ||
33 | #define PAGE_SHIFT 12 | ||
34 | #define CONT_SHIFT 4 | ||
35 | #endif | ||
36 | #define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT) | 28 | #define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT) |
37 | #define PAGE_MASK (~(PAGE_SIZE-1)) | 29 | #define PAGE_MASK (~(PAGE_SIZE-1)) |
38 | 30 | ||
diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h index 0685d74572af..9e397a542756 100644 --- a/arch/arm64/include/asm/uaccess.h +++ b/arch/arm64/include/asm/uaccess.h | |||
@@ -81,19 +81,6 @@ static inline void set_fs(mm_segment_t fs) | |||
81 | #define segment_eq(a, b) ((a) == (b)) | 81 | #define segment_eq(a, b) ((a) == (b)) |
82 | 82 | ||
83 | /* | 83 | /* |
84 | * Return 1 if addr < current->addr_limit, 0 otherwise. | ||
85 | */ | ||
86 | #define __addr_ok(addr) \ | ||
87 | ({ \ | ||
88 | unsigned long flag; \ | ||
89 | asm("cmp %1, %0; cset %0, lo" \ | ||
90 | : "=&r" (flag) \ | ||
91 | : "r" (addr), "0" (current_thread_info()->addr_limit) \ | ||
92 | : "cc"); \ | ||
93 | flag; \ | ||
94 | }) | ||
95 | |||
96 | /* | ||
97 | * Test whether a block of memory is a valid user space address. | 84 | * Test whether a block of memory is a valid user space address. |
98 | * Returns 1 if the range is valid, 0 otherwise. | 85 | * Returns 1 if the range is valid, 0 otherwise. |
99 | * | 86 | * |
diff --git a/arch/arm64/include/asm/unistd.h b/arch/arm64/include/asm/unistd.h index 41e58fe3c041..e78ac26324bd 100644 --- a/arch/arm64/include/asm/unistd.h +++ b/arch/arm64/include/asm/unistd.h | |||
@@ -44,7 +44,7 @@ | |||
44 | #define __ARM_NR_compat_cacheflush (__ARM_NR_COMPAT_BASE+2) | 44 | #define __ARM_NR_compat_cacheflush (__ARM_NR_COMPAT_BASE+2) |
45 | #define __ARM_NR_compat_set_tls (__ARM_NR_COMPAT_BASE+5) | 45 | #define __ARM_NR_compat_set_tls (__ARM_NR_COMPAT_BASE+5) |
46 | 46 | ||
47 | #define __NR_compat_syscalls 390 | 47 | #define __NR_compat_syscalls 394 |
48 | #endif | 48 | #endif |
49 | 49 | ||
50 | #define __ARCH_WANT_SYS_CLONE | 50 | #define __ARCH_WANT_SYS_CLONE |
diff --git a/arch/arm64/include/asm/unistd32.h b/arch/arm64/include/asm/unistd32.h index 5b925b761a2a..b7e8ef16ff0d 100644 --- a/arch/arm64/include/asm/unistd32.h +++ b/arch/arm64/include/asm/unistd32.h | |||
@@ -801,6 +801,14 @@ __SYSCALL(__NR_execveat, compat_sys_execveat) | |||
801 | __SYSCALL(__NR_userfaultfd, sys_userfaultfd) | 801 | __SYSCALL(__NR_userfaultfd, sys_userfaultfd) |
802 | #define __NR_membarrier 389 | 802 | #define __NR_membarrier 389 |
803 | __SYSCALL(__NR_membarrier, sys_membarrier) | 803 | __SYSCALL(__NR_membarrier, sys_membarrier) |
804 | #define __NR_mlock2 390 | ||
805 | __SYSCALL(__NR_mlock2, sys_mlock2) | ||
806 | #define __NR_copy_file_range 391 | ||
807 | __SYSCALL(__NR_copy_file_range, sys_copy_file_range) | ||
808 | #define __NR_preadv2 392 | ||
809 | __SYSCALL(__NR_preadv2, compat_sys_preadv2) | ||
810 | #define __NR_pwritev2 393 | ||
811 | __SYSCALL(__NR_pwritev2, compat_sys_pwritev2) | ||
804 | 812 | ||
805 | /* | 813 | /* |
806 | * Please add new compat syscalls above this comment and update | 814 | * Please add new compat syscalls above this comment and update |
diff --git a/arch/arm64/kernel/cpuinfo.c b/arch/arm64/kernel/cpuinfo.c index 3808470486f3..c173d329397f 100644 --- a/arch/arm64/kernel/cpuinfo.c +++ b/arch/arm64/kernel/cpuinfo.c | |||
@@ -22,6 +22,8 @@ | |||
22 | 22 | ||
23 | #include <linux/bitops.h> | 23 | #include <linux/bitops.h> |
24 | #include <linux/bug.h> | 24 | #include <linux/bug.h> |
25 | #include <linux/compat.h> | ||
26 | #include <linux/elf.h> | ||
25 | #include <linux/init.h> | 27 | #include <linux/init.h> |
26 | #include <linux/kernel.h> | 28 | #include <linux/kernel.h> |
27 | #include <linux/personality.h> | 29 | #include <linux/personality.h> |
@@ -104,6 +106,7 @@ static const char *const compat_hwcap2_str[] = { | |||
104 | static int c_show(struct seq_file *m, void *v) | 106 | static int c_show(struct seq_file *m, void *v) |
105 | { | 107 | { |
106 | int i, j; | 108 | int i, j; |
109 | bool compat = personality(current->personality) == PER_LINUX32; | ||
107 | 110 | ||
108 | for_each_online_cpu(i) { | 111 | for_each_online_cpu(i) { |
109 | struct cpuinfo_arm64 *cpuinfo = &per_cpu(cpu_data, i); | 112 | struct cpuinfo_arm64 *cpuinfo = &per_cpu(cpu_data, i); |
@@ -115,6 +118,9 @@ static int c_show(struct seq_file *m, void *v) | |||
115 | * "processor". Give glibc what it expects. | 118 | * "processor". Give glibc what it expects. |
116 | */ | 119 | */ |
117 | seq_printf(m, "processor\t: %d\n", i); | 120 | seq_printf(m, "processor\t: %d\n", i); |
121 | if (compat) | ||
122 | seq_printf(m, "model name\t: ARMv8 Processor rev %d (%s)\n", | ||
123 | MIDR_REVISION(midr), COMPAT_ELF_PLATFORM); | ||
118 | 124 | ||
119 | seq_printf(m, "BogoMIPS\t: %lu.%02lu\n", | 125 | seq_printf(m, "BogoMIPS\t: %lu.%02lu\n", |
120 | loops_per_jiffy / (500000UL/HZ), | 126 | loops_per_jiffy / (500000UL/HZ), |
@@ -127,7 +133,7 @@ static int c_show(struct seq_file *m, void *v) | |||
127 | * software which does already (at least for 32-bit). | 133 | * software which does already (at least for 32-bit). |
128 | */ | 134 | */ |
129 | seq_puts(m, "Features\t:"); | 135 | seq_puts(m, "Features\t:"); |
130 | if (personality(current->personality) == PER_LINUX32) { | 136 | if (compat) { |
131 | #ifdef CONFIG_COMPAT | 137 | #ifdef CONFIG_COMPAT |
132 | for (j = 0; compat_hwcap_str[j]; j++) | 138 | for (j = 0; compat_hwcap_str[j]; j++) |
133 | if (compat_elf_hwcap & (1 << j)) | 139 | if (compat_elf_hwcap & (1 << j)) |
diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c index c5392081b49b..f7cf463107df 100644 --- a/arch/arm64/kernel/traps.c +++ b/arch/arm64/kernel/traps.c | |||
@@ -477,8 +477,9 @@ asmlinkage void bad_mode(struct pt_regs *regs, int reason, unsigned int esr) | |||
477 | void __user *pc = (void __user *)instruction_pointer(regs); | 477 | void __user *pc = (void __user *)instruction_pointer(regs); |
478 | console_verbose(); | 478 | console_verbose(); |
479 | 479 | ||
480 | pr_crit("Bad mode in %s handler detected, code 0x%08x -- %s\n", | 480 | pr_crit("Bad mode in %s handler detected on CPU%d, code 0x%08x -- %s\n", |
481 | handler[reason], esr, esr_get_class_string(esr)); | 481 | handler[reason], smp_processor_id(), esr, |
482 | esr_get_class_string(esr)); | ||
482 | __show_regs(regs); | 483 | __show_regs(regs); |
483 | 484 | ||
484 | info.si_signo = SIGILL; | 485 | info.si_signo = SIGILL; |
diff --git a/arch/arm64/kvm/hyp/vgic-v3-sr.c b/arch/arm64/kvm/hyp/vgic-v3-sr.c index fff7cd42b3a3..5f8f80b4a224 100644 --- a/arch/arm64/kvm/hyp/vgic-v3-sr.c +++ b/arch/arm64/kvm/hyp/vgic-v3-sr.c | |||
@@ -169,7 +169,8 @@ void __hyp_text __vgic_v3_save_state(struct kvm_vcpu *vcpu) | |||
169 | * Make sure stores to the GIC via the memory mapped interface | 169 | * Make sure stores to the GIC via the memory mapped interface |
170 | * are now visible to the system register interface. | 170 | * are now visible to the system register interface. |
171 | */ | 171 | */ |
172 | dsb(st); | 172 | if (!cpu_if->vgic_sre) |
173 | dsb(st); | ||
173 | 174 | ||
174 | cpu_if->vgic_vmcr = read_gicreg(ICH_VMCR_EL2); | 175 | cpu_if->vgic_vmcr = read_gicreg(ICH_VMCR_EL2); |
175 | 176 | ||
@@ -190,12 +191,11 @@ void __hyp_text __vgic_v3_save_state(struct kvm_vcpu *vcpu) | |||
190 | if (!(vcpu->arch.vgic_cpu.live_lrs & (1UL << i))) | 191 | if (!(vcpu->arch.vgic_cpu.live_lrs & (1UL << i))) |
191 | continue; | 192 | continue; |
192 | 193 | ||
193 | if (cpu_if->vgic_elrsr & (1 << i)) { | 194 | if (cpu_if->vgic_elrsr & (1 << i)) |
194 | cpu_if->vgic_lr[i] &= ~ICH_LR_STATE; | 195 | cpu_if->vgic_lr[i] &= ~ICH_LR_STATE; |
195 | continue; | 196 | else |
196 | } | 197 | cpu_if->vgic_lr[i] = __gic_v3_get_lr(i); |
197 | 198 | ||
198 | cpu_if->vgic_lr[i] = __gic_v3_get_lr(i); | ||
199 | __gic_v3_set_lr(0, i); | 199 | __gic_v3_set_lr(0, i); |
200 | } | 200 | } |
201 | 201 | ||
@@ -236,8 +236,12 @@ void __hyp_text __vgic_v3_save_state(struct kvm_vcpu *vcpu) | |||
236 | 236 | ||
237 | val = read_gicreg(ICC_SRE_EL2); | 237 | val = read_gicreg(ICC_SRE_EL2); |
238 | write_gicreg(val | ICC_SRE_EL2_ENABLE, ICC_SRE_EL2); | 238 | write_gicreg(val | ICC_SRE_EL2_ENABLE, ICC_SRE_EL2); |
239 | isb(); /* Make sure ENABLE is set at EL2 before setting SRE at EL1 */ | 239 | |
240 | write_gicreg(1, ICC_SRE_EL1); | 240 | if (!cpu_if->vgic_sre) { |
241 | /* Make sure ENABLE is set at EL2 before setting SRE at EL1 */ | ||
242 | isb(); | ||
243 | write_gicreg(1, ICC_SRE_EL1); | ||
244 | } | ||
241 | } | 245 | } |
242 | 246 | ||
243 | void __hyp_text __vgic_v3_restore_state(struct kvm_vcpu *vcpu) | 247 | void __hyp_text __vgic_v3_restore_state(struct kvm_vcpu *vcpu) |
@@ -256,8 +260,10 @@ void __hyp_text __vgic_v3_restore_state(struct kvm_vcpu *vcpu) | |||
256 | * been actually programmed with the value we want before | 260 | * been actually programmed with the value we want before |
257 | * starting to mess with the rest of the GIC. | 261 | * starting to mess with the rest of the GIC. |
258 | */ | 262 | */ |
259 | write_gicreg(cpu_if->vgic_sre, ICC_SRE_EL1); | 263 | if (!cpu_if->vgic_sre) { |
260 | isb(); | 264 | write_gicreg(0, ICC_SRE_EL1); |
265 | isb(); | ||
266 | } | ||
261 | 267 | ||
262 | val = read_gicreg(ICH_VTR_EL2); | 268 | val = read_gicreg(ICH_VTR_EL2); |
263 | max_lr_idx = vtr_to_max_lr_idx(val); | 269 | max_lr_idx = vtr_to_max_lr_idx(val); |
@@ -306,18 +312,18 @@ void __hyp_text __vgic_v3_restore_state(struct kvm_vcpu *vcpu) | |||
306 | * (re)distributors. This ensure the guest will read the | 312 | * (re)distributors. This ensure the guest will read the |
307 | * correct values from the memory-mapped interface. | 313 | * correct values from the memory-mapped interface. |
308 | */ | 314 | */ |
309 | isb(); | 315 | if (!cpu_if->vgic_sre) { |
310 | dsb(sy); | 316 | isb(); |
317 | dsb(sy); | ||
318 | } | ||
311 | vcpu->arch.vgic_cpu.live_lrs = live_lrs; | 319 | vcpu->arch.vgic_cpu.live_lrs = live_lrs; |
312 | 320 | ||
313 | /* | 321 | /* |
314 | * Prevent the guest from touching the GIC system registers if | 322 | * Prevent the guest from touching the GIC system registers if |
315 | * SRE isn't enabled for GICv3 emulation. | 323 | * SRE isn't enabled for GICv3 emulation. |
316 | */ | 324 | */ |
317 | if (!cpu_if->vgic_sre) { | 325 | write_gicreg(read_gicreg(ICC_SRE_EL2) & ~ICC_SRE_EL2_ENABLE, |
318 | write_gicreg(read_gicreg(ICC_SRE_EL2) & ~ICC_SRE_EL2_ENABLE, | 326 | ICC_SRE_EL2); |
319 | ICC_SRE_EL2); | ||
320 | } | ||
321 | } | 327 | } |
322 | 328 | ||
323 | void __hyp_text __vgic_v3_init_lrs(void) | 329 | void __hyp_text __vgic_v3_init_lrs(void) |
diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c index 7bbe3ff02602..a57d650f552c 100644 --- a/arch/arm64/kvm/sys_regs.c +++ b/arch/arm64/kvm/sys_regs.c | |||
@@ -134,6 +134,17 @@ static bool access_gic_sgi(struct kvm_vcpu *vcpu, | |||
134 | return true; | 134 | return true; |
135 | } | 135 | } |
136 | 136 | ||
137 | static bool access_gic_sre(struct kvm_vcpu *vcpu, | ||
138 | struct sys_reg_params *p, | ||
139 | const struct sys_reg_desc *r) | ||
140 | { | ||
141 | if (p->is_write) | ||
142 | return ignore_write(vcpu, p); | ||
143 | |||
144 | p->regval = vcpu->arch.vgic_cpu.vgic_v3.vgic_sre; | ||
145 | return true; | ||
146 | } | ||
147 | |||
137 | static bool trap_raz_wi(struct kvm_vcpu *vcpu, | 148 | static bool trap_raz_wi(struct kvm_vcpu *vcpu, |
138 | struct sys_reg_params *p, | 149 | struct sys_reg_params *p, |
139 | const struct sys_reg_desc *r) | 150 | const struct sys_reg_desc *r) |
@@ -958,7 +969,7 @@ static const struct sys_reg_desc sys_reg_descs[] = { | |||
958 | access_gic_sgi }, | 969 | access_gic_sgi }, |
959 | /* ICC_SRE_EL1 */ | 970 | /* ICC_SRE_EL1 */ |
960 | { Op0(0b11), Op1(0b000), CRn(0b1100), CRm(0b1100), Op2(0b101), | 971 | { Op0(0b11), Op1(0b000), CRn(0b1100), CRm(0b1100), Op2(0b101), |
961 | trap_raz_wi }, | 972 | access_gic_sre }, |
962 | 973 | ||
963 | /* CONTEXTIDR_EL1 */ | 974 | /* CONTEXTIDR_EL1 */ |
964 | { Op0(0b11), Op1(0b000), CRn(0b1101), CRm(0b0000), Op2(0b001), | 975 | { Op0(0b11), Op1(0b000), CRn(0b1101), CRm(0b0000), Op2(0b001), |
diff --git a/arch/arm64/mm/dump.c b/arch/arm64/mm/dump.c index 8404190fe2bd..ccfde237d6e6 100644 --- a/arch/arm64/mm/dump.c +++ b/arch/arm64/mm/dump.c | |||
@@ -150,6 +150,7 @@ static const struct prot_bits pte_bits[] = { | |||
150 | 150 | ||
151 | struct pg_level { | 151 | struct pg_level { |
152 | const struct prot_bits *bits; | 152 | const struct prot_bits *bits; |
153 | const char *name; | ||
153 | size_t num; | 154 | size_t num; |
154 | u64 mask; | 155 | u64 mask; |
155 | }; | 156 | }; |
@@ -157,15 +158,19 @@ struct pg_level { | |||
157 | static struct pg_level pg_level[] = { | 158 | static struct pg_level pg_level[] = { |
158 | { | 159 | { |
159 | }, { /* pgd */ | 160 | }, { /* pgd */ |
161 | .name = "PGD", | ||
160 | .bits = pte_bits, | 162 | .bits = pte_bits, |
161 | .num = ARRAY_SIZE(pte_bits), | 163 | .num = ARRAY_SIZE(pte_bits), |
162 | }, { /* pud */ | 164 | }, { /* pud */ |
165 | .name = (CONFIG_PGTABLE_LEVELS > 3) ? "PUD" : "PGD", | ||
163 | .bits = pte_bits, | 166 | .bits = pte_bits, |
164 | .num = ARRAY_SIZE(pte_bits), | 167 | .num = ARRAY_SIZE(pte_bits), |
165 | }, { /* pmd */ | 168 | }, { /* pmd */ |
169 | .name = (CONFIG_PGTABLE_LEVELS > 2) ? "PMD" : "PGD", | ||
166 | .bits = pte_bits, | 170 | .bits = pte_bits, |
167 | .num = ARRAY_SIZE(pte_bits), | 171 | .num = ARRAY_SIZE(pte_bits), |
168 | }, { /* pte */ | 172 | }, { /* pte */ |
173 | .name = "PTE", | ||
169 | .bits = pte_bits, | 174 | .bits = pte_bits, |
170 | .num = ARRAY_SIZE(pte_bits), | 175 | .num = ARRAY_SIZE(pte_bits), |
171 | }, | 176 | }, |
@@ -214,7 +219,8 @@ static void note_page(struct pg_state *st, unsigned long addr, unsigned level, | |||
214 | delta >>= 10; | 219 | delta >>= 10; |
215 | unit++; | 220 | unit++; |
216 | } | 221 | } |
217 | seq_printf(st->seq, "%9lu%c", delta, *unit); | 222 | seq_printf(st->seq, "%9lu%c %s", delta, *unit, |
223 | pg_level[st->level].name); | ||
218 | if (pg_level[st->level].bits) | 224 | if (pg_level[st->level].bits) |
219 | dump_prot(st, pg_level[st->level].bits, | 225 | dump_prot(st, pg_level[st->level].bits, |
220 | pg_level[st->level].num); | 226 | pg_level[st->level].num); |
diff --git a/arch/arm64/mm/hugetlbpage.c b/arch/arm64/mm/hugetlbpage.c index aa8aee7d6929..2e49bd252fe7 100644 --- a/arch/arm64/mm/hugetlbpage.c +++ b/arch/arm64/mm/hugetlbpage.c | |||
@@ -306,6 +306,10 @@ static __init int setup_hugepagesz(char *opt) | |||
306 | hugetlb_add_hstate(PMD_SHIFT - PAGE_SHIFT); | 306 | hugetlb_add_hstate(PMD_SHIFT - PAGE_SHIFT); |
307 | } else if (ps == PUD_SIZE) { | 307 | } else if (ps == PUD_SIZE) { |
308 | hugetlb_add_hstate(PUD_SHIFT - PAGE_SHIFT); | 308 | hugetlb_add_hstate(PUD_SHIFT - PAGE_SHIFT); |
309 | } else if (ps == (PAGE_SIZE * CONT_PTES)) { | ||
310 | hugetlb_add_hstate(CONT_PTE_SHIFT); | ||
311 | } else if (ps == (PMD_SIZE * CONT_PMDS)) { | ||
312 | hugetlb_add_hstate((PMD_SHIFT + CONT_PMD_SHIFT) - PAGE_SHIFT); | ||
309 | } else { | 313 | } else { |
310 | hugetlb_bad_size(); | 314 | hugetlb_bad_size(); |
311 | pr_err("hugepagesz: Unsupported page size %lu K\n", ps >> 10); | 315 | pr_err("hugepagesz: Unsupported page size %lu K\n", ps >> 10); |
@@ -314,3 +318,13 @@ static __init int setup_hugepagesz(char *opt) | |||
314 | return 1; | 318 | return 1; |
315 | } | 319 | } |
316 | __setup("hugepagesz=", setup_hugepagesz); | 320 | __setup("hugepagesz=", setup_hugepagesz); |
321 | |||
322 | #ifdef CONFIG_ARM64_64K_PAGES | ||
323 | static __init int add_default_hugepagesz(void) | ||
324 | { | ||
325 | if (size_to_hstate(CONT_PTES * PAGE_SIZE) == NULL) | ||
326 | hugetlb_add_hstate(CONT_PMD_SHIFT); | ||
327 | return 0; | ||
328 | } | ||
329 | arch_initcall(add_default_hugepagesz); | ||
330 | #endif | ||
diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h index c1e82e968506..a0948f40bc7b 100644 --- a/arch/powerpc/include/asm/reg.h +++ b/arch/powerpc/include/asm/reg.h | |||
@@ -717,7 +717,7 @@ | |||
717 | #define MMCR0_FCWAIT 0x00000002UL /* freeze counter in WAIT state */ | 717 | #define MMCR0_FCWAIT 0x00000002UL /* freeze counter in WAIT state */ |
718 | #define MMCR0_FCHV 0x00000001UL /* freeze conditions in hypervisor mode */ | 718 | #define MMCR0_FCHV 0x00000001UL /* freeze conditions in hypervisor mode */ |
719 | #define SPRN_MMCR1 798 | 719 | #define SPRN_MMCR1 798 |
720 | #define SPRN_MMCR2 769 | 720 | #define SPRN_MMCR2 785 |
721 | #define SPRN_MMCRA 0x312 | 721 | #define SPRN_MMCRA 0x312 |
722 | #define MMCRA_SDSYNC 0x80000000UL /* SDAR synced with SIAR */ | 722 | #define MMCRA_SDSYNC 0x80000000UL /* SDAR synced with SIAR */ |
723 | #define MMCRA_SDAR_DCACHE_MISS 0x40000000UL | 723 | #define MMCRA_SDAR_DCACHE_MISS 0x40000000UL |
@@ -754,13 +754,13 @@ | |||
754 | #define SPRN_PMC6 792 | 754 | #define SPRN_PMC6 792 |
755 | #define SPRN_PMC7 793 | 755 | #define SPRN_PMC7 793 |
756 | #define SPRN_PMC8 794 | 756 | #define SPRN_PMC8 794 |
757 | #define SPRN_SIAR 780 | ||
758 | #define SPRN_SDAR 781 | ||
759 | #define SPRN_SIER 784 | 757 | #define SPRN_SIER 784 |
760 | #define SIER_SIPR 0x2000000 /* Sampled MSR_PR */ | 758 | #define SIER_SIPR 0x2000000 /* Sampled MSR_PR */ |
761 | #define SIER_SIHV 0x1000000 /* Sampled MSR_HV */ | 759 | #define SIER_SIHV 0x1000000 /* Sampled MSR_HV */ |
762 | #define SIER_SIAR_VALID 0x0400000 /* SIAR contents valid */ | 760 | #define SIER_SIAR_VALID 0x0400000 /* SIAR contents valid */ |
763 | #define SIER_SDAR_VALID 0x0200000 /* SDAR contents valid */ | 761 | #define SIER_SDAR_VALID 0x0200000 /* SDAR contents valid */ |
762 | #define SPRN_SIAR 796 | ||
763 | #define SPRN_SDAR 797 | ||
764 | #define SPRN_TACR 888 | 764 | #define SPRN_TACR 888 |
765 | #define SPRN_TCSCR 889 | 765 | #define SPRN_TCSCR 889 |
766 | #define SPRN_CSIGR 890 | 766 | #define SPRN_CSIGR 890 |
diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c index da5192590c44..ccd2037c797f 100644 --- a/arch/powerpc/kernel/prom_init.c +++ b/arch/powerpc/kernel/prom_init.c | |||
@@ -656,6 +656,7 @@ unsigned char ibm_architecture_vec[] = { | |||
656 | W(0xffff0000), W(0x003e0000), /* POWER6 */ | 656 | W(0xffff0000), W(0x003e0000), /* POWER6 */ |
657 | W(0xffff0000), W(0x003f0000), /* POWER7 */ | 657 | W(0xffff0000), W(0x003f0000), /* POWER7 */ |
658 | W(0xffff0000), W(0x004b0000), /* POWER8E */ | 658 | W(0xffff0000), W(0x004b0000), /* POWER8E */ |
659 | W(0xffff0000), W(0x004c0000), /* POWER8NVL */ | ||
659 | W(0xffff0000), W(0x004d0000), /* POWER8 */ | 660 | W(0xffff0000), W(0x004d0000), /* POWER8 */ |
660 | W(0xffffffff), W(0x0f000004), /* all 2.07-compliant */ | 661 | W(0xffffffff), W(0x0f000004), /* all 2.07-compliant */ |
661 | W(0xffffffff), W(0x0f000003), /* all 2.06-compliant */ | 662 | W(0xffffffff), W(0x0f000003), /* all 2.06-compliant */ |
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c index 59268969a0bc..b2740c67e172 100644 --- a/arch/powerpc/mm/hash_utils_64.c +++ b/arch/powerpc/mm/hash_utils_64.c | |||
@@ -159,6 +159,19 @@ static struct mmu_psize_def mmu_psize_defaults_gp[] = { | |||
159 | }, | 159 | }, |
160 | }; | 160 | }; |
161 | 161 | ||
162 | /* | ||
163 | * 'R' and 'C' update notes: | ||
164 | * - Under pHyp or KVM, the updatepp path will not set C, thus it *will* | ||
165 | * create writeable HPTEs without C set, because the hcall H_PROTECT | ||
166 | * that we use in that case will not update C | ||
167 | * - The above is however not a problem, because we also don't do that | ||
168 | * fancy "no flush" variant of eviction and we use H_REMOVE which will | ||
169 | * do the right thing and thus we don't have the race I described earlier | ||
170 | * | ||
171 | * - Under bare metal, we do have the race, so we need R and C set | ||
172 | * - We make sure R is always set and never lost | ||
173 | * - C is _PAGE_DIRTY, and *should* always be set for a writeable mapping | ||
174 | */ | ||
162 | unsigned long htab_convert_pte_flags(unsigned long pteflags) | 175 | unsigned long htab_convert_pte_flags(unsigned long pteflags) |
163 | { | 176 | { |
164 | unsigned long rflags = 0; | 177 | unsigned long rflags = 0; |
@@ -186,9 +199,14 @@ unsigned long htab_convert_pte_flags(unsigned long pteflags) | |||
186 | rflags |= 0x1; | 199 | rflags |= 0x1; |
187 | } | 200 | } |
188 | /* | 201 | /* |
189 | * Always add "C" bit for perf. Memory coherence is always enabled | 202 | * We can't allow hardware to update hpte bits. Hence always |
203 | * set 'R' bit and set 'C' if it is a write fault | ||
204 | * Memory coherence is always enabled | ||
190 | */ | 205 | */ |
191 | rflags |= HPTE_R_C | HPTE_R_M; | 206 | rflags |= HPTE_R_R | HPTE_R_M; |
207 | |||
208 | if (pteflags & _PAGE_DIRTY) | ||
209 | rflags |= HPTE_R_C; | ||
192 | /* | 210 | /* |
193 | * Add in WIG bits | 211 | * Add in WIG bits |
194 | */ | 212 | */ |
diff --git a/arch/powerpc/mm/pgtable-book3s64.c b/arch/powerpc/mm/pgtable-book3s64.c index eb4451144746..670318766545 100644 --- a/arch/powerpc/mm/pgtable-book3s64.c +++ b/arch/powerpc/mm/pgtable-book3s64.c | |||
@@ -33,10 +33,7 @@ int pmdp_set_access_flags(struct vm_area_struct *vma, unsigned long address, | |||
33 | changed = !pmd_same(*(pmdp), entry); | 33 | changed = !pmd_same(*(pmdp), entry); |
34 | if (changed) { | 34 | if (changed) { |
35 | __ptep_set_access_flags(pmdp_ptep(pmdp), pmd_pte(entry)); | 35 | __ptep_set_access_flags(pmdp_ptep(pmdp), pmd_pte(entry)); |
36 | /* | 36 | flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE); |
37 | * Since we are not supporting SW TLB systems, we don't | ||
38 | * have any thing similar to flush_tlb_page_nohash() | ||
39 | */ | ||
40 | } | 37 | } |
41 | return changed; | 38 | return changed; |
42 | } | 39 | } |
diff --git a/arch/powerpc/mm/pgtable-radix.c b/arch/powerpc/mm/pgtable-radix.c index 18b2c11604fa..c939e6e57a9e 100644 --- a/arch/powerpc/mm/pgtable-radix.c +++ b/arch/powerpc/mm/pgtable-radix.c | |||
@@ -296,11 +296,6 @@ found: | |||
296 | void __init radix__early_init_mmu(void) | 296 | void __init radix__early_init_mmu(void) |
297 | { | 297 | { |
298 | unsigned long lpcr; | 298 | unsigned long lpcr; |
299 | /* | ||
300 | * setup LPCR UPRT based on mmu_features | ||
301 | */ | ||
302 | lpcr = mfspr(SPRN_LPCR); | ||
303 | mtspr(SPRN_LPCR, lpcr | LPCR_UPRT); | ||
304 | 299 | ||
305 | #ifdef CONFIG_PPC_64K_PAGES | 300 | #ifdef CONFIG_PPC_64K_PAGES |
306 | /* PAGE_SIZE mappings */ | 301 | /* PAGE_SIZE mappings */ |
@@ -343,8 +338,11 @@ void __init radix__early_init_mmu(void) | |||
343 | __pte_frag_size_shift = H_PTE_FRAG_SIZE_SHIFT; | 338 | __pte_frag_size_shift = H_PTE_FRAG_SIZE_SHIFT; |
344 | 339 | ||
345 | radix_init_page_sizes(); | 340 | radix_init_page_sizes(); |
346 | if (!firmware_has_feature(FW_FEATURE_LPAR)) | 341 | if (!firmware_has_feature(FW_FEATURE_LPAR)) { |
342 | lpcr = mfspr(SPRN_LPCR); | ||
343 | mtspr(SPRN_LPCR, lpcr | LPCR_UPRT); | ||
347 | radix_init_partition_table(); | 344 | radix_init_partition_table(); |
345 | } | ||
348 | 346 | ||
349 | radix_init_pgtable(); | 347 | radix_init_pgtable(); |
350 | } | 348 | } |
@@ -353,16 +351,15 @@ void radix__early_init_mmu_secondary(void) | |||
353 | { | 351 | { |
354 | unsigned long lpcr; | 352 | unsigned long lpcr; |
355 | /* | 353 | /* |
356 | * setup LPCR UPRT based on mmu_features | 354 | * update partition table control register and UPRT |
357 | */ | 355 | */ |
358 | lpcr = mfspr(SPRN_LPCR); | 356 | if (!firmware_has_feature(FW_FEATURE_LPAR)) { |
359 | mtspr(SPRN_LPCR, lpcr | LPCR_UPRT); | 357 | lpcr = mfspr(SPRN_LPCR); |
360 | /* | 358 | mtspr(SPRN_LPCR, lpcr | LPCR_UPRT); |
361 | * update partition table control register, 64 K size. | 359 | |
362 | */ | ||
363 | if (!firmware_has_feature(FW_FEATURE_LPAR)) | ||
364 | mtspr(SPRN_PTCR, | 360 | mtspr(SPRN_PTCR, |
365 | __pa(partition_tb) | (PATB_SIZE_SHIFT - 12)); | 361 | __pa(partition_tb) | (PATB_SIZE_SHIFT - 12)); |
362 | } | ||
366 | } | 363 | } |
367 | 364 | ||
368 | void radix__setup_initial_memory_limit(phys_addr_t first_memblock_base, | 365 | void radix__setup_initial_memory_limit(phys_addr_t first_memblock_base, |
diff --git a/arch/powerpc/platforms/pseries/eeh_pseries.c b/arch/powerpc/platforms/pseries/eeh_pseries.c index ac3ffd97e059..3998e0f9a03b 100644 --- a/arch/powerpc/platforms/pseries/eeh_pseries.c +++ b/arch/powerpc/platforms/pseries/eeh_pseries.c | |||
@@ -53,7 +53,6 @@ static int ibm_read_slot_reset_state2; | |||
53 | static int ibm_slot_error_detail; | 53 | static int ibm_slot_error_detail; |
54 | static int ibm_get_config_addr_info; | 54 | static int ibm_get_config_addr_info; |
55 | static int ibm_get_config_addr_info2; | 55 | static int ibm_get_config_addr_info2; |
56 | static int ibm_configure_bridge; | ||
57 | static int ibm_configure_pe; | 56 | static int ibm_configure_pe; |
58 | 57 | ||
59 | /* | 58 | /* |
@@ -81,7 +80,14 @@ static int pseries_eeh_init(void) | |||
81 | ibm_get_config_addr_info2 = rtas_token("ibm,get-config-addr-info2"); | 80 | ibm_get_config_addr_info2 = rtas_token("ibm,get-config-addr-info2"); |
82 | ibm_get_config_addr_info = rtas_token("ibm,get-config-addr-info"); | 81 | ibm_get_config_addr_info = rtas_token("ibm,get-config-addr-info"); |
83 | ibm_configure_pe = rtas_token("ibm,configure-pe"); | 82 | ibm_configure_pe = rtas_token("ibm,configure-pe"); |
84 | ibm_configure_bridge = rtas_token("ibm,configure-bridge"); | 83 | |
84 | /* | ||
85 | * ibm,configure-pe and ibm,configure-bridge have the same semantics, | ||
86 | * however ibm,configure-pe can be faster. If we can't find | ||
87 | * ibm,configure-pe then fall back to using ibm,configure-bridge. | ||
88 | */ | ||
89 | if (ibm_configure_pe == RTAS_UNKNOWN_SERVICE) | ||
90 | ibm_configure_pe = rtas_token("ibm,configure-bridge"); | ||
85 | 91 | ||
86 | /* | 92 | /* |
87 | * Necessary sanity check. We needn't check "get-config-addr-info" | 93 | * Necessary sanity check. We needn't check "get-config-addr-info" |
@@ -93,8 +99,7 @@ static int pseries_eeh_init(void) | |||
93 | (ibm_read_slot_reset_state2 == RTAS_UNKNOWN_SERVICE && | 99 | (ibm_read_slot_reset_state2 == RTAS_UNKNOWN_SERVICE && |
94 | ibm_read_slot_reset_state == RTAS_UNKNOWN_SERVICE) || | 100 | ibm_read_slot_reset_state == RTAS_UNKNOWN_SERVICE) || |
95 | ibm_slot_error_detail == RTAS_UNKNOWN_SERVICE || | 101 | ibm_slot_error_detail == RTAS_UNKNOWN_SERVICE || |
96 | (ibm_configure_pe == RTAS_UNKNOWN_SERVICE && | 102 | ibm_configure_pe == RTAS_UNKNOWN_SERVICE) { |
97 | ibm_configure_bridge == RTAS_UNKNOWN_SERVICE)) { | ||
98 | pr_info("EEH functionality not supported\n"); | 103 | pr_info("EEH functionality not supported\n"); |
99 | return -EINVAL; | 104 | return -EINVAL; |
100 | } | 105 | } |
@@ -615,29 +620,41 @@ static int pseries_eeh_configure_bridge(struct eeh_pe *pe) | |||
615 | { | 620 | { |
616 | int config_addr; | 621 | int config_addr; |
617 | int ret; | 622 | int ret; |
623 | /* Waiting 0.2s maximum before skipping configuration */ | ||
624 | int max_wait = 200; | ||
618 | 625 | ||
619 | /* Figure out the PE address */ | 626 | /* Figure out the PE address */ |
620 | config_addr = pe->config_addr; | 627 | config_addr = pe->config_addr; |
621 | if (pe->addr) | 628 | if (pe->addr) |
622 | config_addr = pe->addr; | 629 | config_addr = pe->addr; |
623 | 630 | ||
624 | /* Use new configure-pe function, if supported */ | 631 | while (max_wait > 0) { |
625 | if (ibm_configure_pe != RTAS_UNKNOWN_SERVICE) { | ||
626 | ret = rtas_call(ibm_configure_pe, 3, 1, NULL, | 632 | ret = rtas_call(ibm_configure_pe, 3, 1, NULL, |
627 | config_addr, BUID_HI(pe->phb->buid), | 633 | config_addr, BUID_HI(pe->phb->buid), |
628 | BUID_LO(pe->phb->buid)); | 634 | BUID_LO(pe->phb->buid)); |
629 | } else if (ibm_configure_bridge != RTAS_UNKNOWN_SERVICE) { | ||
630 | ret = rtas_call(ibm_configure_bridge, 3, 1, NULL, | ||
631 | config_addr, BUID_HI(pe->phb->buid), | ||
632 | BUID_LO(pe->phb->buid)); | ||
633 | } else { | ||
634 | return -EFAULT; | ||
635 | } | ||
636 | 635 | ||
637 | if (ret) | 636 | if (!ret) |
638 | pr_warn("%s: Unable to configure bridge PHB#%d-PE#%x (%d)\n", | 637 | return ret; |
639 | __func__, pe->phb->global_number, pe->addr, ret); | 638 | |
639 | /* | ||
640 | * If RTAS returns a delay value that's above 100ms, cut it | ||
641 | * down to 100ms in case firmware made a mistake. For more | ||
642 | * on how these delay values work see rtas_busy_delay_time | ||
643 | */ | ||
644 | if (ret > RTAS_EXTENDED_DELAY_MIN+2 && | ||
645 | ret <= RTAS_EXTENDED_DELAY_MAX) | ||
646 | ret = RTAS_EXTENDED_DELAY_MIN+2; | ||
647 | |||
648 | max_wait -= rtas_busy_delay_time(ret); | ||
649 | |||
650 | if (max_wait < 0) | ||
651 | break; | ||
652 | |||
653 | rtas_busy_delay(ret); | ||
654 | } | ||
640 | 655 | ||
656 | pr_warn("%s: Unable to configure bridge PHB#%d-PE#%x (%d)\n", | ||
657 | __func__, pe->phb->global_number, pe->addr, ret); | ||
641 | return ret; | 658 | return ret; |
642 | } | 659 | } |
643 | 660 | ||
diff --git a/arch/s390/configs/default_defconfig b/arch/s390/configs/default_defconfig index 0ac42cc4f880..d5ec71b2ed02 100644 --- a/arch/s390/configs/default_defconfig +++ b/arch/s390/configs/default_defconfig | |||
@@ -1,8 +1,7 @@ | |||
1 | CONFIG_SYSVIPC=y | 1 | CONFIG_SYSVIPC=y |
2 | CONFIG_POSIX_MQUEUE=y | 2 | CONFIG_POSIX_MQUEUE=y |
3 | CONFIG_FHANDLE=y | ||
4 | CONFIG_AUDIT=y | 3 | CONFIG_AUDIT=y |
5 | CONFIG_NO_HZ=y | 4 | CONFIG_NO_HZ_IDLE=y |
6 | CONFIG_HIGH_RES_TIMERS=y | 5 | CONFIG_HIGH_RES_TIMERS=y |
7 | CONFIG_BSD_PROCESS_ACCT=y | 6 | CONFIG_BSD_PROCESS_ACCT=y |
8 | CONFIG_BSD_PROCESS_ACCT_V3=y | 7 | CONFIG_BSD_PROCESS_ACCT_V3=y |
@@ -13,19 +12,19 @@ CONFIG_TASK_IO_ACCOUNTING=y | |||
13 | CONFIG_IKCONFIG=y | 12 | CONFIG_IKCONFIG=y |
14 | CONFIG_IKCONFIG_PROC=y | 13 | CONFIG_IKCONFIG_PROC=y |
15 | CONFIG_NUMA_BALANCING=y | 14 | CONFIG_NUMA_BALANCING=y |
16 | CONFIG_CGROUP_FREEZER=y | ||
17 | CONFIG_CGROUP_PIDS=y | ||
18 | CONFIG_CGROUP_DEVICE=y | ||
19 | CONFIG_CPUSETS=y | ||
20 | CONFIG_CGROUP_CPUACCT=y | ||
21 | CONFIG_MEMCG=y | 15 | CONFIG_MEMCG=y |
22 | CONFIG_MEMCG_SWAP=y | 16 | CONFIG_MEMCG_SWAP=y |
23 | CONFIG_MEMCG_KMEM=y | 17 | CONFIG_BLK_CGROUP=y |
24 | CONFIG_CGROUP_HUGETLB=y | ||
25 | CONFIG_CGROUP_PERF=y | ||
26 | CONFIG_CFS_BANDWIDTH=y | 18 | CONFIG_CFS_BANDWIDTH=y |
27 | CONFIG_RT_GROUP_SCHED=y | 19 | CONFIG_RT_GROUP_SCHED=y |
28 | CONFIG_BLK_CGROUP=y | 20 | CONFIG_CGROUP_PIDS=y |
21 | CONFIG_CGROUP_FREEZER=y | ||
22 | CONFIG_CGROUP_HUGETLB=y | ||
23 | CONFIG_CPUSETS=y | ||
24 | CONFIG_CGROUP_DEVICE=y | ||
25 | CONFIG_CGROUP_CPUACCT=y | ||
26 | CONFIG_CGROUP_PERF=y | ||
27 | CONFIG_CHECKPOINT_RESTORE=y | ||
29 | CONFIG_NAMESPACES=y | 28 | CONFIG_NAMESPACES=y |
30 | CONFIG_USER_NS=y | 29 | CONFIG_USER_NS=y |
31 | CONFIG_SCHED_AUTOGROUP=y | 30 | CONFIG_SCHED_AUTOGROUP=y |
@@ -55,7 +54,6 @@ CONFIG_UNIXWARE_DISKLABEL=y | |||
55 | CONFIG_CFQ_GROUP_IOSCHED=y | 54 | CONFIG_CFQ_GROUP_IOSCHED=y |
56 | CONFIG_DEFAULT_DEADLINE=y | 55 | CONFIG_DEFAULT_DEADLINE=y |
57 | CONFIG_LIVEPATCH=y | 56 | CONFIG_LIVEPATCH=y |
58 | CONFIG_MARCH_Z196=y | ||
59 | CONFIG_TUNE_ZEC12=y | 57 | CONFIG_TUNE_ZEC12=y |
60 | CONFIG_NR_CPUS=256 | 58 | CONFIG_NR_CPUS=256 |
61 | CONFIG_NUMA=y | 59 | CONFIG_NUMA=y |
@@ -65,6 +63,15 @@ CONFIG_MEMORY_HOTPLUG=y | |||
65 | CONFIG_MEMORY_HOTREMOVE=y | 63 | CONFIG_MEMORY_HOTREMOVE=y |
66 | CONFIG_KSM=y | 64 | CONFIG_KSM=y |
67 | CONFIG_TRANSPARENT_HUGEPAGE=y | 65 | CONFIG_TRANSPARENT_HUGEPAGE=y |
66 | CONFIG_CLEANCACHE=y | ||
67 | CONFIG_FRONTSWAP=y | ||
68 | CONFIG_CMA=y | ||
69 | CONFIG_MEM_SOFT_DIRTY=y | ||
70 | CONFIG_ZPOOL=m | ||
71 | CONFIG_ZBUD=m | ||
72 | CONFIG_ZSMALLOC=m | ||
73 | CONFIG_ZSMALLOC_STAT=y | ||
74 | CONFIG_IDLE_PAGE_TRACKING=y | ||
68 | CONFIG_PCI=y | 75 | CONFIG_PCI=y |
69 | CONFIG_PCI_DEBUG=y | 76 | CONFIG_PCI_DEBUG=y |
70 | CONFIG_HOTPLUG_PCI=y | 77 | CONFIG_HOTPLUG_PCI=y |
@@ -452,6 +459,7 @@ CONFIG_HW_RANDOM_VIRTIO=m | |||
452 | CONFIG_RAW_DRIVER=m | 459 | CONFIG_RAW_DRIVER=m |
453 | CONFIG_HANGCHECK_TIMER=m | 460 | CONFIG_HANGCHECK_TIMER=m |
454 | CONFIG_TN3270_FS=y | 461 | CONFIG_TN3270_FS=y |
462 | # CONFIG_HWMON is not set | ||
455 | CONFIG_WATCHDOG=y | 463 | CONFIG_WATCHDOG=y |
456 | CONFIG_WATCHDOG_NOWAYOUT=y | 464 | CONFIG_WATCHDOG_NOWAYOUT=y |
457 | CONFIG_SOFT_WATCHDOG=m | 465 | CONFIG_SOFT_WATCHDOG=m |
@@ -537,6 +545,8 @@ CONFIG_DLM=m | |||
537 | CONFIG_PRINTK_TIME=y | 545 | CONFIG_PRINTK_TIME=y |
538 | CONFIG_DYNAMIC_DEBUG=y | 546 | CONFIG_DYNAMIC_DEBUG=y |
539 | CONFIG_DEBUG_INFO=y | 547 | CONFIG_DEBUG_INFO=y |
548 | CONFIG_DEBUG_INFO_DWARF4=y | ||
549 | CONFIG_GDB_SCRIPTS=y | ||
540 | CONFIG_FRAME_WARN=1024 | 550 | CONFIG_FRAME_WARN=1024 |
541 | CONFIG_READABLE_ASM=y | 551 | CONFIG_READABLE_ASM=y |
542 | CONFIG_UNUSED_SYMBOLS=y | 552 | CONFIG_UNUSED_SYMBOLS=y |
@@ -555,13 +565,17 @@ CONFIG_SLUB_DEBUG_ON=y | |||
555 | CONFIG_SLUB_STATS=y | 565 | CONFIG_SLUB_STATS=y |
556 | CONFIG_DEBUG_STACK_USAGE=y | 566 | CONFIG_DEBUG_STACK_USAGE=y |
557 | CONFIG_DEBUG_VM=y | 567 | CONFIG_DEBUG_VM=y |
568 | CONFIG_DEBUG_VM_VMACACHE=y | ||
558 | CONFIG_DEBUG_VM_RB=y | 569 | CONFIG_DEBUG_VM_RB=y |
570 | CONFIG_DEBUG_VM_PGFLAGS=y | ||
559 | CONFIG_DEBUG_MEMORY_INIT=y | 571 | CONFIG_DEBUG_MEMORY_INIT=y |
560 | CONFIG_MEMORY_NOTIFIER_ERROR_INJECT=m | 572 | CONFIG_MEMORY_NOTIFIER_ERROR_INJECT=m |
561 | CONFIG_DEBUG_PER_CPU_MAPS=y | 573 | CONFIG_DEBUG_PER_CPU_MAPS=y |
562 | CONFIG_DEBUG_SHIRQ=y | 574 | CONFIG_DEBUG_SHIRQ=y |
563 | CONFIG_DETECT_HUNG_TASK=y | 575 | CONFIG_DETECT_HUNG_TASK=y |
576 | CONFIG_WQ_WATCHDOG=y | ||
564 | CONFIG_PANIC_ON_OOPS=y | 577 | CONFIG_PANIC_ON_OOPS=y |
578 | CONFIG_DEBUG_TIMEKEEPING=y | ||
565 | CONFIG_TIMER_STATS=y | 579 | CONFIG_TIMER_STATS=y |
566 | CONFIG_DEBUG_RT_MUTEXES=y | 580 | CONFIG_DEBUG_RT_MUTEXES=y |
567 | CONFIG_DEBUG_WW_MUTEX_SLOWPATH=y | 581 | CONFIG_DEBUG_WW_MUTEX_SLOWPATH=y |
@@ -596,6 +610,8 @@ CONFIG_FTRACE_SYSCALLS=y | |||
596 | CONFIG_STACK_TRACER=y | 610 | CONFIG_STACK_TRACER=y |
597 | CONFIG_BLK_DEV_IO_TRACE=y | 611 | CONFIG_BLK_DEV_IO_TRACE=y |
598 | CONFIG_UPROBE_EVENT=y | 612 | CONFIG_UPROBE_EVENT=y |
613 | CONFIG_FUNCTION_PROFILER=y | ||
614 | CONFIG_TRACE_ENUM_MAP_FILE=y | ||
599 | CONFIG_LKDTM=m | 615 | CONFIG_LKDTM=m |
600 | CONFIG_TEST_LIST_SORT=y | 616 | CONFIG_TEST_LIST_SORT=y |
601 | CONFIG_KPROBES_SANITY_TEST=y | 617 | CONFIG_KPROBES_SANITY_TEST=y |
@@ -607,7 +623,6 @@ CONFIG_TEST_STRING_HELPERS=y | |||
607 | CONFIG_TEST_KSTRTOX=y | 623 | CONFIG_TEST_KSTRTOX=y |
608 | CONFIG_DMA_API_DEBUG=y | 624 | CONFIG_DMA_API_DEBUG=y |
609 | CONFIG_TEST_BPF=m | 625 | CONFIG_TEST_BPF=m |
610 | # CONFIG_STRICT_DEVMEM is not set | ||
611 | CONFIG_S390_PTDUMP=y | 626 | CONFIG_S390_PTDUMP=y |
612 | CONFIG_ENCRYPTED_KEYS=m | 627 | CONFIG_ENCRYPTED_KEYS=m |
613 | CONFIG_SECURITY=y | 628 | CONFIG_SECURITY=y |
@@ -651,7 +666,6 @@ CONFIG_CRYPTO_SEED=m | |||
651 | CONFIG_CRYPTO_SERPENT=m | 666 | CONFIG_CRYPTO_SERPENT=m |
652 | CONFIG_CRYPTO_TEA=m | 667 | CONFIG_CRYPTO_TEA=m |
653 | CONFIG_CRYPTO_TWOFISH=m | 668 | CONFIG_CRYPTO_TWOFISH=m |
654 | CONFIG_CRYPTO_ZLIB=y | ||
655 | CONFIG_CRYPTO_LZO=m | 669 | CONFIG_CRYPTO_LZO=m |
656 | CONFIG_CRYPTO_LZ4=m | 670 | CONFIG_CRYPTO_LZ4=m |
657 | CONFIG_CRYPTO_LZ4HC=m | 671 | CONFIG_CRYPTO_LZ4HC=m |
@@ -664,7 +678,7 @@ CONFIG_CRYPTO_SHA512_S390=m | |||
664 | CONFIG_CRYPTO_DES_S390=m | 678 | CONFIG_CRYPTO_DES_S390=m |
665 | CONFIG_CRYPTO_AES_S390=m | 679 | CONFIG_CRYPTO_AES_S390=m |
666 | CONFIG_CRYPTO_GHASH_S390=m | 680 | CONFIG_CRYPTO_GHASH_S390=m |
667 | CONFIG_ASYMMETRIC_KEY_TYPE=m | 681 | CONFIG_ASYMMETRIC_KEY_TYPE=y |
668 | CONFIG_ASYMMETRIC_PUBLIC_KEY_SUBTYPE=m | 682 | CONFIG_ASYMMETRIC_PUBLIC_KEY_SUBTYPE=m |
669 | CONFIG_X509_CERTIFICATE_PARSER=m | 683 | CONFIG_X509_CERTIFICATE_PARSER=m |
670 | CONFIG_CRC7=m | 684 | CONFIG_CRC7=m |
diff --git a/arch/s390/configs/gcov_defconfig b/arch/s390/configs/gcov_defconfig index a31dcd56f7c0..f46a35115d2d 100644 --- a/arch/s390/configs/gcov_defconfig +++ b/arch/s390/configs/gcov_defconfig | |||
@@ -1,8 +1,7 @@ | |||
1 | CONFIG_SYSVIPC=y | 1 | CONFIG_SYSVIPC=y |
2 | CONFIG_POSIX_MQUEUE=y | 2 | CONFIG_POSIX_MQUEUE=y |
3 | CONFIG_FHANDLE=y | ||
4 | CONFIG_AUDIT=y | 3 | CONFIG_AUDIT=y |
5 | CONFIG_NO_HZ=y | 4 | CONFIG_NO_HZ_IDLE=y |
6 | CONFIG_HIGH_RES_TIMERS=y | 5 | CONFIG_HIGH_RES_TIMERS=y |
7 | CONFIG_BSD_PROCESS_ACCT=y | 6 | CONFIG_BSD_PROCESS_ACCT=y |
8 | CONFIG_BSD_PROCESS_ACCT_V3=y | 7 | CONFIG_BSD_PROCESS_ACCT_V3=y |
@@ -13,17 +12,17 @@ CONFIG_TASK_IO_ACCOUNTING=y | |||
13 | CONFIG_IKCONFIG=y | 12 | CONFIG_IKCONFIG=y |
14 | CONFIG_IKCONFIG_PROC=y | 13 | CONFIG_IKCONFIG_PROC=y |
15 | CONFIG_NUMA_BALANCING=y | 14 | CONFIG_NUMA_BALANCING=y |
16 | CONFIG_CGROUP_FREEZER=y | ||
17 | CONFIG_CGROUP_PIDS=y | ||
18 | CONFIG_CGROUP_DEVICE=y | ||
19 | CONFIG_CPUSETS=y | ||
20 | CONFIG_CGROUP_CPUACCT=y | ||
21 | CONFIG_MEMCG=y | 15 | CONFIG_MEMCG=y |
22 | CONFIG_MEMCG_SWAP=y | 16 | CONFIG_MEMCG_SWAP=y |
23 | CONFIG_MEMCG_KMEM=y | 17 | CONFIG_BLK_CGROUP=y |
18 | CONFIG_CGROUP_PIDS=y | ||
19 | CONFIG_CGROUP_FREEZER=y | ||
24 | CONFIG_CGROUP_HUGETLB=y | 20 | CONFIG_CGROUP_HUGETLB=y |
21 | CONFIG_CPUSETS=y | ||
22 | CONFIG_CGROUP_DEVICE=y | ||
23 | CONFIG_CGROUP_CPUACCT=y | ||
25 | CONFIG_CGROUP_PERF=y | 24 | CONFIG_CGROUP_PERF=y |
26 | CONFIG_BLK_CGROUP=y | 25 | CONFIG_CHECKPOINT_RESTORE=y |
27 | CONFIG_NAMESPACES=y | 26 | CONFIG_NAMESPACES=y |
28 | CONFIG_USER_NS=y | 27 | CONFIG_USER_NS=y |
29 | CONFIG_SCHED_AUTOGROUP=y | 28 | CONFIG_SCHED_AUTOGROUP=y |
@@ -53,7 +52,6 @@ CONFIG_SOLARIS_X86_PARTITION=y | |||
53 | CONFIG_UNIXWARE_DISKLABEL=y | 52 | CONFIG_UNIXWARE_DISKLABEL=y |
54 | CONFIG_CFQ_GROUP_IOSCHED=y | 53 | CONFIG_CFQ_GROUP_IOSCHED=y |
55 | CONFIG_DEFAULT_DEADLINE=y | 54 | CONFIG_DEFAULT_DEADLINE=y |
56 | CONFIG_MARCH_Z196=y | ||
57 | CONFIG_TUNE_ZEC12=y | 55 | CONFIG_TUNE_ZEC12=y |
58 | CONFIG_NR_CPUS=256 | 56 | CONFIG_NR_CPUS=256 |
59 | CONFIG_NUMA=y | 57 | CONFIG_NUMA=y |
@@ -62,6 +60,14 @@ CONFIG_MEMORY_HOTPLUG=y | |||
62 | CONFIG_MEMORY_HOTREMOVE=y | 60 | CONFIG_MEMORY_HOTREMOVE=y |
63 | CONFIG_KSM=y | 61 | CONFIG_KSM=y |
64 | CONFIG_TRANSPARENT_HUGEPAGE=y | 62 | CONFIG_TRANSPARENT_HUGEPAGE=y |
63 | CONFIG_CLEANCACHE=y | ||
64 | CONFIG_FRONTSWAP=y | ||
65 | CONFIG_CMA=y | ||
66 | CONFIG_ZSWAP=y | ||
67 | CONFIG_ZBUD=m | ||
68 | CONFIG_ZSMALLOC=m | ||
69 | CONFIG_ZSMALLOC_STAT=y | ||
70 | CONFIG_IDLE_PAGE_TRACKING=y | ||
65 | CONFIG_PCI=y | 71 | CONFIG_PCI=y |
66 | CONFIG_HOTPLUG_PCI=y | 72 | CONFIG_HOTPLUG_PCI=y |
67 | CONFIG_HOTPLUG_PCI_S390=y | 73 | CONFIG_HOTPLUG_PCI_S390=y |
@@ -530,6 +536,8 @@ CONFIG_NLS_UTF8=m | |||
530 | CONFIG_DLM=m | 536 | CONFIG_DLM=m |
531 | CONFIG_PRINTK_TIME=y | 537 | CONFIG_PRINTK_TIME=y |
532 | CONFIG_DEBUG_INFO=y | 538 | CONFIG_DEBUG_INFO=y |
539 | CONFIG_DEBUG_INFO_DWARF4=y | ||
540 | CONFIG_GDB_SCRIPTS=y | ||
533 | # CONFIG_ENABLE_MUST_CHECK is not set | 541 | # CONFIG_ENABLE_MUST_CHECK is not set |
534 | CONFIG_FRAME_WARN=1024 | 542 | CONFIG_FRAME_WARN=1024 |
535 | CONFIG_UNUSED_SYMBOLS=y | 543 | CONFIG_UNUSED_SYMBOLS=y |
@@ -547,13 +555,13 @@ CONFIG_LATENCYTOP=y | |||
547 | CONFIG_DEBUG_STRICT_USER_COPY_CHECKS=y | 555 | CONFIG_DEBUG_STRICT_USER_COPY_CHECKS=y |
548 | CONFIG_BLK_DEV_IO_TRACE=y | 556 | CONFIG_BLK_DEV_IO_TRACE=y |
549 | # CONFIG_KPROBE_EVENT is not set | 557 | # CONFIG_KPROBE_EVENT is not set |
558 | CONFIG_TRACE_ENUM_MAP_FILE=y | ||
550 | CONFIG_LKDTM=m | 559 | CONFIG_LKDTM=m |
551 | CONFIG_RBTREE_TEST=m | 560 | CONFIG_RBTREE_TEST=m |
552 | CONFIG_INTERVAL_TREE_TEST=m | 561 | CONFIG_INTERVAL_TREE_TEST=m |
553 | CONFIG_PERCPU_TEST=m | 562 | CONFIG_PERCPU_TEST=m |
554 | CONFIG_ATOMIC64_SELFTEST=y | 563 | CONFIG_ATOMIC64_SELFTEST=y |
555 | CONFIG_TEST_BPF=m | 564 | CONFIG_TEST_BPF=m |
556 | # CONFIG_STRICT_DEVMEM is not set | ||
557 | CONFIG_S390_PTDUMP=y | 565 | CONFIG_S390_PTDUMP=y |
558 | CONFIG_ENCRYPTED_KEYS=m | 566 | CONFIG_ENCRYPTED_KEYS=m |
559 | CONFIG_SECURITY=y | 567 | CONFIG_SECURITY=y |
@@ -597,8 +605,6 @@ CONFIG_CRYPTO_SEED=m | |||
597 | CONFIG_CRYPTO_SERPENT=m | 605 | CONFIG_CRYPTO_SERPENT=m |
598 | CONFIG_CRYPTO_TEA=m | 606 | CONFIG_CRYPTO_TEA=m |
599 | CONFIG_CRYPTO_TWOFISH=m | 607 | CONFIG_CRYPTO_TWOFISH=m |
600 | CONFIG_CRYPTO_ZLIB=y | ||
601 | CONFIG_CRYPTO_LZO=m | ||
602 | CONFIG_CRYPTO_LZ4=m | 608 | CONFIG_CRYPTO_LZ4=m |
603 | CONFIG_CRYPTO_LZ4HC=m | 609 | CONFIG_CRYPTO_LZ4HC=m |
604 | CONFIG_CRYPTO_USER_API_HASH=m | 610 | CONFIG_CRYPTO_USER_API_HASH=m |
@@ -610,7 +616,7 @@ CONFIG_CRYPTO_SHA512_S390=m | |||
610 | CONFIG_CRYPTO_DES_S390=m | 616 | CONFIG_CRYPTO_DES_S390=m |
611 | CONFIG_CRYPTO_AES_S390=m | 617 | CONFIG_CRYPTO_AES_S390=m |
612 | CONFIG_CRYPTO_GHASH_S390=m | 618 | CONFIG_CRYPTO_GHASH_S390=m |
613 | CONFIG_ASYMMETRIC_KEY_TYPE=m | 619 | CONFIG_ASYMMETRIC_KEY_TYPE=y |
614 | CONFIG_ASYMMETRIC_PUBLIC_KEY_SUBTYPE=m | 620 | CONFIG_ASYMMETRIC_PUBLIC_KEY_SUBTYPE=m |
615 | CONFIG_X509_CERTIFICATE_PARSER=m | 621 | CONFIG_X509_CERTIFICATE_PARSER=m |
616 | CONFIG_CRC7=m | 622 | CONFIG_CRC7=m |
diff --git a/arch/s390/configs/performance_defconfig b/arch/s390/configs/performance_defconfig index 7b73bf353345..ba0f2a58b8cd 100644 --- a/arch/s390/configs/performance_defconfig +++ b/arch/s390/configs/performance_defconfig | |||
@@ -1,8 +1,7 @@ | |||
1 | CONFIG_SYSVIPC=y | 1 | CONFIG_SYSVIPC=y |
2 | CONFIG_POSIX_MQUEUE=y | 2 | CONFIG_POSIX_MQUEUE=y |
3 | CONFIG_FHANDLE=y | ||
4 | CONFIG_AUDIT=y | 3 | CONFIG_AUDIT=y |
5 | CONFIG_NO_HZ=y | 4 | CONFIG_NO_HZ_IDLE=y |
6 | CONFIG_HIGH_RES_TIMERS=y | 5 | CONFIG_HIGH_RES_TIMERS=y |
7 | CONFIG_BSD_PROCESS_ACCT=y | 6 | CONFIG_BSD_PROCESS_ACCT=y |
8 | CONFIG_BSD_PROCESS_ACCT_V3=y | 7 | CONFIG_BSD_PROCESS_ACCT_V3=y |
@@ -14,17 +13,17 @@ CONFIG_IKCONFIG=y | |||
14 | CONFIG_IKCONFIG_PROC=y | 13 | CONFIG_IKCONFIG_PROC=y |
15 | CONFIG_NUMA_BALANCING=y | 14 | CONFIG_NUMA_BALANCING=y |
16 | # CONFIG_NUMA_BALANCING_DEFAULT_ENABLED is not set | 15 | # CONFIG_NUMA_BALANCING_DEFAULT_ENABLED is not set |
17 | CONFIG_CGROUP_FREEZER=y | ||
18 | CONFIG_CGROUP_PIDS=y | ||
19 | CONFIG_CGROUP_DEVICE=y | ||
20 | CONFIG_CPUSETS=y | ||
21 | CONFIG_CGROUP_CPUACCT=y | ||
22 | CONFIG_MEMCG=y | 16 | CONFIG_MEMCG=y |
23 | CONFIG_MEMCG_SWAP=y | 17 | CONFIG_MEMCG_SWAP=y |
24 | CONFIG_MEMCG_KMEM=y | 18 | CONFIG_BLK_CGROUP=y |
19 | CONFIG_CGROUP_PIDS=y | ||
20 | CONFIG_CGROUP_FREEZER=y | ||
25 | CONFIG_CGROUP_HUGETLB=y | 21 | CONFIG_CGROUP_HUGETLB=y |
22 | CONFIG_CPUSETS=y | ||
23 | CONFIG_CGROUP_DEVICE=y | ||
24 | CONFIG_CGROUP_CPUACCT=y | ||
26 | CONFIG_CGROUP_PERF=y | 25 | CONFIG_CGROUP_PERF=y |
27 | CONFIG_BLK_CGROUP=y | 26 | CONFIG_CHECKPOINT_RESTORE=y |
28 | CONFIG_NAMESPACES=y | 27 | CONFIG_NAMESPACES=y |
29 | CONFIG_USER_NS=y | 28 | CONFIG_USER_NS=y |
30 | CONFIG_SCHED_AUTOGROUP=y | 29 | CONFIG_SCHED_AUTOGROUP=y |
@@ -53,7 +52,6 @@ CONFIG_UNIXWARE_DISKLABEL=y | |||
53 | CONFIG_CFQ_GROUP_IOSCHED=y | 52 | CONFIG_CFQ_GROUP_IOSCHED=y |
54 | CONFIG_DEFAULT_DEADLINE=y | 53 | CONFIG_DEFAULT_DEADLINE=y |
55 | CONFIG_LIVEPATCH=y | 54 | CONFIG_LIVEPATCH=y |
56 | CONFIG_MARCH_Z196=y | ||
57 | CONFIG_TUNE_ZEC12=y | 55 | CONFIG_TUNE_ZEC12=y |
58 | CONFIG_NR_CPUS=512 | 56 | CONFIG_NR_CPUS=512 |
59 | CONFIG_NUMA=y | 57 | CONFIG_NUMA=y |
@@ -62,6 +60,14 @@ CONFIG_MEMORY_HOTPLUG=y | |||
62 | CONFIG_MEMORY_HOTREMOVE=y | 60 | CONFIG_MEMORY_HOTREMOVE=y |
63 | CONFIG_KSM=y | 61 | CONFIG_KSM=y |
64 | CONFIG_TRANSPARENT_HUGEPAGE=y | 62 | CONFIG_TRANSPARENT_HUGEPAGE=y |
63 | CONFIG_CLEANCACHE=y | ||
64 | CONFIG_FRONTSWAP=y | ||
65 | CONFIG_CMA=y | ||
66 | CONFIG_ZSWAP=y | ||
67 | CONFIG_ZBUD=m | ||
68 | CONFIG_ZSMALLOC=m | ||
69 | CONFIG_ZSMALLOC_STAT=y | ||
70 | CONFIG_IDLE_PAGE_TRACKING=y | ||
65 | CONFIG_PCI=y | 71 | CONFIG_PCI=y |
66 | CONFIG_HOTPLUG_PCI=y | 72 | CONFIG_HOTPLUG_PCI=y |
67 | CONFIG_HOTPLUG_PCI_S390=y | 73 | CONFIG_HOTPLUG_PCI_S390=y |
@@ -447,6 +453,7 @@ CONFIG_HW_RANDOM_VIRTIO=m | |||
447 | CONFIG_RAW_DRIVER=m | 453 | CONFIG_RAW_DRIVER=m |
448 | CONFIG_HANGCHECK_TIMER=m | 454 | CONFIG_HANGCHECK_TIMER=m |
449 | CONFIG_TN3270_FS=y | 455 | CONFIG_TN3270_FS=y |
456 | # CONFIG_HWMON is not set | ||
450 | CONFIG_WATCHDOG=y | 457 | CONFIG_WATCHDOG=y |
451 | CONFIG_WATCHDOG_NOWAYOUT=y | 458 | CONFIG_WATCHDOG_NOWAYOUT=y |
452 | CONFIG_SOFT_WATCHDOG=m | 459 | CONFIG_SOFT_WATCHDOG=m |
@@ -530,6 +537,8 @@ CONFIG_NLS_UTF8=m | |||
530 | CONFIG_DLM=m | 537 | CONFIG_DLM=m |
531 | CONFIG_PRINTK_TIME=y | 538 | CONFIG_PRINTK_TIME=y |
532 | CONFIG_DEBUG_INFO=y | 539 | CONFIG_DEBUG_INFO=y |
540 | CONFIG_DEBUG_INFO_DWARF4=y | ||
541 | CONFIG_GDB_SCRIPTS=y | ||
533 | # CONFIG_ENABLE_MUST_CHECK is not set | 542 | # CONFIG_ENABLE_MUST_CHECK is not set |
534 | CONFIG_FRAME_WARN=1024 | 543 | CONFIG_FRAME_WARN=1024 |
535 | CONFIG_UNUSED_SYMBOLS=y | 544 | CONFIG_UNUSED_SYMBOLS=y |
@@ -546,11 +555,12 @@ CONFIG_FTRACE_SYSCALLS=y | |||
546 | CONFIG_STACK_TRACER=y | 555 | CONFIG_STACK_TRACER=y |
547 | CONFIG_BLK_DEV_IO_TRACE=y | 556 | CONFIG_BLK_DEV_IO_TRACE=y |
548 | CONFIG_UPROBE_EVENT=y | 557 | CONFIG_UPROBE_EVENT=y |
558 | CONFIG_FUNCTION_PROFILER=y | ||
559 | CONFIG_TRACE_ENUM_MAP_FILE=y | ||
549 | CONFIG_LKDTM=m | 560 | CONFIG_LKDTM=m |
550 | CONFIG_PERCPU_TEST=m | 561 | CONFIG_PERCPU_TEST=m |
551 | CONFIG_ATOMIC64_SELFTEST=y | 562 | CONFIG_ATOMIC64_SELFTEST=y |
552 | CONFIG_TEST_BPF=m | 563 | CONFIG_TEST_BPF=m |
553 | # CONFIG_STRICT_DEVMEM is not set | ||
554 | CONFIG_S390_PTDUMP=y | 564 | CONFIG_S390_PTDUMP=y |
555 | CONFIG_ENCRYPTED_KEYS=m | 565 | CONFIG_ENCRYPTED_KEYS=m |
556 | CONFIG_SECURITY=y | 566 | CONFIG_SECURITY=y |
@@ -594,8 +604,6 @@ CONFIG_CRYPTO_SEED=m | |||
594 | CONFIG_CRYPTO_SERPENT=m | 604 | CONFIG_CRYPTO_SERPENT=m |
595 | CONFIG_CRYPTO_TEA=m | 605 | CONFIG_CRYPTO_TEA=m |
596 | CONFIG_CRYPTO_TWOFISH=m | 606 | CONFIG_CRYPTO_TWOFISH=m |
597 | CONFIG_CRYPTO_ZLIB=y | ||
598 | CONFIG_CRYPTO_LZO=m | ||
599 | CONFIG_CRYPTO_LZ4=m | 607 | CONFIG_CRYPTO_LZ4=m |
600 | CONFIG_CRYPTO_LZ4HC=m | 608 | CONFIG_CRYPTO_LZ4HC=m |
601 | CONFIG_CRYPTO_USER_API_HASH=m | 609 | CONFIG_CRYPTO_USER_API_HASH=m |
@@ -607,7 +615,7 @@ CONFIG_CRYPTO_SHA512_S390=m | |||
607 | CONFIG_CRYPTO_DES_S390=m | 615 | CONFIG_CRYPTO_DES_S390=m |
608 | CONFIG_CRYPTO_AES_S390=m | 616 | CONFIG_CRYPTO_AES_S390=m |
609 | CONFIG_CRYPTO_GHASH_S390=m | 617 | CONFIG_CRYPTO_GHASH_S390=m |
610 | CONFIG_ASYMMETRIC_KEY_TYPE=m | 618 | CONFIG_ASYMMETRIC_KEY_TYPE=y |
611 | CONFIG_ASYMMETRIC_PUBLIC_KEY_SUBTYPE=m | 619 | CONFIG_ASYMMETRIC_PUBLIC_KEY_SUBTYPE=m |
612 | CONFIG_X509_CERTIFICATE_PARSER=m | 620 | CONFIG_X509_CERTIFICATE_PARSER=m |
613 | CONFIG_CRC7=m | 621 | CONFIG_CRC7=m |
diff --git a/arch/s390/configs/zfcpdump_defconfig b/arch/s390/configs/zfcpdump_defconfig index 1719843a55a2..4366a3e3e754 100644 --- a/arch/s390/configs/zfcpdump_defconfig +++ b/arch/s390/configs/zfcpdump_defconfig | |||
@@ -1,5 +1,5 @@ | |||
1 | # CONFIG_SWAP is not set | 1 | # CONFIG_SWAP is not set |
2 | CONFIG_NO_HZ=y | 2 | CONFIG_NO_HZ_IDLE=y |
3 | CONFIG_HIGH_RES_TIMERS=y | 3 | CONFIG_HIGH_RES_TIMERS=y |
4 | CONFIG_BLK_DEV_INITRD=y | 4 | CONFIG_BLK_DEV_INITRD=y |
5 | CONFIG_CC_OPTIMIZE_FOR_SIZE=y | 5 | CONFIG_CC_OPTIMIZE_FOR_SIZE=y |
@@ -7,7 +7,6 @@ CONFIG_CC_OPTIMIZE_FOR_SIZE=y | |||
7 | CONFIG_PARTITION_ADVANCED=y | 7 | CONFIG_PARTITION_ADVANCED=y |
8 | CONFIG_IBM_PARTITION=y | 8 | CONFIG_IBM_PARTITION=y |
9 | CONFIG_DEFAULT_DEADLINE=y | 9 | CONFIG_DEFAULT_DEADLINE=y |
10 | CONFIG_MARCH_Z196=y | ||
11 | CONFIG_TUNE_ZEC12=y | 10 | CONFIG_TUNE_ZEC12=y |
12 | # CONFIG_COMPAT is not set | 11 | # CONFIG_COMPAT is not set |
13 | CONFIG_NR_CPUS=2 | 12 | CONFIG_NR_CPUS=2 |
@@ -64,7 +63,6 @@ CONFIG_PANIC_ON_OOPS=y | |||
64 | # CONFIG_SCHED_DEBUG is not set | 63 | # CONFIG_SCHED_DEBUG is not set |
65 | CONFIG_RCU_CPU_STALL_TIMEOUT=60 | 64 | CONFIG_RCU_CPU_STALL_TIMEOUT=60 |
66 | # CONFIG_FTRACE is not set | 65 | # CONFIG_FTRACE is not set |
67 | # CONFIG_STRICT_DEVMEM is not set | ||
68 | # CONFIG_PFAULT is not set | 66 | # CONFIG_PFAULT is not set |
69 | # CONFIG_S390_HYPFS_FS is not set | 67 | # CONFIG_S390_HYPFS_FS is not set |
70 | # CONFIG_VIRTUALIZATION is not set | 68 | # CONFIG_VIRTUALIZATION is not set |
diff --git a/arch/s390/defconfig b/arch/s390/defconfig index e24f2af4c73b..3f571ea89509 100644 --- a/arch/s390/defconfig +++ b/arch/s390/defconfig | |||
@@ -1,8 +1,8 @@ | |||
1 | CONFIG_SYSVIPC=y | 1 | CONFIG_SYSVIPC=y |
2 | CONFIG_POSIX_MQUEUE=y | 2 | CONFIG_POSIX_MQUEUE=y |
3 | CONFIG_FHANDLE=y | 3 | CONFIG_USELIB=y |
4 | CONFIG_AUDIT=y | 4 | CONFIG_AUDIT=y |
5 | CONFIG_NO_HZ=y | 5 | CONFIG_NO_HZ_IDLE=y |
6 | CONFIG_HIGH_RES_TIMERS=y | 6 | CONFIG_HIGH_RES_TIMERS=y |
7 | CONFIG_TASKSTATS=y | 7 | CONFIG_TASKSTATS=y |
8 | CONFIG_TASK_DELAY_ACCT=y | 8 | CONFIG_TASK_DELAY_ACCT=y |
@@ -11,19 +11,19 @@ CONFIG_TASK_IO_ACCOUNTING=y | |||
11 | CONFIG_IKCONFIG=y | 11 | CONFIG_IKCONFIG=y |
12 | CONFIG_IKCONFIG_PROC=y | 12 | CONFIG_IKCONFIG_PROC=y |
13 | CONFIG_CGROUPS=y | 13 | CONFIG_CGROUPS=y |
14 | CONFIG_CGROUP_FREEZER=y | ||
15 | CONFIG_CGROUP_PIDS=y | ||
16 | CONFIG_CGROUP_DEVICE=y | ||
17 | CONFIG_CPUSETS=y | ||
18 | CONFIG_CGROUP_CPUACCT=y | ||
19 | CONFIG_MEMCG=y | 14 | CONFIG_MEMCG=y |
20 | CONFIG_MEMCG_SWAP=y | 15 | CONFIG_MEMCG_SWAP=y |
21 | CONFIG_MEMCG_KMEM=y | 16 | CONFIG_BLK_CGROUP=y |
22 | CONFIG_CGROUP_HUGETLB=y | ||
23 | CONFIG_CGROUP_PERF=y | ||
24 | CONFIG_CGROUP_SCHED=y | 17 | CONFIG_CGROUP_SCHED=y |
25 | CONFIG_RT_GROUP_SCHED=y | 18 | CONFIG_RT_GROUP_SCHED=y |
26 | CONFIG_BLK_CGROUP=y | 19 | CONFIG_CGROUP_PIDS=y |
20 | CONFIG_CGROUP_FREEZER=y | ||
21 | CONFIG_CGROUP_HUGETLB=y | ||
22 | CONFIG_CPUSETS=y | ||
23 | CONFIG_CGROUP_DEVICE=y | ||
24 | CONFIG_CGROUP_CPUACCT=y | ||
25 | CONFIG_CGROUP_PERF=y | ||
26 | CONFIG_CHECKPOINT_RESTORE=y | ||
27 | CONFIG_NAMESPACES=y | 27 | CONFIG_NAMESPACES=y |
28 | CONFIG_USER_NS=y | 28 | CONFIG_USER_NS=y |
29 | CONFIG_BLK_DEV_INITRD=y | 29 | CONFIG_BLK_DEV_INITRD=y |
@@ -44,7 +44,6 @@ CONFIG_PARTITION_ADVANCED=y | |||
44 | CONFIG_IBM_PARTITION=y | 44 | CONFIG_IBM_PARTITION=y |
45 | CONFIG_DEFAULT_DEADLINE=y | 45 | CONFIG_DEFAULT_DEADLINE=y |
46 | CONFIG_LIVEPATCH=y | 46 | CONFIG_LIVEPATCH=y |
47 | CONFIG_MARCH_Z196=y | ||
48 | CONFIG_NR_CPUS=256 | 47 | CONFIG_NR_CPUS=256 |
49 | CONFIG_NUMA=y | 48 | CONFIG_NUMA=y |
50 | CONFIG_HZ_100=y | 49 | CONFIG_HZ_100=y |
@@ -52,6 +51,14 @@ CONFIG_MEMORY_HOTPLUG=y | |||
52 | CONFIG_MEMORY_HOTREMOVE=y | 51 | CONFIG_MEMORY_HOTREMOVE=y |
53 | CONFIG_KSM=y | 52 | CONFIG_KSM=y |
54 | CONFIG_TRANSPARENT_HUGEPAGE=y | 53 | CONFIG_TRANSPARENT_HUGEPAGE=y |
54 | CONFIG_CLEANCACHE=y | ||
55 | CONFIG_FRONTSWAP=y | ||
56 | CONFIG_CMA=y | ||
57 | CONFIG_ZSWAP=y | ||
58 | CONFIG_ZBUD=m | ||
59 | CONFIG_ZSMALLOC=m | ||
60 | CONFIG_ZSMALLOC_STAT=y | ||
61 | CONFIG_IDLE_PAGE_TRACKING=y | ||
55 | CONFIG_CRASH_DUMP=y | 62 | CONFIG_CRASH_DUMP=y |
56 | CONFIG_BINFMT_MISC=m | 63 | CONFIG_BINFMT_MISC=m |
57 | CONFIG_HIBERNATION=y | 64 | CONFIG_HIBERNATION=y |
@@ -61,7 +68,6 @@ CONFIG_UNIX=y | |||
61 | CONFIG_NET_KEY=y | 68 | CONFIG_NET_KEY=y |
62 | CONFIG_INET=y | 69 | CONFIG_INET=y |
63 | CONFIG_IP_MULTICAST=y | 70 | CONFIG_IP_MULTICAST=y |
64 | # CONFIG_INET_LRO is not set | ||
65 | CONFIG_L2TP=m | 71 | CONFIG_L2TP=m |
66 | CONFIG_L2TP_DEBUGFS=m | 72 | CONFIG_L2TP_DEBUGFS=m |
67 | CONFIG_VLAN_8021Q=y | 73 | CONFIG_VLAN_8021Q=y |
@@ -144,6 +150,9 @@ CONFIG_TMPFS=y | |||
144 | CONFIG_TMPFS_POSIX_ACL=y | 150 | CONFIG_TMPFS_POSIX_ACL=y |
145 | CONFIG_HUGETLBFS=y | 151 | CONFIG_HUGETLBFS=y |
146 | # CONFIG_NETWORK_FILESYSTEMS is not set | 152 | # CONFIG_NETWORK_FILESYSTEMS is not set |
153 | CONFIG_DEBUG_INFO=y | ||
154 | CONFIG_DEBUG_INFO_DWARF4=y | ||
155 | CONFIG_GDB_SCRIPTS=y | ||
147 | CONFIG_UNUSED_SYMBOLS=y | 156 | CONFIG_UNUSED_SYMBOLS=y |
148 | CONFIG_DEBUG_SECTION_MISMATCH=y | 157 | CONFIG_DEBUG_SECTION_MISMATCH=y |
149 | CONFIG_DEBUG_FORCE_WEAK_PER_CPU=y | 158 | CONFIG_DEBUG_FORCE_WEAK_PER_CPU=y |
@@ -158,20 +167,21 @@ CONFIG_LOCK_STAT=y | |||
158 | CONFIG_DEBUG_LOCKDEP=y | 167 | CONFIG_DEBUG_LOCKDEP=y |
159 | CONFIG_DEBUG_ATOMIC_SLEEP=y | 168 | CONFIG_DEBUG_ATOMIC_SLEEP=y |
160 | CONFIG_DEBUG_LIST=y | 169 | CONFIG_DEBUG_LIST=y |
161 | CONFIG_DEBUG_PI_LIST=y | ||
162 | CONFIG_DEBUG_SG=y | 170 | CONFIG_DEBUG_SG=y |
163 | CONFIG_DEBUG_NOTIFIERS=y | 171 | CONFIG_DEBUG_NOTIFIERS=y |
164 | CONFIG_RCU_CPU_STALL_TIMEOUT=60 | 172 | CONFIG_RCU_CPU_STALL_TIMEOUT=60 |
165 | CONFIG_RCU_TRACE=y | 173 | CONFIG_RCU_TRACE=y |
166 | CONFIG_LATENCYTOP=y | 174 | CONFIG_LATENCYTOP=y |
167 | CONFIG_DEBUG_STRICT_USER_COPY_CHECKS=y | 175 | CONFIG_DEBUG_STRICT_USER_COPY_CHECKS=y |
168 | CONFIG_TRACER_SNAPSHOT=y | 176 | CONFIG_SCHED_TRACER=y |
177 | CONFIG_FTRACE_SYSCALLS=y | ||
169 | CONFIG_TRACER_SNAPSHOT_PER_CPU_SWAP=y | 178 | CONFIG_TRACER_SNAPSHOT_PER_CPU_SWAP=y |
170 | CONFIG_STACK_TRACER=y | 179 | CONFIG_STACK_TRACER=y |
171 | CONFIG_BLK_DEV_IO_TRACE=y | 180 | CONFIG_BLK_DEV_IO_TRACE=y |
172 | CONFIG_UPROBE_EVENT=y | 181 | CONFIG_UPROBE_EVENT=y |
182 | CONFIG_FUNCTION_PROFILER=y | ||
183 | CONFIG_TRACE_ENUM_MAP_FILE=y | ||
173 | CONFIG_KPROBES_SANITY_TEST=y | 184 | CONFIG_KPROBES_SANITY_TEST=y |
174 | # CONFIG_STRICT_DEVMEM is not set | ||
175 | CONFIG_S390_PTDUMP=y | 185 | CONFIG_S390_PTDUMP=y |
176 | CONFIG_CRYPTO_CRYPTD=m | 186 | CONFIG_CRYPTO_CRYPTD=m |
177 | CONFIG_CRYPTO_AUTHENC=m | 187 | CONFIG_CRYPTO_AUTHENC=m |
@@ -212,8 +222,6 @@ CONFIG_CRYPTO_SERPENT=m | |||
212 | CONFIG_CRYPTO_TEA=m | 222 | CONFIG_CRYPTO_TEA=m |
213 | CONFIG_CRYPTO_TWOFISH=m | 223 | CONFIG_CRYPTO_TWOFISH=m |
214 | CONFIG_CRYPTO_DEFLATE=m | 224 | CONFIG_CRYPTO_DEFLATE=m |
215 | CONFIG_CRYPTO_ZLIB=m | ||
216 | CONFIG_CRYPTO_LZO=m | ||
217 | CONFIG_CRYPTO_LZ4=m | 225 | CONFIG_CRYPTO_LZ4=m |
218 | CONFIG_CRYPTO_LZ4HC=m | 226 | CONFIG_CRYPTO_LZ4HC=m |
219 | CONFIG_CRYPTO_ANSI_CPRNG=m | 227 | CONFIG_CRYPTO_ANSI_CPRNG=m |
diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c index 7a3144017301..19288c1b36d3 100644 --- a/arch/s390/mm/fault.c +++ b/arch/s390/mm/fault.c | |||
@@ -250,6 +250,7 @@ static noinline void do_sigsegv(struct pt_regs *regs, int si_code) | |||
250 | 250 | ||
251 | report_user_fault(regs, SIGSEGV, 1); | 251 | report_user_fault(regs, SIGSEGV, 1); |
252 | si.si_signo = SIGSEGV; | 252 | si.si_signo = SIGSEGV; |
253 | si.si_errno = 0; | ||
253 | si.si_code = si_code; | 254 | si.si_code = si_code; |
254 | si.si_addr = (void __user *)(regs->int_parm_long & __FAIL_ADDR_MASK); | 255 | si.si_addr = (void __user *)(regs->int_parm_long & __FAIL_ADDR_MASK); |
255 | force_sig_info(SIGSEGV, &si, current); | 256 | force_sig_info(SIGSEGV, &si, current); |
diff --git a/arch/s390/net/bpf_jit.h b/arch/s390/net/bpf_jit.h index f010c93a88b1..fda605dbc1b4 100644 --- a/arch/s390/net/bpf_jit.h +++ b/arch/s390/net/bpf_jit.h | |||
@@ -37,7 +37,7 @@ extern u8 sk_load_word[], sk_load_half[], sk_load_byte[]; | |||
37 | * | | | | 37 | * | | | |
38 | * +---------------+ | | 38 | * +---------------+ | |
39 | * | 8 byte skbp | | | 39 | * | 8 byte skbp | | |
40 | * R15+170 -> +---------------+ | | 40 | * R15+176 -> +---------------+ | |
41 | * | 8 byte hlen | | | 41 | * | 8 byte hlen | | |
42 | * R15+168 -> +---------------+ | | 42 | * R15+168 -> +---------------+ | |
43 | * | 4 byte align | | | 43 | * | 4 byte align | | |
@@ -58,7 +58,7 @@ extern u8 sk_load_word[], sk_load_half[], sk_load_byte[]; | |||
58 | #define STK_OFF (STK_SPACE - STK_160_UNUSED) | 58 | #define STK_OFF (STK_SPACE - STK_160_UNUSED) |
59 | #define STK_OFF_TMP 160 /* Offset of tmp buffer on stack */ | 59 | #define STK_OFF_TMP 160 /* Offset of tmp buffer on stack */ |
60 | #define STK_OFF_HLEN 168 /* Offset of SKB header length on stack */ | 60 | #define STK_OFF_HLEN 168 /* Offset of SKB header length on stack */ |
61 | #define STK_OFF_SKBP 170 /* Offset of SKB pointer on stack */ | 61 | #define STK_OFF_SKBP 176 /* Offset of SKB pointer on stack */ |
62 | 62 | ||
63 | #define STK_OFF_R6 (160 - 11 * 8) /* Offset of r6 on stack */ | 63 | #define STK_OFF_R6 (160 - 11 * 8) /* Offset of r6 on stack */ |
64 | #define STK_OFF_TCCNT (160 - 12 * 8) /* Offset of tail_call_cnt on stack */ | 64 | #define STK_OFF_TCCNT (160 - 12 * 8) /* Offset of tail_call_cnt on stack */ |
diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c index 9133b0ec000b..bee281f3163d 100644 --- a/arch/s390/net/bpf_jit_comp.c +++ b/arch/s390/net/bpf_jit_comp.c | |||
@@ -45,7 +45,7 @@ struct bpf_jit { | |||
45 | int labels[1]; /* Labels for local jumps */ | 45 | int labels[1]; /* Labels for local jumps */ |
46 | }; | 46 | }; |
47 | 47 | ||
48 | #define BPF_SIZE_MAX 0x7ffff /* Max size for program (20 bit signed displ) */ | 48 | #define BPF_SIZE_MAX 0xffff /* Max size for program (16 bit branches) */ |
49 | 49 | ||
50 | #define SEEN_SKB 1 /* skb access */ | 50 | #define SEEN_SKB 1 /* skb access */ |
51 | #define SEEN_MEM 2 /* use mem[] for temporary storage */ | 51 | #define SEEN_MEM 2 /* use mem[] for temporary storage */ |
@@ -450,7 +450,7 @@ static void bpf_jit_prologue(struct bpf_jit *jit) | |||
450 | emit_load_skb_data_hlen(jit); | 450 | emit_load_skb_data_hlen(jit); |
451 | if (jit->seen & SEEN_SKB_CHANGE) | 451 | if (jit->seen & SEEN_SKB_CHANGE) |
452 | /* stg %b1,ST_OFF_SKBP(%r0,%r15) */ | 452 | /* stg %b1,ST_OFF_SKBP(%r0,%r15) */ |
453 | EMIT6_DISP_LH(0xe3000000, 0x0024, REG_W1, REG_0, REG_15, | 453 | EMIT6_DISP_LH(0xe3000000, 0x0024, BPF_REG_1, REG_0, REG_15, |
454 | STK_OFF_SKBP); | 454 | STK_OFF_SKBP); |
455 | } | 455 | } |
456 | 456 | ||
diff --git a/arch/sparc/include/asm/head_64.h b/arch/sparc/include/asm/head_64.h index 10e9dabc4c41..f0700cfeedd7 100644 --- a/arch/sparc/include/asm/head_64.h +++ b/arch/sparc/include/asm/head_64.h | |||
@@ -15,6 +15,10 @@ | |||
15 | 15 | ||
16 | #define PTREGS_OFF (STACK_BIAS + STACKFRAME_SZ) | 16 | #define PTREGS_OFF (STACK_BIAS + STACKFRAME_SZ) |
17 | 17 | ||
18 | #define RTRAP_PSTATE (PSTATE_TSO|PSTATE_PEF|PSTATE_PRIV|PSTATE_IE) | ||
19 | #define RTRAP_PSTATE_IRQOFF (PSTATE_TSO|PSTATE_PEF|PSTATE_PRIV) | ||
20 | #define RTRAP_PSTATE_AG_IRQOFF (PSTATE_TSO|PSTATE_PEF|PSTATE_PRIV|PSTATE_AG) | ||
21 | |||
18 | #define __CHEETAH_ID 0x003e0014 | 22 | #define __CHEETAH_ID 0x003e0014 |
19 | #define __JALAPENO_ID 0x003e0016 | 23 | #define __JALAPENO_ID 0x003e0016 |
20 | #define __SERRANO_ID 0x003e0022 | 24 | #define __SERRANO_ID 0x003e0022 |
diff --git a/arch/sparc/include/asm/ttable.h b/arch/sparc/include/asm/ttable.h index 71b5a67522ab..781b9f1dbdc2 100644 --- a/arch/sparc/include/asm/ttable.h +++ b/arch/sparc/include/asm/ttable.h | |||
@@ -589,8 +589,8 @@ user_rtt_fill_64bit: \ | |||
589 | restored; \ | 589 | restored; \ |
590 | nop; nop; nop; nop; nop; nop; \ | 590 | nop; nop; nop; nop; nop; nop; \ |
591 | nop; nop; nop; nop; nop; \ | 591 | nop; nop; nop; nop; nop; \ |
592 | ba,a,pt %xcc, user_rtt_fill_fixup; \ | 592 | ba,a,pt %xcc, user_rtt_fill_fixup_dax; \ |
593 | ba,a,pt %xcc, user_rtt_fill_fixup; \ | 593 | ba,a,pt %xcc, user_rtt_fill_fixup_mna; \ |
594 | ba,a,pt %xcc, user_rtt_fill_fixup; | 594 | ba,a,pt %xcc, user_rtt_fill_fixup; |
595 | 595 | ||
596 | 596 | ||
@@ -652,8 +652,8 @@ user_rtt_fill_32bit: \ | |||
652 | restored; \ | 652 | restored; \ |
653 | nop; nop; nop; nop; nop; \ | 653 | nop; nop; nop; nop; nop; \ |
654 | nop; nop; nop; \ | 654 | nop; nop; nop; \ |
655 | ba,a,pt %xcc, user_rtt_fill_fixup; \ | 655 | ba,a,pt %xcc, user_rtt_fill_fixup_dax; \ |
656 | ba,a,pt %xcc, user_rtt_fill_fixup; \ | 656 | ba,a,pt %xcc, user_rtt_fill_fixup_mna; \ |
657 | ba,a,pt %xcc, user_rtt_fill_fixup; | 657 | ba,a,pt %xcc, user_rtt_fill_fixup; |
658 | 658 | ||
659 | 659 | ||
diff --git a/arch/sparc/kernel/Makefile b/arch/sparc/kernel/Makefile index 7cf9c6ea3f1f..fdb13327fded 100644 --- a/arch/sparc/kernel/Makefile +++ b/arch/sparc/kernel/Makefile | |||
@@ -21,6 +21,7 @@ CFLAGS_REMOVE_perf_event.o := -pg | |||
21 | CFLAGS_REMOVE_pcr.o := -pg | 21 | CFLAGS_REMOVE_pcr.o := -pg |
22 | endif | 22 | endif |
23 | 23 | ||
24 | obj-$(CONFIG_SPARC64) += urtt_fill.o | ||
24 | obj-$(CONFIG_SPARC32) += entry.o wof.o wuf.o | 25 | obj-$(CONFIG_SPARC32) += entry.o wof.o wuf.o |
25 | obj-$(CONFIG_SPARC32) += etrap_32.o | 26 | obj-$(CONFIG_SPARC32) += etrap_32.o |
26 | obj-$(CONFIG_SPARC32) += rtrap_32.o | 27 | obj-$(CONFIG_SPARC32) += rtrap_32.o |
diff --git a/arch/sparc/kernel/rtrap_64.S b/arch/sparc/kernel/rtrap_64.S index d08bdaffdbfc..216948ca4382 100644 --- a/arch/sparc/kernel/rtrap_64.S +++ b/arch/sparc/kernel/rtrap_64.S | |||
@@ -14,10 +14,6 @@ | |||
14 | #include <asm/visasm.h> | 14 | #include <asm/visasm.h> |
15 | #include <asm/processor.h> | 15 | #include <asm/processor.h> |
16 | 16 | ||
17 | #define RTRAP_PSTATE (PSTATE_TSO|PSTATE_PEF|PSTATE_PRIV|PSTATE_IE) | ||
18 | #define RTRAP_PSTATE_IRQOFF (PSTATE_TSO|PSTATE_PEF|PSTATE_PRIV) | ||
19 | #define RTRAP_PSTATE_AG_IRQOFF (PSTATE_TSO|PSTATE_PEF|PSTATE_PRIV|PSTATE_AG) | ||
20 | |||
21 | #ifdef CONFIG_CONTEXT_TRACKING | 17 | #ifdef CONFIG_CONTEXT_TRACKING |
22 | # define SCHEDULE_USER schedule_user | 18 | # define SCHEDULE_USER schedule_user |
23 | #else | 19 | #else |
@@ -242,52 +238,17 @@ rt_continue: ldx [%sp + PTREGS_OFF + PT_V9_G1], %g1 | |||
242 | wrpr %g1, %cwp | 238 | wrpr %g1, %cwp |
243 | ba,a,pt %xcc, user_rtt_fill_64bit | 239 | ba,a,pt %xcc, user_rtt_fill_64bit |
244 | 240 | ||
245 | user_rtt_fill_fixup: | 241 | user_rtt_fill_fixup_dax: |
246 | rdpr %cwp, %g1 | 242 | ba,pt %xcc, user_rtt_fill_fixup_common |
247 | add %g1, 1, %g1 | 243 | mov 1, %g3 |
248 | wrpr %g1, 0x0, %cwp | ||
249 | |||
250 | rdpr %wstate, %g2 | ||
251 | sll %g2, 3, %g2 | ||
252 | wrpr %g2, 0x0, %wstate | ||
253 | |||
254 | /* We know %canrestore and %otherwin are both zero. */ | ||
255 | |||
256 | sethi %hi(sparc64_kern_pri_context), %g2 | ||
257 | ldx [%g2 + %lo(sparc64_kern_pri_context)], %g2 | ||
258 | mov PRIMARY_CONTEXT, %g1 | ||
259 | |||
260 | 661: stxa %g2, [%g1] ASI_DMMU | ||
261 | .section .sun4v_1insn_patch, "ax" | ||
262 | .word 661b | ||
263 | stxa %g2, [%g1] ASI_MMU | ||
264 | .previous | ||
265 | |||
266 | sethi %hi(KERNBASE), %g1 | ||
267 | flush %g1 | ||
268 | 244 | ||
269 | or %g4, FAULT_CODE_WINFIXUP, %g4 | 245 | user_rtt_fill_fixup_mna: |
270 | stb %g4, [%g6 + TI_FAULT_CODE] | 246 | ba,pt %xcc, user_rtt_fill_fixup_common |
271 | stx %g5, [%g6 + TI_FAULT_ADDR] | 247 | mov 2, %g3 |
272 | 248 | ||
273 | mov %g6, %l1 | 249 | user_rtt_fill_fixup: |
274 | wrpr %g0, 0x0, %tl | 250 | ba,pt %xcc, user_rtt_fill_fixup_common |
275 | 251 | clr %g3 | |
276 | 661: nop | ||
277 | .section .sun4v_1insn_patch, "ax" | ||
278 | .word 661b | ||
279 | SET_GL(0) | ||
280 | .previous | ||
281 | |||
282 | wrpr %g0, RTRAP_PSTATE, %pstate | ||
283 | |||
284 | mov %l1, %g6 | ||
285 | ldx [%g6 + TI_TASK], %g4 | ||
286 | LOAD_PER_CPU_BASE(%g5, %g6, %g1, %g2, %g3) | ||
287 | call do_sparc64_fault | ||
288 | add %sp, PTREGS_OFF, %o0 | ||
289 | ba,pt %xcc, rtrap | ||
290 | nop | ||
291 | 252 | ||
292 | user_rtt_pre_restore: | 253 | user_rtt_pre_restore: |
293 | add %g1, 1, %g1 | 254 | add %g1, 1, %g1 |
diff --git a/arch/sparc/kernel/signal32.c b/arch/sparc/kernel/signal32.c index 3c25241fa5cb..91cc2f4ae4d9 100644 --- a/arch/sparc/kernel/signal32.c +++ b/arch/sparc/kernel/signal32.c | |||
@@ -138,12 +138,24 @@ int copy_siginfo_from_user32(siginfo_t *to, compat_siginfo_t __user *from) | |||
138 | return 0; | 138 | return 0; |
139 | } | 139 | } |
140 | 140 | ||
141 | /* Checks if the fp is valid. We always build signal frames which are | ||
142 | * 16-byte aligned, therefore we can always enforce that the restore | ||
143 | * frame has that property as well. | ||
144 | */ | ||
145 | static bool invalid_frame_pointer(void __user *fp, int fplen) | ||
146 | { | ||
147 | if ((((unsigned long) fp) & 15) || | ||
148 | ((unsigned long)fp) > 0x100000000ULL - fplen) | ||
149 | return true; | ||
150 | return false; | ||
151 | } | ||
152 | |||
141 | void do_sigreturn32(struct pt_regs *regs) | 153 | void do_sigreturn32(struct pt_regs *regs) |
142 | { | 154 | { |
143 | struct signal_frame32 __user *sf; | 155 | struct signal_frame32 __user *sf; |
144 | compat_uptr_t fpu_save; | 156 | compat_uptr_t fpu_save; |
145 | compat_uptr_t rwin_save; | 157 | compat_uptr_t rwin_save; |
146 | unsigned int psr; | 158 | unsigned int psr, ufp; |
147 | unsigned int pc, npc; | 159 | unsigned int pc, npc; |
148 | sigset_t set; | 160 | sigset_t set; |
149 | compat_sigset_t seta; | 161 | compat_sigset_t seta; |
@@ -158,11 +170,16 @@ void do_sigreturn32(struct pt_regs *regs) | |||
158 | sf = (struct signal_frame32 __user *) regs->u_regs[UREG_FP]; | 170 | sf = (struct signal_frame32 __user *) regs->u_regs[UREG_FP]; |
159 | 171 | ||
160 | /* 1. Make sure we are not getting garbage from the user */ | 172 | /* 1. Make sure we are not getting garbage from the user */ |
161 | if (!access_ok(VERIFY_READ, sf, sizeof(*sf)) || | 173 | if (invalid_frame_pointer(sf, sizeof(*sf))) |
162 | (((unsigned long) sf) & 3)) | 174 | goto segv; |
175 | |||
176 | if (get_user(ufp, &sf->info.si_regs.u_regs[UREG_FP])) | ||
177 | goto segv; | ||
178 | |||
179 | if (ufp & 0x7) | ||
163 | goto segv; | 180 | goto segv; |
164 | 181 | ||
165 | if (get_user(pc, &sf->info.si_regs.pc) || | 182 | if (__get_user(pc, &sf->info.si_regs.pc) || |
166 | __get_user(npc, &sf->info.si_regs.npc)) | 183 | __get_user(npc, &sf->info.si_regs.npc)) |
167 | goto segv; | 184 | goto segv; |
168 | 185 | ||
@@ -227,7 +244,7 @@ segv: | |||
227 | asmlinkage void do_rt_sigreturn32(struct pt_regs *regs) | 244 | asmlinkage void do_rt_sigreturn32(struct pt_regs *regs) |
228 | { | 245 | { |
229 | struct rt_signal_frame32 __user *sf; | 246 | struct rt_signal_frame32 __user *sf; |
230 | unsigned int psr, pc, npc; | 247 | unsigned int psr, pc, npc, ufp; |
231 | compat_uptr_t fpu_save; | 248 | compat_uptr_t fpu_save; |
232 | compat_uptr_t rwin_save; | 249 | compat_uptr_t rwin_save; |
233 | sigset_t set; | 250 | sigset_t set; |
@@ -242,11 +259,16 @@ asmlinkage void do_rt_sigreturn32(struct pt_regs *regs) | |||
242 | sf = (struct rt_signal_frame32 __user *) regs->u_regs[UREG_FP]; | 259 | sf = (struct rt_signal_frame32 __user *) regs->u_regs[UREG_FP]; |
243 | 260 | ||
244 | /* 1. Make sure we are not getting garbage from the user */ | 261 | /* 1. Make sure we are not getting garbage from the user */ |
245 | if (!access_ok(VERIFY_READ, sf, sizeof(*sf)) || | 262 | if (invalid_frame_pointer(sf, sizeof(*sf))) |
246 | (((unsigned long) sf) & 3)) | ||
247 | goto segv; | 263 | goto segv; |
248 | 264 | ||
249 | if (get_user(pc, &sf->regs.pc) || | 265 | if (get_user(ufp, &sf->regs.u_regs[UREG_FP])) |
266 | goto segv; | ||
267 | |||
268 | if (ufp & 0x7) | ||
269 | goto segv; | ||
270 | |||
271 | if (__get_user(pc, &sf->regs.pc) || | ||
250 | __get_user(npc, &sf->regs.npc)) | 272 | __get_user(npc, &sf->regs.npc)) |
251 | goto segv; | 273 | goto segv; |
252 | 274 | ||
@@ -307,14 +329,6 @@ segv: | |||
307 | force_sig(SIGSEGV, current); | 329 | force_sig(SIGSEGV, current); |
308 | } | 330 | } |
309 | 331 | ||
310 | /* Checks if the fp is valid */ | ||
311 | static int invalid_frame_pointer(void __user *fp, int fplen) | ||
312 | { | ||
313 | if ((((unsigned long) fp) & 7) || ((unsigned long)fp) > 0x100000000ULL - fplen) | ||
314 | return 1; | ||
315 | return 0; | ||
316 | } | ||
317 | |||
318 | static void __user *get_sigframe(struct ksignal *ksig, struct pt_regs *regs, unsigned long framesize) | 332 | static void __user *get_sigframe(struct ksignal *ksig, struct pt_regs *regs, unsigned long framesize) |
319 | { | 333 | { |
320 | unsigned long sp; | 334 | unsigned long sp; |
diff --git a/arch/sparc/kernel/signal_32.c b/arch/sparc/kernel/signal_32.c index 52aa5e4ce5e7..c3c12efe0bc0 100644 --- a/arch/sparc/kernel/signal_32.c +++ b/arch/sparc/kernel/signal_32.c | |||
@@ -60,10 +60,22 @@ struct rt_signal_frame { | |||
60 | #define SF_ALIGNEDSZ (((sizeof(struct signal_frame) + 7) & (~7))) | 60 | #define SF_ALIGNEDSZ (((sizeof(struct signal_frame) + 7) & (~7))) |
61 | #define RT_ALIGNEDSZ (((sizeof(struct rt_signal_frame) + 7) & (~7))) | 61 | #define RT_ALIGNEDSZ (((sizeof(struct rt_signal_frame) + 7) & (~7))) |
62 | 62 | ||
63 | /* Checks if the fp is valid. We always build signal frames which are | ||
64 | * 16-byte aligned, therefore we can always enforce that the restore | ||
65 | * frame has that property as well. | ||
66 | */ | ||
67 | static inline bool invalid_frame_pointer(void __user *fp, int fplen) | ||
68 | { | ||
69 | if ((((unsigned long) fp) & 15) || !__access_ok((unsigned long)fp, fplen)) | ||
70 | return true; | ||
71 | |||
72 | return false; | ||
73 | } | ||
74 | |||
63 | asmlinkage void do_sigreturn(struct pt_regs *regs) | 75 | asmlinkage void do_sigreturn(struct pt_regs *regs) |
64 | { | 76 | { |
77 | unsigned long up_psr, pc, npc, ufp; | ||
65 | struct signal_frame __user *sf; | 78 | struct signal_frame __user *sf; |
66 | unsigned long up_psr, pc, npc; | ||
67 | sigset_t set; | 79 | sigset_t set; |
68 | __siginfo_fpu_t __user *fpu_save; | 80 | __siginfo_fpu_t __user *fpu_save; |
69 | __siginfo_rwin_t __user *rwin_save; | 81 | __siginfo_rwin_t __user *rwin_save; |
@@ -77,10 +89,13 @@ asmlinkage void do_sigreturn(struct pt_regs *regs) | |||
77 | sf = (struct signal_frame __user *) regs->u_regs[UREG_FP]; | 89 | sf = (struct signal_frame __user *) regs->u_regs[UREG_FP]; |
78 | 90 | ||
79 | /* 1. Make sure we are not getting garbage from the user */ | 91 | /* 1. Make sure we are not getting garbage from the user */ |
80 | if (!access_ok(VERIFY_READ, sf, sizeof(*sf))) | 92 | if (!invalid_frame_pointer(sf, sizeof(*sf))) |
93 | goto segv_and_exit; | ||
94 | |||
95 | if (get_user(ufp, &sf->info.si_regs.u_regs[UREG_FP])) | ||
81 | goto segv_and_exit; | 96 | goto segv_and_exit; |
82 | 97 | ||
83 | if (((unsigned long) sf) & 3) | 98 | if (ufp & 0x7) |
84 | goto segv_and_exit; | 99 | goto segv_and_exit; |
85 | 100 | ||
86 | err = __get_user(pc, &sf->info.si_regs.pc); | 101 | err = __get_user(pc, &sf->info.si_regs.pc); |
@@ -127,7 +142,7 @@ segv_and_exit: | |||
127 | asmlinkage void do_rt_sigreturn(struct pt_regs *regs) | 142 | asmlinkage void do_rt_sigreturn(struct pt_regs *regs) |
128 | { | 143 | { |
129 | struct rt_signal_frame __user *sf; | 144 | struct rt_signal_frame __user *sf; |
130 | unsigned int psr, pc, npc; | 145 | unsigned int psr, pc, npc, ufp; |
131 | __siginfo_fpu_t __user *fpu_save; | 146 | __siginfo_fpu_t __user *fpu_save; |
132 | __siginfo_rwin_t __user *rwin_save; | 147 | __siginfo_rwin_t __user *rwin_save; |
133 | sigset_t set; | 148 | sigset_t set; |
@@ -135,8 +150,13 @@ asmlinkage void do_rt_sigreturn(struct pt_regs *regs) | |||
135 | 150 | ||
136 | synchronize_user_stack(); | 151 | synchronize_user_stack(); |
137 | sf = (struct rt_signal_frame __user *) regs->u_regs[UREG_FP]; | 152 | sf = (struct rt_signal_frame __user *) regs->u_regs[UREG_FP]; |
138 | if (!access_ok(VERIFY_READ, sf, sizeof(*sf)) || | 153 | if (!invalid_frame_pointer(sf, sizeof(*sf))) |
139 | (((unsigned long) sf) & 0x03)) | 154 | goto segv; |
155 | |||
156 | if (get_user(ufp, &sf->regs.u_regs[UREG_FP])) | ||
157 | goto segv; | ||
158 | |||
159 | if (ufp & 0x7) | ||
140 | goto segv; | 160 | goto segv; |
141 | 161 | ||
142 | err = __get_user(pc, &sf->regs.pc); | 162 | err = __get_user(pc, &sf->regs.pc); |
@@ -178,15 +198,6 @@ segv: | |||
178 | force_sig(SIGSEGV, current); | 198 | force_sig(SIGSEGV, current); |
179 | } | 199 | } |
180 | 200 | ||
181 | /* Checks if the fp is valid */ | ||
182 | static inline int invalid_frame_pointer(void __user *fp, int fplen) | ||
183 | { | ||
184 | if ((((unsigned long) fp) & 7) || !__access_ok((unsigned long)fp, fplen)) | ||
185 | return 1; | ||
186 | |||
187 | return 0; | ||
188 | } | ||
189 | |||
190 | static inline void __user *get_sigframe(struct ksignal *ksig, struct pt_regs *regs, unsigned long framesize) | 201 | static inline void __user *get_sigframe(struct ksignal *ksig, struct pt_regs *regs, unsigned long framesize) |
191 | { | 202 | { |
192 | unsigned long sp = regs->u_regs[UREG_FP]; | 203 | unsigned long sp = regs->u_regs[UREG_FP]; |
diff --git a/arch/sparc/kernel/signal_64.c b/arch/sparc/kernel/signal_64.c index 39aaec173f66..5ee930c48f4c 100644 --- a/arch/sparc/kernel/signal_64.c +++ b/arch/sparc/kernel/signal_64.c | |||
@@ -234,6 +234,17 @@ do_sigsegv: | |||
234 | goto out; | 234 | goto out; |
235 | } | 235 | } |
236 | 236 | ||
237 | /* Checks if the fp is valid. We always build rt signal frames which | ||
238 | * are 16-byte aligned, therefore we can always enforce that the | ||
239 | * restore frame has that property as well. | ||
240 | */ | ||
241 | static bool invalid_frame_pointer(void __user *fp) | ||
242 | { | ||
243 | if (((unsigned long) fp) & 15) | ||
244 | return true; | ||
245 | return false; | ||
246 | } | ||
247 | |||
237 | struct rt_signal_frame { | 248 | struct rt_signal_frame { |
238 | struct sparc_stackf ss; | 249 | struct sparc_stackf ss; |
239 | siginfo_t info; | 250 | siginfo_t info; |
@@ -246,8 +257,8 @@ struct rt_signal_frame { | |||
246 | 257 | ||
247 | void do_rt_sigreturn(struct pt_regs *regs) | 258 | void do_rt_sigreturn(struct pt_regs *regs) |
248 | { | 259 | { |
260 | unsigned long tpc, tnpc, tstate, ufp; | ||
249 | struct rt_signal_frame __user *sf; | 261 | struct rt_signal_frame __user *sf; |
250 | unsigned long tpc, tnpc, tstate; | ||
251 | __siginfo_fpu_t __user *fpu_save; | 262 | __siginfo_fpu_t __user *fpu_save; |
252 | __siginfo_rwin_t __user *rwin_save; | 263 | __siginfo_rwin_t __user *rwin_save; |
253 | sigset_t set; | 264 | sigset_t set; |
@@ -261,10 +272,16 @@ void do_rt_sigreturn(struct pt_regs *regs) | |||
261 | (regs->u_regs [UREG_FP] + STACK_BIAS); | 272 | (regs->u_regs [UREG_FP] + STACK_BIAS); |
262 | 273 | ||
263 | /* 1. Make sure we are not getting garbage from the user */ | 274 | /* 1. Make sure we are not getting garbage from the user */ |
264 | if (((unsigned long) sf) & 3) | 275 | if (invalid_frame_pointer(sf)) |
276 | goto segv; | ||
277 | |||
278 | if (get_user(ufp, &sf->regs.u_regs[UREG_FP])) | ||
265 | goto segv; | 279 | goto segv; |
266 | 280 | ||
267 | err = get_user(tpc, &sf->regs.tpc); | 281 | if ((ufp + STACK_BIAS) & 0x7) |
282 | goto segv; | ||
283 | |||
284 | err = __get_user(tpc, &sf->regs.tpc); | ||
268 | err |= __get_user(tnpc, &sf->regs.tnpc); | 285 | err |= __get_user(tnpc, &sf->regs.tnpc); |
269 | if (test_thread_flag(TIF_32BIT)) { | 286 | if (test_thread_flag(TIF_32BIT)) { |
270 | tpc &= 0xffffffff; | 287 | tpc &= 0xffffffff; |
@@ -308,14 +325,6 @@ segv: | |||
308 | force_sig(SIGSEGV, current); | 325 | force_sig(SIGSEGV, current); |
309 | } | 326 | } |
310 | 327 | ||
311 | /* Checks if the fp is valid */ | ||
312 | static int invalid_frame_pointer(void __user *fp) | ||
313 | { | ||
314 | if (((unsigned long) fp) & 15) | ||
315 | return 1; | ||
316 | return 0; | ||
317 | } | ||
318 | |||
319 | static inline void __user *get_sigframe(struct ksignal *ksig, struct pt_regs *regs, unsigned long framesize) | 328 | static inline void __user *get_sigframe(struct ksignal *ksig, struct pt_regs *regs, unsigned long framesize) |
320 | { | 329 | { |
321 | unsigned long sp = regs->u_regs[UREG_FP] + STACK_BIAS; | 330 | unsigned long sp = regs->u_regs[UREG_FP] + STACK_BIAS; |
diff --git a/arch/sparc/kernel/sigutil_32.c b/arch/sparc/kernel/sigutil_32.c index 0f6eebe71e6c..e5fe8cef9a69 100644 --- a/arch/sparc/kernel/sigutil_32.c +++ b/arch/sparc/kernel/sigutil_32.c | |||
@@ -48,6 +48,10 @@ int save_fpu_state(struct pt_regs *regs, __siginfo_fpu_t __user *fpu) | |||
48 | int restore_fpu_state(struct pt_regs *regs, __siginfo_fpu_t __user *fpu) | 48 | int restore_fpu_state(struct pt_regs *regs, __siginfo_fpu_t __user *fpu) |
49 | { | 49 | { |
50 | int err; | 50 | int err; |
51 | |||
52 | if (((unsigned long) fpu) & 3) | ||
53 | return -EFAULT; | ||
54 | |||
51 | #ifdef CONFIG_SMP | 55 | #ifdef CONFIG_SMP |
52 | if (test_tsk_thread_flag(current, TIF_USEDFPU)) | 56 | if (test_tsk_thread_flag(current, TIF_USEDFPU)) |
53 | regs->psr &= ~PSR_EF; | 57 | regs->psr &= ~PSR_EF; |
@@ -97,7 +101,10 @@ int restore_rwin_state(__siginfo_rwin_t __user *rp) | |||
97 | struct thread_info *t = current_thread_info(); | 101 | struct thread_info *t = current_thread_info(); |
98 | int i, wsaved, err; | 102 | int i, wsaved, err; |
99 | 103 | ||
100 | __get_user(wsaved, &rp->wsaved); | 104 | if (((unsigned long) rp) & 3) |
105 | return -EFAULT; | ||
106 | |||
107 | get_user(wsaved, &rp->wsaved); | ||
101 | if (wsaved > NSWINS) | 108 | if (wsaved > NSWINS) |
102 | return -EFAULT; | 109 | return -EFAULT; |
103 | 110 | ||
diff --git a/arch/sparc/kernel/sigutil_64.c b/arch/sparc/kernel/sigutil_64.c index 387834a9c56a..36aadcbeac69 100644 --- a/arch/sparc/kernel/sigutil_64.c +++ b/arch/sparc/kernel/sigutil_64.c | |||
@@ -37,7 +37,10 @@ int restore_fpu_state(struct pt_regs *regs, __siginfo_fpu_t __user *fpu) | |||
37 | unsigned long fprs; | 37 | unsigned long fprs; |
38 | int err; | 38 | int err; |
39 | 39 | ||
40 | err = __get_user(fprs, &fpu->si_fprs); | 40 | if (((unsigned long) fpu) & 7) |
41 | return -EFAULT; | ||
42 | |||
43 | err = get_user(fprs, &fpu->si_fprs); | ||
41 | fprs_write(0); | 44 | fprs_write(0); |
42 | regs->tstate &= ~TSTATE_PEF; | 45 | regs->tstate &= ~TSTATE_PEF; |
43 | if (fprs & FPRS_DL) | 46 | if (fprs & FPRS_DL) |
@@ -72,7 +75,10 @@ int restore_rwin_state(__siginfo_rwin_t __user *rp) | |||
72 | struct thread_info *t = current_thread_info(); | 75 | struct thread_info *t = current_thread_info(); |
73 | int i, wsaved, err; | 76 | int i, wsaved, err; |
74 | 77 | ||
75 | __get_user(wsaved, &rp->wsaved); | 78 | if (((unsigned long) rp) & 7) |
79 | return -EFAULT; | ||
80 | |||
81 | get_user(wsaved, &rp->wsaved); | ||
76 | if (wsaved > NSWINS) | 82 | if (wsaved > NSWINS) |
77 | return -EFAULT; | 83 | return -EFAULT; |
78 | 84 | ||
diff --git a/arch/sparc/kernel/urtt_fill.S b/arch/sparc/kernel/urtt_fill.S new file mode 100644 index 000000000000..5604a2b051d4 --- /dev/null +++ b/arch/sparc/kernel/urtt_fill.S | |||
@@ -0,0 +1,98 @@ | |||
1 | #include <asm/thread_info.h> | ||
2 | #include <asm/trap_block.h> | ||
3 | #include <asm/spitfire.h> | ||
4 | #include <asm/ptrace.h> | ||
5 | #include <asm/head.h> | ||
6 | |||
7 | .text | ||
8 | .align 8 | ||
9 | .globl user_rtt_fill_fixup_common | ||
10 | user_rtt_fill_fixup_common: | ||
11 | rdpr %cwp, %g1 | ||
12 | add %g1, 1, %g1 | ||
13 | wrpr %g1, 0x0, %cwp | ||
14 | |||
15 | rdpr %wstate, %g2 | ||
16 | sll %g2, 3, %g2 | ||
17 | wrpr %g2, 0x0, %wstate | ||
18 | |||
19 | /* We know %canrestore and %otherwin are both zero. */ | ||
20 | |||
21 | sethi %hi(sparc64_kern_pri_context), %g2 | ||
22 | ldx [%g2 + %lo(sparc64_kern_pri_context)], %g2 | ||
23 | mov PRIMARY_CONTEXT, %g1 | ||
24 | |||
25 | 661: stxa %g2, [%g1] ASI_DMMU | ||
26 | .section .sun4v_1insn_patch, "ax" | ||
27 | .word 661b | ||
28 | stxa %g2, [%g1] ASI_MMU | ||
29 | .previous | ||
30 | |||
31 | sethi %hi(KERNBASE), %g1 | ||
32 | flush %g1 | ||
33 | |||
34 | mov %g4, %l4 | ||
35 | mov %g5, %l5 | ||
36 | brnz,pn %g3, 1f | ||
37 | mov %g3, %l3 | ||
38 | |||
39 | or %g4, FAULT_CODE_WINFIXUP, %g4 | ||
40 | stb %g4, [%g6 + TI_FAULT_CODE] | ||
41 | stx %g5, [%g6 + TI_FAULT_ADDR] | ||
42 | 1: | ||
43 | mov %g6, %l1 | ||
44 | wrpr %g0, 0x0, %tl | ||
45 | |||
46 | 661: nop | ||
47 | .section .sun4v_1insn_patch, "ax" | ||
48 | .word 661b | ||
49 | SET_GL(0) | ||
50 | .previous | ||
51 | |||
52 | wrpr %g0, RTRAP_PSTATE, %pstate | ||
53 | |||
54 | mov %l1, %g6 | ||
55 | ldx [%g6 + TI_TASK], %g4 | ||
56 | LOAD_PER_CPU_BASE(%g5, %g6, %g1, %g2, %g3) | ||
57 | |||
58 | brnz,pn %l3, 1f | ||
59 | nop | ||
60 | |||
61 | call do_sparc64_fault | ||
62 | add %sp, PTREGS_OFF, %o0 | ||
63 | ba,pt %xcc, rtrap | ||
64 | nop | ||
65 | |||
66 | 1: cmp %g3, 2 | ||
67 | bne,pn %xcc, 2f | ||
68 | nop | ||
69 | |||
70 | sethi %hi(tlb_type), %g1 | ||
71 | lduw [%g1 + %lo(tlb_type)], %g1 | ||
72 | cmp %g1, 3 | ||
73 | bne,pt %icc, 1f | ||
74 | add %sp, PTREGS_OFF, %o0 | ||
75 | mov %l4, %o2 | ||
76 | call sun4v_do_mna | ||
77 | mov %l5, %o1 | ||
78 | ba,a,pt %xcc, rtrap | ||
79 | 1: mov %l4, %o1 | ||
80 | mov %l5, %o2 | ||
81 | call mem_address_unaligned | ||
82 | nop | ||
83 | ba,a,pt %xcc, rtrap | ||
84 | |||
85 | 2: sethi %hi(tlb_type), %g1 | ||
86 | mov %l4, %o1 | ||
87 | lduw [%g1 + %lo(tlb_type)], %g1 | ||
88 | mov %l5, %o2 | ||
89 | cmp %g1, 3 | ||
90 | bne,pt %icc, 1f | ||
91 | add %sp, PTREGS_OFF, %o0 | ||
92 | call sun4v_data_access_exception | ||
93 | nop | ||
94 | ba,a,pt %xcc, rtrap | ||
95 | |||
96 | 1: call spitfire_data_access_exception | ||
97 | nop | ||
98 | ba,a,pt %xcc, rtrap | ||
diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c index 652683cb4b4b..14bb0d5ed3c6 100644 --- a/arch/sparc/mm/init_64.c +++ b/arch/sparc/mm/init_64.c | |||
@@ -2824,9 +2824,10 @@ void hugetlb_setup(struct pt_regs *regs) | |||
2824 | * the Data-TLB for huge pages. | 2824 | * the Data-TLB for huge pages. |
2825 | */ | 2825 | */ |
2826 | if (tlb_type == cheetah_plus) { | 2826 | if (tlb_type == cheetah_plus) { |
2827 | bool need_context_reload = false; | ||
2827 | unsigned long ctx; | 2828 | unsigned long ctx; |
2828 | 2829 | ||
2829 | spin_lock(&ctx_alloc_lock); | 2830 | spin_lock_irq(&ctx_alloc_lock); |
2830 | ctx = mm->context.sparc64_ctx_val; | 2831 | ctx = mm->context.sparc64_ctx_val; |
2831 | ctx &= ~CTX_PGSZ_MASK; | 2832 | ctx &= ~CTX_PGSZ_MASK; |
2832 | ctx |= CTX_PGSZ_BASE << CTX_PGSZ0_SHIFT; | 2833 | ctx |= CTX_PGSZ_BASE << CTX_PGSZ0_SHIFT; |
@@ -2845,9 +2846,12 @@ void hugetlb_setup(struct pt_regs *regs) | |||
2845 | * also executing in this address space. | 2846 | * also executing in this address space. |
2846 | */ | 2847 | */ |
2847 | mm->context.sparc64_ctx_val = ctx; | 2848 | mm->context.sparc64_ctx_val = ctx; |
2848 | on_each_cpu(context_reload, mm, 0); | 2849 | need_context_reload = true; |
2849 | } | 2850 | } |
2850 | spin_unlock(&ctx_alloc_lock); | 2851 | spin_unlock_irq(&ctx_alloc_lock); |
2852 | |||
2853 | if (need_context_reload) | ||
2854 | on_each_cpu(context_reload, mm, 0); | ||
2851 | } | 2855 | } |
2852 | } | 2856 | } |
2853 | #endif | 2857 | #endif |
diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c index 769af907f824..7597b42a8a88 100644 --- a/arch/x86/kvm/cpuid.c +++ b/arch/x86/kvm/cpuid.c | |||
@@ -181,19 +181,22 @@ int kvm_vcpu_ioctl_set_cpuid(struct kvm_vcpu *vcpu, | |||
181 | struct kvm_cpuid_entry __user *entries) | 181 | struct kvm_cpuid_entry __user *entries) |
182 | { | 182 | { |
183 | int r, i; | 183 | int r, i; |
184 | struct kvm_cpuid_entry *cpuid_entries; | 184 | struct kvm_cpuid_entry *cpuid_entries = NULL; |
185 | 185 | ||
186 | r = -E2BIG; | 186 | r = -E2BIG; |
187 | if (cpuid->nent > KVM_MAX_CPUID_ENTRIES) | 187 | if (cpuid->nent > KVM_MAX_CPUID_ENTRIES) |
188 | goto out; | 188 | goto out; |
189 | r = -ENOMEM; | 189 | r = -ENOMEM; |
190 | cpuid_entries = vmalloc(sizeof(struct kvm_cpuid_entry) * cpuid->nent); | 190 | if (cpuid->nent) { |
191 | if (!cpuid_entries) | 191 | cpuid_entries = vmalloc(sizeof(struct kvm_cpuid_entry) * |
192 | goto out; | 192 | cpuid->nent); |
193 | r = -EFAULT; | 193 | if (!cpuid_entries) |
194 | if (copy_from_user(cpuid_entries, entries, | 194 | goto out; |
195 | cpuid->nent * sizeof(struct kvm_cpuid_entry))) | 195 | r = -EFAULT; |
196 | goto out_free; | 196 | if (copy_from_user(cpuid_entries, entries, |
197 | cpuid->nent * sizeof(struct kvm_cpuid_entry))) | ||
198 | goto out; | ||
199 | } | ||
197 | for (i = 0; i < cpuid->nent; i++) { | 200 | for (i = 0; i < cpuid->nent; i++) { |
198 | vcpu->arch.cpuid_entries[i].function = cpuid_entries[i].function; | 201 | vcpu->arch.cpuid_entries[i].function = cpuid_entries[i].function; |
199 | vcpu->arch.cpuid_entries[i].eax = cpuid_entries[i].eax; | 202 | vcpu->arch.cpuid_entries[i].eax = cpuid_entries[i].eax; |
@@ -212,9 +215,8 @@ int kvm_vcpu_ioctl_set_cpuid(struct kvm_vcpu *vcpu, | |||
212 | kvm_x86_ops->cpuid_update(vcpu); | 215 | kvm_x86_ops->cpuid_update(vcpu); |
213 | r = kvm_update_cpuid(vcpu); | 216 | r = kvm_update_cpuid(vcpu); |
214 | 217 | ||
215 | out_free: | ||
216 | vfree(cpuid_entries); | ||
217 | out: | 218 | out: |
219 | vfree(cpuid_entries); | ||
218 | return r; | 220 | return r; |
219 | } | 221 | } |
220 | 222 | ||
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c index 24e800116ab4..def97b3a392b 100644 --- a/arch/x86/kvm/mmu.c +++ b/arch/x86/kvm/mmu.c | |||
@@ -336,12 +336,12 @@ static gfn_t pse36_gfn_delta(u32 gpte) | |||
336 | #ifdef CONFIG_X86_64 | 336 | #ifdef CONFIG_X86_64 |
337 | static void __set_spte(u64 *sptep, u64 spte) | 337 | static void __set_spte(u64 *sptep, u64 spte) |
338 | { | 338 | { |
339 | *sptep = spte; | 339 | WRITE_ONCE(*sptep, spte); |
340 | } | 340 | } |
341 | 341 | ||
342 | static void __update_clear_spte_fast(u64 *sptep, u64 spte) | 342 | static void __update_clear_spte_fast(u64 *sptep, u64 spte) |
343 | { | 343 | { |
344 | *sptep = spte; | 344 | WRITE_ONCE(*sptep, spte); |
345 | } | 345 | } |
346 | 346 | ||
347 | static u64 __update_clear_spte_slow(u64 *sptep, u64 spte) | 347 | static u64 __update_clear_spte_slow(u64 *sptep, u64 spte) |
@@ -390,7 +390,7 @@ static void __set_spte(u64 *sptep, u64 spte) | |||
390 | */ | 390 | */ |
391 | smp_wmb(); | 391 | smp_wmb(); |
392 | 392 | ||
393 | ssptep->spte_low = sspte.spte_low; | 393 | WRITE_ONCE(ssptep->spte_low, sspte.spte_low); |
394 | } | 394 | } |
395 | 395 | ||
396 | static void __update_clear_spte_fast(u64 *sptep, u64 spte) | 396 | static void __update_clear_spte_fast(u64 *sptep, u64 spte) |
@@ -400,7 +400,7 @@ static void __update_clear_spte_fast(u64 *sptep, u64 spte) | |||
400 | ssptep = (union split_spte *)sptep; | 400 | ssptep = (union split_spte *)sptep; |
401 | sspte = (union split_spte)spte; | 401 | sspte = (union split_spte)spte; |
402 | 402 | ||
403 | ssptep->spte_low = sspte.spte_low; | 403 | WRITE_ONCE(ssptep->spte_low, sspte.spte_low); |
404 | 404 | ||
405 | /* | 405 | /* |
406 | * If we map the spte from present to nonpresent, we should clear | 406 | * If we map the spte from present to nonpresent, we should clear |
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index c805cf494154..902d9da12392 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c | |||
@@ -2314,6 +2314,7 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info) | |||
2314 | case MSR_AMD64_NB_CFG: | 2314 | case MSR_AMD64_NB_CFG: |
2315 | case MSR_FAM10H_MMIO_CONF_BASE: | 2315 | case MSR_FAM10H_MMIO_CONF_BASE: |
2316 | case MSR_AMD64_BU_CFG2: | 2316 | case MSR_AMD64_BU_CFG2: |
2317 | case MSR_IA32_PERF_CTL: | ||
2317 | msr_info->data = 0; | 2318 | msr_info->data = 0; |
2318 | break; | 2319 | break; |
2319 | case MSR_K7_EVNTSEL0 ... MSR_K7_EVNTSEL3: | 2320 | case MSR_K7_EVNTSEL0 ... MSR_K7_EVNTSEL3: |
@@ -2972,6 +2973,10 @@ static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu, | |||
2972 | | KVM_VCPUEVENT_VALID_SMM)) | 2973 | | KVM_VCPUEVENT_VALID_SMM)) |
2973 | return -EINVAL; | 2974 | return -EINVAL; |
2974 | 2975 | ||
2976 | if (events->exception.injected && | ||
2977 | (events->exception.nr > 31 || events->exception.nr == NMI_VECTOR)) | ||
2978 | return -EINVAL; | ||
2979 | |||
2975 | process_nmi(vcpu); | 2980 | process_nmi(vcpu); |
2976 | vcpu->arch.exception.pending = events->exception.injected; | 2981 | vcpu->arch.exception.pending = events->exception.injected; |
2977 | vcpu->arch.exception.nr = events->exception.nr; | 2982 | vcpu->arch.exception.nr = events->exception.nr; |
@@ -3036,6 +3041,11 @@ static int kvm_vcpu_ioctl_x86_set_debugregs(struct kvm_vcpu *vcpu, | |||
3036 | if (dbgregs->flags) | 3041 | if (dbgregs->flags) |
3037 | return -EINVAL; | 3042 | return -EINVAL; |
3038 | 3043 | ||
3044 | if (dbgregs->dr6 & ~0xffffffffull) | ||
3045 | return -EINVAL; | ||
3046 | if (dbgregs->dr7 & ~0xffffffffull) | ||
3047 | return -EINVAL; | ||
3048 | |||
3039 | memcpy(vcpu->arch.db, dbgregs->db, sizeof(vcpu->arch.db)); | 3049 | memcpy(vcpu->arch.db, dbgregs->db, sizeof(vcpu->arch.db)); |
3040 | kvm_update_dr0123(vcpu); | 3050 | kvm_update_dr0123(vcpu); |
3041 | vcpu->arch.dr6 = dbgregs->dr6; | 3051 | vcpu->arch.dr6 = dbgregs->dr6; |
@@ -7815,7 +7825,7 @@ int __x86_set_memory_region(struct kvm *kvm, int id, gpa_t gpa, u32 size) | |||
7815 | 7825 | ||
7816 | slot = id_to_memslot(slots, id); | 7826 | slot = id_to_memslot(slots, id); |
7817 | if (size) { | 7827 | if (size) { |
7818 | if (WARN_ON(slot->npages)) | 7828 | if (slot->npages) |
7819 | return -EEXIST; | 7829 | return -EEXIST; |
7820 | 7830 | ||
7821 | /* | 7831 | /* |
diff --git a/crypto/asymmetric_keys/Kconfig b/crypto/asymmetric_keys/Kconfig index e28e912000a7..331f6baf2df8 100644 --- a/crypto/asymmetric_keys/Kconfig +++ b/crypto/asymmetric_keys/Kconfig | |||
@@ -13,6 +13,7 @@ config ASYMMETRIC_PUBLIC_KEY_SUBTYPE | |||
13 | tristate "Asymmetric public-key crypto algorithm subtype" | 13 | tristate "Asymmetric public-key crypto algorithm subtype" |
14 | select MPILIB | 14 | select MPILIB |
15 | select CRYPTO_HASH_INFO | 15 | select CRYPTO_HASH_INFO |
16 | select CRYPTO_AKCIPHER | ||
16 | help | 17 | help |
17 | This option provides support for asymmetric public key type handling. | 18 | This option provides support for asymmetric public key type handling. |
18 | If signature generation and/or verification are to be used, | 19 | If signature generation and/or verification are to be used, |
diff --git a/drivers/acpi/acpi_processor.c b/drivers/acpi/acpi_processor.c index 0d92d0f915e9..c7ba948d253c 100644 --- a/drivers/acpi/acpi_processor.c +++ b/drivers/acpi/acpi_processor.c | |||
@@ -331,15 +331,6 @@ static int acpi_processor_get_info(struct acpi_device *device) | |||
331 | pr->throttling.duty_width = acpi_gbl_FADT.duty_width; | 331 | pr->throttling.duty_width = acpi_gbl_FADT.duty_width; |
332 | 332 | ||
333 | pr->pblk = object.processor.pblk_address; | 333 | pr->pblk = object.processor.pblk_address; |
334 | |||
335 | /* | ||
336 | * We don't care about error returns - we just try to mark | ||
337 | * these reserved so that nobody else is confused into thinking | ||
338 | * that this region might be unused.. | ||
339 | * | ||
340 | * (In particular, allocating the IO range for Cardbus) | ||
341 | */ | ||
342 | request_region(pr->throttling.address, 6, "ACPI CPU throttle"); | ||
343 | } | 334 | } |
344 | 335 | ||
345 | /* | 336 | /* |
diff --git a/drivers/acpi/acpi_video.c b/drivers/acpi/acpi_video.c index 3d5b8a099351..c1d138e128cb 100644 --- a/drivers/acpi/acpi_video.c +++ b/drivers/acpi/acpi_video.c | |||
@@ -754,7 +754,8 @@ static int acpi_video_bqc_quirk(struct acpi_video_device *device, | |||
754 | } | 754 | } |
755 | 755 | ||
756 | int acpi_video_get_levels(struct acpi_device *device, | 756 | int acpi_video_get_levels(struct acpi_device *device, |
757 | struct acpi_video_device_brightness **dev_br) | 757 | struct acpi_video_device_brightness **dev_br, |
758 | int *pmax_level) | ||
758 | { | 759 | { |
759 | union acpi_object *obj = NULL; | 760 | union acpi_object *obj = NULL; |
760 | int i, max_level = 0, count = 0, level_ac_battery = 0; | 761 | int i, max_level = 0, count = 0, level_ac_battery = 0; |
@@ -841,6 +842,8 @@ int acpi_video_get_levels(struct acpi_device *device, | |||
841 | 842 | ||
842 | br->count = count; | 843 | br->count = count; |
843 | *dev_br = br; | 844 | *dev_br = br; |
845 | if (pmax_level) | ||
846 | *pmax_level = max_level; | ||
844 | 847 | ||
845 | out: | 848 | out: |
846 | kfree(obj); | 849 | kfree(obj); |
@@ -869,7 +872,7 @@ acpi_video_init_brightness(struct acpi_video_device *device) | |||
869 | struct acpi_video_device_brightness *br = NULL; | 872 | struct acpi_video_device_brightness *br = NULL; |
870 | int result = -EINVAL; | 873 | int result = -EINVAL; |
871 | 874 | ||
872 | result = acpi_video_get_levels(device->dev, &br); | 875 | result = acpi_video_get_levels(device->dev, &br, &max_level); |
873 | if (result) | 876 | if (result) |
874 | return result; | 877 | return result; |
875 | device->brightness = br; | 878 | device->brightness = br; |
@@ -1737,7 +1740,7 @@ static void acpi_video_run_bcl_for_osi(struct acpi_video_bus *video) | |||
1737 | 1740 | ||
1738 | mutex_lock(&video->device_list_lock); | 1741 | mutex_lock(&video->device_list_lock); |
1739 | list_for_each_entry(dev, &video->video_device_list, entry) { | 1742 | list_for_each_entry(dev, &video->video_device_list, entry) { |
1740 | if (!acpi_video_device_lcd_query_levels(dev, &levels)) | 1743 | if (!acpi_video_device_lcd_query_levels(dev->dev->handle, &levels)) |
1741 | kfree(levels); | 1744 | kfree(levels); |
1742 | } | 1745 | } |
1743 | mutex_unlock(&video->device_list_lock); | 1746 | mutex_unlock(&video->device_list_lock); |
diff --git a/drivers/acpi/acpica/hwregs.c b/drivers/acpi/acpica/hwregs.c index 0f18dbc9a37f..daceb80022b0 100644 --- a/drivers/acpi/acpica/hwregs.c +++ b/drivers/acpi/acpica/hwregs.c | |||
@@ -83,27 +83,22 @@ acpi_hw_write_multiple(u32 value, | |||
83 | static u8 | 83 | static u8 |
84 | acpi_hw_get_access_bit_width(struct acpi_generic_address *reg, u8 max_bit_width) | 84 | acpi_hw_get_access_bit_width(struct acpi_generic_address *reg, u8 max_bit_width) |
85 | { | 85 | { |
86 | u64 address; | ||
87 | |||
88 | if (!reg->access_width) { | 86 | if (!reg->access_width) { |
87 | if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_IO) { | ||
88 | max_bit_width = 32; | ||
89 | } | ||
90 | |||
89 | /* | 91 | /* |
90 | * Detect old register descriptors where only the bit_width field | 92 | * Detect old register descriptors where only the bit_width field |
91 | * makes senses. The target address is copied to handle possible | 93 | * makes senses. |
92 | * alignment issues. | ||
93 | */ | 94 | */ |
94 | ACPI_MOVE_64_TO_64(&address, ®->address); | 95 | if (reg->bit_width < max_bit_width && |
95 | if (!reg->bit_offset && reg->bit_width && | 96 | !reg->bit_offset && reg->bit_width && |
96 | ACPI_IS_POWER_OF_TWO(reg->bit_width) && | 97 | ACPI_IS_POWER_OF_TWO(reg->bit_width) && |
97 | ACPI_IS_ALIGNED(reg->bit_width, 8) && | 98 | ACPI_IS_ALIGNED(reg->bit_width, 8)) { |
98 | ACPI_IS_ALIGNED(address, reg->bit_width)) { | ||
99 | return (reg->bit_width); | 99 | return (reg->bit_width); |
100 | } else { | ||
101 | if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_IO) { | ||
102 | return (32); | ||
103 | } else { | ||
104 | return (max_bit_width); | ||
105 | } | ||
106 | } | 100 | } |
101 | return (max_bit_width); | ||
107 | } else { | 102 | } else { |
108 | return (1 << (reg->access_width + 2)); | 103 | return (1 << (reg->access_width + 2)); |
109 | } | 104 | } |
diff --git a/drivers/acpi/processor_throttling.c b/drivers/acpi/processor_throttling.c index f170d746336d..c72e64893d03 100644 --- a/drivers/acpi/processor_throttling.c +++ b/drivers/acpi/processor_throttling.c | |||
@@ -676,6 +676,15 @@ static int acpi_processor_get_throttling_fadt(struct acpi_processor *pr) | |||
676 | if (!pr->flags.throttling) | 676 | if (!pr->flags.throttling) |
677 | return -ENODEV; | 677 | return -ENODEV; |
678 | 678 | ||
679 | /* | ||
680 | * We don't care about error returns - we just try to mark | ||
681 | * these reserved so that nobody else is confused into thinking | ||
682 | * that this region might be unused.. | ||
683 | * | ||
684 | * (In particular, allocating the IO range for Cardbus) | ||
685 | */ | ||
686 | request_region(pr->throttling.address, 6, "ACPI CPU throttle"); | ||
687 | |||
679 | pr->throttling.state = 0; | 688 | pr->throttling.state = 0; |
680 | 689 | ||
681 | duty_mask = pr->throttling.state_count - 1; | 690 | duty_mask = pr->throttling.state_count - 1; |
diff --git a/drivers/atm/firestream.c b/drivers/atm/firestream.c index a969a7e443be..85aaf2222587 100644 --- a/drivers/atm/firestream.c +++ b/drivers/atm/firestream.c | |||
@@ -181,13 +181,17 @@ static char *res_strings[] = { | |||
181 | "reserved 27", | 181 | "reserved 27", |
182 | "reserved 28", | 182 | "reserved 28", |
183 | "reserved 29", | 183 | "reserved 29", |
184 | "reserved 30", | 184 | "reserved 30", /* FIXME: The strings between 30-40 might be wrong. */ |
185 | "reassembly abort: no buffers", | 185 | "reassembly abort: no buffers", |
186 | "receive buffer overflow", | 186 | "receive buffer overflow", |
187 | "change in GFC", | 187 | "change in GFC", |
188 | "receive buffer full", | 188 | "receive buffer full", |
189 | "low priority discard - no receive descriptor", | 189 | "low priority discard - no receive descriptor", |
190 | "low priority discard - missing end of packet", | 190 | "low priority discard - missing end of packet", |
191 | "reserved 37", | ||
192 | "reserved 38", | ||
193 | "reserved 39", | ||
194 | "reseverd 40", | ||
191 | "reserved 41", | 195 | "reserved 41", |
192 | "reserved 42", | 196 | "reserved 42", |
193 | "reserved 43", | 197 | "reserved 43", |
diff --git a/drivers/atm/iphase.c b/drivers/atm/iphase.c index 7d00f2994738..809dd1e02091 100644 --- a/drivers/atm/iphase.c +++ b/drivers/atm/iphase.c | |||
@@ -1128,7 +1128,7 @@ static int rx_pkt(struct atm_dev *dev) | |||
1128 | /* make the ptr point to the corresponding buffer desc entry */ | 1128 | /* make the ptr point to the corresponding buffer desc entry */ |
1129 | buf_desc_ptr += desc; | 1129 | buf_desc_ptr += desc; |
1130 | if (!desc || (desc > iadev->num_rx_desc) || | 1130 | if (!desc || (desc > iadev->num_rx_desc) || |
1131 | ((buf_desc_ptr->vc_index & 0xffff) > iadev->num_vc)) { | 1131 | ((buf_desc_ptr->vc_index & 0xffff) >= iadev->num_vc)) { |
1132 | free_desc(dev, desc); | 1132 | free_desc(dev, desc); |
1133 | IF_ERR(printk("IA: bad descriptor desc = %d \n", desc);) | 1133 | IF_ERR(printk("IA: bad descriptor desc = %d \n", desc);) |
1134 | return -1; | 1134 | return -1; |
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c index 36bc11a106aa..9009295f5134 100644 --- a/drivers/cpufreq/cpufreq.c +++ b/drivers/cpufreq/cpufreq.c | |||
@@ -1832,7 +1832,7 @@ EXPORT_SYMBOL(cpufreq_unregister_notifier); | |||
1832 | unsigned int cpufreq_driver_fast_switch(struct cpufreq_policy *policy, | 1832 | unsigned int cpufreq_driver_fast_switch(struct cpufreq_policy *policy, |
1833 | unsigned int target_freq) | 1833 | unsigned int target_freq) |
1834 | { | 1834 | { |
1835 | clamp_val(target_freq, policy->min, policy->max); | 1835 | target_freq = clamp_val(target_freq, policy->min, policy->max); |
1836 | 1836 | ||
1837 | return cpufreq_driver->fast_switch(policy, target_freq); | 1837 | return cpufreq_driver->fast_switch(policy, target_freq); |
1838 | } | 1838 | } |
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c index 3a9c4325d6e2..0d159b513469 100644 --- a/drivers/cpufreq/intel_pstate.c +++ b/drivers/cpufreq/intel_pstate.c | |||
@@ -449,7 +449,7 @@ static void intel_pstate_init_acpi_perf_limits(struct cpufreq_policy *policy) | |||
449 | cpu->acpi_perf_data.states[0].core_frequency = | 449 | cpu->acpi_perf_data.states[0].core_frequency = |
450 | policy->cpuinfo.max_freq / 1000; | 450 | policy->cpuinfo.max_freq / 1000; |
451 | cpu->valid_pss_table = true; | 451 | cpu->valid_pss_table = true; |
452 | pr_info("_PPC limits will be enforced\n"); | 452 | pr_debug("_PPC limits will be enforced\n"); |
453 | 453 | ||
454 | return; | 454 | return; |
455 | 455 | ||
diff --git a/drivers/crypto/ccp/ccp-crypto-aes-xts.c b/drivers/crypto/ccp/ccp-crypto-aes-xts.c index 52c7395cb8d8..0d0d4529ee36 100644 --- a/drivers/crypto/ccp/ccp-crypto-aes-xts.c +++ b/drivers/crypto/ccp/ccp-crypto-aes-xts.c | |||
@@ -122,6 +122,7 @@ static int ccp_aes_xts_crypt(struct ablkcipher_request *req, | |||
122 | struct ccp_ctx *ctx = crypto_tfm_ctx(req->base.tfm); | 122 | struct ccp_ctx *ctx = crypto_tfm_ctx(req->base.tfm); |
123 | struct ccp_aes_req_ctx *rctx = ablkcipher_request_ctx(req); | 123 | struct ccp_aes_req_ctx *rctx = ablkcipher_request_ctx(req); |
124 | unsigned int unit; | 124 | unsigned int unit; |
125 | u32 unit_size; | ||
125 | int ret; | 126 | int ret; |
126 | 127 | ||
127 | if (!ctx->u.aes.key_len) | 128 | if (!ctx->u.aes.key_len) |
@@ -133,11 +134,17 @@ static int ccp_aes_xts_crypt(struct ablkcipher_request *req, | |||
133 | if (!req->info) | 134 | if (!req->info) |
134 | return -EINVAL; | 135 | return -EINVAL; |
135 | 136 | ||
136 | for (unit = 0; unit < ARRAY_SIZE(unit_size_map); unit++) | 137 | unit_size = CCP_XTS_AES_UNIT_SIZE__LAST; |
137 | if (!(req->nbytes & (unit_size_map[unit].size - 1))) | 138 | if (req->nbytes <= unit_size_map[0].size) { |
138 | break; | 139 | for (unit = 0; unit < ARRAY_SIZE(unit_size_map); unit++) { |
140 | if (!(req->nbytes & (unit_size_map[unit].size - 1))) { | ||
141 | unit_size = unit_size_map[unit].value; | ||
142 | break; | ||
143 | } | ||
144 | } | ||
145 | } | ||
139 | 146 | ||
140 | if ((unit_size_map[unit].value == CCP_XTS_AES_UNIT_SIZE__LAST) || | 147 | if ((unit_size == CCP_XTS_AES_UNIT_SIZE__LAST) || |
141 | (ctx->u.aes.key_len != AES_KEYSIZE_128)) { | 148 | (ctx->u.aes.key_len != AES_KEYSIZE_128)) { |
142 | /* Use the fallback to process the request for any | 149 | /* Use the fallback to process the request for any |
143 | * unsupported unit sizes or key sizes | 150 | * unsupported unit sizes or key sizes |
@@ -158,7 +165,7 @@ static int ccp_aes_xts_crypt(struct ablkcipher_request *req, | |||
158 | rctx->cmd.engine = CCP_ENGINE_XTS_AES_128; | 165 | rctx->cmd.engine = CCP_ENGINE_XTS_AES_128; |
159 | rctx->cmd.u.xts.action = (encrypt) ? CCP_AES_ACTION_ENCRYPT | 166 | rctx->cmd.u.xts.action = (encrypt) ? CCP_AES_ACTION_ENCRYPT |
160 | : CCP_AES_ACTION_DECRYPT; | 167 | : CCP_AES_ACTION_DECRYPT; |
161 | rctx->cmd.u.xts.unit_size = unit_size_map[unit].value; | 168 | rctx->cmd.u.xts.unit_size = unit_size; |
162 | rctx->cmd.u.xts.key = &ctx->u.aes.key_sg; | 169 | rctx->cmd.u.xts.key = &ctx->u.aes.key_sg; |
163 | rctx->cmd.u.xts.key_len = ctx->u.aes.key_len; | 170 | rctx->cmd.u.xts.key_len = ctx->u.aes.key_len; |
164 | rctx->cmd.u.xts.iv = &rctx->iv_sg; | 171 | rctx->cmd.u.xts.iv = &rctx->iv_sg; |
diff --git a/drivers/crypto/omap-sham.c b/drivers/crypto/omap-sham.c index 6eefaa2fe58f..63464e86f2b1 100644 --- a/drivers/crypto/omap-sham.c +++ b/drivers/crypto/omap-sham.c | |||
@@ -1986,7 +1986,7 @@ err_algs: | |||
1986 | &dd->pdata->algs_info[i].algs_list[j]); | 1986 | &dd->pdata->algs_info[i].algs_list[j]); |
1987 | err_pm: | 1987 | err_pm: |
1988 | pm_runtime_disable(dev); | 1988 | pm_runtime_disable(dev); |
1989 | if (dd->polling_mode) | 1989 | if (!dd->polling_mode) |
1990 | dma_release_channel(dd->dma_lch); | 1990 | dma_release_channel(dd->dma_lch); |
1991 | data_err: | 1991 | data_err: |
1992 | dev_err(dev, "initialization failed.\n"); | 1992 | dev_err(dev, "initialization failed.\n"); |
diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c index 4a2c07ee6677..6355ab38d630 100644 --- a/drivers/dma-buf/dma-buf.c +++ b/drivers/dma-buf/dma-buf.c | |||
@@ -33,6 +33,7 @@ | |||
33 | #include <linux/seq_file.h> | 33 | #include <linux/seq_file.h> |
34 | #include <linux/poll.h> | 34 | #include <linux/poll.h> |
35 | #include <linux/reservation.h> | 35 | #include <linux/reservation.h> |
36 | #include <linux/mm.h> | ||
36 | 37 | ||
37 | #include <uapi/linux/dma-buf.h> | 38 | #include <uapi/linux/dma-buf.h> |
38 | 39 | ||
@@ -90,7 +91,7 @@ static int dma_buf_mmap_internal(struct file *file, struct vm_area_struct *vma) | |||
90 | dmabuf = file->private_data; | 91 | dmabuf = file->private_data; |
91 | 92 | ||
92 | /* check for overflowing the buffer's size */ | 93 | /* check for overflowing the buffer's size */ |
93 | if (vma->vm_pgoff + ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT) > | 94 | if (vma->vm_pgoff + vma_pages(vma) > |
94 | dmabuf->size >> PAGE_SHIFT) | 95 | dmabuf->size >> PAGE_SHIFT) |
95 | return -EINVAL; | 96 | return -EINVAL; |
96 | 97 | ||
@@ -723,11 +724,11 @@ int dma_buf_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma, | |||
723 | return -EINVAL; | 724 | return -EINVAL; |
724 | 725 | ||
725 | /* check for offset overflow */ | 726 | /* check for offset overflow */ |
726 | if (pgoff + ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT) < pgoff) | 727 | if (pgoff + vma_pages(vma) < pgoff) |
727 | return -EOVERFLOW; | 728 | return -EOVERFLOW; |
728 | 729 | ||
729 | /* check for overflowing the buffer's size */ | 730 | /* check for overflowing the buffer's size */ |
730 | if (pgoff + ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT) > | 731 | if (pgoff + vma_pages(vma) > |
731 | dmabuf->size >> PAGE_SHIFT) | 732 | dmabuf->size >> PAGE_SHIFT) |
732 | return -EINVAL; | 733 | return -EINVAL; |
733 | 734 | ||
diff --git a/drivers/dma-buf/reservation.c b/drivers/dma-buf/reservation.c index c0bd5722c997..9566a62ad8e3 100644 --- a/drivers/dma-buf/reservation.c +++ b/drivers/dma-buf/reservation.c | |||
@@ -35,6 +35,17 @@ | |||
35 | #include <linux/reservation.h> | 35 | #include <linux/reservation.h> |
36 | #include <linux/export.h> | 36 | #include <linux/export.h> |
37 | 37 | ||
38 | /** | ||
39 | * DOC: Reservation Object Overview | ||
40 | * | ||
41 | * The reservation object provides a mechanism to manage shared and | ||
42 | * exclusive fences associated with a buffer. A reservation object | ||
43 | * can have attached one exclusive fence (normally associated with | ||
44 | * write operations) or N shared fences (read operations). The RCU | ||
45 | * mechanism is used to protect read access to fences from locked | ||
46 | * write-side updates. | ||
47 | */ | ||
48 | |||
38 | DEFINE_WW_CLASS(reservation_ww_class); | 49 | DEFINE_WW_CLASS(reservation_ww_class); |
39 | EXPORT_SYMBOL(reservation_ww_class); | 50 | EXPORT_SYMBOL(reservation_ww_class); |
40 | 51 | ||
@@ -43,9 +54,17 @@ EXPORT_SYMBOL(reservation_seqcount_class); | |||
43 | 54 | ||
44 | const char reservation_seqcount_string[] = "reservation_seqcount"; | 55 | const char reservation_seqcount_string[] = "reservation_seqcount"; |
45 | EXPORT_SYMBOL(reservation_seqcount_string); | 56 | EXPORT_SYMBOL(reservation_seqcount_string); |
46 | /* | 57 | |
47 | * Reserve space to add a shared fence to a reservation_object, | 58 | /** |
48 | * must be called with obj->lock held. | 59 | * reservation_object_reserve_shared - Reserve space to add a shared |
60 | * fence to a reservation_object. | ||
61 | * @obj: reservation object | ||
62 | * | ||
63 | * Should be called before reservation_object_add_shared_fence(). Must | ||
64 | * be called with obj->lock held. | ||
65 | * | ||
66 | * RETURNS | ||
67 | * Zero for success, or -errno | ||
49 | */ | 68 | */ |
50 | int reservation_object_reserve_shared(struct reservation_object *obj) | 69 | int reservation_object_reserve_shared(struct reservation_object *obj) |
51 | { | 70 | { |
@@ -180,7 +199,11 @@ done: | |||
180 | fence_put(old_fence); | 199 | fence_put(old_fence); |
181 | } | 200 | } |
182 | 201 | ||
183 | /* | 202 | /** |
203 | * reservation_object_add_shared_fence - Add a fence to a shared slot | ||
204 | * @obj: the reservation object | ||
205 | * @fence: the shared fence to add | ||
206 | * | ||
184 | * Add a fence to a shared slot, obj->lock must be held, and | 207 | * Add a fence to a shared slot, obj->lock must be held, and |
185 | * reservation_object_reserve_shared_fence has been called. | 208 | * reservation_object_reserve_shared_fence has been called. |
186 | */ | 209 | */ |
@@ -200,6 +223,13 @@ void reservation_object_add_shared_fence(struct reservation_object *obj, | |||
200 | } | 223 | } |
201 | EXPORT_SYMBOL(reservation_object_add_shared_fence); | 224 | EXPORT_SYMBOL(reservation_object_add_shared_fence); |
202 | 225 | ||
226 | /** | ||
227 | * reservation_object_add_excl_fence - Add an exclusive fence. | ||
228 | * @obj: the reservation object | ||
229 | * @fence: the exclusive fence to add |||
230 | * | ||
231 | * Add a fence to the exclusive slot. The obj->lock must be held. | ||
232 | */ | ||
203 | void reservation_object_add_excl_fence(struct reservation_object *obj, | 233 | void reservation_object_add_excl_fence(struct reservation_object *obj, |
204 | struct fence *fence) | 234 | struct fence *fence) |
205 | { | 235 | { |
@@ -233,6 +263,18 @@ void reservation_object_add_excl_fence(struct reservation_object *obj, | |||
233 | } | 263 | } |
234 | EXPORT_SYMBOL(reservation_object_add_excl_fence); | 264 | EXPORT_SYMBOL(reservation_object_add_excl_fence); |
235 | 265 | ||
266 | /** | ||
267 | * reservation_object_get_fences_rcu - Get an object's shared and exclusive | ||
268 | * fences without update side lock held | ||
269 | * @obj: the reservation object | ||
270 | * @pfence_excl: the returned exclusive fence (or NULL) | ||
271 | * @pshared_count: the number of shared fences returned | ||
272 | * @pshared: the array of shared fence ptrs returned (array is krealloc'd to | ||
273 | * the required size, and must be freed by caller) | ||
274 | * | ||
275 | * RETURNS | ||
276 | * Zero or -errno | ||
277 | */ | ||
236 | int reservation_object_get_fences_rcu(struct reservation_object *obj, | 278 | int reservation_object_get_fences_rcu(struct reservation_object *obj, |
237 | struct fence **pfence_excl, | 279 | struct fence **pfence_excl, |
238 | unsigned *pshared_count, | 280 | unsigned *pshared_count, |
@@ -319,6 +361,18 @@ unlock: | |||
319 | } | 361 | } |
320 | EXPORT_SYMBOL_GPL(reservation_object_get_fences_rcu); | 362 | EXPORT_SYMBOL_GPL(reservation_object_get_fences_rcu); |
321 | 363 | ||
364 | /** | ||
365 | * reservation_object_wait_timeout_rcu - Wait on reservation's objects | ||
366 | * shared and/or exclusive fences. | ||
367 | * @obj: the reservation object | ||
368 | * @wait_all: if true, wait on all fences, else wait on just exclusive fence | ||
369 | * @intr: if true, do interruptible wait | ||
370 | * @timeout: timeout value in jiffies or zero to return immediately | ||
371 | * | ||
372 | * RETURNS | ||
373 | * Returns -ERESTARTSYS if interrupted, 0 if the wait timed out, or | ||
374 | * greater than zer on success. | ||
375 | */ | ||
322 | long reservation_object_wait_timeout_rcu(struct reservation_object *obj, | 376 | long reservation_object_wait_timeout_rcu(struct reservation_object *obj, |
323 | bool wait_all, bool intr, | 377 | bool wait_all, bool intr, |
324 | unsigned long timeout) | 378 | unsigned long timeout) |
@@ -416,6 +470,16 @@ reservation_object_test_signaled_single(struct fence *passed_fence) | |||
416 | return ret; | 470 | return ret; |
417 | } | 471 | } |
418 | 472 | ||
473 | /** | ||
474 | * reservation_object_test_signaled_rcu - Test if a reservation object's | ||
475 | * fences have been signaled. | ||
476 | * @obj: the reservation object | ||
477 | * @test_all: if true, test all fences, otherwise only test the exclusive | ||
478 | * fence | ||
479 | * | ||
480 | * RETURNS | ||
481 | * true if all fences signaled, else false | ||
482 | */ | ||
419 | bool reservation_object_test_signaled_rcu(struct reservation_object *obj, | 483 | bool reservation_object_test_signaled_rcu(struct reservation_object *obj, |
420 | bool test_all) | 484 | bool test_all) |
421 | { | 485 | { |
diff --git a/drivers/gpio/gpio-lpc32xx.c b/drivers/gpio/gpio-lpc32xx.c index d39014daeef9..fc5f197906ac 100644 --- a/drivers/gpio/gpio-lpc32xx.c +++ b/drivers/gpio/gpio-lpc32xx.c | |||
@@ -29,7 +29,6 @@ | |||
29 | 29 | ||
30 | #include <mach/hardware.h> | 30 | #include <mach/hardware.h> |
31 | #include <mach/platform.h> | 31 | #include <mach/platform.h> |
32 | #include <mach/irqs.h> | ||
33 | 32 | ||
34 | #define LPC32XX_GPIO_P3_INP_STATE _GPREG(0x000) | 33 | #define LPC32XX_GPIO_P3_INP_STATE _GPREG(0x000) |
35 | #define LPC32XX_GPIO_P3_OUTP_SET _GPREG(0x004) | 34 | #define LPC32XX_GPIO_P3_OUTP_SET _GPREG(0x004) |
@@ -371,61 +370,16 @@ static int lpc32xx_gpio_request(struct gpio_chip *chip, unsigned pin) | |||
371 | 370 | ||
372 | static int lpc32xx_gpio_to_irq_p01(struct gpio_chip *chip, unsigned offset) | 371 | static int lpc32xx_gpio_to_irq_p01(struct gpio_chip *chip, unsigned offset) |
373 | { | 372 | { |
374 | return IRQ_LPC32XX_P0_P1_IRQ; | 373 | return -ENXIO; |
375 | } | 374 | } |
376 | 375 | ||
377 | static const char lpc32xx_gpio_to_irq_gpio_p3_table[] = { | ||
378 | IRQ_LPC32XX_GPIO_00, | ||
379 | IRQ_LPC32XX_GPIO_01, | ||
380 | IRQ_LPC32XX_GPIO_02, | ||
381 | IRQ_LPC32XX_GPIO_03, | ||
382 | IRQ_LPC32XX_GPIO_04, | ||
383 | IRQ_LPC32XX_GPIO_05, | ||
384 | }; | ||
385 | |||
386 | static int lpc32xx_gpio_to_irq_gpio_p3(struct gpio_chip *chip, unsigned offset) | 376 | static int lpc32xx_gpio_to_irq_gpio_p3(struct gpio_chip *chip, unsigned offset) |
387 | { | 377 | { |
388 | if (offset < ARRAY_SIZE(lpc32xx_gpio_to_irq_gpio_p3_table)) | ||
389 | return lpc32xx_gpio_to_irq_gpio_p3_table[offset]; | ||
390 | return -ENXIO; | 378 | return -ENXIO; |
391 | } | 379 | } |
392 | 380 | ||
393 | static const char lpc32xx_gpio_to_irq_gpi_p3_table[] = { | ||
394 | IRQ_LPC32XX_GPI_00, | ||
395 | IRQ_LPC32XX_GPI_01, | ||
396 | IRQ_LPC32XX_GPI_02, | ||
397 | IRQ_LPC32XX_GPI_03, | ||
398 | IRQ_LPC32XX_GPI_04, | ||
399 | IRQ_LPC32XX_GPI_05, | ||
400 | IRQ_LPC32XX_GPI_06, | ||
401 | IRQ_LPC32XX_GPI_07, | ||
402 | IRQ_LPC32XX_GPI_08, | ||
403 | IRQ_LPC32XX_GPI_09, | ||
404 | -ENXIO, /* 10 */ | ||
405 | -ENXIO, /* 11 */ | ||
406 | -ENXIO, /* 12 */ | ||
407 | -ENXIO, /* 13 */ | ||
408 | -ENXIO, /* 14 */ | ||
409 | -ENXIO, /* 15 */ | ||
410 | -ENXIO, /* 16 */ | ||
411 | -ENXIO, /* 17 */ | ||
412 | -ENXIO, /* 18 */ | ||
413 | IRQ_LPC32XX_GPI_19, | ||
414 | -ENXIO, /* 20 */ | ||
415 | -ENXIO, /* 21 */ | ||
416 | -ENXIO, /* 22 */ | ||
417 | -ENXIO, /* 23 */ | ||
418 | -ENXIO, /* 24 */ | ||
419 | -ENXIO, /* 25 */ | ||
420 | -ENXIO, /* 26 */ | ||
421 | -ENXIO, /* 27 */ | ||
422 | IRQ_LPC32XX_GPI_28, | ||
423 | }; | ||
424 | |||
425 | static int lpc32xx_gpio_to_irq_gpi_p3(struct gpio_chip *chip, unsigned offset) | 381 | static int lpc32xx_gpio_to_irq_gpi_p3(struct gpio_chip *chip, unsigned offset) |
426 | { | 382 | { |
427 | if (offset < ARRAY_SIZE(lpc32xx_gpio_to_irq_gpi_p3_table)) | ||
428 | return lpc32xx_gpio_to_irq_gpi_p3_table[offset]; | ||
429 | return -ENXIO; | 383 | return -ENXIO; |
430 | } | 384 | } |
431 | 385 | ||
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c index d407f904a31c..24f60d28f0c0 100644 --- a/drivers/gpio/gpiolib.c +++ b/drivers/gpio/gpiolib.c | |||
@@ -20,6 +20,7 @@ | |||
20 | #include <linux/cdev.h> | 20 | #include <linux/cdev.h> |
21 | #include <linux/fs.h> | 21 | #include <linux/fs.h> |
22 | #include <linux/uaccess.h> | 22 | #include <linux/uaccess.h> |
23 | #include <linux/compat.h> | ||
23 | #include <uapi/linux/gpio.h> | 24 | #include <uapi/linux/gpio.h> |
24 | 25 | ||
25 | #include "gpiolib.h" | 26 | #include "gpiolib.h" |
@@ -316,7 +317,7 @@ static long gpio_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) | |||
316 | { | 317 | { |
317 | struct gpio_device *gdev = filp->private_data; | 318 | struct gpio_device *gdev = filp->private_data; |
318 | struct gpio_chip *chip = gdev->chip; | 319 | struct gpio_chip *chip = gdev->chip; |
319 | int __user *ip = (int __user *)arg; | 320 | void __user *ip = (void __user *)arg; |
320 | 321 | ||
321 | /* We fail any subsequent ioctl():s when the chip is gone */ | 322 | /* We fail any subsequent ioctl():s when the chip is gone */ |
322 | if (!chip) | 323 | if (!chip) |
@@ -388,6 +389,14 @@ static long gpio_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) | |||
388 | return -EINVAL; | 389 | return -EINVAL; |
389 | } | 390 | } |
390 | 391 | ||
392 | #ifdef CONFIG_COMPAT | ||
393 | static long gpio_ioctl_compat(struct file *filp, unsigned int cmd, | ||
394 | unsigned long arg) | ||
395 | { | ||
396 | return gpio_ioctl(filp, cmd, (unsigned long)compat_ptr(arg)); | ||
397 | } | ||
398 | #endif | ||
399 | |||
391 | /** | 400 | /** |
392 | * gpio_chrdev_open() - open the chardev for ioctl operations | 401 | * gpio_chrdev_open() - open the chardev for ioctl operations |
393 | * @inode: inode for this chardev | 402 | * @inode: inode for this chardev |
@@ -431,7 +440,9 @@ static const struct file_operations gpio_fileops = { | |||
431 | .owner = THIS_MODULE, | 440 | .owner = THIS_MODULE, |
432 | .llseek = noop_llseek, | 441 | .llseek = noop_llseek, |
433 | .unlocked_ioctl = gpio_ioctl, | 442 | .unlocked_ioctl = gpio_ioctl, |
434 | .compat_ioctl = gpio_ioctl, | 443 | #ifdef CONFIG_COMPAT |
444 | .compat_ioctl = gpio_ioctl_compat, | ||
445 | #endif | ||
435 | }; | 446 | }; |
436 | 447 | ||
437 | static void gpiodevice_release(struct device *dev) | 448 | static void gpiodevice_release(struct device *dev) |
@@ -618,6 +629,8 @@ int gpiochip_add_data(struct gpio_chip *chip, void *data) | |||
618 | goto err_free_label; | 629 | goto err_free_label; |
619 | } | 630 | } |
620 | 631 | ||
632 | spin_unlock_irqrestore(&gpio_lock, flags); | ||
633 | |||
621 | for (i = 0; i < chip->ngpio; i++) { | 634 | for (i = 0; i < chip->ngpio; i++) { |
622 | struct gpio_desc *desc = &gdev->descs[i]; | 635 | struct gpio_desc *desc = &gdev->descs[i]; |
623 | 636 | ||
@@ -649,8 +662,6 @@ int gpiochip_add_data(struct gpio_chip *chip, void *data) | |||
649 | } | 662 | } |
650 | } | 663 | } |
651 | 664 | ||
652 | spin_unlock_irqrestore(&gpio_lock, flags); | ||
653 | |||
654 | #ifdef CONFIG_PINCTRL | 665 | #ifdef CONFIG_PINCTRL |
655 | INIT_LIST_HEAD(&gdev->pin_ranges); | 666 | INIT_LIST_HEAD(&gdev->pin_ranges); |
656 | #endif | 667 | #endif |
@@ -1356,10 +1367,13 @@ done: | |||
1356 | /* | 1367 | /* |
1357 | * This descriptor validation needs to be inserted verbatim into each | 1368 | * This descriptor validation needs to be inserted verbatim into each |
1358 | * function taking a descriptor, so we need to use a preprocessor | 1369 | * function taking a descriptor, so we need to use a preprocessor |
1359 | * macro to avoid endless duplication. | 1370 | * macro to avoid endless duplication. If the desc is NULL it is an |
1371 | * optional GPIO and calls should just bail out. | ||
1360 | */ | 1372 | */ |
1361 | #define VALIDATE_DESC(desc) do { \ | 1373 | #define VALIDATE_DESC(desc) do { \ |
1362 | if (!desc || !desc->gdev) { \ | 1374 | if (!desc) \ |
1375 | return 0; \ | ||
1376 | if (!desc->gdev) { \ | ||
1363 | pr_warn("%s: invalid GPIO\n", __func__); \ | 1377 | pr_warn("%s: invalid GPIO\n", __func__); \ |
1364 | return -EINVAL; \ | 1378 | return -EINVAL; \ |
1365 | } \ | 1379 | } \ |
@@ -1370,7 +1384,9 @@ done: | |||
1370 | } } while (0) | 1384 | } } while (0) |
1371 | 1385 | ||
1372 | #define VALIDATE_DESC_VOID(desc) do { \ | 1386 | #define VALIDATE_DESC_VOID(desc) do { \ |
1373 | if (!desc || !desc->gdev) { \ | 1387 | if (!desc) \ |
1388 | return; \ | ||
1389 | if (!desc->gdev) { \ | ||
1374 | pr_warn("%s: invalid GPIO\n", __func__); \ | 1390 | pr_warn("%s: invalid GPIO\n", __func__); \ |
1375 | return; \ | 1391 | return; \ |
1376 | } \ | 1392 | } \ |
@@ -2066,17 +2082,30 @@ EXPORT_SYMBOL_GPL(gpiod_to_irq); | |||
2066 | */ | 2082 | */ |
2067 | int gpiochip_lock_as_irq(struct gpio_chip *chip, unsigned int offset) | 2083 | int gpiochip_lock_as_irq(struct gpio_chip *chip, unsigned int offset) |
2068 | { | 2084 | { |
2069 | if (offset >= chip->ngpio) | 2085 | struct gpio_desc *desc; |
2070 | return -EINVAL; | 2086 | |
2087 | desc = gpiochip_get_desc(chip, offset); | ||
2088 | if (IS_ERR(desc)) | ||
2089 | return PTR_ERR(desc); | ||
2090 | |||
2091 | /* Flush direction if something changed behind our back */ | ||
2092 | if (chip->get_direction) { | ||
2093 | int dir = chip->get_direction(chip, offset); | ||
2094 | |||
2095 | if (dir) | ||
2096 | clear_bit(FLAG_IS_OUT, &desc->flags); | ||
2097 | else | ||
2098 | set_bit(FLAG_IS_OUT, &desc->flags); | ||
2099 | } | ||
2071 | 2100 | ||
2072 | if (test_bit(FLAG_IS_OUT, &chip->gpiodev->descs[offset].flags)) { | 2101 | if (test_bit(FLAG_IS_OUT, &desc->flags)) { |
2073 | chip_err(chip, | 2102 | chip_err(chip, |
2074 | "%s: tried to flag a GPIO set as output for IRQ\n", | 2103 | "%s: tried to flag a GPIO set as output for IRQ\n", |
2075 | __func__); | 2104 | __func__); |
2076 | return -EIO; | 2105 | return -EIO; |
2077 | } | 2106 | } |
2078 | 2107 | ||
2079 | set_bit(FLAG_USED_AS_IRQ, &chip->gpiodev->descs[offset].flags); | 2108 | set_bit(FLAG_USED_AS_IRQ, &desc->flags); |
2080 | return 0; | 2109 | return 0; |
2081 | } | 2110 | } |
2082 | EXPORT_SYMBOL_GPL(gpiochip_lock_as_irq); | 2111 | EXPORT_SYMBOL_GPL(gpiochip_lock_as_irq); |
diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c index 6bd881be24ea..5eb1f9e17a98 100644 --- a/drivers/irqchip/irq-gic-v3-its.c +++ b/drivers/irqchip/irq-gic-v3-its.c | |||
@@ -41,6 +41,7 @@ | |||
41 | 41 | ||
42 | #define ITS_FLAGS_CMDQ_NEEDS_FLUSHING (1ULL << 0) | 42 | #define ITS_FLAGS_CMDQ_NEEDS_FLUSHING (1ULL << 0) |
43 | #define ITS_FLAGS_WORKAROUND_CAVIUM_22375 (1ULL << 1) | 43 | #define ITS_FLAGS_WORKAROUND_CAVIUM_22375 (1ULL << 1) |
44 | #define ITS_FLAGS_WORKAROUND_CAVIUM_23144 (1ULL << 2) | ||
44 | 45 | ||
45 | #define RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING (1 << 0) | 46 | #define RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING (1 << 0) |
46 | 47 | ||
@@ -82,6 +83,7 @@ struct its_node { | |||
82 | u64 flags; | 83 | u64 flags; |
83 | u32 ite_size; | 84 | u32 ite_size; |
84 | u32 device_ids; | 85 | u32 device_ids; |
86 | int numa_node; | ||
85 | }; | 87 | }; |
86 | 88 | ||
87 | #define ITS_ITT_ALIGN SZ_256 | 89 | #define ITS_ITT_ALIGN SZ_256 |
@@ -613,11 +615,23 @@ static void its_unmask_irq(struct irq_data *d) | |||
613 | static int its_set_affinity(struct irq_data *d, const struct cpumask *mask_val, | 615 | static int its_set_affinity(struct irq_data *d, const struct cpumask *mask_val, |
614 | bool force) | 616 | bool force) |
615 | { | 617 | { |
616 | unsigned int cpu = cpumask_any_and(mask_val, cpu_online_mask); | 618 | unsigned int cpu; |
619 | const struct cpumask *cpu_mask = cpu_online_mask; | ||
617 | struct its_device *its_dev = irq_data_get_irq_chip_data(d); | 620 | struct its_device *its_dev = irq_data_get_irq_chip_data(d); |
618 | struct its_collection *target_col; | 621 | struct its_collection *target_col; |
619 | u32 id = its_get_event_id(d); | 622 | u32 id = its_get_event_id(d); |
620 | 623 | ||
624 | /* lpi cannot be routed to a redistributor that is on a foreign node */ | ||
625 | if (its_dev->its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_23144) { | ||
626 | if (its_dev->its->numa_node >= 0) { | ||
627 | cpu_mask = cpumask_of_node(its_dev->its->numa_node); | ||
628 | if (!cpumask_intersects(mask_val, cpu_mask)) | ||
629 | return -EINVAL; | ||
630 | } | ||
631 | } | ||
632 | |||
633 | cpu = cpumask_any_and(mask_val, cpu_mask); | ||
634 | |||
621 | if (cpu >= nr_cpu_ids) | 635 | if (cpu >= nr_cpu_ids) |
622 | return -EINVAL; | 636 | return -EINVAL; |
623 | 637 | ||
@@ -1101,6 +1115,16 @@ static void its_cpu_init_collection(void) | |||
1101 | list_for_each_entry(its, &its_nodes, entry) { | 1115 | list_for_each_entry(its, &its_nodes, entry) { |
1102 | u64 target; | 1116 | u64 target; |
1103 | 1117 | ||
1118 | /* avoid cross node collections and its mapping */ | ||
1119 | if (its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_23144) { | ||
1120 | struct device_node *cpu_node; | ||
1121 | |||
1122 | cpu_node = of_get_cpu_node(cpu, NULL); | ||
1123 | if (its->numa_node != NUMA_NO_NODE && | ||
1124 | its->numa_node != of_node_to_nid(cpu_node)) | ||
1125 | continue; | ||
1126 | } | ||
1127 | |||
1104 | /* | 1128 | /* |
1105 | * We now have to bind each collection to its target | 1129 | * We now have to bind each collection to its target |
1106 | * redistributor. | 1130 | * redistributor. |
@@ -1351,9 +1375,14 @@ static void its_irq_domain_activate(struct irq_domain *domain, | |||
1351 | { | 1375 | { |
1352 | struct its_device *its_dev = irq_data_get_irq_chip_data(d); | 1376 | struct its_device *its_dev = irq_data_get_irq_chip_data(d); |
1353 | u32 event = its_get_event_id(d); | 1377 | u32 event = its_get_event_id(d); |
1378 | const struct cpumask *cpu_mask = cpu_online_mask; | ||
1379 | |||
1380 | /* get the cpu_mask of local node */ | ||
1381 | if (its_dev->its->numa_node >= 0) | ||
1382 | cpu_mask = cpumask_of_node(its_dev->its->numa_node); | ||
1354 | 1383 | ||
1355 | /* Bind the LPI to the first possible CPU */ | 1384 | /* Bind the LPI to the first possible CPU */ |
1356 | its_dev->event_map.col_map[event] = cpumask_first(cpu_online_mask); | 1385 | its_dev->event_map.col_map[event] = cpumask_first(cpu_mask); |
1357 | 1386 | ||
1358 | /* Map the GIC IRQ and event to the device */ | 1387 | /* Map the GIC IRQ and event to the device */ |
1359 | its_send_mapvi(its_dev, d->hwirq, event); | 1388 | its_send_mapvi(its_dev, d->hwirq, event); |
@@ -1443,6 +1472,13 @@ static void __maybe_unused its_enable_quirk_cavium_22375(void *data) | |||
1443 | its->flags |= ITS_FLAGS_WORKAROUND_CAVIUM_22375; | 1472 | its->flags |= ITS_FLAGS_WORKAROUND_CAVIUM_22375; |
1444 | } | 1473 | } |
1445 | 1474 | ||
1475 | static void __maybe_unused its_enable_quirk_cavium_23144(void *data) | ||
1476 | { | ||
1477 | struct its_node *its = data; | ||
1478 | |||
1479 | its->flags |= ITS_FLAGS_WORKAROUND_CAVIUM_23144; | ||
1480 | } | ||
1481 | |||
1446 | static const struct gic_quirk its_quirks[] = { | 1482 | static const struct gic_quirk its_quirks[] = { |
1447 | #ifdef CONFIG_CAVIUM_ERRATUM_22375 | 1483 | #ifdef CONFIG_CAVIUM_ERRATUM_22375 |
1448 | { | 1484 | { |
@@ -1452,6 +1488,14 @@ static const struct gic_quirk its_quirks[] = { | |||
1452 | .init = its_enable_quirk_cavium_22375, | 1488 | .init = its_enable_quirk_cavium_22375, |
1453 | }, | 1489 | }, |
1454 | #endif | 1490 | #endif |
1491 | #ifdef CONFIG_CAVIUM_ERRATUM_23144 | ||
1492 | { | ||
1493 | .desc = "ITS: Cavium erratum 23144", | ||
1494 | .iidr = 0xa100034c, /* ThunderX pass 1.x */ | ||
1495 | .mask = 0xffff0fff, | ||
1496 | .init = its_enable_quirk_cavium_23144, | ||
1497 | }, | ||
1498 | #endif | ||
1455 | { | 1499 | { |
1456 | } | 1500 | } |
1457 | }; | 1501 | }; |
@@ -1514,6 +1558,7 @@ static int __init its_probe(struct device_node *node, | |||
1514 | its->base = its_base; | 1558 | its->base = its_base; |
1515 | its->phys_base = res.start; | 1559 | its->phys_base = res.start; |
1516 | its->ite_size = ((readl_relaxed(its_base + GITS_TYPER) >> 4) & 0xf) + 1; | 1560 | its->ite_size = ((readl_relaxed(its_base + GITS_TYPER) >> 4) & 0xf) + 1; |
1561 | its->numa_node = of_node_to_nid(node); | ||
1517 | 1562 | ||
1518 | its->cmd_base = kzalloc(ITS_CMD_QUEUE_SZ, GFP_KERNEL); | 1563 | its->cmd_base = kzalloc(ITS_CMD_QUEUE_SZ, GFP_KERNEL); |
1519 | if (!its->cmd_base) { | 1564 | if (!its->cmd_base) { |
diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c index fb042ba9a3db..2c5ba0e704bf 100644 --- a/drivers/irqchip/irq-gic-v3.c +++ b/drivers/irqchip/irq-gic-v3.c | |||
@@ -155,7 +155,7 @@ static void gic_enable_redist(bool enable) | |||
155 | 155 | ||
156 | while (count--) { | 156 | while (count--) { |
157 | val = readl_relaxed(rbase + GICR_WAKER); | 157 | val = readl_relaxed(rbase + GICR_WAKER); |
158 | if (enable ^ (val & GICR_WAKER_ChildrenAsleep)) | 158 | if (enable ^ (bool)(val & GICR_WAKER_ChildrenAsleep)) |
159 | break; | 159 | break; |
160 | cpu_relax(); | 160 | cpu_relax(); |
161 | udelay(1); | 161 | udelay(1); |
diff --git a/drivers/irqchip/irq-pic32-evic.c b/drivers/irqchip/irq-pic32-evic.c index e7155db01d55..73addb4b625b 100644 --- a/drivers/irqchip/irq-pic32-evic.c +++ b/drivers/irqchip/irq-pic32-evic.c | |||
@@ -91,7 +91,7 @@ static int pic32_set_type_edge(struct irq_data *data, | |||
91 | /* set polarity for external interrupts only */ | 91 | /* set polarity for external interrupts only */ |
92 | for (i = 0; i < ARRAY_SIZE(priv->ext_irqs); i++) { | 92 | for (i = 0; i < ARRAY_SIZE(priv->ext_irqs); i++) { |
93 | if (priv->ext_irqs[i] == data->hwirq) { | 93 | if (priv->ext_irqs[i] == data->hwirq) { |
94 | ret = pic32_set_ext_polarity(i + 1, flow_type); | 94 | ret = pic32_set_ext_polarity(i, flow_type); |
95 | if (ret) | 95 | if (ret) |
96 | return ret; | 96 | return ret; |
97 | } | 97 | } |
diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c index c984321d1881..5d438ad3ee32 100644 --- a/drivers/mmc/core/mmc.c +++ b/drivers/mmc/core/mmc.c | |||
@@ -1276,7 +1276,7 @@ static int mmc_select_hs200(struct mmc_card *card) | |||
1276 | * switch to HS200 mode if bus width is set successfully. | 1276 | * switch to HS200 mode if bus width is set successfully. |
1277 | */ | 1277 | */ |
1278 | err = mmc_select_bus_width(card); | 1278 | err = mmc_select_bus_width(card); |
1279 | if (!err) { | 1279 | if (err >= 0) { |
1280 | val = EXT_CSD_TIMING_HS200 | | 1280 | val = EXT_CSD_TIMING_HS200 | |
1281 | card->drive_strength << EXT_CSD_DRV_STR_SHIFT; | 1281 | card->drive_strength << EXT_CSD_DRV_STR_SHIFT; |
1282 | err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, | 1282 | err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, |
@@ -1583,7 +1583,7 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr, | |||
1583 | } else if (mmc_card_hs(card)) { | 1583 | } else if (mmc_card_hs(card)) { |
1584 | /* Select the desired bus width optionally */ | 1584 | /* Select the desired bus width optionally */ |
1585 | err = mmc_select_bus_width(card); | 1585 | err = mmc_select_bus_width(card); |
1586 | if (!err) { | 1586 | if (err >= 0) { |
1587 | err = mmc_select_hs_ddr(card); | 1587 | err = mmc_select_hs_ddr(card); |
1588 | if (err) | 1588 | if (err) |
1589 | goto free_card; | 1589 | goto free_card; |
diff --git a/drivers/mmc/host/sunxi-mmc.c b/drivers/mmc/host/sunxi-mmc.c index 7fc8b7aa83f0..2ee4c21ec55e 100644 --- a/drivers/mmc/host/sunxi-mmc.c +++ b/drivers/mmc/host/sunxi-mmc.c | |||
@@ -970,8 +970,8 @@ static const struct sunxi_mmc_clk_delay sun9i_mmc_clk_delays[] = { | |||
970 | [SDXC_CLK_400K] = { .output = 180, .sample = 180 }, | 970 | [SDXC_CLK_400K] = { .output = 180, .sample = 180 }, |
971 | [SDXC_CLK_25M] = { .output = 180, .sample = 75 }, | 971 | [SDXC_CLK_25M] = { .output = 180, .sample = 75 }, |
972 | [SDXC_CLK_50M] = { .output = 150, .sample = 120 }, | 972 | [SDXC_CLK_50M] = { .output = 150, .sample = 120 }, |
973 | [SDXC_CLK_50M_DDR] = { .output = 90, .sample = 120 }, | 973 | [SDXC_CLK_50M_DDR] = { .output = 54, .sample = 36 }, |
974 | [SDXC_CLK_50M_DDR_8BIT] = { .output = 90, .sample = 120 }, | 974 | [SDXC_CLK_50M_DDR_8BIT] = { .output = 72, .sample = 72 }, |
975 | }; | 975 | }; |
976 | 976 | ||
977 | static int sunxi_mmc_resource_request(struct sunxi_mmc_host *host, | 977 | static int sunxi_mmc_resource_request(struct sunxi_mmc_host *host, |
@@ -1129,11 +1129,6 @@ static int sunxi_mmc_probe(struct platform_device *pdev) | |||
1129 | MMC_CAP_1_8V_DDR | | 1129 | MMC_CAP_1_8V_DDR | |
1130 | MMC_CAP_ERASE | MMC_CAP_SDIO_IRQ; | 1130 | MMC_CAP_ERASE | MMC_CAP_SDIO_IRQ; |
1131 | 1131 | ||
1132 | /* TODO MMC DDR is not working on A80 */ | ||
1133 | if (of_device_is_compatible(pdev->dev.of_node, | ||
1134 | "allwinner,sun9i-a80-mmc")) | ||
1135 | mmc->caps &= ~MMC_CAP_1_8V_DDR; | ||
1136 | |||
1137 | ret = mmc_of_parse(mmc); | 1132 | ret = mmc_of_parse(mmc); |
1138 | if (ret) | 1133 | if (ret) |
1139 | goto error_free_dma; | 1134 | goto error_free_dma; |
diff --git a/drivers/net/ethernet/arc/emac_mdio.c b/drivers/net/ethernet/arc/emac_mdio.c index 16419f550eff..058460bdd5a6 100644 --- a/drivers/net/ethernet/arc/emac_mdio.c +++ b/drivers/net/ethernet/arc/emac_mdio.c | |||
@@ -141,7 +141,7 @@ int arc_mdio_probe(struct arc_emac_priv *priv) | |||
141 | priv->bus = bus; | 141 | priv->bus = bus; |
142 | bus->priv = priv; | 142 | bus->priv = priv; |
143 | bus->parent = priv->dev; | 143 | bus->parent = priv->dev; |
144 | bus->name = "Synopsys MII Bus", | 144 | bus->name = "Synopsys MII Bus"; |
145 | bus->read = &arc_mdio_read; | 145 | bus->read = &arc_mdio_read; |
146 | bus->write = &arc_mdio_write; | 146 | bus->write = &arc_mdio_write; |
147 | bus->reset = &arc_mdio_reset; | 147 | bus->reset = &arc_mdio_reset; |
diff --git a/drivers/net/ethernet/atheros/alx/alx.h b/drivers/net/ethernet/atheros/alx/alx.h index 8fc93c5f6abc..d02c4240b7df 100644 --- a/drivers/net/ethernet/atheros/alx/alx.h +++ b/drivers/net/ethernet/atheros/alx/alx.h | |||
@@ -96,6 +96,10 @@ struct alx_priv { | |||
96 | unsigned int rx_ringsz; | 96 | unsigned int rx_ringsz; |
97 | unsigned int rxbuf_size; | 97 | unsigned int rxbuf_size; |
98 | 98 | ||
99 | struct page *rx_page; | ||
100 | unsigned int rx_page_offset; | ||
101 | unsigned int rx_frag_size; | ||
102 | |||
99 | struct napi_struct napi; | 103 | struct napi_struct napi; |
100 | struct alx_tx_queue txq; | 104 | struct alx_tx_queue txq; |
101 | struct alx_rx_queue rxq; | 105 | struct alx_rx_queue rxq; |
diff --git a/drivers/net/ethernet/atheros/alx/main.c b/drivers/net/ethernet/atheros/alx/main.c index 9fe8b5e310d1..c98acdc0d14f 100644 --- a/drivers/net/ethernet/atheros/alx/main.c +++ b/drivers/net/ethernet/atheros/alx/main.c | |||
@@ -70,6 +70,35 @@ static void alx_free_txbuf(struct alx_priv *alx, int entry) | |||
70 | } | 70 | } |
71 | } | 71 | } |
72 | 72 | ||
73 | static struct sk_buff *alx_alloc_skb(struct alx_priv *alx, gfp_t gfp) | ||
74 | { | ||
75 | struct sk_buff *skb; | ||
76 | struct page *page; | ||
77 | |||
78 | if (alx->rx_frag_size > PAGE_SIZE) | ||
79 | return __netdev_alloc_skb(alx->dev, alx->rxbuf_size, gfp); | ||
80 | |||
81 | page = alx->rx_page; | ||
82 | if (!page) { | ||
83 | alx->rx_page = page = alloc_page(gfp); | ||
84 | if (unlikely(!page)) | ||
85 | return NULL; | ||
86 | alx->rx_page_offset = 0; | ||
87 | } | ||
88 | |||
89 | skb = build_skb(page_address(page) + alx->rx_page_offset, | ||
90 | alx->rx_frag_size); | ||
91 | if (likely(skb)) { | ||
92 | alx->rx_page_offset += alx->rx_frag_size; | ||
93 | if (alx->rx_page_offset >= PAGE_SIZE) | ||
94 | alx->rx_page = NULL; | ||
95 | else | ||
96 | get_page(page); | ||
97 | } | ||
98 | return skb; | ||
99 | } | ||
100 | |||
101 | |||
73 | static int alx_refill_rx_ring(struct alx_priv *alx, gfp_t gfp) | 102 | static int alx_refill_rx_ring(struct alx_priv *alx, gfp_t gfp) |
74 | { | 103 | { |
75 | struct alx_rx_queue *rxq = &alx->rxq; | 104 | struct alx_rx_queue *rxq = &alx->rxq; |
@@ -86,7 +115,7 @@ static int alx_refill_rx_ring(struct alx_priv *alx, gfp_t gfp) | |||
86 | while (!cur_buf->skb && next != rxq->read_idx) { | 115 | while (!cur_buf->skb && next != rxq->read_idx) { |
87 | struct alx_rfd *rfd = &rxq->rfd[cur]; | 116 | struct alx_rfd *rfd = &rxq->rfd[cur]; |
88 | 117 | ||
89 | skb = __netdev_alloc_skb(alx->dev, alx->rxbuf_size, gfp); | 118 | skb = alx_alloc_skb(alx, gfp); |
90 | if (!skb) | 119 | if (!skb) |
91 | break; | 120 | break; |
92 | dma = dma_map_single(&alx->hw.pdev->dev, | 121 | dma = dma_map_single(&alx->hw.pdev->dev, |
@@ -124,6 +153,7 @@ static int alx_refill_rx_ring(struct alx_priv *alx, gfp_t gfp) | |||
124 | alx_write_mem16(&alx->hw, ALX_RFD_PIDX, cur); | 153 | alx_write_mem16(&alx->hw, ALX_RFD_PIDX, cur); |
125 | } | 154 | } |
126 | 155 | ||
156 | |||
127 | return count; | 157 | return count; |
128 | } | 158 | } |
129 | 159 | ||
@@ -592,6 +622,11 @@ static void alx_free_rings(struct alx_priv *alx) | |||
592 | kfree(alx->txq.bufs); | 622 | kfree(alx->txq.bufs); |
593 | kfree(alx->rxq.bufs); | 623 | kfree(alx->rxq.bufs); |
594 | 624 | ||
625 | if (alx->rx_page) { | ||
626 | put_page(alx->rx_page); | ||
627 | alx->rx_page = NULL; | ||
628 | } | ||
629 | |||
595 | dma_free_coherent(&alx->hw.pdev->dev, | 630 | dma_free_coherent(&alx->hw.pdev->dev, |
596 | alx->descmem.size, | 631 | alx->descmem.size, |
597 | alx->descmem.virt, | 632 | alx->descmem.virt, |
@@ -646,6 +681,7 @@ static int alx_request_irq(struct alx_priv *alx) | |||
646 | alx->dev->name, alx); | 681 | alx->dev->name, alx); |
647 | if (!err) | 682 | if (!err) |
648 | goto out; | 683 | goto out; |
684 | |||
649 | /* fall back to legacy interrupt */ | 685 | /* fall back to legacy interrupt */ |
650 | pci_disable_msi(alx->hw.pdev); | 686 | pci_disable_msi(alx->hw.pdev); |
651 | } | 687 | } |
@@ -689,6 +725,7 @@ static int alx_init_sw(struct alx_priv *alx) | |||
689 | struct pci_dev *pdev = alx->hw.pdev; | 725 | struct pci_dev *pdev = alx->hw.pdev; |
690 | struct alx_hw *hw = &alx->hw; | 726 | struct alx_hw *hw = &alx->hw; |
691 | int err; | 727 | int err; |
728 | unsigned int head_size; | ||
692 | 729 | ||
693 | err = alx_identify_hw(alx); | 730 | err = alx_identify_hw(alx); |
694 | if (err) { | 731 | if (err) { |
@@ -704,7 +741,12 @@ static int alx_init_sw(struct alx_priv *alx) | |||
704 | 741 | ||
705 | hw->smb_timer = 400; | 742 | hw->smb_timer = 400; |
706 | hw->mtu = alx->dev->mtu; | 743 | hw->mtu = alx->dev->mtu; |
744 | |||
707 | alx->rxbuf_size = ALX_MAX_FRAME_LEN(hw->mtu); | 745 | alx->rxbuf_size = ALX_MAX_FRAME_LEN(hw->mtu); |
746 | head_size = SKB_DATA_ALIGN(alx->rxbuf_size + NET_SKB_PAD) + | ||
747 | SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); | ||
748 | alx->rx_frag_size = roundup_pow_of_two(head_size); | ||
749 | |||
708 | alx->tx_ringsz = 256; | 750 | alx->tx_ringsz = 256; |
709 | alx->rx_ringsz = 512; | 751 | alx->rx_ringsz = 512; |
710 | hw->imt = 200; | 752 | hw->imt = 200; |
@@ -806,6 +848,7 @@ static int alx_change_mtu(struct net_device *netdev, int mtu) | |||
806 | { | 848 | { |
807 | struct alx_priv *alx = netdev_priv(netdev); | 849 | struct alx_priv *alx = netdev_priv(netdev); |
808 | int max_frame = ALX_MAX_FRAME_LEN(mtu); | 850 | int max_frame = ALX_MAX_FRAME_LEN(mtu); |
851 | unsigned int head_size; | ||
809 | 852 | ||
810 | if ((max_frame < ALX_MIN_FRAME_SIZE) || | 853 | if ((max_frame < ALX_MIN_FRAME_SIZE) || |
811 | (max_frame > ALX_MAX_FRAME_SIZE)) | 854 | (max_frame > ALX_MAX_FRAME_SIZE)) |
@@ -817,6 +860,9 @@ static int alx_change_mtu(struct net_device *netdev, int mtu) | |||
817 | netdev->mtu = mtu; | 860 | netdev->mtu = mtu; |
818 | alx->hw.mtu = mtu; | 861 | alx->hw.mtu = mtu; |
819 | alx->rxbuf_size = max(max_frame, ALX_DEF_RXBUF_SIZE); | 862 | alx->rxbuf_size = max(max_frame, ALX_DEF_RXBUF_SIZE); |
863 | head_size = SKB_DATA_ALIGN(alx->rxbuf_size + NET_SKB_PAD) + | ||
864 | SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); | ||
865 | alx->rx_frag_size = roundup_pow_of_two(head_size); | ||
820 | netdev_update_features(netdev); | 866 | netdev_update_features(netdev); |
821 | if (netif_running(netdev)) | 867 | if (netif_running(netdev)) |
822 | alx_reinit(alx); | 868 | alx_reinit(alx); |
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c index 0a5b770cefaa..c5fe915870ad 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c | |||
@@ -13941,14 +13941,14 @@ static int bnx2x_init_one(struct pci_dev *pdev, | |||
13941 | bp->doorbells = bnx2x_vf_doorbells(bp); | 13941 | bp->doorbells = bnx2x_vf_doorbells(bp); |
13942 | rc = bnx2x_vf_pci_alloc(bp); | 13942 | rc = bnx2x_vf_pci_alloc(bp); |
13943 | if (rc) | 13943 | if (rc) |
13944 | goto init_one_exit; | 13944 | goto init_one_freemem; |
13945 | } else { | 13945 | } else { |
13946 | doorbell_size = BNX2X_L2_MAX_CID(bp) * (1 << BNX2X_DB_SHIFT); | 13946 | doorbell_size = BNX2X_L2_MAX_CID(bp) * (1 << BNX2X_DB_SHIFT); |
13947 | if (doorbell_size > pci_resource_len(pdev, 2)) { | 13947 | if (doorbell_size > pci_resource_len(pdev, 2)) { |
13948 | dev_err(&bp->pdev->dev, | 13948 | dev_err(&bp->pdev->dev, |
13949 | "Cannot map doorbells, bar size too small, aborting\n"); | 13949 | "Cannot map doorbells, bar size too small, aborting\n"); |
13950 | rc = -ENOMEM; | 13950 | rc = -ENOMEM; |
13951 | goto init_one_exit; | 13951 | goto init_one_freemem; |
13952 | } | 13952 | } |
13953 | bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2), | 13953 | bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2), |
13954 | doorbell_size); | 13954 | doorbell_size); |
@@ -13957,19 +13957,19 @@ static int bnx2x_init_one(struct pci_dev *pdev, | |||
13957 | dev_err(&bp->pdev->dev, | 13957 | dev_err(&bp->pdev->dev, |
13958 | "Cannot map doorbell space, aborting\n"); | 13958 | "Cannot map doorbell space, aborting\n"); |
13959 | rc = -ENOMEM; | 13959 | rc = -ENOMEM; |
13960 | goto init_one_exit; | 13960 | goto init_one_freemem; |
13961 | } | 13961 | } |
13962 | 13962 | ||
13963 | if (IS_VF(bp)) { | 13963 | if (IS_VF(bp)) { |
13964 | rc = bnx2x_vfpf_acquire(bp, tx_count, rx_count); | 13964 | rc = bnx2x_vfpf_acquire(bp, tx_count, rx_count); |
13965 | if (rc) | 13965 | if (rc) |
13966 | goto init_one_exit; | 13966 | goto init_one_freemem; |
13967 | } | 13967 | } |
13968 | 13968 | ||
13969 | /* Enable SRIOV if capability found in configuration space */ | 13969 | /* Enable SRIOV if capability found in configuration space */ |
13970 | rc = bnx2x_iov_init_one(bp, int_mode, BNX2X_MAX_NUM_OF_VFS); | 13970 | rc = bnx2x_iov_init_one(bp, int_mode, BNX2X_MAX_NUM_OF_VFS); |
13971 | if (rc) | 13971 | if (rc) |
13972 | goto init_one_exit; | 13972 | goto init_one_freemem; |
13973 | 13973 | ||
13974 | /* calc qm_cid_count */ | 13974 | /* calc qm_cid_count */ |
13975 | bp->qm_cid_count = bnx2x_set_qm_cid_count(bp); | 13975 | bp->qm_cid_count = bnx2x_set_qm_cid_count(bp); |
@@ -13988,7 +13988,7 @@ static int bnx2x_init_one(struct pci_dev *pdev, | |||
13988 | rc = bnx2x_set_int_mode(bp); | 13988 | rc = bnx2x_set_int_mode(bp); |
13989 | if (rc) { | 13989 | if (rc) { |
13990 | dev_err(&pdev->dev, "Cannot set interrupts\n"); | 13990 | dev_err(&pdev->dev, "Cannot set interrupts\n"); |
13991 | goto init_one_exit; | 13991 | goto init_one_freemem; |
13992 | } | 13992 | } |
13993 | BNX2X_DEV_INFO("set interrupts successfully\n"); | 13993 | BNX2X_DEV_INFO("set interrupts successfully\n"); |
13994 | 13994 | ||
@@ -13996,7 +13996,7 @@ static int bnx2x_init_one(struct pci_dev *pdev, | |||
13996 | rc = register_netdev(dev); | 13996 | rc = register_netdev(dev); |
13997 | if (rc) { | 13997 | if (rc) { |
13998 | dev_err(&pdev->dev, "Cannot register net device\n"); | 13998 | dev_err(&pdev->dev, "Cannot register net device\n"); |
13999 | goto init_one_exit; | 13999 | goto init_one_freemem; |
14000 | } | 14000 | } |
14001 | BNX2X_DEV_INFO("device name after netdev register %s\n", dev->name); | 14001 | BNX2X_DEV_INFO("device name after netdev register %s\n", dev->name); |
14002 | 14002 | ||
@@ -14029,6 +14029,9 @@ static int bnx2x_init_one(struct pci_dev *pdev, | |||
14029 | 14029 | ||
14030 | return 0; | 14030 | return 0; |
14031 | 14031 | ||
14032 | init_one_freemem: | ||
14033 | bnx2x_free_mem_bp(bp); | ||
14034 | |||
14032 | init_one_exit: | 14035 | init_one_exit: |
14033 | bnx2x_disable_pcie_error_reporting(bp); | 14036 | bnx2x_disable_pcie_error_reporting(bp); |
14034 | 14037 | ||
diff --git a/drivers/net/ethernet/ezchip/nps_enet.c b/drivers/net/ethernet/ezchip/nps_enet.c index 085f9125cf42..06f031715b57 100644 --- a/drivers/net/ethernet/ezchip/nps_enet.c +++ b/drivers/net/ethernet/ezchip/nps_enet.c | |||
@@ -205,8 +205,10 @@ static int nps_enet_poll(struct napi_struct *napi, int budget) | |||
205 | * re-adding ourselves to the poll list. | 205 | * re-adding ourselves to the poll list. |
206 | */ | 206 | */ |
207 | 207 | ||
208 | if (priv->tx_skb && !tx_ctrl_ct) | 208 | if (priv->tx_skb && !tx_ctrl_ct) { |
209 | nps_enet_reg_set(priv, NPS_ENET_REG_BUF_INT_ENABLE, 0); | ||
209 | napi_reschedule(napi); | 210 | napi_reschedule(napi); |
211 | } | ||
210 | } | 212 | } |
211 | 213 | ||
212 | return work_done; | 214 | return work_done; |
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index ca2cccc594fd..3c0255e98535 100644 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c | |||
@@ -1197,10 +1197,8 @@ fec_enet_tx_queue(struct net_device *ndev, u16 queue_id) | |||
1197 | fec16_to_cpu(bdp->cbd_datlen), | 1197 | fec16_to_cpu(bdp->cbd_datlen), |
1198 | DMA_TO_DEVICE); | 1198 | DMA_TO_DEVICE); |
1199 | bdp->cbd_bufaddr = cpu_to_fec32(0); | 1199 | bdp->cbd_bufaddr = cpu_to_fec32(0); |
1200 | if (!skb) { | 1200 | if (!skb) |
1201 | bdp = fec_enet_get_nextdesc(bdp, &txq->bd); | 1201 | goto skb_done; |
1202 | continue; | ||
1203 | } | ||
1204 | 1202 | ||
1205 | /* Check for errors. */ | 1203 | /* Check for errors. */ |
1206 | if (status & (BD_ENET_TX_HB | BD_ENET_TX_LC | | 1204 | if (status & (BD_ENET_TX_HB | BD_ENET_TX_LC | |
@@ -1239,7 +1237,7 @@ fec_enet_tx_queue(struct net_device *ndev, u16 queue_id) | |||
1239 | 1237 | ||
1240 | /* Free the sk buffer associated with this last transmit */ | 1238 | /* Free the sk buffer associated with this last transmit */ |
1241 | dev_kfree_skb_any(skb); | 1239 | dev_kfree_skb_any(skb); |
1242 | 1240 | skb_done: | |
1243 | /* Make sure the update to bdp and tx_skbuff are performed | 1241 | /* Make sure the update to bdp and tx_skbuff are performed |
1244 | * before dirty_tx | 1242 | * before dirty_tx |
1245 | */ | 1243 | */ |
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c index 3d746c887873..67a648c7d3a9 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c | |||
@@ -46,7 +46,6 @@ static u32 hns_nic_get_link(struct net_device *net_dev) | |||
46 | u32 link_stat = priv->link; | 46 | u32 link_stat = priv->link; |
47 | struct hnae_handle *h; | 47 | struct hnae_handle *h; |
48 | 48 | ||
49 | assert(priv && priv->ae_handle); | ||
50 | h = priv->ae_handle; | 49 | h = priv->ae_handle; |
51 | 50 | ||
52 | if (priv->phy) { | 51 | if (priv->phy) { |
@@ -646,8 +645,6 @@ static void hns_nic_get_drvinfo(struct net_device *net_dev, | |||
646 | { | 645 | { |
647 | struct hns_nic_priv *priv = netdev_priv(net_dev); | 646 | struct hns_nic_priv *priv = netdev_priv(net_dev); |
648 | 647 | ||
649 | assert(priv); | ||
650 | |||
651 | strncpy(drvinfo->version, HNAE_DRIVER_VERSION, | 648 | strncpy(drvinfo->version, HNAE_DRIVER_VERSION, |
652 | sizeof(drvinfo->version)); | 649 | sizeof(drvinfo->version)); |
653 | drvinfo->version[sizeof(drvinfo->version) - 1] = '\0'; | 650 | drvinfo->version[sizeof(drvinfo->version) - 1] = '\0'; |
@@ -720,8 +717,6 @@ static int hns_set_pauseparam(struct net_device *net_dev, | |||
720 | struct hnae_handle *h; | 717 | struct hnae_handle *h; |
721 | struct hnae_ae_ops *ops; | 718 | struct hnae_ae_ops *ops; |
722 | 719 | ||
723 | assert(priv || priv->ae_handle); | ||
724 | |||
725 | h = priv->ae_handle; | 720 | h = priv->ae_handle; |
726 | ops = h->dev->ops; | 721 | ops = h->dev->ops; |
727 | 722 | ||
@@ -780,8 +775,6 @@ static int hns_set_coalesce(struct net_device *net_dev, | |||
780 | struct hnae_ae_ops *ops; | 775 | struct hnae_ae_ops *ops; |
781 | int ret; | 776 | int ret; |
782 | 777 | ||
783 | assert(priv || priv->ae_handle); | ||
784 | |||
785 | ops = priv->ae_handle->dev->ops; | 778 | ops = priv->ae_handle->dev->ops; |
786 | 779 | ||
787 | if (ec->tx_coalesce_usecs != ec->rx_coalesce_usecs) | 780 | if (ec->tx_coalesce_usecs != ec->rx_coalesce_usecs) |
@@ -1111,8 +1104,6 @@ void hns_get_regs(struct net_device *net_dev, struct ethtool_regs *cmd, | |||
1111 | struct hns_nic_priv *priv = netdev_priv(net_dev); | 1104 | struct hns_nic_priv *priv = netdev_priv(net_dev); |
1112 | struct hnae_ae_ops *ops; | 1105 | struct hnae_ae_ops *ops; |
1113 | 1106 | ||
1114 | assert(priv || priv->ae_handle); | ||
1115 | |||
1116 | ops = priv->ae_handle->dev->ops; | 1107 | ops = priv->ae_handle->dev->ops; |
1117 | 1108 | ||
1118 | cmd->version = HNS_CHIP_VERSION; | 1109 | cmd->version = HNS_CHIP_VERSION; |
@@ -1135,8 +1126,6 @@ static int hns_get_regs_len(struct net_device *net_dev) | |||
1135 | struct hns_nic_priv *priv = netdev_priv(net_dev); | 1126 | struct hns_nic_priv *priv = netdev_priv(net_dev); |
1136 | struct hnae_ae_ops *ops; | 1127 | struct hnae_ae_ops *ops; |
1137 | 1128 | ||
1138 | assert(priv || priv->ae_handle); | ||
1139 | |||
1140 | ops = priv->ae_handle->dev->ops; | 1129 | ops = priv->ae_handle->dev->ops; |
1141 | if (!ops->get_regs_len) { | 1130 | if (!ops->get_regs_len) { |
1142 | netdev_err(net_dev, "ops->get_regs_len is null!\n"); | 1131 | netdev_err(net_dev, "ops->get_regs_len is null!\n"); |
diff --git a/drivers/net/ethernet/marvell/mvneta_bm.c b/drivers/net/ethernet/marvell/mvneta_bm.c index 01fccec632ec..466939f8f0cf 100644 --- a/drivers/net/ethernet/marvell/mvneta_bm.c +++ b/drivers/net/ethernet/marvell/mvneta_bm.c | |||
@@ -189,6 +189,7 @@ struct mvneta_bm_pool *mvneta_bm_pool_use(struct mvneta_bm *priv, u8 pool_id, | |||
189 | SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); | 189 | SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); |
190 | hwbm_pool->construct = mvneta_bm_construct; | 190 | hwbm_pool->construct = mvneta_bm_construct; |
191 | hwbm_pool->priv = new_pool; | 191 | hwbm_pool->priv = new_pool; |
192 | spin_lock_init(&hwbm_pool->lock); | ||
192 | 193 | ||
193 | /* Create new pool */ | 194 | /* Create new pool */ |
194 | err = mvneta_bm_pool_create(priv, new_pool); | 195 | err = mvneta_bm_pool_create(priv, new_pool); |
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c index c761194bb323..fc95affaf76b 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c | |||
@@ -362,7 +362,7 @@ static void mlx4_en_get_ethtool_stats(struct net_device *dev, | |||
362 | 362 | ||
363 | for (i = 0; i < NUM_MAIN_STATS; i++, bitmap_iterator_inc(&it)) | 363 | for (i = 0; i < NUM_MAIN_STATS; i++, bitmap_iterator_inc(&it)) |
364 | if (bitmap_iterator_test(&it)) | 364 | if (bitmap_iterator_test(&it)) |
365 | data[index++] = ((unsigned long *)&priv->stats)[i]; | 365 | data[index++] = ((unsigned long *)&dev->stats)[i]; |
366 | 366 | ||
367 | for (i = 0; i < NUM_PORT_STATS; i++, bitmap_iterator_inc(&it)) | 367 | for (i = 0; i < NUM_PORT_STATS; i++, bitmap_iterator_inc(&it)) |
368 | if (bitmap_iterator_test(&it)) | 368 | if (bitmap_iterator_test(&it)) |
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c index 92e0624f4cf0..19ceced6736c 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c | |||
@@ -1296,15 +1296,16 @@ static void mlx4_en_tx_timeout(struct net_device *dev) | |||
1296 | } | 1296 | } |
1297 | 1297 | ||
1298 | 1298 | ||
1299 | static struct net_device_stats *mlx4_en_get_stats(struct net_device *dev) | 1299 | static struct rtnl_link_stats64 * |
1300 | mlx4_en_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) | ||
1300 | { | 1301 | { |
1301 | struct mlx4_en_priv *priv = netdev_priv(dev); | 1302 | struct mlx4_en_priv *priv = netdev_priv(dev); |
1302 | 1303 | ||
1303 | spin_lock_bh(&priv->stats_lock); | 1304 | spin_lock_bh(&priv->stats_lock); |
1304 | memcpy(&priv->ret_stats, &priv->stats, sizeof(priv->stats)); | 1305 | netdev_stats_to_stats64(stats, &dev->stats); |
1305 | spin_unlock_bh(&priv->stats_lock); | 1306 | spin_unlock_bh(&priv->stats_lock); |
1306 | 1307 | ||
1307 | return &priv->ret_stats; | 1308 | return stats; |
1308 | } | 1309 | } |
1309 | 1310 | ||
1310 | static void mlx4_en_set_default_moderation(struct mlx4_en_priv *priv) | 1311 | static void mlx4_en_set_default_moderation(struct mlx4_en_priv *priv) |
@@ -1876,7 +1877,6 @@ static void mlx4_en_clear_stats(struct net_device *dev) | |||
1876 | if (mlx4_en_DUMP_ETH_STATS(mdev, priv->port, 1)) | 1877 | if (mlx4_en_DUMP_ETH_STATS(mdev, priv->port, 1)) |
1877 | en_dbg(HW, priv, "Failed dumping statistics\n"); | 1878 | en_dbg(HW, priv, "Failed dumping statistics\n"); |
1878 | 1879 | ||
1879 | memset(&priv->stats, 0, sizeof(priv->stats)); | ||
1880 | memset(&priv->pstats, 0, sizeof(priv->pstats)); | 1880 | memset(&priv->pstats, 0, sizeof(priv->pstats)); |
1881 | memset(&priv->pkstats, 0, sizeof(priv->pkstats)); | 1881 | memset(&priv->pkstats, 0, sizeof(priv->pkstats)); |
1882 | memset(&priv->port_stats, 0, sizeof(priv->port_stats)); | 1882 | memset(&priv->port_stats, 0, sizeof(priv->port_stats)); |
@@ -1892,6 +1892,11 @@ static void mlx4_en_clear_stats(struct net_device *dev) | |||
1892 | priv->tx_ring[i]->bytes = 0; | 1892 | priv->tx_ring[i]->bytes = 0; |
1893 | priv->tx_ring[i]->packets = 0; | 1893 | priv->tx_ring[i]->packets = 0; |
1894 | priv->tx_ring[i]->tx_csum = 0; | 1894 | priv->tx_ring[i]->tx_csum = 0; |
1895 | priv->tx_ring[i]->tx_dropped = 0; | ||
1896 | priv->tx_ring[i]->queue_stopped = 0; | ||
1897 | priv->tx_ring[i]->wake_queue = 0; | ||
1898 | priv->tx_ring[i]->tso_packets = 0; | ||
1899 | priv->tx_ring[i]->xmit_more = 0; | ||
1895 | } | 1900 | } |
1896 | for (i = 0; i < priv->rx_ring_num; i++) { | 1901 | for (i = 0; i < priv->rx_ring_num; i++) { |
1897 | priv->rx_ring[i]->bytes = 0; | 1902 | priv->rx_ring[i]->bytes = 0; |
@@ -2482,7 +2487,7 @@ static const struct net_device_ops mlx4_netdev_ops = { | |||
2482 | .ndo_stop = mlx4_en_close, | 2487 | .ndo_stop = mlx4_en_close, |
2483 | .ndo_start_xmit = mlx4_en_xmit, | 2488 | .ndo_start_xmit = mlx4_en_xmit, |
2484 | .ndo_select_queue = mlx4_en_select_queue, | 2489 | .ndo_select_queue = mlx4_en_select_queue, |
2485 | .ndo_get_stats = mlx4_en_get_stats, | 2490 | .ndo_get_stats64 = mlx4_en_get_stats64, |
2486 | .ndo_set_rx_mode = mlx4_en_set_rx_mode, | 2491 | .ndo_set_rx_mode = mlx4_en_set_rx_mode, |
2487 | .ndo_set_mac_address = mlx4_en_set_mac, | 2492 | .ndo_set_mac_address = mlx4_en_set_mac, |
2488 | .ndo_validate_addr = eth_validate_addr, | 2493 | .ndo_validate_addr = eth_validate_addr, |
@@ -2514,7 +2519,7 @@ static const struct net_device_ops mlx4_netdev_ops_master = { | |||
2514 | .ndo_stop = mlx4_en_close, | 2519 | .ndo_stop = mlx4_en_close, |
2515 | .ndo_start_xmit = mlx4_en_xmit, | 2520 | .ndo_start_xmit = mlx4_en_xmit, |
2516 | .ndo_select_queue = mlx4_en_select_queue, | 2521 | .ndo_select_queue = mlx4_en_select_queue, |
2517 | .ndo_get_stats = mlx4_en_get_stats, | 2522 | .ndo_get_stats64 = mlx4_en_get_stats64, |
2518 | .ndo_set_rx_mode = mlx4_en_set_rx_mode, | 2523 | .ndo_set_rx_mode = mlx4_en_set_rx_mode, |
2519 | .ndo_set_mac_address = mlx4_en_set_mac, | 2524 | .ndo_set_mac_address = mlx4_en_set_mac, |
2520 | .ndo_validate_addr = eth_validate_addr, | 2525 | .ndo_validate_addr = eth_validate_addr, |
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_port.c b/drivers/net/ethernet/mellanox/mlx4/en_port.c index 20b6c2e678b8..5aa8b751f417 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_port.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_port.c | |||
@@ -152,8 +152,9 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset) | |||
152 | struct mlx4_counter tmp_counter_stats; | 152 | struct mlx4_counter tmp_counter_stats; |
153 | struct mlx4_en_stat_out_mbox *mlx4_en_stats; | 153 | struct mlx4_en_stat_out_mbox *mlx4_en_stats; |
154 | struct mlx4_en_stat_out_flow_control_mbox *flowstats; | 154 | struct mlx4_en_stat_out_flow_control_mbox *flowstats; |
155 | struct mlx4_en_priv *priv = netdev_priv(mdev->pndev[port]); | 155 | struct net_device *dev = mdev->pndev[port]; |
156 | struct net_device_stats *stats = &priv->stats; | 156 | struct mlx4_en_priv *priv = netdev_priv(dev); |
157 | struct net_device_stats *stats = &dev->stats; | ||
157 | struct mlx4_cmd_mailbox *mailbox; | 158 | struct mlx4_cmd_mailbox *mailbox; |
158 | u64 in_mod = reset << 8 | port; | 159 | u64 in_mod = reset << 8 | port; |
159 | int err; | 160 | int err; |
@@ -188,6 +189,7 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset) | |||
188 | } | 189 | } |
189 | stats->tx_packets = 0; | 190 | stats->tx_packets = 0; |
190 | stats->tx_bytes = 0; | 191 | stats->tx_bytes = 0; |
192 | stats->tx_dropped = 0; | ||
191 | priv->port_stats.tx_chksum_offload = 0; | 193 | priv->port_stats.tx_chksum_offload = 0; |
192 | priv->port_stats.queue_stopped = 0; | 194 | priv->port_stats.queue_stopped = 0; |
193 | priv->port_stats.wake_queue = 0; | 195 | priv->port_stats.wake_queue = 0; |
@@ -199,6 +201,7 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset) | |||
199 | 201 | ||
200 | stats->tx_packets += ring->packets; | 202 | stats->tx_packets += ring->packets; |
201 | stats->tx_bytes += ring->bytes; | 203 | stats->tx_bytes += ring->bytes; |
204 | stats->tx_dropped += ring->tx_dropped; | ||
202 | priv->port_stats.tx_chksum_offload += ring->tx_csum; | 205 | priv->port_stats.tx_chksum_offload += ring->tx_csum; |
203 | priv->port_stats.queue_stopped += ring->queue_stopped; | 206 | priv->port_stats.queue_stopped += ring->queue_stopped; |
204 | priv->port_stats.wake_queue += ring->wake_queue; | 207 | priv->port_stats.wake_queue += ring->wake_queue; |
@@ -237,21 +240,12 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset) | |||
237 | stats->multicast = en_stats_adder(&mlx4_en_stats->MCAST_prio_0, | 240 | stats->multicast = en_stats_adder(&mlx4_en_stats->MCAST_prio_0, |
238 | &mlx4_en_stats->MCAST_prio_1, | 241 | &mlx4_en_stats->MCAST_prio_1, |
239 | NUM_PRIORITIES); | 242 | NUM_PRIORITIES); |
240 | stats->collisions = 0; | ||
241 | stats->rx_dropped = be32_to_cpu(mlx4_en_stats->RDROP) + | 243 | stats->rx_dropped = be32_to_cpu(mlx4_en_stats->RDROP) + |
242 | sw_rx_dropped; | 244 | sw_rx_dropped; |
243 | stats->rx_length_errors = be32_to_cpu(mlx4_en_stats->RdropLength); | 245 | stats->rx_length_errors = be32_to_cpu(mlx4_en_stats->RdropLength); |
244 | stats->rx_over_errors = 0; | ||
245 | stats->rx_crc_errors = be32_to_cpu(mlx4_en_stats->RCRC); | 246 | stats->rx_crc_errors = be32_to_cpu(mlx4_en_stats->RCRC); |
246 | stats->rx_frame_errors = 0; | ||
247 | stats->rx_fifo_errors = be32_to_cpu(mlx4_en_stats->RdropOvflw); | 247 | stats->rx_fifo_errors = be32_to_cpu(mlx4_en_stats->RdropOvflw); |
248 | stats->rx_missed_errors = 0; | 248 | stats->tx_dropped += be32_to_cpu(mlx4_en_stats->TDROP); |
249 | stats->tx_aborted_errors = 0; | ||
250 | stats->tx_carrier_errors = 0; | ||
251 | stats->tx_fifo_errors = 0; | ||
252 | stats->tx_heartbeat_errors = 0; | ||
253 | stats->tx_window_errors = 0; | ||
254 | stats->tx_dropped = be32_to_cpu(mlx4_en_stats->TDROP); | ||
255 | 249 | ||
256 | /* RX stats */ | 250 | /* RX stats */ |
257 | priv->pkstats.rx_multicast_packets = stats->multicast; | 251 | priv->pkstats.rx_multicast_packets = stats->multicast; |
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c index f6e61570cb2c..76aa4d27183c 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c | |||
@@ -726,12 +726,12 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev) | |||
726 | bool inline_ok; | 726 | bool inline_ok; |
727 | u32 ring_cons; | 727 | u32 ring_cons; |
728 | 728 | ||
729 | if (!priv->port_up) | ||
730 | goto tx_drop; | ||
731 | |||
732 | tx_ind = skb_get_queue_mapping(skb); | 729 | tx_ind = skb_get_queue_mapping(skb); |
733 | ring = priv->tx_ring[tx_ind]; | 730 | ring = priv->tx_ring[tx_ind]; |
734 | 731 | ||
732 | if (!priv->port_up) | ||
733 | goto tx_drop; | ||
734 | |||
735 | /* fetch ring->cons far ahead before needing it to avoid stall */ | 735 | /* fetch ring->cons far ahead before needing it to avoid stall */ |
736 | ring_cons = ACCESS_ONCE(ring->cons); | 736 | ring_cons = ACCESS_ONCE(ring->cons); |
737 | 737 | ||
@@ -1030,7 +1030,7 @@ tx_drop_unmap: | |||
1030 | 1030 | ||
1031 | tx_drop: | 1031 | tx_drop: |
1032 | dev_kfree_skb_any(skb); | 1032 | dev_kfree_skb_any(skb); |
1033 | priv->stats.tx_dropped++; | 1033 | ring->tx_dropped++; |
1034 | return NETDEV_TX_OK; | 1034 | return NETDEV_TX_OK; |
1035 | } | 1035 | } |
1036 | 1036 | ||
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h index cc84e09f324a..467d47ed2c39 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h +++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h | |||
@@ -270,6 +270,7 @@ struct mlx4_en_tx_ring { | |||
270 | unsigned long tx_csum; | 270 | unsigned long tx_csum; |
271 | unsigned long tso_packets; | 271 | unsigned long tso_packets; |
272 | unsigned long xmit_more; | 272 | unsigned long xmit_more; |
273 | unsigned int tx_dropped; | ||
273 | struct mlx4_bf bf; | 274 | struct mlx4_bf bf; |
274 | unsigned long queue_stopped; | 275 | unsigned long queue_stopped; |
275 | 276 | ||
@@ -482,8 +483,6 @@ struct mlx4_en_priv { | |||
482 | struct mlx4_en_port_profile *prof; | 483 | struct mlx4_en_port_profile *prof; |
483 | struct net_device *dev; | 484 | struct net_device *dev; |
484 | unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)]; | 485 | unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)]; |
485 | struct net_device_stats stats; | ||
486 | struct net_device_stats ret_stats; | ||
487 | struct mlx4_en_port_state port_state; | 486 | struct mlx4_en_port_state port_state; |
488 | spinlock_t stats_lock; | 487 | spinlock_t stats_lock; |
489 | struct ethtool_flow_id ethtool_rules[MAX_NUM_OF_FS_RULES]; | 488 | struct ethtool_flow_id ethtool_rules[MAX_NUM_OF_FS_RULES]; |
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c index cbf58e1f9333..21ec1c2df2c7 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c +++ b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c | |||
@@ -192,9 +192,10 @@ qed_dcbx_process_tlv(struct qed_hwfn *p_hwfn, | |||
192 | struct dcbx_app_priority_entry *p_tbl, | 192 | struct dcbx_app_priority_entry *p_tbl, |
193 | u32 pri_tc_tbl, int count, bool dcbx_enabled) | 193 | u32 pri_tc_tbl, int count, bool dcbx_enabled) |
194 | { | 194 | { |
195 | u8 tc, priority, priority_map; | 195 | u8 tc, priority_map; |
196 | enum dcbx_protocol_type type; | 196 | enum dcbx_protocol_type type; |
197 | u16 protocol_id; | 197 | u16 protocol_id; |
198 | int priority; | ||
198 | bool enable; | 199 | bool enable; |
199 | int i; | 200 | int i; |
200 | 201 | ||
@@ -221,7 +222,7 @@ qed_dcbx_process_tlv(struct qed_hwfn *p_hwfn, | |||
221 | * indication, but we only got here if there was an | 222 | * indication, but we only got here if there was an |
222 | * app tlv for the protocol, so dcbx must be enabled. | 223 | * app tlv for the protocol, so dcbx must be enabled. |
223 | */ | 224 | */ |
224 | enable = !!(type == DCBX_PROTOCOL_ETH); | 225 | enable = !(type == DCBX_PROTOCOL_ETH); |
225 | 226 | ||
226 | qed_dcbx_update_app_info(p_data, p_hwfn, enable, true, | 227 | qed_dcbx_update_app_info(p_data, p_hwfn, enable, true, |
227 | priority, tc, type); | 228 | priority, tc, type); |
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c index 089016f46f26..2d89e8c16b32 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_dev.c +++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c | |||
@@ -155,12 +155,14 @@ void qed_resc_free(struct qed_dev *cdev) | |||
155 | } | 155 | } |
156 | } | 156 | } |
157 | 157 | ||
158 | static int qed_init_qm_info(struct qed_hwfn *p_hwfn) | 158 | static int qed_init_qm_info(struct qed_hwfn *p_hwfn, bool b_sleepable) |
159 | { | 159 | { |
160 | u8 num_vports, vf_offset = 0, i, vport_id, num_ports, curr_queue = 0; | 160 | u8 num_vports, vf_offset = 0, i, vport_id, num_ports, curr_queue = 0; |
161 | struct qed_qm_info *qm_info = &p_hwfn->qm_info; | 161 | struct qed_qm_info *qm_info = &p_hwfn->qm_info; |
162 | struct init_qm_port_params *p_qm_port; | 162 | struct init_qm_port_params *p_qm_port; |
163 | u16 num_pqs, multi_cos_tcs = 1; | 163 | u16 num_pqs, multi_cos_tcs = 1; |
164 | u8 pf_wfq = qm_info->pf_wfq; | ||
165 | u32 pf_rl = qm_info->pf_rl; | ||
164 | u16 num_vfs = 0; | 166 | u16 num_vfs = 0; |
165 | 167 | ||
166 | #ifdef CONFIG_QED_SRIOV | 168 | #ifdef CONFIG_QED_SRIOV |
@@ -182,23 +184,28 @@ static int qed_init_qm_info(struct qed_hwfn *p_hwfn) | |||
182 | 184 | ||
183 | /* PQs will be arranged as follows: First per-TC PQ then pure-LB quete. | 185 | /* PQs will be arranged as follows: First per-TC PQ then pure-LB quete. |
184 | */ | 186 | */ |
185 | qm_info->qm_pq_params = kzalloc(sizeof(*qm_info->qm_pq_params) * | 187 | qm_info->qm_pq_params = kcalloc(num_pqs, |
186 | num_pqs, GFP_KERNEL); | 188 | sizeof(struct init_qm_pq_params), |
189 | b_sleepable ? GFP_KERNEL : GFP_ATOMIC); | ||
187 | if (!qm_info->qm_pq_params) | 190 | if (!qm_info->qm_pq_params) |
188 | goto alloc_err; | 191 | goto alloc_err; |
189 | 192 | ||
190 | qm_info->qm_vport_params = kzalloc(sizeof(*qm_info->qm_vport_params) * | 193 | qm_info->qm_vport_params = kcalloc(num_vports, |
191 | num_vports, GFP_KERNEL); | 194 | sizeof(struct init_qm_vport_params), |
195 | b_sleepable ? GFP_KERNEL | ||
196 | : GFP_ATOMIC); | ||
192 | if (!qm_info->qm_vport_params) | 197 | if (!qm_info->qm_vport_params) |
193 | goto alloc_err; | 198 | goto alloc_err; |
194 | 199 | ||
195 | qm_info->qm_port_params = kzalloc(sizeof(*qm_info->qm_port_params) * | 200 | qm_info->qm_port_params = kcalloc(MAX_NUM_PORTS, |
196 | MAX_NUM_PORTS, GFP_KERNEL); | 201 | sizeof(struct init_qm_port_params), |
202 | b_sleepable ? GFP_KERNEL | ||
203 | : GFP_ATOMIC); | ||
197 | if (!qm_info->qm_port_params) | 204 | if (!qm_info->qm_port_params) |
198 | goto alloc_err; | 205 | goto alloc_err; |
199 | 206 | ||
200 | qm_info->wfq_data = kcalloc(num_vports, sizeof(*qm_info->wfq_data), | 207 | qm_info->wfq_data = kcalloc(num_vports, sizeof(struct qed_wfq_data), |
201 | GFP_KERNEL); | 208 | b_sleepable ? GFP_KERNEL : GFP_ATOMIC); |
202 | if (!qm_info->wfq_data) | 209 | if (!qm_info->wfq_data) |
203 | goto alloc_err; | 210 | goto alloc_err; |
204 | 211 | ||
@@ -264,10 +271,10 @@ static int qed_init_qm_info(struct qed_hwfn *p_hwfn) | |||
264 | for (i = 0; i < qm_info->num_vports; i++) | 271 | for (i = 0; i < qm_info->num_vports; i++) |
265 | qm_info->qm_vport_params[i].vport_wfq = 1; | 272 | qm_info->qm_vport_params[i].vport_wfq = 1; |
266 | 273 | ||
267 | qm_info->pf_wfq = 0; | ||
268 | qm_info->pf_rl = 0; | ||
269 | qm_info->vport_rl_en = 1; | 274 | qm_info->vport_rl_en = 1; |
270 | qm_info->vport_wfq_en = 1; | 275 | qm_info->vport_wfq_en = 1; |
276 | qm_info->pf_rl = pf_rl; | ||
277 | qm_info->pf_wfq = pf_wfq; | ||
271 | 278 | ||
272 | return 0; | 279 | return 0; |
273 | 280 | ||
@@ -299,7 +306,7 @@ int qed_qm_reconf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) | |||
299 | qed_qm_info_free(p_hwfn); | 306 | qed_qm_info_free(p_hwfn); |
300 | 307 | ||
301 | /* initialize qed's qm data structure */ | 308 | /* initialize qed's qm data structure */ |
302 | rc = qed_init_qm_info(p_hwfn); | 309 | rc = qed_init_qm_info(p_hwfn, false); |
303 | if (rc) | 310 | if (rc) |
304 | return rc; | 311 | return rc; |
305 | 312 | ||
@@ -388,7 +395,7 @@ int qed_resc_alloc(struct qed_dev *cdev) | |||
388 | goto alloc_err; | 395 | goto alloc_err; |
389 | 396 | ||
390 | /* Prepare and process QM requirements */ | 397 | /* Prepare and process QM requirements */ |
391 | rc = qed_init_qm_info(p_hwfn); | 398 | rc = qed_init_qm_info(p_hwfn, true); |
392 | if (rc) | 399 | if (rc) |
393 | goto alloc_err; | 400 | goto alloc_err; |
394 | 401 | ||
@@ -581,7 +588,14 @@ static void qed_calc_hw_mode(struct qed_hwfn *p_hwfn) | |||
581 | 588 | ||
582 | hw_mode |= 1 << MODE_ASIC; | 589 | hw_mode |= 1 << MODE_ASIC; |
583 | 590 | ||
591 | if (p_hwfn->cdev->num_hwfns > 1) | ||
592 | hw_mode |= 1 << MODE_100G; | ||
593 | |||
584 | p_hwfn->hw_info.hw_mode = hw_mode; | 594 | p_hwfn->hw_info.hw_mode = hw_mode; |
595 | |||
596 | DP_VERBOSE(p_hwfn, (NETIF_MSG_PROBE | NETIF_MSG_IFUP), | ||
597 | "Configuring function for hw_mode: 0x%08x\n", | ||
598 | p_hwfn->hw_info.hw_mode); | ||
585 | } | 599 | } |
586 | 600 | ||
587 | /* Init run time data for all PFs on an engine. */ | 601 | /* Init run time data for all PFs on an engine. */ |
@@ -821,6 +835,11 @@ int qed_hw_init(struct qed_dev *cdev, | |||
821 | u32 load_code, param; | 835 | u32 load_code, param; |
822 | int rc, mfw_rc, i; | 836 | int rc, mfw_rc, i; |
823 | 837 | ||
838 | if ((int_mode == QED_INT_MODE_MSI) && (cdev->num_hwfns > 1)) { | ||
839 | DP_NOTICE(cdev, "MSI mode is not supported for CMT devices\n"); | ||
840 | return -EINVAL; | ||
841 | } | ||
842 | |||
824 | if (IS_PF(cdev)) { | 843 | if (IS_PF(cdev)) { |
825 | rc = qed_init_fw_data(cdev, bin_fw_data); | 844 | rc = qed_init_fw_data(cdev, bin_fw_data); |
826 | if (rc != 0) | 845 | if (rc != 0) |
@@ -2086,6 +2105,13 @@ void qed_configure_vp_wfq_on_link_change(struct qed_dev *cdev, u32 min_pf_rate) | |||
2086 | { | 2105 | { |
2087 | int i; | 2106 | int i; |
2088 | 2107 | ||
2108 | if (cdev->num_hwfns > 1) { | ||
2109 | DP_VERBOSE(cdev, | ||
2110 | NETIF_MSG_LINK, | ||
2111 | "WFQ configuration is not supported for this device\n"); | ||
2112 | return; | ||
2113 | } | ||
2114 | |||
2089 | for_each_hwfn(cdev, i) { | 2115 | for_each_hwfn(cdev, i) { |
2090 | struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; | 2116 | struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; |
2091 | 2117 | ||
diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c index 8b22f87033ce..753064679bde 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_main.c +++ b/drivers/net/ethernet/qlogic/qed/qed_main.c | |||
@@ -413,15 +413,17 @@ static int qed_set_int_mode(struct qed_dev *cdev, bool force_mode) | |||
413 | /* Fallthrough */ | 413 | /* Fallthrough */ |
414 | 414 | ||
415 | case QED_INT_MODE_MSI: | 415 | case QED_INT_MODE_MSI: |
416 | rc = pci_enable_msi(cdev->pdev); | 416 | if (cdev->num_hwfns == 1) { |
417 | if (!rc) { | 417 | rc = pci_enable_msi(cdev->pdev); |
418 | int_params->out.int_mode = QED_INT_MODE_MSI; | 418 | if (!rc) { |
419 | goto out; | 419 | int_params->out.int_mode = QED_INT_MODE_MSI; |
420 | } | 420 | goto out; |
421 | } | ||
421 | 422 | ||
422 | DP_NOTICE(cdev, "Failed to enable MSI\n"); | 423 | DP_NOTICE(cdev, "Failed to enable MSI\n"); |
423 | if (force_mode) | 424 | if (force_mode) |
424 | goto out; | 425 | goto out; |
426 | } | ||
425 | /* Fallthrough */ | 427 | /* Fallthrough */ |
426 | 428 | ||
427 | case QED_INT_MODE_INTA: | 429 | case QED_INT_MODE_INTA: |
diff --git a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c index 1bc75358cbc4..ad3cae3b7243 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c +++ b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c | |||
@@ -230,7 +230,10 @@ static int qede_get_sset_count(struct net_device *dev, int stringset) | |||
230 | case ETH_SS_PRIV_FLAGS: | 230 | case ETH_SS_PRIV_FLAGS: |
231 | return QEDE_PRI_FLAG_LEN; | 231 | return QEDE_PRI_FLAG_LEN; |
232 | case ETH_SS_TEST: | 232 | case ETH_SS_TEST: |
233 | return QEDE_ETHTOOL_TEST_MAX; | 233 | if (!IS_VF(edev)) |
234 | return QEDE_ETHTOOL_TEST_MAX; | ||
235 | else | ||
236 | return 0; | ||
234 | default: | 237 | default: |
235 | DP_VERBOSE(edev, QED_MSG_DEBUG, | 238 | DP_VERBOSE(edev, QED_MSG_DEBUG, |
236 | "Unsupported stringset 0x%08x\n", stringset); | 239 | "Unsupported stringset 0x%08x\n", stringset); |
diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c index 337e839ca586..5d00d1404bfc 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_main.c +++ b/drivers/net/ethernet/qlogic/qede/qede_main.c | |||
@@ -1824,7 +1824,7 @@ static int qede_set_vf_rate(struct net_device *dev, int vfidx, | |||
1824 | { | 1824 | { |
1825 | struct qede_dev *edev = netdev_priv(dev); | 1825 | struct qede_dev *edev = netdev_priv(dev); |
1826 | 1826 | ||
1827 | return edev->ops->iov->set_rate(edev->cdev, vfidx, max_tx_rate, | 1827 | return edev->ops->iov->set_rate(edev->cdev, vfidx, min_tx_rate, |
1828 | max_tx_rate); | 1828 | max_tx_rate); |
1829 | } | 1829 | } |
1830 | 1830 | ||
@@ -2091,6 +2091,29 @@ static void qede_vlan_mark_nonconfigured(struct qede_dev *edev) | |||
2091 | edev->accept_any_vlan = false; | 2091 | edev->accept_any_vlan = false; |
2092 | } | 2092 | } |
2093 | 2093 | ||
2094 | int qede_set_features(struct net_device *dev, netdev_features_t features) | ||
2095 | { | ||
2096 | struct qede_dev *edev = netdev_priv(dev); | ||
2097 | netdev_features_t changes = features ^ dev->features; | ||
2098 | bool need_reload = false; | ||
2099 | |||
2100 | /* No action needed if hardware GRO is disabled during driver load */ | ||
2101 | if (changes & NETIF_F_GRO) { | ||
2102 | if (dev->features & NETIF_F_GRO) | ||
2103 | need_reload = !edev->gro_disable; | ||
2104 | else | ||
2105 | need_reload = edev->gro_disable; | ||
2106 | } | ||
2107 | |||
2108 | if (need_reload && netif_running(edev->ndev)) { | ||
2109 | dev->features = features; | ||
2110 | qede_reload(edev, NULL, NULL); | ||
2111 | return 1; | ||
2112 | } | ||
2113 | |||
2114 | return 0; | ||
2115 | } | ||
2116 | |||
2094 | #ifdef CONFIG_QEDE_VXLAN | 2117 | #ifdef CONFIG_QEDE_VXLAN |
2095 | static void qede_add_vxlan_port(struct net_device *dev, | 2118 | static void qede_add_vxlan_port(struct net_device *dev, |
2096 | sa_family_t sa_family, __be16 port) | 2119 | sa_family_t sa_family, __be16 port) |
@@ -2175,6 +2198,7 @@ static const struct net_device_ops qede_netdev_ops = { | |||
2175 | #endif | 2198 | #endif |
2176 | .ndo_vlan_rx_add_vid = qede_vlan_rx_add_vid, | 2199 | .ndo_vlan_rx_add_vid = qede_vlan_rx_add_vid, |
2177 | .ndo_vlan_rx_kill_vid = qede_vlan_rx_kill_vid, | 2200 | .ndo_vlan_rx_kill_vid = qede_vlan_rx_kill_vid, |
2201 | .ndo_set_features = qede_set_features, | ||
2178 | .ndo_get_stats64 = qede_get_stats64, | 2202 | .ndo_get_stats64 = qede_get_stats64, |
2179 | #ifdef CONFIG_QED_SRIOV | 2203 | #ifdef CONFIG_QED_SRIOV |
2180 | .ndo_set_vf_link_state = qede_set_vf_link_state, | 2204 | .ndo_set_vf_link_state = qede_set_vf_link_state, |
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_main.c b/drivers/net/ethernet/qlogic/qlge/qlge_main.c index 83d72106471c..fd5d1c93b55b 100644 --- a/drivers/net/ethernet/qlogic/qlge/qlge_main.c +++ b/drivers/net/ethernet/qlogic/qlge/qlge_main.c | |||
@@ -4846,7 +4846,6 @@ static void ql_eeh_close(struct net_device *ndev) | |||
4846 | } | 4846 | } |
4847 | 4847 | ||
4848 | /* Disabling the timer */ | 4848 | /* Disabling the timer */ |
4849 | del_timer_sync(&qdev->timer); | ||
4850 | ql_cancel_all_work_sync(qdev); | 4849 | ql_cancel_all_work_sync(qdev); |
4851 | 4850 | ||
4852 | for (i = 0; i < qdev->rss_ring_count; i++) | 4851 | for (i = 0; i < qdev->rss_ring_count; i++) |
@@ -4873,6 +4872,7 @@ static pci_ers_result_t qlge_io_error_detected(struct pci_dev *pdev, | |||
4873 | return PCI_ERS_RESULT_CAN_RECOVER; | 4872 | return PCI_ERS_RESULT_CAN_RECOVER; |
4874 | case pci_channel_io_frozen: | 4873 | case pci_channel_io_frozen: |
4875 | netif_device_detach(ndev); | 4874 | netif_device_detach(ndev); |
4875 | del_timer_sync(&qdev->timer); | ||
4876 | if (netif_running(ndev)) | 4876 | if (netif_running(ndev)) |
4877 | ql_eeh_close(ndev); | 4877 | ql_eeh_close(ndev); |
4878 | pci_disable_device(pdev); | 4878 | pci_disable_device(pdev); |
@@ -4880,6 +4880,7 @@ static pci_ers_result_t qlge_io_error_detected(struct pci_dev *pdev, | |||
4880 | case pci_channel_io_perm_failure: | 4880 | case pci_channel_io_perm_failure: |
4881 | dev_err(&pdev->dev, | 4881 | dev_err(&pdev->dev, |
4882 | "%s: pci_channel_io_perm_failure.\n", __func__); | 4882 | "%s: pci_channel_io_perm_failure.\n", __func__); |
4883 | del_timer_sync(&qdev->timer); | ||
4883 | ql_eeh_close(ndev); | 4884 | ql_eeh_close(ndev); |
4884 | set_bit(QL_EEH_FATAL, &qdev->flags); | 4885 | set_bit(QL_EEH_FATAL, &qdev->flags); |
4885 | return PCI_ERS_RESULT_DISCONNECT; | 4886 | return PCI_ERS_RESULT_DISCONNECT; |
diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c index 1681084cc96f..1f309127457d 100644 --- a/drivers/net/ethernet/sfc/ef10.c +++ b/drivers/net/ethernet/sfc/ef10.c | |||
@@ -619,6 +619,17 @@ fail: | |||
619 | return rc; | 619 | return rc; |
620 | } | 620 | } |
621 | 621 | ||
622 | static void efx_ef10_forget_old_piobufs(struct efx_nic *efx) | ||
623 | { | ||
624 | struct efx_channel *channel; | ||
625 | struct efx_tx_queue *tx_queue; | ||
626 | |||
627 | /* All our existing PIO buffers went away */ | ||
628 | efx_for_each_channel(channel, efx) | ||
629 | efx_for_each_channel_tx_queue(tx_queue, channel) | ||
630 | tx_queue->piobuf = NULL; | ||
631 | } | ||
632 | |||
622 | #else /* !EFX_USE_PIO */ | 633 | #else /* !EFX_USE_PIO */ |
623 | 634 | ||
624 | static int efx_ef10_alloc_piobufs(struct efx_nic *efx, unsigned int n) | 635 | static int efx_ef10_alloc_piobufs(struct efx_nic *efx, unsigned int n) |
@@ -635,6 +646,10 @@ static void efx_ef10_free_piobufs(struct efx_nic *efx) | |||
635 | { | 646 | { |
636 | } | 647 | } |
637 | 648 | ||
649 | static void efx_ef10_forget_old_piobufs(struct efx_nic *efx) | ||
650 | { | ||
651 | } | ||
652 | |||
638 | #endif /* EFX_USE_PIO */ | 653 | #endif /* EFX_USE_PIO */ |
639 | 654 | ||
640 | static void efx_ef10_remove(struct efx_nic *efx) | 655 | static void efx_ef10_remove(struct efx_nic *efx) |
@@ -1018,6 +1033,7 @@ static void efx_ef10_reset_mc_allocations(struct efx_nic *efx) | |||
1018 | nic_data->must_realloc_vis = true; | 1033 | nic_data->must_realloc_vis = true; |
1019 | nic_data->must_restore_filters = true; | 1034 | nic_data->must_restore_filters = true; |
1020 | nic_data->must_restore_piobufs = true; | 1035 | nic_data->must_restore_piobufs = true; |
1036 | efx_ef10_forget_old_piobufs(efx); | ||
1021 | nic_data->rx_rss_context = EFX_EF10_RSS_CONTEXT_INVALID; | 1037 | nic_data->rx_rss_context = EFX_EF10_RSS_CONTEXT_INVALID; |
1022 | 1038 | ||
1023 | /* Driver-created vswitches and vports must be re-created */ | 1039 | /* Driver-created vswitches and vports must be re-created */ |
diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c index 0705ec869487..097f363f1630 100644 --- a/drivers/net/ethernet/sfc/efx.c +++ b/drivers/net/ethernet/sfc/efx.c | |||
@@ -1726,14 +1726,33 @@ static int efx_probe_filters(struct efx_nic *efx) | |||
1726 | 1726 | ||
1727 | #ifdef CONFIG_RFS_ACCEL | 1727 | #ifdef CONFIG_RFS_ACCEL |
1728 | if (efx->type->offload_features & NETIF_F_NTUPLE) { | 1728 | if (efx->type->offload_features & NETIF_F_NTUPLE) { |
1729 | efx->rps_flow_id = kcalloc(efx->type->max_rx_ip_filters, | 1729 | struct efx_channel *channel; |
1730 | sizeof(*efx->rps_flow_id), | 1730 | int i, success = 1; |
1731 | GFP_KERNEL); | 1731 | |
1732 | if (!efx->rps_flow_id) { | 1732 | efx_for_each_channel(channel, efx) { |
1733 | channel->rps_flow_id = | ||
1734 | kcalloc(efx->type->max_rx_ip_filters, | ||
1735 | sizeof(*channel->rps_flow_id), | ||
1736 | GFP_KERNEL); | ||
1737 | if (!channel->rps_flow_id) | ||
1738 | success = 0; | ||
1739 | else | ||
1740 | for (i = 0; | ||
1741 | i < efx->type->max_rx_ip_filters; | ||
1742 | ++i) | ||
1743 | channel->rps_flow_id[i] = | ||
1744 | RPS_FLOW_ID_INVALID; | ||
1745 | } | ||
1746 | |||
1747 | if (!success) { | ||
1748 | efx_for_each_channel(channel, efx) | ||
1749 | kfree(channel->rps_flow_id); | ||
1733 | efx->type->filter_table_remove(efx); | 1750 | efx->type->filter_table_remove(efx); |
1734 | rc = -ENOMEM; | 1751 | rc = -ENOMEM; |
1735 | goto out_unlock; | 1752 | goto out_unlock; |
1736 | } | 1753 | } |
1754 | |||
1755 | efx->rps_expire_index = efx->rps_expire_channel = 0; | ||
1737 | } | 1756 | } |
1738 | #endif | 1757 | #endif |
1739 | out_unlock: | 1758 | out_unlock: |
@@ -1744,7 +1763,10 @@ out_unlock: | |||
1744 | static void efx_remove_filters(struct efx_nic *efx) | 1763 | static void efx_remove_filters(struct efx_nic *efx) |
1745 | { | 1764 | { |
1746 | #ifdef CONFIG_RFS_ACCEL | 1765 | #ifdef CONFIG_RFS_ACCEL |
1747 | kfree(efx->rps_flow_id); | 1766 | struct efx_channel *channel; |
1767 | |||
1768 | efx_for_each_channel(channel, efx) | ||
1769 | kfree(channel->rps_flow_id); | ||
1748 | #endif | 1770 | #endif |
1749 | down_write(&efx->filter_sem); | 1771 | down_write(&efx->filter_sem); |
1750 | efx->type->filter_table_remove(efx); | 1772 | efx->type->filter_table_remove(efx); |
diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h index 38c422321cda..d13ddf9703ff 100644 --- a/drivers/net/ethernet/sfc/net_driver.h +++ b/drivers/net/ethernet/sfc/net_driver.h | |||
@@ -403,6 +403,8 @@ enum efx_sync_events_state { | |||
403 | * @event_test_cpu: Last CPU to handle interrupt or test event for this channel | 403 | * @event_test_cpu: Last CPU to handle interrupt or test event for this channel |
404 | * @irq_count: Number of IRQs since last adaptive moderation decision | 404 | * @irq_count: Number of IRQs since last adaptive moderation decision |
405 | * @irq_mod_score: IRQ moderation score | 405 | * @irq_mod_score: IRQ moderation score |
406 | * @rps_flow_id: Flow IDs of filters allocated for accelerated RFS, | ||
407 | * indexed by filter ID | ||
406 | * @n_rx_tobe_disc: Count of RX_TOBE_DISC errors | 408 | * @n_rx_tobe_disc: Count of RX_TOBE_DISC errors |
407 | * @n_rx_ip_hdr_chksum_err: Count of RX IP header checksum errors | 409 | * @n_rx_ip_hdr_chksum_err: Count of RX IP header checksum errors |
408 | * @n_rx_tcp_udp_chksum_err: Count of RX TCP and UDP checksum errors | 410 | * @n_rx_tcp_udp_chksum_err: Count of RX TCP and UDP checksum errors |
@@ -446,6 +448,8 @@ struct efx_channel { | |||
446 | unsigned int irq_mod_score; | 448 | unsigned int irq_mod_score; |
447 | #ifdef CONFIG_RFS_ACCEL | 449 | #ifdef CONFIG_RFS_ACCEL |
448 | unsigned int rfs_filters_added; | 450 | unsigned int rfs_filters_added; |
451 | #define RPS_FLOW_ID_INVALID 0xFFFFFFFF | ||
452 | u32 *rps_flow_id; | ||
449 | #endif | 453 | #endif |
450 | 454 | ||
451 | unsigned n_rx_tobe_disc; | 455 | unsigned n_rx_tobe_disc; |
@@ -889,9 +893,9 @@ struct vfdi_status; | |||
889 | * @filter_sem: Filter table rw_semaphore, for freeing the table | 893 | * @filter_sem: Filter table rw_semaphore, for freeing the table |
890 | * @filter_lock: Filter table lock, for mere content changes | 894 | * @filter_lock: Filter table lock, for mere content changes |
891 | * @filter_state: Architecture-dependent filter table state | 895 | * @filter_state: Architecture-dependent filter table state |
892 | * @rps_flow_id: Flow IDs of filters allocated for accelerated RFS, | 896 | * @rps_expire_channel: Next channel to check for expiry |
893 | * indexed by filter ID | 897 | * @rps_expire_index: Next index to check for expiry in |
894 | * @rps_expire_index: Next index to check for expiry in @rps_flow_id | 898 | * @rps_expire_channel's @rps_flow_id |
895 | * @active_queues: Count of RX and TX queues that haven't been flushed and drained. | 899 | * @active_queues: Count of RX and TX queues that haven't been flushed and drained. |
896 | * @rxq_flush_pending: Count of number of receive queues that need to be flushed. | 900 | * @rxq_flush_pending: Count of number of receive queues that need to be flushed. |
897 | * Decremented when the efx_flush_rx_queue() is called. | 901 | * Decremented when the efx_flush_rx_queue() is called. |
@@ -1035,7 +1039,7 @@ struct efx_nic { | |||
1035 | spinlock_t filter_lock; | 1039 | spinlock_t filter_lock; |
1036 | void *filter_state; | 1040 | void *filter_state; |
1037 | #ifdef CONFIG_RFS_ACCEL | 1041 | #ifdef CONFIG_RFS_ACCEL |
1038 | u32 *rps_flow_id; | 1042 | unsigned int rps_expire_channel; |
1039 | unsigned int rps_expire_index; | 1043 | unsigned int rps_expire_index; |
1040 | #endif | 1044 | #endif |
1041 | 1045 | ||
diff --git a/drivers/net/ethernet/sfc/rx.c b/drivers/net/ethernet/sfc/rx.c index 8956995b2fe7..02b0b5272c14 100644 --- a/drivers/net/ethernet/sfc/rx.c +++ b/drivers/net/ethernet/sfc/rx.c | |||
@@ -842,33 +842,18 @@ int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb, | |||
842 | struct efx_nic *efx = netdev_priv(net_dev); | 842 | struct efx_nic *efx = netdev_priv(net_dev); |
843 | struct efx_channel *channel; | 843 | struct efx_channel *channel; |
844 | struct efx_filter_spec spec; | 844 | struct efx_filter_spec spec; |
845 | const __be16 *ports; | 845 | struct flow_keys fk; |
846 | __be16 ether_type; | ||
847 | int nhoff; | ||
848 | int rc; | 846 | int rc; |
849 | 847 | ||
850 | /* The core RPS/RFS code has already parsed and validated | 848 | if (flow_id == RPS_FLOW_ID_INVALID) |
851 | * VLAN, IP and transport headers. We assume they are in the | 849 | return -EINVAL; |
852 | * header area. | ||
853 | */ | ||
854 | |||
855 | if (skb->protocol == htons(ETH_P_8021Q)) { | ||
856 | const struct vlan_hdr *vh = | ||
857 | (const struct vlan_hdr *)skb->data; | ||
858 | 850 | ||
859 | /* We can't filter on the IP 5-tuple and the vlan | 851 | if (!skb_flow_dissect_flow_keys(skb, &fk, 0)) |
860 | * together, so just strip the vlan header and filter | 852 | return -EPROTONOSUPPORT; |
861 | * on the IP part. | ||
862 | */ | ||
863 | EFX_BUG_ON_PARANOID(skb_headlen(skb) < sizeof(*vh)); | ||
864 | ether_type = vh->h_vlan_encapsulated_proto; | ||
865 | nhoff = sizeof(struct vlan_hdr); | ||
866 | } else { | ||
867 | ether_type = skb->protocol; | ||
868 | nhoff = 0; | ||
869 | } | ||
870 | 853 | ||
871 | if (ether_type != htons(ETH_P_IP) && ether_type != htons(ETH_P_IPV6)) | 854 | if (fk.basic.n_proto != htons(ETH_P_IP) && fk.basic.n_proto != htons(ETH_P_IPV6)) |
855 | return -EPROTONOSUPPORT; | ||
856 | if (fk.control.flags & FLOW_DIS_IS_FRAGMENT) | ||
872 | return -EPROTONOSUPPORT; | 857 | return -EPROTONOSUPPORT; |
873 | 858 | ||
874 | efx_filter_init_rx(&spec, EFX_FILTER_PRI_HINT, | 859 | efx_filter_init_rx(&spec, EFX_FILTER_PRI_HINT, |
@@ -878,56 +863,41 @@ int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb, | |||
878 | EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_IP_PROTO | | 863 | EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_IP_PROTO | |
879 | EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_LOC_PORT | | 864 | EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_LOC_PORT | |
880 | EFX_FILTER_MATCH_REM_HOST | EFX_FILTER_MATCH_REM_PORT; | 865 | EFX_FILTER_MATCH_REM_HOST | EFX_FILTER_MATCH_REM_PORT; |
881 | spec.ether_type = ether_type; | 866 | spec.ether_type = fk.basic.n_proto; |
882 | 867 | spec.ip_proto = fk.basic.ip_proto; | |
883 | if (ether_type == htons(ETH_P_IP)) { | 868 | |
884 | const struct iphdr *ip = | 869 | if (fk.basic.n_proto == htons(ETH_P_IP)) { |
885 | (const struct iphdr *)(skb->data + nhoff); | 870 | spec.rem_host[0] = fk.addrs.v4addrs.src; |
886 | 871 | spec.loc_host[0] = fk.addrs.v4addrs.dst; | |
887 | EFX_BUG_ON_PARANOID(skb_headlen(skb) < nhoff + sizeof(*ip)); | ||
888 | if (ip_is_fragment(ip)) | ||
889 | return -EPROTONOSUPPORT; | ||
890 | spec.ip_proto = ip->protocol; | ||
891 | spec.rem_host[0] = ip->saddr; | ||
892 | spec.loc_host[0] = ip->daddr; | ||
893 | EFX_BUG_ON_PARANOID(skb_headlen(skb) < nhoff + 4 * ip->ihl + 4); | ||
894 | ports = (const __be16 *)(skb->data + nhoff + 4 * ip->ihl); | ||
895 | } else { | 872 | } else { |
896 | const struct ipv6hdr *ip6 = | 873 | memcpy(spec.rem_host, &fk.addrs.v6addrs.src, sizeof(struct in6_addr)); |
897 | (const struct ipv6hdr *)(skb->data + nhoff); | 874 | memcpy(spec.loc_host, &fk.addrs.v6addrs.dst, sizeof(struct in6_addr)); |
898 | |||
899 | EFX_BUG_ON_PARANOID(skb_headlen(skb) < | ||
900 | nhoff + sizeof(*ip6) + 4); | ||
901 | spec.ip_proto = ip6->nexthdr; | ||
902 | memcpy(spec.rem_host, &ip6->saddr, sizeof(ip6->saddr)); | ||
903 | memcpy(spec.loc_host, &ip6->daddr, sizeof(ip6->daddr)); | ||
904 | ports = (const __be16 *)(ip6 + 1); | ||
905 | } | 875 | } |
906 | 876 | ||
907 | spec.rem_port = ports[0]; | 877 | spec.rem_port = fk.ports.src; |
908 | spec.loc_port = ports[1]; | 878 | spec.loc_port = fk.ports.dst; |
909 | 879 | ||
910 | rc = efx->type->filter_rfs_insert(efx, &spec); | 880 | rc = efx->type->filter_rfs_insert(efx, &spec); |
911 | if (rc < 0) | 881 | if (rc < 0) |
912 | return rc; | 882 | return rc; |
913 | 883 | ||
914 | /* Remember this so we can check whether to expire the filter later */ | 884 | /* Remember this so we can check whether to expire the filter later */ |
915 | efx->rps_flow_id[rc] = flow_id; | 885 | channel = efx_get_channel(efx, rxq_index); |
916 | channel = efx_get_channel(efx, skb_get_rx_queue(skb)); | 886 | channel->rps_flow_id[rc] = flow_id; |
917 | ++channel->rfs_filters_added; | 887 | ++channel->rfs_filters_added; |
918 | 888 | ||
919 | if (ether_type == htons(ETH_P_IP)) | 889 | if (spec.ether_type == htons(ETH_P_IP)) |
920 | netif_info(efx, rx_status, efx->net_dev, | 890 | netif_info(efx, rx_status, efx->net_dev, |
921 | "steering %s %pI4:%u:%pI4:%u to queue %u [flow %u filter %d]\n", | 891 | "steering %s %pI4:%u:%pI4:%u to queue %u [flow %u filter %d]\n", |
922 | (spec.ip_proto == IPPROTO_TCP) ? "TCP" : "UDP", | 892 | (spec.ip_proto == IPPROTO_TCP) ? "TCP" : "UDP", |
923 | spec.rem_host, ntohs(ports[0]), spec.loc_host, | 893 | spec.rem_host, ntohs(spec.rem_port), spec.loc_host, |
924 | ntohs(ports[1]), rxq_index, flow_id, rc); | 894 | ntohs(spec.loc_port), rxq_index, flow_id, rc); |
925 | else | 895 | else |
926 | netif_info(efx, rx_status, efx->net_dev, | 896 | netif_info(efx, rx_status, efx->net_dev, |
927 | "steering %s [%pI6]:%u:[%pI6]:%u to queue %u [flow %u filter %d]\n", | 897 | "steering %s [%pI6]:%u:[%pI6]:%u to queue %u [flow %u filter %d]\n", |
928 | (spec.ip_proto == IPPROTO_TCP) ? "TCP" : "UDP", | 898 | (spec.ip_proto == IPPROTO_TCP) ? "TCP" : "UDP", |
929 | spec.rem_host, ntohs(ports[0]), spec.loc_host, | 899 | spec.rem_host, ntohs(spec.rem_port), spec.loc_host, |
930 | ntohs(ports[1]), rxq_index, flow_id, rc); | 900 | ntohs(spec.loc_port), rxq_index, flow_id, rc); |
931 | 901 | ||
932 | return rc; | 902 | return rc; |
933 | } | 903 | } |
@@ -935,24 +905,34 @@ int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb, | |||
935 | bool __efx_filter_rfs_expire(struct efx_nic *efx, unsigned int quota) | 905 | bool __efx_filter_rfs_expire(struct efx_nic *efx, unsigned int quota) |
936 | { | 906 | { |
937 | bool (*expire_one)(struct efx_nic *efx, u32 flow_id, unsigned int index); | 907 | bool (*expire_one)(struct efx_nic *efx, u32 flow_id, unsigned int index); |
938 | unsigned int index, size; | 908 | unsigned int channel_idx, index, size; |
939 | u32 flow_id; | 909 | u32 flow_id; |
940 | 910 | ||
941 | if (!spin_trylock_bh(&efx->filter_lock)) | 911 | if (!spin_trylock_bh(&efx->filter_lock)) |
942 | return false; | 912 | return false; |
943 | 913 | ||
944 | expire_one = efx->type->filter_rfs_expire_one; | 914 | expire_one = efx->type->filter_rfs_expire_one; |
915 | channel_idx = efx->rps_expire_channel; | ||
945 | index = efx->rps_expire_index; | 916 | index = efx->rps_expire_index; |
946 | size = efx->type->max_rx_ip_filters; | 917 | size = efx->type->max_rx_ip_filters; |
947 | while (quota--) { | 918 | while (quota--) { |
948 | flow_id = efx->rps_flow_id[index]; | 919 | struct efx_channel *channel = efx_get_channel(efx, channel_idx); |
949 | if (expire_one(efx, flow_id, index)) | 920 | flow_id = channel->rps_flow_id[index]; |
921 | |||
922 | if (flow_id != RPS_FLOW_ID_INVALID && | ||
923 | expire_one(efx, flow_id, index)) { | ||
950 | netif_info(efx, rx_status, efx->net_dev, | 924 | netif_info(efx, rx_status, efx->net_dev, |
951 | "expired filter %d [flow %u]\n", | 925 | "expired filter %d [queue %u flow %u]\n", |
952 | index, flow_id); | 926 | index, channel_idx, flow_id); |
953 | if (++index == size) | 927 | channel->rps_flow_id[index] = RPS_FLOW_ID_INVALID; |
928 | } | ||
929 | if (++index == size) { | ||
930 | if (++channel_idx == efx->n_channels) | ||
931 | channel_idx = 0; | ||
954 | index = 0; | 932 | index = 0; |
933 | } | ||
955 | } | 934 | } |
935 | efx->rps_expire_channel = channel_idx; | ||
956 | efx->rps_expire_index = index; | 936 | efx->rps_expire_index = index; |
957 | 937 | ||
958 | spin_unlock_bh(&efx->filter_lock); | 938 | spin_unlock_bh(&efx->filter_lock); |
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c index 3f83c369f56c..ec295851812b 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c | |||
@@ -297,7 +297,7 @@ int stmmac_mdio_register(struct net_device *ndev) | |||
297 | return -ENOMEM; | 297 | return -ENOMEM; |
298 | 298 | ||
299 | if (mdio_bus_data->irqs) | 299 | if (mdio_bus_data->irqs) |
300 | memcpy(new_bus->irq, mdio_bus_data, sizeof(new_bus->irq)); | 300 | memcpy(new_bus->irq, mdio_bus_data->irqs, sizeof(new_bus->irq)); |
301 | 301 | ||
302 | #ifdef CONFIG_OF | 302 | #ifdef CONFIG_OF |
303 | if (priv->device->of_node) | 303 | if (priv->device->of_node) |
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c index a0f64cba86ba..2ace126533cd 100644 --- a/drivers/net/team/team.c +++ b/drivers/net/team/team.c | |||
@@ -990,7 +990,7 @@ static void team_port_disable(struct team *team, | |||
990 | #define TEAM_ENC_FEATURES (NETIF_F_HW_CSUM | NETIF_F_SG | \ | 990 | #define TEAM_ENC_FEATURES (NETIF_F_HW_CSUM | NETIF_F_SG | \ |
991 | NETIF_F_RXCSUM | NETIF_F_ALL_TSO) | 991 | NETIF_F_RXCSUM | NETIF_F_ALL_TSO) |
992 | 992 | ||
993 | static void __team_compute_features(struct team *team) | 993 | static void ___team_compute_features(struct team *team) |
994 | { | 994 | { |
995 | struct team_port *port; | 995 | struct team_port *port; |
996 | u32 vlan_features = TEAM_VLAN_FEATURES & NETIF_F_ALL_FOR_ALL; | 996 | u32 vlan_features = TEAM_VLAN_FEATURES & NETIF_F_ALL_FOR_ALL; |
@@ -1021,15 +1021,20 @@ static void __team_compute_features(struct team *team) | |||
1021 | team->dev->priv_flags &= ~IFF_XMIT_DST_RELEASE; | 1021 | team->dev->priv_flags &= ~IFF_XMIT_DST_RELEASE; |
1022 | if (dst_release_flag == (IFF_XMIT_DST_RELEASE | IFF_XMIT_DST_RELEASE_PERM)) | 1022 | if (dst_release_flag == (IFF_XMIT_DST_RELEASE | IFF_XMIT_DST_RELEASE_PERM)) |
1023 | team->dev->priv_flags |= IFF_XMIT_DST_RELEASE; | 1023 | team->dev->priv_flags |= IFF_XMIT_DST_RELEASE; |
1024 | } | ||
1024 | 1025 | ||
1026 | static void __team_compute_features(struct team *team) | ||
1027 | { | ||
1028 | ___team_compute_features(team); | ||
1025 | netdev_change_features(team->dev); | 1029 | netdev_change_features(team->dev); |
1026 | } | 1030 | } |
1027 | 1031 | ||
1028 | static void team_compute_features(struct team *team) | 1032 | static void team_compute_features(struct team *team) |
1029 | { | 1033 | { |
1030 | mutex_lock(&team->lock); | 1034 | mutex_lock(&team->lock); |
1031 | __team_compute_features(team); | 1035 | ___team_compute_features(team); |
1032 | mutex_unlock(&team->lock); | 1036 | mutex_unlock(&team->lock); |
1037 | netdev_change_features(team->dev); | ||
1033 | } | 1038 | } |
1034 | 1039 | ||
1035 | static int team_port_enter(struct team *team, struct team_port *port) | 1040 | static int team_port_enter(struct team *team, struct team_port *port) |
diff --git a/drivers/net/usb/pegasus.c b/drivers/net/usb/pegasus.c index 36cd7f016a8d..9bbe0161a2f4 100644 --- a/drivers/net/usb/pegasus.c +++ b/drivers/net/usb/pegasus.c | |||
@@ -473,7 +473,7 @@ static void read_bulk_callback(struct urb *urb) | |||
473 | goto goon; | 473 | goto goon; |
474 | } | 474 | } |
475 | 475 | ||
476 | if (!count || count < 4) | 476 | if (count < 4) |
477 | goto goon; | 477 | goto goon; |
478 | 478 | ||
479 | rx_status = buf[count - 2]; | 479 | rx_status = buf[count - 2]; |
diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c index d9d2806a47b1..dc989a8b5afb 100644 --- a/drivers/net/usb/smsc95xx.c +++ b/drivers/net/usb/smsc95xx.c | |||
@@ -61,6 +61,8 @@ | |||
61 | #define SUSPEND_ALLMODES (SUSPEND_SUSPEND0 | SUSPEND_SUSPEND1 | \ | 61 | #define SUSPEND_ALLMODES (SUSPEND_SUSPEND0 | SUSPEND_SUSPEND1 | \ |
62 | SUSPEND_SUSPEND2 | SUSPEND_SUSPEND3) | 62 | SUSPEND_SUSPEND2 | SUSPEND_SUSPEND3) |
63 | 63 | ||
64 | #define CARRIER_CHECK_DELAY (2 * HZ) | ||
65 | |||
64 | struct smsc95xx_priv { | 66 | struct smsc95xx_priv { |
65 | u32 mac_cr; | 67 | u32 mac_cr; |
66 | u32 hash_hi; | 68 | u32 hash_hi; |
@@ -69,6 +71,9 @@ struct smsc95xx_priv { | |||
69 | spinlock_t mac_cr_lock; | 71 | spinlock_t mac_cr_lock; |
70 | u8 features; | 72 | u8 features; |
71 | u8 suspend_flags; | 73 | u8 suspend_flags; |
74 | bool link_ok; | ||
75 | struct delayed_work carrier_check; | ||
76 | struct usbnet *dev; | ||
72 | }; | 77 | }; |
73 | 78 | ||
74 | static bool turbo_mode = true; | 79 | static bool turbo_mode = true; |
@@ -624,6 +629,44 @@ static void smsc95xx_status(struct usbnet *dev, struct urb *urb) | |||
624 | intdata); | 629 | intdata); |
625 | } | 630 | } |
626 | 631 | ||
632 | static void set_carrier(struct usbnet *dev, bool link) | ||
633 | { | ||
634 | struct smsc95xx_priv *pdata = (struct smsc95xx_priv *)(dev->data[0]); | ||
635 | |||
636 | if (pdata->link_ok == link) | ||
637 | return; | ||
638 | |||
639 | pdata->link_ok = link; | ||
640 | |||
641 | if (link) | ||
642 | usbnet_link_change(dev, 1, 0); | ||
643 | else | ||
644 | usbnet_link_change(dev, 0, 0); | ||
645 | } | ||
646 | |||
647 | static void check_carrier(struct work_struct *work) | ||
648 | { | ||
649 | struct smsc95xx_priv *pdata = container_of(work, struct smsc95xx_priv, | ||
650 | carrier_check.work); | ||
651 | struct usbnet *dev = pdata->dev; | ||
652 | int ret; | ||
653 | |||
654 | if (pdata->suspend_flags != 0) | ||
655 | return; | ||
656 | |||
657 | ret = smsc95xx_mdio_read(dev->net, dev->mii.phy_id, MII_BMSR); | ||
658 | if (ret < 0) { | ||
659 | netdev_warn(dev->net, "Failed to read MII_BMSR\n"); | ||
660 | return; | ||
661 | } | ||
662 | if (ret & BMSR_LSTATUS) | ||
663 | set_carrier(dev, 1); | ||
664 | else | ||
665 | set_carrier(dev, 0); | ||
666 | |||
667 | schedule_delayed_work(&pdata->carrier_check, CARRIER_CHECK_DELAY); | ||
668 | } | ||
669 | |||
627 | /* Enable or disable Tx & Rx checksum offload engines */ | 670 | /* Enable or disable Tx & Rx checksum offload engines */ |
628 | static int smsc95xx_set_features(struct net_device *netdev, | 671 | static int smsc95xx_set_features(struct net_device *netdev, |
629 | netdev_features_t features) | 672 | netdev_features_t features) |
@@ -1165,13 +1208,20 @@ static int smsc95xx_bind(struct usbnet *dev, struct usb_interface *intf) | |||
1165 | dev->net->flags |= IFF_MULTICAST; | 1208 | dev->net->flags |= IFF_MULTICAST; |
1166 | dev->net->hard_header_len += SMSC95XX_TX_OVERHEAD_CSUM; | 1209 | dev->net->hard_header_len += SMSC95XX_TX_OVERHEAD_CSUM; |
1167 | dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len; | 1210 | dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len; |
1211 | |||
1212 | pdata->dev = dev; | ||
1213 | INIT_DELAYED_WORK(&pdata->carrier_check, check_carrier); | ||
1214 | schedule_delayed_work(&pdata->carrier_check, CARRIER_CHECK_DELAY); | ||
1215 | |||
1168 | return 0; | 1216 | return 0; |
1169 | } | 1217 | } |
1170 | 1218 | ||
1171 | static void smsc95xx_unbind(struct usbnet *dev, struct usb_interface *intf) | 1219 | static void smsc95xx_unbind(struct usbnet *dev, struct usb_interface *intf) |
1172 | { | 1220 | { |
1173 | struct smsc95xx_priv *pdata = (struct smsc95xx_priv *)(dev->data[0]); | 1221 | struct smsc95xx_priv *pdata = (struct smsc95xx_priv *)(dev->data[0]); |
1222 | |||
1174 | if (pdata) { | 1223 | if (pdata) { |
1224 | cancel_delayed_work(&pdata->carrier_check); | ||
1175 | netif_dbg(dev, ifdown, dev->net, "free pdata\n"); | 1225 | netif_dbg(dev, ifdown, dev->net, "free pdata\n"); |
1176 | kfree(pdata); | 1226 | kfree(pdata); |
1177 | pdata = NULL; | 1227 | pdata = NULL; |
@@ -1695,6 +1745,7 @@ static int smsc95xx_resume(struct usb_interface *intf) | |||
1695 | 1745 | ||
1696 | /* do this first to ensure it's cleared even in error case */ | 1746 | /* do this first to ensure it's cleared even in error case */ |
1697 | pdata->suspend_flags = 0; | 1747 | pdata->suspend_flags = 0; |
1748 | schedule_delayed_work(&pdata->carrier_check, CARRIER_CHECK_DELAY); | ||
1698 | 1749 | ||
1699 | if (suspend_flags & SUSPEND_ALLMODES) { | 1750 | if (suspend_flags & SUSPEND_ALLMODES) { |
1700 | /* clear wake-up sources */ | 1751 | /* clear wake-up sources */ |
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index 49d84e540343..e0638e556fe7 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c | |||
@@ -1925,24 +1925,11 @@ static int virtnet_probe(struct virtio_device *vdev) | |||
1925 | 1925 | ||
1926 | virtio_device_ready(vdev); | 1926 | virtio_device_ready(vdev); |
1927 | 1927 | ||
1928 | /* Last of all, set up some receive buffers. */ | ||
1929 | for (i = 0; i < vi->curr_queue_pairs; i++) { | ||
1930 | try_fill_recv(vi, &vi->rq[i], GFP_KERNEL); | ||
1931 | |||
1932 | /* If we didn't even get one input buffer, we're useless. */ | ||
1933 | if (vi->rq[i].vq->num_free == | ||
1934 | virtqueue_get_vring_size(vi->rq[i].vq)) { | ||
1935 | free_unused_bufs(vi); | ||
1936 | err = -ENOMEM; | ||
1937 | goto free_recv_bufs; | ||
1938 | } | ||
1939 | } | ||
1940 | |||
1941 | vi->nb.notifier_call = &virtnet_cpu_callback; | 1928 | vi->nb.notifier_call = &virtnet_cpu_callback; |
1942 | err = register_hotcpu_notifier(&vi->nb); | 1929 | err = register_hotcpu_notifier(&vi->nb); |
1943 | if (err) { | 1930 | if (err) { |
1944 | pr_debug("virtio_net: registering cpu notifier failed\n"); | 1931 | pr_debug("virtio_net: registering cpu notifier failed\n"); |
1945 | goto free_recv_bufs; | 1932 | goto free_unregister_netdev; |
1946 | } | 1933 | } |
1947 | 1934 | ||
1948 | /* Assume link up if device can't report link status, | 1935 | /* Assume link up if device can't report link status, |
@@ -1960,10 +1947,9 @@ static int virtnet_probe(struct virtio_device *vdev) | |||
1960 | 1947 | ||
1961 | return 0; | 1948 | return 0; |
1962 | 1949 | ||
1963 | free_recv_bufs: | 1950 | free_unregister_netdev: |
1964 | vi->vdev->config->reset(vdev); | 1951 | vi->vdev->config->reset(vdev); |
1965 | 1952 | ||
1966 | free_receive_bufs(vi); | ||
1967 | unregister_netdev(dev); | 1953 | unregister_netdev(dev); |
1968 | free_vqs: | 1954 | free_vqs: |
1969 | cancel_delayed_work_sync(&vi->refill); | 1955 | cancel_delayed_work_sync(&vi->refill); |
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c index 8ff30c3bdfce..f999db2f97b4 100644 --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c | |||
@@ -3086,6 +3086,9 @@ static int vxlan_newlink(struct net *src_net, struct net_device *dev, | |||
3086 | if (data[IFLA_VXLAN_REMCSUM_NOPARTIAL]) | 3086 | if (data[IFLA_VXLAN_REMCSUM_NOPARTIAL]) |
3087 | conf.flags |= VXLAN_F_REMCSUM_NOPARTIAL; | 3087 | conf.flags |= VXLAN_F_REMCSUM_NOPARTIAL; |
3088 | 3088 | ||
3089 | if (tb[IFLA_MTU]) | ||
3090 | conf.mtu = nla_get_u32(tb[IFLA_MTU]); | ||
3091 | |||
3089 | err = vxlan_dev_configure(src_net, dev, &conf); | 3092 | err = vxlan_dev_configure(src_net, dev, &conf); |
3090 | switch (err) { | 3093 | switch (err) { |
3091 | case -ENODEV: | 3094 | case -ENODEV: |
diff --git a/drivers/perf/arm_pmu.c b/drivers/perf/arm_pmu.c index f2d01d4d9364..1b8304e1efaa 100644 --- a/drivers/perf/arm_pmu.c +++ b/drivers/perf/arm_pmu.c | |||
@@ -950,17 +950,14 @@ static int of_pmu_irq_cfg(struct arm_pmu *pmu) | |||
950 | 950 | ||
951 | /* For SPIs, we need to track the affinity per IRQ */ | 951 | /* For SPIs, we need to track the affinity per IRQ */ |
952 | if (using_spi) { | 952 | if (using_spi) { |
953 | if (i >= pdev->num_resources) { | 953 | if (i >= pdev->num_resources) |
954 | of_node_put(dn); | ||
955 | break; | 954 | break; |
956 | } | ||
957 | 955 | ||
958 | irqs[i] = cpu; | 956 | irqs[i] = cpu; |
959 | } | 957 | } |
960 | 958 | ||
961 | /* Keep track of the CPUs containing this PMU type */ | 959 | /* Keep track of the CPUs containing this PMU type */ |
962 | cpumask_set_cpu(cpu, &pmu->supported_cpus); | 960 | cpumask_set_cpu(cpu, &pmu->supported_cpus); |
963 | of_node_put(dn); | ||
964 | i++; | 961 | i++; |
965 | } while (1); | 962 | } while (1); |
966 | 963 | ||
@@ -995,9 +992,6 @@ int arm_pmu_device_probe(struct platform_device *pdev, | |||
995 | 992 | ||
996 | armpmu_init(pmu); | 993 | armpmu_init(pmu); |
997 | 994 | ||
998 | if (!__oprofile_cpu_pmu) | ||
999 | __oprofile_cpu_pmu = pmu; | ||
1000 | |||
1001 | pmu->plat_device = pdev; | 995 | pmu->plat_device = pdev; |
1002 | 996 | ||
1003 | if (node && (of_id = of_match_node(of_table, pdev->dev.of_node))) { | 997 | if (node && (of_id = of_match_node(of_table, pdev->dev.of_node))) { |
@@ -1033,6 +1027,9 @@ int arm_pmu_device_probe(struct platform_device *pdev, | |||
1033 | if (ret) | 1027 | if (ret) |
1034 | goto out_destroy; | 1028 | goto out_destroy; |
1035 | 1029 | ||
1030 | if (!__oprofile_cpu_pmu) | ||
1031 | __oprofile_cpu_pmu = pmu; | ||
1032 | |||
1036 | pr_info("enabled with %s PMU driver, %d counters available\n", | 1033 | pr_info("enabled with %s PMU driver, %d counters available\n", |
1037 | pmu->name, pmu->num_events); | 1034 | pmu->name, pmu->num_events); |
1038 | 1035 | ||
@@ -1043,6 +1040,7 @@ out_destroy: | |||
1043 | out_free: | 1040 | out_free: |
1044 | pr_info("%s: failed to register PMU devices!\n", | 1041 | pr_info("%s: failed to register PMU devices!\n", |
1045 | of_node_full_name(node)); | 1042 | of_node_full_name(node)); |
1043 | kfree(pmu->irq_affinity); | ||
1046 | kfree(pmu); | 1044 | kfree(pmu); |
1047 | return ret; | 1045 | return ret; |
1048 | } | 1046 | } |
diff --git a/drivers/pinctrl/mediatek/pinctrl-mtk-common.c b/drivers/pinctrl/mediatek/pinctrl-mtk-common.c index 207b13b618cf..a607655d7830 100644 --- a/drivers/pinctrl/mediatek/pinctrl-mtk-common.c +++ b/drivers/pinctrl/mediatek/pinctrl-mtk-common.c | |||
@@ -1256,9 +1256,10 @@ static void mtk_eint_irq_handler(struct irq_desc *desc) | |||
1256 | const struct mtk_desc_pin *pin; | 1256 | const struct mtk_desc_pin *pin; |
1257 | 1257 | ||
1258 | chained_irq_enter(chip, desc); | 1258 | chained_irq_enter(chip, desc); |
1259 | for (eint_num = 0; eint_num < pctl->devdata->ap_num; eint_num += 32) { | 1259 | for (eint_num = 0; |
1260 | eint_num < pctl->devdata->ap_num; | ||
1261 | eint_num += 32, reg += 4) { | ||
1260 | status = readl(reg); | 1262 | status = readl(reg); |
1261 | reg += 4; | ||
1262 | while (status) { | 1263 | while (status) { |
1263 | offset = __ffs(status); | 1264 | offset = __ffs(status); |
1264 | index = eint_num + offset; | 1265 | index = eint_num + offset; |
diff --git a/drivers/pinctrl/nomadik/pinctrl-nomadik.c b/drivers/pinctrl/nomadik/pinctrl-nomadik.c index ccbfc325c778..38faceff2f08 100644 --- a/drivers/pinctrl/nomadik/pinctrl-nomadik.c +++ b/drivers/pinctrl/nomadik/pinctrl-nomadik.c | |||
@@ -854,7 +854,7 @@ static int nmk_gpio_get_dir(struct gpio_chip *chip, unsigned offset) | |||
854 | 854 | ||
855 | clk_enable(nmk_chip->clk); | 855 | clk_enable(nmk_chip->clk); |
856 | 856 | ||
857 | dir = !!(readl(nmk_chip->addr + NMK_GPIO_DIR) & BIT(offset)); | 857 | dir = !(readl(nmk_chip->addr + NMK_GPIO_DIR) & BIT(offset)); |
858 | 858 | ||
859 | clk_disable(nmk_chip->clk); | 859 | clk_disable(nmk_chip->clk); |
860 | 860 | ||
diff --git a/drivers/ptp/ptp_chardev.c b/drivers/ptp/ptp_chardev.c index 579fd65299a0..d637c933c8a9 100644 --- a/drivers/ptp/ptp_chardev.c +++ b/drivers/ptp/ptp_chardev.c | |||
@@ -208,14 +208,10 @@ long ptp_ioctl(struct posix_clock *pc, unsigned int cmd, unsigned long arg) | |||
208 | break; | 208 | break; |
209 | 209 | ||
210 | case PTP_SYS_OFFSET: | 210 | case PTP_SYS_OFFSET: |
211 | sysoff = kmalloc(sizeof(*sysoff), GFP_KERNEL); | 211 | sysoff = memdup_user((void __user *)arg, sizeof(*sysoff)); |
212 | if (!sysoff) { | 212 | if (IS_ERR(sysoff)) { |
213 | err = -ENOMEM; | 213 | err = PTR_ERR(sysoff); |
214 | break; | 214 | sysoff = NULL; |
215 | } | ||
216 | if (copy_from_user(sysoff, (void __user *)arg, | ||
217 | sizeof(*sysoff))) { | ||
218 | err = -EFAULT; | ||
219 | break; | 215 | break; |
220 | } | 216 | } |
221 | if (sysoff->n_samples > PTP_MAX_SAMPLES) { | 217 | if (sysoff->n_samples > PTP_MAX_SAMPLES) { |
diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h index 8f90d9e77104..969c312de1be 100644 --- a/drivers/scsi/aacraid/aacraid.h +++ b/drivers/scsi/aacraid/aacraid.h | |||
@@ -621,6 +621,11 @@ struct aac_driver_ident | |||
621 | #define AAC_QUIRK_SCSI_32 0x0020 | 621 | #define AAC_QUIRK_SCSI_32 0x0020 |
622 | 622 | ||
623 | /* | 623 | /* |
624 | * SRC based adapters support the AifReqEvent functions | ||
625 | */ | ||
626 | #define AAC_QUIRK_SRC 0x0040 | ||
627 | |||
628 | /* | ||
624 | * The adapter interface specs all queues to be located in the same | 629 | * The adapter interface specs all queues to be located in the same |
625 | * physically contiguous block. The host structure that defines the | 630 | * physically contiguous block. The host structure that defines the |
626 | * commuication queues will assume they are each a separate physically | 631 | * commuication queues will assume they are each a separate physically |
diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c index a943bd230bc2..79871f3519ff 100644 --- a/drivers/scsi/aacraid/linit.c +++ b/drivers/scsi/aacraid/linit.c | |||
@@ -236,10 +236,10 @@ static struct aac_driver_ident aac_drivers[] = { | |||
236 | { aac_rx_init, "aacraid", "ADAPTEC ", "RAID ", 2 }, /* Adaptec Catch All */ | 236 | { aac_rx_init, "aacraid", "ADAPTEC ", "RAID ", 2 }, /* Adaptec Catch All */ |
237 | { aac_rkt_init, "aacraid", "ADAPTEC ", "RAID ", 2 }, /* Adaptec Rocket Catch All */ | 237 | { aac_rkt_init, "aacraid", "ADAPTEC ", "RAID ", 2 }, /* Adaptec Rocket Catch All */ |
238 | { aac_nark_init, "aacraid", "ADAPTEC ", "RAID ", 2 }, /* Adaptec NEMER/ARK Catch All */ | 238 | { aac_nark_init, "aacraid", "ADAPTEC ", "RAID ", 2 }, /* Adaptec NEMER/ARK Catch All */ |
239 | { aac_src_init, "aacraid", "ADAPTEC ", "RAID ", 2 }, /* Adaptec PMC Series 6 (Tupelo) */ | 239 | { aac_src_init, "aacraid", "ADAPTEC ", "RAID ", 2, AAC_QUIRK_SRC }, /* Adaptec PMC Series 6 (Tupelo) */ |
240 | { aac_srcv_init, "aacraid", "ADAPTEC ", "RAID ", 2 }, /* Adaptec PMC Series 7 (Denali) */ | 240 | { aac_srcv_init, "aacraid", "ADAPTEC ", "RAID ", 2, AAC_QUIRK_SRC }, /* Adaptec PMC Series 7 (Denali) */ |
241 | { aac_srcv_init, "aacraid", "ADAPTEC ", "RAID ", 2 }, /* Adaptec PMC Series 8 */ | 241 | { aac_srcv_init, "aacraid", "ADAPTEC ", "RAID ", 2, AAC_QUIRK_SRC }, /* Adaptec PMC Series 8 */ |
242 | { aac_srcv_init, "aacraid", "ADAPTEC ", "RAID ", 2 } /* Adaptec PMC Series 9 */ | 242 | { aac_srcv_init, "aacraid", "ADAPTEC ", "RAID ", 2, AAC_QUIRK_SRC } /* Adaptec PMC Series 9 */ |
243 | }; | 243 | }; |
244 | 244 | ||
245 | /** | 245 | /** |
@@ -1299,7 +1299,8 @@ static int aac_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) | |||
1299 | else | 1299 | else |
1300 | shost->this_id = shost->max_id; | 1300 | shost->this_id = shost->max_id; |
1301 | 1301 | ||
1302 | aac_intr_normal(aac, 0, 2, 0, NULL); | 1302 | if (aac_drivers[index].quirks & AAC_QUIRK_SRC) |
1303 | aac_intr_normal(aac, 0, 2, 0, NULL); | ||
1303 | 1304 | ||
1304 | /* | 1305 | /* |
1305 | * dmb - we may need to move the setting of these parms somewhere else once | 1306 | * dmb - we may need to move the setting of these parms somewhere else once |
diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c index 6a4df5a315e9..6bff13e7afc7 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c +++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c | |||
@@ -7975,13 +7975,14 @@ mpt3sas_scsih_event_callback(struct MPT3SAS_ADAPTER *ioc, u8 msix_index, | |||
7975 | ActiveCableEventData = | 7975 | ActiveCableEventData = |
7976 | (Mpi26EventDataActiveCableExcept_t *) mpi_reply->EventData; | 7976 | (Mpi26EventDataActiveCableExcept_t *) mpi_reply->EventData; |
7977 | if (ActiveCableEventData->ReasonCode == | 7977 | if (ActiveCableEventData->ReasonCode == |
7978 | MPI26_EVENT_ACTIVE_CABLE_INSUFFICIENT_POWER) | 7978 | MPI26_EVENT_ACTIVE_CABLE_INSUFFICIENT_POWER) { |
7979 | pr_info(MPT3SAS_FMT "Currently an active cable with ReceptacleID %d", | 7979 | pr_info(MPT3SAS_FMT "Currently an active cable with ReceptacleID %d", |
7980 | ioc->name, ActiveCableEventData->ReceptacleID); | 7980 | ioc->name, ActiveCableEventData->ReceptacleID); |
7981 | pr_info("cannot be powered and devices connected to this active cable"); | 7981 | pr_info("cannot be powered and devices connected to this active cable"); |
7982 | pr_info("will not be seen. This active cable"); | 7982 | pr_info("will not be seen. This active cable"); |
7983 | pr_info("requires %d mW of power", | 7983 | pr_info("requires %d mW of power", |
7984 | ActiveCableEventData->ActiveCablePowerRequirement); | 7984 | ActiveCableEventData->ActiveCablePowerRequirement); |
7985 | } | ||
7985 | break; | 7986 | break; |
7986 | 7987 | ||
7987 | default: /* ignore the rest */ | 7988 | default: /* ignore the rest */ |
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c index b2e332af0f51..c71344aebdbb 100644 --- a/drivers/scsi/scsi_lib.c +++ b/drivers/scsi/scsi_lib.c | |||
@@ -821,9 +821,12 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes) | |||
821 | } | 821 | } |
822 | 822 | ||
823 | /* | 823 | /* |
824 | * If we finished all bytes in the request we are done now. | 824 | * special case: failed zero length commands always need to |
825 | * drop down into the retry code. Otherwise, if we finished | ||
826 | * all bytes in the request we are done now. | ||
825 | */ | 827 | */ |
826 | if (!scsi_end_request(req, error, good_bytes, 0)) | 828 | if (!(blk_rq_bytes(req) == 0 && error) && |
829 | !scsi_end_request(req, error, good_bytes, 0)) | ||
827 | return; | 830 | return; |
828 | 831 | ||
829 | /* | 832 | /* |
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c index 428c03ef02b2..f459dff30512 100644 --- a/drivers/scsi/sd.c +++ b/drivers/scsi/sd.c | |||
@@ -1398,11 +1398,15 @@ static int media_not_present(struct scsi_disk *sdkp, | |||
1398 | **/ | 1398 | **/ |
1399 | static unsigned int sd_check_events(struct gendisk *disk, unsigned int clearing) | 1399 | static unsigned int sd_check_events(struct gendisk *disk, unsigned int clearing) |
1400 | { | 1400 | { |
1401 | struct scsi_disk *sdkp = scsi_disk(disk); | 1401 | struct scsi_disk *sdkp = scsi_disk_get(disk); |
1402 | struct scsi_device *sdp = sdkp->device; | 1402 | struct scsi_device *sdp; |
1403 | struct scsi_sense_hdr *sshdr = NULL; | 1403 | struct scsi_sense_hdr *sshdr = NULL; |
1404 | int retval; | 1404 | int retval; |
1405 | 1405 | ||
1406 | if (!sdkp) | ||
1407 | return 0; | ||
1408 | |||
1409 | sdp = sdkp->device; | ||
1406 | SCSI_LOG_HLQUEUE(3, sd_printk(KERN_INFO, sdkp, "sd_check_events\n")); | 1410 | SCSI_LOG_HLQUEUE(3, sd_printk(KERN_INFO, sdkp, "sd_check_events\n")); |
1407 | 1411 | ||
1408 | /* | 1412 | /* |
@@ -1459,6 +1463,7 @@ out: | |||
1459 | kfree(sshdr); | 1463 | kfree(sshdr); |
1460 | retval = sdp->changed ? DISK_EVENT_MEDIA_CHANGE : 0; | 1464 | retval = sdp->changed ? DISK_EVENT_MEDIA_CHANGE : 0; |
1461 | sdp->changed = 0; | 1465 | sdp->changed = 0; |
1466 | scsi_disk_put(sdkp); | ||
1462 | return retval; | 1467 | return retval; |
1463 | } | 1468 | } |
1464 | 1469 | ||
diff --git a/drivers/thermal/int340x_thermal/int3406_thermal.c b/drivers/thermal/int340x_thermal/int3406_thermal.c index 13d431cbd29e..a578cd257db4 100644 --- a/drivers/thermal/int340x_thermal/int3406_thermal.c +++ b/drivers/thermal/int340x_thermal/int3406_thermal.c | |||
@@ -177,7 +177,7 @@ static int int3406_thermal_probe(struct platform_device *pdev) | |||
177 | return -ENODEV; | 177 | return -ENODEV; |
178 | d->raw_bd = bd; | 178 | d->raw_bd = bd; |
179 | 179 | ||
180 | ret = acpi_video_get_levels(ACPI_COMPANION(&pdev->dev), &d->br); | 180 | ret = acpi_video_get_levels(ACPI_COMPANION(&pdev->dev), &d->br, NULL); |
181 | if (ret) | 181 | if (ret) |
182 | return ret; | 182 | return ret; |
183 | 183 | ||
diff --git a/drivers/vfio/pci/vfio_pci_config.c b/drivers/vfio/pci/vfio_pci_config.c index 93601407dab8..688691d9058d 100644 --- a/drivers/vfio/pci/vfio_pci_config.c +++ b/drivers/vfio/pci/vfio_pci_config.c | |||
@@ -749,7 +749,8 @@ static int vfio_vpd_config_write(struct vfio_pci_device *vdev, int pos, | |||
749 | if (pci_write_vpd(pdev, addr & ~PCI_VPD_ADDR_F, 4, &data) != 4) | 749 | if (pci_write_vpd(pdev, addr & ~PCI_VPD_ADDR_F, 4, &data) != 4) |
750 | return count; | 750 | return count; |
751 | } else { | 751 | } else { |
752 | if (pci_read_vpd(pdev, addr, 4, &data) != 4) | 752 | data = 0; |
753 | if (pci_read_vpd(pdev, addr, 4, &data) < 0) | ||
753 | return count; | 754 | return count; |
754 | *pdata = cpu_to_le32(data); | 755 | *pdata = cpu_to_le32(data); |
755 | } | 756 | } |
diff --git a/drivers/vfio/pci/vfio_pci_intrs.c b/drivers/vfio/pci/vfio_pci_intrs.c index e9ea3fef144a..15ecfc9c5f6c 100644 --- a/drivers/vfio/pci/vfio_pci_intrs.c +++ b/drivers/vfio/pci/vfio_pci_intrs.c | |||
@@ -228,9 +228,9 @@ static int vfio_intx_set_signal(struct vfio_pci_device *vdev, int fd) | |||
228 | 228 | ||
229 | static void vfio_intx_disable(struct vfio_pci_device *vdev) | 229 | static void vfio_intx_disable(struct vfio_pci_device *vdev) |
230 | { | 230 | { |
231 | vfio_intx_set_signal(vdev, -1); | ||
232 | vfio_virqfd_disable(&vdev->ctx[0].unmask); | 231 | vfio_virqfd_disable(&vdev->ctx[0].unmask); |
233 | vfio_virqfd_disable(&vdev->ctx[0].mask); | 232 | vfio_virqfd_disable(&vdev->ctx[0].mask); |
233 | vfio_intx_set_signal(vdev, -1); | ||
234 | vdev->irq_type = VFIO_PCI_NUM_IRQS; | 234 | vdev->irq_type = VFIO_PCI_NUM_IRQS; |
235 | vdev->num_ctx = 0; | 235 | vdev->num_ctx = 0; |
236 | kfree(vdev->ctx); | 236 | kfree(vdev->ctx); |
@@ -401,13 +401,13 @@ static void vfio_msi_disable(struct vfio_pci_device *vdev, bool msix) | |||
401 | struct pci_dev *pdev = vdev->pdev; | 401 | struct pci_dev *pdev = vdev->pdev; |
402 | int i; | 402 | int i; |
403 | 403 | ||
404 | vfio_msi_set_block(vdev, 0, vdev->num_ctx, NULL, msix); | ||
405 | |||
406 | for (i = 0; i < vdev->num_ctx; i++) { | 404 | for (i = 0; i < vdev->num_ctx; i++) { |
407 | vfio_virqfd_disable(&vdev->ctx[i].unmask); | 405 | vfio_virqfd_disable(&vdev->ctx[i].unmask); |
408 | vfio_virqfd_disable(&vdev->ctx[i].mask); | 406 | vfio_virqfd_disable(&vdev->ctx[i].mask); |
409 | } | 407 | } |
410 | 408 | ||
409 | vfio_msi_set_block(vdev, 0, vdev->num_ctx, NULL, msix); | ||
410 | |||
411 | if (msix) { | 411 | if (msix) { |
412 | pci_disable_msix(vdev->pdev); | 412 | pci_disable_msix(vdev->pdev); |
413 | kfree(vdev->msix); | 413 | kfree(vdev->msix); |
diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c index 15a65823aad9..2ba19424e4a1 100644 --- a/drivers/vfio/vfio_iommu_type1.c +++ b/drivers/vfio/vfio_iommu_type1.c | |||
@@ -515,7 +515,7 @@ static int map_try_harder(struct vfio_domain *domain, dma_addr_t iova, | |||
515 | unsigned long pfn, long npage, int prot) | 515 | unsigned long pfn, long npage, int prot) |
516 | { | 516 | { |
517 | long i; | 517 | long i; |
518 | int ret; | 518 | int ret = 0; |
519 | 519 | ||
520 | for (i = 0; i < npage; i++, pfn++, iova += PAGE_SIZE) { | 520 | for (i = 0; i < npage; i++, pfn++, iova += PAGE_SIZE) { |
521 | ret = iommu_map(domain->domain, iova, | 521 | ret = iommu_map(domain->domain, iova, |
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index a400951e8678..689d25ac6a68 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c | |||
@@ -2042,6 +2042,11 @@ int btrfs_discard_extent(struct btrfs_root *root, u64 bytenr, | |||
2042 | struct btrfs_bio *bbio = NULL; | 2042 | struct btrfs_bio *bbio = NULL; |
2043 | 2043 | ||
2044 | 2044 | ||
2045 | /* | ||
2046 | * Avoid races with device replace and make sure our bbio has devices | ||
2047 | * associated to its stripes that don't go away while we are discarding. | ||
2048 | */ | ||
2049 | btrfs_bio_counter_inc_blocked(root->fs_info); | ||
2045 | /* Tell the block device(s) that the sectors can be discarded */ | 2050 | /* Tell the block device(s) that the sectors can be discarded */ |
2046 | ret = btrfs_map_block(root->fs_info, REQ_DISCARD, | 2051 | ret = btrfs_map_block(root->fs_info, REQ_DISCARD, |
2047 | bytenr, &num_bytes, &bbio, 0); | 2052 | bytenr, &num_bytes, &bbio, 0); |
@@ -2074,6 +2079,7 @@ int btrfs_discard_extent(struct btrfs_root *root, u64 bytenr, | |||
2074 | } | 2079 | } |
2075 | btrfs_put_bbio(bbio); | 2080 | btrfs_put_bbio(bbio); |
2076 | } | 2081 | } |
2082 | btrfs_bio_counter_dec(root->fs_info); | ||
2077 | 2083 | ||
2078 | if (actual_bytes) | 2084 | if (actual_bytes) |
2079 | *actual_bytes = discarded_bytes; | 2085 | *actual_bytes = discarded_bytes; |
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index 3cd57825c75f..6e953de83f08 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c | |||
@@ -2025,9 +2025,16 @@ int repair_io_failure(struct inode *inode, u64 start, u64 length, u64 logical, | |||
2025 | bio->bi_iter.bi_size = 0; | 2025 | bio->bi_iter.bi_size = 0; |
2026 | map_length = length; | 2026 | map_length = length; |
2027 | 2027 | ||
2028 | /* | ||
2029 | * Avoid races with device replace and make sure our bbio has devices | ||
2030 | * associated to its stripes that don't go away while we are doing the | ||
2031 | * read repair operation. | ||
2032 | */ | ||
2033 | btrfs_bio_counter_inc_blocked(fs_info); | ||
2028 | ret = btrfs_map_block(fs_info, WRITE, logical, | 2034 | ret = btrfs_map_block(fs_info, WRITE, logical, |
2029 | &map_length, &bbio, mirror_num); | 2035 | &map_length, &bbio, mirror_num); |
2030 | if (ret) { | 2036 | if (ret) { |
2037 | btrfs_bio_counter_dec(fs_info); | ||
2031 | bio_put(bio); | 2038 | bio_put(bio); |
2032 | return -EIO; | 2039 | return -EIO; |
2033 | } | 2040 | } |
@@ -2037,6 +2044,7 @@ int repair_io_failure(struct inode *inode, u64 start, u64 length, u64 logical, | |||
2037 | dev = bbio->stripes[mirror_num-1].dev; | 2044 | dev = bbio->stripes[mirror_num-1].dev; |
2038 | btrfs_put_bbio(bbio); | 2045 | btrfs_put_bbio(bbio); |
2039 | if (!dev || !dev->bdev || !dev->writeable) { | 2046 | if (!dev || !dev->bdev || !dev->writeable) { |
2047 | btrfs_bio_counter_dec(fs_info); | ||
2040 | bio_put(bio); | 2048 | bio_put(bio); |
2041 | return -EIO; | 2049 | return -EIO; |
2042 | } | 2050 | } |
@@ -2045,6 +2053,7 @@ int repair_io_failure(struct inode *inode, u64 start, u64 length, u64 logical, | |||
2045 | 2053 | ||
2046 | if (btrfsic_submit_bio_wait(WRITE_SYNC, bio)) { | 2054 | if (btrfsic_submit_bio_wait(WRITE_SYNC, bio)) { |
2047 | /* try to remap that extent elsewhere? */ | 2055 | /* try to remap that extent elsewhere? */ |
2056 | btrfs_bio_counter_dec(fs_info); | ||
2048 | bio_put(bio); | 2057 | bio_put(bio); |
2049 | btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_WRITE_ERRS); | 2058 | btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_WRITE_ERRS); |
2050 | return -EIO; | 2059 | return -EIO; |
@@ -2054,6 +2063,7 @@ int repair_io_failure(struct inode *inode, u64 start, u64 length, u64 logical, | |||
2054 | "read error corrected: ino %llu off %llu (dev %s sector %llu)", | 2063 | "read error corrected: ino %llu off %llu (dev %s sector %llu)", |
2055 | btrfs_ino(inode), start, | 2064 | btrfs_ino(inode), start, |
2056 | rcu_str_deref(dev->name), sector); | 2065 | rcu_str_deref(dev->name), sector); |
2066 | btrfs_bio_counter_dec(fs_info); | ||
2057 | bio_put(bio); | 2067 | bio_put(bio); |
2058 | return 0; | 2068 | return 0; |
2059 | } | 2069 | } |
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 270499598ed4..8b1212e8f7a8 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c | |||
@@ -6979,7 +6979,18 @@ insert: | |||
6979 | * existing will always be non-NULL, since there must be | 6979 | * existing will always be non-NULL, since there must be |
6980 | * extent causing the -EEXIST. | 6980 | * extent causing the -EEXIST. |
6981 | */ | 6981 | */ |
6982 | if (start >= extent_map_end(existing) || | 6982 | if (existing->start == em->start && |
6983 | extent_map_end(existing) == extent_map_end(em) && | ||
6984 | em->block_start == existing->block_start) { | ||
6985 | /* | ||
6986 | * these two extents are the same, it happens | ||
6987 | * with inlines especially | ||
6988 | */ | ||
6989 | free_extent_map(em); | ||
6990 | em = existing; | ||
6991 | err = 0; | ||
6992 | |||
6993 | } else if (start >= extent_map_end(existing) || | ||
6983 | start <= existing->start) { | 6994 | start <= existing->start) { |
6984 | /* | 6995 | /* |
6985 | * The existing extent map is the one nearest to | 6996 | * The existing extent map is the one nearest to |
diff --git a/fs/btrfs/ordered-data.c b/fs/btrfs/ordered-data.c index 559170464d7c..e96634a725c3 100644 --- a/fs/btrfs/ordered-data.c +++ b/fs/btrfs/ordered-data.c | |||
@@ -718,12 +718,13 @@ int btrfs_wait_ordered_extents(struct btrfs_root *root, int nr, | |||
718 | return count; | 718 | return count; |
719 | } | 719 | } |
720 | 720 | ||
721 | void btrfs_wait_ordered_roots(struct btrfs_fs_info *fs_info, int nr, | 721 | int btrfs_wait_ordered_roots(struct btrfs_fs_info *fs_info, int nr, |
722 | const u64 range_start, const u64 range_len) | 722 | const u64 range_start, const u64 range_len) |
723 | { | 723 | { |
724 | struct btrfs_root *root; | 724 | struct btrfs_root *root; |
725 | struct list_head splice; | 725 | struct list_head splice; |
726 | int done; | 726 | int done; |
727 | int total_done = 0; | ||
727 | 728 | ||
728 | INIT_LIST_HEAD(&splice); | 729 | INIT_LIST_HEAD(&splice); |
729 | 730 | ||
@@ -742,6 +743,7 @@ void btrfs_wait_ordered_roots(struct btrfs_fs_info *fs_info, int nr, | |||
742 | done = btrfs_wait_ordered_extents(root, nr, | 743 | done = btrfs_wait_ordered_extents(root, nr, |
743 | range_start, range_len); | 744 | range_start, range_len); |
744 | btrfs_put_fs_root(root); | 745 | btrfs_put_fs_root(root); |
746 | total_done += done; | ||
745 | 747 | ||
746 | spin_lock(&fs_info->ordered_root_lock); | 748 | spin_lock(&fs_info->ordered_root_lock); |
747 | if (nr != -1) { | 749 | if (nr != -1) { |
@@ -752,6 +754,8 @@ void btrfs_wait_ordered_roots(struct btrfs_fs_info *fs_info, int nr, | |||
752 | list_splice_tail(&splice, &fs_info->ordered_roots); | 754 | list_splice_tail(&splice, &fs_info->ordered_roots); |
753 | spin_unlock(&fs_info->ordered_root_lock); | 755 | spin_unlock(&fs_info->ordered_root_lock); |
754 | mutex_unlock(&fs_info->ordered_operations_mutex); | 756 | mutex_unlock(&fs_info->ordered_operations_mutex); |
757 | |||
758 | return total_done; | ||
755 | } | 759 | } |
756 | 760 | ||
757 | /* | 761 | /* |
diff --git a/fs/btrfs/ordered-data.h b/fs/btrfs/ordered-data.h index 2049c9be85ee..451507776ff5 100644 --- a/fs/btrfs/ordered-data.h +++ b/fs/btrfs/ordered-data.h | |||
@@ -199,7 +199,7 @@ int btrfs_find_ordered_sum(struct inode *inode, u64 offset, u64 disk_bytenr, | |||
199 | u32 *sum, int len); | 199 | u32 *sum, int len); |
200 | int btrfs_wait_ordered_extents(struct btrfs_root *root, int nr, | 200 | int btrfs_wait_ordered_extents(struct btrfs_root *root, int nr, |
201 | const u64 range_start, const u64 range_len); | 201 | const u64 range_start, const u64 range_len); |
202 | void btrfs_wait_ordered_roots(struct btrfs_fs_info *fs_info, int nr, | 202 | int btrfs_wait_ordered_roots(struct btrfs_fs_info *fs_info, int nr, |
203 | const u64 range_start, const u64 range_len); | 203 | const u64 range_start, const u64 range_len); |
204 | void btrfs_get_logged_extents(struct inode *inode, | 204 | void btrfs_get_logged_extents(struct inode *inode, |
205 | struct list_head *logged_list, | 205 | struct list_head *logged_list, |
diff --git a/fs/btrfs/reada.c b/fs/btrfs/reada.c index 298631eaee78..8428db7cd88f 100644 --- a/fs/btrfs/reada.c +++ b/fs/btrfs/reada.c | |||
@@ -761,12 +761,14 @@ static void __reada_start_machine(struct btrfs_fs_info *fs_info) | |||
761 | 761 | ||
762 | do { | 762 | do { |
763 | enqueued = 0; | 763 | enqueued = 0; |
764 | mutex_lock(&fs_devices->device_list_mutex); | ||
764 | list_for_each_entry(device, &fs_devices->devices, dev_list) { | 765 | list_for_each_entry(device, &fs_devices->devices, dev_list) { |
765 | if (atomic_read(&device->reada_in_flight) < | 766 | if (atomic_read(&device->reada_in_flight) < |
766 | MAX_IN_FLIGHT) | 767 | MAX_IN_FLIGHT) |
767 | enqueued += reada_start_machine_dev(fs_info, | 768 | enqueued += reada_start_machine_dev(fs_info, |
768 | device); | 769 | device); |
769 | } | 770 | } |
771 | mutex_unlock(&fs_devices->device_list_mutex); | ||
770 | total += enqueued; | 772 | total += enqueued; |
771 | } while (enqueued && total < 10000); | 773 | } while (enqueued && total < 10000); |
772 | 774 | ||
diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c index 46d847f66e4b..70427ef66b04 100644 --- a/fs/btrfs/scrub.c +++ b/fs/btrfs/scrub.c | |||
@@ -3582,6 +3582,46 @@ int scrub_enumerate_chunks(struct scrub_ctx *sctx, | |||
3582 | */ | 3582 | */ |
3583 | scrub_pause_on(fs_info); | 3583 | scrub_pause_on(fs_info); |
3584 | ret = btrfs_inc_block_group_ro(root, cache); | 3584 | ret = btrfs_inc_block_group_ro(root, cache); |
3585 | if (!ret && is_dev_replace) { | ||
3586 | /* | ||
3587 | * If we are doing a device replace wait for any tasks | ||
3588 | * that started dellaloc right before we set the block | ||
3589 | * group to RO mode, as they might have just allocated | ||
3590 | * an extent from it or decided they could do a nocow | ||
3591 | * write. And if any such tasks did that, wait for their | ||
3592 | * ordered extents to complete and then commit the | ||
3593 | * current transaction, so that we can later see the new | ||
3594 | * extent items in the extent tree - the ordered extents | ||
3595 | * create delayed data references (for cow writes) when | ||
3596 | * they complete, which will be run and insert the | ||
3597 | * corresponding extent items into the extent tree when | ||
3598 | * we commit the transaction they used when running | ||
3599 | * inode.c:btrfs_finish_ordered_io(). We later use | ||
3600 | * the commit root of the extent tree to find extents | ||
3601 | * to copy from the srcdev into the tgtdev, and we don't | ||
3602 | * want to miss any new extents. | ||
3603 | */ | ||
3604 | btrfs_wait_block_group_reservations(cache); | ||
3605 | btrfs_wait_nocow_writers(cache); | ||
3606 | ret = btrfs_wait_ordered_roots(fs_info, -1, | ||
3607 | cache->key.objectid, | ||
3608 | cache->key.offset); | ||
3609 | if (ret > 0) { | ||
3610 | struct btrfs_trans_handle *trans; | ||
3611 | |||
3612 | trans = btrfs_join_transaction(root); | ||
3613 | if (IS_ERR(trans)) | ||
3614 | ret = PTR_ERR(trans); | ||
3615 | else | ||
3616 | ret = btrfs_commit_transaction(trans, | ||
3617 | root); | ||
3618 | if (ret) { | ||
3619 | scrub_pause_off(fs_info); | ||
3620 | btrfs_put_block_group(cache); | ||
3621 | break; | ||
3622 | } | ||
3623 | } | ||
3624 | } | ||
3585 | scrub_pause_off(fs_info); | 3625 | scrub_pause_off(fs_info); |
3586 | 3626 | ||
3587 | if (ret == 0) { | 3627 | if (ret == 0) { |
@@ -3602,9 +3642,11 @@ int scrub_enumerate_chunks(struct scrub_ctx *sctx, | |||
3602 | break; | 3642 | break; |
3603 | } | 3643 | } |
3604 | 3644 | ||
3645 | btrfs_dev_replace_lock(&fs_info->dev_replace, 1); | ||
3605 | dev_replace->cursor_right = found_key.offset + length; | 3646 | dev_replace->cursor_right = found_key.offset + length; |
3606 | dev_replace->cursor_left = found_key.offset; | 3647 | dev_replace->cursor_left = found_key.offset; |
3607 | dev_replace->item_needs_writeback = 1; | 3648 | dev_replace->item_needs_writeback = 1; |
3649 | btrfs_dev_replace_unlock(&fs_info->dev_replace, 1); | ||
3608 | ret = scrub_chunk(sctx, scrub_dev, chunk_offset, length, | 3650 | ret = scrub_chunk(sctx, scrub_dev, chunk_offset, length, |
3609 | found_key.offset, cache, is_dev_replace); | 3651 | found_key.offset, cache, is_dev_replace); |
3610 | 3652 | ||
@@ -3640,6 +3682,11 @@ int scrub_enumerate_chunks(struct scrub_ctx *sctx, | |||
3640 | 3682 | ||
3641 | scrub_pause_off(fs_info); | 3683 | scrub_pause_off(fs_info); |
3642 | 3684 | ||
3685 | btrfs_dev_replace_lock(&fs_info->dev_replace, 1); | ||
3686 | dev_replace->cursor_left = dev_replace->cursor_right; | ||
3687 | dev_replace->item_needs_writeback = 1; | ||
3688 | btrfs_dev_replace_unlock(&fs_info->dev_replace, 1); | ||
3689 | |||
3643 | if (ro_set) | 3690 | if (ro_set) |
3644 | btrfs_dec_block_group_ro(root, cache); | 3691 | btrfs_dec_block_group_ro(root, cache); |
3645 | 3692 | ||
@@ -3677,9 +3724,6 @@ int scrub_enumerate_chunks(struct scrub_ctx *sctx, | |||
3677 | ret = -ENOMEM; | 3724 | ret = -ENOMEM; |
3678 | break; | 3725 | break; |
3679 | } | 3726 | } |
3680 | |||
3681 | dev_replace->cursor_left = dev_replace->cursor_right; | ||
3682 | dev_replace->item_needs_writeback = 1; | ||
3683 | skip: | 3727 | skip: |
3684 | key.offset = found_key.offset + length; | 3728 | key.offset = found_key.offset + length; |
3685 | btrfs_release_path(path); | 3729 | btrfs_release_path(path); |
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index bdc62561ede8..da9e0036a864 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c | |||
@@ -2761,6 +2761,7 @@ int btrfs_remove_chunk(struct btrfs_trans_handle *trans, | |||
2761 | u64 dev_extent_len = 0; | 2761 | u64 dev_extent_len = 0; |
2762 | u64 chunk_objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID; | 2762 | u64 chunk_objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID; |
2763 | int i, ret = 0; | 2763 | int i, ret = 0; |
2764 | struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices; | ||
2764 | 2765 | ||
2765 | /* Just in case */ | 2766 | /* Just in case */ |
2766 | root = root->fs_info->chunk_root; | 2767 | root = root->fs_info->chunk_root; |
@@ -2787,12 +2788,19 @@ int btrfs_remove_chunk(struct btrfs_trans_handle *trans, | |||
2787 | check_system_chunk(trans, extent_root, map->type); | 2788 | check_system_chunk(trans, extent_root, map->type); |
2788 | unlock_chunks(root->fs_info->chunk_root); | 2789 | unlock_chunks(root->fs_info->chunk_root); |
2789 | 2790 | ||
2791 | /* | ||
2792 | * Take the device list mutex to prevent races with the final phase of | ||
2793 | * a device replace operation that replaces the device object associated | ||
2794 | * with map stripes (dev-replace.c:btrfs_dev_replace_finishing()). | ||
2795 | */ | ||
2796 | mutex_lock(&fs_devices->device_list_mutex); | ||
2790 | for (i = 0; i < map->num_stripes; i++) { | 2797 | for (i = 0; i < map->num_stripes; i++) { |
2791 | struct btrfs_device *device = map->stripes[i].dev; | 2798 | struct btrfs_device *device = map->stripes[i].dev; |
2792 | ret = btrfs_free_dev_extent(trans, device, | 2799 | ret = btrfs_free_dev_extent(trans, device, |
2793 | map->stripes[i].physical, | 2800 | map->stripes[i].physical, |
2794 | &dev_extent_len); | 2801 | &dev_extent_len); |
2795 | if (ret) { | 2802 | if (ret) { |
2803 | mutex_unlock(&fs_devices->device_list_mutex); | ||
2796 | btrfs_abort_transaction(trans, root, ret); | 2804 | btrfs_abort_transaction(trans, root, ret); |
2797 | goto out; | 2805 | goto out; |
2798 | } | 2806 | } |
@@ -2811,11 +2819,14 @@ int btrfs_remove_chunk(struct btrfs_trans_handle *trans, | |||
2811 | if (map->stripes[i].dev) { | 2819 | if (map->stripes[i].dev) { |
2812 | ret = btrfs_update_device(trans, map->stripes[i].dev); | 2820 | ret = btrfs_update_device(trans, map->stripes[i].dev); |
2813 | if (ret) { | 2821 | if (ret) { |
2822 | mutex_unlock(&fs_devices->device_list_mutex); | ||
2814 | btrfs_abort_transaction(trans, root, ret); | 2823 | btrfs_abort_transaction(trans, root, ret); |
2815 | goto out; | 2824 | goto out; |
2816 | } | 2825 | } |
2817 | } | 2826 | } |
2818 | } | 2827 | } |
2828 | mutex_unlock(&fs_devices->device_list_mutex); | ||
2829 | |||
2819 | ret = btrfs_free_chunk(trans, root, chunk_objectid, chunk_offset); | 2830 | ret = btrfs_free_chunk(trans, root, chunk_objectid, chunk_offset); |
2820 | if (ret) { | 2831 | if (ret) { |
2821 | btrfs_abort_transaction(trans, root, ret); | 2832 | btrfs_abort_transaction(trans, root, ret); |
@@ -5762,20 +5773,17 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw, | |||
5762 | } | 5773 | } |
5763 | } | 5774 | } |
5764 | if (found) { | 5775 | if (found) { |
5765 | if (physical_of_found + map->stripe_len <= | 5776 | struct btrfs_bio_stripe *tgtdev_stripe = |
5766 | dev_replace->cursor_left) { | 5777 | bbio->stripes + num_stripes; |
5767 | struct btrfs_bio_stripe *tgtdev_stripe = | ||
5768 | bbio->stripes + num_stripes; | ||
5769 | 5778 | ||
5770 | tgtdev_stripe->physical = physical_of_found; | 5779 | tgtdev_stripe->physical = physical_of_found; |
5771 | tgtdev_stripe->length = | 5780 | tgtdev_stripe->length = |
5772 | bbio->stripes[index_srcdev].length; | 5781 | bbio->stripes[index_srcdev].length; |
5773 | tgtdev_stripe->dev = dev_replace->tgtdev; | 5782 | tgtdev_stripe->dev = dev_replace->tgtdev; |
5774 | bbio->tgtdev_map[index_srcdev] = num_stripes; | 5783 | bbio->tgtdev_map[index_srcdev] = num_stripes; |
5775 | 5784 | ||
5776 | tgtdev_indexes++; | 5785 | tgtdev_indexes++; |
5777 | num_stripes++; | 5786 | num_stripes++; |
5778 | } | ||
5779 | } | 5787 | } |
5780 | } | 5788 | } |
5781 | 5789 | ||
diff --git a/fs/cachefiles/interface.c b/fs/cachefiles/interface.c index 861d611b8c05..ce5f345d70f5 100644 --- a/fs/cachefiles/interface.c +++ b/fs/cachefiles/interface.c | |||
@@ -380,7 +380,7 @@ static void cachefiles_sync_cache(struct fscache_cache *_cache) | |||
380 | * check if the backing cache is updated to FS-Cache | 380 | * check if the backing cache is updated to FS-Cache |
381 | * - called by FS-Cache when evaluates if need to invalidate the cache | 381 | * - called by FS-Cache when evaluates if need to invalidate the cache |
382 | */ | 382 | */ |
383 | static bool cachefiles_check_consistency(struct fscache_operation *op) | 383 | static int cachefiles_check_consistency(struct fscache_operation *op) |
384 | { | 384 | { |
385 | struct cachefiles_object *object; | 385 | struct cachefiles_object *object; |
386 | struct cachefiles_cache *cache; | 386 | struct cachefiles_cache *cache; |
diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c index eeb71e5de27a..26a9d10d75e9 100644 --- a/fs/ceph/addr.c +++ b/fs/ceph/addr.c | |||
@@ -276,8 +276,10 @@ static void finish_read(struct ceph_osd_request *req) | |||
276 | for (i = 0; i < num_pages; i++) { | 276 | for (i = 0; i < num_pages; i++) { |
277 | struct page *page = osd_data->pages[i]; | 277 | struct page *page = osd_data->pages[i]; |
278 | 278 | ||
279 | if (rc < 0 && rc != -ENOENT) | 279 | if (rc < 0 && rc != -ENOENT) { |
280 | ceph_fscache_readpage_cancel(inode, page); | ||
280 | goto unlock; | 281 | goto unlock; |
282 | } | ||
281 | if (bytes < (int)PAGE_SIZE) { | 283 | if (bytes < (int)PAGE_SIZE) { |
282 | /* zero (remainder of) page */ | 284 | /* zero (remainder of) page */ |
283 | int s = bytes < 0 ? 0 : bytes; | 285 | int s = bytes < 0 ? 0 : bytes; |
@@ -535,8 +537,6 @@ static int writepage_nounlock(struct page *page, struct writeback_control *wbc) | |||
535 | CONGESTION_ON_THRESH(fsc->mount_options->congestion_kb)) | 537 | CONGESTION_ON_THRESH(fsc->mount_options->congestion_kb)) |
536 | set_bdi_congested(&fsc->backing_dev_info, BLK_RW_ASYNC); | 538 | set_bdi_congested(&fsc->backing_dev_info, BLK_RW_ASYNC); |
537 | 539 | ||
538 | ceph_readpage_to_fscache(inode, page); | ||
539 | |||
540 | set_page_writeback(page); | 540 | set_page_writeback(page); |
541 | err = ceph_osdc_writepages(osdc, ceph_vino(inode), | 541 | err = ceph_osdc_writepages(osdc, ceph_vino(inode), |
542 | &ci->i_layout, snapc, | 542 | &ci->i_layout, snapc, |
diff --git a/fs/ceph/cache.c b/fs/ceph/cache.c index c052b5bf219b..238c55b01723 100644 --- a/fs/ceph/cache.c +++ b/fs/ceph/cache.c | |||
@@ -25,6 +25,7 @@ | |||
25 | #include "cache.h" | 25 | #include "cache.h" |
26 | 26 | ||
27 | struct ceph_aux_inode { | 27 | struct ceph_aux_inode { |
28 | u64 version; | ||
28 | struct timespec mtime; | 29 | struct timespec mtime; |
29 | loff_t size; | 30 | loff_t size; |
30 | }; | 31 | }; |
@@ -69,15 +70,8 @@ int ceph_fscache_register_fs(struct ceph_fs_client* fsc) | |||
69 | fsc->fscache = fscache_acquire_cookie(ceph_cache_netfs.primary_index, | 70 | fsc->fscache = fscache_acquire_cookie(ceph_cache_netfs.primary_index, |
70 | &ceph_fscache_fsid_object_def, | 71 | &ceph_fscache_fsid_object_def, |
71 | fsc, true); | 72 | fsc, true); |
72 | 73 | if (!fsc->fscache) | |
73 | if (fsc->fscache == NULL) { | ||
74 | pr_err("Unable to resgister fsid: %p fscache cookie", fsc); | 74 | pr_err("Unable to resgister fsid: %p fscache cookie", fsc); |
75 | return 0; | ||
76 | } | ||
77 | |||
78 | fsc->revalidate_wq = alloc_workqueue("ceph-revalidate", 0, 1); | ||
79 | if (fsc->revalidate_wq == NULL) | ||
80 | return -ENOMEM; | ||
81 | 75 | ||
82 | return 0; | 76 | return 0; |
83 | } | 77 | } |
@@ -105,6 +99,7 @@ static uint16_t ceph_fscache_inode_get_aux(const void *cookie_netfs_data, | |||
105 | const struct inode* inode = &ci->vfs_inode; | 99 | const struct inode* inode = &ci->vfs_inode; |
106 | 100 | ||
107 | memset(&aux, 0, sizeof(aux)); | 101 | memset(&aux, 0, sizeof(aux)); |
102 | aux.version = ci->i_version; | ||
108 | aux.mtime = inode->i_mtime; | 103 | aux.mtime = inode->i_mtime; |
109 | aux.size = i_size_read(inode); | 104 | aux.size = i_size_read(inode); |
110 | 105 | ||
@@ -131,6 +126,7 @@ static enum fscache_checkaux ceph_fscache_inode_check_aux( | |||
131 | return FSCACHE_CHECKAUX_OBSOLETE; | 126 | return FSCACHE_CHECKAUX_OBSOLETE; |
132 | 127 | ||
133 | memset(&aux, 0, sizeof(aux)); | 128 | memset(&aux, 0, sizeof(aux)); |
129 | aux.version = ci->i_version; | ||
134 | aux.mtime = inode->i_mtime; | 130 | aux.mtime = inode->i_mtime; |
135 | aux.size = i_size_read(inode); | 131 | aux.size = i_size_read(inode); |
136 | 132 | ||
@@ -181,32 +177,26 @@ static const struct fscache_cookie_def ceph_fscache_inode_object_def = { | |||
181 | .now_uncached = ceph_fscache_inode_now_uncached, | 177 | .now_uncached = ceph_fscache_inode_now_uncached, |
182 | }; | 178 | }; |
183 | 179 | ||
184 | void ceph_fscache_register_inode_cookie(struct ceph_fs_client* fsc, | 180 | void ceph_fscache_register_inode_cookie(struct inode *inode) |
185 | struct ceph_inode_info* ci) | ||
186 | { | 181 | { |
187 | struct inode* inode = &ci->vfs_inode; | 182 | struct ceph_inode_info *ci = ceph_inode(inode); |
183 | struct ceph_fs_client *fsc = ceph_inode_to_client(inode); | ||
188 | 184 | ||
189 | /* No caching for filesystem */ | 185 | /* No caching for filesystem */ |
190 | if (fsc->fscache == NULL) | 186 | if (fsc->fscache == NULL) |
191 | return; | 187 | return; |
192 | 188 | ||
193 | /* Only cache for regular files that are read only */ | 189 | /* Only cache for regular files that are read only */ |
194 | if ((ci->vfs_inode.i_mode & S_IFREG) == 0) | 190 | if (!S_ISREG(inode->i_mode)) |
195 | return; | 191 | return; |
196 | 192 | ||
197 | /* Avoid multiple racing open requests */ | 193 | inode_lock_nested(inode, I_MUTEX_CHILD); |
198 | inode_lock(inode); | 194 | if (!ci->fscache) { |
199 | 195 | ci->fscache = fscache_acquire_cookie(fsc->fscache, | |
200 | if (ci->fscache) | 196 | &ceph_fscache_inode_object_def, |
201 | goto done; | 197 | ci, false); |
202 | 198 | } | |
203 | ci->fscache = fscache_acquire_cookie(fsc->fscache, | ||
204 | &ceph_fscache_inode_object_def, | ||
205 | ci, true); | ||
206 | fscache_check_consistency(ci->fscache); | ||
207 | done: | ||
208 | inode_unlock(inode); | 199 | inode_unlock(inode); |
209 | |||
210 | } | 200 | } |
211 | 201 | ||
212 | void ceph_fscache_unregister_inode_cookie(struct ceph_inode_info* ci) | 202 | void ceph_fscache_unregister_inode_cookie(struct ceph_inode_info* ci) |
@@ -222,6 +212,34 @@ void ceph_fscache_unregister_inode_cookie(struct ceph_inode_info* ci) | |||
222 | fscache_relinquish_cookie(cookie, 0); | 212 | fscache_relinquish_cookie(cookie, 0); |
223 | } | 213 | } |
224 | 214 | ||
215 | static bool ceph_fscache_can_enable(void *data) | ||
216 | { | ||
217 | struct inode *inode = data; | ||
218 | return !inode_is_open_for_write(inode); | ||
219 | } | ||
220 | |||
221 | void ceph_fscache_file_set_cookie(struct inode *inode, struct file *filp) | ||
222 | { | ||
223 | struct ceph_inode_info *ci = ceph_inode(inode); | ||
224 | |||
225 | if (!fscache_cookie_valid(ci->fscache)) | ||
226 | return; | ||
227 | |||
228 | if (inode_is_open_for_write(inode)) { | ||
229 | dout("fscache_file_set_cookie %p %p disabling cache\n", | ||
230 | inode, filp); | ||
231 | fscache_disable_cookie(ci->fscache, false); | ||
232 | fscache_uncache_all_inode_pages(ci->fscache, inode); | ||
233 | } else { | ||
234 | fscache_enable_cookie(ci->fscache, ceph_fscache_can_enable, | ||
235 | inode); | ||
236 | if (fscache_cookie_enabled(ci->fscache)) { | ||
237 | dout("fscache_file_set_cookie %p %p enabing cache\n", | ||
238 | inode, filp); | ||
239 | } | ||
240 | } | ||
241 | } | ||
242 | |||
225 | static void ceph_vfs_readpage_complete(struct page *page, void *data, int error) | 243 | static void ceph_vfs_readpage_complete(struct page *page, void *data, int error) |
226 | { | 244 | { |
227 | if (!error) | 245 | if (!error) |
@@ -238,8 +256,7 @@ static void ceph_vfs_readpage_complete_unlock(struct page *page, void *data, int | |||
238 | 256 | ||
239 | static inline bool cache_valid(struct ceph_inode_info *ci) | 257 | static inline bool cache_valid(struct ceph_inode_info *ci) |
240 | { | 258 | { |
241 | return ((ceph_caps_issued(ci) & CEPH_CAP_FILE_CACHE) && | 259 | return ci->i_fscache_gen == ci->i_rdcache_gen; |
242 | (ci->i_fscache_gen == ci->i_rdcache_gen)); | ||
243 | } | 260 | } |
244 | 261 | ||
245 | 262 | ||
@@ -332,69 +349,27 @@ void ceph_invalidate_fscache_page(struct inode* inode, struct page *page) | |||
332 | 349 | ||
333 | void ceph_fscache_unregister_fs(struct ceph_fs_client* fsc) | 350 | void ceph_fscache_unregister_fs(struct ceph_fs_client* fsc) |
334 | { | 351 | { |
335 | if (fsc->revalidate_wq) | ||
336 | destroy_workqueue(fsc->revalidate_wq); | ||
337 | |||
338 | fscache_relinquish_cookie(fsc->fscache, 0); | 352 | fscache_relinquish_cookie(fsc->fscache, 0); |
339 | fsc->fscache = NULL; | 353 | fsc->fscache = NULL; |
340 | } | 354 | } |
341 | 355 | ||
342 | static void ceph_revalidate_work(struct work_struct *work) | 356 | /* |
343 | { | 357 | * caller should hold CEPH_CAP_FILE_{RD,CACHE} |
344 | int issued; | 358 | */ |
345 | u32 orig_gen; | 359 | void ceph_fscache_revalidate_cookie(struct ceph_inode_info *ci) |
346 | struct ceph_inode_info *ci = container_of(work, struct ceph_inode_info, | ||
347 | i_revalidate_work); | ||
348 | struct inode *inode = &ci->vfs_inode; | ||
349 | |||
350 | spin_lock(&ci->i_ceph_lock); | ||
351 | issued = __ceph_caps_issued(ci, NULL); | ||
352 | orig_gen = ci->i_rdcache_gen; | ||
353 | spin_unlock(&ci->i_ceph_lock); | ||
354 | |||
355 | if (!(issued & CEPH_CAP_FILE_CACHE)) { | ||
356 | dout("revalidate_work lost cache before validation %p\n", | ||
357 | inode); | ||
358 | goto out; | ||
359 | } | ||
360 | |||
361 | if (!fscache_check_consistency(ci->fscache)) | ||
362 | fscache_invalidate(ci->fscache); | ||
363 | |||
364 | spin_lock(&ci->i_ceph_lock); | ||
365 | /* Update the new valid generation (backwards sanity check too) */ | ||
366 | if (orig_gen > ci->i_fscache_gen) { | ||
367 | ci->i_fscache_gen = orig_gen; | ||
368 | } | ||
369 | spin_unlock(&ci->i_ceph_lock); | ||
370 | |||
371 | out: | ||
372 | iput(&ci->vfs_inode); | ||
373 | } | ||
374 | |||
375 | void ceph_queue_revalidate(struct inode *inode) | ||
376 | { | 360 | { |
377 | struct ceph_fs_client *fsc = ceph_sb_to_client(inode->i_sb); | 361 | if (cache_valid(ci)) |
378 | struct ceph_inode_info *ci = ceph_inode(inode); | ||
379 | |||
380 | if (fsc->revalidate_wq == NULL || ci->fscache == NULL) | ||
381 | return; | 362 | return; |
382 | 363 | ||
383 | ihold(inode); | 364 | /* resue i_truncate_mutex. There should be no pending |
384 | 365 | * truncate while the caller holds CEPH_CAP_FILE_RD */ | |
385 | if (queue_work(ceph_sb_to_client(inode->i_sb)->revalidate_wq, | 366 | mutex_lock(&ci->i_truncate_mutex); |
386 | &ci->i_revalidate_work)) { | 367 | if (!cache_valid(ci)) { |
387 | dout("ceph_queue_revalidate %p\n", inode); | 368 | if (fscache_check_consistency(ci->fscache)) |
388 | } else { | 369 | fscache_invalidate(ci->fscache); |
389 | dout("ceph_queue_revalidate %p failed\n)", inode); | 370 | spin_lock(&ci->i_ceph_lock); |
390 | iput(inode); | 371 | ci->i_fscache_gen = ci->i_rdcache_gen; |
372 | spin_unlock(&ci->i_ceph_lock); | ||
391 | } | 373 | } |
392 | } | 374 | mutex_unlock(&ci->i_truncate_mutex); |
393 | |||
394 | void ceph_fscache_inode_init(struct ceph_inode_info *ci) | ||
395 | { | ||
396 | ci->fscache = NULL; | ||
397 | /* The first load is verifed cookie open time */ | ||
398 | ci->i_fscache_gen = 1; | ||
399 | INIT_WORK(&ci->i_revalidate_work, ceph_revalidate_work); | ||
400 | } | 375 | } |
diff --git a/fs/ceph/cache.h b/fs/ceph/cache.h index 5ac591bd012b..7e72c7594f0c 100644 --- a/fs/ceph/cache.h +++ b/fs/ceph/cache.h | |||
@@ -34,10 +34,10 @@ void ceph_fscache_unregister(void); | |||
34 | int ceph_fscache_register_fs(struct ceph_fs_client* fsc); | 34 | int ceph_fscache_register_fs(struct ceph_fs_client* fsc); |
35 | void ceph_fscache_unregister_fs(struct ceph_fs_client* fsc); | 35 | void ceph_fscache_unregister_fs(struct ceph_fs_client* fsc); |
36 | 36 | ||
37 | void ceph_fscache_inode_init(struct ceph_inode_info *ci); | 37 | void ceph_fscache_register_inode_cookie(struct inode *inode); |
38 | void ceph_fscache_register_inode_cookie(struct ceph_fs_client* fsc, | ||
39 | struct ceph_inode_info* ci); | ||
40 | void ceph_fscache_unregister_inode_cookie(struct ceph_inode_info* ci); | 38 | void ceph_fscache_unregister_inode_cookie(struct ceph_inode_info* ci); |
39 | void ceph_fscache_file_set_cookie(struct inode *inode, struct file *filp); | ||
40 | void ceph_fscache_revalidate_cookie(struct ceph_inode_info *ci); | ||
41 | 41 | ||
42 | int ceph_readpage_from_fscache(struct inode *inode, struct page *page); | 42 | int ceph_readpage_from_fscache(struct inode *inode, struct page *page); |
43 | int ceph_readpages_from_fscache(struct inode *inode, | 43 | int ceph_readpages_from_fscache(struct inode *inode, |
@@ -46,12 +46,11 @@ int ceph_readpages_from_fscache(struct inode *inode, | |||
46 | unsigned *nr_pages); | 46 | unsigned *nr_pages); |
47 | void ceph_readpage_to_fscache(struct inode *inode, struct page *page); | 47 | void ceph_readpage_to_fscache(struct inode *inode, struct page *page); |
48 | void ceph_invalidate_fscache_page(struct inode* inode, struct page *page); | 48 | void ceph_invalidate_fscache_page(struct inode* inode, struct page *page); |
49 | void ceph_queue_revalidate(struct inode *inode); | ||
50 | 49 | ||
51 | static inline void ceph_fscache_update_objectsize(struct inode *inode) | 50 | static inline void ceph_fscache_inode_init(struct ceph_inode_info *ci) |
52 | { | 51 | { |
53 | struct ceph_inode_info *ci = ceph_inode(inode); | 52 | ci->fscache = NULL; |
54 | fscache_attr_changed(ci->fscache); | 53 | ci->i_fscache_gen = 0; |
55 | } | 54 | } |
56 | 55 | ||
57 | static inline void ceph_fscache_invalidate(struct inode *inode) | 56 | static inline void ceph_fscache_invalidate(struct inode *inode) |
@@ -88,6 +87,11 @@ static inline void ceph_fscache_readpages_cancel(struct inode *inode, | |||
88 | return fscache_readpages_cancel(ci->fscache, pages); | 87 | return fscache_readpages_cancel(ci->fscache, pages); |
89 | } | 88 | } |
90 | 89 | ||
90 | static inline void ceph_disable_fscache_readpage(struct ceph_inode_info *ci) | ||
91 | { | ||
92 | ci->i_fscache_gen = ci->i_rdcache_gen - 1; | ||
93 | } | ||
94 | |||
91 | #else | 95 | #else |
92 | 96 | ||
93 | static inline int ceph_fscache_register(void) | 97 | static inline int ceph_fscache_register(void) |
@@ -112,8 +116,20 @@ static inline void ceph_fscache_inode_init(struct ceph_inode_info *ci) | |||
112 | { | 116 | { |
113 | } | 117 | } |
114 | 118 | ||
115 | static inline void ceph_fscache_register_inode_cookie(struct ceph_fs_client* parent_fsc, | 119 | static inline void ceph_fscache_register_inode_cookie(struct inode *inode) |
116 | struct ceph_inode_info* ci) | 120 | { |
121 | } | ||
122 | |||
123 | static inline void ceph_fscache_unregister_inode_cookie(struct ceph_inode_info* ci) | ||
124 | { | ||
125 | } | ||
126 | |||
127 | static inline void ceph_fscache_file_set_cookie(struct inode *inode, | ||
128 | struct file *filp) | ||
129 | { | ||
130 | } | ||
131 | |||
132 | static inline void ceph_fscache_revalidate_cookie(struct ceph_inode_info *ci) | ||
117 | { | 133 | { |
118 | } | 134 | } |
119 | 135 | ||
@@ -141,10 +157,6 @@ static inline void ceph_readpage_to_fscache(struct inode *inode, | |||
141 | { | 157 | { |
142 | } | 158 | } |
143 | 159 | ||
144 | static inline void ceph_fscache_update_objectsize(struct inode *inode) | ||
145 | { | ||
146 | } | ||
147 | |||
148 | static inline void ceph_fscache_invalidate(struct inode *inode) | 160 | static inline void ceph_fscache_invalidate(struct inode *inode) |
149 | { | 161 | { |
150 | } | 162 | } |
@@ -154,10 +166,6 @@ static inline void ceph_invalidate_fscache_page(struct inode *inode, | |||
154 | { | 166 | { |
155 | } | 167 | } |
156 | 168 | ||
157 | static inline void ceph_fscache_unregister_inode_cookie(struct ceph_inode_info* ci) | ||
158 | { | ||
159 | } | ||
160 | |||
161 | static inline int ceph_release_fscache_page(struct page *page, gfp_t gfp) | 169 | static inline int ceph_release_fscache_page(struct page *page, gfp_t gfp) |
162 | { | 170 | { |
163 | return 1; | 171 | return 1; |
@@ -173,7 +181,7 @@ static inline void ceph_fscache_readpages_cancel(struct inode *inode, | |||
173 | { | 181 | { |
174 | } | 182 | } |
175 | 183 | ||
176 | static inline void ceph_queue_revalidate(struct inode *inode) | 184 | static inline void ceph_disable_fscache_readpage(struct ceph_inode_info *ci) |
177 | { | 185 | { |
178 | } | 186 | } |
179 | 187 | ||
diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c index c17b5d76d75e..6f60d0a3d0f9 100644 --- a/fs/ceph/caps.c +++ b/fs/ceph/caps.c | |||
@@ -2393,6 +2393,9 @@ again: | |||
2393 | snap_rwsem_locked = true; | 2393 | snap_rwsem_locked = true; |
2394 | } | 2394 | } |
2395 | *got = need | (have & want); | 2395 | *got = need | (have & want); |
2396 | if ((need & CEPH_CAP_FILE_RD) && | ||
2397 | !(*got & CEPH_CAP_FILE_CACHE)) | ||
2398 | ceph_disable_fscache_readpage(ci); | ||
2396 | __take_cap_refs(ci, *got, true); | 2399 | __take_cap_refs(ci, *got, true); |
2397 | ret = 1; | 2400 | ret = 1; |
2398 | } | 2401 | } |
@@ -2554,6 +2557,9 @@ int ceph_get_caps(struct ceph_inode_info *ci, int need, int want, | |||
2554 | break; | 2557 | break; |
2555 | } | 2558 | } |
2556 | 2559 | ||
2560 | if ((_got & CEPH_CAP_FILE_RD) && (_got & CEPH_CAP_FILE_CACHE)) | ||
2561 | ceph_fscache_revalidate_cookie(ci); | ||
2562 | |||
2557 | *got = _got; | 2563 | *got = _got; |
2558 | return 0; | 2564 | return 0; |
2559 | } | 2565 | } |
@@ -2795,7 +2801,6 @@ static void handle_cap_grant(struct ceph_mds_client *mdsc, | |||
2795 | bool writeback = false; | 2801 | bool writeback = false; |
2796 | bool queue_trunc = false; | 2802 | bool queue_trunc = false; |
2797 | bool queue_invalidate = false; | 2803 | bool queue_invalidate = false; |
2798 | bool queue_revalidate = false; | ||
2799 | bool deleted_inode = false; | 2804 | bool deleted_inode = false; |
2800 | bool fill_inline = false; | 2805 | bool fill_inline = false; |
2801 | 2806 | ||
@@ -2837,8 +2842,6 @@ static void handle_cap_grant(struct ceph_mds_client *mdsc, | |||
2837 | ci->i_rdcache_revoking = ci->i_rdcache_gen; | 2842 | ci->i_rdcache_revoking = ci->i_rdcache_gen; |
2838 | } | 2843 | } |
2839 | } | 2844 | } |
2840 | |||
2841 | ceph_fscache_invalidate(inode); | ||
2842 | } | 2845 | } |
2843 | 2846 | ||
2844 | /* side effects now are allowed */ | 2847 | /* side effects now are allowed */ |
@@ -2880,11 +2883,6 @@ static void handle_cap_grant(struct ceph_mds_client *mdsc, | |||
2880 | } | 2883 | } |
2881 | } | 2884 | } |
2882 | 2885 | ||
2883 | /* Do we need to revalidate our fscache cookie. Don't bother on the | ||
2884 | * first cache cap as we already validate at cookie creation time. */ | ||
2885 | if ((issued & CEPH_CAP_FILE_CACHE) && ci->i_rdcache_gen > 1) | ||
2886 | queue_revalidate = true; | ||
2887 | |||
2888 | if (newcaps & CEPH_CAP_ANY_RD) { | 2886 | if (newcaps & CEPH_CAP_ANY_RD) { |
2889 | /* ctime/mtime/atime? */ | 2887 | /* ctime/mtime/atime? */ |
2890 | ceph_decode_timespec(&mtime, &grant->mtime); | 2888 | ceph_decode_timespec(&mtime, &grant->mtime); |
@@ -2993,11 +2991,8 @@ static void handle_cap_grant(struct ceph_mds_client *mdsc, | |||
2993 | if (fill_inline) | 2991 | if (fill_inline) |
2994 | ceph_fill_inline_data(inode, NULL, inline_data, inline_len); | 2992 | ceph_fill_inline_data(inode, NULL, inline_data, inline_len); |
2995 | 2993 | ||
2996 | if (queue_trunc) { | 2994 | if (queue_trunc) |
2997 | ceph_queue_vmtruncate(inode); | 2995 | ceph_queue_vmtruncate(inode); |
2998 | ceph_queue_revalidate(inode); | ||
2999 | } else if (queue_revalidate) | ||
3000 | ceph_queue_revalidate(inode); | ||
3001 | 2996 | ||
3002 | if (writeback) | 2997 | if (writeback) |
3003 | /* | 2998 | /* |
@@ -3199,10 +3194,8 @@ static void handle_cap_trunc(struct inode *inode, | |||
3199 | truncate_seq, truncate_size, size); | 3194 | truncate_seq, truncate_size, size); |
3200 | spin_unlock(&ci->i_ceph_lock); | 3195 | spin_unlock(&ci->i_ceph_lock); |
3201 | 3196 | ||
3202 | if (queue_trunc) { | 3197 | if (queue_trunc) |
3203 | ceph_queue_vmtruncate(inode); | 3198 | ceph_queue_vmtruncate(inode); |
3204 | ceph_fscache_invalidate(inode); | ||
3205 | } | ||
3206 | } | 3199 | } |
3207 | 3200 | ||
3208 | /* | 3201 | /* |
diff --git a/fs/ceph/file.c b/fs/ceph/file.c index a888df6f2d71..ce2f5795e44b 100644 --- a/fs/ceph/file.c +++ b/fs/ceph/file.c | |||
@@ -137,23 +137,11 @@ static int ceph_init_file(struct inode *inode, struct file *file, int fmode) | |||
137 | { | 137 | { |
138 | struct ceph_file_info *cf; | 138 | struct ceph_file_info *cf; |
139 | int ret = 0; | 139 | int ret = 0; |
140 | struct ceph_inode_info *ci = ceph_inode(inode); | ||
141 | struct ceph_fs_client *fsc = ceph_sb_to_client(inode->i_sb); | ||
142 | struct ceph_mds_client *mdsc = fsc->mdsc; | ||
143 | 140 | ||
144 | switch (inode->i_mode & S_IFMT) { | 141 | switch (inode->i_mode & S_IFMT) { |
145 | case S_IFREG: | 142 | case S_IFREG: |
146 | /* First file open request creates the cookie, we want to keep | 143 | ceph_fscache_register_inode_cookie(inode); |
147 | * this cookie around for the filetime of the inode as not to | 144 | ceph_fscache_file_set_cookie(inode, file); |
148 | * have to worry about fscache register / revoke / operation | ||
149 | * races. | ||
150 | * | ||
151 | * Also, if we know the operation is going to invalidate data | ||
152 | * (non readonly) just nuke the cache right away. | ||
153 | */ | ||
154 | ceph_fscache_register_inode_cookie(mdsc->fsc, ci); | ||
155 | if ((fmode & CEPH_FILE_MODE_WR)) | ||
156 | ceph_fscache_invalidate(inode); | ||
157 | case S_IFDIR: | 145 | case S_IFDIR: |
158 | dout("init_file %p %p 0%o (regular)\n", inode, file, | 146 | dout("init_file %p %p 0%o (regular)\n", inode, file, |
159 | inode->i_mode); | 147 | inode->i_mode); |
@@ -1349,7 +1337,7 @@ static ssize_t ceph_write_iter(struct kiocb *iocb, struct iov_iter *from) | |||
1349 | } | 1337 | } |
1350 | 1338 | ||
1351 | retry_snap: | 1339 | retry_snap: |
1352 | if (ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_FULL)) { | 1340 | if (ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL)) { |
1353 | err = -ENOSPC; | 1341 | err = -ENOSPC; |
1354 | goto out; | 1342 | goto out; |
1355 | } | 1343 | } |
@@ -1407,7 +1395,6 @@ retry_snap: | |||
1407 | iov_iter_advance(from, written); | 1395 | iov_iter_advance(from, written); |
1408 | ceph_put_snap_context(snapc); | 1396 | ceph_put_snap_context(snapc); |
1409 | } else { | 1397 | } else { |
1410 | loff_t old_size = i_size_read(inode); | ||
1411 | /* | 1398 | /* |
1412 | * No need to acquire the i_truncate_mutex. Because | 1399 | * No need to acquire the i_truncate_mutex. Because |
1413 | * the MDS revokes Fwb caps before sending truncate | 1400 | * the MDS revokes Fwb caps before sending truncate |
@@ -1418,8 +1405,6 @@ retry_snap: | |||
1418 | written = generic_perform_write(file, from, pos); | 1405 | written = generic_perform_write(file, from, pos); |
1419 | if (likely(written >= 0)) | 1406 | if (likely(written >= 0)) |
1420 | iocb->ki_pos = pos + written; | 1407 | iocb->ki_pos = pos + written; |
1421 | if (i_size_read(inode) > old_size) | ||
1422 | ceph_fscache_update_objectsize(inode); | ||
1423 | inode_unlock(inode); | 1408 | inode_unlock(inode); |
1424 | } | 1409 | } |
1425 | 1410 | ||
@@ -1440,7 +1425,7 @@ retry_snap: | |||
1440 | ceph_put_cap_refs(ci, got); | 1425 | ceph_put_cap_refs(ci, got); |
1441 | 1426 | ||
1442 | if (written >= 0) { | 1427 | if (written >= 0) { |
1443 | if (ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_NEARFULL)) | 1428 | if (ceph_osdmap_flag(osdc, CEPH_OSDMAP_NEARFULL)) |
1444 | iocb->ki_flags |= IOCB_DSYNC; | 1429 | iocb->ki_flags |= IOCB_DSYNC; |
1445 | 1430 | ||
1446 | written = generic_write_sync(iocb, written); | 1431 | written = generic_write_sync(iocb, written); |
@@ -1672,8 +1657,8 @@ static long ceph_fallocate(struct file *file, int mode, | |||
1672 | goto unlock; | 1657 | goto unlock; |
1673 | } | 1658 | } |
1674 | 1659 | ||
1675 | if (ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_FULL) && | 1660 | if (ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) && |
1676 | !(mode & FALLOC_FL_PUNCH_HOLE)) { | 1661 | !(mode & FALLOC_FL_PUNCH_HOLE)) { |
1677 | ret = -ENOSPC; | 1662 | ret = -ENOSPC; |
1678 | goto unlock; | 1663 | goto unlock; |
1679 | } | 1664 | } |
diff --git a/fs/ceph/super.h b/fs/ceph/super.h index 0130a8592191..0168b49fb6ad 100644 --- a/fs/ceph/super.h +++ b/fs/ceph/super.h | |||
@@ -103,7 +103,6 @@ struct ceph_fs_client { | |||
103 | 103 | ||
104 | #ifdef CONFIG_CEPH_FSCACHE | 104 | #ifdef CONFIG_CEPH_FSCACHE |
105 | struct fscache_cookie *fscache; | 105 | struct fscache_cookie *fscache; |
106 | struct workqueue_struct *revalidate_wq; | ||
107 | #endif | 106 | #endif |
108 | }; | 107 | }; |
109 | 108 | ||
@@ -360,8 +359,7 @@ struct ceph_inode_info { | |||
360 | 359 | ||
361 | #ifdef CONFIG_CEPH_FSCACHE | 360 | #ifdef CONFIG_CEPH_FSCACHE |
362 | struct fscache_cookie *fscache; | 361 | struct fscache_cookie *fscache; |
363 | u32 i_fscache_gen; /* sequence, for delayed fscache validate */ | 362 | u32 i_fscache_gen; |
364 | struct work_struct i_revalidate_work; | ||
365 | #endif | 363 | #endif |
366 | struct inode vfs_inode; /* at end */ | 364 | struct inode vfs_inode; /* at end */ |
367 | }; | 365 | }; |
diff --git a/fs/fscache/page.c b/fs/fscache/page.c index 3078b679fcd1..c8c4f79c7ce1 100644 --- a/fs/fscache/page.c +++ b/fs/fscache/page.c | |||
@@ -887,6 +887,8 @@ void fscache_invalidate_writes(struct fscache_cookie *cookie) | |||
887 | put_page(results[i]); | 887 | put_page(results[i]); |
888 | } | 888 | } |
889 | 889 | ||
890 | wake_up_bit(&cookie->flags, 0); | ||
891 | |||
890 | _leave(""); | 892 | _leave(""); |
891 | } | 893 | } |
892 | 894 | ||
diff --git a/include/acpi/video.h b/include/acpi/video.h index 70a41f742037..5731ccb42585 100644 --- a/include/acpi/video.h +++ b/include/acpi/video.h | |||
@@ -51,7 +51,8 @@ extern void acpi_video_set_dmi_backlight_type(enum acpi_backlight_type type); | |||
51 | */ | 51 | */ |
52 | extern bool acpi_video_handles_brightness_key_presses(void); | 52 | extern bool acpi_video_handles_brightness_key_presses(void); |
53 | extern int acpi_video_get_levels(struct acpi_device *device, | 53 | extern int acpi_video_get_levels(struct acpi_device *device, |
54 | struct acpi_video_device_brightness **dev_br); | 54 | struct acpi_video_device_brightness **dev_br, |
55 | int *pmax_level); | ||
55 | #else | 56 | #else |
56 | static inline int acpi_video_register(void) { return 0; } | 57 | static inline int acpi_video_register(void) { return 0; } |
57 | static inline void acpi_video_unregister(void) { return; } | 58 | static inline void acpi_video_unregister(void) { return; } |
@@ -72,7 +73,8 @@ static inline bool acpi_video_handles_brightness_key_presses(void) | |||
72 | return false; | 73 | return false; |
73 | } | 74 | } |
74 | static inline int acpi_video_get_levels(struct acpi_device *device, | 75 | static inline int acpi_video_get_levels(struct acpi_device *device, |
75 | struct acpi_video_device_brightness **dev_br) | 76 | struct acpi_video_device_brightness **dev_br, |
77 | int *pmax_level) | ||
76 | { | 78 | { |
77 | return -ENODEV; | 79 | return -ENODEV; |
78 | } | 80 | } |
diff --git a/include/linux/ceph/osd_client.h b/include/linux/ceph/osd_client.h index 19b14862d3e0..1b3b6e155392 100644 --- a/include/linux/ceph/osd_client.h +++ b/include/linux/ceph/osd_client.h | |||
@@ -279,6 +279,11 @@ struct ceph_osd_client { | |||
279 | struct workqueue_struct *notify_wq; | 279 | struct workqueue_struct *notify_wq; |
280 | }; | 280 | }; |
281 | 281 | ||
282 | static inline bool ceph_osdmap_flag(struct ceph_osd_client *osdc, int flag) | ||
283 | { | ||
284 | return osdc->osdmap->flags & flag; | ||
285 | } | ||
286 | |||
282 | extern int ceph_osdc_setup(void); | 287 | extern int ceph_osdc_setup(void); |
283 | extern void ceph_osdc_cleanup(void); | 288 | extern void ceph_osdc_cleanup(void); |
284 | 289 | ||
diff --git a/include/linux/ceph/osdmap.h b/include/linux/ceph/osdmap.h index ddc426b22d81..9ccf4dbe55f8 100644 --- a/include/linux/ceph/osdmap.h +++ b/include/linux/ceph/osdmap.h | |||
@@ -189,11 +189,6 @@ static inline bool ceph_osd_is_down(struct ceph_osdmap *map, int osd) | |||
189 | return !ceph_osd_is_up(map, osd); | 189 | return !ceph_osd_is_up(map, osd); |
190 | } | 190 | } |
191 | 191 | ||
192 | static inline bool ceph_osdmap_flag(struct ceph_osdmap *map, int flag) | ||
193 | { | ||
194 | return map && (map->flags & flag); | ||
195 | } | ||
196 | |||
197 | extern char *ceph_osdmap_state_str(char *str, int len, int state); | 192 | extern char *ceph_osdmap_state_str(char *str, int len, int state); |
198 | extern u32 ceph_get_primary_affinity(struct ceph_osdmap *map, int osd); | 193 | extern u32 ceph_get_primary_affinity(struct ceph_osdmap *map, int osd); |
199 | 194 | ||
diff --git a/include/linux/dma-buf.h b/include/linux/dma-buf.h index 3fe90d494edb..4551c6f2a6c4 100644 --- a/include/linux/dma-buf.h +++ b/include/linux/dma-buf.h | |||
@@ -112,19 +112,24 @@ struct dma_buf_ops { | |||
112 | * @file: file pointer used for sharing buffers across, and for refcounting. | 112 | * @file: file pointer used for sharing buffers across, and for refcounting. |
113 | * @attachments: list of dma_buf_attachment that denotes all devices attached. | 113 | * @attachments: list of dma_buf_attachment that denotes all devices attached. |
114 | * @ops: dma_buf_ops associated with this buffer object. | 114 | * @ops: dma_buf_ops associated with this buffer object. |
115 | * @lock: used internally to serialize list manipulation, attach/detach and vmap/unmap | ||
116 | * @vmapping_counter: used internally to refcnt the vmaps | ||
117 | * @vmap_ptr: the current vmap ptr if vmapping_counter > 0 | ||
115 | * @exp_name: name of the exporter; useful for debugging. | 118 | * @exp_name: name of the exporter; useful for debugging. |
116 | * @owner: pointer to exporter module; used for refcounting when exporter is a | 119 | * @owner: pointer to exporter module; used for refcounting when exporter is a |
117 | * kernel module. | 120 | * kernel module. |
118 | * @list_node: node for dma_buf accounting and debugging. | 121 | * @list_node: node for dma_buf accounting and debugging. |
119 | * @priv: exporter specific private data for this buffer object. | 122 | * @priv: exporter specific private data for this buffer object. |
120 | * @resv: reservation object linked to this dma-buf | 123 | * @resv: reservation object linked to this dma-buf |
124 | * @poll: for userspace poll support | ||
125 | * @cb_excl: for userspace poll support | ||
126 | * @cb_shared: for userspace poll support | ||
121 | */ | 127 | */ |
122 | struct dma_buf { | 128 | struct dma_buf { |
123 | size_t size; | 129 | size_t size; |
124 | struct file *file; | 130 | struct file *file; |
125 | struct list_head attachments; | 131 | struct list_head attachments; |
126 | const struct dma_buf_ops *ops; | 132 | const struct dma_buf_ops *ops; |
127 | /* mutex to serialize list manipulation, attach/detach and vmap/unmap */ | ||
128 | struct mutex lock; | 133 | struct mutex lock; |
129 | unsigned vmapping_counter; | 134 | unsigned vmapping_counter; |
130 | void *vmap_ptr; | 135 | void *vmap_ptr; |
@@ -188,9 +193,11 @@ struct dma_buf_export_info { | |||
188 | 193 | ||
189 | /** | 194 | /** |
190 | * helper macro for exporters; zeros and fills in most common values | 195 | * helper macro for exporters; zeros and fills in most common values |
196 | * | ||
197 | * @name: export-info name | ||
191 | */ | 198 | */ |
192 | #define DEFINE_DMA_BUF_EXPORT_INFO(a) \ | 199 | #define DEFINE_DMA_BUF_EXPORT_INFO(name) \ |
193 | struct dma_buf_export_info a = { .exp_name = KBUILD_MODNAME, \ | 200 | struct dma_buf_export_info name = { .exp_name = KBUILD_MODNAME, \ |
194 | .owner = THIS_MODULE } | 201 | .owner = THIS_MODULE } |
195 | 202 | ||
196 | /** | 203 | /** |
diff --git a/include/linux/fence.h b/include/linux/fence.h index 2b17698b60b8..2056e9fd0138 100644 --- a/include/linux/fence.h +++ b/include/linux/fence.h | |||
@@ -49,6 +49,8 @@ struct fence_cb; | |||
49 | * @timestamp: Timestamp when the fence was signaled. | 49 | * @timestamp: Timestamp when the fence was signaled. |
50 | * @status: Optional, only valid if < 0, must be set before calling | 50 | * @status: Optional, only valid if < 0, must be set before calling |
51 | * fence_signal, indicates that the fence has completed with an error. | 51 | * fence_signal, indicates that the fence has completed with an error. |
52 | * @child_list: list of children fences | ||
53 | * @active_list: list of active fences | ||
52 | * | 54 | * |
53 | * the flags member must be manipulated and read using the appropriate | 55 | * the flags member must be manipulated and read using the appropriate |
54 | * atomic ops (bit_*), so taking the spinlock will not be needed most | 56 | * atomic ops (bit_*), so taking the spinlock will not be needed most |
diff --git a/include/linux/fscache-cache.h b/include/linux/fscache-cache.h index 604e1526cd00..13ba552e6c09 100644 --- a/include/linux/fscache-cache.h +++ b/include/linux/fscache-cache.h | |||
@@ -241,7 +241,7 @@ struct fscache_cache_ops { | |||
241 | 241 | ||
242 | /* check the consistency between the backing cache and the FS-Cache | 242 | /* check the consistency between the backing cache and the FS-Cache |
243 | * cookie */ | 243 | * cookie */ |
244 | bool (*check_consistency)(struct fscache_operation *op); | 244 | int (*check_consistency)(struct fscache_operation *op); |
245 | 245 | ||
246 | /* store the updated auxiliary data on an object */ | 246 | /* store the updated auxiliary data on an object */ |
247 | void (*update_object)(struct fscache_object *object); | 247 | void (*update_object)(struct fscache_object *object); |
diff --git a/include/linux/irqchip/arm-gic-v3.h b/include/linux/irqchip/arm-gic-v3.h index bfbd707de390..dc493e0f0ff7 100644 --- a/include/linux/irqchip/arm-gic-v3.h +++ b/include/linux/irqchip/arm-gic-v3.h | |||
@@ -305,12 +305,12 @@ | |||
305 | #define ICC_SGI1R_AFFINITY_1_SHIFT 16 | 305 | #define ICC_SGI1R_AFFINITY_1_SHIFT 16 |
306 | #define ICC_SGI1R_AFFINITY_1_MASK (0xff << ICC_SGI1R_AFFINITY_1_SHIFT) | 306 | #define ICC_SGI1R_AFFINITY_1_MASK (0xff << ICC_SGI1R_AFFINITY_1_SHIFT) |
307 | #define ICC_SGI1R_SGI_ID_SHIFT 24 | 307 | #define ICC_SGI1R_SGI_ID_SHIFT 24 |
308 | #define ICC_SGI1R_SGI_ID_MASK (0xff << ICC_SGI1R_SGI_ID_SHIFT) | 308 | #define ICC_SGI1R_SGI_ID_MASK (0xfULL << ICC_SGI1R_SGI_ID_SHIFT) |
309 | #define ICC_SGI1R_AFFINITY_2_SHIFT 32 | 309 | #define ICC_SGI1R_AFFINITY_2_SHIFT 32 |
310 | #define ICC_SGI1R_AFFINITY_2_MASK (0xffULL << ICC_SGI1R_AFFINITY_1_SHIFT) | 310 | #define ICC_SGI1R_AFFINITY_2_MASK (0xffULL << ICC_SGI1R_AFFINITY_2_SHIFT) |
311 | #define ICC_SGI1R_IRQ_ROUTING_MODE_BIT 40 | 311 | #define ICC_SGI1R_IRQ_ROUTING_MODE_BIT 40 |
312 | #define ICC_SGI1R_AFFINITY_3_SHIFT 48 | 312 | #define ICC_SGI1R_AFFINITY_3_SHIFT 48 |
313 | #define ICC_SGI1R_AFFINITY_3_MASK (0xffULL << ICC_SGI1R_AFFINITY_1_SHIFT) | 313 | #define ICC_SGI1R_AFFINITY_3_MASK (0xffULL << ICC_SGI1R_AFFINITY_3_SHIFT) |
314 | 314 | ||
315 | #include <asm/arch_gicv3.h> | 315 | #include <asm/arch_gicv3.h> |
316 | 316 | ||
diff --git a/include/linux/page_idle.h b/include/linux/page_idle.h index bf268fa92c5b..fec40271339f 100644 --- a/include/linux/page_idle.h +++ b/include/linux/page_idle.h | |||
@@ -46,33 +46,62 @@ extern struct page_ext_operations page_idle_ops; | |||
46 | 46 | ||
47 | static inline bool page_is_young(struct page *page) | 47 | static inline bool page_is_young(struct page *page) |
48 | { | 48 | { |
49 | return test_bit(PAGE_EXT_YOUNG, &lookup_page_ext(page)->flags); | 49 | struct page_ext *page_ext = lookup_page_ext(page); |
50 | |||
51 | if (unlikely(!page_ext)) | ||
52 | return false; | ||
53 | |||
54 | return test_bit(PAGE_EXT_YOUNG, &page_ext->flags); | ||
50 | } | 55 | } |
51 | 56 | ||
52 | static inline void set_page_young(struct page *page) | 57 | static inline void set_page_young(struct page *page) |
53 | { | 58 | { |
54 | set_bit(PAGE_EXT_YOUNG, &lookup_page_ext(page)->flags); | 59 | struct page_ext *page_ext = lookup_page_ext(page); |
60 | |||
61 | if (unlikely(!page_ext)) | ||
62 | return; | ||
63 | |||
64 | set_bit(PAGE_EXT_YOUNG, &page_ext->flags); | ||
55 | } | 65 | } |
56 | 66 | ||
57 | static inline bool test_and_clear_page_young(struct page *page) | 67 | static inline bool test_and_clear_page_young(struct page *page) |
58 | { | 68 | { |
59 | return test_and_clear_bit(PAGE_EXT_YOUNG, | 69 | struct page_ext *page_ext = lookup_page_ext(page); |
60 | &lookup_page_ext(page)->flags); | 70 | |
71 | if (unlikely(!page_ext)) | ||
72 | return false; | ||
73 | |||
74 | return test_and_clear_bit(PAGE_EXT_YOUNG, &page_ext->flags); | ||
61 | } | 75 | } |
62 | 76 | ||
63 | static inline bool page_is_idle(struct page *page) | 77 | static inline bool page_is_idle(struct page *page) |
64 | { | 78 | { |
65 | return test_bit(PAGE_EXT_IDLE, &lookup_page_ext(page)->flags); | 79 | struct page_ext *page_ext = lookup_page_ext(page); |
80 | |||
81 | if (unlikely(!page_ext)) | ||
82 | return false; | ||
83 | |||
84 | return test_bit(PAGE_EXT_IDLE, &page_ext->flags); | ||
66 | } | 85 | } |
67 | 86 | ||
68 | static inline void set_page_idle(struct page *page) | 87 | static inline void set_page_idle(struct page *page) |
69 | { | 88 | { |
70 | set_bit(PAGE_EXT_IDLE, &lookup_page_ext(page)->flags); | 89 | struct page_ext *page_ext = lookup_page_ext(page); |
90 | |||
91 | if (unlikely(!page_ext)) | ||
92 | return; | ||
93 | |||
94 | set_bit(PAGE_EXT_IDLE, &page_ext->flags); | ||
71 | } | 95 | } |
72 | 96 | ||
73 | static inline void clear_page_idle(struct page *page) | 97 | static inline void clear_page_idle(struct page *page) |
74 | { | 98 | { |
75 | clear_bit(PAGE_EXT_IDLE, &lookup_page_ext(page)->flags); | 99 | struct page_ext *page_ext = lookup_page_ext(page); |
100 | |||
101 | if (unlikely(!page_ext)) | ||
102 | return; | ||
103 | |||
104 | clear_bit(PAGE_EXT_IDLE, &page_ext->flags); | ||
76 | } | 105 | } |
77 | #endif /* CONFIG_64BIT */ | 106 | #endif /* CONFIG_64BIT */ |
78 | 107 | ||
diff --git a/include/linux/reservation.h b/include/linux/reservation.h index 49d057655d62..b0f305e77b7f 100644 --- a/include/linux/reservation.h +++ b/include/linux/reservation.h | |||
@@ -49,12 +49,27 @@ extern struct ww_class reservation_ww_class; | |||
49 | extern struct lock_class_key reservation_seqcount_class; | 49 | extern struct lock_class_key reservation_seqcount_class; |
50 | extern const char reservation_seqcount_string[]; | 50 | extern const char reservation_seqcount_string[]; |
51 | 51 | ||
52 | /** | ||
53 | * struct reservation_object_list - a list of shared fences | ||
54 | * @rcu: for internal use | ||
55 | * @shared_count: table of shared fences | ||
56 | * @shared_max: for growing shared fence table | ||
57 | * @shared: shared fence table | ||
58 | */ | ||
52 | struct reservation_object_list { | 59 | struct reservation_object_list { |
53 | struct rcu_head rcu; | 60 | struct rcu_head rcu; |
54 | u32 shared_count, shared_max; | 61 | u32 shared_count, shared_max; |
55 | struct fence __rcu *shared[]; | 62 | struct fence __rcu *shared[]; |
56 | }; | 63 | }; |
57 | 64 | ||
65 | /** | ||
66 | * struct reservation_object - a reservation object manages fences for a buffer | ||
67 | * @lock: update side lock | ||
68 | * @seq: sequence count for managing RCU read-side synchronization | ||
69 | * @fence_excl: the exclusive fence, if there is one currently | ||
70 | * @fence: list of current shared fences | ||
71 | * @staged: staged copy of shared fences for RCU updates | ||
72 | */ | ||
58 | struct reservation_object { | 73 | struct reservation_object { |
59 | struct ww_mutex lock; | 74 | struct ww_mutex lock; |
60 | seqcount_t seq; | 75 | seqcount_t seq; |
@@ -68,6 +83,10 @@ struct reservation_object { | |||
68 | #define reservation_object_assert_held(obj) \ | 83 | #define reservation_object_assert_held(obj) \ |
69 | lockdep_assert_held(&(obj)->lock.base) | 84 | lockdep_assert_held(&(obj)->lock.base) |
70 | 85 | ||
86 | /** | ||
87 | * reservation_object_init - initialize a reservation object | ||
88 | * @obj: the reservation object | ||
89 | */ | ||
71 | static inline void | 90 | static inline void |
72 | reservation_object_init(struct reservation_object *obj) | 91 | reservation_object_init(struct reservation_object *obj) |
73 | { | 92 | { |
@@ -79,6 +98,10 @@ reservation_object_init(struct reservation_object *obj) | |||
79 | obj->staged = NULL; | 98 | obj->staged = NULL; |
80 | } | 99 | } |
81 | 100 | ||
101 | /** | ||
102 | * reservation_object_fini - destroys a reservation object | ||
103 | * @obj: the reservation object | ||
104 | */ | ||
82 | static inline void | 105 | static inline void |
83 | reservation_object_fini(struct reservation_object *obj) | 106 | reservation_object_fini(struct reservation_object *obj) |
84 | { | 107 | { |
@@ -106,6 +129,14 @@ reservation_object_fini(struct reservation_object *obj) | |||
106 | ww_mutex_destroy(&obj->lock); | 129 | ww_mutex_destroy(&obj->lock); |
107 | } | 130 | } |
108 | 131 | ||
132 | /** | ||
133 | * reservation_object_get_list - get the reservation object's | ||
134 | * shared fence list, with update-side lock held | ||
135 | * @obj: the reservation object | ||
136 | * | ||
137 | * Returns the shared fence list. Does NOT take references to | ||
138 | * the fence. The obj->lock must be held. | ||
139 | */ | ||
109 | static inline struct reservation_object_list * | 140 | static inline struct reservation_object_list * |
110 | reservation_object_get_list(struct reservation_object *obj) | 141 | reservation_object_get_list(struct reservation_object *obj) |
111 | { | 142 | { |
@@ -113,6 +144,17 @@ reservation_object_get_list(struct reservation_object *obj) | |||
113 | reservation_object_held(obj)); | 144 | reservation_object_held(obj)); |
114 | } | 145 | } |
115 | 146 | ||
147 | /** | ||
148 | * reservation_object_get_excl - get the reservation object's | ||
149 | * exclusive fence, with update-side lock held | ||
150 | * @obj: the reservation object | ||
151 | * | ||
152 | * Returns the exclusive fence (if any). Does NOT take a | ||
153 | * reference. The obj->lock must be held. | ||
154 | * | ||
155 | * RETURNS | ||
156 | * The exclusive fence or NULL | ||
157 | */ | ||
116 | static inline struct fence * | 158 | static inline struct fence * |
117 | reservation_object_get_excl(struct reservation_object *obj) | 159 | reservation_object_get_excl(struct reservation_object *obj) |
118 | { | 160 | { |
@@ -120,6 +162,17 @@ reservation_object_get_excl(struct reservation_object *obj) | |||
120 | reservation_object_held(obj)); | 162 | reservation_object_held(obj)); |
121 | } | 163 | } |
122 | 164 | ||
165 | /** | ||
166 | * reservation_object_get_excl_rcu - get the reservation object's | ||
167 | * exclusive fence, without lock held. | ||
168 | * @obj: the reservation object | ||
169 | * | ||
170 | * If there is an exclusive fence, this atomically increments it's | ||
171 | * reference count and returns it. | ||
172 | * | ||
173 | * RETURNS | ||
174 | * The exclusive fence or NULL if none | ||
175 | */ | ||
123 | static inline struct fence * | 176 | static inline struct fence * |
124 | reservation_object_get_excl_rcu(struct reservation_object *obj) | 177 | reservation_object_get_excl_rcu(struct reservation_object *obj) |
125 | { | 178 | { |
diff --git a/include/linux/sctp.h b/include/linux/sctp.h index dacb5e711994..de1f64318fc4 100644 --- a/include/linux/sctp.h +++ b/include/linux/sctp.h | |||
@@ -765,6 +765,8 @@ struct sctp_info { | |||
765 | __u8 sctpi_s_disable_fragments; | 765 | __u8 sctpi_s_disable_fragments; |
766 | __u8 sctpi_s_v4mapped; | 766 | __u8 sctpi_s_v4mapped; |
767 | __u8 sctpi_s_frag_interleave; | 767 | __u8 sctpi_s_frag_interleave; |
768 | __u32 sctpi_s_type; | ||
769 | __u32 __reserved3; | ||
768 | }; | 770 | }; |
769 | 771 | ||
770 | struct sctp_infox { | 772 | struct sctp_infox { |
diff --git a/include/linux/timekeeping.h b/include/linux/timekeeping.h index 37dbacf84849..816b7543f81b 100644 --- a/include/linux/timekeeping.h +++ b/include/linux/timekeeping.h | |||
@@ -21,6 +21,9 @@ static inline int do_sys_settimeofday(const struct timespec *tv, | |||
21 | struct timespec64 ts64; | 21 | struct timespec64 ts64; |
22 | 22 | ||
23 | if (!tv) | 23 | if (!tv) |
24 | return do_sys_settimeofday64(NULL, tz); | ||
25 | |||
26 | if (!timespec_valid(tv)) | ||
24 | return -EINVAL; | 27 | return -EINVAL; |
25 | 28 | ||
26 | ts64 = timespec_to_timespec64(*tv); | 29 | ts64 = timespec_to_timespec64(*tv); |
diff --git a/include/net/ip6_tunnel.h b/include/net/ip6_tunnel.h index d325c81332e3..43a5a0e4524c 100644 --- a/include/net/ip6_tunnel.h +++ b/include/net/ip6_tunnel.h | |||
@@ -63,6 +63,8 @@ struct ip6_tnl_encap_ops { | |||
63 | u8 *protocol, struct flowi6 *fl6); | 63 | u8 *protocol, struct flowi6 *fl6); |
64 | }; | 64 | }; |
65 | 65 | ||
66 | #ifdef CONFIG_INET | ||
67 | |||
66 | extern const struct ip6_tnl_encap_ops __rcu * | 68 | extern const struct ip6_tnl_encap_ops __rcu * |
67 | ip6tun_encaps[MAX_IPTUN_ENCAP_OPS]; | 69 | ip6tun_encaps[MAX_IPTUN_ENCAP_OPS]; |
68 | 70 | ||
@@ -138,7 +140,6 @@ struct net *ip6_tnl_get_link_net(const struct net_device *dev); | |||
138 | int ip6_tnl_get_iflink(const struct net_device *dev); | 140 | int ip6_tnl_get_iflink(const struct net_device *dev); |
139 | int ip6_tnl_change_mtu(struct net_device *dev, int new_mtu); | 141 | int ip6_tnl_change_mtu(struct net_device *dev, int new_mtu); |
140 | 142 | ||
141 | #ifdef CONFIG_INET | ||
142 | static inline void ip6tunnel_xmit(struct sock *sk, struct sk_buff *skb, | 143 | static inline void ip6tunnel_xmit(struct sock *sk, struct sk_buff *skb, |
143 | struct net_device *dev) | 144 | struct net_device *dev) |
144 | { | 145 | { |
diff --git a/include/net/pkt_sched.h b/include/net/pkt_sched.h index 401038d2f9b8..fea53f4d92ca 100644 --- a/include/net/pkt_sched.h +++ b/include/net/pkt_sched.h | |||
@@ -61,6 +61,7 @@ psched_tdiff_bounded(psched_time_t tv1, psched_time_t tv2, psched_time_t bound) | |||
61 | } | 61 | } |
62 | 62 | ||
63 | struct qdisc_watchdog { | 63 | struct qdisc_watchdog { |
64 | u64 last_expires; | ||
64 | struct hrtimer timer; | 65 | struct hrtimer timer; |
65 | struct Qdisc *qdisc; | 66 | struct Qdisc *qdisc; |
66 | }; | 67 | }; |
diff --git a/include/uapi/linux/ethtool.h b/include/uapi/linux/ethtool.h index 9222db8ccccc..5f030b46cff4 100644 --- a/include/uapi/linux/ethtool.h +++ b/include/uapi/linux/ethtool.h | |||
@@ -1353,6 +1353,15 @@ enum ethtool_link_mode_bit_indices { | |||
1353 | ETHTOOL_LINK_MODE_56000baseCR4_Full_BIT = 28, | 1353 | ETHTOOL_LINK_MODE_56000baseCR4_Full_BIT = 28, |
1354 | ETHTOOL_LINK_MODE_56000baseSR4_Full_BIT = 29, | 1354 | ETHTOOL_LINK_MODE_56000baseSR4_Full_BIT = 29, |
1355 | ETHTOOL_LINK_MODE_56000baseLR4_Full_BIT = 30, | 1355 | ETHTOOL_LINK_MODE_56000baseLR4_Full_BIT = 30, |
1356 | ETHTOOL_LINK_MODE_25000baseCR_Full_BIT = 31, | ||
1357 | ETHTOOL_LINK_MODE_25000baseKR_Full_BIT = 32, | ||
1358 | ETHTOOL_LINK_MODE_25000baseSR_Full_BIT = 33, | ||
1359 | ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT = 34, | ||
1360 | ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT = 35, | ||
1361 | ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT = 36, | ||
1362 | ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT = 37, | ||
1363 | ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT = 38, | ||
1364 | ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT = 39, | ||
1356 | 1365 | ||
1357 | /* Last allowed bit for __ETHTOOL_LINK_MODE_LEGACY_MASK is bit | 1366 | /* Last allowed bit for __ETHTOOL_LINK_MODE_LEGACY_MASK is bit |
1358 | * 31. Please do NOT define any SUPPORTED_* or ADVERTISED_* | 1367 | * 31. Please do NOT define any SUPPORTED_* or ADVERTISED_* |
@@ -1361,7 +1370,7 @@ enum ethtool_link_mode_bit_indices { | |||
1361 | */ | 1370 | */ |
1362 | 1371 | ||
1363 | __ETHTOOL_LINK_MODE_LAST | 1372 | __ETHTOOL_LINK_MODE_LAST |
1364 | = ETHTOOL_LINK_MODE_56000baseLR4_Full_BIT, | 1373 | = ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT, |
1365 | }; | 1374 | }; |
1366 | 1375 | ||
1367 | #define __ETHTOOL_LINK_MODE_LEGACY_MASK(base_name) \ | 1376 | #define __ETHTOOL_LINK_MODE_LEGACY_MASK(base_name) \ |
diff --git a/include/uapi/linux/pkt_cls.h b/include/uapi/linux/pkt_cls.h index eba5914ba5d1..f4297c8a42fe 100644 --- a/include/uapi/linux/pkt_cls.h +++ b/include/uapi/linux/pkt_cls.h | |||
@@ -145,6 +145,8 @@ enum { | |||
145 | TCA_POLICE_PEAKRATE, | 145 | TCA_POLICE_PEAKRATE, |
146 | TCA_POLICE_AVRATE, | 146 | TCA_POLICE_AVRATE, |
147 | TCA_POLICE_RESULT, | 147 | TCA_POLICE_RESULT, |
148 | TCA_POLICE_TM, | ||
149 | TCA_POLICE_PAD, | ||
148 | __TCA_POLICE_MAX | 150 | __TCA_POLICE_MAX |
149 | #define TCA_POLICE_RESULT TCA_POLICE_RESULT | 151 | #define TCA_POLICE_RESULT TCA_POLICE_RESULT |
150 | }; | 152 | }; |
@@ -173,7 +175,7 @@ enum { | |||
173 | TCA_U32_DIVISOR, | 175 | TCA_U32_DIVISOR, |
174 | TCA_U32_SEL, | 176 | TCA_U32_SEL, |
175 | TCA_U32_POLICE, | 177 | TCA_U32_POLICE, |
176 | TCA_U32_ACT, | 178 | TCA_U32_ACT, |
177 | TCA_U32_INDEV, | 179 | TCA_U32_INDEV, |
178 | TCA_U32_PCNT, | 180 | TCA_U32_PCNT, |
179 | TCA_U32_MARK, | 181 | TCA_U32_MARK, |
diff --git a/kernel/bpf/inode.c b/kernel/bpf/inode.c index 04be7021f848..318858edb1cd 100644 --- a/kernel/bpf/inode.c +++ b/kernel/bpf/inode.c | |||
@@ -365,7 +365,6 @@ static struct file_system_type bpf_fs_type = { | |||
365 | .name = "bpf", | 365 | .name = "bpf", |
366 | .mount = bpf_mount, | 366 | .mount = bpf_mount, |
367 | .kill_sb = kill_litter_super, | 367 | .kill_sb = kill_litter_super, |
368 | .fs_flags = FS_USERNS_MOUNT, | ||
369 | }; | 368 | }; |
370 | 369 | ||
371 | MODULE_ALIAS_FS("bpf"); | 370 | MODULE_ALIAS_FS("bpf"); |
diff --git a/kernel/irq/ipi.c b/kernel/irq/ipi.c index c42742208e5e..89b49f6773f0 100644 --- a/kernel/irq/ipi.c +++ b/kernel/irq/ipi.c | |||
@@ -125,7 +125,7 @@ int irq_destroy_ipi(unsigned int irq, const struct cpumask *dest) | |||
125 | 125 | ||
126 | domain = data->domain; | 126 | domain = data->domain; |
127 | if (WARN_ON(domain == NULL)) | 127 | if (WARN_ON(domain == NULL)) |
128 | return; | 128 | return -EINVAL; |
129 | 129 | ||
130 | if (!irq_domain_is_ipi(domain)) { | 130 | if (!irq_domain_is_ipi(domain)) { |
131 | pr_warn("Trying to destroy a non IPI domain!\n"); | 131 | pr_warn("Trying to destroy a non IPI domain!\n"); |
diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c index 8c7392c4fdbd..e99df0ff1d42 100644 --- a/kernel/time/hrtimer.c +++ b/kernel/time/hrtimer.c | |||
@@ -425,6 +425,7 @@ void destroy_hrtimer_on_stack(struct hrtimer *timer) | |||
425 | { | 425 | { |
426 | debug_object_free(timer, &hrtimer_debug_descr); | 426 | debug_object_free(timer, &hrtimer_debug_descr); |
427 | } | 427 | } |
428 | EXPORT_SYMBOL_GPL(destroy_hrtimer_on_stack); | ||
428 | 429 | ||
429 | #else | 430 | #else |
430 | static inline void debug_hrtimer_init(struct hrtimer *timer) { } | 431 | static inline void debug_hrtimer_init(struct hrtimer *timer) { } |
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index 77d7d034bac3..b9cfdbfae9aa 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug | |||
@@ -1841,6 +1841,9 @@ config TEST_BITMAP | |||
1841 | 1841 | ||
1842 | If unsure, say N. | 1842 | If unsure, say N. |
1843 | 1843 | ||
1844 | config TEST_UUID | ||
1845 | tristate "Test functions located in the uuid module at runtime" | ||
1846 | |||
1844 | config TEST_RHASHTABLE | 1847 | config TEST_RHASHTABLE |
1845 | tristate "Perform selftest on resizable hash table" | 1848 | tristate "Perform selftest on resizable hash table" |
1846 | default n | 1849 | default n |
diff --git a/lib/Makefile b/lib/Makefile index 499fb354d627..ff6a7a6c6395 100644 --- a/lib/Makefile +++ b/lib/Makefile | |||
@@ -58,6 +58,7 @@ obj-$(CONFIG_TEST_STATIC_KEYS) += test_static_keys.o | |||
58 | obj-$(CONFIG_TEST_STATIC_KEYS) += test_static_key_base.o | 58 | obj-$(CONFIG_TEST_STATIC_KEYS) += test_static_key_base.o |
59 | obj-$(CONFIG_TEST_PRINTF) += test_printf.o | 59 | obj-$(CONFIG_TEST_PRINTF) += test_printf.o |
60 | obj-$(CONFIG_TEST_BITMAP) += test_bitmap.o | 60 | obj-$(CONFIG_TEST_BITMAP) += test_bitmap.o |
61 | obj-$(CONFIG_TEST_UUID) += test_uuid.o | ||
61 | 62 | ||
62 | ifeq ($(CONFIG_DEBUG_KOBJECT),y) | 63 | ifeq ($(CONFIG_DEBUG_KOBJECT),y) |
63 | CFLAGS_kobject.o += -DDEBUG | 64 | CFLAGS_kobject.o += -DDEBUG |
diff --git a/lib/test_uuid.c b/lib/test_uuid.c new file mode 100644 index 000000000000..547d3127a3cf --- /dev/null +++ b/lib/test_uuid.c | |||
@@ -0,0 +1,133 @@ | |||
1 | /* | ||
2 | * Test cases for lib/uuid.c module. | ||
3 | */ | ||
4 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | ||
5 | |||
6 | #include <linux/init.h> | ||
7 | #include <linux/kernel.h> | ||
8 | #include <linux/module.h> | ||
9 | #include <linux/string.h> | ||
10 | #include <linux/uuid.h> | ||
11 | |||
12 | struct test_uuid_data { | ||
13 | const char *uuid; | ||
14 | uuid_le le; | ||
15 | uuid_be be; | ||
16 | }; | ||
17 | |||
18 | static const struct test_uuid_data test_uuid_test_data[] = { | ||
19 | { | ||
20 | .uuid = "c33f4995-3701-450e-9fbf-206a2e98e576", | ||
21 | .le = UUID_LE(0xc33f4995, 0x3701, 0x450e, 0x9f, 0xbf, 0x20, 0x6a, 0x2e, 0x98, 0xe5, 0x76), | ||
22 | .be = UUID_BE(0xc33f4995, 0x3701, 0x450e, 0x9f, 0xbf, 0x20, 0x6a, 0x2e, 0x98, 0xe5, 0x76), | ||
23 | }, | ||
24 | { | ||
25 | .uuid = "64b4371c-77c1-48f9-8221-29f054fc023b", | ||
26 | .le = UUID_LE(0x64b4371c, 0x77c1, 0x48f9, 0x82, 0x21, 0x29, 0xf0, 0x54, 0xfc, 0x02, 0x3b), | ||
27 | .be = UUID_BE(0x64b4371c, 0x77c1, 0x48f9, 0x82, 0x21, 0x29, 0xf0, 0x54, 0xfc, 0x02, 0x3b), | ||
28 | }, | ||
29 | { | ||
30 | .uuid = "0cb4ddff-a545-4401-9d06-688af53e7f84", | ||
31 | .le = UUID_LE(0x0cb4ddff, 0xa545, 0x4401, 0x9d, 0x06, 0x68, 0x8a, 0xf5, 0x3e, 0x7f, 0x84), | ||
32 | .be = UUID_BE(0x0cb4ddff, 0xa545, 0x4401, 0x9d, 0x06, 0x68, 0x8a, 0xf5, 0x3e, 0x7f, 0x84), | ||
33 | }, | ||
34 | }; | ||
35 | |||
36 | static const char * const test_uuid_wrong_data[] = { | ||
37 | "c33f4995-3701-450e-9fbf206a2e98e576 ", /* no hyphen(s) */ | ||
38 | "64b4371c-77c1-48f9-8221-29f054XX023b", /* invalid character(s) */ | ||
39 | "0cb4ddff-a545-4401-9d06-688af53e", /* not enough data */ | ||
40 | }; | ||
41 | |||
42 | static unsigned total_tests __initdata; | ||
43 | static unsigned failed_tests __initdata; | ||
44 | |||
45 | static void __init test_uuid_failed(const char *prefix, bool wrong, bool be, | ||
46 | const char *data, const char *actual) | ||
47 | { | ||
48 | pr_err("%s test #%u %s %s data: '%s'\n", | ||
49 | prefix, | ||
50 | total_tests, | ||
51 | wrong ? "passed on wrong" : "failed on", | ||
52 | be ? "BE" : "LE", | ||
53 | data); | ||
54 | if (actual && *actual) | ||
55 | pr_err("%s test #%u actual data: '%s'\n", | ||
56 | prefix, | ||
57 | total_tests, | ||
58 | actual); | ||
59 | failed_tests++; | ||
60 | } | ||
61 | |||
62 | static void __init test_uuid_test(const struct test_uuid_data *data) | ||
63 | { | ||
64 | uuid_le le; | ||
65 | uuid_be be; | ||
66 | char buf[48]; | ||
67 | |||
68 | /* LE */ | ||
69 | total_tests++; | ||
70 | if (uuid_le_to_bin(data->uuid, &le)) | ||
71 | test_uuid_failed("conversion", false, false, data->uuid, NULL); | ||
72 | |||
73 | total_tests++; | ||
74 | if (uuid_le_cmp(data->le, le)) { | ||
75 | sprintf(buf, "%pUl", &le); | ||
76 | test_uuid_failed("cmp", false, false, data->uuid, buf); | ||
77 | } | ||
78 | |||
79 | /* BE */ | ||
80 | total_tests++; | ||
81 | if (uuid_be_to_bin(data->uuid, &be)) | ||
82 | test_uuid_failed("conversion", false, true, data->uuid, NULL); | ||
83 | |||
84 | total_tests++; | ||
85 | if (uuid_be_cmp(data->be, be)) { | ||
86 | sprintf(buf, "%pUb", &be); | ||
87 | test_uuid_failed("cmp", false, true, data->uuid, buf); | ||
88 | } | ||
89 | } | ||
90 | |||
91 | static void __init test_uuid_wrong(const char *data) | ||
92 | { | ||
93 | uuid_le le; | ||
94 | uuid_be be; | ||
95 | |||
96 | /* LE */ | ||
97 | total_tests++; | ||
98 | if (!uuid_le_to_bin(data, &le)) | ||
99 | test_uuid_failed("negative", true, false, data, NULL); | ||
100 | |||
101 | /* BE */ | ||
102 | total_tests++; | ||
103 | if (!uuid_be_to_bin(data, &be)) | ||
104 | test_uuid_failed("negative", true, true, data, NULL); | ||
105 | } | ||
106 | |||
107 | static int __init test_uuid_init(void) | ||
108 | { | ||
109 | unsigned int i; | ||
110 | |||
111 | for (i = 0; i < ARRAY_SIZE(test_uuid_test_data); i++) | ||
112 | test_uuid_test(&test_uuid_test_data[i]); | ||
113 | |||
114 | for (i = 0; i < ARRAY_SIZE(test_uuid_wrong_data); i++) | ||
115 | test_uuid_wrong(test_uuid_wrong_data[i]); | ||
116 | |||
117 | if (failed_tests == 0) | ||
118 | pr_info("all %u tests passed\n", total_tests); | ||
119 | else | ||
120 | pr_err("failed %u out of %u tests\n", failed_tests, total_tests); | ||
121 | |||
122 | return failed_tests ? -EINVAL : 0; | ||
123 | } | ||
124 | module_init(test_uuid_init); | ||
125 | |||
126 | static void __exit test_uuid_exit(void) | ||
127 | { | ||
128 | /* do nothing */ | ||
129 | } | ||
130 | module_exit(test_uuid_exit); | ||
131 | |||
132 | MODULE_AUTHOR("Andy Shevchenko <andriy.shevchenko@linux.intel.com>"); | ||
133 | MODULE_LICENSE("Dual BSD/GPL"); | ||
diff --git a/lib/uuid.c b/lib/uuid.c index e116ae5fa00f..37687af77ff8 100644 --- a/lib/uuid.c +++ b/lib/uuid.c | |||
@@ -106,8 +106,8 @@ static int __uuid_to_bin(const char *uuid, __u8 b[16], const u8 ei[16]) | |||
106 | return -EINVAL; | 106 | return -EINVAL; |
107 | 107 | ||
108 | for (i = 0; i < 16; i++) { | 108 | for (i = 0; i < 16; i++) { |
109 | int hi = hex_to_bin(uuid[si[i]] + 0); | 109 | int hi = hex_to_bin(uuid[si[i] + 0]); |
110 | int lo = hex_to_bin(uuid[si[i]] + 1); | 110 | int lo = hex_to_bin(uuid[si[i] + 1]); |
111 | 111 | ||
112 | b[ei[i]] = (hi << 4) | lo; | 112 | b[ei[i]] = (hi << 4) | lo; |
113 | } | 113 | } |
diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 925b431f3f03..58c69c94402a 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c | |||
@@ -2896,6 +2896,7 @@ static void memcg_offline_kmem(struct mem_cgroup *memcg) | |||
2896 | * ordering is imposed by list_lru_node->lock taken by | 2896 | * ordering is imposed by list_lru_node->lock taken by |
2897 | * memcg_drain_all_list_lrus(). | 2897 | * memcg_drain_all_list_lrus(). |
2898 | */ | 2898 | */ |
2899 | rcu_read_lock(); /* can be called from css_free w/o cgroup_mutex */ | ||
2899 | css_for_each_descendant_pre(css, &memcg->css) { | 2900 | css_for_each_descendant_pre(css, &memcg->css) { |
2900 | child = mem_cgroup_from_css(css); | 2901 | child = mem_cgroup_from_css(css); |
2901 | BUG_ON(child->kmemcg_id != kmemcg_id); | 2902 | BUG_ON(child->kmemcg_id != kmemcg_id); |
@@ -2903,6 +2904,8 @@ static void memcg_offline_kmem(struct mem_cgroup *memcg) | |||
2903 | if (!memcg->use_hierarchy) | 2904 | if (!memcg->use_hierarchy) |
2904 | break; | 2905 | break; |
2905 | } | 2906 | } |
2907 | rcu_read_unlock(); | ||
2908 | |||
2906 | memcg_drain_all_list_lrus(kmemcg_id, parent->kmemcg_id); | 2909 | memcg_drain_all_list_lrus(kmemcg_id, parent->kmemcg_id); |
2907 | 2910 | ||
2908 | memcg_free_cache_id(kmemcg_id); | 2911 | memcg_free_cache_id(kmemcg_id); |
diff --git a/mm/oom_kill.c b/mm/oom_kill.c index dfb1ab61fb23..acbc432d1a52 100644 --- a/mm/oom_kill.c +++ b/mm/oom_kill.c | |||
@@ -625,8 +625,6 @@ void try_oom_reaper(struct task_struct *tsk) | |||
625 | if (atomic_read(&mm->mm_users) > 1) { | 625 | if (atomic_read(&mm->mm_users) > 1) { |
626 | rcu_read_lock(); | 626 | rcu_read_lock(); |
627 | for_each_process(p) { | 627 | for_each_process(p) { |
628 | bool exiting; | ||
629 | |||
630 | if (!process_shares_mm(p, mm)) | 628 | if (!process_shares_mm(p, mm)) |
631 | continue; | 629 | continue; |
632 | if (fatal_signal_pending(p)) | 630 | if (fatal_signal_pending(p)) |
@@ -636,10 +634,7 @@ void try_oom_reaper(struct task_struct *tsk) | |||
636 | * If the task is exiting make sure the whole thread group | 634 | * If the task is exiting make sure the whole thread group |
637 | * is exiting and cannot acces mm anymore. | 635 | * is exiting and cannot acces mm anymore. |
638 | */ | 636 | */ |
639 | spin_lock_irq(&p->sighand->siglock); | 637 | if (signal_group_exit(p->signal)) |
640 | exiting = signal_group_exit(p->signal); | ||
641 | spin_unlock_irq(&p->sighand->siglock); | ||
642 | if (exiting) | ||
643 | continue; | 638 | continue; |
644 | 639 | ||
645 | /* Give up */ | 640 | /* Give up */ |
diff --git a/mm/page_alloc.c b/mm/page_alloc.c index f8f3bfc435ee..6903b695ebae 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c | |||
@@ -656,6 +656,9 @@ static inline void set_page_guard(struct zone *zone, struct page *page, | |||
656 | return; | 656 | return; |
657 | 657 | ||
658 | page_ext = lookup_page_ext(page); | 658 | page_ext = lookup_page_ext(page); |
659 | if (unlikely(!page_ext)) | ||
660 | return; | ||
661 | |||
659 | __set_bit(PAGE_EXT_DEBUG_GUARD, &page_ext->flags); | 662 | __set_bit(PAGE_EXT_DEBUG_GUARD, &page_ext->flags); |
660 | 663 | ||
661 | INIT_LIST_HEAD(&page->lru); | 664 | INIT_LIST_HEAD(&page->lru); |
@@ -673,6 +676,9 @@ static inline void clear_page_guard(struct zone *zone, struct page *page, | |||
673 | return; | 676 | return; |
674 | 677 | ||
675 | page_ext = lookup_page_ext(page); | 678 | page_ext = lookup_page_ext(page); |
679 | if (unlikely(!page_ext)) | ||
680 | return; | ||
681 | |||
676 | __clear_bit(PAGE_EXT_DEBUG_GUARD, &page_ext->flags); | 682 | __clear_bit(PAGE_EXT_DEBUG_GUARD, &page_ext->flags); |
677 | 683 | ||
678 | set_page_private(page, 0); | 684 | set_page_private(page, 0); |
@@ -2609,11 +2615,12 @@ struct page *buffered_rmqueue(struct zone *preferred_zone, | |||
2609 | page = list_last_entry(list, struct page, lru); | 2615 | page = list_last_entry(list, struct page, lru); |
2610 | else | 2616 | else |
2611 | page = list_first_entry(list, struct page, lru); | 2617 | page = list_first_entry(list, struct page, lru); |
2612 | } while (page && check_new_pcp(page)); | ||
2613 | 2618 | ||
2614 | __dec_zone_state(zone, NR_ALLOC_BATCH); | 2619 | __dec_zone_state(zone, NR_ALLOC_BATCH); |
2615 | list_del(&page->lru); | 2620 | list_del(&page->lru); |
2616 | pcp->count--; | 2621 | pcp->count--; |
2622 | |||
2623 | } while (check_new_pcp(page)); | ||
2617 | } else { | 2624 | } else { |
2618 | /* | 2625 | /* |
2619 | * We most definitely don't want callers attempting to | 2626 | * We most definitely don't want callers attempting to |
@@ -3023,6 +3030,7 @@ reset_fair: | |||
3023 | apply_fair = false; | 3030 | apply_fair = false; |
3024 | fair_skipped = false; | 3031 | fair_skipped = false; |
3025 | reset_alloc_batches(ac->preferred_zoneref->zone); | 3032 | reset_alloc_batches(ac->preferred_zoneref->zone); |
3033 | z = ac->preferred_zoneref; | ||
3026 | goto zonelist_scan; | 3034 | goto zonelist_scan; |
3027 | } | 3035 | } |
3028 | 3036 | ||
@@ -3596,6 +3604,17 @@ retry: | |||
3596 | */ | 3604 | */ |
3597 | alloc_flags = gfp_to_alloc_flags(gfp_mask); | 3605 | alloc_flags = gfp_to_alloc_flags(gfp_mask); |
3598 | 3606 | ||
3607 | /* | ||
3608 | * Reset the zonelist iterators if memory policies can be ignored. | ||
3609 | * These allocations are high priority and system rather than user | ||
3610 | * orientated. | ||
3611 | */ | ||
3612 | if ((alloc_flags & ALLOC_NO_WATERMARKS) || !(alloc_flags & ALLOC_CPUSET)) { | ||
3613 | ac->zonelist = node_zonelist(numa_node_id(), gfp_mask); | ||
3614 | ac->preferred_zoneref = first_zones_zonelist(ac->zonelist, | ||
3615 | ac->high_zoneidx, ac->nodemask); | ||
3616 | } | ||
3617 | |||
3599 | /* This is the last chance, in general, before the goto nopage. */ | 3618 | /* This is the last chance, in general, before the goto nopage. */ |
3600 | page = get_page_from_freelist(gfp_mask, order, | 3619 | page = get_page_from_freelist(gfp_mask, order, |
3601 | alloc_flags & ~ALLOC_NO_WATERMARKS, ac); | 3620 | alloc_flags & ~ALLOC_NO_WATERMARKS, ac); |
@@ -3604,12 +3623,6 @@ retry: | |||
3604 | 3623 | ||
3605 | /* Allocate without watermarks if the context allows */ | 3624 | /* Allocate without watermarks if the context allows */ |
3606 | if (alloc_flags & ALLOC_NO_WATERMARKS) { | 3625 | if (alloc_flags & ALLOC_NO_WATERMARKS) { |
3607 | /* | ||
3608 | * Ignore mempolicies if ALLOC_NO_WATERMARKS on the grounds | ||
3609 | * the allocation is high priority and these type of | ||
3610 | * allocations are system rather than user orientated | ||
3611 | */ | ||
3612 | ac->zonelist = node_zonelist(numa_node_id(), gfp_mask); | ||
3613 | page = get_page_from_freelist(gfp_mask, order, | 3626 | page = get_page_from_freelist(gfp_mask, order, |
3614 | ALLOC_NO_WATERMARKS, ac); | 3627 | ALLOC_NO_WATERMARKS, ac); |
3615 | if (page) | 3628 | if (page) |
@@ -3808,7 +3821,11 @@ retry_cpuset: | |||
3808 | /* Dirty zone balancing only done in the fast path */ | 3821 | /* Dirty zone balancing only done in the fast path */ |
3809 | ac.spread_dirty_pages = (gfp_mask & __GFP_WRITE); | 3822 | ac.spread_dirty_pages = (gfp_mask & __GFP_WRITE); |
3810 | 3823 | ||
3811 | /* The preferred zone is used for statistics later */ | 3824 | /* |
3825 | * The preferred zone is used for statistics but crucially it is | ||
3826 | * also used as the starting point for the zonelist iterator. It | ||
3827 | * may get reset for allocations that ignore memory policies. | ||
3828 | */ | ||
3812 | ac.preferred_zoneref = first_zones_zonelist(ac.zonelist, | 3829 | ac.preferred_zoneref = first_zones_zonelist(ac.zonelist, |
3813 | ac.high_zoneidx, ac.nodemask); | 3830 | ac.high_zoneidx, ac.nodemask); |
3814 | if (!ac.preferred_zoneref) { | 3831 | if (!ac.preferred_zoneref) { |
diff --git a/mm/page_owner.c b/mm/page_owner.c index 792b56da13d8..c6cda3e36212 100644 --- a/mm/page_owner.c +++ b/mm/page_owner.c | |||
@@ -55,6 +55,8 @@ void __reset_page_owner(struct page *page, unsigned int order) | |||
55 | 55 | ||
56 | for (i = 0; i < (1 << order); i++) { | 56 | for (i = 0; i < (1 << order); i++) { |
57 | page_ext = lookup_page_ext(page + i); | 57 | page_ext = lookup_page_ext(page + i); |
58 | if (unlikely(!page_ext)) | ||
59 | continue; | ||
58 | __clear_bit(PAGE_EXT_OWNER, &page_ext->flags); | 60 | __clear_bit(PAGE_EXT_OWNER, &page_ext->flags); |
59 | } | 61 | } |
60 | } | 62 | } |
@@ -62,6 +64,7 @@ void __reset_page_owner(struct page *page, unsigned int order) | |||
62 | void __set_page_owner(struct page *page, unsigned int order, gfp_t gfp_mask) | 64 | void __set_page_owner(struct page *page, unsigned int order, gfp_t gfp_mask) |
63 | { | 65 | { |
64 | struct page_ext *page_ext = lookup_page_ext(page); | 66 | struct page_ext *page_ext = lookup_page_ext(page); |
67 | |||
65 | struct stack_trace trace = { | 68 | struct stack_trace trace = { |
66 | .nr_entries = 0, | 69 | .nr_entries = 0, |
67 | .max_entries = ARRAY_SIZE(page_ext->trace_entries), | 70 | .max_entries = ARRAY_SIZE(page_ext->trace_entries), |
@@ -69,6 +72,9 @@ void __set_page_owner(struct page *page, unsigned int order, gfp_t gfp_mask) | |||
69 | .skip = 3, | 72 | .skip = 3, |
70 | }; | 73 | }; |
71 | 74 | ||
75 | if (unlikely(!page_ext)) | ||
76 | return; | ||
77 | |||
72 | save_stack_trace(&trace); | 78 | save_stack_trace(&trace); |
73 | 79 | ||
74 | page_ext->order = order; | 80 | page_ext->order = order; |
@@ -82,6 +88,8 @@ void __set_page_owner(struct page *page, unsigned int order, gfp_t gfp_mask) | |||
82 | void __set_page_owner_migrate_reason(struct page *page, int reason) | 88 | void __set_page_owner_migrate_reason(struct page *page, int reason) |
83 | { | 89 | { |
84 | struct page_ext *page_ext = lookup_page_ext(page); | 90 | struct page_ext *page_ext = lookup_page_ext(page); |
91 | if (unlikely(!page_ext)) | ||
92 | return; | ||
85 | 93 | ||
86 | page_ext->last_migrate_reason = reason; | 94 | page_ext->last_migrate_reason = reason; |
87 | } | 95 | } |
@@ -89,6 +97,12 @@ void __set_page_owner_migrate_reason(struct page *page, int reason) | |||
89 | gfp_t __get_page_owner_gfp(struct page *page) | 97 | gfp_t __get_page_owner_gfp(struct page *page) |
90 | { | 98 | { |
91 | struct page_ext *page_ext = lookup_page_ext(page); | 99 | struct page_ext *page_ext = lookup_page_ext(page); |
100 | if (unlikely(!page_ext)) | ||
101 | /* | ||
102 | * The caller just returns 0 if no valid gfp | ||
103 | * So return 0 here too. | ||
104 | */ | ||
105 | return 0; | ||
92 | 106 | ||
93 | return page_ext->gfp_mask; | 107 | return page_ext->gfp_mask; |
94 | } | 108 | } |
@@ -99,6 +113,9 @@ void __copy_page_owner(struct page *oldpage, struct page *newpage) | |||
99 | struct page_ext *new_ext = lookup_page_ext(newpage); | 113 | struct page_ext *new_ext = lookup_page_ext(newpage); |
100 | int i; | 114 | int i; |
101 | 115 | ||
116 | if (unlikely(!old_ext || !new_ext)) | ||
117 | return; | ||
118 | |||
102 | new_ext->order = old_ext->order; | 119 | new_ext->order = old_ext->order; |
103 | new_ext->gfp_mask = old_ext->gfp_mask; | 120 | new_ext->gfp_mask = old_ext->gfp_mask; |
104 | new_ext->nr_entries = old_ext->nr_entries; | 121 | new_ext->nr_entries = old_ext->nr_entries; |
@@ -193,6 +210,11 @@ void __dump_page_owner(struct page *page) | |||
193 | gfp_t gfp_mask = page_ext->gfp_mask; | 210 | gfp_t gfp_mask = page_ext->gfp_mask; |
194 | int mt = gfpflags_to_migratetype(gfp_mask); | 211 | int mt = gfpflags_to_migratetype(gfp_mask); |
195 | 212 | ||
213 | if (unlikely(!page_ext)) { | ||
214 | pr_alert("There is not page extension available.\n"); | ||
215 | return; | ||
216 | } | ||
217 | |||
196 | if (!test_bit(PAGE_EXT_OWNER, &page_ext->flags)) { | 218 | if (!test_bit(PAGE_EXT_OWNER, &page_ext->flags)) { |
197 | pr_alert("page_owner info is not active (free page?)\n"); | 219 | pr_alert("page_owner info is not active (free page?)\n"); |
198 | return; | 220 | return; |
@@ -251,6 +273,8 @@ read_page_owner(struct file *file, char __user *buf, size_t count, loff_t *ppos) | |||
251 | } | 273 | } |
252 | 274 | ||
253 | page_ext = lookup_page_ext(page); | 275 | page_ext = lookup_page_ext(page); |
276 | if (unlikely(!page_ext)) | ||
277 | continue; | ||
254 | 278 | ||
255 | /* | 279 | /* |
256 | * Some pages could be missed by concurrent allocation or free, | 280 | * Some pages could be missed by concurrent allocation or free, |
@@ -317,6 +341,8 @@ static void init_pages_in_zone(pg_data_t *pgdat, struct zone *zone) | |||
317 | continue; | 341 | continue; |
318 | 342 | ||
319 | page_ext = lookup_page_ext(page); | 343 | page_ext = lookup_page_ext(page); |
344 | if (unlikely(!page_ext)) | ||
345 | continue; | ||
320 | 346 | ||
321 | /* Maybe overraping zone */ | 347 | /* Maybe overraping zone */ |
322 | if (test_bit(PAGE_EXT_OWNER, &page_ext->flags)) | 348 | if (test_bit(PAGE_EXT_OWNER, &page_ext->flags)) |
diff --git a/mm/page_poison.c b/mm/page_poison.c index 1eae5fad2446..2e647c65916b 100644 --- a/mm/page_poison.c +++ b/mm/page_poison.c | |||
@@ -54,6 +54,9 @@ static inline void set_page_poison(struct page *page) | |||
54 | struct page_ext *page_ext; | 54 | struct page_ext *page_ext; |
55 | 55 | ||
56 | page_ext = lookup_page_ext(page); | 56 | page_ext = lookup_page_ext(page); |
57 | if (unlikely(!page_ext)) | ||
58 | return; | ||
59 | |||
57 | __set_bit(PAGE_EXT_DEBUG_POISON, &page_ext->flags); | 60 | __set_bit(PAGE_EXT_DEBUG_POISON, &page_ext->flags); |
58 | } | 61 | } |
59 | 62 | ||
@@ -62,6 +65,9 @@ static inline void clear_page_poison(struct page *page) | |||
62 | struct page_ext *page_ext; | 65 | struct page_ext *page_ext; |
63 | 66 | ||
64 | page_ext = lookup_page_ext(page); | 67 | page_ext = lookup_page_ext(page); |
68 | if (unlikely(!page_ext)) | ||
69 | return; | ||
70 | |||
65 | __clear_bit(PAGE_EXT_DEBUG_POISON, &page_ext->flags); | 71 | __clear_bit(PAGE_EXT_DEBUG_POISON, &page_ext->flags); |
66 | } | 72 | } |
67 | 73 | ||
@@ -70,7 +76,7 @@ bool page_is_poisoned(struct page *page) | |||
70 | struct page_ext *page_ext; | 76 | struct page_ext *page_ext; |
71 | 77 | ||
72 | page_ext = lookup_page_ext(page); | 78 | page_ext = lookup_page_ext(page); |
73 | if (!page_ext) | 79 | if (unlikely(!page_ext)) |
74 | return false; | 80 | return false; |
75 | 81 | ||
76 | return test_bit(PAGE_EXT_DEBUG_POISON, &page_ext->flags); | 82 | return test_bit(PAGE_EXT_DEBUG_POISON, &page_ext->flags); |
diff --git a/mm/vmalloc.c b/mm/vmalloc.c index cf7ad1a53be0..e11475cdeb7a 100644 --- a/mm/vmalloc.c +++ b/mm/vmalloc.c | |||
@@ -1105,7 +1105,7 @@ EXPORT_SYMBOL_GPL(vm_unmap_aliases); | |||
1105 | */ | 1105 | */ |
1106 | void vm_unmap_ram(const void *mem, unsigned int count) | 1106 | void vm_unmap_ram(const void *mem, unsigned int count) |
1107 | { | 1107 | { |
1108 | unsigned long size = count << PAGE_SHIFT; | 1108 | unsigned long size = (unsigned long)count << PAGE_SHIFT; |
1109 | unsigned long addr = (unsigned long)mem; | 1109 | unsigned long addr = (unsigned long)mem; |
1110 | 1110 | ||
1111 | BUG_ON(!addr); | 1111 | BUG_ON(!addr); |
@@ -1140,7 +1140,7 @@ EXPORT_SYMBOL(vm_unmap_ram); | |||
1140 | */ | 1140 | */ |
1141 | void *vm_map_ram(struct page **pages, unsigned int count, int node, pgprot_t prot) | 1141 | void *vm_map_ram(struct page **pages, unsigned int count, int node, pgprot_t prot) |
1142 | { | 1142 | { |
1143 | unsigned long size = count << PAGE_SHIFT; | 1143 | unsigned long size = (unsigned long)count << PAGE_SHIFT; |
1144 | unsigned long addr; | 1144 | unsigned long addr; |
1145 | void *mem; | 1145 | void *mem; |
1146 | 1146 | ||
@@ -1574,14 +1574,15 @@ void *vmap(struct page **pages, unsigned int count, | |||
1574 | unsigned long flags, pgprot_t prot) | 1574 | unsigned long flags, pgprot_t prot) |
1575 | { | 1575 | { |
1576 | struct vm_struct *area; | 1576 | struct vm_struct *area; |
1577 | unsigned long size; /* In bytes */ | ||
1577 | 1578 | ||
1578 | might_sleep(); | 1579 | might_sleep(); |
1579 | 1580 | ||
1580 | if (count > totalram_pages) | 1581 | if (count > totalram_pages) |
1581 | return NULL; | 1582 | return NULL; |
1582 | 1583 | ||
1583 | area = get_vm_area_caller((count << PAGE_SHIFT), flags, | 1584 | size = (unsigned long)count << PAGE_SHIFT; |
1584 | __builtin_return_address(0)); | 1585 | area = get_vm_area_caller(size, flags, __builtin_return_address(0)); |
1585 | if (!area) | 1586 | if (!area) |
1586 | return NULL; | 1587 | return NULL; |
1587 | 1588 | ||
diff --git a/mm/vmstat.c b/mm/vmstat.c index 77e42ef388c2..cb2a67bb4158 100644 --- a/mm/vmstat.c +++ b/mm/vmstat.c | |||
@@ -1061,6 +1061,8 @@ static void pagetypeinfo_showmixedcount_print(struct seq_file *m, | |||
1061 | continue; | 1061 | continue; |
1062 | 1062 | ||
1063 | page_ext = lookup_page_ext(page); | 1063 | page_ext = lookup_page_ext(page); |
1064 | if (unlikely(!page_ext)) | ||
1065 | continue; | ||
1064 | 1066 | ||
1065 | if (!test_bit(PAGE_EXT_OWNER, &page_ext->flags)) | 1067 | if (!test_bit(PAGE_EXT_OWNER, &page_ext->flags)) |
1066 | continue; | 1068 | continue; |
diff --git a/mm/z3fold.c b/mm/z3fold.c index 34917d55d311..8f9e89ca1d31 100644 --- a/mm/z3fold.c +++ b/mm/z3fold.c | |||
@@ -412,7 +412,7 @@ static void z3fold_free(struct z3fold_pool *pool, unsigned long handle) | |||
412 | /* HEADLESS page stored */ | 412 | /* HEADLESS page stored */ |
413 | bud = HEADLESS; | 413 | bud = HEADLESS; |
414 | } else { | 414 | } else { |
415 | bud = (handle - zhdr->first_num) & BUDDY_MASK; | 415 | bud = handle_to_buddy(handle); |
416 | 416 | ||
417 | switch (bud) { | 417 | switch (bud) { |
418 | case FIRST: | 418 | case FIRST: |
@@ -572,15 +572,19 @@ next: | |||
572 | pool->pages_nr--; | 572 | pool->pages_nr--; |
573 | spin_unlock(&pool->lock); | 573 | spin_unlock(&pool->lock); |
574 | return 0; | 574 | return 0; |
575 | } else if (zhdr->first_chunks != 0 && | 575 | } else if (!test_bit(PAGE_HEADLESS, &page->private)) { |
576 | zhdr->last_chunks != 0 && zhdr->middle_chunks != 0) { | 576 | if (zhdr->first_chunks != 0 && |
577 | /* Full, add to buddied list */ | 577 | zhdr->last_chunks != 0 && |
578 | list_add(&zhdr->buddy, &pool->buddied); | 578 | zhdr->middle_chunks != 0) { |
579 | } else if (!test_bit(PAGE_HEADLESS, &page->private)) { | 579 | /* Full, add to buddied list */ |
580 | z3fold_compact_page(zhdr); | 580 | list_add(&zhdr->buddy, &pool->buddied); |
581 | /* add to unbuddied list */ | 581 | } else { |
582 | freechunks = num_free_chunks(zhdr); | 582 | z3fold_compact_page(zhdr); |
583 | list_add(&zhdr->buddy, &pool->unbuddied[freechunks]); | 583 | /* add to unbuddied list */ |
584 | freechunks = num_free_chunks(zhdr); | ||
585 | list_add(&zhdr->buddy, | ||
586 | &pool->unbuddied[freechunks]); | ||
587 | } | ||
584 | } | 588 | } |
585 | 589 | ||
586 | /* add to beginning of LRU */ | 590 | /* add to beginning of LRU */ |
diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c index a1e273af6fc8..82a116ba590e 100644 --- a/net/8021q/vlan.c +++ b/net/8021q/vlan.c | |||
@@ -290,6 +290,10 @@ static void vlan_sync_address(struct net_device *dev, | |||
290 | if (ether_addr_equal(vlan->real_dev_addr, dev->dev_addr)) | 290 | if (ether_addr_equal(vlan->real_dev_addr, dev->dev_addr)) |
291 | return; | 291 | return; |
292 | 292 | ||
293 | /* vlan continues to inherit address of lower device */ | ||
294 | if (vlan_dev_inherit_address(vlandev, dev)) | ||
295 | goto out; | ||
296 | |||
293 | /* vlan address was different from the old address and is equal to | 297 | /* vlan address was different from the old address and is equal to |
294 | * the new address */ | 298 | * the new address */ |
295 | if (!ether_addr_equal(vlandev->dev_addr, vlan->real_dev_addr) && | 299 | if (!ether_addr_equal(vlandev->dev_addr, vlan->real_dev_addr) && |
@@ -302,6 +306,7 @@ static void vlan_sync_address(struct net_device *dev, | |||
302 | !ether_addr_equal(vlandev->dev_addr, dev->dev_addr)) | 306 | !ether_addr_equal(vlandev->dev_addr, dev->dev_addr)) |
303 | dev_uc_add(dev, vlandev->dev_addr); | 307 | dev_uc_add(dev, vlandev->dev_addr); |
304 | 308 | ||
309 | out: | ||
305 | ether_addr_copy(vlan->real_dev_addr, dev->dev_addr); | 310 | ether_addr_copy(vlan->real_dev_addr, dev->dev_addr); |
306 | } | 311 | } |
307 | 312 | ||
diff --git a/net/8021q/vlan.h b/net/8021q/vlan.h index 9d010a09ab98..cc1557978066 100644 --- a/net/8021q/vlan.h +++ b/net/8021q/vlan.h | |||
@@ -109,6 +109,8 @@ int vlan_check_real_dev(struct net_device *real_dev, | |||
109 | void vlan_setup(struct net_device *dev); | 109 | void vlan_setup(struct net_device *dev); |
110 | int register_vlan_dev(struct net_device *dev); | 110 | int register_vlan_dev(struct net_device *dev); |
111 | void unregister_vlan_dev(struct net_device *dev, struct list_head *head); | 111 | void unregister_vlan_dev(struct net_device *dev, struct list_head *head); |
112 | bool vlan_dev_inherit_address(struct net_device *dev, | ||
113 | struct net_device *real_dev); | ||
112 | 114 | ||
113 | static inline u32 vlan_get_ingress_priority(struct net_device *dev, | 115 | static inline u32 vlan_get_ingress_priority(struct net_device *dev, |
114 | u16 vlan_tci) | 116 | u16 vlan_tci) |
diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c index e7e62570bdb8..86ae75b77390 100644 --- a/net/8021q/vlan_dev.c +++ b/net/8021q/vlan_dev.c | |||
@@ -245,6 +245,17 @@ void vlan_dev_get_realdev_name(const struct net_device *dev, char *result) | |||
245 | strncpy(result, vlan_dev_priv(dev)->real_dev->name, 23); | 245 | strncpy(result, vlan_dev_priv(dev)->real_dev->name, 23); |
246 | } | 246 | } |
247 | 247 | ||
248 | bool vlan_dev_inherit_address(struct net_device *dev, | ||
249 | struct net_device *real_dev) | ||
250 | { | ||
251 | if (dev->addr_assign_type != NET_ADDR_STOLEN) | ||
252 | return false; | ||
253 | |||
254 | ether_addr_copy(dev->dev_addr, real_dev->dev_addr); | ||
255 | call_netdevice_notifiers(NETDEV_CHANGEADDR, dev); | ||
256 | return true; | ||
257 | } | ||
258 | |||
248 | static int vlan_dev_open(struct net_device *dev) | 259 | static int vlan_dev_open(struct net_device *dev) |
249 | { | 260 | { |
250 | struct vlan_dev_priv *vlan = vlan_dev_priv(dev); | 261 | struct vlan_dev_priv *vlan = vlan_dev_priv(dev); |
@@ -255,7 +266,8 @@ static int vlan_dev_open(struct net_device *dev) | |||
255 | !(vlan->flags & VLAN_FLAG_LOOSE_BINDING)) | 266 | !(vlan->flags & VLAN_FLAG_LOOSE_BINDING)) |
256 | return -ENETDOWN; | 267 | return -ENETDOWN; |
257 | 268 | ||
258 | if (!ether_addr_equal(dev->dev_addr, real_dev->dev_addr)) { | 269 | if (!ether_addr_equal(dev->dev_addr, real_dev->dev_addr) && |
270 | !vlan_dev_inherit_address(dev, real_dev)) { | ||
259 | err = dev_uc_add(real_dev, dev->dev_addr); | 271 | err = dev_uc_add(real_dev, dev->dev_addr); |
260 | if (err < 0) | 272 | if (err < 0) |
261 | goto out; | 273 | goto out; |
@@ -560,8 +572,10 @@ static int vlan_dev_init(struct net_device *dev) | |||
560 | /* ipv6 shared card related stuff */ | 572 | /* ipv6 shared card related stuff */ |
561 | dev->dev_id = real_dev->dev_id; | 573 | dev->dev_id = real_dev->dev_id; |
562 | 574 | ||
563 | if (is_zero_ether_addr(dev->dev_addr)) | 575 | if (is_zero_ether_addr(dev->dev_addr)) { |
564 | eth_hw_addr_inherit(dev, real_dev); | 576 | ether_addr_copy(dev->dev_addr, real_dev->dev_addr); |
577 | dev->addr_assign_type = NET_ADDR_STOLEN; | ||
578 | } | ||
565 | if (is_zero_ether_addr(dev->broadcast)) | 579 | if (is_zero_ether_addr(dev->broadcast)) |
566 | memcpy(dev->broadcast, real_dev->broadcast, dev->addr_len); | 580 | memcpy(dev->broadcast, real_dev->broadcast, dev->addr_len); |
567 | 581 | ||
diff --git a/net/atm/signaling.c b/net/atm/signaling.c index 4fd6af47383a..adb6e3d21b1e 100644 --- a/net/atm/signaling.c +++ b/net/atm/signaling.c | |||
@@ -124,7 +124,7 @@ as_indicate_complete: | |||
124 | break; | 124 | break; |
125 | case as_addparty: | 125 | case as_addparty: |
126 | case as_dropparty: | 126 | case as_dropparty: |
127 | sk->sk_err_soft = msg->reply; | 127 | sk->sk_err_soft = -msg->reply; |
128 | /* < 0 failure, otherwise ep_ref */ | 128 | /* < 0 failure, otherwise ep_ref */ |
129 | clear_bit(ATM_VF_WAITING, &vcc->flags); | 129 | clear_bit(ATM_VF_WAITING, &vcc->flags); |
130 | break; | 130 | break; |
diff --git a/net/atm/svc.c b/net/atm/svc.c index 3fa0a9ee98d1..878563a8354d 100644 --- a/net/atm/svc.c +++ b/net/atm/svc.c | |||
@@ -546,7 +546,7 @@ static int svc_addparty(struct socket *sock, struct sockaddr *sockaddr, | |||
546 | schedule(); | 546 | schedule(); |
547 | } | 547 | } |
548 | finish_wait(sk_sleep(sk), &wait); | 548 | finish_wait(sk_sleep(sk), &wait); |
549 | error = xchg(&sk->sk_err_soft, 0); | 549 | error = -xchg(&sk->sk_err_soft, 0); |
550 | out: | 550 | out: |
551 | release_sock(sk); | 551 | release_sock(sk); |
552 | return error; | 552 | return error; |
@@ -573,7 +573,7 @@ static int svc_dropparty(struct socket *sock, int ep_ref) | |||
573 | error = -EUNATCH; | 573 | error = -EUNATCH; |
574 | goto out; | 574 | goto out; |
575 | } | 575 | } |
576 | error = xchg(&sk->sk_err_soft, 0); | 576 | error = -xchg(&sk->sk_err_soft, 0); |
577 | out: | 577 | out: |
578 | release_sock(sk); | 578 | release_sock(sk); |
579 | return error; | 579 | return error; |
diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c index 0160d7d09a1e..89469592076c 100644 --- a/net/ceph/osd_client.c +++ b/net/ceph/osd_client.c | |||
@@ -1276,9 +1276,9 @@ static bool target_should_be_paused(struct ceph_osd_client *osdc, | |||
1276 | const struct ceph_osd_request_target *t, | 1276 | const struct ceph_osd_request_target *t, |
1277 | struct ceph_pg_pool_info *pi) | 1277 | struct ceph_pg_pool_info *pi) |
1278 | { | 1278 | { |
1279 | bool pauserd = ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_PAUSERD); | 1279 | bool pauserd = ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSERD); |
1280 | bool pausewr = ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_PAUSEWR) || | 1280 | bool pausewr = ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSEWR) || |
1281 | ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_FULL) || | 1281 | ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) || |
1282 | __pool_full(pi); | 1282 | __pool_full(pi); |
1283 | 1283 | ||
1284 | WARN_ON(pi->id != t->base_oloc.pool); | 1284 | WARN_ON(pi->id != t->base_oloc.pool); |
@@ -1303,8 +1303,7 @@ static enum calc_target_result calc_target(struct ceph_osd_client *osdc, | |||
1303 | bool force_resend = false; | 1303 | bool force_resend = false; |
1304 | bool need_check_tiering = false; | 1304 | bool need_check_tiering = false; |
1305 | bool need_resend = false; | 1305 | bool need_resend = false; |
1306 | bool sort_bitwise = ceph_osdmap_flag(osdc->osdmap, | 1306 | bool sort_bitwise = ceph_osdmap_flag(osdc, CEPH_OSDMAP_SORTBITWISE); |
1307 | CEPH_OSDMAP_SORTBITWISE); | ||
1308 | enum calc_target_result ct_res; | 1307 | enum calc_target_result ct_res; |
1309 | int ret; | 1308 | int ret; |
1310 | 1309 | ||
@@ -1540,9 +1539,9 @@ static void encode_request(struct ceph_osd_request *req, struct ceph_msg *msg) | |||
1540 | */ | 1539 | */ |
1541 | msg->hdr.data_off = cpu_to_le16(req->r_data_offset); | 1540 | msg->hdr.data_off = cpu_to_le16(req->r_data_offset); |
1542 | 1541 | ||
1543 | dout("%s req %p oid %*pE oid_len %d front %zu data %u\n", __func__, | 1542 | dout("%s req %p oid %s oid_len %d front %zu data %u\n", __func__, |
1544 | req, req->r_t.target_oid.name_len, req->r_t.target_oid.name, | 1543 | req, req->r_t.target_oid.name, req->r_t.target_oid.name_len, |
1545 | req->r_t.target_oid.name_len, msg->front.iov_len, data_len); | 1544 | msg->front.iov_len, data_len); |
1546 | } | 1545 | } |
1547 | 1546 | ||
1548 | /* | 1547 | /* |
@@ -1590,9 +1589,9 @@ static void maybe_request_map(struct ceph_osd_client *osdc) | |||
1590 | verify_osdc_locked(osdc); | 1589 | verify_osdc_locked(osdc); |
1591 | WARN_ON(!osdc->osdmap->epoch); | 1590 | WARN_ON(!osdc->osdmap->epoch); |
1592 | 1591 | ||
1593 | if (ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_FULL) || | 1592 | if (ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) || |
1594 | ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_PAUSERD) || | 1593 | ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSERD) || |
1595 | ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_PAUSEWR)) { | 1594 | ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSEWR)) { |
1596 | dout("%s osdc %p continuous\n", __func__, osdc); | 1595 | dout("%s osdc %p continuous\n", __func__, osdc); |
1597 | continuous = true; | 1596 | continuous = true; |
1598 | } else { | 1597 | } else { |
@@ -1629,19 +1628,19 @@ again: | |||
1629 | } | 1628 | } |
1630 | 1629 | ||
1631 | if ((req->r_flags & CEPH_OSD_FLAG_WRITE) && | 1630 | if ((req->r_flags & CEPH_OSD_FLAG_WRITE) && |
1632 | ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_PAUSEWR)) { | 1631 | ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSEWR)) { |
1633 | dout("req %p pausewr\n", req); | 1632 | dout("req %p pausewr\n", req); |
1634 | req->r_t.paused = true; | 1633 | req->r_t.paused = true; |
1635 | maybe_request_map(osdc); | 1634 | maybe_request_map(osdc); |
1636 | } else if ((req->r_flags & CEPH_OSD_FLAG_READ) && | 1635 | } else if ((req->r_flags & CEPH_OSD_FLAG_READ) && |
1637 | ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_PAUSERD)) { | 1636 | ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSERD)) { |
1638 | dout("req %p pauserd\n", req); | 1637 | dout("req %p pauserd\n", req); |
1639 | req->r_t.paused = true; | 1638 | req->r_t.paused = true; |
1640 | maybe_request_map(osdc); | 1639 | maybe_request_map(osdc); |
1641 | } else if ((req->r_flags & CEPH_OSD_FLAG_WRITE) && | 1640 | } else if ((req->r_flags & CEPH_OSD_FLAG_WRITE) && |
1642 | !(req->r_flags & (CEPH_OSD_FLAG_FULL_TRY | | 1641 | !(req->r_flags & (CEPH_OSD_FLAG_FULL_TRY | |
1643 | CEPH_OSD_FLAG_FULL_FORCE)) && | 1642 | CEPH_OSD_FLAG_FULL_FORCE)) && |
1644 | (ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_FULL) || | 1643 | (ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) || |
1645 | pool_full(osdc, req->r_t.base_oloc.pool))) { | 1644 | pool_full(osdc, req->r_t.base_oloc.pool))) { |
1646 | dout("req %p full/pool_full\n", req); | 1645 | dout("req %p full/pool_full\n", req); |
1647 | pr_warn_ratelimited("FULL or reached pool quota\n"); | 1646 | pr_warn_ratelimited("FULL or reached pool quota\n"); |
@@ -2280,7 +2279,7 @@ static void send_linger_ping(struct ceph_osd_linger_request *lreq) | |||
2280 | struct ceph_osd_request *req = lreq->ping_req; | 2279 | struct ceph_osd_request *req = lreq->ping_req; |
2281 | struct ceph_osd_req_op *op = &req->r_ops[0]; | 2280 | struct ceph_osd_req_op *op = &req->r_ops[0]; |
2282 | 2281 | ||
2283 | if (ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_PAUSERD)) { | 2282 | if (ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSERD)) { |
2284 | dout("%s PAUSERD\n", __func__); | 2283 | dout("%s PAUSERD\n", __func__); |
2285 | return; | 2284 | return; |
2286 | } | 2285 | } |
@@ -2893,6 +2892,9 @@ static void handle_reply(struct ceph_osd *osd, struct ceph_msg *msg) | |||
2893 | dout("req %p tid %llu cb\n", req, req->r_tid); | 2892 | dout("req %p tid %llu cb\n", req, req->r_tid); |
2894 | __complete_request(req); | 2893 | __complete_request(req); |
2895 | } | 2894 | } |
2895 | if (m.flags & CEPH_OSD_FLAG_ONDISK) | ||
2896 | complete_all(&req->r_safe_completion); | ||
2897 | ceph_osdc_put_request(req); | ||
2896 | } else { | 2898 | } else { |
2897 | if (req->r_unsafe_callback) { | 2899 | if (req->r_unsafe_callback) { |
2898 | dout("req %p tid %llu unsafe-cb\n", req, req->r_tid); | 2900 | dout("req %p tid %llu unsafe-cb\n", req, req->r_tid); |
@@ -2901,10 +2903,7 @@ static void handle_reply(struct ceph_osd *osd, struct ceph_msg *msg) | |||
2901 | WARN_ON(1); | 2903 | WARN_ON(1); |
2902 | } | 2904 | } |
2903 | } | 2905 | } |
2904 | if (m.flags & CEPH_OSD_FLAG_ONDISK) | ||
2905 | complete_all(&req->r_safe_completion); | ||
2906 | 2906 | ||
2907 | ceph_osdc_put_request(req); | ||
2908 | return; | 2907 | return; |
2909 | 2908 | ||
2910 | fail_request: | 2909 | fail_request: |
@@ -3050,7 +3049,7 @@ static int handle_one_map(struct ceph_osd_client *osdc, | |||
3050 | bool skipped_map = false; | 3049 | bool skipped_map = false; |
3051 | bool was_full; | 3050 | bool was_full; |
3052 | 3051 | ||
3053 | was_full = ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_FULL); | 3052 | was_full = ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL); |
3054 | set_pool_was_full(osdc); | 3053 | set_pool_was_full(osdc); |
3055 | 3054 | ||
3056 | if (incremental) | 3055 | if (incremental) |
@@ -3088,7 +3087,7 @@ static int handle_one_map(struct ceph_osd_client *osdc, | |||
3088 | osdc->osdmap = newmap; | 3087 | osdc->osdmap = newmap; |
3089 | } | 3088 | } |
3090 | 3089 | ||
3091 | was_full &= !ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_FULL); | 3090 | was_full &= !ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL); |
3092 | scan_requests(&osdc->homeless_osd, skipped_map, was_full, true, | 3091 | scan_requests(&osdc->homeless_osd, skipped_map, was_full, true, |
3093 | need_resend, need_resend_linger); | 3092 | need_resend, need_resend_linger); |
3094 | 3093 | ||
@@ -3174,9 +3173,9 @@ void ceph_osdc_handle_map(struct ceph_osd_client *osdc, struct ceph_msg *msg) | |||
3174 | if (ceph_check_fsid(osdc->client, &fsid) < 0) | 3173 | if (ceph_check_fsid(osdc->client, &fsid) < 0) |
3175 | goto bad; | 3174 | goto bad; |
3176 | 3175 | ||
3177 | was_pauserd = ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_PAUSERD); | 3176 | was_pauserd = ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSERD); |
3178 | was_pausewr = ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_PAUSEWR) || | 3177 | was_pausewr = ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSEWR) || |
3179 | ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_FULL) || | 3178 | ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) || |
3180 | have_pool_full(osdc); | 3179 | have_pool_full(osdc); |
3181 | 3180 | ||
3182 | /* incremental maps */ | 3181 | /* incremental maps */ |
@@ -3238,9 +3237,9 @@ done: | |||
3238 | * we find out when we are no longer full and stop returning | 3237 | * we find out when we are no longer full and stop returning |
3239 | * ENOSPC. | 3238 | * ENOSPC. |
3240 | */ | 3239 | */ |
3241 | pauserd = ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_PAUSERD); | 3240 | pauserd = ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSERD); |
3242 | pausewr = ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_PAUSEWR) || | 3241 | pausewr = ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSEWR) || |
3243 | ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_FULL) || | 3242 | ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) || |
3244 | have_pool_full(osdc); | 3243 | have_pool_full(osdc); |
3245 | if (was_pauserd || was_pausewr || pauserd || pausewr) | 3244 | if (was_pauserd || was_pausewr || pauserd || pausewr) |
3246 | maybe_request_map(osdc); | 3245 | maybe_request_map(osdc); |
diff --git a/net/ceph/osdmap.c b/net/ceph/osdmap.c index cde52e94732f..03062bb763b3 100644 --- a/net/ceph/osdmap.c +++ b/net/ceph/osdmap.c | |||
@@ -1778,8 +1778,8 @@ int ceph_object_locator_to_pg(struct ceph_osdmap *osdmap, | |||
1778 | raw_pgid->seed = ceph_str_hash(pi->object_hash, oid->name, | 1778 | raw_pgid->seed = ceph_str_hash(pi->object_hash, oid->name, |
1779 | oid->name_len); | 1779 | oid->name_len); |
1780 | 1780 | ||
1781 | dout("%s %*pE -> raw_pgid %llu.%x\n", __func__, oid->name_len, | 1781 | dout("%s %s -> raw_pgid %llu.%x\n", __func__, oid->name, |
1782 | oid->name, raw_pgid->pool, raw_pgid->seed); | 1782 | raw_pgid->pool, raw_pgid->seed); |
1783 | return 0; | 1783 | return 0; |
1784 | } | 1784 | } |
1785 | EXPORT_SYMBOL(ceph_object_locator_to_pg); | 1785 | EXPORT_SYMBOL(ceph_object_locator_to_pg); |
diff --git a/net/core/hwbm.c b/net/core/hwbm.c index 941c28486896..2cab489ae62e 100644 --- a/net/core/hwbm.c +++ b/net/core/hwbm.c | |||
@@ -55,18 +55,21 @@ int hwbm_pool_add(struct hwbm_pool *bm_pool, unsigned int buf_num, gfp_t gfp) | |||
55 | spin_lock_irqsave(&bm_pool->lock, flags); | 55 | spin_lock_irqsave(&bm_pool->lock, flags); |
56 | if (bm_pool->buf_num == bm_pool->size) { | 56 | if (bm_pool->buf_num == bm_pool->size) { |
57 | pr_warn("pool already filled\n"); | 57 | pr_warn("pool already filled\n"); |
58 | spin_unlock_irqrestore(&bm_pool->lock, flags); | ||
58 | return bm_pool->buf_num; | 59 | return bm_pool->buf_num; |
59 | } | 60 | } |
60 | 61 | ||
61 | if (buf_num + bm_pool->buf_num > bm_pool->size) { | 62 | if (buf_num + bm_pool->buf_num > bm_pool->size) { |
62 | pr_warn("cannot allocate %d buffers for pool\n", | 63 | pr_warn("cannot allocate %d buffers for pool\n", |
63 | buf_num); | 64 | buf_num); |
65 | spin_unlock_irqrestore(&bm_pool->lock, flags); | ||
64 | return 0; | 66 | return 0; |
65 | } | 67 | } |
66 | 68 | ||
67 | if ((buf_num + bm_pool->buf_num) < bm_pool->buf_num) { | 69 | if ((buf_num + bm_pool->buf_num) < bm_pool->buf_num) { |
68 | pr_warn("Adding %d buffers to the %d current buffers will overflow\n", | 70 | pr_warn("Adding %d buffers to the %d current buffers will overflow\n", |
69 | buf_num, bm_pool->buf_num); | 71 | buf_num, bm_pool->buf_num); |
72 | spin_unlock_irqrestore(&bm_pool->lock, flags); | ||
70 | return 0; | 73 | return 0; |
71 | } | 74 | } |
72 | 75 | ||
diff --git a/net/core/pktgen.c b/net/core/pktgen.c index 8604ae245960..8b02df0d354d 100644 --- a/net/core/pktgen.c +++ b/net/core/pktgen.c | |||
@@ -2245,10 +2245,8 @@ static void spin(struct pktgen_dev *pkt_dev, ktime_t spin_until) | |||
2245 | hrtimer_set_expires(&t.timer, spin_until); | 2245 | hrtimer_set_expires(&t.timer, spin_until); |
2246 | 2246 | ||
2247 | remaining = ktime_to_ns(hrtimer_expires_remaining(&t.timer)); | 2247 | remaining = ktime_to_ns(hrtimer_expires_remaining(&t.timer)); |
2248 | if (remaining <= 0) { | 2248 | if (remaining <= 0) |
2249 | pkt_dev->next_tx = ktime_add_ns(spin_until, pkt_dev->delay); | 2249 | goto out; |
2250 | return; | ||
2251 | } | ||
2252 | 2250 | ||
2253 | start_time = ktime_get(); | 2251 | start_time = ktime_get(); |
2254 | if (remaining < 100000) { | 2252 | if (remaining < 100000) { |
@@ -2273,7 +2271,9 @@ static void spin(struct pktgen_dev *pkt_dev, ktime_t spin_until) | |||
2273 | } | 2271 | } |
2274 | 2272 | ||
2275 | pkt_dev->idle_acc += ktime_to_ns(ktime_sub(end_time, start_time)); | 2273 | pkt_dev->idle_acc += ktime_to_ns(ktime_sub(end_time, start_time)); |
2274 | out: | ||
2276 | pkt_dev->next_tx = ktime_add_ns(spin_until, pkt_dev->delay); | 2275 | pkt_dev->next_tx = ktime_add_ns(spin_until, pkt_dev->delay); |
2276 | destroy_hrtimer_on_stack(&t.timer); | ||
2277 | } | 2277 | } |
2278 | 2278 | ||
2279 | static inline void set_pkt_overhead(struct pktgen_dev *pkt_dev) | 2279 | static inline void set_pkt_overhead(struct pktgen_dev *pkt_dev) |
diff --git a/net/ieee802154/nl802154.c b/net/ieee802154/nl802154.c index ca207dbf673b..116187b5c267 100644 --- a/net/ieee802154/nl802154.c +++ b/net/ieee802154/nl802154.c | |||
@@ -1289,8 +1289,8 @@ ieee802154_llsec_parse_dev_addr(struct nlattr *nla, | |||
1289 | nl802154_dev_addr_policy)) | 1289 | nl802154_dev_addr_policy)) |
1290 | return -EINVAL; | 1290 | return -EINVAL; |
1291 | 1291 | ||
1292 | if (!attrs[NL802154_DEV_ADDR_ATTR_PAN_ID] && | 1292 | if (!attrs[NL802154_DEV_ADDR_ATTR_PAN_ID] || |
1293 | !attrs[NL802154_DEV_ADDR_ATTR_MODE] && | 1293 | !attrs[NL802154_DEV_ADDR_ATTR_MODE] || |
1294 | !(attrs[NL802154_DEV_ADDR_ATTR_SHORT] || | 1294 | !(attrs[NL802154_DEV_ADDR_ATTR_SHORT] || |
1295 | attrs[NL802154_DEV_ADDR_ATTR_EXTENDED])) | 1295 | attrs[NL802154_DEV_ADDR_ATTR_EXTENDED])) |
1296 | return -EINVAL; | 1296 | return -EINVAL; |
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c index 377424ea17a4..d39e9e47a26e 100644 --- a/net/ipv4/af_inet.c +++ b/net/ipv4/af_inet.c | |||
@@ -1681,6 +1681,14 @@ static __net_init int inet_init_net(struct net *net) | |||
1681 | */ | 1681 | */ |
1682 | net->ipv4.ping_group_range.range[0] = make_kgid(&init_user_ns, 1); | 1682 | net->ipv4.ping_group_range.range[0] = make_kgid(&init_user_ns, 1); |
1683 | net->ipv4.ping_group_range.range[1] = make_kgid(&init_user_ns, 0); | 1683 | net->ipv4.ping_group_range.range[1] = make_kgid(&init_user_ns, 0); |
1684 | |||
1685 | /* Default values for sysctl-controlled parameters. | ||
1686 | * We set them here, in case sysctl is not compiled. | ||
1687 | */ | ||
1688 | net->ipv4.sysctl_ip_default_ttl = IPDEFTTL; | ||
1689 | net->ipv4.sysctl_ip_dynaddr = 0; | ||
1690 | net->ipv4.sysctl_ip_early_demux = 1; | ||
1691 | |||
1684 | return 0; | 1692 | return 0; |
1685 | } | 1693 | } |
1686 | 1694 | ||
diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c index bb0419582b8d..1cb67de106fe 100644 --- a/net/ipv4/sysctl_net_ipv4.c +++ b/net/ipv4/sysctl_net_ipv4.c | |||
@@ -999,10 +999,6 @@ static __net_init int ipv4_sysctl_init_net(struct net *net) | |||
999 | if (!net->ipv4.sysctl_local_reserved_ports) | 999 | if (!net->ipv4.sysctl_local_reserved_ports) |
1000 | goto err_ports; | 1000 | goto err_ports; |
1001 | 1001 | ||
1002 | net->ipv4.sysctl_ip_default_ttl = IPDEFTTL; | ||
1003 | net->ipv4.sysctl_ip_dynaddr = 0; | ||
1004 | net->ipv4.sysctl_ip_early_demux = 1; | ||
1005 | |||
1006 | return 0; | 1002 | return 0; |
1007 | 1003 | ||
1008 | err_ports: | 1004 | err_ports: |
diff --git a/net/ipv6/Kconfig b/net/ipv6/Kconfig index 3f8411328de5..2343e4f2e0bf 100644 --- a/net/ipv6/Kconfig +++ b/net/ipv6/Kconfig | |||
@@ -232,6 +232,15 @@ config IPV6_GRE | |||
232 | 232 | ||
233 | Saying M here will produce a module called ip6_gre. If unsure, say N. | 233 | Saying M here will produce a module called ip6_gre. If unsure, say N. |
234 | 234 | ||
235 | config IPV6_FOU | ||
236 | tristate | ||
237 | default NET_FOU && IPV6 | ||
238 | |||
239 | config IPV6_FOU_TUNNEL | ||
240 | tristate | ||
241 | default NET_FOU_IP_TUNNELS && IPV6_FOU | ||
242 | select IPV6_TUNNEL | ||
243 | |||
235 | config IPV6_MULTIPLE_TABLES | 244 | config IPV6_MULTIPLE_TABLES |
236 | bool "IPv6: Multiple Routing Tables" | 245 | bool "IPv6: Multiple Routing Tables" |
237 | select FIB_RULES | 246 | select FIB_RULES |
diff --git a/net/ipv6/Makefile b/net/ipv6/Makefile index 7ec3129c9ace..6d8ea099213e 100644 --- a/net/ipv6/Makefile +++ b/net/ipv6/Makefile | |||
@@ -42,7 +42,7 @@ obj-$(CONFIG_IPV6_VTI) += ip6_vti.o | |||
42 | obj-$(CONFIG_IPV6_SIT) += sit.o | 42 | obj-$(CONFIG_IPV6_SIT) += sit.o |
43 | obj-$(CONFIG_IPV6_TUNNEL) += ip6_tunnel.o | 43 | obj-$(CONFIG_IPV6_TUNNEL) += ip6_tunnel.o |
44 | obj-$(CONFIG_IPV6_GRE) += ip6_gre.o | 44 | obj-$(CONFIG_IPV6_GRE) += ip6_gre.o |
45 | obj-$(CONFIG_NET_FOU) += fou6.o | 45 | obj-$(CONFIG_IPV6_FOU) += fou6.o |
46 | 46 | ||
47 | obj-y += addrconf_core.o exthdrs_core.o ip6_checksum.o ip6_icmp.o | 47 | obj-y += addrconf_core.o exthdrs_core.o ip6_checksum.o ip6_icmp.o |
48 | obj-$(CONFIG_INET) += output_core.o protocol.o $(ipv6-offload) | 48 | obj-$(CONFIG_INET) += output_core.o protocol.o $(ipv6-offload) |
diff --git a/net/ipv6/fou6.c b/net/ipv6/fou6.c index c972d0b52579..9ea249b9451e 100644 --- a/net/ipv6/fou6.c +++ b/net/ipv6/fou6.c | |||
@@ -69,7 +69,7 @@ int gue6_build_header(struct sk_buff *skb, struct ip_tunnel_encap *e, | |||
69 | } | 69 | } |
70 | EXPORT_SYMBOL(gue6_build_header); | 70 | EXPORT_SYMBOL(gue6_build_header); |
71 | 71 | ||
72 | #ifdef CONFIG_NET_FOU_IP_TUNNELS | 72 | #if IS_ENABLED(CONFIG_IPV6_FOU_TUNNEL) |
73 | 73 | ||
74 | static const struct ip6_tnl_encap_ops fou_ip6tun_ops = { | 74 | static const struct ip6_tnl_encap_ops fou_ip6tun_ops = { |
75 | .encap_hlen = fou_encap_hlen, | 75 | .encap_hlen = fou_encap_hlen, |
diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c index af503f518278..f4ac2842d4d9 100644 --- a/net/ipv6/ip6_gre.c +++ b/net/ipv6/ip6_gre.c | |||
@@ -712,6 +712,7 @@ static void ip6gre_tnl_link_config(struct ip6_tnl *t, int set_mtu) | |||
712 | fl6->daddr = p->raddr; | 712 | fl6->daddr = p->raddr; |
713 | fl6->flowi6_oif = p->link; | 713 | fl6->flowi6_oif = p->link; |
714 | fl6->flowlabel = 0; | 714 | fl6->flowlabel = 0; |
715 | fl6->flowi6_proto = IPPROTO_GRE; | ||
715 | 716 | ||
716 | if (!(p->flags&IP6_TNL_F_USE_ORIG_TCLASS)) | 717 | if (!(p->flags&IP6_TNL_F_USE_ORIG_TCLASS)) |
717 | fl6->flowlabel |= IPV6_TCLASS_MASK & p->flowinfo; | 718 | fl6->flowlabel |= IPV6_TCLASS_MASK & p->flowinfo; |
@@ -1027,6 +1028,8 @@ static int ip6gre_tunnel_init_common(struct net_device *dev) | |||
1027 | 1028 | ||
1028 | dev->hard_header_len = LL_MAX_HEADER + t_hlen; | 1029 | dev->hard_header_len = LL_MAX_HEADER + t_hlen; |
1029 | dev->mtu = ETH_DATA_LEN - t_hlen; | 1030 | dev->mtu = ETH_DATA_LEN - t_hlen; |
1031 | if (dev->type == ARPHRD_ETHER) | ||
1032 | dev->mtu -= ETH_HLEN; | ||
1030 | if (!(tunnel->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT)) | 1033 | if (!(tunnel->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT)) |
1031 | dev->mtu -= 8; | 1034 | dev->mtu -= 8; |
1032 | 1035 | ||
diff --git a/net/l2tp/l2tp_ip6.c b/net/l2tp/l2tp_ip6.c index c6f5df1bed12..6c54e03fe9c1 100644 --- a/net/l2tp/l2tp_ip6.c +++ b/net/l2tp/l2tp_ip6.c | |||
@@ -128,6 +128,7 @@ static inline struct sock *l2tp_ip6_bind_lookup(struct net *net, | |||
128 | */ | 128 | */ |
129 | static int l2tp_ip6_recv(struct sk_buff *skb) | 129 | static int l2tp_ip6_recv(struct sk_buff *skb) |
130 | { | 130 | { |
131 | struct net *net = dev_net(skb->dev); | ||
131 | struct sock *sk; | 132 | struct sock *sk; |
132 | u32 session_id; | 133 | u32 session_id; |
133 | u32 tunnel_id; | 134 | u32 tunnel_id; |
@@ -154,7 +155,7 @@ static int l2tp_ip6_recv(struct sk_buff *skb) | |||
154 | } | 155 | } |
155 | 156 | ||
156 | /* Ok, this is a data packet. Lookup the session. */ | 157 | /* Ok, this is a data packet. Lookup the session. */ |
157 | session = l2tp_session_find(&init_net, NULL, session_id); | 158 | session = l2tp_session_find(net, NULL, session_id); |
158 | if (session == NULL) | 159 | if (session == NULL) |
159 | goto discard; | 160 | goto discard; |
160 | 161 | ||
@@ -188,14 +189,14 @@ pass_up: | |||
188 | goto discard; | 189 | goto discard; |
189 | 190 | ||
190 | tunnel_id = ntohl(*(__be32 *) &skb->data[4]); | 191 | tunnel_id = ntohl(*(__be32 *) &skb->data[4]); |
191 | tunnel = l2tp_tunnel_find(&init_net, tunnel_id); | 192 | tunnel = l2tp_tunnel_find(net, tunnel_id); |
192 | if (tunnel != NULL) | 193 | if (tunnel != NULL) |
193 | sk = tunnel->sock; | 194 | sk = tunnel->sock; |
194 | else { | 195 | else { |
195 | struct ipv6hdr *iph = ipv6_hdr(skb); | 196 | struct ipv6hdr *iph = ipv6_hdr(skb); |
196 | 197 | ||
197 | read_lock_bh(&l2tp_ip6_lock); | 198 | read_lock_bh(&l2tp_ip6_lock); |
198 | sk = __l2tp_ip6_bind_lookup(&init_net, &iph->daddr, | 199 | sk = __l2tp_ip6_bind_lookup(net, &iph->daddr, |
199 | 0, tunnel_id); | 200 | 0, tunnel_id); |
200 | read_unlock_bh(&l2tp_ip6_lock); | 201 | read_unlock_bh(&l2tp_ip6_lock); |
201 | } | 202 | } |
@@ -263,6 +264,7 @@ static int l2tp_ip6_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len) | |||
263 | struct inet_sock *inet = inet_sk(sk); | 264 | struct inet_sock *inet = inet_sk(sk); |
264 | struct ipv6_pinfo *np = inet6_sk(sk); | 265 | struct ipv6_pinfo *np = inet6_sk(sk); |
265 | struct sockaddr_l2tpip6 *addr = (struct sockaddr_l2tpip6 *) uaddr; | 266 | struct sockaddr_l2tpip6 *addr = (struct sockaddr_l2tpip6 *) uaddr; |
267 | struct net *net = sock_net(sk); | ||
266 | __be32 v4addr = 0; | 268 | __be32 v4addr = 0; |
267 | int addr_type; | 269 | int addr_type; |
268 | int err; | 270 | int err; |
@@ -286,7 +288,7 @@ static int l2tp_ip6_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len) | |||
286 | 288 | ||
287 | err = -EADDRINUSE; | 289 | err = -EADDRINUSE; |
288 | read_lock_bh(&l2tp_ip6_lock); | 290 | read_lock_bh(&l2tp_ip6_lock); |
289 | if (__l2tp_ip6_bind_lookup(&init_net, &addr->l2tp_addr, | 291 | if (__l2tp_ip6_bind_lookup(net, &addr->l2tp_addr, |
290 | sk->sk_bound_dev_if, addr->l2tp_conn_id)) | 292 | sk->sk_bound_dev_if, addr->l2tp_conn_id)) |
291 | goto out_in_use; | 293 | goto out_in_use; |
292 | read_unlock_bh(&l2tp_ip6_lock); | 294 | read_unlock_bh(&l2tp_ip6_lock); |
@@ -456,7 +458,7 @@ static int l2tp_ip6_backlog_recv(struct sock *sk, struct sk_buff *skb) | |||
456 | return 0; | 458 | return 0; |
457 | 459 | ||
458 | drop: | 460 | drop: |
459 | IP_INC_STATS(&init_net, IPSTATS_MIB_INDISCARDS); | 461 | IP_INC_STATS(sock_net(sk), IPSTATS_MIB_INDISCARDS); |
460 | kfree_skb(skb); | 462 | kfree_skb(skb); |
461 | return -1; | 463 | return -1; |
462 | } | 464 | } |
diff --git a/net/lapb/lapb_in.c b/net/lapb/lapb_in.c index 5dba899131b3..182470847fcf 100644 --- a/net/lapb/lapb_in.c +++ b/net/lapb/lapb_in.c | |||
@@ -444,10 +444,9 @@ static void lapb_state3_machine(struct lapb_cb *lapb, struct sk_buff *skb, | |||
444 | break; | 444 | break; |
445 | 445 | ||
446 | case LAPB_FRMR: | 446 | case LAPB_FRMR: |
447 | lapb_dbg(1, "(%p) S3 RX FRMR(%d) %02X %02X %02X %02X %02X\n", | 447 | lapb_dbg(1, "(%p) S3 RX FRMR(%d) %5ph\n", |
448 | lapb->dev, frame->pf, | 448 | lapb->dev, frame->pf, |
449 | skb->data[0], skb->data[1], skb->data[2], | 449 | skb->data); |
450 | skb->data[3], skb->data[4]); | ||
451 | lapb_establish_data_link(lapb); | 450 | lapb_establish_data_link(lapb); |
452 | lapb_dbg(0, "(%p) S3 -> S1\n", lapb->dev); | 451 | lapb_dbg(0, "(%p) S3 -> S1\n", lapb->dev); |
453 | lapb_requeue_frames(lapb); | 452 | lapb_requeue_frames(lapb); |
diff --git a/net/lapb/lapb_out.c b/net/lapb/lapb_out.c index ba4d015bd1a6..482c94d9d958 100644 --- a/net/lapb/lapb_out.c +++ b/net/lapb/lapb_out.c | |||
@@ -148,9 +148,7 @@ void lapb_transmit_buffer(struct lapb_cb *lapb, struct sk_buff *skb, int type) | |||
148 | } | 148 | } |
149 | } | 149 | } |
150 | 150 | ||
151 | lapb_dbg(2, "(%p) S%d TX %02X %02X %02X\n", | 151 | lapb_dbg(2, "(%p) S%d TX %3ph\n", lapb->dev, lapb->state, skb->data); |
152 | lapb->dev, lapb->state, | ||
153 | skb->data[0], skb->data[1], skb->data[2]); | ||
154 | 152 | ||
155 | if (!lapb_data_transmit(lapb, skb)) | 153 | if (!lapb_data_transmit(lapb, skb)) |
156 | kfree_skb(skb); | 154 | kfree_skb(skb); |
diff --git a/net/lapb/lapb_subr.c b/net/lapb/lapb_subr.c index 9d0a426eccbb..3c1914df641f 100644 --- a/net/lapb/lapb_subr.c +++ b/net/lapb/lapb_subr.c | |||
@@ -113,9 +113,7 @@ int lapb_decode(struct lapb_cb *lapb, struct sk_buff *skb, | |||
113 | { | 113 | { |
114 | frame->type = LAPB_ILLEGAL; | 114 | frame->type = LAPB_ILLEGAL; |
115 | 115 | ||
116 | lapb_dbg(2, "(%p) S%d RX %02X %02X %02X\n", | 116 | lapb_dbg(2, "(%p) S%d RX %3ph\n", lapb->dev, lapb->state, skb->data); |
117 | lapb->dev, lapb->state, | ||
118 | skb->data[0], skb->data[1], skb->data[2]); | ||
119 | 117 | ||
120 | /* We always need to look at 2 bytes, sometimes we need | 118 | /* We always need to look at 2 bytes, sometimes we need |
121 | * to look at 3 and those cases are handled below. | 119 | * to look at 3 and those cases are handled below. |
@@ -284,10 +282,9 @@ void lapb_transmit_frmr(struct lapb_cb *lapb) | |||
284 | dptr++; | 282 | dptr++; |
285 | *dptr++ = lapb->frmr_type; | 283 | *dptr++ = lapb->frmr_type; |
286 | 284 | ||
287 | lapb_dbg(1, "(%p) S%d TX FRMR %02X %02X %02X %02X %02X\n", | 285 | lapb_dbg(1, "(%p) S%d TX FRMR %5ph\n", |
288 | lapb->dev, lapb->state, | 286 | lapb->dev, lapb->state, |
289 | skb->data[1], skb->data[2], skb->data[3], | 287 | &skb->data[1]); |
290 | skb->data[4], skb->data[5]); | ||
291 | } else { | 288 | } else { |
292 | dptr = skb_put(skb, 4); | 289 | dptr = skb_put(skb, 4); |
293 | *dptr++ = LAPB_FRMR; | 290 | *dptr++ = LAPB_FRMR; |
@@ -299,9 +296,8 @@ void lapb_transmit_frmr(struct lapb_cb *lapb) | |||
299 | dptr++; | 296 | dptr++; |
300 | *dptr++ = lapb->frmr_type; | 297 | *dptr++ = lapb->frmr_type; |
301 | 298 | ||
302 | lapb_dbg(1, "(%p) S%d TX FRMR %02X %02X %02X\n", | 299 | lapb_dbg(1, "(%p) S%d TX FRMR %3ph\n", |
303 | lapb->dev, lapb->state, skb->data[1], | 300 | lapb->dev, lapb->state, &skb->data[1]); |
304 | skb->data[2], skb->data[3]); | ||
305 | } | 301 | } |
306 | 302 | ||
307 | lapb_transmit_buffer(lapb, skb, LAPB_RESPONSE); | 303 | lapb_transmit_buffer(lapb, skb, LAPB_RESPONSE); |
diff --git a/net/openvswitch/actions.c b/net/openvswitch/actions.c index 879185fe183f..9a3eb7a0ebf4 100644 --- a/net/openvswitch/actions.c +++ b/net/openvswitch/actions.c | |||
@@ -137,11 +137,23 @@ static bool is_flow_key_valid(const struct sw_flow_key *key) | |||
137 | return !!key->eth.type; | 137 | return !!key->eth.type; |
138 | } | 138 | } |
139 | 139 | ||
140 | static void update_ethertype(struct sk_buff *skb, struct ethhdr *hdr, | ||
141 | __be16 ethertype) | ||
142 | { | ||
143 | if (skb->ip_summed == CHECKSUM_COMPLETE) { | ||
144 | __be16 diff[] = { ~(hdr->h_proto), ethertype }; | ||
145 | |||
146 | skb->csum = ~csum_partial((char *)diff, sizeof(diff), | ||
147 | ~skb->csum); | ||
148 | } | ||
149 | |||
150 | hdr->h_proto = ethertype; | ||
151 | } | ||
152 | |||
140 | static int push_mpls(struct sk_buff *skb, struct sw_flow_key *key, | 153 | static int push_mpls(struct sk_buff *skb, struct sw_flow_key *key, |
141 | const struct ovs_action_push_mpls *mpls) | 154 | const struct ovs_action_push_mpls *mpls) |
142 | { | 155 | { |
143 | __be32 *new_mpls_lse; | 156 | __be32 *new_mpls_lse; |
144 | struct ethhdr *hdr; | ||
145 | 157 | ||
146 | /* Networking stack do not allow simultaneous Tunnel and MPLS GSO. */ | 158 | /* Networking stack do not allow simultaneous Tunnel and MPLS GSO. */ |
147 | if (skb->encapsulation) | 159 | if (skb->encapsulation) |
@@ -160,9 +172,7 @@ static int push_mpls(struct sk_buff *skb, struct sw_flow_key *key, | |||
160 | 172 | ||
161 | skb_postpush_rcsum(skb, new_mpls_lse, MPLS_HLEN); | 173 | skb_postpush_rcsum(skb, new_mpls_lse, MPLS_HLEN); |
162 | 174 | ||
163 | hdr = eth_hdr(skb); | 175 | update_ethertype(skb, eth_hdr(skb), mpls->mpls_ethertype); |
164 | hdr->h_proto = mpls->mpls_ethertype; | ||
165 | |||
166 | if (!skb->inner_protocol) | 176 | if (!skb->inner_protocol) |
167 | skb_set_inner_protocol(skb, skb->protocol); | 177 | skb_set_inner_protocol(skb, skb->protocol); |
168 | skb->protocol = mpls->mpls_ethertype; | 178 | skb->protocol = mpls->mpls_ethertype; |
@@ -193,7 +203,7 @@ static int pop_mpls(struct sk_buff *skb, struct sw_flow_key *key, | |||
193 | * field correctly in the presence of VLAN tags. | 203 | * field correctly in the presence of VLAN tags. |
194 | */ | 204 | */ |
195 | hdr = (struct ethhdr *)(skb_mpls_header(skb) - ETH_HLEN); | 205 | hdr = (struct ethhdr *)(skb_mpls_header(skb) - ETH_HLEN); |
196 | hdr->h_proto = ethertype; | 206 | update_ethertype(skb, hdr, ethertype); |
197 | if (eth_p_mpls(skb->protocol)) | 207 | if (eth_p_mpls(skb->protocol)) |
198 | skb->protocol = ethertype; | 208 | skb->protocol = ethertype; |
199 | 209 | ||
diff --git a/net/sched/act_police.c b/net/sched/act_police.c index 330f14e302e8..b884dae692a1 100644 --- a/net/sched/act_police.c +++ b/net/sched/act_police.c | |||
@@ -239,6 +239,8 @@ override: | |||
239 | police->tcfp_t_c = ktime_get_ns(); | 239 | police->tcfp_t_c = ktime_get_ns(); |
240 | police->tcf_index = parm->index ? parm->index : | 240 | police->tcf_index = parm->index ? parm->index : |
241 | tcf_hash_new_index(tn); | 241 | tcf_hash_new_index(tn); |
242 | police->tcf_tm.install = jiffies; | ||
243 | police->tcf_tm.lastuse = jiffies; | ||
242 | h = tcf_hash(police->tcf_index, POL_TAB_MASK); | 244 | h = tcf_hash(police->tcf_index, POL_TAB_MASK); |
243 | spin_lock_bh(&hinfo->lock); | 245 | spin_lock_bh(&hinfo->lock); |
244 | hlist_add_head(&police->tcf_head, &hinfo->htab[h]); | 246 | hlist_add_head(&police->tcf_head, &hinfo->htab[h]); |
@@ -268,6 +270,7 @@ static int tcf_act_police(struct sk_buff *skb, const struct tc_action *a, | |||
268 | spin_lock(&police->tcf_lock); | 270 | spin_lock(&police->tcf_lock); |
269 | 271 | ||
270 | bstats_update(&police->tcf_bstats, skb); | 272 | bstats_update(&police->tcf_bstats, skb); |
273 | tcf_lastuse_update(&police->tcf_tm); | ||
271 | 274 | ||
272 | if (police->tcfp_ewma_rate && | 275 | if (police->tcfp_ewma_rate && |
273 | police->tcf_rate_est.bps >= police->tcfp_ewma_rate) { | 276 | police->tcf_rate_est.bps >= police->tcfp_ewma_rate) { |
@@ -327,6 +330,7 @@ tcf_act_police_dump(struct sk_buff *skb, struct tc_action *a, int bind, int ref) | |||
327 | .refcnt = police->tcf_refcnt - ref, | 330 | .refcnt = police->tcf_refcnt - ref, |
328 | .bindcnt = police->tcf_bindcnt - bind, | 331 | .bindcnt = police->tcf_bindcnt - bind, |
329 | }; | 332 | }; |
333 | struct tcf_t t; | ||
330 | 334 | ||
331 | if (police->rate_present) | 335 | if (police->rate_present) |
332 | psched_ratecfg_getrate(&opt.rate, &police->rate); | 336 | psched_ratecfg_getrate(&opt.rate, &police->rate); |
@@ -340,6 +344,13 @@ tcf_act_police_dump(struct sk_buff *skb, struct tc_action *a, int bind, int ref) | |||
340 | if (police->tcfp_ewma_rate && | 344 | if (police->tcfp_ewma_rate && |
341 | nla_put_u32(skb, TCA_POLICE_AVRATE, police->tcfp_ewma_rate)) | 345 | nla_put_u32(skb, TCA_POLICE_AVRATE, police->tcfp_ewma_rate)) |
342 | goto nla_put_failure; | 346 | goto nla_put_failure; |
347 | |||
348 | t.install = jiffies_to_clock_t(jiffies - police->tcf_tm.install); | ||
349 | t.lastuse = jiffies_to_clock_t(jiffies - police->tcf_tm.lastuse); | ||
350 | t.expires = jiffies_to_clock_t(police->tcf_tm.expires); | ||
351 | if (nla_put_64bit(skb, TCA_POLICE_TM, sizeof(t), &t, TCA_POLICE_PAD)) | ||
352 | goto nla_put_failure; | ||
353 | |||
343 | return skb->len; | 354 | return skb->len; |
344 | 355 | ||
345 | nla_put_failure: | 356 | nla_put_failure: |
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c index 64f71a2155f3..ddf047df5361 100644 --- a/net/sched/sch_api.c +++ b/net/sched/sch_api.c | |||
@@ -607,6 +607,10 @@ void qdisc_watchdog_schedule_ns(struct qdisc_watchdog *wd, u64 expires, bool thr | |||
607 | if (throttle) | 607 | if (throttle) |
608 | qdisc_throttled(wd->qdisc); | 608 | qdisc_throttled(wd->qdisc); |
609 | 609 | ||
610 | if (wd->last_expires == expires) | ||
611 | return; | ||
612 | |||
613 | wd->last_expires = expires; | ||
610 | hrtimer_start(&wd->timer, | 614 | hrtimer_start(&wd->timer, |
611 | ns_to_ktime(expires), | 615 | ns_to_ktime(expires), |
612 | HRTIMER_MODE_ABS_PINNED); | 616 | HRTIMER_MODE_ABS_PINNED); |
diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c index f6bf5818ed4d..d4b4218af6b1 100644 --- a/net/sched/sch_htb.c +++ b/net/sched/sch_htb.c | |||
@@ -928,17 +928,10 @@ ok: | |||
928 | } | 928 | } |
929 | } | 929 | } |
930 | qdisc_qstats_overlimit(sch); | 930 | qdisc_qstats_overlimit(sch); |
931 | if (likely(next_event > q->now)) { | 931 | if (likely(next_event > q->now)) |
932 | if (!test_bit(__QDISC_STATE_DEACTIVATED, | 932 | qdisc_watchdog_schedule_ns(&q->watchdog, next_event, true); |
933 | &qdisc_root_sleeping(q->watchdog.qdisc)->state)) { | 933 | else |
934 | ktime_t time = ns_to_ktime(next_event); | ||
935 | qdisc_throttled(q->watchdog.qdisc); | ||
936 | hrtimer_start(&q->watchdog.timer, time, | ||
937 | HRTIMER_MODE_ABS_PINNED); | ||
938 | } | ||
939 | } else { | ||
940 | schedule_work(&q->work); | 934 | schedule_work(&q->work); |
941 | } | ||
942 | fin: | 935 | fin: |
943 | return skb; | 936 | return skb; |
944 | } | 937 | } |
diff --git a/net/sctp/sctp_diag.c b/net/sctp/sctp_diag.c index 8e3e769dc9ea..1ce724b87618 100644 --- a/net/sctp/sctp_diag.c +++ b/net/sctp/sctp_diag.c | |||
@@ -356,6 +356,9 @@ static int sctp_ep_dump(struct sctp_endpoint *ep, void *p) | |||
356 | if (cb->args[4] < cb->args[1]) | 356 | if (cb->args[4] < cb->args[1]) |
357 | goto next; | 357 | goto next; |
358 | 358 | ||
359 | if ((r->idiag_states & ~TCPF_LISTEN) && !list_empty(&ep->asocs)) | ||
360 | goto next; | ||
361 | |||
359 | if (r->sdiag_family != AF_UNSPEC && | 362 | if (r->sdiag_family != AF_UNSPEC && |
360 | sk->sk_family != r->sdiag_family) | 363 | sk->sk_family != r->sdiag_family) |
361 | goto next; | 364 | goto next; |
diff --git a/net/sctp/socket.c b/net/sctp/socket.c index 777d0324594a..67154b848aa9 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c | |||
@@ -4220,6 +4220,7 @@ int sctp_get_sctp_info(struct sock *sk, struct sctp_association *asoc, | |||
4220 | info->sctpi_s_disable_fragments = sp->disable_fragments; | 4220 | info->sctpi_s_disable_fragments = sp->disable_fragments; |
4221 | info->sctpi_s_v4mapped = sp->v4mapped; | 4221 | info->sctpi_s_v4mapped = sp->v4mapped; |
4222 | info->sctpi_s_frag_interleave = sp->frag_interleave; | 4222 | info->sctpi_s_frag_interleave = sp->frag_interleave; |
4223 | info->sctpi_s_type = sp->type; | ||
4223 | 4224 | ||
4224 | return 0; | 4225 | return 0; |
4225 | } | 4226 | } |
diff --git a/net/tipc/netlink_compat.c b/net/tipc/netlink_compat.c index 4dfc5c14f8c3..f795b1dd0ccd 100644 --- a/net/tipc/netlink_compat.c +++ b/net/tipc/netlink_compat.c | |||
@@ -346,9 +346,15 @@ static int tipc_nl_compat_bearer_dump(struct tipc_nl_compat_msg *msg, | |||
346 | struct nlattr **attrs) | 346 | struct nlattr **attrs) |
347 | { | 347 | { |
348 | struct nlattr *bearer[TIPC_NLA_BEARER_MAX + 1]; | 348 | struct nlattr *bearer[TIPC_NLA_BEARER_MAX + 1]; |
349 | int err; | ||
350 | |||
351 | if (!attrs[TIPC_NLA_BEARER]) | ||
352 | return -EINVAL; | ||
349 | 353 | ||
350 | nla_parse_nested(bearer, TIPC_NLA_BEARER_MAX, attrs[TIPC_NLA_BEARER], | 354 | err = nla_parse_nested(bearer, TIPC_NLA_BEARER_MAX, |
351 | NULL); | 355 | attrs[TIPC_NLA_BEARER], NULL); |
356 | if (err) | ||
357 | return err; | ||
352 | 358 | ||
353 | return tipc_add_tlv(msg->rep, TIPC_TLV_BEARER_NAME, | 359 | return tipc_add_tlv(msg->rep, TIPC_TLV_BEARER_NAME, |
354 | nla_data(bearer[TIPC_NLA_BEARER_NAME]), | 360 | nla_data(bearer[TIPC_NLA_BEARER_NAME]), |
@@ -460,14 +466,31 @@ static int tipc_nl_compat_link_stat_dump(struct tipc_nl_compat_msg *msg, | |||
460 | struct nlattr *link[TIPC_NLA_LINK_MAX + 1]; | 466 | struct nlattr *link[TIPC_NLA_LINK_MAX + 1]; |
461 | struct nlattr *prop[TIPC_NLA_PROP_MAX + 1]; | 467 | struct nlattr *prop[TIPC_NLA_PROP_MAX + 1]; |
462 | struct nlattr *stats[TIPC_NLA_STATS_MAX + 1]; | 468 | struct nlattr *stats[TIPC_NLA_STATS_MAX + 1]; |
469 | int err; | ||
463 | 470 | ||
464 | nla_parse_nested(link, TIPC_NLA_LINK_MAX, attrs[TIPC_NLA_LINK], NULL); | 471 | if (!attrs[TIPC_NLA_LINK]) |
472 | return -EINVAL; | ||
465 | 473 | ||
466 | nla_parse_nested(prop, TIPC_NLA_PROP_MAX, link[TIPC_NLA_LINK_PROP], | 474 | err = nla_parse_nested(link, TIPC_NLA_LINK_MAX, attrs[TIPC_NLA_LINK], |
467 | NULL); | 475 | NULL); |
476 | if (err) | ||
477 | return err; | ||
478 | |||
479 | if (!link[TIPC_NLA_LINK_PROP]) | ||
480 | return -EINVAL; | ||
468 | 481 | ||
469 | nla_parse_nested(stats, TIPC_NLA_STATS_MAX, link[TIPC_NLA_LINK_STATS], | 482 | err = nla_parse_nested(prop, TIPC_NLA_PROP_MAX, |
470 | NULL); | 483 | link[TIPC_NLA_LINK_PROP], NULL); |
484 | if (err) | ||
485 | return err; | ||
486 | |||
487 | if (!link[TIPC_NLA_LINK_STATS]) | ||
488 | return -EINVAL; | ||
489 | |||
490 | err = nla_parse_nested(stats, TIPC_NLA_STATS_MAX, | ||
491 | link[TIPC_NLA_LINK_STATS], NULL); | ||
492 | if (err) | ||
493 | return err; | ||
471 | 494 | ||
472 | name = (char *)TLV_DATA(msg->req); | 495 | name = (char *)TLV_DATA(msg->req); |
473 | if (strcmp(name, nla_data(link[TIPC_NLA_LINK_NAME])) != 0) | 496 | if (strcmp(name, nla_data(link[TIPC_NLA_LINK_NAME])) != 0) |
@@ -569,8 +592,15 @@ static int tipc_nl_compat_link_dump(struct tipc_nl_compat_msg *msg, | |||
569 | { | 592 | { |
570 | struct nlattr *link[TIPC_NLA_LINK_MAX + 1]; | 593 | struct nlattr *link[TIPC_NLA_LINK_MAX + 1]; |
571 | struct tipc_link_info link_info; | 594 | struct tipc_link_info link_info; |
595 | int err; | ||
572 | 596 | ||
573 | nla_parse_nested(link, TIPC_NLA_LINK_MAX, attrs[TIPC_NLA_LINK], NULL); | 597 | if (!attrs[TIPC_NLA_LINK]) |
598 | return -EINVAL; | ||
599 | |||
600 | err = nla_parse_nested(link, TIPC_NLA_LINK_MAX, attrs[TIPC_NLA_LINK], | ||
601 | NULL); | ||
602 | if (err) | ||
603 | return err; | ||
574 | 604 | ||
575 | link_info.dest = nla_get_flag(link[TIPC_NLA_LINK_DEST]); | 605 | link_info.dest = nla_get_flag(link[TIPC_NLA_LINK_DEST]); |
576 | link_info.up = htonl(nla_get_flag(link[TIPC_NLA_LINK_UP])); | 606 | link_info.up = htonl(nla_get_flag(link[TIPC_NLA_LINK_UP])); |
@@ -758,12 +788,23 @@ static int tipc_nl_compat_name_table_dump(struct tipc_nl_compat_msg *msg, | |||
758 | u32 node, depth, type, lowbound, upbound; | 788 | u32 node, depth, type, lowbound, upbound; |
759 | static const char * const scope_str[] = {"", " zone", " cluster", | 789 | static const char * const scope_str[] = {"", " zone", " cluster", |
760 | " node"}; | 790 | " node"}; |
791 | int err; | ||
761 | 792 | ||
762 | nla_parse_nested(nt, TIPC_NLA_NAME_TABLE_MAX, | 793 | if (!attrs[TIPC_NLA_NAME_TABLE]) |
763 | attrs[TIPC_NLA_NAME_TABLE], NULL); | 794 | return -EINVAL; |
764 | 795 | ||
765 | nla_parse_nested(publ, TIPC_NLA_PUBL_MAX, nt[TIPC_NLA_NAME_TABLE_PUBL], | 796 | err = nla_parse_nested(nt, TIPC_NLA_NAME_TABLE_MAX, |
766 | NULL); | 797 | attrs[TIPC_NLA_NAME_TABLE], NULL); |
798 | if (err) | ||
799 | return err; | ||
800 | |||
801 | if (!nt[TIPC_NLA_NAME_TABLE_PUBL]) | ||
802 | return -EINVAL; | ||
803 | |||
804 | err = nla_parse_nested(publ, TIPC_NLA_PUBL_MAX, | ||
805 | nt[TIPC_NLA_NAME_TABLE_PUBL], NULL); | ||
806 | if (err) | ||
807 | return err; | ||
767 | 808 | ||
768 | ntq = (struct tipc_name_table_query *)TLV_DATA(msg->req); | 809 | ntq = (struct tipc_name_table_query *)TLV_DATA(msg->req); |
769 | 810 | ||
@@ -815,8 +856,15 @@ static int __tipc_nl_compat_publ_dump(struct tipc_nl_compat_msg *msg, | |||
815 | { | 856 | { |
816 | u32 type, lower, upper; | 857 | u32 type, lower, upper; |
817 | struct nlattr *publ[TIPC_NLA_PUBL_MAX + 1]; | 858 | struct nlattr *publ[TIPC_NLA_PUBL_MAX + 1]; |
859 | int err; | ||
818 | 860 | ||
819 | nla_parse_nested(publ, TIPC_NLA_PUBL_MAX, attrs[TIPC_NLA_PUBL], NULL); | 861 | if (!attrs[TIPC_NLA_PUBL]) |
862 | return -EINVAL; | ||
863 | |||
864 | err = nla_parse_nested(publ, TIPC_NLA_PUBL_MAX, attrs[TIPC_NLA_PUBL], | ||
865 | NULL); | ||
866 | if (err) | ||
867 | return err; | ||
820 | 868 | ||
821 | type = nla_get_u32(publ[TIPC_NLA_PUBL_TYPE]); | 869 | type = nla_get_u32(publ[TIPC_NLA_PUBL_TYPE]); |
822 | lower = nla_get_u32(publ[TIPC_NLA_PUBL_LOWER]); | 870 | lower = nla_get_u32(publ[TIPC_NLA_PUBL_LOWER]); |
@@ -876,7 +924,13 @@ static int tipc_nl_compat_sk_dump(struct tipc_nl_compat_msg *msg, | |||
876 | u32 sock_ref; | 924 | u32 sock_ref; |
877 | struct nlattr *sock[TIPC_NLA_SOCK_MAX + 1]; | 925 | struct nlattr *sock[TIPC_NLA_SOCK_MAX + 1]; |
878 | 926 | ||
879 | nla_parse_nested(sock, TIPC_NLA_SOCK_MAX, attrs[TIPC_NLA_SOCK], NULL); | 927 | if (!attrs[TIPC_NLA_SOCK]) |
928 | return -EINVAL; | ||
929 | |||
930 | err = nla_parse_nested(sock, TIPC_NLA_SOCK_MAX, attrs[TIPC_NLA_SOCK], | ||
931 | NULL); | ||
932 | if (err) | ||
933 | return err; | ||
880 | 934 | ||
881 | sock_ref = nla_get_u32(sock[TIPC_NLA_SOCK_REF]); | 935 | sock_ref = nla_get_u32(sock[TIPC_NLA_SOCK_REF]); |
882 | tipc_tlv_sprintf(msg->rep, "%u:", sock_ref); | 936 | tipc_tlv_sprintf(msg->rep, "%u:", sock_ref); |
@@ -917,9 +971,15 @@ static int tipc_nl_compat_media_dump(struct tipc_nl_compat_msg *msg, | |||
917 | struct nlattr **attrs) | 971 | struct nlattr **attrs) |
918 | { | 972 | { |
919 | struct nlattr *media[TIPC_NLA_MEDIA_MAX + 1]; | 973 | struct nlattr *media[TIPC_NLA_MEDIA_MAX + 1]; |
974 | int err; | ||
975 | |||
976 | if (!attrs[TIPC_NLA_MEDIA]) | ||
977 | return -EINVAL; | ||
920 | 978 | ||
921 | nla_parse_nested(media, TIPC_NLA_MEDIA_MAX, attrs[TIPC_NLA_MEDIA], | 979 | err = nla_parse_nested(media, TIPC_NLA_MEDIA_MAX, attrs[TIPC_NLA_MEDIA], |
922 | NULL); | 980 | NULL); |
981 | if (err) | ||
982 | return err; | ||
923 | 983 | ||
924 | return tipc_add_tlv(msg->rep, TIPC_TLV_MEDIA_NAME, | 984 | return tipc_add_tlv(msg->rep, TIPC_TLV_MEDIA_NAME, |
925 | nla_data(media[TIPC_NLA_MEDIA_NAME]), | 985 | nla_data(media[TIPC_NLA_MEDIA_NAME]), |
@@ -931,8 +991,15 @@ static int tipc_nl_compat_node_dump(struct tipc_nl_compat_msg *msg, | |||
931 | { | 991 | { |
932 | struct tipc_node_info node_info; | 992 | struct tipc_node_info node_info; |
933 | struct nlattr *node[TIPC_NLA_NODE_MAX + 1]; | 993 | struct nlattr *node[TIPC_NLA_NODE_MAX + 1]; |
994 | int err; | ||
934 | 995 | ||
935 | nla_parse_nested(node, TIPC_NLA_NODE_MAX, attrs[TIPC_NLA_NODE], NULL); | 996 | if (!attrs[TIPC_NLA_NODE]) |
997 | return -EINVAL; | ||
998 | |||
999 | err = nla_parse_nested(node, TIPC_NLA_NODE_MAX, attrs[TIPC_NLA_NODE], | ||
1000 | NULL); | ||
1001 | if (err) | ||
1002 | return err; | ||
936 | 1003 | ||
937 | node_info.addr = htonl(nla_get_u32(node[TIPC_NLA_NODE_ADDR])); | 1004 | node_info.addr = htonl(nla_get_u32(node[TIPC_NLA_NODE_ADDR])); |
938 | node_info.up = htonl(nla_get_flag(node[TIPC_NLA_NODE_UP])); | 1005 | node_info.up = htonl(nla_get_flag(node[TIPC_NLA_NODE_UP])); |
@@ -971,8 +1038,16 @@ static int tipc_nl_compat_net_dump(struct tipc_nl_compat_msg *msg, | |||
971 | { | 1038 | { |
972 | __be32 id; | 1039 | __be32 id; |
973 | struct nlattr *net[TIPC_NLA_NET_MAX + 1]; | 1040 | struct nlattr *net[TIPC_NLA_NET_MAX + 1]; |
1041 | int err; | ||
1042 | |||
1043 | if (!attrs[TIPC_NLA_NET]) | ||
1044 | return -EINVAL; | ||
1045 | |||
1046 | err = nla_parse_nested(net, TIPC_NLA_NET_MAX, attrs[TIPC_NLA_NET], | ||
1047 | NULL); | ||
1048 | if (err) | ||
1049 | return err; | ||
974 | 1050 | ||
975 | nla_parse_nested(net, TIPC_NLA_NET_MAX, attrs[TIPC_NLA_NET], NULL); | ||
976 | id = htonl(nla_get_u32(net[TIPC_NLA_NET_ID])); | 1051 | id = htonl(nla_get_u32(net[TIPC_NLA_NET_ID])); |
977 | 1052 | ||
978 | return tipc_add_tlv(msg->rep, TIPC_TLV_UNSIGNED, &id, sizeof(id)); | 1053 | return tipc_add_tlv(msg->rep, TIPC_TLV_UNSIGNED, &id, sizeof(id)); |
diff --git a/scripts/checkpatch.pl b/scripts/checkpatch.pl index 6750595bd7b8..4904ced676d4 100755 --- a/scripts/checkpatch.pl +++ b/scripts/checkpatch.pl | |||
@@ -2454,6 +2454,7 @@ sub process { | |||
2454 | 2454 | ||
2455 | # Check for git id commit length and improperly formed commit descriptions | 2455 | # Check for git id commit length and improperly formed commit descriptions |
2456 | if ($in_commit_log && !$commit_log_possible_stack_dump && | 2456 | if ($in_commit_log && !$commit_log_possible_stack_dump && |
2457 | $line !~ /^\s*(?:Link|Patchwork|http|BugLink):/i && | ||
2457 | ($line =~ /\bcommit\s+[0-9a-f]{5,}\b/i || | 2458 | ($line =~ /\bcommit\s+[0-9a-f]{5,}\b/i || |
2458 | ($line =~ /\b[0-9a-f]{12,40}\b/i && | 2459 | ($line =~ /\b[0-9a-f]{12,40}\b/i && |
2459 | $line !~ /[\<\[][0-9a-f]{12,40}[\>\]]/i && | 2460 | $line !~ /[\<\[][0-9a-f]{12,40}[\>\]]/i && |
diff --git a/virt/kvm/arm/hyp/vgic-v2-sr.c b/virt/kvm/arm/hyp/vgic-v2-sr.c index a3f12b3b277b..3a3a699b7489 100644 --- a/virt/kvm/arm/hyp/vgic-v2-sr.c +++ b/virt/kvm/arm/hyp/vgic-v2-sr.c | |||
@@ -100,12 +100,11 @@ static void __hyp_text save_lrs(struct kvm_vcpu *vcpu, void __iomem *base) | |||
100 | if (!(vcpu->arch.vgic_cpu.live_lrs & (1UL << i))) | 100 | if (!(vcpu->arch.vgic_cpu.live_lrs & (1UL << i))) |
101 | continue; | 101 | continue; |
102 | 102 | ||
103 | if (cpu_if->vgic_elrsr & (1UL << i)) { | 103 | if (cpu_if->vgic_elrsr & (1UL << i)) |
104 | cpu_if->vgic_lr[i] &= ~GICH_LR_STATE; | 104 | cpu_if->vgic_lr[i] &= ~GICH_LR_STATE; |
105 | continue; | 105 | else |
106 | } | 106 | cpu_if->vgic_lr[i] = readl_relaxed(base + GICH_LR0 + (i * 4)); |
107 | 107 | ||
108 | cpu_if->vgic_lr[i] = readl_relaxed(base + GICH_LR0 + (i * 4)); | ||
109 | writel_relaxed(0, base + GICH_LR0 + (i * 4)); | 108 | writel_relaxed(0, base + GICH_LR0 + (i * 4)); |
110 | } | 109 | } |
111 | } | 110 | } |
diff --git a/virt/kvm/arm/vgic/vgic-mmio.c b/virt/kvm/arm/vgic/vgic-mmio.c index 059595ec3da0..9f6fab74dce7 100644 --- a/virt/kvm/arm/vgic/vgic-mmio.c +++ b/virt/kvm/arm/vgic/vgic-mmio.c | |||
@@ -191,10 +191,8 @@ static void vgic_mmio_change_active(struct kvm_vcpu *vcpu, struct vgic_irq *irq, | |||
191 | * other thread sync back the IRQ. | 191 | * other thread sync back the IRQ. |
192 | */ | 192 | */ |
193 | while (irq->vcpu && /* IRQ may have state in an LR somewhere */ | 193 | while (irq->vcpu && /* IRQ may have state in an LR somewhere */ |
194 | irq->vcpu->cpu != -1) { /* VCPU thread is running */ | 194 | irq->vcpu->cpu != -1) /* VCPU thread is running */ |
195 | BUG_ON(irq->intid < VGIC_NR_PRIVATE_IRQS); | ||
196 | cond_resched_lock(&irq->irq_lock); | 195 | cond_resched_lock(&irq->irq_lock); |
197 | } | ||
198 | 196 | ||
199 | irq->active = new_active_state; | 197 | irq->active = new_active_state; |
200 | if (new_active_state) | 198 | if (new_active_state) |
diff --git a/virt/kvm/arm/vgic/vgic-v2.c b/virt/kvm/arm/vgic/vgic-v2.c index 8ad42c217770..e31405ee5515 100644 --- a/virt/kvm/arm/vgic/vgic-v2.c +++ b/virt/kvm/arm/vgic/vgic-v2.c | |||
@@ -112,11 +112,15 @@ void vgic_v2_fold_lr_state(struct kvm_vcpu *vcpu) | |||
112 | } | 112 | } |
113 | } | 113 | } |
114 | 114 | ||
115 | /* Clear soft pending state when level IRQs have been acked */ | 115 | /* |
116 | if (irq->config == VGIC_CONFIG_LEVEL && | 116 | * Clear soft pending state when level irqs have been acked. |
117 | !(val & GICH_LR_PENDING_BIT)) { | 117 | * Always regenerate the pending state. |
118 | irq->soft_pending = false; | 118 | */ |
119 | irq->pending = irq->line_level; | 119 | if (irq->config == VGIC_CONFIG_LEVEL) { |
120 | if (!(val & GICH_LR_PENDING_BIT)) | ||
121 | irq->soft_pending = false; | ||
122 | |||
123 | irq->pending = irq->line_level || irq->soft_pending; | ||
120 | } | 124 | } |
121 | 125 | ||
122 | spin_unlock(&irq->irq_lock); | 126 | spin_unlock(&irq->irq_lock); |
diff --git a/virt/kvm/arm/vgic/vgic-v3.c b/virt/kvm/arm/vgic/vgic-v3.c index 336a46115937..346b4ad12b49 100644 --- a/virt/kvm/arm/vgic/vgic-v3.c +++ b/virt/kvm/arm/vgic/vgic-v3.c | |||
@@ -101,11 +101,15 @@ void vgic_v3_fold_lr_state(struct kvm_vcpu *vcpu) | |||
101 | } | 101 | } |
102 | } | 102 | } |
103 | 103 | ||
104 | /* Clear soft pending state when level irqs have been acked */ | 104 | /* |
105 | if (irq->config == VGIC_CONFIG_LEVEL && | 105 | * Clear soft pending state when level irqs have been acked. |
106 | !(val & ICH_LR_PENDING_BIT)) { | 106 | * Always regenerate the pending state. |
107 | irq->soft_pending = false; | 107 | */ |
108 | irq->pending = irq->line_level; | 108 | if (irq->config == VGIC_CONFIG_LEVEL) { |
109 | if (!(val & ICH_LR_PENDING_BIT)) | ||
110 | irq->soft_pending = false; | ||
111 | |||
112 | irq->pending = irq->line_level || irq->soft_pending; | ||
109 | } | 113 | } |
110 | 114 | ||
111 | spin_unlock(&irq->irq_lock); | 115 | spin_unlock(&irq->irq_lock); |
diff --git a/virt/kvm/irqchip.c b/virt/kvm/irqchip.c index fe84e1a95dd5..8db197bb6c7a 100644 --- a/virt/kvm/irqchip.c +++ b/virt/kvm/irqchip.c | |||
@@ -40,7 +40,7 @@ int kvm_irq_map_gsi(struct kvm *kvm, | |||
40 | 40 | ||
41 | irq_rt = srcu_dereference_check(kvm->irq_routing, &kvm->irq_srcu, | 41 | irq_rt = srcu_dereference_check(kvm->irq_routing, &kvm->irq_srcu, |
42 | lockdep_is_held(&kvm->irq_lock)); | 42 | lockdep_is_held(&kvm->irq_lock)); |
43 | if (gsi < irq_rt->nr_rt_entries) { | 43 | if (irq_rt && gsi < irq_rt->nr_rt_entries) { |
44 | hlist_for_each_entry(e, &irq_rt->map[gsi], link) { | 44 | hlist_for_each_entry(e, &irq_rt->map[gsi], link) { |
45 | entries[n] = *e; | 45 | entries[n] = *e; |
46 | ++n; | 46 | ++n; |
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index 37af23052470..02e98f3131bd 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c | |||
@@ -2935,7 +2935,7 @@ static long kvm_vm_ioctl(struct file *filp, | |||
2935 | case KVM_SET_GSI_ROUTING: { | 2935 | case KVM_SET_GSI_ROUTING: { |
2936 | struct kvm_irq_routing routing; | 2936 | struct kvm_irq_routing routing; |
2937 | struct kvm_irq_routing __user *urouting; | 2937 | struct kvm_irq_routing __user *urouting; |
2938 | struct kvm_irq_routing_entry *entries; | 2938 | struct kvm_irq_routing_entry *entries = NULL; |
2939 | 2939 | ||
2940 | r = -EFAULT; | 2940 | r = -EFAULT; |
2941 | if (copy_from_user(&routing, argp, sizeof(routing))) | 2941 | if (copy_from_user(&routing, argp, sizeof(routing))) |
@@ -2945,15 +2945,17 @@ static long kvm_vm_ioctl(struct file *filp, | |||
2945 | goto out; | 2945 | goto out; |
2946 | if (routing.flags) | 2946 | if (routing.flags) |
2947 | goto out; | 2947 | goto out; |
2948 | r = -ENOMEM; | 2948 | if (routing.nr) { |
2949 | entries = vmalloc(routing.nr * sizeof(*entries)); | 2949 | r = -ENOMEM; |
2950 | if (!entries) | 2950 | entries = vmalloc(routing.nr * sizeof(*entries)); |
2951 | goto out; | 2951 | if (!entries) |
2952 | r = -EFAULT; | 2952 | goto out; |
2953 | urouting = argp; | 2953 | r = -EFAULT; |
2954 | if (copy_from_user(entries, urouting->entries, | 2954 | urouting = argp; |
2955 | routing.nr * sizeof(*entries))) | 2955 | if (copy_from_user(entries, urouting->entries, |
2956 | goto out_free_irq_routing; | 2956 | routing.nr * sizeof(*entries))) |
2957 | goto out_free_irq_routing; | ||
2958 | } | ||
2957 | r = kvm_set_irq_routing(kvm, entries, routing.nr, | 2959 | r = kvm_set_irq_routing(kvm, entries, routing.nr, |
2958 | routing.flags); | 2960 | routing.flags); |
2959 | out_free_irq_routing: | 2961 | out_free_irq_routing: |