93 files changed, 939 insertions, 576 deletions
diff --git a/.mailmap b/.mailmap --- a/.mailmap +++ b/.mailmap | |||
| @@ -51,6 +51,7 @@ Greg Kroah-Hartman <gregkh@suse.de> | |||
| 51 | Greg Kroah-Hartman <greg@kroah.com> | 51 | Greg Kroah-Hartman <greg@kroah.com> |
| 52 | Henk Vergonet <Henk.Vergonet@gmail.com> | 52 | Henk Vergonet <Henk.Vergonet@gmail.com> |
| 53 | Henrik Kretzschmar <henne@nachtwindheim.de> | 53 | Henrik Kretzschmar <henne@nachtwindheim.de> |
| 54 | Henrik Rydberg <rydberg@bitmath.org> | ||
| 54 | Herbert Xu <herbert@gondor.apana.org.au> | 55 | Herbert Xu <herbert@gondor.apana.org.au> |
| 55 | Jacob Shin <Jacob.Shin@amd.com> | 56 | Jacob Shin <Jacob.Shin@amd.com> |
| 56 | James Bottomley <jejb@mulgrave.(none)> | 57 | James Bottomley <jejb@mulgrave.(none)> |
diff --git a/MAINTAINERS b/MAINTAINERS index ddb9ac8d32b3..3589d67437f8 100644 --- a/MAINTAINERS +++ b/MAINTAINERS | |||
| @@ -724,15 +724,15 @@ F: include/uapi/linux/apm_bios.h | |||
| 724 | F: drivers/char/apm-emulation.c | 724 | F: drivers/char/apm-emulation.c |
| 725 | 725 | ||
| 726 | APPLE BCM5974 MULTITOUCH DRIVER | 726 | APPLE BCM5974 MULTITOUCH DRIVER |
| 727 | M: Henrik Rydberg <rydberg@euromail.se> | 727 | M: Henrik Rydberg <rydberg@bitmath.org> |
| 728 | L: linux-input@vger.kernel.org | 728 | L: linux-input@vger.kernel.org |
| 729 | S: Maintained | 729 | S: Odd fixes |
| 730 | F: drivers/input/mouse/bcm5974.c | 730 | F: drivers/input/mouse/bcm5974.c |
| 731 | 731 | ||
| 732 | APPLE SMC DRIVER | 732 | APPLE SMC DRIVER |
| 733 | M: Henrik Rydberg <rydberg@euromail.se> | 733 | M: Henrik Rydberg <rydberg@bitmath.org> |
| 734 | L: lm-sensors@lm-sensors.org | 734 | L: lm-sensors@lm-sensors.org |
| 735 | S: Maintained | 735 | S: Odd fixes |
| 736 | F: drivers/hwmon/applesmc.c | 736 | F: drivers/hwmon/applesmc.c |
| 737 | 737 | ||
| 738 | APPLETALK NETWORK LAYER | 738 | APPLETALK NETWORK LAYER |
| @@ -2259,6 +2259,7 @@ F: drivers/gpio/gpio-bt8xx.c | |||
| 2259 | BTRFS FILE SYSTEM | 2259 | BTRFS FILE SYSTEM |
| 2260 | M: Chris Mason <clm@fb.com> | 2260 | M: Chris Mason <clm@fb.com> |
| 2261 | M: Josef Bacik <jbacik@fb.com> | 2261 | M: Josef Bacik <jbacik@fb.com> |
| 2262 | M: David Sterba <dsterba@suse.cz> | ||
| 2262 | L: linux-btrfs@vger.kernel.org | 2263 | L: linux-btrfs@vger.kernel.org |
| 2263 | W: http://btrfs.wiki.kernel.org/ | 2264 | W: http://btrfs.wiki.kernel.org/ |
| 2264 | Q: http://patchwork.kernel.org/project/linux-btrfs/list/ | 2265 | Q: http://patchwork.kernel.org/project/linux-btrfs/list/ |
| @@ -4940,10 +4941,10 @@ F: include/uapi/linux/input.h | |||
| 4940 | F: include/linux/input/ | 4941 | F: include/linux/input/ |
| 4941 | 4942 | ||
| 4942 | INPUT MULTITOUCH (MT) PROTOCOL | 4943 | INPUT MULTITOUCH (MT) PROTOCOL |
| 4943 | M: Henrik Rydberg <rydberg@euromail.se> | 4944 | M: Henrik Rydberg <rydberg@bitmath.org> |
| 4944 | L: linux-input@vger.kernel.org | 4945 | L: linux-input@vger.kernel.org |
| 4945 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/rydberg/input-mt.git | 4946 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/rydberg/input-mt.git |
| 4946 | S: Maintained | 4947 | S: Odd fixes |
| 4947 | F: Documentation/input/multi-touch-protocol.txt | 4948 | F: Documentation/input/multi-touch-protocol.txt |
| 4948 | F: drivers/input/input-mt.c | 4949 | F: drivers/input/input-mt.c |
| 4949 | K: \b(ABS|SYN)_MT_ | 4950 | K: \b(ABS|SYN)_MT_ |
| @@ -391,6 +391,7 @@ USERINCLUDE := \ | |||
| 391 | # Needed to be compatible with the O= option | 391 | # Needed to be compatible with the O= option |
| 392 | LINUXINCLUDE := \ | 392 | LINUXINCLUDE := \ |
| 393 | -I$(srctree)/arch/$(hdr-arch)/include \ | 393 | -I$(srctree)/arch/$(hdr-arch)/include \ |
| 394 | -Iarch/$(hdr-arch)/include/generated/uapi \ | ||
| 394 | -Iarch/$(hdr-arch)/include/generated \ | 395 | -Iarch/$(hdr-arch)/include/generated \ |
| 395 | $(if $(KBUILD_SRC), -I$(srctree)/include) \ | 396 | $(if $(KBUILD_SRC), -I$(srctree)/include) \ |
| 396 | -Iinclude \ | 397 | -Iinclude \ |
diff --git a/arch/blackfin/mach-bf533/boards/stamp.c b/arch/blackfin/mach-bf533/boards/stamp.c index 6f4bac969bf7..23eada79439c 100644 --- a/arch/blackfin/mach-bf533/boards/stamp.c +++ b/arch/blackfin/mach-bf533/boards/stamp.c | |||
| @@ -7,6 +7,7 @@ | |||
| 7 | */ | 7 | */ |
| 8 | 8 | ||
| 9 | #include <linux/device.h> | 9 | #include <linux/device.h> |
| 10 | #include <linux/delay.h> | ||
| 10 | #include <linux/platform_device.h> | 11 | #include <linux/platform_device.h> |
| 11 | #include <linux/mtd/mtd.h> | 12 | #include <linux/mtd/mtd.h> |
| 12 | #include <linux/mtd/partitions.h> | 13 | #include <linux/mtd/partitions.h> |
diff --git a/arch/ia64/kernel/acpi.c b/arch/ia64/kernel/acpi.c index 615ef81def49..e795cb848154 100644 --- a/arch/ia64/kernel/acpi.c +++ b/arch/ia64/kernel/acpi.c | |||
| @@ -893,13 +893,13 @@ static int _acpi_map_lsapic(acpi_handle handle, int physid, int *pcpu) | |||
| 893 | } | 893 | } |
| 894 | 894 | ||
| 895 | /* wrapper to silence section mismatch warning */ | 895 | /* wrapper to silence section mismatch warning */ |
| 896 | int __ref acpi_map_lsapic(acpi_handle handle, int physid, int *pcpu) | 896 | int __ref acpi_map_cpu(acpi_handle handle, int physid, int *pcpu) |
| 897 | { | 897 | { |
| 898 | return _acpi_map_lsapic(handle, physid, pcpu); | 898 | return _acpi_map_lsapic(handle, physid, pcpu); |
| 899 | } | 899 | } |
| 900 | EXPORT_SYMBOL(acpi_map_lsapic); | 900 | EXPORT_SYMBOL(acpi_map_cpu); |
| 901 | 901 | ||
| 902 | int acpi_unmap_lsapic(int cpu) | 902 | int acpi_unmap_cpu(int cpu) |
| 903 | { | 903 | { |
| 904 | ia64_cpu_to_sapicid[cpu] = -1; | 904 | ia64_cpu_to_sapicid[cpu] = -1; |
| 905 | set_cpu_present(cpu, false); | 905 | set_cpu_present(cpu, false); |
| @@ -910,8 +910,7 @@ int acpi_unmap_lsapic(int cpu) | |||
| 910 | 910 | ||
| 911 | return (0); | 911 | return (0); |
| 912 | } | 912 | } |
| 913 | 913 | EXPORT_SYMBOL(acpi_unmap_cpu); | |
| 914 | EXPORT_SYMBOL(acpi_unmap_lsapic); | ||
| 915 | #endif /* CONFIG_ACPI_HOTPLUG_CPU */ | 914 | #endif /* CONFIG_ACPI_HOTPLUG_CPU */ |
| 916 | 915 | ||
| 917 | #ifdef CONFIG_ACPI_NUMA | 916 | #ifdef CONFIG_ACPI_NUMA |
diff --git a/arch/x86/crypto/Makefile b/arch/x86/crypto/Makefile index fd0f848938cc..5a4a089e8b1f 100644 --- a/arch/x86/crypto/Makefile +++ b/arch/x86/crypto/Makefile | |||
| @@ -26,7 +26,6 @@ obj-$(CONFIG_CRYPTO_GHASH_CLMUL_NI_INTEL) += ghash-clmulni-intel.o | |||
| 26 | 26 | ||
| 27 | obj-$(CONFIG_CRYPTO_CRC32C_INTEL) += crc32c-intel.o | 27 | obj-$(CONFIG_CRYPTO_CRC32C_INTEL) += crc32c-intel.o |
| 28 | obj-$(CONFIG_CRYPTO_SHA1_SSSE3) += sha1-ssse3.o | 28 | obj-$(CONFIG_CRYPTO_SHA1_SSSE3) += sha1-ssse3.o |
| 29 | obj-$(CONFIG_CRYPTO_SHA1_MB) += sha-mb/ | ||
| 30 | obj-$(CONFIG_CRYPTO_CRC32_PCLMUL) += crc32-pclmul.o | 29 | obj-$(CONFIG_CRYPTO_CRC32_PCLMUL) += crc32-pclmul.o |
| 31 | obj-$(CONFIG_CRYPTO_SHA256_SSSE3) += sha256-ssse3.o | 30 | obj-$(CONFIG_CRYPTO_SHA256_SSSE3) += sha256-ssse3.o |
| 32 | obj-$(CONFIG_CRYPTO_SHA512_SSSE3) += sha512-ssse3.o | 31 | obj-$(CONFIG_CRYPTO_SHA512_SSSE3) += sha512-ssse3.o |
| @@ -46,6 +45,7 @@ endif | |||
| 46 | ifeq ($(avx2_supported),yes) | 45 | ifeq ($(avx2_supported),yes) |
| 47 | obj-$(CONFIG_CRYPTO_CAMELLIA_AESNI_AVX2_X86_64) += camellia-aesni-avx2.o | 46 | obj-$(CONFIG_CRYPTO_CAMELLIA_AESNI_AVX2_X86_64) += camellia-aesni-avx2.o |
| 48 | obj-$(CONFIG_CRYPTO_SERPENT_AVX2_X86_64) += serpent-avx2.o | 47 | obj-$(CONFIG_CRYPTO_SERPENT_AVX2_X86_64) += serpent-avx2.o |
| 48 | obj-$(CONFIG_CRYPTO_SHA1_MB) += sha-mb/ | ||
| 49 | endif | 49 | endif |
| 50 | 50 | ||
| 51 | aes-i586-y := aes-i586-asm_32.o aes_glue.o | 51 | aes-i586-y := aes-i586-asm_32.o aes_glue.o |
diff --git a/arch/x86/crypto/aes_ctrby8_avx-x86_64.S b/arch/x86/crypto/aes_ctrby8_avx-x86_64.S index 2df2a0298f5a..a916c4a61165 100644 --- a/arch/x86/crypto/aes_ctrby8_avx-x86_64.S +++ b/arch/x86/crypto/aes_ctrby8_avx-x86_64.S | |||
| @@ -208,7 +208,7 @@ ddq_add_8: | |||
| 208 | 208 | ||
| 209 | .if (klen == KEY_128) | 209 | .if (klen == KEY_128) |
| 210 | .if (load_keys) | 210 | .if (load_keys) |
| 211 | vmovdqa 3*16(p_keys), xkeyA | 211 | vmovdqa 3*16(p_keys), xkey4 |
| 212 | .endif | 212 | .endif |
| 213 | .else | 213 | .else |
| 214 | vmovdqa 3*16(p_keys), xkeyA | 214 | vmovdqa 3*16(p_keys), xkeyA |
| @@ -224,7 +224,7 @@ ddq_add_8: | |||
| 224 | add $(16*by), p_in | 224 | add $(16*by), p_in |
| 225 | 225 | ||
| 226 | .if (klen == KEY_128) | 226 | .if (klen == KEY_128) |
| 227 | vmovdqa 4*16(p_keys), xkey4 | 227 | vmovdqa 4*16(p_keys), xkeyB |
| 228 | .else | 228 | .else |
| 229 | .if (load_keys) | 229 | .if (load_keys) |
| 230 | vmovdqa 4*16(p_keys), xkey4 | 230 | vmovdqa 4*16(p_keys), xkey4 |
| @@ -234,7 +234,12 @@ ddq_add_8: | |||
| 234 | .set i, 0 | 234 | .set i, 0 |
| 235 | .rept by | 235 | .rept by |
| 236 | club XDATA, i | 236 | club XDATA, i |
| 237 | vaesenc xkeyA, var_xdata, var_xdata /* key 3 */ | 237 | /* key 3 */ |
| 238 | .if (klen == KEY_128) | ||
| 239 | vaesenc xkey4, var_xdata, var_xdata | ||
| 240 | .else | ||
| 241 | vaesenc xkeyA, var_xdata, var_xdata | ||
| 242 | .endif | ||
| 238 | .set i, (i +1) | 243 | .set i, (i +1) |
| 239 | .endr | 244 | .endr |
| 240 | 245 | ||
| @@ -243,13 +248,18 @@ ddq_add_8: | |||
| 243 | .set i, 0 | 248 | .set i, 0 |
| 244 | .rept by | 249 | .rept by |
| 245 | club XDATA, i | 250 | club XDATA, i |
| 246 | vaesenc xkey4, var_xdata, var_xdata /* key 4 */ | 251 | /* key 4 */ |
| 252 | .if (klen == KEY_128) | ||
| 253 | vaesenc xkeyB, var_xdata, var_xdata | ||
| 254 | .else | ||
| 255 | vaesenc xkey4, var_xdata, var_xdata | ||
| 256 | .endif | ||
| 247 | .set i, (i +1) | 257 | .set i, (i +1) |
| 248 | .endr | 258 | .endr |
| 249 | 259 | ||
| 250 | .if (klen == KEY_128) | 260 | .if (klen == KEY_128) |
| 251 | .if (load_keys) | 261 | .if (load_keys) |
| 252 | vmovdqa 6*16(p_keys), xkeyB | 262 | vmovdqa 6*16(p_keys), xkey8 |
| 253 | .endif | 263 | .endif |
| 254 | .else | 264 | .else |
| 255 | vmovdqa 6*16(p_keys), xkeyB | 265 | vmovdqa 6*16(p_keys), xkeyB |
| @@ -267,12 +277,17 @@ ddq_add_8: | |||
| 267 | .set i, 0 | 277 | .set i, 0 |
| 268 | .rept by | 278 | .rept by |
| 269 | club XDATA, i | 279 | club XDATA, i |
| 270 | vaesenc xkeyB, var_xdata, var_xdata /* key 6 */ | 280 | /* key 6 */ |
| 281 | .if (klen == KEY_128) | ||
| 282 | vaesenc xkey8, var_xdata, var_xdata | ||
| 283 | .else | ||
| 284 | vaesenc xkeyB, var_xdata, var_xdata | ||
| 285 | .endif | ||
| 271 | .set i, (i +1) | 286 | .set i, (i +1) |
| 272 | .endr | 287 | .endr |
| 273 | 288 | ||
| 274 | .if (klen == KEY_128) | 289 | .if (klen == KEY_128) |
| 275 | vmovdqa 8*16(p_keys), xkey8 | 290 | vmovdqa 8*16(p_keys), xkeyB |
| 276 | .else | 291 | .else |
| 277 | .if (load_keys) | 292 | .if (load_keys) |
| 278 | vmovdqa 8*16(p_keys), xkey8 | 293 | vmovdqa 8*16(p_keys), xkey8 |
| @@ -288,7 +303,7 @@ ddq_add_8: | |||
| 288 | 303 | ||
| 289 | .if (klen == KEY_128) | 304 | .if (klen == KEY_128) |
| 290 | .if (load_keys) | 305 | .if (load_keys) |
| 291 | vmovdqa 9*16(p_keys), xkeyA | 306 | vmovdqa 9*16(p_keys), xkey12 |
| 292 | .endif | 307 | .endif |
| 293 | .else | 308 | .else |
| 294 | vmovdqa 9*16(p_keys), xkeyA | 309 | vmovdqa 9*16(p_keys), xkeyA |
| @@ -297,7 +312,12 @@ ddq_add_8: | |||
| 297 | .set i, 0 | 312 | .set i, 0 |
| 298 | .rept by | 313 | .rept by |
| 299 | club XDATA, i | 314 | club XDATA, i |
| 300 | vaesenc xkey8, var_xdata, var_xdata /* key 8 */ | 315 | /* key 8 */ |
| 316 | .if (klen == KEY_128) | ||
| 317 | vaesenc xkeyB, var_xdata, var_xdata | ||
| 318 | .else | ||
| 319 | vaesenc xkey8, var_xdata, var_xdata | ||
| 320 | .endif | ||
| 301 | .set i, (i +1) | 321 | .set i, (i +1) |
| 302 | .endr | 322 | .endr |
| 303 | 323 | ||
| @@ -306,7 +326,12 @@ ddq_add_8: | |||
| 306 | .set i, 0 | 326 | .set i, 0 |
| 307 | .rept by | 327 | .rept by |
| 308 | club XDATA, i | 328 | club XDATA, i |
| 309 | vaesenc xkeyA, var_xdata, var_xdata /* key 9 */ | 329 | /* key 9 */ |
| 330 | .if (klen == KEY_128) | ||
| 331 | vaesenc xkey12, var_xdata, var_xdata | ||
| 332 | .else | ||
| 333 | vaesenc xkeyA, var_xdata, var_xdata | ||
| 334 | .endif | ||
| 310 | .set i, (i +1) | 335 | .set i, (i +1) |
| 311 | .endr | 336 | .endr |
| 312 | 337 | ||
| @@ -412,7 +437,6 @@ ddq_add_8: | |||
| 412 | /* main body of aes ctr load */ | 437 | /* main body of aes ctr load */ |
| 413 | 438 | ||
| 414 | .macro do_aes_ctrmain key_len | 439 | .macro do_aes_ctrmain key_len |
| 415 | |||
| 416 | cmp $16, num_bytes | 440 | cmp $16, num_bytes |
| 417 | jb .Ldo_return2\key_len | 441 | jb .Ldo_return2\key_len |
| 418 | 442 | ||
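Note on the assembly change above: for 128-bit keys, the schedule entries that are loaded only when load_keys is set (rounds 3, 6 and 9) now go into xkey4/xkey8/xkey12, which the rest of the code leaves alone, while rounds 4 and 8 are reloaded into the scratch xkeyB register on every pass. The point is that a value cached across calls must live in storage the next call does not clobber. A rough, non-AES userspace sketch of that rule (all names and values below are illustrative, not the crypto code):

/*
 * Illustrative only: "persistent" plays the role of xkey4/xkey8/xkey12,
 * "scratch" the role of xkeyA/xkeyB in the assembly above.
 */
#include <stdio.h>

static int persistent[3];		/* survives between calls */

static int encrypt_step(int data, int load_keys, const int *key_schedule)
{
	int scratch;			/* clobbered freely */

	if (load_keys) {
		persistent[0] = key_schedule[3];	/* cache rounds 3/6/9 here */
		persistent[1] = key_schedule[6];
		persistent[2] = key_schedule[9];
	}
	scratch = key_schedule[5];	/* fine: reloaded on every call */

	return data ^ persistent[0] ^ persistent[1] ^ persistent[2] ^ scratch;
}

int main(void)
{
	const int keys[16] = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9 };

	printf("%d\n", encrypt_step(100, 1, keys));	/* first call loads keys */
	printf("%d\n", encrypt_step(100, 0, keys));	/* reuse of cached rounds */
	return 0;
}

Both calls print the same value; had rounds 3/6/9 been kept in "scratch", the second call (load_keys off) would have used stale data, which is the failure mode the patch removes for 128-bit keys.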
diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c index 4433a4be8171..d1626364a28a 100644 --- a/arch/x86/kernel/acpi/boot.c +++ b/arch/x86/kernel/acpi/boot.c | |||
| @@ -750,13 +750,13 @@ static int _acpi_map_lsapic(acpi_handle handle, int physid, int *pcpu) | |||
| 750 | } | 750 | } |
| 751 | 751 | ||
| 752 | /* wrapper to silence section mismatch warning */ | 752 | /* wrapper to silence section mismatch warning */ |
| 753 | int __ref acpi_map_lsapic(acpi_handle handle, int physid, int *pcpu) | 753 | int __ref acpi_map_cpu(acpi_handle handle, int physid, int *pcpu) |
| 754 | { | 754 | { |
| 755 | return _acpi_map_lsapic(handle, physid, pcpu); | 755 | return _acpi_map_lsapic(handle, physid, pcpu); |
| 756 | } | 756 | } |
| 757 | EXPORT_SYMBOL(acpi_map_lsapic); | 757 | EXPORT_SYMBOL(acpi_map_cpu); |
| 758 | 758 | ||
| 759 | int acpi_unmap_lsapic(int cpu) | 759 | int acpi_unmap_cpu(int cpu) |
| 760 | { | 760 | { |
| 761 | #ifdef CONFIG_ACPI_NUMA | 761 | #ifdef CONFIG_ACPI_NUMA |
| 762 | set_apicid_to_node(per_cpu(x86_cpu_to_apicid, cpu), NUMA_NO_NODE); | 762 | set_apicid_to_node(per_cpu(x86_cpu_to_apicid, cpu), NUMA_NO_NODE); |
| @@ -768,8 +768,7 @@ int acpi_unmap_lsapic(int cpu) | |||
| 768 | 768 | ||
| 769 | return (0); | 769 | return (0); |
| 770 | } | 770 | } |
| 771 | 771 | EXPORT_SYMBOL(acpi_unmap_cpu); | |
| 772 | EXPORT_SYMBOL(acpi_unmap_lsapic); | ||
| 773 | #endif /* CONFIG_ACPI_HOTPLUG_CPU */ | 772 | #endif /* CONFIG_ACPI_HOTPLUG_CPU */ |
| 774 | 773 | ||
| 775 | int acpi_register_ioapic(acpi_handle handle, u64 phys_addr, u32 gsi_base) | 774 | int acpi_register_ioapic(acpi_handle handle, u64 phys_addr, u32 gsi_base) |
diff --git a/drivers/acpi/acpi_processor.c b/drivers/acpi/acpi_processor.c index 1fdf5e07a1c7..1020b1b53a17 100644 --- a/drivers/acpi/acpi_processor.c +++ b/drivers/acpi/acpi_processor.c | |||
| @@ -170,7 +170,7 @@ static int acpi_processor_hotadd_init(struct acpi_processor *pr) | |||
| 170 | acpi_status status; | 170 | acpi_status status; |
| 171 | int ret; | 171 | int ret; |
| 172 | 172 | ||
| 173 | if (pr->apic_id == -1) | 173 | if (pr->phys_id == -1) |
| 174 | return -ENODEV; | 174 | return -ENODEV; |
| 175 | 175 | ||
| 176 | status = acpi_evaluate_integer(pr->handle, "_STA", NULL, &sta); | 176 | status = acpi_evaluate_integer(pr->handle, "_STA", NULL, &sta); |
| @@ -180,13 +180,13 @@ static int acpi_processor_hotadd_init(struct acpi_processor *pr) | |||
| 180 | cpu_maps_update_begin(); | 180 | cpu_maps_update_begin(); |
| 181 | cpu_hotplug_begin(); | 181 | cpu_hotplug_begin(); |
| 182 | 182 | ||
| 183 | ret = acpi_map_lsapic(pr->handle, pr->apic_id, &pr->id); | 183 | ret = acpi_map_cpu(pr->handle, pr->phys_id, &pr->id); |
| 184 | if (ret) | 184 | if (ret) |
| 185 | goto out; | 185 | goto out; |
| 186 | 186 | ||
| 187 | ret = arch_register_cpu(pr->id); | 187 | ret = arch_register_cpu(pr->id); |
| 188 | if (ret) { | 188 | if (ret) { |
| 189 | acpi_unmap_lsapic(pr->id); | 189 | acpi_unmap_cpu(pr->id); |
| 190 | goto out; | 190 | goto out; |
| 191 | } | 191 | } |
| 192 | 192 | ||
| @@ -215,7 +215,7 @@ static int acpi_processor_get_info(struct acpi_device *device) | |||
| 215 | union acpi_object object = { 0 }; | 215 | union acpi_object object = { 0 }; |
| 216 | struct acpi_buffer buffer = { sizeof(union acpi_object), &object }; | 216 | struct acpi_buffer buffer = { sizeof(union acpi_object), &object }; |
| 217 | struct acpi_processor *pr = acpi_driver_data(device); | 217 | struct acpi_processor *pr = acpi_driver_data(device); |
| 218 | int apic_id, cpu_index, device_declaration = 0; | 218 | int phys_id, cpu_index, device_declaration = 0; |
| 219 | acpi_status status = AE_OK; | 219 | acpi_status status = AE_OK; |
| 220 | static int cpu0_initialized; | 220 | static int cpu0_initialized; |
| 221 | unsigned long long value; | 221 | unsigned long long value; |
| @@ -262,15 +262,18 @@ static int acpi_processor_get_info(struct acpi_device *device) | |||
| 262 | pr->acpi_id = value; | 262 | pr->acpi_id = value; |
| 263 | } | 263 | } |
| 264 | 264 | ||
| 265 | apic_id = acpi_get_apicid(pr->handle, device_declaration, pr->acpi_id); | 265 | phys_id = acpi_get_phys_id(pr->handle, device_declaration, pr->acpi_id); |
| 266 | if (apic_id < 0) | 266 | if (phys_id < 0) |
| 267 | acpi_handle_debug(pr->handle, "failed to get CPU APIC ID.\n"); | 267 | acpi_handle_debug(pr->handle, "failed to get CPU physical ID.\n"); |
| 268 | pr->apic_id = apic_id; | 268 | pr->phys_id = phys_id; |
| 269 | 269 | ||
| 270 | cpu_index = acpi_map_cpuid(pr->apic_id, pr->acpi_id); | 270 | cpu_index = acpi_map_cpuid(pr->phys_id, pr->acpi_id); |
| 271 | if (!cpu0_initialized && !acpi_has_cpu_in_madt()) { | 271 | if (!cpu0_initialized && !acpi_has_cpu_in_madt()) { |
| 272 | cpu0_initialized = 1; | 272 | cpu0_initialized = 1; |
| 273 | /* Handle UP system running SMP kernel, with no LAPIC in MADT */ | 273 | /* |
| 274 | * Handle UP system running SMP kernel, with no CPU | ||
| 275 | * entry in MADT | ||
| 276 | */ | ||
| 274 | if ((cpu_index == -1) && (num_online_cpus() == 1)) | 277 | if ((cpu_index == -1) && (num_online_cpus() == 1)) |
| 275 | cpu_index = 0; | 278 | cpu_index = 0; |
| 276 | } | 279 | } |
| @@ -458,7 +461,7 @@ static void acpi_processor_remove(struct acpi_device *device) | |||
| 458 | 461 | ||
| 459 | /* Remove the CPU. */ | 462 | /* Remove the CPU. */ |
| 460 | arch_unregister_cpu(pr->id); | 463 | arch_unregister_cpu(pr->id); |
| 461 | acpi_unmap_lsapic(pr->id); | 464 | acpi_unmap_cpu(pr->id); |
| 462 | 465 | ||
| 463 | cpu_hotplug_done(); | 466 | cpu_hotplug_done(); |
| 464 | cpu_maps_update_done(); | 467 | cpu_maps_update_done(); |
diff --git a/drivers/acpi/device_pm.c b/drivers/acpi/device_pm.c index c2daa85fc9f7..c0d44d394ca3 100644 --- a/drivers/acpi/device_pm.c +++ b/drivers/acpi/device_pm.c | |||
| @@ -257,7 +257,7 @@ int acpi_bus_init_power(struct acpi_device *device) | |||
| 257 | 257 | ||
| 258 | device->power.state = ACPI_STATE_UNKNOWN; | 258 | device->power.state = ACPI_STATE_UNKNOWN; |
| 259 | if (!acpi_device_is_present(device)) | 259 | if (!acpi_device_is_present(device)) |
| 260 | return 0; | 260 | return -ENXIO; |
| 261 | 261 | ||
| 262 | result = acpi_device_get_power(device, &state); | 262 | result = acpi_device_get_power(device, &state); |
| 263 | if (result) | 263 | if (result) |
diff --git a/drivers/acpi/processor_core.c b/drivers/acpi/processor_core.c index 342942f90a10..02e48394276c 100644 --- a/drivers/acpi/processor_core.c +++ b/drivers/acpi/processor_core.c | |||
| @@ -69,7 +69,7 @@ static int map_madt_entry(int type, u32 acpi_id) | |||
| 69 | unsigned long madt_end, entry; | 69 | unsigned long madt_end, entry; |
| 70 | static struct acpi_table_madt *madt; | 70 | static struct acpi_table_madt *madt; |
| 71 | static int read_madt; | 71 | static int read_madt; |
| 72 | int apic_id = -1; | 72 | int phys_id = -1; /* CPU hardware ID */ |
| 73 | 73 | ||
| 74 | if (!read_madt) { | 74 | if (!read_madt) { |
| 75 | if (ACPI_FAILURE(acpi_get_table(ACPI_SIG_MADT, 0, | 75 | if (ACPI_FAILURE(acpi_get_table(ACPI_SIG_MADT, 0, |
| @@ -79,7 +79,7 @@ static int map_madt_entry(int type, u32 acpi_id) | |||
| 79 | } | 79 | } |
| 80 | 80 | ||
| 81 | if (!madt) | 81 | if (!madt) |
| 82 | return apic_id; | 82 | return phys_id; |
| 83 | 83 | ||
| 84 | entry = (unsigned long)madt; | 84 | entry = (unsigned long)madt; |
| 85 | madt_end = entry + madt->header.length; | 85 | madt_end = entry + madt->header.length; |
| @@ -91,18 +91,18 @@ static int map_madt_entry(int type, u32 acpi_id) | |||
| 91 | struct acpi_subtable_header *header = | 91 | struct acpi_subtable_header *header = |
| 92 | (struct acpi_subtable_header *)entry; | 92 | (struct acpi_subtable_header *)entry; |
| 93 | if (header->type == ACPI_MADT_TYPE_LOCAL_APIC) { | 93 | if (header->type == ACPI_MADT_TYPE_LOCAL_APIC) { |
| 94 | if (!map_lapic_id(header, acpi_id, &apic_id)) | 94 | if (!map_lapic_id(header, acpi_id, &phys_id)) |
| 95 | break; | 95 | break; |
| 96 | } else if (header->type == ACPI_MADT_TYPE_LOCAL_X2APIC) { | 96 | } else if (header->type == ACPI_MADT_TYPE_LOCAL_X2APIC) { |
| 97 | if (!map_x2apic_id(header, type, acpi_id, &apic_id)) | 97 | if (!map_x2apic_id(header, type, acpi_id, &phys_id)) |
| 98 | break; | 98 | break; |
| 99 | } else if (header->type == ACPI_MADT_TYPE_LOCAL_SAPIC) { | 99 | } else if (header->type == ACPI_MADT_TYPE_LOCAL_SAPIC) { |
| 100 | if (!map_lsapic_id(header, type, acpi_id, &apic_id)) | 100 | if (!map_lsapic_id(header, type, acpi_id, &phys_id)) |
| 101 | break; | 101 | break; |
| 102 | } | 102 | } |
| 103 | entry += header->length; | 103 | entry += header->length; |
| 104 | } | 104 | } |
| 105 | return apic_id; | 105 | return phys_id; |
| 106 | } | 106 | } |
| 107 | 107 | ||
| 108 | static int map_mat_entry(acpi_handle handle, int type, u32 acpi_id) | 108 | static int map_mat_entry(acpi_handle handle, int type, u32 acpi_id) |
| @@ -110,7 +110,7 @@ static int map_mat_entry(acpi_handle handle, int type, u32 acpi_id) | |||
| 110 | struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; | 110 | struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; |
| 111 | union acpi_object *obj; | 111 | union acpi_object *obj; |
| 112 | struct acpi_subtable_header *header; | 112 | struct acpi_subtable_header *header; |
| 113 | int apic_id = -1; | 113 | int phys_id = -1; |
| 114 | 114 | ||
| 115 | if (ACPI_FAILURE(acpi_evaluate_object(handle, "_MAT", NULL, &buffer))) | 115 | if (ACPI_FAILURE(acpi_evaluate_object(handle, "_MAT", NULL, &buffer))) |
| 116 | goto exit; | 116 | goto exit; |
| @@ -126,38 +126,38 @@ static int map_mat_entry(acpi_handle handle, int type, u32 acpi_id) | |||
| 126 | 126 | ||
| 127 | header = (struct acpi_subtable_header *)obj->buffer.pointer; | 127 | header = (struct acpi_subtable_header *)obj->buffer.pointer; |
| 128 | if (header->type == ACPI_MADT_TYPE_LOCAL_APIC) | 128 | if (header->type == ACPI_MADT_TYPE_LOCAL_APIC) |
| 129 | map_lapic_id(header, acpi_id, &apic_id); | 129 | map_lapic_id(header, acpi_id, &phys_id); |
| 130 | else if (header->type == ACPI_MADT_TYPE_LOCAL_SAPIC) | 130 | else if (header->type == ACPI_MADT_TYPE_LOCAL_SAPIC) |
| 131 | map_lsapic_id(header, type, acpi_id, &apic_id); | 131 | map_lsapic_id(header, type, acpi_id, &phys_id); |
| 132 | else if (header->type == ACPI_MADT_TYPE_LOCAL_X2APIC) | 132 | else if (header->type == ACPI_MADT_TYPE_LOCAL_X2APIC) |
| 133 | map_x2apic_id(header, type, acpi_id, &apic_id); | 133 | map_x2apic_id(header, type, acpi_id, &phys_id); |
| 134 | 134 | ||
| 135 | exit: | 135 | exit: |
| 136 | kfree(buffer.pointer); | 136 | kfree(buffer.pointer); |
| 137 | return apic_id; | 137 | return phys_id; |
| 138 | } | 138 | } |
| 139 | 139 | ||
| 140 | int acpi_get_apicid(acpi_handle handle, int type, u32 acpi_id) | 140 | int acpi_get_phys_id(acpi_handle handle, int type, u32 acpi_id) |
| 141 | { | 141 | { |
| 142 | int apic_id; | 142 | int phys_id; |
| 143 | 143 | ||
| 144 | apic_id = map_mat_entry(handle, type, acpi_id); | 144 | phys_id = map_mat_entry(handle, type, acpi_id); |
| 145 | if (apic_id == -1) | 145 | if (phys_id == -1) |
| 146 | apic_id = map_madt_entry(type, acpi_id); | 146 | phys_id = map_madt_entry(type, acpi_id); |
| 147 | 147 | ||
| 148 | return apic_id; | 148 | return phys_id; |
| 149 | } | 149 | } |
| 150 | 150 | ||
| 151 | int acpi_map_cpuid(int apic_id, u32 acpi_id) | 151 | int acpi_map_cpuid(int phys_id, u32 acpi_id) |
| 152 | { | 152 | { |
| 153 | #ifdef CONFIG_SMP | 153 | #ifdef CONFIG_SMP |
| 154 | int i; | 154 | int i; |
| 155 | #endif | 155 | #endif |
| 156 | 156 | ||
| 157 | if (apic_id == -1) { | 157 | if (phys_id == -1) { |
| 158 | /* | 158 | /* |
| 159 | * On UP processor, there is no _MAT or MADT table. | 159 | * On UP processor, there is no _MAT or MADT table. |
| 160 | * So above apic_id is always set to -1. | 160 | * So above phys_id is always set to -1. |
| 161 | * | 161 | * |
| 162 | * BIOS may define multiple CPU handles even for UP processor. | 162 | * BIOS may define multiple CPU handles even for UP processor. |
| 163 | * For example, | 163 | * For example, |
| @@ -170,7 +170,7 @@ int acpi_map_cpuid(int apic_id, u32 acpi_id) | |||
| 170 | * Processor (CPU3, 0x03, 0x00000410, 0x06) {} | 170 | * Processor (CPU3, 0x03, 0x00000410, 0x06) {} |
| 171 | * } | 171 | * } |
| 172 | * | 172 | * |
| 173 | * Ignores apic_id and always returns 0 for the processor | 173 | * Ignores phys_id and always returns 0 for the processor |
| 174 | * handle with acpi id 0 if nr_cpu_ids is 1. | 174 | * handle with acpi id 0 if nr_cpu_ids is 1. |
| 175 | * This should be the case if SMP tables are not found. | 175 | * This should be the case if SMP tables are not found. |
| 176 | * Return -1 for other CPU's handle. | 176 | * Return -1 for other CPU's handle. |
| @@ -178,28 +178,28 @@ int acpi_map_cpuid(int apic_id, u32 acpi_id) | |||
| 178 | if (nr_cpu_ids <= 1 && acpi_id == 0) | 178 | if (nr_cpu_ids <= 1 && acpi_id == 0) |
| 179 | return acpi_id; | 179 | return acpi_id; |
| 180 | else | 180 | else |
| 181 | return apic_id; | 181 | return phys_id; |
| 182 | } | 182 | } |
| 183 | 183 | ||
| 184 | #ifdef CONFIG_SMP | 184 | #ifdef CONFIG_SMP |
| 185 | for_each_possible_cpu(i) { | 185 | for_each_possible_cpu(i) { |
| 186 | if (cpu_physical_id(i) == apic_id) | 186 | if (cpu_physical_id(i) == phys_id) |
| 187 | return i; | 187 | return i; |
| 188 | } | 188 | } |
| 189 | #else | 189 | #else |
| 190 | /* In UP kernel, only processor 0 is valid */ | 190 | /* In UP kernel, only processor 0 is valid */ |
| 191 | if (apic_id == 0) | 191 | if (phys_id == 0) |
| 192 | return apic_id; | 192 | return phys_id; |
| 193 | #endif | 193 | #endif |
| 194 | return -1; | 194 | return -1; |
| 195 | } | 195 | } |
| 196 | 196 | ||
| 197 | int acpi_get_cpuid(acpi_handle handle, int type, u32 acpi_id) | 197 | int acpi_get_cpuid(acpi_handle handle, int type, u32 acpi_id) |
| 198 | { | 198 | { |
| 199 | int apic_id; | 199 | int phys_id; |
| 200 | 200 | ||
| 201 | apic_id = acpi_get_apicid(handle, type, acpi_id); | 201 | phys_id = acpi_get_phys_id(handle, type, acpi_id); |
| 202 | 202 | ||
| 203 | return acpi_map_cpuid(apic_id, acpi_id); | 203 | return acpi_map_cpuid(phys_id, acpi_id); |
| 204 | } | 204 | } |
| 205 | EXPORT_SYMBOL_GPL(acpi_get_cpuid); | 205 | EXPORT_SYMBOL_GPL(acpi_get_cpuid); |
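The apic_id to phys_id rename in processor_core.c is mechanical; the mapping logic is unchanged. For reference, a stripped-down userspace model of the acpi_map_cpuid() decision above (the CPU table and IDs are made up for illustration):

#include <stdio.h>

#define NR_CPUS 4
static int cpu_physical_id[NR_CPUS] = { 0x10, 0x12, 0x14, 0x16 };
static int nr_cpu_ids = NR_CPUS;

static int map_cpuid(int phys_id, unsigned int acpi_id)
{
	int i;

	if (phys_id == -1) {
		/* UP system: no usable MADT/_MAT entry; only ACPI id 0 maps */
		if (nr_cpu_ids <= 1 && acpi_id == 0)
			return acpi_id;
		return -1;
	}

	for (i = 0; i < nr_cpu_ids; i++)
		if (cpu_physical_id[i] == phys_id)
			return i;

	return -1;
}

int main(void)
{
	printf("phys 0x14 -> cpu %d\n", map_cpuid(0x14, 2));	/* 2 */
	printf("phys -1   -> cpu %d\n", map_cpuid(-1, 0));	/* -1 on SMP */
	return 0;
}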
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c index 16914cc30882..dc4d8960684a 100644 --- a/drivers/acpi/scan.c +++ b/drivers/acpi/scan.c | |||
| @@ -1001,7 +1001,7 @@ static void acpi_free_power_resources_lists(struct acpi_device *device) | |||
| 1001 | if (device->wakeup.flags.valid) | 1001 | if (device->wakeup.flags.valid) |
| 1002 | acpi_power_resources_list_free(&device->wakeup.resources); | 1002 | acpi_power_resources_list_free(&device->wakeup.resources); |
| 1003 | 1003 | ||
| 1004 | if (!device->flags.power_manageable) | 1004 | if (!device->power.flags.power_resources) |
| 1005 | return; | 1005 | return; |
| 1006 | 1006 | ||
| 1007 | for (i = ACPI_STATE_D0; i <= ACPI_STATE_D3_HOT; i++) { | 1007 | for (i = ACPI_STATE_D0; i <= ACPI_STATE_D3_HOT; i++) { |
| @@ -1744,10 +1744,8 @@ static void acpi_bus_get_power_flags(struct acpi_device *device) | |||
| 1744 | device->power.flags.power_resources) | 1744 | device->power.flags.power_resources) |
| 1745 | device->power.states[ACPI_STATE_D3_COLD].flags.os_accessible = 1; | 1745 | device->power.states[ACPI_STATE_D3_COLD].flags.os_accessible = 1; |
| 1746 | 1746 | ||
| 1747 | if (acpi_bus_init_power(device)) { | 1747 | if (acpi_bus_init_power(device)) |
| 1748 | acpi_free_power_resources_lists(device); | ||
| 1749 | device->flags.power_manageable = 0; | 1748 | device->flags.power_manageable = 0; |
| 1750 | } | ||
| 1751 | } | 1749 | } |
| 1752 | 1750 | ||
| 1753 | static void acpi_bus_get_flags(struct acpi_device *device) | 1751 | static void acpi_bus_get_flags(struct acpi_device *device) |
| @@ -2371,13 +2369,18 @@ static void acpi_bus_attach(struct acpi_device *device) | |||
| 2371 | /* Skip devices that are not present. */ | 2369 | /* Skip devices that are not present. */ |
| 2372 | if (!acpi_device_is_present(device)) { | 2370 | if (!acpi_device_is_present(device)) { |
| 2373 | device->flags.visited = false; | 2371 | device->flags.visited = false; |
| 2372 | device->flags.power_manageable = 0; | ||
| 2374 | return; | 2373 | return; |
| 2375 | } | 2374 | } |
| 2376 | if (device->handler) | 2375 | if (device->handler) |
| 2377 | goto ok; | 2376 | goto ok; |
| 2378 | 2377 | ||
| 2379 | if (!device->flags.initialized) { | 2378 | if (!device->flags.initialized) { |
| 2380 | acpi_bus_update_power(device, NULL); | 2379 | device->flags.power_manageable = |
| 2380 | device->power.states[ACPI_STATE_D0].flags.valid; | ||
| 2381 | if (acpi_bus_init_power(device)) | ||
| 2382 | device->flags.power_manageable = 0; | ||
| 2383 | |||
| 2381 | device->flags.initialized = true; | 2384 | device->flags.initialized = true; |
| 2382 | } | 2385 | } |
| 2383 | device->flags.visited = false; | 2386 | device->flags.visited = false; |
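Together with the device_pm.c change above (acpi_bus_init_power() now returns -ENXIO for devices that are not present), attach time decides power_manageable from whether D0 is valid and whether the initial power-state evaluation succeeds. A rough model of that flow, using simplified stand-in types rather than the real ACPI structures:

#include <stdbool.h>
#include <stdio.h>

#define ENXIO 6

struct dev_model {
	bool present;
	bool d0_valid;
	bool power_manageable;
	bool initialized;
};

static int init_power(struct dev_model *d)
{
	if (!d->present)
		return -ENXIO;		/* mirrors the device_pm.c change */
	return 0;			/* would evaluate _PSC/_PRx here */
}

static void attach(struct dev_model *d)
{
	if (!d->present) {
		d->power_manageable = false;
		return;
	}
	if (!d->initialized) {
		d->power_manageable = d->d0_valid;
		if (init_power(d))
			d->power_manageable = false;
		d->initialized = true;
	}
}

int main(void)
{
	struct dev_model dev = { .present = true, .d0_valid = true };

	attach(&dev);
	printf("power_manageable=%d\n", dev.power_manageable);
	return 0;
}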
diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c index c72e79d2c5ad..032db459370f 100644 --- a/drivers/acpi/video.c +++ b/drivers/acpi/video.c | |||
| @@ -522,6 +522,16 @@ static struct dmi_system_id video_dmi_table[] __initdata = { | |||
| 522 | DMI_MATCH(DMI_PRODUCT_NAME, "370R4E/370R4V/370R5E/3570RE/370R5V"), | 522 | DMI_MATCH(DMI_PRODUCT_NAME, "370R4E/370R4V/370R5E/3570RE/370R5V"), |
| 523 | }, | 523 | }, |
| 524 | }, | 524 | }, |
| 525 | |||
| 526 | { | ||
| 527 | /* https://bugzilla.redhat.com/show_bug.cgi?id=1163574 */ | ||
| 528 | .callback = video_disable_native_backlight, | ||
| 529 | .ident = "Dell XPS15 L521X", | ||
| 530 | .matches = { | ||
| 531 | DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), | ||
| 532 | DMI_MATCH(DMI_PRODUCT_NAME, "XPS L521X"), | ||
| 533 | }, | ||
| 534 | }, | ||
| 525 | {} | 535 | {} |
| 526 | }; | 536 | }; |
| 527 | 537 | ||
diff --git a/drivers/char/ipmi/ipmi_ssif.c b/drivers/char/ipmi/ipmi_ssif.c index fd5a5e85d7dc..982b96323f82 100644 --- a/drivers/char/ipmi/ipmi_ssif.c +++ b/drivers/char/ipmi/ipmi_ssif.c | |||
| @@ -969,7 +969,8 @@ static void sender(void *send_info, | |||
| 969 | 969 | ||
| 970 | do_gettimeofday(&t); | 970 | do_gettimeofday(&t); |
| 971 | pr_info("**Enqueue %02x %02x: %ld.%6.6ld\n", | 971 | pr_info("**Enqueue %02x %02x: %ld.%6.6ld\n", |
| 972 | msg->data[0], msg->data[1], t.tv_sec, t.tv_usec); | 972 | msg->data[0], msg->data[1], |
| 973 | (long) t.tv_sec, (long) t.tv_usec); | ||
| 973 | } | 974 | } |
| 974 | } | 975 | } |
| 975 | 976 | ||
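The added casts matter because time_t and suseconds_t are not long on every architecture, so printing them with "%ld" directly can trigger format warnings or print garbage on some builds. A minimal userspace illustration of the portable form:

#include <stdio.h>
#include <sys/time.h>

int main(void)
{
	struct timeval t;

	gettimeofday(&t, NULL);
	/* cast to a known type instead of guessing the width of time_t */
	printf("%ld.%6.6ld\n", (long)t.tv_sec, (long)t.tv_usec);
	return 0;
}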
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c index 1232336b960e..40dfbc0444c0 100644 --- a/drivers/iommu/intel-iommu.c +++ b/drivers/iommu/intel-iommu.c | |||
| @@ -4029,14 +4029,6 @@ static int device_notifier(struct notifier_block *nb, | |||
| 4029 | if (action != BUS_NOTIFY_REMOVED_DEVICE) | 4029 | if (action != BUS_NOTIFY_REMOVED_DEVICE) |
| 4030 | return 0; | 4030 | return 0; |
| 4031 | 4031 | ||
| 4032 | /* | ||
| 4033 | * If the device is still attached to a device driver we can't | ||
| 4034 | * tear down the domain yet as DMA mappings may still be in use. | ||
| 4035 | * Wait for the BUS_NOTIFY_UNBOUND_DRIVER event to do that. | ||
| 4036 | */ | ||
| 4037 | if (action == BUS_NOTIFY_DEL_DEVICE && dev->driver != NULL) | ||
| 4038 | return 0; | ||
| 4039 | |||
| 4040 | domain = find_domain(dev); | 4032 | domain = find_domain(dev); |
| 4041 | if (!domain) | 4033 | if (!domain) |
| 4042 | return 0; | 4034 | return 0; |
| @@ -4428,6 +4420,10 @@ static int intel_iommu_attach_device(struct iommu_domain *domain, | |||
| 4428 | domain_remove_one_dev_info(old_domain, dev); | 4420 | domain_remove_one_dev_info(old_domain, dev); |
| 4429 | else | 4421 | else |
| 4430 | domain_remove_dev_info(old_domain); | 4422 | domain_remove_dev_info(old_domain); |
| 4423 | |||
| 4424 | if (!domain_type_is_vm_or_si(old_domain) && | ||
| 4425 | list_empty(&old_domain->devices)) | ||
| 4426 | domain_exit(old_domain); | ||
| 4431 | } | 4427 | } |
| 4432 | } | 4428 | } |
| 4433 | 4429 | ||
diff --git a/drivers/iommu/ipmmu-vmsa.c b/drivers/iommu/ipmmu-vmsa.c index 68dfb0fd5ee9..748693192c20 100644 --- a/drivers/iommu/ipmmu-vmsa.c +++ b/drivers/iommu/ipmmu-vmsa.c | |||
| @@ -558,7 +558,7 @@ static pmd_t *ipmmu_alloc_pmd(struct ipmmu_vmsa_device *mmu, pgd_t *pgd, | |||
| 558 | 558 | ||
| 559 | static u64 ipmmu_page_prot(unsigned int prot, u64 type) | 559 | static u64 ipmmu_page_prot(unsigned int prot, u64 type) |
| 560 | { | 560 | { |
| 561 | u64 pgprot = ARM_VMSA_PTE_XN | ARM_VMSA_PTE_nG | ARM_VMSA_PTE_AF | 561 | u64 pgprot = ARM_VMSA_PTE_nG | ARM_VMSA_PTE_AF |
| 562 | | ARM_VMSA_PTE_SH_IS | ARM_VMSA_PTE_AP_UNPRIV | 562 | | ARM_VMSA_PTE_SH_IS | ARM_VMSA_PTE_AP_UNPRIV |
| 563 | | ARM_VMSA_PTE_NS | type; | 563 | | ARM_VMSA_PTE_NS | type; |
| 564 | 564 | ||
| @@ -568,8 +568,8 @@ static u64 ipmmu_page_prot(unsigned int prot, u64 type) | |||
| 568 | if (prot & IOMMU_CACHE) | 568 | if (prot & IOMMU_CACHE) |
| 569 | pgprot |= IMMAIR_ATTR_IDX_WBRWA << ARM_VMSA_PTE_ATTRINDX_SHIFT; | 569 | pgprot |= IMMAIR_ATTR_IDX_WBRWA << ARM_VMSA_PTE_ATTRINDX_SHIFT; |
| 570 | 570 | ||
| 571 | if (prot & IOMMU_EXEC) | 571 | if (prot & IOMMU_NOEXEC) |
| 572 | pgprot &= ~ARM_VMSA_PTE_XN; | 572 | pgprot |= ARM_VMSA_PTE_XN; |
| 573 | else if (!(prot & (IOMMU_READ | IOMMU_WRITE))) | 573 | else if (!(prot & (IOMMU_READ | IOMMU_WRITE))) |
| 574 | /* If no access create a faulting entry to avoid TLB fills. */ | 574 | /* If no access create a faulting entry to avoid TLB fills. */ |
| 575 | pgprot &= ~ARM_VMSA_PTE_PAGE; | 575 | pgprot &= ~ARM_VMSA_PTE_PAGE; |
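The polarity of the execute permission flips in the ipmmu hunk: mappings are executable unless the caller passes IOMMU_NOEXEC, replacing the old opt-in IOMMU_EXEC flag. A small sketch of that translation with made-up bit values (the real ARM_VMSA_PTE_* and IOMMU_* constants come from the kernel headers):

#include <stdint.h>
#include <stdio.h>

#define IOMMU_READ	(1 << 0)
#define IOMMU_WRITE	(1 << 1)
#define IOMMU_NOEXEC	(1 << 3)

#define PTE_XN		(1ULL << 54)	/* execute-never */
#define PTE_AF		(1ULL << 10)

static uint64_t page_prot(unsigned int prot)
{
	uint64_t pgprot = PTE_AF;	/* executable unless told otherwise */

	if (prot & IOMMU_NOEXEC)
		pgprot |= PTE_XN;

	return pgprot;
}

int main(void)
{
	printf("rw:   %#llx\n", (unsigned long long)page_prot(IOMMU_READ | IOMMU_WRITE));
	printf("noex: %#llx\n", (unsigned long long)page_prot(IOMMU_READ | IOMMU_NOEXEC));
	return 0;
}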
diff --git a/drivers/iommu/rockchip-iommu.c b/drivers/iommu/rockchip-iommu.c index b2023af384b9..6a8b1ec4a48a 100644 --- a/drivers/iommu/rockchip-iommu.c +++ b/drivers/iommu/rockchip-iommu.c | |||
| @@ -1009,7 +1009,6 @@ static struct platform_driver rk_iommu_driver = { | |||
| 1009 | .remove = rk_iommu_remove, | 1009 | .remove = rk_iommu_remove, |
| 1010 | .driver = { | 1010 | .driver = { |
| 1011 | .name = "rk_iommu", | 1011 | .name = "rk_iommu", |
| 1012 | .owner = THIS_MODULE, | ||
| 1013 | .of_match_table = of_match_ptr(rk_iommu_dt_ids), | 1012 | .of_match_table = of_match_ptr(rk_iommu_dt_ids), |
| 1014 | }, | 1013 | }, |
| 1015 | }; | 1014 | }; |
diff --git a/drivers/net/ethernet/allwinner/sun4i-emac.c b/drivers/net/ethernet/allwinner/sun4i-emac.c index 1fcd5568a352..f3470d96837a 100644 --- a/drivers/net/ethernet/allwinner/sun4i-emac.c +++ b/drivers/net/ethernet/allwinner/sun4i-emac.c | |||
| @@ -850,8 +850,10 @@ static int emac_probe(struct platform_device *pdev) | |||
| 850 | } | 850 | } |
| 851 | 851 | ||
| 852 | db->clk = devm_clk_get(&pdev->dev, NULL); | 852 | db->clk = devm_clk_get(&pdev->dev, NULL); |
| 853 | if (IS_ERR(db->clk)) | 853 | if (IS_ERR(db->clk)) { |
| 854 | ret = PTR_ERR(db->clk); | ||
| 854 | goto out; | 855 | goto out; |
| 856 | } | ||
| 855 | 857 | ||
| 856 | clk_prepare_enable(db->clk); | 858 | clk_prepare_enable(db->clk); |
| 857 | 859 | ||
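Without the added assignment, a missing clock leaves ret at its previous value (typically 0), so emac_probe() could report success on the error path. A userspace sketch of the ERR_PTR/PTR_ERR pattern and of the one-line fix (the helpers below are simplified stand-ins, not the kernel macros):

#include <stdio.h>

#define MAX_ERRNO	4095
#define ERR_PTR(err)	((void *)(long)(err))
#define PTR_ERR(ptr)	((long)(ptr))
#define IS_ERR(ptr)	((unsigned long)(ptr) >= (unsigned long)-MAX_ERRNO)

static void *get_clk(int available)
{
	return available ? (void *)0x1000 : ERR_PTR(-2 /* -ENOENT */);
}

static int probe(int clk_available)
{
	int ret = 0;
	void *clk = get_clk(clk_available);

	if (IS_ERR(clk)) {
		ret = PTR_ERR(clk);	/* the line the fix adds */
		goto out;
	}
	/* ... rest of probe ... */
out:
	return ret;
}

int main(void)
{
	printf("probe ok:   %d\n", probe(1));
	printf("probe fail: %d\n", probe(0));
	return 0;
}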
diff --git a/drivers/net/ethernet/altera/altera_tse_main.c b/drivers/net/ethernet/altera/altera_tse_main.c index 3498760dc22a..760c72c6e2ac 100644 --- a/drivers/net/ethernet/altera/altera_tse_main.c +++ b/drivers/net/ethernet/altera/altera_tse_main.c | |||
| @@ -1170,10 +1170,6 @@ tx_request_irq_error: | |||
| 1170 | init_error: | 1170 | init_error: |
| 1171 | free_skbufs(dev); | 1171 | free_skbufs(dev); |
| 1172 | alloc_skbuf_error: | 1172 | alloc_skbuf_error: |
| 1173 | if (priv->phydev) { | ||
| 1174 | phy_disconnect(priv->phydev); | ||
| 1175 | priv->phydev = NULL; | ||
| 1176 | } | ||
| 1177 | phy_error: | 1173 | phy_error: |
| 1178 | return ret; | 1174 | return ret; |
| 1179 | } | 1175 | } |
| @@ -1186,12 +1182,9 @@ static int tse_shutdown(struct net_device *dev) | |||
| 1186 | int ret; | 1182 | int ret; |
| 1187 | unsigned long int flags; | 1183 | unsigned long int flags; |
| 1188 | 1184 | ||
| 1189 | /* Stop and disconnect the PHY */ | 1185 | /* Stop the PHY */ |
| 1190 | if (priv->phydev) { | 1186 | if (priv->phydev) |
| 1191 | phy_stop(priv->phydev); | 1187 | phy_stop(priv->phydev); |
| 1192 | phy_disconnect(priv->phydev); | ||
| 1193 | priv->phydev = NULL; | ||
| 1194 | } | ||
| 1195 | 1188 | ||
| 1196 | netif_stop_queue(dev); | 1189 | netif_stop_queue(dev); |
| 1197 | napi_disable(&priv->napi); | 1190 | napi_disable(&priv->napi); |
| @@ -1525,6 +1518,10 @@ err_free_netdev: | |||
| 1525 | static int altera_tse_remove(struct platform_device *pdev) | 1518 | static int altera_tse_remove(struct platform_device *pdev) |
| 1526 | { | 1519 | { |
| 1527 | struct net_device *ndev = platform_get_drvdata(pdev); | 1520 | struct net_device *ndev = platform_get_drvdata(pdev); |
| 1521 | struct altera_tse_private *priv = netdev_priv(ndev); | ||
| 1522 | |||
| 1523 | if (priv->phydev) | ||
| 1524 | phy_disconnect(priv->phydev); | ||
| 1528 | 1525 | ||
| 1529 | platform_set_drvdata(pdev, NULL); | 1526 | platform_set_drvdata(pdev, NULL); |
| 1530 | altera_tse_mdio_destroy(ndev); | 1527 | altera_tse_mdio_destroy(ndev); |
diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c index 705f334ebb85..b29e027c476e 100644 --- a/drivers/net/ethernet/cisco/enic/enic_main.c +++ b/drivers/net/ethernet/cisco/enic/enic_main.c | |||
| @@ -1616,7 +1616,7 @@ static int enic_open(struct net_device *netdev) | |||
| 1616 | if (vnic_rq_desc_used(&enic->rq[i]) == 0) { | 1616 | if (vnic_rq_desc_used(&enic->rq[i]) == 0) { |
| 1617 | netdev_err(netdev, "Unable to alloc receive buffers\n"); | 1617 | netdev_err(netdev, "Unable to alloc receive buffers\n"); |
| 1618 | err = -ENOMEM; | 1618 | err = -ENOMEM; |
| 1619 | goto err_out_notify_unset; | 1619 | goto err_out_free_rq; |
| 1620 | } | 1620 | } |
| 1621 | } | 1621 | } |
| 1622 | 1622 | ||
| @@ -1649,7 +1649,9 @@ static int enic_open(struct net_device *netdev) | |||
| 1649 | 1649 | ||
| 1650 | return 0; | 1650 | return 0; |
| 1651 | 1651 | ||
| 1652 | err_out_notify_unset: | 1652 | err_out_free_rq: |
| 1653 | for (i = 0; i < enic->rq_count; i++) | ||
| 1654 | vnic_rq_clean(&enic->rq[i], enic_free_rq_buf); | ||
| 1653 | enic_dev_notify_unset(enic); | 1655 | enic_dev_notify_unset(enic); |
| 1654 | err_out_free_intr: | 1656 | err_out_free_intr: |
| 1655 | enic_free_intr(enic); | 1657 | enic_free_intr(enic); |
diff --git a/drivers/net/ethernet/intel/e100.c b/drivers/net/ethernet/intel/e100.c index 781065eb5431..e9c3a87e5b11 100644 --- a/drivers/net/ethernet/intel/e100.c +++ b/drivers/net/ethernet/intel/e100.c | |||
| @@ -1543,7 +1543,7 @@ static int e100_phy_init(struct nic *nic) | |||
| 1543 | mdio_write(netdev, nic->mii.phy_id, MII_BMCR, bmcr); | 1543 | mdio_write(netdev, nic->mii.phy_id, MII_BMCR, bmcr); |
| 1544 | } else if ((nic->mac >= mac_82550_D102) || ((nic->flags & ich) && | 1544 | } else if ((nic->mac >= mac_82550_D102) || ((nic->flags & ich) && |
| 1545 | (mdio_read(netdev, nic->mii.phy_id, MII_TPISTATUS) & 0x8000) && | 1545 | (mdio_read(netdev, nic->mii.phy_id, MII_TPISTATUS) & 0x8000) && |
| 1546 | !(nic->eeprom[eeprom_cnfg_mdix] & eeprom_mdix_enabled))) { | 1546 | (nic->eeprom[eeprom_cnfg_mdix] & eeprom_mdix_enabled))) { |
| 1547 | /* enable/disable MDI/MDI-X auto-switching. */ | 1547 | /* enable/disable MDI/MDI-X auto-switching. */ |
| 1548 | mdio_write(netdev, nic->mii.phy_id, MII_NCONFIG, | 1548 | mdio_write(netdev, nic->mii.phy_id, MII_NCONFIG, |
| 1549 | nic->mii.force_media ? 0 : NCONFIG_AUTO_SWITCH); | 1549 | nic->mii.force_media ? 0 : NCONFIG_AUTO_SWITCH); |
diff --git a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c index 433a55886ad2..cb0de455683e 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c +++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c | |||
| @@ -829,7 +829,7 @@ static void i40e_dbg_dump_desc(int cnt, int vsi_seid, int ring_id, int desc_n, | |||
| 829 | if (desc_n >= ring->count || desc_n < 0) { | 829 | if (desc_n >= ring->count || desc_n < 0) { |
| 830 | dev_info(&pf->pdev->dev, | 830 | dev_info(&pf->pdev->dev, |
| 831 | "descriptor %d not found\n", desc_n); | 831 | "descriptor %d not found\n", desc_n); |
| 832 | return; | 832 | goto out; |
| 833 | } | 833 | } |
| 834 | if (!is_rx_ring) { | 834 | if (!is_rx_ring) { |
| 835 | txd = I40E_TX_DESC(ring, desc_n); | 835 | txd = I40E_TX_DESC(ring, desc_n); |
| @@ -855,6 +855,8 @@ static void i40e_dbg_dump_desc(int cnt, int vsi_seid, int ring_id, int desc_n, | |||
| 855 | } else { | 855 | } else { |
| 856 | dev_info(&pf->pdev->dev, "dump desc rx/tx <vsi_seid> <ring_id> [<desc_n>]\n"); | 856 | dev_info(&pf->pdev->dev, "dump desc rx/tx <vsi_seid> <ring_id> [<desc_n>]\n"); |
| 857 | } | 857 | } |
| 858 | |||
| 859 | out: | ||
| 858 | kfree(ring); | 860 | kfree(ring); |
| 859 | } | 861 | } |
| 860 | 862 | ||
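The early return in i40e_dbg_dump_desc() leaked the ring buffer allocated further up in the function; routing the failure through a single exit label keeps the kfree() on every path. The same pattern in a self-contained sketch (error values and sizes are illustrative):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static int dump_desc(int desc_n, int count)
{
	int ret = 0;
	char *ring = malloc(128);

	if (!ring)
		return -12;		/* -ENOMEM */
	memset(ring, 0, 128);

	if (desc_n >= count || desc_n < 0) {
		fprintf(stderr, "descriptor %d not found\n", desc_n);
		ret = -22;		/* -EINVAL */
		goto out;		/* not "return": ring must be freed */
	}
	/* ... dump the descriptor ... */
out:
	free(ring);
	return ret;
}

int main(void)
{
	printf("%d\n", dump_desc(5, 4));
	return 0;
}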
diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.c b/drivers/net/ethernet/intel/igb/e1000_82575.c index 051ea94bdcd3..0f69ef81751a 100644 --- a/drivers/net/ethernet/intel/igb/e1000_82575.c +++ b/drivers/net/ethernet/intel/igb/e1000_82575.c | |||
| @@ -1125,7 +1125,7 @@ static s32 igb_acquire_swfw_sync_82575(struct e1000_hw *hw, u16 mask) | |||
| 1125 | u32 swmask = mask; | 1125 | u32 swmask = mask; |
| 1126 | u32 fwmask = mask << 16; | 1126 | u32 fwmask = mask << 16; |
| 1127 | s32 ret_val = 0; | 1127 | s32 ret_val = 0; |
| 1128 | s32 i = 0, timeout = 200; /* FIXME: find real value to use here */ | 1128 | s32 i = 0, timeout = 200; |
| 1129 | 1129 | ||
| 1130 | while (i < timeout) { | 1130 | while (i < timeout) { |
| 1131 | if (igb_get_hw_semaphore(hw)) { | 1131 | if (igb_get_hw_semaphore(hw)) { |
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c index 943cbd47d832..03e9eb0dc761 100644 --- a/drivers/net/ethernet/mellanox/mlx4/main.c +++ b/drivers/net/ethernet/mellanox/mlx4/main.c | |||
| @@ -1829,7 +1829,7 @@ static int mlx4_init_hca(struct mlx4_dev *dev) | |||
| 1829 | err = mlx4_dev_cap(dev, &dev_cap); | 1829 | err = mlx4_dev_cap(dev, &dev_cap); |
| 1830 | if (err) { | 1830 | if (err) { |
| 1831 | mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting\n"); | 1831 | mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting\n"); |
| 1832 | goto err_stop_fw; | 1832 | return err; |
| 1833 | } | 1833 | } |
| 1834 | 1834 | ||
| 1835 | choose_steering_mode(dev, &dev_cap); | 1835 | choose_steering_mode(dev, &dev_cap); |
| @@ -1860,7 +1860,7 @@ static int mlx4_init_hca(struct mlx4_dev *dev) | |||
| 1860 | &init_hca); | 1860 | &init_hca); |
| 1861 | if ((long long) icm_size < 0) { | 1861 | if ((long long) icm_size < 0) { |
| 1862 | err = icm_size; | 1862 | err = icm_size; |
| 1863 | goto err_stop_fw; | 1863 | return err; |
| 1864 | } | 1864 | } |
| 1865 | 1865 | ||
| 1866 | dev->caps.max_fmr_maps = (1 << (32 - ilog2(dev->caps.num_mpts))) - 1; | 1866 | dev->caps.max_fmr_maps = (1 << (32 - ilog2(dev->caps.num_mpts))) - 1; |
| @@ -1874,7 +1874,7 @@ static int mlx4_init_hca(struct mlx4_dev *dev) | |||
| 1874 | 1874 | ||
| 1875 | err = mlx4_init_icm(dev, &dev_cap, &init_hca, icm_size); | 1875 | err = mlx4_init_icm(dev, &dev_cap, &init_hca, icm_size); |
| 1876 | if (err) | 1876 | if (err) |
| 1877 | goto err_stop_fw; | 1877 | return err; |
| 1878 | 1878 | ||
| 1879 | err = mlx4_INIT_HCA(dev, &init_hca); | 1879 | err = mlx4_INIT_HCA(dev, &init_hca); |
| 1880 | if (err) { | 1880 | if (err) { |
| @@ -1886,7 +1886,7 @@ static int mlx4_init_hca(struct mlx4_dev *dev) | |||
| 1886 | err = mlx4_query_func(dev, &dev_cap); | 1886 | err = mlx4_query_func(dev, &dev_cap); |
| 1887 | if (err < 0) { | 1887 | if (err < 0) { |
| 1888 | mlx4_err(dev, "QUERY_FUNC command failed, aborting.\n"); | 1888 | mlx4_err(dev, "QUERY_FUNC command failed, aborting.\n"); |
| 1889 | goto err_stop_fw; | 1889 | goto err_close; |
| 1890 | } else if (err & MLX4_QUERY_FUNC_NUM_SYS_EQS) { | 1890 | } else if (err & MLX4_QUERY_FUNC_NUM_SYS_EQS) { |
| 1891 | dev->caps.num_eqs = dev_cap.max_eqs; | 1891 | dev->caps.num_eqs = dev_cap.max_eqs; |
| 1892 | dev->caps.reserved_eqs = dev_cap.reserved_eqs; | 1892 | dev->caps.reserved_eqs = dev_cap.reserved_eqs; |
| @@ -2006,11 +2006,6 @@ err_free_icm: | |||
| 2006 | if (!mlx4_is_slave(dev)) | 2006 | if (!mlx4_is_slave(dev)) |
| 2007 | mlx4_free_icms(dev); | 2007 | mlx4_free_icms(dev); |
| 2008 | 2008 | ||
| 2009 | err_stop_fw: | ||
| 2010 | if (!mlx4_is_slave(dev)) { | ||
| 2011 | mlx4_UNMAP_FA(dev); | ||
| 2012 | mlx4_free_icm(dev, priv->fw.fw_icm, 0); | ||
| 2013 | } | ||
| 2014 | return err; | 2009 | return err; |
| 2015 | } | 2010 | } |
| 2016 | 2011 | ||
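Dropping the err_stop_fw label means the early failure paths in mlx4_init_hca() no longer unmap the firmware area themselves; presumably that teardown belongs to the caller, and doing it in both places risks a double unmap. A sketch of that ownership rule (names and resources are illustrative, not the mlx4 API):

#include <stdio.h>
#include <stdlib.h>

static int init_hca(char *fw_area)
{
	char *icm;

	if (!fw_area)
		return -22;		/* -EINVAL */

	icm = malloc(64);
	if (!icm)
		return -12;		/* report only; fw_area stays mapped */

	/* ... use fw_area and icm ... */
	free(icm);			/* callee frees only what it took */
	return 0;
}

int main(void)
{
	char *fw_area = malloc(256);	/* "mapped" by the caller */
	int err;

	if (!fw_area)
		return 1;

	err = init_hca(fw_area);
	if (err)
		fprintf(stderr, "init_hca failed: %d\n", err);

	free(fw_area);			/* single point of release */
	return err ? 1 : 0;
}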
diff --git a/drivers/net/ethernet/mellanox/mlx4/mr.c b/drivers/net/ethernet/mellanox/mlx4/mr.c index d6f549685c0f..7094a9c70fd5 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mr.c +++ b/drivers/net/ethernet/mellanox/mlx4/mr.c | |||
| @@ -584,6 +584,7 @@ EXPORT_SYMBOL_GPL(mlx4_mr_free); | |||
| 584 | void mlx4_mr_rereg_mem_cleanup(struct mlx4_dev *dev, struct mlx4_mr *mr) | 584 | void mlx4_mr_rereg_mem_cleanup(struct mlx4_dev *dev, struct mlx4_mr *mr) |
| 585 | { | 585 | { |
| 586 | mlx4_mtt_cleanup(dev, &mr->mtt); | 586 | mlx4_mtt_cleanup(dev, &mr->mtt); |
| 587 | mr->mtt.order = -1; | ||
| 587 | } | 588 | } |
| 588 | EXPORT_SYMBOL_GPL(mlx4_mr_rereg_mem_cleanup); | 589 | EXPORT_SYMBOL_GPL(mlx4_mr_rereg_mem_cleanup); |
| 589 | 590 | ||
| @@ -593,14 +594,14 @@ int mlx4_mr_rereg_mem_write(struct mlx4_dev *dev, struct mlx4_mr *mr, | |||
| 593 | { | 594 | { |
| 594 | int err; | 595 | int err; |
| 595 | 596 | ||
| 596 | mpt_entry->start = cpu_to_be64(iova); | ||
| 597 | mpt_entry->length = cpu_to_be64(size); | ||
| 598 | mpt_entry->entity_size = cpu_to_be32(page_shift); | ||
| 599 | |||
| 600 | err = mlx4_mtt_init(dev, npages, page_shift, &mr->mtt); | 597 | err = mlx4_mtt_init(dev, npages, page_shift, &mr->mtt); |
| 601 | if (err) | 598 | if (err) |
| 602 | return err; | 599 | return err; |
| 603 | 600 | ||
| 601 | mpt_entry->start = cpu_to_be64(mr->iova); | ||
| 602 | mpt_entry->length = cpu_to_be64(mr->size); | ||
| 603 | mpt_entry->entity_size = cpu_to_be32(mr->mtt.page_shift); | ||
| 604 | |||
| 604 | mpt_entry->pd_flags &= cpu_to_be32(MLX4_MPT_PD_MASK | | 605 | mpt_entry->pd_flags &= cpu_to_be32(MLX4_MPT_PD_MASK | |
| 605 | MLX4_MPT_PD_FLAG_EN_INV); | 606 | MLX4_MPT_PD_FLAG_EN_INV); |
| 606 | mpt_entry->flags &= cpu_to_be32(MLX4_MPT_FLAG_FREE | | 607 | mpt_entry->flags &= cpu_to_be32(MLX4_MPT_FLAG_FREE | |
diff --git a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c index af099057f0e9..71af98bb72cb 100644 --- a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c +++ b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c | |||
| @@ -4033,8 +4033,10 @@ static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
| 4033 | (void)pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); | 4033 | (void)pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); |
| 4034 | mgp->cmd = dma_alloc_coherent(&pdev->dev, sizeof(*mgp->cmd), | 4034 | mgp->cmd = dma_alloc_coherent(&pdev->dev, sizeof(*mgp->cmd), |
| 4035 | &mgp->cmd_bus, GFP_KERNEL); | 4035 | &mgp->cmd_bus, GFP_KERNEL); |
| 4036 | if (mgp->cmd == NULL) | 4036 | if (!mgp->cmd) { |
| 4037 | status = -ENOMEM; | ||
| 4037 | goto abort_with_enabled; | 4038 | goto abort_with_enabled; |
| 4039 | } | ||
| 4038 | 4040 | ||
| 4039 | mgp->board_span = pci_resource_len(pdev, 0); | 4041 | mgp->board_span = pci_resource_len(pdev, 0); |
| 4040 | mgp->iomem_base = pci_resource_start(pdev, 0); | 4042 | mgp->iomem_base = pci_resource_start(pdev, 0); |
diff --git a/drivers/net/ethernet/qlogic/qla3xxx.c b/drivers/net/ethernet/qlogic/qla3xxx.c index c2f09af5c25b..4847713211ca 100644 --- a/drivers/net/ethernet/qlogic/qla3xxx.c +++ b/drivers/net/ethernet/qlogic/qla3xxx.c | |||
| @@ -146,10 +146,7 @@ static int ql_wait_for_drvr_lock(struct ql3_adapter *qdev) | |||
| 146 | { | 146 | { |
| 147 | int i = 0; | 147 | int i = 0; |
| 148 | 148 | ||
| 149 | while (i < 10) { | 149 | do { |
| 150 | if (i) | ||
| 151 | ssleep(1); | ||
| 152 | |||
| 153 | if (ql_sem_lock(qdev, | 150 | if (ql_sem_lock(qdev, |
| 154 | QL_DRVR_SEM_MASK, | 151 | QL_DRVR_SEM_MASK, |
| 155 | (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) | 152 | (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) |
| @@ -158,7 +155,8 @@ static int ql_wait_for_drvr_lock(struct ql3_adapter *qdev) | |||
| 158 | "driver lock acquired\n"); | 155 | "driver lock acquired\n"); |
| 159 | return 1; | 156 | return 1; |
| 160 | } | 157 | } |
| 161 | } | 158 | ssleep(1); |
| 159 | } while (++i < 10); | ||
| 162 | 160 | ||
| 163 | netdev_err(qdev->ndev, "Timed out waiting for driver lock...\n"); | 161 | netdev_err(qdev->ndev, "Timed out waiting for driver lock...\n"); |
| 164 | return 0; | 162 | return 0; |
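Rewriting the qla3xxx loop as do/while makes the first attempt immediate and sleeps only between retries, instead of guarding the sleep with an "if (i)" check. The retry pattern in isolation (the lock helper below is a stand-in for ql_sem_lock()):

#include <stdio.h>
#include <unistd.h>

static int try_lock(int succeed_on, int attempt)
{
	return attempt >= succeed_on;
}

static int wait_for_lock(void)
{
	int i = 0;

	do {
		if (try_lock(3, i)) {
			printf("lock acquired on attempt %d\n", i);
			return 1;
		}
		sleep(1);		/* only between attempts */
	} while (++i < 10);

	fprintf(stderr, "timed out waiting for lock\n");
	return 0;
}

int main(void)
{
	return wait_for_lock() ? 0 : 1;
}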
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c index 9929b97cfb36..2528c3fb6b90 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c | |||
| @@ -2605,6 +2605,7 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
| 2605 | } else { | 2605 | } else { |
| 2606 | dev_err(&pdev->dev, | 2606 | dev_err(&pdev->dev, |
| 2607 | "%s: failed. Please Reboot\n", __func__); | 2607 | "%s: failed. Please Reboot\n", __func__); |
| 2608 | err = -ENODEV; | ||
| 2608 | goto err_out_free_hw; | 2609 | goto err_out_free_hw; |
| 2609 | } | 2610 | } |
| 2610 | 2611 | ||
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c index c560f9aeb55d..e61ee8351272 100644 --- a/drivers/net/ethernet/ti/cpsw.c +++ b/drivers/net/ethernet/ti/cpsw.c | |||
| @@ -757,6 +757,14 @@ requeue: | |||
| 757 | static irqreturn_t cpsw_interrupt(int irq, void *dev_id) | 757 | static irqreturn_t cpsw_interrupt(int irq, void *dev_id) |
| 758 | { | 758 | { |
| 759 | struct cpsw_priv *priv = dev_id; | 759 | struct cpsw_priv *priv = dev_id; |
| 760 | int value = irq - priv->irqs_table[0]; | ||
| 761 | |||
| 762 | /* NOTICE: Ending IRQ here. The trick with the 'value' variable above | ||
| 763 | * is to make sure we will always write the correct value to the EOI | ||
| 764 | * register. Namely 0 for RX_THRESH Interrupt, 1 for RX Interrupt, 2 | ||
| 765 | * for TX Interrupt and 3 for MISC Interrupt. | ||
| 766 | */ | ||
| 767 | cpdma_ctlr_eoi(priv->dma, value); | ||
| 760 | 768 | ||
| 761 | cpsw_intr_disable(priv); | 769 | cpsw_intr_disable(priv); |
| 762 | if (priv->irq_enabled == true) { | 770 | if (priv->irq_enabled == true) { |
| @@ -786,8 +794,6 @@ static int cpsw_poll(struct napi_struct *napi, int budget) | |||
| 786 | int num_tx, num_rx; | 794 | int num_tx, num_rx; |
| 787 | 795 | ||
| 788 | num_tx = cpdma_chan_process(priv->txch, 128); | 796 | num_tx = cpdma_chan_process(priv->txch, 128); |
| 789 | if (num_tx) | ||
| 790 | cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_TX); | ||
| 791 | 797 | ||
| 792 | num_rx = cpdma_chan_process(priv->rxch, budget); | 798 | num_rx = cpdma_chan_process(priv->rxch, budget); |
| 793 | if (num_rx < budget) { | 799 | if (num_rx < budget) { |
| @@ -795,7 +801,6 @@ static int cpsw_poll(struct napi_struct *napi, int budget) | |||
| 795 | 801 | ||
| 796 | napi_complete(napi); | 802 | napi_complete(napi); |
| 797 | cpsw_intr_enable(priv); | 803 | cpsw_intr_enable(priv); |
| 798 | cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_RX); | ||
| 799 | prim_cpsw = cpsw_get_slave_priv(priv, 0); | 804 | prim_cpsw = cpsw_get_slave_priv(priv, 0); |
| 800 | if (prim_cpsw->irq_enabled == false) { | 805 | if (prim_cpsw->irq_enabled == false) { |
| 801 | prim_cpsw->irq_enabled = true; | 806 | prim_cpsw->irq_enabled = true; |
| @@ -1310,8 +1315,6 @@ static int cpsw_ndo_open(struct net_device *ndev) | |||
| 1310 | napi_enable(&priv->napi); | 1315 | napi_enable(&priv->napi); |
| 1311 | cpdma_ctlr_start(priv->dma); | 1316 | cpdma_ctlr_start(priv->dma); |
| 1312 | cpsw_intr_enable(priv); | 1317 | cpsw_intr_enable(priv); |
| 1313 | cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_RX); | ||
| 1314 | cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_TX); | ||
| 1315 | 1318 | ||
| 1316 | prim_cpsw = cpsw_get_slave_priv(priv, 0); | 1319 | prim_cpsw = cpsw_get_slave_priv(priv, 0); |
| 1317 | if (prim_cpsw->irq_enabled == false) { | 1320 | if (prim_cpsw->irq_enabled == false) { |
| @@ -1578,9 +1581,6 @@ static void cpsw_ndo_tx_timeout(struct net_device *ndev) | |||
| 1578 | cpdma_chan_start(priv->txch); | 1581 | cpdma_chan_start(priv->txch); |
| 1579 | cpdma_ctlr_int_ctrl(priv->dma, true); | 1582 | cpdma_ctlr_int_ctrl(priv->dma, true); |
| 1580 | cpsw_intr_enable(priv); | 1583 | cpsw_intr_enable(priv); |
| 1581 | cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_RX); | ||
| 1582 | cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_TX); | ||
| 1583 | |||
| 1584 | } | 1584 | } |
| 1585 | 1585 | ||
| 1586 | static int cpsw_ndo_set_mac_address(struct net_device *ndev, void *p) | 1586 | static int cpsw_ndo_set_mac_address(struct net_device *ndev, void *p) |
| @@ -1620,9 +1620,6 @@ static void cpsw_ndo_poll_controller(struct net_device *ndev) | |||
| 1620 | cpsw_interrupt(ndev->irq, priv); | 1620 | cpsw_interrupt(ndev->irq, priv); |
| 1621 | cpdma_ctlr_int_ctrl(priv->dma, true); | 1621 | cpdma_ctlr_int_ctrl(priv->dma, true); |
| 1622 | cpsw_intr_enable(priv); | 1622 | cpsw_intr_enable(priv); |
| 1623 | cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_RX); | ||
| 1624 | cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_TX); | ||
| 1625 | |||
| 1626 | } | 1623 | } |
| 1627 | #endif | 1624 | #endif |
| 1628 | 1625 | ||
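The EOI write moves into the cpsw interrupt handler itself; because the four interrupt lines are requested consecutively, the offset from the first entry in irqs_table[] is exactly the value to write (0 RX_THRESH, 1 RX, 2 TX, 3 MISC, as the added comment notes). A toy version of that mapping with made-up IRQ numbers:

#include <stdio.h>

enum { EOI_RX_THRESH = 0, EOI_RX = 1, EOI_TX = 2, EOI_MISC = 3 };

static int irqs_table[4] = { 40, 41, 42, 43 };	/* hypothetical IRQ numbers */

static int eoi_value(int irq)
{
	return irq - irqs_table[0];
}

int main(void)
{
	printf("irq 41 -> EOI %d (expect %d, RX)\n", eoi_value(41), EOI_RX);
	printf("irq 42 -> EOI %d (expect %d, TX)\n", eoi_value(42), EOI_TX);
	return 0;
}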
diff --git a/drivers/net/ethernet/xilinx/ll_temac_main.c b/drivers/net/ethernet/xilinx/ll_temac_main.c index 9c2d91ea0af4..dbcbf0c5bcfa 100644 --- a/drivers/net/ethernet/xilinx/ll_temac_main.c +++ b/drivers/net/ethernet/xilinx/ll_temac_main.c | |||
| @@ -1043,6 +1043,7 @@ static int temac_of_probe(struct platform_device *op) | |||
| 1043 | lp->regs = of_iomap(op->dev.of_node, 0); | 1043 | lp->regs = of_iomap(op->dev.of_node, 0); |
| 1044 | if (!lp->regs) { | 1044 | if (!lp->regs) { |
| 1045 | dev_err(&op->dev, "could not map temac regs.\n"); | 1045 | dev_err(&op->dev, "could not map temac regs.\n"); |
| 1046 | rc = -ENOMEM; | ||
| 1046 | goto nodev; | 1047 | goto nodev; |
| 1047 | } | 1048 | } |
| 1048 | 1049 | ||
| @@ -1062,6 +1063,7 @@ static int temac_of_probe(struct platform_device *op) | |||
| 1062 | np = of_parse_phandle(op->dev.of_node, "llink-connected", 0); | 1063 | np = of_parse_phandle(op->dev.of_node, "llink-connected", 0); |
| 1063 | if (!np) { | 1064 | if (!np) { |
| 1064 | dev_err(&op->dev, "could not find DMA node\n"); | 1065 | dev_err(&op->dev, "could not find DMA node\n"); |
| 1066 | rc = -ENODEV; | ||
| 1065 | goto err_iounmap; | 1067 | goto err_iounmap; |
| 1066 | } | 1068 | } |
| 1067 | 1069 | ||
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c index c18a0c637c44..a6d2860b712c 100644 --- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c +++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c | |||
| @@ -1501,6 +1501,7 @@ static int axienet_of_probe(struct platform_device *op) | |||
| 1501 | lp->regs = of_iomap(op->dev.of_node, 0); | 1501 | lp->regs = of_iomap(op->dev.of_node, 0); |
| 1502 | if (!lp->regs) { | 1502 | if (!lp->regs) { |
| 1503 | dev_err(&op->dev, "could not map Axi Ethernet regs.\n"); | 1503 | dev_err(&op->dev, "could not map Axi Ethernet regs.\n"); |
| 1504 | ret = -ENOMEM; | ||
| 1504 | goto nodev; | 1505 | goto nodev; |
| 1505 | } | 1506 | } |
| 1506 | /* Setup checksum offload, but default to off if not specified */ | 1507 | /* Setup checksum offload, but default to off if not specified */ |
| @@ -1563,6 +1564,7 @@ static int axienet_of_probe(struct platform_device *op) | |||
| 1563 | np = of_parse_phandle(op->dev.of_node, "axistream-connected", 0); | 1564 | np = of_parse_phandle(op->dev.of_node, "axistream-connected", 0); |
| 1564 | if (!np) { | 1565 | if (!np) { |
| 1565 | dev_err(&op->dev, "could not find DMA node\n"); | 1566 | dev_err(&op->dev, "could not find DMA node\n"); |
| 1567 | ret = -ENODEV; | ||
| 1566 | goto err_iounmap; | 1568 | goto err_iounmap; |
| 1567 | } | 1569 | } |
| 1568 | lp->dma_regs = of_iomap(np, 0); | 1570 | lp->dma_regs = of_iomap(np, 0); |
diff --git a/drivers/net/ethernet/xilinx/xilinx_emaclite.c b/drivers/net/ethernet/xilinx/xilinx_emaclite.c index 24858799c204..9d4ce388510a 100644 --- a/drivers/net/ethernet/xilinx/xilinx_emaclite.c +++ b/drivers/net/ethernet/xilinx/xilinx_emaclite.c | |||
| @@ -1109,6 +1109,7 @@ static int xemaclite_of_probe(struct platform_device *ofdev) | |||
| 1109 | res = platform_get_resource(ofdev, IORESOURCE_IRQ, 0); | 1109 | res = platform_get_resource(ofdev, IORESOURCE_IRQ, 0); |
| 1110 | if (!res) { | 1110 | if (!res) { |
| 1111 | dev_err(dev, "no IRQ found\n"); | 1111 | dev_err(dev, "no IRQ found\n"); |
| 1112 | rc = -ENXIO; | ||
| 1112 | goto error; | 1113 | goto error; |
| 1113 | } | 1114 | } |
| 1114 | 1115 | ||
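The three Xilinx hunks above (ll_temac, axienet, emaclite) apply the same fix: of_iomap(), of_parse_phandle() and platform_get_resource() signal failure by returning NULL rather than an ERR_PTR, so the probe routine has to set an explicit error code before jumping to its cleanup label, otherwise it returns whatever value rc/ret last held. A minimal sketch of that pattern follows; foo_probe() and the "dma-handle" property are illustrative stand-ins, not taken from any of these drivers.

```c
#include <linux/err.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>

static int foo_probe(struct platform_device *pdev)
{
	struct device_node *np;
	void __iomem *regs;
	int rc = 0;

	regs = of_iomap(pdev->dev.of_node, 0);
	if (!regs) {
		dev_err(&pdev->dev, "could not map registers\n");
		rc = -ENOMEM;		/* NULL carries no error code, so pick one */
		goto out;
	}

	np = of_parse_phandle(pdev->dev.of_node, "dma-handle", 0);
	if (!np) {
		dev_err(&pdev->dev, "could not find DMA node\n");
		rc = -ENODEV;		/* same idea: make the failure explicit */
		goto out_unmap;
	}
	of_node_put(np);
	return 0;			/* real code would stash regs in driver data */

out_unmap:
	iounmap(regs);
out:
	return rc;
}
```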
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c index b8a82b86f909..602dc6668c3a 100644 --- a/drivers/net/usb/qmi_wwan.c +++ b/drivers/net/usb/qmi_wwan.c | |||
| @@ -56,6 +56,8 @@ struct qmi_wwan_state { | |||
| 56 | /* default ethernet address used by the modem */ | 56 | /* default ethernet address used by the modem */ |
| 57 | static const u8 default_modem_addr[ETH_ALEN] = {0x02, 0x50, 0xf3}; | 57 | static const u8 default_modem_addr[ETH_ALEN] = {0x02, 0x50, 0xf3}; |
| 58 | 58 | ||
| 59 | static const u8 buggy_fw_addr[ETH_ALEN] = {0x00, 0xa0, 0xc6, 0x00, 0x00, 0x00}; | ||
| 60 | |||
| 59 | /* Make up an ethernet header if the packet doesn't have one. | 61 | /* Make up an ethernet header if the packet doesn't have one. |
| 60 | * | 62 | * |
| 61 | * A firmware bug common among several devices cause them to send raw | 63 | * A firmware bug common among several devices cause them to send raw |
| @@ -332,10 +334,12 @@ next_desc: | |||
| 332 | usb_driver_release_interface(driver, info->data); | 334 | usb_driver_release_interface(driver, info->data); |
| 333 | } | 335 | } |
| 334 | 336 | ||
| 335 | /* Never use the same address on both ends of the link, even | 337 | /* Never use the same address on both ends of the link, even if the |
| 336 | * if the buggy firmware told us to. | 338 | * buggy firmware told us to. Or, if device is assigned the well-known |
| 339 | * buggy firmware MAC address, replace it with a random address. | ||
| 337 | */ | 340 | */ |
| 338 | if (ether_addr_equal(dev->net->dev_addr, default_modem_addr)) | 341 | if (ether_addr_equal(dev->net->dev_addr, default_modem_addr) || |
| 342 | ether_addr_equal(dev->net->dev_addr, buggy_fw_addr)) | ||
| 339 | eth_hw_addr_random(dev->net); | 343 | eth_hw_addr_random(dev->net); |
| 340 | 344 | ||
| 341 | /* make MAC addr easily distinguishable from an IP header */ | 345 | /* make MAC addr easily distinguishable from an IP header */ |
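For reference on the qmi_wwan hunk: ether_addr_equal() and eth_hw_addr_random() are existing helpers from <linux/etherdevice.h>, so the added check boils down to the sketch below; fixup_bad_macaddr() and reserved_addr are illustrative names, not the driver's own.

```c
#include <linux/etherdevice.h>
#include <linux/netdevice.h>

/* Replace a known-bad or mirrored MAC address with a random one. */
static void fixup_bad_macaddr(struct net_device *net, const u8 *reserved_addr)
{
	/* A dev_addr that matches the modem's own address, or the well-known
	 * address shipped by buggy firmware, must not be used on the host side. */
	if (ether_addr_equal(net->dev_addr, reserved_addr))
		eth_hw_addr_random(net);  /* assigns a locally administered random MAC */
}
```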
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c index 2d1c77e81836..57ec23e8ccfa 100644 --- a/drivers/net/usb/r8152.c +++ b/drivers/net/usb/r8152.c | |||
| @@ -1897,6 +1897,22 @@ static void _rtl8152_set_rx_mode(struct net_device *netdev) | |||
| 1897 | netif_wake_queue(netdev); | 1897 | netif_wake_queue(netdev); |
| 1898 | } | 1898 | } |
| 1899 | 1899 | ||
| 1900 | static netdev_features_t | ||
| 1901 | rtl8152_features_check(struct sk_buff *skb, struct net_device *dev, | ||
| 1902 | netdev_features_t features) | ||
| 1903 | { | ||
| 1904 | u32 mss = skb_shinfo(skb)->gso_size; | ||
| 1905 | int max_offset = mss ? GTTCPHO_MAX : TCPHO_MAX; | ||
| 1906 | int offset = skb_transport_offset(skb); | ||
| 1907 | |||
| 1908 | if ((mss || skb->ip_summed == CHECKSUM_PARTIAL) && offset > max_offset) | ||
| 1909 | features &= ~(NETIF_F_ALL_CSUM | NETIF_F_GSO_MASK); | ||
| 1910 | else if ((skb->len + sizeof(struct tx_desc)) > agg_buf_sz) | ||
| 1911 | features &= ~NETIF_F_GSO_MASK; | ||
| 1912 | |||
| 1913 | return features; | ||
| 1914 | } | ||
| 1915 | |||
| 1900 | static netdev_tx_t rtl8152_start_xmit(struct sk_buff *skb, | 1916 | static netdev_tx_t rtl8152_start_xmit(struct sk_buff *skb, |
| 1901 | struct net_device *netdev) | 1917 | struct net_device *netdev) |
| 1902 | { | 1918 | { |
| @@ -3706,6 +3722,7 @@ static const struct net_device_ops rtl8152_netdev_ops = { | |||
| 3706 | .ndo_set_mac_address = rtl8152_set_mac_address, | 3722 | .ndo_set_mac_address = rtl8152_set_mac_address, |
| 3707 | .ndo_change_mtu = rtl8152_change_mtu, | 3723 | .ndo_change_mtu = rtl8152_change_mtu, |
| 3708 | .ndo_validate_addr = eth_validate_addr, | 3724 | .ndo_validate_addr = eth_validate_addr, |
| 3725 | .ndo_features_check = rtl8152_features_check, | ||
| 3709 | }; | 3726 | }; |
| 3710 | 3727 | ||
| 3711 | static void r8152b_get_version(struct r8152 *tp) | 3728 | static void r8152b_get_version(struct r8152 *tp) |
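Background for the r8152 hunk: .ndo_features_check is a per-packet hook in struct net_device_ops that lets a driver withdraw checksum or TSO offload for frames its hardware cannot handle, instead of disabling the feature globally. Here the driver drops the offloads when the transport header lies beyond what the TX descriptor can encode, and drops GSO alone when the frame would overflow the aggregation buffer. A stripped-down sketch of the same shape, with made-up HW_MAX_HDR_OFFSET and HW_TX_BUF_SIZE limits standing in for the driver's GTTCPHO_MAX/TCPHO_MAX and agg_buf_sz:

```c
#include <linux/netdevice.h>
#include <linux/skbuff.h>

#define HW_MAX_HDR_OFFSET	127	/* hypothetical descriptor limit */
#define HW_TX_BUF_SIZE		16384	/* hypothetical TX aggregation buffer size */

static netdev_features_t
foo_features_check(struct sk_buff *skb, struct net_device *dev,
		   netdev_features_t features)
{
	bool offloading = skb_shinfo(skb)->gso_size ||
			  skb->ip_summed == CHECKSUM_PARTIAL;

	/* Transport header offset the hardware cannot express: use software. */
	if (offloading && skb_transport_offset(skb) > HW_MAX_HDR_OFFSET)
		features &= ~(NETIF_F_ALL_CSUM | NETIF_F_GSO_MASK);
	/* Frame too large for the TX buffer: disable segmentation only. */
	else if (skb->len > HW_TX_BUF_SIZE)
		features &= ~NETIF_F_GSO_MASK;

	return features;
}
```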
diff --git a/drivers/net/xen-netback/xenbus.c b/drivers/net/xen-netback/xenbus.c index efbaf2ae1999..794204e34fba 100644 --- a/drivers/net/xen-netback/xenbus.c +++ b/drivers/net/xen-netback/xenbus.c | |||
| @@ -737,6 +737,7 @@ static void connect(struct backend_info *be) | |||
| 737 | } | 737 | } |
| 738 | 738 | ||
| 739 | queue->remaining_credit = credit_bytes; | 739 | queue->remaining_credit = credit_bytes; |
| 740 | queue->credit_usec = credit_usec; | ||
| 740 | 741 | ||
| 741 | err = connect_rings(be, queue); | 742 | err = connect_rings(be, queue); |
| 742 | if (err) { | 743 | if (err) { |
diff --git a/drivers/pinctrl/pinctrl-rockchip.c b/drivers/pinctrl/pinctrl-rockchip.c index ba74f0aa60c7..3c22dbebc80f 100644 --- a/drivers/pinctrl/pinctrl-rockchip.c +++ b/drivers/pinctrl/pinctrl-rockchip.c | |||
| @@ -89,6 +89,7 @@ struct rockchip_iomux { | |||
| 89 | * @reg_pull: optional separate register for additional pull settings | 89 | * @reg_pull: optional separate register for additional pull settings |
| 90 | * @clk: clock of the gpio bank | 90 | * @clk: clock of the gpio bank |
| 91 | * @irq: interrupt of the gpio bank | 91 | * @irq: interrupt of the gpio bank |
| 92 | * @saved_enables: Saved content of GPIO_INTEN at suspend time. | ||
| 92 | * @pin_base: first pin number | 93 | * @pin_base: first pin number |
| 93 | * @nr_pins: number of pins in this bank | 94 | * @nr_pins: number of pins in this bank |
| 94 | * @name: name of the bank | 95 | * @name: name of the bank |
| @@ -107,6 +108,7 @@ struct rockchip_pin_bank { | |||
| 107 | struct regmap *regmap_pull; | 108 | struct regmap *regmap_pull; |
| 108 | struct clk *clk; | 109 | struct clk *clk; |
| 109 | int irq; | 110 | int irq; |
| 111 | u32 saved_enables; | ||
| 110 | u32 pin_base; | 112 | u32 pin_base; |
| 111 | u8 nr_pins; | 113 | u8 nr_pins; |
| 112 | char *name; | 114 | char *name; |
| @@ -1543,6 +1545,51 @@ static int rockchip_irq_set_type(struct irq_data *d, unsigned int type) | |||
| 1543 | return 0; | 1545 | return 0; |
| 1544 | } | 1546 | } |
| 1545 | 1547 | ||
| 1548 | static void rockchip_irq_suspend(struct irq_data *d) | ||
| 1549 | { | ||
| 1550 | struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d); | ||
| 1551 | struct rockchip_pin_bank *bank = gc->private; | ||
| 1552 | |||
| 1553 | bank->saved_enables = irq_reg_readl(gc, GPIO_INTEN); | ||
| 1554 | irq_reg_writel(gc, gc->wake_active, GPIO_INTEN); | ||
| 1555 | } | ||
| 1556 | |||
| 1557 | static void rockchip_irq_resume(struct irq_data *d) | ||
| 1558 | { | ||
| 1559 | struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d); | ||
| 1560 | struct rockchip_pin_bank *bank = gc->private; | ||
| 1561 | |||
| 1562 | irq_reg_writel(gc, bank->saved_enables, GPIO_INTEN); | ||
| 1563 | } | ||
| 1564 | |||
| 1565 | static void rockchip_irq_disable(struct irq_data *d) | ||
| 1566 | { | ||
| 1567 | struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d); | ||
| 1568 | u32 val; | ||
| 1569 | |||
| 1570 | irq_gc_lock(gc); | ||
| 1571 | |||
| 1572 | val = irq_reg_readl(gc, GPIO_INTEN); | ||
| 1573 | val &= ~d->mask; | ||
| 1574 | irq_reg_writel(gc, val, GPIO_INTEN); | ||
| 1575 | |||
| 1576 | irq_gc_unlock(gc); | ||
| 1577 | } | ||
| 1578 | |||
| 1579 | static void rockchip_irq_enable(struct irq_data *d) | ||
| 1580 | { | ||
| 1581 | struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d); | ||
| 1582 | u32 val; | ||
| 1583 | |||
| 1584 | irq_gc_lock(gc); | ||
| 1585 | |||
| 1586 | val = irq_reg_readl(gc, GPIO_INTEN); | ||
| 1587 | val |= d->mask; | ||
| 1588 | irq_reg_writel(gc, val, GPIO_INTEN); | ||
| 1589 | |||
| 1590 | irq_gc_unlock(gc); | ||
| 1591 | } | ||
| 1592 | |||
| 1546 | static int rockchip_interrupts_register(struct platform_device *pdev, | 1593 | static int rockchip_interrupts_register(struct platform_device *pdev, |
| 1547 | struct rockchip_pinctrl *info) | 1594 | struct rockchip_pinctrl *info) |
| 1548 | { | 1595 | { |
| @@ -1581,12 +1628,16 @@ static int rockchip_interrupts_register(struct platform_device *pdev, | |||
| 1581 | gc = irq_get_domain_generic_chip(bank->domain, 0); | 1628 | gc = irq_get_domain_generic_chip(bank->domain, 0); |
| 1582 | gc->reg_base = bank->reg_base; | 1629 | gc->reg_base = bank->reg_base; |
| 1583 | gc->private = bank; | 1630 | gc->private = bank; |
| 1584 | gc->chip_types[0].regs.mask = GPIO_INTEN; | 1631 | gc->chip_types[0].regs.mask = GPIO_INTMASK; |
| 1585 | gc->chip_types[0].regs.ack = GPIO_PORTS_EOI; | 1632 | gc->chip_types[0].regs.ack = GPIO_PORTS_EOI; |
| 1586 | gc->chip_types[0].chip.irq_ack = irq_gc_ack_set_bit; | 1633 | gc->chip_types[0].chip.irq_ack = irq_gc_ack_set_bit; |
| 1587 | gc->chip_types[0].chip.irq_mask = irq_gc_mask_clr_bit; | 1634 | gc->chip_types[0].chip.irq_mask = irq_gc_mask_set_bit; |
| 1588 | gc->chip_types[0].chip.irq_unmask = irq_gc_mask_set_bit; | 1635 | gc->chip_types[0].chip.irq_unmask = irq_gc_mask_clr_bit; |
| 1636 | gc->chip_types[0].chip.irq_enable = rockchip_irq_enable; | ||
| 1637 | gc->chip_types[0].chip.irq_disable = rockchip_irq_disable; | ||
| 1589 | gc->chip_types[0].chip.irq_set_wake = irq_gc_set_wake; | 1638 | gc->chip_types[0].chip.irq_set_wake = irq_gc_set_wake; |
| 1639 | gc->chip_types[0].chip.irq_suspend = rockchip_irq_suspend; | ||
| 1640 | gc->chip_types[0].chip.irq_resume = rockchip_irq_resume; | ||
| 1590 | gc->chip_types[0].chip.irq_set_type = rockchip_irq_set_type; | 1641 | gc->chip_types[0].chip.irq_set_type = rockchip_irq_set_type; |
| 1591 | gc->wake_enabled = IRQ_MSK(bank->nr_pins); | 1642 | gc->wake_enabled = IRQ_MSK(bank->nr_pins); |
| 1592 | 1643 | ||
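The pinctrl-rockchip hunk does two related things: it points the generic irq_chip mask/ack handling at the real mask register (GPIO_INTMASK) and adds explicit enable/disable callbacks for GPIO_INTEN, and it saves the enable register across suspend so that only the wake sources recorded in gc->wake_active stay armed while the system sleeps. The suspend/resume half of that pattern, restated as a hedged sketch against a hypothetical REG_INTEN offset:

```c
#include <linux/irq.h>

#define REG_INTEN	0x30		/* hypothetical interrupt-enable register offset */

struct foo_bank {
	u32 saved_enables;		/* enable-register content captured at suspend */
};

static void foo_irq_suspend(struct irq_data *d)
{
	struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
	struct foo_bank *bank = gc->private;

	/* Remember what was enabled, then leave only the wake sources on. */
	bank->saved_enables = irq_reg_readl(gc, REG_INTEN);
	irq_reg_writel(gc, gc->wake_active, REG_INTEN);
}

static void foo_irq_resume(struct irq_data *d)
{
	struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
	struct foo_bank *bank = gc->private;

	/* Put the pre-suspend enable mask back. */
	irq_reg_writel(gc, bank->saved_enables, REG_INTEN);
}
```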
diff --git a/drivers/pinctrl/pinctrl-st.c b/drivers/pinctrl/pinctrl-st.c index 7c9d51382248..9e5ec00084bb 100644 --- a/drivers/pinctrl/pinctrl-st.c +++ b/drivers/pinctrl/pinctrl-st.c | |||
| @@ -1012,8 +1012,10 @@ static void st_pinconf_dbg_show(struct pinctrl_dev *pctldev, | |||
| 1012 | struct seq_file *s, unsigned pin_id) | 1012 | struct seq_file *s, unsigned pin_id) |
| 1013 | { | 1013 | { |
| 1014 | unsigned long config; | 1014 | unsigned long config; |
| 1015 | st_pinconf_get(pctldev, pin_id, &config); | ||
| 1016 | 1015 | ||
| 1016 | mutex_unlock(&pctldev->mutex); | ||
| 1017 | st_pinconf_get(pctldev, pin_id, &config); | ||
| 1018 | mutex_lock(&pctldev->mutex); | ||
| 1017 | seq_printf(s, "[OE:%ld,PU:%ld,OD:%ld]\n" | 1019 | seq_printf(s, "[OE:%ld,PU:%ld,OD:%ld]\n" |
| 1018 | "\t\t[retime:%ld,invclk:%ld,clknotdat:%ld," | 1020 | "\t\t[retime:%ld,invclk:%ld,clknotdat:%ld," |
| 1019 | "de:%ld,rt-clk:%ld,rt-delay:%ld]", | 1021 | "de:%ld,rt-clk:%ld,rt-delay:%ld]", |
| @@ -1443,6 +1445,7 @@ static struct gpio_chip st_gpio_template = { | |||
| 1443 | 1445 | ||
| 1444 | static struct irq_chip st_gpio_irqchip = { | 1446 | static struct irq_chip st_gpio_irqchip = { |
| 1445 | .name = "GPIO", | 1447 | .name = "GPIO", |
| 1448 | .irq_disable = st_gpio_irq_mask, | ||
| 1446 | .irq_mask = st_gpio_irq_mask, | 1449 | .irq_mask = st_gpio_irq_mask, |
| 1447 | .irq_unmask = st_gpio_irq_unmask, | 1450 | .irq_unmask = st_gpio_irq_unmask, |
| 1448 | .irq_set_type = st_gpio_irq_set_type, | 1451 | .irq_set_type = st_gpio_irq_set_type, |
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c index 14419a8ccbb6..d415d69dc237 100644 --- a/drivers/vhost/net.c +++ b/drivers/vhost/net.c | |||
| @@ -538,7 +538,7 @@ static int get_rx_bufs(struct vhost_virtqueue *vq, | |||
| 538 | ++headcount; | 538 | ++headcount; |
| 539 | seg += in; | 539 | seg += in; |
| 540 | } | 540 | } |
| 541 | heads[headcount - 1].len = cpu_to_vhost32(vq, len - datalen); | 541 | heads[headcount - 1].len = cpu_to_vhost32(vq, len + datalen); |
| 542 | *iovcount = seg; | 542 | *iovcount = seg; |
| 543 | if (unlikely(log)) | 543 | if (unlikely(log)) |
| 544 | *log_num = nlogs; | 544 | *log_num = nlogs; |
diff --git a/drivers/virtio/virtio_pci_common.c b/drivers/virtio/virtio_pci_common.c index 2ef9529809d8..9756f21b809e 100644 --- a/drivers/virtio/virtio_pci_common.c +++ b/drivers/virtio/virtio_pci_common.c | |||
| @@ -282,6 +282,7 @@ void vp_del_vqs(struct virtio_device *vdev) | |||
| 282 | 282 | ||
| 283 | vp_free_vectors(vdev); | 283 | vp_free_vectors(vdev); |
| 284 | kfree(vp_dev->vqs); | 284 | kfree(vp_dev->vqs); |
| 285 | vp_dev->vqs = NULL; | ||
| 285 | } | 286 | } |
| 286 | 287 | ||
| 287 | static int vp_try_to_find_vqs(struct virtio_device *vdev, unsigned nvqs, | 288 | static int vp_try_to_find_vqs(struct virtio_device *vdev, unsigned nvqs, |
| @@ -421,15 +422,6 @@ int vp_set_vq_affinity(struct virtqueue *vq, int cpu) | |||
| 421 | return 0; | 422 | return 0; |
| 422 | } | 423 | } |
| 423 | 424 | ||
| 424 | void virtio_pci_release_dev(struct device *_d) | ||
| 425 | { | ||
| 426 | /* | ||
| 427 | * No need for a release method as we allocate/free | ||
| 428 | * all devices together with the pci devices. | ||
| 429 | * Provide an empty one to avoid getting a warning from core. | ||
| 430 | */ | ||
| 431 | } | ||
| 432 | |||
| 433 | #ifdef CONFIG_PM_SLEEP | 425 | #ifdef CONFIG_PM_SLEEP |
| 434 | static int virtio_pci_freeze(struct device *dev) | 426 | static int virtio_pci_freeze(struct device *dev) |
| 435 | { | 427 | { |
diff --git a/drivers/virtio/virtio_pci_common.h b/drivers/virtio/virtio_pci_common.h index adddb647b21d..5a497289b7e9 100644 --- a/drivers/virtio/virtio_pci_common.h +++ b/drivers/virtio/virtio_pci_common.h | |||
| @@ -126,7 +126,6 @@ const char *vp_bus_name(struct virtio_device *vdev); | |||
| 126 | * - ignore the affinity request if we're using INTX | 126 | * - ignore the affinity request if we're using INTX |
| 127 | */ | 127 | */ |
| 128 | int vp_set_vq_affinity(struct virtqueue *vq, int cpu); | 128 | int vp_set_vq_affinity(struct virtqueue *vq, int cpu); |
| 129 | void virtio_pci_release_dev(struct device *); | ||
| 130 | 129 | ||
| 131 | int virtio_pci_legacy_probe(struct pci_dev *pci_dev, | 130 | int virtio_pci_legacy_probe(struct pci_dev *pci_dev, |
| 132 | const struct pci_device_id *id); | 131 | const struct pci_device_id *id); |
diff --git a/drivers/virtio/virtio_pci_legacy.c b/drivers/virtio/virtio_pci_legacy.c index 6c76f0f5658c..a5486e65e04b 100644 --- a/drivers/virtio/virtio_pci_legacy.c +++ b/drivers/virtio/virtio_pci_legacy.c | |||
| @@ -211,6 +211,17 @@ static const struct virtio_config_ops virtio_pci_config_ops = { | |||
| 211 | .set_vq_affinity = vp_set_vq_affinity, | 211 | .set_vq_affinity = vp_set_vq_affinity, |
| 212 | }; | 212 | }; |
| 213 | 213 | ||
| 214 | static void virtio_pci_release_dev(struct device *_d) | ||
| 215 | { | ||
| 216 | struct virtio_device *vdev = dev_to_virtio(_d); | ||
| 217 | struct virtio_pci_device *vp_dev = to_vp_device(vdev); | ||
| 218 | |||
| 219 | /* As struct device is a kobject, it's not safe to | ||
| 220 | * free the memory (including the reference counter itself) | ||
| 221 | * until its release callback runs. */ | ||
| 222 | kfree(vp_dev); | ||
| 223 | } | ||
| 224 | |||
| 214 | /* the PCI probing function */ | 225 | /* the PCI probing function */ |
| 215 | int virtio_pci_legacy_probe(struct pci_dev *pci_dev, | 226 | int virtio_pci_legacy_probe(struct pci_dev *pci_dev, |
| 216 | const struct pci_device_id *id) | 227 | const struct pci_device_id *id) |
| @@ -302,5 +313,4 @@ void virtio_pci_legacy_remove(struct pci_dev *pci_dev) | |||
| 302 | pci_iounmap(pci_dev, vp_dev->ioaddr); | 313 | pci_iounmap(pci_dev, vp_dev->ioaddr); |
| 303 | pci_release_regions(pci_dev); | 314 | pci_release_regions(pci_dev); |
| 304 | pci_disable_device(pci_dev); | 315 | pci_disable_device(pci_dev); |
| 305 | kfree(vp_dev); | ||
| 306 | } | 316 | } |
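The virtio_pci hunks move kfree(vp_dev) out of the PCI remove path and into the struct device release callback. A struct device is reference counted through its embedded kobject, so the memory that contains it may only be freed once the last reference is dropped, and that is exactly when the release callback fires; freeing it from remove risks a use after free if anything still holds a reference. The general lifetime rule, as a sketch with an illustrative foo_device container:

```c
#include <linux/device.h>
#include <linux/slab.h>

struct foo_device {
	struct device dev;		/* refcounted via the embedded kobject */
	int some_state;
};

static void foo_release(struct device *d)
{
	/* The last reference is gone; only now is it safe to free the container. */
	kfree(container_of(d, struct foo_device, dev));
}

static struct foo_device *foo_alloc(struct device *parent)
{
	struct foo_device *foo = kzalloc(sizeof(*foo), GFP_KERNEL);

	if (!foo)
		return NULL;
	device_initialize(&foo->dev);
	foo->dev.parent = parent;
	foo->dev.release = foo_release;	/* never kfree() foo directly after this */
	return foo;
}
```

Teardown then ends with a put_device() style reference drop rather than a direct kfree(), which is what the trimmed virtio_pci_legacy_remove() above now relies on.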
diff --git a/fs/btrfs/backref.c b/fs/btrfs/backref.c index 2d3e32ebfd15..8729cf68d2fe 100644 --- a/fs/btrfs/backref.c +++ b/fs/btrfs/backref.c | |||
| @@ -1552,7 +1552,6 @@ int tree_backref_for_extent(unsigned long *ptr, struct extent_buffer *eb, | |||
| 1552 | { | 1552 | { |
| 1553 | int ret; | 1553 | int ret; |
| 1554 | int type; | 1554 | int type; |
| 1555 | struct btrfs_tree_block_info *info; | ||
| 1556 | struct btrfs_extent_inline_ref *eiref; | 1555 | struct btrfs_extent_inline_ref *eiref; |
| 1557 | 1556 | ||
| 1558 | if (*ptr == (unsigned long)-1) | 1557 | if (*ptr == (unsigned long)-1) |
| @@ -1573,9 +1572,17 @@ int tree_backref_for_extent(unsigned long *ptr, struct extent_buffer *eb, | |||
| 1573 | } | 1572 | } |
| 1574 | 1573 | ||
| 1575 | /* we can treat both ref types equally here */ | 1574 | /* we can treat both ref types equally here */ |
| 1576 | info = (struct btrfs_tree_block_info *)(ei + 1); | ||
| 1577 | *out_root = btrfs_extent_inline_ref_offset(eb, eiref); | 1575 | *out_root = btrfs_extent_inline_ref_offset(eb, eiref); |
| 1578 | *out_level = btrfs_tree_block_level(eb, info); | 1576 | |
| 1577 | if (key->type == BTRFS_EXTENT_ITEM_KEY) { | ||
| 1578 | struct btrfs_tree_block_info *info; | ||
| 1579 | |||
| 1580 | info = (struct btrfs_tree_block_info *)(ei + 1); | ||
| 1581 | *out_level = btrfs_tree_block_level(eb, info); | ||
| 1582 | } else { | ||
| 1583 | ASSERT(key->type == BTRFS_METADATA_ITEM_KEY); | ||
| 1584 | *out_level = (u8)key->offset; | ||
| 1585 | } | ||
| 1579 | 1586 | ||
| 1580 | if (ret == 1) | 1587 | if (ret == 1) |
| 1581 | *ptr = (unsigned long)-1; | 1588 | *ptr = (unsigned long)-1; |
diff --git a/fs/btrfs/delayed-inode.c b/fs/btrfs/delayed-inode.c index 054577bddaf2..de4e70fb3cbb 100644 --- a/fs/btrfs/delayed-inode.c +++ b/fs/btrfs/delayed-inode.c | |||
| @@ -1857,6 +1857,14 @@ int btrfs_delayed_delete_inode_ref(struct inode *inode) | |||
| 1857 | { | 1857 | { |
| 1858 | struct btrfs_delayed_node *delayed_node; | 1858 | struct btrfs_delayed_node *delayed_node; |
| 1859 | 1859 | ||
| 1860 | /* | ||
| 1861 | * we don't do delayed inode updates during log recovery because it | ||
| 1862 | * leads to enospc problems. This means we also can't do | ||
| 1863 | * delayed inode refs | ||
| 1864 | */ | ||
| 1865 | if (BTRFS_I(inode)->root->fs_info->log_root_recovering) | ||
| 1866 | return -EAGAIN; | ||
| 1867 | |||
| 1860 | delayed_node = btrfs_get_or_create_delayed_node(inode); | 1868 | delayed_node = btrfs_get_or_create_delayed_node(inode); |
| 1861 | if (IS_ERR(delayed_node)) | 1869 | if (IS_ERR(delayed_node)) |
| 1862 | return PTR_ERR(delayed_node); | 1870 | return PTR_ERR(delayed_node); |
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index a80b97100d90..15116585e714 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c | |||
| @@ -3139,9 +3139,11 @@ static int write_one_cache_group(struct btrfs_trans_handle *trans, | |||
| 3139 | struct extent_buffer *leaf; | 3139 | struct extent_buffer *leaf; |
| 3140 | 3140 | ||
| 3141 | ret = btrfs_search_slot(trans, extent_root, &cache->key, path, 0, 1); | 3141 | ret = btrfs_search_slot(trans, extent_root, &cache->key, path, 0, 1); |
| 3142 | if (ret < 0) | 3142 | if (ret) { |
| 3143 | if (ret > 0) | ||
| 3144 | ret = -ENOENT; | ||
| 3143 | goto fail; | 3145 | goto fail; |
| 3144 | BUG_ON(ret); /* Corruption */ | 3146 | } |
| 3145 | 3147 | ||
| 3146 | leaf = path->nodes[0]; | 3148 | leaf = path->nodes[0]; |
| 3147 | bi = btrfs_item_ptr_offset(leaf, path->slots[0]); | 3149 | bi = btrfs_item_ptr_offset(leaf, path->slots[0]); |
| @@ -3149,11 +3151,9 @@ static int write_one_cache_group(struct btrfs_trans_handle *trans, | |||
| 3149 | btrfs_mark_buffer_dirty(leaf); | 3151 | btrfs_mark_buffer_dirty(leaf); |
| 3150 | btrfs_release_path(path); | 3152 | btrfs_release_path(path); |
| 3151 | fail: | 3153 | fail: |
| 3152 | if (ret) { | 3154 | if (ret) |
| 3153 | btrfs_abort_transaction(trans, root, ret); | 3155 | btrfs_abort_transaction(trans, root, ret); |
| 3154 | return ret; | 3156 | return ret; |
| 3155 | } | ||
| 3156 | return 0; | ||
| 3157 | 3157 | ||
| 3158 | } | 3158 | } |
| 3159 | 3159 | ||
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index e687bb0dc73a..8bf326affb94 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c | |||
| @@ -6255,8 +6255,10 @@ static int btrfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode) | |||
| 6255 | 6255 | ||
| 6256 | out_fail: | 6256 | out_fail: |
| 6257 | btrfs_end_transaction(trans, root); | 6257 | btrfs_end_transaction(trans, root); |
| 6258 | if (drop_on_err) | 6258 | if (drop_on_err) { |
| 6259 | inode_dec_link_count(inode); | ||
| 6259 | iput(inode); | 6260 | iput(inode); |
| 6261 | } | ||
| 6260 | btrfs_balance_delayed_items(root); | 6262 | btrfs_balance_delayed_items(root); |
| 6261 | btrfs_btree_balance_dirty(root); | 6263 | btrfs_btree_balance_dirty(root); |
| 6262 | return err; | 6264 | return err; |
diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c index f2bb13a23f86..9e1569ffbf6e 100644 --- a/fs/btrfs/scrub.c +++ b/fs/btrfs/scrub.c | |||
| @@ -2607,9 +2607,9 @@ static int scrub_extent_for_parity(struct scrub_parity *sparity, | |||
| 2607 | ret = scrub_pages_for_parity(sparity, logical, l, physical, dev, | 2607 | ret = scrub_pages_for_parity(sparity, logical, l, physical, dev, |
| 2608 | flags, gen, mirror_num, | 2608 | flags, gen, mirror_num, |
| 2609 | have_csum ? csum : NULL); | 2609 | have_csum ? csum : NULL); |
| 2610 | skip: | ||
| 2611 | if (ret) | 2610 | if (ret) |
| 2612 | return ret; | 2611 | return ret; |
| 2612 | skip: | ||
| 2613 | len -= l; | 2613 | len -= l; |
| 2614 | logical += l; | 2614 | logical += l; |
| 2615 | physical += l; | 2615 | physical += l; |
diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c index f5013d92a7e6..c81c0e004588 100644 --- a/fs/ceph/addr.c +++ b/fs/ceph/addr.c | |||
| @@ -1416,7 +1416,7 @@ void ceph_fill_inline_data(struct inode *inode, struct page *locked_page, | |||
| 1416 | } | 1416 | } |
| 1417 | } | 1417 | } |
| 1418 | 1418 | ||
| 1419 | dout("fill_inline_data %p %llx.%llx len %lu locked_page %p\n", | 1419 | dout("fill_inline_data %p %llx.%llx len %zu locked_page %p\n", |
| 1420 | inode, ceph_vinop(inode), len, locked_page); | 1420 | inode, ceph_vinop(inode), len, locked_page); |
| 1421 | 1421 | ||
| 1422 | if (len > 0) { | 1422 | if (len > 0) { |
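The ceph hunk is a printf-format fix: len there is a size_t, and the portable conversion specifier for size_t is %zu; %lu only happens to work where size_t and unsigned long are the same width. A trivial user-space illustration:

```c
#include <stdio.h>
#include <string.h>

int main(void)
{
	size_t len = strlen("inline data");

	/* %zu is the C99 length modifier for size_t; %lu would warn on 32-bit. */
	printf("len %zu\n", len);
	return 0;
}
```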
diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c index e5d3eadf47b1..bed43081720f 100644 --- a/fs/ext4/extents.c +++ b/fs/ext4/extents.c | |||
| @@ -5166,8 +5166,8 @@ int ext4_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, | |||
| 5166 | 5166 | ||
| 5167 | /* fallback to generic here if not in extents fmt */ | 5167 | /* fallback to generic here if not in extents fmt */ |
| 5168 | if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) | 5168 | if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) |
| 5169 | return __generic_block_fiemap(inode, fieinfo, start, len, | 5169 | return generic_block_fiemap(inode, fieinfo, start, len, |
| 5170 | ext4_get_block); | 5170 | ext4_get_block); |
| 5171 | 5171 | ||
| 5172 | if (fiemap_check_flags(fieinfo, EXT4_FIEMAP_FLAGS)) | 5172 | if (fiemap_check_flags(fieinfo, EXT4_FIEMAP_FLAGS)) |
| 5173 | return -EBADR; | 5173 | return -EBADR; |
diff --git a/fs/ext4/file.c b/fs/ext4/file.c index 513c12cf444c..8131be8c0af3 100644 --- a/fs/ext4/file.c +++ b/fs/ext4/file.c | |||
| @@ -273,19 +273,24 @@ static int ext4_file_open(struct inode * inode, struct file * filp) | |||
| 273 | * we determine this extent as a data or a hole according to whether the | 273 | * we determine this extent as a data or a hole according to whether the |
| 274 | * page cache has data or not. | 274 | * page cache has data or not. |
| 275 | */ | 275 | */ |
| 276 | static int ext4_find_unwritten_pgoff(struct inode *inode, int whence, | 276 | static int ext4_find_unwritten_pgoff(struct inode *inode, |
| 277 | loff_t endoff, loff_t *offset) | 277 | int whence, |
| 278 | struct ext4_map_blocks *map, | ||
| 279 | loff_t *offset) | ||
| 278 | { | 280 | { |
| 279 | struct pagevec pvec; | 281 | struct pagevec pvec; |
| 282 | unsigned int blkbits; | ||
| 280 | pgoff_t index; | 283 | pgoff_t index; |
| 281 | pgoff_t end; | 284 | pgoff_t end; |
| 285 | loff_t endoff; | ||
| 282 | loff_t startoff; | 286 | loff_t startoff; |
| 283 | loff_t lastoff; | 287 | loff_t lastoff; |
| 284 | int found = 0; | 288 | int found = 0; |
| 285 | 289 | ||
| 290 | blkbits = inode->i_sb->s_blocksize_bits; | ||
| 286 | startoff = *offset; | 291 | startoff = *offset; |
| 287 | lastoff = startoff; | 292 | lastoff = startoff; |
| 288 | 293 | endoff = (loff_t)(map->m_lblk + map->m_len) << blkbits; | |
| 289 | 294 | ||
| 290 | index = startoff >> PAGE_CACHE_SHIFT; | 295 | index = startoff >> PAGE_CACHE_SHIFT; |
| 291 | end = endoff >> PAGE_CACHE_SHIFT; | 296 | end = endoff >> PAGE_CACHE_SHIFT; |
| @@ -403,144 +408,147 @@ out: | |||
| 403 | static loff_t ext4_seek_data(struct file *file, loff_t offset, loff_t maxsize) | 408 | static loff_t ext4_seek_data(struct file *file, loff_t offset, loff_t maxsize) |
| 404 | { | 409 | { |
| 405 | struct inode *inode = file->f_mapping->host; | 410 | struct inode *inode = file->f_mapping->host; |
| 406 | struct fiemap_extent_info fie; | 411 | struct ext4_map_blocks map; |
| 407 | struct fiemap_extent ext[2]; | 412 | struct extent_status es; |
| 408 | loff_t next; | 413 | ext4_lblk_t start, last, end; |
| 409 | int i, ret = 0; | 414 | loff_t dataoff, isize; |
| 415 | int blkbits; | ||
| 416 | int ret = 0; | ||
| 410 | 417 | ||
| 411 | mutex_lock(&inode->i_mutex); | 418 | mutex_lock(&inode->i_mutex); |
| 412 | if (offset >= inode->i_size) { | 419 | |
| 420 | isize = i_size_read(inode); | ||
| 421 | if (offset >= isize) { | ||
| 413 | mutex_unlock(&inode->i_mutex); | 422 | mutex_unlock(&inode->i_mutex); |
| 414 | return -ENXIO; | 423 | return -ENXIO; |
| 415 | } | 424 | } |
| 416 | fie.fi_flags = 0; | 425 | |
| 417 | fie.fi_extents_max = 2; | 426 | blkbits = inode->i_sb->s_blocksize_bits; |
| 418 | fie.fi_extents_start = (struct fiemap_extent __user *) &ext; | 427 | start = offset >> blkbits; |
| 419 | while (1) { | 428 | last = start; |
| 420 | mm_segment_t old_fs = get_fs(); | 429 | end = isize >> blkbits; |
| 421 | 430 | dataoff = offset; | |
| 422 | fie.fi_extents_mapped = 0; | 431 | |
| 423 | memset(ext, 0, sizeof(*ext) * fie.fi_extents_max); | 432 | do { |
| 424 | 433 | map.m_lblk = last; | |
| 425 | set_fs(get_ds()); | 434 | map.m_len = end - last + 1; |
| 426 | ret = ext4_fiemap(inode, &fie, offset, maxsize - offset); | 435 | ret = ext4_map_blocks(NULL, inode, &map, 0); |
| 427 | set_fs(old_fs); | 436 | if (ret > 0 && !(map.m_flags & EXT4_MAP_UNWRITTEN)) { |
| 428 | if (ret) | 437 | if (last != start) |
| 438 | dataoff = (loff_t)last << blkbits; | ||
| 429 | break; | 439 | break; |
| 440 | } | ||
| 430 | 441 | ||
| 431 | /* No extents found, EOF */ | 442 | /* |
| 432 | if (!fie.fi_extents_mapped) { | 443 | * If there is a delay extent at this offset, |
| 433 | ret = -ENXIO; | 444 | * it will be as a data. |
| 445 | */ | ||
| 446 | ext4_es_find_delayed_extent_range(inode, last, last, &es); | ||
| 447 | if (es.es_len != 0 && in_range(last, es.es_lblk, es.es_len)) { | ||
| 448 | if (last != start) | ||
| 449 | dataoff = (loff_t)last << blkbits; | ||
| 434 | break; | 450 | break; |
| 435 | } | 451 | } |
| 436 | for (i = 0; i < fie.fi_extents_mapped; i++) { | ||
| 437 | next = (loff_t)(ext[i].fe_length + ext[i].fe_logical); | ||
| 438 | 452 | ||
| 439 | if (offset < (loff_t)ext[i].fe_logical) | 453 | /* |
| 440 | offset = (loff_t)ext[i].fe_logical; | 454 | * If there is a unwritten extent at this offset, |
| 441 | /* | 455 | * it will be as a data or a hole according to page |
| 442 | * If extent is not unwritten, then it contains valid | 456 | * cache that has data or not. |
| 443 | * data, mapped or delayed. | 457 | */ |
| 444 | */ | 458 | if (map.m_flags & EXT4_MAP_UNWRITTEN) { |
| 445 | if (!(ext[i].fe_flags & FIEMAP_EXTENT_UNWRITTEN)) | 459 | int unwritten; |
| 446 | goto out; | 460 | unwritten = ext4_find_unwritten_pgoff(inode, SEEK_DATA, |
| 461 | &map, &dataoff); | ||
| 462 | if (unwritten) | ||
| 463 | break; | ||
| 464 | } | ||
| 447 | 465 | ||
| 448 | /* | 466 | last++; |
| 449 | * If there is a unwritten extent at this offset, | 467 | dataoff = (loff_t)last << blkbits; |
| 450 | * it will be as a data or a hole according to page | 468 | } while (last <= end); |
| 451 | * cache that has data or not. | ||
| 452 | */ | ||
| 453 | if (ext4_find_unwritten_pgoff(inode, SEEK_DATA, | ||
| 454 | next, &offset)) | ||
| 455 | goto out; | ||
| 456 | 469 | ||
| 457 | if (ext[i].fe_flags & FIEMAP_EXTENT_LAST) { | ||
| 458 | ret = -ENXIO; | ||
| 459 | goto out; | ||
| 460 | } | ||
| 461 | offset = next; | ||
| 462 | } | ||
| 463 | } | ||
| 464 | if (offset > inode->i_size) | ||
| 465 | offset = inode->i_size; | ||
| 466 | out: | ||
| 467 | mutex_unlock(&inode->i_mutex); | 470 | mutex_unlock(&inode->i_mutex); |
| 468 | if (ret) | ||
| 469 | return ret; | ||
| 470 | 471 | ||
| 471 | return vfs_setpos(file, offset, maxsize); | 472 | if (dataoff > isize) |
| 473 | return -ENXIO; | ||
| 474 | |||
| 475 | return vfs_setpos(file, dataoff, maxsize); | ||
| 472 | } | 476 | } |
| 473 | 477 | ||
| 474 | /* | 478 | /* |
| 475 | * ext4_seek_hole() retrieves the offset for SEEK_HOLE | 479 | * ext4_seek_hole() retrieves the offset for SEEK_HOLE. |
| 476 | */ | 480 | */ |
| 477 | static loff_t ext4_seek_hole(struct file *file, loff_t offset, loff_t maxsize) | 481 | static loff_t ext4_seek_hole(struct file *file, loff_t offset, loff_t maxsize) |
| 478 | { | 482 | { |
| 479 | struct inode *inode = file->f_mapping->host; | 483 | struct inode *inode = file->f_mapping->host; |
| 480 | struct fiemap_extent_info fie; | 484 | struct ext4_map_blocks map; |
| 481 | struct fiemap_extent ext[2]; | 485 | struct extent_status es; |
| 482 | loff_t next; | 486 | ext4_lblk_t start, last, end; |
| 483 | int i, ret = 0; | 487 | loff_t holeoff, isize; |
| 488 | int blkbits; | ||
| 489 | int ret = 0; | ||
| 484 | 490 | ||
| 485 | mutex_lock(&inode->i_mutex); | 491 | mutex_lock(&inode->i_mutex); |
| 486 | if (offset >= inode->i_size) { | 492 | |
| 493 | isize = i_size_read(inode); | ||
| 494 | if (offset >= isize) { | ||
| 487 | mutex_unlock(&inode->i_mutex); | 495 | mutex_unlock(&inode->i_mutex); |
| 488 | return -ENXIO; | 496 | return -ENXIO; |
| 489 | } | 497 | } |
| 490 | 498 | ||
| 491 | fie.fi_flags = 0; | 499 | blkbits = inode->i_sb->s_blocksize_bits; |
| 492 | fie.fi_extents_max = 2; | 500 | start = offset >> blkbits; |
| 493 | fie.fi_extents_start = (struct fiemap_extent __user *)&ext; | 501 | last = start; |
| 494 | while (1) { | 502 | end = isize >> blkbits; |
| 495 | mm_segment_t old_fs = get_fs(); | 503 | holeoff = offset; |
| 496 | |||
| 497 | fie.fi_extents_mapped = 0; | ||
| 498 | memset(ext, 0, sizeof(*ext)); | ||
| 499 | 504 | ||
| 500 | set_fs(get_ds()); | 505 | do { |
| 501 | ret = ext4_fiemap(inode, &fie, offset, maxsize - offset); | 506 | map.m_lblk = last; |
| 502 | set_fs(old_fs); | 507 | map.m_len = end - last + 1; |
| 503 | if (ret) | 508 | ret = ext4_map_blocks(NULL, inode, &map, 0); |
| 504 | break; | 509 | if (ret > 0 && !(map.m_flags & EXT4_MAP_UNWRITTEN)) { |
| 510 | last += ret; | ||
| 511 | holeoff = (loff_t)last << blkbits; | ||
| 512 | continue; | ||
| 513 | } | ||
| 505 | 514 | ||
| 506 | /* No extents found */ | 515 | /* |
| 507 | if (!fie.fi_extents_mapped) | 516 | * If there is a delay extent at this offset, |
| 508 | break; | 517 | * we will skip this extent. |
| 518 | */ | ||
| 519 | ext4_es_find_delayed_extent_range(inode, last, last, &es); | ||
| 520 | if (es.es_len != 0 && in_range(last, es.es_lblk, es.es_len)) { | ||
| 521 | last = es.es_lblk + es.es_len; | ||
| 522 | holeoff = (loff_t)last << blkbits; | ||
| 523 | continue; | ||
| 524 | } | ||
| 509 | 525 | ||
| 510 | for (i = 0; i < fie.fi_extents_mapped; i++) { | 526 | /* |
| 511 | next = (loff_t)(ext[i].fe_logical + ext[i].fe_length); | 527 | * If there is a unwritten extent at this offset, |
| 512 | /* | 528 | * it will be as a data or a hole according to page |
| 513 | * If extent is not unwritten, then it contains valid | 529 | * cache that has data or not. |
| 514 | * data, mapped or delayed. | 530 | */ |
| 515 | */ | 531 | if (map.m_flags & EXT4_MAP_UNWRITTEN) { |
| 516 | if (!(ext[i].fe_flags & FIEMAP_EXTENT_UNWRITTEN)) { | 532 | int unwritten; |
| 517 | if (offset < (loff_t)ext[i].fe_logical) | 533 | unwritten = ext4_find_unwritten_pgoff(inode, SEEK_HOLE, |
| 518 | goto out; | 534 | &map, &holeoff); |
| 519 | offset = next; | 535 | if (!unwritten) { |
| 536 | last += ret; | ||
| 537 | holeoff = (loff_t)last << blkbits; | ||
| 520 | continue; | 538 | continue; |
| 521 | } | 539 | } |
| 522 | /* | ||
| 523 | * If there is a unwritten extent at this offset, | ||
| 524 | * it will be as a data or a hole according to page | ||
| 525 | * cache that has data or not. | ||
| 526 | */ | ||
| 527 | if (ext4_find_unwritten_pgoff(inode, SEEK_HOLE, | ||
| 528 | next, &offset)) | ||
| 529 | goto out; | ||
| 530 | |||
| 531 | offset = next; | ||
| 532 | if (ext[i].fe_flags & FIEMAP_EXTENT_LAST) | ||
| 533 | goto out; | ||
| 534 | } | 540 | } |
| 535 | } | 541 | |
| 536 | if (offset > inode->i_size) | 542 | /* find a hole */ |
| 537 | offset = inode->i_size; | 543 | break; |
| 538 | out: | 544 | } while (last <= end); |
| 545 | |||
| 539 | mutex_unlock(&inode->i_mutex); | 546 | mutex_unlock(&inode->i_mutex); |
| 540 | if (ret) | ||
| 541 | return ret; | ||
| 542 | 547 | ||
| 543 | return vfs_setpos(file, offset, maxsize); | 548 | if (holeoff > isize) |
| 549 | holeoff = isize; | ||
| 550 | |||
| 551 | return vfs_setpos(file, holeoff, maxsize); | ||
| 544 | } | 552 | } |
| 545 | 553 | ||
| 546 | /* | 554 | /* |
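The ext4_seek_data()/ext4_seek_hole() rewrite above walks the file block range with ext4_map_blocks() and the extent-status cache instead of calling ext4_fiemap() under set_fs(), but the user-visible contract is unchanged: lseek(fd, pos, SEEK_DATA) returns the next offset that contains data, SEEK_HOLE the next hole, and unwritten extents count as data only if the page cache holds data for them. A small user-space program exercising that interface on a sparse file (assumes a filesystem and C library that expose SEEK_DATA/SEEK_HOLE; lseek returns -1 with ENXIO past end of file):

```c
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(int argc, char **argv)
{
	const char *path = argc > 1 ? argv[1] : "sparse.bin";
	int fd = open(path, O_RDWR | O_CREAT | O_TRUNC, 0644);
	off_t data, hole;

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* Layout: a 1 MiB hole, then one written byte (allocates one block). */
	if (pwrite(fd, "x", 1, 1024 * 1024) != 1) {
		perror("pwrite");
		return 1;
	}

	data = lseek(fd, 0, SEEK_DATA);	/* expected: around the 1 MiB mark */
	hole = lseek(fd, 0, SEEK_HOLE);	/* expected: 0, the file starts with a hole */
	printf("first data at %lld, first hole at %lld\n",
	       (long long)data, (long long)hole);
	close(fd);
	return 0;
}
```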
diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c index bf76f405a5f9..8a8ec6293b19 100644 --- a/fs/ext4/resize.c +++ b/fs/ext4/resize.c | |||
| @@ -24,6 +24,18 @@ int ext4_resize_begin(struct super_block *sb) | |||
| 24 | return -EPERM; | 24 | return -EPERM; |
| 25 | 25 | ||
| 26 | /* | 26 | /* |
| 27 | * If we are not using the primary superblock/GDT copy don't resize, | ||
| 28 | * because the user tools have no way of handling this. Probably a | ||
| 29 | * bad time to do it anyways. | ||
| 30 | */ | ||
| 31 | if (EXT4_SB(sb)->s_sbh->b_blocknr != | ||
| 32 | le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block)) { | ||
| 33 | ext4_warning(sb, "won't resize using backup superblock at %llu", | ||
| 34 | (unsigned long long)EXT4_SB(sb)->s_sbh->b_blocknr); | ||
| 35 | return -EPERM; | ||
| 36 | } | ||
| 37 | |||
| 38 | /* | ||
| 27 | * We are not allowed to do online-resizing on a filesystem mounted | 39 | * We are not allowed to do online-resizing on a filesystem mounted |
| 28 | * with error, because it can destroy the filesystem easily. | 40 | * with error, because it can destroy the filesystem easily. |
| 29 | */ | 41 | */ |
| @@ -758,18 +770,6 @@ static int add_new_gdb(handle_t *handle, struct inode *inode, | |||
| 758 | "EXT4-fs: ext4_add_new_gdb: adding group block %lu\n", | 770 | "EXT4-fs: ext4_add_new_gdb: adding group block %lu\n", |
| 759 | gdb_num); | 771 | gdb_num); |
| 760 | 772 | ||
| 761 | /* | ||
| 762 | * If we are not using the primary superblock/GDT copy don't resize, | ||
| 763 | * because the user tools have no way of handling this. Probably a | ||
| 764 | * bad time to do it anyways. | ||
| 765 | */ | ||
| 766 | if (EXT4_SB(sb)->s_sbh->b_blocknr != | ||
| 767 | le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block)) { | ||
| 768 | ext4_warning(sb, "won't resize using backup superblock at %llu", | ||
| 769 | (unsigned long long)EXT4_SB(sb)->s_sbh->b_blocknr); | ||
| 770 | return -EPERM; | ||
| 771 | } | ||
| 772 | |||
| 773 | gdb_bh = sb_bread(sb, gdblock); | 773 | gdb_bh = sb_bread(sb, gdblock); |
| 774 | if (!gdb_bh) | 774 | if (!gdb_bh) |
| 775 | return -EIO; | 775 | return -EIO; |
diff --git a/fs/ext4/super.c b/fs/ext4/super.c index 43c92b1685cb..74c5f53595fb 100644 --- a/fs/ext4/super.c +++ b/fs/ext4/super.c | |||
| @@ -3482,7 +3482,7 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent) | |||
| 3482 | if (EXT4_HAS_RO_COMPAT_FEATURE(sb, | 3482 | if (EXT4_HAS_RO_COMPAT_FEATURE(sb, |
| 3483 | EXT4_FEATURE_RO_COMPAT_METADATA_CSUM) && | 3483 | EXT4_FEATURE_RO_COMPAT_METADATA_CSUM) && |
| 3484 | EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_GDT_CSUM)) | 3484 | EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_GDT_CSUM)) |
| 3485 | ext4_warning(sb, KERN_INFO "metadata_csum and uninit_bg are " | 3485 | ext4_warning(sb, "metadata_csum and uninit_bg are " |
| 3486 | "redundant flags; please run fsck."); | 3486 | "redundant flags; please run fsck."); |
| 3487 | 3487 | ||
| 3488 | /* Check for a known checksum algorithm */ | 3488 | /* Check for a known checksum algorithm */ |
diff --git a/fs/fcntl.c b/fs/fcntl.c index 99d440a4a6ba..ee85cd4e136a 100644 --- a/fs/fcntl.c +++ b/fs/fcntl.c | |||
| @@ -740,14 +740,15 @@ static int __init fcntl_init(void) | |||
| 740 | * Exceptions: O_NONBLOCK is a two bit define on parisc; O_NDELAY | 740 | * Exceptions: O_NONBLOCK is a two bit define on parisc; O_NDELAY |
| 741 | * is defined as O_NONBLOCK on some platforms and not on others. | 741 | * is defined as O_NONBLOCK on some platforms and not on others. |
| 742 | */ | 742 | */ |
| 743 | BUILD_BUG_ON(20 - 1 /* for O_RDONLY being 0 */ != HWEIGHT32( | 743 | BUILD_BUG_ON(21 - 1 /* for O_RDONLY being 0 */ != HWEIGHT32( |
| 744 | O_RDONLY | O_WRONLY | O_RDWR | | 744 | O_RDONLY | O_WRONLY | O_RDWR | |
| 745 | O_CREAT | O_EXCL | O_NOCTTY | | 745 | O_CREAT | O_EXCL | O_NOCTTY | |
| 746 | O_TRUNC | O_APPEND | /* O_NONBLOCK | */ | 746 | O_TRUNC | O_APPEND | /* O_NONBLOCK | */ |
| 747 | __O_SYNC | O_DSYNC | FASYNC | | 747 | __O_SYNC | O_DSYNC | FASYNC | |
| 748 | O_DIRECT | O_LARGEFILE | O_DIRECTORY | | 748 | O_DIRECT | O_LARGEFILE | O_DIRECTORY | |
| 749 | O_NOFOLLOW | O_NOATIME | O_CLOEXEC | | 749 | O_NOFOLLOW | O_NOATIME | O_CLOEXEC | |
| 750 | __FMODE_EXEC | O_PATH | __O_TMPFILE | 750 | __FMODE_EXEC | O_PATH | __O_TMPFILE | |
| 751 | __FMODE_NONOTIFY | ||
| 751 | )); | 752 | )); |
| 752 | 753 | ||
| 753 | fasync_cache = kmem_cache_create("fasync_cache", | 754 | fasync_cache = kmem_cache_create("fasync_cache", |
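The fcntl_init() check is a compile-time inventory of the open-flag bit space: HWEIGHT32() counts the set bits in the OR of every known flag, and the leading constant (now 21, minus one because O_RDONLY is zero) has to match, so a flag can be added only if it occupies a bit of its own and the count is bumped, which is what this hunk does for __FMODE_NONOTIFY. The include/linux/fs.h hunk further down renumbers FMODE_NONOTIFY for the same reason: the check passes only when every flag has a distinct bit. A user-space analogue of the bit-counting idea (uses the GCC/Clang __builtin_popcount):

```c
#include <assert.h>
#include <stdio.h>

/* Hypothetical flag set; the real check lives in fcntl_init(). */
#define FLAG_A	0x01
#define FLAG_B	0x02
#define FLAG_C	0x08
#define NFLAGS	3

int main(void)
{
	unsigned int all = FLAG_A | FLAG_B | FLAG_C;

	/* If two flags shared a bit, the popcount would come out lower than
	 * NFLAGS and this would trip, the same idea as BUILD_BUG_ON + HWEIGHT32. */
	assert(__builtin_popcount(all) == NFLAGS);
	printf("flag space is collision-free\n");
	return 0;
}
```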
diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c index 3550a9c87616..c06a1ba80d73 100644 --- a/fs/nfsd/nfs4state.c +++ b/fs/nfsd/nfs4state.c | |||
| @@ -3897,11 +3897,11 @@ nfs4_set_delegation(struct nfs4_client *clp, struct svc_fh *fh, | |||
| 3897 | status = nfs4_setlease(dp); | 3897 | status = nfs4_setlease(dp); |
| 3898 | goto out; | 3898 | goto out; |
| 3899 | } | 3899 | } |
| 3900 | atomic_inc(&fp->fi_delegees); | ||
| 3901 | if (fp->fi_had_conflict) { | 3900 | if (fp->fi_had_conflict) { |
| 3902 | status = -EAGAIN; | 3901 | status = -EAGAIN; |
| 3903 | goto out_unlock; | 3902 | goto out_unlock; |
| 3904 | } | 3903 | } |
| 3904 | atomic_inc(&fp->fi_delegees); | ||
| 3905 | hash_delegation_locked(dp, fp); | 3905 | hash_delegation_locked(dp, fp); |
| 3906 | status = 0; | 3906 | status = 0; |
| 3907 | out_unlock: | 3907 | out_unlock: |
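The nfs4state.c hunk is a reference-count ordering fix: fi_delegees was incremented before the fi_had_conflict check, so the -EAGAIN path returned with the count left elevated. Taking the reference only after the last check that can fail (or pairing every early exit with a decrement) keeps the count balanced. A generic user-space sketch of the rule, with C11 atomics standing in for atomic_t:

```c
#include <errno.h>
#include <stdatomic.h>
#include <stdbool.h>

struct foo_file {
	atomic_int delegees;
	bool had_conflict;
};

/* Take the delegation reference only once nothing can fail anymore. */
static int foo_take_delegation(struct foo_file *fp)
{
	if (fp->had_conflict)
		return -EAGAIN;		/* no reference taken, nothing to undo */

	atomic_fetch_add(&fp->delegees, 1);
	return 0;
}
```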
diff --git a/fs/ocfs2/dlm/dlmrecovery.c b/fs/ocfs2/dlm/dlmrecovery.c index 79b5af5e6a7b..cecd875653e4 100644 --- a/fs/ocfs2/dlm/dlmrecovery.c +++ b/fs/ocfs2/dlm/dlmrecovery.c | |||
| @@ -2023,11 +2023,8 @@ leave: | |||
| 2023 | dlm_lockres_drop_inflight_ref(dlm, res); | 2023 | dlm_lockres_drop_inflight_ref(dlm, res); |
| 2024 | spin_unlock(&res->spinlock); | 2024 | spin_unlock(&res->spinlock); |
| 2025 | 2025 | ||
| 2026 | if (ret < 0) { | 2026 | if (ret < 0) |
| 2027 | mlog_errno(ret); | 2027 | mlog_errno(ret); |
| 2028 | if (newlock) | ||
| 2029 | dlm_lock_put(newlock); | ||
| 2030 | } | ||
| 2031 | 2028 | ||
| 2032 | return ret; | 2029 | return ret; |
| 2033 | } | 2030 | } |
diff --git a/fs/ocfs2/namei.c b/fs/ocfs2/namei.c index b931e04e3388..914c121ec890 100644 --- a/fs/ocfs2/namei.c +++ b/fs/ocfs2/namei.c | |||
| @@ -94,6 +94,14 @@ static int ocfs2_create_symlink_data(struct ocfs2_super *osb, | |||
| 94 | struct inode *inode, | 94 | struct inode *inode, |
| 95 | const char *symname); | 95 | const char *symname); |
| 96 | 96 | ||
| 97 | static int ocfs2_double_lock(struct ocfs2_super *osb, | ||
| 98 | struct buffer_head **bh1, | ||
| 99 | struct inode *inode1, | ||
| 100 | struct buffer_head **bh2, | ||
| 101 | struct inode *inode2, | ||
| 102 | int rename); | ||
| 103 | |||
| 104 | static void ocfs2_double_unlock(struct inode *inode1, struct inode *inode2); | ||
| 97 | /* An orphan dir name is an 8 byte value, printed as a hex string */ | 105 | /* An orphan dir name is an 8 byte value, printed as a hex string */ |
| 98 | #define OCFS2_ORPHAN_NAMELEN ((int)(2 * sizeof(u64))) | 106 | #define OCFS2_ORPHAN_NAMELEN ((int)(2 * sizeof(u64))) |
| 99 | 107 | ||
| @@ -678,8 +686,10 @@ static int ocfs2_link(struct dentry *old_dentry, | |||
| 678 | { | 686 | { |
| 679 | handle_t *handle; | 687 | handle_t *handle; |
| 680 | struct inode *inode = old_dentry->d_inode; | 688 | struct inode *inode = old_dentry->d_inode; |
| 689 | struct inode *old_dir = old_dentry->d_parent->d_inode; | ||
| 681 | int err; | 690 | int err; |
| 682 | struct buffer_head *fe_bh = NULL; | 691 | struct buffer_head *fe_bh = NULL; |
| 692 | struct buffer_head *old_dir_bh = NULL; | ||
| 683 | struct buffer_head *parent_fe_bh = NULL; | 693 | struct buffer_head *parent_fe_bh = NULL; |
| 684 | struct ocfs2_dinode *fe = NULL; | 694 | struct ocfs2_dinode *fe = NULL; |
| 685 | struct ocfs2_super *osb = OCFS2_SB(dir->i_sb); | 695 | struct ocfs2_super *osb = OCFS2_SB(dir->i_sb); |
| @@ -696,19 +706,33 @@ static int ocfs2_link(struct dentry *old_dentry, | |||
| 696 | 706 | ||
| 697 | dquot_initialize(dir); | 707 | dquot_initialize(dir); |
| 698 | 708 | ||
| 699 | err = ocfs2_inode_lock_nested(dir, &parent_fe_bh, 1, OI_LS_PARENT); | 709 | err = ocfs2_double_lock(osb, &old_dir_bh, old_dir, |
| 710 | &parent_fe_bh, dir, 0); | ||
| 700 | if (err < 0) { | 711 | if (err < 0) { |
| 701 | if (err != -ENOENT) | 712 | if (err != -ENOENT) |
| 702 | mlog_errno(err); | 713 | mlog_errno(err); |
| 703 | return err; | 714 | return err; |
| 704 | } | 715 | } |
| 705 | 716 | ||
| 717 | /* make sure both dirs have bhs | ||
| 718 | * get an extra ref on old_dir_bh if old==new */ | ||
| 719 | if (!parent_fe_bh) { | ||
| 720 | if (old_dir_bh) { | ||
| 721 | parent_fe_bh = old_dir_bh; | ||
| 722 | get_bh(parent_fe_bh); | ||
| 723 | } else { | ||
| 724 | mlog(ML_ERROR, "%s: no old_dir_bh!\n", osb->uuid_str); | ||
| 725 | err = -EIO; | ||
| 726 | goto out; | ||
| 727 | } | ||
| 728 | } | ||
| 729 | |||
| 706 | if (!dir->i_nlink) { | 730 | if (!dir->i_nlink) { |
| 707 | err = -ENOENT; | 731 | err = -ENOENT; |
| 708 | goto out; | 732 | goto out; |
| 709 | } | 733 | } |
| 710 | 734 | ||
| 711 | err = ocfs2_lookup_ino_from_name(dir, old_dentry->d_name.name, | 735 | err = ocfs2_lookup_ino_from_name(old_dir, old_dentry->d_name.name, |
| 712 | old_dentry->d_name.len, &old_de_ino); | 736 | old_dentry->d_name.len, &old_de_ino); |
| 713 | if (err) { | 737 | if (err) { |
| 714 | err = -ENOENT; | 738 | err = -ENOENT; |
| @@ -801,10 +825,11 @@ out_unlock_inode: | |||
| 801 | ocfs2_inode_unlock(inode, 1); | 825 | ocfs2_inode_unlock(inode, 1); |
| 802 | 826 | ||
| 803 | out: | 827 | out: |
| 804 | ocfs2_inode_unlock(dir, 1); | 828 | ocfs2_double_unlock(old_dir, dir); |
| 805 | 829 | ||
| 806 | brelse(fe_bh); | 830 | brelse(fe_bh); |
| 807 | brelse(parent_fe_bh); | 831 | brelse(parent_fe_bh); |
| 832 | brelse(old_dir_bh); | ||
| 808 | 833 | ||
| 809 | ocfs2_free_dir_lookup_result(&lookup); | 834 | ocfs2_free_dir_lookup_result(&lookup); |
| 810 | 835 | ||
| @@ -1072,14 +1097,15 @@ static int ocfs2_check_if_ancestor(struct ocfs2_super *osb, | |||
| 1072 | } | 1097 | } |
| 1073 | 1098 | ||
| 1074 | /* | 1099 | /* |
| 1075 | * The only place this should be used is rename! | 1100 | * The only place this should be used is rename and link! |
| 1076 | * if they have the same id, then the 1st one is the only one locked. | 1101 | * if they have the same id, then the 1st one is the only one locked. |
| 1077 | */ | 1102 | */ |
| 1078 | static int ocfs2_double_lock(struct ocfs2_super *osb, | 1103 | static int ocfs2_double_lock(struct ocfs2_super *osb, |
| 1079 | struct buffer_head **bh1, | 1104 | struct buffer_head **bh1, |
| 1080 | struct inode *inode1, | 1105 | struct inode *inode1, |
| 1081 | struct buffer_head **bh2, | 1106 | struct buffer_head **bh2, |
| 1082 | struct inode *inode2) | 1107 | struct inode *inode2, |
| 1108 | int rename) | ||
| 1083 | { | 1109 | { |
| 1084 | int status; | 1110 | int status; |
| 1085 | int inode1_is_ancestor, inode2_is_ancestor; | 1111 | int inode1_is_ancestor, inode2_is_ancestor; |
| @@ -1127,7 +1153,7 @@ static int ocfs2_double_lock(struct ocfs2_super *osb, | |||
| 1127 | } | 1153 | } |
| 1128 | /* lock id2 */ | 1154 | /* lock id2 */ |
| 1129 | status = ocfs2_inode_lock_nested(inode2, bh2, 1, | 1155 | status = ocfs2_inode_lock_nested(inode2, bh2, 1, |
| 1130 | OI_LS_RENAME1); | 1156 | rename == 1 ? OI_LS_RENAME1 : OI_LS_PARENT); |
| 1131 | if (status < 0) { | 1157 | if (status < 0) { |
| 1132 | if (status != -ENOENT) | 1158 | if (status != -ENOENT) |
| 1133 | mlog_errno(status); | 1159 | mlog_errno(status); |
| @@ -1136,7 +1162,8 @@ static int ocfs2_double_lock(struct ocfs2_super *osb, | |||
| 1136 | } | 1162 | } |
| 1137 | 1163 | ||
| 1138 | /* lock id1 */ | 1164 | /* lock id1 */ |
| 1139 | status = ocfs2_inode_lock_nested(inode1, bh1, 1, OI_LS_RENAME2); | 1165 | status = ocfs2_inode_lock_nested(inode1, bh1, 1, |
| 1166 | rename == 1 ? OI_LS_RENAME2 : OI_LS_PARENT); | ||
| 1140 | if (status < 0) { | 1167 | if (status < 0) { |
| 1141 | /* | 1168 | /* |
| 1142 | * An error return must mean that no cluster locks | 1169 | * An error return must mean that no cluster locks |
| @@ -1252,7 +1279,7 @@ static int ocfs2_rename(struct inode *old_dir, | |||
| 1252 | 1279 | ||
| 1253 | /* if old and new are the same, this'll just do one lock. */ | 1280 | /* if old and new are the same, this'll just do one lock. */ |
| 1254 | status = ocfs2_double_lock(osb, &old_dir_bh, old_dir, | 1281 | status = ocfs2_double_lock(osb, &old_dir_bh, old_dir, |
| 1255 | &new_dir_bh, new_dir); | 1282 | &new_dir_bh, new_dir, 1); |
| 1256 | if (status < 0) { | 1283 | if (status < 0) { |
| 1257 | mlog_errno(status); | 1284 | mlog_errno(status); |
| 1258 | goto bail; | 1285 | goto bail; |
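ocfs2_double_lock() exists so that two inodes are always cluster-locked in one consistent order, and only once when both are the same inode, which is what keeps concurrent rename and (now) link callers from deadlocking against each other; the hunk reuses the helper for link and adds a rename flag to pick the lock subclass. The ordering idea in miniature, as a self-contained user-space sketch that orders by address (any total order works):

```c
#include <pthread.h>
#include <stdint.h>

struct obj {
	pthread_mutex_t lock;
};

/* Lock two objects in a stable global order so callers that pass them
 * in opposite order cannot deadlock against each other. */
static void double_lock(struct obj *a, struct obj *b)
{
	if (a == b) {			/* same object: take the lock only once */
		pthread_mutex_lock(&a->lock);
		return;
	}
	if ((uintptr_t)a > (uintptr_t)b) {	/* pick a fixed order, e.g. by address */
		struct obj *tmp = a;

		a = b;
		b = tmp;
	}
	pthread_mutex_lock(&a->lock);
	pthread_mutex_lock(&b->lock);
}
```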
diff --git a/include/acpi/processor.h b/include/acpi/processor.h index 3ca9b751f122..b95dc32a6e6b 100644 --- a/include/acpi/processor.h +++ b/include/acpi/processor.h | |||
| @@ -196,8 +196,8 @@ struct acpi_processor_flags { | |||
| 196 | struct acpi_processor { | 196 | struct acpi_processor { |
| 197 | acpi_handle handle; | 197 | acpi_handle handle; |
| 198 | u32 acpi_id; | 198 | u32 acpi_id; |
| 199 | u32 apic_id; | 199 | u32 phys_id; /* CPU hardware ID such as APIC ID for x86 */ |
| 200 | u32 id; | 200 | u32 id; /* CPU logical ID allocated by OS */ |
| 201 | u32 pblk; | 201 | u32 pblk; |
| 202 | int performance_platform_limit; | 202 | int performance_platform_limit; |
| 203 | int throttling_platform_limit; | 203 | int throttling_platform_limit; |
| @@ -310,8 +310,8 @@ static inline int acpi_processor_get_bios_limit(int cpu, unsigned int *limit) | |||
| 310 | #endif /* CONFIG_CPU_FREQ */ | 310 | #endif /* CONFIG_CPU_FREQ */ |
| 311 | 311 | ||
| 312 | /* in processor_core.c */ | 312 | /* in processor_core.c */ |
| 313 | int acpi_get_apicid(acpi_handle, int type, u32 acpi_id); | 313 | int acpi_get_phys_id(acpi_handle, int type, u32 acpi_id); |
| 314 | int acpi_map_cpuid(int apic_id, u32 acpi_id); | 314 | int acpi_map_cpuid(int phys_id, u32 acpi_id); |
| 315 | int acpi_get_cpuid(acpi_handle, int type, u32 acpi_id); | 315 | int acpi_get_cpuid(acpi_handle, int type, u32 acpi_id); |
| 316 | 316 | ||
| 317 | /* in processor_pdc.c */ | 317 | /* in processor_pdc.c */ |
diff --git a/include/linux/acpi.h b/include/linux/acpi.h index 856d381b1d5b..d459cd17b477 100644 --- a/include/linux/acpi.h +++ b/include/linux/acpi.h | |||
| @@ -147,8 +147,8 @@ void acpi_numa_arch_fixup(void); | |||
| 147 | 147 | ||
| 148 | #ifdef CONFIG_ACPI_HOTPLUG_CPU | 148 | #ifdef CONFIG_ACPI_HOTPLUG_CPU |
| 149 | /* Arch dependent functions for cpu hotplug support */ | 149 | /* Arch dependent functions for cpu hotplug support */ |
| 150 | int acpi_map_lsapic(acpi_handle handle, int physid, int *pcpu); | 150 | int acpi_map_cpu(acpi_handle handle, int physid, int *pcpu); |
| 151 | int acpi_unmap_lsapic(int cpu); | 151 | int acpi_unmap_cpu(int cpu); |
| 152 | #endif /* CONFIG_ACPI_HOTPLUG_CPU */ | 152 | #endif /* CONFIG_ACPI_HOTPLUG_CPU */ |
| 153 | 153 | ||
| 154 | int acpi_register_ioapic(acpi_handle handle, u64 phys_addr, u32 gsi_base); | 154 | int acpi_register_ioapic(acpi_handle handle, u64 phys_addr, u32 gsi_base); |
diff --git a/include/linux/ceph/osd_client.h b/include/linux/ceph/osd_client.h index 5d86416d35f2..61b19c46bdb3 100644 --- a/include/linux/ceph/osd_client.h +++ b/include/linux/ceph/osd_client.h | |||
| @@ -87,8 +87,8 @@ struct ceph_osd_req_op { | |||
| 87 | struct ceph_osd_data osd_data; | 87 | struct ceph_osd_data osd_data; |
| 88 | } extent; | 88 | } extent; |
| 89 | struct { | 89 | struct { |
| 90 | __le32 name_len; | 90 | u32 name_len; |
| 91 | __le32 value_len; | 91 | u32 value_len; |
| 92 | __u8 cmp_op; /* CEPH_OSD_CMPXATTR_OP_* */ | 92 | __u8 cmp_op; /* CEPH_OSD_CMPXATTR_OP_* */ |
| 93 | __u8 cmp_mode; /* CEPH_OSD_CMPXATTR_MODE_* */ | 93 | __u8 cmp_mode; /* CEPH_OSD_CMPXATTR_MODE_* */ |
| 94 | struct ceph_osd_data osd_data; | 94 | struct ceph_osd_data osd_data; |
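The osd_client.h hunk changes in-memory request fields from __le32 to u32: those members are written and read by CPU code, so they belong in host byte order, and the conversion to little-endian should happen once, at the point where the request is encoded onto the wire; __le32 (a sparse bitwise type) is reserved for data that really is little-endian in memory. A tiny user-space analogue using glibc's endian helpers:

```c
#define _DEFAULT_SOURCE
#include <endian.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct req {
	uint32_t name_len;		/* host order while the request is built up */
};

static void encode(const struct req *r, unsigned char *wire)
{
	uint32_t le = htole32(r->name_len);	/* convert once, at encode time */

	memcpy(wire, &le, sizeof(le));
}

int main(void)
{
	struct req r = { .name_len = 5 };
	unsigned char buf[4];

	encode(&r, buf);
	printf("%02x %02x %02x %02x\n", buf[0], buf[1], buf[2], buf[3]);
	return 0;
}
```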
diff --git a/include/linux/fs.h b/include/linux/fs.h index f90c0282c114..42efe13077b6 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h | |||
| @@ -135,7 +135,7 @@ typedef void (dio_iodone_t)(struct kiocb *iocb, loff_t offset, | |||
| 135 | #define FMODE_CAN_WRITE ((__force fmode_t)0x40000) | 135 | #define FMODE_CAN_WRITE ((__force fmode_t)0x40000) |
| 136 | 136 | ||
| 137 | /* File was opened by fanotify and shouldn't generate fanotify events */ | 137 | /* File was opened by fanotify and shouldn't generate fanotify events */ |
| 138 | #define FMODE_NONOTIFY ((__force fmode_t)0x1000000) | 138 | #define FMODE_NONOTIFY ((__force fmode_t)0x4000000) |
| 139 | 139 | ||
| 140 | /* | 140 | /* |
| 141 | * Flag for rw_copy_check_uvector and compat_rw_copy_check_uvector | 141 | * Flag for rw_copy_check_uvector and compat_rw_copy_check_uvector |
diff --git a/include/linux/kdb.h b/include/linux/kdb.h index 290db1269c4c..75ae2e2631fc 100644 --- a/include/linux/kdb.h +++ b/include/linux/kdb.h | |||
| @@ -13,11 +13,54 @@ | |||
| 13 | * Copyright (C) 2009 Jason Wessel <jason.wessel@windriver.com> | 13 | * Copyright (C) 2009 Jason Wessel <jason.wessel@windriver.com> |
| 14 | */ | 14 | */ |
| 15 | 15 | ||
| 16 | /* Shifted versions of the command enable bits are used if the command | ||
| 17 | * has no arguments (see kdb_check_flags). This allows commands, such as | ||
| 18 | * go, to have different permissions depending upon whether they are called | ||
| 19 | * with an argument. | ||
| 20 | */ | ||
| 21 | #define KDB_ENABLE_NO_ARGS_SHIFT 10 | ||
| 22 | |||
| 16 | typedef enum { | 23 | typedef enum { |
| 17 | KDB_REPEAT_NONE = 0, /* Do not repeat this command */ | 24 | KDB_ENABLE_ALL = (1 << 0), /* Enable everything */ |
| 18 | KDB_REPEAT_NO_ARGS, /* Repeat the command without arguments */ | 25 | KDB_ENABLE_MEM_READ = (1 << 1), |
| 19 | KDB_REPEAT_WITH_ARGS, /* Repeat the command including its arguments */ | 26 | KDB_ENABLE_MEM_WRITE = (1 << 2), |
| 20 | } kdb_repeat_t; | 27 | KDB_ENABLE_REG_READ = (1 << 3), |
| 28 | KDB_ENABLE_REG_WRITE = (1 << 4), | ||
| 29 | KDB_ENABLE_INSPECT = (1 << 5), | ||
| 30 | KDB_ENABLE_FLOW_CTRL = (1 << 6), | ||
| 31 | KDB_ENABLE_SIGNAL = (1 << 7), | ||
| 32 | KDB_ENABLE_REBOOT = (1 << 8), | ||
| 33 | /* User-exposed values stop here; all remaining flags are | ||
| 34 | * exclusively used to describe a command's behaviour. | ||
| 35 | */ | ||
| 36 | |||
| 37 | KDB_ENABLE_ALWAYS_SAFE = (1 << 9), | ||
| 38 | KDB_ENABLE_MASK = (1 << KDB_ENABLE_NO_ARGS_SHIFT) - 1, | ||
| 39 | |||
| 40 | KDB_ENABLE_ALL_NO_ARGS = KDB_ENABLE_ALL << KDB_ENABLE_NO_ARGS_SHIFT, | ||
| 41 | KDB_ENABLE_MEM_READ_NO_ARGS = KDB_ENABLE_MEM_READ | ||
| 42 | << KDB_ENABLE_NO_ARGS_SHIFT, | ||
| 43 | KDB_ENABLE_MEM_WRITE_NO_ARGS = KDB_ENABLE_MEM_WRITE | ||
| 44 | << KDB_ENABLE_NO_ARGS_SHIFT, | ||
| 45 | KDB_ENABLE_REG_READ_NO_ARGS = KDB_ENABLE_REG_READ | ||
| 46 | << KDB_ENABLE_NO_ARGS_SHIFT, | ||
| 47 | KDB_ENABLE_REG_WRITE_NO_ARGS = KDB_ENABLE_REG_WRITE | ||
| 48 | << KDB_ENABLE_NO_ARGS_SHIFT, | ||
| 49 | KDB_ENABLE_INSPECT_NO_ARGS = KDB_ENABLE_INSPECT | ||
| 50 | << KDB_ENABLE_NO_ARGS_SHIFT, | ||
| 51 | KDB_ENABLE_FLOW_CTRL_NO_ARGS = KDB_ENABLE_FLOW_CTRL | ||
| 52 | << KDB_ENABLE_NO_ARGS_SHIFT, | ||
| 53 | KDB_ENABLE_SIGNAL_NO_ARGS = KDB_ENABLE_SIGNAL | ||
| 54 | << KDB_ENABLE_NO_ARGS_SHIFT, | ||
| 55 | KDB_ENABLE_REBOOT_NO_ARGS = KDB_ENABLE_REBOOT | ||
| 56 | << KDB_ENABLE_NO_ARGS_SHIFT, | ||
| 57 | KDB_ENABLE_ALWAYS_SAFE_NO_ARGS = KDB_ENABLE_ALWAYS_SAFE | ||
| 58 | << KDB_ENABLE_NO_ARGS_SHIFT, | ||
| 59 | KDB_ENABLE_MASK_NO_ARGS = KDB_ENABLE_MASK << KDB_ENABLE_NO_ARGS_SHIFT, | ||
| 60 | |||
| 61 | KDB_REPEAT_NO_ARGS = 0x40000000, /* Repeat the command w/o arguments */ | ||
| 62 | KDB_REPEAT_WITH_ARGS = 0x80000000, /* Repeat the command with args */ | ||
| 63 | } kdb_cmdflags_t; | ||
| 21 | 64 | ||
| 22 | typedef int (*kdb_func_t)(int, const char **); | 65 | typedef int (*kdb_func_t)(int, const char **); |
| 23 | 66 | ||
| @@ -62,6 +105,7 @@ extern atomic_t kdb_event; | |||
| 62 | #define KDB_BADLENGTH (-19) | 105 | #define KDB_BADLENGTH (-19) |
| 63 | #define KDB_NOBP (-20) | 106 | #define KDB_NOBP (-20) |
| 64 | #define KDB_BADADDR (-21) | 107 | #define KDB_BADADDR (-21) |
| 108 | #define KDB_NOPERM (-22) | ||
| 65 | 109 | ||
| 66 | /* | 110 | /* |
| 67 | * kdb_diemsg | 111 | * kdb_diemsg |
| @@ -146,17 +190,17 @@ static inline const char *kdb_walk_kallsyms(loff_t *pos) | |||
| 146 | 190 | ||
| 147 | /* Dynamic kdb shell command registration */ | 191 | /* Dynamic kdb shell command registration */ |
| 148 | extern int kdb_register(char *, kdb_func_t, char *, char *, short); | 192 | extern int kdb_register(char *, kdb_func_t, char *, char *, short); |
| 149 | extern int kdb_register_repeat(char *, kdb_func_t, char *, char *, | 193 | extern int kdb_register_flags(char *, kdb_func_t, char *, char *, |
| 150 | short, kdb_repeat_t); | 194 | short, kdb_cmdflags_t); |
| 151 | extern int kdb_unregister(char *); | 195 | extern int kdb_unregister(char *); |
| 152 | #else /* ! CONFIG_KGDB_KDB */ | 196 | #else /* ! CONFIG_KGDB_KDB */ |
| 153 | static inline __printf(1, 2) int kdb_printf(const char *fmt, ...) { return 0; } | 197 | static inline __printf(1, 2) int kdb_printf(const char *fmt, ...) { return 0; } |
| 154 | static inline void kdb_init(int level) {} | 198 | static inline void kdb_init(int level) {} |
| 155 | static inline int kdb_register(char *cmd, kdb_func_t func, char *usage, | 199 | static inline int kdb_register(char *cmd, kdb_func_t func, char *usage, |
| 156 | char *help, short minlen) { return 0; } | 200 | char *help, short minlen) { return 0; } |
| 157 | static inline int kdb_register_repeat(char *cmd, kdb_func_t func, char *usage, | 201 | static inline int kdb_register_flags(char *cmd, kdb_func_t func, char *usage, |
| 158 | char *help, short minlen, | 202 | char *help, short minlen, |
| 159 | kdb_repeat_t repeat) { return 0; } | 203 | kdb_cmdflags_t flags) { return 0; } |
| 160 | static inline int kdb_unregister(char *cmd) { return 0; } | 204 | static inline int kdb_unregister(char *cmd) { return 0; } |
| 161 | #endif /* CONFIG_KGDB_KDB */ | 205 | #endif /* CONFIG_KGDB_KDB */ |
| 162 | enum { | 206 | enum { |
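The new kdb_cmdflags_t packs several things into one word: permission bits (KDB_ENABLE_*) in the low ten bits, the same bits shifted up by KDB_ENABLE_NO_ARGS_SHIFT for the case where a command is invoked without arguments, and the old repeat behaviour in the top bits, so kdb_check_flags() can consult either half depending on how the command was called. A compact user-space model of that encoding (names shortened, and the check is a simplification, not the kernel's actual logic):

```c
#include <stdbool.h>
#include <stdio.h>

#define NO_ARGS_SHIFT	10
#define ENABLE_MEM_READ	(1u << 1)
#define ENABLE_MASK	((1u << NO_ARGS_SHIFT) - 1)

/* A command is permitted when the caller's enable mask covers the bits the
 * command asks for; with no arguments, the shifted copies are folded in too. */
static bool check_flags(unsigned int cmd_flags, unsigned int enabled, int argc)
{
	if (argc == 0)
		cmd_flags |= cmd_flags >> NO_ARGS_SHIFT;
	return (cmd_flags & ENABLE_MASK & enabled) != 0;
}

int main(void)
{
	unsigned int cmd = ENABLE_MEM_READ << NO_ARGS_SHIFT;	/* safe only without args */

	printf("no args: %d, with args: %d\n",
	       check_flags(cmd, ENABLE_MEM_READ, 0),
	       check_flags(cmd, ENABLE_MEM_READ, 1));
	return 0;
}
```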
diff --git a/include/linux/mm.h b/include/linux/mm.h index f80d0194c9bc..80fc92a49649 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h | |||
| @@ -1952,7 +1952,7 @@ extern int expand_downwards(struct vm_area_struct *vma, | |||
| 1952 | #if VM_GROWSUP | 1952 | #if VM_GROWSUP |
| 1953 | extern int expand_upwards(struct vm_area_struct *vma, unsigned long address); | 1953 | extern int expand_upwards(struct vm_area_struct *vma, unsigned long address); |
| 1954 | #else | 1954 | #else |
| 1955 | #define expand_upwards(vma, address) do { } while (0) | 1955 | #define expand_upwards(vma, address) (0) |
| 1956 | #endif | 1956 | #endif |
| 1957 | 1957 | ||
| 1958 | /* Look up the first VMA which satisfies addr < vm_end, NULL if none. */ | 1958 | /* Look up the first VMA which satisfies addr < vm_end, NULL if none. */ |
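The mm.h change turns the !VM_GROWSUP stub of expand_upwards() from a statement macro into the expression (0): the real function returns int, and a do { } while (0) body cannot stand where a caller consumes the return value, e.g. 'error = expand_upwards(vma, address);'. Keeping no-op stubs value-compatible with the real function lets both configurations compile. A minimal illustration with a hypothetical feature_grow() stub:

```c
#include <stdio.h>

#define HAVE_FEATURE 0

#if HAVE_FEATURE
int feature_grow(int amount);
#else
/* Must stay an int-valued expression so callers may use the return value. */
#define feature_grow(amount)	(0)
#endif

int main(void)
{
	int err = feature_grow(4);	/* would not compile against a do { } while (0) stub */

	printf("err=%d\n", err);
	return 0;
}
```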
diff --git a/include/linux/rmap.h b/include/linux/rmap.h index c0c2bce6b0b7..d9d7e7e56352 100644 --- a/include/linux/rmap.h +++ b/include/linux/rmap.h | |||
| @@ -37,6 +37,16 @@ struct anon_vma { | |||
| 37 | atomic_t refcount; | 37 | atomic_t refcount; |
| 38 | 38 | ||
| 39 | /* | 39 | /* |
| 40 | * Count of child anon_vmas and VMAs which point to this anon_vma. | ||
| 41 | * | ||
| 42 | * This counter is used when deciding whether to reuse an anon_vma | ||
| 43 | * instead of forking a new one. See the comments in anon_vma_clone(). | ||
| 44 | */ | ||
| 45 | unsigned degree; | ||
| 46 | |||
| 47 | struct anon_vma *parent; /* Parent of this anon_vma */ | ||
| 48 | |||
| 49 | /* | ||
| 40 | * NOTE: the LSB of the rb_root.rb_node is set by | 50 | * NOTE: the LSB of the rb_root.rb_node is set by |
| 41 | * mm_take_all_locks() _after_ taking the above lock. So the | 51 | * mm_take_all_locks() _after_ taking the above lock. So the |
| 42 | * rb_root must only be read/written after taking the above lock | 52 | * rb_root must only be read/written after taking the above lock |
diff --git a/include/linux/writeback.h b/include/linux/writeback.h index a219be961c0a..00048339c23e 100644 --- a/include/linux/writeback.h +++ b/include/linux/writeback.h | |||
| @@ -177,7 +177,6 @@ int write_cache_pages(struct address_space *mapping, | |||
| 177 | struct writeback_control *wbc, writepage_t writepage, | 177 | struct writeback_control *wbc, writepage_t writepage, |
| 178 | void *data); | 178 | void *data); |
| 179 | int do_writepages(struct address_space *mapping, struct writeback_control *wbc); | 179 | int do_writepages(struct address_space *mapping, struct writeback_control *wbc); |
| 180 | void set_page_dirty_balance(struct page *page); | ||
| 181 | void writeback_set_ratelimit(void); | 180 | void writeback_set_ratelimit(void); |
| 182 | void tag_pages_for_writeback(struct address_space *mapping, | 181 | void tag_pages_for_writeback(struct address_space *mapping, |
| 183 | pgoff_t start, pgoff_t end); | 182 | pgoff_t start, pgoff_t end); |
diff --git a/include/net/mac80211.h b/include/net/mac80211.h index 58d719ddaa60..29c7be8808d5 100644 --- a/include/net/mac80211.h +++ b/include/net/mac80211.h | |||
| @@ -1270,8 +1270,7 @@ struct ieee80211_vif *wdev_to_ieee80211_vif(struct wireless_dev *wdev); | |||
| 1270 | * | 1270 | * |
| 1271 | * @IEEE80211_KEY_FLAG_GENERATE_IV: This flag should be set by the | 1271 | * @IEEE80211_KEY_FLAG_GENERATE_IV: This flag should be set by the |
| 1272 | * driver to indicate that it requires IV generation for this | 1272 | * driver to indicate that it requires IV generation for this |
| 1273 | * particular key. Setting this flag does not necessarily mean that SKBs | 1273 | * particular key. |
| 1274 | * will have sufficient tailroom for ICV or MIC. | ||
| 1275 | * @IEEE80211_KEY_FLAG_GENERATE_MMIC: This flag should be set by | 1274 | * @IEEE80211_KEY_FLAG_GENERATE_MMIC: This flag should be set by |
| 1276 | * the driver for a TKIP key if it requires Michael MIC | 1275 | * the driver for a TKIP key if it requires Michael MIC |
| 1277 | * generation in software. | 1276 | * generation in software. |
| @@ -1283,9 +1282,7 @@ struct ieee80211_vif *wdev_to_ieee80211_vif(struct wireless_dev *wdev); | |||
| 1283 | * @IEEE80211_KEY_FLAG_PUT_IV_SPACE: This flag should be set by the driver | 1282 | * @IEEE80211_KEY_FLAG_PUT_IV_SPACE: This flag should be set by the driver |
| 1284 | * if space should be prepared for the IV, but the IV | 1283 | * if space should be prepared for the IV, but the IV |
| 1285 | * itself should not be generated. Do not set together with | 1284 | * itself should not be generated. Do not set together with |
| 1286 | * @IEEE80211_KEY_FLAG_GENERATE_IV on the same key. Setting this flag does | 1285 | * @IEEE80211_KEY_FLAG_GENERATE_IV on the same key. |
| 1287 | * not necessarily mean that SKBs will have sufficient tailroom for ICV or | ||
| 1288 | * MIC. | ||
| 1289 | * @IEEE80211_KEY_FLAG_RX_MGMT: This key will be used to decrypt received | 1286 | * @IEEE80211_KEY_FLAG_RX_MGMT: This key will be used to decrypt received |
| 1290 | * management frames. The flag can help drivers that have a hardware | 1287 | * management frames. The flag can help drivers that have a hardware |
| 1291 | * crypto implementation that doesn't deal with management frames | 1288 | * crypto implementation that doesn't deal with management frames |
diff --git a/include/uapi/asm-generic/fcntl.h b/include/uapi/asm-generic/fcntl.h index 7543b3e51331..e063effe0cc1 100644 --- a/include/uapi/asm-generic/fcntl.h +++ b/include/uapi/asm-generic/fcntl.h | |||
| @@ -5,7 +5,7 @@ | |||
| 5 | 5 | ||
| 6 | /* | 6 | /* |
| 7 | * FMODE_EXEC is 0x20 | 7 | * FMODE_EXEC is 0x20 |
| 8 | * FMODE_NONOTIFY is 0x1000000 | 8 | * FMODE_NONOTIFY is 0x4000000 |
| 9 | * These cannot be used by userspace O_* until internal and external open | 9 | * These cannot be used by userspace O_* until internal and external open |
| 10 | * flags are split. | 10 | * flags are split. |
| 11 | * -Eric Paris | 11 | * -Eric Paris |
diff --git a/kernel/debug/debug_core.c b/kernel/debug/debug_core.c index 1adf62b39b96..07ce18ca71e0 100644 --- a/kernel/debug/debug_core.c +++ b/kernel/debug/debug_core.c | |||
| @@ -27,6 +27,9 @@ | |||
| 27 | * version 2. This program is licensed "as is" without any warranty of any | 27 | * version 2. This program is licensed "as is" without any warranty of any |
| 28 | * kind, whether express or implied. | 28 | * kind, whether express or implied. |
| 29 | */ | 29 | */ |
| 30 | |||
| 31 | #define pr_fmt(fmt) "KGDB: " fmt | ||
| 32 | |||
| 30 | #include <linux/pid_namespace.h> | 33 | #include <linux/pid_namespace.h> |
| 31 | #include <linux/clocksource.h> | 34 | #include <linux/clocksource.h> |
| 32 | #include <linux/serial_core.h> | 35 | #include <linux/serial_core.h> |
| @@ -196,8 +199,8 @@ int __weak kgdb_validate_break_address(unsigned long addr) | |||
| 196 | return err; | 199 | return err; |
| 197 | err = kgdb_arch_remove_breakpoint(&tmp); | 200 | err = kgdb_arch_remove_breakpoint(&tmp); |
| 198 | if (err) | 201 | if (err) |
| 199 | printk(KERN_ERR "KGDB: Critical breakpoint error, kernel " | 202 | pr_err("Critical breakpoint error, kernel memory destroyed at: %lx\n", |
| 200 | "memory destroyed at: %lx", addr); | 203 | addr); |
| 201 | return err; | 204 | return err; |
| 202 | } | 205 | } |
| 203 | 206 | ||
| @@ -256,8 +259,8 @@ int dbg_activate_sw_breakpoints(void) | |||
| 256 | error = kgdb_arch_set_breakpoint(&kgdb_break[i]); | 259 | error = kgdb_arch_set_breakpoint(&kgdb_break[i]); |
| 257 | if (error) { | 260 | if (error) { |
| 258 | ret = error; | 261 | ret = error; |
| 259 | printk(KERN_INFO "KGDB: BP install failed: %lx", | 262 | pr_info("BP install failed: %lx\n", |
| 260 | kgdb_break[i].bpt_addr); | 263 | kgdb_break[i].bpt_addr); |
| 261 | continue; | 264 | continue; |
| 262 | } | 265 | } |
| 263 | 266 | ||
| @@ -319,8 +322,8 @@ int dbg_deactivate_sw_breakpoints(void) | |||
| 319 | continue; | 322 | continue; |
| 320 | error = kgdb_arch_remove_breakpoint(&kgdb_break[i]); | 323 | error = kgdb_arch_remove_breakpoint(&kgdb_break[i]); |
| 321 | if (error) { | 324 | if (error) { |
| 322 | printk(KERN_INFO "KGDB: BP remove failed: %lx\n", | 325 | pr_info("BP remove failed: %lx\n", |
| 323 | kgdb_break[i].bpt_addr); | 326 | kgdb_break[i].bpt_addr); |
| 324 | ret = error; | 327 | ret = error; |
| 325 | } | 328 | } |
| 326 | 329 | ||
| @@ -367,7 +370,7 @@ int dbg_remove_all_break(void) | |||
| 367 | goto setundefined; | 370 | goto setundefined; |
| 368 | error = kgdb_arch_remove_breakpoint(&kgdb_break[i]); | 371 | error = kgdb_arch_remove_breakpoint(&kgdb_break[i]); |
| 369 | if (error) | 372 | if (error) |
| 370 | printk(KERN_ERR "KGDB: breakpoint remove failed: %lx\n", | 373 | pr_err("breakpoint remove failed: %lx\n", |
| 371 | kgdb_break[i].bpt_addr); | 374 | kgdb_break[i].bpt_addr); |
| 372 | setundefined: | 375 | setundefined: |
| 373 | kgdb_break[i].state = BP_UNDEFINED; | 376 | kgdb_break[i].state = BP_UNDEFINED; |
| @@ -400,9 +403,9 @@ static int kgdb_io_ready(int print_wait) | |||
| 400 | if (print_wait) { | 403 | if (print_wait) { |
| 401 | #ifdef CONFIG_KGDB_KDB | 404 | #ifdef CONFIG_KGDB_KDB |
| 402 | if (!dbg_kdb_mode) | 405 | if (!dbg_kdb_mode) |
| 403 | printk(KERN_CRIT "KGDB: waiting... or $3#33 for KDB\n"); | 406 | pr_crit("waiting... or $3#33 for KDB\n"); |
| 404 | #else | 407 | #else |
| 405 | printk(KERN_CRIT "KGDB: Waiting for remote debugger\n"); | 408 | pr_crit("Waiting for remote debugger\n"); |
| 406 | #endif | 409 | #endif |
| 407 | } | 410 | } |
| 408 | return 1; | 411 | return 1; |
| @@ -430,8 +433,7 @@ static int kgdb_reenter_check(struct kgdb_state *ks) | |||
| 430 | exception_level = 0; | 433 | exception_level = 0; |
| 431 | kgdb_skipexception(ks->ex_vector, ks->linux_regs); | 434 | kgdb_skipexception(ks->ex_vector, ks->linux_regs); |
| 432 | dbg_activate_sw_breakpoints(); | 435 | dbg_activate_sw_breakpoints(); |
| 433 | printk(KERN_CRIT "KGDB: re-enter error: breakpoint removed %lx\n", | 436 | pr_crit("re-enter error: breakpoint removed %lx\n", addr); |
| 434 | addr); | ||
| 435 | WARN_ON_ONCE(1); | 437 | WARN_ON_ONCE(1); |
| 436 | 438 | ||
| 437 | return 1; | 439 | return 1; |
| @@ -444,7 +446,7 @@ static int kgdb_reenter_check(struct kgdb_state *ks) | |||
| 444 | panic("Recursive entry to debugger"); | 446 | panic("Recursive entry to debugger"); |
| 445 | } | 447 | } |
| 446 | 448 | ||
| 447 | printk(KERN_CRIT "KGDB: re-enter exception: ALL breakpoints killed\n"); | 449 | pr_crit("re-enter exception: ALL breakpoints killed\n"); |
| 448 | #ifdef CONFIG_KGDB_KDB | 450 | #ifdef CONFIG_KGDB_KDB |
| 449 | /* Allow kdb to debug itself one level */ | 451 | /* Allow kdb to debug itself one level */ |
| 450 | return 0; | 452 | return 0; |
| @@ -471,6 +473,7 @@ static int kgdb_cpu_enter(struct kgdb_state *ks, struct pt_regs *regs, | |||
| 471 | int cpu; | 473 | int cpu; |
| 472 | int trace_on = 0; | 474 | int trace_on = 0; |
| 473 | int online_cpus = num_online_cpus(); | 475 | int online_cpus = num_online_cpus(); |
| 476 | u64 time_left; | ||
| 474 | 477 | ||
| 475 | kgdb_info[ks->cpu].enter_kgdb++; | 478 | kgdb_info[ks->cpu].enter_kgdb++; |
| 476 | kgdb_info[ks->cpu].exception_state |= exception_state; | 479 | kgdb_info[ks->cpu].exception_state |= exception_state; |
| @@ -595,9 +598,13 @@ return_normal: | |||
| 595 | /* | 598 | /* |
| 596 | * Wait for the other CPUs to be notified and be waiting for us: | 599 | * Wait for the other CPUs to be notified and be waiting for us: |
| 597 | */ | 600 | */ |
| 598 | while (kgdb_do_roundup && (atomic_read(&masters_in_kgdb) + | 601 | time_left = loops_per_jiffy * HZ; |
| 599 | atomic_read(&slaves_in_kgdb)) != online_cpus) | 602 | while (kgdb_do_roundup && --time_left && |
| 603 | (atomic_read(&masters_in_kgdb) + atomic_read(&slaves_in_kgdb)) != | ||
| 604 | online_cpus) | ||
| 600 | cpu_relax(); | 605 | cpu_relax(); |
| 606 | if (!time_left) | ||
| 607 | pr_crit("KGDB: Timed out waiting for secondary CPUs.\n"); | ||
| 601 | 608 | ||
| 602 | /* | 609 | /* |
| 603 | * At this point the primary processor is completely | 610 | * At this point the primary processor is completely |
| @@ -795,15 +802,15 @@ static struct console kgdbcons = { | |||
| 795 | static void sysrq_handle_dbg(int key) | 802 | static void sysrq_handle_dbg(int key) |
| 796 | { | 803 | { |
| 797 | if (!dbg_io_ops) { | 804 | if (!dbg_io_ops) { |
| 798 | printk(KERN_CRIT "ERROR: No KGDB I/O module available\n"); | 805 | pr_crit("ERROR: No KGDB I/O module available\n"); |
| 799 | return; | 806 | return; |
| 800 | } | 807 | } |
| 801 | if (!kgdb_connected) { | 808 | if (!kgdb_connected) { |
| 802 | #ifdef CONFIG_KGDB_KDB | 809 | #ifdef CONFIG_KGDB_KDB |
| 803 | if (!dbg_kdb_mode) | 810 | if (!dbg_kdb_mode) |
| 804 | printk(KERN_CRIT "KGDB or $3#33 for KDB\n"); | 811 | pr_crit("KGDB or $3#33 for KDB\n"); |
| 805 | #else | 812 | #else |
| 806 | printk(KERN_CRIT "Entering KGDB\n"); | 813 | pr_crit("Entering KGDB\n"); |
| 807 | #endif | 814 | #endif |
| 808 | } | 815 | } |
| 809 | 816 | ||
| @@ -945,7 +952,7 @@ static void kgdb_initial_breakpoint(void) | |||
| 945 | { | 952 | { |
| 946 | kgdb_break_asap = 0; | 953 | kgdb_break_asap = 0; |
| 947 | 954 | ||
| 948 | printk(KERN_CRIT "kgdb: Waiting for connection from remote gdb...\n"); | 955 | pr_crit("Waiting for connection from remote gdb...\n"); |
| 949 | kgdb_breakpoint(); | 956 | kgdb_breakpoint(); |
| 950 | } | 957 | } |
| 951 | 958 | ||
| @@ -964,8 +971,7 @@ int kgdb_register_io_module(struct kgdb_io *new_dbg_io_ops) | |||
| 964 | if (dbg_io_ops) { | 971 | if (dbg_io_ops) { |
| 965 | spin_unlock(&kgdb_registration_lock); | 972 | spin_unlock(&kgdb_registration_lock); |
| 966 | 973 | ||
| 967 | printk(KERN_ERR "kgdb: Another I/O driver is already " | 974 | pr_err("Another I/O driver is already registered with KGDB\n"); |
| 968 | "registered with KGDB.\n"); | ||
| 969 | return -EBUSY; | 975 | return -EBUSY; |
| 970 | } | 976 | } |
| 971 | 977 | ||
| @@ -981,8 +987,7 @@ int kgdb_register_io_module(struct kgdb_io *new_dbg_io_ops) | |||
| 981 | 987 | ||
| 982 | spin_unlock(&kgdb_registration_lock); | 988 | spin_unlock(&kgdb_registration_lock); |
| 983 | 989 | ||
| 984 | printk(KERN_INFO "kgdb: Registered I/O driver %s.\n", | 990 | pr_info("Registered I/O driver %s\n", new_dbg_io_ops->name); |
| 985 | new_dbg_io_ops->name); | ||
| 986 | 991 | ||
| 987 | /* Arm KGDB now. */ | 992 | /* Arm KGDB now. */ |
| 988 | kgdb_register_callbacks(); | 993 | kgdb_register_callbacks(); |
| @@ -1017,8 +1022,7 @@ void kgdb_unregister_io_module(struct kgdb_io *old_dbg_io_ops) | |||
| 1017 | 1022 | ||
| 1018 | spin_unlock(&kgdb_registration_lock); | 1023 | spin_unlock(&kgdb_registration_lock); |
| 1019 | 1024 | ||
| 1020 | printk(KERN_INFO | 1025 | pr_info("Unregistered I/O driver %s, debugger disabled\n", |
| 1021 | "kgdb: Unregistered I/O driver %s, debugger disabled.\n", | ||
| 1022 | old_dbg_io_ops->name); | 1026 | old_dbg_io_ops->name); |
| 1023 | } | 1027 | } |
| 1024 | EXPORT_SYMBOL_GPL(kgdb_unregister_io_module); | 1028 | EXPORT_SYMBOL_GPL(kgdb_unregister_io_module); |
diff --git a/kernel/debug/kdb/kdb_bp.c b/kernel/debug/kdb/kdb_bp.c index b20d544f20c2..e1dbf4a2c69e 100644 --- a/kernel/debug/kdb/kdb_bp.c +++ b/kernel/debug/kdb/kdb_bp.c | |||
| @@ -531,22 +531,29 @@ void __init kdb_initbptab(void) | |||
| 531 | for (i = 0, bp = kdb_breakpoints; i < KDB_MAXBPT; i++, bp++) | 531 | for (i = 0, bp = kdb_breakpoints; i < KDB_MAXBPT; i++, bp++) |
| 532 | bp->bp_free = 1; | 532 | bp->bp_free = 1; |
| 533 | 533 | ||
| 534 | kdb_register_repeat("bp", kdb_bp, "[<vaddr>]", | 534 | kdb_register_flags("bp", kdb_bp, "[<vaddr>]", |
| 535 | "Set/Display breakpoints", 0, KDB_REPEAT_NO_ARGS); | 535 | "Set/Display breakpoints", 0, |
| 536 | kdb_register_repeat("bl", kdb_bp, "[<vaddr>]", | 536 | KDB_ENABLE_FLOW_CTRL | KDB_REPEAT_NO_ARGS); |
| 537 | "Display breakpoints", 0, KDB_REPEAT_NO_ARGS); | 537 | kdb_register_flags("bl", kdb_bp, "[<vaddr>]", |
| 538 | "Display breakpoints", 0, | ||
| 539 | KDB_ENABLE_FLOW_CTRL | KDB_REPEAT_NO_ARGS); | ||
| 538 | if (arch_kgdb_ops.flags & KGDB_HW_BREAKPOINT) | 540 | if (arch_kgdb_ops.flags & KGDB_HW_BREAKPOINT) |
| 539 | kdb_register_repeat("bph", kdb_bp, "[<vaddr>]", | 541 | kdb_register_flags("bph", kdb_bp, "[<vaddr>]", |
| 540 | "[datar [length]|dataw [length]] Set hw brk", 0, KDB_REPEAT_NO_ARGS); | 542 | "[datar [length]|dataw [length]] Set hw brk", 0, |
| 541 | kdb_register_repeat("bc", kdb_bc, "<bpnum>", | 543 | KDB_ENABLE_FLOW_CTRL | KDB_REPEAT_NO_ARGS); |
| 542 | "Clear Breakpoint", 0, KDB_REPEAT_NONE); | 544 | kdb_register_flags("bc", kdb_bc, "<bpnum>", |
| 543 | kdb_register_repeat("be", kdb_bc, "<bpnum>", | 545 | "Clear Breakpoint", 0, |
| 544 | "Enable Breakpoint", 0, KDB_REPEAT_NONE); | 546 | KDB_ENABLE_FLOW_CTRL); |
| 545 | kdb_register_repeat("bd", kdb_bc, "<bpnum>", | 547 | kdb_register_flags("be", kdb_bc, "<bpnum>", |
| 546 | "Disable Breakpoint", 0, KDB_REPEAT_NONE); | 548 | "Enable Breakpoint", 0, |
| 547 | 549 | KDB_ENABLE_FLOW_CTRL); | |
| 548 | kdb_register_repeat("ss", kdb_ss, "", | 550 | kdb_register_flags("bd", kdb_bc, "<bpnum>", |
| 549 | "Single Step", 1, KDB_REPEAT_NO_ARGS); | 551 | "Disable Breakpoint", 0, |
| 552 | KDB_ENABLE_FLOW_CTRL); | ||
| 553 | |||
| 554 | kdb_register_flags("ss", kdb_ss, "", | ||
| 555 | "Single Step", 1, | ||
| 556 | KDB_ENABLE_FLOW_CTRL | KDB_REPEAT_NO_ARGS); | ||
| 550 | /* | 557 | /* |
| 551 | * Architecture dependent initialization. | 558 | * Architecture dependent initialization. |
| 552 | */ | 559 | */ |
diff --git a/kernel/debug/kdb/kdb_debugger.c b/kernel/debug/kdb/kdb_debugger.c index 8859ca34dcfe..15e1a7af5dd0 100644 --- a/kernel/debug/kdb/kdb_debugger.c +++ b/kernel/debug/kdb/kdb_debugger.c | |||
| @@ -129,6 +129,10 @@ int kdb_stub(struct kgdb_state *ks) | |||
| 129 | ks->pass_exception = 1; | 129 | ks->pass_exception = 1; |
| 130 | KDB_FLAG_SET(CATASTROPHIC); | 130 | KDB_FLAG_SET(CATASTROPHIC); |
| 131 | } | 131 | } |
| 132 | /* set CATASTROPHIC if the system contains unresponsive processors */ | ||
| 133 | for_each_online_cpu(i) | ||
| 134 | if (!kgdb_info[i].enter_kgdb) | ||
| 135 | KDB_FLAG_SET(CATASTROPHIC); | ||
| 132 | if (KDB_STATE(SSBPT) && reason == KDB_REASON_SSTEP) { | 136 | if (KDB_STATE(SSBPT) && reason == KDB_REASON_SSTEP) { |
| 133 | KDB_STATE_CLEAR(SSBPT); | 137 | KDB_STATE_CLEAR(SSBPT); |
| 134 | KDB_STATE_CLEAR(DOING_SS); | 138 | KDB_STATE_CLEAR(DOING_SS); |
diff --git a/kernel/debug/kdb/kdb_main.c b/kernel/debug/kdb/kdb_main.c index 379650b984f8..f191bddf64b8 100644 --- a/kernel/debug/kdb/kdb_main.c +++ b/kernel/debug/kdb/kdb_main.c | |||
| @@ -12,6 +12,7 @@ | |||
| 12 | */ | 12 | */ |
| 13 | 13 | ||
| 14 | #include <linux/ctype.h> | 14 | #include <linux/ctype.h> |
| 15 | #include <linux/types.h> | ||
| 15 | #include <linux/string.h> | 16 | #include <linux/string.h> |
| 16 | #include <linux/kernel.h> | 17 | #include <linux/kernel.h> |
| 17 | #include <linux/kmsg_dump.h> | 18 | #include <linux/kmsg_dump.h> |
| @@ -23,6 +24,7 @@ | |||
| 23 | #include <linux/vmalloc.h> | 24 | #include <linux/vmalloc.h> |
| 24 | #include <linux/atomic.h> | 25 | #include <linux/atomic.h> |
| 25 | #include <linux/module.h> | 26 | #include <linux/module.h> |
| 27 | #include <linux/moduleparam.h> | ||
| 26 | #include <linux/mm.h> | 28 | #include <linux/mm.h> |
| 27 | #include <linux/init.h> | 29 | #include <linux/init.h> |
| 28 | #include <linux/kallsyms.h> | 30 | #include <linux/kallsyms.h> |
| @@ -42,6 +44,12 @@ | |||
| 42 | #include <linux/slab.h> | 44 | #include <linux/slab.h> |
| 43 | #include "kdb_private.h" | 45 | #include "kdb_private.h" |
| 44 | 46 | ||
| 47 | #undef MODULE_PARAM_PREFIX | ||
| 48 | #define MODULE_PARAM_PREFIX "kdb." | ||
| 49 | |||
| 50 | static int kdb_cmd_enabled = CONFIG_KDB_DEFAULT_ENABLE; | ||
| 51 | module_param_named(cmd_enable, kdb_cmd_enabled, int, 0600); | ||
| 52 | |||
| 45 | #define GREP_LEN 256 | 53 | #define GREP_LEN 256 |
| 46 | char kdb_grep_string[GREP_LEN]; | 54 | char kdb_grep_string[GREP_LEN]; |
| 47 | int kdb_grepping_flag; | 55 | int kdb_grepping_flag; |
| @@ -121,6 +129,7 @@ static kdbmsg_t kdbmsgs[] = { | |||
| 121 | KDBMSG(BADLENGTH, "Invalid length field"), | 129 | KDBMSG(BADLENGTH, "Invalid length field"), |
| 122 | KDBMSG(NOBP, "No Breakpoint exists"), | 130 | KDBMSG(NOBP, "No Breakpoint exists"), |
| 123 | KDBMSG(BADADDR, "Invalid address"), | 131 | KDBMSG(BADADDR, "Invalid address"), |
| 132 | KDBMSG(NOPERM, "Permission denied"), | ||
| 124 | }; | 133 | }; |
| 125 | #undef KDBMSG | 134 | #undef KDBMSG |
| 126 | 135 | ||
| @@ -188,6 +197,26 @@ struct task_struct *kdb_curr_task(int cpu) | |||
| 188 | } | 197 | } |
| 189 | 198 | ||
| 190 | /* | 199 | /* |
| 200 | * Check whether the flags of the current command and the permissions | ||
| 201 | * of the kdb console allow a command to be run. | ||
| 202 | */ | ||
| 203 | static inline bool kdb_check_flags(kdb_cmdflags_t flags, int permissions, | ||
| 204 | bool no_args) | ||
| 205 | { | ||
| 206 | /* permissions comes from userspace so needs massaging slightly */ | ||
| 207 | permissions &= KDB_ENABLE_MASK; | ||
| 208 | permissions |= KDB_ENABLE_ALWAYS_SAFE; | ||
| 209 | |||
| 210 | /* some commands change group when launched with no arguments */ | ||
| 211 | if (no_args) | ||
| 212 | permissions |= permissions << KDB_ENABLE_NO_ARGS_SHIFT; | ||
| 213 | |||
| 214 | flags |= KDB_ENABLE_ALL; | ||
| 215 | |||
| 216 | return permissions & flags; | ||
| 217 | } | ||
| 218 | |||
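To make the effect of kdb_check_flags() above concrete, here is a stand-alone sketch of the same logic. It is illustrative only: the per-command KDB_ENABLE_* bit values are assumed to match the KDB_DEFAULT_ENABLE help text added later in this patch, and the ALWAYS_SAFE bit and no-args shift are assumptions, not copied from kdb.h.

    #include <stdbool.h>
    #include <stdio.h>

    /* Assumed values: per-command bits follow the KDB_DEFAULT_ENABLE help
     * text; ALWAYS_SAFE and the no-args shift are illustrative guesses. */
    #define KDB_ENABLE_ALL           0x0001
    #define KDB_ENABLE_MEM_READ      0x0002
    #define KDB_ENABLE_FLOW_CTRL     0x0040
    #define KDB_ENABLE_ALWAYS_SAFE   0x0200
    #define KDB_ENABLE_NO_ARGS_SHIFT 10
    #define KDB_ENABLE_MASK          ((1 << KDB_ENABLE_NO_ARGS_SHIFT) - 1)

    /* Same shape as kdb_check_flags() in the hunk above. */
    static bool check_flags(int flags, int permissions, bool no_args)
    {
            permissions &= KDB_ENABLE_MASK;
            permissions |= KDB_ENABLE_ALWAYS_SAFE;
            if (no_args)
                    permissions |= permissions << KDB_ENABLE_NO_ARGS_SHIFT;
            flags |= KDB_ENABLE_ALL;
            return permissions & flags;
    }

    int main(void)
    {
            int enabled = KDB_ENABLE_MEM_READ;      /* e.g. kdb.cmd_enable=0x2 */

            /* A memory-read command ("md") passes the check... */
            printf("md allowed: %d\n", check_flags(KDB_ENABLE_MEM_READ, enabled, false));
            /* ...while a flow-control command ("bp") would be refused (KDB_NOPERM). */
            printf("bp allowed: %d\n", check_flags(KDB_ENABLE_FLOW_CTRL, enabled, false));
            return 0;
    }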
| 219 | /* | ||
| 191 | * kdbgetenv - This function will return the character string value of | 220 | * kdbgetenv - This function will return the character string value of |
| 192 | * an environment variable. | 221 | * an environment variable. |
| 193 | * Parameters: | 222 | * Parameters: |
| @@ -476,6 +505,15 @@ int kdbgetaddrarg(int argc, const char **argv, int *nextarg, | |||
| 476 | kdb_symtab_t symtab; | 505 | kdb_symtab_t symtab; |
| 477 | 506 | ||
| 478 | /* | 507 | /* |
| 508 | * If the enable flags prohibit both arbitrary memory access | ||
| 509 | * and flow control then there are no reasonable grounds to | ||
| 510 | * provide symbol lookup. | ||
| 511 | */ | ||
| 512 | if (!kdb_check_flags(KDB_ENABLE_MEM_READ | KDB_ENABLE_FLOW_CTRL, | ||
| 513 | kdb_cmd_enabled, false)) | ||
| 514 | return KDB_NOPERM; | ||
| 515 | |||
| 516 | /* | ||
| 479 | * Process arguments which follow the following syntax: | 517 | * Process arguments which follow the following syntax: |
| 480 | * | 518 | * |
| 481 | * symbol | numeric-address [+/- numeric-offset] | 519 | * symbol | numeric-address [+/- numeric-offset] |
| @@ -641,8 +679,13 @@ static int kdb_defcmd2(const char *cmdstr, const char *argv0) | |||
| 641 | if (!s->count) | 679 | if (!s->count) |
| 642 | s->usable = 0; | 680 | s->usable = 0; |
| 643 | if (s->usable) | 681 | if (s->usable) |
| 644 | kdb_register(s->name, kdb_exec_defcmd, | 682 | /* macros are always safe because when executed each |
| 645 | s->usage, s->help, 0); | 683 | * internal command re-enters kdb_parse() and is |
| 684 | * safety checked individually. | ||
| 685 | */ | ||
| 686 | kdb_register_flags(s->name, kdb_exec_defcmd, s->usage, | ||
| 687 | s->help, 0, | ||
| 688 | KDB_ENABLE_ALWAYS_SAFE); | ||
| 646 | return 0; | 689 | return 0; |
| 647 | } | 690 | } |
| 648 | if (!s->usable) | 691 | if (!s->usable) |
| @@ -1003,25 +1046,22 @@ int kdb_parse(const char *cmdstr) | |||
| 1003 | 1046 | ||
| 1004 | if (i < kdb_max_commands) { | 1047 | if (i < kdb_max_commands) { |
| 1005 | int result; | 1048 | int result; |
| 1049 | |||
| 1050 | if (!kdb_check_flags(tp->cmd_flags, kdb_cmd_enabled, argc <= 1)) | ||
| 1051 | return KDB_NOPERM; | ||
| 1052 | |||
| 1006 | KDB_STATE_SET(CMD); | 1053 | KDB_STATE_SET(CMD); |
| 1007 | result = (*tp->cmd_func)(argc-1, (const char **)argv); | 1054 | result = (*tp->cmd_func)(argc-1, (const char **)argv); |
| 1008 | if (result && ignore_errors && result > KDB_CMD_GO) | 1055 | if (result && ignore_errors && result > KDB_CMD_GO) |
| 1009 | result = 0; | 1056 | result = 0; |
| 1010 | KDB_STATE_CLEAR(CMD); | 1057 | KDB_STATE_CLEAR(CMD); |
| 1011 | switch (tp->cmd_repeat) { | 1058 | |
| 1012 | case KDB_REPEAT_NONE: | 1059 | if (tp->cmd_flags & KDB_REPEAT_WITH_ARGS) |
| 1013 | argc = 0; | 1060 | return result; |
| 1014 | if (argv[0]) | 1061 | |
| 1015 | *(argv[0]) = '\0'; | 1062 | argc = tp->cmd_flags & KDB_REPEAT_NO_ARGS ? 1 : 0; |
| 1016 | break; | 1063 | if (argv[argc]) |
| 1017 | case KDB_REPEAT_NO_ARGS: | 1064 | *(argv[argc]) = '\0'; |
| 1018 | argc = 1; | ||
| 1019 | if (argv[1]) | ||
| 1020 | *(argv[1]) = '\0'; | ||
| 1021 | break; | ||
| 1022 | case KDB_REPEAT_WITH_ARGS: | ||
| 1023 | break; | ||
| 1024 | } | ||
| 1025 | return result; | 1065 | return result; |
| 1026 | } | 1066 | } |
| 1027 | 1067 | ||
| @@ -1921,10 +1961,14 @@ static int kdb_rm(int argc, const char **argv) | |||
| 1921 | */ | 1961 | */ |
| 1922 | static int kdb_sr(int argc, const char **argv) | 1962 | static int kdb_sr(int argc, const char **argv) |
| 1923 | { | 1963 | { |
| 1964 | bool check_mask = | ||
| 1965 | !kdb_check_flags(KDB_ENABLE_ALL, kdb_cmd_enabled, false); | ||
| 1966 | |||
| 1924 | if (argc != 1) | 1967 | if (argc != 1) |
| 1925 | return KDB_ARGCOUNT; | 1968 | return KDB_ARGCOUNT; |
| 1969 | |||
| 1926 | kdb_trap_printk++; | 1970 | kdb_trap_printk++; |
| 1927 | __handle_sysrq(*argv[1], false); | 1971 | __handle_sysrq(*argv[1], check_mask); |
| 1928 | kdb_trap_printk--; | 1972 | kdb_trap_printk--; |
| 1929 | 1973 | ||
| 1930 | return 0; | 1974 | return 0; |
| @@ -2157,6 +2201,8 @@ static void kdb_cpu_status(void) | |||
| 2157 | for (start_cpu = -1, i = 0; i < NR_CPUS; i++) { | 2201 | for (start_cpu = -1, i = 0; i < NR_CPUS; i++) { |
| 2158 | if (!cpu_online(i)) { | 2202 | if (!cpu_online(i)) { |
| 2159 | state = 'F'; /* cpu is offline */ | 2203 | state = 'F'; /* cpu is offline */ |
| 2204 | } else if (!kgdb_info[i].enter_kgdb) { | ||
| 2205 | state = 'D'; /* cpu is online but unresponsive */ | ||
| 2160 | } else { | 2206 | } else { |
| 2161 | state = ' '; /* cpu is responding to kdb */ | 2207 | state = ' '; /* cpu is responding to kdb */ |
| 2162 | if (kdb_task_state_char(KDB_TSK(i)) == 'I') | 2208 | if (kdb_task_state_char(KDB_TSK(i)) == 'I') |
| @@ -2210,7 +2256,7 @@ static int kdb_cpu(int argc, const char **argv) | |||
| 2210 | /* | 2256 | /* |
| 2211 | * Validate cpunum | 2257 | * Validate cpunum |
| 2212 | */ | 2258 | */ |
| 2213 | if ((cpunum > NR_CPUS) || !cpu_online(cpunum)) | 2259 | if ((cpunum > NR_CPUS) || !kgdb_info[cpunum].enter_kgdb) |
| 2214 | return KDB_BADCPUNUM; | 2260 | return KDB_BADCPUNUM; |
| 2215 | 2261 | ||
| 2216 | dbg_switch_cpu = cpunum; | 2262 | dbg_switch_cpu = cpunum; |
| @@ -2375,6 +2421,8 @@ static int kdb_help(int argc, const char **argv) | |||
| 2375 | return 0; | 2421 | return 0; |
| 2376 | if (!kt->cmd_name) | 2422 | if (!kt->cmd_name) |
| 2377 | continue; | 2423 | continue; |
| 2424 | if (!kdb_check_flags(kt->cmd_flags, kdb_cmd_enabled, true)) | ||
| 2425 | continue; | ||
| 2378 | if (strlen(kt->cmd_usage) > 20) | 2426 | if (strlen(kt->cmd_usage) > 20) |
| 2379 | space = "\n "; | 2427 | space = "\n "; |
| 2380 | kdb_printf("%-15.15s %-20s%s%s\n", kt->cmd_name, | 2428 | kdb_printf("%-15.15s %-20s%s%s\n", kt->cmd_name, |
| @@ -2629,7 +2677,7 @@ static int kdb_grep_help(int argc, const char **argv) | |||
| 2629 | } | 2677 | } |
| 2630 | 2678 | ||
| 2631 | /* | 2679 | /* |
| 2632 | * kdb_register_repeat - This function is used to register a kernel | 2680 | * kdb_register_flags - This function is used to register a kernel |
| 2633 | * debugger command. | 2681 | * debugger command. |
| 2634 | * Inputs: | 2682 | * Inputs: |
| 2635 | * cmd Command name | 2683 | * cmd Command name |
| @@ -2641,12 +2689,12 @@ static int kdb_grep_help(int argc, const char **argv) | |||
| 2641 | * zero for success, one if a duplicate command. | 2689 | * zero for success, one if a duplicate command. |
| 2642 | */ | 2690 | */ |
| 2643 | #define kdb_command_extend 50 /* arbitrary */ | 2691 | #define kdb_command_extend 50 /* arbitrary */ |
| 2644 | int kdb_register_repeat(char *cmd, | 2692 | int kdb_register_flags(char *cmd, |
| 2645 | kdb_func_t func, | 2693 | kdb_func_t func, |
| 2646 | char *usage, | 2694 | char *usage, |
| 2647 | char *help, | 2695 | char *help, |
| 2648 | short minlen, | 2696 | short minlen, |
| 2649 | kdb_repeat_t repeat) | 2697 | kdb_cmdflags_t flags) |
| 2650 | { | 2698 | { |
| 2651 | int i; | 2699 | int i; |
| 2652 | kdbtab_t *kp; | 2700 | kdbtab_t *kp; |
| @@ -2694,19 +2742,18 @@ int kdb_register_repeat(char *cmd, | |||
| 2694 | kp->cmd_func = func; | 2742 | kp->cmd_func = func; |
| 2695 | kp->cmd_usage = usage; | 2743 | kp->cmd_usage = usage; |
| 2696 | kp->cmd_help = help; | 2744 | kp->cmd_help = help; |
| 2697 | kp->cmd_flags = 0; | ||
| 2698 | kp->cmd_minlen = minlen; | 2745 | kp->cmd_minlen = minlen; |
| 2699 | kp->cmd_repeat = repeat; | 2746 | kp->cmd_flags = flags; |
| 2700 | 2747 | ||
| 2701 | return 0; | 2748 | return 0; |
| 2702 | } | 2749 | } |
| 2703 | EXPORT_SYMBOL_GPL(kdb_register_repeat); | 2750 | EXPORT_SYMBOL_GPL(kdb_register_flags); |
| 2704 | 2751 | ||
| 2705 | 2752 | ||
| 2706 | /* | 2753 | /* |
| 2707 | * kdb_register - Compatibility register function for commands that do | 2754 | * kdb_register - Compatibility register function for commands that do |
| 2708 | * not need to specify a repeat state. Equivalent to | 2755 | * not need to specify a repeat state. Equivalent to |
| 2709 | * kdb_register_repeat with KDB_REPEAT_NONE. | 2756 | * kdb_register_flags with flags set to 0. |
| 2710 | * Inputs: | 2757 | * Inputs: |
| 2711 | * cmd Command name | 2758 | * cmd Command name |
| 2712 | * func Function to execute the command | 2759 | * func Function to execute the command |
| @@ -2721,8 +2768,7 @@ int kdb_register(char *cmd, | |||
| 2721 | char *help, | 2768 | char *help, |
| 2722 | short minlen) | 2769 | short minlen) |
| 2723 | { | 2770 | { |
| 2724 | return kdb_register_repeat(cmd, func, usage, help, minlen, | 2771 | return kdb_register_flags(cmd, func, usage, help, minlen, 0); |
| 2725 | KDB_REPEAT_NONE); | ||
| 2726 | } | 2772 | } |
| 2727 | EXPORT_SYMBOL_GPL(kdb_register); | 2773 | EXPORT_SYMBOL_GPL(kdb_register); |
| 2728 | 2774 | ||
| @@ -2764,80 +2810,109 @@ static void __init kdb_inittab(void) | |||
| 2764 | for_each_kdbcmd(kp, i) | 2810 | for_each_kdbcmd(kp, i) |
| 2765 | kp->cmd_name = NULL; | 2811 | kp->cmd_name = NULL; |
| 2766 | 2812 | ||
| 2767 | kdb_register_repeat("md", kdb_md, "<vaddr>", | 2813 | kdb_register_flags("md", kdb_md, "<vaddr>", |
| 2768 | "Display Memory Contents, also mdWcN, e.g. md8c1", 1, | 2814 | "Display Memory Contents, also mdWcN, e.g. md8c1", 1, |
| 2769 | KDB_REPEAT_NO_ARGS); | 2815 | KDB_ENABLE_MEM_READ | KDB_REPEAT_NO_ARGS); |
| 2770 | kdb_register_repeat("mdr", kdb_md, "<vaddr> <bytes>", | 2816 | kdb_register_flags("mdr", kdb_md, "<vaddr> <bytes>", |
| 2771 | "Display Raw Memory", 0, KDB_REPEAT_NO_ARGS); | 2817 | "Display Raw Memory", 0, |
| 2772 | kdb_register_repeat("mdp", kdb_md, "<paddr> <bytes>", | 2818 | KDB_ENABLE_MEM_READ | KDB_REPEAT_NO_ARGS); |
| 2773 | "Display Physical Memory", 0, KDB_REPEAT_NO_ARGS); | 2819 | kdb_register_flags("mdp", kdb_md, "<paddr> <bytes>", |
| 2774 | kdb_register_repeat("mds", kdb_md, "<vaddr>", | 2820 | "Display Physical Memory", 0, |
| 2775 | "Display Memory Symbolically", 0, KDB_REPEAT_NO_ARGS); | 2821 | KDB_ENABLE_MEM_READ | KDB_REPEAT_NO_ARGS); |
| 2776 | kdb_register_repeat("mm", kdb_mm, "<vaddr> <contents>", | 2822 | kdb_register_flags("mds", kdb_md, "<vaddr>", |
| 2777 | "Modify Memory Contents", 0, KDB_REPEAT_NO_ARGS); | 2823 | "Display Memory Symbolically", 0, |
| 2778 | kdb_register_repeat("go", kdb_go, "[<vaddr>]", | 2824 | KDB_ENABLE_MEM_READ | KDB_REPEAT_NO_ARGS); |
| 2779 | "Continue Execution", 1, KDB_REPEAT_NONE); | 2825 | kdb_register_flags("mm", kdb_mm, "<vaddr> <contents>", |
| 2780 | kdb_register_repeat("rd", kdb_rd, "", | 2826 | "Modify Memory Contents", 0, |
| 2781 | "Display Registers", 0, KDB_REPEAT_NONE); | 2827 | KDB_ENABLE_MEM_WRITE | KDB_REPEAT_NO_ARGS); |
| 2782 | kdb_register_repeat("rm", kdb_rm, "<reg> <contents>", | 2828 | kdb_register_flags("go", kdb_go, "[<vaddr>]", |
| 2783 | "Modify Registers", 0, KDB_REPEAT_NONE); | 2829 | "Continue Execution", 1, |
| 2784 | kdb_register_repeat("ef", kdb_ef, "<vaddr>", | 2830 | KDB_ENABLE_REG_WRITE | KDB_ENABLE_ALWAYS_SAFE_NO_ARGS); |
| 2785 | "Display exception frame", 0, KDB_REPEAT_NONE); | 2831 | kdb_register_flags("rd", kdb_rd, "", |
| 2786 | kdb_register_repeat("bt", kdb_bt, "[<vaddr>]", | 2832 | "Display Registers", 0, |
| 2787 | "Stack traceback", 1, KDB_REPEAT_NONE); | 2833 | KDB_ENABLE_REG_READ); |
| 2788 | kdb_register_repeat("btp", kdb_bt, "<pid>", | 2834 | kdb_register_flags("rm", kdb_rm, "<reg> <contents>", |
| 2789 | "Display stack for process <pid>", 0, KDB_REPEAT_NONE); | 2835 | "Modify Registers", 0, |
| 2790 | kdb_register_repeat("bta", kdb_bt, "[D|R|S|T|C|Z|E|U|I|M|A]", | 2836 | KDB_ENABLE_REG_WRITE); |
| 2791 | "Backtrace all processes matching state flag", 0, KDB_REPEAT_NONE); | 2837 | kdb_register_flags("ef", kdb_ef, "<vaddr>", |
| 2792 | kdb_register_repeat("btc", kdb_bt, "", | 2838 | "Display exception frame", 0, |
| 2793 | "Backtrace current process on each cpu", 0, KDB_REPEAT_NONE); | 2839 | KDB_ENABLE_MEM_READ); |
| 2794 | kdb_register_repeat("btt", kdb_bt, "<vaddr>", | 2840 | kdb_register_flags("bt", kdb_bt, "[<vaddr>]", |
| 2841 | "Stack traceback", 1, | ||
| 2842 | KDB_ENABLE_MEM_READ | KDB_ENABLE_INSPECT_NO_ARGS); | ||
| 2843 | kdb_register_flags("btp", kdb_bt, "<pid>", | ||
| 2844 | "Display stack for process <pid>", 0, | ||
| 2845 | KDB_ENABLE_INSPECT); | ||
| 2846 | kdb_register_flags("bta", kdb_bt, "[D|R|S|T|C|Z|E|U|I|M|A]", | ||
| 2847 | "Backtrace all processes matching state flag", 0, | ||
| 2848 | KDB_ENABLE_INSPECT); | ||
| 2849 | kdb_register_flags("btc", kdb_bt, "", | ||
| 2850 | "Backtrace current process on each cpu", 0, | ||
| 2851 | KDB_ENABLE_INSPECT); | ||
| 2852 | kdb_register_flags("btt", kdb_bt, "<vaddr>", | ||
| 2795 | "Backtrace process given its struct task address", 0, | 2853 | "Backtrace process given its struct task address", 0, |
| 2796 | KDB_REPEAT_NONE); | 2854 | KDB_ENABLE_MEM_READ | KDB_ENABLE_INSPECT_NO_ARGS); |
| 2797 | kdb_register_repeat("env", kdb_env, "", | 2855 | kdb_register_flags("env", kdb_env, "", |
| 2798 | "Show environment variables", 0, KDB_REPEAT_NONE); | 2856 | "Show environment variables", 0, |
| 2799 | kdb_register_repeat("set", kdb_set, "", | 2857 | KDB_ENABLE_ALWAYS_SAFE); |
| 2800 | "Set environment variables", 0, KDB_REPEAT_NONE); | 2858 | kdb_register_flags("set", kdb_set, "", |
| 2801 | kdb_register_repeat("help", kdb_help, "", | 2859 | "Set environment variables", 0, |
| 2802 | "Display Help Message", 1, KDB_REPEAT_NONE); | 2860 | KDB_ENABLE_ALWAYS_SAFE); |
| 2803 | kdb_register_repeat("?", kdb_help, "", | 2861 | kdb_register_flags("help", kdb_help, "", |
| 2804 | "Display Help Message", 0, KDB_REPEAT_NONE); | 2862 | "Display Help Message", 1, |
| 2805 | kdb_register_repeat("cpu", kdb_cpu, "<cpunum>", | 2863 | KDB_ENABLE_ALWAYS_SAFE); |
| 2806 | "Switch to new cpu", 0, KDB_REPEAT_NONE); | 2864 | kdb_register_flags("?", kdb_help, "", |
| 2807 | kdb_register_repeat("kgdb", kdb_kgdb, "", | 2865 | "Display Help Message", 0, |
| 2808 | "Enter kgdb mode", 0, KDB_REPEAT_NONE); | 2866 | KDB_ENABLE_ALWAYS_SAFE); |
| 2809 | kdb_register_repeat("ps", kdb_ps, "[<flags>|A]", | 2867 | kdb_register_flags("cpu", kdb_cpu, "<cpunum>", |
| 2810 | "Display active task list", 0, KDB_REPEAT_NONE); | 2868 | "Switch to new cpu", 0, |
| 2811 | kdb_register_repeat("pid", kdb_pid, "<pidnum>", | 2869 | KDB_ENABLE_ALWAYS_SAFE_NO_ARGS); |
| 2812 | "Switch to another task", 0, KDB_REPEAT_NONE); | 2870 | kdb_register_flags("kgdb", kdb_kgdb, "", |
| 2813 | kdb_register_repeat("reboot", kdb_reboot, "", | 2871 | "Enter kgdb mode", 0, 0); |
| 2814 | "Reboot the machine immediately", 0, KDB_REPEAT_NONE); | 2872 | kdb_register_flags("ps", kdb_ps, "[<flags>|A]", |
| 2873 | "Display active task list", 0, | ||
| 2874 | KDB_ENABLE_INSPECT); | ||
| 2875 | kdb_register_flags("pid", kdb_pid, "<pidnum>", | ||
| 2876 | "Switch to another task", 0, | ||
| 2877 | KDB_ENABLE_INSPECT); | ||
| 2878 | kdb_register_flags("reboot", kdb_reboot, "", | ||
| 2879 | "Reboot the machine immediately", 0, | ||
| 2880 | KDB_ENABLE_REBOOT); | ||
| 2815 | #if defined(CONFIG_MODULES) | 2881 | #if defined(CONFIG_MODULES) |
| 2816 | kdb_register_repeat("lsmod", kdb_lsmod, "", | 2882 | kdb_register_flags("lsmod", kdb_lsmod, "", |
| 2817 | "List loaded kernel modules", 0, KDB_REPEAT_NONE); | 2883 | "List loaded kernel modules", 0, |
| 2884 | KDB_ENABLE_INSPECT); | ||
| 2818 | #endif | 2885 | #endif |
| 2819 | #if defined(CONFIG_MAGIC_SYSRQ) | 2886 | #if defined(CONFIG_MAGIC_SYSRQ) |
| 2820 | kdb_register_repeat("sr", kdb_sr, "<key>", | 2887 | kdb_register_flags("sr", kdb_sr, "<key>", |
| 2821 | "Magic SysRq key", 0, KDB_REPEAT_NONE); | 2888 | "Magic SysRq key", 0, |
| 2889 | KDB_ENABLE_ALWAYS_SAFE); | ||
| 2822 | #endif | 2890 | #endif |
| 2823 | #if defined(CONFIG_PRINTK) | 2891 | #if defined(CONFIG_PRINTK) |
| 2824 | kdb_register_repeat("dmesg", kdb_dmesg, "[lines]", | 2892 | kdb_register_flags("dmesg", kdb_dmesg, "[lines]", |
| 2825 | "Display syslog buffer", 0, KDB_REPEAT_NONE); | 2893 | "Display syslog buffer", 0, |
| 2894 | KDB_ENABLE_ALWAYS_SAFE); | ||
| 2826 | #endif | 2895 | #endif |
| 2827 | if (arch_kgdb_ops.enable_nmi) { | 2896 | if (arch_kgdb_ops.enable_nmi) { |
| 2828 | kdb_register_repeat("disable_nmi", kdb_disable_nmi, "", | 2897 | kdb_register_flags("disable_nmi", kdb_disable_nmi, "", |
| 2829 | "Disable NMI entry to KDB", 0, KDB_REPEAT_NONE); | 2898 | "Disable NMI entry to KDB", 0, |
| 2830 | } | 2899 | KDB_ENABLE_ALWAYS_SAFE); |
| 2831 | kdb_register_repeat("defcmd", kdb_defcmd, "name \"usage\" \"help\"", | 2900 | } |
| 2832 | "Define a set of commands, down to endefcmd", 0, KDB_REPEAT_NONE); | 2901 | kdb_register_flags("defcmd", kdb_defcmd, "name \"usage\" \"help\"", |
| 2833 | kdb_register_repeat("kill", kdb_kill, "<-signal> <pid>", | 2902 | "Define a set of commands, down to endefcmd", 0, |
| 2834 | "Send a signal to a process", 0, KDB_REPEAT_NONE); | 2903 | KDB_ENABLE_ALWAYS_SAFE); |
| 2835 | kdb_register_repeat("summary", kdb_summary, "", | 2904 | kdb_register_flags("kill", kdb_kill, "<-signal> <pid>", |
| 2836 | "Summarize the system", 4, KDB_REPEAT_NONE); | 2905 | "Send a signal to a process", 0, |
| 2837 | kdb_register_repeat("per_cpu", kdb_per_cpu, "<sym> [<bytes>] [<cpu>]", | 2906 | KDB_ENABLE_SIGNAL); |
| 2838 | "Display per_cpu variables", 3, KDB_REPEAT_NONE); | 2907 | kdb_register_flags("summary", kdb_summary, "", |
| 2839 | kdb_register_repeat("grephelp", kdb_grep_help, "", | 2908 | "Summarize the system", 4, |
| 2840 | "Display help on | grep", 0, KDB_REPEAT_NONE); | 2909 | KDB_ENABLE_ALWAYS_SAFE); |
| 2910 | kdb_register_flags("per_cpu", kdb_per_cpu, "<sym> [<bytes>] [<cpu>]", | ||
| 2911 | "Display per_cpu variables", 3, | ||
| 2912 | KDB_ENABLE_MEM_READ); | ||
| 2913 | kdb_register_flags("grephelp", kdb_grep_help, "", | ||
| 2914 | "Display help on | grep", 0, | ||
| 2915 | KDB_ENABLE_ALWAYS_SAFE); | ||
| 2841 | } | 2916 | } |
| 2842 | 2917 | ||
| 2843 | /* Execute any commands defined in kdb_cmds. */ | 2918 | /* Execute any commands defined in kdb_cmds. */ |
diff --git a/kernel/debug/kdb/kdb_private.h b/kernel/debug/kdb/kdb_private.h index 7afd3c8c41d5..eaacd1693954 100644 --- a/kernel/debug/kdb/kdb_private.h +++ b/kernel/debug/kdb/kdb_private.h | |||
| @@ -172,10 +172,9 @@ typedef struct _kdbtab { | |||
| 172 | kdb_func_t cmd_func; /* Function to execute command */ | 172 | kdb_func_t cmd_func; /* Function to execute command */ |
| 173 | char *cmd_usage; /* Usage String for this command */ | 173 | char *cmd_usage; /* Usage String for this command */ |
| 174 | char *cmd_help; /* Help message for this command */ | 174 | char *cmd_help; /* Help message for this command */ |
| 175 | short cmd_flags; /* Parsing flags */ | ||
| 176 | short cmd_minlen; /* Minimum legal # command | 175 | short cmd_minlen; /* Minimum legal # command |
| 177 | * chars required */ | 176 | * chars required */ |
| 178 | kdb_repeat_t cmd_repeat; /* Does command auto repeat on enter? */ | 177 | kdb_cmdflags_t cmd_flags; /* Command behaviour flags */ |
| 179 | } kdbtab_t; | 178 | } kdbtab_t; |
| 180 | 179 | ||
| 181 | extern int kdb_bt(int, const char **); /* KDB display back trace */ | 180 | extern int kdb_bt(int, const char **); /* KDB display back trace */ |
diff --git a/kernel/exit.c b/kernel/exit.c index 1ea4369890a3..6806c55475ee 100644 --- a/kernel/exit.c +++ b/kernel/exit.c | |||
| @@ -1287,9 +1287,15 @@ static int wait_task_continued(struct wait_opts *wo, struct task_struct *p) | |||
| 1287 | static int wait_consider_task(struct wait_opts *wo, int ptrace, | 1287 | static int wait_consider_task(struct wait_opts *wo, int ptrace, |
| 1288 | struct task_struct *p) | 1288 | struct task_struct *p) |
| 1289 | { | 1289 | { |
| 1290 | /* | ||
| 1291 | * We can race with wait_task_zombie() from another thread. | ||
| 1292 | * Ensure that the EXIT_ZOMBIE -> EXIT_DEAD/EXIT_TRACE transition | ||
| 1293 | * can't confuse the checks below. | ||
| 1294 | */ | ||
| 1295 | int exit_state = ACCESS_ONCE(p->exit_state); | ||
| 1290 | int ret; | 1296 | int ret; |
| 1291 | 1297 | ||
| 1292 | if (unlikely(p->exit_state == EXIT_DEAD)) | 1298 | if (unlikely(exit_state == EXIT_DEAD)) |
| 1293 | return 0; | 1299 | return 0; |
| 1294 | 1300 | ||
| 1295 | ret = eligible_child(wo, p); | 1301 | ret = eligible_child(wo, p); |
| @@ -1310,7 +1316,7 @@ static int wait_consider_task(struct wait_opts *wo, int ptrace, | |||
| 1310 | return 0; | 1316 | return 0; |
| 1311 | } | 1317 | } |
| 1312 | 1318 | ||
| 1313 | if (unlikely(p->exit_state == EXIT_TRACE)) { | 1319 | if (unlikely(exit_state == EXIT_TRACE)) { |
| 1314 | /* | 1320 | /* |
| 1315 | * ptrace == 0 means we are the natural parent. In this case | 1321 | * ptrace == 0 means we are the natural parent. In this case |
| 1316 | * we should clear notask_error, debugger will notify us. | 1322 | * we should clear notask_error, debugger will notify us. |
| @@ -1337,7 +1343,7 @@ static int wait_consider_task(struct wait_opts *wo, int ptrace, | |||
| 1337 | } | 1343 | } |
| 1338 | 1344 | ||
| 1339 | /* slay zombie? */ | 1345 | /* slay zombie? */ |
| 1340 | if (p->exit_state == EXIT_ZOMBIE) { | 1346 | if (exit_state == EXIT_ZOMBIE) { |
| 1341 | /* we don't reap group leaders with subthreads */ | 1347 | /* we don't reap group leaders with subthreads */ |
| 1342 | if (!delay_group_leader(p)) { | 1348 | if (!delay_group_leader(p)) { |
| 1343 | /* | 1349 | /* |
diff --git a/kernel/trace/trace_kdb.c b/kernel/trace/trace_kdb.c index b0b1c44e923a..3ccf5c2c1320 100644 --- a/kernel/trace/trace_kdb.c +++ b/kernel/trace/trace_kdb.c | |||
| @@ -132,8 +132,8 @@ static int kdb_ftdump(int argc, const char **argv) | |||
| 132 | 132 | ||
| 133 | static __init int kdb_ftrace_register(void) | 133 | static __init int kdb_ftrace_register(void) |
| 134 | { | 134 | { |
| 135 | kdb_register_repeat("ftdump", kdb_ftdump, "[skip_#lines] [cpu]", | 135 | kdb_register_flags("ftdump", kdb_ftdump, "[skip_#lines] [cpu]", |
| 136 | "Dump ftrace log", 0, KDB_REPEAT_NONE); | 136 | "Dump ftrace log", 0, KDB_ENABLE_ALWAYS_SAFE); |
| 137 | return 0; | 137 | return 0; |
| 138 | } | 138 | } |
| 139 | 139 | ||
diff --git a/lib/Kconfig.kgdb b/lib/Kconfig.kgdb index 358eb81fa28d..c635a107a7de 100644 --- a/lib/Kconfig.kgdb +++ b/lib/Kconfig.kgdb | |||
| @@ -73,6 +73,31 @@ config KGDB_KDB | |||
| 73 | help | 73 | help |
| 74 | KDB frontend for kernel | 74 | KDB frontend for kernel |
| 75 | 75 | ||
| 76 | config KDB_DEFAULT_ENABLE | ||
| 77 | hex "KDB: Select kdb command functions to be enabled by default" | ||
| 78 | depends on KGDB_KDB | ||
| 79 | default 0x1 | ||
| 80 | help | ||
| 81 | Specifies which kdb commands are enabled by default. This may | ||
| 82 | be set to 1 or 0 to enable all commands or disable almost all | ||
| 83 | commands. | ||
| 84 | |||
| 85 | Alternatively the following bitmask applies: | ||
| 86 | |||
| 87 | 0x0002 - allow arbitrary reads from memory and symbol lookup | ||
| 88 | 0x0004 - allow arbitrary writes to memory | ||
| 89 | 0x0008 - allow current register state to be inspected | ||
| 90 | 0x0010 - allow current register state to be modified | ||
| 91 | 0x0020 - allow passive inspection (backtrace, process list, lsmod) | ||
| 92 | 0x0040 - allow flow control management (breakpoint, single step) | ||
| 93 | 0x0080 - enable signalling of processes | ||
| 94 | 0x0100 - allow machine to be rebooted | ||
| 95 | |||
| 96 | The config option merely sets the default at boot time. Either | ||
| 97 | issuing 'echo X > /sys/module/kdb/parameters/cmd_enable' or | ||
| 98 | setting kdb.cmd_enable=X on the kernel command line will | ||
| 99 | override the default setting. | ||
| 100 | |||
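As a worked example of the bitmask above (values taken from the help text, illustrative only): permitting memory reads, passive inspection and flow control corresponds to 0x0002 | 0x0020 | 0x0040 = 0x0062, which could be applied at runtime with 'echo 0x62 > /sys/module/kdb/parameters/cmd_enable' or at boot with kdb.cmd_enable=0x62 on the kernel command line.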
| 76 | config KDB_KEYBOARD | 101 | config KDB_KEYBOARD |
| 77 | bool "KGDB_KDB: keyboard as input device" | 102 | bool "KGDB_KDB: keyboard as input device" |
| 78 | depends on VT && KGDB_KDB | 103 | depends on VT && KGDB_KDB |
diff --git a/lib/assoc_array.c b/lib/assoc_array.c index 2404d03e251a..03dd576e6773 100644 --- a/lib/assoc_array.c +++ b/lib/assoc_array.c | |||
| @@ -11,6 +11,7 @@ | |||
| 11 | * 2 of the Licence, or (at your option) any later version. | 11 | * 2 of the Licence, or (at your option) any later version. |
| 12 | */ | 12 | */ |
| 13 | //#define DEBUG | 13 | //#define DEBUG |
| 14 | #include <linux/rcupdate.h> | ||
| 14 | #include <linux/slab.h> | 15 | #include <linux/slab.h> |
| 15 | #include <linux/err.h> | 16 | #include <linux/err.h> |
| 16 | #include <linux/assoc_array_priv.h> | 17 | #include <linux/assoc_array_priv.h> |
diff --git a/mm/Kconfig.debug b/mm/Kconfig.debug index 56badfc4810a..957d3da53ddd 100644 --- a/mm/Kconfig.debug +++ b/mm/Kconfig.debug | |||
| @@ -14,7 +14,6 @@ config DEBUG_PAGEALLOC | |||
| 14 | depends on !KMEMCHECK | 14 | depends on !KMEMCHECK |
| 15 | select PAGE_EXTENSION | 15 | select PAGE_EXTENSION |
| 16 | select PAGE_POISONING if !ARCH_SUPPORTS_DEBUG_PAGEALLOC | 16 | select PAGE_POISONING if !ARCH_SUPPORTS_DEBUG_PAGEALLOC |
| 17 | select PAGE_GUARD if ARCH_SUPPORTS_DEBUG_PAGEALLOC | ||
| 18 | ---help--- | 17 | ---help--- |
| 19 | Unmap pages from the kernel linear mapping after free_pages(). | 18 | Unmap pages from the kernel linear mapping after free_pages(). |
| 20 | This results in a large slowdown, but helps to find certain types | 19 | This results in a large slowdown, but helps to find certain types |
| @@ -27,13 +26,5 @@ config DEBUG_PAGEALLOC | |||
| 27 | that would result in incorrect warnings of memory corruption after | 26 | that would result in incorrect warnings of memory corruption after |
| 28 | a resume because free pages are not saved to the suspend image. | 27 | a resume because free pages are not saved to the suspend image. |
| 29 | 28 | ||
| 30 | config WANT_PAGE_DEBUG_FLAGS | ||
| 31 | bool | ||
| 32 | |||
| 33 | config PAGE_POISONING | 29 | config PAGE_POISONING |
| 34 | bool | 30 | bool |
| 35 | select WANT_PAGE_DEBUG_FLAGS | ||
| 36 | |||
| 37 | config PAGE_GUARD | ||
| 38 | bool | ||
| 39 | select WANT_PAGE_DEBUG_FLAGS | ||
diff --git a/mm/memcontrol.c b/mm/memcontrol.c index ef91e856c7e4..851924fa5170 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c | |||
| @@ -3043,18 +3043,6 @@ static int mem_cgroup_move_swap_account(swp_entry_t entry, | |||
| 3043 | if (swap_cgroup_cmpxchg(entry, old_id, new_id) == old_id) { | 3043 | if (swap_cgroup_cmpxchg(entry, old_id, new_id) == old_id) { |
| 3044 | mem_cgroup_swap_statistics(from, false); | 3044 | mem_cgroup_swap_statistics(from, false); |
| 3045 | mem_cgroup_swap_statistics(to, true); | 3045 | mem_cgroup_swap_statistics(to, true); |
| 3046 | /* | ||
| 3047 | * This function is only called from task migration context now. | ||
| 3048 | * It postpones page_counter and refcount handling till the end | ||
| 3049 | * of task migration(mem_cgroup_clear_mc()) for performance | ||
| 3050 | * improvement. But we cannot postpone css_get(to) because if | ||
| 3051 | * the process that has been moved to @to does swap-in, the | ||
| 3052 | * refcount of @to might be decreased to 0. | ||
| 3053 | * | ||
| 3054 | * We are in attach() phase, so the cgroup is guaranteed to be | ||
| 3055 | * alive, so we can just call css_get(). | ||
| 3056 | */ | ||
| 3057 | css_get(&to->css); | ||
| 3058 | return 0; | 3046 | return 0; |
| 3059 | } | 3047 | } |
| 3060 | return -EINVAL; | 3048 | return -EINVAL; |
| @@ -4679,6 +4667,7 @@ mem_cgroup_css_alloc(struct cgroup_subsys_state *parent_css) | |||
| 4679 | if (parent_css == NULL) { | 4667 | if (parent_css == NULL) { |
| 4680 | root_mem_cgroup = memcg; | 4668 | root_mem_cgroup = memcg; |
| 4681 | page_counter_init(&memcg->memory, NULL); | 4669 | page_counter_init(&memcg->memory, NULL); |
| 4670 | memcg->soft_limit = PAGE_COUNTER_MAX; | ||
| 4682 | page_counter_init(&memcg->memsw, NULL); | 4671 | page_counter_init(&memcg->memsw, NULL); |
| 4683 | page_counter_init(&memcg->kmem, NULL); | 4672 | page_counter_init(&memcg->kmem, NULL); |
| 4684 | } | 4673 | } |
| @@ -4724,6 +4713,7 @@ mem_cgroup_css_online(struct cgroup_subsys_state *css) | |||
| 4724 | 4713 | ||
| 4725 | if (parent->use_hierarchy) { | 4714 | if (parent->use_hierarchy) { |
| 4726 | page_counter_init(&memcg->memory, &parent->memory); | 4715 | page_counter_init(&memcg->memory, &parent->memory); |
| 4716 | memcg->soft_limit = PAGE_COUNTER_MAX; | ||
| 4727 | page_counter_init(&memcg->memsw, &parent->memsw); | 4717 | page_counter_init(&memcg->memsw, &parent->memsw); |
| 4728 | page_counter_init(&memcg->kmem, &parent->kmem); | 4718 | page_counter_init(&memcg->kmem, &parent->kmem); |
| 4729 | 4719 | ||
| @@ -4733,6 +4723,7 @@ mem_cgroup_css_online(struct cgroup_subsys_state *css) | |||
| 4733 | */ | 4723 | */ |
| 4734 | } else { | 4724 | } else { |
| 4735 | page_counter_init(&memcg->memory, NULL); | 4725 | page_counter_init(&memcg->memory, NULL); |
| 4726 | memcg->soft_limit = PAGE_COUNTER_MAX; | ||
| 4736 | page_counter_init(&memcg->memsw, NULL); | 4727 | page_counter_init(&memcg->memsw, NULL); |
| 4737 | page_counter_init(&memcg->kmem, NULL); | 4728 | page_counter_init(&memcg->kmem, NULL); |
| 4738 | /* | 4729 | /* |
| @@ -4807,7 +4798,7 @@ static void mem_cgroup_css_reset(struct cgroup_subsys_state *css) | |||
| 4807 | mem_cgroup_resize_limit(memcg, PAGE_COUNTER_MAX); | 4798 | mem_cgroup_resize_limit(memcg, PAGE_COUNTER_MAX); |
| 4808 | mem_cgroup_resize_memsw_limit(memcg, PAGE_COUNTER_MAX); | 4799 | mem_cgroup_resize_memsw_limit(memcg, PAGE_COUNTER_MAX); |
| 4809 | memcg_update_kmem_limit(memcg, PAGE_COUNTER_MAX); | 4800 | memcg_update_kmem_limit(memcg, PAGE_COUNTER_MAX); |
| 4810 | memcg->soft_limit = 0; | 4801 | memcg->soft_limit = PAGE_COUNTER_MAX; |
| 4811 | } | 4802 | } |
| 4812 | 4803 | ||
| 4813 | #ifdef CONFIG_MMU | 4804 | #ifdef CONFIG_MMU |
diff --git a/mm/memory.c b/mm/memory.c index ca920d1fd314..c6565f00fb38 100644 --- a/mm/memory.c +++ b/mm/memory.c | |||
| @@ -2137,17 +2137,24 @@ reuse: | |||
| 2137 | if (!dirty_page) | 2137 | if (!dirty_page) |
| 2138 | return ret; | 2138 | return ret; |
| 2139 | 2139 | ||
| 2140 | /* | ||
| 2141 | * Yes, Virginia, this is actually required to prevent a race | ||
| 2142 | * with clear_page_dirty_for_io() from clearing the page dirty | ||
| 2143 | * bit after it clear all dirty ptes, but before a racing | ||
| 2144 | * do_wp_page installs a dirty pte. | ||
| 2145 | * | ||
| 2146 | * do_shared_fault is protected similarly. | ||
| 2147 | */ | ||
| 2148 | if (!page_mkwrite) { | 2140 | if (!page_mkwrite) { |
| 2149 | wait_on_page_locked(dirty_page); | 2141 | struct address_space *mapping; |
| 2150 | set_page_dirty_balance(dirty_page); | 2142 | int dirtied; |
| 2143 | |||
| 2144 | lock_page(dirty_page); | ||
| 2145 | dirtied = set_page_dirty(dirty_page); | ||
| 2146 | VM_BUG_ON_PAGE(PageAnon(dirty_page), dirty_page); | ||
| 2147 | mapping = dirty_page->mapping; | ||
| 2148 | unlock_page(dirty_page); | ||
| 2149 | |||
| 2150 | if (dirtied && mapping) { | ||
| 2151 | /* | ||
| 2152 | * Some device drivers do not set page.mapping | ||
| 2153 | * but still dirty their pages | ||
| 2154 | */ | ||
| 2155 | balance_dirty_pages_ratelimited(mapping); | ||
| 2156 | } | ||
| 2157 | |||
| 2151 | /* file_update_time outside page_lock */ | 2158 | /* file_update_time outside page_lock */ |
| 2152 | if (vma->vm_file) | 2159 | if (vma->vm_file) |
| 2153 | file_update_time(vma->vm_file); | 2160 | file_update_time(vma->vm_file); |
| @@ -2593,7 +2600,7 @@ static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned lo | |||
| 2593 | if (prev && prev->vm_end == address) | 2600 | if (prev && prev->vm_end == address) |
| 2594 | return prev->vm_flags & VM_GROWSDOWN ? 0 : -ENOMEM; | 2601 | return prev->vm_flags & VM_GROWSDOWN ? 0 : -ENOMEM; |
| 2595 | 2602 | ||
| 2596 | expand_downwards(vma, address - PAGE_SIZE); | 2603 | return expand_downwards(vma, address - PAGE_SIZE); |
| 2597 | } | 2604 | } |
| 2598 | if ((vma->vm_flags & VM_GROWSUP) && address + PAGE_SIZE == vma->vm_end) { | 2605 | if ((vma->vm_flags & VM_GROWSUP) && address + PAGE_SIZE == vma->vm_end) { |
| 2599 | struct vm_area_struct *next = vma->vm_next; | 2606 | struct vm_area_struct *next = vma->vm_next; |
| @@ -2602,7 +2609,7 @@ static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned lo | |||
| 2602 | if (next && next->vm_start == address + PAGE_SIZE) | 2609 | if (next && next->vm_start == address + PAGE_SIZE) |
| 2603 | return next->vm_flags & VM_GROWSUP ? 0 : -ENOMEM; | 2610 | return next->vm_flags & VM_GROWSUP ? 0 : -ENOMEM; |
| 2604 | 2611 | ||
| 2605 | expand_upwards(vma, address + PAGE_SIZE); | 2612 | return expand_upwards(vma, address + PAGE_SIZE); |
| 2606 | } | 2613 | } |
| 2607 | return 0; | 2614 | return 0; |
| 2608 | } | 2615 | } |
diff --git a/mm/page-writeback.c b/mm/page-writeback.c index d5d81f5384d1..6f4335238e33 100644 --- a/mm/page-writeback.c +++ b/mm/page-writeback.c | |||
| @@ -1541,16 +1541,6 @@ pause: | |||
| 1541 | bdi_start_background_writeback(bdi); | 1541 | bdi_start_background_writeback(bdi); |
| 1542 | } | 1542 | } |
| 1543 | 1543 | ||
| 1544 | void set_page_dirty_balance(struct page *page) | ||
| 1545 | { | ||
| 1546 | if (set_page_dirty(page)) { | ||
| 1547 | struct address_space *mapping = page_mapping(page); | ||
| 1548 | |||
| 1549 | if (mapping) | ||
| 1550 | balance_dirty_pages_ratelimited(mapping); | ||
| 1551 | } | ||
| 1552 | } | ||
| 1553 | |||
| 1554 | static DEFINE_PER_CPU(int, bdp_ratelimits); | 1544 | static DEFINE_PER_CPU(int, bdp_ratelimits); |
| 1555 | 1545 | ||
| 1556 | /* | 1546 | /* |
| @@ -2123,32 +2113,25 @@ EXPORT_SYMBOL(account_page_dirtied); | |||
| 2123 | * page dirty in that case, but not all the buffers. This is a "bottom-up" | 2113 | * page dirty in that case, but not all the buffers. This is a "bottom-up" |
| 2124 | * dirtying, whereas __set_page_dirty_buffers() is a "top-down" dirtying. | 2114 | * dirtying, whereas __set_page_dirty_buffers() is a "top-down" dirtying. |
| 2125 | * | 2115 | * |
| 2126 | * Most callers have locked the page, which pins the address_space in memory. | 2116 | * The caller must ensure this doesn't race with truncation. Most will simply |
| 2127 | * But zap_pte_range() does not lock the page, however in that case the | 2117 | * hold the page lock, but e.g. zap_pte_range() calls with the page mapped and |
| 2128 | * mapping is pinned by the vma's ->vm_file reference. | 2118 | * the pte lock held, which also locks out truncation. |
| 2129 | * | ||
| 2130 | * We take care to handle the case where the page was truncated from the | ||
| 2131 | * mapping by re-checking page_mapping() inside tree_lock. | ||
| 2132 | */ | 2119 | */ |
| 2133 | int __set_page_dirty_nobuffers(struct page *page) | 2120 | int __set_page_dirty_nobuffers(struct page *page) |
| 2134 | { | 2121 | { |
| 2135 | if (!TestSetPageDirty(page)) { | 2122 | if (!TestSetPageDirty(page)) { |
| 2136 | struct address_space *mapping = page_mapping(page); | 2123 | struct address_space *mapping = page_mapping(page); |
| 2137 | struct address_space *mapping2; | ||
| 2138 | unsigned long flags; | 2124 | unsigned long flags; |
| 2139 | 2125 | ||
| 2140 | if (!mapping) | 2126 | if (!mapping) |
| 2141 | return 1; | 2127 | return 1; |
| 2142 | 2128 | ||
| 2143 | spin_lock_irqsave(&mapping->tree_lock, flags); | 2129 | spin_lock_irqsave(&mapping->tree_lock, flags); |
| 2144 | mapping2 = page_mapping(page); | 2130 | BUG_ON(page_mapping(page) != mapping); |
| 2145 | if (mapping2) { /* Race with truncate? */ | 2131 | WARN_ON_ONCE(!PagePrivate(page) && !PageUptodate(page)); |
| 2146 | BUG_ON(mapping2 != mapping); | 2132 | account_page_dirtied(page, mapping); |
| 2147 | WARN_ON_ONCE(!PagePrivate(page) && !PageUptodate(page)); | 2133 | radix_tree_tag_set(&mapping->page_tree, page_index(page), |
| 2148 | account_page_dirtied(page, mapping); | 2134 | PAGECACHE_TAG_DIRTY); |
| 2149 | radix_tree_tag_set(&mapping->page_tree, | ||
| 2150 | page_index(page), PAGECACHE_TAG_DIRTY); | ||
| 2151 | } | ||
| 2152 | spin_unlock_irqrestore(&mapping->tree_lock, flags); | 2135 | spin_unlock_irqrestore(&mapping->tree_lock, flags); |
| 2153 | if (mapping->host) { | 2136 | if (mapping->host) { |
| 2154 | /* !PageAnon && !swapper_space */ | 2137 | /* !PageAnon && !swapper_space */ |
| @@ -2305,12 +2288,10 @@ int clear_page_dirty_for_io(struct page *page) | |||
| 2305 | /* | 2288 | /* |
| 2306 | * We carefully synchronise fault handlers against | 2289 | * We carefully synchronise fault handlers against |
| 2307 | * installing a dirty pte and marking the page dirty | 2290 | * installing a dirty pte and marking the page dirty |
| 2308 | * at this point. We do this by having them hold the | 2291 | * at this point. We do this by having them hold the |
| 2309 | * page lock at some point after installing their | 2292 | * page lock while dirtying the page, and pages are |
| 2310 | * pte, but before marking the page dirty. | 2293 | * always locked coming in here, so we get the desired |
| 2311 | * Pages are always locked coming in here, so we get | 2294 | * exclusion. |
| 2312 | * the desired exclusion. See mm/memory.c:do_wp_page() | ||
| 2313 | * for more comments. | ||
| 2314 | */ | 2295 | */ |
| 2315 | if (TestClearPageDirty(page)) { | 2296 | if (TestClearPageDirty(page)) { |
| 2316 | dec_zone_page_state(page, NR_FILE_DIRTY); | 2297 | dec_zone_page_state(page, NR_FILE_DIRTY); |
| @@ -72,6 +72,8 @@ static inline struct anon_vma *anon_vma_alloc(void) | |||
| 72 | anon_vma = kmem_cache_alloc(anon_vma_cachep, GFP_KERNEL); | 72 | anon_vma = kmem_cache_alloc(anon_vma_cachep, GFP_KERNEL); |
| 73 | if (anon_vma) { | 73 | if (anon_vma) { |
| 74 | atomic_set(&anon_vma->refcount, 1); | 74 | atomic_set(&anon_vma->refcount, 1); |
| 75 | anon_vma->degree = 1; /* Reference for first vma */ | ||
| 76 | anon_vma->parent = anon_vma; | ||
| 75 | /* | 77 | /* |
| 76 | * Initialise the anon_vma root to point to itself. If called | 78 | * Initialise the anon_vma root to point to itself. If called |
| 77 | * from fork, the root will be reset to the parents anon_vma. | 79 | * from fork, the root will be reset to the parents anon_vma. |
| @@ -188,6 +190,8 @@ int anon_vma_prepare(struct vm_area_struct *vma) | |||
| 188 | if (likely(!vma->anon_vma)) { | 190 | if (likely(!vma->anon_vma)) { |
| 189 | vma->anon_vma = anon_vma; | 191 | vma->anon_vma = anon_vma; |
| 190 | anon_vma_chain_link(vma, avc, anon_vma); | 192 | anon_vma_chain_link(vma, avc, anon_vma); |
| 193 | /* vma reference or self-parent link for new root */ | ||
| 194 | anon_vma->degree++; | ||
| 191 | allocated = NULL; | 195 | allocated = NULL; |
| 192 | avc = NULL; | 196 | avc = NULL; |
| 193 | } | 197 | } |
| @@ -236,6 +240,14 @@ static inline void unlock_anon_vma_root(struct anon_vma *root) | |||
| 236 | /* | 240 | /* |
| 237 | * Attach the anon_vmas from src to dst. | 241 | * Attach the anon_vmas from src to dst. |
| 238 | * Returns 0 on success, -ENOMEM on failure. | 242 | * Returns 0 on success, -ENOMEM on failure. |
| 243 | * | ||
| 244 | * If dst->anon_vma is NULL, this function tries to find and reuse an existing | ||
| 245 | * anon_vma which has no vmas and only one child anon_vma. This prevents the | ||
| 246 | * anon_vma hierarchy from degrading into an endless linear chain when a task | ||
| 247 | * forks constantly. On the other hand, an anon_vma with more than one child | ||
| 248 | * isn't reused even if no live vma remains, so the rmap walker has a good | ||
| 249 | * chance of avoiding a scan of the whole hierarchy when it searches for where | ||
| 250 | * a page is mapped. | ||
| 239 | */ | 251 | */ |
| 240 | int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src) | 252 | int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src) |
| 241 | { | 253 | { |
| @@ -256,7 +268,21 @@ int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src) | |||
| 256 | anon_vma = pavc->anon_vma; | 268 | anon_vma = pavc->anon_vma; |
| 257 | root = lock_anon_vma_root(root, anon_vma); | 269 | root = lock_anon_vma_root(root, anon_vma); |
| 258 | anon_vma_chain_link(dst, avc, anon_vma); | 270 | anon_vma_chain_link(dst, avc, anon_vma); |
| 271 | |||
| 272 | /* | ||
| 273 | * Reuse an existing anon_vma if its degree is lower than two, | ||
| 274 | * which means it has no vma and only one anon_vma child. | ||
| 275 | * | ||
| 276 | * Do not choose the parent anon_vma, otherwise the first child | ||
| 277 | * will always reuse it. The root anon_vma is never reused: | ||
| 278 | * it has a self-parent reference and at least one child. | ||
| 279 | */ | ||
| 280 | if (!dst->anon_vma && anon_vma != src->anon_vma && | ||
| 281 | anon_vma->degree < 2) | ||
| 282 | dst->anon_vma = anon_vma; | ||
| 259 | } | 283 | } |
| 284 | if (dst->anon_vma) | ||
| 285 | dst->anon_vma->degree++; | ||
| 260 | unlock_anon_vma_root(root); | 286 | unlock_anon_vma_root(root); |
| 261 | return 0; | 287 | return 0; |
| 262 | 288 | ||
| @@ -280,6 +306,9 @@ int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma) | |||
| 280 | if (!pvma->anon_vma) | 306 | if (!pvma->anon_vma) |
| 281 | return 0; | 307 | return 0; |
| 282 | 308 | ||
| 309 | /* Drop inherited anon_vma, we'll reuse existing or allocate new. */ | ||
| 310 | vma->anon_vma = NULL; | ||
| 311 | |||
| 283 | /* | 312 | /* |
| 284 | * First, attach the new VMA to the parent VMA's anon_vmas, | 313 | * First, attach the new VMA to the parent VMA's anon_vmas, |
| 285 | * so rmap can find non-COWed pages in child processes. | 314 | * so rmap can find non-COWed pages in child processes. |
| @@ -288,6 +317,10 @@ int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma) | |||
| 288 | if (error) | 317 | if (error) |
| 289 | return error; | 318 | return error; |
| 290 | 319 | ||
| 320 | /* An existing anon_vma has been reused, all done then. */ | ||
| 321 | if (vma->anon_vma) | ||
| 322 | return 0; | ||
| 323 | |||
| 291 | /* Then add our own anon_vma. */ | 324 | /* Then add our own anon_vma. */ |
| 292 | anon_vma = anon_vma_alloc(); | 325 | anon_vma = anon_vma_alloc(); |
| 293 | if (!anon_vma) | 326 | if (!anon_vma) |
| @@ -301,6 +334,7 @@ int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma) | |||
| 301 | * lock any of the anon_vmas in this anon_vma tree. | 334 | * lock any of the anon_vmas in this anon_vma tree. |
| 302 | */ | 335 | */ |
| 303 | anon_vma->root = pvma->anon_vma->root; | 336 | anon_vma->root = pvma->anon_vma->root; |
| 337 | anon_vma->parent = pvma->anon_vma; | ||
| 304 | /* | 338 | /* |
| 305 | * With refcounts, an anon_vma can stay around longer than the | 339 | * With refcounts, an anon_vma can stay around longer than the |
| 306 | * process it belongs to. The root anon_vma needs to be pinned until | 340 | * process it belongs to. The root anon_vma needs to be pinned until |
| @@ -311,6 +345,7 @@ int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma) | |||
| 311 | vma->anon_vma = anon_vma; | 345 | vma->anon_vma = anon_vma; |
| 312 | anon_vma_lock_write(anon_vma); | 346 | anon_vma_lock_write(anon_vma); |
| 313 | anon_vma_chain_link(vma, avc, anon_vma); | 347 | anon_vma_chain_link(vma, avc, anon_vma); |
| 348 | anon_vma->parent->degree++; | ||
| 314 | anon_vma_unlock_write(anon_vma); | 349 | anon_vma_unlock_write(anon_vma); |
| 315 | 350 | ||
| 316 | return 0; | 351 | return 0; |
| @@ -341,12 +376,16 @@ void unlink_anon_vmas(struct vm_area_struct *vma) | |||
| 341 | * Leave empty anon_vmas on the list - we'll need | 376 | * Leave empty anon_vmas on the list - we'll need |
| 342 | * to free them outside the lock. | 377 | * to free them outside the lock. |
| 343 | */ | 378 | */ |
| 344 | if (RB_EMPTY_ROOT(&anon_vma->rb_root)) | 379 | if (RB_EMPTY_ROOT(&anon_vma->rb_root)) { |
| 380 | anon_vma->parent->degree--; | ||
| 345 | continue; | 381 | continue; |
| 382 | } | ||
| 346 | 383 | ||
| 347 | list_del(&avc->same_vma); | 384 | list_del(&avc->same_vma); |
| 348 | anon_vma_chain_free(avc); | 385 | anon_vma_chain_free(avc); |
| 349 | } | 386 | } |
| 387 | if (vma->anon_vma) | ||
| 388 | vma->anon_vma->degree--; | ||
| 350 | unlock_anon_vma_root(root); | 389 | unlock_anon_vma_root(root); |
| 351 | 390 | ||
| 352 | /* | 391 | /* |
| @@ -357,6 +396,7 @@ void unlink_anon_vmas(struct vm_area_struct *vma) | |||
| 357 | list_for_each_entry_safe(avc, next, &vma->anon_vma_chain, same_vma) { | 396 | list_for_each_entry_safe(avc, next, &vma->anon_vma_chain, same_vma) { |
| 358 | struct anon_vma *anon_vma = avc->anon_vma; | 397 | struct anon_vma *anon_vma = avc->anon_vma; |
| 359 | 398 | ||
| 399 | BUG_ON(anon_vma->degree); | ||
| 360 | put_anon_vma(anon_vma); | 400 | put_anon_vma(anon_vma); |
| 361 | 401 | ||
| 362 | list_del(&avc->same_vma); | 402 | list_del(&avc->same_vma); |
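The degree bookkeeping added to mm/rmap.c above counts, per anon_vma, the attached vmas plus child anon_vmas, and anon_vma_clone() reuses an anon_vma only when that count is below two. A toy model of the reuse predicate, as a sketch only (these are not the kernel structures):

    struct toy_anon_vma {
        struct toy_anon_vma *parent;
        unsigned degree;     /* attached vmas + child anon_vmas */
    };

    /* Mirrors the check in anon_vma_clone(): never reuse src's own anon_vma
     * (that would just rebuild the chain), and never reuse one that is still
     * referenced by a vma or by more than one child. */
    static int reusable(const struct toy_anon_vma *av,
                        const struct toy_anon_vma *src_av)
    {
        return av != src_av && av->degree < 2;
    }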
diff --git a/mm/vmscan.c b/mm/vmscan.c index bd9a72bc4a1b..ab2505c3ef54 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c | |||
| @@ -2921,18 +2921,20 @@ static bool prepare_kswapd_sleep(pg_data_t *pgdat, int order, long remaining, | |||
| 2921 | return false; | 2921 | return false; |
| 2922 | 2922 | ||
| 2923 | /* | 2923 | /* |
| 2924 | * There is a potential race between when kswapd checks its watermarks | 2924 | * The throttled processes are normally woken up in balance_pgdat() as |
| 2925 | * and a process gets throttled. There is also a potential race if | 2925 | * soon as pfmemalloc_watermark_ok() is true. But there is a potential |
| 2926 | * processes get throttled, kswapd wakes, a large process exits therby | 2926 | * race between when kswapd checks the watermarks and a process gets |
| 2927 | * balancing the zones that causes kswapd to miss a wakeup. If kswapd | 2927 | * throttled. There is also a potential race if processes get |
| 2928 | * is going to sleep, no process should be sleeping on pfmemalloc_wait | 2928 | * throttled, kswapd wakes, a large process exits thereby balancing the |
| 2929 | * so wake them now if necessary. If necessary, processes will wake | 2929 | * zones, which causes kswapd to exit balance_pgdat() before reaching |
| 2930 | * kswapd and get throttled again | 2930 | * the wake up checks. If kswapd is going to sleep, no process should |
| 2931 | * be sleeping on pfmemalloc_wait, so wake them now if necessary. If | ||
| 2932 | * the wake up is premature, processes will wake kswapd and get | ||
| 2933 | * throttled again. The difference from wake ups in balance_pgdat() is | ||
| 2934 | * that here we are under prepare_to_wait(). | ||
| 2931 | */ | 2935 | */ |
| 2932 | if (waitqueue_active(&pgdat->pfmemalloc_wait)) { | 2936 | if (waitqueue_active(&pgdat->pfmemalloc_wait)) |
| 2933 | wake_up(&pgdat->pfmemalloc_wait); | 2937 | wake_up_all(&pgdat->pfmemalloc_wait); |
| 2934 | return false; | ||
| 2935 | } | ||
| 2936 | 2938 | ||
| 2937 | return pgdat_balanced(pgdat, order, classzone_idx); | 2939 | return pgdat_balanced(pgdat, order, classzone_idx); |
| 2938 | } | 2940 | } |
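prepare_kswapd_sleep() now broadcasts with wake_up_all() instead of wake_up(), and it no longer skips the balanced check just because waiters exist. A POSIX-threads analogue of the signal-versus-broadcast distinction, as a hedged sketch (condition variables stand in for the kernel waitqueue, and the names are illustrative):

    #include <pthread.h>
    #include <stdbool.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t  cond = PTHREAD_COND_INITIALIZER;
    static bool memory_ok;

    void throttled_wait(void)
    {
        pthread_mutex_lock(&lock);
        while (!memory_ok)
            pthread_cond_wait(&cond, &lock);
        pthread_mutex_unlock(&lock);
    }

    void declare_balanced(void)
    {
        pthread_mutex_lock(&lock);
        memory_ok = true;
        pthread_cond_broadcast(&cond);   /* like wake_up_all(): release every waiter */
        pthread_mutex_unlock(&lock);
    }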
diff --git a/net/batman-adv/multicast.c b/net/batman-adv/multicast.c index ab6bb2af1d45..b24e4bb64fb5 100644 --- a/net/batman-adv/multicast.c +++ b/net/batman-adv/multicast.c | |||
| @@ -685,11 +685,13 @@ static void batadv_mcast_tvlv_ogm_handler_v1(struct batadv_priv *bat_priv, | |||
| 685 | if (orig_initialized) | 685 | if (orig_initialized) |
| 686 | atomic_dec(&bat_priv->mcast.num_disabled); | 686 | atomic_dec(&bat_priv->mcast.num_disabled); |
| 687 | orig->capabilities |= BATADV_ORIG_CAPA_HAS_MCAST; | 687 | orig->capabilities |= BATADV_ORIG_CAPA_HAS_MCAST; |
| 688 | /* If mcast support is being switched off increase the disabled | 688 | /* If mcast support is being switched off or if this is an initial |
| 689 | * mcast node counter. | 689 | * OGM without mcast support then increase the disabled mcast |
| 690 | * node counter. | ||
| 690 | */ | 691 | */ |
| 691 | } else if (!orig_mcast_enabled && | 692 | } else if (!orig_mcast_enabled && |
| 692 | orig->capabilities & BATADV_ORIG_CAPA_HAS_MCAST) { | 693 | (orig->capabilities & BATADV_ORIG_CAPA_HAS_MCAST || |
| 694 | !orig_initialized)) { | ||
| 693 | atomic_inc(&bat_priv->mcast.num_disabled); | 695 | atomic_inc(&bat_priv->mcast.num_disabled); |
| 694 | orig->capabilities &= ~BATADV_ORIG_CAPA_HAS_MCAST; | 696 | orig->capabilities &= ~BATADV_ORIG_CAPA_HAS_MCAST; |
| 695 | } | 697 | } |
| @@ -738,7 +740,8 @@ void batadv_mcast_purge_orig(struct batadv_orig_node *orig) | |||
| 738 | { | 740 | { |
| 739 | struct batadv_priv *bat_priv = orig->bat_priv; | 741 | struct batadv_priv *bat_priv = orig->bat_priv; |
| 740 | 742 | ||
| 741 | if (!(orig->capabilities & BATADV_ORIG_CAPA_HAS_MCAST)) | 743 | if (!(orig->capabilities & BATADV_ORIG_CAPA_HAS_MCAST) && |
| 744 | orig->capa_initialized & BATADV_ORIG_CAPA_HAS_MCAST) | ||
| 742 | atomic_dec(&bat_priv->mcast.num_disabled); | 745 | atomic_dec(&bat_priv->mcast.num_disabled); |
| 743 | 746 | ||
| 744 | batadv_mcast_want_unsnoop_update(bat_priv, orig, BATADV_NO_FLAGS); | 747 | batadv_mcast_want_unsnoop_update(bat_priv, orig, BATADV_NO_FLAGS); |
diff --git a/net/batman-adv/network-coding.c b/net/batman-adv/network-coding.c index 8d04d174669e..fab47f1f3ef9 100644 --- a/net/batman-adv/network-coding.c +++ b/net/batman-adv/network-coding.c | |||
| @@ -133,7 +133,7 @@ int batadv_nc_mesh_init(struct batadv_priv *bat_priv) | |||
| 133 | if (!bat_priv->nc.decoding_hash) | 133 | if (!bat_priv->nc.decoding_hash) |
| 134 | goto err; | 134 | goto err; |
| 135 | 135 | ||
| 136 | batadv_hash_set_lock_class(bat_priv->nc.coding_hash, | 136 | batadv_hash_set_lock_class(bat_priv->nc.decoding_hash, |
| 137 | &batadv_nc_decoding_hash_lock_class_key); | 137 | &batadv_nc_decoding_hash_lock_class_key); |
| 138 | 138 | ||
| 139 | INIT_DELAYED_WORK(&bat_priv->nc.work, batadv_nc_worker); | 139 | INIT_DELAYED_WORK(&bat_priv->nc.work, batadv_nc_worker); |
diff --git a/net/batman-adv/originator.c b/net/batman-adv/originator.c index 6a484514cd3e..bea8198d0198 100644 --- a/net/batman-adv/originator.c +++ b/net/batman-adv/originator.c | |||
| @@ -570,9 +570,6 @@ static void batadv_orig_node_free_rcu(struct rcu_head *rcu) | |||
| 570 | 570 | ||
| 571 | batadv_frag_purge_orig(orig_node, NULL); | 571 | batadv_frag_purge_orig(orig_node, NULL); |
| 572 | 572 | ||
| 573 | batadv_tt_global_del_orig(orig_node->bat_priv, orig_node, -1, | ||
| 574 | "originator timed out"); | ||
| 575 | |||
| 576 | if (orig_node->bat_priv->bat_algo_ops->bat_orig_free) | 573 | if (orig_node->bat_priv->bat_algo_ops->bat_orig_free) |
| 577 | orig_node->bat_priv->bat_algo_ops->bat_orig_free(orig_node); | 574 | orig_node->bat_priv->bat_algo_ops->bat_orig_free(orig_node); |
| 578 | 575 | ||
| @@ -678,6 +675,7 @@ struct batadv_orig_node *batadv_orig_node_new(struct batadv_priv *bat_priv, | |||
| 678 | atomic_set(&orig_node->last_ttvn, 0); | 675 | atomic_set(&orig_node->last_ttvn, 0); |
| 679 | orig_node->tt_buff = NULL; | 676 | orig_node->tt_buff = NULL; |
| 680 | orig_node->tt_buff_len = 0; | 677 | orig_node->tt_buff_len = 0; |
| 678 | orig_node->last_seen = jiffies; | ||
| 681 | reset_time = jiffies - 1 - msecs_to_jiffies(BATADV_RESET_PROTECTION_MS); | 679 | reset_time = jiffies - 1 - msecs_to_jiffies(BATADV_RESET_PROTECTION_MS); |
| 682 | orig_node->bcast_seqno_reset = reset_time; | 680 | orig_node->bcast_seqno_reset = reset_time; |
| 683 | #ifdef CONFIG_BATMAN_ADV_MCAST | 681 | #ifdef CONFIG_BATMAN_ADV_MCAST |
| @@ -977,6 +975,9 @@ static void _batadv_purge_orig(struct batadv_priv *bat_priv) | |||
| 977 | if (batadv_purge_orig_node(bat_priv, orig_node)) { | 975 | if (batadv_purge_orig_node(bat_priv, orig_node)) { |
| 978 | batadv_gw_node_delete(bat_priv, orig_node); | 976 | batadv_gw_node_delete(bat_priv, orig_node); |
| 979 | hlist_del_rcu(&orig_node->hash_entry); | 977 | hlist_del_rcu(&orig_node->hash_entry); |
| 978 | batadv_tt_global_del_orig(orig_node->bat_priv, | ||
| 979 | orig_node, -1, | ||
| 980 | "originator timed out"); | ||
| 980 | batadv_orig_node_free_ref(orig_node); | 981 | batadv_orig_node_free_ref(orig_node); |
| 981 | continue; | 982 | continue; |
| 982 | } | 983 | } |
diff --git a/net/batman-adv/routing.c b/net/batman-adv/routing.c index 35f76f2f7824..6648f321864d 100644 --- a/net/batman-adv/routing.c +++ b/net/batman-adv/routing.c | |||
| @@ -443,11 +443,13 @@ batadv_find_router(struct batadv_priv *bat_priv, | |||
| 443 | 443 | ||
| 444 | router = batadv_orig_router_get(orig_node, recv_if); | 444 | router = batadv_orig_router_get(orig_node, recv_if); |
| 445 | 445 | ||
| 446 | if (!router) | ||
| 447 | return router; | ||
| 448 | |||
| 446 | /* only consider bonding for recv_if == BATADV_IF_DEFAULT (first hop) | 449 | /* only consider bonding for recv_if == BATADV_IF_DEFAULT (first hop) |
| 447 | * and if activated. | 450 | * and if activated. |
| 448 | */ | 451 | */ |
| 449 | if (recv_if == BATADV_IF_DEFAULT || !atomic_read(&bat_priv->bonding) || | 452 | if (!(recv_if == BATADV_IF_DEFAULT && atomic_read(&bat_priv->bonding))) |
| 450 | !router) | ||
| 451 | return router; | 453 | return router; |
| 452 | 454 | ||
| 453 | /* bonding: loop through the list of possible routers found | 455 | /* bonding: loop through the list of possible routers found |
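The routing.c hunk above does two things: it hoists the NULL-router check into an explicit early return, and it rewrites the bonding guard so it matches the comment. The old expression returned early when recv_if == BATADV_IF_DEFAULT, i.e. precisely in the case the comment says bonding should be considered; the new form only continues to the bonding path when both conditions hold. A small sketch of the corrected predicate (the function name is illustrative):

    #include <stdbool.h>

    /* Bonding applies only on the first hop (default interface) and only
     * when the feature is enabled; every other case takes the plain router. */
    static bool take_bonding_path(bool is_default_iface, bool bonding_enabled)
    {
        return is_default_iface && bonding_enabled;
    }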
diff --git a/net/ceph/auth_x.c b/net/ceph/auth_x.c index 15845814a0f2..ba6eb17226da 100644 --- a/net/ceph/auth_x.c +++ b/net/ceph/auth_x.c | |||
| @@ -676,7 +676,7 @@ static int calcu_signature(struct ceph_x_authorizer *au, | |||
| 676 | int ret; | 676 | int ret; |
| 677 | char tmp_enc[40]; | 677 | char tmp_enc[40]; |
| 678 | __le32 tmp[5] = { | 678 | __le32 tmp[5] = { |
| 679 | 16u, msg->hdr.crc, msg->footer.front_crc, | 679 | cpu_to_le32(16), msg->hdr.crc, msg->footer.front_crc, |
| 680 | msg->footer.middle_crc, msg->footer.data_crc, | 680 | msg->footer.middle_crc, msg->footer.data_crc, |
| 681 | }; | 681 | }; |
| 682 | ret = ceph_x_encrypt(&au->session_key, &tmp, sizeof(tmp), | 682 | ret = ceph_x_encrypt(&au->session_key, &tmp, sizeof(tmp), |
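The auth_x.c fix above replaces a bare 16u initializer in a __le32 array with cpu_to_le32(16), so the length field is stored little-endian on every host; the mon_client.c hunk that follows fixes the matching read side so le32_to_cpu() is applied through a __le32 pointer. A userspace sketch of the same discipline, assuming <endian.h>'s htole32()/le32toh() as stand-ins for the kernel helpers:

    #define _DEFAULT_SOURCE          /* for htole32()/le32toh() on glibc */
    #include <endian.h>
    #include <stdint.h>
    #include <string.h>

    void encode_len(unsigned char out[4], uint32_t len)
    {
        uint32_t le = htole32(len);  /* no-op on little-endian, byteswap on big-endian */
        memcpy(out, &le, sizeof(le));
    }

    uint32_t decode_len(const unsigned char in[4])
    {
        uint32_t le;
        memcpy(&le, in, sizeof(le));
        return le32toh(le);
    }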
diff --git a/net/ceph/mon_client.c b/net/ceph/mon_client.c index a83062ceeec9..f2148e22b148 100644 --- a/net/ceph/mon_client.c +++ b/net/ceph/mon_client.c | |||
| @@ -717,7 +717,7 @@ static int get_poolop_reply_buf(const char *src, size_t src_len, | |||
| 717 | if (src_len != sizeof(u32) + dst_len) | 717 | if (src_len != sizeof(u32) + dst_len) |
| 718 | return -EINVAL; | 718 | return -EINVAL; |
| 719 | 719 | ||
| 720 | buf_len = le32_to_cpu(*(u32 *)src); | 720 | buf_len = le32_to_cpu(*(__le32 *)src); |
| 721 | if (buf_len != dst_len) | 721 | if (buf_len != dst_len) |
| 722 | return -EINVAL; | 722 | return -EINVAL; |
| 723 | 723 | ||
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index 7f18262e2326..65caf8b95e17 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c | |||
| @@ -2019,7 +2019,7 @@ static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle, | |||
| 2019 | if (unlikely(!tcp_snd_wnd_test(tp, skb, mss_now))) | 2019 | if (unlikely(!tcp_snd_wnd_test(tp, skb, mss_now))) |
| 2020 | break; | 2020 | break; |
| 2021 | 2021 | ||
| 2022 | if (tso_segs == 1) { | 2022 | if (tso_segs == 1 || !max_segs) { |
| 2023 | if (unlikely(!tcp_nagle_test(tp, skb, mss_now, | 2023 | if (unlikely(!tcp_nagle_test(tp, skb, mss_now, |
| 2024 | (tcp_skb_is_last(sk, skb) ? | 2024 | (tcp_skb_is_last(sk, skb) ? |
| 2025 | nonagle : TCP_NAGLE_PUSH)))) | 2025 | nonagle : TCP_NAGLE_PUSH)))) |
| @@ -2032,7 +2032,7 @@ static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle, | |||
| 2032 | } | 2032 | } |
| 2033 | 2033 | ||
| 2034 | limit = mss_now; | 2034 | limit = mss_now; |
| 2035 | if (tso_segs > 1 && !tcp_urg_mode(tp)) | 2035 | if (tso_segs > 1 && max_segs && !tcp_urg_mode(tp)) |
| 2036 | limit = tcp_mss_split_point(sk, skb, mss_now, | 2036 | limit = tcp_mss_split_point(sk, skb, mss_now, |
| 2037 | min_t(unsigned int, | 2037 | min_t(unsigned int, |
| 2038 | cwnd_quota, | 2038 | cwnd_quota, |
diff --git a/net/mac80211/key.c b/net/mac80211/key.c index 0bb7038121ac..bd4e46ec32bd 100644 --- a/net/mac80211/key.c +++ b/net/mac80211/key.c | |||
| @@ -140,7 +140,9 @@ static int ieee80211_key_enable_hw_accel(struct ieee80211_key *key) | |||
| 140 | if (!ret) { | 140 | if (!ret) { |
| 141 | key->flags |= KEY_FLAG_UPLOADED_TO_HARDWARE; | 141 | key->flags |= KEY_FLAG_UPLOADED_TO_HARDWARE; |
| 142 | 142 | ||
| 143 | if (!(key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_MMIC)) | 143 | if (!((key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_MMIC) || |
| 144 | (key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_IV) || | ||
| 145 | (key->conf.flags & IEEE80211_KEY_FLAG_PUT_IV_SPACE))) | ||
| 144 | sdata->crypto_tx_tailroom_needed_cnt--; | 146 | sdata->crypto_tx_tailroom_needed_cnt--; |
| 145 | 147 | ||
| 146 | WARN_ON((key->conf.flags & IEEE80211_KEY_FLAG_PUT_IV_SPACE) && | 148 | WARN_ON((key->conf.flags & IEEE80211_KEY_FLAG_PUT_IV_SPACE) && |
| @@ -188,7 +190,9 @@ static void ieee80211_key_disable_hw_accel(struct ieee80211_key *key) | |||
| 188 | sta = key->sta; | 190 | sta = key->sta; |
| 189 | sdata = key->sdata; | 191 | sdata = key->sdata; |
| 190 | 192 | ||
| 191 | if (!(key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_MMIC)) | 193 | if (!((key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_MMIC) || |
| 194 | (key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_IV) || | ||
| 195 | (key->conf.flags & IEEE80211_KEY_FLAG_PUT_IV_SPACE))) | ||
| 192 | increment_tailroom_need_count(sdata); | 196 | increment_tailroom_need_count(sdata); |
| 193 | 197 | ||
| 194 | ret = drv_set_key(key->local, DISABLE_KEY, sdata, | 198 | ret = drv_set_key(key->local, DISABLE_KEY, sdata, |
| @@ -884,7 +888,9 @@ void ieee80211_remove_key(struct ieee80211_key_conf *keyconf) | |||
| 884 | if (key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE) { | 888 | if (key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE) { |
| 885 | key->flags &= ~KEY_FLAG_UPLOADED_TO_HARDWARE; | 889 | key->flags &= ~KEY_FLAG_UPLOADED_TO_HARDWARE; |
| 886 | 890 | ||
| 887 | if (!(key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_MMIC)) | 891 | if (!((key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_MMIC) || |
| 892 | (key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_IV) || | ||
| 893 | (key->conf.flags & IEEE80211_KEY_FLAG_PUT_IV_SPACE))) | ||
| 888 | increment_tailroom_need_count(key->sdata); | 894 | increment_tailroom_need_count(key->sdata); |
| 889 | } | 895 | } |
| 890 | 896 | ||
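The three mac80211 hunks above all extend the same single-flag test to the same three flags. A self-contained sketch of factoring that compound test into one predicate so the call sites cannot drift apart; the flag values and the function name are illustrative, not the mac80211 definitions:

    #include <stdbool.h>

    #define GEN_MMIC     0x01   /* stand-in for IEEE80211_KEY_FLAG_GENERATE_MMIC */
    #define GEN_IV       0x02   /* stand-in for IEEE80211_KEY_FLAG_GENERATE_IV   */
    #define PUT_IV_SPACE 0x04   /* stand-in for IEEE80211_KEY_FLAG_PUT_IV_SPACE  */

    /* True when any of the software-crypto flags is set, i.e. the case in
     * which the tailroom counter must not be decremented. */
    static bool sw_crypto_flags_set(unsigned flags)
    {
        return (flags & (GEN_MMIC | GEN_IV | PUT_IV_SPACE)) != 0;
    }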
diff --git a/net/openvswitch/flow.c b/net/openvswitch/flow.c index 70bef2ab7f2b..da2fae0873a5 100644 --- a/net/openvswitch/flow.c +++ b/net/openvswitch/flow.c | |||
| @@ -70,6 +70,7 @@ void ovs_flow_stats_update(struct sw_flow *flow, __be16 tcp_flags, | |||
| 70 | { | 70 | { |
| 71 | struct flow_stats *stats; | 71 | struct flow_stats *stats; |
| 72 | int node = numa_node_id(); | 72 | int node = numa_node_id(); |
| 73 | int len = skb->len + (vlan_tx_tag_present(skb) ? VLAN_HLEN : 0); | ||
| 73 | 74 | ||
| 74 | stats = rcu_dereference(flow->stats[node]); | 75 | stats = rcu_dereference(flow->stats[node]); |
| 75 | 76 | ||
| @@ -105,7 +106,7 @@ void ovs_flow_stats_update(struct sw_flow *flow, __be16 tcp_flags, | |||
| 105 | if (likely(new_stats)) { | 106 | if (likely(new_stats)) { |
| 106 | new_stats->used = jiffies; | 107 | new_stats->used = jiffies; |
| 107 | new_stats->packet_count = 1; | 108 | new_stats->packet_count = 1; |
| 108 | new_stats->byte_count = skb->len; | 109 | new_stats->byte_count = len; |
| 109 | new_stats->tcp_flags = tcp_flags; | 110 | new_stats->tcp_flags = tcp_flags; |
| 110 | spin_lock_init(&new_stats->lock); | 111 | spin_lock_init(&new_stats->lock); |
| 111 | 112 | ||
| @@ -120,7 +121,7 @@ void ovs_flow_stats_update(struct sw_flow *flow, __be16 tcp_flags, | |||
| 120 | 121 | ||
| 121 | stats->used = jiffies; | 122 | stats->used = jiffies; |
| 122 | stats->packet_count++; | 123 | stats->packet_count++; |
| 123 | stats->byte_count += skb->len; | 124 | stats->byte_count += len; |
| 124 | stats->tcp_flags |= tcp_flags; | 125 | stats->tcp_flags |= tcp_flags; |
| 125 | unlock: | 126 | unlock: |
| 126 | spin_unlock(&stats->lock); | 127 | spin_unlock(&stats->lock); |
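This flow.c hunk and the vport.c hunk below both account for the 4-byte 802.1Q tag that hardware VLAN stripping removes from skb->len even though it was on the wire. A plain-C sketch of the corrected length calculation; the struct and field names are stand-ins for the skb fields:

    #include <stdbool.h>
    #include <stdint.h>

    #define VLAN_HLEN 4   /* 802.1Q tag stripped into out-of-band metadata */

    struct pkt {
        uint32_t len;            /* bytes currently in the buffer        */
        bool vlan_tag_present;   /* tag was stripped by the hardware     */
    };

    static uint32_t wire_len(const struct pkt *p)
    {
        return p->len + (p->vlan_tag_present ? VLAN_HLEN : 0);
    }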
diff --git a/net/openvswitch/vport.c b/net/openvswitch/vport.c index 53f3ebbfceab..2034c6d9cb5a 100644 --- a/net/openvswitch/vport.c +++ b/net/openvswitch/vport.c | |||
| @@ -480,7 +480,7 @@ void ovs_vport_receive(struct vport *vport, struct sk_buff *skb, | |||
| 480 | stats = this_cpu_ptr(vport->percpu_stats); | 480 | stats = this_cpu_ptr(vport->percpu_stats); |
| 481 | u64_stats_update_begin(&stats->syncp); | 481 | u64_stats_update_begin(&stats->syncp); |
| 482 | stats->rx_packets++; | 482 | stats->rx_packets++; |
| 483 | stats->rx_bytes += skb->len; | 483 | stats->rx_bytes += skb->len + (vlan_tx_tag_present(skb) ? VLAN_HLEN : 0); |
| 484 | u64_stats_update_end(&stats->syncp); | 484 | u64_stats_update_end(&stats->syncp); |
| 485 | 485 | ||
| 486 | OVS_CB(skb)->input_vport = vport; | 486 | OVS_CB(skb)->input_vport = vport; |
diff --git a/net/sunrpc/xdr.c b/net/sunrpc/xdr.c index 1cb61242e55e..4439ac4c1b53 100644 --- a/net/sunrpc/xdr.c +++ b/net/sunrpc/xdr.c | |||
| @@ -606,7 +606,7 @@ void xdr_truncate_encode(struct xdr_stream *xdr, size_t len) | |||
| 606 | struct kvec *head = buf->head; | 606 | struct kvec *head = buf->head; |
| 607 | struct kvec *tail = buf->tail; | 607 | struct kvec *tail = buf->tail; |
| 608 | int fraglen; | 608 | int fraglen; |
| 609 | int new, old; | 609 | int new; |
| 610 | 610 | ||
| 611 | if (len > buf->len) { | 611 | if (len > buf->len) { |
| 612 | WARN_ON_ONCE(1); | 612 | WARN_ON_ONCE(1); |
| @@ -629,8 +629,8 @@ void xdr_truncate_encode(struct xdr_stream *xdr, size_t len) | |||
| 629 | buf->len -= fraglen; | 629 | buf->len -= fraglen; |
| 630 | 630 | ||
| 631 | new = buf->page_base + buf->page_len; | 631 | new = buf->page_base + buf->page_len; |
| 632 | old = new + fraglen; | 632 | |
| 633 | xdr->page_ptr -= (old >> PAGE_SHIFT) - (new >> PAGE_SHIFT); | 633 | xdr->page_ptr = buf->pages + (new >> PAGE_SHIFT); |
| 634 | 634 | ||
| 635 | if (buf->page_len) { | 635 | if (buf->page_len) { |
| 636 | xdr->p = page_address(*xdr->page_ptr); | 636 | xdr->p = page_address(*xdr->page_ptr); |
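The xdr_truncate_encode() change above recomputes xdr->page_ptr absolutely from buf->pages and the new end offset instead of adjusting it by the difference of two page indices. A worked sketch of the pointer arithmetic, assuming 4 KiB pages (PAGE_SHIFT = 12); the values are made up for illustration:

    #include <stdio.h>

    #define PAGE_SHIFT 12                      /* assume 4 KiB pages */

    int main(void)
    {
        void *pages[16] = { 0 };               /* stand-in for buf->pages[]              */
        unsigned page_base = 100;              /* offset of page data within pages[0]    */
        unsigned page_len  = 9000;             /* bytes of page data left after truncation */

        unsigned new_end  = page_base + page_len;            /* 9100      */
        void   **page_ptr = pages + (new_end >> PAGE_SHIFT); /* pages + 2 */

        /* The result does not depend on where page_ptr pointed before. */
        printf("page index %ld\n", (long)(page_ptr - pages));   /* prints 2 */
        return 0;
    }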
diff --git a/scripts/Makefile.clean b/scripts/Makefile.clean index 1bca180db8ad..627f8cbbedb8 100644 --- a/scripts/Makefile.clean +++ b/scripts/Makefile.clean | |||
| @@ -42,19 +42,19 @@ __clean-files := $(extra-y) $(extra-m) $(extra-) \ | |||
| 42 | 42 | ||
| 43 | __clean-files := $(filter-out $(no-clean-files), $(__clean-files)) | 43 | __clean-files := $(filter-out $(no-clean-files), $(__clean-files)) |
| 44 | 44 | ||
| 45 | # as clean-files is given relative to the current directory, this adds | 45 | # clean-files is given relative to the current directory, unless it |
| 46 | # a $(obj) prefix, except for absolute paths | 46 | # starts with $(objtree)/ (which means "./", so do not add "./" unless |
| 47 | # you want to delete a file from the toplevel object directory). | ||
| 47 | 48 | ||
| 48 | __clean-files := $(wildcard \ | 49 | __clean-files := $(wildcard \ |
| 49 | $(addprefix $(obj)/, $(filter-out /%, $(__clean-files))) \ | 50 | $(addprefix $(obj)/, $(filter-out $(objtree)/%, $(__clean-files))) \ |
| 50 | $(filter /%, $(__clean-files))) | 51 | $(filter $(objtree)/%, $(__clean-files))) |
| 51 | 52 | ||
| 52 | # as clean-dirs is given relative to the current directory, this adds | 53 | # same as clean-files |
| 53 | # a $(obj) prefix, except for absolute paths | ||
| 54 | 54 | ||
| 55 | __clean-dirs := $(wildcard \ | 55 | __clean-dirs := $(wildcard \ |
| 56 | $(addprefix $(obj)/, $(filter-out /%, $(clean-dirs))) \ | 56 | $(addprefix $(obj)/, $(filter-out $(objtree)/%, $(clean-dirs))) \ |
| 57 | $(filter /%, $(clean-dirs))) | 57 | $(filter $(objtree)/%, $(clean-dirs))) |
| 58 | 58 | ||
| 59 | # ========================================================================== | 59 | # ========================================================================== |
| 60 | 60 | ||
diff --git a/security/keys/gc.c b/security/keys/gc.c index 9609a7f0faea..c7952375ac53 100644 --- a/security/keys/gc.c +++ b/security/keys/gc.c | |||
| @@ -148,12 +148,12 @@ static noinline void key_gc_unused_keys(struct list_head *keys) | |||
| 148 | if (test_bit(KEY_FLAG_INSTANTIATED, &key->flags)) | 148 | if (test_bit(KEY_FLAG_INSTANTIATED, &key->flags)) |
| 149 | atomic_dec(&key->user->nikeys); | 149 | atomic_dec(&key->user->nikeys); |
| 150 | 150 | ||
| 151 | key_user_put(key->user); | ||
| 152 | |||
| 153 | /* now throw away the key memory */ | 151 | /* now throw away the key memory */ |
| 154 | if (key->type->destroy) | 152 | if (key->type->destroy) |
| 155 | key->type->destroy(key); | 153 | key->type->destroy(key); |
| 156 | 154 | ||
| 155 | key_user_put(key->user); | ||
| 156 | |||
| 157 | kfree(key->description); | 157 | kfree(key->description); |
| 158 | 158 | ||
| 159 | #ifdef KEY_DEBUGGING | 159 | #ifdef KEY_DEBUGGING |
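The gc.c hunk above only reorders two existing calls: key_user_put() now runs after the type's ->destroy() callback rather than before it. A general teardown-ordering sketch (not the keys API), under the assumption that a destructor may still dereference the owning record, so that reference should be dropped only afterwards:

    struct res { int refs; };

    struct obj {
        struct res *owner;
        void (*destroy)(struct obj *);   /* may still look at obj->owner */
    };

    static void res_put(struct res *r) { r->refs--; }

    static void obj_free(struct obj *o)
    {
        if (o->destroy)
            o->destroy(o);   /* runs while o->owner is still valid */
        res_put(o->owner);   /* only now release the dependency    */
    }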
