author     Linus Torvalds <torvalds@linux-foundation.org>   2018-02-07 01:15:42 -0500
committer  Linus Torvalds <torvalds@linux-foundation.org>   2018-02-07 01:15:42 -0500
commit     a2e5790d841658485d642196dbb0927303d6c22f
tree       b3d28c9bcb7da6880806146fd22a88a7ee7f733e
parent     ab2d92ad881da11331280aedf612d82e61cb6d41
parent     60c3e026d73ccabb075fb70ba02f8512ab40cf2c
Merge branch 'akpm' (patches from Andrew)
Merge misc updates from Andrew Morton:
- kasan updates
- procfs
- lib/bitmap updates
- other lib/ updates
- checkpatch tweaks
- rapidio
- ubsan
- pipe fixes and cleanups
- lots of other misc bits
* emailed patches from Andrew Morton <akpm@linux-foundation.org>: (114 commits)
Documentation/sysctl/user.txt: fix typo
MAINTAINERS: update ARM/QUALCOMM SUPPORT patterns
MAINTAINERS: update various PALM patterns
MAINTAINERS: update "ARM/OXNAS platform support" patterns
MAINTAINERS: update Cortina/Gemini patterns
MAINTAINERS: remove ARM/CLKDEV SUPPORT file pattern
MAINTAINERS: remove ANDROID ION pattern
mm: docs: add blank lines to silence sphinx "Unexpected indentation" errors
mm: docs: fix parameter names mismatch
mm: docs: fixup punctuation
pipe: read buffer limits atomically
pipe: simplify round_pipe_size()
pipe: reject F_SETPIPE_SZ with size over UINT_MAX
pipe: fix off-by-one error when checking buffer limits
pipe: actually allow root to exceed the pipe buffer limits
pipe, sysctl: remove pipe_proc_fn()
pipe, sysctl: drop 'min' parameter from pipe-max-size converter
kasan: rework Kconfig settings
crash_dump: is_kdump_kernel can be boolean
kernel/mutex: mutex_is_locked can be boolean
...
134 files changed, 1422 insertions, 1075 deletions
diff --git a/Documentation/sysctl/user.txt b/Documentation/sysctl/user.txt
index 1291c498f78f..a5882865836e 100644
--- a/Documentation/sysctl/user.txt
+++ b/Documentation/sysctl/user.txt
@@ -3,7 +3,7 @@ Documentation for /proc/sys/user/* kernel version 4.9.0
 
 ==============================================================
 
-This file contains the documetation for the sysctl files in
+This file contains the documentation for the sysctl files in
 /proc/sys/user.
 
 The files in this directory can be used to override the default
diff --git a/MAINTAINERS b/MAINTAINERS
index d1212226cbba..7653656e64b2 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -903,7 +903,6 @@ L: devel@driverdev.osuosl.org
 S: Supported
 F: drivers/staging/android/ion
 F: drivers/staging/android/uapi/ion.h
-F: drivers/staging/android/uapi/ion_test.h
 
 AOA (Apple Onboard Audio) ALSA DRIVER
 M: Johannes Berg <johannes@sipsolutions.net>
@@ -1308,7 +1307,6 @@ M: Russell King <linux@armlinux.org.uk>
 L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S: Maintained
 T: git git://git.armlinux.org.uk/~rmk/linux-arm.git clkdev
-F: arch/arm/include/asm/clkdev.h
 F: drivers/clk/clkdev.c
 
 ARM/COMPULAB CM-X270/EM-X270 and CM-X300 MACHINE SUPPORT
@@ -1360,7 +1358,7 @@ F: Documentation/devicetree/bindings/pinctrl/cortina,gemini-pinctrl.txt
 F: Documentation/devicetree/bindings/net/cortina,gemini-ethernet.txt
 F: Documentation/devicetree/bindings/rtc/faraday,ftrtc010.txt
 F: arch/arm/mach-gemini/
-F: drivers/net/ethernet/cortina/gemini/*
+F: drivers/net/ethernet/cortina/
 F: drivers/pinctrl/pinctrl-gemini.c
 F: drivers/rtc/rtc-ftrtc010.c
 
@@ -1737,9 +1735,7 @@ L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 L: linux-oxnas@lists.tuxfamily.org (moderated for non-subscribers)
 S: Maintained
 F: arch/arm/mach-oxnas/
-F: arch/arm/boot/dts/ox8*.dtsi
-F: arch/arm/boot/dts/wd-mbwe.dts
-F: arch/arm/boot/dts/cloudengines-pogoplug-series-3.dts
+F: arch/arm/boot/dts/ox8*.dts*
 N: oxnas
 
 ARM/PALM TREO SUPPORT
@@ -1747,8 +1743,7 @@ M: Tomas Cech <sleep_walker@suse.com>
 L: linux-arm-kernel@lists.infradead.org
 W: http://hackndev.com
 S: Maintained
-F: arch/arm/mach-pxa/include/mach/palmtreo.h
-F: arch/arm/mach-pxa/palmtreo.c
+F: arch/arm/mach-pxa/palmtreo.*
 
 ARM/PALMTX,PALMT5,PALMLD,PALMTE2,PALMTC SUPPORT
 M: Marek Vasut <marek.vasut@gmail.com>
@@ -1757,12 +1752,10 @@ W: http://hackndev.com
 S: Maintained
 F: arch/arm/mach-pxa/include/mach/palmtx.h
 F: arch/arm/mach-pxa/palmtx.c
-F: arch/arm/mach-pxa/include/mach/palmt5.h
-F: arch/arm/mach-pxa/palmt5.c
+F: arch/arm/mach-pxa/palmt5.*
 F: arch/arm/mach-pxa/include/mach/palmld.h
 F: arch/arm/mach-pxa/palmld.c
-F: arch/arm/mach-pxa/include/mach/palmte2.h
-F: arch/arm/mach-pxa/palmte2.c
+F: arch/arm/mach-pxa/palmte2.*
 F: arch/arm/mach-pxa/include/mach/palmtc.h
 F: arch/arm/mach-pxa/palmtc.c
 
@@ -1771,8 +1764,7 @@ M: Sergey Lapin <slapin@ossfans.org>
 L: linux-arm-kernel@lists.infradead.org
 W: http://hackndev.com
 S: Maintained
-F: arch/arm/mach-pxa/include/mach/palmz72.h
-F: arch/arm/mach-pxa/palmz72.c
+F: arch/arm/mach-pxa/palmz72.*
 
 ARM/PLEB SUPPORT
 M: Peter Chubb <pleb@gelato.unsw.edu.au>
@@ -1801,7 +1793,6 @@ F: drivers/clk/qcom/
 F: drivers/dma/qcom/
 F: drivers/soc/qcom/
 F: drivers/spi/spi-qup.c
-F: drivers/tty/serial/msm_serial.h
 F: drivers/tty/serial/msm_serial.c
 F: drivers/*/pm8???-*
 F: drivers/mfd/ssbi.c
@@ -3567,7 +3558,7 @@ F: drivers/media/platform/coda/
 
 COMMON CLK FRAMEWORK
 M: Michael Turquette <mturquette@baylibre.com>
-M: Stephen Boyd <sboyd@codeaurora.org>
+M: Stephen Boyd <sboyd@kernel.org>
 L: linux-clk@vger.kernel.org
 Q: http://patchwork.kernel.org/project/linux-clk/list/
 T: git git://git.kernel.org/pub/scm/linux/kernel/git/clk/linux.git
@@ -10284,7 +10275,7 @@ F: include/uapi/linux/openvswitch.h
 OPERATING PERFORMANCE POINTS (OPP)
 M: Viresh Kumar <vireshk@kernel.org>
 M: Nishanth Menon <nm@ti.com>
-M: Stephen Boyd <sboyd@codeaurora.org>
+M: Stephen Boyd <sboyd@kernel.org>
 L: linux-pm@vger.kernel.org
 S: Maintained
 T: git git://git.kernel.org/pub/scm/linux/kernel/git/vireshk/pm.git
@@ -13020,7 +13011,7 @@ F: Documentation/networking/spider_net.txt
 F: drivers/net/ethernet/toshiba/spider_net*
 
 SPMI SUBSYSTEM
-R: Stephen Boyd <sboyd@codeaurora.org>
+R: Stephen Boyd <sboyd@kernel.org>
 L: linux-arm-msm@vger.kernel.org
 F: Documentation/devicetree/bindings/spmi/
 F: drivers/spmi/
@@ -13905,7 +13896,7 @@ F: include/linux/usb/tilegx.h
 TIMEKEEPING, CLOCKSOURCE CORE, NTP, ALARMTIMER
 M: John Stultz <john.stultz@linaro.org>
 M: Thomas Gleixner <tglx@linutronix.de>
-R: Stephen Boyd <sboyd@codeaurora.org>
+R: Stephen Boyd <sboyd@kernel.org>
 L: linux-kernel@vger.kernel.org
 T: git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git timers/core
 S: Supported
diff --git a/Makefile b/Makefile
--- a/Makefile
+++ b/Makefile
@@ -434,7 +434,8 @@ export MAKE LEX YACC AWK GENKSYMS INSTALLKERNEL PERL PYTHON UTS_MACHINE
 export HOSTCXX HOSTCXXFLAGS LDFLAGS_MODULE CHECK CHECKFLAGS
 
 export KBUILD_CPPFLAGS NOSTDINC_FLAGS LINUXINCLUDE OBJCOPYFLAGS LDFLAGS
-export KBUILD_CFLAGS CFLAGS_KERNEL CFLAGS_MODULE CFLAGS_KASAN CFLAGS_UBSAN
+export KBUILD_CFLAGS CFLAGS_KERNEL CFLAGS_MODULE
+export CFLAGS_KASAN CFLAGS_KASAN_NOSANITIZE CFLAGS_UBSAN
 export KBUILD_AFLAGS AFLAGS_KERNEL AFLAGS_MODULE
 export KBUILD_AFLAGS_MODULE KBUILD_CFLAGS_MODULE KBUILD_LDFLAGS_MODULE
 export KBUILD_AFLAGS_KERNEL KBUILD_CFLAGS_KERNEL
@@ -679,6 +680,10 @@ endif
 # This selects the stack protector compiler flag. Testing it is delayed
 # until after .config has been reprocessed, in the prepare-compiler-check
 # target.
+ifdef CONFIG_CC_STACKPROTECTOR_AUTO
+stackp-flag := $(call cc-option,-fstack-protector-strong,$(call cc-option,-fstack-protector))
+stackp-name := AUTO
+else
 ifdef CONFIG_CC_STACKPROTECTOR_REGULAR
 stackp-flag := -fstack-protector
 stackp-name := REGULAR
@@ -687,16 +692,40 @@ ifdef CONFIG_CC_STACKPROTECTOR_STRONG
 stackp-flag := -fstack-protector-strong
 stackp-name := STRONG
 else
+# If either there is no stack protector for this architecture or
+# CONFIG_CC_STACKPROTECTOR_NONE is selected, we're done, and $(stackp-name)
+# is empty, skipping all remaining stack protector tests.
+#
 # Force off for distro compilers that enable stack protector by default.
-stackp-flag := $(call cc-option, -fno-stack-protector)
+KBUILD_CFLAGS += $(call cc-option, -fno-stack-protector)
+endif
 endif
 endif
 # Find arch-specific stack protector compiler sanity-checking script.
-ifdef CONFIG_CC_STACKPROTECTOR
+ifdef stackp-name
+ifneq ($(stackp-flag),)
 stackp-path := $(srctree)/scripts/gcc-$(SRCARCH)_$(BITS)-has-stack-protector.sh
 stackp-check := $(wildcard $(stackp-path))
+# If the wildcard test matches a test script, run it to check functionality.
+ifdef stackp-check
+ifneq ($(shell $(CONFIG_SHELL) $(stackp-check) $(CC) $(KBUILD_CPPFLAGS) $(biarch)),y)
+stackp-broken := y
+endif
+endif
+ifndef stackp-broken
+# If the stack protector is functional, enable code that depends on it.
+KBUILD_CPPFLAGS += -DCONFIG_CC_STACKPROTECTOR
+# Either we've already detected the flag (for AUTO) or we'll fail the
+# build in the prepare-compiler-check rule (for specific flag).
+KBUILD_CFLAGS += $(stackp-flag)
+else
+# We have to make sure stack protector is unconditionally disabled if
+# the compiler is broken (in case we're going to continue the build in
+# AUTO mode).
+KBUILD_CFLAGS += $(call cc-option, -fno-stack-protector)
+endif
+endif
 endif
-KBUILD_CFLAGS += $(stackp-flag)
 
 ifeq ($(cc-name),clang)
 KBUILD_CPPFLAGS += $(call cc-option,-Qunused-arguments,)
@@ -1091,14 +1120,25 @@ PHONY += prepare-compiler-check
 prepare-compiler-check: FORCE
 # Make sure compiler supports requested stack protector flag.
 ifdef stackp-name
+# Warn about CONFIG_CC_STACKPROTECTOR_AUTO having found no option.
+ifeq ($(stackp-flag),)
+@echo CONFIG_CC_STACKPROTECTOR_$(stackp-name): \
+Compiler does not support any known stack-protector >&2
+else
+# Fail if specifically requested stack protector is missing.
 ifeq ($(call cc-option, $(stackp-flag)),)
 @echo Cannot use CONFIG_CC_STACKPROTECTOR_$(stackp-name): \
 $(stackp-flag) not supported by compiler >&2 && exit 1
 endif
+endif
 endif
-# Make sure compiler does not have buggy stack-protector support.
-ifdef stackp-check
-ifneq ($(shell $(CONFIG_SHELL) $(stackp-check) $(CC) $(KBUILD_CPPFLAGS) $(biarch)),y)
+# Make sure compiler does not have buggy stack-protector support. If a
+# specific stack-protector was requested, fail the build, otherwise warn.
+ifdef stackp-broken
+ifeq ($(stackp-name),AUTO)
+@echo CONFIG_CC_STACKPROTECTOR_$(stackp-name): \
+$(stackp-flag) available but compiler is broken: disabling >&2
+else
 @echo Cannot use CONFIG_CC_STACKPROTECTOR_$(stackp-name): \
 $(stackp-flag) available but compiler is broken >&2 && exit 1
 endif
diff --git a/arch/Kconfig b/arch/Kconfig
index 467dfa35bf96..76c0b54443b1 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -538,16 +538,10 @@ config HAVE_CC_STACKPROTECTOR
 - its compiler supports the -fstack-protector option
 - it has implemented a stack canary (e.g. __stack_chk_guard)
 
-config CC_STACKPROTECTOR
-def_bool n
-help
-Set when a stack-protector mode is enabled, so that the build
-can enable kernel-side support for the GCC feature.
-
 choice
 prompt "Stack Protector buffer overflow detection"
 depends on HAVE_CC_STACKPROTECTOR
-default CC_STACKPROTECTOR_NONE
+default CC_STACKPROTECTOR_AUTO
 help
 This option turns on the "stack-protector" GCC feature. This
 feature puts, at the beginning of functions, a canary value on
@@ -564,7 +558,6 @@ config CC_STACKPROTECTOR_NONE
 
 config CC_STACKPROTECTOR_REGULAR
 bool "Regular"
-select CC_STACKPROTECTOR
 help
 Functions will have the stack-protector canary logic added if they
 have an 8-byte or larger character array on the stack.
@@ -578,7 +571,6 @@ config CC_STACKPROTECTOR_REGULAR
 
 config CC_STACKPROTECTOR_STRONG
 bool "Strong"
-select CC_STACKPROTECTOR
 help
 Functions will have the stack-protector canary logic added in any
 of the following conditions:
@@ -596,6 +588,12 @@ config CC_STACKPROTECTOR_STRONG
 about 20% of all kernel functions, which increases the kernel code
 size by about 2%.
 
+config CC_STACKPROTECTOR_AUTO
+bool "Automatic"
+help
+If the compiler supports it, the best available stack-protector
+option will be chosen.
+
 endchoice
 
 config THIN_ARCHIVES
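The "Regular" and "Strong" help texts above describe when GCC instruments a function with a stack canary. As a rough standalone illustration (userspace code written for this note, not part of the kernel tree; copy_name() and the buffer size are invented for the example), a function like the following is the "8-byte or larger character array" case that -fstack-protector targets:

    /* Illustrative only: a local char array of 16 bytes is exactly the case
     * the "Regular" mode instruments; "Strong" would also cover functions
     * that merely take the address of a local variable. */
    #include <stdio.h>
    #include <string.h>

    static void copy_name(const char *src)
    {
            char buf[16];             /* >= 8 bytes, so a canary is inserted */

            strcpy(buf, src);         /* deliberately unbounded copy */
            printf("copied: %s\n", buf);
    }

    int main(int argc, char **argv)
    {
            copy_name(argc > 1 ? argv[1] : "short");
            return 0;
    }

Built with gcc -fstack-protector-strong and fed an over-long argument, the process aborts with a stack-smashing report instead of silently corrupting the return address; without the flag the overflow goes undetected here.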
diff --git a/arch/arm/include/asm/bitops.h b/arch/arm/include/asm/bitops.h
index ce5ee762ed66..4cab9bb823fb 100644
--- a/arch/arm/include/asm/bitops.h
+++ b/arch/arm/include/asm/bitops.h
@@ -338,6 +338,7 @@ static inline int find_next_bit_le(const void *p, int size, int offset)
 
 #endif
 
+#include <asm-generic/bitops/find.h>
 #include <asm-generic/bitops/le.h>
 
 /*
diff --git a/arch/arm64/include/asm/kasan.h b/arch/arm64/include/asm/kasan.h
index e266f80e45b7..8758bb008436 100644
--- a/arch/arm64/include/asm/kasan.h
+++ b/arch/arm64/include/asm/kasan.h
@@ -12,7 +12,8 @@
 
 /*
 * KASAN_SHADOW_START: beginning of the kernel virtual addresses.
-* KASAN_SHADOW_END: KASAN_SHADOW_START + 1/8 of kernel virtual addresses.
+* KASAN_SHADOW_END: KASAN_SHADOW_START + 1/N of kernel virtual addresses,
+* where N = (1 << KASAN_SHADOW_SCALE_SHIFT).
 */
 #define KASAN_SHADOW_START (VA_START)
 #define KASAN_SHADOW_END (KASAN_SHADOW_START + KASAN_SHADOW_SIZE)
@@ -20,14 +21,16 @@
 /*
 * This value is used to map an address to the corresponding shadow
 * address by the following formula:
-* shadow_addr = (address >> 3) + KASAN_SHADOW_OFFSET;
+* shadow_addr = (address >> KASAN_SHADOW_SCALE_SHIFT) + KASAN_SHADOW_OFFSET
 *
-* (1 << 61) shadow addresses - [KASAN_SHADOW_OFFSET,KASAN_SHADOW_END]
-* cover all 64-bits of virtual addresses. So KASAN_SHADOW_OFFSET
-* should satisfy the following equation:
-* KASAN_SHADOW_OFFSET = KASAN_SHADOW_END - (1ULL << 61)
+* (1 << (64 - KASAN_SHADOW_SCALE_SHIFT)) shadow addresses that lie in range
+* [KASAN_SHADOW_OFFSET, KASAN_SHADOW_END) cover all 64-bits of virtual
+* addresses. So KASAN_SHADOW_OFFSET should satisfy the following equation:
+* KASAN_SHADOW_OFFSET = KASAN_SHADOW_END -
+* (1ULL << (64 - KASAN_SHADOW_SCALE_SHIFT))
 */
-#define KASAN_SHADOW_OFFSET (KASAN_SHADOW_END - (1ULL << (64 - 3)))
+#define KASAN_SHADOW_OFFSET (KASAN_SHADOW_END - (1ULL << \
+(64 - KASAN_SHADOW_SCALE_SHIFT)))
 
 void kasan_init(void);
 void kasan_copy_shadow(pgd_t *pgdir);
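The reworded comment above states the shadow mapping generically; with KASAN_SHADOW_SCALE_SHIFT = 3 (the value used on arm64 and x86 in this series) it is the familiar one-shadow-byte-per-8-bytes scheme. A minimal userspace sketch of that arithmetic follows; the offset constant is an illustrative example value, not a real kernel configuration:

    /* Sketch of shadow_addr = (addr >> KASAN_SHADOW_SCALE_SHIFT) + offset.
     * EXAMPLE_SHADOW_OFFSET and the sample address are made up for the demo. */
    #include <stdio.h>
    #include <stdint.h>

    #define KASAN_SHADOW_SCALE_SHIFT 3
    #define EXAMPLE_SHADOW_OFFSET 0xdffffc0000000000ULL   /* illustrative only */

    static uint64_t shadow_addr(uint64_t addr)
    {
            return (addr >> KASAN_SHADOW_SCALE_SHIFT) + EXAMPLE_SHADOW_OFFSET;
    }

    int main(void)
    {
            uint64_t a = 0xffff880000000000ULL;   /* example "kernel" address */

            /* 8 consecutive bytes of the mapped range share one shadow byte. */
            printf("shadow(%#llx)   = %#llx\n",
                   (unsigned long long)a, (unsigned long long)shadow_addr(a));
            printf("shadow(%#llx) = %#llx\n",
                   (unsigned long long)(a + 7),
                   (unsigned long long)shadow_addr(a + 7));
            return 0;
    }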
diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h
index d4bae7d6e0d8..50fa96a49792 100644
--- a/arch/arm64/include/asm/memory.h
+++ b/arch/arm64/include/asm/memory.h
@@ -85,7 +85,8 @@
 * stack size when KASAN is in use.
 */
 #ifdef CONFIG_KASAN
-#define KASAN_SHADOW_SIZE (UL(1) << (VA_BITS - 3))
+#define KASAN_SHADOW_SCALE_SHIFT 3
+#define KASAN_SHADOW_SIZE (UL(1) << (VA_BITS - KASAN_SHADOW_SCALE_SHIFT))
 #define KASAN_THREAD_SHIFT 1
 #else
 #define KASAN_SHADOW_SIZE (0)
diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c
index 3affca3dd96a..75b220ba73a3 100644
--- a/arch/arm64/kernel/perf_event.c
+++ b/arch/arm64/kernel/perf_event.c
@@ -925,9 +925,8 @@ static void __armv8pmu_probe_pmu(void *info)
 pmceid[0] = read_sysreg(pmceid0_el0);
 pmceid[1] = read_sysreg(pmceid1_el0);
 
-bitmap_from_u32array(cpu_pmu->pmceid_bitmap,
-ARMV8_PMUV3_MAX_COMMON_EVENTS, pmceid,
-ARRAY_SIZE(pmceid));
+bitmap_from_arr32(cpu_pmu->pmceid_bitmap,
+pmceid, ARMV8_PMUV3_MAX_COMMON_EVENTS);
 }
 
 static int armv8pmu_probe_pmu(struct arm_pmu *cpu_pmu)
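As the call above shows, the new bitmap_from_arr32() helper from this series takes the destination bitmap, the u32 source array, and an explicit bit count, replacing the old (nbits, array, array-size) argument order of bitmap_from_u32array(). A standalone sketch of the same packing idea follows (bit i of the result comes from bit i mod 32 of source word i/32); bitmap_from_u32_sketch() is local to this example and is not the kernel implementation:

    #include <stdio.h>
    #include <stdint.h>
    #include <string.h>

    #define BITS_PER_LONG (8 * sizeof(unsigned long))
    #define TEST_BIT(map, n) \
            (!!((map)[(n) / BITS_PER_LONG] & (1UL << ((n) % BITS_PER_LONG))))

    /* Copy the low nbits of a u32 array into an unsigned long bitmap. */
    static void bitmap_from_u32_sketch(unsigned long *dst, const uint32_t *src,
                                       unsigned int nbits)
    {
            unsigned int i;

            memset(dst, 0,
                   ((nbits + BITS_PER_LONG - 1) / BITS_PER_LONG) * sizeof(*dst));
            for (i = 0; i < nbits; i++)
                    if (src[i / 32] & (1U << (i % 32)))
                            dst[i / BITS_PER_LONG] |= 1UL << (i % BITS_PER_LONG);
    }

    int main(void)
    {
            uint32_t pmceid[2] = { 0x00000011, 0x80000000 }; /* example words */
            unsigned long bitmap[2] = { 0, 0 };              /* 64 bits on any host */

            bitmap_from_u32_sketch(bitmap, pmceid, 64);
            printf("bit 0: %d  bit 4: %d  bit 63: %d\n",
                   TEST_BIT(bitmap, 0), TEST_BIT(bitmap, 4), TEST_BIT(bitmap, 63));
            return 0;
    }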
diff --git a/arch/arm64/mm/kasan_init.c b/arch/arm64/mm/kasan_init.c
index acba49fb5aac..6e02e6fb4c7b 100644
--- a/arch/arm64/mm/kasan_init.c
+++ b/arch/arm64/mm/kasan_init.c
@@ -135,7 +135,8 @@ static void __init kasan_pgd_populate(unsigned long addr, unsigned long end,
 /* The early shadow maps everything to a single page of zeroes */
 asmlinkage void __init kasan_early_init(void)
 {
-BUILD_BUG_ON(KASAN_SHADOW_OFFSET != KASAN_SHADOW_END - (1UL << 61));
+BUILD_BUG_ON(KASAN_SHADOW_OFFSET !=
+KASAN_SHADOW_END - (1UL << (64 - KASAN_SHADOW_SCALE_SHIFT)));
 BUILD_BUG_ON(!IS_ALIGNED(KASAN_SHADOW_START, PGDIR_SIZE));
 BUILD_BUG_ON(!IS_ALIGNED(KASAN_SHADOW_END, PGDIR_SIZE));
 kasan_pgd_populate(KASAN_SHADOW_START, KASAN_SHADOW_END, NUMA_NO_NODE,
diff --git a/arch/ia64/kernel/perfmon.c b/arch/ia64/kernel/perfmon.c
index c44f002e8f6b..858602494096 100644
--- a/arch/ia64/kernel/perfmon.c
+++ b/arch/ia64/kernel/perfmon.c
@@ -2610,17 +2610,10 @@ pfm_get_task(pfm_context_t *ctx, pid_t pid, struct task_struct **task)
 if (pid < 2) return -EPERM;
 
 if (pid != task_pid_vnr(current)) {
-
-read_lock(&tasklist_lock);
-
-p = find_task_by_vpid(pid);
-
 /* make sure task cannot go away while we operate on it */
-if (p) get_task_struct(p);
-
-read_unlock(&tasklist_lock);
-
-if (p == NULL) return -ESRCH;
+p = find_get_task_by_vpid(pid);
+if (!p)
+return -ESRCH;
 }
 
 ret = pfm_task_incompatible(ctx, p);
diff --git a/arch/m68k/include/asm/bitops.h b/arch/m68k/include/asm/bitops.h
index dda58cfe8c22..93b47b1f6fb4 100644
--- a/arch/m68k/include/asm/bitops.h
+++ b/arch/m68k/include/asm/bitops.h
@@ -311,7 +311,6 @@ static inline int bfchg_mem_test_and_change_bit(int nr,
 * functions.
 */
 #if defined(CONFIG_CPU_HAS_NO_BITFIELDS)
-#include <asm-generic/bitops/find.h>
 #include <asm-generic/bitops/ffz.h>
 #else
 
@@ -441,6 +440,8 @@ static inline unsigned long ffz(unsigned long word)
 
 #endif
 
+#include <asm-generic/bitops/find.h>
+
 #ifdef __KERNEL__
 
 #if defined(CONFIG_CPU_HAS_NO_BITFIELDS)
diff --git a/arch/score/kernel/setup.c b/arch/score/kernel/setup.c
index f3a0649ab521..627416bbd0b1 100644
--- a/arch/score/kernel/setup.c
+++ b/arch/score/kernel/setup.c
@@ -124,9 +124,7 @@ static int show_cpuinfo(struct seq_file *m, void *v)
 {
 unsigned long n = (unsigned long) v - 1;
 
-seq_printf(m, "processor\t\t: %ld\n", n);
-seq_printf(m, "\n");
-
+seq_printf(m, "processor\t\t: %ld\n\n", n);
 return 0;
 }
 
diff --git a/arch/unicore32/include/asm/bitops.h b/arch/unicore32/include/asm/bitops.h
index 401f597bc38c..c0cbdbe17168 100644
--- a/arch/unicore32/include/asm/bitops.h
+++ b/arch/unicore32/include/asm/bitops.h
@@ -44,4 +44,6 @@ static inline int fls(int x)
 #define find_first_bit find_first_bit
 #define find_first_zero_bit find_first_zero_bit
 
+#include <asm-generic/bitops/find.h>
+
 #endif /* __UNICORE_BITOPS_H__ */
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index cefa6dbe80ae..63bf349b2b24 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -324,7 +324,7 @@ config X86_64_SMP
 
 config X86_32_LAZY_GS
 def_bool y
-depends on X86_32 && !CC_STACKPROTECTOR
+depends on X86_32 && CC_STACKPROTECTOR_NONE
 
 config ARCH_SUPPORTS_UPROBES
 def_bool y
diff --git a/arch/x86/include/asm/kasan.h b/arch/x86/include/asm/kasan.h
index b577dd0916aa..13e70da38bed 100644
--- a/arch/x86/include/asm/kasan.h
+++ b/arch/x86/include/asm/kasan.h
@@ -4,6 +4,7 @@
 
 #include <linux/const.h>
 #define KASAN_SHADOW_OFFSET _AC(CONFIG_KASAN_SHADOW_OFFSET, UL)
+#define KASAN_SHADOW_SCALE_SHIFT 3
 
 /*
 * Compiler uses shadow offset assuming that addresses start
@@ -12,12 +13,15 @@
 * 'kernel address space start' >> KASAN_SHADOW_SCALE_SHIFT
 */
 #define KASAN_SHADOW_START (KASAN_SHADOW_OFFSET + \
-((-1UL << __VIRTUAL_MASK_SHIFT) >> 3))
+((-1UL << __VIRTUAL_MASK_SHIFT) >> \
+KASAN_SHADOW_SCALE_SHIFT))
 /*
-* 47 bits for kernel address -> (47 - 3) bits for shadow
-* 56 bits for kernel address -> (56 - 3) bits for shadow
+* 47 bits for kernel address -> (47 - KASAN_SHADOW_SCALE_SHIFT) bits for shadow
+* 56 bits for kernel address -> (56 - KASAN_SHADOW_SCALE_SHIFT) bits for shadow
 */
-#define KASAN_SHADOW_END (KASAN_SHADOW_START + (1ULL << (__VIRTUAL_MASK_SHIFT - 3)))
+#define KASAN_SHADOW_END (KASAN_SHADOW_START + \
+(1ULL << (__VIRTUAL_MASK_SHIFT - \
+KASAN_SHADOW_SCALE_SHIFT)))
 
 #ifndef __ASSEMBLY__
 
diff --git a/drivers/perf/arm_dsu_pmu.c b/drivers/perf/arm_dsu_pmu.c
index 93c50e377507..38f2cc2a6c74 100644
--- a/drivers/perf/arm_dsu_pmu.c
+++ b/drivers/perf/arm_dsu_pmu.c
@@ -658,10 +658,8 @@ static void dsu_pmu_probe_pmu(struct dsu_pmu *dsu_pmu)
 return;
 cpmceid[0] = __dsu_pmu_read_pmceid(0);
 cpmceid[1] = __dsu_pmu_read_pmceid(1);
-bitmap_from_u32array(dsu_pmu->cpmceid_bitmap,
-DSU_PMU_MAX_COMMON_EVENTS,
-cpmceid,
-ARRAY_SIZE(cpmceid));
+bitmap_from_arr32(dsu_pmu->cpmceid_bitmap, cpmceid,
+DSU_PMU_MAX_COMMON_EVENTS);
 }
 
 static void dsu_pmu_set_active_cpu(int cpu, struct dsu_pmu *dsu_pmu)
diff --git a/drivers/pps/generators/pps_gen_parport.c b/drivers/pps/generators/pps_gen_parport.c
index dcd39fba6ddd..51cfde6afffd 100644
--- a/drivers/pps/generators/pps_gen_parport.c
+++ b/drivers/pps/generators/pps_gen_parport.c
@@ -70,7 +70,7 @@ static long hrtimer_error = SAFETY_INTERVAL;
 /* the kernel hrtimer event */
 static enum hrtimer_restart hrtimer_event(struct hrtimer *timer)
 {
-struct timespec expire_time, ts1, ts2, ts3, dts;
+struct timespec64 expire_time, ts1, ts2, ts3, dts;
 struct pps_generator_pp *dev;
 struct parport *port;
 long lim, delta;
@@ -78,7 +78,7 @@ static enum hrtimer_restart hrtimer_event(struct hrtimer *timer)
 
 /* We have to disable interrupts here. The idea is to prevent
 * other interrupts on the same processor to introduce random
-* lags while polling the clock. getnstimeofday() takes <1us on
+* lags while polling the clock. ktime_get_real_ts64() takes <1us on
 * most machines while other interrupt handlers can take much
 * more potentially.
 *
@@ -88,22 +88,22 @@ static enum hrtimer_restart hrtimer_event(struct hrtimer *timer)
 local_irq_save(flags);
 
 /* first of all we get the time stamp... */
-getnstimeofday(&ts1);
-expire_time = ktime_to_timespec(hrtimer_get_softexpires(timer));
+ktime_get_real_ts64(&ts1);
+expire_time = ktime_to_timespec64(hrtimer_get_softexpires(timer));
 dev = container_of(timer, struct pps_generator_pp, timer);
 lim = NSEC_PER_SEC - send_delay - dev->port_write_time;
 
 /* check if we are late */
 if (expire_time.tv_sec != ts1.tv_sec || ts1.tv_nsec > lim) {
 local_irq_restore(flags);
-pr_err("we are late this time %ld.%09ld\n",
-ts1.tv_sec, ts1.tv_nsec);
+pr_err("we are late this time %lld.%09ld\n",
+(s64)ts1.tv_sec, ts1.tv_nsec);
 goto done;
 }
 
 /* busy loop until the time is right for an assert edge */
 do {
-getnstimeofday(&ts2);
+ktime_get_real_ts64(&ts2);
 } while (expire_time.tv_sec == ts2.tv_sec && ts2.tv_nsec < lim);
 
 /* set the signal */
@@ -113,25 +113,25 @@ static enum hrtimer_restart hrtimer_event(struct hrtimer *timer)
 /* busy loop until the time is right for a clear edge */
 lim = NSEC_PER_SEC - dev->port_write_time;
 do {
-getnstimeofday(&ts2);
+ktime_get_real_ts64(&ts2);
 } while (expire_time.tv_sec == ts2.tv_sec && ts2.tv_nsec < lim);
 
 /* unset the signal */
 port->ops->write_control(port, NO_SIGNAL);
 
-getnstimeofday(&ts3);
+ktime_get_real_ts64(&ts3);
 
 local_irq_restore(flags);
 
 /* update calibrated port write time */
-dts = timespec_sub(ts3, ts2);
+dts = timespec64_sub(ts3, ts2);
 dev->port_write_time =
-(dev->port_write_time + timespec_to_ns(&dts)) >> 1;
+(dev->port_write_time + timespec64_to_ns(&dts)) >> 1;
 
 done:
 /* update calibrated hrtimer error */
-dts = timespec_sub(ts1, expire_time);
-delta = timespec_to_ns(&dts);
+dts = timespec64_sub(ts1, expire_time);
+delta = timespec64_to_ns(&dts);
 /* If the new error value is bigger then the old, use the new
 * value, if not then slowly move towards the new value. This
 * way it should be safe in bad conditions and efficient in
@@ -161,17 +161,17 @@ static void calibrate_port(struct pps_generator_pp *dev)
 long acc = 0;
 
 for (i = 0; i < (1 << PORT_NTESTS_SHIFT); i++) {
-struct timespec a, b;
+struct timespec64 a, b;
 unsigned long irq_flags;
 
 local_irq_save(irq_flags);
-getnstimeofday(&a);
+ktime_get_real_ts64(&a);
 port->ops->write_control(port, NO_SIGNAL);
-getnstimeofday(&b);
+ktime_get_real_ts64(&b);
 local_irq_restore(irq_flags);
 
-b = timespec_sub(b, a);
-acc += timespec_to_ns(&b);
+b = timespec64_sub(b, a);
+acc += timespec64_to_ns(&b);
 }
 
 dev->port_write_time = acc >> PORT_NTESTS_SHIFT;
@@ -180,9 +180,9 @@ static void calibrate_port(struct pps_generator_pp *dev)
 
 static inline ktime_t next_intr_time(struct pps_generator_pp *dev)
 {
-struct timespec ts;
+struct timespec64 ts;
 
-getnstimeofday(&ts);
+ktime_get_real_ts64(&ts);
 
 return ktime_set(ts.tv_sec +
 ((ts.tv_nsec > 990 * NSEC_PER_MSEC) ? 1 : 0),
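The conversion above swaps struct timespec/getnstimeofday() for struct timespec64/ktime_get_real_ts64(), whose seconds field is 64-bit even on 32-bit machines; that is why the pr_err() format gains a %lld and an explicit cast. A small standalone illustration of that formatting point (ts64_example is a stand-in type for this example, not the kernel's definition):

    #include <stdio.h>
    #include <stdint.h>

    struct ts64_example {
            int64_t tv_sec;   /* 64-bit seconds, even on 32-bit machines */
            long    tv_nsec;
    };

    int main(void)
    {
            /* A post-2038 timestamp that would not fit a 32-bit tv_sec. */
            struct ts64_example ts = { .tv_sec = 4102444800LL, .tv_nsec = 5 };

            /* Mirrors the pr_err() change: cast to a known 64-bit type for %lld,
             * since "%ld" is no longer a portable format for tv_sec. */
            printf("we are late this time %lld.%09ld\n",
                   (long long)ts.tv_sec, ts.tv_nsec);
            return 0;
    }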
diff --git a/drivers/rapidio/devices/tsi721_dma.c b/drivers/rapidio/devices/tsi721_dma.c
index e2a418598129..006ea5a45020 100644
--- a/drivers/rapidio/devices/tsi721_dma.c
+++ b/drivers/rapidio/devices/tsi721_dma.c
@@ -222,7 +222,7 @@ static int tsi721_bdma_ch_free(struct tsi721_bdma_chan *bdma_chan)
 struct tsi721_device *priv = to_tsi721(bdma_chan->dchan.device);
 #endif
 
-if (bdma_chan->bd_base == NULL)
+if (!bdma_chan->bd_base)
 return 0;
 
 /* Check if DMA channel still running */
@@ -346,7 +346,7 @@ tsi721_desc_fill_init(struct tsi721_tx_desc *desc,
 {
 u64 rio_addr;
 
-if (bd_ptr == NULL)
+if (!bd_ptr)
 return -EINVAL;
 
 /* Initialize DMA descriptor */
@@ -370,7 +370,7 @@ tsi721_desc_fill_init(struct tsi721_tx_desc *desc,
 static int
 tsi721_desc_fill_end(struct tsi721_dma_desc *bd_ptr, u32 bcount, bool interrupt)
 {
-if (bd_ptr == NULL)
+if (!bd_ptr)
 return -EINVAL;
 
 /* Update DMA descriptor */
@@ -555,9 +555,7 @@ static void tsi721_advance_work(struct tsi721_bdma_chan *bdma_chan,
 * If there is no data transfer in progress, fetch new descriptor from
 * the pending queue.
 */
-
-if (desc == NULL && bdma_chan->active_tx == NULL &&
-!list_empty(&bdma_chan->queue)) {
+if (!desc && !bdma_chan->active_tx && !list_empty(&bdma_chan->queue)) {
 desc = list_first_entry(&bdma_chan->queue,
 struct tsi721_tx_desc, desc_node);
 list_del_init((&desc->desc_node));
@@ -735,7 +733,7 @@ static dma_cookie_t tsi721_tx_submit(struct dma_async_tx_descriptor *txd)
 static int tsi721_alloc_chan_resources(struct dma_chan *dchan)
 {
 struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan);
-struct tsi721_tx_desc *desc = NULL;
+struct tsi721_tx_desc *desc;
 int i;
 
 tsi_debug(DMA, &dchan->dev->device, "DMAC%d", bdma_chan->id);
@@ -754,9 +752,6 @@ static int tsi721_alloc_chan_resources(struct dma_chan *dchan)
 desc = kcalloc(dma_txqueue_sz, sizeof(struct tsi721_tx_desc),
 GFP_ATOMIC);
 if (!desc) {
-tsi_err(&dchan->dev->device,
-"DMAC%d Failed to allocate logical descriptors",
-bdma_chan->id);
 tsi721_bdma_ch_free(bdma_chan);
 return -ENOMEM;
 }
@@ -799,7 +794,7 @@ static void tsi721_free_chan_resources(struct dma_chan *dchan)
 
 tsi_debug(DMA, &dchan->dev->device, "DMAC%d", bdma_chan->id);
 
-if (bdma_chan->bd_base == NULL)
+if (!bdma_chan->bd_base)
 return;
 
 tsi721_bdma_interrupt_enable(bdma_chan, 0);
diff --git a/drivers/rapidio/rio.c b/drivers/rapidio/rio.c
index 38d949405618..83406696c7aa 100644
--- a/drivers/rapidio/rio.c
+++ b/drivers/rapidio/rio.c
@@ -81,6 +81,7 @@ u16 rio_local_get_device_id(struct rio_mport *port)
 
 return (RIO_GET_DID(port->sys_size, result));
 }
+EXPORT_SYMBOL_GPL(rio_local_get_device_id);
 
 /**
 * rio_query_mport - Query mport device attributes
@@ -110,9 +111,8 @@ EXPORT_SYMBOL(rio_query_mport);
 */
 struct rio_net *rio_alloc_net(struct rio_mport *mport)
 {
-struct rio_net *net;
+struct rio_net *net = kzalloc(sizeof(*net), GFP_KERNEL);
 
-net = kzalloc(sizeof(struct rio_net), GFP_KERNEL);
 if (net) {
 INIT_LIST_HEAD(&net->node);
 INIT_LIST_HEAD(&net->devices);
@@ -243,18 +243,17 @@ int rio_request_inb_mbox(struct rio_mport *mport,
 int rc = -ENOSYS;
 struct resource *res;
 
-if (mport->ops->open_inb_mbox == NULL)
+if (!mport->ops->open_inb_mbox)
 goto out;
 
-res = kzalloc(sizeof(struct resource), GFP_KERNEL);
-
+res = kzalloc(sizeof(*res), GFP_KERNEL);
 if (res) {
 rio_init_mbox_res(res, mbox, mbox);
 
 /* Make sure this mailbox isn't in use */
-if ((rc =
-request_resource(&mport->riores[RIO_INB_MBOX_RESOURCE],
-res)) < 0) {
+rc = request_resource(&mport->riores[RIO_INB_MBOX_RESOURCE],
+res);
+if (rc < 0) {
 kfree(res);
 goto out;
 }
@@ -277,6 +276,7 @@ int rio_request_inb_mbox(struct rio_mport *mport,
 out:
 return rc;
 }
+EXPORT_SYMBOL_GPL(rio_request_inb_mbox);
 
 /**
 * rio_release_inb_mbox - release inbound mailbox message service
@@ -305,6 +305,7 @@ int rio_release_inb_mbox(struct rio_mport *mport, int mbox)
 
 return 0;
 }
+EXPORT_SYMBOL_GPL(rio_release_inb_mbox);
 
 /**
 * rio_request_outb_mbox - request outbound mailbox service
@@ -326,18 +327,17 @@ int rio_request_outb_mbox(struct rio_mport *mport,
 int rc = -ENOSYS;
 struct resource *res;
 
-if (mport->ops->open_outb_mbox == NULL)
+if (!mport->ops->open_outb_mbox)
 goto out;
 
-res = kzalloc(sizeof(struct resource), GFP_KERNEL);
-
+res = kzalloc(sizeof(*res), GFP_KERNEL);
 if (res) {
 rio_init_mbox_res(res, mbox, mbox);
 
 /* Make sure this outbound mailbox isn't in use */
-if ((rc =
-request_resource(&mport->riores[RIO_OUTB_MBOX_RESOURCE],
-res)) < 0) {
+rc = request_resource(&mport->riores[RIO_OUTB_MBOX_RESOURCE],
+res);
+if (rc < 0) {
 kfree(res);
 goto out;
 }
@@ -360,6 +360,7 @@ int rio_request_outb_mbox(struct rio_mport *mport,
 out:
 return rc;
 }
+EXPORT_SYMBOL_GPL(rio_request_outb_mbox);
 
 /**
 * rio_release_outb_mbox - release outbound mailbox message service
@@ -388,6 +389,7 @@ int rio_release_outb_mbox(struct rio_mport *mport, int mbox)
 
 return 0;
 }
+EXPORT_SYMBOL_GPL(rio_release_outb_mbox);
 
 /**
 * rio_setup_inb_dbell - bind inbound doorbell callback
@@ -405,13 +407,10 @@ rio_setup_inb_dbell(struct rio_mport *mport, void *dev_id, struct resource *res,
 void (*dinb) (struct rio_mport * mport, void *dev_id, u16 src, u16 dst,
 u16 info))
 {
-int rc = 0;
-struct rio_dbell *dbell;
+struct rio_dbell *dbell = kmalloc(sizeof(*dbell), GFP_KERNEL);
 
-if (!(dbell = kmalloc(sizeof(struct rio_dbell), GFP_KERNEL))) {
-rc = -ENOMEM;
-goto out;
-}
+if (!dbell)
+return -ENOMEM;
 
 dbell->res = res;
 dbell->dinb = dinb;
@@ -420,9 +419,7 @@ rio_setup_inb_dbell(struct rio_mport *mport, void *dev_id, struct resource *res,
 mutex_lock(&mport->lock);
 list_add_tail(&dbell->node, &mport->dbells);
 mutex_unlock(&mport->lock);
-
-out:
-return rc;
+return 0;
 }
 
 /**
@@ -444,17 +441,16 @@ int rio_request_inb_dbell(struct rio_mport *mport,
 void (*dinb) (struct rio_mport * mport, void *dev_id, u16 src,
 u16 dst, u16 info))
 {
-int rc = 0;
-
-struct resource *res = kzalloc(sizeof(struct resource), GFP_KERNEL);
+int rc;
+struct resource *res = kzalloc(sizeof(*res), GFP_KERNEL);
 
 if (res) {
 rio_init_dbell_res(res, start, end);
 
 /* Make sure these doorbells aren't in use */
-if ((rc =
-request_resource(&mport->riores[RIO_DOORBELL_RESOURCE],
-res)) < 0) {
+rc = request_resource(&mport->riores[RIO_DOORBELL_RESOURCE],
+res);
+if (rc < 0) {
 kfree(res);
 goto out;
 }
@@ -467,6 +463,7 @@ int rio_request_inb_dbell(struct rio_mport *mport,
 out:
 return rc;
 }
+EXPORT_SYMBOL_GPL(rio_request_inb_dbell);
 
 /**
 * rio_release_inb_dbell - release inbound doorbell message service
@@ -508,6 +505,7 @@ int rio_release_inb_dbell(struct rio_mport *mport, u16 start, u16 end)
 out:
 return rc;
 }
+EXPORT_SYMBOL_GPL(rio_release_inb_dbell);
 
 /**
 * rio_request_outb_dbell - request outbound doorbell message range
@@ -536,6 +534,7 @@ struct resource *rio_request_outb_dbell(struct rio_dev *rdev, u16 start,
 
 return res;
 }
+EXPORT_SYMBOL_GPL(rio_request_outb_dbell);
 
 /**
 * rio_release_outb_dbell - release outbound doorbell message range
@@ -553,6 +552,7 @@ int rio_release_outb_dbell(struct rio_dev *rdev, struct resource *res)
 
 return rc;
 }
+EXPORT_SYMBOL_GPL(rio_release_outb_dbell);
 
 /**
 * rio_add_mport_pw_handler - add port-write message handler into the list
@@ -567,22 +567,17 @@ int rio_add_mport_pw_handler(struct rio_mport *mport, void *context,
 int (*pwcback)(struct rio_mport *mport,
 void *context, union rio_pw_msg *msg, int step))
 {
-int rc = 0;
-struct rio_pwrite *pwrite;
+struct rio_pwrite *pwrite = kzalloc(sizeof(*pwrite), GFP_KERNEL);
 
-pwrite = kzalloc(sizeof(struct rio_pwrite), GFP_KERNEL);
-if (!pwrite) {
-rc = -ENOMEM;
-goto out;
-}
+if (!pwrite)
+return -ENOMEM;
 
 pwrite->pwcback = pwcback;
 pwrite->context = context;
 mutex_lock(&mport->lock);
 list_add_tail(&pwrite->node, &mport->pwrites);
 mutex_unlock(&mport->lock);
-out:
-return rc;
+return 0;
 }
 EXPORT_SYMBOL_GPL(rio_add_mport_pw_handler);
 
@@ -632,7 +627,7 @@ int rio_request_inb_pwrite(struct rio_dev *rdev,
 int rc = 0;
 
 spin_lock(&rio_global_list_lock);
-if (rdev->pwcback != NULL)
+if (rdev->pwcback)
 rc = -ENOMEM;
 else
 rdev->pwcback = pwcback;
@@ -698,7 +693,7 @@ EXPORT_SYMBOL_GPL(rio_pw_enable);
 int rio_map_inb_region(struct rio_mport *mport, dma_addr_t local,
 u64 rbase, u32 size, u32 rflags)
 {
-int rc = 0;
+int rc;
 unsigned long flags;
 
 if (!mport->ops->map_inb)
@@ -742,7 +737,7 @@ EXPORT_SYMBOL_GPL(rio_unmap_inb_region);
 int rio_map_outb_region(struct rio_mport *mport, u16 destid, u64 rbase,
 u32 size, u32 rflags, dma_addr_t *local)
 {
-int rc = 0;
+int rc;
 unsigned long flags;
 
 if (!mport->ops->map_outb)
@@ -975,7 +970,7 @@ rio_chk_dev_route(struct rio_dev *rdev, struct rio_dev **nrdev, int *npnum)
 rdev = rdev->prev;
 }
 
-if (prev == NULL)
+if (!prev)
 goto err_out;
 
 p_port = prev->rswitch->route_table[rdev->destid];
@@ -1054,7 +1049,7 @@ rio_get_input_status(struct rio_dev *rdev, int pnum, u32 *lnkresp)
 RIO_MNT_REQ_CMD_IS);
 
 /* Exit if the response is not expected */
-if (lnkresp == NULL)
+if (!lnkresp)
 return 0;
 
 checkcount = 3;
@@ -1411,7 +1406,9 @@ rio_mport_get_feature(struct rio_mport * port, int local, u16 destid,
 ext_ftr_ptr, &ftr_header);
 if (RIO_GET_BLOCK_ID(ftr_header) == ftr)
 return ext_ftr_ptr;
-if (!(ext_ftr_ptr = RIO_GET_BLOCK_PTR(ftr_header)))
+
+ext_ftr_ptr = RIO_GET_BLOCK_PTR(ftr_header);
+if (!ext_ftr_ptr)
 break;
 }
 
@@ -1462,6 +1459,7 @@ struct rio_dev *rio_get_asm(u16 vid, u16 did,
 spin_unlock(&rio_global_list_lock);
 return rdev;
 }
+EXPORT_SYMBOL_GPL(rio_get_asm);
 
 /**
 * rio_get_device - Begin or continue searching for a RIO device by vid/did
@@ -1481,6 +1479,7 @@ struct rio_dev *rio_get_device(u16 vid, u16 did, struct rio_dev *from)
 {
 return rio_get_asm(vid, did, RIO_ANY_ID, RIO_ANY_ID, from);
 }
+EXPORT_SYMBOL_GPL(rio_get_device);
 
 /**
 * rio_std_route_add_entry - Add switch route table entry using standard
@@ -1696,7 +1695,7 @@ int rio_route_add_entry(struct rio_dev *rdev,
 
 spin_lock(&rdev->rswitch->lock);
 
-if (ops == NULL || ops->add_entry == NULL) {
+if (!ops || !ops->add_entry) {
 rc = rio_std_route_add_entry(rdev->net->hport, rdev->destid,
 rdev->hopcount, table,
 route_destid, route_port);
@@ -1749,7 +1748,7 @@ int rio_route_get_entry(struct rio_dev *rdev, u16 table,
 
 spin_lock(&rdev->rswitch->lock);
 
-if (ops == NULL || ops->get_entry == NULL) {
+if (!ops || !ops->get_entry) {
 rc = rio_std_route_get_entry(rdev->net->hport, rdev->destid,
 rdev->hopcount, table,
 route_destid, route_port);
@@ -1797,7 +1796,7 @@ int rio_route_clr_table(struct rio_dev *rdev, u16 table, int lock)
 
 spin_lock(&rdev->rswitch->lock);
 
-if (ops == NULL || ops->clr_table == NULL) {
+if (!ops || !ops->clr_table) {
 rc = rio_std_route_clr_table(rdev->net->hport, rdev->destid,
 rdev->hopcount, table);
 } else if (try_module_get(ops->owner)) {
@@ -1889,7 +1888,7 @@ struct dma_async_tx_descriptor *rio_dma_prep_xfer(struct dma_chan *dchan,
 {
 struct rio_dma_ext rio_ext;
 
-if (dchan->device->device_prep_slave_sg == NULL) {
+if (!dchan->device->device_prep_slave_sg) {
 pr_err("%s: prep_rio_sg == NULL\n", __func__);
 return NULL;
 }
@@ -2189,7 +2188,6 @@ int rio_init_mports(void)
 
 work = kcalloc(n, sizeof *work, GFP_KERNEL);
 if (!work) {
-pr_err("RIO: no memory for work struct\n");
 destroy_workqueue(rio_wq);
 goto no_disc;
 }
@@ -2216,6 +2214,7 @@ no_disc:
 
 return 0;
 }
+EXPORT_SYMBOL_GPL(rio_init_mports);
 
 static int rio_get_hdid(int index)
 {
@@ -2330,16 +2329,3 @@ int rio_unregister_mport(struct rio_mport *port)
 return 0;
 }
 EXPORT_SYMBOL_GPL(rio_unregister_mport);
-
-EXPORT_SYMBOL_GPL(rio_local_get_device_id);
-EXPORT_SYMBOL_GPL(rio_get_device);
-EXPORT_SYMBOL_GPL(rio_get_asm);
-EXPORT_SYMBOL_GPL(rio_request_inb_dbell);
-EXPORT_SYMBOL_GPL(rio_release_inb_dbell);
-EXPORT_SYMBOL_GPL(rio_request_outb_dbell);
-EXPORT_SYMBOL_GPL(rio_release_outb_dbell);
-EXPORT_SYMBOL_GPL(rio_request_inb_mbox);
2342 | EXPORT_SYMBOL_GPL(rio_release_inb_mbox); | ||
2343 | EXPORT_SYMBOL_GPL(rio_request_outb_mbox); | ||
2344 | EXPORT_SYMBOL_GPL(rio_release_outb_mbox); | ||
2345 | EXPORT_SYMBOL_GPL(rio_init_mports); | ||
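The rapidio cleanups above are mechanical: pointer checks become "if (!ptr)", the out-of-memory printk after kcalloc() goes away (the allocator already dumps a warning on failure unless __GFP_NOWARN is passed), and each EXPORT_SYMBOL_GPL() moves next to the function it exports instead of sitting in a block at the end of the file. A minimal sketch of the resulting pattern; the function name here is made up, not part of rio.c:

#include <linux/errno.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/types.h>

/* Illustrative only: checkpatch-preferred allocation-failure handling. */
int example_alloc_table(unsigned int n, u32 **out)
{
        u32 *table;

        table = kcalloc(n, sizeof(*table), GFP_KERNEL);
        if (!table)             /* no pr_err(): the allocator already warns */
                return -ENOMEM;

        *out = table;
        return 0;
}
/* The export now sits directly after the definition it exports. */
EXPORT_SYMBOL_GPL(example_alloc_table);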
diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c index 83732fef510d..bdb201230bae 100644 --- a/fs/binfmt_elf.c +++ b/fs/binfmt_elf.c | |||
@@ -1599,6 +1599,8 @@ static int fill_files_note(struct memelfnote *note) | |||
1599 | 1599 | ||
1600 | /* *Estimated* file count and total data size needed */ | 1600 | /* *Estimated* file count and total data size needed */ |
1601 | count = current->mm->map_count; | 1601 | count = current->mm->map_count; |
1602 | if (count > UINT_MAX / 64) | ||
1603 | return -EINVAL; | ||
1602 | size = count * 64; | 1604 | size = count * 64; |
1603 | 1605 | ||
1604 | names_ofs = (2 + 3 * count) * sizeof(data[0]); | 1606 | names_ofs = (2 + 3 * count) * sizeof(data[0]); |
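The new check in fill_files_note() rejects a map_count large enough to make the "count * 64" size estimate wrap around in an unsigned int, instead of proceeding with a too-small value. A standalone illustration of the guard (userspace C, function name is mine, not the kernel's):

#include <limits.h>
#include <stdio.h>

/* Returns 0 if count * 64 fits in an unsigned int, -1 if it would wrap. */
static int estimate_note_size(unsigned int count, unsigned int *size)
{
        if (count > UINT_MAX / 64)      /* would overflow: reject, don't wrap */
                return -1;
        *size = count * 64;
        return 0;
}

int main(void)
{
        unsigned int size;

        printf("%d\n", estimate_note_size(1000, &size));          /* 0, size == 64000 */
        printf("%d\n", estimate_note_size(UINT_MAX / 32, &size)); /* -1, would wrap */
        return 0;
}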
diff --git a/fs/hfsplus/dir.c b/fs/hfsplus/dir.c index e8120a282435..15e06fb552da 100644 --- a/fs/hfsplus/dir.c +++ b/fs/hfsplus/dir.c | |||
@@ -444,7 +444,7 @@ static int hfsplus_symlink(struct inode *dir, struct dentry *dentry, | |||
444 | int res = -ENOMEM; | 444 | int res = -ENOMEM; |
445 | 445 | ||
446 | mutex_lock(&sbi->vh_mutex); | 446 | mutex_lock(&sbi->vh_mutex); |
447 | inode = hfsplus_new_inode(dir->i_sb, S_IFLNK | S_IRWXUGO); | 447 | inode = hfsplus_new_inode(dir->i_sb, dir, S_IFLNK | S_IRWXUGO); |
448 | if (!inode) | 448 | if (!inode) |
449 | goto out; | 449 | goto out; |
450 | 450 | ||
@@ -486,7 +486,7 @@ static int hfsplus_mknod(struct inode *dir, struct dentry *dentry, | |||
486 | int res = -ENOMEM; | 486 | int res = -ENOMEM; |
487 | 487 | ||
488 | mutex_lock(&sbi->vh_mutex); | 488 | mutex_lock(&sbi->vh_mutex); |
489 | inode = hfsplus_new_inode(dir->i_sb, mode); | 489 | inode = hfsplus_new_inode(dir->i_sb, dir, mode); |
490 | if (!inode) | 490 | if (!inode) |
491 | goto out; | 491 | goto out; |
492 | 492 | ||
diff --git a/fs/hfsplus/hfsplus_fs.h b/fs/hfsplus/hfsplus_fs.h index a015044daa05..d9255abafb81 100644 --- a/fs/hfsplus/hfsplus_fs.h +++ b/fs/hfsplus/hfsplus_fs.h | |||
@@ -478,7 +478,8 @@ extern const struct address_space_operations hfsplus_aops; | |||
478 | extern const struct address_space_operations hfsplus_btree_aops; | 478 | extern const struct address_space_operations hfsplus_btree_aops; |
479 | extern const struct dentry_operations hfsplus_dentry_operations; | 479 | extern const struct dentry_operations hfsplus_dentry_operations; |
480 | 480 | ||
481 | struct inode *hfsplus_new_inode(struct super_block *sb, umode_t mode); | 481 | struct inode *hfsplus_new_inode(struct super_block *sb, struct inode *dir, |
482 | umode_t mode); | ||
482 | void hfsplus_delete_inode(struct inode *inode); | 483 | void hfsplus_delete_inode(struct inode *inode); |
483 | void hfsplus_inode_read_fork(struct inode *inode, | 484 | void hfsplus_inode_read_fork(struct inode *inode, |
484 | struct hfsplus_fork_raw *fork); | 485 | struct hfsplus_fork_raw *fork); |
diff --git a/fs/hfsplus/inode.c b/fs/hfsplus/inode.c index 190c60efbc99..c0c8d433864f 100644 --- a/fs/hfsplus/inode.c +++ b/fs/hfsplus/inode.c | |||
@@ -354,7 +354,8 @@ static const struct file_operations hfsplus_file_operations = { | |||
354 | .unlocked_ioctl = hfsplus_ioctl, | 354 | .unlocked_ioctl = hfsplus_ioctl, |
355 | }; | 355 | }; |
356 | 356 | ||
357 | struct inode *hfsplus_new_inode(struct super_block *sb, umode_t mode) | 357 | struct inode *hfsplus_new_inode(struct super_block *sb, struct inode *dir, |
358 | umode_t mode) | ||
358 | { | 359 | { |
359 | struct hfsplus_sb_info *sbi = HFSPLUS_SB(sb); | 360 | struct hfsplus_sb_info *sbi = HFSPLUS_SB(sb); |
360 | struct inode *inode = new_inode(sb); | 361 | struct inode *inode = new_inode(sb); |
@@ -364,9 +365,7 @@ struct inode *hfsplus_new_inode(struct super_block *sb, umode_t mode) | |||
364 | return NULL; | 365 | return NULL; |
365 | 366 | ||
366 | inode->i_ino = sbi->next_cnid++; | 367 | inode->i_ino = sbi->next_cnid++; |
367 | inode->i_mode = mode; | 368 | inode_init_owner(inode, dir, mode); |
368 | inode->i_uid = current_fsuid(); | ||
369 | inode->i_gid = current_fsgid(); | ||
370 | set_nlink(inode, 1); | 369 | set_nlink(inode, 1); |
371 | inode->i_mtime = inode->i_atime = inode->i_ctime = current_time(inode); | 370 | inode->i_mtime = inode->i_atime = inode->i_ctime = current_time(inode); |
372 | 371 | ||
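Passing the parent directory into hfsplus_new_inode() lets it call inode_init_owner(), which sets i_mode/i_uid/i_gid exactly as the removed open-coded lines did but also honours a setgid parent directory. Roughly, and slightly simplified from the generic VFS helper of this era (the real one handles an extra corner case for non-directories):

#include <linux/cred.h>
#include <linux/fs.h>

void inode_init_owner(struct inode *inode, const struct inode *dir, umode_t mode)
{
        inode->i_uid = current_fsuid();
        if (dir && dir->i_mode & S_ISGID) {
                inode->i_gid = dir->i_gid;      /* new inode inherits the group */
                if (S_ISDIR(mode))
                        mode |= S_ISGID;        /* subdirectories stay setgid */
        } else {
                inode->i_gid = current_fsgid();
        }
        inode->i_mode = mode;
}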
diff --git a/fs/hfsplus/super.c b/fs/hfsplus/super.c index 1d458b716957..513c357c734b 100644 --- a/fs/hfsplus/super.c +++ b/fs/hfsplus/super.c | |||
@@ -549,7 +549,7 @@ static int hfsplus_fill_super(struct super_block *sb, void *data, int silent) | |||
549 | 549 | ||
550 | if (!sbi->hidden_dir) { | 550 | if (!sbi->hidden_dir) { |
551 | mutex_lock(&sbi->vh_mutex); | 551 | mutex_lock(&sbi->vh_mutex); |
552 | sbi->hidden_dir = hfsplus_new_inode(sb, S_IFDIR); | 552 | sbi->hidden_dir = hfsplus_new_inode(sb, root, S_IFDIR); |
553 | if (!sbi->hidden_dir) { | 553 | if (!sbi->hidden_dir) { |
554 | mutex_unlock(&sbi->vh_mutex); | 554 | mutex_unlock(&sbi->vh_mutex); |
555 | err = -ENOMEM; | 555 | err = -ENOMEM; |
diff --git a/fs/inode.c b/fs/inode.c index e2ca0f4b5151..ef362364d396 100644 --- a/fs/inode.c +++ b/fs/inode.c | |||
@@ -498,7 +498,6 @@ EXPORT_SYMBOL(__remove_inode_hash); | |||
498 | 498 | ||
499 | void clear_inode(struct inode *inode) | 499 | void clear_inode(struct inode *inode) |
500 | { | 500 | { |
501 | might_sleep(); | ||
502 | /* | 501 | /* |
503 | * We have to cycle tree_lock here because reclaim can be still in the | 502 | * We have to cycle tree_lock here because reclaim can be still in the |
504 | * process of removing the last page (in __delete_from_page_cache()) | 503 | * process of removing the last page (in __delete_from_page_cache()) |
diff --git a/fs/nilfs2/segbuf.c b/fs/nilfs2/segbuf.c index 6c5009cc4e6f..68cb9e4740b4 100644 --- a/fs/nilfs2/segbuf.c +++ b/fs/nilfs2/segbuf.c | |||
@@ -130,7 +130,7 @@ int nilfs_segbuf_extend_payload(struct nilfs_segment_buffer *segbuf, | |||
130 | } | 130 | } |
131 | 131 | ||
132 | int nilfs_segbuf_reset(struct nilfs_segment_buffer *segbuf, unsigned int flags, | 132 | int nilfs_segbuf_reset(struct nilfs_segment_buffer *segbuf, unsigned int flags, |
133 | time_t ctime, __u64 cno) | 133 | time64_t ctime, __u64 cno) |
134 | { | 134 | { |
135 | int err; | 135 | int err; |
136 | 136 | ||
diff --git a/fs/nilfs2/segbuf.h b/fs/nilfs2/segbuf.h index 7bbccc099709..10e16935fff6 100644 --- a/fs/nilfs2/segbuf.h +++ b/fs/nilfs2/segbuf.h | |||
@@ -46,7 +46,7 @@ struct nilfs_segsum_info { | |||
46 | unsigned long nfileblk; | 46 | unsigned long nfileblk; |
47 | u64 seg_seq; | 47 | u64 seg_seq; |
48 | __u64 cno; | 48 | __u64 cno; |
49 | time_t ctime; | 49 | time64_t ctime; |
50 | sector_t next; | 50 | sector_t next; |
51 | }; | 51 | }; |
52 | 52 | ||
@@ -120,7 +120,7 @@ void nilfs_segbuf_map_cont(struct nilfs_segment_buffer *segbuf, | |||
120 | struct nilfs_segment_buffer *prev); | 120 | struct nilfs_segment_buffer *prev); |
121 | void nilfs_segbuf_set_next_segnum(struct nilfs_segment_buffer *, __u64, | 121 | void nilfs_segbuf_set_next_segnum(struct nilfs_segment_buffer *, __u64, |
122 | struct the_nilfs *); | 122 | struct the_nilfs *); |
123 | int nilfs_segbuf_reset(struct nilfs_segment_buffer *, unsigned int, time_t, | 123 | int nilfs_segbuf_reset(struct nilfs_segment_buffer *, unsigned int, time64_t, |
124 | __u64); | 124 | __u64); |
125 | int nilfs_segbuf_extend_segsum(struct nilfs_segment_buffer *); | 125 | int nilfs_segbuf_extend_segsum(struct nilfs_segment_buffer *); |
126 | int nilfs_segbuf_extend_payload(struct nilfs_segment_buffer *, | 126 | int nilfs_segbuf_extend_payload(struct nilfs_segment_buffer *, |
diff --git a/fs/nilfs2/segment.c b/fs/nilfs2/segment.c index 9f3ffba41533..0953635e7d48 100644 --- a/fs/nilfs2/segment.c +++ b/fs/nilfs2/segment.c | |||
@@ -2040,7 +2040,7 @@ static int nilfs_segctor_do_construct(struct nilfs_sc_info *sci, int mode) | |||
2040 | goto out; | 2040 | goto out; |
2041 | 2041 | ||
2042 | /* Update time stamp */ | 2042 | /* Update time stamp */ |
2043 | sci->sc_seg_ctime = get_seconds(); | 2043 | sci->sc_seg_ctime = ktime_get_real_seconds(); |
2044 | 2044 | ||
2045 | err = nilfs_segctor_collect(sci, nilfs, mode); | 2045 | err = nilfs_segctor_collect(sci, nilfs, mode); |
2046 | if (unlikely(err)) | 2046 | if (unlikely(err)) |
diff --git a/fs/nilfs2/segment.h b/fs/nilfs2/segment.h index 84084a4d9b3e..04634e3e3d58 100644 --- a/fs/nilfs2/segment.h +++ b/fs/nilfs2/segment.h | |||
@@ -157,7 +157,7 @@ struct nilfs_sc_info { | |||
157 | unsigned long sc_blk_cnt; | 157 | unsigned long sc_blk_cnt; |
158 | unsigned long sc_datablk_cnt; | 158 | unsigned long sc_datablk_cnt; |
159 | unsigned long sc_nblk_this_inc; | 159 | unsigned long sc_nblk_this_inc; |
160 | time_t sc_seg_ctime; | 160 | time64_t sc_seg_ctime; |
161 | __u64 sc_cno; | 161 | __u64 sc_cno; |
162 | unsigned long sc_flags; | 162 | unsigned long sc_flags; |
163 | 163 | ||
diff --git a/fs/nilfs2/sufile.c b/fs/nilfs2/sufile.c index 1341a41e7b43..c7fa139d50e8 100644 --- a/fs/nilfs2/sufile.c +++ b/fs/nilfs2/sufile.c | |||
@@ -526,7 +526,7 @@ int nilfs_sufile_mark_dirty(struct inode *sufile, __u64 segnum) | |||
526 | * @modtime: modification time (option) | 526 | * @modtime: modification time (option) |
527 | */ | 527 | */ |
528 | int nilfs_sufile_set_segment_usage(struct inode *sufile, __u64 segnum, | 528 | int nilfs_sufile_set_segment_usage(struct inode *sufile, __u64 segnum, |
529 | unsigned long nblocks, time_t modtime) | 529 | unsigned long nblocks, time64_t modtime) |
530 | { | 530 | { |
531 | struct buffer_head *bh; | 531 | struct buffer_head *bh; |
532 | struct nilfs_segment_usage *su; | 532 | struct nilfs_segment_usage *su; |
diff --git a/fs/nilfs2/sufile.h b/fs/nilfs2/sufile.h index 158a9190c8ec..673a891350f4 100644 --- a/fs/nilfs2/sufile.h +++ b/fs/nilfs2/sufile.h | |||
@@ -35,7 +35,7 @@ int nilfs_sufile_set_alloc_range(struct inode *sufile, __u64 start, __u64 end); | |||
35 | int nilfs_sufile_alloc(struct inode *, __u64 *); | 35 | int nilfs_sufile_alloc(struct inode *, __u64 *); |
36 | int nilfs_sufile_mark_dirty(struct inode *sufile, __u64 segnum); | 36 | int nilfs_sufile_mark_dirty(struct inode *sufile, __u64 segnum); |
37 | int nilfs_sufile_set_segment_usage(struct inode *sufile, __u64 segnum, | 37 | int nilfs_sufile_set_segment_usage(struct inode *sufile, __u64 segnum, |
38 | unsigned long nblocks, time_t modtime); | 38 | unsigned long nblocks, time64_t modtime); |
39 | int nilfs_sufile_get_stat(struct inode *, struct nilfs_sustat *); | 39 | int nilfs_sufile_get_stat(struct inode *, struct nilfs_sustat *); |
40 | ssize_t nilfs_sufile_get_suinfo(struct inode *, __u64, void *, unsigned int, | 40 | ssize_t nilfs_sufile_get_suinfo(struct inode *, __u64, void *, unsigned int, |
41 | size_t); | 41 | size_t); |
diff --git a/fs/nilfs2/super.c b/fs/nilfs2/super.c index 3073b646e1ba..6ffeca84d7c3 100644 --- a/fs/nilfs2/super.c +++ b/fs/nilfs2/super.c | |||
@@ -283,10 +283,10 @@ int nilfs_commit_super(struct super_block *sb, int flag) | |||
283 | { | 283 | { |
284 | struct the_nilfs *nilfs = sb->s_fs_info; | 284 | struct the_nilfs *nilfs = sb->s_fs_info; |
285 | struct nilfs_super_block **sbp = nilfs->ns_sbp; | 285 | struct nilfs_super_block **sbp = nilfs->ns_sbp; |
286 | time_t t; | 286 | time64_t t; |
287 | 287 | ||
288 | /* nilfs->ns_sem must be locked by the caller. */ | 288 | /* nilfs->ns_sem must be locked by the caller. */ |
289 | t = get_seconds(); | 289 | t = ktime_get_real_seconds(); |
290 | nilfs->ns_sbwtime = t; | 290 | nilfs->ns_sbwtime = t; |
291 | sbp[0]->s_wtime = cpu_to_le64(t); | 291 | sbp[0]->s_wtime = cpu_to_le64(t); |
292 | sbp[0]->s_sum = 0; | 292 | sbp[0]->s_sum = 0; |
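The nilfs2 hunks in this series are a y2038 conversion: time_t fields and get_seconds() become time64_t and ktime_get_real_seconds(), so segment and superblock write times no longer truncate on 32-bit machines; the on-disk format already carries 64-bit little-endian seconds. The pattern, with made-up names rather than nilfs2's own:

#include <asm/byteorder.h>
#include <linux/timekeeping.h>
#include <linux/types.h>

struct example_disk_sb {
        __le64 s_wtime;                         /* on-disk: 64-bit little-endian seconds */
};

static void example_stamp_write_time(struct example_disk_sb *sbp, time64_t *cached)
{
        time64_t t = ktime_get_real_seconds();  /* 64-bit even on 32-bit arches */

        *cached = t;                            /* in-core copy for update-frequency checks */
        sbp->s_wtime = cpu_to_le64(t);
}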
diff --git a/fs/nilfs2/sysfs.c b/fs/nilfs2/sysfs.c index 490303e3d517..4b25837e7724 100644 --- a/fs/nilfs2/sysfs.c +++ b/fs/nilfs2/sysfs.c | |||
@@ -31,7 +31,7 @@ static struct kset *nilfs_kset; | |||
31 | #define NILFS_SHOW_TIME(time_t_val, buf) ({ \ | 31 | #define NILFS_SHOW_TIME(time_t_val, buf) ({ \ |
32 | struct tm res; \ | 32 | struct tm res; \ |
33 | int count = 0; \ | 33 | int count = 0; \ |
34 | time_to_tm(time_t_val, 0, &res); \ | 34 | time64_to_tm(time_t_val, 0, &res); \ |
35 | res.tm_year += 1900; \ | 35 | res.tm_year += 1900; \ |
36 | res.tm_mon += 1; \ | 36 | res.tm_mon += 1; \ |
37 | count = scnprintf(buf, PAGE_SIZE, \ | 37 | count = scnprintf(buf, PAGE_SIZE, \ |
@@ -579,7 +579,7 @@ nilfs_segctor_last_seg_write_time_show(struct nilfs_segctor_attr *attr, | |||
579 | struct the_nilfs *nilfs, | 579 | struct the_nilfs *nilfs, |
580 | char *buf) | 580 | char *buf) |
581 | { | 581 | { |
582 | time_t ctime; | 582 | time64_t ctime; |
583 | 583 | ||
584 | down_read(&nilfs->ns_segctor_sem); | 584 | down_read(&nilfs->ns_segctor_sem); |
585 | ctime = nilfs->ns_ctime; | 585 | ctime = nilfs->ns_ctime; |
@@ -593,13 +593,13 @@ nilfs_segctor_last_seg_write_time_secs_show(struct nilfs_segctor_attr *attr, | |||
593 | struct the_nilfs *nilfs, | 593 | struct the_nilfs *nilfs, |
594 | char *buf) | 594 | char *buf) |
595 | { | 595 | { |
596 | time_t ctime; | 596 | time64_t ctime; |
597 | 597 | ||
598 | down_read(&nilfs->ns_segctor_sem); | 598 | down_read(&nilfs->ns_segctor_sem); |
599 | ctime = nilfs->ns_ctime; | 599 | ctime = nilfs->ns_ctime; |
600 | up_read(&nilfs->ns_segctor_sem); | 600 | up_read(&nilfs->ns_segctor_sem); |
601 | 601 | ||
602 | return snprintf(buf, PAGE_SIZE, "%llu\n", (unsigned long long)ctime); | 602 | return snprintf(buf, PAGE_SIZE, "%llu\n", ctime); |
603 | } | 603 | } |
604 | 604 | ||
605 | static ssize_t | 605 | static ssize_t |
@@ -607,7 +607,7 @@ nilfs_segctor_last_nongc_write_time_show(struct nilfs_segctor_attr *attr, | |||
607 | struct the_nilfs *nilfs, | 607 | struct the_nilfs *nilfs, |
608 | char *buf) | 608 | char *buf) |
609 | { | 609 | { |
610 | time_t nongc_ctime; | 610 | time64_t nongc_ctime; |
611 | 611 | ||
612 | down_read(&nilfs->ns_segctor_sem); | 612 | down_read(&nilfs->ns_segctor_sem); |
613 | nongc_ctime = nilfs->ns_nongc_ctime; | 613 | nongc_ctime = nilfs->ns_nongc_ctime; |
@@ -621,14 +621,13 @@ nilfs_segctor_last_nongc_write_time_secs_show(struct nilfs_segctor_attr *attr, | |||
621 | struct the_nilfs *nilfs, | 621 | struct the_nilfs *nilfs, |
622 | char *buf) | 622 | char *buf) |
623 | { | 623 | { |
624 | time_t nongc_ctime; | 624 | time64_t nongc_ctime; |
625 | 625 | ||
626 | down_read(&nilfs->ns_segctor_sem); | 626 | down_read(&nilfs->ns_segctor_sem); |
627 | nongc_ctime = nilfs->ns_nongc_ctime; | 627 | nongc_ctime = nilfs->ns_nongc_ctime; |
628 | up_read(&nilfs->ns_segctor_sem); | 628 | up_read(&nilfs->ns_segctor_sem); |
629 | 629 | ||
630 | return snprintf(buf, PAGE_SIZE, "%llu\n", | 630 | return snprintf(buf, PAGE_SIZE, "%llu\n", nongc_ctime); |
631 | (unsigned long long)nongc_ctime); | ||
632 | } | 631 | } |
633 | 632 | ||
634 | static ssize_t | 633 | static ssize_t |
@@ -728,7 +727,7 @@ nilfs_superblock_sb_write_time_show(struct nilfs_superblock_attr *attr, | |||
728 | struct the_nilfs *nilfs, | 727 | struct the_nilfs *nilfs, |
729 | char *buf) | 728 | char *buf) |
730 | { | 729 | { |
731 | time_t sbwtime; | 730 | time64_t sbwtime; |
732 | 731 | ||
733 | down_read(&nilfs->ns_sem); | 732 | down_read(&nilfs->ns_sem); |
734 | sbwtime = nilfs->ns_sbwtime; | 733 | sbwtime = nilfs->ns_sbwtime; |
@@ -742,13 +741,13 @@ nilfs_superblock_sb_write_time_secs_show(struct nilfs_superblock_attr *attr, | |||
742 | struct the_nilfs *nilfs, | 741 | struct the_nilfs *nilfs, |
743 | char *buf) | 742 | char *buf) |
744 | { | 743 | { |
745 | time_t sbwtime; | 744 | time64_t sbwtime; |
746 | 745 | ||
747 | down_read(&nilfs->ns_sem); | 746 | down_read(&nilfs->ns_sem); |
748 | sbwtime = nilfs->ns_sbwtime; | 747 | sbwtime = nilfs->ns_sbwtime; |
749 | up_read(&nilfs->ns_sem); | 748 | up_read(&nilfs->ns_sem); |
750 | 749 | ||
751 | return snprintf(buf, PAGE_SIZE, "%llu\n", (unsigned long long)sbwtime); | 750 | return snprintf(buf, PAGE_SIZE, "%llu\n", sbwtime); |
752 | } | 751 | } |
753 | 752 | ||
754 | static ssize_t | 753 | static ssize_t |
diff --git a/fs/nilfs2/the_nilfs.h b/fs/nilfs2/the_nilfs.h index 883d732b0259..36da1779f976 100644 --- a/fs/nilfs2/the_nilfs.h +++ b/fs/nilfs2/the_nilfs.h | |||
@@ -116,7 +116,7 @@ struct the_nilfs { | |||
116 | */ | 116 | */ |
117 | struct buffer_head *ns_sbh[2]; | 117 | struct buffer_head *ns_sbh[2]; |
118 | struct nilfs_super_block *ns_sbp[2]; | 118 | struct nilfs_super_block *ns_sbp[2]; |
119 | time_t ns_sbwtime; | 119 | time64_t ns_sbwtime; |
120 | unsigned int ns_sbwcount; | 120 | unsigned int ns_sbwcount; |
121 | unsigned int ns_sbsize; | 121 | unsigned int ns_sbsize; |
122 | unsigned int ns_mount_state; | 122 | unsigned int ns_mount_state; |
@@ -131,8 +131,8 @@ struct the_nilfs { | |||
131 | __u64 ns_nextnum; | 131 | __u64 ns_nextnum; |
132 | unsigned long ns_pseg_offset; | 132 | unsigned long ns_pseg_offset; |
133 | __u64 ns_cno; | 133 | __u64 ns_cno; |
134 | time_t ns_ctime; | 134 | time64_t ns_ctime; |
135 | time_t ns_nongc_ctime; | 135 | time64_t ns_nongc_ctime; |
136 | atomic_t ns_ndirtyblks; | 136 | atomic_t ns_ndirtyblks; |
137 | 137 | ||
138 | /* | 138 | /* |
@@ -267,7 +267,7 @@ struct nilfs_root { | |||
267 | 267 | ||
268 | static inline int nilfs_sb_need_update(struct the_nilfs *nilfs) | 268 | static inline int nilfs_sb_need_update(struct the_nilfs *nilfs) |
269 | { | 269 | { |
270 | u64 t = get_seconds(); | 270 | u64 t = ktime_get_real_seconds(); |
271 | 271 | ||
272 | return t < nilfs->ns_sbwtime || | 272 | return t < nilfs->ns_sbwtime || |
273 | t > nilfs->ns_sbwtime + nilfs->ns_sb_update_freq; | 273 | t > nilfs->ns_sbwtime + nilfs->ns_sb_update_freq; |
diff --git a/fs/pipe.c b/fs/pipe.c --- a/fs/pipe.c +++ b/fs/pipe.c | |||
@@ -35,11 +35,6 @@ | |||
35 | */ | 35 | */ |
36 | unsigned int pipe_max_size = 1048576; | 36 | unsigned int pipe_max_size = 1048576; |
37 | 37 | ||
38 | /* | ||
39 | * Minimum pipe size, as required by POSIX | ||
40 | */ | ||
41 | unsigned int pipe_min_size = PAGE_SIZE; | ||
42 | |||
43 | /* Maximum allocatable pages per user. Hard limit is unset by default, soft | 38 | /* Maximum allocatable pages per user. Hard limit is unset by default, soft |
44 | * matches default values. | 39 | * matches default values. |
45 | */ | 40 | */ |
@@ -610,12 +605,21 @@ static unsigned long account_pipe_buffers(struct user_struct *user, | |||
610 | 605 | ||
611 | static bool too_many_pipe_buffers_soft(unsigned long user_bufs) | 606 | static bool too_many_pipe_buffers_soft(unsigned long user_bufs) |
612 | { | 607 | { |
613 | return pipe_user_pages_soft && user_bufs >= pipe_user_pages_soft; | 608 | unsigned long soft_limit = READ_ONCE(pipe_user_pages_soft); |
609 | |||
610 | return soft_limit && user_bufs > soft_limit; | ||
614 | } | 611 | } |
615 | 612 | ||
616 | static bool too_many_pipe_buffers_hard(unsigned long user_bufs) | 613 | static bool too_many_pipe_buffers_hard(unsigned long user_bufs) |
617 | { | 614 | { |
618 | return pipe_user_pages_hard && user_bufs >= pipe_user_pages_hard; | 615 | unsigned long hard_limit = READ_ONCE(pipe_user_pages_hard); |
616 | |||
617 | return hard_limit && user_bufs > hard_limit; | ||
618 | } | ||
619 | |||
620 | static bool is_unprivileged_user(void) | ||
621 | { | ||
622 | return !capable(CAP_SYS_RESOURCE) && !capable(CAP_SYS_ADMIN); | ||
619 | } | 623 | } |
620 | 624 | ||
621 | struct pipe_inode_info *alloc_pipe_info(void) | 625 | struct pipe_inode_info *alloc_pipe_info(void) |
@@ -624,22 +628,23 @@ struct pipe_inode_info *alloc_pipe_info(void) | |||
624 | unsigned long pipe_bufs = PIPE_DEF_BUFFERS; | 628 | unsigned long pipe_bufs = PIPE_DEF_BUFFERS; |
625 | struct user_struct *user = get_current_user(); | 629 | struct user_struct *user = get_current_user(); |
626 | unsigned long user_bufs; | 630 | unsigned long user_bufs; |
631 | unsigned int max_size = READ_ONCE(pipe_max_size); | ||
627 | 632 | ||
628 | pipe = kzalloc(sizeof(struct pipe_inode_info), GFP_KERNEL_ACCOUNT); | 633 | pipe = kzalloc(sizeof(struct pipe_inode_info), GFP_KERNEL_ACCOUNT); |
629 | if (pipe == NULL) | 634 | if (pipe == NULL) |
630 | goto out_free_uid; | 635 | goto out_free_uid; |
631 | 636 | ||
632 | if (pipe_bufs * PAGE_SIZE > pipe_max_size && !capable(CAP_SYS_RESOURCE)) | 637 | if (pipe_bufs * PAGE_SIZE > max_size && !capable(CAP_SYS_RESOURCE)) |
633 | pipe_bufs = pipe_max_size >> PAGE_SHIFT; | 638 | pipe_bufs = max_size >> PAGE_SHIFT; |
634 | 639 | ||
635 | user_bufs = account_pipe_buffers(user, 0, pipe_bufs); | 640 | user_bufs = account_pipe_buffers(user, 0, pipe_bufs); |
636 | 641 | ||
637 | if (too_many_pipe_buffers_soft(user_bufs)) { | 642 | if (too_many_pipe_buffers_soft(user_bufs) && is_unprivileged_user()) { |
638 | user_bufs = account_pipe_buffers(user, pipe_bufs, 1); | 643 | user_bufs = account_pipe_buffers(user, pipe_bufs, 1); |
639 | pipe_bufs = 1; | 644 | pipe_bufs = 1; |
640 | } | 645 | } |
641 | 646 | ||
642 | if (too_many_pipe_buffers_hard(user_bufs)) | 647 | if (too_many_pipe_buffers_hard(user_bufs) && is_unprivileged_user()) |
643 | goto out_revert_acct; | 648 | goto out_revert_acct; |
644 | 649 | ||
645 | pipe->bufs = kcalloc(pipe_bufs, sizeof(struct pipe_buffer), | 650 | pipe->bufs = kcalloc(pipe_bufs, sizeof(struct pipe_buffer), |
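Three things change in the pipe accounting above: the sysctl limits are sampled once with READ_ONCE() so a concurrent sysctl write cannot yield two different values within one decision, the comparison becomes ">" instead of ">=" (an off-by-one that forbade using exactly the configured number of pages), and a capable task (CAP_SYS_RESOURCE or CAP_SYS_ADMIN) may now exceed the limits at pipe creation, as it already could via F_SETPIPE_SZ. The off-by-one in isolation (userspace C, names are mine):

#include <stdbool.h>
#include <stdio.h>

/* user_bufs: pages accounted after the request; limit: configured cap in pages. */
static bool over_limit(unsigned long user_bufs, unsigned long limit)
{
        return limit && user_bufs > limit;      /* '>': using exactly `limit` pages is allowed */
}

int main(void)
{
        printf("%d\n", over_limit(16384, 16384));       /* 0: sitting at the limit is fine now */
        printf("%d\n", over_limit(16385, 16384));       /* 1: one page over */
        return 0;
}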
@@ -1020,18 +1025,16 @@ const struct file_operations pipefifo_fops = { | |||
1020 | * Currently we rely on the pipe array holding a power-of-2 number | 1025 | * Currently we rely on the pipe array holding a power-of-2 number |
1021 | * of pages. Returns 0 on error. | 1026 | * of pages. Returns 0 on error. |
1022 | */ | 1027 | */ |
1023 | unsigned int round_pipe_size(unsigned int size) | 1028 | unsigned int round_pipe_size(unsigned long size) |
1024 | { | 1029 | { |
1025 | unsigned long nr_pages; | 1030 | if (size > (1U << 31)) |
1026 | |||
1027 | if (size < pipe_min_size) | ||
1028 | size = pipe_min_size; | ||
1029 | |||
1030 | nr_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT; | ||
1031 | if (nr_pages == 0) | ||
1032 | return 0; | 1031 | return 0; |
1033 | 1032 | ||
1034 | return roundup_pow_of_two(nr_pages) << PAGE_SHIFT; | 1033 | /* Minimum pipe size, as required by POSIX */ |
1034 | if (size < PAGE_SIZE) | ||
1035 | return PAGE_SIZE; | ||
1036 | |||
1037 | return roundup_pow_of_two(size); | ||
1035 | } | 1038 | } |
1036 | 1039 | ||
1037 | /* | 1040 | /* |
@@ -1046,8 +1049,6 @@ static long pipe_set_size(struct pipe_inode_info *pipe, unsigned long arg) | |||
1046 | long ret = 0; | 1049 | long ret = 0; |
1047 | 1050 | ||
1048 | size = round_pipe_size(arg); | 1051 | size = round_pipe_size(arg); |
1049 | if (size == 0) | ||
1050 | return -EINVAL; | ||
1051 | nr_pages = size >> PAGE_SHIFT; | 1052 | nr_pages = size >> PAGE_SHIFT; |
1052 | 1053 | ||
1053 | if (!nr_pages) | 1054 | if (!nr_pages) |
@@ -1069,7 +1070,7 @@ static long pipe_set_size(struct pipe_inode_info *pipe, unsigned long arg) | |||
1069 | if (nr_pages > pipe->buffers && | 1070 | if (nr_pages > pipe->buffers && |
1070 | (too_many_pipe_buffers_hard(user_bufs) || | 1071 | (too_many_pipe_buffers_hard(user_bufs) || |
1071 | too_many_pipe_buffers_soft(user_bufs)) && | 1072 | too_many_pipe_buffers_soft(user_bufs)) && |
1072 | !capable(CAP_SYS_RESOURCE) && !capable(CAP_SYS_ADMIN)) { | 1073 | is_unprivileged_user()) { |
1073 | ret = -EPERM; | 1074 | ret = -EPERM; |
1074 | goto out_revert_acct; | 1075 | goto out_revert_acct; |
1075 | } | 1076 | } |
@@ -1125,16 +1126,6 @@ out_revert_acct: | |||
1125 | } | 1126 | } |
1126 | 1127 | ||
1127 | /* | 1128 | /* |
1128 | * This should work even if CONFIG_PROC_FS isn't set, as proc_dopipe_max_size | ||
1129 | * will return an error. | ||
1130 | */ | ||
1131 | int pipe_proc_fn(struct ctl_table *table, int write, void __user *buf, | ||
1132 | size_t *lenp, loff_t *ppos) | ||
1133 | { | ||
1134 | return proc_dopipe_max_size(table, write, buf, lenp, ppos); | ||
1135 | } | ||
1136 | |||
1137 | /* | ||
1138 | * After the inode slimming patch, i_pipe/i_bdev/i_cdev share the same | 1129 | * After the inode slimming patch, i_pipe/i_bdev/i_cdev share the same |
1139 | * location, so checking ->i_pipe is not enough to verify that this is a | 1130 | * location, so checking ->i_pipe is not enough to verify that this is a |
1140 | * pipe. | 1131 | * pipe. |
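round_pipe_size() now takes the raw unsigned long from the caller, rejects anything above 2^31 outright, raises small requests to the POSIX minimum of one page, and rounds the byte count itself up to a power of two instead of rounding a derived page count. A userspace approximation for experimenting with the new behaviour (PAGE_SIZE assumed to be 4096 here):

#include <stdio.h>

#define PAGE_SIZE 4096UL

static unsigned long roundup_pow_of_two(unsigned long n)
{
        unsigned long r = 1;

        while (r < n)
                r <<= 1;
        return r;
}

/* Mirrors the reworked kernel helper: 0 means "reject". */
static unsigned int round_pipe_size(unsigned long size)
{
        if (size > (1UL << 31))
                return 0;
        if (size < PAGE_SIZE)           /* POSIX minimum pipe size */
                return PAGE_SIZE;
        return roundup_pow_of_two(size);
}

int main(void)
{
        printf("%u\n", round_pipe_size(0));             /* 4096 */
        printf("%u\n", round_pipe_size(70000));         /* 131072 */
        printf("%u\n", round_pipe_size(0xffffffffUL));  /* 0: over the 2^31 cap, rejected */
        return 0;
}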
diff --git a/fs/proc/array.c b/fs/proc/array.c index d67a72dcb92c..598803576e4c 100644 --- a/fs/proc/array.c +++ b/fs/proc/array.c | |||
@@ -736,16 +736,10 @@ static int children_seq_open(struct inode *inode, struct file *file) | |||
736 | return ret; | 736 | return ret; |
737 | } | 737 | } |
738 | 738 | ||
739 | int children_seq_release(struct inode *inode, struct file *file) | ||
740 | { | ||
741 | seq_release(inode, file); | ||
742 | return 0; | ||
743 | } | ||
744 | |||
745 | const struct file_operations proc_tid_children_operations = { | 739 | const struct file_operations proc_tid_children_operations = { |
746 | .open = children_seq_open, | 740 | .open = children_seq_open, |
747 | .read = seq_read, | 741 | .read = seq_read, |
748 | .llseek = seq_lseek, | 742 | .llseek = seq_lseek, |
749 | .release = children_seq_release, | 743 | .release = seq_release, |
750 | }; | 744 | }; |
751 | #endif /* CONFIG_PROC_CHILDREN */ | 745 | #endif /* CONFIG_PROC_CHILDREN */ |
diff --git a/fs/proc/base.c b/fs/proc/base.c index 60316b52d659..9298324325ed 100644 --- a/fs/proc/base.c +++ b/fs/proc/base.c | |||
@@ -75,6 +75,7 @@ | |||
75 | #include <linux/ptrace.h> | 75 | #include <linux/ptrace.h> |
76 | #include <linux/tracehook.h> | 76 | #include <linux/tracehook.h> |
77 | #include <linux/printk.h> | 77 | #include <linux/printk.h> |
78 | #include <linux/cache.h> | ||
78 | #include <linux/cgroup.h> | 79 | #include <linux/cgroup.h> |
79 | #include <linux/cpuset.h> | 80 | #include <linux/cpuset.h> |
80 | #include <linux/audit.h> | 81 | #include <linux/audit.h> |
@@ -100,6 +101,8 @@ | |||
100 | #include "internal.h" | 101 | #include "internal.h" |
101 | #include "fd.h" | 102 | #include "fd.h" |
102 | 103 | ||
104 | #include "../../lib/kstrtox.h" | ||
105 | |||
103 | /* NOTE: | 106 | /* NOTE: |
104 | * Implementing inode permission operations in /proc is almost | 107 | * Implementing inode permission operations in /proc is almost |
105 | * certainly an error. Permission checks need to happen during | 108 | * certainly an error. Permission checks need to happen during |
@@ -110,8 +113,8 @@ | |||
110 | * in /proc for a task before it execs a suid executable. | 113 | * in /proc for a task before it execs a suid executable. |
111 | */ | 114 | */ |
112 | 115 | ||
113 | static u8 nlink_tid; | 116 | static u8 nlink_tid __ro_after_init; |
114 | static u8 nlink_tgid; | 117 | static u8 nlink_tgid __ro_after_init; |
115 | 118 | ||
116 | struct pid_entry { | 119 | struct pid_entry { |
117 | const char *name; | 120 | const char *name; |
@@ -1370,7 +1373,7 @@ static ssize_t proc_fail_nth_write(struct file *file, const char __user *buf, | |||
1370 | task = get_proc_task(file_inode(file)); | 1373 | task = get_proc_task(file_inode(file)); |
1371 | if (!task) | 1374 | if (!task) |
1372 | return -ESRCH; | 1375 | return -ESRCH; |
1373 | WRITE_ONCE(task->fail_nth, n); | 1376 | task->fail_nth = n; |
1374 | put_task_struct(task); | 1377 | put_task_struct(task); |
1375 | 1378 | ||
1376 | return count; | 1379 | return count; |
@@ -1386,8 +1389,7 @@ static ssize_t proc_fail_nth_read(struct file *file, char __user *buf, | |||
1386 | task = get_proc_task(file_inode(file)); | 1389 | task = get_proc_task(file_inode(file)); |
1387 | if (!task) | 1390 | if (!task) |
1388 | return -ESRCH; | 1391 | return -ESRCH; |
1389 | len = snprintf(numbuf, sizeof(numbuf), "%u\n", | 1392 | len = snprintf(numbuf, sizeof(numbuf), "%u\n", task->fail_nth); |
1390 | READ_ONCE(task->fail_nth)); | ||
1391 | len = simple_read_from_buffer(buf, count, ppos, numbuf, len); | 1393 | len = simple_read_from_buffer(buf, count, ppos, numbuf, len); |
1392 | put_task_struct(task); | 1394 | put_task_struct(task); |
1393 | 1395 | ||
@@ -1907,8 +1909,33 @@ end_instantiate: | |||
1907 | static int dname_to_vma_addr(struct dentry *dentry, | 1909 | static int dname_to_vma_addr(struct dentry *dentry, |
1908 | unsigned long *start, unsigned long *end) | 1910 | unsigned long *start, unsigned long *end) |
1909 | { | 1911 | { |
1910 | if (sscanf(dentry->d_name.name, "%lx-%lx", start, end) != 2) | 1912 | const char *str = dentry->d_name.name; |
1913 | unsigned long long sval, eval; | ||
1914 | unsigned int len; | ||
1915 | |||
1916 | len = _parse_integer(str, 16, &sval); | ||
1917 | if (len & KSTRTOX_OVERFLOW) | ||
1918 | return -EINVAL; | ||
1919 | if (sval != (unsigned long)sval) | ||
1911 | return -EINVAL; | 1920 | return -EINVAL; |
1921 | str += len; | ||
1922 | |||
1923 | if (*str != '-') | ||
1924 | return -EINVAL; | ||
1925 | str++; | ||
1926 | |||
1927 | len = _parse_integer(str, 16, &eval); | ||
1928 | if (len & KSTRTOX_OVERFLOW) | ||
1929 | return -EINVAL; | ||
1930 | if (eval != (unsigned long)eval) | ||
1931 | return -EINVAL; | ||
1932 | str += len; | ||
1933 | |||
1934 | if (*str != '\0') | ||
1935 | return -EINVAL; | ||
1936 | |||
1937 | *start = sval; | ||
1938 | *end = eval; | ||
1912 | 1939 | ||
1913 | return 0; | 1940 | return 0; |
1914 | } | 1941 | } |
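dname_to_vma_addr() used to take whatever sscanf() would parse for "%lx-%lx", which quietly tolerates leading whitespace, sign characters and "0x" prefixes; the entries under /proc/<pid>/map_files are generated as bare lower-case hex, so the rewrite parses the two fields with _parse_integer() and rejects leading junk, signs, prefixes and values that do not fit in an unsigned long. A userspace contrast using a hand-rolled strict parser (illustrative only, not the kernel helper):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Strict "start-end" hex parse: no whitespace, signs or "0x" prefixes allowed. */
static int parse_range_strict(const char *s, unsigned long *start, unsigned long *end)
{
        static const char hex[] = "0123456789abcdef";
        size_t n = strspn(s, hex);

        if (n == 0 || s[n] != '-')
                return -1;
        *start = strtoul(s, NULL, 16);

        s += n + 1;
        n = strspn(s, hex);
        if (n == 0 || s[n] != '\0')
                return -1;
        *end = strtoul(s, NULL, 16);
        return 0;
}

int main(void)
{
        unsigned long a, b;

        printf("%d\n", parse_range_strict("400000-401000", &a, &b));   /*  0 */
        printf("%d\n", parse_range_strict("0x400000-401000", &a, &b)); /* -1: sscanf("%lx") would have taken this */
        printf("%d\n", parse_range_strict(" 400000-401000", &a, &b));  /* -1: leading space rejected */
        return 0;
}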
@@ -2000,9 +2027,9 @@ out: | |||
2000 | } | 2027 | } |
2001 | 2028 | ||
2002 | struct map_files_info { | 2029 | struct map_files_info { |
2030 | unsigned long start; | ||
2031 | unsigned long end; | ||
2003 | fmode_t mode; | 2032 | fmode_t mode; |
2004 | unsigned int len; | ||
2005 | unsigned char name[4*sizeof(long)+2]; /* max: %lx-%lx\0 */ | ||
2006 | }; | 2033 | }; |
2007 | 2034 | ||
2008 | /* | 2035 | /* |
@@ -2172,10 +2199,9 @@ proc_map_files_readdir(struct file *file, struct dir_context *ctx) | |||
2172 | if (++pos <= ctx->pos) | 2199 | if (++pos <= ctx->pos) |
2173 | continue; | 2200 | continue; |
2174 | 2201 | ||
2202 | info.start = vma->vm_start; | ||
2203 | info.end = vma->vm_end; | ||
2175 | info.mode = vma->vm_file->f_mode; | 2204 | info.mode = vma->vm_file->f_mode; |
2176 | info.len = snprintf(info.name, | ||
2177 | sizeof(info.name), "%lx-%lx", | ||
2178 | vma->vm_start, vma->vm_end); | ||
2179 | if (flex_array_put(fa, i++, &info, GFP_KERNEL)) | 2205 | if (flex_array_put(fa, i++, &info, GFP_KERNEL)) |
2180 | BUG(); | 2206 | BUG(); |
2181 | } | 2207 | } |
@@ -2183,9 +2209,13 @@ proc_map_files_readdir(struct file *file, struct dir_context *ctx) | |||
2183 | up_read(&mm->mmap_sem); | 2209 | up_read(&mm->mmap_sem); |
2184 | 2210 | ||
2185 | for (i = 0; i < nr_files; i++) { | 2211 | for (i = 0; i < nr_files; i++) { |
2212 | char buf[4 * sizeof(long) + 2]; /* max: %lx-%lx\0 */ | ||
2213 | unsigned int len; | ||
2214 | |||
2186 | p = flex_array_get(fa, i); | 2215 | p = flex_array_get(fa, i); |
2216 | len = snprintf(buf, sizeof(buf), "%lx-%lx", p->start, p->end); | ||
2187 | if (!proc_fill_cache(file, ctx, | 2217 | if (!proc_fill_cache(file, ctx, |
2188 | p->name, p->len, | 2218 | buf, len, |
2189 | proc_map_files_instantiate, | 2219 | proc_map_files_instantiate, |
2190 | task, | 2220 | task, |
2191 | (void *)(unsigned long)p->mode)) | 2221 | (void *)(unsigned long)p->mode)) |
@@ -3018,11 +3048,11 @@ static const struct inode_operations proc_tgid_base_inode_operations = { | |||
3018 | static void proc_flush_task_mnt(struct vfsmount *mnt, pid_t pid, pid_t tgid) | 3048 | static void proc_flush_task_mnt(struct vfsmount *mnt, pid_t pid, pid_t tgid) |
3019 | { | 3049 | { |
3020 | struct dentry *dentry, *leader, *dir; | 3050 | struct dentry *dentry, *leader, *dir; |
3021 | char buf[PROC_NUMBUF]; | 3051 | char buf[10 + 1]; |
3022 | struct qstr name; | 3052 | struct qstr name; |
3023 | 3053 | ||
3024 | name.name = buf; | 3054 | name.name = buf; |
3025 | name.len = snprintf(buf, sizeof(buf), "%d", pid); | 3055 | name.len = snprintf(buf, sizeof(buf), "%u", pid); |
3026 | /* no ->d_hash() rejects on procfs */ | 3056 | /* no ->d_hash() rejects on procfs */ |
3027 | dentry = d_hash_and_lookup(mnt->mnt_root, &name); | 3057 | dentry = d_hash_and_lookup(mnt->mnt_root, &name); |
3028 | if (dentry) { | 3058 | if (dentry) { |
@@ -3034,7 +3064,7 @@ static void proc_flush_task_mnt(struct vfsmount *mnt, pid_t pid, pid_t tgid) | |||
3034 | return; | 3064 | return; |
3035 | 3065 | ||
3036 | name.name = buf; | 3066 | name.name = buf; |
3037 | name.len = snprintf(buf, sizeof(buf), "%d", tgid); | 3067 | name.len = snprintf(buf, sizeof(buf), "%u", tgid); |
3038 | leader = d_hash_and_lookup(mnt->mnt_root, &name); | 3068 | leader = d_hash_and_lookup(mnt->mnt_root, &name); |
3039 | if (!leader) | 3069 | if (!leader) |
3040 | goto out; | 3070 | goto out; |
@@ -3046,7 +3076,7 @@ static void proc_flush_task_mnt(struct vfsmount *mnt, pid_t pid, pid_t tgid) | |||
3046 | goto out_put_leader; | 3076 | goto out_put_leader; |
3047 | 3077 | ||
3048 | name.name = buf; | 3078 | name.name = buf; |
3049 | name.len = snprintf(buf, sizeof(buf), "%d", pid); | 3079 | name.len = snprintf(buf, sizeof(buf), "%u", pid); |
3050 | dentry = d_hash_and_lookup(dir, &name); | 3080 | dentry = d_hash_and_lookup(dir, &name); |
3051 | if (dentry) { | 3081 | if (dentry) { |
3052 | d_invalidate(dentry); | 3082 | d_invalidate(dentry); |
@@ -3225,14 +3255,14 @@ int proc_pid_readdir(struct file *file, struct dir_context *ctx) | |||
3225 | for (iter = next_tgid(ns, iter); | 3255 | for (iter = next_tgid(ns, iter); |
3226 | iter.task; | 3256 | iter.task; |
3227 | iter.tgid += 1, iter = next_tgid(ns, iter)) { | 3257 | iter.tgid += 1, iter = next_tgid(ns, iter)) { |
3228 | char name[PROC_NUMBUF]; | 3258 | char name[10 + 1]; |
3229 | int len; | 3259 | int len; |
3230 | 3260 | ||
3231 | cond_resched(); | 3261 | cond_resched(); |
3232 | if (!has_pid_permissions(ns, iter.task, HIDEPID_INVISIBLE)) | 3262 | if (!has_pid_permissions(ns, iter.task, HIDEPID_INVISIBLE)) |
3233 | continue; | 3263 | continue; |
3234 | 3264 | ||
3235 | len = snprintf(name, sizeof(name), "%d", iter.tgid); | 3265 | len = snprintf(name, sizeof(name), "%u", iter.tgid); |
3236 | ctx->pos = iter.tgid + TGID_OFFSET; | 3266 | ctx->pos = iter.tgid + TGID_OFFSET; |
3237 | if (!proc_fill_cache(file, ctx, name, len, | 3267 | if (!proc_fill_cache(file, ctx, name, len, |
3238 | proc_pid_instantiate, iter.task, NULL)) { | 3268 | proc_pid_instantiate, iter.task, NULL)) { |
@@ -3560,10 +3590,10 @@ static int proc_task_readdir(struct file *file, struct dir_context *ctx) | |||
3560 | for (task = first_tid(proc_pid(inode), tid, ctx->pos - 2, ns); | 3590 | for (task = first_tid(proc_pid(inode), tid, ctx->pos - 2, ns); |
3561 | task; | 3591 | task; |
3562 | task = next_tid(task), ctx->pos++) { | 3592 | task = next_tid(task), ctx->pos++) { |
3563 | char name[PROC_NUMBUF]; | 3593 | char name[10 + 1]; |
3564 | int len; | 3594 | int len; |
3565 | tid = task_pid_nr_ns(task, ns); | 3595 | tid = task_pid_nr_ns(task, ns); |
3566 | len = snprintf(name, sizeof(name), "%d", tid); | 3596 | len = snprintf(name, sizeof(name), "%u", tid); |
3567 | if (!proc_fill_cache(file, ctx, name, len, | 3597 | if (!proc_fill_cache(file, ctx, name, len, |
3568 | proc_task_instantiate, task, NULL)) { | 3598 | proc_task_instantiate, task, NULL)) { |
3569 | /* returning this tgid failed, save it as the first | 3599 | /* returning this tgid failed, save it as the first |
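The stack buffers that hold decimal pid strings shrink from PROC_NUMBUF bytes to "10 + 1" and the format switches from "%d" to "%u": a pid is never negative, and an unsigned 32-bit value needs at most ten decimal digits plus the terminating NUL. The arithmetic, checked in userspace:

#include <limits.h>
#include <stdio.h>

int main(void)
{
        char buf[10 + 1];       /* ten digits for 2^32 - 1, plus '\0' */
        int len = snprintf(buf, sizeof(buf), "%u", UINT_MAX);

        printf("%s (%d chars)\n", buf, len);    /* 4294967295 (10 chars) */
        return 0;
}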
diff --git a/fs/proc/consoles.c b/fs/proc/consoles.c index 290ba85cb900..a8ac48aebd59 100644 --- a/fs/proc/consoles.c +++ b/fs/proc/consoles.c | |||
@@ -55,8 +55,7 @@ static int show_console_dev(struct seq_file *m, void *v) | |||
55 | if (dev) | 55 | if (dev) |
56 | seq_printf(m, " %4d:%d", MAJOR(dev), MINOR(dev)); | 56 | seq_printf(m, " %4d:%d", MAJOR(dev), MINOR(dev)); |
57 | 57 | ||
58 | seq_printf(m, "\n"); | 58 | seq_putc(m, '\n'); |
59 | |||
60 | return 0; | 59 | return 0; |
61 | } | 60 | } |
62 | 61 | ||
diff --git a/fs/proc/fd.c b/fs/proc/fd.c index 96fc70225e54..6b80cd1e419a 100644 --- a/fs/proc/fd.c +++ b/fs/proc/fd.c | |||
@@ -236,7 +236,7 @@ static int proc_readfd_common(struct file *file, struct dir_context *ctx, | |||
236 | for (fd = ctx->pos - 2; | 236 | for (fd = ctx->pos - 2; |
237 | fd < files_fdtable(files)->max_fds; | 237 | fd < files_fdtable(files)->max_fds; |
238 | fd++, ctx->pos++) { | 238 | fd++, ctx->pos++) { |
239 | char name[PROC_NUMBUF]; | 239 | char name[10 + 1]; |
240 | int len; | 240 | int len; |
241 | 241 | ||
242 | if (!fcheck_files(files, fd)) | 242 | if (!fcheck_files(files, fd)) |
diff --git a/fs/proc/generic.c b/fs/proc/generic.c index 793a67574668..5d709fa8f3a2 100644 --- a/fs/proc/generic.c +++ b/fs/proc/generic.c | |||
@@ -28,7 +28,7 @@ | |||
28 | 28 | ||
29 | static DEFINE_RWLOCK(proc_subdir_lock); | 29 | static DEFINE_RWLOCK(proc_subdir_lock); |
30 | 30 | ||
31 | static int proc_match(unsigned int len, const char *name, struct proc_dir_entry *de) | 31 | static int proc_match(const char *name, struct proc_dir_entry *de, unsigned int len) |
32 | { | 32 | { |
33 | if (len < de->namelen) | 33 | if (len < de->namelen) |
34 | return -1; | 34 | return -1; |
@@ -60,7 +60,7 @@ static struct proc_dir_entry *pde_subdir_find(struct proc_dir_entry *dir, | |||
60 | struct proc_dir_entry *de = rb_entry(node, | 60 | struct proc_dir_entry *de = rb_entry(node, |
61 | struct proc_dir_entry, | 61 | struct proc_dir_entry, |
62 | subdir_node); | 62 | subdir_node); |
63 | int result = proc_match(len, name, de); | 63 | int result = proc_match(name, de, len); |
64 | 64 | ||
65 | if (result < 0) | 65 | if (result < 0) |
66 | node = node->rb_left; | 66 | node = node->rb_left; |
@@ -84,7 +84,7 @@ static bool pde_subdir_insert(struct proc_dir_entry *dir, | |||
84 | struct proc_dir_entry *this = rb_entry(*new, | 84 | struct proc_dir_entry *this = rb_entry(*new, |
85 | struct proc_dir_entry, | 85 | struct proc_dir_entry, |
86 | subdir_node); | 86 | subdir_node); |
87 | int result = proc_match(de->namelen, de->name, this); | 87 | int result = proc_match(de->name, this, de->namelen); |
88 | 88 | ||
89 | parent = *new; | 89 | parent = *new; |
90 | if (result < 0) | 90 | if (result < 0) |
@@ -211,8 +211,8 @@ void proc_free_inum(unsigned int inum) | |||
211 | * Don't create negative dentries here, return -ENOENT by hand | 211 | * Don't create negative dentries here, return -ENOENT by hand |
212 | * instead. | 212 | * instead. |
213 | */ | 213 | */ |
214 | struct dentry *proc_lookup_de(struct proc_dir_entry *de, struct inode *dir, | 214 | struct dentry *proc_lookup_de(struct inode *dir, struct dentry *dentry, |
215 | struct dentry *dentry) | 215 | struct proc_dir_entry *de) |
216 | { | 216 | { |
217 | struct inode *inode; | 217 | struct inode *inode; |
218 | 218 | ||
@@ -235,7 +235,7 @@ struct dentry *proc_lookup_de(struct proc_dir_entry *de, struct inode *dir, | |||
235 | struct dentry *proc_lookup(struct inode *dir, struct dentry *dentry, | 235 | struct dentry *proc_lookup(struct inode *dir, struct dentry *dentry, |
236 | unsigned int flags) | 236 | unsigned int flags) |
237 | { | 237 | { |
238 | return proc_lookup_de(PDE(dir), dir, dentry); | 238 | return proc_lookup_de(dir, dentry, PDE(dir)); |
239 | } | 239 | } |
240 | 240 | ||
241 | /* | 241 | /* |
@@ -247,8 +247,8 @@ struct dentry *proc_lookup(struct inode *dir, struct dentry *dentry, | |||
247 | * value of the readdir() call, as long as it's non-negative | 247 | * value of the readdir() call, as long as it's non-negative |
248 | * for success.. | 248 | * for success.. |
249 | */ | 249 | */ |
250 | int proc_readdir_de(struct proc_dir_entry *de, struct file *file, | 250 | int proc_readdir_de(struct file *file, struct dir_context *ctx, |
251 | struct dir_context *ctx) | 251 | struct proc_dir_entry *de) |
252 | { | 252 | { |
253 | int i; | 253 | int i; |
254 | 254 | ||
@@ -292,7 +292,7 @@ int proc_readdir(struct file *file, struct dir_context *ctx) | |||
292 | { | 292 | { |
293 | struct inode *inode = file_inode(file); | 293 | struct inode *inode = file_inode(file); |
294 | 294 | ||
295 | return proc_readdir_de(PDE(inode), file, ctx); | 295 | return proc_readdir_de(file, ctx, PDE(inode)); |
296 | } | 296 | } |
297 | 297 | ||
298 | /* | 298 | /* |
diff --git a/fs/proc/inode.c b/fs/proc/inode.c index 8dacaabb9f37..6e8724958116 100644 --- a/fs/proc/inode.c +++ b/fs/proc/inode.c | |||
@@ -5,6 +5,7 @@ | |||
5 | * Copyright (C) 1991, 1992 Linus Torvalds | 5 | * Copyright (C) 1991, 1992 Linus Torvalds |
6 | */ | 6 | */ |
7 | 7 | ||
8 | #include <linux/cache.h> | ||
8 | #include <linux/time.h> | 9 | #include <linux/time.h> |
9 | #include <linux/proc_fs.h> | 10 | #include <linux/proc_fs.h> |
10 | #include <linux/kernel.h> | 11 | #include <linux/kernel.h> |
@@ -52,7 +53,7 @@ static void proc_evict_inode(struct inode *inode) | |||
52 | } | 53 | } |
53 | } | 54 | } |
54 | 55 | ||
55 | static struct kmem_cache * proc_inode_cachep; | 56 | static struct kmem_cache *proc_inode_cachep __ro_after_init; |
56 | 57 | ||
57 | static struct inode *proc_alloc_inode(struct super_block *sb) | 58 | static struct inode *proc_alloc_inode(struct super_block *sb) |
58 | { | 59 | { |
@@ -128,12 +129,12 @@ enum {BIAS = -1U<<31}; | |||
128 | 129 | ||
129 | static inline int use_pde(struct proc_dir_entry *pde) | 130 | static inline int use_pde(struct proc_dir_entry *pde) |
130 | { | 131 | { |
131 | return atomic_inc_unless_negative(&pde->in_use); | 132 | return likely(atomic_inc_unless_negative(&pde->in_use)); |
132 | } | 133 | } |
133 | 134 | ||
134 | static void unuse_pde(struct proc_dir_entry *pde) | 135 | static void unuse_pde(struct proc_dir_entry *pde) |
135 | { | 136 | { |
136 | if (atomic_dec_return(&pde->in_use) == BIAS) | 137 | if (unlikely(atomic_dec_return(&pde->in_use) == BIAS)) |
137 | complete(pde->pde_unload_completion); | 138 | complete(pde->pde_unload_completion); |
138 | } | 139 | } |
139 | 140 | ||
@@ -166,7 +167,7 @@ static void close_pdeo(struct proc_dir_entry *pde, struct pde_opener *pdeo) | |||
166 | spin_lock(&pde->pde_unload_lock); | 167 | spin_lock(&pde->pde_unload_lock); |
167 | /* After ->release. */ | 168 | /* After ->release. */ |
168 | list_del(&pdeo->lh); | 169 | list_del(&pdeo->lh); |
169 | if (pdeo->c) | 170 | if (unlikely(pdeo->c)) |
170 | complete(pdeo->c); | 171 | complete(pdeo->c); |
171 | kfree(pdeo); | 172 | kfree(pdeo); |
172 | } | 173 | } |
@@ -420,7 +421,7 @@ static const char *proc_get_link(struct dentry *dentry, | |||
420 | struct delayed_call *done) | 421 | struct delayed_call *done) |
421 | { | 422 | { |
422 | struct proc_dir_entry *pde = PDE(inode); | 423 | struct proc_dir_entry *pde = PDE(inode); |
423 | if (unlikely(!use_pde(pde))) | 424 | if (!use_pde(pde)) |
424 | return ERR_PTR(-EINVAL); | 425 | return ERR_PTR(-EINVAL); |
425 | set_delayed_call(done, proc_put_link, pde); | 426 | set_delayed_call(done, proc_put_link, pde); |
426 | return pde->data; | 427 | return pde->data; |
diff --git a/fs/proc/internal.h b/fs/proc/internal.h index 4a67188c8d74..d697c8ab0a14 100644 --- a/fs/proc/internal.h +++ b/fs/proc/internal.h | |||
@@ -31,24 +31,28 @@ struct mempolicy; | |||
31 | * subdir_node is used to build the rb tree "subdir" of the parent. | 31 | * subdir_node is used to build the rb tree "subdir" of the parent. |
32 | */ | 32 | */ |
33 | struct proc_dir_entry { | 33 | struct proc_dir_entry { |
34 | /* | ||
35 | * number of callers into module in progress; | ||
36 | * negative -> it's going away RSN | ||
37 | */ | ||
38 | atomic_t in_use; | ||
39 | atomic_t count; /* use count */ | ||
40 | struct list_head pde_openers; /* who did ->open, but not ->release */ | ||
41 | /* protects ->pde_openers and all struct pde_opener instances */ | ||
42 | spinlock_t pde_unload_lock; | ||
43 | struct completion *pde_unload_completion; | ||
44 | const struct inode_operations *proc_iops; | ||
45 | const struct file_operations *proc_fops; | ||
46 | void *data; | ||
34 | unsigned int low_ino; | 47 | unsigned int low_ino; |
35 | umode_t mode; | ||
36 | nlink_t nlink; | 48 | nlink_t nlink; |
37 | kuid_t uid; | 49 | kuid_t uid; |
38 | kgid_t gid; | 50 | kgid_t gid; |
39 | loff_t size; | 51 | loff_t size; |
40 | const struct inode_operations *proc_iops; | ||
41 | const struct file_operations *proc_fops; | ||
42 | struct proc_dir_entry *parent; | 52 | struct proc_dir_entry *parent; |
43 | struct rb_root_cached subdir; | 53 | struct rb_root_cached subdir; |
44 | struct rb_node subdir_node; | 54 | struct rb_node subdir_node; |
45 | void *data; | 55 | umode_t mode; |
46 | atomic_t count; /* use count */ | ||
47 | atomic_t in_use; /* number of callers into module in progress; */ | ||
48 | /* negative -> it's going away RSN */ | ||
49 | struct completion *pde_unload_completion; | ||
50 | struct list_head pde_openers; /* who did ->open, but not ->release */ | ||
51 | spinlock_t pde_unload_lock; /* proc_fops checks and pde_users bumps */ | ||
52 | u8 namelen; | 56 | u8 namelen; |
53 | char name[]; | 57 | char name[]; |
54 | } __randomize_layout; | 58 | } __randomize_layout; |
@@ -149,10 +153,9 @@ extern bool proc_fill_cache(struct file *, struct dir_context *, const char *, i | |||
149 | * generic.c | 153 | * generic.c |
150 | */ | 154 | */ |
151 | extern struct dentry *proc_lookup(struct inode *, struct dentry *, unsigned int); | 155 | extern struct dentry *proc_lookup(struct inode *, struct dentry *, unsigned int); |
152 | extern struct dentry *proc_lookup_de(struct proc_dir_entry *, struct inode *, | 156 | struct dentry *proc_lookup_de(struct inode *, struct dentry *, struct proc_dir_entry *); |
153 | struct dentry *); | ||
154 | extern int proc_readdir(struct file *, struct dir_context *); | 157 | extern int proc_readdir(struct file *, struct dir_context *); |
155 | extern int proc_readdir_de(struct proc_dir_entry *, struct file *, struct dir_context *); | 158 | int proc_readdir_de(struct file *, struct dir_context *, struct proc_dir_entry *); |
156 | 159 | ||
157 | static inline struct proc_dir_entry *pde_get(struct proc_dir_entry *pde) | 160 | static inline struct proc_dir_entry *pde_get(struct proc_dir_entry *pde) |
158 | { | 161 | { |
diff --git a/fs/proc/kcore.c b/fs/proc/kcore.c index 4bc85cb8be6a..e8a93bc8285d 100644 --- a/fs/proc/kcore.c +++ b/fs/proc/kcore.c | |||
@@ -512,23 +512,15 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos) | |||
512 | return -EFAULT; | 512 | return -EFAULT; |
513 | } else { | 513 | } else { |
514 | if (kern_addr_valid(start)) { | 514 | if (kern_addr_valid(start)) { |
515 | unsigned long n; | ||
516 | |||
517 | /* | 515 | /* |
518 | * Using bounce buffer to bypass the | 516 | * Using bounce buffer to bypass the |
519 | * hardened user copy kernel text checks. | 517 | * hardened user copy kernel text checks. |
520 | */ | 518 | */ |
521 | memcpy(buf, (char *) start, tsz); | 519 | if (probe_kernel_read(buf, (void *) start, tsz)) { |
522 | n = copy_to_user(buffer, buf, tsz); | 520 | if (clear_user(buffer, tsz)) |
523 | /* | 521 | return -EFAULT; |
524 | * We cannot distinguish between fault on source | 522 | } else { |
525 | * and fault on destination. When this happens | 523 | if (copy_to_user(buffer, buf, tsz)) |
526 | * we clear too and hope it will trigger the | ||
527 | * EFAULT again. | ||
528 | */ | ||
529 | if (n) { | ||
530 | if (clear_user(buffer + tsz - n, | ||
531 | n)) | ||
532 | return -EFAULT; | 524 | return -EFAULT; |
533 | } | 525 | } |
534 | } else { | 526 | } else { |
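Reading kernel text through /proc/kcore now fills the bounce buffer with probe_kernel_read(), so a fault on the kernel source address is detected on its own instead of being inferred from a failed copy_to_user(); when the source is unreadable the corresponding user bytes are simply zeroed. The shape of the new path, lifted out of context as a sketch (the wrapper name is mine):

#include <linux/errno.h>
#include <linux/uaccess.h>

/* Copy one chunk of kernel memory to user space via a bounce buffer. */
static int copy_kernel_chunk(char __user *buffer, unsigned long start,
                             size_t tsz, char *buf)
{
        if (probe_kernel_read(buf, (void *)start, tsz)) {
                /* unreadable kernel address: hand the reader zeroes */
                if (clear_user(buffer, tsz))
                        return -EFAULT;
        } else {
                if (copy_to_user(buffer, buf, tsz))
                        return -EFAULT;
        }
        return 0;
}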
diff --git a/fs/proc/proc_net.c b/fs/proc/proc_net.c index a2bf369c923d..68c06ae7888c 100644 --- a/fs/proc/proc_net.c +++ b/fs/proc/proc_net.c | |||
@@ -135,7 +135,7 @@ static struct dentry *proc_tgid_net_lookup(struct inode *dir, | |||
135 | de = ERR_PTR(-ENOENT); | 135 | de = ERR_PTR(-ENOENT); |
136 | net = get_proc_task_net(dir); | 136 | net = get_proc_task_net(dir); |
137 | if (net != NULL) { | 137 | if (net != NULL) { |
138 | de = proc_lookup_de(net->proc_net, dir, dentry); | 138 | de = proc_lookup_de(dir, dentry, net->proc_net); |
139 | put_net(net); | 139 | put_net(net); |
140 | } | 140 | } |
141 | return de; | 141 | return de; |
@@ -172,7 +172,7 @@ static int proc_tgid_net_readdir(struct file *file, struct dir_context *ctx) | |||
172 | ret = -EINVAL; | 172 | ret = -EINVAL; |
173 | net = get_proc_task_net(file_inode(file)); | 173 | net = get_proc_task_net(file_inode(file)); |
174 | if (net != NULL) { | 174 | if (net != NULL) { |
175 | ret = proc_readdir_de(net->proc_net, file, ctx); | 175 | ret = proc_readdir_de(file, ctx, net->proc_net); |
176 | put_net(net); | 176 | put_net(net); |
177 | } | 177 | } |
178 | return ret; | 178 | return ret; |
diff --git a/fs/proc/self.c b/fs/proc/self.c index 31326bb23b8b..4d7d061696b3 100644 --- a/fs/proc/self.c +++ b/fs/proc/self.c | |||
@@ -1,4 +1,5 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | 1 | // SPDX-License-Identifier: GPL-2.0 |
2 | #include <linux/cache.h> | ||
2 | #include <linux/sched.h> | 3 | #include <linux/sched.h> |
3 | #include <linux/slab.h> | 4 | #include <linux/slab.h> |
4 | #include <linux/pid_namespace.h> | 5 | #include <linux/pid_namespace.h> |
@@ -17,11 +18,11 @@ static const char *proc_self_get_link(struct dentry *dentry, | |||
17 | 18 | ||
18 | if (!tgid) | 19 | if (!tgid) |
19 | return ERR_PTR(-ENOENT); | 20 | return ERR_PTR(-ENOENT); |
20 | /* 11 for max length of signed int in decimal + NULL term */ | 21 | /* max length of unsigned int in decimal + NULL term */ |
21 | name = kmalloc(12, dentry ? GFP_KERNEL : GFP_ATOMIC); | 22 | name = kmalloc(10 + 1, dentry ? GFP_KERNEL : GFP_ATOMIC); |
22 | if (unlikely(!name)) | 23 | if (unlikely(!name)) |
23 | return dentry ? ERR_PTR(-ENOMEM) : ERR_PTR(-ECHILD); | 24 | return dentry ? ERR_PTR(-ENOMEM) : ERR_PTR(-ECHILD); |
24 | sprintf(name, "%d", tgid); | 25 | sprintf(name, "%u", tgid); |
25 | set_delayed_call(done, kfree_link, name); | 26 | set_delayed_call(done, kfree_link, name); |
26 | return name; | 27 | return name; |
27 | } | 28 | } |
@@ -30,7 +31,7 @@ static const struct inode_operations proc_self_inode_operations = { | |||
30 | .get_link = proc_self_get_link, | 31 | .get_link = proc_self_get_link, |
31 | }; | 32 | }; |
32 | 33 | ||
33 | static unsigned self_inum; | 34 | static unsigned self_inum __ro_after_init; |
34 | 35 | ||
35 | int proc_setup_self(struct super_block *s) | 36 | int proc_setup_self(struct super_block *s) |
36 | { | 37 | { |
diff --git a/fs/proc/thread_self.c b/fs/proc/thread_self.c index b813e3b529f2..9d2efaca499f 100644 --- a/fs/proc/thread_self.c +++ b/fs/proc/thread_self.c | |||
@@ -1,4 +1,5 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | 1 | // SPDX-License-Identifier: GPL-2.0 |
2 | #include <linux/cache.h> | ||
2 | #include <linux/sched.h> | 3 | #include <linux/sched.h> |
3 | #include <linux/slab.h> | 4 | #include <linux/slab.h> |
4 | #include <linux/pid_namespace.h> | 5 | #include <linux/pid_namespace.h> |
@@ -18,11 +19,10 @@ static const char *proc_thread_self_get_link(struct dentry *dentry, | |||
18 | 19 | ||
19 | if (!pid) | 20 | if (!pid) |
20 | return ERR_PTR(-ENOENT); | 21 | return ERR_PTR(-ENOENT); |
21 | name = kmalloc(PROC_NUMBUF + 6 + PROC_NUMBUF, | 22 | name = kmalloc(10 + 6 + 10 + 1, dentry ? GFP_KERNEL : GFP_ATOMIC); |
22 | dentry ? GFP_KERNEL : GFP_ATOMIC); | ||
23 | if (unlikely(!name)) | 23 | if (unlikely(!name)) |
24 | return dentry ? ERR_PTR(-ENOMEM) : ERR_PTR(-ECHILD); | 24 | return dentry ? ERR_PTR(-ENOMEM) : ERR_PTR(-ECHILD); |
25 | sprintf(name, "%d/task/%d", tgid, pid); | 25 | sprintf(name, "%u/task/%u", tgid, pid); |
26 | set_delayed_call(done, kfree_link, name); | 26 | set_delayed_call(done, kfree_link, name); |
27 | return name; | 27 | return name; |
28 | } | 28 | } |
@@ -31,7 +31,7 @@ static const struct inode_operations proc_thread_self_inode_operations = { | |||
31 | .get_link = proc_thread_self_get_link, | 31 | .get_link = proc_thread_self_get_link, |
32 | }; | 32 | }; |
33 | 33 | ||
34 | static unsigned thread_self_inum; | 34 | static unsigned thread_self_inum __ro_after_init; |
35 | 35 | ||
36 | int proc_setup_thread_self(struct super_block *s) | 36 | int proc_setup_thread_self(struct super_block *s) |
37 | { | 37 | { |
diff --git a/fs/proc/vmcore.c b/fs/proc/vmcore.c index 885d445afa0d..a45f0af22a60 100644 --- a/fs/proc/vmcore.c +++ b/fs/proc/vmcore.c | |||
@@ -1178,18 +1178,16 @@ fs_initcall(vmcore_init); | |||
1178 | /* Cleanup function for vmcore module. */ | 1178 | /* Cleanup function for vmcore module. */ |
1179 | void vmcore_cleanup(void) | 1179 | void vmcore_cleanup(void) |
1180 | { | 1180 | { |
1181 | struct list_head *pos, *next; | ||
1182 | |||
1183 | if (proc_vmcore) { | 1181 | if (proc_vmcore) { |
1184 | proc_remove(proc_vmcore); | 1182 | proc_remove(proc_vmcore); |
1185 | proc_vmcore = NULL; | 1183 | proc_vmcore = NULL; |
1186 | } | 1184 | } |
1187 | 1185 | ||
1188 | /* clear the vmcore list. */ | 1186 | /* clear the vmcore list. */ |
1189 | list_for_each_safe(pos, next, &vmcore_list) { | 1187 | while (!list_empty(&vmcore_list)) { |
1190 | struct vmcore *m; | 1188 | struct vmcore *m; |
1191 | 1189 | ||
1192 | m = list_entry(pos, struct vmcore, list); | 1190 | m = list_first_entry(&vmcore_list, struct vmcore, list); |
1193 | list_del(&m->list); | 1191 | list_del(&m->list); |
1194 | kfree(m); | 1192 | kfree(m); |
1195 | } | 1193 | } |
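vmcore_cleanup() swaps the list_for_each_safe() cursor pair for a loop that repeatedly detaches the first entry; no separate "next" pointer is needed because each pass removes the element it just looked up. The idiom in isolation (kernel-style sketch with a made-up entry type):

#include <linux/list.h>
#include <linux/slab.h>

struct item {
        struct list_head list;
};

/* Drain and free every entry; each iteration removes the current head. */
static void drain(struct list_head *head)
{
        while (!list_empty(head)) {
                struct item *it = list_first_entry(head, struct item, list);

                list_del(&it->list);
                kfree(it);
        }
}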
diff --git a/include/asm-generic/bitops/find.h b/include/asm-generic/bitops/find.h index 1ba611e16fa0..8a1ee10014de 100644 --- a/include/asm-generic/bitops/find.h +++ b/include/asm-generic/bitops/find.h | |||
@@ -16,6 +16,22 @@ extern unsigned long find_next_bit(const unsigned long *addr, unsigned long | |||
16 | size, unsigned long offset); | 16 | size, unsigned long offset); |
17 | #endif | 17 | #endif |
18 | 18 | ||
19 | #ifndef find_next_and_bit | ||
20 | /** | ||
21 | * find_next_and_bit - find the next set bit in both memory regions | ||
22 | * @addr1: The first address to base the search on | ||
23 | * @addr2: The second address to base the search on | ||
24 | * @offset: The bitnumber to start searching at | ||
25 | * @size: The bitmap size in bits | ||
26 | * | ||
27 | * Returns the bit number for the next set bit | ||
28 | * If no bits are set, returns @size. | ||
29 | */ | ||
30 | extern unsigned long find_next_and_bit(const unsigned long *addr1, | ||
31 | const unsigned long *addr2, unsigned long size, | ||
32 | unsigned long offset); | ||
33 | #endif | ||
34 | |||
19 | #ifndef find_next_zero_bit | 35 | #ifndef find_next_zero_bit |
20 | /** | 36 | /** |
21 | * find_next_zero_bit - find the next cleared bit in a memory region | 37 | * find_next_zero_bit - find the next cleared bit in a memory region |
@@ -55,8 +71,12 @@ extern unsigned long find_first_zero_bit(const unsigned long *addr, | |||
55 | unsigned long size); | 71 | unsigned long size); |
56 | #else /* CONFIG_GENERIC_FIND_FIRST_BIT */ | 72 | #else /* CONFIG_GENERIC_FIND_FIRST_BIT */ |
57 | 73 | ||
74 | #ifndef find_first_bit | ||
58 | #define find_first_bit(addr, size) find_next_bit((addr), (size), 0) | 75 | #define find_first_bit(addr, size) find_next_bit((addr), (size), 0) |
76 | #endif | ||
77 | #ifndef find_first_zero_bit | ||
59 | #define find_first_zero_bit(addr, size) find_next_zero_bit((addr), (size), 0) | 78 | #define find_first_zero_bit(addr, size) find_next_zero_bit((addr), (size), 0) |
79 | #endif | ||
60 | 80 | ||
61 | #endif /* CONFIG_GENERIC_FIND_FIRST_BIT */ | 81 | #endif /* CONFIG_GENERIC_FIND_FIRST_BIT */ |
62 | 82 | ||
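The new find_next_and_bit() reports positions that are set in both bitmaps without materialising the intermediate AND. A minimal, illustrative caller (the function and bitmap names here are made up for the example) might loop like this:

#include <linux/bitmap.h>
#include <linux/bitops.h>
#include <linux/printk.h>

/* Illustrative only: print every position set in both @a and @b. */
static void print_common_bits(const unsigned long *a, const unsigned long *b,
                              unsigned int nbits)
{
        unsigned int bit;

        for (bit = find_next_and_bit(a, b, nbits, 0);
             bit < nbits;                       /* returns nbits when no bit is left */
             bit = find_next_and_bit(a, b, nbits, bit + 1))
                pr_info("bit %u is set in both bitmaps\n", bit);
}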
diff --git a/include/linux/bitmap.h b/include/linux/bitmap.h index 3489253e38fc..5f11fbdc27f8 100644 --- a/include/linux/bitmap.h +++ b/include/linux/bitmap.h | |||
@@ -64,9 +64,14 @@ | |||
64 | * bitmap_find_free_region(bitmap, bits, order) Find and allocate bit region | 64 | * bitmap_find_free_region(bitmap, bits, order) Find and allocate bit region |
65 | * bitmap_release_region(bitmap, pos, order) Free specified bit region | 65 | * bitmap_release_region(bitmap, pos, order) Free specified bit region |
66 | * bitmap_allocate_region(bitmap, pos, order) Allocate specified bit region | 66 | * bitmap_allocate_region(bitmap, pos, order) Allocate specified bit region |
67 | * bitmap_from_u32array(dst, nbits, buf, nwords) *dst = *buf (nwords 32b words) | 67 | * bitmap_from_arr32(dst, buf, nbits) Copy nbits from u32[] buf to dst |
68 | * bitmap_to_u32array(buf, nwords, src, nbits) *buf = *dst (nwords 32b words) | 68 | * bitmap_to_arr32(buf, src, nbits) Copy nbits from buf to u32[] dst |
69 | * | 69 | * |
70 | * Note, bitmap_zero() and bitmap_fill() operate over the region of | ||
71 | * unsigned longs, that is, bits behind bitmap till the unsigned long | ||
72 | * boundary will be zeroed or filled as well. Consider to use | ||
73 | * bitmap_clear() or bitmap_set() to make explicit zeroing or filling | ||
74 | * respectively. | ||
70 | */ | 75 | */ |
71 | 76 | ||
72 | /** | 77 | /** |
@@ -83,8 +88,12 @@ | |||
83 | * test_and_change_bit(bit, addr) Change bit and return old value | 88 | * test_and_change_bit(bit, addr) Change bit and return old value |
84 | * find_first_zero_bit(addr, nbits) Position first zero bit in *addr | 89 | * find_first_zero_bit(addr, nbits) Position first zero bit in *addr |
85 | * find_first_bit(addr, nbits) Position first set bit in *addr | 90 | * find_first_bit(addr, nbits) Position first set bit in *addr |
86 | * find_next_zero_bit(addr, nbits, bit) Position next zero bit in *addr >= bit | 91 | * find_next_zero_bit(addr, nbits, bit) |
92 | * Position next zero bit in *addr >= bit | ||
87 | * find_next_bit(addr, nbits, bit) Position next set bit in *addr >= bit | 93 | * find_next_bit(addr, nbits, bit) Position next set bit in *addr >= bit |
94 | * find_next_and_bit(addr1, addr2, nbits, bit) | ||
95 | * Same as find_next_bit, but in | ||
96 | * (*addr1 & *addr2) | ||
88 | * | 97 | * |
89 | */ | 98 | */ |
90 | 99 | ||
@@ -174,14 +183,7 @@ extern void bitmap_fold(unsigned long *dst, const unsigned long *orig, | |||
174 | extern int bitmap_find_free_region(unsigned long *bitmap, unsigned int bits, int order); | 183 | extern int bitmap_find_free_region(unsigned long *bitmap, unsigned int bits, int order); |
175 | extern void bitmap_release_region(unsigned long *bitmap, unsigned int pos, int order); | 184 | extern void bitmap_release_region(unsigned long *bitmap, unsigned int pos, int order); |
176 | extern int bitmap_allocate_region(unsigned long *bitmap, unsigned int pos, int order); | 185 | extern int bitmap_allocate_region(unsigned long *bitmap, unsigned int pos, int order); |
177 | extern unsigned int bitmap_from_u32array(unsigned long *bitmap, | 186 | |
178 | unsigned int nbits, | ||
179 | const u32 *buf, | ||
180 | unsigned int nwords); | ||
181 | extern unsigned int bitmap_to_u32array(u32 *buf, | ||
182 | unsigned int nwords, | ||
183 | const unsigned long *bitmap, | ||
184 | unsigned int nbits); | ||
185 | #ifdef __BIG_ENDIAN | 187 | #ifdef __BIG_ENDIAN |
186 | extern void bitmap_copy_le(unsigned long *dst, const unsigned long *src, unsigned int nbits); | 188 | extern void bitmap_copy_le(unsigned long *dst, const unsigned long *src, unsigned int nbits); |
187 | #else | 189 | #else |
@@ -209,12 +211,12 @@ static inline void bitmap_zero(unsigned long *dst, unsigned int nbits) | |||
209 | 211 | ||
210 | static inline void bitmap_fill(unsigned long *dst, unsigned int nbits) | 212 | static inline void bitmap_fill(unsigned long *dst, unsigned int nbits) |
211 | { | 213 | { |
212 | unsigned int nlongs = BITS_TO_LONGS(nbits); | 214 | if (small_const_nbits(nbits)) |
213 | if (!small_const_nbits(nbits)) { | 215 | *dst = ~0UL; |
214 | unsigned int len = (nlongs - 1) * sizeof(unsigned long); | 216 | else { |
215 | memset(dst, 0xff, len); | 217 | unsigned int len = BITS_TO_LONGS(nbits) * sizeof(unsigned long); |
218 | memset(dst, 0xff, len); | ||
216 | } | 219 | } |
217 | dst[nlongs - 1] = BITMAP_LAST_WORD_MASK(nbits); | ||
218 | } | 220 | } |
219 | 221 | ||
220 | static inline void bitmap_copy(unsigned long *dst, const unsigned long *src, | 222 | static inline void bitmap_copy(unsigned long *dst, const unsigned long *src, |
@@ -228,6 +230,35 @@ static inline void bitmap_copy(unsigned long *dst, const unsigned long *src, | |||
228 | } | 230 | } |
229 | } | 231 | } |
230 | 232 | ||
233 | /* | ||
234 | * Copy bitmap and clear tail bits in last word. | ||
235 | */ | ||
236 | static inline void bitmap_copy_clear_tail(unsigned long *dst, | ||
237 | const unsigned long *src, unsigned int nbits) | ||
238 | { | ||
239 | bitmap_copy(dst, src, nbits); | ||
240 | if (nbits % BITS_PER_LONG) | ||
241 | dst[nbits / BITS_PER_LONG] &= BITMAP_LAST_WORD_MASK(nbits); | ||
242 | } | ||
243 | |||
244 | /* | ||
245 | * On 32-bit systems bitmaps are represented as u32 arrays internally, and | ||
246 | * therefore conversion is not needed when copying data from/to arrays of u32. | ||
247 | */ | ||
248 | #if BITS_PER_LONG == 64 | ||
249 | extern void bitmap_from_arr32(unsigned long *bitmap, const u32 *buf, | ||
250 | unsigned int nbits); | ||
251 | extern void bitmap_to_arr32(u32 *buf, const unsigned long *bitmap, | ||
252 | unsigned int nbits); | ||
253 | #else | ||
254 | #define bitmap_from_arr32(bitmap, buf, nbits) \ | ||
255 | bitmap_copy_clear_tail((unsigned long *) (bitmap), \ | ||
256 | (const unsigned long *) (buf), (nbits)) | ||
257 | #define bitmap_to_arr32(buf, bitmap, nbits) \ | ||
258 | bitmap_copy_clear_tail((unsigned long *) (buf), \ | ||
259 | (const unsigned long *) (bitmap), (nbits)) | ||
260 | #endif | ||
261 | |||
231 | static inline int bitmap_and(unsigned long *dst, const unsigned long *src1, | 262 | static inline int bitmap_and(unsigned long *dst, const unsigned long *src1, |
232 | const unsigned long *src2, unsigned int nbits) | 263 | const unsigned long *src2, unsigned int nbits) |
233 | { | 264 | { |
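bitmap_from_arr32()/bitmap_to_arr32() replace the old u32-array converters: on 64-bit kernels they are real out-of-line conversions, while on 32-bit kernels they degrade to a plain copy that clears the tail bits. A small sketch of the round trip (the EX_NBITS size and function name are arbitrary, chosen only for the example):

#include <linux/bitmap.h>
#include <linux/kernel.h>
#include <linux/types.h>

#define EX_NBITS 96     /* arbitrary size for the example */

/* Illustrative only: round-trip a bitmap through a u32[] buffer,
 * e.g. for a fixed 32-bit UAPI layout. */
static void example_arr32_round_trip(void)
{
        DECLARE_BITMAP(src, EX_NBITS);
        DECLARE_BITMAP(dst, EX_NBITS);
        u32 buf[DIV_ROUND_UP(EX_NBITS, 32)];

        bitmap_zero(src, EX_NBITS);
        bitmap_set(src, 5, 10);                 /* set bits 5..14 */

        bitmap_to_arr32(buf, src, EX_NBITS);    /* bitmap -> u32 array */
        bitmap_from_arr32(dst, buf, EX_NBITS);  /* u32 array -> bitmap */
}

Note also the clarified bitmap_fill() semantics documented above: with the new implementation, bitmap_fill(dst, 70) on a 64-bit machine fills both underlying words (bits 0-127), whereas bitmap_set(dst, 0, 70) touches exactly bits 0-69.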
diff --git a/include/linux/build_bug.h b/include/linux/build_bug.h index 3efed0d742a0..43d1fd50d433 100644 --- a/include/linux/build_bug.h +++ b/include/linux/build_bug.h | |||
@@ -8,7 +8,6 @@ | |||
8 | #define __BUILD_BUG_ON_NOT_POWER_OF_2(n) (0) | 8 | #define __BUILD_BUG_ON_NOT_POWER_OF_2(n) (0) |
9 | #define BUILD_BUG_ON_NOT_POWER_OF_2(n) (0) | 9 | #define BUILD_BUG_ON_NOT_POWER_OF_2(n) (0) |
10 | #define BUILD_BUG_ON_ZERO(e) (0) | 10 | #define BUILD_BUG_ON_ZERO(e) (0) |
11 | #define BUILD_BUG_ON_NULL(e) ((void *)0) | ||
12 | #define BUILD_BUG_ON_INVALID(e) (0) | 11 | #define BUILD_BUG_ON_INVALID(e) (0) |
13 | #define BUILD_BUG_ON_MSG(cond, msg) (0) | 12 | #define BUILD_BUG_ON_MSG(cond, msg) (0) |
14 | #define BUILD_BUG_ON(condition) (0) | 13 | #define BUILD_BUG_ON(condition) (0) |
@@ -28,7 +27,6 @@ | |||
28 | * aren't permitted). | 27 | * aren't permitted). |
29 | */ | 28 | */ |
30 | #define BUILD_BUG_ON_ZERO(e) (sizeof(struct { int:(-!!(e)); })) | 29 | #define BUILD_BUG_ON_ZERO(e) (sizeof(struct { int:(-!!(e)); })) |
31 | #define BUILD_BUG_ON_NULL(e) ((void *)sizeof(struct { int:(-!!(e)); })) | ||
32 | 30 | ||
33 | /* | 31 | /* |
34 | * BUILD_BUG_ON_INVALID() permits the compiler to check the validity of the | 32 | * BUILD_BUG_ON_INVALID() permits the compiler to check the validity of the |
diff --git a/include/linux/compiler-clang.h b/include/linux/compiler-clang.h index 3b609edffa8f..d02a4df3f473 100644 --- a/include/linux/compiler-clang.h +++ b/include/linux/compiler-clang.h | |||
@@ -19,3 +19,11 @@ | |||
19 | 19 | ||
20 | #define randomized_struct_fields_start struct { | 20 | #define randomized_struct_fields_start struct { |
21 | #define randomized_struct_fields_end }; | 21 | #define randomized_struct_fields_end }; |
22 | |||
23 | /* all clang versions usable with the kernel support KASAN ABI version 5 */ | ||
24 | #define KASAN_ABI_VERSION 5 | ||
25 | |||
26 | /* emulate gcc's __SANITIZE_ADDRESS__ flag */ | ||
27 | #if __has_feature(address_sanitizer) | ||
28 | #define __SANITIZE_ADDRESS__ | ||
29 | #endif | ||
diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h index 75b565194437..d4a2a7dcd72d 100644 --- a/include/linux/cpumask.h +++ b/include/linux/cpumask.h | |||
@@ -640,7 +640,7 @@ static inline int cpulist_parse(const char *buf, struct cpumask *dstp) | |||
640 | /** | 640 | /** |
641 | * cpumask_size - size to allocate for a 'struct cpumask' in bytes | 641 | * cpumask_size - size to allocate for a 'struct cpumask' in bytes |
642 | */ | 642 | */ |
643 | static inline size_t cpumask_size(void) | 643 | static inline unsigned int cpumask_size(void) |
644 | { | 644 | { |
645 | return BITS_TO_LONGS(nr_cpumask_bits) * sizeof(long); | 645 | return BITS_TO_LONGS(nr_cpumask_bits) * sizeof(long); |
646 | } | 646 | } |
diff --git a/include/linux/cpuset.h b/include/linux/cpuset.h index 1b8e41597ef5..934633a05d20 100644 --- a/include/linux/cpuset.h +++ b/include/linux/cpuset.h | |||
@@ -112,7 +112,7 @@ static inline int cpuset_do_slab_mem_spread(void) | |||
112 | return task_spread_slab(current); | 112 | return task_spread_slab(current); |
113 | } | 113 | } |
114 | 114 | ||
115 | extern int current_cpuset_is_being_rebound(void); | 115 | extern bool current_cpuset_is_being_rebound(void); |
116 | 116 | ||
117 | extern void rebuild_sched_domains(void); | 117 | extern void rebuild_sched_domains(void); |
118 | 118 | ||
@@ -247,9 +247,9 @@ static inline int cpuset_do_slab_mem_spread(void) | |||
247 | return 0; | 247 | return 0; |
248 | } | 248 | } |
249 | 249 | ||
250 | static inline int current_cpuset_is_being_rebound(void) | 250 | static inline bool current_cpuset_is_being_rebound(void) |
251 | { | 251 | { |
252 | return 0; | 252 | return false; |
253 | } | 253 | } |
254 | 254 | ||
255 | static inline void rebuild_sched_domains(void) | 255 | static inline void rebuild_sched_domains(void) |
diff --git a/include/linux/crash_dump.h b/include/linux/crash_dump.h index a992e6ca2f1c..f7ac2aa93269 100644 --- a/include/linux/crash_dump.h +++ b/include/linux/crash_dump.h | |||
@@ -2,13 +2,13 @@ | |||
2 | #ifndef LINUX_CRASH_DUMP_H | 2 | #ifndef LINUX_CRASH_DUMP_H |
3 | #define LINUX_CRASH_DUMP_H | 3 | #define LINUX_CRASH_DUMP_H |
4 | 4 | ||
5 | #ifdef CONFIG_CRASH_DUMP | ||
6 | #include <linux/kexec.h> | 5 | #include <linux/kexec.h> |
7 | #include <linux/proc_fs.h> | 6 | #include <linux/proc_fs.h> |
8 | #include <linux/elf.h> | 7 | #include <linux/elf.h> |
9 | 8 | ||
10 | #include <asm/pgtable.h> /* for pgprot_t */ | 9 | #include <asm/pgtable.h> /* for pgprot_t */ |
11 | 10 | ||
11 | #ifdef CONFIG_CRASH_DUMP | ||
12 | #define ELFCORE_ADDR_MAX (-1ULL) | 12 | #define ELFCORE_ADDR_MAX (-1ULL) |
13 | #define ELFCORE_ADDR_ERR (-2ULL) | 13 | #define ELFCORE_ADDR_ERR (-2ULL) |
14 | 14 | ||
@@ -52,13 +52,13 @@ void vmcore_cleanup(void); | |||
52 | * has passed the elf core header address on command line. | 52 | * has passed the elf core header address on command line. |
53 | * | 53 | * |
54 | * This is not just a test if CONFIG_CRASH_DUMP is enabled or not. It will | 54 | * This is not just a test if CONFIG_CRASH_DUMP is enabled or not. It will |
55 | * return 1 if CONFIG_CRASH_DUMP=y and if kernel is booting after a panic of | 55 | * return true if CONFIG_CRASH_DUMP=y and if kernel is booting after a panic |
56 | * previous kernel. | 56 | * of previous kernel. |
57 | */ | 57 | */ |
58 | 58 | ||
59 | static inline int is_kdump_kernel(void) | 59 | static inline bool is_kdump_kernel(void) |
60 | { | 60 | { |
61 | return (elfcorehdr_addr != ELFCORE_ADDR_MAX) ? 1 : 0; | 61 | return elfcorehdr_addr != ELFCORE_ADDR_MAX; |
62 | } | 62 | } |
63 | 63 | ||
64 | /* is_vmcore_usable() checks if the kernel is booting after a panic and | 64 | /* is_vmcore_usable() checks if the kernel is booting after a panic and |
@@ -89,7 +89,7 @@ extern int register_oldmem_pfn_is_ram(int (*fn)(unsigned long pfn)); | |||
89 | extern void unregister_oldmem_pfn_is_ram(void); | 89 | extern void unregister_oldmem_pfn_is_ram(void); |
90 | 90 | ||
91 | #else /* !CONFIG_CRASH_DUMP */ | 91 | #else /* !CONFIG_CRASH_DUMP */ |
92 | static inline int is_kdump_kernel(void) { return 0; } | 92 | static inline bool is_kdump_kernel(void) { return 0; } |
93 | #endif /* CONFIG_CRASH_DUMP */ | 93 | #endif /* CONFIG_CRASH_DUMP */ |
94 | 94 | ||
95 | extern unsigned long saved_max_pfn; | 95 | extern unsigned long saved_max_pfn; |
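is_kdump_kernel() now returns bool, but callers use it exactly as before; a typical pattern (illustrative only, the function name and sizes are made up) is to scale work down when running inside the capture kernel:

#include <linux/crash_dump.h>

/* Illustrative only: shrink an allocation inside a kdump capture kernel. */
static unsigned int example_ring_entries(void)
{
        if (is_kdump_kernel())
                return 64;      /* keep the crash kernel's footprint small */
        return 4096;
}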
diff --git a/include/linux/genl_magic_func.h b/include/linux/genl_magic_func.h index 604967609e55..83f81ac53282 100644 --- a/include/linux/genl_magic_func.h +++ b/include/linux/genl_magic_func.h | |||
@@ -2,6 +2,7 @@ | |||
2 | #ifndef GENL_MAGIC_FUNC_H | 2 | #ifndef GENL_MAGIC_FUNC_H |
3 | #define GENL_MAGIC_FUNC_H | 3 | #define GENL_MAGIC_FUNC_H |
4 | 4 | ||
5 | #include <linux/build_bug.h> | ||
5 | #include <linux/genl_magic_struct.h> | 6 | #include <linux/genl_magic_struct.h> |
6 | 7 | ||
7 | /* | 8 | /* |
@@ -132,17 +133,6 @@ static void dprint_array(const char *dir, int nla_type, | |||
132 | * use one static buffer for parsing of nested attributes */ | 133 | * use one static buffer for parsing of nested attributes */ |
133 | static struct nlattr *nested_attr_tb[128]; | 134 | static struct nlattr *nested_attr_tb[128]; |
134 | 135 | ||
135 | #ifndef BUILD_BUG_ON | ||
136 | /* Force a compilation error if condition is true */ | ||
137 | #define BUILD_BUG_ON(condition) ((void)BUILD_BUG_ON_ZERO(condition)) | ||
138 | /* Force a compilation error if condition is true, but also produce a | ||
139 | result (of value 0 and type size_t), so the expression can be used | ||
140 | e.g. in a structure initializer (or where-ever else comma expressions | ||
141 | aren't permitted). */ | ||
142 | #define BUILD_BUG_ON_ZERO(e) (sizeof(struct { int:-!!(e); })) | ||
143 | #define BUILD_BUG_ON_NULL(e) ((void *)sizeof(struct { int:-!!(e); })) | ||
144 | #endif | ||
145 | |||
146 | #undef GENL_struct | 136 | #undef GENL_struct |
147 | #define GENL_struct(tag_name, tag_number, s_name, s_fields) \ | 137 | #define GENL_struct(tag_name, tag_number, s_name, s_fields) \ |
148 | /* *_from_attrs functions are static, but potentially unused */ \ | 138 | /* *_from_attrs functions are static, but potentially unused */ \ |
diff --git a/include/linux/ioport.h b/include/linux/ioport.h index 93b4183cf53d..da0ebaec25f0 100644 --- a/include/linux/ioport.h +++ b/include/linux/ioport.h | |||
@@ -265,7 +265,7 @@ extern struct resource * __devm_request_region(struct device *dev, | |||
265 | extern void __devm_release_region(struct device *dev, struct resource *parent, | 265 | extern void __devm_release_region(struct device *dev, struct resource *parent, |
266 | resource_size_t start, resource_size_t n); | 266 | resource_size_t start, resource_size_t n); |
267 | extern int iomem_map_sanity_check(resource_size_t addr, unsigned long size); | 267 | extern int iomem_map_sanity_check(resource_size_t addr, unsigned long size); |
268 | extern int iomem_is_exclusive(u64 addr); | 268 | extern bool iomem_is_exclusive(u64 addr); |
269 | 269 | ||
270 | extern int | 270 | extern int |
271 | walk_system_ram_range(unsigned long start_pfn, unsigned long nr_pages, | 271 | walk_system_ram_range(unsigned long start_pfn, unsigned long nr_pages, |
diff --git a/include/linux/kallsyms.h b/include/linux/kallsyms.h index d79d1e7486bd..657a83b943f0 100644 --- a/include/linux/kallsyms.h +++ b/include/linux/kallsyms.h | |||
@@ -167,7 +167,7 @@ static inline int kallsyms_show_value(void) | |||
167 | 167 | ||
168 | static inline void print_ip_sym(unsigned long ip) | 168 | static inline void print_ip_sym(unsigned long ip) |
169 | { | 169 | { |
170 | printk("[<%p>] %pS\n", (void *) ip, (void *) ip); | 170 | printk("[<%px>] %pS\n", (void *) ip, (void *) ip); |
171 | } | 171 | } |
172 | 172 | ||
173 | #endif /*_LINUX_KALLSYMS_H*/ | 173 | #endif /*_LINUX_KALLSYMS_H*/ |
diff --git a/include/linux/kasan.h b/include/linux/kasan.h index e3eb834c9a35..adc13474a53b 100644 --- a/include/linux/kasan.h +++ b/include/linux/kasan.h | |||
@@ -11,8 +11,6 @@ struct task_struct; | |||
11 | 11 | ||
12 | #ifdef CONFIG_KASAN | 12 | #ifdef CONFIG_KASAN |
13 | 13 | ||
14 | #define KASAN_SHADOW_SCALE_SHIFT 3 | ||
15 | |||
16 | #include <asm/kasan.h> | 14 | #include <asm/kasan.h> |
17 | #include <asm/pgtable.h> | 15 | #include <asm/pgtable.h> |
18 | 16 | ||
@@ -56,14 +54,14 @@ void kasan_poison_object_data(struct kmem_cache *cache, void *object); | |||
56 | void kasan_init_slab_obj(struct kmem_cache *cache, const void *object); | 54 | void kasan_init_slab_obj(struct kmem_cache *cache, const void *object); |
57 | 55 | ||
58 | void kasan_kmalloc_large(const void *ptr, size_t size, gfp_t flags); | 56 | void kasan_kmalloc_large(const void *ptr, size_t size, gfp_t flags); |
59 | void kasan_kfree_large(const void *ptr); | 57 | void kasan_kfree_large(void *ptr, unsigned long ip); |
60 | void kasan_poison_kfree(void *ptr); | 58 | void kasan_poison_kfree(void *ptr, unsigned long ip); |
61 | void kasan_kmalloc(struct kmem_cache *s, const void *object, size_t size, | 59 | void kasan_kmalloc(struct kmem_cache *s, const void *object, size_t size, |
62 | gfp_t flags); | 60 | gfp_t flags); |
63 | void kasan_krealloc(const void *object, size_t new_size, gfp_t flags); | 61 | void kasan_krealloc(const void *object, size_t new_size, gfp_t flags); |
64 | 62 | ||
65 | void kasan_slab_alloc(struct kmem_cache *s, void *object, gfp_t flags); | 63 | void kasan_slab_alloc(struct kmem_cache *s, void *object, gfp_t flags); |
66 | bool kasan_slab_free(struct kmem_cache *s, void *object); | 64 | bool kasan_slab_free(struct kmem_cache *s, void *object, unsigned long ip); |
67 | 65 | ||
68 | struct kasan_cache { | 66 | struct kasan_cache { |
69 | int alloc_meta_offset; | 67 | int alloc_meta_offset; |
@@ -108,8 +106,8 @@ static inline void kasan_init_slab_obj(struct kmem_cache *cache, | |||
108 | const void *object) {} | 106 | const void *object) {} |
109 | 107 | ||
110 | static inline void kasan_kmalloc_large(void *ptr, size_t size, gfp_t flags) {} | 108 | static inline void kasan_kmalloc_large(void *ptr, size_t size, gfp_t flags) {} |
111 | static inline void kasan_kfree_large(const void *ptr) {} | 109 | static inline void kasan_kfree_large(void *ptr, unsigned long ip) {} |
112 | static inline void kasan_poison_kfree(void *ptr) {} | 110 | static inline void kasan_poison_kfree(void *ptr, unsigned long ip) {} |
113 | static inline void kasan_kmalloc(struct kmem_cache *s, const void *object, | 111 | static inline void kasan_kmalloc(struct kmem_cache *s, const void *object, |
114 | size_t size, gfp_t flags) {} | 112 | size_t size, gfp_t flags) {} |
115 | static inline void kasan_krealloc(const void *object, size_t new_size, | 113 | static inline void kasan_krealloc(const void *object, size_t new_size, |
@@ -117,7 +115,8 @@ static inline void kasan_krealloc(const void *object, size_t new_size, | |||
117 | 115 | ||
118 | static inline void kasan_slab_alloc(struct kmem_cache *s, void *object, | 116 | static inline void kasan_slab_alloc(struct kmem_cache *s, void *object, |
119 | gfp_t flags) {} | 117 | gfp_t flags) {} |
120 | static inline bool kasan_slab_free(struct kmem_cache *s, void *object) | 118 | static inline bool kasan_slab_free(struct kmem_cache *s, void *object, |
119 | unsigned long ip) | ||
121 | { | 120 | { |
122 | return false; | 121 | return false; |
123 | } | 122 | } |
diff --git a/include/linux/lockref.h b/include/linux/lockref.h index ef3c9342e119..2eac32095113 100644 --- a/include/linux/lockref.h +++ b/include/linux/lockref.h | |||
@@ -44,7 +44,7 @@ extern void lockref_mark_dead(struct lockref *); | |||
44 | extern int lockref_get_not_dead(struct lockref *); | 44 | extern int lockref_get_not_dead(struct lockref *); |
45 | 45 | ||
46 | /* Must be called under spinlock for reliable results */ | 46 | /* Must be called under spinlock for reliable results */ |
47 | static inline int __lockref_is_dead(const struct lockref *l) | 47 | static inline bool __lockref_is_dead(const struct lockref *l) |
48 | { | 48 | { |
49 | return ((int)l->count < 0); | 49 | return ((int)l->count < 0); |
50 | } | 50 | } |
diff --git a/include/linux/memblock.h b/include/linux/memblock.h index 7ed0f7782d16..8be5077efb5f 100644 --- a/include/linux/memblock.h +++ b/include/linux/memblock.h | |||
@@ -332,8 +332,8 @@ void memblock_enforce_memory_limit(phys_addr_t memory_limit); | |||
332 | void memblock_cap_memory_range(phys_addr_t base, phys_addr_t size); | 332 | void memblock_cap_memory_range(phys_addr_t base, phys_addr_t size); |
333 | void memblock_mem_limit_remove_map(phys_addr_t limit); | 333 | void memblock_mem_limit_remove_map(phys_addr_t limit); |
334 | bool memblock_is_memory(phys_addr_t addr); | 334 | bool memblock_is_memory(phys_addr_t addr); |
335 | int memblock_is_map_memory(phys_addr_t addr); | 335 | bool memblock_is_map_memory(phys_addr_t addr); |
336 | int memblock_is_region_memory(phys_addr_t base, phys_addr_t size); | 336 | bool memblock_is_region_memory(phys_addr_t base, phys_addr_t size); |
337 | bool memblock_is_reserved(phys_addr_t addr); | 337 | bool memblock_is_reserved(phys_addr_t addr); |
338 | bool memblock_is_region_reserved(phys_addr_t base, phys_addr_t size); | 338 | bool memblock_is_region_reserved(phys_addr_t base, phys_addr_t size); |
339 | 339 | ||
diff --git a/include/linux/module.h b/include/linux/module.h index 8dc7065d904d..d44df9b2c131 100644 --- a/include/linux/module.h +++ b/include/linux/module.h | |||
@@ -491,7 +491,7 @@ extern struct mutex module_mutex; | |||
491 | /* FIXME: It'd be nice to isolate modules during init, too, so they | 491 | /* FIXME: It'd be nice to isolate modules during init, too, so they |
492 | aren't used before they (may) fail. But presently too much code | 492 | aren't used before they (may) fail. But presently too much code |
493 | (IDE & SCSI) require entry into the module during init.*/ | 493 | (IDE & SCSI) require entry into the module during init.*/ |
494 | static inline int module_is_live(struct module *mod) | 494 | static inline bool module_is_live(struct module *mod) |
495 | { | 495 | { |
496 | return mod->state != MODULE_STATE_GOING; | 496 | return mod->state != MODULE_STATE_GOING; |
497 | } | 497 | } |
diff --git a/include/linux/mutex.h b/include/linux/mutex.h index 153274f78402..f25c13423bd4 100644 --- a/include/linux/mutex.h +++ b/include/linux/mutex.h | |||
@@ -138,9 +138,9 @@ extern void __mutex_init(struct mutex *lock, const char *name, | |||
138 | * mutex_is_locked - is the mutex locked | 138 | * mutex_is_locked - is the mutex locked |
139 | * @lock: the mutex to be queried | 139 | * @lock: the mutex to be queried |
140 | * | 140 | * |
141 | * Returns 1 if the mutex is locked, 0 if unlocked. | 141 | * Returns true if the mutex is locked, false if unlocked. |
142 | */ | 142 | */ |
143 | static inline int mutex_is_locked(struct mutex *lock) | 143 | static inline bool mutex_is_locked(struct mutex *lock) |
144 | { | 144 | { |
145 | /* | 145 | /* |
146 | * XXX think about spin_is_locked | 146 | * XXX think about spin_is_locked |
diff --git a/include/linux/pipe_fs_i.h b/include/linux/pipe_fs_i.h index 2dc5e9870fcd..5a3bb3b7c9ad 100644 --- a/include/linux/pipe_fs_i.h +++ b/include/linux/pipe_fs_i.h | |||
@@ -167,10 +167,9 @@ void pipe_lock(struct pipe_inode_info *); | |||
167 | void pipe_unlock(struct pipe_inode_info *); | 167 | void pipe_unlock(struct pipe_inode_info *); |
168 | void pipe_double_lock(struct pipe_inode_info *, struct pipe_inode_info *); | 168 | void pipe_double_lock(struct pipe_inode_info *, struct pipe_inode_info *); |
169 | 169 | ||
170 | extern unsigned int pipe_max_size, pipe_min_size; | 170 | extern unsigned int pipe_max_size; |
171 | extern unsigned long pipe_user_pages_hard; | 171 | extern unsigned long pipe_user_pages_hard; |
172 | extern unsigned long pipe_user_pages_soft; | 172 | extern unsigned long pipe_user_pages_soft; |
173 | int pipe_proc_fn(struct ctl_table *, int, void __user *, size_t *, loff_t *); | ||
174 | 173 | ||
175 | /* Drop the inode semaphore and wait for a pipe event, atomically */ | 174 | /* Drop the inode semaphore and wait for a pipe event, atomically */ |
176 | void pipe_wait(struct pipe_inode_info *pipe); | 175 | void pipe_wait(struct pipe_inode_info *pipe); |
@@ -191,6 +190,6 @@ long pipe_fcntl(struct file *, unsigned int, unsigned long arg); | |||
191 | struct pipe_inode_info *get_pipe_info(struct file *file); | 190 | struct pipe_inode_info *get_pipe_info(struct file *file); |
192 | 191 | ||
193 | int create_pipe_files(struct file **, int); | 192 | int create_pipe_files(struct file **, int); |
194 | unsigned int round_pipe_size(unsigned int size); | 193 | unsigned int round_pipe_size(unsigned long size); |
195 | 194 | ||
196 | #endif | 195 | #endif |
diff --git a/include/linux/sched.h b/include/linux/sched.h index 92744e3f1556..b161ef8a902e 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h | |||
@@ -1497,6 +1497,11 @@ static inline struct thread_info *task_thread_info(struct task_struct *task) | |||
1497 | extern struct task_struct *find_task_by_vpid(pid_t nr); | 1497 | extern struct task_struct *find_task_by_vpid(pid_t nr); |
1498 | extern struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns); | 1498 | extern struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns); |
1499 | 1499 | ||
1500 | /* | ||
1501 | * find a task by its virtual pid and get the task struct | ||
1502 | */ | ||
1503 | extern struct task_struct *find_get_task_by_vpid(pid_t nr); | ||
1504 | |||
1500 | extern int wake_up_state(struct task_struct *tsk, unsigned int state); | 1505 | extern int wake_up_state(struct task_struct *tsk, unsigned int state); |
1501 | extern int wake_up_process(struct task_struct *tsk); | 1506 | extern int wake_up_process(struct task_struct *tsk); |
1502 | extern void wake_up_new_task(struct task_struct *tsk); | 1507 | extern void wake_up_new_task(struct task_struct *tsk); |
diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h index 992bc9948232..b769ecfcc3bd 100644 --- a/include/linux/sysctl.h +++ b/include/linux/sysctl.h | |||
@@ -51,9 +51,6 @@ extern int proc_dointvec_minmax(struct ctl_table *, int, | |||
51 | extern int proc_douintvec_minmax(struct ctl_table *table, int write, | 51 | extern int proc_douintvec_minmax(struct ctl_table *table, int write, |
52 | void __user *buffer, size_t *lenp, | 52 | void __user *buffer, size_t *lenp, |
53 | loff_t *ppos); | 53 | loff_t *ppos); |
54 | extern int proc_dopipe_max_size(struct ctl_table *table, int write, | ||
55 | void __user *buffer, size_t *lenp, | ||
56 | loff_t *ppos); | ||
57 | extern int proc_dointvec_jiffies(struct ctl_table *, int, | 54 | extern int proc_dointvec_jiffies(struct ctl_table *, int, |
58 | void __user *, size_t *, loff_t *); | 55 | void __user *, size_t *, loff_t *); |
59 | extern int proc_dointvec_userhz_jiffies(struct ctl_table *, int, | 56 | extern int proc_dointvec_userhz_jiffies(struct ctl_table *, int, |
diff --git a/include/linux/uuid.h b/include/linux/uuid.h index 33b0bdbb613c..d9c4a6cce3c2 100644 --- a/include/linux/uuid.h +++ b/include/linux/uuid.h | |||
@@ -17,6 +17,7 @@ | |||
17 | #define _LINUX_UUID_H_ | 17 | #define _LINUX_UUID_H_ |
18 | 18 | ||
19 | #include <uapi/linux/uuid.h> | 19 | #include <uapi/linux/uuid.h> |
20 | #include <linux/string.h> | ||
20 | 21 | ||
21 | #define UUID_SIZE 16 | 22 | #define UUID_SIZE 16 |
22 | 23 | ||
diff --git a/include/uapi/asm-generic/siginfo.h b/include/uapi/asm-generic/siginfo.h index 254afc31e3be..85dc965afd89 100644 --- a/include/uapi/asm-generic/siginfo.h +++ b/include/uapi/asm-generic/siginfo.h | |||
@@ -280,8 +280,8 @@ typedef struct siginfo { | |||
280 | #define NSIGTRAP 4 | 280 | #define NSIGTRAP 4 |
281 | 281 | ||
282 | /* | 282 | /* |
283 | * There are an additional set of SIGTRAP si_codes used by ptrace | 283 | * There is an additional set of SIGTRAP si_codes used by ptrace |
284 | * that of the form: ((PTRACE_EVENT_XXX << 8) | SIGTRAP) | 284 | * that are of the form: ((PTRACE_EVENT_XXX << 8) | SIGTRAP) |
285 | */ | 285 | */ |
286 | 286 | ||
287 | /* | 287 | /* |
diff --git a/include/uapi/linux/uuid.h b/include/uapi/linux/uuid.h index 5c04130bb524..e5a7eecef7c3 100644 --- a/include/uapi/linux/uuid.h +++ b/include/uapi/linux/uuid.h | |||
@@ -19,7 +19,6 @@ | |||
19 | #define _UAPI_LINUX_UUID_H_ | 19 | #define _UAPI_LINUX_UUID_H_ |
20 | 20 | ||
21 | #include <linux/types.h> | 21 | #include <linux/types.h> |
22 | #include <linux/string.h> | ||
23 | 22 | ||
24 | typedef struct { | 23 | typedef struct { |
25 | __u8 b[16]; | 24 | __u8 b[16]; |
diff --git a/ipc/mqueue.c b/ipc/mqueue.c index 690ae6665500..360e564ae7d1 100644 --- a/ipc/mqueue.c +++ b/ipc/mqueue.c | |||
@@ -596,7 +596,7 @@ static void wq_add(struct mqueue_inode_info *info, int sr, | |||
596 | ewp->task = current; | 596 | ewp->task = current; |
597 | 597 | ||
598 | list_for_each_entry(walk, &info->e_wait_q[sr].list, list) { | 598 | list_for_each_entry(walk, &info->e_wait_q[sr].list, list) { |
599 | if (walk->task->static_prio <= current->static_prio) { | 599 | if (walk->task->prio <= current->prio) { |
600 | list_add_tail(&ewp->list, &walk->list); | 600 | list_add_tail(&ewp->list, &walk->list); |
601 | return; | 601 | return; |
602 | } | 602 | } |
diff --git a/ipc/msg.c b/ipc/msg.c --- a/ipc/msg.c +++ b/ipc/msg.c | |||
@@ -476,9 +476,9 @@ static int msgctl_info(struct ipc_namespace *ns, int msqid, | |||
476 | static int msgctl_stat(struct ipc_namespace *ns, int msqid, | 476 | static int msgctl_stat(struct ipc_namespace *ns, int msqid, |
477 | int cmd, struct msqid64_ds *p) | 477 | int cmd, struct msqid64_ds *p) |
478 | { | 478 | { |
479 | int err; | ||
480 | struct msg_queue *msq; | 479 | struct msg_queue *msq; |
481 | int success_return; | 480 | int id = 0; |
481 | int err; | ||
482 | 482 | ||
483 | memset(p, 0, sizeof(*p)); | 483 | memset(p, 0, sizeof(*p)); |
484 | 484 | ||
@@ -489,14 +489,13 @@ static int msgctl_stat(struct ipc_namespace *ns, int msqid, | |||
489 | err = PTR_ERR(msq); | 489 | err = PTR_ERR(msq); |
490 | goto out_unlock; | 490 | goto out_unlock; |
491 | } | 491 | } |
492 | success_return = msq->q_perm.id; | 492 | id = msq->q_perm.id; |
493 | } else { | 493 | } else { |
494 | msq = msq_obtain_object_check(ns, msqid); | 494 | msq = msq_obtain_object_check(ns, msqid); |
495 | if (IS_ERR(msq)) { | 495 | if (IS_ERR(msq)) { |
496 | err = PTR_ERR(msq); | 496 | err = PTR_ERR(msq); |
497 | goto out_unlock; | 497 | goto out_unlock; |
498 | } | 498 | } |
499 | success_return = 0; | ||
500 | } | 499 | } |
501 | 500 | ||
502 | err = -EACCES; | 501 | err = -EACCES; |
@@ -507,6 +506,14 @@ static int msgctl_stat(struct ipc_namespace *ns, int msqid, | |||
507 | if (err) | 506 | if (err) |
508 | goto out_unlock; | 507 | goto out_unlock; |
509 | 508 | ||
509 | ipc_lock_object(&msq->q_perm); | ||
510 | |||
511 | if (!ipc_valid_object(&msq->q_perm)) { | ||
512 | ipc_unlock_object(&msq->q_perm); | ||
513 | err = -EIDRM; | ||
514 | goto out_unlock; | ||
515 | } | ||
516 | |||
510 | kernel_to_ipc64_perm(&msq->q_perm, &p->msg_perm); | 517 | kernel_to_ipc64_perm(&msq->q_perm, &p->msg_perm); |
511 | p->msg_stime = msq->q_stime; | 518 | p->msg_stime = msq->q_stime; |
512 | p->msg_rtime = msq->q_rtime; | 519 | p->msg_rtime = msq->q_rtime; |
@@ -516,9 +523,10 @@ static int msgctl_stat(struct ipc_namespace *ns, int msqid, | |||
516 | p->msg_qbytes = msq->q_qbytes; | 523 | p->msg_qbytes = msq->q_qbytes; |
517 | p->msg_lspid = msq->q_lspid; | 524 | p->msg_lspid = msq->q_lspid; |
518 | p->msg_lrpid = msq->q_lrpid; | 525 | p->msg_lrpid = msq->q_lrpid; |
519 | rcu_read_unlock(); | ||
520 | 526 | ||
521 | return success_return; | 527 | ipc_unlock_object(&msq->q_perm); |
528 | rcu_read_unlock(); | ||
529 | return id; | ||
522 | 530 | ||
523 | out_unlock: | 531 | out_unlock: |
524 | rcu_read_unlock(); | 532 | rcu_read_unlock(); |
diff --git a/ipc/sem.c b/ipc/sem.c --- a/ipc/sem.c +++ b/ipc/sem.c | |||
@@ -1213,10 +1213,20 @@ static int semctl_stat(struct ipc_namespace *ns, int semid, | |||
1213 | if (err) | 1213 | if (err) |
1214 | goto out_unlock; | 1214 | goto out_unlock; |
1215 | 1215 | ||
1216 | ipc_lock_object(&sma->sem_perm); | ||
1217 | |||
1218 | if (!ipc_valid_object(&sma->sem_perm)) { | ||
1219 | ipc_unlock_object(&sma->sem_perm); | ||
1220 | err = -EIDRM; | ||
1221 | goto out_unlock; | ||
1222 | } | ||
1223 | |||
1216 | kernel_to_ipc64_perm(&sma->sem_perm, &semid64->sem_perm); | 1224 | kernel_to_ipc64_perm(&sma->sem_perm, &semid64->sem_perm); |
1217 | semid64->sem_otime = get_semotime(sma); | 1225 | semid64->sem_otime = get_semotime(sma); |
1218 | semid64->sem_ctime = sma->sem_ctime; | 1226 | semid64->sem_ctime = sma->sem_ctime; |
1219 | semid64->sem_nsems = sma->sem_nsems; | 1227 | semid64->sem_nsems = sma->sem_nsems; |
1228 | |||
1229 | ipc_unlock_object(&sma->sem_perm); | ||
1220 | rcu_read_unlock(); | 1230 | rcu_read_unlock(); |
1221 | return id; | 1231 | return id; |
1222 | 1232 | ||
diff --git a/ipc/shm.c b/ipc/shm.c --- a/ipc/shm.c +++ b/ipc/shm.c | |||
@@ -909,9 +909,11 @@ static int shmctl_stat(struct ipc_namespace *ns, int shmid, | |||
909 | int cmd, struct shmid64_ds *tbuf) | 909 | int cmd, struct shmid64_ds *tbuf) |
910 | { | 910 | { |
911 | struct shmid_kernel *shp; | 911 | struct shmid_kernel *shp; |
912 | int result; | 912 | int id = 0; |
913 | int err; | 913 | int err; |
914 | 914 | ||
915 | memset(tbuf, 0, sizeof(*tbuf)); | ||
916 | |||
915 | rcu_read_lock(); | 917 | rcu_read_lock(); |
916 | if (cmd == SHM_STAT) { | 918 | if (cmd == SHM_STAT) { |
917 | shp = shm_obtain_object(ns, shmid); | 919 | shp = shm_obtain_object(ns, shmid); |
@@ -919,14 +921,13 @@ static int shmctl_stat(struct ipc_namespace *ns, int shmid, | |||
919 | err = PTR_ERR(shp); | 921 | err = PTR_ERR(shp); |
920 | goto out_unlock; | 922 | goto out_unlock; |
921 | } | 923 | } |
922 | result = shp->shm_perm.id; | 924 | id = shp->shm_perm.id; |
923 | } else { | 925 | } else { |
924 | shp = shm_obtain_object_check(ns, shmid); | 926 | shp = shm_obtain_object_check(ns, shmid); |
925 | if (IS_ERR(shp)) { | 927 | if (IS_ERR(shp)) { |
926 | err = PTR_ERR(shp); | 928 | err = PTR_ERR(shp); |
927 | goto out_unlock; | 929 | goto out_unlock; |
928 | } | 930 | } |
929 | result = 0; | ||
930 | } | 931 | } |
931 | 932 | ||
932 | err = -EACCES; | 933 | err = -EACCES; |
@@ -937,7 +938,14 @@ static int shmctl_stat(struct ipc_namespace *ns, int shmid, | |||
937 | if (err) | 938 | if (err) |
938 | goto out_unlock; | 939 | goto out_unlock; |
939 | 940 | ||
940 | memset(tbuf, 0, sizeof(*tbuf)); | 941 | ipc_lock_object(&shp->shm_perm); |
942 | |||
943 | if (!ipc_valid_object(&shp->shm_perm)) { | ||
944 | ipc_unlock_object(&shp->shm_perm); | ||
945 | err = -EIDRM; | ||
946 | goto out_unlock; | ||
947 | } | ||
948 | |||
941 | kernel_to_ipc64_perm(&shp->shm_perm, &tbuf->shm_perm); | 949 | kernel_to_ipc64_perm(&shp->shm_perm, &tbuf->shm_perm); |
942 | tbuf->shm_segsz = shp->shm_segsz; | 950 | tbuf->shm_segsz = shp->shm_segsz; |
943 | tbuf->shm_atime = shp->shm_atim; | 951 | tbuf->shm_atime = shp->shm_atim; |
@@ -946,8 +954,10 @@ static int shmctl_stat(struct ipc_namespace *ns, int shmid, | |||
946 | tbuf->shm_cpid = shp->shm_cprid; | 954 | tbuf->shm_cpid = shp->shm_cprid; |
947 | tbuf->shm_lpid = shp->shm_lprid; | 955 | tbuf->shm_lpid = shp->shm_lprid; |
948 | tbuf->shm_nattch = shp->shm_nattch; | 956 | tbuf->shm_nattch = shp->shm_nattch; |
957 | |||
958 | ipc_unlock_object(&shp->shm_perm); | ||
949 | rcu_read_unlock(); | 959 | rcu_read_unlock(); |
950 | return result; | 960 | return id; |
951 | 961 | ||
952 | out_unlock: | 962 | out_unlock: |
953 | rcu_read_unlock(); | 963 | rcu_read_unlock(); |
diff --git a/ipc/util.c b/ipc/util.c index ff045fec8d83..4ed5a17dd06f 100644 --- a/ipc/util.c +++ b/ipc/util.c | |||
@@ -23,9 +23,12 @@ | |||
23 | * tree. | 23 | * tree. |
24 | * - perform initial checks (capabilities, auditing and permission, | 24 | * - perform initial checks (capabilities, auditing and permission, |
25 | * etc). | 25 | * etc). |
26 | * - perform read-only operations, such as STAT, INFO commands. | 26 | * - perform read-only operations, such as INFO command, that |
27 | * do not demand atomicity | ||
27 | * acquire the ipc lock (kern_ipc_perm.lock) through | 28 | * acquire the ipc lock (kern_ipc_perm.lock) through |
28 | * ipc_lock_object() | 29 | * ipc_lock_object() |
30 | * - perform read-only operations that demand atomicity, | ||
31 | * such as STAT command. | ||
29 | * - perform data updates, such as SET, RMID commands and | 32 | * - perform data updates, such as SET, RMID commands and |
30 | * mechanism-specific operations (semop/semtimedop, | 33 | * mechanism-specific operations (semop/semtimedop, |
31 | * msgsnd/msgrcv, shmat/shmdt). | 34 | * msgsnd/msgrcv, shmat/shmdt). |
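The updated comment matches what the msgctl_stat()/semctl_stat()/shmctl_stat() hunks above now do: STAT reads take the per-object lock so the copied fields form one consistent snapshot. Stripped of the msg/sem/shm specifics, the pattern is roughly the following sketch (details and the surrounding lookup elided; ipc_lock_object()/ipc_valid_object() come from ipc/util.h):

#include <linux/ipc.h>
#include <linux/rcupdate.h>
#include "util.h"       /* ipc_lock_object(), ipc_valid_object() */

/* Sketch only: the locking order the comment above describes. */
static int example_stat(struct kern_ipc_perm *perm)
{
        int err = 0;

        rcu_read_lock();
        /* ... permission and security checks go here ... */

        ipc_lock_object(perm);                  /* kern_ipc_perm.lock */
        if (!ipc_valid_object(perm)) {          /* removed while we waited? */
                ipc_unlock_object(perm);
                err = -EIDRM;
                goto out_unlock;
        }
        /* ... copy the fields that must form one consistent snapshot ... */
        ipc_unlock_object(perm);

out_unlock:
        rcu_read_unlock();
        return err;
}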
diff --git a/kernel/async.c b/kernel/async.c index 2cbd3dd5940d..a893d6170944 100644 --- a/kernel/async.c +++ b/kernel/async.c | |||
@@ -84,20 +84,24 @@ static atomic_t entry_count; | |||
84 | 84 | ||
85 | static async_cookie_t lowest_in_progress(struct async_domain *domain) | 85 | static async_cookie_t lowest_in_progress(struct async_domain *domain) |
86 | { | 86 | { |
87 | struct list_head *pending; | 87 | struct async_entry *first = NULL; |
88 | async_cookie_t ret = ASYNC_COOKIE_MAX; | 88 | async_cookie_t ret = ASYNC_COOKIE_MAX; |
89 | unsigned long flags; | 89 | unsigned long flags; |
90 | 90 | ||
91 | spin_lock_irqsave(&async_lock, flags); | 91 | spin_lock_irqsave(&async_lock, flags); |
92 | 92 | ||
93 | if (domain) | 93 | if (domain) { |
94 | pending = &domain->pending; | 94 | if (!list_empty(&domain->pending)) |
95 | else | 95 | first = list_first_entry(&domain->pending, |
96 | pending = &async_global_pending; | 96 | struct async_entry, domain_list); |
97 | } else { | ||
98 | if (!list_empty(&async_global_pending)) | ||
99 | first = list_first_entry(&async_global_pending, | ||
100 | struct async_entry, global_list); | ||
101 | } | ||
97 | 102 | ||
98 | if (!list_empty(pending)) | 103 | if (first) |
99 | ret = list_first_entry(pending, struct async_entry, | 104 | ret = first->cookie; |
100 | domain_list)->cookie; | ||
101 | 105 | ||
102 | spin_unlock_irqrestore(&async_lock, flags); | 106 | spin_unlock_irqrestore(&async_lock, flags); |
103 | return ret; | 107 | return ret; |
diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c index f7efa7b4d825..b42037e6e81d 100644 --- a/kernel/cgroup/cpuset.c +++ b/kernel/cgroup/cpuset.c | |||
@@ -1254,9 +1254,9 @@ done: | |||
1254 | return retval; | 1254 | return retval; |
1255 | } | 1255 | } |
1256 | 1256 | ||
1257 | int current_cpuset_is_being_rebound(void) | 1257 | bool current_cpuset_is_being_rebound(void) |
1258 | { | 1258 | { |
1259 | int ret; | 1259 | bool ret; |
1260 | 1260 | ||
1261 | rcu_read_lock(); | 1261 | rcu_read_lock(); |
1262 | ret = task_cs(current) == cpuset_being_rebound; | 1262 | ret = task_cs(current) == cpuset_being_rebound; |
diff --git a/kernel/compat.c b/kernel/compat.c index d1cee656a7ed..3247fe761f60 100644 --- a/kernel/compat.c +++ b/kernel/compat.c | |||
@@ -355,7 +355,7 @@ COMPAT_SYSCALL_DEFINE3(sched_getaffinity, compat_pid_t, pid, unsigned int, len, | |||
355 | 355 | ||
356 | ret = sched_getaffinity(pid, mask); | 356 | ret = sched_getaffinity(pid, mask); |
357 | if (ret == 0) { | 357 | if (ret == 0) { |
358 | size_t retlen = min_t(size_t, len, cpumask_size()); | 358 | unsigned int retlen = min(len, cpumask_size()); |
359 | 359 | ||
360 | if (compat_put_bitmap(user_mask_ptr, cpumask_bits(mask), retlen * 8)) | 360 | if (compat_put_bitmap(user_mask_ptr, cpumask_bits(mask), retlen * 8)) |
361 | ret = -EFAULT; | 361 | ret = -EFAULT; |
diff --git a/kernel/configs/tiny.config b/kernel/configs/tiny.config index 7fa0c4ae6394..9bfdffc100da 100644 --- a/kernel/configs/tiny.config +++ b/kernel/configs/tiny.config | |||
@@ -10,3 +10,7 @@ CONFIG_OPTIMIZE_INLINING=y | |||
10 | # CONFIG_SLAB is not set | 10 | # CONFIG_SLAB is not set |
11 | # CONFIG_SLUB is not set | 11 | # CONFIG_SLUB is not set |
12 | CONFIG_SLOB=y | 12 | CONFIG_SLOB=y |
13 | CONFIG_CC_STACKPROTECTOR_NONE=y | ||
14 | # CONFIG_CC_STACKPROTECTOR_REGULAR is not set | ||
15 | # CONFIG_CC_STACKPROTECTOR_STRONG is not set | ||
16 | # CONFIG_CC_STACKPROTECTOR_AUTO is not set | ||
diff --git a/kernel/fork.c b/kernel/fork.c index c7c112391d79..be8aa5b98666 100644 --- a/kernel/fork.c +++ b/kernel/fork.c | |||
@@ -1592,6 +1592,10 @@ static __latent_entropy struct task_struct *copy_process( | |||
1592 | int retval; | 1592 | int retval; |
1593 | struct task_struct *p; | 1593 | struct task_struct *p; |
1594 | 1594 | ||
1595 | /* | ||
1596 | * Don't allow sharing the root directory with processes in a different | ||
1597 | * namespace | ||
1598 | */ | ||
1595 | if ((clone_flags & (CLONE_NEWNS|CLONE_FS)) == (CLONE_NEWNS|CLONE_FS)) | 1599 | if ((clone_flags & (CLONE_NEWNS|CLONE_FS)) == (CLONE_NEWNS|CLONE_FS)) |
1596 | return ERR_PTR(-EINVAL); | 1600 | return ERR_PTR(-EINVAL); |
1597 | 1601 | ||
@@ -2067,6 +2071,8 @@ long _do_fork(unsigned long clone_flags, | |||
2067 | int __user *child_tidptr, | 2071 | int __user *child_tidptr, |
2068 | unsigned long tls) | 2072 | unsigned long tls) |
2069 | { | 2073 | { |
2074 | struct completion vfork; | ||
2075 | struct pid *pid; | ||
2070 | struct task_struct *p; | 2076 | struct task_struct *p; |
2071 | int trace = 0; | 2077 | int trace = 0; |
2072 | long nr; | 2078 | long nr; |
@@ -2092,43 +2098,40 @@ long _do_fork(unsigned long clone_flags, | |||
2092 | p = copy_process(clone_flags, stack_start, stack_size, | 2098 | p = copy_process(clone_flags, stack_start, stack_size, |
2093 | child_tidptr, NULL, trace, tls, NUMA_NO_NODE); | 2099 | child_tidptr, NULL, trace, tls, NUMA_NO_NODE); |
2094 | add_latent_entropy(); | 2100 | add_latent_entropy(); |
2101 | |||
2102 | if (IS_ERR(p)) | ||
2103 | return PTR_ERR(p); | ||
2104 | |||
2095 | /* | 2105 | /* |
2096 | * Do this prior waking up the new thread - the thread pointer | 2106 | * Do this prior waking up the new thread - the thread pointer |
2097 | * might get invalid after that point, if the thread exits quickly. | 2107 | * might get invalid after that point, if the thread exits quickly. |
2098 | */ | 2108 | */ |
2099 | if (!IS_ERR(p)) { | 2109 | trace_sched_process_fork(current, p); |
2100 | struct completion vfork; | ||
2101 | struct pid *pid; | ||
2102 | |||
2103 | trace_sched_process_fork(current, p); | ||
2104 | 2110 | ||
2105 | pid = get_task_pid(p, PIDTYPE_PID); | 2111 | pid = get_task_pid(p, PIDTYPE_PID); |
2106 | nr = pid_vnr(pid); | 2112 | nr = pid_vnr(pid); |
2107 | 2113 | ||
2108 | if (clone_flags & CLONE_PARENT_SETTID) | 2114 | if (clone_flags & CLONE_PARENT_SETTID) |
2109 | put_user(nr, parent_tidptr); | 2115 | put_user(nr, parent_tidptr); |
2110 | 2116 | ||
2111 | if (clone_flags & CLONE_VFORK) { | 2117 | if (clone_flags & CLONE_VFORK) { |
2112 | p->vfork_done = &vfork; | 2118 | p->vfork_done = &vfork; |
2113 | init_completion(&vfork); | 2119 | init_completion(&vfork); |
2114 | get_task_struct(p); | 2120 | get_task_struct(p); |
2115 | } | 2121 | } |
2116 | |||
2117 | wake_up_new_task(p); | ||
2118 | 2122 | ||
2119 | /* forking complete and child started to run, tell ptracer */ | 2123 | wake_up_new_task(p); |
2120 | if (unlikely(trace)) | ||
2121 | ptrace_event_pid(trace, pid); | ||
2122 | 2124 | ||
2123 | if (clone_flags & CLONE_VFORK) { | 2125 | /* forking complete and child started to run, tell ptracer */ |
2124 | if (!wait_for_vfork_done(p, &vfork)) | 2126 | if (unlikely(trace)) |
2125 | ptrace_event_pid(PTRACE_EVENT_VFORK_DONE, pid); | 2127 | ptrace_event_pid(trace, pid); |
2126 | } | ||
2127 | 2128 | ||
2128 | put_pid(pid); | 2129 | if (clone_flags & CLONE_VFORK) { |
2129 | } else { | 2130 | if (!wait_for_vfork_done(p, &vfork)) |
2130 | nr = PTR_ERR(p); | 2131 | ptrace_event_pid(PTRACE_EVENT_VFORK_DONE, pid); |
2131 | } | 2132 | } |
2133 | |||
2134 | put_pid(pid); | ||
2132 | return nr; | 2135 | return nr; |
2133 | } | 2136 | } |
2134 | 2137 | ||
diff --git a/kernel/futex.c b/kernel/futex.c index 7f719d110908..1f450e092c74 100644 --- a/kernel/futex.c +++ b/kernel/futex.c | |||
@@ -862,24 +862,6 @@ static void put_pi_state(struct futex_pi_state *pi_state) | |||
862 | } | 862 | } |
863 | } | 863 | } |
864 | 864 | ||
865 | /* | ||
866 | * Look up the task based on what TID userspace gave us. | ||
867 | * We dont trust it. | ||
868 | */ | ||
869 | static struct task_struct *futex_find_get_task(pid_t pid) | ||
870 | { | ||
871 | struct task_struct *p; | ||
872 | |||
873 | rcu_read_lock(); | ||
874 | p = find_task_by_vpid(pid); | ||
875 | if (p) | ||
876 | get_task_struct(p); | ||
877 | |||
878 | rcu_read_unlock(); | ||
879 | |||
880 | return p; | ||
881 | } | ||
882 | |||
883 | #ifdef CONFIG_FUTEX_PI | 865 | #ifdef CONFIG_FUTEX_PI |
884 | 866 | ||
885 | /* | 867 | /* |
@@ -1183,7 +1165,7 @@ static int attach_to_pi_owner(u32 uval, union futex_key *key, | |||
1183 | */ | 1165 | */ |
1184 | if (!pid) | 1166 | if (!pid) |
1185 | return -ESRCH; | 1167 | return -ESRCH; |
1186 | p = futex_find_get_task(pid); | 1168 | p = find_get_task_by_vpid(pid); |
1187 | if (!p) | 1169 | if (!p) |
1188 | return -ESRCH; | 1170 | return -ESRCH; |
1189 | 1171 | ||
diff --git a/kernel/irq/spurious.c b/kernel/irq/spurious.c index ef2a47e0eab6..6cdecc6f4c53 100644 --- a/kernel/irq/spurious.c +++ b/kernel/irq/spurious.c | |||
@@ -10,7 +10,6 @@ | |||
10 | #include <linux/jiffies.h> | 10 | #include <linux/jiffies.h> |
11 | #include <linux/irq.h> | 11 | #include <linux/irq.h> |
12 | #include <linux/module.h> | 12 | #include <linux/module.h> |
13 | #include <linux/kallsyms.h> | ||
14 | #include <linux/interrupt.h> | 13 | #include <linux/interrupt.h> |
15 | #include <linux/moduleparam.h> | 14 | #include <linux/moduleparam.h> |
16 | #include <linux/timer.h> | 15 | #include <linux/timer.h> |
diff --git a/kernel/kcov.c b/kernel/kcov.c index 7594c033d98a..2c16f1ab5e10 100644 --- a/kernel/kcov.c +++ b/kernel/kcov.c | |||
@@ -358,7 +358,8 @@ static int kcov_ioctl_locked(struct kcov *kcov, unsigned int cmd, | |||
358 | */ | 358 | */ |
359 | if (kcov->mode != KCOV_MODE_INIT || !kcov->area) | 359 | if (kcov->mode != KCOV_MODE_INIT || !kcov->area) |
360 | return -EINVAL; | 360 | return -EINVAL; |
361 | if (kcov->t != NULL) | 361 | t = current; |
362 | if (kcov->t != NULL || t->kcov != NULL) | ||
362 | return -EBUSY; | 363 | return -EBUSY; |
363 | if (arg == KCOV_TRACE_PC) | 364 | if (arg == KCOV_TRACE_PC) |
364 | kcov->mode = KCOV_MODE_TRACE_PC; | 365 | kcov->mode = KCOV_MODE_TRACE_PC; |
@@ -370,7 +371,6 @@ static int kcov_ioctl_locked(struct kcov *kcov, unsigned int cmd, | |||
370 | #endif | 371 | #endif |
371 | else | 372 | else |
372 | return -EINVAL; | 373 | return -EINVAL; |
373 | t = current; | ||
374 | /* Cache in task struct for performance. */ | 374 | /* Cache in task struct for performance. */ |
375 | t->kcov_size = kcov->size; | 375 | t->kcov_size = kcov->size; |
376 | t->kcov_area = kcov->area; | 376 | t->kcov_area = kcov->area; |
diff --git a/kernel/pid.c b/kernel/pid.c index 5d30c87e3c42..ed6c343fe50d 100644 --- a/kernel/pid.c +++ b/kernel/pid.c | |||
@@ -343,6 +343,19 @@ struct task_struct *find_task_by_vpid(pid_t vnr) | |||
343 | return find_task_by_pid_ns(vnr, task_active_pid_ns(current)); | 343 | return find_task_by_pid_ns(vnr, task_active_pid_ns(current)); |
344 | } | 344 | } |
345 | 345 | ||
346 | struct task_struct *find_get_task_by_vpid(pid_t nr) | ||
347 | { | ||
348 | struct task_struct *task; | ||
349 | |||
350 | rcu_read_lock(); | ||
351 | task = find_task_by_vpid(nr); | ||
352 | if (task) | ||
353 | get_task_struct(task); | ||
354 | rcu_read_unlock(); | ||
355 | |||
356 | return task; | ||
357 | } | ||
358 | |||
346 | struct pid *get_task_pid(struct task_struct *task, enum pid_type type) | 359 | struct pid *get_task_pid(struct task_struct *task, enum pid_type type) |
347 | { | 360 | { |
348 | struct pid *pid; | 361 | struct pid *pid; |
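find_get_task_by_vpid() centralises the lookup-then-pin idiom that futex, ptrace and taskstats each open-coded; the conversions below all reduce to the same shape. An illustrative sketch (the function name is made up; the error value follows the ptrace conversion):

#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/sched/task.h>   /* put_task_struct() */

/* Illustrative only: resolve a user-supplied TID and pin the task. */
static int example_act_on_pid(pid_t pid)
{
        struct task_struct *p;

        p = find_get_task_by_vpid(pid);         /* takes a reference, or NULL */
        if (!p)
                return -ESRCH;

        /* ... operate on @p under its reference ... */

        put_task_struct(p);
        return 0;
}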
diff --git a/kernel/ptrace.c b/kernel/ptrace.c index 5e1d713c8e61..21fec73d45d4 100644 --- a/kernel/ptrace.c +++ b/kernel/ptrace.c | |||
@@ -1103,21 +1103,6 @@ int ptrace_request(struct task_struct *child, long request, | |||
1103 | return ret; | 1103 | return ret; |
1104 | } | 1104 | } |
1105 | 1105 | ||
1106 | static struct task_struct *ptrace_get_task_struct(pid_t pid) | ||
1107 | { | ||
1108 | struct task_struct *child; | ||
1109 | |||
1110 | rcu_read_lock(); | ||
1111 | child = find_task_by_vpid(pid); | ||
1112 | if (child) | ||
1113 | get_task_struct(child); | ||
1114 | rcu_read_unlock(); | ||
1115 | |||
1116 | if (!child) | ||
1117 | return ERR_PTR(-ESRCH); | ||
1118 | return child; | ||
1119 | } | ||
1120 | |||
1121 | #ifndef arch_ptrace_attach | 1106 | #ifndef arch_ptrace_attach |
1122 | #define arch_ptrace_attach(child) do { } while (0) | 1107 | #define arch_ptrace_attach(child) do { } while (0) |
1123 | #endif | 1108 | #endif |
@@ -1135,9 +1120,9 @@ SYSCALL_DEFINE4(ptrace, long, request, long, pid, unsigned long, addr, | |||
1135 | goto out; | 1120 | goto out; |
1136 | } | 1121 | } |
1137 | 1122 | ||
1138 | child = ptrace_get_task_struct(pid); | 1123 | child = find_get_task_by_vpid(pid); |
1139 | if (IS_ERR(child)) { | 1124 | if (!child) { |
1140 | ret = PTR_ERR(child); | 1125 | ret = -ESRCH; |
1141 | goto out; | 1126 | goto out; |
1142 | } | 1127 | } |
1143 | 1128 | ||
@@ -1281,9 +1266,9 @@ COMPAT_SYSCALL_DEFINE4(ptrace, compat_long_t, request, compat_long_t, pid, | |||
1281 | goto out; | 1266 | goto out; |
1282 | } | 1267 | } |
1283 | 1268 | ||
1284 | child = ptrace_get_task_struct(pid); | 1269 | child = find_get_task_by_vpid(pid); |
1285 | if (IS_ERR(child)) { | 1270 | if (!child) { |
1286 | ret = PTR_ERR(child); | 1271 | ret = -ESRCH; |
1287 | goto out; | 1272 | goto out; |
1288 | } | 1273 | } |
1289 | 1274 | ||
diff --git a/kernel/relay.c b/kernel/relay.c index 41280033a4c5..f7f40a6e6352 100644 --- a/kernel/relay.c +++ b/kernel/relay.c | |||
@@ -611,7 +611,6 @@ free_bufs: | |||
611 | 611 | ||
612 | kref_put(&chan->kref, relay_destroy_channel); | 612 | kref_put(&chan->kref, relay_destroy_channel); |
613 | mutex_unlock(&relay_channels_mutex); | 613 | mutex_unlock(&relay_channels_mutex); |
614 | kfree(chan); | ||
615 | return NULL; | 614 | return NULL; |
616 | } | 615 | } |
617 | EXPORT_SYMBOL_GPL(relay_open); | 616 | EXPORT_SYMBOL_GPL(relay_open); |
diff --git a/kernel/resource.c b/kernel/resource.c index 8c527d83ca76..e270b5048988 100644 --- a/kernel/resource.c +++ b/kernel/resource.c | |||
@@ -1576,17 +1576,17 @@ static int strict_iomem_checks; | |||
1576 | 1576 | ||
1577 | /* | 1577 | /* |
1578 | * check if an address is reserved in the iomem resource tree | 1578 | * check if an address is reserved in the iomem resource tree |
1579 | * returns 1 if reserved, 0 if not reserved. | 1579 | * returns true if reserved, false if not reserved. |
1580 | */ | 1580 | */ |
1581 | int iomem_is_exclusive(u64 addr) | 1581 | bool iomem_is_exclusive(u64 addr) |
1582 | { | 1582 | { |
1583 | struct resource *p = &iomem_resource; | 1583 | struct resource *p = &iomem_resource; |
1584 | int err = 0; | 1584 | bool err = false; |
1585 | loff_t l; | 1585 | loff_t l; |
1586 | int size = PAGE_SIZE; | 1586 | int size = PAGE_SIZE; |
1587 | 1587 | ||
1588 | if (!strict_iomem_checks) | 1588 | if (!strict_iomem_checks) |
1589 | return 0; | 1589 | return false; |
1590 | 1590 | ||
1591 | addr = addr & PAGE_MASK; | 1591 | addr = addr & PAGE_MASK; |
1592 | 1592 | ||
@@ -1609,7 +1609,7 @@ int iomem_is_exclusive(u64 addr) | |||
1609 | continue; | 1609 | continue; |
1610 | if (IS_ENABLED(CONFIG_IO_STRICT_DEVMEM) | 1610 | if (IS_ENABLED(CONFIG_IO_STRICT_DEVMEM) |
1611 | || p->flags & IORESOURCE_EXCLUSIVE) { | 1611 | || p->flags & IORESOURCE_EXCLUSIVE) { |
1612 | err = 1; | 1612 | err = true; |
1613 | break; | 1613 | break; |
1614 | } | 1614 | } |
1615 | } | 1615 | } |
diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 36f113ac6353..bf724c1952ea 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c | |||
@@ -4867,7 +4867,7 @@ SYSCALL_DEFINE3(sched_getaffinity, pid_t, pid, unsigned int, len, | |||
4867 | 4867 | ||
4868 | ret = sched_getaffinity(pid, mask); | 4868 | ret = sched_getaffinity(pid, mask); |
4869 | if (ret == 0) { | 4869 | if (ret == 0) { |
4870 | size_t retlen = min_t(size_t, len, cpumask_size()); | 4870 | unsigned int retlen = min(len, cpumask_size()); |
4871 | 4871 | ||
4872 | if (copy_to_user(user_mask_ptr, mask, retlen)) | 4872 | if (copy_to_user(user_mask_ptr, mask, retlen)) |
4873 | ret = -EFAULT; | 4873 | ret = -EFAULT; |
diff --git a/kernel/sysctl.c b/kernel/sysctl.c index 2fb4e27c636a..f98f28c12020 100644 --- a/kernel/sysctl.c +++ b/kernel/sysctl.c | |||
@@ -218,6 +218,8 @@ static int proc_dointvec_minmax_coredump(struct ctl_table *table, int write, | |||
218 | static int proc_dostring_coredump(struct ctl_table *table, int write, | 218 | static int proc_dostring_coredump(struct ctl_table *table, int write, |
219 | void __user *buffer, size_t *lenp, loff_t *ppos); | 219 | void __user *buffer, size_t *lenp, loff_t *ppos); |
220 | #endif | 220 | #endif |
221 | static int proc_dopipe_max_size(struct ctl_table *table, int write, | ||
222 | void __user *buffer, size_t *lenp, loff_t *ppos); | ||
221 | 223 | ||
222 | #ifdef CONFIG_MAGIC_SYSRQ | 224 | #ifdef CONFIG_MAGIC_SYSRQ |
223 | /* Note: sysrq code uses it's own private copy */ | 225 | /* Note: sysrq code uses it's own private copy */ |
@@ -1812,8 +1814,7 @@ static struct ctl_table fs_table[] = { | |||
1812 | .data = &pipe_max_size, | 1814 | .data = &pipe_max_size, |
1813 | .maxlen = sizeof(pipe_max_size), | 1815 | .maxlen = sizeof(pipe_max_size), |
1814 | .mode = 0644, | 1816 | .mode = 0644, |
1815 | .proc_handler = &pipe_proc_fn, | 1817 | .proc_handler = proc_dopipe_max_size, |
1816 | .extra1 = &pipe_min_size, | ||
1817 | }, | 1818 | }, |
1818 | { | 1819 | { |
1819 | .procname = "pipe-user-pages-hard", | 1820 | .procname = "pipe-user-pages-hard", |
@@ -2615,29 +2616,17 @@ int proc_douintvec_minmax(struct ctl_table *table, int write, | |||
2615 | do_proc_douintvec_minmax_conv, ¶m); | 2616 | do_proc_douintvec_minmax_conv, ¶m); |
2616 | } | 2617 | } |
2617 | 2618 | ||
2618 | struct do_proc_dopipe_max_size_conv_param { | ||
2619 | unsigned int *min; | ||
2620 | }; | ||
2621 | |||
2622 | static int do_proc_dopipe_max_size_conv(unsigned long *lvalp, | 2619 | static int do_proc_dopipe_max_size_conv(unsigned long *lvalp, |
2623 | unsigned int *valp, | 2620 | unsigned int *valp, |
2624 | int write, void *data) | 2621 | int write, void *data) |
2625 | { | 2622 | { |
2626 | struct do_proc_dopipe_max_size_conv_param *param = data; | ||
2627 | |||
2628 | if (write) { | 2623 | if (write) { |
2629 | unsigned int val; | 2624 | unsigned int val; |
2630 | 2625 | ||
2631 | if (*lvalp > UINT_MAX) | ||
2632 | return -EINVAL; | ||
2633 | |||
2634 | val = round_pipe_size(*lvalp); | 2626 | val = round_pipe_size(*lvalp); |
2635 | if (val == 0) | 2627 | if (val == 0) |
2636 | return -EINVAL; | 2628 | return -EINVAL; |
2637 | 2629 | ||
2638 | if (param->min && *param->min > val) | ||
2639 | return -ERANGE; | ||
2640 | |||
2641 | *valp = val; | 2630 | *valp = val; |
2642 | } else { | 2631 | } else { |
2643 | unsigned int val = *valp; | 2632 | unsigned int val = *valp; |
@@ -2647,14 +2636,11 @@ static int do_proc_dopipe_max_size_conv(unsigned long *lvalp, | |||
2647 | return 0; | 2636 | return 0; |
2648 | } | 2637 | } |
2649 | 2638 | ||
2650 | int proc_dopipe_max_size(struct ctl_table *table, int write, | 2639 | static int proc_dopipe_max_size(struct ctl_table *table, int write, |
2651 | void __user *buffer, size_t *lenp, loff_t *ppos) | 2640 | void __user *buffer, size_t *lenp, loff_t *ppos) |
2652 | { | 2641 | { |
2653 | struct do_proc_dopipe_max_size_conv_param param = { | ||
2654 | .min = (unsigned int *) table->extra1, | ||
2655 | }; | ||
2656 | return do_proc_douintvec(table, write, buffer, lenp, ppos, | 2642 | return do_proc_douintvec(table, write, buffer, lenp, ppos, |
2657 | do_proc_dopipe_max_size_conv, &param); | 2643 | do_proc_dopipe_max_size_conv, NULL); |
2658 | } | 2644 | } |
2659 | 2645 | ||
2660 | static void validate_coredump_safety(void) | 2646 | static void validate_coredump_safety(void) |
@@ -3160,12 +3146,6 @@ int proc_douintvec_minmax(struct ctl_table *table, int write, | |||
3160 | return -ENOSYS; | 3146 | return -ENOSYS; |
3161 | } | 3147 | } |
3162 | 3148 | ||
3163 | int proc_dopipe_max_size(struct ctl_table *table, int write, | ||
3164 | void __user *buffer, size_t *lenp, loff_t *ppos) | ||
3165 | { | ||
3166 | return -ENOSYS; | ||
3167 | } | ||
3168 | |||
3169 | int proc_dointvec_jiffies(struct ctl_table *table, int write, | 3149 | int proc_dointvec_jiffies(struct ctl_table *table, int write, |
3170 | void __user *buffer, size_t *lenp, loff_t *ppos) | 3150 | void __user *buffer, size_t *lenp, loff_t *ppos) |
3171 | { | 3151 | { |
@@ -3209,7 +3189,6 @@ EXPORT_SYMBOL(proc_douintvec); | |||
3209 | EXPORT_SYMBOL(proc_dointvec_jiffies); | 3189 | EXPORT_SYMBOL(proc_dointvec_jiffies); |
3210 | EXPORT_SYMBOL(proc_dointvec_minmax); | 3190 | EXPORT_SYMBOL(proc_dointvec_minmax); |
3211 | EXPORT_SYMBOL_GPL(proc_douintvec_minmax); | 3191 | EXPORT_SYMBOL_GPL(proc_douintvec_minmax); |
3212 | EXPORT_SYMBOL_GPL(proc_dopipe_max_size); | ||
3213 | EXPORT_SYMBOL(proc_dointvec_userhz_jiffies); | 3192 | EXPORT_SYMBOL(proc_dointvec_userhz_jiffies); |
3214 | EXPORT_SYMBOL(proc_dointvec_ms_jiffies); | 3193 | EXPORT_SYMBOL(proc_dointvec_ms_jiffies); |
3215 | EXPORT_SYMBOL(proc_dostring); | 3194 | EXPORT_SYMBOL(proc_dostring); |
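With pipe_proc_fn() gone, the pipe-max-size entry above points at a private proc_dopipe_max_size handler whose converter only rounds the written value and rejects zero; the extra1 minimum and the exported symbol are dropped. For reference, a minimal sketch of how a similar unsigned-int sysctl can still be wired up with the generic, exported handlers; every "example_*" name is hypothetical and not part of this patch:

	static unsigned int example_val = 64;
	static unsigned int example_min = 1;
	static unsigned int example_max = 4096;

	static struct ctl_table example_table[] = {
		{
			.procname	= "example-val",
			.data		= &example_val,
			.maxlen		= sizeof(example_val),
			.mode		= 0644,
			.proc_handler	= proc_douintvec_minmax,	/* still exported above */
			.extra1		= &example_min,			/* inclusive lower bound */
			.extra2		= &example_max,			/* inclusive upper bound */
		},
		{ }
	};

	/* registered from an init path, e.g. register_sysctl("fs", example_table) */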
diff --git a/kernel/taskstats.c b/kernel/taskstats.c index 4559e914452b..4e62a4a8fa91 100644 --- a/kernel/taskstats.c +++ b/kernel/taskstats.c | |||
@@ -194,11 +194,7 @@ static int fill_stats_for_pid(pid_t pid, struct taskstats *stats) | |||
194 | { | 194 | { |
195 | struct task_struct *tsk; | 195 | struct task_struct *tsk; |
196 | 196 | ||
197 | rcu_read_lock(); | 197 | tsk = find_get_task_by_vpid(pid); |
198 | tsk = find_task_by_vpid(pid); | ||
199 | if (tsk) | ||
200 | get_task_struct(tsk); | ||
201 | rcu_read_unlock(); | ||
202 | if (!tsk) | 198 | if (!tsk) |
203 | return -ESRCH; | 199 | return -ESRCH; |
204 | fill_stats(current_user_ns(), task_active_pid_ns(current), tsk, stats); | 200 | fill_stats(current_user_ns(), task_active_pid_ns(current), tsk, stats); |
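The removed lines open-coded "look up a task by pid in the current namespace and take a reference"; find_get_task_by_vpid() now does that in one call, returning either NULL or the task with its refcount elevated. A minimal usage sketch (the surrounding body is illustrative; the caller still has to drop the reference):

	struct task_struct *tsk;

	tsk = find_get_task_by_vpid(pid);	/* NULL if no such pid is visible here */
	if (!tsk)
		return -ESRCH;

	/* ... use tsk while holding the reference ... */

	put_task_struct(tsk);			/* drop the reference taken by the helper */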
diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c index ae0c8a411fe7..23788100e214 100644 --- a/kernel/time/hrtimer.c +++ b/kernel/time/hrtimer.c | |||
@@ -37,7 +37,6 @@ | |||
37 | #include <linux/hrtimer.h> | 37 | #include <linux/hrtimer.h> |
38 | #include <linux/notifier.h> | 38 | #include <linux/notifier.h> |
39 | #include <linux/syscalls.h> | 39 | #include <linux/syscalls.h> |
40 | #include <linux/kallsyms.h> | ||
41 | #include <linux/interrupt.h> | 40 | #include <linux/interrupt.h> |
42 | #include <linux/tick.h> | 41 | #include <linux/tick.h> |
43 | #include <linux/seq_file.h> | 42 | #include <linux/seq_file.h> |
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index 64d7c19d3167..b66c264d4194 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug | |||
@@ -217,7 +217,7 @@ config ENABLE_MUST_CHECK | |||
217 | config FRAME_WARN | 217 | config FRAME_WARN |
218 | int "Warn for stack frames larger than (needs gcc 4.4)" | 218 | int "Warn for stack frames larger than (needs gcc 4.4)" |
219 | range 0 8192 | 219 | range 0 8192 |
220 | default 0 if KASAN | 220 | default 3072 if KASAN_EXTRA |
221 | default 2048 if GCC_PLUGIN_LATENT_ENTROPY | 221 | default 2048 if GCC_PLUGIN_LATENT_ENTROPY |
222 | default 1280 if (!64BIT && PARISC) | 222 | default 1280 if (!64BIT && PARISC) |
223 | default 1024 if (!64BIT && !PARISC) | 223 | default 1024 if (!64BIT && !PARISC) |
@@ -1641,7 +1641,10 @@ config DMA_API_DEBUG | |||
1641 | 1641 | ||
1642 | If unsure, say N. | 1642 | If unsure, say N. |
1643 | 1643 | ||
1644 | menu "Runtime Testing" | 1644 | menuconfig RUNTIME_TESTING_MENU |
1645 | bool "Runtime Testing" | ||
1646 | |||
1647 | if RUNTIME_TESTING_MENU | ||
1645 | 1648 | ||
1646 | config LKDTM | 1649 | config LKDTM |
1647 | tristate "Linux Kernel Dump Test Tool Module" | 1650 | tristate "Linux Kernel Dump Test Tool Module" |
@@ -1841,7 +1844,7 @@ config TEST_BPF | |||
1841 | 1844 | ||
1842 | If unsure, say N. | 1845 | If unsure, say N. |
1843 | 1846 | ||
1844 | config TEST_FIND_BIT | 1847 | config FIND_BIT_BENCHMARK |
1845 | tristate "Test find_bit functions" | 1848 | tristate "Test find_bit functions" |
1846 | default n | 1849 | default n |
1847 | help | 1850 | help |
@@ -1929,7 +1932,7 @@ config TEST_DEBUG_VIRTUAL | |||
1929 | 1932 | ||
1930 | If unsure, say N. | 1933 | If unsure, say N. |
1931 | 1934 | ||
1932 | endmenu # runtime tests | 1935 | endif # RUNTIME_TESTING_MENU |
1933 | 1936 | ||
1934 | config MEMTEST | 1937 | config MEMTEST |
1935 | bool "Memtest" | 1938 | bool "Memtest" |
diff --git a/lib/Kconfig.kasan b/lib/Kconfig.kasan index bd38aab05929..3d35d062970d 100644 --- a/lib/Kconfig.kasan +++ b/lib/Kconfig.kasan | |||
@@ -20,6 +20,17 @@ config KASAN | |||
20 | Currently CONFIG_KASAN doesn't work with CONFIG_DEBUG_SLAB | 20 | Currently CONFIG_KASAN doesn't work with CONFIG_DEBUG_SLAB |
21 | (the resulting kernel does not boot). | 21 | (the resulting kernel does not boot). |
22 | 22 | ||
23 | config KASAN_EXTRA | ||
24 | bool "KAsan: extra checks" | ||
25 | depends on KASAN && DEBUG_KERNEL && !COMPILE_TEST | ||
26 | help | ||
27 | This enables further checks in the kernel address sanitizer, for now | ||
28 | it only includes the address-use-after-scope check that can lead | ||
29 | to excessive kernel stack usage, frame size warnings and longer | ||
30 | compile time. | ||
31 | https://gcc.gnu.org/bugzilla/show_bug.cgi?id=81715 has more | ||
32 | |||
33 | |||
23 | choice | 34 | choice |
24 | prompt "Instrumentation type" | 35 | prompt "Instrumentation type" |
25 | depends on KASAN | 36 | depends on KASAN |
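KASAN_EXTRA makes the costly address-use-after-scope instrumentation opt-in, which is why FRAME_WARN above now defaults to 3072 only for KASAN_EXTRA instead of being disabled for every KASAN build. The bug class it targets is a pointer that outlives the block-scoped variable it refers to; a minimal illustrative sketch (not taken from this patch), in the spirit of use_after_scope_test() in lib/test_kasan.c:

	static void use_after_scope_example(void)
	{
		volatile int *p;

		{
			volatile int y = 0;

			p = &y;
		}
		*p = 1;		/* y's scope has ended: flagged when KASAN_EXTRA is enabled */
	}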
diff --git a/lib/Makefile b/lib/Makefile index 7adb066692b3..a90d4fcd748f 100644 --- a/lib/Makefile +++ b/lib/Makefile | |||
@@ -46,8 +46,8 @@ obj-$(CONFIG_TEST_STRING_HELPERS) += test-string_helpers.o | |||
46 | obj-y += hexdump.o | 46 | obj-y += hexdump.o |
47 | obj-$(CONFIG_TEST_HEXDUMP) += test_hexdump.o | 47 | obj-$(CONFIG_TEST_HEXDUMP) += test_hexdump.o |
48 | obj-y += kstrtox.o | 48 | obj-y += kstrtox.o |
49 | obj-$(CONFIG_FIND_BIT_BENCHMARK) += find_bit_benchmark.o | ||
49 | obj-$(CONFIG_TEST_BPF) += test_bpf.o | 50 | obj-$(CONFIG_TEST_BPF) += test_bpf.o |
50 | obj-$(CONFIG_TEST_FIND_BIT) += test_find_bit.o | ||
51 | obj-$(CONFIG_TEST_FIRMWARE) += test_firmware.o | 51 | obj-$(CONFIG_TEST_FIRMWARE) += test_firmware.o |
52 | obj-$(CONFIG_TEST_SYSCTL) += test_sysctl.o | 52 | obj-$(CONFIG_TEST_SYSCTL) += test_sysctl.o |
53 | obj-$(CONFIG_TEST_HASH) += test_hash.o test_siphash.o | 53 | obj-$(CONFIG_TEST_HASH) += test_hash.o test_siphash.o |
diff --git a/lib/bitmap.c b/lib/bitmap.c index d8f0c094b18e..9e498c77ed0e 100644 --- a/lib/bitmap.c +++ b/lib/bitmap.c | |||
@@ -1106,111 +1106,80 @@ int bitmap_allocate_region(unsigned long *bitmap, unsigned int pos, int order) | |||
1106 | EXPORT_SYMBOL(bitmap_allocate_region); | 1106 | EXPORT_SYMBOL(bitmap_allocate_region); |
1107 | 1107 | ||
1108 | /** | 1108 | /** |
1109 | * bitmap_from_u32array - copy the contents of a u32 array of bits to bitmap | 1109 | * bitmap_copy_le - copy a bitmap, putting the bits into little-endian order. |
1110 | * @bitmap: array of unsigned longs, the destination bitmap, non NULL | 1110 | * @dst: destination buffer |
1111 | * @nbits: number of bits in @bitmap | 1111 | * @src: bitmap to copy |
1112 | * @buf: array of u32 (in host byte order), the source bitmap, non NULL | 1112 | * @nbits: number of bits in the bitmap |
1113 | * @nwords: number of u32 words in @buf | ||
1114 | * | ||
1115 | * copy min(nbits, 32*nwords) bits from @buf to @bitmap, remaining | ||
1116 | * bits between nword and nbits in @bitmap (if any) are cleared. In | ||
1117 | * last word of @bitmap, the bits beyond nbits (if any) are kept | ||
1118 | * unchanged. | ||
1119 | * | 1113 | * |
1120 | * Return the number of bits effectively copied. | 1114 | * Require nbits % BITS_PER_LONG == 0. |
1121 | */ | 1115 | */ |
1122 | unsigned int | 1116 | #ifdef __BIG_ENDIAN |
1123 | bitmap_from_u32array(unsigned long *bitmap, unsigned int nbits, | 1117 | void bitmap_copy_le(unsigned long *dst, const unsigned long *src, unsigned int nbits) |
1124 | const u32 *buf, unsigned int nwords) | ||
1125 | { | 1118 | { |
1126 | unsigned int dst_idx, src_idx; | 1119 | unsigned int i; |
1127 | |||
1128 | for (src_idx = dst_idx = 0; dst_idx < BITS_TO_LONGS(nbits); ++dst_idx) { | ||
1129 | unsigned long part = 0; | ||
1130 | |||
1131 | if (src_idx < nwords) | ||
1132 | part = buf[src_idx++]; | ||
1133 | |||
1134 | #if BITS_PER_LONG == 64 | ||
1135 | if (src_idx < nwords) | ||
1136 | part |= ((unsigned long) buf[src_idx++]) << 32; | ||
1137 | #endif | ||
1138 | |||
1139 | if (dst_idx < nbits/BITS_PER_LONG) | ||
1140 | bitmap[dst_idx] = part; | ||
1141 | else { | ||
1142 | unsigned long mask = BITMAP_LAST_WORD_MASK(nbits); | ||
1143 | 1120 | ||
1144 | bitmap[dst_idx] = (bitmap[dst_idx] & ~mask) | 1121 | for (i = 0; i < nbits/BITS_PER_LONG; i++) { |
1145 | | (part & mask); | 1122 | if (BITS_PER_LONG == 64) |
1146 | } | 1123 | dst[i] = cpu_to_le64(src[i]); |
1124 | else | ||
1125 | dst[i] = cpu_to_le32(src[i]); | ||
1147 | } | 1126 | } |
1148 | |||
1149 | return min_t(unsigned int, nbits, 32*nwords); | ||
1150 | } | 1127 | } |
1151 | EXPORT_SYMBOL(bitmap_from_u32array); | 1128 | EXPORT_SYMBOL(bitmap_copy_le); |
1129 | #endif | ||
1152 | 1130 | ||
1131 | #if BITS_PER_LONG == 64 | ||
1153 | /** | 1132 | /** |
1154 | * bitmap_to_u32array - copy the contents of bitmap to a u32 array of bits | 1133 | * bitmap_from_arr32 - copy the contents of u32 array of bits to bitmap |
1155 | * @buf: array of u32 (in host byte order), the dest bitmap, non NULL | 1134 | * @bitmap: array of unsigned longs, the destination bitmap |
1156 | * @nwords: number of u32 words in @buf | 1135 | * @buf: array of u32 (in host byte order), the source bitmap |
1157 | * @bitmap: array of unsigned longs, the source bitmap, non NULL | ||
1158 | * @nbits: number of bits in @bitmap | 1136 | * @nbits: number of bits in @bitmap |
1159 | * | ||
1160 | * copy min(nbits, 32*nwords) bits from @bitmap to @buf. Remaining | ||
1161 | * bits after nbits in @buf (if any) are cleared. | ||
1162 | * | ||
1163 | * Return the number of bits effectively copied. | ||
1164 | */ | 1137 | */ |
1165 | unsigned int | 1138 | void bitmap_from_arr32(unsigned long *bitmap, const u32 *buf, |
1166 | bitmap_to_u32array(u32 *buf, unsigned int nwords, | 1139 | unsigned int nbits) |
1167 | const unsigned long *bitmap, unsigned int nbits) | ||
1168 | { | 1140 | { |
1169 | unsigned int dst_idx = 0, src_idx = 0; | 1141 | unsigned int i, halfwords; |
1170 | |||
1171 | while (dst_idx < nwords) { | ||
1172 | unsigned long part = 0; | ||
1173 | |||
1174 | if (src_idx < BITS_TO_LONGS(nbits)) { | ||
1175 | part = bitmap[src_idx]; | ||
1176 | if (src_idx >= nbits/BITS_PER_LONG) | ||
1177 | part &= BITMAP_LAST_WORD_MASK(nbits); | ||
1178 | src_idx++; | ||
1179 | } | ||
1180 | 1142 | ||
1181 | buf[dst_idx++] = part & 0xffffffffUL; | 1143 | if (!nbits) |
1144 | return; | ||
1182 | 1145 | ||
1183 | #if BITS_PER_LONG == 64 | 1146 | halfwords = DIV_ROUND_UP(nbits, 32); |
1184 | if (dst_idx < nwords) { | 1147 | for (i = 0; i < halfwords; i++) { |
1185 | part >>= 32; | 1148 | bitmap[i/2] = (unsigned long) buf[i]; |
1186 | buf[dst_idx++] = part & 0xffffffffUL; | 1149 | if (++i < halfwords) |
1187 | } | 1150 | bitmap[i/2] |= ((unsigned long) buf[i]) << 32; |
1188 | #endif | ||
1189 | } | 1151 | } |
1190 | 1152 | ||
1191 | return min_t(unsigned int, nbits, 32*nwords); | 1153 | /* Clear tail bits in last word beyond nbits. */ |
1154 | if (nbits % BITS_PER_LONG) | ||
1155 | bitmap[(halfwords - 1) / 2] &= BITMAP_LAST_WORD_MASK(nbits); | ||
1192 | } | 1156 | } |
1193 | EXPORT_SYMBOL(bitmap_to_u32array); | 1157 | EXPORT_SYMBOL(bitmap_from_arr32); |
1194 | 1158 | ||
1195 | /** | 1159 | /** |
1196 | * bitmap_copy_le - copy a bitmap, putting the bits into little-endian order. | 1160 | * bitmap_to_arr32 - copy the contents of bitmap to a u32 array of bits |
1197 | * @dst: destination buffer | 1161 | * @buf: array of u32 (in host byte order), the dest bitmap |
1198 | * @src: bitmap to copy | 1162 | * @bitmap: array of unsigned longs, the source bitmap |
1199 | * @nbits: number of bits in the bitmap | 1163 | * @nbits: number of bits in @bitmap |
1200 | * | ||
1201 | * Require nbits % BITS_PER_LONG == 0. | ||
1202 | */ | 1164 | */ |
1203 | #ifdef __BIG_ENDIAN | 1165 | void bitmap_to_arr32(u32 *buf, const unsigned long *bitmap, unsigned int nbits) |
1204 | void bitmap_copy_le(unsigned long *dst, const unsigned long *src, unsigned int nbits) | ||
1205 | { | 1166 | { |
1206 | unsigned int i; | 1167 | unsigned int i, halfwords; |
1207 | 1168 | ||
1208 | for (i = 0; i < nbits/BITS_PER_LONG; i++) { | 1169 | if (!nbits) |
1209 | if (BITS_PER_LONG == 64) | 1170 | return; |
1210 | dst[i] = cpu_to_le64(src[i]); | 1171 | |
1211 | else | 1172 | halfwords = DIV_ROUND_UP(nbits, 32); |
1212 | dst[i] = cpu_to_le32(src[i]); | 1173 | for (i = 0; i < halfwords; i++) { |
1174 | buf[i] = (u32) (bitmap[i/2] & UINT_MAX); | ||
1175 | if (++i < halfwords) | ||
1176 | buf[i] = (u32) (bitmap[i/2] >> 32); | ||
1213 | } | 1177 | } |
1178 | |||
1179 | /* Clear tail bits in last element of array beyond nbits. */ | ||
1180 | if (nbits % BITS_PER_LONG) | ||
1181 | buf[halfwords - 1] &= (u32) (UINT_MAX >> ((-nbits) & 31)); | ||
1214 | } | 1182 | } |
1215 | EXPORT_SYMBOL(bitmap_copy_le); | 1183 | EXPORT_SYMBOL(bitmap_to_arr32); |
1184 | |||
1216 | #endif | 1185 | #endif |
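bitmap_from_arr32() and bitmap_to_arr32() replace the old *_u32array helpers: the u32 array is assumed to hold DIV_ROUND_UP(nbits, 32) words, nothing is returned, and tail bits beyond nbits are cleared on both conversions. The out-of-line versions above are built only for BITS_PER_LONG == 64; on 32-bit kernels the conversion is a straight copy handled outside this file. A minimal round-trip sketch with illustrative sizes:

	DECLARE_BITMAP(bmap, 100);
	u32 arr[DIV_ROUND_UP(100, 32)];		/* 4 u32 words cover 100 bits */

	bitmap_zero(bmap, 100);
	bitmap_set(bmap, 10, 5);		/* set bits 10..14 */

	bitmap_to_arr32(arr, bmap, 100);	/* host-order u32 words, tail cleared */
	bitmap_from_arr32(bmap, arr, 100);	/* back again; bits >= 100 stay clear */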
diff --git a/lib/cpumask.c b/lib/cpumask.c index 35fe142ebb5e..beca6244671a 100644 --- a/lib/cpumask.c +++ b/lib/cpumask.c | |||
@@ -33,10 +33,11 @@ EXPORT_SYMBOL(cpumask_next); | |||
33 | int cpumask_next_and(int n, const struct cpumask *src1p, | 33 | int cpumask_next_and(int n, const struct cpumask *src1p, |
34 | const struct cpumask *src2p) | 34 | const struct cpumask *src2p) |
35 | { | 35 | { |
36 | while ((n = cpumask_next(n, src1p)) < nr_cpu_ids) | 36 | /* -1 is a legal arg here. */ |
37 | if (cpumask_test_cpu(n, src2p)) | 37 | if (n != -1) |
38 | break; | 38 | cpumask_check(n); |
39 | return n; | 39 | return find_next_and_bit(cpumask_bits(src1p), cpumask_bits(src2p), |
40 | nr_cpumask_bits, n + 1); | ||
40 | } | 41 | } |
41 | EXPORT_SYMBOL(cpumask_next_and); | 42 | EXPORT_SYMBOL(cpumask_next_and); |
42 | 43 | ||
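cpumask_next_and() no longer probes src2p one candidate bit at a time; both masks go to the new find_next_and_bit() in a single pass, and the cpumask_check() keeps the usual "-1 means start before bit 0" convention legal. Callers are unchanged; the common pattern is the for_each_cpu_and() iterator built on this helper. An illustrative sketch with hypothetical function and mask names:

	static unsigned int count_common_cpus(const struct cpumask *a,
					      const struct cpumask *b)
	{
		unsigned int n = 0;
		int cpu;

		for_each_cpu_and(cpu, a, b)	/* expands to cpumask_next_and() */
			n++;

		return n;
	}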
diff --git a/lib/find_bit.c b/lib/find_bit.c index 6ed74f78380c..ee3df93ba69a 100644 --- a/lib/find_bit.c +++ b/lib/find_bit.c | |||
@@ -21,22 +21,29 @@ | |||
21 | #include <linux/export.h> | 21 | #include <linux/export.h> |
22 | #include <linux/kernel.h> | 22 | #include <linux/kernel.h> |
23 | 23 | ||
24 | #if !defined(find_next_bit) || !defined(find_next_zero_bit) | 24 | #if !defined(find_next_bit) || !defined(find_next_zero_bit) || \ |
25 | !defined(find_next_and_bit) | ||
25 | 26 | ||
26 | /* | 27 | /* |
27 | * This is a common helper function for find_next_bit and | 28 | * This is a common helper function for find_next_bit, find_next_zero_bit, and |
28 | * find_next_zero_bit. The difference is the "invert" argument, which | 29 | * find_next_and_bit. The differences are: |
29 | * is XORed with each fetched word before searching it for one bits. | 30 | * - The "invert" argument, which is XORed with each fetched word before |
31 | * searching it for one bits. | ||
32 | * - The optional "addr2", which is anded with "addr1" if present. | ||
30 | */ | 33 | */ |
31 | static unsigned long _find_next_bit(const unsigned long *addr, | 34 | static inline unsigned long _find_next_bit(const unsigned long *addr1, |
32 | unsigned long nbits, unsigned long start, unsigned long invert) | 35 | const unsigned long *addr2, unsigned long nbits, |
36 | unsigned long start, unsigned long invert) | ||
33 | { | 37 | { |
34 | unsigned long tmp; | 38 | unsigned long tmp; |
35 | 39 | ||
36 | if (unlikely(start >= nbits)) | 40 | if (unlikely(start >= nbits)) |
37 | return nbits; | 41 | return nbits; |
38 | 42 | ||
39 | tmp = addr[start / BITS_PER_LONG] ^ invert; | 43 | tmp = addr1[start / BITS_PER_LONG]; |
44 | if (addr2) | ||
45 | tmp &= addr2[start / BITS_PER_LONG]; | ||
46 | tmp ^= invert; | ||
40 | 47 | ||
41 | /* Handle 1st word. */ | 48 | /* Handle 1st word. */ |
42 | tmp &= BITMAP_FIRST_WORD_MASK(start); | 49 | tmp &= BITMAP_FIRST_WORD_MASK(start); |
@@ -47,7 +54,10 @@ static unsigned long _find_next_bit(const unsigned long *addr, | |||
47 | if (start >= nbits) | 54 | if (start >= nbits) |
48 | return nbits; | 55 | return nbits; |
49 | 56 | ||
50 | tmp = addr[start / BITS_PER_LONG] ^ invert; | 57 | tmp = addr1[start / BITS_PER_LONG]; |
58 | if (addr2) | ||
59 | tmp &= addr2[start / BITS_PER_LONG]; | ||
60 | tmp ^= invert; | ||
51 | } | 61 | } |
52 | 62 | ||
53 | return min(start + __ffs(tmp), nbits); | 63 | return min(start + __ffs(tmp), nbits); |
@@ -61,7 +71,7 @@ static unsigned long _find_next_bit(const unsigned long *addr, | |||
61 | unsigned long find_next_bit(const unsigned long *addr, unsigned long size, | 71 | unsigned long find_next_bit(const unsigned long *addr, unsigned long size, |
62 | unsigned long offset) | 72 | unsigned long offset) |
63 | { | 73 | { |
64 | return _find_next_bit(addr, size, offset, 0UL); | 74 | return _find_next_bit(addr, NULL, size, offset, 0UL); |
65 | } | 75 | } |
66 | EXPORT_SYMBOL(find_next_bit); | 76 | EXPORT_SYMBOL(find_next_bit); |
67 | #endif | 77 | #endif |
@@ -70,11 +80,21 @@ EXPORT_SYMBOL(find_next_bit); | |||
70 | unsigned long find_next_zero_bit(const unsigned long *addr, unsigned long size, | 80 | unsigned long find_next_zero_bit(const unsigned long *addr, unsigned long size, |
71 | unsigned long offset) | 81 | unsigned long offset) |
72 | { | 82 | { |
73 | return _find_next_bit(addr, size, offset, ~0UL); | 83 | return _find_next_bit(addr, NULL, size, offset, ~0UL); |
74 | } | 84 | } |
75 | EXPORT_SYMBOL(find_next_zero_bit); | 85 | EXPORT_SYMBOL(find_next_zero_bit); |
76 | #endif | 86 | #endif |
77 | 87 | ||
88 | #if !defined(find_next_and_bit) | ||
89 | unsigned long find_next_and_bit(const unsigned long *addr1, | ||
90 | const unsigned long *addr2, unsigned long size, | ||
91 | unsigned long offset) | ||
92 | { | ||
93 | return _find_next_bit(addr1, addr2, size, offset, 0UL); | ||
94 | } | ||
95 | EXPORT_SYMBOL(find_next_and_bit); | ||
96 | #endif | ||
97 | |||
78 | #ifndef find_first_bit | 98 | #ifndef find_first_bit |
79 | /* | 99 | /* |
80 | * Find the first set bit in a memory region. | 100 | * Find the first set bit in a memory region. |
@@ -146,15 +166,19 @@ static inline unsigned long ext2_swab(const unsigned long y) | |||
146 | } | 166 | } |
147 | 167 | ||
148 | #if !defined(find_next_bit_le) || !defined(find_next_zero_bit_le) | 168 | #if !defined(find_next_bit_le) || !defined(find_next_zero_bit_le) |
149 | static unsigned long _find_next_bit_le(const unsigned long *addr, | 169 | static inline unsigned long _find_next_bit_le(const unsigned long *addr1, |
150 | unsigned long nbits, unsigned long start, unsigned long invert) | 170 | const unsigned long *addr2, unsigned long nbits, |
171 | unsigned long start, unsigned long invert) | ||
151 | { | 172 | { |
152 | unsigned long tmp; | 173 | unsigned long tmp; |
153 | 174 | ||
154 | if (unlikely(start >= nbits)) | 175 | if (unlikely(start >= nbits)) |
155 | return nbits; | 176 | return nbits; |
156 | 177 | ||
157 | tmp = addr[start / BITS_PER_LONG] ^ invert; | 178 | tmp = addr1[start / BITS_PER_LONG]; |
179 | if (addr2) | ||
180 | tmp &= addr2[start / BITS_PER_LONG]; | ||
181 | tmp ^= invert; | ||
158 | 182 | ||
159 | /* Handle 1st word. */ | 183 | /* Handle 1st word. */ |
160 | tmp &= ext2_swab(BITMAP_FIRST_WORD_MASK(start)); | 184 | tmp &= ext2_swab(BITMAP_FIRST_WORD_MASK(start)); |
@@ -165,7 +189,10 @@ static unsigned long _find_next_bit_le(const unsigned long *addr, | |||
165 | if (start >= nbits) | 189 | if (start >= nbits) |
166 | return nbits; | 190 | return nbits; |
167 | 191 | ||
168 | tmp = addr[start / BITS_PER_LONG] ^ invert; | 192 | tmp = addr1[start / BITS_PER_LONG]; |
193 | if (addr2) | ||
194 | tmp &= addr2[start / BITS_PER_LONG]; | ||
195 | tmp ^= invert; | ||
169 | } | 196 | } |
170 | 197 | ||
171 | return min(start + __ffs(ext2_swab(tmp)), nbits); | 198 | return min(start + __ffs(ext2_swab(tmp)), nbits); |
@@ -176,7 +203,7 @@ static unsigned long _find_next_bit_le(const unsigned long *addr, | |||
176 | unsigned long find_next_zero_bit_le(const void *addr, unsigned | 203 | unsigned long find_next_zero_bit_le(const void *addr, unsigned |
177 | long size, unsigned long offset) | 204 | long size, unsigned long offset) |
178 | { | 205 | { |
179 | return _find_next_bit_le(addr, size, offset, ~0UL); | 206 | return _find_next_bit_le(addr, NULL, size, offset, ~0UL); |
180 | } | 207 | } |
181 | EXPORT_SYMBOL(find_next_zero_bit_le); | 208 | EXPORT_SYMBOL(find_next_zero_bit_le); |
182 | #endif | 209 | #endif |
@@ -185,7 +212,7 @@ EXPORT_SYMBOL(find_next_zero_bit_le); | |||
185 | unsigned long find_next_bit_le(const void *addr, unsigned | 212 | unsigned long find_next_bit_le(const void *addr, unsigned |
186 | long size, unsigned long offset) | 213 | long size, unsigned long offset) |
187 | { | 214 | { |
188 | return _find_next_bit_le(addr, size, offset, 0UL); | 215 | return _find_next_bit_le(addr, NULL, size, offset, 0UL); |
189 | } | 216 | } |
190 | EXPORT_SYMBOL(find_next_bit_le); | 217 | EXPORT_SYMBOL(find_next_bit_le); |
191 | #endif | 218 | #endif |
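find_next_and_bit() returns the first position at or after @offset that is set in both bitmaps, the same result as running find_next_bit() over a temporary AND of the two inputs but without building that temporary. An illustrative sketch over two raw bitmaps (names are hypothetical):

	static unsigned int count_common_bits(const unsigned long *map1,
					      const unsigned long *map2,
					      unsigned long nbits)
	{
		unsigned long i;
		unsigned int n = 0;

		for (i = find_next_and_bit(map1, map2, nbits, 0);
		     i < nbits;
		     i = find_next_and_bit(map1, map2, nbits, i + 1))
			n++;		/* bit i is set in both map1 and map2 */

		return n;
	}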
diff --git a/lib/test_find_bit.c b/lib/find_bit_benchmark.c index f4394a36f9aa..5985a25e6cbc 100644 --- a/lib/test_find_bit.c +++ b/lib/find_bit_benchmark.c | |||
@@ -35,6 +35,7 @@ | |||
35 | #define SPARSE 500 | 35 | #define SPARSE 500 |
36 | 36 | ||
37 | static DECLARE_BITMAP(bitmap, BITMAP_LEN) __initdata; | 37 | static DECLARE_BITMAP(bitmap, BITMAP_LEN) __initdata; |
38 | static DECLARE_BITMAP(bitmap2, BITMAP_LEN) __initdata; | ||
38 | 39 | ||
39 | /* | 40 | /* |
40 | * This is Schlemiel the Painter's algorithm. It should be called after | 41 | * This is Schlemiel the Painter's algorithm. It should be called after |
@@ -43,16 +44,15 @@ static DECLARE_BITMAP(bitmap, BITMAP_LEN) __initdata; | |||
43 | static int __init test_find_first_bit(void *bitmap, unsigned long len) | 44 | static int __init test_find_first_bit(void *bitmap, unsigned long len) |
44 | { | 45 | { |
45 | unsigned long i, cnt; | 46 | unsigned long i, cnt; |
46 | cycles_t cycles; | 47 | ktime_t time; |
47 | 48 | ||
48 | cycles = get_cycles(); | 49 | time = ktime_get(); |
49 | for (cnt = i = 0; i < len; cnt++) { | 50 | for (cnt = i = 0; i < len; cnt++) { |
50 | i = find_first_bit(bitmap, len); | 51 | i = find_first_bit(bitmap, len); |
51 | __clear_bit(i, bitmap); | 52 | __clear_bit(i, bitmap); |
52 | } | 53 | } |
53 | cycles = get_cycles() - cycles; | 54 | time = ktime_get() - time; |
54 | pr_err("find_first_bit:\t\t%llu cycles,\t%ld iterations\n", | 55 | pr_err("find_first_bit: %18llu ns, %6ld iterations\n", time, cnt); |
55 | (u64)cycles, cnt); | ||
56 | 56 | ||
57 | return 0; | 57 | return 0; |
58 | } | 58 | } |
@@ -60,14 +60,13 @@ static int __init test_find_first_bit(void *bitmap, unsigned long len) | |||
60 | static int __init test_find_next_bit(const void *bitmap, unsigned long len) | 60 | static int __init test_find_next_bit(const void *bitmap, unsigned long len) |
61 | { | 61 | { |
62 | unsigned long i, cnt; | 62 | unsigned long i, cnt; |
63 | cycles_t cycles; | 63 | ktime_t time; |
64 | 64 | ||
65 | cycles = get_cycles(); | 65 | time = ktime_get(); |
66 | for (cnt = i = 0; i < BITMAP_LEN; cnt++) | 66 | for (cnt = i = 0; i < BITMAP_LEN; cnt++) |
67 | i = find_next_bit(bitmap, BITMAP_LEN, i) + 1; | 67 | i = find_next_bit(bitmap, BITMAP_LEN, i) + 1; |
68 | cycles = get_cycles() - cycles; | 68 | time = ktime_get() - time; |
69 | pr_err("find_next_bit:\t\t%llu cycles,\t%ld iterations\n", | 69 | pr_err("find_next_bit: %18llu ns, %6ld iterations\n", time, cnt); |
70 | (u64)cycles, cnt); | ||
71 | 70 | ||
72 | return 0; | 71 | return 0; |
73 | } | 72 | } |
@@ -75,14 +74,13 @@ static int __init test_find_next_bit(const void *bitmap, unsigned long len) | |||
75 | static int __init test_find_next_zero_bit(const void *bitmap, unsigned long len) | 74 | static int __init test_find_next_zero_bit(const void *bitmap, unsigned long len) |
76 | { | 75 | { |
77 | unsigned long i, cnt; | 76 | unsigned long i, cnt; |
78 | cycles_t cycles; | 77 | ktime_t time; |
79 | 78 | ||
80 | cycles = get_cycles(); | 79 | time = ktime_get(); |
81 | for (cnt = i = 0; i < BITMAP_LEN; cnt++) | 80 | for (cnt = i = 0; i < BITMAP_LEN; cnt++) |
82 | i = find_next_zero_bit(bitmap, len, i) + 1; | 81 | i = find_next_zero_bit(bitmap, len, i) + 1; |
83 | cycles = get_cycles() - cycles; | 82 | time = ktime_get() - time; |
84 | pr_err("find_next_zero_bit:\t%llu cycles,\t%ld iterations\n", | 83 | pr_err("find_next_zero_bit: %18llu ns, %6ld iterations\n", time, cnt); |
85 | (u64)cycles, cnt); | ||
86 | 84 | ||
87 | return 0; | 85 | return 0; |
88 | } | 86 | } |
@@ -90,9 +88,9 @@ static int __init test_find_next_zero_bit(const void *bitmap, unsigned long len) | |||
90 | static int __init test_find_last_bit(const void *bitmap, unsigned long len) | 88 | static int __init test_find_last_bit(const void *bitmap, unsigned long len) |
91 | { | 89 | { |
92 | unsigned long l, cnt = 0; | 90 | unsigned long l, cnt = 0; |
93 | cycles_t cycles; | 91 | ktime_t time; |
94 | 92 | ||
95 | cycles = get_cycles(); | 93 | time = ktime_get(); |
96 | do { | 94 | do { |
97 | cnt++; | 95 | cnt++; |
98 | l = find_last_bit(bitmap, len); | 96 | l = find_last_bit(bitmap, len); |
@@ -100,9 +98,24 @@ static int __init test_find_last_bit(const void *bitmap, unsigned long len) | |||
100 | break; | 98 | break; |
101 | len = l; | 99 | len = l; |
102 | } while (len); | 100 | } while (len); |
101 | time = ktime_get() - time; | ||
102 | pr_err("find_last_bit: %18llu ns, %6ld iterations\n", time, cnt); | ||
103 | |||
104 | return 0; | ||
105 | } | ||
106 | |||
107 | static int __init test_find_next_and_bit(const void *bitmap, | ||
108 | const void *bitmap2, unsigned long len) | ||
109 | { | ||
110 | unsigned long i, cnt; | ||
111 | cycles_t cycles; | ||
112 | |||
113 | cycles = get_cycles(); | ||
114 | for (cnt = i = 0; i < BITMAP_LEN; cnt++) | ||
115 | i = find_next_and_bit(bitmap, bitmap2, BITMAP_LEN, i+1); | ||
103 | cycles = get_cycles() - cycles; | 116 | cycles = get_cycles() - cycles; |
104 | pr_err("find_last_bit:\t\t%llu cycles,\t%ld iterations\n", | 117 | pr_err("find_next_and_bit:\t\t%llu cycles, %ld iterations\n", |
105 | (u64)cycles, cnt); | 118 | (u64)cycles, cnt); |
106 | 119 | ||
107 | return 0; | 120 | return 0; |
108 | } | 121 | } |
@@ -114,31 +127,36 @@ static int __init find_bit_test(void) | |||
114 | pr_err("\nStart testing find_bit() with random-filled bitmap\n"); | 127 | pr_err("\nStart testing find_bit() with random-filled bitmap\n"); |
115 | 128 | ||
116 | get_random_bytes(bitmap, sizeof(bitmap)); | 129 | get_random_bytes(bitmap, sizeof(bitmap)); |
130 | get_random_bytes(bitmap2, sizeof(bitmap2)); | ||
117 | 131 | ||
118 | test_find_next_bit(bitmap, BITMAP_LEN); | 132 | test_find_next_bit(bitmap, BITMAP_LEN); |
119 | test_find_next_zero_bit(bitmap, BITMAP_LEN); | 133 | test_find_next_zero_bit(bitmap, BITMAP_LEN); |
120 | test_find_last_bit(bitmap, BITMAP_LEN); | 134 | test_find_last_bit(bitmap, BITMAP_LEN); |
121 | test_find_first_bit(bitmap, BITMAP_LEN); | 135 | test_find_first_bit(bitmap, BITMAP_LEN); |
136 | test_find_next_and_bit(bitmap, bitmap2, BITMAP_LEN); | ||
122 | 137 | ||
123 | pr_err("\nStart testing find_bit() with sparse bitmap\n"); | 138 | pr_err("\nStart testing find_bit() with sparse bitmap\n"); |
124 | 139 | ||
125 | bitmap_zero(bitmap, BITMAP_LEN); | 140 | bitmap_zero(bitmap, BITMAP_LEN); |
141 | bitmap_zero(bitmap2, BITMAP_LEN); | ||
126 | 142 | ||
127 | while (nbits--) | 143 | while (nbits--) { |
128 | __set_bit(prandom_u32() % BITMAP_LEN, bitmap); | 144 | __set_bit(prandom_u32() % BITMAP_LEN, bitmap); |
145 | __set_bit(prandom_u32() % BITMAP_LEN, bitmap2); | ||
146 | } | ||
129 | 147 | ||
130 | test_find_next_bit(bitmap, BITMAP_LEN); | 148 | test_find_next_bit(bitmap, BITMAP_LEN); |
131 | test_find_next_zero_bit(bitmap, BITMAP_LEN); | 149 | test_find_next_zero_bit(bitmap, BITMAP_LEN); |
132 | test_find_last_bit(bitmap, BITMAP_LEN); | 150 | test_find_last_bit(bitmap, BITMAP_LEN); |
133 | test_find_first_bit(bitmap, BITMAP_LEN); | 151 | test_find_first_bit(bitmap, BITMAP_LEN); |
152 | test_find_next_and_bit(bitmap, bitmap2, BITMAP_LEN); | ||
134 | 153 | ||
135 | return 0; | 154 | /* |
155 | * Everything is OK. Return error just to let user run benchmark | ||
156 | * again without annoying rmmod. | ||
157 | */ | ||
158 | return -EINVAL; | ||
136 | } | 159 | } |
137 | module_init(find_bit_test); | 160 | module_init(find_bit_test); |
138 | 161 | ||
139 | static void __exit test_find_bit_cleanup(void) | ||
140 | { | ||
141 | } | ||
142 | module_exit(test_find_bit_cleanup); | ||
143 | |||
144 | MODULE_LICENSE("GPL"); | 162 | MODULE_LICENSE("GPL"); |
diff --git a/lib/stackdepot.c b/lib/stackdepot.c index f87d138e9672..e513459a5601 100644 --- a/lib/stackdepot.c +++ b/lib/stackdepot.c | |||
@@ -163,6 +163,21 @@ static inline u32 hash_stack(unsigned long *entries, unsigned int size) | |||
163 | STACK_HASH_SEED); | 163 | STACK_HASH_SEED); |
164 | } | 164 | } |
165 | 165 | ||
166 | /* Use our own, non-instrumented version of memcmp(). | ||
167 | * | ||
168 | * We actually don't care about the order, just the equality. | ||
169 | */ | ||
170 | static inline | ||
171 | int stackdepot_memcmp(const unsigned long *u1, const unsigned long *u2, | ||
172 | unsigned int n) | ||
173 | { | ||
174 | for ( ; n-- ; u1++, u2++) { | ||
175 | if (*u1 != *u2) | ||
176 | return 1; | ||
177 | } | ||
178 | return 0; | ||
179 | } | ||
180 | |||
166 | /* Find a stack that is equal to the one stored in entries in the hash */ | 181 | /* Find a stack that is equal to the one stored in entries in the hash */ |
167 | static inline struct stack_record *find_stack(struct stack_record *bucket, | 182 | static inline struct stack_record *find_stack(struct stack_record *bucket, |
168 | unsigned long *entries, int size, | 183 | unsigned long *entries, int size, |
@@ -173,10 +188,8 @@ static inline struct stack_record *find_stack(struct stack_record *bucket, | |||
173 | for (found = bucket; found; found = found->next) { | 188 | for (found = bucket; found; found = found->next) { |
174 | if (found->hash == hash && | 189 | if (found->hash == hash && |
175 | found->size == size && | 190 | found->size == size && |
176 | !memcmp(entries, found->entries, | 191 | !stackdepot_memcmp(entries, found->entries, size)) |
177 | size * sizeof(unsigned long))) { | ||
178 | return found; | 192 | return found; |
179 | } | ||
180 | } | 193 | } |
181 | return NULL; | 194 | return NULL; |
182 | } | 195 | } |
diff --git a/lib/test_bitmap.c b/lib/test_bitmap.c index aa1f2669bdd5..b3f235baa05d 100644 --- a/lib/test_bitmap.c +++ b/lib/test_bitmap.c | |||
@@ -23,7 +23,7 @@ __check_eq_uint(const char *srcfile, unsigned int line, | |||
23 | const unsigned int exp_uint, unsigned int x) | 23 | const unsigned int exp_uint, unsigned int x) |
24 | { | 24 | { |
25 | if (exp_uint != x) { | 25 | if (exp_uint != x) { |
26 | pr_warn("[%s:%u] expected %u, got %u\n", | 26 | pr_err("[%s:%u] expected %u, got %u\n", |
27 | srcfile, line, exp_uint, x); | 27 | srcfile, line, exp_uint, x); |
28 | return false; | 28 | return false; |
29 | } | 29 | } |
@@ -33,19 +33,13 @@ __check_eq_uint(const char *srcfile, unsigned int line, | |||
33 | 33 | ||
34 | static bool __init | 34 | static bool __init |
35 | __check_eq_bitmap(const char *srcfile, unsigned int line, | 35 | __check_eq_bitmap(const char *srcfile, unsigned int line, |
36 | const unsigned long *exp_bmap, unsigned int exp_nbits, | 36 | const unsigned long *exp_bmap, const unsigned long *bmap, |
37 | const unsigned long *bmap, unsigned int nbits) | 37 | unsigned int nbits) |
38 | { | 38 | { |
39 | if (exp_nbits != nbits) { | ||
40 | pr_warn("[%s:%u] bitmap length mismatch: expected %u, got %u\n", | ||
41 | srcfile, line, exp_nbits, nbits); | ||
42 | return false; | ||
43 | } | ||
44 | |||
45 | if (!bitmap_equal(exp_bmap, bmap, nbits)) { | 39 | if (!bitmap_equal(exp_bmap, bmap, nbits)) { |
46 | pr_warn("[%s:%u] bitmaps contents differ: expected \"%*pbl\", got \"%*pbl\"\n", | 40 | pr_warn("[%s:%u] bitmaps contents differ: expected \"%*pbl\", got \"%*pbl\"\n", |
47 | srcfile, line, | 41 | srcfile, line, |
48 | exp_nbits, exp_bmap, nbits, bmap); | 42 | nbits, exp_bmap, nbits, bmap); |
49 | return false; | 43 | return false; |
50 | } | 44 | } |
51 | return true; | 45 | return true; |
@@ -69,6 +63,10 @@ __check_eq_pbl(const char *srcfile, unsigned int line, | |||
69 | static bool __init | 63 | static bool __init |
70 | __check_eq_u32_array(const char *srcfile, unsigned int line, | 64 | __check_eq_u32_array(const char *srcfile, unsigned int line, |
71 | const u32 *exp_arr, unsigned int exp_len, | 65 | const u32 *exp_arr, unsigned int exp_len, |
66 | const u32 *arr, unsigned int len) __used; | ||
67 | static bool __init | ||
68 | __check_eq_u32_array(const char *srcfile, unsigned int line, | ||
69 | const u32 *exp_arr, unsigned int exp_len, | ||
72 | const u32 *arr, unsigned int len) | 70 | const u32 *arr, unsigned int len) |
73 | { | 71 | { |
74 | if (exp_len != len) { | 72 | if (exp_len != len) { |
@@ -107,7 +105,65 @@ __check_eq_u32_array(const char *srcfile, unsigned int line, | |||
107 | #define expect_eq_pbl(...) __expect_eq(pbl, ##__VA_ARGS__) | 105 | #define expect_eq_pbl(...) __expect_eq(pbl, ##__VA_ARGS__) |
108 | #define expect_eq_u32_array(...) __expect_eq(u32_array, ##__VA_ARGS__) | 106 | #define expect_eq_u32_array(...) __expect_eq(u32_array, ##__VA_ARGS__) |
109 | 107 | ||
110 | static void __init test_zero_fill_copy(void) | 108 | static void __init test_zero_clear(void) |
109 | { | ||
110 | DECLARE_BITMAP(bmap, 1024); | ||
111 | |||
112 | /* Known way to set all bits */ | ||
113 | memset(bmap, 0xff, 128); | ||
114 | |||
115 | expect_eq_pbl("0-22", bmap, 23); | ||
116 | expect_eq_pbl("0-1023", bmap, 1024); | ||
117 | |||
118 | /* single-word bitmaps */ | ||
119 | bitmap_clear(bmap, 0, 9); | ||
120 | expect_eq_pbl("9-1023", bmap, 1024); | ||
121 | |||
122 | bitmap_zero(bmap, 35); | ||
123 | expect_eq_pbl("64-1023", bmap, 1024); | ||
124 | |||
125 | /* cross boundaries operations */ | ||
126 | bitmap_clear(bmap, 79, 19); | ||
127 | expect_eq_pbl("64-78,98-1023", bmap, 1024); | ||
128 | |||
129 | bitmap_zero(bmap, 115); | ||
130 | expect_eq_pbl("128-1023", bmap, 1024); | ||
131 | |||
132 | /* Zeroing entire area */ | ||
133 | bitmap_zero(bmap, 1024); | ||
134 | expect_eq_pbl("", bmap, 1024); | ||
135 | } | ||
136 | |||
137 | static void __init test_fill_set(void) | ||
138 | { | ||
139 | DECLARE_BITMAP(bmap, 1024); | ||
140 | |||
141 | /* Known way to clear all bits */ | ||
142 | memset(bmap, 0x00, 128); | ||
143 | |||
144 | expect_eq_pbl("", bmap, 23); | ||
145 | expect_eq_pbl("", bmap, 1024); | ||
146 | |||
147 | /* single-word bitmaps */ | ||
148 | bitmap_set(bmap, 0, 9); | ||
149 | expect_eq_pbl("0-8", bmap, 1024); | ||
150 | |||
151 | bitmap_fill(bmap, 35); | ||
152 | expect_eq_pbl("0-63", bmap, 1024); | ||
153 | |||
154 | /* cross boundaries operations */ | ||
155 | bitmap_set(bmap, 79, 19); | ||
156 | expect_eq_pbl("0-63,79-97", bmap, 1024); | ||
157 | |||
158 | bitmap_fill(bmap, 115); | ||
159 | expect_eq_pbl("0-127", bmap, 1024); | ||
160 | |||
161 | /* Zeroing entire area */ | ||
162 | bitmap_fill(bmap, 1024); | ||
163 | expect_eq_pbl("0-1023", bmap, 1024); | ||
164 | } | ||
165 | |||
166 | static void __init test_copy(void) | ||
111 | { | 167 | { |
112 | DECLARE_BITMAP(bmap1, 1024); | 168 | DECLARE_BITMAP(bmap1, 1024); |
113 | DECLARE_BITMAP(bmap2, 1024); | 169 | DECLARE_BITMAP(bmap2, 1024); |
@@ -116,36 +172,20 @@ static void __init test_zero_fill_copy(void) | |||
116 | bitmap_zero(bmap2, 1024); | 172 | bitmap_zero(bmap2, 1024); |
117 | 173 | ||
118 | /* single-word bitmaps */ | 174 | /* single-word bitmaps */ |
119 | expect_eq_pbl("", bmap1, 23); | 175 | bitmap_set(bmap1, 0, 19); |
120 | |||
121 | bitmap_fill(bmap1, 19); | ||
122 | expect_eq_pbl("0-18", bmap1, 1024); | ||
123 | |||
124 | bitmap_copy(bmap2, bmap1, 23); | 176 | bitmap_copy(bmap2, bmap1, 23); |
125 | expect_eq_pbl("0-18", bmap2, 1024); | 177 | expect_eq_pbl("0-18", bmap2, 1024); |
126 | 178 | ||
127 | bitmap_fill(bmap2, 23); | 179 | bitmap_set(bmap2, 0, 23); |
128 | expect_eq_pbl("0-22", bmap2, 1024); | ||
129 | |||
130 | bitmap_copy(bmap2, bmap1, 23); | 180 | bitmap_copy(bmap2, bmap1, 23); |
131 | expect_eq_pbl("0-18", bmap2, 1024); | 181 | expect_eq_pbl("0-18", bmap2, 1024); |
132 | 182 | ||
133 | bitmap_zero(bmap1, 23); | ||
134 | expect_eq_pbl("", bmap1, 1024); | ||
135 | |||
136 | /* multi-word bitmaps */ | 183 | /* multi-word bitmaps */ |
137 | bitmap_zero(bmap1, 1024); | 184 | bitmap_set(bmap1, 0, 109); |
138 | expect_eq_pbl("", bmap1, 1024); | ||
139 | |||
140 | bitmap_fill(bmap1, 109); | ||
141 | expect_eq_pbl("0-108", bmap1, 1024); | ||
142 | |||
143 | bitmap_copy(bmap2, bmap1, 1024); | 185 | bitmap_copy(bmap2, bmap1, 1024); |
144 | expect_eq_pbl("0-108", bmap2, 1024); | 186 | expect_eq_pbl("0-108", bmap2, 1024); |
145 | 187 | ||
146 | bitmap_fill(bmap2, 1024); | 188 | bitmap_fill(bmap2, 1024); |
147 | expect_eq_pbl("0-1023", bmap2, 1024); | ||
148 | |||
149 | bitmap_copy(bmap2, bmap1, 1024); | 189 | bitmap_copy(bmap2, bmap1, 1024); |
150 | expect_eq_pbl("0-108", bmap2, 1024); | 190 | expect_eq_pbl("0-108", bmap2, 1024); |
151 | 191 | ||
@@ -160,9 +200,6 @@ static void __init test_zero_fill_copy(void) | |||
160 | bitmap_fill(bmap2, 1024); | 200 | bitmap_fill(bmap2, 1024); |
161 | bitmap_copy(bmap2, bmap1, 97); /* ... but aligned on word length */ | 201 | bitmap_copy(bmap2, bmap1, 97); /* ... but aligned on word length */ |
162 | expect_eq_pbl("0-108,128-1023", bmap2, 1024); | 202 | expect_eq_pbl("0-108,128-1023", bmap2, 1024); |
163 | |||
164 | bitmap_zero(bmap2, 97); /* ... but 0-padded til word length */ | ||
165 | expect_eq_pbl("128-1023", bmap2, 1024); | ||
166 | } | 203 | } |
167 | 204 | ||
168 | #define PARSE_TIME 0x1 | 205 | #define PARSE_TIME 0x1 |
@@ -255,171 +292,29 @@ static void __init test_bitmap_parselist(void) | |||
255 | } | 292 | } |
256 | } | 293 | } |
257 | 294 | ||
258 | static void __init test_bitmap_u32_array_conversions(void) | 295 | static void __init test_bitmap_arr32(void) |
259 | { | 296 | { |
260 | DECLARE_BITMAP(bmap1, 1024); | 297 | unsigned int nbits, next_bit, len = sizeof(exp) * 8; |
261 | DECLARE_BITMAP(bmap2, 1024); | 298 | u32 arr[sizeof(exp) / 4]; |
262 | u32 exp_arr[32], arr[32]; | 299 | DECLARE_BITMAP(bmap2, len); |
263 | unsigned nbits; | 300 | |
264 | 301 | memset(arr, 0xa5, sizeof(arr)); | |
265 | for (nbits = 0 ; nbits < 257 ; ++nbits) { | 302 | |
266 | const unsigned int used_u32s = DIV_ROUND_UP(nbits, 32); | 303 | for (nbits = 0; nbits < len; ++nbits) { |
267 | unsigned int i, rv; | 304 | bitmap_to_arr32(arr, exp, nbits); |
268 | 305 | bitmap_from_arr32(bmap2, arr, nbits); | |
269 | bitmap_zero(bmap1, nbits); | 306 | expect_eq_bitmap(bmap2, exp, nbits); |
270 | bitmap_set(bmap1, nbits, 1024 - nbits); /* garbage */ | 307 | |
271 | 308 | next_bit = find_next_bit(bmap2, | |
272 | memset(arr, 0xff, sizeof(arr)); | 309 | round_up(nbits, BITS_PER_LONG), nbits); |
273 | rv = bitmap_to_u32array(arr, used_u32s, bmap1, nbits); | 310 | if (next_bit < round_up(nbits, BITS_PER_LONG)) |
274 | expect_eq_uint(nbits, rv); | 311 | pr_err("bitmap_copy_arr32(nbits == %d:" |
275 | 312 | " tail is not safely cleared: %d\n", | |
276 | memset(exp_arr, 0xff, sizeof(exp_arr)); | 313 | nbits, next_bit); |
277 | memset(exp_arr, 0, used_u32s*sizeof(*exp_arr)); | 314 | |
278 | expect_eq_u32_array(exp_arr, 32, arr, 32); | 315 | if (nbits < len - 32) |
279 | 316 | expect_eq_uint(arr[DIV_ROUND_UP(nbits, 32)], | |
280 | bitmap_fill(bmap2, 1024); | 317 | 0xa5a5a5a5); |
281 | rv = bitmap_from_u32array(bmap2, nbits, arr, used_u32s); | ||
282 | expect_eq_uint(nbits, rv); | ||
283 | expect_eq_bitmap(bmap1, 1024, bmap2, 1024); | ||
284 | |||
285 | for (i = 0 ; i < nbits ; ++i) { | ||
286 | /* | ||
287 | * test conversion bitmap -> u32[] | ||
288 | */ | ||
289 | |||
290 | bitmap_zero(bmap1, 1024); | ||
291 | __set_bit(i, bmap1); | ||
292 | bitmap_set(bmap1, nbits, 1024 - nbits); /* garbage */ | ||
293 | |||
294 | memset(arr, 0xff, sizeof(arr)); | ||
295 | rv = bitmap_to_u32array(arr, used_u32s, bmap1, nbits); | ||
296 | expect_eq_uint(nbits, rv); | ||
297 | |||
298 | /* 1st used u32 words contain expected bit set, the | ||
299 | * remaining words are left unchanged (0xff) | ||
300 | */ | ||
301 | memset(exp_arr, 0xff, sizeof(exp_arr)); | ||
302 | memset(exp_arr, 0, used_u32s*sizeof(*exp_arr)); | ||
303 | exp_arr[i/32] = (1U<<(i%32)); | ||
304 | expect_eq_u32_array(exp_arr, 32, arr, 32); | ||
305 | |||
306 | |||
307 | /* same, with longer array to fill | ||
308 | */ | ||
309 | memset(arr, 0xff, sizeof(arr)); | ||
310 | rv = bitmap_to_u32array(arr, 32, bmap1, nbits); | ||
311 | expect_eq_uint(nbits, rv); | ||
312 | |||
313 | /* 1st used u32 words contain expected bit set, the | ||
314 | * remaining words are all 0s | ||
315 | */ | ||
316 | memset(exp_arr, 0, sizeof(exp_arr)); | ||
317 | exp_arr[i/32] = (1U<<(i%32)); | ||
318 | expect_eq_u32_array(exp_arr, 32, arr, 32); | ||
319 | |||
320 | /* | ||
321 | * test conversion u32[] -> bitmap | ||
322 | */ | ||
323 | |||
324 | /* the 1st nbits of bmap2 are identical to | ||
325 | * bmap1, the remaining bits of bmap2 are left | ||
326 | * unchanged (all 1s) | ||
327 | */ | ||
328 | bitmap_fill(bmap2, 1024); | ||
329 | rv = bitmap_from_u32array(bmap2, nbits, | ||
330 | exp_arr, used_u32s); | ||
331 | expect_eq_uint(nbits, rv); | ||
332 | |||
333 | expect_eq_bitmap(bmap1, 1024, bmap2, 1024); | ||
334 | |||
335 | /* same, with more bits to fill | ||
336 | */ | ||
337 | memset(arr, 0xff, sizeof(arr)); /* garbage */ | ||
338 | memset(arr, 0, used_u32s*sizeof(u32)); | ||
339 | arr[i/32] = (1U<<(i%32)); | ||
340 | |||
341 | bitmap_fill(bmap2, 1024); | ||
342 | rv = bitmap_from_u32array(bmap2, 1024, arr, used_u32s); | ||
343 | expect_eq_uint(used_u32s*32, rv); | ||
344 | |||
345 | /* the 1st nbits of bmap2 are identical to | ||
346 | * bmap1, the remaining bits of bmap2 are cleared | ||
347 | */ | ||
348 | bitmap_zero(bmap1, 1024); | ||
349 | __set_bit(i, bmap1); | ||
350 | expect_eq_bitmap(bmap1, 1024, bmap2, 1024); | ||
351 | |||
352 | |||
353 | /* | ||
354 | * test short conversion bitmap -> u32[] (1 | ||
355 | * word too short) | ||
356 | */ | ||
357 | if (used_u32s > 1) { | ||
358 | bitmap_zero(bmap1, 1024); | ||
359 | __set_bit(i, bmap1); | ||
360 | bitmap_set(bmap1, nbits, | ||
361 | 1024 - nbits); /* garbage */ | ||
362 | memset(arr, 0xff, sizeof(arr)); | ||
363 | |||
364 | rv = bitmap_to_u32array(arr, used_u32s - 1, | ||
365 | bmap1, nbits); | ||
366 | expect_eq_uint((used_u32s - 1)*32, rv); | ||
367 | |||
368 | /* 1st used u32 words contain expected | ||
369 | * bit set, the remaining words are | ||
370 | * left unchanged (0xff) | ||
371 | */ | ||
372 | memset(exp_arr, 0xff, sizeof(exp_arr)); | ||
373 | memset(exp_arr, 0, | ||
374 | (used_u32s-1)*sizeof(*exp_arr)); | ||
375 | if ((i/32) < (used_u32s - 1)) | ||
376 | exp_arr[i/32] = (1U<<(i%32)); | ||
377 | expect_eq_u32_array(exp_arr, 32, arr, 32); | ||
378 | } | ||
379 | |||
380 | /* | ||
381 | * test short conversion u32[] -> bitmap (3 | ||
382 | * bits too short) | ||
383 | */ | ||
384 | if (nbits > 3) { | ||
385 | memset(arr, 0xff, sizeof(arr)); /* garbage */ | ||
386 | memset(arr, 0, used_u32s*sizeof(*arr)); | ||
387 | arr[i/32] = (1U<<(i%32)); | ||
388 | |||
389 | bitmap_zero(bmap1, 1024); | ||
390 | rv = bitmap_from_u32array(bmap1, nbits - 3, | ||
391 | arr, used_u32s); | ||
392 | expect_eq_uint(nbits - 3, rv); | ||
393 | |||
394 | /* we are expecting the bit < nbits - | ||
395 | * 3 (none otherwise), and the rest of | ||
396 | * bmap1 unchanged (0-filled) | ||
397 | */ | ||
398 | bitmap_zero(bmap2, 1024); | ||
399 | if (i < nbits - 3) | ||
400 | __set_bit(i, bmap2); | ||
401 | expect_eq_bitmap(bmap2, 1024, bmap1, 1024); | ||
402 | |||
403 | /* do the same with bmap1 initially | ||
404 | * 1-filled | ||
405 | */ | ||
406 | |||
407 | bitmap_fill(bmap1, 1024); | ||
408 | rv = bitmap_from_u32array(bmap1, nbits - 3, | ||
409 | arr, used_u32s); | ||
410 | expect_eq_uint(nbits - 3, rv); | ||
411 | |||
412 | /* we are expecting the bit < nbits - | ||
413 | * 3 (none otherwise), and the rest of | ||
414 | * bmap1 unchanged (1-filled) | ||
415 | */ | ||
416 | bitmap_zero(bmap2, 1024); | ||
417 | if (i < nbits - 3) | ||
418 | __set_bit(i, bmap2); | ||
419 | bitmap_set(bmap2, nbits-3, 1024 - nbits + 3); | ||
420 | expect_eq_bitmap(bmap2, 1024, bmap1, 1024); | ||
421 | } | ||
422 | } | ||
423 | } | 318 | } |
424 | } | 319 | } |
425 | 320 | ||
@@ -453,8 +348,10 @@ static void noinline __init test_mem_optimisations(void) | |||
453 | 348 | ||
454 | static int __init test_bitmap_init(void) | 349 | static int __init test_bitmap_init(void) |
455 | { | 350 | { |
456 | test_zero_fill_copy(); | 351 | test_zero_clear(); |
457 | test_bitmap_u32_array_conversions(); | 352 | test_fill_set(); |
353 | test_copy(); | ||
354 | test_bitmap_arr32(); | ||
458 | test_bitmap_parselist(); | 355 | test_bitmap_parselist(); |
459 | test_mem_optimisations(); | 356 | test_mem_optimisations(); |
460 | 357 | ||
diff --git a/lib/test_kasan.c b/lib/test_kasan.c index ef1a3ac1397e..98854a64b014 100644 --- a/lib/test_kasan.c +++ b/lib/test_kasan.c | |||
@@ -94,6 +94,37 @@ static noinline void __init kmalloc_pagealloc_oob_right(void) | |||
94 | ptr[size] = 0; | 94 | ptr[size] = 0; |
95 | kfree(ptr); | 95 | kfree(ptr); |
96 | } | 96 | } |
97 | |||
98 | static noinline void __init kmalloc_pagealloc_uaf(void) | ||
99 | { | ||
100 | char *ptr; | ||
101 | size_t size = KMALLOC_MAX_CACHE_SIZE + 10; | ||
102 | |||
103 | pr_info("kmalloc pagealloc allocation: use-after-free\n"); | ||
104 | ptr = kmalloc(size, GFP_KERNEL); | ||
105 | if (!ptr) { | ||
106 | pr_err("Allocation failed\n"); | ||
107 | return; | ||
108 | } | ||
109 | |||
110 | kfree(ptr); | ||
111 | ptr[0] = 0; | ||
112 | } | ||
113 | |||
114 | static noinline void __init kmalloc_pagealloc_invalid_free(void) | ||
115 | { | ||
116 | char *ptr; | ||
117 | size_t size = KMALLOC_MAX_CACHE_SIZE + 10; | ||
118 | |||
119 | pr_info("kmalloc pagealloc allocation: invalid-free\n"); | ||
120 | ptr = kmalloc(size, GFP_KERNEL); | ||
121 | if (!ptr) { | ||
122 | pr_err("Allocation failed\n"); | ||
123 | return; | ||
124 | } | ||
125 | |||
126 | kfree(ptr + 1); | ||
127 | } | ||
97 | #endif | 128 | #endif |
98 | 129 | ||
99 | static noinline void __init kmalloc_large_oob_right(void) | 130 | static noinline void __init kmalloc_large_oob_right(void) |
@@ -388,7 +419,7 @@ static noinline void __init kasan_stack_oob(void) | |||
388 | static noinline void __init ksize_unpoisons_memory(void) | 419 | static noinline void __init ksize_unpoisons_memory(void) |
389 | { | 420 | { |
390 | char *ptr; | 421 | char *ptr; |
391 | size_t size = 123, real_size = size; | 422 | size_t size = 123, real_size; |
392 | 423 | ||
393 | pr_info("ksize() unpoisons the whole allocated chunk\n"); | 424 | pr_info("ksize() unpoisons the whole allocated chunk\n"); |
394 | ptr = kmalloc(size, GFP_KERNEL); | 425 | ptr = kmalloc(size, GFP_KERNEL); |
@@ -472,6 +503,74 @@ static noinline void __init use_after_scope_test(void) | |||
472 | p[1023] = 1; | 503 | p[1023] = 1; |
473 | } | 504 | } |
474 | 505 | ||
506 | static noinline void __init kasan_alloca_oob_left(void) | ||
507 | { | ||
508 | volatile int i = 10; | ||
509 | char alloca_array[i]; | ||
510 | char *p = alloca_array - 1; | ||
511 | |||
512 | pr_info("out-of-bounds to left on alloca\n"); | ||
513 | *(volatile char *)p; | ||
514 | } | ||
515 | |||
516 | static noinline void __init kasan_alloca_oob_right(void) | ||
517 | { | ||
518 | volatile int i = 10; | ||
519 | char alloca_array[i]; | ||
520 | char *p = alloca_array + i; | ||
521 | |||
522 | pr_info("out-of-bounds to right on alloca\n"); | ||
523 | *(volatile char *)p; | ||
524 | } | ||
525 | |||
526 | static noinline void __init kmem_cache_double_free(void) | ||
527 | { | ||
528 | char *p; | ||
529 | size_t size = 200; | ||
530 | struct kmem_cache *cache; | ||
531 | |||
532 | cache = kmem_cache_create("test_cache", size, 0, 0, NULL); | ||
533 | if (!cache) { | ||
534 | pr_err("Cache allocation failed\n"); | ||
535 | return; | ||
536 | } | ||
537 | pr_info("double-free on heap object\n"); | ||
538 | p = kmem_cache_alloc(cache, GFP_KERNEL); | ||
539 | if (!p) { | ||
540 | pr_err("Allocation failed\n"); | ||
541 | kmem_cache_destroy(cache); | ||
542 | return; | ||
543 | } | ||
544 | |||
545 | kmem_cache_free(cache, p); | ||
546 | kmem_cache_free(cache, p); | ||
547 | kmem_cache_destroy(cache); | ||
548 | } | ||
549 | |||
550 | static noinline void __init kmem_cache_invalid_free(void) | ||
551 | { | ||
552 | char *p; | ||
553 | size_t size = 200; | ||
554 | struct kmem_cache *cache; | ||
555 | |||
556 | cache = kmem_cache_create("test_cache", size, 0, SLAB_TYPESAFE_BY_RCU, | ||
557 | NULL); | ||
558 | if (!cache) { | ||
559 | pr_err("Cache allocation failed\n"); | ||
560 | return; | ||
561 | } | ||
562 | pr_info("invalid-free of heap object\n"); | ||
563 | p = kmem_cache_alloc(cache, GFP_KERNEL); | ||
564 | if (!p) { | ||
565 | pr_err("Allocation failed\n"); | ||
566 | kmem_cache_destroy(cache); | ||
567 | return; | ||
568 | } | ||
569 | |||
570 | kmem_cache_free(cache, p + 1); | ||
571 | kmem_cache_destroy(cache); | ||
572 | } | ||
573 | |||
475 | static int __init kmalloc_tests_init(void) | 574 | static int __init kmalloc_tests_init(void) |
476 | { | 575 | { |
477 | /* | 576 | /* |
@@ -485,6 +584,8 @@ static int __init kmalloc_tests_init(void) | |||
485 | kmalloc_node_oob_right(); | 584 | kmalloc_node_oob_right(); |
486 | #ifdef CONFIG_SLUB | 585 | #ifdef CONFIG_SLUB |
487 | kmalloc_pagealloc_oob_right(); | 586 | kmalloc_pagealloc_oob_right(); |
587 | kmalloc_pagealloc_uaf(); | ||
588 | kmalloc_pagealloc_invalid_free(); | ||
488 | #endif | 589 | #endif |
489 | kmalloc_large_oob_right(); | 590 | kmalloc_large_oob_right(); |
490 | kmalloc_oob_krealloc_more(); | 591 | kmalloc_oob_krealloc_more(); |
@@ -502,9 +603,13 @@ static int __init kmalloc_tests_init(void) | |||
502 | memcg_accounted_kmem_cache(); | 603 | memcg_accounted_kmem_cache(); |
503 | kasan_stack_oob(); | 604 | kasan_stack_oob(); |
504 | kasan_global_oob(); | 605 | kasan_global_oob(); |
606 | kasan_alloca_oob_left(); | ||
607 | kasan_alloca_oob_right(); | ||
505 | ksize_unpoisons_memory(); | 608 | ksize_unpoisons_memory(); |
506 | copy_user_test(); | 609 | copy_user_test(); |
507 | use_after_scope_test(); | 610 | use_after_scope_test(); |
611 | kmem_cache_double_free(); | ||
612 | kmem_cache_invalid_free(); | ||
508 | 613 | ||
509 | kasan_restore_multi_shot(multishot); | 614 | kasan_restore_multi_shot(multishot); |
510 | 615 | ||
diff --git a/lib/test_sort.c b/lib/test_sort.c index d389c1cc2f6c..385c0ed5202f 100644 --- a/lib/test_sort.c +++ b/lib/test_sort.c | |||
@@ -39,5 +39,11 @@ exit: | |||
39 | return err; | 39 | return err; |
40 | } | 40 | } |
41 | 41 | ||
42 | static void __exit test_sort_exit(void) | ||
43 | { | ||
44 | } | ||
45 | |||
42 | module_init(test_sort_init); | 46 | module_init(test_sort_init); |
47 | module_exit(test_sort_exit); | ||
48 | |||
43 | MODULE_LICENSE("GPL"); | 49 | MODULE_LICENSE("GPL"); |
diff --git a/lib/ubsan.c b/lib/ubsan.c index fb0409df1bcf..59fee96c29a0 100644 --- a/lib/ubsan.c +++ b/lib/ubsan.c | |||
@@ -141,11 +141,6 @@ static void val_to_string(char *str, size_t size, struct type_descriptor *type, | |||
141 | } | 141 | } |
142 | } | 142 | } |
143 | 143 | ||
144 | static bool location_is_valid(struct source_location *loc) | ||
145 | { | ||
146 | return loc->file_name != NULL; | ||
147 | } | ||
148 | |||
149 | static DEFINE_SPINLOCK(report_lock); | 144 | static DEFINE_SPINLOCK(report_lock); |
150 | 145 | ||
151 | static void ubsan_prologue(struct source_location *location, | 146 | static void ubsan_prologue(struct source_location *location, |
@@ -265,14 +260,14 @@ void __ubsan_handle_divrem_overflow(struct overflow_data *data, | |||
265 | } | 260 | } |
266 | EXPORT_SYMBOL(__ubsan_handle_divrem_overflow); | 261 | EXPORT_SYMBOL(__ubsan_handle_divrem_overflow); |
267 | 262 | ||
268 | static void handle_null_ptr_deref(struct type_mismatch_data *data) | 263 | static void handle_null_ptr_deref(struct type_mismatch_data_common *data) |
269 | { | 264 | { |
270 | unsigned long flags; | 265 | unsigned long flags; |
271 | 266 | ||
272 | if (suppress_report(&data->location)) | 267 | if (suppress_report(data->location)) |
273 | return; | 268 | return; |
274 | 269 | ||
275 | ubsan_prologue(&data->location, &flags); | 270 | ubsan_prologue(data->location, &flags); |
276 | 271 | ||
277 | pr_err("%s null pointer of type %s\n", | 272 | pr_err("%s null pointer of type %s\n", |
278 | type_check_kinds[data->type_check_kind], | 273 | type_check_kinds[data->type_check_kind], |
@@ -281,15 +276,15 @@ static void handle_null_ptr_deref(struct type_mismatch_data *data) | |||
281 | ubsan_epilogue(&flags); | 276 | ubsan_epilogue(&flags); |
282 | } | 277 | } |
283 | 278 | ||
284 | static void handle_missaligned_access(struct type_mismatch_data *data, | 279 | static void handle_misaligned_access(struct type_mismatch_data_common *data, |
285 | unsigned long ptr) | 280 | unsigned long ptr) |
286 | { | 281 | { |
287 | unsigned long flags; | 282 | unsigned long flags; |
288 | 283 | ||
289 | if (suppress_report(&data->location)) | 284 | if (suppress_report(data->location)) |
290 | return; | 285 | return; |
291 | 286 | ||
292 | ubsan_prologue(&data->location, &flags); | 287 | ubsan_prologue(data->location, &flags); |
293 | 288 | ||
294 | pr_err("%s misaligned address %p for type %s\n", | 289 | pr_err("%s misaligned address %p for type %s\n", |
295 | type_check_kinds[data->type_check_kind], | 290 | type_check_kinds[data->type_check_kind], |
@@ -299,15 +294,15 @@ static void handle_missaligned_access(struct type_mismatch_data *data, | |||
299 | ubsan_epilogue(&flags); | 294 | ubsan_epilogue(&flags); |
300 | } | 295 | } |
301 | 296 | ||
302 | static void handle_object_size_mismatch(struct type_mismatch_data *data, | 297 | static void handle_object_size_mismatch(struct type_mismatch_data_common *data, |
303 | unsigned long ptr) | 298 | unsigned long ptr) |
304 | { | 299 | { |
305 | unsigned long flags; | 300 | unsigned long flags; |
306 | 301 | ||
307 | if (suppress_report(&data->location)) | 302 | if (suppress_report(data->location)) |
308 | return; | 303 | return; |
309 | 304 | ||
310 | ubsan_prologue(&data->location, &flags); | 305 | ubsan_prologue(data->location, &flags); |
311 | pr_err("%s address %p with insufficient space\n", | 306 | pr_err("%s address %p with insufficient space\n", |
312 | type_check_kinds[data->type_check_kind], | 307 | type_check_kinds[data->type_check_kind], |
313 | (void *) ptr); | 308 | (void *) ptr); |
@@ -315,37 +310,46 @@ static void handle_object_size_mismatch(struct type_mismatch_data *data, | |||
315 | ubsan_epilogue(&flags); | 310 | ubsan_epilogue(&flags); |
316 | } | 311 | } |
317 | 312 | ||
318 | void __ubsan_handle_type_mismatch(struct type_mismatch_data *data, | 313 | static void ubsan_type_mismatch_common(struct type_mismatch_data_common *data, |
319 | unsigned long ptr) | 314 | unsigned long ptr) |
320 | { | 315 | { |
321 | 316 | ||
322 | if (!ptr) | 317 | if (!ptr) |
323 | handle_null_ptr_deref(data); | 318 | handle_null_ptr_deref(data); |
324 | else if (data->alignment && !IS_ALIGNED(ptr, data->alignment)) | 319 | else if (data->alignment && !IS_ALIGNED(ptr, data->alignment)) |
325 | handle_missaligned_access(data, ptr); | 320 | handle_misaligned_access(data, ptr); |
326 | else | 321 | else |
327 | handle_object_size_mismatch(data, ptr); | 322 | handle_object_size_mismatch(data, ptr); |
328 | } | 323 | } |
329 | EXPORT_SYMBOL(__ubsan_handle_type_mismatch); | ||
330 | 324 | ||
331 | void __ubsan_handle_nonnull_return(struct nonnull_return_data *data) | 325 | void __ubsan_handle_type_mismatch(struct type_mismatch_data *data, |
326 | unsigned long ptr) | ||
332 | { | 327 | { |
333 | unsigned long flags; | 328 | struct type_mismatch_data_common common_data = { |
334 | 329 | .location = &data->location, | |
335 | if (suppress_report(&data->location)) | 330 | .type = data->type, |
336 | return; | 331 | .alignment = data->alignment, |
337 | 332 | .type_check_kind = data->type_check_kind | |
338 | ubsan_prologue(&data->location, &flags); | 333 | }; |
334 | |||
335 | ubsan_type_mismatch_common(&common_data, ptr); | ||
336 | } | ||
337 | EXPORT_SYMBOL(__ubsan_handle_type_mismatch); | ||
339 | 338 | ||
340 | pr_err("null pointer returned from function declared to never return null\n"); | 339 | void __ubsan_handle_type_mismatch_v1(struct type_mismatch_data_v1 *data, |
340 | unsigned long ptr) | ||
341 | { | ||
341 | 342 | ||
342 | if (location_is_valid(&data->attr_location)) | 343 | struct type_mismatch_data_common common_data = { |
343 | print_source_location("returns_nonnull attribute specified in", | 344 | .location = &data->location, |
344 | &data->attr_location); | 345 | .type = data->type, |
346 | .alignment = 1UL << data->log_alignment, | ||
347 | .type_check_kind = data->type_check_kind | ||
348 | }; | ||
345 | 349 | ||
346 | ubsan_epilogue(&flags); | 350 | ubsan_type_mismatch_common(&common_data, ptr); |
347 | } | 351 | } |
348 | EXPORT_SYMBOL(__ubsan_handle_nonnull_return); | 352 | EXPORT_SYMBOL(__ubsan_handle_type_mismatch_v1); |
349 | 353 | ||
350 | void __ubsan_handle_vla_bound_not_positive(struct vla_bound_data *data, | 354 | void __ubsan_handle_vla_bound_not_positive(struct vla_bound_data *data, |
351 | unsigned long bound) | 355 | unsigned long bound) |
diff --git a/lib/ubsan.h b/lib/ubsan.h index 88f23557edbe..f4d8d0bd4016 100644 --- a/lib/ubsan.h +++ b/lib/ubsan.h | |||
@@ -37,15 +37,24 @@ struct type_mismatch_data { | |||
37 | unsigned char type_check_kind; | 37 | unsigned char type_check_kind; |
38 | }; | 38 | }; |
39 | 39 | ||
40 | struct nonnull_arg_data { | 40 | struct type_mismatch_data_v1 { |
41 | struct source_location location; | 41 | struct source_location location; |
42 | struct source_location attr_location; | 42 | struct type_descriptor *type; |
43 | int arg_index; | 43 | unsigned char log_alignment; |
44 | unsigned char type_check_kind; | ||
45 | }; | ||
46 | |||
47 | struct type_mismatch_data_common { | ||
48 | struct source_location *location; | ||
49 | struct type_descriptor *type; | ||
50 | unsigned long alignment; | ||
51 | unsigned char type_check_kind; | ||
44 | }; | 52 | }; |
45 | 53 | ||
46 | struct nonnull_return_data { | 54 | struct nonnull_arg_data { |
47 | struct source_location location; | 55 | struct source_location location; |
48 | struct source_location attr_location; | 56 | struct source_location attr_location; |
57 | int arg_index; | ||
49 | }; | 58 | }; |
50 | 59 | ||
51 | struct vla_bound_data { | 60 | struct vla_bound_data { |
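
The new __ubsan_handle_type_mismatch_v1 entry point follows the updated compiler ABI, where the required alignment arrives as a log2 value in a single byte instead of a full unsigned long; both the old and new handlers are now funneled through ubsan_type_mismatch_common. A minimal user-space sketch of that log2 encoding (the values are illustrative, this is not kernel code):

#include <stdio.h>

int main(void)
{
	/* as emitted under the v1 ABI: log2 of the required alignment */
	unsigned char log_alignment = 3;
	/* what the common handler works with: the alignment in bytes */
	unsigned long alignment = 1UL << log_alignment;

	printf("log2 %u -> %lu-byte alignment\n", log_alignment, alignment);
	return 0;
}
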
diff --git a/mm/bootmem.c b/mm/bootmem.c index 6aef64254203..9e197987b67d 100644 --- a/mm/bootmem.c +++ b/mm/bootmem.c | |||
@@ -410,7 +410,7 @@ void __init free_bootmem_node(pg_data_t *pgdat, unsigned long physaddr, | |||
410 | 410 | ||
411 | /** | 411 | /** |
412 | * free_bootmem - mark a page range as usable | 412 | * free_bootmem - mark a page range as usable |
413 | * @addr: starting physical address of the range | 413 | * @physaddr: starting physical address of the range |
414 | * @size: size of the range in bytes | 414 | * @size: size of the range in bytes |
415 | * | 415 | * |
416 | * Partial pages will be considered reserved and left as they are. | 416 | * Partial pages will be considered reserved and left as they are. |
diff --git a/mm/kasan/kasan.c b/mm/kasan/kasan.c index 405bba487df5..e13d911251e7 100644 --- a/mm/kasan/kasan.c +++ b/mm/kasan/kasan.c | |||
@@ -5,7 +5,7 @@ | |||
5 | * Author: Andrey Ryabinin <ryabinin.a.a@gmail.com> | 5 | * Author: Andrey Ryabinin <ryabinin.a.a@gmail.com> |
6 | * | 6 | * |
7 | * Some code borrowed from https://github.com/xairy/kasan-prototype by | 7 | * Some code borrowed from https://github.com/xairy/kasan-prototype by |
8 | * Andrey Konovalov <adech.fo@gmail.com> | 8 | * Andrey Konovalov <andreyknvl@gmail.com> |
9 | * | 9 | * |
10 | * This program is free software; you can redistribute it and/or modify | 10 | * This program is free software; you can redistribute it and/or modify |
11 | * it under the terms of the GNU General Public License version 2 as | 11 | * it under the terms of the GNU General Public License version 2 as |
@@ -489,21 +489,17 @@ void kasan_slab_alloc(struct kmem_cache *cache, void *object, gfp_t flags) | |||
489 | kasan_kmalloc(cache, object, cache->object_size, flags); | 489 | kasan_kmalloc(cache, object, cache->object_size, flags); |
490 | } | 490 | } |
491 | 491 | ||
492 | static void kasan_poison_slab_free(struct kmem_cache *cache, void *object) | 492 | static bool __kasan_slab_free(struct kmem_cache *cache, void *object, |
493 | { | 493 | unsigned long ip, bool quarantine) |
494 | unsigned long size = cache->object_size; | ||
495 | unsigned long rounded_up_size = round_up(size, KASAN_SHADOW_SCALE_SIZE); | ||
496 | |||
497 | /* RCU slabs could be legally used after free within the RCU period */ | ||
498 | if (unlikely(cache->flags & SLAB_TYPESAFE_BY_RCU)) | ||
499 | return; | ||
500 | |||
501 | kasan_poison_shadow(object, rounded_up_size, KASAN_KMALLOC_FREE); | ||
502 | } | ||
503 | |||
504 | bool kasan_slab_free(struct kmem_cache *cache, void *object) | ||
505 | { | 494 | { |
506 | s8 shadow_byte; | 495 | s8 shadow_byte; |
496 | unsigned long rounded_up_size; | ||
497 | |||
498 | if (unlikely(nearest_obj(cache, virt_to_head_page(object), object) != | ||
499 | object)) { | ||
500 | kasan_report_invalid_free(object, ip); | ||
501 | return true; | ||
502 | } | ||
507 | 503 | ||
508 | /* RCU slabs could be legally used after free within the RCU period */ | 504 | /* RCU slabs could be legally used after free within the RCU period */ |
509 | if (unlikely(cache->flags & SLAB_TYPESAFE_BY_RCU)) | 505 | if (unlikely(cache->flags & SLAB_TYPESAFE_BY_RCU)) |
@@ -511,14 +507,14 @@ bool kasan_slab_free(struct kmem_cache *cache, void *object) | |||
511 | 507 | ||
512 | shadow_byte = READ_ONCE(*(s8 *)kasan_mem_to_shadow(object)); | 508 | shadow_byte = READ_ONCE(*(s8 *)kasan_mem_to_shadow(object)); |
513 | if (shadow_byte < 0 || shadow_byte >= KASAN_SHADOW_SCALE_SIZE) { | 509 | if (shadow_byte < 0 || shadow_byte >= KASAN_SHADOW_SCALE_SIZE) { |
514 | kasan_report_double_free(cache, object, | 510 | kasan_report_invalid_free(object, ip); |
515 | __builtin_return_address(1)); | ||
516 | return true; | 511 | return true; |
517 | } | 512 | } |
518 | 513 | ||
519 | kasan_poison_slab_free(cache, object); | 514 | rounded_up_size = round_up(cache->object_size, KASAN_SHADOW_SCALE_SIZE); |
515 | kasan_poison_shadow(object, rounded_up_size, KASAN_KMALLOC_FREE); | ||
520 | 516 | ||
521 | if (unlikely(!(cache->flags & SLAB_KASAN))) | 517 | if (!quarantine || unlikely(!(cache->flags & SLAB_KASAN))) |
522 | return false; | 518 | return false; |
523 | 519 | ||
524 | set_track(&get_alloc_info(cache, object)->free_track, GFP_NOWAIT); | 520 | set_track(&get_alloc_info(cache, object)->free_track, GFP_NOWAIT); |
@@ -526,6 +522,11 @@ bool kasan_slab_free(struct kmem_cache *cache, void *object) | |||
526 | return true; | 522 | return true; |
527 | } | 523 | } |
528 | 524 | ||
525 | bool kasan_slab_free(struct kmem_cache *cache, void *object, unsigned long ip) | ||
526 | { | ||
527 | return __kasan_slab_free(cache, object, ip, true); | ||
528 | } | ||
529 | |||
529 | void kasan_kmalloc(struct kmem_cache *cache, const void *object, size_t size, | 530 | void kasan_kmalloc(struct kmem_cache *cache, const void *object, size_t size, |
530 | gfp_t flags) | 531 | gfp_t flags) |
531 | { | 532 | { |
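
__kasan_slab_free() now also rejects pointers that do not point at the start of an object in their slab and reports them as an invalid free rather than poisoning unrelated memory. A stand-alone sketch of that start-of-object test, assuming a packed-object slab layout (a simplification, not the kernel's nearest_obj()):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct fake_cache {
	unsigned long object_size;
};

/* assumed layout: objects packed back to back starting at slab_base */
static bool points_at_object_start(const struct fake_cache *cache,
				   uintptr_t slab_base, uintptr_t ptr)
{
	uintptr_t offset = ptr - slab_base;
	uintptr_t nearest = slab_base +
		(offset / cache->object_size) * cache->object_size;

	return ptr == nearest;
}

int main(void)
{
	struct fake_cache cache = { .object_size = 128 };
	uintptr_t slab = 0x10000;

	printf("free(0x10080): %s\n",
	       points_at_object_start(&cache, slab, 0x10080) ? "ok" : "invalid-free");
	printf("free(0x10084): %s\n",
	       points_at_object_start(&cache, slab, 0x10084) ? "ok" : "invalid-free");
	return 0;
}
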
@@ -589,25 +590,29 @@ void kasan_krealloc(const void *object, size_t size, gfp_t flags) | |||
589 | kasan_kmalloc(page->slab_cache, object, size, flags); | 590 | kasan_kmalloc(page->slab_cache, object, size, flags); |
590 | } | 591 | } |
591 | 592 | ||
592 | void kasan_poison_kfree(void *ptr) | 593 | void kasan_poison_kfree(void *ptr, unsigned long ip) |
593 | { | 594 | { |
594 | struct page *page; | 595 | struct page *page; |
595 | 596 | ||
596 | page = virt_to_head_page(ptr); | 597 | page = virt_to_head_page(ptr); |
597 | 598 | ||
598 | if (unlikely(!PageSlab(page))) | 599 | if (unlikely(!PageSlab(page))) { |
600 | if (ptr != page_address(page)) { | ||
601 | kasan_report_invalid_free(ptr, ip); | ||
602 | return; | ||
603 | } | ||
599 | kasan_poison_shadow(ptr, PAGE_SIZE << compound_order(page), | 604 | kasan_poison_shadow(ptr, PAGE_SIZE << compound_order(page), |
600 | KASAN_FREE_PAGE); | 605 | KASAN_FREE_PAGE); |
601 | else | 606 | } else { |
602 | kasan_poison_slab_free(page->slab_cache, ptr); | 607 | __kasan_slab_free(page->slab_cache, ptr, ip, false); |
608 | } | ||
603 | } | 609 | } |
604 | 610 | ||
605 | void kasan_kfree_large(const void *ptr) | 611 | void kasan_kfree_large(void *ptr, unsigned long ip) |
606 | { | 612 | { |
607 | struct page *page = virt_to_page(ptr); | 613 | if (ptr != page_address(virt_to_head_page(ptr))) |
608 | 614 | kasan_report_invalid_free(ptr, ip); | |
609 | kasan_poison_shadow(ptr, PAGE_SIZE << compound_order(page), | 615 | /* The object will be poisoned by page_alloc. */ |
610 | KASAN_FREE_PAGE); | ||
611 | } | 616 | } |
612 | 617 | ||
613 | int kasan_module_alloc(void *addr, size_t size) | 618 | int kasan_module_alloc(void *addr, size_t size) |
@@ -736,6 +741,55 @@ void __asan_unpoison_stack_memory(const void *addr, size_t size) | |||
736 | } | 741 | } |
737 | EXPORT_SYMBOL(__asan_unpoison_stack_memory); | 742 | EXPORT_SYMBOL(__asan_unpoison_stack_memory); |
738 | 743 | ||
744 | /* Emitted by compiler to poison alloca()ed objects. */ | ||
745 | void __asan_alloca_poison(unsigned long addr, size_t size) | ||
746 | { | ||
747 | size_t rounded_up_size = round_up(size, KASAN_SHADOW_SCALE_SIZE); | ||
748 | size_t padding_size = round_up(size, KASAN_ALLOCA_REDZONE_SIZE) - | ||
749 | rounded_up_size; | ||
750 | size_t rounded_down_size = round_down(size, KASAN_SHADOW_SCALE_SIZE); | ||
751 | |||
752 | const void *left_redzone = (const void *)(addr - | ||
753 | KASAN_ALLOCA_REDZONE_SIZE); | ||
754 | const void *right_redzone = (const void *)(addr + rounded_up_size); | ||
755 | |||
756 | WARN_ON(!IS_ALIGNED(addr, KASAN_ALLOCA_REDZONE_SIZE)); | ||
757 | |||
758 | kasan_unpoison_shadow((const void *)(addr + rounded_down_size), | ||
759 | size - rounded_down_size); | ||
760 | kasan_poison_shadow(left_redzone, KASAN_ALLOCA_REDZONE_SIZE, | ||
761 | KASAN_ALLOCA_LEFT); | ||
762 | kasan_poison_shadow(right_redzone, | ||
763 | padding_size + KASAN_ALLOCA_REDZONE_SIZE, | ||
764 | KASAN_ALLOCA_RIGHT); | ||
765 | } | ||
766 | EXPORT_SYMBOL(__asan_alloca_poison); | ||
767 | |||
768 | /* Emitted by compiler to unpoison alloca()ed areas when the stack unwinds. */ | ||
769 | void __asan_allocas_unpoison(const void *stack_top, const void *stack_bottom) | ||
770 | { | ||
771 | if (unlikely(!stack_top || stack_top > stack_bottom)) | ||
772 | return; | ||
773 | |||
774 | kasan_unpoison_shadow(stack_top, stack_bottom - stack_top); | ||
775 | } | ||
776 | EXPORT_SYMBOL(__asan_allocas_unpoison); | ||
777 | |||
778 | /* Emitted by the compiler to [un]poison local variables. */ | ||
779 | #define DEFINE_ASAN_SET_SHADOW(byte) \ | ||
780 | void __asan_set_shadow_##byte(const void *addr, size_t size) \ | ||
781 | { \ | ||
782 | __memset((void *)addr, 0x##byte, size); \ | ||
783 | } \ | ||
784 | EXPORT_SYMBOL(__asan_set_shadow_##byte) | ||
785 | |||
786 | DEFINE_ASAN_SET_SHADOW(00); | ||
787 | DEFINE_ASAN_SET_SHADOW(f1); | ||
788 | DEFINE_ASAN_SET_SHADOW(f2); | ||
789 | DEFINE_ASAN_SET_SHADOW(f3); | ||
790 | DEFINE_ASAN_SET_SHADOW(f5); | ||
791 | DEFINE_ASAN_SET_SHADOW(f8); | ||
792 | |||
739 | #ifdef CONFIG_MEMORY_HOTPLUG | 793 | #ifdef CONFIG_MEMORY_HOTPLUG |
740 | static int __meminit kasan_mem_notifier(struct notifier_block *nb, | 794 | static int __meminit kasan_mem_notifier(struct notifier_block *nb, |
741 | unsigned long action, void *data) | 795 | unsigned long action, void *data) |
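
__asan_alloca_poison() brackets every alloca()ed object with KASAN_ALLOCA_REDZONE_SIZE (32-byte) redzones and rounds the object up to the 8-byte shadow granule before poisoning. A stand-alone sketch of the layout arithmetic, with an assumed address and size (not kernel code):

#include <stdio.h>

#define SHADOW_SCALE_SIZE	8UL	/* KASAN_SHADOW_SCALE_SIZE */
#define ALLOCA_REDZONE_SIZE	32UL	/* KASAN_ALLOCA_REDZONE_SIZE */
#define round_up(x, a)		(((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	unsigned long addr = 0x1000;	/* assumed alloca() result */
	unsigned long size = 45;	/* assumed object size */

	unsigned long rounded_up = round_up(size, SHADOW_SCALE_SIZE);
	unsigned long padding = round_up(size, ALLOCA_REDZONE_SIZE) - rounded_up;

	printf("left redzone : [%#lx, %#lx)\n", addr - ALLOCA_REDZONE_SIZE, addr);
	printf("object       : [%#lx, %#lx)\n", addr, addr + size);
	printf("right redzone: [%#lx, %#lx)\n", addr + rounded_up,
	       addr + rounded_up + padding + ALLOCA_REDZONE_SIZE);
	return 0;
}
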
diff --git a/mm/kasan/kasan.h b/mm/kasan/kasan.h index c70851a9a6a4..c12dcfde2ebd 100644 --- a/mm/kasan/kasan.h +++ b/mm/kasan/kasan.h | |||
@@ -24,6 +24,14 @@ | |||
24 | #define KASAN_STACK_PARTIAL 0xF4 | 24 | #define KASAN_STACK_PARTIAL 0xF4 |
25 | #define KASAN_USE_AFTER_SCOPE 0xF8 | 25 | #define KASAN_USE_AFTER_SCOPE 0xF8 |
26 | 26 | ||
27 | /* | ||
28 | * alloca redzone shadow values | ||
29 | */ | ||
30 | #define KASAN_ALLOCA_LEFT 0xCA | ||
31 | #define KASAN_ALLOCA_RIGHT 0xCB | ||
32 | |||
33 | #define KASAN_ALLOCA_REDZONE_SIZE 32 | ||
34 | |||
27 | /* Don't break randconfig/all*config builds */ | 35 | /* Don't break randconfig/all*config builds */ |
28 | #ifndef KASAN_ABI_VERSION | 36 | #ifndef KASAN_ABI_VERSION |
29 | #define KASAN_ABI_VERSION 1 | 37 | #define KASAN_ABI_VERSION 1 |
@@ -99,8 +107,7 @@ static inline const void *kasan_shadow_to_mem(const void *shadow_addr) | |||
99 | 107 | ||
100 | void kasan_report(unsigned long addr, size_t size, | 108 | void kasan_report(unsigned long addr, size_t size, |
101 | bool is_write, unsigned long ip); | 109 | bool is_write, unsigned long ip); |
102 | void kasan_report_double_free(struct kmem_cache *cache, void *object, | 110 | void kasan_report_invalid_free(void *object, unsigned long ip); |
103 | void *ip); | ||
104 | 111 | ||
105 | #if defined(CONFIG_SLAB) || defined(CONFIG_SLUB) | 112 | #if defined(CONFIG_SLAB) || defined(CONFIG_SLUB) |
106 | void quarantine_put(struct kasan_free_meta *info, struct kmem_cache *cache); | 113 | void quarantine_put(struct kasan_free_meta *info, struct kmem_cache *cache); |
@@ -113,4 +120,48 @@ static inline void quarantine_reduce(void) { } | |||
113 | static inline void quarantine_remove_cache(struct kmem_cache *cache) { } | 120 | static inline void quarantine_remove_cache(struct kmem_cache *cache) { } |
114 | #endif | 121 | #endif |
115 | 122 | ||
123 | /* | ||
124 | * Exported functions for interfaces called from assembly or from generated | ||
125 | * code. Declarations here to avoid warning about missing declarations. | ||
126 | */ | ||
127 | asmlinkage void kasan_unpoison_task_stack_below(const void *watermark); | ||
128 | void __asan_register_globals(struct kasan_global *globals, size_t size); | ||
129 | void __asan_unregister_globals(struct kasan_global *globals, size_t size); | ||
130 | void __asan_loadN(unsigned long addr, size_t size); | ||
131 | void __asan_storeN(unsigned long addr, size_t size); | ||
132 | void __asan_handle_no_return(void); | ||
133 | void __asan_poison_stack_memory(const void *addr, size_t size); | ||
134 | void __asan_unpoison_stack_memory(const void *addr, size_t size); | ||
135 | void __asan_alloca_poison(unsigned long addr, size_t size); | ||
136 | void __asan_allocas_unpoison(const void *stack_top, const void *stack_bottom); | ||
137 | |||
138 | void __asan_load1(unsigned long addr); | ||
139 | void __asan_store1(unsigned long addr); | ||
140 | void __asan_load2(unsigned long addr); | ||
141 | void __asan_store2(unsigned long addr); | ||
142 | void __asan_load4(unsigned long addr); | ||
143 | void __asan_store4(unsigned long addr); | ||
144 | void __asan_load8(unsigned long addr); | ||
145 | void __asan_store8(unsigned long addr); | ||
146 | void __asan_load16(unsigned long addr); | ||
147 | void __asan_store16(unsigned long addr); | ||
148 | |||
149 | void __asan_load1_noabort(unsigned long addr); | ||
150 | void __asan_store1_noabort(unsigned long addr); | ||
151 | void __asan_load2_noabort(unsigned long addr); | ||
152 | void __asan_store2_noabort(unsigned long addr); | ||
153 | void __asan_load4_noabort(unsigned long addr); | ||
154 | void __asan_store4_noabort(unsigned long addr); | ||
155 | void __asan_load8_noabort(unsigned long addr); | ||
156 | void __asan_store8_noabort(unsigned long addr); | ||
157 | void __asan_load16_noabort(unsigned long addr); | ||
158 | void __asan_store16_noabort(unsigned long addr); | ||
159 | |||
160 | void __asan_set_shadow_00(const void *addr, size_t size); | ||
161 | void __asan_set_shadow_f1(const void *addr, size_t size); | ||
162 | void __asan_set_shadow_f2(const void *addr, size_t size); | ||
163 | void __asan_set_shadow_f3(const void *addr, size_t size); | ||
164 | void __asan_set_shadow_f5(const void *addr, size_t size); | ||
165 | void __asan_set_shadow_f8(const void *addr, size_t size); | ||
166 | |||
116 | #endif | 167 | #endif |
diff --git a/mm/kasan/report.c b/mm/kasan/report.c index 410c8235e671..5c169aa688fd 100644 --- a/mm/kasan/report.c +++ b/mm/kasan/report.c | |||
@@ -5,7 +5,7 @@ | |||
5 | * Author: Andrey Ryabinin <ryabinin.a.a@gmail.com> | 5 | * Author: Andrey Ryabinin <ryabinin.a.a@gmail.com> |
6 | * | 6 | * |
7 | * Some code borrowed from https://github.com/xairy/kasan-prototype by | 7 | * Some code borrowed from https://github.com/xairy/kasan-prototype by |
8 | * Andrey Konovalov <adech.fo@gmail.com> | 8 | * Andrey Konovalov <andreyknvl@gmail.com> |
9 | * | 9 | * |
10 | * This program is free software; you can redistribute it and/or modify | 10 | * This program is free software; you can redistribute it and/or modify |
11 | * it under the terms of the GNU General Public License version 2 as | 11 | * it under the terms of the GNU General Public License version 2 as |
@@ -102,6 +102,10 @@ static const char *get_shadow_bug_type(struct kasan_access_info *info) | |||
102 | case KASAN_USE_AFTER_SCOPE: | 102 | case KASAN_USE_AFTER_SCOPE: |
103 | bug_type = "use-after-scope"; | 103 | bug_type = "use-after-scope"; |
104 | break; | 104 | break; |
105 | case KASAN_ALLOCA_LEFT: | ||
106 | case KASAN_ALLOCA_RIGHT: | ||
107 | bug_type = "alloca-out-of-bounds"; | ||
108 | break; | ||
105 | } | 109 | } |
106 | 110 | ||
107 | return bug_type; | 111 | return bug_type; |
@@ -322,13 +326,12 @@ static void print_shadow_for_address(const void *addr) | |||
322 | } | 326 | } |
323 | } | 327 | } |
324 | 328 | ||
325 | void kasan_report_double_free(struct kmem_cache *cache, void *object, | 329 | void kasan_report_invalid_free(void *object, unsigned long ip) |
326 | void *ip) | ||
327 | { | 330 | { |
328 | unsigned long flags; | 331 | unsigned long flags; |
329 | 332 | ||
330 | kasan_start_report(&flags); | 333 | kasan_start_report(&flags); |
331 | pr_err("BUG: KASAN: double-free or invalid-free in %pS\n", ip); | 334 | pr_err("BUG: KASAN: double-free or invalid-free in %pS\n", (void *)ip); |
332 | pr_err("\n"); | 335 | pr_err("\n"); |
333 | print_address_description(object); | 336 | print_address_description(object); |
334 | pr_err("\n"); | 337 | pr_err("\n"); |
diff --git a/mm/ksm.c b/mm/ksm.c --- a/mm/ksm.c +++ b/mm/ksm.c | |||
@@ -2302,7 +2302,7 @@ next_mm: | |||
2302 | 2302 | ||
2303 | /** | 2303 | /** |
2304 | * ksm_do_scan - the ksm scanner main worker function. | 2304 | * ksm_do_scan - the ksm scanner main worker function. |
2305 | * @scan_npages - number of pages we want to scan before we return. | 2305 | * @scan_npages: number of pages we want to scan before we return. |
2306 | */ | 2306 | */ |
2307 | static void ksm_do_scan(unsigned int scan_npages) | 2307 | static void ksm_do_scan(unsigned int scan_npages) |
2308 | { | 2308 | { |
diff --git a/mm/maccess.c b/mm/maccess.c index 78f9274dd49d..ec00be51a24f 100644 --- a/mm/maccess.c +++ b/mm/maccess.c | |||
@@ -70,7 +70,7 @@ EXPORT_SYMBOL_GPL(probe_kernel_write); | |||
70 | * strncpy_from_unsafe: - Copy a NUL terminated string from unsafe address. | 70 | * strncpy_from_unsafe: - Copy a NUL terminated string from unsafe address. |
71 | * @dst: Destination address, in kernel space. This buffer must be at | 71 | * @dst: Destination address, in kernel space. This buffer must be at |
72 | * least @count bytes long. | 72 | * least @count bytes long. |
73 | * @src: Unsafe address. | 73 | * @unsafe_addr: Unsafe address. |
74 | * @count: Maximum number of bytes to copy, including the trailing NUL. | 74 | * @count: Maximum number of bytes to copy, including the trailing NUL. |
75 | * | 75 | * |
76 | * Copies a NUL-terminated string from unsafe address to kernel buffer. | 76 | * Copies a NUL-terminated string from unsafe address to kernel buffer. |
diff --git a/mm/memblock.c b/mm/memblock.c index 46aacdfa4f4d..5a9ca2a1751b 100644 --- a/mm/memblock.c +++ b/mm/memblock.c | |||
@@ -1654,7 +1654,7 @@ bool __init_memblock memblock_is_memory(phys_addr_t addr) | |||
1654 | return memblock_search(&memblock.memory, addr) != -1; | 1654 | return memblock_search(&memblock.memory, addr) != -1; |
1655 | } | 1655 | } |
1656 | 1656 | ||
1657 | int __init_memblock memblock_is_map_memory(phys_addr_t addr) | 1657 | bool __init_memblock memblock_is_map_memory(phys_addr_t addr) |
1658 | { | 1658 | { |
1659 | int i = memblock_search(&memblock.memory, addr); | 1659 | int i = memblock_search(&memblock.memory, addr); |
1660 | 1660 | ||
@@ -1690,13 +1690,13 @@ int __init_memblock memblock_search_pfn_nid(unsigned long pfn, | |||
1690 | * RETURNS: | 1690 | * RETURNS: |
1691 | * 0 if false, non-zero if true | 1691 | * 0 if false, non-zero if true |
1692 | */ | 1692 | */ |
1693 | int __init_memblock memblock_is_region_memory(phys_addr_t base, phys_addr_t size) | 1693 | bool __init_memblock memblock_is_region_memory(phys_addr_t base, phys_addr_t size) |
1694 | { | 1694 | { |
1695 | int idx = memblock_search(&memblock.memory, base); | 1695 | int idx = memblock_search(&memblock.memory, base); |
1696 | phys_addr_t end = base + memblock_cap_size(base, &size); | 1696 | phys_addr_t end = base + memblock_cap_size(base, &size); |
1697 | 1697 | ||
1698 | if (idx == -1) | 1698 | if (idx == -1) |
1699 | return 0; | 1699 | return false; |
1700 | return (memblock.memory.regions[idx].base + | 1700 | return (memblock.memory.regions[idx].base + |
1701 | memblock.memory.regions[idx].size) >= end; | 1701 | memblock.memory.regions[idx].size) >= end; |
1702 | } | 1702 | } |
diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 0937f2c52c7d..13b35ffa021e 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c | |||
@@ -917,7 +917,7 @@ int mem_cgroup_scan_tasks(struct mem_cgroup *memcg, | |||
917 | /** | 917 | /** |
918 | * mem_cgroup_page_lruvec - return lruvec for isolating/putting an LRU page | 918 | * mem_cgroup_page_lruvec - return lruvec for isolating/putting an LRU page |
919 | * @page: the page | 919 | * @page: the page |
920 | * @zone: zone of the page | 920 | * @pgdat: pgdat of the page |
921 | * | 921 | * |
922 | * This function is only safe when following the LRU page isolation | 922 | * This function is only safe when following the LRU page isolation |
923 | * and putback protocol: the LRU lock must be held, and the page must | 923 | * and putback protocol: the LRU lock must be held, and the page must |
@@ -5818,8 +5818,8 @@ bool mem_cgroup_charge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages) | |||
5818 | 5818 | ||
5819 | /** | 5819 | /** |
5820 | * mem_cgroup_uncharge_skmem - uncharge socket memory | 5820 | * mem_cgroup_uncharge_skmem - uncharge socket memory |
5821 | * @memcg - memcg to uncharge | 5821 | * @memcg: memcg to uncharge |
5822 | * @nr_pages - number of pages to uncharge | 5822 | * @nr_pages: number of pages to uncharge |
5823 | */ | 5823 | */ |
5824 | void mem_cgroup_uncharge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages) | 5824 | void mem_cgroup_uncharge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages) |
5825 | { | 5825 | { |
diff --git a/mm/memory.c b/mm/memory.c index 2248529e71c1..dd8de96f5547 100644 --- a/mm/memory.c +++ b/mm/memory.c | |||
@@ -59,7 +59,6 @@ | |||
59 | #include <linux/writeback.h> | 59 | #include <linux/writeback.h> |
60 | #include <linux/memcontrol.h> | 60 | #include <linux/memcontrol.h> |
61 | #include <linux/mmu_notifier.h> | 61 | #include <linux/mmu_notifier.h> |
62 | #include <linux/kallsyms.h> | ||
63 | #include <linux/swapops.h> | 62 | #include <linux/swapops.h> |
64 | #include <linux/elf.h> | 63 | #include <linux/elf.h> |
65 | #include <linux/gfp.h> | 64 | #include <linux/gfp.h> |
@@ -767,9 +766,6 @@ static void print_bad_pte(struct vm_area_struct *vma, unsigned long addr, | |||
767 | dump_page(page, "bad pte"); | 766 | dump_page(page, "bad pte"); |
768 | pr_alert("addr:%p vm_flags:%08lx anon_vma:%p mapping:%p index:%lx\n", | 767 | pr_alert("addr:%p vm_flags:%08lx anon_vma:%p mapping:%p index:%lx\n", |
769 | (void *)addr, vma->vm_flags, vma->anon_vma, mapping, index); | 768 | (void *)addr, vma->vm_flags, vma->anon_vma, mapping, index); |
770 | /* | ||
771 | * Choose text because data symbols depend on CONFIG_KALLSYMS_ALL=y | ||
772 | */ | ||
773 | pr_alert("file:%pD fault:%pf mmap:%pf readpage:%pf\n", | 769 | pr_alert("file:%pD fault:%pf mmap:%pf readpage:%pf\n", |
774 | vma->vm_file, | 770 | vma->vm_file, |
775 | vma->vm_ops ? vma->vm_ops->fault : NULL, | 771 | vma->vm_ops ? vma->vm_ops->fault : NULL, |
diff --git a/mm/mempool.c b/mm/mempool.c index 7d8c5a0010a2..5c9dce34719b 100644 --- a/mm/mempool.c +++ b/mm/mempool.c | |||
@@ -103,10 +103,10 @@ static inline void poison_element(mempool_t *pool, void *element) | |||
103 | } | 103 | } |
104 | #endif /* CONFIG_DEBUG_SLAB || CONFIG_SLUB_DEBUG_ON */ | 104 | #endif /* CONFIG_DEBUG_SLAB || CONFIG_SLUB_DEBUG_ON */ |
105 | 105 | ||
106 | static void kasan_poison_element(mempool_t *pool, void *element) | 106 | static __always_inline void kasan_poison_element(mempool_t *pool, void *element) |
107 | { | 107 | { |
108 | if (pool->alloc == mempool_alloc_slab || pool->alloc == mempool_kmalloc) | 108 | if (pool->alloc == mempool_alloc_slab || pool->alloc == mempool_kmalloc) |
109 | kasan_poison_kfree(element); | 109 | kasan_poison_kfree(element, _RET_IP_); |
110 | if (pool->alloc == mempool_alloc_pages) | 110 | if (pool->alloc == mempool_alloc_pages) |
111 | kasan_free_pages(element, (unsigned long)pool->pool_data); | 111 | kasan_free_pages(element, (unsigned long)pool->pool_data); |
112 | } | 112 | } |
@@ -119,7 +119,7 @@ static void kasan_unpoison_element(mempool_t *pool, void *element, gfp_t flags) | |||
119 | kasan_alloc_pages(element, (unsigned long)pool->pool_data); | 119 | kasan_alloc_pages(element, (unsigned long)pool->pool_data); |
120 | } | 120 | } |
121 | 121 | ||
122 | static void add_element(mempool_t *pool, void *element) | 122 | static __always_inline void add_element(mempool_t *pool, void *element) |
123 | { | 123 | { |
124 | BUG_ON(pool->curr_nr >= pool->min_nr); | 124 | BUG_ON(pool->curr_nr >= pool->min_nr); |
125 | poison_element(pool, element); | 125 | poison_element(pool, element); |
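
The mempool helpers are forced inline so that _RET_IP_, which resolves to the helper's own return address, names the mempool caller instead of the helper. A user-space sketch of the difference using __builtin_return_address() (function names are illustrative; build without sibling-call optimization, e.g. -O0, so the frames stay distinct):

#include <stdio.h>

static inline __attribute__((always_inline)) unsigned long helper_inline(void)
{
	return (unsigned long)__builtin_return_address(0);
}

static __attribute__((noinline)) unsigned long helper_outofline(void)
{
	return (unsigned long)__builtin_return_address(0);
}

/* inlined helper: the recorded address sits in api_a()'s caller (main) */
static __attribute__((noinline)) void api_a(void)
{
	printf("inlined helper records     %p\n", (void *)helper_inline());
}

/* out-of-line helper: the recorded address sits inside api_b() itself */
static __attribute__((noinline)) void api_b(void)
{
	printf("out-of-line helper records %p\n", (void *)helper_outofline());
}

int main(void)
{
	api_a();
	api_b();
	return 0;
}
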
diff --git a/mm/mlock.c b/mm/mlock.c index f7f54fd2e13f..79398200e423 100644 --- a/mm/mlock.c +++ b/mm/mlock.c | |||
@@ -157,7 +157,7 @@ static void __munlock_isolation_failed(struct page *page) | |||
157 | 157 | ||
158 | /** | 158 | /** |
159 | * munlock_vma_page - munlock a vma page | 159 | * munlock_vma_page - munlock a vma page |
160 | * @page - page to be unlocked, either a normal page or THP page head | 160 | * @page: page to be unlocked, either a normal page or THP page head |
161 | * | 161 | * |
162 | * returns the size of the page as a page mask (0 for normal page, | 162 | * returns the size of the page as a page mask (0 for normal page, |
163 | * HPAGE_PMD_NR - 1 for THP head page) | 163 | * HPAGE_PMD_NR - 1 for THP head page) |
diff --git a/mm/nommu.c b/mm/nommu.c index 4b9864b17cb0..ebb6e618dade 100644 --- a/mm/nommu.c +++ b/mm/nommu.c | |||
@@ -1836,7 +1836,7 @@ int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm, | |||
1836 | } | 1836 | } |
1837 | 1837 | ||
1838 | /** | 1838 | /** |
1839 | * @access_remote_vm - access another process' address space | 1839 | * access_remote_vm - access another process' address space |
1840 | * @mm: the mm_struct of the target address space | 1840 | * @mm: the mm_struct of the target address space |
1841 | * @addr: start address to access | 1841 | * @addr: start address to access |
1842 | * @buf: source or destination buffer | 1842 | * @buf: source or destination buffer |
diff --git a/mm/pagewalk.c b/mm/pagewalk.c index 23a3e415ac2c..8d2da5dec1e0 100644 --- a/mm/pagewalk.c +++ b/mm/pagewalk.c | |||
@@ -265,6 +265,7 @@ static int __walk_page_range(unsigned long start, unsigned long end, | |||
265 | * pte_entry(), and/or hugetlb_entry(). If you don't set up for some of these | 265 | * pte_entry(), and/or hugetlb_entry(). If you don't set up for some of these |
266 | * callbacks, the associated entries/pages are just ignored. | 266 | * callbacks, the associated entries/pages are just ignored. |
267 | * The return values of these callbacks are commonly defined like below: | 267 | * The return values of these callbacks are commonly defined like below: |
268 | * | ||
268 | * - 0 : succeeded to handle the current entry, and if you don't reach the | 269 | * - 0 : succeeded to handle the current entry, and if you don't reach the |
269 | * end address yet, continue to walk. | 270 | * end address yet, continue to walk. |
270 | * - >0 : succeeded to handle the current entry, and return to the caller | 271 | * - >0 : succeeded to handle the current entry, and return to the caller |
diff --git a/mm/process_vm_access.c b/mm/process_vm_access.c index 8973cd231ece..a447092d4635 100644 --- a/mm/process_vm_access.c +++ b/mm/process_vm_access.c | |||
@@ -25,7 +25,7 @@ | |||
25 | /** | 25 | /** |
26 | * process_vm_rw_pages - read/write pages from task specified | 26 | * process_vm_rw_pages - read/write pages from task specified |
27 | * @pages: array of pointers to pages we want to copy | 27 | * @pages: array of pointers to pages we want to copy |
28 | * @start_offset: offset in page to start copying from/to | 28 | * @offset: offset in page to start copying from/to |
29 | * @len: number of bytes to copy | 29 | * @len: number of bytes to copy |
30 | * @iter: where to copy to/from locally | 30 | * @iter: where to copy to/from locally |
31 | * @vm_write: 0 means copy from, 1 means copy to | 31 | * @vm_write: 0 means copy from, 1 means copy to |
@@ -147,6 +147,7 @@ static int process_vm_rw_single_vec(unsigned long addr, | |||
147 | * @riovcnt: size of rvec array | 147 | * @riovcnt: size of rvec array |
148 | * @flags: currently unused | 148 | * @flags: currently unused |
149 | * @vm_write: 0 if reading from other process, 1 if writing to other process | 149 | * @vm_write: 0 if reading from other process, 1 if writing to other process |
150 | * | ||
150 | * Returns the number of bytes read/written or error code. May | 151 | * Returns the number of bytes read/written or error code. May |
151 | * return less bytes than expected if an error occurs during the copying | 152 | * return less bytes than expected if an error occurs during the copying |
152 | * process. | 153 | * process. |
@@ -197,11 +198,7 @@ static ssize_t process_vm_rw_core(pid_t pid, struct iov_iter *iter, | |||
197 | } | 198 | } |
198 | 199 | ||
199 | /* Get process information */ | 200 | /* Get process information */ |
200 | rcu_read_lock(); | 201 | task = find_get_task_by_vpid(pid); |
201 | task = find_task_by_vpid(pid); | ||
202 | if (task) | ||
203 | get_task_struct(task); | ||
204 | rcu_read_unlock(); | ||
205 | if (!task) { | 202 | if (!task) { |
206 | rc = -ESRCH; | 203 | rc = -ESRCH; |
207 | goto free_proc_pages; | 204 | goto free_proc_pages; |
@@ -253,6 +250,7 @@ free_proc_pages: | |||
253 | * @riovcnt: size of rvec array | 250 | * @riovcnt: size of rvec array |
254 | * @flags: currently unused | 251 | * @flags: currently unused |
255 | * @vm_write: 0 if reading from other process, 1 if writing to other process | 252 | * @vm_write: 0 if reading from other process, 1 if writing to other process |
253 | * | ||
256 | * Returns the number of bytes read/written or error code. May | 254 | * Returns the number of bytes read/written or error code. May |
257 | * return less bytes than expected if an error occurs during the copying | 255 | * return less bytes than expected if an error occurs during the copying |
258 | * process. | 256 | * process. |
diff --git a/mm/slab.c b/mm/slab.c --- a/mm/slab.c +++ b/mm/slab.c | |||
@@ -3478,11 +3478,11 @@ free_done: | |||
3478 | * Release an obj back to its cache. If the obj has a constructed state, it must | 3478 | * Release an obj back to its cache. If the obj has a constructed state, it must |
3479 | * be in this state _before_ it is released. Called with disabled ints. | 3479 | * be in this state _before_ it is released. Called with disabled ints. |
3480 | */ | 3480 | */ |
3481 | static inline void __cache_free(struct kmem_cache *cachep, void *objp, | 3481 | static __always_inline void __cache_free(struct kmem_cache *cachep, void *objp, |
3482 | unsigned long caller) | 3482 | unsigned long caller) |
3483 | { | 3483 | { |
3484 | /* Put the object into the quarantine, don't touch it for now. */ | 3484 | /* Put the object into the quarantine, don't touch it for now. */ |
3485 | if (kasan_slab_free(cachep, objp)) | 3485 | if (kasan_slab_free(cachep, objp, _RET_IP_)) |
3486 | return; | 3486 | return; |
3487 | 3487 | ||
3488 | ___cache_free(cachep, objp, caller); | 3488 | ___cache_free(cachep, objp, caller); |
diff --git a/mm/slub.c b/mm/slub.c --- a/mm/slub.c +++ b/mm/slub.c | |||
@@ -1356,13 +1356,13 @@ static inline void kmalloc_large_node_hook(void *ptr, size_t size, gfp_t flags) | |||
1356 | kasan_kmalloc_large(ptr, size, flags); | 1356 | kasan_kmalloc_large(ptr, size, flags); |
1357 | } | 1357 | } |
1358 | 1358 | ||
1359 | static inline void kfree_hook(const void *x) | 1359 | static __always_inline void kfree_hook(void *x) |
1360 | { | 1360 | { |
1361 | kmemleak_free(x); | 1361 | kmemleak_free(x); |
1362 | kasan_kfree_large(x); | 1362 | kasan_kfree_large(x, _RET_IP_); |
1363 | } | 1363 | } |
1364 | 1364 | ||
1365 | static inline void *slab_free_hook(struct kmem_cache *s, void *x) | 1365 | static __always_inline void *slab_free_hook(struct kmem_cache *s, void *x) |
1366 | { | 1366 | { |
1367 | void *freeptr; | 1367 | void *freeptr; |
1368 | 1368 | ||
@@ -1390,7 +1390,7 @@ static inline void *slab_free_hook(struct kmem_cache *s, void *x) | |||
1390 | * kasan_slab_free() may put x into memory quarantine, delaying its | 1390 | * kasan_slab_free() may put x into memory quarantine, delaying its |
1391 | * reuse. In this case the object's freelist pointer is changed. | 1391 | * reuse. In this case the object's freelist pointer is changed. |
1392 | */ | 1392 | */ |
1393 | kasan_slab_free(s, x); | 1393 | kasan_slab_free(s, x, _RET_IP_); |
1394 | return freeptr; | 1394 | return freeptr; |
1395 | } | 1395 | } |
1396 | 1396 | ||
@@ -3910,7 +3910,7 @@ void kfree(const void *x) | |||
3910 | page = virt_to_head_page(x); | 3910 | page = virt_to_head_page(x); |
3911 | if (unlikely(!PageSlab(page))) { | 3911 | if (unlikely(!PageSlab(page))) { |
3912 | BUG_ON(!PageCompound(page)); | 3912 | BUG_ON(!PageCompound(page)); |
3913 | kfree_hook(x); | 3913 | kfree_hook(object); |
3914 | __free_pages(page, compound_order(page)); | 3914 | __free_pages(page, compound_order(page)); |
3915 | return; | 3915 | return; |
3916 | } | 3916 | } |
diff --git a/mm/swap.c b/mm/swap.c --- a/mm/swap.c +++ b/mm/swap.c | |||
@@ -913,11 +913,11 @@ EXPORT_SYMBOL(__pagevec_lru_add); | |||
913 | * @pvec: Where the resulting entries are placed | 913 | * @pvec: Where the resulting entries are placed |
914 | * @mapping: The address_space to search | 914 | * @mapping: The address_space to search |
915 | * @start: The starting entry index | 915 | * @start: The starting entry index |
916 | * @nr_entries: The maximum number of entries | 916 | * @nr_pages: The maximum number of pages |
917 | * @indices: The cache indices corresponding to the entries in @pvec | 917 | * @indices: The cache indices corresponding to the entries in @pvec |
918 | * | 918 | * |
919 | * pagevec_lookup_entries() will search for and return a group of up | 919 | * pagevec_lookup_entries() will search for and return a group of up |
920 | * to @nr_entries pages and shadow entries in the mapping. All | 920 | * to @nr_pages pages and shadow entries in the mapping. All |
921 | * entries are placed in @pvec. pagevec_lookup_entries() takes a | 921 | * entries are placed in @pvec. pagevec_lookup_entries() takes a |
922 | * reference against actual pages in @pvec. | 922 | * reference against actual pages in @pvec. |
923 | * | 923 | * |
diff --git a/mm/userfaultfd.c b/mm/userfaultfd.c index 81192701964d..39791b81ede7 100644 --- a/mm/userfaultfd.c +++ b/mm/userfaultfd.c | |||
@@ -16,7 +16,6 @@ | |||
16 | #include <linux/userfaultfd_k.h> | 16 | #include <linux/userfaultfd_k.h> |
17 | #include <linux/mmu_notifier.h> | 17 | #include <linux/mmu_notifier.h> |
18 | #include <linux/hugetlb.h> | 18 | #include <linux/hugetlb.h> |
19 | #include <linux/pagemap.h> | ||
20 | #include <linux/shmem_fs.h> | 19 | #include <linux/shmem_fs.h> |
21 | #include <asm/tlbflush.h> | 20 | #include <asm/tlbflush.h> |
22 | #include "internal.h" | 21 | #include "internal.h" |
diff --git a/mm/vmscan.c b/mm/vmscan.c index fdd3fc6be862..444749669187 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c | |||
@@ -1595,6 +1595,7 @@ static unsigned long isolate_lru_pages(unsigned long nr_to_scan, | |||
1595 | * found will be decremented. | 1595 | * found will be decremented. |
1596 | * | 1596 | * |
1597 | * Restrictions: | 1597 | * Restrictions: |
1598 | * | ||
1598 | * (1) Must be called with an elevated refcount on the page. This is a | 1599 | * (1) Must be called with an elevated refcount on the page. This is a |
1599 | * fundamentnal difference from isolate_lru_pages (which is called | 1600 | * fundamentnal difference from isolate_lru_pages (which is called |
1600 | * without a stable reference). | 1601 | * without a stable reference). |
diff --git a/mm/z3fold.c b/mm/z3fold.c index 39e19125d6a0..d589d318727f 100644 --- a/mm/z3fold.c +++ b/mm/z3fold.c | |||
@@ -769,7 +769,7 @@ static void z3fold_free(struct z3fold_pool *pool, unsigned long handle) | |||
769 | /** | 769 | /** |
770 | * z3fold_reclaim_page() - evicts allocations from a pool page and frees it | 770 | * z3fold_reclaim_page() - evicts allocations from a pool page and frees it |
771 | * @pool: pool from which a page will attempt to be evicted | 771 | * @pool: pool from which a page will attempt to be evicted |
772 | * @retires: number of pages on the LRU list for which eviction will | 772 | * @retries: number of pages on the LRU list for which eviction will |
773 | * be attempted before failing | 773 | * be attempted before failing |
774 | * | 774 | * |
775 | * z3fold reclaim is different from normal system reclaim in that it is done | 775 | * z3fold reclaim is different from normal system reclaim in that it is done |
@@ -779,7 +779,7 @@ static void z3fold_free(struct z3fold_pool *pool, unsigned long handle) | |||
779 | * z3fold and the user, however. | 779 | * z3fold and the user, however. |
780 | * | 780 | * |
781 | * To avoid these, this is how z3fold_reclaim_page() should be called: | 781 | * To avoid these, this is how z3fold_reclaim_page() should be called: |
782 | 782 | * | |
783 | * The user detects a page should be reclaimed and calls z3fold_reclaim_page(). | 783 | * The user detects a page should be reclaimed and calls z3fold_reclaim_page(). |
784 | * z3fold_reclaim_page() will remove a z3fold page from the pool LRU list and | 784 | * z3fold_reclaim_page() will remove a z3fold page from the pool LRU list and |
785 | * call the user-defined eviction handler with the pool and handle as | 785 | * call the user-defined eviction handler with the pool and handle as |
diff --git a/mm/zbud.c b/mm/zbud.c --- a/mm/zbud.c +++ b/mm/zbud.c | |||
@@ -466,7 +466,7 @@ void zbud_free(struct zbud_pool *pool, unsigned long handle) | |||
466 | /** | 466 | /** |
467 | * zbud_reclaim_page() - evicts allocations from a pool page and frees it | 467 | * zbud_reclaim_page() - evicts allocations from a pool page and frees it |
468 | * @pool: pool from which a page will attempt to be evicted | 468 | * @pool: pool from which a page will attempt to be evicted |
469 | * @retires: number of pages on the LRU list for which eviction will | 469 | * @retries: number of pages on the LRU list for which eviction will |
470 | * be attempted before failing | 470 | * be attempted before failing |
471 | * | 471 | * |
472 | * zbud reclaim is different from normal system reclaim in that the reclaim is | 472 | * zbud reclaim is different from normal system reclaim in that the reclaim is |
@@ -476,7 +476,7 @@ void zbud_free(struct zbud_pool *pool, unsigned long handle) | |||
476 | * the user, however. | 476 | * the user, however. |
477 | * | 477 | * |
478 | * To avoid these, this is how zbud_reclaim_page() should be called: | 478 | * To avoid these, this is how zbud_reclaim_page() should be called: |
479 | 479 | * | |
480 | * The user detects a page should be reclaimed and calls zbud_reclaim_page(). | 480 | * The user detects a page should be reclaimed and calls zbud_reclaim_page(). |
481 | * zbud_reclaim_page() will remove a zbud page from the pool LRU list and call | 481 | * zbud_reclaim_page() will remove a zbud page from the pool LRU list and call |
482 | * the user-defined eviction handler with the pool and handle as arguments. | 482 | * the user-defined eviction handler with the pool and handle as arguments. |
diff --git a/mm/zpool.c b/mm/zpool.c index e1e7aa6d1d06..f8cb83e7699b 100644 --- a/mm/zpool.c +++ b/mm/zpool.c | |||
@@ -101,7 +101,7 @@ static void zpool_put_driver(struct zpool_driver *driver) | |||
101 | 101 | ||
102 | /** | 102 | /** |
103 | * zpool_has_pool() - Check if the pool driver is available | 103 | * zpool_has_pool() - Check if the pool driver is available |
104 | * @type The type of the zpool to check (e.g. zbud, zsmalloc) | 104 | * @type: The type of the zpool to check (e.g. zbud, zsmalloc) |
105 | * | 105 | * |
106 | * This checks if the @type pool driver is available. This will try to load | 106 | * This checks if the @type pool driver is available. This will try to load |
107 | * the requested module, if needed, but there is no guarantee the module will | 107 | * the requested module, if needed, but there is no guarantee the module will |
@@ -136,10 +136,10 @@ EXPORT_SYMBOL(zpool_has_pool); | |||
136 | 136 | ||
137 | /** | 137 | /** |
138 | * zpool_create_pool() - Create a new zpool | 138 | * zpool_create_pool() - Create a new zpool |
139 | * @type The type of the zpool to create (e.g. zbud, zsmalloc) | 139 | * @type: The type of the zpool to create (e.g. zbud, zsmalloc) |
140 | * @name The name of the zpool (e.g. zram0, zswap) | 140 | * @name: The name of the zpool (e.g. zram0, zswap) |
141 | * @gfp The GFP flags to use when allocating the pool. | 141 | * @gfp: The GFP flags to use when allocating the pool. |
142 | * @ops The optional ops callback. | 142 | * @ops: The optional ops callback. |
143 | * | 143 | * |
144 | * This creates a new zpool of the specified type. The gfp flags will be | 144 | * This creates a new zpool of the specified type. The gfp flags will be |
145 | * used when allocating memory, if the implementation supports it. If the | 145 | * used when allocating memory, if the implementation supports it. If the |
@@ -201,7 +201,7 @@ struct zpool *zpool_create_pool(const char *type, const char *name, gfp_t gfp, | |||
201 | 201 | ||
202 | /** | 202 | /** |
203 | * zpool_destroy_pool() - Destroy a zpool | 203 | * zpool_destroy_pool() - Destroy a zpool |
204 | * @pool The zpool to destroy. | 204 | * @zpool: The zpool to destroy. |
205 | * | 205 | * |
206 | * Implementations must guarantee this to be thread-safe, | 206 | * Implementations must guarantee this to be thread-safe, |
207 | * however only when destroying different pools. The same | 207 | * however only when destroying different pools. The same |
@@ -224,7 +224,7 @@ void zpool_destroy_pool(struct zpool *zpool) | |||
224 | 224 | ||
225 | /** | 225 | /** |
226 | * zpool_get_type() - Get the type of the zpool | 226 | * zpool_get_type() - Get the type of the zpool |
227 | * @pool The zpool to check | 227 | * @zpool: The zpool to check |
228 | * | 228 | * |
229 | * This returns the type of the pool. | 229 | * This returns the type of the pool. |
230 | * | 230 | * |
@@ -239,10 +239,10 @@ const char *zpool_get_type(struct zpool *zpool) | |||
239 | 239 | ||
240 | /** | 240 | /** |
241 | * zpool_malloc() - Allocate memory | 241 | * zpool_malloc() - Allocate memory |
242 | * @pool The zpool to allocate from. | 242 | * @zpool: The zpool to allocate from. |
243 | * @size The amount of memory to allocate. | 243 | * @size: The amount of memory to allocate. |
244 | * @gfp The GFP flags to use when allocating memory. | 244 | * @gfp: The GFP flags to use when allocating memory. |
245 | * @handle Pointer to the handle to set | 245 | * @handle: Pointer to the handle to set |
246 | * | 246 | * |
247 | * This allocates the requested amount of memory from the pool. | 247 | * This allocates the requested amount of memory from the pool. |
248 | * The gfp flags will be used when allocating memory, if the | 248 | * The gfp flags will be used when allocating memory, if the |
@@ -261,8 +261,8 @@ int zpool_malloc(struct zpool *zpool, size_t size, gfp_t gfp, | |||
261 | 261 | ||
262 | /** | 262 | /** |
263 | * zpool_free() - Free previously allocated memory | 263 | * zpool_free() - Free previously allocated memory |
264 | * @pool The zpool that allocated the memory. | 264 | * @zpool: The zpool that allocated the memory. |
265 | * @handle The handle to the memory to free. | 265 | * @handle: The handle to the memory to free. |
266 | * | 266 | * |
267 | * This frees previously allocated memory. This does not guarantee | 267 | * This frees previously allocated memory. This does not guarantee |
268 | * that the pool will actually free memory, only that the memory | 268 | * that the pool will actually free memory, only that the memory |
@@ -280,9 +280,9 @@ void zpool_free(struct zpool *zpool, unsigned long handle) | |||
280 | 280 | ||
281 | /** | 281 | /** |
282 | * zpool_shrink() - Shrink the pool size | 282 | * zpool_shrink() - Shrink the pool size |
283 | * @pool The zpool to shrink. | 283 | * @zpool: The zpool to shrink. |
284 | * @pages The number of pages to shrink the pool. | 284 | * @pages: The number of pages to shrink the pool. |
285 | * @reclaimed The number of pages successfully evicted. | 285 | * @reclaimed: The number of pages successfully evicted. |
286 | * | 286 | * |
287 | * This attempts to shrink the actual memory size of the pool | 287 | * This attempts to shrink the actual memory size of the pool |
288 | * by evicting currently used handle(s). If the pool was | 288 | * by evicting currently used handle(s). If the pool was |
@@ -304,11 +304,11 @@ int zpool_shrink(struct zpool *zpool, unsigned int pages, | |||
304 | 304 | ||
305 | /** | 305 | /** |
306 | * zpool_map_handle() - Map a previously allocated handle into memory | 306 | * zpool_map_handle() - Map a previously allocated handle into memory |
307 | * @pool The zpool that the handle was allocated from | 307 | * @zpool: The zpool that the handle was allocated from |
308 | * @handle The handle to map | 308 | * @handle: The handle to map |
309 | * @mm How the memory should be mapped | 309 | * @mapmode: How the memory should be mapped |
310 | * | 310 | * |
311 | * This maps a previously allocated handle into memory. The @mm | 311 | * This maps a previously allocated handle into memory. The @mapmode |
312 | * param indicates to the implementation how the memory will be | 312 | * param indicates to the implementation how the memory will be |
313 | * used, i.e. read-only, write-only, read-write. If the | 313 | * used, i.e. read-only, write-only, read-write. If the |
314 | * implementation does not support it, the memory will be treated | 314 | * implementation does not support it, the memory will be treated |
@@ -332,8 +332,8 @@ void *zpool_map_handle(struct zpool *zpool, unsigned long handle, | |||
332 | 332 | ||
333 | /** | 333 | /** |
334 | * zpool_unmap_handle() - Unmap a previously mapped handle | 334 | * zpool_unmap_handle() - Unmap a previously mapped handle |
335 | * @pool The zpool that the handle was allocated from | 335 | * @zpool: The zpool that the handle was allocated from |
336 | * @handle The handle to unmap | 336 | * @handle: The handle to unmap |
337 | * | 337 | * |
338 | * This unmaps a previously mapped handle. Any locks or other | 338 | * This unmaps a previously mapped handle. Any locks or other |
339 | * actions that the implementation took in zpool_map_handle() | 339 | * actions that the implementation took in zpool_map_handle() |
@@ -347,7 +347,7 @@ void zpool_unmap_handle(struct zpool *zpool, unsigned long handle) | |||
347 | 347 | ||
348 | /** | 348 | /** |
349 | * zpool_get_total_size() - The total size of the pool | 349 | * zpool_get_total_size() - The total size of the pool |
350 | * @pool The zpool to check | 350 | * @zpool: The zpool to check |
351 | * | 351 | * |
352 | * This returns the total size in bytes of the pool. | 352 | * This returns the total size in bytes of the pool. |
353 | * | 353 | * |
diff --git a/net/core/ethtool.c b/net/core/ethtool.c index 107b122c8969..494e6a5d7306 100644 --- a/net/core/ethtool.c +++ b/net/core/ethtool.c | |||
@@ -616,18 +616,15 @@ static int load_link_ksettings_from_user(struct ethtool_link_ksettings *to, | |||
616 | return -EFAULT; | 616 | return -EFAULT; |
617 | 617 | ||
618 | memcpy(&to->base, &link_usettings.base, sizeof(to->base)); | 618 | memcpy(&to->base, &link_usettings.base, sizeof(to->base)); |
619 | bitmap_from_u32array(to->link_modes.supported, | 619 | bitmap_from_arr32(to->link_modes.supported, |
620 | __ETHTOOL_LINK_MODE_MASK_NBITS, | 620 | link_usettings.link_modes.supported, |
621 | link_usettings.link_modes.supported, | 621 | __ETHTOOL_LINK_MODE_MASK_NBITS); |
622 | __ETHTOOL_LINK_MODE_MASK_NU32); | 622 | bitmap_from_arr32(to->link_modes.advertising, |
623 | bitmap_from_u32array(to->link_modes.advertising, | 623 | link_usettings.link_modes.advertising, |
624 | __ETHTOOL_LINK_MODE_MASK_NBITS, | 624 | __ETHTOOL_LINK_MODE_MASK_NBITS); |
625 | link_usettings.link_modes.advertising, | 625 | bitmap_from_arr32(to->link_modes.lp_advertising, |
626 | __ETHTOOL_LINK_MODE_MASK_NU32); | 626 | link_usettings.link_modes.lp_advertising, |
627 | bitmap_from_u32array(to->link_modes.lp_advertising, | 627 | __ETHTOOL_LINK_MODE_MASK_NBITS); |
628 | __ETHTOOL_LINK_MODE_MASK_NBITS, | ||
629 | link_usettings.link_modes.lp_advertising, | ||
630 | __ETHTOOL_LINK_MODE_MASK_NU32); | ||
631 | 628 | ||
632 | return 0; | 629 | return 0; |
633 | } | 630 | } |
@@ -643,18 +640,15 @@ store_link_ksettings_for_user(void __user *to, | |||
643 | struct ethtool_link_usettings link_usettings; | 640 | struct ethtool_link_usettings link_usettings; |
644 | 641 | ||
645 | memcpy(&link_usettings.base, &from->base, sizeof(link_usettings)); | 642 | memcpy(&link_usettings.base, &from->base, sizeof(link_usettings)); |
646 | bitmap_to_u32array(link_usettings.link_modes.supported, | 643 | bitmap_to_arr32(link_usettings.link_modes.supported, |
647 | __ETHTOOL_LINK_MODE_MASK_NU32, | 644 | from->link_modes.supported, |
648 | from->link_modes.supported, | 645 | __ETHTOOL_LINK_MODE_MASK_NBITS); |
649 | __ETHTOOL_LINK_MODE_MASK_NBITS); | 646 | bitmap_to_arr32(link_usettings.link_modes.advertising, |
650 | bitmap_to_u32array(link_usettings.link_modes.advertising, | 647 | from->link_modes.advertising, |
651 | __ETHTOOL_LINK_MODE_MASK_NU32, | 648 | __ETHTOOL_LINK_MODE_MASK_NBITS); |
652 | from->link_modes.advertising, | 649 | bitmap_to_arr32(link_usettings.link_modes.lp_advertising, |
653 | __ETHTOOL_LINK_MODE_MASK_NBITS); | 650 | from->link_modes.lp_advertising, |
654 | bitmap_to_u32array(link_usettings.link_modes.lp_advertising, | 651 | __ETHTOOL_LINK_MODE_MASK_NBITS); |
655 | __ETHTOOL_LINK_MODE_MASK_NU32, | ||
656 | from->link_modes.lp_advertising, | ||
657 | __ETHTOOL_LINK_MODE_MASK_NBITS); | ||
658 | 652 | ||
659 | if (copy_to_user(to, &link_usettings, sizeof(link_usettings))) | 653 | if (copy_to_user(to, &link_usettings, sizeof(link_usettings))) |
660 | return -EFAULT; | 654 | return -EFAULT; |
@@ -2358,10 +2352,8 @@ static int ethtool_get_per_queue_coalesce(struct net_device *dev, | |||
2358 | 2352 | ||
2359 | useraddr += sizeof(*per_queue_opt); | 2353 | useraddr += sizeof(*per_queue_opt); |
2360 | 2354 | ||
2361 | bitmap_from_u32array(queue_mask, | 2355 | bitmap_from_arr32(queue_mask, per_queue_opt->queue_mask, |
2362 | MAX_NUM_QUEUE, | 2356 | MAX_NUM_QUEUE); |
2363 | per_queue_opt->queue_mask, | ||
2364 | DIV_ROUND_UP(MAX_NUM_QUEUE, 32)); | ||
2365 | 2357 | ||
2366 | for_each_set_bit(bit, queue_mask, MAX_NUM_QUEUE) { | 2358 | for_each_set_bit(bit, queue_mask, MAX_NUM_QUEUE) { |
2367 | struct ethtool_coalesce coalesce = { .cmd = ETHTOOL_GCOALESCE }; | 2359 | struct ethtool_coalesce coalesce = { .cmd = ETHTOOL_GCOALESCE }; |
@@ -2393,10 +2385,7 @@ static int ethtool_set_per_queue_coalesce(struct net_device *dev, | |||
2393 | 2385 | ||
2394 | useraddr += sizeof(*per_queue_opt); | 2386 | useraddr += sizeof(*per_queue_opt); |
2395 | 2387 | ||
2396 | bitmap_from_u32array(queue_mask, | 2388 | bitmap_from_arr32(queue_mask, per_queue_opt->queue_mask, MAX_NUM_QUEUE); |
2397 | MAX_NUM_QUEUE, | ||
2398 | per_queue_opt->queue_mask, | ||
2399 | DIV_ROUND_UP(MAX_NUM_QUEUE, 32)); | ||
2400 | n_queue = bitmap_weight(queue_mask, MAX_NUM_QUEUE); | 2389 | n_queue = bitmap_weight(queue_mask, MAX_NUM_QUEUE); |
2401 | tmp = backup = kmalloc_array(n_queue, sizeof(*backup), GFP_KERNEL); | 2390 | tmp = backup = kmalloc_array(n_queue, sizeof(*backup), GFP_KERNEL); |
2402 | if (!backup) | 2391 | if (!backup) |
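
bitmap_from_arr32()/bitmap_to_arr32() replace the old bitmap_from_u32array() calls and take the bit count directly, so callers no longer pass a separate u32-word count. An illustrative sketch of the from-arr32 direction on a 64-bit target (a simplification, not the kernel implementation, which also clears bits past nbits):

#include <stdint.h>
#include <stdio.h>

/* pack u32 source words into 64-bit longs, low word first */
static void sketch_bitmap_from_arr32(unsigned long *bitmap,
				     const uint32_t *buf, unsigned int nbits)
{
	unsigned int i, words = (nbits + 31) / 32;

	for (i = 0; i < words; i++) {
		if (i % 2 == 0)
			bitmap[i / 2] = buf[i];
		else
			bitmap[i / 2] |= (unsigned long)buf[i] << 32;
	}
}

int main(void)
{
	uint32_t src[2] = { 0xffffffffu, 0x1 };
	unsigned long dst[1];

	sketch_bitmap_from_arr32(dst, src, 33);
	printf("dst[0] = %#lx\n", dst[0]);	/* 0x1ffffffff */
	return 0;
}
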
diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c index 6a5d0e39bb87..9b9d2ff01b35 100644 --- a/net/ipv6/mcast.c +++ b/net/ipv6/mcast.c | |||
@@ -65,10 +65,10 @@ | |||
65 | #include <net/ip6_checksum.h> | 65 | #include <net/ip6_checksum.h> |
66 | 66 | ||
67 | /* Ensure that we have struct in6_addr aligned on 32bit word. */ | 67 | /* Ensure that we have struct in6_addr aligned on 32bit word. */ |
68 | static void *__mld2_query_bugs[] __attribute__((__unused__)) = { | 68 | static int __mld2_query_bugs[] __attribute__((__unused__)) = { |
69 | BUILD_BUG_ON_NULL(offsetof(struct mld2_query, mld2q_srcs) % 4), | 69 | BUILD_BUG_ON_ZERO(offsetof(struct mld2_query, mld2q_srcs) % 4), |
70 | BUILD_BUG_ON_NULL(offsetof(struct mld2_report, mld2r_grec) % 4), | 70 | BUILD_BUG_ON_ZERO(offsetof(struct mld2_report, mld2r_grec) % 4), |
71 | BUILD_BUG_ON_NULL(offsetof(struct mld2_grec, grec_mca) % 4) | 71 | BUILD_BUG_ON_ZERO(offsetof(struct mld2_grec, grec_mca) % 4) |
72 | }; | 72 | }; |
73 | 73 | ||
74 | static struct in6_addr mld2_all_mcr = MLD2_ALL_MCR_INIT; | 74 | static struct in6_addr mld2_all_mcr = MLD2_ALL_MCR_INIT; |
diff --git a/scripts/Makefile.kasan b/scripts/Makefile.kasan index 1ce7115aa499..69552a39951d 100644 --- a/scripts/Makefile.kasan +++ b/scripts/Makefile.kasan | |||
@@ -10,10 +10,7 @@ KASAN_SHADOW_OFFSET ?= $(CONFIG_KASAN_SHADOW_OFFSET) | |||
10 | 10 | ||
11 | CFLAGS_KASAN_MINIMAL := -fsanitize=kernel-address | 11 | CFLAGS_KASAN_MINIMAL := -fsanitize=kernel-address |
12 | 12 | ||
13 | CFLAGS_KASAN := $(call cc-option, -fsanitize=kernel-address \ | 13 | cc-param = $(call cc-option, -mllvm -$(1), $(call cc-option, --param $(1))) |
14 | -fasan-shadow-offset=$(KASAN_SHADOW_OFFSET) \ | ||
15 | --param asan-stack=1 --param asan-globals=1 \ | ||
16 | --param asan-instrumentation-with-call-threshold=$(call_threshold)) | ||
17 | 14 | ||
18 | ifeq ($(call cc-option, $(CFLAGS_KASAN_MINIMAL) -Werror),) | 15 | ifeq ($(call cc-option, $(CFLAGS_KASAN_MINIMAL) -Werror),) |
19 | ifneq ($(CONFIG_COMPILE_TEST),y) | 16 | ifneq ($(CONFIG_COMPILE_TEST),y) |
@@ -21,14 +18,30 @@ ifeq ($(call cc-option, $(CFLAGS_KASAN_MINIMAL) -Werror),) | |||
21 | -fsanitize=kernel-address is not supported by compiler) | 18 | -fsanitize=kernel-address is not supported by compiler) |
22 | endif | 19 | endif |
23 | else | 20 | else |
24 | ifeq ($(CFLAGS_KASAN),) | 21 | # -fasan-shadow-offset fails without -fsanitize |
25 | ifneq ($(CONFIG_COMPILE_TEST),y) | 22 | CFLAGS_KASAN_SHADOW := $(call cc-option, -fsanitize=kernel-address \ |
26 | $(warning CONFIG_KASAN: compiler does not support all options.\ | 23 | -fasan-shadow-offset=$(KASAN_SHADOW_OFFSET), \ |
27 | Trying minimal configuration) | 24 | $(call cc-option, -fsanitize=kernel-address \ |
28 | endif | 25 | -mllvm -asan-mapping-offset=$(KASAN_SHADOW_OFFSET))) |
29 | CFLAGS_KASAN := $(CFLAGS_KASAN_MINIMAL) | 26 | |
30 | endif | 27 | ifeq ($(strip $(CFLAGS_KASAN_SHADOW)),) |
28 | CFLAGS_KASAN := $(CFLAGS_KASAN_MINIMAL) | ||
29 | else | ||
30 | # Now add all the compiler specific options that are valid standalone | ||
31 | CFLAGS_KASAN := $(CFLAGS_KASAN_SHADOW) \ | ||
32 | $(call cc-param,asan-globals=1) \ | ||
33 | $(call cc-param,asan-instrumentation-with-call-threshold=$(call_threshold)) \ | ||
34 | $(call cc-param,asan-stack=1) \ | ||
35 | $(call cc-param,asan-use-after-scope=1) \ | ||
36 | $(call cc-param,asan-instrument-allocas=1) | ||
37 | endif | ||
38 | |||
31 | endif | 39 | endif |
32 | 40 | ||
41 | ifdef CONFIG_KASAN_EXTRA | ||
33 | CFLAGS_KASAN += $(call cc-option, -fsanitize-address-use-after-scope) | 42 | CFLAGS_KASAN += $(call cc-option, -fsanitize-address-use-after-scope) |
34 | endif | 43 | endif |
44 | |||
45 | CFLAGS_KASAN_NOSANITIZE := -fno-builtin | ||
46 | |||
47 | endif | ||
diff --git a/scripts/Makefile.lib b/scripts/Makefile.lib index 5fdc1a19b02c..5589bae34af6 100644 --- a/scripts/Makefile.lib +++ b/scripts/Makefile.lib | |||
@@ -121,7 +121,7 @@ endif | |||
121 | ifeq ($(CONFIG_KASAN),y) | 121 | ifeq ($(CONFIG_KASAN),y) |
122 | _c_flags += $(if $(patsubst n%,, \ | 122 | _c_flags += $(if $(patsubst n%,, \ |
123 | $(KASAN_SANITIZE_$(basetarget).o)$(KASAN_SANITIZE)y), \ | 123 | $(KASAN_SANITIZE_$(basetarget).o)$(KASAN_SANITIZE)y), \ |
124 | $(CFLAGS_KASAN)) | 124 | $(CFLAGS_KASAN), $(CFLAGS_KASAN_NOSANITIZE)) |
125 | endif | 125 | endif |
126 | 126 | ||
127 | ifeq ($(CONFIG_UBSAN),y) | 127 | ifeq ($(CONFIG_UBSAN),y) |
diff --git a/scripts/Makefile.ubsan b/scripts/Makefile.ubsan index 8fd4d44fbcd1..b593b36ccff8 100644 --- a/scripts/Makefile.ubsan +++ b/scripts/Makefile.ubsan | |||
@@ -7,7 +7,6 @@ ifdef CONFIG_UBSAN | |||
7 | CFLAGS_UBSAN += $(call cc-option, -fsanitize=signed-integer-overflow) | 7 | CFLAGS_UBSAN += $(call cc-option, -fsanitize=signed-integer-overflow) |
8 | CFLAGS_UBSAN += $(call cc-option, -fsanitize=bounds) | 8 | CFLAGS_UBSAN += $(call cc-option, -fsanitize=bounds) |
9 | CFLAGS_UBSAN += $(call cc-option, -fsanitize=object-size) | 9 | CFLAGS_UBSAN += $(call cc-option, -fsanitize=object-size) |
10 | CFLAGS_UBSAN += $(call cc-option, -fsanitize=returns-nonnull-attribute) | ||
11 | CFLAGS_UBSAN += $(call cc-option, -fsanitize=bool) | 10 | CFLAGS_UBSAN += $(call cc-option, -fsanitize=bool) |
12 | CFLAGS_UBSAN += $(call cc-option, -fsanitize=enum) | 11 | CFLAGS_UBSAN += $(call cc-option, -fsanitize=enum) |
13 | 12 | ||
diff --git a/scripts/checkpatch.pl b/scripts/checkpatch.pl index e954df2b2077..3d4040322ae1 100755 --- a/scripts/checkpatch.pl +++ b/scripts/checkpatch.pl | |||
@@ -566,6 +566,7 @@ foreach my $entry (@mode_permission_funcs) { | |||
566 | $mode_perms_search .= '|' if ($mode_perms_search ne ""); | 566 | $mode_perms_search .= '|' if ($mode_perms_search ne ""); |
567 | $mode_perms_search .= $entry->[0]; | 567 | $mode_perms_search .= $entry->[0]; |
568 | } | 568 | } |
569 | $mode_perms_search = "(?:${mode_perms_search})"; | ||
569 | 570 | ||
570 | our $mode_perms_world_writable = qr{ | 571 | our $mode_perms_world_writable = qr{ |
571 | S_IWUGO | | 572 | S_IWUGO | |
@@ -600,6 +601,37 @@ foreach my $entry (keys %mode_permission_string_types) { | |||
600 | $mode_perms_string_search .= '|' if ($mode_perms_string_search ne ""); | 601 | $mode_perms_string_search .= '|' if ($mode_perms_string_search ne ""); |
601 | $mode_perms_string_search .= $entry; | 602 | $mode_perms_string_search .= $entry; |
602 | } | 603 | } |
604 | our $single_mode_perms_string_search = "(?:${mode_perms_string_search})"; | ||
605 | our $multi_mode_perms_string_search = qr{ | ||
606 | ${single_mode_perms_string_search} | ||
607 | (?:\s*\|\s*${single_mode_perms_string_search})* | ||
608 | }x; | ||
609 | |||
610 | sub perms_to_octal { | ||
611 | my ($string) = @_; | ||
612 | |||
613 | return trim($string) if ($string =~ /^\s*0[0-7]{3,3}\s*$/); | ||
614 | |||
615 | my $val = ""; | ||
616 | my $oval = ""; | ||
617 | my $to = 0; | ||
618 | my $curpos = 0; | ||
619 | my $lastpos = 0; | ||
620 | while ($string =~ /\b(($single_mode_perms_string_search)\b(?:\s*\|\s*)?\s*)/g) { | ||
621 | $curpos = pos($string); | ||
622 | my $match = $2; | ||
623 | my $omatch = $1; | ||
624 | last if ($lastpos > 0 && ($curpos - length($omatch) != $lastpos)); | ||
625 | $lastpos = $curpos; | ||
626 | $to |= $mode_permission_string_types{$match}; | ||
627 | $val .= '\s*\|\s*' if ($val ne ""); | ||
628 | $val .= $match; | ||
629 | $oval .= $omatch; | ||
630 | } | ||
631 | $oval =~ s/^\s*\|\s*//; | ||
632 | $oval =~ s/\s*\|\s*$//; | ||
633 | return sprintf("%04o", $to); | ||
634 | } | ||
603 | 635 | ||
604 | our $allowed_asm_includes = qr{(?x: | 636 | our $allowed_asm_includes = qr{(?x: |
605 | irq| | 637 | irq| |
@@ -2875,6 +2907,7 @@ sub process { | |||
2875 | # logging functions like pr_info that end in a string | 2907 | # logging functions like pr_info that end in a string |
2876 | # lines with a single string | 2908 | # lines with a single string |
2877 | # #defines that are a single string | 2909 | # #defines that are a single string |
2910 | # lines with an RFC3986 like URL | ||
2878 | # | 2911 | # |
2879 | # There are 3 different line length message types: | 2912 | # There are 3 different line length message types: |
2880 | # LONG_LINE_COMMENT a comment starts before but extends beyond $max_line_length | 2913 | # LONG_LINE_COMMENT a comment starts before but extends beyond $max_line_length |
@@ -2906,6 +2939,10 @@ sub process { | |||
2906 | $line =~ /^\+\s*(?:\w+)?\s*DEFINE_PER_CPU/) { | 2939 | $line =~ /^\+\s*(?:\w+)?\s*DEFINE_PER_CPU/) { |
2907 | $msg_type = ""; | 2940 | $msg_type = ""; |
2908 | 2941 | ||
2942 | # URL ($rawline is used in case the URL is in a comment) | ||
2943 | } elsif ($rawline =~ /^\+.*\b[a-z][\w\.\+\-]*:\/\/\S+/i) { | ||
2944 | $msg_type = ""; | ||
2945 | |||
2909 | # Otherwise set the alternate message types | 2946 | # Otherwise set the alternate message types |
2910 | 2947 | ||
2911 | # a comment starts before $max_line_length | 2948 | # a comment starts before $max_line_length |
@@ -2983,7 +3020,7 @@ sub process { | |||
2983 | 3020 | ||
2984 | # check indentation starts on a tab stop | 3021 | # check indentation starts on a tab stop |
2985 | if ($^V && $^V ge 5.10.0 && | 3022 | if ($^V && $^V ge 5.10.0 && |
2986 | $sline =~ /^\+\t+( +)(?:$c90_Keywords\b|\{\s*$|\}\s*(?:else\b|while\b|\s*$))/) { | 3023 | $sline =~ /^\+\t+( +)(?:$c90_Keywords\b|\{\s*$|\}\s*(?:else\b|while\b|\s*$)|$Declare\s*$Ident\s*[;=])/) { |
2987 | my $indent = length($1); | 3024 | my $indent = length($1); |
2988 | if ($indent % 8) { | 3025 | if ($indent % 8) { |
2989 | if (WARN("TABSTOP", | 3026 | if (WARN("TABSTOP", |
@@ -3882,10 +3919,12 @@ sub process { | |||
3882 | 3919 | ||
3883 | # function brace can't be on same line, except for #defines of do while, | 3920 | # function brace can't be on same line, except for #defines of do while, |
3884 | # or if closed on same line | 3921 | # or if closed on same line |
3885 | if (($line=~/$Type\s*$Ident\(.*\).*\s*{/) and | 3922 | if ($^V && $^V ge 5.10.0 && |
3886 | !($line=~/\#\s*define.*do\s\{/) and !($line=~/}/)) { | 3923 | $sline =~ /$Type\s*$Ident\s*$balanced_parens\s*\{/ && |
3924 | $sline !~ /\#\s*define\b.*do\s*\{/ && | ||
3925 | $sline !~ /}/) { | ||
3887 | if (ERROR("OPEN_BRACE", | 3926 | if (ERROR("OPEN_BRACE", |
3888 | "open brace '{' following function declarations go on the next line\n" . $herecurr) && | 3927 | "open brace '{' following function definitions go on the next line\n" . $herecurr) && |
3889 | $fix) { | 3928 | $fix) { |
3890 | fix_delete_line($fixlinenr, $rawline); | 3929 | fix_delete_line($fixlinenr, $rawline); |
3891 | my $fixed_line = $rawline; | 3930 | my $fixed_line = $rawline; |
@@ -4489,7 +4528,9 @@ sub process { | |||
4489 | } | 4528 | } |
4490 | 4529 | ||
4491 | # check for unnecessary parentheses around comparisons in if uses | 4530 | # check for unnecessary parentheses around comparisons in if uses |
4492 | if ($^V && $^V ge 5.10.0 && defined($stat) && | 4531 | # when !drivers/staging or command-line uses --strict |
4532 | if (($realfile !~ m@^(?:drivers/staging/)@ || $check_orig) && | ||
4533 | $^V && $^V ge 5.10.0 && defined($stat) && | ||
4493 | $stat =~ /(^.\s*if\s*($balanced_parens))/) { | 4534 | $stat =~ /(^.\s*if\s*($balanced_parens))/) { |
4494 | my $if_stat = $1; | 4535 | my $if_stat = $1; |
4495 | my $test = substr($2, 1, -1); | 4536 | my $test = substr($2, 1, -1); |
@@ -5307,7 +5348,7 @@ sub process { | |||
5307 | } | 5348 | } |
5308 | 5349 | ||
5309 | # check for line continuations in quoted strings with odd counts of " | 5350 | # check for line continuations in quoted strings with odd counts of " |
5310 | if ($rawline =~ /\\$/ && $rawline =~ tr/"/"/ % 2) { | 5351 | if ($rawline =~ /\\$/ && $sline =~ tr/"/"/ % 2) { |
5311 | WARN("LINE_CONTINUATIONS", | 5352 | WARN("LINE_CONTINUATIONS", |
5312 | "Avoid line continuations in quoted strings\n" . $herecurr); | 5353 | "Avoid line continuations in quoted strings\n" . $herecurr); |
5313 | } | 5354 | } |
@@ -6269,8 +6310,69 @@ sub process { | |||
6269 | "Exporting world writable files is usually an error. Consider more restrictive permissions.\n" . $herecurr); | 6310 | "Exporting world writable files is usually an error. Consider more restrictive permissions.\n" . $herecurr); |
6270 | } | 6311 | } |
6271 | 6312 | ||
6313 | # check for DEVICE_ATTR uses that could be DEVICE_ATTR_<FOO> | ||
6314 | # and whether or not function naming is typical and if | ||
6315 | # DEVICE_ATTR permissions uses are unusual too | ||
6316 | if ($^V && $^V ge 5.10.0 && | ||
6317 | defined $stat && | ||
6318 | $stat =~ /\bDEVICE_ATTR\s*\(\s*(\w+)\s*,\s*\(?\s*(\s*(?:${multi_mode_perms_string_search}|0[0-7]{3,3})\s*)\s*\)?\s*,\s*(\w+)\s*,\s*(\w+)\s*\)/) { | ||
6319 | my $var = $1; | ||
6320 | my $perms = $2; | ||
6321 | my $show = $3; | ||
6322 | my $store = $4; | ||
6323 | my $octal_perms = perms_to_octal($perms); | ||
6324 | if ($show =~ /^${var}_show$/ && | ||
6325 | $store =~ /^${var}_store$/ && | ||
6326 | $octal_perms eq "0644") { | ||
6327 | if (WARN("DEVICE_ATTR_RW", | ||
6328 | "Use DEVICE_ATTR_RW\n" . $herecurr) && | ||
6329 | $fix) { | ||
6330 | $fixed[$fixlinenr] =~ s/\bDEVICE_ATTR\s*\(\s*$var\s*,\s*\Q$perms\E\s*,\s*$show\s*,\s*$store\s*\)/DEVICE_ATTR_RW(${var})/; | ||
6331 | } | ||
6332 | } elsif ($show =~ /^${var}_show$/ && | ||
6333 | $store =~ /^NULL$/ && | ||
6334 | $octal_perms eq "0444") { | ||
6335 | if (WARN("DEVICE_ATTR_RO", | ||
6336 | "Use DEVICE_ATTR_RO\n" . $herecurr) && | ||
6337 | $fix) { | ||
6338 | $fixed[$fixlinenr] =~ s/\bDEVICE_ATTR\s*\(\s*$var\s*,\s*\Q$perms\E\s*,\s*$show\s*,\s*NULL\s*\)/DEVICE_ATTR_RO(${var})/; | ||
6339 | } | ||
6340 | } elsif ($show =~ /^NULL$/ && | ||
6341 | $store =~ /^${var}_store$/ && | ||
6342 | $octal_perms eq "0200") { | ||
6343 | if (WARN("DEVICE_ATTR_WO", | ||
6344 | "Use DEVICE_ATTR_WO\n" . $herecurr) && | ||
6345 | $fix) { | ||
6346 | $fixed[$fixlinenr] =~ s/\bDEVICE_ATTR\s*\(\s*$var\s*,\s*\Q$perms\E\s*,\s*NULL\s*,\s*$store\s*\)/DEVICE_ATTR_WO(${var})/; | ||
6347 | } | ||
6348 | } elsif ($octal_perms eq "0644" || | ||
6349 | $octal_perms eq "0444" || | ||
6350 | $octal_perms eq "0200") { | ||
6351 | my $newshow = "$show"; | ||
6352 | $newshow = "${var}_show" if ($show ne "NULL" && $show ne "${var}_show"); | ||
6353 | my $newstore = $store; | ||
6354 | $newstore = "${var}_store" if ($store ne "NULL" && $store ne "${var}_store"); | ||
6355 | my $rename = ""; | ||
6356 | if ($show ne $newshow) { | ||
6357 | $rename .= " '$show' to '$newshow'"; | ||
6358 | } | ||
6359 | if ($store ne $newstore) { | ||
6360 | $rename .= " '$store' to '$newstore'"; | ||
6361 | } | ||
6362 | WARN("DEVICE_ATTR_FUNCTIONS", | ||
6363 | "Consider renaming function(s)$rename\n" . $herecurr); | ||
6364 | } else { | ||
6365 | WARN("DEVICE_ATTR_PERMS", | ||
6366 | "DEVICE_ATTR unusual permissions '$perms' used\n" . $herecurr); | ||
6367 | } | ||
6368 | } | ||
6369 | |||
6272 | # Mode permission misuses where it seems decimal should be octal | 6370 | # Mode permission misuses where it seems decimal should be octal |
6273 | # This uses a shortcut match to avoid unnecessary uses of a slow foreach loop | 6371 | # This uses a shortcut match to avoid unnecessary uses of a slow foreach loop |
6372 | # o Ignore module_param*(...) uses with a decimal 0 permission as that has a | ||
6373 | # specific definition of not visible in sysfs. | ||
6374 | # o Ignore proc_create*(...) uses with a decimal 0 permission as that means | ||
6375 | # use the default permissions | ||
6274 | if ($^V && $^V ge 5.10.0 && | 6376 | if ($^V && $^V ge 5.10.0 && |
6275 | defined $stat && | 6377 | defined $stat && |
6276 | $line =~ /$mode_perms_search/) { | 6378 | $line =~ /$mode_perms_search/) { |
@@ -6294,8 +6396,9 @@ sub process { | |||
6294 | if ($stat =~ /$test/) { | 6396 | if ($stat =~ /$test/) { |
6295 | my $val = $1; | 6397 | my $val = $1; |
6296 | $val = $6 if ($skip_args ne ""); | 6398 | $val = $6 if ($skip_args ne ""); |
6297 | if (($val =~ /^$Int$/ && $val !~ /^$Octal$/) || | 6399 | if (!($func =~ /^(?:module_param|proc_create)/ && $val eq "0") && |
6298 | ($val =~ /^$Octal$/ && length($val) ne 4)) { | 6400 | (($val =~ /^$Int$/ && $val !~ /^$Octal$/) || |
6401 | ($val =~ /^$Octal$/ && length($val) ne 4))) { | ||
6299 | ERROR("NON_OCTAL_PERMISSIONS", | 6402 | ERROR("NON_OCTAL_PERMISSIONS", |
6300 | "Use 4 digit octal (0777) not decimal permissions\n" . "$here\n" . $stat_real); | 6403 | "Use 4 digit octal (0777) not decimal permissions\n" . "$here\n" . $stat_real); |
6301 | } | 6404 | } |
@@ -6308,30 +6411,13 @@ sub process { | |||
6308 | } | 6411 | } |
6309 | 6412 | ||
6310 | # check for uses of S_<PERMS> that could be octal for readability | 6413 | # check for uses of S_<PERMS> that could be octal for readability |
6311 | if ($line =~ /\b$mode_perms_string_search\b/) { | 6414 | if ($line =~ /\b($multi_mode_perms_string_search)\b/) { |
6312 | my $val = ""; | 6415 | my $oval = $1; |
6313 | my $oval = ""; | 6416 | my $octal = perms_to_octal($oval); |
6314 | my $to = 0; | ||
6315 | my $curpos = 0; | ||
6316 | my $lastpos = 0; | ||
6317 | while ($line =~ /\b(($mode_perms_string_search)\b(?:\s*\|\s*)?\s*)/g) { | ||
6318 | $curpos = pos($line); | ||
6319 | my $match = $2; | ||
6320 | my $omatch = $1; | ||
6321 | last if ($lastpos > 0 && ($curpos - length($omatch) != $lastpos)); | ||
6322 | $lastpos = $curpos; | ||
6323 | $to |= $mode_permission_string_types{$match}; | ||
6324 | $val .= '\s*\|\s*' if ($val ne ""); | ||
6325 | $val .= $match; | ||
6326 | $oval .= $omatch; | ||
6327 | } | ||
6328 | $oval =~ s/^\s*\|\s*//; | ||
6329 | $oval =~ s/\s*\|\s*$//; | ||
6330 | my $octal = sprintf("%04o", $to); | ||
6331 | if (WARN("SYMBOLIC_PERMS", | 6417 | if (WARN("SYMBOLIC_PERMS", |
6332 | "Symbolic permissions '$oval' are not preferred. Consider using octal permissions '$octal'.\n" . $herecurr) && | 6418 | "Symbolic permissions '$oval' are not preferred. Consider using octal permissions '$octal'.\n" . $herecurr) && |
6333 | $fix) { | 6419 | $fix) { |
6334 | $fixed[$fixlinenr] =~ s/$val/$octal/; | 6420 | $fixed[$fixlinenr] =~ s/\Q$oval\E/$octal/; |
6335 | } | 6421 | } |
6336 | } | 6422 | } |
6337 | 6423 | ||
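The largest checkpatch addition above folds symbolic S_I* permission strings to octal through the new perms_to_octal() helper and then flags DEVICE_ATTR() uses whose permissions and handler names match the standard patterns. A hedged C illustration of the kind of declaration the new DEVICE_ATTR_RW/RO/WO warnings (and --fix) target, using a hypothetical attribute "foo":

    /* Flagged form: 0644 (S_IRUGO | S_IWUSR) with foo_show/foo_store */
    static DEVICE_ATTR(foo, S_IRUGO | S_IWUSR, foo_show, foo_store);

    /* Suggested replacement */
    static DEVICE_ATTR_RW(foo);

    /* Likewise, 0444 with foo_show/NULL maps to DEVICE_ATTR_RO(foo),
     * and 0200 with NULL/foo_store maps to DEVICE_ATTR_WO(foo). */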
diff --git a/security/yama/yama_lsm.c b/security/yama/yama_lsm.c index 8298e094f4f7..ffda91a4a1aa 100644 --- a/security/yama/yama_lsm.c +++ b/security/yama/yama_lsm.c | |||
@@ -250,15 +250,10 @@ int yama_task_prctl(int option, unsigned long arg2, unsigned long arg3, | |||
250 | } else { | 250 | } else { |
251 | struct task_struct *tracer; | 251 | struct task_struct *tracer; |
252 | 252 | ||
253 | rcu_read_lock(); | 253 | tracer = find_get_task_by_vpid(arg2); |
254 | tracer = find_task_by_vpid(arg2); | 254 | if (!tracer) { |
255 | if (tracer) | ||
256 | get_task_struct(tracer); | ||
257 | else | ||
258 | rc = -EINVAL; | 255 | rc = -EINVAL; |
259 | rcu_read_unlock(); | 256 | } else { |
260 | |||
261 | if (tracer) { | ||
262 | rc = yama_ptracer_add(tracer, myself); | 257 | rc = yama_ptracer_add(tracer, myself); |
263 | put_task_struct(tracer); | 258 | put_task_struct(tracer); |
264 | } | 259 | } |
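The Yama change replaces an open-coded RCU lookup-and-reference sequence with find_get_task_by_vpid(), which returns the task with a reference already held. Inferred from the lines removed above, the helper is roughly equivalent to this sketch (the real implementation is added elsewhere in this series and may differ in detail):

    static struct task_struct *find_get_task_by_vpid_sketch(pid_t nr)
    {
            struct task_struct *task;

            rcu_read_lock();
            task = find_task_by_vpid(nr);
            if (task)
                    get_task_struct(task);   /* take a reference while under RCU */
            rcu_read_unlock();

            return task;                     /* NULL if no task has this pid */
    }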
diff --git a/tools/include/asm-generic/bitops/find.h b/tools/include/asm-generic/bitops/find.h index 9311fadaaab2..16ed1982cb34 100644 --- a/tools/include/asm-generic/bitops/find.h +++ b/tools/include/asm-generic/bitops/find.h | |||
@@ -16,6 +16,22 @@ extern unsigned long find_next_bit(const unsigned long *addr, unsigned long | |||
16 | size, unsigned long offset); | 16 | size, unsigned long offset); |
17 | #endif | 17 | #endif |
18 | 18 | ||
19 | #ifndef find_next_and_bit | ||
20 | /** | ||
21 | * find_next_and_bit - find the next set bit in both memory regions | ||
22 | * @addr1: The first address to base the search on | ||
23 | * @addr2: The second address to base the search on | ||
24 | * @offset: The bitnumber to start searching at | ||
25 | * @size: The bitmap size in bits | ||
26 | * | ||
27 | * Returns the bit number for the next set bit | ||
28 | * If no bits are set, returns @size. | ||
29 | */ | ||
30 | extern unsigned long find_next_and_bit(const unsigned long *addr1, | ||
31 | const unsigned long *addr2, unsigned long size, | ||
32 | unsigned long offset); | ||
33 | #endif | ||
34 | |||
19 | #ifndef find_next_zero_bit | 35 | #ifndef find_next_zero_bit |
20 | 36 | ||
21 | /** | 37 | /** |
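find_next_and_bit() scans the bitwise AND of two bitmaps without allocating a temporary, mirroring find_next_bit()'s argument order. A hedged usage sketch, with hypothetical bitmaps a and b and a hypothetical handle_bit() callback:

    /* Visit every bit set in both a and b (all names illustrative) */
    unsigned long bit;

    for (bit = find_next_and_bit(a, b, nbits, 0);
         bit < nbits;
         bit = find_next_and_bit(a, b, nbits, bit + 1))
            handle_bit(bit);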
diff --git a/tools/lib/find_bit.c b/tools/lib/find_bit.c index 42c15f906aac..a88bd507091e 100644 --- a/tools/lib/find_bit.c +++ b/tools/lib/find_bit.c | |||
@@ -22,22 +22,29 @@ | |||
22 | #include <linux/bitmap.h> | 22 | #include <linux/bitmap.h> |
23 | #include <linux/kernel.h> | 23 | #include <linux/kernel.h> |
24 | 24 | ||
25 | #if !defined(find_next_bit) | 25 | #if !defined(find_next_bit) || !defined(find_next_zero_bit) || \ |
26 | !defined(find_next_and_bit) | ||
26 | 27 | ||
27 | /* | 28 | /* |
28 | * This is a common helper function for find_next_bit and | 29 | * This is a common helper function for find_next_bit, find_next_zero_bit, and |
29 | * find_next_zero_bit. The difference is the "invert" argument, which | 30 | * find_next_and_bit. The differences are: |
30 | * is XORed with each fetched word before searching it for one bits. | 31 | * - The "invert" argument, which is XORed with each fetched word before |
32 | * searching it for one bits. | ||
33 | * - The optional "addr2", which is anded with "addr1" if present. | ||
31 | */ | 34 | */ |
32 | static unsigned long _find_next_bit(const unsigned long *addr, | 35 | static inline unsigned long _find_next_bit(const unsigned long *addr1, |
33 | unsigned long nbits, unsigned long start, unsigned long invert) | 36 | const unsigned long *addr2, unsigned long nbits, |
37 | unsigned long start, unsigned long invert) | ||
34 | { | 38 | { |
35 | unsigned long tmp; | 39 | unsigned long tmp; |
36 | 40 | ||
37 | if (unlikely(start >= nbits)) | 41 | if (unlikely(start >= nbits)) |
38 | return nbits; | 42 | return nbits; |
39 | 43 | ||
40 | tmp = addr[start / BITS_PER_LONG] ^ invert; | 44 | tmp = addr1[start / BITS_PER_LONG]; |
45 | if (addr2) | ||
46 | tmp &= addr2[start / BITS_PER_LONG]; | ||
47 | tmp ^= invert; | ||
41 | 48 | ||
42 | /* Handle 1st word. */ | 49 | /* Handle 1st word. */ |
43 | tmp &= BITMAP_FIRST_WORD_MASK(start); | 50 | tmp &= BITMAP_FIRST_WORD_MASK(start); |
@@ -48,7 +55,10 @@ static unsigned long _find_next_bit(const unsigned long *addr, | |||
48 | if (start >= nbits) | 55 | if (start >= nbits) |
49 | return nbits; | 56 | return nbits; |
50 | 57 | ||
51 | tmp = addr[start / BITS_PER_LONG] ^ invert; | 58 | tmp = addr1[start / BITS_PER_LONG]; |
59 | if (addr2) | ||
60 | tmp &= addr2[start / BITS_PER_LONG]; | ||
61 | tmp ^= invert; | ||
52 | } | 62 | } |
53 | 63 | ||
54 | return min(start + __ffs(tmp), nbits); | 64 | return min(start + __ffs(tmp), nbits); |
@@ -62,7 +72,7 @@ static unsigned long _find_next_bit(const unsigned long *addr, | |||
62 | unsigned long find_next_bit(const unsigned long *addr, unsigned long size, | 72 | unsigned long find_next_bit(const unsigned long *addr, unsigned long size, |
63 | unsigned long offset) | 73 | unsigned long offset) |
64 | { | 74 | { |
65 | return _find_next_bit(addr, size, offset, 0UL); | 75 | return _find_next_bit(addr, NULL, size, offset, 0UL); |
66 | } | 76 | } |
67 | #endif | 77 | #endif |
68 | 78 | ||
@@ -104,6 +114,15 @@ unsigned long find_first_zero_bit(const unsigned long *addr, unsigned long size) | |||
104 | unsigned long find_next_zero_bit(const unsigned long *addr, unsigned long size, | 114 | unsigned long find_next_zero_bit(const unsigned long *addr, unsigned long size, |
105 | unsigned long offset) | 115 | unsigned long offset) |
106 | { | 116 | { |
107 | return _find_next_bit(addr, size, offset, ~0UL); | 117 | return _find_next_bit(addr, NULL, size, offset, ~0UL); |
118 | } | ||
119 | #endif | ||
120 | |||
121 | #ifndef find_next_and_bit | ||
122 | unsigned long find_next_and_bit(const unsigned long *addr1, | ||
123 | const unsigned long *addr2, unsigned long size, | ||
124 | unsigned long offset) | ||
125 | { | ||
126 | return _find_next_bit(addr1, addr2, size, offset, 0UL); | ||
108 | } | 127 | } |
109 | #endif | 128 | #endif |
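The reworked _find_next_bit() comment describes two tricks: the optional addr2 is ANDed into each fetched word, and "invert" is XORed in so that searching for zero bits becomes searching for one bits. A standalone userspace illustration of the invert trick (not kernel code; __builtin_ctzl stands in for __ffs):

    #include <stdio.h>

    int main(void)
    {
            unsigned long word = 0xb6UL;           /* 1011 0110 */
            unsigned long for_ones  = word ^ 0UL;  /* unchanged: 1011 0110 */
            unsigned long for_zeros = word ^ ~0UL; /* clear bits now read as set */

            printf("lowest set bit:  %d\n", __builtin_ctzl(for_ones));  /* 1 */
            printf("lowest zero bit: %d\n", __builtin_ctzl(for_zeros)); /* 0 */
            return 0;
    }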
diff --git a/tools/lib/subcmd/pager.c b/tools/lib/subcmd/pager.c index 5ba754d17952..9997a8805a82 100644 --- a/tools/lib/subcmd/pager.c +++ b/tools/lib/subcmd/pager.c | |||
@@ -30,10 +30,13 @@ static void pager_preexec(void) | |||
30 | * have real input | 30 | * have real input |
31 | */ | 31 | */ |
32 | fd_set in; | 32 | fd_set in; |
33 | fd_set exception; | ||
33 | 34 | ||
34 | FD_ZERO(&in); | 35 | FD_ZERO(&in); |
36 | FD_ZERO(&exception); | ||
35 | FD_SET(0, &in); | 37 | FD_SET(0, &in); |
36 | select(1, &in, NULL, &in, NULL); | 38 | FD_SET(0, &exception); |
39 | select(1, &in, NULL, &exception, NULL); | ||
37 | 40 | ||
38 | setenv("LESS", "FRSX", 0); | 41 | setenv("LESS", "FRSX", 0); |
39 | } | 42 | } |
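The pager fix stops passing &in as both the read set and the exception set; select() rewrites its fd_set arguments in place, so each condition now gets its own set. A standalone userspace sketch of the corrected pattern (names illustrative):

    #include <sys/select.h>

    /* Block until stdin is readable or has an exceptional condition. */
    static void wait_for_stdin(void)
    {
            fd_set in, exception;

            FD_ZERO(&in);
            FD_ZERO(&exception);
            FD_SET(0, &in);
            FD_SET(0, &exception);
            select(1, &in, NULL, &exception, NULL);
    }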