diff options
512 files changed, 6665 insertions, 2406 deletions
diff --git a/Documentation/ABI/testing/sysfs-class-cxl b/Documentation/ABI/testing/sysfs-class-cxl index 640f65e79ef1..8e69345c37cc 100644 --- a/Documentation/ABI/testing/sysfs-class-cxl +++ b/Documentation/ABI/testing/sysfs-class-cxl | |||
| @@ -244,3 +244,11 @@ Description: read only | |||
| 244 | Returns 1 if the psl timebase register is synchronized | 244 | Returns 1 if the psl timebase register is synchronized |
| 245 | with the core timebase register, 0 otherwise. | 245 | with the core timebase register, 0 otherwise. |
| 246 | Users: https://github.com/ibm-capi/libcxl | 246 | Users: https://github.com/ibm-capi/libcxl |
| 247 | |||
| 248 | What: /sys/class/cxl/<card>/tunneled_ops_supported | ||
| 249 | Date: May 2018 | ||
| 250 | Contact: linuxppc-dev@lists.ozlabs.org | ||
| 251 | Description: read only | ||
| 252 | Returns 1 if tunneled operations are supported in capi mode, | ||
| 253 | 0 otherwise. | ||
| 254 | Users: https://github.com/ibm-capi/libcxl | ||
diff --git a/Documentation/ABI/testing/sysfs-devices-system-cpu b/Documentation/ABI/testing/sysfs-devices-system-cpu index 025b7cf3768d..bd4975e132d3 100644 --- a/Documentation/ABI/testing/sysfs-devices-system-cpu +++ b/Documentation/ABI/testing/sysfs-devices-system-cpu | |||
| @@ -478,6 +478,7 @@ What: /sys/devices/system/cpu/vulnerabilities | |||
| 478 | /sys/devices/system/cpu/vulnerabilities/meltdown | 478 | /sys/devices/system/cpu/vulnerabilities/meltdown |
| 479 | /sys/devices/system/cpu/vulnerabilities/spectre_v1 | 479 | /sys/devices/system/cpu/vulnerabilities/spectre_v1 |
| 480 | /sys/devices/system/cpu/vulnerabilities/spectre_v2 | 480 | /sys/devices/system/cpu/vulnerabilities/spectre_v2 |
| 481 | /sys/devices/system/cpu/vulnerabilities/spec_store_bypass | ||
| 481 | Date: January 2018 | 482 | Date: January 2018 |
| 482 | Contact: Linux kernel mailing list <linux-kernel@vger.kernel.org> | 483 | Contact: Linux kernel mailing list <linux-kernel@vger.kernel.org> |
| 483 | Description: Information about CPU vulnerabilities | 484 | Description: Information about CPU vulnerabilities |
diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt index 11fc28ecdb6d..f2040d46f095 100644 --- a/Documentation/admin-guide/kernel-parameters.txt +++ b/Documentation/admin-guide/kernel-parameters.txt | |||
| @@ -2680,6 +2680,9 @@ | |||
| 2680 | allow data leaks with this option, which is equivalent | 2680 | allow data leaks with this option, which is equivalent |
| 2681 | to spectre_v2=off. | 2681 | to spectre_v2=off. |
| 2682 | 2682 | ||
| 2683 | nospec_store_bypass_disable | ||
| 2684 | [HW] Disable all mitigations for the Speculative Store Bypass vulnerability | ||
| 2685 | |||
| 2683 | noxsave [BUGS=X86] Disables x86 extended register state save | 2686 | noxsave [BUGS=X86] Disables x86 extended register state save |
| 2684 | and restore using xsave. The kernel will fallback to | 2687 | and restore using xsave. The kernel will fallback to |
| 2685 | enabling legacy floating-point and sse state. | 2688 | enabling legacy floating-point and sse state. |
| @@ -4025,6 +4028,48 @@ | |||
| 4025 | Not specifying this option is equivalent to | 4028 | Not specifying this option is equivalent to |
| 4026 | spectre_v2=auto. | 4029 | spectre_v2=auto. |
| 4027 | 4030 | ||
| 4031 | spec_store_bypass_disable= | ||
| 4032 | [HW] Control Speculative Store Bypass (SSB) Disable mitigation | ||
| 4033 | (Speculative Store Bypass vulnerability) | ||
| 4034 | |||
| 4035 | Certain CPUs are vulnerable to an exploit against | ||
| 4036 | a common industry wide performance optimization known | ||
| 4037 | as "Speculative Store Bypass" in which recent stores | ||
| 4038 | to the same memory location may not be observed by | ||
| 4039 | later loads during speculative execution. The idea | ||
| 4040 | is that such stores are unlikely and that they can | ||
| 4041 | be detected prior to instruction retirement at the | ||
| 4042 | end of a particular speculation execution window. | ||
| 4043 | |||
| 4044 | In vulnerable processors, the speculatively forwarded | ||
| 4045 | store can be used in a cache side channel attack, for | ||
| 4046 | example to read memory to which the attacker does not | ||
| 4047 | directly have access (e.g. inside sandboxed code). | ||
| 4048 | |||
| 4049 | This parameter controls whether the Speculative Store | ||
| 4050 | Bypass optimization is used. | ||
| 4051 | |||
| 4052 | on - Unconditionally disable Speculative Store Bypass | ||
| 4053 | off - Unconditionally enable Speculative Store Bypass | ||
| 4054 | auto - Kernel detects whether the CPU model contains an | ||
| 4055 | implementation of Speculative Store Bypass and | ||
| 4056 | picks the most appropriate mitigation. If the | ||
| 4057 | CPU is not vulnerable, "off" is selected. If the | ||
| 4058 | CPU is vulnerable the default mitigation is | ||
| 4059 | architecture and Kconfig dependent. See below. | ||
| 4060 | prctl - Control Speculative Store Bypass per thread | ||
| 4061 | via prctl. Speculative Store Bypass is enabled | ||
| 4062 | for a process by default. The state of the control | ||
| 4063 | is inherited on fork. | ||
| 4064 | seccomp - Same as "prctl" above, but all seccomp threads | ||
| 4065 | will disable SSB unless they explicitly opt out. | ||
| 4066 | |||
| 4067 | Not specifying this option is equivalent to | ||
| 4068 | spec_store_bypass_disable=auto. | ||
| 4069 | |||
| 4070 | Default mitigations: | ||
| 4071 | X86: If CONFIG_SECCOMP=y "seccomp", otherwise "prctl" | ||
| 4072 | |||
| 4028 | spia_io_base= [HW,MTD] | 4073 | spia_io_base= [HW,MTD] |
| 4029 | spia_fio_base= | 4074 | spia_fio_base= |
| 4030 | spia_pedr= | 4075 | spia_pedr= |
diff --git a/Documentation/devicetree/bindings/net/marvell-pp2.txt b/Documentation/devicetree/bindings/net/marvell-pp2.txt index 1814fa13f6ab..fc019df0d863 100644 --- a/Documentation/devicetree/bindings/net/marvell-pp2.txt +++ b/Documentation/devicetree/bindings/net/marvell-pp2.txt | |||
| @@ -21,9 +21,10 @@ Required properties: | |||
| 21 | - main controller clock (for both armada-375-pp2 and armada-7k-pp2) | 21 | - main controller clock (for both armada-375-pp2 and armada-7k-pp2) |
| 22 | - GOP clock (for both armada-375-pp2 and armada-7k-pp2) | 22 | - GOP clock (for both armada-375-pp2 and armada-7k-pp2) |
| 23 | - MG clock (only for armada-7k-pp2) | 23 | - MG clock (only for armada-7k-pp2) |
| 24 | - MG Core clock (only for armada-7k-pp2) | ||
| 24 | - AXI clock (only for armada-7k-pp2) | 25 | - AXI clock (only for armada-7k-pp2) |
| 25 | - clock-names: names of used clocks, must be "pp_clk", "gop_clk", "mg_clk" | 26 | - clock-names: names of used clocks, must be "pp_clk", "gop_clk", "mg_clk", |
| 26 | and "axi_clk" (the 2 latter only for armada-7k-pp2). | 27 | "mg_core_clk" and "axi_clk" (the 3 latter only for armada-7k-pp2). |
| 27 | 28 | ||
| 28 | The ethernet ports are represented by subnodes. At least one port is | 29 | The ethernet ports are represented by subnodes. At least one port is |
| 29 | required. | 30 | required. |
| @@ -80,8 +81,8 @@ cpm_ethernet: ethernet@0 { | |||
| 80 | compatible = "marvell,armada-7k-pp22"; | 81 | compatible = "marvell,armada-7k-pp22"; |
| 81 | reg = <0x0 0x100000>, <0x129000 0xb000>; | 82 | reg = <0x0 0x100000>, <0x129000 0xb000>; |
| 82 | clocks = <&cpm_syscon0 1 3>, <&cpm_syscon0 1 9>, | 83 | clocks = <&cpm_syscon0 1 3>, <&cpm_syscon0 1 9>, |
| 83 | <&cpm_syscon0 1 5>, <&cpm_syscon0 1 18>; | 84 | <&cpm_syscon0 1 5>, <&cpm_syscon0 1 6>, <&cpm_syscon0 1 18>; |
| 84 | clock-names = "pp_clk", "gop_clk", "gp_clk", "axi_clk"; | 85 | clock-names = "pp_clk", "gop_clk", "mg_clk", "mg_core_clk", "axi_clk"; |
| 85 | 86 | ||
| 86 | eth0: eth0 { | 87 | eth0: eth0 { |
| 87 | interrupts = <ICU_GRP_NSR 39 IRQ_TYPE_LEVEL_HIGH>, | 88 | interrupts = <ICU_GRP_NSR 39 IRQ_TYPE_LEVEL_HIGH>, |
diff --git a/Documentation/devicetree/bindings/net/micrel-ksz90x1.txt b/Documentation/devicetree/bindings/net/micrel-ksz90x1.txt index 42a248301615..e22d8cfea687 100644 --- a/Documentation/devicetree/bindings/net/micrel-ksz90x1.txt +++ b/Documentation/devicetree/bindings/net/micrel-ksz90x1.txt | |||
| @@ -57,6 +57,13 @@ KSZ9031: | |||
| 57 | - txd2-skew-ps : Skew control of TX data 2 pad | 57 | - txd2-skew-ps : Skew control of TX data 2 pad |
| 58 | - txd3-skew-ps : Skew control of TX data 3 pad | 58 | - txd3-skew-ps : Skew control of TX data 3 pad |
| 59 | 59 | ||
| 60 | - micrel,force-master: | ||
| 61 | Boolean, force phy to master mode. Only set this option if the phy | ||
| 62 | reference clock provided at CLK125_NDO pin is used as MAC reference | ||
| 63 | clock because the clock jitter in slave mode is too high (errata#2). | ||
| 64 | Attention: The link partner must be configurable as slave otherwise | ||
| 65 | no link will be established. | ||
| 66 | |||
| 60 | Examples: | 67 | Examples: |
| 61 | 68 | ||
| 62 | mdio { | 69 | mdio { |
diff --git a/Documentation/networking/ppp_generic.txt b/Documentation/networking/ppp_generic.txt index 091d20273dcb..61daf4b39600 100644 --- a/Documentation/networking/ppp_generic.txt +++ b/Documentation/networking/ppp_generic.txt | |||
| @@ -300,12 +300,6 @@ unattached instance are: | |||
| 300 | The ioctl calls available on an instance of /dev/ppp attached to a | 300 | The ioctl calls available on an instance of /dev/ppp attached to a |
| 301 | channel are: | 301 | channel are: |
| 302 | 302 | ||
| 303 | * PPPIOCDETACH detaches the instance from the channel. This ioctl is | ||
| 304 | deprecated since the same effect can be achieved by closing the | ||
| 305 | instance. In order to prevent possible races this ioctl will fail | ||
| 306 | with an EINVAL error if more than one file descriptor refers to this | ||
| 307 | instance (i.e. as a result of dup(), dup2() or fork()). | ||
| 308 | |||
| 309 | * PPPIOCCONNECT connects this channel to a PPP interface. The | 303 | * PPPIOCCONNECT connects this channel to a PPP interface. The |
| 310 | argument should point to an int containing the interface unit | 304 | argument should point to an int containing the interface unit |
| 311 | number. It will return an EINVAL error if the channel is already | 305 | number. It will return an EINVAL error if the channel is already |
diff --git a/Documentation/userspace-api/index.rst b/Documentation/userspace-api/index.rst index 7b2eb1b7d4ca..a3233da7fa88 100644 --- a/Documentation/userspace-api/index.rst +++ b/Documentation/userspace-api/index.rst | |||
| @@ -19,6 +19,7 @@ place where this information is gathered. | |||
| 19 | no_new_privs | 19 | no_new_privs |
| 20 | seccomp_filter | 20 | seccomp_filter |
| 21 | unshare | 21 | unshare |
| 22 | spec_ctrl | ||
| 22 | 23 | ||
| 23 | .. only:: subproject and html | 24 | .. only:: subproject and html |
| 24 | 25 | ||
diff --git a/Documentation/userspace-api/spec_ctrl.rst b/Documentation/userspace-api/spec_ctrl.rst new file mode 100644 index 000000000000..32f3d55c54b7 --- /dev/null +++ b/Documentation/userspace-api/spec_ctrl.rst | |||
| @@ -0,0 +1,94 @@ | |||
| 1 | =================== | ||
| 2 | Speculation Control | ||
| 3 | =================== | ||
| 4 | |||
| 5 | Quite some CPUs have speculation-related misfeatures which are in | ||
| 6 | fact vulnerabilities causing data leaks in various forms even across | ||
| 7 | privilege domains. | ||
| 8 | |||
| 9 | The kernel provides mitigation for such vulnerabilities in various | ||
| 10 | forms. Some of these mitigations are compile-time configurable and some | ||
| 11 | can be supplied on the kernel command line. | ||
| 12 | |||
| 13 | There is also a class of mitigations which are very expensive, but they can | ||
| 14 | be restricted to a certain set of processes or tasks in controlled | ||
| 15 | environments. The mechanism to control these mitigations is via | ||
| 16 | :manpage:`prctl(2)`. | ||
| 17 | |||
| 18 | There are two prctl options which are related to this: | ||
| 19 | |||
| 20 | * PR_GET_SPECULATION_CTRL | ||
| 21 | |||
| 22 | * PR_SET_SPECULATION_CTRL | ||
| 23 | |||
| 24 | PR_GET_SPECULATION_CTRL | ||
| 25 | ----------------------- | ||
| 26 | |||
| 27 | PR_GET_SPECULATION_CTRL returns the state of the speculation misfeature | ||
| 28 | which is selected with arg2 of prctl(2). The return value uses bits 0-3 with | ||
| 29 | the following meaning: | ||
| 30 | |||
| 31 | ==== ===================== =================================================== | ||
| 32 | Bit Define Description | ||
| 33 | ==== ===================== =================================================== | ||
| 34 | 0 PR_SPEC_PRCTL Mitigation can be controlled per task by | ||
| 35 | PR_SET_SPECULATION_CTRL. | ||
| 36 | 1 PR_SPEC_ENABLE The speculation feature is enabled, mitigation is | ||
| 37 | disabled. | ||
| 38 | 2 PR_SPEC_DISABLE The speculation feature is disabled, mitigation is | ||
| 39 | enabled. | ||
| 40 | 3 PR_SPEC_FORCE_DISABLE Same as PR_SPEC_DISABLE, but cannot be undone. A | ||
| 41 | subsequent prctl(..., PR_SPEC_ENABLE) will fail. | ||
| 42 | ==== ===================== =================================================== | ||
| 43 | |||
| 44 | If all bits are 0 the CPU is not affected by the speculation misfeature. | ||
| 45 | |||
| 46 | If PR_SPEC_PRCTL is set, then the per-task control of the mitigation is | ||
| 47 | available. If not set, prctl(PR_SET_SPECULATION_CTRL) for the speculation | ||
| 48 | misfeature will fail. | ||
| 49 | |||
| 50 | PR_SET_SPECULATION_CTRL | ||
| 51 | ----------------------- | ||
| 52 | |||
| 53 | PR_SET_SPECULATION_CTRL allows controlling the speculation misfeature, which | ||
| 54 | is selected by arg2 of :manpage:`prctl(2)` per task. arg3 is used to hand | ||
| 55 | in the control value, i.e. either PR_SPEC_ENABLE or PR_SPEC_DISABLE or | ||
| 56 | PR_SPEC_FORCE_DISABLE. | ||
| 57 | |||
| 58 | Common error codes | ||
| 59 | ------------------ | ||
| 60 | ======= ================================================================= | ||
| 61 | Value Meaning | ||
| 62 | ======= ================================================================= | ||
| 63 | EINVAL The prctl is not implemented by the architecture or unused | ||
| 64 | prctl(2) arguments are not 0. | ||
| 65 | |||
| 66 | ENODEV arg2 is selecting a not supported speculation misfeature. | ||
| 67 | ======= ================================================================= | ||
| 68 | |||
| 69 | PR_SET_SPECULATION_CTRL error codes | ||
| 70 | ----------------------------------- | ||
| 71 | ======= ================================================================= | ||
| 72 | Value Meaning | ||
| 73 | ======= ================================================================= | ||
| 74 | 0 Success | ||
| 75 | |||
| 76 | ERANGE arg3 is incorrect, i.e. it's neither PR_SPEC_ENABLE nor | ||
| 77 | PR_SPEC_DISABLE nor PR_SPEC_FORCE_DISABLE. | ||
| 78 | |||
| 79 | ENXIO Control of the selected speculation misfeature is not possible. | ||
| 80 | See PR_GET_SPECULATION_CTRL. | ||
| 81 | |||
| 82 | EPERM Speculation was disabled with PR_SPEC_FORCE_DISABLE and caller | ||
| 83 | tried to enable it again. | ||
| 84 | ======= ================================================================= | ||
| 85 | |||
| 86 | Speculation misfeature controls | ||
| 87 | ------------------------------- | ||
| 88 | - PR_SPEC_STORE_BYPASS: Speculative Store Bypass | ||
| 89 | |||
| 90 | Invocations: | ||
| 91 | * prctl(PR_GET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS, 0, 0, 0); | ||
| 92 | * prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS, PR_SPEC_ENABLE, 0, 0); | ||
| 93 | * prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS, PR_SPEC_DISABLE, 0, 0); | ||
| 94 | * prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS, PR_SPEC_FORCE_DISABLE, 0, 0); | ||
diff --git a/Documentation/virtual/kvm/cpuid.txt b/Documentation/virtual/kvm/cpuid.txt index d4f33eb805dd..ab022dcd0911 100644 --- a/Documentation/virtual/kvm/cpuid.txt +++ b/Documentation/virtual/kvm/cpuid.txt | |||
| @@ -72,8 +72,8 @@ KVM_FEATURE_CLOCKSOURCE_STABLE_BIT || 24 || host will warn if no guest-side | |||
| 72 | 72 | ||
| 73 | flag || value || meaning | 73 | flag || value || meaning |
| 74 | ================================================================================== | 74 | ================================================================================== |
| 75 | KVM_HINTS_DEDICATED || 0 || guest checks this feature bit to | 75 | KVM_HINTS_REALTIME || 0 || guest checks this feature bit to |
| 76 | || || determine if there is vCPU pinning | 76 | || || determine that vCPUs are never |
| 77 | || || and there is no vCPU over-commitment, | 77 | || || preempted for an unlimited time, |
| 78 | || || allowing optimizations | 78 | || || allowing optimizations |
| 79 | ---------------------------------------------------------------------------------- | 79 | ---------------------------------------------------------------------------------- |
diff --git a/MAINTAINERS b/MAINTAINERS index 58b9861ccf99..ca4afd68530c 100644 --- a/MAINTAINERS +++ b/MAINTAINERS | |||
| @@ -2332,7 +2332,7 @@ F: drivers/gpio/gpio-ath79.c | |||
| 2332 | F: Documentation/devicetree/bindings/gpio/gpio-ath79.txt | 2332 | F: Documentation/devicetree/bindings/gpio/gpio-ath79.txt |
| 2333 | 2333 | ||
| 2334 | ATHEROS ATH GENERIC UTILITIES | 2334 | ATHEROS ATH GENERIC UTILITIES |
| 2335 | M: "Luis R. Rodriguez" <mcgrof@do-not-panic.com> | 2335 | M: Kalle Valo <kvalo@codeaurora.org> |
| 2336 | L: linux-wireless@vger.kernel.org | 2336 | L: linux-wireless@vger.kernel.org |
| 2337 | S: Supported | 2337 | S: Supported |
| 2338 | F: drivers/net/wireless/ath/* | 2338 | F: drivers/net/wireless/ath/* |
| @@ -2347,7 +2347,7 @@ S: Maintained | |||
| 2347 | F: drivers/net/wireless/ath/ath5k/ | 2347 | F: drivers/net/wireless/ath/ath5k/ |
| 2348 | 2348 | ||
| 2349 | ATHEROS ATH6KL WIRELESS DRIVER | 2349 | ATHEROS ATH6KL WIRELESS DRIVER |
| 2350 | M: Kalle Valo <kvalo@qca.qualcomm.com> | 2350 | M: Kalle Valo <kvalo@codeaurora.org> |
| 2351 | L: linux-wireless@vger.kernel.org | 2351 | L: linux-wireless@vger.kernel.org |
| 2352 | W: http://wireless.kernel.org/en/users/Drivers/ath6kl | 2352 | W: http://wireless.kernel.org/en/users/Drivers/ath6kl |
| 2353 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/kvalo/ath.git | 2353 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/kvalo/ath.git |
| @@ -4309,7 +4309,7 @@ F: Documentation/driver-api/dma-buf.rst | |||
| 4309 | T: git git://anongit.freedesktop.org/drm/drm-misc | 4309 | T: git git://anongit.freedesktop.org/drm/drm-misc |
| 4310 | 4310 | ||
| 4311 | DMA GENERIC OFFLOAD ENGINE SUBSYSTEM | 4311 | DMA GENERIC OFFLOAD ENGINE SUBSYSTEM |
| 4312 | M: Vinod Koul <vinod.koul@intel.com> | 4312 | M: Vinod Koul <vkoul@kernel.org> |
| 4313 | L: dmaengine@vger.kernel.org | 4313 | L: dmaengine@vger.kernel.org |
| 4314 | Q: https://patchwork.kernel.org/project/linux-dmaengine/list/ | 4314 | Q: https://patchwork.kernel.org/project/linux-dmaengine/list/ |
| 4315 | S: Maintained | 4315 | S: Maintained |
| @@ -5388,7 +5388,6 @@ S: Maintained | |||
| 5388 | F: drivers/iommu/exynos-iommu.c | 5388 | F: drivers/iommu/exynos-iommu.c |
| 5389 | 5389 | ||
| 5390 | EZchip NPS platform support | 5390 | EZchip NPS platform support |
| 5391 | M: Elad Kanfi <eladkan@mellanox.com> | ||
| 5392 | M: Vineet Gupta <vgupta@synopsys.com> | 5391 | M: Vineet Gupta <vgupta@synopsys.com> |
| 5393 | S: Supported | 5392 | S: Supported |
| 5394 | F: arch/arc/plat-eznps | 5393 | F: arch/arc/plat-eznps |
| @@ -6504,9 +6503,15 @@ F: Documentation/networking/hinic.txt | |||
| 6504 | F: drivers/net/ethernet/huawei/hinic/ | 6503 | F: drivers/net/ethernet/huawei/hinic/ |
| 6505 | 6504 | ||
| 6506 | HUGETLB FILESYSTEM | 6505 | HUGETLB FILESYSTEM |
| 6507 | M: Nadia Yvette Chambers <nyc@holomorphy.com> | 6506 | M: Mike Kravetz <mike.kravetz@oracle.com> |
| 6507 | L: linux-mm@kvack.org | ||
| 6508 | S: Maintained | 6508 | S: Maintained |
| 6509 | F: fs/hugetlbfs/ | 6509 | F: fs/hugetlbfs/ |
| 6510 | F: mm/hugetlb.c | ||
| 6511 | F: include/linux/hugetlb.h | ||
| 6512 | F: Documentation/admin-guide/mm/hugetlbpage.rst | ||
| 6513 | F: Documentation/vm/hugetlbfs_reserv.rst | ||
| 6514 | F: Documentation/ABI/testing/sysfs-kernel-mm-hugepages | ||
| 6510 | 6515 | ||
| 6511 | HVA ST MEDIA DRIVER | 6516 | HVA ST MEDIA DRIVER |
| 6512 | M: Jean-Christophe Trotin <jean-christophe.trotin@st.com> | 6517 | M: Jean-Christophe Trotin <jean-christophe.trotin@st.com> |
| @@ -7698,6 +7703,7 @@ KERNEL SELFTEST FRAMEWORK | |||
| 7698 | M: Shuah Khan <shuah@kernel.org> | 7703 | M: Shuah Khan <shuah@kernel.org> |
| 7699 | L: linux-kselftest@vger.kernel.org | 7704 | L: linux-kselftest@vger.kernel.org |
| 7700 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/shuah/linux-kselftest.git | 7705 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/shuah/linux-kselftest.git |
| 7706 | Q: https://patchwork.kernel.org/project/linux-kselftest/list/ | ||
| 7701 | S: Maintained | 7707 | S: Maintained |
| 7702 | F: tools/testing/selftests/ | 7708 | F: tools/testing/selftests/ |
| 7703 | F: Documentation/dev-tools/kselftest* | 7709 | F: Documentation/dev-tools/kselftest* |
| @@ -9020,7 +9026,6 @@ Q: http://patchwork.ozlabs.org/project/netdev/list/ | |||
| 9020 | F: drivers/net/ethernet/mellanox/mlx5/core/en_* | 9026 | F: drivers/net/ethernet/mellanox/mlx5/core/en_* |
| 9021 | 9027 | ||
| 9022 | MELLANOX ETHERNET INNOVA DRIVER | 9028 | MELLANOX ETHERNET INNOVA DRIVER |
| 9023 | M: Ilan Tayari <ilant@mellanox.com> | ||
| 9024 | R: Boris Pismenny <borisp@mellanox.com> | 9029 | R: Boris Pismenny <borisp@mellanox.com> |
| 9025 | L: netdev@vger.kernel.org | 9030 | L: netdev@vger.kernel.org |
| 9026 | S: Supported | 9031 | S: Supported |
| @@ -9030,7 +9035,6 @@ F: drivers/net/ethernet/mellanox/mlx5/core/fpga/* | |||
| 9030 | F: include/linux/mlx5/mlx5_ifc_fpga.h | 9035 | F: include/linux/mlx5/mlx5_ifc_fpga.h |
| 9031 | 9036 | ||
| 9032 | MELLANOX ETHERNET INNOVA IPSEC DRIVER | 9037 | MELLANOX ETHERNET INNOVA IPSEC DRIVER |
| 9033 | M: Ilan Tayari <ilant@mellanox.com> | ||
| 9034 | R: Boris Pismenny <borisp@mellanox.com> | 9038 | R: Boris Pismenny <borisp@mellanox.com> |
| 9035 | L: netdev@vger.kernel.org | 9039 | L: netdev@vger.kernel.org |
| 9036 | S: Supported | 9040 | S: Supported |
| @@ -9086,7 +9090,6 @@ F: include/uapi/rdma/mlx4-abi.h | |||
| 9086 | 9090 | ||
| 9087 | MELLANOX MLX5 core VPI driver | 9091 | MELLANOX MLX5 core VPI driver |
| 9088 | M: Saeed Mahameed <saeedm@mellanox.com> | 9092 | M: Saeed Mahameed <saeedm@mellanox.com> |
| 9089 | M: Matan Barak <matanb@mellanox.com> | ||
| 9090 | M: Leon Romanovsky <leonro@mellanox.com> | 9093 | M: Leon Romanovsky <leonro@mellanox.com> |
| 9091 | L: netdev@vger.kernel.org | 9094 | L: netdev@vger.kernel.org |
| 9092 | L: linux-rdma@vger.kernel.org | 9095 | L: linux-rdma@vger.kernel.org |
| @@ -9097,7 +9100,6 @@ F: drivers/net/ethernet/mellanox/mlx5/core/ | |||
| 9097 | F: include/linux/mlx5/ | 9100 | F: include/linux/mlx5/ |
| 9098 | 9101 | ||
| 9099 | MELLANOX MLX5 IB driver | 9102 | MELLANOX MLX5 IB driver |
| 9100 | M: Matan Barak <matanb@mellanox.com> | ||
| 9101 | M: Leon Romanovsky <leonro@mellanox.com> | 9103 | M: Leon Romanovsky <leonro@mellanox.com> |
| 9102 | L: linux-rdma@vger.kernel.org | 9104 | L: linux-rdma@vger.kernel.org |
| 9103 | W: http://www.mellanox.com | 9105 | W: http://www.mellanox.com |
| @@ -9831,7 +9833,6 @@ F: net/netfilter/xt_CONNSECMARK.c | |||
| 9831 | F: net/netfilter/xt_SECMARK.c | 9833 | F: net/netfilter/xt_SECMARK.c |
| 9832 | 9834 | ||
| 9833 | NETWORKING [TLS] | 9835 | NETWORKING [TLS] |
| 9834 | M: Ilya Lesokhin <ilyal@mellanox.com> | ||
| 9835 | M: Aviad Yehezkel <aviadye@mellanox.com> | 9836 | M: Aviad Yehezkel <aviadye@mellanox.com> |
| 9836 | M: Dave Watson <davejwatson@fb.com> | 9837 | M: Dave Watson <davejwatson@fb.com> |
| 9837 | L: netdev@vger.kernel.org | 9838 | L: netdev@vger.kernel.org |
| @@ -11631,7 +11632,7 @@ S: Maintained | |||
| 11631 | F: drivers/media/tuners/qt1010* | 11632 | F: drivers/media/tuners/qt1010* |
| 11632 | 11633 | ||
| 11633 | QUALCOMM ATHEROS ATH10K WIRELESS DRIVER | 11634 | QUALCOMM ATHEROS ATH10K WIRELESS DRIVER |
| 11634 | M: Kalle Valo <kvalo@qca.qualcomm.com> | 11635 | M: Kalle Valo <kvalo@codeaurora.org> |
| 11635 | L: ath10k@lists.infradead.org | 11636 | L: ath10k@lists.infradead.org |
| 11636 | W: http://wireless.kernel.org/en/users/Drivers/ath10k | 11637 | W: http://wireless.kernel.org/en/users/Drivers/ath10k |
| 11637 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/kvalo/ath.git | 11638 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/kvalo/ath.git |
| @@ -11682,7 +11683,7 @@ S: Maintained | |||
| 11682 | F: drivers/media/platform/qcom/venus/ | 11683 | F: drivers/media/platform/qcom/venus/ |
| 11683 | 11684 | ||
| 11684 | QUALCOMM WCN36XX WIRELESS DRIVER | 11685 | QUALCOMM WCN36XX WIRELESS DRIVER |
| 11685 | M: Eugene Krasnikov <k.eugene.e@gmail.com> | 11686 | M: Kalle Valo <kvalo@codeaurora.org> |
| 11686 | L: wcn36xx@lists.infradead.org | 11687 | L: wcn36xx@lists.infradead.org |
| 11687 | W: http://wireless.kernel.org/en/users/Drivers/wcn36xx | 11688 | W: http://wireless.kernel.org/en/users/Drivers/wcn36xx |
| 11688 | T: git git://github.com/KrasnikovEugene/wcn36xx.git | 11689 | T: git git://github.com/KrasnikovEugene/wcn36xx.git |
| @@ -12220,7 +12221,7 @@ F: Documentation/s390/vfio-ccw.txt | |||
| 12220 | F: include/uapi/linux/vfio_ccw.h | 12221 | F: include/uapi/linux/vfio_ccw.h |
| 12221 | 12222 | ||
| 12222 | S390 ZCRYPT DRIVER | 12223 | S390 ZCRYPT DRIVER |
| 12223 | M: Harald Freudenberger <freude@de.ibm.com> | 12224 | M: Harald Freudenberger <freude@linux.ibm.com> |
| 12224 | L: linux-s390@vger.kernel.org | 12225 | L: linux-s390@vger.kernel.org |
| 12225 | W: http://www.ibm.com/developerworks/linux/linux390/ | 12226 | W: http://www.ibm.com/developerworks/linux/linux390/ |
| 12226 | S: Supported | 12227 | S: Supported |
| @@ -13264,6 +13265,12 @@ M: Jan-Benedict Glaw <jbglaw@lug-owl.de> | |||
| 13264 | S: Maintained | 13265 | S: Maintained |
| 13265 | F: arch/alpha/kernel/srm_env.c | 13266 | F: arch/alpha/kernel/srm_env.c |
| 13266 | 13267 | ||
| 13268 | ST STM32 I2C/SMBUS DRIVER | ||
| 13269 | M: Pierre-Yves MORDRET <pierre-yves.mordret@st.com> | ||
| 13270 | L: linux-i2c@vger.kernel.org | ||
| 13271 | S: Maintained | ||
| 13272 | F: drivers/i2c/busses/i2c-stm32* | ||
| 13273 | |||
| 13267 | STABLE BRANCH | 13274 | STABLE BRANCH |
| 13268 | M: Greg Kroah-Hartman <gregkh@linuxfoundation.org> | 13275 | M: Greg Kroah-Hartman <gregkh@linuxfoundation.org> |
| 13269 | L: stable@vger.kernel.org | 13276 | L: stable@vger.kernel.org |
| @@ -2,7 +2,7 @@ | |||
| 2 | VERSION = 4 | 2 | VERSION = 4 |
| 3 | PATCHLEVEL = 17 | 3 | PATCHLEVEL = 17 |
| 4 | SUBLEVEL = 0 | 4 | SUBLEVEL = 0 |
| 5 | EXTRAVERSION = -rc5 | 5 | EXTRAVERSION = -rc7 |
| 6 | NAME = Merciless Moray | 6 | NAME = Merciless Moray |
| 7 | 7 | ||
| 8 | # *DOCUMENTATION* | 8 | # *DOCUMENTATION* |
| @@ -500,6 +500,9 @@ RETPOLINE_CFLAGS_CLANG := -mretpoline-external-thunk | |||
| 500 | RETPOLINE_CFLAGS := $(call cc-option,$(RETPOLINE_CFLAGS_GCC),$(call cc-option,$(RETPOLINE_CFLAGS_CLANG))) | 500 | RETPOLINE_CFLAGS := $(call cc-option,$(RETPOLINE_CFLAGS_GCC),$(call cc-option,$(RETPOLINE_CFLAGS_CLANG))) |
| 501 | export RETPOLINE_CFLAGS | 501 | export RETPOLINE_CFLAGS |
| 502 | 502 | ||
| 503 | KBUILD_CFLAGS += $(call cc-option,-fno-PIE) | ||
| 504 | KBUILD_AFLAGS += $(call cc-option,-fno-PIE) | ||
| 505 | |||
| 503 | # check for 'asm goto' | 506 | # check for 'asm goto' |
| 504 | ifeq ($(call shell-cached,$(CONFIG_SHELL) $(srctree)/scripts/gcc-goto.sh $(CC) $(KBUILD_CFLAGS)), y) | 507 | ifeq ($(call shell-cached,$(CONFIG_SHELL) $(srctree)/scripts/gcc-goto.sh $(CC) $(KBUILD_CFLAGS)), y) |
| 505 | CC_HAVE_ASM_GOTO := 1 | 508 | CC_HAVE_ASM_GOTO := 1 |
| @@ -621,9 +624,9 @@ endif # $(dot-config) | |||
| 621 | # Defaults to vmlinux, but the arch makefile usually adds further targets | 624 | # Defaults to vmlinux, but the arch makefile usually adds further targets |
| 622 | all: vmlinux | 625 | all: vmlinux |
| 623 | 626 | ||
| 624 | KBUILD_CFLAGS += $(call cc-option,-fno-PIE) | 627 | CFLAGS_GCOV := -fprofile-arcs -ftest-coverage \ |
| 625 | KBUILD_AFLAGS += $(call cc-option,-fno-PIE) | 628 | $(call cc-option,-fno-tree-loop-im) \ |
| 626 | CFLAGS_GCOV := -fprofile-arcs -ftest-coverage -fno-tree-loop-im $(call cc-disable-warning,maybe-uninitialized,) | 629 | $(call cc-disable-warning,maybe-uninitialized,) |
| 627 | export CFLAGS_GCOV CFLAGS_KCOV | 630 | export CFLAGS_GCOV CFLAGS_KCOV |
| 628 | 631 | ||
| 629 | # The arch Makefile can set ARCH_{CPP,A,C}FLAGS to override the default | 632 | # The arch Makefile can set ARCH_{CPP,A,C}FLAGS to override the default |
diff --git a/arch/alpha/Kconfig b/arch/alpha/Kconfig index b2022885ced8..f19dc31288c8 100644 --- a/arch/alpha/Kconfig +++ b/arch/alpha/Kconfig | |||
| @@ -211,6 +211,7 @@ config ALPHA_EIGER | |||
| 211 | config ALPHA_JENSEN | 211 | config ALPHA_JENSEN |
| 212 | bool "Jensen" | 212 | bool "Jensen" |
| 213 | depends on BROKEN | 213 | depends on BROKEN |
| 214 | select DMA_DIRECT_OPS | ||
| 214 | help | 215 | help |
| 215 | DEC PC 150 AXP (aka Jensen): This is a very old Digital system - one | 216 | DEC PC 150 AXP (aka Jensen): This is a very old Digital system - one |
| 216 | of the first-generation Alpha systems. A number of these systems | 217 | of the first-generation Alpha systems. A number of these systems |
diff --git a/arch/alpha/include/asm/dma-mapping.h b/arch/alpha/include/asm/dma-mapping.h index b78f61f20796..8beeafd4f68e 100644 --- a/arch/alpha/include/asm/dma-mapping.h +++ b/arch/alpha/include/asm/dma-mapping.h | |||
| @@ -2,11 +2,15 @@ | |||
| 2 | #ifndef _ALPHA_DMA_MAPPING_H | 2 | #ifndef _ALPHA_DMA_MAPPING_H |
| 3 | #define _ALPHA_DMA_MAPPING_H | 3 | #define _ALPHA_DMA_MAPPING_H |
| 4 | 4 | ||
| 5 | extern const struct dma_map_ops *dma_ops; | 5 | extern const struct dma_map_ops alpha_pci_ops; |
| 6 | 6 | ||
| 7 | static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus) | 7 | static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus) |
| 8 | { | 8 | { |
| 9 | return dma_ops; | 9 | #ifdef CONFIG_ALPHA_JENSEN |
| 10 | return &dma_direct_ops; | ||
| 11 | #else | ||
| 12 | return &alpha_pci_ops; | ||
| 13 | #endif | ||
| 10 | } | 14 | } |
| 11 | 15 | ||
| 12 | #endif /* _ALPHA_DMA_MAPPING_H */ | 16 | #endif /* _ALPHA_DMA_MAPPING_H */ |
diff --git a/arch/alpha/kernel/io.c b/arch/alpha/kernel/io.c index 3e3d49c254c5..c025a3e5e357 100644 --- a/arch/alpha/kernel/io.c +++ b/arch/alpha/kernel/io.c | |||
| @@ -37,20 +37,20 @@ unsigned int ioread32(void __iomem *addr) | |||
| 37 | 37 | ||
| 38 | void iowrite8(u8 b, void __iomem *addr) | 38 | void iowrite8(u8 b, void __iomem *addr) |
| 39 | { | 39 | { |
| 40 | IO_CONCAT(__IO_PREFIX,iowrite8)(b, addr); | ||
| 41 | mb(); | 40 | mb(); |
| 41 | IO_CONCAT(__IO_PREFIX,iowrite8)(b, addr); | ||
| 42 | } | 42 | } |
| 43 | 43 | ||
| 44 | void iowrite16(u16 b, void __iomem *addr) | 44 | void iowrite16(u16 b, void __iomem *addr) |
| 45 | { | 45 | { |
| 46 | IO_CONCAT(__IO_PREFIX,iowrite16)(b, addr); | ||
| 47 | mb(); | 46 | mb(); |
| 47 | IO_CONCAT(__IO_PREFIX,iowrite16)(b, addr); | ||
| 48 | } | 48 | } |
| 49 | 49 | ||
| 50 | void iowrite32(u32 b, void __iomem *addr) | 50 | void iowrite32(u32 b, void __iomem *addr) |
| 51 | { | 51 | { |
| 52 | IO_CONCAT(__IO_PREFIX,iowrite32)(b, addr); | ||
| 53 | mb(); | 52 | mb(); |
| 53 | IO_CONCAT(__IO_PREFIX,iowrite32)(b, addr); | ||
| 54 | } | 54 | } |
| 55 | 55 | ||
| 56 | EXPORT_SYMBOL(ioread8); | 56 | EXPORT_SYMBOL(ioread8); |
| @@ -176,26 +176,26 @@ u64 readq(const volatile void __iomem *addr) | |||
| 176 | 176 | ||
| 177 | void writeb(u8 b, volatile void __iomem *addr) | 177 | void writeb(u8 b, volatile void __iomem *addr) |
| 178 | { | 178 | { |
| 179 | __raw_writeb(b, addr); | ||
| 180 | mb(); | 179 | mb(); |
| 180 | __raw_writeb(b, addr); | ||
| 181 | } | 181 | } |
| 182 | 182 | ||
| 183 | void writew(u16 b, volatile void __iomem *addr) | 183 | void writew(u16 b, volatile void __iomem *addr) |
| 184 | { | 184 | { |
| 185 | __raw_writew(b, addr); | ||
| 186 | mb(); | 185 | mb(); |
| 186 | __raw_writew(b, addr); | ||
| 187 | } | 187 | } |
| 188 | 188 | ||
| 189 | void writel(u32 b, volatile void __iomem *addr) | 189 | void writel(u32 b, volatile void __iomem *addr) |
| 190 | { | 190 | { |
| 191 | __raw_writel(b, addr); | ||
| 192 | mb(); | 191 | mb(); |
| 192 | __raw_writel(b, addr); | ||
| 193 | } | 193 | } |
| 194 | 194 | ||
| 195 | void writeq(u64 b, volatile void __iomem *addr) | 195 | void writeq(u64 b, volatile void __iomem *addr) |
| 196 | { | 196 | { |
| 197 | __raw_writeq(b, addr); | ||
| 198 | mb(); | 197 | mb(); |
| 198 | __raw_writeq(b, addr); | ||
| 199 | } | 199 | } |
| 200 | 200 | ||
| 201 | EXPORT_SYMBOL(readb); | 201 | EXPORT_SYMBOL(readb); |
diff --git a/arch/alpha/kernel/pci-noop.c b/arch/alpha/kernel/pci-noop.c index b6ebb65127a8..c7c5879869d3 100644 --- a/arch/alpha/kernel/pci-noop.c +++ b/arch/alpha/kernel/pci-noop.c | |||
| @@ -102,36 +102,3 @@ SYSCALL_DEFINE5(pciconfig_write, unsigned long, bus, unsigned long, dfn, | |||
| 102 | else | 102 | else |
| 103 | return -ENODEV; | 103 | return -ENODEV; |
| 104 | } | 104 | } |
| 105 | |||
| 106 | static void *alpha_noop_alloc_coherent(struct device *dev, size_t size, | ||
| 107 | dma_addr_t *dma_handle, gfp_t gfp, | ||
| 108 | unsigned long attrs) | ||
| 109 | { | ||
| 110 | void *ret; | ||
| 111 | |||
| 112 | if (!dev || *dev->dma_mask >= 0xffffffffUL) | ||
| 113 | gfp &= ~GFP_DMA; | ||
| 114 | ret = (void *)__get_free_pages(gfp, get_order(size)); | ||
| 115 | if (ret) { | ||
| 116 | memset(ret, 0, size); | ||
| 117 | *dma_handle = virt_to_phys(ret); | ||
| 118 | } | ||
| 119 | return ret; | ||
| 120 | } | ||
| 121 | |||
| 122 | static int alpha_noop_supported(struct device *dev, u64 mask) | ||
| 123 | { | ||
| 124 | return mask < 0x00ffffffUL ? 0 : 1; | ||
| 125 | } | ||
| 126 | |||
| 127 | const struct dma_map_ops alpha_noop_ops = { | ||
| 128 | .alloc = alpha_noop_alloc_coherent, | ||
| 129 | .free = dma_noop_free_coherent, | ||
| 130 | .map_page = dma_noop_map_page, | ||
| 131 | .map_sg = dma_noop_map_sg, | ||
| 132 | .mapping_error = dma_noop_mapping_error, | ||
| 133 | .dma_supported = alpha_noop_supported, | ||
| 134 | }; | ||
| 135 | |||
| 136 | const struct dma_map_ops *dma_ops = &alpha_noop_ops; | ||
| 137 | EXPORT_SYMBOL(dma_ops); | ||
diff --git a/arch/alpha/kernel/pci_iommu.c b/arch/alpha/kernel/pci_iommu.c index 83b34b9188ea..6923b0d9c1e1 100644 --- a/arch/alpha/kernel/pci_iommu.c +++ b/arch/alpha/kernel/pci_iommu.c | |||
| @@ -950,6 +950,4 @@ const struct dma_map_ops alpha_pci_ops = { | |||
| 950 | .mapping_error = alpha_pci_mapping_error, | 950 | .mapping_error = alpha_pci_mapping_error, |
| 951 | .dma_supported = alpha_pci_supported, | 951 | .dma_supported = alpha_pci_supported, |
| 952 | }; | 952 | }; |
| 953 | 953 | EXPORT_SYMBOL(alpha_pci_ops); | |
| 954 | const struct dma_map_ops *dma_ops = &alpha_pci_ops; | ||
| 955 | EXPORT_SYMBOL(dma_ops); | ||
diff --git a/arch/arm/boot/compressed/Makefile b/arch/arm/boot/compressed/Makefile index 45a6b9b7af2a..6a4e7341ecd3 100644 --- a/arch/arm/boot/compressed/Makefile +++ b/arch/arm/boot/compressed/Makefile | |||
| @@ -117,11 +117,9 @@ ccflags-y := -fpic -mno-single-pic-base -fno-builtin -I$(obj) | |||
| 117 | asflags-y := -DZIMAGE | 117 | asflags-y := -DZIMAGE |
| 118 | 118 | ||
| 119 | # Supply kernel BSS size to the decompressor via a linker symbol. | 119 | # Supply kernel BSS size to the decompressor via a linker symbol. |
| 120 | KBSS_SZ = $(shell $(CROSS_COMPILE)nm $(obj)/../../../../vmlinux | \ | 120 | KBSS_SZ = $(shell echo $$(($$($(CROSS_COMPILE)nm $(obj)/../../../../vmlinux | \ |
| 121 | perl -e 'while (<>) { \ | 121 | sed -n -e 's/^\([^ ]*\) [AB] __bss_start$$/-0x\1/p' \ |
| 122 | $$bss_start=hex($$1) if /^([[:xdigit:]]+) B __bss_start$$/; \ | 122 | -e 's/^\([^ ]*\) [AB] __bss_stop$$/+0x\1/p') )) ) |
| 123 | $$bss_end=hex($$1) if /^([[:xdigit:]]+) B __bss_stop$$/; \ | ||
| 124 | }; printf "%d\n", $$bss_end - $$bss_start;') | ||
| 125 | LDFLAGS_vmlinux = --defsym _kernel_bss_size=$(KBSS_SZ) | 123 | LDFLAGS_vmlinux = --defsym _kernel_bss_size=$(KBSS_SZ) |
| 126 | # Supply ZRELADDR to the decompressor via a linker symbol. | 124 | # Supply ZRELADDR to the decompressor via a linker symbol. |
| 127 | ifneq ($(CONFIG_AUTO_ZRELADDR),y) | 125 | ifneq ($(CONFIG_AUTO_ZRELADDR),y) |
diff --git a/arch/arm/boot/compressed/head.S b/arch/arm/boot/compressed/head.S index 45c8823c3750..517e0e18f0b8 100644 --- a/arch/arm/boot/compressed/head.S +++ b/arch/arm/boot/compressed/head.S | |||
| @@ -29,19 +29,19 @@ | |||
| 29 | #if defined(CONFIG_DEBUG_ICEDCC) | 29 | #if defined(CONFIG_DEBUG_ICEDCC) |
| 30 | 30 | ||
| 31 | #if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_V6K) || defined(CONFIG_CPU_V7) | 31 | #if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_V6K) || defined(CONFIG_CPU_V7) |
| 32 | .macro loadsp, rb, tmp | 32 | .macro loadsp, rb, tmp1, tmp2 |
| 33 | .endm | 33 | .endm |
| 34 | .macro writeb, ch, rb | 34 | .macro writeb, ch, rb |
| 35 | mcr p14, 0, \ch, c0, c5, 0 | 35 | mcr p14, 0, \ch, c0, c5, 0 |
| 36 | .endm | 36 | .endm |
| 37 | #elif defined(CONFIG_CPU_XSCALE) | 37 | #elif defined(CONFIG_CPU_XSCALE) |
| 38 | .macro loadsp, rb, tmp | 38 | .macro loadsp, rb, tmp1, tmp2 |
| 39 | .endm | 39 | .endm |
| 40 | .macro writeb, ch, rb | 40 | .macro writeb, ch, rb |
| 41 | mcr p14, 0, \ch, c8, c0, 0 | 41 | mcr p14, 0, \ch, c8, c0, 0 |
| 42 | .endm | 42 | .endm |
| 43 | #else | 43 | #else |
| 44 | .macro loadsp, rb, tmp | 44 | .macro loadsp, rb, tmp1, tmp2 |
| 45 | .endm | 45 | .endm |
| 46 | .macro writeb, ch, rb | 46 | .macro writeb, ch, rb |
| 47 | mcr p14, 0, \ch, c1, c0, 0 | 47 | mcr p14, 0, \ch, c1, c0, 0 |
| @@ -57,7 +57,7 @@ | |||
| 57 | .endm | 57 | .endm |
| 58 | 58 | ||
| 59 | #if defined(CONFIG_ARCH_SA1100) | 59 | #if defined(CONFIG_ARCH_SA1100) |
| 60 | .macro loadsp, rb, tmp | 60 | .macro loadsp, rb, tmp1, tmp2 |
| 61 | mov \rb, #0x80000000 @ physical base address | 61 | mov \rb, #0x80000000 @ physical base address |
| 62 | #ifdef CONFIG_DEBUG_LL_SER3 | 62 | #ifdef CONFIG_DEBUG_LL_SER3 |
| 63 | add \rb, \rb, #0x00050000 @ Ser3 | 63 | add \rb, \rb, #0x00050000 @ Ser3 |
| @@ -66,8 +66,8 @@ | |||
| 66 | #endif | 66 | #endif |
| 67 | .endm | 67 | .endm |
| 68 | #else | 68 | #else |
| 69 | .macro loadsp, rb, tmp | 69 | .macro loadsp, rb, tmp1, tmp2 |
| 70 | addruart \rb, \tmp | 70 | addruart \rb, \tmp1, \tmp2 |
| 71 | .endm | 71 | .endm |
| 72 | #endif | 72 | #endif |
| 73 | #endif | 73 | #endif |
| @@ -561,8 +561,6 @@ not_relocated: mov r0, #0 | |||
| 561 | bl decompress_kernel | 561 | bl decompress_kernel |
| 562 | bl cache_clean_flush | 562 | bl cache_clean_flush |
| 563 | bl cache_off | 563 | bl cache_off |
| 564 | mov r1, r7 @ restore architecture number | ||
| 565 | mov r2, r8 @ restore atags pointer | ||
| 566 | 564 | ||
| 567 | #ifdef CONFIG_ARM_VIRT_EXT | 565 | #ifdef CONFIG_ARM_VIRT_EXT |
| 568 | mrs r0, spsr @ Get saved CPU boot mode | 566 | mrs r0, spsr @ Get saved CPU boot mode |
| @@ -1297,7 +1295,7 @@ phex: adr r3, phexbuf | |||
| 1297 | b 1b | 1295 | b 1b |
| 1298 | 1296 | ||
| 1299 | @ puts corrupts {r0, r1, r2, r3} | 1297 | @ puts corrupts {r0, r1, r2, r3} |
| 1300 | puts: loadsp r3, r1 | 1298 | puts: loadsp r3, r2, r1 |
| 1301 | 1: ldrb r2, [r0], #1 | 1299 | 1: ldrb r2, [r0], #1 |
| 1302 | teq r2, #0 | 1300 | teq r2, #0 |
| 1303 | moveq pc, lr | 1301 | moveq pc, lr |
| @@ -1314,8 +1312,8 @@ puts: loadsp r3, r1 | |||
| 1314 | @ putc corrupts {r0, r1, r2, r3} | 1312 | @ putc corrupts {r0, r1, r2, r3} |
| 1315 | putc: | 1313 | putc: |
| 1316 | mov r2, r0 | 1314 | mov r2, r0 |
| 1315 | loadsp r3, r1, r0 | ||
| 1317 | mov r0, #0 | 1316 | mov r0, #0 |
| 1318 | loadsp r3, r1 | ||
| 1319 | b 2b | 1317 | b 2b |
| 1320 | 1318 | ||
| 1321 | @ memdump corrupts {r0, r1, r2, r3, r10, r11, r12, lr} | 1319 | @ memdump corrupts {r0, r1, r2, r3, r10, r11, r12, lr} |
| @@ -1365,6 +1363,8 @@ __hyp_reentry_vectors: | |||
| 1365 | 1363 | ||
| 1366 | __enter_kernel: | 1364 | __enter_kernel: |
| 1367 | mov r0, #0 @ must be 0 | 1365 | mov r0, #0 @ must be 0 |
| 1366 | mov r1, r7 @ restore architecture number | ||
| 1367 | mov r2, r8 @ restore atags pointer | ||
| 1368 | ARM( mov pc, r4 ) @ call kernel | 1368 | ARM( mov pc, r4 ) @ call kernel |
| 1369 | M_CLASS( add r4, r4, #1 ) @ enter in Thumb mode for M class | 1369 | M_CLASS( add r4, r4, #1 ) @ enter in Thumb mode for M class |
| 1370 | THUMB( bx r4 ) @ entry point is always ARM for A/R classes | 1370 | THUMB( bx r4 ) @ entry point is always ARM for A/R classes |
diff --git a/arch/arm/boot/dts/bcm-cygnus.dtsi b/arch/arm/boot/dts/bcm-cygnus.dtsi index 699fdf94d139..9fe4f5a6379e 100644 --- a/arch/arm/boot/dts/bcm-cygnus.dtsi +++ b/arch/arm/boot/dts/bcm-cygnus.dtsi | |||
| @@ -69,7 +69,7 @@ | |||
| 69 | timer@20200 { | 69 | timer@20200 { |
| 70 | compatible = "arm,cortex-a9-global-timer"; | 70 | compatible = "arm,cortex-a9-global-timer"; |
| 71 | reg = <0x20200 0x100>; | 71 | reg = <0x20200 0x100>; |
| 72 | interrupts = <GIC_PPI 11 IRQ_TYPE_LEVEL_HIGH>; | 72 | interrupts = <GIC_PPI 11 IRQ_TYPE_EDGE_RISING>; |
| 73 | clocks = <&periph_clk>; | 73 | clocks = <&periph_clk>; |
| 74 | }; | 74 | }; |
| 75 | 75 | ||
diff --git a/arch/arm/boot/dts/da850-lcdk.dts b/arch/arm/boot/dts/da850-lcdk.dts index a1f4d6d5a569..0edf769ea95c 100644 --- a/arch/arm/boot/dts/da850-lcdk.dts +++ b/arch/arm/boot/dts/da850-lcdk.dts | |||
| @@ -21,8 +21,8 @@ | |||
| 21 | stdout-path = "serial2:115200n8"; | 21 | stdout-path = "serial2:115200n8"; |
| 22 | }; | 22 | }; |
| 23 | 23 | ||
| 24 | memory { | 24 | memory@c0000000 { |
| 25 | device_type = "memory"; | 25 | /* 128 MB DDR2 SDRAM @ 0xc0000000 */ |
| 26 | reg = <0xc0000000 0x08000000>; | 26 | reg = <0xc0000000 0x08000000>; |
| 27 | }; | 27 | }; |
| 28 | 28 | ||
diff --git a/arch/arm/boot/dts/da850.dtsi b/arch/arm/boot/dts/da850.dtsi index c66cf7895363..12010002dbdb 100644 --- a/arch/arm/boot/dts/da850.dtsi +++ b/arch/arm/boot/dts/da850.dtsi | |||
| @@ -7,10 +7,19 @@ | |||
| 7 | * Free Software Foundation; either version 2 of the License, or (at your | 7 | * Free Software Foundation; either version 2 of the License, or (at your |
| 8 | * option) any later version. | 8 | * option) any later version. |
| 9 | */ | 9 | */ |
| 10 | #include "skeleton.dtsi" | ||
| 11 | #include <dt-bindings/interrupt-controller/irq.h> | 10 | #include <dt-bindings/interrupt-controller/irq.h> |
| 12 | 11 | ||
| 13 | / { | 12 | / { |
| 13 | #address-cells = <1>; | ||
| 14 | #size-cells = <1>; | ||
| 15 | chosen { }; | ||
| 16 | aliases { }; | ||
| 17 | |||
| 18 | memory@c0000000 { | ||
| 19 | device_type = "memory"; | ||
| 20 | reg = <0xc0000000 0x0>; | ||
| 21 | }; | ||
| 22 | |||
| 14 | arm { | 23 | arm { |
| 15 | #address-cells = <1>; | 24 | #address-cells = <1>; |
| 16 | #size-cells = <1>; | 25 | #size-cells = <1>; |
| @@ -46,8 +55,6 @@ | |||
| 46 | pmx_core: pinmux@14120 { | 55 | pmx_core: pinmux@14120 { |
| 47 | compatible = "pinctrl-single"; | 56 | compatible = "pinctrl-single"; |
| 48 | reg = <0x14120 0x50>; | 57 | reg = <0x14120 0x50>; |
| 49 | #address-cells = <1>; | ||
| 50 | #size-cells = <0>; | ||
| 51 | #pinctrl-cells = <2>; | 58 | #pinctrl-cells = <2>; |
| 52 | pinctrl-single,bit-per-mux; | 59 | pinctrl-single,bit-per-mux; |
| 53 | pinctrl-single,register-width = <32>; | 60 | pinctrl-single,register-width = <32>; |
diff --git a/arch/arm/boot/dts/dm8148-evm.dts b/arch/arm/boot/dts/dm8148-evm.dts index d6657b3bae84..85d7b5148b0a 100644 --- a/arch/arm/boot/dts/dm8148-evm.dts +++ b/arch/arm/boot/dts/dm8148-evm.dts | |||
| @@ -10,7 +10,7 @@ | |||
| 10 | 10 | ||
| 11 | / { | 11 | / { |
| 12 | model = "DM8148 EVM"; | 12 | model = "DM8148 EVM"; |
| 13 | compatible = "ti,dm8148-evm", "ti,dm8148"; | 13 | compatible = "ti,dm8148-evm", "ti,dm8148", "ti,dm814"; |
| 14 | 14 | ||
| 15 | memory@80000000 { | 15 | memory@80000000 { |
| 16 | device_type = "memory"; | 16 | device_type = "memory"; |
diff --git a/arch/arm/boot/dts/dm8148-t410.dts b/arch/arm/boot/dts/dm8148-t410.dts index 63883b3479f9..6418f9cdbe83 100644 --- a/arch/arm/boot/dts/dm8148-t410.dts +++ b/arch/arm/boot/dts/dm8148-t410.dts | |||
| @@ -9,7 +9,7 @@ | |||
| 9 | 9 | ||
| 10 | / { | 10 | / { |
| 11 | model = "HP t410 Smart Zero Client"; | 11 | model = "HP t410 Smart Zero Client"; |
| 12 | compatible = "hp,t410", "ti,dm8148"; | 12 | compatible = "hp,t410", "ti,dm8148", "ti,dm814"; |
| 13 | 13 | ||
| 14 | memory@80000000 { | 14 | memory@80000000 { |
| 15 | device_type = "memory"; | 15 | device_type = "memory"; |
diff --git a/arch/arm/boot/dts/dm8168-evm.dts b/arch/arm/boot/dts/dm8168-evm.dts index c72a2132aa82..1d030d567307 100644 --- a/arch/arm/boot/dts/dm8168-evm.dts +++ b/arch/arm/boot/dts/dm8168-evm.dts | |||
| @@ -10,7 +10,7 @@ | |||
| 10 | 10 | ||
| 11 | / { | 11 | / { |
| 12 | model = "DM8168 EVM"; | 12 | model = "DM8168 EVM"; |
| 13 | compatible = "ti,dm8168-evm", "ti,dm8168"; | 13 | compatible = "ti,dm8168-evm", "ti,dm8168", "ti,dm816"; |
| 14 | 14 | ||
| 15 | memory@80000000 { | 15 | memory@80000000 { |
| 16 | device_type = "memory"; | 16 | device_type = "memory"; |
diff --git a/arch/arm/boot/dts/dra62x-j5eco-evm.dts b/arch/arm/boot/dts/dra62x-j5eco-evm.dts index fee0547f7302..31b824ad5d29 100644 --- a/arch/arm/boot/dts/dra62x-j5eco-evm.dts +++ b/arch/arm/boot/dts/dra62x-j5eco-evm.dts | |||
| @@ -10,7 +10,7 @@ | |||
| 10 | 10 | ||
| 11 | / { | 11 | / { |
| 12 | model = "DRA62x J5 Eco EVM"; | 12 | model = "DRA62x J5 Eco EVM"; |
| 13 | compatible = "ti,dra62x-j5eco-evm", "ti,dra62x", "ti,dm8148"; | 13 | compatible = "ti,dra62x-j5eco-evm", "ti,dra62x", "ti,dm8148", "ti,dm814"; |
| 14 | 14 | ||
| 15 | memory@80000000 { | 15 | memory@80000000 { |
| 16 | device_type = "memory"; | 16 | device_type = "memory"; |
diff --git a/arch/arm/boot/dts/imx51-zii-rdu1.dts b/arch/arm/boot/dts/imx51-zii-rdu1.dts index 0c99ac04ad08..6464f2560e06 100644 --- a/arch/arm/boot/dts/imx51-zii-rdu1.dts +++ b/arch/arm/boot/dts/imx51-zii-rdu1.dts | |||
| @@ -523,7 +523,7 @@ | |||
| 523 | }; | 523 | }; |
| 524 | 524 | ||
| 525 | touchscreen@20 { | 525 | touchscreen@20 { |
| 526 | compatible = "syna,rmi4_i2c"; | 526 | compatible = "syna,rmi4-i2c"; |
| 527 | reg = <0x20>; | 527 | reg = <0x20>; |
| 528 | pinctrl-names = "default"; | 528 | pinctrl-names = "default"; |
| 529 | pinctrl-0 = <&pinctrl_ts>; | 529 | pinctrl-0 = <&pinctrl_ts>; |
| @@ -541,8 +541,8 @@ | |||
| 541 | 541 | ||
| 542 | rmi4-f11@11 { | 542 | rmi4-f11@11 { |
| 543 | reg = <0x11>; | 543 | reg = <0x11>; |
| 544 | touch-inverted-y; | 544 | touchscreen-inverted-y; |
| 545 | touch-swapped-x-y; | 545 | touchscreen-swapped-x-y; |
| 546 | syna,sensor-type = <1>; | 546 | syna,sensor-type = <1>; |
| 547 | }; | 547 | }; |
| 548 | }; | 548 | }; |
diff --git a/arch/arm/boot/dts/imx7s.dtsi b/arch/arm/boot/dts/imx7s.dtsi index 4d42335c0dee..ce85b3ca1a55 100644 --- a/arch/arm/boot/dts/imx7s.dtsi +++ b/arch/arm/boot/dts/imx7s.dtsi | |||
| @@ -868,6 +868,7 @@ | |||
| 868 | 868 | ||
| 869 | crypto: caam@30900000 { | 869 | crypto: caam@30900000 { |
| 870 | compatible = "fsl,sec-v4.0"; | 870 | compatible = "fsl,sec-v4.0"; |
| 871 | fsl,sec-era = <8>; | ||
| 871 | #address-cells = <1>; | 872 | #address-cells = <1>; |
| 872 | #size-cells = <1>; | 873 | #size-cells = <1>; |
| 873 | reg = <0x30900000 0x40000>; | 874 | reg = <0x30900000 0x40000>; |
diff --git a/arch/arm/boot/dts/logicpd-som-lv.dtsi b/arch/arm/boot/dts/logicpd-som-lv.dtsi index b47cac23a04b..6fa7bba3e801 100644 --- a/arch/arm/boot/dts/logicpd-som-lv.dtsi +++ b/arch/arm/boot/dts/logicpd-som-lv.dtsi | |||
| @@ -26,7 +26,7 @@ | |||
| 26 | gpio = <&gpio1 3 0>; /* gpio_3 */ | 26 | gpio = <&gpio1 3 0>; /* gpio_3 */ |
| 27 | startup-delay-us = <70000>; | 27 | startup-delay-us = <70000>; |
| 28 | enable-active-high; | 28 | enable-active-high; |
| 29 | vin-supply = <&vmmc2>; | 29 | vin-supply = <&vaux3>; |
| 30 | }; | 30 | }; |
| 31 | 31 | ||
| 32 | /* HS USB Host PHY on PORT 1 */ | 32 | /* HS USB Host PHY on PORT 1 */ |
| @@ -82,6 +82,7 @@ | |||
| 82 | twl_audio: audio { | 82 | twl_audio: audio { |
| 83 | compatible = "ti,twl4030-audio"; | 83 | compatible = "ti,twl4030-audio"; |
| 84 | codec { | 84 | codec { |
| 85 | ti,hs_extmute_gpio = <&gpio2 25 GPIO_ACTIVE_HIGH>; | ||
| 85 | }; | 86 | }; |
| 86 | }; | 87 | }; |
| 87 | }; | 88 | }; |
| @@ -199,6 +200,7 @@ | |||
| 199 | pinctrl-single,pins = < | 200 | pinctrl-single,pins = < |
| 200 | OMAP3_CORE1_IOPAD(0x21ba, PIN_INPUT | MUX_MODE0) /* i2c1_scl.i2c1_scl */ | 201 | OMAP3_CORE1_IOPAD(0x21ba, PIN_INPUT | MUX_MODE0) /* i2c1_scl.i2c1_scl */ |
| 201 | OMAP3_CORE1_IOPAD(0x21bc, PIN_INPUT | MUX_MODE0) /* i2c1_sda.i2c1_sda */ | 202 | OMAP3_CORE1_IOPAD(0x21bc, PIN_INPUT | MUX_MODE0) /* i2c1_sda.i2c1_sda */ |
| 203 | OMAP3_CORE1_IOPAD(0x20ba, PIN_OUTPUT | MUX_MODE4) /* gpmc_ncs6.gpio_57 */ | ||
| 202 | >; | 204 | >; |
| 203 | }; | 205 | }; |
| 204 | }; | 206 | }; |
| @@ -213,7 +215,7 @@ | |||
| 213 | }; | 215 | }; |
| 214 | wl127x_gpio: pinmux_wl127x_gpio_pin { | 216 | wl127x_gpio: pinmux_wl127x_gpio_pin { |
| 215 | pinctrl-single,pins = < | 217 | pinctrl-single,pins = < |
| 216 | OMAP3_WKUP_IOPAD(0x2a0c, PIN_INPUT | MUX_MODE4) /* sys_boot0.gpio_2 */ | 218 | OMAP3_WKUP_IOPAD(0x2a0a, PIN_INPUT | MUX_MODE4) /* sys_boot0.gpio_2 */ |
| 217 | OMAP3_WKUP_IOPAD(0x2a0c, PIN_OUTPUT | MUX_MODE4) /* sys_boot1.gpio_3 */ | 219 | OMAP3_WKUP_IOPAD(0x2a0c, PIN_OUTPUT | MUX_MODE4) /* sys_boot1.gpio_3 */ |
| 218 | >; | 220 | >; |
| 219 | }; | 221 | }; |
| @@ -260,6 +262,11 @@ | |||
| 260 | #include "twl4030.dtsi" | 262 | #include "twl4030.dtsi" |
| 261 | #include "twl4030_omap3.dtsi" | 263 | #include "twl4030_omap3.dtsi" |
| 262 | 264 | ||
| 265 | &vaux3 { | ||
| 266 | regulator-min-microvolt = <2800000>; | ||
| 267 | regulator-max-microvolt = <2800000>; | ||
| 268 | }; | ||
| 269 | |||
| 263 | &twl { | 270 | &twl { |
| 264 | twl_power: power { | 271 | twl_power: power { |
| 265 | compatible = "ti,twl4030-power-idle-osc-off", "ti,twl4030-power-idle"; | 272 | compatible = "ti,twl4030-power-idle-osc-off", "ti,twl4030-power-idle"; |
diff --git a/arch/arm/boot/dts/r8a7790-lager.dts b/arch/arm/boot/dts/r8a7790-lager.dts index 063fdb65dc60..f07f9018c3e7 100644 --- a/arch/arm/boot/dts/r8a7790-lager.dts +++ b/arch/arm/boot/dts/r8a7790-lager.dts | |||
| @@ -379,7 +379,7 @@ | |||
| 379 | port@0 { | 379 | port@0 { |
| 380 | reg = <0>; | 380 | reg = <0>; |
| 381 | adv7511_in: endpoint { | 381 | adv7511_in: endpoint { |
| 382 | remote-endpoint = <&du_out_lvds0>; | 382 | remote-endpoint = <&lvds0_out>; |
| 383 | }; | 383 | }; |
| 384 | }; | 384 | }; |
| 385 | 385 | ||
| @@ -467,10 +467,8 @@ | |||
| 467 | status = "okay"; | 467 | status = "okay"; |
| 468 | 468 | ||
| 469 | clocks = <&cpg CPG_MOD 724>, <&cpg CPG_MOD 723>, <&cpg CPG_MOD 722>, | 469 | clocks = <&cpg CPG_MOD 724>, <&cpg CPG_MOD 723>, <&cpg CPG_MOD 722>, |
| 470 | <&cpg CPG_MOD 726>, <&cpg CPG_MOD 725>, | ||
| 471 | <&x13_clk>, <&x2_clk>; | 470 | <&x13_clk>, <&x2_clk>; |
| 472 | clock-names = "du.0", "du.1", "du.2", "lvds.0", "lvds.1", | 471 | clock-names = "du.0", "du.1", "du.2", "dclkin.0", "dclkin.1"; |
| 473 | "dclkin.0", "dclkin.1"; | ||
| 474 | 472 | ||
| 475 | ports { | 473 | ports { |
| 476 | port@0 { | 474 | port@0 { |
| @@ -478,12 +476,26 @@ | |||
| 478 | remote-endpoint = <&adv7123_in>; | 476 | remote-endpoint = <&adv7123_in>; |
| 479 | }; | 477 | }; |
| 480 | }; | 478 | }; |
| 479 | }; | ||
| 480 | }; | ||
| 481 | |||
| 482 | &lvds0 { | ||
| 483 | status = "okay"; | ||
| 484 | |||
| 485 | ports { | ||
| 481 | port@1 { | 486 | port@1 { |
| 482 | endpoint { | 487 | endpoint { |
| 483 | remote-endpoint = <&adv7511_in>; | 488 | remote-endpoint = <&adv7511_in>; |
| 484 | }; | 489 | }; |
| 485 | }; | 490 | }; |
| 486 | port@2 { | 491 | }; |
| 492 | }; | ||
| 493 | |||
| 494 | &lvds1 { | ||
| 495 | status = "okay"; | ||
| 496 | |||
| 497 | ports { | ||
| 498 | port@1 { | ||
| 487 | lvds_connector: endpoint { | 499 | lvds_connector: endpoint { |
| 488 | }; | 500 | }; |
| 489 | }; | 501 | }; |
diff --git a/arch/arm/boot/dts/r8a7790.dtsi b/arch/arm/boot/dts/r8a7790.dtsi index e4367cecad18..05a0fc23ac88 100644 --- a/arch/arm/boot/dts/r8a7790.dtsi +++ b/arch/arm/boot/dts/r8a7790.dtsi | |||
| @@ -1627,18 +1627,13 @@ | |||
| 1627 | 1627 | ||
| 1628 | du: display@feb00000 { | 1628 | du: display@feb00000 { |
| 1629 | compatible = "renesas,du-r8a7790"; | 1629 | compatible = "renesas,du-r8a7790"; |
| 1630 | reg = <0 0xfeb00000 0 0x70000>, | 1630 | reg = <0 0xfeb00000 0 0x70000>; |
| 1631 | <0 0xfeb90000 0 0x1c>, | ||
| 1632 | <0 0xfeb94000 0 0x1c>; | ||
| 1633 | reg-names = "du", "lvds.0", "lvds.1"; | ||
| 1634 | interrupts = <GIC_SPI 256 IRQ_TYPE_LEVEL_HIGH>, | 1631 | interrupts = <GIC_SPI 256 IRQ_TYPE_LEVEL_HIGH>, |
| 1635 | <GIC_SPI 268 IRQ_TYPE_LEVEL_HIGH>, | 1632 | <GIC_SPI 268 IRQ_TYPE_LEVEL_HIGH>, |
| 1636 | <GIC_SPI 269 IRQ_TYPE_LEVEL_HIGH>; | 1633 | <GIC_SPI 269 IRQ_TYPE_LEVEL_HIGH>; |
| 1637 | clocks = <&cpg CPG_MOD 724>, <&cpg CPG_MOD 723>, | 1634 | clocks = <&cpg CPG_MOD 724>, <&cpg CPG_MOD 723>, |
| 1638 | <&cpg CPG_MOD 722>, <&cpg CPG_MOD 726>, | 1635 | <&cpg CPG_MOD 722>; |
| 1639 | <&cpg CPG_MOD 725>; | 1636 | clock-names = "du.0", "du.1", "du.2"; |
| 1640 | clock-names = "du.0", "du.1", "du.2", "lvds.0", | ||
| 1641 | "lvds.1"; | ||
| 1642 | status = "disabled"; | 1637 | status = "disabled"; |
| 1643 | 1638 | ||
| 1644 | ports { | 1639 | ports { |
| @@ -1653,11 +1648,65 @@ | |||
| 1653 | port@1 { | 1648 | port@1 { |
| 1654 | reg = <1>; | 1649 | reg = <1>; |
| 1655 | du_out_lvds0: endpoint { | 1650 | du_out_lvds0: endpoint { |
| 1651 | remote-endpoint = <&lvds0_in>; | ||
| 1656 | }; | 1652 | }; |
| 1657 | }; | 1653 | }; |
| 1658 | port@2 { | 1654 | port@2 { |
| 1659 | reg = <2>; | 1655 | reg = <2>; |
| 1660 | du_out_lvds1: endpoint { | 1656 | du_out_lvds1: endpoint { |
| 1657 | remote-endpoint = <&lvds1_in>; | ||
| 1658 | }; | ||
| 1659 | }; | ||
| 1660 | }; | ||
| 1661 | }; | ||
| 1662 | |||
| 1663 | lvds0: lvds@feb90000 { | ||
| 1664 | compatible = "renesas,r8a7790-lvds"; | ||
| 1665 | reg = <0 0xfeb90000 0 0x1c>; | ||
| 1666 | clocks = <&cpg CPG_MOD 726>; | ||
| 1667 | power-domains = <&sysc R8A7790_PD_ALWAYS_ON>; | ||
| 1668 | resets = <&cpg 726>; | ||
| 1669 | status = "disabled"; | ||
| 1670 | |||
| 1671 | ports { | ||
| 1672 | #address-cells = <1>; | ||
| 1673 | #size-cells = <0>; | ||
| 1674 | |||
| 1675 | port@0 { | ||
| 1676 | reg = <0>; | ||
| 1677 | lvds0_in: endpoint { | ||
| 1678 | remote-endpoint = <&du_out_lvds0>; | ||
| 1679 | }; | ||
| 1680 | }; | ||
| 1681 | port@1 { | ||
| 1682 | reg = <1>; | ||
| 1683 | lvds0_out: endpoint { | ||
| 1684 | }; | ||
| 1685 | }; | ||
| 1686 | }; | ||
| 1687 | }; | ||
| 1688 | |||
| 1689 | lvds1: lvds@feb94000 { | ||
| 1690 | compatible = "renesas,r8a7790-lvds"; | ||
| 1691 | reg = <0 0xfeb94000 0 0x1c>; | ||
| 1692 | clocks = <&cpg CPG_MOD 725>; | ||
| 1693 | power-domains = <&sysc R8A7790_PD_ALWAYS_ON>; | ||
| 1694 | resets = <&cpg 725>; | ||
| 1695 | status = "disabled"; | ||
| 1696 | |||
| 1697 | ports { | ||
| 1698 | #address-cells = <1>; | ||
| 1699 | #size-cells = <0>; | ||
| 1700 | |||
| 1701 | port@0 { | ||
| 1702 | reg = <0>; | ||
| 1703 | lvds1_in: endpoint { | ||
| 1704 | remote-endpoint = <&du_out_lvds1>; | ||
| 1705 | }; | ||
| 1706 | }; | ||
| 1707 | port@1 { | ||
| 1708 | reg = <1>; | ||
| 1709 | lvds1_out: endpoint { | ||
| 1661 | }; | 1710 | }; |
| 1662 | }; | 1711 | }; |
| 1663 | }; | 1712 | }; |
diff --git a/arch/arm/boot/dts/r8a7791-koelsch.dts b/arch/arm/boot/dts/r8a7791-koelsch.dts index f40321a1c917..9d7213a0b8b8 100644 --- a/arch/arm/boot/dts/r8a7791-koelsch.dts +++ b/arch/arm/boot/dts/r8a7791-koelsch.dts | |||
| @@ -468,10 +468,9 @@ | |||
| 468 | pinctrl-names = "default"; | 468 | pinctrl-names = "default"; |
| 469 | status = "okay"; | 469 | status = "okay"; |
| 470 | 470 | ||
| 471 | clocks = <&cpg CPG_MOD 724>, <&cpg CPG_MOD 723>, <&cpg CPG_MOD 726>, | 471 | clocks = <&cpg CPG_MOD 724>, <&cpg CPG_MOD 723>, |
| 472 | <&x13_clk>, <&x2_clk>; | 472 | <&x13_clk>, <&x2_clk>; |
| 473 | clock-names = "du.0", "du.1", "lvds.0", | 473 | clock-names = "du.0", "du.1", "dclkin.0", "dclkin.1"; |
| 474 | "dclkin.0", "dclkin.1"; | ||
| 475 | 474 | ||
| 476 | ports { | 475 | ports { |
| 477 | port@0 { | 476 | port@0 { |
| @@ -479,6 +478,13 @@ | |||
| 479 | remote-endpoint = <&adv7511_in>; | 478 | remote-endpoint = <&adv7511_in>; |
| 480 | }; | 479 | }; |
| 481 | }; | 480 | }; |
| 481 | }; | ||
| 482 | }; | ||
| 483 | |||
| 484 | &lvds0 { | ||
| 485 | status = "okay"; | ||
| 486 | |||
| 487 | ports { | ||
| 482 | port@1 { | 488 | port@1 { |
| 483 | lvds_connector: endpoint { | 489 | lvds_connector: endpoint { |
| 484 | }; | 490 | }; |
diff --git a/arch/arm/boot/dts/r8a7791-porter.dts b/arch/arm/boot/dts/r8a7791-porter.dts index c14e6fe9e4f6..ae9ed9ff53ef 100644 --- a/arch/arm/boot/dts/r8a7791-porter.dts +++ b/arch/arm/boot/dts/r8a7791-porter.dts | |||
| @@ -441,10 +441,9 @@ | |||
| 441 | pinctrl-names = "default"; | 441 | pinctrl-names = "default"; |
| 442 | status = "okay"; | 442 | status = "okay"; |
| 443 | 443 | ||
| 444 | clocks = <&cpg CPG_MOD 724>, <&cpg CPG_MOD 723>, <&cpg CPG_MOD 726>, | 444 | clocks = <&cpg CPG_MOD 724>, <&cpg CPG_MOD 723>, |
| 445 | <&x3_clk>, <&x16_clk>; | 445 | <&x3_clk>, <&x16_clk>; |
| 446 | clock-names = "du.0", "du.1", "lvds.0", | 446 | clock-names = "du.0", "du.1", "dclkin.0", "dclkin.1"; |
| 447 | "dclkin.0", "dclkin.1"; | ||
| 448 | 447 | ||
| 449 | ports { | 448 | ports { |
| 450 | port@0 { | 449 | port@0 { |
| @@ -455,6 +454,17 @@ | |||
| 455 | }; | 454 | }; |
| 456 | }; | 455 | }; |
| 457 | 456 | ||
| 457 | &lvds0 { | ||
| 458 | status = "okay"; | ||
| 459 | |||
| 460 | ports { | ||
| 461 | port@1 { | ||
| 462 | lvds_connector: endpoint { | ||
| 463 | }; | ||
| 464 | }; | ||
| 465 | }; | ||
| 466 | }; | ||
| 467 | |||
| 458 | &rcar_sound { | 468 | &rcar_sound { |
| 459 | pinctrl-0 = <&ssi_pins &audio_clk_pins>; | 469 | pinctrl-0 = <&ssi_pins &audio_clk_pins>; |
| 460 | pinctrl-names = "default"; | 470 | pinctrl-names = "default"; |
diff --git a/arch/arm/boot/dts/r8a7791.dtsi b/arch/arm/boot/dts/r8a7791.dtsi index f11dab71b03a..506b20885413 100644 --- a/arch/arm/boot/dts/r8a7791.dtsi +++ b/arch/arm/boot/dts/r8a7791.dtsi | |||
| @@ -1633,15 +1633,12 @@ | |||
| 1633 | 1633 | ||
| 1634 | du: display@feb00000 { | 1634 | du: display@feb00000 { |
| 1635 | compatible = "renesas,du-r8a7791"; | 1635 | compatible = "renesas,du-r8a7791"; |
| 1636 | reg = <0 0xfeb00000 0 0x40000>, | 1636 | reg = <0 0xfeb00000 0 0x40000>; |
| 1637 | <0 0xfeb90000 0 0x1c>; | ||
| 1638 | reg-names = "du", "lvds.0"; | ||
| 1639 | interrupts = <GIC_SPI 256 IRQ_TYPE_LEVEL_HIGH>, | 1637 | interrupts = <GIC_SPI 256 IRQ_TYPE_LEVEL_HIGH>, |
| 1640 | <GIC_SPI 268 IRQ_TYPE_LEVEL_HIGH>; | 1638 | <GIC_SPI 268 IRQ_TYPE_LEVEL_HIGH>; |
| 1641 | clocks = <&cpg CPG_MOD 724>, | 1639 | clocks = <&cpg CPG_MOD 724>, |
| 1642 | <&cpg CPG_MOD 723>, | 1640 | <&cpg CPG_MOD 723>; |
| 1643 | <&cpg CPG_MOD 726>; | 1641 | clock-names = "du.0", "du.1"; |
| 1644 | clock-names = "du.0", "du.1", "lvds.0"; | ||
| 1645 | status = "disabled"; | 1642 | status = "disabled"; |
| 1646 | 1643 | ||
| 1647 | ports { | 1644 | ports { |
| @@ -1656,6 +1653,33 @@ | |||
| 1656 | port@1 { | 1653 | port@1 { |
| 1657 | reg = <1>; | 1654 | reg = <1>; |
| 1658 | du_out_lvds0: endpoint { | 1655 | du_out_lvds0: endpoint { |
| 1656 | remote-endpoint = <&lvds0_in>; | ||
| 1657 | }; | ||
| 1658 | }; | ||
| 1659 | }; | ||
| 1660 | }; | ||
| 1661 | |||
| 1662 | lvds0: lvds@feb90000 { | ||
| 1663 | compatible = "renesas,r8a7791-lvds"; | ||
| 1664 | reg = <0 0xfeb90000 0 0x1c>; | ||
| 1665 | clocks = <&cpg CPG_MOD 726>; | ||
| 1666 | power-domains = <&sysc R8A7791_PD_ALWAYS_ON>; | ||
| 1667 | resets = <&cpg 726>; | ||
| 1668 | status = "disabled"; | ||
| 1669 | |||
| 1670 | ports { | ||
| 1671 | #address-cells = <1>; | ||
| 1672 | #size-cells = <0>; | ||
| 1673 | |||
| 1674 | port@0 { | ||
| 1675 | reg = <0>; | ||
| 1676 | lvds0_in: endpoint { | ||
| 1677 | remote-endpoint = <&du_out_lvds0>; | ||
| 1678 | }; | ||
| 1679 | }; | ||
| 1680 | port@1 { | ||
| 1681 | reg = <1>; | ||
| 1682 | lvds0_out: endpoint { | ||
| 1659 | }; | 1683 | }; |
| 1660 | }; | 1684 | }; |
| 1661 | }; | 1685 | }; |
diff --git a/arch/arm/boot/dts/r8a7793-gose.dts b/arch/arm/boot/dts/r8a7793-gose.dts index 9ed6961f2d9a..96e117d8b2cc 100644 --- a/arch/arm/boot/dts/r8a7793-gose.dts +++ b/arch/arm/boot/dts/r8a7793-gose.dts | |||
| @@ -447,10 +447,9 @@ | |||
| 447 | pinctrl-names = "default"; | 447 | pinctrl-names = "default"; |
| 448 | status = "okay"; | 448 | status = "okay"; |
| 449 | 449 | ||
| 450 | clocks = <&cpg CPG_MOD 724>, <&cpg CPG_MOD 723>, <&cpg CPG_MOD 726>, | 450 | clocks = <&cpg CPG_MOD 724>, <&cpg CPG_MOD 723>, |
| 451 | <&x13_clk>, <&x2_clk>; | 451 | <&x13_clk>, <&x2_clk>; |
| 452 | clock-names = "du.0", "du.1", "lvds.0", | 452 | clock-names = "du.0", "du.1", "dclkin.0", "dclkin.1"; |
| 453 | "dclkin.0", "dclkin.1"; | ||
| 454 | 453 | ||
| 455 | ports { | 454 | ports { |
| 456 | port@0 { | 455 | port@0 { |
| @@ -458,6 +457,11 @@ | |||
| 458 | remote-endpoint = <&adv7511_in>; | 457 | remote-endpoint = <&adv7511_in>; |
| 459 | }; | 458 | }; |
| 460 | }; | 459 | }; |
| 460 | }; | ||
| 461 | }; | ||
| 462 | |||
| 463 | &lvds0 { | ||
| 464 | ports { | ||
| 461 | port@1 { | 465 | port@1 { |
| 462 | lvds_connector: endpoint { | 466 | lvds_connector: endpoint { |
| 463 | }; | 467 | }; |
diff --git a/arch/arm/boot/dts/r8a7793.dtsi b/arch/arm/boot/dts/r8a7793.dtsi index f9c5a557107d..4f526030dc7c 100644 --- a/arch/arm/boot/dts/r8a7793.dtsi +++ b/arch/arm/boot/dts/r8a7793.dtsi | |||
| @@ -1292,15 +1292,12 @@ | |||
| 1292 | 1292 | ||
| 1293 | du: display@feb00000 { | 1293 | du: display@feb00000 { |
| 1294 | compatible = "renesas,du-r8a7793"; | 1294 | compatible = "renesas,du-r8a7793"; |
| 1295 | reg = <0 0xfeb00000 0 0x40000>, | 1295 | reg = <0 0xfeb00000 0 0x40000>; |
| 1296 | <0 0xfeb90000 0 0x1c>; | ||
| 1297 | reg-names = "du", "lvds.0"; | ||
| 1298 | interrupts = <GIC_SPI 256 IRQ_TYPE_LEVEL_HIGH>, | 1296 | interrupts = <GIC_SPI 256 IRQ_TYPE_LEVEL_HIGH>, |
| 1299 | <GIC_SPI 268 IRQ_TYPE_LEVEL_HIGH>; | 1297 | <GIC_SPI 268 IRQ_TYPE_LEVEL_HIGH>; |
| 1300 | clocks = <&cpg CPG_MOD 724>, | 1298 | clocks = <&cpg CPG_MOD 724>, |
| 1301 | <&cpg CPG_MOD 723>, | 1299 | <&cpg CPG_MOD 723>; |
| 1302 | <&cpg CPG_MOD 726>; | 1300 | clock-names = "du.0", "du.1"; |
| 1303 | clock-names = "du.0", "du.1", "lvds.0"; | ||
| 1304 | status = "disabled"; | 1301 | status = "disabled"; |
| 1305 | 1302 | ||
| 1306 | ports { | 1303 | ports { |
| @@ -1315,6 +1312,34 @@ | |||
| 1315 | port@1 { | 1312 | port@1 { |
| 1316 | reg = <1>; | 1313 | reg = <1>; |
| 1317 | du_out_lvds0: endpoint { | 1314 | du_out_lvds0: endpoint { |
| 1315 | remote-endpoint = <&lvds0_in>; | ||
| 1316 | }; | ||
| 1317 | }; | ||
| 1318 | }; | ||
| 1319 | }; | ||
| 1320 | |||
| 1321 | lvds0: lvds@feb90000 { | ||
| 1322 | compatible = "renesas,r8a7793-lvds"; | ||
| 1323 | reg = <0 0xfeb90000 0 0x1c>; | ||
| 1324 | clocks = <&cpg CPG_MOD 726>; | ||
| 1325 | power-domains = <&sysc R8A7793_PD_ALWAYS_ON>; | ||
| 1326 | resets = <&cpg 726>; | ||
| 1327 | |||
| 1328 | status = "disabled"; | ||
| 1329 | |||
| 1330 | ports { | ||
| 1331 | #address-cells = <1>; | ||
| 1332 | #size-cells = <0>; | ||
| 1333 | |||
| 1334 | port@0 { | ||
| 1335 | reg = <0>; | ||
| 1336 | lvds0_in: endpoint { | ||
| 1337 | remote-endpoint = <&du_out_lvds0>; | ||
| 1338 | }; | ||
| 1339 | }; | ||
| 1340 | port@1 { | ||
| 1341 | reg = <1>; | ||
| 1342 | lvds0_out: endpoint { | ||
| 1318 | }; | 1343 | }; |
| 1319 | }; | 1344 | }; |
| 1320 | }; | 1345 | }; |
diff --git a/arch/arm/boot/dts/sun4i-a10.dtsi b/arch/arm/boot/dts/sun4i-a10.dtsi index 77e8436beed4..3a1c6b45c9a1 100644 --- a/arch/arm/boot/dts/sun4i-a10.dtsi +++ b/arch/arm/boot/dts/sun4i-a10.dtsi | |||
| @@ -76,7 +76,7 @@ | |||
| 76 | allwinner,pipeline = "de_fe0-de_be0-lcd0-hdmi"; | 76 | allwinner,pipeline = "de_fe0-de_be0-lcd0-hdmi"; |
| 77 | clocks = <&ccu CLK_AHB_LCD0>, <&ccu CLK_AHB_HDMI0>, | 77 | clocks = <&ccu CLK_AHB_LCD0>, <&ccu CLK_AHB_HDMI0>, |
| 78 | <&ccu CLK_AHB_DE_BE0>, <&ccu CLK_AHB_DE_FE0>, | 78 | <&ccu CLK_AHB_DE_BE0>, <&ccu CLK_AHB_DE_FE0>, |
| 79 | <&ccu CLK_DE_BE0>, <&ccu CLK_AHB_DE_FE0>, | 79 | <&ccu CLK_DE_BE0>, <&ccu CLK_DE_FE0>, |
| 80 | <&ccu CLK_TCON0_CH1>, <&ccu CLK_HDMI>, | 80 | <&ccu CLK_TCON0_CH1>, <&ccu CLK_HDMI>, |
| 81 | <&ccu CLK_DRAM_DE_FE0>, <&ccu CLK_DRAM_DE_BE0>; | 81 | <&ccu CLK_DRAM_DE_FE0>, <&ccu CLK_DRAM_DE_BE0>; |
| 82 | status = "disabled"; | 82 | status = "disabled"; |
| @@ -88,7 +88,7 @@ | |||
| 88 | allwinner,pipeline = "de_fe0-de_be0-lcd0"; | 88 | allwinner,pipeline = "de_fe0-de_be0-lcd0"; |
| 89 | clocks = <&ccu CLK_AHB_LCD0>, <&ccu CLK_AHB_DE_BE0>, | 89 | clocks = <&ccu CLK_AHB_LCD0>, <&ccu CLK_AHB_DE_BE0>, |
| 90 | <&ccu CLK_AHB_DE_FE0>, <&ccu CLK_DE_BE0>, | 90 | <&ccu CLK_AHB_DE_FE0>, <&ccu CLK_DE_BE0>, |
| 91 | <&ccu CLK_AHB_DE_FE0>, <&ccu CLK_TCON0_CH0>, | 91 | <&ccu CLK_DE_FE0>, <&ccu CLK_TCON0_CH0>, |
| 92 | <&ccu CLK_DRAM_DE_FE0>, <&ccu CLK_DRAM_DE_BE0>; | 92 | <&ccu CLK_DRAM_DE_FE0>, <&ccu CLK_DRAM_DE_BE0>; |
| 93 | status = "disabled"; | 93 | status = "disabled"; |
| 94 | }; | 94 | }; |
| @@ -99,7 +99,7 @@ | |||
| 99 | allwinner,pipeline = "de_fe0-de_be0-lcd0-tve0"; | 99 | allwinner,pipeline = "de_fe0-de_be0-lcd0-tve0"; |
| 100 | clocks = <&ccu CLK_AHB_TVE0>, <&ccu CLK_AHB_LCD0>, | 100 | clocks = <&ccu CLK_AHB_TVE0>, <&ccu CLK_AHB_LCD0>, |
| 101 | <&ccu CLK_AHB_DE_BE0>, <&ccu CLK_AHB_DE_FE0>, | 101 | <&ccu CLK_AHB_DE_BE0>, <&ccu CLK_AHB_DE_FE0>, |
| 102 | <&ccu CLK_DE_BE0>, <&ccu CLK_AHB_DE_FE0>, | 102 | <&ccu CLK_DE_BE0>, <&ccu CLK_DE_FE0>, |
| 103 | <&ccu CLK_TCON0_CH1>, <&ccu CLK_DRAM_TVE0>, | 103 | <&ccu CLK_TCON0_CH1>, <&ccu CLK_DRAM_TVE0>, |
| 104 | <&ccu CLK_DRAM_DE_FE0>, <&ccu CLK_DRAM_DE_BE0>; | 104 | <&ccu CLK_DRAM_DE_FE0>, <&ccu CLK_DRAM_DE_BE0>; |
| 105 | status = "disabled"; | 105 | status = "disabled"; |
diff --git a/arch/arm/boot/dts/sun8i-h3-orangepi-one.dts b/arch/arm/boot/dts/sun8i-h3-orangepi-one.dts index 3328fe583c9b..232f124ce62c 100644 --- a/arch/arm/boot/dts/sun8i-h3-orangepi-one.dts +++ b/arch/arm/boot/dts/sun8i-h3-orangepi-one.dts | |||
| @@ -117,6 +117,7 @@ | |||
| 117 | phy-handle = <&int_mii_phy>; | 117 | phy-handle = <&int_mii_phy>; |
| 118 | phy-mode = "mii"; | 118 | phy-mode = "mii"; |
| 119 | allwinner,leds-active-low; | 119 | allwinner,leds-active-low; |
| 120 | status = "okay"; | ||
| 120 | }; | 121 | }; |
| 121 | 122 | ||
| 122 | &hdmi { | 123 | &hdmi { |
diff --git a/arch/arm/boot/dts/sun8i-v3s-licheepi-zero-dock.dts b/arch/arm/boot/dts/sun8i-v3s-licheepi-zero-dock.dts index d1311098ea45..ad173605b1b8 100644 --- a/arch/arm/boot/dts/sun8i-v3s-licheepi-zero-dock.dts +++ b/arch/arm/boot/dts/sun8i-v3s-licheepi-zero-dock.dts | |||
| @@ -51,7 +51,7 @@ | |||
| 51 | 51 | ||
| 52 | leds { | 52 | leds { |
| 53 | /* The LEDs use PG0~2 pins, which conflict with MMC1 */ | 53 | /* The LEDs use PG0~2 pins, which conflict with MMC1 */ |
| 54 | status = "disbaled"; | 54 | status = "disabled"; |
| 55 | }; | 55 | }; |
| 56 | }; | 56 | }; |
| 57 | 57 | ||
diff --git a/arch/arm/boot/dts/tegra20.dtsi b/arch/arm/boot/dts/tegra20.dtsi index 0a7136462a1a..983dd5c14794 100644 --- a/arch/arm/boot/dts/tegra20.dtsi +++ b/arch/arm/boot/dts/tegra20.dtsi | |||
| @@ -741,7 +741,7 @@ | |||
| 741 | phy_type = "ulpi"; | 741 | phy_type = "ulpi"; |
| 742 | clocks = <&tegra_car TEGRA20_CLK_USB2>, | 742 | clocks = <&tegra_car TEGRA20_CLK_USB2>, |
| 743 | <&tegra_car TEGRA20_CLK_PLL_U>, | 743 | <&tegra_car TEGRA20_CLK_PLL_U>, |
| 744 | <&tegra_car TEGRA20_CLK_PLL_P_OUT4>; | 744 | <&tegra_car TEGRA20_CLK_CDEV2>; |
| 745 | clock-names = "reg", "pll_u", "ulpi-link"; | 745 | clock-names = "reg", "pll_u", "ulpi-link"; |
| 746 | resets = <&tegra_car 58>, <&tegra_car 22>; | 746 | resets = <&tegra_car 58>, <&tegra_car 22>; |
| 747 | reset-names = "usb", "utmi-pads"; | 747 | reset-names = "usb", "utmi-pads"; |
diff --git a/arch/arm/include/asm/assembler.h b/arch/arm/include/asm/assembler.h index bc8d4bbd82e2..9342904cccca 100644 --- a/arch/arm/include/asm/assembler.h +++ b/arch/arm/include/asm/assembler.h | |||
| @@ -536,4 +536,14 @@ THUMB( orr \reg , \reg , #PSR_T_BIT ) | |||
| 536 | #endif | 536 | #endif |
| 537 | .endm | 537 | .endm |
| 538 | 538 | ||
| 539 | #ifdef CONFIG_KPROBES | ||
| 540 | #define _ASM_NOKPROBE(entry) \ | ||
| 541 | .pushsection "_kprobe_blacklist", "aw" ; \ | ||
| 542 | .balign 4 ; \ | ||
| 543 | .long entry; \ | ||
| 544 | .popsection | ||
| 545 | #else | ||
| 546 | #define _ASM_NOKPROBE(entry) | ||
| 547 | #endif | ||
| 548 | |||
| 539 | #endif /* __ASM_ASSEMBLER_H__ */ | 549 | #endif /* __ASM_ASSEMBLER_H__ */ |
diff --git a/arch/arm/include/asm/kvm_mmu.h b/arch/arm/include/asm/kvm_mmu.h index 707a1f06dc5d..f675162663f0 100644 --- a/arch/arm/include/asm/kvm_mmu.h +++ b/arch/arm/include/asm/kvm_mmu.h | |||
| @@ -309,6 +309,22 @@ static inline unsigned int kvm_get_vmid_bits(void) | |||
| 309 | return 8; | 309 | return 8; |
| 310 | } | 310 | } |
| 311 | 311 | ||
| 312 | /* | ||
| 313 | * We are not in the kvm->srcu critical section most of the time, so we take | ||
| 314 | * the SRCU read lock here. Since we copy the data from the user page, we | ||
| 315 | * can immediately drop the lock again. | ||
| 316 | */ | ||
| 317 | static inline int kvm_read_guest_lock(struct kvm *kvm, | ||
| 318 | gpa_t gpa, void *data, unsigned long len) | ||
| 319 | { | ||
| 320 | int srcu_idx = srcu_read_lock(&kvm->srcu); | ||
| 321 | int ret = kvm_read_guest(kvm, gpa, data, len); | ||
| 322 | |||
| 323 | srcu_read_unlock(&kvm->srcu, srcu_idx); | ||
| 324 | |||
| 325 | return ret; | ||
| 326 | } | ||
| 327 | |||
| 312 | static inline void *kvm_get_hyp_vector(void) | 328 | static inline void *kvm_get_hyp_vector(void) |
| 313 | { | 329 | { |
| 314 | return kvm_ksym_ref(__kvm_hyp_vector); | 330 | return kvm_ksym_ref(__kvm_hyp_vector); |
diff --git a/arch/arm/include/uapi/asm/siginfo.h b/arch/arm/include/uapi/asm/siginfo.h deleted file mode 100644 index d0513880be21..000000000000 --- a/arch/arm/include/uapi/asm/siginfo.h +++ /dev/null | |||
| @@ -1,13 +0,0 @@ | |||
| 1 | #ifndef __ASM_SIGINFO_H | ||
| 2 | #define __ASM_SIGINFO_H | ||
| 3 | |||
| 4 | #include <asm-generic/siginfo.h> | ||
| 5 | |||
| 6 | /* | ||
| 7 | * SIGFPE si_codes | ||
| 8 | */ | ||
| 9 | #ifdef __KERNEL__ | ||
| 10 | #define FPE_FIXME 0 /* Broken dup of SI_USER */ | ||
| 11 | #endif /* __KERNEL__ */ | ||
| 12 | |||
| 13 | #endif | ||
diff --git a/arch/arm/kernel/machine_kexec.c b/arch/arm/kernel/machine_kexec.c index 6b38d7a634c1..dd2eb5f76b9f 100644 --- a/arch/arm/kernel/machine_kexec.c +++ b/arch/arm/kernel/machine_kexec.c | |||
| @@ -83,7 +83,7 @@ void machine_crash_nonpanic_core(void *unused) | |||
| 83 | { | 83 | { |
| 84 | struct pt_regs regs; | 84 | struct pt_regs regs; |
| 85 | 85 | ||
| 86 | crash_setup_regs(®s, NULL); | 86 | crash_setup_regs(®s, get_irq_regs()); |
| 87 | printk(KERN_DEBUG "CPU %u will stop doing anything useful since another CPU has crashed\n", | 87 | printk(KERN_DEBUG "CPU %u will stop doing anything useful since another CPU has crashed\n", |
| 88 | smp_processor_id()); | 88 | smp_processor_id()); |
| 89 | crash_save_cpu(®s, smp_processor_id()); | 89 | crash_save_cpu(®s, smp_processor_id()); |
| @@ -95,6 +95,27 @@ void machine_crash_nonpanic_core(void *unused) | |||
| 95 | cpu_relax(); | 95 | cpu_relax(); |
| 96 | } | 96 | } |
| 97 | 97 | ||
| 98 | void crash_smp_send_stop(void) | ||
| 99 | { | ||
| 100 | static int cpus_stopped; | ||
| 101 | unsigned long msecs; | ||
| 102 | |||
| 103 | if (cpus_stopped) | ||
| 104 | return; | ||
| 105 | |||
| 106 | atomic_set(&waiting_for_crash_ipi, num_online_cpus() - 1); | ||
| 107 | smp_call_function(machine_crash_nonpanic_core, NULL, false); | ||
| 108 | msecs = 1000; /* Wait at most a second for the other cpus to stop */ | ||
| 109 | while ((atomic_read(&waiting_for_crash_ipi) > 0) && msecs) { | ||
| 110 | mdelay(1); | ||
| 111 | msecs--; | ||
| 112 | } | ||
| 113 | if (atomic_read(&waiting_for_crash_ipi) > 0) | ||
| 114 | pr_warn("Non-crashing CPUs did not react to IPI\n"); | ||
| 115 | |||
| 116 | cpus_stopped = 1; | ||
| 117 | } | ||
| 118 | |||
| 98 | static void machine_kexec_mask_interrupts(void) | 119 | static void machine_kexec_mask_interrupts(void) |
| 99 | { | 120 | { |
| 100 | unsigned int i; | 121 | unsigned int i; |
| @@ -120,19 +141,8 @@ static void machine_kexec_mask_interrupts(void) | |||
| 120 | 141 | ||
| 121 | void machine_crash_shutdown(struct pt_regs *regs) | 142 | void machine_crash_shutdown(struct pt_regs *regs) |
| 122 | { | 143 | { |
| 123 | unsigned long msecs; | ||
| 124 | |||
| 125 | local_irq_disable(); | 144 | local_irq_disable(); |
| 126 | 145 | crash_smp_send_stop(); | |
| 127 | atomic_set(&waiting_for_crash_ipi, num_online_cpus() - 1); | ||
| 128 | smp_call_function(machine_crash_nonpanic_core, NULL, false); | ||
| 129 | msecs = 1000; /* Wait at most a second for the other cpus to stop */ | ||
| 130 | while ((atomic_read(&waiting_for_crash_ipi) > 0) && msecs) { | ||
| 131 | mdelay(1); | ||
| 132 | msecs--; | ||
| 133 | } | ||
| 134 | if (atomic_read(&waiting_for_crash_ipi) > 0) | ||
| 135 | pr_warn("Non-crashing CPUs did not react to IPI\n"); | ||
| 136 | 146 | ||
| 137 | crash_save_cpu(regs, smp_processor_id()); | 147 | crash_save_cpu(regs, smp_processor_id()); |
| 138 | machine_kexec_mask_interrupts(); | 148 | machine_kexec_mask_interrupts(); |
diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c index 5e3633c24e63..2fe87109ae46 100644 --- a/arch/arm/kernel/traps.c +++ b/arch/arm/kernel/traps.c | |||
| @@ -19,6 +19,7 @@ | |||
| 19 | #include <linux/uaccess.h> | 19 | #include <linux/uaccess.h> |
| 20 | #include <linux/hardirq.h> | 20 | #include <linux/hardirq.h> |
| 21 | #include <linux/kdebug.h> | 21 | #include <linux/kdebug.h> |
| 22 | #include <linux/kprobes.h> | ||
| 22 | #include <linux/module.h> | 23 | #include <linux/module.h> |
| 23 | #include <linux/kexec.h> | 24 | #include <linux/kexec.h> |
| 24 | #include <linux/bug.h> | 25 | #include <linux/bug.h> |
| @@ -417,7 +418,8 @@ void unregister_undef_hook(struct undef_hook *hook) | |||
| 417 | raw_spin_unlock_irqrestore(&undef_lock, flags); | 418 | raw_spin_unlock_irqrestore(&undef_lock, flags); |
| 418 | } | 419 | } |
| 419 | 420 | ||
| 420 | static int call_undef_hook(struct pt_regs *regs, unsigned int instr) | 421 | static nokprobe_inline |
| 422 | int call_undef_hook(struct pt_regs *regs, unsigned int instr) | ||
| 421 | { | 423 | { |
| 422 | struct undef_hook *hook; | 424 | struct undef_hook *hook; |
| 423 | unsigned long flags; | 425 | unsigned long flags; |
| @@ -490,6 +492,7 @@ die_sig: | |||
| 490 | 492 | ||
| 491 | arm_notify_die("Oops - undefined instruction", regs, &info, 0, 6); | 493 | arm_notify_die("Oops - undefined instruction", regs, &info, 0, 6); |
| 492 | } | 494 | } |
| 495 | NOKPROBE_SYMBOL(do_undefinstr) | ||
| 493 | 496 | ||
| 494 | /* | 497 | /* |
| 495 | * Handle FIQ similarly to NMI on x86 systems. | 498 | * Handle FIQ similarly to NMI on x86 systems. |
diff --git a/arch/arm/lib/getuser.S b/arch/arm/lib/getuser.S index df73914e81c8..746e7801dcdf 100644 --- a/arch/arm/lib/getuser.S +++ b/arch/arm/lib/getuser.S | |||
| @@ -38,6 +38,7 @@ ENTRY(__get_user_1) | |||
| 38 | mov r0, #0 | 38 | mov r0, #0 |
| 39 | ret lr | 39 | ret lr |
| 40 | ENDPROC(__get_user_1) | 40 | ENDPROC(__get_user_1) |
| 41 | _ASM_NOKPROBE(__get_user_1) | ||
| 41 | 42 | ||
| 42 | ENTRY(__get_user_2) | 43 | ENTRY(__get_user_2) |
| 43 | check_uaccess r0, 2, r1, r2, __get_user_bad | 44 | check_uaccess r0, 2, r1, r2, __get_user_bad |
| @@ -58,6 +59,7 @@ rb .req r0 | |||
| 58 | mov r0, #0 | 59 | mov r0, #0 |
| 59 | ret lr | 60 | ret lr |
| 60 | ENDPROC(__get_user_2) | 61 | ENDPROC(__get_user_2) |
| 62 | _ASM_NOKPROBE(__get_user_2) | ||
| 61 | 63 | ||
| 62 | ENTRY(__get_user_4) | 64 | ENTRY(__get_user_4) |
| 63 | check_uaccess r0, 4, r1, r2, __get_user_bad | 65 | check_uaccess r0, 4, r1, r2, __get_user_bad |
| @@ -65,6 +67,7 @@ ENTRY(__get_user_4) | |||
| 65 | mov r0, #0 | 67 | mov r0, #0 |
| 66 | ret lr | 68 | ret lr |
| 67 | ENDPROC(__get_user_4) | 69 | ENDPROC(__get_user_4) |
| 70 | _ASM_NOKPROBE(__get_user_4) | ||
| 68 | 71 | ||
| 69 | ENTRY(__get_user_8) | 72 | ENTRY(__get_user_8) |
| 70 | check_uaccess r0, 8, r1, r2, __get_user_bad8 | 73 | check_uaccess r0, 8, r1, r2, __get_user_bad8 |
| @@ -78,6 +81,7 @@ ENTRY(__get_user_8) | |||
| 78 | mov r0, #0 | 81 | mov r0, #0 |
| 79 | ret lr | 82 | ret lr |
| 80 | ENDPROC(__get_user_8) | 83 | ENDPROC(__get_user_8) |
| 84 | _ASM_NOKPROBE(__get_user_8) | ||
| 81 | 85 | ||
| 82 | #ifdef __ARMEB__ | 86 | #ifdef __ARMEB__ |
| 83 | ENTRY(__get_user_32t_8) | 87 | ENTRY(__get_user_32t_8) |
| @@ -91,6 +95,7 @@ ENTRY(__get_user_32t_8) | |||
| 91 | mov r0, #0 | 95 | mov r0, #0 |
| 92 | ret lr | 96 | ret lr |
| 93 | ENDPROC(__get_user_32t_8) | 97 | ENDPROC(__get_user_32t_8) |
| 98 | _ASM_NOKPROBE(__get_user_32t_8) | ||
| 94 | 99 | ||
| 95 | ENTRY(__get_user_64t_1) | 100 | ENTRY(__get_user_64t_1) |
| 96 | check_uaccess r0, 1, r1, r2, __get_user_bad8 | 101 | check_uaccess r0, 1, r1, r2, __get_user_bad8 |
| @@ -98,6 +103,7 @@ ENTRY(__get_user_64t_1) | |||
| 98 | mov r0, #0 | 103 | mov r0, #0 |
| 99 | ret lr | 104 | ret lr |
| 100 | ENDPROC(__get_user_64t_1) | 105 | ENDPROC(__get_user_64t_1) |
| 106 | _ASM_NOKPROBE(__get_user_64t_1) | ||
| 101 | 107 | ||
| 102 | ENTRY(__get_user_64t_2) | 108 | ENTRY(__get_user_64t_2) |
| 103 | check_uaccess r0, 2, r1, r2, __get_user_bad8 | 109 | check_uaccess r0, 2, r1, r2, __get_user_bad8 |
| @@ -114,6 +120,7 @@ rb .req r0 | |||
| 114 | mov r0, #0 | 120 | mov r0, #0 |
| 115 | ret lr | 121 | ret lr |
| 116 | ENDPROC(__get_user_64t_2) | 122 | ENDPROC(__get_user_64t_2) |
| 123 | _ASM_NOKPROBE(__get_user_64t_2) | ||
| 117 | 124 | ||
| 118 | ENTRY(__get_user_64t_4) | 125 | ENTRY(__get_user_64t_4) |
| 119 | check_uaccess r0, 4, r1, r2, __get_user_bad8 | 126 | check_uaccess r0, 4, r1, r2, __get_user_bad8 |
| @@ -121,6 +128,7 @@ ENTRY(__get_user_64t_4) | |||
| 121 | mov r0, #0 | 128 | mov r0, #0 |
| 122 | ret lr | 129 | ret lr |
| 123 | ENDPROC(__get_user_64t_4) | 130 | ENDPROC(__get_user_64t_4) |
| 131 | _ASM_NOKPROBE(__get_user_64t_4) | ||
| 124 | #endif | 132 | #endif |
| 125 | 133 | ||
| 126 | __get_user_bad8: | 134 | __get_user_bad8: |
| @@ -131,6 +139,8 @@ __get_user_bad: | |||
| 131 | ret lr | 139 | ret lr |
| 132 | ENDPROC(__get_user_bad) | 140 | ENDPROC(__get_user_bad) |
| 133 | ENDPROC(__get_user_bad8) | 141 | ENDPROC(__get_user_bad8) |
| 142 | _ASM_NOKPROBE(__get_user_bad) | ||
| 143 | _ASM_NOKPROBE(__get_user_bad8) | ||
| 134 | 144 | ||
| 135 | .pushsection __ex_table, "a" | 145 | .pushsection __ex_table, "a" |
| 136 | .long 1b, __get_user_bad | 146 | .long 1b, __get_user_bad |
diff --git a/arch/arm/mach-davinci/board-da830-evm.c b/arch/arm/mach-davinci/board-da830-evm.c index 004f9c8de032..d1e8ce7b4bd2 100644 --- a/arch/arm/mach-davinci/board-da830-evm.c +++ b/arch/arm/mach-davinci/board-da830-evm.c | |||
| @@ -205,12 +205,17 @@ static const short da830_evm_mmc_sd_pins[] = { | |||
| 205 | -1 | 205 | -1 |
| 206 | }; | 206 | }; |
| 207 | 207 | ||
| 208 | #define DA830_MMCSD_WP_PIN GPIO_TO_PIN(2, 1) | ||
| 209 | #define DA830_MMCSD_CD_PIN GPIO_TO_PIN(2, 2) | ||
| 210 | |||
| 208 | static struct gpiod_lookup_table mmc_gpios_table = { | 211 | static struct gpiod_lookup_table mmc_gpios_table = { |
| 209 | .dev_id = "da830-mmc.0", | 212 | .dev_id = "da830-mmc.0", |
| 210 | .table = { | 213 | .table = { |
| 211 | /* gpio chip 1 contains gpio range 32-63 */ | 214 | /* gpio chip 1 contains gpio range 32-63 */ |
| 212 | GPIO_LOOKUP("davinci_gpio.1", 2, "cd", GPIO_ACTIVE_LOW), | 215 | GPIO_LOOKUP("davinci_gpio.0", DA830_MMCSD_CD_PIN, "cd", |
| 213 | GPIO_LOOKUP("davinci_gpio.1", 1, "wp", GPIO_ACTIVE_LOW), | 216 | GPIO_ACTIVE_LOW), |
| 217 | GPIO_LOOKUP("davinci_gpio.0", DA830_MMCSD_WP_PIN, "wp", | ||
| 218 | GPIO_ACTIVE_LOW), | ||
| 214 | }, | 219 | }, |
| 215 | }; | 220 | }; |
| 216 | 221 | ||
diff --git a/arch/arm/mach-davinci/board-da850-evm.c b/arch/arm/mach-davinci/board-da850-evm.c index 3063478bcc36..158ed9a1483f 100644 --- a/arch/arm/mach-davinci/board-da850-evm.c +++ b/arch/arm/mach-davinci/board-da850-evm.c | |||
| @@ -763,12 +763,17 @@ static const short da850_evm_mcasp_pins[] __initconst = { | |||
| 763 | -1 | 763 | -1 |
| 764 | }; | 764 | }; |
| 765 | 765 | ||
| 766 | #define DA850_MMCSD_CD_PIN GPIO_TO_PIN(4, 0) | ||
| 767 | #define DA850_MMCSD_WP_PIN GPIO_TO_PIN(4, 1) | ||
| 768 | |||
| 766 | static struct gpiod_lookup_table mmc_gpios_table = { | 769 | static struct gpiod_lookup_table mmc_gpios_table = { |
| 767 | .dev_id = "da830-mmc.0", | 770 | .dev_id = "da830-mmc.0", |
| 768 | .table = { | 771 | .table = { |
| 769 | /* gpio chip 2 contains gpio range 64-95 */ | 772 | /* gpio chip 2 contains gpio range 64-95 */ |
| 770 | GPIO_LOOKUP("davinci_gpio.2", 0, "cd", GPIO_ACTIVE_LOW), | 773 | GPIO_LOOKUP("davinci_gpio.0", DA850_MMCSD_CD_PIN, "cd", |
| 771 | GPIO_LOOKUP("davinci_gpio.2", 1, "wp", GPIO_ACTIVE_LOW), | 774 | GPIO_ACTIVE_LOW), |
| 775 | GPIO_LOOKUP("davinci_gpio.0", DA850_MMCSD_WP_PIN, "wp", | ||
| 776 | GPIO_ACTIVE_LOW), | ||
| 772 | }, | 777 | }, |
| 773 | }; | 778 | }; |
| 774 | 779 | ||
diff --git a/arch/arm/mach-davinci/board-dm355-evm.c b/arch/arm/mach-davinci/board-dm355-evm.c index cb30637d9eaf..23ab9e8bc04c 100644 --- a/arch/arm/mach-davinci/board-dm355-evm.c +++ b/arch/arm/mach-davinci/board-dm355-evm.c | |||
| @@ -19,6 +19,7 @@ | |||
| 19 | #include <linux/gpio.h> | 19 | #include <linux/gpio.h> |
| 20 | #include <linux/gpio/machine.h> | 20 | #include <linux/gpio/machine.h> |
| 21 | #include <linux/clk.h> | 21 | #include <linux/clk.h> |
| 22 | #include <linux/dm9000.h> | ||
| 22 | #include <linux/videodev2.h> | 23 | #include <linux/videodev2.h> |
| 23 | #include <media/i2c/tvp514x.h> | 24 | #include <media/i2c/tvp514x.h> |
| 24 | #include <linux/spi/spi.h> | 25 | #include <linux/spi/spi.h> |
| @@ -109,12 +110,15 @@ static struct platform_device davinci_nand_device = { | |||
| 109 | }, | 110 | }, |
| 110 | }; | 111 | }; |
| 111 | 112 | ||
| 113 | #define DM355_I2C_SDA_PIN GPIO_TO_PIN(0, 15) | ||
| 114 | #define DM355_I2C_SCL_PIN GPIO_TO_PIN(0, 14) | ||
| 115 | |||
| 112 | static struct gpiod_lookup_table i2c_recovery_gpiod_table = { | 116 | static struct gpiod_lookup_table i2c_recovery_gpiod_table = { |
| 113 | .dev_id = "i2c_davinci", | 117 | .dev_id = "i2c_davinci.1", |
| 114 | .table = { | 118 | .table = { |
| 115 | GPIO_LOOKUP("davinci_gpio", 15, "sda", | 119 | GPIO_LOOKUP("davinci_gpio.0", DM355_I2C_SDA_PIN, "sda", |
| 116 | GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN), | 120 | GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN), |
| 117 | GPIO_LOOKUP("davinci_gpio", 14, "scl", | 121 | GPIO_LOOKUP("davinci_gpio.0", DM355_I2C_SCL_PIN, "scl", |
| 118 | GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN), | 122 | GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN), |
| 119 | }, | 123 | }, |
| 120 | }; | 124 | }; |
| @@ -179,11 +183,16 @@ static struct resource dm355evm_dm9000_rsrc[] = { | |||
| 179 | }, | 183 | }, |
| 180 | }; | 184 | }; |
| 181 | 185 | ||
| 186 | static struct dm9000_plat_data dm335evm_dm9000_platdata; | ||
| 187 | |||
| 182 | static struct platform_device dm355evm_dm9000 = { | 188 | static struct platform_device dm355evm_dm9000 = { |
| 183 | .name = "dm9000", | 189 | .name = "dm9000", |
| 184 | .id = -1, | 190 | .id = -1, |
| 185 | .resource = dm355evm_dm9000_rsrc, | 191 | .resource = dm355evm_dm9000_rsrc, |
| 186 | .num_resources = ARRAY_SIZE(dm355evm_dm9000_rsrc), | 192 | .num_resources = ARRAY_SIZE(dm355evm_dm9000_rsrc), |
| 193 | .dev = { | ||
| 194 | .platform_data = &dm335evm_dm9000_platdata, | ||
| 195 | }, | ||
| 187 | }; | 196 | }; |
| 188 | 197 | ||
| 189 | static struct tvp514x_platform_data tvp5146_pdata = { | 198 | static struct tvp514x_platform_data tvp5146_pdata = { |
diff --git a/arch/arm/mach-davinci/board-dm644x-evm.c b/arch/arm/mach-davinci/board-dm644x-evm.c index 95b55aae1366..509e64ab1994 100644 --- a/arch/arm/mach-davinci/board-dm644x-evm.c +++ b/arch/arm/mach-davinci/board-dm644x-evm.c | |||
| @@ -17,6 +17,7 @@ | |||
| 17 | #include <linux/i2c.h> | 17 | #include <linux/i2c.h> |
| 18 | #include <linux/platform_data/pcf857x.h> | 18 | #include <linux/platform_data/pcf857x.h> |
| 19 | #include <linux/platform_data/at24.h> | 19 | #include <linux/platform_data/at24.h> |
| 20 | #include <linux/platform_data/gpio-davinci.h> | ||
| 20 | #include <linux/mtd/mtd.h> | 21 | #include <linux/mtd/mtd.h> |
| 21 | #include <linux/mtd/rawnand.h> | 22 | #include <linux/mtd/rawnand.h> |
| 22 | #include <linux/mtd/partitions.h> | 23 | #include <linux/mtd/partitions.h> |
| @@ -596,12 +597,15 @@ static struct i2c_board_info __initdata i2c_info[] = { | |||
| 596 | }, | 597 | }, |
| 597 | }; | 598 | }; |
| 598 | 599 | ||
| 600 | #define DM644X_I2C_SDA_PIN GPIO_TO_PIN(2, 12) | ||
| 601 | #define DM644X_I2C_SCL_PIN GPIO_TO_PIN(2, 11) | ||
| 602 | |||
| 599 | static struct gpiod_lookup_table i2c_recovery_gpiod_table = { | 603 | static struct gpiod_lookup_table i2c_recovery_gpiod_table = { |
| 600 | .dev_id = "i2c_davinci", | 604 | .dev_id = "i2c_davinci.1", |
| 601 | .table = { | 605 | .table = { |
| 602 | GPIO_LOOKUP("davinci_gpio", 44, "sda", | 606 | GPIO_LOOKUP("davinci_gpio.0", DM644X_I2C_SDA_PIN, "sda", |
| 603 | GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN), | 607 | GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN), |
| 604 | GPIO_LOOKUP("davinci_gpio", 43, "scl", | 608 | GPIO_LOOKUP("davinci_gpio.0", DM644X_I2C_SCL_PIN, "scl", |
| 605 | GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN), | 609 | GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN), |
| 606 | }, | 610 | }, |
| 607 | }; | 611 | }; |
diff --git a/arch/arm/mach-davinci/board-dm646x-evm.c b/arch/arm/mach-davinci/board-dm646x-evm.c index 2d37f5b0e1f5..a3c0d1e87647 100644 --- a/arch/arm/mach-davinci/board-dm646x-evm.c +++ b/arch/arm/mach-davinci/board-dm646x-evm.c | |||
| @@ -532,11 +532,12 @@ static struct vpif_display_config dm646x_vpif_display_config = { | |||
| 532 | .set_clock = set_vpif_clock, | 532 | .set_clock = set_vpif_clock, |
| 533 | .subdevinfo = dm646x_vpif_subdev, | 533 | .subdevinfo = dm646x_vpif_subdev, |
| 534 | .subdev_count = ARRAY_SIZE(dm646x_vpif_subdev), | 534 | .subdev_count = ARRAY_SIZE(dm646x_vpif_subdev), |
| 535 | .i2c_adapter_id = 1, | ||
| 535 | .chan_config[0] = { | 536 | .chan_config[0] = { |
| 536 | .outputs = dm6467_ch0_outputs, | 537 | .outputs = dm6467_ch0_outputs, |
| 537 | .output_count = ARRAY_SIZE(dm6467_ch0_outputs), | 538 | .output_count = ARRAY_SIZE(dm6467_ch0_outputs), |
| 538 | }, | 539 | }, |
| 539 | .card_name = "DM646x EVM", | 540 | .card_name = "DM646x EVM Video Display", |
| 540 | }; | 541 | }; |
| 541 | 542 | ||
| 542 | /** | 543 | /** |
| @@ -674,6 +675,7 @@ static struct vpif_capture_config dm646x_vpif_capture_cfg = { | |||
| 674 | .setup_input_channel_mode = setup_vpif_input_channel_mode, | 675 | .setup_input_channel_mode = setup_vpif_input_channel_mode, |
| 675 | .subdev_info = vpif_capture_sdev_info, | 676 | .subdev_info = vpif_capture_sdev_info, |
| 676 | .subdev_count = ARRAY_SIZE(vpif_capture_sdev_info), | 677 | .subdev_count = ARRAY_SIZE(vpif_capture_sdev_info), |
| 678 | .i2c_adapter_id = 1, | ||
| 677 | .chan_config[0] = { | 679 | .chan_config[0] = { |
| 678 | .inputs = dm6467_ch0_inputs, | 680 | .inputs = dm6467_ch0_inputs, |
| 679 | .input_count = ARRAY_SIZE(dm6467_ch0_inputs), | 681 | .input_count = ARRAY_SIZE(dm6467_ch0_inputs), |
| @@ -694,6 +696,7 @@ static struct vpif_capture_config dm646x_vpif_capture_cfg = { | |||
| 694 | .fid_pol = 0, | 696 | .fid_pol = 0, |
| 695 | }, | 697 | }, |
| 696 | }, | 698 | }, |
| 699 | .card_name = "DM646x EVM Video Capture", | ||
| 697 | }; | 700 | }; |
| 698 | 701 | ||
| 699 | static void __init evm_init_video(void) | 702 | static void __init evm_init_video(void) |
diff --git a/arch/arm/mach-davinci/board-omapl138-hawk.c b/arch/arm/mach-davinci/board-omapl138-hawk.c index 0d32042b728f..be8b892a6ea7 100644 --- a/arch/arm/mach-davinci/board-omapl138-hawk.c +++ b/arch/arm/mach-davinci/board-omapl138-hawk.c | |||
| @@ -123,12 +123,16 @@ static const short hawk_mmcsd0_pins[] = { | |||
| 123 | -1 | 123 | -1 |
| 124 | }; | 124 | }; |
| 125 | 125 | ||
| 126 | #define DA850_HAWK_MMCSD_CD_PIN GPIO_TO_PIN(3, 12) | ||
| 127 | #define DA850_HAWK_MMCSD_WP_PIN GPIO_TO_PIN(3, 13) | ||
| 128 | |||
| 126 | static struct gpiod_lookup_table mmc_gpios_table = { | 129 | static struct gpiod_lookup_table mmc_gpios_table = { |
| 127 | .dev_id = "da830-mmc.0", | 130 | .dev_id = "da830-mmc.0", |
| 128 | .table = { | 131 | .table = { |
| 129 | /* CD: gpio3_12: gpio60: chip 1 contains gpio range 32-63*/ | 132 | GPIO_LOOKUP("davinci_gpio.0", DA850_HAWK_MMCSD_CD_PIN, "cd", |
| 130 | GPIO_LOOKUP("davinci_gpio.0", 28, "cd", GPIO_ACTIVE_LOW), | 133 | GPIO_ACTIVE_LOW), |
| 131 | GPIO_LOOKUP("davinci_gpio.0", 29, "wp", GPIO_ACTIVE_LOW), | 134 | GPIO_LOOKUP("davinci_gpio.0", DA850_HAWK_MMCSD_WP_PIN, "wp", |
| 135 | GPIO_ACTIVE_LOW), | ||
| 132 | }, | 136 | }, |
| 133 | }; | 137 | }; |
| 134 | 138 | ||
diff --git a/arch/arm/mach-davinci/dm646x.c b/arch/arm/mach-davinci/dm646x.c index 109ab1fa0d2c..c32ca27ab343 100644 --- a/arch/arm/mach-davinci/dm646x.c +++ b/arch/arm/mach-davinci/dm646x.c | |||
| @@ -488,7 +488,8 @@ static u8 dm646x_default_priorities[DAVINCI_N_AINTC_IRQ] = { | |||
| 488 | [IRQ_DM646X_MCASP0TXINT] = 7, | 488 | [IRQ_DM646X_MCASP0TXINT] = 7, |
| 489 | [IRQ_DM646X_MCASP0RXINT] = 7, | 489 | [IRQ_DM646X_MCASP0RXINT] = 7, |
| 490 | [IRQ_DM646X_RESERVED_3] = 7, | 490 | [IRQ_DM646X_RESERVED_3] = 7, |
| 491 | [IRQ_DM646X_MCASP1TXINT] = 7, /* clockevent */ | 491 | [IRQ_DM646X_MCASP1TXINT] = 7, |
| 492 | [IRQ_TINT0_TINT12] = 7, /* clockevent */ | ||
| 492 | [IRQ_TINT0_TINT34] = 7, /* clocksource */ | 493 | [IRQ_TINT0_TINT34] = 7, /* clocksource */ |
| 493 | [IRQ_TINT1_TINT12] = 7, /* DSP timer */ | 494 | [IRQ_TINT1_TINT12] = 7, /* DSP timer */ |
| 494 | [IRQ_TINT1_TINT34] = 7, /* system tick */ | 495 | [IRQ_TINT1_TINT34] = 7, /* system tick */ |
diff --git a/arch/arm/mach-ep93xx/core.c b/arch/arm/mach-ep93xx/core.c index e70feec6fad5..0581ffbedddd 100644 --- a/arch/arm/mach-ep93xx/core.c +++ b/arch/arm/mach-ep93xx/core.c | |||
| @@ -323,7 +323,7 @@ void __init ep93xx_register_eth(struct ep93xx_eth_data *data, int copy_addr) | |||
| 323 | 323 | ||
| 324 | /* All EP93xx devices use the same two GPIO pins for I2C bit-banging */ | 324 | /* All EP93xx devices use the same two GPIO pins for I2C bit-banging */ |
| 325 | static struct gpiod_lookup_table ep93xx_i2c_gpiod_table = { | 325 | static struct gpiod_lookup_table ep93xx_i2c_gpiod_table = { |
| 326 | .dev_id = "i2c-gpio", | 326 | .dev_id = "i2c-gpio.0", |
| 327 | .table = { | 327 | .table = { |
| 328 | /* Use local offsets on gpiochip/port "G" */ | 328 | /* Use local offsets on gpiochip/port "G" */ |
| 329 | GPIO_LOOKUP_IDX("G", 1, NULL, 0, | 329 | GPIO_LOOKUP_IDX("G", 1, NULL, 0, |
diff --git a/arch/arm/mach-ixp4xx/avila-setup.c b/arch/arm/mach-ixp4xx/avila-setup.c index 77def6169f50..44cbbce6bda6 100644 --- a/arch/arm/mach-ixp4xx/avila-setup.c +++ b/arch/arm/mach-ixp4xx/avila-setup.c | |||
| @@ -51,7 +51,7 @@ static struct platform_device avila_flash = { | |||
| 51 | }; | 51 | }; |
| 52 | 52 | ||
| 53 | static struct gpiod_lookup_table avila_i2c_gpiod_table = { | 53 | static struct gpiod_lookup_table avila_i2c_gpiod_table = { |
| 54 | .dev_id = "i2c-gpio", | 54 | .dev_id = "i2c-gpio.0", |
| 55 | .table = { | 55 | .table = { |
| 56 | GPIO_LOOKUP_IDX("IXP4XX_GPIO_CHIP", AVILA_SDA_PIN, | 56 | GPIO_LOOKUP_IDX("IXP4XX_GPIO_CHIP", AVILA_SDA_PIN, |
| 57 | NULL, 0, GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN), | 57 | NULL, 0, GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN), |
diff --git a/arch/arm/mach-ixp4xx/dsmg600-setup.c b/arch/arm/mach-ixp4xx/dsmg600-setup.c index 0f5c99941a7d..397190f3a8da 100644 --- a/arch/arm/mach-ixp4xx/dsmg600-setup.c +++ b/arch/arm/mach-ixp4xx/dsmg600-setup.c | |||
| @@ -70,7 +70,7 @@ static struct platform_device dsmg600_flash = { | |||
| 70 | }; | 70 | }; |
| 71 | 71 | ||
| 72 | static struct gpiod_lookup_table dsmg600_i2c_gpiod_table = { | 72 | static struct gpiod_lookup_table dsmg600_i2c_gpiod_table = { |
| 73 | .dev_id = "i2c-gpio", | 73 | .dev_id = "i2c-gpio.0", |
| 74 | .table = { | 74 | .table = { |
| 75 | GPIO_LOOKUP_IDX("IXP4XX_GPIO_CHIP", DSMG600_SDA_PIN, | 75 | GPIO_LOOKUP_IDX("IXP4XX_GPIO_CHIP", DSMG600_SDA_PIN, |
| 76 | NULL, 0, GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN), | 76 | NULL, 0, GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN), |
diff --git a/arch/arm/mach-ixp4xx/fsg-setup.c b/arch/arm/mach-ixp4xx/fsg-setup.c index 033f79b35d51..f0a152e365b1 100644 --- a/arch/arm/mach-ixp4xx/fsg-setup.c +++ b/arch/arm/mach-ixp4xx/fsg-setup.c | |||
| @@ -56,7 +56,7 @@ static struct platform_device fsg_flash = { | |||
| 56 | }; | 56 | }; |
| 57 | 57 | ||
| 58 | static struct gpiod_lookup_table fsg_i2c_gpiod_table = { | 58 | static struct gpiod_lookup_table fsg_i2c_gpiod_table = { |
| 59 | .dev_id = "i2c-gpio", | 59 | .dev_id = "i2c-gpio.0", |
| 60 | .table = { | 60 | .table = { |
| 61 | GPIO_LOOKUP_IDX("IXP4XX_GPIO_CHIP", FSG_SDA_PIN, | 61 | GPIO_LOOKUP_IDX("IXP4XX_GPIO_CHIP", FSG_SDA_PIN, |
| 62 | NULL, 0, GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN), | 62 | NULL, 0, GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN), |
diff --git a/arch/arm/mach-ixp4xx/ixdp425-setup.c b/arch/arm/mach-ixp4xx/ixdp425-setup.c index b168e2fbdbeb..3ec829d52cdd 100644 --- a/arch/arm/mach-ixp4xx/ixdp425-setup.c +++ b/arch/arm/mach-ixp4xx/ixdp425-setup.c | |||
| @@ -124,7 +124,7 @@ static struct platform_device ixdp425_flash_nand = { | |||
| 124 | #endif /* CONFIG_MTD_NAND_PLATFORM */ | 124 | #endif /* CONFIG_MTD_NAND_PLATFORM */ |
| 125 | 125 | ||
| 126 | static struct gpiod_lookup_table ixdp425_i2c_gpiod_table = { | 126 | static struct gpiod_lookup_table ixdp425_i2c_gpiod_table = { |
| 127 | .dev_id = "i2c-gpio", | 127 | .dev_id = "i2c-gpio.0", |
| 128 | .table = { | 128 | .table = { |
| 129 | GPIO_LOOKUP_IDX("IXP4XX_GPIO_CHIP", IXDP425_SDA_PIN, | 129 | GPIO_LOOKUP_IDX("IXP4XX_GPIO_CHIP", IXDP425_SDA_PIN, |
| 130 | NULL, 0, GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN), | 130 | NULL, 0, GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN), |
diff --git a/arch/arm/mach-ixp4xx/nas100d-setup.c b/arch/arm/mach-ixp4xx/nas100d-setup.c index 76dfff03cb71..4138d6aa4c52 100644 --- a/arch/arm/mach-ixp4xx/nas100d-setup.c +++ b/arch/arm/mach-ixp4xx/nas100d-setup.c | |||
| @@ -102,7 +102,7 @@ static struct platform_device nas100d_leds = { | |||
| 102 | }; | 102 | }; |
| 103 | 103 | ||
| 104 | static struct gpiod_lookup_table nas100d_i2c_gpiod_table = { | 104 | static struct gpiod_lookup_table nas100d_i2c_gpiod_table = { |
| 105 | .dev_id = "i2c-gpio", | 105 | .dev_id = "i2c-gpio.0", |
| 106 | .table = { | 106 | .table = { |
| 107 | GPIO_LOOKUP_IDX("IXP4XX_GPIO_CHIP", NAS100D_SDA_PIN, | 107 | GPIO_LOOKUP_IDX("IXP4XX_GPIO_CHIP", NAS100D_SDA_PIN, |
| 108 | NULL, 0, GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN), | 108 | NULL, 0, GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN), |
diff --git a/arch/arm/mach-ixp4xx/nslu2-setup.c b/arch/arm/mach-ixp4xx/nslu2-setup.c index 91da63a7d7b5..341b263482ef 100644 --- a/arch/arm/mach-ixp4xx/nslu2-setup.c +++ b/arch/arm/mach-ixp4xx/nslu2-setup.c | |||
| @@ -70,7 +70,7 @@ static struct platform_device nslu2_flash = { | |||
| 70 | }; | 70 | }; |
| 71 | 71 | ||
| 72 | static struct gpiod_lookup_table nslu2_i2c_gpiod_table = { | 72 | static struct gpiod_lookup_table nslu2_i2c_gpiod_table = { |
| 73 | .dev_id = "i2c-gpio", | 73 | .dev_id = "i2c-gpio.0", |
| 74 | .table = { | 74 | .table = { |
| 75 | GPIO_LOOKUP_IDX("IXP4XX_GPIO_CHIP", NSLU2_SDA_PIN, | 75 | GPIO_LOOKUP_IDX("IXP4XX_GPIO_CHIP", NSLU2_SDA_PIN, |
| 76 | NULL, 0, GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN), | 76 | NULL, 0, GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN), |
diff --git a/arch/arm/mach-keystone/pm_domain.c b/arch/arm/mach-keystone/pm_domain.c index fe57e2692629..abca83d22ff3 100644 --- a/arch/arm/mach-keystone/pm_domain.c +++ b/arch/arm/mach-keystone/pm_domain.c | |||
| @@ -29,6 +29,7 @@ static struct dev_pm_domain keystone_pm_domain = { | |||
| 29 | 29 | ||
| 30 | static struct pm_clk_notifier_block platform_domain_notifier = { | 30 | static struct pm_clk_notifier_block platform_domain_notifier = { |
| 31 | .pm_domain = &keystone_pm_domain, | 31 | .pm_domain = &keystone_pm_domain, |
| 32 | .con_ids = { NULL }, | ||
| 32 | }; | 33 | }; |
| 33 | 34 | ||
| 34 | static const struct of_device_id of_keystone_table[] = { | 35 | static const struct of_device_id of_keystone_table[] = { |
diff --git a/arch/arm/mach-omap1/ams-delta-fiq.c b/arch/arm/mach-omap1/ams-delta-fiq.c index 793a24a53c52..d7ca9e2b40d2 100644 --- a/arch/arm/mach-omap1/ams-delta-fiq.c +++ b/arch/arm/mach-omap1/ams-delta-fiq.c | |||
| @@ -58,22 +58,24 @@ static irqreturn_t deferred_fiq(int irq, void *dev_id) | |||
| 58 | irq_num = gpio_to_irq(gpio); | 58 | irq_num = gpio_to_irq(gpio); |
| 59 | fiq_count = fiq_buffer[FIQ_CNT_INT_00 + gpio]; | 59 | fiq_count = fiq_buffer[FIQ_CNT_INT_00 + gpio]; |
| 60 | 60 | ||
| 61 | while (irq_counter[gpio] < fiq_count) { | 61 | if (irq_counter[gpio] < fiq_count && |
| 62 | if (gpio != AMS_DELTA_GPIO_PIN_KEYBRD_CLK) { | 62 | gpio != AMS_DELTA_GPIO_PIN_KEYBRD_CLK) { |
| 63 | struct irq_data *d = irq_get_irq_data(irq_num); | 63 | struct irq_data *d = irq_get_irq_data(irq_num); |
| 64 | 64 | ||
| 65 | /* | 65 | /* |
| 66 | * It looks like handle_edge_irq() that | 66 | * handle_simple_irq() that OMAP GPIO edge |
| 67 | * OMAP GPIO edge interrupts default to, | 67 | * interrupts default to since commit 80ac93c27441 |
| 68 | * expects interrupt already unmasked. | 68 | * requires interrupt already acked and unmasked. |
| 69 | */ | 69 | */ |
| 70 | if (irq_chip && irq_chip->irq_unmask) | 70 | if (irq_chip) { |
| 71 | if (irq_chip->irq_ack) | ||
| 72 | irq_chip->irq_ack(d); | ||
| 73 | if (irq_chip->irq_unmask) | ||
| 71 | irq_chip->irq_unmask(d); | 74 | irq_chip->irq_unmask(d); |
| 72 | } | 75 | } |
| 73 | generic_handle_irq(irq_num); | ||
| 74 | |||
| 75 | irq_counter[gpio]++; | ||
| 76 | } | 76 | } |
| 77 | for (; irq_counter[gpio] < fiq_count; irq_counter[gpio]++) | ||
| 78 | generic_handle_irq(irq_num); | ||
| 77 | } | 79 | } |
| 78 | return IRQ_HANDLED; | 80 | return IRQ_HANDLED; |
| 79 | } | 81 | } |
diff --git a/arch/arm/mach-omap2/powerdomain.c b/arch/arm/mach-omap2/powerdomain.c index 76eb6ec5f157..1e6a967cd2d5 100644 --- a/arch/arm/mach-omap2/powerdomain.c +++ b/arch/arm/mach-omap2/powerdomain.c | |||
| @@ -188,7 +188,7 @@ static int _pwrdm_state_switch(struct powerdomain *pwrdm, int flag) | |||
| 188 | ((prev & OMAP_POWERSTATE_MASK) << 0)); | 188 | ((prev & OMAP_POWERSTATE_MASK) << 0)); |
| 189 | trace_power_domain_target_rcuidle(pwrdm->name, | 189 | trace_power_domain_target_rcuidle(pwrdm->name, |
| 190 | trace_state, | 190 | trace_state, |
| 191 | smp_processor_id()); | 191 | raw_smp_processor_id()); |
| 192 | } | 192 | } |
| 193 | break; | 193 | break; |
| 194 | default: | 194 | default: |
| @@ -518,7 +518,7 @@ int pwrdm_set_next_pwrst(struct powerdomain *pwrdm, u8 pwrst) | |||
| 518 | if (arch_pwrdm && arch_pwrdm->pwrdm_set_next_pwrst) { | 518 | if (arch_pwrdm && arch_pwrdm->pwrdm_set_next_pwrst) { |
| 519 | /* Trace the pwrdm desired target state */ | 519 | /* Trace the pwrdm desired target state */ |
| 520 | trace_power_domain_target_rcuidle(pwrdm->name, pwrst, | 520 | trace_power_domain_target_rcuidle(pwrdm->name, pwrst, |
| 521 | smp_processor_id()); | 521 | raw_smp_processor_id()); |
| 522 | /* Program the pwrdm desired target state */ | 522 | /* Program the pwrdm desired target state */ |
| 523 | ret = arch_pwrdm->pwrdm_set_next_pwrst(pwrdm, pwrst); | 523 | ret = arch_pwrdm->pwrdm_set_next_pwrst(pwrdm, pwrst); |
| 524 | } | 524 | } |
diff --git a/arch/arm/mach-pxa/palmz72.c b/arch/arm/mach-pxa/palmz72.c index 5877e547cecd..0adb1bd6208e 100644 --- a/arch/arm/mach-pxa/palmz72.c +++ b/arch/arm/mach-pxa/palmz72.c | |||
| @@ -322,7 +322,7 @@ static struct soc_camera_link palmz72_iclink = { | |||
| 322 | }; | 322 | }; |
| 323 | 323 | ||
| 324 | static struct gpiod_lookup_table palmz72_i2c_gpiod_table = { | 324 | static struct gpiod_lookup_table palmz72_i2c_gpiod_table = { |
| 325 | .dev_id = "i2c-gpio", | 325 | .dev_id = "i2c-gpio.0", |
| 326 | .table = { | 326 | .table = { |
| 327 | GPIO_LOOKUP_IDX("gpio-pxa", 118, NULL, 0, | 327 | GPIO_LOOKUP_IDX("gpio-pxa", 118, NULL, 0, |
| 328 | GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN), | 328 | GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN), |
diff --git a/arch/arm/mach-pxa/viper.c b/arch/arm/mach-pxa/viper.c index 90d0f277de55..207dcc2e94e7 100644 --- a/arch/arm/mach-pxa/viper.c +++ b/arch/arm/mach-pxa/viper.c | |||
| @@ -460,7 +460,7 @@ static struct platform_device smc91x_device = { | |||
| 460 | 460 | ||
| 461 | /* i2c */ | 461 | /* i2c */ |
| 462 | static struct gpiod_lookup_table viper_i2c_gpiod_table = { | 462 | static struct gpiod_lookup_table viper_i2c_gpiod_table = { |
| 463 | .dev_id = "i2c-gpio", | 463 | .dev_id = "i2c-gpio.1", |
| 464 | .table = { | 464 | .table = { |
| 465 | GPIO_LOOKUP_IDX("gpio-pxa", VIPER_RTC_I2C_SDA_GPIO, | 465 | GPIO_LOOKUP_IDX("gpio-pxa", VIPER_RTC_I2C_SDA_GPIO, |
| 466 | NULL, 0, GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN), | 466 | NULL, 0, GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN), |
| @@ -789,7 +789,7 @@ static int __init viper_tpm_setup(char *str) | |||
| 789 | __setup("tpm=", viper_tpm_setup); | 789 | __setup("tpm=", viper_tpm_setup); |
| 790 | 790 | ||
| 791 | struct gpiod_lookup_table viper_tpm_i2c_gpiod_table = { | 791 | struct gpiod_lookup_table viper_tpm_i2c_gpiod_table = { |
| 792 | .dev_id = "i2c-gpio", | 792 | .dev_id = "i2c-gpio.2", |
| 793 | .table = { | 793 | .table = { |
| 794 | GPIO_LOOKUP_IDX("gpio-pxa", VIPER_TPM_I2C_SDA_GPIO, | 794 | GPIO_LOOKUP_IDX("gpio-pxa", VIPER_TPM_I2C_SDA_GPIO, |
| 795 | NULL, 0, GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN), | 795 | NULL, 0, GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN), |
diff --git a/arch/arm/mach-sa1100/simpad.c b/arch/arm/mach-sa1100/simpad.c index ace010479eb6..f45aed2519ba 100644 --- a/arch/arm/mach-sa1100/simpad.c +++ b/arch/arm/mach-sa1100/simpad.c | |||
| @@ -327,7 +327,7 @@ static struct platform_device simpad_gpio_leds = { | |||
| 327 | * i2c | 327 | * i2c |
| 328 | */ | 328 | */ |
| 329 | static struct gpiod_lookup_table simpad_i2c_gpiod_table = { | 329 | static struct gpiod_lookup_table simpad_i2c_gpiod_table = { |
| 330 | .dev_id = "i2c-gpio", | 330 | .dev_id = "i2c-gpio.0", |
| 331 | .table = { | 331 | .table = { |
| 332 | GPIO_LOOKUP_IDX("gpio", 21, NULL, 0, | 332 | GPIO_LOOKUP_IDX("gpio", 21, NULL, 0, |
| 333 | GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN), | 333 | GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN), |
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c index 8c398fedbbb6..ada8eb206a90 100644 --- a/arch/arm/mm/dma-mapping.c +++ b/arch/arm/mm/dma-mapping.c | |||
| @@ -466,12 +466,6 @@ void __init dma_contiguous_early_fixup(phys_addr_t base, unsigned long size) | |||
| 466 | void __init dma_contiguous_remap(void) | 466 | void __init dma_contiguous_remap(void) |
| 467 | { | 467 | { |
| 468 | int i; | 468 | int i; |
| 469 | |||
| 470 | if (!dma_mmu_remap_num) | ||
| 471 | return; | ||
| 472 | |||
| 473 | /* call flush_cache_all() since CMA area would be large enough */ | ||
| 474 | flush_cache_all(); | ||
| 475 | for (i = 0; i < dma_mmu_remap_num; i++) { | 469 | for (i = 0; i < dma_mmu_remap_num; i++) { |
| 476 | phys_addr_t start = dma_mmu_remap[i].base; | 470 | phys_addr_t start = dma_mmu_remap[i].base; |
| 477 | phys_addr_t end = start + dma_mmu_remap[i].size; | 471 | phys_addr_t end = start + dma_mmu_remap[i].size; |
| @@ -504,15 +498,7 @@ void __init dma_contiguous_remap(void) | |||
| 504 | flush_tlb_kernel_range(__phys_to_virt(start), | 498 | flush_tlb_kernel_range(__phys_to_virt(start), |
| 505 | __phys_to_virt(end)); | 499 | __phys_to_virt(end)); |
| 506 | 500 | ||
| 507 | /* | 501 | iotable_init(&map, 1); |
| 508 | * All the memory in CMA region will be on ZONE_MOVABLE. | ||
| 509 | * If that zone is considered as highmem, the memory in CMA | ||
| 510 | * region is also considered as highmem even if it's | ||
| 511 | * physical address belong to lowmem. In this case, | ||
| 512 | * re-mapping isn't required. | ||
| 513 | */ | ||
| 514 | if (!is_highmem_idx(ZONE_MOVABLE)) | ||
| 515 | iotable_init(&map, 1); | ||
| 516 | } | 502 | } |
| 517 | } | 503 | } |
| 518 | 504 | ||
diff --git a/arch/arm/probes/kprobes/opt-arm.c b/arch/arm/probes/kprobes/opt-arm.c index bcdecc25461b..b2aa9b32bff2 100644 --- a/arch/arm/probes/kprobes/opt-arm.c +++ b/arch/arm/probes/kprobes/opt-arm.c | |||
| @@ -165,13 +165,14 @@ optimized_callback(struct optimized_kprobe *op, struct pt_regs *regs) | |||
| 165 | { | 165 | { |
| 166 | unsigned long flags; | 166 | unsigned long flags; |
| 167 | struct kprobe *p = &op->kp; | 167 | struct kprobe *p = &op->kp; |
| 168 | struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); | 168 | struct kprobe_ctlblk *kcb; |
| 169 | 169 | ||
| 170 | /* Save skipped registers */ | 170 | /* Save skipped registers */ |
| 171 | regs->ARM_pc = (unsigned long)op->kp.addr; | 171 | regs->ARM_pc = (unsigned long)op->kp.addr; |
| 172 | regs->ARM_ORIG_r0 = ~0UL; | 172 | regs->ARM_ORIG_r0 = ~0UL; |
| 173 | 173 | ||
| 174 | local_irq_save(flags); | 174 | local_irq_save(flags); |
| 175 | kcb = get_kprobe_ctlblk(); | ||
| 175 | 176 | ||
| 176 | if (kprobe_running()) { | 177 | if (kprobe_running()) { |
| 177 | kprobes_inc_nmissed_count(&op->kp); | 178 | kprobes_inc_nmissed_count(&op->kp); |
| @@ -191,6 +192,7 @@ optimized_callback(struct optimized_kprobe *op, struct pt_regs *regs) | |||
| 191 | 192 | ||
| 192 | local_irq_restore(flags); | 193 | local_irq_restore(flags); |
| 193 | } | 194 | } |
| 195 | NOKPROBE_SYMBOL(optimized_callback) | ||
| 194 | 196 | ||
| 195 | int arch_prepare_optimized_kprobe(struct optimized_kprobe *op, struct kprobe *orig) | 197 | int arch_prepare_optimized_kprobe(struct optimized_kprobe *op, struct kprobe *orig) |
| 196 | { | 198 | { |
diff --git a/arch/arm/vfp/vfpmodule.c b/arch/arm/vfp/vfpmodule.c index 4c375e11ae95..af4ee2cef2f9 100644 --- a/arch/arm/vfp/vfpmodule.c +++ b/arch/arm/vfp/vfpmodule.c | |||
| @@ -257,7 +257,7 @@ static void vfp_raise_exceptions(u32 exceptions, u32 inst, u32 fpscr, struct pt_ | |||
| 257 | 257 | ||
| 258 | if (exceptions == VFP_EXCEPTION_ERROR) { | 258 | if (exceptions == VFP_EXCEPTION_ERROR) { |
| 259 | vfp_panic("unhandled bounce", inst); | 259 | vfp_panic("unhandled bounce", inst); |
| 260 | vfp_raise_sigfpe(FPE_FIXME, regs); | 260 | vfp_raise_sigfpe(FPE_FLTINV, regs); |
| 261 | return; | 261 | return; |
| 262 | } | 262 | } |
| 263 | 263 | ||
diff --git a/arch/arm64/boot/dts/exynos/exynos5433.dtsi b/arch/arm64/boot/dts/exynos/exynos5433.dtsi index c0231d077fa6..1ad8677f6a0a 100644 --- a/arch/arm64/boot/dts/exynos/exynos5433.dtsi +++ b/arch/arm64/boot/dts/exynos/exynos5433.dtsi | |||
| @@ -1317,7 +1317,7 @@ | |||
| 1317 | reg = <0x14d60000 0x100>; | 1317 | reg = <0x14d60000 0x100>; |
| 1318 | dmas = <&pdma0 31 &pdma0 30>; | 1318 | dmas = <&pdma0 31 &pdma0 30>; |
| 1319 | dma-names = "tx", "rx"; | 1319 | dma-names = "tx", "rx"; |
| 1320 | interrupts = <GIC_SPI 435 IRQ_TYPE_NONE>; | 1320 | interrupts = <GIC_SPI 435 IRQ_TYPE_LEVEL_HIGH>; |
| 1321 | clocks = <&cmu_peric CLK_PCLK_I2S1>, | 1321 | clocks = <&cmu_peric CLK_PCLK_I2S1>, |
| 1322 | <&cmu_peric CLK_PCLK_I2S1>, | 1322 | <&cmu_peric CLK_PCLK_I2S1>, |
| 1323 | <&cmu_peric CLK_SCLK_I2S1>; | 1323 | <&cmu_peric CLK_SCLK_I2S1>; |
diff --git a/arch/arm64/boot/dts/hisilicon/hi6220-hikey.dts b/arch/arm64/boot/dts/hisilicon/hi6220-hikey.dts index 724a0d3b7683..edb4ee0b8896 100644 --- a/arch/arm64/boot/dts/hisilicon/hi6220-hikey.dts +++ b/arch/arm64/boot/dts/hisilicon/hi6220-hikey.dts | |||
| @@ -299,7 +299,6 @@ | |||
| 299 | /* GPIO blocks 16 thru 19 do not appear to be routed to pins */ | 299 | /* GPIO blocks 16 thru 19 do not appear to be routed to pins */ |
| 300 | 300 | ||
| 301 | dwmmc_0: dwmmc0@f723d000 { | 301 | dwmmc_0: dwmmc0@f723d000 { |
| 302 | max-frequency = <150000000>; | ||
| 303 | cap-mmc-highspeed; | 302 | cap-mmc-highspeed; |
| 304 | mmc-hs200-1_8v; | 303 | mmc-hs200-1_8v; |
| 305 | non-removable; | 304 | non-removable; |
diff --git a/arch/arm64/boot/dts/marvell/armada-cp110.dtsi b/arch/arm64/boot/dts/marvell/armada-cp110.dtsi index 48cad7919efa..ed2f1237ea1e 100644 --- a/arch/arm64/boot/dts/marvell/armada-cp110.dtsi +++ b/arch/arm64/boot/dts/marvell/armada-cp110.dtsi | |||
| @@ -38,9 +38,10 @@ | |||
| 38 | compatible = "marvell,armada-7k-pp22"; | 38 | compatible = "marvell,armada-7k-pp22"; |
| 39 | reg = <0x0 0x100000>, <0x129000 0xb000>; | 39 | reg = <0x0 0x100000>, <0x129000 0xb000>; |
| 40 | clocks = <&CP110_LABEL(clk) 1 3>, <&CP110_LABEL(clk) 1 9>, | 40 | clocks = <&CP110_LABEL(clk) 1 3>, <&CP110_LABEL(clk) 1 9>, |
| 41 | <&CP110_LABEL(clk) 1 5>, <&CP110_LABEL(clk) 1 18>; | 41 | <&CP110_LABEL(clk) 1 5>, <&CP110_LABEL(clk) 1 6>, |
| 42 | <&CP110_LABEL(clk) 1 18>; | ||
| 42 | clock-names = "pp_clk", "gop_clk", | 43 | clock-names = "pp_clk", "gop_clk", |
| 43 | "mg_clk", "axi_clk"; | 44 | "mg_clk", "mg_core_clk", "axi_clk"; |
| 44 | marvell,system-controller = <&CP110_LABEL(syscon0)>; | 45 | marvell,system-controller = <&CP110_LABEL(syscon0)>; |
| 45 | status = "disabled"; | 46 | status = "disabled"; |
| 46 | dma-coherent; | 47 | dma-coherent; |
| @@ -141,6 +142,8 @@ | |||
| 141 | #size-cells = <0>; | 142 | #size-cells = <0>; |
| 142 | compatible = "marvell,xmdio"; | 143 | compatible = "marvell,xmdio"; |
| 143 | reg = <0x12a600 0x10>; | 144 | reg = <0x12a600 0x10>; |
| 145 | clocks = <&CP110_LABEL(clk) 1 5>, | ||
| 146 | <&CP110_LABEL(clk) 1 6>, <&CP110_LABEL(clk) 1 18>; | ||
| 144 | status = "disabled"; | 147 | status = "disabled"; |
| 145 | }; | 148 | }; |
| 146 | 149 | ||
diff --git a/arch/arm64/boot/dts/nvidia/tegra186-p3310.dtsi b/arch/arm64/boot/dts/nvidia/tegra186-p3310.dtsi index a8baad7b80df..13f57fff1477 100644 --- a/arch/arm64/boot/dts/nvidia/tegra186-p3310.dtsi +++ b/arch/arm64/boot/dts/nvidia/tegra186-p3310.dtsi | |||
| @@ -46,7 +46,7 @@ | |||
| 46 | compatible = "ethernet-phy-ieee802.3-c22"; | 46 | compatible = "ethernet-phy-ieee802.3-c22"; |
| 47 | reg = <0x0>; | 47 | reg = <0x0>; |
| 48 | interrupt-parent = <&gpio>; | 48 | interrupt-parent = <&gpio>; |
| 49 | interrupts = <TEGRA_MAIN_GPIO(M, 5) IRQ_TYPE_LEVEL_HIGH>; | 49 | interrupts = <TEGRA_MAIN_GPIO(M, 5) IRQ_TYPE_LEVEL_LOW>; |
| 50 | }; | 50 | }; |
| 51 | }; | 51 | }; |
| 52 | }; | 52 | }; |
diff --git a/arch/arm64/boot/dts/socionext/uniphier-ld11.dtsi b/arch/arm64/boot/dts/socionext/uniphier-ld11.dtsi index e62bda1cf2d9..c32dd3419c87 100644 --- a/arch/arm64/boot/dts/socionext/uniphier-ld11.dtsi +++ b/arch/arm64/boot/dts/socionext/uniphier-ld11.dtsi | |||
| @@ -414,7 +414,7 @@ | |||
| 414 | mmc-ddr-1_8v; | 414 | mmc-ddr-1_8v; |
| 415 | mmc-hs200-1_8v; | 415 | mmc-hs200-1_8v; |
| 416 | mmc-pwrseq = <&emmc_pwrseq>; | 416 | mmc-pwrseq = <&emmc_pwrseq>; |
| 417 | cdns,phy-input-delay-legacy = <4>; | 417 | cdns,phy-input-delay-legacy = <9>; |
| 418 | cdns,phy-input-delay-mmc-highspeed = <2>; | 418 | cdns,phy-input-delay-mmc-highspeed = <2>; |
| 419 | cdns,phy-input-delay-mmc-ddr = <3>; | 419 | cdns,phy-input-delay-mmc-ddr = <3>; |
| 420 | cdns,phy-dll-delay-sdclk = <21>; | 420 | cdns,phy-dll-delay-sdclk = <21>; |
diff --git a/arch/arm64/boot/dts/socionext/uniphier-ld20-ref.dts b/arch/arm64/boot/dts/socionext/uniphier-ld20-ref.dts index 2c1a92fafbfb..440c2e6a638b 100644 --- a/arch/arm64/boot/dts/socionext/uniphier-ld20-ref.dts +++ b/arch/arm64/boot/dts/socionext/uniphier-ld20-ref.dts | |||
| @@ -67,3 +67,11 @@ | |||
| 67 | reg = <0>; | 67 | reg = <0>; |
| 68 | }; | 68 | }; |
| 69 | }; | 69 | }; |
| 70 | |||
| 71 | &pinctrl_ether_rgmii { | ||
| 72 | tx { | ||
| 73 | pins = "RGMII_TXCLK", "RGMII_TXD0", "RGMII_TXD1", | ||
| 74 | "RGMII_TXD2", "RGMII_TXD3", "RGMII_TXCTL"; | ||
| 75 | drive-strength = <9>; | ||
| 76 | }; | ||
| 77 | }; | ||
diff --git a/arch/arm64/boot/dts/socionext/uniphier-ld20.dtsi b/arch/arm64/boot/dts/socionext/uniphier-ld20.dtsi index 9efe20d07589..3a5ed789c056 100644 --- a/arch/arm64/boot/dts/socionext/uniphier-ld20.dtsi +++ b/arch/arm64/boot/dts/socionext/uniphier-ld20.dtsi | |||
| @@ -519,7 +519,7 @@ | |||
| 519 | mmc-ddr-1_8v; | 519 | mmc-ddr-1_8v; |
| 520 | mmc-hs200-1_8v; | 520 | mmc-hs200-1_8v; |
| 521 | mmc-pwrseq = <&emmc_pwrseq>; | 521 | mmc-pwrseq = <&emmc_pwrseq>; |
| 522 | cdns,phy-input-delay-legacy = <4>; | 522 | cdns,phy-input-delay-legacy = <9>; |
| 523 | cdns,phy-input-delay-mmc-highspeed = <2>; | 523 | cdns,phy-input-delay-mmc-highspeed = <2>; |
| 524 | cdns,phy-input-delay-mmc-ddr = <3>; | 524 | cdns,phy-input-delay-mmc-ddr = <3>; |
| 525 | cdns,phy-dll-delay-sdclk = <21>; | 525 | cdns,phy-dll-delay-sdclk = <21>; |
diff --git a/arch/arm64/boot/dts/socionext/uniphier-pxs3.dtsi b/arch/arm64/boot/dts/socionext/uniphier-pxs3.dtsi index 7c8f710d9bfa..e85d6ddea3c2 100644 --- a/arch/arm64/boot/dts/socionext/uniphier-pxs3.dtsi +++ b/arch/arm64/boot/dts/socionext/uniphier-pxs3.dtsi | |||
| @@ -334,7 +334,7 @@ | |||
| 334 | mmc-ddr-1_8v; | 334 | mmc-ddr-1_8v; |
| 335 | mmc-hs200-1_8v; | 335 | mmc-hs200-1_8v; |
| 336 | mmc-pwrseq = <&emmc_pwrseq>; | 336 | mmc-pwrseq = <&emmc_pwrseq>; |
| 337 | cdns,phy-input-delay-legacy = <4>; | 337 | cdns,phy-input-delay-legacy = <9>; |
| 338 | cdns,phy-input-delay-mmc-highspeed = <2>; | 338 | cdns,phy-input-delay-mmc-highspeed = <2>; |
| 339 | cdns,phy-input-delay-mmc-ddr = <3>; | 339 | cdns,phy-input-delay-mmc-ddr = <3>; |
| 340 | cdns,phy-dll-delay-sdclk = <21>; | 340 | cdns,phy-dll-delay-sdclk = <21>; |
diff --git a/arch/arm64/include/asm/atomic_lse.h b/arch/arm64/include/asm/atomic_lse.h index 9ef0797380cb..f9b0b09153e0 100644 --- a/arch/arm64/include/asm/atomic_lse.h +++ b/arch/arm64/include/asm/atomic_lse.h | |||
| @@ -117,7 +117,7 @@ static inline void atomic_and(int i, atomic_t *v) | |||
| 117 | /* LSE atomics */ | 117 | /* LSE atomics */ |
| 118 | " mvn %w[i], %w[i]\n" | 118 | " mvn %w[i], %w[i]\n" |
| 119 | " stclr %w[i], %[v]") | 119 | " stclr %w[i], %[v]") |
| 120 | : [i] "+r" (w0), [v] "+Q" (v->counter) | 120 | : [i] "+&r" (w0), [v] "+Q" (v->counter) |
| 121 | : "r" (x1) | 121 | : "r" (x1) |
| 122 | : __LL_SC_CLOBBERS); | 122 | : __LL_SC_CLOBBERS); |
| 123 | } | 123 | } |
| @@ -135,7 +135,7 @@ static inline int atomic_fetch_and##name(int i, atomic_t *v) \ | |||
| 135 | /* LSE atomics */ \ | 135 | /* LSE atomics */ \ |
| 136 | " mvn %w[i], %w[i]\n" \ | 136 | " mvn %w[i], %w[i]\n" \ |
| 137 | " ldclr" #mb " %w[i], %w[i], %[v]") \ | 137 | " ldclr" #mb " %w[i], %w[i], %[v]") \ |
| 138 | : [i] "+r" (w0), [v] "+Q" (v->counter) \ | 138 | : [i] "+&r" (w0), [v] "+Q" (v->counter) \ |
| 139 | : "r" (x1) \ | 139 | : "r" (x1) \ |
| 140 | : __LL_SC_CLOBBERS, ##cl); \ | 140 | : __LL_SC_CLOBBERS, ##cl); \ |
| 141 | \ | 141 | \ |
| @@ -161,7 +161,7 @@ static inline void atomic_sub(int i, atomic_t *v) | |||
| 161 | /* LSE atomics */ | 161 | /* LSE atomics */ |
| 162 | " neg %w[i], %w[i]\n" | 162 | " neg %w[i], %w[i]\n" |
| 163 | " stadd %w[i], %[v]") | 163 | " stadd %w[i], %[v]") |
| 164 | : [i] "+r" (w0), [v] "+Q" (v->counter) | 164 | : [i] "+&r" (w0), [v] "+Q" (v->counter) |
| 165 | : "r" (x1) | 165 | : "r" (x1) |
| 166 | : __LL_SC_CLOBBERS); | 166 | : __LL_SC_CLOBBERS); |
| 167 | } | 167 | } |
| @@ -180,7 +180,7 @@ static inline int atomic_sub_return##name(int i, atomic_t *v) \ | |||
| 180 | " neg %w[i], %w[i]\n" \ | 180 | " neg %w[i], %w[i]\n" \ |
| 181 | " ldadd" #mb " %w[i], w30, %[v]\n" \ | 181 | " ldadd" #mb " %w[i], w30, %[v]\n" \ |
| 182 | " add %w[i], %w[i], w30") \ | 182 | " add %w[i], %w[i], w30") \ |
| 183 | : [i] "+r" (w0), [v] "+Q" (v->counter) \ | 183 | : [i] "+&r" (w0), [v] "+Q" (v->counter) \ |
| 184 | : "r" (x1) \ | 184 | : "r" (x1) \ |
| 185 | : __LL_SC_CLOBBERS , ##cl); \ | 185 | : __LL_SC_CLOBBERS , ##cl); \ |
| 186 | \ | 186 | \ |
| @@ -207,7 +207,7 @@ static inline int atomic_fetch_sub##name(int i, atomic_t *v) \ | |||
| 207 | /* LSE atomics */ \ | 207 | /* LSE atomics */ \ |
| 208 | " neg %w[i], %w[i]\n" \ | 208 | " neg %w[i], %w[i]\n" \ |
| 209 | " ldadd" #mb " %w[i], %w[i], %[v]") \ | 209 | " ldadd" #mb " %w[i], %w[i], %[v]") \ |
| 210 | : [i] "+r" (w0), [v] "+Q" (v->counter) \ | 210 | : [i] "+&r" (w0), [v] "+Q" (v->counter) \ |
| 211 | : "r" (x1) \ | 211 | : "r" (x1) \ |
| 212 | : __LL_SC_CLOBBERS, ##cl); \ | 212 | : __LL_SC_CLOBBERS, ##cl); \ |
| 213 | \ | 213 | \ |
| @@ -314,7 +314,7 @@ static inline void atomic64_and(long i, atomic64_t *v) | |||
| 314 | /* LSE atomics */ | 314 | /* LSE atomics */ |
| 315 | " mvn %[i], %[i]\n" | 315 | " mvn %[i], %[i]\n" |
| 316 | " stclr %[i], %[v]") | 316 | " stclr %[i], %[v]") |
| 317 | : [i] "+r" (x0), [v] "+Q" (v->counter) | 317 | : [i] "+&r" (x0), [v] "+Q" (v->counter) |
| 318 | : "r" (x1) | 318 | : "r" (x1) |
| 319 | : __LL_SC_CLOBBERS); | 319 | : __LL_SC_CLOBBERS); |
| 320 | } | 320 | } |
| @@ -332,7 +332,7 @@ static inline long atomic64_fetch_and##name(long i, atomic64_t *v) \ | |||
| 332 | /* LSE atomics */ \ | 332 | /* LSE atomics */ \ |
| 333 | " mvn %[i], %[i]\n" \ | 333 | " mvn %[i], %[i]\n" \ |
| 334 | " ldclr" #mb " %[i], %[i], %[v]") \ | 334 | " ldclr" #mb " %[i], %[i], %[v]") \ |
| 335 | : [i] "+r" (x0), [v] "+Q" (v->counter) \ | 335 | : [i] "+&r" (x0), [v] "+Q" (v->counter) \ |
| 336 | : "r" (x1) \ | 336 | : "r" (x1) \ |
| 337 | : __LL_SC_CLOBBERS, ##cl); \ | 337 | : __LL_SC_CLOBBERS, ##cl); \ |
| 338 | \ | 338 | \ |
| @@ -358,7 +358,7 @@ static inline void atomic64_sub(long i, atomic64_t *v) | |||
| 358 | /* LSE atomics */ | 358 | /* LSE atomics */ |
| 359 | " neg %[i], %[i]\n" | 359 | " neg %[i], %[i]\n" |
| 360 | " stadd %[i], %[v]") | 360 | " stadd %[i], %[v]") |
| 361 | : [i] "+r" (x0), [v] "+Q" (v->counter) | 361 | : [i] "+&r" (x0), [v] "+Q" (v->counter) |
| 362 | : "r" (x1) | 362 | : "r" (x1) |
| 363 | : __LL_SC_CLOBBERS); | 363 | : __LL_SC_CLOBBERS); |
| 364 | } | 364 | } |
| @@ -377,7 +377,7 @@ static inline long atomic64_sub_return##name(long i, atomic64_t *v) \ | |||
| 377 | " neg %[i], %[i]\n" \ | 377 | " neg %[i], %[i]\n" \ |
| 378 | " ldadd" #mb " %[i], x30, %[v]\n" \ | 378 | " ldadd" #mb " %[i], x30, %[v]\n" \ |
| 379 | " add %[i], %[i], x30") \ | 379 | " add %[i], %[i], x30") \ |
| 380 | : [i] "+r" (x0), [v] "+Q" (v->counter) \ | 380 | : [i] "+&r" (x0), [v] "+Q" (v->counter) \ |
| 381 | : "r" (x1) \ | 381 | : "r" (x1) \ |
| 382 | : __LL_SC_CLOBBERS, ##cl); \ | 382 | : __LL_SC_CLOBBERS, ##cl); \ |
| 383 | \ | 383 | \ |
| @@ -404,7 +404,7 @@ static inline long atomic64_fetch_sub##name(long i, atomic64_t *v) \ | |||
| 404 | /* LSE atomics */ \ | 404 | /* LSE atomics */ \ |
| 405 | " neg %[i], %[i]\n" \ | 405 | " neg %[i], %[i]\n" \ |
| 406 | " ldadd" #mb " %[i], %[i], %[v]") \ | 406 | " ldadd" #mb " %[i], %[i], %[v]") \ |
| 407 | : [i] "+r" (x0), [v] "+Q" (v->counter) \ | 407 | : [i] "+&r" (x0), [v] "+Q" (v->counter) \ |
| 408 | : "r" (x1) \ | 408 | : "r" (x1) \ |
| 409 | : __LL_SC_CLOBBERS, ##cl); \ | 409 | : __LL_SC_CLOBBERS, ##cl); \ |
| 410 | \ | 410 | \ |
| @@ -435,7 +435,7 @@ static inline long atomic64_dec_if_positive(atomic64_t *v) | |||
| 435 | " sub x30, x30, %[ret]\n" | 435 | " sub x30, x30, %[ret]\n" |
| 436 | " cbnz x30, 1b\n" | 436 | " cbnz x30, 1b\n" |
| 437 | "2:") | 437 | "2:") |
| 438 | : [ret] "+r" (x0), [v] "+Q" (v->counter) | 438 | : [ret] "+&r" (x0), [v] "+Q" (v->counter) |
| 439 | : | 439 | : |
| 440 | : __LL_SC_CLOBBERS, "cc", "memory"); | 440 | : __LL_SC_CLOBBERS, "cc", "memory"); |
| 441 | 441 | ||
| @@ -516,7 +516,7 @@ static inline long __cmpxchg_double##name(unsigned long old1, \ | |||
| 516 | " eor %[old1], %[old1], %[oldval1]\n" \ | 516 | " eor %[old1], %[old1], %[oldval1]\n" \ |
| 517 | " eor %[old2], %[old2], %[oldval2]\n" \ | 517 | " eor %[old2], %[old2], %[oldval2]\n" \ |
| 518 | " orr %[old1], %[old1], %[old2]") \ | 518 | " orr %[old1], %[old1], %[old2]") \ |
| 519 | : [old1] "+r" (x0), [old2] "+r" (x1), \ | 519 | : [old1] "+&r" (x0), [old2] "+&r" (x1), \ |
| 520 | [v] "+Q" (*(unsigned long *)ptr) \ | 520 | [v] "+Q" (*(unsigned long *)ptr) \ |
| 521 | : [new1] "r" (x2), [new2] "r" (x3), [ptr] "r" (x4), \ | 521 | : [new1] "r" (x2), [new2] "r" (x3), [ptr] "r" (x4), \ |
| 522 | [oldval1] "r" (oldval1), [oldval2] "r" (oldval2) \ | 522 | [oldval1] "r" (oldval1), [oldval2] "r" (oldval2) \ |
diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h index 082110993647..6128992c2ded 100644 --- a/arch/arm64/include/asm/kvm_mmu.h +++ b/arch/arm64/include/asm/kvm_mmu.h | |||
| @@ -360,6 +360,22 @@ static inline unsigned int kvm_get_vmid_bits(void) | |||
| 360 | return (cpuid_feature_extract_unsigned_field(reg, ID_AA64MMFR1_VMIDBITS_SHIFT) == 2) ? 16 : 8; | 360 | return (cpuid_feature_extract_unsigned_field(reg, ID_AA64MMFR1_VMIDBITS_SHIFT) == 2) ? 16 : 8; |
| 361 | } | 361 | } |
| 362 | 362 | ||
| 363 | /* | ||
| 364 | * We are not in the kvm->srcu critical section most of the time, so we take | ||
| 365 | * the SRCU read lock here. Since we copy the data from the user page, we | ||
| 366 | * can immediately drop the lock again. | ||
| 367 | */ | ||
| 368 | static inline int kvm_read_guest_lock(struct kvm *kvm, | ||
| 369 | gpa_t gpa, void *data, unsigned long len) | ||
| 370 | { | ||
| 371 | int srcu_idx = srcu_read_lock(&kvm->srcu); | ||
| 372 | int ret = kvm_read_guest(kvm, gpa, data, len); | ||
| 373 | |||
| 374 | srcu_read_unlock(&kvm->srcu, srcu_idx); | ||
| 375 | |||
| 376 | return ret; | ||
| 377 | } | ||
| 378 | |||
| 363 | #ifdef CONFIG_KVM_INDIRECT_VECTORS | 379 | #ifdef CONFIG_KVM_INDIRECT_VECTORS |
| 364 | /* | 380 | /* |
| 365 | * EL2 vectors can be mapped and rerouted in a number of ways, | 381 | * EL2 vectors can be mapped and rerouted in a number of ways, |
diff --git a/arch/arm64/kernel/arm64ksyms.c b/arch/arm64/kernel/arm64ksyms.c index 66be504edb6c..d894a20b70b2 100644 --- a/arch/arm64/kernel/arm64ksyms.c +++ b/arch/arm64/kernel/arm64ksyms.c | |||
| @@ -75,3 +75,11 @@ NOKPROBE_SYMBOL(_mcount); | |||
| 75 | /* arm-smccc */ | 75 | /* arm-smccc */ |
| 76 | EXPORT_SYMBOL(__arm_smccc_smc); | 76 | EXPORT_SYMBOL(__arm_smccc_smc); |
| 77 | EXPORT_SYMBOL(__arm_smccc_hvc); | 77 | EXPORT_SYMBOL(__arm_smccc_hvc); |
| 78 | |||
| 79 | /* tishift.S */ | ||
| 80 | extern long long __ashlti3(long long a, int b); | ||
| 81 | EXPORT_SYMBOL(__ashlti3); | ||
| 82 | extern long long __ashrti3(long long a, int b); | ||
| 83 | EXPORT_SYMBOL(__ashrti3); | ||
| 84 | extern long long __lshrti3(long long a, int b); | ||
| 85 | EXPORT_SYMBOL(__lshrti3); | ||
diff --git a/arch/arm64/lib/tishift.S b/arch/arm64/lib/tishift.S index d3db9b2cd479..0fdff97794de 100644 --- a/arch/arm64/lib/tishift.S +++ b/arch/arm64/lib/tishift.S | |||
| @@ -1,17 +1,6 @@ | |||
| 1 | /* | 1 | /* SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) |
| 2 | * Copyright (C) 2017 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved. | ||
| 3 | * | 2 | * |
| 4 | * This program is free software; you can redistribute it and/or modify | 3 | * Copyright (C) 2017-2018 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved. |
| 5 | * it under the terms of the GNU General Public License version 2 as | ||
| 6 | * published by the Free Software Foundation. | ||
| 7 | * | ||
| 8 | * This program is distributed in the hope that it will be useful, | ||
| 9 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 10 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
| 11 | * GNU General Public License for more details. | ||
| 12 | * | ||
| 13 | * You should have received a copy of the GNU General Public License | ||
| 14 | * along with this program. If not, see <http://www.gnu.org/licenses/>. | ||
| 15 | */ | 4 | */ |
| 16 | 5 | ||
| 17 | #include <linux/linkage.h> | 6 | #include <linux/linkage.h> |
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c index 4165485e8b6e..2af3dd89bcdb 100644 --- a/arch/arm64/mm/fault.c +++ b/arch/arm64/mm/fault.c | |||
| @@ -293,6 +293,57 @@ static void __do_kernel_fault(unsigned long addr, unsigned int esr, | |||
| 293 | static void __do_user_fault(struct siginfo *info, unsigned int esr) | 293 | static void __do_user_fault(struct siginfo *info, unsigned int esr) |
| 294 | { | 294 | { |
| 295 | current->thread.fault_address = (unsigned long)info->si_addr; | 295 | current->thread.fault_address = (unsigned long)info->si_addr; |
| 296 | |||
| 297 | /* | ||
| 298 | * If the faulting address is in the kernel, we must sanitize the ESR. | ||
| 299 | * From userspace's point of view, kernel-only mappings don't exist | ||
| 300 | * at all, so we report them as level 0 translation faults. | ||
| 301 | * (This is not quite the way that "no mapping there at all" behaves: | ||
| 302 | * an alignment fault not caused by the memory type would take | ||
| 303 | * precedence over translation fault for a real access to empty | ||
| 304 | * space. Unfortunately we can't easily distinguish "alignment fault | ||
| 305 | * not caused by memory type" from "alignment fault caused by memory | ||
| 306 | * type", so we ignore this wrinkle and just return the translation | ||
| 307 | * fault.) | ||
| 308 | */ | ||
| 309 | if (current->thread.fault_address >= TASK_SIZE) { | ||
| 310 | switch (ESR_ELx_EC(esr)) { | ||
| 311 | case ESR_ELx_EC_DABT_LOW: | ||
| 312 | /* | ||
| 313 | * These bits provide only information about the | ||
| 314 | * faulting instruction, which userspace knows already. | ||
| 315 | * We explicitly clear bits which are architecturally | ||
| 316 | * RES0 in case they are given meanings in future. | ||
| 317 | * We always report the ESR as if the fault was taken | ||
| 318 | * to EL1 and so ISV and the bits in ISS[23:14] are | ||
| 319 | * clear. (In fact it always will be a fault to EL1.) | ||
| 320 | */ | ||
| 321 | esr &= ESR_ELx_EC_MASK | ESR_ELx_IL | | ||
| 322 | ESR_ELx_CM | ESR_ELx_WNR; | ||
| 323 | esr |= ESR_ELx_FSC_FAULT; | ||
| 324 | break; | ||
| 325 | case ESR_ELx_EC_IABT_LOW: | ||
| 326 | /* | ||
| 327 | * Claim a level 0 translation fault. | ||
| 328 | * All other bits are architecturally RES0 for faults | ||
| 329 | * reported with that DFSC value, so we clear them. | ||
| 330 | */ | ||
| 331 | esr &= ESR_ELx_EC_MASK | ESR_ELx_IL; | ||
| 332 | esr |= ESR_ELx_FSC_FAULT; | ||
| 333 | break; | ||
| 334 | default: | ||
| 335 | /* | ||
| 336 | * This should never happen (entry.S only brings us | ||
| 337 | * into this code for insn and data aborts from a lower | ||
| 338 | * exception level). Fail safe by not providing an ESR | ||
| 339 | * context record at all. | ||
| 340 | */ | ||
| 341 | WARN(1, "ESR 0x%x is not DABT or IABT from EL0\n", esr); | ||
| 342 | esr = 0; | ||
| 343 | break; | ||
| 344 | } | ||
| 345 | } | ||
| 346 | |||
| 296 | current->thread.fault_code = esr; | 347 | current->thread.fault_code = esr; |
| 297 | arm64_force_sig_info(info, esr_to_fault_info(esr)->name, current); | 348 | arm64_force_sig_info(info, esr_to_fault_info(esr)->name, current); |
| 298 | } | 349 | } |
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c index 2dbb2c9f1ec1..493ff75670ff 100644 --- a/arch/arm64/mm/mmu.c +++ b/arch/arm64/mm/mmu.c | |||
| @@ -933,13 +933,15 @@ int pud_set_huge(pud_t *pudp, phys_addr_t phys, pgprot_t prot) | |||
| 933 | { | 933 | { |
| 934 | pgprot_t sect_prot = __pgprot(PUD_TYPE_SECT | | 934 | pgprot_t sect_prot = __pgprot(PUD_TYPE_SECT | |
| 935 | pgprot_val(mk_sect_prot(prot))); | 935 | pgprot_val(mk_sect_prot(prot))); |
| 936 | pud_t new_pud = pfn_pud(__phys_to_pfn(phys), sect_prot); | ||
| 936 | 937 | ||
| 937 | /* ioremap_page_range doesn't honour BBM */ | 938 | /* Only allow permission changes for now */ |
| 938 | if (pud_present(READ_ONCE(*pudp))) | 939 | if (!pgattr_change_is_safe(READ_ONCE(pud_val(*pudp)), |
| 940 | pud_val(new_pud))) | ||
| 939 | return 0; | 941 | return 0; |
| 940 | 942 | ||
| 941 | BUG_ON(phys & ~PUD_MASK); | 943 | BUG_ON(phys & ~PUD_MASK); |
| 942 | set_pud(pudp, pfn_pud(__phys_to_pfn(phys), sect_prot)); | 944 | set_pud(pudp, new_pud); |
| 943 | return 1; | 945 | return 1; |
| 944 | } | 946 | } |
| 945 | 947 | ||
| @@ -947,13 +949,15 @@ int pmd_set_huge(pmd_t *pmdp, phys_addr_t phys, pgprot_t prot) | |||
| 947 | { | 949 | { |
| 948 | pgprot_t sect_prot = __pgprot(PMD_TYPE_SECT | | 950 | pgprot_t sect_prot = __pgprot(PMD_TYPE_SECT | |
| 949 | pgprot_val(mk_sect_prot(prot))); | 951 | pgprot_val(mk_sect_prot(prot))); |
| 952 | pmd_t new_pmd = pfn_pmd(__phys_to_pfn(phys), sect_prot); | ||
| 950 | 953 | ||
| 951 | /* ioremap_page_range doesn't honour BBM */ | 954 | /* Only allow permission changes for now */ |
| 952 | if (pmd_present(READ_ONCE(*pmdp))) | 955 | if (!pgattr_change_is_safe(READ_ONCE(pmd_val(*pmdp)), |
| 956 | pmd_val(new_pmd))) | ||
| 953 | return 0; | 957 | return 0; |
| 954 | 958 | ||
| 955 | BUG_ON(phys & ~PMD_MASK); | 959 | BUG_ON(phys & ~PMD_MASK); |
| 956 | set_pmd(pmdp, pfn_pmd(__phys_to_pfn(phys), sect_prot)); | 960 | set_pmd(pmdp, new_pmd); |
| 957 | return 1; | 961 | return 1; |
| 958 | } | 962 | } |
| 959 | 963 | ||
diff --git a/arch/mips/boot/compressed/uart-16550.c b/arch/mips/boot/compressed/uart-16550.c index b3043c08f769..aee8d7b8f091 100644 --- a/arch/mips/boot/compressed/uart-16550.c +++ b/arch/mips/boot/compressed/uart-16550.c | |||
| @@ -18,9 +18,9 @@ | |||
| 18 | #define PORT(offset) (CKSEG1ADDR(AR7_REGS_UART0) + (4 * offset)) | 18 | #define PORT(offset) (CKSEG1ADDR(AR7_REGS_UART0) + (4 * offset)) |
| 19 | #endif | 19 | #endif |
| 20 | 20 | ||
| 21 | #if defined(CONFIG_MACH_JZ4740) || defined(CONFIG_MACH_JZ4780) | 21 | #ifdef CONFIG_MACH_INGENIC |
| 22 | #include <asm/mach-jz4740/base.h> | 22 | #define INGENIC_UART0_BASE_ADDR 0x10030000 |
| 23 | #define PORT(offset) (CKSEG1ADDR(JZ4740_UART0_BASE_ADDR) + (4 * offset)) | 23 | #define PORT(offset) (CKSEG1ADDR(INGENIC_UART0_BASE_ADDR) + (4 * offset)) |
| 24 | #endif | 24 | #endif |
| 25 | 25 | ||
| 26 | #ifdef CONFIG_CPU_XLR | 26 | #ifdef CONFIG_CPU_XLR |
diff --git a/arch/mips/boot/dts/xilfpga/Makefile b/arch/mips/boot/dts/xilfpga/Makefile index 9987e0e378c5..69ca00590b8d 100644 --- a/arch/mips/boot/dts/xilfpga/Makefile +++ b/arch/mips/boot/dts/xilfpga/Makefile | |||
| @@ -1,4 +1,2 @@ | |||
| 1 | # SPDX-License-Identifier: GPL-2.0 | 1 | # SPDX-License-Identifier: GPL-2.0 |
| 2 | dtb-$(CONFIG_FIT_IMAGE_FDT_XILFPGA) += nexys4ddr.dtb | 2 | dtb-$(CONFIG_FIT_IMAGE_FDT_XILFPGA) += nexys4ddr.dtb |
| 3 | |||
| 4 | obj-y += $(patsubst %.dtb, %.dtb.o, $(dtb-y)) | ||
diff --git a/arch/mips/generic/Platform b/arch/mips/generic/Platform index b51432dd10b6..0dd0d5d460a5 100644 --- a/arch/mips/generic/Platform +++ b/arch/mips/generic/Platform | |||
| @@ -16,3 +16,4 @@ all-$(CONFIG_MIPS_GENERIC) := vmlinux.gz.itb | |||
| 16 | its-y := vmlinux.its.S | 16 | its-y := vmlinux.its.S |
| 17 | its-$(CONFIG_FIT_IMAGE_FDT_BOSTON) += board-boston.its.S | 17 | its-$(CONFIG_FIT_IMAGE_FDT_BOSTON) += board-boston.its.S |
| 18 | its-$(CONFIG_FIT_IMAGE_FDT_NI169445) += board-ni169445.its.S | 18 | its-$(CONFIG_FIT_IMAGE_FDT_NI169445) += board-ni169445.its.S |
| 19 | its-$(CONFIG_FIT_IMAGE_FDT_XILFPGA) += board-xilfpga.its.S | ||
diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c index 0b23b1ad99e6..8d098b9f395c 100644 --- a/arch/mips/kernel/ptrace.c +++ b/arch/mips/kernel/ptrace.c | |||
| @@ -463,7 +463,7 @@ static int fpr_get_msa(struct task_struct *target, | |||
| 463 | /* | 463 | /* |
| 464 | * Copy the floating-point context to the supplied NT_PRFPREG buffer. | 464 | * Copy the floating-point context to the supplied NT_PRFPREG buffer. |
| 465 | * Choose the appropriate helper for general registers, and then copy | 465 | * Choose the appropriate helper for general registers, and then copy |
| 466 | * the FCSR register separately. | 466 | * the FCSR and FIR registers separately. |
| 467 | */ | 467 | */ |
| 468 | static int fpr_get(struct task_struct *target, | 468 | static int fpr_get(struct task_struct *target, |
| 469 | const struct user_regset *regset, | 469 | const struct user_regset *regset, |
| @@ -471,6 +471,7 @@ static int fpr_get(struct task_struct *target, | |||
| 471 | void *kbuf, void __user *ubuf) | 471 | void *kbuf, void __user *ubuf) |
| 472 | { | 472 | { |
| 473 | const int fcr31_pos = NUM_FPU_REGS * sizeof(elf_fpreg_t); | 473 | const int fcr31_pos = NUM_FPU_REGS * sizeof(elf_fpreg_t); |
| 474 | const int fir_pos = fcr31_pos + sizeof(u32); | ||
| 474 | int err; | 475 | int err; |
| 475 | 476 | ||
| 476 | if (sizeof(target->thread.fpu.fpr[0]) == sizeof(elf_fpreg_t)) | 477 | if (sizeof(target->thread.fpu.fpr[0]) == sizeof(elf_fpreg_t)) |
| @@ -483,6 +484,12 @@ static int fpr_get(struct task_struct *target, | |||
| 483 | err = user_regset_copyout(&pos, &count, &kbuf, &ubuf, | 484 | err = user_regset_copyout(&pos, &count, &kbuf, &ubuf, |
| 484 | &target->thread.fpu.fcr31, | 485 | &target->thread.fpu.fcr31, |
| 485 | fcr31_pos, fcr31_pos + sizeof(u32)); | 486 | fcr31_pos, fcr31_pos + sizeof(u32)); |
| 487 | if (err) | ||
| 488 | return err; | ||
| 489 | |||
| 490 | err = user_regset_copyout(&pos, &count, &kbuf, &ubuf, | ||
| 491 | &boot_cpu_data.fpu_id, | ||
| 492 | fir_pos, fir_pos + sizeof(u32)); | ||
| 486 | 493 | ||
| 487 | return err; | 494 | return err; |
| 488 | } | 495 | } |
| @@ -531,7 +538,8 @@ static int fpr_set_msa(struct task_struct *target, | |||
| 531 | /* | 538 | /* |
| 532 | * Copy the supplied NT_PRFPREG buffer to the floating-point context. | 539 | * Copy the supplied NT_PRFPREG buffer to the floating-point context. |
| 533 | * Choose the appropriate helper for general registers, and then copy | 540 | * Choose the appropriate helper for general registers, and then copy |
| 534 | * the FCSR register separately. | 541 | * the FCSR register separately. Ignore the incoming FIR register |
| 542 | * contents though, as the register is read-only. | ||
| 535 | * | 543 | * |
| 536 | * We optimize for the case where `count % sizeof(elf_fpreg_t) == 0', | 544 | * We optimize for the case where `count % sizeof(elf_fpreg_t) == 0', |
| 537 | * which is supposed to have been guaranteed by the kernel before | 545 | * which is supposed to have been guaranteed by the kernel before |
| @@ -545,6 +553,7 @@ static int fpr_set(struct task_struct *target, | |||
| 545 | const void *kbuf, const void __user *ubuf) | 553 | const void *kbuf, const void __user *ubuf) |
| 546 | { | 554 | { |
| 547 | const int fcr31_pos = NUM_FPU_REGS * sizeof(elf_fpreg_t); | 555 | const int fcr31_pos = NUM_FPU_REGS * sizeof(elf_fpreg_t); |
| 556 | const int fir_pos = fcr31_pos + sizeof(u32); | ||
| 548 | u32 fcr31; | 557 | u32 fcr31; |
| 549 | int err; | 558 | int err; |
| 550 | 559 | ||
| @@ -572,6 +581,11 @@ static int fpr_set(struct task_struct *target, | |||
| 572 | ptrace_setfcr31(target, fcr31); | 581 | ptrace_setfcr31(target, fcr31); |
| 573 | } | 582 | } |
| 574 | 583 | ||
| 584 | if (count > 0) | ||
| 585 | err = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf, | ||
| 586 | fir_pos, | ||
| 587 | fir_pos + sizeof(u32)); | ||
| 588 | |||
| 575 | return err; | 589 | return err; |
| 576 | } | 590 | } |
| 577 | 591 | ||
| @@ -793,7 +807,7 @@ long arch_ptrace(struct task_struct *child, long request, | |||
| 793 | fregs = get_fpu_regs(child); | 807 | fregs = get_fpu_regs(child); |
| 794 | 808 | ||
| 795 | #ifdef CONFIG_32BIT | 809 | #ifdef CONFIG_32BIT |
| 796 | if (test_thread_flag(TIF_32BIT_FPREGS)) { | 810 | if (test_tsk_thread_flag(child, TIF_32BIT_FPREGS)) { |
| 797 | /* | 811 | /* |
| 798 | * The odd registers are actually the high | 812 | * The odd registers are actually the high |
| 799 | * order bits of the values stored in the even | 813 | * order bits of the values stored in the even |
| @@ -888,7 +902,7 @@ long arch_ptrace(struct task_struct *child, long request, | |||
| 888 | 902 | ||
| 889 | init_fp_ctx(child); | 903 | init_fp_ctx(child); |
| 890 | #ifdef CONFIG_32BIT | 904 | #ifdef CONFIG_32BIT |
| 891 | if (test_thread_flag(TIF_32BIT_FPREGS)) { | 905 | if (test_tsk_thread_flag(child, TIF_32BIT_FPREGS)) { |
| 892 | /* | 906 | /* |
| 893 | * The odd registers are actually the high | 907 | * The odd registers are actually the high |
| 894 | * order bits of the values stored in the even | 908 | * order bits of the values stored in the even |
diff --git a/arch/mips/kernel/ptrace32.c b/arch/mips/kernel/ptrace32.c index 2b9260f92ccd..656a137c1fe2 100644 --- a/arch/mips/kernel/ptrace32.c +++ b/arch/mips/kernel/ptrace32.c | |||
| @@ -99,7 +99,7 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request, | |||
| 99 | break; | 99 | break; |
| 100 | } | 100 | } |
| 101 | fregs = get_fpu_regs(child); | 101 | fregs = get_fpu_regs(child); |
| 102 | if (test_thread_flag(TIF_32BIT_FPREGS)) { | 102 | if (test_tsk_thread_flag(child, TIF_32BIT_FPREGS)) { |
| 103 | /* | 103 | /* |
| 104 | * The odd registers are actually the high | 104 | * The odd registers are actually the high |
| 105 | * order bits of the values stored in the even | 105 | * order bits of the values stored in the even |
| @@ -212,7 +212,7 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request, | |||
| 212 | sizeof(child->thread.fpu)); | 212 | sizeof(child->thread.fpu)); |
| 213 | child->thread.fpu.fcr31 = 0; | 213 | child->thread.fpu.fcr31 = 0; |
| 214 | } | 214 | } |
| 215 | if (test_thread_flag(TIF_32BIT_FPREGS)) { | 215 | if (test_tsk_thread_flag(child, TIF_32BIT_FPREGS)) { |
| 216 | /* | 216 | /* |
| 217 | * The odd registers are actually the high | 217 | * The odd registers are actually the high |
| 218 | * order bits of the values stored in the even | 218 | * order bits of the values stored in the even |
diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c index 2549fdd27ee1..0f725e9cee8f 100644 --- a/arch/mips/kvm/mips.c +++ b/arch/mips/kvm/mips.c | |||
| @@ -45,7 +45,7 @@ struct kvm_stats_debugfs_item debugfs_entries[] = { | |||
| 45 | { "cache", VCPU_STAT(cache_exits), KVM_STAT_VCPU }, | 45 | { "cache", VCPU_STAT(cache_exits), KVM_STAT_VCPU }, |
| 46 | { "signal", VCPU_STAT(signal_exits), KVM_STAT_VCPU }, | 46 | { "signal", VCPU_STAT(signal_exits), KVM_STAT_VCPU }, |
| 47 | { "interrupt", VCPU_STAT(int_exits), KVM_STAT_VCPU }, | 47 | { "interrupt", VCPU_STAT(int_exits), KVM_STAT_VCPU }, |
| 48 | { "cop_unsuable", VCPU_STAT(cop_unusable_exits), KVM_STAT_VCPU }, | 48 | { "cop_unusable", VCPU_STAT(cop_unusable_exits), KVM_STAT_VCPU }, |
| 49 | { "tlbmod", VCPU_STAT(tlbmod_exits), KVM_STAT_VCPU }, | 49 | { "tlbmod", VCPU_STAT(tlbmod_exits), KVM_STAT_VCPU }, |
| 50 | { "tlbmiss_ld", VCPU_STAT(tlbmiss_ld_exits), KVM_STAT_VCPU }, | 50 | { "tlbmiss_ld", VCPU_STAT(tlbmiss_ld_exits), KVM_STAT_VCPU }, |
| 51 | { "tlbmiss_st", VCPU_STAT(tlbmiss_st_exits), KVM_STAT_VCPU }, | 51 | { "tlbmiss_st", VCPU_STAT(tlbmiss_st_exits), KVM_STAT_VCPU }, |
diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c index 6f534b209971..e12dfa48b478 100644 --- a/arch/mips/mm/c-r4k.c +++ b/arch/mips/mm/c-r4k.c | |||
| @@ -851,9 +851,12 @@ static void r4k_dma_cache_wback_inv(unsigned long addr, unsigned long size) | |||
| 851 | /* | 851 | /* |
| 852 | * Either no secondary cache or the available caches don't have the | 852 | * Either no secondary cache or the available caches don't have the |
| 853 | * subset property so we have to flush the primary caches | 853 | * subset property so we have to flush the primary caches |
| 854 | * explicitly | 854 | * explicitly. |
| 855 | * If we would need IPI to perform an INDEX-type operation, then | ||
| 856 | * we have to use the HIT-type alternative as IPI cannot be used | ||
| 857 | * here due to interrupts possibly being disabled. | ||
| 855 | */ | 858 | */ |
| 856 | if (size >= dcache_size) { | 859 | if (!r4k_op_needs_ipi(R4K_INDEX) && size >= dcache_size) { |
| 857 | r4k_blast_dcache(); | 860 | r4k_blast_dcache(); |
| 858 | } else { | 861 | } else { |
| 859 | R4600_HIT_CACHEOP_WAR_IMPL; | 862 | R4600_HIT_CACHEOP_WAR_IMPL; |
| @@ -890,7 +893,7 @@ static void r4k_dma_cache_inv(unsigned long addr, unsigned long size) | |||
| 890 | return; | 893 | return; |
| 891 | } | 894 | } |
| 892 | 895 | ||
| 893 | if (size >= dcache_size) { | 896 | if (!r4k_op_needs_ipi(R4K_INDEX) && size >= dcache_size) { |
| 894 | r4k_blast_dcache(); | 897 | r4k_blast_dcache(); |
| 895 | } else { | 898 | } else { |
| 896 | R4600_HIT_CACHEOP_WAR_IMPL; | 899 | R4600_HIT_CACHEOP_WAR_IMPL; |
diff --git a/arch/nds32/Kconfig b/arch/nds32/Kconfig index 249f38d3388f..b7404f2dcf5b 100644 --- a/arch/nds32/Kconfig +++ b/arch/nds32/Kconfig | |||
| @@ -9,6 +9,12 @@ config NDS32 | |||
| 9 | select CLKSRC_MMIO | 9 | select CLKSRC_MMIO |
| 10 | select CLONE_BACKWARDS | 10 | select CLONE_BACKWARDS |
| 11 | select COMMON_CLK | 11 | select COMMON_CLK |
| 12 | select GENERIC_ASHLDI3 | ||
| 13 | select GENERIC_ASHRDI3 | ||
| 14 | select GENERIC_LSHRDI3 | ||
| 15 | select GENERIC_CMPDI2 | ||
| 16 | select GENERIC_MULDI3 | ||
| 17 | select GENERIC_UCMPDI2 | ||
| 12 | select GENERIC_ATOMIC64 | 18 | select GENERIC_ATOMIC64 |
| 13 | select GENERIC_CPU_DEVICES | 19 | select GENERIC_CPU_DEVICES |
| 14 | select GENERIC_CLOCKEVENTS | 20 | select GENERIC_CLOCKEVENTS |
| @@ -82,6 +88,7 @@ endmenu | |||
| 82 | 88 | ||
| 83 | menu "Kernel Features" | 89 | menu "Kernel Features" |
| 84 | source "kernel/Kconfig.preempt" | 90 | source "kernel/Kconfig.preempt" |
| 91 | source "kernel/Kconfig.freezer" | ||
| 85 | source "mm/Kconfig" | 92 | source "mm/Kconfig" |
| 86 | source "kernel/Kconfig.hz" | 93 | source "kernel/Kconfig.hz" |
| 87 | endmenu | 94 | endmenu |
diff --git a/arch/nds32/Kconfig.cpu b/arch/nds32/Kconfig.cpu index ba44cc539da9..b8c8984d1456 100644 --- a/arch/nds32/Kconfig.cpu +++ b/arch/nds32/Kconfig.cpu | |||
| @@ -1,10 +1,11 @@ | |||
| 1 | comment "Processor Features" | 1 | comment "Processor Features" |
| 2 | 2 | ||
| 3 | config CPU_BIG_ENDIAN | 3 | config CPU_BIG_ENDIAN |
| 4 | bool "Big endian" | 4 | def_bool !CPU_LITTLE_ENDIAN |
| 5 | 5 | ||
| 6 | config CPU_LITTLE_ENDIAN | 6 | config CPU_LITTLE_ENDIAN |
| 7 | def_bool !CPU_BIG_ENDIAN | 7 | bool "Little endian" |
| 8 | default y | ||
| 8 | 9 | ||
| 9 | config HWZOL | 10 | config HWZOL |
| 10 | bool "hardware zero overhead loop support" | 11 | bool "hardware zero overhead loop support" |
diff --git a/arch/nds32/Makefile b/arch/nds32/Makefile index 91f933d5a962..513bb2e9baf9 100644 --- a/arch/nds32/Makefile +++ b/arch/nds32/Makefile | |||
| @@ -23,9 +23,6 @@ export TEXTADDR | |||
| 23 | # If we have a machine-specific directory, then include it in the build. | 23 | # If we have a machine-specific directory, then include it in the build. |
| 24 | core-y += arch/nds32/kernel/ arch/nds32/mm/ | 24 | core-y += arch/nds32/kernel/ arch/nds32/mm/ |
| 25 | libs-y += arch/nds32/lib/ | 25 | libs-y += arch/nds32/lib/ |
| 26 | LIBGCC_PATH := \ | ||
| 27 | $(shell $(CC) $(KBUILD_CFLAGS) $(KCFLAGS) -print-libgcc-file-name) | ||
| 28 | libs-y += $(LIBGCC_PATH) | ||
| 29 | 26 | ||
| 30 | ifneq '$(CONFIG_NDS32_BUILTIN_DTB)' '""' | 27 | ifneq '$(CONFIG_NDS32_BUILTIN_DTB)' '""' |
| 31 | BUILTIN_DTB := y | 28 | BUILTIN_DTB := y |
| @@ -35,8 +32,12 @@ endif | |||
| 35 | 32 | ||
| 36 | ifdef CONFIG_CPU_LITTLE_ENDIAN | 33 | ifdef CONFIG_CPU_LITTLE_ENDIAN |
| 37 | KBUILD_CFLAGS += $(call cc-option, -EL) | 34 | KBUILD_CFLAGS += $(call cc-option, -EL) |
| 35 | KBUILD_AFLAGS += $(call cc-option, -EL) | ||
| 36 | LDFLAGS += $(call cc-option, -EL) | ||
| 38 | else | 37 | else |
| 39 | KBUILD_CFLAGS += $(call cc-option, -EB) | 38 | KBUILD_CFLAGS += $(call cc-option, -EB) |
| 39 | KBUILD_AFLAGS += $(call cc-option, -EB) | ||
| 40 | LDFLAGS += $(call cc-option, -EB) | ||
| 40 | endif | 41 | endif |
| 41 | 42 | ||
| 42 | boot := arch/nds32/boot | 43 | boot := arch/nds32/boot |
diff --git a/arch/nds32/include/asm/Kbuild b/arch/nds32/include/asm/Kbuild index 06bdf8167f5a..142e612aa639 100644 --- a/arch/nds32/include/asm/Kbuild +++ b/arch/nds32/include/asm/Kbuild | |||
| @@ -16,6 +16,7 @@ generic-y += dma.h | |||
| 16 | generic-y += emergency-restart.h | 16 | generic-y += emergency-restart.h |
| 17 | generic-y += errno.h | 17 | generic-y += errno.h |
| 18 | generic-y += exec.h | 18 | generic-y += exec.h |
| 19 | generic-y += export.h | ||
| 19 | generic-y += fb.h | 20 | generic-y += fb.h |
| 20 | generic-y += fcntl.h | 21 | generic-y += fcntl.h |
| 21 | generic-y += ftrace.h | 22 | generic-y += ftrace.h |
| @@ -49,6 +50,7 @@ generic-y += switch_to.h | |||
| 49 | generic-y += timex.h | 50 | generic-y += timex.h |
| 50 | generic-y += topology.h | 51 | generic-y += topology.h |
| 51 | generic-y += trace_clock.h | 52 | generic-y += trace_clock.h |
| 53 | generic-y += xor.h | ||
| 52 | generic-y += unaligned.h | 54 | generic-y += unaligned.h |
| 53 | generic-y += user.h | 55 | generic-y += user.h |
| 54 | generic-y += vga.h | 56 | generic-y += vga.h |
diff --git a/arch/nds32/include/asm/bitfield.h b/arch/nds32/include/asm/bitfield.h index c73f71d67744..8e84fc385b94 100644 --- a/arch/nds32/include/asm/bitfield.h +++ b/arch/nds32/include/asm/bitfield.h | |||
| @@ -336,7 +336,7 @@ | |||
| 336 | #define INT_MASK_mskIDIVZE ( 0x1 << INT_MASK_offIDIVZE ) | 336 | #define INT_MASK_mskIDIVZE ( 0x1 << INT_MASK_offIDIVZE ) |
| 337 | #define INT_MASK_mskDSSIM ( 0x1 << INT_MASK_offDSSIM ) | 337 | #define INT_MASK_mskDSSIM ( 0x1 << INT_MASK_offDSSIM ) |
| 338 | 338 | ||
| 339 | #define INT_MASK_INITAIAL_VAL 0x10003 | 339 | #define INT_MASK_INITAIAL_VAL (INT_MASK_mskDSSIM|INT_MASK_mskIDIVZE) |
| 340 | 340 | ||
| 341 | /****************************************************************************** | 341 | /****************************************************************************** |
| 342 | * ir15: INT_PEND (Interrupt Pending Register) | 342 | * ir15: INT_PEND (Interrupt Pending Register) |
| @@ -396,6 +396,7 @@ | |||
| 396 | #define MMU_CTL_D8KB 1 | 396 | #define MMU_CTL_D8KB 1 |
| 397 | #define MMU_CTL_UNA ( 0x1 << MMU_CTL_offUNA ) | 397 | #define MMU_CTL_UNA ( 0x1 << MMU_CTL_offUNA ) |
| 398 | 398 | ||
| 399 | #define MMU_CTL_CACHEABLE_NON 0 | ||
| 399 | #define MMU_CTL_CACHEABLE_WB 2 | 400 | #define MMU_CTL_CACHEABLE_WB 2 |
| 400 | #define MMU_CTL_CACHEABLE_WT 3 | 401 | #define MMU_CTL_CACHEABLE_WT 3 |
| 401 | 402 | ||
diff --git a/arch/nds32/include/asm/cacheflush.h b/arch/nds32/include/asm/cacheflush.h index 1240f148ec0f..10b48f0d8e85 100644 --- a/arch/nds32/include/asm/cacheflush.h +++ b/arch/nds32/include/asm/cacheflush.h | |||
| @@ -32,6 +32,8 @@ void flush_anon_page(struct vm_area_struct *vma, | |||
| 32 | 32 | ||
| 33 | #define ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE | 33 | #define ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE |
| 34 | void flush_kernel_dcache_page(struct page *page); | 34 | void flush_kernel_dcache_page(struct page *page); |
| 35 | void flush_kernel_vmap_range(void *addr, int size); | ||
| 36 | void invalidate_kernel_vmap_range(void *addr, int size); | ||
| 35 | void flush_icache_range(unsigned long start, unsigned long end); | 37 | void flush_icache_range(unsigned long start, unsigned long end); |
| 36 | void flush_icache_page(struct vm_area_struct *vma, struct page *page); | 38 | void flush_icache_page(struct vm_area_struct *vma, struct page *page); |
| 37 | #define flush_dcache_mmap_lock(mapping) xa_lock_irq(&(mapping)->i_pages) | 39 | #define flush_dcache_mmap_lock(mapping) xa_lock_irq(&(mapping)->i_pages) |
diff --git a/arch/nds32/include/asm/io.h b/arch/nds32/include/asm/io.h index 966e71b3c960..71cd226d6863 100644 --- a/arch/nds32/include/asm/io.h +++ b/arch/nds32/include/asm/io.h | |||
| @@ -4,6 +4,8 @@ | |||
| 4 | #ifndef __ASM_NDS32_IO_H | 4 | #ifndef __ASM_NDS32_IO_H |
| 5 | #define __ASM_NDS32_IO_H | 5 | #define __ASM_NDS32_IO_H |
| 6 | 6 | ||
| 7 | #include <linux/types.h> | ||
| 8 | |||
| 7 | extern void iounmap(volatile void __iomem *addr); | 9 | extern void iounmap(volatile void __iomem *addr); |
| 8 | #define __raw_writeb __raw_writeb | 10 | #define __raw_writeb __raw_writeb |
| 9 | static inline void __raw_writeb(u8 val, volatile void __iomem *addr) | 11 | static inline void __raw_writeb(u8 val, volatile void __iomem *addr) |
diff --git a/arch/nds32/include/asm/page.h b/arch/nds32/include/asm/page.h index e27365c097b6..947f0491c9a7 100644 --- a/arch/nds32/include/asm/page.h +++ b/arch/nds32/include/asm/page.h | |||
| @@ -27,6 +27,9 @@ extern void copy_user_highpage(struct page *to, struct page *from, | |||
| 27 | unsigned long vaddr, struct vm_area_struct *vma); | 27 | unsigned long vaddr, struct vm_area_struct *vma); |
| 28 | extern void clear_user_highpage(struct page *page, unsigned long vaddr); | 28 | extern void clear_user_highpage(struct page *page, unsigned long vaddr); |
| 29 | 29 | ||
| 30 | void copy_user_page(void *vto, void *vfrom, unsigned long vaddr, | ||
| 31 | struct page *to); | ||
| 32 | void clear_user_page(void *addr, unsigned long vaddr, struct page *page); | ||
| 30 | #define __HAVE_ARCH_COPY_USER_HIGHPAGE | 33 | #define __HAVE_ARCH_COPY_USER_HIGHPAGE |
| 31 | #define clear_user_highpage clear_user_highpage | 34 | #define clear_user_highpage clear_user_highpage |
| 32 | #else | 35 | #else |
diff --git a/arch/nds32/include/asm/pgtable.h b/arch/nds32/include/asm/pgtable.h index 6783937edbeb..d3e19a55cf53 100644 --- a/arch/nds32/include/asm/pgtable.h +++ b/arch/nds32/include/asm/pgtable.h | |||
| @@ -152,6 +152,7 @@ extern void __pgd_error(const char *file, int line, unsigned long val); | |||
| 152 | #define PAGE_CACHE_L1 __pgprot(_HAVE_PAGE_L | _PAGE_V | _PAGE_M_KRW | _PAGE_D | _PAGE_E | _PAGE_G | _PAGE_CACHE) | 152 | #define PAGE_CACHE_L1 __pgprot(_HAVE_PAGE_L | _PAGE_V | _PAGE_M_KRW | _PAGE_D | _PAGE_E | _PAGE_G | _PAGE_CACHE) |
| 153 | #define PAGE_MEMORY __pgprot(_HAVE_PAGE_L | _PAGE_V | _PAGE_M_KRW | _PAGE_D | _PAGE_E | _PAGE_G | _PAGE_CACHE_SHRD) | 153 | #define PAGE_MEMORY __pgprot(_HAVE_PAGE_L | _PAGE_V | _PAGE_M_KRW | _PAGE_D | _PAGE_E | _PAGE_G | _PAGE_CACHE_SHRD) |
| 154 | #define PAGE_KERNEL __pgprot(_PAGE_V | _PAGE_M_KRW | _PAGE_D | _PAGE_E | _PAGE_G | _PAGE_CACHE_SHRD) | 154 | #define PAGE_KERNEL __pgprot(_PAGE_V | _PAGE_M_KRW | _PAGE_D | _PAGE_E | _PAGE_G | _PAGE_CACHE_SHRD) |
| 155 | #define PAGE_SHARED __pgprot(_PAGE_V | _PAGE_M_URW_KRW | _PAGE_D | _PAGE_CACHE_SHRD) | ||
| 155 | #define PAGE_DEVICE __pgprot(_PAGE_V | _PAGE_M_KRW | _PAGE_D | _PAGE_G | _PAGE_C_DEV) | 156 | #define PAGE_DEVICE __pgprot(_PAGE_V | _PAGE_M_KRW | _PAGE_D | _PAGE_G | _PAGE_C_DEV) |
| 156 | #endif /* __ASSEMBLY__ */ | 157 | #endif /* __ASSEMBLY__ */ |
| 157 | 158 | ||
diff --git a/arch/nds32/kernel/ex-entry.S b/arch/nds32/kernel/ex-entry.S index a72e83d804f5..b8ae4e9a6b93 100644 --- a/arch/nds32/kernel/ex-entry.S +++ b/arch/nds32/kernel/ex-entry.S | |||
| @@ -118,7 +118,7 @@ common_exception_handler: | |||
| 118 | /* interrupt */ | 118 | /* interrupt */ |
| 119 | 2: | 119 | 2: |
| 120 | #ifdef CONFIG_TRACE_IRQFLAGS | 120 | #ifdef CONFIG_TRACE_IRQFLAGS |
| 121 | jal arch_trace_hardirqs_off | 121 | jal trace_hardirqs_off |
| 122 | #endif | 122 | #endif |
| 123 | move $r0, $sp | 123 | move $r0, $sp |
| 124 | sethi $lp, hi20(ret_from_intr) | 124 | sethi $lp, hi20(ret_from_intr) |
diff --git a/arch/nds32/kernel/head.S b/arch/nds32/kernel/head.S index 71f57bd70f3b..c5fdae174ced 100644 --- a/arch/nds32/kernel/head.S +++ b/arch/nds32/kernel/head.S | |||
| @@ -57,14 +57,32 @@ _nodtb: | |||
| 57 | isb | 57 | isb |
| 58 | mtsr $r4, $L1_PPTB ! load page table pointer\n" | 58 | mtsr $r4, $L1_PPTB ! load page table pointer\n" |
| 59 | 59 | ||
| 60 | /* set NTC0 cacheable/writeback, mutliple page size in use */ | 60 | #ifdef CONFIG_CPU_DCACHE_DISABLE |
| 61 | #define MMU_CTL_NTCC MMU_CTL_CACHEABLE_NON | ||
| 62 | #else | ||
| 63 | #ifdef CONFIG_CPU_DCACHE_WRITETHROUGH | ||
| 64 | #define MMU_CTL_NTCC MMU_CTL_CACHEABLE_WT | ||
| 65 | #else | ||
| 66 | #define MMU_CTL_NTCC MMU_CTL_CACHEABLE_WB | ||
| 67 | #endif | ||
| 68 | #endif | ||
| 69 | |||
| 70 | /* set NTC cacheability, mutliple page size in use */ | ||
| 61 | mfsr $r3, $MMU_CTL | 71 | mfsr $r3, $MMU_CTL |
| 62 | li $r0, #~MMU_CTL_mskNTC0 | 72 | #if CONFIG_MEMORY_START >= 0xc0000000 |
| 63 | and $r3, $r3, $r0 | 73 | ori $r3, $r3, (MMU_CTL_NTCC << MMU_CTL_offNTC3) |
| 74 | #elif CONFIG_MEMORY_START >= 0x80000000 | ||
| 75 | ori $r3, $r3, (MMU_CTL_NTCC << MMU_CTL_offNTC2) | ||
| 76 | #elif CONFIG_MEMORY_START >= 0x40000000 | ||
| 77 | ori $r3, $r3, (MMU_CTL_NTCC << MMU_CTL_offNTC1) | ||
| 78 | #else | ||
| 79 | ori $r3, $r3, (MMU_CTL_NTCC << MMU_CTL_offNTC0) | ||
| 80 | #endif | ||
| 81 | |||
| 64 | #ifdef CONFIG_ANDES_PAGE_SIZE_4KB | 82 | #ifdef CONFIG_ANDES_PAGE_SIZE_4KB |
| 65 | ori $r3, $r3, #(MMU_CTL_mskMPZIU|(MMU_CTL_CACHEABLE_WB << MMU_CTL_offNTC0)) | 83 | ori $r3, $r3, #(MMU_CTL_mskMPZIU) |
| 66 | #else | 84 | #else |
| 67 | ori $r3, $r3, #(MMU_CTL_mskMPZIU|(MMU_CTL_CACHEABLE_WB << MMU_CTL_offNTC0)|MMU_CTL_D8KB) | 85 | ori $r3, $r3, #(MMU_CTL_mskMPZIU|MMU_CTL_D8KB) |
| 68 | #endif | 86 | #endif |
| 69 | #ifdef CONFIG_HW_SUPPORT_UNALIGNMENT_ACCESS | 87 | #ifdef CONFIG_HW_SUPPORT_UNALIGNMENT_ACCESS |
| 70 | li $r0, #MMU_CTL_UNA | 88 | li $r0, #MMU_CTL_UNA |
diff --git a/arch/nds32/kernel/setup.c b/arch/nds32/kernel/setup.c index ba910e9e4ecb..2f5b2ccebe47 100644 --- a/arch/nds32/kernel/setup.c +++ b/arch/nds32/kernel/setup.c | |||
| @@ -293,6 +293,9 @@ void __init setup_arch(char **cmdline_p) | |||
| 293 | /* paging_init() sets up the MMU and marks all pages as reserved */ | 293 | /* paging_init() sets up the MMU and marks all pages as reserved */ |
| 294 | paging_init(); | 294 | paging_init(); |
| 295 | 295 | ||
| 296 | /* invalidate all TLB entries because the new mapping is created */ | ||
| 297 | __nds32__tlbop_flua(); | ||
| 298 | |||
| 296 | /* use generic way to parse */ | 299 | /* use generic way to parse */ |
| 297 | parse_early_param(); | 300 | parse_early_param(); |
| 298 | 301 | ||
diff --git a/arch/nds32/kernel/stacktrace.c b/arch/nds32/kernel/stacktrace.c index bc70113c0e84..8b231e910ea6 100644 --- a/arch/nds32/kernel/stacktrace.c +++ b/arch/nds32/kernel/stacktrace.c | |||
| @@ -9,6 +9,7 @@ void save_stack_trace(struct stack_trace *trace) | |||
| 9 | { | 9 | { |
| 10 | save_stack_trace_tsk(current, trace); | 10 | save_stack_trace_tsk(current, trace); |
| 11 | } | 11 | } |
| 12 | EXPORT_SYMBOL_GPL(save_stack_trace); | ||
| 12 | 13 | ||
| 13 | void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace) | 14 | void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace) |
| 14 | { | 15 | { |
| @@ -45,3 +46,4 @@ void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace) | |||
| 45 | fpn = (unsigned long *)fpp; | 46 | fpn = (unsigned long *)fpp; |
| 46 | } | 47 | } |
| 47 | } | 48 | } |
| 49 | EXPORT_SYMBOL_GPL(save_stack_trace_tsk); | ||
diff --git a/arch/nds32/kernel/vdso.c b/arch/nds32/kernel/vdso.c index f1198d7a5654..016f15891f6d 100644 --- a/arch/nds32/kernel/vdso.c +++ b/arch/nds32/kernel/vdso.c | |||
| @@ -23,7 +23,7 @@ | |||
| 23 | #include <asm/vdso_timer_info.h> | 23 | #include <asm/vdso_timer_info.h> |
| 24 | #include <asm/cache_info.h> | 24 | #include <asm/cache_info.h> |
| 25 | extern struct cache_info L1_cache_info[2]; | 25 | extern struct cache_info L1_cache_info[2]; |
| 26 | extern char vdso_start, vdso_end; | 26 | extern char vdso_start[], vdso_end[]; |
| 27 | static unsigned long vdso_pages __ro_after_init; | 27 | static unsigned long vdso_pages __ro_after_init; |
| 28 | static unsigned long timer_mapping_base; | 28 | static unsigned long timer_mapping_base; |
| 29 | 29 | ||
| @@ -66,16 +66,16 @@ static int __init vdso_init(void) | |||
| 66 | int i; | 66 | int i; |
| 67 | struct page **vdso_pagelist; | 67 | struct page **vdso_pagelist; |
| 68 | 68 | ||
| 69 | if (memcmp(&vdso_start, "\177ELF", 4)) { | 69 | if (memcmp(vdso_start, "\177ELF", 4)) { |
| 70 | pr_err("vDSO is not a valid ELF object!\n"); | 70 | pr_err("vDSO is not a valid ELF object!\n"); |
| 71 | return -EINVAL; | 71 | return -EINVAL; |
| 72 | } | 72 | } |
| 73 | /* Creat a timer io mapping to get clock cycles counter */ | 73 | /* Creat a timer io mapping to get clock cycles counter */ |
| 74 | get_timer_node_info(); | 74 | get_timer_node_info(); |
| 75 | 75 | ||
| 76 | vdso_pages = (&vdso_end - &vdso_start) >> PAGE_SHIFT; | 76 | vdso_pages = (vdso_end - vdso_start) >> PAGE_SHIFT; |
| 77 | pr_info("vdso: %ld pages (%ld code @ %p, %ld data @ %p)\n", | 77 | pr_info("vdso: %ld pages (%ld code @ %p, %ld data @ %p)\n", |
| 78 | vdso_pages + 1, vdso_pages, &vdso_start, 1L, vdso_data); | 78 | vdso_pages + 1, vdso_pages, vdso_start, 1L, vdso_data); |
| 79 | 79 | ||
| 80 | /* Allocate the vDSO pagelist */ | 80 | /* Allocate the vDSO pagelist */ |
| 81 | vdso_pagelist = kcalloc(vdso_pages, sizeof(struct page *), GFP_KERNEL); | 81 | vdso_pagelist = kcalloc(vdso_pages, sizeof(struct page *), GFP_KERNEL); |
| @@ -83,7 +83,7 @@ static int __init vdso_init(void) | |||
| 83 | return -ENOMEM; | 83 | return -ENOMEM; |
| 84 | 84 | ||
| 85 | for (i = 0; i < vdso_pages; i++) | 85 | for (i = 0; i < vdso_pages; i++) |
| 86 | vdso_pagelist[i] = virt_to_page(&vdso_start + i * PAGE_SIZE); | 86 | vdso_pagelist[i] = virt_to_page(vdso_start + i * PAGE_SIZE); |
| 87 | vdso_spec[1].pages = &vdso_pagelist[0]; | 87 | vdso_spec[1].pages = &vdso_pagelist[0]; |
| 88 | 88 | ||
| 89 | return 0; | 89 | return 0; |
diff --git a/arch/nds32/lib/copy_page.S b/arch/nds32/lib/copy_page.S index 4a2ff85f17ee..f8701ed161a8 100644 --- a/arch/nds32/lib/copy_page.S +++ b/arch/nds32/lib/copy_page.S | |||
| @@ -2,6 +2,7 @@ | |||
| 2 | // Copyright (C) 2005-2017 Andes Technology Corporation | 2 | // Copyright (C) 2005-2017 Andes Technology Corporation |
| 3 | 3 | ||
| 4 | #include <linux/linkage.h> | 4 | #include <linux/linkage.h> |
| 5 | #include <asm/export.h> | ||
| 5 | #include <asm/page.h> | 6 | #include <asm/page.h> |
| 6 | 7 | ||
| 7 | .text | 8 | .text |
| @@ -16,6 +17,7 @@ ENTRY(copy_page) | |||
| 16 | popm $r2, $r10 | 17 | popm $r2, $r10 |
| 17 | ret | 18 | ret |
| 18 | ENDPROC(copy_page) | 19 | ENDPROC(copy_page) |
| 20 | EXPORT_SYMBOL(copy_page) | ||
| 19 | 21 | ||
| 20 | ENTRY(clear_page) | 22 | ENTRY(clear_page) |
| 21 | pushm $r1, $r9 | 23 | pushm $r1, $r9 |
| @@ -35,3 +37,4 @@ ENTRY(clear_page) | |||
| 35 | popm $r1, $r9 | 37 | popm $r1, $r9 |
| 36 | ret | 38 | ret |
| 37 | ENDPROC(clear_page) | 39 | ENDPROC(clear_page) |
| 40 | EXPORT_SYMBOL(clear_page) | ||
diff --git a/arch/nds32/mm/alignment.c b/arch/nds32/mm/alignment.c index b96a01b10ca7..e1aed9dc692d 100644 --- a/arch/nds32/mm/alignment.c +++ b/arch/nds32/mm/alignment.c | |||
| @@ -19,7 +19,7 @@ | |||
| 19 | #define RA(inst) (((inst) >> 15) & 0x1FUL) | 19 | #define RA(inst) (((inst) >> 15) & 0x1FUL) |
| 20 | #define RB(inst) (((inst) >> 10) & 0x1FUL) | 20 | #define RB(inst) (((inst) >> 10) & 0x1FUL) |
| 21 | #define SV(inst) (((inst) >> 8) & 0x3UL) | 21 | #define SV(inst) (((inst) >> 8) & 0x3UL) |
| 22 | #define IMM(inst) (((inst) >> 0) & 0x3FFFUL) | 22 | #define IMM(inst) (((inst) >> 0) & 0x7FFFUL) |
| 23 | 23 | ||
| 24 | #define RA3(inst) (((inst) >> 3) & 0x7UL) | 24 | #define RA3(inst) (((inst) >> 3) & 0x7UL) |
| 25 | #define RT3(inst) (((inst) >> 6) & 0x7UL) | 25 | #define RT3(inst) (((inst) >> 6) & 0x7UL) |
| @@ -28,6 +28,9 @@ | |||
| 28 | #define RA5(inst) (((inst) >> 0) & 0x1FUL) | 28 | #define RA5(inst) (((inst) >> 0) & 0x1FUL) |
| 29 | #define RT4(inst) (((inst) >> 5) & 0xFUL) | 29 | #define RT4(inst) (((inst) >> 5) & 0xFUL) |
| 30 | 30 | ||
| 31 | #define GET_IMMSVAL(imm_value) \ | ||
| 32 | (((imm_value >> 14) & 0x1) ? (imm_value - 0x8000) : imm_value) | ||
| 33 | |||
| 31 | #define __get8_data(val,addr,err) \ | 34 | #define __get8_data(val,addr,err) \ |
| 32 | __asm__( \ | 35 | __asm__( \ |
| 33 | "1: lbi.bi %1, [%2], #1\n" \ | 36 | "1: lbi.bi %1, [%2], #1\n" \ |
| @@ -467,7 +470,7 @@ static inline int do_32(unsigned long inst, struct pt_regs *regs) | |||
| 467 | } | 470 | } |
| 468 | 471 | ||
| 469 | if (imm) | 472 | if (imm) |
| 470 | shift = IMM(inst) * len; | 473 | shift = GET_IMMSVAL(IMM(inst)) * len; |
| 471 | else | 474 | else |
| 472 | shift = *idx_to_addr(regs, RB(inst)) << SV(inst); | 475 | shift = *idx_to_addr(regs, RB(inst)) << SV(inst); |
| 473 | 476 | ||
| @@ -552,7 +555,7 @@ static struct ctl_table alignment_tbl[3] = { | |||
| 552 | 555 | ||
| 553 | static struct ctl_table nds32_sysctl_table[2] = { | 556 | static struct ctl_table nds32_sysctl_table[2] = { |
| 554 | { | 557 | { |
| 555 | .procname = "unaligned_acess", | 558 | .procname = "unaligned_access", |
| 556 | .mode = 0555, | 559 | .mode = 0555, |
| 557 | .child = alignment_tbl}, | 560 | .child = alignment_tbl}, |
| 558 | {} | 561 | {} |
diff --git a/arch/nds32/mm/cacheflush.c b/arch/nds32/mm/cacheflush.c index 6eb786a399a2..ce8fd34497bf 100644 --- a/arch/nds32/mm/cacheflush.c +++ b/arch/nds32/mm/cacheflush.c | |||
| @@ -147,6 +147,25 @@ void flush_cache_vunmap(unsigned long start, unsigned long end) | |||
| 147 | cpu_icache_inval_all(); | 147 | cpu_icache_inval_all(); |
| 148 | } | 148 | } |
| 149 | 149 | ||
| 150 | void copy_user_page(void *vto, void *vfrom, unsigned long vaddr, | ||
| 151 | struct page *to) | ||
| 152 | { | ||
| 153 | cpu_dcache_wbinval_page((unsigned long)vaddr); | ||
| 154 | cpu_icache_inval_page((unsigned long)vaddr); | ||
| 155 | copy_page(vto, vfrom); | ||
| 156 | cpu_dcache_wbinval_page((unsigned long)vto); | ||
| 157 | cpu_icache_inval_page((unsigned long)vto); | ||
| 158 | } | ||
| 159 | |||
| 160 | void clear_user_page(void *addr, unsigned long vaddr, struct page *page) | ||
| 161 | { | ||
| 162 | cpu_dcache_wbinval_page((unsigned long)vaddr); | ||
| 163 | cpu_icache_inval_page((unsigned long)vaddr); | ||
| 164 | clear_page(addr); | ||
| 165 | cpu_dcache_wbinval_page((unsigned long)addr); | ||
| 166 | cpu_icache_inval_page((unsigned long)addr); | ||
| 167 | } | ||
| 168 | |||
| 150 | void copy_user_highpage(struct page *to, struct page *from, | 169 | void copy_user_highpage(struct page *to, struct page *from, |
| 151 | unsigned long vaddr, struct vm_area_struct *vma) | 170 | unsigned long vaddr, struct vm_area_struct *vma) |
| 152 | { | 171 | { |
| @@ -156,11 +175,9 @@ void copy_user_highpage(struct page *to, struct page *from, | |||
| 156 | pto = page_to_phys(to); | 175 | pto = page_to_phys(to); |
| 157 | pfrom = page_to_phys(from); | 176 | pfrom = page_to_phys(from); |
| 158 | 177 | ||
| 178 | local_irq_save(flags); | ||
| 159 | if (aliasing(vaddr, (unsigned long)kfrom)) | 179 | if (aliasing(vaddr, (unsigned long)kfrom)) |
| 160 | cpu_dcache_wb_page((unsigned long)kfrom); | 180 | cpu_dcache_wb_page((unsigned long)kfrom); |
| 161 | if (aliasing(vaddr, (unsigned long)kto)) | ||
| 162 | cpu_dcache_inval_page((unsigned long)kto); | ||
| 163 | local_irq_save(flags); | ||
| 164 | vto = kremap0(vaddr, pto); | 181 | vto = kremap0(vaddr, pto); |
| 165 | vfrom = kremap1(vaddr, pfrom); | 182 | vfrom = kremap1(vaddr, pfrom); |
| 166 | copy_page((void *)vto, (void *)vfrom); | 183 | copy_page((void *)vto, (void *)vfrom); |
| @@ -198,21 +215,25 @@ void flush_dcache_page(struct page *page) | |||
| 198 | if (mapping && !mapping_mapped(mapping)) | 215 | if (mapping && !mapping_mapped(mapping)) |
| 199 | set_bit(PG_dcache_dirty, &page->flags); | 216 | set_bit(PG_dcache_dirty, &page->flags); |
| 200 | else { | 217 | else { |
| 201 | int i, pc; | 218 | unsigned long kaddr, flags; |
| 202 | unsigned long vto, kaddr, flags; | 219 | |
| 203 | kaddr = (unsigned long)page_address(page); | 220 | kaddr = (unsigned long)page_address(page); |
| 204 | cpu_dcache_wbinval_page(kaddr); | ||
| 205 | pc = CACHE_SET(DCACHE) * CACHE_LINE_SIZE(DCACHE) / PAGE_SIZE; | ||
| 206 | local_irq_save(flags); | 221 | local_irq_save(flags); |
| 207 | for (i = 0; i < pc; i++) { | 222 | cpu_dcache_wbinval_page(kaddr); |
| 208 | vto = | 223 | if (mapping) { |
| 209 | kremap0(kaddr + i * PAGE_SIZE, page_to_phys(page)); | 224 | unsigned long vaddr, kto; |
| 210 | cpu_dcache_wbinval_page(vto); | 225 | |
| 211 | kunmap01(vto); | 226 | vaddr = page->index << PAGE_SHIFT; |
| 227 | if (aliasing(vaddr, kaddr)) { | ||
| 228 | kto = kremap0(vaddr, page_to_phys(page)); | ||
| 229 | cpu_dcache_wbinval_page(kto); | ||
| 230 | kunmap01(kto); | ||
| 231 | } | ||
| 212 | } | 232 | } |
| 213 | local_irq_restore(flags); | 233 | local_irq_restore(flags); |
| 214 | } | 234 | } |
| 215 | } | 235 | } |
| 236 | EXPORT_SYMBOL(flush_dcache_page); | ||
| 216 | 237 | ||
| 217 | void copy_to_user_page(struct vm_area_struct *vma, struct page *page, | 238 | void copy_to_user_page(struct vm_area_struct *vma, struct page *page, |
| 218 | unsigned long vaddr, void *dst, void *src, int len) | 239 | unsigned long vaddr, void *dst, void *src, int len) |
| @@ -251,7 +272,7 @@ void copy_from_user_page(struct vm_area_struct *vma, struct page *page, | |||
| 251 | void flush_anon_page(struct vm_area_struct *vma, | 272 | void flush_anon_page(struct vm_area_struct *vma, |
| 252 | struct page *page, unsigned long vaddr) | 273 | struct page *page, unsigned long vaddr) |
| 253 | { | 274 | { |
| 254 | unsigned long flags; | 275 | unsigned long kaddr, flags, ktmp; |
| 255 | if (!PageAnon(page)) | 276 | if (!PageAnon(page)) |
| 256 | return; | 277 | return; |
| 257 | 278 | ||
| @@ -261,7 +282,12 @@ void flush_anon_page(struct vm_area_struct *vma, | |||
| 261 | local_irq_save(flags); | 282 | local_irq_save(flags); |
| 262 | if (vma->vm_flags & VM_EXEC) | 283 | if (vma->vm_flags & VM_EXEC) |
| 263 | cpu_icache_inval_page(vaddr & PAGE_MASK); | 284 | cpu_icache_inval_page(vaddr & PAGE_MASK); |
| 264 | cpu_dcache_wbinval_page((unsigned long)page_address(page)); | 285 | kaddr = (unsigned long)page_address(page); |
| 286 | if (aliasing(vaddr, kaddr)) { | ||
| 287 | ktmp = kremap0(vaddr, page_to_phys(page)); | ||
| 288 | cpu_dcache_wbinval_page(ktmp); | ||
| 289 | kunmap01(ktmp); | ||
| 290 | } | ||
| 265 | local_irq_restore(flags); | 291 | local_irq_restore(flags); |
| 266 | } | 292 | } |
| 267 | 293 | ||
| @@ -272,6 +298,25 @@ void flush_kernel_dcache_page(struct page *page) | |||
| 272 | cpu_dcache_wbinval_page((unsigned long)page_address(page)); | 298 | cpu_dcache_wbinval_page((unsigned long)page_address(page)); |
| 273 | local_irq_restore(flags); | 299 | local_irq_restore(flags); |
| 274 | } | 300 | } |
| 301 | EXPORT_SYMBOL(flush_kernel_dcache_page); | ||
| 302 | |||
| 303 | void flush_kernel_vmap_range(void *addr, int size) | ||
| 304 | { | ||
| 305 | unsigned long flags; | ||
| 306 | local_irq_save(flags); | ||
| 307 | cpu_dcache_wb_range((unsigned long)addr, (unsigned long)addr + size); | ||
| 308 | local_irq_restore(flags); | ||
| 309 | } | ||
| 310 | EXPORT_SYMBOL(flush_kernel_vmap_range); | ||
| 311 | |||
| 312 | void invalidate_kernel_vmap_range(void *addr, int size) | ||
| 313 | { | ||
| 314 | unsigned long flags; | ||
| 315 | local_irq_save(flags); | ||
| 316 | cpu_dcache_inval_range((unsigned long)addr, (unsigned long)addr + size); | ||
| 317 | local_irq_restore(flags); | ||
| 318 | } | ||
| 319 | EXPORT_SYMBOL(invalidate_kernel_vmap_range); | ||
| 275 | 320 | ||
| 276 | void flush_icache_range(unsigned long start, unsigned long end) | 321 | void flush_icache_range(unsigned long start, unsigned long end) |
| 277 | { | 322 | { |
| @@ -283,6 +328,7 @@ void flush_icache_range(unsigned long start, unsigned long end) | |||
| 283 | cpu_cache_wbinval_range(start, end, 1); | 328 | cpu_cache_wbinval_range(start, end, 1); |
| 284 | local_irq_restore(flags); | 329 | local_irq_restore(flags); |
| 285 | } | 330 | } |
| 331 | EXPORT_SYMBOL(flush_icache_range); | ||
| 286 | 332 | ||
| 287 | void flush_icache_page(struct vm_area_struct *vma, struct page *page) | 333 | void flush_icache_page(struct vm_area_struct *vma, struct page *page) |
| 288 | { | 334 | { |
diff --git a/arch/nds32/mm/init.c b/arch/nds32/mm/init.c index 93ee0160720b..c713d2ad55dc 100644 --- a/arch/nds32/mm/init.c +++ b/arch/nds32/mm/init.c | |||
| @@ -30,6 +30,7 @@ extern unsigned long phys_initrd_size; | |||
| 30 | * zero-initialized data and COW. | 30 | * zero-initialized data and COW. |
| 31 | */ | 31 | */ |
| 32 | struct page *empty_zero_page; | 32 | struct page *empty_zero_page; |
| 33 | EXPORT_SYMBOL(empty_zero_page); | ||
| 33 | 34 | ||
| 34 | static void __init zone_sizes_init(void) | 35 | static void __init zone_sizes_init(void) |
| 35 | { | 36 | { |
diff --git a/arch/parisc/kernel/drivers.c b/arch/parisc/kernel/drivers.c index ee5a78a151a6..e0e1c9775c32 100644 --- a/arch/parisc/kernel/drivers.c +++ b/arch/parisc/kernel/drivers.c | |||
| @@ -268,7 +268,7 @@ static struct parisc_device *find_device_by_addr(unsigned long hpa) | |||
| 268 | * Walks up the device tree looking for a device of the specified type. | 268 | * Walks up the device tree looking for a device of the specified type. |
| 269 | * If it finds it, it returns it. If not, it returns NULL. | 269 | * If it finds it, it returns it. If not, it returns NULL. |
| 270 | */ | 270 | */ |
| 271 | const struct parisc_device * __init | 271 | const struct parisc_device * |
| 272 | find_pa_parent_type(const struct parisc_device *padev, int type) | 272 | find_pa_parent_type(const struct parisc_device *padev, int type) |
| 273 | { | 273 | { |
| 274 | const struct device *dev = &padev->dev; | 274 | const struct device *dev = &padev->dev; |
diff --git a/arch/parisc/kernel/smp.c b/arch/parisc/kernel/smp.c index 4065b5e48c9d..5e26dbede5fc 100644 --- a/arch/parisc/kernel/smp.c +++ b/arch/parisc/kernel/smp.c | |||
| @@ -423,8 +423,7 @@ int __cpu_up(unsigned int cpu, struct task_struct *tidle) | |||
| 423 | } | 423 | } |
| 424 | 424 | ||
| 425 | #ifdef CONFIG_PROC_FS | 425 | #ifdef CONFIG_PROC_FS |
| 426 | int __init | 426 | int setup_profiling_timer(unsigned int multiplier) |
| 427 | setup_profiling_timer(unsigned int multiplier) | ||
| 428 | { | 427 | { |
| 429 | return -EINVAL; | 428 | return -EINVAL; |
| 430 | } | 429 | } |
diff --git a/arch/powerpc/include/asm/exception-64s.h b/arch/powerpc/include/asm/exception-64s.h index 471b2274fbeb..c40b4380951c 100644 --- a/arch/powerpc/include/asm/exception-64s.h +++ b/arch/powerpc/include/asm/exception-64s.h | |||
| @@ -74,6 +74,27 @@ | |||
| 74 | */ | 74 | */ |
| 75 | #define EX_R3 EX_DAR | 75 | #define EX_R3 EX_DAR |
| 76 | 76 | ||
| 77 | #define STF_ENTRY_BARRIER_SLOT \ | ||
| 78 | STF_ENTRY_BARRIER_FIXUP_SECTION; \ | ||
| 79 | nop; \ | ||
| 80 | nop; \ | ||
| 81 | nop | ||
| 82 | |||
| 83 | #define STF_EXIT_BARRIER_SLOT \ | ||
| 84 | STF_EXIT_BARRIER_FIXUP_SECTION; \ | ||
| 85 | nop; \ | ||
| 86 | nop; \ | ||
| 87 | nop; \ | ||
| 88 | nop; \ | ||
| 89 | nop; \ | ||
| 90 | nop | ||
| 91 | |||
| 92 | /* | ||
| 93 | * r10 must be free to use, r13 must be paca | ||
| 94 | */ | ||
| 95 | #define INTERRUPT_TO_KERNEL \ | ||
| 96 | STF_ENTRY_BARRIER_SLOT | ||
| 97 | |||
| 77 | /* | 98 | /* |
| 78 | * Macros for annotating the expected destination of (h)rfid | 99 | * Macros for annotating the expected destination of (h)rfid |
| 79 | * | 100 | * |
| @@ -90,16 +111,19 @@ | |||
| 90 | rfid | 111 | rfid |
| 91 | 112 | ||
| 92 | #define RFI_TO_USER \ | 113 | #define RFI_TO_USER \ |
| 114 | STF_EXIT_BARRIER_SLOT; \ | ||
| 93 | RFI_FLUSH_SLOT; \ | 115 | RFI_FLUSH_SLOT; \ |
| 94 | rfid; \ | 116 | rfid; \ |
| 95 | b rfi_flush_fallback | 117 | b rfi_flush_fallback |
| 96 | 118 | ||
| 97 | #define RFI_TO_USER_OR_KERNEL \ | 119 | #define RFI_TO_USER_OR_KERNEL \ |
| 120 | STF_EXIT_BARRIER_SLOT; \ | ||
| 98 | RFI_FLUSH_SLOT; \ | 121 | RFI_FLUSH_SLOT; \ |
| 99 | rfid; \ | 122 | rfid; \ |
| 100 | b rfi_flush_fallback | 123 | b rfi_flush_fallback |
| 101 | 124 | ||
| 102 | #define RFI_TO_GUEST \ | 125 | #define RFI_TO_GUEST \ |
| 126 | STF_EXIT_BARRIER_SLOT; \ | ||
| 103 | RFI_FLUSH_SLOT; \ | 127 | RFI_FLUSH_SLOT; \ |
| 104 | rfid; \ | 128 | rfid; \ |
| 105 | b rfi_flush_fallback | 129 | b rfi_flush_fallback |
| @@ -108,21 +132,25 @@ | |||
| 108 | hrfid | 132 | hrfid |
| 109 | 133 | ||
| 110 | #define HRFI_TO_USER \ | 134 | #define HRFI_TO_USER \ |
| 135 | STF_EXIT_BARRIER_SLOT; \ | ||
| 111 | RFI_FLUSH_SLOT; \ | 136 | RFI_FLUSH_SLOT; \ |
| 112 | hrfid; \ | 137 | hrfid; \ |
| 113 | b hrfi_flush_fallback | 138 | b hrfi_flush_fallback |
| 114 | 139 | ||
| 115 | #define HRFI_TO_USER_OR_KERNEL \ | 140 | #define HRFI_TO_USER_OR_KERNEL \ |
| 141 | STF_EXIT_BARRIER_SLOT; \ | ||
| 116 | RFI_FLUSH_SLOT; \ | 142 | RFI_FLUSH_SLOT; \ |
| 117 | hrfid; \ | 143 | hrfid; \ |
| 118 | b hrfi_flush_fallback | 144 | b hrfi_flush_fallback |
| 119 | 145 | ||
| 120 | #define HRFI_TO_GUEST \ | 146 | #define HRFI_TO_GUEST \ |
| 147 | STF_EXIT_BARRIER_SLOT; \ | ||
| 121 | RFI_FLUSH_SLOT; \ | 148 | RFI_FLUSH_SLOT; \ |
| 122 | hrfid; \ | 149 | hrfid; \ |
| 123 | b hrfi_flush_fallback | 150 | b hrfi_flush_fallback |
| 124 | 151 | ||
| 125 | #define HRFI_TO_UNKNOWN \ | 152 | #define HRFI_TO_UNKNOWN \ |
| 153 | STF_EXIT_BARRIER_SLOT; \ | ||
| 126 | RFI_FLUSH_SLOT; \ | 154 | RFI_FLUSH_SLOT; \ |
| 127 | hrfid; \ | 155 | hrfid; \ |
| 128 | b hrfi_flush_fallback | 156 | b hrfi_flush_fallback |
| @@ -254,6 +282,7 @@ END_FTR_SECTION_NESTED(ftr,ftr,943) | |||
| 254 | #define __EXCEPTION_PROLOG_1_PRE(area) \ | 282 | #define __EXCEPTION_PROLOG_1_PRE(area) \ |
| 255 | OPT_SAVE_REG_TO_PACA(area+EX_PPR, r9, CPU_FTR_HAS_PPR); \ | 283 | OPT_SAVE_REG_TO_PACA(area+EX_PPR, r9, CPU_FTR_HAS_PPR); \ |
| 256 | OPT_SAVE_REG_TO_PACA(area+EX_CFAR, r10, CPU_FTR_CFAR); \ | 284 | OPT_SAVE_REG_TO_PACA(area+EX_CFAR, r10, CPU_FTR_CFAR); \ |
| 285 | INTERRUPT_TO_KERNEL; \ | ||
| 257 | SAVE_CTR(r10, area); \ | 286 | SAVE_CTR(r10, area); \ |
| 258 | mfcr r9; | 287 | mfcr r9; |
| 259 | 288 | ||
diff --git a/arch/powerpc/include/asm/feature-fixups.h b/arch/powerpc/include/asm/feature-fixups.h index 1e82eb3caabd..a9b64df34e2a 100644 --- a/arch/powerpc/include/asm/feature-fixups.h +++ b/arch/powerpc/include/asm/feature-fixups.h | |||
| @@ -187,6 +187,22 @@ label##3: \ | |||
| 187 | FTR_ENTRY_OFFSET label##1b-label##3b; \ | 187 | FTR_ENTRY_OFFSET label##1b-label##3b; \ |
| 188 | .popsection; | 188 | .popsection; |
| 189 | 189 | ||
| 190 | #define STF_ENTRY_BARRIER_FIXUP_SECTION \ | ||
| 191 | 953: \ | ||
| 192 | .pushsection __stf_entry_barrier_fixup,"a"; \ | ||
| 193 | .align 2; \ | ||
| 194 | 954: \ | ||
| 195 | FTR_ENTRY_OFFSET 953b-954b; \ | ||
| 196 | .popsection; | ||
| 197 | |||
| 198 | #define STF_EXIT_BARRIER_FIXUP_SECTION \ | ||
| 199 | 955: \ | ||
| 200 | .pushsection __stf_exit_barrier_fixup,"a"; \ | ||
| 201 | .align 2; \ | ||
| 202 | 956: \ | ||
| 203 | FTR_ENTRY_OFFSET 955b-956b; \ | ||
| 204 | .popsection; | ||
| 205 | |||
| 190 | #define RFI_FLUSH_FIXUP_SECTION \ | 206 | #define RFI_FLUSH_FIXUP_SECTION \ |
| 191 | 951: \ | 207 | 951: \ |
| 192 | .pushsection __rfi_flush_fixup,"a"; \ | 208 | .pushsection __rfi_flush_fixup,"a"; \ |
| @@ -199,6 +215,9 @@ label##3: \ | |||
| 199 | #ifndef __ASSEMBLY__ | 215 | #ifndef __ASSEMBLY__ |
| 200 | #include <linux/types.h> | 216 | #include <linux/types.h> |
| 201 | 217 | ||
| 218 | extern long stf_barrier_fallback; | ||
| 219 | extern long __start___stf_entry_barrier_fixup, __stop___stf_entry_barrier_fixup; | ||
| 220 | extern long __start___stf_exit_barrier_fixup, __stop___stf_exit_barrier_fixup; | ||
| 202 | extern long __start___rfi_flush_fixup, __stop___rfi_flush_fixup; | 221 | extern long __start___rfi_flush_fixup, __stop___rfi_flush_fixup; |
| 203 | 222 | ||
| 204 | void apply_feature_fixups(void); | 223 | void apply_feature_fixups(void); |
diff --git a/arch/powerpc/include/asm/kvm_book3s.h b/arch/powerpc/include/asm/kvm_book3s.h index 4c02a7378d06..e7377b73cfec 100644 --- a/arch/powerpc/include/asm/kvm_book3s.h +++ b/arch/powerpc/include/asm/kvm_book3s.h | |||
| @@ -96,6 +96,7 @@ struct kvmppc_vcore { | |||
| 96 | struct kvm_vcpu *runner; | 96 | struct kvm_vcpu *runner; |
| 97 | struct kvm *kvm; | 97 | struct kvm *kvm; |
| 98 | u64 tb_offset; /* guest timebase - host timebase */ | 98 | u64 tb_offset; /* guest timebase - host timebase */ |
| 99 | u64 tb_offset_applied; /* timebase offset currently in force */ | ||
| 99 | ulong lpcr; | 100 | ulong lpcr; |
| 100 | u32 arch_compat; | 101 | u32 arch_compat; |
| 101 | ulong pcr; | 102 | ulong pcr; |
diff --git a/arch/powerpc/include/asm/security_features.h b/arch/powerpc/include/asm/security_features.h index fa4d2e1cf772..44989b22383c 100644 --- a/arch/powerpc/include/asm/security_features.h +++ b/arch/powerpc/include/asm/security_features.h | |||
| @@ -12,6 +12,17 @@ | |||
| 12 | extern unsigned long powerpc_security_features; | 12 | extern unsigned long powerpc_security_features; |
| 13 | extern bool rfi_flush; | 13 | extern bool rfi_flush; |
| 14 | 14 | ||
| 15 | /* These are bit flags */ | ||
| 16 | enum stf_barrier_type { | ||
| 17 | STF_BARRIER_NONE = 0x1, | ||
| 18 | STF_BARRIER_FALLBACK = 0x2, | ||
| 19 | STF_BARRIER_EIEIO = 0x4, | ||
| 20 | STF_BARRIER_SYNC_ORI = 0x8, | ||
| 21 | }; | ||
| 22 | |||
| 23 | void setup_stf_barrier(void); | ||
| 24 | void do_stf_barrier_fixups(enum stf_barrier_type types); | ||
| 25 | |||
| 15 | static inline void security_ftr_set(unsigned long feature) | 26 | static inline void security_ftr_set(unsigned long feature) |
| 16 | { | 27 | { |
| 17 | powerpc_security_features |= feature; | 28 | powerpc_security_features |= feature; |
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c index 6bee65f3cfd3..373dc1d6ef44 100644 --- a/arch/powerpc/kernel/asm-offsets.c +++ b/arch/powerpc/kernel/asm-offsets.c | |||
| @@ -562,6 +562,7 @@ int main(void) | |||
| 562 | OFFSET(VCORE_NAPPING_THREADS, kvmppc_vcore, napping_threads); | 562 | OFFSET(VCORE_NAPPING_THREADS, kvmppc_vcore, napping_threads); |
| 563 | OFFSET(VCORE_KVM, kvmppc_vcore, kvm); | 563 | OFFSET(VCORE_KVM, kvmppc_vcore, kvm); |
| 564 | OFFSET(VCORE_TB_OFFSET, kvmppc_vcore, tb_offset); | 564 | OFFSET(VCORE_TB_OFFSET, kvmppc_vcore, tb_offset); |
| 565 | OFFSET(VCORE_TB_OFFSET_APPL, kvmppc_vcore, tb_offset_applied); | ||
| 565 | OFFSET(VCORE_LPCR, kvmppc_vcore, lpcr); | 566 | OFFSET(VCORE_LPCR, kvmppc_vcore, lpcr); |
| 566 | OFFSET(VCORE_PCR, kvmppc_vcore, pcr); | 567 | OFFSET(VCORE_PCR, kvmppc_vcore, pcr); |
| 567 | OFFSET(VCORE_DPDES, kvmppc_vcore, dpdes); | 568 | OFFSET(VCORE_DPDES, kvmppc_vcore, dpdes); |
diff --git a/arch/powerpc/kernel/cpu_setup_power.S b/arch/powerpc/kernel/cpu_setup_power.S index 3f30c994e931..458b928dbd84 100644 --- a/arch/powerpc/kernel/cpu_setup_power.S +++ b/arch/powerpc/kernel/cpu_setup_power.S | |||
| @@ -28,6 +28,7 @@ _GLOBAL(__setup_cpu_power7) | |||
| 28 | beqlr | 28 | beqlr |
| 29 | li r0,0 | 29 | li r0,0 |
| 30 | mtspr SPRN_LPID,r0 | 30 | mtspr SPRN_LPID,r0 |
| 31 | mtspr SPRN_PCR,r0 | ||
| 31 | mfspr r3,SPRN_LPCR | 32 | mfspr r3,SPRN_LPCR |
| 32 | li r4,(LPCR_LPES1 >> LPCR_LPES_SH) | 33 | li r4,(LPCR_LPES1 >> LPCR_LPES_SH) |
| 33 | bl __init_LPCR_ISA206 | 34 | bl __init_LPCR_ISA206 |
| @@ -41,6 +42,7 @@ _GLOBAL(__restore_cpu_power7) | |||
| 41 | beqlr | 42 | beqlr |
| 42 | li r0,0 | 43 | li r0,0 |
| 43 | mtspr SPRN_LPID,r0 | 44 | mtspr SPRN_LPID,r0 |
| 45 | mtspr SPRN_PCR,r0 | ||
| 44 | mfspr r3,SPRN_LPCR | 46 | mfspr r3,SPRN_LPCR |
| 45 | li r4,(LPCR_LPES1 >> LPCR_LPES_SH) | 47 | li r4,(LPCR_LPES1 >> LPCR_LPES_SH) |
| 46 | bl __init_LPCR_ISA206 | 48 | bl __init_LPCR_ISA206 |
| @@ -57,6 +59,7 @@ _GLOBAL(__setup_cpu_power8) | |||
| 57 | beqlr | 59 | beqlr |
| 58 | li r0,0 | 60 | li r0,0 |
| 59 | mtspr SPRN_LPID,r0 | 61 | mtspr SPRN_LPID,r0 |
| 62 | mtspr SPRN_PCR,r0 | ||
| 60 | mfspr r3,SPRN_LPCR | 63 | mfspr r3,SPRN_LPCR |
| 61 | ori r3, r3, LPCR_PECEDH | 64 | ori r3, r3, LPCR_PECEDH |
| 62 | li r4,0 /* LPES = 0 */ | 65 | li r4,0 /* LPES = 0 */ |
| @@ -78,6 +81,7 @@ _GLOBAL(__restore_cpu_power8) | |||
| 78 | beqlr | 81 | beqlr |
| 79 | li r0,0 | 82 | li r0,0 |
| 80 | mtspr SPRN_LPID,r0 | 83 | mtspr SPRN_LPID,r0 |
| 84 | mtspr SPRN_PCR,r0 | ||
| 81 | mfspr r3,SPRN_LPCR | 85 | mfspr r3,SPRN_LPCR |
| 82 | ori r3, r3, LPCR_PECEDH | 86 | ori r3, r3, LPCR_PECEDH |
| 83 | li r4,0 /* LPES = 0 */ | 87 | li r4,0 /* LPES = 0 */ |
| @@ -99,6 +103,7 @@ _GLOBAL(__setup_cpu_power9) | |||
| 99 | mtspr SPRN_PSSCR,r0 | 103 | mtspr SPRN_PSSCR,r0 |
| 100 | mtspr SPRN_LPID,r0 | 104 | mtspr SPRN_LPID,r0 |
| 101 | mtspr SPRN_PID,r0 | 105 | mtspr SPRN_PID,r0 |
| 106 | mtspr SPRN_PCR,r0 | ||
| 102 | mfspr r3,SPRN_LPCR | 107 | mfspr r3,SPRN_LPCR |
| 103 | LOAD_REG_IMMEDIATE(r4, LPCR_PECEDH | LPCR_PECE_HVEE | LPCR_HVICE | LPCR_HEIC) | 108 | LOAD_REG_IMMEDIATE(r4, LPCR_PECEDH | LPCR_PECE_HVEE | LPCR_HVICE | LPCR_HEIC) |
| 104 | or r3, r3, r4 | 109 | or r3, r3, r4 |
| @@ -123,6 +128,7 @@ _GLOBAL(__restore_cpu_power9) | |||
| 123 | mtspr SPRN_PSSCR,r0 | 128 | mtspr SPRN_PSSCR,r0 |
| 124 | mtspr SPRN_LPID,r0 | 129 | mtspr SPRN_LPID,r0 |
| 125 | mtspr SPRN_PID,r0 | 130 | mtspr SPRN_PID,r0 |
| 131 | mtspr SPRN_PCR,r0 | ||
| 126 | mfspr r3,SPRN_LPCR | 132 | mfspr r3,SPRN_LPCR |
| 127 | LOAD_REG_IMMEDIATE(r4, LPCR_PECEDH | LPCR_PECE_HVEE | LPCR_HVICE | LPCR_HEIC) | 133 | LOAD_REG_IMMEDIATE(r4, LPCR_PECEDH | LPCR_PECE_HVEE | LPCR_HVICE | LPCR_HEIC) |
| 128 | or r3, r3, r4 | 134 | or r3, r3, r4 |
diff --git a/arch/powerpc/kernel/dt_cpu_ftrs.c b/arch/powerpc/kernel/dt_cpu_ftrs.c index 8ab51f6ca03a..c904477abaf3 100644 --- a/arch/powerpc/kernel/dt_cpu_ftrs.c +++ b/arch/powerpc/kernel/dt_cpu_ftrs.c | |||
| @@ -101,6 +101,7 @@ static void __restore_cpu_cpufeatures(void) | |||
| 101 | if (hv_mode) { | 101 | if (hv_mode) { |
| 102 | mtspr(SPRN_LPID, 0); | 102 | mtspr(SPRN_LPID, 0); |
| 103 | mtspr(SPRN_HFSCR, system_registers.hfscr); | 103 | mtspr(SPRN_HFSCR, system_registers.hfscr); |
| 104 | mtspr(SPRN_PCR, 0); | ||
| 104 | } | 105 | } |
| 105 | mtspr(SPRN_FSCR, system_registers.fscr); | 106 | mtspr(SPRN_FSCR, system_registers.fscr); |
| 106 | 107 | ||
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S index ae6a849db60b..f283958129f2 100644 --- a/arch/powerpc/kernel/exceptions-64s.S +++ b/arch/powerpc/kernel/exceptions-64s.S | |||
| @@ -885,7 +885,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_TM) | |||
| 885 | #endif | 885 | #endif |
| 886 | 886 | ||
| 887 | 887 | ||
| 888 | EXC_REAL_MASKABLE(decrementer, 0x900, 0x80, IRQS_DISABLED) | 888 | EXC_REAL_OOL_MASKABLE(decrementer, 0x900, 0x80, IRQS_DISABLED) |
| 889 | EXC_VIRT_MASKABLE(decrementer, 0x4900, 0x80, 0x900, IRQS_DISABLED) | 889 | EXC_VIRT_MASKABLE(decrementer, 0x4900, 0x80, 0x900, IRQS_DISABLED) |
| 890 | TRAMP_KVM(PACA_EXGEN, 0x900) | 890 | TRAMP_KVM(PACA_EXGEN, 0x900) |
| 891 | EXC_COMMON_ASYNC(decrementer_common, 0x900, timer_interrupt) | 891 | EXC_COMMON_ASYNC(decrementer_common, 0x900, timer_interrupt) |
| @@ -961,6 +961,7 @@ EXC_COMMON(trap_0b_common, 0xb00, unknown_exception) | |||
| 961 | mtctr r13; \ | 961 | mtctr r13; \ |
| 962 | GET_PACA(r13); \ | 962 | GET_PACA(r13); \ |
| 963 | std r10,PACA_EXGEN+EX_R10(r13); \ | 963 | std r10,PACA_EXGEN+EX_R10(r13); \ |
| 964 | INTERRUPT_TO_KERNEL; \ | ||
| 964 | KVMTEST_PR(0xc00); /* uses r10, branch to do_kvm_0xc00_system_call */ \ | 965 | KVMTEST_PR(0xc00); /* uses r10, branch to do_kvm_0xc00_system_call */ \ |
| 965 | HMT_MEDIUM; \ | 966 | HMT_MEDIUM; \ |
| 966 | mfctr r9; | 967 | mfctr r9; |
| @@ -969,7 +970,8 @@ EXC_COMMON(trap_0b_common, 0xb00, unknown_exception) | |||
| 969 | #define SYSCALL_KVMTEST \ | 970 | #define SYSCALL_KVMTEST \ |
| 970 | HMT_MEDIUM; \ | 971 | HMT_MEDIUM; \ |
| 971 | mr r9,r13; \ | 972 | mr r9,r13; \ |
| 972 | GET_PACA(r13); | 973 | GET_PACA(r13); \ |
| 974 | INTERRUPT_TO_KERNEL; | ||
| 973 | #endif | 975 | #endif |
| 974 | 976 | ||
| 975 | #define LOAD_SYSCALL_HANDLER(reg) \ | 977 | #define LOAD_SYSCALL_HANDLER(reg) \ |
| @@ -1507,6 +1509,19 @@ masked_##_H##interrupt: \ | |||
| 1507 | b .; \ | 1509 | b .; \ |
| 1508 | MASKED_DEC_HANDLER(_H) | 1510 | MASKED_DEC_HANDLER(_H) |
| 1509 | 1511 | ||
| 1512 | TRAMP_REAL_BEGIN(stf_barrier_fallback) | ||
| 1513 | std r9,PACA_EXRFI+EX_R9(r13) | ||
| 1514 | std r10,PACA_EXRFI+EX_R10(r13) | ||
| 1515 | sync | ||
| 1516 | ld r9,PACA_EXRFI+EX_R9(r13) | ||
| 1517 | ld r10,PACA_EXRFI+EX_R10(r13) | ||
| 1518 | ori 31,31,0 | ||
| 1519 | .rept 14 | ||
| 1520 | b 1f | ||
| 1521 | 1: | ||
| 1522 | .endr | ||
| 1523 | blr | ||
| 1524 | |||
| 1510 | TRAMP_REAL_BEGIN(rfi_flush_fallback) | 1525 | TRAMP_REAL_BEGIN(rfi_flush_fallback) |
| 1511 | SET_SCRATCH0(r13); | 1526 | SET_SCRATCH0(r13); |
| 1512 | GET_PACA(r13); | 1527 | GET_PACA(r13); |
diff --git a/arch/powerpc/kernel/security.c b/arch/powerpc/kernel/security.c index bab5a27ea805..b98a722da915 100644 --- a/arch/powerpc/kernel/security.c +++ b/arch/powerpc/kernel/security.c | |||
| @@ -8,6 +8,7 @@ | |||
| 8 | #include <linux/device.h> | 8 | #include <linux/device.h> |
| 9 | #include <linux/seq_buf.h> | 9 | #include <linux/seq_buf.h> |
| 10 | 10 | ||
| 11 | #include <asm/debugfs.h> | ||
| 11 | #include <asm/security_features.h> | 12 | #include <asm/security_features.h> |
| 12 | 13 | ||
| 13 | 14 | ||
| @@ -86,3 +87,151 @@ ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr, c | |||
| 86 | 87 | ||
| 87 | return s.len; | 88 | return s.len; |
| 88 | } | 89 | } |
| 90 | |||
| 91 | /* | ||
| 92 | * Store-forwarding barrier support. | ||
| 93 | */ | ||
| 94 | |||
| 95 | static enum stf_barrier_type stf_enabled_flush_types; | ||
| 96 | static bool no_stf_barrier; | ||
| 97 | bool stf_barrier; | ||
| 98 | |||
| 99 | static int __init handle_no_stf_barrier(char *p) | ||
| 100 | { | ||
| 101 | pr_info("stf-barrier: disabled on command line."); | ||
| 102 | no_stf_barrier = true; | ||
| 103 | return 0; | ||
| 104 | } | ||
| 105 | |||
| 106 | early_param("no_stf_barrier", handle_no_stf_barrier); | ||
| 107 | |||
| 108 | /* This is the generic flag used by other architectures */ | ||
| 109 | static int __init handle_ssbd(char *p) | ||
| 110 | { | ||
| 111 | if (!p || strncmp(p, "auto", 5) == 0 || strncmp(p, "on", 2) == 0 ) { | ||
| 112 | /* Until firmware tells us, we have the barrier with auto */ | ||
| 113 | return 0; | ||
| 114 | } else if (strncmp(p, "off", 3) == 0) { | ||
| 115 | handle_no_stf_barrier(NULL); | ||
| 116 | return 0; | ||
| 117 | } else | ||
| 118 | return 1; | ||
| 119 | |||
| 120 | return 0; | ||
| 121 | } | ||
| 122 | early_param("spec_store_bypass_disable", handle_ssbd); | ||
| 123 | |||
| 124 | /* This is the generic flag used by other architectures */ | ||
| 125 | static int __init handle_no_ssbd(char *p) | ||
| 126 | { | ||
| 127 | handle_no_stf_barrier(NULL); | ||
| 128 | return 0; | ||
| 129 | } | ||
| 130 | early_param("nospec_store_bypass_disable", handle_no_ssbd); | ||
| 131 | |||
| 132 | static void stf_barrier_enable(bool enable) | ||
| 133 | { | ||
| 134 | if (enable) | ||
| 135 | do_stf_barrier_fixups(stf_enabled_flush_types); | ||
| 136 | else | ||
| 137 | do_stf_barrier_fixups(STF_BARRIER_NONE); | ||
| 138 | |||
| 139 | stf_barrier = enable; | ||
| 140 | } | ||
| 141 | |||
| 142 | void setup_stf_barrier(void) | ||
| 143 | { | ||
| 144 | enum stf_barrier_type type; | ||
| 145 | bool enable, hv; | ||
| 146 | |||
| 147 | hv = cpu_has_feature(CPU_FTR_HVMODE); | ||
| 148 | |||
| 149 | /* Default to fallback in case fw-features are not available */ | ||
| 150 | if (cpu_has_feature(CPU_FTR_ARCH_300)) | ||
| 151 | type = STF_BARRIER_EIEIO; | ||
| 152 | else if (cpu_has_feature(CPU_FTR_ARCH_207S)) | ||
| 153 | type = STF_BARRIER_SYNC_ORI; | ||
| 154 | else if (cpu_has_feature(CPU_FTR_ARCH_206)) | ||
| 155 | type = STF_BARRIER_FALLBACK; | ||
| 156 | else | ||
| 157 | type = STF_BARRIER_NONE; | ||
| 158 | |||
| 159 | enable = security_ftr_enabled(SEC_FTR_FAVOUR_SECURITY) && | ||
| 160 | (security_ftr_enabled(SEC_FTR_L1D_FLUSH_PR) || | ||
| 161 | (security_ftr_enabled(SEC_FTR_L1D_FLUSH_HV) && hv)); | ||
| 162 | |||
| 163 | if (type == STF_BARRIER_FALLBACK) { | ||
| 164 | pr_info("stf-barrier: fallback barrier available\n"); | ||
| 165 | } else if (type == STF_BARRIER_SYNC_ORI) { | ||
| 166 | pr_info("stf-barrier: hwsync barrier available\n"); | ||
| 167 | } else if (type == STF_BARRIER_EIEIO) { | ||
| 168 | pr_info("stf-barrier: eieio barrier available\n"); | ||
| 169 | } | ||
| 170 | |||
| 171 | stf_enabled_flush_types = type; | ||
| 172 | |||
| 173 | if (!no_stf_barrier) | ||
| 174 | stf_barrier_enable(enable); | ||
| 175 | } | ||
| 176 | |||
| 177 | ssize_t cpu_show_spec_store_bypass(struct device *dev, struct device_attribute *attr, char *buf) | ||
| 178 | { | ||
| 179 | if (stf_barrier && stf_enabled_flush_types != STF_BARRIER_NONE) { | ||
| 180 | const char *type; | ||
| 181 | switch (stf_enabled_flush_types) { | ||
| 182 | case STF_BARRIER_EIEIO: | ||
| 183 | type = "eieio"; | ||
| 184 | break; | ||
| 185 | case STF_BARRIER_SYNC_ORI: | ||
| 186 | type = "hwsync"; | ||
| 187 | break; | ||
| 188 | case STF_BARRIER_FALLBACK: | ||
| 189 | type = "fallback"; | ||
| 190 | break; | ||
| 191 | default: | ||
| 192 | type = "unknown"; | ||
| 193 | } | ||
| 194 | return sprintf(buf, "Mitigation: Kernel entry/exit barrier (%s)\n", type); | ||
| 195 | } | ||
| 196 | |||
| 197 | if (!security_ftr_enabled(SEC_FTR_L1D_FLUSH_HV) && | ||
| 198 | !security_ftr_enabled(SEC_FTR_L1D_FLUSH_PR)) | ||
| 199 | return sprintf(buf, "Not affected\n"); | ||
| 200 | |||
| 201 | return sprintf(buf, "Vulnerable\n"); | ||
| 202 | } | ||
| 203 | |||
| 204 | #ifdef CONFIG_DEBUG_FS | ||
| 205 | static int stf_barrier_set(void *data, u64 val) | ||
| 206 | { | ||
| 207 | bool enable; | ||
| 208 | |||
| 209 | if (val == 1) | ||
| 210 | enable = true; | ||
| 211 | else if (val == 0) | ||
| 212 | enable = false; | ||
| 213 | else | ||
| 214 | return -EINVAL; | ||
| 215 | |||
| 216 | /* Only do anything if we're changing state */ | ||
| 217 | if (enable != stf_barrier) | ||
| 218 | stf_barrier_enable(enable); | ||
| 219 | |||
| 220 | return 0; | ||
| 221 | } | ||
| 222 | |||
| 223 | static int stf_barrier_get(void *data, u64 *val) | ||
| 224 | { | ||
| 225 | *val = stf_barrier ? 1 : 0; | ||
| 226 | return 0; | ||
| 227 | } | ||
| 228 | |||
| 229 | DEFINE_SIMPLE_ATTRIBUTE(fops_stf_barrier, stf_barrier_get, stf_barrier_set, "%llu\n"); | ||
| 230 | |||
| 231 | static __init int stf_barrier_debugfs_init(void) | ||
| 232 | { | ||
| 233 | debugfs_create_file("stf_barrier", 0600, powerpc_debugfs_root, NULL, &fops_stf_barrier); | ||
| 234 | return 0; | ||
| 235 | } | ||
| 236 | device_initcall(stf_barrier_debugfs_init); | ||
| 237 | #endif /* CONFIG_DEBUG_FS */ | ||
diff --git a/arch/powerpc/kernel/vmlinux.lds.S b/arch/powerpc/kernel/vmlinux.lds.S index c8af90ff49f0..b8d82678f8b4 100644 --- a/arch/powerpc/kernel/vmlinux.lds.S +++ b/arch/powerpc/kernel/vmlinux.lds.S | |||
| @@ -134,6 +134,20 @@ SECTIONS | |||
| 134 | 134 | ||
| 135 | #ifdef CONFIG_PPC64 | 135 | #ifdef CONFIG_PPC64 |
| 136 | . = ALIGN(8); | 136 | . = ALIGN(8); |
| 137 | __stf_entry_barrier_fixup : AT(ADDR(__stf_entry_barrier_fixup) - LOAD_OFFSET) { | ||
| 138 | __start___stf_entry_barrier_fixup = .; | ||
| 139 | *(__stf_entry_barrier_fixup) | ||
| 140 | __stop___stf_entry_barrier_fixup = .; | ||
| 141 | } | ||
| 142 | |||
| 143 | . = ALIGN(8); | ||
| 144 | __stf_exit_barrier_fixup : AT(ADDR(__stf_exit_barrier_fixup) - LOAD_OFFSET) { | ||
| 145 | __start___stf_exit_barrier_fixup = .; | ||
| 146 | *(__stf_exit_barrier_fixup) | ||
| 147 | __stop___stf_exit_barrier_fixup = .; | ||
| 148 | } | ||
| 149 | |||
| 150 | . = ALIGN(8); | ||
| 137 | __rfi_flush_fixup : AT(ADDR(__rfi_flush_fixup) - LOAD_OFFSET) { | 151 | __rfi_flush_fixup : AT(ADDR(__rfi_flush_fixup) - LOAD_OFFSET) { |
| 138 | __start___rfi_flush_fixup = .; | 152 | __start___rfi_flush_fixup = .; |
| 139 | *(__rfi_flush_fixup) | 153 | *(__rfi_flush_fixup) |
diff --git a/arch/powerpc/kvm/book3s_64_mmu_radix.c b/arch/powerpc/kvm/book3s_64_mmu_radix.c index a57eafec4dc2..361f42c8c73e 100644 --- a/arch/powerpc/kvm/book3s_64_mmu_radix.c +++ b/arch/powerpc/kvm/book3s_64_mmu_radix.c | |||
| @@ -162,7 +162,7 @@ static void kvmppc_radix_tlbie_page(struct kvm *kvm, unsigned long addr, | |||
| 162 | if (cpu_has_feature(CPU_FTR_P9_TLBIE_BUG)) | 162 | if (cpu_has_feature(CPU_FTR_P9_TLBIE_BUG)) |
| 163 | asm volatile(PPC_TLBIE_5(%0, %1, 0, 0, 1) | 163 | asm volatile(PPC_TLBIE_5(%0, %1, 0, 0, 1) |
| 164 | : : "r" (addr), "r" (kvm->arch.lpid) : "memory"); | 164 | : : "r" (addr), "r" (kvm->arch.lpid) : "memory"); |
| 165 | asm volatile("ptesync": : :"memory"); | 165 | asm volatile("eieio ; tlbsync ; ptesync": : :"memory"); |
| 166 | } | 166 | } |
| 167 | 167 | ||
| 168 | static void kvmppc_radix_flush_pwc(struct kvm *kvm, unsigned long addr) | 168 | static void kvmppc_radix_flush_pwc(struct kvm *kvm, unsigned long addr) |
| @@ -173,7 +173,7 @@ static void kvmppc_radix_flush_pwc(struct kvm *kvm, unsigned long addr) | |||
| 173 | /* RIC=1 PRS=0 R=1 IS=2 */ | 173 | /* RIC=1 PRS=0 R=1 IS=2 */ |
| 174 | asm volatile(PPC_TLBIE_5(%0, %1, 1, 0, 1) | 174 | asm volatile(PPC_TLBIE_5(%0, %1, 1, 0, 1) |
| 175 | : : "r" (rb), "r" (kvm->arch.lpid) : "memory"); | 175 | : : "r" (rb), "r" (kvm->arch.lpid) : "memory"); |
| 176 | asm volatile("ptesync": : :"memory"); | 176 | asm volatile("eieio ; tlbsync ; ptesync": : :"memory"); |
| 177 | } | 177 | } |
| 178 | 178 | ||
| 179 | unsigned long kvmppc_radix_update_pte(struct kvm *kvm, pte_t *ptep, | 179 | unsigned long kvmppc_radix_update_pte(struct kvm *kvm, pte_t *ptep, |
| @@ -584,7 +584,7 @@ int kvm_unmap_radix(struct kvm *kvm, struct kvm_memory_slot *memslot, | |||
| 584 | 584 | ||
| 585 | ptep = __find_linux_pte(kvm->arch.pgtable, gpa, NULL, &shift); | 585 | ptep = __find_linux_pte(kvm->arch.pgtable, gpa, NULL, &shift); |
| 586 | if (ptep && pte_present(*ptep)) { | 586 | if (ptep && pte_present(*ptep)) { |
| 587 | old = kvmppc_radix_update_pte(kvm, ptep, _PAGE_PRESENT, 0, | 587 | old = kvmppc_radix_update_pte(kvm, ptep, ~0UL, 0, |
| 588 | gpa, shift); | 588 | gpa, shift); |
| 589 | kvmppc_radix_tlbie_page(kvm, gpa, shift); | 589 | kvmppc_radix_tlbie_page(kvm, gpa, shift); |
| 590 | if ((old & _PAGE_DIRTY) && memslot->dirty_bitmap) { | 590 | if ((old & _PAGE_DIRTY) && memslot->dirty_bitmap) { |
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c index 4d07fca5121c..9963f65c212b 100644 --- a/arch/powerpc/kvm/book3s_hv.c +++ b/arch/powerpc/kvm/book3s_hv.c | |||
| @@ -2441,6 +2441,7 @@ static void init_vcore_to_run(struct kvmppc_vcore *vc) | |||
| 2441 | vc->in_guest = 0; | 2441 | vc->in_guest = 0; |
| 2442 | vc->napping_threads = 0; | 2442 | vc->napping_threads = 0; |
| 2443 | vc->conferring_threads = 0; | 2443 | vc->conferring_threads = 0; |
| 2444 | vc->tb_offset_applied = 0; | ||
| 2444 | } | 2445 | } |
| 2445 | 2446 | ||
| 2446 | static bool can_dynamic_split(struct kvmppc_vcore *vc, struct core_info *cip) | 2447 | static bool can_dynamic_split(struct kvmppc_vcore *vc, struct core_info *cip) |
diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S index bd63fa8a08b5..07ca1b2a7966 100644 --- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S +++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S | |||
| @@ -692,6 +692,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300) | |||
| 692 | 22: ld r8,VCORE_TB_OFFSET(r5) | 692 | 22: ld r8,VCORE_TB_OFFSET(r5) |
| 693 | cmpdi r8,0 | 693 | cmpdi r8,0 |
| 694 | beq 37f | 694 | beq 37f |
| 695 | std r8, VCORE_TB_OFFSET_APPL(r5) | ||
| 695 | mftb r6 /* current host timebase */ | 696 | mftb r6 /* current host timebase */ |
| 696 | add r8,r8,r6 | 697 | add r8,r8,r6 |
| 697 | mtspr SPRN_TBU40,r8 /* update upper 40 bits */ | 698 | mtspr SPRN_TBU40,r8 /* update upper 40 bits */ |
| @@ -940,18 +941,6 @@ FTR_SECTION_ELSE | |||
| 940 | ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_300) | 941 | ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_300) |
| 941 | 8: | 942 | 8: |
| 942 | 943 | ||
| 943 | /* | ||
| 944 | * Set the decrementer to the guest decrementer. | ||
| 945 | */ | ||
| 946 | ld r8,VCPU_DEC_EXPIRES(r4) | ||
| 947 | /* r8 is a host timebase value here, convert to guest TB */ | ||
| 948 | ld r5,HSTATE_KVM_VCORE(r13) | ||
| 949 | ld r6,VCORE_TB_OFFSET(r5) | ||
| 950 | add r8,r8,r6 | ||
| 951 | mftb r7 | ||
| 952 | subf r3,r7,r8 | ||
| 953 | mtspr SPRN_DEC,r3 | ||
| 954 | |||
| 955 | ld r5, VCPU_SPRG0(r4) | 944 | ld r5, VCPU_SPRG0(r4) |
| 956 | ld r6, VCPU_SPRG1(r4) | 945 | ld r6, VCPU_SPRG1(r4) |
| 957 | ld r7, VCPU_SPRG2(r4) | 946 | ld r7, VCPU_SPRG2(r4) |
| @@ -1005,6 +994,18 @@ ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_300) | |||
| 1005 | mtspr SPRN_LPCR,r8 | 994 | mtspr SPRN_LPCR,r8 |
| 1006 | isync | 995 | isync |
| 1007 | 996 | ||
| 997 | /* | ||
| 998 | * Set the decrementer to the guest decrementer. | ||
| 999 | */ | ||
| 1000 | ld r8,VCPU_DEC_EXPIRES(r4) | ||
| 1001 | /* r8 is a host timebase value here, convert to guest TB */ | ||
| 1002 | ld r5,HSTATE_KVM_VCORE(r13) | ||
| 1003 | ld r6,VCORE_TB_OFFSET_APPL(r5) | ||
| 1004 | add r8,r8,r6 | ||
| 1005 | mftb r7 | ||
| 1006 | subf r3,r7,r8 | ||
| 1007 | mtspr SPRN_DEC,r3 | ||
| 1008 | |||
| 1008 | /* Check if HDEC expires soon */ | 1009 | /* Check if HDEC expires soon */ |
| 1009 | mfspr r3, SPRN_HDEC | 1010 | mfspr r3, SPRN_HDEC |
| 1010 | EXTEND_HDEC(r3) | 1011 | EXTEND_HDEC(r3) |
| @@ -1597,8 +1598,27 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_RADIX) | |||
| 1597 | 1598 | ||
| 1598 | guest_bypass: | 1599 | guest_bypass: |
| 1599 | stw r12, STACK_SLOT_TRAP(r1) | 1600 | stw r12, STACK_SLOT_TRAP(r1) |
| 1600 | mr r3, r12 | 1601 | |
| 1602 | /* Save DEC */ | ||
| 1603 | /* Do this before kvmhv_commence_exit so we know TB is guest TB */ | ||
| 1604 | ld r3, HSTATE_KVM_VCORE(r13) | ||
| 1605 | mfspr r5,SPRN_DEC | ||
| 1606 | mftb r6 | ||
| 1607 | /* On P9, if the guest has large decr enabled, don't sign extend */ | ||
| 1608 | BEGIN_FTR_SECTION | ||
| 1609 | ld r4, VCORE_LPCR(r3) | ||
| 1610 | andis. r4, r4, LPCR_LD@h | ||
| 1611 | bne 16f | ||
| 1612 | END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300) | ||
| 1613 | extsw r5,r5 | ||
| 1614 | 16: add r5,r5,r6 | ||
| 1615 | /* r5 is a guest timebase value here, convert to host TB */ | ||
| 1616 | ld r4,VCORE_TB_OFFSET_APPL(r3) | ||
| 1617 | subf r5,r4,r5 | ||
| 1618 | std r5,VCPU_DEC_EXPIRES(r9) | ||
| 1619 | |||
| 1601 | /* Increment exit count, poke other threads to exit */ | 1620 | /* Increment exit count, poke other threads to exit */ |
| 1621 | mr r3, r12 | ||
| 1602 | bl kvmhv_commence_exit | 1622 | bl kvmhv_commence_exit |
| 1603 | nop | 1623 | nop |
| 1604 | ld r9, HSTATE_KVM_VCPU(r13) | 1624 | ld r9, HSTATE_KVM_VCPU(r13) |
| @@ -1639,23 +1659,6 @@ guest_bypass: | |||
| 1639 | mtspr SPRN_PURR,r3 | 1659 | mtspr SPRN_PURR,r3 |
| 1640 | mtspr SPRN_SPURR,r4 | 1660 | mtspr SPRN_SPURR,r4 |
| 1641 | 1661 | ||
| 1642 | /* Save DEC */ | ||
| 1643 | ld r3, HSTATE_KVM_VCORE(r13) | ||
| 1644 | mfspr r5,SPRN_DEC | ||
| 1645 | mftb r6 | ||
| 1646 | /* On P9, if the guest has large decr enabled, don't sign extend */ | ||
| 1647 | BEGIN_FTR_SECTION | ||
| 1648 | ld r4, VCORE_LPCR(r3) | ||
| 1649 | andis. r4, r4, LPCR_LD@h | ||
| 1650 | bne 16f | ||
| 1651 | END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300) | ||
| 1652 | extsw r5,r5 | ||
| 1653 | 16: add r5,r5,r6 | ||
| 1654 | /* r5 is a guest timebase value here, convert to host TB */ | ||
| 1655 | ld r4,VCORE_TB_OFFSET(r3) | ||
| 1656 | subf r5,r4,r5 | ||
| 1657 | std r5,VCPU_DEC_EXPIRES(r9) | ||
| 1658 | |||
| 1659 | BEGIN_FTR_SECTION | 1662 | BEGIN_FTR_SECTION |
| 1660 | b 8f | 1663 | b 8f |
| 1661 | END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S) | 1664 | END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S) |
| @@ -1905,6 +1908,14 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300) | |||
| 1905 | cmpwi cr2, r0, 0 | 1908 | cmpwi cr2, r0, 0 |
| 1906 | beq cr2, 4f | 1909 | beq cr2, 4f |
| 1907 | 1910 | ||
| 1911 | /* | ||
| 1912 | * Radix: do eieio; tlbsync; ptesync sequence in case we | ||
| 1913 | * interrupted the guest between a tlbie and a ptesync. | ||
| 1914 | */ | ||
| 1915 | eieio | ||
| 1916 | tlbsync | ||
| 1917 | ptesync | ||
| 1918 | |||
| 1908 | /* Radix: Handle the case where the guest used an illegal PID */ | 1919 | /* Radix: Handle the case where the guest used an illegal PID */ |
| 1909 | LOAD_REG_ADDR(r4, mmu_base_pid) | 1920 | LOAD_REG_ADDR(r4, mmu_base_pid) |
| 1910 | lwz r3, VCPU_GUEST_PID(r9) | 1921 | lwz r3, VCPU_GUEST_PID(r9) |
| @@ -2017,9 +2028,11 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S) | |||
| 2017 | 2028 | ||
| 2018 | 27: | 2029 | 27: |
| 2019 | /* Subtract timebase offset from timebase */ | 2030 | /* Subtract timebase offset from timebase */ |
| 2020 | ld r8,VCORE_TB_OFFSET(r5) | 2031 | ld r8, VCORE_TB_OFFSET_APPL(r5) |
| 2021 | cmpdi r8,0 | 2032 | cmpdi r8,0 |
| 2022 | beq 17f | 2033 | beq 17f |
| 2034 | li r0, 0 | ||
| 2035 | std r0, VCORE_TB_OFFSET_APPL(r5) | ||
| 2023 | mftb r6 /* current guest timebase */ | 2036 | mftb r6 /* current guest timebase */ |
| 2024 | subf r8,r8,r6 | 2037 | subf r8,r8,r6 |
| 2025 | mtspr SPRN_TBU40,r8 /* update upper 40 bits */ | 2038 | mtspr SPRN_TBU40,r8 /* update upper 40 bits */ |
| @@ -2700,7 +2713,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300) | |||
| 2700 | add r3, r3, r5 | 2713 | add r3, r3, r5 |
| 2701 | ld r4, HSTATE_KVM_VCPU(r13) | 2714 | ld r4, HSTATE_KVM_VCPU(r13) |
| 2702 | ld r5, HSTATE_KVM_VCORE(r13) | 2715 | ld r5, HSTATE_KVM_VCORE(r13) |
| 2703 | ld r6, VCORE_TB_OFFSET(r5) | 2716 | ld r6, VCORE_TB_OFFSET_APPL(r5) |
| 2704 | subf r3, r6, r3 /* convert to host TB value */ | 2717 | subf r3, r6, r3 /* convert to host TB value */ |
| 2705 | std r3, VCPU_DEC_EXPIRES(r4) | 2718 | std r3, VCPU_DEC_EXPIRES(r4) |
| 2706 | 2719 | ||
| @@ -2799,7 +2812,7 @@ END_FTR_SECTION(CPU_FTR_TM | CPU_FTR_P9_TM_HV_ASSIST, 0) | |||
| 2799 | /* Restore guest decrementer */ | 2812 | /* Restore guest decrementer */ |
| 2800 | ld r3, VCPU_DEC_EXPIRES(r4) | 2813 | ld r3, VCPU_DEC_EXPIRES(r4) |
| 2801 | ld r5, HSTATE_KVM_VCORE(r13) | 2814 | ld r5, HSTATE_KVM_VCORE(r13) |
| 2802 | ld r6, VCORE_TB_OFFSET(r5) | 2815 | ld r6, VCORE_TB_OFFSET_APPL(r5) |
| 2803 | add r3, r3, r6 /* convert host TB to guest TB value */ | 2816 | add r3, r3, r6 /* convert host TB to guest TB value */ |
| 2804 | mftb r7 | 2817 | mftb r7 |
| 2805 | subf r3, r7, r3 | 2818 | subf r3, r7, r3 |
| @@ -3606,12 +3619,9 @@ kvmppc_fix_pmao: | |||
| 3606 | */ | 3619 | */ |
| 3607 | kvmhv_start_timing: | 3620 | kvmhv_start_timing: |
| 3608 | ld r5, HSTATE_KVM_VCORE(r13) | 3621 | ld r5, HSTATE_KVM_VCORE(r13) |
| 3609 | lbz r6, VCORE_IN_GUEST(r5) | 3622 | ld r6, VCORE_TB_OFFSET_APPL(r5) |
| 3610 | cmpwi r6, 0 | 3623 | mftb r5 |
| 3611 | beq 5f /* if in guest, need to */ | 3624 | subf r5, r6, r5 /* subtract current timebase offset */ |
| 3612 | ld r6, VCORE_TB_OFFSET(r5) /* subtract timebase offset */ | ||
| 3613 | 5: mftb r5 | ||
| 3614 | subf r5, r6, r5 | ||
| 3615 | std r3, VCPU_CUR_ACTIVITY(r4) | 3625 | std r3, VCPU_CUR_ACTIVITY(r4) |
| 3616 | std r5, VCPU_ACTIVITY_START(r4) | 3626 | std r5, VCPU_ACTIVITY_START(r4) |
| 3617 | blr | 3627 | blr |
| @@ -3622,15 +3632,12 @@ kvmhv_start_timing: | |||
| 3622 | */ | 3632 | */ |
| 3623 | kvmhv_accumulate_time: | 3633 | kvmhv_accumulate_time: |
| 3624 | ld r5, HSTATE_KVM_VCORE(r13) | 3634 | ld r5, HSTATE_KVM_VCORE(r13) |
| 3625 | lbz r8, VCORE_IN_GUEST(r5) | 3635 | ld r8, VCORE_TB_OFFSET_APPL(r5) |
| 3626 | cmpwi r8, 0 | 3636 | ld r5, VCPU_CUR_ACTIVITY(r4) |
| 3627 | beq 4f /* if in guest, need to */ | ||
| 3628 | ld r8, VCORE_TB_OFFSET(r5) /* subtract timebase offset */ | ||
| 3629 | 4: ld r5, VCPU_CUR_ACTIVITY(r4) | ||
| 3630 | ld r6, VCPU_ACTIVITY_START(r4) | 3637 | ld r6, VCPU_ACTIVITY_START(r4) |
| 3631 | std r3, VCPU_CUR_ACTIVITY(r4) | 3638 | std r3, VCPU_CUR_ACTIVITY(r4) |
| 3632 | mftb r7 | 3639 | mftb r7 |
| 3633 | subf r7, r8, r7 | 3640 | subf r7, r8, r7 /* subtract current timebase offset */ |
| 3634 | std r7, VCPU_ACTIVITY_START(r4) | 3641 | std r7, VCPU_ACTIVITY_START(r4) |
| 3635 | cmpdi r5, 0 | 3642 | cmpdi r5, 0 |
| 3636 | beqlr | 3643 | beqlr |
diff --git a/arch/powerpc/kvm/book3s_xive_template.c b/arch/powerpc/kvm/book3s_xive_template.c index c7a5deadd1cc..99c3620b40d9 100644 --- a/arch/powerpc/kvm/book3s_xive_template.c +++ b/arch/powerpc/kvm/book3s_xive_template.c | |||
| @@ -11,6 +11,9 @@ | |||
| 11 | #define XGLUE(a,b) a##b | 11 | #define XGLUE(a,b) a##b |
| 12 | #define GLUE(a,b) XGLUE(a,b) | 12 | #define GLUE(a,b) XGLUE(a,b) |
| 13 | 13 | ||
| 14 | /* Dummy interrupt used when taking interrupts out of a queue in H_CPPR */ | ||
| 15 | #define XICS_DUMMY 1 | ||
| 16 | |||
| 14 | static void GLUE(X_PFX,ack_pending)(struct kvmppc_xive_vcpu *xc) | 17 | static void GLUE(X_PFX,ack_pending)(struct kvmppc_xive_vcpu *xc) |
| 15 | { | 18 | { |
| 16 | u8 cppr; | 19 | u8 cppr; |
| @@ -205,6 +208,10 @@ skip_ipi: | |||
| 205 | goto skip_ipi; | 208 | goto skip_ipi; |
| 206 | } | 209 | } |
| 207 | 210 | ||
| 211 | /* If it's the dummy interrupt, continue searching */ | ||
| 212 | if (hirq == XICS_DUMMY) | ||
| 213 | goto skip_ipi; | ||
| 214 | |||
| 208 | /* If fetching, update queue pointers */ | 215 | /* If fetching, update queue pointers */ |
| 209 | if (scan_type == scan_fetch) { | 216 | if (scan_type == scan_fetch) { |
| 210 | q->idx = idx; | 217 | q->idx = idx; |
| @@ -385,9 +392,76 @@ static void GLUE(X_PFX,push_pending_to_hw)(struct kvmppc_xive_vcpu *xc) | |||
| 385 | __x_writeb(prio, __x_tima + TM_SPC_SET_OS_PENDING); | 392 | __x_writeb(prio, __x_tima + TM_SPC_SET_OS_PENDING); |
| 386 | } | 393 | } |
| 387 | 394 | ||
| 395 | static void GLUE(X_PFX,scan_for_rerouted_irqs)(struct kvmppc_xive *xive, | ||
| 396 | struct kvmppc_xive_vcpu *xc) | ||
| 397 | { | ||
| 398 | unsigned int prio; | ||
| 399 | |||
| 400 | /* For each priority that is now masked */ | ||
| 401 | for (prio = xc->cppr; prio < KVMPPC_XIVE_Q_COUNT; prio++) { | ||
| 402 | struct xive_q *q = &xc->queues[prio]; | ||
| 403 | struct kvmppc_xive_irq_state *state; | ||
| 404 | struct kvmppc_xive_src_block *sb; | ||
| 405 | u32 idx, toggle, entry, irq, hw_num; | ||
| 406 | struct xive_irq_data *xd; | ||
| 407 | __be32 *qpage; | ||
| 408 | u16 src; | ||
| 409 | |||
| 410 | idx = q->idx; | ||
| 411 | toggle = q->toggle; | ||
| 412 | qpage = READ_ONCE(q->qpage); | ||
| 413 | if (!qpage) | ||
| 414 | continue; | ||
| 415 | |||
| 416 | /* For each interrupt in the queue */ | ||
| 417 | for (;;) { | ||
| 418 | entry = be32_to_cpup(qpage + idx); | ||
| 419 | |||
| 420 | /* No more ? */ | ||
| 421 | if ((entry >> 31) == toggle) | ||
| 422 | break; | ||
| 423 | irq = entry & 0x7fffffff; | ||
| 424 | |||
| 425 | /* Skip dummies and IPIs */ | ||
| 426 | if (irq == XICS_DUMMY || irq == XICS_IPI) | ||
| 427 | goto next; | ||
| 428 | sb = kvmppc_xive_find_source(xive, irq, &src); | ||
| 429 | if (!sb) | ||
| 430 | goto next; | ||
| 431 | state = &sb->irq_state[src]; | ||
| 432 | |||
| 433 | /* Has it been rerouted ? */ | ||
| 434 | if (xc->server_num == state->act_server) | ||
| 435 | goto next; | ||
| 436 | |||
| 437 | /* | ||
| 438 | * Allright, it *has* been re-routed, kill it from | ||
| 439 | * the queue. | ||
| 440 | */ | ||
| 441 | qpage[idx] = cpu_to_be32((entry & 0x80000000) | XICS_DUMMY); | ||
| 442 | |||
| 443 | /* Find the HW interrupt */ | ||
| 444 | kvmppc_xive_select_irq(state, &hw_num, &xd); | ||
| 445 | |||
| 446 | /* If it's not an LSI, set PQ to 11 the EOI will force a resend */ | ||
| 447 | if (!(xd->flags & XIVE_IRQ_FLAG_LSI)) | ||
| 448 | GLUE(X_PFX,esb_load)(xd, XIVE_ESB_SET_PQ_11); | ||
| 449 | |||
| 450 | /* EOI the source */ | ||
| 451 | GLUE(X_PFX,source_eoi)(hw_num, xd); | ||
| 452 | |||
| 453 | next: | ||
| 454 | idx = (idx + 1) & q->msk; | ||
| 455 | if (idx == 0) | ||
| 456 | toggle ^= 1; | ||
| 457 | } | ||
| 458 | } | ||
| 459 | } | ||
| 460 | |||
| 388 | X_STATIC int GLUE(X_PFX,h_cppr)(struct kvm_vcpu *vcpu, unsigned long cppr) | 461 | X_STATIC int GLUE(X_PFX,h_cppr)(struct kvm_vcpu *vcpu, unsigned long cppr) |
| 389 | { | 462 | { |
| 390 | struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu; | 463 | struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu; |
| 464 | struct kvmppc_xive *xive = vcpu->kvm->arch.xive; | ||
| 391 | u8 old_cppr; | 465 | u8 old_cppr; |
| 392 | 466 | ||
| 393 | pr_devel("H_CPPR(cppr=%ld)\n", cppr); | 467 | pr_devel("H_CPPR(cppr=%ld)\n", cppr); |
| @@ -407,14 +481,34 @@ X_STATIC int GLUE(X_PFX,h_cppr)(struct kvm_vcpu *vcpu, unsigned long cppr) | |||
| 407 | */ | 481 | */ |
| 408 | smp_mb(); | 482 | smp_mb(); |
| 409 | 483 | ||
| 410 | /* | 484 | if (cppr > old_cppr) { |
| 411 | * We are masking less, we need to look for pending things | 485 | /* |
| 412 | * to deliver and set VP pending bits accordingly to trigger | 486 | * We are masking less, we need to look for pending things |
| 413 | * a new interrupt otherwise we might miss MFRR changes for | 487 | * to deliver and set VP pending bits accordingly to trigger |
| 414 | * which we have optimized out sending an IPI signal. | 488 | * a new interrupt otherwise we might miss MFRR changes for |
| 415 | */ | 489 | * which we have optimized out sending an IPI signal. |
| 416 | if (cppr > old_cppr) | 490 | */ |
| 417 | GLUE(X_PFX,push_pending_to_hw)(xc); | 491 | GLUE(X_PFX,push_pending_to_hw)(xc); |
| 492 | } else { | ||
| 493 | /* | ||
| 494 | * We are masking more, we need to check the queue for any | ||
| 495 | * interrupt that has been routed to another CPU, take | ||
| 496 | * it out (replace it with the dummy) and retrigger it. | ||
| 497 | * | ||
| 498 | * This is necessary since those interrupts may otherwise | ||
| 499 | * never be processed, at least not until this CPU restores | ||
| 500 | * its CPPR. | ||
| 501 | * | ||
| 502 | * This is in theory racy vs. HW adding new interrupts to | ||
| 503 | * the queue. In practice this works because the interesting | ||
| 504 | * cases are when the guest has done a set_xive() to move the | ||
| 505 | * interrupt away, which flushes the xive, followed by the | ||
| 506 | * target CPU doing a H_CPPR. So any new interrupt coming into | ||
| 507 | * the queue must still be routed to us and isn't a source | ||
| 508 | * of concern. | ||
| 509 | */ | ||
| 510 | GLUE(X_PFX,scan_for_rerouted_irqs)(xive, xc); | ||
| 511 | } | ||
| 418 | 512 | ||
| 419 | /* Apply new CPPR */ | 513 | /* Apply new CPPR */ |
| 420 | xc->hw_cppr = cppr; | 514 | xc->hw_cppr = cppr; |
diff --git a/arch/powerpc/lib/feature-fixups.c b/arch/powerpc/lib/feature-fixups.c index 288fe4f0db4e..e1bcdc32a851 100644 --- a/arch/powerpc/lib/feature-fixups.c +++ b/arch/powerpc/lib/feature-fixups.c | |||
| @@ -23,6 +23,7 @@ | |||
| 23 | #include <asm/page.h> | 23 | #include <asm/page.h> |
| 24 | #include <asm/sections.h> | 24 | #include <asm/sections.h> |
| 25 | #include <asm/setup.h> | 25 | #include <asm/setup.h> |
| 26 | #include <asm/security_features.h> | ||
| 26 | #include <asm/firmware.h> | 27 | #include <asm/firmware.h> |
| 27 | 28 | ||
| 28 | struct fixup_entry { | 29 | struct fixup_entry { |
| @@ -117,6 +118,120 @@ void do_feature_fixups(unsigned long value, void *fixup_start, void *fixup_end) | |||
| 117 | } | 118 | } |
| 118 | 119 | ||
| 119 | #ifdef CONFIG_PPC_BOOK3S_64 | 120 | #ifdef CONFIG_PPC_BOOK3S_64 |
| 121 | void do_stf_entry_barrier_fixups(enum stf_barrier_type types) | ||
| 122 | { | ||
| 123 | unsigned int instrs[3], *dest; | ||
| 124 | long *start, *end; | ||
| 125 | int i; | ||
| 126 | |||
| 127 | start = PTRRELOC(&__start___stf_entry_barrier_fixup), | ||
| 128 | end = PTRRELOC(&__stop___stf_entry_barrier_fixup); | ||
| 129 | |||
| 130 | instrs[0] = 0x60000000; /* nop */ | ||
| 131 | instrs[1] = 0x60000000; /* nop */ | ||
| 132 | instrs[2] = 0x60000000; /* nop */ | ||
| 133 | |||
| 134 | i = 0; | ||
| 135 | if (types & STF_BARRIER_FALLBACK) { | ||
| 136 | instrs[i++] = 0x7d4802a6; /* mflr r10 */ | ||
| 137 | instrs[i++] = 0x60000000; /* branch patched below */ | ||
| 138 | instrs[i++] = 0x7d4803a6; /* mtlr r10 */ | ||
| 139 | } else if (types & STF_BARRIER_EIEIO) { | ||
| 140 | instrs[i++] = 0x7e0006ac; /* eieio + bit 6 hint */ | ||
| 141 | } else if (types & STF_BARRIER_SYNC_ORI) { | ||
| 142 | instrs[i++] = 0x7c0004ac; /* hwsync */ | ||
| 143 | instrs[i++] = 0xe94d0000; /* ld r10,0(r13) */ | ||
| 144 | instrs[i++] = 0x63ff0000; /* ori 31,31,0 speculation barrier */ | ||
| 145 | } | ||
| 146 | |||
| 147 | for (i = 0; start < end; start++, i++) { | ||
| 148 | dest = (void *)start + *start; | ||
| 149 | |||
| 150 | pr_devel("patching dest %lx\n", (unsigned long)dest); | ||
| 151 | |||
| 152 | patch_instruction(dest, instrs[0]); | ||
| 153 | |||
| 154 | if (types & STF_BARRIER_FALLBACK) | ||
| 155 | patch_branch(dest + 1, (unsigned long)&stf_barrier_fallback, | ||
| 156 | BRANCH_SET_LINK); | ||
| 157 | else | ||
| 158 | patch_instruction(dest + 1, instrs[1]); | ||
| 159 | |||
| 160 | patch_instruction(dest + 2, instrs[2]); | ||
| 161 | } | ||
| 162 | |||
| 163 | printk(KERN_DEBUG "stf-barrier: patched %d entry locations (%s barrier)\n", i, | ||
| 164 | (types == STF_BARRIER_NONE) ? "no" : | ||
| 165 | (types == STF_BARRIER_FALLBACK) ? "fallback" : | ||
| 166 | (types == STF_BARRIER_EIEIO) ? "eieio" : | ||
| 167 | (types == (STF_BARRIER_SYNC_ORI)) ? "hwsync" | ||
| 168 | : "unknown"); | ||
| 169 | } | ||
| 170 | |||
| 171 | void do_stf_exit_barrier_fixups(enum stf_barrier_type types) | ||
| 172 | { | ||
| 173 | unsigned int instrs[6], *dest; | ||
| 174 | long *start, *end; | ||
| 175 | int i; | ||
| 176 | |||
| 177 | start = PTRRELOC(&__start___stf_exit_barrier_fixup), | ||
| 178 | end = PTRRELOC(&__stop___stf_exit_barrier_fixup); | ||
| 179 | |||
| 180 | instrs[0] = 0x60000000; /* nop */ | ||
| 181 | instrs[1] = 0x60000000; /* nop */ | ||
| 182 | instrs[2] = 0x60000000; /* nop */ | ||
| 183 | instrs[3] = 0x60000000; /* nop */ | ||
| 184 | instrs[4] = 0x60000000; /* nop */ | ||
| 185 | instrs[5] = 0x60000000; /* nop */ | ||
| 186 | |||
| 187 | i = 0; | ||
| 188 | if (types & STF_BARRIER_FALLBACK || types & STF_BARRIER_SYNC_ORI) { | ||
| 189 | if (cpu_has_feature(CPU_FTR_HVMODE)) { | ||
| 190 | instrs[i++] = 0x7db14ba6; /* mtspr 0x131, r13 (HSPRG1) */ | ||
| 191 | instrs[i++] = 0x7db04aa6; /* mfspr r13, 0x130 (HSPRG0) */ | ||
| 192 | } else { | ||
| 193 | instrs[i++] = 0x7db243a6; /* mtsprg 2,r13 */ | ||
| 194 | instrs[i++] = 0x7db142a6; /* mfsprg r13,1 */ | ||
| 195 | } | ||
| 196 | instrs[i++] = 0x7c0004ac; /* hwsync */ | ||
| 197 | instrs[i++] = 0xe9ad0000; /* ld r13,0(r13) */ | ||
| 198 | instrs[i++] = 0x63ff0000; /* ori 31,31,0 speculation barrier */ | ||
| 199 | if (cpu_has_feature(CPU_FTR_HVMODE)) { | ||
| 200 | instrs[i++] = 0x7db14aa6; /* mfspr r13, 0x131 (HSPRG1) */ | ||
| 201 | } else { | ||
| 202 | instrs[i++] = 0x7db242a6; /* mfsprg r13,2 */ | ||
| 203 | } | ||
| 204 | } else if (types & STF_BARRIER_EIEIO) { | ||
| 205 | instrs[i++] = 0x7e0006ac; /* eieio + bit 6 hint */ | ||
| 206 | } | ||
| 207 | |||
| 208 | for (i = 0; start < end; start++, i++) { | ||
| 209 | dest = (void *)start + *start; | ||
| 210 | |||
| 211 | pr_devel("patching dest %lx\n", (unsigned long)dest); | ||
| 212 | |||
| 213 | patch_instruction(dest, instrs[0]); | ||
| 214 | patch_instruction(dest + 1, instrs[1]); | ||
| 215 | patch_instruction(dest + 2, instrs[2]); | ||
| 216 | patch_instruction(dest + 3, instrs[3]); | ||
| 217 | patch_instruction(dest + 4, instrs[4]); | ||
| 218 | patch_instruction(dest + 5, instrs[5]); | ||
| 219 | } | ||
| 220 | printk(KERN_DEBUG "stf-barrier: patched %d exit locations (%s barrier)\n", i, | ||
| 221 | (types == STF_BARRIER_NONE) ? "no" : | ||
| 222 | (types == STF_BARRIER_FALLBACK) ? "fallback" : | ||
| 223 | (types == STF_BARRIER_EIEIO) ? "eieio" : | ||
| 224 | (types == (STF_BARRIER_SYNC_ORI)) ? "hwsync" | ||
| 225 | : "unknown"); | ||
| 226 | } | ||
| 227 | |||
| 228 | |||
| 229 | void do_stf_barrier_fixups(enum stf_barrier_type types) | ||
| 230 | { | ||
| 231 | do_stf_entry_barrier_fixups(types); | ||
| 232 | do_stf_exit_barrier_fixups(types); | ||
| 233 | } | ||
| 234 | |||
| 120 | void do_rfi_flush_fixups(enum l1d_flush_type types) | 235 | void do_rfi_flush_fixups(enum l1d_flush_type types) |
| 121 | { | 236 | { |
| 122 | unsigned int instrs[3], *dest; | 237 | unsigned int instrs[3], *dest; |
diff --git a/arch/powerpc/platforms/powernv/opal-nvram.c b/arch/powerpc/platforms/powernv/opal-nvram.c index 1bceb95f422d..5584247f5029 100644 --- a/arch/powerpc/platforms/powernv/opal-nvram.c +++ b/arch/powerpc/platforms/powernv/opal-nvram.c | |||
| @@ -44,6 +44,10 @@ static ssize_t opal_nvram_read(char *buf, size_t count, loff_t *index) | |||
| 44 | return count; | 44 | return count; |
| 45 | } | 45 | } |
| 46 | 46 | ||
| 47 | /* | ||
| 48 | * This can be called in the panic path with interrupts off, so use | ||
| 49 | * mdelay in that case. | ||
| 50 | */ | ||
| 47 | static ssize_t opal_nvram_write(char *buf, size_t count, loff_t *index) | 51 | static ssize_t opal_nvram_write(char *buf, size_t count, loff_t *index) |
| 48 | { | 52 | { |
| 49 | s64 rc = OPAL_BUSY; | 53 | s64 rc = OPAL_BUSY; |
| @@ -58,10 +62,16 @@ static ssize_t opal_nvram_write(char *buf, size_t count, loff_t *index) | |||
| 58 | while (rc == OPAL_BUSY || rc == OPAL_BUSY_EVENT) { | 62 | while (rc == OPAL_BUSY || rc == OPAL_BUSY_EVENT) { |
| 59 | rc = opal_write_nvram(__pa(buf), count, off); | 63 | rc = opal_write_nvram(__pa(buf), count, off); |
| 60 | if (rc == OPAL_BUSY_EVENT) { | 64 | if (rc == OPAL_BUSY_EVENT) { |
| 61 | msleep(OPAL_BUSY_DELAY_MS); | 65 | if (in_interrupt() || irqs_disabled()) |
| 66 | mdelay(OPAL_BUSY_DELAY_MS); | ||
| 67 | else | ||
| 68 | msleep(OPAL_BUSY_DELAY_MS); | ||
| 62 | opal_poll_events(NULL); | 69 | opal_poll_events(NULL); |
| 63 | } else if (rc == OPAL_BUSY) { | 70 | } else if (rc == OPAL_BUSY) { |
| 64 | msleep(OPAL_BUSY_DELAY_MS); | 71 | if (in_interrupt() || irqs_disabled()) |
| 72 | mdelay(OPAL_BUSY_DELAY_MS); | ||
| 73 | else | ||
| 74 | msleep(OPAL_BUSY_DELAY_MS); | ||
| 65 | } | 75 | } |
| 66 | } | 76 | } |
| 67 | 77 | ||
diff --git a/arch/powerpc/platforms/powernv/setup.c b/arch/powerpc/platforms/powernv/setup.c index ef8c9ce53a61..a6648ec99ca7 100644 --- a/arch/powerpc/platforms/powernv/setup.c +++ b/arch/powerpc/platforms/powernv/setup.c | |||
| @@ -131,6 +131,7 @@ static void __init pnv_setup_arch(void) | |||
| 131 | set_arch_panic_timeout(10, ARCH_PANIC_TIMEOUT); | 131 | set_arch_panic_timeout(10, ARCH_PANIC_TIMEOUT); |
| 132 | 132 | ||
| 133 | pnv_setup_rfi_flush(); | 133 | pnv_setup_rfi_flush(); |
| 134 | setup_stf_barrier(); | ||
| 134 | 135 | ||
| 135 | /* Initialize SMP */ | 136 | /* Initialize SMP */ |
| 136 | pnv_smp_init(); | 137 | pnv_smp_init(); |
diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c index b55ad4286dc7..fdb32e056ef4 100644 --- a/arch/powerpc/platforms/pseries/setup.c +++ b/arch/powerpc/platforms/pseries/setup.c | |||
| @@ -710,6 +710,7 @@ static void __init pSeries_setup_arch(void) | |||
| 710 | fwnmi_init(); | 710 | fwnmi_init(); |
| 711 | 711 | ||
| 712 | pseries_setup_rfi_flush(); | 712 | pseries_setup_rfi_flush(); |
| 713 | setup_stf_barrier(); | ||
| 713 | 714 | ||
| 714 | /* By default, only probe PCI (can be overridden by rtas_pci) */ | 715 | /* By default, only probe PCI (can be overridden by rtas_pci) */ |
| 715 | pci_add_flags(PCI_PROBE_ONLY); | 716 | pci_add_flags(PCI_PROBE_ONLY); |
diff --git a/arch/s390/configs/debug_defconfig b/arch/s390/configs/debug_defconfig index 6176fe9795ca..941d8cc6c9f5 100644 --- a/arch/s390/configs/debug_defconfig +++ b/arch/s390/configs/debug_defconfig | |||
| @@ -261,9 +261,9 @@ CONFIG_IP_VS_NQ=m | |||
| 261 | CONFIG_IP_VS_FTP=m | 261 | CONFIG_IP_VS_FTP=m |
| 262 | CONFIG_IP_VS_PE_SIP=m | 262 | CONFIG_IP_VS_PE_SIP=m |
| 263 | CONFIG_NF_CONNTRACK_IPV4=m | 263 | CONFIG_NF_CONNTRACK_IPV4=m |
| 264 | CONFIG_NF_TABLES_IPV4=m | 264 | CONFIG_NF_TABLES_IPV4=y |
| 265 | CONFIG_NFT_CHAIN_ROUTE_IPV4=m | 265 | CONFIG_NFT_CHAIN_ROUTE_IPV4=m |
| 266 | CONFIG_NF_TABLES_ARP=m | 266 | CONFIG_NF_TABLES_ARP=y |
| 267 | CONFIG_NFT_CHAIN_NAT_IPV4=m | 267 | CONFIG_NFT_CHAIN_NAT_IPV4=m |
| 268 | CONFIG_IP_NF_IPTABLES=m | 268 | CONFIG_IP_NF_IPTABLES=m |
| 269 | CONFIG_IP_NF_MATCH_AH=m | 269 | CONFIG_IP_NF_MATCH_AH=m |
| @@ -284,7 +284,7 @@ CONFIG_IP_NF_ARPTABLES=m | |||
| 284 | CONFIG_IP_NF_ARPFILTER=m | 284 | CONFIG_IP_NF_ARPFILTER=m |
| 285 | CONFIG_IP_NF_ARP_MANGLE=m | 285 | CONFIG_IP_NF_ARP_MANGLE=m |
| 286 | CONFIG_NF_CONNTRACK_IPV6=m | 286 | CONFIG_NF_CONNTRACK_IPV6=m |
| 287 | CONFIG_NF_TABLES_IPV6=m | 287 | CONFIG_NF_TABLES_IPV6=y |
| 288 | CONFIG_NFT_CHAIN_ROUTE_IPV6=m | 288 | CONFIG_NFT_CHAIN_ROUTE_IPV6=m |
| 289 | CONFIG_NFT_CHAIN_NAT_IPV6=m | 289 | CONFIG_NFT_CHAIN_NAT_IPV6=m |
| 290 | CONFIG_IP6_NF_IPTABLES=m | 290 | CONFIG_IP6_NF_IPTABLES=m |
| @@ -305,7 +305,7 @@ CONFIG_IP6_NF_RAW=m | |||
| 305 | CONFIG_IP6_NF_SECURITY=m | 305 | CONFIG_IP6_NF_SECURITY=m |
| 306 | CONFIG_IP6_NF_NAT=m | 306 | CONFIG_IP6_NF_NAT=m |
| 307 | CONFIG_IP6_NF_TARGET_MASQUERADE=m | 307 | CONFIG_IP6_NF_TARGET_MASQUERADE=m |
| 308 | CONFIG_NF_TABLES_BRIDGE=m | 308 | CONFIG_NF_TABLES_BRIDGE=y |
| 309 | CONFIG_RDS=m | 309 | CONFIG_RDS=m |
| 310 | CONFIG_RDS_RDMA=m | 310 | CONFIG_RDS_RDMA=m |
| 311 | CONFIG_RDS_TCP=m | 311 | CONFIG_RDS_TCP=m |
| @@ -604,7 +604,6 @@ CONFIG_DETECT_HUNG_TASK=y | |||
| 604 | CONFIG_WQ_WATCHDOG=y | 604 | CONFIG_WQ_WATCHDOG=y |
| 605 | CONFIG_PANIC_ON_OOPS=y | 605 | CONFIG_PANIC_ON_OOPS=y |
| 606 | CONFIG_DEBUG_TIMEKEEPING=y | 606 | CONFIG_DEBUG_TIMEKEEPING=y |
| 607 | CONFIG_DEBUG_WW_MUTEX_SLOWPATH=y | ||
| 608 | CONFIG_PROVE_LOCKING=y | 607 | CONFIG_PROVE_LOCKING=y |
| 609 | CONFIG_LOCK_STAT=y | 608 | CONFIG_LOCK_STAT=y |
| 610 | CONFIG_DEBUG_LOCKDEP=y | 609 | CONFIG_DEBUG_LOCKDEP=y |
diff --git a/arch/s390/configs/performance_defconfig b/arch/s390/configs/performance_defconfig index c105bcc6d7a6..eb6f75f24208 100644 --- a/arch/s390/configs/performance_defconfig +++ b/arch/s390/configs/performance_defconfig | |||
| @@ -259,9 +259,9 @@ CONFIG_IP_VS_NQ=m | |||
| 259 | CONFIG_IP_VS_FTP=m | 259 | CONFIG_IP_VS_FTP=m |
| 260 | CONFIG_IP_VS_PE_SIP=m | 260 | CONFIG_IP_VS_PE_SIP=m |
| 261 | CONFIG_NF_CONNTRACK_IPV4=m | 261 | CONFIG_NF_CONNTRACK_IPV4=m |
| 262 | CONFIG_NF_TABLES_IPV4=m | 262 | CONFIG_NF_TABLES_IPV4=y |
| 263 | CONFIG_NFT_CHAIN_ROUTE_IPV4=m | 263 | CONFIG_NFT_CHAIN_ROUTE_IPV4=m |
| 264 | CONFIG_NF_TABLES_ARP=m | 264 | CONFIG_NF_TABLES_ARP=y |
| 265 | CONFIG_NFT_CHAIN_NAT_IPV4=m | 265 | CONFIG_NFT_CHAIN_NAT_IPV4=m |
| 266 | CONFIG_IP_NF_IPTABLES=m | 266 | CONFIG_IP_NF_IPTABLES=m |
| 267 | CONFIG_IP_NF_MATCH_AH=m | 267 | CONFIG_IP_NF_MATCH_AH=m |
| @@ -282,7 +282,7 @@ CONFIG_IP_NF_ARPTABLES=m | |||
| 282 | CONFIG_IP_NF_ARPFILTER=m | 282 | CONFIG_IP_NF_ARPFILTER=m |
| 283 | CONFIG_IP_NF_ARP_MANGLE=m | 283 | CONFIG_IP_NF_ARP_MANGLE=m |
| 284 | CONFIG_NF_CONNTRACK_IPV6=m | 284 | CONFIG_NF_CONNTRACK_IPV6=m |
| 285 | CONFIG_NF_TABLES_IPV6=m | 285 | CONFIG_NF_TABLES_IPV6=y |
| 286 | CONFIG_NFT_CHAIN_ROUTE_IPV6=m | 286 | CONFIG_NFT_CHAIN_ROUTE_IPV6=m |
| 287 | CONFIG_NFT_CHAIN_NAT_IPV6=m | 287 | CONFIG_NFT_CHAIN_NAT_IPV6=m |
| 288 | CONFIG_IP6_NF_IPTABLES=m | 288 | CONFIG_IP6_NF_IPTABLES=m |
| @@ -303,7 +303,7 @@ CONFIG_IP6_NF_RAW=m | |||
| 303 | CONFIG_IP6_NF_SECURITY=m | 303 | CONFIG_IP6_NF_SECURITY=m |
| 304 | CONFIG_IP6_NF_NAT=m | 304 | CONFIG_IP6_NF_NAT=m |
| 305 | CONFIG_IP6_NF_TARGET_MASQUERADE=m | 305 | CONFIG_IP6_NF_TARGET_MASQUERADE=m |
| 306 | CONFIG_NF_TABLES_BRIDGE=m | 306 | CONFIG_NF_TABLES_BRIDGE=y |
| 307 | CONFIG_RDS=m | 307 | CONFIG_RDS=m |
| 308 | CONFIG_RDS_RDMA=m | 308 | CONFIG_RDS_RDMA=m |
| 309 | CONFIG_RDS_TCP=m | 309 | CONFIG_RDS_TCP=m |
diff --git a/arch/s390/crypto/crc32be-vx.S b/arch/s390/crypto/crc32be-vx.S index e8077f0971f8..2bf01ba44107 100644 --- a/arch/s390/crypto/crc32be-vx.S +++ b/arch/s390/crypto/crc32be-vx.S | |||
| @@ -13,6 +13,7 @@ | |||
| 13 | */ | 13 | */ |
| 14 | 14 | ||
| 15 | #include <linux/linkage.h> | 15 | #include <linux/linkage.h> |
| 16 | #include <asm/nospec-insn.h> | ||
| 16 | #include <asm/vx-insn.h> | 17 | #include <asm/vx-insn.h> |
| 17 | 18 | ||
| 18 | /* Vector register range containing CRC-32 constants */ | 19 | /* Vector register range containing CRC-32 constants */ |
| @@ -67,6 +68,8 @@ | |||
| 67 | 68 | ||
| 68 | .previous | 69 | .previous |
| 69 | 70 | ||
| 71 | GEN_BR_THUNK %r14 | ||
| 72 | |||
| 70 | .text | 73 | .text |
| 71 | /* | 74 | /* |
| 72 | * The CRC-32 function(s) use these calling conventions: | 75 | * The CRC-32 function(s) use these calling conventions: |
| @@ -203,6 +206,6 @@ ENTRY(crc32_be_vgfm_16) | |||
| 203 | 206 | ||
| 204 | .Ldone: | 207 | .Ldone: |
| 205 | VLGVF %r2,%v2,3 | 208 | VLGVF %r2,%v2,3 |
| 206 | br %r14 | 209 | BR_EX %r14 |
| 207 | 210 | ||
| 208 | .previous | 211 | .previous |
diff --git a/arch/s390/crypto/crc32le-vx.S b/arch/s390/crypto/crc32le-vx.S index d8c67a58c0c5..7d6f568bd3ad 100644 --- a/arch/s390/crypto/crc32le-vx.S +++ b/arch/s390/crypto/crc32le-vx.S | |||
| @@ -14,6 +14,7 @@ | |||
| 14 | */ | 14 | */ |
| 15 | 15 | ||
| 16 | #include <linux/linkage.h> | 16 | #include <linux/linkage.h> |
| 17 | #include <asm/nospec-insn.h> | ||
| 17 | #include <asm/vx-insn.h> | 18 | #include <asm/vx-insn.h> |
| 18 | 19 | ||
| 19 | /* Vector register range containing CRC-32 constants */ | 20 | /* Vector register range containing CRC-32 constants */ |
| @@ -76,6 +77,7 @@ | |||
| 76 | 77 | ||
| 77 | .previous | 78 | .previous |
| 78 | 79 | ||
| 80 | GEN_BR_THUNK %r14 | ||
| 79 | 81 | ||
| 80 | .text | 82 | .text |
| 81 | 83 | ||
| @@ -264,6 +266,6 @@ crc32_le_vgfm_generic: | |||
| 264 | 266 | ||
| 265 | .Ldone: | 267 | .Ldone: |
| 266 | VLGVF %r2,%v2,2 | 268 | VLGVF %r2,%v2,2 |
| 267 | br %r14 | 269 | BR_EX %r14 |
| 268 | 270 | ||
| 269 | .previous | 271 | .previous |
diff --git a/arch/s390/include/asm/nospec-insn.h b/arch/s390/include/asm/nospec-insn.h new file mode 100644 index 000000000000..a01f81186e86 --- /dev/null +++ b/arch/s390/include/asm/nospec-insn.h | |||
| @@ -0,0 +1,196 @@ | |||
| 1 | /* SPDX-License-Identifier: GPL-2.0 */ | ||
| 2 | #ifndef _ASM_S390_NOSPEC_ASM_H | ||
| 3 | #define _ASM_S390_NOSPEC_ASM_H | ||
| 4 | |||
| 5 | #include <asm/alternative-asm.h> | ||
| 6 | #include <asm/asm-offsets.h> | ||
| 7 | #include <asm/dwarf.h> | ||
| 8 | |||
| 9 | #ifdef __ASSEMBLY__ | ||
| 10 | |||
| 11 | #ifdef CONFIG_EXPOLINE | ||
| 12 | |||
| 13 | _LC_BR_R1 = __LC_BR_R1 | ||
| 14 | |||
| 15 | /* | ||
| 16 | * The expoline macros are used to create thunks in the same format | ||
| 17 | * as gcc generates them. The 'comdat' section flag makes sure that | ||
| 18 | * the various thunks are merged into a single copy. | ||
| 19 | */ | ||
| 20 | .macro __THUNK_PROLOG_NAME name | ||
| 21 | .pushsection .text.\name,"axG",@progbits,\name,comdat | ||
| 22 | .globl \name | ||
| 23 | .hidden \name | ||
| 24 | .type \name,@function | ||
| 25 | \name: | ||
| 26 | CFI_STARTPROC | ||
| 27 | .endm | ||
| 28 | |||
| 29 | .macro __THUNK_EPILOG | ||
| 30 | CFI_ENDPROC | ||
| 31 | .popsection | ||
| 32 | .endm | ||
| 33 | |||
| 34 | .macro __THUNK_PROLOG_BR r1,r2 | ||
| 35 | __THUNK_PROLOG_NAME __s390x_indirect_jump_r\r2\()use_r\r1 | ||
| 36 | .endm | ||
| 37 | |||
| 38 | .macro __THUNK_PROLOG_BC d0,r1,r2 | ||
| 39 | __THUNK_PROLOG_NAME __s390x_indirect_branch_\d0\()_\r2\()use_\r1 | ||
| 40 | .endm | ||
| 41 | |||
| 42 | .macro __THUNK_BR r1,r2 | ||
| 43 | jg __s390x_indirect_jump_r\r2\()use_r\r1 | ||
| 44 | .endm | ||
| 45 | |||
| 46 | .macro __THUNK_BC d0,r1,r2 | ||
| 47 | jg __s390x_indirect_branch_\d0\()_\r2\()use_\r1 | ||
| 48 | .endm | ||
| 49 | |||
| 50 | .macro __THUNK_BRASL r1,r2,r3 | ||
| 51 | brasl \r1,__s390x_indirect_jump_r\r3\()use_r\r2 | ||
| 52 | .endm | ||
| 53 | |||
| 54 | .macro __DECODE_RR expand,reg,ruse | ||
| 55 | .set __decode_fail,1 | ||
| 56 | .irp r1,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15 | ||
| 57 | .ifc \reg,%r\r1 | ||
| 58 | .irp r2,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15 | ||
| 59 | .ifc \ruse,%r\r2 | ||
| 60 | \expand \r1,\r2 | ||
| 61 | .set __decode_fail,0 | ||
| 62 | .endif | ||
| 63 | .endr | ||
| 64 | .endif | ||
| 65 | .endr | ||
| 66 | .if __decode_fail == 1 | ||
| 67 | .error "__DECODE_RR failed" | ||
| 68 | .endif | ||
| 69 | .endm | ||
| 70 | |||
| 71 | .macro __DECODE_RRR expand,rsave,rtarget,ruse | ||
| 72 | .set __decode_fail,1 | ||
| 73 | .irp r1,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15 | ||
| 74 | .ifc \rsave,%r\r1 | ||
| 75 | .irp r2,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15 | ||
| 76 | .ifc \rtarget,%r\r2 | ||
| 77 | .irp r3,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15 | ||
| 78 | .ifc \ruse,%r\r3 | ||
| 79 | \expand \r1,\r2,\r3 | ||
| 80 | .set __decode_fail,0 | ||
| 81 | .endif | ||
| 82 | .endr | ||
| 83 | .endif | ||
| 84 | .endr | ||
| 85 | .endif | ||
| 86 | .endr | ||
| 87 | .if __decode_fail == 1 | ||
| 88 | .error "__DECODE_RRR failed" | ||
| 89 | .endif | ||
| 90 | .endm | ||
| 91 | |||
| 92 | .macro __DECODE_DRR expand,disp,reg,ruse | ||
| 93 | .set __decode_fail,1 | ||
| 94 | .irp r1,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15 | ||
| 95 | .ifc \reg,%r\r1 | ||
| 96 | .irp r2,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15 | ||
| 97 | .ifc \ruse,%r\r2 | ||
| 98 | \expand \disp,\r1,\r2 | ||
| 99 | .set __decode_fail,0 | ||
| 100 | .endif | ||
| 101 | .endr | ||
| 102 | .endif | ||
| 103 | .endr | ||
| 104 | .if __decode_fail == 1 | ||
| 105 | .error "__DECODE_DRR failed" | ||
| 106 | .endif | ||
| 107 | .endm | ||
| 108 | |||
| 109 | .macro __THUNK_EX_BR reg,ruse | ||
| 110 | # Be very careful when adding instructions to this macro! | ||
| 111 | # The ALTERNATIVE replacement code has a .+10 which targets | ||
| 112 | # the "br \reg" after the code has been patched. | ||
| 113 | #ifdef CONFIG_HAVE_MARCH_Z10_FEATURES | ||
| 114 | exrl 0,555f | ||
| 115 | j . | ||
| 116 | #else | ||
| 117 | .ifc \reg,%r1 | ||
| 118 | ALTERNATIVE "ex %r0,_LC_BR_R1", ".insn ril,0xc60000000000,0,.+10", 35 | ||
| 119 | j . | ||
| 120 | .else | ||
| 121 | larl \ruse,555f | ||
| 122 | ex 0,0(\ruse) | ||
| 123 | j . | ||
| 124 | .endif | ||
| 125 | #endif | ||
| 126 | 555: br \reg | ||
| 127 | .endm | ||
| 128 | |||
| 129 | .macro __THUNK_EX_BC disp,reg,ruse | ||
| 130 | #ifdef CONFIG_HAVE_MARCH_Z10_FEATURES | ||
| 131 | exrl 0,556f | ||
| 132 | j . | ||
| 133 | #else | ||
| 134 | larl \ruse,556f | ||
| 135 | ex 0,0(\ruse) | ||
| 136 | j . | ||
| 137 | #endif | ||
| 138 | 556: b \disp(\reg) | ||
| 139 | .endm | ||
| 140 | |||
| 141 | .macro GEN_BR_THUNK reg,ruse=%r1 | ||
| 142 | __DECODE_RR __THUNK_PROLOG_BR,\reg,\ruse | ||
| 143 | __THUNK_EX_BR \reg,\ruse | ||
| 144 | __THUNK_EPILOG | ||
| 145 | .endm | ||
| 146 | |||
| 147 | .macro GEN_B_THUNK disp,reg,ruse=%r1 | ||
| 148 | __DECODE_DRR __THUNK_PROLOG_BC,\disp,\reg,\ruse | ||
| 149 | __THUNK_EX_BC \disp,\reg,\ruse | ||
| 150 | __THUNK_EPILOG | ||
| 151 | .endm | ||
| 152 | |||
| 153 | .macro BR_EX reg,ruse=%r1 | ||
| 154 | 557: __DECODE_RR __THUNK_BR,\reg,\ruse | ||
| 155 | .pushsection .s390_indirect_branches,"a",@progbits | ||
| 156 | .long 557b-. | ||
| 157 | .popsection | ||
| 158 | .endm | ||
| 159 | |||
| 160 | .macro B_EX disp,reg,ruse=%r1 | ||
| 161 | 558: __DECODE_DRR __THUNK_BC,\disp,\reg,\ruse | ||
| 162 | .pushsection .s390_indirect_branches,"a",@progbits | ||
| 163 | .long 558b-. | ||
| 164 | .popsection | ||
| 165 | .endm | ||
| 166 | |||
| 167 | .macro BASR_EX rsave,rtarget,ruse=%r1 | ||
| 168 | 559: __DECODE_RRR __THUNK_BRASL,\rsave,\rtarget,\ruse | ||
| 169 | .pushsection .s390_indirect_branches,"a",@progbits | ||
| 170 | .long 559b-. | ||
| 171 | .popsection | ||
| 172 | .endm | ||
| 173 | |||
| 174 | #else | ||
| 175 | .macro GEN_BR_THUNK reg,ruse=%r1 | ||
| 176 | .endm | ||
| 177 | |||
| 178 | .macro GEN_B_THUNK disp,reg,ruse=%r1 | ||
| 179 | .endm | ||
| 180 | |||
| 181 | .macro BR_EX reg,ruse=%r1 | ||
| 182 | br \reg | ||
| 183 | .endm | ||
| 184 | |||
| 185 | .macro B_EX disp,reg,ruse=%r1 | ||
| 186 | b \disp(\reg) | ||
| 187 | .endm | ||
| 188 | |||
| 189 | .macro BASR_EX rsave,rtarget,ruse=%r1 | ||
| 190 | basr \rsave,\rtarget | ||
| 191 | .endm | ||
| 192 | #endif | ||
| 193 | |||
| 194 | #endif /* __ASSEMBLY__ */ | ||
| 195 | |||
| 196 | #endif /* _ASM_S390_NOSPEC_ASM_H */ | ||
diff --git a/arch/s390/include/asm/purgatory.h b/arch/s390/include/asm/purgatory.h index e297bcfc476f..6090670df51f 100644 --- a/arch/s390/include/asm/purgatory.h +++ b/arch/s390/include/asm/purgatory.h | |||
| @@ -13,5 +13,11 @@ | |||
| 13 | 13 | ||
| 14 | int verify_sha256_digest(void); | 14 | int verify_sha256_digest(void); |
| 15 | 15 | ||
| 16 | extern u64 kernel_entry; | ||
| 17 | extern u64 kernel_type; | ||
| 18 | |||
| 19 | extern u64 crash_start; | ||
| 20 | extern u64 crash_size; | ||
| 21 | |||
| 16 | #endif /* __ASSEMBLY__ */ | 22 | #endif /* __ASSEMBLY__ */ |
| 17 | #endif /* _S390_PURGATORY_H_ */ | 23 | #endif /* _S390_PURGATORY_H_ */ |
diff --git a/arch/s390/kernel/Makefile b/arch/s390/kernel/Makefile index 84ea6225efb4..f92dd8ed3884 100644 --- a/arch/s390/kernel/Makefile +++ b/arch/s390/kernel/Makefile | |||
| @@ -65,6 +65,7 @@ obj-y += nospec-branch.o | |||
| 65 | 65 | ||
| 66 | extra-y += head.o head64.o vmlinux.lds | 66 | extra-y += head.o head64.o vmlinux.lds |
| 67 | 67 | ||
| 68 | obj-$(CONFIG_SYSFS) += nospec-sysfs.o | ||
| 68 | CFLAGS_REMOVE_nospec-branch.o += $(CC_FLAGS_EXPOLINE) | 69 | CFLAGS_REMOVE_nospec-branch.o += $(CC_FLAGS_EXPOLINE) |
| 69 | 70 | ||
| 70 | obj-$(CONFIG_MODULES) += module.o | 71 | obj-$(CONFIG_MODULES) += module.o |
diff --git a/arch/s390/kernel/asm-offsets.c b/arch/s390/kernel/asm-offsets.c index eb2a5c0443cd..11aea745a2a6 100644 --- a/arch/s390/kernel/asm-offsets.c +++ b/arch/s390/kernel/asm-offsets.c | |||
| @@ -181,6 +181,7 @@ int main(void) | |||
| 181 | OFFSET(__LC_MACHINE_FLAGS, lowcore, machine_flags); | 181 | OFFSET(__LC_MACHINE_FLAGS, lowcore, machine_flags); |
| 182 | OFFSET(__LC_PREEMPT_COUNT, lowcore, preempt_count); | 182 | OFFSET(__LC_PREEMPT_COUNT, lowcore, preempt_count); |
| 183 | OFFSET(__LC_GMAP, lowcore, gmap); | 183 | OFFSET(__LC_GMAP, lowcore, gmap); |
| 184 | OFFSET(__LC_BR_R1, lowcore, br_r1_trampoline); | ||
| 184 | /* software defined ABI-relevant lowcore locations 0xe00 - 0xe20 */ | 185 | /* software defined ABI-relevant lowcore locations 0xe00 - 0xe20 */ |
| 185 | OFFSET(__LC_DUMP_REIPL, lowcore, ipib); | 186 | OFFSET(__LC_DUMP_REIPL, lowcore, ipib); |
| 186 | /* hardware defined lowcore locations 0x1000 - 0x18ff */ | 187 | /* hardware defined lowcore locations 0x1000 - 0x18ff */ |
diff --git a/arch/s390/kernel/base.S b/arch/s390/kernel/base.S index f6c56009e822..b65874b0b412 100644 --- a/arch/s390/kernel/base.S +++ b/arch/s390/kernel/base.S | |||
| @@ -9,18 +9,22 @@ | |||
| 9 | 9 | ||
| 10 | #include <linux/linkage.h> | 10 | #include <linux/linkage.h> |
| 11 | #include <asm/asm-offsets.h> | 11 | #include <asm/asm-offsets.h> |
| 12 | #include <asm/nospec-insn.h> | ||
| 12 | #include <asm/ptrace.h> | 13 | #include <asm/ptrace.h> |
| 13 | #include <asm/sigp.h> | 14 | #include <asm/sigp.h> |
| 14 | 15 | ||
| 16 | GEN_BR_THUNK %r9 | ||
| 17 | GEN_BR_THUNK %r14 | ||
| 18 | |||
| 15 | ENTRY(s390_base_mcck_handler) | 19 | ENTRY(s390_base_mcck_handler) |
| 16 | basr %r13,0 | 20 | basr %r13,0 |
| 17 | 0: lg %r15,__LC_PANIC_STACK # load panic stack | 21 | 0: lg %r15,__LC_PANIC_STACK # load panic stack |
| 18 | aghi %r15,-STACK_FRAME_OVERHEAD | 22 | aghi %r15,-STACK_FRAME_OVERHEAD |
| 19 | larl %r1,s390_base_mcck_handler_fn | 23 | larl %r1,s390_base_mcck_handler_fn |
| 20 | lg %r1,0(%r1) | 24 | lg %r9,0(%r1) |
| 21 | ltgr %r1,%r1 | 25 | ltgr %r9,%r9 |
| 22 | jz 1f | 26 | jz 1f |
| 23 | basr %r14,%r1 | 27 | BASR_EX %r14,%r9 |
| 24 | 1: la %r1,4095 | 28 | 1: la %r1,4095 |
| 25 | lmg %r0,%r15,__LC_GPREGS_SAVE_AREA-4095(%r1) | 29 | lmg %r0,%r15,__LC_GPREGS_SAVE_AREA-4095(%r1) |
| 26 | lpswe __LC_MCK_OLD_PSW | 30 | lpswe __LC_MCK_OLD_PSW |
| @@ -37,10 +41,10 @@ ENTRY(s390_base_ext_handler) | |||
| 37 | basr %r13,0 | 41 | basr %r13,0 |
| 38 | 0: aghi %r15,-STACK_FRAME_OVERHEAD | 42 | 0: aghi %r15,-STACK_FRAME_OVERHEAD |
| 39 | larl %r1,s390_base_ext_handler_fn | 43 | larl %r1,s390_base_ext_handler_fn |
| 40 | lg %r1,0(%r1) | 44 | lg %r9,0(%r1) |
| 41 | ltgr %r1,%r1 | 45 | ltgr %r9,%r9 |
| 42 | jz 1f | 46 | jz 1f |
| 43 | basr %r14,%r1 | 47 | BASR_EX %r14,%r9 |
| 44 | 1: lmg %r0,%r15,__LC_SAVE_AREA_ASYNC | 48 | 1: lmg %r0,%r15,__LC_SAVE_AREA_ASYNC |
| 45 | ni __LC_EXT_OLD_PSW+1,0xfd # clear wait state bit | 49 | ni __LC_EXT_OLD_PSW+1,0xfd # clear wait state bit |
| 46 | lpswe __LC_EXT_OLD_PSW | 50 | lpswe __LC_EXT_OLD_PSW |
| @@ -57,10 +61,10 @@ ENTRY(s390_base_pgm_handler) | |||
| 57 | basr %r13,0 | 61 | basr %r13,0 |
| 58 | 0: aghi %r15,-STACK_FRAME_OVERHEAD | 62 | 0: aghi %r15,-STACK_FRAME_OVERHEAD |
| 59 | larl %r1,s390_base_pgm_handler_fn | 63 | larl %r1,s390_base_pgm_handler_fn |
| 60 | lg %r1,0(%r1) | 64 | lg %r9,0(%r1) |
| 61 | ltgr %r1,%r1 | 65 | ltgr %r9,%r9 |
| 62 | jz 1f | 66 | jz 1f |
| 63 | basr %r14,%r1 | 67 | BASR_EX %r14,%r9 |
| 64 | lmg %r0,%r15,__LC_SAVE_AREA_SYNC | 68 | lmg %r0,%r15,__LC_SAVE_AREA_SYNC |
| 65 | lpswe __LC_PGM_OLD_PSW | 69 | lpswe __LC_PGM_OLD_PSW |
| 66 | 1: lpswe disabled_wait_psw-0b(%r13) | 70 | 1: lpswe disabled_wait_psw-0b(%r13) |
| @@ -117,7 +121,7 @@ ENTRY(diag308_reset) | |||
| 117 | larl %r4,.Lcontinue_psw # Restore PSW flags | 121 | larl %r4,.Lcontinue_psw # Restore PSW flags |
| 118 | lpswe 0(%r4) | 122 | lpswe 0(%r4) |
| 119 | .Lcontinue: | 123 | .Lcontinue: |
| 120 | br %r14 | 124 | BR_EX %r14 |
| 121 | .align 16 | 125 | .align 16 |
| 122 | .Lrestart_psw: | 126 | .Lrestart_psw: |
| 123 | .long 0x00080000,0x80000000 + .Lrestart_part2 | 127 | .long 0x00080000,0x80000000 + .Lrestart_part2 |
diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S index 3f22f139a041..f03402efab4b 100644 --- a/arch/s390/kernel/entry.S +++ b/arch/s390/kernel/entry.S | |||
| @@ -28,6 +28,7 @@ | |||
| 28 | #include <asm/setup.h> | 28 | #include <asm/setup.h> |
| 29 | #include <asm/nmi.h> | 29 | #include <asm/nmi.h> |
| 30 | #include <asm/export.h> | 30 | #include <asm/export.h> |
| 31 | #include <asm/nospec-insn.h> | ||
| 31 | 32 | ||
| 32 | __PT_R0 = __PT_GPRS | 33 | __PT_R0 = __PT_GPRS |
| 33 | __PT_R1 = __PT_GPRS + 8 | 34 | __PT_R1 = __PT_GPRS + 8 |
| @@ -183,67 +184,9 @@ _LPP_OFFSET = __LC_LPP | |||
| 183 | "jnz .+8; .long 0xb2e8d000", 82 | 184 | "jnz .+8; .long 0xb2e8d000", 82 |
| 184 | .endm | 185 | .endm |
| 185 | 186 | ||
| 186 | #ifdef CONFIG_EXPOLINE | 187 | GEN_BR_THUNK %r9 |
| 187 | 188 | GEN_BR_THUNK %r14 | |
| 188 | .macro GEN_BR_THUNK name,reg,tmp | 189 | GEN_BR_THUNK %r14,%r11 |
| 189 | .section .text.\name,"axG",@progbits,\name,comdat | ||
| 190 | .globl \name | ||
| 191 | .hidden \name | ||
| 192 | .type \name,@function | ||
| 193 | \name: | ||
| 194 | CFI_STARTPROC | ||
| 195 | #ifdef CONFIG_HAVE_MARCH_Z10_FEATURES | ||
| 196 | exrl 0,0f | ||
| 197 | #else | ||
| 198 | larl \tmp,0f | ||
| 199 | ex 0,0(\tmp) | ||
| 200 | #endif | ||
| 201 | j . | ||
| 202 | 0: br \reg | ||
| 203 | CFI_ENDPROC | ||
| 204 | .endm | ||
| 205 | |||
| 206 | GEN_BR_THUNK __s390x_indirect_jump_r1use_r9,%r9,%r1 | ||
| 207 | GEN_BR_THUNK __s390x_indirect_jump_r1use_r14,%r14,%r1 | ||
| 208 | GEN_BR_THUNK __s390x_indirect_jump_r11use_r14,%r14,%r11 | ||
| 209 | |||
| 210 | .macro BASR_R14_R9 | ||
| 211 | 0: brasl %r14,__s390x_indirect_jump_r1use_r9 | ||
| 212 | .pushsection .s390_indirect_branches,"a",@progbits | ||
| 213 | .long 0b-. | ||
| 214 | .popsection | ||
| 215 | .endm | ||
| 216 | |||
| 217 | .macro BR_R1USE_R14 | ||
| 218 | 0: jg __s390x_indirect_jump_r1use_r14 | ||
| 219 | .pushsection .s390_indirect_branches,"a",@progbits | ||
| 220 | .long 0b-. | ||
| 221 | .popsection | ||
| 222 | .endm | ||
| 223 | |||
| 224 | .macro BR_R11USE_R14 | ||
| 225 | 0: jg __s390x_indirect_jump_r11use_r14 | ||
| 226 | .pushsection .s390_indirect_branches,"a",@progbits | ||
| 227 | .long 0b-. | ||
| 228 | .popsection | ||
| 229 | .endm | ||
| 230 | |||
| 231 | #else /* CONFIG_EXPOLINE */ | ||
| 232 | |||
| 233 | .macro BASR_R14_R9 | ||
| 234 | basr %r14,%r9 | ||
| 235 | .endm | ||
| 236 | |||
| 237 | .macro BR_R1USE_R14 | ||
| 238 | br %r14 | ||
| 239 | .endm | ||
| 240 | |||
| 241 | .macro BR_R11USE_R14 | ||
| 242 | br %r14 | ||
| 243 | .endm | ||
| 244 | |||
| 245 | #endif /* CONFIG_EXPOLINE */ | ||
| 246 | |||
| 247 | 190 | ||
| 248 | .section .kprobes.text, "ax" | 191 | .section .kprobes.text, "ax" |
| 249 | .Ldummy: | 192 | .Ldummy: |
| @@ -260,7 +203,7 @@ _LPP_OFFSET = __LC_LPP | |||
| 260 | ENTRY(__bpon) | 203 | ENTRY(__bpon) |
| 261 | .globl __bpon | 204 | .globl __bpon |
| 262 | BPON | 205 | BPON |
| 263 | BR_R1USE_R14 | 206 | BR_EX %r14 |
| 264 | 207 | ||
| 265 | /* | 208 | /* |
| 266 | * Scheduler resume function, called by switch_to | 209 | * Scheduler resume function, called by switch_to |
| @@ -284,7 +227,7 @@ ENTRY(__switch_to) | |||
| 284 | mvc __LC_CURRENT_PID(4,%r0),0(%r3) # store pid of next | 227 | mvc __LC_CURRENT_PID(4,%r0),0(%r3) # store pid of next |
| 285 | lmg %r6,%r15,__SF_GPRS(%r15) # load gprs of next task | 228 | lmg %r6,%r15,__SF_GPRS(%r15) # load gprs of next task |
| 286 | ALTERNATIVE "", ".insn s,0xb2800000,_LPP_OFFSET", 40 | 229 | ALTERNATIVE "", ".insn s,0xb2800000,_LPP_OFFSET", 40 |
| 287 | BR_R1USE_R14 | 230 | BR_EX %r14 |
| 288 | 231 | ||
| 289 | .L__critical_start: | 232 | .L__critical_start: |
| 290 | 233 | ||
| @@ -351,7 +294,7 @@ sie_exit: | |||
| 351 | xgr %r5,%r5 | 294 | xgr %r5,%r5 |
| 352 | lmg %r6,%r14,__SF_GPRS(%r15) # restore kernel registers | 295 | lmg %r6,%r14,__SF_GPRS(%r15) # restore kernel registers |
| 353 | lg %r2,__SF_SIE_REASON(%r15) # return exit reason code | 296 | lg %r2,__SF_SIE_REASON(%r15) # return exit reason code |
| 354 | BR_R1USE_R14 | 297 | BR_EX %r14 |
| 355 | .Lsie_fault: | 298 | .Lsie_fault: |
| 356 | lghi %r14,-EFAULT | 299 | lghi %r14,-EFAULT |
| 357 | stg %r14,__SF_SIE_REASON(%r15) # set exit reason code | 300 | stg %r14,__SF_SIE_REASON(%r15) # set exit reason code |
| @@ -410,7 +353,7 @@ ENTRY(system_call) | |||
| 410 | lgf %r9,0(%r8,%r10) # get system call add. | 353 | lgf %r9,0(%r8,%r10) # get system call add. |
| 411 | TSTMSK __TI_flags(%r12),_TIF_TRACE | 354 | TSTMSK __TI_flags(%r12),_TIF_TRACE |
| 412 | jnz .Lsysc_tracesys | 355 | jnz .Lsysc_tracesys |
| 413 | BASR_R14_R9 # call sys_xxxx | 356 | BASR_EX %r14,%r9 # call sys_xxxx |
| 414 | stg %r2,__PT_R2(%r11) # store return value | 357 | stg %r2,__PT_R2(%r11) # store return value |
| 415 | 358 | ||
| 416 | .Lsysc_return: | 359 | .Lsysc_return: |
| @@ -595,7 +538,7 @@ ENTRY(system_call) | |||
| 595 | lmg %r3,%r7,__PT_R3(%r11) | 538 | lmg %r3,%r7,__PT_R3(%r11) |
| 596 | stg %r7,STACK_FRAME_OVERHEAD(%r15) | 539 | stg %r7,STACK_FRAME_OVERHEAD(%r15) |
| 597 | lg %r2,__PT_ORIG_GPR2(%r11) | 540 | lg %r2,__PT_ORIG_GPR2(%r11) |
| 598 | BASR_R14_R9 # call sys_xxx | 541 | BASR_EX %r14,%r9 # call sys_xxx |
| 599 | stg %r2,__PT_R2(%r11) # store return value | 542 | stg %r2,__PT_R2(%r11) # store return value |
| 600 | .Lsysc_tracenogo: | 543 | .Lsysc_tracenogo: |
| 601 | TSTMSK __TI_flags(%r12),_TIF_TRACE | 544 | TSTMSK __TI_flags(%r12),_TIF_TRACE |
| @@ -619,7 +562,7 @@ ENTRY(ret_from_fork) | |||
| 619 | lmg %r9,%r10,__PT_R9(%r11) # load gprs | 562 | lmg %r9,%r10,__PT_R9(%r11) # load gprs |
| 620 | ENTRY(kernel_thread_starter) | 563 | ENTRY(kernel_thread_starter) |
| 621 | la %r2,0(%r10) | 564 | la %r2,0(%r10) |
| 622 | BASR_R14_R9 | 565 | BASR_EX %r14,%r9 |
| 623 | j .Lsysc_tracenogo | 566 | j .Lsysc_tracenogo |
| 624 | 567 | ||
| 625 | /* | 568 | /* |
| @@ -701,7 +644,7 @@ ENTRY(pgm_check_handler) | |||
| 701 | je .Lpgm_return | 644 | je .Lpgm_return |
| 702 | lgf %r9,0(%r10,%r1) # load address of handler routine | 645 | lgf %r9,0(%r10,%r1) # load address of handler routine |
| 703 | lgr %r2,%r11 # pass pointer to pt_regs | 646 | lgr %r2,%r11 # pass pointer to pt_regs |
| 704 | BASR_R14_R9 # branch to interrupt-handler | 647 | BASR_EX %r14,%r9 # branch to interrupt-handler |
| 705 | .Lpgm_return: | 648 | .Lpgm_return: |
| 706 | LOCKDEP_SYS_EXIT | 649 | LOCKDEP_SYS_EXIT |
| 707 | tm __PT_PSW+1(%r11),0x01 # returning to user ? | 650 | tm __PT_PSW+1(%r11),0x01 # returning to user ? |
| @@ -1019,7 +962,7 @@ ENTRY(psw_idle) | |||
| 1019 | stpt __TIMER_IDLE_ENTER(%r2) | 962 | stpt __TIMER_IDLE_ENTER(%r2) |
| 1020 | .Lpsw_idle_lpsw: | 963 | .Lpsw_idle_lpsw: |
| 1021 | lpswe __SF_EMPTY(%r15) | 964 | lpswe __SF_EMPTY(%r15) |
| 1022 | BR_R1USE_R14 | 965 | BR_EX %r14 |
| 1023 | .Lpsw_idle_end: | 966 | .Lpsw_idle_end: |
| 1024 | 967 | ||
| 1025 | /* | 968 | /* |
| @@ -1061,7 +1004,7 @@ ENTRY(save_fpu_regs) | |||
| 1061 | .Lsave_fpu_regs_done: | 1004 | .Lsave_fpu_regs_done: |
| 1062 | oi __LC_CPU_FLAGS+7,_CIF_FPU | 1005 | oi __LC_CPU_FLAGS+7,_CIF_FPU |
| 1063 | .Lsave_fpu_regs_exit: | 1006 | .Lsave_fpu_regs_exit: |
| 1064 | BR_R1USE_R14 | 1007 | BR_EX %r14 |
| 1065 | .Lsave_fpu_regs_end: | 1008 | .Lsave_fpu_regs_end: |
| 1066 | EXPORT_SYMBOL(save_fpu_regs) | 1009 | EXPORT_SYMBOL(save_fpu_regs) |
| 1067 | 1010 | ||
| @@ -1107,7 +1050,7 @@ load_fpu_regs: | |||
| 1107 | .Lload_fpu_regs_done: | 1050 | .Lload_fpu_regs_done: |
| 1108 | ni __LC_CPU_FLAGS+7,255-_CIF_FPU | 1051 | ni __LC_CPU_FLAGS+7,255-_CIF_FPU |
| 1109 | .Lload_fpu_regs_exit: | 1052 | .Lload_fpu_regs_exit: |
| 1110 | BR_R1USE_R14 | 1053 | BR_EX %r14 |
| 1111 | .Lload_fpu_regs_end: | 1054 | .Lload_fpu_regs_end: |
| 1112 | 1055 | ||
| 1113 | .L__critical_end: | 1056 | .L__critical_end: |
| @@ -1322,7 +1265,7 @@ cleanup_critical: | |||
| 1322 | jl 0f | 1265 | jl 0f |
| 1323 | clg %r9,BASED(.Lcleanup_table+104) # .Lload_fpu_regs_end | 1266 | clg %r9,BASED(.Lcleanup_table+104) # .Lload_fpu_regs_end |
| 1324 | jl .Lcleanup_load_fpu_regs | 1267 | jl .Lcleanup_load_fpu_regs |
| 1325 | 0: BR_R11USE_R14 | 1268 | 0: BR_EX %r14 |
| 1326 | 1269 | ||
| 1327 | .align 8 | 1270 | .align 8 |
| 1328 | .Lcleanup_table: | 1271 | .Lcleanup_table: |
| @@ -1358,7 +1301,7 @@ cleanup_critical: | |||
| 1358 | ni __SIE_PROG0C+3(%r9),0xfe # no longer in SIE | 1301 | ni __SIE_PROG0C+3(%r9),0xfe # no longer in SIE |
| 1359 | lctlg %c1,%c1,__LC_USER_ASCE # load primary asce | 1302 | lctlg %c1,%c1,__LC_USER_ASCE # load primary asce |
| 1360 | larl %r9,sie_exit # skip forward to sie_exit | 1303 | larl %r9,sie_exit # skip forward to sie_exit |
| 1361 | BR_R11USE_R14 | 1304 | BR_EX %r14 |
| 1362 | #endif | 1305 | #endif |
| 1363 | 1306 | ||
| 1364 | .Lcleanup_system_call: | 1307 | .Lcleanup_system_call: |
| @@ -1412,7 +1355,7 @@ cleanup_critical: | |||
| 1412 | stg %r15,56(%r11) # r15 stack pointer | 1355 | stg %r15,56(%r11) # r15 stack pointer |
| 1413 | # set new psw address and exit | 1356 | # set new psw address and exit |
| 1414 | larl %r9,.Lsysc_do_svc | 1357 | larl %r9,.Lsysc_do_svc |
| 1415 | BR_R11USE_R14 | 1358 | BR_EX %r14,%r11 |
| 1416 | .Lcleanup_system_call_insn: | 1359 | .Lcleanup_system_call_insn: |
| 1417 | .quad system_call | 1360 | .quad system_call |
| 1418 | .quad .Lsysc_stmg | 1361 | .quad .Lsysc_stmg |
| @@ -1424,7 +1367,7 @@ cleanup_critical: | |||
| 1424 | 1367 | ||
| 1425 | .Lcleanup_sysc_tif: | 1368 | .Lcleanup_sysc_tif: |
| 1426 | larl %r9,.Lsysc_tif | 1369 | larl %r9,.Lsysc_tif |
| 1427 | BR_R11USE_R14 | 1370 | BR_EX %r14,%r11 |
| 1428 | 1371 | ||
| 1429 | .Lcleanup_sysc_restore: | 1372 | .Lcleanup_sysc_restore: |
| 1430 | # check if stpt has been executed | 1373 | # check if stpt has been executed |
| @@ -1441,14 +1384,14 @@ cleanup_critical: | |||
| 1441 | mvc 0(64,%r11),__PT_R8(%r9) | 1384 | mvc 0(64,%r11),__PT_R8(%r9) |
| 1442 | lmg %r0,%r7,__PT_R0(%r9) | 1385 | lmg %r0,%r7,__PT_R0(%r9) |
| 1443 | 1: lmg %r8,%r9,__LC_RETURN_PSW | 1386 | 1: lmg %r8,%r9,__LC_RETURN_PSW |
| 1444 | BR_R11USE_R14 | 1387 | BR_EX %r14,%r11 |
| 1445 | .Lcleanup_sysc_restore_insn: | 1388 | .Lcleanup_sysc_restore_insn: |
| 1446 | .quad .Lsysc_exit_timer | 1389 | .quad .Lsysc_exit_timer |
| 1447 | .quad .Lsysc_done - 4 | 1390 | .quad .Lsysc_done - 4 |
| 1448 | 1391 | ||
| 1449 | .Lcleanup_io_tif: | 1392 | .Lcleanup_io_tif: |
| 1450 | larl %r9,.Lio_tif | 1393 | larl %r9,.Lio_tif |
| 1451 | BR_R11USE_R14 | 1394 | BR_EX %r14,%r11 |
| 1452 | 1395 | ||
| 1453 | .Lcleanup_io_restore: | 1396 | .Lcleanup_io_restore: |
| 1454 | # check if stpt has been executed | 1397 | # check if stpt has been executed |
| @@ -1462,7 +1405,7 @@ cleanup_critical: | |||
| 1462 | mvc 0(64,%r11),__PT_R8(%r9) | 1405 | mvc 0(64,%r11),__PT_R8(%r9) |
| 1463 | lmg %r0,%r7,__PT_R0(%r9) | 1406 | lmg %r0,%r7,__PT_R0(%r9) |
| 1464 | 1: lmg %r8,%r9,__LC_RETURN_PSW | 1407 | 1: lmg %r8,%r9,__LC_RETURN_PSW |
| 1465 | BR_R11USE_R14 | 1408 | BR_EX %r14,%r11 |
| 1466 | .Lcleanup_io_restore_insn: | 1409 | .Lcleanup_io_restore_insn: |
| 1467 | .quad .Lio_exit_timer | 1410 | .quad .Lio_exit_timer |
| 1468 | .quad .Lio_done - 4 | 1411 | .quad .Lio_done - 4 |
| @@ -1515,17 +1458,17 @@ cleanup_critical: | |||
| 1515 | # prepare return psw | 1458 | # prepare return psw |
| 1516 | nihh %r8,0xfcfd # clear irq & wait state bits | 1459 | nihh %r8,0xfcfd # clear irq & wait state bits |
| 1517 | lg %r9,48(%r11) # return from psw_idle | 1460 | lg %r9,48(%r11) # return from psw_idle |
| 1518 | BR_R11USE_R14 | 1461 | BR_EX %r14,%r11 |
| 1519 | .Lcleanup_idle_insn: | 1462 | .Lcleanup_idle_insn: |
| 1520 | .quad .Lpsw_idle_lpsw | 1463 | .quad .Lpsw_idle_lpsw |
| 1521 | 1464 | ||
| 1522 | .Lcleanup_save_fpu_regs: | 1465 | .Lcleanup_save_fpu_regs: |
| 1523 | larl %r9,save_fpu_regs | 1466 | larl %r9,save_fpu_regs |
| 1524 | BR_R11USE_R14 | 1467 | BR_EX %r14,%r11 |
| 1525 | 1468 | ||
| 1526 | .Lcleanup_load_fpu_regs: | 1469 | .Lcleanup_load_fpu_regs: |
| 1527 | larl %r9,load_fpu_regs | 1470 | larl %r9,load_fpu_regs |
| 1528 | BR_R11USE_R14 | 1471 | BR_EX %r14,%r11 |
| 1529 | 1472 | ||
| 1530 | /* | 1473 | /* |
| 1531 | * Integer constants | 1474 | * Integer constants |
diff --git a/arch/s390/kernel/irq.c b/arch/s390/kernel/irq.c index 94f2099bceb0..3d17c41074ca 100644 --- a/arch/s390/kernel/irq.c +++ b/arch/s390/kernel/irq.c | |||
| @@ -176,10 +176,9 @@ void do_softirq_own_stack(void) | |||
| 176 | new -= STACK_FRAME_OVERHEAD; | 176 | new -= STACK_FRAME_OVERHEAD; |
| 177 | ((struct stack_frame *) new)->back_chain = old; | 177 | ((struct stack_frame *) new)->back_chain = old; |
| 178 | asm volatile(" la 15,0(%0)\n" | 178 | asm volatile(" la 15,0(%0)\n" |
| 179 | " basr 14,%2\n" | 179 | " brasl 14,__do_softirq\n" |
| 180 | " la 15,0(%1)\n" | 180 | " la 15,0(%1)\n" |
| 181 | : : "a" (new), "a" (old), | 181 | : : "a" (new), "a" (old) |
| 182 | "a" (__do_softirq) | ||
| 183 | : "0", "1", "2", "3", "4", "5", "14", | 182 | : "0", "1", "2", "3", "4", "5", "14", |
| 184 | "cc", "memory" ); | 183 | "cc", "memory" ); |
| 185 | } else { | 184 | } else { |
diff --git a/arch/s390/kernel/mcount.S b/arch/s390/kernel/mcount.S index 82df7d80fab2..27110f3294ed 100644 --- a/arch/s390/kernel/mcount.S +++ b/arch/s390/kernel/mcount.S | |||
| @@ -9,13 +9,17 @@ | |||
| 9 | #include <linux/linkage.h> | 9 | #include <linux/linkage.h> |
| 10 | #include <asm/asm-offsets.h> | 10 | #include <asm/asm-offsets.h> |
| 11 | #include <asm/ftrace.h> | 11 | #include <asm/ftrace.h> |
| 12 | #include <asm/nospec-insn.h> | ||
| 12 | #include <asm/ptrace.h> | 13 | #include <asm/ptrace.h> |
| 13 | #include <asm/export.h> | 14 | #include <asm/export.h> |
| 14 | 15 | ||
| 16 | GEN_BR_THUNK %r1 | ||
| 17 | GEN_BR_THUNK %r14 | ||
| 18 | |||
| 15 | .section .kprobes.text, "ax" | 19 | .section .kprobes.text, "ax" |
| 16 | 20 | ||
| 17 | ENTRY(ftrace_stub) | 21 | ENTRY(ftrace_stub) |
| 18 | br %r14 | 22 | BR_EX %r14 |
| 19 | 23 | ||
| 20 | #define STACK_FRAME_SIZE (STACK_FRAME_OVERHEAD + __PT_SIZE) | 24 | #define STACK_FRAME_SIZE (STACK_FRAME_OVERHEAD + __PT_SIZE) |
| 21 | #define STACK_PTREGS (STACK_FRAME_OVERHEAD) | 25 | #define STACK_PTREGS (STACK_FRAME_OVERHEAD) |
| @@ -23,7 +27,7 @@ ENTRY(ftrace_stub) | |||
| 23 | #define STACK_PTREGS_PSW (STACK_PTREGS + __PT_PSW) | 27 | #define STACK_PTREGS_PSW (STACK_PTREGS + __PT_PSW) |
| 24 | 28 | ||
| 25 | ENTRY(_mcount) | 29 | ENTRY(_mcount) |
| 26 | br %r14 | 30 | BR_EX %r14 |
| 27 | 31 | ||
| 28 | EXPORT_SYMBOL(_mcount) | 32 | EXPORT_SYMBOL(_mcount) |
| 29 | 33 | ||
| @@ -53,7 +57,7 @@ ENTRY(ftrace_caller) | |||
| 53 | #endif | 57 | #endif |
| 54 | lgr %r3,%r14 | 58 | lgr %r3,%r14 |
| 55 | la %r5,STACK_PTREGS(%r15) | 59 | la %r5,STACK_PTREGS(%r15) |
| 56 | basr %r14,%r1 | 60 | BASR_EX %r14,%r1 |
| 57 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER | 61 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER |
| 58 | # The j instruction gets runtime patched to a nop instruction. | 62 | # The j instruction gets runtime patched to a nop instruction. |
| 59 | # See ftrace_enable_ftrace_graph_caller. | 63 | # See ftrace_enable_ftrace_graph_caller. |
| @@ -68,7 +72,7 @@ ftrace_graph_caller_end: | |||
| 68 | #endif | 72 | #endif |
| 69 | lg %r1,(STACK_PTREGS_PSW+8)(%r15) | 73 | lg %r1,(STACK_PTREGS_PSW+8)(%r15) |
| 70 | lmg %r2,%r15,(STACK_PTREGS_GPRS+2*8)(%r15) | 74 | lmg %r2,%r15,(STACK_PTREGS_GPRS+2*8)(%r15) |
| 71 | br %r1 | 75 | BR_EX %r1 |
| 72 | 76 | ||
| 73 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER | 77 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER |
| 74 | 78 | ||
| @@ -81,6 +85,6 @@ ENTRY(return_to_handler) | |||
| 81 | aghi %r15,STACK_FRAME_OVERHEAD | 85 | aghi %r15,STACK_FRAME_OVERHEAD |
| 82 | lgr %r14,%r2 | 86 | lgr %r14,%r2 |
| 83 | lmg %r2,%r5,32(%r15) | 87 | lmg %r2,%r5,32(%r15) |
| 84 | br %r14 | 88 | BR_EX %r14 |
| 85 | 89 | ||
| 86 | #endif | 90 | #endif |
diff --git a/arch/s390/kernel/nospec-branch.c b/arch/s390/kernel/nospec-branch.c index 46d49a11663f..8ad6a7128b3a 100644 --- a/arch/s390/kernel/nospec-branch.c +++ b/arch/s390/kernel/nospec-branch.c | |||
| @@ -1,7 +1,6 @@ | |||
| 1 | // SPDX-License-Identifier: GPL-2.0 | 1 | // SPDX-License-Identifier: GPL-2.0 |
| 2 | #include <linux/module.h> | 2 | #include <linux/module.h> |
| 3 | #include <linux/device.h> | 3 | #include <linux/device.h> |
| 4 | #include <linux/cpu.h> | ||
| 5 | #include <asm/nospec-branch.h> | 4 | #include <asm/nospec-branch.h> |
| 6 | 5 | ||
| 7 | static int __init nobp_setup_early(char *str) | 6 | static int __init nobp_setup_early(char *str) |
| @@ -44,24 +43,6 @@ static int __init nospec_report(void) | |||
| 44 | } | 43 | } |
| 45 | arch_initcall(nospec_report); | 44 | arch_initcall(nospec_report); |
| 46 | 45 | ||
| 47 | #ifdef CONFIG_SYSFS | ||
| 48 | ssize_t cpu_show_spectre_v1(struct device *dev, | ||
| 49 | struct device_attribute *attr, char *buf) | ||
| 50 | { | ||
| 51 | return sprintf(buf, "Mitigation: __user pointer sanitization\n"); | ||
| 52 | } | ||
| 53 | |||
| 54 | ssize_t cpu_show_spectre_v2(struct device *dev, | ||
| 55 | struct device_attribute *attr, char *buf) | ||
| 56 | { | ||
| 57 | if (IS_ENABLED(CC_USING_EXPOLINE) && !nospec_disable) | ||
| 58 | return sprintf(buf, "Mitigation: execute trampolines\n"); | ||
| 59 | if (__test_facility(82, S390_lowcore.alt_stfle_fac_list)) | ||
| 60 | return sprintf(buf, "Mitigation: limited branch prediction.\n"); | ||
| 61 | return sprintf(buf, "Vulnerable\n"); | ||
| 62 | } | ||
| 63 | #endif | ||
| 64 | |||
| 65 | #ifdef CONFIG_EXPOLINE | 46 | #ifdef CONFIG_EXPOLINE |
| 66 | 47 | ||
| 67 | int nospec_disable = IS_ENABLED(CONFIG_EXPOLINE_OFF); | 48 | int nospec_disable = IS_ENABLED(CONFIG_EXPOLINE_OFF); |
| @@ -112,7 +93,6 @@ static void __init_or_module __nospec_revert(s32 *start, s32 *end) | |||
| 112 | s32 *epo; | 93 | s32 *epo; |
| 113 | 94 | ||
| 114 | /* Second part of the instruction replace is always a nop */ | 95 | /* Second part of the instruction replace is always a nop */ |
| 115 | memcpy(insnbuf + 2, (char[]) { 0x47, 0x00, 0x00, 0x00 }, 4); | ||
| 116 | for (epo = start; epo < end; epo++) { | 96 | for (epo = start; epo < end; epo++) { |
| 117 | instr = (u8 *) epo + *epo; | 97 | instr = (u8 *) epo + *epo; |
| 118 | if (instr[0] == 0xc0 && (instr[1] & 0x0f) == 0x04) | 98 | if (instr[0] == 0xc0 && (instr[1] & 0x0f) == 0x04) |
| @@ -133,18 +113,34 @@ static void __init_or_module __nospec_revert(s32 *start, s32 *end) | |||
| 133 | br = thunk + (*(int *)(thunk + 2)) * 2; | 113 | br = thunk + (*(int *)(thunk + 2)) * 2; |
| 134 | else | 114 | else |
| 135 | continue; | 115 | continue; |
| 136 | if (br[0] != 0x07 || (br[1] & 0xf0) != 0xf0) | 116 | /* Check for unconditional branch 0x07f? or 0x47f???? */ |
| 117 | if ((br[0] & 0xbf) != 0x07 || (br[1] & 0xf0) != 0xf0) | ||
| 137 | continue; | 118 | continue; |
| 119 | |||
| 120 | memcpy(insnbuf + 2, (char[]) { 0x47, 0x00, 0x07, 0x00 }, 4); | ||
| 138 | switch (type) { | 121 | switch (type) { |
| 139 | case BRCL_EXPOLINE: | 122 | case BRCL_EXPOLINE: |
| 140 | /* brcl to thunk, replace with br + nop */ | ||
| 141 | insnbuf[0] = br[0]; | 123 | insnbuf[0] = br[0]; |
| 142 | insnbuf[1] = (instr[1] & 0xf0) | (br[1] & 0x0f); | 124 | insnbuf[1] = (instr[1] & 0xf0) | (br[1] & 0x0f); |
| 125 | if (br[0] == 0x47) { | ||
| 126 | /* brcl to b, replace with bc + nopr */ | ||
| 127 | insnbuf[2] = br[2]; | ||
| 128 | insnbuf[3] = br[3]; | ||
| 129 | } else { | ||
| 130 | /* brcl to br, replace with bcr + nop */ | ||
| 131 | } | ||
| 143 | break; | 132 | break; |
| 144 | case BRASL_EXPOLINE: | 133 | case BRASL_EXPOLINE: |
| 145 | /* brasl to thunk, replace with basr + nop */ | ||
| 146 | insnbuf[0] = 0x0d; | ||
| 147 | insnbuf[1] = (instr[1] & 0xf0) | (br[1] & 0x0f); | 134 | insnbuf[1] = (instr[1] & 0xf0) | (br[1] & 0x0f); |
| 135 | if (br[0] == 0x47) { | ||
| 136 | /* brasl to b, replace with bas + nopr */ | ||
| 137 | insnbuf[0] = 0x4d; | ||
| 138 | insnbuf[2] = br[2]; | ||
| 139 | insnbuf[3] = br[3]; | ||
| 140 | } else { | ||
| 141 | /* brasl to br, replace with basr + nop */ | ||
| 142 | insnbuf[0] = 0x0d; | ||
| 143 | } | ||
| 148 | break; | 144 | break; |
| 149 | } | 145 | } |
| 150 | 146 | ||
diff --git a/arch/s390/kernel/nospec-sysfs.c b/arch/s390/kernel/nospec-sysfs.c new file mode 100644 index 000000000000..8affad5f18cb --- /dev/null +++ b/arch/s390/kernel/nospec-sysfs.c | |||
| @@ -0,0 +1,21 @@ | |||
| 1 | // SPDX-License-Identifier: GPL-2.0 | ||
| 2 | #include <linux/device.h> | ||
| 3 | #include <linux/cpu.h> | ||
| 4 | #include <asm/facility.h> | ||
| 5 | #include <asm/nospec-branch.h> | ||
| 6 | |||
| 7 | ssize_t cpu_show_spectre_v1(struct device *dev, | ||
| 8 | struct device_attribute *attr, char *buf) | ||
| 9 | { | ||
| 10 | return sprintf(buf, "Mitigation: __user pointer sanitization\n"); | ||
| 11 | } | ||
| 12 | |||
| 13 | ssize_t cpu_show_spectre_v2(struct device *dev, | ||
| 14 | struct device_attribute *attr, char *buf) | ||
| 15 | { | ||
| 16 | if (IS_ENABLED(CC_USING_EXPOLINE) && !nospec_disable) | ||
| 17 | return sprintf(buf, "Mitigation: execute trampolines\n"); | ||
| 18 | if (__test_facility(82, S390_lowcore.alt_stfle_fac_list)) | ||
| 19 | return sprintf(buf, "Mitigation: limited branch prediction\n"); | ||
| 20 | return sprintf(buf, "Vulnerable\n"); | ||
| 21 | } | ||
diff --git a/arch/s390/kernel/perf_cpum_sf.c b/arch/s390/kernel/perf_cpum_sf.c index 1c9ddd7aa5ec..0292d68e7dde 100644 --- a/arch/s390/kernel/perf_cpum_sf.c +++ b/arch/s390/kernel/perf_cpum_sf.c | |||
| @@ -753,6 +753,10 @@ static int __hw_perf_event_init(struct perf_event *event) | |||
| 753 | */ | 753 | */ |
| 754 | rate = 0; | 754 | rate = 0; |
| 755 | if (attr->freq) { | 755 | if (attr->freq) { |
| 756 | if (!attr->sample_freq) { | ||
| 757 | err = -EINVAL; | ||
| 758 | goto out; | ||
| 759 | } | ||
| 756 | rate = freq_to_sample_rate(&si, attr->sample_freq); | 760 | rate = freq_to_sample_rate(&si, attr->sample_freq); |
| 757 | rate = hw_limit_rate(&si, rate); | 761 | rate = hw_limit_rate(&si, rate); |
| 758 | attr->freq = 0; | 762 | attr->freq = 0; |
diff --git a/arch/s390/kernel/reipl.S b/arch/s390/kernel/reipl.S index 73cc3750f0d3..7f14adf512c6 100644 --- a/arch/s390/kernel/reipl.S +++ b/arch/s390/kernel/reipl.S | |||
| @@ -7,8 +7,11 @@ | |||
| 7 | 7 | ||
| 8 | #include <linux/linkage.h> | 8 | #include <linux/linkage.h> |
| 9 | #include <asm/asm-offsets.h> | 9 | #include <asm/asm-offsets.h> |
| 10 | #include <asm/nospec-insn.h> | ||
| 10 | #include <asm/sigp.h> | 11 | #include <asm/sigp.h> |
| 11 | 12 | ||
| 13 | GEN_BR_THUNK %r9 | ||
| 14 | |||
| 12 | # | 15 | # |
| 13 | # Issue "store status" for the current CPU to its prefix page | 16 | # Issue "store status" for the current CPU to its prefix page |
| 14 | # and call passed function afterwards | 17 | # and call passed function afterwards |
| @@ -67,9 +70,9 @@ ENTRY(store_status) | |||
| 67 | st %r4,0(%r1) | 70 | st %r4,0(%r1) |
| 68 | st %r5,4(%r1) | 71 | st %r5,4(%r1) |
| 69 | stg %r2,8(%r1) | 72 | stg %r2,8(%r1) |
| 70 | lgr %r1,%r2 | 73 | lgr %r9,%r2 |
| 71 | lgr %r2,%r3 | 74 | lgr %r2,%r3 |
| 72 | br %r1 | 75 | BR_EX %r9 |
| 73 | 76 | ||
| 74 | .section .bss | 77 | .section .bss |
| 75 | .align 8 | 78 | .align 8 |
diff --git a/arch/s390/kernel/swsusp.S b/arch/s390/kernel/swsusp.S index e99187149f17..a049a7b9d6e8 100644 --- a/arch/s390/kernel/swsusp.S +++ b/arch/s390/kernel/swsusp.S | |||
| @@ -13,6 +13,7 @@ | |||
| 13 | #include <asm/ptrace.h> | 13 | #include <asm/ptrace.h> |
| 14 | #include <asm/thread_info.h> | 14 | #include <asm/thread_info.h> |
| 15 | #include <asm/asm-offsets.h> | 15 | #include <asm/asm-offsets.h> |
| 16 | #include <asm/nospec-insn.h> | ||
| 16 | #include <asm/sigp.h> | 17 | #include <asm/sigp.h> |
| 17 | 18 | ||
| 18 | /* | 19 | /* |
| @@ -24,6 +25,8 @@ | |||
| 24 | * (see below) in the resume process. | 25 | * (see below) in the resume process. |
| 25 | * This function runs with disabled interrupts. | 26 | * This function runs with disabled interrupts. |
| 26 | */ | 27 | */ |
| 28 | GEN_BR_THUNK %r14 | ||
| 29 | |||
| 27 | .section .text | 30 | .section .text |
| 28 | ENTRY(swsusp_arch_suspend) | 31 | ENTRY(swsusp_arch_suspend) |
| 29 | stmg %r6,%r15,__SF_GPRS(%r15) | 32 | stmg %r6,%r15,__SF_GPRS(%r15) |
| @@ -103,7 +106,7 @@ ENTRY(swsusp_arch_suspend) | |||
| 103 | spx 0x318(%r1) | 106 | spx 0x318(%r1) |
| 104 | lmg %r6,%r15,STACK_FRAME_OVERHEAD + __SF_GPRS(%r15) | 107 | lmg %r6,%r15,STACK_FRAME_OVERHEAD + __SF_GPRS(%r15) |
| 105 | lghi %r2,0 | 108 | lghi %r2,0 |
| 106 | br %r14 | 109 | BR_EX %r14 |
| 107 | 110 | ||
| 108 | /* | 111 | /* |
| 109 | * Restore saved memory image to correct place and restore register context. | 112 | * Restore saved memory image to correct place and restore register context. |
| @@ -197,11 +200,10 @@ pgm_check_entry: | |||
| 197 | larl %r15,init_thread_union | 200 | larl %r15,init_thread_union |
| 198 | ahi %r15,1<<(PAGE_SHIFT+THREAD_SIZE_ORDER) | 201 | ahi %r15,1<<(PAGE_SHIFT+THREAD_SIZE_ORDER) |
| 199 | larl %r2,.Lpanic_string | 202 | larl %r2,.Lpanic_string |
| 200 | larl %r3,sclp_early_printk | ||
| 201 | lghi %r1,0 | 203 | lghi %r1,0 |
| 202 | sam31 | 204 | sam31 |
| 203 | sigp %r1,%r0,SIGP_SET_ARCHITECTURE | 205 | sigp %r1,%r0,SIGP_SET_ARCHITECTURE |
| 204 | basr %r14,%r3 | 206 | brasl %r14,sclp_early_printk |
| 205 | larl %r3,.Ldisabled_wait_31 | 207 | larl %r3,.Ldisabled_wait_31 |
| 206 | lpsw 0(%r3) | 208 | lpsw 0(%r3) |
| 207 | 4: | 209 | 4: |
| @@ -267,7 +269,7 @@ restore_registers: | |||
| 267 | /* Return 0 */ | 269 | /* Return 0 */ |
| 268 | lmg %r6,%r15,STACK_FRAME_OVERHEAD + __SF_GPRS(%r15) | 270 | lmg %r6,%r15,STACK_FRAME_OVERHEAD + __SF_GPRS(%r15) |
| 269 | lghi %r2,0 | 271 | lghi %r2,0 |
| 270 | br %r14 | 272 | BR_EX %r14 |
| 271 | 273 | ||
| 272 | .section .data..nosave,"aw",@progbits | 274 | .section .data..nosave,"aw",@progbits |
| 273 | .align 8 | 275 | .align 8 |
diff --git a/arch/s390/kvm/vsie.c b/arch/s390/kvm/vsie.c index 8961e3970901..969882b54266 100644 --- a/arch/s390/kvm/vsie.c +++ b/arch/s390/kvm/vsie.c | |||
| @@ -578,7 +578,7 @@ static int pin_blocks(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page) | |||
| 578 | 578 | ||
| 579 | gpa = READ_ONCE(scb_o->itdba) & ~0xffUL; | 579 | gpa = READ_ONCE(scb_o->itdba) & ~0xffUL; |
| 580 | if (gpa && (scb_s->ecb & ECB_TE)) { | 580 | if (gpa && (scb_s->ecb & ECB_TE)) { |
| 581 | if (!(gpa & ~0x1fffU)) { | 581 | if (!(gpa & ~0x1fffUL)) { |
| 582 | rc = set_validity_icpt(scb_s, 0x0080U); | 582 | rc = set_validity_icpt(scb_s, 0x0080U); |
| 583 | goto unpin; | 583 | goto unpin; |
| 584 | } | 584 | } |
diff --git a/arch/s390/lib/mem.S b/arch/s390/lib/mem.S index 495c9c4bacc7..2311f15be9cf 100644 --- a/arch/s390/lib/mem.S +++ b/arch/s390/lib/mem.S | |||
| @@ -7,6 +7,9 @@ | |||
| 7 | 7 | ||
| 8 | #include <linux/linkage.h> | 8 | #include <linux/linkage.h> |
| 9 | #include <asm/export.h> | 9 | #include <asm/export.h> |
| 10 | #include <asm/nospec-insn.h> | ||
| 11 | |||
| 12 | GEN_BR_THUNK %r14 | ||
| 10 | 13 | ||
| 11 | /* | 14 | /* |
| 12 | * void *memmove(void *dest, const void *src, size_t n) | 15 | * void *memmove(void *dest, const void *src, size_t n) |
| @@ -33,14 +36,14 @@ ENTRY(memmove) | |||
| 33 | .Lmemmove_forward_remainder: | 36 | .Lmemmove_forward_remainder: |
| 34 | larl %r5,.Lmemmove_mvc | 37 | larl %r5,.Lmemmove_mvc |
| 35 | ex %r4,0(%r5) | 38 | ex %r4,0(%r5) |
| 36 | br %r14 | 39 | BR_EX %r14 |
| 37 | .Lmemmove_reverse: | 40 | .Lmemmove_reverse: |
| 38 | ic %r0,0(%r4,%r3) | 41 | ic %r0,0(%r4,%r3) |
| 39 | stc %r0,0(%r4,%r1) | 42 | stc %r0,0(%r4,%r1) |
| 40 | brctg %r4,.Lmemmove_reverse | 43 | brctg %r4,.Lmemmove_reverse |
| 41 | ic %r0,0(%r4,%r3) | 44 | ic %r0,0(%r4,%r3) |
| 42 | stc %r0,0(%r4,%r1) | 45 | stc %r0,0(%r4,%r1) |
| 43 | br %r14 | 46 | BR_EX %r14 |
| 44 | .Lmemmove_mvc: | 47 | .Lmemmove_mvc: |
| 45 | mvc 0(1,%r1),0(%r3) | 48 | mvc 0(1,%r1),0(%r3) |
| 46 | EXPORT_SYMBOL(memmove) | 49 | EXPORT_SYMBOL(memmove) |
| @@ -77,7 +80,7 @@ ENTRY(memset) | |||
| 77 | .Lmemset_clear_remainder: | 80 | .Lmemset_clear_remainder: |
| 78 | larl %r3,.Lmemset_xc | 81 | larl %r3,.Lmemset_xc |
| 79 | ex %r4,0(%r3) | 82 | ex %r4,0(%r3) |
| 80 | br %r14 | 83 | BR_EX %r14 |
| 81 | .Lmemset_fill: | 84 | .Lmemset_fill: |
| 82 | cghi %r4,1 | 85 | cghi %r4,1 |
| 83 | lgr %r1,%r2 | 86 | lgr %r1,%r2 |
| @@ -95,10 +98,10 @@ ENTRY(memset) | |||
| 95 | stc %r3,0(%r1) | 98 | stc %r3,0(%r1) |
| 96 | larl %r5,.Lmemset_mvc | 99 | larl %r5,.Lmemset_mvc |
| 97 | ex %r4,0(%r5) | 100 | ex %r4,0(%r5) |
| 98 | br %r14 | 101 | BR_EX %r14 |
| 99 | .Lmemset_fill_exit: | 102 | .Lmemset_fill_exit: |
| 100 | stc %r3,0(%r1) | 103 | stc %r3,0(%r1) |
| 101 | br %r14 | 104 | BR_EX %r14 |
| 102 | .Lmemset_xc: | 105 | .Lmemset_xc: |
| 103 | xc 0(1,%r1),0(%r1) | 106 | xc 0(1,%r1),0(%r1) |
| 104 | .Lmemset_mvc: | 107 | .Lmemset_mvc: |
| @@ -121,7 +124,7 @@ ENTRY(memcpy) | |||
| 121 | .Lmemcpy_remainder: | 124 | .Lmemcpy_remainder: |
| 122 | larl %r5,.Lmemcpy_mvc | 125 | larl %r5,.Lmemcpy_mvc |
| 123 | ex %r4,0(%r5) | 126 | ex %r4,0(%r5) |
| 124 | br %r14 | 127 | BR_EX %r14 |
| 125 | .Lmemcpy_loop: | 128 | .Lmemcpy_loop: |
| 126 | mvc 0(256,%r1),0(%r3) | 129 | mvc 0(256,%r1),0(%r3) |
| 127 | la %r1,256(%r1) | 130 | la %r1,256(%r1) |
| @@ -159,10 +162,10 @@ ENTRY(__memset\bits) | |||
| 159 | \insn %r3,0(%r1) | 162 | \insn %r3,0(%r1) |
| 160 | larl %r5,.L__memset_mvc\bits | 163 | larl %r5,.L__memset_mvc\bits |
| 161 | ex %r4,0(%r5) | 164 | ex %r4,0(%r5) |
| 162 | br %r14 | 165 | BR_EX %r14 |
| 163 | .L__memset_exit\bits: | 166 | .L__memset_exit\bits: |
| 164 | \insn %r3,0(%r2) | 167 | \insn %r3,0(%r2) |
| 165 | br %r14 | 168 | BR_EX %r14 |
| 166 | .L__memset_mvc\bits: | 169 | .L__memset_mvc\bits: |
| 167 | mvc \bytes(1,%r1),0(%r1) | 170 | mvc \bytes(1,%r1),0(%r1) |
| 168 | .endm | 171 | .endm |
diff --git a/arch/s390/net/bpf_jit.S b/arch/s390/net/bpf_jit.S index 25bb4643c4f4..9f794869c1b0 100644 --- a/arch/s390/net/bpf_jit.S +++ b/arch/s390/net/bpf_jit.S | |||
| @@ -9,6 +9,7 @@ | |||
| 9 | */ | 9 | */ |
| 10 | 10 | ||
| 11 | #include <linux/linkage.h> | 11 | #include <linux/linkage.h> |
| 12 | #include <asm/nospec-insn.h> | ||
| 12 | #include "bpf_jit.h" | 13 | #include "bpf_jit.h" |
| 13 | 14 | ||
| 14 | /* | 15 | /* |
| @@ -54,7 +55,7 @@ ENTRY(sk_load_##NAME##_pos); \ | |||
| 54 | clg %r3,STK_OFF_HLEN(%r15); /* Offset + SIZE > hlen? */ \ | 55 | clg %r3,STK_OFF_HLEN(%r15); /* Offset + SIZE > hlen? */ \ |
| 55 | jh sk_load_##NAME##_slow; \ | 56 | jh sk_load_##NAME##_slow; \ |
| 56 | LOAD %r14,-SIZE(%r3,%r12); /* Get data from skb */ \ | 57 | LOAD %r14,-SIZE(%r3,%r12); /* Get data from skb */ \ |
| 57 | b OFF_OK(%r6); /* Return */ \ | 58 | B_EX OFF_OK,%r6; /* Return */ \ |
| 58 | \ | 59 | \ |
| 59 | sk_load_##NAME##_slow:; \ | 60 | sk_load_##NAME##_slow:; \ |
| 60 | lgr %r2,%r7; /* Arg1 = skb pointer */ \ | 61 | lgr %r2,%r7; /* Arg1 = skb pointer */ \ |
| @@ -64,11 +65,14 @@ sk_load_##NAME##_slow:; \ | |||
| 64 | brasl %r14,skb_copy_bits; /* Get data from skb */ \ | 65 | brasl %r14,skb_copy_bits; /* Get data from skb */ \ |
| 65 | LOAD %r14,STK_OFF_TMP(%r15); /* Load from temp bufffer */ \ | 66 | LOAD %r14,STK_OFF_TMP(%r15); /* Load from temp bufffer */ \ |
| 66 | ltgr %r2,%r2; /* Set cc to (%r2 != 0) */ \ | 67 | ltgr %r2,%r2; /* Set cc to (%r2 != 0) */ \ |
| 67 | br %r6; /* Return */ | 68 | BR_EX %r6; /* Return */ |
| 68 | 69 | ||
| 69 | sk_load_common(word, 4, llgf) /* r14 = *(u32 *) (skb->data+offset) */ | 70 | sk_load_common(word, 4, llgf) /* r14 = *(u32 *) (skb->data+offset) */ |
| 70 | sk_load_common(half, 2, llgh) /* r14 = *(u16 *) (skb->data+offset) */ | 71 | sk_load_common(half, 2, llgh) /* r14 = *(u16 *) (skb->data+offset) */ |
| 71 | 72 | ||
| 73 | GEN_BR_THUNK %r6 | ||
| 74 | GEN_B_THUNK OFF_OK,%r6 | ||
| 75 | |||
| 72 | /* | 76 | /* |
| 73 | * Load 1 byte from SKB (optimized version) | 77 | * Load 1 byte from SKB (optimized version) |
| 74 | */ | 78 | */ |
| @@ -80,7 +84,7 @@ ENTRY(sk_load_byte_pos) | |||
| 80 | clg %r3,STK_OFF_HLEN(%r15) # Offset >= hlen? | 84 | clg %r3,STK_OFF_HLEN(%r15) # Offset >= hlen? |
| 81 | jnl sk_load_byte_slow | 85 | jnl sk_load_byte_slow |
| 82 | llgc %r14,0(%r3,%r12) # Get byte from skb | 86 | llgc %r14,0(%r3,%r12) # Get byte from skb |
| 83 | b OFF_OK(%r6) # Return OK | 87 | B_EX OFF_OK,%r6 # Return OK |
| 84 | 88 | ||
| 85 | sk_load_byte_slow: | 89 | sk_load_byte_slow: |
| 86 | lgr %r2,%r7 # Arg1 = skb pointer | 90 | lgr %r2,%r7 # Arg1 = skb pointer |
| @@ -90,7 +94,7 @@ sk_load_byte_slow: | |||
| 90 | brasl %r14,skb_copy_bits # Get data from skb | 94 | brasl %r14,skb_copy_bits # Get data from skb |
| 91 | llgc %r14,STK_OFF_TMP(%r15) # Load result from temp buffer | 95 | llgc %r14,STK_OFF_TMP(%r15) # Load result from temp buffer |
| 92 | ltgr %r2,%r2 # Set cc to (%r2 != 0) | 96 | ltgr %r2,%r2 # Set cc to (%r2 != 0) |
| 93 | br %r6 # Return cc | 97 | BR_EX %r6 # Return cc |
| 94 | 98 | ||
| 95 | #define sk_negative_common(NAME, SIZE, LOAD) \ | 99 | #define sk_negative_common(NAME, SIZE, LOAD) \ |
| 96 | sk_load_##NAME##_slow_neg:; \ | 100 | sk_load_##NAME##_slow_neg:; \ |
| @@ -104,7 +108,7 @@ sk_load_##NAME##_slow_neg:; \ | |||
| 104 | jz bpf_error; \ | 108 | jz bpf_error; \ |
| 105 | LOAD %r14,0(%r2); /* Get data from pointer */ \ | 109 | LOAD %r14,0(%r2); /* Get data from pointer */ \ |
| 106 | xr %r3,%r3; /* Set cc to zero */ \ | 110 | xr %r3,%r3; /* Set cc to zero */ \ |
| 107 | br %r6; /* Return cc */ | 111 | BR_EX %r6; /* Return cc */ |
| 108 | 112 | ||
| 109 | sk_negative_common(word, 4, llgf) | 113 | sk_negative_common(word, 4, llgf) |
| 110 | sk_negative_common(half, 2, llgh) | 114 | sk_negative_common(half, 2, llgh) |
| @@ -113,4 +117,4 @@ sk_negative_common(byte, 1, llgc) | |||
| 113 | bpf_error: | 117 | bpf_error: |
| 114 | # force a return 0 from jit handler | 118 | # force a return 0 from jit handler |
| 115 | ltgr %r15,%r15 # Set condition code | 119 | ltgr %r15,%r15 # Set condition code |
| 116 | br %r6 | 120 | BR_EX %r6 |
diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c index 78a19c93b380..dd2bcf0e7d00 100644 --- a/arch/s390/net/bpf_jit_comp.c +++ b/arch/s390/net/bpf_jit_comp.c | |||
| @@ -25,6 +25,8 @@ | |||
| 25 | #include <linux/bpf.h> | 25 | #include <linux/bpf.h> |
| 26 | #include <asm/cacheflush.h> | 26 | #include <asm/cacheflush.h> |
| 27 | #include <asm/dis.h> | 27 | #include <asm/dis.h> |
| 28 | #include <asm/facility.h> | ||
| 29 | #include <asm/nospec-branch.h> | ||
| 28 | #include <asm/set_memory.h> | 30 | #include <asm/set_memory.h> |
| 29 | #include "bpf_jit.h" | 31 | #include "bpf_jit.h" |
| 30 | 32 | ||
| @@ -41,6 +43,8 @@ struct bpf_jit { | |||
| 41 | int base_ip; /* Base address for literal pool */ | 43 | int base_ip; /* Base address for literal pool */ |
| 42 | int ret0_ip; /* Address of return 0 */ | 44 | int ret0_ip; /* Address of return 0 */ |
| 43 | int exit_ip; /* Address of exit */ | 45 | int exit_ip; /* Address of exit */ |
| 46 | int r1_thunk_ip; /* Address of expoline thunk for 'br %r1' */ | ||
| 47 | int r14_thunk_ip; /* Address of expoline thunk for 'br %r14' */ | ||
| 44 | int tail_call_start; /* Tail call start offset */ | 48 | int tail_call_start; /* Tail call start offset */ |
| 45 | int labels[1]; /* Labels for local jumps */ | 49 | int labels[1]; /* Labels for local jumps */ |
| 46 | }; | 50 | }; |
| @@ -250,6 +254,19 @@ static inline void reg_set_seen(struct bpf_jit *jit, u32 b1) | |||
| 250 | REG_SET_SEEN(b2); \ | 254 | REG_SET_SEEN(b2); \ |
| 251 | }) | 255 | }) |
| 252 | 256 | ||
| 257 | #define EMIT6_PCREL_RILB(op, b, target) \ | ||
| 258 | ({ \ | ||
| 259 | int rel = (target - jit->prg) / 2; \ | ||
| 260 | _EMIT6(op | reg_high(b) << 16 | rel >> 16, rel & 0xffff); \ | ||
| 261 | REG_SET_SEEN(b); \ | ||
| 262 | }) | ||
| 263 | |||
| 264 | #define EMIT6_PCREL_RIL(op, target) \ | ||
| 265 | ({ \ | ||
| 266 | int rel = (target - jit->prg) / 2; \ | ||
| 267 | _EMIT6(op | rel >> 16, rel & 0xffff); \ | ||
| 268 | }) | ||
| 269 | |||
| 253 | #define _EMIT6_IMM(op, imm) \ | 270 | #define _EMIT6_IMM(op, imm) \ |
| 254 | ({ \ | 271 | ({ \ |
| 255 | unsigned int __imm = (imm); \ | 272 | unsigned int __imm = (imm); \ |
| @@ -469,8 +486,45 @@ static void bpf_jit_epilogue(struct bpf_jit *jit, u32 stack_depth) | |||
| 469 | EMIT4(0xb9040000, REG_2, BPF_REG_0); | 486 | EMIT4(0xb9040000, REG_2, BPF_REG_0); |
| 470 | /* Restore registers */ | 487 | /* Restore registers */ |
| 471 | save_restore_regs(jit, REGS_RESTORE, stack_depth); | 488 | save_restore_regs(jit, REGS_RESTORE, stack_depth); |
| 489 | if (IS_ENABLED(CC_USING_EXPOLINE) && !nospec_disable) { | ||
| 490 | jit->r14_thunk_ip = jit->prg; | ||
| 491 | /* Generate __s390_indirect_jump_r14 thunk */ | ||
| 492 | if (test_facility(35)) { | ||
| 493 | /* exrl %r0,.+10 */ | ||
| 494 | EMIT6_PCREL_RIL(0xc6000000, jit->prg + 10); | ||
| 495 | } else { | ||
| 496 | /* larl %r1,.+14 */ | ||
| 497 | EMIT6_PCREL_RILB(0xc0000000, REG_1, jit->prg + 14); | ||
| 498 | /* ex 0,0(%r1) */ | ||
| 499 | EMIT4_DISP(0x44000000, REG_0, REG_1, 0); | ||
| 500 | } | ||
| 501 | /* j . */ | ||
| 502 | EMIT4_PCREL(0xa7f40000, 0); | ||
| 503 | } | ||
| 472 | /* br %r14 */ | 504 | /* br %r14 */ |
| 473 | _EMIT2(0x07fe); | 505 | _EMIT2(0x07fe); |
| 506 | |||
| 507 | if (IS_ENABLED(CC_USING_EXPOLINE) && !nospec_disable && | ||
| 508 | (jit->seen & SEEN_FUNC)) { | ||
| 509 | jit->r1_thunk_ip = jit->prg; | ||
| 510 | /* Generate __s390_indirect_jump_r1 thunk */ | ||
| 511 | if (test_facility(35)) { | ||
| 512 | /* exrl %r0,.+10 */ | ||
| 513 | EMIT6_PCREL_RIL(0xc6000000, jit->prg + 10); | ||
| 514 | /* j . */ | ||
| 515 | EMIT4_PCREL(0xa7f40000, 0); | ||
| 516 | /* br %r1 */ | ||
| 517 | _EMIT2(0x07f1); | ||
| 518 | } else { | ||
| 519 | /* larl %r1,.+14 */ | ||
| 520 | EMIT6_PCREL_RILB(0xc0000000, REG_1, jit->prg + 14); | ||
| 521 | /* ex 0,S390_lowcore.br_r1_tampoline */ | ||
| 522 | EMIT4_DISP(0x44000000, REG_0, REG_0, | ||
| 523 | offsetof(struct lowcore, br_r1_trampoline)); | ||
| 524 | /* j . */ | ||
| 525 | EMIT4_PCREL(0xa7f40000, 0); | ||
| 526 | } | ||
| 527 | } | ||
| 474 | } | 528 | } |
| 475 | 529 | ||
| 476 | /* | 530 | /* |
| @@ -966,8 +1020,13 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, int i | |||
| 966 | /* lg %w1,<d(imm)>(%l) */ | 1020 | /* lg %w1,<d(imm)>(%l) */ |
| 967 | EMIT6_DISP_LH(0xe3000000, 0x0004, REG_W1, REG_0, REG_L, | 1021 | EMIT6_DISP_LH(0xe3000000, 0x0004, REG_W1, REG_0, REG_L, |
| 968 | EMIT_CONST_U64(func)); | 1022 | EMIT_CONST_U64(func)); |
| 969 | /* basr %r14,%w1 */ | 1023 | if (IS_ENABLED(CC_USING_EXPOLINE) && !nospec_disable) { |
| 970 | EMIT2(0x0d00, REG_14, REG_W1); | 1024 | /* brasl %r14,__s390_indirect_jump_r1 */ |
| 1025 | EMIT6_PCREL_RILB(0xc0050000, REG_14, jit->r1_thunk_ip); | ||
| 1026 | } else { | ||
| 1027 | /* basr %r14,%w1 */ | ||
| 1028 | EMIT2(0x0d00, REG_14, REG_W1); | ||
| 1029 | } | ||
| 971 | /* lgr %b0,%r2: load return value into %b0 */ | 1030 | /* lgr %b0,%r2: load return value into %b0 */ |
| 972 | EMIT4(0xb9040000, BPF_REG_0, REG_2); | 1031 | EMIT4(0xb9040000, BPF_REG_0, REG_2); |
| 973 | if ((jit->seen & SEEN_SKB) && | 1032 | if ((jit->seen & SEEN_SKB) && |
diff --git a/arch/x86/boot/compressed/eboot.c b/arch/x86/boot/compressed/eboot.c index 47d3efff6805..09f36c0d9d4f 100644 --- a/arch/x86/boot/compressed/eboot.c +++ b/arch/x86/boot/compressed/eboot.c | |||
| @@ -163,7 +163,8 @@ __setup_efi_pci32(efi_pci_io_protocol_32 *pci, struct pci_setup_rom **__rom) | |||
| 163 | if (status != EFI_SUCCESS) | 163 | if (status != EFI_SUCCESS) |
| 164 | goto free_struct; | 164 | goto free_struct; |
| 165 | 165 | ||
| 166 | memcpy(rom->romdata, pci->romimage, pci->romsize); | 166 | memcpy(rom->romdata, (void *)(unsigned long)pci->romimage, |
| 167 | pci->romsize); | ||
| 167 | return status; | 168 | return status; |
| 168 | 169 | ||
| 169 | free_struct: | 170 | free_struct: |
| @@ -269,7 +270,8 @@ __setup_efi_pci64(efi_pci_io_protocol_64 *pci, struct pci_setup_rom **__rom) | |||
| 269 | if (status != EFI_SUCCESS) | 270 | if (status != EFI_SUCCESS) |
| 270 | goto free_struct; | 271 | goto free_struct; |
| 271 | 272 | ||
| 272 | memcpy(rom->romdata, pci->romimage, pci->romsize); | 273 | memcpy(rom->romdata, (void *)(unsigned long)pci->romimage, |
| 274 | pci->romsize); | ||
| 273 | return status; | 275 | return status; |
| 274 | 276 | ||
| 275 | free_struct: | 277 | free_struct: |
diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S index fca012baba19..8169e8b7a4dc 100644 --- a/arch/x86/boot/compressed/head_64.S +++ b/arch/x86/boot/compressed/head_64.S | |||
| @@ -306,6 +306,25 @@ ENTRY(startup_64) | |||
| 306 | leaq boot_stack_end(%rbx), %rsp | 306 | leaq boot_stack_end(%rbx), %rsp |
| 307 | 307 | ||
| 308 | /* | 308 | /* |
| 309 | * paging_prepare() and cleanup_trampoline() below can have GOT | ||
| 310 | * references. Adjust the table with address we are running at. | ||
| 311 | * | ||
| 312 | * Zero RAX for adjust_got: the GOT was not adjusted before; | ||
| 313 | * there's no adjustment to undo. | ||
| 314 | */ | ||
| 315 | xorq %rax, %rax | ||
| 316 | |||
| 317 | /* | ||
| 318 | * Calculate the address the binary is loaded at and use it as | ||
| 319 | * a GOT adjustment. | ||
| 320 | */ | ||
| 321 | call 1f | ||
| 322 | 1: popq %rdi | ||
| 323 | subq $1b, %rdi | ||
| 324 | |||
| 325 | call adjust_got | ||
| 326 | |||
| 327 | /* | ||
| 309 | * At this point we are in long mode with 4-level paging enabled, | 328 | * At this point we are in long mode with 4-level paging enabled, |
| 310 | * but we might want to enable 5-level paging or vice versa. | 329 | * but we might want to enable 5-level paging or vice versa. |
| 311 | * | 330 | * |
| @@ -370,10 +389,14 @@ trampoline_return: | |||
| 370 | /* | 389 | /* |
| 371 | * cleanup_trampoline() would restore trampoline memory. | 390 | * cleanup_trampoline() would restore trampoline memory. |
| 372 | * | 391 | * |
| 392 | * RDI is address of the page table to use instead of page table | ||
| 393 | * in trampoline memory (if required). | ||
| 394 | * | ||
| 373 | * RSI holds real mode data and needs to be preserved across | 395 | * RSI holds real mode data and needs to be preserved across |
| 374 | * this function call. | 396 | * this function call. |
| 375 | */ | 397 | */ |
| 376 | pushq %rsi | 398 | pushq %rsi |
| 399 | leaq top_pgtable(%rbx), %rdi | ||
| 377 | call cleanup_trampoline | 400 | call cleanup_trampoline |
| 378 | popq %rsi | 401 | popq %rsi |
| 379 | 402 | ||
| @@ -381,6 +404,21 @@ trampoline_return: | |||
| 381 | pushq $0 | 404 | pushq $0 |
| 382 | popfq | 405 | popfq |
| 383 | 406 | ||
| 407 | /* | ||
| 408 | * Previously we've adjusted the GOT with address the binary was | ||
| 409 | * loaded at. Now we need to re-adjust for relocation address. | ||
| 410 | * | ||
| 411 | * Calculate the address the binary is loaded at, so that we can | ||
| 412 | * undo the previous GOT adjustment. | ||
| 413 | */ | ||
| 414 | call 1f | ||
| 415 | 1: popq %rax | ||
| 416 | subq $1b, %rax | ||
| 417 | |||
| 418 | /* The new adjustment is the relocation address */ | ||
| 419 | movq %rbx, %rdi | ||
| 420 | call adjust_got | ||
| 421 | |||
| 384 | /* | 422 | /* |
| 385 | * Copy the compressed kernel to the end of our buffer | 423 | * Copy the compressed kernel to the end of our buffer |
| 386 | * where decompression in place becomes safe. | 424 | * where decompression in place becomes safe. |
| @@ -482,19 +520,6 @@ relocated: | |||
| 482 | rep stosq | 520 | rep stosq |
| 483 | 521 | ||
| 484 | /* | 522 | /* |
| 485 | * Adjust our own GOT | ||
| 486 | */ | ||
| 487 | leaq _got(%rip), %rdx | ||
| 488 | leaq _egot(%rip), %rcx | ||
| 489 | 1: | ||
| 490 | cmpq %rcx, %rdx | ||
| 491 | jae 2f | ||
| 492 | addq %rbx, (%rdx) | ||
| 493 | addq $8, %rdx | ||
| 494 | jmp 1b | ||
| 495 | 2: | ||
| 496 | |||
| 497 | /* | ||
| 498 | * Do the extraction, and jump to the new kernel.. | 523 | * Do the extraction, and jump to the new kernel.. |
| 499 | */ | 524 | */ |
| 500 | pushq %rsi /* Save the real mode argument */ | 525 | pushq %rsi /* Save the real mode argument */ |
| @@ -512,6 +537,27 @@ relocated: | |||
| 512 | */ | 537 | */ |
| 513 | jmp *%rax | 538 | jmp *%rax |
| 514 | 539 | ||
| 540 | /* | ||
| 541 | * Adjust the global offset table | ||
| 542 | * | ||
| 543 | * RAX is the previous adjustment of the table to undo (use 0 if it's the | ||
| 544 | * first time we touch GOT). | ||
| 545 | * RDI is the new adjustment to apply. | ||
| 546 | */ | ||
| 547 | adjust_got: | ||
| 548 | /* Walk through the GOT adding the address to the entries */ | ||
| 549 | leaq _got(%rip), %rdx | ||
| 550 | leaq _egot(%rip), %rcx | ||
| 551 | 1: | ||
| 552 | cmpq %rcx, %rdx | ||
| 553 | jae 2f | ||
| 554 | subq %rax, (%rdx) /* Undo previous adjustment */ | ||
| 555 | addq %rdi, (%rdx) /* Apply the new adjustment */ | ||
| 556 | addq $8, %rdx | ||
| 557 | jmp 1b | ||
| 558 | 2: | ||
| 559 | ret | ||
| 560 | |||
| 515 | .code32 | 561 | .code32 |
| 516 | /* | 562 | /* |
| 517 | * This is the 32-bit trampoline that will be copied over to low memory. | 563 | * This is the 32-bit trampoline that will be copied over to low memory. |
| @@ -649,3 +695,10 @@ boot_stack_end: | |||
| 649 | .balign 4096 | 695 | .balign 4096 |
| 650 | pgtable: | 696 | pgtable: |
| 651 | .fill BOOT_PGT_SIZE, 1, 0 | 697 | .fill BOOT_PGT_SIZE, 1, 0 |
| 698 | |||
| 699 | /* | ||
| 700 | * The page table is going to be used instead of page table in the trampoline | ||
| 701 | * memory. | ||
| 702 | */ | ||
| 703 | top_pgtable: | ||
| 704 | .fill PAGE_SIZE, 1, 0 | ||
diff --git a/arch/x86/boot/compressed/pgtable_64.c b/arch/x86/boot/compressed/pgtable_64.c index 32af1cbcd903..a362fa0b849c 100644 --- a/arch/x86/boot/compressed/pgtable_64.c +++ b/arch/x86/boot/compressed/pgtable_64.c | |||
| @@ -23,14 +23,6 @@ struct paging_config { | |||
| 23 | static char trampoline_save[TRAMPOLINE_32BIT_SIZE]; | 23 | static char trampoline_save[TRAMPOLINE_32BIT_SIZE]; |
| 24 | 24 | ||
| 25 | /* | 25 | /* |
| 26 | * The page table is going to be used instead of page table in the trampoline | ||
| 27 | * memory. | ||
| 28 | * | ||
| 29 | * It must not be in BSS as BSS is cleared after cleanup_trampoline(). | ||
| 30 | */ | ||
| 31 | static char top_pgtable[PAGE_SIZE] __aligned(PAGE_SIZE) __section(.data); | ||
| 32 | |||
| 33 | /* | ||
| 34 | * Trampoline address will be printed by extract_kernel() for debugging | 26 | * Trampoline address will be printed by extract_kernel() for debugging |
| 35 | * purposes. | 27 | * purposes. |
| 36 | * | 28 | * |
| @@ -134,7 +126,7 @@ out: | |||
| 134 | return paging_config; | 126 | return paging_config; |
| 135 | } | 127 | } |
| 136 | 128 | ||
| 137 | void cleanup_trampoline(void) | 129 | void cleanup_trampoline(void *pgtable) |
| 138 | { | 130 | { |
| 139 | void *trampoline_pgtable; | 131 | void *trampoline_pgtable; |
| 140 | 132 | ||
| @@ -145,8 +137,8 @@ void cleanup_trampoline(void) | |||
| 145 | * if it's there. | 137 | * if it's there. |
| 146 | */ | 138 | */ |
| 147 | if ((void *)__native_read_cr3() == trampoline_pgtable) { | 139 | if ((void *)__native_read_cr3() == trampoline_pgtable) { |
| 148 | memcpy(top_pgtable, trampoline_pgtable, PAGE_SIZE); | 140 | memcpy(pgtable, trampoline_pgtable, PAGE_SIZE); |
| 149 | native_write_cr3((unsigned long)top_pgtable); | 141 | native_write_cr3((unsigned long)pgtable); |
| 150 | } | 142 | } |
| 151 | 143 | ||
| 152 | /* Restore trampoline memory */ | 144 | /* Restore trampoline memory */ |
diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h index b27da9602a6d..aced6c9290d6 100644 --- a/arch/x86/include/asm/cpufeature.h +++ b/arch/x86/include/asm/cpufeature.h | |||
| @@ -140,6 +140,20 @@ extern void clear_cpu_cap(struct cpuinfo_x86 *c, unsigned int bit); | |||
| 140 | 140 | ||
| 141 | #define setup_force_cpu_bug(bit) setup_force_cpu_cap(bit) | 141 | #define setup_force_cpu_bug(bit) setup_force_cpu_cap(bit) |
| 142 | 142 | ||
| 143 | #if defined(__clang__) && !defined(CC_HAVE_ASM_GOTO) | ||
| 144 | |||
| 145 | /* | ||
| 146 | * Workaround for the sake of BPF compilation which utilizes kernel | ||
| 147 | * headers, but clang does not support ASM GOTO and fails the build. | ||
| 148 | */ | ||
| 149 | #ifndef __BPF_TRACING__ | ||
| 150 | #warning "Compiler lacks ASM_GOTO support. Add -D __BPF_TRACING__ to your compiler arguments" | ||
| 151 | #endif | ||
| 152 | |||
| 153 | #define static_cpu_has(bit) boot_cpu_has(bit) | ||
| 154 | |||
| 155 | #else | ||
| 156 | |||
| 143 | /* | 157 | /* |
| 144 | * Static testing of CPU features. Used the same as boot_cpu_has(). | 158 | * Static testing of CPU features. Used the same as boot_cpu_has(). |
| 145 | * These will statically patch the target code for additional | 159 | * These will statically patch the target code for additional |
| @@ -195,6 +209,7 @@ t_no: | |||
| 195 | boot_cpu_has(bit) : \ | 209 | boot_cpu_has(bit) : \ |
| 196 | _static_cpu_has(bit) \ | 210 | _static_cpu_has(bit) \ |
| 197 | ) | 211 | ) |
| 212 | #endif | ||
| 198 | 213 | ||
| 199 | #define cpu_has_bug(c, bit) cpu_has(c, (bit)) | 214 | #define cpu_has_bug(c, bit) cpu_has(c, (bit)) |
| 200 | #define set_cpu_bug(c, bit) set_cpu_cap(c, (bit)) | 215 | #define set_cpu_bug(c, bit) set_cpu_cap(c, (bit)) |
diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h index 578793e97431..fb00a2fca990 100644 --- a/arch/x86/include/asm/cpufeatures.h +++ b/arch/x86/include/asm/cpufeatures.h | |||
| @@ -198,7 +198,6 @@ | |||
| 198 | #define X86_FEATURE_CAT_L2 ( 7*32+ 5) /* Cache Allocation Technology L2 */ | 198 | #define X86_FEATURE_CAT_L2 ( 7*32+ 5) /* Cache Allocation Technology L2 */ |
| 199 | #define X86_FEATURE_CDP_L3 ( 7*32+ 6) /* Code and Data Prioritization L3 */ | 199 | #define X86_FEATURE_CDP_L3 ( 7*32+ 6) /* Code and Data Prioritization L3 */ |
| 200 | #define X86_FEATURE_INVPCID_SINGLE ( 7*32+ 7) /* Effectively INVPCID && CR4.PCIDE=1 */ | 200 | #define X86_FEATURE_INVPCID_SINGLE ( 7*32+ 7) /* Effectively INVPCID && CR4.PCIDE=1 */ |
| 201 | |||
| 202 | #define X86_FEATURE_HW_PSTATE ( 7*32+ 8) /* AMD HW-PState */ | 201 | #define X86_FEATURE_HW_PSTATE ( 7*32+ 8) /* AMD HW-PState */ |
| 203 | #define X86_FEATURE_PROC_FEEDBACK ( 7*32+ 9) /* AMD ProcFeedbackInterface */ | 202 | #define X86_FEATURE_PROC_FEEDBACK ( 7*32+ 9) /* AMD ProcFeedbackInterface */ |
| 204 | #define X86_FEATURE_SME ( 7*32+10) /* AMD Secure Memory Encryption */ | 203 | #define X86_FEATURE_SME ( 7*32+10) /* AMD Secure Memory Encryption */ |
| @@ -207,13 +206,19 @@ | |||
| 207 | #define X86_FEATURE_RETPOLINE_AMD ( 7*32+13) /* "" AMD Retpoline mitigation for Spectre variant 2 */ | 206 | #define X86_FEATURE_RETPOLINE_AMD ( 7*32+13) /* "" AMD Retpoline mitigation for Spectre variant 2 */ |
| 208 | #define X86_FEATURE_INTEL_PPIN ( 7*32+14) /* Intel Processor Inventory Number */ | 207 | #define X86_FEATURE_INTEL_PPIN ( 7*32+14) /* Intel Processor Inventory Number */ |
| 209 | #define X86_FEATURE_CDP_L2 ( 7*32+15) /* Code and Data Prioritization L2 */ | 208 | #define X86_FEATURE_CDP_L2 ( 7*32+15) /* Code and Data Prioritization L2 */ |
| 210 | 209 | #define X86_FEATURE_MSR_SPEC_CTRL ( 7*32+16) /* "" MSR SPEC_CTRL is implemented */ | |
| 210 | #define X86_FEATURE_SSBD ( 7*32+17) /* Speculative Store Bypass Disable */ | ||
| 211 | #define X86_FEATURE_MBA ( 7*32+18) /* Memory Bandwidth Allocation */ | 211 | #define X86_FEATURE_MBA ( 7*32+18) /* Memory Bandwidth Allocation */ |
| 212 | #define X86_FEATURE_RSB_CTXSW ( 7*32+19) /* "" Fill RSB on context switches */ | 212 | #define X86_FEATURE_RSB_CTXSW ( 7*32+19) /* "" Fill RSB on context switches */ |
| 213 | #define X86_FEATURE_SEV ( 7*32+20) /* AMD Secure Encrypted Virtualization */ | 213 | #define X86_FEATURE_SEV ( 7*32+20) /* AMD Secure Encrypted Virtualization */ |
| 214 | |||
| 215 | #define X86_FEATURE_USE_IBPB ( 7*32+21) /* "" Indirect Branch Prediction Barrier enabled */ | 214 | #define X86_FEATURE_USE_IBPB ( 7*32+21) /* "" Indirect Branch Prediction Barrier enabled */ |
| 216 | #define X86_FEATURE_USE_IBRS_FW ( 7*32+22) /* "" Use IBRS during runtime firmware calls */ | 215 | #define X86_FEATURE_USE_IBRS_FW ( 7*32+22) /* "" Use IBRS during runtime firmware calls */ |
| 216 | #define X86_FEATURE_SPEC_STORE_BYPASS_DISABLE ( 7*32+23) /* "" Disable Speculative Store Bypass. */ | ||
| 217 | #define X86_FEATURE_LS_CFG_SSBD ( 7*32+24) /* "" AMD SSBD implementation via LS_CFG MSR */ | ||
| 218 | #define X86_FEATURE_IBRS ( 7*32+25) /* Indirect Branch Restricted Speculation */ | ||
| 219 | #define X86_FEATURE_IBPB ( 7*32+26) /* Indirect Branch Prediction Barrier */ | ||
| 220 | #define X86_FEATURE_STIBP ( 7*32+27) /* Single Thread Indirect Branch Predictors */ | ||
| 221 | #define X86_FEATURE_ZEN ( 7*32+28) /* "" CPU is AMD family 0x17 (Zen) */ | ||
| 217 | 222 | ||
| 218 | /* Virtualization flags: Linux defined, word 8 */ | 223 | /* Virtualization flags: Linux defined, word 8 */ |
| 219 | #define X86_FEATURE_TPR_SHADOW ( 8*32+ 0) /* Intel TPR Shadow */ | 224 | #define X86_FEATURE_TPR_SHADOW ( 8*32+ 0) /* Intel TPR Shadow */ |
| @@ -274,9 +279,10 @@ | |||
| 274 | #define X86_FEATURE_CLZERO (13*32+ 0) /* CLZERO instruction */ | 279 | #define X86_FEATURE_CLZERO (13*32+ 0) /* CLZERO instruction */ |
| 275 | #define X86_FEATURE_IRPERF (13*32+ 1) /* Instructions Retired Count */ | 280 | #define X86_FEATURE_IRPERF (13*32+ 1) /* Instructions Retired Count */ |
| 276 | #define X86_FEATURE_XSAVEERPTR (13*32+ 2) /* Always save/restore FP error pointers */ | 281 | #define X86_FEATURE_XSAVEERPTR (13*32+ 2) /* Always save/restore FP error pointers */ |
| 277 | #define X86_FEATURE_IBPB (13*32+12) /* Indirect Branch Prediction Barrier */ | 282 | #define X86_FEATURE_AMD_IBPB (13*32+12) /* "" Indirect Branch Prediction Barrier */ |
| 278 | #define X86_FEATURE_IBRS (13*32+14) /* Indirect Branch Restricted Speculation */ | 283 | #define X86_FEATURE_AMD_IBRS (13*32+14) /* "" Indirect Branch Restricted Speculation */ |
| 279 | #define X86_FEATURE_STIBP (13*32+15) /* Single Thread Indirect Branch Predictors */ | 284 | #define X86_FEATURE_AMD_STIBP (13*32+15) /* "" Single Thread Indirect Branch Predictors */ |
| 285 | #define X86_FEATURE_VIRT_SSBD (13*32+25) /* Virtualized Speculative Store Bypass Disable */ | ||
| 280 | 286 | ||
| 281 | /* Thermal and Power Management Leaf, CPUID level 0x00000006 (EAX), word 14 */ | 287 | /* Thermal and Power Management Leaf, CPUID level 0x00000006 (EAX), word 14 */ |
| 282 | #define X86_FEATURE_DTHERM (14*32+ 0) /* Digital Thermal Sensor */ | 288 | #define X86_FEATURE_DTHERM (14*32+ 0) /* Digital Thermal Sensor */ |
| @@ -334,6 +340,7 @@ | |||
| 334 | #define X86_FEATURE_SPEC_CTRL (18*32+26) /* "" Speculation Control (IBRS + IBPB) */ | 340 | #define X86_FEATURE_SPEC_CTRL (18*32+26) /* "" Speculation Control (IBRS + IBPB) */ |
| 335 | #define X86_FEATURE_INTEL_STIBP (18*32+27) /* "" Single Thread Indirect Branch Predictors */ | 341 | #define X86_FEATURE_INTEL_STIBP (18*32+27) /* "" Single Thread Indirect Branch Predictors */ |
| 336 | #define X86_FEATURE_ARCH_CAPABILITIES (18*32+29) /* IA32_ARCH_CAPABILITIES MSR (Intel) */ | 342 | #define X86_FEATURE_ARCH_CAPABILITIES (18*32+29) /* IA32_ARCH_CAPABILITIES MSR (Intel) */ |
| 343 | #define X86_FEATURE_SPEC_CTRL_SSBD (18*32+31) /* "" Speculative Store Bypass Disable */ | ||
| 337 | 344 | ||
| 338 | /* | 345 | /* |
| 339 | * BUG word(s) | 346 | * BUG word(s) |
| @@ -363,5 +370,6 @@ | |||
| 363 | #define X86_BUG_CPU_MELTDOWN X86_BUG(14) /* CPU is affected by meltdown attack and needs kernel page table isolation */ | 370 | #define X86_BUG_CPU_MELTDOWN X86_BUG(14) /* CPU is affected by meltdown attack and needs kernel page table isolation */ |
| 364 | #define X86_BUG_SPECTRE_V1 X86_BUG(15) /* CPU is affected by Spectre variant 1 attack with conditional branches */ | 371 | #define X86_BUG_SPECTRE_V1 X86_BUG(15) /* CPU is affected by Spectre variant 1 attack with conditional branches */ |
| 365 | #define X86_BUG_SPECTRE_V2 X86_BUG(16) /* CPU is affected by Spectre variant 2 attack with indirect branches */ | 372 | #define X86_BUG_SPECTRE_V2 X86_BUG(16) /* CPU is affected by Spectre variant 2 attack with indirect branches */ |
| 373 | #define X86_BUG_SPEC_STORE_BYPASS X86_BUG(17) /* CPU is affected by speculative store bypass attack */ | ||
| 366 | 374 | ||
| 367 | #endif /* _ASM_X86_CPUFEATURES_H */ | 375 | #endif /* _ASM_X86_CPUFEATURES_H */ |
diff --git a/arch/x86/include/asm/insn.h b/arch/x86/include/asm/insn.h index b3e32b010ab1..c2c01f84df75 100644 --- a/arch/x86/include/asm/insn.h +++ b/arch/x86/include/asm/insn.h | |||
| @@ -208,4 +208,22 @@ static inline int insn_offset_immediate(struct insn *insn) | |||
| 208 | return insn_offset_displacement(insn) + insn->displacement.nbytes; | 208 | return insn_offset_displacement(insn) + insn->displacement.nbytes; |
| 209 | } | 209 | } |
| 210 | 210 | ||
| 211 | #define POP_SS_OPCODE 0x1f | ||
| 212 | #define MOV_SREG_OPCODE 0x8e | ||
| 213 | |||
| 214 | /* | ||
| 215 | * Intel SDM Vol.3A 6.8.3 states; | ||
| 216 | * "Any single-step trap that would be delivered following the MOV to SS | ||
| 217 | * instruction or POP to SS instruction (because EFLAGS.TF is 1) is | ||
| 218 | * suppressed." | ||
| 219 | * This function returns true if @insn is MOV SS or POP SS. On these | ||
| 220 | * instructions, single stepping is suppressed. | ||
| 221 | */ | ||
| 222 | static inline int insn_masking_exception(struct insn *insn) | ||
| 223 | { | ||
| 224 | return insn->opcode.bytes[0] == POP_SS_OPCODE || | ||
| 225 | (insn->opcode.bytes[0] == MOV_SREG_OPCODE && | ||
| 226 | X86_MODRM_REG(insn->modrm.bytes[0]) == 2); | ||
| 227 | } | ||
| 228 | |||
| 211 | #endif /* _ASM_X86_INSN_H */ | 229 | #endif /* _ASM_X86_INSN_H */ |
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index c25775fad4ed..f4b2588865e9 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h | |||
| @@ -924,7 +924,7 @@ struct kvm_x86_ops { | |||
| 924 | int (*hardware_setup)(void); /* __init */ | 924 | int (*hardware_setup)(void); /* __init */ |
| 925 | void (*hardware_unsetup)(void); /* __exit */ | 925 | void (*hardware_unsetup)(void); /* __exit */ |
| 926 | bool (*cpu_has_accelerated_tpr)(void); | 926 | bool (*cpu_has_accelerated_tpr)(void); |
| 927 | bool (*cpu_has_high_real_mode_segbase)(void); | 927 | bool (*has_emulated_msr)(int index); |
| 928 | void (*cpuid_update)(struct kvm_vcpu *vcpu); | 928 | void (*cpuid_update)(struct kvm_vcpu *vcpu); |
| 929 | 929 | ||
| 930 | struct kvm *(*vm_alloc)(void); | 930 | struct kvm *(*vm_alloc)(void); |
diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h index 57e3785d0d26..cf9911b5a53c 100644 --- a/arch/x86/include/asm/mmu_context.h +++ b/arch/x86/include/asm/mmu_context.h | |||
| @@ -193,7 +193,7 @@ static inline int init_new_context(struct task_struct *tsk, | |||
| 193 | 193 | ||
| 194 | #ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS | 194 | #ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS |
| 195 | if (cpu_feature_enabled(X86_FEATURE_OSPKE)) { | 195 | if (cpu_feature_enabled(X86_FEATURE_OSPKE)) { |
| 196 | /* pkey 0 is the default and always allocated */ | 196 | /* pkey 0 is the default and allocated implicitly */ |
| 197 | mm->context.pkey_allocation_map = 0x1; | 197 | mm->context.pkey_allocation_map = 0x1; |
| 198 | /* -1 means unallocated or invalid */ | 198 | /* -1 means unallocated or invalid */ |
| 199 | mm->context.execute_only_pkey = -1; | 199 | mm->context.execute_only_pkey = -1; |
diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h index 53d5b1b9255e..fda2114197b3 100644 --- a/arch/x86/include/asm/msr-index.h +++ b/arch/x86/include/asm/msr-index.h | |||
| @@ -42,6 +42,8 @@ | |||
| 42 | #define MSR_IA32_SPEC_CTRL 0x00000048 /* Speculation Control */ | 42 | #define MSR_IA32_SPEC_CTRL 0x00000048 /* Speculation Control */ |
| 43 | #define SPEC_CTRL_IBRS (1 << 0) /* Indirect Branch Restricted Speculation */ | 43 | #define SPEC_CTRL_IBRS (1 << 0) /* Indirect Branch Restricted Speculation */ |
| 44 | #define SPEC_CTRL_STIBP (1 << 1) /* Single Thread Indirect Branch Predictors */ | 44 | #define SPEC_CTRL_STIBP (1 << 1) /* Single Thread Indirect Branch Predictors */ |
| 45 | #define SPEC_CTRL_SSBD_SHIFT 2 /* Speculative Store Bypass Disable bit */ | ||
| 46 | #define SPEC_CTRL_SSBD (1 << SPEC_CTRL_SSBD_SHIFT) /* Speculative Store Bypass Disable */ | ||
| 45 | 47 | ||
| 46 | #define MSR_IA32_PRED_CMD 0x00000049 /* Prediction Command */ | 48 | #define MSR_IA32_PRED_CMD 0x00000049 /* Prediction Command */ |
| 47 | #define PRED_CMD_IBPB (1 << 0) /* Indirect Branch Prediction Barrier */ | 49 | #define PRED_CMD_IBPB (1 << 0) /* Indirect Branch Prediction Barrier */ |
| @@ -68,6 +70,11 @@ | |||
| 68 | #define MSR_IA32_ARCH_CAPABILITIES 0x0000010a | 70 | #define MSR_IA32_ARCH_CAPABILITIES 0x0000010a |
| 69 | #define ARCH_CAP_RDCL_NO (1 << 0) /* Not susceptible to Meltdown */ | 71 | #define ARCH_CAP_RDCL_NO (1 << 0) /* Not susceptible to Meltdown */ |
| 70 | #define ARCH_CAP_IBRS_ALL (1 << 1) /* Enhanced IBRS support */ | 72 | #define ARCH_CAP_IBRS_ALL (1 << 1) /* Enhanced IBRS support */ |
| 73 | #define ARCH_CAP_SSB_NO (1 << 4) /* | ||
| 74 | * Not susceptible to Speculative Store Bypass | ||
| 75 | * attack, so no Speculative Store Bypass | ||
| 76 | * control required. | ||
| 77 | */ | ||
| 71 | 78 | ||
| 72 | #define MSR_IA32_BBL_CR_CTL 0x00000119 | 79 | #define MSR_IA32_BBL_CR_CTL 0x00000119 |
| 73 | #define MSR_IA32_BBL_CR_CTL3 0x0000011e | 80 | #define MSR_IA32_BBL_CR_CTL3 0x0000011e |
| @@ -340,6 +347,8 @@ | |||
| 340 | #define MSR_AMD64_SEV_ENABLED_BIT 0 | 347 | #define MSR_AMD64_SEV_ENABLED_BIT 0 |
| 341 | #define MSR_AMD64_SEV_ENABLED BIT_ULL(MSR_AMD64_SEV_ENABLED_BIT) | 348 | #define MSR_AMD64_SEV_ENABLED BIT_ULL(MSR_AMD64_SEV_ENABLED_BIT) |
| 342 | 349 | ||
| 350 | #define MSR_AMD64_VIRT_SPEC_CTRL 0xc001011f | ||
| 351 | |||
| 343 | /* Fam 17h MSRs */ | 352 | /* Fam 17h MSRs */ |
| 344 | #define MSR_F17H_IRPERF 0xc00000e9 | 353 | #define MSR_F17H_IRPERF 0xc00000e9 |
| 345 | 354 | ||
diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h index f928ad9b143f..8b38df98548e 100644 --- a/arch/x86/include/asm/nospec-branch.h +++ b/arch/x86/include/asm/nospec-branch.h | |||
| @@ -217,6 +217,14 @@ enum spectre_v2_mitigation { | |||
| 217 | SPECTRE_V2_IBRS, | 217 | SPECTRE_V2_IBRS, |
| 218 | }; | 218 | }; |
| 219 | 219 | ||
| 220 | /* The Speculative Store Bypass disable variants */ | ||
| 221 | enum ssb_mitigation { | ||
| 222 | SPEC_STORE_BYPASS_NONE, | ||
| 223 | SPEC_STORE_BYPASS_DISABLE, | ||
| 224 | SPEC_STORE_BYPASS_PRCTL, | ||
| 225 | SPEC_STORE_BYPASS_SECCOMP, | ||
| 226 | }; | ||
| 227 | |||
| 220 | extern char __indirect_thunk_start[]; | 228 | extern char __indirect_thunk_start[]; |
| 221 | extern char __indirect_thunk_end[]; | 229 | extern char __indirect_thunk_end[]; |
| 222 | 230 | ||
| @@ -241,22 +249,27 @@ static inline void vmexit_fill_RSB(void) | |||
| 241 | #endif | 249 | #endif |
| 242 | } | 250 | } |
| 243 | 251 | ||
| 244 | #define alternative_msr_write(_msr, _val, _feature) \ | 252 | static __always_inline |
| 245 | asm volatile(ALTERNATIVE("", \ | 253 | void alternative_msr_write(unsigned int msr, u64 val, unsigned int feature) |
| 246 | "movl %[msr], %%ecx\n\t" \ | 254 | { |
| 247 | "movl %[val], %%eax\n\t" \ | 255 | asm volatile(ALTERNATIVE("", "wrmsr", %c[feature]) |
| 248 | "movl $0, %%edx\n\t" \ | 256 | : : "c" (msr), |
| 249 | "wrmsr", \ | 257 | "a" ((u32)val), |
| 250 | _feature) \ | 258 | "d" ((u32)(val >> 32)), |
| 251 | : : [msr] "i" (_msr), [val] "i" (_val) \ | 259 | [feature] "i" (feature) |
| 252 | : "eax", "ecx", "edx", "memory") | 260 | : "memory"); |
| 261 | } | ||
| 253 | 262 | ||
| 254 | static inline void indirect_branch_prediction_barrier(void) | 263 | static inline void indirect_branch_prediction_barrier(void) |
| 255 | { | 264 | { |
| 256 | alternative_msr_write(MSR_IA32_PRED_CMD, PRED_CMD_IBPB, | 265 | u64 val = PRED_CMD_IBPB; |
| 257 | X86_FEATURE_USE_IBPB); | 266 | |
| 267 | alternative_msr_write(MSR_IA32_PRED_CMD, val, X86_FEATURE_USE_IBPB); | ||
| 258 | } | 268 | } |
| 259 | 269 | ||
| 270 | /* The Intel SPEC CTRL MSR base value cache */ | ||
| 271 | extern u64 x86_spec_ctrl_base; | ||
| 272 | |||
| 260 | /* | 273 | /* |
| 261 | * With retpoline, we must use IBRS to restrict branch prediction | 274 | * With retpoline, we must use IBRS to restrict branch prediction |
| 262 | * before calling into firmware. | 275 | * before calling into firmware. |
| @@ -265,14 +278,18 @@ static inline void indirect_branch_prediction_barrier(void) | |||
| 265 | */ | 278 | */ |
| 266 | #define firmware_restrict_branch_speculation_start() \ | 279 | #define firmware_restrict_branch_speculation_start() \ |
| 267 | do { \ | 280 | do { \ |
| 281 | u64 val = x86_spec_ctrl_base | SPEC_CTRL_IBRS; \ | ||
| 282 | \ | ||
| 268 | preempt_disable(); \ | 283 | preempt_disable(); \ |
| 269 | alternative_msr_write(MSR_IA32_SPEC_CTRL, SPEC_CTRL_IBRS, \ | 284 | alternative_msr_write(MSR_IA32_SPEC_CTRL, val, \ |
| 270 | X86_FEATURE_USE_IBRS_FW); \ | 285 | X86_FEATURE_USE_IBRS_FW); \ |
| 271 | } while (0) | 286 | } while (0) |
| 272 | 287 | ||
| 273 | #define firmware_restrict_branch_speculation_end() \ | 288 | #define firmware_restrict_branch_speculation_end() \ |
| 274 | do { \ | 289 | do { \ |
| 275 | alternative_msr_write(MSR_IA32_SPEC_CTRL, 0, \ | 290 | u64 val = x86_spec_ctrl_base; \ |
| 291 | \ | ||
| 292 | alternative_msr_write(MSR_IA32_SPEC_CTRL, val, \ | ||
| 276 | X86_FEATURE_USE_IBRS_FW); \ | 293 | X86_FEATURE_USE_IBRS_FW); \ |
| 277 | preempt_enable(); \ | 294 | preempt_enable(); \ |
| 278 | } while (0) | 295 | } while (0) |
diff --git a/arch/x86/include/asm/pkeys.h b/arch/x86/include/asm/pkeys.h index a0ba1ffda0df..851c04b7a092 100644 --- a/arch/x86/include/asm/pkeys.h +++ b/arch/x86/include/asm/pkeys.h | |||
| @@ -2,6 +2,8 @@ | |||
| 2 | #ifndef _ASM_X86_PKEYS_H | 2 | #ifndef _ASM_X86_PKEYS_H |
| 3 | #define _ASM_X86_PKEYS_H | 3 | #define _ASM_X86_PKEYS_H |
| 4 | 4 | ||
| 5 | #define ARCH_DEFAULT_PKEY 0 | ||
| 6 | |||
| 5 | #define arch_max_pkey() (boot_cpu_has(X86_FEATURE_OSPKE) ? 16 : 1) | 7 | #define arch_max_pkey() (boot_cpu_has(X86_FEATURE_OSPKE) ? 16 : 1) |
| 6 | 8 | ||
| 7 | extern int arch_set_user_pkey_access(struct task_struct *tsk, int pkey, | 9 | extern int arch_set_user_pkey_access(struct task_struct *tsk, int pkey, |
| @@ -15,7 +17,7 @@ extern int __execute_only_pkey(struct mm_struct *mm); | |||
| 15 | static inline int execute_only_pkey(struct mm_struct *mm) | 17 | static inline int execute_only_pkey(struct mm_struct *mm) |
| 16 | { | 18 | { |
| 17 | if (!boot_cpu_has(X86_FEATURE_OSPKE)) | 19 | if (!boot_cpu_has(X86_FEATURE_OSPKE)) |
| 18 | return 0; | 20 | return ARCH_DEFAULT_PKEY; |
| 19 | 21 | ||
| 20 | return __execute_only_pkey(mm); | 22 | return __execute_only_pkey(mm); |
| 21 | } | 23 | } |
| @@ -49,13 +51,21 @@ bool mm_pkey_is_allocated(struct mm_struct *mm, int pkey) | |||
| 49 | { | 51 | { |
| 50 | /* | 52 | /* |
| 51 | * "Allocated" pkeys are those that have been returned | 53 | * "Allocated" pkeys are those that have been returned |
| 52 | * from pkey_alloc(). pkey 0 is special, and never | 54 | * from pkey_alloc() or pkey 0 which is allocated |
| 53 | * returned from pkey_alloc(). | 55 | * implicitly when the mm is created. |
| 54 | */ | 56 | */ |
| 55 | if (pkey <= 0) | 57 | if (pkey < 0) |
| 56 | return false; | 58 | return false; |
| 57 | if (pkey >= arch_max_pkey()) | 59 | if (pkey >= arch_max_pkey()) |
| 58 | return false; | 60 | return false; |
| 61 | /* | ||
| 62 | * The exec-only pkey is set in the allocation map, but | ||
| 63 | * is not available to any of the user interfaces like | ||
| 64 | * mprotect_pkey(). | ||
| 65 | */ | ||
| 66 | if (pkey == mm->context.execute_only_pkey) | ||
| 67 | return false; | ||
| 68 | |||
| 59 | return mm_pkey_allocation_map(mm) & (1U << pkey); | 69 | return mm_pkey_allocation_map(mm) & (1U << pkey); |
| 60 | } | 70 | } |
| 61 | 71 | ||
diff --git a/arch/x86/include/asm/spec-ctrl.h b/arch/x86/include/asm/spec-ctrl.h new file mode 100644 index 000000000000..ae7c2c5cd7f0 --- /dev/null +++ b/arch/x86/include/asm/spec-ctrl.h | |||
| @@ -0,0 +1,80 @@ | |||
| 1 | /* SPDX-License-Identifier: GPL-2.0 */ | ||
| 2 | #ifndef _ASM_X86_SPECCTRL_H_ | ||
| 3 | #define _ASM_X86_SPECCTRL_H_ | ||
| 4 | |||
| 5 | #include <linux/thread_info.h> | ||
| 6 | #include <asm/nospec-branch.h> | ||
| 7 | |||
| 8 | /* | ||
| 9 | * On VMENTER we must preserve whatever view of the SPEC_CTRL MSR | ||
| 10 | * the guest has, while on VMEXIT we restore the host view. This | ||
| 11 | * would be easier if SPEC_CTRL were architecturally maskable or | ||
| 12 | * shadowable for guests but this is not (currently) the case. | ||
| 13 | * Takes the guest view of SPEC_CTRL MSR as a parameter and also | ||
| 14 | * the guest's version of VIRT_SPEC_CTRL, if emulated. | ||
| 15 | */ | ||
| 16 | extern void x86_virt_spec_ctrl(u64 guest_spec_ctrl, u64 guest_virt_spec_ctrl, bool guest); | ||
| 17 | |||
| 18 | /** | ||
| 19 | * x86_spec_ctrl_set_guest - Set speculation control registers for the guest | ||
| 20 | * @guest_spec_ctrl: The guest content of MSR_SPEC_CTRL | ||
| 21 | * @guest_virt_spec_ctrl: The guest controlled bits of MSR_VIRT_SPEC_CTRL | ||
| 22 | * (may get translated to MSR_AMD64_LS_CFG bits) | ||
| 23 | * | ||
| 24 | * Avoids writing to the MSR if the content/bits are the same | ||
| 25 | */ | ||
| 26 | static inline | ||
| 27 | void x86_spec_ctrl_set_guest(u64 guest_spec_ctrl, u64 guest_virt_spec_ctrl) | ||
| 28 | { | ||
| 29 | x86_virt_spec_ctrl(guest_spec_ctrl, guest_virt_spec_ctrl, true); | ||
| 30 | } | ||
| 31 | |||
| 32 | /** | ||
| 33 | * x86_spec_ctrl_restore_host - Restore host speculation control registers | ||
| 34 | * @guest_spec_ctrl: The guest content of MSR_SPEC_CTRL | ||
| 35 | * @guest_virt_spec_ctrl: The guest controlled bits of MSR_VIRT_SPEC_CTRL | ||
| 36 | * (may get translated to MSR_AMD64_LS_CFG bits) | ||
| 37 | * | ||
| 38 | * Avoids writing to the MSR if the content/bits are the same | ||
| 39 | */ | ||
| 40 | static inline | ||
| 41 | void x86_spec_ctrl_restore_host(u64 guest_spec_ctrl, u64 guest_virt_spec_ctrl) | ||
| 42 | { | ||
| 43 | x86_virt_spec_ctrl(guest_spec_ctrl, guest_virt_spec_ctrl, false); | ||
| 44 | } | ||
| 45 | |||
| 46 | /* AMD specific Speculative Store Bypass MSR data */ | ||
| 47 | extern u64 x86_amd_ls_cfg_base; | ||
| 48 | extern u64 x86_amd_ls_cfg_ssbd_mask; | ||
| 49 | |||
| 50 | static inline u64 ssbd_tif_to_spec_ctrl(u64 tifn) | ||
| 51 | { | ||
| 52 | BUILD_BUG_ON(TIF_SSBD < SPEC_CTRL_SSBD_SHIFT); | ||
| 53 | return (tifn & _TIF_SSBD) >> (TIF_SSBD - SPEC_CTRL_SSBD_SHIFT); | ||
| 54 | } | ||
| 55 | |||
| 56 | static inline unsigned long ssbd_spec_ctrl_to_tif(u64 spec_ctrl) | ||
| 57 | { | ||
| 58 | BUILD_BUG_ON(TIF_SSBD < SPEC_CTRL_SSBD_SHIFT); | ||
| 59 | return (spec_ctrl & SPEC_CTRL_SSBD) << (TIF_SSBD - SPEC_CTRL_SSBD_SHIFT); | ||
| 60 | } | ||
| 61 | |||
| 62 | static inline u64 ssbd_tif_to_amd_ls_cfg(u64 tifn) | ||
| 63 | { | ||
| 64 | return (tifn & _TIF_SSBD) ? x86_amd_ls_cfg_ssbd_mask : 0ULL; | ||
| 65 | } | ||
| 66 | |||
| 67 | #ifdef CONFIG_SMP | ||
| 68 | extern void speculative_store_bypass_ht_init(void); | ||
| 69 | #else | ||
| 70 | static inline void speculative_store_bypass_ht_init(void) { } | ||
| 71 | #endif | ||
| 72 | |||
| 73 | extern void speculative_store_bypass_update(unsigned long tif); | ||
| 74 | |||
| 75 | static inline void speculative_store_bypass_update_current(void) | ||
| 76 | { | ||
| 77 | speculative_store_bypass_update(current_thread_info()->flags); | ||
| 78 | } | ||
| 79 | |||
| 80 | #endif | ||
diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h index a5d9521bb2cb..2ff2a30a264f 100644 --- a/arch/x86/include/asm/thread_info.h +++ b/arch/x86/include/asm/thread_info.h | |||
| @@ -79,6 +79,7 @@ struct thread_info { | |||
| 79 | #define TIF_SIGPENDING 2 /* signal pending */ | 79 | #define TIF_SIGPENDING 2 /* signal pending */ |
| 80 | #define TIF_NEED_RESCHED 3 /* rescheduling necessary */ | 80 | #define TIF_NEED_RESCHED 3 /* rescheduling necessary */ |
| 81 | #define TIF_SINGLESTEP 4 /* reenable singlestep on user return*/ | 81 | #define TIF_SINGLESTEP 4 /* reenable singlestep on user return*/ |
| 82 | #define TIF_SSBD 5 /* Reduced data speculation */ | ||
| 82 | #define TIF_SYSCALL_EMU 6 /* syscall emulation active */ | 83 | #define TIF_SYSCALL_EMU 6 /* syscall emulation active */ |
| 83 | #define TIF_SYSCALL_AUDIT 7 /* syscall auditing active */ | 84 | #define TIF_SYSCALL_AUDIT 7 /* syscall auditing active */ |
| 84 | #define TIF_SECCOMP 8 /* secure computing */ | 85 | #define TIF_SECCOMP 8 /* secure computing */ |
| @@ -105,6 +106,7 @@ struct thread_info { | |||
| 105 | #define _TIF_SIGPENDING (1 << TIF_SIGPENDING) | 106 | #define _TIF_SIGPENDING (1 << TIF_SIGPENDING) |
| 106 | #define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED) | 107 | #define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED) |
| 107 | #define _TIF_SINGLESTEP (1 << TIF_SINGLESTEP) | 108 | #define _TIF_SINGLESTEP (1 << TIF_SINGLESTEP) |
| 109 | #define _TIF_SSBD (1 << TIF_SSBD) | ||
| 108 | #define _TIF_SYSCALL_EMU (1 << TIF_SYSCALL_EMU) | 110 | #define _TIF_SYSCALL_EMU (1 << TIF_SYSCALL_EMU) |
| 109 | #define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT) | 111 | #define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT) |
| 110 | #define _TIF_SECCOMP (1 << TIF_SECCOMP) | 112 | #define _TIF_SECCOMP (1 << TIF_SECCOMP) |
| @@ -144,7 +146,7 @@ struct thread_info { | |||
| 144 | 146 | ||
| 145 | /* flags to check in __switch_to() */ | 147 | /* flags to check in __switch_to() */ |
| 146 | #define _TIF_WORK_CTXSW \ | 148 | #define _TIF_WORK_CTXSW \ |
| 147 | (_TIF_IO_BITMAP|_TIF_NOCPUID|_TIF_NOTSC|_TIF_BLOCKSTEP) | 149 | (_TIF_IO_BITMAP|_TIF_NOCPUID|_TIF_NOTSC|_TIF_BLOCKSTEP|_TIF_SSBD) |
| 148 | 150 | ||
| 149 | #define _TIF_WORK_CTXSW_PREV (_TIF_WORK_CTXSW|_TIF_USER_RETURN_NOTIFY) | 151 | #define _TIF_WORK_CTXSW_PREV (_TIF_WORK_CTXSW|_TIF_USER_RETURN_NOTIFY) |
| 150 | #define _TIF_WORK_CTXSW_NEXT (_TIF_WORK_CTXSW) | 152 | #define _TIF_WORK_CTXSW_NEXT (_TIF_WORK_CTXSW) |
diff --git a/arch/x86/include/uapi/asm/kvm_para.h b/arch/x86/include/uapi/asm/kvm_para.h index 4c851ebb3ceb..0ede697c3961 100644 --- a/arch/x86/include/uapi/asm/kvm_para.h +++ b/arch/x86/include/uapi/asm/kvm_para.h | |||
| @@ -29,7 +29,7 @@ | |||
| 29 | #define KVM_FEATURE_PV_TLB_FLUSH 9 | 29 | #define KVM_FEATURE_PV_TLB_FLUSH 9 |
| 30 | #define KVM_FEATURE_ASYNC_PF_VMEXIT 10 | 30 | #define KVM_FEATURE_ASYNC_PF_VMEXIT 10 |
| 31 | 31 | ||
| 32 | #define KVM_HINTS_DEDICATED 0 | 32 | #define KVM_HINTS_REALTIME 0 |
| 33 | 33 | ||
| 34 | /* The last 8 bits are used to indicate how to interpret the flags field | 34 | /* The last 8 bits are used to indicate how to interpret the flags field |
| 35 | * in pvclock structure. If no bits are set, all flags are ignored. | 35 | * in pvclock structure. If no bits are set, all flags are ignored. |
diff --git a/arch/x86/kernel/amd_nb.c b/arch/x86/kernel/amd_nb.c index c88e0b127810..b481b95bd8f6 100644 --- a/arch/x86/kernel/amd_nb.c +++ b/arch/x86/kernel/amd_nb.c | |||
| @@ -14,8 +14,11 @@ | |||
| 14 | #include <asm/amd_nb.h> | 14 | #include <asm/amd_nb.h> |
| 15 | 15 | ||
| 16 | #define PCI_DEVICE_ID_AMD_17H_ROOT 0x1450 | 16 | #define PCI_DEVICE_ID_AMD_17H_ROOT 0x1450 |
| 17 | #define PCI_DEVICE_ID_AMD_17H_M10H_ROOT 0x15d0 | ||
| 17 | #define PCI_DEVICE_ID_AMD_17H_DF_F3 0x1463 | 18 | #define PCI_DEVICE_ID_AMD_17H_DF_F3 0x1463 |
| 18 | #define PCI_DEVICE_ID_AMD_17H_DF_F4 0x1464 | 19 | #define PCI_DEVICE_ID_AMD_17H_DF_F4 0x1464 |
| 20 | #define PCI_DEVICE_ID_AMD_17H_M10H_DF_F3 0x15eb | ||
| 21 | #define PCI_DEVICE_ID_AMD_17H_M10H_DF_F4 0x15ec | ||
| 19 | 22 | ||
| 20 | /* Protect the PCI config register pairs used for SMN and DF indirect access. */ | 23 | /* Protect the PCI config register pairs used for SMN and DF indirect access. */ |
| 21 | static DEFINE_MUTEX(smn_mutex); | 24 | static DEFINE_MUTEX(smn_mutex); |
| @@ -24,6 +27,7 @@ static u32 *flush_words; | |||
| 24 | 27 | ||
| 25 | static const struct pci_device_id amd_root_ids[] = { | 28 | static const struct pci_device_id amd_root_ids[] = { |
| 26 | { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_ROOT) }, | 29 | { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_ROOT) }, |
| 30 | { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M10H_ROOT) }, | ||
| 27 | {} | 31 | {} |
| 28 | }; | 32 | }; |
| 29 | 33 | ||
| @@ -39,6 +43,7 @@ const struct pci_device_id amd_nb_misc_ids[] = { | |||
| 39 | { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_NB_F3) }, | 43 | { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_NB_F3) }, |
| 40 | { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_M30H_NB_F3) }, | 44 | { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_M30H_NB_F3) }, |
| 41 | { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_DF_F3) }, | 45 | { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_DF_F3) }, |
| 46 | { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M10H_DF_F3) }, | ||
| 42 | { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CNB17H_F3) }, | 47 | { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CNB17H_F3) }, |
| 43 | {} | 48 | {} |
| 44 | }; | 49 | }; |
| @@ -51,6 +56,7 @@ static const struct pci_device_id amd_nb_link_ids[] = { | |||
| 51 | { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_NB_F4) }, | 56 | { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_NB_F4) }, |
| 52 | { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_M30H_NB_F4) }, | 57 | { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_M30H_NB_F4) }, |
| 53 | { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_DF_F4) }, | 58 | { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_DF_F4) }, |
| 59 | { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M10H_DF_F4) }, | ||
| 54 | { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CNB17H_F4) }, | 60 | { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CNB17H_F4) }, |
| 55 | {} | 61 | {} |
| 56 | }; | 62 | }; |
diff --git a/arch/x86/kernel/apic/x2apic_cluster.c b/arch/x86/kernel/apic/x2apic_cluster.c index 8b04234e010b..7685444a106b 100644 --- a/arch/x86/kernel/apic/x2apic_cluster.c +++ b/arch/x86/kernel/apic/x2apic_cluster.c | |||
| @@ -116,6 +116,7 @@ static void init_x2apic_ldr(void) | |||
| 116 | goto update; | 116 | goto update; |
| 117 | } | 117 | } |
| 118 | cmsk = cluster_hotplug_mask; | 118 | cmsk = cluster_hotplug_mask; |
| 119 | cmsk->clusterid = cluster; | ||
| 119 | cluster_hotplug_mask = NULL; | 120 | cluster_hotplug_mask = NULL; |
| 120 | update: | 121 | update: |
| 121 | this_cpu_write(cluster_masks, cmsk); | 122 | this_cpu_write(cluster_masks, cmsk); |
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c index 12bc0a1139da..1b18be3f35a8 100644 --- a/arch/x86/kernel/cpu/amd.c +++ b/arch/x86/kernel/cpu/amd.c | |||
| @@ -10,6 +10,7 @@ | |||
| 10 | #include <asm/processor.h> | 10 | #include <asm/processor.h> |
| 11 | #include <asm/apic.h> | 11 | #include <asm/apic.h> |
| 12 | #include <asm/cpu.h> | 12 | #include <asm/cpu.h> |
| 13 | #include <asm/spec-ctrl.h> | ||
| 13 | #include <asm/smp.h> | 14 | #include <asm/smp.h> |
| 14 | #include <asm/pci-direct.h> | 15 | #include <asm/pci-direct.h> |
| 15 | #include <asm/delay.h> | 16 | #include <asm/delay.h> |
| @@ -554,6 +555,26 @@ static void bsp_init_amd(struct cpuinfo_x86 *c) | |||
| 554 | rdmsrl(MSR_FAM10H_NODE_ID, value); | 555 | rdmsrl(MSR_FAM10H_NODE_ID, value); |
| 555 | nodes_per_socket = ((value >> 3) & 7) + 1; | 556 | nodes_per_socket = ((value >> 3) & 7) + 1; |
| 556 | } | 557 | } |
| 558 | |||
| 559 | if (c->x86 >= 0x15 && c->x86 <= 0x17) { | ||
| 560 | unsigned int bit; | ||
| 561 | |||
| 562 | switch (c->x86) { | ||
| 563 | case 0x15: bit = 54; break; | ||
| 564 | case 0x16: bit = 33; break; | ||
| 565 | case 0x17: bit = 10; break; | ||
| 566 | default: return; | ||
| 567 | } | ||
| 568 | /* | ||
| 569 | * Try to cache the base value so further operations can | ||
| 570 | * avoid RMW. If that faults, do not enable SSBD. | ||
| 571 | */ | ||
| 572 | if (!rdmsrl_safe(MSR_AMD64_LS_CFG, &x86_amd_ls_cfg_base)) { | ||
| 573 | setup_force_cpu_cap(X86_FEATURE_LS_CFG_SSBD); | ||
| 574 | setup_force_cpu_cap(X86_FEATURE_SSBD); | ||
| 575 | x86_amd_ls_cfg_ssbd_mask = 1ULL << bit; | ||
| 576 | } | ||
| 577 | } | ||
| 557 | } | 578 | } |
| 558 | 579 | ||
| 559 | static void early_detect_mem_encrypt(struct cpuinfo_x86 *c) | 580 | static void early_detect_mem_encrypt(struct cpuinfo_x86 *c) |
| @@ -791,6 +812,7 @@ static void init_amd_bd(struct cpuinfo_x86 *c) | |||
| 791 | 812 | ||
| 792 | static void init_amd_zn(struct cpuinfo_x86 *c) | 813 | static void init_amd_zn(struct cpuinfo_x86 *c) |
| 793 | { | 814 | { |
| 815 | set_cpu_cap(c, X86_FEATURE_ZEN); | ||
| 794 | /* | 816 | /* |
| 795 | * Fix erratum 1076: CPB feature bit not being set in CPUID. It affects | 817 | * Fix erratum 1076: CPB feature bit not being set in CPUID. It affects |
| 796 | * all up to and including B1. | 818 | * all up to and including B1. |
diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c index bfca937bdcc3..7416fc206b4a 100644 --- a/arch/x86/kernel/cpu/bugs.c +++ b/arch/x86/kernel/cpu/bugs.c | |||
| @@ -12,8 +12,10 @@ | |||
| 12 | #include <linux/utsname.h> | 12 | #include <linux/utsname.h> |
| 13 | #include <linux/cpu.h> | 13 | #include <linux/cpu.h> |
| 14 | #include <linux/module.h> | 14 | #include <linux/module.h> |
| 15 | #include <linux/nospec.h> | ||
| 16 | #include <linux/prctl.h> | ||
| 15 | 17 | ||
| 16 | #include <asm/nospec-branch.h> | 18 | #include <asm/spec-ctrl.h> |
| 17 | #include <asm/cmdline.h> | 19 | #include <asm/cmdline.h> |
| 18 | #include <asm/bugs.h> | 20 | #include <asm/bugs.h> |
| 19 | #include <asm/processor.h> | 21 | #include <asm/processor.h> |
| @@ -27,6 +29,27 @@ | |||
| 27 | #include <asm/intel-family.h> | 29 | #include <asm/intel-family.h> |
| 28 | 30 | ||
| 29 | static void __init spectre_v2_select_mitigation(void); | 31 | static void __init spectre_v2_select_mitigation(void); |
| 32 | static void __init ssb_select_mitigation(void); | ||
| 33 | |||
| 34 | /* | ||
| 35 | * Our boot-time value of the SPEC_CTRL MSR. We read it once so that any | ||
| 36 | * writes to SPEC_CTRL contain whatever reserved bits have been set. | ||
| 37 | */ | ||
| 38 | u64 __ro_after_init x86_spec_ctrl_base; | ||
| 39 | EXPORT_SYMBOL_GPL(x86_spec_ctrl_base); | ||
| 40 | |||
| 41 | /* | ||
| 42 | * The vendor and possibly platform specific bits which can be modified in | ||
| 43 | * x86_spec_ctrl_base. | ||
| 44 | */ | ||
| 45 | static u64 __ro_after_init x86_spec_ctrl_mask = SPEC_CTRL_IBRS; | ||
| 46 | |||
| 47 | /* | ||
| 48 | * AMD specific MSR info for Speculative Store Bypass control. | ||
| 49 | * x86_amd_ls_cfg_ssbd_mask is initialized in identify_boot_cpu(). | ||
| 50 | */ | ||
| 51 | u64 __ro_after_init x86_amd_ls_cfg_base; | ||
| 52 | u64 __ro_after_init x86_amd_ls_cfg_ssbd_mask; | ||
| 30 | 53 | ||
| 31 | void __init check_bugs(void) | 54 | void __init check_bugs(void) |
| 32 | { | 55 | { |
| @@ -37,9 +60,27 @@ void __init check_bugs(void) | |||
| 37 | print_cpu_info(&boot_cpu_data); | 60 | print_cpu_info(&boot_cpu_data); |
| 38 | } | 61 | } |
| 39 | 62 | ||
| 63 | /* | ||
| 64 | * Read the SPEC_CTRL MSR to account for reserved bits which may | ||
| 65 | * have unknown values. AMD64_LS_CFG MSR is cached in the early AMD | ||
| 66 | * init code as it is not enumerated and depends on the family. | ||
| 67 | */ | ||
| 68 | if (boot_cpu_has(X86_FEATURE_MSR_SPEC_CTRL)) | ||
| 69 | rdmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base); | ||
| 70 | |||
| 71 | /* Allow STIBP in MSR_SPEC_CTRL if supported */ | ||
| 72 | if (boot_cpu_has(X86_FEATURE_STIBP)) | ||
| 73 | x86_spec_ctrl_mask |= SPEC_CTRL_STIBP; | ||
| 74 | |||
| 40 | /* Select the proper spectre mitigation before patching alternatives */ | 75 | /* Select the proper spectre mitigation before patching alternatives */ |
| 41 | spectre_v2_select_mitigation(); | 76 | spectre_v2_select_mitigation(); |
| 42 | 77 | ||
| 78 | /* | ||
| 79 | * Select proper mitigation for any exposure to the Speculative Store | ||
| 80 | * Bypass vulnerability. | ||
| 81 | */ | ||
| 82 | ssb_select_mitigation(); | ||
| 83 | |||
| 43 | #ifdef CONFIG_X86_32 | 84 | #ifdef CONFIG_X86_32 |
| 44 | /* | 85 | /* |
| 45 | * Check whether we are able to run this kernel safely on SMP. | 86 | * Check whether we are able to run this kernel safely on SMP. |
| @@ -93,7 +134,76 @@ static const char *spectre_v2_strings[] = { | |||
| 93 | #undef pr_fmt | 134 | #undef pr_fmt |
| 94 | #define pr_fmt(fmt) "Spectre V2 : " fmt | 135 | #define pr_fmt(fmt) "Spectre V2 : " fmt |
| 95 | 136 | ||
| 96 | static enum spectre_v2_mitigation spectre_v2_enabled = SPECTRE_V2_NONE; | 137 | static enum spectre_v2_mitigation spectre_v2_enabled __ro_after_init = |
| 138 | SPECTRE_V2_NONE; | ||
| 139 | |||
| 140 | void | ||
| 141 | x86_virt_spec_ctrl(u64 guest_spec_ctrl, u64 guest_virt_spec_ctrl, bool setguest) | ||
| 142 | { | ||
| 143 | u64 msrval, guestval, hostval = x86_spec_ctrl_base; | ||
| 144 | struct thread_info *ti = current_thread_info(); | ||
| 145 | |||
| 146 | /* Is MSR_SPEC_CTRL implemented ? */ | ||
| 147 | if (static_cpu_has(X86_FEATURE_MSR_SPEC_CTRL)) { | ||
| 148 | /* | ||
| 149 | * Restrict guest_spec_ctrl to supported values. Clear the | ||
| 150 | * modifiable bits in the host base value and or the | ||
| 151 | * modifiable bits from the guest value. | ||
| 152 | */ | ||
| 153 | guestval = hostval & ~x86_spec_ctrl_mask; | ||
| 154 | guestval |= guest_spec_ctrl & x86_spec_ctrl_mask; | ||
| 155 | |||
| 156 | /* SSBD controlled in MSR_SPEC_CTRL */ | ||
| 157 | if (static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD)) | ||
| 158 | hostval |= ssbd_tif_to_spec_ctrl(ti->flags); | ||
| 159 | |||
| 160 | if (hostval != guestval) { | ||
| 161 | msrval = setguest ? guestval : hostval; | ||
| 162 | wrmsrl(MSR_IA32_SPEC_CTRL, msrval); | ||
| 163 | } | ||
| 164 | } | ||
| 165 | |||
| 166 | /* | ||
| 167 | * If SSBD is not handled in MSR_SPEC_CTRL on AMD, update | ||
| 168 | * MSR_AMD64_L2_CFG or MSR_VIRT_SPEC_CTRL if supported. | ||
| 169 | */ | ||
| 170 | if (!static_cpu_has(X86_FEATURE_LS_CFG_SSBD) && | ||
| 171 | !static_cpu_has(X86_FEATURE_VIRT_SSBD)) | ||
| 172 | return; | ||
| 173 | |||
| 174 | /* | ||
| 175 | * If the host has SSBD mitigation enabled, force it in the host's | ||
| 176 | * virtual MSR value. If its not permanently enabled, evaluate | ||
| 177 | * current's TIF_SSBD thread flag. | ||
| 178 | */ | ||
| 179 | if (static_cpu_has(X86_FEATURE_SPEC_STORE_BYPASS_DISABLE)) | ||
| 180 | hostval = SPEC_CTRL_SSBD; | ||
| 181 | else | ||
| 182 | hostval = ssbd_tif_to_spec_ctrl(ti->flags); | ||
| 183 | |||
| 184 | /* Sanitize the guest value */ | ||
| 185 | guestval = guest_virt_spec_ctrl & SPEC_CTRL_SSBD; | ||
| 186 | |||
| 187 | if (hostval != guestval) { | ||
| 188 | unsigned long tif; | ||
| 189 | |||
| 190 | tif = setguest ? ssbd_spec_ctrl_to_tif(guestval) : | ||
| 191 | ssbd_spec_ctrl_to_tif(hostval); | ||
| 192 | |||
| 193 | speculative_store_bypass_update(tif); | ||
| 194 | } | ||
| 195 | } | ||
| 196 | EXPORT_SYMBOL_GPL(x86_virt_spec_ctrl); | ||
| 197 | |||
| 198 | static void x86_amd_ssb_disable(void) | ||
| 199 | { | ||
| 200 | u64 msrval = x86_amd_ls_cfg_base | x86_amd_ls_cfg_ssbd_mask; | ||
| 201 | |||
| 202 | if (boot_cpu_has(X86_FEATURE_VIRT_SSBD)) | ||
| 203 | wrmsrl(MSR_AMD64_VIRT_SPEC_CTRL, SPEC_CTRL_SSBD); | ||
| 204 | else if (boot_cpu_has(X86_FEATURE_LS_CFG_SSBD)) | ||
| 205 | wrmsrl(MSR_AMD64_LS_CFG, msrval); | ||
| 206 | } | ||
| 97 | 207 | ||
| 98 | #ifdef RETPOLINE | 208 | #ifdef RETPOLINE |
| 99 | static bool spectre_v2_bad_module; | 209 | static bool spectre_v2_bad_module; |
| @@ -312,32 +422,289 @@ retpoline_auto: | |||
| 312 | } | 422 | } |
| 313 | 423 | ||
| 314 | #undef pr_fmt | 424 | #undef pr_fmt |
| 425 | #define pr_fmt(fmt) "Speculative Store Bypass: " fmt | ||
| 426 | |||
| 427 | static enum ssb_mitigation ssb_mode __ro_after_init = SPEC_STORE_BYPASS_NONE; | ||
| 428 | |||
| 429 | /* The kernel command line selection */ | ||
| 430 | enum ssb_mitigation_cmd { | ||
| 431 | SPEC_STORE_BYPASS_CMD_NONE, | ||
| 432 | SPEC_STORE_BYPASS_CMD_AUTO, | ||
| 433 | SPEC_STORE_BYPASS_CMD_ON, | ||
| 434 | SPEC_STORE_BYPASS_CMD_PRCTL, | ||
| 435 | SPEC_STORE_BYPASS_CMD_SECCOMP, | ||
| 436 | }; | ||
| 437 | |||
| 438 | static const char *ssb_strings[] = { | ||
| 439 | [SPEC_STORE_BYPASS_NONE] = "Vulnerable", | ||
| 440 | [SPEC_STORE_BYPASS_DISABLE] = "Mitigation: Speculative Store Bypass disabled", | ||
| 441 | [SPEC_STORE_BYPASS_PRCTL] = "Mitigation: Speculative Store Bypass disabled via prctl", | ||
| 442 | [SPEC_STORE_BYPASS_SECCOMP] = "Mitigation: Speculative Store Bypass disabled via prctl and seccomp", | ||
| 443 | }; | ||
| 444 | |||
| 445 | static const struct { | ||
| 446 | const char *option; | ||
| 447 | enum ssb_mitigation_cmd cmd; | ||
| 448 | } ssb_mitigation_options[] = { | ||
| 449 | { "auto", SPEC_STORE_BYPASS_CMD_AUTO }, /* Platform decides */ | ||
| 450 | { "on", SPEC_STORE_BYPASS_CMD_ON }, /* Disable Speculative Store Bypass */ | ||
| 451 | { "off", SPEC_STORE_BYPASS_CMD_NONE }, /* Don't touch Speculative Store Bypass */ | ||
| 452 | { "prctl", SPEC_STORE_BYPASS_CMD_PRCTL }, /* Disable Speculative Store Bypass via prctl */ | ||
| 453 | { "seccomp", SPEC_STORE_BYPASS_CMD_SECCOMP }, /* Disable Speculative Store Bypass via prctl and seccomp */ | ||
| 454 | }; | ||
| 455 | |||
| 456 | static enum ssb_mitigation_cmd __init ssb_parse_cmdline(void) | ||
| 457 | { | ||
| 458 | enum ssb_mitigation_cmd cmd = SPEC_STORE_BYPASS_CMD_AUTO; | ||
| 459 | char arg[20]; | ||
| 460 | int ret, i; | ||
| 461 | |||
| 462 | if (cmdline_find_option_bool(boot_command_line, "nospec_store_bypass_disable")) { | ||
| 463 | return SPEC_STORE_BYPASS_CMD_NONE; | ||
| 464 | } else { | ||
| 465 | ret = cmdline_find_option(boot_command_line, "spec_store_bypass_disable", | ||
| 466 | arg, sizeof(arg)); | ||
| 467 | if (ret < 0) | ||
| 468 | return SPEC_STORE_BYPASS_CMD_AUTO; | ||
| 469 | |||
| 470 | for (i = 0; i < ARRAY_SIZE(ssb_mitigation_options); i++) { | ||
| 471 | if (!match_option(arg, ret, ssb_mitigation_options[i].option)) | ||
| 472 | continue; | ||
| 473 | |||
| 474 | cmd = ssb_mitigation_options[i].cmd; | ||
| 475 | break; | ||
| 476 | } | ||
| 477 | |||
| 478 | if (i >= ARRAY_SIZE(ssb_mitigation_options)) { | ||
| 479 | pr_err("unknown option (%s). Switching to AUTO select\n", arg); | ||
| 480 | return SPEC_STORE_BYPASS_CMD_AUTO; | ||
| 481 | } | ||
| 482 | } | ||
| 483 | |||
| 484 | return cmd; | ||
| 485 | } | ||
| 486 | |||
| 487 | static enum ssb_mitigation __init __ssb_select_mitigation(void) | ||
| 488 | { | ||
| 489 | enum ssb_mitigation mode = SPEC_STORE_BYPASS_NONE; | ||
| 490 | enum ssb_mitigation_cmd cmd; | ||
| 491 | |||
| 492 | if (!boot_cpu_has(X86_FEATURE_SSBD)) | ||
| 493 | return mode; | ||
| 494 | |||
| 495 | cmd = ssb_parse_cmdline(); | ||
| 496 | if (!boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS) && | ||
| 497 | (cmd == SPEC_STORE_BYPASS_CMD_NONE || | ||
| 498 | cmd == SPEC_STORE_BYPASS_CMD_AUTO)) | ||
| 499 | return mode; | ||
| 500 | |||
| 501 | switch (cmd) { | ||
| 502 | case SPEC_STORE_BYPASS_CMD_AUTO: | ||
| 503 | case SPEC_STORE_BYPASS_CMD_SECCOMP: | ||
| 504 | /* | ||
| 505 | * Choose prctl+seccomp as the default mode if seccomp is | ||
| 506 | * enabled. | ||
| 507 | */ | ||
| 508 | if (IS_ENABLED(CONFIG_SECCOMP)) | ||
| 509 | mode = SPEC_STORE_BYPASS_SECCOMP; | ||
| 510 | else | ||
| 511 | mode = SPEC_STORE_BYPASS_PRCTL; | ||
| 512 | break; | ||
| 513 | case SPEC_STORE_BYPASS_CMD_ON: | ||
| 514 | mode = SPEC_STORE_BYPASS_DISABLE; | ||
| 515 | break; | ||
| 516 | case SPEC_STORE_BYPASS_CMD_PRCTL: | ||
| 517 | mode = SPEC_STORE_BYPASS_PRCTL; | ||
| 518 | break; | ||
| 519 | case SPEC_STORE_BYPASS_CMD_NONE: | ||
| 520 | break; | ||
| 521 | } | ||
| 522 | |||
| 523 | /* | ||
| 524 | * We have three CPU feature flags that are in play here: | ||
| 525 | * - X86_BUG_SPEC_STORE_BYPASS - CPU is susceptible. | ||
| 526 | * - X86_FEATURE_SSBD - CPU is able to turn off speculative store bypass | ||
| 527 | * - X86_FEATURE_SPEC_STORE_BYPASS_DISABLE - engage the mitigation | ||
| 528 | */ | ||
| 529 | if (mode == SPEC_STORE_BYPASS_DISABLE) { | ||
| 530 | setup_force_cpu_cap(X86_FEATURE_SPEC_STORE_BYPASS_DISABLE); | ||
| 531 | /* | ||
| 532 | * Intel uses the SPEC CTRL MSR Bit(2) for this, while AMD uses | ||
| 533 | * a completely different MSR and bit dependent on family. | ||
| 534 | */ | ||
| 535 | switch (boot_cpu_data.x86_vendor) { | ||
| 536 | case X86_VENDOR_INTEL: | ||
| 537 | x86_spec_ctrl_base |= SPEC_CTRL_SSBD; | ||
| 538 | x86_spec_ctrl_mask |= SPEC_CTRL_SSBD; | ||
| 539 | wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base); | ||
| 540 | break; | ||
| 541 | case X86_VENDOR_AMD: | ||
| 542 | x86_amd_ssb_disable(); | ||
| 543 | break; | ||
| 544 | } | ||
| 545 | } | ||
| 546 | |||
| 547 | return mode; | ||
| 548 | } | ||
| 549 | |||
| 550 | static void ssb_select_mitigation(void) | ||
| 551 | { | ||
| 552 | ssb_mode = __ssb_select_mitigation(); | ||
| 553 | |||
| 554 | if (boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS)) | ||
| 555 | pr_info("%s\n", ssb_strings[ssb_mode]); | ||
| 556 | } | ||
| 557 | |||
| 558 | #undef pr_fmt | ||
| 559 | #define pr_fmt(fmt) "Speculation prctl: " fmt | ||
| 560 | |||
| 561 | static int ssb_prctl_set(struct task_struct *task, unsigned long ctrl) | ||
| 562 | { | ||
| 563 | bool update; | ||
| 564 | |||
| 565 | if (ssb_mode != SPEC_STORE_BYPASS_PRCTL && | ||
| 566 | ssb_mode != SPEC_STORE_BYPASS_SECCOMP) | ||
| 567 | return -ENXIO; | ||
| 568 | |||
| 569 | switch (ctrl) { | ||
| 570 | case PR_SPEC_ENABLE: | ||
| 571 | /* If speculation is force disabled, enable is not allowed */ | ||
| 572 | if (task_spec_ssb_force_disable(task)) | ||
| 573 | return -EPERM; | ||
| 574 | task_clear_spec_ssb_disable(task); | ||
| 575 | update = test_and_clear_tsk_thread_flag(task, TIF_SSBD); | ||
| 576 | break; | ||
| 577 | case PR_SPEC_DISABLE: | ||
| 578 | task_set_spec_ssb_disable(task); | ||
| 579 | update = !test_and_set_tsk_thread_flag(task, TIF_SSBD); | ||
| 580 | break; | ||
| 581 | case PR_SPEC_FORCE_DISABLE: | ||
| 582 | task_set_spec_ssb_disable(task); | ||
| 583 | task_set_spec_ssb_force_disable(task); | ||
| 584 | update = !test_and_set_tsk_thread_flag(task, TIF_SSBD); | ||
| 585 | break; | ||
| 586 | default: | ||
| 587 | return -ERANGE; | ||
| 588 | } | ||
| 589 | |||
| 590 | /* | ||
| 591 | * If being set on non-current task, delay setting the CPU | ||
| 592 | * mitigation until it is next scheduled. | ||
| 593 | */ | ||
| 594 | if (task == current && update) | ||
| 595 | speculative_store_bypass_update_current(); | ||
| 596 | |||
| 597 | return 0; | ||
| 598 | } | ||
| 599 | |||
| 600 | int arch_prctl_spec_ctrl_set(struct task_struct *task, unsigned long which, | ||
| 601 | unsigned long ctrl) | ||
| 602 | { | ||
| 603 | switch (which) { | ||
| 604 | case PR_SPEC_STORE_BYPASS: | ||
| 605 | return ssb_prctl_set(task, ctrl); | ||
| 606 | default: | ||
| 607 | return -ENODEV; | ||
| 608 | } | ||
| 609 | } | ||
| 610 | |||
| 611 | #ifdef CONFIG_SECCOMP | ||
| 612 | void arch_seccomp_spec_mitigate(struct task_struct *task) | ||
| 613 | { | ||
| 614 | if (ssb_mode == SPEC_STORE_BYPASS_SECCOMP) | ||
| 615 | ssb_prctl_set(task, PR_SPEC_FORCE_DISABLE); | ||
| 616 | } | ||
| 617 | #endif | ||
| 618 | |||
| 619 | static int ssb_prctl_get(struct task_struct *task) | ||
| 620 | { | ||
| 621 | switch (ssb_mode) { | ||
| 622 | case SPEC_STORE_BYPASS_DISABLE: | ||
| 623 | return PR_SPEC_DISABLE; | ||
| 624 | case SPEC_STORE_BYPASS_SECCOMP: | ||
| 625 | case SPEC_STORE_BYPASS_PRCTL: | ||
| 626 | if (task_spec_ssb_force_disable(task)) | ||
| 627 | return PR_SPEC_PRCTL | PR_SPEC_FORCE_DISABLE; | ||
| 628 | if (task_spec_ssb_disable(task)) | ||
| 629 | return PR_SPEC_PRCTL | PR_SPEC_DISABLE; | ||
| 630 | return PR_SPEC_PRCTL | PR_SPEC_ENABLE; | ||
| 631 | default: | ||
| 632 | if (boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS)) | ||
| 633 | return PR_SPEC_ENABLE; | ||
| 634 | return PR_SPEC_NOT_AFFECTED; | ||
| 635 | } | ||
| 636 | } | ||
| 637 | |||
| 638 | int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which) | ||
| 639 | { | ||
| 640 | switch (which) { | ||
| 641 | case PR_SPEC_STORE_BYPASS: | ||
| 642 | return ssb_prctl_get(task); | ||
| 643 | default: | ||
| 644 | return -ENODEV; | ||
| 645 | } | ||
| 646 | } | ||
| 647 | |||
| 648 | void x86_spec_ctrl_setup_ap(void) | ||
| 649 | { | ||
| 650 | if (boot_cpu_has(X86_FEATURE_MSR_SPEC_CTRL)) | ||
| 651 | wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base); | ||
| 652 | |||
| 653 | if (ssb_mode == SPEC_STORE_BYPASS_DISABLE) | ||
| 654 | x86_amd_ssb_disable(); | ||
| 655 | } | ||
| 315 | 656 | ||
| 316 | #ifdef CONFIG_SYSFS | 657 | #ifdef CONFIG_SYSFS |
| 317 | ssize_t cpu_show_meltdown(struct device *dev, struct device_attribute *attr, char *buf) | 658 | |
| 659 | static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr, | ||
| 660 | char *buf, unsigned int bug) | ||
| 318 | { | 661 | { |
| 319 | if (!boot_cpu_has_bug(X86_BUG_CPU_MELTDOWN)) | 662 | if (!boot_cpu_has_bug(bug)) |
| 320 | return sprintf(buf, "Not affected\n"); | 663 | return sprintf(buf, "Not affected\n"); |
| 321 | if (boot_cpu_has(X86_FEATURE_PTI)) | 664 | |
| 322 | return sprintf(buf, "Mitigation: PTI\n"); | 665 | switch (bug) { |
| 666 | case X86_BUG_CPU_MELTDOWN: | ||
| 667 | if (boot_cpu_has(X86_FEATURE_PTI)) | ||
| 668 | return sprintf(buf, "Mitigation: PTI\n"); | ||
| 669 | |||
| 670 | break; | ||
| 671 | |||
| 672 | case X86_BUG_SPECTRE_V1: | ||
| 673 | return sprintf(buf, "Mitigation: __user pointer sanitization\n"); | ||
| 674 | |||
| 675 | case X86_BUG_SPECTRE_V2: | ||
| 676 | return sprintf(buf, "%s%s%s%s\n", spectre_v2_strings[spectre_v2_enabled], | ||
| 677 | boot_cpu_has(X86_FEATURE_USE_IBPB) ? ", IBPB" : "", | ||
| 678 | boot_cpu_has(X86_FEATURE_USE_IBRS_FW) ? ", IBRS_FW" : "", | ||
| 679 | spectre_v2_module_string()); | ||
| 680 | |||
| 681 | case X86_BUG_SPEC_STORE_BYPASS: | ||
| 682 | return sprintf(buf, "%s\n", ssb_strings[ssb_mode]); | ||
| 683 | |||
| 684 | default: | ||
| 685 | break; | ||
| 686 | } | ||
| 687 | |||
| 323 | return sprintf(buf, "Vulnerable\n"); | 688 | return sprintf(buf, "Vulnerable\n"); |
| 324 | } | 689 | } |
| 325 | 690 | ||
| 691 | ssize_t cpu_show_meltdown(struct device *dev, struct device_attribute *attr, char *buf) | ||
| 692 | { | ||
| 693 | return cpu_show_common(dev, attr, buf, X86_BUG_CPU_MELTDOWN); | ||
| 694 | } | ||
| 695 | |||
| 326 | ssize_t cpu_show_spectre_v1(struct device *dev, struct device_attribute *attr, char *buf) | 696 | ssize_t cpu_show_spectre_v1(struct device *dev, struct device_attribute *attr, char *buf) |
| 327 | { | 697 | { |
| 328 | if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V1)) | 698 | return cpu_show_common(dev, attr, buf, X86_BUG_SPECTRE_V1); |
| 329 | return sprintf(buf, "Not affected\n"); | ||
| 330 | return sprintf(buf, "Mitigation: __user pointer sanitization\n"); | ||
| 331 | } | 699 | } |
| 332 | 700 | ||
| 333 | ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr, char *buf) | 701 | ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr, char *buf) |
| 334 | { | 702 | { |
| 335 | if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2)) | 703 | return cpu_show_common(dev, attr, buf, X86_BUG_SPECTRE_V2); |
| 336 | return sprintf(buf, "Not affected\n"); | 704 | } |
| 337 | 705 | ||
| 338 | return sprintf(buf, "%s%s%s%s\n", spectre_v2_strings[spectre_v2_enabled], | 706 | ssize_t cpu_show_spec_store_bypass(struct device *dev, struct device_attribute *attr, char *buf) |
| 339 | boot_cpu_has(X86_FEATURE_USE_IBPB) ? ", IBPB" : "", | 707 | { |
| 340 | boot_cpu_has(X86_FEATURE_USE_IBRS_FW) ? ", IBRS_FW" : "", | 708 | return cpu_show_common(dev, attr, buf, X86_BUG_SPEC_STORE_BYPASS); |
| 341 | spectre_v2_module_string()); | ||
| 342 | } | 709 | } |
| 343 | #endif | 710 | #endif |
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c index ce243f7d2d4e..38276f58d3bf 100644 --- a/arch/x86/kernel/cpu/common.c +++ b/arch/x86/kernel/cpu/common.c | |||
| @@ -757,17 +757,32 @@ static void init_speculation_control(struct cpuinfo_x86 *c) | |||
| 757 | * and they also have a different bit for STIBP support. Also, | 757 | * and they also have a different bit for STIBP support. Also, |
| 758 | * a hypervisor might have set the individual AMD bits even on | 758 | * a hypervisor might have set the individual AMD bits even on |
| 759 | * Intel CPUs, for finer-grained selection of what's available. | 759 | * Intel CPUs, for finer-grained selection of what's available. |
| 760 | * | ||
| 761 | * We use the AMD bits in 0x8000_0008 EBX as the generic hardware | ||
| 762 | * features, which are visible in /proc/cpuinfo and used by the | ||
| 763 | * kernel. So set those accordingly from the Intel bits. | ||
| 764 | */ | 760 | */ |
| 765 | if (cpu_has(c, X86_FEATURE_SPEC_CTRL)) { | 761 | if (cpu_has(c, X86_FEATURE_SPEC_CTRL)) { |
| 766 | set_cpu_cap(c, X86_FEATURE_IBRS); | 762 | set_cpu_cap(c, X86_FEATURE_IBRS); |
| 767 | set_cpu_cap(c, X86_FEATURE_IBPB); | 763 | set_cpu_cap(c, X86_FEATURE_IBPB); |
| 764 | set_cpu_cap(c, X86_FEATURE_MSR_SPEC_CTRL); | ||
| 768 | } | 765 | } |
| 766 | |||
| 769 | if (cpu_has(c, X86_FEATURE_INTEL_STIBP)) | 767 | if (cpu_has(c, X86_FEATURE_INTEL_STIBP)) |
| 770 | set_cpu_cap(c, X86_FEATURE_STIBP); | 768 | set_cpu_cap(c, X86_FEATURE_STIBP); |
| 769 | |||
| 770 | if (cpu_has(c, X86_FEATURE_SPEC_CTRL_SSBD) || | ||
| 771 | cpu_has(c, X86_FEATURE_VIRT_SSBD)) | ||
| 772 | set_cpu_cap(c, X86_FEATURE_SSBD); | ||
| 773 | |||
| 774 | if (cpu_has(c, X86_FEATURE_AMD_IBRS)) { | ||
| 775 | set_cpu_cap(c, X86_FEATURE_IBRS); | ||
| 776 | set_cpu_cap(c, X86_FEATURE_MSR_SPEC_CTRL); | ||
| 777 | } | ||
| 778 | |||
| 779 | if (cpu_has(c, X86_FEATURE_AMD_IBPB)) | ||
| 780 | set_cpu_cap(c, X86_FEATURE_IBPB); | ||
| 781 | |||
| 782 | if (cpu_has(c, X86_FEATURE_AMD_STIBP)) { | ||
| 783 | set_cpu_cap(c, X86_FEATURE_STIBP); | ||
| 784 | set_cpu_cap(c, X86_FEATURE_MSR_SPEC_CTRL); | ||
| 785 | } | ||
| 771 | } | 786 | } |
| 772 | 787 | ||
| 773 | void get_cpu_cap(struct cpuinfo_x86 *c) | 788 | void get_cpu_cap(struct cpuinfo_x86 *c) |
| @@ -927,21 +942,47 @@ static const __initconst struct x86_cpu_id cpu_no_meltdown[] = { | |||
| 927 | {} | 942 | {} |
| 928 | }; | 943 | }; |
| 929 | 944 | ||
| 930 | static bool __init cpu_vulnerable_to_meltdown(struct cpuinfo_x86 *c) | 945 | /* Only list CPUs which speculate but are non susceptible to SSB */ |
| 946 | static const __initconst struct x86_cpu_id cpu_no_spec_store_bypass[] = { | ||
| 947 | { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SILVERMONT1 }, | ||
| 948 | { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_AIRMONT }, | ||
| 949 | { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SILVERMONT2 }, | ||
| 950 | { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_MERRIFIELD }, | ||
| 951 | { X86_VENDOR_INTEL, 6, INTEL_FAM6_CORE_YONAH }, | ||
| 952 | { X86_VENDOR_INTEL, 6, INTEL_FAM6_XEON_PHI_KNL }, | ||
| 953 | { X86_VENDOR_INTEL, 6, INTEL_FAM6_XEON_PHI_KNM }, | ||
| 954 | { X86_VENDOR_AMD, 0x12, }, | ||
| 955 | { X86_VENDOR_AMD, 0x11, }, | ||
| 956 | { X86_VENDOR_AMD, 0x10, }, | ||
| 957 | { X86_VENDOR_AMD, 0xf, }, | ||
| 958 | {} | ||
| 959 | }; | ||
| 960 | |||
| 961 | static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c) | ||
| 931 | { | 962 | { |
| 932 | u64 ia32_cap = 0; | 963 | u64 ia32_cap = 0; |
| 933 | 964 | ||
| 934 | if (x86_match_cpu(cpu_no_meltdown)) | 965 | if (x86_match_cpu(cpu_no_speculation)) |
| 935 | return false; | 966 | return; |
| 967 | |||
| 968 | setup_force_cpu_bug(X86_BUG_SPECTRE_V1); | ||
| 969 | setup_force_cpu_bug(X86_BUG_SPECTRE_V2); | ||
| 936 | 970 | ||
| 937 | if (cpu_has(c, X86_FEATURE_ARCH_CAPABILITIES)) | 971 | if (cpu_has(c, X86_FEATURE_ARCH_CAPABILITIES)) |
| 938 | rdmsrl(MSR_IA32_ARCH_CAPABILITIES, ia32_cap); | 972 | rdmsrl(MSR_IA32_ARCH_CAPABILITIES, ia32_cap); |
| 939 | 973 | ||
| 974 | if (!x86_match_cpu(cpu_no_spec_store_bypass) && | ||
| 975 | !(ia32_cap & ARCH_CAP_SSB_NO)) | ||
| 976 | setup_force_cpu_bug(X86_BUG_SPEC_STORE_BYPASS); | ||
| 977 | |||
| 978 | if (x86_match_cpu(cpu_no_meltdown)) | ||
| 979 | return; | ||
| 980 | |||
| 940 | /* Rogue Data Cache Load? No! */ | 981 | /* Rogue Data Cache Load? No! */ |
| 941 | if (ia32_cap & ARCH_CAP_RDCL_NO) | 982 | if (ia32_cap & ARCH_CAP_RDCL_NO) |
| 942 | return false; | 983 | return; |
| 943 | 984 | ||
| 944 | return true; | 985 | setup_force_cpu_bug(X86_BUG_CPU_MELTDOWN); |
| 945 | } | 986 | } |
| 946 | 987 | ||
| 947 | /* | 988 | /* |
| @@ -992,12 +1033,7 @@ static void __init early_identify_cpu(struct cpuinfo_x86 *c) | |||
| 992 | 1033 | ||
| 993 | setup_force_cpu_cap(X86_FEATURE_ALWAYS); | 1034 | setup_force_cpu_cap(X86_FEATURE_ALWAYS); |
| 994 | 1035 | ||
| 995 | if (!x86_match_cpu(cpu_no_speculation)) { | 1036 | cpu_set_bug_bits(c); |
| 996 | if (cpu_vulnerable_to_meltdown(c)) | ||
| 997 | setup_force_cpu_bug(X86_BUG_CPU_MELTDOWN); | ||
| 998 | setup_force_cpu_bug(X86_BUG_SPECTRE_V1); | ||
| 999 | setup_force_cpu_bug(X86_BUG_SPECTRE_V2); | ||
| 1000 | } | ||
| 1001 | 1037 | ||
| 1002 | fpu__init_system(c); | 1038 | fpu__init_system(c); |
| 1003 | 1039 | ||
| @@ -1359,6 +1395,7 @@ void identify_secondary_cpu(struct cpuinfo_x86 *c) | |||
| 1359 | #endif | 1395 | #endif |
| 1360 | mtrr_ap_init(); | 1396 | mtrr_ap_init(); |
| 1361 | validate_apic_and_package_id(c); | 1397 | validate_apic_and_package_id(c); |
| 1398 | x86_spec_ctrl_setup_ap(); | ||
| 1362 | } | 1399 | } |
| 1363 | 1400 | ||
| 1364 | static __init int setup_noclflush(char *arg) | 1401 | static __init int setup_noclflush(char *arg) |
diff --git a/arch/x86/kernel/cpu/cpu.h b/arch/x86/kernel/cpu/cpu.h index e806b11a99af..37672d299e35 100644 --- a/arch/x86/kernel/cpu/cpu.h +++ b/arch/x86/kernel/cpu/cpu.h | |||
| @@ -50,4 +50,6 @@ extern void cpu_detect_cache_sizes(struct cpuinfo_x86 *c); | |||
| 50 | 50 | ||
| 51 | unsigned int aperfmperf_get_khz(int cpu); | 51 | unsigned int aperfmperf_get_khz(int cpu); |
| 52 | 52 | ||
| 53 | extern void x86_spec_ctrl_setup_ap(void); | ||
| 54 | |||
| 53 | #endif /* ARCH_X86_CPU_H */ | 55 | #endif /* ARCH_X86_CPU_H */ |
diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c index 60d1897041da..577e7f7ae273 100644 --- a/arch/x86/kernel/cpu/intel.c +++ b/arch/x86/kernel/cpu/intel.c | |||
| @@ -188,7 +188,10 @@ static void early_init_intel(struct cpuinfo_x86 *c) | |||
| 188 | setup_clear_cpu_cap(X86_FEATURE_IBPB); | 188 | setup_clear_cpu_cap(X86_FEATURE_IBPB); |
| 189 | setup_clear_cpu_cap(X86_FEATURE_STIBP); | 189 | setup_clear_cpu_cap(X86_FEATURE_STIBP); |
| 190 | setup_clear_cpu_cap(X86_FEATURE_SPEC_CTRL); | 190 | setup_clear_cpu_cap(X86_FEATURE_SPEC_CTRL); |
| 191 | setup_clear_cpu_cap(X86_FEATURE_MSR_SPEC_CTRL); | ||
| 191 | setup_clear_cpu_cap(X86_FEATURE_INTEL_STIBP); | 192 | setup_clear_cpu_cap(X86_FEATURE_INTEL_STIBP); |
| 193 | setup_clear_cpu_cap(X86_FEATURE_SSBD); | ||
| 194 | setup_clear_cpu_cap(X86_FEATURE_SPEC_CTRL_SSBD); | ||
| 192 | } | 195 | } |
| 193 | 196 | ||
| 194 | /* | 197 | /* |
diff --git a/arch/x86/kernel/cpu/mcheck/mce_amd.c b/arch/x86/kernel/cpu/mcheck/mce_amd.c index f7666eef4a87..c8e038800591 100644 --- a/arch/x86/kernel/cpu/mcheck/mce_amd.c +++ b/arch/x86/kernel/cpu/mcheck/mce_amd.c | |||
| @@ -94,6 +94,11 @@ static struct smca_bank_name smca_names[] = { | |||
| 94 | [SMCA_SMU] = { "smu", "System Management Unit" }, | 94 | [SMCA_SMU] = { "smu", "System Management Unit" }, |
| 95 | }; | 95 | }; |
| 96 | 96 | ||
| 97 | static u32 smca_bank_addrs[MAX_NR_BANKS][NR_BLOCKS] __ro_after_init = | ||
| 98 | { | ||
| 99 | [0 ... MAX_NR_BANKS - 1] = { [0 ... NR_BLOCKS - 1] = -1 } | ||
| 100 | }; | ||
| 101 | |||
| 97 | const char *smca_get_name(enum smca_bank_types t) | 102 | const char *smca_get_name(enum smca_bank_types t) |
| 98 | { | 103 | { |
| 99 | if (t >= N_SMCA_BANK_TYPES) | 104 | if (t >= N_SMCA_BANK_TYPES) |
| @@ -443,20 +448,26 @@ static u32 smca_get_block_address(unsigned int cpu, unsigned int bank, | |||
| 443 | if (!block) | 448 | if (!block) |
| 444 | return MSR_AMD64_SMCA_MCx_MISC(bank); | 449 | return MSR_AMD64_SMCA_MCx_MISC(bank); |
| 445 | 450 | ||
| 451 | /* Check our cache first: */ | ||
| 452 | if (smca_bank_addrs[bank][block] != -1) | ||
| 453 | return smca_bank_addrs[bank][block]; | ||
| 454 | |||
| 446 | /* | 455 | /* |
| 447 | * For SMCA enabled processors, BLKPTR field of the first MISC register | 456 | * For SMCA enabled processors, BLKPTR field of the first MISC register |
| 448 | * (MCx_MISC0) indicates presence of additional MISC regs set (MISC1-4). | 457 | * (MCx_MISC0) indicates presence of additional MISC regs set (MISC1-4). |
| 449 | */ | 458 | */ |
| 450 | if (rdmsr_safe_on_cpu(cpu, MSR_AMD64_SMCA_MCx_CONFIG(bank), &low, &high)) | 459 | if (rdmsr_safe_on_cpu(cpu, MSR_AMD64_SMCA_MCx_CONFIG(bank), &low, &high)) |
| 451 | return addr; | 460 | goto out; |
| 452 | 461 | ||
| 453 | if (!(low & MCI_CONFIG_MCAX)) | 462 | if (!(low & MCI_CONFIG_MCAX)) |
| 454 | return addr; | 463 | goto out; |
| 455 | 464 | ||
| 456 | if (!rdmsr_safe_on_cpu(cpu, MSR_AMD64_SMCA_MCx_MISC(bank), &low, &high) && | 465 | if (!rdmsr_safe_on_cpu(cpu, MSR_AMD64_SMCA_MCx_MISC(bank), &low, &high) && |
| 457 | (low & MASK_BLKPTR_LO)) | 466 | (low & MASK_BLKPTR_LO)) |
| 458 | return MSR_AMD64_SMCA_MCx_MISCy(bank, block - 1); | 467 | addr = MSR_AMD64_SMCA_MCx_MISCy(bank, block - 1); |
| 459 | 468 | ||
| 469 | out: | ||
| 470 | smca_bank_addrs[bank][block] = addr; | ||
| 460 | return addr; | 471 | return addr; |
| 461 | } | 472 | } |
| 462 | 473 | ||
| @@ -468,18 +479,6 @@ static u32 get_block_address(unsigned int cpu, u32 current_addr, u32 low, u32 hi | |||
| 468 | if ((bank >= mca_cfg.banks) || (block >= NR_BLOCKS)) | 479 | if ((bank >= mca_cfg.banks) || (block >= NR_BLOCKS)) |
| 469 | return addr; | 480 | return addr; |
| 470 | 481 | ||
| 471 | /* Get address from already initialized block. */ | ||
| 472 | if (per_cpu(threshold_banks, cpu)) { | ||
| 473 | struct threshold_bank *bankp = per_cpu(threshold_banks, cpu)[bank]; | ||
| 474 | |||
| 475 | if (bankp && bankp->blocks) { | ||
| 476 | struct threshold_block *blockp = &bankp->blocks[block]; | ||
| 477 | |||
| 478 | if (blockp) | ||
| 479 | return blockp->address; | ||
| 480 | } | ||
| 481 | } | ||
| 482 | |||
| 483 | if (mce_flags.smca) | 482 | if (mce_flags.smca) |
| 484 | return smca_get_block_address(cpu, bank, block); | 483 | return smca_get_block_address(cpu, bank, block); |
| 485 | 484 | ||
diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c index 0c408f8c4ed4..2d29e47c056e 100644 --- a/arch/x86/kernel/head64.c +++ b/arch/x86/kernel/head64.c | |||
| @@ -104,6 +104,12 @@ static bool __head check_la57_support(unsigned long physaddr) | |||
| 104 | } | 104 | } |
| 105 | #endif | 105 | #endif |
| 106 | 106 | ||
| 107 | /* Code in __startup_64() can be relocated during execution, but the compiler | ||
| 108 | * doesn't have to generate PC-relative relocations when accessing globals from | ||
| 109 | * that function. Clang actually does not generate them, which leads to | ||
| 110 | * boot-time crashes. To work around this problem, every global pointer must | ||
| 111 | * be adjusted using fixup_pointer(). | ||
| 112 | */ | ||
| 107 | unsigned long __head __startup_64(unsigned long physaddr, | 113 | unsigned long __head __startup_64(unsigned long physaddr, |
| 108 | struct boot_params *bp) | 114 | struct boot_params *bp) |
| 109 | { | 115 | { |
| @@ -113,6 +119,7 @@ unsigned long __head __startup_64(unsigned long physaddr, | |||
| 113 | p4dval_t *p4d; | 119 | p4dval_t *p4d; |
| 114 | pudval_t *pud; | 120 | pudval_t *pud; |
| 115 | pmdval_t *pmd, pmd_entry; | 121 | pmdval_t *pmd, pmd_entry; |
| 122 | pteval_t *mask_ptr; | ||
| 116 | bool la57; | 123 | bool la57; |
| 117 | int i; | 124 | int i; |
| 118 | unsigned int *next_pgt_ptr; | 125 | unsigned int *next_pgt_ptr; |
| @@ -196,7 +203,8 @@ unsigned long __head __startup_64(unsigned long physaddr, | |||
| 196 | 203 | ||
| 197 | pmd_entry = __PAGE_KERNEL_LARGE_EXEC & ~_PAGE_GLOBAL; | 204 | pmd_entry = __PAGE_KERNEL_LARGE_EXEC & ~_PAGE_GLOBAL; |
| 198 | /* Filter out unsupported __PAGE_KERNEL_* bits: */ | 205 | /* Filter out unsupported __PAGE_KERNEL_* bits: */ |
| 199 | pmd_entry &= __supported_pte_mask; | 206 | mask_ptr = fixup_pointer(&__supported_pte_mask, physaddr); |
| 207 | pmd_entry &= *mask_ptr; | ||
| 200 | pmd_entry += sme_get_me_mask(); | 208 | pmd_entry += sme_get_me_mask(); |
| 201 | pmd_entry += physaddr; | 209 | pmd_entry += physaddr; |
| 202 | 210 | ||
diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c index 0715f827607c..6f4d42377fe5 100644 --- a/arch/x86/kernel/kprobes/core.c +++ b/arch/x86/kernel/kprobes/core.c | |||
| @@ -370,6 +370,10 @@ int __copy_instruction(u8 *dest, u8 *src, u8 *real, struct insn *insn) | |||
| 370 | if (insn->opcode.bytes[0] == BREAKPOINT_INSTRUCTION) | 370 | if (insn->opcode.bytes[0] == BREAKPOINT_INSTRUCTION) |
| 371 | return 0; | 371 | return 0; |
| 372 | 372 | ||
| 373 | /* We should not singlestep on the exception masking instructions */ | ||
| 374 | if (insn_masking_exception(insn)) | ||
| 375 | return 0; | ||
| 376 | |||
| 373 | #ifdef CONFIG_X86_64 | 377 | #ifdef CONFIG_X86_64 |
| 374 | /* Only x86_64 has RIP relative instructions */ | 378 | /* Only x86_64 has RIP relative instructions */ |
| 375 | if (insn_rip_relative(insn)) { | 379 | if (insn_rip_relative(insn)) { |
diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c index 7867417cfaff..5b2300b818af 100644 --- a/arch/x86/kernel/kvm.c +++ b/arch/x86/kernel/kvm.c | |||
| @@ -457,7 +457,7 @@ static void __init sev_map_percpu_data(void) | |||
| 457 | static void __init kvm_smp_prepare_cpus(unsigned int max_cpus) | 457 | static void __init kvm_smp_prepare_cpus(unsigned int max_cpus) |
| 458 | { | 458 | { |
| 459 | native_smp_prepare_cpus(max_cpus); | 459 | native_smp_prepare_cpus(max_cpus); |
| 460 | if (kvm_para_has_hint(KVM_HINTS_DEDICATED)) | 460 | if (kvm_para_has_hint(KVM_HINTS_REALTIME)) |
| 461 | static_branch_disable(&virt_spin_lock_key); | 461 | static_branch_disable(&virt_spin_lock_key); |
| 462 | } | 462 | } |
| 463 | 463 | ||
| @@ -553,7 +553,7 @@ static void __init kvm_guest_init(void) | |||
| 553 | } | 553 | } |
| 554 | 554 | ||
| 555 | if (kvm_para_has_feature(KVM_FEATURE_PV_TLB_FLUSH) && | 555 | if (kvm_para_has_feature(KVM_FEATURE_PV_TLB_FLUSH) && |
| 556 | !kvm_para_has_hint(KVM_HINTS_DEDICATED) && | 556 | !kvm_para_has_hint(KVM_HINTS_REALTIME) && |
| 557 | kvm_para_has_feature(KVM_FEATURE_STEAL_TIME)) | 557 | kvm_para_has_feature(KVM_FEATURE_STEAL_TIME)) |
| 558 | pv_mmu_ops.flush_tlb_others = kvm_flush_tlb_others; | 558 | pv_mmu_ops.flush_tlb_others = kvm_flush_tlb_others; |
| 559 | 559 | ||
| @@ -649,7 +649,7 @@ static __init int kvm_setup_pv_tlb_flush(void) | |||
| 649 | int cpu; | 649 | int cpu; |
| 650 | 650 | ||
| 651 | if (kvm_para_has_feature(KVM_FEATURE_PV_TLB_FLUSH) && | 651 | if (kvm_para_has_feature(KVM_FEATURE_PV_TLB_FLUSH) && |
| 652 | !kvm_para_has_hint(KVM_HINTS_DEDICATED) && | 652 | !kvm_para_has_hint(KVM_HINTS_REALTIME) && |
| 653 | kvm_para_has_feature(KVM_FEATURE_STEAL_TIME)) { | 653 | kvm_para_has_feature(KVM_FEATURE_STEAL_TIME)) { |
| 654 | for_each_possible_cpu(cpu) { | 654 | for_each_possible_cpu(cpu) { |
| 655 | zalloc_cpumask_var_node(per_cpu_ptr(&__pv_tlb_mask, cpu), | 655 | zalloc_cpumask_var_node(per_cpu_ptr(&__pv_tlb_mask, cpu), |
| @@ -745,7 +745,7 @@ void __init kvm_spinlock_init(void) | |||
| 745 | if (!kvm_para_has_feature(KVM_FEATURE_PV_UNHALT)) | 745 | if (!kvm_para_has_feature(KVM_FEATURE_PV_UNHALT)) |
| 746 | return; | 746 | return; |
| 747 | 747 | ||
| 748 | if (kvm_para_has_hint(KVM_HINTS_DEDICATED)) | 748 | if (kvm_para_has_hint(KVM_HINTS_REALTIME)) |
| 749 | return; | 749 | return; |
| 750 | 750 | ||
| 751 | __pv_init_lock_hash(); | 751 | __pv_init_lock_hash(); |
diff --git a/arch/x86/kernel/machine_kexec_32.c b/arch/x86/kernel/machine_kexec_32.c index 60cdec6628b0..d1ab07ec8c9a 100644 --- a/arch/x86/kernel/machine_kexec_32.c +++ b/arch/x86/kernel/machine_kexec_32.c | |||
| @@ -57,12 +57,17 @@ static void load_segments(void) | |||
| 57 | static void machine_kexec_free_page_tables(struct kimage *image) | 57 | static void machine_kexec_free_page_tables(struct kimage *image) |
| 58 | { | 58 | { |
| 59 | free_page((unsigned long)image->arch.pgd); | 59 | free_page((unsigned long)image->arch.pgd); |
| 60 | image->arch.pgd = NULL; | ||
| 60 | #ifdef CONFIG_X86_PAE | 61 | #ifdef CONFIG_X86_PAE |
| 61 | free_page((unsigned long)image->arch.pmd0); | 62 | free_page((unsigned long)image->arch.pmd0); |
| 63 | image->arch.pmd0 = NULL; | ||
| 62 | free_page((unsigned long)image->arch.pmd1); | 64 | free_page((unsigned long)image->arch.pmd1); |
| 65 | image->arch.pmd1 = NULL; | ||
| 63 | #endif | 66 | #endif |
| 64 | free_page((unsigned long)image->arch.pte0); | 67 | free_page((unsigned long)image->arch.pte0); |
| 68 | image->arch.pte0 = NULL; | ||
| 65 | free_page((unsigned long)image->arch.pte1); | 69 | free_page((unsigned long)image->arch.pte1); |
| 70 | image->arch.pte1 = NULL; | ||
| 66 | } | 71 | } |
| 67 | 72 | ||
| 68 | static int machine_kexec_alloc_page_tables(struct kimage *image) | 73 | static int machine_kexec_alloc_page_tables(struct kimage *image) |
| @@ -79,7 +84,6 @@ static int machine_kexec_alloc_page_tables(struct kimage *image) | |||
| 79 | !image->arch.pmd0 || !image->arch.pmd1 || | 84 | !image->arch.pmd0 || !image->arch.pmd1 || |
| 80 | #endif | 85 | #endif |
| 81 | !image->arch.pte0 || !image->arch.pte1) { | 86 | !image->arch.pte0 || !image->arch.pte1) { |
| 82 | machine_kexec_free_page_tables(image); | ||
| 83 | return -ENOMEM; | 87 | return -ENOMEM; |
| 84 | } | 88 | } |
| 85 | return 0; | 89 | return 0; |
diff --git a/arch/x86/kernel/machine_kexec_64.c b/arch/x86/kernel/machine_kexec_64.c index a5e55d832d0a..6010449ca6d2 100644 --- a/arch/x86/kernel/machine_kexec_64.c +++ b/arch/x86/kernel/machine_kexec_64.c | |||
| @@ -39,9 +39,13 @@ const struct kexec_file_ops * const kexec_file_loaders[] = { | |||
| 39 | static void free_transition_pgtable(struct kimage *image) | 39 | static void free_transition_pgtable(struct kimage *image) |
| 40 | { | 40 | { |
| 41 | free_page((unsigned long)image->arch.p4d); | 41 | free_page((unsigned long)image->arch.p4d); |
| 42 | image->arch.p4d = NULL; | ||
| 42 | free_page((unsigned long)image->arch.pud); | 43 | free_page((unsigned long)image->arch.pud); |
| 44 | image->arch.pud = NULL; | ||
| 43 | free_page((unsigned long)image->arch.pmd); | 45 | free_page((unsigned long)image->arch.pmd); |
| 46 | image->arch.pmd = NULL; | ||
| 44 | free_page((unsigned long)image->arch.pte); | 47 | free_page((unsigned long)image->arch.pte); |
| 48 | image->arch.pte = NULL; | ||
| 45 | } | 49 | } |
| 46 | 50 | ||
| 47 | static int init_transition_pgtable(struct kimage *image, pgd_t *pgd) | 51 | static int init_transition_pgtable(struct kimage *image, pgd_t *pgd) |
| @@ -91,7 +95,6 @@ static int init_transition_pgtable(struct kimage *image, pgd_t *pgd) | |||
| 91 | set_pte(pte, pfn_pte(paddr >> PAGE_SHIFT, PAGE_KERNEL_EXEC_NOENC)); | 95 | set_pte(pte, pfn_pte(paddr >> PAGE_SHIFT, PAGE_KERNEL_EXEC_NOENC)); |
| 92 | return 0; | 96 | return 0; |
| 93 | err: | 97 | err: |
| 94 | free_transition_pgtable(image); | ||
| 95 | return result; | 98 | return result; |
| 96 | } | 99 | } |
| 97 | 100 | ||
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c index 03408b942adb..30ca2d1a9231 100644 --- a/arch/x86/kernel/process.c +++ b/arch/x86/kernel/process.c | |||
| @@ -38,6 +38,7 @@ | |||
| 38 | #include <asm/switch_to.h> | 38 | #include <asm/switch_to.h> |
| 39 | #include <asm/desc.h> | 39 | #include <asm/desc.h> |
| 40 | #include <asm/prctl.h> | 40 | #include <asm/prctl.h> |
| 41 | #include <asm/spec-ctrl.h> | ||
| 41 | 42 | ||
| 42 | /* | 43 | /* |
| 43 | * per-CPU TSS segments. Threads are completely 'soft' on Linux, | 44 | * per-CPU TSS segments. Threads are completely 'soft' on Linux, |
| @@ -278,6 +279,148 @@ static inline void switch_to_bitmap(struct tss_struct *tss, | |||
| 278 | } | 279 | } |
| 279 | } | 280 | } |
| 280 | 281 | ||
| 282 | #ifdef CONFIG_SMP | ||
| 283 | |||
| 284 | struct ssb_state { | ||
| 285 | struct ssb_state *shared_state; | ||
| 286 | raw_spinlock_t lock; | ||
| 287 | unsigned int disable_state; | ||
| 288 | unsigned long local_state; | ||
| 289 | }; | ||
| 290 | |||
| 291 | #define LSTATE_SSB 0 | ||
| 292 | |||
| 293 | static DEFINE_PER_CPU(struct ssb_state, ssb_state); | ||
| 294 | |||
| 295 | void speculative_store_bypass_ht_init(void) | ||
| 296 | { | ||
| 297 | struct ssb_state *st = this_cpu_ptr(&ssb_state); | ||
| 298 | unsigned int this_cpu = smp_processor_id(); | ||
| 299 | unsigned int cpu; | ||
| 300 | |||
| 301 | st->local_state = 0; | ||
| 302 | |||
| 303 | /* | ||
| 304 | * Shared state setup happens once on the first bringup | ||
| 305 | * of the CPU. It's not destroyed on CPU hotunplug. | ||
| 306 | */ | ||
| 307 | if (st->shared_state) | ||
| 308 | return; | ||
| 309 | |||
| 310 | raw_spin_lock_init(&st->lock); | ||
| 311 | |||
| 312 | /* | ||
| 313 | * Go over HT siblings and check whether one of them has set up the | ||
| 314 | * shared state pointer already. | ||
| 315 | */ | ||
| 316 | for_each_cpu(cpu, topology_sibling_cpumask(this_cpu)) { | ||
| 317 | if (cpu == this_cpu) | ||
| 318 | continue; | ||
| 319 | |||
| 320 | if (!per_cpu(ssb_state, cpu).shared_state) | ||
| 321 | continue; | ||
| 322 | |||
| 323 | /* Link it to the state of the sibling: */ | ||
| 324 | st->shared_state = per_cpu(ssb_state, cpu).shared_state; | ||
| 325 | return; | ||
| 326 | } | ||
| 327 | |||
| 328 | /* | ||
| 329 | * First HT sibling to come up on the core. Link shared state of | ||
| 330 | * the first HT sibling to itself. The siblings on the same core | ||
| 331 | * which come up later will see the shared state pointer and link | ||
| 332 | * themself to the state of this CPU. | ||
| 333 | */ | ||
| 334 | st->shared_state = st; | ||
| 335 | } | ||
| 336 | |||
| 337 | /* | ||
| 338 | * Logic is: First HT sibling enables SSBD for both siblings in the core | ||
| 339 | * and last sibling to disable it, disables it for the whole core. This how | ||
| 340 | * MSR_SPEC_CTRL works in "hardware": | ||
| 341 | * | ||
| 342 | * CORE_SPEC_CTRL = THREAD0_SPEC_CTRL | THREAD1_SPEC_CTRL | ||
| 343 | */ | ||
| 344 | static __always_inline void amd_set_core_ssb_state(unsigned long tifn) | ||
| 345 | { | ||
| 346 | struct ssb_state *st = this_cpu_ptr(&ssb_state); | ||
| 347 | u64 msr = x86_amd_ls_cfg_base; | ||
| 348 | |||
| 349 | if (!static_cpu_has(X86_FEATURE_ZEN)) { | ||
| 350 | msr |= ssbd_tif_to_amd_ls_cfg(tifn); | ||
| 351 | wrmsrl(MSR_AMD64_LS_CFG, msr); | ||
| 352 | return; | ||
| 353 | } | ||
| 354 | |||
| 355 | if (tifn & _TIF_SSBD) { | ||
| 356 | /* | ||
| 357 | * Since this can race with prctl(), block reentry on the | ||
| 358 | * same CPU. | ||
| 359 | */ | ||
| 360 | if (__test_and_set_bit(LSTATE_SSB, &st->local_state)) | ||
| 361 | return; | ||
| 362 | |||
| 363 | msr |= x86_amd_ls_cfg_ssbd_mask; | ||
| 364 | |||
| 365 | raw_spin_lock(&st->shared_state->lock); | ||
| 366 | /* First sibling enables SSBD: */ | ||
| 367 | if (!st->shared_state->disable_state) | ||
| 368 | wrmsrl(MSR_AMD64_LS_CFG, msr); | ||
| 369 | st->shared_state->disable_state++; | ||
| 370 | raw_spin_unlock(&st->shared_state->lock); | ||
| 371 | } else { | ||
| 372 | if (!__test_and_clear_bit(LSTATE_SSB, &st->local_state)) | ||
| 373 | return; | ||
| 374 | |||
| 375 | raw_spin_lock(&st->shared_state->lock); | ||
| 376 | st->shared_state->disable_state--; | ||
| 377 | if (!st->shared_state->disable_state) | ||
| 378 | wrmsrl(MSR_AMD64_LS_CFG, msr); | ||
| 379 | raw_spin_unlock(&st->shared_state->lock); | ||
| 380 | } | ||
| 381 | } | ||
| 382 | #else | ||
| 383 | static __always_inline void amd_set_core_ssb_state(unsigned long tifn) | ||
| 384 | { | ||
| 385 | u64 msr = x86_amd_ls_cfg_base | ssbd_tif_to_amd_ls_cfg(tifn); | ||
| 386 | |||
| 387 | wrmsrl(MSR_AMD64_LS_CFG, msr); | ||
| 388 | } | ||
| 389 | #endif | ||
| 390 | |||
| 391 | static __always_inline void amd_set_ssb_virt_state(unsigned long tifn) | ||
| 392 | { | ||
| 393 | /* | ||
| 394 | * SSBD has the same definition in SPEC_CTRL and VIRT_SPEC_CTRL, | ||
| 395 | * so ssbd_tif_to_spec_ctrl() just works. | ||
| 396 | */ | ||
| 397 | wrmsrl(MSR_AMD64_VIRT_SPEC_CTRL, ssbd_tif_to_spec_ctrl(tifn)); | ||
| 398 | } | ||
| 399 | |||
| 400 | static __always_inline void intel_set_ssb_state(unsigned long tifn) | ||
| 401 | { | ||
| 402 | u64 msr = x86_spec_ctrl_base | ssbd_tif_to_spec_ctrl(tifn); | ||
| 403 | |||
| 404 | wrmsrl(MSR_IA32_SPEC_CTRL, msr); | ||
| 405 | } | ||
| 406 | |||
| 407 | static __always_inline void __speculative_store_bypass_update(unsigned long tifn) | ||
| 408 | { | ||
| 409 | if (static_cpu_has(X86_FEATURE_VIRT_SSBD)) | ||
| 410 | amd_set_ssb_virt_state(tifn); | ||
| 411 | else if (static_cpu_has(X86_FEATURE_LS_CFG_SSBD)) | ||
| 412 | amd_set_core_ssb_state(tifn); | ||
| 413 | else | ||
| 414 | intel_set_ssb_state(tifn); | ||
| 415 | } | ||
| 416 | |||
| 417 | void speculative_store_bypass_update(unsigned long tif) | ||
| 418 | { | ||
| 419 | preempt_disable(); | ||
| 420 | __speculative_store_bypass_update(tif); | ||
| 421 | preempt_enable(); | ||
| 422 | } | ||
| 423 | |||
| 281 | void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p, | 424 | void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p, |
| 282 | struct tss_struct *tss) | 425 | struct tss_struct *tss) |
| 283 | { | 426 | { |
| @@ -309,6 +452,9 @@ void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p, | |||
| 309 | 452 | ||
| 310 | if ((tifp ^ tifn) & _TIF_NOCPUID) | 453 | if ((tifp ^ tifn) & _TIF_NOCPUID) |
| 311 | set_cpuid_faulting(!!(tifn & _TIF_NOCPUID)); | 454 | set_cpuid_faulting(!!(tifn & _TIF_NOCPUID)); |
| 455 | |||
| 456 | if ((tifp ^ tifn) & _TIF_SSBD) | ||
| 457 | __speculative_store_bypass_update(tifn); | ||
| 312 | } | 458 | } |
| 313 | 459 | ||
| 314 | /* | 460 | /* |
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c index 4b100fe0f508..12bb445fb98d 100644 --- a/arch/x86/kernel/process_64.c +++ b/arch/x86/kernel/process_64.c | |||
| @@ -542,6 +542,7 @@ void set_personality_64bit(void) | |||
| 542 | clear_thread_flag(TIF_X32); | 542 | clear_thread_flag(TIF_X32); |
| 543 | /* Pretend that this comes from a 64bit execve */ | 543 | /* Pretend that this comes from a 64bit execve */ |
| 544 | task_pt_regs(current)->orig_ax = __NR_execve; | 544 | task_pt_regs(current)->orig_ax = __NR_execve; |
| 545 | current_thread_info()->status &= ~TS_COMPAT; | ||
| 545 | 546 | ||
| 546 | /* Ensure the corresponding mm is not marked. */ | 547 | /* Ensure the corresponding mm is not marked. */ |
| 547 | if (current->mm) | 548 | if (current->mm) |
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c index 0f1cbb042f49..9dd324ae4832 100644 --- a/arch/x86/kernel/smpboot.c +++ b/arch/x86/kernel/smpboot.c | |||
| @@ -79,6 +79,7 @@ | |||
| 79 | #include <asm/qspinlock.h> | 79 | #include <asm/qspinlock.h> |
| 80 | #include <asm/intel-family.h> | 80 | #include <asm/intel-family.h> |
| 81 | #include <asm/cpu_device_id.h> | 81 | #include <asm/cpu_device_id.h> |
| 82 | #include <asm/spec-ctrl.h> | ||
| 82 | 83 | ||
| 83 | /* Number of siblings per CPU package */ | 84 | /* Number of siblings per CPU package */ |
| 84 | int smp_num_siblings = 1; | 85 | int smp_num_siblings = 1; |
| @@ -244,6 +245,8 @@ static void notrace start_secondary(void *unused) | |||
| 244 | */ | 245 | */ |
| 245 | check_tsc_sync_target(); | 246 | check_tsc_sync_target(); |
| 246 | 247 | ||
| 248 | speculative_store_bypass_ht_init(); | ||
| 249 | |||
| 247 | /* | 250 | /* |
| 248 | * Lock vector_lock, set CPU online and bring the vector | 251 | * Lock vector_lock, set CPU online and bring the vector |
| 249 | * allocator online. Online must be set with vector_lock held | 252 | * allocator online. Online must be set with vector_lock held |
| @@ -1292,6 +1295,8 @@ void __init native_smp_prepare_cpus(unsigned int max_cpus) | |||
| 1292 | set_mtrr_aps_delayed_init(); | 1295 | set_mtrr_aps_delayed_init(); |
| 1293 | 1296 | ||
| 1294 | smp_quirk_init_udelay(); | 1297 | smp_quirk_init_udelay(); |
| 1298 | |||
| 1299 | speculative_store_bypass_ht_init(); | ||
| 1295 | } | 1300 | } |
| 1296 | 1301 | ||
| 1297 | void arch_enable_nonboot_cpus_begin(void) | 1302 | void arch_enable_nonboot_cpus_begin(void) |
diff --git a/arch/x86/kernel/uprobes.c b/arch/x86/kernel/uprobes.c index 85c7ef23d99f..c84bb5396958 100644 --- a/arch/x86/kernel/uprobes.c +++ b/arch/x86/kernel/uprobes.c | |||
| @@ -299,6 +299,10 @@ static int uprobe_init_insn(struct arch_uprobe *auprobe, struct insn *insn, bool | |||
| 299 | if (is_prefix_bad(insn)) | 299 | if (is_prefix_bad(insn)) |
| 300 | return -ENOTSUPP; | 300 | return -ENOTSUPP; |
| 301 | 301 | ||
| 302 | /* We should not singlestep on the exception masking instructions */ | ||
| 303 | if (insn_masking_exception(insn)) | ||
| 304 | return -ENOTSUPP; | ||
| 305 | |||
| 302 | if (x86_64) | 306 | if (x86_64) |
| 303 | good_insns = good_insns_64; | 307 | good_insns = good_insns_64; |
| 304 | else | 308 | else |
diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c index 82055b90a8b3..92bf2f2e7cdd 100644 --- a/arch/x86/kvm/cpuid.c +++ b/arch/x86/kvm/cpuid.c | |||
| @@ -379,7 +379,7 @@ static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function, | |||
| 379 | 379 | ||
| 380 | /* cpuid 0x80000008.ebx */ | 380 | /* cpuid 0x80000008.ebx */ |
| 381 | const u32 kvm_cpuid_8000_0008_ebx_x86_features = | 381 | const u32 kvm_cpuid_8000_0008_ebx_x86_features = |
| 382 | F(IBPB) | F(IBRS); | 382 | F(AMD_IBPB) | F(AMD_IBRS) | F(VIRT_SSBD); |
| 383 | 383 | ||
| 384 | /* cpuid 0xC0000001.edx */ | 384 | /* cpuid 0xC0000001.edx */ |
| 385 | const u32 kvm_cpuid_C000_0001_edx_x86_features = | 385 | const u32 kvm_cpuid_C000_0001_edx_x86_features = |
| @@ -408,7 +408,7 @@ static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function, | |||
| 408 | /* cpuid 7.0.edx*/ | 408 | /* cpuid 7.0.edx*/ |
| 409 | const u32 kvm_cpuid_7_0_edx_x86_features = | 409 | const u32 kvm_cpuid_7_0_edx_x86_features = |
| 410 | F(AVX512_4VNNIW) | F(AVX512_4FMAPS) | F(SPEC_CTRL) | | 410 | F(AVX512_4VNNIW) | F(AVX512_4FMAPS) | F(SPEC_CTRL) | |
| 411 | F(ARCH_CAPABILITIES); | 411 | F(SPEC_CTRL_SSBD) | F(ARCH_CAPABILITIES); |
| 412 | 412 | ||
| 413 | /* all calls to cpuid_count() should be made on the same cpu */ | 413 | /* all calls to cpuid_count() should be made on the same cpu */ |
| 414 | get_cpu(); | 414 | get_cpu(); |
| @@ -495,6 +495,11 @@ static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function, | |||
| 495 | entry->ecx &= ~F(PKU); | 495 | entry->ecx &= ~F(PKU); |
| 496 | entry->edx &= kvm_cpuid_7_0_edx_x86_features; | 496 | entry->edx &= kvm_cpuid_7_0_edx_x86_features; |
| 497 | cpuid_mask(&entry->edx, CPUID_7_EDX); | 497 | cpuid_mask(&entry->edx, CPUID_7_EDX); |
| 498 | /* | ||
| 499 | * We emulate ARCH_CAPABILITIES in software even | ||
| 500 | * if the host doesn't support it. | ||
| 501 | */ | ||
| 502 | entry->edx |= F(ARCH_CAPABILITIES); | ||
| 498 | } else { | 503 | } else { |
| 499 | entry->ebx = 0; | 504 | entry->ebx = 0; |
| 500 | entry->ecx = 0; | 505 | entry->ecx = 0; |
| @@ -647,13 +652,20 @@ static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function, | |||
| 647 | g_phys_as = phys_as; | 652 | g_phys_as = phys_as; |
| 648 | entry->eax = g_phys_as | (virt_as << 8); | 653 | entry->eax = g_phys_as | (virt_as << 8); |
| 649 | entry->edx = 0; | 654 | entry->edx = 0; |
| 650 | /* IBRS and IBPB aren't necessarily present in hardware cpuid */ | 655 | /* |
| 651 | if (boot_cpu_has(X86_FEATURE_IBPB)) | 656 | * IBRS, IBPB and VIRT_SSBD aren't necessarily present in |
| 652 | entry->ebx |= F(IBPB); | 657 | * hardware cpuid |
| 653 | if (boot_cpu_has(X86_FEATURE_IBRS)) | 658 | */ |
| 654 | entry->ebx |= F(IBRS); | 659 | if (boot_cpu_has(X86_FEATURE_AMD_IBPB)) |
| 660 | entry->ebx |= F(AMD_IBPB); | ||
| 661 | if (boot_cpu_has(X86_FEATURE_AMD_IBRS)) | ||
| 662 | entry->ebx |= F(AMD_IBRS); | ||
| 663 | if (boot_cpu_has(X86_FEATURE_VIRT_SSBD)) | ||
| 664 | entry->ebx |= F(VIRT_SSBD); | ||
| 655 | entry->ebx &= kvm_cpuid_8000_0008_ebx_x86_features; | 665 | entry->ebx &= kvm_cpuid_8000_0008_ebx_x86_features; |
| 656 | cpuid_mask(&entry->ebx, CPUID_8000_0008_EBX); | 666 | cpuid_mask(&entry->ebx, CPUID_8000_0008_EBX); |
| 667 | if (boot_cpu_has(X86_FEATURE_LS_CFG_SSBD)) | ||
| 668 | entry->ebx |= F(VIRT_SSBD); | ||
| 657 | break; | 669 | break; |
| 658 | } | 670 | } |
| 659 | case 0x80000019: | 671 | case 0x80000019: |
diff --git a/arch/x86/kvm/hyperv.c b/arch/x86/kvm/hyperv.c index 98618e397342..46ff64da44ca 100644 --- a/arch/x86/kvm/hyperv.c +++ b/arch/x86/kvm/hyperv.c | |||
| @@ -1260,12 +1260,16 @@ static void kvm_hv_hypercall_set_result(struct kvm_vcpu *vcpu, u64 result) | |||
| 1260 | } | 1260 | } |
| 1261 | } | 1261 | } |
| 1262 | 1262 | ||
| 1263 | static int kvm_hv_hypercall_complete_userspace(struct kvm_vcpu *vcpu) | 1263 | static int kvm_hv_hypercall_complete(struct kvm_vcpu *vcpu, u64 result) |
| 1264 | { | 1264 | { |
| 1265 | struct kvm_run *run = vcpu->run; | 1265 | kvm_hv_hypercall_set_result(vcpu, result); |
| 1266 | ++vcpu->stat.hypercalls; | ||
| 1267 | return kvm_skip_emulated_instruction(vcpu); | ||
| 1268 | } | ||
| 1266 | 1269 | ||
| 1267 | kvm_hv_hypercall_set_result(vcpu, run->hyperv.u.hcall.result); | 1270 | static int kvm_hv_hypercall_complete_userspace(struct kvm_vcpu *vcpu) |
| 1268 | return 1; | 1271 | { |
| 1272 | return kvm_hv_hypercall_complete(vcpu, vcpu->run->hyperv.u.hcall.result); | ||
| 1269 | } | 1273 | } |
| 1270 | 1274 | ||
| 1271 | static u16 kvm_hvcall_signal_event(struct kvm_vcpu *vcpu, bool fast, u64 param) | 1275 | static u16 kvm_hvcall_signal_event(struct kvm_vcpu *vcpu, bool fast, u64 param) |
| @@ -1296,8 +1300,10 @@ static u16 kvm_hvcall_signal_event(struct kvm_vcpu *vcpu, bool fast, u64 param) | |||
| 1296 | if (param & ~KVM_HYPERV_CONN_ID_MASK) | 1300 | if (param & ~KVM_HYPERV_CONN_ID_MASK) |
| 1297 | return HV_STATUS_INVALID_HYPERCALL_INPUT; | 1301 | return HV_STATUS_INVALID_HYPERCALL_INPUT; |
| 1298 | 1302 | ||
| 1299 | /* conn_to_evt is protected by vcpu->kvm->srcu */ | 1303 | /* the eventfd is protected by vcpu->kvm->srcu, but conn_to_evt isn't */ |
| 1304 | rcu_read_lock(); | ||
| 1300 | eventfd = idr_find(&vcpu->kvm->arch.hyperv.conn_to_evt, param); | 1305 | eventfd = idr_find(&vcpu->kvm->arch.hyperv.conn_to_evt, param); |
| 1306 | rcu_read_unlock(); | ||
| 1301 | if (!eventfd) | 1307 | if (!eventfd) |
| 1302 | return HV_STATUS_INVALID_PORT_ID; | 1308 | return HV_STATUS_INVALID_PORT_ID; |
| 1303 | 1309 | ||
| @@ -1348,7 +1354,7 @@ int kvm_hv_hypercall(struct kvm_vcpu *vcpu) | |||
| 1348 | /* Hypercall continuation is not supported yet */ | 1354 | /* Hypercall continuation is not supported yet */ |
| 1349 | if (rep_cnt || rep_idx) { | 1355 | if (rep_cnt || rep_idx) { |
| 1350 | ret = HV_STATUS_INVALID_HYPERCALL_CODE; | 1356 | ret = HV_STATUS_INVALID_HYPERCALL_CODE; |
| 1351 | goto set_result; | 1357 | goto out; |
| 1352 | } | 1358 | } |
| 1353 | 1359 | ||
| 1354 | switch (code) { | 1360 | switch (code) { |
| @@ -1379,9 +1385,8 @@ int kvm_hv_hypercall(struct kvm_vcpu *vcpu) | |||
| 1379 | break; | 1385 | break; |
| 1380 | } | 1386 | } |
| 1381 | 1387 | ||
| 1382 | set_result: | 1388 | out: |
| 1383 | kvm_hv_hypercall_set_result(vcpu, ret); | 1389 | return kvm_hv_hypercall_complete(vcpu, ret); |
| 1384 | return 1; | ||
| 1385 | } | 1390 | } |
| 1386 | 1391 | ||
| 1387 | void kvm_hv_init_vm(struct kvm *kvm) | 1392 | void kvm_hv_init_vm(struct kvm *kvm) |
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c index b74c9c1405b9..3773c4625114 100644 --- a/arch/x86/kvm/lapic.c +++ b/arch/x86/kvm/lapic.c | |||
| @@ -1522,11 +1522,23 @@ static bool set_target_expiration(struct kvm_lapic *apic) | |||
| 1522 | 1522 | ||
| 1523 | static void advance_periodic_target_expiration(struct kvm_lapic *apic) | 1523 | static void advance_periodic_target_expiration(struct kvm_lapic *apic) |
| 1524 | { | 1524 | { |
| 1525 | apic->lapic_timer.tscdeadline += | 1525 | ktime_t now = ktime_get(); |
| 1526 | nsec_to_cycles(apic->vcpu, apic->lapic_timer.period); | 1526 | u64 tscl = rdtsc(); |
| 1527 | ktime_t delta; | ||
| 1528 | |||
| 1529 | /* | ||
| 1530 | * Synchronize both deadlines to the same time source or | ||
| 1531 | * differences in the periods (caused by differences in the | ||
| 1532 | * underlying clocks or numerical approximation errors) will | ||
| 1533 | * cause the two to drift apart over time as the errors | ||
| 1534 | * accumulate. | ||
| 1535 | */ | ||
| 1527 | apic->lapic_timer.target_expiration = | 1536 | apic->lapic_timer.target_expiration = |
| 1528 | ktime_add_ns(apic->lapic_timer.target_expiration, | 1537 | ktime_add_ns(apic->lapic_timer.target_expiration, |
| 1529 | apic->lapic_timer.period); | 1538 | apic->lapic_timer.period); |
| 1539 | delta = ktime_sub(apic->lapic_timer.target_expiration, now); | ||
| 1540 | apic->lapic_timer.tscdeadline = kvm_read_l1_tsc(apic->vcpu, tscl) + | ||
| 1541 | nsec_to_cycles(apic->vcpu, delta); | ||
| 1530 | } | 1542 | } |
| 1531 | 1543 | ||
| 1532 | static void start_sw_period(struct kvm_lapic *apic) | 1544 | static void start_sw_period(struct kvm_lapic *apic) |
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c index 1fc05e428aba..26110c202b19 100644 --- a/arch/x86/kvm/svm.c +++ b/arch/x86/kvm/svm.c | |||
| @@ -49,7 +49,7 @@ | |||
| 49 | #include <asm/debugreg.h> | 49 | #include <asm/debugreg.h> |
| 50 | #include <asm/kvm_para.h> | 50 | #include <asm/kvm_para.h> |
| 51 | #include <asm/irq_remapping.h> | 51 | #include <asm/irq_remapping.h> |
| 52 | #include <asm/nospec-branch.h> | 52 | #include <asm/spec-ctrl.h> |
| 53 | 53 | ||
| 54 | #include <asm/virtext.h> | 54 | #include <asm/virtext.h> |
| 55 | #include "trace.h" | 55 | #include "trace.h" |
| @@ -213,6 +213,12 @@ struct vcpu_svm { | |||
| 213 | } host; | 213 | } host; |
| 214 | 214 | ||
| 215 | u64 spec_ctrl; | 215 | u64 spec_ctrl; |
| 216 | /* | ||
| 217 | * Contains guest-controlled bits of VIRT_SPEC_CTRL, which will be | ||
| 218 | * translated into the appropriate L2_CFG bits on the host to | ||
| 219 | * perform speculative control. | ||
| 220 | */ | ||
| 221 | u64 virt_spec_ctrl; | ||
| 216 | 222 | ||
| 217 | u32 *msrpm; | 223 | u32 *msrpm; |
| 218 | 224 | ||
| @@ -2060,6 +2066,7 @@ static void svm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event) | |||
| 2060 | 2066 | ||
| 2061 | vcpu->arch.microcode_version = 0x01000065; | 2067 | vcpu->arch.microcode_version = 0x01000065; |
| 2062 | svm->spec_ctrl = 0; | 2068 | svm->spec_ctrl = 0; |
| 2069 | svm->virt_spec_ctrl = 0; | ||
| 2063 | 2070 | ||
| 2064 | if (!init_event) { | 2071 | if (!init_event) { |
| 2065 | svm->vcpu.arch.apic_base = APIC_DEFAULT_PHYS_BASE | | 2072 | svm->vcpu.arch.apic_base = APIC_DEFAULT_PHYS_BASE | |
| @@ -4108,11 +4115,18 @@ static int svm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) | |||
| 4108 | break; | 4115 | break; |
| 4109 | case MSR_IA32_SPEC_CTRL: | 4116 | case MSR_IA32_SPEC_CTRL: |
| 4110 | if (!msr_info->host_initiated && | 4117 | if (!msr_info->host_initiated && |
| 4111 | !guest_cpuid_has(vcpu, X86_FEATURE_IBRS)) | 4118 | !guest_cpuid_has(vcpu, X86_FEATURE_AMD_IBRS)) |
| 4112 | return 1; | 4119 | return 1; |
| 4113 | 4120 | ||
| 4114 | msr_info->data = svm->spec_ctrl; | 4121 | msr_info->data = svm->spec_ctrl; |
| 4115 | break; | 4122 | break; |
| 4123 | case MSR_AMD64_VIRT_SPEC_CTRL: | ||
| 4124 | if (!msr_info->host_initiated && | ||
| 4125 | !guest_cpuid_has(vcpu, X86_FEATURE_VIRT_SSBD)) | ||
| 4126 | return 1; | ||
| 4127 | |||
| 4128 | msr_info->data = svm->virt_spec_ctrl; | ||
| 4129 | break; | ||
| 4116 | case MSR_F15H_IC_CFG: { | 4130 | case MSR_F15H_IC_CFG: { |
| 4117 | 4131 | ||
| 4118 | int family, model; | 4132 | int family, model; |
| @@ -4203,7 +4217,7 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr) | |||
| 4203 | break; | 4217 | break; |
| 4204 | case MSR_IA32_SPEC_CTRL: | 4218 | case MSR_IA32_SPEC_CTRL: |
| 4205 | if (!msr->host_initiated && | 4219 | if (!msr->host_initiated && |
| 4206 | !guest_cpuid_has(vcpu, X86_FEATURE_IBRS)) | 4220 | !guest_cpuid_has(vcpu, X86_FEATURE_AMD_IBRS)) |
| 4207 | return 1; | 4221 | return 1; |
| 4208 | 4222 | ||
| 4209 | /* The STIBP bit doesn't fault even if it's not advertised */ | 4223 | /* The STIBP bit doesn't fault even if it's not advertised */ |
| @@ -4230,7 +4244,7 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr) | |||
| 4230 | break; | 4244 | break; |
| 4231 | case MSR_IA32_PRED_CMD: | 4245 | case MSR_IA32_PRED_CMD: |
| 4232 | if (!msr->host_initiated && | 4246 | if (!msr->host_initiated && |
| 4233 | !guest_cpuid_has(vcpu, X86_FEATURE_IBPB)) | 4247 | !guest_cpuid_has(vcpu, X86_FEATURE_AMD_IBPB)) |
| 4234 | return 1; | 4248 | return 1; |
| 4235 | 4249 | ||
| 4236 | if (data & ~PRED_CMD_IBPB) | 4250 | if (data & ~PRED_CMD_IBPB) |
| @@ -4244,6 +4258,16 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr) | |||
| 4244 | break; | 4258 | break; |
| 4245 | set_msr_interception(svm->msrpm, MSR_IA32_PRED_CMD, 0, 1); | 4259 | set_msr_interception(svm->msrpm, MSR_IA32_PRED_CMD, 0, 1); |
| 4246 | break; | 4260 | break; |
| 4261 | case MSR_AMD64_VIRT_SPEC_CTRL: | ||
| 4262 | if (!msr->host_initiated && | ||
| 4263 | !guest_cpuid_has(vcpu, X86_FEATURE_VIRT_SSBD)) | ||
| 4264 | return 1; | ||
| 4265 | |||
| 4266 | if (data & ~SPEC_CTRL_SSBD) | ||
| 4267 | return 1; | ||
| 4268 | |||
| 4269 | svm->virt_spec_ctrl = data; | ||
| 4270 | break; | ||
| 4247 | case MSR_STAR: | 4271 | case MSR_STAR: |
| 4248 | svm->vmcb->save.star = data; | 4272 | svm->vmcb->save.star = data; |
| 4249 | break; | 4273 | break; |
| @@ -5557,8 +5581,7 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu) | |||
| 5557 | * is no need to worry about the conditional branch over the wrmsr | 5581 | * is no need to worry about the conditional branch over the wrmsr |
| 5558 | * being speculatively taken. | 5582 | * being speculatively taken. |
| 5559 | */ | 5583 | */ |
| 5560 | if (svm->spec_ctrl) | 5584 | x86_spec_ctrl_set_guest(svm->spec_ctrl, svm->virt_spec_ctrl); |
| 5561 | native_wrmsrl(MSR_IA32_SPEC_CTRL, svm->spec_ctrl); | ||
| 5562 | 5585 | ||
| 5563 | asm volatile ( | 5586 | asm volatile ( |
| 5564 | "push %%" _ASM_BP "; \n\t" | 5587 | "push %%" _ASM_BP "; \n\t" |
| @@ -5652,6 +5675,18 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu) | |||
| 5652 | #endif | 5675 | #endif |
| 5653 | ); | 5676 | ); |
| 5654 | 5677 | ||
| 5678 | /* Eliminate branch target predictions from guest mode */ | ||
| 5679 | vmexit_fill_RSB(); | ||
| 5680 | |||
| 5681 | #ifdef CONFIG_X86_64 | ||
| 5682 | wrmsrl(MSR_GS_BASE, svm->host.gs_base); | ||
| 5683 | #else | ||
| 5684 | loadsegment(fs, svm->host.fs); | ||
| 5685 | #ifndef CONFIG_X86_32_LAZY_GS | ||
| 5686 | loadsegment(gs, svm->host.gs); | ||
| 5687 | #endif | ||
| 5688 | #endif | ||
| 5689 | |||
| 5655 | /* | 5690 | /* |
| 5656 | * We do not use IBRS in the kernel. If this vCPU has used the | 5691 | * We do not use IBRS in the kernel. If this vCPU has used the |
| 5657 | * SPEC_CTRL MSR it may have left it on; save the value and | 5692 | * SPEC_CTRL MSR it may have left it on; save the value and |
| @@ -5670,20 +5705,7 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu) | |||
| 5670 | if (unlikely(!msr_write_intercepted(vcpu, MSR_IA32_SPEC_CTRL))) | 5705 | if (unlikely(!msr_write_intercepted(vcpu, MSR_IA32_SPEC_CTRL))) |
| 5671 | svm->spec_ctrl = native_read_msr(MSR_IA32_SPEC_CTRL); | 5706 | svm->spec_ctrl = native_read_msr(MSR_IA32_SPEC_CTRL); |
| 5672 | 5707 | ||
| 5673 | if (svm->spec_ctrl) | 5708 | x86_spec_ctrl_restore_host(svm->spec_ctrl, svm->virt_spec_ctrl); |
| 5674 | native_wrmsrl(MSR_IA32_SPEC_CTRL, 0); | ||
| 5675 | |||
| 5676 | /* Eliminate branch target predictions from guest mode */ | ||
| 5677 | vmexit_fill_RSB(); | ||
| 5678 | |||
| 5679 | #ifdef CONFIG_X86_64 | ||
| 5680 | wrmsrl(MSR_GS_BASE, svm->host.gs_base); | ||
| 5681 | #else | ||
| 5682 | loadsegment(fs, svm->host.fs); | ||
| 5683 | #ifndef CONFIG_X86_32_LAZY_GS | ||
| 5684 | loadsegment(gs, svm->host.gs); | ||
| 5685 | #endif | ||
| 5686 | #endif | ||
| 5687 | 5709 | ||
| 5688 | reload_tss(vcpu); | 5710 | reload_tss(vcpu); |
| 5689 | 5711 | ||
| @@ -5786,7 +5808,7 @@ static bool svm_cpu_has_accelerated_tpr(void) | |||
| 5786 | return false; | 5808 | return false; |
| 5787 | } | 5809 | } |
| 5788 | 5810 | ||
| 5789 | static bool svm_has_high_real_mode_segbase(void) | 5811 | static bool svm_has_emulated_msr(int index) |
| 5790 | { | 5812 | { |
| 5791 | return true; | 5813 | return true; |
| 5792 | } | 5814 | } |
| @@ -7012,7 +7034,7 @@ static struct kvm_x86_ops svm_x86_ops __ro_after_init = { | |||
| 7012 | .hardware_enable = svm_hardware_enable, | 7034 | .hardware_enable = svm_hardware_enable, |
| 7013 | .hardware_disable = svm_hardware_disable, | 7035 | .hardware_disable = svm_hardware_disable, |
| 7014 | .cpu_has_accelerated_tpr = svm_cpu_has_accelerated_tpr, | 7036 | .cpu_has_accelerated_tpr = svm_cpu_has_accelerated_tpr, |
| 7015 | .cpu_has_high_real_mode_segbase = svm_has_high_real_mode_segbase, | 7037 | .has_emulated_msr = svm_has_emulated_msr, |
| 7016 | 7038 | ||
| 7017 | .vcpu_create = svm_create_vcpu, | 7039 | .vcpu_create = svm_create_vcpu, |
| 7018 | .vcpu_free = svm_free_vcpu, | 7040 | .vcpu_free = svm_free_vcpu, |
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index c7668806163f..40aa29204baf 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c | |||
| @@ -51,7 +51,7 @@ | |||
| 51 | #include <asm/apic.h> | 51 | #include <asm/apic.h> |
| 52 | #include <asm/irq_remapping.h> | 52 | #include <asm/irq_remapping.h> |
| 53 | #include <asm/mmu_context.h> | 53 | #include <asm/mmu_context.h> |
| 54 | #include <asm/nospec-branch.h> | 54 | #include <asm/spec-ctrl.h> |
| 55 | #include <asm/mshyperv.h> | 55 | #include <asm/mshyperv.h> |
| 56 | 56 | ||
| 57 | #include "trace.h" | 57 | #include "trace.h" |
| @@ -1494,6 +1494,12 @@ static inline bool cpu_has_vmx_vmfunc(void) | |||
| 1494 | SECONDARY_EXEC_ENABLE_VMFUNC; | 1494 | SECONDARY_EXEC_ENABLE_VMFUNC; |
| 1495 | } | 1495 | } |
| 1496 | 1496 | ||
| 1497 | static bool vmx_umip_emulated(void) | ||
| 1498 | { | ||
| 1499 | return vmcs_config.cpu_based_2nd_exec_ctrl & | ||
| 1500 | SECONDARY_EXEC_DESC; | ||
| 1501 | } | ||
| 1502 | |||
| 1497 | static inline bool report_flexpriority(void) | 1503 | static inline bool report_flexpriority(void) |
| 1498 | { | 1504 | { |
| 1499 | return flexpriority_enabled; | 1505 | return flexpriority_enabled; |
| @@ -3523,7 +3529,6 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) | |||
| 3523 | return kvm_get_msr_common(vcpu, msr_info); | 3529 | return kvm_get_msr_common(vcpu, msr_info); |
| 3524 | case MSR_IA32_SPEC_CTRL: | 3530 | case MSR_IA32_SPEC_CTRL: |
| 3525 | if (!msr_info->host_initiated && | 3531 | if (!msr_info->host_initiated && |
| 3526 | !guest_cpuid_has(vcpu, X86_FEATURE_IBRS) && | ||
| 3527 | !guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL)) | 3532 | !guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL)) |
| 3528 | return 1; | 3533 | return 1; |
| 3529 | 3534 | ||
| @@ -3642,12 +3647,11 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) | |||
| 3642 | break; | 3647 | break; |
| 3643 | case MSR_IA32_SPEC_CTRL: | 3648 | case MSR_IA32_SPEC_CTRL: |
| 3644 | if (!msr_info->host_initiated && | 3649 | if (!msr_info->host_initiated && |
| 3645 | !guest_cpuid_has(vcpu, X86_FEATURE_IBRS) && | ||
| 3646 | !guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL)) | 3650 | !guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL)) |
| 3647 | return 1; | 3651 | return 1; |
| 3648 | 3652 | ||
| 3649 | /* The STIBP bit doesn't fault even if it's not advertised */ | 3653 | /* The STIBP bit doesn't fault even if it's not advertised */ |
| 3650 | if (data & ~(SPEC_CTRL_IBRS | SPEC_CTRL_STIBP)) | 3654 | if (data & ~(SPEC_CTRL_IBRS | SPEC_CTRL_STIBP | SPEC_CTRL_SSBD)) |
| 3651 | return 1; | 3655 | return 1; |
| 3652 | 3656 | ||
| 3653 | vmx->spec_ctrl = data; | 3657 | vmx->spec_ctrl = data; |
| @@ -3673,7 +3677,6 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) | |||
| 3673 | break; | 3677 | break; |
| 3674 | case MSR_IA32_PRED_CMD: | 3678 | case MSR_IA32_PRED_CMD: |
| 3675 | if (!msr_info->host_initiated && | 3679 | if (!msr_info->host_initiated && |
| 3676 | !guest_cpuid_has(vcpu, X86_FEATURE_IBPB) && | ||
| 3677 | !guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL)) | 3680 | !guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL)) |
| 3678 | return 1; | 3681 | return 1; |
| 3679 | 3682 | ||
| @@ -4761,14 +4764,16 @@ static int vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4) | |||
| 4761 | else | 4764 | else |
| 4762 | hw_cr4 |= KVM_PMODE_VM_CR4_ALWAYS_ON; | 4765 | hw_cr4 |= KVM_PMODE_VM_CR4_ALWAYS_ON; |
| 4763 | 4766 | ||
| 4764 | if ((cr4 & X86_CR4_UMIP) && !boot_cpu_has(X86_FEATURE_UMIP)) { | 4767 | if (!boot_cpu_has(X86_FEATURE_UMIP) && vmx_umip_emulated()) { |
| 4765 | vmcs_set_bits(SECONDARY_VM_EXEC_CONTROL, | 4768 | if (cr4 & X86_CR4_UMIP) { |
| 4766 | SECONDARY_EXEC_DESC); | 4769 | vmcs_set_bits(SECONDARY_VM_EXEC_CONTROL, |
| 4767 | hw_cr4 &= ~X86_CR4_UMIP; | ||
| 4768 | } else if (!is_guest_mode(vcpu) || | ||
| 4769 | !nested_cpu_has2(get_vmcs12(vcpu), SECONDARY_EXEC_DESC)) | ||
| 4770 | vmcs_clear_bits(SECONDARY_VM_EXEC_CONTROL, | ||
| 4771 | SECONDARY_EXEC_DESC); | 4770 | SECONDARY_EXEC_DESC); |
| 4771 | hw_cr4 &= ~X86_CR4_UMIP; | ||
| 4772 | } else if (!is_guest_mode(vcpu) || | ||
| 4773 | !nested_cpu_has2(get_vmcs12(vcpu), SECONDARY_EXEC_DESC)) | ||
| 4774 | vmcs_clear_bits(SECONDARY_VM_EXEC_CONTROL, | ||
| 4775 | SECONDARY_EXEC_DESC); | ||
| 4776 | } | ||
| 4772 | 4777 | ||
| 4773 | if (cr4 & X86_CR4_VMXE) { | 4778 | if (cr4 & X86_CR4_VMXE) { |
| 4774 | /* | 4779 | /* |
| @@ -9480,9 +9485,21 @@ static void vmx_handle_external_intr(struct kvm_vcpu *vcpu) | |||
| 9480 | } | 9485 | } |
| 9481 | STACK_FRAME_NON_STANDARD(vmx_handle_external_intr); | 9486 | STACK_FRAME_NON_STANDARD(vmx_handle_external_intr); |
| 9482 | 9487 | ||
| 9483 | static bool vmx_has_high_real_mode_segbase(void) | 9488 | static bool vmx_has_emulated_msr(int index) |
| 9484 | { | 9489 | { |
| 9485 | return enable_unrestricted_guest || emulate_invalid_guest_state; | 9490 | switch (index) { |
| 9491 | case MSR_IA32_SMBASE: | ||
| 9492 | /* | ||
| 9493 | * We cannot do SMM unless we can run the guest in big | ||
| 9494 | * real mode. | ||
| 9495 | */ | ||
| 9496 | return enable_unrestricted_guest || emulate_invalid_guest_state; | ||
| 9497 | case MSR_AMD64_VIRT_SPEC_CTRL: | ||
| 9498 | /* This is AMD only. */ | ||
| 9499 | return false; | ||
| 9500 | default: | ||
| 9501 | return true; | ||
| 9502 | } | ||
| 9486 | } | 9503 | } |
| 9487 | 9504 | ||
| 9488 | static bool vmx_mpx_supported(void) | 9505 | static bool vmx_mpx_supported(void) |
| @@ -9497,12 +9514,6 @@ static bool vmx_xsaves_supported(void) | |||
| 9497 | SECONDARY_EXEC_XSAVES; | 9514 | SECONDARY_EXEC_XSAVES; |
| 9498 | } | 9515 | } |
| 9499 | 9516 | ||
| 9500 | static bool vmx_umip_emulated(void) | ||
| 9501 | { | ||
| 9502 | return vmcs_config.cpu_based_2nd_exec_ctrl & | ||
| 9503 | SECONDARY_EXEC_DESC; | ||
| 9504 | } | ||
| 9505 | |||
| 9506 | static void vmx_recover_nmi_blocking(struct vcpu_vmx *vmx) | 9517 | static void vmx_recover_nmi_blocking(struct vcpu_vmx *vmx) |
| 9507 | { | 9518 | { |
| 9508 | u32 exit_intr_info; | 9519 | u32 exit_intr_info; |
| @@ -9720,8 +9731,7 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu) | |||
| 9720 | * is no need to worry about the conditional branch over the wrmsr | 9731 | * is no need to worry about the conditional branch over the wrmsr |
| 9721 | * being speculatively taken. | 9732 | * being speculatively taken. |
| 9722 | */ | 9733 | */ |
| 9723 | if (vmx->spec_ctrl) | 9734 | x86_spec_ctrl_set_guest(vmx->spec_ctrl, 0); |
| 9724 | native_wrmsrl(MSR_IA32_SPEC_CTRL, vmx->spec_ctrl); | ||
| 9725 | 9735 | ||
| 9726 | vmx->__launched = vmx->loaded_vmcs->launched; | 9736 | vmx->__launched = vmx->loaded_vmcs->launched; |
| 9727 | 9737 | ||
| @@ -9869,8 +9879,7 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu) | |||
| 9869 | if (unlikely(!msr_write_intercepted(vcpu, MSR_IA32_SPEC_CTRL))) | 9879 | if (unlikely(!msr_write_intercepted(vcpu, MSR_IA32_SPEC_CTRL))) |
| 9870 | vmx->spec_ctrl = native_read_msr(MSR_IA32_SPEC_CTRL); | 9880 | vmx->spec_ctrl = native_read_msr(MSR_IA32_SPEC_CTRL); |
| 9871 | 9881 | ||
| 9872 | if (vmx->spec_ctrl) | 9882 | x86_spec_ctrl_restore_host(vmx->spec_ctrl, 0); |
| 9873 | native_wrmsrl(MSR_IA32_SPEC_CTRL, 0); | ||
| 9874 | 9883 | ||
| 9875 | /* Eliminate branch target predictions from guest mode */ | 9884 | /* Eliminate branch target predictions from guest mode */ |
| 9876 | vmexit_fill_RSB(); | 9885 | vmexit_fill_RSB(); |
| @@ -12630,7 +12639,7 @@ static struct kvm_x86_ops vmx_x86_ops __ro_after_init = { | |||
| 12630 | .hardware_enable = hardware_enable, | 12639 | .hardware_enable = hardware_enable, |
| 12631 | .hardware_disable = hardware_disable, | 12640 | .hardware_disable = hardware_disable, |
| 12632 | .cpu_has_accelerated_tpr = report_flexpriority, | 12641 | .cpu_has_accelerated_tpr = report_flexpriority, |
| 12633 | .cpu_has_high_real_mode_segbase = vmx_has_high_real_mode_segbase, | 12642 | .has_emulated_msr = vmx_has_emulated_msr, |
| 12634 | 12643 | ||
| 12635 | .vm_init = vmx_vm_init, | 12644 | .vm_init = vmx_vm_init, |
| 12636 | .vm_alloc = vmx_vm_alloc, | 12645 | .vm_alloc = vmx_vm_alloc, |
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 51ecd381793b..71e7cda6d014 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c | |||
| @@ -114,7 +114,7 @@ module_param(ignore_msrs, bool, S_IRUGO | S_IWUSR); | |||
| 114 | static bool __read_mostly report_ignored_msrs = true; | 114 | static bool __read_mostly report_ignored_msrs = true; |
| 115 | module_param(report_ignored_msrs, bool, S_IRUGO | S_IWUSR); | 115 | module_param(report_ignored_msrs, bool, S_IRUGO | S_IWUSR); |
| 116 | 116 | ||
| 117 | unsigned int min_timer_period_us = 500; | 117 | unsigned int min_timer_period_us = 200; |
| 118 | module_param(min_timer_period_us, uint, S_IRUGO | S_IWUSR); | 118 | module_param(min_timer_period_us, uint, S_IRUGO | S_IWUSR); |
| 119 | 119 | ||
| 120 | static bool __read_mostly kvmclock_periodic_sync = true; | 120 | static bool __read_mostly kvmclock_periodic_sync = true; |
| @@ -843,7 +843,10 @@ EXPORT_SYMBOL_GPL(kvm_set_cr4); | |||
| 843 | int kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3) | 843 | int kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3) |
| 844 | { | 844 | { |
| 845 | #ifdef CONFIG_X86_64 | 845 | #ifdef CONFIG_X86_64 |
| 846 | cr3 &= ~CR3_PCID_INVD; | 846 | bool pcid_enabled = kvm_read_cr4_bits(vcpu, X86_CR4_PCIDE); |
| 847 | |||
| 848 | if (pcid_enabled) | ||
| 849 | cr3 &= ~CR3_PCID_INVD; | ||
| 847 | #endif | 850 | #endif |
| 848 | 851 | ||
| 849 | if (cr3 == kvm_read_cr3(vcpu) && !pdptrs_changed(vcpu)) { | 852 | if (cr3 == kvm_read_cr3(vcpu) && !pdptrs_changed(vcpu)) { |
| @@ -1058,6 +1061,7 @@ static u32 emulated_msrs[] = { | |||
| 1058 | MSR_SMI_COUNT, | 1061 | MSR_SMI_COUNT, |
| 1059 | MSR_PLATFORM_INFO, | 1062 | MSR_PLATFORM_INFO, |
| 1060 | MSR_MISC_FEATURES_ENABLES, | 1063 | MSR_MISC_FEATURES_ENABLES, |
| 1064 | MSR_AMD64_VIRT_SPEC_CTRL, | ||
| 1061 | }; | 1065 | }; |
| 1062 | 1066 | ||
| 1063 | static unsigned num_emulated_msrs; | 1067 | static unsigned num_emulated_msrs; |
| @@ -2903,7 +2907,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) | |||
| 2903 | * fringe case that is not enabled except via specific settings | 2907 | * fringe case that is not enabled except via specific settings |
| 2904 | * of the module parameters. | 2908 | * of the module parameters. |
| 2905 | */ | 2909 | */ |
| 2906 | r = kvm_x86_ops->cpu_has_high_real_mode_segbase(); | 2910 | r = kvm_x86_ops->has_emulated_msr(MSR_IA32_SMBASE); |
| 2907 | break; | 2911 | break; |
| 2908 | case KVM_CAP_VAPIC: | 2912 | case KVM_CAP_VAPIC: |
| 2909 | r = !kvm_x86_ops->cpu_has_accelerated_tpr(); | 2913 | r = !kvm_x86_ops->cpu_has_accelerated_tpr(); |
| @@ -4603,14 +4607,8 @@ static void kvm_init_msr_list(void) | |||
| 4603 | num_msrs_to_save = j; | 4607 | num_msrs_to_save = j; |
| 4604 | 4608 | ||
| 4605 | for (i = j = 0; i < ARRAY_SIZE(emulated_msrs); i++) { | 4609 | for (i = j = 0; i < ARRAY_SIZE(emulated_msrs); i++) { |
| 4606 | switch (emulated_msrs[i]) { | 4610 | if (!kvm_x86_ops->has_emulated_msr(emulated_msrs[i])) |
| 4607 | case MSR_IA32_SMBASE: | 4611 | continue; |
| 4608 | if (!kvm_x86_ops->cpu_has_high_real_mode_segbase()) | ||
| 4609 | continue; | ||
| 4610 | break; | ||
| 4611 | default: | ||
| 4612 | break; | ||
| 4613 | } | ||
| 4614 | 4612 | ||
| 4615 | if (j < i) | 4613 | if (j < i) |
| 4616 | emulated_msrs[j] = emulated_msrs[i]; | 4614 | emulated_msrs[j] = emulated_msrs[i]; |
| @@ -6671,9 +6669,7 @@ void kvm_vcpu_deactivate_apicv(struct kvm_vcpu *vcpu) | |||
| 6671 | int kvm_emulate_hypercall(struct kvm_vcpu *vcpu) | 6669 | int kvm_emulate_hypercall(struct kvm_vcpu *vcpu) |
| 6672 | { | 6670 | { |
| 6673 | unsigned long nr, a0, a1, a2, a3, ret; | 6671 | unsigned long nr, a0, a1, a2, a3, ret; |
| 6674 | int op_64_bit, r; | 6672 | int op_64_bit; |
| 6675 | |||
| 6676 | r = kvm_skip_emulated_instruction(vcpu); | ||
| 6677 | 6673 | ||
| 6678 | if (kvm_hv_hypercall_enabled(vcpu->kvm)) | 6674 | if (kvm_hv_hypercall_enabled(vcpu->kvm)) |
| 6679 | return kvm_hv_hypercall(vcpu); | 6675 | return kvm_hv_hypercall(vcpu); |
| @@ -6721,8 +6717,9 @@ out: | |||
| 6721 | if (!op_64_bit) | 6717 | if (!op_64_bit) |
| 6722 | ret = (u32)ret; | 6718 | ret = (u32)ret; |
| 6723 | kvm_register_write(vcpu, VCPU_REGS_RAX, ret); | 6719 | kvm_register_write(vcpu, VCPU_REGS_RAX, ret); |
| 6720 | |||
| 6724 | ++vcpu->stat.hypercalls; | 6721 | ++vcpu->stat.hypercalls; |
| 6725 | return r; | 6722 | return kvm_skip_emulated_instruction(vcpu); |
| 6726 | } | 6723 | } |
| 6727 | EXPORT_SYMBOL_GPL(kvm_emulate_hypercall); | 6724 | EXPORT_SYMBOL_GPL(kvm_emulate_hypercall); |
| 6728 | 6725 | ||
| @@ -7979,6 +7976,7 @@ static int __set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) | |||
| 7979 | { | 7976 | { |
| 7980 | struct msr_data apic_base_msr; | 7977 | struct msr_data apic_base_msr; |
| 7981 | int mmu_reset_needed = 0; | 7978 | int mmu_reset_needed = 0; |
| 7979 | int cpuid_update_needed = 0; | ||
| 7982 | int pending_vec, max_bits, idx; | 7980 | int pending_vec, max_bits, idx; |
| 7983 | struct desc_ptr dt; | 7981 | struct desc_ptr dt; |
| 7984 | int ret = -EINVAL; | 7982 | int ret = -EINVAL; |
| @@ -8017,8 +8015,10 @@ static int __set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) | |||
| 8017 | vcpu->arch.cr0 = sregs->cr0; | 8015 | vcpu->arch.cr0 = sregs->cr0; |
| 8018 | 8016 | ||
| 8019 | mmu_reset_needed |= kvm_read_cr4(vcpu) != sregs->cr4; | 8017 | mmu_reset_needed |= kvm_read_cr4(vcpu) != sregs->cr4; |
| 8018 | cpuid_update_needed |= ((kvm_read_cr4(vcpu) ^ sregs->cr4) & | ||
| 8019 | (X86_CR4_OSXSAVE | X86_CR4_PKE)); | ||
| 8020 | kvm_x86_ops->set_cr4(vcpu, sregs->cr4); | 8020 | kvm_x86_ops->set_cr4(vcpu, sregs->cr4); |
| 8021 | if (sregs->cr4 & (X86_CR4_OSXSAVE | X86_CR4_PKE)) | 8021 | if (cpuid_update_needed) |
| 8022 | kvm_update_cpuid(vcpu); | 8022 | kvm_update_cpuid(vcpu); |
| 8023 | 8023 | ||
| 8024 | idx = srcu_read_lock(&vcpu->kvm->srcu); | 8024 | idx = srcu_read_lock(&vcpu->kvm->srcu); |
diff --git a/arch/x86/mm/pkeys.c b/arch/x86/mm/pkeys.c index d7bc0eea20a5..6e98e0a7c923 100644 --- a/arch/x86/mm/pkeys.c +++ b/arch/x86/mm/pkeys.c | |||
| @@ -94,26 +94,27 @@ int __arch_override_mprotect_pkey(struct vm_area_struct *vma, int prot, int pkey | |||
| 94 | */ | 94 | */ |
| 95 | if (pkey != -1) | 95 | if (pkey != -1) |
| 96 | return pkey; | 96 | return pkey; |
| 97 | /* | 97 | |
| 98 | * Look for a protection-key-drive execute-only mapping | ||
| 99 | * which is now being given permissions that are not | ||
| 100 | * execute-only. Move it back to the default pkey. | ||
| 101 | */ | ||
| 102 | if (vma_is_pkey_exec_only(vma) && | ||
| 103 | (prot & (PROT_READ|PROT_WRITE))) { | ||
| 104 | return 0; | ||
| 105 | } | ||
| 106 | /* | 98 | /* |
| 107 | * The mapping is execute-only. Go try to get the | 99 | * The mapping is execute-only. Go try to get the |
| 108 | * execute-only protection key. If we fail to do that, | 100 | * execute-only protection key. If we fail to do that, |
| 109 | * fall through as if we do not have execute-only | 101 | * fall through as if we do not have execute-only |
| 110 | * support. | 102 | * support in this mm. |
| 111 | */ | 103 | */ |
| 112 | if (prot == PROT_EXEC) { | 104 | if (prot == PROT_EXEC) { |
| 113 | pkey = execute_only_pkey(vma->vm_mm); | 105 | pkey = execute_only_pkey(vma->vm_mm); |
| 114 | if (pkey > 0) | 106 | if (pkey > 0) |
| 115 | return pkey; | 107 | return pkey; |
| 108 | } else if (vma_is_pkey_exec_only(vma)) { | ||
| 109 | /* | ||
| 110 | * Protections are *not* PROT_EXEC, but the mapping | ||
| 111 | * is using the exec-only pkey. This mapping was | ||
| 112 | * PROT_EXEC and will no longer be. Move back to | ||
| 113 | * the default pkey. | ||
| 114 | */ | ||
| 115 | return ARCH_DEFAULT_PKEY; | ||
| 116 | } | 116 | } |
| 117 | |||
| 117 | /* | 118 | /* |
| 118 | * This is a vanilla, non-pkey mprotect (or we failed to | 119 | * This is a vanilla, non-pkey mprotect (or we failed to |
| 119 | * setup execute-only), inherit the pkey from the VMA we | 120 | * setup execute-only), inherit the pkey from the VMA we |
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c index d33e7dbe3129..2d76106788a3 100644 --- a/arch/x86/xen/mmu.c +++ b/arch/x86/xen/mmu.c | |||
| @@ -42,13 +42,11 @@ xmaddr_t arbitrary_virt_to_machine(void *vaddr) | |||
| 42 | } | 42 | } |
| 43 | EXPORT_SYMBOL_GPL(arbitrary_virt_to_machine); | 43 | EXPORT_SYMBOL_GPL(arbitrary_virt_to_machine); |
| 44 | 44 | ||
| 45 | static void xen_flush_tlb_all(void) | 45 | static noinline void xen_flush_tlb_all(void) |
| 46 | { | 46 | { |
| 47 | struct mmuext_op *op; | 47 | struct mmuext_op *op; |
| 48 | struct multicall_space mcs; | 48 | struct multicall_space mcs; |
| 49 | 49 | ||
| 50 | trace_xen_mmu_flush_tlb_all(0); | ||
| 51 | |||
| 52 | preempt_disable(); | 50 | preempt_disable(); |
| 53 | 51 | ||
| 54 | mcs = xen_mc_entry(sizeof(*op)); | 52 | mcs = xen_mc_entry(sizeof(*op)); |
diff --git a/arch/x86/xen/mmu_pv.c b/arch/x86/xen/mmu_pv.c index 486c0a34d00b..2c30cabfda90 100644 --- a/arch/x86/xen/mmu_pv.c +++ b/arch/x86/xen/mmu_pv.c | |||
| @@ -1310,13 +1310,11 @@ unsigned long xen_read_cr2_direct(void) | |||
| 1310 | return this_cpu_read(xen_vcpu_info.arch.cr2); | 1310 | return this_cpu_read(xen_vcpu_info.arch.cr2); |
| 1311 | } | 1311 | } |
| 1312 | 1312 | ||
| 1313 | static void xen_flush_tlb(void) | 1313 | static noinline void xen_flush_tlb(void) |
| 1314 | { | 1314 | { |
| 1315 | struct mmuext_op *op; | 1315 | struct mmuext_op *op; |
| 1316 | struct multicall_space mcs; | 1316 | struct multicall_space mcs; |
| 1317 | 1317 | ||
| 1318 | trace_xen_mmu_flush_tlb(0); | ||
| 1319 | |||
| 1320 | preempt_disable(); | 1318 | preempt_disable(); |
| 1321 | 1319 | ||
| 1322 | mcs = xen_mc_entry(sizeof(*op)); | 1320 | mcs = xen_mc_entry(sizeof(*op)); |
diff --git a/drivers/acpi/acpica/acnamesp.h b/drivers/acpi/acpica/acnamesp.h index 514aaf948ea9..3825df923480 100644 --- a/drivers/acpi/acpica/acnamesp.h +++ b/drivers/acpi/acpica/acnamesp.h | |||
| @@ -56,6 +56,10 @@ acpi_status acpi_ns_initialize_objects(void); | |||
| 56 | 56 | ||
| 57 | acpi_status acpi_ns_initialize_devices(u32 flags); | 57 | acpi_status acpi_ns_initialize_devices(u32 flags); |
| 58 | 58 | ||
| 59 | acpi_status | ||
| 60 | acpi_ns_init_one_package(acpi_handle obj_handle, | ||
| 61 | u32 level, void *context, void **return_value); | ||
| 62 | |||
| 59 | /* | 63 | /* |
| 60 | * nsload - Namespace loading | 64 | * nsload - Namespace loading |
| 61 | */ | 65 | */ |
diff --git a/drivers/acpi/acpica/exconfig.c b/drivers/acpi/acpica/exconfig.c index 99d92cb32803..f85c6f3271f6 100644 --- a/drivers/acpi/acpica/exconfig.c +++ b/drivers/acpi/acpica/exconfig.c | |||
| @@ -174,6 +174,13 @@ acpi_ex_load_table_op(struct acpi_walk_state *walk_state, | |||
| 174 | return_ACPI_STATUS(status); | 174 | return_ACPI_STATUS(status); |
| 175 | } | 175 | } |
| 176 | 176 | ||
| 177 | /* Complete the initialization/resolution of package objects */ | ||
| 178 | |||
| 179 | status = acpi_ns_walk_namespace(ACPI_TYPE_PACKAGE, ACPI_ROOT_OBJECT, | ||
| 180 | ACPI_UINT32_MAX, 0, | ||
| 181 | acpi_ns_init_one_package, NULL, NULL, | ||
| 182 | NULL); | ||
| 183 | |||
| 177 | /* Parameter Data (optional) */ | 184 | /* Parameter Data (optional) */ |
| 178 | 185 | ||
| 179 | if (parameter_node) { | 186 | if (parameter_node) { |
| @@ -430,6 +437,13 @@ acpi_ex_load_op(union acpi_operand_object *obj_desc, | |||
| 430 | return_ACPI_STATUS(status); | 437 | return_ACPI_STATUS(status); |
| 431 | } | 438 | } |
| 432 | 439 | ||
| 440 | /* Complete the initialization/resolution of package objects */ | ||
| 441 | |||
| 442 | status = acpi_ns_walk_namespace(ACPI_TYPE_PACKAGE, ACPI_ROOT_OBJECT, | ||
| 443 | ACPI_UINT32_MAX, 0, | ||
| 444 | acpi_ns_init_one_package, NULL, NULL, | ||
| 445 | NULL); | ||
| 446 | |||
| 433 | /* Store the ddb_handle into the Target operand */ | 447 | /* Store the ddb_handle into the Target operand */ |
| 434 | 448 | ||
| 435 | status = acpi_ex_store(ddb_handle, target, walk_state); | 449 | status = acpi_ex_store(ddb_handle, target, walk_state); |
diff --git a/drivers/acpi/acpica/nsinit.c b/drivers/acpi/acpica/nsinit.c index 77f2b5f4948a..d77257d1c827 100644 --- a/drivers/acpi/acpica/nsinit.c +++ b/drivers/acpi/acpica/nsinit.c | |||
| @@ -242,6 +242,58 @@ error_exit: | |||
| 242 | 242 | ||
| 243 | /******************************************************************************* | 243 | /******************************************************************************* |
| 244 | * | 244 | * |
| 245 | * FUNCTION: acpi_ns_init_one_package | ||
| 246 | * | ||
| 247 | * PARAMETERS: obj_handle - Node | ||
| 248 | * level - Current nesting level | ||
| 249 | * context - Not used | ||
| 250 | * return_value - Not used | ||
| 251 | * | ||
| 252 | * RETURN: Status | ||
| 253 | * | ||
| 254 | * DESCRIPTION: Callback from acpi_walk_namespace. Invoked for every package | ||
| 255 | * within the namespace. Used during dynamic load of an SSDT. | ||
| 256 | * | ||
| 257 | ******************************************************************************/ | ||
| 258 | |||
| 259 | acpi_status | ||
| 260 | acpi_ns_init_one_package(acpi_handle obj_handle, | ||
| 261 | u32 level, void *context, void **return_value) | ||
| 262 | { | ||
| 263 | acpi_status status; | ||
| 264 | union acpi_operand_object *obj_desc; | ||
| 265 | struct acpi_namespace_node *node = | ||
| 266 | (struct acpi_namespace_node *)obj_handle; | ||
| 267 | |||
| 268 | obj_desc = acpi_ns_get_attached_object(node); | ||
| 269 | if (!obj_desc) { | ||
| 270 | return (AE_OK); | ||
| 271 | } | ||
| 272 | |||
| 273 | /* Exit if package is already initialized */ | ||
| 274 | |||
| 275 | if (obj_desc->package.flags & AOPOBJ_DATA_VALID) { | ||
| 276 | return (AE_OK); | ||
| 277 | } | ||
| 278 | |||
| 279 | status = acpi_ds_get_package_arguments(obj_desc); | ||
| 280 | if (ACPI_FAILURE(status)) { | ||
| 281 | return (AE_OK); | ||
| 282 | } | ||
| 283 | |||
| 284 | status = | ||
| 285 | acpi_ut_walk_package_tree(obj_desc, NULL, | ||
| 286 | acpi_ds_init_package_element, NULL); | ||
| 287 | if (ACPI_FAILURE(status)) { | ||
| 288 | return (AE_OK); | ||
| 289 | } | ||
| 290 | |||
| 291 | obj_desc->package.flags |= AOPOBJ_DATA_VALID; | ||
| 292 | return (AE_OK); | ||
| 293 | } | ||
| 294 | |||
| 295 | /******************************************************************************* | ||
| 296 | * | ||
| 245 | * FUNCTION: acpi_ns_init_one_object | 297 | * FUNCTION: acpi_ns_init_one_object |
| 246 | * | 298 | * |
| 247 | * PARAMETERS: obj_handle - Node | 299 | * PARAMETERS: obj_handle - Node |
| @@ -360,27 +412,11 @@ acpi_ns_init_one_object(acpi_handle obj_handle, | |||
| 360 | 412 | ||
| 361 | case ACPI_TYPE_PACKAGE: | 413 | case ACPI_TYPE_PACKAGE: |
| 362 | 414 | ||
| 363 | info->package_init++; | 415 | /* Complete the initialization/resolution of the package object */ |
| 364 | status = acpi_ds_get_package_arguments(obj_desc); | ||
| 365 | if (ACPI_FAILURE(status)) { | ||
| 366 | break; | ||
| 367 | } | ||
| 368 | |||
| 369 | ACPI_DEBUG_PRINT_RAW((ACPI_DB_PARSE, | ||
| 370 | "%s: Completing resolution of Package elements\n", | ||
| 371 | ACPI_GET_FUNCTION_NAME)); | ||
| 372 | 416 | ||
| 373 | /* | 417 | info->package_init++; |
| 374 | * Resolve all named references in package objects (and all | 418 | status = |
| 375 | * sub-packages). This action has been deferred until the entire | 419 | acpi_ns_init_one_package(obj_handle, level, NULL, NULL); |
| 376 | * namespace has been loaded, in order to support external and | ||
| 377 | * forward references from individual package elements (05/2017). | ||
| 378 | */ | ||
| 379 | status = acpi_ut_walk_package_tree(obj_desc, NULL, | ||
| 380 | acpi_ds_init_package_element, | ||
| 381 | NULL); | ||
| 382 | |||
| 383 | obj_desc->package.flags |= AOPOBJ_DATA_VALID; | ||
| 384 | break; | 420 | break; |
| 385 | 421 | ||
| 386 | default: | 422 | default: |
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c index 6389c88b3500..738fb22978dd 100644 --- a/drivers/ata/ahci.c +++ b/drivers/ata/ahci.c | |||
| @@ -334,6 +334,7 @@ static const struct pci_device_id ahci_pci_tbl[] = { | |||
| 334 | { PCI_VDEVICE(INTEL, 0x9c07), board_ahci_mobile }, /* Lynx LP RAID */ | 334 | { PCI_VDEVICE(INTEL, 0x9c07), board_ahci_mobile }, /* Lynx LP RAID */ |
| 335 | { PCI_VDEVICE(INTEL, 0x9c0e), board_ahci_mobile }, /* Lynx LP RAID */ | 335 | { PCI_VDEVICE(INTEL, 0x9c0e), board_ahci_mobile }, /* Lynx LP RAID */ |
| 336 | { PCI_VDEVICE(INTEL, 0x9c0f), board_ahci_mobile }, /* Lynx LP RAID */ | 336 | { PCI_VDEVICE(INTEL, 0x9c0f), board_ahci_mobile }, /* Lynx LP RAID */ |
| 337 | { PCI_VDEVICE(INTEL, 0x9dd3), board_ahci_mobile }, /* Cannon Lake PCH-LP AHCI */ | ||
| 337 | { PCI_VDEVICE(INTEL, 0x1f22), board_ahci }, /* Avoton AHCI */ | 338 | { PCI_VDEVICE(INTEL, 0x1f22), board_ahci }, /* Avoton AHCI */ |
| 338 | { PCI_VDEVICE(INTEL, 0x1f23), board_ahci }, /* Avoton AHCI */ | 339 | { PCI_VDEVICE(INTEL, 0x1f23), board_ahci }, /* Avoton AHCI */ |
| 339 | { PCI_VDEVICE(INTEL, 0x1f24), board_ahci }, /* Avoton RAID */ | 340 | { PCI_VDEVICE(INTEL, 0x1f24), board_ahci }, /* Avoton RAID */ |
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c index 68596bd4cf06..346b163f6e89 100644 --- a/drivers/ata/libata-core.c +++ b/drivers/ata/libata-core.c | |||
| @@ -4493,6 +4493,10 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = { | |||
| 4493 | /* https://bugzilla.kernel.org/show_bug.cgi?id=15573 */ | 4493 | /* https://bugzilla.kernel.org/show_bug.cgi?id=15573 */ |
| 4494 | { "C300-CTFDDAC128MAG", "0001", ATA_HORKAGE_NONCQ, }, | 4494 | { "C300-CTFDDAC128MAG", "0001", ATA_HORKAGE_NONCQ, }, |
| 4495 | 4495 | ||
| 4496 | /* Some Sandisk SSDs lock up hard with NCQ enabled. Reported on | ||
| 4497 | SD7SN6S256G and SD8SN8U256G */ | ||
| 4498 | { "SanDisk SD[78]SN*G", NULL, ATA_HORKAGE_NONCQ, }, | ||
| 4499 | |||
| 4496 | /* devices which puke on READ_NATIVE_MAX */ | 4500 | /* devices which puke on READ_NATIVE_MAX */ |
| 4497 | { "HDS724040KLSA80", "KFAOA20N", ATA_HORKAGE_BROKEN_HPA, }, | 4501 | { "HDS724040KLSA80", "KFAOA20N", ATA_HORKAGE_BROKEN_HPA, }, |
| 4498 | { "WDC WD3200JD-00KLB0", "WD-WCAMR1130137", ATA_HORKAGE_BROKEN_HPA }, | 4502 | { "WDC WD3200JD-00KLB0", "WD-WCAMR1130137", ATA_HORKAGE_BROKEN_HPA }, |
| @@ -4549,13 +4553,16 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = { | |||
| 4549 | ATA_HORKAGE_ZERO_AFTER_TRIM | | 4553 | ATA_HORKAGE_ZERO_AFTER_TRIM | |
| 4550 | ATA_HORKAGE_NOLPM, }, | 4554 | ATA_HORKAGE_NOLPM, }, |
| 4551 | 4555 | ||
| 4552 | /* This specific Samsung model/firmware-rev does not handle LPM well */ | 4556 | /* These specific Samsung models/firmware-revs do not handle LPM well */ |
| 4553 | { "SAMSUNG MZMPC128HBFU-000MV", "CXM14M1Q", ATA_HORKAGE_NOLPM, }, | 4557 | { "SAMSUNG MZMPC128HBFU-000MV", "CXM14M1Q", ATA_HORKAGE_NOLPM, }, |
| 4558 | { "SAMSUNG SSD PM830 mSATA *", "CXM13D1Q", ATA_HORKAGE_NOLPM, }, | ||
| 4554 | 4559 | ||
| 4555 | /* Sandisk devices which are known to not handle LPM well */ | 4560 | /* Sandisk devices which are known to not handle LPM well */ |
| 4556 | { "SanDisk SD7UB3Q*G1001", NULL, ATA_HORKAGE_NOLPM, }, | 4561 | { "SanDisk SD7UB3Q*G1001", NULL, ATA_HORKAGE_NOLPM, }, |
| 4557 | 4562 | ||
| 4558 | /* devices that don't properly handle queued TRIM commands */ | 4563 | /* devices that don't properly handle queued TRIM commands */ |
| 4564 | { "Micron_M500IT_*", "MU01", ATA_HORKAGE_NO_NCQ_TRIM | | ||
| 4565 | ATA_HORKAGE_ZERO_AFTER_TRIM, }, | ||
| 4559 | { "Micron_M500_*", NULL, ATA_HORKAGE_NO_NCQ_TRIM | | 4566 | { "Micron_M500_*", NULL, ATA_HORKAGE_NO_NCQ_TRIM | |
| 4560 | ATA_HORKAGE_ZERO_AFTER_TRIM, }, | 4567 | ATA_HORKAGE_ZERO_AFTER_TRIM, }, |
| 4561 | { "Crucial_CT*M500*", NULL, ATA_HORKAGE_NO_NCQ_TRIM | | 4568 | { "Crucial_CT*M500*", NULL, ATA_HORKAGE_NO_NCQ_TRIM | |
diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c index 2da998baa75c..30cc9c877ebb 100644 --- a/drivers/base/cpu.c +++ b/drivers/base/cpu.c | |||
| @@ -534,14 +534,22 @@ ssize_t __weak cpu_show_spectre_v2(struct device *dev, | |||
| 534 | return sprintf(buf, "Not affected\n"); | 534 | return sprintf(buf, "Not affected\n"); |
| 535 | } | 535 | } |
| 536 | 536 | ||
| 537 | ssize_t __weak cpu_show_spec_store_bypass(struct device *dev, | ||
| 538 | struct device_attribute *attr, char *buf) | ||
| 539 | { | ||
| 540 | return sprintf(buf, "Not affected\n"); | ||
| 541 | } | ||
| 542 | |||
| 537 | static DEVICE_ATTR(meltdown, 0444, cpu_show_meltdown, NULL); | 543 | static DEVICE_ATTR(meltdown, 0444, cpu_show_meltdown, NULL); |
| 538 | static DEVICE_ATTR(spectre_v1, 0444, cpu_show_spectre_v1, NULL); | 544 | static DEVICE_ATTR(spectre_v1, 0444, cpu_show_spectre_v1, NULL); |
| 539 | static DEVICE_ATTR(spectre_v2, 0444, cpu_show_spectre_v2, NULL); | 545 | static DEVICE_ATTR(spectre_v2, 0444, cpu_show_spectre_v2, NULL); |
| 546 | static DEVICE_ATTR(spec_store_bypass, 0444, cpu_show_spec_store_bypass, NULL); | ||
| 540 | 547 | ||
| 541 | static struct attribute *cpu_root_vulnerabilities_attrs[] = { | 548 | static struct attribute *cpu_root_vulnerabilities_attrs[] = { |
| 542 | &dev_attr_meltdown.attr, | 549 | &dev_attr_meltdown.attr, |
| 543 | &dev_attr_spectre_v1.attr, | 550 | &dev_attr_spectre_v1.attr, |
| 544 | &dev_attr_spectre_v2.attr, | 551 | &dev_attr_spectre_v2.attr, |
| 552 | &dev_attr_spec_store_bypass.attr, | ||
| 545 | NULL | 553 | NULL |
| 546 | }; | 554 | }; |
| 547 | 555 | ||
diff --git a/drivers/base/node.c b/drivers/base/node.c index 7a3a580821e0..a5e821d09656 100644 --- a/drivers/base/node.c +++ b/drivers/base/node.c | |||
| @@ -490,7 +490,8 @@ int unregister_mem_sect_under_nodes(struct memory_block *mem_blk, | |||
| 490 | return 0; | 490 | return 0; |
| 491 | } | 491 | } |
| 492 | 492 | ||
| 493 | int link_mem_sections(int nid, unsigned long start_pfn, unsigned long nr_pages) | 493 | int link_mem_sections(int nid, unsigned long start_pfn, unsigned long nr_pages, |
| 494 | bool check_nid) | ||
| 494 | { | 495 | { |
| 495 | unsigned long end_pfn = start_pfn + nr_pages; | 496 | unsigned long end_pfn = start_pfn + nr_pages; |
| 496 | unsigned long pfn; | 497 | unsigned long pfn; |
| @@ -514,7 +515,7 @@ int link_mem_sections(int nid, unsigned long start_pfn, unsigned long nr_pages) | |||
| 514 | 515 | ||
| 515 | mem_blk = find_memory_block_hinted(mem_sect, mem_blk); | 516 | mem_blk = find_memory_block_hinted(mem_sect, mem_blk); |
| 516 | 517 | ||
| 517 | ret = register_mem_sect_under_node(mem_blk, nid, true); | 518 | ret = register_mem_sect_under_node(mem_blk, nid, check_nid); |
| 518 | if (!err) | 519 | if (!err) |
| 519 | err = ret; | 520 | err = ret; |
| 520 | 521 | ||
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c index 02a497e7c785..e5e067091572 100644 --- a/drivers/base/power/main.c +++ b/drivers/base/power/main.c | |||
| @@ -1923,10 +1923,8 @@ static int device_prepare(struct device *dev, pm_message_t state) | |||
| 1923 | 1923 | ||
| 1924 | dev->power.wakeup_path = false; | 1924 | dev->power.wakeup_path = false; |
| 1925 | 1925 | ||
| 1926 | if (dev->power.no_pm_callbacks) { | 1926 | if (dev->power.no_pm_callbacks) |
| 1927 | ret = 1; /* Let device go direct_complete */ | ||
| 1928 | goto unlock; | 1927 | goto unlock; |
| 1929 | } | ||
| 1930 | 1928 | ||
| 1931 | if (dev->pm_domain) | 1929 | if (dev->pm_domain) |
| 1932 | callback = dev->pm_domain->ops.prepare; | 1930 | callback = dev->pm_domain->ops.prepare; |
| @@ -1960,7 +1958,8 @@ unlock: | |||
| 1960 | */ | 1958 | */ |
| 1961 | spin_lock_irq(&dev->power.lock); | 1959 | spin_lock_irq(&dev->power.lock); |
| 1962 | dev->power.direct_complete = state.event == PM_EVENT_SUSPEND && | 1960 | dev->power.direct_complete = state.event == PM_EVENT_SUSPEND && |
| 1963 | pm_runtime_suspended(dev) && ret > 0 && | 1961 | ((pm_runtime_suspended(dev) && ret > 0) || |
| 1962 | dev->power.no_pm_callbacks) && | ||
| 1964 | !dev_pm_test_driver_flags(dev, DPM_FLAG_NEVER_SKIP); | 1963 | !dev_pm_test_driver_flags(dev, DPM_FLAG_NEVER_SKIP); |
| 1965 | spin_unlock_irq(&dev->power.lock); | 1964 | spin_unlock_irq(&dev->power.lock); |
| 1966 | return 0; | 1965 | return 0; |
diff --git a/drivers/bcma/driver_mips.c b/drivers/bcma/driver_mips.c index f040aba48d50..27e9686b6d3a 100644 --- a/drivers/bcma/driver_mips.c +++ b/drivers/bcma/driver_mips.c | |||
| @@ -184,7 +184,7 @@ static void bcma_core_mips_print_irq(struct bcma_device *dev, unsigned int irq) | |||
| 184 | { | 184 | { |
| 185 | int i; | 185 | int i; |
| 186 | static const char *irq_name[] = {"2(S)", "3", "4", "5", "6", "D", "I"}; | 186 | static const char *irq_name[] = {"2(S)", "3", "4", "5", "6", "D", "I"}; |
| 187 | char interrupts[20]; | 187 | char interrupts[25]; |
| 188 | char *ints = interrupts; | 188 | char *ints = interrupts; |
| 189 | 189 | ||
| 190 | for (i = 0; i < ARRAY_SIZE(irq_name); i++) | 190 | for (i = 0; i < ARRAY_SIZE(irq_name); i++) |
diff --git a/drivers/block/loop.c b/drivers/block/loop.c index 5d4e31655d96..55cf554bc914 100644 --- a/drivers/block/loop.c +++ b/drivers/block/loop.c | |||
| @@ -1068,6 +1068,7 @@ static int loop_clr_fd(struct loop_device *lo) | |||
| 1068 | if (bdev) { | 1068 | if (bdev) { |
| 1069 | bdput(bdev); | 1069 | bdput(bdev); |
| 1070 | invalidate_bdev(bdev); | 1070 | invalidate_bdev(bdev); |
| 1071 | bdev->bd_inode->i_mapping->wb_err = 0; | ||
| 1071 | } | 1072 | } |
| 1072 | set_capacity(lo->lo_disk, 0); | 1073 | set_capacity(lo->lo_disk, 0); |
| 1073 | loop_sysfs_exit(lo); | 1074 | loop_sysfs_exit(lo); |
diff --git a/drivers/clk/Kconfig b/drivers/clk/Kconfig index 41492e980ef4..34968a381d0f 100644 --- a/drivers/clk/Kconfig +++ b/drivers/clk/Kconfig | |||
| @@ -266,15 +266,13 @@ config COMMON_CLK_STM32MP157 | |||
| 266 | Support for stm32mp157 SoC family clocks | 266 | Support for stm32mp157 SoC family clocks |
| 267 | 267 | ||
| 268 | config COMMON_CLK_STM32F | 268 | config COMMON_CLK_STM32F |
| 269 | bool "Clock driver for stm32f4 and stm32f7 SoC families" | 269 | def_bool COMMON_CLK && (MACH_STM32F429 || MACH_STM32F469 || MACH_STM32F746) |
| 270 | depends on MACH_STM32F429 || MACH_STM32F469 || MACH_STM32F746 | ||
| 271 | help | 270 | help |
| 272 | ---help--- | 271 | ---help--- |
| 273 | Support for stm32f4 and stm32f7 SoC families clocks | 272 | Support for stm32f4 and stm32f7 SoC families clocks |
| 274 | 273 | ||
| 275 | config COMMON_CLK_STM32H7 | 274 | config COMMON_CLK_STM32H7 |
| 276 | bool "Clock driver for stm32h7 SoC family" | 275 | def_bool COMMON_CLK && MACH_STM32H743 |
| 277 | depends on MACH_STM32H743 | ||
| 278 | help | 276 | help |
| 279 | ---help--- | 277 | ---help--- |
| 280 | Support for stm32h7 SoC family clocks | 278 | Support for stm32h7 SoC family clocks |
diff --git a/drivers/clk/imx/clk-imx6ul.c b/drivers/clk/imx/clk-imx6ul.c index 114ecbb94ec5..12320118f8de 100644 --- a/drivers/clk/imx/clk-imx6ul.c +++ b/drivers/clk/imx/clk-imx6ul.c | |||
| @@ -464,7 +464,7 @@ static void __init imx6ul_clocks_init(struct device_node *ccm_node) | |||
| 464 | clk_set_rate(clks[IMX6UL_CLK_AHB], 99000000); | 464 | clk_set_rate(clks[IMX6UL_CLK_AHB], 99000000); |
| 465 | 465 | ||
| 466 | /* Change periph_pre clock to pll2_bus to adjust AXI rate to 264MHz */ | 466 | /* Change periph_pre clock to pll2_bus to adjust AXI rate to 264MHz */ |
| 467 | clk_set_parent(clks[IMX6UL_CLK_PERIPH_CLK2_SEL], clks[IMX6UL_CLK_PLL3_USB_OTG]); | 467 | clk_set_parent(clks[IMX6UL_CLK_PERIPH_CLK2_SEL], clks[IMX6UL_CLK_OSC]); |
| 468 | clk_set_parent(clks[IMX6UL_CLK_PERIPH], clks[IMX6UL_CLK_PERIPH_CLK2]); | 468 | clk_set_parent(clks[IMX6UL_CLK_PERIPH], clks[IMX6UL_CLK_PERIPH_CLK2]); |
| 469 | clk_set_parent(clks[IMX6UL_CLK_PERIPH_PRE], clks[IMX6UL_CLK_PLL2_BUS]); | 469 | clk_set_parent(clks[IMX6UL_CLK_PERIPH_PRE], clks[IMX6UL_CLK_PLL2_BUS]); |
| 470 | clk_set_parent(clks[IMX6UL_CLK_PERIPH], clks[IMX6UL_CLK_PERIPH_PRE]); | 470 | clk_set_parent(clks[IMX6UL_CLK_PERIPH], clks[IMX6UL_CLK_PERIPH_PRE]); |
diff --git a/drivers/cpufreq/Kconfig.arm b/drivers/cpufreq/Kconfig.arm index de55c7d57438..96b35b8b3606 100644 --- a/drivers/cpufreq/Kconfig.arm +++ b/drivers/cpufreq/Kconfig.arm | |||
| @@ -20,7 +20,7 @@ config ACPI_CPPC_CPUFREQ | |||
| 20 | 20 | ||
| 21 | config ARM_ARMADA_37XX_CPUFREQ | 21 | config ARM_ARMADA_37XX_CPUFREQ |
| 22 | tristate "Armada 37xx CPUFreq support" | 22 | tristate "Armada 37xx CPUFreq support" |
| 23 | depends on ARCH_MVEBU | 23 | depends on ARCH_MVEBU && CPUFREQ_DT |
| 24 | help | 24 | help |
| 25 | This adds the CPUFreq driver support for Marvell Armada 37xx SoCs. | 25 | This adds the CPUFreq driver support for Marvell Armada 37xx SoCs. |
| 26 | The Armada 37xx PMU supports 4 frequency and VDD levels. | 26 | The Armada 37xx PMU supports 4 frequency and VDD levels. |
diff --git a/drivers/dma/qcom/bam_dma.c b/drivers/dma/qcom/bam_dma.c index d29275b97e84..4a828c18099a 100644 --- a/drivers/dma/qcom/bam_dma.c +++ b/drivers/dma/qcom/bam_dma.c | |||
| @@ -524,6 +524,14 @@ static int bam_alloc_chan(struct dma_chan *chan) | |||
| 524 | return 0; | 524 | return 0; |
| 525 | } | 525 | } |
| 526 | 526 | ||
| 527 | static int bam_pm_runtime_get_sync(struct device *dev) | ||
| 528 | { | ||
| 529 | if (pm_runtime_enabled(dev)) | ||
| 530 | return pm_runtime_get_sync(dev); | ||
| 531 | |||
| 532 | return 0; | ||
| 533 | } | ||
| 534 | |||
| 527 | /** | 535 | /** |
| 528 | * bam_free_chan - Frees dma resources associated with specific channel | 536 | * bam_free_chan - Frees dma resources associated with specific channel |
| 529 | * @chan: specified channel | 537 | * @chan: specified channel |
| @@ -539,7 +547,7 @@ static void bam_free_chan(struct dma_chan *chan) | |||
| 539 | unsigned long flags; | 547 | unsigned long flags; |
| 540 | int ret; | 548 | int ret; |
| 541 | 549 | ||
| 542 | ret = pm_runtime_get_sync(bdev->dev); | 550 | ret = bam_pm_runtime_get_sync(bdev->dev); |
| 543 | if (ret < 0) | 551 | if (ret < 0) |
| 544 | return; | 552 | return; |
| 545 | 553 | ||
| @@ -720,7 +728,7 @@ static int bam_pause(struct dma_chan *chan) | |||
| 720 | unsigned long flag; | 728 | unsigned long flag; |
| 721 | int ret; | 729 | int ret; |
| 722 | 730 | ||
| 723 | ret = pm_runtime_get_sync(bdev->dev); | 731 | ret = bam_pm_runtime_get_sync(bdev->dev); |
| 724 | if (ret < 0) | 732 | if (ret < 0) |
| 725 | return ret; | 733 | return ret; |
| 726 | 734 | ||
| @@ -746,7 +754,7 @@ static int bam_resume(struct dma_chan *chan) | |||
| 746 | unsigned long flag; | 754 | unsigned long flag; |
| 747 | int ret; | 755 | int ret; |
| 748 | 756 | ||
| 749 | ret = pm_runtime_get_sync(bdev->dev); | 757 | ret = bam_pm_runtime_get_sync(bdev->dev); |
| 750 | if (ret < 0) | 758 | if (ret < 0) |
| 751 | return ret; | 759 | return ret; |
| 752 | 760 | ||
| @@ -852,7 +860,7 @@ static irqreturn_t bam_dma_irq(int irq, void *data) | |||
| 852 | if (srcs & P_IRQ) | 860 | if (srcs & P_IRQ) |
| 853 | tasklet_schedule(&bdev->task); | 861 | tasklet_schedule(&bdev->task); |
| 854 | 862 | ||
| 855 | ret = pm_runtime_get_sync(bdev->dev); | 863 | ret = bam_pm_runtime_get_sync(bdev->dev); |
| 856 | if (ret < 0) | 864 | if (ret < 0) |
| 857 | return ret; | 865 | return ret; |
| 858 | 866 | ||
| @@ -969,7 +977,7 @@ static void bam_start_dma(struct bam_chan *bchan) | |||
| 969 | if (!vd) | 977 | if (!vd) |
| 970 | return; | 978 | return; |
| 971 | 979 | ||
| 972 | ret = pm_runtime_get_sync(bdev->dev); | 980 | ret = bam_pm_runtime_get_sync(bdev->dev); |
| 973 | if (ret < 0) | 981 | if (ret < 0) |
| 974 | return; | 982 | return; |
| 975 | 983 | ||
diff --git a/drivers/firmware/arm_scmi/driver.c b/drivers/firmware/arm_scmi/driver.c index 14b147135a0c..2455be8cbc4f 100644 --- a/drivers/firmware/arm_scmi/driver.c +++ b/drivers/firmware/arm_scmi/driver.c | |||
| @@ -778,6 +778,7 @@ scmi_create_protocol_device(struct device_node *np, struct scmi_info *info, | |||
| 778 | if (scmi_mbox_chan_setup(info, &sdev->dev, prot_id)) { | 778 | if (scmi_mbox_chan_setup(info, &sdev->dev, prot_id)) { |
| 779 | dev_err(&sdev->dev, "failed to setup transport\n"); | 779 | dev_err(&sdev->dev, "failed to setup transport\n"); |
| 780 | scmi_device_destroy(sdev); | 780 | scmi_device_destroy(sdev); |
| 781 | return; | ||
| 781 | } | 782 | } |
| 782 | 783 | ||
| 783 | /* setup handle now as the transport is ready */ | 784 | /* setup handle now as the transport is ready */ |
diff --git a/drivers/firmware/efi/libstub/arm64-stub.c b/drivers/firmware/efi/libstub/arm64-stub.c index b9bd827caa22..1b4d465cc5d9 100644 --- a/drivers/firmware/efi/libstub/arm64-stub.c +++ b/drivers/firmware/efi/libstub/arm64-stub.c | |||
| @@ -98,6 +98,16 @@ efi_status_t handle_kernel_image(efi_system_table_t *sys_table_arg, | |||
| 98 | (phys_seed >> 32) & mask : TEXT_OFFSET; | 98 | (phys_seed >> 32) & mask : TEXT_OFFSET; |
| 99 | 99 | ||
| 100 | /* | 100 | /* |
| 101 | * With CONFIG_RANDOMIZE_TEXT_OFFSET=y, TEXT_OFFSET may not | ||
| 102 | * be a multiple of EFI_KIMG_ALIGN, and we must ensure that | ||
| 103 | * we preserve the misalignment of 'offset' relative to | ||
| 104 | * EFI_KIMG_ALIGN so that statically allocated objects whose | ||
| 105 | * alignment exceeds PAGE_SIZE appear correctly aligned in | ||
| 106 | * memory. | ||
| 107 | */ | ||
| 108 | offset |= TEXT_OFFSET % EFI_KIMG_ALIGN; | ||
| 109 | |||
| 110 | /* | ||
| 101 | * If KASLR is enabled, and we have some randomness available, | 111 | * If KASLR is enabled, and we have some randomness available, |
| 102 | * locate the kernel at a randomized offset in physical memory. | 112 | * locate the kernel at a randomized offset in physical memory. |
| 103 | */ | 113 | */ |
diff --git a/drivers/firmware/qcom_scm-32.c b/drivers/firmware/qcom_scm-32.c index dfbd894d5bb7..4e24e591ae74 100644 --- a/drivers/firmware/qcom_scm-32.c +++ b/drivers/firmware/qcom_scm-32.c | |||
| @@ -147,7 +147,7 @@ static u32 smc(u32 cmd_addr) | |||
| 147 | "smc #0 @ switch to secure world\n" | 147 | "smc #0 @ switch to secure world\n" |
| 148 | : "=r" (r0) | 148 | : "=r" (r0) |
| 149 | : "r" (r0), "r" (r1), "r" (r2) | 149 | : "r" (r0), "r" (r1), "r" (r2) |
| 150 | : "r3"); | 150 | : "r3", "r12"); |
| 151 | } while (r0 == QCOM_SCM_INTERRUPTED); | 151 | } while (r0 == QCOM_SCM_INTERRUPTED); |
| 152 | 152 | ||
| 153 | return r0; | 153 | return r0; |
| @@ -263,7 +263,7 @@ static s32 qcom_scm_call_atomic1(u32 svc, u32 cmd, u32 arg1) | |||
| 263 | "smc #0 @ switch to secure world\n" | 263 | "smc #0 @ switch to secure world\n" |
| 264 | : "=r" (r0) | 264 | : "=r" (r0) |
| 265 | : "r" (r0), "r" (r1), "r" (r2) | 265 | : "r" (r0), "r" (r1), "r" (r2) |
| 266 | : "r3"); | 266 | : "r3", "r12"); |
| 267 | return r0; | 267 | return r0; |
| 268 | } | 268 | } |
| 269 | 269 | ||
| @@ -298,7 +298,7 @@ static s32 qcom_scm_call_atomic2(u32 svc, u32 cmd, u32 arg1, u32 arg2) | |||
| 298 | "smc #0 @ switch to secure world\n" | 298 | "smc #0 @ switch to secure world\n" |
| 299 | : "=r" (r0) | 299 | : "=r" (r0) |
| 300 | : "r" (r0), "r" (r1), "r" (r2), "r" (r3) | 300 | : "r" (r0), "r" (r1), "r" (r2), "r" (r3) |
| 301 | ); | 301 | : "r12"); |
| 302 | return r0; | 302 | return r0; |
| 303 | } | 303 | } |
| 304 | 304 | ||
| @@ -328,7 +328,7 @@ u32 qcom_scm_get_version(void) | |||
| 328 | "smc #0 @ switch to secure world\n" | 328 | "smc #0 @ switch to secure world\n" |
| 329 | : "=r" (r0), "=r" (r1) | 329 | : "=r" (r0), "=r" (r1) |
| 330 | : "r" (r0), "r" (r1) | 330 | : "r" (r0), "r" (r1) |
| 331 | : "r2", "r3"); | 331 | : "r2", "r3", "r12"); |
| 332 | } while (r0 == QCOM_SCM_INTERRUPTED); | 332 | } while (r0 == QCOM_SCM_INTERRUPTED); |
| 333 | 333 | ||
| 334 | version = r1; | 334 | version = r1; |
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c index a1b9338736e3..c2c21d839727 100644 --- a/drivers/gpu/drm/drm_drv.c +++ b/drivers/gpu/drm/drm_drv.c | |||
| @@ -716,7 +716,7 @@ static void remove_compat_control_link(struct drm_device *dev) | |||
| 716 | if (!minor) | 716 | if (!minor) |
| 717 | return; | 717 | return; |
| 718 | 718 | ||
| 719 | name = kasprintf(GFP_KERNEL, "controlD%d", minor->index); | 719 | name = kasprintf(GFP_KERNEL, "controlD%d", minor->index + 64); |
| 720 | if (!name) | 720 | if (!name) |
| 721 | return; | 721 | return; |
| 722 | 722 | ||
diff --git a/drivers/gpu/drm/drm_dumb_buffers.c b/drivers/gpu/drm/drm_dumb_buffers.c index 39ac15ce4702..9e2ae02f31e0 100644 --- a/drivers/gpu/drm/drm_dumb_buffers.c +++ b/drivers/gpu/drm/drm_dumb_buffers.c | |||
| @@ -65,12 +65,13 @@ int drm_mode_create_dumb_ioctl(struct drm_device *dev, | |||
| 65 | return -EINVAL; | 65 | return -EINVAL; |
| 66 | 66 | ||
| 67 | /* overflow checks for 32bit size calculations */ | 67 | /* overflow checks for 32bit size calculations */ |
| 68 | /* NOTE: DIV_ROUND_UP() can overflow */ | 68 | if (args->bpp > U32_MAX - 8) |
| 69 | return -EINVAL; | ||
| 69 | cpp = DIV_ROUND_UP(args->bpp, 8); | 70 | cpp = DIV_ROUND_UP(args->bpp, 8); |
| 70 | if (!cpp || cpp > 0xffffffffU / args->width) | 71 | if (cpp > U32_MAX / args->width) |
| 71 | return -EINVAL; | 72 | return -EINVAL; |
| 72 | stride = cpp * args->width; | 73 | stride = cpp * args->width; |
| 73 | if (args->height > 0xffffffffU / stride) | 74 | if (args->height > U32_MAX / stride) |
| 74 | return -EINVAL; | 75 | return -EINVAL; |
| 75 | 76 | ||
| 76 | /* test for wrap-around */ | 77 | /* test for wrap-around */ |
diff --git a/drivers/gpu/drm/i915/i915_gem_userptr.c b/drivers/gpu/drm/i915/i915_gem_userptr.c index d596a8302ca3..854bd51b9478 100644 --- a/drivers/gpu/drm/i915/i915_gem_userptr.c +++ b/drivers/gpu/drm/i915/i915_gem_userptr.c | |||
| @@ -778,6 +778,9 @@ i915_gem_userptr_ioctl(struct drm_device *dev, | |||
| 778 | I915_USERPTR_UNSYNCHRONIZED)) | 778 | I915_USERPTR_UNSYNCHRONIZED)) |
| 779 | return -EINVAL; | 779 | return -EINVAL; |
| 780 | 780 | ||
| 781 | if (!args->user_size) | ||
| 782 | return -EINVAL; | ||
| 783 | |||
| 781 | if (offset_in_page(args->user_ptr | args->user_size)) | 784 | if (offset_in_page(args->user_ptr | args->user_size)) |
| 782 | return -EINVAL; | 785 | return -EINVAL; |
| 783 | 786 | ||
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index e6a8c0ee7df1..8a69a9275e28 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h | |||
| @@ -7326,6 +7326,9 @@ enum { | |||
| 7326 | #define SLICE_ECO_CHICKEN0 _MMIO(0x7308) | 7326 | #define SLICE_ECO_CHICKEN0 _MMIO(0x7308) |
| 7327 | #define PIXEL_MASK_CAMMING_DISABLE (1 << 14) | 7327 | #define PIXEL_MASK_CAMMING_DISABLE (1 << 14) |
| 7328 | 7328 | ||
| 7329 | #define GEN9_WM_CHICKEN3 _MMIO(0x5588) | ||
| 7330 | #define GEN9_FACTOR_IN_CLR_VAL_HIZ (1 << 9) | ||
| 7331 | |||
| 7329 | /* WaCatErrorRejectionIssue */ | 7332 | /* WaCatErrorRejectionIssue */ |
| 7330 | #define GEN7_SQ_CHICKEN_MBCUNIT_CONFIG _MMIO(0x9030) | 7333 | #define GEN7_SQ_CHICKEN_MBCUNIT_CONFIG _MMIO(0x9030) |
| 7331 | #define GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB (1<<11) | 7334 | #define GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB (1<<11) |
diff --git a/drivers/gpu/drm/i915/intel_engine_cs.c b/drivers/gpu/drm/i915/intel_engine_cs.c index 4ba139c27fba..f7c25828d3bb 100644 --- a/drivers/gpu/drm/i915/intel_engine_cs.c +++ b/drivers/gpu/drm/i915/intel_engine_cs.c | |||
| @@ -1149,6 +1149,10 @@ static int gen9_init_workarounds(struct intel_engine_cs *engine) | |||
| 1149 | WA_SET_FIELD_MASKED(GEN8_CS_CHICKEN1, GEN9_PREEMPT_GPGPU_LEVEL_MASK, | 1149 | WA_SET_FIELD_MASKED(GEN8_CS_CHICKEN1, GEN9_PREEMPT_GPGPU_LEVEL_MASK, |
| 1150 | GEN9_PREEMPT_GPGPU_COMMAND_LEVEL); | 1150 | GEN9_PREEMPT_GPGPU_COMMAND_LEVEL); |
| 1151 | 1151 | ||
| 1152 | /* WaClearHIZ_WM_CHICKEN3:bxt,glk */ | ||
| 1153 | if (IS_GEN9_LP(dev_priv)) | ||
| 1154 | WA_SET_BIT_MASKED(GEN9_WM_CHICKEN3, GEN9_FACTOR_IN_CLR_VAL_HIZ); | ||
| 1155 | |||
| 1152 | /* WaVFEStateAfterPipeControlwithMediaStateClear:skl,bxt,glk,cfl */ | 1156 | /* WaVFEStateAfterPipeControlwithMediaStateClear:skl,bxt,glk,cfl */ |
| 1153 | ret = wa_ring_whitelist_reg(engine, GEN9_CTX_PREEMPT_REG); | 1157 | ret = wa_ring_whitelist_reg(engine, GEN9_CTX_PREEMPT_REG); |
| 1154 | if (ret) | 1158 | if (ret) |
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c index e3a5f673ff67..8704f7f8d072 100644 --- a/drivers/gpu/drm/i915/intel_lrc.c +++ b/drivers/gpu/drm/i915/intel_lrc.c | |||
| @@ -884,6 +884,7 @@ static void execlists_submission_tasklet(unsigned long data) | |||
| 884 | 884 | ||
| 885 | head = execlists->csb_head; | 885 | head = execlists->csb_head; |
| 886 | tail = READ_ONCE(buf[write_idx]); | 886 | tail = READ_ONCE(buf[write_idx]); |
| 887 | rmb(); /* Hopefully paired with a wmb() in HW */ | ||
| 887 | } | 888 | } |
| 888 | GEM_TRACE("%s cs-irq head=%d [%d%s], tail=%d [%d%s]\n", | 889 | GEM_TRACE("%s cs-irq head=%d [%d%s], tail=%d [%d%s]\n", |
| 889 | engine->name, | 890 | engine->name, |
diff --git a/drivers/gpu/drm/rcar-du/rcar_lvds.c b/drivers/gpu/drm/rcar-du/rcar_lvds.c index 3d2d3bbd1342..155ad840f3c5 100644 --- a/drivers/gpu/drm/rcar-du/rcar_lvds.c +++ b/drivers/gpu/drm/rcar-du/rcar_lvds.c | |||
| @@ -88,6 +88,9 @@ static int rcar_lvds_connector_atomic_check(struct drm_connector *connector, | |||
| 88 | const struct drm_display_mode *panel_mode; | 88 | const struct drm_display_mode *panel_mode; |
| 89 | struct drm_crtc_state *crtc_state; | 89 | struct drm_crtc_state *crtc_state; |
| 90 | 90 | ||
| 91 | if (!state->crtc) | ||
| 92 | return 0; | ||
| 93 | |||
| 91 | if (list_empty(&connector->modes)) { | 94 | if (list_empty(&connector->modes)) { |
| 92 | dev_dbg(lvds->dev, "connector: empty modes list\n"); | 95 | dev_dbg(lvds->dev, "connector: empty modes list\n"); |
| 93 | return -EINVAL; | 96 | return -EINVAL; |
diff --git a/drivers/gpu/drm/vc4/vc4_drv.c b/drivers/gpu/drm/vc4/vc4_drv.c index 94b99c90425a..7c95ed5c5cac 100644 --- a/drivers/gpu/drm/vc4/vc4_drv.c +++ b/drivers/gpu/drm/vc4/vc4_drv.c | |||
| @@ -130,6 +130,7 @@ static void vc4_close(struct drm_device *dev, struct drm_file *file) | |||
| 130 | struct vc4_file *vc4file = file->driver_priv; | 130 | struct vc4_file *vc4file = file->driver_priv; |
| 131 | 131 | ||
| 132 | vc4_perfmon_close_file(vc4file); | 132 | vc4_perfmon_close_file(vc4file); |
| 133 | kfree(vc4file); | ||
| 133 | } | 134 | } |
| 134 | 135 | ||
| 135 | static const struct vm_operations_struct vc4_vm_ops = { | 136 | static const struct vm_operations_struct vc4_vm_ops = { |
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c index 70e1a8820a7c..8b770a8e02cd 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c | |||
| @@ -1278,8 +1278,6 @@ static void vmw_master_drop(struct drm_device *dev, | |||
| 1278 | dev_priv->active_master = &dev_priv->fbdev_master; | 1278 | dev_priv->active_master = &dev_priv->fbdev_master; |
| 1279 | ttm_lock_set_kill(&dev_priv->fbdev_master.lock, false, SIGTERM); | 1279 | ttm_lock_set_kill(&dev_priv->fbdev_master.lock, false, SIGTERM); |
| 1280 | ttm_vt_unlock(&dev_priv->fbdev_master.lock); | 1280 | ttm_vt_unlock(&dev_priv->fbdev_master.lock); |
| 1281 | |||
| 1282 | vmw_fb_refresh(dev_priv); | ||
| 1283 | } | 1281 | } |
| 1284 | 1282 | ||
| 1285 | /** | 1283 | /** |
| @@ -1483,7 +1481,6 @@ static int vmw_pm_freeze(struct device *kdev) | |||
| 1483 | vmw_kms_resume(dev); | 1481 | vmw_kms_resume(dev); |
| 1484 | if (dev_priv->enable_fb) | 1482 | if (dev_priv->enable_fb) |
| 1485 | vmw_fb_on(dev_priv); | 1483 | vmw_fb_on(dev_priv); |
| 1486 | vmw_fb_refresh(dev_priv); | ||
| 1487 | return -EBUSY; | 1484 | return -EBUSY; |
| 1488 | } | 1485 | } |
| 1489 | 1486 | ||
| @@ -1523,8 +1520,6 @@ static int vmw_pm_restore(struct device *kdev) | |||
| 1523 | if (dev_priv->enable_fb) | 1520 | if (dev_priv->enable_fb) |
| 1524 | vmw_fb_on(dev_priv); | 1521 | vmw_fb_on(dev_priv); |
| 1525 | 1522 | ||
| 1526 | vmw_fb_refresh(dev_priv); | ||
| 1527 | |||
| 1528 | return 0; | 1523 | return 0; |
| 1529 | } | 1524 | } |
| 1530 | 1525 | ||
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h index f34f368c1a2e..5fcbe1620d50 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h | |||
| @@ -910,7 +910,6 @@ int vmw_fb_init(struct vmw_private *vmw_priv); | |||
| 910 | int vmw_fb_close(struct vmw_private *dev_priv); | 910 | int vmw_fb_close(struct vmw_private *dev_priv); |
| 911 | int vmw_fb_off(struct vmw_private *vmw_priv); | 911 | int vmw_fb_off(struct vmw_private *vmw_priv); |
| 912 | int vmw_fb_on(struct vmw_private *vmw_priv); | 912 | int vmw_fb_on(struct vmw_private *vmw_priv); |
| 913 | void vmw_fb_refresh(struct vmw_private *vmw_priv); | ||
| 914 | 913 | ||
| 915 | /** | 914 | /** |
| 916 | * Kernel modesetting - vmwgfx_kms.c | 915 | * Kernel modesetting - vmwgfx_kms.c |
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c index ba0cdb743c3e..54e300365a5c 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c | |||
| @@ -866,21 +866,13 @@ int vmw_fb_on(struct vmw_private *vmw_priv) | |||
| 866 | spin_lock_irqsave(&par->dirty.lock, flags); | 866 | spin_lock_irqsave(&par->dirty.lock, flags); |
| 867 | par->dirty.active = true; | 867 | par->dirty.active = true; |
| 868 | spin_unlock_irqrestore(&par->dirty.lock, flags); | 868 | spin_unlock_irqrestore(&par->dirty.lock, flags); |
| 869 | |||
| 870 | return 0; | ||
| 871 | } | ||
| 872 | 869 | ||
| 873 | /** | 870 | /* |
| 874 | * vmw_fb_refresh - Refresh fb display | 871 | * Need to reschedule a dirty update, because otherwise that's |
| 875 | * | 872 | * only done in dirty_mark() if the previous coalesced |
| 876 | * @vmw_priv: Pointer to device private | 873 | * dirty region was empty. |
| 877 | * | 874 | */ |
| 878 | * Call into kms to show the fbdev display(s). | 875 | schedule_delayed_work(&par->local_work, 0); |
| 879 | */ | ||
| 880 | void vmw_fb_refresh(struct vmw_private *vmw_priv) | ||
| 881 | { | ||
| 882 | if (!vmw_priv->fb_info) | ||
| 883 | return; | ||
| 884 | 876 | ||
| 885 | vmw_fb_set_par(vmw_priv->fb_info); | 877 | return 0; |
| 886 | } | 878 | } |
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c b/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c index cdff99211602..21d746bdc922 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c | |||
| @@ -329,8 +329,6 @@ int vmw_host_get_guestinfo(const char *guest_info_param, | |||
| 329 | struct rpc_channel channel; | 329 | struct rpc_channel channel; |
| 330 | char *msg, *reply = NULL; | 330 | char *msg, *reply = NULL; |
| 331 | size_t reply_len = 0; | 331 | size_t reply_len = 0; |
| 332 | int ret = 0; | ||
| 333 | |||
| 334 | 332 | ||
| 335 | if (!vmw_msg_enabled) | 333 | if (!vmw_msg_enabled) |
| 336 | return -ENODEV; | 334 | return -ENODEV; |
| @@ -344,15 +342,14 @@ int vmw_host_get_guestinfo(const char *guest_info_param, | |||
| 344 | return -ENOMEM; | 342 | return -ENOMEM; |
| 345 | } | 343 | } |
| 346 | 344 | ||
| 347 | if (vmw_open_channel(&channel, RPCI_PROTOCOL_NUM) || | 345 | if (vmw_open_channel(&channel, RPCI_PROTOCOL_NUM)) |
| 348 | vmw_send_msg(&channel, msg) || | 346 | goto out_open; |
| 349 | vmw_recv_msg(&channel, (void *) &reply, &reply_len) || | ||
| 350 | vmw_close_channel(&channel)) { | ||
| 351 | DRM_ERROR("Failed to get %s", guest_info_param); | ||
| 352 | 347 | ||
| 353 | ret = -EINVAL; | 348 | if (vmw_send_msg(&channel, msg) || |
| 354 | } | 349 | vmw_recv_msg(&channel, (void *) &reply, &reply_len)) |
| 350 | goto out_msg; | ||
| 355 | 351 | ||
| 352 | vmw_close_channel(&channel); | ||
| 356 | if (buffer && reply && reply_len > 0) { | 353 | if (buffer && reply && reply_len > 0) { |
| 357 | /* Remove reply code, which are the first 2 characters of | 354 | /* Remove reply code, which are the first 2 characters of |
| 358 | * the reply | 355 | * the reply |
| @@ -369,7 +366,17 @@ int vmw_host_get_guestinfo(const char *guest_info_param, | |||
| 369 | kfree(reply); | 366 | kfree(reply); |
| 370 | kfree(msg); | 367 | kfree(msg); |
| 371 | 368 | ||
| 372 | return ret; | 369 | return 0; |
| 370 | |||
| 371 | out_msg: | ||
| 372 | vmw_close_channel(&channel); | ||
| 373 | kfree(reply); | ||
| 374 | out_open: | ||
| 375 | *length = 0; | ||
| 376 | kfree(msg); | ||
| 377 | DRM_ERROR("Failed to get %s", guest_info_param); | ||
| 378 | |||
| 379 | return -EINVAL; | ||
| 373 | } | 380 | } |
| 374 | 381 | ||
| 375 | 382 | ||
| @@ -400,15 +407,22 @@ int vmw_host_log(const char *log) | |||
| 400 | return -ENOMEM; | 407 | return -ENOMEM; |
| 401 | } | 408 | } |
| 402 | 409 | ||
| 403 | if (vmw_open_channel(&channel, RPCI_PROTOCOL_NUM) || | 410 | if (vmw_open_channel(&channel, RPCI_PROTOCOL_NUM)) |
| 404 | vmw_send_msg(&channel, msg) || | 411 | goto out_open; |
| 405 | vmw_close_channel(&channel)) { | ||
| 406 | DRM_ERROR("Failed to send log\n"); | ||
| 407 | 412 | ||
| 408 | ret = -EINVAL; | 413 | if (vmw_send_msg(&channel, msg)) |
| 409 | } | 414 | goto out_msg; |
| 410 | 415 | ||
| 416 | vmw_close_channel(&channel); | ||
| 411 | kfree(msg); | 417 | kfree(msg); |
| 412 | 418 | ||
| 413 | return ret; | 419 | return 0; |
| 420 | |||
| 421 | out_msg: | ||
| 422 | vmw_close_channel(&channel); | ||
| 423 | out_open: | ||
| 424 | kfree(msg); | ||
| 425 | DRM_ERROR("Failed to send log\n"); | ||
| 426 | |||
| 427 | return -EINVAL; | ||
| 414 | } | 428 | } |
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_msg.h b/drivers/gpu/drm/vmwgfx/vmwgfx_msg.h index 557a033fb610..8545488aa0cf 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_msg.h +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_msg.h | |||
| @@ -135,17 +135,24 @@ | |||
| 135 | 135 | ||
| 136 | #else | 136 | #else |
| 137 | 137 | ||
| 138 | /* In the 32-bit version of this macro, we use "m" because there is no | 138 | /* |
| 139 | * more register left for bp | 139 | * In the 32-bit version of this macro, we store bp in a memory location |
| 140 | * because we've ran out of registers. | ||
| 141 | * Now we can't reference that memory location while we've modified | ||
| 142 | * %esp or %ebp, so we first push it on the stack, just before we push | ||
| 143 | * %ebp, and then when we need it we read it from the stack where we | ||
| 144 | * just pushed it. | ||
| 140 | */ | 145 | */ |
| 141 | #define VMW_PORT_HB_OUT(cmd, in_ecx, in_si, in_di, \ | 146 | #define VMW_PORT_HB_OUT(cmd, in_ecx, in_si, in_di, \ |
| 142 | port_num, magic, bp, \ | 147 | port_num, magic, bp, \ |
| 143 | eax, ebx, ecx, edx, si, di) \ | 148 | eax, ebx, ecx, edx, si, di) \ |
| 144 | ({ \ | 149 | ({ \ |
| 145 | asm volatile ("push %%ebp;" \ | 150 | asm volatile ("push %12;" \ |
| 146 | "mov %12, %%ebp;" \ | 151 | "push %%ebp;" \ |
| 152 | "mov 0x04(%%esp), %%ebp;" \ | ||
| 147 | "rep outsb;" \ | 153 | "rep outsb;" \ |
| 148 | "pop %%ebp;" : \ | 154 | "pop %%ebp;" \ |
| 155 | "add $0x04, %%esp;" : \ | ||
| 149 | "=a"(eax), \ | 156 | "=a"(eax), \ |
| 150 | "=b"(ebx), \ | 157 | "=b"(ebx), \ |
| 151 | "=c"(ecx), \ | 158 | "=c"(ecx), \ |
| @@ -167,10 +174,12 @@ | |||
| 167 | port_num, magic, bp, \ | 174 | port_num, magic, bp, \ |
| 168 | eax, ebx, ecx, edx, si, di) \ | 175 | eax, ebx, ecx, edx, si, di) \ |
| 169 | ({ \ | 176 | ({ \ |
| 170 | asm volatile ("push %%ebp;" \ | 177 | asm volatile ("push %12;" \ |
| 171 | "mov %12, %%ebp;" \ | 178 | "push %%ebp;" \ |
| 179 | "mov 0x04(%%esp), %%ebp;" \ | ||
| 172 | "rep insb;" \ | 180 | "rep insb;" \ |
| 173 | "pop %%ebp" : \ | 181 | "pop %%ebp;" \ |
| 182 | "add $0x04, %%esp;" : \ | ||
| 174 | "=a"(eax), \ | 183 | "=a"(eax), \ |
| 175 | "=b"(ebx), \ | 184 | "=b"(ebx), \ |
| 176 | "=c"(ecx), \ | 185 | "=c"(ecx), \ |
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c index 648f8127f65a..3d667e903beb 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c | |||
| @@ -482,6 +482,8 @@ vmw_sou_primary_plane_prepare_fb(struct drm_plane *plane, | |||
| 482 | return ret; | 482 | return ret; |
| 483 | } | 483 | } |
| 484 | 484 | ||
| 485 | vps->dmabuf_size = size; | ||
| 486 | |||
| 485 | /* | 487 | /* |
| 486 | * TTM already thinks the buffer is pinned, but make sure the | 488 | * TTM already thinks the buffer is pinned, but make sure the |
| 487 | * pin_count is upped. | 489 | * pin_count is upped. |
diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig index f249a4428458..6ec307c93ece 100644 --- a/drivers/hwmon/Kconfig +++ b/drivers/hwmon/Kconfig | |||
| @@ -272,7 +272,7 @@ config SENSORS_K8TEMP | |||
| 272 | 272 | ||
| 273 | config SENSORS_K10TEMP | 273 | config SENSORS_K10TEMP |
| 274 | tristate "AMD Family 10h+ temperature sensor" | 274 | tristate "AMD Family 10h+ temperature sensor" |
| 275 | depends on X86 && PCI | 275 | depends on X86 && PCI && AMD_NB |
| 276 | help | 276 | help |
| 277 | If you say yes here you get support for the temperature | 277 | If you say yes here you get support for the temperature |
| 278 | sensor(s) inside your CPU. Supported are later revisions of | 278 | sensor(s) inside your CPU. Supported are later revisions of |
diff --git a/drivers/hwmon/k10temp.c b/drivers/hwmon/k10temp.c index d2cc55e21374..3b73dee6fdc6 100644 --- a/drivers/hwmon/k10temp.c +++ b/drivers/hwmon/k10temp.c | |||
| @@ -23,6 +23,7 @@ | |||
| 23 | #include <linux/init.h> | 23 | #include <linux/init.h> |
| 24 | #include <linux/module.h> | 24 | #include <linux/module.h> |
| 25 | #include <linux/pci.h> | 25 | #include <linux/pci.h> |
| 26 | #include <asm/amd_nb.h> | ||
| 26 | #include <asm/processor.h> | 27 | #include <asm/processor.h> |
| 27 | 28 | ||
| 28 | MODULE_DESCRIPTION("AMD Family 10h+ CPU core temperature monitor"); | 29 | MODULE_DESCRIPTION("AMD Family 10h+ CPU core temperature monitor"); |
| @@ -40,8 +41,8 @@ static DEFINE_MUTEX(nb_smu_ind_mutex); | |||
| 40 | #define PCI_DEVICE_ID_AMD_17H_DF_F3 0x1463 | 41 | #define PCI_DEVICE_ID_AMD_17H_DF_F3 0x1463 |
| 41 | #endif | 42 | #endif |
| 42 | 43 | ||
| 43 | #ifndef PCI_DEVICE_ID_AMD_17H_RR_NB | 44 | #ifndef PCI_DEVICE_ID_AMD_17H_M10H_DF_F3 |
| 44 | #define PCI_DEVICE_ID_AMD_17H_RR_NB 0x15d0 | 45 | #define PCI_DEVICE_ID_AMD_17H_M10H_DF_F3 0x15eb |
| 45 | #endif | 46 | #endif |
| 46 | 47 | ||
| 47 | /* CPUID function 0x80000001, ebx */ | 48 | /* CPUID function 0x80000001, ebx */ |
| @@ -63,10 +64,12 @@ static DEFINE_MUTEX(nb_smu_ind_mutex); | |||
| 63 | #define NB_CAP_HTC 0x00000400 | 64 | #define NB_CAP_HTC 0x00000400 |
| 64 | 65 | ||
| 65 | /* | 66 | /* |
| 66 | * For F15h M60h, functionality of REG_REPORTED_TEMPERATURE | 67 | * For F15h M60h and M70h, REG_HARDWARE_THERMAL_CONTROL |
| 67 | * has been moved to D0F0xBC_xD820_0CA4 [Reported Temperature | 68 | * and REG_REPORTED_TEMPERATURE have been moved to |
| 68 | * Control] | 69 | * D0F0xBC_xD820_0C64 [Hardware Temperature Control] |
| 70 | * D0F0xBC_xD820_0CA4 [Reported Temperature Control] | ||
| 69 | */ | 71 | */ |
| 72 | #define F15H_M60H_HARDWARE_TEMP_CTRL_OFFSET 0xd8200c64 | ||
| 70 | #define F15H_M60H_REPORTED_TEMP_CTRL_OFFSET 0xd8200ca4 | 73 | #define F15H_M60H_REPORTED_TEMP_CTRL_OFFSET 0xd8200ca4 |
| 71 | 74 | ||
| 72 | /* F17h M01h Access througn SMN */ | 75 | /* F17h M01h Access througn SMN */ |
| @@ -74,6 +77,7 @@ static DEFINE_MUTEX(nb_smu_ind_mutex); | |||
| 74 | 77 | ||
| 75 | struct k10temp_data { | 78 | struct k10temp_data { |
| 76 | struct pci_dev *pdev; | 79 | struct pci_dev *pdev; |
| 80 | void (*read_htcreg)(struct pci_dev *pdev, u32 *regval); | ||
| 77 | void (*read_tempreg)(struct pci_dev *pdev, u32 *regval); | 81 | void (*read_tempreg)(struct pci_dev *pdev, u32 *regval); |
| 78 | int temp_offset; | 82 | int temp_offset; |
| 79 | u32 temp_adjust_mask; | 83 | u32 temp_adjust_mask; |
| @@ -98,6 +102,11 @@ static const struct tctl_offset tctl_offset_table[] = { | |||
| 98 | { 0x17, "AMD Ryzen Threadripper 1910", 10000 }, | 102 | { 0x17, "AMD Ryzen Threadripper 1910", 10000 }, |
| 99 | }; | 103 | }; |
| 100 | 104 | ||
| 105 | static void read_htcreg_pci(struct pci_dev *pdev, u32 *regval) | ||
| 106 | { | ||
| 107 | pci_read_config_dword(pdev, REG_HARDWARE_THERMAL_CONTROL, regval); | ||
| 108 | } | ||
| 109 | |||
| 101 | static void read_tempreg_pci(struct pci_dev *pdev, u32 *regval) | 110 | static void read_tempreg_pci(struct pci_dev *pdev, u32 *regval) |
| 102 | { | 111 | { |
| 103 | pci_read_config_dword(pdev, REG_REPORTED_TEMPERATURE, regval); | 112 | pci_read_config_dword(pdev, REG_REPORTED_TEMPERATURE, regval); |
| @@ -114,6 +123,12 @@ static void amd_nb_index_read(struct pci_dev *pdev, unsigned int devfn, | |||
| 114 | mutex_unlock(&nb_smu_ind_mutex); | 123 | mutex_unlock(&nb_smu_ind_mutex); |
| 115 | } | 124 | } |
| 116 | 125 | ||
| 126 | static void read_htcreg_nb_f15(struct pci_dev *pdev, u32 *regval) | ||
| 127 | { | ||
| 128 | amd_nb_index_read(pdev, PCI_DEVFN(0, 0), 0xb8, | ||
| 129 | F15H_M60H_HARDWARE_TEMP_CTRL_OFFSET, regval); | ||
| 130 | } | ||
| 131 | |||
| 117 | static void read_tempreg_nb_f15(struct pci_dev *pdev, u32 *regval) | 132 | static void read_tempreg_nb_f15(struct pci_dev *pdev, u32 *regval) |
| 118 | { | 133 | { |
| 119 | amd_nb_index_read(pdev, PCI_DEVFN(0, 0), 0xb8, | 134 | amd_nb_index_read(pdev, PCI_DEVFN(0, 0), 0xb8, |
| @@ -122,8 +137,8 @@ static void read_tempreg_nb_f15(struct pci_dev *pdev, u32 *regval) | |||
| 122 | 137 | ||
| 123 | static void read_tempreg_nb_f17(struct pci_dev *pdev, u32 *regval) | 138 | static void read_tempreg_nb_f17(struct pci_dev *pdev, u32 *regval) |
| 124 | { | 139 | { |
| 125 | amd_nb_index_read(pdev, PCI_DEVFN(0, 0), 0x60, | 140 | amd_smn_read(amd_pci_dev_to_node_id(pdev), |
| 126 | F17H_M01H_REPORTED_TEMP_CTRL_OFFSET, regval); | 141 | F17H_M01H_REPORTED_TEMP_CTRL_OFFSET, regval); |
| 127 | } | 142 | } |
| 128 | 143 | ||
| 129 | static ssize_t temp1_input_show(struct device *dev, | 144 | static ssize_t temp1_input_show(struct device *dev, |
| @@ -160,8 +175,7 @@ static ssize_t show_temp_crit(struct device *dev, | |||
| 160 | u32 regval; | 175 | u32 regval; |
| 161 | int value; | 176 | int value; |
| 162 | 177 | ||
| 163 | pci_read_config_dword(data->pdev, | 178 | data->read_htcreg(data->pdev, ®val); |
| 164 | REG_HARDWARE_THERMAL_CONTROL, ®val); | ||
| 165 | value = ((regval >> 16) & 0x7f) * 500 + 52000; | 179 | value = ((regval >> 16) & 0x7f) * 500 + 52000; |
| 166 | if (show_hyst) | 180 | if (show_hyst) |
| 167 | value -= ((regval >> 24) & 0xf) * 500; | 181 | value -= ((regval >> 24) & 0xf) * 500; |
| @@ -181,13 +195,18 @@ static umode_t k10temp_is_visible(struct kobject *kobj, | |||
| 181 | struct pci_dev *pdev = data->pdev; | 195 | struct pci_dev *pdev = data->pdev; |
| 182 | 196 | ||
| 183 | if (index >= 2) { | 197 | if (index >= 2) { |
| 184 | u32 reg_caps, reg_htc; | 198 | u32 reg; |
| 199 | |||
| 200 | if (!data->read_htcreg) | ||
| 201 | return 0; | ||
| 185 | 202 | ||
| 186 | pci_read_config_dword(pdev, REG_NORTHBRIDGE_CAPABILITIES, | 203 | pci_read_config_dword(pdev, REG_NORTHBRIDGE_CAPABILITIES, |
| 187 | ®_caps); | 204 | ®); |
| 188 | pci_read_config_dword(pdev, REG_HARDWARE_THERMAL_CONTROL, | 205 | if (!(reg & NB_CAP_HTC)) |
| 189 | ®_htc); | 206 | return 0; |
| 190 | if (!(reg_caps & NB_CAP_HTC) || !(reg_htc & HTC_ENABLE)) | 207 | |
| 208 | data->read_htcreg(data->pdev, ®); | ||
| 209 | if (!(reg & HTC_ENABLE)) | ||
| 191 | return 0; | 210 | return 0; |
| 192 | } | 211 | } |
| 193 | return attr->mode; | 212 | return attr->mode; |
| @@ -268,11 +287,13 @@ static int k10temp_probe(struct pci_dev *pdev, | |||
| 268 | 287 | ||
| 269 | if (boot_cpu_data.x86 == 0x15 && (boot_cpu_data.x86_model == 0x60 || | 288 | if (boot_cpu_data.x86 == 0x15 && (boot_cpu_data.x86_model == 0x60 || |
| 270 | boot_cpu_data.x86_model == 0x70)) { | 289 | boot_cpu_data.x86_model == 0x70)) { |
| 290 | data->read_htcreg = read_htcreg_nb_f15; | ||
| 271 | data->read_tempreg = read_tempreg_nb_f15; | 291 | data->read_tempreg = read_tempreg_nb_f15; |
| 272 | } else if (boot_cpu_data.x86 == 0x17) { | 292 | } else if (boot_cpu_data.x86 == 0x17) { |
| 273 | data->temp_adjust_mask = 0x80000; | 293 | data->temp_adjust_mask = 0x80000; |
| 274 | data->read_tempreg = read_tempreg_nb_f17; | 294 | data->read_tempreg = read_tempreg_nb_f17; |
| 275 | } else { | 295 | } else { |
| 296 | data->read_htcreg = read_htcreg_pci; | ||
| 276 | data->read_tempreg = read_tempreg_pci; | 297 | data->read_tempreg = read_tempreg_pci; |
| 277 | } | 298 | } |
| 278 | 299 | ||
| @@ -302,7 +323,7 @@ static const struct pci_device_id k10temp_id_table[] = { | |||
| 302 | { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_16H_NB_F3) }, | 323 | { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_16H_NB_F3) }, |
| 303 | { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_16H_M30H_NB_F3) }, | 324 | { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_16H_M30H_NB_F3) }, |
| 304 | { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_17H_DF_F3) }, | 325 | { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_17H_DF_F3) }, |
| 305 | { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_17H_RR_NB) }, | 326 | { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_17H_M10H_DF_F3) }, |
| 306 | {} | 327 | {} |
| 307 | }; | 328 | }; |
| 308 | MODULE_DEVICE_TABLE(pci, k10temp_id_table); | 329 | MODULE_DEVICE_TABLE(pci, k10temp_id_table); |
diff --git a/drivers/i2c/busses/i2c-designware-master.c b/drivers/i2c/busses/i2c-designware-master.c index fd36c39ddf4e..0cdba29ae0a9 100644 --- a/drivers/i2c/busses/i2c-designware-master.c +++ b/drivers/i2c/busses/i2c-designware-master.c | |||
| @@ -209,7 +209,10 @@ static void i2c_dw_xfer_init(struct dw_i2c_dev *dev) | |||
| 209 | i2c_dw_disable_int(dev); | 209 | i2c_dw_disable_int(dev); |
| 210 | 210 | ||
| 211 | /* Enable the adapter */ | 211 | /* Enable the adapter */ |
| 212 | __i2c_dw_enable_and_wait(dev, true); | 212 | __i2c_dw_enable(dev, true); |
| 213 | |||
| 214 | /* Dummy read to avoid the register getting stuck on Bay Trail */ | ||
| 215 | dw_readl(dev, DW_IC_ENABLE_STATUS); | ||
| 213 | 216 | ||
| 214 | /* Clear and enable interrupts */ | 217 | /* Clear and enable interrupts */ |
| 215 | dw_readl(dev, DW_IC_CLR_INTR); | 218 | dw_readl(dev, DW_IC_CLR_INTR); |
diff --git a/drivers/i2c/busses/i2c-pmcmsp.c b/drivers/i2c/busses/i2c-pmcmsp.c index 2aa0e83174c5..dae8ac618a52 100644 --- a/drivers/i2c/busses/i2c-pmcmsp.c +++ b/drivers/i2c/busses/i2c-pmcmsp.c | |||
| @@ -564,10 +564,10 @@ static int pmcmsptwi_master_xfer(struct i2c_adapter *adap, | |||
| 564 | * TODO: We could potentially loop and retry in the case | 564 | * TODO: We could potentially loop and retry in the case |
| 565 | * of MSP_TWI_XFER_TIMEOUT. | 565 | * of MSP_TWI_XFER_TIMEOUT. |
| 566 | */ | 566 | */ |
| 567 | return -1; | 567 | return -EIO; |
| 568 | } | 568 | } |
| 569 | 569 | ||
| 570 | return 0; | 570 | return num; |
| 571 | } | 571 | } |
| 572 | 572 | ||
| 573 | static u32 pmcmsptwi_i2c_func(struct i2c_adapter *adapter) | 573 | static u32 pmcmsptwi_i2c_func(struct i2c_adapter *adapter) |
diff --git a/drivers/i2c/busses/i2c-viperboard.c b/drivers/i2c/busses/i2c-viperboard.c index e4be86b3de9a..7235c7302bb7 100644 --- a/drivers/i2c/busses/i2c-viperboard.c +++ b/drivers/i2c/busses/i2c-viperboard.c | |||
| @@ -337,7 +337,7 @@ static int vprbrd_i2c_xfer(struct i2c_adapter *i2c, struct i2c_msg *msgs, | |||
| 337 | } | 337 | } |
| 338 | mutex_unlock(&vb->lock); | 338 | mutex_unlock(&vb->lock); |
| 339 | } | 339 | } |
| 340 | return 0; | 340 | return num; |
| 341 | error: | 341 | error: |
| 342 | mutex_unlock(&vb->lock); | 342 | mutex_unlock(&vb->lock); |
| 343 | return error; | 343 | return error; |
diff --git a/drivers/i2c/i2c-core-acpi.c b/drivers/i2c/i2c-core-acpi.c index a9126b3cda61..7c3b4740b94b 100644 --- a/drivers/i2c/i2c-core-acpi.c +++ b/drivers/i2c/i2c-core-acpi.c | |||
| @@ -445,10 +445,17 @@ static int acpi_gsb_i2c_read_bytes(struct i2c_client *client, | |||
| 445 | msgs[1].buf = buffer; | 445 | msgs[1].buf = buffer; |
| 446 | 446 | ||
| 447 | ret = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs)); | 447 | ret = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs)); |
| 448 | if (ret < 0) | 448 | if (ret < 0) { |
| 449 | dev_err(&client->adapter->dev, "i2c read failed\n"); | 449 | /* Getting a NACK is unfortunately normal with some DSTDs */ |
| 450 | else | 450 | if (ret == -EREMOTEIO) |
| 451 | dev_dbg(&client->adapter->dev, "i2c read %d bytes from client@%#x starting at reg %#x failed, error: %d\n", | ||
| 452 | data_len, client->addr, cmd, ret); | ||
| 453 | else | ||
| 454 | dev_err(&client->adapter->dev, "i2c read %d bytes from client@%#x starting at reg %#x failed, error: %d\n", | ||
| 455 | data_len, client->addr, cmd, ret); | ||
| 456 | } else { | ||
| 451 | memcpy(data, buffer, data_len); | 457 | memcpy(data, buffer, data_len); |
| 458 | } | ||
| 452 | 459 | ||
| 453 | kfree(buffer); | 460 | kfree(buffer); |
| 454 | return ret; | 461 | return ret; |
diff --git a/drivers/infiniband/core/umem.c b/drivers/infiniband/core/umem.c index 9a4e899d94b3..2b6c9b516070 100644 --- a/drivers/infiniband/core/umem.c +++ b/drivers/infiniband/core/umem.c | |||
| @@ -119,7 +119,6 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr, | |||
| 119 | umem->length = size; | 119 | umem->length = size; |
| 120 | umem->address = addr; | 120 | umem->address = addr; |
| 121 | umem->page_shift = PAGE_SHIFT; | 121 | umem->page_shift = PAGE_SHIFT; |
| 122 | umem->pid = get_task_pid(current, PIDTYPE_PID); | ||
| 123 | /* | 122 | /* |
| 124 | * We ask for writable memory if any of the following | 123 | * We ask for writable memory if any of the following |
| 125 | * access flags are set. "Local write" and "remote write" | 124 | * access flags are set. "Local write" and "remote write" |
| @@ -132,7 +131,6 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr, | |||
| 132 | IB_ACCESS_REMOTE_ATOMIC | IB_ACCESS_MW_BIND)); | 131 | IB_ACCESS_REMOTE_ATOMIC | IB_ACCESS_MW_BIND)); |
| 133 | 132 | ||
| 134 | if (access & IB_ACCESS_ON_DEMAND) { | 133 | if (access & IB_ACCESS_ON_DEMAND) { |
| 135 | put_pid(umem->pid); | ||
| 136 | ret = ib_umem_odp_get(context, umem, access); | 134 | ret = ib_umem_odp_get(context, umem, access); |
| 137 | if (ret) { | 135 | if (ret) { |
| 138 | kfree(umem); | 136 | kfree(umem); |
| @@ -148,7 +146,6 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr, | |||
| 148 | 146 | ||
| 149 | page_list = (struct page **) __get_free_page(GFP_KERNEL); | 147 | page_list = (struct page **) __get_free_page(GFP_KERNEL); |
| 150 | if (!page_list) { | 148 | if (!page_list) { |
| 151 | put_pid(umem->pid); | ||
| 152 | kfree(umem); | 149 | kfree(umem); |
| 153 | return ERR_PTR(-ENOMEM); | 150 | return ERR_PTR(-ENOMEM); |
| 154 | } | 151 | } |
| @@ -231,7 +228,6 @@ out: | |||
| 231 | if (ret < 0) { | 228 | if (ret < 0) { |
| 232 | if (need_release) | 229 | if (need_release) |
| 233 | __ib_umem_release(context->device, umem, 0); | 230 | __ib_umem_release(context->device, umem, 0); |
| 234 | put_pid(umem->pid); | ||
| 235 | kfree(umem); | 231 | kfree(umem); |
| 236 | } else | 232 | } else |
| 237 | current->mm->pinned_vm = locked; | 233 | current->mm->pinned_vm = locked; |
| @@ -274,8 +270,7 @@ void ib_umem_release(struct ib_umem *umem) | |||
| 274 | 270 | ||
| 275 | __ib_umem_release(umem->context->device, umem, 1); | 271 | __ib_umem_release(umem->context->device, umem, 1); |
| 276 | 272 | ||
| 277 | task = get_pid_task(umem->pid, PIDTYPE_PID); | 273 | task = get_pid_task(umem->context->tgid, PIDTYPE_PID); |
| 278 | put_pid(umem->pid); | ||
| 279 | if (!task) | 274 | if (!task) |
| 280 | goto out; | 275 | goto out; |
| 281 | mm = get_task_mm(task); | 276 | mm = get_task_mm(task); |
diff --git a/drivers/infiniband/hw/cxgb4/mem.c b/drivers/infiniband/hw/cxgb4/mem.c index e90f2fd8dc16..1445918e3239 100644 --- a/drivers/infiniband/hw/cxgb4/mem.c +++ b/drivers/infiniband/hw/cxgb4/mem.c | |||
| @@ -489,10 +489,10 @@ struct ib_mr *c4iw_get_dma_mr(struct ib_pd *pd, int acc) | |||
| 489 | err_dereg_mem: | 489 | err_dereg_mem: |
| 490 | dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size, | 490 | dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size, |
| 491 | mhp->attr.pbl_addr, mhp->dereg_skb, mhp->wr_waitp); | 491 | mhp->attr.pbl_addr, mhp->dereg_skb, mhp->wr_waitp); |
| 492 | err_free_wr_wait: | ||
| 493 | c4iw_put_wr_wait(mhp->wr_waitp); | ||
| 494 | err_free_skb: | 492 | err_free_skb: |
| 495 | kfree_skb(mhp->dereg_skb); | 493 | kfree_skb(mhp->dereg_skb); |
| 494 | err_free_wr_wait: | ||
| 495 | c4iw_put_wr_wait(mhp->wr_waitp); | ||
| 496 | err_free_mhp: | 496 | err_free_mhp: |
| 497 | kfree(mhp); | 497 | kfree(mhp); |
| 498 | return ERR_PTR(ret); | 498 | return ERR_PTR(ret); |
diff --git a/drivers/infiniband/hw/hfi1/chip.c b/drivers/infiniband/hw/hfi1/chip.c index e6a60fa59f2b..e6bdd0c1e80a 100644 --- a/drivers/infiniband/hw/hfi1/chip.c +++ b/drivers/infiniband/hw/hfi1/chip.c | |||
| @@ -5944,6 +5944,7 @@ static void is_sendctxt_err_int(struct hfi1_devdata *dd, | |||
| 5944 | u64 status; | 5944 | u64 status; |
| 5945 | u32 sw_index; | 5945 | u32 sw_index; |
| 5946 | int i = 0; | 5946 | int i = 0; |
| 5947 | unsigned long irq_flags; | ||
| 5947 | 5948 | ||
| 5948 | sw_index = dd->hw_to_sw[hw_context]; | 5949 | sw_index = dd->hw_to_sw[hw_context]; |
| 5949 | if (sw_index >= dd->num_send_contexts) { | 5950 | if (sw_index >= dd->num_send_contexts) { |
| @@ -5953,10 +5954,12 @@ static void is_sendctxt_err_int(struct hfi1_devdata *dd, | |||
| 5953 | return; | 5954 | return; |
| 5954 | } | 5955 | } |
| 5955 | sci = &dd->send_contexts[sw_index]; | 5956 | sci = &dd->send_contexts[sw_index]; |
| 5957 | spin_lock_irqsave(&dd->sc_lock, irq_flags); | ||
| 5956 | sc = sci->sc; | 5958 | sc = sci->sc; |
| 5957 | if (!sc) { | 5959 | if (!sc) { |
| 5958 | dd_dev_err(dd, "%s: context %u(%u): no sc?\n", __func__, | 5960 | dd_dev_err(dd, "%s: context %u(%u): no sc?\n", __func__, |
| 5959 | sw_index, hw_context); | 5961 | sw_index, hw_context); |
| 5962 | spin_unlock_irqrestore(&dd->sc_lock, irq_flags); | ||
| 5960 | return; | 5963 | return; |
| 5961 | } | 5964 | } |
| 5962 | 5965 | ||
| @@ -5978,6 +5981,7 @@ static void is_sendctxt_err_int(struct hfi1_devdata *dd, | |||
| 5978 | */ | 5981 | */ |
| 5979 | if (sc->type != SC_USER) | 5982 | if (sc->type != SC_USER) |
| 5980 | queue_work(dd->pport->hfi1_wq, &sc->halt_work); | 5983 | queue_work(dd->pport->hfi1_wq, &sc->halt_work); |
| 5984 | spin_unlock_irqrestore(&dd->sc_lock, irq_flags); | ||
| 5981 | 5985 | ||
| 5982 | /* | 5986 | /* |
| 5983 | * Update the counters for the corresponding status bits. | 5987 | * Update the counters for the corresponding status bits. |
diff --git a/drivers/infiniband/hw/hns/hns_roce_cq.c b/drivers/infiniband/hw/hns/hns_roce_cq.c index 14734d0d0b76..3a485f50fede 100644 --- a/drivers/infiniband/hw/hns/hns_roce_cq.c +++ b/drivers/infiniband/hw/hns/hns_roce_cq.c | |||
| @@ -377,6 +377,7 @@ struct ib_cq *hns_roce_ib_create_cq(struct ib_device *ib_dev, | |||
| 377 | 377 | ||
| 378 | hr_cq->set_ci_db = hr_cq->db.db_record; | 378 | hr_cq->set_ci_db = hr_cq->db.db_record; |
| 379 | *hr_cq->set_ci_db = 0; | 379 | *hr_cq->set_ci_db = 0; |
| 380 | hr_cq->db_en = 1; | ||
| 380 | } | 381 | } |
| 381 | 382 | ||
| 382 | /* Init mmt table and write buff address to mtt table */ | 383 | /* Init mmt table and write buff address to mtt table */ |
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c index 47e1b6ac1e1a..8013d69c5ac4 100644 --- a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c +++ b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c | |||
| @@ -722,6 +722,7 @@ static int hns_roce_v1_rsv_lp_qp(struct hns_roce_dev *hr_dev) | |||
| 722 | free_mr->mr_free_pd = to_hr_pd(pd); | 722 | free_mr->mr_free_pd = to_hr_pd(pd); |
| 723 | free_mr->mr_free_pd->ibpd.device = &hr_dev->ib_dev; | 723 | free_mr->mr_free_pd->ibpd.device = &hr_dev->ib_dev; |
| 724 | free_mr->mr_free_pd->ibpd.uobject = NULL; | 724 | free_mr->mr_free_pd->ibpd.uobject = NULL; |
| 725 | free_mr->mr_free_pd->ibpd.__internal_mr = NULL; | ||
| 725 | atomic_set(&free_mr->mr_free_pd->ibpd.usecnt, 0); | 726 | atomic_set(&free_mr->mr_free_pd->ibpd.usecnt, 0); |
| 726 | 727 | ||
| 727 | attr.qp_access_flags = IB_ACCESS_REMOTE_WRITE; | 728 | attr.qp_access_flags = IB_ACCESS_REMOTE_WRITE; |
| @@ -1036,7 +1037,7 @@ static void hns_roce_v1_mr_free_work_fn(struct work_struct *work) | |||
| 1036 | 1037 | ||
| 1037 | do { | 1038 | do { |
| 1038 | ret = hns_roce_v1_poll_cq(&mr_free_cq->ib_cq, ne, wc); | 1039 | ret = hns_roce_v1_poll_cq(&mr_free_cq->ib_cq, ne, wc); |
| 1039 | if (ret < 0) { | 1040 | if (ret < 0 && hr_qp) { |
| 1040 | dev_err(dev, | 1041 | dev_err(dev, |
| 1041 | "(qp:0x%lx) starts, Poll cqe failed(%d) for mr 0x%x free! Remain %d cqe\n", | 1042 | "(qp:0x%lx) starts, Poll cqe failed(%d) for mr 0x%x free! Remain %d cqe\n", |
| 1042 | hr_qp->qpn, ret, hr_mr->key, ne); | 1043 | hr_qp->qpn, ret, hr_mr->key, ne); |
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c index 25916e8522ed..1f0965bb64ee 100644 --- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c +++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c | |||
| @@ -142,8 +142,8 @@ static int hns_roce_v2_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, | |||
| 142 | unsigned long flags; | 142 | unsigned long flags; |
| 143 | unsigned int ind; | 143 | unsigned int ind; |
| 144 | void *wqe = NULL; | 144 | void *wqe = NULL; |
| 145 | u32 tmp_len = 0; | ||
| 146 | bool loopback; | 145 | bool loopback; |
| 146 | u32 tmp_len; | ||
| 147 | int ret = 0; | 147 | int ret = 0; |
| 148 | u8 *smac; | 148 | u8 *smac; |
| 149 | int nreq; | 149 | int nreq; |
| @@ -189,6 +189,7 @@ static int hns_roce_v2_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, | |||
| 189 | 189 | ||
| 190 | owner_bit = | 190 | owner_bit = |
| 191 | ~(((qp->sq.head + nreq) >> ilog2(qp->sq.wqe_cnt)) & 0x1); | 191 | ~(((qp->sq.head + nreq) >> ilog2(qp->sq.wqe_cnt)) & 0x1); |
| 192 | tmp_len = 0; | ||
| 192 | 193 | ||
| 193 | /* Corresponding to the QP type, wqe process separately */ | 194 | /* Corresponding to the QP type, wqe process separately */ |
| 194 | if (ibqp->qp_type == IB_QPT_GSI) { | 195 | if (ibqp->qp_type == IB_QPT_GSI) { |
| @@ -547,16 +548,20 @@ static int hns_roce_v2_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr, | |||
| 547 | } | 548 | } |
| 548 | 549 | ||
| 549 | if (i < hr_qp->rq.max_gs) { | 550 | if (i < hr_qp->rq.max_gs) { |
| 550 | dseg[i].lkey = cpu_to_le32(HNS_ROCE_INVALID_LKEY); | 551 | dseg->lkey = cpu_to_le32(HNS_ROCE_INVALID_LKEY); |
| 551 | dseg[i].addr = 0; | 552 | dseg->addr = 0; |
| 552 | } | 553 | } |
| 553 | 554 | ||
| 554 | /* rq support inline data */ | 555 | /* rq support inline data */ |
| 555 | sge_list = hr_qp->rq_inl_buf.wqe_list[ind].sg_list; | 556 | if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE) { |
| 556 | hr_qp->rq_inl_buf.wqe_list[ind].sge_cnt = (u32)wr->num_sge; | 557 | sge_list = hr_qp->rq_inl_buf.wqe_list[ind].sg_list; |
| 557 | for (i = 0; i < wr->num_sge; i++) { | 558 | hr_qp->rq_inl_buf.wqe_list[ind].sge_cnt = |
| 558 | sge_list[i].addr = (void *)(u64)wr->sg_list[i].addr; | 559 | (u32)wr->num_sge; |
| 559 | sge_list[i].len = wr->sg_list[i].length; | 560 | for (i = 0; i < wr->num_sge; i++) { |
| 561 | sge_list[i].addr = | ||
| 562 | (void *)(u64)wr->sg_list[i].addr; | ||
| 563 | sge_list[i].len = wr->sg_list[i].length; | ||
| 564 | } | ||
| 560 | } | 565 | } |
| 561 | 566 | ||
| 562 | hr_qp->rq.wrid[ind] = wr->wr_id; | 567 | hr_qp->rq.wrid[ind] = wr->wr_id; |
| @@ -613,6 +618,8 @@ static void hns_roce_free_cmq_desc(struct hns_roce_dev *hr_dev, | |||
| 613 | dma_unmap_single(hr_dev->dev, ring->desc_dma_addr, | 618 | dma_unmap_single(hr_dev->dev, ring->desc_dma_addr, |
| 614 | ring->desc_num * sizeof(struct hns_roce_cmq_desc), | 619 | ring->desc_num * sizeof(struct hns_roce_cmq_desc), |
| 615 | DMA_BIDIRECTIONAL); | 620 | DMA_BIDIRECTIONAL); |
| 621 | |||
| 622 | ring->desc_dma_addr = 0; | ||
| 616 | kfree(ring->desc); | 623 | kfree(ring->desc); |
| 617 | } | 624 | } |
| 618 | 625 | ||
| @@ -1081,6 +1088,7 @@ static int hns_roce_v2_profile(struct hns_roce_dev *hr_dev) | |||
| 1081 | if (ret) { | 1088 | if (ret) { |
| 1082 | dev_err(hr_dev->dev, "Configure global param fail, ret = %d.\n", | 1089 | dev_err(hr_dev->dev, "Configure global param fail, ret = %d.\n", |
| 1083 | ret); | 1090 | ret); |
| 1091 | return ret; | ||
| 1084 | } | 1092 | } |
| 1085 | 1093 | ||
| 1086 | /* Get pf resource owned by every pf */ | 1094 | /* Get pf resource owned by every pf */ |
| @@ -1372,6 +1380,8 @@ static int hns_roce_v2_write_mtpt(void *mb_buf, struct hns_roce_mr *mr, | |||
| 1372 | 1380 | ||
| 1373 | roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_PA_S, | 1381 | roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_PA_S, |
| 1374 | mr->type == MR_TYPE_MR ? 0 : 1); | 1382 | mr->type == MR_TYPE_MR ? 0 : 1); |
| 1383 | roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_INNER_PA_VLD_S, | ||
| 1384 | 1); | ||
| 1375 | mpt_entry->byte_12_mw_pa = cpu_to_le32(mpt_entry->byte_12_mw_pa); | 1385 | mpt_entry->byte_12_mw_pa = cpu_to_le32(mpt_entry->byte_12_mw_pa); |
| 1376 | 1386 | ||
| 1377 | mpt_entry->len_l = cpu_to_le32(lower_32_bits(mr->size)); | 1387 | mpt_entry->len_l = cpu_to_le32(lower_32_bits(mr->size)); |
| @@ -2169,6 +2179,7 @@ static void modify_qp_reset_to_init(struct ib_qp *ibqp, | |||
| 2169 | struct hns_roce_v2_qp_context *context, | 2179 | struct hns_roce_v2_qp_context *context, |
| 2170 | struct hns_roce_v2_qp_context *qpc_mask) | 2180 | struct hns_roce_v2_qp_context *qpc_mask) |
| 2171 | { | 2181 | { |
| 2182 | struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device); | ||
| 2172 | struct hns_roce_qp *hr_qp = to_hr_qp(ibqp); | 2183 | struct hns_roce_qp *hr_qp = to_hr_qp(ibqp); |
| 2173 | 2184 | ||
| 2174 | /* | 2185 | /* |
| @@ -2281,7 +2292,8 @@ static void modify_qp_reset_to_init(struct ib_qp *ibqp, | |||
| 2281 | context->rq_db_record_addr = hr_qp->rdb.dma >> 32; | 2292 | context->rq_db_record_addr = hr_qp->rdb.dma >> 32; |
| 2282 | qpc_mask->rq_db_record_addr = 0; | 2293 | qpc_mask->rq_db_record_addr = 0; |
| 2283 | 2294 | ||
| 2284 | roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_RQIE_S, 1); | 2295 | roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_RQIE_S, |
| 2296 | (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE) ? 1 : 0); | ||
| 2285 | roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_RQIE_S, 0); | 2297 | roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_RQIE_S, 0); |
| 2286 | 2298 | ||
| 2287 | roce_set_field(context->byte_80_rnr_rx_cqn, V2_QPC_BYTE_80_RX_CQN_M, | 2299 | roce_set_field(context->byte_80_rnr_rx_cqn, V2_QPC_BYTE_80_RX_CQN_M, |
| @@ -4703,6 +4715,8 @@ static const struct pci_device_id hns_roce_hw_v2_pci_tbl[] = { | |||
| 4703 | {0, } | 4715 | {0, } |
| 4704 | }; | 4716 | }; |
| 4705 | 4717 | ||
| 4718 | MODULE_DEVICE_TABLE(pci, hns_roce_hw_v2_pci_tbl); | ||
| 4719 | |||
| 4706 | static int hns_roce_hw_v2_get_cfg(struct hns_roce_dev *hr_dev, | 4720 | static int hns_roce_hw_v2_get_cfg(struct hns_roce_dev *hr_dev, |
| 4707 | struct hnae3_handle *handle) | 4721 | struct hnae3_handle *handle) |
| 4708 | { | 4722 | { |
diff --git a/drivers/infiniband/hw/hns/hns_roce_main.c b/drivers/infiniband/hw/hns/hns_roce_main.c index 9d48bc07a9e6..96fb6a9ed93c 100644 --- a/drivers/infiniband/hw/hns/hns_roce_main.c +++ b/drivers/infiniband/hw/hns/hns_roce_main.c | |||
| @@ -199,7 +199,7 @@ static int hns_roce_query_device(struct ib_device *ib_dev, | |||
| 199 | 199 | ||
| 200 | memset(props, 0, sizeof(*props)); | 200 | memset(props, 0, sizeof(*props)); |
| 201 | 201 | ||
| 202 | props->sys_image_guid = cpu_to_be32(hr_dev->sys_image_guid); | 202 | props->sys_image_guid = cpu_to_be64(hr_dev->sys_image_guid); |
| 203 | props->max_mr_size = (u64)(~(0ULL)); | 203 | props->max_mr_size = (u64)(~(0ULL)); |
| 204 | props->page_size_cap = hr_dev->caps.page_size_cap; | 204 | props->page_size_cap = hr_dev->caps.page_size_cap; |
| 205 | props->vendor_id = hr_dev->vendor_id; | 205 | props->vendor_id = hr_dev->vendor_id; |
diff --git a/drivers/infiniband/hw/hns/hns_roce_qp.c b/drivers/infiniband/hw/hns/hns_roce_qp.c index d4aad34c21e2..baaf906f7c2e 100644 --- a/drivers/infiniband/hw/hns/hns_roce_qp.c +++ b/drivers/infiniband/hw/hns/hns_roce_qp.c | |||
| @@ -660,6 +660,7 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev, | |||
| 660 | goto err_rq_sge_list; | 660 | goto err_rq_sge_list; |
| 661 | } | 661 | } |
| 662 | *hr_qp->rdb.db_record = 0; | 662 | *hr_qp->rdb.db_record = 0; |
| 663 | hr_qp->rdb_en = 1; | ||
| 663 | } | 664 | } |
| 664 | 665 | ||
| 665 | /* Allocate QP buf */ | 666 | /* Allocate QP buf */ |
| @@ -955,7 +956,14 @@ int hns_roce_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, | |||
| 955 | } | 956 | } |
| 956 | 957 | ||
| 957 | if (cur_state == new_state && cur_state == IB_QPS_RESET) { | 958 | if (cur_state == new_state && cur_state == IB_QPS_RESET) { |
| 958 | ret = 0; | 959 | if (hr_dev->caps.min_wqes) { |
| 960 | ret = -EPERM; | ||
| 961 | dev_err(dev, "cur_state=%d new_state=%d\n", cur_state, | ||
| 962 | new_state); | ||
| 963 | } else { | ||
| 964 | ret = 0; | ||
| 965 | } | ||
| 966 | |||
| 959 | goto out; | 967 | goto out; |
| 960 | } | 968 | } |
| 961 | 969 | ||
diff --git a/drivers/infiniband/hw/i40iw/i40iw.h b/drivers/infiniband/hw/i40iw/i40iw.h index d5d8c1be345a..2f2b4426ded7 100644 --- a/drivers/infiniband/hw/i40iw/i40iw.h +++ b/drivers/infiniband/hw/i40iw/i40iw.h | |||
| @@ -207,6 +207,7 @@ struct i40iw_msix_vector { | |||
| 207 | u32 irq; | 207 | u32 irq; |
| 208 | u32 cpu_affinity; | 208 | u32 cpu_affinity; |
| 209 | u32 ceq_id; | 209 | u32 ceq_id; |
| 210 | cpumask_t mask; | ||
| 210 | }; | 211 | }; |
| 211 | 212 | ||
| 212 | struct l2params_work { | 213 | struct l2params_work { |
diff --git a/drivers/infiniband/hw/i40iw/i40iw_cm.c b/drivers/infiniband/hw/i40iw/i40iw_cm.c index 4cfa8f4647e2..f7c6fd9ff6e2 100644 --- a/drivers/infiniband/hw/i40iw/i40iw_cm.c +++ b/drivers/infiniband/hw/i40iw/i40iw_cm.c | |||
| @@ -2093,7 +2093,7 @@ static int i40iw_addr_resolve_neigh_ipv6(struct i40iw_device *iwdev, | |||
| 2093 | if (netif_is_bond_slave(netdev)) | 2093 | if (netif_is_bond_slave(netdev)) |
| 2094 | netdev = netdev_master_upper_dev_get(netdev); | 2094 | netdev = netdev_master_upper_dev_get(netdev); |
| 2095 | 2095 | ||
| 2096 | neigh = dst_neigh_lookup(dst, &dst_addr); | 2096 | neigh = dst_neigh_lookup(dst, dst_addr.sin6_addr.in6_u.u6_addr32); |
| 2097 | 2097 | ||
| 2098 | rcu_read_lock(); | 2098 | rcu_read_lock(); |
| 2099 | if (neigh) { | 2099 | if (neigh) { |
diff --git a/drivers/infiniband/hw/i40iw/i40iw_hw.c b/drivers/infiniband/hw/i40iw/i40iw_hw.c index 6139836fb533..c9f62ca7643c 100644 --- a/drivers/infiniband/hw/i40iw/i40iw_hw.c +++ b/drivers/infiniband/hw/i40iw/i40iw_hw.c | |||
| @@ -331,7 +331,7 @@ void i40iw_process_aeq(struct i40iw_device *iwdev) | |||
| 331 | switch (info->ae_id) { | 331 | switch (info->ae_id) { |
| 332 | case I40IW_AE_LLP_FIN_RECEIVED: | 332 | case I40IW_AE_LLP_FIN_RECEIVED: |
| 333 | if (qp->term_flags) | 333 | if (qp->term_flags) |
| 334 | continue; | 334 | break; |
| 335 | if (atomic_inc_return(&iwqp->close_timer_started) == 1) { | 335 | if (atomic_inc_return(&iwqp->close_timer_started) == 1) { |
| 336 | iwqp->hw_tcp_state = I40IW_TCP_STATE_CLOSE_WAIT; | 336 | iwqp->hw_tcp_state = I40IW_TCP_STATE_CLOSE_WAIT; |
| 337 | if ((iwqp->hw_tcp_state == I40IW_TCP_STATE_CLOSE_WAIT) && | 337 | if ((iwqp->hw_tcp_state == I40IW_TCP_STATE_CLOSE_WAIT) && |
| @@ -360,7 +360,7 @@ void i40iw_process_aeq(struct i40iw_device *iwdev) | |||
| 360 | break; | 360 | break; |
| 361 | case I40IW_AE_LLP_CONNECTION_RESET: | 361 | case I40IW_AE_LLP_CONNECTION_RESET: |
| 362 | if (atomic_read(&iwqp->close_timer_started)) | 362 | if (atomic_read(&iwqp->close_timer_started)) |
| 363 | continue; | 363 | break; |
| 364 | i40iw_cm_disconn(iwqp); | 364 | i40iw_cm_disconn(iwqp); |
| 365 | break; | 365 | break; |
| 366 | case I40IW_AE_QP_SUSPEND_COMPLETE: | 366 | case I40IW_AE_QP_SUSPEND_COMPLETE: |
diff --git a/drivers/infiniband/hw/i40iw/i40iw_main.c b/drivers/infiniband/hw/i40iw/i40iw_main.c index 9cd0d3ef9057..05001e6da1f8 100644 --- a/drivers/infiniband/hw/i40iw/i40iw_main.c +++ b/drivers/infiniband/hw/i40iw/i40iw_main.c | |||
| @@ -687,7 +687,6 @@ static enum i40iw_status_code i40iw_configure_ceq_vector(struct i40iw_device *iw | |||
| 687 | struct i40iw_msix_vector *msix_vec) | 687 | struct i40iw_msix_vector *msix_vec) |
| 688 | { | 688 | { |
| 689 | enum i40iw_status_code status; | 689 | enum i40iw_status_code status; |
| 690 | cpumask_t mask; | ||
| 691 | 690 | ||
| 692 | if (iwdev->msix_shared && !ceq_id) { | 691 | if (iwdev->msix_shared && !ceq_id) { |
| 693 | tasklet_init(&iwdev->dpc_tasklet, i40iw_dpc, (unsigned long)iwdev); | 692 | tasklet_init(&iwdev->dpc_tasklet, i40iw_dpc, (unsigned long)iwdev); |
| @@ -697,9 +696,9 @@ static enum i40iw_status_code i40iw_configure_ceq_vector(struct i40iw_device *iw | |||
| 697 | status = request_irq(msix_vec->irq, i40iw_ceq_handler, 0, "CEQ", iwceq); | 696 | status = request_irq(msix_vec->irq, i40iw_ceq_handler, 0, "CEQ", iwceq); |
| 698 | } | 697 | } |
| 699 | 698 | ||
| 700 | cpumask_clear(&mask); | 699 | cpumask_clear(&msix_vec->mask); |
| 701 | cpumask_set_cpu(msix_vec->cpu_affinity, &mask); | 700 | cpumask_set_cpu(msix_vec->cpu_affinity, &msix_vec->mask); |
| 702 | irq_set_affinity_hint(msix_vec->irq, &mask); | 701 | irq_set_affinity_hint(msix_vec->irq, &msix_vec->mask); |
| 703 | 702 | ||
| 704 | if (status) { | 703 | if (status) { |
| 705 | i40iw_pr_err("ceq irq config fail\n"); | 704 | i40iw_pr_err("ceq irq config fail\n"); |
diff --git a/drivers/infiniband/hw/i40iw/i40iw_verbs.c b/drivers/infiniband/hw/i40iw/i40iw_verbs.c index 40e4f5ab2b46..68679ad4c6da 100644 --- a/drivers/infiniband/hw/i40iw/i40iw_verbs.c +++ b/drivers/infiniband/hw/i40iw/i40iw_verbs.c | |||
| @@ -394,6 +394,7 @@ static struct i40iw_pbl *i40iw_get_pbl(unsigned long va, | |||
| 394 | 394 | ||
| 395 | list_for_each_entry(iwpbl, pbl_list, list) { | 395 | list_for_each_entry(iwpbl, pbl_list, list) { |
| 396 | if (iwpbl->user_base == va) { | 396 | if (iwpbl->user_base == va) { |
| 397 | iwpbl->on_list = false; | ||
| 397 | list_del(&iwpbl->list); | 398 | list_del(&iwpbl->list); |
| 398 | return iwpbl; | 399 | return iwpbl; |
| 399 | } | 400 | } |
| @@ -614,6 +615,7 @@ static struct ib_qp *i40iw_create_qp(struct ib_pd *ibpd, | |||
| 614 | return ERR_PTR(-ENOMEM); | 615 | return ERR_PTR(-ENOMEM); |
| 615 | 616 | ||
| 616 | iwqp = (struct i40iw_qp *)mem; | 617 | iwqp = (struct i40iw_qp *)mem; |
| 618 | iwqp->allocated_buffer = mem; | ||
| 617 | qp = &iwqp->sc_qp; | 619 | qp = &iwqp->sc_qp; |
| 618 | qp->back_qp = (void *)iwqp; | 620 | qp->back_qp = (void *)iwqp; |
| 619 | qp->push_idx = I40IW_INVALID_PUSH_PAGE_INDEX; | 621 | qp->push_idx = I40IW_INVALID_PUSH_PAGE_INDEX; |
| @@ -642,7 +644,6 @@ static struct ib_qp *i40iw_create_qp(struct ib_pd *ibpd, | |||
| 642 | goto error; | 644 | goto error; |
| 643 | } | 645 | } |
| 644 | 646 | ||
| 645 | iwqp->allocated_buffer = mem; | ||
| 646 | iwqp->iwdev = iwdev; | 647 | iwqp->iwdev = iwdev; |
| 647 | iwqp->iwpd = iwpd; | 648 | iwqp->iwpd = iwpd; |
| 648 | iwqp->ibqp.qp_num = qp_num; | 649 | iwqp->ibqp.qp_num = qp_num; |
| @@ -1898,6 +1899,7 @@ static struct ib_mr *i40iw_reg_user_mr(struct ib_pd *pd, | |||
| 1898 | goto error; | 1899 | goto error; |
| 1899 | spin_lock_irqsave(&ucontext->qp_reg_mem_list_lock, flags); | 1900 | spin_lock_irqsave(&ucontext->qp_reg_mem_list_lock, flags); |
| 1900 | list_add_tail(&iwpbl->list, &ucontext->qp_reg_mem_list); | 1901 | list_add_tail(&iwpbl->list, &ucontext->qp_reg_mem_list); |
| 1902 | iwpbl->on_list = true; | ||
| 1901 | spin_unlock_irqrestore(&ucontext->qp_reg_mem_list_lock, flags); | 1903 | spin_unlock_irqrestore(&ucontext->qp_reg_mem_list_lock, flags); |
| 1902 | break; | 1904 | break; |
| 1903 | case IW_MEMREG_TYPE_CQ: | 1905 | case IW_MEMREG_TYPE_CQ: |
| @@ -1908,6 +1910,7 @@ static struct ib_mr *i40iw_reg_user_mr(struct ib_pd *pd, | |||
| 1908 | 1910 | ||
| 1909 | spin_lock_irqsave(&ucontext->cq_reg_mem_list_lock, flags); | 1911 | spin_lock_irqsave(&ucontext->cq_reg_mem_list_lock, flags); |
| 1910 | list_add_tail(&iwpbl->list, &ucontext->cq_reg_mem_list); | 1912 | list_add_tail(&iwpbl->list, &ucontext->cq_reg_mem_list); |
| 1913 | iwpbl->on_list = true; | ||
| 1911 | spin_unlock_irqrestore(&ucontext->cq_reg_mem_list_lock, flags); | 1914 | spin_unlock_irqrestore(&ucontext->cq_reg_mem_list_lock, flags); |
| 1912 | break; | 1915 | break; |
| 1913 | case IW_MEMREG_TYPE_MEM: | 1916 | case IW_MEMREG_TYPE_MEM: |
| @@ -2045,14 +2048,18 @@ static void i40iw_del_memlist(struct i40iw_mr *iwmr, | |||
| 2045 | switch (iwmr->type) { | 2048 | switch (iwmr->type) { |
| 2046 | case IW_MEMREG_TYPE_CQ: | 2049 | case IW_MEMREG_TYPE_CQ: |
| 2047 | spin_lock_irqsave(&ucontext->cq_reg_mem_list_lock, flags); | 2050 | spin_lock_irqsave(&ucontext->cq_reg_mem_list_lock, flags); |
| 2048 | if (!list_empty(&ucontext->cq_reg_mem_list)) | 2051 | if (iwpbl->on_list) { |
| 2052 | iwpbl->on_list = false; | ||
| 2049 | list_del(&iwpbl->list); | 2053 | list_del(&iwpbl->list); |
| 2054 | } | ||
| 2050 | spin_unlock_irqrestore(&ucontext->cq_reg_mem_list_lock, flags); | 2055 | spin_unlock_irqrestore(&ucontext->cq_reg_mem_list_lock, flags); |
| 2051 | break; | 2056 | break; |
| 2052 | case IW_MEMREG_TYPE_QP: | 2057 | case IW_MEMREG_TYPE_QP: |
| 2053 | spin_lock_irqsave(&ucontext->qp_reg_mem_list_lock, flags); | 2058 | spin_lock_irqsave(&ucontext->qp_reg_mem_list_lock, flags); |
| 2054 | if (!list_empty(&ucontext->qp_reg_mem_list)) | 2059 | if (iwpbl->on_list) { |
| 2060 | iwpbl->on_list = false; | ||
| 2055 | list_del(&iwpbl->list); | 2061 | list_del(&iwpbl->list); |
| 2062 | } | ||
| 2056 | spin_unlock_irqrestore(&ucontext->qp_reg_mem_list_lock, flags); | 2063 | spin_unlock_irqrestore(&ucontext->qp_reg_mem_list_lock, flags); |
| 2057 | break; | 2064 | break; |
| 2058 | default: | 2065 | default: |
diff --git a/drivers/infiniband/hw/i40iw/i40iw_verbs.h b/drivers/infiniband/hw/i40iw/i40iw_verbs.h index 9067443cd311..76cf173377ab 100644 --- a/drivers/infiniband/hw/i40iw/i40iw_verbs.h +++ b/drivers/infiniband/hw/i40iw/i40iw_verbs.h | |||
| @@ -78,6 +78,7 @@ struct i40iw_pbl { | |||
| 78 | }; | 78 | }; |
| 79 | 79 | ||
| 80 | bool pbl_allocated; | 80 | bool pbl_allocated; |
| 81 | bool on_list; | ||
| 81 | u64 user_base; | 82 | u64 user_base; |
| 82 | struct i40iw_pble_alloc pble_alloc; | 83 | struct i40iw_pble_alloc pble_alloc; |
| 83 | struct i40iw_mr *iwmr; | 84 | struct i40iw_mr *iwmr; |
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c index b4d8ff8ab807..69716a7ea993 100644 --- a/drivers/infiniband/hw/mlx5/main.c +++ b/drivers/infiniband/hw/mlx5/main.c | |||
| @@ -2416,7 +2416,7 @@ static void set_proto(void *outer_c, void *outer_v, u8 mask, u8 val) | |||
| 2416 | MLX5_SET(fte_match_set_lyr_2_4, outer_v, ip_protocol, val); | 2416 | MLX5_SET(fte_match_set_lyr_2_4, outer_v, ip_protocol, val); |
| 2417 | } | 2417 | } |
| 2418 | 2418 | ||
| 2419 | static void set_flow_label(void *misc_c, void *misc_v, u8 mask, u8 val, | 2419 | static void set_flow_label(void *misc_c, void *misc_v, u32 mask, u32 val, |
| 2420 | bool inner) | 2420 | bool inner) |
| 2421 | { | 2421 | { |
| 2422 | if (inner) { | 2422 | if (inner) { |
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c index 87b7c1be2a11..2193dc1765fb 100644 --- a/drivers/infiniband/hw/mlx5/qp.c +++ b/drivers/infiniband/hw/mlx5/qp.c | |||
| @@ -484,11 +484,6 @@ static int qp_has_rq(struct ib_qp_init_attr *attr) | |||
| 484 | return 1; | 484 | return 1; |
| 485 | } | 485 | } |
| 486 | 486 | ||
| 487 | static int first_med_bfreg(void) | ||
| 488 | { | ||
| 489 | return 1; | ||
| 490 | } | ||
| 491 | |||
| 492 | enum { | 487 | enum { |
| 493 | /* this is the first blue flame register in the array of bfregs assigned | 488 | /* this is the first blue flame register in the array of bfregs assigned |
| 494 | * to a processes. Since we do not use it for blue flame but rather | 489 | * to a processes. Since we do not use it for blue flame but rather |
| @@ -514,6 +509,12 @@ static int num_med_bfreg(struct mlx5_ib_dev *dev, | |||
| 514 | return n >= 0 ? n : 0; | 509 | return n >= 0 ? n : 0; |
| 515 | } | 510 | } |
| 516 | 511 | ||
| 512 | static int first_med_bfreg(struct mlx5_ib_dev *dev, | ||
| 513 | struct mlx5_bfreg_info *bfregi) | ||
| 514 | { | ||
| 515 | return num_med_bfreg(dev, bfregi) ? 1 : -ENOMEM; | ||
| 516 | } | ||
| 517 | |||
| 517 | static int first_hi_bfreg(struct mlx5_ib_dev *dev, | 518 | static int first_hi_bfreg(struct mlx5_ib_dev *dev, |
| 518 | struct mlx5_bfreg_info *bfregi) | 519 | struct mlx5_bfreg_info *bfregi) |
| 519 | { | 520 | { |
| @@ -541,10 +542,13 @@ static int alloc_high_class_bfreg(struct mlx5_ib_dev *dev, | |||
| 541 | static int alloc_med_class_bfreg(struct mlx5_ib_dev *dev, | 542 | static int alloc_med_class_bfreg(struct mlx5_ib_dev *dev, |
| 542 | struct mlx5_bfreg_info *bfregi) | 543 | struct mlx5_bfreg_info *bfregi) |
| 543 | { | 544 | { |
| 544 | int minidx = first_med_bfreg(); | 545 | int minidx = first_med_bfreg(dev, bfregi); |
| 545 | int i; | 546 | int i; |
| 546 | 547 | ||
| 547 | for (i = first_med_bfreg(); i < first_hi_bfreg(dev, bfregi); i++) { | 548 | if (minidx < 0) |
| 549 | return minidx; | ||
| 550 | |||
| 551 | for (i = minidx; i < first_hi_bfreg(dev, bfregi); i++) { | ||
| 548 | if (bfregi->count[i] < bfregi->count[minidx]) | 552 | if (bfregi->count[i] < bfregi->count[minidx]) |
| 549 | minidx = i; | 553 | minidx = i; |
| 550 | if (!bfregi->count[minidx]) | 554 | if (!bfregi->count[minidx]) |
diff --git a/drivers/infiniband/hw/qedr/verbs.c b/drivers/infiniband/hw/qedr/verbs.c index 7d3763b2e01c..3f9afc02d166 100644 --- a/drivers/infiniband/hw/qedr/verbs.c +++ b/drivers/infiniband/hw/qedr/verbs.c | |||
| @@ -401,49 +401,47 @@ int qedr_mmap(struct ib_ucontext *context, struct vm_area_struct *vma) | |||
| 401 | { | 401 | { |
| 402 | struct qedr_ucontext *ucontext = get_qedr_ucontext(context); | 402 | struct qedr_ucontext *ucontext = get_qedr_ucontext(context); |
| 403 | struct qedr_dev *dev = get_qedr_dev(context->device); | 403 | struct qedr_dev *dev = get_qedr_dev(context->device); |
| 404 | unsigned long vm_page = vma->vm_pgoff << PAGE_SHIFT; | 404 | unsigned long phys_addr = vma->vm_pgoff << PAGE_SHIFT; |
| 405 | u64 unmapped_db = dev->db_phys_addr; | ||
| 406 | unsigned long len = (vma->vm_end - vma->vm_start); | 405 | unsigned long len = (vma->vm_end - vma->vm_start); |
| 407 | int rc = 0; | 406 | unsigned long dpi_start; |
| 408 | bool found; | 407 | |
| 408 | dpi_start = dev->db_phys_addr + (ucontext->dpi * ucontext->dpi_size); | ||
| 409 | 409 | ||
| 410 | DP_DEBUG(dev, QEDR_MSG_INIT, | 410 | DP_DEBUG(dev, QEDR_MSG_INIT, |
| 411 | "qedr_mmap called vm_page=0x%lx vm_pgoff=0x%lx unmapped_db=0x%llx db_size=%x, len=%lx\n", | 411 | "mmap invoked with vm_start=0x%pK, vm_end=0x%pK,vm_pgoff=0x%pK; dpi_start=0x%pK dpi_size=0x%x\n", |
| 412 | vm_page, vma->vm_pgoff, unmapped_db, dev->db_size, len); | 412 | (void *)vma->vm_start, (void *)vma->vm_end, |
| 413 | if (vma->vm_start & (PAGE_SIZE - 1)) { | 413 | (void *)vma->vm_pgoff, (void *)dpi_start, ucontext->dpi_size); |
| 414 | DP_ERR(dev, "Vma_start not page aligned = %ld\n", | 414 | |
| 415 | vma->vm_start); | 415 | if ((vma->vm_start & (PAGE_SIZE - 1)) || (len & (PAGE_SIZE - 1))) { |
| 416 | DP_ERR(dev, | ||
| 417 | "failed mmap, adrresses must be page aligned: start=0x%pK, end=0x%pK\n", | ||
| 418 | (void *)vma->vm_start, (void *)vma->vm_end); | ||
| 416 | return -EINVAL; | 419 | return -EINVAL; |
| 417 | } | 420 | } |
| 418 | 421 | ||
| 419 | found = qedr_search_mmap(ucontext, vm_page, len); | 422 | if (!qedr_search_mmap(ucontext, phys_addr, len)) { |
| 420 | if (!found) { | 423 | DP_ERR(dev, "failed mmap, vm_pgoff=0x%lx is not authorized\n", |
| 421 | DP_ERR(dev, "Vma_pgoff not found in mapped array = %ld\n", | ||
| 422 | vma->vm_pgoff); | 424 | vma->vm_pgoff); |
| 423 | return -EINVAL; | 425 | return -EINVAL; |
| 424 | } | 426 | } |
| 425 | 427 | ||
| 426 | DP_DEBUG(dev, QEDR_MSG_INIT, "Mapping doorbell bar\n"); | 428 | if (phys_addr < dpi_start || |
| 427 | 429 | ((phys_addr + len) > (dpi_start + ucontext->dpi_size))) { | |
| 428 | if ((vm_page >= unmapped_db) && (vm_page <= (unmapped_db + | 430 | DP_ERR(dev, |
| 429 | dev->db_size))) { | 431 | "failed mmap, pages are outside of dpi; page address=0x%pK, dpi_start=0x%pK, dpi_size=0x%x\n", |
| 430 | DP_DEBUG(dev, QEDR_MSG_INIT, "Mapping doorbell bar\n"); | 432 | (void *)phys_addr, (void *)dpi_start, |
| 431 | if (vma->vm_flags & VM_READ) { | 433 | ucontext->dpi_size); |
| 432 | DP_ERR(dev, "Trying to map doorbell bar for read\n"); | 434 | return -EINVAL; |
| 433 | return -EPERM; | 435 | } |
| 434 | } | ||
| 435 | |||
| 436 | vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot); | ||
| 437 | 436 | ||
| 438 | rc = io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff, | 437 | if (vma->vm_flags & VM_READ) { |
| 439 | PAGE_SIZE, vma->vm_page_prot); | 438 | DP_ERR(dev, "failed mmap, cannot map doorbell bar for read\n"); |
| 440 | } else { | 439 | return -EINVAL; |
| 441 | DP_DEBUG(dev, QEDR_MSG_INIT, "Mapping chains\n"); | ||
| 442 | rc = remap_pfn_range(vma, vma->vm_start, | ||
| 443 | vma->vm_pgoff, len, vma->vm_page_prot); | ||
| 444 | } | 440 | } |
| 445 | DP_DEBUG(dev, QEDR_MSG_INIT, "qedr_mmap return code: %d\n", rc); | 441 | |
| 446 | return rc; | 442 | vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot); |
| 443 | return io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff, len, | ||
| 444 | vma->vm_page_prot); | ||
| 447 | } | 445 | } |
| 448 | 446 | ||
| 449 | struct ib_pd *qedr_alloc_pd(struct ib_device *ibdev, | 447 | struct ib_pd *qedr_alloc_pd(struct ib_device *ibdev, |
diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.c b/drivers/infiniband/sw/rxe/rxe_verbs.c index 2cb52fd48cf1..73a00a1c06f6 100644 --- a/drivers/infiniband/sw/rxe/rxe_verbs.c +++ b/drivers/infiniband/sw/rxe/rxe_verbs.c | |||
| @@ -761,7 +761,6 @@ static int rxe_post_send_kernel(struct rxe_qp *qp, struct ib_send_wr *wr, | |||
| 761 | unsigned int mask; | 761 | unsigned int mask; |
| 762 | unsigned int length = 0; | 762 | unsigned int length = 0; |
| 763 | int i; | 763 | int i; |
| 764 | int must_sched; | ||
| 765 | 764 | ||
| 766 | while (wr) { | 765 | while (wr) { |
| 767 | mask = wr_opcode_mask(wr->opcode, qp); | 766 | mask = wr_opcode_mask(wr->opcode, qp); |
| @@ -791,14 +790,7 @@ static int rxe_post_send_kernel(struct rxe_qp *qp, struct ib_send_wr *wr, | |||
| 791 | wr = wr->next; | 790 | wr = wr->next; |
| 792 | } | 791 | } |
| 793 | 792 | ||
| 794 | /* | 793 | rxe_run_task(&qp->req.task, 1); |
| 795 | * Must sched in case of GSI QP because ib_send_mad() hold irq lock, | ||
| 796 | * and the requester call ip_local_out_sk() that takes spin_lock_bh. | ||
| 797 | */ | ||
| 798 | must_sched = (qp_type(qp) == IB_QPT_GSI) || | ||
| 799 | (queue_count(qp->sq.queue) > 1); | ||
| 800 | |||
| 801 | rxe_run_task(&qp->req.task, must_sched); | ||
| 802 | if (unlikely(qp->req.state == QP_STATE_ERROR)) | 794 | if (unlikely(qp->req.state == QP_STATE_ERROR)) |
| 803 | rxe_run_task(&qp->comp.task, 1); | 795 | rxe_run_task(&qp->comp.task, 1); |
| 804 | 796 | ||
diff --git a/drivers/infiniband/ulp/srpt/Kconfig b/drivers/infiniband/ulp/srpt/Kconfig index fb8b7182f05e..25bf6955b6d0 100644 --- a/drivers/infiniband/ulp/srpt/Kconfig +++ b/drivers/infiniband/ulp/srpt/Kconfig | |||
| @@ -1,6 +1,6 @@ | |||
| 1 | config INFINIBAND_SRPT | 1 | config INFINIBAND_SRPT |
| 2 | tristate "InfiniBand SCSI RDMA Protocol target support" | 2 | tristate "InfiniBand SCSI RDMA Protocol target support" |
| 3 | depends on INFINIBAND && INFINIBAND_ADDR_TRANS && TARGET_CORE | 3 | depends on INFINIBAND_ADDR_TRANS && TARGET_CORE |
| 4 | ---help--- | 4 | ---help--- |
| 5 | 5 | ||
| 6 | Support for the SCSI RDMA Protocol (SRP) Target driver. The | 6 | Support for the SCSI RDMA Protocol (SRP) Target driver. The |
diff --git a/drivers/isdn/hardware/eicon/diva.c b/drivers/isdn/hardware/eicon/diva.c index 944a7f338099..1b25d8bc153a 100644 --- a/drivers/isdn/hardware/eicon/diva.c +++ b/drivers/isdn/hardware/eicon/diva.c | |||
| @@ -388,10 +388,10 @@ void divasa_xdi_driver_unload(void) | |||
| 388 | ** Receive and process command from user mode utility | 388 | ** Receive and process command from user mode utility |
| 389 | */ | 389 | */ |
| 390 | void *diva_xdi_open_adapter(void *os_handle, const void __user *src, | 390 | void *diva_xdi_open_adapter(void *os_handle, const void __user *src, |
| 391 | int length, | 391 | int length, void *mptr, |
| 392 | divas_xdi_copy_from_user_fn_t cp_fn) | 392 | divas_xdi_copy_from_user_fn_t cp_fn) |
| 393 | { | 393 | { |
| 394 | diva_xdi_um_cfg_cmd_t msg; | 394 | diva_xdi_um_cfg_cmd_t *msg = (diva_xdi_um_cfg_cmd_t *)mptr; |
| 395 | diva_os_xdi_adapter_t *a = NULL; | 395 | diva_os_xdi_adapter_t *a = NULL; |
| 396 | diva_os_spin_lock_magic_t old_irql; | 396 | diva_os_spin_lock_magic_t old_irql; |
| 397 | struct list_head *tmp; | 397 | struct list_head *tmp; |
| @@ -401,21 +401,21 @@ void *diva_xdi_open_adapter(void *os_handle, const void __user *src, | |||
| 401 | length, sizeof(diva_xdi_um_cfg_cmd_t))) | 401 | length, sizeof(diva_xdi_um_cfg_cmd_t))) |
| 402 | return NULL; | 402 | return NULL; |
| 403 | } | 403 | } |
| 404 | if ((*cp_fn) (os_handle, &msg, src, sizeof(msg)) <= 0) { | 404 | if ((*cp_fn) (os_handle, msg, src, sizeof(*msg)) <= 0) { |
| 405 | DBG_ERR(("A: A(?) open, write error")) | 405 | DBG_ERR(("A: A(?) open, write error")) |
| 406 | return NULL; | 406 | return NULL; |
| 407 | } | 407 | } |
| 408 | diva_os_enter_spin_lock(&adapter_lock, &old_irql, "open_adapter"); | 408 | diva_os_enter_spin_lock(&adapter_lock, &old_irql, "open_adapter"); |
| 409 | list_for_each(tmp, &adapter_queue) { | 409 | list_for_each(tmp, &adapter_queue) { |
| 410 | a = list_entry(tmp, diva_os_xdi_adapter_t, link); | 410 | a = list_entry(tmp, diva_os_xdi_adapter_t, link); |
| 411 | if (a->controller == (int)msg.adapter) | 411 | if (a->controller == (int)msg->adapter) |
| 412 | break; | 412 | break; |
| 413 | a = NULL; | 413 | a = NULL; |
| 414 | } | 414 | } |
| 415 | diva_os_leave_spin_lock(&adapter_lock, &old_irql, "open_adapter"); | 415 | diva_os_leave_spin_lock(&adapter_lock, &old_irql, "open_adapter"); |
| 416 | 416 | ||
| 417 | if (!a) { | 417 | if (!a) { |
| 418 | DBG_ERR(("A: A(%d) open, adapter not found", msg.adapter)) | 418 | DBG_ERR(("A: A(%d) open, adapter not found", msg->adapter)) |
| 419 | } | 419 | } |
| 420 | 420 | ||
| 421 | return (a); | 421 | return (a); |
| @@ -437,8 +437,10 @@ void diva_xdi_close_adapter(void *adapter, void *os_handle) | |||
| 437 | 437 | ||
| 438 | int | 438 | int |
| 439 | diva_xdi_write(void *adapter, void *os_handle, const void __user *src, | 439 | diva_xdi_write(void *adapter, void *os_handle, const void __user *src, |
| 440 | int length, divas_xdi_copy_from_user_fn_t cp_fn) | 440 | int length, void *mptr, |
| 441 | divas_xdi_copy_from_user_fn_t cp_fn) | ||
| 441 | { | 442 | { |
| 443 | diva_xdi_um_cfg_cmd_t *msg = (diva_xdi_um_cfg_cmd_t *)mptr; | ||
| 442 | diva_os_xdi_adapter_t *a = (diva_os_xdi_adapter_t *) adapter; | 444 | diva_os_xdi_adapter_t *a = (diva_os_xdi_adapter_t *) adapter; |
| 443 | void *data; | 445 | void *data; |
| 444 | 446 | ||
| @@ -459,7 +461,13 @@ diva_xdi_write(void *adapter, void *os_handle, const void __user *src, | |||
| 459 | return (-2); | 461 | return (-2); |
| 460 | } | 462 | } |
| 461 | 463 | ||
| 462 | length = (*cp_fn) (os_handle, data, src, length); | 464 | if (msg) { |
| 465 | *(diva_xdi_um_cfg_cmd_t *)data = *msg; | ||
| 466 | length = (*cp_fn) (os_handle, (char *)data + sizeof(*msg), | ||
| 467 | src + sizeof(*msg), length - sizeof(*msg)); | ||
| 468 | } else { | ||
| 469 | length = (*cp_fn) (os_handle, data, src, length); | ||
| 470 | } | ||
| 463 | if (length > 0) { | 471 | if (length > 0) { |
| 464 | if ((*(a->interface.cmd_proc)) | 472 | if ((*(a->interface.cmd_proc)) |
| 465 | (a, (diva_xdi_um_cfg_cmd_t *) data, length)) { | 473 | (a, (diva_xdi_um_cfg_cmd_t *) data, length)) { |
diff --git a/drivers/isdn/hardware/eicon/diva.h b/drivers/isdn/hardware/eicon/diva.h index b067032093a8..1ad76650fbf9 100644 --- a/drivers/isdn/hardware/eicon/diva.h +++ b/drivers/isdn/hardware/eicon/diva.h | |||
| @@ -20,10 +20,11 @@ int diva_xdi_read(void *adapter, void *os_handle, void __user *dst, | |||
| 20 | int max_length, divas_xdi_copy_to_user_fn_t cp_fn); | 20 | int max_length, divas_xdi_copy_to_user_fn_t cp_fn); |
| 21 | 21 | ||
| 22 | int diva_xdi_write(void *adapter, void *os_handle, const void __user *src, | 22 | int diva_xdi_write(void *adapter, void *os_handle, const void __user *src, |
| 23 | int length, divas_xdi_copy_from_user_fn_t cp_fn); | 23 | int length, void *msg, |
| 24 | divas_xdi_copy_from_user_fn_t cp_fn); | ||
| 24 | 25 | ||
| 25 | void *diva_xdi_open_adapter(void *os_handle, const void __user *src, | 26 | void *diva_xdi_open_adapter(void *os_handle, const void __user *src, |
| 26 | int length, | 27 | int length, void *msg, |
| 27 | divas_xdi_copy_from_user_fn_t cp_fn); | 28 | divas_xdi_copy_from_user_fn_t cp_fn); |
| 28 | 29 | ||
| 29 | void diva_xdi_close_adapter(void *adapter, void *os_handle); | 30 | void diva_xdi_close_adapter(void *adapter, void *os_handle); |
diff --git a/drivers/isdn/hardware/eicon/divasmain.c b/drivers/isdn/hardware/eicon/divasmain.c index b9980e84f9db..b6a3950b2564 100644 --- a/drivers/isdn/hardware/eicon/divasmain.c +++ b/drivers/isdn/hardware/eicon/divasmain.c | |||
| @@ -591,19 +591,22 @@ static int divas_release(struct inode *inode, struct file *file) | |||
| 591 | static ssize_t divas_write(struct file *file, const char __user *buf, | 591 | static ssize_t divas_write(struct file *file, const char __user *buf, |
| 592 | size_t count, loff_t *ppos) | 592 | size_t count, loff_t *ppos) |
| 593 | { | 593 | { |
| 594 | diva_xdi_um_cfg_cmd_t msg; | ||
| 594 | int ret = -EINVAL; | 595 | int ret = -EINVAL; |
| 595 | 596 | ||
| 596 | if (!file->private_data) { | 597 | if (!file->private_data) { |
| 597 | file->private_data = diva_xdi_open_adapter(file, buf, | 598 | file->private_data = diva_xdi_open_adapter(file, buf, |
| 598 | count, | 599 | count, &msg, |
| 599 | xdi_copy_from_user); | 600 | xdi_copy_from_user); |
| 600 | } | 601 | if (!file->private_data) |
| 601 | if (!file->private_data) { | 602 | return (-ENODEV); |
| 602 | return (-ENODEV); | 603 | ret = diva_xdi_write(file->private_data, file, |
| 604 | buf, count, &msg, xdi_copy_from_user); | ||
| 605 | } else { | ||
| 606 | ret = diva_xdi_write(file->private_data, file, | ||
| 607 | buf, count, NULL, xdi_copy_from_user); | ||
| 603 | } | 608 | } |
| 604 | 609 | ||
| 605 | ret = diva_xdi_write(file->private_data, file, | ||
| 606 | buf, count, xdi_copy_from_user); | ||
| 607 | switch (ret) { | 610 | switch (ret) { |
| 608 | case -1: /* Message should be removed from rx mailbox first */ | 611 | case -1: /* Message should be removed from rx mailbox first */ |
| 609 | ret = -EBUSY; | 612 | ret = -EBUSY; |
| @@ -622,11 +625,12 @@ static ssize_t divas_write(struct file *file, const char __user *buf, | |||
| 622 | static ssize_t divas_read(struct file *file, char __user *buf, | 625 | static ssize_t divas_read(struct file *file, char __user *buf, |
| 623 | size_t count, loff_t *ppos) | 626 | size_t count, loff_t *ppos) |
| 624 | { | 627 | { |
| 628 | diva_xdi_um_cfg_cmd_t msg; | ||
| 625 | int ret = -EINVAL; | 629 | int ret = -EINVAL; |
| 626 | 630 | ||
| 627 | if (!file->private_data) { | 631 | if (!file->private_data) { |
| 628 | file->private_data = diva_xdi_open_adapter(file, buf, | 632 | file->private_data = diva_xdi_open_adapter(file, buf, |
| 629 | count, | 633 | count, &msg, |
| 630 | xdi_copy_from_user); | 634 | xdi_copy_from_user); |
| 631 | } | 635 | } |
| 632 | if (!file->private_data) { | 636 | if (!file->private_data) { |
diff --git a/drivers/md/bcache/debug.c b/drivers/md/bcache/debug.c index 4e63c6f6c04d..d030ce3025a6 100644 --- a/drivers/md/bcache/debug.c +++ b/drivers/md/bcache/debug.c | |||
| @@ -250,7 +250,9 @@ void bch_debug_exit(void) | |||
| 250 | 250 | ||
| 251 | int __init bch_debug_init(struct kobject *kobj) | 251 | int __init bch_debug_init(struct kobject *kobj) |
| 252 | { | 252 | { |
| 253 | bcache_debug = debugfs_create_dir("bcache", NULL); | 253 | if (!IS_ENABLED(CONFIG_DEBUG_FS)) |
| 254 | return 0; | ||
| 254 | 255 | ||
| 256 | bcache_debug = debugfs_create_dir("bcache", NULL); | ||
| 255 | return IS_ERR_OR_NULL(bcache_debug); | 257 | return IS_ERR_OR_NULL(bcache_debug); |
| 256 | } | 258 | } |
diff --git a/drivers/mfd/cros_ec_spi.c b/drivers/mfd/cros_ec_spi.c index 1b52b8557034..2060d1483043 100644 --- a/drivers/mfd/cros_ec_spi.c +++ b/drivers/mfd/cros_ec_spi.c | |||
| @@ -419,10 +419,25 @@ static int cros_ec_pkt_xfer_spi(struct cros_ec_device *ec_dev, | |||
| 419 | /* Verify that EC can process command */ | 419 | /* Verify that EC can process command */ |
| 420 | for (i = 0; i < len; i++) { | 420 | for (i = 0; i < len; i++) { |
| 421 | rx_byte = rx_buf[i]; | 421 | rx_byte = rx_buf[i]; |
| 422 | /* | ||
| 423 | * Seeing the PAST_END, RX_BAD_DATA, or NOT_READY | ||
| 424 | * markers are all signs that the EC didn't fully | ||
| 425 | * receive our command. e.g., if the EC is flashing | ||
| 426 | * itself, it can't respond to any commands and instead | ||
| 427 | * clocks out EC_SPI_PAST_END from its SPI hardware | ||
| 428 | * buffer. Similar occurrences can happen if the AP is | ||
| 429 | * too slow to clock out data after asserting CS -- the | ||
| 430 | * EC will abort and fill its buffer with | ||
| 431 | * EC_SPI_RX_BAD_DATA. | ||
| 432 | * | ||
| 433 | * In all cases, these errors should be safe to retry. | ||
| 434 | * Report -EAGAIN and let the caller decide what to do | ||
| 435 | * about that. | ||
| 436 | */ | ||
| 422 | if (rx_byte == EC_SPI_PAST_END || | 437 | if (rx_byte == EC_SPI_PAST_END || |
| 423 | rx_byte == EC_SPI_RX_BAD_DATA || | 438 | rx_byte == EC_SPI_RX_BAD_DATA || |
| 424 | rx_byte == EC_SPI_NOT_READY) { | 439 | rx_byte == EC_SPI_NOT_READY) { |
| 425 | ret = -EREMOTEIO; | 440 | ret = -EAGAIN; |
| 426 | break; | 441 | break; |
| 427 | } | 442 | } |
| 428 | } | 443 | } |
| @@ -431,7 +446,7 @@ static int cros_ec_pkt_xfer_spi(struct cros_ec_device *ec_dev, | |||
| 431 | if (!ret) | 446 | if (!ret) |
| 432 | ret = cros_ec_spi_receive_packet(ec_dev, | 447 | ret = cros_ec_spi_receive_packet(ec_dev, |
| 433 | ec_msg->insize + sizeof(*response)); | 448 | ec_msg->insize + sizeof(*response)); |
| 434 | else | 449 | else if (ret != -EAGAIN) |
| 435 | dev_err(ec_dev->dev, "spi transfer failed: %d\n", ret); | 450 | dev_err(ec_dev->dev, "spi transfer failed: %d\n", ret); |
| 436 | 451 | ||
| 437 | final_ret = terminate_request(ec_dev); | 452 | final_ret = terminate_request(ec_dev); |
| @@ -537,10 +552,11 @@ static int cros_ec_cmd_xfer_spi(struct cros_ec_device *ec_dev, | |||
| 537 | /* Verify that EC can process command */ | 552 | /* Verify that EC can process command */ |
| 538 | for (i = 0; i < len; i++) { | 553 | for (i = 0; i < len; i++) { |
| 539 | rx_byte = rx_buf[i]; | 554 | rx_byte = rx_buf[i]; |
| 555 | /* See comments in cros_ec_pkt_xfer_spi() */ | ||
| 540 | if (rx_byte == EC_SPI_PAST_END || | 556 | if (rx_byte == EC_SPI_PAST_END || |
| 541 | rx_byte == EC_SPI_RX_BAD_DATA || | 557 | rx_byte == EC_SPI_RX_BAD_DATA || |
| 542 | rx_byte == EC_SPI_NOT_READY) { | 558 | rx_byte == EC_SPI_NOT_READY) { |
| 543 | ret = -EREMOTEIO; | 559 | ret = -EAGAIN; |
| 544 | break; | 560 | break; |
| 545 | } | 561 | } |
| 546 | } | 562 | } |
| @@ -549,7 +565,7 @@ static int cros_ec_cmd_xfer_spi(struct cros_ec_device *ec_dev, | |||
| 549 | if (!ret) | 565 | if (!ret) |
| 550 | ret = cros_ec_spi_receive_response(ec_dev, | 566 | ret = cros_ec_spi_receive_response(ec_dev, |
| 551 | ec_msg->insize + EC_MSG_TX_PROTO_BYTES); | 567 | ec_msg->insize + EC_MSG_TX_PROTO_BYTES); |
| 552 | else | 568 | else if (ret != -EAGAIN) |
| 553 | dev_err(ec_dev->dev, "spi transfer failed: %d\n", ret); | 569 | dev_err(ec_dev->dev, "spi transfer failed: %d\n", ret); |
| 554 | 570 | ||
| 555 | final_ret = terminate_request(ec_dev); | 571 | final_ret = terminate_request(ec_dev); |
diff --git a/drivers/misc/cxl/cxl.h b/drivers/misc/cxl/cxl.h index a4c9c8297a6d..918d4fb742d1 100644 --- a/drivers/misc/cxl/cxl.h +++ b/drivers/misc/cxl/cxl.h | |||
| @@ -717,6 +717,7 @@ struct cxl { | |||
| 717 | bool perst_select_user; | 717 | bool perst_select_user; |
| 718 | bool perst_same_image; | 718 | bool perst_same_image; |
| 719 | bool psl_timebase_synced; | 719 | bool psl_timebase_synced; |
| 720 | bool tunneled_ops_supported; | ||
| 720 | 721 | ||
| 721 | /* | 722 | /* |
| 722 | * number of contexts mapped on to this card. Possible values are: | 723 | * number of contexts mapped on to this card. Possible values are: |
diff --git a/drivers/misc/cxl/pci.c b/drivers/misc/cxl/pci.c index 83f1d08058fc..4d6736f9d463 100644 --- a/drivers/misc/cxl/pci.c +++ b/drivers/misc/cxl/pci.c | |||
| @@ -1742,6 +1742,15 @@ static int cxl_configure_adapter(struct cxl *adapter, struct pci_dev *dev) | |||
| 1742 | /* Required for devices using CAPP DMA mode, harmless for others */ | 1742 | /* Required for devices using CAPP DMA mode, harmless for others */ |
| 1743 | pci_set_master(dev); | 1743 | pci_set_master(dev); |
| 1744 | 1744 | ||
| 1745 | adapter->tunneled_ops_supported = false; | ||
| 1746 | |||
| 1747 | if (cxl_is_power9()) { | ||
| 1748 | if (pnv_pci_set_tunnel_bar(dev, 0x00020000E0000000ull, 1)) | ||
| 1749 | dev_info(&dev->dev, "Tunneled operations unsupported\n"); | ||
| 1750 | else | ||
| 1751 | adapter->tunneled_ops_supported = true; | ||
| 1752 | } | ||
| 1753 | |||
| 1745 | if ((rc = pnv_phb_to_cxl_mode(dev, adapter->native->sl_ops->capi_mode))) | 1754 | if ((rc = pnv_phb_to_cxl_mode(dev, adapter->native->sl_ops->capi_mode))) |
| 1746 | goto err; | 1755 | goto err; |
| 1747 | 1756 | ||
| @@ -1768,6 +1777,9 @@ static void cxl_deconfigure_adapter(struct cxl *adapter) | |||
| 1768 | { | 1777 | { |
| 1769 | struct pci_dev *pdev = to_pci_dev(adapter->dev.parent); | 1778 | struct pci_dev *pdev = to_pci_dev(adapter->dev.parent); |
| 1770 | 1779 | ||
| 1780 | if (cxl_is_power9()) | ||
| 1781 | pnv_pci_set_tunnel_bar(pdev, 0x00020000E0000000ull, 0); | ||
| 1782 | |||
| 1771 | cxl_native_release_psl_err_irq(adapter); | 1783 | cxl_native_release_psl_err_irq(adapter); |
| 1772 | cxl_unmap_adapter_regs(adapter); | 1784 | cxl_unmap_adapter_regs(adapter); |
| 1773 | 1785 | ||
diff --git a/drivers/misc/cxl/sysfs.c b/drivers/misc/cxl/sysfs.c index 95285b7f636f..4b5a4c5d3c01 100644 --- a/drivers/misc/cxl/sysfs.c +++ b/drivers/misc/cxl/sysfs.c | |||
| @@ -78,6 +78,15 @@ static ssize_t psl_timebase_synced_show(struct device *device, | |||
| 78 | return scnprintf(buf, PAGE_SIZE, "%i\n", adapter->psl_timebase_synced); | 78 | return scnprintf(buf, PAGE_SIZE, "%i\n", adapter->psl_timebase_synced); |
| 79 | } | 79 | } |
| 80 | 80 | ||
| 81 | static ssize_t tunneled_ops_supported_show(struct device *device, | ||
| 82 | struct device_attribute *attr, | ||
| 83 | char *buf) | ||
| 84 | { | ||
| 85 | struct cxl *adapter = to_cxl_adapter(device); | ||
| 86 | |||
| 87 | return scnprintf(buf, PAGE_SIZE, "%i\n", adapter->tunneled_ops_supported); | ||
| 88 | } | ||
| 89 | |||
| 81 | static ssize_t reset_adapter_store(struct device *device, | 90 | static ssize_t reset_adapter_store(struct device *device, |
| 82 | struct device_attribute *attr, | 91 | struct device_attribute *attr, |
| 83 | const char *buf, size_t count) | 92 | const char *buf, size_t count) |
| @@ -183,6 +192,7 @@ static struct device_attribute adapter_attrs[] = { | |||
| 183 | __ATTR_RO(base_image), | 192 | __ATTR_RO(base_image), |
| 184 | __ATTR_RO(image_loaded), | 193 | __ATTR_RO(image_loaded), |
| 185 | __ATTR_RO(psl_timebase_synced), | 194 | __ATTR_RO(psl_timebase_synced), |
| 195 | __ATTR_RO(tunneled_ops_supported), | ||
| 186 | __ATTR_RW(load_image_on_perst), | 196 | __ATTR_RW(load_image_on_perst), |
| 187 | __ATTR_RW(perst_reloads_same_image), | 197 | __ATTR_RW(perst_reloads_same_image), |
| 188 | __ATTR(reset, S_IWUSR, NULL, reset_adapter_store), | 198 | __ATTR(reset, S_IWUSR, NULL, reset_adapter_store), |
diff --git a/drivers/misc/eeprom/at24.c b/drivers/misc/eeprom/at24.c index 0c125f207aea..33053b0d1fdf 100644 --- a/drivers/misc/eeprom/at24.c +++ b/drivers/misc/eeprom/at24.c | |||
| @@ -518,7 +518,7 @@ static int at24_get_pdata(struct device *dev, struct at24_platform_data *pdata) | |||
| 518 | if (of_node && of_match_device(at24_of_match, dev)) | 518 | if (of_node && of_match_device(at24_of_match, dev)) |
| 519 | cdata = of_device_get_match_data(dev); | 519 | cdata = of_device_get_match_data(dev); |
| 520 | else if (id) | 520 | else if (id) |
| 521 | cdata = (void *)&id->driver_data; | 521 | cdata = (void *)id->driver_data; |
| 522 | else | 522 | else |
| 523 | cdata = acpi_device_get_match_data(dev); | 523 | cdata = acpi_device_get_match_data(dev); |
| 524 | 524 | ||
diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c index 9e923cd1d80e..38a7586b00cc 100644 --- a/drivers/mmc/core/block.c +++ b/drivers/mmc/core/block.c | |||
| @@ -2485,7 +2485,7 @@ static long mmc_rpmb_ioctl(struct file *filp, unsigned int cmd, | |||
| 2485 | break; | 2485 | break; |
| 2486 | } | 2486 | } |
| 2487 | 2487 | ||
| 2488 | return 0; | 2488 | return ret; |
| 2489 | } | 2489 | } |
| 2490 | 2490 | ||
| 2491 | #ifdef CONFIG_COMPAT | 2491 | #ifdef CONFIG_COMPAT |
diff --git a/drivers/mmc/host/sdhci-iproc.c b/drivers/mmc/host/sdhci-iproc.c index 0ef741bc515d..d0e83db42ae5 100644 --- a/drivers/mmc/host/sdhci-iproc.c +++ b/drivers/mmc/host/sdhci-iproc.c | |||
| @@ -33,6 +33,8 @@ struct sdhci_iproc_host { | |||
| 33 | const struct sdhci_iproc_data *data; | 33 | const struct sdhci_iproc_data *data; |
| 34 | u32 shadow_cmd; | 34 | u32 shadow_cmd; |
| 35 | u32 shadow_blk; | 35 | u32 shadow_blk; |
| 36 | bool is_cmd_shadowed; | ||
| 37 | bool is_blk_shadowed; | ||
| 36 | }; | 38 | }; |
| 37 | 39 | ||
| 38 | #define REG_OFFSET_IN_BITS(reg) ((reg) << 3 & 0x18) | 40 | #define REG_OFFSET_IN_BITS(reg) ((reg) << 3 & 0x18) |
| @@ -48,8 +50,22 @@ static inline u32 sdhci_iproc_readl(struct sdhci_host *host, int reg) | |||
| 48 | 50 | ||
| 49 | static u16 sdhci_iproc_readw(struct sdhci_host *host, int reg) | 51 | static u16 sdhci_iproc_readw(struct sdhci_host *host, int reg) |
| 50 | { | 52 | { |
| 51 | u32 val = sdhci_iproc_readl(host, (reg & ~3)); | 53 | struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); |
| 52 | u16 word = val >> REG_OFFSET_IN_BITS(reg) & 0xffff; | 54 | struct sdhci_iproc_host *iproc_host = sdhci_pltfm_priv(pltfm_host); |
| 55 | u32 val; | ||
| 56 | u16 word; | ||
| 57 | |||
| 58 | if ((reg == SDHCI_TRANSFER_MODE) && iproc_host->is_cmd_shadowed) { | ||
| 59 | /* Get the saved transfer mode */ | ||
| 60 | val = iproc_host->shadow_cmd; | ||
| 61 | } else if ((reg == SDHCI_BLOCK_SIZE || reg == SDHCI_BLOCK_COUNT) && | ||
| 62 | iproc_host->is_blk_shadowed) { | ||
| 63 | /* Get the saved block info */ | ||
| 64 | val = iproc_host->shadow_blk; | ||
| 65 | } else { | ||
| 66 | val = sdhci_iproc_readl(host, (reg & ~3)); | ||
| 67 | } | ||
| 68 | word = val >> REG_OFFSET_IN_BITS(reg) & 0xffff; | ||
| 53 | return word; | 69 | return word; |
| 54 | } | 70 | } |
| 55 | 71 | ||
| @@ -105,13 +121,15 @@ static void sdhci_iproc_writew(struct sdhci_host *host, u16 val, int reg) | |||
| 105 | 121 | ||
| 106 | if (reg == SDHCI_COMMAND) { | 122 | if (reg == SDHCI_COMMAND) { |
| 107 | /* Write the block now as we are issuing a command */ | 123 | /* Write the block now as we are issuing a command */ |
| 108 | if (iproc_host->shadow_blk != 0) { | 124 | if (iproc_host->is_blk_shadowed) { |
| 109 | sdhci_iproc_writel(host, iproc_host->shadow_blk, | 125 | sdhci_iproc_writel(host, iproc_host->shadow_blk, |
| 110 | SDHCI_BLOCK_SIZE); | 126 | SDHCI_BLOCK_SIZE); |
| 111 | iproc_host->shadow_blk = 0; | 127 | iproc_host->is_blk_shadowed = false; |
| 112 | } | 128 | } |
| 113 | oldval = iproc_host->shadow_cmd; | 129 | oldval = iproc_host->shadow_cmd; |
| 114 | } else if (reg == SDHCI_BLOCK_SIZE || reg == SDHCI_BLOCK_COUNT) { | 130 | iproc_host->is_cmd_shadowed = false; |
| 131 | } else if ((reg == SDHCI_BLOCK_SIZE || reg == SDHCI_BLOCK_COUNT) && | ||
| 132 | iproc_host->is_blk_shadowed) { | ||
| 115 | /* Block size and count are stored in shadow reg */ | 133 | /* Block size and count are stored in shadow reg */ |
| 116 | oldval = iproc_host->shadow_blk; | 134 | oldval = iproc_host->shadow_blk; |
| 117 | } else { | 135 | } else { |
| @@ -123,9 +141,11 @@ static void sdhci_iproc_writew(struct sdhci_host *host, u16 val, int reg) | |||
| 123 | if (reg == SDHCI_TRANSFER_MODE) { | 141 | if (reg == SDHCI_TRANSFER_MODE) { |
| 124 | /* Save the transfer mode until the command is issued */ | 142 | /* Save the transfer mode until the command is issued */ |
| 125 | iproc_host->shadow_cmd = newval; | 143 | iproc_host->shadow_cmd = newval; |
| 144 | iproc_host->is_cmd_shadowed = true; | ||
| 126 | } else if (reg == SDHCI_BLOCK_SIZE || reg == SDHCI_BLOCK_COUNT) { | 145 | } else if (reg == SDHCI_BLOCK_SIZE || reg == SDHCI_BLOCK_COUNT) { |
| 127 | /* Save the block info until the command is issued */ | 146 | /* Save the block info until the command is issued */ |
| 128 | iproc_host->shadow_blk = newval; | 147 | iproc_host->shadow_blk = newval; |
| 148 | iproc_host->is_blk_shadowed = true; | ||
| 129 | } else { | 149 | } else { |
| 130 | /* Command or other regular 32-bit write */ | 150 | /* Command or other regular 32-bit write */ |
| 131 | sdhci_iproc_writel(host, newval, reg & ~3); | 151 | sdhci_iproc_writel(host, newval, reg & ~3); |
| @@ -166,7 +186,7 @@ static const struct sdhci_ops sdhci_iproc_32only_ops = { | |||
| 166 | 186 | ||
| 167 | static const struct sdhci_pltfm_data sdhci_iproc_cygnus_pltfm_data = { | 187 | static const struct sdhci_pltfm_data sdhci_iproc_cygnus_pltfm_data = { |
| 168 | .quirks = SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK, | 188 | .quirks = SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK, |
| 169 | .quirks2 = SDHCI_QUIRK2_ACMD23_BROKEN, | 189 | .quirks2 = SDHCI_QUIRK2_ACMD23_BROKEN | SDHCI_QUIRK2_HOST_OFF_CARD_ON, |
| 170 | .ops = &sdhci_iproc_32only_ops, | 190 | .ops = &sdhci_iproc_32only_ops, |
| 171 | }; | 191 | }; |
| 172 | 192 | ||
| @@ -206,7 +226,6 @@ static const struct sdhci_iproc_data iproc_data = { | |||
| 206 | .caps1 = SDHCI_DRIVER_TYPE_C | | 226 | .caps1 = SDHCI_DRIVER_TYPE_C | |
| 207 | SDHCI_DRIVER_TYPE_D | | 227 | SDHCI_DRIVER_TYPE_D | |
| 208 | SDHCI_SUPPORT_DDR50, | 228 | SDHCI_SUPPORT_DDR50, |
| 209 | .mmc_caps = MMC_CAP_1_8V_DDR, | ||
| 210 | }; | 229 | }; |
| 211 | 230 | ||
| 212 | static const struct sdhci_pltfm_data sdhci_bcm2835_pltfm_data = { | 231 | static const struct sdhci_pltfm_data sdhci_bcm2835_pltfm_data = { |
diff --git a/drivers/mtd/nand/raw/marvell_nand.c b/drivers/mtd/nand/raw/marvell_nand.c index db5ec4e8bde9..ebb1d141b900 100644 --- a/drivers/mtd/nand/raw/marvell_nand.c +++ b/drivers/mtd/nand/raw/marvell_nand.c | |||
| @@ -1194,11 +1194,13 @@ static void marvell_nfc_hw_ecc_bch_read_chunk(struct nand_chip *chip, int chunk, | |||
| 1194 | NDCB0_CMD2(NAND_CMD_READSTART); | 1194 | NDCB0_CMD2(NAND_CMD_READSTART); |
| 1195 | 1195 | ||
| 1196 | /* | 1196 | /* |
| 1197 | * Trigger the naked read operation only on the last chunk. | 1197 | * Trigger the monolithic read on the first chunk, then naked read on |
| 1198 | * Otherwise, use monolithic read. | 1198 | * intermediate chunks and finally a last naked read on the last chunk. |
| 1199 | */ | 1199 | */ |
| 1200 | if (lt->nchunks == 1 || (chunk < lt->nchunks - 1)) | 1200 | if (chunk == 0) |
| 1201 | nfc_op.ndcb[0] |= NDCB0_CMD_XTYPE(XTYPE_MONOLITHIC_RW); | 1201 | nfc_op.ndcb[0] |= NDCB0_CMD_XTYPE(XTYPE_MONOLITHIC_RW); |
| 1202 | else if (chunk < lt->nchunks - 1) | ||
| 1203 | nfc_op.ndcb[0] |= NDCB0_CMD_XTYPE(XTYPE_NAKED_RW); | ||
| 1202 | else | 1204 | else |
| 1203 | nfc_op.ndcb[0] |= NDCB0_CMD_XTYPE(XTYPE_LAST_NAKED_RW); | 1205 | nfc_op.ndcb[0] |= NDCB0_CMD_XTYPE(XTYPE_LAST_NAKED_RW); |
| 1204 | 1206 | ||
diff --git a/drivers/net/dsa/bcm_sf2_cfp.c b/drivers/net/dsa/bcm_sf2_cfp.c index 23b45da784cb..b89acaee12d4 100644 --- a/drivers/net/dsa/bcm_sf2_cfp.c +++ b/drivers/net/dsa/bcm_sf2_cfp.c | |||
| @@ -354,10 +354,13 @@ static int bcm_sf2_cfp_ipv4_rule_set(struct bcm_sf2_priv *priv, int port, | |||
| 354 | /* Locate the first rule available */ | 354 | /* Locate the first rule available */ |
| 355 | if (fs->location == RX_CLS_LOC_ANY) | 355 | if (fs->location == RX_CLS_LOC_ANY) |
| 356 | rule_index = find_first_zero_bit(priv->cfp.used, | 356 | rule_index = find_first_zero_bit(priv->cfp.used, |
| 357 | bcm_sf2_cfp_rule_size(priv)); | 357 | priv->num_cfp_rules); |
| 358 | else | 358 | else |
| 359 | rule_index = fs->location; | 359 | rule_index = fs->location; |
| 360 | 360 | ||
| 361 | if (rule_index > bcm_sf2_cfp_rule_size(priv)) | ||
| 362 | return -ENOSPC; | ||
| 363 | |||
| 361 | layout = &udf_tcpip4_layout; | 364 | layout = &udf_tcpip4_layout; |
| 362 | /* We only use one UDF slice for now */ | 365 | /* We only use one UDF slice for now */ |
| 363 | slice_num = bcm_sf2_get_slice_number(layout, 0); | 366 | slice_num = bcm_sf2_get_slice_number(layout, 0); |
| @@ -562,19 +565,21 @@ static int bcm_sf2_cfp_ipv6_rule_set(struct bcm_sf2_priv *priv, int port, | |||
| 562 | * first half because the HW search is by incrementing addresses. | 565 | * first half because the HW search is by incrementing addresses. |
| 563 | */ | 566 | */ |
| 564 | if (fs->location == RX_CLS_LOC_ANY) | 567 | if (fs->location == RX_CLS_LOC_ANY) |
| 565 | rule_index[0] = find_first_zero_bit(priv->cfp.used, | 568 | rule_index[1] = find_first_zero_bit(priv->cfp.used, |
| 566 | bcm_sf2_cfp_rule_size(priv)); | 569 | priv->num_cfp_rules); |
| 567 | else | 570 | else |
| 568 | rule_index[0] = fs->location; | 571 | rule_index[1] = fs->location; |
| 572 | if (rule_index[1] > bcm_sf2_cfp_rule_size(priv)) | ||
| 573 | return -ENOSPC; | ||
| 569 | 574 | ||
| 570 | /* Flag it as used (cleared on error path) such that we can immediately | 575 | /* Flag it as used (cleared on error path) such that we can immediately |
| 571 | * obtain a second one to chain from. | 576 | * obtain a second one to chain from. |
| 572 | */ | 577 | */ |
| 573 | set_bit(rule_index[0], priv->cfp.used); | 578 | set_bit(rule_index[1], priv->cfp.used); |
| 574 | 579 | ||
| 575 | rule_index[1] = find_first_zero_bit(priv->cfp.used, | 580 | rule_index[0] = find_first_zero_bit(priv->cfp.used, |
| 576 | bcm_sf2_cfp_rule_size(priv)); | 581 | priv->num_cfp_rules); |
| 577 | if (rule_index[1] > bcm_sf2_cfp_rule_size(priv)) { | 582 | if (rule_index[0] > bcm_sf2_cfp_rule_size(priv)) { |
| 578 | ret = -ENOSPC; | 583 | ret = -ENOSPC; |
| 579 | goto out_err; | 584 | goto out_err; |
| 580 | } | 585 | } |
| @@ -712,14 +717,14 @@ static int bcm_sf2_cfp_ipv6_rule_set(struct bcm_sf2_priv *priv, int port, | |||
| 712 | /* Flag the second half rule as being used now, return it as the | 717 | /* Flag the second half rule as being used now, return it as the |
| 713 | * location, and flag it as unique while dumping rules | 718 | * location, and flag it as unique while dumping rules |
| 714 | */ | 719 | */ |
| 715 | set_bit(rule_index[1], priv->cfp.used); | 720 | set_bit(rule_index[0], priv->cfp.used); |
| 716 | set_bit(rule_index[1], priv->cfp.unique); | 721 | set_bit(rule_index[1], priv->cfp.unique); |
| 717 | fs->location = rule_index[1]; | 722 | fs->location = rule_index[1]; |
| 718 | 723 | ||
| 719 | return ret; | 724 | return ret; |
| 720 | 725 | ||
| 721 | out_err: | 726 | out_err: |
| 722 | clear_bit(rule_index[0], priv->cfp.used); | 727 | clear_bit(rule_index[1], priv->cfp.used); |
| 723 | return ret; | 728 | return ret; |
| 724 | } | 729 | } |
| 725 | 730 | ||
| @@ -785,10 +790,6 @@ static int bcm_sf2_cfp_rule_del_one(struct bcm_sf2_priv *priv, int port, | |||
| 785 | int ret; | 790 | int ret; |
| 786 | u32 reg; | 791 | u32 reg; |
| 787 | 792 | ||
| 788 | /* Refuse deletion of unused rules, and the default reserved rule */ | ||
| 789 | if (!test_bit(loc, priv->cfp.used) || loc == 0) | ||
| 790 | return -EINVAL; | ||
| 791 | |||
| 792 | /* Indicate which rule we want to read */ | 793 | /* Indicate which rule we want to read */ |
| 793 | bcm_sf2_cfp_rule_addr_set(priv, loc); | 794 | bcm_sf2_cfp_rule_addr_set(priv, loc); |
| 794 | 795 | ||
| @@ -826,6 +827,13 @@ static int bcm_sf2_cfp_rule_del(struct bcm_sf2_priv *priv, int port, | |||
| 826 | u32 next_loc = 0; | 827 | u32 next_loc = 0; |
| 827 | int ret; | 828 | int ret; |
| 828 | 829 | ||
| 830 | /* Refuse deleting unused rules, and those that are not unique since | ||
| 831 | * that could leave IPv6 rules with one of the chained rule in the | ||
| 832 | * table. | ||
| 833 | */ | ||
| 834 | if (!test_bit(loc, priv->cfp.unique) || loc == 0) | ||
| 835 | return -EINVAL; | ||
| 836 | |||
| 829 | ret = bcm_sf2_cfp_rule_del_one(priv, port, loc, &next_loc); | 837 | ret = bcm_sf2_cfp_rule_del_one(priv, port, loc, &next_loc); |
| 830 | if (ret) | 838 | if (ret) |
| 831 | return ret; | 839 | return ret; |
diff --git a/drivers/net/ethernet/3com/3c59x.c b/drivers/net/ethernet/3com/3c59x.c index 36c8950dbd2d..176861bd2252 100644 --- a/drivers/net/ethernet/3com/3c59x.c +++ b/drivers/net/ethernet/3com/3c59x.c | |||
| @@ -1212,9 +1212,9 @@ static int vortex_probe1(struct device *gendev, void __iomem *ioaddr, int irq, | |||
| 1212 | vp->mii.reg_num_mask = 0x1f; | 1212 | vp->mii.reg_num_mask = 0x1f; |
| 1213 | 1213 | ||
| 1214 | /* Makes sure rings are at least 16 byte aligned. */ | 1214 | /* Makes sure rings are at least 16 byte aligned. */ |
| 1215 | vp->rx_ring = pci_alloc_consistent(pdev, sizeof(struct boom_rx_desc) * RX_RING_SIZE | 1215 | vp->rx_ring = dma_alloc_coherent(gendev, sizeof(struct boom_rx_desc) * RX_RING_SIZE |
| 1216 | + sizeof(struct boom_tx_desc) * TX_RING_SIZE, | 1216 | + sizeof(struct boom_tx_desc) * TX_RING_SIZE, |
| 1217 | &vp->rx_ring_dma); | 1217 | &vp->rx_ring_dma, GFP_KERNEL); |
| 1218 | retval = -ENOMEM; | 1218 | retval = -ENOMEM; |
| 1219 | if (!vp->rx_ring) | 1219 | if (!vp->rx_ring) |
| 1220 | goto free_device; | 1220 | goto free_device; |
| @@ -1476,11 +1476,10 @@ static int vortex_probe1(struct device *gendev, void __iomem *ioaddr, int irq, | |||
| 1476 | return 0; | 1476 | return 0; |
| 1477 | 1477 | ||
| 1478 | free_ring: | 1478 | free_ring: |
| 1479 | pci_free_consistent(pdev, | 1479 | dma_free_coherent(&pdev->dev, |
| 1480 | sizeof(struct boom_rx_desc) * RX_RING_SIZE | 1480 | sizeof(struct boom_rx_desc) * RX_RING_SIZE + |
| 1481 | + sizeof(struct boom_tx_desc) * TX_RING_SIZE, | 1481 | sizeof(struct boom_tx_desc) * TX_RING_SIZE, |
| 1482 | vp->rx_ring, | 1482 | vp->rx_ring, vp->rx_ring_dma); |
| 1483 | vp->rx_ring_dma); | ||
| 1484 | free_device: | 1483 | free_device: |
| 1485 | free_netdev(dev); | 1484 | free_netdev(dev); |
| 1486 | pr_err(PFX "vortex_probe1 fails. Returns %d\n", retval); | 1485 | pr_err(PFX "vortex_probe1 fails. Returns %d\n", retval); |
| @@ -1751,9 +1750,9 @@ vortex_open(struct net_device *dev) | |||
| 1751 | break; /* Bad news! */ | 1750 | break; /* Bad news! */ |
| 1752 | 1751 | ||
| 1753 | skb_reserve(skb, NET_IP_ALIGN); /* Align IP on 16 byte boundaries */ | 1752 | skb_reserve(skb, NET_IP_ALIGN); /* Align IP on 16 byte boundaries */ |
| 1754 | dma = pci_map_single(VORTEX_PCI(vp), skb->data, | 1753 | dma = dma_map_single(vp->gendev, skb->data, |
| 1755 | PKT_BUF_SZ, PCI_DMA_FROMDEVICE); | 1754 | PKT_BUF_SZ, DMA_FROM_DEVICE); |
| 1756 | if (dma_mapping_error(&VORTEX_PCI(vp)->dev, dma)) | 1755 | if (dma_mapping_error(vp->gendev, dma)) |
| 1757 | break; | 1756 | break; |
| 1758 | vp->rx_ring[i].addr = cpu_to_le32(dma); | 1757 | vp->rx_ring[i].addr = cpu_to_le32(dma); |
| 1759 | } | 1758 | } |
| @@ -2067,9 +2066,9 @@ vortex_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
| 2067 | if (vp->bus_master) { | 2066 | if (vp->bus_master) { |
| 2068 | /* Set the bus-master controller to transfer the packet. */ | 2067 | /* Set the bus-master controller to transfer the packet. */ |
| 2069 | int len = (skb->len + 3) & ~3; | 2068 | int len = (skb->len + 3) & ~3; |
| 2070 | vp->tx_skb_dma = pci_map_single(VORTEX_PCI(vp), skb->data, len, | 2069 | vp->tx_skb_dma = dma_map_single(vp->gendev, skb->data, len, |
| 2071 | PCI_DMA_TODEVICE); | 2070 | DMA_TO_DEVICE); |
| 2072 | if (dma_mapping_error(&VORTEX_PCI(vp)->dev, vp->tx_skb_dma)) { | 2071 | if (dma_mapping_error(vp->gendev, vp->tx_skb_dma)) { |
| 2073 | dev_kfree_skb_any(skb); | 2072 | dev_kfree_skb_any(skb); |
| 2074 | dev->stats.tx_dropped++; | 2073 | dev->stats.tx_dropped++; |
| 2075 | return NETDEV_TX_OK; | 2074 | return NETDEV_TX_OK; |
| @@ -2168,9 +2167,9 @@ boomerang_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
| 2168 | vp->tx_ring[entry].status = cpu_to_le32(skb->len | TxIntrUploaded | AddTCPChksum | AddUDPChksum); | 2167 | vp->tx_ring[entry].status = cpu_to_le32(skb->len | TxIntrUploaded | AddTCPChksum | AddUDPChksum); |
| 2169 | 2168 | ||
| 2170 | if (!skb_shinfo(skb)->nr_frags) { | 2169 | if (!skb_shinfo(skb)->nr_frags) { |
| 2171 | dma_addr = pci_map_single(VORTEX_PCI(vp), skb->data, skb->len, | 2170 | dma_addr = dma_map_single(vp->gendev, skb->data, skb->len, |
| 2172 | PCI_DMA_TODEVICE); | 2171 | DMA_TO_DEVICE); |
| 2173 | if (dma_mapping_error(&VORTEX_PCI(vp)->dev, dma_addr)) | 2172 | if (dma_mapping_error(vp->gendev, dma_addr)) |
| 2174 | goto out_dma_err; | 2173 | goto out_dma_err; |
| 2175 | 2174 | ||
| 2176 | vp->tx_ring[entry].frag[0].addr = cpu_to_le32(dma_addr); | 2175 | vp->tx_ring[entry].frag[0].addr = cpu_to_le32(dma_addr); |
| @@ -2178,9 +2177,9 @@ boomerang_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
| 2178 | } else { | 2177 | } else { |
| 2179 | int i; | 2178 | int i; |
| 2180 | 2179 | ||
| 2181 | dma_addr = pci_map_single(VORTEX_PCI(vp), skb->data, | 2180 | dma_addr = dma_map_single(vp->gendev, skb->data, |
| 2182 | skb_headlen(skb), PCI_DMA_TODEVICE); | 2181 | skb_headlen(skb), DMA_TO_DEVICE); |
| 2183 | if (dma_mapping_error(&VORTEX_PCI(vp)->dev, dma_addr)) | 2182 | if (dma_mapping_error(vp->gendev, dma_addr)) |
| 2184 | goto out_dma_err; | 2183 | goto out_dma_err; |
| 2185 | 2184 | ||
| 2186 | vp->tx_ring[entry].frag[0].addr = cpu_to_le32(dma_addr); | 2185 | vp->tx_ring[entry].frag[0].addr = cpu_to_le32(dma_addr); |
| @@ -2189,21 +2188,21 @@ boomerang_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
| 2189 | for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { | 2188 | for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { |
| 2190 | skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; | 2189 | skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; |
| 2191 | 2190 | ||
| 2192 | dma_addr = skb_frag_dma_map(&VORTEX_PCI(vp)->dev, frag, | 2191 | dma_addr = skb_frag_dma_map(vp->gendev, frag, |
| 2193 | 0, | 2192 | 0, |
| 2194 | frag->size, | 2193 | frag->size, |
| 2195 | DMA_TO_DEVICE); | 2194 | DMA_TO_DEVICE); |
| 2196 | if (dma_mapping_error(&VORTEX_PCI(vp)->dev, dma_addr)) { | 2195 | if (dma_mapping_error(vp->gendev, dma_addr)) { |
| 2197 | for(i = i-1; i >= 0; i--) | 2196 | for(i = i-1; i >= 0; i--) |
| 2198 | dma_unmap_page(&VORTEX_PCI(vp)->dev, | 2197 | dma_unmap_page(vp->gendev, |
| 2199 | le32_to_cpu(vp->tx_ring[entry].frag[i+1].addr), | 2198 | le32_to_cpu(vp->tx_ring[entry].frag[i+1].addr), |
| 2200 | le32_to_cpu(vp->tx_ring[entry].frag[i+1].length), | 2199 | le32_to_cpu(vp->tx_ring[entry].frag[i+1].length), |
| 2201 | DMA_TO_DEVICE); | 2200 | DMA_TO_DEVICE); |
| 2202 | 2201 | ||
| 2203 | pci_unmap_single(VORTEX_PCI(vp), | 2202 | dma_unmap_single(vp->gendev, |
| 2204 | le32_to_cpu(vp->tx_ring[entry].frag[0].addr), | 2203 | le32_to_cpu(vp->tx_ring[entry].frag[0].addr), |
| 2205 | le32_to_cpu(vp->tx_ring[entry].frag[0].length), | 2204 | le32_to_cpu(vp->tx_ring[entry].frag[0].length), |
| 2206 | PCI_DMA_TODEVICE); | 2205 | DMA_TO_DEVICE); |
| 2207 | 2206 | ||
| 2208 | goto out_dma_err; | 2207 | goto out_dma_err; |
| 2209 | } | 2208 | } |
| @@ -2218,8 +2217,8 @@ boomerang_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
| 2218 | } | 2217 | } |
| 2219 | } | 2218 | } |
| 2220 | #else | 2219 | #else |
| 2221 | dma_addr = pci_map_single(VORTEX_PCI(vp), skb->data, skb->len, PCI_DMA_TODEVICE); | 2220 | dma_addr = dma_map_single(vp->gendev, skb->data, skb->len, DMA_TO_DEVICE); |
| 2222 | if (dma_mapping_error(&VORTEX_PCI(vp)->dev, dma_addr)) | 2221 | if (dma_mapping_error(vp->gendev, dma_addr)) |
| 2223 | goto out_dma_err; | 2222 | goto out_dma_err; |
| 2224 | vp->tx_ring[entry].addr = cpu_to_le32(dma_addr); | 2223 | vp->tx_ring[entry].addr = cpu_to_le32(dma_addr); |
| 2225 | vp->tx_ring[entry].length = cpu_to_le32(skb->len | LAST_FRAG); | 2224 | vp->tx_ring[entry].length = cpu_to_le32(skb->len | LAST_FRAG); |
| @@ -2254,7 +2253,7 @@ boomerang_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
| 2254 | out: | 2253 | out: |
| 2255 | return NETDEV_TX_OK; | 2254 | return NETDEV_TX_OK; |
| 2256 | out_dma_err: | 2255 | out_dma_err: |
| 2257 | dev_err(&VORTEX_PCI(vp)->dev, "Error mapping dma buffer\n"); | 2256 | dev_err(vp->gendev, "Error mapping dma buffer\n"); |
| 2258 | goto out; | 2257 | goto out; |
| 2259 | } | 2258 | } |
| 2260 | 2259 | ||
| @@ -2322,7 +2321,7 @@ vortex_interrupt(int irq, void *dev_id) | |||
| 2322 | if (status & DMADone) { | 2321 | if (status & DMADone) { |
| 2323 | if (ioread16(ioaddr + Wn7_MasterStatus) & 0x1000) { | 2322 | if (ioread16(ioaddr + Wn7_MasterStatus) & 0x1000) { |
| 2324 | iowrite16(0x1000, ioaddr + Wn7_MasterStatus); /* Ack the event. */ | 2323 | iowrite16(0x1000, ioaddr + Wn7_MasterStatus); /* Ack the event. */ |
| 2325 | pci_unmap_single(VORTEX_PCI(vp), vp->tx_skb_dma, (vp->tx_skb->len + 3) & ~3, PCI_DMA_TODEVICE); | 2324 | dma_unmap_single(vp->gendev, vp->tx_skb_dma, (vp->tx_skb->len + 3) & ~3, DMA_TO_DEVICE); |
| 2326 | pkts_compl++; | 2325 | pkts_compl++; |
| 2327 | bytes_compl += vp->tx_skb->len; | 2326 | bytes_compl += vp->tx_skb->len; |
| 2328 | dev_kfree_skb_irq(vp->tx_skb); /* Release the transferred buffer */ | 2327 | dev_kfree_skb_irq(vp->tx_skb); /* Release the transferred buffer */ |
| @@ -2459,19 +2458,19 @@ boomerang_interrupt(int irq, void *dev_id) | |||
| 2459 | struct sk_buff *skb = vp->tx_skbuff[entry]; | 2458 | struct sk_buff *skb = vp->tx_skbuff[entry]; |
| 2460 | #if DO_ZEROCOPY | 2459 | #if DO_ZEROCOPY |
| 2461 | int i; | 2460 | int i; |
| 2462 | pci_unmap_single(VORTEX_PCI(vp), | 2461 | dma_unmap_single(vp->gendev, |
| 2463 | le32_to_cpu(vp->tx_ring[entry].frag[0].addr), | 2462 | le32_to_cpu(vp->tx_ring[entry].frag[0].addr), |
| 2464 | le32_to_cpu(vp->tx_ring[entry].frag[0].length)&0xFFF, | 2463 | le32_to_cpu(vp->tx_ring[entry].frag[0].length)&0xFFF, |
| 2465 | PCI_DMA_TODEVICE); | 2464 | DMA_TO_DEVICE); |
| 2466 | 2465 | ||
| 2467 | for (i=1; i<=skb_shinfo(skb)->nr_frags; i++) | 2466 | for (i=1; i<=skb_shinfo(skb)->nr_frags; i++) |
| 2468 | pci_unmap_page(VORTEX_PCI(vp), | 2467 | dma_unmap_page(vp->gendev, |
| 2469 | le32_to_cpu(vp->tx_ring[entry].frag[i].addr), | 2468 | le32_to_cpu(vp->tx_ring[entry].frag[i].addr), |
| 2470 | le32_to_cpu(vp->tx_ring[entry].frag[i].length)&0xFFF, | 2469 | le32_to_cpu(vp->tx_ring[entry].frag[i].length)&0xFFF, |
| 2471 | PCI_DMA_TODEVICE); | 2470 | DMA_TO_DEVICE); |
| 2472 | #else | 2471 | #else |
| 2473 | pci_unmap_single(VORTEX_PCI(vp), | 2472 | dma_unmap_single(vp->gendev, |
| 2474 | le32_to_cpu(vp->tx_ring[entry].addr), skb->len, PCI_DMA_TODEVICE); | 2473 | le32_to_cpu(vp->tx_ring[entry].addr), skb->len, DMA_TO_DEVICE); |
| 2475 | #endif | 2474 | #endif |
| 2476 | pkts_compl++; | 2475 | pkts_compl++; |
| 2477 | bytes_compl += skb->len; | 2476 | bytes_compl += skb->len; |
| @@ -2561,14 +2560,14 @@ static int vortex_rx(struct net_device *dev) | |||
| 2561 | /* 'skb_put()' points to the start of sk_buff data area. */ | 2560 | /* 'skb_put()' points to the start of sk_buff data area. */ |
| 2562 | if (vp->bus_master && | 2561 | if (vp->bus_master && |
| 2563 | ! (ioread16(ioaddr + Wn7_MasterStatus) & 0x8000)) { | 2562 | ! (ioread16(ioaddr + Wn7_MasterStatus) & 0x8000)) { |
| 2564 | dma_addr_t dma = pci_map_single(VORTEX_PCI(vp), skb_put(skb, pkt_len), | 2563 | dma_addr_t dma = dma_map_single(vp->gendev, skb_put(skb, pkt_len), |
| 2565 | pkt_len, PCI_DMA_FROMDEVICE); | 2564 | pkt_len, DMA_FROM_DEVICE); |
| 2566 | iowrite32(dma, ioaddr + Wn7_MasterAddr); | 2565 | iowrite32(dma, ioaddr + Wn7_MasterAddr); |
| 2567 | iowrite16((skb->len + 3) & ~3, ioaddr + Wn7_MasterLen); | 2566 | iowrite16((skb->len + 3) & ~3, ioaddr + Wn7_MasterLen); |
| 2568 | iowrite16(StartDMAUp, ioaddr + EL3_CMD); | 2567 | iowrite16(StartDMAUp, ioaddr + EL3_CMD); |
| 2569 | while (ioread16(ioaddr + Wn7_MasterStatus) & 0x8000) | 2568 | while (ioread16(ioaddr + Wn7_MasterStatus) & 0x8000) |
| 2570 | ; | 2569 | ; |
| 2571 | pci_unmap_single(VORTEX_PCI(vp), dma, pkt_len, PCI_DMA_FROMDEVICE); | 2570 | dma_unmap_single(vp->gendev, dma, pkt_len, DMA_FROM_DEVICE); |
| 2572 | } else { | 2571 | } else { |
| 2573 | ioread32_rep(ioaddr + RX_FIFO, | 2572 | ioread32_rep(ioaddr + RX_FIFO, |
| 2574 | skb_put(skb, pkt_len), | 2573 | skb_put(skb, pkt_len), |
| @@ -2635,11 +2634,11 @@ boomerang_rx(struct net_device *dev) | |||
| 2635 | if (pkt_len < rx_copybreak && | 2634 | if (pkt_len < rx_copybreak && |
| 2636 | (skb = netdev_alloc_skb(dev, pkt_len + 2)) != NULL) { | 2635 | (skb = netdev_alloc_skb(dev, pkt_len + 2)) != NULL) { |
| 2637 | skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */ | 2636 | skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */ |
| 2638 | pci_dma_sync_single_for_cpu(VORTEX_PCI(vp), dma, PKT_BUF_SZ, PCI_DMA_FROMDEVICE); | 2637 | dma_sync_single_for_cpu(vp->gendev, dma, PKT_BUF_SZ, DMA_FROM_DEVICE); |
| 2639 | /* 'skb_put()' points to the start of sk_buff data area. */ | 2638 | /* 'skb_put()' points to the start of sk_buff data area. */ |
| 2640 | skb_put_data(skb, vp->rx_skbuff[entry]->data, | 2639 | skb_put_data(skb, vp->rx_skbuff[entry]->data, |
| 2641 | pkt_len); | 2640 | pkt_len); |
| 2642 | pci_dma_sync_single_for_device(VORTEX_PCI(vp), dma, PKT_BUF_SZ, PCI_DMA_FROMDEVICE); | 2641 | dma_sync_single_for_device(vp->gendev, dma, PKT_BUF_SZ, DMA_FROM_DEVICE); |
| 2643 | vp->rx_copy++; | 2642 | vp->rx_copy++; |
| 2644 | } else { | 2643 | } else { |
| 2645 | /* Pre-allocate the replacement skb. If it or its | 2644 | /* Pre-allocate the replacement skb. If it or its |
| @@ -2651,9 +2650,9 @@ boomerang_rx(struct net_device *dev) | |||
| 2651 | dev->stats.rx_dropped++; | 2650 | dev->stats.rx_dropped++; |
| 2652 | goto clear_complete; | 2651 | goto clear_complete; |
| 2653 | } | 2652 | } |
| 2654 | newdma = pci_map_single(VORTEX_PCI(vp), newskb->data, | 2653 | newdma = dma_map_single(vp->gendev, newskb->data, |
| 2655 | PKT_BUF_SZ, PCI_DMA_FROMDEVICE); | 2654 | PKT_BUF_SZ, DMA_FROM_DEVICE); |
| 2656 | if (dma_mapping_error(&VORTEX_PCI(vp)->dev, newdma)) { | 2655 | if (dma_mapping_error(vp->gendev, newdma)) { |
| 2657 | dev->stats.rx_dropped++; | 2656 | dev->stats.rx_dropped++; |
| 2658 | consume_skb(newskb); | 2657 | consume_skb(newskb); |
| 2659 | goto clear_complete; | 2658 | goto clear_complete; |
| @@ -2664,7 +2663,7 @@ boomerang_rx(struct net_device *dev) | |||
| 2664 | vp->rx_skbuff[entry] = newskb; | 2663 | vp->rx_skbuff[entry] = newskb; |
| 2665 | vp->rx_ring[entry].addr = cpu_to_le32(newdma); | 2664 | vp->rx_ring[entry].addr = cpu_to_le32(newdma); |
| 2666 | skb_put(skb, pkt_len); | 2665 | skb_put(skb, pkt_len); |
| 2667 | pci_unmap_single(VORTEX_PCI(vp), dma, PKT_BUF_SZ, PCI_DMA_FROMDEVICE); | 2666 | dma_unmap_single(vp->gendev, dma, PKT_BUF_SZ, DMA_FROM_DEVICE); |
| 2668 | vp->rx_nocopy++; | 2667 | vp->rx_nocopy++; |
| 2669 | } | 2668 | } |
| 2670 | skb->protocol = eth_type_trans(skb, dev); | 2669 | skb->protocol = eth_type_trans(skb, dev); |
| @@ -2761,8 +2760,8 @@ vortex_close(struct net_device *dev) | |||
| 2761 | if (vp->full_bus_master_rx) { /* Free Boomerang bus master Rx buffers. */ | 2760 | if (vp->full_bus_master_rx) { /* Free Boomerang bus master Rx buffers. */ |
| 2762 | for (i = 0; i < RX_RING_SIZE; i++) | 2761 | for (i = 0; i < RX_RING_SIZE; i++) |
| 2763 | if (vp->rx_skbuff[i]) { | 2762 | if (vp->rx_skbuff[i]) { |
| 2764 | pci_unmap_single( VORTEX_PCI(vp), le32_to_cpu(vp->rx_ring[i].addr), | 2763 | dma_unmap_single(vp->gendev, le32_to_cpu(vp->rx_ring[i].addr), |
| 2765 | PKT_BUF_SZ, PCI_DMA_FROMDEVICE); | 2764 | PKT_BUF_SZ, DMA_FROM_DEVICE); |
| 2766 | dev_kfree_skb(vp->rx_skbuff[i]); | 2765 | dev_kfree_skb(vp->rx_skbuff[i]); |
| 2767 | vp->rx_skbuff[i] = NULL; | 2766 | vp->rx_skbuff[i] = NULL; |
| 2768 | } | 2767 | } |
| @@ -2775,12 +2774,12 @@ vortex_close(struct net_device *dev) | |||
| 2775 | int k; | 2774 | int k; |
| 2776 | 2775 | ||
| 2777 | for (k=0; k<=skb_shinfo(skb)->nr_frags; k++) | 2776 | for (k=0; k<=skb_shinfo(skb)->nr_frags; k++) |
| 2778 | pci_unmap_single(VORTEX_PCI(vp), | 2777 | dma_unmap_single(vp->gendev, |
| 2779 | le32_to_cpu(vp->tx_ring[i].frag[k].addr), | 2778 | le32_to_cpu(vp->tx_ring[i].frag[k].addr), |
| 2780 | le32_to_cpu(vp->tx_ring[i].frag[k].length)&0xFFF, | 2779 | le32_to_cpu(vp->tx_ring[i].frag[k].length)&0xFFF, |
| 2781 | PCI_DMA_TODEVICE); | 2780 | DMA_TO_DEVICE); |
| 2782 | #else | 2781 | #else |
| 2783 | pci_unmap_single(VORTEX_PCI(vp), le32_to_cpu(vp->tx_ring[i].addr), skb->len, PCI_DMA_TODEVICE); | 2782 | dma_unmap_single(vp->gendev, le32_to_cpu(vp->tx_ring[i].addr), skb->len, DMA_TO_DEVICE); |
| 2784 | #endif | 2783 | #endif |
| 2785 | dev_kfree_skb(skb); | 2784 | dev_kfree_skb(skb); |
| 2786 | vp->tx_skbuff[i] = NULL; | 2785 | vp->tx_skbuff[i] = NULL; |
| @@ -3288,11 +3287,10 @@ static void vortex_remove_one(struct pci_dev *pdev) | |||
| 3288 | 3287 | ||
| 3289 | pci_iounmap(pdev, vp->ioaddr); | 3288 | pci_iounmap(pdev, vp->ioaddr); |
| 3290 | 3289 | ||
| 3291 | pci_free_consistent(pdev, | 3290 | dma_free_coherent(&pdev->dev, |
| 3292 | sizeof(struct boom_rx_desc) * RX_RING_SIZE | 3291 | sizeof(struct boom_rx_desc) * RX_RING_SIZE + |
| 3293 | + sizeof(struct boom_tx_desc) * TX_RING_SIZE, | 3292 | sizeof(struct boom_tx_desc) * TX_RING_SIZE, |
| 3294 | vp->rx_ring, | 3293 | vp->rx_ring, vp->rx_ring_dma); |
| 3295 | vp->rx_ring_dma); | ||
| 3296 | 3294 | ||
| 3297 | pci_release_regions(pdev); | 3295 | pci_release_regions(pdev); |
| 3298 | 3296 | ||
diff --git a/drivers/net/ethernet/8390/ne.c b/drivers/net/ethernet/8390/ne.c index ac99d089ac72..1c97e39b478e 100644 --- a/drivers/net/ethernet/8390/ne.c +++ b/drivers/net/ethernet/8390/ne.c | |||
| @@ -164,7 +164,9 @@ bad_clone_list[] __initdata = { | |||
| 164 | #define NESM_START_PG 0x40 /* First page of TX buffer */ | 164 | #define NESM_START_PG 0x40 /* First page of TX buffer */ |
| 165 | #define NESM_STOP_PG 0x80 /* Last page +1 of RX ring */ | 165 | #define NESM_STOP_PG 0x80 /* Last page +1 of RX ring */ |
| 166 | 166 | ||
| 167 | #if defined(CONFIG_ATARI) /* 8-bit mode on Atari, normal on Q40 */ | 167 | #if defined(CONFIG_MACH_TX49XX) |
| 168 | # define DCR_VAL 0x48 /* 8-bit mode */ | ||
| 169 | #elif defined(CONFIG_ATARI) /* 8-bit mode on Atari, normal on Q40 */ | ||
| 168 | # define DCR_VAL (MACH_IS_ATARI ? 0x48 : 0x49) | 170 | # define DCR_VAL (MACH_IS_ATARI ? 0x48 : 0x49) |
| 169 | #else | 171 | #else |
| 170 | # define DCR_VAL 0x49 | 172 | # define DCR_VAL 0x49 |
diff --git a/drivers/net/ethernet/amd/pcnet32.c b/drivers/net/ethernet/amd/pcnet32.c index a561705f232c..be198cc0b10c 100644 --- a/drivers/net/ethernet/amd/pcnet32.c +++ b/drivers/net/ethernet/amd/pcnet32.c | |||
| @@ -1552,22 +1552,26 @@ pcnet32_probe_pci(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
| 1552 | if (!ioaddr) { | 1552 | if (!ioaddr) { |
| 1553 | if (pcnet32_debug & NETIF_MSG_PROBE) | 1553 | if (pcnet32_debug & NETIF_MSG_PROBE) |
| 1554 | pr_err("card has no PCI IO resources, aborting\n"); | 1554 | pr_err("card has no PCI IO resources, aborting\n"); |
| 1555 | return -ENODEV; | 1555 | err = -ENODEV; |
| 1556 | goto err_disable_dev; | ||
| 1556 | } | 1557 | } |
| 1557 | 1558 | ||
| 1558 | err = pci_set_dma_mask(pdev, PCNET32_DMA_MASK); | 1559 | err = pci_set_dma_mask(pdev, PCNET32_DMA_MASK); |
| 1559 | if (err) { | 1560 | if (err) { |
| 1560 | if (pcnet32_debug & NETIF_MSG_PROBE) | 1561 | if (pcnet32_debug & NETIF_MSG_PROBE) |
| 1561 | pr_err("architecture does not support 32bit PCI busmaster DMA\n"); | 1562 | pr_err("architecture does not support 32bit PCI busmaster DMA\n"); |
| 1562 | return err; | 1563 | goto err_disable_dev; |
| 1563 | } | 1564 | } |
| 1564 | if (!request_region(ioaddr, PCNET32_TOTAL_SIZE, "pcnet32_probe_pci")) { | 1565 | if (!request_region(ioaddr, PCNET32_TOTAL_SIZE, "pcnet32_probe_pci")) { |
| 1565 | if (pcnet32_debug & NETIF_MSG_PROBE) | 1566 | if (pcnet32_debug & NETIF_MSG_PROBE) |
| 1566 | pr_err("io address range already allocated\n"); | 1567 | pr_err("io address range already allocated\n"); |
| 1567 | return -EBUSY; | 1568 | err = -EBUSY; |
| 1569 | goto err_disable_dev; | ||
| 1568 | } | 1570 | } |
| 1569 | 1571 | ||
| 1570 | err = pcnet32_probe1(ioaddr, 1, pdev); | 1572 | err = pcnet32_probe1(ioaddr, 1, pdev); |
| 1573 | |||
| 1574 | err_disable_dev: | ||
| 1571 | if (err < 0) | 1575 | if (err < 0) |
| 1572 | pci_disable_device(pdev); | 1576 | pci_disable_device(pdev); |
| 1573 | 1577 | ||
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h b/drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h index b57acb8dc35b..dc25066c59a1 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h | |||
| @@ -419,15 +419,15 @@ static const u32 t6_up_cim_reg_array[][IREG_NUM_ELEM + 1] = { | |||
| 419 | {0x7b50, 0x7b54, 0x280, 0x20, 0}, /* up_cim_280_to_2fc */ | 419 | {0x7b50, 0x7b54, 0x280, 0x20, 0}, /* up_cim_280_to_2fc */ |
| 420 | {0x7b50, 0x7b54, 0x300, 0x20, 0}, /* up_cim_300_to_37c */ | 420 | {0x7b50, 0x7b54, 0x300, 0x20, 0}, /* up_cim_300_to_37c */ |
| 421 | {0x7b50, 0x7b54, 0x380, 0x14, 0}, /* up_cim_380_to_3cc */ | 421 | {0x7b50, 0x7b54, 0x380, 0x14, 0}, /* up_cim_380_to_3cc */ |
| 422 | {0x7b50, 0x7b54, 0x2900, 0x4, 0x4}, /* up_cim_2900_to_3d40 */ | 422 | {0x7b50, 0x7b54, 0x4900, 0x4, 0x4}, /* up_cim_4900_to_4c60 */ |
| 423 | {0x7b50, 0x7b54, 0x2904, 0x4, 0x4}, /* up_cim_2904_to_3d44 */ | 423 | {0x7b50, 0x7b54, 0x4904, 0x4, 0x4}, /* up_cim_4904_to_4c64 */ |
| 424 | {0x7b50, 0x7b54, 0x2908, 0x4, 0x4}, /* up_cim_2908_to_3d48 */ | 424 | {0x7b50, 0x7b54, 0x4908, 0x4, 0x4}, /* up_cim_4908_to_4c68 */ |
| 425 | {0x7b50, 0x7b54, 0x2910, 0x4, 0x4}, /* up_cim_2910_to_3d4c */ | 425 | {0x7b50, 0x7b54, 0x4910, 0x4, 0x4}, /* up_cim_4910_to_4c70 */ |
| 426 | {0x7b50, 0x7b54, 0x2914, 0x4, 0x4}, /* up_cim_2914_to_3d50 */ | 426 | {0x7b50, 0x7b54, 0x4914, 0x4, 0x4}, /* up_cim_4914_to_4c74 */ |
| 427 | {0x7b50, 0x7b54, 0x2920, 0x10, 0x10}, /* up_cim_2920_to_2a10 */ | 427 | {0x7b50, 0x7b54, 0x4920, 0x10, 0x10}, /* up_cim_4920_to_4a10 */ |
| 428 | {0x7b50, 0x7b54, 0x2924, 0x10, 0x10}, /* up_cim_2924_to_2a14 */ | 428 | {0x7b50, 0x7b54, 0x4924, 0x10, 0x10}, /* up_cim_4924_to_4a14 */ |
| 429 | {0x7b50, 0x7b54, 0x2928, 0x10, 0x10}, /* up_cim_2928_to_2a18 */ | 429 | {0x7b50, 0x7b54, 0x4928, 0x10, 0x10}, /* up_cim_4928_to_4a18 */ |
| 430 | {0x7b50, 0x7b54, 0x292c, 0x10, 0x10}, /* up_cim_292c_to_2a1c */ | 430 | {0x7b50, 0x7b54, 0x492c, 0x10, 0x10}, /* up_cim_492c_to_4a1c */ |
| 431 | }; | 431 | }; |
| 432 | 432 | ||
| 433 | static const u32 t5_up_cim_reg_array[][IREG_NUM_ELEM + 1] = { | 433 | static const u32 t5_up_cim_reg_array[][IREG_NUM_ELEM + 1] = { |
| @@ -444,16 +444,6 @@ static const u32 t5_up_cim_reg_array[][IREG_NUM_ELEM + 1] = { | |||
| 444 | {0x7b50, 0x7b54, 0x280, 0x20, 0}, /* up_cim_280_to_2fc */ | 444 | {0x7b50, 0x7b54, 0x280, 0x20, 0}, /* up_cim_280_to_2fc */ |
| 445 | {0x7b50, 0x7b54, 0x300, 0x20, 0}, /* up_cim_300_to_37c */ | 445 | {0x7b50, 0x7b54, 0x300, 0x20, 0}, /* up_cim_300_to_37c */ |
| 446 | {0x7b50, 0x7b54, 0x380, 0x14, 0}, /* up_cim_380_to_3cc */ | 446 | {0x7b50, 0x7b54, 0x380, 0x14, 0}, /* up_cim_380_to_3cc */ |
| 447 | {0x7b50, 0x7b54, 0x2900, 0x4, 0x4}, /* up_cim_2900_to_3d40 */ | ||
| 448 | {0x7b50, 0x7b54, 0x2904, 0x4, 0x4}, /* up_cim_2904_to_3d44 */ | ||
| 449 | {0x7b50, 0x7b54, 0x2908, 0x4, 0x4}, /* up_cim_2908_to_3d48 */ | ||
| 450 | {0x7b50, 0x7b54, 0x2910, 0x4, 0x4}, /* up_cim_2910_to_3d4c */ | ||
| 451 | {0x7b50, 0x7b54, 0x2914, 0x4, 0x4}, /* up_cim_2914_to_3d50 */ | ||
| 452 | {0x7b50, 0x7b54, 0x2918, 0x4, 0x4}, /* up_cim_2918_to_3d54 */ | ||
| 453 | {0x7b50, 0x7b54, 0x291c, 0x4, 0x4}, /* up_cim_291c_to_3d58 */ | ||
| 454 | {0x7b50, 0x7b54, 0x2924, 0x10, 0x10}, /* up_cim_2924_to_2914 */ | ||
| 455 | {0x7b50, 0x7b54, 0x2928, 0x10, 0x10}, /* up_cim_2928_to_2a18 */ | ||
| 456 | {0x7b50, 0x7b54, 0x292c, 0x10, 0x10}, /* up_cim_292c_to_2a1c */ | ||
| 457 | }; | 447 | }; |
| 458 | 448 | ||
| 459 | static const u32 t6_hma_ireg_array[][IREG_NUM_ELEM] = { | 449 | static const u32 t6_hma_ireg_array[][IREG_NUM_ELEM] = { |
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c index db92f1858060..b76447baccaf 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c | |||
| @@ -836,7 +836,7 @@ bool is_filter_exact_match(struct adapter *adap, | |||
| 836 | { | 836 | { |
| 837 | struct tp_params *tp = &adap->params.tp; | 837 | struct tp_params *tp = &adap->params.tp; |
| 838 | u64 hash_filter_mask = tp->hash_filter_mask; | 838 | u64 hash_filter_mask = tp->hash_filter_mask; |
| 839 | u32 mask; | 839 | u64 ntuple_mask = 0; |
| 840 | 840 | ||
| 841 | if (!is_hashfilter(adap)) | 841 | if (!is_hashfilter(adap)) |
| 842 | return false; | 842 | return false; |
| @@ -865,73 +865,45 @@ bool is_filter_exact_match(struct adapter *adap, | |||
| 865 | if (!fs->val.fport || fs->mask.fport != 0xffff) | 865 | if (!fs->val.fport || fs->mask.fport != 0xffff) |
| 866 | return false; | 866 | return false; |
| 867 | 867 | ||
| 868 | if (tp->fcoe_shift >= 0) { | 868 | /* calculate tuple mask and compare with mask configured in hw */ |
| 869 | mask = (hash_filter_mask >> tp->fcoe_shift) & FT_FCOE_W; | 869 | if (tp->fcoe_shift >= 0) |
| 870 | if (mask && !fs->mask.fcoe) | 870 | ntuple_mask |= (u64)fs->mask.fcoe << tp->fcoe_shift; |
| 871 | return false; | ||
| 872 | } | ||
| 873 | 871 | ||
| 874 | if (tp->port_shift >= 0) { | 872 | if (tp->port_shift >= 0) |
| 875 | mask = (hash_filter_mask >> tp->port_shift) & FT_PORT_W; | 873 | ntuple_mask |= (u64)fs->mask.iport << tp->port_shift; |
| 876 | if (mask && !fs->mask.iport) | ||
| 877 | return false; | ||
| 878 | } | ||
| 879 | 874 | ||
| 880 | if (tp->vnic_shift >= 0) { | 875 | if (tp->vnic_shift >= 0) { |
| 881 | mask = (hash_filter_mask >> tp->vnic_shift) & FT_VNIC_ID_W; | 876 | if ((adap->params.tp.ingress_config & VNIC_F)) |
| 882 | 877 | ntuple_mask |= (u64)fs->mask.pfvf_vld << tp->vnic_shift; | |
| 883 | if ((adap->params.tp.ingress_config & VNIC_F)) { | 878 | else |
| 884 | if (mask && !fs->mask.pfvf_vld) | 879 | ntuple_mask |= (u64)fs->mask.ovlan_vld << |
| 885 | return false; | 880 | tp->vnic_shift; |
| 886 | } else { | ||
| 887 | if (mask && !fs->mask.ovlan_vld) | ||
| 888 | return false; | ||
| 889 | } | ||
| 890 | } | 881 | } |
| 891 | 882 | ||
| 892 | if (tp->vlan_shift >= 0) { | 883 | if (tp->vlan_shift >= 0) |
| 893 | mask = (hash_filter_mask >> tp->vlan_shift) & FT_VLAN_W; | 884 | ntuple_mask |= (u64)fs->mask.ivlan << tp->vlan_shift; |
| 894 | if (mask && !fs->mask.ivlan) | ||
| 895 | return false; | ||
| 896 | } | ||
| 897 | 885 | ||
| 898 | if (tp->tos_shift >= 0) { | 886 | if (tp->tos_shift >= 0) |
| 899 | mask = (hash_filter_mask >> tp->tos_shift) & FT_TOS_W; | 887 | ntuple_mask |= (u64)fs->mask.tos << tp->tos_shift; |
| 900 | if (mask && !fs->mask.tos) | ||
| 901 | return false; | ||
| 902 | } | ||
| 903 | 888 | ||
| 904 | if (tp->protocol_shift >= 0) { | 889 | if (tp->protocol_shift >= 0) |
| 905 | mask = (hash_filter_mask >> tp->protocol_shift) & FT_PROTOCOL_W; | 890 | ntuple_mask |= (u64)fs->mask.proto << tp->protocol_shift; |
| 906 | if (mask && !fs->mask.proto) | ||
| 907 | return false; | ||
| 908 | } | ||
| 909 | 891 | ||
| 910 | if (tp->ethertype_shift >= 0) { | 892 | if (tp->ethertype_shift >= 0) |
| 911 | mask = (hash_filter_mask >> tp->ethertype_shift) & | 893 | ntuple_mask |= (u64)fs->mask.ethtype << tp->ethertype_shift; |
| 912 | FT_ETHERTYPE_W; | ||
| 913 | if (mask && !fs->mask.ethtype) | ||
| 914 | return false; | ||
| 915 | } | ||
| 916 | 894 | ||
| 917 | if (tp->macmatch_shift >= 0) { | 895 | if (tp->macmatch_shift >= 0) |
| 918 | mask = (hash_filter_mask >> tp->macmatch_shift) & FT_MACMATCH_W; | 896 | ntuple_mask |= (u64)fs->mask.macidx << tp->macmatch_shift; |
| 919 | if (mask && !fs->mask.macidx) | 897 | |
| 920 | return false; | 898 | if (tp->matchtype_shift >= 0) |
| 921 | } | 899 | ntuple_mask |= (u64)fs->mask.matchtype << tp->matchtype_shift; |
| 900 | |||
| 901 | if (tp->frag_shift >= 0) | ||
| 902 | ntuple_mask |= (u64)fs->mask.frag << tp->frag_shift; | ||
| 903 | |||
| 904 | if (ntuple_mask != hash_filter_mask) | ||
| 905 | return false; | ||
| 922 | 906 | ||
| 923 | if (tp->matchtype_shift >= 0) { | ||
| 924 | mask = (hash_filter_mask >> tp->matchtype_shift) & | ||
| 925 | FT_MPSHITTYPE_W; | ||
| 926 | if (mask && !fs->mask.matchtype) | ||
| 927 | return false; | ||
| 928 | } | ||
| 929 | if (tp->frag_shift >= 0) { | ||
| 930 | mask = (hash_filter_mask >> tp->frag_shift) & | ||
| 931 | FT_FRAGMENTATION_W; | ||
| 932 | if (mask && !fs->mask.frag) | ||
| 933 | return false; | ||
| 934 | } | ||
| 935 | return true; | 907 | return true; |
| 936 | } | 908 | } |
| 937 | 909 | ||
diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c index 81684acf52af..8a8b12b720ef 100644 --- a/drivers/net/ethernet/cisco/enic/enic_main.c +++ b/drivers/net/ethernet/cisco/enic/enic_main.c | |||
| @@ -2747,11 +2747,11 @@ static int enic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
| 2747 | pci_set_master(pdev); | 2747 | pci_set_master(pdev); |
| 2748 | 2748 | ||
| 2749 | /* Query PCI controller on system for DMA addressing | 2749 | /* Query PCI controller on system for DMA addressing |
| 2750 | * limitation for the device. Try 64-bit first, and | 2750 | * limitation for the device. Try 47-bit first, and |
| 2751 | * fail to 32-bit. | 2751 | * fail to 32-bit. |
| 2752 | */ | 2752 | */ |
| 2753 | 2753 | ||
| 2754 | err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64)); | 2754 | err = pci_set_dma_mask(pdev, DMA_BIT_MASK(47)); |
| 2755 | if (err) { | 2755 | if (err) { |
| 2756 | err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); | 2756 | err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); |
| 2757 | if (err) { | 2757 | if (err) { |
| @@ -2765,10 +2765,10 @@ static int enic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
| 2765 | goto err_out_release_regions; | 2765 | goto err_out_release_regions; |
| 2766 | } | 2766 | } |
| 2767 | } else { | 2767 | } else { |
| 2768 | err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); | 2768 | err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(47)); |
| 2769 | if (err) { | 2769 | if (err) { |
| 2770 | dev_err(dev, "Unable to obtain %u-bit DMA " | 2770 | dev_err(dev, "Unable to obtain %u-bit DMA " |
| 2771 | "for consistent allocations, aborting\n", 64); | 2771 | "for consistent allocations, aborting\n", 47); |
| 2772 | goto err_out_release_regions; | 2772 | goto err_out_release_regions; |
| 2773 | } | 2773 | } |
| 2774 | using_dac = 1; | 2774 | using_dac = 1; |
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index d4604bc8eb5b..9d3eed46830d 100644 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c | |||
| @@ -1,3 +1,4 @@ | |||
| 1 | // SPDX-License-Identifier: GPL-2.0+ | ||
| 1 | /* | 2 | /* |
| 2 | * Fast Ethernet Controller (FEC) driver for Motorola MPC8xx. | 3 | * Fast Ethernet Controller (FEC) driver for Motorola MPC8xx. |
| 3 | * Copyright (c) 1997 Dan Malek (dmalek@jlc.net) | 4 | * Copyright (c) 1997 Dan Malek (dmalek@jlc.net) |
diff --git a/drivers/net/ethernet/freescale/fec_ptp.c b/drivers/net/ethernet/freescale/fec_ptp.c index f81439796ac7..43d973215040 100644 --- a/drivers/net/ethernet/freescale/fec_ptp.c +++ b/drivers/net/ethernet/freescale/fec_ptp.c | |||
| @@ -1,20 +1,8 @@ | |||
| 1 | // SPDX-License-Identifier: GPL-2.0 | ||
| 1 | /* | 2 | /* |
| 2 | * Fast Ethernet Controller (ENET) PTP driver for MX6x. | 3 | * Fast Ethernet Controller (ENET) PTP driver for MX6x. |
| 3 | * | 4 | * |
| 4 | * Copyright (C) 2012 Freescale Semiconductor, Inc. | 5 | * Copyright (C) 2012 Freescale Semiconductor, Inc. |
| 5 | * | ||
| 6 | * This program is free software; you can redistribute it and/or modify it | ||
| 7 | * under the terms and conditions of the GNU General Public License, | ||
| 8 | * version 2, as published by the Free Software Foundation. | ||
| 9 | * | ||
| 10 | * This program is distributed in the hope it will be useful, but WITHOUT | ||
| 11 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
| 12 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
| 13 | * more details. | ||
| 14 | * | ||
| 15 | * You should have received a copy of the GNU General Public License along with | ||
| 16 | * this program; if not, write to the Free Software Foundation, Inc., | ||
| 17 | * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. | ||
| 18 | */ | 6 | */ |
| 19 | 7 | ||
| 20 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | 8 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt |
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c index 6e8d6a6f6aaf..5ec1185808e5 100644 --- a/drivers/net/ethernet/ibm/ibmvnic.c +++ b/drivers/net/ethernet/ibm/ibmvnic.c | |||
| @@ -192,6 +192,7 @@ static int alloc_long_term_buff(struct ibmvnic_adapter *adapter, | |||
| 192 | if (adapter->fw_done_rc) { | 192 | if (adapter->fw_done_rc) { |
| 193 | dev_err(dev, "Couldn't map long term buffer,rc = %d\n", | 193 | dev_err(dev, "Couldn't map long term buffer,rc = %d\n", |
| 194 | adapter->fw_done_rc); | 194 | adapter->fw_done_rc); |
| 195 | dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr); | ||
| 195 | return -1; | 196 | return -1; |
| 196 | } | 197 | } |
| 197 | return 0; | 198 | return 0; |
| @@ -795,9 +796,11 @@ static int ibmvnic_login(struct net_device *netdev) | |||
| 795 | struct ibmvnic_adapter *adapter = netdev_priv(netdev); | 796 | struct ibmvnic_adapter *adapter = netdev_priv(netdev); |
| 796 | unsigned long timeout = msecs_to_jiffies(30000); | 797 | unsigned long timeout = msecs_to_jiffies(30000); |
| 797 | int retry_count = 0; | 798 | int retry_count = 0; |
| 799 | bool retry; | ||
| 798 | int rc; | 800 | int rc; |
| 799 | 801 | ||
| 800 | do { | 802 | do { |
| 803 | retry = false; | ||
| 801 | if (retry_count > IBMVNIC_MAX_QUEUES) { | 804 | if (retry_count > IBMVNIC_MAX_QUEUES) { |
| 802 | netdev_warn(netdev, "Login attempts exceeded\n"); | 805 | netdev_warn(netdev, "Login attempts exceeded\n"); |
| 803 | return -1; | 806 | return -1; |
| @@ -821,6 +824,9 @@ static int ibmvnic_login(struct net_device *netdev) | |||
| 821 | retry_count++; | 824 | retry_count++; |
| 822 | release_sub_crqs(adapter, 1); | 825 | release_sub_crqs(adapter, 1); |
| 823 | 826 | ||
| 827 | retry = true; | ||
| 828 | netdev_dbg(netdev, | ||
| 829 | "Received partial success, retrying...\n"); | ||
| 824 | adapter->init_done_rc = 0; | 830 | adapter->init_done_rc = 0; |
| 825 | reinit_completion(&adapter->init_done); | 831 | reinit_completion(&adapter->init_done); |
| 826 | send_cap_queries(adapter); | 832 | send_cap_queries(adapter); |
| @@ -848,7 +854,7 @@ static int ibmvnic_login(struct net_device *netdev) | |||
| 848 | netdev_warn(netdev, "Adapter login failed\n"); | 854 | netdev_warn(netdev, "Adapter login failed\n"); |
| 849 | return -1; | 855 | return -1; |
| 850 | } | 856 | } |
| 851 | } while (adapter->init_done_rc == PARTIALSUCCESS); | 857 | } while (retry); |
| 852 | 858 | ||
| 853 | /* handle pending MAC address changes after successful login */ | 859 | /* handle pending MAC address changes after successful login */ |
| 854 | if (adapter->mac_change_pending) { | 860 | if (adapter->mac_change_pending) { |
| @@ -1821,9 +1827,8 @@ static int do_reset(struct ibmvnic_adapter *adapter, | |||
| 1821 | if (rc) | 1827 | if (rc) |
| 1822 | return rc; | 1828 | return rc; |
| 1823 | } | 1829 | } |
| 1830 | ibmvnic_disable_irqs(adapter); | ||
| 1824 | } | 1831 | } |
| 1825 | |||
| 1826 | ibmvnic_disable_irqs(adapter); | ||
| 1827 | adapter->state = VNIC_CLOSED; | 1832 | adapter->state = VNIC_CLOSED; |
| 1828 | 1833 | ||
| 1829 | if (reset_state == VNIC_CLOSED) | 1834 | if (reset_state == VNIC_CLOSED) |
| @@ -2617,18 +2622,21 @@ static int enable_scrq_irq(struct ibmvnic_adapter *adapter, | |||
| 2617 | { | 2622 | { |
| 2618 | struct device *dev = &adapter->vdev->dev; | 2623 | struct device *dev = &adapter->vdev->dev; |
| 2619 | unsigned long rc; | 2624 | unsigned long rc; |
| 2620 | u64 val; | ||
| 2621 | 2625 | ||
| 2622 | if (scrq->hw_irq > 0x100000000ULL) { | 2626 | if (scrq->hw_irq > 0x100000000ULL) { |
| 2623 | dev_err(dev, "bad hw_irq = %lx\n", scrq->hw_irq); | 2627 | dev_err(dev, "bad hw_irq = %lx\n", scrq->hw_irq); |
| 2624 | return 1; | 2628 | return 1; |
| 2625 | } | 2629 | } |
| 2626 | 2630 | ||
| 2627 | val = (0xff000000) | scrq->hw_irq; | 2631 | if (adapter->resetting && |
| 2628 | rc = plpar_hcall_norets(H_EOI, val); | 2632 | adapter->reset_reason == VNIC_RESET_MOBILITY) { |
| 2629 | if (rc) | 2633 | u64 val = (0xff000000) | scrq->hw_irq; |
| 2630 | dev_err(dev, "H_EOI FAILED irq 0x%llx. rc=%ld\n", | 2634 | |
| 2631 | val, rc); | 2635 | rc = plpar_hcall_norets(H_EOI, val); |
| 2636 | if (rc) | ||
| 2637 | dev_err(dev, "H_EOI FAILED irq 0x%llx. rc=%ld\n", | ||
| 2638 | val, rc); | ||
| 2639 | } | ||
| 2632 | 2640 | ||
| 2633 | rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address, | 2641 | rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address, |
| 2634 | H_ENABLE_VIO_INTERRUPT, scrq->hw_irq, 0, 0); | 2642 | H_ENABLE_VIO_INTERRUPT, scrq->hw_irq, 0, 0); |
| @@ -4586,14 +4594,6 @@ static int ibmvnic_init(struct ibmvnic_adapter *adapter) | |||
| 4586 | release_crq_queue(adapter); | 4594 | release_crq_queue(adapter); |
| 4587 | } | 4595 | } |
| 4588 | 4596 | ||
| 4589 | rc = init_stats_buffers(adapter); | ||
| 4590 | if (rc) | ||
| 4591 | return rc; | ||
| 4592 | |||
| 4593 | rc = init_stats_token(adapter); | ||
| 4594 | if (rc) | ||
| 4595 | return rc; | ||
| 4596 | |||
| 4597 | return rc; | 4597 | return rc; |
| 4598 | } | 4598 | } |
| 4599 | 4599 | ||
| @@ -4662,13 +4662,21 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id) | |||
| 4662 | goto ibmvnic_init_fail; | 4662 | goto ibmvnic_init_fail; |
| 4663 | } while (rc == EAGAIN); | 4663 | } while (rc == EAGAIN); |
| 4664 | 4664 | ||
| 4665 | rc = init_stats_buffers(adapter); | ||
| 4666 | if (rc) | ||
| 4667 | goto ibmvnic_init_fail; | ||
| 4668 | |||
| 4669 | rc = init_stats_token(adapter); | ||
| 4670 | if (rc) | ||
| 4671 | goto ibmvnic_stats_fail; | ||
| 4672 | |||
| 4665 | netdev->mtu = adapter->req_mtu - ETH_HLEN; | 4673 | netdev->mtu = adapter->req_mtu - ETH_HLEN; |
| 4666 | netdev->min_mtu = adapter->min_mtu - ETH_HLEN; | 4674 | netdev->min_mtu = adapter->min_mtu - ETH_HLEN; |
| 4667 | netdev->max_mtu = adapter->max_mtu - ETH_HLEN; | 4675 | netdev->max_mtu = adapter->max_mtu - ETH_HLEN; |
| 4668 | 4676 | ||
| 4669 | rc = device_create_file(&dev->dev, &dev_attr_failover); | 4677 | rc = device_create_file(&dev->dev, &dev_attr_failover); |
| 4670 | if (rc) | 4678 | if (rc) |
| 4671 | goto ibmvnic_init_fail; | 4679 | goto ibmvnic_dev_file_err; |
| 4672 | 4680 | ||
| 4673 | netif_carrier_off(netdev); | 4681 | netif_carrier_off(netdev); |
| 4674 | rc = register_netdev(netdev); | 4682 | rc = register_netdev(netdev); |
| @@ -4687,6 +4695,12 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id) | |||
| 4687 | ibmvnic_register_fail: | 4695 | ibmvnic_register_fail: |
| 4688 | device_remove_file(&dev->dev, &dev_attr_failover); | 4696 | device_remove_file(&dev->dev, &dev_attr_failover); |
| 4689 | 4697 | ||
| 4698 | ibmvnic_dev_file_err: | ||
| 4699 | release_stats_token(adapter); | ||
| 4700 | |||
| 4701 | ibmvnic_stats_fail: | ||
| 4702 | release_stats_buffers(adapter); | ||
| 4703 | |||
| 4690 | ibmvnic_init_fail: | 4704 | ibmvnic_init_fail: |
| 4691 | release_sub_crqs(adapter, 1); | 4705 | release_sub_crqs(adapter, 1); |
| 4692 | release_crq_queue(adapter); | 4706 | release_crq_queue(adapter); |
diff --git a/drivers/net/ethernet/mellanox/mlx4/icm.c b/drivers/net/ethernet/mellanox/mlx4/icm.c index a822f7a56bc5..685337d58276 100644 --- a/drivers/net/ethernet/mellanox/mlx4/icm.c +++ b/drivers/net/ethernet/mellanox/mlx4/icm.c | |||
| @@ -43,12 +43,12 @@ | |||
| 43 | #include "fw.h" | 43 | #include "fw.h" |
| 44 | 44 | ||
| 45 | /* | 45 | /* |
| 46 | * We allocate in as big chunks as we can, up to a maximum of 256 KB | 46 | * We allocate in page size (default 4KB on many archs) chunks to avoid high |
| 47 | * per chunk. | 47 | * order memory allocations in fragmented/high usage memory situation. |
| 48 | */ | 48 | */ |
| 49 | enum { | 49 | enum { |
| 50 | MLX4_ICM_ALLOC_SIZE = 1 << 18, | 50 | MLX4_ICM_ALLOC_SIZE = PAGE_SIZE, |
| 51 | MLX4_TABLE_CHUNK_SIZE = 1 << 18 | 51 | MLX4_TABLE_CHUNK_SIZE = PAGE_SIZE, |
| 52 | }; | 52 | }; |
| 53 | 53 | ||
| 54 | static void mlx4_free_icm_pages(struct mlx4_dev *dev, struct mlx4_icm_chunk *chunk) | 54 | static void mlx4_free_icm_pages(struct mlx4_dev *dev, struct mlx4_icm_chunk *chunk) |
| @@ -398,9 +398,11 @@ int mlx4_init_icm_table(struct mlx4_dev *dev, struct mlx4_icm_table *table, | |||
| 398 | u64 size; | 398 | u64 size; |
| 399 | 399 | ||
| 400 | obj_per_chunk = MLX4_TABLE_CHUNK_SIZE / obj_size; | 400 | obj_per_chunk = MLX4_TABLE_CHUNK_SIZE / obj_size; |
| 401 | if (WARN_ON(!obj_per_chunk)) | ||
| 402 | return -EINVAL; | ||
| 401 | num_icm = (nobj + obj_per_chunk - 1) / obj_per_chunk; | 403 | num_icm = (nobj + obj_per_chunk - 1) / obj_per_chunk; |
| 402 | 404 | ||
| 403 | table->icm = kcalloc(num_icm, sizeof(*table->icm), GFP_KERNEL); | 405 | table->icm = kvzalloc(num_icm * sizeof(*table->icm), GFP_KERNEL); |
| 404 | if (!table->icm) | 406 | if (!table->icm) |
| 405 | return -ENOMEM; | 407 | return -ENOMEM; |
| 406 | table->virt = virt; | 408 | table->virt = virt; |
| @@ -446,7 +448,7 @@ err: | |||
| 446 | mlx4_free_icm(dev, table->icm[i], use_coherent); | 448 | mlx4_free_icm(dev, table->icm[i], use_coherent); |
| 447 | } | 449 | } |
| 448 | 450 | ||
| 449 | kfree(table->icm); | 451 | kvfree(table->icm); |
| 450 | 452 | ||
| 451 | return -ENOMEM; | 453 | return -ENOMEM; |
| 452 | } | 454 | } |
| @@ -462,5 +464,5 @@ void mlx4_cleanup_icm_table(struct mlx4_dev *dev, struct mlx4_icm_table *table) | |||
| 462 | mlx4_free_icm(dev, table->icm[i], table->coherent); | 464 | mlx4_free_icm(dev, table->icm[i], table->coherent); |
| 463 | } | 465 | } |
| 464 | 466 | ||
| 465 | kfree(table->icm); | 467 | kvfree(table->icm); |
| 466 | } | 468 | } |
diff --git a/drivers/net/ethernet/mellanox/mlx4/intf.c b/drivers/net/ethernet/mellanox/mlx4/intf.c index 2edcce98ab2d..65482f004e50 100644 --- a/drivers/net/ethernet/mellanox/mlx4/intf.c +++ b/drivers/net/ethernet/mellanox/mlx4/intf.c | |||
| @@ -172,7 +172,7 @@ int mlx4_do_bond(struct mlx4_dev *dev, bool enable) | |||
| 172 | list_add_tail(&dev_ctx->list, &priv->ctx_list); | 172 | list_add_tail(&dev_ctx->list, &priv->ctx_list); |
| 173 | spin_unlock_irqrestore(&priv->ctx_lock, flags); | 173 | spin_unlock_irqrestore(&priv->ctx_lock, flags); |
| 174 | 174 | ||
| 175 | mlx4_dbg(dev, "Inrerface for protocol %d restarted with when bonded mode is %s\n", | 175 | mlx4_dbg(dev, "Interface for protocol %d restarted with bonded mode %s\n", |
| 176 | dev_ctx->intf->protocol, enable ? | 176 | dev_ctx->intf->protocol, enable ? |
| 177 | "enabled" : "disabled"); | 177 | "enabled" : "disabled"); |
| 178 | } | 178 | } |
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c index 211578ffc70d..60172a38c4a4 100644 --- a/drivers/net/ethernet/mellanox/mlx4/main.c +++ b/drivers/net/ethernet/mellanox/mlx4/main.c | |||
| @@ -2929,6 +2929,7 @@ static int mlx4_init_port_info(struct mlx4_dev *dev, int port) | |||
| 2929 | mlx4_err(dev, "Failed to create file for port %d\n", port); | 2929 | mlx4_err(dev, "Failed to create file for port %d\n", port); |
| 2930 | devlink_port_unregister(&info->devlink_port); | 2930 | devlink_port_unregister(&info->devlink_port); |
| 2931 | info->port = -1; | 2931 | info->port = -1; |
| 2932 | return err; | ||
| 2932 | } | 2933 | } |
| 2933 | 2934 | ||
| 2934 | sprintf(info->dev_mtu_name, "mlx4_port%d_mtu", port); | 2935 | sprintf(info->dev_mtu_name, "mlx4_port%d_mtu", port); |
| @@ -2950,9 +2951,10 @@ static int mlx4_init_port_info(struct mlx4_dev *dev, int port) | |||
| 2950 | &info->port_attr); | 2951 | &info->port_attr); |
| 2951 | devlink_port_unregister(&info->devlink_port); | 2952 | devlink_port_unregister(&info->devlink_port); |
| 2952 | info->port = -1; | 2953 | info->port = -1; |
| 2954 | return err; | ||
| 2953 | } | 2955 | } |
| 2954 | 2956 | ||
| 2955 | return err; | 2957 | return 0; |
| 2956 | } | 2958 | } |
| 2957 | 2959 | ||
| 2958 | static void mlx4_cleanup_port_info(struct mlx4_port_info *info) | 2960 | static void mlx4_cleanup_port_info(struct mlx4_port_info *info) |
diff --git a/drivers/net/ethernet/mellanox/mlx4/qp.c b/drivers/net/ethernet/mellanox/mlx4/qp.c index 3aaf4bad6c5a..427e7a31862c 100644 --- a/drivers/net/ethernet/mellanox/mlx4/qp.c +++ b/drivers/net/ethernet/mellanox/mlx4/qp.c | |||
| @@ -393,11 +393,11 @@ struct mlx4_qp *mlx4_qp_lookup(struct mlx4_dev *dev, u32 qpn) | |||
| 393 | struct mlx4_qp_table *qp_table = &mlx4_priv(dev)->qp_table; | 393 | struct mlx4_qp_table *qp_table = &mlx4_priv(dev)->qp_table; |
| 394 | struct mlx4_qp *qp; | 394 | struct mlx4_qp *qp; |
| 395 | 395 | ||
| 396 | spin_lock(&qp_table->lock); | 396 | spin_lock_irq(&qp_table->lock); |
| 397 | 397 | ||
| 398 | qp = __mlx4_qp_lookup(dev, qpn); | 398 | qp = __mlx4_qp_lookup(dev, qpn); |
| 399 | 399 | ||
| 400 | spin_unlock(&qp_table->lock); | 400 | spin_unlock_irq(&qp_table->lock); |
| 401 | return qp; | 401 | return qp; |
| 402 | } | 402 | } |
| 403 | 403 | ||
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c index 176645762e49..1ff0b0e93804 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c | |||
| @@ -615,6 +615,45 @@ static inline bool is_last_ethertype_ip(struct sk_buff *skb, int *network_depth) | |||
| 615 | return (ethertype == htons(ETH_P_IP) || ethertype == htons(ETH_P_IPV6)); | 615 | return (ethertype == htons(ETH_P_IP) || ethertype == htons(ETH_P_IPV6)); |
| 616 | } | 616 | } |
| 617 | 617 | ||
| 618 | static __be32 mlx5e_get_fcs(struct sk_buff *skb) | ||
| 619 | { | ||
| 620 | int last_frag_sz, bytes_in_prev, nr_frags; | ||
| 621 | u8 *fcs_p1, *fcs_p2; | ||
| 622 | skb_frag_t *last_frag; | ||
| 623 | __be32 fcs_bytes; | ||
| 624 | |||
| 625 | if (!skb_is_nonlinear(skb)) | ||
| 626 | return *(__be32 *)(skb->data + skb->len - ETH_FCS_LEN); | ||
| 627 | |||
| 628 | nr_frags = skb_shinfo(skb)->nr_frags; | ||
| 629 | last_frag = &skb_shinfo(skb)->frags[nr_frags - 1]; | ||
| 630 | last_frag_sz = skb_frag_size(last_frag); | ||
| 631 | |||
| 632 | /* If all FCS data is in last frag */ | ||
| 633 | if (last_frag_sz >= ETH_FCS_LEN) | ||
| 634 | return *(__be32 *)(skb_frag_address(last_frag) + | ||
| 635 | last_frag_sz - ETH_FCS_LEN); | ||
| 636 | |||
| 637 | fcs_p2 = (u8 *)skb_frag_address(last_frag); | ||
| 638 | bytes_in_prev = ETH_FCS_LEN - last_frag_sz; | ||
| 639 | |||
| 640 | /* Find where the other part of the FCS is - Linear or another frag */ | ||
| 641 | if (nr_frags == 1) { | ||
| 642 | fcs_p1 = skb_tail_pointer(skb); | ||
| 643 | } else { | ||
| 644 | skb_frag_t *prev_frag = &skb_shinfo(skb)->frags[nr_frags - 2]; | ||
| 645 | |||
| 646 | fcs_p1 = skb_frag_address(prev_frag) + | ||
| 647 | skb_frag_size(prev_frag); | ||
| 648 | } | ||
| 649 | fcs_p1 -= bytes_in_prev; | ||
| 650 | |||
| 651 | memcpy(&fcs_bytes, fcs_p1, bytes_in_prev); | ||
| 652 | memcpy(((u8 *)&fcs_bytes) + bytes_in_prev, fcs_p2, last_frag_sz); | ||
| 653 | |||
| 654 | return fcs_bytes; | ||
| 655 | } | ||
| 656 | |||
| 618 | static inline void mlx5e_handle_csum(struct net_device *netdev, | 657 | static inline void mlx5e_handle_csum(struct net_device *netdev, |
| 619 | struct mlx5_cqe64 *cqe, | 658 | struct mlx5_cqe64 *cqe, |
| 620 | struct mlx5e_rq *rq, | 659 | struct mlx5e_rq *rq, |
| @@ -643,6 +682,9 @@ static inline void mlx5e_handle_csum(struct net_device *netdev, | |||
| 643 | skb->csum = csum_partial(skb->data + ETH_HLEN, | 682 | skb->csum = csum_partial(skb->data + ETH_HLEN, |
| 644 | network_depth - ETH_HLEN, | 683 | network_depth - ETH_HLEN, |
| 645 | skb->csum); | 684 | skb->csum); |
| 685 | if (unlikely(netdev->features & NETIF_F_RXFCS)) | ||
| 686 | skb->csum = csum_add(skb->csum, | ||
| 687 | (__force __wsum)mlx5e_get_fcs(skb)); | ||
| 646 | rq->stats.csum_complete++; | 688 | rq->stats.csum_complete++; |
| 647 | return; | 689 | return; |
| 648 | } | 690 | } |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c b/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c index 0f5da499a223..fad8c2e3804e 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c | |||
| @@ -237,19 +237,17 @@ static void *mlx5_fpga_ipsec_cmd_exec(struct mlx5_core_dev *mdev, | |||
| 237 | context->buf.sg[0].data = &context->command; | 237 | context->buf.sg[0].data = &context->command; |
| 238 | 238 | ||
| 239 | spin_lock_irqsave(&fdev->ipsec->pending_cmds_lock, flags); | 239 | spin_lock_irqsave(&fdev->ipsec->pending_cmds_lock, flags); |
| 240 | list_add_tail(&context->list, &fdev->ipsec->pending_cmds); | 240 | res = mlx5_fpga_sbu_conn_sendmsg(fdev->ipsec->conn, &context->buf); |
| 241 | if (!res) | ||
| 242 | list_add_tail(&context->list, &fdev->ipsec->pending_cmds); | ||
| 241 | spin_unlock_irqrestore(&fdev->ipsec->pending_cmds_lock, flags); | 243 | spin_unlock_irqrestore(&fdev->ipsec->pending_cmds_lock, flags); |
| 242 | 244 | ||
| 243 | res = mlx5_fpga_sbu_conn_sendmsg(fdev->ipsec->conn, &context->buf); | ||
| 244 | if (res) { | 245 | if (res) { |
| 245 | mlx5_fpga_warn(fdev, "Failure sending IPSec command: %d\n", | 246 | mlx5_fpga_warn(fdev, "Failed to send IPSec command: %d\n", res); |
| 246 | res); | ||
| 247 | spin_lock_irqsave(&fdev->ipsec->pending_cmds_lock, flags); | ||
| 248 | list_del(&context->list); | ||
| 249 | spin_unlock_irqrestore(&fdev->ipsec->pending_cmds_lock, flags); | ||
| 250 | kfree(context); | 247 | kfree(context); |
| 251 | return ERR_PTR(res); | 248 | return ERR_PTR(res); |
| 252 | } | 249 | } |
| 250 | |||
| 253 | /* Context will be freed by wait func after completion */ | 251 | /* Context will be freed by wait func after completion */ |
| 254 | return context; | 252 | return context; |
| 255 | } | 253 | } |
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/main.c b/drivers/net/ethernet/netronome/nfp/bpf/main.c index 1dc424685f4e..35fb31f682af 100644 --- a/drivers/net/ethernet/netronome/nfp/bpf/main.c +++ b/drivers/net/ethernet/netronome/nfp/bpf/main.c | |||
| @@ -335,7 +335,7 @@ static int nfp_bpf_parse_capabilities(struct nfp_app *app) | |||
| 335 | return PTR_ERR(mem) == -ENOENT ? 0 : PTR_ERR(mem); | 335 | return PTR_ERR(mem) == -ENOENT ? 0 : PTR_ERR(mem); |
| 336 | 336 | ||
| 337 | start = mem; | 337 | start = mem; |
| 338 | while (mem - start + 8 < nfp_cpp_area_size(area)) { | 338 | while (mem - start + 8 <= nfp_cpp_area_size(area)) { |
| 339 | u8 __iomem *value; | 339 | u8 __iomem *value; |
| 340 | u32 type, length; | 340 | u32 type, length; |
| 341 | 341 | ||
diff --git a/drivers/net/ethernet/qlogic/qed/qed_cxt.c b/drivers/net/ethernet/qlogic/qed/qed_cxt.c index 00f41c145d4d..820b226d6ff8 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_cxt.c +++ b/drivers/net/ethernet/qlogic/qed/qed_cxt.c | |||
| @@ -77,7 +77,7 @@ | |||
| 77 | #define ILT_CFG_REG(cli, reg) PSWRQ2_REG_ ## cli ## _ ## reg ## _RT_OFFSET | 77 | #define ILT_CFG_REG(cli, reg) PSWRQ2_REG_ ## cli ## _ ## reg ## _RT_OFFSET |
| 78 | 78 | ||
| 79 | /* ILT entry structure */ | 79 | /* ILT entry structure */ |
| 80 | #define ILT_ENTRY_PHY_ADDR_MASK 0x000FFFFFFFFFFFULL | 80 | #define ILT_ENTRY_PHY_ADDR_MASK (~0ULL >> 12) |
| 81 | #define ILT_ENTRY_PHY_ADDR_SHIFT 0 | 81 | #define ILT_ENTRY_PHY_ADDR_SHIFT 0 |
| 82 | #define ILT_ENTRY_VALID_MASK 0x1ULL | 82 | #define ILT_ENTRY_VALID_MASK 0x1ULL |
| 83 | #define ILT_ENTRY_VALID_SHIFT 52 | 83 | #define ILT_ENTRY_VALID_SHIFT 52 |
diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.c b/drivers/net/ethernet/qlogic/qed/qed_ll2.c index 38502815d681..468c59d2e491 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_ll2.c +++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.c | |||
| @@ -292,6 +292,7 @@ static void qed_ll2_txq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle) | |||
| 292 | struct qed_ll2_tx_packet *p_pkt = NULL; | 292 | struct qed_ll2_tx_packet *p_pkt = NULL; |
| 293 | struct qed_ll2_info *p_ll2_conn; | 293 | struct qed_ll2_info *p_ll2_conn; |
| 294 | struct qed_ll2_tx_queue *p_tx; | 294 | struct qed_ll2_tx_queue *p_tx; |
| 295 | unsigned long flags = 0; | ||
| 295 | dma_addr_t tx_frag; | 296 | dma_addr_t tx_frag; |
| 296 | 297 | ||
| 297 | p_ll2_conn = qed_ll2_handle_sanity_inactive(p_hwfn, connection_handle); | 298 | p_ll2_conn = qed_ll2_handle_sanity_inactive(p_hwfn, connection_handle); |
| @@ -300,6 +301,7 @@ static void qed_ll2_txq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle) | |||
| 300 | 301 | ||
| 301 | p_tx = &p_ll2_conn->tx_queue; | 302 | p_tx = &p_ll2_conn->tx_queue; |
| 302 | 303 | ||
| 304 | spin_lock_irqsave(&p_tx->lock, flags); | ||
| 303 | while (!list_empty(&p_tx->active_descq)) { | 305 | while (!list_empty(&p_tx->active_descq)) { |
| 304 | p_pkt = list_first_entry(&p_tx->active_descq, | 306 | p_pkt = list_first_entry(&p_tx->active_descq, |
| 305 | struct qed_ll2_tx_packet, list_entry); | 307 | struct qed_ll2_tx_packet, list_entry); |
| @@ -309,6 +311,7 @@ static void qed_ll2_txq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle) | |||
| 309 | list_del(&p_pkt->list_entry); | 311 | list_del(&p_pkt->list_entry); |
| 310 | b_last_packet = list_empty(&p_tx->active_descq); | 312 | b_last_packet = list_empty(&p_tx->active_descq); |
| 311 | list_add_tail(&p_pkt->list_entry, &p_tx->free_descq); | 313 | list_add_tail(&p_pkt->list_entry, &p_tx->free_descq); |
| 314 | spin_unlock_irqrestore(&p_tx->lock, flags); | ||
| 312 | if (p_ll2_conn->input.conn_type == QED_LL2_TYPE_OOO) { | 315 | if (p_ll2_conn->input.conn_type == QED_LL2_TYPE_OOO) { |
| 313 | struct qed_ooo_buffer *p_buffer; | 316 | struct qed_ooo_buffer *p_buffer; |
| 314 | 317 | ||
| @@ -328,7 +331,9 @@ static void qed_ll2_txq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle) | |||
| 328 | b_last_frag, | 331 | b_last_frag, |
| 329 | b_last_packet); | 332 | b_last_packet); |
| 330 | } | 333 | } |
| 334 | spin_lock_irqsave(&p_tx->lock, flags); | ||
| 331 | } | 335 | } |
| 336 | spin_unlock_irqrestore(&p_tx->lock, flags); | ||
| 332 | } | 337 | } |
| 333 | 338 | ||
| 334 | static int qed_ll2_txq_completion(struct qed_hwfn *p_hwfn, void *p_cookie) | 339 | static int qed_ll2_txq_completion(struct qed_hwfn *p_hwfn, void *p_cookie) |
| @@ -556,6 +561,7 @@ static void qed_ll2_rxq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle) | |||
| 556 | struct qed_ll2_info *p_ll2_conn = NULL; | 561 | struct qed_ll2_info *p_ll2_conn = NULL; |
| 557 | struct qed_ll2_rx_packet *p_pkt = NULL; | 562 | struct qed_ll2_rx_packet *p_pkt = NULL; |
| 558 | struct qed_ll2_rx_queue *p_rx; | 563 | struct qed_ll2_rx_queue *p_rx; |
| 564 | unsigned long flags = 0; | ||
| 559 | 565 | ||
| 560 | p_ll2_conn = qed_ll2_handle_sanity_inactive(p_hwfn, connection_handle); | 566 | p_ll2_conn = qed_ll2_handle_sanity_inactive(p_hwfn, connection_handle); |
| 561 | if (!p_ll2_conn) | 567 | if (!p_ll2_conn) |
| @@ -563,13 +569,14 @@ static void qed_ll2_rxq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle) | |||
| 563 | 569 | ||
| 564 | p_rx = &p_ll2_conn->rx_queue; | 570 | p_rx = &p_ll2_conn->rx_queue; |
| 565 | 571 | ||
| 572 | spin_lock_irqsave(&p_rx->lock, flags); | ||
| 566 | while (!list_empty(&p_rx->active_descq)) { | 573 | while (!list_empty(&p_rx->active_descq)) { |
| 567 | p_pkt = list_first_entry(&p_rx->active_descq, | 574 | p_pkt = list_first_entry(&p_rx->active_descq, |
| 568 | struct qed_ll2_rx_packet, list_entry); | 575 | struct qed_ll2_rx_packet, list_entry); |
| 569 | if (!p_pkt) | 576 | if (!p_pkt) |
| 570 | break; | 577 | break; |
| 571 | |||
| 572 | list_move_tail(&p_pkt->list_entry, &p_rx->free_descq); | 578 | list_move_tail(&p_pkt->list_entry, &p_rx->free_descq); |
| 579 | spin_unlock_irqrestore(&p_rx->lock, flags); | ||
| 573 | 580 | ||
| 574 | if (p_ll2_conn->input.conn_type == QED_LL2_TYPE_OOO) { | 581 | if (p_ll2_conn->input.conn_type == QED_LL2_TYPE_OOO) { |
| 575 | struct qed_ooo_buffer *p_buffer; | 582 | struct qed_ooo_buffer *p_buffer; |
| @@ -588,7 +595,30 @@ static void qed_ll2_rxq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle) | |||
| 588 | cookie, | 595 | cookie, |
| 589 | rx_buf_addr, b_last); | 596 | rx_buf_addr, b_last); |
| 590 | } | 597 | } |
| 598 | spin_lock_irqsave(&p_rx->lock, flags); | ||
| 591 | } | 599 | } |
| 600 | spin_unlock_irqrestore(&p_rx->lock, flags); | ||
| 601 | } | ||
| 602 | |||
| 603 | static bool | ||
| 604 | qed_ll2_lb_rxq_handler_slowpath(struct qed_hwfn *p_hwfn, | ||
| 605 | struct core_rx_slow_path_cqe *p_cqe) | ||
| 606 | { | ||
| 607 | struct ooo_opaque *iscsi_ooo; | ||
| 608 | u32 cid; | ||
| 609 | |||
| 610 | if (p_cqe->ramrod_cmd_id != CORE_RAMROD_RX_QUEUE_FLUSH) | ||
| 611 | return false; | ||
| 612 | |||
| 613 | iscsi_ooo = (struct ooo_opaque *)&p_cqe->opaque_data; | ||
| 614 | if (iscsi_ooo->ooo_opcode != TCP_EVENT_DELETE_ISLES) | ||
| 615 | return false; | ||
| 616 | |||
| 617 | /* Need to make a flush */ | ||
| 618 | cid = le32_to_cpu(iscsi_ooo->cid); | ||
| 619 | qed_ooo_release_connection_isles(p_hwfn, p_hwfn->p_ooo_info, cid); | ||
| 620 | |||
| 621 | return true; | ||
| 592 | } | 622 | } |
| 593 | 623 | ||
| 594 | static int qed_ll2_lb_rxq_handler(struct qed_hwfn *p_hwfn, | 624 | static int qed_ll2_lb_rxq_handler(struct qed_hwfn *p_hwfn, |
| @@ -617,6 +647,11 @@ static int qed_ll2_lb_rxq_handler(struct qed_hwfn *p_hwfn, | |||
| 617 | cq_old_idx = qed_chain_get_cons_idx(&p_rx->rcq_chain); | 647 | cq_old_idx = qed_chain_get_cons_idx(&p_rx->rcq_chain); |
| 618 | cqe_type = cqe->rx_cqe_sp.type; | 648 | cqe_type = cqe->rx_cqe_sp.type; |
| 619 | 649 | ||
| 650 | if (cqe_type == CORE_RX_CQE_TYPE_SLOW_PATH) | ||
| 651 | if (qed_ll2_lb_rxq_handler_slowpath(p_hwfn, | ||
| 652 | &cqe->rx_cqe_sp)) | ||
| 653 | continue; | ||
| 654 | |||
| 620 | if (cqe_type != CORE_RX_CQE_TYPE_REGULAR) { | 655 | if (cqe_type != CORE_RX_CQE_TYPE_REGULAR) { |
| 621 | DP_NOTICE(p_hwfn, | 656 | DP_NOTICE(p_hwfn, |
| 622 | "Got a non-regular LB LL2 completion [type 0x%02x]\n", | 657 | "Got a non-regular LB LL2 completion [type 0x%02x]\n", |
| @@ -794,6 +829,9 @@ static int qed_ll2_lb_rxq_completion(struct qed_hwfn *p_hwfn, void *p_cookie) | |||
| 794 | struct qed_ll2_info *p_ll2_conn = (struct qed_ll2_info *)p_cookie; | 829 | struct qed_ll2_info *p_ll2_conn = (struct qed_ll2_info *)p_cookie; |
| 795 | int rc; | 830 | int rc; |
| 796 | 831 | ||
| 832 | if (!QED_LL2_RX_REGISTERED(p_ll2_conn)) | ||
| 833 | return 0; | ||
| 834 | |||
| 797 | rc = qed_ll2_lb_rxq_handler(p_hwfn, p_ll2_conn); | 835 | rc = qed_ll2_lb_rxq_handler(p_hwfn, p_ll2_conn); |
| 798 | if (rc) | 836 | if (rc) |
| 799 | return rc; | 837 | return rc; |
| @@ -814,6 +852,9 @@ static int qed_ll2_lb_txq_completion(struct qed_hwfn *p_hwfn, void *p_cookie) | |||
| 814 | u16 new_idx = 0, num_bds = 0; | 852 | u16 new_idx = 0, num_bds = 0; |
| 815 | int rc; | 853 | int rc; |
| 816 | 854 | ||
| 855 | if (!QED_LL2_TX_REGISTERED(p_ll2_conn)) | ||
| 856 | return 0; | ||
| 857 | |||
| 817 | new_idx = le16_to_cpu(*p_tx->p_fw_cons); | 858 | new_idx = le16_to_cpu(*p_tx->p_fw_cons); |
| 818 | num_bds = ((s16)new_idx - (s16)p_tx->bds_idx); | 859 | num_bds = ((s16)new_idx - (s16)p_tx->bds_idx); |
| 819 | 860 | ||
| @@ -1867,17 +1908,25 @@ int qed_ll2_terminate_connection(void *cxt, u8 connection_handle) | |||
| 1867 | 1908 | ||
| 1868 | /* Stop Tx & Rx of connection, if needed */ | 1909 | /* Stop Tx & Rx of connection, if needed */ |
| 1869 | if (QED_LL2_TX_REGISTERED(p_ll2_conn)) { | 1910 | if (QED_LL2_TX_REGISTERED(p_ll2_conn)) { |
| 1911 | p_ll2_conn->tx_queue.b_cb_registred = false; | ||
| 1912 | smp_wmb(); /* Make sure this is seen by ll2_lb_rxq_completion */ | ||
| 1870 | rc = qed_sp_ll2_tx_queue_stop(p_hwfn, p_ll2_conn); | 1913 | rc = qed_sp_ll2_tx_queue_stop(p_hwfn, p_ll2_conn); |
| 1871 | if (rc) | 1914 | if (rc) |
| 1872 | goto out; | 1915 | goto out; |
| 1916 | |||
| 1873 | qed_ll2_txq_flush(p_hwfn, connection_handle); | 1917 | qed_ll2_txq_flush(p_hwfn, connection_handle); |
| 1918 | qed_int_unregister_cb(p_hwfn, p_ll2_conn->tx_queue.tx_sb_index); | ||
| 1874 | } | 1919 | } |
| 1875 | 1920 | ||
| 1876 | if (QED_LL2_RX_REGISTERED(p_ll2_conn)) { | 1921 | if (QED_LL2_RX_REGISTERED(p_ll2_conn)) { |
| 1922 | p_ll2_conn->rx_queue.b_cb_registred = false; | ||
| 1923 | smp_wmb(); /* Make sure this is seen by ll2_lb_rxq_completion */ | ||
| 1877 | rc = qed_sp_ll2_rx_queue_stop(p_hwfn, p_ll2_conn); | 1924 | rc = qed_sp_ll2_rx_queue_stop(p_hwfn, p_ll2_conn); |
| 1878 | if (rc) | 1925 | if (rc) |
| 1879 | goto out; | 1926 | goto out; |
| 1927 | |||
| 1880 | qed_ll2_rxq_flush(p_hwfn, connection_handle); | 1928 | qed_ll2_rxq_flush(p_hwfn, connection_handle); |
| 1929 | qed_int_unregister_cb(p_hwfn, p_ll2_conn->rx_queue.rx_sb_index); | ||
| 1881 | } | 1930 | } |
| 1882 | 1931 | ||
| 1883 | if (p_ll2_conn->input.conn_type == QED_LL2_TYPE_OOO) | 1932 | if (p_ll2_conn->input.conn_type == QED_LL2_TYPE_OOO) |
| @@ -1925,16 +1974,6 @@ void qed_ll2_release_connection(void *cxt, u8 connection_handle) | |||
| 1925 | if (!p_ll2_conn) | 1974 | if (!p_ll2_conn) |
| 1926 | return; | 1975 | return; |
| 1927 | 1976 | ||
| 1928 | if (QED_LL2_RX_REGISTERED(p_ll2_conn)) { | ||
| 1929 | p_ll2_conn->rx_queue.b_cb_registred = false; | ||
| 1930 | qed_int_unregister_cb(p_hwfn, p_ll2_conn->rx_queue.rx_sb_index); | ||
| 1931 | } | ||
| 1932 | |||
| 1933 | if (QED_LL2_TX_REGISTERED(p_ll2_conn)) { | ||
| 1934 | p_ll2_conn->tx_queue.b_cb_registred = false; | ||
| 1935 | qed_int_unregister_cb(p_hwfn, p_ll2_conn->tx_queue.tx_sb_index); | ||
| 1936 | } | ||
| 1937 | |||
| 1938 | kfree(p_ll2_conn->tx_queue.descq_mem); | 1977 | kfree(p_ll2_conn->tx_queue.descq_mem); |
| 1939 | qed_chain_free(p_hwfn->cdev, &p_ll2_conn->tx_queue.txq_chain); | 1978 | qed_chain_free(p_hwfn->cdev, &p_ll2_conn->tx_queue.txq_chain); |
| 1940 | 1979 | ||
diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c index a01e7d6e5442..f6655e251bbd 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_main.c +++ b/drivers/net/ethernet/qlogic/qede/qede_main.c | |||
| @@ -1066,13 +1066,12 @@ static void __qede_remove(struct pci_dev *pdev, enum qede_remove_mode mode) | |||
| 1066 | 1066 | ||
| 1067 | DP_INFO(edev, "Starting qede_remove\n"); | 1067 | DP_INFO(edev, "Starting qede_remove\n"); |
| 1068 | 1068 | ||
| 1069 | qede_rdma_dev_remove(edev); | ||
| 1069 | unregister_netdev(ndev); | 1070 | unregister_netdev(ndev); |
| 1070 | cancel_delayed_work_sync(&edev->sp_task); | 1071 | cancel_delayed_work_sync(&edev->sp_task); |
| 1071 | 1072 | ||
| 1072 | qede_ptp_disable(edev); | 1073 | qede_ptp_disable(edev); |
| 1073 | 1074 | ||
| 1074 | qede_rdma_dev_remove(edev); | ||
| 1075 | |||
| 1076 | edev->ops->common->set_power_state(cdev, PCI_D0); | 1075 | edev->ops->common->set_power_state(cdev, PCI_D0); |
| 1077 | 1076 | ||
| 1078 | pci_set_drvdata(pdev, NULL); | 1077 | pci_set_drvdata(pdev, NULL); |
diff --git a/drivers/net/ethernet/renesas/sh_eth.h b/drivers/net/ethernet/renesas/sh_eth.h index a5b792ce2ae7..1bf930d4a1e5 100644 --- a/drivers/net/ethernet/renesas/sh_eth.h +++ b/drivers/net/ethernet/renesas/sh_eth.h | |||
| @@ -163,7 +163,7 @@ enum { | |||
| 163 | }; | 163 | }; |
| 164 | 164 | ||
| 165 | /* Driver's parameters */ | 165 | /* Driver's parameters */ |
| 166 | #if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARCH_SHMOBILE) | 166 | #if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARCH_RENESAS) |
| 167 | #define SH_ETH_RX_ALIGN 32 | 167 | #define SH_ETH_RX_ALIGN 32 |
| 168 | #else | 168 | #else |
| 169 | #define SH_ETH_RX_ALIGN 2 | 169 | #define SH_ETH_RX_ALIGN 2 |
diff --git a/drivers/net/ipvlan/ipvlan_main.c b/drivers/net/ipvlan/ipvlan_main.c index 450eec264a5e..4377c26f714d 100644 --- a/drivers/net/ipvlan/ipvlan_main.c +++ b/drivers/net/ipvlan/ipvlan_main.c | |||
| @@ -792,8 +792,10 @@ static int ipvlan_device_event(struct notifier_block *unused, | |||
| 792 | break; | 792 | break; |
| 793 | 793 | ||
| 794 | case NETDEV_CHANGEADDR: | 794 | case NETDEV_CHANGEADDR: |
| 795 | list_for_each_entry(ipvlan, &port->ipvlans, pnode) | 795 | list_for_each_entry(ipvlan, &port->ipvlans, pnode) { |
| 796 | ether_addr_copy(ipvlan->dev->dev_addr, dev->dev_addr); | 796 | ether_addr_copy(ipvlan->dev->dev_addr, dev->dev_addr); |
| 797 | call_netdevice_notifiers(NETDEV_CHANGEADDR, ipvlan->dev); | ||
| 798 | } | ||
| 797 | break; | 799 | break; |
| 798 | 800 | ||
| 799 | case NETDEV_PRE_TYPE_CHANGE: | 801 | case NETDEV_PRE_TYPE_CHANGE: |
diff --git a/drivers/net/phy/bcm-cygnus.c b/drivers/net/phy/bcm-cygnus.c index 6838129839ca..e757b09f1889 100644 --- a/drivers/net/phy/bcm-cygnus.c +++ b/drivers/net/phy/bcm-cygnus.c | |||
| @@ -61,17 +61,17 @@ static int bcm_cygnus_afe_config(struct phy_device *phydev) | |||
| 61 | return rc; | 61 | return rc; |
| 62 | 62 | ||
| 63 | /* make rcal=100, since rdb default is 000 */ | 63 | /* make rcal=100, since rdb default is 000 */ |
| 64 | rc = bcm_phy_write_exp(phydev, MII_BRCM_CORE_EXPB1, 0x10); | 64 | rc = bcm_phy_write_exp_sel(phydev, MII_BRCM_CORE_EXPB1, 0x10); |
| 65 | if (rc < 0) | 65 | if (rc < 0) |
| 66 | return rc; | 66 | return rc; |
| 67 | 67 | ||
| 68 | /* CORE_EXPB0, Reset R_CAL/RC_CAL Engine */ | 68 | /* CORE_EXPB0, Reset R_CAL/RC_CAL Engine */ |
| 69 | rc = bcm_phy_write_exp(phydev, MII_BRCM_CORE_EXPB0, 0x10); | 69 | rc = bcm_phy_write_exp_sel(phydev, MII_BRCM_CORE_EXPB0, 0x10); |
| 70 | if (rc < 0) | 70 | if (rc < 0) |
| 71 | return rc; | 71 | return rc; |
| 72 | 72 | ||
| 73 | /* CORE_EXPB0, Disable Reset R_CAL/RC_CAL Engine */ | 73 | /* CORE_EXPB0, Disable Reset R_CAL/RC_CAL Engine */ |
| 74 | rc = bcm_phy_write_exp(phydev, MII_BRCM_CORE_EXPB0, 0x00); | 74 | rc = bcm_phy_write_exp_sel(phydev, MII_BRCM_CORE_EXPB0, 0x00); |
| 75 | 75 | ||
| 76 | return 0; | 76 | return 0; |
| 77 | } | 77 | } |
diff --git a/drivers/net/phy/bcm-phy-lib.c b/drivers/net/phy/bcm-phy-lib.c index 5ad130c3da43..d5e0833d69b9 100644 --- a/drivers/net/phy/bcm-phy-lib.c +++ b/drivers/net/phy/bcm-phy-lib.c | |||
| @@ -56,7 +56,7 @@ int bcm54xx_auxctl_read(struct phy_device *phydev, u16 regnum) | |||
| 56 | /* The register must be written to both the Shadow Register Select and | 56 | /* The register must be written to both the Shadow Register Select and |
| 57 | * the Shadow Read Register Selector | 57 | * the Shadow Read Register Selector |
| 58 | */ | 58 | */ |
| 59 | phy_write(phydev, MII_BCM54XX_AUX_CTL, regnum | | 59 | phy_write(phydev, MII_BCM54XX_AUX_CTL, MII_BCM54XX_AUXCTL_SHDWSEL_MASK | |
| 60 | regnum << MII_BCM54XX_AUXCTL_SHDWSEL_READ_SHIFT); | 60 | regnum << MII_BCM54XX_AUXCTL_SHDWSEL_READ_SHIFT); |
| 61 | return phy_read(phydev, MII_BCM54XX_AUX_CTL); | 61 | return phy_read(phydev, MII_BCM54XX_AUX_CTL); |
| 62 | } | 62 | } |
diff --git a/drivers/net/phy/bcm-phy-lib.h b/drivers/net/phy/bcm-phy-lib.h index 7c73808cbbde..81cceaa412fe 100644 --- a/drivers/net/phy/bcm-phy-lib.h +++ b/drivers/net/phy/bcm-phy-lib.h | |||
| @@ -14,11 +14,18 @@ | |||
| 14 | #ifndef _LINUX_BCM_PHY_LIB_H | 14 | #ifndef _LINUX_BCM_PHY_LIB_H |
| 15 | #define _LINUX_BCM_PHY_LIB_H | 15 | #define _LINUX_BCM_PHY_LIB_H |
| 16 | 16 | ||
| 17 | #include <linux/brcmphy.h> | ||
| 17 | #include <linux/phy.h> | 18 | #include <linux/phy.h> |
| 18 | 19 | ||
| 19 | int bcm_phy_write_exp(struct phy_device *phydev, u16 reg, u16 val); | 20 | int bcm_phy_write_exp(struct phy_device *phydev, u16 reg, u16 val); |
| 20 | int bcm_phy_read_exp(struct phy_device *phydev, u16 reg); | 21 | int bcm_phy_read_exp(struct phy_device *phydev, u16 reg); |
| 21 | 22 | ||
| 23 | static inline int bcm_phy_write_exp_sel(struct phy_device *phydev, | ||
| 24 | u16 reg, u16 val) | ||
| 25 | { | ||
| 26 | return bcm_phy_write_exp(phydev, reg | MII_BCM54XX_EXP_SEL_ER, val); | ||
| 27 | } | ||
| 28 | |||
| 22 | int bcm54xx_auxctl_write(struct phy_device *phydev, u16 regnum, u16 val); | 29 | int bcm54xx_auxctl_write(struct phy_device *phydev, u16 regnum, u16 val); |
| 23 | int bcm54xx_auxctl_read(struct phy_device *phydev, u16 regnum); | 30 | int bcm54xx_auxctl_read(struct phy_device *phydev, u16 regnum); |
| 24 | 31 | ||
diff --git a/drivers/net/phy/bcm7xxx.c b/drivers/net/phy/bcm7xxx.c index 29b1c88b55cc..01d2ff2f6241 100644 --- a/drivers/net/phy/bcm7xxx.c +++ b/drivers/net/phy/bcm7xxx.c | |||
| @@ -65,10 +65,10 @@ struct bcm7xxx_phy_priv { | |||
| 65 | static void r_rc_cal_reset(struct phy_device *phydev) | 65 | static void r_rc_cal_reset(struct phy_device *phydev) |
| 66 | { | 66 | { |
| 67 | /* Reset R_CAL/RC_CAL Engine */ | 67 | /* Reset R_CAL/RC_CAL Engine */ |
| 68 | bcm_phy_write_exp(phydev, 0x00b0, 0x0010); | 68 | bcm_phy_write_exp_sel(phydev, 0x00b0, 0x0010); |
| 69 | 69 | ||
| 70 | /* Disable Reset R_AL/RC_CAL Engine */ | 70 | /* Disable Reset R_AL/RC_CAL Engine */ |
| 71 | bcm_phy_write_exp(phydev, 0x00b0, 0x0000); | 71 | bcm_phy_write_exp_sel(phydev, 0x00b0, 0x0000); |
| 72 | } | 72 | } |
| 73 | 73 | ||
| 74 | static int bcm7xxx_28nm_b0_afe_config_init(struct phy_device *phydev) | 74 | static int bcm7xxx_28nm_b0_afe_config_init(struct phy_device *phydev) |
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c index f41b224a9cdb..ab195f0916d6 100644 --- a/drivers/net/phy/micrel.c +++ b/drivers/net/phy/micrel.c | |||
| @@ -573,9 +573,40 @@ static int ksz9031_config_init(struct phy_device *phydev) | |||
| 573 | ksz9031_of_load_skew_values(phydev, of_node, | 573 | ksz9031_of_load_skew_values(phydev, of_node, |
| 574 | MII_KSZ9031RN_TX_DATA_PAD_SKEW, 4, | 574 | MII_KSZ9031RN_TX_DATA_PAD_SKEW, 4, |
| 575 | tx_data_skews, 4); | 575 | tx_data_skews, 4); |
| 576 | |||
| 577 | /* Silicon Errata Sheet (DS80000691D or DS80000692D): | ||
| 578 | * When the device links in the 1000BASE-T slave mode only, | ||
| 579 | * the optional 125MHz reference output clock (CLK125_NDO) | ||
| 580 | * has wide duty cycle variation. | ||
| 581 | * | ||
| 582 | * The optional CLK125_NDO clock does not meet the RGMII | ||
| 583 | * 45/55 percent (min/max) duty cycle requirement and therefore | ||
| 584 | * cannot be used directly by the MAC side for clocking | ||
| 585 | * applications that have setup/hold time requirements on | ||
| 586 | * rising and falling clock edges. | ||
| 587 | * | ||
| 588 | * Workaround: | ||
| 589 | * Force the phy to be the master to receive a stable clock | ||
| 590 | * which meets the duty cycle requirement. | ||
| 591 | */ | ||
| 592 | if (of_property_read_bool(of_node, "micrel,force-master")) { | ||
| 593 | result = phy_read(phydev, MII_CTRL1000); | ||
| 594 | if (result < 0) | ||
| 595 | goto err_force_master; | ||
| 596 | |||
| 597 | /* enable master mode, config & prefer master */ | ||
| 598 | result |= CTL1000_ENABLE_MASTER | CTL1000_AS_MASTER; | ||
| 599 | result = phy_write(phydev, MII_CTRL1000, result); | ||
| 600 | if (result < 0) | ||
| 601 | goto err_force_master; | ||
| 602 | } | ||
| 576 | } | 603 | } |
| 577 | 604 | ||
| 578 | return ksz9031_center_flp_timing(phydev); | 605 | return ksz9031_center_flp_timing(phydev); |
| 606 | |||
| 607 | err_force_master: | ||
| 608 | phydev_err(phydev, "failed to force the phy to master mode\n"); | ||
| 609 | return result; | ||
| 579 | } | 610 | } |
| 580 | 611 | ||
| 581 | #define KSZ8873MLL_GLOBAL_CONTROL_4 0x06 | 612 | #define KSZ8873MLL_GLOBAL_CONTROL_4 0x06 |
diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c index dc7c7ec43202..02ad03a2fab7 100644 --- a/drivers/net/ppp/ppp_generic.c +++ b/drivers/net/ppp/ppp_generic.c | |||
| @@ -605,30 +605,13 @@ static long ppp_ioctl(struct file *file, unsigned int cmd, unsigned long arg) | |||
| 605 | 605 | ||
| 606 | if (cmd == PPPIOCDETACH) { | 606 | if (cmd == PPPIOCDETACH) { |
| 607 | /* | 607 | /* |
| 608 | * We have to be careful here... if the file descriptor | 608 | * PPPIOCDETACH is no longer supported as it was heavily broken, |
| 609 | * has been dup'd, we could have another process in the | 609 | * and is only known to have been used by pppd older than |
| 610 | * middle of a poll using the same file *, so we had | 610 | * ppp-2.4.2 (released November 2003). |
| 611 | * better not free the interface data structures - | ||
| 612 | * instead we fail the ioctl. Even in this case, we | ||
| 613 | * shut down the interface if we are the owner of it. | ||
| 614 | * Actually, we should get rid of PPPIOCDETACH, userland | ||
| 615 | * (i.e. pppd) could achieve the same effect by closing | ||
| 616 | * this fd and reopening /dev/ppp. | ||
| 617 | */ | 611 | */ |
| 612 | pr_warn_once("%s (%d) used obsolete PPPIOCDETACH ioctl\n", | ||
| 613 | current->comm, current->pid); | ||
| 618 | err = -EINVAL; | 614 | err = -EINVAL; |
| 619 | if (pf->kind == INTERFACE) { | ||
| 620 | ppp = PF_TO_PPP(pf); | ||
| 621 | rtnl_lock(); | ||
| 622 | if (file == ppp->owner) | ||
| 623 | unregister_netdevice(ppp->dev); | ||
| 624 | rtnl_unlock(); | ||
| 625 | } | ||
| 626 | if (atomic_long_read(&file->f_count) < 2) { | ||
| 627 | ppp_release(NULL, file); | ||
| 628 | err = 0; | ||
| 629 | } else | ||
| 630 | pr_warn("PPPIOCDETACH file->f_count=%ld\n", | ||
| 631 | atomic_long_read(&file->f_count)); | ||
| 632 | goto out; | 615 | goto out; |
| 633 | } | 616 | } |
| 634 | 617 | ||
diff --git a/drivers/net/tun.c b/drivers/net/tun.c index ef33950a45d9..45d807796a18 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c | |||
| @@ -681,15 +681,6 @@ static void tun_queue_purge(struct tun_file *tfile) | |||
| 681 | skb_queue_purge(&tfile->sk.sk_error_queue); | 681 | skb_queue_purge(&tfile->sk.sk_error_queue); |
| 682 | } | 682 | } |
| 683 | 683 | ||
| 684 | static void tun_cleanup_tx_ring(struct tun_file *tfile) | ||
| 685 | { | ||
| 686 | if (tfile->tx_ring.queue) { | ||
| 687 | ptr_ring_cleanup(&tfile->tx_ring, tun_ptr_free); | ||
| 688 | xdp_rxq_info_unreg(&tfile->xdp_rxq); | ||
| 689 | memset(&tfile->tx_ring, 0, sizeof(tfile->tx_ring)); | ||
| 690 | } | ||
| 691 | } | ||
| 692 | |||
| 693 | static void __tun_detach(struct tun_file *tfile, bool clean) | 684 | static void __tun_detach(struct tun_file *tfile, bool clean) |
| 694 | { | 685 | { |
| 695 | struct tun_file *ntfile; | 686 | struct tun_file *ntfile; |
| @@ -736,7 +727,9 @@ static void __tun_detach(struct tun_file *tfile, bool clean) | |||
| 736 | tun->dev->reg_state == NETREG_REGISTERED) | 727 | tun->dev->reg_state == NETREG_REGISTERED) |
| 737 | unregister_netdevice(tun->dev); | 728 | unregister_netdevice(tun->dev); |
| 738 | } | 729 | } |
| 739 | tun_cleanup_tx_ring(tfile); | 730 | if (tun) |
| 731 | xdp_rxq_info_unreg(&tfile->xdp_rxq); | ||
| 732 | ptr_ring_cleanup(&tfile->tx_ring, tun_ptr_free); | ||
| 740 | sock_put(&tfile->sk); | 733 | sock_put(&tfile->sk); |
| 741 | } | 734 | } |
| 742 | } | 735 | } |
| @@ -783,14 +776,14 @@ static void tun_detach_all(struct net_device *dev) | |||
| 783 | tun_napi_del(tun, tfile); | 776 | tun_napi_del(tun, tfile); |
| 784 | /* Drop read queue */ | 777 | /* Drop read queue */ |
| 785 | tun_queue_purge(tfile); | 778 | tun_queue_purge(tfile); |
| 779 | xdp_rxq_info_unreg(&tfile->xdp_rxq); | ||
| 786 | sock_put(&tfile->sk); | 780 | sock_put(&tfile->sk); |
| 787 | tun_cleanup_tx_ring(tfile); | ||
| 788 | } | 781 | } |
| 789 | list_for_each_entry_safe(tfile, tmp, &tun->disabled, next) { | 782 | list_for_each_entry_safe(tfile, tmp, &tun->disabled, next) { |
| 790 | tun_enable_queue(tfile); | 783 | tun_enable_queue(tfile); |
| 791 | tun_queue_purge(tfile); | 784 | tun_queue_purge(tfile); |
| 785 | xdp_rxq_info_unreg(&tfile->xdp_rxq); | ||
| 792 | sock_put(&tfile->sk); | 786 | sock_put(&tfile->sk); |
| 793 | tun_cleanup_tx_ring(tfile); | ||
| 794 | } | 787 | } |
| 795 | BUG_ON(tun->numdisabled != 0); | 788 | BUG_ON(tun->numdisabled != 0); |
| 796 | 789 | ||
| @@ -834,7 +827,8 @@ static int tun_attach(struct tun_struct *tun, struct file *file, | |||
| 834 | } | 827 | } |
| 835 | 828 | ||
| 836 | if (!tfile->detached && | 829 | if (!tfile->detached && |
| 837 | ptr_ring_init(&tfile->tx_ring, dev->tx_queue_len, GFP_KERNEL)) { | 830 | ptr_ring_resize(&tfile->tx_ring, dev->tx_queue_len, |
| 831 | GFP_KERNEL, tun_ptr_free)) { | ||
| 838 | err = -ENOMEM; | 832 | err = -ENOMEM; |
| 839 | goto out; | 833 | goto out; |
| 840 | } | 834 | } |
| @@ -1429,6 +1423,13 @@ static void tun_net_init(struct net_device *dev) | |||
| 1429 | dev->max_mtu = MAX_MTU - dev->hard_header_len; | 1423 | dev->max_mtu = MAX_MTU - dev->hard_header_len; |
| 1430 | } | 1424 | } |
| 1431 | 1425 | ||
| 1426 | static bool tun_sock_writeable(struct tun_struct *tun, struct tun_file *tfile) | ||
| 1427 | { | ||
| 1428 | struct sock *sk = tfile->socket.sk; | ||
| 1429 | |||
| 1430 | return (tun->dev->flags & IFF_UP) && sock_writeable(sk); | ||
| 1431 | } | ||
| 1432 | |||
| 1432 | /* Character device part */ | 1433 | /* Character device part */ |
| 1433 | 1434 | ||
| 1434 | /* Poll */ | 1435 | /* Poll */ |
| @@ -1451,10 +1452,14 @@ static __poll_t tun_chr_poll(struct file *file, poll_table *wait) | |||
| 1451 | if (!ptr_ring_empty(&tfile->tx_ring)) | 1452 | if (!ptr_ring_empty(&tfile->tx_ring)) |
| 1452 | mask |= EPOLLIN | EPOLLRDNORM; | 1453 | mask |= EPOLLIN | EPOLLRDNORM; |
| 1453 | 1454 | ||
| 1454 | if (tun->dev->flags & IFF_UP && | 1455 | /* Make sure SOCKWQ_ASYNC_NOSPACE is set if not writable to |
| 1455 | (sock_writeable(sk) || | 1456 | * guarantee EPOLLOUT to be raised by either here or |
| 1456 | (!test_and_set_bit(SOCKWQ_ASYNC_NOSPACE, &sk->sk_socket->flags) && | 1457 | * tun_sock_write_space(). Then process could get notification |
| 1457 | sock_writeable(sk)))) | 1458 | * after it writes to a down device and meets -EIO. |
| 1459 | */ | ||
| 1460 | if (tun_sock_writeable(tun, tfile) || | ||
| 1461 | (!test_and_set_bit(SOCKWQ_ASYNC_NOSPACE, &sk->sk_socket->flags) && | ||
| 1462 | tun_sock_writeable(tun, tfile))) | ||
| 1458 | mask |= EPOLLOUT | EPOLLWRNORM; | 1463 | mask |= EPOLLOUT | EPOLLWRNORM; |
| 1459 | 1464 | ||
| 1460 | if (tun->dev->reg_state != NETREG_REGISTERED) | 1465 | if (tun->dev->reg_state != NETREG_REGISTERED) |
| @@ -3219,6 +3224,11 @@ static int tun_chr_open(struct inode *inode, struct file * file) | |||
| 3219 | &tun_proto, 0); | 3224 | &tun_proto, 0); |
| 3220 | if (!tfile) | 3225 | if (!tfile) |
| 3221 | return -ENOMEM; | 3226 | return -ENOMEM; |
| 3227 | if (ptr_ring_init(&tfile->tx_ring, 0, GFP_KERNEL)) { | ||
| 3228 | sk_free(&tfile->sk); | ||
| 3229 | return -ENOMEM; | ||
| 3230 | } | ||
| 3231 | |||
| 3222 | RCU_INIT_POINTER(tfile->tun, NULL); | 3232 | RCU_INIT_POINTER(tfile->tun, NULL); |
| 3223 | tfile->flags = 0; | 3233 | tfile->flags = 0; |
| 3224 | tfile->ifindex = 0; | 3234 | tfile->ifindex = 0; |
| @@ -3239,8 +3249,6 @@ static int tun_chr_open(struct inode *inode, struct file * file) | |||
| 3239 | 3249 | ||
| 3240 | sock_set_flag(&tfile->sk, SOCK_ZEROCOPY); | 3250 | sock_set_flag(&tfile->sk, SOCK_ZEROCOPY); |
| 3241 | 3251 | ||
| 3242 | memset(&tfile->tx_ring, 0, sizeof(tfile->tx_ring)); | ||
| 3243 | |||
| 3244 | return 0; | 3252 | return 0; |
| 3245 | } | 3253 | } |
| 3246 | 3254 | ||
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index 770422e953f7..032e1ac10a30 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c | |||
| @@ -707,6 +707,13 @@ static struct sk_buff *receive_mergeable(struct net_device *dev, | |||
| 707 | void *data; | 707 | void *data; |
| 708 | u32 act; | 708 | u32 act; |
| 709 | 709 | ||
| 710 | /* Transient failure which in theory could occur if | ||
| 711 | * in-flight packets from before XDP was enabled reach | ||
| 712 | * the receive path after XDP is loaded. | ||
| 713 | */ | ||
| 714 | if (unlikely(hdr->hdr.gso_type)) | ||
| 715 | goto err_xdp; | ||
| 716 | |||
| 710 | /* This happens when rx buffer size is underestimated | 717 | /* This happens when rx buffer size is underestimated |
| 711 | * or headroom is not enough because of the buffer | 718 | * or headroom is not enough because of the buffer |
| 712 | * was refilled before XDP is set. This should only | 719 | * was refilled before XDP is set. This should only |
| @@ -727,14 +734,6 @@ static struct sk_buff *receive_mergeable(struct net_device *dev, | |||
| 727 | xdp_page = page; | 734 | xdp_page = page; |
| 728 | } | 735 | } |
| 729 | 736 | ||
| 730 | /* Transient failure which in theory could occur if | ||
| 731 | * in-flight packets from before XDP was enabled reach | ||
| 732 | * the receive path after XDP is loaded. In practice I | ||
| 733 | * was not able to create this condition. | ||
| 734 | */ | ||
| 735 | if (unlikely(hdr->hdr.gso_type)) | ||
| 736 | goto err_xdp; | ||
| 737 | |||
| 738 | /* Allow consuming headroom but reserve enough space to push | 737 | /* Allow consuming headroom but reserve enough space to push |
| 739 | * the descriptor on if we get an XDP_TX return code. | 738 | * the descriptor on if we get an XDP_TX return code. |
| 740 | */ | 739 | */ |
| @@ -775,7 +774,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev, | |||
| 775 | } | 774 | } |
| 776 | *xdp_xmit = true; | 775 | *xdp_xmit = true; |
| 777 | if (unlikely(xdp_page != page)) | 776 | if (unlikely(xdp_page != page)) |
| 778 | goto err_xdp; | 777 | put_page(page); |
| 779 | rcu_read_unlock(); | 778 | rcu_read_unlock(); |
| 780 | goto xdp_xmit; | 779 | goto xdp_xmit; |
| 781 | case XDP_REDIRECT: | 780 | case XDP_REDIRECT: |
| @@ -787,7 +786,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev, | |||
| 787 | } | 786 | } |
| 788 | *xdp_xmit = true; | 787 | *xdp_xmit = true; |
| 789 | if (unlikely(xdp_page != page)) | 788 | if (unlikely(xdp_page != page)) |
| 790 | goto err_xdp; | 789 | put_page(page); |
| 791 | rcu_read_unlock(); | 790 | rcu_read_unlock(); |
| 792 | goto xdp_xmit; | 791 | goto xdp_xmit; |
| 793 | default: | 792 | default: |
| @@ -875,7 +874,7 @@ err_xdp: | |||
| 875 | rcu_read_unlock(); | 874 | rcu_read_unlock(); |
| 876 | err_skb: | 875 | err_skb: |
| 877 | put_page(page); | 876 | put_page(page); |
| 878 | while (--num_buf) { | 877 | while (num_buf-- > 1) { |
| 879 | buf = virtqueue_get_buf(rq->vq, &len); | 878 | buf = virtqueue_get_buf(rq->vq, &len); |
| 880 | if (unlikely(!buf)) { | 879 | if (unlikely(!buf)) { |
| 881 | pr_debug("%s: rx error: %d buffers missing\n", | 880 | pr_debug("%s: rx error: %d buffers missing\n", |
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c index 9ebe2a689966..27a9bb8c9611 100644 --- a/drivers/net/vmxnet3/vmxnet3_drv.c +++ b/drivers/net/vmxnet3/vmxnet3_drv.c | |||
| @@ -369,6 +369,11 @@ vmxnet3_tq_tx_complete(struct vmxnet3_tx_queue *tq, | |||
| 369 | 369 | ||
| 370 | gdesc = tq->comp_ring.base + tq->comp_ring.next2proc; | 370 | gdesc = tq->comp_ring.base + tq->comp_ring.next2proc; |
| 371 | while (VMXNET3_TCD_GET_GEN(&gdesc->tcd) == tq->comp_ring.gen) { | 371 | while (VMXNET3_TCD_GET_GEN(&gdesc->tcd) == tq->comp_ring.gen) { |
| 372 | /* Prevent any &gdesc->tcd field from being (speculatively) | ||
| 373 | * read before (&gdesc->tcd)->gen is read. | ||
| 374 | */ | ||
| 375 | dma_rmb(); | ||
| 376 | |||
| 372 | completed += vmxnet3_unmap_pkt(VMXNET3_TCD_GET_TXIDX( | 377 | completed += vmxnet3_unmap_pkt(VMXNET3_TCD_GET_TXIDX( |
| 373 | &gdesc->tcd), tq, adapter->pdev, | 378 | &gdesc->tcd), tq, adapter->pdev, |
| 374 | adapter); | 379 | adapter); |
| @@ -1103,6 +1108,11 @@ vmxnet3_tq_xmit(struct sk_buff *skb, struct vmxnet3_tx_queue *tq, | |||
| 1103 | gdesc->txd.tci = skb_vlan_tag_get(skb); | 1108 | gdesc->txd.tci = skb_vlan_tag_get(skb); |
| 1104 | } | 1109 | } |
| 1105 | 1110 | ||
| 1111 | /* Ensure that the write to (&gdesc->txd)->gen will be observed after | ||
| 1112 | * all other writes to &gdesc->txd. | ||
| 1113 | */ | ||
| 1114 | dma_wmb(); | ||
| 1115 | |||
| 1106 | /* finally flips the GEN bit of the SOP desc. */ | 1116 | /* finally flips the GEN bit of the SOP desc. */ |
| 1107 | gdesc->dword[2] = cpu_to_le32(le32_to_cpu(gdesc->dword[2]) ^ | 1117 | gdesc->dword[2] = cpu_to_le32(le32_to_cpu(gdesc->dword[2]) ^ |
| 1108 | VMXNET3_TXD_GEN); | 1118 | VMXNET3_TXD_GEN); |
| @@ -1298,6 +1308,12 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq, | |||
| 1298 | */ | 1308 | */ |
| 1299 | break; | 1309 | break; |
| 1300 | } | 1310 | } |
| 1311 | |||
| 1312 | /* Prevent any rcd field from being (speculatively) read before | ||
| 1313 | * rcd->gen is read. | ||
| 1314 | */ | ||
| 1315 | dma_rmb(); | ||
| 1316 | |||
| 1301 | BUG_ON(rcd->rqID != rq->qid && rcd->rqID != rq->qid2 && | 1317 | BUG_ON(rcd->rqID != rq->qid && rcd->rqID != rq->qid2 && |
| 1302 | rcd->rqID != rq->dataRingQid); | 1318 | rcd->rqID != rq->dataRingQid); |
| 1303 | idx = rcd->rxdIdx; | 1319 | idx = rcd->rxdIdx; |
| @@ -1528,6 +1544,12 @@ rcd_done: | |||
| 1528 | ring->next2comp = idx; | 1544 | ring->next2comp = idx; |
| 1529 | num_to_alloc = vmxnet3_cmd_ring_desc_avail(ring); | 1545 | num_to_alloc = vmxnet3_cmd_ring_desc_avail(ring); |
| 1530 | ring = rq->rx_ring + ring_idx; | 1546 | ring = rq->rx_ring + ring_idx; |
| 1547 | |||
| 1548 | /* Ensure that the writes to rxd->gen bits will be observed | ||
| 1549 | * after all other writes to rxd objects. | ||
| 1550 | */ | ||
| 1551 | dma_wmb(); | ||
| 1552 | |||
| 1531 | while (num_to_alloc) { | 1553 | while (num_to_alloc) { |
| 1532 | vmxnet3_getRxDesc(rxd, &ring->base[ring->next2fill].rxd, | 1554 | vmxnet3_getRxDesc(rxd, &ring->base[ring->next2fill].rxd, |
| 1533 | &rxCmdDesc); | 1555 | &rxCmdDesc); |
| @@ -2688,7 +2710,7 @@ vmxnet3_set_mac_addr(struct net_device *netdev, void *p) | |||
| 2688 | /* ==================== initialization and cleanup routines ============ */ | 2710 | /* ==================== initialization and cleanup routines ============ */ |
| 2689 | 2711 | ||
| 2690 | static int | 2712 | static int |
| 2691 | vmxnet3_alloc_pci_resources(struct vmxnet3_adapter *adapter, bool *dma64) | 2713 | vmxnet3_alloc_pci_resources(struct vmxnet3_adapter *adapter) |
| 2692 | { | 2714 | { |
| 2693 | int err; | 2715 | int err; |
| 2694 | unsigned long mmio_start, mmio_len; | 2716 | unsigned long mmio_start, mmio_len; |
| @@ -2700,30 +2722,12 @@ vmxnet3_alloc_pci_resources(struct vmxnet3_adapter *adapter, bool *dma64) | |||
| 2700 | return err; | 2722 | return err; |
| 2701 | } | 2723 | } |
| 2702 | 2724 | ||
| 2703 | if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) == 0) { | ||
| 2704 | if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) { | ||
| 2705 | dev_err(&pdev->dev, | ||
| 2706 | "pci_set_consistent_dma_mask failed\n"); | ||
| 2707 | err = -EIO; | ||
| 2708 | goto err_set_mask; | ||
| 2709 | } | ||
| 2710 | *dma64 = true; | ||
| 2711 | } else { | ||
| 2712 | if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) { | ||
| 2713 | dev_err(&pdev->dev, | ||
| 2714 | "pci_set_dma_mask failed\n"); | ||
| 2715 | err = -EIO; | ||
| 2716 | goto err_set_mask; | ||
| 2717 | } | ||
| 2718 | *dma64 = false; | ||
| 2719 | } | ||
| 2720 | |||
| 2721 | err = pci_request_selected_regions(pdev, (1 << 2) - 1, | 2725 | err = pci_request_selected_regions(pdev, (1 << 2) - 1, |
| 2722 | vmxnet3_driver_name); | 2726 | vmxnet3_driver_name); |
| 2723 | if (err) { | 2727 | if (err) { |
| 2724 | dev_err(&pdev->dev, | 2728 | dev_err(&pdev->dev, |
| 2725 | "Failed to request region for adapter: error %d\n", err); | 2729 | "Failed to request region for adapter: error %d\n", err); |
| 2726 | goto err_set_mask; | 2730 | goto err_enable_device; |
| 2727 | } | 2731 | } |
| 2728 | 2732 | ||
| 2729 | pci_set_master(pdev); | 2733 | pci_set_master(pdev); |
| @@ -2751,7 +2755,7 @@ err_bar1: | |||
| 2751 | iounmap(adapter->hw_addr0); | 2755 | iounmap(adapter->hw_addr0); |
| 2752 | err_ioremap: | 2756 | err_ioremap: |
| 2753 | pci_release_selected_regions(pdev, (1 << 2) - 1); | 2757 | pci_release_selected_regions(pdev, (1 << 2) - 1); |
| 2754 | err_set_mask: | 2758 | err_enable_device: |
| 2755 | pci_disable_device(pdev); | 2759 | pci_disable_device(pdev); |
| 2756 | return err; | 2760 | return err; |
| 2757 | } | 2761 | } |
| @@ -3254,7 +3258,7 @@ vmxnet3_probe_device(struct pci_dev *pdev, | |||
| 3254 | #endif | 3258 | #endif |
| 3255 | }; | 3259 | }; |
| 3256 | int err; | 3260 | int err; |
| 3257 | bool dma64 = false; /* stupid gcc */ | 3261 | bool dma64; |
| 3258 | u32 ver; | 3262 | u32 ver; |
| 3259 | struct net_device *netdev; | 3263 | struct net_device *netdev; |
| 3260 | struct vmxnet3_adapter *adapter; | 3264 | struct vmxnet3_adapter *adapter; |
| @@ -3300,6 +3304,24 @@ vmxnet3_probe_device(struct pci_dev *pdev, | |||
| 3300 | adapter->rx_ring_size = VMXNET3_DEF_RX_RING_SIZE; | 3304 | adapter->rx_ring_size = VMXNET3_DEF_RX_RING_SIZE; |
| 3301 | adapter->rx_ring2_size = VMXNET3_DEF_RX_RING2_SIZE; | 3305 | adapter->rx_ring2_size = VMXNET3_DEF_RX_RING2_SIZE; |
| 3302 | 3306 | ||
| 3307 | if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) == 0) { | ||
| 3308 | if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) { | ||
| 3309 | dev_err(&pdev->dev, | ||
| 3310 | "pci_set_consistent_dma_mask failed\n"); | ||
| 3311 | err = -EIO; | ||
| 3312 | goto err_set_mask; | ||
| 3313 | } | ||
| 3314 | dma64 = true; | ||
| 3315 | } else { | ||
| 3316 | if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) { | ||
| 3317 | dev_err(&pdev->dev, | ||
| 3318 | "pci_set_dma_mask failed\n"); | ||
| 3319 | err = -EIO; | ||
| 3320 | goto err_set_mask; | ||
| 3321 | } | ||
| 3322 | dma64 = false; | ||
| 3323 | } | ||
| 3324 | |||
| 3303 | spin_lock_init(&adapter->cmd_lock); | 3325 | spin_lock_init(&adapter->cmd_lock); |
| 3304 | adapter->adapter_pa = dma_map_single(&adapter->pdev->dev, adapter, | 3326 | adapter->adapter_pa = dma_map_single(&adapter->pdev->dev, adapter, |
| 3305 | sizeof(struct vmxnet3_adapter), | 3327 | sizeof(struct vmxnet3_adapter), |
| @@ -3307,7 +3329,7 @@ vmxnet3_probe_device(struct pci_dev *pdev, | |||
| 3307 | if (dma_mapping_error(&adapter->pdev->dev, adapter->adapter_pa)) { | 3329 | if (dma_mapping_error(&adapter->pdev->dev, adapter->adapter_pa)) { |
| 3308 | dev_err(&pdev->dev, "Failed to map dma\n"); | 3330 | dev_err(&pdev->dev, "Failed to map dma\n"); |
| 3309 | err = -EFAULT; | 3331 | err = -EFAULT; |
| 3310 | goto err_dma_map; | 3332 | goto err_set_mask; |
| 3311 | } | 3333 | } |
| 3312 | adapter->shared = dma_alloc_coherent( | 3334 | adapter->shared = dma_alloc_coherent( |
| 3313 | &adapter->pdev->dev, | 3335 | &adapter->pdev->dev, |
| @@ -3358,7 +3380,7 @@ vmxnet3_probe_device(struct pci_dev *pdev, | |||
| 3358 | } | 3380 | } |
| 3359 | #endif /* VMXNET3_RSS */ | 3381 | #endif /* VMXNET3_RSS */ |
| 3360 | 3382 | ||
| 3361 | err = vmxnet3_alloc_pci_resources(adapter, &dma64); | 3383 | err = vmxnet3_alloc_pci_resources(adapter); |
| 3362 | if (err < 0) | 3384 | if (err < 0) |
| 3363 | goto err_alloc_pci; | 3385 | goto err_alloc_pci; |
| 3364 | 3386 | ||
| @@ -3504,7 +3526,7 @@ err_alloc_queue_desc: | |||
| 3504 | err_alloc_shared: | 3526 | err_alloc_shared: |
| 3505 | dma_unmap_single(&adapter->pdev->dev, adapter->adapter_pa, | 3527 | dma_unmap_single(&adapter->pdev->dev, adapter->adapter_pa, |
| 3506 | sizeof(struct vmxnet3_adapter), PCI_DMA_TODEVICE); | 3528 | sizeof(struct vmxnet3_adapter), PCI_DMA_TODEVICE); |
| 3507 | err_dma_map: | 3529 | err_set_mask: |
| 3508 | free_netdev(netdev); | 3530 | free_netdev(netdev); |
| 3509 | return err; | 3531 | return err; |
| 3510 | } | 3532 | } |
diff --git a/drivers/net/vmxnet3/vmxnet3_int.h b/drivers/net/vmxnet3/vmxnet3_int.h index a3326463b71f..a2c554f8a61b 100644 --- a/drivers/net/vmxnet3/vmxnet3_int.h +++ b/drivers/net/vmxnet3/vmxnet3_int.h | |||
| @@ -69,10 +69,12 @@ | |||
| 69 | /* | 69 | /* |
| 70 | * Version numbers | 70 | * Version numbers |
| 71 | */ | 71 | */ |
| 72 | #define VMXNET3_DRIVER_VERSION_STRING "1.4.14.0-k" | 72 | #define VMXNET3_DRIVER_VERSION_STRING "1.4.16.0-k" |
| 73 | 73 | ||
| 74 | /* a 32-bit int, each byte encode a verion number in VMXNET3_DRIVER_VERSION */ | 74 | /* Each byte of this 32-bit integer encodes a version number in |
| 75 | #define VMXNET3_DRIVER_VERSION_NUM 0x01040e00 | 75 | * VMXNET3_DRIVER_VERSION_STRING. |
| 76 | */ | ||
| 77 | #define VMXNET3_DRIVER_VERSION_NUM 0x01041000 | ||
| 76 | 78 | ||
| 77 | #if defined(CONFIG_PCI_MSI) | 79 | #if defined(CONFIG_PCI_MSI) |
| 78 | /* RSS only makes sense if MSI-X is supported. */ | 80 | /* RSS only makes sense if MSI-X is supported. */ |
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c index 4a017a0d71ea..920c23e542a5 100644 --- a/drivers/net/wireless/mac80211_hwsim.c +++ b/drivers/net/wireless/mac80211_hwsim.c | |||
| @@ -3340,7 +3340,7 @@ out_err: | |||
| 3340 | static int hwsim_dump_radio_nl(struct sk_buff *skb, | 3340 | static int hwsim_dump_radio_nl(struct sk_buff *skb, |
| 3341 | struct netlink_callback *cb) | 3341 | struct netlink_callback *cb) |
| 3342 | { | 3342 | { |
| 3343 | int last_idx = cb->args[0]; | 3343 | int last_idx = cb->args[0] - 1; |
| 3344 | struct mac80211_hwsim_data *data = NULL; | 3344 | struct mac80211_hwsim_data *data = NULL; |
| 3345 | int res = 0; | 3345 | int res = 0; |
| 3346 | void *hdr; | 3346 | void *hdr; |
| @@ -3368,7 +3368,7 @@ static int hwsim_dump_radio_nl(struct sk_buff *skb, | |||
| 3368 | last_idx = data->idx; | 3368 | last_idx = data->idx; |
| 3369 | } | 3369 | } |
| 3370 | 3370 | ||
| 3371 | cb->args[0] = last_idx; | 3371 | cb->args[0] = last_idx + 1; |
| 3372 | 3372 | ||
| 3373 | /* list changed, but no new element sent, set interrupted flag */ | 3373 | /* list changed, but no new element sent, set interrupted flag */ |
| 3374 | if (skb->len == 0 && cb->prev_seq && cb->seq != cb->prev_seq) { | 3374 | if (skb->len == 0 && cb->prev_seq && cb->seq != cb->prev_seq) { |
diff --git a/drivers/nvme/host/Kconfig b/drivers/nvme/host/Kconfig index 88a8b5916624..dbb7464c018c 100644 --- a/drivers/nvme/host/Kconfig +++ b/drivers/nvme/host/Kconfig | |||
| @@ -27,7 +27,7 @@ config NVME_FABRICS | |||
| 27 | 27 | ||
| 28 | config NVME_RDMA | 28 | config NVME_RDMA |
| 29 | tristate "NVM Express over Fabrics RDMA host driver" | 29 | tristate "NVM Express over Fabrics RDMA host driver" |
| 30 | depends on INFINIBAND && INFINIBAND_ADDR_TRANS && BLOCK | 30 | depends on INFINIBAND_ADDR_TRANS && BLOCK |
| 31 | select NVME_CORE | 31 | select NVME_CORE |
| 32 | select NVME_FABRICS | 32 | select NVME_FABRICS |
| 33 | select SG_POOL | 33 | select SG_POOL |
diff --git a/drivers/nvme/target/Kconfig b/drivers/nvme/target/Kconfig index 3c7b61ddb0d1..7595664ee753 100644 --- a/drivers/nvme/target/Kconfig +++ b/drivers/nvme/target/Kconfig | |||
| @@ -27,7 +27,7 @@ config NVME_TARGET_LOOP | |||
| 27 | 27 | ||
| 28 | config NVME_TARGET_RDMA | 28 | config NVME_TARGET_RDMA |
| 29 | tristate "NVMe over Fabrics RDMA target support" | 29 | tristate "NVMe over Fabrics RDMA target support" |
| 30 | depends on INFINIBAND && INFINIBAND_ADDR_TRANS | 30 | depends on INFINIBAND_ADDR_TRANS |
| 31 | depends on NVME_TARGET | 31 | depends on NVME_TARGET |
| 32 | select SGL_ALLOC | 32 | select SGL_ALLOC |
| 33 | help | 33 | help |
diff --git a/drivers/parisc/ccio-dma.c b/drivers/parisc/ccio-dma.c index 126cf19e869b..297599fcbc32 100644 --- a/drivers/parisc/ccio-dma.c +++ b/drivers/parisc/ccio-dma.c | |||
| @@ -1195,7 +1195,7 @@ void * ccio_get_iommu(const struct parisc_device *dev) | |||
| 1195 | * to/from certain pages. To avoid this happening, we mark these pages | 1195 | * to/from certain pages. To avoid this happening, we mark these pages |
| 1196 | * as `used', and ensure that nothing will try to allocate from them. | 1196 | * as `used', and ensure that nothing will try to allocate from them. |
| 1197 | */ | 1197 | */ |
| 1198 | void ccio_cujo20_fixup(struct parisc_device *cujo, u32 iovp) | 1198 | void __init ccio_cujo20_fixup(struct parisc_device *cujo, u32 iovp) |
| 1199 | { | 1199 | { |
| 1200 | unsigned int idx; | 1200 | unsigned int idx; |
| 1201 | struct parisc_device *dev = parisc_parent(cujo); | 1201 | struct parisc_device *dev = parisc_parent(cujo); |
diff --git a/drivers/platform/chrome/cros_ec_proto.c b/drivers/platform/chrome/cros_ec_proto.c index e7bbdf947bbc..8350ca2311c7 100644 --- a/drivers/platform/chrome/cros_ec_proto.c +++ b/drivers/platform/chrome/cros_ec_proto.c | |||
| @@ -91,6 +91,8 @@ static int send_command(struct cros_ec_device *ec_dev, | |||
| 91 | usleep_range(10000, 11000); | 91 | usleep_range(10000, 11000); |
| 92 | 92 | ||
| 93 | ret = (*xfer_fxn)(ec_dev, status_msg); | 93 | ret = (*xfer_fxn)(ec_dev, status_msg); |
| 94 | if (ret == -EAGAIN) | ||
| 95 | continue; | ||
| 94 | if (ret < 0) | 96 | if (ret < 0) |
| 95 | break; | 97 | break; |
| 96 | 98 | ||
diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig index bc309c5327ff..566644bb496a 100644 --- a/drivers/platform/x86/Kconfig +++ b/drivers/platform/x86/Kconfig | |||
| @@ -168,8 +168,8 @@ config DELL_WMI | |||
| 168 | depends on DMI | 168 | depends on DMI |
| 169 | depends on INPUT | 169 | depends on INPUT |
| 170 | depends on ACPI_VIDEO || ACPI_VIDEO = n | 170 | depends on ACPI_VIDEO || ACPI_VIDEO = n |
| 171 | depends on DELL_SMBIOS | ||
| 171 | select DELL_WMI_DESCRIPTOR | 172 | select DELL_WMI_DESCRIPTOR |
| 172 | select DELL_SMBIOS | ||
| 173 | select INPUT_SPARSEKMAP | 173 | select INPUT_SPARSEKMAP |
| 174 | ---help--- | 174 | ---help--- |
| 175 | Say Y here if you want to support WMI-based hotkeys on Dell laptops. | 175 | Say Y here if you want to support WMI-based hotkeys on Dell laptops. |
diff --git a/drivers/reset/reset-uniphier.c b/drivers/reset/reset-uniphier.c index 360e06b20c53..ac18f2f27881 100644 --- a/drivers/reset/reset-uniphier.c +++ b/drivers/reset/reset-uniphier.c | |||
| @@ -110,7 +110,7 @@ static const struct uniphier_reset_data uniphier_ld20_sys_reset_data[] = { | |||
| 110 | UNIPHIER_RESETX(4, 0x200c, 2), /* eMMC */ | 110 | UNIPHIER_RESETX(4, 0x200c, 2), /* eMMC */ |
| 111 | UNIPHIER_RESETX(6, 0x200c, 6), /* Ether */ | 111 | UNIPHIER_RESETX(6, 0x200c, 6), /* Ether */ |
| 112 | UNIPHIER_RESETX(8, 0x200c, 8), /* STDMAC (HSC) */ | 112 | UNIPHIER_RESETX(8, 0x200c, 8), /* STDMAC (HSC) */ |
| 113 | UNIPHIER_RESETX(12, 0x200c, 5), /* GIO (PCIe, USB3) */ | 113 | UNIPHIER_RESETX(14, 0x200c, 5), /* USB30 */ |
| 114 | UNIPHIER_RESETX(16, 0x200c, 12), /* USB30-PHY0 */ | 114 | UNIPHIER_RESETX(16, 0x200c, 12), /* USB30-PHY0 */ |
| 115 | UNIPHIER_RESETX(17, 0x200c, 13), /* USB30-PHY1 */ | 115 | UNIPHIER_RESETX(17, 0x200c, 13), /* USB30-PHY1 */ |
| 116 | UNIPHIER_RESETX(18, 0x200c, 14), /* USB30-PHY2 */ | 116 | UNIPHIER_RESETX(18, 0x200c, 14), /* USB30-PHY2 */ |
| @@ -127,8 +127,8 @@ static const struct uniphier_reset_data uniphier_pxs3_sys_reset_data[] = { | |||
| 127 | UNIPHIER_RESETX(6, 0x200c, 9), /* Ether0 */ | 127 | UNIPHIER_RESETX(6, 0x200c, 9), /* Ether0 */ |
| 128 | UNIPHIER_RESETX(7, 0x200c, 10), /* Ether1 */ | 128 | UNIPHIER_RESETX(7, 0x200c, 10), /* Ether1 */ |
| 129 | UNIPHIER_RESETX(8, 0x200c, 12), /* STDMAC */ | 129 | UNIPHIER_RESETX(8, 0x200c, 12), /* STDMAC */ |
| 130 | UNIPHIER_RESETX(12, 0x200c, 4), /* USB30 link (GIO0) */ | 130 | UNIPHIER_RESETX(12, 0x200c, 4), /* USB30 link */ |
| 131 | UNIPHIER_RESETX(13, 0x200c, 5), /* USB31 link (GIO1) */ | 131 | UNIPHIER_RESETX(13, 0x200c, 5), /* USB31 link */ |
| 132 | UNIPHIER_RESETX(16, 0x200c, 16), /* USB30-PHY0 */ | 132 | UNIPHIER_RESETX(16, 0x200c, 16), /* USB30-PHY0 */ |
| 133 | UNIPHIER_RESETX(17, 0x200c, 18), /* USB30-PHY1 */ | 133 | UNIPHIER_RESETX(17, 0x200c, 18), /* USB30-PHY1 */ |
| 134 | UNIPHIER_RESETX(18, 0x200c, 20), /* USB30-PHY2 */ | 134 | UNIPHIER_RESETX(18, 0x200c, 20), /* USB30-PHY2 */ |
diff --git a/drivers/s390/cio/qdio_setup.c b/drivers/s390/cio/qdio_setup.c index 439991d71b14..4c14ce428e92 100644 --- a/drivers/s390/cio/qdio_setup.c +++ b/drivers/s390/cio/qdio_setup.c | |||
| @@ -141,7 +141,7 @@ static int __qdio_allocate_qs(struct qdio_q **irq_ptr_qs, int nr_queues) | |||
| 141 | int i; | 141 | int i; |
| 142 | 142 | ||
| 143 | for (i = 0; i < nr_queues; i++) { | 143 | for (i = 0; i < nr_queues; i++) { |
| 144 | q = kmem_cache_alloc(qdio_q_cache, GFP_KERNEL); | 144 | q = kmem_cache_zalloc(qdio_q_cache, GFP_KERNEL); |
| 145 | if (!q) | 145 | if (!q) |
| 146 | return -ENOMEM; | 146 | return -ENOMEM; |
| 147 | 147 | ||
| @@ -456,7 +456,6 @@ int qdio_setup_irq(struct qdio_initialize *init_data) | |||
| 456 | { | 456 | { |
| 457 | struct ciw *ciw; | 457 | struct ciw *ciw; |
| 458 | struct qdio_irq *irq_ptr = init_data->cdev->private->qdio_data; | 458 | struct qdio_irq *irq_ptr = init_data->cdev->private->qdio_data; |
| 459 | int rc; | ||
| 460 | 459 | ||
| 461 | memset(&irq_ptr->qib, 0, sizeof(irq_ptr->qib)); | 460 | memset(&irq_ptr->qib, 0, sizeof(irq_ptr->qib)); |
| 462 | memset(&irq_ptr->siga_flag, 0, sizeof(irq_ptr->siga_flag)); | 461 | memset(&irq_ptr->siga_flag, 0, sizeof(irq_ptr->siga_flag)); |
| @@ -493,16 +492,14 @@ int qdio_setup_irq(struct qdio_initialize *init_data) | |||
| 493 | ciw = ccw_device_get_ciw(init_data->cdev, CIW_TYPE_EQUEUE); | 492 | ciw = ccw_device_get_ciw(init_data->cdev, CIW_TYPE_EQUEUE); |
| 494 | if (!ciw) { | 493 | if (!ciw) { |
| 495 | DBF_ERROR("%4x NO EQ", irq_ptr->schid.sch_no); | 494 | DBF_ERROR("%4x NO EQ", irq_ptr->schid.sch_no); |
| 496 | rc = -EINVAL; | 495 | return -EINVAL; |
| 497 | goto out_err; | ||
| 498 | } | 496 | } |
| 499 | irq_ptr->equeue = *ciw; | 497 | irq_ptr->equeue = *ciw; |
| 500 | 498 | ||
| 501 | ciw = ccw_device_get_ciw(init_data->cdev, CIW_TYPE_AQUEUE); | 499 | ciw = ccw_device_get_ciw(init_data->cdev, CIW_TYPE_AQUEUE); |
| 502 | if (!ciw) { | 500 | if (!ciw) { |
| 503 | DBF_ERROR("%4x NO AQ", irq_ptr->schid.sch_no); | 501 | DBF_ERROR("%4x NO AQ", irq_ptr->schid.sch_no); |
| 504 | rc = -EINVAL; | 502 | return -EINVAL; |
| 505 | goto out_err; | ||
| 506 | } | 503 | } |
| 507 | irq_ptr->aqueue = *ciw; | 504 | irq_ptr->aqueue = *ciw; |
| 508 | 505 | ||
| @@ -512,9 +509,6 @@ int qdio_setup_irq(struct qdio_initialize *init_data) | |||
| 512 | init_data->cdev->handler = qdio_int_handler; | 509 | init_data->cdev->handler = qdio_int_handler; |
| 513 | spin_unlock_irq(get_ccwdev_lock(irq_ptr->cdev)); | 510 | spin_unlock_irq(get_ccwdev_lock(irq_ptr->cdev)); |
| 514 | return 0; | 511 | return 0; |
| 515 | out_err: | ||
| 516 | qdio_release_memory(irq_ptr); | ||
| 517 | return rc; | ||
| 518 | } | 512 | } |
| 519 | 513 | ||
| 520 | void qdio_print_subchannel_info(struct qdio_irq *irq_ptr, | 514 | void qdio_print_subchannel_info(struct qdio_irq *irq_ptr, |
diff --git a/drivers/s390/cio/vfio_ccw_cp.c b/drivers/s390/cio/vfio_ccw_cp.c index 2c7550797ec2..dce92b2a895d 100644 --- a/drivers/s390/cio/vfio_ccw_cp.c +++ b/drivers/s390/cio/vfio_ccw_cp.c | |||
| @@ -715,6 +715,10 @@ void cp_free(struct channel_program *cp) | |||
| 715 | * and stores the result to ccwchain list. @cp must have been | 715 | * and stores the result to ccwchain list. @cp must have been |
| 716 | * initialized by a previous call with cp_init(). Otherwise, undefined | 716 | * initialized by a previous call with cp_init(). Otherwise, undefined |
| 717 | * behavior occurs. | 717 | * behavior occurs. |
| 718 | * For each chain composing the channel program: | ||
| 719 | * - On entry ch_len holds the count of CCWs to be translated. | ||
| 720 | * - On exit ch_len is adjusted to the count of successfully translated CCWs. | ||
| 721 | * This allows cp_free to find in ch_len the count of CCWs to free in a chain. | ||
| 718 | * | 722 | * |
| 719 | * The S/390 CCW Translation APIS (prefixed by 'cp_') are introduced | 723 | * The S/390 CCW Translation APIS (prefixed by 'cp_') are introduced |
| 720 | * as helpers to do ccw chain translation inside the kernel. Basically | 724 | * as helpers to do ccw chain translation inside the kernel. Basically |
| @@ -749,11 +753,18 @@ int cp_prefetch(struct channel_program *cp) | |||
| 749 | for (idx = 0; idx < len; idx++) { | 753 | for (idx = 0; idx < len; idx++) { |
| 750 | ret = ccwchain_fetch_one(chain, idx, cp); | 754 | ret = ccwchain_fetch_one(chain, idx, cp); |
| 751 | if (ret) | 755 | if (ret) |
| 752 | return ret; | 756 | goto out_err; |
| 753 | } | 757 | } |
| 754 | } | 758 | } |
| 755 | 759 | ||
| 756 | return 0; | 760 | return 0; |
| 761 | out_err: | ||
| 762 | /* Only cleanup the chain elements that were actually translated. */ | ||
| 763 | chain->ch_len = idx; | ||
| 764 | list_for_each_entry_continue(chain, &cp->ccwchain_list, next) { | ||
| 765 | chain->ch_len = 0; | ||
| 766 | } | ||
| 767 | return ret; | ||
| 757 | } | 768 | } |
| 758 | 769 | ||
| 759 | /** | 770 | /** |
diff --git a/drivers/s390/scsi/zfcp_dbf.c b/drivers/s390/scsi/zfcp_dbf.c index a8b831000b2d..18c4f933e8b9 100644 --- a/drivers/s390/scsi/zfcp_dbf.c +++ b/drivers/s390/scsi/zfcp_dbf.c | |||
| @@ -4,7 +4,7 @@ | |||
| 4 | * | 4 | * |
| 5 | * Debug traces for zfcp. | 5 | * Debug traces for zfcp. |
| 6 | * | 6 | * |
| 7 | * Copyright IBM Corp. 2002, 2017 | 7 | * Copyright IBM Corp. 2002, 2018 |
| 8 | */ | 8 | */ |
| 9 | 9 | ||
| 10 | #define KMSG_COMPONENT "zfcp" | 10 | #define KMSG_COMPONENT "zfcp" |
| @@ -308,6 +308,27 @@ void zfcp_dbf_rec_trig(char *tag, struct zfcp_adapter *adapter, | |||
| 308 | spin_unlock_irqrestore(&dbf->rec_lock, flags); | 308 | spin_unlock_irqrestore(&dbf->rec_lock, flags); |
| 309 | } | 309 | } |
| 310 | 310 | ||
| 311 | /** | ||
| 312 | * zfcp_dbf_rec_trig_lock - trace event related to triggered recovery with lock | ||
| 313 | * @tag: identifier for event | ||
| 314 | * @adapter: adapter on which the erp_action should run | ||
| 315 | * @port: remote port involved in the erp_action | ||
| 316 | * @sdev: scsi device involved in the erp_action | ||
| 317 | * @want: wanted erp_action | ||
| 318 | * @need: required erp_action | ||
| 319 | * | ||
| 320 | * The adapter->erp_lock must not be held. | ||
| 321 | */ | ||
| 322 | void zfcp_dbf_rec_trig_lock(char *tag, struct zfcp_adapter *adapter, | ||
| 323 | struct zfcp_port *port, struct scsi_device *sdev, | ||
| 324 | u8 want, u8 need) | ||
| 325 | { | ||
| 326 | unsigned long flags; | ||
| 327 | |||
| 328 | read_lock_irqsave(&adapter->erp_lock, flags); | ||
| 329 | zfcp_dbf_rec_trig(tag, adapter, port, sdev, want, need); | ||
| 330 | read_unlock_irqrestore(&adapter->erp_lock, flags); | ||
| 331 | } | ||
| 311 | 332 | ||
| 312 | /** | 333 | /** |
| 313 | * zfcp_dbf_rec_run_lvl - trace event related to running recovery | 334 | * zfcp_dbf_rec_run_lvl - trace event related to running recovery |
diff --git a/drivers/s390/scsi/zfcp_ext.h b/drivers/s390/scsi/zfcp_ext.h index bf8ea4df2bb8..e5eed8aac0ce 100644 --- a/drivers/s390/scsi/zfcp_ext.h +++ b/drivers/s390/scsi/zfcp_ext.h | |||
| @@ -4,7 +4,7 @@ | |||
| 4 | * | 4 | * |
| 5 | * External function declarations. | 5 | * External function declarations. |
| 6 | * | 6 | * |
| 7 | * Copyright IBM Corp. 2002, 2016 | 7 | * Copyright IBM Corp. 2002, 2018 |
| 8 | */ | 8 | */ |
| 9 | 9 | ||
| 10 | #ifndef ZFCP_EXT_H | 10 | #ifndef ZFCP_EXT_H |
| @@ -35,6 +35,9 @@ extern int zfcp_dbf_adapter_register(struct zfcp_adapter *); | |||
| 35 | extern void zfcp_dbf_adapter_unregister(struct zfcp_adapter *); | 35 | extern void zfcp_dbf_adapter_unregister(struct zfcp_adapter *); |
| 36 | extern void zfcp_dbf_rec_trig(char *, struct zfcp_adapter *, | 36 | extern void zfcp_dbf_rec_trig(char *, struct zfcp_adapter *, |
| 37 | struct zfcp_port *, struct scsi_device *, u8, u8); | 37 | struct zfcp_port *, struct scsi_device *, u8, u8); |
| 38 | extern void zfcp_dbf_rec_trig_lock(char *tag, struct zfcp_adapter *adapter, | ||
| 39 | struct zfcp_port *port, | ||
| 40 | struct scsi_device *sdev, u8 want, u8 need); | ||
| 38 | extern void zfcp_dbf_rec_run(char *, struct zfcp_erp_action *); | 41 | extern void zfcp_dbf_rec_run(char *, struct zfcp_erp_action *); |
| 39 | extern void zfcp_dbf_rec_run_lvl(int level, char *tag, | 42 | extern void zfcp_dbf_rec_run_lvl(int level, char *tag, |
| 40 | struct zfcp_erp_action *erp); | 43 | struct zfcp_erp_action *erp); |
diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c index 4d2ba5682493..22f9562f415c 100644 --- a/drivers/s390/scsi/zfcp_scsi.c +++ b/drivers/s390/scsi/zfcp_scsi.c | |||
| @@ -4,7 +4,7 @@ | |||
| 4 | * | 4 | * |
| 5 | * Interface to Linux SCSI midlayer. | 5 | * Interface to Linux SCSI midlayer. |
| 6 | * | 6 | * |
| 7 | * Copyright IBM Corp. 2002, 2017 | 7 | * Copyright IBM Corp. 2002, 2018 |
| 8 | */ | 8 | */ |
| 9 | 9 | ||
| 10 | #define KMSG_COMPONENT "zfcp" | 10 | #define KMSG_COMPONENT "zfcp" |
| @@ -618,9 +618,9 @@ static void zfcp_scsi_rport_register(struct zfcp_port *port) | |||
| 618 | ids.port_id = port->d_id; | 618 | ids.port_id = port->d_id; |
| 619 | ids.roles = FC_RPORT_ROLE_FCP_TARGET; | 619 | ids.roles = FC_RPORT_ROLE_FCP_TARGET; |
| 620 | 620 | ||
| 621 | zfcp_dbf_rec_trig("scpaddy", port->adapter, port, NULL, | 621 | zfcp_dbf_rec_trig_lock("scpaddy", port->adapter, port, NULL, |
| 622 | ZFCP_PSEUDO_ERP_ACTION_RPORT_ADD, | 622 | ZFCP_PSEUDO_ERP_ACTION_RPORT_ADD, |
| 623 | ZFCP_PSEUDO_ERP_ACTION_RPORT_ADD); | 623 | ZFCP_PSEUDO_ERP_ACTION_RPORT_ADD); |
| 624 | rport = fc_remote_port_add(port->adapter->scsi_host, 0, &ids); | 624 | rport = fc_remote_port_add(port->adapter->scsi_host, 0, &ids); |
| 625 | if (!rport) { | 625 | if (!rport) { |
| 626 | dev_err(&port->adapter->ccw_device->dev, | 626 | dev_err(&port->adapter->ccw_device->dev, |
| @@ -642,9 +642,9 @@ static void zfcp_scsi_rport_block(struct zfcp_port *port) | |||
| 642 | struct fc_rport *rport = port->rport; | 642 | struct fc_rport *rport = port->rport; |
| 643 | 643 | ||
| 644 | if (rport) { | 644 | if (rport) { |
| 645 | zfcp_dbf_rec_trig("scpdely", port->adapter, port, NULL, | 645 | zfcp_dbf_rec_trig_lock("scpdely", port->adapter, port, NULL, |
| 646 | ZFCP_PSEUDO_ERP_ACTION_RPORT_DEL, | 646 | ZFCP_PSEUDO_ERP_ACTION_RPORT_DEL, |
| 647 | ZFCP_PSEUDO_ERP_ACTION_RPORT_DEL); | 647 | ZFCP_PSEUDO_ERP_ACTION_RPORT_DEL); |
| 648 | fc_remote_port_delete(rport); | 648 | fc_remote_port_delete(rport); |
| 649 | port->rport = NULL; | 649 | port->rport = NULL; |
| 650 | } | 650 | } |
diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile index e29f9b8fd66d..56c940394729 100644 --- a/drivers/scsi/Makefile +++ b/drivers/scsi/Makefile | |||
| @@ -182,7 +182,7 @@ zalon7xx-objs := zalon.o ncr53c8xx.o | |||
| 182 | NCR_Q720_mod-objs := NCR_Q720.o ncr53c8xx.o | 182 | NCR_Q720_mod-objs := NCR_Q720.o ncr53c8xx.o |
| 183 | 183 | ||
| 184 | # Files generated that shall be removed upon make clean | 184 | # Files generated that shall be removed upon make clean |
| 185 | clean-files := 53c700_d.h 53c700_u.h | 185 | clean-files := 53c700_d.h 53c700_u.h scsi_devinfo_tbl.c |
| 186 | 186 | ||
| 187 | $(obj)/53c700.o $(MODVERDIR)/$(obj)/53c700.ver: $(obj)/53c700_d.h | 187 | $(obj)/53c700.o $(MODVERDIR)/$(obj)/53c700.ver: $(obj)/53c700_d.h |
| 188 | 188 | ||
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c index c198b96368dd..5c40d809830f 100644 --- a/drivers/scsi/sg.c +++ b/drivers/scsi/sg.c | |||
| @@ -1894,7 +1894,7 @@ retry: | |||
| 1894 | num = (rem_sz > scatter_elem_sz_prev) ? | 1894 | num = (rem_sz > scatter_elem_sz_prev) ? |
| 1895 | scatter_elem_sz_prev : rem_sz; | 1895 | scatter_elem_sz_prev : rem_sz; |
| 1896 | 1896 | ||
| 1897 | schp->pages[k] = alloc_pages(gfp_mask, order); | 1897 | schp->pages[k] = alloc_pages(gfp_mask | __GFP_ZERO, order); |
| 1898 | if (!schp->pages[k]) | 1898 | if (!schp->pages[k]) |
| 1899 | goto out; | 1899 | goto out; |
| 1900 | 1900 | ||
diff --git a/drivers/scsi/sr_ioctl.c b/drivers/scsi/sr_ioctl.c index 2a21f2d48592..35fab1e18adc 100644 --- a/drivers/scsi/sr_ioctl.c +++ b/drivers/scsi/sr_ioctl.c | |||
| @@ -188,9 +188,13 @@ int sr_do_ioctl(Scsi_CD *cd, struct packet_command *cgc) | |||
| 188 | struct scsi_device *SDev; | 188 | struct scsi_device *SDev; |
| 189 | struct scsi_sense_hdr sshdr; | 189 | struct scsi_sense_hdr sshdr; |
| 190 | int result, err = 0, retries = 0; | 190 | int result, err = 0, retries = 0; |
| 191 | unsigned char sense_buffer[SCSI_SENSE_BUFFERSIZE], *senseptr = NULL; | ||
| 191 | 192 | ||
| 192 | SDev = cd->device; | 193 | SDev = cd->device; |
| 193 | 194 | ||
| 195 | if (cgc->sense) | ||
| 196 | senseptr = sense_buffer; | ||
| 197 | |||
| 194 | retry: | 198 | retry: |
| 195 | if (!scsi_block_when_processing_errors(SDev)) { | 199 | if (!scsi_block_when_processing_errors(SDev)) { |
| 196 | err = -ENODEV; | 200 | err = -ENODEV; |
| @@ -198,10 +202,12 @@ int sr_do_ioctl(Scsi_CD *cd, struct packet_command *cgc) | |||
| 198 | } | 202 | } |
| 199 | 203 | ||
| 200 | result = scsi_execute(SDev, cgc->cmd, cgc->data_direction, | 204 | result = scsi_execute(SDev, cgc->cmd, cgc->data_direction, |
| 201 | cgc->buffer, cgc->buflen, | 205 | cgc->buffer, cgc->buflen, senseptr, &sshdr, |
| 202 | (unsigned char *)cgc->sense, &sshdr, | ||
| 203 | cgc->timeout, IOCTL_RETRIES, 0, 0, NULL); | 206 | cgc->timeout, IOCTL_RETRIES, 0, 0, NULL); |
| 204 | 207 | ||
| 208 | if (cgc->sense) | ||
| 209 | memcpy(cgc->sense, sense_buffer, sizeof(*cgc->sense)); | ||
| 210 | |||
| 205 | /* Minimal error checking. Ignore cases we know about, and report the rest. */ | 211 | /* Minimal error checking. Ignore cases we know about, and report the rest. */ |
| 206 | if (driver_byte(result) != 0) { | 212 | if (driver_byte(result) != 0) { |
| 207 | switch (sshdr.sense_key) { | 213 | switch (sshdr.sense_key) { |
diff --git a/drivers/spi/spi-bcm-qspi.c b/drivers/spi/spi-bcm-qspi.c index 1596d35498c5..6573152ce893 100644 --- a/drivers/spi/spi-bcm-qspi.c +++ b/drivers/spi/spi-bcm-qspi.c | |||
| @@ -490,7 +490,7 @@ static int bcm_qspi_bspi_set_mode(struct bcm_qspi *qspi, | |||
| 490 | 490 | ||
| 491 | static void bcm_qspi_enable_bspi(struct bcm_qspi *qspi) | 491 | static void bcm_qspi_enable_bspi(struct bcm_qspi *qspi) |
| 492 | { | 492 | { |
| 493 | if (!has_bspi(qspi) || (qspi->bspi_enabled)) | 493 | if (!has_bspi(qspi)) |
| 494 | return; | 494 | return; |
| 495 | 495 | ||
| 496 | qspi->bspi_enabled = 1; | 496 | qspi->bspi_enabled = 1; |
| @@ -505,7 +505,7 @@ static void bcm_qspi_enable_bspi(struct bcm_qspi *qspi) | |||
| 505 | 505 | ||
| 506 | static void bcm_qspi_disable_bspi(struct bcm_qspi *qspi) | 506 | static void bcm_qspi_disable_bspi(struct bcm_qspi *qspi) |
| 507 | { | 507 | { |
| 508 | if (!has_bspi(qspi) || (!qspi->bspi_enabled)) | 508 | if (!has_bspi(qspi)) |
| 509 | return; | 509 | return; |
| 510 | 510 | ||
| 511 | qspi->bspi_enabled = 0; | 511 | qspi->bspi_enabled = 0; |
| @@ -519,16 +519,19 @@ static void bcm_qspi_disable_bspi(struct bcm_qspi *qspi) | |||
| 519 | 519 | ||
| 520 | static void bcm_qspi_chip_select(struct bcm_qspi *qspi, int cs) | 520 | static void bcm_qspi_chip_select(struct bcm_qspi *qspi, int cs) |
| 521 | { | 521 | { |
| 522 | u32 data = 0; | 522 | u32 rd = 0; |
| 523 | u32 wr = 0; | ||
| 523 | 524 | ||
| 524 | if (qspi->curr_cs == cs) | ||
| 525 | return; | ||
| 526 | if (qspi->base[CHIP_SELECT]) { | 525 | if (qspi->base[CHIP_SELECT]) { |
| 527 | data = bcm_qspi_read(qspi, CHIP_SELECT, 0); | 526 | rd = bcm_qspi_read(qspi, CHIP_SELECT, 0); |
| 528 | data = (data & ~0xff) | (1 << cs); | 527 | wr = (rd & ~0xff) | (1 << cs); |
| 529 | bcm_qspi_write(qspi, CHIP_SELECT, 0, data); | 528 | if (rd == wr) |
| 529 | return; | ||
| 530 | bcm_qspi_write(qspi, CHIP_SELECT, 0, wr); | ||
| 530 | usleep_range(10, 20); | 531 | usleep_range(10, 20); |
| 531 | } | 532 | } |
| 533 | |||
| 534 | dev_dbg(&qspi->pdev->dev, "using cs:%d\n", cs); | ||
| 532 | qspi->curr_cs = cs; | 535 | qspi->curr_cs = cs; |
| 533 | } | 536 | } |
| 534 | 537 | ||
| @@ -755,8 +758,13 @@ static int write_to_hw(struct bcm_qspi *qspi, struct spi_device *spi) | |||
| 755 | dev_dbg(&qspi->pdev->dev, "WR %04x\n", val); | 758 | dev_dbg(&qspi->pdev->dev, "WR %04x\n", val); |
| 756 | } | 759 | } |
| 757 | mspi_cdram = MSPI_CDRAM_CONT_BIT; | 760 | mspi_cdram = MSPI_CDRAM_CONT_BIT; |
| 758 | mspi_cdram |= (~(1 << spi->chip_select) & | 761 | |
| 759 | MSPI_CDRAM_PCS); | 762 | if (has_bspi(qspi)) |
| 763 | mspi_cdram &= ~1; | ||
| 764 | else | ||
| 765 | mspi_cdram |= (~(1 << spi->chip_select) & | ||
| 766 | MSPI_CDRAM_PCS); | ||
| 767 | |||
| 760 | mspi_cdram |= ((tp.trans->bits_per_word <= 8) ? 0 : | 768 | mspi_cdram |= ((tp.trans->bits_per_word <= 8) ? 0 : |
| 761 | MSPI_CDRAM_BITSE_BIT); | 769 | MSPI_CDRAM_BITSE_BIT); |
| 762 | 770 | ||
diff --git a/drivers/spi/spi-bcm2835aux.c b/drivers/spi/spi-bcm2835aux.c index 1431cb98fe40..3094d818cf06 100644 --- a/drivers/spi/spi-bcm2835aux.c +++ b/drivers/spi/spi-bcm2835aux.c | |||
| @@ -184,6 +184,11 @@ static irqreturn_t bcm2835aux_spi_interrupt(int irq, void *dev_id) | |||
| 184 | struct bcm2835aux_spi *bs = spi_master_get_devdata(master); | 184 | struct bcm2835aux_spi *bs = spi_master_get_devdata(master); |
| 185 | irqreturn_t ret = IRQ_NONE; | 185 | irqreturn_t ret = IRQ_NONE; |
| 186 | 186 | ||
| 187 | /* IRQ may be shared, so return if our interrupts are disabled */ | ||
| 188 | if (!(bcm2835aux_rd(bs, BCM2835_AUX_SPI_CNTL1) & | ||
| 189 | (BCM2835_AUX_SPI_CNTL1_TXEMPTY | BCM2835_AUX_SPI_CNTL1_IDLE))) | ||
| 190 | return ret; | ||
| 191 | |||
| 187 | /* check if we have data to read */ | 192 | /* check if we have data to read */ |
| 188 | while (bs->rx_len && | 193 | while (bs->rx_len && |
| 189 | (!(bcm2835aux_rd(bs, BCM2835_AUX_SPI_STAT) & | 194 | (!(bcm2835aux_rd(bs, BCM2835_AUX_SPI_STAT) & |
diff --git a/drivers/spi/spi-cadence.c b/drivers/spi/spi-cadence.c index 5c9516ae4942..4a001634023e 100644 --- a/drivers/spi/spi-cadence.c +++ b/drivers/spi/spi-cadence.c | |||
| @@ -313,6 +313,14 @@ static void cdns_spi_fill_tx_fifo(struct cdns_spi *xspi) | |||
| 313 | 313 | ||
| 314 | while ((trans_cnt < CDNS_SPI_FIFO_DEPTH) && | 314 | while ((trans_cnt < CDNS_SPI_FIFO_DEPTH) && |
| 315 | (xspi->tx_bytes > 0)) { | 315 | (xspi->tx_bytes > 0)) { |
| 316 | |||
| 317 | /* When xspi in busy condition, bytes may send failed, | ||
| 318 | * then spi control did't work thoroughly, add one byte delay | ||
| 319 | */ | ||
| 320 | if (cdns_spi_read(xspi, CDNS_SPI_ISR) & | ||
| 321 | CDNS_SPI_IXR_TXFULL) | ||
| 322 | usleep_range(10, 20); | ||
| 323 | |||
| 316 | if (xspi->txbuf) | 324 | if (xspi->txbuf) |
| 317 | cdns_spi_write(xspi, CDNS_SPI_TXD, *xspi->txbuf++); | 325 | cdns_spi_write(xspi, CDNS_SPI_TXD, *xspi->txbuf++); |
| 318 | else | 326 | else |
diff --git a/drivers/spi/spi-imx.c b/drivers/spi/spi-imx.c index 6f57592a7f95..a056ee88a960 100644 --- a/drivers/spi/spi-imx.c +++ b/drivers/spi/spi-imx.c | |||
| @@ -1701,7 +1701,7 @@ static struct platform_driver spi_imx_driver = { | |||
| 1701 | }; | 1701 | }; |
| 1702 | module_platform_driver(spi_imx_driver); | 1702 | module_platform_driver(spi_imx_driver); |
| 1703 | 1703 | ||
| 1704 | MODULE_DESCRIPTION("SPI Master Controller driver"); | 1704 | MODULE_DESCRIPTION("SPI Controller driver"); |
| 1705 | MODULE_AUTHOR("Sascha Hauer, Pengutronix"); | 1705 | MODULE_AUTHOR("Sascha Hauer, Pengutronix"); |
| 1706 | MODULE_LICENSE("GPL"); | 1706 | MODULE_LICENSE("GPL"); |
| 1707 | MODULE_ALIAS("platform:" DRIVER_NAME); | 1707 | MODULE_ALIAS("platform:" DRIVER_NAME); |
diff --git a/drivers/spi/spi-pxa2xx.h b/drivers/spi/spi-pxa2xx.h index 513ec6c6e25b..0ae7defd3492 100644 --- a/drivers/spi/spi-pxa2xx.h +++ b/drivers/spi/spi-pxa2xx.h | |||
| @@ -38,7 +38,7 @@ struct driver_data { | |||
| 38 | 38 | ||
| 39 | /* SSP register addresses */ | 39 | /* SSP register addresses */ |
| 40 | void __iomem *ioaddr; | 40 | void __iomem *ioaddr; |
| 41 | u32 ssdr_physical; | 41 | phys_addr_t ssdr_physical; |
| 42 | 42 | ||
| 43 | /* SSP masks*/ | 43 | /* SSP masks*/ |
| 44 | u32 dma_cr1; | 44 | u32 dma_cr1; |
diff --git a/drivers/spi/spi-sh-msiof.c b/drivers/spi/spi-sh-msiof.c index ae086aab57d5..8171eedbfc90 100644 --- a/drivers/spi/spi-sh-msiof.c +++ b/drivers/spi/spi-sh-msiof.c | |||
| @@ -283,6 +283,7 @@ static void sh_msiof_spi_set_clk_regs(struct sh_msiof_spi_priv *p, | |||
| 283 | } | 283 | } |
| 284 | 284 | ||
| 285 | k = min_t(int, k, ARRAY_SIZE(sh_msiof_spi_div_table) - 1); | 285 | k = min_t(int, k, ARRAY_SIZE(sh_msiof_spi_div_table) - 1); |
| 286 | brps = min_t(int, brps, 32); | ||
| 286 | 287 | ||
| 287 | scr = sh_msiof_spi_div_table[k].brdv | SCR_BRPS(brps); | 288 | scr = sh_msiof_spi_div_table[k].brdv | SCR_BRPS(brps); |
| 288 | sh_msiof_write(p, TSCR, scr); | 289 | sh_msiof_write(p, TSCR, scr); |
diff --git a/drivers/ssb/Kconfig b/drivers/ssb/Kconfig index 9371651d8017..c574dd210500 100644 --- a/drivers/ssb/Kconfig +++ b/drivers/ssb/Kconfig | |||
| @@ -117,7 +117,7 @@ config SSB_SERIAL | |||
| 117 | 117 | ||
| 118 | config SSB_DRIVER_PCICORE_POSSIBLE | 118 | config SSB_DRIVER_PCICORE_POSSIBLE |
| 119 | bool | 119 | bool |
| 120 | depends on SSB_PCIHOST && SSB = y | 120 | depends on SSB_PCIHOST |
| 121 | default y | 121 | default y |
| 122 | 122 | ||
| 123 | config SSB_DRIVER_PCICORE | 123 | config SSB_DRIVER_PCICORE |
| @@ -131,7 +131,7 @@ config SSB_DRIVER_PCICORE | |||
| 131 | 131 | ||
| 132 | config SSB_PCICORE_HOSTMODE | 132 | config SSB_PCICORE_HOSTMODE |
| 133 | bool "Hostmode support for SSB PCI core" | 133 | bool "Hostmode support for SSB PCI core" |
| 134 | depends on SSB_DRIVER_PCICORE && SSB_DRIVER_MIPS | 134 | depends on SSB_DRIVER_PCICORE && SSB_DRIVER_MIPS && SSB = y |
| 135 | help | 135 | help |
| 136 | PCIcore hostmode operation (external PCI bus). | 136 | PCIcore hostmode operation (external PCI bus). |
| 137 | 137 | ||
diff --git a/drivers/staging/lustre/lnet/Kconfig b/drivers/staging/lustre/lnet/Kconfig index ad049e6f24e4..f3b1ad4bd3dc 100644 --- a/drivers/staging/lustre/lnet/Kconfig +++ b/drivers/staging/lustre/lnet/Kconfig | |||
| @@ -34,7 +34,7 @@ config LNET_SELFTEST | |||
| 34 | 34 | ||
| 35 | config LNET_XPRT_IB | 35 | config LNET_XPRT_IB |
| 36 | tristate "LNET infiniband support" | 36 | tristate "LNET infiniband support" |
| 37 | depends on LNET && PCI && INFINIBAND && INFINIBAND_ADDR_TRANS | 37 | depends on LNET && PCI && INFINIBAND_ADDR_TRANS |
| 38 | default LNET && INFINIBAND | 38 | default LNET && INFINIBAND |
| 39 | help | 39 | help |
| 40 | This option allows the LNET users to use infiniband as an | 40 | This option allows the LNET users to use infiniband as an |
diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c index 4ad89ea71a70..4f26bdc3d1dc 100644 --- a/drivers/target/target_core_user.c +++ b/drivers/target/target_core_user.c | |||
| @@ -2121,6 +2121,8 @@ static ssize_t tcmu_qfull_time_out_store(struct config_item *item, | |||
| 2121 | 2121 | ||
| 2122 | if (val >= 0) { | 2122 | if (val >= 0) { |
| 2123 | udev->qfull_time_out = val * MSEC_PER_SEC; | 2123 | udev->qfull_time_out = val * MSEC_PER_SEC; |
| 2124 | } else if (val == -1) { | ||
| 2125 | udev->qfull_time_out = val; | ||
| 2124 | } else { | 2126 | } else { |
| 2125 | printk(KERN_ERR "Invalid qfull timeout value %d\n", val); | 2127 | printk(KERN_ERR "Invalid qfull timeout value %d\n", val); |
| 2126 | return -EINVAL; | 2128 | return -EINVAL; |
diff --git a/drivers/tee/tee_core.c b/drivers/tee/tee_core.c index 0124a91c8d71..dd46b758852a 100644 --- a/drivers/tee/tee_core.c +++ b/drivers/tee/tee_core.c | |||
| @@ -238,6 +238,17 @@ static int params_from_user(struct tee_context *ctx, struct tee_param *params, | |||
| 238 | if (IS_ERR(shm)) | 238 | if (IS_ERR(shm)) |
| 239 | return PTR_ERR(shm); | 239 | return PTR_ERR(shm); |
| 240 | 240 | ||
| 241 | /* | ||
| 242 | * Ensure offset + size does not overflow offset | ||
| 243 | * and does not overflow the size of the referred | ||
| 244 | * shared memory object. | ||
| 245 | */ | ||
| 246 | if ((ip.a + ip.b) < ip.a || | ||
| 247 | (ip.a + ip.b) > shm->size) { | ||
| 248 | tee_shm_put(shm); | ||
| 249 | return -EINVAL; | ||
| 250 | } | ||
| 251 | |||
| 241 | params[n].u.memref.shm_offs = ip.a; | 252 | params[n].u.memref.shm_offs = ip.a; |
| 242 | params[n].u.memref.size = ip.b; | 253 | params[n].u.memref.size = ip.b; |
| 243 | params[n].u.memref.shm = shm; | 254 | params[n].u.memref.shm = shm; |
diff --git a/drivers/tee/tee_shm.c b/drivers/tee/tee_shm.c index 556960a1bab3..07d3be6f0780 100644 --- a/drivers/tee/tee_shm.c +++ b/drivers/tee/tee_shm.c | |||
| @@ -360,9 +360,10 @@ int tee_shm_get_fd(struct tee_shm *shm) | |||
| 360 | if (!(shm->flags & TEE_SHM_DMA_BUF)) | 360 | if (!(shm->flags & TEE_SHM_DMA_BUF)) |
| 361 | return -EINVAL; | 361 | return -EINVAL; |
| 362 | 362 | ||
| 363 | get_dma_buf(shm->dmabuf); | ||
| 363 | fd = dma_buf_fd(shm->dmabuf, O_CLOEXEC); | 364 | fd = dma_buf_fd(shm->dmabuf, O_CLOEXEC); |
| 364 | if (fd >= 0) | 365 | if (fd < 0) |
| 365 | get_dma_buf(shm->dmabuf); | 366 | dma_buf_put(shm->dmabuf); |
| 366 | return fd; | 367 | return fd; |
| 367 | } | 368 | } |
| 368 | 369 | ||
diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c index 72ebbc908e19..32cd52ca8318 100644 --- a/drivers/usb/host/xhci-hub.c +++ b/drivers/usb/host/xhci-hub.c | |||
| @@ -354,7 +354,7 @@ int xhci_find_slot_id_by_port(struct usb_hcd *hcd, struct xhci_hcd *xhci, | |||
| 354 | 354 | ||
| 355 | slot_id = 0; | 355 | slot_id = 0; |
| 356 | for (i = 0; i < MAX_HC_SLOTS; i++) { | 356 | for (i = 0; i < MAX_HC_SLOTS; i++) { |
| 357 | if (!xhci->devs[i]) | 357 | if (!xhci->devs[i] || !xhci->devs[i]->udev) |
| 358 | continue; | 358 | continue; |
| 359 | speed = xhci->devs[i]->udev->speed; | 359 | speed = xhci->devs[i]->udev->speed; |
| 360 | if (((speed >= USB_SPEED_SUPER) == (hcd->speed >= HCD_USB3)) | 360 | if (((speed >= USB_SPEED_SUPER) == (hcd->speed >= HCD_USB3)) |
diff --git a/drivers/usb/musb/musb_host.c b/drivers/usb/musb/musb_host.c index e7f99d55922a..15a42cee0a9c 100644 --- a/drivers/usb/musb/musb_host.c +++ b/drivers/usb/musb/musb_host.c | |||
| @@ -2524,8 +2524,11 @@ static int musb_bus_suspend(struct usb_hcd *hcd) | |||
| 2524 | { | 2524 | { |
| 2525 | struct musb *musb = hcd_to_musb(hcd); | 2525 | struct musb *musb = hcd_to_musb(hcd); |
| 2526 | u8 devctl; | 2526 | u8 devctl; |
| 2527 | int ret; | ||
| 2527 | 2528 | ||
| 2528 | musb_port_suspend(musb, true); | 2529 | ret = musb_port_suspend(musb, true); |
| 2530 | if (ret) | ||
| 2531 | return ret; | ||
| 2529 | 2532 | ||
| 2530 | if (!is_host_active(musb)) | 2533 | if (!is_host_active(musb)) |
| 2531 | return 0; | 2534 | return 0; |
diff --git a/drivers/usb/musb/musb_host.h b/drivers/usb/musb/musb_host.h index 72392bbcd0a4..2999845632ce 100644 --- a/drivers/usb/musb/musb_host.h +++ b/drivers/usb/musb/musb_host.h | |||
| @@ -67,7 +67,7 @@ extern void musb_host_rx(struct musb *, u8); | |||
| 67 | extern void musb_root_disconnect(struct musb *musb); | 67 | extern void musb_root_disconnect(struct musb *musb); |
| 68 | extern void musb_host_resume_root_hub(struct musb *musb); | 68 | extern void musb_host_resume_root_hub(struct musb *musb); |
| 69 | extern void musb_host_poke_root_hub(struct musb *musb); | 69 | extern void musb_host_poke_root_hub(struct musb *musb); |
| 70 | extern void musb_port_suspend(struct musb *musb, bool do_suspend); | 70 | extern int musb_port_suspend(struct musb *musb, bool do_suspend); |
| 71 | extern void musb_port_reset(struct musb *musb, bool do_reset); | 71 | extern void musb_port_reset(struct musb *musb, bool do_reset); |
| 72 | extern void musb_host_finish_resume(struct work_struct *work); | 72 | extern void musb_host_finish_resume(struct work_struct *work); |
| 73 | #else | 73 | #else |
| @@ -99,7 +99,10 @@ static inline void musb_root_disconnect(struct musb *musb) {} | |||
| 99 | static inline void musb_host_resume_root_hub(struct musb *musb) {} | 99 | static inline void musb_host_resume_root_hub(struct musb *musb) {} |
| 100 | static inline void musb_host_poll_rh_status(struct musb *musb) {} | 100 | static inline void musb_host_poll_rh_status(struct musb *musb) {} |
| 101 | static inline void musb_host_poke_root_hub(struct musb *musb) {} | 101 | static inline void musb_host_poke_root_hub(struct musb *musb) {} |
| 102 | static inline void musb_port_suspend(struct musb *musb, bool do_suspend) {} | 102 | static inline int musb_port_suspend(struct musb *musb, bool do_suspend) |
| 103 | { | ||
| 104 | return 0; | ||
| 105 | } | ||
| 103 | static inline void musb_port_reset(struct musb *musb, bool do_reset) {} | 106 | static inline void musb_port_reset(struct musb *musb, bool do_reset) {} |
| 104 | static inline void musb_host_finish_resume(struct work_struct *work) {} | 107 | static inline void musb_host_finish_resume(struct work_struct *work) {} |
| 105 | #endif | 108 | #endif |
diff --git a/drivers/usb/musb/musb_virthub.c b/drivers/usb/musb/musb_virthub.c index 5165d2b07ade..2f8dd9826e94 100644 --- a/drivers/usb/musb/musb_virthub.c +++ b/drivers/usb/musb/musb_virthub.c | |||
| @@ -48,14 +48,14 @@ void musb_host_finish_resume(struct work_struct *work) | |||
| 48 | spin_unlock_irqrestore(&musb->lock, flags); | 48 | spin_unlock_irqrestore(&musb->lock, flags); |
| 49 | } | 49 | } |
| 50 | 50 | ||
| 51 | void musb_port_suspend(struct musb *musb, bool do_suspend) | 51 | int musb_port_suspend(struct musb *musb, bool do_suspend) |
| 52 | { | 52 | { |
| 53 | struct usb_otg *otg = musb->xceiv->otg; | 53 | struct usb_otg *otg = musb->xceiv->otg; |
| 54 | u8 power; | 54 | u8 power; |
| 55 | void __iomem *mbase = musb->mregs; | 55 | void __iomem *mbase = musb->mregs; |
| 56 | 56 | ||
| 57 | if (!is_host_active(musb)) | 57 | if (!is_host_active(musb)) |
| 58 | return; | 58 | return 0; |
| 59 | 59 | ||
| 60 | /* NOTE: this doesn't necessarily put PHY into low power mode, | 60 | /* NOTE: this doesn't necessarily put PHY into low power mode, |
| 61 | * turning off its clock; that's a function of PHY integration and | 61 | * turning off its clock; that's a function of PHY integration and |
| @@ -66,16 +66,20 @@ void musb_port_suspend(struct musb *musb, bool do_suspend) | |||
| 66 | if (do_suspend) { | 66 | if (do_suspend) { |
| 67 | int retries = 10000; | 67 | int retries = 10000; |
| 68 | 68 | ||
| 69 | power &= ~MUSB_POWER_RESUME; | 69 | if (power & MUSB_POWER_RESUME) |
| 70 | power |= MUSB_POWER_SUSPENDM; | 70 | return -EBUSY; |
| 71 | musb_writeb(mbase, MUSB_POWER, power); | ||
| 72 | 71 | ||
| 73 | /* Needed for OPT A tests */ | 72 | if (!(power & MUSB_POWER_SUSPENDM)) { |
| 74 | power = musb_readb(mbase, MUSB_POWER); | 73 | power |= MUSB_POWER_SUSPENDM; |
| 75 | while (power & MUSB_POWER_SUSPENDM) { | 74 | musb_writeb(mbase, MUSB_POWER, power); |
| 75 | |||
| 76 | /* Needed for OPT A tests */ | ||
| 76 | power = musb_readb(mbase, MUSB_POWER); | 77 | power = musb_readb(mbase, MUSB_POWER); |
| 77 | if (retries-- < 1) | 78 | while (power & MUSB_POWER_SUSPENDM) { |
| 78 | break; | 79 | power = musb_readb(mbase, MUSB_POWER); |
| 80 | if (retries-- < 1) | ||
| 81 | break; | ||
| 82 | } | ||
| 79 | } | 83 | } |
| 80 | 84 | ||
| 81 | musb_dbg(musb, "Root port suspended, power %02x", power); | 85 | musb_dbg(musb, "Root port suspended, power %02x", power); |
| @@ -111,6 +115,7 @@ void musb_port_suspend(struct musb *musb, bool do_suspend) | |||
| 111 | schedule_delayed_work(&musb->finish_resume_work, | 115 | schedule_delayed_work(&musb->finish_resume_work, |
| 112 | msecs_to_jiffies(USB_RESUME_TIMEOUT)); | 116 | msecs_to_jiffies(USB_RESUME_TIMEOUT)); |
| 113 | } | 117 | } |
| 118 | return 0; | ||
| 114 | } | 119 | } |
| 115 | 120 | ||
| 116 | void musb_port_reset(struct musb *musb, bool do_reset) | 121 | void musb_port_reset(struct musb *musb, bool do_reset) |
diff --git a/drivers/usb/usbip/stub.h b/drivers/usb/usbip/stub.h index 14a72357800a..35618ceb2791 100644 --- a/drivers/usb/usbip/stub.h +++ b/drivers/usb/usbip/stub.h | |||
| @@ -73,6 +73,7 @@ struct bus_id_priv { | |||
| 73 | struct stub_device *sdev; | 73 | struct stub_device *sdev; |
| 74 | struct usb_device *udev; | 74 | struct usb_device *udev; |
| 75 | char shutdown_busid; | 75 | char shutdown_busid; |
| 76 | spinlock_t busid_lock; | ||
| 76 | }; | 77 | }; |
| 77 | 78 | ||
| 78 | /* stub_priv is allocated from stub_priv_cache */ | 79 | /* stub_priv is allocated from stub_priv_cache */ |
| @@ -83,6 +84,7 @@ extern struct usb_device_driver stub_driver; | |||
| 83 | 84 | ||
| 84 | /* stub_main.c */ | 85 | /* stub_main.c */ |
| 85 | struct bus_id_priv *get_busid_priv(const char *busid); | 86 | struct bus_id_priv *get_busid_priv(const char *busid); |
| 87 | void put_busid_priv(struct bus_id_priv *bid); | ||
| 86 | int del_match_busid(char *busid); | 88 | int del_match_busid(char *busid); |
| 87 | void stub_device_cleanup_urbs(struct stub_device *sdev); | 89 | void stub_device_cleanup_urbs(struct stub_device *sdev); |
| 88 | 90 | ||
diff --git a/drivers/usb/usbip/stub_dev.c b/drivers/usb/usbip/stub_dev.c index dd8ef36ab10e..c0d6ff1baa72 100644 --- a/drivers/usb/usbip/stub_dev.c +++ b/drivers/usb/usbip/stub_dev.c | |||
| @@ -300,9 +300,9 @@ static int stub_probe(struct usb_device *udev) | |||
| 300 | struct stub_device *sdev = NULL; | 300 | struct stub_device *sdev = NULL; |
| 301 | const char *udev_busid = dev_name(&udev->dev); | 301 | const char *udev_busid = dev_name(&udev->dev); |
| 302 | struct bus_id_priv *busid_priv; | 302 | struct bus_id_priv *busid_priv; |
| 303 | int rc; | 303 | int rc = 0; |
| 304 | 304 | ||
| 305 | dev_dbg(&udev->dev, "Enter\n"); | 305 | dev_dbg(&udev->dev, "Enter probe\n"); |
| 306 | 306 | ||
| 307 | /* check we should claim or not by busid_table */ | 307 | /* check we should claim or not by busid_table */ |
| 308 | busid_priv = get_busid_priv(udev_busid); | 308 | busid_priv = get_busid_priv(udev_busid); |
| @@ -317,13 +317,15 @@ static int stub_probe(struct usb_device *udev) | |||
| 317 | * other matched drivers by the driver core. | 317 | * other matched drivers by the driver core. |
| 318 | * See driver_probe_device() in driver/base/dd.c | 318 | * See driver_probe_device() in driver/base/dd.c |
| 319 | */ | 319 | */ |
| 320 | return -ENODEV; | 320 | rc = -ENODEV; |
| 321 | goto call_put_busid_priv; | ||
| 321 | } | 322 | } |
| 322 | 323 | ||
| 323 | if (udev->descriptor.bDeviceClass == USB_CLASS_HUB) { | 324 | if (udev->descriptor.bDeviceClass == USB_CLASS_HUB) { |
| 324 | dev_dbg(&udev->dev, "%s is a usb hub device... skip!\n", | 325 | dev_dbg(&udev->dev, "%s is a usb hub device... skip!\n", |
| 325 | udev_busid); | 326 | udev_busid); |
| 326 | return -ENODEV; | 327 | rc = -ENODEV; |
| 328 | goto call_put_busid_priv; | ||
| 327 | } | 329 | } |
| 328 | 330 | ||
| 329 | if (!strcmp(udev->bus->bus_name, "vhci_hcd")) { | 331 | if (!strcmp(udev->bus->bus_name, "vhci_hcd")) { |
| @@ -331,13 +333,16 @@ static int stub_probe(struct usb_device *udev) | |||
| 331 | "%s is attached on vhci_hcd... skip!\n", | 333 | "%s is attached on vhci_hcd... skip!\n", |
| 332 | udev_busid); | 334 | udev_busid); |
| 333 | 335 | ||
| 334 | return -ENODEV; | 336 | rc = -ENODEV; |
| 337 | goto call_put_busid_priv; | ||
| 335 | } | 338 | } |
| 336 | 339 | ||
| 337 | /* ok, this is my device */ | 340 | /* ok, this is my device */ |
| 338 | sdev = stub_device_alloc(udev); | 341 | sdev = stub_device_alloc(udev); |
| 339 | if (!sdev) | 342 | if (!sdev) { |
| 340 | return -ENOMEM; | 343 | rc = -ENOMEM; |
| 344 | goto call_put_busid_priv; | ||
| 345 | } | ||
| 341 | 346 | ||
| 342 | dev_info(&udev->dev, | 347 | dev_info(&udev->dev, |
| 343 | "usbip-host: register new device (bus %u dev %u)\n", | 348 | "usbip-host: register new device (bus %u dev %u)\n", |
| @@ -369,7 +374,9 @@ static int stub_probe(struct usb_device *udev) | |||
| 369 | } | 374 | } |
| 370 | busid_priv->status = STUB_BUSID_ALLOC; | 375 | busid_priv->status = STUB_BUSID_ALLOC; |
| 371 | 376 | ||
| 372 | return 0; | 377 | rc = 0; |
| 378 | goto call_put_busid_priv; | ||
| 379 | |||
| 373 | err_files: | 380 | err_files: |
| 374 | usb_hub_release_port(udev->parent, udev->portnum, | 381 | usb_hub_release_port(udev->parent, udev->portnum, |
| 375 | (struct usb_dev_state *) udev); | 382 | (struct usb_dev_state *) udev); |
| @@ -379,6 +386,9 @@ err_port: | |||
| 379 | 386 | ||
| 380 | busid_priv->sdev = NULL; | 387 | busid_priv->sdev = NULL; |
| 381 | stub_device_free(sdev); | 388 | stub_device_free(sdev); |
| 389 | |||
| 390 | call_put_busid_priv: | ||
| 391 | put_busid_priv(busid_priv); | ||
| 382 | return rc; | 392 | return rc; |
| 383 | } | 393 | } |
| 384 | 394 | ||
| @@ -404,7 +414,7 @@ static void stub_disconnect(struct usb_device *udev) | |||
| 404 | struct bus_id_priv *busid_priv; | 414 | struct bus_id_priv *busid_priv; |
| 405 | int rc; | 415 | int rc; |
| 406 | 416 | ||
| 407 | dev_dbg(&udev->dev, "Enter\n"); | 417 | dev_dbg(&udev->dev, "Enter disconnect\n"); |
| 408 | 418 | ||
| 409 | busid_priv = get_busid_priv(udev_busid); | 419 | busid_priv = get_busid_priv(udev_busid); |
| 410 | if (!busid_priv) { | 420 | if (!busid_priv) { |
| @@ -417,7 +427,7 @@ static void stub_disconnect(struct usb_device *udev) | |||
| 417 | /* get stub_device */ | 427 | /* get stub_device */ |
| 418 | if (!sdev) { | 428 | if (!sdev) { |
| 419 | dev_err(&udev->dev, "could not get device"); | 429 | dev_err(&udev->dev, "could not get device"); |
| 420 | return; | 430 | goto call_put_busid_priv; |
| 421 | } | 431 | } |
| 422 | 432 | ||
| 423 | dev_set_drvdata(&udev->dev, NULL); | 433 | dev_set_drvdata(&udev->dev, NULL); |
| @@ -432,12 +442,12 @@ static void stub_disconnect(struct usb_device *udev) | |||
| 432 | (struct usb_dev_state *) udev); | 442 | (struct usb_dev_state *) udev); |
| 433 | if (rc) { | 443 | if (rc) { |
| 434 | dev_dbg(&udev->dev, "unable to release port\n"); | 444 | dev_dbg(&udev->dev, "unable to release port\n"); |
| 435 | return; | 445 | goto call_put_busid_priv; |
| 436 | } | 446 | } |
| 437 | 447 | ||
| 438 | /* If usb reset is called from event handler */ | 448 | /* If usb reset is called from event handler */ |
| 439 | if (usbip_in_eh(current)) | 449 | if (usbip_in_eh(current)) |
| 440 | return; | 450 | goto call_put_busid_priv; |
| 441 | 451 | ||
| 442 | /* shutdown the current connection */ | 452 | /* shutdown the current connection */ |
| 443 | shutdown_busid(busid_priv); | 453 | shutdown_busid(busid_priv); |
| @@ -448,12 +458,11 @@ static void stub_disconnect(struct usb_device *udev) | |||
| 448 | busid_priv->sdev = NULL; | 458 | busid_priv->sdev = NULL; |
| 449 | stub_device_free(sdev); | 459 | stub_device_free(sdev); |
| 450 | 460 | ||
| 451 | if (busid_priv->status == STUB_BUSID_ALLOC) { | 461 | if (busid_priv->status == STUB_BUSID_ALLOC) |
| 452 | busid_priv->status = STUB_BUSID_ADDED; | 462 | busid_priv->status = STUB_BUSID_ADDED; |
| 453 | } else { | 463 | |
| 454 | busid_priv->status = STUB_BUSID_OTHER; | 464 | call_put_busid_priv: |
| 455 | del_match_busid((char *)udev_busid); | 465 | put_busid_priv(busid_priv); |
| 456 | } | ||
| 457 | } | 466 | } |
| 458 | 467 | ||
| 459 | #ifdef CONFIG_PM | 468 | #ifdef CONFIG_PM |
diff --git a/drivers/usb/usbip/stub_main.c b/drivers/usb/usbip/stub_main.c index d41d0cdeec0f..bf8a5feb0ee9 100644 --- a/drivers/usb/usbip/stub_main.c +++ b/drivers/usb/usbip/stub_main.c | |||
| @@ -14,6 +14,7 @@ | |||
| 14 | #define DRIVER_DESC "USB/IP Host Driver" | 14 | #define DRIVER_DESC "USB/IP Host Driver" |
| 15 | 15 | ||
| 16 | struct kmem_cache *stub_priv_cache; | 16 | struct kmem_cache *stub_priv_cache; |
| 17 | |||
| 17 | /* | 18 | /* |
| 18 | * busid_tables defines matching busids that usbip can grab. A user can change | 19 | * busid_tables defines matching busids that usbip can grab. A user can change |
| 19 | * dynamically what device is locally used and what device is exported to a | 20 | * dynamically what device is locally used and what device is exported to a |
| @@ -25,6 +26,8 @@ static spinlock_t busid_table_lock; | |||
| 25 | 26 | ||
| 26 | static void init_busid_table(void) | 27 | static void init_busid_table(void) |
| 27 | { | 28 | { |
| 29 | int i; | ||
| 30 | |||
| 28 | /* | 31 | /* |
| 29 | * This also sets the bus_table[i].status to | 32 | * This also sets the bus_table[i].status to |
| 30 | * STUB_BUSID_OTHER, which is 0. | 33 | * STUB_BUSID_OTHER, which is 0. |
| @@ -32,6 +35,9 @@ static void init_busid_table(void) | |||
| 32 | memset(busid_table, 0, sizeof(busid_table)); | 35 | memset(busid_table, 0, sizeof(busid_table)); |
| 33 | 36 | ||
| 34 | spin_lock_init(&busid_table_lock); | 37 | spin_lock_init(&busid_table_lock); |
| 38 | |||
| 39 | for (i = 0; i < MAX_BUSID; i++) | ||
| 40 | spin_lock_init(&busid_table[i].busid_lock); | ||
| 35 | } | 41 | } |
| 36 | 42 | ||
| 37 | /* | 43 | /* |
| @@ -43,15 +49,20 @@ static int get_busid_idx(const char *busid) | |||
| 43 | int i; | 49 | int i; |
| 44 | int idx = -1; | 50 | int idx = -1; |
| 45 | 51 | ||
| 46 | for (i = 0; i < MAX_BUSID; i++) | 52 | for (i = 0; i < MAX_BUSID; i++) { |
| 53 | spin_lock(&busid_table[i].busid_lock); | ||
| 47 | if (busid_table[i].name[0]) | 54 | if (busid_table[i].name[0]) |
| 48 | if (!strncmp(busid_table[i].name, busid, BUSID_SIZE)) { | 55 | if (!strncmp(busid_table[i].name, busid, BUSID_SIZE)) { |
| 49 | idx = i; | 56 | idx = i; |
| 57 | spin_unlock(&busid_table[i].busid_lock); | ||
| 50 | break; | 58 | break; |
| 51 | } | 59 | } |
| 60 | spin_unlock(&busid_table[i].busid_lock); | ||
| 61 | } | ||
| 52 | return idx; | 62 | return idx; |
| 53 | } | 63 | } |
| 54 | 64 | ||
| 65 | /* Returns holding busid_lock. Should call put_busid_priv() to unlock */ | ||
| 55 | struct bus_id_priv *get_busid_priv(const char *busid) | 66 | struct bus_id_priv *get_busid_priv(const char *busid) |
| 56 | { | 67 | { |
| 57 | int idx; | 68 | int idx; |
| @@ -59,13 +70,22 @@ struct bus_id_priv *get_busid_priv(const char *busid) | |||
| 59 | 70 | ||
| 60 | spin_lock(&busid_table_lock); | 71 | spin_lock(&busid_table_lock); |
| 61 | idx = get_busid_idx(busid); | 72 | idx = get_busid_idx(busid); |
| 62 | if (idx >= 0) | 73 | if (idx >= 0) { |
| 63 | bid = &(busid_table[idx]); | 74 | bid = &(busid_table[idx]); |
| 75 | /* get busid_lock before returning */ | ||
| 76 | spin_lock(&bid->busid_lock); | ||
| 77 | } | ||
| 64 | spin_unlock(&busid_table_lock); | 78 | spin_unlock(&busid_table_lock); |
| 65 | 79 | ||
| 66 | return bid; | 80 | return bid; |
| 67 | } | 81 | } |
| 68 | 82 | ||
| 83 | void put_busid_priv(struct bus_id_priv *bid) | ||
| 84 | { | ||
| 85 | if (bid) | ||
| 86 | spin_unlock(&bid->busid_lock); | ||
| 87 | } | ||
| 88 | |||
| 69 | static int add_match_busid(char *busid) | 89 | static int add_match_busid(char *busid) |
| 70 | { | 90 | { |
| 71 | int i; | 91 | int i; |
| @@ -78,15 +98,19 @@ static int add_match_busid(char *busid) | |||
| 78 | goto out; | 98 | goto out; |
| 79 | } | 99 | } |
| 80 | 100 | ||
| 81 | for (i = 0; i < MAX_BUSID; i++) | 101 | for (i = 0; i < MAX_BUSID; i++) { |
| 102 | spin_lock(&busid_table[i].busid_lock); | ||
| 82 | if (!busid_table[i].name[0]) { | 103 | if (!busid_table[i].name[0]) { |
| 83 | strlcpy(busid_table[i].name, busid, BUSID_SIZE); | 104 | strlcpy(busid_table[i].name, busid, BUSID_SIZE); |
| 84 | if ((busid_table[i].status != STUB_BUSID_ALLOC) && | 105 | if ((busid_table[i].status != STUB_BUSID_ALLOC) && |
| 85 | (busid_table[i].status != STUB_BUSID_REMOV)) | 106 | (busid_table[i].status != STUB_BUSID_REMOV)) |
| 86 | busid_table[i].status = STUB_BUSID_ADDED; | 107 | busid_table[i].status = STUB_BUSID_ADDED; |
| 87 | ret = 0; | 108 | ret = 0; |
| 109 | spin_unlock(&busid_table[i].busid_lock); | ||
| 88 | break; | 110 | break; |
| 89 | } | 111 | } |
| 112 | spin_unlock(&busid_table[i].busid_lock); | ||
| 113 | } | ||
| 90 | 114 | ||
| 91 | out: | 115 | out: |
| 92 | spin_unlock(&busid_table_lock); | 116 | spin_unlock(&busid_table_lock); |
| @@ -107,6 +131,8 @@ int del_match_busid(char *busid) | |||
| 107 | /* found */ | 131 | /* found */ |
| 108 | ret = 0; | 132 | ret = 0; |
| 109 | 133 | ||
| 134 | spin_lock(&busid_table[idx].busid_lock); | ||
| 135 | |||
| 110 | if (busid_table[idx].status == STUB_BUSID_OTHER) | 136 | if (busid_table[idx].status == STUB_BUSID_OTHER) |
| 111 | memset(busid_table[idx].name, 0, BUSID_SIZE); | 137 | memset(busid_table[idx].name, 0, BUSID_SIZE); |
| 112 | 138 | ||
| @@ -114,6 +140,7 @@ int del_match_busid(char *busid) | |||
| 114 | (busid_table[idx].status != STUB_BUSID_ADDED)) | 140 | (busid_table[idx].status != STUB_BUSID_ADDED)) |
| 115 | busid_table[idx].status = STUB_BUSID_REMOV; | 141 | busid_table[idx].status = STUB_BUSID_REMOV; |
| 116 | 142 | ||
| 143 | spin_unlock(&busid_table[idx].busid_lock); | ||
| 117 | out: | 144 | out: |
| 118 | spin_unlock(&busid_table_lock); | 145 | spin_unlock(&busid_table_lock); |
| 119 | 146 | ||
| @@ -126,9 +153,12 @@ static ssize_t match_busid_show(struct device_driver *drv, char *buf) | |||
| 126 | char *out = buf; | 153 | char *out = buf; |
| 127 | 154 | ||
| 128 | spin_lock(&busid_table_lock); | 155 | spin_lock(&busid_table_lock); |
| 129 | for (i = 0; i < MAX_BUSID; i++) | 156 | for (i = 0; i < MAX_BUSID; i++) { |
| 157 | spin_lock(&busid_table[i].busid_lock); | ||
| 130 | if (busid_table[i].name[0]) | 158 | if (busid_table[i].name[0]) |
| 131 | out += sprintf(out, "%s ", busid_table[i].name); | 159 | out += sprintf(out, "%s ", busid_table[i].name); |
| 160 | spin_unlock(&busid_table[i].busid_lock); | ||
| 161 | } | ||
| 132 | spin_unlock(&busid_table_lock); | 162 | spin_unlock(&busid_table_lock); |
| 133 | out += sprintf(out, "\n"); | 163 | out += sprintf(out, "\n"); |
| 134 | 164 | ||
| @@ -169,6 +199,51 @@ static ssize_t match_busid_store(struct device_driver *dev, const char *buf, | |||
| 169 | } | 199 | } |
| 170 | static DRIVER_ATTR_RW(match_busid); | 200 | static DRIVER_ATTR_RW(match_busid); |
| 171 | 201 | ||
| 202 | static int do_rebind(char *busid, struct bus_id_priv *busid_priv) | ||
| 203 | { | ||
| 204 | int ret; | ||
| 205 | |||
| 206 | /* device_attach() callers should hold parent lock for USB */ | ||
| 207 | if (busid_priv->udev->dev.parent) | ||
| 208 | device_lock(busid_priv->udev->dev.parent); | ||
| 209 | ret = device_attach(&busid_priv->udev->dev); | ||
| 210 | if (busid_priv->udev->dev.parent) | ||
| 211 | device_unlock(busid_priv->udev->dev.parent); | ||
| 212 | if (ret < 0) { | ||
| 213 | dev_err(&busid_priv->udev->dev, "rebind failed\n"); | ||
| 214 | return ret; | ||
| 215 | } | ||
| 216 | return 0; | ||
| 217 | } | ||
| 218 | |||
| 219 | static void stub_device_rebind(void) | ||
| 220 | { | ||
| 221 | #if IS_MODULE(CONFIG_USBIP_HOST) | ||
| 222 | struct bus_id_priv *busid_priv; | ||
| 223 | int i; | ||
| 224 | |||
| 225 | /* update status to STUB_BUSID_OTHER so probe ignores the device */ | ||
| 226 | spin_lock(&busid_table_lock); | ||
| 227 | for (i = 0; i < MAX_BUSID; i++) { | ||
| 228 | if (busid_table[i].name[0] && | ||
| 229 | busid_table[i].shutdown_busid) { | ||
| 230 | busid_priv = &(busid_table[i]); | ||
| 231 | busid_priv->status = STUB_BUSID_OTHER; | ||
| 232 | } | ||
| 233 | } | ||
| 234 | spin_unlock(&busid_table_lock); | ||
| 235 | |||
| 236 | /* now run rebind - no need to hold locks. driver files are removed */ | ||
| 237 | for (i = 0; i < MAX_BUSID; i++) { | ||
| 238 | if (busid_table[i].name[0] && | ||
| 239 | busid_table[i].shutdown_busid) { | ||
| 240 | busid_priv = &(busid_table[i]); | ||
| 241 | do_rebind(busid_table[i].name, busid_priv); | ||
| 242 | } | ||
| 243 | } | ||
| 244 | #endif | ||
| 245 | } | ||
| 246 | |||
| 172 | static ssize_t rebind_store(struct device_driver *dev, const char *buf, | 247 | static ssize_t rebind_store(struct device_driver *dev, const char *buf, |
| 173 | size_t count) | 248 | size_t count) |
| 174 | { | 249 | { |
| @@ -186,16 +261,17 @@ static ssize_t rebind_store(struct device_driver *dev, const char *buf, | |||
| 186 | if (!bid) | 261 | if (!bid) |
| 187 | return -ENODEV; | 262 | return -ENODEV; |
| 188 | 263 | ||
| 189 | /* device_attach() callers should hold parent lock for USB */ | 264 | /* mark the device for deletion so probe ignores it during rescan */ |
| 190 | if (bid->udev->dev.parent) | 265 | bid->status = STUB_BUSID_OTHER; |
| 191 | device_lock(bid->udev->dev.parent); | 266 | /* release the busid lock */ |
| 192 | ret = device_attach(&bid->udev->dev); | 267 | put_busid_priv(bid); |
| 193 | if (bid->udev->dev.parent) | 268 | |
| 194 | device_unlock(bid->udev->dev.parent); | 269 | ret = do_rebind((char *) buf, bid); |
| 195 | if (ret < 0) { | 270 | if (ret < 0) |
| 196 | dev_err(&bid->udev->dev, "rebind failed\n"); | ||
| 197 | return ret; | 271 | return ret; |
| 198 | } | 272 | |
| 273 | /* delete device from busid_table */ | ||
| 274 | del_match_busid((char *) buf); | ||
| 199 | 275 | ||
| 200 | return count; | 276 | return count; |
| 201 | } | 277 | } |
| @@ -317,6 +393,9 @@ static void __exit usbip_host_exit(void) | |||
| 317 | */ | 393 | */ |
| 318 | usb_deregister_device_driver(&stub_driver); | 394 | usb_deregister_device_driver(&stub_driver); |
| 319 | 395 | ||
| 396 | /* initiate scan to attach devices */ | ||
| 397 | stub_device_rebind(); | ||
| 398 | |||
| 320 | kmem_cache_destroy(stub_priv_cache); | 399 | kmem_cache_destroy(stub_priv_cache); |
| 321 | } | 400 | } |
| 322 | 401 | ||
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c index f3bd8e941224..f0be5f35ab28 100644 --- a/drivers/vhost/vhost.c +++ b/drivers/vhost/vhost.c | |||
| @@ -981,6 +981,7 @@ static int vhost_process_iotlb_msg(struct vhost_dev *dev, | |||
| 981 | { | 981 | { |
| 982 | int ret = 0; | 982 | int ret = 0; |
| 983 | 983 | ||
| 984 | mutex_lock(&dev->mutex); | ||
| 984 | vhost_dev_lock_vqs(dev); | 985 | vhost_dev_lock_vqs(dev); |
| 985 | switch (msg->type) { | 986 | switch (msg->type) { |
| 986 | case VHOST_IOTLB_UPDATE: | 987 | case VHOST_IOTLB_UPDATE: |
| @@ -1016,6 +1017,8 @@ static int vhost_process_iotlb_msg(struct vhost_dev *dev, | |||
| 1016 | } | 1017 | } |
| 1017 | 1018 | ||
| 1018 | vhost_dev_unlock_vqs(dev); | 1019 | vhost_dev_unlock_vqs(dev); |
| 1020 | mutex_unlock(&dev->mutex); | ||
| 1021 | |||
| 1019 | return ret; | 1022 | return ret; |
| 1020 | } | 1023 | } |
| 1021 | ssize_t vhost_chr_write_iter(struct vhost_dev *dev, | 1024 | ssize_t vhost_chr_write_iter(struct vhost_dev *dev, |
diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c index e1c60899fdbc..a6f9ba85dc4b 100644 --- a/drivers/xen/swiotlb-xen.c +++ b/drivers/xen/swiotlb-xen.c | |||
| @@ -351,7 +351,7 @@ xen_swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr, | |||
| 351 | * physical address */ | 351 | * physical address */ |
| 352 | phys = xen_bus_to_phys(dev_addr); | 352 | phys = xen_bus_to_phys(dev_addr); |
| 353 | 353 | ||
| 354 | if (((dev_addr + size - 1 > dma_mask)) || | 354 | if (((dev_addr + size - 1 <= dma_mask)) || |
| 355 | range_straddles_page_boundary(phys, size)) | 355 | range_straddles_page_boundary(phys, size)) |
| 356 | xen_destroy_contiguous_region(phys, order); | 356 | xen_destroy_contiguous_region(phys, order); |
| 357 | 357 | ||
diff --git a/fs/affs/namei.c b/fs/affs/namei.c index d8aa0ae3d037..41c5749f4db7 100644 --- a/fs/affs/namei.c +++ b/fs/affs/namei.c | |||
| @@ -201,14 +201,16 @@ affs_lookup(struct inode *dir, struct dentry *dentry, unsigned int flags) | |||
| 201 | struct super_block *sb = dir->i_sb; | 201 | struct super_block *sb = dir->i_sb; |
| 202 | struct buffer_head *bh; | 202 | struct buffer_head *bh; |
| 203 | struct inode *inode = NULL; | 203 | struct inode *inode = NULL; |
| 204 | struct dentry *res; | ||
| 204 | 205 | ||
| 205 | pr_debug("%s(\"%pd\")\n", __func__, dentry); | 206 | pr_debug("%s(\"%pd\")\n", __func__, dentry); |
| 206 | 207 | ||
| 207 | affs_lock_dir(dir); | 208 | affs_lock_dir(dir); |
| 208 | bh = affs_find_entry(dir, dentry); | 209 | bh = affs_find_entry(dir, dentry); |
| 209 | affs_unlock_dir(dir); | 210 | if (IS_ERR(bh)) { |
| 210 | if (IS_ERR(bh)) | 211 | affs_unlock_dir(dir); |
| 211 | return ERR_CAST(bh); | 212 | return ERR_CAST(bh); |
| 213 | } | ||
| 212 | if (bh) { | 214 | if (bh) { |
| 213 | u32 ino = bh->b_blocknr; | 215 | u32 ino = bh->b_blocknr; |
| 214 | 216 | ||
| @@ -222,11 +224,12 @@ affs_lookup(struct inode *dir, struct dentry *dentry, unsigned int flags) | |||
| 222 | } | 224 | } |
| 223 | affs_brelse(bh); | 225 | affs_brelse(bh); |
| 224 | inode = affs_iget(sb, ino); | 226 | inode = affs_iget(sb, ino); |
| 225 | if (IS_ERR(inode)) | ||
| 226 | return ERR_CAST(inode); | ||
| 227 | } | 227 | } |
| 228 | d_add(dentry, inode); | 228 | res = d_splice_alias(inode, dentry); |
| 229 | return NULL; | 229 | if (!IS_ERR_OR_NULL(res)) |
| 230 | res->d_fsdata = dentry->d_fsdata; | ||
| 231 | affs_unlock_dir(dir); | ||
| 232 | return res; | ||
| 230 | } | 233 | } |
| 231 | 234 | ||
| 232 | int | 235 | int |
| @@ -1078,8 +1078,8 @@ static struct kioctx *lookup_ioctx(unsigned long ctx_id) | |||
| 1078 | 1078 | ||
| 1079 | ctx = rcu_dereference(table->table[id]); | 1079 | ctx = rcu_dereference(table->table[id]); |
| 1080 | if (ctx && ctx->user_id == ctx_id) { | 1080 | if (ctx && ctx->user_id == ctx_id) { |
| 1081 | percpu_ref_get(&ctx->users); | 1081 | if (percpu_ref_tryget_live(&ctx->users)) |
| 1082 | ret = ctx; | 1082 | ret = ctx; |
| 1083 | } | 1083 | } |
| 1084 | out: | 1084 | out: |
| 1085 | rcu_read_unlock(); | 1085 | rcu_read_unlock(); |
diff --git a/fs/befs/linuxvfs.c b/fs/befs/linuxvfs.c index af2832aaeec5..4700b4534439 100644 --- a/fs/befs/linuxvfs.c +++ b/fs/befs/linuxvfs.c | |||
| @@ -198,23 +198,16 @@ befs_lookup(struct inode *dir, struct dentry *dentry, unsigned int flags) | |||
| 198 | 198 | ||
| 199 | if (ret == BEFS_BT_NOT_FOUND) { | 199 | if (ret == BEFS_BT_NOT_FOUND) { |
| 200 | befs_debug(sb, "<--- %s %pd not found", __func__, dentry); | 200 | befs_debug(sb, "<--- %s %pd not found", __func__, dentry); |
| 201 | d_add(dentry, NULL); | 201 | inode = NULL; |
| 202 | return ERR_PTR(-ENOENT); | ||
| 203 | |||
| 204 | } else if (ret != BEFS_OK || offset == 0) { | 202 | } else if (ret != BEFS_OK || offset == 0) { |
| 205 | befs_error(sb, "<--- %s Error", __func__); | 203 | befs_error(sb, "<--- %s Error", __func__); |
| 206 | return ERR_PTR(-ENODATA); | 204 | inode = ERR_PTR(-ENODATA); |
| 205 | } else { | ||
| 206 | inode = befs_iget(dir->i_sb, (ino_t) offset); | ||
| 207 | } | 207 | } |
| 208 | |||
| 209 | inode = befs_iget(dir->i_sb, (ino_t) offset); | ||
| 210 | if (IS_ERR(inode)) | ||
| 211 | return ERR_CAST(inode); | ||
| 212 | |||
| 213 | d_add(dentry, inode); | ||
| 214 | |||
| 215 | befs_debug(sb, "<--- %s", __func__); | 208 | befs_debug(sb, "<--- %s", __func__); |
| 216 | 209 | ||
| 217 | return NULL; | 210 | return d_splice_alias(inode, dentry); |
| 218 | } | 211 | } |
| 219 | 212 | ||
| 220 | static int | 213 | static int |
diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c index 3fd44835b386..8c68961925b1 100644 --- a/fs/btrfs/ctree.c +++ b/fs/btrfs/ctree.c | |||
| @@ -2436,10 +2436,8 @@ read_block_for_search(struct btrfs_root *root, struct btrfs_path *p, | |||
| 2436 | if (p->reada != READA_NONE) | 2436 | if (p->reada != READA_NONE) |
| 2437 | reada_for_search(fs_info, p, level, slot, key->objectid); | 2437 | reada_for_search(fs_info, p, level, slot, key->objectid); |
| 2438 | 2438 | ||
| 2439 | btrfs_release_path(p); | ||
| 2440 | |||
| 2441 | ret = -EAGAIN; | 2439 | ret = -EAGAIN; |
| 2442 | tmp = read_tree_block(fs_info, blocknr, 0, parent_level - 1, | 2440 | tmp = read_tree_block(fs_info, blocknr, gen, parent_level - 1, |
| 2443 | &first_key); | 2441 | &first_key); |
| 2444 | if (!IS_ERR(tmp)) { | 2442 | if (!IS_ERR(tmp)) { |
| 2445 | /* | 2443 | /* |
| @@ -2454,6 +2452,8 @@ read_block_for_search(struct btrfs_root *root, struct btrfs_path *p, | |||
| 2454 | } else { | 2452 | } else { |
| 2455 | ret = PTR_ERR(tmp); | 2453 | ret = PTR_ERR(tmp); |
| 2456 | } | 2454 | } |
| 2455 | |||
| 2456 | btrfs_release_path(p); | ||
| 2457 | return ret; | 2457 | return ret; |
| 2458 | } | 2458 | } |
| 2459 | 2459 | ||
| @@ -5414,12 +5414,24 @@ int btrfs_compare_trees(struct btrfs_root *left_root, | |||
| 5414 | down_read(&fs_info->commit_root_sem); | 5414 | down_read(&fs_info->commit_root_sem); |
| 5415 | left_level = btrfs_header_level(left_root->commit_root); | 5415 | left_level = btrfs_header_level(left_root->commit_root); |
| 5416 | left_root_level = left_level; | 5416 | left_root_level = left_level; |
| 5417 | left_path->nodes[left_level] = left_root->commit_root; | 5417 | left_path->nodes[left_level] = |
| 5418 | btrfs_clone_extent_buffer(left_root->commit_root); | ||
| 5419 | if (!left_path->nodes[left_level]) { | ||
| 5420 | up_read(&fs_info->commit_root_sem); | ||
| 5421 | ret = -ENOMEM; | ||
| 5422 | goto out; | ||
| 5423 | } | ||
| 5418 | extent_buffer_get(left_path->nodes[left_level]); | 5424 | extent_buffer_get(left_path->nodes[left_level]); |
| 5419 | 5425 | ||
| 5420 | right_level = btrfs_header_level(right_root->commit_root); | 5426 | right_level = btrfs_header_level(right_root->commit_root); |
| 5421 | right_root_level = right_level; | 5427 | right_root_level = right_level; |
| 5422 | right_path->nodes[right_level] = right_root->commit_root; | 5428 | right_path->nodes[right_level] = |
| 5429 | btrfs_clone_extent_buffer(right_root->commit_root); | ||
| 5430 | if (!right_path->nodes[right_level]) { | ||
| 5431 | up_read(&fs_info->commit_root_sem); | ||
| 5432 | ret = -ENOMEM; | ||
| 5433 | goto out; | ||
| 5434 | } | ||
| 5423 | extent_buffer_get(right_path->nodes[right_level]); | 5435 | extent_buffer_get(right_path->nodes[right_level]); |
| 5424 | up_read(&fs_info->commit_root_sem); | 5436 | up_read(&fs_info->commit_root_sem); |
| 5425 | 5437 | ||
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index 2771cc56a622..0d422c9908b8 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h | |||
| @@ -3182,6 +3182,8 @@ noinline int can_nocow_extent(struct inode *inode, u64 offset, u64 *len, | |||
| 3182 | u64 *orig_start, u64 *orig_block_len, | 3182 | u64 *orig_start, u64 *orig_block_len, |
| 3183 | u64 *ram_bytes); | 3183 | u64 *ram_bytes); |
| 3184 | 3184 | ||
| 3185 | void __btrfs_del_delalloc_inode(struct btrfs_root *root, | ||
| 3186 | struct btrfs_inode *inode); | ||
| 3185 | struct inode *btrfs_lookup_dentry(struct inode *dir, struct dentry *dentry); | 3187 | struct inode *btrfs_lookup_dentry(struct inode *dir, struct dentry *dentry); |
| 3186 | int btrfs_set_inode_index(struct btrfs_inode *dir, u64 *index); | 3188 | int btrfs_set_inode_index(struct btrfs_inode *dir, u64 *index); |
| 3187 | int btrfs_unlink_inode(struct btrfs_trans_handle *trans, | 3189 | int btrfs_unlink_inode(struct btrfs_trans_handle *trans, |
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index 60caa68c3618..c3504b4d281b 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c | |||
| @@ -3818,6 +3818,7 @@ void close_ctree(struct btrfs_fs_info *fs_info) | |||
| 3818 | set_bit(BTRFS_FS_CLOSING_DONE, &fs_info->flags); | 3818 | set_bit(BTRFS_FS_CLOSING_DONE, &fs_info->flags); |
| 3819 | 3819 | ||
| 3820 | btrfs_free_qgroup_config(fs_info); | 3820 | btrfs_free_qgroup_config(fs_info); |
| 3821 | ASSERT(list_empty(&fs_info->delalloc_roots)); | ||
| 3821 | 3822 | ||
| 3822 | if (percpu_counter_sum(&fs_info->delalloc_bytes)) { | 3823 | if (percpu_counter_sum(&fs_info->delalloc_bytes)) { |
| 3823 | btrfs_info(fs_info, "at unmount delalloc count %lld", | 3824 | btrfs_info(fs_info, "at unmount delalloc count %lld", |
| @@ -4125,15 +4126,15 @@ static int btrfs_check_super_valid(struct btrfs_fs_info *fs_info) | |||
| 4125 | 4126 | ||
| 4126 | static void btrfs_error_commit_super(struct btrfs_fs_info *fs_info) | 4127 | static void btrfs_error_commit_super(struct btrfs_fs_info *fs_info) |
| 4127 | { | 4128 | { |
| 4129 | /* cleanup FS via transaction */ | ||
| 4130 | btrfs_cleanup_transaction(fs_info); | ||
| 4131 | |||
| 4128 | mutex_lock(&fs_info->cleaner_mutex); | 4132 | mutex_lock(&fs_info->cleaner_mutex); |
| 4129 | btrfs_run_delayed_iputs(fs_info); | 4133 | btrfs_run_delayed_iputs(fs_info); |
| 4130 | mutex_unlock(&fs_info->cleaner_mutex); | 4134 | mutex_unlock(&fs_info->cleaner_mutex); |
| 4131 | 4135 | ||
| 4132 | down_write(&fs_info->cleanup_work_sem); | 4136 | down_write(&fs_info->cleanup_work_sem); |
| 4133 | up_write(&fs_info->cleanup_work_sem); | 4137 | up_write(&fs_info->cleanup_work_sem); |
| 4134 | |||
| 4135 | /* cleanup FS via transaction */ | ||
| 4136 | btrfs_cleanup_transaction(fs_info); | ||
| 4137 | } | 4138 | } |
| 4138 | 4139 | ||
| 4139 | static void btrfs_destroy_ordered_extents(struct btrfs_root *root) | 4140 | static void btrfs_destroy_ordered_extents(struct btrfs_root *root) |
| @@ -4258,19 +4259,23 @@ static void btrfs_destroy_delalloc_inodes(struct btrfs_root *root) | |||
| 4258 | list_splice_init(&root->delalloc_inodes, &splice); | 4259 | list_splice_init(&root->delalloc_inodes, &splice); |
| 4259 | 4260 | ||
| 4260 | while (!list_empty(&splice)) { | 4261 | while (!list_empty(&splice)) { |
| 4262 | struct inode *inode = NULL; | ||
| 4261 | btrfs_inode = list_first_entry(&splice, struct btrfs_inode, | 4263 | btrfs_inode = list_first_entry(&splice, struct btrfs_inode, |
| 4262 | delalloc_inodes); | 4264 | delalloc_inodes); |
| 4263 | 4265 | __btrfs_del_delalloc_inode(root, btrfs_inode); | |
| 4264 | list_del_init(&btrfs_inode->delalloc_inodes); | ||
| 4265 | clear_bit(BTRFS_INODE_IN_DELALLOC_LIST, | ||
| 4266 | &btrfs_inode->runtime_flags); | ||
| 4267 | spin_unlock(&root->delalloc_lock); | 4266 | spin_unlock(&root->delalloc_lock); |
| 4268 | 4267 | ||
| 4269 | btrfs_invalidate_inodes(btrfs_inode->root); | 4268 | /* |
| 4270 | 4269 | * Make sure we get a live inode and that it'll not disappear | |
| 4270 | * meanwhile. | ||
| 4271 | */ | ||
| 4272 | inode = igrab(&btrfs_inode->vfs_inode); | ||
| 4273 | if (inode) { | ||
| 4274 | invalidate_inode_pages2(inode->i_mapping); | ||
| 4275 | iput(inode); | ||
| 4276 | } | ||
| 4271 | spin_lock(&root->delalloc_lock); | 4277 | spin_lock(&root->delalloc_lock); |
| 4272 | } | 4278 | } |
| 4273 | |||
| 4274 | spin_unlock(&root->delalloc_lock); | 4279 | spin_unlock(&root->delalloc_lock); |
| 4275 | } | 4280 | } |
| 4276 | 4281 | ||
| @@ -4286,7 +4291,6 @@ static void btrfs_destroy_all_delalloc_inodes(struct btrfs_fs_info *fs_info) | |||
| 4286 | while (!list_empty(&splice)) { | 4291 | while (!list_empty(&splice)) { |
| 4287 | root = list_first_entry(&splice, struct btrfs_root, | 4292 | root = list_first_entry(&splice, struct btrfs_root, |
| 4288 | delalloc_root); | 4293 | delalloc_root); |
| 4289 | list_del_init(&root->delalloc_root); | ||
| 4290 | root = btrfs_grab_fs_root(root); | 4294 | root = btrfs_grab_fs_root(root); |
| 4291 | BUG_ON(!root); | 4295 | BUG_ON(!root); |
| 4292 | spin_unlock(&fs_info->delalloc_root_lock); | 4296 | spin_unlock(&fs_info->delalloc_root_lock); |
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index d241285a0d2a..0b86cf10cf2a 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c | |||
| @@ -1742,12 +1742,12 @@ static void btrfs_add_delalloc_inodes(struct btrfs_root *root, | |||
| 1742 | spin_unlock(&root->delalloc_lock); | 1742 | spin_unlock(&root->delalloc_lock); |
| 1743 | } | 1743 | } |
| 1744 | 1744 | ||
| 1745 | static void btrfs_del_delalloc_inode(struct btrfs_root *root, | 1745 | |
| 1746 | struct btrfs_inode *inode) | 1746 | void __btrfs_del_delalloc_inode(struct btrfs_root *root, |
| 1747 | struct btrfs_inode *inode) | ||
| 1747 | { | 1748 | { |
| 1748 | struct btrfs_fs_info *fs_info = btrfs_sb(inode->vfs_inode.i_sb); | 1749 | struct btrfs_fs_info *fs_info = btrfs_sb(inode->vfs_inode.i_sb); |
| 1749 | 1750 | ||
| 1750 | spin_lock(&root->delalloc_lock); | ||
| 1751 | if (!list_empty(&inode->delalloc_inodes)) { | 1751 | if (!list_empty(&inode->delalloc_inodes)) { |
| 1752 | list_del_init(&inode->delalloc_inodes); | 1752 | list_del_init(&inode->delalloc_inodes); |
| 1753 | clear_bit(BTRFS_INODE_IN_DELALLOC_LIST, | 1753 | clear_bit(BTRFS_INODE_IN_DELALLOC_LIST, |
| @@ -1760,6 +1760,13 @@ static void btrfs_del_delalloc_inode(struct btrfs_root *root, | |||
| 1760 | spin_unlock(&fs_info->delalloc_root_lock); | 1760 | spin_unlock(&fs_info->delalloc_root_lock); |
| 1761 | } | 1761 | } |
| 1762 | } | 1762 | } |
| 1763 | } | ||
| 1764 | |||
| 1765 | static void btrfs_del_delalloc_inode(struct btrfs_root *root, | ||
| 1766 | struct btrfs_inode *inode) | ||
| 1767 | { | ||
| 1768 | spin_lock(&root->delalloc_lock); | ||
| 1769 | __btrfs_del_delalloc_inode(root, inode); | ||
| 1763 | spin_unlock(&root->delalloc_lock); | 1770 | spin_unlock(&root->delalloc_lock); |
| 1764 | } | 1771 | } |
| 1765 | 1772 | ||
| @@ -6579,8 +6586,7 @@ static int btrfs_mknod(struct inode *dir, struct dentry *dentry, | |||
| 6579 | goto out_unlock_inode; | 6586 | goto out_unlock_inode; |
| 6580 | } else { | 6587 | } else { |
| 6581 | btrfs_update_inode(trans, root, inode); | 6588 | btrfs_update_inode(trans, root, inode); |
| 6582 | unlock_new_inode(inode); | 6589 | d_instantiate_new(dentry, inode); |
| 6583 | d_instantiate(dentry, inode); | ||
| 6584 | } | 6590 | } |
| 6585 | 6591 | ||
| 6586 | out_unlock: | 6592 | out_unlock: |
| @@ -6656,8 +6662,7 @@ static int btrfs_create(struct inode *dir, struct dentry *dentry, | |||
| 6656 | goto out_unlock_inode; | 6662 | goto out_unlock_inode; |
| 6657 | 6663 | ||
| 6658 | BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops; | 6664 | BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops; |
| 6659 | unlock_new_inode(inode); | 6665 | d_instantiate_new(dentry, inode); |
| 6660 | d_instantiate(dentry, inode); | ||
| 6661 | 6666 | ||
| 6662 | out_unlock: | 6667 | out_unlock: |
| 6663 | btrfs_end_transaction(trans); | 6668 | btrfs_end_transaction(trans); |
| @@ -6802,12 +6807,7 @@ static int btrfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode) | |||
| 6802 | if (err) | 6807 | if (err) |
| 6803 | goto out_fail_inode; | 6808 | goto out_fail_inode; |
| 6804 | 6809 | ||
| 6805 | d_instantiate(dentry, inode); | 6810 | d_instantiate_new(dentry, inode); |
| 6806 | /* | ||
| 6807 | * mkdir is special. We're unlocking after we call d_instantiate | ||
| 6808 | * to avoid a race with nfsd calling d_instantiate. | ||
| 6809 | */ | ||
| 6810 | unlock_new_inode(inode); | ||
| 6811 | drop_on_err = 0; | 6811 | drop_on_err = 0; |
| 6812 | 6812 | ||
| 6813 | out_fail: | 6813 | out_fail: |
| @@ -9117,7 +9117,8 @@ static int btrfs_truncate(struct inode *inode, bool skip_writeback) | |||
| 9117 | BTRFS_EXTENT_DATA_KEY); | 9117 | BTRFS_EXTENT_DATA_KEY); |
| 9118 | trans->block_rsv = &fs_info->trans_block_rsv; | 9118 | trans->block_rsv = &fs_info->trans_block_rsv; |
| 9119 | if (ret != -ENOSPC && ret != -EAGAIN) { | 9119 | if (ret != -ENOSPC && ret != -EAGAIN) { |
| 9120 | err = ret; | 9120 | if (ret < 0) |
| 9121 | err = ret; | ||
| 9121 | break; | 9122 | break; |
| 9122 | } | 9123 | } |
| 9123 | 9124 | ||
| @@ -10250,8 +10251,7 @@ static int btrfs_symlink(struct inode *dir, struct dentry *dentry, | |||
| 10250 | goto out_unlock_inode; | 10251 | goto out_unlock_inode; |
| 10251 | } | 10252 | } |
| 10252 | 10253 | ||
| 10253 | unlock_new_inode(inode); | 10254 | d_instantiate_new(dentry, inode); |
| 10254 | d_instantiate(dentry, inode); | ||
| 10255 | 10255 | ||
| 10256 | out_unlock: | 10256 | out_unlock: |
| 10257 | btrfs_end_transaction(trans); | 10257 | btrfs_end_transaction(trans); |
diff --git a/fs/btrfs/props.c b/fs/btrfs/props.c index 53a8c95828e3..dc6140013ae8 100644 --- a/fs/btrfs/props.c +++ b/fs/btrfs/props.c | |||
| @@ -380,6 +380,7 @@ static int prop_compression_apply(struct inode *inode, | |||
| 380 | const char *value, | 380 | const char *value, |
| 381 | size_t len) | 381 | size_t len) |
| 382 | { | 382 | { |
| 383 | struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); | ||
| 383 | int type; | 384 | int type; |
| 384 | 385 | ||
| 385 | if (len == 0) { | 386 | if (len == 0) { |
| @@ -390,14 +391,17 @@ static int prop_compression_apply(struct inode *inode, | |||
| 390 | return 0; | 391 | return 0; |
| 391 | } | 392 | } |
| 392 | 393 | ||
| 393 | if (!strncmp("lzo", value, 3)) | 394 | if (!strncmp("lzo", value, 3)) { |
| 394 | type = BTRFS_COMPRESS_LZO; | 395 | type = BTRFS_COMPRESS_LZO; |
| 395 | else if (!strncmp("zlib", value, 4)) | 396 | btrfs_set_fs_incompat(fs_info, COMPRESS_LZO); |
| 397 | } else if (!strncmp("zlib", value, 4)) { | ||
| 396 | type = BTRFS_COMPRESS_ZLIB; | 398 | type = BTRFS_COMPRESS_ZLIB; |
| 397 | else if (!strncmp("zstd", value, len)) | 399 | } else if (!strncmp("zstd", value, len)) { |
| 398 | type = BTRFS_COMPRESS_ZSTD; | 400 | type = BTRFS_COMPRESS_ZSTD; |
| 399 | else | 401 | btrfs_set_fs_incompat(fs_info, COMPRESS_ZSTD); |
| 402 | } else { | ||
| 400 | return -EINVAL; | 403 | return -EINVAL; |
| 404 | } | ||
| 401 | 405 | ||
| 402 | BTRFS_I(inode)->flags &= ~BTRFS_INODE_NOCOMPRESS; | 406 | BTRFS_I(inode)->flags &= ~BTRFS_INODE_NOCOMPRESS; |
| 403 | BTRFS_I(inode)->flags |= BTRFS_INODE_COMPRESS; | 407 | BTRFS_I(inode)->flags |= BTRFS_INODE_COMPRESS; |
diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c index 43758e30aa7a..8f23a94dab77 100644 --- a/fs/btrfs/tree-log.c +++ b/fs/btrfs/tree-log.c | |||
| @@ -4320,6 +4320,110 @@ static int log_one_extent(struct btrfs_trans_handle *trans, | |||
| 4320 | return ret; | 4320 | return ret; |
| 4321 | } | 4321 | } |
| 4322 | 4322 | ||
| 4323 | /* | ||
| 4324 | * Log all prealloc extents beyond the inode's i_size to make sure we do not | ||
| 4325 | * lose them after doing a fast fsync and replaying the log. We scan the | ||
| 4326 | * subvolume's root instead of iterating the inode's extent map tree because | ||
| 4327 | * otherwise we can log incorrect extent items based on extent map conversion. | ||
| 4328 | * That can happen due to the fact that extent maps are merged when they | ||
| 4329 | * are not in the extent map tree's list of modified extents. | ||
| 4330 | */ | ||
| 4331 | static int btrfs_log_prealloc_extents(struct btrfs_trans_handle *trans, | ||
| 4332 | struct btrfs_inode *inode, | ||
| 4333 | struct btrfs_path *path) | ||
| 4334 | { | ||
| 4335 | struct btrfs_root *root = inode->root; | ||
| 4336 | struct btrfs_key key; | ||
| 4337 | const u64 i_size = i_size_read(&inode->vfs_inode); | ||
| 4338 | const u64 ino = btrfs_ino(inode); | ||
| 4339 | struct btrfs_path *dst_path = NULL; | ||
| 4340 | u64 last_extent = (u64)-1; | ||
| 4341 | int ins_nr = 0; | ||
| 4342 | int start_slot; | ||
| 4343 | int ret; | ||
| 4344 | |||
| 4345 | if (!(inode->flags & BTRFS_INODE_PREALLOC)) | ||
| 4346 | return 0; | ||
| 4347 | |||
| 4348 | key.objectid = ino; | ||
| 4349 | key.type = BTRFS_EXTENT_DATA_KEY; | ||
| 4350 | key.offset = i_size; | ||
| 4351 | ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); | ||
| 4352 | if (ret < 0) | ||
| 4353 | goto out; | ||
| 4354 | |||
| 4355 | while (true) { | ||
| 4356 | struct extent_buffer *leaf = path->nodes[0]; | ||
| 4357 | int slot = path->slots[0]; | ||
| 4358 | |||
| 4359 | if (slot >= btrfs_header_nritems(leaf)) { | ||
| 4360 | if (ins_nr > 0) { | ||
| 4361 | ret = copy_items(trans, inode, dst_path, path, | ||
| 4362 | &last_extent, start_slot, | ||
| 4363 | ins_nr, 1, 0); | ||
| 4364 | if (ret < 0) | ||
| 4365 | goto out; | ||
| 4366 | ins_nr = 0; | ||
| 4367 | } | ||
| 4368 | ret = btrfs_next_leaf(root, path); | ||
| 4369 | if (ret < 0) | ||
| 4370 | goto out; | ||
| 4371 | if (ret > 0) { | ||
| 4372 | ret = 0; | ||
| 4373 | break; | ||
| 4374 | } | ||
| 4375 | continue; | ||
| 4376 | } | ||
| 4377 | |||
| 4378 | btrfs_item_key_to_cpu(leaf, &key, slot); | ||
| 4379 | if (key.objectid > ino) | ||
| 4380 | break; | ||
| 4381 | if (WARN_ON_ONCE(key.objectid < ino) || | ||
| 4382 | key.type < BTRFS_EXTENT_DATA_KEY || | ||
| 4383 | key.offset < i_size) { | ||
| 4384 | path->slots[0]++; | ||
| 4385 | continue; | ||
| 4386 | } | ||
| 4387 | if (last_extent == (u64)-1) { | ||
| 4388 | last_extent = key.offset; | ||
| 4389 | /* | ||
| 4390 | * Avoid logging extent items logged in past fsync calls | ||
| 4391 | * and leading to duplicate keys in the log tree. | ||
| 4392 | */ | ||
| 4393 | do { | ||
| 4394 | ret = btrfs_truncate_inode_items(trans, | ||
| 4395 | root->log_root, | ||
| 4396 | &inode->vfs_inode, | ||
| 4397 | i_size, | ||
| 4398 | BTRFS_EXTENT_DATA_KEY); | ||
| 4399 | } while (ret == -EAGAIN); | ||
| 4400 | if (ret) | ||
| 4401 | goto out; | ||
| 4402 | } | ||
| 4403 | if (ins_nr == 0) | ||
| 4404 | start_slot = slot; | ||
| 4405 | ins_nr++; | ||
| 4406 | path->slots[0]++; | ||
| 4407 | if (!dst_path) { | ||
| 4408 | dst_path = btrfs_alloc_path(); | ||
| 4409 | if (!dst_path) { | ||
| 4410 | ret = -ENOMEM; | ||
| 4411 | goto out; | ||
| 4412 | } | ||
| 4413 | } | ||
| 4414 | } | ||
| 4415 | if (ins_nr > 0) { | ||
| 4416 | ret = copy_items(trans, inode, dst_path, path, &last_extent, | ||
| 4417 | start_slot, ins_nr, 1, 0); | ||
| 4418 | if (ret > 0) | ||
| 4419 | ret = 0; | ||
| 4420 | } | ||
| 4421 | out: | ||
| 4422 | btrfs_release_path(path); | ||
| 4423 | btrfs_free_path(dst_path); | ||
| 4424 | return ret; | ||
| 4425 | } | ||
| 4426 | |||
| 4323 | static int btrfs_log_changed_extents(struct btrfs_trans_handle *trans, | 4427 | static int btrfs_log_changed_extents(struct btrfs_trans_handle *trans, |
| 4324 | struct btrfs_root *root, | 4428 | struct btrfs_root *root, |
| 4325 | struct btrfs_inode *inode, | 4429 | struct btrfs_inode *inode, |
| @@ -4362,6 +4466,11 @@ static int btrfs_log_changed_extents(struct btrfs_trans_handle *trans, | |||
| 4362 | if (em->generation <= test_gen) | 4466 | if (em->generation <= test_gen) |
| 4363 | continue; | 4467 | continue; |
| 4364 | 4468 | ||
| 4469 | /* We log prealloc extents beyond eof later. */ | ||
| 4470 | if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags) && | ||
| 4471 | em->start >= i_size_read(&inode->vfs_inode)) | ||
| 4472 | continue; | ||
| 4473 | |||
| 4365 | if (em->start < logged_start) | 4474 | if (em->start < logged_start) |
| 4366 | logged_start = em->start; | 4475 | logged_start = em->start; |
| 4367 | if ((em->start + em->len - 1) > logged_end) | 4476 | if ((em->start + em->len - 1) > logged_end) |
| @@ -4374,31 +4483,6 @@ static int btrfs_log_changed_extents(struct btrfs_trans_handle *trans, | |||
| 4374 | num++; | 4483 | num++; |
| 4375 | } | 4484 | } |
| 4376 | 4485 | ||
| 4377 | /* | ||
| 4378 | * Add all prealloc extents beyond the inode's i_size to make sure we | ||
| 4379 | * don't lose them after doing a fast fsync and replaying the log. | ||
| 4380 | */ | ||
| 4381 | if (inode->flags & BTRFS_INODE_PREALLOC) { | ||
| 4382 | struct rb_node *node; | ||
| 4383 | |||
| 4384 | for (node = rb_last(&tree->map); node; node = rb_prev(node)) { | ||
| 4385 | em = rb_entry(node, struct extent_map, rb_node); | ||
| 4386 | if (em->start < i_size_read(&inode->vfs_inode)) | ||
| 4387 | break; | ||
| 4388 | if (!list_empty(&em->list)) | ||
| 4389 | continue; | ||
| 4390 | /* Same as above loop. */ | ||
| 4391 | if (++num > 32768) { | ||
| 4392 | list_del_init(&tree->modified_extents); | ||
| 4393 | ret = -EFBIG; | ||
| 4394 | goto process; | ||
| 4395 | } | ||
| 4396 | refcount_inc(&em->refs); | ||
| 4397 | set_bit(EXTENT_FLAG_LOGGING, &em->flags); | ||
| 4398 | list_add_tail(&em->list, &extents); | ||
| 4399 | } | ||
| 4400 | } | ||
| 4401 | |||
| 4402 | list_sort(NULL, &extents, extent_cmp); | 4486 | list_sort(NULL, &extents, extent_cmp); |
| 4403 | btrfs_get_logged_extents(inode, logged_list, logged_start, logged_end); | 4487 | btrfs_get_logged_extents(inode, logged_list, logged_start, logged_end); |
| 4404 | /* | 4488 | /* |
| @@ -4443,6 +4527,9 @@ process: | |||
| 4443 | up_write(&inode->dio_sem); | 4527 | up_write(&inode->dio_sem); |
| 4444 | 4528 | ||
| 4445 | btrfs_release_path(path); | 4529 | btrfs_release_path(path); |
| 4530 | if (!ret) | ||
| 4531 | ret = btrfs_log_prealloc_extents(trans, inode, path); | ||
| 4532 | |||
| 4446 | return ret; | 4533 | return ret; |
| 4447 | } | 4534 | } |
| 4448 | 4535 | ||
| @@ -4827,6 +4914,7 @@ static int btrfs_log_inode(struct btrfs_trans_handle *trans, | |||
| 4827 | struct extent_map_tree *em_tree = &inode->extent_tree; | 4914 | struct extent_map_tree *em_tree = &inode->extent_tree; |
| 4828 | u64 logged_isize = 0; | 4915 | u64 logged_isize = 0; |
| 4829 | bool need_log_inode_item = true; | 4916 | bool need_log_inode_item = true; |
| 4917 | bool xattrs_logged = false; | ||
| 4830 | 4918 | ||
| 4831 | path = btrfs_alloc_path(); | 4919 | path = btrfs_alloc_path(); |
| 4832 | if (!path) | 4920 | if (!path) |
| @@ -5128,6 +5216,7 @@ next_key: | |||
| 5128 | err = btrfs_log_all_xattrs(trans, root, inode, path, dst_path); | 5216 | err = btrfs_log_all_xattrs(trans, root, inode, path, dst_path); |
| 5129 | if (err) | 5217 | if (err) |
| 5130 | goto out_unlock; | 5218 | goto out_unlock; |
| 5219 | xattrs_logged = true; | ||
| 5131 | if (max_key.type >= BTRFS_EXTENT_DATA_KEY && !fast_search) { | 5220 | if (max_key.type >= BTRFS_EXTENT_DATA_KEY && !fast_search) { |
| 5132 | btrfs_release_path(path); | 5221 | btrfs_release_path(path); |
| 5133 | btrfs_release_path(dst_path); | 5222 | btrfs_release_path(dst_path); |
| @@ -5140,6 +5229,11 @@ log_extents: | |||
| 5140 | btrfs_release_path(dst_path); | 5229 | btrfs_release_path(dst_path); |
| 5141 | if (need_log_inode_item) { | 5230 | if (need_log_inode_item) { |
| 5142 | err = log_inode_item(trans, log, dst_path, inode); | 5231 | err = log_inode_item(trans, log, dst_path, inode); |
| 5232 | if (!err && !xattrs_logged) { | ||
| 5233 | err = btrfs_log_all_xattrs(trans, root, inode, path, | ||
| 5234 | dst_path); | ||
| 5235 | btrfs_release_path(path); | ||
| 5236 | } | ||
| 5143 | if (err) | 5237 | if (err) |
| 5144 | goto out_unlock; | 5238 | goto out_unlock; |
| 5145 | } | 5239 | } |
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index 292266f6ab9c..be3fc701f389 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c | |||
| @@ -4052,6 +4052,15 @@ int btrfs_resume_balance_async(struct btrfs_fs_info *fs_info) | |||
| 4052 | return 0; | 4052 | return 0; |
| 4053 | } | 4053 | } |
| 4054 | 4054 | ||
| 4055 | /* | ||
| 4056 | * A ro->rw remount sequence should continue with the paused balance | ||
| 4057 | * regardless of who pauses it, system or the user as of now, so set | ||
| 4058 | * the resume flag. | ||
| 4059 | */ | ||
| 4060 | spin_lock(&fs_info->balance_lock); | ||
| 4061 | fs_info->balance_ctl->flags |= BTRFS_BALANCE_RESUME; | ||
| 4062 | spin_unlock(&fs_info->balance_lock); | ||
| 4063 | |||
| 4055 | tsk = kthread_run(balance_kthread, fs_info, "btrfs-balance"); | 4064 | tsk = kthread_run(balance_kthread, fs_info, "btrfs-balance"); |
| 4056 | return PTR_ERR_OR_ZERO(tsk); | 4065 | return PTR_ERR_OR_ZERO(tsk); |
| 4057 | } | 4066 | } |
diff --git a/fs/cachefiles/namei.c b/fs/cachefiles/namei.c index 0daa1e3fe0df..ab0bbe93b398 100644 --- a/fs/cachefiles/namei.c +++ b/fs/cachefiles/namei.c | |||
| @@ -572,6 +572,11 @@ lookup_again: | |||
| 572 | if (ret < 0) | 572 | if (ret < 0) |
| 573 | goto create_error; | 573 | goto create_error; |
| 574 | 574 | ||
| 575 | if (unlikely(d_unhashed(next))) { | ||
| 576 | dput(next); | ||
| 577 | inode_unlock(d_inode(dir)); | ||
| 578 | goto lookup_again; | ||
| 579 | } | ||
| 575 | ASSERT(d_backing_inode(next)); | 580 | ASSERT(d_backing_inode(next)); |
| 576 | 581 | ||
| 577 | _debug("mkdir -> %p{%p{ino=%lu}}", | 582 | _debug("mkdir -> %p{%p{ino=%lu}}", |
| @@ -764,6 +769,7 @@ struct dentry *cachefiles_get_directory(struct cachefiles_cache *cache, | |||
| 764 | /* search the current directory for the element name */ | 769 | /* search the current directory for the element name */ |
| 765 | inode_lock(d_inode(dir)); | 770 | inode_lock(d_inode(dir)); |
| 766 | 771 | ||
| 772 | retry: | ||
| 767 | start = jiffies; | 773 | start = jiffies; |
| 768 | subdir = lookup_one_len(dirname, dir, strlen(dirname)); | 774 | subdir = lookup_one_len(dirname, dir, strlen(dirname)); |
| 769 | cachefiles_hist(cachefiles_lookup_histogram, start); | 775 | cachefiles_hist(cachefiles_lookup_histogram, start); |
| @@ -793,6 +799,10 @@ struct dentry *cachefiles_get_directory(struct cachefiles_cache *cache, | |||
| 793 | if (ret < 0) | 799 | if (ret < 0) |
| 794 | goto mkdir_error; | 800 | goto mkdir_error; |
| 795 | 801 | ||
| 802 | if (unlikely(d_unhashed(subdir))) { | ||
| 803 | dput(subdir); | ||
| 804 | goto retry; | ||
| 805 | } | ||
| 796 | ASSERT(d_backing_inode(subdir)); | 806 | ASSERT(d_backing_inode(subdir)); |
| 797 | 807 | ||
| 798 | _debug("mkdir -> %p{%p{ino=%lu}}", | 808 | _debug("mkdir -> %p{%p{ino=%lu}}", |
diff --git a/fs/cifs/Kconfig b/fs/cifs/Kconfig index 5f132d59dfc2..d61e2de8d0eb 100644 --- a/fs/cifs/Kconfig +++ b/fs/cifs/Kconfig | |||
| @@ -197,7 +197,7 @@ config CIFS_SMB311 | |||
| 197 | 197 | ||
| 198 | config CIFS_SMB_DIRECT | 198 | config CIFS_SMB_DIRECT |
| 199 | bool "SMB Direct support (Experimental)" | 199 | bool "SMB Direct support (Experimental)" |
| 200 | depends on CIFS=m && INFINIBAND && INFINIBAND_ADDR_TRANS || CIFS=y && INFINIBAND=y && INFINIBAND_ADDR_TRANS=y | 200 | depends on CIFS=m && INFINIBAND_ADDR_TRANS || CIFS=y && INFINIBAND_ADDR_TRANS=y |
| 201 | help | 201 | help |
| 202 | Enables SMB Direct experimental support for SMB 3.0, 3.02 and 3.1.1. | 202 | Enables SMB Direct experimental support for SMB 3.0, 3.02 and 3.1.1. |
| 203 | SMB Direct allows transferring SMB packets over RDMA. If unsure, | 203 | SMB Direct allows transferring SMB packets over RDMA. If unsure, |
diff --git a/fs/cramfs/inode.c b/fs/cramfs/inode.c index 017b0ab19bc4..124b093d14e5 100644 --- a/fs/cramfs/inode.c +++ b/fs/cramfs/inode.c | |||
| @@ -492,7 +492,7 @@ static void cramfs_kill_sb(struct super_block *sb) | |||
| 492 | { | 492 | { |
| 493 | struct cramfs_sb_info *sbi = CRAMFS_SB(sb); | 493 | struct cramfs_sb_info *sbi = CRAMFS_SB(sb); |
| 494 | 494 | ||
| 495 | if (IS_ENABLED(CCONFIG_CRAMFS_MTD) && sb->s_mtd) { | 495 | if (IS_ENABLED(CONFIG_CRAMFS_MTD) && sb->s_mtd) { |
| 496 | if (sbi && sbi->mtd_point_size) | 496 | if (sbi && sbi->mtd_point_size) |
| 497 | mtd_unpoint(sb->s_mtd, 0, sbi->mtd_point_size); | 497 | mtd_unpoint(sb->s_mtd, 0, sbi->mtd_point_size); |
| 498 | kill_mtd_super(sb); | 498 | kill_mtd_super(sb); |
diff --git a/fs/dcache.c b/fs/dcache.c index 86d2de63461e..2acfc69878f5 100644 --- a/fs/dcache.c +++ b/fs/dcache.c | |||
| @@ -1899,6 +1899,28 @@ void d_instantiate(struct dentry *entry, struct inode * inode) | |||
| 1899 | } | 1899 | } |
| 1900 | EXPORT_SYMBOL(d_instantiate); | 1900 | EXPORT_SYMBOL(d_instantiate); |
| 1901 | 1901 | ||
| 1902 | /* | ||
| 1903 | * This should be equivalent to d_instantiate() + unlock_new_inode(), | ||
| 1904 | * with lockdep-related part of unlock_new_inode() done before | ||
| 1905 | * anything else. Use that instead of open-coding d_instantiate()/ | ||
| 1906 | * unlock_new_inode() combinations. | ||
| 1907 | */ | ||
| 1908 | void d_instantiate_new(struct dentry *entry, struct inode *inode) | ||
| 1909 | { | ||
| 1910 | BUG_ON(!hlist_unhashed(&entry->d_u.d_alias)); | ||
| 1911 | BUG_ON(!inode); | ||
| 1912 | lockdep_annotate_inode_mutex_key(inode); | ||
| 1913 | security_d_instantiate(entry, inode); | ||
| 1914 | spin_lock(&inode->i_lock); | ||
| 1915 | __d_instantiate(entry, inode); | ||
| 1916 | WARN_ON(!(inode->i_state & I_NEW)); | ||
| 1917 | inode->i_state &= ~I_NEW; | ||
| 1918 | smp_mb(); | ||
| 1919 | wake_up_bit(&inode->i_state, __I_NEW); | ||
| 1920 | spin_unlock(&inode->i_lock); | ||
| 1921 | } | ||
| 1922 | EXPORT_SYMBOL(d_instantiate_new); | ||
| 1923 | |||
| 1902 | /** | 1924 | /** |
| 1903 | * d_instantiate_no_diralias - instantiate a non-aliased dentry | 1925 | * d_instantiate_no_diralias - instantiate a non-aliased dentry |
| 1904 | * @entry: dentry to complete | 1926 | * @entry: dentry to complete |
diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c index 97d17eaeba07..49121e5a8de2 100644 --- a/fs/ecryptfs/inode.c +++ b/fs/ecryptfs/inode.c | |||
| @@ -283,8 +283,7 @@ ecryptfs_create(struct inode *directory_inode, struct dentry *ecryptfs_dentry, | |||
| 283 | iget_failed(ecryptfs_inode); | 283 | iget_failed(ecryptfs_inode); |
| 284 | goto out; | 284 | goto out; |
| 285 | } | 285 | } |
| 286 | unlock_new_inode(ecryptfs_inode); | 286 | d_instantiate_new(ecryptfs_dentry, ecryptfs_inode); |
| 287 | d_instantiate(ecryptfs_dentry, ecryptfs_inode); | ||
| 288 | out: | 287 | out: |
| 289 | return rc; | 288 | return rc; |
| 290 | } | 289 | } |
diff --git a/fs/ext2/inode.c b/fs/ext2/inode.c index 1e01fabef130..71635909df3b 100644 --- a/fs/ext2/inode.c +++ b/fs/ext2/inode.c | |||
| @@ -1264,21 +1264,11 @@ do_indirects: | |||
| 1264 | 1264 | ||
| 1265 | static void ext2_truncate_blocks(struct inode *inode, loff_t offset) | 1265 | static void ext2_truncate_blocks(struct inode *inode, loff_t offset) |
| 1266 | { | 1266 | { |
| 1267 | /* | ||
| 1268 | * XXX: it seems like a bug here that we don't allow | ||
| 1269 | * IS_APPEND inode to have blocks-past-i_size trimmed off. | ||
| 1270 | * review and fix this. | ||
| 1271 | * | ||
| 1272 | * Also would be nice to be able to handle IO errors and such, | ||
| 1273 | * but that's probably too much to ask. | ||
| 1274 | */ | ||
| 1275 | if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) || | 1267 | if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) || |
| 1276 | S_ISLNK(inode->i_mode))) | 1268 | S_ISLNK(inode->i_mode))) |
| 1277 | return; | 1269 | return; |
| 1278 | if (ext2_inode_is_fast_symlink(inode)) | 1270 | if (ext2_inode_is_fast_symlink(inode)) |
| 1279 | return; | 1271 | return; |
| 1280 | if (IS_APPEND(inode) || IS_IMMUTABLE(inode)) | ||
| 1281 | return; | ||
| 1282 | 1272 | ||
| 1283 | dax_sem_down_write(EXT2_I(inode)); | 1273 | dax_sem_down_write(EXT2_I(inode)); |
| 1284 | __ext2_truncate_blocks(inode, offset); | 1274 | __ext2_truncate_blocks(inode, offset); |
diff --git a/fs/ext2/namei.c b/fs/ext2/namei.c index 55f7caadb093..152453a91877 100644 --- a/fs/ext2/namei.c +++ b/fs/ext2/namei.c | |||
| @@ -41,8 +41,7 @@ static inline int ext2_add_nondir(struct dentry *dentry, struct inode *inode) | |||
| 41 | { | 41 | { |
| 42 | int err = ext2_add_link(dentry, inode); | 42 | int err = ext2_add_link(dentry, inode); |
| 43 | if (!err) { | 43 | if (!err) { |
| 44 | unlock_new_inode(inode); | 44 | d_instantiate_new(dentry, inode); |
| 45 | d_instantiate(dentry, inode); | ||
| 46 | return 0; | 45 | return 0; |
| 47 | } | 46 | } |
| 48 | inode_dec_link_count(inode); | 47 | inode_dec_link_count(inode); |
| @@ -255,8 +254,7 @@ static int ext2_mkdir(struct inode * dir, struct dentry * dentry, umode_t mode) | |||
| 255 | if (err) | 254 | if (err) |
| 256 | goto out_fail; | 255 | goto out_fail; |
| 257 | 256 | ||
| 258 | unlock_new_inode(inode); | 257 | d_instantiate_new(dentry, inode); |
| 259 | d_instantiate(dentry, inode); | ||
| 260 | out: | 258 | out: |
| 261 | return err; | 259 | return err; |
| 262 | 260 | ||
diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c index b1f21e3a0763..4a09063ce1d2 100644 --- a/fs/ext4/namei.c +++ b/fs/ext4/namei.c | |||
| @@ -2411,8 +2411,7 @@ static int ext4_add_nondir(handle_t *handle, | |||
| 2411 | int err = ext4_add_entry(handle, dentry, inode); | 2411 | int err = ext4_add_entry(handle, dentry, inode); |
| 2412 | if (!err) { | 2412 | if (!err) { |
| 2413 | ext4_mark_inode_dirty(handle, inode); | 2413 | ext4_mark_inode_dirty(handle, inode); |
| 2414 | unlock_new_inode(inode); | 2414 | d_instantiate_new(dentry, inode); |
| 2415 | d_instantiate(dentry, inode); | ||
| 2416 | return 0; | 2415 | return 0; |
| 2417 | } | 2416 | } |
| 2418 | drop_nlink(inode); | 2417 | drop_nlink(inode); |
| @@ -2651,8 +2650,7 @@ out_clear_inode: | |||
| 2651 | err = ext4_mark_inode_dirty(handle, dir); | 2650 | err = ext4_mark_inode_dirty(handle, dir); |
| 2652 | if (err) | 2651 | if (err) |
| 2653 | goto out_clear_inode; | 2652 | goto out_clear_inode; |
| 2654 | unlock_new_inode(inode); | 2653 | d_instantiate_new(dentry, inode); |
| 2655 | d_instantiate(dentry, inode); | ||
| 2656 | if (IS_DIRSYNC(dir)) | 2654 | if (IS_DIRSYNC(dir)) |
| 2657 | ext4_handle_sync(handle); | 2655 | ext4_handle_sync(handle); |
| 2658 | 2656 | ||
diff --git a/fs/f2fs/namei.c b/fs/f2fs/namei.c index d5098efe577c..75e37fd720b2 100644 --- a/fs/f2fs/namei.c +++ b/fs/f2fs/namei.c | |||
| @@ -294,8 +294,7 @@ static int f2fs_create(struct inode *dir, struct dentry *dentry, umode_t mode, | |||
| 294 | 294 | ||
| 295 | alloc_nid_done(sbi, ino); | 295 | alloc_nid_done(sbi, ino); |
| 296 | 296 | ||
| 297 | d_instantiate(dentry, inode); | 297 | d_instantiate_new(dentry, inode); |
| 298 | unlock_new_inode(inode); | ||
| 299 | 298 | ||
| 300 | if (IS_DIRSYNC(dir)) | 299 | if (IS_DIRSYNC(dir)) |
| 301 | f2fs_sync_fs(sbi->sb, 1); | 300 | f2fs_sync_fs(sbi->sb, 1); |
| @@ -597,8 +596,7 @@ static int f2fs_symlink(struct inode *dir, struct dentry *dentry, | |||
| 597 | err = page_symlink(inode, disk_link.name, disk_link.len); | 596 | err = page_symlink(inode, disk_link.name, disk_link.len); |
| 598 | 597 | ||
| 599 | err_out: | 598 | err_out: |
| 600 | d_instantiate(dentry, inode); | 599 | d_instantiate_new(dentry, inode); |
| 601 | unlock_new_inode(inode); | ||
| 602 | 600 | ||
| 603 | /* | 601 | /* |
| 604 | * Let's flush symlink data in order to avoid broken symlink as much as | 602 | * Let's flush symlink data in order to avoid broken symlink as much as |
| @@ -661,8 +659,7 @@ static int f2fs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode) | |||
| 661 | 659 | ||
| 662 | alloc_nid_done(sbi, inode->i_ino); | 660 | alloc_nid_done(sbi, inode->i_ino); |
| 663 | 661 | ||
| 664 | d_instantiate(dentry, inode); | 662 | d_instantiate_new(dentry, inode); |
| 665 | unlock_new_inode(inode); | ||
| 666 | 663 | ||
| 667 | if (IS_DIRSYNC(dir)) | 664 | if (IS_DIRSYNC(dir)) |
| 668 | f2fs_sync_fs(sbi->sb, 1); | 665 | f2fs_sync_fs(sbi->sb, 1); |
| @@ -713,8 +710,7 @@ static int f2fs_mknod(struct inode *dir, struct dentry *dentry, | |||
| 713 | 710 | ||
| 714 | alloc_nid_done(sbi, inode->i_ino); | 711 | alloc_nid_done(sbi, inode->i_ino); |
| 715 | 712 | ||
| 716 | d_instantiate(dentry, inode); | 713 | d_instantiate_new(dentry, inode); |
| 717 | unlock_new_inode(inode); | ||
| 718 | 714 | ||
| 719 | if (IS_DIRSYNC(dir)) | 715 | if (IS_DIRSYNC(dir)) |
| 720 | f2fs_sync_fs(sbi->sb, 1); | 716 | f2fs_sync_fs(sbi->sb, 1); |
diff --git a/fs/hfsplus/super.c b/fs/hfsplus/super.c index 513c357c734b..a6c0f54c48c3 100644 --- a/fs/hfsplus/super.c +++ b/fs/hfsplus/super.c | |||
| @@ -588,6 +588,7 @@ static int hfsplus_fill_super(struct super_block *sb, void *data, int silent) | |||
| 588 | return 0; | 588 | return 0; |
| 589 | 589 | ||
| 590 | out_put_hidden_dir: | 590 | out_put_hidden_dir: |
| 591 | cancel_delayed_work_sync(&sbi->sync_work); | ||
| 591 | iput(sbi->hidden_dir); | 592 | iput(sbi->hidden_dir); |
| 592 | out_put_root: | 593 | out_put_root: |
| 593 | dput(sb->s_root); | 594 | dput(sb->s_root); |
diff --git a/fs/jffs2/dir.c b/fs/jffs2/dir.c index 0a754f38462e..e5a6deb38e1e 100644 --- a/fs/jffs2/dir.c +++ b/fs/jffs2/dir.c | |||
| @@ -209,8 +209,7 @@ static int jffs2_create(struct inode *dir_i, struct dentry *dentry, | |||
| 209 | __func__, inode->i_ino, inode->i_mode, inode->i_nlink, | 209 | __func__, inode->i_ino, inode->i_mode, inode->i_nlink, |
| 210 | f->inocache->pino_nlink, inode->i_mapping->nrpages); | 210 | f->inocache->pino_nlink, inode->i_mapping->nrpages); |
| 211 | 211 | ||
| 212 | unlock_new_inode(inode); | 212 | d_instantiate_new(dentry, inode); |
| 213 | d_instantiate(dentry, inode); | ||
| 214 | return 0; | 213 | return 0; |
| 215 | 214 | ||
| 216 | fail: | 215 | fail: |
| @@ -430,8 +429,7 @@ static int jffs2_symlink (struct inode *dir_i, struct dentry *dentry, const char | |||
| 430 | mutex_unlock(&dir_f->sem); | 429 | mutex_unlock(&dir_f->sem); |
| 431 | jffs2_complete_reservation(c); | 430 | jffs2_complete_reservation(c); |
| 432 | 431 | ||
| 433 | unlock_new_inode(inode); | 432 | d_instantiate_new(dentry, inode); |
| 434 | d_instantiate(dentry, inode); | ||
| 435 | return 0; | 433 | return 0; |
| 436 | 434 | ||
| 437 | fail: | 435 | fail: |
| @@ -575,8 +573,7 @@ static int jffs2_mkdir (struct inode *dir_i, struct dentry *dentry, umode_t mode | |||
| 575 | mutex_unlock(&dir_f->sem); | 573 | mutex_unlock(&dir_f->sem); |
| 576 | jffs2_complete_reservation(c); | 574 | jffs2_complete_reservation(c); |
| 577 | 575 | ||
| 578 | unlock_new_inode(inode); | 576 | d_instantiate_new(dentry, inode); |
| 579 | d_instantiate(dentry, inode); | ||
| 580 | return 0; | 577 | return 0; |
| 581 | 578 | ||
| 582 | fail: | 579 | fail: |
| @@ -747,8 +744,7 @@ static int jffs2_mknod (struct inode *dir_i, struct dentry *dentry, umode_t mode | |||
| 747 | mutex_unlock(&dir_f->sem); | 744 | mutex_unlock(&dir_f->sem); |
| 748 | jffs2_complete_reservation(c); | 745 | jffs2_complete_reservation(c); |
| 749 | 746 | ||
| 750 | unlock_new_inode(inode); | 747 | d_instantiate_new(dentry, inode); |
| 751 | d_instantiate(dentry, inode); | ||
| 752 | return 0; | 748 | return 0; |
| 753 | 749 | ||
| 754 | fail: | 750 | fail: |
diff --git a/fs/jfs/namei.c b/fs/jfs/namei.c index b41596d71858..56c3fcbfe80e 100644 --- a/fs/jfs/namei.c +++ b/fs/jfs/namei.c | |||
| @@ -178,8 +178,7 @@ static int jfs_create(struct inode *dip, struct dentry *dentry, umode_t mode, | |||
| 178 | unlock_new_inode(ip); | 178 | unlock_new_inode(ip); |
| 179 | iput(ip); | 179 | iput(ip); |
| 180 | } else { | 180 | } else { |
| 181 | unlock_new_inode(ip); | 181 | d_instantiate_new(dentry, ip); |
| 182 | d_instantiate(dentry, ip); | ||
| 183 | } | 182 | } |
| 184 | 183 | ||
| 185 | out2: | 184 | out2: |
| @@ -313,8 +312,7 @@ static int jfs_mkdir(struct inode *dip, struct dentry *dentry, umode_t mode) | |||
| 313 | unlock_new_inode(ip); | 312 | unlock_new_inode(ip); |
| 314 | iput(ip); | 313 | iput(ip); |
| 315 | } else { | 314 | } else { |
| 316 | unlock_new_inode(ip); | 315 | d_instantiate_new(dentry, ip); |
| 317 | d_instantiate(dentry, ip); | ||
| 318 | } | 316 | } |
| 319 | 317 | ||
| 320 | out2: | 318 | out2: |
| @@ -1059,8 +1057,7 @@ static int jfs_symlink(struct inode *dip, struct dentry *dentry, | |||
| 1059 | unlock_new_inode(ip); | 1057 | unlock_new_inode(ip); |
| 1060 | iput(ip); | 1058 | iput(ip); |
| 1061 | } else { | 1059 | } else { |
| 1062 | unlock_new_inode(ip); | 1060 | d_instantiate_new(dentry, ip); |
| 1063 | d_instantiate(dentry, ip); | ||
| 1064 | } | 1061 | } |
| 1065 | 1062 | ||
| 1066 | out2: | 1063 | out2: |
| @@ -1447,8 +1444,7 @@ static int jfs_mknod(struct inode *dir, struct dentry *dentry, | |||
| 1447 | unlock_new_inode(ip); | 1444 | unlock_new_inode(ip); |
| 1448 | iput(ip); | 1445 | iput(ip); |
| 1449 | } else { | 1446 | } else { |
| 1450 | unlock_new_inode(ip); | 1447 | d_instantiate_new(dentry, ip); |
| 1451 | d_instantiate(dentry, ip); | ||
| 1452 | } | 1448 | } |
| 1453 | 1449 | ||
| 1454 | out1: | 1450 | out1: |
diff --git a/fs/kernfs/mount.c b/fs/kernfs/mount.c index 26dd9a50f383..ff2716f9322e 100644 --- a/fs/kernfs/mount.c +++ b/fs/kernfs/mount.c | |||
| @@ -316,6 +316,7 @@ struct dentry *kernfs_mount_ns(struct file_system_type *fs_type, int flags, | |||
| 316 | 316 | ||
| 317 | info->root = root; | 317 | info->root = root; |
| 318 | info->ns = ns; | 318 | info->ns = ns; |
| 319 | INIT_LIST_HEAD(&info->node); | ||
| 319 | 320 | ||
| 320 | sb = sget_userns(fs_type, kernfs_test_super, kernfs_set_super, flags, | 321 | sb = sget_userns(fs_type, kernfs_test_super, kernfs_set_super, flags, |
| 321 | &init_user_ns, info); | 322 | &init_user_ns, info); |
diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c index 2410b093a2e6..b0555d7d8200 100644 --- a/fs/nfsd/vfs.c +++ b/fs/nfsd/vfs.c | |||
| @@ -1201,6 +1201,28 @@ nfsd_create_locked(struct svc_rqst *rqstp, struct svc_fh *fhp, | |||
| 1201 | break; | 1201 | break; |
| 1202 | case S_IFDIR: | 1202 | case S_IFDIR: |
| 1203 | host_err = vfs_mkdir(dirp, dchild, iap->ia_mode); | 1203 | host_err = vfs_mkdir(dirp, dchild, iap->ia_mode); |
| 1204 | if (!host_err && unlikely(d_unhashed(dchild))) { | ||
| 1205 | struct dentry *d; | ||
| 1206 | d = lookup_one_len(dchild->d_name.name, | ||
| 1207 | dchild->d_parent, | ||
| 1208 | dchild->d_name.len); | ||
| 1209 | if (IS_ERR(d)) { | ||
| 1210 | host_err = PTR_ERR(d); | ||
| 1211 | break; | ||
| 1212 | } | ||
| 1213 | if (unlikely(d_is_negative(d))) { | ||
| 1214 | dput(d); | ||
| 1215 | err = nfserr_serverfault; | ||
| 1216 | goto out; | ||
| 1217 | } | ||
| 1218 | dput(resfhp->fh_dentry); | ||
| 1219 | resfhp->fh_dentry = dget(d); | ||
| 1220 | err = fh_update(resfhp); | ||
| 1221 | dput(dchild); | ||
| 1222 | dchild = d; | ||
| 1223 | if (err) | ||
| 1224 | goto out; | ||
| 1225 | } | ||
| 1204 | break; | 1226 | break; |
| 1205 | case S_IFCHR: | 1227 | case S_IFCHR: |
| 1206 | case S_IFBLK: | 1228 | case S_IFBLK: |
diff --git a/fs/nilfs2/namei.c b/fs/nilfs2/namei.c index 1a2894aa0194..dd52d3f82e8d 100644 --- a/fs/nilfs2/namei.c +++ b/fs/nilfs2/namei.c | |||
| @@ -46,8 +46,7 @@ static inline int nilfs_add_nondir(struct dentry *dentry, struct inode *inode) | |||
| 46 | int err = nilfs_add_link(dentry, inode); | 46 | int err = nilfs_add_link(dentry, inode); |
| 47 | 47 | ||
| 48 | if (!err) { | 48 | if (!err) { |
| 49 | d_instantiate(dentry, inode); | 49 | d_instantiate_new(dentry, inode); |
| 50 | unlock_new_inode(inode); | ||
| 51 | return 0; | 50 | return 0; |
| 52 | } | 51 | } |
| 53 | inode_dec_link_count(inode); | 52 | inode_dec_link_count(inode); |
| @@ -243,8 +242,7 @@ static int nilfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode) | |||
| 243 | goto out_fail; | 242 | goto out_fail; |
| 244 | 243 | ||
| 245 | nilfs_mark_inode_dirty(inode); | 244 | nilfs_mark_inode_dirty(inode); |
| 246 | d_instantiate(dentry, inode); | 245 | d_instantiate_new(dentry, inode); |
| 247 | unlock_new_inode(inode); | ||
| 248 | out: | 246 | out: |
| 249 | if (!err) | 247 | if (!err) |
| 250 | err = nilfs_transaction_commit(dir->i_sb); | 248 | err = nilfs_transaction_commit(dir->i_sb); |
diff --git a/fs/ocfs2/cluster/heartbeat.c b/fs/ocfs2/cluster/heartbeat.c index 91a8889abf9b..ea8c551bcd7e 100644 --- a/fs/ocfs2/cluster/heartbeat.c +++ b/fs/ocfs2/cluster/heartbeat.c | |||
| @@ -570,16 +570,7 @@ static struct bio *o2hb_setup_one_bio(struct o2hb_region *reg, | |||
| 570 | current_page, vec_len, vec_start); | 570 | current_page, vec_len, vec_start); |
| 571 | 571 | ||
| 572 | len = bio_add_page(bio, page, vec_len, vec_start); | 572 | len = bio_add_page(bio, page, vec_len, vec_start); |
| 573 | if (len != vec_len) { | 573 | if (len != vec_len) break; |
| 574 | mlog(ML_ERROR, "Adding page[%d] to bio failed, " | ||
| 575 | "page %p, len %d, vec_len %u, vec_start %u, " | ||
| 576 | "bi_sector %llu\n", current_page, page, len, | ||
| 577 | vec_len, vec_start, | ||
| 578 | (unsigned long long)bio->bi_iter.bi_sector); | ||
| 579 | bio_put(bio); | ||
| 580 | bio = ERR_PTR(-EIO); | ||
| 581 | return bio; | ||
| 582 | } | ||
| 583 | 574 | ||
| 584 | cs += vec_len / (PAGE_SIZE/spp); | 575 | cs += vec_len / (PAGE_SIZE/spp); |
| 585 | vec_start = 0; | 576 | vec_start = 0; |
diff --git a/fs/orangefs/namei.c b/fs/orangefs/namei.c index 6e3134e6d98a..1b5707c44c3f 100644 --- a/fs/orangefs/namei.c +++ b/fs/orangefs/namei.c | |||
| @@ -75,8 +75,7 @@ static int orangefs_create(struct inode *dir, | |||
| 75 | get_khandle_from_ino(inode), | 75 | get_khandle_from_ino(inode), |
| 76 | dentry); | 76 | dentry); |
| 77 | 77 | ||
| 78 | d_instantiate(dentry, inode); | 78 | d_instantiate_new(dentry, inode); |
| 79 | unlock_new_inode(inode); | ||
| 80 | orangefs_set_timeout(dentry); | 79 | orangefs_set_timeout(dentry); |
| 81 | ORANGEFS_I(inode)->getattr_time = jiffies - 1; | 80 | ORANGEFS_I(inode)->getattr_time = jiffies - 1; |
| 82 | ORANGEFS_I(inode)->getattr_mask = STATX_BASIC_STATS; | 81 | ORANGEFS_I(inode)->getattr_mask = STATX_BASIC_STATS; |
| @@ -332,8 +331,7 @@ static int orangefs_symlink(struct inode *dir, | |||
| 332 | "Assigned symlink inode new number of %pU\n", | 331 | "Assigned symlink inode new number of %pU\n", |
| 333 | get_khandle_from_ino(inode)); | 332 | get_khandle_from_ino(inode)); |
| 334 | 333 | ||
| 335 | d_instantiate(dentry, inode); | 334 | d_instantiate_new(dentry, inode); |
| 336 | unlock_new_inode(inode); | ||
| 337 | orangefs_set_timeout(dentry); | 335 | orangefs_set_timeout(dentry); |
| 338 | ORANGEFS_I(inode)->getattr_time = jiffies - 1; | 336 | ORANGEFS_I(inode)->getattr_time = jiffies - 1; |
| 339 | ORANGEFS_I(inode)->getattr_mask = STATX_BASIC_STATS; | 337 | ORANGEFS_I(inode)->getattr_mask = STATX_BASIC_STATS; |
| @@ -402,8 +400,7 @@ static int orangefs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode | |||
| 402 | "Assigned dir inode new number of %pU\n", | 400 | "Assigned dir inode new number of %pU\n", |
| 403 | get_khandle_from_ino(inode)); | 401 | get_khandle_from_ino(inode)); |
| 404 | 402 | ||
| 405 | d_instantiate(dentry, inode); | 403 | d_instantiate_new(dentry, inode); |
| 406 | unlock_new_inode(inode); | ||
| 407 | orangefs_set_timeout(dentry); | 404 | orangefs_set_timeout(dentry); |
| 408 | ORANGEFS_I(inode)->getattr_time = jiffies - 1; | 405 | ORANGEFS_I(inode)->getattr_time = jiffies - 1; |
| 409 | ORANGEFS_I(inode)->getattr_mask = STATX_BASIC_STATS; | 406 | ORANGEFS_I(inode)->getattr_mask = STATX_BASIC_STATS; |
diff --git a/fs/proc/array.c b/fs/proc/array.c index ae2c807fd719..72391b3f6927 100644 --- a/fs/proc/array.c +++ b/fs/proc/array.c | |||
| @@ -85,6 +85,7 @@ | |||
| 85 | #include <linux/delayacct.h> | 85 | #include <linux/delayacct.h> |
| 86 | #include <linux/seq_file.h> | 86 | #include <linux/seq_file.h> |
| 87 | #include <linux/pid_namespace.h> | 87 | #include <linux/pid_namespace.h> |
| 88 | #include <linux/prctl.h> | ||
| 88 | #include <linux/ptrace.h> | 89 | #include <linux/ptrace.h> |
| 89 | #include <linux/tracehook.h> | 90 | #include <linux/tracehook.h> |
| 90 | #include <linux/string_helpers.h> | 91 | #include <linux/string_helpers.h> |
| @@ -335,6 +336,30 @@ static inline void task_seccomp(struct seq_file *m, struct task_struct *p) | |||
| 335 | #ifdef CONFIG_SECCOMP | 336 | #ifdef CONFIG_SECCOMP |
| 336 | seq_put_decimal_ull(m, "\nSeccomp:\t", p->seccomp.mode); | 337 | seq_put_decimal_ull(m, "\nSeccomp:\t", p->seccomp.mode); |
| 337 | #endif | 338 | #endif |
| 339 | seq_printf(m, "\nSpeculation_Store_Bypass:\t"); | ||
| 340 | switch (arch_prctl_spec_ctrl_get(p, PR_SPEC_STORE_BYPASS)) { | ||
| 341 | case -EINVAL: | ||
| 342 | seq_printf(m, "unknown"); | ||
| 343 | break; | ||
| 344 | case PR_SPEC_NOT_AFFECTED: | ||
| 345 | seq_printf(m, "not vulnerable"); | ||
| 346 | break; | ||
| 347 | case PR_SPEC_PRCTL | PR_SPEC_FORCE_DISABLE: | ||
| 348 | seq_printf(m, "thread force mitigated"); | ||
| 349 | break; | ||
| 350 | case PR_SPEC_PRCTL | PR_SPEC_DISABLE: | ||
| 351 | seq_printf(m, "thread mitigated"); | ||
| 352 | break; | ||
| 353 | case PR_SPEC_PRCTL | PR_SPEC_ENABLE: | ||
| 354 | seq_printf(m, "thread vulnerable"); | ||
| 355 | break; | ||
| 356 | case PR_SPEC_DISABLE: | ||
| 357 | seq_printf(m, "globally mitigated"); | ||
| 358 | break; | ||
| 359 | default: | ||
| 360 | seq_printf(m, "vulnerable"); | ||
| 361 | break; | ||
| 362 | } | ||
| 338 | seq_putc(m, '\n'); | 363 | seq_putc(m, '\n'); |
| 339 | } | 364 | } |
| 340 | 365 | ||
diff --git a/fs/proc/base.c b/fs/proc/base.c index 1b2ede6abcdf..1a76d751cf3c 100644 --- a/fs/proc/base.c +++ b/fs/proc/base.c | |||
| @@ -261,7 +261,7 @@ static ssize_t proc_pid_cmdline_read(struct file *file, char __user *buf, | |||
| 261 | * Inherently racy -- command line shares address space | 261 | * Inherently racy -- command line shares address space |
| 262 | * with code and data. | 262 | * with code and data. |
| 263 | */ | 263 | */ |
| 264 | rv = access_remote_vm(mm, arg_end - 1, &c, 1, 0); | 264 | rv = access_remote_vm(mm, arg_end - 1, &c, 1, FOLL_ANON); |
| 265 | if (rv <= 0) | 265 | if (rv <= 0) |
| 266 | goto out_free_page; | 266 | goto out_free_page; |
| 267 | 267 | ||
| @@ -279,7 +279,7 @@ static ssize_t proc_pid_cmdline_read(struct file *file, char __user *buf, | |||
| 279 | int nr_read; | 279 | int nr_read; |
| 280 | 280 | ||
| 281 | _count = min3(count, len, PAGE_SIZE); | 281 | _count = min3(count, len, PAGE_SIZE); |
| 282 | nr_read = access_remote_vm(mm, p, page, _count, 0); | 282 | nr_read = access_remote_vm(mm, p, page, _count, FOLL_ANON); |
| 283 | if (nr_read < 0) | 283 | if (nr_read < 0) |
| 284 | rv = nr_read; | 284 | rv = nr_read; |
| 285 | if (nr_read <= 0) | 285 | if (nr_read <= 0) |
| @@ -325,7 +325,7 @@ static ssize_t proc_pid_cmdline_read(struct file *file, char __user *buf, | |||
| 325 | bool final; | 325 | bool final; |
| 326 | 326 | ||
| 327 | _count = min3(count, len, PAGE_SIZE); | 327 | _count = min3(count, len, PAGE_SIZE); |
| 328 | nr_read = access_remote_vm(mm, p, page, _count, 0); | 328 | nr_read = access_remote_vm(mm, p, page, _count, FOLL_ANON); |
| 329 | if (nr_read < 0) | 329 | if (nr_read < 0) |
| 330 | rv = nr_read; | 330 | rv = nr_read; |
| 331 | if (nr_read <= 0) | 331 | if (nr_read <= 0) |
| @@ -946,7 +946,7 @@ static ssize_t environ_read(struct file *file, char __user *buf, | |||
| 946 | max_len = min_t(size_t, PAGE_SIZE, count); | 946 | max_len = min_t(size_t, PAGE_SIZE, count); |
| 947 | this_len = min(max_len, this_len); | 947 | this_len = min(max_len, this_len); |
| 948 | 948 | ||
| 949 | retval = access_remote_vm(mm, (env_start + src), page, this_len, 0); | 949 | retval = access_remote_vm(mm, (env_start + src), page, this_len, FOLL_ANON); |
| 950 | 950 | ||
| 951 | if (retval <= 0) { | 951 | if (retval <= 0) { |
| 952 | ret = retval; | 952 | ret = retval; |
diff --git a/fs/reiserfs/namei.c b/fs/reiserfs/namei.c index bd39a998843d..5089dac02660 100644 --- a/fs/reiserfs/namei.c +++ b/fs/reiserfs/namei.c | |||
| @@ -687,8 +687,7 @@ static int reiserfs_create(struct inode *dir, struct dentry *dentry, umode_t mod | |||
| 687 | reiserfs_update_inode_transaction(inode); | 687 | reiserfs_update_inode_transaction(inode); |
| 688 | reiserfs_update_inode_transaction(dir); | 688 | reiserfs_update_inode_transaction(dir); |
| 689 | 689 | ||
| 690 | unlock_new_inode(inode); | 690 | d_instantiate_new(dentry, inode); |
| 691 | d_instantiate(dentry, inode); | ||
| 692 | retval = journal_end(&th); | 691 | retval = journal_end(&th); |
| 693 | 692 | ||
| 694 | out_failed: | 693 | out_failed: |
| @@ -771,8 +770,7 @@ static int reiserfs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode | |||
| 771 | goto out_failed; | 770 | goto out_failed; |
| 772 | } | 771 | } |
| 773 | 772 | ||
| 774 | unlock_new_inode(inode); | 773 | d_instantiate_new(dentry, inode); |
| 775 | d_instantiate(dentry, inode); | ||
| 776 | retval = journal_end(&th); | 774 | retval = journal_end(&th); |
| 777 | 775 | ||
| 778 | out_failed: | 776 | out_failed: |
| @@ -871,8 +869,7 @@ static int reiserfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode | |||
| 871 | /* the above add_entry did not update dir's stat data */ | 869 | /* the above add_entry did not update dir's stat data */ |
| 872 | reiserfs_update_sd(&th, dir); | 870 | reiserfs_update_sd(&th, dir); |
| 873 | 871 | ||
| 874 | unlock_new_inode(inode); | 872 | d_instantiate_new(dentry, inode); |
| 875 | d_instantiate(dentry, inode); | ||
| 876 | retval = journal_end(&th); | 873 | retval = journal_end(&th); |
| 877 | out_failed: | 874 | out_failed: |
| 878 | reiserfs_write_unlock(dir->i_sb); | 875 | reiserfs_write_unlock(dir->i_sb); |
| @@ -1187,8 +1184,7 @@ static int reiserfs_symlink(struct inode *parent_dir, | |||
| 1187 | goto out_failed; | 1184 | goto out_failed; |
| 1188 | } | 1185 | } |
| 1189 | 1186 | ||
| 1190 | unlock_new_inode(inode); | 1187 | d_instantiate_new(dentry, inode); |
| 1191 | d_instantiate(dentry, inode); | ||
| 1192 | retval = journal_end(&th); | 1188 | retval = journal_end(&th); |
| 1193 | out_failed: | 1189 | out_failed: |
| 1194 | reiserfs_write_unlock(parent_dir->i_sb); | 1190 | reiserfs_write_unlock(parent_dir->i_sb); |
diff --git a/fs/seq_file.c b/fs/seq_file.c index c6c27f1f9c98..4cc090b50cc5 100644 --- a/fs/seq_file.c +++ b/fs/seq_file.c | |||
| @@ -709,11 +709,6 @@ void seq_put_decimal_ull_width(struct seq_file *m, const char *delimiter, | |||
| 709 | if (m->count + width >= m->size) | 709 | if (m->count + width >= m->size) |
| 710 | goto overflow; | 710 | goto overflow; |
| 711 | 711 | ||
| 712 | if (num < 10) { | ||
| 713 | m->buf[m->count++] = num + '0'; | ||
| 714 | return; | ||
| 715 | } | ||
| 716 | |||
| 717 | len = num_to_str(m->buf + m->count, m->size - m->count, num, width); | 712 | len = num_to_str(m->buf + m->count, m->size - m->count, num, width); |
| 718 | if (!len) | 713 | if (!len) |
| 719 | goto overflow; | 714 | goto overflow; |
diff --git a/fs/super.c b/fs/super.c index 122c402049a2..4b5b562176d0 100644 --- a/fs/super.c +++ b/fs/super.c | |||
| @@ -121,13 +121,23 @@ static unsigned long super_cache_count(struct shrinker *shrink, | |||
| 121 | sb = container_of(shrink, struct super_block, s_shrink); | 121 | sb = container_of(shrink, struct super_block, s_shrink); |
| 122 | 122 | ||
| 123 | /* | 123 | /* |
| 124 | * Don't call trylock_super as it is a potential | 124 | * We don't call trylock_super() here as it is a scalability bottleneck, |
| 125 | * scalability bottleneck. The counts could get updated | 125 | * so we're exposed to partial setup state. The shrinker rwsem does not |
| 126 | * between super_cache_count and super_cache_scan anyway. | 126 | * protect filesystem operations backing list_lru_shrink_count() or |
| 127 | * Call to super_cache_count with shrinker_rwsem held | 127 | * s_op->nr_cached_objects(). Counts can change between |
| 128 | * ensures the safety of call to list_lru_shrink_count() and | 128 | * super_cache_count and super_cache_scan, so we really don't need locks |
| 129 | * s_op->nr_cached_objects(). | 129 | * here. |
| 130 | * | ||
| 131 | * However, if we are currently mounting the superblock, the underlying | ||
| 132 | * filesystem might be in a state of partial construction and hence it | ||
| 133 | * is dangerous to access it. trylock_super() uses a SB_BORN check to | ||
| 134 | * avoid this situation, so do the same here. The memory barrier is | ||
| 135 | * matched with the one in mount_fs() as we don't hold locks here. | ||
| 130 | */ | 136 | */ |
| 137 | if (!(sb->s_flags & SB_BORN)) | ||
| 138 | return 0; | ||
| 139 | smp_rmb(); | ||
| 140 | |||
| 131 | if (sb->s_op && sb->s_op->nr_cached_objects) | 141 | if (sb->s_op && sb->s_op->nr_cached_objects) |
| 132 | total_objects = sb->s_op->nr_cached_objects(sb, sc); | 142 | total_objects = sb->s_op->nr_cached_objects(sb, sc); |
| 133 | 143 | ||
| @@ -1272,6 +1282,14 @@ mount_fs(struct file_system_type *type, int flags, const char *name, void *data) | |||
| 1272 | sb = root->d_sb; | 1282 | sb = root->d_sb; |
| 1273 | BUG_ON(!sb); | 1283 | BUG_ON(!sb); |
| 1274 | WARN_ON(!sb->s_bdi); | 1284 | WARN_ON(!sb->s_bdi); |
| 1285 | |||
| 1286 | /* | ||
| 1287 | * Write barrier is for super_cache_count(). We place it before setting | ||
| 1288 | * SB_BORN as the data dependency between the two functions is the | ||
| 1289 | * superblock structure contents that we just set up, not the SB_BORN | ||
| 1290 | * flag. | ||
| 1291 | */ | ||
| 1292 | smp_wmb(); | ||
| 1275 | sb->s_flags |= SB_BORN; | 1293 | sb->s_flags |= SB_BORN; |
| 1276 | 1294 | ||
| 1277 | error = security_sb_kern_mount(sb, flags, secdata); | 1295 | error = security_sb_kern_mount(sb, flags, secdata); |
diff --git a/fs/sysfs/mount.c b/fs/sysfs/mount.c index b428d317ae92..92682fcc41f6 100644 --- a/fs/sysfs/mount.c +++ b/fs/sysfs/mount.c | |||
| @@ -25,7 +25,7 @@ static struct dentry *sysfs_mount(struct file_system_type *fs_type, | |||
| 25 | { | 25 | { |
| 26 | struct dentry *root; | 26 | struct dentry *root; |
| 27 | void *ns; | 27 | void *ns; |
| 28 | bool new_sb; | 28 | bool new_sb = false; |
| 29 | 29 | ||
| 30 | if (!(flags & SB_KERNMOUNT)) { | 30 | if (!(flags & SB_KERNMOUNT)) { |
| 31 | if (!kobj_ns_current_may_mount(KOBJ_NS_TYPE_NET)) | 31 | if (!kobj_ns_current_may_mount(KOBJ_NS_TYPE_NET)) |
| @@ -35,9 +35,9 @@ static struct dentry *sysfs_mount(struct file_system_type *fs_type, | |||
| 35 | ns = kobj_ns_grab_current(KOBJ_NS_TYPE_NET); | 35 | ns = kobj_ns_grab_current(KOBJ_NS_TYPE_NET); |
| 36 | root = kernfs_mount_ns(fs_type, flags, sysfs_root, | 36 | root = kernfs_mount_ns(fs_type, flags, sysfs_root, |
| 37 | SYSFS_MAGIC, &new_sb, ns); | 37 | SYSFS_MAGIC, &new_sb, ns); |
| 38 | if (IS_ERR(root) || !new_sb) | 38 | if (!new_sb) |
| 39 | kobj_ns_drop(KOBJ_NS_TYPE_NET, ns); | 39 | kobj_ns_drop(KOBJ_NS_TYPE_NET, ns); |
| 40 | else if (new_sb) | 40 | else if (!IS_ERR(root)) |
| 41 | root->d_sb->s_iflags |= SB_I_USERNS_VISIBLE; | 41 | root->d_sb->s_iflags |= SB_I_USERNS_VISIBLE; |
| 42 | 42 | ||
| 43 | return root; | 43 | return root; |
diff --git a/fs/udf/namei.c b/fs/udf/namei.c index 0458dd47e105..c586026508db 100644 --- a/fs/udf/namei.c +++ b/fs/udf/namei.c | |||
| @@ -622,8 +622,7 @@ static int udf_add_nondir(struct dentry *dentry, struct inode *inode) | |||
| 622 | if (fibh.sbh != fibh.ebh) | 622 | if (fibh.sbh != fibh.ebh) |
| 623 | brelse(fibh.ebh); | 623 | brelse(fibh.ebh); |
| 624 | brelse(fibh.sbh); | 624 | brelse(fibh.sbh); |
| 625 | unlock_new_inode(inode); | 625 | d_instantiate_new(dentry, inode); |
| 626 | d_instantiate(dentry, inode); | ||
| 627 | 626 | ||
| 628 | return 0; | 627 | return 0; |
| 629 | } | 628 | } |
| @@ -733,8 +732,7 @@ static int udf_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode) | |||
| 733 | inc_nlink(dir); | 732 | inc_nlink(dir); |
| 734 | dir->i_ctime = dir->i_mtime = current_time(dir); | 733 | dir->i_ctime = dir->i_mtime = current_time(dir); |
| 735 | mark_inode_dirty(dir); | 734 | mark_inode_dirty(dir); |
| 736 | unlock_new_inode(inode); | 735 | d_instantiate_new(dentry, inode); |
| 737 | d_instantiate(dentry, inode); | ||
| 738 | if (fibh.sbh != fibh.ebh) | 736 | if (fibh.sbh != fibh.ebh) |
| 739 | brelse(fibh.ebh); | 737 | brelse(fibh.ebh); |
| 740 | brelse(fibh.sbh); | 738 | brelse(fibh.sbh); |
diff --git a/fs/ufs/namei.c b/fs/ufs/namei.c index 32545cd00ceb..d5f43ba76c59 100644 --- a/fs/ufs/namei.c +++ b/fs/ufs/namei.c | |||
| @@ -39,8 +39,7 @@ static inline int ufs_add_nondir(struct dentry *dentry, struct inode *inode) | |||
| 39 | { | 39 | { |
| 40 | int err = ufs_add_link(dentry, inode); | 40 | int err = ufs_add_link(dentry, inode); |
| 41 | if (!err) { | 41 | if (!err) { |
| 42 | unlock_new_inode(inode); | 42 | d_instantiate_new(dentry, inode); |
| 43 | d_instantiate(dentry, inode); | ||
| 44 | return 0; | 43 | return 0; |
| 45 | } | 44 | } |
| 46 | inode_dec_link_count(inode); | 45 | inode_dec_link_count(inode); |
| @@ -193,8 +192,7 @@ static int ufs_mkdir(struct inode * dir, struct dentry * dentry, umode_t mode) | |||
| 193 | if (err) | 192 | if (err) |
| 194 | goto out_fail; | 193 | goto out_fail; |
| 195 | 194 | ||
| 196 | unlock_new_inode(inode); | 195 | d_instantiate_new(dentry, inode); |
| 197 | d_instantiate(dentry, inode); | ||
| 198 | return 0; | 196 | return 0; |
| 199 | 197 | ||
| 200 | out_fail: | 198 | out_fail: |
diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h index 7e61c395fddf..df36b1b08af0 100644 --- a/include/linux/bpf_verifier.h +++ b/include/linux/bpf_verifier.h | |||
| @@ -142,10 +142,11 @@ struct bpf_verifier_state_list { | |||
| 142 | struct bpf_insn_aux_data { | 142 | struct bpf_insn_aux_data { |
| 143 | union { | 143 | union { |
| 144 | enum bpf_reg_type ptr_type; /* pointer type for load/store insns */ | 144 | enum bpf_reg_type ptr_type; /* pointer type for load/store insns */ |
| 145 | struct bpf_map *map_ptr; /* pointer for call insn into lookup_elem */ | 145 | unsigned long map_state; /* pointer/poison value for maps */ |
| 146 | s32 call_imm; /* saved imm field of call insn */ | 146 | s32 call_imm; /* saved imm field of call insn */ |
| 147 | }; | 147 | }; |
| 148 | int ctx_field_size; /* the ctx field size for load insn, maybe 0 */ | 148 | int ctx_field_size; /* the ctx field size for load insn, maybe 0 */ |
| 149 | int sanitize_stack_off; /* stack slot to be cleared */ | ||
| 149 | bool seen; /* this insn was processed by the verifier */ | 150 | bool seen; /* this insn was processed by the verifier */ |
| 150 | }; | 151 | }; |
| 151 | 152 | ||
diff --git a/include/linux/cpu.h b/include/linux/cpu.h index 7b01bc11c692..a97a63eef59f 100644 --- a/include/linux/cpu.h +++ b/include/linux/cpu.h | |||
| @@ -53,6 +53,8 @@ extern ssize_t cpu_show_spectre_v1(struct device *dev, | |||
| 53 | struct device_attribute *attr, char *buf); | 53 | struct device_attribute *attr, char *buf); |
| 54 | extern ssize_t cpu_show_spectre_v2(struct device *dev, | 54 | extern ssize_t cpu_show_spectre_v2(struct device *dev, |
| 55 | struct device_attribute *attr, char *buf); | 55 | struct device_attribute *attr, char *buf); |
| 56 | extern ssize_t cpu_show_spec_store_bypass(struct device *dev, | ||
| 57 | struct device_attribute *attr, char *buf); | ||
| 56 | 58 | ||
| 57 | extern __printf(4, 5) | 59 | extern __printf(4, 5) |
| 58 | struct device *cpu_device_create(struct device *parent, void *drvdata, | 60 | struct device *cpu_device_create(struct device *parent, void *drvdata, |
diff --git a/include/linux/dcache.h b/include/linux/dcache.h index 94acbde17bb1..66c6e17e61e5 100644 --- a/include/linux/dcache.h +++ b/include/linux/dcache.h | |||
| @@ -224,6 +224,7 @@ extern seqlock_t rename_lock; | |||
| 224 | * These are the low-level FS interfaces to the dcache.. | 224 | * These are the low-level FS interfaces to the dcache.. |
| 225 | */ | 225 | */ |
| 226 | extern void d_instantiate(struct dentry *, struct inode *); | 226 | extern void d_instantiate(struct dentry *, struct inode *); |
| 227 | extern void d_instantiate_new(struct dentry *, struct inode *); | ||
| 227 | extern struct dentry * d_instantiate_unique(struct dentry *, struct inode *); | 228 | extern struct dentry * d_instantiate_unique(struct dentry *, struct inode *); |
| 228 | extern struct dentry * d_instantiate_anon(struct dentry *, struct inode *); | 229 | extern struct dentry * d_instantiate_anon(struct dentry *, struct inode *); |
| 229 | extern int d_instantiate_no_diralias(struct dentry *, struct inode *); | 230 | extern int d_instantiate_no_diralias(struct dentry *, struct inode *); |
diff --git a/include/linux/efi.h b/include/linux/efi.h index f1b7d68ac460..3016d8c456bc 100644 --- a/include/linux/efi.h +++ b/include/linux/efi.h | |||
| @@ -395,8 +395,8 @@ typedef struct { | |||
| 395 | u32 attributes; | 395 | u32 attributes; |
| 396 | u32 get_bar_attributes; | 396 | u32 get_bar_attributes; |
| 397 | u32 set_bar_attributes; | 397 | u32 set_bar_attributes; |
| 398 | uint64_t romsize; | 398 | u64 romsize; |
| 399 | void *romimage; | 399 | u32 romimage; |
| 400 | } efi_pci_io_protocol_32; | 400 | } efi_pci_io_protocol_32; |
| 401 | 401 | ||
| 402 | typedef struct { | 402 | typedef struct { |
| @@ -415,8 +415,8 @@ typedef struct { | |||
| 415 | u64 attributes; | 415 | u64 attributes; |
| 416 | u64 get_bar_attributes; | 416 | u64 get_bar_attributes; |
| 417 | u64 set_bar_attributes; | 417 | u64 set_bar_attributes; |
| 418 | uint64_t romsize; | 418 | u64 romsize; |
| 419 | void *romimage; | 419 | u64 romimage; |
| 420 | } efi_pci_io_protocol_64; | 420 | } efi_pci_io_protocol_64; |
| 421 | 421 | ||
| 422 | typedef struct { | 422 | typedef struct { |
diff --git a/include/linux/gfp.h b/include/linux/gfp.h index 1a4582b44d32..fc5ab85278d5 100644 --- a/include/linux/gfp.h +++ b/include/linux/gfp.h | |||
| @@ -464,7 +464,7 @@ static inline struct page * | |||
| 464 | __alloc_pages_node(int nid, gfp_t gfp_mask, unsigned int order) | 464 | __alloc_pages_node(int nid, gfp_t gfp_mask, unsigned int order) |
| 465 | { | 465 | { |
| 466 | VM_BUG_ON(nid < 0 || nid >= MAX_NUMNODES); | 466 | VM_BUG_ON(nid < 0 || nid >= MAX_NUMNODES); |
| 467 | VM_WARN_ON(!node_online(nid)); | 467 | VM_WARN_ON((gfp_mask & __GFP_THISNODE) && !node_online(nid)); |
| 468 | 468 | ||
| 469 | return __alloc_pages(gfp_mask, order, nid); | 469 | return __alloc_pages(gfp_mask, order, nid); |
| 470 | } | 470 | } |
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index 6930c63126c7..6d6e79c59e68 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h | |||
| @@ -1045,13 +1045,7 @@ static inline int mmu_notifier_retry(struct kvm *kvm, unsigned long mmu_seq) | |||
| 1045 | 1045 | ||
| 1046 | #ifdef CONFIG_HAVE_KVM_IRQ_ROUTING | 1046 | #ifdef CONFIG_HAVE_KVM_IRQ_ROUTING |
| 1047 | 1047 | ||
| 1048 | #ifdef CONFIG_S390 | 1048 | #define KVM_MAX_IRQ_ROUTES 4096 /* might need extension/rework in the future */ |
| 1049 | #define KVM_MAX_IRQ_ROUTES 4096 //FIXME: we can have more than that... | ||
| 1050 | #elif defined(CONFIG_ARM64) | ||
| 1051 | #define KVM_MAX_IRQ_ROUTES 4096 | ||
| 1052 | #else | ||
| 1053 | #define KVM_MAX_IRQ_ROUTES 1024 | ||
| 1054 | #endif | ||
| 1055 | 1049 | ||
| 1056 | bool kvm_arch_can_set_irq_routing(struct kvm *kvm); | 1050 | bool kvm_arch_can_set_irq_routing(struct kvm *kvm); |
| 1057 | int kvm_set_irq_routing(struct kvm *kvm, | 1051 | int kvm_set_irq_routing(struct kvm *kvm, |
diff --git a/include/linux/memory_hotplug.h b/include/linux/memory_hotplug.h index e0e49b5b1ee1..2b0265265c28 100644 --- a/include/linux/memory_hotplug.h +++ b/include/linux/memory_hotplug.h | |||
| @@ -216,6 +216,9 @@ void put_online_mems(void); | |||
| 216 | void mem_hotplug_begin(void); | 216 | void mem_hotplug_begin(void); |
| 217 | void mem_hotplug_done(void); | 217 | void mem_hotplug_done(void); |
| 218 | 218 | ||
| 219 | extern void set_zone_contiguous(struct zone *zone); | ||
| 220 | extern void clear_zone_contiguous(struct zone *zone); | ||
| 221 | |||
| 219 | #else /* ! CONFIG_MEMORY_HOTPLUG */ | 222 | #else /* ! CONFIG_MEMORY_HOTPLUG */ |
| 220 | #define pfn_to_online_page(pfn) \ | 223 | #define pfn_to_online_page(pfn) \ |
| 221 | ({ \ | 224 | ({ \ |
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h index 2a156c5dfadd..d703774982ca 100644 --- a/include/linux/mlx5/driver.h +++ b/include/linux/mlx5/driver.h | |||
| @@ -1286,17 +1286,7 @@ enum { | |||
| 1286 | static inline const struct cpumask * | 1286 | static inline const struct cpumask * |
| 1287 | mlx5_get_vector_affinity_hint(struct mlx5_core_dev *dev, int vector) | 1287 | mlx5_get_vector_affinity_hint(struct mlx5_core_dev *dev, int vector) |
| 1288 | { | 1288 | { |
| 1289 | struct irq_desc *desc; | 1289 | return dev->priv.irq_info[vector].mask; |
| 1290 | unsigned int irq; | ||
| 1291 | int eqn; | ||
| 1292 | int err; | ||
| 1293 | |||
| 1294 | err = mlx5_vector2eqn(dev, vector, &eqn, &irq); | ||
| 1295 | if (err) | ||
| 1296 | return NULL; | ||
| 1297 | |||
| 1298 | desc = irq_to_desc(irq); | ||
| 1299 | return desc->affinity_hint; | ||
| 1300 | } | 1290 | } |
| 1301 | 1291 | ||
| 1302 | #endif /* MLX5_DRIVER_H */ | 1292 | #endif /* MLX5_DRIVER_H */ |
diff --git a/include/linux/mm.h b/include/linux/mm.h index 1ac1f06a4be6..02a616e2f17d 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h | |||
| @@ -2109,7 +2109,6 @@ extern void setup_per_cpu_pageset(void); | |||
| 2109 | 2109 | ||
| 2110 | extern void zone_pcp_update(struct zone *zone); | 2110 | extern void zone_pcp_update(struct zone *zone); |
| 2111 | extern void zone_pcp_reset(struct zone *zone); | 2111 | extern void zone_pcp_reset(struct zone *zone); |
| 2112 | extern void setup_zone_pageset(struct zone *zone); | ||
| 2113 | 2112 | ||
| 2114 | /* page_alloc.c */ | 2113 | /* page_alloc.c */ |
| 2115 | extern int min_free_kbytes; | 2114 | extern int min_free_kbytes; |
| @@ -2466,6 +2465,13 @@ static inline vm_fault_t vmf_insert_pfn(struct vm_area_struct *vma, | |||
| 2466 | return VM_FAULT_NOPAGE; | 2465 | return VM_FAULT_NOPAGE; |
| 2467 | } | 2466 | } |
| 2468 | 2467 | ||
| 2468 | static inline vm_fault_t vmf_error(int err) | ||
| 2469 | { | ||
| 2470 | if (err == -ENOMEM) | ||
| 2471 | return VM_FAULT_OOM; | ||
| 2472 | return VM_FAULT_SIGBUS; | ||
| 2473 | } | ||
| 2474 | |||
| 2469 | struct page *follow_page_mask(struct vm_area_struct *vma, | 2475 | struct page *follow_page_mask(struct vm_area_struct *vma, |
| 2470 | unsigned long address, unsigned int foll_flags, | 2476 | unsigned long address, unsigned int foll_flags, |
| 2471 | unsigned int *page_mask); | 2477 | unsigned int *page_mask); |
| @@ -2493,6 +2499,7 @@ static inline struct page *follow_page(struct vm_area_struct *vma, | |||
| 2493 | #define FOLL_MLOCK 0x1000 /* lock present pages */ | 2499 | #define FOLL_MLOCK 0x1000 /* lock present pages */ |
| 2494 | #define FOLL_REMOTE 0x2000 /* we are working on non-current tsk/mm */ | 2500 | #define FOLL_REMOTE 0x2000 /* we are working on non-current tsk/mm */ |
| 2495 | #define FOLL_COW 0x4000 /* internal GUP flag */ | 2501 | #define FOLL_COW 0x4000 /* internal GUP flag */ |
| 2502 | #define FOLL_ANON 0x8000 /* don't do file mappings */ | ||
| 2496 | 2503 | ||
| 2497 | static inline int vm_fault_to_errno(int vm_fault, int foll_flags) | 2504 | static inline int vm_fault_to_errno(int vm_fault, int foll_flags) |
| 2498 | { | 2505 | { |
diff --git a/include/linux/mtd/map.h b/include/linux/mtd/map.h index b5b43f94f311..01b990e4b228 100644 --- a/include/linux/mtd/map.h +++ b/include/linux/mtd/map.h | |||
| @@ -312,7 +312,7 @@ void map_destroy(struct mtd_info *mtd); | |||
| 312 | ({ \ | 312 | ({ \ |
| 313 | int i, ret = 1; \ | 313 | int i, ret = 1; \ |
| 314 | for (i = 0; i < map_words(map); i++) { \ | 314 | for (i = 0; i < map_words(map); i++) { \ |
| 315 | if (((val1).x[i] & (val2).x[i]) != (val2).x[i]) { \ | 315 | if (((val1).x[i] & (val2).x[i]) != (val3).x[i]) { \ |
| 316 | ret = 0; \ | 316 | ret = 0; \ |
| 317 | break; \ | 317 | break; \ |
| 318 | } \ | 318 | } \ |
diff --git a/include/linux/mtd/rawnand.h b/include/linux/mtd/rawnand.h index 5dad59b31244..17c919436f48 100644 --- a/include/linux/mtd/rawnand.h +++ b/include/linux/mtd/rawnand.h | |||
| @@ -867,12 +867,18 @@ struct nand_op_instr { | |||
| 867 | * tBERS (during an erase) which all of them are u64 values that cannot be | 867 | * tBERS (during an erase) which all of them are u64 values that cannot be |
| 868 | * divided by usual kernel macros and must be handled with the special | 868 | * divided by usual kernel macros and must be handled with the special |
| 869 | * DIV_ROUND_UP_ULL() macro. | 869 | * DIV_ROUND_UP_ULL() macro. |
| 870 | * | ||
| 871 | * Cast to type of dividend is needed here to guarantee that the result won't | ||
| 872 | * be an unsigned long long when the dividend is an unsigned long (or smaller), | ||
| 873 | * which is what the compiler does when it sees ternary operator with 2 | ||
| 874 | * different return types (picks the largest type to make sure there's no | ||
| 875 | * loss). | ||
| 870 | */ | 876 | */ |
| 871 | #define __DIVIDE(dividend, divisor) ({ \ | 877 | #define __DIVIDE(dividend, divisor) ({ \ |
| 872 | sizeof(dividend) == sizeof(u32) ? \ | 878 | (__typeof__(dividend))(sizeof(dividend) <= sizeof(unsigned long) ? \ |
| 873 | DIV_ROUND_UP(dividend, divisor) : \ | 879 | DIV_ROUND_UP(dividend, divisor) : \ |
| 874 | DIV_ROUND_UP_ULL(dividend, divisor); \ | 880 | DIV_ROUND_UP_ULL(dividend, divisor)); \ |
| 875 | }) | 881 | }) |
| 876 | #define PSEC_TO_NSEC(x) __DIVIDE(x, 1000) | 882 | #define PSEC_TO_NSEC(x) __DIVIDE(x, 1000) |
| 877 | #define PSEC_TO_MSEC(x) __DIVIDE(x, 1000000000) | 883 | #define PSEC_TO_MSEC(x) __DIVIDE(x, 1000000000) |
| 878 | 884 | ||
diff --git a/include/linux/node.h b/include/linux/node.h index 41f171861dcc..6d336e38d155 100644 --- a/include/linux/node.h +++ b/include/linux/node.h | |||
| @@ -32,9 +32,11 @@ extern struct node *node_devices[]; | |||
| 32 | typedef void (*node_registration_func_t)(struct node *); | 32 | typedef void (*node_registration_func_t)(struct node *); |
| 33 | 33 | ||
| 34 | #if defined(CONFIG_MEMORY_HOTPLUG_SPARSE) && defined(CONFIG_NUMA) | 34 | #if defined(CONFIG_MEMORY_HOTPLUG_SPARSE) && defined(CONFIG_NUMA) |
| 35 | extern int link_mem_sections(int nid, unsigned long start_pfn, unsigned long nr_pages); | 35 | extern int link_mem_sections(int nid, unsigned long start_pfn, |
| 36 | unsigned long nr_pages, bool check_nid); | ||
| 36 | #else | 37 | #else |
| 37 | static inline int link_mem_sections(int nid, unsigned long start_pfn, unsigned long nr_pages) | 38 | static inline int link_mem_sections(int nid, unsigned long start_pfn, |
| 39 | unsigned long nr_pages, bool check_nid) | ||
| 38 | { | 40 | { |
| 39 | return 0; | 41 | return 0; |
| 40 | } | 42 | } |
| @@ -57,7 +59,7 @@ static inline int register_one_node(int nid) | |||
| 57 | if (error) | 59 | if (error) |
| 58 | return error; | 60 | return error; |
| 59 | /* link memory sections under this node */ | 61 | /* link memory sections under this node */ |
| 60 | error = link_mem_sections(nid, pgdat->node_start_pfn, pgdat->node_spanned_pages); | 62 | error = link_mem_sections(nid, pgdat->node_start_pfn, pgdat->node_spanned_pages, true); |
| 61 | } | 63 | } |
| 62 | 64 | ||
| 63 | return error; | 65 | return error; |
diff --git a/include/linux/nospec.h b/include/linux/nospec.h index e791ebc65c9c..0c5ef54fd416 100644 --- a/include/linux/nospec.h +++ b/include/linux/nospec.h | |||
| @@ -7,6 +7,8 @@ | |||
| 7 | #define _LINUX_NOSPEC_H | 7 | #define _LINUX_NOSPEC_H |
| 8 | #include <asm/barrier.h> | 8 | #include <asm/barrier.h> |
| 9 | 9 | ||
| 10 | struct task_struct; | ||
| 11 | |||
| 10 | /** | 12 | /** |
| 11 | * array_index_mask_nospec() - generate a ~0 mask when index < size, 0 otherwise | 13 | * array_index_mask_nospec() - generate a ~0 mask when index < size, 0 otherwise |
| 12 | * @index: array element index | 14 | * @index: array element index |
| @@ -55,4 +57,12 @@ static inline unsigned long array_index_mask_nospec(unsigned long index, | |||
| 55 | \ | 57 | \ |
| 56 | (typeof(_i)) (_i & _mask); \ | 58 | (typeof(_i)) (_i & _mask); \ |
| 57 | }) | 59 | }) |
| 60 | |||
| 61 | /* Speculation control prctl */ | ||
| 62 | int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which); | ||
| 63 | int arch_prctl_spec_ctrl_set(struct task_struct *task, unsigned long which, | ||
| 64 | unsigned long ctrl); | ||
| 65 | /* Speculation control for seccomp enforced mitigation */ | ||
| 66 | void arch_seccomp_spec_mitigate(struct task_struct *task); | ||
| 67 | |||
| 58 | #endif /* _LINUX_NOSPEC_H */ | 68 | #endif /* _LINUX_NOSPEC_H */ |
diff --git a/include/linux/percpu-rwsem.h b/include/linux/percpu-rwsem.h index b1f37a89e368..79b99d653e03 100644 --- a/include/linux/percpu-rwsem.h +++ b/include/linux/percpu-rwsem.h | |||
| @@ -133,7 +133,7 @@ static inline void percpu_rwsem_release(struct percpu_rw_semaphore *sem, | |||
| 133 | lock_release(&sem->rw_sem.dep_map, 1, ip); | 133 | lock_release(&sem->rw_sem.dep_map, 1, ip); |
| 134 | #ifdef CONFIG_RWSEM_SPIN_ON_OWNER | 134 | #ifdef CONFIG_RWSEM_SPIN_ON_OWNER |
| 135 | if (!read) | 135 | if (!read) |
| 136 | sem->rw_sem.owner = NULL; | 136 | sem->rw_sem.owner = RWSEM_OWNER_UNKNOWN; |
| 137 | #endif | 137 | #endif |
| 138 | } | 138 | } |
| 139 | 139 | ||
| @@ -141,6 +141,10 @@ static inline void percpu_rwsem_acquire(struct percpu_rw_semaphore *sem, | |||
| 141 | bool read, unsigned long ip) | 141 | bool read, unsigned long ip) |
| 142 | { | 142 | { |
| 143 | lock_acquire(&sem->rw_sem.dep_map, 0, 1, read, 1, NULL, ip); | 143 | lock_acquire(&sem->rw_sem.dep_map, 0, 1, read, 1, NULL, ip); |
| 144 | #ifdef CONFIG_RWSEM_SPIN_ON_OWNER | ||
| 145 | if (!read) | ||
| 146 | sem->rw_sem.owner = current; | ||
| 147 | #endif | ||
| 144 | } | 148 | } |
| 145 | 149 | ||
| 146 | #endif | 150 | #endif |
diff --git a/include/linux/rwsem.h b/include/linux/rwsem.h index 56707d5ff6ad..ab93b6eae696 100644 --- a/include/linux/rwsem.h +++ b/include/linux/rwsem.h | |||
| @@ -44,6 +44,12 @@ struct rw_semaphore { | |||
| 44 | #endif | 44 | #endif |
| 45 | }; | 45 | }; |
| 46 | 46 | ||
| 47 | /* | ||
| 48 | * Setting bit 0 of the owner field with other non-zero bits will indicate | ||
| 49 | * that the rwsem is writer-owned with an unknown owner. | ||
| 50 | */ | ||
| 51 | #define RWSEM_OWNER_UNKNOWN ((struct task_struct *)-1L) | ||
| 52 | |||
| 47 | extern struct rw_semaphore *rwsem_down_read_failed(struct rw_semaphore *sem); | 53 | extern struct rw_semaphore *rwsem_down_read_failed(struct rw_semaphore *sem); |
| 48 | extern struct rw_semaphore *rwsem_down_read_failed_killable(struct rw_semaphore *sem); | 54 | extern struct rw_semaphore *rwsem_down_read_failed_killable(struct rw_semaphore *sem); |
| 49 | extern struct rw_semaphore *rwsem_down_write_failed(struct rw_semaphore *sem); | 55 | extern struct rw_semaphore *rwsem_down_write_failed(struct rw_semaphore *sem); |
diff --git a/include/linux/sched.h b/include/linux/sched.h index c2413703f45d..ca3f3eae8980 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h | |||
| @@ -1433,7 +1433,8 @@ static inline bool is_percpu_thread(void) | |||
| 1433 | #define PFA_NO_NEW_PRIVS 0 /* May not gain new privileges. */ | 1433 | #define PFA_NO_NEW_PRIVS 0 /* May not gain new privileges. */ |
| 1434 | #define PFA_SPREAD_PAGE 1 /* Spread page cache over cpuset */ | 1434 | #define PFA_SPREAD_PAGE 1 /* Spread page cache over cpuset */ |
| 1435 | #define PFA_SPREAD_SLAB 2 /* Spread some slab caches over cpuset */ | 1435 | #define PFA_SPREAD_SLAB 2 /* Spread some slab caches over cpuset */ |
| 1436 | 1436 | #define PFA_SPEC_SSB_DISABLE 3 /* Speculative Store Bypass disabled */ | |
| 1437 | #define PFA_SPEC_SSB_FORCE_DISABLE 4 /* Speculative Store Bypass force disabled*/ | ||
| 1437 | 1438 | ||
| 1438 | #define TASK_PFA_TEST(name, func) \ | 1439 | #define TASK_PFA_TEST(name, func) \ |
| 1439 | static inline bool task_##func(struct task_struct *p) \ | 1440 | static inline bool task_##func(struct task_struct *p) \ |
| @@ -1458,6 +1459,13 @@ TASK_PFA_TEST(SPREAD_SLAB, spread_slab) | |||
| 1458 | TASK_PFA_SET(SPREAD_SLAB, spread_slab) | 1459 | TASK_PFA_SET(SPREAD_SLAB, spread_slab) |
| 1459 | TASK_PFA_CLEAR(SPREAD_SLAB, spread_slab) | 1460 | TASK_PFA_CLEAR(SPREAD_SLAB, spread_slab) |
| 1460 | 1461 | ||
| 1462 | TASK_PFA_TEST(SPEC_SSB_DISABLE, spec_ssb_disable) | ||
| 1463 | TASK_PFA_SET(SPEC_SSB_DISABLE, spec_ssb_disable) | ||
| 1464 | TASK_PFA_CLEAR(SPEC_SSB_DISABLE, spec_ssb_disable) | ||
| 1465 | |||
| 1466 | TASK_PFA_TEST(SPEC_SSB_FORCE_DISABLE, spec_ssb_force_disable) | ||
| 1467 | TASK_PFA_SET(SPEC_SSB_FORCE_DISABLE, spec_ssb_force_disable) | ||
| 1468 | |||
| 1461 | static inline void | 1469 | static inline void |
| 1462 | current_restore_flags(unsigned long orig_flags, unsigned long flags) | 1470 | current_restore_flags(unsigned long orig_flags, unsigned long flags) |
| 1463 | { | 1471 | { |
diff --git a/include/linux/seccomp.h b/include/linux/seccomp.h index c723a5c4e3ff..e5320f6c8654 100644 --- a/include/linux/seccomp.h +++ b/include/linux/seccomp.h | |||
| @@ -4,8 +4,9 @@ | |||
| 4 | 4 | ||
| 5 | #include <uapi/linux/seccomp.h> | 5 | #include <uapi/linux/seccomp.h> |
| 6 | 6 | ||
| 7 | #define SECCOMP_FILTER_FLAG_MASK (SECCOMP_FILTER_FLAG_TSYNC | \ | 7 | #define SECCOMP_FILTER_FLAG_MASK (SECCOMP_FILTER_FLAG_TSYNC | \ |
| 8 | SECCOMP_FILTER_FLAG_LOG) | 8 | SECCOMP_FILTER_FLAG_LOG | \ |
| 9 | SECCOMP_FILTER_FLAG_SPEC_ALLOW) | ||
| 9 | 10 | ||
| 10 | #ifdef CONFIG_SECCOMP | 11 | #ifdef CONFIG_SECCOMP |
| 11 | 12 | ||
diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h index cd368d1b8cb8..a1e28dd5d0bf 100644 --- a/include/net/netfilter/nf_tables.h +++ b/include/net/netfilter/nf_tables.h | |||
| @@ -170,6 +170,7 @@ struct nft_data_desc { | |||
| 170 | int nft_data_init(const struct nft_ctx *ctx, | 170 | int nft_data_init(const struct nft_ctx *ctx, |
| 171 | struct nft_data *data, unsigned int size, | 171 | struct nft_data *data, unsigned int size, |
| 172 | struct nft_data_desc *desc, const struct nlattr *nla); | 172 | struct nft_data_desc *desc, const struct nlattr *nla); |
| 173 | void nft_data_hold(const struct nft_data *data, enum nft_data_types type); | ||
| 173 | void nft_data_release(const struct nft_data *data, enum nft_data_types type); | 174 | void nft_data_release(const struct nft_data *data, enum nft_data_types type); |
| 174 | int nft_data_dump(struct sk_buff *skb, int attr, const struct nft_data *data, | 175 | int nft_data_dump(struct sk_buff *skb, int attr, const struct nft_data *data, |
| 175 | enum nft_data_types type, unsigned int len); | 176 | enum nft_data_types type, unsigned int len); |
| @@ -736,6 +737,10 @@ struct nft_expr_ops { | |||
| 736 | int (*init)(const struct nft_ctx *ctx, | 737 | int (*init)(const struct nft_ctx *ctx, |
| 737 | const struct nft_expr *expr, | 738 | const struct nft_expr *expr, |
| 738 | const struct nlattr * const tb[]); | 739 | const struct nlattr * const tb[]); |
| 740 | void (*activate)(const struct nft_ctx *ctx, | ||
| 741 | const struct nft_expr *expr); | ||
| 742 | void (*deactivate)(const struct nft_ctx *ctx, | ||
| 743 | const struct nft_expr *expr); | ||
| 739 | void (*destroy)(const struct nft_ctx *ctx, | 744 | void (*destroy)(const struct nft_ctx *ctx, |
| 740 | const struct nft_expr *expr); | 745 | const struct nft_expr *expr); |
| 741 | int (*dump)(struct sk_buff *skb, | 746 | int (*dump)(struct sk_buff *skb, |
diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h index 28b996d63490..35498e613ff5 100644 --- a/include/net/sctp/sctp.h +++ b/include/net/sctp/sctp.h | |||
| @@ -103,6 +103,8 @@ void sctp_addr_wq_mgmt(struct net *, struct sctp_sockaddr_entry *, int); | |||
| 103 | /* | 103 | /* |
| 104 | * sctp/socket.c | 104 | * sctp/socket.c |
| 105 | */ | 105 | */ |
| 106 | int sctp_inet_connect(struct socket *sock, struct sockaddr *uaddr, | ||
| 107 | int addr_len, int flags); | ||
| 106 | int sctp_backlog_rcv(struct sock *sk, struct sk_buff *skb); | 108 | int sctp_backlog_rcv(struct sock *sk, struct sk_buff *skb); |
| 107 | int sctp_inet_listen(struct socket *sock, int backlog); | 109 | int sctp_inet_listen(struct socket *sock, int backlog); |
| 108 | void sctp_write_space(struct sock *sk); | 110 | void sctp_write_space(struct sock *sk); |
diff --git a/include/net/tls.h b/include/net/tls.h index b400d0bb7448..f5fb16da3860 100644 --- a/include/net/tls.h +++ b/include/net/tls.h | |||
| @@ -97,6 +97,9 @@ struct tls_sw_context { | |||
| 97 | u8 control; | 97 | u8 control; |
| 98 | bool decrypted; | 98 | bool decrypted; |
| 99 | 99 | ||
| 100 | char rx_aad_ciphertext[TLS_AAD_SPACE_SIZE]; | ||
| 101 | char rx_aad_plaintext[TLS_AAD_SPACE_SIZE]; | ||
| 102 | |||
| 100 | /* Sending context */ | 103 | /* Sending context */ |
| 101 | char aad_space[TLS_AAD_SPACE_SIZE]; | 104 | char aad_space[TLS_AAD_SPACE_SIZE]; |
| 102 | 105 | ||
diff --git a/include/rdma/ib_umem.h b/include/rdma/ib_umem.h index 23159dd5be18..a1fd63871d17 100644 --- a/include/rdma/ib_umem.h +++ b/include/rdma/ib_umem.h | |||
| @@ -48,7 +48,6 @@ struct ib_umem { | |||
| 48 | int writable; | 48 | int writable; |
| 49 | int hugetlb; | 49 | int hugetlb; |
| 50 | struct work_struct work; | 50 | struct work_struct work; |
| 51 | struct pid *pid; | ||
| 52 | struct mm_struct *mm; | 51 | struct mm_struct *mm; |
| 53 | unsigned long diff; | 52 | unsigned long diff; |
| 54 | struct ib_umem_odp *odp_data; | 53 | struct ib_umem_odp *odp_data; |
diff --git a/include/rdma/uverbs_ioctl.h b/include/rdma/uverbs_ioctl.h index 4a4201d997a7..095383a4bd1a 100644 --- a/include/rdma/uverbs_ioctl.h +++ b/include/rdma/uverbs_ioctl.h | |||
| @@ -411,13 +411,13 @@ static inline int uverbs_attr_get_enum_id(const struct uverbs_attr_bundle *attrs | |||
| 411 | static inline void *uverbs_attr_get_obj(const struct uverbs_attr_bundle *attrs_bundle, | 411 | static inline void *uverbs_attr_get_obj(const struct uverbs_attr_bundle *attrs_bundle, |
| 412 | u16 idx) | 412 | u16 idx) |
| 413 | { | 413 | { |
| 414 | struct ib_uobject *uobj = | 414 | const struct uverbs_attr *attr; |
| 415 | uverbs_attr_get(attrs_bundle, idx)->obj_attr.uobject; | ||
| 416 | 415 | ||
| 417 | if (IS_ERR(uobj)) | 416 | attr = uverbs_attr_get(attrs_bundle, idx); |
| 418 | return uobj; | 417 | if (IS_ERR(attr)) |
| 418 | return ERR_CAST(attr); | ||
| 419 | 419 | ||
| 420 | return uobj->object; | 420 | return attr->obj_attr.uobject->object; |
| 421 | } | 421 | } |
| 422 | 422 | ||
| 423 | static inline int uverbs_copy_to(const struct uverbs_attr_bundle *attrs_bundle, | 423 | static inline int uverbs_copy_to(const struct uverbs_attr_bundle *attrs_bundle, |
diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h index bc01e06bc716..0be866c91f62 100644 --- a/include/trace/events/sched.h +++ b/include/trace/events/sched.h | |||
| @@ -435,7 +435,9 @@ TRACE_EVENT(sched_pi_setprio, | |||
| 435 | memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN); | 435 | memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN); |
| 436 | __entry->pid = tsk->pid; | 436 | __entry->pid = tsk->pid; |
| 437 | __entry->oldprio = tsk->prio; | 437 | __entry->oldprio = tsk->prio; |
| 438 | __entry->newprio = pi_task ? pi_task->prio : tsk->prio; | 438 | __entry->newprio = pi_task ? |
| 439 | min(tsk->normal_prio, pi_task->prio) : | ||
| 440 | tsk->normal_prio; | ||
| 439 | /* XXX SCHED_DEADLINE bits missing */ | 441 | /* XXX SCHED_DEADLINE bits missing */ |
| 440 | ), | 442 | ), |
| 441 | 443 | ||
diff --git a/include/trace/events/xen.h b/include/trace/events/xen.h index 7dd8f34c37df..fdcf88bcf0ea 100644 --- a/include/trace/events/xen.h +++ b/include/trace/events/xen.h | |||
| @@ -352,22 +352,6 @@ DECLARE_EVENT_CLASS(xen_mmu_pgd, | |||
| 352 | DEFINE_XEN_MMU_PGD_EVENT(xen_mmu_pgd_pin); | 352 | DEFINE_XEN_MMU_PGD_EVENT(xen_mmu_pgd_pin); |
| 353 | DEFINE_XEN_MMU_PGD_EVENT(xen_mmu_pgd_unpin); | 353 | DEFINE_XEN_MMU_PGD_EVENT(xen_mmu_pgd_unpin); |
| 354 | 354 | ||
| 355 | TRACE_EVENT(xen_mmu_flush_tlb_all, | ||
| 356 | TP_PROTO(int x), | ||
| 357 | TP_ARGS(x), | ||
| 358 | TP_STRUCT__entry(__array(char, x, 0)), | ||
| 359 | TP_fast_assign((void)x), | ||
| 360 | TP_printk("%s", "") | ||
| 361 | ); | ||
| 362 | |||
| 363 | TRACE_EVENT(xen_mmu_flush_tlb, | ||
| 364 | TP_PROTO(int x), | ||
| 365 | TP_ARGS(x), | ||
| 366 | TP_STRUCT__entry(__array(char, x, 0)), | ||
| 367 | TP_fast_assign((void)x), | ||
| 368 | TP_printk("%s", "") | ||
| 369 | ); | ||
| 370 | |||
| 371 | TRACE_EVENT(xen_mmu_flush_tlb_one_user, | 355 | TRACE_EVENT(xen_mmu_flush_tlb_one_user, |
| 372 | TP_PROTO(unsigned long addr), | 356 | TP_PROTO(unsigned long addr), |
| 373 | TP_ARGS(addr), | 357 | TP_ARGS(addr), |
diff --git a/include/uapi/linux/netfilter/nf_conntrack_tcp.h b/include/uapi/linux/netfilter/nf_conntrack_tcp.h index 74b91151d494..bcba72def817 100644 --- a/include/uapi/linux/netfilter/nf_conntrack_tcp.h +++ b/include/uapi/linux/netfilter/nf_conntrack_tcp.h | |||
| @@ -46,6 +46,9 @@ enum tcp_conntrack { | |||
| 46 | /* Marks possibility for expected RFC5961 challenge ACK */ | 46 | /* Marks possibility for expected RFC5961 challenge ACK */ |
| 47 | #define IP_CT_EXP_CHALLENGE_ACK 0x40 | 47 | #define IP_CT_EXP_CHALLENGE_ACK 0x40 |
| 48 | 48 | ||
| 49 | /* Simultaneous open initialized */ | ||
| 50 | #define IP_CT_TCP_SIMULTANEOUS_OPEN 0x80 | ||
| 51 | |||
| 49 | struct nf_ct_tcp_flags { | 52 | struct nf_ct_tcp_flags { |
| 50 | __u8 flags; | 53 | __u8 flags; |
| 51 | __u8 mask; | 54 | __u8 mask; |
diff --git a/include/uapi/linux/nl80211.h b/include/uapi/linux/nl80211.h index 9c3630146cec..271b93783d28 100644 --- a/include/uapi/linux/nl80211.h +++ b/include/uapi/linux/nl80211.h | |||
| @@ -2698,7 +2698,7 @@ enum nl80211_attrs { | |||
| 2698 | #define NL80211_ATTR_KEYS NL80211_ATTR_KEYS | 2698 | #define NL80211_ATTR_KEYS NL80211_ATTR_KEYS |
| 2699 | #define NL80211_ATTR_FEATURE_FLAGS NL80211_ATTR_FEATURE_FLAGS | 2699 | #define NL80211_ATTR_FEATURE_FLAGS NL80211_ATTR_FEATURE_FLAGS |
| 2700 | 2700 | ||
| 2701 | #define NL80211_WIPHY_NAME_MAXLEN 128 | 2701 | #define NL80211_WIPHY_NAME_MAXLEN 64 |
| 2702 | 2702 | ||
| 2703 | #define NL80211_MAX_SUPP_RATES 32 | 2703 | #define NL80211_MAX_SUPP_RATES 32 |
| 2704 | #define NL80211_MAX_SUPP_HT_RATES 77 | 2704 | #define NL80211_MAX_SUPP_HT_RATES 77 |
diff --git a/include/uapi/linux/ppp-ioctl.h b/include/uapi/linux/ppp-ioctl.h index b19a9c249b15..784c2e3e572e 100644 --- a/include/uapi/linux/ppp-ioctl.h +++ b/include/uapi/linux/ppp-ioctl.h | |||
| @@ -106,7 +106,7 @@ struct pppol2tp_ioc_stats { | |||
| 106 | #define PPPIOCGIDLE _IOR('t', 63, struct ppp_idle) /* get idle time */ | 106 | #define PPPIOCGIDLE _IOR('t', 63, struct ppp_idle) /* get idle time */ |
| 107 | #define PPPIOCNEWUNIT _IOWR('t', 62, int) /* create new ppp unit */ | 107 | #define PPPIOCNEWUNIT _IOWR('t', 62, int) /* create new ppp unit */ |
| 108 | #define PPPIOCATTACH _IOW('t', 61, int) /* attach to ppp unit */ | 108 | #define PPPIOCATTACH _IOW('t', 61, int) /* attach to ppp unit */ |
| 109 | #define PPPIOCDETACH _IOW('t', 60, int) /* detach from ppp unit/chan */ | 109 | #define PPPIOCDETACH _IOW('t', 60, int) /* obsolete, do not use */ |
| 110 | #define PPPIOCSMRRU _IOW('t', 59, int) /* set multilink MRU */ | 110 | #define PPPIOCSMRRU _IOW('t', 59, int) /* set multilink MRU */ |
| 111 | #define PPPIOCCONNECT _IOW('t', 58, int) /* connect channel to unit */ | 111 | #define PPPIOCCONNECT _IOW('t', 58, int) /* connect channel to unit */ |
| 112 | #define PPPIOCDISCONN _IO('t', 57) /* disconnect channel */ | 112 | #define PPPIOCDISCONN _IO('t', 57) /* disconnect channel */ |
diff --git a/include/uapi/linux/prctl.h b/include/uapi/linux/prctl.h index af5f8c2df87a..db9f15f5db04 100644 --- a/include/uapi/linux/prctl.h +++ b/include/uapi/linux/prctl.h | |||
| @@ -207,4 +207,16 @@ struct prctl_mm_map { | |||
| 207 | # define PR_SVE_VL_LEN_MASK 0xffff | 207 | # define PR_SVE_VL_LEN_MASK 0xffff |
| 208 | # define PR_SVE_VL_INHERIT (1 << 17) /* inherit across exec */ | 208 | # define PR_SVE_VL_INHERIT (1 << 17) /* inherit across exec */ |
| 209 | 209 | ||
| 210 | /* Per task speculation control */ | ||
| 211 | #define PR_GET_SPECULATION_CTRL 52 | ||
| 212 | #define PR_SET_SPECULATION_CTRL 53 | ||
| 213 | /* Speculation control variants */ | ||
| 214 | # define PR_SPEC_STORE_BYPASS 0 | ||
| 215 | /* Return and control values for PR_SET/GET_SPECULATION_CTRL */ | ||
| 216 | # define PR_SPEC_NOT_AFFECTED 0 | ||
| 217 | # define PR_SPEC_PRCTL (1UL << 0) | ||
| 218 | # define PR_SPEC_ENABLE (1UL << 1) | ||
| 219 | # define PR_SPEC_DISABLE (1UL << 2) | ||
| 220 | # define PR_SPEC_FORCE_DISABLE (1UL << 3) | ||
| 221 | |||
| 210 | #endif /* _LINUX_PRCTL_H */ | 222 | #endif /* _LINUX_PRCTL_H */ |
diff --git a/include/uapi/linux/seccomp.h b/include/uapi/linux/seccomp.h index 2a0bd9dd104d..9efc0e73d50b 100644 --- a/include/uapi/linux/seccomp.h +++ b/include/uapi/linux/seccomp.h | |||
| @@ -17,8 +17,9 @@ | |||
| 17 | #define SECCOMP_GET_ACTION_AVAIL 2 | 17 | #define SECCOMP_GET_ACTION_AVAIL 2 |
| 18 | 18 | ||
| 19 | /* Valid flags for SECCOMP_SET_MODE_FILTER */ | 19 | /* Valid flags for SECCOMP_SET_MODE_FILTER */ |
| 20 | #define SECCOMP_FILTER_FLAG_TSYNC 1 | 20 | #define SECCOMP_FILTER_FLAG_TSYNC (1UL << 0) |
| 21 | #define SECCOMP_FILTER_FLAG_LOG 2 | 21 | #define SECCOMP_FILTER_FLAG_LOG (1UL << 1) |
| 22 | #define SECCOMP_FILTER_FLAG_SPEC_ALLOW (1UL << 2) | ||
| 22 | 23 | ||
| 23 | /* | 24 | /* |
| 24 | * All BPF programs must return a 32-bit value. | 25 | * All BPF programs must return a 32-bit value. |
diff --git a/init/Kconfig b/init/Kconfig index f013afc74b11..18b151f0ddc1 100644 --- a/init/Kconfig +++ b/init/Kconfig | |||
| @@ -738,7 +738,7 @@ config CFS_BANDWIDTH | |||
| 738 | tasks running within the fair group scheduler. Groups with no limit | 738 | tasks running within the fair group scheduler. Groups with no limit |
| 739 | set are considered to be unconstrained and will run with no | 739 | set are considered to be unconstrained and will run with no |
| 740 | restriction. | 740 | restriction. |
| 741 | See tip/Documentation/scheduler/sched-bwc.txt for more information. | 741 | See Documentation/scheduler/sched-bwc.txt for more information. |
| 742 | 742 | ||
| 743 | config RT_GROUP_SCHED | 743 | config RT_GROUP_SCHED |
| 744 | bool "Group scheduling for SCHED_RR/FIFO" | 744 | bool "Group scheduling for SCHED_RR/FIFO" |
diff --git a/init/main.c b/init/main.c index fd37315835b4..3b4ada11ed52 100644 --- a/init/main.c +++ b/init/main.c | |||
| @@ -91,6 +91,7 @@ | |||
| 91 | #include <linux/cache.h> | 91 | #include <linux/cache.h> |
| 92 | #include <linux/rodata_test.h> | 92 | #include <linux/rodata_test.h> |
| 93 | #include <linux/jump_label.h> | 93 | #include <linux/jump_label.h> |
| 94 | #include <linux/mem_encrypt.h> | ||
| 94 | 95 | ||
| 95 | #include <asm/io.h> | 96 | #include <asm/io.h> |
| 96 | #include <asm/bugs.h> | 97 | #include <asm/bugs.h> |
| @@ -1363,14 +1363,17 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, | |||
| 1363 | 1363 | ||
| 1364 | if (addr) { | 1364 | if (addr) { |
| 1365 | if (addr & (shmlba - 1)) { | 1365 | if (addr & (shmlba - 1)) { |
| 1366 | /* | 1366 | if (shmflg & SHM_RND) { |
| 1367 | * Round down to the nearest multiple of shmlba. | 1367 | addr &= ~(shmlba - 1); /* round down */ |
| 1368 | * For sane do_mmap_pgoff() parameters, avoid | 1368 | |
| 1369 | * round downs that trigger nil-page and MAP_FIXED. | 1369 | /* |
| 1370 | */ | 1370 | * Ensure that the round-down is non-nil |
| 1371 | if ((shmflg & SHM_RND) && addr >= shmlba) | 1371 | * when remapping. This can happen for |
| 1372 | addr &= ~(shmlba - 1); | 1372 | * cases when addr < shmlba. |
| 1373 | else | 1373 | */ |
| 1374 | if (!addr && (shmflg & SHM_REMAP)) | ||
| 1375 | goto out; | ||
| 1376 | } else | ||
| 1374 | #ifndef __ARCH_FORCE_SHMLBA | 1377 | #ifndef __ARCH_FORCE_SHMLBA |
| 1375 | if (addr & ~PAGE_MASK) | 1378 | if (addr & ~PAGE_MASK) |
| 1376 | #endif | 1379 | #endif |
diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c index ba03ec39efb3..6ef6746a7871 100644 --- a/kernel/bpf/core.c +++ b/kernel/bpf/core.c | |||
| @@ -218,47 +218,84 @@ int bpf_prog_calc_tag(struct bpf_prog *fp) | |||
| 218 | return 0; | 218 | return 0; |
| 219 | } | 219 | } |
| 220 | 220 | ||
| 221 | static void bpf_adj_branches(struct bpf_prog *prog, u32 pos, u32 delta) | 221 | static int bpf_adj_delta_to_imm(struct bpf_insn *insn, u32 pos, u32 delta, |
| 222 | u32 curr, const bool probe_pass) | ||
| 222 | { | 223 | { |
| 224 | const s64 imm_min = S32_MIN, imm_max = S32_MAX; | ||
| 225 | s64 imm = insn->imm; | ||
| 226 | |||
| 227 | if (curr < pos && curr + imm + 1 > pos) | ||
| 228 | imm += delta; | ||
| 229 | else if (curr > pos + delta && curr + imm + 1 <= pos + delta) | ||
| 230 | imm -= delta; | ||
| 231 | if (imm < imm_min || imm > imm_max) | ||
| 232 | return -ERANGE; | ||
| 233 | if (!probe_pass) | ||
| 234 | insn->imm = imm; | ||
| 235 | return 0; | ||
| 236 | } | ||
| 237 | |||
| 238 | static int bpf_adj_delta_to_off(struct bpf_insn *insn, u32 pos, u32 delta, | ||
| 239 | u32 curr, const bool probe_pass) | ||
| 240 | { | ||
| 241 | const s32 off_min = S16_MIN, off_max = S16_MAX; | ||
| 242 | s32 off = insn->off; | ||
| 243 | |||
| 244 | if (curr < pos && curr + off + 1 > pos) | ||
| 245 | off += delta; | ||
| 246 | else if (curr > pos + delta && curr + off + 1 <= pos + delta) | ||
| 247 | off -= delta; | ||
| 248 | if (off < off_min || off > off_max) | ||
| 249 | return -ERANGE; | ||
| 250 | if (!probe_pass) | ||
| 251 | insn->off = off; | ||
| 252 | return 0; | ||
| 253 | } | ||
| 254 | |||
| 255 | static int bpf_adj_branches(struct bpf_prog *prog, u32 pos, u32 delta, | ||
| 256 | const bool probe_pass) | ||
| 257 | { | ||
| 258 | u32 i, insn_cnt = prog->len + (probe_pass ? delta : 0); | ||
| 223 | struct bpf_insn *insn = prog->insnsi; | 259 | struct bpf_insn *insn = prog->insnsi; |
| 224 | u32 i, insn_cnt = prog->len; | 260 | int ret = 0; |
| 225 | bool pseudo_call; | ||
| 226 | u8 code; | ||
| 227 | int off; | ||
| 228 | 261 | ||
| 229 | for (i = 0; i < insn_cnt; i++, insn++) { | 262 | for (i = 0; i < insn_cnt; i++, insn++) { |
| 263 | u8 code; | ||
| 264 | |||
| 265 | /* In the probing pass we still operate on the original, | ||
| 266 | * unpatched image in order to check overflows before we | ||
| 267 | * do any other adjustments. Therefore skip the patchlet. | ||
| 268 | */ | ||
| 269 | if (probe_pass && i == pos) { | ||
| 270 | i += delta + 1; | ||
| 271 | insn++; | ||
| 272 | } | ||
| 230 | code = insn->code; | 273 | code = insn->code; |
| 231 | if (BPF_CLASS(code) != BPF_JMP) | 274 | if (BPF_CLASS(code) != BPF_JMP || |
| 232 | continue; | 275 | BPF_OP(code) == BPF_EXIT) |
| 233 | if (BPF_OP(code) == BPF_EXIT) | ||
| 234 | continue; | 276 | continue; |
| 277 | /* Adjust offset of jmps if we cross patch boundaries. */ | ||
| 235 | if (BPF_OP(code) == BPF_CALL) { | 278 | if (BPF_OP(code) == BPF_CALL) { |
| 236 | if (insn->src_reg == BPF_PSEUDO_CALL) | 279 | if (insn->src_reg != BPF_PSEUDO_CALL) |
| 237 | pseudo_call = true; | ||
| 238 | else | ||
| 239 | continue; | 280 | continue; |
| 281 | ret = bpf_adj_delta_to_imm(insn, pos, delta, i, | ||
| 282 | probe_pass); | ||
| 240 | } else { | 283 | } else { |
| 241 | pseudo_call = false; | 284 | ret = bpf_adj_delta_to_off(insn, pos, delta, i, |
| 285 | probe_pass); | ||
| 242 | } | 286 | } |
| 243 | off = pseudo_call ? insn->imm : insn->off; | 287 | if (ret) |
| 244 | 288 | break; | |
| 245 | /* Adjust offset of jmps if we cross boundaries. */ | ||
| 246 | if (i < pos && i + off + 1 > pos) | ||
| 247 | off += delta; | ||
| 248 | else if (i > pos + delta && i + off + 1 <= pos + delta) | ||
| 249 | off -= delta; | ||
| 250 | |||
| 251 | if (pseudo_call) | ||
| 252 | insn->imm = off; | ||
| 253 | else | ||
| 254 | insn->off = off; | ||
| 255 | } | 289 | } |
| 290 | |||
| 291 | return ret; | ||
| 256 | } | 292 | } |
| 257 | 293 | ||
| 258 | struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off, | 294 | struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off, |
| 259 | const struct bpf_insn *patch, u32 len) | 295 | const struct bpf_insn *patch, u32 len) |
| 260 | { | 296 | { |
| 261 | u32 insn_adj_cnt, insn_rest, insn_delta = len - 1; | 297 | u32 insn_adj_cnt, insn_rest, insn_delta = len - 1; |
| 298 | const u32 cnt_max = S16_MAX; | ||
| 262 | struct bpf_prog *prog_adj; | 299 | struct bpf_prog *prog_adj; |
| 263 | 300 | ||
| 264 | /* Since our patchlet doesn't expand the image, we're done. */ | 301 | /* Since our patchlet doesn't expand the image, we're done. */ |
| @@ -269,6 +306,15 @@ struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off, | |||
| 269 | 306 | ||
| 270 | insn_adj_cnt = prog->len + insn_delta; | 307 | insn_adj_cnt = prog->len + insn_delta; |
| 271 | 308 | ||
| 309 | /* Reject anything that would potentially let the insn->off | ||
| 310 | * target overflow when we have excessive program expansions. | ||
| 311 | * We need to probe here before we do any reallocation where | ||
| 312 | * we afterwards may not fail anymore. | ||
| 313 | */ | ||
| 314 | if (insn_adj_cnt > cnt_max && | ||
| 315 | bpf_adj_branches(prog, off, insn_delta, true)) | ||
| 316 | return NULL; | ||
| 317 | |||
| 272 | /* Several new instructions need to be inserted. Make room | 318 | /* Several new instructions need to be inserted. Make room |
| 273 | * for them. Likely, there's no need for a new allocation as | 319 | * for them. Likely, there's no need for a new allocation as |
| 274 | * last page could have large enough tailroom. | 320 | * last page could have large enough tailroom. |
| @@ -294,7 +340,11 @@ struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off, | |||
| 294 | sizeof(*patch) * insn_rest); | 340 | sizeof(*patch) * insn_rest); |
| 295 | memcpy(prog_adj->insnsi + off, patch, sizeof(*patch) * len); | 341 | memcpy(prog_adj->insnsi + off, patch, sizeof(*patch) * len); |
| 296 | 342 | ||
| 297 | bpf_adj_branches(prog_adj, off, insn_delta); | 343 | /* We are guaranteed to not fail at this point, otherwise |
| 344 | * the ship has sailed to reverse to the original state. An | ||
| 345 | * overflow cannot happen at this point. | ||
| 346 | */ | ||
| 347 | BUG_ON(bpf_adj_branches(prog_adj, off, insn_delta, false)); | ||
| 298 | 348 | ||
| 299 | return prog_adj; | 349 | return prog_adj; |
| 300 | } | 350 | } |
diff --git a/kernel/bpf/sockmap.c b/kernel/bpf/sockmap.c index 098eca568c2b..95a84b2f10ce 100644 --- a/kernel/bpf/sockmap.c +++ b/kernel/bpf/sockmap.c | |||
| @@ -1703,11 +1703,11 @@ static int sock_map_ctx_update_elem(struct bpf_sock_ops_kern *skops, | |||
| 1703 | * we increment the refcnt. If this is the case abort with an | 1703 | * we increment the refcnt. If this is the case abort with an |
| 1704 | * error. | 1704 | * error. |
| 1705 | */ | 1705 | */ |
| 1706 | verdict = bpf_prog_inc_not_zero(stab->bpf_verdict); | 1706 | verdict = bpf_prog_inc_not_zero(verdict); |
| 1707 | if (IS_ERR(verdict)) | 1707 | if (IS_ERR(verdict)) |
| 1708 | return PTR_ERR(verdict); | 1708 | return PTR_ERR(verdict); |
| 1709 | 1709 | ||
| 1710 | parse = bpf_prog_inc_not_zero(stab->bpf_parse); | 1710 | parse = bpf_prog_inc_not_zero(parse); |
| 1711 | if (IS_ERR(parse)) { | 1711 | if (IS_ERR(parse)) { |
| 1712 | bpf_prog_put(verdict); | 1712 | bpf_prog_put(verdict); |
| 1713 | return PTR_ERR(parse); | 1713 | return PTR_ERR(parse); |
| @@ -1715,12 +1715,12 @@ static int sock_map_ctx_update_elem(struct bpf_sock_ops_kern *skops, | |||
| 1715 | } | 1715 | } |
| 1716 | 1716 | ||
| 1717 | if (tx_msg) { | 1717 | if (tx_msg) { |
| 1718 | tx_msg = bpf_prog_inc_not_zero(stab->bpf_tx_msg); | 1718 | tx_msg = bpf_prog_inc_not_zero(tx_msg); |
| 1719 | if (IS_ERR(tx_msg)) { | 1719 | if (IS_ERR(tx_msg)) { |
| 1720 | if (verdict) | 1720 | if (parse && verdict) { |
| 1721 | bpf_prog_put(verdict); | ||
| 1722 | if (parse) | ||
| 1723 | bpf_prog_put(parse); | 1721 | bpf_prog_put(parse); |
| 1722 | bpf_prog_put(verdict); | ||
| 1723 | } | ||
| 1724 | return PTR_ERR(tx_msg); | 1724 | return PTR_ERR(tx_msg); |
| 1725 | } | 1725 | } |
| 1726 | } | 1726 | } |
| @@ -1805,10 +1805,10 @@ static int sock_map_ctx_update_elem(struct bpf_sock_ops_kern *skops, | |||
| 1805 | out_free: | 1805 | out_free: |
| 1806 | smap_release_sock(psock, sock); | 1806 | smap_release_sock(psock, sock); |
| 1807 | out_progs: | 1807 | out_progs: |
| 1808 | if (verdict) | 1808 | if (parse && verdict) { |
| 1809 | bpf_prog_put(verdict); | ||
| 1810 | if (parse) | ||
| 1811 | bpf_prog_put(parse); | 1809 | bpf_prog_put(parse); |
| 1810 | bpf_prog_put(verdict); | ||
| 1811 | } | ||
| 1812 | if (tx_msg) | 1812 | if (tx_msg) |
| 1813 | bpf_prog_put(tx_msg); | 1813 | bpf_prog_put(tx_msg); |
| 1814 | write_unlock_bh(&sock->sk_callback_lock); | 1814 | write_unlock_bh(&sock->sk_callback_lock); |
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index 5dd1dcb902bf..1904e814f282 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c | |||
| @@ -156,7 +156,29 @@ struct bpf_verifier_stack_elem { | |||
| 156 | #define BPF_COMPLEXITY_LIMIT_INSNS 131072 | 156 | #define BPF_COMPLEXITY_LIMIT_INSNS 131072 |
| 157 | #define BPF_COMPLEXITY_LIMIT_STACK 1024 | 157 | #define BPF_COMPLEXITY_LIMIT_STACK 1024 |
| 158 | 158 | ||
| 159 | #define BPF_MAP_PTR_POISON ((void *)0xeB9F + POISON_POINTER_DELTA) | 159 | #define BPF_MAP_PTR_UNPRIV 1UL |
| 160 | #define BPF_MAP_PTR_POISON ((void *)((0xeB9FUL << 1) + \ | ||
| 161 | POISON_POINTER_DELTA)) | ||
| 162 | #define BPF_MAP_PTR(X) ((struct bpf_map *)((X) & ~BPF_MAP_PTR_UNPRIV)) | ||
| 163 | |||
| 164 | static bool bpf_map_ptr_poisoned(const struct bpf_insn_aux_data *aux) | ||
| 165 | { | ||
| 166 | return BPF_MAP_PTR(aux->map_state) == BPF_MAP_PTR_POISON; | ||
| 167 | } | ||
| 168 | |||
| 169 | static bool bpf_map_ptr_unpriv(const struct bpf_insn_aux_data *aux) | ||
| 170 | { | ||
| 171 | return aux->map_state & BPF_MAP_PTR_UNPRIV; | ||
| 172 | } | ||
| 173 | |||
| 174 | static void bpf_map_ptr_store(struct bpf_insn_aux_data *aux, | ||
| 175 | const struct bpf_map *map, bool unpriv) | ||
| 176 | { | ||
| 177 | BUILD_BUG_ON((unsigned long)BPF_MAP_PTR_POISON & BPF_MAP_PTR_UNPRIV); | ||
| 178 | unpriv |= bpf_map_ptr_unpriv(aux); | ||
| 179 | aux->map_state = (unsigned long)map | | ||
| 180 | (unpriv ? BPF_MAP_PTR_UNPRIV : 0UL); | ||
| 181 | } | ||
| 160 | 182 | ||
| 161 | struct bpf_call_arg_meta { | 183 | struct bpf_call_arg_meta { |
| 162 | struct bpf_map *map_ptr; | 184 | struct bpf_map *map_ptr; |
| @@ -978,7 +1000,7 @@ static bool register_is_null(struct bpf_reg_state *reg) | |||
| 978 | */ | 1000 | */ |
| 979 | static int check_stack_write(struct bpf_verifier_env *env, | 1001 | static int check_stack_write(struct bpf_verifier_env *env, |
| 980 | struct bpf_func_state *state, /* func where register points to */ | 1002 | struct bpf_func_state *state, /* func where register points to */ |
| 981 | int off, int size, int value_regno) | 1003 | int off, int size, int value_regno, int insn_idx) |
| 982 | { | 1004 | { |
| 983 | struct bpf_func_state *cur; /* state of the current function */ | 1005 | struct bpf_func_state *cur; /* state of the current function */ |
| 984 | int i, slot = -off - 1, spi = slot / BPF_REG_SIZE, err; | 1006 | int i, slot = -off - 1, spi = slot / BPF_REG_SIZE, err; |
| @@ -1017,8 +1039,33 @@ static int check_stack_write(struct bpf_verifier_env *env, | |||
| 1017 | state->stack[spi].spilled_ptr = cur->regs[value_regno]; | 1039 | state->stack[spi].spilled_ptr = cur->regs[value_regno]; |
| 1018 | state->stack[spi].spilled_ptr.live |= REG_LIVE_WRITTEN; | 1040 | state->stack[spi].spilled_ptr.live |= REG_LIVE_WRITTEN; |
| 1019 | 1041 | ||
| 1020 | for (i = 0; i < BPF_REG_SIZE; i++) | 1042 | for (i = 0; i < BPF_REG_SIZE; i++) { |
| 1043 | if (state->stack[spi].slot_type[i] == STACK_MISC && | ||
| 1044 | !env->allow_ptr_leaks) { | ||
| 1045 | int *poff = &env->insn_aux_data[insn_idx].sanitize_stack_off; | ||
| 1046 | int soff = (-spi - 1) * BPF_REG_SIZE; | ||
| 1047 | |||
| 1048 | /* detected reuse of integer stack slot with a pointer | ||
| 1049 | * which means either llvm is reusing stack slot or | ||
| 1050 | * an attacker is trying to exploit CVE-2018-3639 | ||
| 1051 | * (speculative store bypass) | ||
| 1052 | * Have to sanitize that slot with preemptive | ||
| 1053 | * store of zero. | ||
| 1054 | */ | ||
| 1055 | if (*poff && *poff != soff) { | ||
| 1056 | /* disallow programs where single insn stores | ||
| 1057 | * into two different stack slots, since verifier | ||
| 1058 | * cannot sanitize them | ||
| 1059 | */ | ||
| 1060 | verbose(env, | ||
| 1061 | "insn %d cannot access two stack slots fp%d and fp%d", | ||
| 1062 | insn_idx, *poff, soff); | ||
| 1063 | return -EINVAL; | ||
| 1064 | } | ||
| 1065 | *poff = soff; | ||
| 1066 | } | ||
| 1021 | state->stack[spi].slot_type[i] = STACK_SPILL; | 1067 | state->stack[spi].slot_type[i] = STACK_SPILL; |
| 1068 | } | ||
| 1022 | } else { | 1069 | } else { |
| 1023 | u8 type = STACK_MISC; | 1070 | u8 type = STACK_MISC; |
| 1024 | 1071 | ||
| @@ -1694,7 +1741,7 @@ static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regn | |||
| 1694 | 1741 | ||
| 1695 | if (t == BPF_WRITE) | 1742 | if (t == BPF_WRITE) |
| 1696 | err = check_stack_write(env, state, off, size, | 1743 | err = check_stack_write(env, state, off, size, |
| 1697 | value_regno); | 1744 | value_regno, insn_idx); |
| 1698 | else | 1745 | else |
| 1699 | err = check_stack_read(env, state, off, size, | 1746 | err = check_stack_read(env, state, off, size, |
| 1700 | value_regno); | 1747 | value_regno); |
| @@ -2333,6 +2380,29 @@ static int prepare_func_exit(struct bpf_verifier_env *env, int *insn_idx) | |||
| 2333 | return 0; | 2380 | return 0; |
| 2334 | } | 2381 | } |
| 2335 | 2382 | ||
| 2383 | static int | ||
| 2384 | record_func_map(struct bpf_verifier_env *env, struct bpf_call_arg_meta *meta, | ||
| 2385 | int func_id, int insn_idx) | ||
| 2386 | { | ||
| 2387 | struct bpf_insn_aux_data *aux = &env->insn_aux_data[insn_idx]; | ||
| 2388 | |||
| 2389 | if (func_id != BPF_FUNC_tail_call && | ||
| 2390 | func_id != BPF_FUNC_map_lookup_elem) | ||
| 2391 | return 0; | ||
| 2392 | if (meta->map_ptr == NULL) { | ||
| 2393 | verbose(env, "kernel subsystem misconfigured verifier\n"); | ||
| 2394 | return -EINVAL; | ||
| 2395 | } | ||
| 2396 | |||
| 2397 | if (!BPF_MAP_PTR(aux->map_state)) | ||
| 2398 | bpf_map_ptr_store(aux, meta->map_ptr, | ||
| 2399 | meta->map_ptr->unpriv_array); | ||
| 2400 | else if (BPF_MAP_PTR(aux->map_state) != meta->map_ptr) | ||
| 2401 | bpf_map_ptr_store(aux, BPF_MAP_PTR_POISON, | ||
| 2402 | meta->map_ptr->unpriv_array); | ||
| 2403 | return 0; | ||
| 2404 | } | ||
| 2405 | |||
| 2336 | static int check_helper_call(struct bpf_verifier_env *env, int func_id, int insn_idx) | 2406 | static int check_helper_call(struct bpf_verifier_env *env, int func_id, int insn_idx) |
| 2337 | { | 2407 | { |
| 2338 | const struct bpf_func_proto *fn = NULL; | 2408 | const struct bpf_func_proto *fn = NULL; |
| @@ -2387,13 +2457,6 @@ static int check_helper_call(struct bpf_verifier_env *env, int func_id, int insn | |||
| 2387 | err = check_func_arg(env, BPF_REG_2, fn->arg2_type, &meta); | 2457 | err = check_func_arg(env, BPF_REG_2, fn->arg2_type, &meta); |
| 2388 | if (err) | 2458 | if (err) |
| 2389 | return err; | 2459 | return err; |
| 2390 | if (func_id == BPF_FUNC_tail_call) { | ||
| 2391 | if (meta.map_ptr == NULL) { | ||
| 2392 | verbose(env, "verifier bug\n"); | ||
| 2393 | return -EINVAL; | ||
| 2394 | } | ||
| 2395 | env->insn_aux_data[insn_idx].map_ptr = meta.map_ptr; | ||
| 2396 | } | ||
| 2397 | err = check_func_arg(env, BPF_REG_3, fn->arg3_type, &meta); | 2460 | err = check_func_arg(env, BPF_REG_3, fn->arg3_type, &meta); |
| 2398 | if (err) | 2461 | if (err) |
| 2399 | return err; | 2462 | return err; |
| @@ -2404,6 +2467,10 @@ static int check_helper_call(struct bpf_verifier_env *env, int func_id, int insn | |||
| 2404 | if (err) | 2467 | if (err) |
| 2405 | return err; | 2468 | return err; |
| 2406 | 2469 | ||
| 2470 | err = record_func_map(env, &meta, func_id, insn_idx); | ||
| 2471 | if (err) | ||
| 2472 | return err; | ||
| 2473 | |||
| 2407 | /* Mark slots with STACK_MISC in case of raw mode, stack offset | 2474 | /* Mark slots with STACK_MISC in case of raw mode, stack offset |
| 2408 | * is inferred from register state. | 2475 | * is inferred from register state. |
| 2409 | */ | 2476 | */ |
| @@ -2428,8 +2495,6 @@ static int check_helper_call(struct bpf_verifier_env *env, int func_id, int insn | |||
| 2428 | } else if (fn->ret_type == RET_VOID) { | 2495 | } else if (fn->ret_type == RET_VOID) { |
| 2429 | regs[BPF_REG_0].type = NOT_INIT; | 2496 | regs[BPF_REG_0].type = NOT_INIT; |
| 2430 | } else if (fn->ret_type == RET_PTR_TO_MAP_VALUE_OR_NULL) { | 2497 | } else if (fn->ret_type == RET_PTR_TO_MAP_VALUE_OR_NULL) { |
| 2431 | struct bpf_insn_aux_data *insn_aux; | ||
| 2432 | |||
| 2433 | regs[BPF_REG_0].type = PTR_TO_MAP_VALUE_OR_NULL; | 2498 | regs[BPF_REG_0].type = PTR_TO_MAP_VALUE_OR_NULL; |
| 2434 | /* There is no offset yet applied, variable or fixed */ | 2499 | /* There is no offset yet applied, variable or fixed */ |
| 2435 | mark_reg_known_zero(env, regs, BPF_REG_0); | 2500 | mark_reg_known_zero(env, regs, BPF_REG_0); |
| @@ -2445,11 +2510,6 @@ static int check_helper_call(struct bpf_verifier_env *env, int func_id, int insn | |||
| 2445 | } | 2510 | } |
| 2446 | regs[BPF_REG_0].map_ptr = meta.map_ptr; | 2511 | regs[BPF_REG_0].map_ptr = meta.map_ptr; |
| 2447 | regs[BPF_REG_0].id = ++env->id_gen; | 2512 | regs[BPF_REG_0].id = ++env->id_gen; |
| 2448 | insn_aux = &env->insn_aux_data[insn_idx]; | ||
| 2449 | if (!insn_aux->map_ptr) | ||
| 2450 | insn_aux->map_ptr = meta.map_ptr; | ||
| 2451 | else if (insn_aux->map_ptr != meta.map_ptr) | ||
| 2452 | insn_aux->map_ptr = BPF_MAP_PTR_POISON; | ||
| 2453 | } else { | 2513 | } else { |
| 2454 | verbose(env, "unknown return type %d of func %s#%d\n", | 2514 | verbose(env, "unknown return type %d of func %s#%d\n", |
| 2455 | fn->ret_type, func_id_name(func_id), func_id); | 2515 | fn->ret_type, func_id_name(func_id), func_id); |
| @@ -5169,6 +5229,34 @@ static int convert_ctx_accesses(struct bpf_verifier_env *env) | |||
| 5169 | else | 5229 | else |
| 5170 | continue; | 5230 | continue; |
| 5171 | 5231 | ||
| 5232 | if (type == BPF_WRITE && | ||
| 5233 | env->insn_aux_data[i + delta].sanitize_stack_off) { | ||
| 5234 | struct bpf_insn patch[] = { | ||
| 5235 | /* Sanitize suspicious stack slot with zero. | ||
| 5236 | * There are no memory dependencies for this store, | ||
| 5237 | * since it's only using frame pointer and immediate | ||
| 5238 | * constant of zero | ||
| 5239 | */ | ||
| 5240 | BPF_ST_MEM(BPF_DW, BPF_REG_FP, | ||
| 5241 | env->insn_aux_data[i + delta].sanitize_stack_off, | ||
| 5242 | 0), | ||
| 5243 | /* the original STX instruction will immediately | ||
| 5244 | * overwrite the same stack slot with appropriate value | ||
| 5245 | */ | ||
| 5246 | *insn, | ||
| 5247 | }; | ||
| 5248 | |||
| 5249 | cnt = ARRAY_SIZE(patch); | ||
| 5250 | new_prog = bpf_patch_insn_data(env, i + delta, patch, cnt); | ||
| 5251 | if (!new_prog) | ||
| 5252 | return -ENOMEM; | ||
| 5253 | |||
| 5254 | delta += cnt - 1; | ||
| 5255 | env->prog = new_prog; | ||
| 5256 | insn = new_prog->insnsi + i + delta; | ||
| 5257 | continue; | ||
| 5258 | } | ||
| 5259 | |||
| 5172 | if (env->insn_aux_data[i + delta].ptr_type != PTR_TO_CTX) | 5260 | if (env->insn_aux_data[i + delta].ptr_type != PTR_TO_CTX) |
| 5173 | continue; | 5261 | continue; |
| 5174 | 5262 | ||
| @@ -5417,6 +5505,7 @@ static int fixup_bpf_calls(struct bpf_verifier_env *env) | |||
| 5417 | struct bpf_insn *insn = prog->insnsi; | 5505 | struct bpf_insn *insn = prog->insnsi; |
| 5418 | const struct bpf_func_proto *fn; | 5506 | const struct bpf_func_proto *fn; |
| 5419 | const int insn_cnt = prog->len; | 5507 | const int insn_cnt = prog->len; |
| 5508 | struct bpf_insn_aux_data *aux; | ||
| 5420 | struct bpf_insn insn_buf[16]; | 5509 | struct bpf_insn insn_buf[16]; |
| 5421 | struct bpf_prog *new_prog; | 5510 | struct bpf_prog *new_prog; |
| 5422 | struct bpf_map *map_ptr; | 5511 | struct bpf_map *map_ptr; |
| @@ -5491,19 +5580,22 @@ static int fixup_bpf_calls(struct bpf_verifier_env *env) | |||
| 5491 | insn->imm = 0; | 5580 | insn->imm = 0; |
| 5492 | insn->code = BPF_JMP | BPF_TAIL_CALL; | 5581 | insn->code = BPF_JMP | BPF_TAIL_CALL; |
| 5493 | 5582 | ||
| 5583 | aux = &env->insn_aux_data[i + delta]; | ||
| 5584 | if (!bpf_map_ptr_unpriv(aux)) | ||
| 5585 | continue; | ||
| 5586 | |||
| 5494 | /* instead of changing every JIT dealing with tail_call | 5587 | /* instead of changing every JIT dealing with tail_call |
| 5495 | * emit two extra insns: | 5588 | * emit two extra insns: |
| 5496 | * if (index >= max_entries) goto out; | 5589 | * if (index >= max_entries) goto out; |
| 5497 | * index &= array->index_mask; | 5590 | * index &= array->index_mask; |
| 5498 | * to avoid out-of-bounds cpu speculation | 5591 | * to avoid out-of-bounds cpu speculation |
| 5499 | */ | 5592 | */ |
| 5500 | map_ptr = env->insn_aux_data[i + delta].map_ptr; | 5593 | if (bpf_map_ptr_poisoned(aux)) { |
| 5501 | if (map_ptr == BPF_MAP_PTR_POISON) { | ||
| 5502 | verbose(env, "tail_call abusing map_ptr\n"); | 5594 | verbose(env, "tail_call abusing map_ptr\n"); |
| 5503 | return -EINVAL; | 5595 | return -EINVAL; |
| 5504 | } | 5596 | } |
| 5505 | if (!map_ptr->unpriv_array) | 5597 | |
| 5506 | continue; | 5598 | map_ptr = BPF_MAP_PTR(aux->map_state); |
| 5507 | insn_buf[0] = BPF_JMP_IMM(BPF_JGE, BPF_REG_3, | 5599 | insn_buf[0] = BPF_JMP_IMM(BPF_JGE, BPF_REG_3, |
| 5508 | map_ptr->max_entries, 2); | 5600 | map_ptr->max_entries, 2); |
| 5509 | insn_buf[1] = BPF_ALU32_IMM(BPF_AND, BPF_REG_3, | 5601 | insn_buf[1] = BPF_ALU32_IMM(BPF_AND, BPF_REG_3, |
| @@ -5527,9 +5619,12 @@ static int fixup_bpf_calls(struct bpf_verifier_env *env) | |||
| 5527 | */ | 5619 | */ |
| 5528 | if (prog->jit_requested && BITS_PER_LONG == 64 && | 5620 | if (prog->jit_requested && BITS_PER_LONG == 64 && |
| 5529 | insn->imm == BPF_FUNC_map_lookup_elem) { | 5621 | insn->imm == BPF_FUNC_map_lookup_elem) { |
| 5530 | map_ptr = env->insn_aux_data[i + delta].map_ptr; | 5622 | aux = &env->insn_aux_data[i + delta]; |
| 5531 | if (map_ptr == BPF_MAP_PTR_POISON || | 5623 | if (bpf_map_ptr_poisoned(aux)) |
| 5532 | !map_ptr->ops->map_gen_lookup) | 5624 | goto patch_call_imm; |
| 5625 | |||
| 5626 | map_ptr = BPF_MAP_PTR(aux->map_state); | ||
| 5627 | if (!map_ptr->ops->map_gen_lookup) | ||
| 5533 | goto patch_call_imm; | 5628 | goto patch_call_imm; |
| 5534 | 5629 | ||
| 5535 | cnt = map_ptr->ops->map_gen_lookup(map_ptr, insn_buf); | 5630 | cnt = map_ptr->ops->map_gen_lookup(map_ptr, insn_buf); |
diff --git a/kernel/kthread.c b/kernel/kthread.c index 2017a39ab490..481951bf091d 100644 --- a/kernel/kthread.c +++ b/kernel/kthread.c | |||
| @@ -193,7 +193,7 @@ EXPORT_SYMBOL_GPL(kthread_parkme); | |||
| 193 | 193 | ||
| 194 | void kthread_park_complete(struct task_struct *k) | 194 | void kthread_park_complete(struct task_struct *k) |
| 195 | { | 195 | { |
| 196 | complete(&to_kthread(k)->parked); | 196 | complete_all(&to_kthread(k)->parked); |
| 197 | } | 197 | } |
| 198 | 198 | ||
| 199 | static int kthread(void *_create) | 199 | static int kthread(void *_create) |
| @@ -459,6 +459,7 @@ void kthread_unpark(struct task_struct *k) | |||
| 459 | if (test_bit(KTHREAD_IS_PER_CPU, &kthread->flags)) | 459 | if (test_bit(KTHREAD_IS_PER_CPU, &kthread->flags)) |
| 460 | __kthread_bind(k, kthread->cpu, TASK_PARKED); | 460 | __kthread_bind(k, kthread->cpu, TASK_PARKED); |
| 461 | 461 | ||
| 462 | reinit_completion(&kthread->parked); | ||
| 462 | clear_bit(KTHREAD_SHOULD_PARK, &kthread->flags); | 463 | clear_bit(KTHREAD_SHOULD_PARK, &kthread->flags); |
| 463 | wake_up_state(k, TASK_PARKED); | 464 | wake_up_state(k, TASK_PARKED); |
| 464 | } | 465 | } |
| @@ -483,9 +484,6 @@ int kthread_park(struct task_struct *k) | |||
| 483 | if (WARN_ON(k->flags & PF_EXITING)) | 484 | if (WARN_ON(k->flags & PF_EXITING)) |
| 484 | return -ENOSYS; | 485 | return -ENOSYS; |
| 485 | 486 | ||
| 486 | if (WARN_ON_ONCE(test_bit(KTHREAD_SHOULD_PARK, &kthread->flags))) | ||
| 487 | return -EBUSY; | ||
| 488 | |||
| 489 | set_bit(KTHREAD_SHOULD_PARK, &kthread->flags); | 487 | set_bit(KTHREAD_SHOULD_PARK, &kthread->flags); |
| 490 | if (k != current) { | 488 | if (k != current) { |
| 491 | wake_up_process(k); | 489 | wake_up_process(k); |
diff --git a/kernel/locking/rwsem-xadd.c b/kernel/locking/rwsem-xadd.c index e795908f3607..a90336779375 100644 --- a/kernel/locking/rwsem-xadd.c +++ b/kernel/locking/rwsem-xadd.c | |||
| @@ -352,16 +352,15 @@ static inline bool rwsem_can_spin_on_owner(struct rw_semaphore *sem) | |||
| 352 | struct task_struct *owner; | 352 | struct task_struct *owner; |
| 353 | bool ret = true; | 353 | bool ret = true; |
| 354 | 354 | ||
| 355 | BUILD_BUG_ON(!rwsem_has_anonymous_owner(RWSEM_OWNER_UNKNOWN)); | ||
| 356 | |||
| 355 | if (need_resched()) | 357 | if (need_resched()) |
| 356 | return false; | 358 | return false; |
| 357 | 359 | ||
| 358 | rcu_read_lock(); | 360 | rcu_read_lock(); |
| 359 | owner = READ_ONCE(sem->owner); | 361 | owner = READ_ONCE(sem->owner); |
| 360 | if (!rwsem_owner_is_writer(owner)) { | 362 | if (!owner || !is_rwsem_owner_spinnable(owner)) { |
| 361 | /* | 363 | ret = !owner; /* !owner is spinnable */ |
| 362 | * Don't spin if the rwsem is readers owned. | ||
| 363 | */ | ||
| 364 | ret = !rwsem_owner_is_reader(owner); | ||
| 365 | goto done; | 364 | goto done; |
| 366 | } | 365 | } |
| 367 | 366 | ||
| @@ -382,11 +381,11 @@ static noinline bool rwsem_spin_on_owner(struct rw_semaphore *sem) | |||
| 382 | { | 381 | { |
| 383 | struct task_struct *owner = READ_ONCE(sem->owner); | 382 | struct task_struct *owner = READ_ONCE(sem->owner); |
| 384 | 383 | ||
| 385 | if (!rwsem_owner_is_writer(owner)) | 384 | if (!is_rwsem_owner_spinnable(owner)) |
| 386 | goto out; | 385 | return false; |
| 387 | 386 | ||
| 388 | rcu_read_lock(); | 387 | rcu_read_lock(); |
| 389 | while (sem->owner == owner) { | 388 | while (owner && (READ_ONCE(sem->owner) == owner)) { |
| 390 | /* | 389 | /* |
| 391 | * Ensure we emit the owner->on_cpu, dereference _after_ | 390 | * Ensure we emit the owner->on_cpu, dereference _after_ |
| 392 | * checking sem->owner still matches owner, if that fails, | 391 | * checking sem->owner still matches owner, if that fails, |
| @@ -408,12 +407,12 @@ static noinline bool rwsem_spin_on_owner(struct rw_semaphore *sem) | |||
| 408 | cpu_relax(); | 407 | cpu_relax(); |
| 409 | } | 408 | } |
| 410 | rcu_read_unlock(); | 409 | rcu_read_unlock(); |
| 411 | out: | 410 | |
| 412 | /* | 411 | /* |
| 413 | * If there is a new owner or the owner is not set, we continue | 412 | * If there is a new owner or the owner is not set, we continue |
| 414 | * spinning. | 413 | * spinning. |
| 415 | */ | 414 | */ |
| 416 | return !rwsem_owner_is_reader(READ_ONCE(sem->owner)); | 415 | return is_rwsem_owner_spinnable(READ_ONCE(sem->owner)); |
| 417 | } | 416 | } |
| 418 | 417 | ||
| 419 | static bool rwsem_optimistic_spin(struct rw_semaphore *sem) | 418 | static bool rwsem_optimistic_spin(struct rw_semaphore *sem) |
diff --git a/kernel/locking/rwsem.c b/kernel/locking/rwsem.c index 30465a2f2b6c..bc1e507be9ff 100644 --- a/kernel/locking/rwsem.c +++ b/kernel/locking/rwsem.c | |||
| @@ -221,5 +221,3 @@ void up_read_non_owner(struct rw_semaphore *sem) | |||
| 221 | EXPORT_SYMBOL(up_read_non_owner); | 221 | EXPORT_SYMBOL(up_read_non_owner); |
| 222 | 222 | ||
| 223 | #endif | 223 | #endif |
| 224 | |||
| 225 | |||
diff --git a/kernel/locking/rwsem.h b/kernel/locking/rwsem.h index a17cba8d94bb..b9d0e72aa80f 100644 --- a/kernel/locking/rwsem.h +++ b/kernel/locking/rwsem.h | |||
| @@ -1,20 +1,24 @@ | |||
| 1 | /* SPDX-License-Identifier: GPL-2.0 */ | 1 | /* SPDX-License-Identifier: GPL-2.0 */ |
| 2 | /* | 2 | /* |
| 3 | * The owner field of the rw_semaphore structure will be set to | 3 | * The owner field of the rw_semaphore structure will be set to |
| 4 | * RWSEM_READ_OWNED when a reader grabs the lock. A writer will clear | 4 | * RWSEM_READER_OWNED when a reader grabs the lock. A writer will clear |
| 5 | * the owner field when it unlocks. A reader, on the other hand, will | 5 | * the owner field when it unlocks. A reader, on the other hand, will |
| 6 | * not touch the owner field when it unlocks. | 6 | * not touch the owner field when it unlocks. |
| 7 | * | 7 | * |
| 8 | * In essence, the owner field now has the following 3 states: | 8 | * In essence, the owner field now has the following 4 states: |
| 9 | * 1) 0 | 9 | * 1) 0 |
| 10 | * - lock is free or the owner hasn't set the field yet | 10 | * - lock is free or the owner hasn't set the field yet |
| 11 | * 2) RWSEM_READER_OWNED | 11 | * 2) RWSEM_READER_OWNED |
| 12 | * - lock is currently or previously owned by readers (lock is free | 12 | * - lock is currently or previously owned by readers (lock is free |
| 13 | * or not set by owner yet) | 13 | * or not set by owner yet) |
| 14 | * 3) Other non-zero value | 14 | * 3) RWSEM_ANONYMOUSLY_OWNED bit set with some other bits set as well |
| 15 | * - a writer owns the lock | 15 | * - lock is owned by an anonymous writer, so spinning on the lock |
| 16 | * owner should be disabled. | ||
| 17 | * 4) Other non-zero value | ||
| 18 | * - a writer owns the lock and other writers can spin on the lock owner. | ||
| 16 | */ | 19 | */ |
| 17 | #define RWSEM_READER_OWNED ((struct task_struct *)1UL) | 20 | #define RWSEM_ANONYMOUSLY_OWNED (1UL << 0) |
| 21 | #define RWSEM_READER_OWNED ((struct task_struct *)RWSEM_ANONYMOUSLY_OWNED) | ||
| 18 | 22 | ||
| 19 | #ifdef CONFIG_DEBUG_RWSEMS | 23 | #ifdef CONFIG_DEBUG_RWSEMS |
| 20 | # define DEBUG_RWSEMS_WARN_ON(c) DEBUG_LOCKS_WARN_ON(c) | 24 | # define DEBUG_RWSEMS_WARN_ON(c) DEBUG_LOCKS_WARN_ON(c) |
| @@ -51,14 +55,22 @@ static inline void rwsem_set_reader_owned(struct rw_semaphore *sem) | |||
| 51 | WRITE_ONCE(sem->owner, RWSEM_READER_OWNED); | 55 | WRITE_ONCE(sem->owner, RWSEM_READER_OWNED); |
| 52 | } | 56 | } |
| 53 | 57 | ||
| 54 | static inline bool rwsem_owner_is_writer(struct task_struct *owner) | 58 | /* |
| 59 | * Return true if a rwsem waiter can spin on the rwsem's owner | ||
| 60 | * and steal the lock, i.e. the lock is not anonymously owned. | ||
| 61 | * N.B. !owner is considered spinnable. | ||
| 62 | */ | ||
| 63 | static inline bool is_rwsem_owner_spinnable(struct task_struct *owner) | ||
| 55 | { | 64 | { |
| 56 | return owner && owner != RWSEM_READER_OWNED; | 65 | return !((unsigned long)owner & RWSEM_ANONYMOUSLY_OWNED); |
| 57 | } | 66 | } |
| 58 | 67 | ||
| 59 | static inline bool rwsem_owner_is_reader(struct task_struct *owner) | 68 | /* |
| 69 | * Return true if rwsem is owned by an anonymous writer or readers. | ||
| 70 | */ | ||
| 71 | static inline bool rwsem_has_anonymous_owner(struct task_struct *owner) | ||
| 60 | { | 72 | { |
| 61 | return owner == RWSEM_READER_OWNED; | 73 | return (unsigned long)owner & RWSEM_ANONYMOUSLY_OWNED; |
| 62 | } | 74 | } |
| 63 | #else | 75 | #else |
| 64 | static inline void rwsem_set_owner(struct rw_semaphore *sem) | 76 | static inline void rwsem_set_owner(struct rw_semaphore *sem) |
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c index e7b3008b85bb..1356afd1eeb6 100644 --- a/kernel/sched/deadline.c +++ b/kernel/sched/deadline.c | |||
| @@ -1117,7 +1117,7 @@ extern bool sched_rt_bandwidth_account(struct rt_rq *rt_rq); | |||
| 1117 | * should be larger than 2^(64 - 20 - 8), which is more than 64 seconds. | 1117 | * should be larger than 2^(64 - 20 - 8), which is more than 64 seconds. |
| 1118 | * So, overflow is not an issue here. | 1118 | * So, overflow is not an issue here. |
| 1119 | */ | 1119 | */ |
| 1120 | u64 grub_reclaim(u64 delta, struct rq *rq, struct sched_dl_entity *dl_se) | 1120 | static u64 grub_reclaim(u64 delta, struct rq *rq, struct sched_dl_entity *dl_se) |
| 1121 | { | 1121 | { |
| 1122 | u64 u_inact = rq->dl.this_bw - rq->dl.running_bw; /* Utot - Uact */ | 1122 | u64 u_inact = rq->dl.this_bw - rq->dl.running_bw; /* Utot - Uact */ |
| 1123 | u64 u_act; | 1123 | u64 u_act; |
| @@ -2731,8 +2731,6 @@ bool dl_cpu_busy(unsigned int cpu) | |||
| 2731 | #endif | 2731 | #endif |
| 2732 | 2732 | ||
| 2733 | #ifdef CONFIG_SCHED_DEBUG | 2733 | #ifdef CONFIG_SCHED_DEBUG |
| 2734 | extern void print_dl_rq(struct seq_file *m, int cpu, struct dl_rq *dl_rq); | ||
| 2735 | |||
| 2736 | void print_dl_stats(struct seq_file *m, int cpu) | 2734 | void print_dl_stats(struct seq_file *m, int cpu) |
| 2737 | { | 2735 | { |
| 2738 | print_dl_rq(m, cpu, &cpu_rq(cpu)->dl); | 2736 | print_dl_rq(m, cpu, &cpu_rq(cpu)->dl); |
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c index 7aef6b4e885a..ef3c4e6f5345 100644 --- a/kernel/sched/rt.c +++ b/kernel/sched/rt.c | |||
| @@ -2701,8 +2701,6 @@ int sched_rr_handler(struct ctl_table *table, int write, | |||
| 2701 | } | 2701 | } |
| 2702 | 2702 | ||
| 2703 | #ifdef CONFIG_SCHED_DEBUG | 2703 | #ifdef CONFIG_SCHED_DEBUG |
| 2704 | extern void print_rt_rq(struct seq_file *m, int cpu, struct rt_rq *rt_rq); | ||
| 2705 | |||
| 2706 | void print_rt_stats(struct seq_file *m, int cpu) | 2704 | void print_rt_stats(struct seq_file *m, int cpu) |
| 2707 | { | 2705 | { |
| 2708 | rt_rq_iter_t iter; | 2706 | rt_rq_iter_t iter; |
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h index 15750c222ca2..1f0a4bc6a39d 100644 --- a/kernel/sched/sched.h +++ b/kernel/sched/sched.h | |||
| @@ -2025,8 +2025,9 @@ extern bool sched_debug_enabled; | |||
| 2025 | extern void print_cfs_stats(struct seq_file *m, int cpu); | 2025 | extern void print_cfs_stats(struct seq_file *m, int cpu); |
| 2026 | extern void print_rt_stats(struct seq_file *m, int cpu); | 2026 | extern void print_rt_stats(struct seq_file *m, int cpu); |
| 2027 | extern void print_dl_stats(struct seq_file *m, int cpu); | 2027 | extern void print_dl_stats(struct seq_file *m, int cpu); |
| 2028 | extern void | 2028 | extern void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq); |
| 2029 | print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq); | 2029 | extern void print_rt_rq(struct seq_file *m, int cpu, struct rt_rq *rt_rq); |
| 2030 | extern void print_dl_rq(struct seq_file *m, int cpu, struct dl_rq *dl_rq); | ||
| 2030 | #ifdef CONFIG_NUMA_BALANCING | 2031 | #ifdef CONFIG_NUMA_BALANCING |
| 2031 | extern void | 2032 | extern void |
| 2032 | show_numa_stats(struct task_struct *p, struct seq_file *m); | 2033 | show_numa_stats(struct task_struct *p, struct seq_file *m); |
diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c index 64cc564f5255..61a1125c1ae4 100644 --- a/kernel/sched/topology.c +++ b/kernel/sched/topology.c | |||
| @@ -1708,7 +1708,7 @@ build_sched_domains(const struct cpumask *cpu_map, struct sched_domain_attr *att | |||
| 1708 | rcu_read_unlock(); | 1708 | rcu_read_unlock(); |
| 1709 | 1709 | ||
| 1710 | if (rq && sched_debug_enabled) { | 1710 | if (rq && sched_debug_enabled) { |
| 1711 | pr_info("span: %*pbl (max cpu_capacity = %lu)\n", | 1711 | pr_info("root domain span: %*pbl (max cpu_capacity = %lu)\n", |
| 1712 | cpumask_pr_args(cpu_map), rq->rd->max_cpu_capacity); | 1712 | cpumask_pr_args(cpu_map), rq->rd->max_cpu_capacity); |
| 1713 | } | 1713 | } |
| 1714 | 1714 | ||
diff --git a/kernel/seccomp.c b/kernel/seccomp.c index dc77548167ef..e691d9a6c58d 100644 --- a/kernel/seccomp.c +++ b/kernel/seccomp.c | |||
| @@ -19,6 +19,8 @@ | |||
| 19 | #include <linux/compat.h> | 19 | #include <linux/compat.h> |
| 20 | #include <linux/coredump.h> | 20 | #include <linux/coredump.h> |
| 21 | #include <linux/kmemleak.h> | 21 | #include <linux/kmemleak.h> |
| 22 | #include <linux/nospec.h> | ||
| 23 | #include <linux/prctl.h> | ||
| 22 | #include <linux/sched.h> | 24 | #include <linux/sched.h> |
| 23 | #include <linux/sched/task_stack.h> | 25 | #include <linux/sched/task_stack.h> |
| 24 | #include <linux/seccomp.h> | 26 | #include <linux/seccomp.h> |
| @@ -227,8 +229,11 @@ static inline bool seccomp_may_assign_mode(unsigned long seccomp_mode) | |||
| 227 | return true; | 229 | return true; |
| 228 | } | 230 | } |
| 229 | 231 | ||
| 232 | void __weak arch_seccomp_spec_mitigate(struct task_struct *task) { } | ||
| 233 | |||
| 230 | static inline void seccomp_assign_mode(struct task_struct *task, | 234 | static inline void seccomp_assign_mode(struct task_struct *task, |
| 231 | unsigned long seccomp_mode) | 235 | unsigned long seccomp_mode, |
| 236 | unsigned long flags) | ||
| 232 | { | 237 | { |
| 233 | assert_spin_locked(&task->sighand->siglock); | 238 | assert_spin_locked(&task->sighand->siglock); |
| 234 | 239 | ||
| @@ -238,6 +243,9 @@ static inline void seccomp_assign_mode(struct task_struct *task, | |||
| 238 | * filter) is set. | 243 | * filter) is set. |
| 239 | */ | 244 | */ |
| 240 | smp_mb__before_atomic(); | 245 | smp_mb__before_atomic(); |
| 246 | /* Assume default seccomp processes want spec flaw mitigation. */ | ||
| 247 | if ((flags & SECCOMP_FILTER_FLAG_SPEC_ALLOW) == 0) | ||
| 248 | arch_seccomp_spec_mitigate(task); | ||
| 241 | set_tsk_thread_flag(task, TIF_SECCOMP); | 249 | set_tsk_thread_flag(task, TIF_SECCOMP); |
| 242 | } | 250 | } |
| 243 | 251 | ||
| @@ -305,7 +313,7 @@ static inline pid_t seccomp_can_sync_threads(void) | |||
| 305 | * without dropping the locks. | 313 | * without dropping the locks. |
| 306 | * | 314 | * |
| 307 | */ | 315 | */ |
| 308 | static inline void seccomp_sync_threads(void) | 316 | static inline void seccomp_sync_threads(unsigned long flags) |
| 309 | { | 317 | { |
| 310 | struct task_struct *thread, *caller; | 318 | struct task_struct *thread, *caller; |
| 311 | 319 | ||
| @@ -346,7 +354,8 @@ static inline void seccomp_sync_threads(void) | |||
| 346 | * allow one thread to transition the other. | 354 | * allow one thread to transition the other. |
| 347 | */ | 355 | */ |
| 348 | if (thread->seccomp.mode == SECCOMP_MODE_DISABLED) | 356 | if (thread->seccomp.mode == SECCOMP_MODE_DISABLED) |
| 349 | seccomp_assign_mode(thread, SECCOMP_MODE_FILTER); | 357 | seccomp_assign_mode(thread, SECCOMP_MODE_FILTER, |
| 358 | flags); | ||
| 350 | } | 359 | } |
| 351 | } | 360 | } |
| 352 | 361 | ||
| @@ -469,7 +478,7 @@ static long seccomp_attach_filter(unsigned int flags, | |||
| 469 | 478 | ||
| 470 | /* Now that the new filter is in place, synchronize to all threads. */ | 479 | /* Now that the new filter is in place, synchronize to all threads. */ |
| 471 | if (flags & SECCOMP_FILTER_FLAG_TSYNC) | 480 | if (flags & SECCOMP_FILTER_FLAG_TSYNC) |
| 472 | seccomp_sync_threads(); | 481 | seccomp_sync_threads(flags); |
| 473 | 482 | ||
| 474 | return 0; | 483 | return 0; |
| 475 | } | 484 | } |
| @@ -818,7 +827,7 @@ static long seccomp_set_mode_strict(void) | |||
| 818 | #ifdef TIF_NOTSC | 827 | #ifdef TIF_NOTSC |
| 819 | disable_TSC(); | 828 | disable_TSC(); |
| 820 | #endif | 829 | #endif |
| 821 | seccomp_assign_mode(current, seccomp_mode); | 830 | seccomp_assign_mode(current, seccomp_mode, 0); |
| 822 | ret = 0; | 831 | ret = 0; |
| 823 | 832 | ||
| 824 | out: | 833 | out: |
| @@ -876,7 +885,7 @@ static long seccomp_set_mode_filter(unsigned int flags, | |||
| 876 | /* Do not free the successfully attached filter. */ | 885 | /* Do not free the successfully attached filter. */ |
| 877 | prepared = NULL; | 886 | prepared = NULL; |
| 878 | 887 | ||
| 879 | seccomp_assign_mode(current, seccomp_mode); | 888 | seccomp_assign_mode(current, seccomp_mode, flags); |
| 880 | out: | 889 | out: |
| 881 | spin_unlock_irq(¤t->sighand->siglock); | 890 | spin_unlock_irq(¤t->sighand->siglock); |
| 882 | if (flags & SECCOMP_FILTER_FLAG_TSYNC) | 891 | if (flags & SECCOMP_FILTER_FLAG_TSYNC) |
diff --git a/kernel/sys.c b/kernel/sys.c index ad692183dfe9..d1b2b8d934bb 100644 --- a/kernel/sys.c +++ b/kernel/sys.c | |||
| @@ -61,6 +61,8 @@ | |||
| 61 | #include <linux/uidgid.h> | 61 | #include <linux/uidgid.h> |
| 62 | #include <linux/cred.h> | 62 | #include <linux/cred.h> |
| 63 | 63 | ||
| 64 | #include <linux/nospec.h> | ||
| 65 | |||
| 64 | #include <linux/kmsg_dump.h> | 66 | #include <linux/kmsg_dump.h> |
| 65 | /* Move somewhere else to avoid recompiling? */ | 67 | /* Move somewhere else to avoid recompiling? */ |
| 66 | #include <generated/utsrelease.h> | 68 | #include <generated/utsrelease.h> |
| @@ -69,6 +71,9 @@ | |||
| 69 | #include <asm/io.h> | 71 | #include <asm/io.h> |
| 70 | #include <asm/unistd.h> | 72 | #include <asm/unistd.h> |
| 71 | 73 | ||
| 74 | /* Hardening for Spectre-v1 */ | ||
| 75 | #include <linux/nospec.h> | ||
| 76 | |||
| 72 | #include "uid16.h" | 77 | #include "uid16.h" |
| 73 | 78 | ||
| 74 | #ifndef SET_UNALIGN_CTL | 79 | #ifndef SET_UNALIGN_CTL |
| @@ -1451,6 +1456,7 @@ SYSCALL_DEFINE2(old_getrlimit, unsigned int, resource, | |||
| 1451 | if (resource >= RLIM_NLIMITS) | 1456 | if (resource >= RLIM_NLIMITS) |
| 1452 | return -EINVAL; | 1457 | return -EINVAL; |
| 1453 | 1458 | ||
| 1459 | resource = array_index_nospec(resource, RLIM_NLIMITS); | ||
| 1454 | task_lock(current->group_leader); | 1460 | task_lock(current->group_leader); |
| 1455 | x = current->signal->rlim[resource]; | 1461 | x = current->signal->rlim[resource]; |
| 1456 | task_unlock(current->group_leader); | 1462 | task_unlock(current->group_leader); |
| @@ -1470,6 +1476,7 @@ COMPAT_SYSCALL_DEFINE2(old_getrlimit, unsigned int, resource, | |||
| 1470 | if (resource >= RLIM_NLIMITS) | 1476 | if (resource >= RLIM_NLIMITS) |
| 1471 | return -EINVAL; | 1477 | return -EINVAL; |
| 1472 | 1478 | ||
| 1479 | resource = array_index_nospec(resource, RLIM_NLIMITS); | ||
| 1473 | task_lock(current->group_leader); | 1480 | task_lock(current->group_leader); |
| 1474 | r = current->signal->rlim[resource]; | 1481 | r = current->signal->rlim[resource]; |
| 1475 | task_unlock(current->group_leader); | 1482 | task_unlock(current->group_leader); |
| @@ -2242,6 +2249,17 @@ static int propagate_has_child_subreaper(struct task_struct *p, void *data) | |||
| 2242 | return 1; | 2249 | return 1; |
| 2243 | } | 2250 | } |
| 2244 | 2251 | ||
| 2252 | int __weak arch_prctl_spec_ctrl_get(struct task_struct *t, unsigned long which) | ||
| 2253 | { | ||
| 2254 | return -EINVAL; | ||
| 2255 | } | ||
| 2256 | |||
| 2257 | int __weak arch_prctl_spec_ctrl_set(struct task_struct *t, unsigned long which, | ||
| 2258 | unsigned long ctrl) | ||
| 2259 | { | ||
| 2260 | return -EINVAL; | ||
| 2261 | } | ||
| 2262 | |||
| 2245 | SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3, | 2263 | SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3, |
| 2246 | unsigned long, arg4, unsigned long, arg5) | 2264 | unsigned long, arg4, unsigned long, arg5) |
| 2247 | { | 2265 | { |
| @@ -2450,6 +2468,16 @@ SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3, | |||
| 2450 | case PR_SVE_GET_VL: | 2468 | case PR_SVE_GET_VL: |
| 2451 | error = SVE_GET_VL(); | 2469 | error = SVE_GET_VL(); |
| 2452 | break; | 2470 | break; |
| 2471 | case PR_GET_SPECULATION_CTRL: | ||
| 2472 | if (arg3 || arg4 || arg5) | ||
| 2473 | return -EINVAL; | ||
| 2474 | error = arch_prctl_spec_ctrl_get(me, arg2); | ||
| 2475 | break; | ||
| 2476 | case PR_SET_SPECULATION_CTRL: | ||
| 2477 | if (arg4 || arg5) | ||
| 2478 | return -EINVAL; | ||
| 2479 | error = arch_prctl_spec_ctrl_set(me, arg2, arg3); | ||
| 2480 | break; | ||
| 2453 | default: | 2481 | default: |
| 2454 | error = -EINVAL; | 2482 | error = -EINVAL; |
| 2455 | break; | 2483 | break; |
diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c index b398c2ea69b2..aa2094d5dd27 100644 --- a/kernel/time/tick-broadcast.c +++ b/kernel/time/tick-broadcast.c | |||
| @@ -612,6 +612,14 @@ static void tick_handle_oneshot_broadcast(struct clock_event_device *dev) | |||
| 612 | now = ktime_get(); | 612 | now = ktime_get(); |
| 613 | /* Find all expired events */ | 613 | /* Find all expired events */ |
| 614 | for_each_cpu(cpu, tick_broadcast_oneshot_mask) { | 614 | for_each_cpu(cpu, tick_broadcast_oneshot_mask) { |
| 615 | /* | ||
| 616 | * Required for !SMP because for_each_cpu() unconditionally | ||
| 617 | * reports CPU0 as set on UP kernels. | ||
| 618 | */ | ||
| 619 | if (!IS_ENABLED(CONFIG_SMP) && | ||
| 620 | cpumask_empty(tick_broadcast_oneshot_mask)) | ||
| 621 | break; | ||
| 622 | |||
| 615 | td = &per_cpu(tick_cpu_device, cpu); | 623 | td = &per_cpu(tick_cpu_device, cpu); |
| 616 | if (td->evtdev->next_event <= now) { | 624 | if (td->evtdev->next_event <= now) { |
| 617 | cpumask_set_cpu(cpu, tmpmask); | 625 | cpumask_set_cpu(cpu, tmpmask); |
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index 414d7210b2ec..bcd93031d042 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c | |||
| @@ -893,7 +893,7 @@ int __trace_bputs(unsigned long ip, const char *str) | |||
| 893 | EXPORT_SYMBOL_GPL(__trace_bputs); | 893 | EXPORT_SYMBOL_GPL(__trace_bputs); |
| 894 | 894 | ||
| 895 | #ifdef CONFIG_TRACER_SNAPSHOT | 895 | #ifdef CONFIG_TRACER_SNAPSHOT |
| 896 | static void tracing_snapshot_instance(struct trace_array *tr) | 896 | void tracing_snapshot_instance(struct trace_array *tr) |
| 897 | { | 897 | { |
| 898 | struct tracer *tracer = tr->current_trace; | 898 | struct tracer *tracer = tr->current_trace; |
| 899 | unsigned long flags; | 899 | unsigned long flags; |
| @@ -949,7 +949,7 @@ static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf, | |||
| 949 | struct trace_buffer *size_buf, int cpu_id); | 949 | struct trace_buffer *size_buf, int cpu_id); |
| 950 | static void set_buffer_entries(struct trace_buffer *buf, unsigned long val); | 950 | static void set_buffer_entries(struct trace_buffer *buf, unsigned long val); |
| 951 | 951 | ||
| 952 | static int alloc_snapshot(struct trace_array *tr) | 952 | int tracing_alloc_snapshot_instance(struct trace_array *tr) |
| 953 | { | 953 | { |
| 954 | int ret; | 954 | int ret; |
| 955 | 955 | ||
| @@ -995,7 +995,7 @@ int tracing_alloc_snapshot(void) | |||
| 995 | struct trace_array *tr = &global_trace; | 995 | struct trace_array *tr = &global_trace; |
| 996 | int ret; | 996 | int ret; |
| 997 | 997 | ||
| 998 | ret = alloc_snapshot(tr); | 998 | ret = tracing_alloc_snapshot_instance(tr); |
| 999 | WARN_ON(ret < 0); | 999 | WARN_ON(ret < 0); |
| 1000 | 1000 | ||
| 1001 | return ret; | 1001 | return ret; |
| @@ -5408,7 +5408,7 @@ static int tracing_set_tracer(struct trace_array *tr, const char *buf) | |||
| 5408 | 5408 | ||
| 5409 | #ifdef CONFIG_TRACER_MAX_TRACE | 5409 | #ifdef CONFIG_TRACER_MAX_TRACE |
| 5410 | if (t->use_max_tr && !had_max_tr) { | 5410 | if (t->use_max_tr && !had_max_tr) { |
| 5411 | ret = alloc_snapshot(tr); | 5411 | ret = tracing_alloc_snapshot_instance(tr); |
| 5412 | if (ret < 0) | 5412 | if (ret < 0) |
| 5413 | goto out; | 5413 | goto out; |
| 5414 | } | 5414 | } |
| @@ -6451,7 +6451,7 @@ tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt, | |||
| 6451 | } | 6451 | } |
| 6452 | #endif | 6452 | #endif |
| 6453 | if (!tr->allocated_snapshot) { | 6453 | if (!tr->allocated_snapshot) { |
| 6454 | ret = alloc_snapshot(tr); | 6454 | ret = tracing_alloc_snapshot_instance(tr); |
| 6455 | if (ret < 0) | 6455 | if (ret < 0) |
| 6456 | break; | 6456 | break; |
| 6457 | } | 6457 | } |
| @@ -7179,7 +7179,7 @@ ftrace_trace_snapshot_callback(struct trace_array *tr, struct ftrace_hash *hash, | |||
| 7179 | return ret; | 7179 | return ret; |
| 7180 | 7180 | ||
| 7181 | out_reg: | 7181 | out_reg: |
| 7182 | ret = alloc_snapshot(tr); | 7182 | ret = tracing_alloc_snapshot_instance(tr); |
| 7183 | if (ret < 0) | 7183 | if (ret < 0) |
| 7184 | goto out; | 7184 | goto out; |
| 7185 | 7185 | ||
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h index 6fb46a06c9dc..507954b4e058 100644 --- a/kernel/trace/trace.h +++ b/kernel/trace/trace.h | |||
| @@ -1817,6 +1817,17 @@ static inline void __init trace_event_init(void) { } | |||
| 1817 | static inline void trace_event_eval_update(struct trace_eval_map **map, int len) { } | 1817 | static inline void trace_event_eval_update(struct trace_eval_map **map, int len) { } |
| 1818 | #endif | 1818 | #endif |
| 1819 | 1819 | ||
| 1820 | #ifdef CONFIG_TRACER_SNAPSHOT | ||
| 1821 | void tracing_snapshot_instance(struct trace_array *tr); | ||
| 1822 | int tracing_alloc_snapshot_instance(struct trace_array *tr); | ||
| 1823 | #else | ||
| 1824 | static inline void tracing_snapshot_instance(struct trace_array *tr) { } | ||
| 1825 | static inline int tracing_alloc_snapshot_instance(struct trace_array *tr) | ||
| 1826 | { | ||
| 1827 | return 0; | ||
| 1828 | } | ||
| 1829 | #endif | ||
| 1830 | |||
| 1820 | extern struct trace_iterator *tracepoint_print_iter; | 1831 | extern struct trace_iterator *tracepoint_print_iter; |
| 1821 | 1832 | ||
| 1822 | #endif /* _LINUX_KERNEL_TRACE_H */ | 1833 | #endif /* _LINUX_KERNEL_TRACE_H */ |
diff --git a/kernel/trace/trace_events_trigger.c b/kernel/trace/trace_events_trigger.c index d251cabcf69a..8b5bdcf64871 100644 --- a/kernel/trace/trace_events_trigger.c +++ b/kernel/trace/trace_events_trigger.c | |||
| @@ -483,9 +483,10 @@ clear_event_triggers(struct trace_array *tr) | |||
| 483 | struct trace_event_file *file; | 483 | struct trace_event_file *file; |
| 484 | 484 | ||
| 485 | list_for_each_entry(file, &tr->events, list) { | 485 | list_for_each_entry(file, &tr->events, list) { |
| 486 | struct event_trigger_data *data; | 486 | struct event_trigger_data *data, *n; |
| 487 | list_for_each_entry_rcu(data, &file->triggers, list) { | 487 | list_for_each_entry_safe(data, n, &file->triggers, list) { |
| 488 | trace_event_trigger_enable_disable(file, 0); | 488 | trace_event_trigger_enable_disable(file, 0); |
| 489 | list_del_rcu(&data->list); | ||
| 489 | if (data->ops->free) | 490 | if (data->ops->free) |
| 490 | data->ops->free(data->ops, data); | 491 | data->ops->free(data->ops, data); |
| 491 | } | 492 | } |
| @@ -642,6 +643,7 @@ event_trigger_callback(struct event_command *cmd_ops, | |||
| 642 | trigger_data->count = -1; | 643 | trigger_data->count = -1; |
| 643 | trigger_data->ops = trigger_ops; | 644 | trigger_data->ops = trigger_ops; |
| 644 | trigger_data->cmd_ops = cmd_ops; | 645 | trigger_data->cmd_ops = cmd_ops; |
| 646 | trigger_data->private_data = file; | ||
| 645 | INIT_LIST_HEAD(&trigger_data->list); | 647 | INIT_LIST_HEAD(&trigger_data->list); |
| 646 | INIT_LIST_HEAD(&trigger_data->named_list); | 648 | INIT_LIST_HEAD(&trigger_data->named_list); |
| 647 | 649 | ||
| @@ -1053,7 +1055,12 @@ static void | |||
| 1053 | snapshot_trigger(struct event_trigger_data *data, void *rec, | 1055 | snapshot_trigger(struct event_trigger_data *data, void *rec, |
| 1054 | struct ring_buffer_event *event) | 1056 | struct ring_buffer_event *event) |
| 1055 | { | 1057 | { |
| 1056 | tracing_snapshot(); | 1058 | struct trace_event_file *file = data->private_data; |
| 1059 | |||
| 1060 | if (file) | ||
| 1061 | tracing_snapshot_instance(file->tr); | ||
| 1062 | else | ||
| 1063 | tracing_snapshot(); | ||
| 1057 | } | 1064 | } |
| 1058 | 1065 | ||
| 1059 | static void | 1066 | static void |
| @@ -1076,7 +1083,7 @@ register_snapshot_trigger(char *glob, struct event_trigger_ops *ops, | |||
| 1076 | { | 1083 | { |
| 1077 | int ret = register_trigger(glob, ops, data, file); | 1084 | int ret = register_trigger(glob, ops, data, file); |
| 1078 | 1085 | ||
| 1079 | if (ret > 0 && tracing_alloc_snapshot() != 0) { | 1086 | if (ret > 0 && tracing_alloc_snapshot_instance(file->tr) != 0) { |
| 1080 | unregister_trigger(glob, ops, data, file); | 1087 | unregister_trigger(glob, ops, data, file); |
| 1081 | ret = 0; | 1088 | ret = 0; |
| 1082 | } | 1089 | } |
diff --git a/lib/iov_iter.c b/lib/iov_iter.c index 970212670b6a..fdae394172fa 100644 --- a/lib/iov_iter.c +++ b/lib/iov_iter.c | |||
| @@ -1012,7 +1012,7 @@ unsigned long iov_iter_gap_alignment(const struct iov_iter *i) | |||
| 1012 | } | 1012 | } |
| 1013 | EXPORT_SYMBOL(iov_iter_gap_alignment); | 1013 | EXPORT_SYMBOL(iov_iter_gap_alignment); |
| 1014 | 1014 | ||
| 1015 | static inline size_t __pipe_get_pages(struct iov_iter *i, | 1015 | static inline ssize_t __pipe_get_pages(struct iov_iter *i, |
| 1016 | size_t maxsize, | 1016 | size_t maxsize, |
| 1017 | struct page **pages, | 1017 | struct page **pages, |
| 1018 | int idx, | 1018 | int idx, |
| @@ -1102,7 +1102,7 @@ static ssize_t pipe_get_pages_alloc(struct iov_iter *i, | |||
| 1102 | size_t *start) | 1102 | size_t *start) |
| 1103 | { | 1103 | { |
| 1104 | struct page **p; | 1104 | struct page **p; |
| 1105 | size_t n; | 1105 | ssize_t n; |
| 1106 | int idx; | 1106 | int idx; |
| 1107 | int npages; | 1107 | int npages; |
| 1108 | 1108 | ||
diff --git a/lib/radix-tree.c b/lib/radix-tree.c index da9e10c827df..a9e41aed6de4 100644 --- a/lib/radix-tree.c +++ b/lib/radix-tree.c | |||
| @@ -1612,11 +1612,9 @@ static void set_iter_tags(struct radix_tree_iter *iter, | |||
| 1612 | static void __rcu **skip_siblings(struct radix_tree_node **nodep, | 1612 | static void __rcu **skip_siblings(struct radix_tree_node **nodep, |
| 1613 | void __rcu **slot, struct radix_tree_iter *iter) | 1613 | void __rcu **slot, struct radix_tree_iter *iter) |
| 1614 | { | 1614 | { |
| 1615 | void *sib = node_to_entry(slot - 1); | ||
| 1616 | |||
| 1617 | while (iter->index < iter->next_index) { | 1615 | while (iter->index < iter->next_index) { |
| 1618 | *nodep = rcu_dereference_raw(*slot); | 1616 | *nodep = rcu_dereference_raw(*slot); |
| 1619 | if (*nodep && *nodep != sib) | 1617 | if (*nodep && !is_sibling_entry(iter->node, *nodep)) |
| 1620 | return slot; | 1618 | return slot; |
| 1621 | slot++; | 1619 | slot++; |
| 1622 | iter->index = __radix_tree_iter_add(iter, 1); | 1620 | iter->index = __radix_tree_iter_add(iter, 1); |
| @@ -1631,7 +1629,7 @@ void __rcu **__radix_tree_next_slot(void __rcu **slot, | |||
| 1631 | struct radix_tree_iter *iter, unsigned flags) | 1629 | struct radix_tree_iter *iter, unsigned flags) |
| 1632 | { | 1630 | { |
| 1633 | unsigned tag = flags & RADIX_TREE_ITER_TAG_MASK; | 1631 | unsigned tag = flags & RADIX_TREE_ITER_TAG_MASK; |
| 1634 | struct radix_tree_node *node = rcu_dereference_raw(*slot); | 1632 | struct radix_tree_node *node; |
| 1635 | 1633 | ||
| 1636 | slot = skip_siblings(&node, slot, iter); | 1634 | slot = skip_siblings(&node, slot, iter); |
| 1637 | 1635 | ||
| @@ -2036,10 +2034,12 @@ void *radix_tree_delete_item(struct radix_tree_root *root, | |||
| 2036 | unsigned long index, void *item) | 2034 | unsigned long index, void *item) |
| 2037 | { | 2035 | { |
| 2038 | struct radix_tree_node *node = NULL; | 2036 | struct radix_tree_node *node = NULL; |
| 2039 | void __rcu **slot; | 2037 | void __rcu **slot = NULL; |
| 2040 | void *entry; | 2038 | void *entry; |
| 2041 | 2039 | ||
| 2042 | entry = __radix_tree_lookup(root, index, &node, &slot); | 2040 | entry = __radix_tree_lookup(root, index, &node, &slot); |
| 2041 | if (!slot) | ||
| 2042 | return NULL; | ||
| 2043 | if (!entry && (!is_idr(root) || node_tag_get(root, node, IDR_FREE, | 2043 | if (!entry && (!is_idr(root) || node_tag_get(root, node, IDR_FREE, |
| 2044 | get_slot_offset(node, slot)))) | 2044 | get_slot_offset(node, slot)))) |
| 2045 | return NULL; | 2045 | return NULL; |
diff --git a/lib/test_bitmap.c b/lib/test_bitmap.c index de16f7869fb1..6cd7d0740005 100644 --- a/lib/test_bitmap.c +++ b/lib/test_bitmap.c | |||
| @@ -331,23 +331,32 @@ static void noinline __init test_mem_optimisations(void) | |||
| 331 | unsigned int start, nbits; | 331 | unsigned int start, nbits; |
| 332 | 332 | ||
| 333 | for (start = 0; start < 1024; start += 8) { | 333 | for (start = 0; start < 1024; start += 8) { |
| 334 | memset(bmap1, 0x5a, sizeof(bmap1)); | ||
| 335 | memset(bmap2, 0x5a, sizeof(bmap2)); | ||
| 336 | for (nbits = 0; nbits < 1024 - start; nbits += 8) { | 334 | for (nbits = 0; nbits < 1024 - start; nbits += 8) { |
| 335 | memset(bmap1, 0x5a, sizeof(bmap1)); | ||
| 336 | memset(bmap2, 0x5a, sizeof(bmap2)); | ||
| 337 | |||
| 337 | bitmap_set(bmap1, start, nbits); | 338 | bitmap_set(bmap1, start, nbits); |
| 338 | __bitmap_set(bmap2, start, nbits); | 339 | __bitmap_set(bmap2, start, nbits); |
| 339 | if (!bitmap_equal(bmap1, bmap2, 1024)) | 340 | if (!bitmap_equal(bmap1, bmap2, 1024)) { |
| 340 | printk("set not equal %d %d\n", start, nbits); | 341 | printk("set not equal %d %d\n", start, nbits); |
| 341 | if (!__bitmap_equal(bmap1, bmap2, 1024)) | 342 | failed_tests++; |
| 343 | } | ||
| 344 | if (!__bitmap_equal(bmap1, bmap2, 1024)) { | ||
| 342 | printk("set not __equal %d %d\n", start, nbits); | 345 | printk("set not __equal %d %d\n", start, nbits); |
| 346 | failed_tests++; | ||
| 347 | } | ||
| 343 | 348 | ||
| 344 | bitmap_clear(bmap1, start, nbits); | 349 | bitmap_clear(bmap1, start, nbits); |
| 345 | __bitmap_clear(bmap2, start, nbits); | 350 | __bitmap_clear(bmap2, start, nbits); |
| 346 | if (!bitmap_equal(bmap1, bmap2, 1024)) | 351 | if (!bitmap_equal(bmap1, bmap2, 1024)) { |
| 347 | printk("clear not equal %d %d\n", start, nbits); | 352 | printk("clear not equal %d %d\n", start, nbits); |
| 348 | if (!__bitmap_equal(bmap1, bmap2, 1024)) | 353 | failed_tests++; |
| 354 | } | ||
| 355 | if (!__bitmap_equal(bmap1, bmap2, 1024)) { | ||
| 349 | printk("clear not __equal %d %d\n", start, | 356 | printk("clear not __equal %d %d\n", start, |
| 350 | nbits); | 357 | nbits); |
| 358 | failed_tests++; | ||
| 359 | } | ||
| 351 | } | 360 | } |
| 352 | } | 361 | } |
| 353 | } | 362 | } |
diff --git a/lib/vsprintf.c b/lib/vsprintf.c index 30c0cb8cc9bc..23920c5ff728 100644 --- a/lib/vsprintf.c +++ b/lib/vsprintf.c | |||
| @@ -1669,19 +1669,22 @@ char *pointer_string(char *buf, char *end, const void *ptr, | |||
| 1669 | return number(buf, end, (unsigned long int)ptr, spec); | 1669 | return number(buf, end, (unsigned long int)ptr, spec); |
| 1670 | } | 1670 | } |
| 1671 | 1671 | ||
| 1672 | static bool have_filled_random_ptr_key __read_mostly; | 1672 | static DEFINE_STATIC_KEY_TRUE(not_filled_random_ptr_key); |
| 1673 | static siphash_key_t ptr_key __read_mostly; | 1673 | static siphash_key_t ptr_key __read_mostly; |
| 1674 | 1674 | ||
| 1675 | static void fill_random_ptr_key(struct random_ready_callback *unused) | 1675 | static void enable_ptr_key_workfn(struct work_struct *work) |
| 1676 | { | 1676 | { |
| 1677 | get_random_bytes(&ptr_key, sizeof(ptr_key)); | 1677 | get_random_bytes(&ptr_key, sizeof(ptr_key)); |
| 1678 | /* | 1678 | /* Needs to run from preemptible context */ |
| 1679 | * have_filled_random_ptr_key==true is dependent on get_random_bytes(). | 1679 | static_branch_disable(¬_filled_random_ptr_key); |
| 1680 | * ptr_to_id() needs to see have_filled_random_ptr_key==true | 1680 | } |
| 1681 | * after get_random_bytes() returns. | 1681 | |
| 1682 | */ | 1682 | static DECLARE_WORK(enable_ptr_key_work, enable_ptr_key_workfn); |
| 1683 | smp_mb(); | 1683 | |
| 1684 | WRITE_ONCE(have_filled_random_ptr_key, true); | 1684 | static void fill_random_ptr_key(struct random_ready_callback *unused) |
| 1685 | { | ||
| 1686 | /* This may be in an interrupt handler. */ | ||
| 1687 | queue_work(system_unbound_wq, &enable_ptr_key_work); | ||
| 1685 | } | 1688 | } |
| 1686 | 1689 | ||
| 1687 | static struct random_ready_callback random_ready = { | 1690 | static struct random_ready_callback random_ready = { |
| @@ -1695,7 +1698,8 @@ static int __init initialize_ptr_random(void) | |||
| 1695 | if (!ret) { | 1698 | if (!ret) { |
| 1696 | return 0; | 1699 | return 0; |
| 1697 | } else if (ret == -EALREADY) { | 1700 | } else if (ret == -EALREADY) { |
| 1698 | fill_random_ptr_key(&random_ready); | 1701 | /* This is in preemptible context */ |
| 1702 | enable_ptr_key_workfn(&enable_ptr_key_work); | ||
| 1699 | return 0; | 1703 | return 0; |
| 1700 | } | 1704 | } |
| 1701 | 1705 | ||
| @@ -1709,7 +1713,7 @@ static char *ptr_to_id(char *buf, char *end, void *ptr, struct printf_spec spec) | |||
| 1709 | unsigned long hashval; | 1713 | unsigned long hashval; |
| 1710 | const int default_width = 2 * sizeof(ptr); | 1714 | const int default_width = 2 * sizeof(ptr); |
| 1711 | 1715 | ||
| 1712 | if (unlikely(!have_filled_random_ptr_key)) { | 1716 | if (static_branch_unlikely(¬_filled_random_ptr_key)) { |
| 1713 | spec.field_width = default_width; | 1717 | spec.field_width = default_width; |
| 1714 | /* string length must be less than default_width */ | 1718 | /* string length must be less than default_width */ |
| 1715 | return string(buf, end, "(ptrval)", spec); | 1719 | return string(buf, end, "(ptrval)", spec); |
diff --git a/mm/Kconfig b/mm/Kconfig index d5004d82a1d6..e14c01513bfd 100644 --- a/mm/Kconfig +++ b/mm/Kconfig | |||
| @@ -636,6 +636,7 @@ config DEFERRED_STRUCT_PAGE_INIT | |||
| 636 | default n | 636 | default n |
| 637 | depends on NO_BOOTMEM | 637 | depends on NO_BOOTMEM |
| 638 | depends on !FLATMEM | 638 | depends on !FLATMEM |
| 639 | depends on !NEED_PER_CPU_KM | ||
| 639 | help | 640 | help |
| 640 | Ordinarily all struct pages are initialised during early boot in a | 641 | Ordinarily all struct pages are initialised during early boot in a |
| 641 | single thread. On very large machines this can take a considerable | 642 | single thread. On very large machines this can take a considerable |
| @@ -39,7 +39,6 @@ | |||
| 39 | #include <trace/events/cma.h> | 39 | #include <trace/events/cma.h> |
| 40 | 40 | ||
| 41 | #include "cma.h" | 41 | #include "cma.h" |
| 42 | #include "internal.h" | ||
| 43 | 42 | ||
| 44 | struct cma cma_areas[MAX_CMA_AREAS]; | 43 | struct cma cma_areas[MAX_CMA_AREAS]; |
| 45 | unsigned cma_area_count; | 44 | unsigned cma_area_count; |
| @@ -110,25 +109,23 @@ static int __init cma_activate_area(struct cma *cma) | |||
| 110 | if (!cma->bitmap) | 109 | if (!cma->bitmap) |
| 111 | return -ENOMEM; | 110 | return -ENOMEM; |
| 112 | 111 | ||
| 112 | WARN_ON_ONCE(!pfn_valid(pfn)); | ||
| 113 | zone = page_zone(pfn_to_page(pfn)); | ||
| 114 | |||
| 113 | do { | 115 | do { |
| 114 | unsigned j; | 116 | unsigned j; |
| 115 | 117 | ||
| 116 | base_pfn = pfn; | 118 | base_pfn = pfn; |
| 117 | if (!pfn_valid(base_pfn)) | ||
| 118 | goto err; | ||
| 119 | |||
| 120 | zone = page_zone(pfn_to_page(base_pfn)); | ||
| 121 | for (j = pageblock_nr_pages; j; --j, pfn++) { | 119 | for (j = pageblock_nr_pages; j; --j, pfn++) { |
| 122 | if (!pfn_valid(pfn)) | 120 | WARN_ON_ONCE(!pfn_valid(pfn)); |
| 123 | goto err; | ||
| 124 | |||
| 125 | /* | 121 | /* |
| 126 | * In init_cma_reserved_pageblock(), present_pages | 122 | * alloc_contig_range requires the pfn range |
| 127 | * is adjusted with assumption that all pages in | 123 | * specified to be in the same zone. Make this |
| 128 | * the pageblock come from a single zone. | 124 | * simple by forcing the entire CMA resv range |
| 125 | * to be in the same zone. | ||
| 129 | */ | 126 | */ |
| 130 | if (page_zone(pfn_to_page(pfn)) != zone) | 127 | if (page_zone(pfn_to_page(pfn)) != zone) |
| 131 | goto err; | 128 | goto not_in_zone; |
| 132 | } | 129 | } |
| 133 | init_cma_reserved_pageblock(pfn_to_page(base_pfn)); | 130 | init_cma_reserved_pageblock(pfn_to_page(base_pfn)); |
| 134 | } while (--i); | 131 | } while (--i); |
| @@ -142,7 +139,7 @@ static int __init cma_activate_area(struct cma *cma) | |||
| 142 | 139 | ||
| 143 | return 0; | 140 | return 0; |
| 144 | 141 | ||
| 145 | err: | 142 | not_in_zone: |
| 146 | pr_err("CMA area %s could not be activated\n", cma->name); | 143 | pr_err("CMA area %s could not be activated\n", cma->name); |
| 147 | kfree(cma->bitmap); | 144 | kfree(cma->bitmap); |
| 148 | cma->count = 0; | 145 | cma->count = 0; |
| @@ -152,41 +149,6 @@ err: | |||
| 152 | static int __init cma_init_reserved_areas(void) | 149 | static int __init cma_init_reserved_areas(void) |
| 153 | { | 150 | { |
| 154 | int i; | 151 | int i; |
| 155 | struct zone *zone; | ||
| 156 | pg_data_t *pgdat; | ||
| 157 | |||
| 158 | if (!cma_area_count) | ||
| 159 | return 0; | ||
| 160 | |||
| 161 | for_each_online_pgdat(pgdat) { | ||
| 162 | unsigned long start_pfn = UINT_MAX, end_pfn = 0; | ||
| 163 | |||
| 164 | zone = &pgdat->node_zones[ZONE_MOVABLE]; | ||
| 165 | |||
| 166 | /* | ||
| 167 | * In this case, we cannot adjust the zone range | ||
| 168 | * since it is now maximum node span and we don't | ||
| 169 | * know original zone range. | ||
| 170 | */ | ||
| 171 | if (populated_zone(zone)) | ||
| 172 | continue; | ||
| 173 | |||
| 174 | for (i = 0; i < cma_area_count; i++) { | ||
| 175 | if (pfn_to_nid(cma_areas[i].base_pfn) != | ||
| 176 | pgdat->node_id) | ||
| 177 | continue; | ||
| 178 | |||
| 179 | start_pfn = min(start_pfn, cma_areas[i].base_pfn); | ||
| 180 | end_pfn = max(end_pfn, cma_areas[i].base_pfn + | ||
| 181 | cma_areas[i].count); | ||
| 182 | } | ||
| 183 | |||
| 184 | if (!end_pfn) | ||
| 185 | continue; | ||
| 186 | |||
| 187 | zone->zone_start_pfn = start_pfn; | ||
| 188 | zone->spanned_pages = end_pfn - start_pfn; | ||
| 189 | } | ||
| 190 | 152 | ||
| 191 | for (i = 0; i < cma_area_count; i++) { | 153 | for (i = 0; i < cma_area_count; i++) { |
| 192 | int ret = cma_activate_area(&cma_areas[i]); | 154 | int ret = cma_activate_area(&cma_areas[i]); |
| @@ -195,32 +157,9 @@ static int __init cma_init_reserved_areas(void) | |||
| 195 | return ret; | 157 | return ret; |
| 196 | } | 158 | } |
| 197 | 159 | ||
| 198 | /* | ||
| 199 | * Reserved pages for ZONE_MOVABLE are now activated and | ||
| 200 | * this would change ZONE_MOVABLE's managed page counter and | ||
| 201 | * the other zones' present counter. We need to re-calculate | ||
| 202 | * various zone information that depends on this initialization. | ||
| 203 | */ | ||
| 204 | build_all_zonelists(NULL); | ||
| 205 | for_each_populated_zone(zone) { | ||
| 206 | if (zone_idx(zone) == ZONE_MOVABLE) { | ||
| 207 | zone_pcp_reset(zone); | ||
| 208 | setup_zone_pageset(zone); | ||
| 209 | } else | ||
| 210 | zone_pcp_update(zone); | ||
| 211 | |||
| 212 | set_zone_contiguous(zone); | ||
| 213 | } | ||
| 214 | |||
| 215 | /* | ||
| 216 | * We need to re-init per zone wmark by calling | ||
| 217 | * init_per_zone_wmark_min() but doesn't call here because it is | ||
| 218 | * registered on core_initcall and it will be called later than us. | ||
| 219 | */ | ||
| 220 | |||
| 221 | return 0; | 160 | return 0; |
| 222 | } | 161 | } |
| 223 | pure_initcall(cma_init_reserved_areas); | 162 | core_initcall(cma_init_reserved_areas); |
| 224 | 163 | ||
| 225 | /** | 164 | /** |
| 226 | * cma_init_reserved_mem() - create custom contiguous area from reserved memory | 165 | * cma_init_reserved_mem() - create custom contiguous area from reserved memory |
diff --git a/mm/compaction.c b/mm/compaction.c index 028b7210a669..29bd1df18b98 100644 --- a/mm/compaction.c +++ b/mm/compaction.c | |||
| @@ -1450,12 +1450,14 @@ static enum compact_result __compaction_suitable(struct zone *zone, int order, | |||
| 1450 | * if compaction succeeds. | 1450 | * if compaction succeeds. |
| 1451 | * For costly orders, we require low watermark instead of min for | 1451 | * For costly orders, we require low watermark instead of min for |
| 1452 | * compaction to proceed to increase its chances. | 1452 | * compaction to proceed to increase its chances. |
| 1453 | * ALLOC_CMA is used, as pages in CMA pageblocks are considered | ||
| 1454 | * suitable migration targets | ||
| 1453 | */ | 1455 | */ |
| 1454 | watermark = (order > PAGE_ALLOC_COSTLY_ORDER) ? | 1456 | watermark = (order > PAGE_ALLOC_COSTLY_ORDER) ? |
| 1455 | low_wmark_pages(zone) : min_wmark_pages(zone); | 1457 | low_wmark_pages(zone) : min_wmark_pages(zone); |
| 1456 | watermark += compact_gap(order); | 1458 | watermark += compact_gap(order); |
| 1457 | if (!__zone_watermark_ok(zone, 0, watermark, classzone_idx, | 1459 | if (!__zone_watermark_ok(zone, 0, watermark, classzone_idx, |
| 1458 | 0, wmark_target)) | 1460 | ALLOC_CMA, wmark_target)) |
| 1459 | return COMPACT_SKIPPED; | 1461 | return COMPACT_SKIPPED; |
| 1460 | 1462 | ||
| 1461 | return COMPACT_CONTINUE; | 1463 | return COMPACT_CONTINUE; |
| @@ -544,6 +544,9 @@ static int check_vma_flags(struct vm_area_struct *vma, unsigned long gup_flags) | |||
| 544 | if (vm_flags & (VM_IO | VM_PFNMAP)) | 544 | if (vm_flags & (VM_IO | VM_PFNMAP)) |
| 545 | return -EFAULT; | 545 | return -EFAULT; |
| 546 | 546 | ||
| 547 | if (gup_flags & FOLL_ANON && !vma_is_anonymous(vma)) | ||
| 548 | return -EFAULT; | ||
| 549 | |||
| 547 | if (write) { | 550 | if (write) { |
| 548 | if (!(vm_flags & VM_WRITE)) { | 551 | if (!(vm_flags & VM_WRITE)) { |
| 549 | if (!(gup_flags & FOLL_FORCE)) | 552 | if (!(gup_flags & FOLL_FORCE)) |
diff --git a/mm/internal.h b/mm/internal.h index 62d8c34e63d5..502d14189794 100644 --- a/mm/internal.h +++ b/mm/internal.h | |||
| @@ -168,9 +168,6 @@ extern void post_alloc_hook(struct page *page, unsigned int order, | |||
| 168 | gfp_t gfp_flags); | 168 | gfp_t gfp_flags); |
| 169 | extern int user_min_free_kbytes; | 169 | extern int user_min_free_kbytes; |
| 170 | 170 | ||
| 171 | extern void set_zone_contiguous(struct zone *zone); | ||
| 172 | extern void clear_zone_contiguous(struct zone *zone); | ||
| 173 | |||
| 174 | #if defined CONFIG_COMPACTION || defined CONFIG_CMA | 171 | #if defined CONFIG_COMPACTION || defined CONFIG_CMA |
| 175 | 172 | ||
| 176 | /* | 173 | /* |
| @@ -498,6 +495,7 @@ unsigned long reclaim_clean_pages_from_list(struct zone *zone, | |||
| 498 | #define ALLOC_HARDER 0x10 /* try to alloc harder */ | 495 | #define ALLOC_HARDER 0x10 /* try to alloc harder */ |
| 499 | #define ALLOC_HIGH 0x20 /* __GFP_HIGH set */ | 496 | #define ALLOC_HIGH 0x20 /* __GFP_HIGH set */ |
| 500 | #define ALLOC_CPUSET 0x40 /* check for correct cpuset */ | 497 | #define ALLOC_CPUSET 0x40 /* check for correct cpuset */ |
| 498 | #define ALLOC_CMA 0x80 /* allow allocations from CMA areas */ | ||
| 501 | 499 | ||
| 502 | enum ttu_flags; | 500 | enum ttu_flags; |
| 503 | struct tlbflush_unmap_batch; | 501 | struct tlbflush_unmap_batch; |
diff --git a/mm/kasan/kasan.c b/mm/kasan/kasan.c index bc0e68f7dc75..f185455b3406 100644 --- a/mm/kasan/kasan.c +++ b/mm/kasan/kasan.c | |||
| @@ -792,6 +792,40 @@ DEFINE_ASAN_SET_SHADOW(f5); | |||
| 792 | DEFINE_ASAN_SET_SHADOW(f8); | 792 | DEFINE_ASAN_SET_SHADOW(f8); |
| 793 | 793 | ||
| 794 | #ifdef CONFIG_MEMORY_HOTPLUG | 794 | #ifdef CONFIG_MEMORY_HOTPLUG |
| 795 | static bool shadow_mapped(unsigned long addr) | ||
| 796 | { | ||
| 797 | pgd_t *pgd = pgd_offset_k(addr); | ||
| 798 | p4d_t *p4d; | ||
| 799 | pud_t *pud; | ||
| 800 | pmd_t *pmd; | ||
| 801 | pte_t *pte; | ||
| 802 | |||
| 803 | if (pgd_none(*pgd)) | ||
| 804 | return false; | ||
| 805 | p4d = p4d_offset(pgd, addr); | ||
| 806 | if (p4d_none(*p4d)) | ||
| 807 | return false; | ||
| 808 | pud = pud_offset(p4d, addr); | ||
| 809 | if (pud_none(*pud)) | ||
| 810 | return false; | ||
| 811 | |||
| 812 | /* | ||
| 813 | * We can't use pud_large() or pud_huge(), the first one is | ||
| 814 | * arch-specific, the last one depends on HUGETLB_PAGE. So let's abuse | ||
| 815 | * pud_bad(), if pud is bad then it's bad because it's huge. | ||
| 816 | */ | ||
| 817 | if (pud_bad(*pud)) | ||
| 818 | return true; | ||
| 819 | pmd = pmd_offset(pud, addr); | ||
| 820 | if (pmd_none(*pmd)) | ||
| 821 | return false; | ||
| 822 | |||
| 823 | if (pmd_bad(*pmd)) | ||
| 824 | return true; | ||
| 825 | pte = pte_offset_kernel(pmd, addr); | ||
| 826 | return !pte_none(*pte); | ||
| 827 | } | ||
| 828 | |||
| 795 | static int __meminit kasan_mem_notifier(struct notifier_block *nb, | 829 | static int __meminit kasan_mem_notifier(struct notifier_block *nb, |
| 796 | unsigned long action, void *data) | 830 | unsigned long action, void *data) |
| 797 | { | 831 | { |
| @@ -813,6 +847,14 @@ static int __meminit kasan_mem_notifier(struct notifier_block *nb, | |||
| 813 | case MEM_GOING_ONLINE: { | 847 | case MEM_GOING_ONLINE: { |
| 814 | void *ret; | 848 | void *ret; |
| 815 | 849 | ||
| 850 | /* | ||
| 851 | * If shadow is mapped already than it must have been mapped | ||
| 852 | * during the boot. This could happen if we onlining previously | ||
| 853 | * offlined memory. | ||
| 854 | */ | ||
| 855 | if (shadow_mapped(shadow_start)) | ||
| 856 | return NOTIFY_OK; | ||
| 857 | |||
| 816 | ret = __vmalloc_node_range(shadow_size, PAGE_SIZE, shadow_start, | 858 | ret = __vmalloc_node_range(shadow_size, PAGE_SIZE, shadow_start, |
| 817 | shadow_end, GFP_KERNEL, | 859 | shadow_end, GFP_KERNEL, |
| 818 | PAGE_KERNEL, VM_NO_GUARD, | 860 | PAGE_KERNEL, VM_NO_GUARD, |
| @@ -824,8 +866,26 @@ static int __meminit kasan_mem_notifier(struct notifier_block *nb, | |||
| 824 | kmemleak_ignore(ret); | 866 | kmemleak_ignore(ret); |
| 825 | return NOTIFY_OK; | 867 | return NOTIFY_OK; |
| 826 | } | 868 | } |
| 827 | case MEM_OFFLINE: | 869 | case MEM_CANCEL_ONLINE: |
| 828 | vfree((void *)shadow_start); | 870 | case MEM_OFFLINE: { |
| 871 | struct vm_struct *vm; | ||
| 872 | |||
| 873 | /* | ||
| 874 | * shadow_start was either mapped during boot by kasan_init() | ||
| 875 | * or during memory online by __vmalloc_node_range(). | ||
| 876 | * In the latter case we can use vfree() to free shadow. | ||
| 877 | * Non-NULL result of the find_vm_area() will tell us if | ||
| 878 | * that was the second case. | ||
| 879 | * | ||
| 880 | * Currently it's not possible to free shadow mapped | ||
| 881 | * during boot by kasan_init(). It's because the code | ||
| 882 | * to do that hasn't been written yet. So we'll just | ||
| 883 | * leak the memory. | ||
| 884 | */ | ||
| 885 | vm = find_vm_area((void *)shadow_start); | ||
| 886 | if (vm) | ||
| 887 | vfree((void *)shadow_start); | ||
| 888 | } | ||
| 829 | } | 889 | } |
| 830 | 890 | ||
| 831 | return NOTIFY_OK; | 891 | return NOTIFY_OK; |
| @@ -838,5 +898,5 @@ static int __init kasan_memhotplug_init(void) | |||
| 838 | return 0; | 898 | return 0; |
| 839 | } | 899 | } |
| 840 | 900 | ||
| 841 | module_init(kasan_memhotplug_init); | 901 | core_initcall(kasan_memhotplug_init); |
| 842 | #endif | 902 | #endif |
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c index f74826cdceea..25982467800b 100644 --- a/mm/memory_hotplug.c +++ b/mm/memory_hotplug.c | |||
| @@ -1158,7 +1158,7 @@ int __ref add_memory_resource(int nid, struct resource *res, bool online) | |||
| 1158 | * nodes have to go through register_node. | 1158 | * nodes have to go through register_node. |
| 1159 | * TODO clean up this mess. | 1159 | * TODO clean up this mess. |
| 1160 | */ | 1160 | */ |
| 1161 | ret = link_mem_sections(nid, start_pfn, nr_pages); | 1161 | ret = link_mem_sections(nid, start_pfn, nr_pages, false); |
| 1162 | register_fail: | 1162 | register_fail: |
| 1163 | /* | 1163 | /* |
| 1164 | * If sysfs file of new node can't create, cpu on the node | 1164 | * If sysfs file of new node can't create, cpu on the node |
| @@ -1327,7 +1327,7 @@ static inline int mlock_future_check(struct mm_struct *mm, | |||
| 1327 | static inline u64 file_mmap_size_max(struct file *file, struct inode *inode) | 1327 | static inline u64 file_mmap_size_max(struct file *file, struct inode *inode) |
| 1328 | { | 1328 | { |
| 1329 | if (S_ISREG(inode->i_mode)) | 1329 | if (S_ISREG(inode->i_mode)) |
| 1330 | return inode->i_sb->s_maxbytes; | 1330 | return MAX_LFS_FILESIZE; |
| 1331 | 1331 | ||
| 1332 | if (S_ISBLK(inode->i_mode)) | 1332 | if (S_ISBLK(inode->i_mode)) |
| 1333 | return MAX_LFS_FILESIZE; | 1333 | return MAX_LFS_FILESIZE; |
diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 905db9d7962f..22320ea27489 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c | |||
| @@ -1743,38 +1743,16 @@ void __init page_alloc_init_late(void) | |||
| 1743 | } | 1743 | } |
| 1744 | 1744 | ||
| 1745 | #ifdef CONFIG_CMA | 1745 | #ifdef CONFIG_CMA |
| 1746 | static void __init adjust_present_page_count(struct page *page, long count) | ||
| 1747 | { | ||
| 1748 | struct zone *zone = page_zone(page); | ||
| 1749 | |||
| 1750 | /* We don't need to hold a lock since it is boot-up process */ | ||
| 1751 | zone->present_pages += count; | ||
| 1752 | } | ||
| 1753 | |||
| 1754 | /* Free whole pageblock and set its migration type to MIGRATE_CMA. */ | 1746 | /* Free whole pageblock and set its migration type to MIGRATE_CMA. */ |
| 1755 | void __init init_cma_reserved_pageblock(struct page *page) | 1747 | void __init init_cma_reserved_pageblock(struct page *page) |
| 1756 | { | 1748 | { |
| 1757 | unsigned i = pageblock_nr_pages; | 1749 | unsigned i = pageblock_nr_pages; |
| 1758 | unsigned long pfn = page_to_pfn(page); | ||
| 1759 | struct page *p = page; | 1750 | struct page *p = page; |
| 1760 | int nid = page_to_nid(page); | ||
| 1761 | |||
| 1762 | /* | ||
| 1763 | * ZONE_MOVABLE will steal present pages from other zones by | ||
| 1764 | * changing page links so page_zone() is changed. Before that, | ||
| 1765 | * we need to adjust previous zone's page count first. | ||
| 1766 | */ | ||
| 1767 | adjust_present_page_count(page, -pageblock_nr_pages); | ||
| 1768 | 1751 | ||
| 1769 | do { | 1752 | do { |
| 1770 | __ClearPageReserved(p); | 1753 | __ClearPageReserved(p); |
| 1771 | set_page_count(p, 0); | 1754 | set_page_count(p, 0); |
| 1772 | 1755 | } while (++p, --i); | |
| 1773 | /* Steal pages from other zones */ | ||
| 1774 | set_page_links(p, ZONE_MOVABLE, nid, pfn); | ||
| 1775 | } while (++p, ++pfn, --i); | ||
| 1776 | |||
| 1777 | adjust_present_page_count(page, pageblock_nr_pages); | ||
| 1778 | 1756 | ||
| 1779 | set_pageblock_migratetype(page, MIGRATE_CMA); | 1757 | set_pageblock_migratetype(page, MIGRATE_CMA); |
| 1780 | 1758 | ||
| @@ -2889,7 +2867,7 @@ int __isolate_free_page(struct page *page, unsigned int order) | |||
| 2889 | * exists. | 2867 | * exists. |
| 2890 | */ | 2868 | */ |
| 2891 | watermark = min_wmark_pages(zone) + (1UL << order); | 2869 | watermark = min_wmark_pages(zone) + (1UL << order); |
| 2892 | if (!zone_watermark_ok(zone, 0, watermark, 0, 0)) | 2870 | if (!zone_watermark_ok(zone, 0, watermark, 0, ALLOC_CMA)) |
| 2893 | return 0; | 2871 | return 0; |
| 2894 | 2872 | ||
| 2895 | __mod_zone_freepage_state(zone, -(1UL << order), mt); | 2873 | __mod_zone_freepage_state(zone, -(1UL << order), mt); |
| @@ -3165,6 +3143,12 @@ bool __zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark, | |||
| 3165 | } | 3143 | } |
| 3166 | 3144 | ||
| 3167 | 3145 | ||
| 3146 | #ifdef CONFIG_CMA | ||
| 3147 | /* If allocation can't use CMA areas don't use free CMA pages */ | ||
| 3148 | if (!(alloc_flags & ALLOC_CMA)) | ||
| 3149 | free_pages -= zone_page_state(z, NR_FREE_CMA_PAGES); | ||
| 3150 | #endif | ||
| 3151 | |||
| 3168 | /* | 3152 | /* |
| 3169 | * Check watermarks for an order-0 allocation request. If these | 3153 | * Check watermarks for an order-0 allocation request. If these |
| 3170 | * are not met, then a high-order request also cannot go ahead | 3154 | * are not met, then a high-order request also cannot go ahead |
| @@ -3191,8 +3175,10 @@ bool __zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark, | |||
| 3191 | } | 3175 | } |
| 3192 | 3176 | ||
| 3193 | #ifdef CONFIG_CMA | 3177 | #ifdef CONFIG_CMA |
| 3194 | if (!list_empty(&area->free_list[MIGRATE_CMA])) | 3178 | if ((alloc_flags & ALLOC_CMA) && |
| 3179 | !list_empty(&area->free_list[MIGRATE_CMA])) { | ||
| 3195 | return true; | 3180 | return true; |
| 3181 | } | ||
| 3196 | #endif | 3182 | #endif |
| 3197 | if (alloc_harder && | 3183 | if (alloc_harder && |
| 3198 | !list_empty(&area->free_list[MIGRATE_HIGHATOMIC])) | 3184 | !list_empty(&area->free_list[MIGRATE_HIGHATOMIC])) |
| @@ -3212,6 +3198,13 @@ static inline bool zone_watermark_fast(struct zone *z, unsigned int order, | |||
| 3212 | unsigned long mark, int classzone_idx, unsigned int alloc_flags) | 3198 | unsigned long mark, int classzone_idx, unsigned int alloc_flags) |
| 3213 | { | 3199 | { |
| 3214 | long free_pages = zone_page_state(z, NR_FREE_PAGES); | 3200 | long free_pages = zone_page_state(z, NR_FREE_PAGES); |
| 3201 | long cma_pages = 0; | ||
| 3202 | |||
| 3203 | #ifdef CONFIG_CMA | ||
| 3204 | /* If allocation can't use CMA areas don't use free CMA pages */ | ||
| 3205 | if (!(alloc_flags & ALLOC_CMA)) | ||
| 3206 | cma_pages = zone_page_state(z, NR_FREE_CMA_PAGES); | ||
| 3207 | #endif | ||
| 3215 | 3208 | ||
| 3216 | /* | 3209 | /* |
| 3217 | * Fast check for order-0 only. If this fails then the reserves | 3210 | * Fast check for order-0 only. If this fails then the reserves |
| @@ -3220,7 +3213,7 @@ static inline bool zone_watermark_fast(struct zone *z, unsigned int order, | |||
| 3220 | * the caller is !atomic then it'll uselessly search the free | 3213 | * the caller is !atomic then it'll uselessly search the free |
| 3221 | * list. That corner case is then slower but it is harmless. | 3214 | * list. That corner case is then slower but it is harmless. |
| 3222 | */ | 3215 | */ |
| 3223 | if (!order && free_pages > mark + z->lowmem_reserve[classzone_idx]) | 3216 | if (!order && (free_pages - cma_pages) > mark + z->lowmem_reserve[classzone_idx]) |
| 3224 | return true; | 3217 | return true; |
| 3225 | 3218 | ||
| 3226 | return __zone_watermark_ok(z, order, mark, classzone_idx, alloc_flags, | 3219 | return __zone_watermark_ok(z, order, mark, classzone_idx, alloc_flags, |
| @@ -3856,6 +3849,10 @@ gfp_to_alloc_flags(gfp_t gfp_mask) | |||
| 3856 | } else if (unlikely(rt_task(current)) && !in_interrupt()) | 3849 | } else if (unlikely(rt_task(current)) && !in_interrupt()) |
| 3857 | alloc_flags |= ALLOC_HARDER; | 3850 | alloc_flags |= ALLOC_HARDER; |
| 3858 | 3851 | ||
| 3852 | #ifdef CONFIG_CMA | ||
| 3853 | if (gfpflags_to_migratetype(gfp_mask) == MIGRATE_MOVABLE) | ||
| 3854 | alloc_flags |= ALLOC_CMA; | ||
| 3855 | #endif | ||
| 3859 | return alloc_flags; | 3856 | return alloc_flags; |
| 3860 | } | 3857 | } |
| 3861 | 3858 | ||
| @@ -4322,6 +4319,9 @@ static inline bool prepare_alloc_pages(gfp_t gfp_mask, unsigned int order, | |||
| 4322 | if (should_fail_alloc_page(gfp_mask, order)) | 4319 | if (should_fail_alloc_page(gfp_mask, order)) |
| 4323 | return false; | 4320 | return false; |
| 4324 | 4321 | ||
| 4322 | if (IS_ENABLED(CONFIG_CMA) && ac->migratetype == MIGRATE_MOVABLE) | ||
| 4323 | *alloc_flags |= ALLOC_CMA; | ||
| 4324 | |||
| 4325 | return true; | 4325 | return true; |
| 4326 | } | 4326 | } |
| 4327 | 4327 | ||
| @@ -6204,7 +6204,6 @@ static void __paginginit free_area_init_core(struct pglist_data *pgdat) | |||
| 6204 | { | 6204 | { |
| 6205 | enum zone_type j; | 6205 | enum zone_type j; |
| 6206 | int nid = pgdat->node_id; | 6206 | int nid = pgdat->node_id; |
| 6207 | unsigned long node_end_pfn = 0; | ||
| 6208 | 6207 | ||
| 6209 | pgdat_resize_init(pgdat); | 6208 | pgdat_resize_init(pgdat); |
| 6210 | #ifdef CONFIG_NUMA_BALANCING | 6209 | #ifdef CONFIG_NUMA_BALANCING |
| @@ -6232,13 +6231,9 @@ static void __paginginit free_area_init_core(struct pglist_data *pgdat) | |||
| 6232 | struct zone *zone = pgdat->node_zones + j; | 6231 | struct zone *zone = pgdat->node_zones + j; |
| 6233 | unsigned long size, realsize, freesize, memmap_pages; | 6232 | unsigned long size, realsize, freesize, memmap_pages; |
| 6234 | unsigned long zone_start_pfn = zone->zone_start_pfn; | 6233 | unsigned long zone_start_pfn = zone->zone_start_pfn; |
| 6235 | unsigned long movable_size = 0; | ||
| 6236 | 6234 | ||
| 6237 | size = zone->spanned_pages; | 6235 | size = zone->spanned_pages; |
| 6238 | realsize = freesize = zone->present_pages; | 6236 | realsize = freesize = zone->present_pages; |
| 6239 | if (zone_end_pfn(zone) > node_end_pfn) | ||
| 6240 | node_end_pfn = zone_end_pfn(zone); | ||
| 6241 | |||
| 6242 | 6237 | ||
| 6243 | /* | 6238 | /* |
| 6244 | * Adjust freesize so that it accounts for how much memory | 6239 | * Adjust freesize so that it accounts for how much memory |
| @@ -6287,30 +6282,12 @@ static void __paginginit free_area_init_core(struct pglist_data *pgdat) | |||
| 6287 | zone_seqlock_init(zone); | 6282 | zone_seqlock_init(zone); |
| 6288 | zone_pcp_init(zone); | 6283 | zone_pcp_init(zone); |
| 6289 | 6284 | ||
| 6290 | /* | 6285 | if (!size) |
| 6291 | * The size of the CMA area is unknown now so we need to | ||
| 6292 | * prepare the memory for the usemap at maximum. | ||
| 6293 | */ | ||
| 6294 | if (IS_ENABLED(CONFIG_CMA) && j == ZONE_MOVABLE && | ||
| 6295 | pgdat->node_spanned_pages) { | ||
| 6296 | movable_size = node_end_pfn - pgdat->node_start_pfn; | ||
| 6297 | } | ||
| 6298 | |||
| 6299 | if (!size && !movable_size) | ||
| 6300 | continue; | 6286 | continue; |
| 6301 | 6287 | ||
| 6302 | set_pageblock_order(); | 6288 | set_pageblock_order(); |
| 6303 | if (movable_size) { | 6289 | setup_usemap(pgdat, zone, zone_start_pfn, size); |
| 6304 | zone->zone_start_pfn = pgdat->node_start_pfn; | 6290 | init_currently_empty_zone(zone, zone_start_pfn, size); |
| 6305 | zone->spanned_pages = movable_size; | ||
| 6306 | setup_usemap(pgdat, zone, | ||
| 6307 | pgdat->node_start_pfn, movable_size); | ||
| 6308 | init_currently_empty_zone(zone, | ||
| 6309 | pgdat->node_start_pfn, movable_size); | ||
| 6310 | } else { | ||
| 6311 | setup_usemap(pgdat, zone, zone_start_pfn, size); | ||
| 6312 | init_currently_empty_zone(zone, zone_start_pfn, size); | ||
| 6313 | } | ||
| 6314 | memmap_init(size, nid, j, zone_start_pfn); | 6291 | memmap_init(size, nid, j, zone_start_pfn); |
| 6315 | } | 6292 | } |
| 6316 | } | 6293 | } |
| @@ -7621,11 +7598,12 @@ bool has_unmovable_pages(struct zone *zone, struct page *page, int count, | |||
| 7621 | unsigned long pfn, iter, found; | 7598 | unsigned long pfn, iter, found; |
| 7622 | 7599 | ||
| 7623 | /* | 7600 | /* |
| 7624 | * For avoiding noise data, lru_add_drain_all() should be called | 7601 | * TODO we could make this much more efficient by not checking every |
| 7625 | * If ZONE_MOVABLE, the zone never contains unmovable pages | 7602 | * page in the range if we know all of them are in MOVABLE_ZONE and |
| 7603 | * that the movable zone guarantees that pages are migratable but | ||
| 7604 | * the later is not the case right now unfortunatelly. E.g. movablecore | ||
| 7605 | * can still lead to having bootmem allocations in zone_movable. | ||
| 7626 | */ | 7606 | */ |
| 7627 | if (zone_idx(zone) == ZONE_MOVABLE) | ||
| 7628 | return false; | ||
| 7629 | 7607 | ||
| 7630 | /* | 7608 | /* |
| 7631 | * CMA allocations (alloc_contig_range) really need to mark isolate | 7609 | * CMA allocations (alloc_contig_range) really need to mark isolate |
| @@ -7646,7 +7624,7 @@ bool has_unmovable_pages(struct zone *zone, struct page *page, int count, | |||
| 7646 | page = pfn_to_page(check); | 7624 | page = pfn_to_page(check); |
| 7647 | 7625 | ||
| 7648 | if (PageReserved(page)) | 7626 | if (PageReserved(page)) |
| 7649 | return true; | 7627 | goto unmovable; |
| 7650 | 7628 | ||
| 7651 | /* | 7629 | /* |
| 7652 | * Hugepages are not in LRU lists, but they're movable. | 7630 | * Hugepages are not in LRU lists, but they're movable. |
| @@ -7696,9 +7674,12 @@ bool has_unmovable_pages(struct zone *zone, struct page *page, int count, | |||
| 7696 | * page at boot. | 7674 | * page at boot. |
| 7697 | */ | 7675 | */ |
| 7698 | if (found > count) | 7676 | if (found > count) |
| 7699 | return true; | 7677 | goto unmovable; |
| 7700 | } | 7678 | } |
| 7701 | return false; | 7679 | return false; |
| 7680 | unmovable: | ||
| 7681 | WARN_ON_ONCE(zone_idx(zone) == ZONE_MOVABLE); | ||
| 7682 | return true; | ||
| 7702 | } | 7683 | } |
| 7703 | 7684 | ||
| 7704 | bool is_pageblock_removable_nolock(struct page *page) | 7685 | bool is_pageblock_removable_nolock(struct page *page) |
| @@ -7951,7 +7932,7 @@ void free_contig_range(unsigned long pfn, unsigned nr_pages) | |||
| 7951 | } | 7932 | } |
| 7952 | #endif | 7933 | #endif |
| 7953 | 7934 | ||
| 7954 | #if defined CONFIG_MEMORY_HOTPLUG || defined CONFIG_CMA | 7935 | #ifdef CONFIG_MEMORY_HOTPLUG |
| 7955 | /* | 7936 | /* |
| 7956 | * The zone indicated has a new number of managed_pages; batch sizes and percpu | 7937 | * The zone indicated has a new number of managed_pages; batch sizes and percpu |
| 7957 | * page high values need to be recalulated. | 7938 | * page high values need to be recalulated. |
diff --git a/mm/swapfile.c b/mm/swapfile.c index cc2cf04d9018..78a015fcec3b 100644 --- a/mm/swapfile.c +++ b/mm/swapfile.c | |||
| @@ -3112,6 +3112,7 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags) | |||
| 3112 | unsigned long *frontswap_map = NULL; | 3112 | unsigned long *frontswap_map = NULL; |
| 3113 | struct page *page = NULL; | 3113 | struct page *page = NULL; |
| 3114 | struct inode *inode = NULL; | 3114 | struct inode *inode = NULL; |
| 3115 | bool inced_nr_rotate_swap = false; | ||
| 3115 | 3116 | ||
| 3116 | if (swap_flags & ~SWAP_FLAGS_VALID) | 3117 | if (swap_flags & ~SWAP_FLAGS_VALID) |
| 3117 | return -EINVAL; | 3118 | return -EINVAL; |
| @@ -3215,8 +3216,10 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags) | |||
| 3215 | cluster = per_cpu_ptr(p->percpu_cluster, cpu); | 3216 | cluster = per_cpu_ptr(p->percpu_cluster, cpu); |
| 3216 | cluster_set_null(&cluster->index); | 3217 | cluster_set_null(&cluster->index); |
| 3217 | } | 3218 | } |
| 3218 | } else | 3219 | } else { |
| 3219 | atomic_inc(&nr_rotate_swap); | 3220 | atomic_inc(&nr_rotate_swap); |
| 3221 | inced_nr_rotate_swap = true; | ||
| 3222 | } | ||
| 3220 | 3223 | ||
| 3221 | error = swap_cgroup_swapon(p->type, maxpages); | 3224 | error = swap_cgroup_swapon(p->type, maxpages); |
| 3222 | if (error) | 3225 | if (error) |
| @@ -3307,6 +3310,8 @@ bad_swap: | |||
| 3307 | vfree(swap_map); | 3310 | vfree(swap_map); |
| 3308 | kvfree(cluster_info); | 3311 | kvfree(cluster_info); |
| 3309 | kvfree(frontswap_map); | 3312 | kvfree(frontswap_map); |
| 3313 | if (inced_nr_rotate_swap) | ||
| 3314 | atomic_dec(&nr_rotate_swap); | ||
| 3310 | if (swap_file) { | 3315 | if (swap_file) { |
| 3311 | if (inode && S_ISREG(inode->i_mode)) { | 3316 | if (inode && S_ISREG(inode->i_mode)) { |
| 3312 | inode_unlock(inode); | 3317 | inode_unlock(inode); |
diff --git a/net/9p/Kconfig b/net/9p/Kconfig index e6014e0e51f7..46c39f7da444 100644 --- a/net/9p/Kconfig +++ b/net/9p/Kconfig | |||
| @@ -32,7 +32,7 @@ config NET_9P_XEN | |||
| 32 | 32 | ||
| 33 | 33 | ||
| 34 | config NET_9P_RDMA | 34 | config NET_9P_RDMA |
| 35 | depends on INET && INFINIBAND && INFINIBAND_ADDR_TRANS | 35 | depends on INET && INFINIBAND_ADDR_TRANS |
| 36 | tristate "9P RDMA Transport (Experimental)" | 36 | tristate "9P RDMA Transport (Experimental)" |
| 37 | help | 37 | help |
| 38 | This builds support for an RDMA transport. | 38 | This builds support for an RDMA transport. |
diff --git a/net/batman-adv/multicast.c b/net/batman-adv/multicast.c index a11d3d89f012..a35f597e8c8b 100644 --- a/net/batman-adv/multicast.c +++ b/net/batman-adv/multicast.c | |||
| @@ -1536,7 +1536,7 @@ out: | |||
| 1536 | 1536 | ||
| 1537 | if (!ret && primary_if) | 1537 | if (!ret && primary_if) |
| 1538 | *primary_if = hard_iface; | 1538 | *primary_if = hard_iface; |
| 1539 | else | 1539 | else if (hard_iface) |
| 1540 | batadv_hardif_put(hard_iface); | 1540 | batadv_hardif_put(hard_iface); |
| 1541 | 1541 | ||
| 1542 | return ret; | 1542 | return ret; |
diff --git a/net/batman-adv/translation-table.c b/net/batman-adv/translation-table.c index 0225616d5771..3986551397ca 100644 --- a/net/batman-adv/translation-table.c +++ b/net/batman-adv/translation-table.c | |||
| @@ -862,7 +862,7 @@ batadv_tt_prepare_tvlv_global_data(struct batadv_orig_node *orig_node, | |||
| 862 | struct batadv_orig_node_vlan *vlan; | 862 | struct batadv_orig_node_vlan *vlan; |
| 863 | u8 *tt_change_ptr; | 863 | u8 *tt_change_ptr; |
| 864 | 864 | ||
| 865 | rcu_read_lock(); | 865 | spin_lock_bh(&orig_node->vlan_list_lock); |
| 866 | hlist_for_each_entry_rcu(vlan, &orig_node->vlan_list, list) { | 866 | hlist_for_each_entry_rcu(vlan, &orig_node->vlan_list, list) { |
| 867 | num_vlan++; | 867 | num_vlan++; |
| 868 | num_entries += atomic_read(&vlan->tt.num_entries); | 868 | num_entries += atomic_read(&vlan->tt.num_entries); |
| @@ -900,7 +900,7 @@ batadv_tt_prepare_tvlv_global_data(struct batadv_orig_node *orig_node, | |||
| 900 | *tt_change = (struct batadv_tvlv_tt_change *)tt_change_ptr; | 900 | *tt_change = (struct batadv_tvlv_tt_change *)tt_change_ptr; |
| 901 | 901 | ||
| 902 | out: | 902 | out: |
| 903 | rcu_read_unlock(); | 903 | spin_unlock_bh(&orig_node->vlan_list_lock); |
| 904 | return tvlv_len; | 904 | return tvlv_len; |
| 905 | } | 905 | } |
| 906 | 906 | ||
| @@ -931,15 +931,20 @@ batadv_tt_prepare_tvlv_local_data(struct batadv_priv *bat_priv, | |||
| 931 | struct batadv_tvlv_tt_vlan_data *tt_vlan; | 931 | struct batadv_tvlv_tt_vlan_data *tt_vlan; |
| 932 | struct batadv_softif_vlan *vlan; | 932 | struct batadv_softif_vlan *vlan; |
| 933 | u16 num_vlan = 0; | 933 | u16 num_vlan = 0; |
| 934 | u16 num_entries = 0; | 934 | u16 vlan_entries = 0; |
| 935 | u16 total_entries = 0; | ||
| 935 | u16 tvlv_len; | 936 | u16 tvlv_len; |
| 936 | u8 *tt_change_ptr; | 937 | u8 *tt_change_ptr; |
| 937 | int change_offset; | 938 | int change_offset; |
| 938 | 939 | ||
| 939 | rcu_read_lock(); | 940 | spin_lock_bh(&bat_priv->softif_vlan_list_lock); |
| 940 | hlist_for_each_entry_rcu(vlan, &bat_priv->softif_vlan_list, list) { | 941 | hlist_for_each_entry_rcu(vlan, &bat_priv->softif_vlan_list, list) { |
| 942 | vlan_entries = atomic_read(&vlan->tt.num_entries); | ||
| 943 | if (vlan_entries < 1) | ||
| 944 | continue; | ||
| 945 | |||
| 941 | num_vlan++; | 946 | num_vlan++; |
| 942 | num_entries += atomic_read(&vlan->tt.num_entries); | 947 | total_entries += vlan_entries; |
| 943 | } | 948 | } |
| 944 | 949 | ||
| 945 | change_offset = sizeof(**tt_data); | 950 | change_offset = sizeof(**tt_data); |
| @@ -947,7 +952,7 @@ batadv_tt_prepare_tvlv_local_data(struct batadv_priv *bat_priv, | |||
| 947 | 952 | ||
| 948 | /* if tt_len is negative, allocate the space needed by the full table */ | 953 | /* if tt_len is negative, allocate the space needed by the full table */ |
| 949 | if (*tt_len < 0) | 954 | if (*tt_len < 0) |
| 950 | *tt_len = batadv_tt_len(num_entries); | 955 | *tt_len = batadv_tt_len(total_entries); |
| 951 | 956 | ||
| 952 | tvlv_len = *tt_len; | 957 | tvlv_len = *tt_len; |
| 953 | tvlv_len += change_offset; | 958 | tvlv_len += change_offset; |
| @@ -964,6 +969,10 @@ batadv_tt_prepare_tvlv_local_data(struct batadv_priv *bat_priv, | |||
| 964 | 969 | ||
| 965 | tt_vlan = (struct batadv_tvlv_tt_vlan_data *)(*tt_data + 1); | 970 | tt_vlan = (struct batadv_tvlv_tt_vlan_data *)(*tt_data + 1); |
| 966 | hlist_for_each_entry_rcu(vlan, &bat_priv->softif_vlan_list, list) { | 971 | hlist_for_each_entry_rcu(vlan, &bat_priv->softif_vlan_list, list) { |
| 972 | vlan_entries = atomic_read(&vlan->tt.num_entries); | ||
| 973 | if (vlan_entries < 1) | ||
| 974 | continue; | ||
| 975 | |||
| 967 | tt_vlan->vid = htons(vlan->vid); | 976 | tt_vlan->vid = htons(vlan->vid); |
| 968 | tt_vlan->crc = htonl(vlan->tt.crc); | 977 | tt_vlan->crc = htonl(vlan->tt.crc); |
| 969 | 978 | ||
| @@ -974,7 +983,7 @@ batadv_tt_prepare_tvlv_local_data(struct batadv_priv *bat_priv, | |||
| 974 | *tt_change = (struct batadv_tvlv_tt_change *)tt_change_ptr; | 983 | *tt_change = (struct batadv_tvlv_tt_change *)tt_change_ptr; |
| 975 | 984 | ||
| 976 | out: | 985 | out: |
| 977 | rcu_read_unlock(); | 986 | spin_unlock_bh(&bat_priv->softif_vlan_list_lock); |
| 978 | return tvlv_len; | 987 | return tvlv_len; |
| 979 | } | 988 | } |
| 980 | 989 | ||
| @@ -1538,6 +1547,8 @@ batadv_tt_global_orig_entry_find(const struct batadv_tt_global_entry *entry, | |||
| 1538 | * handled by a given originator | 1547 | * handled by a given originator |
| 1539 | * @entry: the TT global entry to check | 1548 | * @entry: the TT global entry to check |
| 1540 | * @orig_node: the originator to search in the list | 1549 | * @orig_node: the originator to search in the list |
| 1550 | * @flags: a pointer to store TT flags for the given @entry received | ||
| 1551 | * from @orig_node | ||
| 1541 | * | 1552 | * |
| 1542 | * find out if an orig_node is already in the list of a tt_global_entry. | 1553 | * find out if an orig_node is already in the list of a tt_global_entry. |
| 1543 | * | 1554 | * |
| @@ -1545,7 +1556,8 @@ batadv_tt_global_orig_entry_find(const struct batadv_tt_global_entry *entry, | |||
| 1545 | */ | 1556 | */ |
| 1546 | static bool | 1557 | static bool |
| 1547 | batadv_tt_global_entry_has_orig(const struct batadv_tt_global_entry *entry, | 1558 | batadv_tt_global_entry_has_orig(const struct batadv_tt_global_entry *entry, |
| 1548 | const struct batadv_orig_node *orig_node) | 1559 | const struct batadv_orig_node *orig_node, |
| 1560 | u8 *flags) | ||
| 1549 | { | 1561 | { |
| 1550 | struct batadv_tt_orig_list_entry *orig_entry; | 1562 | struct batadv_tt_orig_list_entry *orig_entry; |
| 1551 | bool found = false; | 1563 | bool found = false; |
| @@ -1553,6 +1565,10 @@ batadv_tt_global_entry_has_orig(const struct batadv_tt_global_entry *entry, | |||
| 1553 | orig_entry = batadv_tt_global_orig_entry_find(entry, orig_node); | 1565 | orig_entry = batadv_tt_global_orig_entry_find(entry, orig_node); |
| 1554 | if (orig_entry) { | 1566 | if (orig_entry) { |
| 1555 | found = true; | 1567 | found = true; |
| 1568 | |||
| 1569 | if (flags) | ||
| 1570 | *flags = orig_entry->flags; | ||
| 1571 | |||
| 1556 | batadv_tt_orig_list_entry_put(orig_entry); | 1572 | batadv_tt_orig_list_entry_put(orig_entry); |
| 1557 | } | 1573 | } |
| 1558 | 1574 | ||
| @@ -1731,7 +1747,7 @@ static bool batadv_tt_global_add(struct batadv_priv *bat_priv, | |||
| 1731 | if (!(common->flags & BATADV_TT_CLIENT_TEMP)) | 1747 | if (!(common->flags & BATADV_TT_CLIENT_TEMP)) |
| 1732 | goto out; | 1748 | goto out; |
| 1733 | if (batadv_tt_global_entry_has_orig(tt_global_entry, | 1749 | if (batadv_tt_global_entry_has_orig(tt_global_entry, |
| 1734 | orig_node)) | 1750 | orig_node, NULL)) |
| 1735 | goto out_remove; | 1751 | goto out_remove; |
| 1736 | batadv_tt_global_del_orig_list(tt_global_entry); | 1752 | batadv_tt_global_del_orig_list(tt_global_entry); |
| 1737 | goto add_orig_entry; | 1753 | goto add_orig_entry; |
| @@ -2880,23 +2896,46 @@ unlock: | |||
| 2880 | } | 2896 | } |
| 2881 | 2897 | ||
| 2882 | /** | 2898 | /** |
| 2883 | * batadv_tt_local_valid() - verify that given tt entry is a valid one | 2899 | * batadv_tt_local_valid() - verify local tt entry and get flags |
| 2884 | * @entry_ptr: to be checked local tt entry | 2900 | * @entry_ptr: to be checked local tt entry |
| 2885 | * @data_ptr: not used but definition required to satisfy the callback prototype | 2901 | * @data_ptr: not used but definition required to satisfy the callback prototype |
| 2902 | * @flags: a pointer to store TT flags for this client to | ||
| 2903 | * | ||
| 2904 | * Checks the validity of the given local TT entry. If it is, then the provided | ||
| 2905 | * flags pointer is updated. | ||
| 2886 | * | 2906 | * |
| 2887 | * Return: true if the entry is a valid, false otherwise. | 2907 | * Return: true if the entry is a valid, false otherwise. |
| 2888 | */ | 2908 | */ |
| 2889 | static bool batadv_tt_local_valid(const void *entry_ptr, const void *data_ptr) | 2909 | static bool batadv_tt_local_valid(const void *entry_ptr, |
| 2910 | const void *data_ptr, | ||
| 2911 | u8 *flags) | ||
| 2890 | { | 2912 | { |
| 2891 | const struct batadv_tt_common_entry *tt_common_entry = entry_ptr; | 2913 | const struct batadv_tt_common_entry *tt_common_entry = entry_ptr; |
| 2892 | 2914 | ||
| 2893 | if (tt_common_entry->flags & BATADV_TT_CLIENT_NEW) | 2915 | if (tt_common_entry->flags & BATADV_TT_CLIENT_NEW) |
| 2894 | return false; | 2916 | return false; |
| 2917 | |||
| 2918 | if (flags) | ||
| 2919 | *flags = tt_common_entry->flags; | ||
| 2920 | |||
| 2895 | return true; | 2921 | return true; |
| 2896 | } | 2922 | } |
| 2897 | 2923 | ||
| 2924 | /** | ||
| 2925 | * batadv_tt_global_valid() - verify global tt entry and get flags | ||
| 2926 | * @entry_ptr: to be checked global tt entry | ||
| 2927 | * @data_ptr: an orig_node object (may be NULL) | ||
| 2928 | * @flags: a pointer to store TT flags for this client to | ||
| 2929 | * | ||
| 2930 | * Checks the validity of the given global TT entry. If it is, then the provided | ||
| 2931 | * flags pointer is updated either with the common (summed) TT flags if data_ptr | ||
| 2932 | * is NULL or the specific, per originator TT flags otherwise. | ||
| 2933 | * | ||
| 2934 | * Return: true if the entry is a valid, false otherwise. | ||
| 2935 | */ | ||
| 2898 | static bool batadv_tt_global_valid(const void *entry_ptr, | 2936 | static bool batadv_tt_global_valid(const void *entry_ptr, |
| 2899 | const void *data_ptr) | 2937 | const void *data_ptr, |
| 2938 | u8 *flags) | ||
| 2900 | { | 2939 | { |
| 2901 | const struct batadv_tt_common_entry *tt_common_entry = entry_ptr; | 2940 | const struct batadv_tt_common_entry *tt_common_entry = entry_ptr; |
| 2902 | const struct batadv_tt_global_entry *tt_global_entry; | 2941 | const struct batadv_tt_global_entry *tt_global_entry; |
| @@ -2910,7 +2949,8 @@ static bool batadv_tt_global_valid(const void *entry_ptr, | |||
| 2910 | struct batadv_tt_global_entry, | 2949 | struct batadv_tt_global_entry, |
| 2911 | common); | 2950 | common); |
| 2912 | 2951 | ||
| 2913 | return batadv_tt_global_entry_has_orig(tt_global_entry, orig_node); | 2952 | return batadv_tt_global_entry_has_orig(tt_global_entry, orig_node, |
| 2953 | flags); | ||
| 2914 | } | 2954 | } |
| 2915 | 2955 | ||
| 2916 | /** | 2956 | /** |
| @@ -2920,25 +2960,34 @@ static bool batadv_tt_global_valid(const void *entry_ptr, | |||
| 2920 | * @hash: hash table containing the tt entries | 2960 | * @hash: hash table containing the tt entries |
| 2921 | * @tt_len: expected tvlv tt data buffer length in number of bytes | 2961 | * @tt_len: expected tvlv tt data buffer length in number of bytes |
| 2922 | * @tvlv_buff: pointer to the buffer to fill with the TT data | 2962 | * @tvlv_buff: pointer to the buffer to fill with the TT data |
| 2923 | * @valid_cb: function to filter tt change entries | 2963 | * @valid_cb: function to filter tt change entries and to return TT flags |
| 2924 | * @cb_data: data passed to the filter function as argument | 2964 | * @cb_data: data passed to the filter function as argument |
| 2965 | * | ||
| 2966 | * Fills the tvlv buff with the tt entries from the specified hash. If valid_cb | ||
| 2967 | * is not provided then this becomes a no-op. | ||
| 2925 | */ | 2968 | */ |
| 2926 | static void batadv_tt_tvlv_generate(struct batadv_priv *bat_priv, | 2969 | static void batadv_tt_tvlv_generate(struct batadv_priv *bat_priv, |
| 2927 | struct batadv_hashtable *hash, | 2970 | struct batadv_hashtable *hash, |
| 2928 | void *tvlv_buff, u16 tt_len, | 2971 | void *tvlv_buff, u16 tt_len, |
| 2929 | bool (*valid_cb)(const void *, | 2972 | bool (*valid_cb)(const void *, |
| 2930 | const void *), | 2973 | const void *, |
| 2974 | u8 *flags), | ||
| 2931 | void *cb_data) | 2975 | void *cb_data) |
| 2932 | { | 2976 | { |
| 2933 | struct batadv_tt_common_entry *tt_common_entry; | 2977 | struct batadv_tt_common_entry *tt_common_entry; |
| 2934 | struct batadv_tvlv_tt_change *tt_change; | 2978 | struct batadv_tvlv_tt_change *tt_change; |
| 2935 | struct hlist_head *head; | 2979 | struct hlist_head *head; |
| 2936 | u16 tt_tot, tt_num_entries = 0; | 2980 | u16 tt_tot, tt_num_entries = 0; |
| 2981 | u8 flags; | ||
| 2982 | bool ret; | ||
| 2937 | u32 i; | 2983 | u32 i; |
| 2938 | 2984 | ||
| 2939 | tt_tot = batadv_tt_entries(tt_len); | 2985 | tt_tot = batadv_tt_entries(tt_len); |
| 2940 | tt_change = (struct batadv_tvlv_tt_change *)tvlv_buff; | 2986 | tt_change = (struct batadv_tvlv_tt_change *)tvlv_buff; |
| 2941 | 2987 | ||
| 2988 | if (!valid_cb) | ||
| 2989 | return; | ||
| 2990 | |||
| 2942 | rcu_read_lock(); | 2991 | rcu_read_lock(); |
| 2943 | for (i = 0; i < hash->size; i++) { | 2992 | for (i = 0; i < hash->size; i++) { |
| 2944 | head = &hash->table[i]; | 2993 | head = &hash->table[i]; |
| @@ -2948,11 +2997,12 @@ static void batadv_tt_tvlv_generate(struct batadv_priv *bat_priv, | |||
| 2948 | if (tt_tot == tt_num_entries) | 2997 | if (tt_tot == tt_num_entries) |
| 2949 | break; | 2998 | break; |
| 2950 | 2999 | ||
| 2951 | if ((valid_cb) && (!valid_cb(tt_common_entry, cb_data))) | 3000 | ret = valid_cb(tt_common_entry, cb_data, &flags); |
| 3001 | if (!ret) | ||
| 2952 | continue; | 3002 | continue; |
| 2953 | 3003 | ||
| 2954 | ether_addr_copy(tt_change->addr, tt_common_entry->addr); | 3004 | ether_addr_copy(tt_change->addr, tt_common_entry->addr); |
| 2955 | tt_change->flags = tt_common_entry->flags; | 3005 | tt_change->flags = flags; |
| 2956 | tt_change->vid = htons(tt_common_entry->vid); | 3006 | tt_change->vid = htons(tt_common_entry->vid); |
| 2957 | memset(tt_change->reserved, 0, | 3007 | memset(tt_change->reserved, 0, |
| 2958 | sizeof(tt_change->reserved)); | 3008 | sizeof(tt_change->reserved)); |
diff --git a/net/bridge/netfilter/ebt_stp.c b/net/bridge/netfilter/ebt_stp.c index 47ba98db145d..46c1fe7637ea 100644 --- a/net/bridge/netfilter/ebt_stp.c +++ b/net/bridge/netfilter/ebt_stp.c | |||
| @@ -161,8 +161,8 @@ static int ebt_stp_mt_check(const struct xt_mtchk_param *par) | |||
| 161 | /* Make sure the match only receives stp frames */ | 161 | /* Make sure the match only receives stp frames */ |
| 162 | if (!par->nft_compat && | 162 | if (!par->nft_compat && |
| 163 | (!ether_addr_equal(e->destmac, eth_stp_addr) || | 163 | (!ether_addr_equal(e->destmac, eth_stp_addr) || |
| 164 | !is_broadcast_ether_addr(e->destmsk) || | 164 | !(e->bitmask & EBT_DESTMAC) || |
| 165 | !(e->bitmask & EBT_DESTMAC))) | 165 | !is_broadcast_ether_addr(e->destmsk))) |
| 166 | return -EINVAL; | 166 | return -EINVAL; |
| 167 | 167 | ||
| 168 | return 0; | 168 | return 0; |
diff --git a/net/core/dev.c b/net/core/dev.c index af0558b00c6c..2af787e8b130 100644 --- a/net/core/dev.c +++ b/net/core/dev.c | |||
| @@ -2124,7 +2124,7 @@ static bool remove_xps_queue_cpu(struct net_device *dev, | |||
| 2124 | int i, j; | 2124 | int i, j; |
| 2125 | 2125 | ||
| 2126 | for (i = count, j = offset; i--; j++) { | 2126 | for (i = count, j = offset; i--; j++) { |
| 2127 | if (!remove_xps_queue(dev_maps, cpu, j)) | 2127 | if (!remove_xps_queue(dev_maps, tci, j)) |
| 2128 | break; | 2128 | break; |
| 2129 | } | 2129 | } |
| 2130 | 2130 | ||
diff --git a/net/core/filter.c b/net/core/filter.c index e77c30ca491d..201ff36b17a8 100644 --- a/net/core/filter.c +++ b/net/core/filter.c | |||
| @@ -481,11 +481,18 @@ do_pass: | |||
| 481 | 481 | ||
| 482 | #define BPF_EMIT_JMP \ | 482 | #define BPF_EMIT_JMP \ |
| 483 | do { \ | 483 | do { \ |
| 484 | const s32 off_min = S16_MIN, off_max = S16_MAX; \ | ||
| 485 | s32 off; \ | ||
| 486 | \ | ||
| 484 | if (target >= len || target < 0) \ | 487 | if (target >= len || target < 0) \ |
| 485 | goto err; \ | 488 | goto err; \ |
| 486 | insn->off = addrs ? addrs[target] - addrs[i] - 1 : 0; \ | 489 | off = addrs ? addrs[target] - addrs[i] - 1 : 0; \ |
| 487 | /* Adjust pc relative offset for 2nd or 3rd insn. */ \ | 490 | /* Adjust pc relative offset for 2nd or 3rd insn. */ \ |
| 488 | insn->off -= insn - tmp_insns; \ | 491 | off -= insn - tmp_insns; \ |
| 492 | /* Reject anything not fitting into insn->off. */ \ | ||
| 493 | if (off < off_min || off > off_max) \ | ||
| 494 | goto err; \ | ||
| 495 | insn->off = off; \ | ||
| 489 | } while (0) | 496 | } while (0) |
| 490 | 497 | ||
| 491 | case BPF_JMP | BPF_JA: | 498 | case BPF_JMP | BPF_JA: |
diff --git a/net/core/sock.c b/net/core/sock.c index 6444525f610c..3b6d02854e57 100644 --- a/net/core/sock.c +++ b/net/core/sock.c | |||
| @@ -1606,7 +1606,7 @@ static void __sk_free(struct sock *sk) | |||
| 1606 | if (likely(sk->sk_net_refcnt)) | 1606 | if (likely(sk->sk_net_refcnt)) |
| 1607 | sock_inuse_add(sock_net(sk), -1); | 1607 | sock_inuse_add(sock_net(sk), -1); |
| 1608 | 1608 | ||
| 1609 | if (unlikely(sock_diag_has_destroy_listeners(sk) && sk->sk_net_refcnt)) | 1609 | if (unlikely(sk->sk_net_refcnt && sock_diag_has_destroy_listeners(sk))) |
| 1610 | sock_diag_broadcast_destroy(sk); | 1610 | sock_diag_broadcast_destroy(sk); |
| 1611 | else | 1611 | else |
| 1612 | sk_destruct(sk); | 1612 | sk_destruct(sk); |
diff --git a/net/dccp/proto.c b/net/dccp/proto.c index 84cd4e3fd01b..0d56e36a6db7 100644 --- a/net/dccp/proto.c +++ b/net/dccp/proto.c | |||
| @@ -283,9 +283,7 @@ int dccp_disconnect(struct sock *sk, int flags) | |||
| 283 | 283 | ||
| 284 | dccp_clear_xmit_timers(sk); | 284 | dccp_clear_xmit_timers(sk); |
| 285 | ccid_hc_rx_delete(dp->dccps_hc_rx_ccid, sk); | 285 | ccid_hc_rx_delete(dp->dccps_hc_rx_ccid, sk); |
| 286 | ccid_hc_tx_delete(dp->dccps_hc_tx_ccid, sk); | ||
| 287 | dp->dccps_hc_rx_ccid = NULL; | 286 | dp->dccps_hc_rx_ccid = NULL; |
| 288 | dp->dccps_hc_tx_ccid = NULL; | ||
| 289 | 287 | ||
| 290 | __skb_queue_purge(&sk->sk_receive_queue); | 288 | __skb_queue_purge(&sk->sk_receive_queue); |
| 291 | __skb_queue_purge(&sk->sk_write_queue); | 289 | __skb_queue_purge(&sk->sk_write_queue); |
diff --git a/net/dsa/dsa2.c b/net/dsa/dsa2.c index adf50fbc4c13..47725250b4ca 100644 --- a/net/dsa/dsa2.c +++ b/net/dsa/dsa2.c | |||
| @@ -258,11 +258,13 @@ static void dsa_tree_teardown_default_cpu(struct dsa_switch_tree *dst) | |||
| 258 | static int dsa_port_setup(struct dsa_port *dp) | 258 | static int dsa_port_setup(struct dsa_port *dp) |
| 259 | { | 259 | { |
| 260 | struct dsa_switch *ds = dp->ds; | 260 | struct dsa_switch *ds = dp->ds; |
| 261 | int err; | 261 | int err = 0; |
| 262 | 262 | ||
| 263 | memset(&dp->devlink_port, 0, sizeof(dp->devlink_port)); | 263 | memset(&dp->devlink_port, 0, sizeof(dp->devlink_port)); |
| 264 | 264 | ||
| 265 | err = devlink_port_register(ds->devlink, &dp->devlink_port, dp->index); | 265 | if (dp->type != DSA_PORT_TYPE_UNUSED) |
| 266 | err = devlink_port_register(ds->devlink, &dp->devlink_port, | ||
| 267 | dp->index); | ||
| 266 | if (err) | 268 | if (err) |
| 267 | return err; | 269 | return err; |
| 268 | 270 | ||
| @@ -293,7 +295,8 @@ static int dsa_port_setup(struct dsa_port *dp) | |||
| 293 | 295 | ||
| 294 | static void dsa_port_teardown(struct dsa_port *dp) | 296 | static void dsa_port_teardown(struct dsa_port *dp) |
| 295 | { | 297 | { |
| 296 | devlink_port_unregister(&dp->devlink_port); | 298 | if (dp->type != DSA_PORT_TYPE_UNUSED) |
| 299 | devlink_port_unregister(&dp->devlink_port); | ||
| 297 | 300 | ||
| 298 | switch (dp->type) { | 301 | switch (dp->type) { |
| 299 | case DSA_PORT_TYPE_UNUSED: | 302 | case DSA_PORT_TYPE_UNUSED: |
diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c index f05afaf3235c..e66172aaf241 100644 --- a/net/ipv4/fib_frontend.c +++ b/net/ipv4/fib_frontend.c | |||
| @@ -326,10 +326,11 @@ static int __fib_validate_source(struct sk_buff *skb, __be32 src, __be32 dst, | |||
| 326 | u8 tos, int oif, struct net_device *dev, | 326 | u8 tos, int oif, struct net_device *dev, |
| 327 | int rpf, struct in_device *idev, u32 *itag) | 327 | int rpf, struct in_device *idev, u32 *itag) |
| 328 | { | 328 | { |
| 329 | struct net *net = dev_net(dev); | ||
| 330 | struct flow_keys flkeys; | ||
| 329 | int ret, no_addr; | 331 | int ret, no_addr; |
| 330 | struct fib_result res; | 332 | struct fib_result res; |
| 331 | struct flowi4 fl4; | 333 | struct flowi4 fl4; |
| 332 | struct net *net = dev_net(dev); | ||
| 333 | bool dev_match; | 334 | bool dev_match; |
| 334 | 335 | ||
| 335 | fl4.flowi4_oif = 0; | 336 | fl4.flowi4_oif = 0; |
| @@ -347,6 +348,11 @@ static int __fib_validate_source(struct sk_buff *skb, __be32 src, __be32 dst, | |||
| 347 | no_addr = idev->ifa_list == NULL; | 348 | no_addr = idev->ifa_list == NULL; |
| 348 | 349 | ||
| 349 | fl4.flowi4_mark = IN_DEV_SRC_VMARK(idev) ? skb->mark : 0; | 350 | fl4.flowi4_mark = IN_DEV_SRC_VMARK(idev) ? skb->mark : 0; |
| 351 | if (!fib4_rules_early_flow_dissect(net, skb, &fl4, &flkeys)) { | ||
| 352 | fl4.flowi4_proto = 0; | ||
| 353 | fl4.fl4_sport = 0; | ||
| 354 | fl4.fl4_dport = 0; | ||
| 355 | } | ||
| 350 | 356 | ||
| 351 | trace_fib_validate_source(dev, &fl4); | 357 | trace_fib_validate_source(dev, &fl4); |
| 352 | 358 | ||
| @@ -643,6 +649,7 @@ const struct nla_policy rtm_ipv4_policy[RTA_MAX + 1] = { | |||
| 643 | [RTA_ENCAP] = { .type = NLA_NESTED }, | 649 | [RTA_ENCAP] = { .type = NLA_NESTED }, |
| 644 | [RTA_UID] = { .type = NLA_U32 }, | 650 | [RTA_UID] = { .type = NLA_U32 }, |
| 645 | [RTA_MARK] = { .type = NLA_U32 }, | 651 | [RTA_MARK] = { .type = NLA_U32 }, |
| 652 | [RTA_TABLE] = { .type = NLA_U32 }, | ||
| 646 | }; | 653 | }; |
| 647 | 654 | ||
| 648 | static int rtm_to_fib_config(struct net *net, struct sk_buff *skb, | 655 | static int rtm_to_fib_config(struct net *net, struct sk_buff *skb, |
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c index 9c169bb2444d..f200b304f76c 100644 --- a/net/ipv4/ip_gre.c +++ b/net/ipv4/ip_gre.c | |||
| @@ -722,10 +722,12 @@ static netdev_tx_t erspan_xmit(struct sk_buff *skb, | |||
| 722 | erspan_build_header(skb, ntohl(tunnel->parms.o_key), | 722 | erspan_build_header(skb, ntohl(tunnel->parms.o_key), |
| 723 | tunnel->index, | 723 | tunnel->index, |
| 724 | truncate, true); | 724 | truncate, true); |
| 725 | else | 725 | else if (tunnel->erspan_ver == 2) |
| 726 | erspan_build_header_v2(skb, ntohl(tunnel->parms.o_key), | 726 | erspan_build_header_v2(skb, ntohl(tunnel->parms.o_key), |
| 727 | tunnel->dir, tunnel->hwid, | 727 | tunnel->dir, tunnel->hwid, |
| 728 | truncate, true); | 728 | truncate, true); |
| 729 | else | ||
| 730 | goto free_skb; | ||
| 729 | 731 | ||
| 730 | tunnel->parms.o_flags &= ~TUNNEL_KEY; | 732 | tunnel->parms.o_flags &= ~TUNNEL_KEY; |
| 731 | __gre_xmit(skb, dev, &tunnel->parms.iph, htons(ETH_P_ERSPAN)); | 733 | __gre_xmit(skb, dev, &tunnel->parms.iph, htons(ETH_P_ERSPAN)); |
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c index 83c73bab2c3d..d54abc097800 100644 --- a/net/ipv4/ip_output.c +++ b/net/ipv4/ip_output.c | |||
| @@ -1045,7 +1045,8 @@ alloc_new_skb: | |||
| 1045 | if (copy > length) | 1045 | if (copy > length) |
| 1046 | copy = length; | 1046 | copy = length; |
| 1047 | 1047 | ||
| 1048 | if (!(rt->dst.dev->features&NETIF_F_SG)) { | 1048 | if (!(rt->dst.dev->features&NETIF_F_SG) && |
| 1049 | skb_tailroom(skb) >= copy) { | ||
| 1049 | unsigned int off; | 1050 | unsigned int off; |
| 1050 | 1051 | ||
| 1051 | off = skb->len; | 1052 | off = skb->len; |
diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c index 5ad2d8ed3a3f..57bbb060faaf 100644 --- a/net/ipv4/ip_sockglue.c +++ b/net/ipv4/ip_sockglue.c | |||
| @@ -505,8 +505,6 @@ int ip_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len) | |||
| 505 | int err; | 505 | int err; |
| 506 | int copied; | 506 | int copied; |
| 507 | 507 | ||
| 508 | WARN_ON_ONCE(sk->sk_family == AF_INET6); | ||
| 509 | |||
| 510 | err = -EAGAIN; | 508 | err = -EAGAIN; |
| 511 | skb = sock_dequeue_err_skb(sk); | 509 | skb = sock_dequeue_err_skb(sk); |
| 512 | if (!skb) | 510 | if (!skb) |
diff --git a/net/ipv4/ipmr_base.c b/net/ipv4/ipmr_base.c index 4fe97723b53f..30221701614c 100644 --- a/net/ipv4/ipmr_base.c +++ b/net/ipv4/ipmr_base.c | |||
| @@ -43,7 +43,10 @@ mr_table_alloc(struct net *net, u32 id, | |||
| 43 | write_pnet(&mrt->net, net); | 43 | write_pnet(&mrt->net, net); |
| 44 | 44 | ||
| 45 | mrt->ops = *ops; | 45 | mrt->ops = *ops; |
| 46 | rhltable_init(&mrt->mfc_hash, mrt->ops.rht_params); | 46 | if (rhltable_init(&mrt->mfc_hash, mrt->ops.rht_params)) { |
| 47 | kfree(mrt); | ||
| 48 | return NULL; | ||
| 49 | } | ||
| 47 | INIT_LIST_HEAD(&mrt->mfc_cache_list); | 50 | INIT_LIST_HEAD(&mrt->mfc_cache_list); |
| 48 | INIT_LIST_HEAD(&mrt->mfc_unres_queue); | 51 | INIT_LIST_HEAD(&mrt->mfc_unres_queue); |
| 49 | 52 | ||
diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c index 44b308d93ec2..e85f35b89c49 100644 --- a/net/ipv4/netfilter/ip_tables.c +++ b/net/ipv4/netfilter/ip_tables.c | |||
| @@ -34,6 +34,7 @@ | |||
| 34 | MODULE_LICENSE("GPL"); | 34 | MODULE_LICENSE("GPL"); |
| 35 | MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>"); | 35 | MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>"); |
| 36 | MODULE_DESCRIPTION("IPv4 packet filter"); | 36 | MODULE_DESCRIPTION("IPv4 packet filter"); |
| 37 | MODULE_ALIAS("ipt_icmp"); | ||
| 37 | 38 | ||
| 38 | void *ipt_alloc_initial_table(const struct xt_table *info) | 39 | void *ipt_alloc_initial_table(const struct xt_table *info) |
| 39 | { | 40 | { |
diff --git a/net/ipv4/netfilter/ipt_rpfilter.c b/net/ipv4/netfilter/ipt_rpfilter.c index fd01f13c896a..12843c9ef142 100644 --- a/net/ipv4/netfilter/ipt_rpfilter.c +++ b/net/ipv4/netfilter/ipt_rpfilter.c | |||
| @@ -89,10 +89,10 @@ static bool rpfilter_mt(const struct sk_buff *skb, struct xt_action_param *par) | |||
| 89 | return true ^ invert; | 89 | return true ^ invert; |
| 90 | } | 90 | } |
| 91 | 91 | ||
| 92 | memset(&flow, 0, sizeof(flow)); | ||
| 92 | flow.flowi4_iif = LOOPBACK_IFINDEX; | 93 | flow.flowi4_iif = LOOPBACK_IFINDEX; |
| 93 | flow.daddr = iph->saddr; | 94 | flow.daddr = iph->saddr; |
| 94 | flow.saddr = rpfilter_get_saddr(iph->daddr); | 95 | flow.saddr = rpfilter_get_saddr(iph->daddr); |
| 95 | flow.flowi4_oif = 0; | ||
| 96 | flow.flowi4_mark = info->flags & XT_RPFILTER_VALID_MARK ? skb->mark : 0; | 96 | flow.flowi4_mark = info->flags & XT_RPFILTER_VALID_MARK ? skb->mark : 0; |
| 97 | flow.flowi4_tos = RT_TOS(iph->tos); | 97 | flow.flowi4_tos = RT_TOS(iph->tos); |
| 98 | flow.flowi4_scope = RT_SCOPE_UNIVERSE; | 98 | flow.flowi4_scope = RT_SCOPE_UNIVERSE; |
diff --git a/net/ipv4/route.c b/net/ipv4/route.c index 29268efad247..2cfa1b518f8d 100644 --- a/net/ipv4/route.c +++ b/net/ipv4/route.c | |||
| @@ -1961,8 +1961,13 @@ static int ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr, | |||
| 1961 | fl4.saddr = saddr; | 1961 | fl4.saddr = saddr; |
| 1962 | fl4.flowi4_uid = sock_net_uid(net, NULL); | 1962 | fl4.flowi4_uid = sock_net_uid(net, NULL); |
| 1963 | 1963 | ||
| 1964 | if (fib4_rules_early_flow_dissect(net, skb, &fl4, &_flkeys)) | 1964 | if (fib4_rules_early_flow_dissect(net, skb, &fl4, &_flkeys)) { |
| 1965 | flkeys = &_flkeys; | 1965 | flkeys = &_flkeys; |
| 1966 | } else { | ||
| 1967 | fl4.flowi4_proto = 0; | ||
| 1968 | fl4.fl4_sport = 0; | ||
| 1969 | fl4.fl4_dport = 0; | ||
| 1970 | } | ||
| 1966 | 1971 | ||
| 1967 | err = fib_lookup(net, &fl4, res, 0); | 1972 | err = fib_lookup(net, &fl4, res, 0); |
| 1968 | if (err != 0) { | 1973 | if (err != 0) { |
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index 383cac0ff0ec..d07e34f8e309 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c | |||
| @@ -2833,8 +2833,10 @@ int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs) | |||
| 2833 | return -EBUSY; | 2833 | return -EBUSY; |
| 2834 | 2834 | ||
| 2835 | if (before(TCP_SKB_CB(skb)->seq, tp->snd_una)) { | 2835 | if (before(TCP_SKB_CB(skb)->seq, tp->snd_una)) { |
| 2836 | if (before(TCP_SKB_CB(skb)->end_seq, tp->snd_una)) | 2836 | if (unlikely(before(TCP_SKB_CB(skb)->end_seq, tp->snd_una))) { |
| 2837 | BUG(); | 2837 | WARN_ON_ONCE(1); |
| 2838 | return -EINVAL; | ||
| 2839 | } | ||
| 2838 | if (tcp_trim_head(sk, skb, tp->snd_una - TCP_SKB_CB(skb)->seq)) | 2840 | if (tcp_trim_head(sk, skb, tp->snd_una - TCP_SKB_CB(skb)->seq)) |
| 2839 | return -ENOMEM; | 2841 | return -ENOMEM; |
| 2840 | } | 2842 | } |
| @@ -3342,6 +3344,7 @@ static void tcp_connect_init(struct sock *sk) | |||
| 3342 | sock_reset_flag(sk, SOCK_DONE); | 3344 | sock_reset_flag(sk, SOCK_DONE); |
| 3343 | tp->snd_wnd = 0; | 3345 | tp->snd_wnd = 0; |
| 3344 | tcp_init_wl(tp, 0); | 3346 | tcp_init_wl(tp, 0); |
| 3347 | tcp_write_queue_purge(sk); | ||
| 3345 | tp->snd_una = tp->write_seq; | 3348 | tp->snd_una = tp->write_seq; |
| 3346 | tp->snd_sml = tp->write_seq; | 3349 | tp->snd_sml = tp->write_seq; |
| 3347 | tp->snd_up = tp->write_seq; | 3350 | tp->snd_up = tp->write_seq; |
diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c index 69727bc168cb..458de353f5d9 100644 --- a/net/ipv6/ip6_gre.c +++ b/net/ipv6/ip6_gre.c | |||
| @@ -71,6 +71,7 @@ struct ip6gre_net { | |||
| 71 | struct ip6_tnl __rcu *tunnels[4][IP6_GRE_HASH_SIZE]; | 71 | struct ip6_tnl __rcu *tunnels[4][IP6_GRE_HASH_SIZE]; |
| 72 | 72 | ||
| 73 | struct ip6_tnl __rcu *collect_md_tun; | 73 | struct ip6_tnl __rcu *collect_md_tun; |
| 74 | struct ip6_tnl __rcu *collect_md_tun_erspan; | ||
| 74 | struct net_device *fb_tunnel_dev; | 75 | struct net_device *fb_tunnel_dev; |
| 75 | }; | 76 | }; |
| 76 | 77 | ||
| @@ -81,6 +82,7 @@ static int ip6gre_tunnel_init(struct net_device *dev); | |||
| 81 | static void ip6gre_tunnel_setup(struct net_device *dev); | 82 | static void ip6gre_tunnel_setup(struct net_device *dev); |
| 82 | static void ip6gre_tunnel_link(struct ip6gre_net *ign, struct ip6_tnl *t); | 83 | static void ip6gre_tunnel_link(struct ip6gre_net *ign, struct ip6_tnl *t); |
| 83 | static void ip6gre_tnl_link_config(struct ip6_tnl *t, int set_mtu); | 84 | static void ip6gre_tnl_link_config(struct ip6_tnl *t, int set_mtu); |
| 85 | static void ip6erspan_tnl_link_config(struct ip6_tnl *t, int set_mtu); | ||
| 84 | 86 | ||
| 85 | /* Tunnel hash table */ | 87 | /* Tunnel hash table */ |
| 86 | 88 | ||
| @@ -232,7 +234,12 @@ static struct ip6_tnl *ip6gre_tunnel_lookup(struct net_device *dev, | |||
| 232 | if (cand) | 234 | if (cand) |
| 233 | return cand; | 235 | return cand; |
| 234 | 236 | ||
| 235 | t = rcu_dereference(ign->collect_md_tun); | 237 | if (gre_proto == htons(ETH_P_ERSPAN) || |
| 238 | gre_proto == htons(ETH_P_ERSPAN2)) | ||
| 239 | t = rcu_dereference(ign->collect_md_tun_erspan); | ||
| 240 | else | ||
| 241 | t = rcu_dereference(ign->collect_md_tun); | ||
| 242 | |||
| 236 | if (t && t->dev->flags & IFF_UP) | 243 | if (t && t->dev->flags & IFF_UP) |
| 237 | return t; | 244 | return t; |
| 238 | 245 | ||
| @@ -261,6 +268,31 @@ static struct ip6_tnl __rcu **__ip6gre_bucket(struct ip6gre_net *ign, | |||
| 261 | return &ign->tunnels[prio][h]; | 268 | return &ign->tunnels[prio][h]; |
| 262 | } | 269 | } |
| 263 | 270 | ||
| 271 | static void ip6gre_tunnel_link_md(struct ip6gre_net *ign, struct ip6_tnl *t) | ||
| 272 | { | ||
| 273 | if (t->parms.collect_md) | ||
| 274 | rcu_assign_pointer(ign->collect_md_tun, t); | ||
| 275 | } | ||
| 276 | |||
| 277 | static void ip6erspan_tunnel_link_md(struct ip6gre_net *ign, struct ip6_tnl *t) | ||
| 278 | { | ||
| 279 | if (t->parms.collect_md) | ||
| 280 | rcu_assign_pointer(ign->collect_md_tun_erspan, t); | ||
| 281 | } | ||
| 282 | |||
| 283 | static void ip6gre_tunnel_unlink_md(struct ip6gre_net *ign, struct ip6_tnl *t) | ||
| 284 | { | ||
| 285 | if (t->parms.collect_md) | ||
| 286 | rcu_assign_pointer(ign->collect_md_tun, NULL); | ||
| 287 | } | ||
| 288 | |||
| 289 | static void ip6erspan_tunnel_unlink_md(struct ip6gre_net *ign, | ||
| 290 | struct ip6_tnl *t) | ||
| 291 | { | ||
| 292 | if (t->parms.collect_md) | ||
| 293 | rcu_assign_pointer(ign->collect_md_tun_erspan, NULL); | ||
| 294 | } | ||
| 295 | |||
| 264 | static inline struct ip6_tnl __rcu **ip6gre_bucket(struct ip6gre_net *ign, | 296 | static inline struct ip6_tnl __rcu **ip6gre_bucket(struct ip6gre_net *ign, |
| 265 | const struct ip6_tnl *t) | 297 | const struct ip6_tnl *t) |
| 266 | { | 298 | { |
| @@ -271,9 +303,6 @@ static void ip6gre_tunnel_link(struct ip6gre_net *ign, struct ip6_tnl *t) | |||
| 271 | { | 303 | { |
| 272 | struct ip6_tnl __rcu **tp = ip6gre_bucket(ign, t); | 304 | struct ip6_tnl __rcu **tp = ip6gre_bucket(ign, t); |
| 273 | 305 | ||
| 274 | if (t->parms.collect_md) | ||
| 275 | rcu_assign_pointer(ign->collect_md_tun, t); | ||
| 276 | |||
| 277 | rcu_assign_pointer(t->next, rtnl_dereference(*tp)); | 306 | rcu_assign_pointer(t->next, rtnl_dereference(*tp)); |
| 278 | rcu_assign_pointer(*tp, t); | 307 | rcu_assign_pointer(*tp, t); |
| 279 | } | 308 | } |
| @@ -283,9 +312,6 @@ static void ip6gre_tunnel_unlink(struct ip6gre_net *ign, struct ip6_tnl *t) | |||
| 283 | struct ip6_tnl __rcu **tp; | 312 | struct ip6_tnl __rcu **tp; |
| 284 | struct ip6_tnl *iter; | 313 | struct ip6_tnl *iter; |
| 285 | 314 | ||
| 286 | if (t->parms.collect_md) | ||
| 287 | rcu_assign_pointer(ign->collect_md_tun, NULL); | ||
| 288 | |||
| 289 | for (tp = ip6gre_bucket(ign, t); | 315 | for (tp = ip6gre_bucket(ign, t); |
| 290 | (iter = rtnl_dereference(*tp)) != NULL; | 316 | (iter = rtnl_dereference(*tp)) != NULL; |
| 291 | tp = &iter->next) { | 317 | tp = &iter->next) { |
| @@ -374,11 +400,23 @@ failed_free: | |||
| 374 | return NULL; | 400 | return NULL; |
| 375 | } | 401 | } |
| 376 | 402 | ||
| 403 | static void ip6erspan_tunnel_uninit(struct net_device *dev) | ||
| 404 | { | ||
| 405 | struct ip6_tnl *t = netdev_priv(dev); | ||
| 406 | struct ip6gre_net *ign = net_generic(t->net, ip6gre_net_id); | ||
| 407 | |||
| 408 | ip6erspan_tunnel_unlink_md(ign, t); | ||
| 409 | ip6gre_tunnel_unlink(ign, t); | ||
| 410 | dst_cache_reset(&t->dst_cache); | ||
| 411 | dev_put(dev); | ||
| 412 | } | ||
| 413 | |||
| 377 | static void ip6gre_tunnel_uninit(struct net_device *dev) | 414 | static void ip6gre_tunnel_uninit(struct net_device *dev) |
| 378 | { | 415 | { |
| 379 | struct ip6_tnl *t = netdev_priv(dev); | 416 | struct ip6_tnl *t = netdev_priv(dev); |
| 380 | struct ip6gre_net *ign = net_generic(t->net, ip6gre_net_id); | 417 | struct ip6gre_net *ign = net_generic(t->net, ip6gre_net_id); |
| 381 | 418 | ||
| 419 | ip6gre_tunnel_unlink_md(ign, t); | ||
| 382 | ip6gre_tunnel_unlink(ign, t); | 420 | ip6gre_tunnel_unlink(ign, t); |
| 383 | dst_cache_reset(&t->dst_cache); | 421 | dst_cache_reset(&t->dst_cache); |
| 384 | dev_put(dev); | 422 | dev_put(dev); |
| @@ -698,6 +736,9 @@ static netdev_tx_t __gre6_xmit(struct sk_buff *skb, | |||
| 698 | else | 736 | else |
| 699 | fl6->daddr = tunnel->parms.raddr; | 737 | fl6->daddr = tunnel->parms.raddr; |
| 700 | 738 | ||
| 739 | if (skb_cow_head(skb, dev->needed_headroom ?: tunnel->hlen)) | ||
| 740 | return -ENOMEM; | ||
| 741 | |||
| 701 | /* Push GRE header. */ | 742 | /* Push GRE header. */ |
| 702 | protocol = (dev->type == ARPHRD_ETHER) ? htons(ETH_P_TEB) : proto; | 743 | protocol = (dev->type == ARPHRD_ETHER) ? htons(ETH_P_TEB) : proto; |
| 703 | 744 | ||
| @@ -908,7 +949,7 @@ static netdev_tx_t ip6erspan_tunnel_xmit(struct sk_buff *skb, | |||
| 908 | truncate = true; | 949 | truncate = true; |
| 909 | } | 950 | } |
| 910 | 951 | ||
| 911 | if (skb_cow_head(skb, dev->needed_headroom)) | 952 | if (skb_cow_head(skb, dev->needed_headroom ?: t->hlen)) |
| 912 | goto tx_err; | 953 | goto tx_err; |
| 913 | 954 | ||
| 914 | t->parms.o_flags &= ~TUNNEL_KEY; | 955 | t->parms.o_flags &= ~TUNNEL_KEY; |
| @@ -979,11 +1020,14 @@ static netdev_tx_t ip6erspan_tunnel_xmit(struct sk_buff *skb, | |||
| 979 | erspan_build_header(skb, ntohl(t->parms.o_key), | 1020 | erspan_build_header(skb, ntohl(t->parms.o_key), |
| 980 | t->parms.index, | 1021 | t->parms.index, |
| 981 | truncate, false); | 1022 | truncate, false); |
| 982 | else | 1023 | else if (t->parms.erspan_ver == 2) |
| 983 | erspan_build_header_v2(skb, ntohl(t->parms.o_key), | 1024 | erspan_build_header_v2(skb, ntohl(t->parms.o_key), |
| 984 | t->parms.dir, | 1025 | t->parms.dir, |
| 985 | t->parms.hwid, | 1026 | t->parms.hwid, |
| 986 | truncate, false); | 1027 | truncate, false); |
| 1028 | else | ||
| 1029 | goto tx_err; | ||
| 1030 | |||
| 987 | fl6.daddr = t->parms.raddr; | 1031 | fl6.daddr = t->parms.raddr; |
| 988 | } | 1032 | } |
| 989 | 1033 | ||
| @@ -1019,12 +1063,11 @@ tx_err: | |||
| 1019 | return NETDEV_TX_OK; | 1063 | return NETDEV_TX_OK; |
| 1020 | } | 1064 | } |
| 1021 | 1065 | ||
| 1022 | static void ip6gre_tnl_link_config(struct ip6_tnl *t, int set_mtu) | 1066 | static void ip6gre_tnl_link_config_common(struct ip6_tnl *t) |
| 1023 | { | 1067 | { |
| 1024 | struct net_device *dev = t->dev; | 1068 | struct net_device *dev = t->dev; |
| 1025 | struct __ip6_tnl_parm *p = &t->parms; | 1069 | struct __ip6_tnl_parm *p = &t->parms; |
| 1026 | struct flowi6 *fl6 = &t->fl.u.ip6; | 1070 | struct flowi6 *fl6 = &t->fl.u.ip6; |
| 1027 | int t_hlen; | ||
| 1028 | 1071 | ||
| 1029 | if (dev->type != ARPHRD_ETHER) { | 1072 | if (dev->type != ARPHRD_ETHER) { |
| 1030 | memcpy(dev->dev_addr, &p->laddr, sizeof(struct in6_addr)); | 1073 | memcpy(dev->dev_addr, &p->laddr, sizeof(struct in6_addr)); |
| @@ -1051,12 +1094,13 @@ static void ip6gre_tnl_link_config(struct ip6_tnl *t, int set_mtu) | |||
| 1051 | dev->flags |= IFF_POINTOPOINT; | 1094 | dev->flags |= IFF_POINTOPOINT; |
| 1052 | else | 1095 | else |
| 1053 | dev->flags &= ~IFF_POINTOPOINT; | 1096 | dev->flags &= ~IFF_POINTOPOINT; |
| 1097 | } | ||
| 1054 | 1098 | ||
| 1055 | t->tun_hlen = gre_calc_hlen(t->parms.o_flags); | 1099 | static void ip6gre_tnl_link_config_route(struct ip6_tnl *t, int set_mtu, |
| 1056 | 1100 | int t_hlen) | |
| 1057 | t->hlen = t->encap_hlen + t->tun_hlen; | 1101 | { |
| 1058 | 1102 | const struct __ip6_tnl_parm *p = &t->parms; | |
| 1059 | t_hlen = t->hlen + sizeof(struct ipv6hdr); | 1103 | struct net_device *dev = t->dev; |
| 1060 | 1104 | ||
| 1061 | if (p->flags & IP6_TNL_F_CAP_XMIT) { | 1105 | if (p->flags & IP6_TNL_F_CAP_XMIT) { |
| 1062 | int strict = (ipv6_addr_type(&p->raddr) & | 1106 | int strict = (ipv6_addr_type(&p->raddr) & |
| @@ -1088,8 +1132,26 @@ static void ip6gre_tnl_link_config(struct ip6_tnl *t, int set_mtu) | |||
| 1088 | } | 1132 | } |
| 1089 | } | 1133 | } |
| 1090 | 1134 | ||
| 1091 | static int ip6gre_tnl_change(struct ip6_tnl *t, | 1135 | static int ip6gre_calc_hlen(struct ip6_tnl *tunnel) |
| 1092 | const struct __ip6_tnl_parm *p, int set_mtu) | 1136 | { |
| 1137 | int t_hlen; | ||
| 1138 | |||
| 1139 | tunnel->tun_hlen = gre_calc_hlen(tunnel->parms.o_flags); | ||
| 1140 | tunnel->hlen = tunnel->tun_hlen + tunnel->encap_hlen; | ||
| 1141 | |||
| 1142 | t_hlen = tunnel->hlen + sizeof(struct ipv6hdr); | ||
| 1143 | tunnel->dev->hard_header_len = LL_MAX_HEADER + t_hlen; | ||
| 1144 | return t_hlen; | ||
| 1145 | } | ||
| 1146 | |||
| 1147 | static void ip6gre_tnl_link_config(struct ip6_tnl *t, int set_mtu) | ||
| 1148 | { | ||
| 1149 | ip6gre_tnl_link_config_common(t); | ||
| 1150 | ip6gre_tnl_link_config_route(t, set_mtu, ip6gre_calc_hlen(t)); | ||
| 1151 | } | ||
| 1152 | |||
| 1153 | static void ip6gre_tnl_copy_tnl_parm(struct ip6_tnl *t, | ||
| 1154 | const struct __ip6_tnl_parm *p) | ||
| 1093 | { | 1155 | { |
| 1094 | t->parms.laddr = p->laddr; | 1156 | t->parms.laddr = p->laddr; |
| 1095 | t->parms.raddr = p->raddr; | 1157 | t->parms.raddr = p->raddr; |
| @@ -1105,6 +1167,12 @@ static int ip6gre_tnl_change(struct ip6_tnl *t, | |||
| 1105 | t->parms.o_flags = p->o_flags; | 1167 | t->parms.o_flags = p->o_flags; |
| 1106 | t->parms.fwmark = p->fwmark; | 1168 | t->parms.fwmark = p->fwmark; |
| 1107 | dst_cache_reset(&t->dst_cache); | 1169 | dst_cache_reset(&t->dst_cache); |
| 1170 | } | ||
| 1171 | |||
| 1172 | static int ip6gre_tnl_change(struct ip6_tnl *t, const struct __ip6_tnl_parm *p, | ||
| 1173 | int set_mtu) | ||
| 1174 | { | ||
| 1175 | ip6gre_tnl_copy_tnl_parm(t, p); | ||
| 1108 | ip6gre_tnl_link_config(t, set_mtu); | 1176 | ip6gre_tnl_link_config(t, set_mtu); |
| 1109 | return 0; | 1177 | return 0; |
| 1110 | } | 1178 | } |
| @@ -1381,11 +1449,7 @@ static int ip6gre_tunnel_init_common(struct net_device *dev) | |||
| 1381 | return ret; | 1449 | return ret; |
| 1382 | } | 1450 | } |
| 1383 | 1451 | ||
| 1384 | tunnel->tun_hlen = gre_calc_hlen(tunnel->parms.o_flags); | 1452 | t_hlen = ip6gre_calc_hlen(tunnel); |
| 1385 | tunnel->hlen = tunnel->tun_hlen + tunnel->encap_hlen; | ||
| 1386 | t_hlen = tunnel->hlen + sizeof(struct ipv6hdr); | ||
| 1387 | |||
| 1388 | dev->hard_header_len = LL_MAX_HEADER + t_hlen; | ||
| 1389 | dev->mtu = ETH_DATA_LEN - t_hlen; | 1453 | dev->mtu = ETH_DATA_LEN - t_hlen; |
| 1390 | if (dev->type == ARPHRD_ETHER) | 1454 | if (dev->type == ARPHRD_ETHER) |
| 1391 | dev->mtu -= ETH_HLEN; | 1455 | dev->mtu -= ETH_HLEN; |
| @@ -1728,6 +1792,19 @@ static const struct net_device_ops ip6gre_tap_netdev_ops = { | |||
| 1728 | .ndo_get_iflink = ip6_tnl_get_iflink, | 1792 | .ndo_get_iflink = ip6_tnl_get_iflink, |
| 1729 | }; | 1793 | }; |
| 1730 | 1794 | ||
| 1795 | static int ip6erspan_calc_hlen(struct ip6_tnl *tunnel) | ||
| 1796 | { | ||
| 1797 | int t_hlen; | ||
| 1798 | |||
| 1799 | tunnel->tun_hlen = 8; | ||
| 1800 | tunnel->hlen = tunnel->tun_hlen + tunnel->encap_hlen + | ||
| 1801 | erspan_hdr_len(tunnel->parms.erspan_ver); | ||
| 1802 | |||
| 1803 | t_hlen = tunnel->hlen + sizeof(struct ipv6hdr); | ||
| 1804 | tunnel->dev->hard_header_len = LL_MAX_HEADER + t_hlen; | ||
| 1805 | return t_hlen; | ||
| 1806 | } | ||
| 1807 | |||
| 1731 | static int ip6erspan_tap_init(struct net_device *dev) | 1808 | static int ip6erspan_tap_init(struct net_device *dev) |
| 1732 | { | 1809 | { |
| 1733 | struct ip6_tnl *tunnel; | 1810 | struct ip6_tnl *tunnel; |
| @@ -1751,12 +1828,7 @@ static int ip6erspan_tap_init(struct net_device *dev) | |||
| 1751 | return ret; | 1828 | return ret; |
| 1752 | } | 1829 | } |
| 1753 | 1830 | ||
| 1754 | tunnel->tun_hlen = 8; | 1831 | t_hlen = ip6erspan_calc_hlen(tunnel); |
| 1755 | tunnel->hlen = tunnel->tun_hlen + tunnel->encap_hlen + | ||
| 1756 | erspan_hdr_len(tunnel->parms.erspan_ver); | ||
| 1757 | t_hlen = tunnel->hlen + sizeof(struct ipv6hdr); | ||
| 1758 | |||
| 1759 | dev->hard_header_len = LL_MAX_HEADER + t_hlen; | ||
| 1760 | dev->mtu = ETH_DATA_LEN - t_hlen; | 1832 | dev->mtu = ETH_DATA_LEN - t_hlen; |
| 1761 | if (dev->type == ARPHRD_ETHER) | 1833 | if (dev->type == ARPHRD_ETHER) |
| 1762 | dev->mtu -= ETH_HLEN; | 1834 | dev->mtu -= ETH_HLEN; |
| @@ -1764,14 +1836,14 @@ static int ip6erspan_tap_init(struct net_device *dev) | |||
| 1764 | dev->mtu -= 8; | 1836 | dev->mtu -= 8; |
| 1765 | 1837 | ||
| 1766 | dev->priv_flags |= IFF_LIVE_ADDR_CHANGE; | 1838 | dev->priv_flags |= IFF_LIVE_ADDR_CHANGE; |
| 1767 | ip6gre_tnl_link_config(tunnel, 1); | 1839 | ip6erspan_tnl_link_config(tunnel, 1); |
| 1768 | 1840 | ||
| 1769 | return 0; | 1841 | return 0; |
| 1770 | } | 1842 | } |
| 1771 | 1843 | ||
| 1772 | static const struct net_device_ops ip6erspan_netdev_ops = { | 1844 | static const struct net_device_ops ip6erspan_netdev_ops = { |
| 1773 | .ndo_init = ip6erspan_tap_init, | 1845 | .ndo_init = ip6erspan_tap_init, |
| 1774 | .ndo_uninit = ip6gre_tunnel_uninit, | 1846 | .ndo_uninit = ip6erspan_tunnel_uninit, |
| 1775 | .ndo_start_xmit = ip6erspan_tunnel_xmit, | 1847 | .ndo_start_xmit = ip6erspan_tunnel_xmit, |
| 1776 | .ndo_set_mac_address = eth_mac_addr, | 1848 | .ndo_set_mac_address = eth_mac_addr, |
| 1777 | .ndo_validate_addr = eth_validate_addr, | 1849 | .ndo_validate_addr = eth_validate_addr, |
| @@ -1835,13 +1907,11 @@ static bool ip6gre_netlink_encap_parms(struct nlattr *data[], | |||
| 1835 | return ret; | 1907 | return ret; |
| 1836 | } | 1908 | } |
| 1837 | 1909 | ||
| 1838 | static int ip6gre_newlink(struct net *src_net, struct net_device *dev, | 1910 | static int ip6gre_newlink_common(struct net *src_net, struct net_device *dev, |
| 1839 | struct nlattr *tb[], struct nlattr *data[], | 1911 | struct nlattr *tb[], struct nlattr *data[], |
| 1840 | struct netlink_ext_ack *extack) | 1912 | struct netlink_ext_ack *extack) |
| 1841 | { | 1913 | { |
| 1842 | struct ip6_tnl *nt; | 1914 | struct ip6_tnl *nt; |
| 1843 | struct net *net = dev_net(dev); | ||
| 1844 | struct ip6gre_net *ign = net_generic(net, ip6gre_net_id); | ||
| 1845 | struct ip_tunnel_encap ipencap; | 1915 | struct ip_tunnel_encap ipencap; |
| 1846 | int err; | 1916 | int err; |
| 1847 | 1917 | ||
| @@ -1854,16 +1924,6 @@ static int ip6gre_newlink(struct net *src_net, struct net_device *dev, | |||
| 1854 | return err; | 1924 | return err; |
| 1855 | } | 1925 | } |
| 1856 | 1926 | ||
| 1857 | ip6gre_netlink_parms(data, &nt->parms); | ||
| 1858 | |||
| 1859 | if (nt->parms.collect_md) { | ||
| 1860 | if (rtnl_dereference(ign->collect_md_tun)) | ||
| 1861 | return -EEXIST; | ||
| 1862 | } else { | ||
| 1863 | if (ip6gre_tunnel_find(net, &nt->parms, dev->type)) | ||
| 1864 | return -EEXIST; | ||
| 1865 | } | ||
| 1866 | |||
| 1867 | if (dev->type == ARPHRD_ETHER && !tb[IFLA_ADDRESS]) | 1927 | if (dev->type == ARPHRD_ETHER && !tb[IFLA_ADDRESS]) |
| 1868 | eth_hw_addr_random(dev); | 1928 | eth_hw_addr_random(dev); |
| 1869 | 1929 | ||
| @@ -1874,51 +1934,94 @@ static int ip6gre_newlink(struct net *src_net, struct net_device *dev, | |||
| 1874 | if (err) | 1934 | if (err) |
| 1875 | goto out; | 1935 | goto out; |
| 1876 | 1936 | ||
| 1877 | ip6gre_tnl_link_config(nt, !tb[IFLA_MTU]); | ||
| 1878 | |||
| 1879 | if (tb[IFLA_MTU]) | 1937 | if (tb[IFLA_MTU]) |
| 1880 | ip6_tnl_change_mtu(dev, nla_get_u32(tb[IFLA_MTU])); | 1938 | ip6_tnl_change_mtu(dev, nla_get_u32(tb[IFLA_MTU])); |
| 1881 | 1939 | ||
| 1882 | dev_hold(dev); | 1940 | dev_hold(dev); |
| 1883 | ip6gre_tunnel_link(ign, nt); | ||
| 1884 | 1941 | ||
| 1885 | out: | 1942 | out: |
| 1886 | return err; | 1943 | return err; |
| 1887 | } | 1944 | } |
| 1888 | 1945 | ||
| 1889 | static int ip6gre_changelink(struct net_device *dev, struct nlattr *tb[], | 1946 | static int ip6gre_newlink(struct net *src_net, struct net_device *dev, |
| 1890 | struct nlattr *data[], | 1947 | struct nlattr *tb[], struct nlattr *data[], |
| 1891 | struct netlink_ext_ack *extack) | 1948 | struct netlink_ext_ack *extack) |
| 1949 | { | ||
| 1950 | struct ip6_tnl *nt = netdev_priv(dev); | ||
| 1951 | struct net *net = dev_net(dev); | ||
| 1952 | struct ip6gre_net *ign; | ||
| 1953 | int err; | ||
| 1954 | |||
| 1955 | ip6gre_netlink_parms(data, &nt->parms); | ||
| 1956 | ign = net_generic(net, ip6gre_net_id); | ||
| 1957 | |||
| 1958 | if (nt->parms.collect_md) { | ||
| 1959 | if (rtnl_dereference(ign->collect_md_tun)) | ||
| 1960 | return -EEXIST; | ||
| 1961 | } else { | ||
| 1962 | if (ip6gre_tunnel_find(net, &nt->parms, dev->type)) | ||
| 1963 | return -EEXIST; | ||
| 1964 | } | ||
| 1965 | |||
| 1966 | err = ip6gre_newlink_common(src_net, dev, tb, data, extack); | ||
| 1967 | if (!err) { | ||
| 1968 | ip6gre_tnl_link_config(nt, !tb[IFLA_MTU]); | ||
| 1969 | ip6gre_tunnel_link_md(ign, nt); | ||
| 1970 | ip6gre_tunnel_link(net_generic(net, ip6gre_net_id), nt); | ||
| 1971 | } | ||
| 1972 | return err; | ||
| 1973 | } | ||
| 1974 | |||
| 1975 | static struct ip6_tnl * | ||
| 1976 | ip6gre_changelink_common(struct net_device *dev, struct nlattr *tb[], | ||
| 1977 | struct nlattr *data[], struct __ip6_tnl_parm *p_p, | ||
| 1978 | struct netlink_ext_ack *extack) | ||
| 1892 | { | 1979 | { |
| 1893 | struct ip6_tnl *t, *nt = netdev_priv(dev); | 1980 | struct ip6_tnl *t, *nt = netdev_priv(dev); |
| 1894 | struct net *net = nt->net; | 1981 | struct net *net = nt->net; |
| 1895 | struct ip6gre_net *ign = net_generic(net, ip6gre_net_id); | 1982 | struct ip6gre_net *ign = net_generic(net, ip6gre_net_id); |
| 1896 | struct __ip6_tnl_parm p; | ||
| 1897 | struct ip_tunnel_encap ipencap; | 1983 | struct ip_tunnel_encap ipencap; |
| 1898 | 1984 | ||
| 1899 | if (dev == ign->fb_tunnel_dev) | 1985 | if (dev == ign->fb_tunnel_dev) |
| 1900 | return -EINVAL; | 1986 | return ERR_PTR(-EINVAL); |
| 1901 | 1987 | ||
| 1902 | if (ip6gre_netlink_encap_parms(data, &ipencap)) { | 1988 | if (ip6gre_netlink_encap_parms(data, &ipencap)) { |
| 1903 | int err = ip6_tnl_encap_setup(nt, &ipencap); | 1989 | int err = ip6_tnl_encap_setup(nt, &ipencap); |
| 1904 | 1990 | ||
| 1905 | if (err < 0) | 1991 | if (err < 0) |
| 1906 | return err; | 1992 | return ERR_PTR(err); |
| 1907 | } | 1993 | } |
| 1908 | 1994 | ||
| 1909 | ip6gre_netlink_parms(data, &p); | 1995 | ip6gre_netlink_parms(data, p_p); |
| 1910 | 1996 | ||
| 1911 | t = ip6gre_tunnel_locate(net, &p, 0); | 1997 | t = ip6gre_tunnel_locate(net, p_p, 0); |
| 1912 | 1998 | ||
| 1913 | if (t) { | 1999 | if (t) { |
| 1914 | if (t->dev != dev) | 2000 | if (t->dev != dev) |
| 1915 | return -EEXIST; | 2001 | return ERR_PTR(-EEXIST); |
| 1916 | } else { | 2002 | } else { |
| 1917 | t = nt; | 2003 | t = nt; |
| 1918 | } | 2004 | } |
| 1919 | 2005 | ||
| 2006 | return t; | ||
| 2007 | } | ||
| 2008 | |||
| 2009 | static int ip6gre_changelink(struct net_device *dev, struct nlattr *tb[], | ||
| 2010 | struct nlattr *data[], | ||
| 2011 | struct netlink_ext_ack *extack) | ||
| 2012 | { | ||
| 2013 | struct ip6gre_net *ign = net_generic(dev_net(dev), ip6gre_net_id); | ||
| 2014 | struct __ip6_tnl_parm p; | ||
| 2015 | struct ip6_tnl *t; | ||
| 2016 | |||
| 2017 | t = ip6gre_changelink_common(dev, tb, data, &p, extack); | ||
| 2018 | if (IS_ERR(t)) | ||
| 2019 | return PTR_ERR(t); | ||
| 2020 | |||
| 2021 | ip6gre_tunnel_unlink_md(ign, t); | ||
| 1920 | ip6gre_tunnel_unlink(ign, t); | 2022 | ip6gre_tunnel_unlink(ign, t); |
| 1921 | ip6gre_tnl_change(t, &p, !tb[IFLA_MTU]); | 2023 | ip6gre_tnl_change(t, &p, !tb[IFLA_MTU]); |
| 2024 | ip6gre_tunnel_link_md(ign, t); | ||
| 1922 | ip6gre_tunnel_link(ign, t); | 2025 | ip6gre_tunnel_link(ign, t); |
| 1923 | return 0; | 2026 | return 0; |
| 1924 | } | 2027 | } |
| @@ -2068,6 +2171,69 @@ static void ip6erspan_tap_setup(struct net_device *dev) | |||
| 2068 | netif_keep_dst(dev); | 2171 | netif_keep_dst(dev); |
| 2069 | } | 2172 | } |
| 2070 | 2173 | ||
| 2174 | static int ip6erspan_newlink(struct net *src_net, struct net_device *dev, | ||
| 2175 | struct nlattr *tb[], struct nlattr *data[], | ||
| 2176 | struct netlink_ext_ack *extack) | ||
| 2177 | { | ||
| 2178 | struct ip6_tnl *nt = netdev_priv(dev); | ||
| 2179 | struct net *net = dev_net(dev); | ||
| 2180 | struct ip6gre_net *ign; | ||
| 2181 | int err; | ||
| 2182 | |||
| 2183 | ip6gre_netlink_parms(data, &nt->parms); | ||
| 2184 | ign = net_generic(net, ip6gre_net_id); | ||
| 2185 | |||
| 2186 | if (nt->parms.collect_md) { | ||
| 2187 | if (rtnl_dereference(ign->collect_md_tun_erspan)) | ||
| 2188 | return -EEXIST; | ||
| 2189 | } else { | ||
| 2190 | if (ip6gre_tunnel_find(net, &nt->parms, dev->type)) | ||
| 2191 | return -EEXIST; | ||
| 2192 | } | ||
| 2193 | |||
| 2194 | err = ip6gre_newlink_common(src_net, dev, tb, data, extack); | ||
| 2195 | if (!err) { | ||
| 2196 | ip6erspan_tnl_link_config(nt, !tb[IFLA_MTU]); | ||
| 2197 | ip6erspan_tunnel_link_md(ign, nt); | ||
| 2198 | ip6gre_tunnel_link(net_generic(net, ip6gre_net_id), nt); | ||
| 2199 | } | ||
| 2200 | return err; | ||
| 2201 | } | ||
| 2202 | |||
| 2203 | static void ip6erspan_tnl_link_config(struct ip6_tnl *t, int set_mtu) | ||
| 2204 | { | ||
| 2205 | ip6gre_tnl_link_config_common(t); | ||
| 2206 | ip6gre_tnl_link_config_route(t, set_mtu, ip6erspan_calc_hlen(t)); | ||
| 2207 | } | ||
| 2208 | |||
| 2209 | static int ip6erspan_tnl_change(struct ip6_tnl *t, | ||
| 2210 | const struct __ip6_tnl_parm *p, int set_mtu) | ||
| 2211 | { | ||
| 2212 | ip6gre_tnl_copy_tnl_parm(t, p); | ||
| 2213 | ip6erspan_tnl_link_config(t, set_mtu); | ||
| 2214 | return 0; | ||
| 2215 | } | ||
| 2216 | |||
| 2217 | static int ip6erspan_changelink(struct net_device *dev, struct nlattr *tb[], | ||
| 2218 | struct nlattr *data[], | ||
| 2219 | struct netlink_ext_ack *extack) | ||
| 2220 | { | ||
| 2221 | struct ip6gre_net *ign = net_generic(dev_net(dev), ip6gre_net_id); | ||
| 2222 | struct __ip6_tnl_parm p; | ||
| 2223 | struct ip6_tnl *t; | ||
| 2224 | |||
| 2225 | t = ip6gre_changelink_common(dev, tb, data, &p, extack); | ||
| 2226 | if (IS_ERR(t)) | ||
| 2227 | return PTR_ERR(t); | ||
| 2228 | |||
| 2229 | ip6gre_tunnel_unlink_md(ign, t); | ||
| 2230 | ip6gre_tunnel_unlink(ign, t); | ||
| 2231 | ip6erspan_tnl_change(t, &p, !tb[IFLA_MTU]); | ||
| 2232 | ip6erspan_tunnel_link_md(ign, t); | ||
| 2233 | ip6gre_tunnel_link(ign, t); | ||
| 2234 | return 0; | ||
| 2235 | } | ||
| 2236 | |||
| 2071 | static struct rtnl_link_ops ip6gre_link_ops __read_mostly = { | 2237 | static struct rtnl_link_ops ip6gre_link_ops __read_mostly = { |
| 2072 | .kind = "ip6gre", | 2238 | .kind = "ip6gre", |
| 2073 | .maxtype = IFLA_GRE_MAX, | 2239 | .maxtype = IFLA_GRE_MAX, |
| @@ -2104,8 +2270,8 @@ static struct rtnl_link_ops ip6erspan_tap_ops __read_mostly = { | |||
| 2104 | .priv_size = sizeof(struct ip6_tnl), | 2270 | .priv_size = sizeof(struct ip6_tnl), |
| 2105 | .setup = ip6erspan_tap_setup, | 2271 | .setup = ip6erspan_tap_setup, |
| 2106 | .validate = ip6erspan_tap_validate, | 2272 | .validate = ip6erspan_tap_validate, |
| 2107 | .newlink = ip6gre_newlink, | 2273 | .newlink = ip6erspan_newlink, |
| 2108 | .changelink = ip6gre_changelink, | 2274 | .changelink = ip6erspan_changelink, |
| 2109 | .get_size = ip6gre_get_size, | 2275 | .get_size = ip6gre_get_size, |
| 2110 | .fill_info = ip6gre_fill_info, | 2276 | .fill_info = ip6gre_fill_info, |
| 2111 | .get_link_net = ip6_tnl_get_link_net, | 2277 | .get_link_net = ip6_tnl_get_link_net, |
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c index 2e891d2c30ef..7b6d1689087b 100644 --- a/net/ipv6/ip6_output.c +++ b/net/ipv6/ip6_output.c | |||
| @@ -1503,7 +1503,8 @@ alloc_new_skb: | |||
| 1503 | if (copy > length) | 1503 | if (copy > length) |
| 1504 | copy = length; | 1504 | copy = length; |
| 1505 | 1505 | ||
| 1506 | if (!(rt->dst.dev->features&NETIF_F_SG)) { | 1506 | if (!(rt->dst.dev->features&NETIF_F_SG) && |
| 1507 | skb_tailroom(skb) >= copy) { | ||
| 1507 | unsigned int off; | 1508 | unsigned int off; |
| 1508 | 1509 | ||
| 1509 | off = skb->len; | 1510 | off = skb->len; |
diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c index 65c9e1a58305..97f79dc943d7 100644 --- a/net/ipv6/netfilter/ip6_tables.c +++ b/net/ipv6/netfilter/ip6_tables.c | |||
| @@ -38,6 +38,7 @@ | |||
| 38 | MODULE_LICENSE("GPL"); | 38 | MODULE_LICENSE("GPL"); |
| 39 | MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>"); | 39 | MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>"); |
| 40 | MODULE_DESCRIPTION("IPv6 packet filter"); | 40 | MODULE_DESCRIPTION("IPv6 packet filter"); |
| 41 | MODULE_ALIAS("ip6t_icmp6"); | ||
| 41 | 42 | ||
| 42 | void *ip6t_alloc_initial_table(const struct xt_table *info) | 43 | void *ip6t_alloc_initial_table(const struct xt_table *info) |
| 43 | { | 44 | { |
diff --git a/net/mac80211/mesh_plink.c b/net/mac80211/mesh_plink.c index 0f6c9ca59062..5b5b0f95ffd1 100644 --- a/net/mac80211/mesh_plink.c +++ b/net/mac80211/mesh_plink.c | |||
| @@ -401,7 +401,7 @@ u32 mesh_plink_deactivate(struct sta_info *sta) | |||
| 401 | 401 | ||
| 402 | static void mesh_sta_info_init(struct ieee80211_sub_if_data *sdata, | 402 | static void mesh_sta_info_init(struct ieee80211_sub_if_data *sdata, |
| 403 | struct sta_info *sta, | 403 | struct sta_info *sta, |
| 404 | struct ieee802_11_elems *elems, bool insert) | 404 | struct ieee802_11_elems *elems) |
| 405 | { | 405 | { |
| 406 | struct ieee80211_local *local = sdata->local; | 406 | struct ieee80211_local *local = sdata->local; |
| 407 | struct ieee80211_supported_band *sband; | 407 | struct ieee80211_supported_band *sband; |
| @@ -447,7 +447,7 @@ static void mesh_sta_info_init(struct ieee80211_sub_if_data *sdata, | |||
| 447 | sta->sta.bandwidth = IEEE80211_STA_RX_BW_20; | 447 | sta->sta.bandwidth = IEEE80211_STA_RX_BW_20; |
| 448 | } | 448 | } |
| 449 | 449 | ||
| 450 | if (insert) | 450 | if (!test_sta_flag(sta, WLAN_STA_RATE_CONTROL)) |
| 451 | rate_control_rate_init(sta); | 451 | rate_control_rate_init(sta); |
| 452 | else | 452 | else |
| 453 | rate_control_rate_update(local, sband, sta, changed); | 453 | rate_control_rate_update(local, sband, sta, changed); |
| @@ -551,7 +551,7 @@ mesh_sta_info_get(struct ieee80211_sub_if_data *sdata, | |||
| 551 | rcu_read_lock(); | 551 | rcu_read_lock(); |
| 552 | sta = sta_info_get(sdata, addr); | 552 | sta = sta_info_get(sdata, addr); |
| 553 | if (sta) { | 553 | if (sta) { |
| 554 | mesh_sta_info_init(sdata, sta, elems, false); | 554 | mesh_sta_info_init(sdata, sta, elems); |
| 555 | } else { | 555 | } else { |
| 556 | rcu_read_unlock(); | 556 | rcu_read_unlock(); |
| 557 | /* can't run atomic */ | 557 | /* can't run atomic */ |
| @@ -561,7 +561,7 @@ mesh_sta_info_get(struct ieee80211_sub_if_data *sdata, | |||
| 561 | return NULL; | 561 | return NULL; |
| 562 | } | 562 | } |
| 563 | 563 | ||
| 564 | mesh_sta_info_init(sdata, sta, elems, true); | 564 | mesh_sta_info_init(sdata, sta, elems); |
| 565 | 565 | ||
| 566 | if (sta_info_insert_rcu(sta)) | 566 | if (sta_info_insert_rcu(sta)) |
| 567 | return NULL; | 567 | return NULL; |
diff --git a/net/netfilter/core.c b/net/netfilter/core.c index 0f6b8172fb9a..206fb2c4c319 100644 --- a/net/netfilter/core.c +++ b/net/netfilter/core.c | |||
| @@ -585,7 +585,8 @@ void (*nf_nat_decode_session_hook)(struct sk_buff *, struct flowi *); | |||
| 585 | EXPORT_SYMBOL(nf_nat_decode_session_hook); | 585 | EXPORT_SYMBOL(nf_nat_decode_session_hook); |
| 586 | #endif | 586 | #endif |
| 587 | 587 | ||
| 588 | static void __net_init __netfilter_net_init(struct nf_hook_entries **e, int max) | 588 | static void __net_init |
| 589 | __netfilter_net_init(struct nf_hook_entries __rcu **e, int max) | ||
| 589 | { | 590 | { |
| 590 | int h; | 591 | int h; |
| 591 | 592 | ||
diff --git a/net/netfilter/ipvs/ip_vs_conn.c b/net/netfilter/ipvs/ip_vs_conn.c index 370abbf6f421..75de46576f51 100644 --- a/net/netfilter/ipvs/ip_vs_conn.c +++ b/net/netfilter/ipvs/ip_vs_conn.c | |||
| @@ -232,7 +232,10 @@ static inline int ip_vs_conn_unhash(struct ip_vs_conn *cp) | |||
| 232 | static inline bool ip_vs_conn_unlink(struct ip_vs_conn *cp) | 232 | static inline bool ip_vs_conn_unlink(struct ip_vs_conn *cp) |
| 233 | { | 233 | { |
| 234 | unsigned int hash; | 234 | unsigned int hash; |
| 235 | bool ret; | 235 | bool ret = false; |
| 236 | |||
| 237 | if (cp->flags & IP_VS_CONN_F_ONE_PACKET) | ||
| 238 | return refcount_dec_if_one(&cp->refcnt); | ||
| 236 | 239 | ||
| 237 | hash = ip_vs_conn_hashkey_conn(cp); | 240 | hash = ip_vs_conn_hashkey_conn(cp); |
| 238 | 241 | ||
| @@ -240,15 +243,13 @@ static inline bool ip_vs_conn_unlink(struct ip_vs_conn *cp) | |||
| 240 | spin_lock(&cp->lock); | 243 | spin_lock(&cp->lock); |
| 241 | 244 | ||
| 242 | if (cp->flags & IP_VS_CONN_F_HASHED) { | 245 | if (cp->flags & IP_VS_CONN_F_HASHED) { |
| 243 | ret = false; | ||
| 244 | /* Decrease refcnt and unlink conn only if we are last user */ | 246 | /* Decrease refcnt and unlink conn only if we are last user */ |
| 245 | if (refcount_dec_if_one(&cp->refcnt)) { | 247 | if (refcount_dec_if_one(&cp->refcnt)) { |
| 246 | hlist_del_rcu(&cp->c_list); | 248 | hlist_del_rcu(&cp->c_list); |
| 247 | cp->flags &= ~IP_VS_CONN_F_HASHED; | 249 | cp->flags &= ~IP_VS_CONN_F_HASHED; |
| 248 | ret = true; | 250 | ret = true; |
| 249 | } | 251 | } |
| 250 | } else | 252 | } |
| 251 | ret = refcount_read(&cp->refcnt) ? false : true; | ||
| 252 | 253 | ||
| 253 | spin_unlock(&cp->lock); | 254 | spin_unlock(&cp->lock); |
| 254 | ct_write_unlock_bh(hash); | 255 | ct_write_unlock_bh(hash); |
| @@ -454,12 +455,6 @@ ip_vs_conn_out_get_proto(struct netns_ipvs *ipvs, int af, | |||
| 454 | } | 455 | } |
| 455 | EXPORT_SYMBOL_GPL(ip_vs_conn_out_get_proto); | 456 | EXPORT_SYMBOL_GPL(ip_vs_conn_out_get_proto); |
| 456 | 457 | ||
| 457 | static void __ip_vs_conn_put_notimer(struct ip_vs_conn *cp) | ||
| 458 | { | ||
| 459 | __ip_vs_conn_put(cp); | ||
| 460 | ip_vs_conn_expire(&cp->timer); | ||
| 461 | } | ||
| 462 | |||
| 463 | /* | 458 | /* |
| 464 | * Put back the conn and restart its timer with its timeout | 459 | * Put back the conn and restart its timer with its timeout |
| 465 | */ | 460 | */ |
| @@ -478,7 +473,7 @@ void ip_vs_conn_put(struct ip_vs_conn *cp) | |||
| 478 | (refcount_read(&cp->refcnt) == 1) && | 473 | (refcount_read(&cp->refcnt) == 1) && |
| 479 | !timer_pending(&cp->timer)) | 474 | !timer_pending(&cp->timer)) |
| 480 | /* expire connection immediately */ | 475 | /* expire connection immediately */ |
| 481 | __ip_vs_conn_put_notimer(cp); | 476 | ip_vs_conn_expire(&cp->timer); |
| 482 | else | 477 | else |
| 483 | __ip_vs_conn_put_timer(cp); | 478 | __ip_vs_conn_put_timer(cp); |
| 484 | } | 479 | } |
diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c index 5f6f73cf2174..0679dd101e72 100644 --- a/net/netfilter/ipvs/ip_vs_core.c +++ b/net/netfilter/ipvs/ip_vs_core.c | |||
| @@ -119,6 +119,8 @@ ip_vs_in_stats(struct ip_vs_conn *cp, struct sk_buff *skb) | |||
| 119 | struct ip_vs_cpu_stats *s; | 119 | struct ip_vs_cpu_stats *s; |
| 120 | struct ip_vs_service *svc; | 120 | struct ip_vs_service *svc; |
| 121 | 121 | ||
| 122 | local_bh_disable(); | ||
| 123 | |||
| 122 | s = this_cpu_ptr(dest->stats.cpustats); | 124 | s = this_cpu_ptr(dest->stats.cpustats); |
| 123 | u64_stats_update_begin(&s->syncp); | 125 | u64_stats_update_begin(&s->syncp); |
| 124 | s->cnt.inpkts++; | 126 | s->cnt.inpkts++; |
| @@ -137,6 +139,8 @@ ip_vs_in_stats(struct ip_vs_conn *cp, struct sk_buff *skb) | |||
| 137 | s->cnt.inpkts++; | 139 | s->cnt.inpkts++; |
| 138 | s->cnt.inbytes += skb->len; | 140 | s->cnt.inbytes += skb->len; |
| 139 | u64_stats_update_end(&s->syncp); | 141 | u64_stats_update_end(&s->syncp); |
| 142 | |||
| 143 | local_bh_enable(); | ||
| 140 | } | 144 | } |
| 141 | } | 145 | } |
| 142 | 146 | ||
| @@ -151,6 +155,8 @@ ip_vs_out_stats(struct ip_vs_conn *cp, struct sk_buff *skb) | |||
| 151 | struct ip_vs_cpu_stats *s; | 155 | struct ip_vs_cpu_stats *s; |
| 152 | struct ip_vs_service *svc; | 156 | struct ip_vs_service *svc; |
| 153 | 157 | ||
| 158 | local_bh_disable(); | ||
| 159 | |||
| 154 | s = this_cpu_ptr(dest->stats.cpustats); | 160 | s = this_cpu_ptr(dest->stats.cpustats); |
| 155 | u64_stats_update_begin(&s->syncp); | 161 | u64_stats_update_begin(&s->syncp); |
| 156 | s->cnt.outpkts++; | 162 | s->cnt.outpkts++; |
| @@ -169,6 +175,8 @@ ip_vs_out_stats(struct ip_vs_conn *cp, struct sk_buff *skb) | |||
| 169 | s->cnt.outpkts++; | 175 | s->cnt.outpkts++; |
| 170 | s->cnt.outbytes += skb->len; | 176 | s->cnt.outbytes += skb->len; |
| 171 | u64_stats_update_end(&s->syncp); | 177 | u64_stats_update_end(&s->syncp); |
| 178 | |||
| 179 | local_bh_enable(); | ||
| 172 | } | 180 | } |
| 173 | } | 181 | } |
| 174 | 182 | ||
| @@ -179,6 +187,8 @@ ip_vs_conn_stats(struct ip_vs_conn *cp, struct ip_vs_service *svc) | |||
| 179 | struct netns_ipvs *ipvs = svc->ipvs; | 187 | struct netns_ipvs *ipvs = svc->ipvs; |
| 180 | struct ip_vs_cpu_stats *s; | 188 | struct ip_vs_cpu_stats *s; |
| 181 | 189 | ||
| 190 | local_bh_disable(); | ||
| 191 | |||
| 182 | s = this_cpu_ptr(cp->dest->stats.cpustats); | 192 | s = this_cpu_ptr(cp->dest->stats.cpustats); |
| 183 | u64_stats_update_begin(&s->syncp); | 193 | u64_stats_update_begin(&s->syncp); |
| 184 | s->cnt.conns++; | 194 | s->cnt.conns++; |
| @@ -193,6 +203,8 @@ ip_vs_conn_stats(struct ip_vs_conn *cp, struct ip_vs_service *svc) | |||
| 193 | u64_stats_update_begin(&s->syncp); | 203 | u64_stats_update_begin(&s->syncp); |
| 194 | s->cnt.conns++; | 204 | s->cnt.conns++; |
| 195 | u64_stats_update_end(&s->syncp); | 205 | u64_stats_update_end(&s->syncp); |
| 206 | |||
| 207 | local_bh_enable(); | ||
| 196 | } | 208 | } |
| 197 | 209 | ||
| 198 | 210 | ||
diff --git a/net/netfilter/nf_conntrack_proto_tcp.c b/net/netfilter/nf_conntrack_proto_tcp.c index e97cdc1cf98c..8e67910185a0 100644 --- a/net/netfilter/nf_conntrack_proto_tcp.c +++ b/net/netfilter/nf_conntrack_proto_tcp.c | |||
| @@ -981,6 +981,17 @@ static int tcp_packet(struct nf_conn *ct, | |||
| 981 | return NF_ACCEPT; /* Don't change state */ | 981 | return NF_ACCEPT; /* Don't change state */ |
| 982 | } | 982 | } |
| 983 | break; | 983 | break; |
| 984 | case TCP_CONNTRACK_SYN_SENT2: | ||
| 985 | /* tcp_conntracks table is not smart enough to handle | ||
| 986 | * simultaneous open. | ||
| 987 | */ | ||
| 988 | ct->proto.tcp.last_flags |= IP_CT_TCP_SIMULTANEOUS_OPEN; | ||
| 989 | break; | ||
| 990 | case TCP_CONNTRACK_SYN_RECV: | ||
| 991 | if (dir == IP_CT_DIR_REPLY && index == TCP_ACK_SET && | ||
| 992 | ct->proto.tcp.last_flags & IP_CT_TCP_SIMULTANEOUS_OPEN) | ||
| 993 | new_state = TCP_CONNTRACK_ESTABLISHED; | ||
| 994 | break; | ||
| 984 | case TCP_CONNTRACK_CLOSE: | 995 | case TCP_CONNTRACK_CLOSE: |
| 985 | if (index == TCP_RST_SET | 996 | if (index == TCP_RST_SET |
| 986 | && (ct->proto.tcp.seen[!dir].flags & IP_CT_TCP_FLAG_MAXACK_SET) | 997 | && (ct->proto.tcp.seen[!dir].flags & IP_CT_TCP_FLAG_MAXACK_SET) |
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c index 04d4e3772584..91e80aa852d6 100644 --- a/net/netfilter/nf_tables_api.c +++ b/net/netfilter/nf_tables_api.c | |||
| @@ -214,6 +214,34 @@ static int nft_delchain(struct nft_ctx *ctx) | |||
| 214 | return err; | 214 | return err; |
| 215 | } | 215 | } |
| 216 | 216 | ||
| 217 | static void nft_rule_expr_activate(const struct nft_ctx *ctx, | ||
| 218 | struct nft_rule *rule) | ||
| 219 | { | ||
| 220 | struct nft_expr *expr; | ||
| 221 | |||
| 222 | expr = nft_expr_first(rule); | ||
| 223 | while (expr != nft_expr_last(rule) && expr->ops) { | ||
| 224 | if (expr->ops->activate) | ||
| 225 | expr->ops->activate(ctx, expr); | ||
| 226 | |||
| 227 | expr = nft_expr_next(expr); | ||
| 228 | } | ||
| 229 | } | ||
| 230 | |||
| 231 | static void nft_rule_expr_deactivate(const struct nft_ctx *ctx, | ||
| 232 | struct nft_rule *rule) | ||
| 233 | { | ||
| 234 | struct nft_expr *expr; | ||
| 235 | |||
| 236 | expr = nft_expr_first(rule); | ||
| 237 | while (expr != nft_expr_last(rule) && expr->ops) { | ||
| 238 | if (expr->ops->deactivate) | ||
| 239 | expr->ops->deactivate(ctx, expr); | ||
| 240 | |||
| 241 | expr = nft_expr_next(expr); | ||
| 242 | } | ||
| 243 | } | ||
| 244 | |||
| 217 | static int | 245 | static int |
| 218 | nf_tables_delrule_deactivate(struct nft_ctx *ctx, struct nft_rule *rule) | 246 | nf_tables_delrule_deactivate(struct nft_ctx *ctx, struct nft_rule *rule) |
| 219 | { | 247 | { |
| @@ -259,6 +287,7 @@ static int nft_delrule(struct nft_ctx *ctx, struct nft_rule *rule) | |||
| 259 | nft_trans_destroy(trans); | 287 | nft_trans_destroy(trans); |
| 260 | return err; | 288 | return err; |
| 261 | } | 289 | } |
| 290 | nft_rule_expr_deactivate(ctx, rule); | ||
| 262 | 291 | ||
| 263 | return 0; | 292 | return 0; |
| 264 | } | 293 | } |
| @@ -2238,6 +2267,13 @@ static void nf_tables_rule_destroy(const struct nft_ctx *ctx, | |||
| 2238 | kfree(rule); | 2267 | kfree(rule); |
| 2239 | } | 2268 | } |
| 2240 | 2269 | ||
| 2270 | static void nf_tables_rule_release(const struct nft_ctx *ctx, | ||
| 2271 | struct nft_rule *rule) | ||
| 2272 | { | ||
| 2273 | nft_rule_expr_deactivate(ctx, rule); | ||
| 2274 | nf_tables_rule_destroy(ctx, rule); | ||
| 2275 | } | ||
| 2276 | |||
| 2241 | #define NFT_RULE_MAXEXPRS 128 | 2277 | #define NFT_RULE_MAXEXPRS 128 |
| 2242 | 2278 | ||
| 2243 | static struct nft_expr_info *info; | 2279 | static struct nft_expr_info *info; |
| @@ -2402,7 +2438,7 @@ static int nf_tables_newrule(struct net *net, struct sock *nlsk, | |||
| 2402 | return 0; | 2438 | return 0; |
| 2403 | 2439 | ||
| 2404 | err2: | 2440 | err2: |
| 2405 | nf_tables_rule_destroy(&ctx, rule); | 2441 | nf_tables_rule_release(&ctx, rule); |
| 2406 | err1: | 2442 | err1: |
| 2407 | for (i = 0; i < n; i++) { | 2443 | for (i = 0; i < n; i++) { |
| 2408 | if (info[i].ops != NULL) | 2444 | if (info[i].ops != NULL) |
| @@ -4044,8 +4080,10 @@ static int nft_add_set_elem(struct nft_ctx *ctx, struct nft_set *set, | |||
| 4044 | if (nft_set_ext_exists(ext, NFT_SET_EXT_DATA) ^ | 4080 | if (nft_set_ext_exists(ext, NFT_SET_EXT_DATA) ^ |
| 4045 | nft_set_ext_exists(ext2, NFT_SET_EXT_DATA) || | 4081 | nft_set_ext_exists(ext2, NFT_SET_EXT_DATA) || |
| 4046 | nft_set_ext_exists(ext, NFT_SET_EXT_OBJREF) ^ | 4082 | nft_set_ext_exists(ext, NFT_SET_EXT_OBJREF) ^ |
| 4047 | nft_set_ext_exists(ext2, NFT_SET_EXT_OBJREF)) | 4083 | nft_set_ext_exists(ext2, NFT_SET_EXT_OBJREF)) { |
| 4048 | return -EBUSY; | 4084 | err = -EBUSY; |
| 4085 | goto err5; | ||
| 4086 | } | ||
| 4049 | if ((nft_set_ext_exists(ext, NFT_SET_EXT_DATA) && | 4087 | if ((nft_set_ext_exists(ext, NFT_SET_EXT_DATA) && |
| 4050 | nft_set_ext_exists(ext2, NFT_SET_EXT_DATA) && | 4088 | nft_set_ext_exists(ext2, NFT_SET_EXT_DATA) && |
| 4051 | memcmp(nft_set_ext_data(ext), | 4089 | memcmp(nft_set_ext_data(ext), |
| @@ -4130,7 +4168,7 @@ static int nf_tables_newsetelem(struct net *net, struct sock *nlsk, | |||
| 4130 | * NFT_GOTO verdicts. This function must be called on active data objects | 4168 | * NFT_GOTO verdicts. This function must be called on active data objects |
| 4131 | * from the second phase of the commit protocol. | 4169 | * from the second phase of the commit protocol. |
| 4132 | */ | 4170 | */ |
| 4133 | static void nft_data_hold(const struct nft_data *data, enum nft_data_types type) | 4171 | void nft_data_hold(const struct nft_data *data, enum nft_data_types type) |
| 4134 | { | 4172 | { |
| 4135 | if (type == NFT_DATA_VERDICT) { | 4173 | if (type == NFT_DATA_VERDICT) { |
| 4136 | switch (data->verdict.code) { | 4174 | switch (data->verdict.code) { |
| @@ -5761,7 +5799,7 @@ static void nft_chain_commit_update(struct nft_trans *trans) | |||
| 5761 | } | 5799 | } |
| 5762 | } | 5800 | } |
| 5763 | 5801 | ||
| 5764 | static void nf_tables_commit_release(struct nft_trans *trans) | 5802 | static void nft_commit_release(struct nft_trans *trans) |
| 5765 | { | 5803 | { |
| 5766 | switch (trans->msg_type) { | 5804 | switch (trans->msg_type) { |
| 5767 | case NFT_MSG_DELTABLE: | 5805 | case NFT_MSG_DELTABLE: |
| @@ -5790,6 +5828,21 @@ static void nf_tables_commit_release(struct nft_trans *trans) | |||
| 5790 | kfree(trans); | 5828 | kfree(trans); |
| 5791 | } | 5829 | } |
| 5792 | 5830 | ||
| 5831 | static void nf_tables_commit_release(struct net *net) | ||
| 5832 | { | ||
| 5833 | struct nft_trans *trans, *next; | ||
| 5834 | |||
| 5835 | if (list_empty(&net->nft.commit_list)) | ||
| 5836 | return; | ||
| 5837 | |||
| 5838 | synchronize_rcu(); | ||
| 5839 | |||
| 5840 | list_for_each_entry_safe(trans, next, &net->nft.commit_list, list) { | ||
| 5841 | list_del(&trans->list); | ||
| 5842 | nft_commit_release(trans); | ||
| 5843 | } | ||
| 5844 | } | ||
| 5845 | |||
| 5793 | static int nf_tables_commit(struct net *net, struct sk_buff *skb) | 5846 | static int nf_tables_commit(struct net *net, struct sk_buff *skb) |
| 5794 | { | 5847 | { |
| 5795 | struct nft_trans *trans, *next; | 5848 | struct nft_trans *trans, *next; |
| @@ -5920,13 +5973,7 @@ static int nf_tables_commit(struct net *net, struct sk_buff *skb) | |||
| 5920 | } | 5973 | } |
| 5921 | } | 5974 | } |
| 5922 | 5975 | ||
| 5923 | synchronize_rcu(); | 5976 | nf_tables_commit_release(net); |
| 5924 | |||
| 5925 | list_for_each_entry_safe(trans, next, &net->nft.commit_list, list) { | ||
| 5926 | list_del(&trans->list); | ||
| 5927 | nf_tables_commit_release(trans); | ||
| 5928 | } | ||
| 5929 | |||
| 5930 | nf_tables_gen_notify(net, skb, NFT_MSG_NEWGEN); | 5977 | nf_tables_gen_notify(net, skb, NFT_MSG_NEWGEN); |
| 5931 | 5978 | ||
| 5932 | return 0; | 5979 | return 0; |
| @@ -6006,10 +6053,12 @@ static int nf_tables_abort(struct net *net, struct sk_buff *skb) | |||
| 6006 | case NFT_MSG_NEWRULE: | 6053 | case NFT_MSG_NEWRULE: |
| 6007 | trans->ctx.chain->use--; | 6054 | trans->ctx.chain->use--; |
| 6008 | list_del_rcu(&nft_trans_rule(trans)->list); | 6055 | list_del_rcu(&nft_trans_rule(trans)->list); |
| 6056 | nft_rule_expr_deactivate(&trans->ctx, nft_trans_rule(trans)); | ||
| 6009 | break; | 6057 | break; |
| 6010 | case NFT_MSG_DELRULE: | 6058 | case NFT_MSG_DELRULE: |
| 6011 | trans->ctx.chain->use++; | 6059 | trans->ctx.chain->use++; |
| 6012 | nft_clear(trans->ctx.net, nft_trans_rule(trans)); | 6060 | nft_clear(trans->ctx.net, nft_trans_rule(trans)); |
| 6061 | nft_rule_expr_activate(&trans->ctx, nft_trans_rule(trans)); | ||
| 6013 | nft_trans_destroy(trans); | 6062 | nft_trans_destroy(trans); |
| 6014 | break; | 6063 | break; |
| 6015 | case NFT_MSG_NEWSET: | 6064 | case NFT_MSG_NEWSET: |
| @@ -6585,7 +6634,7 @@ int __nft_release_basechain(struct nft_ctx *ctx) | |||
| 6585 | list_for_each_entry_safe(rule, nr, &ctx->chain->rules, list) { | 6634 | list_for_each_entry_safe(rule, nr, &ctx->chain->rules, list) { |
| 6586 | list_del(&rule->list); | 6635 | list_del(&rule->list); |
| 6587 | ctx->chain->use--; | 6636 | ctx->chain->use--; |
| 6588 | nf_tables_rule_destroy(ctx, rule); | 6637 | nf_tables_rule_release(ctx, rule); |
| 6589 | } | 6638 | } |
| 6590 | list_del(&ctx->chain->list); | 6639 | list_del(&ctx->chain->list); |
| 6591 | ctx->table->use--; | 6640 | ctx->table->use--; |
| @@ -6623,7 +6672,7 @@ static void __nft_release_tables(struct net *net) | |||
| 6623 | list_for_each_entry_safe(rule, nr, &chain->rules, list) { | 6672 | list_for_each_entry_safe(rule, nr, &chain->rules, list) { |
| 6624 | list_del(&rule->list); | 6673 | list_del(&rule->list); |
| 6625 | chain->use--; | 6674 | chain->use--; |
| 6626 | nf_tables_rule_destroy(&ctx, rule); | 6675 | nf_tables_rule_release(&ctx, rule); |
| 6627 | } | 6676 | } |
| 6628 | } | 6677 | } |
| 6629 | list_for_each_entry_safe(flowtable, nf, &table->flowtables, list) { | 6678 | list_for_each_entry_safe(flowtable, nf, &table->flowtables, list) { |
diff --git a/net/netfilter/nf_tables_core.c b/net/netfilter/nf_tables_core.c index dfd0bf3810d2..942702a2776f 100644 --- a/net/netfilter/nf_tables_core.c +++ b/net/netfilter/nf_tables_core.c | |||
| @@ -119,15 +119,22 @@ DEFINE_STATIC_KEY_FALSE(nft_counters_enabled); | |||
| 119 | static noinline void nft_update_chain_stats(const struct nft_chain *chain, | 119 | static noinline void nft_update_chain_stats(const struct nft_chain *chain, |
| 120 | const struct nft_pktinfo *pkt) | 120 | const struct nft_pktinfo *pkt) |
| 121 | { | 121 | { |
| 122 | struct nft_base_chain *base_chain; | ||
| 122 | struct nft_stats *stats; | 123 | struct nft_stats *stats; |
| 123 | 124 | ||
| 124 | local_bh_disable(); | 125 | base_chain = nft_base_chain(chain); |
| 125 | stats = this_cpu_ptr(rcu_dereference(nft_base_chain(chain)->stats)); | 126 | if (!base_chain->stats) |
| 126 | u64_stats_update_begin(&stats->syncp); | 127 | return; |
| 127 | stats->pkts++; | 128 | |
| 128 | stats->bytes += pkt->skb->len; | 129 | stats = this_cpu_ptr(rcu_dereference(base_chain->stats)); |
| 129 | u64_stats_update_end(&stats->syncp); | 130 | if (stats) { |
| 130 | local_bh_enable(); | 131 | local_bh_disable(); |
| 132 | u64_stats_update_begin(&stats->syncp); | ||
| 133 | stats->pkts++; | ||
| 134 | stats->bytes += pkt->skb->len; | ||
| 135 | u64_stats_update_end(&stats->syncp); | ||
| 136 | local_bh_enable(); | ||
| 137 | } | ||
| 131 | } | 138 | } |
| 132 | 139 | ||
| 133 | struct nft_jumpstack { | 140 | struct nft_jumpstack { |
diff --git a/net/netfilter/nfnetlink_acct.c b/net/netfilter/nfnetlink_acct.c index b9505bcd3827..6ddf89183e7b 100644 --- a/net/netfilter/nfnetlink_acct.c +++ b/net/netfilter/nfnetlink_acct.c | |||
| @@ -115,7 +115,7 @@ static int nfnl_acct_new(struct net *net, struct sock *nfnl, | |||
| 115 | nfacct->flags = flags; | 115 | nfacct->flags = flags; |
| 116 | } | 116 | } |
| 117 | 117 | ||
| 118 | strncpy(nfacct->name, nla_data(tb[NFACCT_NAME]), NFACCT_NAME_MAX); | 118 | nla_strlcpy(nfacct->name, nla_data(tb[NFACCT_NAME]), NFACCT_NAME_MAX); |
| 119 | 119 | ||
| 120 | if (tb[NFACCT_BYTES]) { | 120 | if (tb[NFACCT_BYTES]) { |
| 121 | atomic64_set(&nfacct->bytes, | 121 | atomic64_set(&nfacct->bytes, |
diff --git a/net/netfilter/nfnetlink_cthelper.c b/net/netfilter/nfnetlink_cthelper.c index 4a4b293fb2e5..fa026b269b36 100644 --- a/net/netfilter/nfnetlink_cthelper.c +++ b/net/netfilter/nfnetlink_cthelper.c | |||
| @@ -149,8 +149,8 @@ nfnl_cthelper_expect_policy(struct nf_conntrack_expect_policy *expect_policy, | |||
| 149 | !tb[NFCTH_POLICY_EXPECT_TIMEOUT]) | 149 | !tb[NFCTH_POLICY_EXPECT_TIMEOUT]) |
| 150 | return -EINVAL; | 150 | return -EINVAL; |
| 151 | 151 | ||
| 152 | strncpy(expect_policy->name, | 152 | nla_strlcpy(expect_policy->name, |
| 153 | nla_data(tb[NFCTH_POLICY_NAME]), NF_CT_HELPER_NAME_LEN); | 153 | nla_data(tb[NFCTH_POLICY_NAME]), NF_CT_HELPER_NAME_LEN); |
| 154 | expect_policy->max_expected = | 154 | expect_policy->max_expected = |
| 155 | ntohl(nla_get_be32(tb[NFCTH_POLICY_EXPECT_MAX])); | 155 | ntohl(nla_get_be32(tb[NFCTH_POLICY_EXPECT_MAX])); |
| 156 | if (expect_policy->max_expected > NF_CT_EXPECT_MAX_CNT) | 156 | if (expect_policy->max_expected > NF_CT_EXPECT_MAX_CNT) |
| @@ -234,7 +234,8 @@ nfnl_cthelper_create(const struct nlattr * const tb[], | |||
| 234 | if (ret < 0) | 234 | if (ret < 0) |
| 235 | goto err1; | 235 | goto err1; |
| 236 | 236 | ||
| 237 | strncpy(helper->name, nla_data(tb[NFCTH_NAME]), NF_CT_HELPER_NAME_LEN); | 237 | nla_strlcpy(helper->name, |
| 238 | nla_data(tb[NFCTH_NAME]), NF_CT_HELPER_NAME_LEN); | ||
| 238 | size = ntohl(nla_get_be32(tb[NFCTH_PRIV_DATA_LEN])); | 239 | size = ntohl(nla_get_be32(tb[NFCTH_PRIV_DATA_LEN])); |
| 239 | if (size > FIELD_SIZEOF(struct nf_conn_help, data)) { | 240 | if (size > FIELD_SIZEOF(struct nf_conn_help, data)) { |
| 240 | ret = -ENOMEM; | 241 | ret = -ENOMEM; |
diff --git a/net/netfilter/nft_compat.c b/net/netfilter/nft_compat.c index 8e23726b9081..1d99a1efdafc 100644 --- a/net/netfilter/nft_compat.c +++ b/net/netfilter/nft_compat.c | |||
| @@ -27,14 +27,31 @@ struct nft_xt { | |||
| 27 | struct list_head head; | 27 | struct list_head head; |
| 28 | struct nft_expr_ops ops; | 28 | struct nft_expr_ops ops; |
| 29 | unsigned int refcnt; | 29 | unsigned int refcnt; |
| 30 | |||
| 31 | /* Unlike other expressions, ops doesn't have static storage duration. | ||
| 32 | * nft core assumes they do. We use kfree_rcu so that nft core can | ||
| 33 | * can check expr->ops->size even after nft_compat->destroy() frees | ||
| 34 | * the nft_xt struct that holds the ops structure. | ||
| 35 | */ | ||
| 36 | struct rcu_head rcu_head; | ||
| 37 | }; | ||
| 38 | |||
| 39 | /* Used for matches where *info is larger than X byte */ | ||
| 40 | #define NFT_MATCH_LARGE_THRESH 192 | ||
| 41 | |||
| 42 | struct nft_xt_match_priv { | ||
| 43 | void *info; | ||
| 30 | }; | 44 | }; |
| 31 | 45 | ||
| 32 | static void nft_xt_put(struct nft_xt *xt) | 46 | static bool nft_xt_put(struct nft_xt *xt) |
| 33 | { | 47 | { |
| 34 | if (--xt->refcnt == 0) { | 48 | if (--xt->refcnt == 0) { |
| 35 | list_del(&xt->head); | 49 | list_del(&xt->head); |
| 36 | kfree(xt); | 50 | kfree_rcu(xt, rcu_head); |
| 51 | return true; | ||
| 37 | } | 52 | } |
| 53 | |||
| 54 | return false; | ||
| 38 | } | 55 | } |
| 39 | 56 | ||
| 40 | static int nft_compat_chain_validate_dependency(const char *tablename, | 57 | static int nft_compat_chain_validate_dependency(const char *tablename, |
| @@ -226,6 +243,7 @@ nft_target_init(const struct nft_ctx *ctx, const struct nft_expr *expr, | |||
| 226 | struct xt_target *target = expr->ops->data; | 243 | struct xt_target *target = expr->ops->data; |
| 227 | struct xt_tgchk_param par; | 244 | struct xt_tgchk_param par; |
| 228 | size_t size = XT_ALIGN(nla_len(tb[NFTA_TARGET_INFO])); | 245 | size_t size = XT_ALIGN(nla_len(tb[NFTA_TARGET_INFO])); |
| 246 | struct nft_xt *nft_xt; | ||
| 229 | u16 proto = 0; | 247 | u16 proto = 0; |
| 230 | bool inv = false; | 248 | bool inv = false; |
| 231 | union nft_entry e = {}; | 249 | union nft_entry e = {}; |
| @@ -236,25 +254,22 @@ nft_target_init(const struct nft_ctx *ctx, const struct nft_expr *expr, | |||
| 236 | if (ctx->nla[NFTA_RULE_COMPAT]) { | 254 | if (ctx->nla[NFTA_RULE_COMPAT]) { |
| 237 | ret = nft_parse_compat(ctx->nla[NFTA_RULE_COMPAT], &proto, &inv); | 255 | ret = nft_parse_compat(ctx->nla[NFTA_RULE_COMPAT], &proto, &inv); |
| 238 | if (ret < 0) | 256 | if (ret < 0) |
| 239 | goto err; | 257 | return ret; |
| 240 | } | 258 | } |
| 241 | 259 | ||
| 242 | nft_target_set_tgchk_param(&par, ctx, target, info, &e, proto, inv); | 260 | nft_target_set_tgchk_param(&par, ctx, target, info, &e, proto, inv); |
| 243 | 261 | ||
| 244 | ret = xt_check_target(&par, size, proto, inv); | 262 | ret = xt_check_target(&par, size, proto, inv); |
| 245 | if (ret < 0) | 263 | if (ret < 0) |
| 246 | goto err; | 264 | return ret; |
| 247 | 265 | ||
| 248 | /* The standard target cannot be used */ | 266 | /* The standard target cannot be used */ |
| 249 | if (target->target == NULL) { | 267 | if (!target->target) |
| 250 | ret = -EINVAL; | 268 | return -EINVAL; |
| 251 | goto err; | ||
| 252 | } | ||
| 253 | 269 | ||
| 270 | nft_xt = container_of(expr->ops, struct nft_xt, ops); | ||
| 271 | nft_xt->refcnt++; | ||
| 254 | return 0; | 272 | return 0; |
| 255 | err: | ||
| 256 | module_put(target->me); | ||
| 257 | return ret; | ||
| 258 | } | 273 | } |
| 259 | 274 | ||
| 260 | static void | 275 | static void |
| @@ -271,8 +286,8 @@ nft_target_destroy(const struct nft_ctx *ctx, const struct nft_expr *expr) | |||
| 271 | if (par.target->destroy != NULL) | 286 | if (par.target->destroy != NULL) |
| 272 | par.target->destroy(&par); | 287 | par.target->destroy(&par); |
| 273 | 288 | ||
| 274 | nft_xt_put(container_of(expr->ops, struct nft_xt, ops)); | 289 | if (nft_xt_put(container_of(expr->ops, struct nft_xt, ops))) |
| 275 | module_put(target->me); | 290 | module_put(target->me); |
| 276 | } | 291 | } |
| 277 | 292 | ||
| 278 | static int nft_target_dump(struct sk_buff *skb, const struct nft_expr *expr) | 293 | static int nft_target_dump(struct sk_buff *skb, const struct nft_expr *expr) |
| @@ -316,11 +331,11 @@ static int nft_target_validate(const struct nft_ctx *ctx, | |||
| 316 | return 0; | 331 | return 0; |
| 317 | } | 332 | } |
| 318 | 333 | ||
| 319 | static void nft_match_eval(const struct nft_expr *expr, | 334 | static void __nft_match_eval(const struct nft_expr *expr, |
| 320 | struct nft_regs *regs, | 335 | struct nft_regs *regs, |
| 321 | const struct nft_pktinfo *pkt) | 336 | const struct nft_pktinfo *pkt, |
| 337 | void *info) | ||
| 322 | { | 338 | { |
| 323 | void *info = nft_expr_priv(expr); | ||
| 324 | struct xt_match *match = expr->ops->data; | 339 | struct xt_match *match = expr->ops->data; |
| 325 | struct sk_buff *skb = pkt->skb; | 340 | struct sk_buff *skb = pkt->skb; |
| 326 | bool ret; | 341 | bool ret; |
| @@ -344,6 +359,22 @@ static void nft_match_eval(const struct nft_expr *expr, | |||
| 344 | } | 359 | } |
| 345 | } | 360 | } |
| 346 | 361 | ||
| 362 | static void nft_match_large_eval(const struct nft_expr *expr, | ||
| 363 | struct nft_regs *regs, | ||
| 364 | const struct nft_pktinfo *pkt) | ||
| 365 | { | ||
| 366 | struct nft_xt_match_priv *priv = nft_expr_priv(expr); | ||
| 367 | |||
| 368 | __nft_match_eval(expr, regs, pkt, priv->info); | ||
| 369 | } | ||
| 370 | |||
| 371 | static void nft_match_eval(const struct nft_expr *expr, | ||
| 372 | struct nft_regs *regs, | ||
| 373 | const struct nft_pktinfo *pkt) | ||
| 374 | { | ||
| 375 | __nft_match_eval(expr, regs, pkt, nft_expr_priv(expr)); | ||
| 376 | } | ||
| 377 | |||
| 347 | static const struct nla_policy nft_match_policy[NFTA_MATCH_MAX + 1] = { | 378 | static const struct nla_policy nft_match_policy[NFTA_MATCH_MAX + 1] = { |
| 348 | [NFTA_MATCH_NAME] = { .type = NLA_NUL_STRING }, | 379 | [NFTA_MATCH_NAME] = { .type = NLA_NUL_STRING }, |
| 349 | [NFTA_MATCH_REV] = { .type = NLA_U32 }, | 380 | [NFTA_MATCH_REV] = { .type = NLA_U32 }, |
| @@ -404,13 +435,14 @@ static void match_compat_from_user(struct xt_match *m, void *in, void *out) | |||
| 404 | } | 435 | } |
| 405 | 436 | ||
| 406 | static int | 437 | static int |
| 407 | nft_match_init(const struct nft_ctx *ctx, const struct nft_expr *expr, | 438 | __nft_match_init(const struct nft_ctx *ctx, const struct nft_expr *expr, |
| 408 | const struct nlattr * const tb[]) | 439 | const struct nlattr * const tb[], |
| 440 | void *info) | ||
| 409 | { | 441 | { |
| 410 | void *info = nft_expr_priv(expr); | ||
| 411 | struct xt_match *match = expr->ops->data; | 442 | struct xt_match *match = expr->ops->data; |
| 412 | struct xt_mtchk_param par; | 443 | struct xt_mtchk_param par; |
| 413 | size_t size = XT_ALIGN(nla_len(tb[NFTA_MATCH_INFO])); | 444 | size_t size = XT_ALIGN(nla_len(tb[NFTA_MATCH_INFO])); |
| 445 | struct nft_xt *nft_xt; | ||
| 414 | u16 proto = 0; | 446 | u16 proto = 0; |
| 415 | bool inv = false; | 447 | bool inv = false; |
| 416 | union nft_entry e = {}; | 448 | union nft_entry e = {}; |
| @@ -421,26 +453,50 @@ nft_match_init(const struct nft_ctx *ctx, const struct nft_expr *expr, | |||
| 421 | if (ctx->nla[NFTA_RULE_COMPAT]) { | 453 | if (ctx->nla[NFTA_RULE_COMPAT]) { |
| 422 | ret = nft_parse_compat(ctx->nla[NFTA_RULE_COMPAT], &proto, &inv); | 454 | ret = nft_parse_compat(ctx->nla[NFTA_RULE_COMPAT], &proto, &inv); |
| 423 | if (ret < 0) | 455 | if (ret < 0) |
| 424 | goto err; | 456 | return ret; |
| 425 | } | 457 | } |
| 426 | 458 | ||
| 427 | nft_match_set_mtchk_param(&par, ctx, match, info, &e, proto, inv); | 459 | nft_match_set_mtchk_param(&par, ctx, match, info, &e, proto, inv); |
| 428 | 460 | ||
| 429 | ret = xt_check_match(&par, size, proto, inv); | 461 | ret = xt_check_match(&par, size, proto, inv); |
| 430 | if (ret < 0) | 462 | if (ret < 0) |
| 431 | goto err; | 463 | return ret; |
| 432 | 464 | ||
| 465 | nft_xt = container_of(expr->ops, struct nft_xt, ops); | ||
| 466 | nft_xt->refcnt++; | ||
| 433 | return 0; | 467 | return 0; |
| 434 | err: | 468 | } |
| 435 | module_put(match->me); | 469 | |
| 470 | static int | ||
| 471 | nft_match_init(const struct nft_ctx *ctx, const struct nft_expr *expr, | ||
| 472 | const struct nlattr * const tb[]) | ||
| 473 | { | ||
| 474 | return __nft_match_init(ctx, expr, tb, nft_expr_priv(expr)); | ||
| 475 | } | ||
| 476 | |||
| 477 | static int | ||
| 478 | nft_match_large_init(const struct nft_ctx *ctx, const struct nft_expr *expr, | ||
| 479 | const struct nlattr * const tb[]) | ||
| 480 | { | ||
| 481 | struct nft_xt_match_priv *priv = nft_expr_priv(expr); | ||
| 482 | struct xt_match *m = expr->ops->data; | ||
| 483 | int ret; | ||
| 484 | |||
| 485 | priv->info = kmalloc(XT_ALIGN(m->matchsize), GFP_KERNEL); | ||
| 486 | if (!priv->info) | ||
| 487 | return -ENOMEM; | ||
| 488 | |||
| 489 | ret = __nft_match_init(ctx, expr, tb, priv->info); | ||
| 490 | if (ret) | ||
| 491 | kfree(priv->info); | ||
| 436 | return ret; | 492 | return ret; |
| 437 | } | 493 | } |
| 438 | 494 | ||
| 439 | static void | 495 | static void |
| 440 | nft_match_destroy(const struct nft_ctx *ctx, const struct nft_expr *expr) | 496 | __nft_match_destroy(const struct nft_ctx *ctx, const struct nft_expr *expr, |
| 497 | void *info) | ||
| 441 | { | 498 | { |
| 442 | struct xt_match *match = expr->ops->data; | 499 | struct xt_match *match = expr->ops->data; |
| 443 | void *info = nft_expr_priv(expr); | ||
| 444 | struct xt_mtdtor_param par; | 500 | struct xt_mtdtor_param par; |
| 445 | 501 | ||
| 446 | par.net = ctx->net; | 502 | par.net = ctx->net; |
| @@ -450,13 +506,28 @@ nft_match_destroy(const struct nft_ctx *ctx, const struct nft_expr *expr) | |||
| 450 | if (par.match->destroy != NULL) | 506 | if (par.match->destroy != NULL) |
| 451 | par.match->destroy(&par); | 507 | par.match->destroy(&par); |
| 452 | 508 | ||
| 453 | nft_xt_put(container_of(expr->ops, struct nft_xt, ops)); | 509 | if (nft_xt_put(container_of(expr->ops, struct nft_xt, ops))) |
| 454 | module_put(match->me); | 510 | module_put(match->me); |
| 455 | } | 511 | } |
| 456 | 512 | ||
| 457 | static int nft_match_dump(struct sk_buff *skb, const struct nft_expr *expr) | 513 | static void |
| 514 | nft_match_destroy(const struct nft_ctx *ctx, const struct nft_expr *expr) | ||
| 515 | { | ||
| 516 | __nft_match_destroy(ctx, expr, nft_expr_priv(expr)); | ||
| 517 | } | ||
| 518 | |||
| 519 | static void | ||
| 520 | nft_match_large_destroy(const struct nft_ctx *ctx, const struct nft_expr *expr) | ||
| 521 | { | ||
| 522 | struct nft_xt_match_priv *priv = nft_expr_priv(expr); | ||
| 523 | |||
| 524 | __nft_match_destroy(ctx, expr, priv->info); | ||
| 525 | kfree(priv->info); | ||
| 526 | } | ||
| 527 | |||
| 528 | static int __nft_match_dump(struct sk_buff *skb, const struct nft_expr *expr, | ||
| 529 | void *info) | ||
| 458 | { | 530 | { |
| 459 | void *info = nft_expr_priv(expr); | ||
| 460 | struct xt_match *match = expr->ops->data; | 531 | struct xt_match *match = expr->ops->data; |
| 461 | 532 | ||
| 462 | if (nla_put_string(skb, NFTA_MATCH_NAME, match->name) || | 533 | if (nla_put_string(skb, NFTA_MATCH_NAME, match->name) || |
| @@ -470,6 +541,18 @@ nla_put_failure: | |||
| 470 | return -1; | 541 | return -1; |
| 471 | } | 542 | } |
| 472 | 543 | ||
| 544 | static int nft_match_dump(struct sk_buff *skb, const struct nft_expr *expr) | ||
| 545 | { | ||
| 546 | return __nft_match_dump(skb, expr, nft_expr_priv(expr)); | ||
| 547 | } | ||
| 548 | |||
| 549 | static int nft_match_large_dump(struct sk_buff *skb, const struct nft_expr *e) | ||
| 550 | { | ||
| 551 | struct nft_xt_match_priv *priv = nft_expr_priv(e); | ||
| 552 | |||
| 553 | return __nft_match_dump(skb, e, priv->info); | ||
| 554 | } | ||
| 555 | |||
| 473 | static int nft_match_validate(const struct nft_ctx *ctx, | 556 | static int nft_match_validate(const struct nft_ctx *ctx, |
| 474 | const struct nft_expr *expr, | 557 | const struct nft_expr *expr, |
| 475 | const struct nft_data **data) | 558 | const struct nft_data **data) |
| @@ -637,6 +720,7 @@ nft_match_select_ops(const struct nft_ctx *ctx, | |||
| 637 | { | 720 | { |
| 638 | struct nft_xt *nft_match; | 721 | struct nft_xt *nft_match; |
| 639 | struct xt_match *match; | 722 | struct xt_match *match; |
| 723 | unsigned int matchsize; | ||
| 640 | char *mt_name; | 724 | char *mt_name; |
| 641 | u32 rev, family; | 725 | u32 rev, family; |
| 642 | int err; | 726 | int err; |
| @@ -654,13 +738,8 @@ nft_match_select_ops(const struct nft_ctx *ctx, | |||
| 654 | list_for_each_entry(nft_match, &nft_match_list, head) { | 738 | list_for_each_entry(nft_match, &nft_match_list, head) { |
| 655 | struct xt_match *match = nft_match->ops.data; | 739 | struct xt_match *match = nft_match->ops.data; |
| 656 | 740 | ||
| 657 | if (nft_match_cmp(match, mt_name, rev, family)) { | 741 | if (nft_match_cmp(match, mt_name, rev, family)) |
| 658 | if (!try_module_get(match->me)) | ||
| 659 | return ERR_PTR(-ENOENT); | ||
| 660 | |||
| 661 | nft_match->refcnt++; | ||
| 662 | return &nft_match->ops; | 742 | return &nft_match->ops; |
| 663 | } | ||
| 664 | } | 743 | } |
| 665 | 744 | ||
| 666 | match = xt_request_find_match(family, mt_name, rev); | 745 | match = xt_request_find_match(family, mt_name, rev); |
| @@ -679,9 +758,8 @@ nft_match_select_ops(const struct nft_ctx *ctx, | |||
| 679 | goto err; | 758 | goto err; |
| 680 | } | 759 | } |
| 681 | 760 | ||
| 682 | nft_match->refcnt = 1; | 761 | nft_match->refcnt = 0; |
| 683 | nft_match->ops.type = &nft_match_type; | 762 | nft_match->ops.type = &nft_match_type; |
| 684 | nft_match->ops.size = NFT_EXPR_SIZE(XT_ALIGN(match->matchsize)); | ||
| 685 | nft_match->ops.eval = nft_match_eval; | 763 | nft_match->ops.eval = nft_match_eval; |
| 686 | nft_match->ops.init = nft_match_init; | 764 | nft_match->ops.init = nft_match_init; |
| 687 | nft_match->ops.destroy = nft_match_destroy; | 765 | nft_match->ops.destroy = nft_match_destroy; |
| @@ -689,6 +767,18 @@ nft_match_select_ops(const struct nft_ctx *ctx, | |||
| 689 | nft_match->ops.validate = nft_match_validate; | 767 | nft_match->ops.validate = nft_match_validate; |
| 690 | nft_match->ops.data = match; | 768 | nft_match->ops.data = match; |
| 691 | 769 | ||
| 770 | matchsize = NFT_EXPR_SIZE(XT_ALIGN(match->matchsize)); | ||
| 771 | if (matchsize > NFT_MATCH_LARGE_THRESH) { | ||
| 772 | matchsize = NFT_EXPR_SIZE(sizeof(struct nft_xt_match_priv)); | ||
| 773 | |||
| 774 | nft_match->ops.eval = nft_match_large_eval; | ||
| 775 | nft_match->ops.init = nft_match_large_init; | ||
| 776 | nft_match->ops.destroy = nft_match_large_destroy; | ||
| 777 | nft_match->ops.dump = nft_match_large_dump; | ||
| 778 | } | ||
| 779 | |||
| 780 | nft_match->ops.size = matchsize; | ||
| 781 | |||
| 692 | list_add(&nft_match->head, &nft_match_list); | 782 | list_add(&nft_match->head, &nft_match_list); |
| 693 | 783 | ||
| 694 | return &nft_match->ops; | 784 | return &nft_match->ops; |
| @@ -739,13 +829,8 @@ nft_target_select_ops(const struct nft_ctx *ctx, | |||
| 739 | list_for_each_entry(nft_target, &nft_target_list, head) { | 829 | list_for_each_entry(nft_target, &nft_target_list, head) { |
| 740 | struct xt_target *target = nft_target->ops.data; | 830 | struct xt_target *target = nft_target->ops.data; |
| 741 | 831 | ||
| 742 | if (nft_target_cmp(target, tg_name, rev, family)) { | 832 | if (nft_target_cmp(target, tg_name, rev, family)) |
| 743 | if (!try_module_get(target->me)) | ||
| 744 | return ERR_PTR(-ENOENT); | ||
| 745 | |||
| 746 | nft_target->refcnt++; | ||
| 747 | return &nft_target->ops; | 833 | return &nft_target->ops; |
| 748 | } | ||
| 749 | } | 834 | } |
| 750 | 835 | ||
| 751 | target = xt_request_find_target(family, tg_name, rev); | 836 | target = xt_request_find_target(family, tg_name, rev); |
| @@ -764,7 +849,7 @@ nft_target_select_ops(const struct nft_ctx *ctx, | |||
| 764 | goto err; | 849 | goto err; |
| 765 | } | 850 | } |
| 766 | 851 | ||
| 767 | nft_target->refcnt = 1; | 852 | nft_target->refcnt = 0; |
| 768 | nft_target->ops.type = &nft_target_type; | 853 | nft_target->ops.type = &nft_target_type; |
| 769 | nft_target->ops.size = NFT_EXPR_SIZE(XT_ALIGN(target->targetsize)); | 854 | nft_target->ops.size = NFT_EXPR_SIZE(XT_ALIGN(target->targetsize)); |
| 770 | nft_target->ops.init = nft_target_init; | 855 | nft_target->ops.init = nft_target_init; |
| @@ -823,6 +908,32 @@ err_match: | |||
| 823 | 908 | ||
| 824 | static void __exit nft_compat_module_exit(void) | 909 | static void __exit nft_compat_module_exit(void) |
| 825 | { | 910 | { |
| 911 | struct nft_xt *xt, *next; | ||
| 912 | |||
| 913 | /* list should be empty here, it can be non-empty only in case there | ||
| 914 | * was an error that caused nft_xt expr to not be initialized fully | ||
| 915 | * and noone else requested the same expression later. | ||
| 916 | * | ||
| 917 | * In this case, the lists contain 0-refcount entries that still | ||
| 918 | * hold module reference. | ||
| 919 | */ | ||
| 920 | list_for_each_entry_safe(xt, next, &nft_target_list, head) { | ||
| 921 | struct xt_target *target = xt->ops.data; | ||
| 922 | |||
| 923 | if (WARN_ON_ONCE(xt->refcnt)) | ||
| 924 | continue; | ||
| 925 | module_put(target->me); | ||
| 926 | kfree(xt); | ||
| 927 | } | ||
| 928 | |||
| 929 | list_for_each_entry_safe(xt, next, &nft_match_list, head) { | ||
| 930 | struct xt_match *match = xt->ops.data; | ||
| 931 | |||
| 932 | if (WARN_ON_ONCE(xt->refcnt)) | ||
| 933 | continue; | ||
| 934 | module_put(match->me); | ||
| 935 | kfree(xt); | ||
| 936 | } | ||
| 826 | nfnetlink_subsys_unregister(&nfnl_compat_subsys); | 937 | nfnetlink_subsys_unregister(&nfnl_compat_subsys); |
| 827 | nft_unregister_expr(&nft_target_type); | 938 | nft_unregister_expr(&nft_target_type); |
| 828 | nft_unregister_expr(&nft_match_type); | 939 | nft_unregister_expr(&nft_match_type); |
diff --git a/net/netfilter/nft_immediate.c b/net/netfilter/nft_immediate.c index 4717d7796927..aa87ff8beae8 100644 --- a/net/netfilter/nft_immediate.c +++ b/net/netfilter/nft_immediate.c | |||
| @@ -69,8 +69,16 @@ err1: | |||
| 69 | return err; | 69 | return err; |
| 70 | } | 70 | } |
| 71 | 71 | ||
| 72 | static void nft_immediate_destroy(const struct nft_ctx *ctx, | 72 | static void nft_immediate_activate(const struct nft_ctx *ctx, |
| 73 | const struct nft_expr *expr) | 73 | const struct nft_expr *expr) |
| 74 | { | ||
| 75 | const struct nft_immediate_expr *priv = nft_expr_priv(expr); | ||
| 76 | |||
| 77 | return nft_data_hold(&priv->data, nft_dreg_to_type(priv->dreg)); | ||
| 78 | } | ||
| 79 | |||
| 80 | static void nft_immediate_deactivate(const struct nft_ctx *ctx, | ||
| 81 | const struct nft_expr *expr) | ||
| 74 | { | 82 | { |
| 75 | const struct nft_immediate_expr *priv = nft_expr_priv(expr); | 83 | const struct nft_immediate_expr *priv = nft_expr_priv(expr); |
| 76 | 84 | ||
| @@ -108,7 +116,8 @@ static const struct nft_expr_ops nft_imm_ops = { | |||
| 108 | .size = NFT_EXPR_SIZE(sizeof(struct nft_immediate_expr)), | 116 | .size = NFT_EXPR_SIZE(sizeof(struct nft_immediate_expr)), |
| 109 | .eval = nft_immediate_eval, | 117 | .eval = nft_immediate_eval, |
| 110 | .init = nft_immediate_init, | 118 | .init = nft_immediate_init, |
| 111 | .destroy = nft_immediate_destroy, | 119 | .activate = nft_immediate_activate, |
| 120 | .deactivate = nft_immediate_deactivate, | ||
| 112 | .dump = nft_immediate_dump, | 121 | .dump = nft_immediate_dump, |
| 113 | .validate = nft_immediate_validate, | 122 | .validate = nft_immediate_validate, |
| 114 | }; | 123 | }; |
diff --git a/net/netfilter/x_tables.c b/net/netfilter/x_tables.c index 71325fef647d..cb7cb300c3bc 100644 --- a/net/netfilter/x_tables.c +++ b/net/netfilter/x_tables.c | |||
| @@ -183,6 +183,9 @@ struct xt_match *xt_find_match(u8 af, const char *name, u8 revision) | |||
| 183 | struct xt_match *m; | 183 | struct xt_match *m; |
| 184 | int err = -ENOENT; | 184 | int err = -ENOENT; |
| 185 | 185 | ||
| 186 | if (strnlen(name, XT_EXTENSION_MAXNAMELEN) == XT_EXTENSION_MAXNAMELEN) | ||
| 187 | return ERR_PTR(-EINVAL); | ||
| 188 | |||
| 186 | mutex_lock(&xt[af].mutex); | 189 | mutex_lock(&xt[af].mutex); |
| 187 | list_for_each_entry(m, &xt[af].match, list) { | 190 | list_for_each_entry(m, &xt[af].match, list) { |
| 188 | if (strcmp(m->name, name) == 0) { | 191 | if (strcmp(m->name, name) == 0) { |
| @@ -229,6 +232,9 @@ struct xt_target *xt_find_target(u8 af, const char *name, u8 revision) | |||
| 229 | struct xt_target *t; | 232 | struct xt_target *t; |
| 230 | int err = -ENOENT; | 233 | int err = -ENOENT; |
| 231 | 234 | ||
| 235 | if (strnlen(name, XT_EXTENSION_MAXNAMELEN) == XT_EXTENSION_MAXNAMELEN) | ||
| 236 | return ERR_PTR(-EINVAL); | ||
| 237 | |||
| 232 | mutex_lock(&xt[af].mutex); | 238 | mutex_lock(&xt[af].mutex); |
| 233 | list_for_each_entry(t, &xt[af].target, list) { | 239 | list_for_each_entry(t, &xt[af].target, list) { |
| 234 | if (strcmp(t->name, name) == 0) { | 240 | if (strcmp(t->name, name) == 0) { |
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c index 01f3515cada0..acb7b86574cd 100644 --- a/net/packet/af_packet.c +++ b/net/packet/af_packet.c | |||
| @@ -2903,13 +2903,15 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len) | |||
| 2903 | if (skb == NULL) | 2903 | if (skb == NULL) |
| 2904 | goto out_unlock; | 2904 | goto out_unlock; |
| 2905 | 2905 | ||
| 2906 | skb_set_network_header(skb, reserve); | 2906 | skb_reset_network_header(skb); |
| 2907 | 2907 | ||
| 2908 | err = -EINVAL; | 2908 | err = -EINVAL; |
| 2909 | if (sock->type == SOCK_DGRAM) { | 2909 | if (sock->type == SOCK_DGRAM) { |
| 2910 | offset = dev_hard_header(skb, dev, ntohs(proto), addr, NULL, len); | 2910 | offset = dev_hard_header(skb, dev, ntohs(proto), addr, NULL, len); |
| 2911 | if (unlikely(offset < 0)) | 2911 | if (unlikely(offset < 0)) |
| 2912 | goto out_free; | 2912 | goto out_free; |
| 2913 | } else if (reserve) { | ||
| 2914 | skb_reserve(skb, -reserve); | ||
| 2913 | } | 2915 | } |
| 2914 | 2916 | ||
| 2915 | /* Returns -EFAULT on error */ | 2917 | /* Returns -EFAULT on error */ |
diff --git a/net/rds/Kconfig b/net/rds/Kconfig index bffde4b46c5d..1a31502ee7db 100644 --- a/net/rds/Kconfig +++ b/net/rds/Kconfig | |||
| @@ -8,7 +8,7 @@ config RDS | |||
| 8 | 8 | ||
| 9 | config RDS_RDMA | 9 | config RDS_RDMA |
| 10 | tristate "RDS over Infiniband" | 10 | tristate "RDS over Infiniband" |
| 11 | depends on RDS && INFINIBAND && INFINIBAND_ADDR_TRANS | 11 | depends on RDS && INFINIBAND_ADDR_TRANS |
| 12 | ---help--- | 12 | ---help--- |
| 13 | Allow RDS to use Infiniband as a transport. | 13 | Allow RDS to use Infiniband as a transport. |
| 14 | This transport supports RDMA operations. | 14 | This transport supports RDMA operations. |
diff --git a/net/sched/act_vlan.c b/net/sched/act_vlan.c index 853604685965..1fb39e1f9d07 100644 --- a/net/sched/act_vlan.c +++ b/net/sched/act_vlan.c | |||
| @@ -161,6 +161,8 @@ static int tcf_vlan_init(struct net *net, struct nlattr *nla, | |||
| 161 | case htons(ETH_P_8021AD): | 161 | case htons(ETH_P_8021AD): |
| 162 | break; | 162 | break; |
| 163 | default: | 163 | default: |
| 164 | if (exists) | ||
| 165 | tcf_idr_release(*a, bind); | ||
| 164 | return -EPROTONOSUPPORT; | 166 | return -EPROTONOSUPPORT; |
| 165 | } | 167 | } |
| 166 | } else { | 168 | } else { |
diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c index 963e4bf0aab8..a57e112d9b3e 100644 --- a/net/sched/cls_api.c +++ b/net/sched/cls_api.c | |||
| @@ -1588,7 +1588,7 @@ int tc_setup_cb_call(struct tcf_block *block, struct tcf_exts *exts, | |||
| 1588 | return ret; | 1588 | return ret; |
| 1589 | ok_count = ret; | 1589 | ok_count = ret; |
| 1590 | 1590 | ||
| 1591 | if (!exts) | 1591 | if (!exts || ok_count) |
| 1592 | return ok_count; | 1592 | return ok_count; |
| 1593 | ret = tc_exts_setup_cb_egdev_call(exts, type, type_data, err_stop); | 1593 | ret = tc_exts_setup_cb_egdev_call(exts, type, type_data, err_stop); |
| 1594 | if (ret < 0) | 1594 | if (ret < 0) |
diff --git a/net/sched/sch_red.c b/net/sched/sch_red.c index 16644b3d2362..56c181c3feeb 100644 --- a/net/sched/sch_red.c +++ b/net/sched/sch_red.c | |||
| @@ -222,10 +222,11 @@ static int red_change(struct Qdisc *sch, struct nlattr *opt, | |||
| 222 | extack); | 222 | extack); |
| 223 | if (IS_ERR(child)) | 223 | if (IS_ERR(child)) |
| 224 | return PTR_ERR(child); | 224 | return PTR_ERR(child); |
| 225 | } | ||
| 226 | 225 | ||
| 227 | if (child != &noop_qdisc) | 226 | /* child is fifo, no need to check for noop_qdisc */ |
| 228 | qdisc_hash_add(child, true); | 227 | qdisc_hash_add(child, true); |
| 228 | } | ||
| 229 | |||
| 229 | sch_tree_lock(sch); | 230 | sch_tree_lock(sch); |
| 230 | q->flags = ctl->flags; | 231 | q->flags = ctl->flags; |
| 231 | q->limit = ctl->limit; | 232 | q->limit = ctl->limit; |
diff --git a/net/sched/sch_tbf.c b/net/sched/sch_tbf.c index 03225a8df973..6f74a426f159 100644 --- a/net/sched/sch_tbf.c +++ b/net/sched/sch_tbf.c | |||
| @@ -383,6 +383,9 @@ static int tbf_change(struct Qdisc *sch, struct nlattr *opt, | |||
| 383 | err = PTR_ERR(child); | 383 | err = PTR_ERR(child); |
| 384 | goto done; | 384 | goto done; |
| 385 | } | 385 | } |
| 386 | |||
| 387 | /* child is fifo, no need to check for noop_qdisc */ | ||
| 388 | qdisc_hash_add(child, true); | ||
| 386 | } | 389 | } |
| 387 | 390 | ||
| 388 | sch_tree_lock(sch); | 391 | sch_tree_lock(sch); |
| @@ -391,8 +394,6 @@ static int tbf_change(struct Qdisc *sch, struct nlattr *opt, | |||
| 391 | q->qdisc->qstats.backlog); | 394 | q->qdisc->qstats.backlog); |
| 392 | qdisc_destroy(q->qdisc); | 395 | qdisc_destroy(q->qdisc); |
| 393 | q->qdisc = child; | 396 | q->qdisc = child; |
| 394 | if (child != &noop_qdisc) | ||
| 395 | qdisc_hash_add(child, true); | ||
| 396 | } | 397 | } |
| 397 | q->limit = qopt->limit; | 398 | q->limit = qopt->limit; |
| 398 | if (tb[TCA_TBF_PBURST]) | 399 | if (tb[TCA_TBF_PBURST]) |
diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c index 42247110d842..0cd2e764f47f 100644 --- a/net/sctp/ipv6.c +++ b/net/sctp/ipv6.c | |||
| @@ -1006,7 +1006,7 @@ static const struct proto_ops inet6_seqpacket_ops = { | |||
| 1006 | .owner = THIS_MODULE, | 1006 | .owner = THIS_MODULE, |
| 1007 | .release = inet6_release, | 1007 | .release = inet6_release, |
| 1008 | .bind = inet6_bind, | 1008 | .bind = inet6_bind, |
| 1009 | .connect = inet_dgram_connect, | 1009 | .connect = sctp_inet_connect, |
| 1010 | .socketpair = sock_no_socketpair, | 1010 | .socketpair = sock_no_socketpair, |
| 1011 | .accept = inet_accept, | 1011 | .accept = inet_accept, |
| 1012 | .getname = sctp_getname, | 1012 | .getname = sctp_getname, |
diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c index d685f8456762..6bf0a9971888 100644 --- a/net/sctp/protocol.c +++ b/net/sctp/protocol.c | |||
| @@ -1012,7 +1012,7 @@ static const struct proto_ops inet_seqpacket_ops = { | |||
| 1012 | .owner = THIS_MODULE, | 1012 | .owner = THIS_MODULE, |
| 1013 | .release = inet_release, /* Needs to be wrapped... */ | 1013 | .release = inet_release, /* Needs to be wrapped... */ |
| 1014 | .bind = inet_bind, | 1014 | .bind = inet_bind, |
| 1015 | .connect = inet_dgram_connect, | 1015 | .connect = sctp_inet_connect, |
| 1016 | .socketpair = sock_no_socketpair, | 1016 | .socketpair = sock_no_socketpair, |
| 1017 | .accept = inet_accept, | 1017 | .accept = inet_accept, |
| 1018 | .getname = inet_getname, /* Semantics are different. */ | 1018 | .getname = inet_getname, /* Semantics are different. */ |
diff --git a/net/sctp/socket.c b/net/sctp/socket.c index 80835ac26d2c..ae7e7c606f72 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c | |||
| @@ -1086,7 +1086,7 @@ out: | |||
| 1086 | */ | 1086 | */ |
| 1087 | static int __sctp_connect(struct sock *sk, | 1087 | static int __sctp_connect(struct sock *sk, |
| 1088 | struct sockaddr *kaddrs, | 1088 | struct sockaddr *kaddrs, |
| 1089 | int addrs_size, | 1089 | int addrs_size, int flags, |
| 1090 | sctp_assoc_t *assoc_id) | 1090 | sctp_assoc_t *assoc_id) |
| 1091 | { | 1091 | { |
| 1092 | struct net *net = sock_net(sk); | 1092 | struct net *net = sock_net(sk); |
| @@ -1104,7 +1104,6 @@ static int __sctp_connect(struct sock *sk, | |||
| 1104 | union sctp_addr *sa_addr = NULL; | 1104 | union sctp_addr *sa_addr = NULL; |
| 1105 | void *addr_buf; | 1105 | void *addr_buf; |
| 1106 | unsigned short port; | 1106 | unsigned short port; |
| 1107 | unsigned int f_flags = 0; | ||
| 1108 | 1107 | ||
| 1109 | sp = sctp_sk(sk); | 1108 | sp = sctp_sk(sk); |
| 1110 | ep = sp->ep; | 1109 | ep = sp->ep; |
| @@ -1254,13 +1253,7 @@ static int __sctp_connect(struct sock *sk, | |||
| 1254 | sp->pf->to_sk_daddr(sa_addr, sk); | 1253 | sp->pf->to_sk_daddr(sa_addr, sk); |
| 1255 | sk->sk_err = 0; | 1254 | sk->sk_err = 0; |
| 1256 | 1255 | ||
| 1257 | /* in-kernel sockets don't generally have a file allocated to them | 1256 | timeo = sock_sndtimeo(sk, flags & O_NONBLOCK); |
| 1258 | * if all they do is call sock_create_kern(). | ||
| 1259 | */ | ||
| 1260 | if (sk->sk_socket->file) | ||
| 1261 | f_flags = sk->sk_socket->file->f_flags; | ||
| 1262 | |||
| 1263 | timeo = sock_sndtimeo(sk, f_flags & O_NONBLOCK); | ||
| 1264 | 1257 | ||
| 1265 | if (assoc_id) | 1258 | if (assoc_id) |
| 1266 | *assoc_id = asoc->assoc_id; | 1259 | *assoc_id = asoc->assoc_id; |
| @@ -1348,7 +1341,7 @@ static int __sctp_setsockopt_connectx(struct sock *sk, | |||
| 1348 | sctp_assoc_t *assoc_id) | 1341 | sctp_assoc_t *assoc_id) |
| 1349 | { | 1342 | { |
| 1350 | struct sockaddr *kaddrs; | 1343 | struct sockaddr *kaddrs; |
| 1351 | int err = 0; | 1344 | int err = 0, flags = 0; |
| 1352 | 1345 | ||
| 1353 | pr_debug("%s: sk:%p addrs:%p addrs_size:%d\n", | 1346 | pr_debug("%s: sk:%p addrs:%p addrs_size:%d\n", |
| 1354 | __func__, sk, addrs, addrs_size); | 1347 | __func__, sk, addrs, addrs_size); |
| @@ -1367,7 +1360,13 @@ static int __sctp_setsockopt_connectx(struct sock *sk, | |||
| 1367 | if (err) | 1360 | if (err) |
| 1368 | goto out_free; | 1361 | goto out_free; |
| 1369 | 1362 | ||
| 1370 | err = __sctp_connect(sk, kaddrs, addrs_size, assoc_id); | 1363 | /* in-kernel sockets don't generally have a file allocated to them |
| 1364 | * if all they do is call sock_create_kern(). | ||
| 1365 | */ | ||
| 1366 | if (sk->sk_socket->file) | ||
| 1367 | flags = sk->sk_socket->file->f_flags; | ||
| 1368 | |||
| 1369 | err = __sctp_connect(sk, kaddrs, addrs_size, flags, assoc_id); | ||
| 1371 | 1370 | ||
| 1372 | out_free: | 1371 | out_free: |
| 1373 | kvfree(kaddrs); | 1372 | kvfree(kaddrs); |
| @@ -4397,16 +4396,26 @@ out_nounlock: | |||
| 4397 | * len: the size of the address. | 4396 | * len: the size of the address. |
| 4398 | */ | 4397 | */ |
| 4399 | static int sctp_connect(struct sock *sk, struct sockaddr *addr, | 4398 | static int sctp_connect(struct sock *sk, struct sockaddr *addr, |
| 4400 | int addr_len) | 4399 | int addr_len, int flags) |
| 4401 | { | 4400 | { |
| 4402 | int err = 0; | 4401 | struct inet_sock *inet = inet_sk(sk); |
| 4403 | struct sctp_af *af; | 4402 | struct sctp_af *af; |
| 4403 | int err = 0; | ||
| 4404 | 4404 | ||
| 4405 | lock_sock(sk); | 4405 | lock_sock(sk); |
| 4406 | 4406 | ||
| 4407 | pr_debug("%s: sk:%p, sockaddr:%p, addr_len:%d\n", __func__, sk, | 4407 | pr_debug("%s: sk:%p, sockaddr:%p, addr_len:%d\n", __func__, sk, |
| 4408 | addr, addr_len); | 4408 | addr, addr_len); |
| 4409 | 4409 | ||
| 4410 | /* We may need to bind the socket. */ | ||
| 4411 | if (!inet->inet_num) { | ||
| 4412 | if (sk->sk_prot->get_port(sk, 0)) { | ||
| 4413 | release_sock(sk); | ||
| 4414 | return -EAGAIN; | ||
| 4415 | } | ||
| 4416 | inet->inet_sport = htons(inet->inet_num); | ||
| 4417 | } | ||
| 4418 | |||
| 4410 | /* Validate addr_len before calling common connect/connectx routine. */ | 4419 | /* Validate addr_len before calling common connect/connectx routine. */ |
| 4411 | af = sctp_get_af_specific(addr->sa_family); | 4420 | af = sctp_get_af_specific(addr->sa_family); |
| 4412 | if (!af || addr_len < af->sockaddr_len) { | 4421 | if (!af || addr_len < af->sockaddr_len) { |
| @@ -4415,13 +4424,25 @@ static int sctp_connect(struct sock *sk, struct sockaddr *addr, | |||
| 4415 | /* Pass correct addr len to common routine (so it knows there | 4424 | /* Pass correct addr len to common routine (so it knows there |
| 4416 | * is only one address being passed. | 4425 | * is only one address being passed. |
| 4417 | */ | 4426 | */ |
| 4418 | err = __sctp_connect(sk, addr, af->sockaddr_len, NULL); | 4427 | err = __sctp_connect(sk, addr, af->sockaddr_len, flags, NULL); |
| 4419 | } | 4428 | } |
| 4420 | 4429 | ||
| 4421 | release_sock(sk); | 4430 | release_sock(sk); |
| 4422 | return err; | 4431 | return err; |
| 4423 | } | 4432 | } |
| 4424 | 4433 | ||
| 4434 | int sctp_inet_connect(struct socket *sock, struct sockaddr *uaddr, | ||
| 4435 | int addr_len, int flags) | ||
| 4436 | { | ||
| 4437 | if (addr_len < sizeof(uaddr->sa_family)) | ||
| 4438 | return -EINVAL; | ||
| 4439 | |||
| 4440 | if (uaddr->sa_family == AF_UNSPEC) | ||
| 4441 | return -EOPNOTSUPP; | ||
| 4442 | |||
| 4443 | return sctp_connect(sock->sk, uaddr, addr_len, flags); | ||
| 4444 | } | ||
| 4445 | |||
| 4425 | /* FIXME: Write comments. */ | 4446 | /* FIXME: Write comments. */ |
| 4426 | static int sctp_disconnect(struct sock *sk, int flags) | 4447 | static int sctp_disconnect(struct sock *sk, int flags) |
| 4427 | { | 4448 | { |
| @@ -8724,7 +8745,6 @@ struct proto sctp_prot = { | |||
| 8724 | .name = "SCTP", | 8745 | .name = "SCTP", |
| 8725 | .owner = THIS_MODULE, | 8746 | .owner = THIS_MODULE, |
| 8726 | .close = sctp_close, | 8747 | .close = sctp_close, |
| 8727 | .connect = sctp_connect, | ||
| 8728 | .disconnect = sctp_disconnect, | 8748 | .disconnect = sctp_disconnect, |
| 8729 | .accept = sctp_accept, | 8749 | .accept = sctp_accept, |
| 8730 | .ioctl = sctp_ioctl, | 8750 | .ioctl = sctp_ioctl, |
| @@ -8767,7 +8787,6 @@ struct proto sctpv6_prot = { | |||
| 8767 | .name = "SCTPv6", | 8787 | .name = "SCTPv6", |
| 8768 | .owner = THIS_MODULE, | 8788 | .owner = THIS_MODULE, |
| 8769 | .close = sctp_close, | 8789 | .close = sctp_close, |
| 8770 | .connect = sctp_connect, | ||
| 8771 | .disconnect = sctp_disconnect, | 8790 | .disconnect = sctp_disconnect, |
| 8772 | .accept = sctp_accept, | 8791 | .accept = sctp_accept, |
| 8773 | .ioctl = sctp_ioctl, | 8792 | .ioctl = sctp_ioctl, |
diff --git a/net/smc/smc_pnet.c b/net/smc/smc_pnet.c index 74568cdbca70..d7b88b2d1b22 100644 --- a/net/smc/smc_pnet.c +++ b/net/smc/smc_pnet.c | |||
| @@ -245,40 +245,45 @@ out: | |||
| 245 | static int smc_pnet_fill_entry(struct net *net, struct smc_pnetentry *pnetelem, | 245 | static int smc_pnet_fill_entry(struct net *net, struct smc_pnetentry *pnetelem, |
| 246 | struct nlattr *tb[]) | 246 | struct nlattr *tb[]) |
| 247 | { | 247 | { |
| 248 | char *string, *ibname = NULL; | 248 | char *string, *ibname; |
| 249 | int rc = 0; | 249 | int rc; |
| 250 | 250 | ||
| 251 | memset(pnetelem, 0, sizeof(*pnetelem)); | 251 | memset(pnetelem, 0, sizeof(*pnetelem)); |
| 252 | INIT_LIST_HEAD(&pnetelem->list); | 252 | INIT_LIST_HEAD(&pnetelem->list); |
| 253 | if (tb[SMC_PNETID_NAME]) { | 253 | |
| 254 | string = (char *)nla_data(tb[SMC_PNETID_NAME]); | 254 | rc = -EINVAL; |
| 255 | if (!smc_pnetid_valid(string, pnetelem->pnet_name)) { | 255 | if (!tb[SMC_PNETID_NAME]) |
| 256 | rc = -EINVAL; | 256 | goto error; |
| 257 | goto error; | 257 | string = (char *)nla_data(tb[SMC_PNETID_NAME]); |
| 258 | } | 258 | if (!smc_pnetid_valid(string, pnetelem->pnet_name)) |
| 259 | } | 259 | goto error; |
| 260 | if (tb[SMC_PNETID_ETHNAME]) { | 260 | |
| 261 | string = (char *)nla_data(tb[SMC_PNETID_ETHNAME]); | 261 | rc = -EINVAL; |
| 262 | pnetelem->ndev = dev_get_by_name(net, string); | 262 | if (!tb[SMC_PNETID_ETHNAME]) |
| 263 | if (!pnetelem->ndev) | 263 | goto error; |
| 264 | return -ENOENT; | 264 | rc = -ENOENT; |
| 265 | } | 265 | string = (char *)nla_data(tb[SMC_PNETID_ETHNAME]); |
| 266 | if (tb[SMC_PNETID_IBNAME]) { | 266 | pnetelem->ndev = dev_get_by_name(net, string); |
| 267 | ibname = (char *)nla_data(tb[SMC_PNETID_IBNAME]); | 267 | if (!pnetelem->ndev) |
| 268 | ibname = strim(ibname); | 268 | goto error; |
| 269 | pnetelem->smcibdev = smc_pnet_find_ib(ibname); | 269 | |
| 270 | if (!pnetelem->smcibdev) { | 270 | rc = -EINVAL; |
| 271 | rc = -ENOENT; | 271 | if (!tb[SMC_PNETID_IBNAME]) |
| 272 | goto error; | 272 | goto error; |
| 273 | } | 273 | rc = -ENOENT; |
| 274 | } | 274 | ibname = (char *)nla_data(tb[SMC_PNETID_IBNAME]); |
| 275 | if (tb[SMC_PNETID_IBPORT]) { | 275 | ibname = strim(ibname); |
| 276 | pnetelem->ib_port = nla_get_u8(tb[SMC_PNETID_IBPORT]); | 276 | pnetelem->smcibdev = smc_pnet_find_ib(ibname); |
| 277 | if (pnetelem->ib_port > SMC_MAX_PORTS) { | 277 | if (!pnetelem->smcibdev) |
| 278 | rc = -EINVAL; | 278 | goto error; |
| 279 | goto error; | 279 | |
| 280 | } | 280 | rc = -EINVAL; |
| 281 | } | 281 | if (!tb[SMC_PNETID_IBPORT]) |
| 282 | goto error; | ||
| 283 | pnetelem->ib_port = nla_get_u8(tb[SMC_PNETID_IBPORT]); | ||
| 284 | if (pnetelem->ib_port < 1 || pnetelem->ib_port > SMC_MAX_PORTS) | ||
| 285 | goto error; | ||
| 286 | |||
| 282 | return 0; | 287 | return 0; |
| 283 | 288 | ||
| 284 | error: | 289 | error: |
| @@ -307,6 +312,8 @@ static int smc_pnet_get(struct sk_buff *skb, struct genl_info *info) | |||
| 307 | void *hdr; | 312 | void *hdr; |
| 308 | int rc; | 313 | int rc; |
| 309 | 314 | ||
| 315 | if (!info->attrs[SMC_PNETID_NAME]) | ||
| 316 | return -EINVAL; | ||
| 310 | pnetelem = smc_pnet_find_pnetid( | 317 | pnetelem = smc_pnet_find_pnetid( |
| 311 | (char *)nla_data(info->attrs[SMC_PNETID_NAME])); | 318 | (char *)nla_data(info->attrs[SMC_PNETID_NAME])); |
| 312 | if (!pnetelem) | 319 | if (!pnetelem) |
| @@ -359,6 +366,8 @@ static int smc_pnet_add(struct sk_buff *skb, struct genl_info *info) | |||
| 359 | 366 | ||
| 360 | static int smc_pnet_del(struct sk_buff *skb, struct genl_info *info) | 367 | static int smc_pnet_del(struct sk_buff *skb, struct genl_info *info) |
| 361 | { | 368 | { |
| 369 | if (!info->attrs[SMC_PNETID_NAME]) | ||
| 370 | return -EINVAL; | ||
| 362 | return smc_pnet_remove_by_pnetid( | 371 | return smc_pnet_remove_by_pnetid( |
| 363 | (char *)nla_data(info->attrs[SMC_PNETID_NAME])); | 372 | (char *)nla_data(info->attrs[SMC_PNETID_NAME])); |
| 364 | } | 373 | } |
diff --git a/net/sunrpc/Kconfig b/net/sunrpc/Kconfig index ac09ca803296..6358e5271070 100644 --- a/net/sunrpc/Kconfig +++ b/net/sunrpc/Kconfig | |||
| @@ -50,7 +50,7 @@ config SUNRPC_DEBUG | |||
| 50 | 50 | ||
| 51 | config SUNRPC_XPRT_RDMA | 51 | config SUNRPC_XPRT_RDMA |
| 52 | tristate "RPC-over-RDMA transport" | 52 | tristate "RPC-over-RDMA transport" |
| 53 | depends on SUNRPC && INFINIBAND && INFINIBAND_ADDR_TRANS | 53 | depends on SUNRPC && INFINIBAND_ADDR_TRANS |
| 54 | default SUNRPC && INFINIBAND | 54 | default SUNRPC && INFINIBAND |
| 55 | select SG_POOL | 55 | select SG_POOL |
| 56 | help | 56 | help |
diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c index 71e79597f940..e1c93ce74e0f 100644 --- a/net/tls/tls_sw.c +++ b/net/tls/tls_sw.c | |||
| @@ -680,7 +680,6 @@ static int decrypt_skb(struct sock *sk, struct sk_buff *skb, | |||
| 680 | struct scatterlist *sgin = &sgin_arr[0]; | 680 | struct scatterlist *sgin = &sgin_arr[0]; |
| 681 | struct strp_msg *rxm = strp_msg(skb); | 681 | struct strp_msg *rxm = strp_msg(skb); |
| 682 | int ret, nsg = ARRAY_SIZE(sgin_arr); | 682 | int ret, nsg = ARRAY_SIZE(sgin_arr); |
| 683 | char aad_recv[TLS_AAD_SPACE_SIZE]; | ||
| 684 | struct sk_buff *unused; | 683 | struct sk_buff *unused; |
| 685 | 684 | ||
| 686 | ret = skb_copy_bits(skb, rxm->offset + TLS_HEADER_SIZE, | 685 | ret = skb_copy_bits(skb, rxm->offset + TLS_HEADER_SIZE, |
| @@ -698,13 +697,13 @@ static int decrypt_skb(struct sock *sk, struct sk_buff *skb, | |||
| 698 | } | 697 | } |
| 699 | 698 | ||
| 700 | sg_init_table(sgin, nsg); | 699 | sg_init_table(sgin, nsg); |
| 701 | sg_set_buf(&sgin[0], aad_recv, sizeof(aad_recv)); | 700 | sg_set_buf(&sgin[0], ctx->rx_aad_ciphertext, TLS_AAD_SPACE_SIZE); |
| 702 | 701 | ||
| 703 | nsg = skb_to_sgvec(skb, &sgin[1], | 702 | nsg = skb_to_sgvec(skb, &sgin[1], |
| 704 | rxm->offset + tls_ctx->rx.prepend_size, | 703 | rxm->offset + tls_ctx->rx.prepend_size, |
| 705 | rxm->full_len - tls_ctx->rx.prepend_size); | 704 | rxm->full_len - tls_ctx->rx.prepend_size); |
| 706 | 705 | ||
| 707 | tls_make_aad(aad_recv, | 706 | tls_make_aad(ctx->rx_aad_ciphertext, |
| 708 | rxm->full_len - tls_ctx->rx.overhead_size, | 707 | rxm->full_len - tls_ctx->rx.overhead_size, |
| 709 | tls_ctx->rx.rec_seq, | 708 | tls_ctx->rx.rec_seq, |
| 710 | tls_ctx->rx.rec_seq_size, | 709 | tls_ctx->rx.rec_seq_size, |
| @@ -803,12 +802,12 @@ int tls_sw_recvmsg(struct sock *sk, | |||
| 803 | if (to_copy <= len && page_count < MAX_SKB_FRAGS && | 802 | if (to_copy <= len && page_count < MAX_SKB_FRAGS && |
| 804 | likely(!(flags & MSG_PEEK))) { | 803 | likely(!(flags & MSG_PEEK))) { |
| 805 | struct scatterlist sgin[MAX_SKB_FRAGS + 1]; | 804 | struct scatterlist sgin[MAX_SKB_FRAGS + 1]; |
| 806 | char unused[21]; | ||
| 807 | int pages = 0; | 805 | int pages = 0; |
| 808 | 806 | ||
| 809 | zc = true; | 807 | zc = true; |
| 810 | sg_init_table(sgin, MAX_SKB_FRAGS + 1); | 808 | sg_init_table(sgin, MAX_SKB_FRAGS + 1); |
| 811 | sg_set_buf(&sgin[0], unused, 13); | 809 | sg_set_buf(&sgin[0], ctx->rx_aad_plaintext, |
| 810 | TLS_AAD_SPACE_SIZE); | ||
| 812 | 811 | ||
| 813 | err = zerocopy_from_iter(sk, &msg->msg_iter, | 812 | err = zerocopy_from_iter(sk, &msg->msg_iter, |
| 814 | to_copy, &pages, | 813 | to_copy, &pages, |
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index a052693c2e85..7c5135a92d76 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c | |||
| @@ -15555,7 +15555,8 @@ void cfg80211_ft_event(struct net_device *netdev, | |||
| 15555 | if (!ft_event->target_ap) | 15555 | if (!ft_event->target_ap) |
| 15556 | return; | 15556 | return; |
| 15557 | 15557 | ||
| 15558 | msg = nlmsg_new(100 + ft_event->ric_ies_len, GFP_KERNEL); | 15558 | msg = nlmsg_new(100 + ft_event->ies_len + ft_event->ric_ies_len, |
| 15559 | GFP_KERNEL); | ||
| 15559 | if (!msg) | 15560 | if (!msg) |
| 15560 | return; | 15561 | return; |
| 15561 | 15562 | ||
diff --git a/net/wireless/reg.c b/net/wireless/reg.c index ac3e12c32aa3..5fcec5c94eb7 100644 --- a/net/wireless/reg.c +++ b/net/wireless/reg.c | |||
| @@ -916,6 +916,9 @@ int reg_query_regdb_wmm(char *alpha2, int freq, u32 *dbptr, | |||
| 916 | const struct fwdb_header *hdr = regdb; | 916 | const struct fwdb_header *hdr = regdb; |
| 917 | const struct fwdb_country *country; | 917 | const struct fwdb_country *country; |
| 918 | 918 | ||
| 919 | if (!regdb) | ||
| 920 | return -ENODATA; | ||
| 921 | |||
| 919 | if (IS_ERR(regdb)) | 922 | if (IS_ERR(regdb)) |
| 920 | return PTR_ERR(regdb); | 923 | return PTR_ERR(regdb); |
| 921 | 924 | ||
diff --git a/samples/bpf/Makefile b/samples/bpf/Makefile index 4d6a6edd4bf6..092947676143 100644 --- a/samples/bpf/Makefile +++ b/samples/bpf/Makefile | |||
| @@ -255,7 +255,7 @@ $(obj)/tracex5_kern.o: $(obj)/syscall_nrs.h | |||
| 255 | $(obj)/%.o: $(src)/%.c | 255 | $(obj)/%.o: $(src)/%.c |
| 256 | $(CLANG) $(NOSTDINC_FLAGS) $(LINUXINCLUDE) $(EXTRA_CFLAGS) -I$(obj) \ | 256 | $(CLANG) $(NOSTDINC_FLAGS) $(LINUXINCLUDE) $(EXTRA_CFLAGS) -I$(obj) \ |
| 257 | -I$(srctree)/tools/testing/selftests/bpf/ \ | 257 | -I$(srctree)/tools/testing/selftests/bpf/ \ |
| 258 | -D__KERNEL__ -Wno-unused-value -Wno-pointer-sign \ | 258 | -D__KERNEL__ -D__BPF_TRACING__ -Wno-unused-value -Wno-pointer-sign \ |
| 259 | -D__TARGET_ARCH_$(ARCH) -Wno-compare-distinct-pointer-types \ | 259 | -D__TARGET_ARCH_$(ARCH) -Wno-compare-distinct-pointer-types \ |
| 260 | -Wno-gnu-variable-sized-type-not-at-end \ | 260 | -Wno-gnu-variable-sized-type-not-at-end \ |
| 261 | -Wno-address-of-packed-member -Wno-tautological-compare \ | 261 | -Wno-address-of-packed-member -Wno-tautological-compare \ |
diff --git a/scripts/checkpatch.pl b/scripts/checkpatch.pl index e16d6713f236..2d42eb9cd1a5 100755 --- a/scripts/checkpatch.pl +++ b/scripts/checkpatch.pl | |||
| @@ -5041,7 +5041,7 @@ sub process { | |||
| 5041 | $tmp_stmt =~ s/\b(typeof|__typeof__|__builtin\w+|typecheck\s*\(\s*$Type\s*,|\#+)\s*\(*\s*$arg\s*\)*\b//g; | 5041 | $tmp_stmt =~ s/\b(typeof|__typeof__|__builtin\w+|typecheck\s*\(\s*$Type\s*,|\#+)\s*\(*\s*$arg\s*\)*\b//g; |
| 5042 | $tmp_stmt =~ s/\#+\s*$arg\b//g; | 5042 | $tmp_stmt =~ s/\#+\s*$arg\b//g; |
| 5043 | $tmp_stmt =~ s/\b$arg\s*\#\#//g; | 5043 | $tmp_stmt =~ s/\b$arg\s*\#\#//g; |
| 5044 | my $use_cnt = $tmp_stmt =~ s/\b$arg\b//g; | 5044 | my $use_cnt = () = $tmp_stmt =~ /\b$arg\b/g; |
| 5045 | if ($use_cnt > 1) { | 5045 | if ($use_cnt > 1) { |
| 5046 | CHK("MACRO_ARG_REUSE", | 5046 | CHK("MACRO_ARG_REUSE", |
| 5047 | "Macro argument reuse '$arg' - possible side-effects?\n" . "$herectx"); | 5047 | "Macro argument reuse '$arg' - possible side-effects?\n" . "$herectx"); |
diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c index 4cafe6a19167..179dd20bec0a 100644 --- a/security/selinux/hooks.c +++ b/security/selinux/hooks.c | |||
| @@ -1568,8 +1568,15 @@ static int inode_doinit_with_dentry(struct inode *inode, struct dentry *opt_dent | |||
| 1568 | /* Called from d_instantiate or d_splice_alias. */ | 1568 | /* Called from d_instantiate or d_splice_alias. */ |
| 1569 | dentry = dget(opt_dentry); | 1569 | dentry = dget(opt_dentry); |
| 1570 | } else { | 1570 | } else { |
| 1571 | /* Called from selinux_complete_init, try to find a dentry. */ | 1571 | /* |
| 1572 | * Called from selinux_complete_init, try to find a dentry. | ||
| 1573 | * Some filesystems really want a connected one, so try | ||
| 1574 | * that first. We could split SECURITY_FS_USE_XATTR in | ||
| 1575 | * two, depending upon that... | ||
| 1576 | */ | ||
| 1572 | dentry = d_find_alias(inode); | 1577 | dentry = d_find_alias(inode); |
| 1578 | if (!dentry) | ||
| 1579 | dentry = d_find_any_alias(inode); | ||
| 1573 | } | 1580 | } |
| 1574 | if (!dentry) { | 1581 | if (!dentry) { |
| 1575 | /* | 1582 | /* |
| @@ -1674,14 +1681,19 @@ static int inode_doinit_with_dentry(struct inode *inode, struct dentry *opt_dent | |||
| 1674 | if ((sbsec->flags & SE_SBGENFS) && !S_ISLNK(inode->i_mode)) { | 1681 | if ((sbsec->flags & SE_SBGENFS) && !S_ISLNK(inode->i_mode)) { |
| 1675 | /* We must have a dentry to determine the label on | 1682 | /* We must have a dentry to determine the label on |
| 1676 | * procfs inodes */ | 1683 | * procfs inodes */ |
| 1677 | if (opt_dentry) | 1684 | if (opt_dentry) { |
| 1678 | /* Called from d_instantiate or | 1685 | /* Called from d_instantiate or |
| 1679 | * d_splice_alias. */ | 1686 | * d_splice_alias. */ |
| 1680 | dentry = dget(opt_dentry); | 1687 | dentry = dget(opt_dentry); |
| 1681 | else | 1688 | } else { |
| 1682 | /* Called from selinux_complete_init, try to | 1689 | /* Called from selinux_complete_init, try to |
| 1683 | * find a dentry. */ | 1690 | * find a dentry. Some filesystems really want |
| 1691 | * a connected one, so try that first. | ||
| 1692 | */ | ||
| 1684 | dentry = d_find_alias(inode); | 1693 | dentry = d_find_alias(inode); |
| 1694 | if (!dentry) | ||
| 1695 | dentry = d_find_any_alias(inode); | ||
| 1696 | } | ||
| 1685 | /* | 1697 | /* |
| 1686 | * This can be hit on boot when a file is accessed | 1698 | * This can be hit on boot when a file is accessed |
| 1687 | * before the policy is loaded. When we load policy we | 1699 | * before the policy is loaded. When we load policy we |
| @@ -4576,6 +4588,7 @@ static int selinux_socket_post_create(struct socket *sock, int family, | |||
| 4576 | static int selinux_socket_bind(struct socket *sock, struct sockaddr *address, int addrlen) | 4588 | static int selinux_socket_bind(struct socket *sock, struct sockaddr *address, int addrlen) |
| 4577 | { | 4589 | { |
| 4578 | struct sock *sk = sock->sk; | 4590 | struct sock *sk = sock->sk; |
| 4591 | struct sk_security_struct *sksec = sk->sk_security; | ||
| 4579 | u16 family; | 4592 | u16 family; |
| 4580 | int err; | 4593 | int err; |
| 4581 | 4594 | ||
| @@ -4587,11 +4600,11 @@ static int selinux_socket_bind(struct socket *sock, struct sockaddr *address, in | |||
| 4587 | family = sk->sk_family; | 4600 | family = sk->sk_family; |
| 4588 | if (family == PF_INET || family == PF_INET6) { | 4601 | if (family == PF_INET || family == PF_INET6) { |
| 4589 | char *addrp; | 4602 | char *addrp; |
| 4590 | struct sk_security_struct *sksec = sk->sk_security; | ||
| 4591 | struct common_audit_data ad; | 4603 | struct common_audit_data ad; |
| 4592 | struct lsm_network_audit net = {0,}; | 4604 | struct lsm_network_audit net = {0,}; |
| 4593 | struct sockaddr_in *addr4 = NULL; | 4605 | struct sockaddr_in *addr4 = NULL; |
| 4594 | struct sockaddr_in6 *addr6 = NULL; | 4606 | struct sockaddr_in6 *addr6 = NULL; |
| 4607 | u16 family_sa = address->sa_family; | ||
| 4595 | unsigned short snum; | 4608 | unsigned short snum; |
| 4596 | u32 sid, node_perm; | 4609 | u32 sid, node_perm; |
| 4597 | 4610 | ||
| @@ -4601,11 +4614,20 @@ static int selinux_socket_bind(struct socket *sock, struct sockaddr *address, in | |||
| 4601 | * need to check address->sa_family as it is possible to have | 4614 | * need to check address->sa_family as it is possible to have |
| 4602 | * sk->sk_family = PF_INET6 with addr->sa_family = AF_INET. | 4615 | * sk->sk_family = PF_INET6 with addr->sa_family = AF_INET. |
| 4603 | */ | 4616 | */ |
| 4604 | switch (address->sa_family) { | 4617 | switch (family_sa) { |
| 4618 | case AF_UNSPEC: | ||
| 4605 | case AF_INET: | 4619 | case AF_INET: |
| 4606 | if (addrlen < sizeof(struct sockaddr_in)) | 4620 | if (addrlen < sizeof(struct sockaddr_in)) |
| 4607 | return -EINVAL; | 4621 | return -EINVAL; |
| 4608 | addr4 = (struct sockaddr_in *)address; | 4622 | addr4 = (struct sockaddr_in *)address; |
| 4623 | if (family_sa == AF_UNSPEC) { | ||
| 4624 | /* see __inet_bind(), we only want to allow | ||
| 4625 | * AF_UNSPEC if the address is INADDR_ANY | ||
| 4626 | */ | ||
| 4627 | if (addr4->sin_addr.s_addr != htonl(INADDR_ANY)) | ||
| 4628 | goto err_af; | ||
| 4629 | family_sa = AF_INET; | ||
| 4630 | } | ||
| 4609 | snum = ntohs(addr4->sin_port); | 4631 | snum = ntohs(addr4->sin_port); |
| 4610 | addrp = (char *)&addr4->sin_addr.s_addr; | 4632 | addrp = (char *)&addr4->sin_addr.s_addr; |
| 4611 | break; | 4633 | break; |
| @@ -4617,15 +4639,14 @@ static int selinux_socket_bind(struct socket *sock, struct sockaddr *address, in | |||
| 4617 | addrp = (char *)&addr6->sin6_addr.s6_addr; | 4639 | addrp = (char *)&addr6->sin6_addr.s6_addr; |
| 4618 | break; | 4640 | break; |
| 4619 | default: | 4641 | default: |
| 4620 | /* Note that SCTP services expect -EINVAL, whereas | 4642 | goto err_af; |
| 4621 | * others expect -EAFNOSUPPORT. | ||
| 4622 | */ | ||
| 4623 | if (sksec->sclass == SECCLASS_SCTP_SOCKET) | ||
| 4624 | return -EINVAL; | ||
| 4625 | else | ||
| 4626 | return -EAFNOSUPPORT; | ||
| 4627 | } | 4643 | } |
| 4628 | 4644 | ||
| 4645 | ad.type = LSM_AUDIT_DATA_NET; | ||
| 4646 | ad.u.net = &net; | ||
| 4647 | ad.u.net->sport = htons(snum); | ||
| 4648 | ad.u.net->family = family_sa; | ||
| 4649 | |||
| 4629 | if (snum) { | 4650 | if (snum) { |
| 4630 | int low, high; | 4651 | int low, high; |
| 4631 | 4652 | ||
| @@ -4637,10 +4658,6 @@ static int selinux_socket_bind(struct socket *sock, struct sockaddr *address, in | |||
| 4637 | snum, &sid); | 4658 | snum, &sid); |
| 4638 | if (err) | 4659 | if (err) |
| 4639 | goto out; | 4660 | goto out; |
| 4640 | ad.type = LSM_AUDIT_DATA_NET; | ||
| 4641 | ad.u.net = &net; | ||
| 4642 | ad.u.net->sport = htons(snum); | ||
| 4643 | ad.u.net->family = family; | ||
| 4644 | err = avc_has_perm(&selinux_state, | 4661 | err = avc_has_perm(&selinux_state, |
| 4645 | sksec->sid, sid, | 4662 | sksec->sid, sid, |
| 4646 | sksec->sclass, | 4663 | sksec->sclass, |
| @@ -4672,16 +4689,11 @@ static int selinux_socket_bind(struct socket *sock, struct sockaddr *address, in | |||
| 4672 | break; | 4689 | break; |
| 4673 | } | 4690 | } |
| 4674 | 4691 | ||
| 4675 | err = sel_netnode_sid(addrp, family, &sid); | 4692 | err = sel_netnode_sid(addrp, family_sa, &sid); |
| 4676 | if (err) | 4693 | if (err) |
| 4677 | goto out; | 4694 | goto out; |
| 4678 | 4695 | ||
| 4679 | ad.type = LSM_AUDIT_DATA_NET; | 4696 | if (family_sa == AF_INET) |
| 4680 | ad.u.net = &net; | ||
| 4681 | ad.u.net->sport = htons(snum); | ||
| 4682 | ad.u.net->family = family; | ||
| 4683 | |||
| 4684 | if (address->sa_family == AF_INET) | ||
| 4685 | ad.u.net->v4info.saddr = addr4->sin_addr.s_addr; | 4697 | ad.u.net->v4info.saddr = addr4->sin_addr.s_addr; |
| 4686 | else | 4698 | else |
| 4687 | ad.u.net->v6info.saddr = addr6->sin6_addr; | 4699 | ad.u.net->v6info.saddr = addr6->sin6_addr; |
| @@ -4694,6 +4706,11 @@ static int selinux_socket_bind(struct socket *sock, struct sockaddr *address, in | |||
| 4694 | } | 4706 | } |
| 4695 | out: | 4707 | out: |
| 4696 | return err; | 4708 | return err; |
| 4709 | err_af: | ||
| 4710 | /* Note that SCTP services expect -EINVAL, others -EAFNOSUPPORT. */ | ||
| 4711 | if (sksec->sclass == SECCLASS_SCTP_SOCKET) | ||
| 4712 | return -EINVAL; | ||
| 4713 | return -EAFNOSUPPORT; | ||
| 4697 | } | 4714 | } |
| 4698 | 4715 | ||
| 4699 | /* This supports connect(2) and SCTP connect services such as sctp_connectx(3) | 4716 | /* This supports connect(2) and SCTP connect services such as sctp_connectx(3) |
| @@ -4771,7 +4788,7 @@ static int selinux_socket_connect_helper(struct socket *sock, | |||
| 4771 | ad.type = LSM_AUDIT_DATA_NET; | 4788 | ad.type = LSM_AUDIT_DATA_NET; |
| 4772 | ad.u.net = &net; | 4789 | ad.u.net = &net; |
| 4773 | ad.u.net->dport = htons(snum); | 4790 | ad.u.net->dport = htons(snum); |
| 4774 | ad.u.net->family = sk->sk_family; | 4791 | ad.u.net->family = address->sa_family; |
| 4775 | err = avc_has_perm(&selinux_state, | 4792 | err = avc_has_perm(&selinux_state, |
| 4776 | sksec->sid, sid, sksec->sclass, perm, &ad); | 4793 | sksec->sid, sid, sksec->sclass, perm, &ad); |
| 4777 | if (err) | 4794 | if (err) |
| @@ -5272,6 +5289,7 @@ static int selinux_sctp_bind_connect(struct sock *sk, int optname, | |||
| 5272 | while (walk_size < addrlen) { | 5289 | while (walk_size < addrlen) { |
| 5273 | addr = addr_buf; | 5290 | addr = addr_buf; |
| 5274 | switch (addr->sa_family) { | 5291 | switch (addr->sa_family) { |
| 5292 | case AF_UNSPEC: | ||
| 5275 | case AF_INET: | 5293 | case AF_INET: |
| 5276 | len = sizeof(struct sockaddr_in); | 5294 | len = sizeof(struct sockaddr_in); |
| 5277 | break; | 5295 | break; |
| @@ -5279,7 +5297,7 @@ static int selinux_sctp_bind_connect(struct sock *sk, int optname, | |||
| 5279 | len = sizeof(struct sockaddr_in6); | 5297 | len = sizeof(struct sockaddr_in6); |
| 5280 | break; | 5298 | break; |
| 5281 | default: | 5299 | default: |
| 5282 | return -EAFNOSUPPORT; | 5300 | return -EINVAL; |
| 5283 | } | 5301 | } |
| 5284 | 5302 | ||
| 5285 | err = -EINVAL; | 5303 | err = -EINVAL; |
diff --git a/sound/core/control_compat.c b/sound/core/control_compat.c index a848836a5de0..507fd5210c1c 100644 --- a/sound/core/control_compat.c +++ b/sound/core/control_compat.c | |||
| @@ -396,8 +396,7 @@ static int snd_ctl_elem_add_compat(struct snd_ctl_file *file, | |||
| 396 | if (copy_from_user(&data->id, &data32->id, sizeof(data->id)) || | 396 | if (copy_from_user(&data->id, &data32->id, sizeof(data->id)) || |
| 397 | copy_from_user(&data->type, &data32->type, 3 * sizeof(u32))) | 397 | copy_from_user(&data->type, &data32->type, 3 * sizeof(u32))) |
| 398 | goto error; | 398 | goto error; |
| 399 | if (get_user(data->owner, &data32->owner) || | 399 | if (get_user(data->owner, &data32->owner)) |
| 400 | get_user(data->type, &data32->type)) | ||
| 401 | goto error; | 400 | goto error; |
| 402 | switch (data->type) { | 401 | switch (data->type) { |
| 403 | case SNDRV_CTL_ELEM_TYPE_BOOLEAN: | 402 | case SNDRV_CTL_ELEM_TYPE_BOOLEAN: |
diff --git a/sound/core/timer.c b/sound/core/timer.c index dc87728c5b74..0ddcae495838 100644 --- a/sound/core/timer.c +++ b/sound/core/timer.c | |||
| @@ -592,7 +592,7 @@ static int snd_timer_stop1(struct snd_timer_instance *timeri, bool stop) | |||
| 592 | else | 592 | else |
| 593 | timeri->flags |= SNDRV_TIMER_IFLG_PAUSED; | 593 | timeri->flags |= SNDRV_TIMER_IFLG_PAUSED; |
| 594 | snd_timer_notify1(timeri, stop ? SNDRV_TIMER_EVENT_STOP : | 594 | snd_timer_notify1(timeri, stop ? SNDRV_TIMER_EVENT_STOP : |
| 595 | SNDRV_TIMER_EVENT_CONTINUE); | 595 | SNDRV_TIMER_EVENT_PAUSE); |
| 596 | unlock: | 596 | unlock: |
| 597 | spin_unlock_irqrestore(&timer->lock, flags); | 597 | spin_unlock_irqrestore(&timer->lock, flags); |
| 598 | return result; | 598 | return result; |
| @@ -614,7 +614,7 @@ static int snd_timer_stop_slave(struct snd_timer_instance *timeri, bool stop) | |||
| 614 | list_del_init(&timeri->ack_list); | 614 | list_del_init(&timeri->ack_list); |
| 615 | list_del_init(&timeri->active_list); | 615 | list_del_init(&timeri->active_list); |
| 616 | snd_timer_notify1(timeri, stop ? SNDRV_TIMER_EVENT_STOP : | 616 | snd_timer_notify1(timeri, stop ? SNDRV_TIMER_EVENT_STOP : |
| 617 | SNDRV_TIMER_EVENT_CONTINUE); | 617 | SNDRV_TIMER_EVENT_PAUSE); |
| 618 | spin_unlock(&timeri->timer->lock); | 618 | spin_unlock(&timeri->timer->lock); |
| 619 | } | 619 | } |
| 620 | spin_unlock_irqrestore(&slave_active_lock, flags); | 620 | spin_unlock_irqrestore(&slave_active_lock, flags); |
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c index b0c8c79848a9..a0c93b9c9a28 100644 --- a/sound/pci/hda/hda_intel.c +++ b/sound/pci/hda/hda_intel.c | |||
| @@ -2210,6 +2210,8 @@ static struct snd_pci_quirk power_save_blacklist[] = { | |||
| 2210 | SND_PCI_QUIRK(0x1849, 0x0c0c, "Asrock B85M-ITX", 0), | 2210 | SND_PCI_QUIRK(0x1849, 0x0c0c, "Asrock B85M-ITX", 0), |
| 2211 | /* https://bugzilla.redhat.com/show_bug.cgi?id=1525104 */ | 2211 | /* https://bugzilla.redhat.com/show_bug.cgi?id=1525104 */ |
| 2212 | SND_PCI_QUIRK(0x1043, 0x8733, "Asus Prime X370-Pro", 0), | 2212 | SND_PCI_QUIRK(0x1043, 0x8733, "Asus Prime X370-Pro", 0), |
| 2213 | /* https://bugzilla.redhat.com/show_bug.cgi?id=1572975 */ | ||
| 2214 | SND_PCI_QUIRK(0x17aa, 0x36a7, "Lenovo C50 All in one", 0), | ||
| 2213 | /* https://bugzilla.kernel.org/show_bug.cgi?id=198611 */ | 2215 | /* https://bugzilla.kernel.org/show_bug.cgi?id=198611 */ |
| 2214 | SND_PCI_QUIRK(0x17aa, 0x2227, "Lenovo X1 Carbon 3rd Gen", 0), | 2216 | SND_PCI_QUIRK(0x17aa, 0x2227, "Lenovo X1 Carbon 3rd Gen", 0), |
| 2215 | {} | 2217 | {} |
diff --git a/sound/pci/hda/hda_local.h b/sound/pci/hda/hda_local.h index 321e78baa63c..9bd935216c18 100644 --- a/sound/pci/hda/hda_local.h +++ b/sound/pci/hda/hda_local.h | |||
| @@ -622,8 +622,10 @@ snd_hda_check_power_state(struct hda_codec *codec, hda_nid_t nid, | |||
| 622 | { | 622 | { |
| 623 | return snd_hdac_check_power_state(&codec->core, nid, target_state); | 623 | return snd_hdac_check_power_state(&codec->core, nid, target_state); |
| 624 | } | 624 | } |
| 625 | static inline bool snd_hda_sync_power_state(struct hda_codec *codec, | 625 | |
| 626 | hda_nid_t nid, unsigned int target_state) | 626 | static inline unsigned int snd_hda_sync_power_state(struct hda_codec *codec, |
| 627 | hda_nid_t nid, | ||
| 628 | unsigned int target_state) | ||
| 627 | { | 629 | { |
| 628 | return snd_hdac_sync_power_state(&codec->core, nid, target_state); | 630 | return snd_hdac_sync_power_state(&codec->core, nid, target_state); |
| 629 | } | 631 | } |
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c index 2dd34dd77447..01a6643fc7d4 100644 --- a/sound/pci/hda/patch_realtek.c +++ b/sound/pci/hda/patch_realtek.c | |||
| @@ -2363,6 +2363,7 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = { | |||
| 2363 | SND_PCI_QUIRK_VENDOR(0x1462, "MSI", ALC882_FIXUP_GPIO3), | 2363 | SND_PCI_QUIRK_VENDOR(0x1462, "MSI", ALC882_FIXUP_GPIO3), |
| 2364 | SND_PCI_QUIRK(0x147b, 0x107a, "Abit AW9D-MAX", ALC882_FIXUP_ABIT_AW9D_MAX), | 2364 | SND_PCI_QUIRK(0x147b, 0x107a, "Abit AW9D-MAX", ALC882_FIXUP_ABIT_AW9D_MAX), |
| 2365 | SND_PCI_QUIRK(0x1558, 0x9501, "Clevo P950HR", ALC1220_FIXUP_CLEVO_P950), | 2365 | SND_PCI_QUIRK(0x1558, 0x9501, "Clevo P950HR", ALC1220_FIXUP_CLEVO_P950), |
| 2366 | SND_PCI_QUIRK(0x1558, 0x95e2, "Clevo P950ER", ALC1220_FIXUP_CLEVO_P950), | ||
| 2366 | SND_PCI_QUIRK_VENDOR(0x1558, "Clevo laptop", ALC882_FIXUP_EAPD), | 2367 | SND_PCI_QUIRK_VENDOR(0x1558, "Clevo laptop", ALC882_FIXUP_EAPD), |
| 2367 | SND_PCI_QUIRK(0x161f, 0x2054, "Medion laptop", ALC883_FIXUP_EAPD), | 2368 | SND_PCI_QUIRK(0x161f, 0x2054, "Medion laptop", ALC883_FIXUP_EAPD), |
| 2368 | SND_PCI_QUIRK(0x17aa, 0x3a0d, "Lenovo Y530", ALC882_FIXUP_LENOVO_Y530), | 2369 | SND_PCI_QUIRK(0x17aa, 0x3a0d, "Lenovo Y530", ALC882_FIXUP_LENOVO_Y530), |
diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c index 344d7b069d59..bb5ab7a7dfa5 100644 --- a/sound/usb/mixer.c +++ b/sound/usb/mixer.c | |||
| @@ -967,6 +967,14 @@ static void volume_control_quirks(struct usb_mixer_elem_info *cval, | |||
| 967 | } | 967 | } |
| 968 | break; | 968 | break; |
| 969 | 969 | ||
| 970 | case USB_ID(0x0d8c, 0x0103): | ||
| 971 | if (!strcmp(kctl->id.name, "PCM Playback Volume")) { | ||
| 972 | usb_audio_info(chip, | ||
| 973 | "set volume quirk for CM102-A+/102S+\n"); | ||
| 974 | cval->min = -256; | ||
| 975 | } | ||
| 976 | break; | ||
| 977 | |||
| 970 | case USB_ID(0x0471, 0x0101): | 978 | case USB_ID(0x0471, 0x0101): |
| 971 | case USB_ID(0x0471, 0x0104): | 979 | case USB_ID(0x0471, 0x0104): |
| 972 | case USB_ID(0x0471, 0x0105): | 980 | case USB_ID(0x0471, 0x0105): |
diff --git a/sound/usb/stream.c b/sound/usb/stream.c index 956be9f7c72a..5ed334575fc7 100644 --- a/sound/usb/stream.c +++ b/sound/usb/stream.c | |||
| @@ -576,7 +576,7 @@ static int parse_uac_endpoint_attributes(struct snd_usb_audio *chip, | |||
| 576 | 576 | ||
| 577 | if (protocol == UAC_VERSION_1) { | 577 | if (protocol == UAC_VERSION_1) { |
| 578 | attributes = csep->bmAttributes; | 578 | attributes = csep->bmAttributes; |
| 579 | } else { | 579 | } else if (protocol == UAC_VERSION_2) { |
| 580 | struct uac2_iso_endpoint_descriptor *csep2 = | 580 | struct uac2_iso_endpoint_descriptor *csep2 = |
| 581 | (struct uac2_iso_endpoint_descriptor *) csep; | 581 | (struct uac2_iso_endpoint_descriptor *) csep; |
| 582 | 582 | ||
| @@ -585,6 +585,13 @@ static int parse_uac_endpoint_attributes(struct snd_usb_audio *chip, | |||
| 585 | /* emulate the endpoint attributes of a v1 device */ | 585 | /* emulate the endpoint attributes of a v1 device */ |
| 586 | if (csep2->bmControls & UAC2_CONTROL_PITCH) | 586 | if (csep2->bmControls & UAC2_CONTROL_PITCH) |
| 587 | attributes |= UAC_EP_CS_ATTR_PITCH_CONTROL; | 587 | attributes |= UAC_EP_CS_ATTR_PITCH_CONTROL; |
| 588 | } else { /* UAC_VERSION_3 */ | ||
| 589 | struct uac3_iso_endpoint_descriptor *csep3 = | ||
| 590 | (struct uac3_iso_endpoint_descriptor *) csep; | ||
| 591 | |||
| 592 | /* emulate the endpoint attributes of a v1 device */ | ||
| 593 | if (le32_to_cpu(csep3->bmControls) & UAC2_CONTROL_PITCH) | ||
| 594 | attributes |= UAC_EP_CS_ATTR_PITCH_CONTROL; | ||
| 588 | } | 595 | } |
| 589 | 596 | ||
| 590 | return attributes; | 597 | return attributes; |
diff --git a/tools/include/linux/spinlock.h b/tools/include/linux/spinlock.h index b21b586b9854..1738c0391da4 100644 --- a/tools/include/linux/spinlock.h +++ b/tools/include/linux/spinlock.h | |||
| @@ -6,8 +6,9 @@ | |||
| 6 | #include <stdbool.h> | 6 | #include <stdbool.h> |
| 7 | 7 | ||
| 8 | #define spinlock_t pthread_mutex_t | 8 | #define spinlock_t pthread_mutex_t |
| 9 | #define DEFINE_SPINLOCK(x) pthread_mutex_t x = PTHREAD_MUTEX_INITIALIZER; | 9 | #define DEFINE_SPINLOCK(x) pthread_mutex_t x = PTHREAD_MUTEX_INITIALIZER |
| 10 | #define __SPIN_LOCK_UNLOCKED(x) (pthread_mutex_t)PTHREAD_MUTEX_INITIALIZER | 10 | #define __SPIN_LOCK_UNLOCKED(x) (pthread_mutex_t)PTHREAD_MUTEX_INITIALIZER |
| 11 | #define spin_lock_init(x) pthread_mutex_init(x, NULL) | ||
| 11 | 12 | ||
| 12 | #define spin_lock_irqsave(x, f) (void)f, pthread_mutex_lock(x) | 13 | #define spin_lock_irqsave(x, f) (void)f, pthread_mutex_lock(x) |
| 13 | #define spin_unlock_irqrestore(x, f) (void)f, pthread_mutex_unlock(x) | 14 | #define spin_unlock_irqrestore(x, f) (void)f, pthread_mutex_unlock(x) |
diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c index 5922443063f0..0f9f06df49bc 100644 --- a/tools/lib/bpf/libbpf.c +++ b/tools/lib/bpf/libbpf.c | |||
| @@ -2035,7 +2035,7 @@ int bpf_prog_load_xattr(const struct bpf_prog_load_attr *attr, | |||
| 2035 | return -EINVAL; | 2035 | return -EINVAL; |
| 2036 | 2036 | ||
| 2037 | obj = bpf_object__open(attr->file); | 2037 | obj = bpf_object__open(attr->file); |
| 2038 | if (IS_ERR(obj)) | 2038 | if (IS_ERR_OR_NULL(obj)) |
| 2039 | return -ENOENT; | 2039 | return -ENOENT; |
| 2040 | 2040 | ||
| 2041 | bpf_object__for_each_program(prog, obj) { | 2041 | bpf_object__for_each_program(prog, obj) { |
diff --git a/tools/objtool/arch/x86/include/asm/insn.h b/tools/objtool/arch/x86/include/asm/insn.h index b3e32b010ab1..c2c01f84df75 100644 --- a/tools/objtool/arch/x86/include/asm/insn.h +++ b/tools/objtool/arch/x86/include/asm/insn.h | |||
| @@ -208,4 +208,22 @@ static inline int insn_offset_immediate(struct insn *insn) | |||
| 208 | return insn_offset_displacement(insn) + insn->displacement.nbytes; | 208 | return insn_offset_displacement(insn) + insn->displacement.nbytes; |
| 209 | } | 209 | } |
| 210 | 210 | ||
| 211 | #define POP_SS_OPCODE 0x1f | ||
| 212 | #define MOV_SREG_OPCODE 0x8e | ||
| 213 | |||
| 214 | /* | ||
| 215 | * Intel SDM Vol.3A 6.8.3 states; | ||
| 216 | * "Any single-step trap that would be delivered following the MOV to SS | ||
| 217 | * instruction or POP to SS instruction (because EFLAGS.TF is 1) is | ||
| 218 | * suppressed." | ||
| 219 | * This function returns true if @insn is MOV SS or POP SS. On these | ||
| 220 | * instructions, single stepping is suppressed. | ||
| 221 | */ | ||
| 222 | static inline int insn_masking_exception(struct insn *insn) | ||
| 223 | { | ||
| 224 | return insn->opcode.bytes[0] == POP_SS_OPCODE || | ||
| 225 | (insn->opcode.bytes[0] == MOV_SREG_OPCODE && | ||
| 226 | X86_MODRM_REG(insn->modrm.bytes[0]) == 2); | ||
| 227 | } | ||
| 228 | |||
| 211 | #endif /* _ASM_X86_INSN_H */ | 229 | #endif /* _ASM_X86_INSN_H */ |
diff --git a/tools/objtool/check.c b/tools/objtool/check.c index 5409f6f6c48d..3a31b238f885 100644 --- a/tools/objtool/check.c +++ b/tools/objtool/check.c | |||
| @@ -59,6 +59,31 @@ static struct instruction *next_insn_same_sec(struct objtool_file *file, | |||
| 59 | return next; | 59 | return next; |
| 60 | } | 60 | } |
| 61 | 61 | ||
| 62 | static struct instruction *next_insn_same_func(struct objtool_file *file, | ||
| 63 | struct instruction *insn) | ||
| 64 | { | ||
| 65 | struct instruction *next = list_next_entry(insn, list); | ||
| 66 | struct symbol *func = insn->func; | ||
| 67 | |||
| 68 | if (!func) | ||
| 69 | return NULL; | ||
| 70 | |||
| 71 | if (&next->list != &file->insn_list && next->func == func) | ||
| 72 | return next; | ||
| 73 | |||
| 74 | /* Check if we're already in the subfunction: */ | ||
| 75 | if (func == func->cfunc) | ||
| 76 | return NULL; | ||
| 77 | |||
| 78 | /* Move to the subfunction: */ | ||
| 79 | return find_insn(file, func->cfunc->sec, func->cfunc->offset); | ||
| 80 | } | ||
| 81 | |||
| 82 | #define func_for_each_insn_all(file, func, insn) \ | ||
| 83 | for (insn = find_insn(file, func->sec, func->offset); \ | ||
| 84 | insn; \ | ||
| 85 | insn = next_insn_same_func(file, insn)) | ||
| 86 | |||
| 62 | #define func_for_each_insn(file, func, insn) \ | 87 | #define func_for_each_insn(file, func, insn) \ |
| 63 | for (insn = find_insn(file, func->sec, func->offset); \ | 88 | for (insn = find_insn(file, func->sec, func->offset); \ |
| 64 | insn && &insn->list != &file->insn_list && \ | 89 | insn && &insn->list != &file->insn_list && \ |
| @@ -149,10 +174,14 @@ static int __dead_end_function(struct objtool_file *file, struct symbol *func, | |||
| 149 | if (!strcmp(func->name, global_noreturns[i])) | 174 | if (!strcmp(func->name, global_noreturns[i])) |
| 150 | return 1; | 175 | return 1; |
| 151 | 176 | ||
| 152 | if (!func->sec) | 177 | if (!func->len) |
| 153 | return 0; | 178 | return 0; |
| 154 | 179 | ||
| 155 | func_for_each_insn(file, func, insn) { | 180 | insn = find_insn(file, func->sec, func->offset); |
| 181 | if (!insn->func) | ||
| 182 | return 0; | ||
| 183 | |||
| 184 | func_for_each_insn_all(file, func, insn) { | ||
| 156 | empty = false; | 185 | empty = false; |
| 157 | 186 | ||
| 158 | if (insn->type == INSN_RETURN) | 187 | if (insn->type == INSN_RETURN) |
| @@ -167,35 +196,28 @@ static int __dead_end_function(struct objtool_file *file, struct symbol *func, | |||
| 167 | * case, the function's dead-end status depends on whether the target | 196 | * case, the function's dead-end status depends on whether the target |
| 168 | * of the sibling call returns. | 197 | * of the sibling call returns. |
| 169 | */ | 198 | */ |
| 170 | func_for_each_insn(file, func, insn) { | 199 | func_for_each_insn_all(file, func, insn) { |
| 171 | if (insn->sec != func->sec || | ||
| 172 | insn->offset >= func->offset + func->len) | ||
| 173 | break; | ||
| 174 | |||
| 175 | if (insn->type == INSN_JUMP_UNCONDITIONAL) { | 200 | if (insn->type == INSN_JUMP_UNCONDITIONAL) { |
| 176 | struct instruction *dest = insn->jump_dest; | 201 | struct instruction *dest = insn->jump_dest; |
| 177 | struct symbol *dest_func; | ||
| 178 | 202 | ||
| 179 | if (!dest) | 203 | if (!dest) |
| 180 | /* sibling call to another file */ | 204 | /* sibling call to another file */ |
| 181 | return 0; | 205 | return 0; |
| 182 | 206 | ||
| 183 | if (dest->sec != func->sec || | 207 | if (dest->func && dest->func->pfunc != insn->func->pfunc) { |
| 184 | dest->offset < func->offset || | ||
| 185 | dest->offset >= func->offset + func->len) { | ||
| 186 | /* local sibling call */ | ||
| 187 | dest_func = find_symbol_by_offset(dest->sec, | ||
| 188 | dest->offset); | ||
| 189 | if (!dest_func) | ||
| 190 | continue; | ||
| 191 | 208 | ||
| 209 | /* local sibling call */ | ||
| 192 | if (recursion == 5) { | 210 | if (recursion == 5) { |
| 193 | WARN_FUNC("infinite recursion (objtool bug!)", | 211 | /* |
| 194 | dest->sec, dest->offset); | 212 | * Infinite recursion: two functions |
| 195 | return -1; | 213 | * have sibling calls to each other. |
| 214 | * This is a very rare case. It means | ||
| 215 | * they aren't dead ends. | ||
| 216 | */ | ||
| 217 | return 0; | ||
| 196 | } | 218 | } |
| 197 | 219 | ||
| 198 | return __dead_end_function(file, dest_func, | 220 | return __dead_end_function(file, dest->func, |
| 199 | recursion + 1); | 221 | recursion + 1); |
| 200 | } | 222 | } |
| 201 | } | 223 | } |
| @@ -422,7 +444,7 @@ static void add_ignores(struct objtool_file *file) | |||
| 422 | if (!ignore_func(file, func)) | 444 | if (!ignore_func(file, func)) |
| 423 | continue; | 445 | continue; |
| 424 | 446 | ||
| 425 | func_for_each_insn(file, func, insn) | 447 | func_for_each_insn_all(file, func, insn) |
| 426 | insn->ignore = true; | 448 | insn->ignore = true; |
| 427 | } | 449 | } |
| 428 | } | 450 | } |
| @@ -782,30 +804,35 @@ out: | |||
| 782 | return ret; | 804 | return ret; |
| 783 | } | 805 | } |
| 784 | 806 | ||
| 785 | static int add_switch_table(struct objtool_file *file, struct symbol *func, | 807 | static int add_switch_table(struct objtool_file *file, struct instruction *insn, |
| 786 | struct instruction *insn, struct rela *table, | 808 | struct rela *table, struct rela *next_table) |
| 787 | struct rela *next_table) | ||
| 788 | { | 809 | { |
| 789 | struct rela *rela = table; | 810 | struct rela *rela = table; |
| 790 | struct instruction *alt_insn; | 811 | struct instruction *alt_insn; |
| 791 | struct alternative *alt; | 812 | struct alternative *alt; |
| 813 | struct symbol *pfunc = insn->func->pfunc; | ||
| 814 | unsigned int prev_offset = 0; | ||
| 792 | 815 | ||
| 793 | list_for_each_entry_from(rela, &file->rodata->rela->rela_list, list) { | 816 | list_for_each_entry_from(rela, &file->rodata->rela->rela_list, list) { |
| 794 | if (rela == next_table) | 817 | if (rela == next_table) |
| 795 | break; | 818 | break; |
| 796 | 819 | ||
| 797 | if (rela->sym->sec != insn->sec || | 820 | /* Make sure the switch table entries are consecutive: */ |
| 798 | rela->addend <= func->offset || | 821 | if (prev_offset && rela->offset != prev_offset + 8) |
| 799 | rela->addend >= func->offset + func->len) | ||
| 800 | break; | 822 | break; |
| 801 | 823 | ||
| 802 | alt_insn = find_insn(file, insn->sec, rela->addend); | 824 | /* Detect function pointers from contiguous objects: */ |
| 803 | if (!alt_insn) { | 825 | if (rela->sym->sec == pfunc->sec && |
| 804 | WARN("%s: can't find instruction at %s+0x%x", | 826 | rela->addend == pfunc->offset) |
| 805 | file->rodata->rela->name, insn->sec->name, | 827 | break; |
| 806 | rela->addend); | 828 | |
| 807 | return -1; | 829 | alt_insn = find_insn(file, rela->sym->sec, rela->addend); |
| 808 | } | 830 | if (!alt_insn) |
| 831 | break; | ||
| 832 | |||
| 833 | /* Make sure the jmp dest is in the function or subfunction: */ | ||
| 834 | if (alt_insn->func->pfunc != pfunc) | ||
| 835 | break; | ||
| 809 | 836 | ||
| 810 | alt = malloc(sizeof(*alt)); | 837 | alt = malloc(sizeof(*alt)); |
| 811 | if (!alt) { | 838 | if (!alt) { |
| @@ -815,6 +842,13 @@ static int add_switch_table(struct objtool_file *file, struct symbol *func, | |||
| 815 | 842 | ||
| 816 | alt->insn = alt_insn; | 843 | alt->insn = alt_insn; |
| 817 | list_add_tail(&alt->list, &insn->alts); | 844 | list_add_tail(&alt->list, &insn->alts); |
| 845 | prev_offset = rela->offset; | ||
| 846 | } | ||
| 847 | |||
| 848 | if (!prev_offset) { | ||
| 849 | WARN_FUNC("can't find switch jump table", | ||
| 850 | insn->sec, insn->offset); | ||
| 851 | return -1; | ||
| 818 | } | 852 | } |
| 819 | 853 | ||
| 820 | return 0; | 854 | return 0; |
| @@ -869,40 +903,21 @@ static struct rela *find_switch_table(struct objtool_file *file, | |||
| 869 | { | 903 | { |
| 870 | struct rela *text_rela, *rodata_rela; | 904 | struct rela *text_rela, *rodata_rela; |
| 871 | struct instruction *orig_insn = insn; | 905 | struct instruction *orig_insn = insn; |
| 906 | unsigned long table_offset; | ||
| 872 | 907 | ||
| 873 | text_rela = find_rela_by_dest_range(insn->sec, insn->offset, insn->len); | ||
| 874 | if (text_rela && text_rela->sym == file->rodata->sym) { | ||
| 875 | /* case 1 */ | ||
| 876 | rodata_rela = find_rela_by_dest(file->rodata, | ||
| 877 | text_rela->addend); | ||
| 878 | if (rodata_rela) | ||
| 879 | return rodata_rela; | ||
| 880 | |||
| 881 | /* case 2 */ | ||
| 882 | rodata_rela = find_rela_by_dest(file->rodata, | ||
| 883 | text_rela->addend + 4); | ||
| 884 | if (!rodata_rela) | ||
| 885 | return NULL; | ||
| 886 | |||
| 887 | file->ignore_unreachables = true; | ||
| 888 | return rodata_rela; | ||
| 889 | } | ||
| 890 | |||
| 891 | /* case 3 */ | ||
| 892 | /* | 908 | /* |
| 893 | * Backward search using the @first_jump_src links, these help avoid | 909 | * Backward search using the @first_jump_src links, these help avoid |
| 894 | * much of the 'in between' code. Which avoids us getting confused by | 910 | * much of the 'in between' code. Which avoids us getting confused by |
| 895 | * it. | 911 | * it. |
| 896 | */ | 912 | */ |
| 897 | for (insn = list_prev_entry(insn, list); | 913 | for (; |
| 898 | |||
| 899 | &insn->list != &file->insn_list && | 914 | &insn->list != &file->insn_list && |
| 900 | insn->sec == func->sec && | 915 | insn->sec == func->sec && |
| 901 | insn->offset >= func->offset; | 916 | insn->offset >= func->offset; |
| 902 | 917 | ||
| 903 | insn = insn->first_jump_src ?: list_prev_entry(insn, list)) { | 918 | insn = insn->first_jump_src ?: list_prev_entry(insn, list)) { |
| 904 | 919 | ||
| 905 | if (insn->type == INSN_JUMP_DYNAMIC) | 920 | if (insn != orig_insn && insn->type == INSN_JUMP_DYNAMIC) |
| 906 | break; | 921 | break; |
| 907 | 922 | ||
| 908 | /* allow small jumps within the range */ | 923 | /* allow small jumps within the range */ |
| @@ -918,18 +933,29 @@ static struct rela *find_switch_table(struct objtool_file *file, | |||
| 918 | if (!text_rela || text_rela->sym != file->rodata->sym) | 933 | if (!text_rela || text_rela->sym != file->rodata->sym) |
| 919 | continue; | 934 | continue; |
| 920 | 935 | ||
| 936 | table_offset = text_rela->addend; | ||
| 937 | if (text_rela->type == R_X86_64_PC32) | ||
| 938 | table_offset += 4; | ||
| 939 | |||
| 921 | /* | 940 | /* |
| 922 | * Make sure the .rodata address isn't associated with a | 941 | * Make sure the .rodata address isn't associated with a |
| 923 | * symbol. gcc jump tables are anonymous data. | 942 | * symbol. gcc jump tables are anonymous data. |
| 924 | */ | 943 | */ |
| 925 | if (find_symbol_containing(file->rodata, text_rela->addend)) | 944 | if (find_symbol_containing(file->rodata, table_offset)) |
| 926 | continue; | 945 | continue; |
| 927 | 946 | ||
| 928 | rodata_rela = find_rela_by_dest(file->rodata, text_rela->addend); | 947 | rodata_rela = find_rela_by_dest(file->rodata, table_offset); |
| 929 | if (!rodata_rela) | 948 | if (rodata_rela) { |
| 930 | continue; | 949 | /* |
| 950 | * Use of RIP-relative switch jumps is quite rare, and | ||
| 951 | * indicates a rare GCC quirk/bug which can leave dead | ||
| 952 | * code behind. | ||
| 953 | */ | ||
| 954 | if (text_rela->type == R_X86_64_PC32) | ||
| 955 | file->ignore_unreachables = true; | ||
| 931 | 956 | ||
| 932 | return rodata_rela; | 957 | return rodata_rela; |
| 958 | } | ||
| 933 | } | 959 | } |
| 934 | 960 | ||
| 935 | return NULL; | 961 | return NULL; |
| @@ -943,7 +969,7 @@ static int add_func_switch_tables(struct objtool_file *file, | |||
| 943 | struct rela *rela, *prev_rela = NULL; | 969 | struct rela *rela, *prev_rela = NULL; |
| 944 | int ret; | 970 | int ret; |
| 945 | 971 | ||
| 946 | func_for_each_insn(file, func, insn) { | 972 | func_for_each_insn_all(file, func, insn) { |
| 947 | if (!last) | 973 | if (!last) |
| 948 | last = insn; | 974 | last = insn; |
| 949 | 975 | ||
| @@ -974,8 +1000,7 @@ static int add_func_switch_tables(struct objtool_file *file, | |||
| 974 | * the beginning of another switch table in the same function. | 1000 | * the beginning of another switch table in the same function. |
| 975 | */ | 1001 | */ |
| 976 | if (prev_jump) { | 1002 | if (prev_jump) { |
| 977 | ret = add_switch_table(file, func, prev_jump, prev_rela, | 1003 | ret = add_switch_table(file, prev_jump, prev_rela, rela); |
| 978 | rela); | ||
| 979 | if (ret) | 1004 | if (ret) |
| 980 | return ret; | 1005 | return ret; |
| 981 | } | 1006 | } |
| @@ -985,7 +1010,7 @@ static int add_func_switch_tables(struct objtool_file *file, | |||
| 985 | } | 1010 | } |
| 986 | 1011 | ||
| 987 | if (prev_jump) { | 1012 | if (prev_jump) { |
| 988 | ret = add_switch_table(file, func, prev_jump, prev_rela, NULL); | 1013 | ret = add_switch_table(file, prev_jump, prev_rela, NULL); |
| 989 | if (ret) | 1014 | if (ret) |
| 990 | return ret; | 1015 | return ret; |
| 991 | } | 1016 | } |
| @@ -1749,15 +1774,13 @@ static int validate_branch(struct objtool_file *file, struct instruction *first, | |||
| 1749 | while (1) { | 1774 | while (1) { |
| 1750 | next_insn = next_insn_same_sec(file, insn); | 1775 | next_insn = next_insn_same_sec(file, insn); |
| 1751 | 1776 | ||
| 1752 | 1777 | if (file->c_file && func && insn->func && func != insn->func->pfunc) { | |
| 1753 | if (file->c_file && func && insn->func && func != insn->func) { | ||
| 1754 | WARN("%s() falls through to next function %s()", | 1778 | WARN("%s() falls through to next function %s()", |
| 1755 | func->name, insn->func->name); | 1779 | func->name, insn->func->name); |
| 1756 | return 1; | 1780 | return 1; |
| 1757 | } | 1781 | } |
| 1758 | 1782 | ||
| 1759 | if (insn->func) | 1783 | func = insn->func ? insn->func->pfunc : NULL; |
| 1760 | func = insn->func; | ||
| 1761 | 1784 | ||
| 1762 | if (func && insn->ignore) { | 1785 | if (func && insn->ignore) { |
| 1763 | WARN_FUNC("BUG: why am I validating an ignored function?", | 1786 | WARN_FUNC("BUG: why am I validating an ignored function?", |
| @@ -1778,7 +1801,7 @@ static int validate_branch(struct objtool_file *file, struct instruction *first, | |||
| 1778 | 1801 | ||
| 1779 | i = insn; | 1802 | i = insn; |
| 1780 | save_insn = NULL; | 1803 | save_insn = NULL; |
| 1781 | func_for_each_insn_continue_reverse(file, func, i) { | 1804 | func_for_each_insn_continue_reverse(file, insn->func, i) { |
| 1782 | if (i->save) { | 1805 | if (i->save) { |
| 1783 | save_insn = i; | 1806 | save_insn = i; |
| 1784 | break; | 1807 | break; |
| @@ -1865,7 +1888,7 @@ static int validate_branch(struct objtool_file *file, struct instruction *first, | |||
| 1865 | case INSN_JUMP_UNCONDITIONAL: | 1888 | case INSN_JUMP_UNCONDITIONAL: |
| 1866 | if (insn->jump_dest && | 1889 | if (insn->jump_dest && |
| 1867 | (!func || !insn->jump_dest->func || | 1890 | (!func || !insn->jump_dest->func || |
| 1868 | func == insn->jump_dest->func)) { | 1891 | insn->jump_dest->func->pfunc == func)) { |
| 1869 | ret = validate_branch(file, insn->jump_dest, | 1892 | ret = validate_branch(file, insn->jump_dest, |
| 1870 | state); | 1893 | state); |
| 1871 | if (ret) | 1894 | if (ret) |
| @@ -2060,7 +2083,7 @@ static int validate_functions(struct objtool_file *file) | |||
| 2060 | 2083 | ||
| 2061 | for_each_sec(file, sec) { | 2084 | for_each_sec(file, sec) { |
| 2062 | list_for_each_entry(func, &sec->symbol_list, list) { | 2085 | list_for_each_entry(func, &sec->symbol_list, list) { |
| 2063 | if (func->type != STT_FUNC) | 2086 | if (func->type != STT_FUNC || func->pfunc != func) |
| 2064 | continue; | 2087 | continue; |
| 2065 | 2088 | ||
| 2066 | insn = find_insn(file, sec, func->offset); | 2089 | insn = find_insn(file, sec, func->offset); |
diff --git a/tools/objtool/elf.c b/tools/objtool/elf.c index c1c338661699..4e60e105583e 100644 --- a/tools/objtool/elf.c +++ b/tools/objtool/elf.c | |||
| @@ -79,6 +79,19 @@ struct symbol *find_symbol_by_offset(struct section *sec, unsigned long offset) | |||
| 79 | return NULL; | 79 | return NULL; |
| 80 | } | 80 | } |
| 81 | 81 | ||
| 82 | struct symbol *find_symbol_by_name(struct elf *elf, const char *name) | ||
| 83 | { | ||
| 84 | struct section *sec; | ||
| 85 | struct symbol *sym; | ||
| 86 | |||
| 87 | list_for_each_entry(sec, &elf->sections, list) | ||
| 88 | list_for_each_entry(sym, &sec->symbol_list, list) | ||
| 89 | if (!strcmp(sym->name, name)) | ||
| 90 | return sym; | ||
| 91 | |||
| 92 | return NULL; | ||
| 93 | } | ||
| 94 | |||
| 82 | struct symbol *find_symbol_containing(struct section *sec, unsigned long offset) | 95 | struct symbol *find_symbol_containing(struct section *sec, unsigned long offset) |
| 83 | { | 96 | { |
| 84 | struct symbol *sym; | 97 | struct symbol *sym; |
| @@ -203,10 +216,11 @@ static int read_sections(struct elf *elf) | |||
| 203 | 216 | ||
| 204 | static int read_symbols(struct elf *elf) | 217 | static int read_symbols(struct elf *elf) |
| 205 | { | 218 | { |
| 206 | struct section *symtab; | 219 | struct section *symtab, *sec; |
| 207 | struct symbol *sym; | 220 | struct symbol *sym, *pfunc; |
| 208 | struct list_head *entry, *tmp; | 221 | struct list_head *entry, *tmp; |
| 209 | int symbols_nr, i; | 222 | int symbols_nr, i; |
| 223 | char *coldstr; | ||
| 210 | 224 | ||
| 211 | symtab = find_section_by_name(elf, ".symtab"); | 225 | symtab = find_section_by_name(elf, ".symtab"); |
| 212 | if (!symtab) { | 226 | if (!symtab) { |
| @@ -281,6 +295,30 @@ static int read_symbols(struct elf *elf) | |||
| 281 | hash_add(sym->sec->symbol_hash, &sym->hash, sym->idx); | 295 | hash_add(sym->sec->symbol_hash, &sym->hash, sym->idx); |
| 282 | } | 296 | } |
| 283 | 297 | ||
| 298 | /* Create parent/child links for any cold subfunctions */ | ||
| 299 | list_for_each_entry(sec, &elf->sections, list) { | ||
| 300 | list_for_each_entry(sym, &sec->symbol_list, list) { | ||
| 301 | if (sym->type != STT_FUNC) | ||
| 302 | continue; | ||
| 303 | sym->pfunc = sym->cfunc = sym; | ||
| 304 | coldstr = strstr(sym->name, ".cold."); | ||
| 305 | if (coldstr) { | ||
| 306 | coldstr[0] = '\0'; | ||
| 307 | pfunc = find_symbol_by_name(elf, sym->name); | ||
| 308 | coldstr[0] = '.'; | ||
| 309 | |||
| 310 | if (!pfunc) { | ||
| 311 | WARN("%s(): can't find parent function", | ||
| 312 | sym->name); | ||
| 313 | goto err; | ||
| 314 | } | ||
| 315 | |||
| 316 | sym->pfunc = pfunc; | ||
| 317 | pfunc->cfunc = sym; | ||
| 318 | } | ||
| 319 | } | ||
| 320 | } | ||
| 321 | |||
| 284 | return 0; | 322 | return 0; |
| 285 | 323 | ||
| 286 | err: | 324 | err: |
diff --git a/tools/objtool/elf.h b/tools/objtool/elf.h index d86e2ff14466..de5cd2ddded9 100644 --- a/tools/objtool/elf.h +++ b/tools/objtool/elf.h | |||
| @@ -61,6 +61,7 @@ struct symbol { | |||
| 61 | unsigned char bind, type; | 61 | unsigned char bind, type; |
| 62 | unsigned long offset; | 62 | unsigned long offset; |
| 63 | unsigned int len; | 63 | unsigned int len; |
| 64 | struct symbol *pfunc, *cfunc; | ||
| 64 | }; | 65 | }; |
| 65 | 66 | ||
| 66 | struct rela { | 67 | struct rela { |
| @@ -86,6 +87,7 @@ struct elf { | |||
| 86 | struct elf *elf_open(const char *name, int flags); | 87 | struct elf *elf_open(const char *name, int flags); |
| 87 | struct section *find_section_by_name(struct elf *elf, const char *name); | 88 | struct section *find_section_by_name(struct elf *elf, const char *name); |
| 88 | struct symbol *find_symbol_by_offset(struct section *sec, unsigned long offset); | 89 | struct symbol *find_symbol_by_offset(struct section *sec, unsigned long offset); |
| 90 | struct symbol *find_symbol_by_name(struct elf *elf, const char *name); | ||
| 89 | struct symbol *find_symbol_containing(struct section *sec, unsigned long offset); | 91 | struct symbol *find_symbol_containing(struct section *sec, unsigned long offset); |
| 90 | struct rela *find_rela_by_dest(struct section *sec, unsigned long offset); | 92 | struct rela *find_rela_by_dest(struct section *sec, unsigned long offset); |
| 91 | struct rela *find_rela_by_dest_range(struct section *sec, unsigned long offset, | 93 | struct rela *find_rela_by_dest_range(struct section *sec, unsigned long offset, |
diff --git a/tools/perf/tests/shell/record+probe_libc_inet_pton.sh b/tools/perf/tests/shell/record+probe_libc_inet_pton.sh index 016882dbbc16..ee86473643be 100755 --- a/tools/perf/tests/shell/record+probe_libc_inet_pton.sh +++ b/tools/perf/tests/shell/record+probe_libc_inet_pton.sh | |||
| @@ -16,7 +16,7 @@ nm -g $libc 2>/dev/null | fgrep -q inet_pton || exit 254 | |||
| 16 | trace_libc_inet_pton_backtrace() { | 16 | trace_libc_inet_pton_backtrace() { |
| 17 | idx=0 | 17 | idx=0 |
| 18 | expected[0]="ping[][0-9 \.:]+probe_libc:inet_pton: \([[:xdigit:]]+\)" | 18 | expected[0]="ping[][0-9 \.:]+probe_libc:inet_pton: \([[:xdigit:]]+\)" |
| 19 | expected[1]=".*inet_pton[[:space:]]\($libc\)$" | 19 | expected[1]=".*inet_pton[[:space:]]\($libc|inlined\)$" |
| 20 | case "$(uname -m)" in | 20 | case "$(uname -m)" in |
| 21 | s390x) | 21 | s390x) |
| 22 | eventattr='call-graph=dwarf,max-stack=4' | 22 | eventattr='call-graph=dwarf,max-stack=4' |
diff --git a/tools/perf/util/annotate.c b/tools/perf/util/annotate.c index 536ee148bff8..5d74a30fe00f 100644 --- a/tools/perf/util/annotate.c +++ b/tools/perf/util/annotate.c | |||
| @@ -1263,6 +1263,9 @@ annotation_line__print(struct annotation_line *al, struct symbol *sym, u64 start | |||
| 1263 | max_percent = sample->percent; | 1263 | max_percent = sample->percent; |
| 1264 | } | 1264 | } |
| 1265 | 1265 | ||
| 1266 | if (al->samples_nr > nr_percent) | ||
| 1267 | nr_percent = al->samples_nr; | ||
| 1268 | |||
| 1266 | if (max_percent < min_pcnt) | 1269 | if (max_percent < min_pcnt) |
| 1267 | return -1; | 1270 | return -1; |
| 1268 | 1271 | ||
diff --git a/tools/perf/util/cs-etm.c b/tools/perf/util/cs-etm.c index 40020b1ca54f..bf16dc9ee507 100644 --- a/tools/perf/util/cs-etm.c +++ b/tools/perf/util/cs-etm.c | |||
| @@ -239,6 +239,7 @@ static void cs_etm__free(struct perf_session *session) | |||
| 239 | for (i = 0; i < aux->num_cpu; i++) | 239 | for (i = 0; i < aux->num_cpu; i++) |
| 240 | zfree(&aux->metadata[i]); | 240 | zfree(&aux->metadata[i]); |
| 241 | 241 | ||
| 242 | thread__zput(aux->unknown_thread); | ||
| 242 | zfree(&aux->metadata); | 243 | zfree(&aux->metadata); |
| 243 | zfree(&aux); | 244 | zfree(&aux); |
| 244 | } | 245 | } |
| @@ -612,8 +613,8 @@ cs_etm__get_trace(struct cs_etm_buffer *buff, struct cs_etm_queue *etmq) | |||
| 612 | return buff->len; | 613 | return buff->len; |
| 613 | } | 614 | } |
| 614 | 615 | ||
| 615 | static void cs_etm__set_pid_tid_cpu(struct cs_etm_auxtrace *etm, | 616 | static void cs_etm__set_pid_tid_cpu(struct cs_etm_auxtrace *etm, |
| 616 | struct auxtrace_queue *queue) | 617 | struct auxtrace_queue *queue) |
| 617 | { | 618 | { |
| 618 | struct cs_etm_queue *etmq = queue->priv; | 619 | struct cs_etm_queue *etmq = queue->priv; |
| 619 | 620 | ||
| @@ -1357,6 +1358,23 @@ int cs_etm__process_auxtrace_info(union perf_event *event, | |||
| 1357 | etm->auxtrace.free = cs_etm__free; | 1358 | etm->auxtrace.free = cs_etm__free; |
| 1358 | session->auxtrace = &etm->auxtrace; | 1359 | session->auxtrace = &etm->auxtrace; |
| 1359 | 1360 | ||
| 1361 | etm->unknown_thread = thread__new(999999999, 999999999); | ||
| 1362 | if (!etm->unknown_thread) | ||
| 1363 | goto err_free_queues; | ||
| 1364 | |||
| 1365 | /* | ||
| 1366 | * Initialize list node so that at thread__zput() we can avoid | ||
| 1367 | * segmentation fault at list_del_init(). | ||
| 1368 | */ | ||
| 1369 | INIT_LIST_HEAD(&etm->unknown_thread->node); | ||
| 1370 | |||
| 1371 | err = thread__set_comm(etm->unknown_thread, "unknown", 0); | ||
| 1372 | if (err) | ||
| 1373 | goto err_delete_thread; | ||
| 1374 | |||
| 1375 | if (thread__init_map_groups(etm->unknown_thread, etm->machine)) | ||
| 1376 | goto err_delete_thread; | ||
| 1377 | |||
| 1360 | if (dump_trace) { | 1378 | if (dump_trace) { |
| 1361 | cs_etm__print_auxtrace_info(auxtrace_info->priv, num_cpu); | 1379 | cs_etm__print_auxtrace_info(auxtrace_info->priv, num_cpu); |
| 1362 | return 0; | 1380 | return 0; |
| @@ -1371,16 +1389,18 @@ int cs_etm__process_auxtrace_info(union perf_event *event, | |||
| 1371 | 1389 | ||
| 1372 | err = cs_etm__synth_events(etm, session); | 1390 | err = cs_etm__synth_events(etm, session); |
| 1373 | if (err) | 1391 | if (err) |
| 1374 | goto err_free_queues; | 1392 | goto err_delete_thread; |
| 1375 | 1393 | ||
| 1376 | err = auxtrace_queues__process_index(&etm->queues, session); | 1394 | err = auxtrace_queues__process_index(&etm->queues, session); |
| 1377 | if (err) | 1395 | if (err) |
| 1378 | goto err_free_queues; | 1396 | goto err_delete_thread; |
| 1379 | 1397 | ||
| 1380 | etm->data_queued = etm->queues.populated; | 1398 | etm->data_queued = etm->queues.populated; |
| 1381 | 1399 | ||
| 1382 | return 0; | 1400 | return 0; |
| 1383 | 1401 | ||
| 1402 | err_delete_thread: | ||
| 1403 | thread__zput(etm->unknown_thread); | ||
| 1384 | err_free_queues: | 1404 | err_free_queues: |
| 1385 | auxtrace_queues__free(&etm->queues); | 1405 | auxtrace_queues__free(&etm->queues); |
| 1386 | session->auxtrace = NULL; | 1406 | session->auxtrace = NULL; |
diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c index 2fb0272146d8..b8b8a9558d32 100644 --- a/tools/perf/util/parse-events.c +++ b/tools/perf/util/parse-events.c | |||
| @@ -1715,7 +1715,7 @@ int parse_events(struct perf_evlist *evlist, const char *str, | |||
| 1715 | struct perf_evsel *last; | 1715 | struct perf_evsel *last; |
| 1716 | 1716 | ||
| 1717 | if (list_empty(&parse_state.list)) { | 1717 | if (list_empty(&parse_state.list)) { |
| 1718 | WARN_ONCE(true, "WARNING: event parser found nothing"); | 1718 | WARN_ONCE(true, "WARNING: event parser found nothing\n"); |
| 1719 | return -1; | 1719 | return -1; |
| 1720 | } | 1720 | } |
| 1721 | 1721 | ||
diff --git a/tools/testing/radix-tree/Makefile b/tools/testing/radix-tree/Makefile index fa7ee369b3c9..db66f8a0d4be 100644 --- a/tools/testing/radix-tree/Makefile +++ b/tools/testing/radix-tree/Makefile | |||
| @@ -17,7 +17,7 @@ ifeq ($(BUILD), 32) | |||
| 17 | LDFLAGS += -m32 | 17 | LDFLAGS += -m32 |
| 18 | endif | 18 | endif |
| 19 | 19 | ||
| 20 | targets: mapshift $(TARGETS) | 20 | targets: generated/map-shift.h $(TARGETS) |
| 21 | 21 | ||
| 22 | main: $(OFILES) | 22 | main: $(OFILES) |
| 23 | 23 | ||
| @@ -42,9 +42,7 @@ radix-tree.c: ../../../lib/radix-tree.c | |||
| 42 | idr.c: ../../../lib/idr.c | 42 | idr.c: ../../../lib/idr.c |
| 43 | sed -e 's/^static //' -e 's/__always_inline //' -e 's/inline //' < $< > $@ | 43 | sed -e 's/^static //' -e 's/__always_inline //' -e 's/inline //' < $< > $@ |
| 44 | 44 | ||
| 45 | .PHONY: mapshift | 45 | generated/map-shift.h: |
| 46 | |||
| 47 | mapshift: | ||
| 48 | @if ! grep -qws $(SHIFT) generated/map-shift.h; then \ | 46 | @if ! grep -qws $(SHIFT) generated/map-shift.h; then \ |
| 49 | echo "#define RADIX_TREE_MAP_SHIFT $(SHIFT)" > \ | 47 | echo "#define RADIX_TREE_MAP_SHIFT $(SHIFT)" > \ |
| 50 | generated/map-shift.h; \ | 48 | generated/map-shift.h; \ |
diff --git a/tools/testing/radix-tree/idr-test.c b/tools/testing/radix-tree/idr-test.c index 6c645eb77d42..ee820fcc29b0 100644 --- a/tools/testing/radix-tree/idr-test.c +++ b/tools/testing/radix-tree/idr-test.c | |||
| @@ -252,6 +252,13 @@ void idr_checks(void) | |||
| 252 | idr_remove(&idr, 3); | 252 | idr_remove(&idr, 3); |
| 253 | idr_remove(&idr, 0); | 253 | idr_remove(&idr, 0); |
| 254 | 254 | ||
| 255 | assert(idr_alloc(&idr, DUMMY_PTR, 0, 0, GFP_KERNEL) == 0); | ||
| 256 | idr_remove(&idr, 1); | ||
| 257 | for (i = 1; i < RADIX_TREE_MAP_SIZE; i++) | ||
| 258 | assert(idr_alloc(&idr, DUMMY_PTR, 0, 0, GFP_KERNEL) == i); | ||
| 259 | idr_remove(&idr, 1 << 30); | ||
| 260 | idr_destroy(&idr); | ||
| 261 | |||
| 255 | for (i = INT_MAX - 3UL; i < INT_MAX + 1UL; i++) { | 262 | for (i = INT_MAX - 3UL; i < INT_MAX + 1UL; i++) { |
| 256 | struct item *item = item_create(i, 0); | 263 | struct item *item = item_create(i, 0); |
| 257 | assert(idr_alloc(&idr, item, i, i + 10, GFP_KERNEL) == i); | 264 | assert(idr_alloc(&idr, item, i, i + 10, GFP_KERNEL) == i); |
diff --git a/tools/testing/radix-tree/multiorder.c b/tools/testing/radix-tree/multiorder.c index 59245b3d587c..7bf405638b0b 100644 --- a/tools/testing/radix-tree/multiorder.c +++ b/tools/testing/radix-tree/multiorder.c | |||
| @@ -16,6 +16,7 @@ | |||
| 16 | #include <linux/radix-tree.h> | 16 | #include <linux/radix-tree.h> |
| 17 | #include <linux/slab.h> | 17 | #include <linux/slab.h> |
| 18 | #include <linux/errno.h> | 18 | #include <linux/errno.h> |
| 19 | #include <pthread.h> | ||
| 19 | 20 | ||
| 20 | #include "test.h" | 21 | #include "test.h" |
| 21 | 22 | ||
| @@ -624,6 +625,67 @@ static void multiorder_account(void) | |||
| 624 | item_kill_tree(&tree); | 625 | item_kill_tree(&tree); |
| 625 | } | 626 | } |
| 626 | 627 | ||
| 628 | bool stop_iteration = false; | ||
| 629 | |||
| 630 | static void *creator_func(void *ptr) | ||
| 631 | { | ||
| 632 | /* 'order' is set up to ensure we have sibling entries */ | ||
| 633 | unsigned int order = RADIX_TREE_MAP_SHIFT - 1; | ||
| 634 | struct radix_tree_root *tree = ptr; | ||
| 635 | int i; | ||
| 636 | |||
| 637 | for (i = 0; i < 10000; i++) { | ||
| 638 | item_insert_order(tree, 0, order); | ||
| 639 | item_delete_rcu(tree, 0); | ||
| 640 | } | ||
| 641 | |||
| 642 | stop_iteration = true; | ||
| 643 | return NULL; | ||
| 644 | } | ||
| 645 | |||
| 646 | static void *iterator_func(void *ptr) | ||
| 647 | { | ||
| 648 | struct radix_tree_root *tree = ptr; | ||
| 649 | struct radix_tree_iter iter; | ||
| 650 | struct item *item; | ||
| 651 | void **slot; | ||
| 652 | |||
| 653 | while (!stop_iteration) { | ||
| 654 | rcu_read_lock(); | ||
| 655 | radix_tree_for_each_slot(slot, tree, &iter, 0) { | ||
| 656 | item = radix_tree_deref_slot(slot); | ||
| 657 | |||
| 658 | if (!item) | ||
| 659 | continue; | ||
| 660 | if (radix_tree_deref_retry(item)) { | ||
| 661 | slot = radix_tree_iter_retry(&iter); | ||
| 662 | continue; | ||
| 663 | } | ||
| 664 | |||
| 665 | item_sanity(item, iter.index); | ||
| 666 | } | ||
| 667 | rcu_read_unlock(); | ||
| 668 | } | ||
| 669 | return NULL; | ||
| 670 | } | ||
| 671 | |||
| 672 | static void multiorder_iteration_race(void) | ||
| 673 | { | ||
| 674 | const int num_threads = sysconf(_SC_NPROCESSORS_ONLN); | ||
| 675 | pthread_t worker_thread[num_threads]; | ||
| 676 | RADIX_TREE(tree, GFP_KERNEL); | ||
| 677 | int i; | ||
| 678 | |||
| 679 | pthread_create(&worker_thread[0], NULL, &creator_func, &tree); | ||
| 680 | for (i = 1; i < num_threads; i++) | ||
| 681 | pthread_create(&worker_thread[i], NULL, &iterator_func, &tree); | ||
| 682 | |||
| 683 | for (i = 0; i < num_threads; i++) | ||
| 684 | pthread_join(worker_thread[i], NULL); | ||
| 685 | |||
| 686 | item_kill_tree(&tree); | ||
| 687 | } | ||
| 688 | |||
| 627 | void multiorder_checks(void) | 689 | void multiorder_checks(void) |
| 628 | { | 690 | { |
| 629 | int i; | 691 | int i; |
| @@ -644,6 +706,7 @@ void multiorder_checks(void) | |||
| 644 | multiorder_join(); | 706 | multiorder_join(); |
| 645 | multiorder_split(); | 707 | multiorder_split(); |
| 646 | multiorder_account(); | 708 | multiorder_account(); |
| 709 | multiorder_iteration_race(); | ||
| 647 | 710 | ||
| 648 | radix_tree_cpu_dead(0); | 711 | radix_tree_cpu_dead(0); |
| 649 | } | 712 | } |
diff --git a/tools/testing/radix-tree/test.c b/tools/testing/radix-tree/test.c index 5978ab1f403d..def6015570b2 100644 --- a/tools/testing/radix-tree/test.c +++ b/tools/testing/radix-tree/test.c | |||
| @@ -75,6 +75,25 @@ int item_delete(struct radix_tree_root *root, unsigned long index) | |||
| 75 | return 0; | 75 | return 0; |
| 76 | } | 76 | } |
| 77 | 77 | ||
| 78 | static void item_free_rcu(struct rcu_head *head) | ||
| 79 | { | ||
| 80 | struct item *item = container_of(head, struct item, rcu_head); | ||
| 81 | |||
| 82 | free(item); | ||
| 83 | } | ||
| 84 | |||
| 85 | int item_delete_rcu(struct radix_tree_root *root, unsigned long index) | ||
| 86 | { | ||
| 87 | struct item *item = radix_tree_delete(root, index); | ||
| 88 | |||
| 89 | if (item) { | ||
| 90 | item_sanity(item, index); | ||
| 91 | call_rcu(&item->rcu_head, item_free_rcu); | ||
| 92 | return 1; | ||
| 93 | } | ||
| 94 | return 0; | ||
| 95 | } | ||
| 96 | |||
| 78 | void item_check_present(struct radix_tree_root *root, unsigned long index) | 97 | void item_check_present(struct radix_tree_root *root, unsigned long index) |
| 79 | { | 98 | { |
| 80 | struct item *item; | 99 | struct item *item; |
diff --git a/tools/testing/radix-tree/test.h b/tools/testing/radix-tree/test.h index d9c031dbeb1a..31f1d9b6f506 100644 --- a/tools/testing/radix-tree/test.h +++ b/tools/testing/radix-tree/test.h | |||
| @@ -5,6 +5,7 @@ | |||
| 5 | #include <linux/rcupdate.h> | 5 | #include <linux/rcupdate.h> |
| 6 | 6 | ||
| 7 | struct item { | 7 | struct item { |
| 8 | struct rcu_head rcu_head; | ||
| 8 | unsigned long index; | 9 | unsigned long index; |
| 9 | unsigned int order; | 10 | unsigned int order; |
| 10 | }; | 11 | }; |
| @@ -12,9 +13,11 @@ struct item { | |||
| 12 | struct item *item_create(unsigned long index, unsigned int order); | 13 | struct item *item_create(unsigned long index, unsigned int order); |
| 13 | int __item_insert(struct radix_tree_root *root, struct item *item); | 14 | int __item_insert(struct radix_tree_root *root, struct item *item); |
| 14 | int item_insert(struct radix_tree_root *root, unsigned long index); | 15 | int item_insert(struct radix_tree_root *root, unsigned long index); |
| 16 | void item_sanity(struct item *item, unsigned long index); | ||
| 15 | int item_insert_order(struct radix_tree_root *root, unsigned long index, | 17 | int item_insert_order(struct radix_tree_root *root, unsigned long index, |
| 16 | unsigned order); | 18 | unsigned order); |
| 17 | int item_delete(struct radix_tree_root *root, unsigned long index); | 19 | int item_delete(struct radix_tree_root *root, unsigned long index); |
| 20 | int item_delete_rcu(struct radix_tree_root *root, unsigned long index); | ||
| 18 | struct item *item_lookup(struct radix_tree_root *root, unsigned long index); | 21 | struct item *item_lookup(struct radix_tree_root *root, unsigned long index); |
| 19 | 22 | ||
| 20 | void item_check_present(struct radix_tree_root *root, unsigned long index); | 23 | void item_check_present(struct radix_tree_root *root, unsigned long index); |
diff --git a/tools/testing/selftests/bpf/config b/tools/testing/selftests/bpf/config index 983dd25d49f4..1eefe211a4a8 100644 --- a/tools/testing/selftests/bpf/config +++ b/tools/testing/selftests/bpf/config | |||
| @@ -5,3 +5,5 @@ CONFIG_BPF_EVENTS=y | |||
| 5 | CONFIG_TEST_BPF=m | 5 | CONFIG_TEST_BPF=m |
| 6 | CONFIG_CGROUP_BPF=y | 6 | CONFIG_CGROUP_BPF=y |
| 7 | CONFIG_NETDEVSIM=m | 7 | CONFIG_NETDEVSIM=m |
| 8 | CONFIG_NET_CLS_ACT=y | ||
| 9 | CONFIG_NET_SCH_INGRESS=y | ||
diff --git a/tools/testing/selftests/bpf/test_verifier.c b/tools/testing/selftests/bpf/test_verifier.c index 3e7718b1a9ae..fd7de7eb329e 100644 --- a/tools/testing/selftests/bpf/test_verifier.c +++ b/tools/testing/selftests/bpf/test_verifier.c | |||
| @@ -11713,6 +11713,11 @@ static void get_unpriv_disabled() | |||
| 11713 | FILE *fd; | 11713 | FILE *fd; |
| 11714 | 11714 | ||
| 11715 | fd = fopen("/proc/sys/"UNPRIV_SYSCTL, "r"); | 11715 | fd = fopen("/proc/sys/"UNPRIV_SYSCTL, "r"); |
| 11716 | if (!fd) { | ||
| 11717 | perror("fopen /proc/sys/"UNPRIV_SYSCTL); | ||
| 11718 | unpriv_disabled = true; | ||
| 11719 | return; | ||
| 11720 | } | ||
| 11716 | if (fgets(buf, 2, fd) == buf && atoi(buf)) | 11721 | if (fgets(buf, 2, fd) == buf && atoi(buf)) |
| 11717 | unpriv_disabled = true; | 11722 | unpriv_disabled = true; |
| 11718 | fclose(fd); | 11723 | fclose(fd); |
diff --git a/tools/testing/selftests/kvm/Makefile b/tools/testing/selftests/kvm/Makefile index 2ddcc96ae456..d9d00319b07c 100644 --- a/tools/testing/selftests/kvm/Makefile +++ b/tools/testing/selftests/kvm/Makefile | |||
| @@ -15,7 +15,7 @@ LIBKVM += $(LIBKVM_$(UNAME_M)) | |||
| 15 | 15 | ||
| 16 | INSTALL_HDR_PATH = $(top_srcdir)/usr | 16 | INSTALL_HDR_PATH = $(top_srcdir)/usr |
| 17 | LINUX_HDR_PATH = $(INSTALL_HDR_PATH)/include/ | 17 | LINUX_HDR_PATH = $(INSTALL_HDR_PATH)/include/ |
| 18 | CFLAGS += -O2 -g -std=gnu99 -I$(LINUX_HDR_PATH) -Iinclude -I$(<D) | 18 | CFLAGS += -O2 -g -std=gnu99 -I$(LINUX_HDR_PATH) -Iinclude -I$(<D) -I.. |
| 19 | 19 | ||
| 20 | # After inclusion, $(OUTPUT) is defined and | 20 | # After inclusion, $(OUTPUT) is defined and |
| 21 | # $(TEST_GEN_PROGS) starts with $(OUTPUT)/ | 21 | # $(TEST_GEN_PROGS) starts with $(OUTPUT)/ |
diff --git a/tools/testing/selftests/kvm/include/test_util.h b/tools/testing/selftests/kvm/include/test_util.h index 7ab98e41324f..ac53730b30aa 100644 --- a/tools/testing/selftests/kvm/include/test_util.h +++ b/tools/testing/selftests/kvm/include/test_util.h | |||
| @@ -19,6 +19,7 @@ | |||
| 19 | #include <errno.h> | 19 | #include <errno.h> |
| 20 | #include <unistd.h> | 20 | #include <unistd.h> |
| 21 | #include <fcntl.h> | 21 | #include <fcntl.h> |
| 22 | #include "kselftest.h" | ||
| 22 | 23 | ||
| 23 | ssize_t test_write(int fd, const void *buf, size_t count); | 24 | ssize_t test_write(int fd, const void *buf, size_t count); |
| 24 | ssize_t test_read(int fd, void *buf, size_t count); | 25 | ssize_t test_read(int fd, void *buf, size_t count); |
diff --git a/tools/testing/selftests/kvm/lib/kvm_util.c b/tools/testing/selftests/kvm/lib/kvm_util.c index 2cedfda181d4..37e2a787d2fc 100644 --- a/tools/testing/selftests/kvm/lib/kvm_util.c +++ b/tools/testing/selftests/kvm/lib/kvm_util.c | |||
| @@ -50,8 +50,8 @@ int kvm_check_cap(long cap) | |||
| 50 | int kvm_fd; | 50 | int kvm_fd; |
| 51 | 51 | ||
| 52 | kvm_fd = open(KVM_DEV_PATH, O_RDONLY); | 52 | kvm_fd = open(KVM_DEV_PATH, O_RDONLY); |
| 53 | TEST_ASSERT(kvm_fd >= 0, "open %s failed, rc: %i errno: %i", | 53 | if (kvm_fd < 0) |
| 54 | KVM_DEV_PATH, kvm_fd, errno); | 54 | exit(KSFT_SKIP); |
| 55 | 55 | ||
| 56 | ret = ioctl(kvm_fd, KVM_CHECK_EXTENSION, cap); | 56 | ret = ioctl(kvm_fd, KVM_CHECK_EXTENSION, cap); |
| 57 | TEST_ASSERT(ret != -1, "KVM_CHECK_EXTENSION IOCTL failed,\n" | 57 | TEST_ASSERT(ret != -1, "KVM_CHECK_EXTENSION IOCTL failed,\n" |
| @@ -91,8 +91,8 @@ struct kvm_vm *vm_create(enum vm_guest_mode mode, uint64_t phy_pages, int perm) | |||
| 91 | 91 | ||
| 92 | vm->mode = mode; | 92 | vm->mode = mode; |
| 93 | kvm_fd = open(KVM_DEV_PATH, perm); | 93 | kvm_fd = open(KVM_DEV_PATH, perm); |
| 94 | TEST_ASSERT(kvm_fd >= 0, "open %s failed, rc: %i errno: %i", | 94 | if (kvm_fd < 0) |
| 95 | KVM_DEV_PATH, kvm_fd, errno); | 95 | exit(KSFT_SKIP); |
| 96 | 96 | ||
| 97 | /* Create VM. */ | 97 | /* Create VM. */ |
| 98 | vm->fd = ioctl(kvm_fd, KVM_CREATE_VM, NULL); | 98 | vm->fd = ioctl(kvm_fd, KVM_CREATE_VM, NULL); |
| @@ -418,8 +418,8 @@ struct kvm_cpuid2 *kvm_get_supported_cpuid(void) | |||
| 418 | 418 | ||
| 419 | cpuid = allocate_kvm_cpuid2(); | 419 | cpuid = allocate_kvm_cpuid2(); |
| 420 | kvm_fd = open(KVM_DEV_PATH, O_RDONLY); | 420 | kvm_fd = open(KVM_DEV_PATH, O_RDONLY); |
| 421 | TEST_ASSERT(kvm_fd >= 0, "open %s failed, rc: %i errno: %i", | 421 | if (kvm_fd < 0) |
| 422 | KVM_DEV_PATH, kvm_fd, errno); | 422 | exit(KSFT_SKIP); |
| 423 | 423 | ||
| 424 | ret = ioctl(kvm_fd, KVM_GET_SUPPORTED_CPUID, cpuid); | 424 | ret = ioctl(kvm_fd, KVM_GET_SUPPORTED_CPUID, cpuid); |
| 425 | TEST_ASSERT(ret == 0, "KVM_GET_SUPPORTED_CPUID failed %d %d\n", | 425 | TEST_ASSERT(ret == 0, "KVM_GET_SUPPORTED_CPUID failed %d %d\n", |
| @@ -675,8 +675,8 @@ static int vcpu_mmap_sz(void) | |||
| 675 | int dev_fd, ret; | 675 | int dev_fd, ret; |
| 676 | 676 | ||
| 677 | dev_fd = open(KVM_DEV_PATH, O_RDONLY); | 677 | dev_fd = open(KVM_DEV_PATH, O_RDONLY); |
| 678 | TEST_ASSERT(dev_fd >= 0, "%s open %s failed, rc: %i errno: %i", | 678 | if (dev_fd < 0) |
| 679 | __func__, KVM_DEV_PATH, dev_fd, errno); | 679 | exit(KSFT_SKIP); |
| 680 | 680 | ||
| 681 | ret = ioctl(dev_fd, KVM_GET_VCPU_MMAP_SIZE, NULL); | 681 | ret = ioctl(dev_fd, KVM_GET_VCPU_MMAP_SIZE, NULL); |
| 682 | TEST_ASSERT(ret >= sizeof(struct kvm_run), | 682 | TEST_ASSERT(ret >= sizeof(struct kvm_run), |
diff --git a/tools/testing/selftests/kvm/sync_regs_test.c b/tools/testing/selftests/kvm/sync_regs_test.c index 428e9473f5e2..eae1ece3c31b 100644 --- a/tools/testing/selftests/kvm/sync_regs_test.c +++ b/tools/testing/selftests/kvm/sync_regs_test.c | |||
| @@ -85,6 +85,9 @@ static void compare_vcpu_events(struct kvm_vcpu_events *left, | |||
| 85 | { | 85 | { |
| 86 | } | 86 | } |
| 87 | 87 | ||
| 88 | #define TEST_SYNC_FIELDS (KVM_SYNC_X86_REGS|KVM_SYNC_X86_SREGS|KVM_SYNC_X86_EVENTS) | ||
| 89 | #define INVALID_SYNC_FIELD 0x80000000 | ||
| 90 | |||
| 88 | int main(int argc, char *argv[]) | 91 | int main(int argc, char *argv[]) |
| 89 | { | 92 | { |
| 90 | struct kvm_vm *vm; | 93 | struct kvm_vm *vm; |
| @@ -98,9 +101,14 @@ int main(int argc, char *argv[]) | |||
| 98 | setbuf(stdout, NULL); | 101 | setbuf(stdout, NULL); |
| 99 | 102 | ||
| 100 | cap = kvm_check_cap(KVM_CAP_SYNC_REGS); | 103 | cap = kvm_check_cap(KVM_CAP_SYNC_REGS); |
| 101 | TEST_ASSERT((unsigned long)cap == KVM_SYNC_X86_VALID_FIELDS, | 104 | if ((cap & TEST_SYNC_FIELDS) != TEST_SYNC_FIELDS) { |
| 102 | "KVM_CAP_SYNC_REGS (0x%x) != KVM_SYNC_X86_VALID_FIELDS (0x%lx)\n", | 105 | fprintf(stderr, "KVM_CAP_SYNC_REGS not supported, skipping test\n"); |
| 103 | cap, KVM_SYNC_X86_VALID_FIELDS); | 106 | exit(KSFT_SKIP); |
| 107 | } | ||
| 108 | if ((cap & INVALID_SYNC_FIELD) != 0) { | ||
| 109 | fprintf(stderr, "The \"invalid\" field is not invalid, skipping test\n"); | ||
| 110 | exit(KSFT_SKIP); | ||
| 111 | } | ||
| 104 | 112 | ||
| 105 | /* Create VM */ | 113 | /* Create VM */ |
| 106 | vm = vm_create_default(VCPU_ID, guest_code); | 114 | vm = vm_create_default(VCPU_ID, guest_code); |
| @@ -108,7 +116,14 @@ int main(int argc, char *argv[]) | |||
| 108 | run = vcpu_state(vm, VCPU_ID); | 116 | run = vcpu_state(vm, VCPU_ID); |
| 109 | 117 | ||
| 110 | /* Request reading invalid register set from VCPU. */ | 118 | /* Request reading invalid register set from VCPU. */ |
| 111 | run->kvm_valid_regs = KVM_SYNC_X86_VALID_FIELDS << 1; | 119 | run->kvm_valid_regs = INVALID_SYNC_FIELD; |
| 120 | rv = _vcpu_run(vm, VCPU_ID); | ||
| 121 | TEST_ASSERT(rv < 0 && errno == EINVAL, | ||
| 122 | "Invalid kvm_valid_regs did not cause expected KVM_RUN error: %d\n", | ||
| 123 | rv); | ||
| 124 | vcpu_state(vm, VCPU_ID)->kvm_valid_regs = 0; | ||
| 125 | |||
| 126 | run->kvm_valid_regs = INVALID_SYNC_FIELD | TEST_SYNC_FIELDS; | ||
| 112 | rv = _vcpu_run(vm, VCPU_ID); | 127 | rv = _vcpu_run(vm, VCPU_ID); |
| 113 | TEST_ASSERT(rv < 0 && errno == EINVAL, | 128 | TEST_ASSERT(rv < 0 && errno == EINVAL, |
| 114 | "Invalid kvm_valid_regs did not cause expected KVM_RUN error: %d\n", | 129 | "Invalid kvm_valid_regs did not cause expected KVM_RUN error: %d\n", |
| @@ -116,7 +131,14 @@ int main(int argc, char *argv[]) | |||
| 116 | vcpu_state(vm, VCPU_ID)->kvm_valid_regs = 0; | 131 | vcpu_state(vm, VCPU_ID)->kvm_valid_regs = 0; |
| 117 | 132 | ||
| 118 | /* Request setting invalid register set into VCPU. */ | 133 | /* Request setting invalid register set into VCPU. */ |
| 119 | run->kvm_dirty_regs = KVM_SYNC_X86_VALID_FIELDS << 1; | 134 | run->kvm_dirty_regs = INVALID_SYNC_FIELD; |
| 135 | rv = _vcpu_run(vm, VCPU_ID); | ||
| 136 | TEST_ASSERT(rv < 0 && errno == EINVAL, | ||
| 137 | "Invalid kvm_dirty_regs did not cause expected KVM_RUN error: %d\n", | ||
| 138 | rv); | ||
| 139 | vcpu_state(vm, VCPU_ID)->kvm_dirty_regs = 0; | ||
| 140 | |||
| 141 | run->kvm_dirty_regs = INVALID_SYNC_FIELD | TEST_SYNC_FIELDS; | ||
| 120 | rv = _vcpu_run(vm, VCPU_ID); | 142 | rv = _vcpu_run(vm, VCPU_ID); |
| 121 | TEST_ASSERT(rv < 0 && errno == EINVAL, | 143 | TEST_ASSERT(rv < 0 && errno == EINVAL, |
| 122 | "Invalid kvm_dirty_regs did not cause expected KVM_RUN error: %d\n", | 144 | "Invalid kvm_dirty_regs did not cause expected KVM_RUN error: %d\n", |
| @@ -125,7 +147,7 @@ int main(int argc, char *argv[]) | |||
| 125 | 147 | ||
| 126 | /* Request and verify all valid register sets. */ | 148 | /* Request and verify all valid register sets. */ |
| 127 | /* TODO: BUILD TIME CHECK: TEST_ASSERT(KVM_SYNC_X86_NUM_FIELDS != 3); */ | 149 | /* TODO: BUILD TIME CHECK: TEST_ASSERT(KVM_SYNC_X86_NUM_FIELDS != 3); */ |
| 128 | run->kvm_valid_regs = KVM_SYNC_X86_VALID_FIELDS; | 150 | run->kvm_valid_regs = TEST_SYNC_FIELDS; |
| 129 | rv = _vcpu_run(vm, VCPU_ID); | 151 | rv = _vcpu_run(vm, VCPU_ID); |
| 130 | TEST_ASSERT(run->exit_reason == KVM_EXIT_IO, | 152 | TEST_ASSERT(run->exit_reason == KVM_EXIT_IO, |
| 131 | "Unexpected exit reason: %u (%s),\n", | 153 | "Unexpected exit reason: %u (%s),\n", |
| @@ -146,7 +168,7 @@ int main(int argc, char *argv[]) | |||
| 146 | run->s.regs.sregs.apic_base = 1 << 11; | 168 | run->s.regs.sregs.apic_base = 1 << 11; |
| 147 | /* TODO run->s.regs.events.XYZ = ABC; */ | 169 | /* TODO run->s.regs.events.XYZ = ABC; */ |
| 148 | 170 | ||
| 149 | run->kvm_valid_regs = KVM_SYNC_X86_VALID_FIELDS; | 171 | run->kvm_valid_regs = TEST_SYNC_FIELDS; |
| 150 | run->kvm_dirty_regs = KVM_SYNC_X86_REGS | KVM_SYNC_X86_SREGS; | 172 | run->kvm_dirty_regs = KVM_SYNC_X86_REGS | KVM_SYNC_X86_SREGS; |
| 151 | rv = _vcpu_run(vm, VCPU_ID); | 173 | rv = _vcpu_run(vm, VCPU_ID); |
| 152 | TEST_ASSERT(run->exit_reason == KVM_EXIT_IO, | 174 | TEST_ASSERT(run->exit_reason == KVM_EXIT_IO, |
| @@ -172,7 +194,7 @@ int main(int argc, char *argv[]) | |||
| 172 | /* Clear kvm_dirty_regs bits, verify new s.regs values are | 194 | /* Clear kvm_dirty_regs bits, verify new s.regs values are |
| 173 | * overwritten with existing guest values. | 195 | * overwritten with existing guest values. |
| 174 | */ | 196 | */ |
| 175 | run->kvm_valid_regs = KVM_SYNC_X86_VALID_FIELDS; | 197 | run->kvm_valid_regs = TEST_SYNC_FIELDS; |
| 176 | run->kvm_dirty_regs = 0; | 198 | run->kvm_dirty_regs = 0; |
| 177 | run->s.regs.regs.r11 = 0xDEADBEEF; | 199 | run->s.regs.regs.r11 = 0xDEADBEEF; |
| 178 | rv = _vcpu_run(vm, VCPU_ID); | 200 | rv = _vcpu_run(vm, VCPU_ID); |
| @@ -211,7 +233,7 @@ int main(int argc, char *argv[]) | |||
| 211 | * with kvm_sync_regs values. | 233 | * with kvm_sync_regs values. |
| 212 | */ | 234 | */ |
| 213 | run->kvm_valid_regs = 0; | 235 | run->kvm_valid_regs = 0; |
| 214 | run->kvm_dirty_regs = KVM_SYNC_X86_VALID_FIELDS; | 236 | run->kvm_dirty_regs = TEST_SYNC_FIELDS; |
| 215 | run->s.regs.regs.r11 = 0xBBBB; | 237 | run->s.regs.regs.r11 = 0xBBBB; |
| 216 | rv = _vcpu_run(vm, VCPU_ID); | 238 | rv = _vcpu_run(vm, VCPU_ID); |
| 217 | TEST_ASSERT(run->exit_reason == KVM_EXIT_IO, | 239 | TEST_ASSERT(run->exit_reason == KVM_EXIT_IO, |
diff --git a/tools/testing/selftests/kvm/vmx_tsc_adjust_test.c b/tools/testing/selftests/kvm/vmx_tsc_adjust_test.c index 8f7f62093add..aaa633263b2c 100644 --- a/tools/testing/selftests/kvm/vmx_tsc_adjust_test.c +++ b/tools/testing/selftests/kvm/vmx_tsc_adjust_test.c | |||
| @@ -189,8 +189,8 @@ int main(int argc, char *argv[]) | |||
| 189 | struct kvm_cpuid_entry2 *entry = kvm_get_supported_cpuid_entry(1); | 189 | struct kvm_cpuid_entry2 *entry = kvm_get_supported_cpuid_entry(1); |
| 190 | 190 | ||
| 191 | if (!(entry->ecx & CPUID_VMX)) { | 191 | if (!(entry->ecx & CPUID_VMX)) { |
| 192 | printf("nested VMX not enabled, skipping test"); | 192 | fprintf(stderr, "nested VMX not enabled, skipping test\n"); |
| 193 | return 0; | 193 | exit(KSFT_SKIP); |
| 194 | } | 194 | } |
| 195 | 195 | ||
| 196 | vm = vm_create_default_vmx(VCPU_ID, (void *) l1_guest_code); | 196 | vm = vm_create_default_vmx(VCPU_ID, (void *) l1_guest_code); |
diff --git a/tools/testing/selftests/net/config b/tools/testing/selftests/net/config index 6a75a3ea44ad..7ba089b33e8b 100644 --- a/tools/testing/selftests/net/config +++ b/tools/testing/selftests/net/config | |||
| @@ -7,3 +7,8 @@ CONFIG_NET_L3_MASTER_DEV=y | |||
| 7 | CONFIG_IPV6=y | 7 | CONFIG_IPV6=y |
| 8 | CONFIG_IPV6_MULTIPLE_TABLES=y | 8 | CONFIG_IPV6_MULTIPLE_TABLES=y |
| 9 | CONFIG_VETH=y | 9 | CONFIG_VETH=y |
| 10 | CONFIG_INET_XFRM_MODE_TUNNEL=y | ||
| 11 | CONFIG_NET_IPVTI=y | ||
| 12 | CONFIG_INET6_XFRM_MODE_TUNNEL=y | ||
| 13 | CONFIG_IPV6_VTI=y | ||
| 14 | CONFIG_DUMMY=y | ||
diff --git a/tools/testing/selftests/net/reuseport_bpf_numa.c b/tools/testing/selftests/net/reuseport_bpf_numa.c index 365c32e84189..c9f478b40996 100644 --- a/tools/testing/selftests/net/reuseport_bpf_numa.c +++ b/tools/testing/selftests/net/reuseport_bpf_numa.c | |||
| @@ -23,6 +23,8 @@ | |||
| 23 | #include <unistd.h> | 23 | #include <unistd.h> |
| 24 | #include <numa.h> | 24 | #include <numa.h> |
| 25 | 25 | ||
| 26 | #include "../kselftest.h" | ||
| 27 | |||
| 26 | static const int PORT = 8888; | 28 | static const int PORT = 8888; |
| 27 | 29 | ||
| 28 | static void build_rcv_group(int *rcv_fd, size_t len, int family, int proto) | 30 | static void build_rcv_group(int *rcv_fd, size_t len, int family, int proto) |
| @@ -229,7 +231,7 @@ int main(void) | |||
| 229 | int *rcv_fd, nodes; | 231 | int *rcv_fd, nodes; |
| 230 | 232 | ||
| 231 | if (numa_available() < 0) | 233 | if (numa_available() < 0) |
| 232 | error(1, errno, "no numa api support"); | 234 | ksft_exit_skip("no numa api support\n"); |
| 233 | 235 | ||
| 234 | nodes = numa_max_node() + 1; | 236 | nodes = numa_max_node() + 1; |
| 235 | 237 | ||
diff --git a/tools/testing/selftests/seccomp/seccomp_bpf.c b/tools/testing/selftests/seccomp/seccomp_bpf.c index 168c66d74fc5..e1473234968d 100644 --- a/tools/testing/selftests/seccomp/seccomp_bpf.c +++ b/tools/testing/selftests/seccomp/seccomp_bpf.c | |||
| @@ -134,11 +134,15 @@ struct seccomp_data { | |||
| 134 | #endif | 134 | #endif |
| 135 | 135 | ||
| 136 | #ifndef SECCOMP_FILTER_FLAG_TSYNC | 136 | #ifndef SECCOMP_FILTER_FLAG_TSYNC |
| 137 | #define SECCOMP_FILTER_FLAG_TSYNC 1 | 137 | #define SECCOMP_FILTER_FLAG_TSYNC (1UL << 0) |
| 138 | #endif | 138 | #endif |
| 139 | 139 | ||
| 140 | #ifndef SECCOMP_FILTER_FLAG_LOG | 140 | #ifndef SECCOMP_FILTER_FLAG_LOG |
| 141 | #define SECCOMP_FILTER_FLAG_LOG 2 | 141 | #define SECCOMP_FILTER_FLAG_LOG (1UL << 1) |
| 142 | #endif | ||
| 143 | |||
| 144 | #ifndef SECCOMP_FILTER_FLAG_SPEC_ALLOW | ||
| 145 | #define SECCOMP_FILTER_FLAG_SPEC_ALLOW (1UL << 2) | ||
| 142 | #endif | 146 | #endif |
| 143 | 147 | ||
| 144 | #ifndef PTRACE_SECCOMP_GET_METADATA | 148 | #ifndef PTRACE_SECCOMP_GET_METADATA |
| @@ -2072,14 +2076,26 @@ TEST(seccomp_syscall_mode_lock) | |||
| 2072 | TEST(detect_seccomp_filter_flags) | 2076 | TEST(detect_seccomp_filter_flags) |
| 2073 | { | 2077 | { |
| 2074 | unsigned int flags[] = { SECCOMP_FILTER_FLAG_TSYNC, | 2078 | unsigned int flags[] = { SECCOMP_FILTER_FLAG_TSYNC, |
| 2075 | SECCOMP_FILTER_FLAG_LOG }; | 2079 | SECCOMP_FILTER_FLAG_LOG, |
| 2080 | SECCOMP_FILTER_FLAG_SPEC_ALLOW }; | ||
| 2076 | unsigned int flag, all_flags; | 2081 | unsigned int flag, all_flags; |
| 2077 | int i; | 2082 | int i; |
| 2078 | long ret; | 2083 | long ret; |
| 2079 | 2084 | ||
| 2080 | /* Test detection of known-good filter flags */ | 2085 | /* Test detection of known-good filter flags */ |
| 2081 | for (i = 0, all_flags = 0; i < ARRAY_SIZE(flags); i++) { | 2086 | for (i = 0, all_flags = 0; i < ARRAY_SIZE(flags); i++) { |
| 2087 | int bits = 0; | ||
| 2088 | |||
| 2082 | flag = flags[i]; | 2089 | flag = flags[i]; |
| 2090 | /* Make sure the flag is a single bit! */ | ||
| 2091 | while (flag) { | ||
| 2092 | if (flag & 0x1) | ||
| 2093 | bits ++; | ||
| 2094 | flag >>= 1; | ||
| 2095 | } | ||
| 2096 | ASSERT_EQ(1, bits); | ||
| 2097 | flag = flags[i]; | ||
| 2098 | |||
| 2083 | ret = seccomp(SECCOMP_SET_MODE_FILTER, flag, NULL); | 2099 | ret = seccomp(SECCOMP_SET_MODE_FILTER, flag, NULL); |
| 2084 | ASSERT_NE(ENOSYS, errno) { | 2100 | ASSERT_NE(ENOSYS, errno) { |
| 2085 | TH_LOG("Kernel does not support seccomp syscall!"); | 2101 | TH_LOG("Kernel does not support seccomp syscall!"); |
diff --git a/tools/testing/selftests/x86/Makefile b/tools/testing/selftests/x86/Makefile index d744991c0f4f..39f66bc29b82 100644 --- a/tools/testing/selftests/x86/Makefile +++ b/tools/testing/selftests/x86/Makefile | |||
| @@ -11,7 +11,7 @@ CAN_BUILD_X86_64 := $(shell ./check_cc.sh $(CC) trivial_64bit_program.c) | |||
| 11 | 11 | ||
| 12 | TARGETS_C_BOTHBITS := single_step_syscall sysret_ss_attrs syscall_nt test_mremap_vdso \ | 12 | TARGETS_C_BOTHBITS := single_step_syscall sysret_ss_attrs syscall_nt test_mremap_vdso \ |
| 13 | check_initial_reg_state sigreturn iopl mpx-mini-test ioperm \ | 13 | check_initial_reg_state sigreturn iopl mpx-mini-test ioperm \ |
| 14 | protection_keys test_vdso test_vsyscall | 14 | protection_keys test_vdso test_vsyscall mov_ss_trap |
| 15 | TARGETS_C_32BIT_ONLY := entry_from_vm86 syscall_arg_fault test_syscall_vdso unwind_vdso \ | 15 | TARGETS_C_32BIT_ONLY := entry_from_vm86 syscall_arg_fault test_syscall_vdso unwind_vdso \ |
| 16 | test_FCMOV test_FCOMI test_FISTTP \ | 16 | test_FCMOV test_FCOMI test_FISTTP \ |
| 17 | vdso_restorer | 17 | vdso_restorer |
diff --git a/tools/testing/selftests/x86/mov_ss_trap.c b/tools/testing/selftests/x86/mov_ss_trap.c new file mode 100644 index 000000000000..3c3a022654f3 --- /dev/null +++ b/tools/testing/selftests/x86/mov_ss_trap.c | |||
| @@ -0,0 +1,285 @@ | |||
| 1 | /* SPDX-License-Identifier: GPL-2.0 */ | ||
| 2 | /* | ||
| 3 | * mov_ss_trap.c: Exercise the bizarre side effects of a watchpoint on MOV SS | ||
| 4 | * | ||
| 5 | * This does MOV SS from a watchpointed address followed by various | ||
| 6 | * types of kernel entries. A MOV SS that hits a watchpoint will queue | ||
| 7 | * up a #DB trap but will not actually deliver that trap. The trap | ||
| 8 | * will be delivered after the next instruction instead. The CPU's logic | ||
| 9 | * seems to be: | ||
| 10 | * | ||
| 11 | * - Any fault: drop the pending #DB trap. | ||
| 12 | * - INT $N, INT3, INTO, SYSCALL, SYSENTER: enter the kernel and then | ||
| 13 | * deliver #DB. | ||
| 14 | * - ICEBP: enter the kernel but do not deliver the watchpoint trap | ||
| 15 | * - breakpoint: only one #DB is delivered (phew!) | ||
| 16 | * | ||
| 17 | * There are plenty of ways for a kernel to handle this incorrectly. This | ||
| 18 | * test tries to exercise all the cases. | ||
| 19 | * | ||
| 20 | * This should mostly cover CVE-2018-1087 and CVE-2018-8897. | ||
| 21 | */ | ||
| 22 | #define _GNU_SOURCE | ||
| 23 | |||
| 24 | #include <stdlib.h> | ||
| 25 | #include <sys/ptrace.h> | ||
| 26 | #include <sys/types.h> | ||
| 27 | #include <sys/wait.h> | ||
| 28 | #include <sys/user.h> | ||
| 29 | #include <sys/syscall.h> | ||
| 30 | #include <unistd.h> | ||
| 31 | #include <errno.h> | ||
| 32 | #include <stddef.h> | ||
| 33 | #include <stdio.h> | ||
| 34 | #include <err.h> | ||
| 35 | #include <string.h> | ||
| 36 | #include <setjmp.h> | ||
| 37 | #include <sys/prctl.h> | ||
| 38 | |||
| 39 | #define X86_EFLAGS_RF (1UL << 16) | ||
| 40 | |||
| 41 | #if __x86_64__ | ||
| 42 | # define REG_IP REG_RIP | ||
| 43 | #else | ||
| 44 | # define REG_IP REG_EIP | ||
| 45 | #endif | ||
| 46 | |||
| 47 | unsigned short ss; | ||
| 48 | extern unsigned char breakpoint_insn[]; | ||
| 49 | sigjmp_buf jmpbuf; | ||
| 50 | static unsigned char altstack_data[SIGSTKSZ]; | ||
| 51 | |||
| 52 | static void enable_watchpoint(void) | ||
| 53 | { | ||
| 54 | pid_t parent = getpid(); | ||
| 55 | int status; | ||
| 56 | |||
| 57 | pid_t child = fork(); | ||
| 58 | if (child < 0) | ||
| 59 | err(1, "fork"); | ||
| 60 | |||
| 61 | if (child) { | ||
| 62 | if (waitpid(child, &status, 0) != child) | ||
| 63 | err(1, "waitpid for child"); | ||
| 64 | } else { | ||
| 65 | unsigned long dr0, dr1, dr7; | ||
| 66 | |||
| 67 | dr0 = (unsigned long)&ss; | ||
| 68 | dr1 = (unsigned long)breakpoint_insn; | ||
| 69 | dr7 = ((1UL << 1) | /* G0 */ | ||
| 70 | (3UL << 16) | /* RW0 = read or write */ | ||
| 71 | (1UL << 18) | /* LEN0 = 2 bytes */ | ||
| 72 | (1UL << 3)); /* G1, RW1 = insn */ | ||
| 73 | |||
| 74 | if (ptrace(PTRACE_ATTACH, parent, NULL, NULL) != 0) | ||
| 75 | err(1, "PTRACE_ATTACH"); | ||
| 76 | |||
| 77 | if (waitpid(parent, &status, 0) != parent) | ||
| 78 | err(1, "waitpid for child"); | ||
| 79 | |||
| 80 | if (ptrace(PTRACE_POKEUSER, parent, (void *)offsetof(struct user, u_debugreg[0]), dr0) != 0) | ||
| 81 | err(1, "PTRACE_POKEUSER DR0"); | ||
| 82 | |||
| 83 | if (ptrace(PTRACE_POKEUSER, parent, (void *)offsetof(struct user, u_debugreg[1]), dr1) != 0) | ||
| 84 | err(1, "PTRACE_POKEUSER DR1"); | ||
| 85 | |||
| 86 | if (ptrace(PTRACE_POKEUSER, parent, (void *)offsetof(struct user, u_debugreg[7]), dr7) != 0) | ||
| 87 | err(1, "PTRACE_POKEUSER DR7"); | ||
| 88 | |||
| 89 | printf("\tDR0 = %lx, DR1 = %lx, DR7 = %lx\n", dr0, dr1, dr7); | ||
| 90 | |||
| 91 | if (ptrace(PTRACE_DETACH, parent, NULL, NULL) != 0) | ||
| 92 | err(1, "PTRACE_DETACH"); | ||
| 93 | |||
| 94 | exit(0); | ||
| 95 | } | ||
| 96 | } | ||
| 97 | |||
| 98 | static void sethandler(int sig, void (*handler)(int, siginfo_t *, void *), | ||
| 99 | int flags) | ||
| 100 | { | ||
| 101 | struct sigaction sa; | ||
| 102 | memset(&sa, 0, sizeof(sa)); | ||
| 103 | sa.sa_sigaction = handler; | ||
| 104 | sa.sa_flags = SA_SIGINFO | flags; | ||
| 105 | sigemptyset(&sa.sa_mask); | ||
| 106 | if (sigaction(sig, &sa, 0)) | ||
| 107 | err(1, "sigaction"); | ||
| 108 | } | ||
| 109 | |||
| 110 | static char const * const signames[] = { | ||
| 111 | [SIGSEGV] = "SIGSEGV", | ||
| 112 | [SIGBUS] = "SIBGUS", | ||
| 113 | [SIGTRAP] = "SIGTRAP", | ||
| 114 | [SIGILL] = "SIGILL", | ||
| 115 | }; | ||
| 116 | |||
| 117 | static void sigtrap(int sig, siginfo_t *si, void *ctx_void) | ||
| 118 | { | ||
| 119 | ucontext_t *ctx = ctx_void; | ||
| 120 | |||
| 121 | printf("\tGot SIGTRAP with RIP=%lx, EFLAGS.RF=%d\n", | ||
| 122 | (unsigned long)ctx->uc_mcontext.gregs[REG_IP], | ||
| 123 | !!(ctx->uc_mcontext.gregs[REG_EFL] & X86_EFLAGS_RF)); | ||
| 124 | } | ||
| 125 | |||
| 126 | static void handle_and_return(int sig, siginfo_t *si, void *ctx_void) | ||
| 127 | { | ||
| 128 | ucontext_t *ctx = ctx_void; | ||
| 129 | |||
| 130 | printf("\tGot %s with RIP=%lx\n", signames[sig], | ||
| 131 | (unsigned long)ctx->uc_mcontext.gregs[REG_IP]); | ||
| 132 | } | ||
| 133 | |||
| 134 | static void handle_and_longjmp(int sig, siginfo_t *si, void *ctx_void) | ||
| 135 | { | ||
| 136 | ucontext_t *ctx = ctx_void; | ||
| 137 | |||
| 138 | printf("\tGot %s with RIP=%lx\n", signames[sig], | ||
| 139 | (unsigned long)ctx->uc_mcontext.gregs[REG_IP]); | ||
| 140 | |||
| 141 | siglongjmp(jmpbuf, 1); | ||
| 142 | } | ||
| 143 | |||
| 144 | int main() | ||
| 145 | { | ||
| 146 | unsigned long nr; | ||
| 147 | |||
| 148 | asm volatile ("mov %%ss, %[ss]" : [ss] "=m" (ss)); | ||
| 149 | printf("\tSS = 0x%hx, &SS = 0x%p\n", ss, &ss); | ||
| 150 | |||
| 151 | if (prctl(PR_SET_PTRACER, PR_SET_PTRACER_ANY, 0, 0, 0) == 0) | ||
| 152 | printf("\tPR_SET_PTRACER_ANY succeeded\n"); | ||
| 153 | |||
| 154 | printf("\tSet up a watchpoint\n"); | ||
| 155 | sethandler(SIGTRAP, sigtrap, 0); | ||
| 156 | enable_watchpoint(); | ||
| 157 | |||
| 158 | printf("[RUN]\tRead from watched memory (should get SIGTRAP)\n"); | ||
| 159 | asm volatile ("mov %[ss], %[tmp]" : [tmp] "=r" (nr) : [ss] "m" (ss)); | ||
| 160 | |||
| 161 | printf("[RUN]\tMOV SS; INT3\n"); | ||
| 162 | asm volatile ("mov %[ss], %%ss; int3" :: [ss] "m" (ss)); | ||
| 163 | |||
| 164 | printf("[RUN]\tMOV SS; INT 3\n"); | ||
| 165 | asm volatile ("mov %[ss], %%ss; .byte 0xcd, 0x3" :: [ss] "m" (ss)); | ||
| 166 | |||
| 167 | printf("[RUN]\tMOV SS; CS CS INT3\n"); | ||
| 168 | asm volatile ("mov %[ss], %%ss; .byte 0x2e, 0x2e; int3" :: [ss] "m" (ss)); | ||
| 169 | |||
| 170 | printf("[RUN]\tMOV SS; CSx14 INT3\n"); | ||
| 171 | asm volatile ("mov %[ss], %%ss; .fill 14,1,0x2e; int3" :: [ss] "m" (ss)); | ||
| 172 | |||
| 173 | printf("[RUN]\tMOV SS; INT 4\n"); | ||
| 174 | sethandler(SIGSEGV, handle_and_return, SA_RESETHAND); | ||
| 175 | asm volatile ("mov %[ss], %%ss; int $4" :: [ss] "m" (ss)); | ||
| 176 | |||
| 177 | #ifdef __i386__ | ||
| 178 | printf("[RUN]\tMOV SS; INTO\n"); | ||
| 179 | sethandler(SIGSEGV, handle_and_return, SA_RESETHAND); | ||
| 180 | nr = -1; | ||
| 181 | asm volatile ("add $1, %[tmp]; mov %[ss], %%ss; into" | ||
| 182 | : [tmp] "+r" (nr) : [ss] "m" (ss)); | ||
| 183 | #endif | ||
| 184 | |||
| 185 | if (sigsetjmp(jmpbuf, 1) == 0) { | ||
| 186 | printf("[RUN]\tMOV SS; ICEBP\n"); | ||
| 187 | |||
| 188 | /* Some emulators (e.g. QEMU TCG) don't emulate ICEBP. */ | ||
| 189 | sethandler(SIGILL, handle_and_longjmp, SA_RESETHAND); | ||
| 190 | |||
| 191 | asm volatile ("mov %[ss], %%ss; .byte 0xf1" :: [ss] "m" (ss)); | ||
| 192 | } | ||
| 193 | |||
| 194 | if (sigsetjmp(jmpbuf, 1) == 0) { | ||
| 195 | printf("[RUN]\tMOV SS; CLI\n"); | ||
| 196 | sethandler(SIGSEGV, handle_and_longjmp, SA_RESETHAND); | ||
| 197 | asm volatile ("mov %[ss], %%ss; cli" :: [ss] "m" (ss)); | ||
| 198 | } | ||
| 199 | |||
| 200 | if (sigsetjmp(jmpbuf, 1) == 0) { | ||
| 201 | printf("[RUN]\tMOV SS; #PF\n"); | ||
| 202 | sethandler(SIGSEGV, handle_and_longjmp, SA_RESETHAND); | ||
| 203 | asm volatile ("mov %[ss], %%ss; mov (-1), %[tmp]" | ||
| 204 | : [tmp] "=r" (nr) : [ss] "m" (ss)); | ||
| 205 | } | ||
| 206 | |||
| 207 | /* | ||
| 208 | * INT $1: if #DB has DPL=3 and there isn't special handling, | ||
| 209 | * then the kernel will die. | ||
| 210 | */ | ||
| 211 | if (sigsetjmp(jmpbuf, 1) == 0) { | ||
| 212 | printf("[RUN]\tMOV SS; INT 1\n"); | ||
| 213 | sethandler(SIGSEGV, handle_and_longjmp, SA_RESETHAND); | ||
| 214 | asm volatile ("mov %[ss], %%ss; int $1" :: [ss] "m" (ss)); | ||
| 215 | } | ||
| 216 | |||
| 217 | #ifdef __x86_64__ | ||
| 218 | /* | ||
| 219 | * In principle, we should test 32-bit SYSCALL as well, but | ||
| 220 | * the calling convention is so unpredictable that it's | ||
| 221 | * not obviously worth the effort. | ||
| 222 | */ | ||
| 223 | if (sigsetjmp(jmpbuf, 1) == 0) { | ||
| 224 | printf("[RUN]\tMOV SS; SYSCALL\n"); | ||
| 225 | sethandler(SIGILL, handle_and_longjmp, SA_RESETHAND); | ||
| 226 | nr = SYS_getpid; | ||
| 227 | /* | ||
| 228 | * Toggle the high bit of RSP to make it noncanonical to | ||
| 229 | * strengthen this test on non-SMAP systems. | ||
| 230 | */ | ||
| 231 | asm volatile ("btc $63, %%rsp\n\t" | ||
| 232 | "mov %[ss], %%ss; syscall\n\t" | ||
| 233 | "btc $63, %%rsp" | ||
| 234 | : "+a" (nr) : [ss] "m" (ss) | ||
| 235 | : "rcx" | ||
| 236 | #ifdef __x86_64__ | ||
| 237 | , "r11" | ||
| 238 | #endif | ||
| 239 | ); | ||
| 240 | } | ||
| 241 | #endif | ||
| 242 | |||
| 243 | printf("[RUN]\tMOV SS; breakpointed NOP\n"); | ||
| 244 | asm volatile ("mov %[ss], %%ss; breakpoint_insn: nop" :: [ss] "m" (ss)); | ||
| 245 | |||
| 246 | /* | ||
| 247 | * Invoking SYSENTER directly breaks all the rules. Just handle | ||
| 248 | * the SIGSEGV. | ||
| 249 | */ | ||
| 250 | if (sigsetjmp(jmpbuf, 1) == 0) { | ||
| 251 | printf("[RUN]\tMOV SS; SYSENTER\n"); | ||
| 252 | stack_t stack = { | ||
| 253 | .ss_sp = altstack_data, | ||
| 254 | .ss_size = SIGSTKSZ, | ||
| 255 | }; | ||
| 256 | if (sigaltstack(&stack, NULL) != 0) | ||
| 257 | err(1, "sigaltstack"); | ||
| 258 | sethandler(SIGSEGV, handle_and_longjmp, SA_RESETHAND | SA_ONSTACK); | ||
| 259 | nr = SYS_getpid; | ||
| 260 | asm volatile ("mov %[ss], %%ss; SYSENTER" : "+a" (nr) | ||
| 261 | : [ss] "m" (ss) : "flags", "rcx" | ||
| 262 | #ifdef __x86_64__ | ||
| 263 | , "r11" | ||
| 264 | #endif | ||
| 265 | ); | ||
| 266 | |||
| 267 | /* We're unreachable here. SYSENTER forgets RIP. */ | ||
| 268 | } | ||
| 269 | |||
| 270 | if (sigsetjmp(jmpbuf, 1) == 0) { | ||
| 271 | printf("[RUN]\tMOV SS; INT $0x80\n"); | ||
| 272 | sethandler(SIGSEGV, handle_and_longjmp, SA_RESETHAND); | ||
| 273 | nr = 20; /* compat getpid */ | ||
| 274 | asm volatile ("mov %[ss], %%ss; int $0x80" | ||
| 275 | : "+a" (nr) : [ss] "m" (ss) | ||
| 276 | : "flags" | ||
| 277 | #ifdef __x86_64__ | ||
| 278 | , "r8", "r9", "r10", "r11" | ||
| 279 | #endif | ||
| 280 | ); | ||
| 281 | } | ||
| 282 | |||
| 283 | printf("[OK]\tI aten't dead\n"); | ||
| 284 | return 0; | ||
| 285 | } | ||
diff --git a/tools/testing/selftests/x86/mpx-mini-test.c b/tools/testing/selftests/x86/mpx-mini-test.c index 9c0325e1ea68..50f7e9272481 100644 --- a/tools/testing/selftests/x86/mpx-mini-test.c +++ b/tools/testing/selftests/x86/mpx-mini-test.c | |||
| @@ -368,6 +368,11 @@ static int expected_bnd_index = -1; | |||
| 368 | uint64_t shadow_plb[NR_MPX_BOUNDS_REGISTERS][2]; /* shadow MPX bound registers */ | 368 | uint64_t shadow_plb[NR_MPX_BOUNDS_REGISTERS][2]; /* shadow MPX bound registers */ |
| 369 | unsigned long shadow_map[NR_MPX_BOUNDS_REGISTERS]; | 369 | unsigned long shadow_map[NR_MPX_BOUNDS_REGISTERS]; |
| 370 | 370 | ||
| 371 | /* Failed address bound checks: */ | ||
| 372 | #ifndef SEGV_BNDERR | ||
| 373 | # define SEGV_BNDERR 3 | ||
| 374 | #endif | ||
| 375 | |||
| 371 | /* | 376 | /* |
| 372 | * The kernel is supposed to provide some information about the bounds | 377 | * The kernel is supposed to provide some information about the bounds |
| 373 | * exception in the siginfo. It should match what we have in the bounds | 378 | * exception in the siginfo. It should match what we have in the bounds |
| @@ -419,8 +424,6 @@ void handler(int signum, siginfo_t *si, void *vucontext) | |||
| 419 | br_count++; | 424 | br_count++; |
| 420 | dprintf1("#BR 0x%jx (total seen: %d)\n", status, br_count); | 425 | dprintf1("#BR 0x%jx (total seen: %d)\n", status, br_count); |
| 421 | 426 | ||
| 422 | #define SEGV_BNDERR 3 /* failed address bound checks */ | ||
| 423 | |||
| 424 | dprintf2("Saw a #BR! status 0x%jx at %016lx br_reason: %jx\n", | 427 | dprintf2("Saw a #BR! status 0x%jx at %016lx br_reason: %jx\n", |
| 425 | status, ip, br_reason); | 428 | status, ip, br_reason); |
| 426 | dprintf2("si_signo: %d\n", si->si_signo); | 429 | dprintf2("si_signo: %d\n", si->si_signo); |
diff --git a/tools/testing/selftests/x86/pkey-helpers.h b/tools/testing/selftests/x86/pkey-helpers.h index b3cb7670e026..254e5436bdd9 100644 --- a/tools/testing/selftests/x86/pkey-helpers.h +++ b/tools/testing/selftests/x86/pkey-helpers.h | |||
| @@ -26,30 +26,26 @@ static inline void sigsafe_printf(const char *format, ...) | |||
| 26 | { | 26 | { |
| 27 | va_list ap; | 27 | va_list ap; |
| 28 | 28 | ||
| 29 | va_start(ap, format); | ||
| 30 | if (!dprint_in_signal) { | 29 | if (!dprint_in_signal) { |
| 30 | va_start(ap, format); | ||
| 31 | vprintf(format, ap); | 31 | vprintf(format, ap); |
| 32 | va_end(ap); | ||
| 32 | } else { | 33 | } else { |
| 33 | int ret; | 34 | int ret; |
| 34 | int len = vsnprintf(dprint_in_signal_buffer, | ||
| 35 | DPRINT_IN_SIGNAL_BUF_SIZE, | ||
| 36 | format, ap); | ||
| 37 | /* | 35 | /* |
| 38 | * len is amount that would have been printed, | 36 | * No printf() functions are signal-safe. |
| 39 | * but actual write is truncated at BUF_SIZE. | 37 | * They deadlock easily. Write the format |
| 38 | * string to get some output, even if | ||
| 39 | * incomplete. | ||
| 40 | */ | 40 | */ |
| 41 | if (len > DPRINT_IN_SIGNAL_BUF_SIZE) | 41 | ret = write(1, format, strlen(format)); |
| 42 | len = DPRINT_IN_SIGNAL_BUF_SIZE; | ||
| 43 | ret = write(1, dprint_in_signal_buffer, len); | ||
| 44 | if (ret < 0) | 42 | if (ret < 0) |
| 45 | abort(); | 43 | exit(1); |
| 46 | } | 44 | } |
| 47 | va_end(ap); | ||
| 48 | } | 45 | } |
| 49 | #define dprintf_level(level, args...) do { \ | 46 | #define dprintf_level(level, args...) do { \ |
| 50 | if (level <= DEBUG_LEVEL) \ | 47 | if (level <= DEBUG_LEVEL) \ |
| 51 | sigsafe_printf(args); \ | 48 | sigsafe_printf(args); \ |
| 52 | fflush(NULL); \ | ||
| 53 | } while (0) | 49 | } while (0) |
| 54 | #define dprintf0(args...) dprintf_level(0, args) | 50 | #define dprintf0(args...) dprintf_level(0, args) |
| 55 | #define dprintf1(args...) dprintf_level(1, args) | 51 | #define dprintf1(args...) dprintf_level(1, args) |
diff --git a/tools/testing/selftests/x86/protection_keys.c b/tools/testing/selftests/x86/protection_keys.c index f15aa5a76fe3..460b4bdf4c1e 100644 --- a/tools/testing/selftests/x86/protection_keys.c +++ b/tools/testing/selftests/x86/protection_keys.c | |||
| @@ -72,10 +72,9 @@ extern void abort_hooks(void); | |||
| 72 | test_nr, iteration_nr); \ | 72 | test_nr, iteration_nr); \ |
| 73 | dprintf0("errno at assert: %d", errno); \ | 73 | dprintf0("errno at assert: %d", errno); \ |
| 74 | abort_hooks(); \ | 74 | abort_hooks(); \ |
| 75 | assert(condition); \ | 75 | exit(__LINE__); \ |
| 76 | } \ | 76 | } \ |
| 77 | } while (0) | 77 | } while (0) |
| 78 | #define raw_assert(cond) assert(cond) | ||
| 79 | 78 | ||
| 80 | void cat_into_file(char *str, char *file) | 79 | void cat_into_file(char *str, char *file) |
| 81 | { | 80 | { |
| @@ -87,12 +86,17 @@ void cat_into_file(char *str, char *file) | |||
| 87 | * these need to be raw because they are called under | 86 | * these need to be raw because they are called under |
| 88 | * pkey_assert() | 87 | * pkey_assert() |
| 89 | */ | 88 | */ |
| 90 | raw_assert(fd >= 0); | 89 | if (fd < 0) { |
| 90 | fprintf(stderr, "error opening '%s'\n", str); | ||
| 91 | perror("error: "); | ||
| 92 | exit(__LINE__); | ||
| 93 | } | ||
| 94 | |||
| 91 | ret = write(fd, str, strlen(str)); | 95 | ret = write(fd, str, strlen(str)); |
| 92 | if (ret != strlen(str)) { | 96 | if (ret != strlen(str)) { |
| 93 | perror("write to file failed"); | 97 | perror("write to file failed"); |
| 94 | fprintf(stderr, "filename: '%s' str: '%s'\n", file, str); | 98 | fprintf(stderr, "filename: '%s' str: '%s'\n", file, str); |
| 95 | raw_assert(0); | 99 | exit(__LINE__); |
| 96 | } | 100 | } |
| 97 | close(fd); | 101 | close(fd); |
| 98 | } | 102 | } |
| @@ -191,26 +195,30 @@ void lots_o_noops_around_write(int *write_to_me) | |||
| 191 | #ifdef __i386__ | 195 | #ifdef __i386__ |
| 192 | 196 | ||
| 193 | #ifndef SYS_mprotect_key | 197 | #ifndef SYS_mprotect_key |
| 194 | # define SYS_mprotect_key 380 | 198 | # define SYS_mprotect_key 380 |
| 195 | #endif | 199 | #endif |
| 200 | |||
| 196 | #ifndef SYS_pkey_alloc | 201 | #ifndef SYS_pkey_alloc |
| 197 | # define SYS_pkey_alloc 381 | 202 | # define SYS_pkey_alloc 381 |
| 198 | # define SYS_pkey_free 382 | 203 | # define SYS_pkey_free 382 |
| 199 | #endif | 204 | #endif |
| 200 | #define REG_IP_IDX REG_EIP | 205 | |
| 201 | #define si_pkey_offset 0x14 | 206 | #define REG_IP_IDX REG_EIP |
| 207 | #define si_pkey_offset 0x14 | ||
| 202 | 208 | ||
| 203 | #else | 209 | #else |
| 204 | 210 | ||
| 205 | #ifndef SYS_mprotect_key | 211 | #ifndef SYS_mprotect_key |
| 206 | # define SYS_mprotect_key 329 | 212 | # define SYS_mprotect_key 329 |
| 207 | #endif | 213 | #endif |
| 214 | |||
| 208 | #ifndef SYS_pkey_alloc | 215 | #ifndef SYS_pkey_alloc |
| 209 | # define SYS_pkey_alloc 330 | 216 | # define SYS_pkey_alloc 330 |
| 210 | # define SYS_pkey_free 331 | 217 | # define SYS_pkey_free 331 |
| 211 | #endif | 218 | #endif |
| 212 | #define REG_IP_IDX REG_RIP | 219 | |
| 213 | #define si_pkey_offset 0x20 | 220 | #define REG_IP_IDX REG_RIP |
| 221 | #define si_pkey_offset 0x20 | ||
| 214 | 222 | ||
| 215 | #endif | 223 | #endif |
| 216 | 224 | ||
| @@ -225,8 +233,14 @@ void dump_mem(void *dumpme, int len_bytes) | |||
| 225 | } | 233 | } |
| 226 | } | 234 | } |
| 227 | 235 | ||
| 228 | #define SEGV_BNDERR 3 /* failed address bound checks */ | 236 | /* Failed address bound checks: */ |
| 229 | #define SEGV_PKUERR 4 | 237 | #ifndef SEGV_BNDERR |
| 238 | # define SEGV_BNDERR 3 | ||
| 239 | #endif | ||
| 240 | |||
| 241 | #ifndef SEGV_PKUERR | ||
| 242 | # define SEGV_PKUERR 4 | ||
| 243 | #endif | ||
| 230 | 244 | ||
| 231 | static char *si_code_str(int si_code) | 245 | static char *si_code_str(int si_code) |
| 232 | { | 246 | { |
| @@ -289,13 +303,6 @@ void signal_handler(int signum, siginfo_t *si, void *vucontext) | |||
| 289 | dump_mem(pkru_ptr - 128, 256); | 303 | dump_mem(pkru_ptr - 128, 256); |
| 290 | pkey_assert(*pkru_ptr); | 304 | pkey_assert(*pkru_ptr); |
| 291 | 305 | ||
| 292 | si_pkey_ptr = (u32 *)(((u8 *)si) + si_pkey_offset); | ||
| 293 | dprintf1("si_pkey_ptr: %p\n", si_pkey_ptr); | ||
| 294 | dump_mem(si_pkey_ptr - 8, 24); | ||
| 295 | siginfo_pkey = *si_pkey_ptr; | ||
| 296 | pkey_assert(siginfo_pkey < NR_PKEYS); | ||
| 297 | last_si_pkey = siginfo_pkey; | ||
| 298 | |||
| 299 | if ((si->si_code == SEGV_MAPERR) || | 306 | if ((si->si_code == SEGV_MAPERR) || |
| 300 | (si->si_code == SEGV_ACCERR) || | 307 | (si->si_code == SEGV_ACCERR) || |
| 301 | (si->si_code == SEGV_BNDERR)) { | 308 | (si->si_code == SEGV_BNDERR)) { |
| @@ -303,6 +310,13 @@ void signal_handler(int signum, siginfo_t *si, void *vucontext) | |||
| 303 | exit(4); | 310 | exit(4); |
| 304 | } | 311 | } |
| 305 | 312 | ||
| 313 | si_pkey_ptr = (u32 *)(((u8 *)si) + si_pkey_offset); | ||
| 314 | dprintf1("si_pkey_ptr: %p\n", si_pkey_ptr); | ||
| 315 | dump_mem((u8 *)si_pkey_ptr - 8, 24); | ||
| 316 | siginfo_pkey = *si_pkey_ptr; | ||
| 317 | pkey_assert(siginfo_pkey < NR_PKEYS); | ||
| 318 | last_si_pkey = siginfo_pkey; | ||
| 319 | |||
| 306 | dprintf1("signal pkru from xsave: %08x\n", *pkru_ptr); | 320 | dprintf1("signal pkru from xsave: %08x\n", *pkru_ptr); |
| 307 | /* need __rdpkru() version so we do not do shadow_pkru checking */ | 321 | /* need __rdpkru() version so we do not do shadow_pkru checking */ |
| 308 | dprintf1("signal pkru from pkru: %08x\n", __rdpkru()); | 322 | dprintf1("signal pkru from pkru: %08x\n", __rdpkru()); |
| @@ -311,22 +325,6 @@ void signal_handler(int signum, siginfo_t *si, void *vucontext) | |||
| 311 | dprintf1("WARNING: set PRKU=0 to allow faulting instruction to continue\n"); | 325 | dprintf1("WARNING: set PRKU=0 to allow faulting instruction to continue\n"); |
| 312 | pkru_faults++; | 326 | pkru_faults++; |
| 313 | dprintf1("<<<<==================================================\n"); | 327 | dprintf1("<<<<==================================================\n"); |
| 314 | return; | ||
| 315 | if (trapno == 14) { | ||
| 316 | fprintf(stderr, | ||
| 317 | "ERROR: In signal handler, page fault, trapno = %d, ip = %016lx\n", | ||
| 318 | trapno, ip); | ||
| 319 | fprintf(stderr, "si_addr %p\n", si->si_addr); | ||
| 320 | fprintf(stderr, "REG_ERR: %lx\n", | ||
| 321 | (unsigned long)uctxt->uc_mcontext.gregs[REG_ERR]); | ||
| 322 | exit(1); | ||
| 323 | } else { | ||
| 324 | fprintf(stderr, "unexpected trap %d! at 0x%lx\n", trapno, ip); | ||
| 325 | fprintf(stderr, "si_addr %p\n", si->si_addr); | ||
| 326 | fprintf(stderr, "REG_ERR: %lx\n", | ||
| 327 | (unsigned long)uctxt->uc_mcontext.gregs[REG_ERR]); | ||
| 328 | exit(2); | ||
| 329 | } | ||
| 330 | dprint_in_signal = 0; | 328 | dprint_in_signal = 0; |
| 331 | } | 329 | } |
| 332 | 330 | ||
| @@ -393,10 +391,15 @@ pid_t fork_lazy_child(void) | |||
| 393 | return forkret; | 391 | return forkret; |
| 394 | } | 392 | } |
| 395 | 393 | ||
| 396 | #define PKEY_DISABLE_ACCESS 0x1 | 394 | #ifndef PKEY_DISABLE_ACCESS |
| 397 | #define PKEY_DISABLE_WRITE 0x2 | 395 | # define PKEY_DISABLE_ACCESS 0x1 |
| 396 | #endif | ||
| 397 | |||
| 398 | #ifndef PKEY_DISABLE_WRITE | ||
| 399 | # define PKEY_DISABLE_WRITE 0x2 | ||
| 400 | #endif | ||
| 398 | 401 | ||
| 399 | u32 pkey_get(int pkey, unsigned long flags) | 402 | static u32 hw_pkey_get(int pkey, unsigned long flags) |
| 400 | { | 403 | { |
| 401 | u32 mask = (PKEY_DISABLE_ACCESS|PKEY_DISABLE_WRITE); | 404 | u32 mask = (PKEY_DISABLE_ACCESS|PKEY_DISABLE_WRITE); |
| 402 | u32 pkru = __rdpkru(); | 405 | u32 pkru = __rdpkru(); |
| @@ -418,7 +421,7 @@ u32 pkey_get(int pkey, unsigned long flags) | |||
| 418 | return masked_pkru; | 421 | return masked_pkru; |
| 419 | } | 422 | } |
| 420 | 423 | ||
| 421 | int pkey_set(int pkey, unsigned long rights, unsigned long flags) | 424 | static int hw_pkey_set(int pkey, unsigned long rights, unsigned long flags) |
| 422 | { | 425 | { |
| 423 | u32 mask = (PKEY_DISABLE_ACCESS|PKEY_DISABLE_WRITE); | 426 | u32 mask = (PKEY_DISABLE_ACCESS|PKEY_DISABLE_WRITE); |
| 424 | u32 old_pkru = __rdpkru(); | 427 | u32 old_pkru = __rdpkru(); |
| @@ -452,15 +455,15 @@ void pkey_disable_set(int pkey, int flags) | |||
| 452 | pkey, flags); | 455 | pkey, flags); |
| 453 | pkey_assert(flags & (PKEY_DISABLE_ACCESS | PKEY_DISABLE_WRITE)); | 456 | pkey_assert(flags & (PKEY_DISABLE_ACCESS | PKEY_DISABLE_WRITE)); |
| 454 | 457 | ||
| 455 | pkey_rights = pkey_get(pkey, syscall_flags); | 458 | pkey_rights = hw_pkey_get(pkey, syscall_flags); |
| 456 | 459 | ||
| 457 | dprintf1("%s(%d) pkey_get(%d): %x\n", __func__, | 460 | dprintf1("%s(%d) hw_pkey_get(%d): %x\n", __func__, |
| 458 | pkey, pkey, pkey_rights); | 461 | pkey, pkey, pkey_rights); |
| 459 | pkey_assert(pkey_rights >= 0); | 462 | pkey_assert(pkey_rights >= 0); |
| 460 | 463 | ||
| 461 | pkey_rights |= flags; | 464 | pkey_rights |= flags; |
| 462 | 465 | ||
| 463 | ret = pkey_set(pkey, pkey_rights, syscall_flags); | 466 | ret = hw_pkey_set(pkey, pkey_rights, syscall_flags); |
| 464 | assert(!ret); | 467 | assert(!ret); |
| 465 | /*pkru and flags have the same format */ | 468 | /*pkru and flags have the same format */ |
| 466 | shadow_pkru |= flags << (pkey * 2); | 469 | shadow_pkru |= flags << (pkey * 2); |
| @@ -468,8 +471,8 @@ void pkey_disable_set(int pkey, int flags) | |||
| 468 | 471 | ||
| 469 | pkey_assert(ret >= 0); | 472 | pkey_assert(ret >= 0); |
| 470 | 473 | ||
| 471 | pkey_rights = pkey_get(pkey, syscall_flags); | 474 | pkey_rights = hw_pkey_get(pkey, syscall_flags); |
| 472 | dprintf1("%s(%d) pkey_get(%d): %x\n", __func__, | 475 | dprintf1("%s(%d) hw_pkey_get(%d): %x\n", __func__, |
| 473 | pkey, pkey, pkey_rights); | 476 | pkey, pkey, pkey_rights); |
| 474 | 477 | ||
| 475 | dprintf1("%s(%d) pkru: 0x%x\n", __func__, pkey, rdpkru()); | 478 | dprintf1("%s(%d) pkru: 0x%x\n", __func__, pkey, rdpkru()); |
| @@ -483,24 +486,24 @@ void pkey_disable_clear(int pkey, int flags) | |||
| 483 | { | 486 | { |
| 484 | unsigned long syscall_flags = 0; | 487 | unsigned long syscall_flags = 0; |
| 485 | int ret; | 488 | int ret; |
| 486 | int pkey_rights = pkey_get(pkey, syscall_flags); | 489 | int pkey_rights = hw_pkey_get(pkey, syscall_flags); |
| 487 | u32 orig_pkru = rdpkru(); | 490 | u32 orig_pkru = rdpkru(); |
| 488 | 491 | ||
| 489 | pkey_assert(flags & (PKEY_DISABLE_ACCESS | PKEY_DISABLE_WRITE)); | 492 | pkey_assert(flags & (PKEY_DISABLE_ACCESS | PKEY_DISABLE_WRITE)); |
| 490 | 493 | ||
| 491 | dprintf1("%s(%d) pkey_get(%d): %x\n", __func__, | 494 | dprintf1("%s(%d) hw_pkey_get(%d): %x\n", __func__, |
| 492 | pkey, pkey, pkey_rights); | 495 | pkey, pkey, pkey_rights); |
| 493 | pkey_assert(pkey_rights >= 0); | 496 | pkey_assert(pkey_rights >= 0); |
| 494 | 497 | ||
| 495 | pkey_rights |= flags; | 498 | pkey_rights |= flags; |
| 496 | 499 | ||
| 497 | ret = pkey_set(pkey, pkey_rights, 0); | 500 | ret = hw_pkey_set(pkey, pkey_rights, 0); |
| 498 | /* pkru and flags have the same format */ | 501 | /* pkru and flags have the same format */ |
| 499 | shadow_pkru &= ~(flags << (pkey * 2)); | 502 | shadow_pkru &= ~(flags << (pkey * 2)); |
| 500 | pkey_assert(ret >= 0); | 503 | pkey_assert(ret >= 0); |
| 501 | 504 | ||
| 502 | pkey_rights = pkey_get(pkey, syscall_flags); | 505 | pkey_rights = hw_pkey_get(pkey, syscall_flags); |
| 503 | dprintf1("%s(%d) pkey_get(%d): %x\n", __func__, | 506 | dprintf1("%s(%d) hw_pkey_get(%d): %x\n", __func__, |
| 504 | pkey, pkey, pkey_rights); | 507 | pkey, pkey, pkey_rights); |
| 505 | 508 | ||
| 506 | dprintf1("%s(%d) pkru: 0x%x\n", __func__, pkey, rdpkru()); | 509 | dprintf1("%s(%d) pkru: 0x%x\n", __func__, pkey, rdpkru()); |
| @@ -674,10 +677,12 @@ int mprotect_pkey(void *ptr, size_t size, unsigned long orig_prot, | |||
| 674 | struct pkey_malloc_record { | 677 | struct pkey_malloc_record { |
| 675 | void *ptr; | 678 | void *ptr; |
| 676 | long size; | 679 | long size; |
| 680 | int prot; | ||
| 677 | }; | 681 | }; |
| 678 | struct pkey_malloc_record *pkey_malloc_records; | 682 | struct pkey_malloc_record *pkey_malloc_records; |
| 683 | struct pkey_malloc_record *pkey_last_malloc_record; | ||
| 679 | long nr_pkey_malloc_records; | 684 | long nr_pkey_malloc_records; |
| 680 | void record_pkey_malloc(void *ptr, long size) | 685 | void record_pkey_malloc(void *ptr, long size, int prot) |
| 681 | { | 686 | { |
| 682 | long i; | 687 | long i; |
| 683 | struct pkey_malloc_record *rec = NULL; | 688 | struct pkey_malloc_record *rec = NULL; |
| @@ -709,6 +714,8 @@ void record_pkey_malloc(void *ptr, long size) | |||
| 709 | (int)(rec - pkey_malloc_records), rec, ptr, size); | 714 | (int)(rec - pkey_malloc_records), rec, ptr, size); |
| 710 | rec->ptr = ptr; | 715 | rec->ptr = ptr; |
| 711 | rec->size = size; | 716 | rec->size = size; |
| 717 | rec->prot = prot; | ||
| 718 | pkey_last_malloc_record = rec; | ||
| 712 | nr_pkey_malloc_records++; | 719 | nr_pkey_malloc_records++; |
| 713 | } | 720 | } |
| 714 | 721 | ||
| @@ -753,7 +760,7 @@ void *malloc_pkey_with_mprotect(long size, int prot, u16 pkey) | |||
| 753 | pkey_assert(ptr != (void *)-1); | 760 | pkey_assert(ptr != (void *)-1); |
| 754 | ret = mprotect_pkey((void *)ptr, PAGE_SIZE, prot, pkey); | 761 | ret = mprotect_pkey((void *)ptr, PAGE_SIZE, prot, pkey); |
| 755 | pkey_assert(!ret); | 762 | pkey_assert(!ret); |
| 756 | record_pkey_malloc(ptr, size); | 763 | record_pkey_malloc(ptr, size, prot); |
| 757 | rdpkru(); | 764 | rdpkru(); |
| 758 | 765 | ||
| 759 | dprintf1("%s() for pkey %d @ %p\n", __func__, pkey, ptr); | 766 | dprintf1("%s() for pkey %d @ %p\n", __func__, pkey, ptr); |
| @@ -774,7 +781,7 @@ void *malloc_pkey_anon_huge(long size, int prot, u16 pkey) | |||
| 774 | size = ALIGN_UP(size, HPAGE_SIZE * 2); | 781 | size = ALIGN_UP(size, HPAGE_SIZE * 2); |
| 775 | ptr = mmap(NULL, size, PROT_NONE, MAP_ANONYMOUS|MAP_PRIVATE, -1, 0); | 782 | ptr = mmap(NULL, size, PROT_NONE, MAP_ANONYMOUS|MAP_PRIVATE, -1, 0); |
| 776 | pkey_assert(ptr != (void *)-1); | 783 | pkey_assert(ptr != (void *)-1); |
| 777 | record_pkey_malloc(ptr, size); | 784 | record_pkey_malloc(ptr, size, prot); |
| 778 | mprotect_pkey(ptr, size, prot, pkey); | 785 | mprotect_pkey(ptr, size, prot, pkey); |
| 779 | 786 | ||
| 780 | dprintf1("unaligned ptr: %p\n", ptr); | 787 | dprintf1("unaligned ptr: %p\n", ptr); |
| @@ -847,7 +854,7 @@ void *malloc_pkey_hugetlb(long size, int prot, u16 pkey) | |||
| 847 | pkey_assert(ptr != (void *)-1); | 854 | pkey_assert(ptr != (void *)-1); |
| 848 | mprotect_pkey(ptr, size, prot, pkey); | 855 | mprotect_pkey(ptr, size, prot, pkey); |
| 849 | 856 | ||
| 850 | record_pkey_malloc(ptr, size); | 857 | record_pkey_malloc(ptr, size, prot); |
| 851 | 858 | ||
| 852 | dprintf1("mmap()'d hugetlbfs for pkey %d @ %p\n", pkey, ptr); | 859 | dprintf1("mmap()'d hugetlbfs for pkey %d @ %p\n", pkey, ptr); |
| 853 | return ptr; | 860 | return ptr; |
| @@ -869,7 +876,7 @@ void *malloc_pkey_mmap_dax(long size, int prot, u16 pkey) | |||
| 869 | 876 | ||
| 870 | mprotect_pkey(ptr, size, prot, pkey); | 877 | mprotect_pkey(ptr, size, prot, pkey); |
| 871 | 878 | ||
| 872 | record_pkey_malloc(ptr, size); | 879 | record_pkey_malloc(ptr, size, prot); |
| 873 | 880 | ||
| 874 | dprintf1("mmap()'d for pkey %d @ %p\n", pkey, ptr); | 881 | dprintf1("mmap()'d for pkey %d @ %p\n", pkey, ptr); |
| 875 | close(fd); | 882 | close(fd); |
| @@ -918,13 +925,21 @@ void *malloc_pkey(long size, int prot, u16 pkey) | |||
| 918 | } | 925 | } |
| 919 | 926 | ||
| 920 | int last_pkru_faults; | 927 | int last_pkru_faults; |
| 928 | #define UNKNOWN_PKEY -2 | ||
| 921 | void expected_pk_fault(int pkey) | 929 | void expected_pk_fault(int pkey) |
| 922 | { | 930 | { |
| 923 | dprintf2("%s(): last_pkru_faults: %d pkru_faults: %d\n", | 931 | dprintf2("%s(): last_pkru_faults: %d pkru_faults: %d\n", |
| 924 | __func__, last_pkru_faults, pkru_faults); | 932 | __func__, last_pkru_faults, pkru_faults); |
| 925 | dprintf2("%s(%d): last_si_pkey: %d\n", __func__, pkey, last_si_pkey); | 933 | dprintf2("%s(%d): last_si_pkey: %d\n", __func__, pkey, last_si_pkey); |
| 926 | pkey_assert(last_pkru_faults + 1 == pkru_faults); | 934 | pkey_assert(last_pkru_faults + 1 == pkru_faults); |
| 927 | pkey_assert(last_si_pkey == pkey); | 935 | |
| 936 | /* | ||
| 937 | * For exec-only memory, we do not know the pkey in | ||
| 938 | * advance, so skip this check. | ||
| 939 | */ | ||
| 940 | if (pkey != UNKNOWN_PKEY) | ||
| 941 | pkey_assert(last_si_pkey == pkey); | ||
| 942 | |||
| 928 | /* | 943 | /* |
| 929 | * The signal handler shold have cleared out PKRU to let the | 944 | * The signal handler shold have cleared out PKRU to let the |
| 930 | * test program continue. We now have to restore it. | 945 | * test program continue. We now have to restore it. |
| @@ -939,10 +954,11 @@ void expected_pk_fault(int pkey) | |||
| 939 | last_si_pkey = -1; | 954 | last_si_pkey = -1; |
| 940 | } | 955 | } |
| 941 | 956 | ||
| 942 | void do_not_expect_pk_fault(void) | 957 | #define do_not_expect_pk_fault(msg) do { \ |
| 943 | { | 958 | if (last_pkru_faults != pkru_faults) \ |
| 944 | pkey_assert(last_pkru_faults == pkru_faults); | 959 | dprintf0("unexpected PK fault: %s\n", msg); \ |
| 945 | } | 960 | pkey_assert(last_pkru_faults == pkru_faults); \ |
| 961 | } while (0) | ||
| 946 | 962 | ||
| 947 | int test_fds[10] = { -1 }; | 963 | int test_fds[10] = { -1 }; |
| 948 | int nr_test_fds; | 964 | int nr_test_fds; |
| @@ -1151,12 +1167,15 @@ void test_pkey_alloc_exhaust(int *ptr, u16 pkey) | |||
| 1151 | pkey_assert(i < NR_PKEYS*2); | 1167 | pkey_assert(i < NR_PKEYS*2); |
| 1152 | 1168 | ||
| 1153 | /* | 1169 | /* |
| 1154 | * There are 16 pkeys supported in hardware. One is taken | 1170 | * There are 16 pkeys supported in hardware. Three are |
| 1155 | * up for the default (0) and another can be taken up by | 1171 | * allocated by the time we get here: |
| 1156 | * an execute-only mapping. Ensure that we can allocate | 1172 | * 1. The default key (0) |
| 1157 | * at least 14 (16-2). | 1173 | * 2. One possibly consumed by an execute-only mapping. |
| 1174 | * 3. One allocated by the test code and passed in via | ||
| 1175 | * 'pkey' to this function. | ||
| 1176 | * Ensure that we can allocate at least another 13 (16-3). | ||
| 1158 | */ | 1177 | */ |
| 1159 | pkey_assert(i >= NR_PKEYS-2); | 1178 | pkey_assert(i >= NR_PKEYS-3); |
| 1160 | 1179 | ||
| 1161 | for (i = 0; i < nr_allocated_pkeys; i++) { | 1180 | for (i = 0; i < nr_allocated_pkeys; i++) { |
| 1162 | err = sys_pkey_free(allocated_pkeys[i]); | 1181 | err = sys_pkey_free(allocated_pkeys[i]); |
| @@ -1165,6 +1184,35 @@ void test_pkey_alloc_exhaust(int *ptr, u16 pkey) | |||
| 1165 | } | 1184 | } |
| 1166 | } | 1185 | } |
| 1167 | 1186 | ||
| 1187 | /* | ||
| 1188 | * pkey 0 is special. It is allocated by default, so you do not | ||
| 1189 | * have to call pkey_alloc() to use it first. Make sure that it | ||
| 1190 | * is usable. | ||
| 1191 | */ | ||
| 1192 | void test_mprotect_with_pkey_0(int *ptr, u16 pkey) | ||
| 1193 | { | ||
| 1194 | long size; | ||
| 1195 | int prot; | ||
| 1196 | |||
| 1197 | assert(pkey_last_malloc_record); | ||
| 1198 | size = pkey_last_malloc_record->size; | ||
| 1199 | /* | ||
| 1200 | * This is a bit of a hack. But mprotect() requires | ||
| 1201 | * huge-page-aligned sizes when operating on hugetlbfs. | ||
| 1202 | * So, make sure that we use something that's a multiple | ||
| 1203 | * of a huge page when we can. | ||
| 1204 | */ | ||
| 1205 | if (size >= HPAGE_SIZE) | ||
| 1206 | size = HPAGE_SIZE; | ||
| 1207 | prot = pkey_last_malloc_record->prot; | ||
| 1208 | |||
| 1209 | /* Use pkey 0 */ | ||
| 1210 | mprotect_pkey(ptr, size, prot, 0); | ||
| 1211 | |||
| 1212 | /* Make sure that we can set it back to the original pkey. */ | ||
| 1213 | mprotect_pkey(ptr, size, prot, pkey); | ||
| 1214 | } | ||
| 1215 | |||
| 1168 | void test_ptrace_of_child(int *ptr, u16 pkey) | 1216 | void test_ptrace_of_child(int *ptr, u16 pkey) |
| 1169 | { | 1217 | { |
| 1170 | __attribute__((__unused__)) int peek_result; | 1218 | __attribute__((__unused__)) int peek_result; |
| @@ -1228,7 +1276,7 @@ void test_ptrace_of_child(int *ptr, u16 pkey) | |||
| 1228 | pkey_assert(ret != -1); | 1276 | pkey_assert(ret != -1); |
| 1229 | /* Now access from the current task, and expect NO exception: */ | 1277 | /* Now access from the current task, and expect NO exception: */ |
| 1230 | peek_result = read_ptr(plain_ptr); | 1278 | peek_result = read_ptr(plain_ptr); |
| 1231 | do_not_expect_pk_fault(); | 1279 | do_not_expect_pk_fault("read plain pointer after ptrace"); |
| 1232 | 1280 | ||
| 1233 | ret = ptrace(PTRACE_DETACH, child_pid, ignored, 0); | 1281 | ret = ptrace(PTRACE_DETACH, child_pid, ignored, 0); |
| 1234 | pkey_assert(ret != -1); | 1282 | pkey_assert(ret != -1); |
| @@ -1241,12 +1289,9 @@ void test_ptrace_of_child(int *ptr, u16 pkey) | |||
| 1241 | free(plain_ptr_unaligned); | 1289 | free(plain_ptr_unaligned); |
| 1242 | } | 1290 | } |
| 1243 | 1291 | ||
| 1244 | void test_executing_on_unreadable_memory(int *ptr, u16 pkey) | 1292 | void *get_pointer_to_instructions(void) |
| 1245 | { | 1293 | { |
| 1246 | void *p1; | 1294 | void *p1; |
| 1247 | int scratch; | ||
| 1248 | int ptr_contents; | ||
| 1249 | int ret; | ||
| 1250 | 1295 | ||
| 1251 | p1 = ALIGN_PTR_UP(&lots_o_noops_around_write, PAGE_SIZE); | 1296 | p1 = ALIGN_PTR_UP(&lots_o_noops_around_write, PAGE_SIZE); |
| 1252 | dprintf3("&lots_o_noops: %p\n", &lots_o_noops_around_write); | 1297 | dprintf3("&lots_o_noops: %p\n", &lots_o_noops_around_write); |
| @@ -1256,7 +1301,23 @@ void test_executing_on_unreadable_memory(int *ptr, u16 pkey) | |||
| 1256 | /* Point 'p1' at the *second* page of the function: */ | 1301 | /* Point 'p1' at the *second* page of the function: */ |
| 1257 | p1 += PAGE_SIZE; | 1302 | p1 += PAGE_SIZE; |
| 1258 | 1303 | ||
| 1304 | /* | ||
| 1305 | * Try to ensure we fault this in on next touch to ensure | ||
| 1306 | * we get an instruction fault as opposed to a data one | ||
| 1307 | */ | ||
| 1259 | madvise(p1, PAGE_SIZE, MADV_DONTNEED); | 1308 | madvise(p1, PAGE_SIZE, MADV_DONTNEED); |
| 1309 | |||
| 1310 | return p1; | ||
| 1311 | } | ||
| 1312 | |||
| 1313 | void test_executing_on_unreadable_memory(int *ptr, u16 pkey) | ||
| 1314 | { | ||
| 1315 | void *p1; | ||
| 1316 | int scratch; | ||
| 1317 | int ptr_contents; | ||
| 1318 | int ret; | ||
| 1319 | |||
| 1320 | p1 = get_pointer_to_instructions(); | ||
| 1260 | lots_o_noops_around_write(&scratch); | 1321 | lots_o_noops_around_write(&scratch); |
| 1261 | ptr_contents = read_ptr(p1); | 1322 | ptr_contents = read_ptr(p1); |
| 1262 | dprintf2("ptr (%p) contents@%d: %x\n", p1, __LINE__, ptr_contents); | 1323 | dprintf2("ptr (%p) contents@%d: %x\n", p1, __LINE__, ptr_contents); |
| @@ -1272,12 +1333,55 @@ void test_executing_on_unreadable_memory(int *ptr, u16 pkey) | |||
| 1272 | */ | 1333 | */ |
| 1273 | madvise(p1, PAGE_SIZE, MADV_DONTNEED); | 1334 | madvise(p1, PAGE_SIZE, MADV_DONTNEED); |
| 1274 | lots_o_noops_around_write(&scratch); | 1335 | lots_o_noops_around_write(&scratch); |
| 1275 | do_not_expect_pk_fault(); | 1336 | do_not_expect_pk_fault("executing on PROT_EXEC memory"); |
| 1276 | ptr_contents = read_ptr(p1); | 1337 | ptr_contents = read_ptr(p1); |
| 1277 | dprintf2("ptr (%p) contents@%d: %x\n", p1, __LINE__, ptr_contents); | 1338 | dprintf2("ptr (%p) contents@%d: %x\n", p1, __LINE__, ptr_contents); |
| 1278 | expected_pk_fault(pkey); | 1339 | expected_pk_fault(pkey); |
| 1279 | } | 1340 | } |
| 1280 | 1341 | ||
| 1342 | void test_implicit_mprotect_exec_only_memory(int *ptr, u16 pkey) | ||
| 1343 | { | ||
| 1344 | void *p1; | ||
| 1345 | int scratch; | ||
| 1346 | int ptr_contents; | ||
| 1347 | int ret; | ||
| 1348 | |||
| 1349 | dprintf1("%s() start\n", __func__); | ||
| 1350 | |||
| 1351 | p1 = get_pointer_to_instructions(); | ||
| 1352 | lots_o_noops_around_write(&scratch); | ||
| 1353 | ptr_contents = read_ptr(p1); | ||
| 1354 | dprintf2("ptr (%p) contents@%d: %x\n", p1, __LINE__, ptr_contents); | ||
| 1355 | |||
| 1356 | /* Use a *normal* mprotect(), not mprotect_pkey(): */ | ||
| 1357 | ret = mprotect(p1, PAGE_SIZE, PROT_EXEC); | ||
| 1358 | pkey_assert(!ret); | ||
| 1359 | |||
| 1360 | dprintf2("pkru: %x\n", rdpkru()); | ||
| 1361 | |||
| 1362 | /* Make sure this is an *instruction* fault */ | ||
| 1363 | madvise(p1, PAGE_SIZE, MADV_DONTNEED); | ||
| 1364 | lots_o_noops_around_write(&scratch); | ||
| 1365 | do_not_expect_pk_fault("executing on PROT_EXEC memory"); | ||
| 1366 | ptr_contents = read_ptr(p1); | ||
| 1367 | dprintf2("ptr (%p) contents@%d: %x\n", p1, __LINE__, ptr_contents); | ||
| 1368 | expected_pk_fault(UNKNOWN_PKEY); | ||
| 1369 | |||
| 1370 | /* | ||
| 1371 | * Put the memory back to non-PROT_EXEC. Should clear the | ||
| 1372 | * exec-only pkey off the VMA and allow it to be readable | ||
| 1373 | * again. Go to PROT_NONE first to check for a kernel bug | ||
| 1374 | * that did not clear the pkey when doing PROT_NONE. | ||
| 1375 | */ | ||
| 1376 | ret = mprotect(p1, PAGE_SIZE, PROT_NONE); | ||
| 1377 | pkey_assert(!ret); | ||
| 1378 | |||
| 1379 | ret = mprotect(p1, PAGE_SIZE, PROT_READ|PROT_EXEC); | ||
| 1380 | pkey_assert(!ret); | ||
| 1381 | ptr_contents = read_ptr(p1); | ||
| 1382 | do_not_expect_pk_fault("plain read on recently PROT_EXEC area"); | ||
| 1383 | } | ||
| 1384 | |||
| 1281 | void test_mprotect_pkey_on_unsupported_cpu(int *ptr, u16 pkey) | 1385 | void test_mprotect_pkey_on_unsupported_cpu(int *ptr, u16 pkey) |
| 1282 | { | 1386 | { |
| 1283 | int size = PAGE_SIZE; | 1387 | int size = PAGE_SIZE; |
| @@ -1302,6 +1406,8 @@ void (*pkey_tests[])(int *ptr, u16 pkey) = { | |||
| 1302 | test_kernel_gup_of_access_disabled_region, | 1406 | test_kernel_gup_of_access_disabled_region, |
| 1303 | test_kernel_gup_write_to_write_disabled_region, | 1407 | test_kernel_gup_write_to_write_disabled_region, |
| 1304 | test_executing_on_unreadable_memory, | 1408 | test_executing_on_unreadable_memory, |
| 1409 | test_implicit_mprotect_exec_only_memory, | ||
| 1410 | test_mprotect_with_pkey_0, | ||
| 1305 | test_ptrace_of_child, | 1411 | test_ptrace_of_child, |
| 1306 | test_pkey_syscalls_on_non_allocated_pkey, | 1412 | test_pkey_syscalls_on_non_allocated_pkey, |
| 1307 | test_pkey_syscalls_bad_args, | 1413 | test_pkey_syscalls_bad_args, |
diff --git a/virt/kvm/arm/vgic/vgic-debug.c b/virt/kvm/arm/vgic/vgic-debug.c index 10b38178cff2..4ffc0b5e6105 100644 --- a/virt/kvm/arm/vgic/vgic-debug.c +++ b/virt/kvm/arm/vgic/vgic-debug.c | |||
| @@ -211,6 +211,7 @@ static int vgic_debug_show(struct seq_file *s, void *v) | |||
| 211 | struct vgic_state_iter *iter = (struct vgic_state_iter *)v; | 211 | struct vgic_state_iter *iter = (struct vgic_state_iter *)v; |
| 212 | struct vgic_irq *irq; | 212 | struct vgic_irq *irq; |
| 213 | struct kvm_vcpu *vcpu = NULL; | 213 | struct kvm_vcpu *vcpu = NULL; |
| 214 | unsigned long flags; | ||
| 214 | 215 | ||
| 215 | if (iter->dist_id == 0) { | 216 | if (iter->dist_id == 0) { |
| 216 | print_dist_state(s, &kvm->arch.vgic); | 217 | print_dist_state(s, &kvm->arch.vgic); |
| @@ -227,9 +228,9 @@ static int vgic_debug_show(struct seq_file *s, void *v) | |||
| 227 | irq = &kvm->arch.vgic.spis[iter->intid - VGIC_NR_PRIVATE_IRQS]; | 228 | irq = &kvm->arch.vgic.spis[iter->intid - VGIC_NR_PRIVATE_IRQS]; |
| 228 | } | 229 | } |
| 229 | 230 | ||
| 230 | spin_lock(&irq->irq_lock); | 231 | spin_lock_irqsave(&irq->irq_lock, flags); |
| 231 | print_irq_state(s, irq, vcpu); | 232 | print_irq_state(s, irq, vcpu); |
| 232 | spin_unlock(&irq->irq_lock); | 233 | spin_unlock_irqrestore(&irq->irq_lock, flags); |
| 233 | 234 | ||
| 234 | return 0; | 235 | return 0; |
| 235 | } | 236 | } |
diff --git a/virt/kvm/arm/vgic/vgic-its.c b/virt/kvm/arm/vgic/vgic-its.c index a8f07243aa9f..4ed79c939fb4 100644 --- a/virt/kvm/arm/vgic/vgic-its.c +++ b/virt/kvm/arm/vgic/vgic-its.c | |||
| @@ -52,6 +52,7 @@ static struct vgic_irq *vgic_add_lpi(struct kvm *kvm, u32 intid, | |||
| 52 | { | 52 | { |
| 53 | struct vgic_dist *dist = &kvm->arch.vgic; | 53 | struct vgic_dist *dist = &kvm->arch.vgic; |
| 54 | struct vgic_irq *irq = vgic_get_irq(kvm, NULL, intid), *oldirq; | 54 | struct vgic_irq *irq = vgic_get_irq(kvm, NULL, intid), *oldirq; |
| 55 | unsigned long flags; | ||
| 55 | int ret; | 56 | int ret; |
| 56 | 57 | ||
| 57 | /* In this case there is no put, since we keep the reference. */ | 58 | /* In this case there is no put, since we keep the reference. */ |
| @@ -71,7 +72,7 @@ static struct vgic_irq *vgic_add_lpi(struct kvm *kvm, u32 intid, | |||
| 71 | irq->intid = intid; | 72 | irq->intid = intid; |
| 72 | irq->target_vcpu = vcpu; | 73 | irq->target_vcpu = vcpu; |
| 73 | 74 | ||
| 74 | spin_lock(&dist->lpi_list_lock); | 75 | spin_lock_irqsave(&dist->lpi_list_lock, flags); |
| 75 | 76 | ||
| 76 | /* | 77 | /* |
| 77 | * There could be a race with another vgic_add_lpi(), so we need to | 78 | * There could be a race with another vgic_add_lpi(), so we need to |
| @@ -99,7 +100,7 @@ static struct vgic_irq *vgic_add_lpi(struct kvm *kvm, u32 intid, | |||
| 99 | dist->lpi_list_count++; | 100 | dist->lpi_list_count++; |
| 100 | 101 | ||
| 101 | out_unlock: | 102 | out_unlock: |
| 102 | spin_unlock(&dist->lpi_list_lock); | 103 | spin_unlock_irqrestore(&dist->lpi_list_lock, flags); |
| 103 | 104 | ||
| 104 | /* | 105 | /* |
| 105 | * We "cache" the configuration table entries in our struct vgic_irq's. | 106 | * We "cache" the configuration table entries in our struct vgic_irq's. |
| @@ -280,8 +281,8 @@ static int update_lpi_config(struct kvm *kvm, struct vgic_irq *irq, | |||
| 280 | int ret; | 281 | int ret; |
| 281 | unsigned long flags; | 282 | unsigned long flags; |
| 282 | 283 | ||
| 283 | ret = kvm_read_guest(kvm, propbase + irq->intid - GIC_LPI_OFFSET, | 284 | ret = kvm_read_guest_lock(kvm, propbase + irq->intid - GIC_LPI_OFFSET, |
| 284 | &prop, 1); | 285 | &prop, 1); |
| 285 | 286 | ||
| 286 | if (ret) | 287 | if (ret) |
| 287 | return ret; | 288 | return ret; |
| @@ -315,6 +316,7 @@ static int vgic_copy_lpi_list(struct kvm_vcpu *vcpu, u32 **intid_ptr) | |||
| 315 | { | 316 | { |
| 316 | struct vgic_dist *dist = &vcpu->kvm->arch.vgic; | 317 | struct vgic_dist *dist = &vcpu->kvm->arch.vgic; |
| 317 | struct vgic_irq *irq; | 318 | struct vgic_irq *irq; |
| 319 | unsigned long flags; | ||
| 318 | u32 *intids; | 320 | u32 *intids; |
| 319 | int irq_count, i = 0; | 321 | int irq_count, i = 0; |
| 320 | 322 | ||
| @@ -330,7 +332,7 @@ static int vgic_copy_lpi_list(struct kvm_vcpu *vcpu, u32 **intid_ptr) | |||
| 330 | if (!intids) | 332 | if (!intids) |
| 331 | return -ENOMEM; | 333 | return -ENOMEM; |
| 332 | 334 | ||
| 333 | spin_lock(&dist->lpi_list_lock); | 335 | spin_lock_irqsave(&dist->lpi_list_lock, flags); |
| 334 | list_for_each_entry(irq, &dist->lpi_list_head, lpi_list) { | 336 | list_for_each_entry(irq, &dist->lpi_list_head, lpi_list) { |
| 335 | if (i == irq_count) | 337 | if (i == irq_count) |
| 336 | break; | 338 | break; |
| @@ -339,7 +341,7 @@ static int vgic_copy_lpi_list(struct kvm_vcpu *vcpu, u32 **intid_ptr) | |||
| 339 | continue; | 341 | continue; |
| 340 | intids[i++] = irq->intid; | 342 | intids[i++] = irq->intid; |
| 341 | } | 343 | } |
| 342 | spin_unlock(&dist->lpi_list_lock); | 344 | spin_unlock_irqrestore(&dist->lpi_list_lock, flags); |
| 343 | 345 | ||
| 344 | *intid_ptr = intids; | 346 | *intid_ptr = intids; |
| 345 | return i; | 347 | return i; |
| @@ -348,10 +350,11 @@ static int vgic_copy_lpi_list(struct kvm_vcpu *vcpu, u32 **intid_ptr) | |||
| 348 | static int update_affinity(struct vgic_irq *irq, struct kvm_vcpu *vcpu) | 350 | static int update_affinity(struct vgic_irq *irq, struct kvm_vcpu *vcpu) |
| 349 | { | 351 | { |
| 350 | int ret = 0; | 352 | int ret = 0; |
| 353 | unsigned long flags; | ||
| 351 | 354 | ||
| 352 | spin_lock(&irq->irq_lock); | 355 | spin_lock_irqsave(&irq->irq_lock, flags); |
| 353 | irq->target_vcpu = vcpu; | 356 | irq->target_vcpu = vcpu; |
| 354 | spin_unlock(&irq->irq_lock); | 357 | spin_unlock_irqrestore(&irq->irq_lock, flags); |
| 355 | 358 | ||
| 356 | if (irq->hw) { | 359 | if (irq->hw) { |
| 357 | struct its_vlpi_map map; | 360 | struct its_vlpi_map map; |
| @@ -441,8 +444,9 @@ static int its_sync_lpi_pending_table(struct kvm_vcpu *vcpu) | |||
| 441 | * this very same byte in the last iteration. Reuse that. | 444 | * this very same byte in the last iteration. Reuse that. |
| 442 | */ | 445 | */ |
| 443 | if (byte_offset != last_byte_offset) { | 446 | if (byte_offset != last_byte_offset) { |
| 444 | ret = kvm_read_guest(vcpu->kvm, pendbase + byte_offset, | 447 | ret = kvm_read_guest_lock(vcpu->kvm, |
| 445 | &pendmask, 1); | 448 | pendbase + byte_offset, |
| 449 | &pendmask, 1); | ||
| 446 | if (ret) { | 450 | if (ret) { |
| 447 | kfree(intids); | 451 | kfree(intids); |
| 448 | return ret; | 452 | return ret; |
| @@ -786,7 +790,7 @@ static bool vgic_its_check_id(struct vgic_its *its, u64 baser, u32 id, | |||
| 786 | return false; | 790 | return false; |
| 787 | 791 | ||
| 788 | /* Each 1st level entry is represented by a 64-bit value. */ | 792 | /* Each 1st level entry is represented by a 64-bit value. */ |
| 789 | if (kvm_read_guest(its->dev->kvm, | 793 | if (kvm_read_guest_lock(its->dev->kvm, |
| 790 | BASER_ADDRESS(baser) + index * sizeof(indirect_ptr), | 794 | BASER_ADDRESS(baser) + index * sizeof(indirect_ptr), |
| 791 | &indirect_ptr, sizeof(indirect_ptr))) | 795 | &indirect_ptr, sizeof(indirect_ptr))) |
| 792 | return false; | 796 | return false; |
| @@ -1367,8 +1371,8 @@ static void vgic_its_process_commands(struct kvm *kvm, struct vgic_its *its) | |||
| 1367 | cbaser = CBASER_ADDRESS(its->cbaser); | 1371 | cbaser = CBASER_ADDRESS(its->cbaser); |
| 1368 | 1372 | ||
| 1369 | while (its->cwriter != its->creadr) { | 1373 | while (its->cwriter != its->creadr) { |
| 1370 | int ret = kvm_read_guest(kvm, cbaser + its->creadr, | 1374 | int ret = kvm_read_guest_lock(kvm, cbaser + its->creadr, |
| 1371 | cmd_buf, ITS_CMD_SIZE); | 1375 | cmd_buf, ITS_CMD_SIZE); |
| 1372 | /* | 1376 | /* |
| 1373 | * If kvm_read_guest() fails, this could be due to the guest | 1377 | * If kvm_read_guest() fails, this could be due to the guest |
| 1374 | * programming a bogus value in CBASER or something else going | 1378 | * programming a bogus value in CBASER or something else going |
| @@ -1893,7 +1897,7 @@ static int scan_its_table(struct vgic_its *its, gpa_t base, int size, int esz, | |||
| 1893 | int next_offset; | 1897 | int next_offset; |
| 1894 | size_t byte_offset; | 1898 | size_t byte_offset; |
| 1895 | 1899 | ||
| 1896 | ret = kvm_read_guest(kvm, gpa, entry, esz); | 1900 | ret = kvm_read_guest_lock(kvm, gpa, entry, esz); |
| 1897 | if (ret) | 1901 | if (ret) |
| 1898 | return ret; | 1902 | return ret; |
| 1899 | 1903 | ||
| @@ -2263,7 +2267,7 @@ static int vgic_its_restore_cte(struct vgic_its *its, gpa_t gpa, int esz) | |||
| 2263 | int ret; | 2267 | int ret; |
| 2264 | 2268 | ||
| 2265 | BUG_ON(esz > sizeof(val)); | 2269 | BUG_ON(esz > sizeof(val)); |
| 2266 | ret = kvm_read_guest(kvm, gpa, &val, esz); | 2270 | ret = kvm_read_guest_lock(kvm, gpa, &val, esz); |
| 2267 | if (ret) | 2271 | if (ret) |
| 2268 | return ret; | 2272 | return ret; |
| 2269 | val = le64_to_cpu(val); | 2273 | val = le64_to_cpu(val); |
diff --git a/virt/kvm/arm/vgic/vgic-v3.c b/virt/kvm/arm/vgic/vgic-v3.c index c7423f3768e5..bdcf8e7a6161 100644 --- a/virt/kvm/arm/vgic/vgic-v3.c +++ b/virt/kvm/arm/vgic/vgic-v3.c | |||
| @@ -344,7 +344,7 @@ retry: | |||
| 344 | bit_nr = irq->intid % BITS_PER_BYTE; | 344 | bit_nr = irq->intid % BITS_PER_BYTE; |
| 345 | ptr = pendbase + byte_offset; | 345 | ptr = pendbase + byte_offset; |
| 346 | 346 | ||
| 347 | ret = kvm_read_guest(kvm, ptr, &val, 1); | 347 | ret = kvm_read_guest_lock(kvm, ptr, &val, 1); |
| 348 | if (ret) | 348 | if (ret) |
| 349 | return ret; | 349 | return ret; |
| 350 | 350 | ||
| @@ -397,7 +397,7 @@ int vgic_v3_save_pending_tables(struct kvm *kvm) | |||
| 397 | ptr = pendbase + byte_offset; | 397 | ptr = pendbase + byte_offset; |
| 398 | 398 | ||
| 399 | if (byte_offset != last_byte_offset) { | 399 | if (byte_offset != last_byte_offset) { |
| 400 | ret = kvm_read_guest(kvm, ptr, &val, 1); | 400 | ret = kvm_read_guest_lock(kvm, ptr, &val, 1); |
| 401 | if (ret) | 401 | if (ret) |
| 402 | return ret; | 402 | return ret; |
| 403 | last_byte_offset = byte_offset; | 403 | last_byte_offset = byte_offset; |
diff --git a/virt/kvm/arm/vgic/vgic.c b/virt/kvm/arm/vgic/vgic.c index 97bfba8d9a59..33c8325c8f35 100644 --- a/virt/kvm/arm/vgic/vgic.c +++ b/virt/kvm/arm/vgic/vgic.c | |||
| @@ -43,9 +43,13 @@ struct vgic_global kvm_vgic_global_state __ro_after_init = { | |||
| 43 | * kvm->lock (mutex) | 43 | * kvm->lock (mutex) |
| 44 | * its->cmd_lock (mutex) | 44 | * its->cmd_lock (mutex) |
| 45 | * its->its_lock (mutex) | 45 | * its->its_lock (mutex) |
| 46 | * vgic_cpu->ap_list_lock | 46 | * vgic_cpu->ap_list_lock must be taken with IRQs disabled |
| 47 | * kvm->lpi_list_lock | 47 | * kvm->lpi_list_lock must be taken with IRQs disabled |
| 48 | * vgic_irq->irq_lock | 48 | * vgic_irq->irq_lock must be taken with IRQs disabled |
| 49 | * | ||
| 50 | * As the ap_list_lock might be taken from the timer interrupt handler, | ||
| 51 | * we have to disable IRQs before taking this lock and everything lower | ||
| 52 | * than it. | ||
| 49 | * | 53 | * |
| 50 | * If you need to take multiple locks, always take the upper lock first, | 54 | * If you need to take multiple locks, always take the upper lock first, |
| 51 | * then the lower ones, e.g. first take the its_lock, then the irq_lock. | 55 | * then the lower ones, e.g. first take the its_lock, then the irq_lock. |
| @@ -72,8 +76,9 @@ static struct vgic_irq *vgic_get_lpi(struct kvm *kvm, u32 intid) | |||
| 72 | { | 76 | { |
| 73 | struct vgic_dist *dist = &kvm->arch.vgic; | 77 | struct vgic_dist *dist = &kvm->arch.vgic; |
| 74 | struct vgic_irq *irq = NULL; | 78 | struct vgic_irq *irq = NULL; |
| 79 | unsigned long flags; | ||
| 75 | 80 | ||
| 76 | spin_lock(&dist->lpi_list_lock); | 81 | spin_lock_irqsave(&dist->lpi_list_lock, flags); |
| 77 | 82 | ||
| 78 | list_for_each_entry(irq, &dist->lpi_list_head, lpi_list) { | 83 | list_for_each_entry(irq, &dist->lpi_list_head, lpi_list) { |
| 79 | if (irq->intid != intid) | 84 | if (irq->intid != intid) |
| @@ -89,7 +94,7 @@ static struct vgic_irq *vgic_get_lpi(struct kvm *kvm, u32 intid) | |||
| 89 | irq = NULL; | 94 | irq = NULL; |
| 90 | 95 | ||
| 91 | out_unlock: | 96 | out_unlock: |
| 92 | spin_unlock(&dist->lpi_list_lock); | 97 | spin_unlock_irqrestore(&dist->lpi_list_lock, flags); |
| 93 | 98 | ||
| 94 | return irq; | 99 | return irq; |
| 95 | } | 100 | } |
| @@ -134,19 +139,20 @@ static void vgic_irq_release(struct kref *ref) | |||
| 134 | void vgic_put_irq(struct kvm *kvm, struct vgic_irq *irq) | 139 | void vgic_put_irq(struct kvm *kvm, struct vgic_irq *irq) |
| 135 | { | 140 | { |
| 136 | struct vgic_dist *dist = &kvm->arch.vgic; | 141 | struct vgic_dist *dist = &kvm->arch.vgic; |
| 142 | unsigned long flags; | ||
| 137 | 143 | ||
| 138 | if (irq->intid < VGIC_MIN_LPI) | 144 | if (irq->intid < VGIC_MIN_LPI) |
| 139 | return; | 145 | return; |
| 140 | 146 | ||
| 141 | spin_lock(&dist->lpi_list_lock); | 147 | spin_lock_irqsave(&dist->lpi_list_lock, flags); |
| 142 | if (!kref_put(&irq->refcount, vgic_irq_release)) { | 148 | if (!kref_put(&irq->refcount, vgic_irq_release)) { |
| 143 | spin_unlock(&dist->lpi_list_lock); | 149 | spin_unlock_irqrestore(&dist->lpi_list_lock, flags); |
| 144 | return; | 150 | return; |
| 145 | }; | 151 | }; |
| 146 | 152 | ||
| 147 | list_del(&irq->lpi_list); | 153 | list_del(&irq->lpi_list); |
| 148 | dist->lpi_list_count--; | 154 | dist->lpi_list_count--; |
| 149 | spin_unlock(&dist->lpi_list_lock); | 155 | spin_unlock_irqrestore(&dist->lpi_list_lock, flags); |
| 150 | 156 | ||
| 151 | kfree(irq); | 157 | kfree(irq); |
| 152 | } | 158 | } |
