diff options
author | Greg Kroah-Hartman <gregkh@linuxfoundation.org> | 2017-10-09 03:02:35 -0400 |
---|---|---|
committer | Greg Kroah-Hartman <gregkh@linuxfoundation.org> | 2017-10-09 03:02:35 -0400 |
commit | 1236d6bb6e19fc72ffc6bbcdeb1bfefe450e54ee (patch) | |
tree | 47da3feee8e263e8c9352c85cf518e624be3c211 | |
parent | 750b1a6894ecc9b178c6e3d0a1170122971b2036 (diff) | |
parent | 8a5776a5f49812d29fe4b2d0a2d71675c3facf3f (diff) |
Merge 4.14-rc4 into staging-next
We want the staging/iio fixes in here as well to handle merge issues.
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
967 files changed, 12885 insertions, 6418 deletions
@@ -68,6 +68,8 @@ Jacob Shin <Jacob.Shin@amd.com> | |||
68 | James Bottomley <jejb@mulgrave.(none)> | 68 | James Bottomley <jejb@mulgrave.(none)> |
69 | James Bottomley <jejb@titanic.il.steeleye.com> | 69 | James Bottomley <jejb@titanic.il.steeleye.com> |
70 | James E Wilson <wilson@specifix.com> | 70 | James E Wilson <wilson@specifix.com> |
71 | James Hogan <jhogan@kernel.org> <james.hogan@imgtec.com> | ||
72 | James Hogan <jhogan@kernel.org> <james@albanarts.com> | ||
71 | James Ketrenos <jketreno@io.(none)> | 73 | James Ketrenos <jketreno@io.(none)> |
72 | Javi Merino <javi.merino@kernel.org> <javi.merino@arm.com> | 74 | Javi Merino <javi.merino@kernel.org> <javi.merino@arm.com> |
73 | <javier@osg.samsung.com> <javier.martinez@collabora.co.uk> | 75 | <javier@osg.samsung.com> <javier.martinez@collabora.co.uk> |
diff --git a/Documentation/ABI/testing/sysfs-power b/Documentation/ABI/testing/sysfs-power index 713cab1d5f12..a1d1612f3651 100644 --- a/Documentation/ABI/testing/sysfs-power +++ b/Documentation/ABI/testing/sysfs-power | |||
@@ -127,7 +127,7 @@ Description: | |||
127 | 127 | ||
128 | What; /sys/power/pm_trace_dev_match | 128 | What; /sys/power/pm_trace_dev_match |
129 | Date: October 2010 | 129 | Date: October 2010 |
130 | Contact: James Hogan <james@albanarts.com> | 130 | Contact: James Hogan <jhogan@kernel.org> |
131 | Description: | 131 | Description: |
132 | The /sys/power/pm_trace_dev_match file contains the name of the | 132 | The /sys/power/pm_trace_dev_match file contains the name of the |
133 | device associated with the last PM event point saved in the RTC | 133 | device associated with the last PM event point saved in the RTC |
diff --git a/Documentation/core-api/workqueue.rst b/Documentation/core-api/workqueue.rst index 3943b5bfa8cf..00a5ba51e63f 100644 --- a/Documentation/core-api/workqueue.rst +++ b/Documentation/core-api/workqueue.rst | |||
@@ -39,8 +39,8 @@ up. | |||
39 | Although MT wq wasted a lot of resource, the level of concurrency | 39 | Although MT wq wasted a lot of resource, the level of concurrency |
40 | provided was unsatisfactory. The limitation was common to both ST and | 40 | provided was unsatisfactory. The limitation was common to both ST and |
41 | MT wq albeit less severe on MT. Each wq maintained its own separate | 41 | MT wq albeit less severe on MT. Each wq maintained its own separate |
42 | worker pool. A MT wq could provide only one execution context per CPU | 42 | worker pool. An MT wq could provide only one execution context per CPU |
43 | while a ST wq one for the whole system. Work items had to compete for | 43 | while an ST wq one for the whole system. Work items had to compete for |
44 | those very limited execution contexts leading to various problems | 44 | those very limited execution contexts leading to various problems |
45 | including proneness to deadlocks around the single execution context. | 45 | including proneness to deadlocks around the single execution context. |
46 | 46 | ||
@@ -151,7 +151,7 @@ Application Programming Interface (API) | |||
151 | 151 | ||
152 | ``alloc_workqueue()`` allocates a wq. The original | 152 | ``alloc_workqueue()`` allocates a wq. The original |
153 | ``create_*workqueue()`` functions are deprecated and scheduled for | 153 | ``create_*workqueue()`` functions are deprecated and scheduled for |
154 | removal. ``alloc_workqueue()`` takes three arguments - @``name``, | 154 | removal. ``alloc_workqueue()`` takes three arguments - ``@name``, |
155 | ``@flags`` and ``@max_active``. ``@name`` is the name of the wq and | 155 | ``@flags`` and ``@max_active``. ``@name`` is the name of the wq and |
156 | also used as the name of the rescuer thread if there is one. | 156 | also used as the name of the rescuer thread if there is one. |
157 | 157 | ||
@@ -197,7 +197,7 @@ resources, scheduled and executed. | |||
197 | served by worker threads with elevated nice level. | 197 | served by worker threads with elevated nice level. |
198 | 198 | ||
199 | Note that normal and highpri worker-pools don't interact with | 199 | Note that normal and highpri worker-pools don't interact with |
200 | each other. Each maintain its separate pool of workers and | 200 | each other. Each maintains its separate pool of workers and |
201 | implements concurrency management among its workers. | 201 | implements concurrency management among its workers. |
202 | 202 | ||
203 | ``WQ_CPU_INTENSIVE`` | 203 | ``WQ_CPU_INTENSIVE`` |
@@ -249,8 +249,8 @@ unbound worker-pools and only one work item could be active at any given | |||
249 | time thus achieving the same ordering property as ST wq. | 249 | time thus achieving the same ordering property as ST wq. |
250 | 250 | ||
251 | In the current implementation the above configuration only guarantees | 251 | In the current implementation the above configuration only guarantees |
252 | ST behavior within a given NUMA node. Instead alloc_ordered_queue should | 252 | ST behavior within a given NUMA node. Instead ``alloc_ordered_queue()`` should |
253 | be used to achieve system wide ST behavior. | 253 | be used to achieve system-wide ST behavior. |
254 | 254 | ||
255 | 255 | ||
256 | Example Execution Scenarios | 256 | Example Execution Scenarios |
diff --git a/Documentation/cpu-freq/index.txt b/Documentation/cpu-freq/index.txt index 03a7cee6ac73..c15e75386a05 100644 --- a/Documentation/cpu-freq/index.txt +++ b/Documentation/cpu-freq/index.txt | |||
@@ -32,8 +32,6 @@ cpufreq-stats.txt - General description of sysfs cpufreq stats. | |||
32 | 32 | ||
33 | index.txt - File index, Mailing list and Links (this document) | 33 | index.txt - File index, Mailing list and Links (this document) |
34 | 34 | ||
35 | intel-pstate.txt - Intel pstate cpufreq driver specific file. | ||
36 | |||
37 | pcc-cpufreq.txt - PCC cpufreq driver specific file. | 35 | pcc-cpufreq.txt - PCC cpufreq driver specific file. |
38 | 36 | ||
39 | 37 | ||
diff --git a/Documentation/device-mapper/dm-raid.txt b/Documentation/device-mapper/dm-raid.txt index 4a0a7469fdd7..32df07e29f68 100644 --- a/Documentation/device-mapper/dm-raid.txt +++ b/Documentation/device-mapper/dm-raid.txt | |||
@@ -344,3 +344,4 @@ Version History | |||
344 | (wrong raid10_copies/raid10_format sequence) | 344 | (wrong raid10_copies/raid10_format sequence) |
345 | 1.11.1 Add raid4/5/6 journal write-back support via journal_mode option | 345 | 1.11.1 Add raid4/5/6 journal write-back support via journal_mode option |
346 | 1.12.1 fix for MD deadlock between mddev_suspend() and md_write_start() available | 346 | 1.12.1 fix for MD deadlock between mddev_suspend() and md_write_start() available |
347 | 1.13.0 Fix dev_health status at end of "recover" (was 'a', now 'A') | ||
diff --git a/Documentation/devicetree/bindings/clock/st,stm32h7-rcc.txt b/Documentation/devicetree/bindings/clock/st,stm32h7-rcc.txt index a135504c7d57..cac24ee10b72 100644 --- a/Documentation/devicetree/bindings/clock/st,stm32h7-rcc.txt +++ b/Documentation/devicetree/bindings/clock/st,stm32h7-rcc.txt | |||
@@ -32,7 +32,7 @@ Example: | |||
32 | compatible = "st,stm32h743-rcc", "st,stm32-rcc"; | 32 | compatible = "st,stm32h743-rcc", "st,stm32-rcc"; |
33 | reg = <0x58024400 0x400>; | 33 | reg = <0x58024400 0x400>; |
34 | #reset-cells = <1>; | 34 | #reset-cells = <1>; |
35 | #clock-cells = <2>; | 35 | #clock-cells = <1>; |
36 | clocks = <&clk_hse>, <&clk_lse>, <&clk_i2s_ckin>; | 36 | clocks = <&clk_hse>, <&clk_lse>, <&clk_i2s_ckin>; |
37 | 37 | ||
38 | st,syscfg = <&pwrcfg>; | 38 | st,syscfg = <&pwrcfg>; |
diff --git a/Documentation/devicetree/bindings/leds/ams,as3645a.txt b/Documentation/devicetree/bindings/leds/ams,as3645a.txt index 12c5ef26ec73..fdc40e354a64 100644 --- a/Documentation/devicetree/bindings/leds/ams,as3645a.txt +++ b/Documentation/devicetree/bindings/leds/ams,as3645a.txt | |||
@@ -15,11 +15,14 @@ Required properties | |||
15 | 15 | ||
16 | compatible : Must be "ams,as3645a". | 16 | compatible : Must be "ams,as3645a". |
17 | reg : The I2C address of the device. Typically 0x30. | 17 | reg : The I2C address of the device. Typically 0x30. |
18 | #address-cells : 1 | ||
19 | #size-cells : 0 | ||
18 | 20 | ||
19 | 21 | ||
20 | Required properties of the "flash" child node | 22 | Required properties of the flash child node (0) |
21 | ============================================= | 23 | =============================================== |
22 | 24 | ||
25 | reg: 0 | ||
23 | flash-timeout-us: Flash timeout in microseconds. The value must be in | 26 | flash-timeout-us: Flash timeout in microseconds. The value must be in |
24 | the range [100000, 850000] and divisible by 50000. | 27 | the range [100000, 850000] and divisible by 50000. |
25 | flash-max-microamp: Maximum flash current in microamperes. Has to be | 28 | flash-max-microamp: Maximum flash current in microamperes. Has to be |
@@ -33,20 +36,21 @@ ams,input-max-microamp: Maximum flash controller input current. The | |||
33 | and divisible by 50000. | 36 | and divisible by 50000. |
34 | 37 | ||
35 | 38 | ||
36 | Optional properties of the "flash" child node | 39 | Optional properties of the flash child node |
37 | ============================================= | 40 | =========================================== |
38 | 41 | ||
39 | label : The label of the flash LED. | 42 | label : The label of the flash LED. |
40 | 43 | ||
41 | 44 | ||
42 | Required properties of the "indicator" child node | 45 | Required properties of the indicator child node (1) |
43 | ================================================= | 46 | =================================================== |
44 | 47 | ||
48 | reg: 1 | ||
45 | led-max-microamp: Maximum indicator current. The allowed values are | 49 | led-max-microamp: Maximum indicator current. The allowed values are |
46 | 2500, 5000, 7500 and 10000. | 50 | 2500, 5000, 7500 and 10000. |
47 | 51 | ||
48 | Optional properties of the "indicator" child node | 52 | Optional properties of the indicator child node |
49 | ================================================= | 53 | =============================================== |
50 | 54 | ||
51 | label : The label of the indicator LED. | 55 | label : The label of the indicator LED. |
52 | 56 | ||
@@ -55,16 +59,20 @@ Example | |||
55 | ======= | 59 | ======= |
56 | 60 | ||
57 | as3645a@30 { | 61 | as3645a@30 { |
62 | #address-cells: 1 | ||
63 | #size-cells: 0 | ||
58 | reg = <0x30>; | 64 | reg = <0x30>; |
59 | compatible = "ams,as3645a"; | 65 | compatible = "ams,as3645a"; |
60 | flash { | 66 | flash@0 { |
67 | reg = <0x0>; | ||
61 | flash-timeout-us = <150000>; | 68 | flash-timeout-us = <150000>; |
62 | flash-max-microamp = <320000>; | 69 | flash-max-microamp = <320000>; |
63 | led-max-microamp = <60000>; | 70 | led-max-microamp = <60000>; |
64 | ams,input-max-microamp = <1750000>; | 71 | ams,input-max-microamp = <1750000>; |
65 | label = "as3645a:flash"; | 72 | label = "as3645a:flash"; |
66 | }; | 73 | }; |
67 | indicator { | 74 | indicator@1 { |
75 | reg = <0x1>; | ||
68 | led-max-microamp = <10000>; | 76 | led-max-microamp = <10000>; |
69 | label = "as3645a:indicator"; | 77 | label = "as3645a:indicator"; |
70 | }; | 78 | }; |
diff --git a/Documentation/devicetree/bindings/mmc/marvell,xenon-sdhci.txt b/Documentation/devicetree/bindings/mmc/marvell,xenon-sdhci.txt index b878a1e305af..ed1456f5c94d 100644 --- a/Documentation/devicetree/bindings/mmc/marvell,xenon-sdhci.txt +++ b/Documentation/devicetree/bindings/mmc/marvell,xenon-sdhci.txt | |||
@@ -16,11 +16,13 @@ Required Properties: | |||
16 | 16 | ||
17 | - clocks: | 17 | - clocks: |
18 | Array of clocks required for SDHC. | 18 | Array of clocks required for SDHC. |
19 | Require at least input clock for Xenon IP core. | 19 | Require at least input clock for Xenon IP core. For Armada AP806 and |
20 | CP110, the AXI clock is also mandatory. | ||
20 | 21 | ||
21 | - clock-names: | 22 | - clock-names: |
22 | Array of names corresponding to clocks property. | 23 | Array of names corresponding to clocks property. |
23 | The input clock for Xenon IP core should be named as "core". | 24 | The input clock for Xenon IP core should be named as "core". |
25 | The input clock for the AXI bus must be named as "axi". | ||
24 | 26 | ||
25 | - reg: | 27 | - reg: |
26 | * For "marvell,armada-3700-sdhci", two register areas. | 28 | * For "marvell,armada-3700-sdhci", two register areas. |
@@ -106,8 +108,8 @@ Example: | |||
106 | compatible = "marvell,armada-ap806-sdhci"; | 108 | compatible = "marvell,armada-ap806-sdhci"; |
107 | reg = <0xaa0000 0x1000>; | 109 | reg = <0xaa0000 0x1000>; |
108 | interrupts = <GIC_SPI 13 IRQ_TYPE_LEVEL_HIGH> | 110 | interrupts = <GIC_SPI 13 IRQ_TYPE_LEVEL_HIGH> |
109 | clocks = <&emmc_clk>; | 111 | clocks = <&emmc_clk>,<&axi_clk>; |
110 | clock-names = "core"; | 112 | clock-names = "core", "axi"; |
111 | bus-width = <4>; | 113 | bus-width = <4>; |
112 | marvell,xenon-phy-slow-mode; | 114 | marvell,xenon-phy-slow-mode; |
113 | marvell,xenon-tun-count = <11>; | 115 | marvell,xenon-tun-count = <11>; |
@@ -126,8 +128,8 @@ Example: | |||
126 | interrupts = <GIC_SPI 55 IRQ_TYPE_LEVEL_HIGH> | 128 | interrupts = <GIC_SPI 55 IRQ_TYPE_LEVEL_HIGH> |
127 | vqmmc-supply = <&sd_vqmmc_regulator>; | 129 | vqmmc-supply = <&sd_vqmmc_regulator>; |
128 | vmmc-supply = <&sd_vmmc_regulator>; | 130 | vmmc-supply = <&sd_vmmc_regulator>; |
129 | clocks = <&sdclk>; | 131 | clocks = <&sdclk>, <&axi_clk>; |
130 | clock-names = "core"; | 132 | clock-names = "core", "axi"; |
131 | bus-width = <4>; | 133 | bus-width = <4>; |
132 | marvell,xenon-tun-count = <9>; | 134 | marvell,xenon-tun-count = <9>; |
133 | }; | 135 | }; |
diff --git a/Documentation/devicetree/bindings/net/marvell-pp2.txt b/Documentation/devicetree/bindings/net/marvell-pp2.txt index 7e2dad08a12e..1814fa13f6ab 100644 --- a/Documentation/devicetree/bindings/net/marvell-pp2.txt +++ b/Documentation/devicetree/bindings/net/marvell-pp2.txt | |||
@@ -21,8 +21,9 @@ Required properties: | |||
21 | - main controller clock (for both armada-375-pp2 and armada-7k-pp2) | 21 | - main controller clock (for both armada-375-pp2 and armada-7k-pp2) |
22 | - GOP clock (for both armada-375-pp2 and armada-7k-pp2) | 22 | - GOP clock (for both armada-375-pp2 and armada-7k-pp2) |
23 | - MG clock (only for armada-7k-pp2) | 23 | - MG clock (only for armada-7k-pp2) |
24 | - clock-names: names of used clocks, must be "pp_clk", "gop_clk" and | 24 | - AXI clock (only for armada-7k-pp2) |
25 | "mg_clk" (the latter only for armada-7k-pp2). | 25 | - clock-names: names of used clocks, must be "pp_clk", "gop_clk", "mg_clk" |
26 | and "axi_clk" (the 2 latter only for armada-7k-pp2). | ||
26 | 27 | ||
27 | The ethernet ports are represented by subnodes. At least one port is | 28 | The ethernet ports are represented by subnodes. At least one port is |
28 | required. | 29 | required. |
@@ -78,8 +79,9 @@ Example for marvell,armada-7k-pp2: | |||
78 | cpm_ethernet: ethernet@0 { | 79 | cpm_ethernet: ethernet@0 { |
79 | compatible = "marvell,armada-7k-pp22"; | 80 | compatible = "marvell,armada-7k-pp22"; |
80 | reg = <0x0 0x100000>, <0x129000 0xb000>; | 81 | reg = <0x0 0x100000>, <0x129000 0xb000>; |
81 | clocks = <&cpm_syscon0 1 3>, <&cpm_syscon0 1 9>, <&cpm_syscon0 1 5>; | 82 | clocks = <&cpm_syscon0 1 3>, <&cpm_syscon0 1 9>, |
82 | clock-names = "pp_clk", "gop_clk", "gp_clk"; | 83 | <&cpm_syscon0 1 5>, <&cpm_syscon0 1 18>; |
84 | clock-names = "pp_clk", "gop_clk", "gp_clk", "axi_clk"; | ||
83 | 85 | ||
84 | eth0: eth0 { | 86 | eth0: eth0 { |
85 | interrupts = <ICU_GRP_NSR 39 IRQ_TYPE_LEVEL_HIGH>, | 87 | interrupts = <ICU_GRP_NSR 39 IRQ_TYPE_LEVEL_HIGH>, |
diff --git a/Documentation/devicetree/bindings/net/rockchip-dwmac.txt b/Documentation/devicetree/bindings/net/rockchip-dwmac.txt index 6af8eed1adeb..9c16ee2965a2 100644 --- a/Documentation/devicetree/bindings/net/rockchip-dwmac.txt +++ b/Documentation/devicetree/bindings/net/rockchip-dwmac.txt | |||
@@ -4,6 +4,7 @@ The device node has following properties. | |||
4 | 4 | ||
5 | Required properties: | 5 | Required properties: |
6 | - compatible: should be "rockchip,<name>-gamc" | 6 | - compatible: should be "rockchip,<name>-gamc" |
7 | "rockchip,rk3128-gmac": found on RK312x SoCs | ||
7 | "rockchip,rk3228-gmac": found on RK322x SoCs | 8 | "rockchip,rk3228-gmac": found on RK322x SoCs |
8 | "rockchip,rk3288-gmac": found on RK3288 SoCs | 9 | "rockchip,rk3288-gmac": found on RK3288 SoCs |
9 | "rockchip,rk3328-gmac": found on RK3328 SoCs | 10 | "rockchip,rk3328-gmac": found on RK3328 SoCs |
diff --git a/Documentation/devicetree/bindings/reset/snps,hsdk-reset.txt b/Documentation/devicetree/bindings/reset/snps,hsdk-reset.txt new file mode 100644 index 000000000000..830069b1c37c --- /dev/null +++ b/Documentation/devicetree/bindings/reset/snps,hsdk-reset.txt | |||
@@ -0,0 +1,28 @@ | |||
1 | Binding for the Synopsys HSDK reset controller | ||
2 | |||
3 | This binding uses the common reset binding[1]. | ||
4 | |||
5 | [1] Documentation/devicetree/bindings/reset/reset.txt | ||
6 | |||
7 | Required properties: | ||
8 | - compatible: should be "snps,hsdk-reset". | ||
9 | - reg: should always contain 2 pairs address - length: first for reset | ||
10 | configuration register and second for corresponding SW reset and status bits | ||
11 | register. | ||
12 | - #reset-cells: from common reset binding; Should always be set to 1. | ||
13 | |||
14 | Example: | ||
15 | reset: reset@880 { | ||
16 | compatible = "snps,hsdk-reset"; | ||
17 | #reset-cells = <1>; | ||
18 | reg = <0x8A0 0x4>, <0xFF0 0x4>; | ||
19 | }; | ||
20 | |||
21 | Specifying reset lines connected to IP modules: | ||
22 | ethernet@.... { | ||
23 | .... | ||
24 | resets = <&reset HSDK_V1_ETH_RESET>; | ||
25 | .... | ||
26 | }; | ||
27 | |||
28 | The index could be found in <dt-bindings/reset/snps,hsdk-reset.h> | ||
diff --git a/Documentation/devicetree/bindings/security/tpm/tpm-i2c.txt b/Documentation/devicetree/bindings/security/tpm/tpm-i2c.txt index 3eca6de6369d..a65d7b71e81a 100644 --- a/Documentation/devicetree/bindings/security/tpm/tpm-i2c.txt +++ b/Documentation/devicetree/bindings/security/tpm/tpm-i2c.txt | |||
@@ -8,6 +8,12 @@ Required properties: | |||
8 | the firmware event log | 8 | the firmware event log |
9 | - linux,sml-size : size of the memory allocated for the firmware event log | 9 | - linux,sml-size : size of the memory allocated for the firmware event log |
10 | 10 | ||
11 | Optional properties: | ||
12 | |||
13 | - powered-while-suspended: present when the TPM is left powered on between | ||
14 | suspend and resume (makes the suspend/resume | ||
15 | callbacks do nothing). | ||
16 | |||
11 | Example (for OpenPower Systems with Nuvoton TPM 2.0 on I2C) | 17 | Example (for OpenPower Systems with Nuvoton TPM 2.0 on I2C) |
12 | ---------------------------------------------------------- | 18 | ---------------------------------------------------------- |
13 | 19 | ||
diff --git a/Documentation/devicetree/bindings/serial/renesas,sci-serial.txt b/Documentation/devicetree/bindings/serial/renesas,sci-serial.txt index 4fc96946f81d..cf504d0380ae 100644 --- a/Documentation/devicetree/bindings/serial/renesas,sci-serial.txt +++ b/Documentation/devicetree/bindings/serial/renesas,sci-serial.txt | |||
@@ -41,6 +41,8 @@ Required properties: | |||
41 | - "renesas,hscif-r8a7795" for R8A7795 (R-Car H3) HSCIF compatible UART. | 41 | - "renesas,hscif-r8a7795" for R8A7795 (R-Car H3) HSCIF compatible UART. |
42 | - "renesas,scif-r8a7796" for R8A7796 (R-Car M3-W) SCIF compatible UART. | 42 | - "renesas,scif-r8a7796" for R8A7796 (R-Car M3-W) SCIF compatible UART. |
43 | - "renesas,hscif-r8a7796" for R8A7796 (R-Car M3-W) HSCIF compatible UART. | 43 | - "renesas,hscif-r8a7796" for R8A7796 (R-Car M3-W) HSCIF compatible UART. |
44 | - "renesas,scif-r8a77970" for R8A77970 (R-Car V3M) SCIF compatible UART. | ||
45 | - "renesas,hscif-r8a77970" for R8A77970 (R-Car V3M) HSCIF compatible UART. | ||
44 | - "renesas,scif-r8a77995" for R8A77995 (R-Car D3) SCIF compatible UART. | 46 | - "renesas,scif-r8a77995" for R8A77995 (R-Car D3) SCIF compatible UART. |
45 | - "renesas,hscif-r8a77995" for R8A77995 (R-Car D3) HSCIF compatible UART. | 47 | - "renesas,hscif-r8a77995" for R8A77995 (R-Car D3) HSCIF compatible UART. |
46 | - "renesas,scifa-sh73a0" for SH73A0 (SH-Mobile AG5) SCIFA compatible UART. | 48 | - "renesas,scifa-sh73a0" for SH73A0 (SH-Mobile AG5) SCIFA compatible UART. |
diff --git a/Documentation/devicetree/bindings/vendor-prefixes.txt b/Documentation/devicetree/bindings/vendor-prefixes.txt index 1ea1fd4232ab..1afd298eddd7 100644 --- a/Documentation/devicetree/bindings/vendor-prefixes.txt +++ b/Documentation/devicetree/bindings/vendor-prefixes.txt | |||
@@ -3,8 +3,8 @@ Device tree binding vendor prefix registry. Keep list in alphabetical order. | |||
3 | This isn't an exhaustive list, but you should add new prefixes to it before | 3 | This isn't an exhaustive list, but you should add new prefixes to it before |
4 | using them to avoid name-space collisions. | 4 | using them to avoid name-space collisions. |
5 | 5 | ||
6 | abcn Abracon Corporation | ||
7 | abilis Abilis Systems | 6 | abilis Abilis Systems |
7 | abracon Abracon Corporation | ||
8 | actions Actions Semiconductor Co., Ltd. | 8 | actions Actions Semiconductor Co., Ltd. |
9 | active-semi Active-Semi International Inc | 9 | active-semi Active-Semi International Inc |
10 | ad Avionic Design GmbH | 10 | ad Avionic Design GmbH |
diff --git a/Documentation/driver-api/pm/devices.rst b/Documentation/driver-api/pm/devices.rst index bedd32388dac..a0dc2879a152 100644 --- a/Documentation/driver-api/pm/devices.rst +++ b/Documentation/driver-api/pm/devices.rst | |||
@@ -675,7 +675,7 @@ sub-domain of the parent domain. | |||
675 | 675 | ||
676 | Support for power domains is provided through the :c:member:`pm_domain` field of | 676 | Support for power domains is provided through the :c:member:`pm_domain` field of |
677 | |struct device|. This field is a pointer to an object of type | 677 | |struct device|. This field is a pointer to an object of type |
678 | |struct dev_pm_domain|, defined in :file:`include/linux/pm.h``, providing a set | 678 | |struct dev_pm_domain|, defined in :file:`include/linux/pm.h`, providing a set |
679 | of power management callbacks analogous to the subsystem-level and device driver | 679 | of power management callbacks analogous to the subsystem-level and device driver |
680 | callbacks that are executed for the given device during all power transitions, | 680 | callbacks that are executed for the given device during all power transitions, |
681 | instead of the respective subsystem-level callbacks. Specifically, if a | 681 | instead of the respective subsystem-level callbacks. Specifically, if a |
diff --git a/Documentation/driver-model/driver.txt b/Documentation/driver-model/driver.txt index 4421135826a2..d661e6f7e6a0 100644 --- a/Documentation/driver-model/driver.txt +++ b/Documentation/driver-model/driver.txt | |||
@@ -196,12 +196,13 @@ struct driver_attribute { | |||
196 | }; | 196 | }; |
197 | 197 | ||
198 | Device drivers can export attributes via their sysfs directories. | 198 | Device drivers can export attributes via their sysfs directories. |
199 | Drivers can declare attributes using a DRIVER_ATTR macro that works | 199 | Drivers can declare attributes using a DRIVER_ATTR_RW and DRIVER_ATTR_RO |
200 | identically to the DEVICE_ATTR macro. | 200 | macro that works identically to the DEVICE_ATTR_RW and DEVICE_ATTR_RO |
201 | macros. | ||
201 | 202 | ||
202 | Example: | 203 | Example: |
203 | 204 | ||
204 | DRIVER_ATTR(debug,0644,show_debug,store_debug); | 205 | DRIVER_ATTR_RW(debug); |
205 | 206 | ||
206 | This is equivalent to declaring: | 207 | This is equivalent to declaring: |
207 | 208 | ||
diff --git a/Documentation/filesystems/cifs/AUTHORS b/Documentation/filesystems/cifs/AUTHORS index c98800df677f..9f4f87e16240 100644 --- a/Documentation/filesystems/cifs/AUTHORS +++ b/Documentation/filesystems/cifs/AUTHORS | |||
@@ -41,6 +41,11 @@ Igor Mammedov (DFS support) | |||
41 | Jeff Layton (many, many fixes, as well as great work on the cifs Kerberos code) | 41 | Jeff Layton (many, many fixes, as well as great work on the cifs Kerberos code) |
42 | Scott Lovenberg | 42 | Scott Lovenberg |
43 | Pavel Shilovsky (for great work adding SMB2 support, and various SMB3 features) | 43 | Pavel Shilovsky (for great work adding SMB2 support, and various SMB3 features) |
44 | Aurelien Aptel (for DFS SMB3 work and some key bug fixes) | ||
45 | Ronnie Sahlberg (for SMB3 xattr work and bug fixes) | ||
46 | Shirish Pargaonkar (for many ACL patches over the years) | ||
47 | Sachin Prabhu (many bug fixes, including for reconnect, copy offload and security) | ||
48 | |||
44 | 49 | ||
45 | Test case and Bug Report contributors | 50 | Test case and Bug Report contributors |
46 | ------------------------------------- | 51 | ------------------------------------- |
diff --git a/Documentation/filesystems/cifs/README b/Documentation/filesystems/cifs/README index a54788405429..a9da51553ba3 100644 --- a/Documentation/filesystems/cifs/README +++ b/Documentation/filesystems/cifs/README | |||
@@ -1,10 +1,14 @@ | |||
1 | The CIFS VFS support for Linux supports many advanced network filesystem | 1 | This module supports the SMB3 family of advanced network protocols (as well |
2 | features such as hierarchical dfs like namespace, hardlinks, locking and more. | 2 | as older dialects, originally called "CIFS" or SMB1). |
3 | |||
4 | The CIFS VFS module for Linux supports many advanced network filesystem | ||
5 | features such as hierarchical DFS like namespace, hardlinks, locking and more. | ||
3 | It was designed to comply with the SNIA CIFS Technical Reference (which | 6 | It was designed to comply with the SNIA CIFS Technical Reference (which |
4 | supersedes the 1992 X/Open SMB Standard) as well as to perform best practice | 7 | supersedes the 1992 X/Open SMB Standard) as well as to perform best practice |
5 | practical interoperability with Windows 2000, Windows XP, Samba and equivalent | 8 | practical interoperability with Windows 2000, Windows XP, Samba and equivalent |
6 | servers. This code was developed in participation with the Protocol Freedom | 9 | servers. This code was developed in participation with the Protocol Freedom |
7 | Information Foundation. | 10 | Information Foundation. CIFS and now SMB3 has now become a defacto |
11 | standard for interoperating between Macs and Windows and major NAS appliances. | ||
8 | 12 | ||
9 | Please see | 13 | Please see |
10 | http://protocolfreedom.org/ and | 14 | http://protocolfreedom.org/ and |
@@ -15,30 +19,11 @@ for more details. | |||
15 | For questions or bug reports please contact: | 19 | For questions or bug reports please contact: |
16 | sfrench@samba.org (sfrench@us.ibm.com) | 20 | sfrench@samba.org (sfrench@us.ibm.com) |
17 | 21 | ||
22 | See the project page at: https://wiki.samba.org/index.php/LinuxCIFS_utils | ||
23 | |||
18 | Build instructions: | 24 | Build instructions: |
19 | ================== | 25 | ================== |
20 | For Linux 2.4: | 26 | For Linux: |
21 | 1) Get the kernel source (e.g.from http://www.kernel.org) | ||
22 | and download the cifs vfs source (see the project page | ||
23 | at http://us1.samba.org/samba/Linux_CIFS_client.html) | ||
24 | and change directory into the top of the kernel directory | ||
25 | then patch the kernel (e.g. "patch -p1 < cifs_24.patch") | ||
26 | to add the cifs vfs to your kernel configure options if | ||
27 | it has not already been added (e.g. current SuSE and UL | ||
28 | users do not need to apply the cifs_24.patch since the cifs vfs is | ||
29 | already in the kernel configure menu) and then | ||
30 | mkdir linux/fs/cifs and then copy the current cifs vfs files from | ||
31 | the cifs download to your kernel build directory e.g. | ||
32 | |||
33 | cp <cifs_download_dir>/fs/cifs/* to <kernel_download_dir>/fs/cifs | ||
34 | |||
35 | 2) make menuconfig (or make xconfig) | ||
36 | 3) select cifs from within the network filesystem choices | ||
37 | 4) save and exit | ||
38 | 5) make dep | ||
39 | 6) make modules (or "make" if CIFS VFS not to be built as a module) | ||
40 | |||
41 | For Linux 2.6: | ||
42 | 1) Download the kernel (e.g. from http://www.kernel.org) | 27 | 1) Download the kernel (e.g. from http://www.kernel.org) |
43 | and change directory into the top of the kernel directory tree | 28 | and change directory into the top of the kernel directory tree |
44 | (e.g. /usr/src/linux-2.5.73) | 29 | (e.g. /usr/src/linux-2.5.73) |
@@ -61,16 +46,13 @@ would simply type "make install"). | |||
61 | If you do not have the utility mount.cifs (in the Samba 3.0 source tree and on | 46 | If you do not have the utility mount.cifs (in the Samba 3.0 source tree and on |
62 | the CIFS VFS web site) copy it to the same directory in which mount.smbfs and | 47 | the CIFS VFS web site) copy it to the same directory in which mount.smbfs and |
63 | similar files reside (usually /sbin). Although the helper software is not | 48 | similar files reside (usually /sbin). Although the helper software is not |
64 | required, mount.cifs is recommended. Eventually the Samba 3.0 utility program | 49 | required, mount.cifs is recommended. Most distros include a "cifs-utils" |
65 | "net" may also be helpful since it may someday provide easier mount syntax for | 50 | package that includes this utility so it is recommended to install this. |
66 | users who are used to Windows e.g. | 51 | |
67 | net use <mount point> <UNC name or cifs URL> | ||
68 | Note that running the Winbind pam/nss module (logon service) on all of your | 52 | Note that running the Winbind pam/nss module (logon service) on all of your |
69 | Linux clients is useful in mapping Uids and Gids consistently across the | 53 | Linux clients is useful in mapping Uids and Gids consistently across the |
70 | domain to the proper network user. The mount.cifs mount helper can be | 54 | domain to the proper network user. The mount.cifs mount helper can be |
71 | trivially built from Samba 3.0 or later source e.g. by executing: | 55 | found at cifs-utils.git on git.samba.org |
72 | |||
73 | gcc samba/source/client/mount.cifs.c -o mount.cifs | ||
74 | 56 | ||
75 | If cifs is built as a module, then the size and number of network buffers | 57 | If cifs is built as a module, then the size and number of network buffers |
76 | and maximum number of simultaneous requests to one server can be configured. | 58 | and maximum number of simultaneous requests to one server can be configured. |
@@ -79,6 +61,18 @@ Changing these from their defaults is not recommended. By executing modinfo | |||
79 | on kernel/fs/cifs/cifs.ko the list of configuration changes that can be made | 61 | on kernel/fs/cifs/cifs.ko the list of configuration changes that can be made |
80 | at module initialization time (by running insmod cifs.ko) can be seen. | 62 | at module initialization time (by running insmod cifs.ko) can be seen. |
81 | 63 | ||
64 | Recommendations | ||
65 | =============== | ||
66 | To improve security the SMB2.1 dialect or later (usually will get SMB3) is now | ||
67 | the new default. To use old dialects (e.g. to mount Windows XP) use "vers=1.0" | ||
68 | on mount (or vers=2.0 for Windows Vista). Note that the CIFS (vers=1.0) is | ||
69 | much older and less secure than the default dialect SMB3 which includes | ||
70 | many advanced security features such as downgrade attack detection | ||
71 | and encrypted shares and stronger signing and authentication algorithms. | ||
72 | There are additional mount options that may be helpful for SMB3 to get | ||
73 | improved POSIX behavior (NB: can use vers=3.0 to force only SMB3, never 2.1): | ||
74 | "mfsymlinks" and "cifsacl" and "idsfromsid" | ||
75 | |||
82 | Allowing User Mounts | 76 | Allowing User Mounts |
83 | ==================== | 77 | ==================== |
84 | To permit users to mount and unmount over directories they own is possible | 78 | To permit users to mount and unmount over directories they own is possible |
@@ -98,9 +92,7 @@ and execution of suid programs on the remote target would be enabled | |||
98 | by default. This can be changed, as with nfs and other filesystems, | 92 | by default. This can be changed, as with nfs and other filesystems, |
99 | by simply specifying "nosuid" among the mount options. For user mounts | 93 | by simply specifying "nosuid" among the mount options. For user mounts |
100 | though to be able to pass the suid flag to mount requires rebuilding | 94 | though to be able to pass the suid flag to mount requires rebuilding |
101 | mount.cifs with the following flag: | 95 | mount.cifs with the following flag: CIFS_ALLOW_USR_SUID |
102 | |||
103 | gcc samba/source/client/mount.cifs.c -DCIFS_ALLOW_USR_SUID -o mount.cifs | ||
104 | 96 | ||
105 | There is a corresponding manual page for cifs mounting in the Samba 3.0 and | 97 | There is a corresponding manual page for cifs mounting in the Samba 3.0 and |
106 | later source tree in docs/manpages/mount.cifs.8 | 98 | later source tree in docs/manpages/mount.cifs.8 |
@@ -189,18 +181,18 @@ applications running on the same server as Samba. | |||
189 | Use instructions: | 181 | Use instructions: |
190 | ================ | 182 | ================ |
191 | Once the CIFS VFS support is built into the kernel or installed as a module | 183 | Once the CIFS VFS support is built into the kernel or installed as a module |
192 | (cifs.o), you can use mount syntax like the following to access Samba or Windows | 184 | (cifs.ko), you can use mount syntax like the following to access Samba or |
193 | servers: | 185 | Mac or Windows servers: |
194 | 186 | ||
195 | mount -t cifs //9.53.216.11/e$ /mnt -o user=myname,pass=mypassword | 187 | mount -t cifs //9.53.216.11/e$ /mnt -o username=myname,password=mypassword |
196 | 188 | ||
197 | Before -o the option -v may be specified to make the mount.cifs | 189 | Before -o the option -v may be specified to make the mount.cifs |
198 | mount helper display the mount steps more verbosely. | 190 | mount helper display the mount steps more verbosely. |
199 | After -o the following commonly used cifs vfs specific options | 191 | After -o the following commonly used cifs vfs specific options |
200 | are supported: | 192 | are supported: |
201 | 193 | ||
202 | user=<username> | 194 | username=<username> |
203 | pass=<password> | 195 | password=<password> |
204 | domain=<domain name> | 196 | domain=<domain name> |
205 | 197 | ||
206 | Other cifs mount options are described below. Use of TCP names (in addition to | 198 | Other cifs mount options are described below. Use of TCP names (in addition to |
@@ -246,13 +238,16 @@ the Server's registry. Samba starting with version 3.10 will allow such | |||
246 | filenames (ie those which contain valid Linux characters, which normally | 238 | filenames (ie those which contain valid Linux characters, which normally |
247 | would be forbidden for Windows/CIFS semantics) as long as the server is | 239 | would be forbidden for Windows/CIFS semantics) as long as the server is |
248 | configured for Unix Extensions (and the client has not disabled | 240 | configured for Unix Extensions (and the client has not disabled |
249 | /proc/fs/cifs/LinuxExtensionsEnabled). | 241 | /proc/fs/cifs/LinuxExtensionsEnabled). In addition the mount option |
250 | 242 | "mapposix" can be used on CIFS (vers=1.0) to force the mapping of | |
243 | illegal Windows/NTFS/SMB characters to a remap range (this mount parm | ||
244 | is the default for SMB3). This remap ("mapposix") range is also | ||
245 | compatible with Mac (and "Services for Mac" on some older Windows). | ||
251 | 246 | ||
252 | CIFS VFS Mount Options | 247 | CIFS VFS Mount Options |
253 | ====================== | 248 | ====================== |
254 | A partial list of the supported mount options follows: | 249 | A partial list of the supported mount options follows: |
255 | user The user name to use when trying to establish | 250 | username The user name to use when trying to establish |
256 | the CIFS session. | 251 | the CIFS session. |
257 | password The user password. If the mount helper is | 252 | password The user password. If the mount helper is |
258 | installed, the user will be prompted for password | 253 | installed, the user will be prompted for password |
diff --git a/Documentation/filesystems/cifs/TODO b/Documentation/filesystems/cifs/TODO index 066ffddc3964..396ecfd6ff4a 100644 --- a/Documentation/filesystems/cifs/TODO +++ b/Documentation/filesystems/cifs/TODO | |||
@@ -1,4 +1,4 @@ | |||
1 | Version 2.03 August 1, 2014 | 1 | Version 2.04 September 13, 2017 |
2 | 2 | ||
3 | A Partial List of Missing Features | 3 | A Partial List of Missing Features |
4 | ================================== | 4 | ================================== |
@@ -8,73 +8,69 @@ for visible, important contributions to this module. Here | |||
8 | is a partial list of the known problems and missing features: | 8 | is a partial list of the known problems and missing features: |
9 | 9 | ||
10 | a) SMB3 (and SMB3.02) missing optional features: | 10 | a) SMB3 (and SMB3.02) missing optional features: |
11 | - RDMA | 11 | - RDMA (started) |
12 | - multichannel (started) | 12 | - multichannel (started) |
13 | - directory leases (improved metadata caching) | 13 | - directory leases (improved metadata caching) |
14 | - T10 copy offload (copy chunk is only mechanism supported) | 14 | - T10 copy offload (copy chunk is only mechanism supported) |
15 | - encrypted shares | ||
16 | 15 | ||
17 | b) improved sparse file support | 16 | b) improved sparse file support |
18 | 17 | ||
19 | c) Directory entry caching relies on a 1 second timer, rather than | 18 | c) Directory entry caching relies on a 1 second timer, rather than |
20 | using FindNotify or equivalent. - (started) | 19 | using Directory Leases |
21 | 20 | ||
22 | d) quota support (needs minor kernel change since quota calls | 21 | d) quota support (needs minor kernel change since quota calls |
23 | to make it to network filesystems or deviceless filesystems) | 22 | to make it to network filesystems or deviceless filesystems) |
24 | 23 | ||
25 | e) improve support for very old servers (OS/2 and Win9x for example) | 24 | e) Better optimize open to reduce redundant opens (using reference |
26 | Including support for changing the time remotely (utimes command). | 25 | counts more) and to improve use of compounding in SMB3 to reduce |
26 | number of roundtrips. | ||
27 | 27 | ||
28 | f) hook lower into the sockets api (as NFS/SunRPC does) to avoid the | 28 | f) Finish inotify support so kde and gnome file list windows |
29 | extra copy in/out of the socket buffers in some cases. | ||
30 | |||
31 | g) Better optimize open (and pathbased setfilesize) to reduce the | ||
32 | oplock breaks coming from windows srv. Piggyback identical file | ||
33 | opens on top of each other by incrementing reference count rather | ||
34 | than resending (helps reduce server resource utilization and avoid | ||
35 | spurious oplock breaks). | ||
36 | |||
37 | h) Add support for storing symlink info to Windows servers | ||
38 | in the Extended Attribute format their SFU clients would recognize. | ||
39 | |||
40 | i) Finish inotify support so kde and gnome file list windows | ||
41 | will autorefresh (partially complete by Asser). Needs minor kernel | 29 | will autorefresh (partially complete by Asser). Needs minor kernel |
42 | vfs change to support removing D_NOTIFY on a file. | 30 | vfs change to support removing D_NOTIFY on a file. |
43 | 31 | ||
44 | j) Add GUI tool to configure /proc/fs/cifs settings and for display of | 32 | g) Add GUI tool to configure /proc/fs/cifs settings and for display of |
45 | the CIFS statistics (started) | 33 | the CIFS statistics (started) |
46 | 34 | ||
47 | k) implement support for security and trusted categories of xattrs | 35 | h) implement support for security and trusted categories of xattrs |
48 | (requires minor protocol extension) to enable better support for SELINUX | 36 | (requires minor protocol extension) to enable better support for SELINUX |
49 | 37 | ||
50 | l) Implement O_DIRECT flag on open (already supported on mount) | 38 | i) Implement O_DIRECT flag on open (already supported on mount) |
51 | 39 | ||
52 | m) Create UID mapping facility so server UIDs can be mapped on a per | 40 | j) Create UID mapping facility so server UIDs can be mapped on a per |
53 | mount or a per server basis to client UIDs or nobody if no mapping | 41 | mount or a per server basis to client UIDs or nobody if no mapping |
54 | exists. This is helpful when Unix extensions are negotiated to | 42 | exists. Also better integration with winbind for resolving SID owners |
55 | allow better permission checking when UIDs differ on the server | 43 | |
56 | and client. Add new protocol request to the CIFS protocol | 44 | k) Add tools to take advantage of more smb3 specific ioctls and features |
57 | standard for asking the server for the corresponding name of a | 45 | |
58 | particular uid. | 46 | l) encrypted file support |
47 | |||
48 | m) improved stats gathering, tools (perhaps integration with nfsometer?) | ||
59 | 49 | ||
60 | n) DOS attrs - returned as pseudo-xattr in Samba format (check VFAT and NTFS for this too) | 50 | n) allow setting more NTFS/SMB3 file attributes remotely (currently limited to compressed |
51 | file attribute via chflags) and improve user space tools for managing and | ||
52 | viewing them. | ||
61 | 53 | ||
62 | o) mount check for unmatched uids | 54 | o) mount helper GUI (to simplify the various configuration options on mount) |
63 | 55 | ||
64 | p) Add support for new vfs entry point for fallocate | 56 | p) autonegotiation of dialects (offering more than one dialect ie SMB3.02, |
57 | SMB3, SMB2.1 not just SMB3). | ||
65 | 58 | ||
66 | q) Add tools to take advantage of cifs/smb3 specific ioctls and features | 59 | q) Allow mount.cifs to be more verbose in reporting errors with dialect |
67 | such as "CopyChunk" (fast server side file copy) | 60 | or unsupported feature errors. |
68 | 61 | ||
69 | r) encrypted file support | 62 | r) updating cifs documentation, and user guide. | 
70 | 63 | ||
71 | s) improved stats gathering, tools (perhaps integration with nfsometer?) | 64 | s) Addressing bugs found by running a broader set of xfstests in the standard | 
65 | file system xfstest suite. | ||
72 | 66 | ||
73 | t) allow setting more NTFS/SMB3 file attributes remotely (currently limited to compressed | 67 | t) split cifs and smb3 support into separate modules so legacy (and less |
74 | file attribute via chflags) | 68 | secure) CIFS dialect can be disabled in environments that don't need it |
69 | and simplify the code. | ||
75 | 70 | ||
76 | u) mount helper GUI (to simplify the various configuration options on mount) | 71 | u) Finish up SMB3.1.1 dialect support |
77 | 72 | ||
73 | v) POSIX Extensions for SMB3.1.1 | ||
78 | 74 | ||
79 | KNOWN BUGS | 75 | KNOWN BUGS |
80 | ==================================== | 76 | ==================================== |
diff --git a/Documentation/filesystems/cifs/cifs.txt b/Documentation/filesystems/cifs/cifs.txt index 2fac91ac96cf..67756607246e 100644 --- a/Documentation/filesystems/cifs/cifs.txt +++ b/Documentation/filesystems/cifs/cifs.txt | |||
@@ -1,24 +1,28 @@ | |||
1 | This is the client VFS module for the Common Internet File System | 1 | This is the client VFS module for the SMB3 NAS protocol as well |
2 | (CIFS) protocol which is the successor to the Server Message Block | 2 | as older dialects such as the Common Internet File System (CIFS) | 
3 | protocol which was the successor to the Server Message Block | ||
3 | (SMB) protocol, the native file sharing mechanism for most early | 4 | (SMB) protocol, the native file sharing mechanism for most early |
4 | PC operating systems. New and improved versions of CIFS are now | 5 | PC operating systems. New and improved versions of CIFS are now |
5 | called SMB2 and SMB3. These dialects are also supported by the | 6 | called SMB2 and SMB3. These dialects are also supported by the |
6 | CIFS VFS module. CIFS is fully supported by network | 7 | CIFS VFS module. CIFS is fully supported by network |
7 | file servers such as Windows 2000, 2003, 2008 and 2012 | 8 | file servers such as Windows 2000, 2003, 2008, 2012 and 2016 |
8 | as well by Samba (which provides excellent CIFS | 9 | as well by Samba (which provides excellent CIFS |
9 | server support for Linux and many other operating systems), so | 10 | server support for Linux and many other operating systems), Apple |
11 | systems, as well as most Network Attached Storage vendors, so | ||
10 | this network filesystem client can mount to a wide variety of | 12 | this network filesystem client can mount to a wide variety of |
11 | servers. | 13 | servers. |
12 | 14 | ||
13 | The intent of this module is to provide the most advanced network | 15 | The intent of this module is to provide the most advanced network |
14 | file system function for CIFS compliant servers, including better | 16 | file system function for SMB3 compliant servers, including advanced |
15 | POSIX compliance, secure per-user session establishment, high | 17 | security features, excellent parallelized high performance i/o, better |
16 | performance safe distributed caching (oplock), optional packet | 18 | POSIX compliance, secure per-user session establishment, encryption, |
19 | high performance safe distributed caching (leases/oplocks), optional packet | ||
17 | signing, large files, Unicode support and other internationalization | 20 | signing, large files, Unicode support and other internationalization |
18 | improvements. Since both Samba server and this filesystem client support | 21 | improvements. Since both Samba server and this filesystem client support |
19 | the CIFS Unix extensions, the combination can provide a reasonable | 22 | the CIFS Unix extensions (and in the future SMB3 POSIX extensions), |
20 | alternative to NFSv4 for fileserving in some Linux to Linux environments, | 23 | the combination can provide a reasonable alternative to other network and |
21 | not just in Linux to Windows environments. | 24 | cluster file systems for fileserving in some Linux to Linux environments, |
25 | not just in Linux to Windows (or Linux to Mac) environments. | ||
22 | 26 | ||
23 | This filesystem has an mount utility (mount.cifs) that can be obtained from | 27 | This filesystem has an mount utility (mount.cifs) that can be obtained from |
24 | 28 | ||
diff --git a/Documentation/filesystems/overlayfs.txt b/Documentation/filesystems/overlayfs.txt index 36f528a7fdd6..8caa60734647 100644 --- a/Documentation/filesystems/overlayfs.txt +++ b/Documentation/filesystems/overlayfs.txt | |||
@@ -210,8 +210,11 @@ path as another overlay mount and it may use a lower layer path that is | |||
210 | beneath or above the path of another overlay lower layer path. | 210 | beneath or above the path of another overlay lower layer path. |
211 | 211 | ||
212 | Using an upper layer path and/or a workdir path that are already used by | 212 | Using an upper layer path and/or a workdir path that are already used by |
213 | another overlay mount is not allowed and will fail with EBUSY. Using | 213 | another overlay mount is not allowed and may fail with EBUSY. Using |
214 | partially overlapping paths is not allowed but will not fail with EBUSY. | 214 | partially overlapping paths is not allowed but will not fail with EBUSY. |
215 | If files are accessed from two overlayfs mounts which share or overlap the | ||
216 | upper layer and/or workdir path the behavior of the overlay is undefined, | ||
217 | though it will not result in a crash or deadlock. | ||
215 | 218 | ||
216 | Mounting an overlay using an upper layer path, where the upper layer path | 219 | Mounting an overlay using an upper layer path, where the upper layer path |
217 | was previously used by another mounted overlay in combination with a | 220 | was previously used by another mounted overlay in combination with a |
diff --git a/Documentation/filesystems/sysfs.txt b/Documentation/filesystems/sysfs.txt index 24da7b32c489..9a3658cc399e 100644 --- a/Documentation/filesystems/sysfs.txt +++ b/Documentation/filesystems/sysfs.txt | |||
@@ -366,7 +366,8 @@ struct driver_attribute { | |||
366 | 366 | ||
367 | Declaring: | 367 | Declaring: |
368 | 368 | ||
369 | DRIVER_ATTR(_name, _mode, _show, _store) | 369 | DRIVER_ATTR_RO(_name) |
370 | DRIVER_ATTR_RW(_name) | ||
370 | 371 | ||
371 | Creation/Removal: | 372 | Creation/Removal: |
372 | 373 | ||
diff --git a/Documentation/i2c/busses/i2c-i801 b/Documentation/i2c/busses/i2c-i801 index 0500193434cb..d47702456926 100644 --- a/Documentation/i2c/busses/i2c-i801 +++ b/Documentation/i2c/busses/i2c-i801 | |||
@@ -36,6 +36,7 @@ Supported adapters: | |||
36 | * Intel Gemini Lake (SOC) | 36 | * Intel Gemini Lake (SOC) |
37 | * Intel Cannon Lake-H (PCH) | 37 | * Intel Cannon Lake-H (PCH) |
38 | * Intel Cannon Lake-LP (PCH) | 38 | * Intel Cannon Lake-LP (PCH) |
39 | * Intel Cedar Fork (PCH) | ||
39 | Datasheets: Publicly available at the Intel website | 40 | Datasheets: Publicly available at the Intel website |
40 | 41 | ||
41 | On Intel Patsburg and later chipsets, both the normal host SMBus controller | 42 | On Intel Patsburg and later chipsets, both the normal host SMBus controller |
diff --git a/Documentation/networking/filter.txt b/Documentation/networking/filter.txt index 789b74dbe1d9..87814859cfc2 100644 --- a/Documentation/networking/filter.txt +++ b/Documentation/networking/filter.txt | |||
@@ -337,7 +337,7 @@ Examples for low-level BPF: | |||
337 | jeq #14, good /* __NR_rt_sigprocmask */ | 337 | jeq #14, good /* __NR_rt_sigprocmask */ |
338 | jeq #13, good /* __NR_rt_sigaction */ | 338 | jeq #13, good /* __NR_rt_sigaction */ |
339 | jeq #35, good /* __NR_nanosleep */ | 339 | jeq #35, good /* __NR_nanosleep */ |
340 | bad: ret #0 /* SECCOMP_RET_KILL */ | 340 | bad: ret #0 /* SECCOMP_RET_KILL_THREAD */ |
341 | good: ret #0x7fff0000 /* SECCOMP_RET_ALLOW */ | 341 | good: ret #0x7fff0000 /* SECCOMP_RET_ALLOW */ |
342 | 342 | ||
343 | The above example code can be placed into a file (here called "foo"), and | 343 | The above example code can be placed into a file (here called "foo"), and |
diff --git a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt index b3345d0fe0a6..77f4de59dc9c 100644 --- a/Documentation/networking/ip-sysctl.txt +++ b/Documentation/networking/ip-sysctl.txt | |||
@@ -1680,6 +1680,9 @@ accept_dad - INTEGER | |||
1680 | 2: Enable DAD, and disable IPv6 operation if MAC-based duplicate | 1680 | 2: Enable DAD, and disable IPv6 operation if MAC-based duplicate |
1681 | link-local address has been found. | 1681 | link-local address has been found. |
1682 | 1682 | ||
1683 | DAD operation and mode on a given interface will be selected according | ||
1684 | to the maximum value of conf/{all,interface}/accept_dad. | ||
1685 | |||
1683 | force_tllao - BOOLEAN | 1686 | force_tllao - BOOLEAN |
1684 | Enable sending the target link-layer address option even when | 1687 | Enable sending the target link-layer address option even when |
1685 | responding to a unicast neighbor solicitation. | 1688 | responding to a unicast neighbor solicitation. |
@@ -1727,16 +1730,23 @@ suppress_frag_ndisc - INTEGER | |||
1727 | 1730 | ||
1728 | optimistic_dad - BOOLEAN | 1731 | optimistic_dad - BOOLEAN |
1729 | Whether to perform Optimistic Duplicate Address Detection (RFC 4429). | 1732 | Whether to perform Optimistic Duplicate Address Detection (RFC 4429). |
1730 | 0: disabled (default) | 1733 | 0: disabled (default) |
1731 | 1: enabled | 1734 | 1: enabled |
1735 | |||
1736 | Optimistic Duplicate Address Detection for the interface will be enabled | ||
1737 | if at least one of conf/{all,interface}/optimistic_dad is set to 1, | ||
1738 | it will be disabled otherwise. | ||
1732 | 1739 | ||
1733 | use_optimistic - BOOLEAN | 1740 | use_optimistic - BOOLEAN |
1734 | If enabled, do not classify optimistic addresses as deprecated during | 1741 | If enabled, do not classify optimistic addresses as deprecated during |
1735 | source address selection. Preferred addresses will still be chosen | 1742 | source address selection. Preferred addresses will still be chosen |
1736 | before optimistic addresses, subject to other ranking in the source | 1743 | before optimistic addresses, subject to other ranking in the source |
1737 | address selection algorithm. | 1744 | address selection algorithm. |
1738 | 0: disabled (default) | 1745 | 0: disabled (default) |
1739 | 1: enabled | 1746 | 1: enabled |
1747 | |||
1748 | This will be enabled if at least one of | ||
1749 | conf/{all,interface}/use_optimistic is set to 1, disabled otherwise. | ||
1740 | 1750 | ||
1741 | stable_secret - IPv6 address | 1751 | stable_secret - IPv6 address |
1742 | This IPv6 address will be used as a secret to generate IPv6 | 1752 | This IPv6 address will be used as a secret to generate IPv6 |
diff --git a/Documentation/networking/switchdev.txt b/Documentation/networking/switchdev.txt index 5e40e1f68873..82236a17b5e6 100644 --- a/Documentation/networking/switchdev.txt +++ b/Documentation/networking/switchdev.txt | |||
@@ -13,42 +13,42 @@ an example setup using a data-center-class switch ASIC chip. Other setups | |||
13 | with SR-IOV or soft switches, such as OVS, are possible. | 13 | with SR-IOV or soft switches, such as OVS, are possible. |
14 | 14 | ||
15 | 15 | ||
16 | User-space tools | 16 | User-space tools |
17 | 17 | ||
18 | user space | | 18 | user space | |
19 | +-------------------------------------------------------------------+ | 19 | +-------------------------------------------------------------------+ |
20 | kernel | Netlink | 20 | kernel | Netlink |
21 | | | 21 | | |
22 | +--------------+-------------------------------+ | 22 | +--------------+-------------------------------+ |
23 | | Network stack | | 23 | | Network stack | |
24 | | (Linux) | | 24 | | (Linux) | |
25 | | | | 25 | | | |
26 | +----------------------------------------------+ | 26 | +----------------------------------------------+ |
27 | 27 | ||
28 | sw1p2 sw1p4 sw1p6 | 28 | sw1p2 sw1p4 sw1p6 |
29 | sw1p1 + sw1p3 + sw1p5 + eth1 | 29 | sw1p1 + sw1p3 + sw1p5 + eth1 |
30 | + | + | + | + | 30 | + | + | + | + |
31 | | | | | | | | | 31 | | | | | | | | |
32 | +--+----+----+----+-+--+----+---+ +-----+-----+ | 32 | +--+----+----+----+----+----+---+ +-----+-----+ |
33 | | Switch driver | | mgmt | | 33 | | Switch driver | | mgmt | |
34 | | (this document) | | driver | | 34 | | (this document) | | driver | |
35 | | | | | | 35 | | | | | |
36 | +--------------+----------------+ +-----------+ | 36 | +--------------+----------------+ +-----------+ |
37 | | | 37 | | |
38 | kernel | HW bus (eg PCI) | 38 | kernel | HW bus (eg PCI) |
39 | +-------------------------------------------------------------------+ | 39 | +-------------------------------------------------------------------+ |
40 | hardware | | 40 | hardware | |
41 | +--------------+---+------------+ | 41 | +--------------+----------------+ |
42 | | Switch device (sw1) | | 42 | | Switch device (sw1) | |
43 | | +----+ +--------+ | 43 | | +----+ +--------+ |
44 | | | v offloaded data path | mgmt port | 44 | | | v offloaded data path | mgmt port |
45 | | | | | | 45 | | | | | |
46 | +--|----|----+----+----+----+---+ | 46 | +--|----|----+----+----+----+---+ |
47 | | | | | | | | 47 | | | | | | | |
48 | + + + + + + | 48 | + + + + + + |
49 | p1 p2 p3 p4 p5 p6 | 49 | p1 p2 p3 p4 p5 p6 |
50 | 50 | ||
51 | front-panel ports | 51 | front-panel ports |
52 | 52 | ||
53 | 53 | ||
54 | Fig 1. | 54 | Fig 1. |
diff --git a/Documentation/sysctl/kernel.txt b/Documentation/sysctl/kernel.txt index ce61d1fe08ca..694968c7523c 100644 --- a/Documentation/sysctl/kernel.txt +++ b/Documentation/sysctl/kernel.txt | |||
@@ -75,6 +75,7 @@ show up in /proc/sys/kernel: | |||
75 | - reboot-cmd [ SPARC only ] | 75 | - reboot-cmd [ SPARC only ] |
76 | - rtsig-max | 76 | - rtsig-max |
77 | - rtsig-nr | 77 | - rtsig-nr |
78 | - seccomp/ ==> Documentation/userspace-api/seccomp_filter.rst | ||
78 | - sem | 79 | - sem |
79 | - sem_next_id [ sysv ipc ] | 80 | - sem_next_id [ sysv ipc ] |
80 | - sg-big-buff [ generic SCSI device (sg) ] | 81 | - sg-big-buff [ generic SCSI device (sg) ] |
diff --git a/Documentation/userspace-api/seccomp_filter.rst b/Documentation/userspace-api/seccomp_filter.rst index f71eb5ef1f2d..099c412951d6 100644 --- a/Documentation/userspace-api/seccomp_filter.rst +++ b/Documentation/userspace-api/seccomp_filter.rst | |||
@@ -87,11 +87,16 @@ Return values | |||
87 | A seccomp filter may return any of the following values. If multiple | 87 | A seccomp filter may return any of the following values. If multiple |
88 | filters exist, the return value for the evaluation of a given system | 88 | filters exist, the return value for the evaluation of a given system |
89 | call will always use the highest precedent value. (For example, | 89 | call will always use the highest precedent value. (For example, |
90 | ``SECCOMP_RET_KILL`` will always take precedence.) | 90 | ``SECCOMP_RET_KILL_PROCESS`` will always take precedence.) |
91 | 91 | ||
92 | In precedence order, they are: | 92 | In precedence order, they are: |
93 | 93 | ||
94 | ``SECCOMP_RET_KILL``: | 94 | ``SECCOMP_RET_KILL_PROCESS``: |
95 | Results in the entire process exiting immediately without executing | ||
96 | the system call. The exit status of the task (``status & 0x7f``) | ||
97 | will be ``SIGSYS``, not ``SIGKILL``. | ||
98 | |||
99 | ``SECCOMP_RET_KILL_THREAD``: | ||
95 | Results in the task exiting immediately without executing the | 100 | Results in the task exiting immediately without executing the |
96 | system call. The exit status of the task (``status & 0x7f``) will | 101 | system call. The exit status of the task (``status & 0x7f``) will |
97 | be ``SIGSYS``, not ``SIGKILL``. | 102 | be ``SIGSYS``, not ``SIGKILL``. |
@@ -141,6 +146,15 @@ In precedence order, they are: | |||
141 | allow use of ptrace, even of other sandboxed processes, without | 146 | allow use of ptrace, even of other sandboxed processes, without |
142 | extreme care; ptracers can use this mechanism to escape.) | 147 | extreme care; ptracers can use this mechanism to escape.) |
143 | 148 | ||
149 | ``SECCOMP_RET_LOG``: | ||
150 | Results in the system call being executed after it is logged. This | ||
151 | should be used by application developers to learn which syscalls their | ||
152 | application needs without having to iterate through multiple test and | ||
153 | development cycles to build the list. | ||
154 | |||
155 | This action will only be logged if "log" is present in the | ||
156 | actions_logged sysctl string. | ||
157 | |||
144 | ``SECCOMP_RET_ALLOW``: | 158 | ``SECCOMP_RET_ALLOW``: |
145 | Results in the system call being executed. | 159 | Results in the system call being executed. |
146 | 160 | ||
@@ -169,7 +183,41 @@ The ``samples/seccomp/`` directory contains both an x86-specific example | |||
169 | and a more generic example of a higher level macro interface for BPF | 183 | and a more generic example of a higher level macro interface for BPF |
170 | program generation. | 184 | program generation. |
171 | 185 | ||
186 | Sysctls | ||
187 | ======= | ||
172 | 188 | ||
189 | Seccomp's sysctl files can be found in the ``/proc/sys/kernel/seccomp/`` | ||
190 | directory. Here's a description of each file in that directory: | ||
191 | |||
192 | ``actions_avail``: | ||
193 | A read-only ordered list of seccomp return values (refer to the | ||
194 | ``SECCOMP_RET_*`` macros above) in string form. The ordering, from | ||
195 | left-to-right, is the least permissive return value to the most | ||
196 | permissive return value. | ||
197 | |||
198 | The list represents the set of seccomp return values supported | ||
199 | by the kernel. A userspace program may use this list to | ||
200 | determine if the actions found in the ``seccomp.h``, when the | ||
201 | program was built, differs from the set of actions actually | ||
202 | supported in the current running kernel. | ||
203 | |||
204 | ``actions_logged``: | ||
205 | A read-write ordered list of seccomp return values (refer to the | ||
206 | ``SECCOMP_RET_*`` macros above) that are allowed to be logged. Writes | ||
207 | to the file do not need to be in ordered form but reads from the file | ||
208 | will be ordered in the same way as the actions_avail sysctl. | ||
209 | |||
210 | It is important to note that the value of ``actions_logged`` does not | ||
211 | prevent certain actions from being logged when the audit subsystem is | ||
212 | configured to audit a task. If the action is not found in | ||
213 | ``actions_logged`` list, the final decision on whether to audit the | ||
214 | action for that task is ultimately left up to the audit subsystem to | ||
215 | decide for all seccomp return values other than ``SECCOMP_RET_ALLOW``. | ||
216 | |||
217 | The ``allow`` string is not accepted in the ``actions_logged`` sysctl | ||
218 | as it is not possible to log ``SECCOMP_RET_ALLOW`` actions. Attempting | ||
219 | to write ``allow`` to the sysctl will result in an EINVAL being | ||
220 | returned. | ||
173 | 221 | ||
174 | Adding architecture support | 222 | Adding architecture support |
175 | =========================== | 223 | =========================== |
diff --git a/MAINTAINERS b/MAINTAINERS index 2281af4b41b6..2d3d750b19c0 100644 --- a/MAINTAINERS +++ b/MAINTAINERS | |||
@@ -352,6 +352,18 @@ L: linux-acpi@vger.kernel.org | |||
352 | S: Maintained | 352 | S: Maintained |
353 | F: drivers/acpi/arm64 | 353 | F: drivers/acpi/arm64 |
354 | 354 | ||
355 | ACPI PMIC DRIVERS | ||
356 | M: "Rafael J. Wysocki" <rjw@rjwysocki.net> | ||
357 | M: Len Brown <lenb@kernel.org> | ||
358 | R: Andy Shevchenko <andy@infradead.org> | ||
359 | R: Mika Westerberg <mika.westerberg@linux.intel.com> | ||
360 | L: linux-acpi@vger.kernel.org | ||
361 | Q: https://patchwork.kernel.org/project/linux-acpi/list/ | ||
362 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm | ||
363 | B: https://bugzilla.kernel.org | ||
364 | S: Supported | ||
365 | F: drivers/acpi/pmic/ | ||
366 | |||
355 | ACPI THERMAL DRIVER | 367 | ACPI THERMAL DRIVER |
356 | M: Zhang Rui <rui.zhang@intel.com> | 368 | M: Zhang Rui <rui.zhang@intel.com> |
357 | L: linux-acpi@vger.kernel.org | 369 | L: linux-acpi@vger.kernel.org |
@@ -2853,7 +2865,6 @@ S: Supported | |||
2853 | F: drivers/scsi/bnx2i/ | 2865 | F: drivers/scsi/bnx2i/ |
2854 | 2866 | ||
2855 | BROADCOM BNX2X 10 GIGABIT ETHERNET DRIVER | 2867 | BROADCOM BNX2X 10 GIGABIT ETHERNET DRIVER |
2856 | M: Yuval Mintz <Yuval.Mintz@cavium.com> | ||
2857 | M: Ariel Elior <ariel.elior@cavium.com> | 2868 | M: Ariel Elior <ariel.elior@cavium.com> |
2858 | M: everest-linux-l2@cavium.com | 2869 | M: everest-linux-l2@cavium.com |
2859 | L: netdev@vger.kernel.org | 2870 | L: netdev@vger.kernel.org |
@@ -5248,7 +5259,8 @@ S: Maintained | |||
5248 | F: drivers/iommu/exynos-iommu.c | 5259 | F: drivers/iommu/exynos-iommu.c |
5249 | 5260 | ||
5250 | EZchip NPS platform support | 5261 | EZchip NPS platform support |
5251 | M: Noam Camus <noamc@ezchip.com> | 5262 | M: Elad Kanfi <eladkan@mellanox.com> |
5263 | M: Vineet Gupta <vgupta@synopsys.com> | ||
5252 | S: Supported | 5264 | S: Supported |
5253 | F: arch/arc/plat-eznps | 5265 | F: arch/arc/plat-eznps |
5254 | F: arch/arc/boot/dts/eznps.dts | 5266 | F: arch/arc/boot/dts/eznps.dts |
@@ -6643,8 +6655,8 @@ M: Alexander Aring <alex.aring@gmail.com> | |||
6643 | M: Stefan Schmidt <stefan@osg.samsung.com> | 6655 | M: Stefan Schmidt <stefan@osg.samsung.com> |
6644 | L: linux-wpan@vger.kernel.org | 6656 | L: linux-wpan@vger.kernel.org |
6645 | W: http://wpan.cakelab.org/ | 6657 | W: http://wpan.cakelab.org/ |
6646 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/bluetooth/bluetooth.git | 6658 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/sschmidt/wpan.git |
6647 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/bluetooth/bluetooth-next.git | 6659 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/sschmidt/wpan-next.git |
6648 | S: Maintained | 6660 | S: Maintained |
6649 | F: net/ieee802154/ | 6661 | F: net/ieee802154/ |
6650 | F: net/mac802154/ | 6662 | F: net/mac802154/ |
@@ -6727,7 +6739,7 @@ F: Documentation/devicetree/bindings/auxdisplay/img-ascii-lcd.txt | |||
6727 | F: drivers/auxdisplay/img-ascii-lcd.c | 6739 | F: drivers/auxdisplay/img-ascii-lcd.c |
6728 | 6740 | ||
6729 | IMGTEC IR DECODER DRIVER | 6741 | IMGTEC IR DECODER DRIVER |
6730 | M: James Hogan <james.hogan@imgtec.com> | 6742 | M: James Hogan <jhogan@kernel.org> |
6731 | S: Maintained | 6743 | S: Maintained |
6732 | F: drivers/media/rc/img-ir/ | 6744 | F: drivers/media/rc/img-ir/ |
6733 | 6745 | ||
@@ -7551,7 +7563,7 @@ F: arch/arm64/include/asm/kvm* | |||
7551 | F: arch/arm64/kvm/ | 7563 | F: arch/arm64/kvm/ |
7552 | 7564 | ||
7553 | KERNEL VIRTUAL MACHINE FOR MIPS (KVM/mips) | 7565 | KERNEL VIRTUAL MACHINE FOR MIPS (KVM/mips) |
7554 | M: James Hogan <james.hogan@imgtec.com> | 7566 | M: James Hogan <jhogan@kernel.org> |
7555 | L: linux-mips@linux-mips.org | 7567 | L: linux-mips@linux-mips.org |
7556 | S: Supported | 7568 | S: Supported |
7557 | F: arch/mips/include/uapi/asm/kvm* | 7569 | F: arch/mips/include/uapi/asm/kvm* |
@@ -8253,6 +8265,12 @@ L: libertas-dev@lists.infradead.org | |||
8253 | S: Orphan | 8265 | S: Orphan |
8254 | F: drivers/net/wireless/marvell/libertas/ | 8266 | F: drivers/net/wireless/marvell/libertas/ |
8255 | 8267 | ||
8268 | MARVELL MACCHIATOBIN SUPPORT | ||
8269 | M: Russell King <rmk@armlinux.org.uk> | ||
8270 | L: linux-arm-kernel@lists.infradead.org | ||
8271 | S: Maintained | ||
8272 | F: arch/arm64/boot/dts/marvell/armada-8040-mcbin.dts | ||
8273 | |||
8256 | MARVELL MV643XX ETHERNET DRIVER | 8274 | MARVELL MV643XX ETHERNET DRIVER |
8257 | M: Sebastian Hesselbarth <sebastian.hesselbarth@gmail.com> | 8275 | M: Sebastian Hesselbarth <sebastian.hesselbarth@gmail.com> |
8258 | L: netdev@vger.kernel.org | 8276 | L: netdev@vger.kernel.org |
@@ -8586,6 +8604,12 @@ M: Sean Wang <sean.wang@mediatek.com> | |||
8586 | S: Maintained | 8604 | S: Maintained |
8587 | F: drivers/media/rc/mtk-cir.c | 8605 | F: drivers/media/rc/mtk-cir.c |
8588 | 8606 | ||
8607 | MEDIATEK PMIC LED DRIVER | ||
8608 | M: Sean Wang <sean.wang@mediatek.com> | ||
8609 | S: Maintained | ||
8610 | F: drivers/leds/leds-mt6323.c | ||
8611 | F: Documentation/devicetree/bindings/leds/leds-mt6323.txt | ||
8612 | |||
8589 | MEDIATEK ETHERNET DRIVER | 8613 | MEDIATEK ETHERNET DRIVER |
8590 | M: Felix Fietkau <nbd@openwrt.org> | 8614 | M: Felix Fietkau <nbd@openwrt.org> |
8591 | M: John Crispin <john@phrozen.org> | 8615 | M: John Crispin <john@phrozen.org> |
@@ -8868,7 +8892,7 @@ F: Documentation/devicetree/bindings/media/meson-ao-cec.txt | |||
8868 | T: git git://linuxtv.org/media_tree.git | 8892 | T: git git://linuxtv.org/media_tree.git |
8869 | 8893 | ||
8870 | METAG ARCHITECTURE | 8894 | METAG ARCHITECTURE |
8871 | M: James Hogan <james.hogan@imgtec.com> | 8895 | M: James Hogan <jhogan@kernel.org> |
8872 | L: linux-metag@vger.kernel.org | 8896 | L: linux-metag@vger.kernel.org |
8873 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/jhogan/metag.git | 8897 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/jhogan/metag.git |
8874 | S: Odd Fixes | 8898 | S: Odd Fixes |
@@ -9337,7 +9361,7 @@ NETWORK BLOCK DEVICE (NBD) | |||
9337 | M: Josef Bacik <jbacik@fb.com> | 9361 | M: Josef Bacik <jbacik@fb.com> |
9338 | S: Maintained | 9362 | S: Maintained |
9339 | L: linux-block@vger.kernel.org | 9363 | L: linux-block@vger.kernel.org |
9340 | L: nbd-general@lists.sourceforge.net | 9364 | L: nbd@other.debian.org |
9341 | F: Documentation/blockdev/nbd.txt | 9365 | F: Documentation/blockdev/nbd.txt |
9342 | F: drivers/block/nbd.c | 9366 | F: drivers/block/nbd.c |
9343 | F: include/uapi/linux/nbd.h | 9367 | F: include/uapi/linux/nbd.h |
@@ -11047,7 +11071,6 @@ S: Supported | |||
11047 | F: drivers/scsi/qedi/ | 11071 | F: drivers/scsi/qedi/ |
11048 | 11072 | ||
11049 | QLOGIC QL4xxx ETHERNET DRIVER | 11073 | QLOGIC QL4xxx ETHERNET DRIVER |
11050 | M: Yuval Mintz <Yuval.Mintz@cavium.com> | ||
11051 | M: Ariel Elior <Ariel.Elior@cavium.com> | 11074 | M: Ariel Elior <Ariel.Elior@cavium.com> |
11052 | M: everest-linux-l2@cavium.com | 11075 | M: everest-linux-l2@cavium.com |
11053 | L: netdev@vger.kernel.org | 11076 | L: netdev@vger.kernel.org |
@@ -12915,9 +12938,9 @@ F: drivers/mmc/host/dw_mmc* | |||
12915 | SYNOPSYS HSDK RESET CONTROLLER DRIVER | 12938 | SYNOPSYS HSDK RESET CONTROLLER DRIVER |
12916 | M: Eugeniy Paltsev <Eugeniy.Paltsev@synopsys.com> | 12939 | M: Eugeniy Paltsev <Eugeniy.Paltsev@synopsys.com> |
12917 | S: Supported | 12940 | S: Supported |
12918 | F: drivers/reset/reset-hsdk-v1.c | 12941 | F: drivers/reset/reset-hsdk.c |
12919 | F: include/dt-bindings/reset/snps,hsdk-v1-reset.h | 12942 | F: include/dt-bindings/reset/snps,hsdk-reset.h |
12920 | F: Documentation/devicetree/bindings/reset/snps,hsdk-v1-reset.txt | 12943 | F: Documentation/devicetree/bindings/reset/snps,hsdk-reset.txt |
12921 | 12944 | ||
12922 | SYSTEM CONFIGURATION (SYSCON) | 12945 | SYSTEM CONFIGURATION (SYSCON) |
12923 | M: Lee Jones <lee.jones@linaro.org> | 12946 | M: Lee Jones <lee.jones@linaro.org> |
@@ -1,7 +1,7 @@ | |||
1 | VERSION = 4 | 1 | VERSION = 4 |
2 | PATCHLEVEL = 14 | 2 | PATCHLEVEL = 14 |
3 | SUBLEVEL = 0 | 3 | SUBLEVEL = 0 |
4 | EXTRAVERSION = -rc1 | 4 | EXTRAVERSION = -rc4 |
5 | NAME = Fearless Coyote | 5 | NAME = Fearless Coyote |
6 | 6 | ||
7 | # *DOCUMENTATION* | 7 | # *DOCUMENTATION* |
@@ -1172,11 +1172,11 @@ headers_check: headers_install | |||
1172 | 1172 | ||
1173 | PHONY += kselftest | 1173 | PHONY += kselftest |
1174 | kselftest: | 1174 | kselftest: |
1175 | $(Q)$(MAKE) -C tools/testing/selftests run_tests | 1175 | $(Q)$(MAKE) -C $(srctree)/tools/testing/selftests run_tests |
1176 | 1176 | ||
1177 | PHONY += kselftest-clean | 1177 | PHONY += kselftest-clean |
1178 | kselftest-clean: | 1178 | kselftest-clean: |
1179 | $(Q)$(MAKE) -C tools/testing/selftests clean | 1179 | $(Q)$(MAKE) -C $(srctree)/tools/testing/selftests clean |
1180 | 1180 | ||
1181 | PHONY += kselftest-merge | 1181 | PHONY += kselftest-merge |
1182 | kselftest-merge: | 1182 | kselftest-merge: |
diff --git a/arch/alpha/include/asm/mmu_context.h b/arch/alpha/include/asm/mmu_context.h index 384bd47b5187..45c020a0fe76 100644 --- a/arch/alpha/include/asm/mmu_context.h +++ b/arch/alpha/include/asm/mmu_context.h | |||
@@ -8,6 +8,7 @@ | |||
8 | */ | 8 | */ |
9 | 9 | ||
10 | #include <linux/mm_types.h> | 10 | #include <linux/mm_types.h> |
11 | #include <linux/sched.h> | ||
11 | 12 | ||
12 | #include <asm/machvec.h> | 13 | #include <asm/machvec.h> |
13 | #include <asm/compiler.h> | 14 | #include <asm/compiler.h> |
diff --git a/arch/arc/Kconfig b/arch/arc/Kconfig index a598641eed98..c84e67fdea09 100644 --- a/arch/arc/Kconfig +++ b/arch/arc/Kconfig | |||
@@ -24,7 +24,7 @@ config ARC | |||
24 | select GENERIC_SMP_IDLE_THREAD | 24 | select GENERIC_SMP_IDLE_THREAD |
25 | select HAVE_ARCH_KGDB | 25 | select HAVE_ARCH_KGDB |
26 | select HAVE_ARCH_TRACEHOOK | 26 | select HAVE_ARCH_TRACEHOOK |
27 | select HAVE_FUTEX_CMPXCHG | 27 | select HAVE_FUTEX_CMPXCHG if FUTEX |
28 | select HAVE_IOREMAP_PROT | 28 | select HAVE_IOREMAP_PROT |
29 | select HAVE_KPROBES | 29 | select HAVE_KPROBES |
30 | select HAVE_KRETPROBES | 30 | select HAVE_KRETPROBES |
diff --git a/arch/arc/Makefile b/arch/arc/Makefile index 3a4b52b7e09d..d37f49d6a27f 100644 --- a/arch/arc/Makefile +++ b/arch/arc/Makefile | |||
@@ -6,8 +6,6 @@ | |||
6 | # published by the Free Software Foundation. | 6 | # published by the Free Software Foundation. |
7 | # | 7 | # |
8 | 8 | ||
9 | UTS_MACHINE := arc | ||
10 | |||
11 | ifeq ($(CROSS_COMPILE),) | 9 | ifeq ($(CROSS_COMPILE),) |
12 | ifndef CONFIG_CPU_BIG_ENDIAN | 10 | ifndef CONFIG_CPU_BIG_ENDIAN |
13 | CROSS_COMPILE := arc-linux- | 11 | CROSS_COMPILE := arc-linux- |
diff --git a/arch/arc/boot/dts/axs10x_mb.dtsi b/arch/arc/boot/dts/axs10x_mb.dtsi index 2367a67c5f10..e114000a84f5 100644 --- a/arch/arc/boot/dts/axs10x_mb.dtsi +++ b/arch/arc/boot/dts/axs10x_mb.dtsi | |||
@@ -44,7 +44,14 @@ | |||
44 | 44 | ||
45 | mmcclk: mmcclk { | 45 | mmcclk: mmcclk { |
46 | compatible = "fixed-clock"; | 46 | compatible = "fixed-clock"; |
47 | clock-frequency = <50000000>; | 47 | /* |
48 | * DW sdio controller has external ciu clock divider | ||
49 | * controlled via register in SDIO IP. It divides | ||
50 | * sdio_ref_clk (which comes from CGU) by 16 for | ||
51 | * default. So default mmcclk clock (which comes | ||
52 | * to sdk_in) is 25000000 Hz. | ||
53 | */ | ||
54 | clock-frequency = <25000000>; | ||
48 | #clock-cells = <0>; | 55 | #clock-cells = <0>; |
49 | }; | 56 | }; |
50 | 57 | ||
diff --git a/arch/arc/boot/dts/hsdk.dts b/arch/arc/boot/dts/hsdk.dts index 229d13adbce4..8adde1b492f1 100644 --- a/arch/arc/boot/dts/hsdk.dts +++ b/arch/arc/boot/dts/hsdk.dts | |||
@@ -12,6 +12,7 @@ | |||
12 | /dts-v1/; | 12 | /dts-v1/; |
13 | 13 | ||
14 | #include <dt-bindings/net/ti-dp83867.h> | 14 | #include <dt-bindings/net/ti-dp83867.h> |
15 | #include <dt-bindings/reset/snps,hsdk-reset.h> | ||
15 | 16 | ||
16 | / { | 17 | / { |
17 | model = "snps,hsdk"; | 18 | model = "snps,hsdk"; |
@@ -57,10 +58,10 @@ | |||
57 | }; | 58 | }; |
58 | }; | 59 | }; |
59 | 60 | ||
60 | core_clk: core-clk { | 61 | input_clk: input-clk { |
61 | #clock-cells = <0>; | 62 | #clock-cells = <0>; |
62 | compatible = "fixed-clock"; | 63 | compatible = "fixed-clock"; |
63 | clock-frequency = <500000000>; | 64 | clock-frequency = <33333333>; |
64 | }; | 65 | }; |
65 | 66 | ||
66 | cpu_intc: cpu-interrupt-controller { | 67 | cpu_intc: cpu-interrupt-controller { |
@@ -102,6 +103,19 @@ | |||
102 | 103 | ||
103 | ranges = <0x00000000 0xf0000000 0x10000000>; | 104 | ranges = <0x00000000 0xf0000000 0x10000000>; |
104 | 105 | ||
106 | cgu_rst: reset-controller@8a0 { | ||
107 | compatible = "snps,hsdk-reset"; | ||
108 | #reset-cells = <1>; | ||
109 | reg = <0x8A0 0x4>, <0xFF0 0x4>; | ||
110 | }; | ||
111 | |||
112 | core_clk: core-clk@0 { | ||
113 | compatible = "snps,hsdk-core-pll-clock"; | ||
114 | reg = <0x00 0x10>, <0x14B8 0x4>; | ||
115 | #clock-cells = <0>; | ||
116 | clocks = <&input_clk>; | ||
117 | }; | ||
118 | |||
105 | serial: serial@5000 { | 119 | serial: serial@5000 { |
106 | compatible = "snps,dw-apb-uart"; | 120 | compatible = "snps,dw-apb-uart"; |
107 | reg = <0x5000 0x100>; | 121 | reg = <0x5000 0x100>; |
@@ -120,7 +134,17 @@ | |||
120 | 134 | ||
121 | mmcclk_ciu: mmcclk-ciu { | 135 | mmcclk_ciu: mmcclk-ciu { |
122 | compatible = "fixed-clock"; | 136 | compatible = "fixed-clock"; |
123 | clock-frequency = <100000000>; | 137 | /* |
138 | * DW sdio controller has external ciu clock divider | ||
139 | * controlled via register in SDIO IP. Due to its | ||
140 | * unexpected default value (it should devide by 1 | ||
141 | * but it devides by 8) SDIO IP uses wrong clock and | ||
142 | * works unstable (see STAR 9001204800) | ||
143 | * So add temporary fix and change clock frequency | ||
144 | * from 100000000 to 12500000 Hz until we fix dw sdio | ||
145 | * driver itself. | ||
146 | */ | ||
147 | clock-frequency = <12500000>; | ||
124 | #clock-cells = <0>; | 148 | #clock-cells = <0>; |
125 | }; | 149 | }; |
126 | 150 | ||
@@ -141,6 +165,8 @@ | |||
141 | clocks = <&gmacclk>; | 165 | clocks = <&gmacclk>; |
142 | clock-names = "stmmaceth"; | 166 | clock-names = "stmmaceth"; |
143 | phy-handle = <&phy0>; | 167 | phy-handle = <&phy0>; |
168 | resets = <&cgu_rst HSDK_ETH_RESET>; | ||
169 | reset-names = "stmmaceth"; | ||
144 | 170 | ||
145 | mdio { | 171 | mdio { |
146 | #address-cells = <1>; | 172 | #address-cells = <1>; |
diff --git a/arch/arc/configs/axs101_defconfig b/arch/arc/configs/axs101_defconfig index 6980b966a364..ec7c849a5c8e 100644 --- a/arch/arc/configs/axs101_defconfig +++ b/arch/arc/configs/axs101_defconfig | |||
@@ -105,7 +105,7 @@ CONFIG_NLS_ISO8859_1=y | |||
105 | # CONFIG_ENABLE_WARN_DEPRECATED is not set | 105 | # CONFIG_ENABLE_WARN_DEPRECATED is not set |
106 | # CONFIG_ENABLE_MUST_CHECK is not set | 106 | # CONFIG_ENABLE_MUST_CHECK is not set |
107 | CONFIG_STRIP_ASM_SYMS=y | 107 | CONFIG_STRIP_ASM_SYMS=y |
108 | CONFIG_LOCKUP_DETECTOR=y | 108 | CONFIG_SOFTLOCKUP_DETECTOR=y |
109 | CONFIG_DEFAULT_HUNG_TASK_TIMEOUT=10 | 109 | CONFIG_DEFAULT_HUNG_TASK_TIMEOUT=10 |
110 | # CONFIG_SCHED_DEBUG is not set | 110 | # CONFIG_SCHED_DEBUG is not set |
111 | # CONFIG_DEBUG_PREEMPT is not set | 111 | # CONFIG_DEBUG_PREEMPT is not set |
diff --git a/arch/arc/configs/axs103_defconfig b/arch/arc/configs/axs103_defconfig index 2233f5777a71..63d3cf69e0b0 100644 --- a/arch/arc/configs/axs103_defconfig +++ b/arch/arc/configs/axs103_defconfig | |||
@@ -104,7 +104,7 @@ CONFIG_NLS_ISO8859_1=y | |||
104 | # CONFIG_ENABLE_WARN_DEPRECATED is not set | 104 | # CONFIG_ENABLE_WARN_DEPRECATED is not set |
105 | # CONFIG_ENABLE_MUST_CHECK is not set | 105 | # CONFIG_ENABLE_MUST_CHECK is not set |
106 | CONFIG_STRIP_ASM_SYMS=y | 106 | CONFIG_STRIP_ASM_SYMS=y |
107 | CONFIG_LOCKUP_DETECTOR=y | 107 | CONFIG_SOFTLOCKUP_DETECTOR=y |
108 | CONFIG_DEFAULT_HUNG_TASK_TIMEOUT=10 | 108 | CONFIG_DEFAULT_HUNG_TASK_TIMEOUT=10 |
109 | # CONFIG_SCHED_DEBUG is not set | 109 | # CONFIG_SCHED_DEBUG is not set |
110 | # CONFIG_DEBUG_PREEMPT is not set | 110 | # CONFIG_DEBUG_PREEMPT is not set |
diff --git a/arch/arc/configs/axs103_smp_defconfig b/arch/arc/configs/axs103_smp_defconfig index 30a3d4cf53d2..f613ecac14a7 100644 --- a/arch/arc/configs/axs103_smp_defconfig +++ b/arch/arc/configs/axs103_smp_defconfig | |||
@@ -107,7 +107,7 @@ CONFIG_NLS_ISO8859_1=y | |||
107 | # CONFIG_ENABLE_WARN_DEPRECATED is not set | 107 | # CONFIG_ENABLE_WARN_DEPRECATED is not set |
108 | # CONFIG_ENABLE_MUST_CHECK is not set | 108 | # CONFIG_ENABLE_MUST_CHECK is not set |
109 | CONFIG_STRIP_ASM_SYMS=y | 109 | CONFIG_STRIP_ASM_SYMS=y |
110 | CONFIG_LOCKUP_DETECTOR=y | 110 | CONFIG_SOFTLOCKUP_DETECTOR=y |
111 | CONFIG_DEFAULT_HUNG_TASK_TIMEOUT=10 | 111 | CONFIG_DEFAULT_HUNG_TASK_TIMEOUT=10 |
112 | # CONFIG_SCHED_DEBUG is not set | 112 | # CONFIG_SCHED_DEBUG is not set |
113 | # CONFIG_DEBUG_PREEMPT is not set | 113 | # CONFIG_DEBUG_PREEMPT is not set |
diff --git a/arch/arc/configs/haps_hs_smp_defconfig b/arch/arc/configs/haps_hs_smp_defconfig index 821a2e562f3f..3507be2af6fe 100644 --- a/arch/arc/configs/haps_hs_smp_defconfig +++ b/arch/arc/configs/haps_hs_smp_defconfig | |||
@@ -84,5 +84,5 @@ CONFIG_TMPFS=y | |||
84 | CONFIG_NFS_FS=y | 84 | CONFIG_NFS_FS=y |
85 | # CONFIG_ENABLE_WARN_DEPRECATED is not set | 85 | # CONFIG_ENABLE_WARN_DEPRECATED is not set |
86 | # CONFIG_ENABLE_MUST_CHECK is not set | 86 | # CONFIG_ENABLE_MUST_CHECK is not set |
87 | CONFIG_LOCKUP_DETECTOR=y | 87 | CONFIG_SOFTLOCKUP_DETECTOR=y |
88 | # CONFIG_DEBUG_PREEMPT is not set | 88 | # CONFIG_DEBUG_PREEMPT is not set |
diff --git a/arch/arc/configs/hsdk_defconfig b/arch/arc/configs/hsdk_defconfig index 9a3fcf446388..15f0f6b5fec1 100644 --- a/arch/arc/configs/hsdk_defconfig +++ b/arch/arc/configs/hsdk_defconfig | |||
@@ -63,6 +63,7 @@ CONFIG_MMC_SDHCI=y | |||
63 | CONFIG_MMC_SDHCI_PLTFM=y | 63 | CONFIG_MMC_SDHCI_PLTFM=y |
64 | CONFIG_MMC_DW=y | 64 | CONFIG_MMC_DW=y |
65 | # CONFIG_IOMMU_SUPPORT is not set | 65 | # CONFIG_IOMMU_SUPPORT is not set |
66 | CONFIG_RESET_HSDK=y | ||
66 | CONFIG_EXT3_FS=y | 67 | CONFIG_EXT3_FS=y |
67 | CONFIG_VFAT_FS=y | 68 | CONFIG_VFAT_FS=y |
68 | CONFIG_TMPFS=y | 69 | CONFIG_TMPFS=y |
@@ -72,7 +73,7 @@ CONFIG_NLS_ISO8859_1=y | |||
72 | # CONFIG_ENABLE_WARN_DEPRECATED is not set | 73 | # CONFIG_ENABLE_WARN_DEPRECATED is not set |
73 | # CONFIG_ENABLE_MUST_CHECK is not set | 74 | # CONFIG_ENABLE_MUST_CHECK is not set |
74 | CONFIG_STRIP_ASM_SYMS=y | 75 | CONFIG_STRIP_ASM_SYMS=y |
75 | CONFIG_LOCKUP_DETECTOR=y | 76 | CONFIG_SOFTLOCKUP_DETECTOR=y |
76 | CONFIG_DEFAULT_HUNG_TASK_TIMEOUT=10 | 77 | CONFIG_DEFAULT_HUNG_TASK_TIMEOUT=10 |
77 | # CONFIG_SCHED_DEBUG is not set | 78 | # CONFIG_SCHED_DEBUG is not set |
78 | # CONFIG_DEBUG_PREEMPT is not set | 79 | # CONFIG_DEBUG_PREEMPT is not set |
diff --git a/arch/arc/configs/vdk_hs38_defconfig b/arch/arc/configs/vdk_hs38_defconfig index c0d6a010751a..4fcf4f2503f6 100644 --- a/arch/arc/configs/vdk_hs38_defconfig +++ b/arch/arc/configs/vdk_hs38_defconfig | |||
@@ -94,7 +94,7 @@ CONFIG_NLS_ISO8859_1=y | |||
94 | # CONFIG_ENABLE_MUST_CHECK is not set | 94 | # CONFIG_ENABLE_MUST_CHECK is not set |
95 | CONFIG_STRIP_ASM_SYMS=y | 95 | CONFIG_STRIP_ASM_SYMS=y |
96 | CONFIG_DEBUG_SHIRQ=y | 96 | CONFIG_DEBUG_SHIRQ=y |
97 | CONFIG_LOCKUP_DETECTOR=y | 97 | CONFIG_SOFTLOCKUP_DETECTOR=y |
98 | CONFIG_DEFAULT_HUNG_TASK_TIMEOUT=10 | 98 | CONFIG_DEFAULT_HUNG_TASK_TIMEOUT=10 |
99 | # CONFIG_SCHED_DEBUG is not set | 99 | # CONFIG_SCHED_DEBUG is not set |
100 | # CONFIG_DEBUG_PREEMPT is not set | 100 | # CONFIG_DEBUG_PREEMPT is not set |
diff --git a/arch/arc/configs/vdk_hs38_smp_defconfig b/arch/arc/configs/vdk_hs38_smp_defconfig index 5c0971787acf..7b71464f6c2f 100644 --- a/arch/arc/configs/vdk_hs38_smp_defconfig +++ b/arch/arc/configs/vdk_hs38_smp_defconfig | |||
@@ -98,7 +98,7 @@ CONFIG_NLS_ISO8859_1=y | |||
98 | # CONFIG_ENABLE_MUST_CHECK is not set | 98 | # CONFIG_ENABLE_MUST_CHECK is not set |
99 | CONFIG_STRIP_ASM_SYMS=y | 99 | CONFIG_STRIP_ASM_SYMS=y |
100 | CONFIG_DEBUG_SHIRQ=y | 100 | CONFIG_DEBUG_SHIRQ=y |
101 | CONFIG_LOCKUP_DETECTOR=y | 101 | CONFIG_SOFTLOCKUP_DETECTOR=y |
102 | CONFIG_DEFAULT_HUNG_TASK_TIMEOUT=10 | 102 | CONFIG_DEFAULT_HUNG_TASK_TIMEOUT=10 |
103 | # CONFIG_SCHED_DEBUG is not set | 103 | # CONFIG_SCHED_DEBUG is not set |
104 | # CONFIG_DEBUG_PREEMPT is not set | 104 | # CONFIG_DEBUG_PREEMPT is not set |
diff --git a/arch/arc/include/asm/arcregs.h b/arch/arc/include/asm/arcregs.h index ba8e802dba80..b1c56d35f2a9 100644 --- a/arch/arc/include/asm/arcregs.h +++ b/arch/arc/include/asm/arcregs.h | |||
@@ -98,6 +98,7 @@ | |||
98 | 98 | ||
99 | /* Auxiliary registers */ | 99 | /* Auxiliary registers */ |
100 | #define AUX_IDENTITY 4 | 100 | #define AUX_IDENTITY 4 |
101 | #define AUX_EXEC_CTRL 8 | ||
101 | #define AUX_INTR_VEC_BASE 0x25 | 102 | #define AUX_INTR_VEC_BASE 0x25 |
102 | #define AUX_VOL 0x5e | 103 | #define AUX_VOL 0x5e |
103 | 104 | ||
@@ -135,12 +136,12 @@ struct bcr_identity { | |||
135 | #endif | 136 | #endif |
136 | }; | 137 | }; |
137 | 138 | ||
138 | struct bcr_isa { | 139 | struct bcr_isa_arcv2 { |
139 | #ifdef CONFIG_CPU_BIG_ENDIAN | 140 | #ifdef CONFIG_CPU_BIG_ENDIAN |
140 | unsigned int div_rem:4, pad2:4, ldd:1, unalign:1, atomic:1, be:1, | 141 | unsigned int div_rem:4, pad2:4, ldd:1, unalign:1, atomic:1, be:1, |
141 | pad1:11, atomic1:1, ver:8; | 142 | pad1:12, ver:8; |
142 | #else | 143 | #else |
143 | unsigned int ver:8, atomic1:1, pad1:11, be:1, atomic:1, unalign:1, | 144 | unsigned int ver:8, pad1:12, be:1, atomic:1, unalign:1, |
144 | ldd:1, pad2:4, div_rem:4; | 145 | ldd:1, pad2:4, div_rem:4; |
145 | #endif | 146 | #endif |
146 | }; | 147 | }; |
@@ -263,13 +264,13 @@ struct cpuinfo_arc { | |||
263 | struct cpuinfo_arc_mmu mmu; | 264 | struct cpuinfo_arc_mmu mmu; |
264 | struct cpuinfo_arc_bpu bpu; | 265 | struct cpuinfo_arc_bpu bpu; |
265 | struct bcr_identity core; | 266 | struct bcr_identity core; |
266 | struct bcr_isa isa; | 267 | struct bcr_isa_arcv2 isa; |
267 | const char *details, *name; | 268 | const char *details, *name; |
268 | unsigned int vec_base; | 269 | unsigned int vec_base; |
269 | struct cpuinfo_arc_ccm iccm, dccm; | 270 | struct cpuinfo_arc_ccm iccm, dccm; |
270 | struct { | 271 | struct { |
271 | unsigned int swap:1, norm:1, minmax:1, barrel:1, crc:1, swape:1, pad1:2, | 272 | unsigned int swap:1, norm:1, minmax:1, barrel:1, crc:1, swape:1, pad1:2, |
272 | fpu_sp:1, fpu_dp:1, pad2:6, | 273 | fpu_sp:1, fpu_dp:1, dual_iss_enb:1, dual_iss_exist:1, pad2:4, |
273 | debug:1, ap:1, smart:1, rtt:1, pad3:4, | 274 | debug:1, ap:1, smart:1, rtt:1, pad3:4, |
274 | timer0:1, timer1:1, rtc:1, gfrc:1, pad4:4; | 275 | timer0:1, timer1:1, rtc:1, gfrc:1, pad4:4; |
275 | } extn; | 276 | } extn; |
diff --git a/arch/arc/include/asm/processor.h b/arch/arc/include/asm/processor.h index d400a2161935..8ee41e988169 100644 --- a/arch/arc/include/asm/processor.h +++ b/arch/arc/include/asm/processor.h | |||
@@ -78,9 +78,6 @@ struct task_struct; | |||
78 | 78 | ||
79 | #endif | 79 | #endif |
80 | 80 | ||
81 | #define copy_segments(tsk, mm) do { } while (0) | ||
82 | #define release_segments(mm) do { } while (0) | ||
83 | |||
84 | #define KSTK_EIP(tsk) (task_pt_regs(tsk)->ret) | 81 | #define KSTK_EIP(tsk) (task_pt_regs(tsk)->ret) |
85 | #define KSTK_ESP(tsk) (task_pt_regs(tsk)->sp) | 82 | #define KSTK_ESP(tsk) (task_pt_regs(tsk)->sp) |
86 | 83 | ||
diff --git a/arch/arc/kernel/setup.c b/arch/arc/kernel/setup.c index 877cec8f5ea2..fb83844daeea 100644 --- a/arch/arc/kernel/setup.c +++ b/arch/arc/kernel/setup.c | |||
@@ -51,6 +51,7 @@ static const struct id_to_str arc_cpu_rel[] = { | |||
51 | { 0x51, "R2.0" }, | 51 | { 0x51, "R2.0" }, |
52 | { 0x52, "R2.1" }, | 52 | { 0x52, "R2.1" }, |
53 | { 0x53, "R3.0" }, | 53 | { 0x53, "R3.0" }, |
54 | { 0x54, "R4.0" }, | ||
54 | #endif | 55 | #endif |
55 | { 0x00, NULL } | 56 | { 0x00, NULL } |
56 | }; | 57 | }; |
@@ -62,6 +63,7 @@ static const struct id_to_str arc_cpu_nm[] = { | |||
62 | #else | 63 | #else |
63 | { 0x40, "ARC EM" }, | 64 | { 0x40, "ARC EM" }, |
64 | { 0x50, "ARC HS38" }, | 65 | { 0x50, "ARC HS38" }, |
66 | { 0x54, "ARC HS48" }, | ||
65 | #endif | 67 | #endif |
66 | { 0x00, "Unknown" } | 68 | { 0x00, "Unknown" } |
67 | }; | 69 | }; |
@@ -119,11 +121,11 @@ static void read_arc_build_cfg_regs(void) | |||
119 | struct bcr_generic bcr; | 121 | struct bcr_generic bcr; |
120 | struct cpuinfo_arc *cpu = &cpuinfo_arc700[smp_processor_id()]; | 122 | struct cpuinfo_arc *cpu = &cpuinfo_arc700[smp_processor_id()]; |
121 | const struct id_to_str *tbl; | 123 | const struct id_to_str *tbl; |
124 | struct bcr_isa_arcv2 isa; | ||
122 | 125 | ||
123 | FIX_PTR(cpu); | 126 | FIX_PTR(cpu); |
124 | 127 | ||
125 | READ_BCR(AUX_IDENTITY, cpu->core); | 128 | READ_BCR(AUX_IDENTITY, cpu->core); |
126 | READ_BCR(ARC_REG_ISA_CFG_BCR, cpu->isa); | ||
127 | 129 | ||
128 | for (tbl = &arc_cpu_rel[0]; tbl->id != 0; tbl++) { | 130 | for (tbl = &arc_cpu_rel[0]; tbl->id != 0; tbl++) { |
129 | if (cpu->core.family == tbl->id) { | 131 | if (cpu->core.family == tbl->id) { |
@@ -133,7 +135,7 @@ static void read_arc_build_cfg_regs(void) | |||
133 | } | 135 | } |
134 | 136 | ||
135 | for (tbl = &arc_cpu_nm[0]; tbl->id != 0; tbl++) { | 137 | for (tbl = &arc_cpu_nm[0]; tbl->id != 0; tbl++) { |
136 | if ((cpu->core.family & 0xF0) == tbl->id) | 138 | if ((cpu->core.family & 0xF4) == tbl->id) |
137 | break; | 139 | break; |
138 | } | 140 | } |
139 | cpu->name = tbl->str; | 141 | cpu->name = tbl->str; |
@@ -192,6 +194,14 @@ static void read_arc_build_cfg_regs(void) | |||
192 | cpu->bpu.full = bpu.ft; | 194 | cpu->bpu.full = bpu.ft; |
193 | cpu->bpu.num_cache = 256 << bpu.bce; | 195 | cpu->bpu.num_cache = 256 << bpu.bce; |
194 | cpu->bpu.num_pred = 2048 << bpu.pte; | 196 | cpu->bpu.num_pred = 2048 << bpu.pte; |
197 | |||
198 | if (cpu->core.family >= 0x54) { | ||
199 | unsigned int exec_ctrl; | ||
200 | |||
201 | READ_BCR(AUX_EXEC_CTRL, exec_ctrl); | ||
202 | cpu->extn.dual_iss_exist = 1; | ||
203 | cpu->extn.dual_iss_enb = exec_ctrl & 1; | ||
204 | } | ||
195 | } | 205 | } |
196 | 206 | ||
197 | READ_BCR(ARC_REG_AP_BCR, bcr); | 207 | READ_BCR(ARC_REG_AP_BCR, bcr); |
@@ -205,18 +215,25 @@ static void read_arc_build_cfg_regs(void) | |||
205 | 215 | ||
206 | cpu->extn.debug = cpu->extn.ap | cpu->extn.smart | cpu->extn.rtt; | 216 | cpu->extn.debug = cpu->extn.ap | cpu->extn.smart | cpu->extn.rtt; |
207 | 217 | ||
218 | READ_BCR(ARC_REG_ISA_CFG_BCR, isa); | ||
219 | |||
208 | /* some hacks for lack of feature BCR info in old ARC700 cores */ | 220 | /* some hacks for lack of feature BCR info in old ARC700 cores */ |
209 | if (is_isa_arcompact()) { | 221 | if (is_isa_arcompact()) { |
210 | if (!cpu->isa.ver) /* ISA BCR absent, use Kconfig info */ | 222 | if (!isa.ver) /* ISA BCR absent, use Kconfig info */ |
211 | cpu->isa.atomic = IS_ENABLED(CONFIG_ARC_HAS_LLSC); | 223 | cpu->isa.atomic = IS_ENABLED(CONFIG_ARC_HAS_LLSC); |
212 | else | 224 | else { |
213 | cpu->isa.atomic = cpu->isa.atomic1; | 225 | /* ARC700_BUILD only has 2 bits of isa info */ |
226 | struct bcr_generic bcr = *(struct bcr_generic *)&isa; | ||
227 | cpu->isa.atomic = bcr.info & 1; | ||
228 | } | ||
214 | 229 | ||
215 | cpu->isa.be = IS_ENABLED(CONFIG_CPU_BIG_ENDIAN); | 230 | cpu->isa.be = IS_ENABLED(CONFIG_CPU_BIG_ENDIAN); |
216 | 231 | ||
217 | /* there's no direct way to distinguish 750 vs. 770 */ | 232 | /* there's no direct way to distinguish 750 vs. 770 */ |
218 | if (unlikely(cpu->core.family < 0x34 || cpu->mmu.ver < 3)) | 233 | if (unlikely(cpu->core.family < 0x34 || cpu->mmu.ver < 3)) |
219 | cpu->name = "ARC750"; | 234 | cpu->name = "ARC750"; |
235 | } else { | ||
236 | cpu->isa = isa; | ||
220 | } | 237 | } |
221 | } | 238 | } |
222 | 239 | ||
@@ -232,10 +249,11 @@ static char *arc_cpu_mumbojumbo(int cpu_id, char *buf, int len) | |||
232 | "\nIDENTITY\t: ARCVER [%#02x] ARCNUM [%#02x] CHIPID [%#4x]\n", | 249 | "\nIDENTITY\t: ARCVER [%#02x] ARCNUM [%#02x] CHIPID [%#4x]\n", |
233 | core->family, core->cpu_id, core->chip_id); | 250 | core->family, core->cpu_id, core->chip_id); |
234 | 251 | ||
235 | n += scnprintf(buf + n, len - n, "processor [%d]\t: %s %s (%s ISA) %s\n", | 252 | n += scnprintf(buf + n, len - n, "processor [%d]\t: %s %s (%s ISA) %s%s%s\n", |
236 | cpu_id, cpu->name, cpu->details, | 253 | cpu_id, cpu->name, cpu->details, |
237 | is_isa_arcompact() ? "ARCompact" : "ARCv2", | 254 | is_isa_arcompact() ? "ARCompact" : "ARCv2", |
238 | IS_AVAIL1(cpu->isa.be, "[Big-Endian]")); | 255 | IS_AVAIL1(cpu->isa.be, "[Big-Endian]"), |
256 | IS_AVAIL3(cpu->extn.dual_iss_exist, cpu->extn.dual_iss_enb, " Dual-Issue")); | ||
239 | 257 | ||
240 | n += scnprintf(buf + n, len - n, "Timers\t\t: %s%s%s%s%s%s\nISA Extn\t: ", | 258 | n += scnprintf(buf + n, len - n, "Timers\t\t: %s%s%s%s%s%s\nISA Extn\t: ", |
241 | IS_AVAIL1(cpu->extn.timer0, "Timer0 "), | 259 | IS_AVAIL1(cpu->extn.timer0, "Timer0 "), |
diff --git a/arch/arc/plat-axs10x/axs10x.c b/arch/arc/plat-axs10x/axs10x.c index f1ac6790da5f..cf14ebc36916 100644 --- a/arch/arc/plat-axs10x/axs10x.c +++ b/arch/arc/plat-axs10x/axs10x.c | |||
@@ -111,6 +111,13 @@ static void __init axs10x_early_init(void) | |||
111 | 111 | ||
112 | axs10x_enable_gpio_intc_wire(); | 112 | axs10x_enable_gpio_intc_wire(); |
113 | 113 | ||
114 | /* | ||
115 | * Reset ethernet IP core. | ||
116 | * TODO: get rid of this quirk after axs10x reset driver (or simple | ||
117 | * reset driver) will be available in upstream. | ||
118 | */ | ||
119 | iowrite32((1 << 5), (void __iomem *) CREG_MB_SW_RESET); | ||
120 | |||
114 | scnprintf(mb, 32, "MainBoard v%d", mb_rev); | 121 | scnprintf(mb, 32, "MainBoard v%d", mb_rev); |
115 | axs10x_print_board_ver(CREG_MB_VER, mb); | 122 | axs10x_print_board_ver(CREG_MB_VER, mb); |
116 | } | 123 | } |
diff --git a/arch/arc/plat-hsdk/Kconfig b/arch/arc/plat-hsdk/Kconfig index 5a6ed5afb009..bd08de4be75e 100644 --- a/arch/arc/plat-hsdk/Kconfig +++ b/arch/arc/plat-hsdk/Kconfig | |||
@@ -6,4 +6,5 @@ | |||
6 | # | 6 | # |
7 | 7 | ||
8 | menuconfig ARC_SOC_HSDK | 8 | menuconfig ARC_SOC_HSDK |
9 | bool "ARC HS Development Kit SOC" | 9 | bool "ARC HS Development Kit SOC" |
10 | select CLK_HSDK | ||
diff --git a/arch/arc/plat-hsdk/platform.c b/arch/arc/plat-hsdk/platform.c index a2e7fd17e36d..744e62e58788 100644 --- a/arch/arc/plat-hsdk/platform.c +++ b/arch/arc/plat-hsdk/platform.c | |||
@@ -38,6 +38,42 @@ static void __init hsdk_init_per_cpu(unsigned int cpu) | |||
38 | #define CREG_PAE (CREG_BASE + 0x180) | 38 | #define CREG_PAE (CREG_BASE + 0x180) |
39 | #define CREG_PAE_UPDATE (CREG_BASE + 0x194) | 39 | #define CREG_PAE_UPDATE (CREG_BASE + 0x194) |
40 | 40 | ||
41 | #define CREG_CORE_IF_CLK_DIV (CREG_BASE + 0x4B8) | ||
42 | #define CREG_CORE_IF_CLK_DIV_2 0x1 | ||
43 | #define CGU_BASE ARC_PERIPHERAL_BASE | ||
44 | #define CGU_PLL_STATUS (ARC_PERIPHERAL_BASE + 0x4) | ||
45 | #define CGU_PLL_CTRL (ARC_PERIPHERAL_BASE + 0x0) | ||
46 | #define CGU_PLL_STATUS_LOCK BIT(0) | ||
47 | #define CGU_PLL_STATUS_ERR BIT(1) | ||
48 | #define CGU_PLL_CTRL_1GHZ 0x3A10 | ||
49 | #define HSDK_PLL_LOCK_TIMEOUT 500 | ||
50 | |||
51 | #define HSDK_PLL_LOCKED() \ | ||
52 | !!(ioread32((void __iomem *) CGU_PLL_STATUS) & CGU_PLL_STATUS_LOCK) | ||
53 | |||
54 | #define HSDK_PLL_ERR() \ | ||
55 | !!(ioread32((void __iomem *) CGU_PLL_STATUS) & CGU_PLL_STATUS_ERR) | ||
56 | |||
57 | static void __init hsdk_set_cpu_freq_1ghz(void) | ||
58 | { | ||
59 | u32 timeout = HSDK_PLL_LOCK_TIMEOUT; | ||
60 | |||
61 | /* | ||
62 | * As we set cpu clock which exceeds 500MHz, the divider for the interface | ||
63 | * clock must be programmed to div-by-2. | ||
64 | */ | ||
65 | iowrite32(CREG_CORE_IF_CLK_DIV_2, (void __iomem *) CREG_CORE_IF_CLK_DIV); | ||
66 | |||
67 | /* Set cpu clock to 1GHz */ | ||
68 | iowrite32(CGU_PLL_CTRL_1GHZ, (void __iomem *) CGU_PLL_CTRL); | ||
69 | |||
70 | while (!HSDK_PLL_LOCKED() && timeout--) | ||
71 | cpu_relax(); | ||
72 | |||
73 | if (!HSDK_PLL_LOCKED() || HSDK_PLL_ERR()) | ||
74 | pr_err("Failed to setup CPU frequency to 1GHz!"); | ||
75 | } | ||
76 | |||
41 | static void __init hsdk_init_early(void) | 77 | static void __init hsdk_init_early(void) |
42 | { | 78 | { |
43 | /* | 79 | /* |
@@ -52,6 +88,12 @@ static void __init hsdk_init_early(void) | |||
52 | 88 | ||
53 | /* Really apply settings made above */ | 89 | /* Really apply settings made above */ |
54 | writel(1, (void __iomem *) CREG_PAE_UPDATE); | 90 | writel(1, (void __iomem *) CREG_PAE_UPDATE); |
91 | |||
92 | /* | ||
93 | * Setup CPU frequency to 1GHz. | ||
94 | * TODO: remove it after smart hsdk pll driver will be introduced. | ||
95 | */ | ||
96 | hsdk_set_cpu_freq_1ghz(); | ||
55 | } | 97 | } |
56 | 98 | ||
57 | static const char *hsdk_compat[] __initconst = { | 99 | static const char *hsdk_compat[] __initconst = { |
diff --git a/arch/arm/boot/dts/am33xx.dtsi b/arch/arm/boot/dts/am33xx.dtsi index 7d7ca054c557..e58fab8aec5d 100644 --- a/arch/arm/boot/dts/am33xx.dtsi +++ b/arch/arm/boot/dts/am33xx.dtsi | |||
@@ -36,6 +36,8 @@ | |||
36 | phy1 = &usb1_phy; | 36 | phy1 = &usb1_phy; |
37 | ethernet0 = &cpsw_emac0; | 37 | ethernet0 = &cpsw_emac0; |
38 | ethernet1 = &cpsw_emac1; | 38 | ethernet1 = &cpsw_emac1; |
39 | spi0 = &spi0; | ||
40 | spi1 = &spi1; | ||
39 | }; | 41 | }; |
40 | 42 | ||
41 | cpus { | 43 | cpus { |
diff --git a/arch/arm/boot/dts/am43x-epos-evm.dts b/arch/arm/boot/dts/am43x-epos-evm.dts index 9d276af7c539..081fa68b6f98 100644 --- a/arch/arm/boot/dts/am43x-epos-evm.dts +++ b/arch/arm/boot/dts/am43x-epos-evm.dts | |||
@@ -388,6 +388,7 @@ | |||
388 | pinctrl-0 = <&cpsw_default>; | 388 | pinctrl-0 = <&cpsw_default>; |
389 | pinctrl-1 = <&cpsw_sleep>; | 389 | pinctrl-1 = <&cpsw_sleep>; |
390 | status = "okay"; | 390 | status = "okay"; |
391 | slaves = <1>; | ||
391 | }; | 392 | }; |
392 | 393 | ||
393 | &davinci_mdio { | 394 | &davinci_mdio { |
@@ -402,11 +403,6 @@ | |||
402 | phy-mode = "rmii"; | 403 | phy-mode = "rmii"; |
403 | }; | 404 | }; |
404 | 405 | ||
405 | &cpsw_emac1 { | ||
406 | phy_id = <&davinci_mdio>, <1>; | ||
407 | phy-mode = "rmii"; | ||
408 | }; | ||
409 | |||
410 | &phy_sel { | 406 | &phy_sel { |
411 | rmii-clock-ext; | 407 | rmii-clock-ext; |
412 | }; | 408 | }; |
diff --git a/arch/arm/boot/dts/at91-sama5d27_som1_ek.dts b/arch/arm/boot/dts/at91-sama5d27_som1_ek.dts index 9c9088c99cc4..60cb084a8d92 100644 --- a/arch/arm/boot/dts/at91-sama5d27_som1_ek.dts +++ b/arch/arm/boot/dts/at91-sama5d27_som1_ek.dts | |||
@@ -67,7 +67,10 @@ | |||
67 | 67 | ||
68 | usb1: ohci@00400000 { | 68 | usb1: ohci@00400000 { |
69 | num-ports = <3>; | 69 | num-ports = <3>; |
70 | atmel,vbus-gpio = <&pioA PIN_PA10 GPIO_ACTIVE_HIGH>; | 70 | atmel,vbus-gpio = <0 /* &pioA PIN_PD20 GPIO_ACTIVE_HIGH */ |
71 | &pioA PIN_PA27 GPIO_ACTIVE_HIGH | ||
72 | 0 | ||
73 | >; | ||
71 | pinctrl-names = "default"; | 74 | pinctrl-names = "default"; |
72 | pinctrl-0 = <&pinctrl_usb_default>; | 75 | pinctrl-0 = <&pinctrl_usb_default>; |
73 | status = "okay"; | 76 | status = "okay"; |
@@ -120,7 +123,7 @@ | |||
120 | pinctrl-names = "default"; | 123 | pinctrl-names = "default"; |
121 | pinctrl-0 = <&pinctrl_mikrobus2_uart>; | 124 | pinctrl-0 = <&pinctrl_mikrobus2_uart>; |
122 | atmel,use-dma-rx; | 125 | atmel,use-dma-rx; |
123 | atmel-use-dma-tx; | 126 | atmel,use-dma-tx; |
124 | status = "okay"; | 127 | status = "okay"; |
125 | }; | 128 | }; |
126 | 129 | ||
@@ -178,7 +181,7 @@ | |||
178 | uart4: serial@fc00c000 { | 181 | uart4: serial@fc00c000 { |
179 | atmel,use-dma-rx; | 182 | atmel,use-dma-rx; |
180 | atmel,use-dma-tx; | 183 | atmel,use-dma-tx; |
181 | pinctrl-name = "default"; | 184 | pinctrl-names = "default"; |
182 | pinctrl-0 = <&pinctrl_mikrobus1_uart>; | 185 | pinctrl-0 = <&pinctrl_mikrobus1_uart>; |
183 | status = "okay"; | 186 | status = "okay"; |
184 | }; | 187 | }; |
@@ -330,7 +333,7 @@ | |||
330 | }; | 333 | }; |
331 | 334 | ||
332 | pinctrl_led_gpio_default: led_gpio_default { | 335 | pinctrl_led_gpio_default: led_gpio_default { |
333 | pinmux = <PIN_PA27__GPIO>, | 336 | pinmux = <PIN_PA10__GPIO>, |
334 | <PIN_PB1__GPIO>, | 337 | <PIN_PB1__GPIO>, |
335 | <PIN_PA31__GPIO>; | 338 | <PIN_PA31__GPIO>; |
336 | bias-pull-up; | 339 | bias-pull-up; |
@@ -396,7 +399,7 @@ | |||
396 | }; | 399 | }; |
397 | 400 | ||
398 | pinctrl_usb_default: usb_default { | 401 | pinctrl_usb_default: usb_default { |
399 | pinmux = <PIN_PA10__GPIO>, | 402 | pinmux = <PIN_PA27__GPIO>, |
400 | <PIN_PD19__GPIO>; | 403 | <PIN_PD19__GPIO>; |
401 | bias-disable; | 404 | bias-disable; |
402 | }; | 405 | }; |
@@ -520,17 +523,17 @@ | |||
520 | 523 | ||
521 | red { | 524 | red { |
522 | label = "red"; | 525 | label = "red"; |
523 | gpios = <&pioA PIN_PA27 GPIO_ACTIVE_LOW>; | 526 | gpios = <&pioA PIN_PA10 GPIO_ACTIVE_HIGH>; |
524 | }; | 527 | }; |
525 | 528 | ||
526 | green { | 529 | green { |
527 | label = "green"; | 530 | label = "green"; |
528 | gpios = <&pioA PIN_PB1 GPIO_ACTIVE_LOW>; | 531 | gpios = <&pioA PIN_PB1 GPIO_ACTIVE_HIGH>; |
529 | }; | 532 | }; |
530 | 533 | ||
531 | blue { | 534 | blue { |
532 | label = "blue"; | 535 | label = "blue"; |
533 | gpios = <&pioA PIN_PA31 GPIO_ACTIVE_LOW>; | 536 | gpios = <&pioA PIN_PA31 GPIO_ACTIVE_HIGH>; |
534 | linux,default-trigger = "heartbeat"; | 537 | linux,default-trigger = "heartbeat"; |
535 | }; | 538 | }; |
536 | }; | 539 | }; |
diff --git a/arch/arm/boot/dts/da850-evm.dts b/arch/arm/boot/dts/da850-evm.dts index 67e72bc72e80..c75507922f7d 100644 --- a/arch/arm/boot/dts/da850-evm.dts +++ b/arch/arm/boot/dts/da850-evm.dts | |||
@@ -15,6 +15,13 @@ | |||
15 | compatible = "ti,da850-evm", "ti,da850"; | 15 | compatible = "ti,da850-evm", "ti,da850"; |
16 | model = "DA850/AM1808/OMAP-L138 EVM"; | 16 | model = "DA850/AM1808/OMAP-L138 EVM"; |
17 | 17 | ||
18 | aliases { | ||
19 | serial0 = &serial0; | ||
20 | serial1 = &serial1; | ||
21 | serial2 = &serial2; | ||
22 | ethernet0 = ð0; | ||
23 | }; | ||
24 | |||
18 | soc@1c00000 { | 25 | soc@1c00000 { |
19 | pmx_core: pinmux@14120 { | 26 | pmx_core: pinmux@14120 { |
20 | status = "okay"; | 27 | status = "okay"; |
diff --git a/arch/arm/boot/dts/dra7xx-clocks.dtsi b/arch/arm/boot/dts/dra7xx-clocks.dtsi index cf229dfabf61..e62b62875cba 100644 --- a/arch/arm/boot/dts/dra7xx-clocks.dtsi +++ b/arch/arm/boot/dts/dra7xx-clocks.dtsi | |||
@@ -1817,6 +1817,8 @@ | |||
1817 | clocks = <&abe_24m_fclk>, <&abe_sys_clk_div>, <&func_24m_clk>, <&atl_clkin3_ck>, <&atl_clkin2_ck>, <&atl_clkin1_ck>, <&atl_clkin0_ck>, <&sys_clkin2>, <&ref_clkin0_ck>, <&ref_clkin1_ck>, <&ref_clkin2_ck>, <&ref_clkin3_ck>, <&mlb_clk>, <&mlbp_clk>; | 1817 | clocks = <&abe_24m_fclk>, <&abe_sys_clk_div>, <&func_24m_clk>, <&atl_clkin3_ck>, <&atl_clkin2_ck>, <&atl_clkin1_ck>, <&atl_clkin0_ck>, <&sys_clkin2>, <&ref_clkin0_ck>, <&ref_clkin1_ck>, <&ref_clkin2_ck>, <&ref_clkin3_ck>, <&mlb_clk>, <&mlbp_clk>; |
1818 | ti,bit-shift = <24>; | 1818 | ti,bit-shift = <24>; |
1819 | reg = <0x1868>; | 1819 | reg = <0x1868>; |
1820 | assigned-clocks = <&mcasp3_ahclkx_mux>; | ||
1821 | assigned-clock-parents = <&abe_24m_fclk>; | ||
1820 | }; | 1822 | }; |
1821 | 1823 | ||
1822 | mcasp3_aux_gfclk_mux: mcasp3_aux_gfclk_mux@1868 { | 1824 | mcasp3_aux_gfclk_mux: mcasp3_aux_gfclk_mux@1868 { |
diff --git a/arch/arm/boot/dts/omap3-n900.dts b/arch/arm/boot/dts/omap3-n900.dts index 26c20e1167b9..4acd32a1c4ef 100644 --- a/arch/arm/boot/dts/omap3-n900.dts +++ b/arch/arm/boot/dts/omap3-n900.dts | |||
@@ -144,15 +144,6 @@ | |||
144 | io-channel-names = "temp", "bsi", "vbat"; | 144 | io-channel-names = "temp", "bsi", "vbat"; |
145 | }; | 145 | }; |
146 | 146 | ||
147 | rear_camera: camera@0 { | ||
148 | compatible = "linux,camera"; | ||
149 | |||
150 | module { | ||
151 | model = "TCM8341MD"; | ||
152 | sensor = <&cam1>; | ||
153 | }; | ||
154 | }; | ||
155 | |||
156 | pwm9: dmtimer-pwm { | 147 | pwm9: dmtimer-pwm { |
157 | compatible = "ti,omap-dmtimer-pwm"; | 148 | compatible = "ti,omap-dmtimer-pwm"; |
158 | #pwm-cells = <3>; | 149 | #pwm-cells = <3>; |
@@ -189,10 +180,8 @@ | |||
189 | clock-lanes = <1>; | 180 | clock-lanes = <1>; |
190 | data-lanes = <0>; | 181 | data-lanes = <0>; |
191 | lane-polarity = <0 0>; | 182 | lane-polarity = <0 0>; |
192 | clock-inv = <0>; | ||
193 | /* Select strobe = <1> for back camera, <0> for front camera */ | 183 | /* Select strobe = <1> for back camera, <0> for front camera */ |
194 | strobe = <1>; | 184 | strobe = <1>; |
195 | crc = <0>; | ||
196 | }; | 185 | }; |
197 | }; | 186 | }; |
198 | }; | 187 | }; |
diff --git a/arch/arm/boot/dts/omap3-n950-n9.dtsi b/arch/arm/boot/dts/omap3-n950-n9.dtsi index cb47ae79a5f9..1b0bd72945f2 100644 --- a/arch/arm/boot/dts/omap3-n950-n9.dtsi +++ b/arch/arm/boot/dts/omap3-n950-n9.dtsi | |||
@@ -267,15 +267,19 @@ | |||
267 | clock-frequency = <400000>; | 267 | clock-frequency = <400000>; |
268 | 268 | ||
269 | as3645a@30 { | 269 | as3645a@30 { |
270 | #address-cells = <1>; | ||
271 | #size-cells = <0>; | ||
270 | reg = <0x30>; | 272 | reg = <0x30>; |
271 | compatible = "ams,as3645a"; | 273 | compatible = "ams,as3645a"; |
272 | flash { | 274 | flash@0 { |
275 | reg = <0x0>; | ||
273 | flash-timeout-us = <150000>; | 276 | flash-timeout-us = <150000>; |
274 | flash-max-microamp = <320000>; | 277 | flash-max-microamp = <320000>; |
275 | led-max-microamp = <60000>; | 278 | led-max-microamp = <60000>; |
276 | peak-current-limit = <1750000>; | 279 | ams,input-max-microamp = <1750000>; |
277 | }; | 280 | }; |
278 | indicator { | 281 | indicator@1 { |
282 | reg = <0x1>; | ||
279 | led-max-microamp = <10000>; | 283 | led-max-microamp = <10000>; |
280 | }; | 284 | }; |
281 | }; | 285 | }; |
diff --git a/arch/arm/boot/dts/stm32429i-eval.dts b/arch/arm/boot/dts/stm32429i-eval.dts index 97b1c2321ba9..293ecb957227 100644 --- a/arch/arm/boot/dts/stm32429i-eval.dts +++ b/arch/arm/boot/dts/stm32429i-eval.dts | |||
@@ -47,6 +47,7 @@ | |||
47 | 47 | ||
48 | /dts-v1/; | 48 | /dts-v1/; |
49 | #include "stm32f429.dtsi" | 49 | #include "stm32f429.dtsi" |
50 | #include "stm32f429-pinctrl.dtsi" | ||
50 | #include <dt-bindings/input/input.h> | 51 | #include <dt-bindings/input/input.h> |
51 | #include <dt-bindings/gpio/gpio.h> | 52 | #include <dt-bindings/gpio/gpio.h> |
52 | 53 | ||
@@ -202,10 +203,8 @@ | |||
202 | stmpe1600: stmpe1600@42 { | 203 | stmpe1600: stmpe1600@42 { |
203 | compatible = "st,stmpe1600"; | 204 | compatible = "st,stmpe1600"; |
204 | reg = <0x42>; | 205 | reg = <0x42>; |
205 | irq-gpio = <&gpioi 8 0>; | ||
206 | irq-trigger = <3>; | ||
207 | interrupts = <8 3>; | 206 | interrupts = <8 3>; |
208 | interrupt-parent = <&exti>; | 207 | interrupt-parent = <&gpioi>; |
209 | interrupt-controller; | 208 | interrupt-controller; |
210 | wakeup-source; | 209 | wakeup-source; |
211 | 210 | ||
diff --git a/arch/arm/boot/dts/stm32f4-pinctrl.dtsi b/arch/arm/boot/dts/stm32f4-pinctrl.dtsi new file mode 100644 index 000000000000..7f3560c0211d --- /dev/null +++ b/arch/arm/boot/dts/stm32f4-pinctrl.dtsi | |||
@@ -0,0 +1,343 @@ | |||
1 | /* | ||
2 | * Copyright 2017 - Alexandre Torgue <alexandre.torgue@st.com> | ||
3 | * | ||
4 | * This file is dual-licensed: you can use it either under the terms | ||
5 | * of the GPL or the X11 license, at your option. Note that this dual | ||
6 | * licensing only applies to this file, and not this project as a | ||
7 | * whole. | ||
8 | * | ||
9 | * a) This file is free software; you can redistribute it and/or | ||
10 | * modify it under the terms of the GNU General Public License as | ||
11 | * published by the Free Software Foundation; either version 2 of the | ||
12 | * License, or (at your option) any later version. | ||
13 | * | ||
14 | * This file is distributed in the hope that it will be useful, | ||
15 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
16 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
17 | * GNU General Public License for more details. | ||
18 | * | ||
19 | * Or, alternatively, | ||
20 | * | ||
21 | * b) Permission is hereby granted, free of charge, to any person | ||
22 | * obtaining a copy of this software and associated documentation | ||
23 | * files (the "Software"), to deal in the Software without | ||
24 | * restriction, including without limitation the rights to use, | ||
25 | * copy, modify, merge, publish, distribute, sublicense, and/or | ||
26 | * sell copies of the Software, and to permit persons to whom the | ||
27 | * Software is furnished to do so, subject to the following | ||
28 | * conditions: | ||
29 | * | ||
30 | * The above copyright notice and this permission notice shall be | ||
31 | * included in all copies or substantial portions of the Software. | ||
32 | * | ||
33 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
34 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES | ||
35 | * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||
36 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT | ||
37 | * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, | ||
38 | * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING | ||
39 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
40 | * OTHER DEALINGS IN THE SOFTWARE. | ||
41 | */ | ||
42 | |||
43 | #include <dt-bindings/pinctrl/stm32f429-pinfunc.h> | ||
44 | #include <dt-bindings/mfd/stm32f4-rcc.h> | ||
45 | |||
46 | / { | ||
47 | soc { | ||
48 | pinctrl: pin-controller { | ||
49 | #address-cells = <1>; | ||
50 | #size-cells = <1>; | ||
51 | ranges = <0 0x40020000 0x3000>; | ||
52 | interrupt-parent = <&exti>; | ||
53 | st,syscfg = <&syscfg 0x8>; | ||
54 | pins-are-numbered; | ||
55 | |||
56 | gpioa: gpio@40020000 { | ||
57 | gpio-controller; | ||
58 | #gpio-cells = <2>; | ||
59 | interrupt-controller; | ||
60 | #interrupt-cells = <2>; | ||
61 | reg = <0x0 0x400>; | ||
62 | clocks = <&rcc 0 STM32F4_AHB1_CLOCK(GPIOA)>; | ||
63 | st,bank-name = "GPIOA"; | ||
64 | }; | ||
65 | |||
66 | gpiob: gpio@40020400 { | ||
67 | gpio-controller; | ||
68 | #gpio-cells = <2>; | ||
69 | interrupt-controller; | ||
70 | #interrupt-cells = <2>; | ||
71 | reg = <0x400 0x400>; | ||
72 | clocks = <&rcc 0 STM32F4_AHB1_CLOCK(GPIOB)>; | ||
73 | st,bank-name = "GPIOB"; | ||
74 | }; | ||
75 | |||
76 | gpioc: gpio@40020800 { | ||
77 | gpio-controller; | ||
78 | #gpio-cells = <2>; | ||
79 | interrupt-controller; | ||
80 | #interrupt-cells = <2>; | ||
81 | reg = <0x800 0x400>; | ||
82 | clocks = <&rcc 0 STM32F4_AHB1_CLOCK(GPIOC)>; | ||
83 | st,bank-name = "GPIOC"; | ||
84 | }; | ||
85 | |||
86 | gpiod: gpio@40020c00 { | ||
87 | gpio-controller; | ||
88 | #gpio-cells = <2>; | ||
89 | interrupt-controller; | ||
90 | #interrupt-cells = <2>; | ||
91 | reg = <0xc00 0x400>; | ||
92 | clocks = <&rcc 0 STM32F4_AHB1_CLOCK(GPIOD)>; | ||
93 | st,bank-name = "GPIOD"; | ||
94 | }; | ||
95 | |||
96 | gpioe: gpio@40021000 { | ||
97 | gpio-controller; | ||
98 | #gpio-cells = <2>; | ||
99 | interrupt-controller; | ||
100 | #interrupt-cells = <2>; | ||
101 | reg = <0x1000 0x400>; | ||
102 | clocks = <&rcc 0 STM32F4_AHB1_CLOCK(GPIOE)>; | ||
103 | st,bank-name = "GPIOE"; | ||
104 | }; | ||
105 | |||
106 | gpiof: gpio@40021400 { | ||
107 | gpio-controller; | ||
108 | #gpio-cells = <2>; | ||
109 | interrupt-controller; | ||
110 | #interrupt-cells = <2>; | ||
111 | reg = <0x1400 0x400>; | ||
112 | clocks = <&rcc 0 STM32F4_AHB1_CLOCK(GPIOF)>; | ||
113 | st,bank-name = "GPIOF"; | ||
114 | }; | ||
115 | |||
116 | gpiog: gpio@40021800 { | ||
117 | gpio-controller; | ||
118 | #gpio-cells = <2>; | ||
119 | interrupt-controller; | ||
120 | #interrupt-cells = <2>; | ||
121 | reg = <0x1800 0x400>; | ||
122 | clocks = <&rcc 0 STM32F4_AHB1_CLOCK(GPIOG)>; | ||
123 | st,bank-name = "GPIOG"; | ||
124 | }; | ||
125 | |||
126 | gpioh: gpio@40021c00 { | ||
127 | gpio-controller; | ||
128 | #gpio-cells = <2>; | ||
129 | interrupt-controller; | ||
130 | #interrupt-cells = <2>; | ||
131 | reg = <0x1c00 0x400>; | ||
132 | clocks = <&rcc 0 STM32F4_AHB1_CLOCK(GPIOH)>; | ||
133 | st,bank-name = "GPIOH"; | ||
134 | }; | ||
135 | |||
136 | gpioi: gpio@40022000 { | ||
137 | gpio-controller; | ||
138 | #gpio-cells = <2>; | ||
139 | interrupt-controller; | ||
140 | #interrupt-cells = <2>; | ||
141 | reg = <0x2000 0x400>; | ||
142 | clocks = <&rcc 0 STM32F4_AHB1_CLOCK(GPIOI)>; | ||
143 | st,bank-name = "GPIOI"; | ||
144 | }; | ||
145 | |||
146 | gpioj: gpio@40022400 { | ||
147 | gpio-controller; | ||
148 | #gpio-cells = <2>; | ||
149 | interrupt-controller; | ||
150 | #interrupt-cells = <2>; | ||
151 | reg = <0x2400 0x400>; | ||
152 | clocks = <&rcc 0 STM32F4_AHB1_CLOCK(GPIOJ)>; | ||
153 | st,bank-name = "GPIOJ"; | ||
154 | }; | ||
155 | |||
156 | gpiok: gpio@40022800 { | ||
157 | gpio-controller; | ||
158 | #gpio-cells = <2>; | ||
159 | interrupt-controller; | ||
160 | #interrupt-cells = <2>; | ||
161 | reg = <0x2800 0x400>; | ||
162 | clocks = <&rcc 0 STM32F4_AHB1_CLOCK(GPIOK)>; | ||
163 | st,bank-name = "GPIOK"; | ||
164 | }; | ||
165 | |||
166 | usart1_pins_a: usart1@0 { | ||
167 | pins1 { | ||
168 | pinmux = <STM32F429_PA9_FUNC_USART1_TX>; | ||
169 | bias-disable; | ||
170 | drive-push-pull; | ||
171 | slew-rate = <0>; | ||
172 | }; | ||
173 | pins2 { | ||
174 | pinmux = <STM32F429_PA10_FUNC_USART1_RX>; | ||
175 | bias-disable; | ||
176 | }; | ||
177 | }; | ||
178 | |||
179 | usart3_pins_a: usart3@0 { | ||
180 | pins1 { | ||
181 | pinmux = <STM32F429_PB10_FUNC_USART3_TX>; | ||
182 | bias-disable; | ||
183 | drive-push-pull; | ||
184 | slew-rate = <0>; | ||
185 | }; | ||
186 | pins2 { | ||
187 | pinmux = <STM32F429_PB11_FUNC_USART3_RX>; | ||
188 | bias-disable; | ||
189 | }; | ||
190 | }; | ||
191 | |||
192 | usbotg_fs_pins_a: usbotg_fs@0 { | ||
193 | pins { | ||
194 | pinmux = <STM32F429_PA10_FUNC_OTG_FS_ID>, | ||
195 | <STM32F429_PA11_FUNC_OTG_FS_DM>, | ||
196 | <STM32F429_PA12_FUNC_OTG_FS_DP>; | ||
197 | bias-disable; | ||
198 | drive-push-pull; | ||
199 | slew-rate = <2>; | ||
200 | }; | ||
201 | }; | ||
202 | |||
203 | usbotg_fs_pins_b: usbotg_fs@1 { | ||
204 | pins { | ||
205 | pinmux = <STM32F429_PB12_FUNC_OTG_HS_ID>, | ||
206 | <STM32F429_PB14_FUNC_OTG_HS_DM>, | ||
207 | <STM32F429_PB15_FUNC_OTG_HS_DP>; | ||
208 | bias-disable; | ||
209 | drive-push-pull; | ||
210 | slew-rate = <2>; | ||
211 | }; | ||
212 | }; | ||
213 | |||
214 | usbotg_hs_pins_a: usbotg_hs@0 { | ||
215 | pins { | ||
216 | pinmux = <STM32F429_PH4_FUNC_OTG_HS_ULPI_NXT>, | ||
217 | <STM32F429_PI11_FUNC_OTG_HS_ULPI_DIR>, | ||
218 | <STM32F429_PC0_FUNC_OTG_HS_ULPI_STP>, | ||
219 | <STM32F429_PA5_FUNC_OTG_HS_ULPI_CK>, | ||
220 | <STM32F429_PA3_FUNC_OTG_HS_ULPI_D0>, | ||
221 | <STM32F429_PB0_FUNC_OTG_HS_ULPI_D1>, | ||
222 | <STM32F429_PB1_FUNC_OTG_HS_ULPI_D2>, | ||
223 | <STM32F429_PB10_FUNC_OTG_HS_ULPI_D3>, | ||
224 | <STM32F429_PB11_FUNC_OTG_HS_ULPI_D4>, | ||
225 | <STM32F429_PB12_FUNC_OTG_HS_ULPI_D5>, | ||
226 | <STM32F429_PB13_FUNC_OTG_HS_ULPI_D6>, | ||
227 | <STM32F429_PB5_FUNC_OTG_HS_ULPI_D7>; | ||
228 | bias-disable; | ||
229 | drive-push-pull; | ||
230 | slew-rate = <2>; | ||
231 | }; | ||
232 | }; | ||
233 | |||
234 | ethernet_mii: mii@0 { | ||
235 | pins { | ||
236 | pinmux = <STM32F429_PG13_FUNC_ETH_MII_TXD0_ETH_RMII_TXD0>, | ||
237 | <STM32F429_PG14_FUNC_ETH_MII_TXD1_ETH_RMII_TXD1>, | ||
238 | <STM32F429_PC2_FUNC_ETH_MII_TXD2>, | ||
239 | <STM32F429_PB8_FUNC_ETH_MII_TXD3>, | ||
240 | <STM32F429_PC3_FUNC_ETH_MII_TX_CLK>, | ||
241 | <STM32F429_PG11_FUNC_ETH_MII_TX_EN_ETH_RMII_TX_EN>, | ||
242 | <STM32F429_PA2_FUNC_ETH_MDIO>, | ||
243 | <STM32F429_PC1_FUNC_ETH_MDC>, | ||
244 | <STM32F429_PA1_FUNC_ETH_MII_RX_CLK_ETH_RMII_REF_CLK>, | ||
245 | <STM32F429_PA7_FUNC_ETH_MII_RX_DV_ETH_RMII_CRS_DV>, | ||
246 | <STM32F429_PC4_FUNC_ETH_MII_RXD0_ETH_RMII_RXD0>, | ||
247 | <STM32F429_PC5_FUNC_ETH_MII_RXD1_ETH_RMII_RXD1>, | ||
248 | <STM32F429_PH6_FUNC_ETH_MII_RXD2>, | ||
249 | <STM32F429_PH7_FUNC_ETH_MII_RXD3>; | ||
250 | slew-rate = <2>; | ||
251 | }; | ||
252 | }; | ||
253 | |||
254 | adc3_in8_pin: adc@200 { | ||
255 | pins { | ||
256 | pinmux = <STM32F429_PF10_FUNC_ANALOG>; | ||
257 | }; | ||
258 | }; | ||
259 | |||
260 | pwm1_pins: pwm@1 { | ||
261 | pins { | ||
262 | pinmux = <STM32F429_PA8_FUNC_TIM1_CH1>, | ||
263 | <STM32F429_PB13_FUNC_TIM1_CH1N>, | ||
264 | <STM32F429_PB12_FUNC_TIM1_BKIN>; | ||
265 | }; | ||
266 | }; | ||
267 | |||
268 | pwm3_pins: pwm@3 { | ||
269 | pins { | ||
270 | pinmux = <STM32F429_PB4_FUNC_TIM3_CH1>, | ||
271 | <STM32F429_PB5_FUNC_TIM3_CH2>; | ||
272 | }; | ||
273 | }; | ||
274 | |||
275 | i2c1_pins: i2c1@0 { | ||
276 | pins { | ||
277 | pinmux = <STM32F429_PB9_FUNC_I2C1_SDA>, | ||
278 | <STM32F429_PB6_FUNC_I2C1_SCL>; | ||
279 | bias-disable; | ||
280 | drive-open-drain; | ||
281 | slew-rate = <3>; | ||
282 | }; | ||
283 | }; | ||
284 | |||
285 | ltdc_pins: ltdc@0 { | ||
286 | pins { | ||
287 | pinmux = <STM32F429_PI12_FUNC_LCD_HSYNC>, | ||
288 | <STM32F429_PI13_FUNC_LCD_VSYNC>, | ||
289 | <STM32F429_PI14_FUNC_LCD_CLK>, | ||
290 | <STM32F429_PI15_FUNC_LCD_R0>, | ||
291 | <STM32F429_PJ0_FUNC_LCD_R1>, | ||
292 | <STM32F429_PJ1_FUNC_LCD_R2>, | ||
293 | <STM32F429_PJ2_FUNC_LCD_R3>, | ||
294 | <STM32F429_PJ3_FUNC_LCD_R4>, | ||
295 | <STM32F429_PJ4_FUNC_LCD_R5>, | ||
296 | <STM32F429_PJ5_FUNC_LCD_R6>, | ||
297 | <STM32F429_PJ6_FUNC_LCD_R7>, | ||
298 | <STM32F429_PJ7_FUNC_LCD_G0>, | ||
299 | <STM32F429_PJ8_FUNC_LCD_G1>, | ||
300 | <STM32F429_PJ9_FUNC_LCD_G2>, | ||
301 | <STM32F429_PJ10_FUNC_LCD_G3>, | ||
302 | <STM32F429_PJ11_FUNC_LCD_G4>, | ||
303 | <STM32F429_PJ12_FUNC_LCD_B0>, | ||
304 | <STM32F429_PJ13_FUNC_LCD_B1>, | ||
305 | <STM32F429_PJ14_FUNC_LCD_B2>, | ||
306 | <STM32F429_PJ15_FUNC_LCD_B3>, | ||
307 | <STM32F429_PK0_FUNC_LCD_G5>, | ||
308 | <STM32F429_PK1_FUNC_LCD_G6>, | ||
309 | <STM32F429_PK2_FUNC_LCD_G7>, | ||
310 | <STM32F429_PK3_FUNC_LCD_B4>, | ||
311 | <STM32F429_PK4_FUNC_LCD_B5>, | ||
312 | <STM32F429_PK5_FUNC_LCD_B6>, | ||
313 | <STM32F429_PK6_FUNC_LCD_B7>, | ||
314 | <STM32F429_PK7_FUNC_LCD_DE>; | ||
315 | slew-rate = <2>; | ||
316 | }; | ||
317 | }; | ||
318 | |||
319 | dcmi_pins: dcmi@0 { | ||
320 | pins { | ||
321 | pinmux = <STM32F429_PA4_FUNC_DCMI_HSYNC>, | ||
322 | <STM32F429_PB7_FUNC_DCMI_VSYNC>, | ||
323 | <STM32F429_PA6_FUNC_DCMI_PIXCLK>, | ||
324 | <STM32F429_PC6_FUNC_DCMI_D0>, | ||
325 | <STM32F429_PC7_FUNC_DCMI_D1>, | ||
326 | <STM32F429_PC8_FUNC_DCMI_D2>, | ||
327 | <STM32F429_PC9_FUNC_DCMI_D3>, | ||
328 | <STM32F429_PC11_FUNC_DCMI_D4>, | ||
329 | <STM32F429_PD3_FUNC_DCMI_D5>, | ||
330 | <STM32F429_PB8_FUNC_DCMI_D6>, | ||
331 | <STM32F429_PE6_FUNC_DCMI_D7>, | ||
332 | <STM32F429_PC10_FUNC_DCMI_D8>, | ||
333 | <STM32F429_PC12_FUNC_DCMI_D9>, | ||
334 | <STM32F429_PD6_FUNC_DCMI_D10>, | ||
335 | <STM32F429_PD2_FUNC_DCMI_D11>; | ||
336 | bias-disable; | ||
337 | drive-push-pull; | ||
338 | slew-rate = <3>; | ||
339 | }; | ||
340 | }; | ||
341 | }; | ||
342 | }; | ||
343 | }; | ||
diff --git a/arch/arm/boot/dts/stm32f429-disco.dts b/arch/arm/boot/dts/stm32f429-disco.dts index c66d617e4245..5ceb2cf3777f 100644 --- a/arch/arm/boot/dts/stm32f429-disco.dts +++ b/arch/arm/boot/dts/stm32f429-disco.dts | |||
@@ -47,6 +47,7 @@ | |||
47 | 47 | ||
48 | /dts-v1/; | 48 | /dts-v1/; |
49 | #include "stm32f429.dtsi" | 49 | #include "stm32f429.dtsi" |
50 | #include "stm32f429-pinctrl.dtsi" | ||
50 | #include <dt-bindings/input/input.h> | 51 | #include <dt-bindings/input/input.h> |
51 | 52 | ||
52 | / { | 53 | / { |
diff --git a/arch/arm/boot/dts/stm32f429-pinctrl.dtsi b/arch/arm/boot/dts/stm32f429-pinctrl.dtsi new file mode 100644 index 000000000000..3e7a17d9112e --- /dev/null +++ b/arch/arm/boot/dts/stm32f429-pinctrl.dtsi | |||
@@ -0,0 +1,95 @@ | |||
1 | /* | ||
2 | * Copyright 2017 - Alexandre Torgue <alexandre.torgue@st.com> | ||
3 | * | ||
4 | * This file is dual-licensed: you can use it either under the terms | ||
5 | * of the GPL or the X11 license, at your option. Note that this dual | ||
6 | * licensing only applies to this file, and not this project as a | ||
7 | * whole. | ||
8 | * | ||
9 | * a) This file is free software; you can redistribute it and/or | ||
10 | * modify it under the terms of the GNU General Public License as | ||
11 | * published by the Free Software Foundation; either version 2 of the | ||
12 | * License, or (at your option) any later version. | ||
13 | * | ||
14 | * This file is distributed in the hope that it will be useful, | ||
15 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
16 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
17 | * GNU General Public License for more details. | ||
18 | * | ||
19 | * Or, alternatively, | ||
20 | * | ||
21 | * b) Permission is hereby granted, free of charge, to any person | ||
22 | * obtaining a copy of this software and associated documentation | ||
23 | * files (the "Software"), to deal in the Software without | ||
24 | * restriction, including without limitation the rights to use, | ||
25 | * copy, modify, merge, publish, distribute, sublicense, and/or | ||
26 | * sell copies of the Software, and to permit persons to whom the | ||
27 | * Software is furnished to do so, subject to the following | ||
28 | * conditions: | ||
29 | * | ||
30 | * The above copyright notice and this permission notice shall be | ||
31 | * included in all copies or substantial portions of the Software. | ||
32 | * | ||
33 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
34 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES | ||
35 | * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||
36 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT | ||
37 | * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, | ||
38 | * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING | ||
39 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
40 | * OTHER DEALINGS IN THE SOFTWARE. | ||
41 | */ | ||
42 | |||
43 | #include "stm32f4-pinctrl.dtsi" | ||
44 | |||
45 | / { | ||
46 | soc { | ||
47 | pinctrl: pin-controller { | ||
48 | compatible = "st,stm32f429-pinctrl"; | ||
49 | |||
50 | gpioa: gpio@40020000 { | ||
51 | gpio-ranges = <&pinctrl 0 0 16>; | ||
52 | }; | ||
53 | |||
54 | gpiob: gpio@40020400 { | ||
55 | gpio-ranges = <&pinctrl 0 16 16>; | ||
56 | }; | ||
57 | |||
58 | gpioc: gpio@40020800 { | ||
59 | gpio-ranges = <&pinctrl 0 32 16>; | ||
60 | }; | ||
61 | |||
62 | gpiod: gpio@40020c00 { | ||
63 | gpio-ranges = <&pinctrl 0 48 16>; | ||
64 | }; | ||
65 | |||
66 | gpioe: gpio@40021000 { | ||
67 | gpio-ranges = <&pinctrl 0 64 16>; | ||
68 | }; | ||
69 | |||
70 | gpiof: gpio@40021400 { | ||
71 | gpio-ranges = <&pinctrl 0 80 16>; | ||
72 | }; | ||
73 | |||
74 | gpiog: gpio@40021800 { | ||
75 | gpio-ranges = <&pinctrl 0 96 16>; | ||
76 | }; | ||
77 | |||
78 | gpioh: gpio@40021c00 { | ||
79 | gpio-ranges = <&pinctrl 0 112 16>; | ||
80 | }; | ||
81 | |||
82 | gpioi: gpio@40022000 { | ||
83 | gpio-ranges = <&pinctrl 0 128 16>; | ||
84 | }; | ||
85 | |||
86 | gpioj: gpio@40022400 { | ||
87 | gpio-ranges = <&pinctrl 0 144 16>; | ||
88 | }; | ||
89 | |||
90 | gpiok: gpio@40022800 { | ||
91 | gpio-ranges = <&pinctrl 0 160 8>; | ||
92 | }; | ||
93 | }; | ||
94 | }; | ||
95 | }; | ||
diff --git a/arch/arm/boot/dts/stm32f429.dtsi b/arch/arm/boot/dts/stm32f429.dtsi index dd7e99b1f43b..5b36eb114ddc 100644 --- a/arch/arm/boot/dts/stm32f429.dtsi +++ b/arch/arm/boot/dts/stm32f429.dtsi | |||
@@ -47,7 +47,6 @@ | |||
47 | 47 | ||
48 | #include "skeleton.dtsi" | 48 | #include "skeleton.dtsi" |
49 | #include "armv7-m.dtsi" | 49 | #include "armv7-m.dtsi" |
50 | #include <dt-bindings/pinctrl/stm32f429-pinfunc.h> | ||
51 | #include <dt-bindings/clock/stm32fx-clock.h> | 50 | #include <dt-bindings/clock/stm32fx-clock.h> |
52 | #include <dt-bindings/mfd/stm32f4-rcc.h> | 51 | #include <dt-bindings/mfd/stm32f4-rcc.h> |
53 | 52 | ||
@@ -591,302 +590,6 @@ | |||
591 | status = "disabled"; | 590 | status = "disabled"; |
592 | }; | 591 | }; |
593 | 592 | ||
594 | pinctrl: pin-controller { | ||
595 | #address-cells = <1>; | ||
596 | #size-cells = <1>; | ||
597 | compatible = "st,stm32f429-pinctrl"; | ||
598 | ranges = <0 0x40020000 0x3000>; | ||
599 | interrupt-parent = <&exti>; | ||
600 | st,syscfg = <&syscfg 0x8>; | ||
601 | pins-are-numbered; | ||
602 | |||
603 | gpioa: gpio@40020000 { | ||
604 | gpio-controller; | ||
605 | #gpio-cells = <2>; | ||
606 | interrupt-controller; | ||
607 | #interrupt-cells = <2>; | ||
608 | reg = <0x0 0x400>; | ||
609 | clocks = <&rcc 0 STM32F4_AHB1_CLOCK(GPIOA)>; | ||
610 | st,bank-name = "GPIOA"; | ||
611 | }; | ||
612 | |||
613 | gpiob: gpio@40020400 { | ||
614 | gpio-controller; | ||
615 | #gpio-cells = <2>; | ||
616 | interrupt-controller; | ||
617 | #interrupt-cells = <2>; | ||
618 | reg = <0x400 0x400>; | ||
619 | clocks = <&rcc 0 STM32F4_AHB1_CLOCK(GPIOB)>; | ||
620 | st,bank-name = "GPIOB"; | ||
621 | }; | ||
622 | |||
623 | gpioc: gpio@40020800 { | ||
624 | gpio-controller; | ||
625 | #gpio-cells = <2>; | ||
626 | interrupt-controller; | ||
627 | #interrupt-cells = <2>; | ||
628 | reg = <0x800 0x400>; | ||
629 | clocks = <&rcc 0 STM32F4_AHB1_CLOCK(GPIOC)>; | ||
630 | st,bank-name = "GPIOC"; | ||
631 | }; | ||
632 | |||
633 | gpiod: gpio@40020c00 { | ||
634 | gpio-controller; | ||
635 | #gpio-cells = <2>; | ||
636 | interrupt-controller; | ||
637 | #interrupt-cells = <2>; | ||
638 | reg = <0xc00 0x400>; | ||
639 | clocks = <&rcc 0 STM32F4_AHB1_CLOCK(GPIOD)>; | ||
640 | st,bank-name = "GPIOD"; | ||
641 | }; | ||
642 | |||
643 | gpioe: gpio@40021000 { | ||
644 | gpio-controller; | ||
645 | #gpio-cells = <2>; | ||
646 | interrupt-controller; | ||
647 | #interrupt-cells = <2>; | ||
648 | reg = <0x1000 0x400>; | ||
649 | clocks = <&rcc 0 STM32F4_AHB1_CLOCK(GPIOE)>; | ||
650 | st,bank-name = "GPIOE"; | ||
651 | }; | ||
652 | |||
653 | gpiof: gpio@40021400 { | ||
654 | gpio-controller; | ||
655 | #gpio-cells = <2>; | ||
656 | interrupt-controller; | ||
657 | #interrupt-cells = <2>; | ||
658 | reg = <0x1400 0x400>; | ||
659 | clocks = <&rcc 0 STM32F4_AHB1_CLOCK(GPIOF)>; | ||
660 | st,bank-name = "GPIOF"; | ||
661 | }; | ||
662 | |||
663 | gpiog: gpio@40021800 { | ||
664 | gpio-controller; | ||
665 | #gpio-cells = <2>; | ||
666 | interrupt-controller; | ||
667 | #interrupt-cells = <2>; | ||
668 | reg = <0x1800 0x400>; | ||
669 | clocks = <&rcc 0 STM32F4_AHB1_CLOCK(GPIOG)>; | ||
670 | st,bank-name = "GPIOG"; | ||
671 | }; | ||
672 | |||
673 | gpioh: gpio@40021c00 { | ||
674 | gpio-controller; | ||
675 | #gpio-cells = <2>; | ||
676 | interrupt-controller; | ||
677 | #interrupt-cells = <2>; | ||
678 | reg = <0x1c00 0x400>; | ||
679 | clocks = <&rcc 0 STM32F4_AHB1_CLOCK(GPIOH)>; | ||
680 | st,bank-name = "GPIOH"; | ||
681 | }; | ||
682 | |||
683 | gpioi: gpio@40022000 { | ||
684 | gpio-controller; | ||
685 | #gpio-cells = <2>; | ||
686 | interrupt-controller; | ||
687 | #interrupt-cells = <2>; | ||
688 | reg = <0x2000 0x400>; | ||
689 | clocks = <&rcc 0 STM32F4_AHB1_CLOCK(GPIOI)>; | ||
690 | st,bank-name = "GPIOI"; | ||
691 | }; | ||
692 | |||
693 | gpioj: gpio@40022400 { | ||
694 | gpio-controller; | ||
695 | #gpio-cells = <2>; | ||
696 | interrupt-controller; | ||
697 | #interrupt-cells = <2>; | ||
698 | reg = <0x2400 0x400>; | ||
699 | clocks = <&rcc 0 STM32F4_AHB1_CLOCK(GPIOJ)>; | ||
700 | st,bank-name = "GPIOJ"; | ||
701 | }; | ||
702 | |||
703 | gpiok: gpio@40022800 { | ||
704 | gpio-controller; | ||
705 | #gpio-cells = <2>; | ||
706 | interrupt-controller; | ||
707 | #interrupt-cells = <2>; | ||
708 | reg = <0x2800 0x400>; | ||
709 | clocks = <&rcc 0 STM32F4_AHB1_CLOCK(GPIOK)>; | ||
710 | st,bank-name = "GPIOK"; | ||
711 | }; | ||
712 | |||
713 | usart1_pins_a: usart1@0 { | ||
714 | pins1 { | ||
715 | pinmux = <STM32F429_PA9_FUNC_USART1_TX>; | ||
716 | bias-disable; | ||
717 | drive-push-pull; | ||
718 | slew-rate = <0>; | ||
719 | }; | ||
720 | pins2 { | ||
721 | pinmux = <STM32F429_PA10_FUNC_USART1_RX>; | ||
722 | bias-disable; | ||
723 | }; | ||
724 | }; | ||
725 | |||
726 | usart3_pins_a: usart3@0 { | ||
727 | pins1 { | ||
728 | pinmux = <STM32F429_PB10_FUNC_USART3_TX>; | ||
729 | bias-disable; | ||
730 | drive-push-pull; | ||
731 | slew-rate = <0>; | ||
732 | }; | ||
733 | pins2 { | ||
734 | pinmux = <STM32F429_PB11_FUNC_USART3_RX>; | ||
735 | bias-disable; | ||
736 | }; | ||
737 | }; | ||
738 | |||
739 | usbotg_fs_pins_a: usbotg_fs@0 { | ||
740 | pins { | ||
741 | pinmux = <STM32F429_PA10_FUNC_OTG_FS_ID>, | ||
742 | <STM32F429_PA11_FUNC_OTG_FS_DM>, | ||
743 | <STM32F429_PA12_FUNC_OTG_FS_DP>; | ||
744 | bias-disable; | ||
745 | drive-push-pull; | ||
746 | slew-rate = <2>; | ||
747 | }; | ||
748 | }; | ||
749 | |||
750 | usbotg_fs_pins_b: usbotg_fs@1 { | ||
751 | pins { | ||
752 | pinmux = <STM32F429_PB12_FUNC_OTG_HS_ID>, | ||
753 | <STM32F429_PB14_FUNC_OTG_HS_DM>, | ||
754 | <STM32F429_PB15_FUNC_OTG_HS_DP>; | ||
755 | bias-disable; | ||
756 | drive-push-pull; | ||
757 | slew-rate = <2>; | ||
758 | }; | ||
759 | }; | ||
760 | |||
761 | usbotg_hs_pins_a: usbotg_hs@0 { | ||
762 | pins { | ||
763 | pinmux = <STM32F429_PH4_FUNC_OTG_HS_ULPI_NXT>, | ||
764 | <STM32F429_PI11_FUNC_OTG_HS_ULPI_DIR>, | ||
765 | <STM32F429_PC0_FUNC_OTG_HS_ULPI_STP>, | ||
766 | <STM32F429_PA5_FUNC_OTG_HS_ULPI_CK>, | ||
767 | <STM32F429_PA3_FUNC_OTG_HS_ULPI_D0>, | ||
768 | <STM32F429_PB0_FUNC_OTG_HS_ULPI_D1>, | ||
769 | <STM32F429_PB1_FUNC_OTG_HS_ULPI_D2>, | ||
770 | <STM32F429_PB10_FUNC_OTG_HS_ULPI_D3>, | ||
771 | <STM32F429_PB11_FUNC_OTG_HS_ULPI_D4>, | ||
772 | <STM32F429_PB12_FUNC_OTG_HS_ULPI_D5>, | ||
773 | <STM32F429_PB13_FUNC_OTG_HS_ULPI_D6>, | ||
774 | <STM32F429_PB5_FUNC_OTG_HS_ULPI_D7>; | ||
775 | bias-disable; | ||
776 | drive-push-pull; | ||
777 | slew-rate = <2>; | ||
778 | }; | ||
779 | }; | ||
780 | |||
781 | ethernet_mii: mii@0 { | ||
782 | pins { | ||
783 | pinmux = <STM32F429_PG13_FUNC_ETH_MII_TXD0_ETH_RMII_TXD0>, | ||
784 | <STM32F429_PG14_FUNC_ETH_MII_TXD1_ETH_RMII_TXD1>, | ||
785 | <STM32F429_PC2_FUNC_ETH_MII_TXD2>, | ||
786 | <STM32F429_PB8_FUNC_ETH_MII_TXD3>, | ||
787 | <STM32F429_PC3_FUNC_ETH_MII_TX_CLK>, | ||
788 | <STM32F429_PG11_FUNC_ETH_MII_TX_EN_ETH_RMII_TX_EN>, | ||
789 | <STM32F429_PA2_FUNC_ETH_MDIO>, | ||
790 | <STM32F429_PC1_FUNC_ETH_MDC>, | ||
791 | <STM32F429_PA1_FUNC_ETH_MII_RX_CLK_ETH_RMII_REF_CLK>, | ||
792 | <STM32F429_PA7_FUNC_ETH_MII_RX_DV_ETH_RMII_CRS_DV>, | ||
793 | <STM32F429_PC4_FUNC_ETH_MII_RXD0_ETH_RMII_RXD0>, | ||
794 | <STM32F429_PC5_FUNC_ETH_MII_RXD1_ETH_RMII_RXD1>, | ||
795 | <STM32F429_PH6_FUNC_ETH_MII_RXD2>, | ||
796 | <STM32F429_PH7_FUNC_ETH_MII_RXD3>; | ||
797 | slew-rate = <2>; | ||
798 | }; | ||
799 | }; | ||
800 | |||
801 | adc3_in8_pin: adc@200 { | ||
802 | pins { | ||
803 | pinmux = <STM32F429_PF10_FUNC_ANALOG>; | ||
804 | }; | ||
805 | }; | ||
806 | |||
807 | pwm1_pins: pwm@1 { | ||
808 | pins { | ||
809 | pinmux = <STM32F429_PA8_FUNC_TIM1_CH1>, | ||
810 | <STM32F429_PB13_FUNC_TIM1_CH1N>, | ||
811 | <STM32F429_PB12_FUNC_TIM1_BKIN>; | ||
812 | }; | ||
813 | }; | ||
814 | |||
815 | pwm3_pins: pwm@3 { | ||
816 | pins { | ||
817 | pinmux = <STM32F429_PB4_FUNC_TIM3_CH1>, | ||
818 | <STM32F429_PB5_FUNC_TIM3_CH2>; | ||
819 | }; | ||
820 | }; | ||
821 | |||
822 | i2c1_pins: i2c1@0 { | ||
823 | pins { | ||
824 | pinmux = <STM32F429_PB9_FUNC_I2C1_SDA>, | ||
825 | <STM32F429_PB6_FUNC_I2C1_SCL>; | ||
826 | bias-disable; | ||
827 | drive-open-drain; | ||
828 | slew-rate = <3>; | ||
829 | }; | ||
830 | }; | ||
831 | |||
832 | ltdc_pins: ltdc@0 { | ||
833 | pins { | ||
834 | pinmux = <STM32F429_PI12_FUNC_LCD_HSYNC>, | ||
835 | <STM32F429_PI13_FUNC_LCD_VSYNC>, | ||
836 | <STM32F429_PI14_FUNC_LCD_CLK>, | ||
837 | <STM32F429_PI15_FUNC_LCD_R0>, | ||
838 | <STM32F429_PJ0_FUNC_LCD_R1>, | ||
839 | <STM32F429_PJ1_FUNC_LCD_R2>, | ||
840 | <STM32F429_PJ2_FUNC_LCD_R3>, | ||
841 | <STM32F429_PJ3_FUNC_LCD_R4>, | ||
842 | <STM32F429_PJ4_FUNC_LCD_R5>, | ||
843 | <STM32F429_PJ5_FUNC_LCD_R6>, | ||
844 | <STM32F429_PJ6_FUNC_LCD_R7>, | ||
845 | <STM32F429_PJ7_FUNC_LCD_G0>, | ||
846 | <STM32F429_PJ8_FUNC_LCD_G1>, | ||
847 | <STM32F429_PJ9_FUNC_LCD_G2>, | ||
848 | <STM32F429_PJ10_FUNC_LCD_G3>, | ||
849 | <STM32F429_PJ11_FUNC_LCD_G4>, | ||
850 | <STM32F429_PJ12_FUNC_LCD_B0>, | ||
851 | <STM32F429_PJ13_FUNC_LCD_B1>, | ||
852 | <STM32F429_PJ14_FUNC_LCD_B2>, | ||
853 | <STM32F429_PJ15_FUNC_LCD_B3>, | ||
854 | <STM32F429_PK0_FUNC_LCD_G5>, | ||
855 | <STM32F429_PK1_FUNC_LCD_G6>, | ||
856 | <STM32F429_PK2_FUNC_LCD_G7>, | ||
857 | <STM32F429_PK3_FUNC_LCD_B4>, | ||
858 | <STM32F429_PK4_FUNC_LCD_B5>, | ||
859 | <STM32F429_PK5_FUNC_LCD_B6>, | ||
860 | <STM32F429_PK6_FUNC_LCD_B7>, | ||
861 | <STM32F429_PK7_FUNC_LCD_DE>; | ||
862 | slew-rate = <2>; | ||
863 | }; | ||
864 | }; | ||
865 | |||
866 | dcmi_pins: dcmi@0 { | ||
867 | pins { | ||
868 | pinmux = <STM32F429_PA4_FUNC_DCMI_HSYNC>, | ||
869 | <STM32F429_PB7_FUNC_DCMI_VSYNC>, | ||
870 | <STM32F429_PA6_FUNC_DCMI_PIXCLK>, | ||
871 | <STM32F429_PC6_FUNC_DCMI_D0>, | ||
872 | <STM32F429_PC7_FUNC_DCMI_D1>, | ||
873 | <STM32F429_PC8_FUNC_DCMI_D2>, | ||
874 | <STM32F429_PC9_FUNC_DCMI_D3>, | ||
875 | <STM32F429_PC11_FUNC_DCMI_D4>, | ||
876 | <STM32F429_PD3_FUNC_DCMI_D5>, | ||
877 | <STM32F429_PB8_FUNC_DCMI_D6>, | ||
878 | <STM32F429_PE6_FUNC_DCMI_D7>, | ||
879 | <STM32F429_PC10_FUNC_DCMI_D8>, | ||
880 | <STM32F429_PC12_FUNC_DCMI_D9>, | ||
881 | <STM32F429_PD6_FUNC_DCMI_D10>, | ||
882 | <STM32F429_PD2_FUNC_DCMI_D11>; | ||
883 | bias-disable; | ||
884 | drive-push-pull; | ||
885 | slew-rate = <3>; | ||
886 | }; | ||
887 | }; | ||
888 | }; | ||
889 | |||
890 | crc: crc@40023000 { | 593 | crc: crc@40023000 { |
891 | compatible = "st,stm32f4-crc"; | 594 | compatible = "st,stm32f4-crc"; |
892 | reg = <0x40023000 0x400>; | 595 | reg = <0x40023000 0x400>; |
diff --git a/arch/arm/boot/dts/stm32f469-disco.dts b/arch/arm/boot/dts/stm32f469-disco.dts index 6ae1f037f3f0..c18acbe4cf4e 100644 --- a/arch/arm/boot/dts/stm32f469-disco.dts +++ b/arch/arm/boot/dts/stm32f469-disco.dts | |||
@@ -47,6 +47,7 @@ | |||
47 | 47 | ||
48 | /dts-v1/; | 48 | /dts-v1/; |
49 | #include "stm32f429.dtsi" | 49 | #include "stm32f429.dtsi" |
50 | #include "stm32f469-pinctrl.dtsi" | ||
50 | 51 | ||
51 | / { | 52 | / { |
52 | model = "STMicroelectronics STM32F469i-DISCO board"; | 53 | model = "STMicroelectronics STM32F469i-DISCO board"; |
diff --git a/arch/arm/boot/dts/stm32f469-pinctrl.dtsi b/arch/arm/boot/dts/stm32f469-pinctrl.dtsi new file mode 100644 index 000000000000..fff542662eea --- /dev/null +++ b/arch/arm/boot/dts/stm32f469-pinctrl.dtsi | |||
@@ -0,0 +1,96 @@ | |||
1 | /* | ||
2 | * Copyright 2017 - Alexandre Torgue <alexandre.torgue@st.com> | ||
3 | * | ||
4 | * This file is dual-licensed: you can use it either under the terms | ||
5 | * of the GPL or the X11 license, at your option. Note that this dual | ||
6 | * licensing only applies to this file, and not this project as a | ||
7 | * whole. | ||
8 | * | ||
9 | * a) This file is free software; you can redistribute it and/or | ||
10 | * modify it under the terms of the GNU General Public License as | ||
11 | * published by the Free Software Foundation; either version 2 of the | ||
12 | * License, or (at your option) any later version. | ||
13 | * | ||
14 | * This file is distributed in the hope that it will be useful, | ||
15 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
16 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
17 | * GNU General Public License for more details. | ||
18 | * | ||
19 | * Or, alternatively, | ||
20 | * | ||
21 | * b) Permission is hereby granted, free of charge, to any person | ||
22 | * obtaining a copy of this software and associated documentation | ||
23 | * files (the "Software"), to deal in the Software without | ||
24 | * restriction, including without limitation the rights to use, | ||
25 | * copy, modify, merge, publish, distribute, sublicense, and/or | ||
26 | * sell copies of the Software, and to permit persons to whom the | ||
27 | * Software is furnished to do so, subject to the following | ||
28 | * conditions: | ||
29 | * | ||
30 | * The above copyright notice and this permission notice shall be | ||
31 | * included in all copies or substantial portions of the Software. | ||
32 | * | ||
33 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
34 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES | ||
35 | * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||
36 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT | ||
37 | * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, | ||
38 | * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING | ||
39 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
40 | * OTHER DEALINGS IN THE SOFTWARE. | ||
41 | */ | ||
42 | |||
43 | #include "stm32f4-pinctrl.dtsi" | ||
44 | |||
45 | / { | ||
46 | soc { | ||
47 | pinctrl: pin-controller { | ||
48 | compatible = "st,stm32f469-pinctrl"; | ||
49 | |||
50 | gpioa: gpio@40020000 { | ||
51 | gpio-ranges = <&pinctrl 0 0 16>; | ||
52 | }; | ||
53 | |||
54 | gpiob: gpio@40020400 { | ||
55 | gpio-ranges = <&pinctrl 0 16 16>; | ||
56 | }; | ||
57 | |||
58 | gpioc: gpio@40020800 { | ||
59 | gpio-ranges = <&pinctrl 0 32 16>; | ||
60 | }; | ||
61 | |||
62 | gpiod: gpio@40020c00 { | ||
63 | gpio-ranges = <&pinctrl 0 48 16>; | ||
64 | }; | ||
65 | |||
66 | gpioe: gpio@40021000 { | ||
67 | gpio-ranges = <&pinctrl 0 64 16>; | ||
68 | }; | ||
69 | |||
70 | gpiof: gpio@40021400 { | ||
71 | gpio-ranges = <&pinctrl 0 80 16>; | ||
72 | }; | ||
73 | |||
74 | gpiog: gpio@40021800 { | ||
75 | gpio-ranges = <&pinctrl 0 96 16>; | ||
76 | }; | ||
77 | |||
78 | gpioh: gpio@40021c00 { | ||
79 | gpio-ranges = <&pinctrl 0 112 16>; | ||
80 | }; | ||
81 | |||
82 | gpioi: gpio@40022000 { | ||
83 | gpio-ranges = <&pinctrl 0 128 16>; | ||
84 | }; | ||
85 | |||
86 | gpioj: gpio@40022400 { | ||
87 | gpio-ranges = <&pinctrl 0 144 6>, | ||
88 | <&pinctrl 12 156 4>; | ||
89 | }; | ||
90 | |||
91 | gpiok: gpio@40022800 { | ||
92 | gpio-ranges = <&pinctrl 3 163 5>; | ||
93 | }; | ||
94 | }; | ||
95 | }; | ||
96 | }; | ||
diff --git a/arch/arm/configs/gemini_defconfig b/arch/arm/configs/gemini_defconfig index d2d75fa664a6..2a63fa10c813 100644 --- a/arch/arm/configs/gemini_defconfig +++ b/arch/arm/configs/gemini_defconfig | |||
@@ -32,6 +32,7 @@ CONFIG_BLK_DEV_RAM_SIZE=16384 | |||
32 | CONFIG_BLK_DEV_SD=y | 32 | CONFIG_BLK_DEV_SD=y |
33 | # CONFIG_SCSI_LOWLEVEL is not set | 33 | # CONFIG_SCSI_LOWLEVEL is not set |
34 | CONFIG_ATA=y | 34 | CONFIG_ATA=y |
35 | CONFIG_PATA_FTIDE010=y | ||
35 | CONFIG_INPUT_EVDEV=y | 36 | CONFIG_INPUT_EVDEV=y |
36 | CONFIG_KEYBOARD_GPIO=y | 37 | CONFIG_KEYBOARD_GPIO=y |
37 | # CONFIG_INPUT_MOUSE is not set | 38 | # CONFIG_INPUT_MOUSE is not set |
@@ -55,8 +56,8 @@ CONFIG_LEDS_GPIO=y | |||
55 | CONFIG_LEDS_TRIGGERS=y | 56 | CONFIG_LEDS_TRIGGERS=y |
56 | CONFIG_LEDS_TRIGGER_HEARTBEAT=y | 57 | CONFIG_LEDS_TRIGGER_HEARTBEAT=y |
57 | CONFIG_RTC_CLASS=y | 58 | CONFIG_RTC_CLASS=y |
58 | CONFIG_RTC_DRV_GEMINI=y | ||
59 | CONFIG_DMADEVICES=y | 59 | CONFIG_DMADEVICES=y |
60 | CONFIG_AMBA_PL08X=y | ||
60 | # CONFIG_DNOTIFY is not set | 61 | # CONFIG_DNOTIFY is not set |
61 | CONFIG_TMPFS=y | 62 | CONFIG_TMPFS=y |
62 | CONFIG_TMPFS_POSIX_ACL=y | 63 | CONFIG_TMPFS_POSIX_ACL=y |
diff --git a/arch/arm/configs/pxa_defconfig b/arch/arm/configs/pxa_defconfig index 1a608ec04e17..830e817a028a 100644 --- a/arch/arm/configs/pxa_defconfig +++ b/arch/arm/configs/pxa_defconfig | |||
@@ -472,7 +472,7 @@ CONFIG_LCD_PLATFORM=m | |||
472 | CONFIG_LCD_TOSA=m | 472 | CONFIG_LCD_TOSA=m |
473 | CONFIG_BACKLIGHT_PWM=m | 473 | CONFIG_BACKLIGHT_PWM=m |
474 | CONFIG_BACKLIGHT_TOSA=m | 474 | CONFIG_BACKLIGHT_TOSA=m |
475 | CONFIG_FRAMEBUFFER_CONSOLE=m | 475 | CONFIG_FRAMEBUFFER_CONSOLE=y |
476 | CONFIG_FRAMEBUFFER_CONSOLE_ROTATION=y | 476 | CONFIG_FRAMEBUFFER_CONSOLE_ROTATION=y |
477 | CONFIG_LOGO=y | 477 | CONFIG_LOGO=y |
478 | CONFIG_SOUND=m | 478 | CONFIG_SOUND=m |
diff --git a/arch/arm/configs/viper_defconfig b/arch/arm/configs/viper_defconfig index 44d4fa57ba0a..070e5074f1ee 100644 --- a/arch/arm/configs/viper_defconfig +++ b/arch/arm/configs/viper_defconfig | |||
@@ -113,7 +113,7 @@ CONFIG_FB_PXA_PARAMETERS=y | |||
113 | CONFIG_BACKLIGHT_LCD_SUPPORT=y | 113 | CONFIG_BACKLIGHT_LCD_SUPPORT=y |
114 | CONFIG_BACKLIGHT_PWM=m | 114 | CONFIG_BACKLIGHT_PWM=m |
115 | # CONFIG_VGA_CONSOLE is not set | 115 | # CONFIG_VGA_CONSOLE is not set |
116 | CONFIG_FRAMEBUFFER_CONSOLE=m | 116 | CONFIG_FRAMEBUFFER_CONSOLE=y |
117 | CONFIG_LOGO=y | 117 | CONFIG_LOGO=y |
118 | CONFIG_SOUND=m | 118 | CONFIG_SOUND=m |
119 | CONFIG_SND=m | 119 | CONFIG_SND=m |
diff --git a/arch/arm/configs/zeus_defconfig b/arch/arm/configs/zeus_defconfig index 8d4c0c926c34..09e7050d5653 100644 --- a/arch/arm/configs/zeus_defconfig +++ b/arch/arm/configs/zeus_defconfig | |||
@@ -112,7 +112,7 @@ CONFIG_FB_PXA=m | |||
112 | CONFIG_FB_PXA_PARAMETERS=y | 112 | CONFIG_FB_PXA_PARAMETERS=y |
113 | CONFIG_BACKLIGHT_LCD_SUPPORT=y | 113 | CONFIG_BACKLIGHT_LCD_SUPPORT=y |
114 | # CONFIG_VGA_CONSOLE is not set | 114 | # CONFIG_VGA_CONSOLE is not set |
115 | CONFIG_FRAMEBUFFER_CONSOLE=m | 115 | CONFIG_FRAMEBUFFER_CONSOLE=y |
116 | CONFIG_LOGO=y | 116 | CONFIG_LOGO=y |
117 | CONFIG_SOUND=m | 117 | CONFIG_SOUND=m |
118 | CONFIG_SND=m | 118 | CONFIG_SND=m |
diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h index 1d468b527b7b..776757d1604a 100644 --- a/arch/arm/include/asm/thread_info.h +++ b/arch/arm/include/asm/thread_info.h | |||
@@ -139,11 +139,10 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *, | |||
139 | #define TIF_NEED_RESCHED 1 /* rescheduling necessary */ | 139 | #define TIF_NEED_RESCHED 1 /* rescheduling necessary */ |
140 | #define TIF_NOTIFY_RESUME 2 /* callback before returning to user */ | 140 | #define TIF_NOTIFY_RESUME 2 /* callback before returning to user */ |
141 | #define TIF_UPROBE 3 /* breakpointed or singlestepping */ | 141 | #define TIF_UPROBE 3 /* breakpointed or singlestepping */ |
142 | #define TIF_FSCHECK 4 /* Check FS is USER_DS on return */ | 142 | #define TIF_SYSCALL_TRACE 4 /* syscall trace active */ |
143 | #define TIF_SYSCALL_TRACE 5 /* syscall trace active */ | 143 | #define TIF_SYSCALL_AUDIT 5 /* syscall auditing active */ |
144 | #define TIF_SYSCALL_AUDIT 6 /* syscall auditing active */ | 144 | #define TIF_SYSCALL_TRACEPOINT 6 /* syscall tracepoint instrumentation */ |
145 | #define TIF_SYSCALL_TRACEPOINT 7 /* syscall tracepoint instrumentation */ | 145 | #define TIF_SECCOMP 7 /* seccomp syscall filtering active */ |
146 | #define TIF_SECCOMP 8 /* seccomp syscall filtering active */ | ||
147 | 146 | ||
148 | #define TIF_NOHZ 12 /* in adaptive nohz mode */ | 147 | #define TIF_NOHZ 12 /* in adaptive nohz mode */ |
149 | #define TIF_USING_IWMMXT 17 | 148 | #define TIF_USING_IWMMXT 17 |
@@ -154,7 +153,6 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *, | |||
154 | #define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED) | 153 | #define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED) |
155 | #define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME) | 154 | #define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME) |
156 | #define _TIF_UPROBE (1 << TIF_UPROBE) | 155 | #define _TIF_UPROBE (1 << TIF_UPROBE) |
157 | #define _TIF_FSCHECK (1 << TIF_FSCHECK) | ||
158 | #define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE) | 156 | #define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE) |
159 | #define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT) | 157 | #define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT) |
160 | #define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT) | 158 | #define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT) |
@@ -168,9 +166,8 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *, | |||
168 | /* | 166 | /* |
169 | * Change these and you break ASM code in entry-common.S | 167 | * Change these and you break ASM code in entry-common.S |
170 | */ | 168 | */ |
171 | #define _TIF_WORK_MASK (_TIF_NEED_RESCHED | _TIF_SIGPENDING | \ | 169 | #define _TIF_WORK_MASK (_TIF_NEED_RESCHED | _TIF_SIGPENDING | \ |
172 | _TIF_NOTIFY_RESUME | _TIF_UPROBE | \ | 170 | _TIF_NOTIFY_RESUME | _TIF_UPROBE) |
173 | _TIF_FSCHECK) | ||
174 | 171 | ||
175 | #endif /* __KERNEL__ */ | 172 | #endif /* __KERNEL__ */ |
176 | #endif /* __ASM_ARM_THREAD_INFO_H */ | 173 | #endif /* __ASM_ARM_THREAD_INFO_H */ |
diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h index 87936dd5d151..0bf2347495f1 100644 --- a/arch/arm/include/asm/uaccess.h +++ b/arch/arm/include/asm/uaccess.h | |||
@@ -70,8 +70,6 @@ static inline void set_fs(mm_segment_t fs) | |||
70 | { | 70 | { |
71 | current_thread_info()->addr_limit = fs; | 71 | current_thread_info()->addr_limit = fs; |
72 | modify_domain(DOMAIN_KERNEL, fs ? DOMAIN_CLIENT : DOMAIN_MANAGER); | 72 | modify_domain(DOMAIN_KERNEL, fs ? DOMAIN_CLIENT : DOMAIN_MANAGER); |
73 | /* On user-mode return, check fs is correct */ | ||
74 | set_thread_flag(TIF_FSCHECK); | ||
75 | } | 73 | } |
76 | 74 | ||
77 | #define segment_eq(a, b) ((a) == (b)) | 75 | #define segment_eq(a, b) ((a) == (b)) |
diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S index ca3614dc6938..99c908226065 100644 --- a/arch/arm/kernel/entry-common.S +++ b/arch/arm/kernel/entry-common.S | |||
@@ -12,6 +12,7 @@ | |||
12 | #include <asm/unistd.h> | 12 | #include <asm/unistd.h> |
13 | #include <asm/ftrace.h> | 13 | #include <asm/ftrace.h> |
14 | #include <asm/unwind.h> | 14 | #include <asm/unwind.h> |
15 | #include <asm/memory.h> | ||
15 | #ifdef CONFIG_AEABI | 16 | #ifdef CONFIG_AEABI |
16 | #include <asm/unistd-oabi.h> | 17 | #include <asm/unistd-oabi.h> |
17 | #endif | 18 | #endif |
@@ -48,12 +49,14 @@ ret_fast_syscall: | |||
48 | UNWIND(.fnstart ) | 49 | UNWIND(.fnstart ) |
49 | UNWIND(.cantunwind ) | 50 | UNWIND(.cantunwind ) |
50 | disable_irq_notrace @ disable interrupts | 51 | disable_irq_notrace @ disable interrupts |
52 | ldr r2, [tsk, #TI_ADDR_LIMIT] | ||
53 | cmp r2, #TASK_SIZE | ||
54 | blne addr_limit_check_failed | ||
51 | ldr r1, [tsk, #TI_FLAGS] @ re-check for syscall tracing | 55 | ldr r1, [tsk, #TI_FLAGS] @ re-check for syscall tracing |
52 | tst r1, #_TIF_SYSCALL_WORK | 56 | tst r1, #_TIF_SYSCALL_WORK | _TIF_WORK_MASK |
53 | bne fast_work_pending | ||
54 | tst r1, #_TIF_WORK_MASK | ||
55 | bne fast_work_pending | 57 | bne fast_work_pending |
56 | 58 | ||
59 | |||
57 | /* perform architecture specific actions before user return */ | 60 | /* perform architecture specific actions before user return */ |
58 | arch_ret_to_user r1, lr | 61 | arch_ret_to_user r1, lr |
59 | 62 | ||
@@ -76,16 +79,16 @@ ret_fast_syscall: | |||
76 | UNWIND(.cantunwind ) | 79 | UNWIND(.cantunwind ) |
77 | str r0, [sp, #S_R0 + S_OFF]! @ save returned r0 | 80 | str r0, [sp, #S_R0 + S_OFF]! @ save returned r0 |
78 | disable_irq_notrace @ disable interrupts | 81 | disable_irq_notrace @ disable interrupts |
82 | ldr r2, [tsk, #TI_ADDR_LIMIT] | ||
83 | cmp r2, #TASK_SIZE | ||
84 | blne addr_limit_check_failed | ||
79 | ldr r1, [tsk, #TI_FLAGS] @ re-check for syscall tracing | 85 | ldr r1, [tsk, #TI_FLAGS] @ re-check for syscall tracing |
80 | tst r1, #_TIF_SYSCALL_WORK | 86 | tst r1, #_TIF_SYSCALL_WORK | _TIF_WORK_MASK |
81 | bne fast_work_pending | ||
82 | tst r1, #_TIF_WORK_MASK | ||
83 | beq no_work_pending | 87 | beq no_work_pending |
84 | UNWIND(.fnend ) | 88 | UNWIND(.fnend ) |
85 | ENDPROC(ret_fast_syscall) | 89 | ENDPROC(ret_fast_syscall) |
86 | 90 | ||
87 | /* Slower path - fall through to work_pending */ | 91 | /* Slower path - fall through to work_pending */ |
88 | fast_work_pending: | ||
89 | #endif | 92 | #endif |
90 | 93 | ||
91 | tst r1, #_TIF_SYSCALL_WORK | 94 | tst r1, #_TIF_SYSCALL_WORK |
@@ -111,6 +114,9 @@ ENTRY(ret_to_user) | |||
111 | ret_slow_syscall: | 114 | ret_slow_syscall: |
112 | disable_irq_notrace @ disable interrupts | 115 | disable_irq_notrace @ disable interrupts |
113 | ENTRY(ret_to_user_from_irq) | 116 | ENTRY(ret_to_user_from_irq) |
117 | ldr r2, [tsk, #TI_ADDR_LIMIT] | ||
118 | cmp r2, #TASK_SIZE | ||
119 | blne addr_limit_check_failed | ||
114 | ldr r1, [tsk, #TI_FLAGS] | 120 | ldr r1, [tsk, #TI_FLAGS] |
115 | tst r1, #_TIF_WORK_MASK | 121 | tst r1, #_TIF_WORK_MASK |
116 | bne slow_work_pending | 122 | bne slow_work_pending |
diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c index e2de50bf8742..b67ae12503f3 100644 --- a/arch/arm/kernel/signal.c +++ b/arch/arm/kernel/signal.c | |||
@@ -614,10 +614,6 @@ do_work_pending(struct pt_regs *regs, unsigned int thread_flags, int syscall) | |||
614 | * Update the trace code with the current status. | 614 | * Update the trace code with the current status. |
615 | */ | 615 | */ |
616 | trace_hardirqs_off(); | 616 | trace_hardirqs_off(); |
617 | |||
618 | /* Check valid user FS if needed */ | ||
619 | addr_limit_user_check(); | ||
620 | |||
621 | do { | 617 | do { |
622 | if (likely(thread_flags & _TIF_NEED_RESCHED)) { | 618 | if (likely(thread_flags & _TIF_NEED_RESCHED)) { |
623 | schedule(); | 619 | schedule(); |
@@ -678,3 +674,9 @@ struct page *get_signal_page(void) | |||
678 | 674 | ||
679 | return page; | 675 | return page; |
680 | } | 676 | } |
677 | |||
678 | /* Defer to generic check */ | ||
679 | asmlinkage void addr_limit_check_failed(void) | ||
680 | { | ||
681 | addr_limit_user_check(); | ||
682 | } | ||
diff --git a/arch/arm/mach-at91/pm.c b/arch/arm/mach-at91/pm.c index 5036f996e694..849014c01cf4 100644 --- a/arch/arm/mach-at91/pm.c +++ b/arch/arm/mach-at91/pm.c | |||
@@ -533,8 +533,8 @@ static void __init at91_pm_backup_init(void) | |||
533 | } | 533 | } |
534 | 534 | ||
535 | pm_bu->suspended = 0; | 535 | pm_bu->suspended = 0; |
536 | pm_bu->canary = virt_to_phys(&canary); | 536 | pm_bu->canary = __pa_symbol(&canary); |
537 | pm_bu->resume = virt_to_phys(cpu_resume); | 537 | pm_bu->resume = __pa_symbol(cpu_resume); |
538 | 538 | ||
539 | return; | 539 | return; |
540 | 540 | ||
diff --git a/arch/arm/mach-omap2/hsmmc.c b/arch/arm/mach-omap2/hsmmc.c index 5b614388d72f..6d28aa20a7d3 100644 --- a/arch/arm/mach-omap2/hsmmc.c +++ b/arch/arm/mach-omap2/hsmmc.c | |||
@@ -58,10 +58,10 @@ void omap_hsmmc_late_init(struct omap2_hsmmc_info *c) | |||
58 | struct platform_device *pdev; | 58 | struct platform_device *pdev; |
59 | int res; | 59 | int res; |
60 | 60 | ||
61 | if (omap_hsmmc_done != 1) | 61 | if (omap_hsmmc_done) |
62 | return; | 62 | return; |
63 | 63 | ||
64 | omap_hsmmc_done++; | 64 | omap_hsmmc_done = 1; |
65 | 65 | ||
66 | for (; c->mmc; c++) { | 66 | for (; c->mmc; c++) { |
67 | pdev = c->pdev; | 67 | pdev = c->pdev; |
diff --git a/arch/arm/mach-omap2/omap_hwmod_7xx_data.c b/arch/arm/mach-omap2/omap_hwmod_7xx_data.c index f040244c57e7..2f4f7002f38d 100644 --- a/arch/arm/mach-omap2/omap_hwmod_7xx_data.c +++ b/arch/arm/mach-omap2/omap_hwmod_7xx_data.c | |||
@@ -839,6 +839,7 @@ static struct omap_hwmod dra7xx_gpio1_hwmod = { | |||
839 | .name = "gpio1", | 839 | .name = "gpio1", |
840 | .class = &dra7xx_gpio_hwmod_class, | 840 | .class = &dra7xx_gpio_hwmod_class, |
841 | .clkdm_name = "wkupaon_clkdm", | 841 | .clkdm_name = "wkupaon_clkdm", |
842 | .flags = HWMOD_CONTROL_OPT_CLKS_IN_RESET, | ||
842 | .main_clk = "wkupaon_iclk_mux", | 843 | .main_clk = "wkupaon_iclk_mux", |
843 | .prcm = { | 844 | .prcm = { |
844 | .omap4 = { | 845 | .omap4 = { |
diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile index 9b41f1e3b1a0..939b310913cf 100644 --- a/arch/arm64/Makefile +++ b/arch/arm64/Makefile | |||
@@ -50,17 +50,22 @@ KBUILD_CFLAGS += -fno-asynchronous-unwind-tables | |||
50 | KBUILD_CFLAGS += $(call cc-option, -mpc-relative-literal-loads) | 50 | KBUILD_CFLAGS += $(call cc-option, -mpc-relative-literal-loads) |
51 | KBUILD_AFLAGS += $(lseinstr) $(brokengasinst) | 51 | KBUILD_AFLAGS += $(lseinstr) $(brokengasinst) |
52 | 52 | ||
53 | KBUILD_CFLAGS += $(call cc-option,-mabi=lp64) | ||
54 | KBUILD_AFLAGS += $(call cc-option,-mabi=lp64) | ||
55 | |||
53 | ifeq ($(CONFIG_CPU_BIG_ENDIAN), y) | 56 | ifeq ($(CONFIG_CPU_BIG_ENDIAN), y) |
54 | KBUILD_CPPFLAGS += -mbig-endian | 57 | KBUILD_CPPFLAGS += -mbig-endian |
55 | CHECKFLAGS += -D__AARCH64EB__ | 58 | CHECKFLAGS += -D__AARCH64EB__ |
56 | AS += -EB | 59 | AS += -EB |
57 | LD += -EB | 60 | LD += -EB |
61 | LDFLAGS += -maarch64linuxb | ||
58 | UTS_MACHINE := aarch64_be | 62 | UTS_MACHINE := aarch64_be |
59 | else | 63 | else |
60 | KBUILD_CPPFLAGS += -mlittle-endian | 64 | KBUILD_CPPFLAGS += -mlittle-endian |
61 | CHECKFLAGS += -D__AARCH64EL__ | 65 | CHECKFLAGS += -D__AARCH64EL__ |
62 | AS += -EL | 66 | AS += -EL |
63 | LD += -EL | 67 | LD += -EL |
68 | LDFLAGS += -maarch64linux | ||
64 | UTS_MACHINE := aarch64 | 69 | UTS_MACHINE := aarch64 |
65 | endif | 70 | endif |
66 | 71 | ||
diff --git a/arch/arm64/boot/dts/amlogic/meson-gx-p23x-q20x.dtsi b/arch/arm64/boot/dts/amlogic/meson-gx-p23x-q20x.dtsi index c89010e56488..4157987f4a3d 100644 --- a/arch/arm64/boot/dts/amlogic/meson-gx-p23x-q20x.dtsi +++ b/arch/arm64/boot/dts/amlogic/meson-gx-p23x-q20x.dtsi | |||
@@ -168,7 +168,8 @@ | |||
168 | &sd_emmc_a { | 168 | &sd_emmc_a { |
169 | status = "okay"; | 169 | status = "okay"; |
170 | pinctrl-0 = <&sdio_pins>; | 170 | pinctrl-0 = <&sdio_pins>; |
171 | pinctrl-names = "default"; | 171 | pinctrl-1 = <&sdio_clk_gate_pins>; |
172 | pinctrl-names = "default", "clk-gate"; | ||
172 | #address-cells = <1>; | 173 | #address-cells = <1>; |
173 | #size-cells = <0>; | 174 | #size-cells = <0>; |
174 | 175 | ||
@@ -194,7 +195,8 @@ | |||
194 | &sd_emmc_b { | 195 | &sd_emmc_b { |
195 | status = "okay"; | 196 | status = "okay"; |
196 | pinctrl-0 = <&sdcard_pins>; | 197 | pinctrl-0 = <&sdcard_pins>; |
197 | pinctrl-names = "default"; | 198 | pinctrl-1 = <&sdcard_clk_gate_pins>; |
199 | pinctrl-names = "default", "clk-gate"; | ||
198 | 200 | ||
199 | bus-width = <4>; | 201 | bus-width = <4>; |
200 | cap-sd-highspeed; | 202 | cap-sd-highspeed; |
@@ -212,10 +214,10 @@ | |||
212 | &sd_emmc_c { | 214 | &sd_emmc_c { |
213 | status = "okay"; | 215 | status = "okay"; |
214 | pinctrl-0 = <&emmc_pins>; | 216 | pinctrl-0 = <&emmc_pins>; |
215 | pinctrl-names = "default"; | 217 | pinctrl-1 = <&emmc_clk_gate_pins>; |
218 | pinctrl-names = "default", "clk-gate"; | ||
216 | 219 | ||
217 | bus-width = <8>; | 220 | bus-width = <8>; |
218 | cap-sd-highspeed; | ||
219 | cap-mmc-highspeed; | 221 | cap-mmc-highspeed; |
220 | max-frequency = <200000000>; | 222 | max-frequency = <200000000>; |
221 | non-removable; | 223 | non-removable; |
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxbb-nanopi-k2.dts b/arch/arm64/boot/dts/amlogic/meson-gxbb-nanopi-k2.dts index 9697a7a79464..4b17a76959b2 100644 --- a/arch/arm64/boot/dts/amlogic/meson-gxbb-nanopi-k2.dts +++ b/arch/arm64/boot/dts/amlogic/meson-gxbb-nanopi-k2.dts | |||
@@ -107,6 +107,9 @@ | |||
107 | 107 | ||
108 | states = <3300000 0>, | 108 | states = <3300000 0>, |
109 | <1800000 1>; | 109 | <1800000 1>; |
110 | |||
111 | regulator-settling-time-up-us = <100>; | ||
112 | regulator-settling-time-down-us = <5000>; | ||
110 | }; | 113 | }; |
111 | 114 | ||
112 | wifi_32k: wifi-32k { | 115 | wifi_32k: wifi-32k { |
@@ -250,7 +253,8 @@ | |||
250 | &sd_emmc_a { | 253 | &sd_emmc_a { |
251 | status = "okay"; | 254 | status = "okay"; |
252 | pinctrl-0 = <&sdio_pins>, <&sdio_irq_pins>; | 255 | pinctrl-0 = <&sdio_pins>, <&sdio_irq_pins>; |
253 | pinctrl-names = "default"; | 256 | pinctrl-1 = <&sdio_clk_gate_pins>; |
257 | pinctrl-names = "default", "clk-gate"; | ||
254 | #address-cells = <1>; | 258 | #address-cells = <1>; |
255 | #size-cells = <0>; | 259 | #size-cells = <0>; |
256 | 260 | ||
@@ -276,11 +280,16 @@ | |||
276 | &sd_emmc_b { | 280 | &sd_emmc_b { |
277 | status = "okay"; | 281 | status = "okay"; |
278 | pinctrl-0 = <&sdcard_pins>; | 282 | pinctrl-0 = <&sdcard_pins>; |
279 | pinctrl-names = "default"; | 283 | pinctrl-1 = <&sdcard_clk_gate_pins>; |
284 | pinctrl-names = "default", "clk-gate"; | ||
280 | 285 | ||
281 | bus-width = <4>; | 286 | bus-width = <4>; |
282 | cap-sd-highspeed; | 287 | cap-sd-highspeed; |
283 | max-frequency = <100000000>; | 288 | sd-uhs-sdr12; |
289 | sd-uhs-sdr25; | ||
290 | sd-uhs-sdr50; | ||
291 | sd-uhs-sdr104; | ||
292 | max-frequency = <200000000>; | ||
284 | disable-wp; | 293 | disable-wp; |
285 | 294 | ||
286 | cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_HIGH>; | 295 | cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_HIGH>; |
@@ -294,10 +303,10 @@ | |||
294 | &sd_emmc_c { | 303 | &sd_emmc_c { |
295 | status = "disabled"; | 304 | status = "disabled"; |
296 | pinctrl-0 = <&emmc_pins>; | 305 | pinctrl-0 = <&emmc_pins>; |
297 | pinctrl-names = "default"; | 306 | pinctrl-1 = <&emmc_clk_gate_pins>; |
307 | pinctrl-names = "default", "clk-gate"; | ||
298 | 308 | ||
299 | bus-width = <8>; | 309 | bus-width = <8>; |
300 | cap-sd-highspeed; | ||
301 | max-frequency = <200000000>; | 310 | max-frequency = <200000000>; |
302 | non-removable; | 311 | non-removable; |
303 | disable-wp; | 312 | disable-wp; |
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxbb-nexbox-a95x.dts b/arch/arm64/boot/dts/amlogic/meson-gxbb-nexbox-a95x.dts index 9c59c3c6d1b6..38dfdde5c147 100644 --- a/arch/arm64/boot/dts/amlogic/meson-gxbb-nexbox-a95x.dts +++ b/arch/arm64/boot/dts/amlogic/meson-gxbb-nexbox-a95x.dts | |||
@@ -51,7 +51,7 @@ | |||
51 | / { | 51 | / { |
52 | compatible = "nexbox,a95x", "amlogic,meson-gxbb"; | 52 | compatible = "nexbox,a95x", "amlogic,meson-gxbb"; |
53 | model = "NEXBOX A95X"; | 53 | model = "NEXBOX A95X"; |
54 | 54 | ||
55 | aliases { | 55 | aliases { |
56 | serial0 = &uart_AO; | 56 | serial0 = &uart_AO; |
57 | }; | 57 | }; |
@@ -232,7 +232,8 @@ | |||
232 | &sd_emmc_a { | 232 | &sd_emmc_a { |
233 | status = "okay"; | 233 | status = "okay"; |
234 | pinctrl-0 = <&sdio_pins>; | 234 | pinctrl-0 = <&sdio_pins>; |
235 | pinctrl-names = "default"; | 235 | pinctrl-1 = <&sdio_clk_gate_pins>; |
236 | pinctrl-names = "default", "clk-gate"; | ||
236 | #address-cells = <1>; | 237 | #address-cells = <1>; |
237 | #size-cells = <0>; | 238 | #size-cells = <0>; |
238 | 239 | ||
@@ -253,7 +254,8 @@ | |||
253 | &sd_emmc_b { | 254 | &sd_emmc_b { |
254 | status = "okay"; | 255 | status = "okay"; |
255 | pinctrl-0 = <&sdcard_pins>; | 256 | pinctrl-0 = <&sdcard_pins>; |
256 | pinctrl-names = "default"; | 257 | pinctrl-1 = <&sdcard_clk_gate_pins>; |
258 | pinctrl-names = "default", "clk-gate"; | ||
257 | 259 | ||
258 | bus-width = <4>; | 260 | bus-width = <4>; |
259 | cap-sd-highspeed; | 261 | cap-sd-highspeed; |
@@ -271,10 +273,10 @@ | |||
271 | &sd_emmc_c { | 273 | &sd_emmc_c { |
272 | status = "okay"; | 274 | status = "okay"; |
273 | pinctrl-0 = <&emmc_pins>; | 275 | pinctrl-0 = <&emmc_pins>; |
274 | pinctrl-names = "default"; | 276 | pinctrl-1 = <&emmc_clk_gate_pins>; |
277 | pinctrl-names = "default", "clk-gate"; | ||
275 | 278 | ||
276 | bus-width = <8>; | 279 | bus-width = <8>; |
277 | cap-sd-highspeed; | ||
278 | cap-mmc-highspeed; | 280 | cap-mmc-highspeed; |
279 | max-frequency = <200000000>; | 281 | max-frequency = <200000000>; |
280 | non-removable; | 282 | non-removable; |
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxbb-odroidc2.dts b/arch/arm64/boot/dts/amlogic/meson-gxbb-odroidc2.dts index d147c853ab05..1ffa1c238a72 100644 --- a/arch/arm64/boot/dts/amlogic/meson-gxbb-odroidc2.dts +++ b/arch/arm64/boot/dts/amlogic/meson-gxbb-odroidc2.dts | |||
@@ -50,7 +50,7 @@ | |||
50 | / { | 50 | / { |
51 | compatible = "hardkernel,odroid-c2", "amlogic,meson-gxbb"; | 51 | compatible = "hardkernel,odroid-c2", "amlogic,meson-gxbb"; |
52 | model = "Hardkernel ODROID-C2"; | 52 | model = "Hardkernel ODROID-C2"; |
53 | 53 | ||
54 | aliases { | 54 | aliases { |
55 | serial0 = &uart_AO; | 55 | serial0 = &uart_AO; |
56 | }; | 56 | }; |
@@ -253,7 +253,8 @@ | |||
253 | &sd_emmc_b { | 253 | &sd_emmc_b { |
254 | status = "okay"; | 254 | status = "okay"; |
255 | pinctrl-0 = <&sdcard_pins>; | 255 | pinctrl-0 = <&sdcard_pins>; |
256 | pinctrl-names = "default"; | 256 | pinctrl-1 = <&sdcard_clk_gate_pins>; |
257 | pinctrl-names = "default", "clk-gate"; | ||
257 | 258 | ||
258 | bus-width = <4>; | 259 | bus-width = <4>; |
259 | cap-sd-highspeed; | 260 | cap-sd-highspeed; |
@@ -271,10 +272,10 @@ | |||
271 | &sd_emmc_c { | 272 | &sd_emmc_c { |
272 | status = "okay"; | 273 | status = "okay"; |
273 | pinctrl-0 = <&emmc_pins>; | 274 | pinctrl-0 = <&emmc_pins>; |
274 | pinctrl-names = "default"; | 275 | pinctrl-1 = <&emmc_clk_gate_pins>; |
276 | pinctrl-names = "default", "clk-gate"; | ||
275 | 277 | ||
276 | bus-width = <8>; | 278 | bus-width = <8>; |
277 | cap-sd-highspeed; | ||
278 | max-frequency = <200000000>; | 279 | max-frequency = <200000000>; |
279 | non-removable; | 280 | non-removable; |
280 | disable-wp; | 281 | disable-wp; |
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxbb-p20x.dtsi b/arch/arm64/boot/dts/amlogic/meson-gxbb-p20x.dtsi index 81ffc689a5bf..23c08c3afd0a 100644 --- a/arch/arm64/boot/dts/amlogic/meson-gxbb-p20x.dtsi +++ b/arch/arm64/boot/dts/amlogic/meson-gxbb-p20x.dtsi | |||
@@ -194,7 +194,8 @@ | |||
194 | &sd_emmc_a { | 194 | &sd_emmc_a { |
195 | status = "okay"; | 195 | status = "okay"; |
196 | pinctrl-0 = <&sdio_pins>; | 196 | pinctrl-0 = <&sdio_pins>; |
197 | pinctrl-names = "default"; | 197 | pinctrl-1 = <&sdio_clk_gate_pins>; |
198 | pinctrl-names = "default", "clk-gate"; | ||
198 | #address-cells = <1>; | 199 | #address-cells = <1>; |
199 | #size-cells = <0>; | 200 | #size-cells = <0>; |
200 | 201 | ||
@@ -220,10 +221,14 @@ | |||
220 | &sd_emmc_b { | 221 | &sd_emmc_b { |
221 | status = "okay"; | 222 | status = "okay"; |
222 | pinctrl-0 = <&sdcard_pins>; | 223 | pinctrl-0 = <&sdcard_pins>; |
223 | pinctrl-names = "default"; | 224 | pinctrl-1 = <&sdcard_clk_gate_pins>; |
225 | pinctrl-names = "default", "clk-gate"; | ||
224 | 226 | ||
225 | bus-width = <4>; | 227 | bus-width = <4>; |
226 | cap-sd-highspeed; | 228 | cap-sd-highspeed; |
229 | sd-uhs-sdr12; | ||
230 | sd-uhs-sdr25; | ||
231 | sd-uhs-sdr50; | ||
227 | max-frequency = <100000000>; | 232 | max-frequency = <100000000>; |
228 | disable-wp; | 233 | disable-wp; |
229 | 234 | ||
@@ -238,10 +243,10 @@ | |||
238 | &sd_emmc_c { | 243 | &sd_emmc_c { |
239 | status = "okay"; | 244 | status = "okay"; |
240 | pinctrl-0 = <&emmc_pins>; | 245 | pinctrl-0 = <&emmc_pins>; |
241 | pinctrl-names = "default"; | 246 | pinctrl-1 = <&emmc_clk_gate_pins>; |
247 | pinctrl-names = "default", "clk-gate"; | ||
242 | 248 | ||
243 | bus-width = <8>; | 249 | bus-width = <8>; |
244 | cap-sd-highspeed; | ||
245 | cap-mmc-highspeed; | 250 | cap-mmc-highspeed; |
246 | max-frequency = <200000000>; | 251 | max-frequency = <200000000>; |
247 | non-removable; | 252 | non-removable; |
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxbb-vega-s95.dtsi b/arch/arm64/boot/dts/amlogic/meson-gxbb-vega-s95.dtsi index 346753fb6324..f2bc6dea1fc6 100644 --- a/arch/arm64/boot/dts/amlogic/meson-gxbb-vega-s95.dtsi +++ b/arch/arm64/boot/dts/amlogic/meson-gxbb-vega-s95.dtsi | |||
@@ -155,7 +155,8 @@ | |||
155 | &sd_emmc_a { | 155 | &sd_emmc_a { |
156 | status = "okay"; | 156 | status = "okay"; |
157 | pinctrl-0 = <&sdio_pins &sdio_irq_pins>; | 157 | pinctrl-0 = <&sdio_pins &sdio_irq_pins>; |
158 | pinctrl-names = "default"; | 158 | pinctrl-1 = <&sdio_clk_gate_pins>; |
159 | pinctrl-names = "default", "clk-gate"; | ||
159 | #address-cells = <1>; | 160 | #address-cells = <1>; |
160 | #size-cells = <0>; | 161 | #size-cells = <0>; |
161 | 162 | ||
@@ -181,7 +182,8 @@ | |||
181 | &sd_emmc_b { | 182 | &sd_emmc_b { |
182 | status = "okay"; | 183 | status = "okay"; |
183 | pinctrl-0 = <&sdcard_pins>; | 184 | pinctrl-0 = <&sdcard_pins>; |
184 | pinctrl-names = "default"; | 185 | pinctrl-1 = <&sdcard_clk_gate_pins>; |
186 | pinctrl-names = "default", "clk-gate"; | ||
185 | 187 | ||
186 | bus-width = <4>; | 188 | bus-width = <4>; |
187 | cap-sd-highspeed; | 189 | cap-sd-highspeed; |
@@ -198,10 +200,10 @@ | |||
198 | &sd_emmc_c { | 200 | &sd_emmc_c { |
199 | status = "okay"; | 201 | status = "okay"; |
200 | pinctrl-0 = <&emmc_pins>; | 202 | pinctrl-0 = <&emmc_pins>; |
201 | pinctrl-names = "default"; | 203 | pinctrl-1 = <&emmc_clk_gate_pins>; |
204 | pinctrl-names = "default", "clk-gate"; | ||
202 | 205 | ||
203 | bus-width = <8>; | 206 | bus-width = <8>; |
204 | cap-sd-highspeed; | ||
205 | cap-mmc-highspeed; | 207 | cap-mmc-highspeed; |
206 | max-frequency = <200000000>; | 208 | max-frequency = <200000000>; |
207 | non-removable; | 209 | non-removable; |
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxbb.dtsi b/arch/arm64/boot/dts/amlogic/meson-gxbb.dtsi index 52f1687e7a09..af834cdbba79 100644 --- a/arch/arm64/boot/dts/amlogic/meson-gxbb.dtsi +++ b/arch/arm64/boot/dts/amlogic/meson-gxbb.dtsi | |||
@@ -392,6 +392,17 @@ | |||
392 | }; | 392 | }; |
393 | }; | 393 | }; |
394 | 394 | ||
395 | emmc_clk_gate_pins: emmc_clk_gate { | ||
396 | mux { | ||
397 | groups = "BOOT_8"; | ||
398 | function = "gpio_periphs"; | ||
399 | }; | ||
400 | cfg-pull-down { | ||
401 | pins = "BOOT_8"; | ||
402 | bias-pull-down; | ||
403 | }; | ||
404 | }; | ||
405 | |||
395 | nor_pins: nor { | 406 | nor_pins: nor { |
396 | mux { | 407 | mux { |
397 | groups = "nor_d", | 408 | groups = "nor_d", |
@@ -430,6 +441,17 @@ | |||
430 | }; | 441 | }; |
431 | }; | 442 | }; |
432 | 443 | ||
444 | sdcard_clk_gate_pins: sdcard_clk_gate { | ||
445 | mux { | ||
446 | groups = "CARD_2"; | ||
447 | function = "gpio_periphs"; | ||
448 | }; | ||
449 | cfg-pull-down { | ||
450 | pins = "CARD_2"; | ||
451 | bias-pull-down; | ||
452 | }; | ||
453 | }; | ||
454 | |||
433 | sdio_pins: sdio { | 455 | sdio_pins: sdio { |
434 | mux { | 456 | mux { |
435 | groups = "sdio_d0", | 457 | groups = "sdio_d0", |
@@ -442,6 +464,17 @@ | |||
442 | }; | 464 | }; |
443 | }; | 465 | }; |
444 | 466 | ||
467 | sdio_clk_gate_pins: sdio_clk_gate { | ||
468 | mux { | ||
469 | groups = "GPIOX_4"; | ||
470 | function = "gpio_periphs"; | ||
471 | }; | ||
472 | cfg-pull-down { | ||
473 | pins = "GPIOX_4"; | ||
474 | bias-pull-down; | ||
475 | }; | ||
476 | }; | ||
477 | |||
445 | sdio_irq_pins: sdio_irq { | 478 | sdio_irq_pins: sdio_irq { |
446 | mux { | 479 | mux { |
447 | groups = "sdio_irq"; | 480 | groups = "sdio_irq"; |
@@ -661,21 +694,21 @@ | |||
661 | 694 | ||
662 | &sd_emmc_a { | 695 | &sd_emmc_a { |
663 | clocks = <&clkc CLKID_SD_EMMC_A>, | 696 | clocks = <&clkc CLKID_SD_EMMC_A>, |
664 | <&xtal>, | 697 | <&clkc CLKID_SD_EMMC_A_CLK0>, |
665 | <&clkc CLKID_FCLK_DIV2>; | 698 | <&clkc CLKID_FCLK_DIV2>; |
666 | clock-names = "core", "clkin0", "clkin1"; | 699 | clock-names = "core", "clkin0", "clkin1"; |
667 | }; | 700 | }; |
668 | 701 | ||
669 | &sd_emmc_b { | 702 | &sd_emmc_b { |
670 | clocks = <&clkc CLKID_SD_EMMC_B>, | 703 | clocks = <&clkc CLKID_SD_EMMC_B>, |
671 | <&xtal>, | 704 | <&clkc CLKID_SD_EMMC_B_CLK0>, |
672 | <&clkc CLKID_FCLK_DIV2>; | 705 | <&clkc CLKID_FCLK_DIV2>; |
673 | clock-names = "core", "clkin0", "clkin1"; | 706 | clock-names = "core", "clkin0", "clkin1"; |
674 | }; | 707 | }; |
675 | 708 | ||
676 | &sd_emmc_c { | 709 | &sd_emmc_c { |
677 | clocks = <&clkc CLKID_SD_EMMC_C>, | 710 | clocks = <&clkc CLKID_SD_EMMC_C>, |
678 | <&xtal>, | 711 | <&clkc CLKID_SD_EMMC_C_CLK0>, |
679 | <&clkc CLKID_FCLK_DIV2>; | 712 | <&clkc CLKID_FCLK_DIV2>; |
680 | clock-names = "core", "clkin0", "clkin1"; | 713 | clock-names = "core", "clkin0", "clkin1"; |
681 | }; | 714 | }; |
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-hwacom-amazetv.dts b/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-hwacom-amazetv.dts index 2a5804ce7f4b..977b4240f3c1 100644 --- a/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-hwacom-amazetv.dts +++ b/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-hwacom-amazetv.dts | |||
@@ -123,7 +123,8 @@ | |||
123 | &sd_emmc_b { | 123 | &sd_emmc_b { |
124 | status = "okay"; | 124 | status = "okay"; |
125 | pinctrl-0 = <&sdcard_pins>; | 125 | pinctrl-0 = <&sdcard_pins>; |
126 | pinctrl-names = "default"; | 126 | pinctrl-1 = <&sdcard_clk_gate_pins>; |
127 | pinctrl-names = "default", "clk-gate"; | ||
127 | 128 | ||
128 | bus-width = <4>; | 129 | bus-width = <4>; |
129 | cap-sd-highspeed; | 130 | cap-sd-highspeed; |
@@ -141,10 +142,10 @@ | |||
141 | &sd_emmc_c { | 142 | &sd_emmc_c { |
142 | status = "okay"; | 143 | status = "okay"; |
143 | pinctrl-0 = <&emmc_pins>; | 144 | pinctrl-0 = <&emmc_pins>; |
144 | pinctrl-names = "default"; | 145 | pinctrl-1 = <&emmc_clk_gate_pins>; |
146 | pinctrl-names = "default", "clk-gate"; | ||
145 | 147 | ||
146 | bus-width = <8>; | 148 | bus-width = <8>; |
147 | cap-sd-highspeed; | ||
148 | cap-mmc-highspeed; | 149 | cap-mmc-highspeed; |
149 | max-frequency = <100000000>; | 150 | max-frequency = <100000000>; |
150 | non-removable; | 151 | non-removable; |
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-libretech-cc.dts b/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-libretech-cc.dts index 69ca14ac10fa..64c54c92e214 100644 --- a/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-libretech-cc.dts +++ b/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-libretech-cc.dts | |||
@@ -91,6 +91,9 @@ | |||
91 | 91 | ||
92 | states = <3300000 0>, | 92 | states = <3300000 0>, |
93 | <1800000 1>; | 93 | <1800000 1>; |
94 | |||
95 | regulator-settling-time-up-us = <200>; | ||
96 | regulator-settling-time-down-us = <50000>; | ||
94 | }; | 97 | }; |
95 | 98 | ||
96 | vddio_boot: regulator-vddio_boot { | 99 | vddio_boot: regulator-vddio_boot { |
@@ -197,10 +200,14 @@ | |||
197 | &sd_emmc_b { | 200 | &sd_emmc_b { |
198 | status = "okay"; | 201 | status = "okay"; |
199 | pinctrl-0 = <&sdcard_pins>; | 202 | pinctrl-0 = <&sdcard_pins>; |
200 | pinctrl-names = "default"; | 203 | pinctrl-1 = <&sdcard_clk_gate_pins>; |
204 | pinctrl-names = "default", "clk-gate"; | ||
201 | 205 | ||
202 | bus-width = <4>; | 206 | bus-width = <4>; |
203 | cap-sd-highspeed; | 207 | cap-sd-highspeed; |
208 | sd-uhs-sdr12; | ||
209 | sd-uhs-sdr25; | ||
210 | sd-uhs-sdr50; | ||
204 | max-frequency = <100000000>; | 211 | max-frequency = <100000000>; |
205 | disable-wp; | 212 | disable-wp; |
206 | 213 | ||
@@ -215,10 +222,12 @@ | |||
215 | &sd_emmc_c { | 222 | &sd_emmc_c { |
216 | status = "okay"; | 223 | status = "okay"; |
217 | pinctrl-0 = <&emmc_pins>; | 224 | pinctrl-0 = <&emmc_pins>; |
218 | pinctrl-names = "default"; | 225 | pinctrl-1 = <&emmc_clk_gate_pins>; |
226 | pinctrl-names = "default", "clk-gate"; | ||
219 | 227 | ||
220 | bus-width = <8>; | 228 | bus-width = <8>; |
221 | cap-mmc-highspeed; | 229 | cap-mmc-highspeed; |
230 | mmc-ddr-3_3v; | ||
222 | max-frequency = <50000000>; | 231 | max-frequency = <50000000>; |
223 | non-removable; | 232 | non-removable; |
224 | disable-wp; | 233 | disable-wp; |
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-nexbox-a95x.dts b/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-nexbox-a95x.dts index 4c2ac7650fcd..1b8f32867aa1 100644 --- a/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-nexbox-a95x.dts +++ b/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-nexbox-a95x.dts | |||
@@ -189,7 +189,8 @@ | |||
189 | &sd_emmc_a { | 189 | &sd_emmc_a { |
190 | status = "okay"; | 190 | status = "okay"; |
191 | pinctrl-0 = <&sdio_pins>; | 191 | pinctrl-0 = <&sdio_pins>; |
192 | pinctrl-names = "default"; | 192 | pinctrl-1 = <&sdio_clk_gate_pins>; |
193 | pinctrl-names = "default", "clk-gate"; | ||
193 | #address-cells = <1>; | 194 | #address-cells = <1>; |
194 | #size-cells = <0>; | 195 | #size-cells = <0>; |
195 | 196 | ||
@@ -210,7 +211,8 @@ | |||
210 | &sd_emmc_b { | 211 | &sd_emmc_b { |
211 | status = "okay"; | 212 | status = "okay"; |
212 | pinctrl-0 = <&sdcard_pins>; | 213 | pinctrl-0 = <&sdcard_pins>; |
213 | pinctrl-names = "default"; | 214 | pinctrl-1 = <&sdcard_clk_gate_pins>; |
215 | pinctrl-names = "default", "clk-gate"; | ||
214 | 216 | ||
215 | bus-width = <4>; | 217 | bus-width = <4>; |
216 | cap-sd-highspeed; | 218 | cap-sd-highspeed; |
@@ -228,10 +230,10 @@ | |||
228 | &sd_emmc_c { | 230 | &sd_emmc_c { |
229 | status = "okay"; | 231 | status = "okay"; |
230 | pinctrl-0 = <&emmc_pins>; | 232 | pinctrl-0 = <&emmc_pins>; |
231 | pinctrl-names = "default"; | 233 | pinctrl-1 = <&emmc_clk_gate_pins>; |
234 | pinctrl-names = "default", "clk-gate"; | ||
232 | 235 | ||
233 | bus-width = <8>; | 236 | bus-width = <8>; |
234 | cap-sd-highspeed; | ||
235 | cap-mmc-highspeed; | 237 | cap-mmc-highspeed; |
236 | max-frequency = <200000000>; | 238 | max-frequency = <200000000>; |
237 | non-removable; | 239 | non-removable; |
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-p212.dtsi b/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-p212.dtsi index f3eea8e89d12..129af9068814 100644 --- a/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-p212.dtsi +++ b/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-p212.dtsi | |||
@@ -95,7 +95,8 @@ | |||
95 | &sd_emmc_a { | 95 | &sd_emmc_a { |
96 | status = "okay"; | 96 | status = "okay"; |
97 | pinctrl-0 = <&sdio_pins>; | 97 | pinctrl-0 = <&sdio_pins>; |
98 | pinctrl-names = "default"; | 98 | pinctrl-1 = <&sdio_clk_gate_pins>; |
99 | pinctrl-names = "default", "clk-gate"; | ||
99 | #address-cells = <1>; | 100 | #address-cells = <1>; |
100 | #size-cells = <0>; | 101 | #size-cells = <0>; |
101 | 102 | ||
@@ -116,7 +117,8 @@ | |||
116 | &sd_emmc_b { | 117 | &sd_emmc_b { |
117 | status = "okay"; | 118 | status = "okay"; |
118 | pinctrl-0 = <&sdcard_pins>; | 119 | pinctrl-0 = <&sdcard_pins>; |
119 | pinctrl-names = "default"; | 120 | pinctrl-1 = <&sdcard_clk_gate_pins>; |
121 | pinctrl-names = "default", "clk-gate"; | ||
120 | 122 | ||
121 | bus-width = <4>; | 123 | bus-width = <4>; |
122 | cap-sd-highspeed; | 124 | cap-sd-highspeed; |
@@ -134,10 +136,10 @@ | |||
134 | &sd_emmc_c { | 136 | &sd_emmc_c { |
135 | status = "okay"; | 137 | status = "okay"; |
136 | pinctrl-0 = <&emmc_pins>; | 138 | pinctrl-0 = <&emmc_pins>; |
137 | pinctrl-names = "default"; | 139 | pinctrl-1 = <&emmc_clk_gate_pins>; |
140 | pinctrl-names = "default", "clk-gate"; | ||
138 | 141 | ||
139 | bus-width = <8>; | 142 | bus-width = <8>; |
140 | cap-sd-highspeed; | ||
141 | cap-mmc-highspeed; | 143 | cap-mmc-highspeed; |
142 | max-frequency = <200000000>; | 144 | max-frequency = <200000000>; |
143 | non-removable; | 145 | non-removable; |
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxl.dtsi b/arch/arm64/boot/dts/amlogic/meson-gxl.dtsi index d6876e64979e..d8dd3298b15c 100644 --- a/arch/arm64/boot/dts/amlogic/meson-gxl.dtsi +++ b/arch/arm64/boot/dts/amlogic/meson-gxl.dtsi | |||
@@ -281,6 +281,17 @@ | |||
281 | }; | 281 | }; |
282 | }; | 282 | }; |
283 | 283 | ||
284 | emmc_clk_gate_pins: emmc_clk_gate { | ||
285 | mux { | ||
286 | groups = "BOOT_8"; | ||
287 | function = "gpio_periphs"; | ||
288 | }; | ||
289 | cfg-pull-down { | ||
290 | pins = "BOOT_8"; | ||
291 | bias-pull-down; | ||
292 | }; | ||
293 | }; | ||
294 | |||
284 | nor_pins: nor { | 295 | nor_pins: nor { |
285 | mux { | 296 | mux { |
286 | groups = "nor_d", | 297 | groups = "nor_d", |
@@ -319,6 +330,17 @@ | |||
319 | }; | 330 | }; |
320 | }; | 331 | }; |
321 | 332 | ||
333 | sdcard_clk_gate_pins: sdcard_clk_gate { | ||
334 | mux { | ||
335 | groups = "CARD_2"; | ||
336 | function = "gpio_periphs"; | ||
337 | }; | ||
338 | cfg-pull-down { | ||
339 | pins = "CARD_2"; | ||
340 | bias-pull-down; | ||
341 | }; | ||
342 | }; | ||
343 | |||
322 | sdio_pins: sdio { | 344 | sdio_pins: sdio { |
323 | mux { | 345 | mux { |
324 | groups = "sdio_d0", | 346 | groups = "sdio_d0", |
@@ -331,6 +353,17 @@ | |||
331 | }; | 353 | }; |
332 | }; | 354 | }; |
333 | 355 | ||
356 | sdio_clk_gate_pins: sdio_clk_gate { | ||
357 | mux { | ||
358 | groups = "GPIOX_4"; | ||
359 | function = "gpio_periphs"; | ||
360 | }; | ||
361 | cfg-pull-down { | ||
362 | pins = "GPIOX_4"; | ||
363 | bias-pull-down; | ||
364 | }; | ||
365 | }; | ||
366 | |||
334 | sdio_irq_pins: sdio_irq { | 367 | sdio_irq_pins: sdio_irq { |
335 | mux { | 368 | mux { |
336 | groups = "sdio_irq"; | 369 | groups = "sdio_irq"; |
@@ -603,21 +636,21 @@ | |||
603 | 636 | ||
604 | &sd_emmc_a { | 637 | &sd_emmc_a { |
605 | clocks = <&clkc CLKID_SD_EMMC_A>, | 638 | clocks = <&clkc CLKID_SD_EMMC_A>, |
606 | <&xtal>, | 639 | <&clkc CLKID_SD_EMMC_A_CLK0>, |
607 | <&clkc CLKID_FCLK_DIV2>; | 640 | <&clkc CLKID_FCLK_DIV2>; |
608 | clock-names = "core", "clkin0", "clkin1"; | 641 | clock-names = "core", "clkin0", "clkin1"; |
609 | }; | 642 | }; |
610 | 643 | ||
611 | &sd_emmc_b { | 644 | &sd_emmc_b { |
612 | clocks = <&clkc CLKID_SD_EMMC_B>, | 645 | clocks = <&clkc CLKID_SD_EMMC_B>, |
613 | <&xtal>, | 646 | <&clkc CLKID_SD_EMMC_B_CLK0>, |
614 | <&clkc CLKID_FCLK_DIV2>; | 647 | <&clkc CLKID_FCLK_DIV2>; |
615 | clock-names = "core", "clkin0", "clkin1"; | 648 | clock-names = "core", "clkin0", "clkin1"; |
616 | }; | 649 | }; |
617 | 650 | ||
618 | &sd_emmc_c { | 651 | &sd_emmc_c { |
619 | clocks = <&clkc CLKID_SD_EMMC_C>, | 652 | clocks = <&clkc CLKID_SD_EMMC_C>, |
620 | <&xtal>, | 653 | <&clkc CLKID_SD_EMMC_C_CLK0>, |
621 | <&clkc CLKID_FCLK_DIV2>; | 654 | <&clkc CLKID_FCLK_DIV2>; |
622 | clock-names = "core", "clkin0", "clkin1"; | 655 | clock-names = "core", "clkin0", "clkin1"; |
623 | }; | 656 | }; |
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxm-nexbox-a1.dts b/arch/arm64/boot/dts/amlogic/meson-gxm-nexbox-a1.dts index 9b10c5f4f8c0..22c697732f66 100644 --- a/arch/arm64/boot/dts/amlogic/meson-gxm-nexbox-a1.dts +++ b/arch/arm64/boot/dts/amlogic/meson-gxm-nexbox-a1.dts | |||
@@ -175,7 +175,8 @@ | |||
175 | &sd_emmc_b { | 175 | &sd_emmc_b { |
176 | status = "okay"; | 176 | status = "okay"; |
177 | pinctrl-0 = <&sdcard_pins>; | 177 | pinctrl-0 = <&sdcard_pins>; |
178 | pinctrl-names = "default"; | 178 | pinctrl-1 = <&sdcard_clk_gate_pins>; |
179 | pinctrl-names = "default", "clk-gate"; | ||
179 | 180 | ||
180 | bus-width = <4>; | 181 | bus-width = <4>; |
181 | cap-sd-highspeed; | 182 | cap-sd-highspeed; |
@@ -193,10 +194,10 @@ | |||
193 | &sd_emmc_c { | 194 | &sd_emmc_c { |
194 | status = "okay"; | 195 | status = "okay"; |
195 | pinctrl-0 = <&emmc_pins>; | 196 | pinctrl-0 = <&emmc_pins>; |
196 | pinctrl-names = "default"; | 197 | pinctrl-1 = <&emmc_clk_gate_pins>; |
198 | pinctrl-names = "default", "clk-gate"; | ||
197 | 199 | ||
198 | bus-width = <8>; | 200 | bus-width = <8>; |
199 | cap-sd-highspeed; | ||
200 | cap-mmc-highspeed; | 201 | cap-mmc-highspeed; |
201 | max-frequency = <200000000>; | 202 | max-frequency = <200000000>; |
202 | non-removable; | 203 | non-removable; |
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxm-rbox-pro.dts b/arch/arm64/boot/dts/amlogic/meson-gxm-rbox-pro.dts index 08f1dd69b679..470f72bb863c 100644 --- a/arch/arm64/boot/dts/amlogic/meson-gxm-rbox-pro.dts +++ b/arch/arm64/boot/dts/amlogic/meson-gxm-rbox-pro.dts | |||
@@ -220,7 +220,6 @@ | |||
220 | pinctrl-names = "default"; | 220 | pinctrl-names = "default"; |
221 | 221 | ||
222 | bus-width = <8>; | 222 | bus-width = <8>; |
223 | cap-sd-highspeed; | ||
224 | cap-mmc-highspeed; | 223 | cap-mmc-highspeed; |
225 | max-frequency = <200000000>; | 224 | max-frequency = <200000000>; |
226 | non-removable; | 225 | non-removable; |
diff --git a/arch/arm64/boot/dts/marvell/armada-ap806.dtsi b/arch/arm64/boot/dts/marvell/armada-ap806.dtsi index 4d360713ed12..30d48ecf46e0 100644 --- a/arch/arm64/boot/dts/marvell/armada-ap806.dtsi +++ b/arch/arm64/boot/dts/marvell/armada-ap806.dtsi | |||
@@ -254,7 +254,7 @@ | |||
254 | 254 | ||
255 | ap_syscon: system-controller@6f4000 { | 255 | ap_syscon: system-controller@6f4000 { |
256 | compatible = "syscon", "simple-mfd"; | 256 | compatible = "syscon", "simple-mfd"; |
257 | reg = <0x6f4000 0x1000>; | 257 | reg = <0x6f4000 0x2000>; |
258 | 258 | ||
259 | ap_clk: clock { | 259 | ap_clk: clock { |
260 | compatible = "marvell,ap806-clock"; | 260 | compatible = "marvell,ap806-clock"; |
@@ -265,7 +265,7 @@ | |||
265 | compatible = "marvell,ap806-pinctrl"; | 265 | compatible = "marvell,ap806-pinctrl"; |
266 | }; | 266 | }; |
267 | 267 | ||
268 | ap_gpio: gpio { | 268 | ap_gpio: gpio@1040 { |
269 | compatible = "marvell,armada-8k-gpio"; | 269 | compatible = "marvell,armada-8k-gpio"; |
270 | offset = <0x1040>; | 270 | offset = <0x1040>; |
271 | ngpios = <20>; | 271 | ngpios = <20>; |
diff --git a/arch/arm64/boot/dts/rockchip/rk3368.dtsi b/arch/arm64/boot/dts/rockchip/rk3368.dtsi index e0518b4bc6c2..19fbaa5e7bdd 100644 --- a/arch/arm64/boot/dts/rockchip/rk3368.dtsi +++ b/arch/arm64/boot/dts/rockchip/rk3368.dtsi | |||
@@ -113,8 +113,7 @@ | |||
113 | compatible = "arm,cortex-a53", "arm,armv8"; | 113 | compatible = "arm,cortex-a53", "arm,armv8"; |
114 | reg = <0x0 0x0>; | 114 | reg = <0x0 0x0>; |
115 | enable-method = "psci"; | 115 | enable-method = "psci"; |
116 | clocks = <&cru ARMCLKL>; | 116 | |
117 | operating-points-v2 = <&cluster0_opp>; | ||
118 | #cooling-cells = <2>; /* min followed by max */ | 117 | #cooling-cells = <2>; /* min followed by max */ |
119 | }; | 118 | }; |
120 | 119 | ||
@@ -123,8 +122,6 @@ | |||
123 | compatible = "arm,cortex-a53", "arm,armv8"; | 122 | compatible = "arm,cortex-a53", "arm,armv8"; |
124 | reg = <0x0 0x1>; | 123 | reg = <0x0 0x1>; |
125 | enable-method = "psci"; | 124 | enable-method = "psci"; |
126 | clocks = <&cru ARMCLKL>; | ||
127 | operating-points-v2 = <&cluster0_opp>; | ||
128 | }; | 125 | }; |
129 | 126 | ||
130 | cpu_l2: cpu@2 { | 127 | cpu_l2: cpu@2 { |
@@ -132,8 +129,6 @@ | |||
132 | compatible = "arm,cortex-a53", "arm,armv8"; | 129 | compatible = "arm,cortex-a53", "arm,armv8"; |
133 | reg = <0x0 0x2>; | 130 | reg = <0x0 0x2>; |
134 | enable-method = "psci"; | 131 | enable-method = "psci"; |
135 | clocks = <&cru ARMCLKL>; | ||
136 | operating-points-v2 = <&cluster0_opp>; | ||
137 | }; | 132 | }; |
138 | 133 | ||
139 | cpu_l3: cpu@3 { | 134 | cpu_l3: cpu@3 { |
@@ -141,8 +136,6 @@ | |||
141 | compatible = "arm,cortex-a53", "arm,armv8"; | 136 | compatible = "arm,cortex-a53", "arm,armv8"; |
142 | reg = <0x0 0x3>; | 137 | reg = <0x0 0x3>; |
143 | enable-method = "psci"; | 138 | enable-method = "psci"; |
144 | clocks = <&cru ARMCLKL>; | ||
145 | operating-points-v2 = <&cluster0_opp>; | ||
146 | }; | 139 | }; |
147 | 140 | ||
148 | cpu_b0: cpu@100 { | 141 | cpu_b0: cpu@100 { |
@@ -150,8 +143,7 @@ | |||
150 | compatible = "arm,cortex-a53", "arm,armv8"; | 143 | compatible = "arm,cortex-a53", "arm,armv8"; |
151 | reg = <0x0 0x100>; | 144 | reg = <0x0 0x100>; |
152 | enable-method = "psci"; | 145 | enable-method = "psci"; |
153 | clocks = <&cru ARMCLKB>; | 146 | |
154 | operating-points-v2 = <&cluster1_opp>; | ||
155 | #cooling-cells = <2>; /* min followed by max */ | 147 | #cooling-cells = <2>; /* min followed by max */ |
156 | }; | 148 | }; |
157 | 149 | ||
@@ -160,8 +152,6 @@ | |||
160 | compatible = "arm,cortex-a53", "arm,armv8"; | 152 | compatible = "arm,cortex-a53", "arm,armv8"; |
161 | reg = <0x0 0x101>; | 153 | reg = <0x0 0x101>; |
162 | enable-method = "psci"; | 154 | enable-method = "psci"; |
163 | clocks = <&cru ARMCLKB>; | ||
164 | operating-points-v2 = <&cluster1_opp>; | ||
165 | }; | 155 | }; |
166 | 156 | ||
167 | cpu_b2: cpu@102 { | 157 | cpu_b2: cpu@102 { |
@@ -169,8 +159,6 @@ | |||
169 | compatible = "arm,cortex-a53", "arm,armv8"; | 159 | compatible = "arm,cortex-a53", "arm,armv8"; |
170 | reg = <0x0 0x102>; | 160 | reg = <0x0 0x102>; |
171 | enable-method = "psci"; | 161 | enable-method = "psci"; |
172 | clocks = <&cru ARMCLKB>; | ||
173 | operating-points-v2 = <&cluster1_opp>; | ||
174 | }; | 162 | }; |
175 | 163 | ||
176 | cpu_b3: cpu@103 { | 164 | cpu_b3: cpu@103 { |
@@ -178,62 +166,6 @@ | |||
178 | compatible = "arm,cortex-a53", "arm,armv8"; | 166 | compatible = "arm,cortex-a53", "arm,armv8"; |
179 | reg = <0x0 0x103>; | 167 | reg = <0x0 0x103>; |
180 | enable-method = "psci"; | 168 | enable-method = "psci"; |
181 | clocks = <&cru ARMCLKB>; | ||
182 | operating-points-v2 = <&cluster1_opp>; | ||
183 | }; | ||
184 | }; | ||
185 | |||
186 | cluster0_opp: opp-table0 { | ||
187 | compatible = "operating-points-v2"; | ||
188 | opp-shared; | ||
189 | |||
190 | opp00 { | ||
191 | opp-hz = /bits/ 64 <312000000>; | ||
192 | opp-microvolt = <950000>; | ||
193 | clock-latency-ns = <40000>; | ||
194 | }; | ||
195 | opp01 { | ||
196 | opp-hz = /bits/ 64 <408000000>; | ||
197 | opp-microvolt = <950000>; | ||
198 | }; | ||
199 | opp02 { | ||
200 | opp-hz = /bits/ 64 <600000000>; | ||
201 | opp-microvolt = <950000>; | ||
202 | }; | ||
203 | opp03 { | ||
204 | opp-hz = /bits/ 64 <816000000>; | ||
205 | opp-microvolt = <1025000>; | ||
206 | }; | ||
207 | opp04 { | ||
208 | opp-hz = /bits/ 64 <1008000000>; | ||
209 | opp-microvolt = <1125000>; | ||
210 | }; | ||
211 | }; | ||
212 | |||
213 | cluster1_opp: opp-table1 { | ||
214 | compatible = "operating-points-v2"; | ||
215 | opp-shared; | ||
216 | |||
217 | opp00 { | ||
218 | opp-hz = /bits/ 64 <312000000>; | ||
219 | opp-microvolt = <950000>; | ||
220 | clock-latency-ns = <40000>; | ||
221 | }; | ||
222 | opp01 { | ||
223 | opp-hz = /bits/ 64 <408000000>; | ||
224 | opp-microvolt = <950000>; | ||
225 | }; | ||
226 | opp02 { | ||
227 | opp-hz = /bits/ 64 <600000000>; | ||
228 | opp-microvolt = <950000>; | ||
229 | }; | ||
230 | opp03 { | ||
231 | opp-hz = /bits/ 64 <816000000>; | ||
232 | opp-microvolt = <975000>; | ||
233 | }; | ||
234 | opp04 { | ||
235 | opp-hz = /bits/ 64 <1008000000>; | ||
236 | opp-microvolt = <1050000>; | ||
237 | }; | 169 | }; |
238 | }; | 170 | }; |
239 | 171 | ||
diff --git a/arch/arm64/boot/dts/rockchip/rk3399.dtsi b/arch/arm64/boot/dts/rockchip/rk3399.dtsi index d79e9b3265b9..ab7629c5b856 100644 --- a/arch/arm64/boot/dts/rockchip/rk3399.dtsi +++ b/arch/arm64/boot/dts/rockchip/rk3399.dtsi | |||
@@ -1629,9 +1629,9 @@ | |||
1629 | compatible = "rockchip,rk3399-mipi-dsi", "snps,dw-mipi-dsi"; | 1629 | compatible = "rockchip,rk3399-mipi-dsi", "snps,dw-mipi-dsi"; |
1630 | reg = <0x0 0xff960000 0x0 0x8000>; | 1630 | reg = <0x0 0xff960000 0x0 0x8000>; |
1631 | interrupts = <GIC_SPI 45 IRQ_TYPE_LEVEL_HIGH 0>; | 1631 | interrupts = <GIC_SPI 45 IRQ_TYPE_LEVEL_HIGH 0>; |
1632 | clocks = <&cru SCLK_MIPIDPHY_REF>, <&cru PCLK_MIPI_DSI0>, | 1632 | clocks = <&cru SCLK_DPHY_PLL>, <&cru PCLK_MIPI_DSI0>, |
1633 | <&cru SCLK_DPHY_TX0_CFG>; | 1633 | <&cru SCLK_DPHY_TX0_CFG>, <&cru PCLK_VIO_GRF>; |
1634 | clock-names = "ref", "pclk", "phy_cfg"; | 1634 | clock-names = "ref", "pclk", "phy_cfg", "grf"; |
1635 | power-domains = <&power RK3399_PD_VIO>; | 1635 | power-domains = <&power RK3399_PD_VIO>; |
1636 | rockchip,grf = <&grf>; | 1636 | rockchip,grf = <&grf>; |
1637 | status = "disabled"; | 1637 | status = "disabled"; |
diff --git a/arch/arm64/include/asm/linkage.h b/arch/arm64/include/asm/linkage.h index 636c1bced7d4..1b266292f0be 100644 --- a/arch/arm64/include/asm/linkage.h +++ b/arch/arm64/include/asm/linkage.h | |||
@@ -1,7 +1,7 @@ | |||
1 | #ifndef __ASM_LINKAGE_H | 1 | #ifndef __ASM_LINKAGE_H |
2 | #define __ASM_LINKAGE_H | 2 | #define __ASM_LINKAGE_H |
3 | 3 | ||
4 | #define __ALIGN .align 4 | 4 | #define __ALIGN .align 2 |
5 | #define __ALIGN_STR ".align 4" | 5 | #define __ALIGN_STR ".align 2" |
6 | 6 | ||
7 | #endif | 7 | #endif |
diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h index 3585a5e26151..f7c4d2146aed 100644 --- a/arch/arm64/include/asm/memory.h +++ b/arch/arm64/include/asm/memory.h | |||
@@ -95,16 +95,19 @@ | |||
95 | #define KERNEL_END _end | 95 | #define KERNEL_END _end |
96 | 96 | ||
97 | /* | 97 | /* |
98 | * The size of the KASAN shadow region. This should be 1/8th of the | 98 | * KASAN requires 1/8th of the kernel virtual address space for the shadow |
99 | * size of the entire kernel virtual address space. | 99 | * region. KASAN can bloat the stack significantly, so double the (minimum) |
100 | * stack size when KASAN is in use. | ||
100 | */ | 101 | */ |
101 | #ifdef CONFIG_KASAN | 102 | #ifdef CONFIG_KASAN |
102 | #define KASAN_SHADOW_SIZE (UL(1) << (VA_BITS - 3)) | 103 | #define KASAN_SHADOW_SIZE (UL(1) << (VA_BITS - 3)) |
104 | #define KASAN_THREAD_SHIFT 1 | ||
103 | #else | 105 | #else |
104 | #define KASAN_SHADOW_SIZE (0) | 106 | #define KASAN_SHADOW_SIZE (0) |
107 | #define KASAN_THREAD_SHIFT 0 | ||
105 | #endif | 108 | #endif |
106 | 109 | ||
107 | #define MIN_THREAD_SHIFT 14 | 110 | #define MIN_THREAD_SHIFT (14 + KASAN_THREAD_SHIFT) |
108 | 111 | ||
109 | /* | 112 | /* |
110 | * VMAP'd stacks are allocated at page granularity, so we must ensure that such | 113 | * VMAP'd stacks are allocated at page granularity, so we must ensure that such |
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h index bc4e92337d16..b46e54c2399b 100644 --- a/arch/arm64/include/asm/pgtable.h +++ b/arch/arm64/include/asm/pgtable.h | |||
@@ -401,7 +401,7 @@ static inline phys_addr_t pmd_page_paddr(pmd_t pmd) | |||
401 | /* Find an entry in the third-level page table. */ | 401 | /* Find an entry in the third-level page table. */ |
402 | #define pte_index(addr) (((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)) | 402 | #define pte_index(addr) (((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)) |
403 | 403 | ||
404 | #define pte_offset_phys(dir,addr) (pmd_page_paddr(*(dir)) + pte_index(addr) * sizeof(pte_t)) | 404 | #define pte_offset_phys(dir,addr) (pmd_page_paddr(READ_ONCE(*(dir))) + pte_index(addr) * sizeof(pte_t)) |
405 | #define pte_offset_kernel(dir,addr) ((pte_t *)__va(pte_offset_phys((dir), (addr)))) | 405 | #define pte_offset_kernel(dir,addr) ((pte_t *)__va(pte_offset_phys((dir), (addr)))) |
406 | 406 | ||
407 | #define pte_offset_map(dir,addr) pte_offset_kernel((dir), (addr)) | 407 | #define pte_offset_map(dir,addr) pte_offset_kernel((dir), (addr)) |
diff --git a/arch/arm64/kernel/armv8_deprecated.c b/arch/arm64/kernel/armv8_deprecated.c index f0e6d717885b..d06fbe4cd38d 100644 --- a/arch/arm64/kernel/armv8_deprecated.c +++ b/arch/arm64/kernel/armv8_deprecated.c | |||
@@ -649,4 +649,4 @@ static int __init armv8_deprecated_init(void) | |||
649 | return 0; | 649 | return 0; |
650 | } | 650 | } |
651 | 651 | ||
652 | late_initcall(armv8_deprecated_init); | 652 | core_initcall(armv8_deprecated_init); |
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c index cd52d365d1f0..21e2c95d24e7 100644 --- a/arch/arm64/kernel/cpufeature.c +++ b/arch/arm64/kernel/cpufeature.c | |||
@@ -1307,4 +1307,4 @@ static int __init enable_mrs_emulation(void) | |||
1307 | return 0; | 1307 | return 0; |
1308 | } | 1308 | } |
1309 | 1309 | ||
1310 | late_initcall(enable_mrs_emulation); | 1310 | core_initcall(enable_mrs_emulation); |
diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c index 3a68cf38a6b3..5d547deb6996 100644 --- a/arch/arm64/kernel/fpsimd.c +++ b/arch/arm64/kernel/fpsimd.c | |||
@@ -321,6 +321,8 @@ void kernel_neon_end(void) | |||
321 | } | 321 | } |
322 | EXPORT_SYMBOL(kernel_neon_end); | 322 | EXPORT_SYMBOL(kernel_neon_end); |
323 | 323 | ||
324 | #ifdef CONFIG_EFI | ||
325 | |||
324 | static DEFINE_PER_CPU(struct fpsimd_state, efi_fpsimd_state); | 326 | static DEFINE_PER_CPU(struct fpsimd_state, efi_fpsimd_state); |
325 | static DEFINE_PER_CPU(bool, efi_fpsimd_state_used); | 327 | static DEFINE_PER_CPU(bool, efi_fpsimd_state_used); |
326 | 328 | ||
@@ -370,6 +372,8 @@ void __efi_fpsimd_end(void) | |||
370 | kernel_neon_end(); | 372 | kernel_neon_end(); |
371 | } | 373 | } |
372 | 374 | ||
375 | #endif /* CONFIG_EFI */ | ||
376 | |||
373 | #endif /* CONFIG_KERNEL_MODE_NEON */ | 377 | #endif /* CONFIG_KERNEL_MODE_NEON */ |
374 | 378 | ||
375 | #ifdef CONFIG_CPU_PM | 379 | #ifdef CONFIG_CPU_PM |
@@ -440,4 +444,4 @@ static int __init fpsimd_init(void) | |||
440 | 444 | ||
441 | return 0; | 445 | return 0; |
442 | } | 446 | } |
443 | late_initcall(fpsimd_init); | 447 | core_initcall(fpsimd_init); |
diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S index 7434ec0c7a27..0b243ecaf7ac 100644 --- a/arch/arm64/kernel/head.S +++ b/arch/arm64/kernel/head.S | |||
@@ -384,6 +384,7 @@ ENTRY(kimage_vaddr) | |||
384 | * booted in EL1 or EL2 respectively. | 384 | * booted in EL1 or EL2 respectively. |
385 | */ | 385 | */ |
386 | ENTRY(el2_setup) | 386 | ENTRY(el2_setup) |
387 | msr SPsel, #1 // We want to use SP_EL{1,2} | ||
387 | mrs x0, CurrentEL | 388 | mrs x0, CurrentEL |
388 | cmp x0, #CurrentEL_EL2 | 389 | cmp x0, #CurrentEL_EL2 |
389 | b.eq 1f | 390 | b.eq 1f |
diff --git a/arch/arm64/kernel/signal.c b/arch/arm64/kernel/signal.c index c45214f8fb54..0bdc96c61bc0 100644 --- a/arch/arm64/kernel/signal.c +++ b/arch/arm64/kernel/signal.c | |||
@@ -751,10 +751,10 @@ asmlinkage void do_notify_resume(struct pt_regs *regs, | |||
751 | */ | 751 | */ |
752 | trace_hardirqs_off(); | 752 | trace_hardirqs_off(); |
753 | 753 | ||
754 | /* Check valid user FS if needed */ | ||
755 | addr_limit_user_check(); | ||
756 | |||
757 | do { | 754 | do { |
755 | /* Check valid user FS if needed */ | ||
756 | addr_limit_user_check(); | ||
757 | |||
758 | if (thread_flags & _TIF_NEED_RESCHED) { | 758 | if (thread_flags & _TIF_NEED_RESCHED) { |
759 | schedule(); | 759 | schedule(); |
760 | } else { | 760 | } else { |
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c index 89993c4be1be..b64958b23a7f 100644 --- a/arch/arm64/mm/fault.c +++ b/arch/arm64/mm/fault.c | |||
@@ -97,7 +97,7 @@ static void data_abort_decode(unsigned int esr) | |||
97 | (esr & ESR_ELx_SF) >> ESR_ELx_SF_SHIFT, | 97 | (esr & ESR_ELx_SF) >> ESR_ELx_SF_SHIFT, |
98 | (esr & ESR_ELx_AR) >> ESR_ELx_AR_SHIFT); | 98 | (esr & ESR_ELx_AR) >> ESR_ELx_AR_SHIFT); |
99 | } else { | 99 | } else { |
100 | pr_alert(" ISV = 0, ISS = 0x%08lu\n", esr & ESR_ELx_ISS_MASK); | 100 | pr_alert(" ISV = 0, ISS = 0x%08lx\n", esr & ESR_ELx_ISS_MASK); |
101 | } | 101 | } |
102 | 102 | ||
103 | pr_alert(" CM = %lu, WnR = %lu\n", | 103 | pr_alert(" CM = %lu, WnR = %lu\n", |
@@ -651,7 +651,7 @@ static const struct fault_info fault_info[] = { | |||
651 | { do_translation_fault, SIGSEGV, SEGV_MAPERR, "level 0 translation fault" }, | 651 | { do_translation_fault, SIGSEGV, SEGV_MAPERR, "level 0 translation fault" }, |
652 | { do_translation_fault, SIGSEGV, SEGV_MAPERR, "level 1 translation fault" }, | 652 | { do_translation_fault, SIGSEGV, SEGV_MAPERR, "level 1 translation fault" }, |
653 | { do_translation_fault, SIGSEGV, SEGV_MAPERR, "level 2 translation fault" }, | 653 | { do_translation_fault, SIGSEGV, SEGV_MAPERR, "level 2 translation fault" }, |
654 | { do_page_fault, SIGSEGV, SEGV_MAPERR, "level 3 translation fault" }, | 654 | { do_translation_fault, SIGSEGV, SEGV_MAPERR, "level 3 translation fault" }, |
655 | { do_bad, SIGBUS, 0, "unknown 8" }, | 655 | { do_bad, SIGBUS, 0, "unknown 8" }, |
656 | { do_page_fault, SIGSEGV, SEGV_ACCERR, "level 1 access flag fault" }, | 656 | { do_page_fault, SIGSEGV, SEGV_ACCERR, "level 1 access flag fault" }, |
657 | { do_page_fault, SIGSEGV, SEGV_ACCERR, "level 2 access flag fault" }, | 657 | { do_page_fault, SIGSEGV, SEGV_ACCERR, "level 2 access flag fault" }, |
diff --git a/arch/c6x/include/asm/processor.h b/arch/c6x/include/asm/processor.h index 7c87b5be53b5..8f7cce829f8e 100644 --- a/arch/c6x/include/asm/processor.h +++ b/arch/c6x/include/asm/processor.h | |||
@@ -92,9 +92,6 @@ static inline void release_thread(struct task_struct *dead_task) | |||
92 | { | 92 | { |
93 | } | 93 | } |
94 | 94 | ||
95 | #define copy_segments(tsk, mm) do { } while (0) | ||
96 | #define release_segments(mm) do { } while (0) | ||
97 | |||
98 | /* | 95 | /* |
99 | * saved kernel SP and DP of a blocked thread. | 96 | * saved kernel SP and DP of a blocked thread. |
100 | */ | 97 | */ |
diff --git a/arch/frv/include/asm/processor.h b/arch/frv/include/asm/processor.h index e4d08d74ed9f..021cce78b401 100644 --- a/arch/frv/include/asm/processor.h +++ b/arch/frv/include/asm/processor.h | |||
@@ -92,10 +92,6 @@ static inline void release_thread(struct task_struct *dead_task) | |||
92 | extern asmlinkage void save_user_regs(struct user_context *target); | 92 | extern asmlinkage void save_user_regs(struct user_context *target); |
93 | extern asmlinkage void *restore_user_regs(const struct user_context *target, ...); | 93 | extern asmlinkage void *restore_user_regs(const struct user_context *target, ...); |
94 | 94 | ||
95 | #define copy_segments(tsk, mm) do { } while (0) | ||
96 | #define release_segments(mm) do { } while (0) | ||
97 | #define forget_segments() do { } while (0) | ||
98 | |||
99 | unsigned long get_wchan(struct task_struct *p); | 95 | unsigned long get_wchan(struct task_struct *p); |
100 | 96 | ||
101 | #define KSTK_EIP(tsk) ((tsk)->thread.frame0->pc) | 97 | #define KSTK_EIP(tsk) ((tsk)->thread.frame0->pc) |
diff --git a/arch/m32r/Kconfig b/arch/m32r/Kconfig index 87cde1e4b38c..0777f3a8a1f3 100644 --- a/arch/m32r/Kconfig +++ b/arch/m32r/Kconfig | |||
@@ -194,6 +194,10 @@ config TIMER_DIVIDE | |||
194 | int "Timer divider (integer)" | 194 | int "Timer divider (integer)" |
195 | default "128" | 195 | default "128" |
196 | 196 | ||
197 | config CPU_BIG_ENDIAN | ||
198 | bool "Generate big endian code" | ||
199 | default n | ||
200 | |||
197 | config CPU_LITTLE_ENDIAN | 201 | config CPU_LITTLE_ENDIAN |
198 | bool "Generate little endian code" | 202 | bool "Generate little endian code" |
199 | default n | 203 | default n |
diff --git a/arch/m32r/include/asm/processor.h b/arch/m32r/include/asm/processor.h index 657874eeeccc..c70fa9ac7169 100644 --- a/arch/m32r/include/asm/processor.h +++ b/arch/m32r/include/asm/processor.h | |||
@@ -118,14 +118,6 @@ struct mm_struct; | |||
118 | /* Free all resources held by a thread. */ | 118 | /* Free all resources held by a thread. */ |
119 | extern void release_thread(struct task_struct *); | 119 | extern void release_thread(struct task_struct *); |
120 | 120 | ||
121 | /* Copy and release all segment info associated with a VM */ | ||
122 | extern void copy_segments(struct task_struct *p, struct mm_struct * mm); | ||
123 | extern void release_segments(struct mm_struct * mm); | ||
124 | |||
125 | /* Copy and release all segment info associated with a VM */ | ||
126 | #define copy_segments(p, mm) do { } while (0) | ||
127 | #define release_segments(mm) do { } while (0) | ||
128 | |||
129 | unsigned long get_wchan(struct task_struct *p); | 121 | unsigned long get_wchan(struct task_struct *p); |
130 | #define KSTK_EIP(tsk) ((tsk)->thread.lr) | 122 | #define KSTK_EIP(tsk) ((tsk)->thread.lr) |
131 | #define KSTK_ESP(tsk) ((tsk)->thread.sp) | 123 | #define KSTK_ESP(tsk) ((tsk)->thread.sp) |
diff --git a/arch/m32r/kernel/traps.c b/arch/m32r/kernel/traps.c index 647dd94a0c39..72b96f282689 100644 --- a/arch/m32r/kernel/traps.c +++ b/arch/m32r/kernel/traps.c | |||
@@ -114,6 +114,15 @@ static void set_eit_vector_entries(void) | |||
114 | _flush_cache_copyback_all(); | 114 | _flush_cache_copyback_all(); |
115 | } | 115 | } |
116 | 116 | ||
117 | void abort(void) | ||
118 | { | ||
119 | BUG(); | ||
120 | |||
121 | /* if that doesn't kill us, halt */ | ||
122 | panic("Oops failed to kill thread"); | ||
123 | } | ||
124 | EXPORT_SYMBOL(abort); | ||
125 | |||
117 | void __init trap_init(void) | 126 | void __init trap_init(void) |
118 | { | 127 | { |
119 | set_eit_vector_entries(); | 128 | set_eit_vector_entries(); |
diff --git a/arch/metag/include/asm/processor.h b/arch/metag/include/asm/processor.h index ec6a49076980..8ae92d6abfd2 100644 --- a/arch/metag/include/asm/processor.h +++ b/arch/metag/include/asm/processor.h | |||
@@ -131,9 +131,6 @@ static inline void release_thread(struct task_struct *dead_task) | |||
131 | { | 131 | { |
132 | } | 132 | } |
133 | 133 | ||
134 | #define copy_segments(tsk, mm) do { } while (0) | ||
135 | #define release_segments(mm) do { } while (0) | ||
136 | |||
137 | /* | 134 | /* |
138 | * Return saved PC of a blocked thread. | 135 | * Return saved PC of a blocked thread. |
139 | */ | 136 | */ |
diff --git a/arch/microblaze/Kconfig b/arch/microblaze/Kconfig index 9d26abdf0dc1..4f798aa671dd 100644 --- a/arch/microblaze/Kconfig +++ b/arch/microblaze/Kconfig | |||
@@ -39,7 +39,7 @@ config MICROBLAZE | |||
39 | # Endianness selection | 39 | # Endianness selection |
40 | choice | 40 | choice |
41 | prompt "Endianness selection" | 41 | prompt "Endianness selection" |
42 | default CPU_BIG_ENDIAN | 42 | default CPU_LITTLE_ENDIAN |
43 | help | 43 | help |
44 | microblaze architectures can be configured for either little or | 44 | microblaze architectures can be configured for either little or |
45 | big endian formats. Be sure to select the appropriate mode. | 45 | big endian formats. Be sure to select the appropriate mode. |
diff --git a/arch/microblaze/include/uapi/asm/Kbuild b/arch/microblaze/include/uapi/asm/Kbuild index e77a596f3f1e..06609ca36115 100644 --- a/arch/microblaze/include/uapi/asm/Kbuild +++ b/arch/microblaze/include/uapi/asm/Kbuild | |||
@@ -7,6 +7,7 @@ generic-y += fcntl.h | |||
7 | generic-y += ioctl.h | 7 | generic-y += ioctl.h |
8 | generic-y += ioctls.h | 8 | generic-y += ioctls.h |
9 | generic-y += ipcbuf.h | 9 | generic-y += ipcbuf.h |
10 | generic-y += kvm_para.h | ||
10 | generic-y += mman.h | 11 | generic-y += mman.h |
11 | generic-y += msgbuf.h | 12 | generic-y += msgbuf.h |
12 | generic-y += param.h | 13 | generic-y += param.h |
diff --git a/arch/microblaze/kernel/dma.c b/arch/microblaze/kernel/dma.c index e45ada8fb006..94700c5270a9 100644 --- a/arch/microblaze/kernel/dma.c +++ b/arch/microblaze/kernel/dma.c | |||
@@ -165,7 +165,7 @@ int dma_direct_mmap_coherent(struct device *dev, struct vm_area_struct *vma, | |||
165 | unsigned long attrs) | 165 | unsigned long attrs) |
166 | { | 166 | { |
167 | #ifdef CONFIG_MMU | 167 | #ifdef CONFIG_MMU |
168 | unsigned long user_count = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT; | 168 | unsigned long user_count = vma_pages(vma); |
169 | unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT; | 169 | unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT; |
170 | unsigned long off = vma->vm_pgoff; | 170 | unsigned long off = vma->vm_pgoff; |
171 | unsigned long pfn; | 171 | unsigned long pfn; |
diff --git a/arch/mips/ath79/pci.c b/arch/mips/ath79/pci.c index 730c0b03060d..b816cb4a25ff 100644 --- a/arch/mips/ath79/pci.c +++ b/arch/mips/ath79/pci.c | |||
@@ -22,10 +22,10 @@ | |||
22 | #include "pci.h" | 22 | #include "pci.h" |
23 | 23 | ||
24 | static int (*ath79_pci_plat_dev_init)(struct pci_dev *dev); | 24 | static int (*ath79_pci_plat_dev_init)(struct pci_dev *dev); |
25 | static const struct ath79_pci_irq *ath79_pci_irq_map __initdata; | 25 | static const struct ath79_pci_irq *ath79_pci_irq_map; |
26 | static unsigned ath79_pci_nr_irqs __initdata; | 26 | static unsigned ath79_pci_nr_irqs; |
27 | 27 | ||
28 | static const struct ath79_pci_irq ar71xx_pci_irq_map[] __initconst = { | 28 | static const struct ath79_pci_irq ar71xx_pci_irq_map[] = { |
29 | { | 29 | { |
30 | .slot = 17, | 30 | .slot = 17, |
31 | .pin = 1, | 31 | .pin = 1, |
@@ -41,7 +41,7 @@ static const struct ath79_pci_irq ar71xx_pci_irq_map[] __initconst = { | |||
41 | } | 41 | } |
42 | }; | 42 | }; |
43 | 43 | ||
44 | static const struct ath79_pci_irq ar724x_pci_irq_map[] __initconst = { | 44 | static const struct ath79_pci_irq ar724x_pci_irq_map[] = { |
45 | { | 45 | { |
46 | .slot = 0, | 46 | .slot = 0, |
47 | .pin = 1, | 47 | .pin = 1, |
@@ -49,7 +49,7 @@ static const struct ath79_pci_irq ar724x_pci_irq_map[] __initconst = { | |||
49 | } | 49 | } |
50 | }; | 50 | }; |
51 | 51 | ||
52 | static const struct ath79_pci_irq qca955x_pci_irq_map[] __initconst = { | 52 | static const struct ath79_pci_irq qca955x_pci_irq_map[] = { |
53 | { | 53 | { |
54 | .bus = 0, | 54 | .bus = 0, |
55 | .slot = 0, | 55 | .slot = 0, |
@@ -64,7 +64,7 @@ static const struct ath79_pci_irq qca955x_pci_irq_map[] __initconst = { | |||
64 | }, | 64 | }, |
65 | }; | 65 | }; |
66 | 66 | ||
67 | int __init pcibios_map_irq(const struct pci_dev *dev, uint8_t slot, uint8_t pin) | 67 | int pcibios_map_irq(const struct pci_dev *dev, uint8_t slot, uint8_t pin) |
68 | { | 68 | { |
69 | int irq = -1; | 69 | int irq = -1; |
70 | int i; | 70 | int i; |
diff --git a/arch/mips/include/asm/mipsregs.h b/arch/mips/include/asm/mipsregs.h index e4ed1bc9a734..a6810923b3f0 100644 --- a/arch/mips/include/asm/mipsregs.h +++ b/arch/mips/include/asm/mipsregs.h | |||
@@ -1377,29 +1377,32 @@ do { \ | |||
1377 | 1377 | ||
1378 | #define __write_64bit_c0_split(source, sel, val) \ | 1378 | #define __write_64bit_c0_split(source, sel, val) \ |
1379 | do { \ | 1379 | do { \ |
1380 | unsigned long long __tmp; \ | ||
1380 | unsigned long __flags; \ | 1381 | unsigned long __flags; \ |
1381 | \ | 1382 | \ |
1382 | local_irq_save(__flags); \ | 1383 | local_irq_save(__flags); \ |
1383 | if (sel == 0) \ | 1384 | if (sel == 0) \ |
1384 | __asm__ __volatile__( \ | 1385 | __asm__ __volatile__( \ |
1385 | ".set\tmips64\n\t" \ | 1386 | ".set\tmips64\n\t" \ |
1386 | "dsll\t%L0, %L0, 32\n\t" \ | 1387 | "dsll\t%L0, %L1, 32\n\t" \ |
1387 | "dsrl\t%L0, %L0, 32\n\t" \ | 1388 | "dsrl\t%L0, %L0, 32\n\t" \ |
1388 | "dsll\t%M0, %M0, 32\n\t" \ | 1389 | "dsll\t%M0, %M1, 32\n\t" \ |
1389 | "or\t%L0, %L0, %M0\n\t" \ | 1390 | "or\t%L0, %L0, %M0\n\t" \ |
1390 | "dmtc0\t%L0, " #source "\n\t" \ | 1391 | "dmtc0\t%L0, " #source "\n\t" \ |
1391 | ".set\tmips0" \ | 1392 | ".set\tmips0" \ |
1392 | : : "r" (val)); \ | 1393 | : "=&r,r" (__tmp) \ |
1394 | : "r,0" (val)); \ | ||
1393 | else \ | 1395 | else \ |
1394 | __asm__ __volatile__( \ | 1396 | __asm__ __volatile__( \ |
1395 | ".set\tmips64\n\t" \ | 1397 | ".set\tmips64\n\t" \ |
1396 | "dsll\t%L0, %L0, 32\n\t" \ | 1398 | "dsll\t%L0, %L1, 32\n\t" \ |
1397 | "dsrl\t%L0, %L0, 32\n\t" \ | 1399 | "dsrl\t%L0, %L0, 32\n\t" \ |
1398 | "dsll\t%M0, %M0, 32\n\t" \ | 1400 | "dsll\t%M0, %M1, 32\n\t" \ |
1399 | "or\t%L0, %L0, %M0\n\t" \ | 1401 | "or\t%L0, %L0, %M0\n\t" \ |
1400 | "dmtc0\t%L0, " #source ", " #sel "\n\t" \ | 1402 | "dmtc0\t%L0, " #source ", " #sel "\n\t" \ |
1401 | ".set\tmips0" \ | 1403 | ".set\tmips0" \ |
1402 | : : "r" (val)); \ | 1404 | : "=&r,r" (__tmp) \ |
1405 | : "r,0" (val)); \ | ||
1403 | local_irq_restore(__flags); \ | 1406 | local_irq_restore(__flags); \ |
1404 | } while (0) | 1407 | } while (0) |
1405 | 1408 | ||
diff --git a/arch/mips/kernel/perf_event_mipsxx.c b/arch/mips/kernel/perf_event_mipsxx.c index 9e6c74bf66c4..6668f67a61c3 100644 --- a/arch/mips/kernel/perf_event_mipsxx.c +++ b/arch/mips/kernel/perf_event_mipsxx.c | |||
@@ -618,8 +618,7 @@ static int mipspmu_event_init(struct perf_event *event) | |||
618 | return -ENOENT; | 618 | return -ENOENT; |
619 | } | 619 | } |
620 | 620 | ||
621 | if ((unsigned int)event->cpu >= nr_cpumask_bits || | 621 | if (event->cpu >= 0 && !cpu_online(event->cpu)) |
622 | (event->cpu >= 0 && !cpu_online(event->cpu))) | ||
623 | return -ENODEV; | 622 | return -ENODEV; |
624 | 623 | ||
625 | if (!atomic_inc_not_zero(&active_events)) { | 624 | if (!atomic_inc_not_zero(&active_events)) { |
diff --git a/arch/mips/pci/fixup-capcella.c b/arch/mips/pci/fixup-capcella.c index 1c02f5737367..b4c263f16b15 100644 --- a/arch/mips/pci/fixup-capcella.c +++ b/arch/mips/pci/fixup-capcella.c | |||
@@ -32,13 +32,13 @@ | |||
32 | #define INTC PC104PLUS_INTC_IRQ | 32 | #define INTC PC104PLUS_INTC_IRQ |
33 | #define INTD PC104PLUS_INTD_IRQ | 33 | #define INTD PC104PLUS_INTD_IRQ |
34 | 34 | ||
35 | static char irq_tab_capcella[][5] __initdata = { | 35 | static char irq_tab_capcella[][5] = { |
36 | [11] = { -1, INT1, INT1, INT1, INT1 }, | 36 | [11] = { -1, INT1, INT1, INT1, INT1 }, |
37 | [12] = { -1, INT2, INT2, INT2, INT2 }, | 37 | [12] = { -1, INT2, INT2, INT2, INT2 }, |
38 | [14] = { -1, INTA, INTB, INTC, INTD } | 38 | [14] = { -1, INTA, INTB, INTC, INTD } |
39 | }; | 39 | }; |
40 | 40 | ||
41 | int __init pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) | 41 | int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) |
42 | { | 42 | { |
43 | return irq_tab_capcella[slot][pin]; | 43 | return irq_tab_capcella[slot][pin]; |
44 | } | 44 | } |
diff --git a/arch/mips/pci/fixup-cobalt.c b/arch/mips/pci/fixup-cobalt.c index b3ab59318d91..44be65c3e6bb 100644 --- a/arch/mips/pci/fixup-cobalt.c +++ b/arch/mips/pci/fixup-cobalt.c | |||
@@ -147,7 +147,7 @@ static void qube_raq_via_board_id_fixup(struct pci_dev *dev) | |||
147 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C586_0, | 147 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C586_0, |
148 | qube_raq_via_board_id_fixup); | 148 | qube_raq_via_board_id_fixup); |
149 | 149 | ||
150 | static char irq_tab_qube1[] __initdata = { | 150 | static char irq_tab_qube1[] = { |
151 | [COBALT_PCICONF_CPU] = 0, | 151 | [COBALT_PCICONF_CPU] = 0, |
152 | [COBALT_PCICONF_ETH0] = QUBE1_ETH0_IRQ, | 152 | [COBALT_PCICONF_ETH0] = QUBE1_ETH0_IRQ, |
153 | [COBALT_PCICONF_RAQSCSI] = SCSI_IRQ, | 153 | [COBALT_PCICONF_RAQSCSI] = SCSI_IRQ, |
@@ -156,7 +156,7 @@ static char irq_tab_qube1[] __initdata = { | |||
156 | [COBALT_PCICONF_ETH1] = 0 | 156 | [COBALT_PCICONF_ETH1] = 0 |
157 | }; | 157 | }; |
158 | 158 | ||
159 | static char irq_tab_cobalt[] __initdata = { | 159 | static char irq_tab_cobalt[] = { |
160 | [COBALT_PCICONF_CPU] = 0, | 160 | [COBALT_PCICONF_CPU] = 0, |
161 | [COBALT_PCICONF_ETH0] = ETH0_IRQ, | 161 | [COBALT_PCICONF_ETH0] = ETH0_IRQ, |
162 | [COBALT_PCICONF_RAQSCSI] = SCSI_IRQ, | 162 | [COBALT_PCICONF_RAQSCSI] = SCSI_IRQ, |
@@ -165,7 +165,7 @@ static char irq_tab_cobalt[] __initdata = { | |||
165 | [COBALT_PCICONF_ETH1] = ETH1_IRQ | 165 | [COBALT_PCICONF_ETH1] = ETH1_IRQ |
166 | }; | 166 | }; |
167 | 167 | ||
168 | static char irq_tab_raq2[] __initdata = { | 168 | static char irq_tab_raq2[] = { |
169 | [COBALT_PCICONF_CPU] = 0, | 169 | [COBALT_PCICONF_CPU] = 0, |
170 | [COBALT_PCICONF_ETH0] = ETH0_IRQ, | 170 | [COBALT_PCICONF_ETH0] = ETH0_IRQ, |
171 | [COBALT_PCICONF_RAQSCSI] = RAQ2_SCSI_IRQ, | 171 | [COBALT_PCICONF_RAQSCSI] = RAQ2_SCSI_IRQ, |
@@ -174,7 +174,7 @@ static char irq_tab_raq2[] __initdata = { | |||
174 | [COBALT_PCICONF_ETH1] = ETH1_IRQ | 174 | [COBALT_PCICONF_ETH1] = ETH1_IRQ |
175 | }; | 175 | }; |
176 | 176 | ||
177 | int __init pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) | 177 | int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) |
178 | { | 178 | { |
179 | if (cobalt_board_id <= COBALT_BRD_ID_QUBE1) | 179 | if (cobalt_board_id <= COBALT_BRD_ID_QUBE1) |
180 | return irq_tab_qube1[slot]; | 180 | return irq_tab_qube1[slot]; |
diff --git a/arch/mips/pci/fixup-emma2rh.c b/arch/mips/pci/fixup-emma2rh.c index 19caf775c206..c31cb6af1cd0 100644 --- a/arch/mips/pci/fixup-emma2rh.c +++ b/arch/mips/pci/fixup-emma2rh.c | |||
@@ -43,7 +43,7 @@ | |||
43 | */ | 43 | */ |
44 | 44 | ||
45 | #define MAX_SLOT_NUM 10 | 45 | #define MAX_SLOT_NUM 10 |
46 | static unsigned char irq_map[][5] __initdata = { | 46 | static unsigned char irq_map[][5] = { |
47 | [3] = {0, MARKEINS_PCI_IRQ_INTB, MARKEINS_PCI_IRQ_INTC, | 47 | [3] = {0, MARKEINS_PCI_IRQ_INTB, MARKEINS_PCI_IRQ_INTC, |
48 | MARKEINS_PCI_IRQ_INTD, 0,}, | 48 | MARKEINS_PCI_IRQ_INTD, 0,}, |
49 | [4] = {0, MARKEINS_PCI_IRQ_INTA, 0, 0, 0,}, | 49 | [4] = {0, MARKEINS_PCI_IRQ_INTA, 0, 0, 0,}, |
@@ -85,7 +85,7 @@ static void emma2rh_pci_host_fixup(struct pci_dev *dev) | |||
85 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NEC, PCI_DEVICE_ID_NEC_EMMA2RH, | 85 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NEC, PCI_DEVICE_ID_NEC_EMMA2RH, |
86 | emma2rh_pci_host_fixup); | 86 | emma2rh_pci_host_fixup); |
87 | 87 | ||
88 | int __init pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) | 88 | int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) |
89 | { | 89 | { |
90 | return irq_map[slot][pin]; | 90 | return irq_map[slot][pin]; |
91 | } | 91 | } |
diff --git a/arch/mips/pci/fixup-fuloong2e.c b/arch/mips/pci/fixup-fuloong2e.c index 50da773faede..b47c2771dc99 100644 --- a/arch/mips/pci/fixup-fuloong2e.c +++ b/arch/mips/pci/fixup-fuloong2e.c | |||
@@ -19,7 +19,7 @@ | |||
19 | /* South bridge slot number is set by the pci probe process */ | 19 | /* South bridge slot number is set by the pci probe process */ |
20 | static u8 sb_slot = 5; | 20 | static u8 sb_slot = 5; |
21 | 21 | ||
22 | int __init pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) | 22 | int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) |
23 | { | 23 | { |
24 | int irq = 0; | 24 | int irq = 0; |
25 | 25 | ||
diff --git a/arch/mips/pci/fixup-ip32.c b/arch/mips/pci/fixup-ip32.c index 133685e215ee..c6ec18a07e63 100644 --- a/arch/mips/pci/fixup-ip32.c +++ b/arch/mips/pci/fixup-ip32.c | |||
@@ -21,7 +21,7 @@ | |||
21 | #define INTB MACEPCI_SHARED0_IRQ | 21 | #define INTB MACEPCI_SHARED0_IRQ |
22 | #define INTC MACEPCI_SHARED1_IRQ | 22 | #define INTC MACEPCI_SHARED1_IRQ |
23 | #define INTD MACEPCI_SHARED2_IRQ | 23 | #define INTD MACEPCI_SHARED2_IRQ |
24 | static char irq_tab_mace[][5] __initdata = { | 24 | static char irq_tab_mace[][5] = { |
25 | /* Dummy INT#A INT#B INT#C INT#D */ | 25 | /* Dummy INT#A INT#B INT#C INT#D */ |
26 | {0, 0, 0, 0, 0}, /* This is placeholder row - never used */ | 26 | {0, 0, 0, 0, 0}, /* This is placeholder row - never used */ |
27 | {0, SCSI0, SCSI0, SCSI0, SCSI0}, | 27 | {0, SCSI0, SCSI0, SCSI0, SCSI0}, |
@@ -39,7 +39,7 @@ static char irq_tab_mace[][5] __initdata = { | |||
39 | * irqs. I suppose a device without a pin A will thank us for doing it | 39 | * irqs. I suppose a device without a pin A will thank us for doing it |
40 | * right if there exists such a broken piece of crap. | 40 | * right if there exists such a broken piece of crap. |
41 | */ | 41 | */ |
42 | int __init pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) | 42 | int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) |
43 | { | 43 | { |
44 | return irq_tab_mace[slot][pin]; | 44 | return irq_tab_mace[slot][pin]; |
45 | } | 45 | } |
diff --git a/arch/mips/pci/fixup-jmr3927.c b/arch/mips/pci/fixup-jmr3927.c index 0f1069527cba..d3102eeea898 100644 --- a/arch/mips/pci/fixup-jmr3927.c +++ b/arch/mips/pci/fixup-jmr3927.c | |||
@@ -31,7 +31,7 @@ | |||
31 | #include <asm/txx9/pci.h> | 31 | #include <asm/txx9/pci.h> |
32 | #include <asm/txx9/jmr3927.h> | 32 | #include <asm/txx9/jmr3927.h> |
33 | 33 | ||
34 | int __init jmr3927_pci_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) | 34 | int jmr3927_pci_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) |
35 | { | 35 | { |
36 | unsigned char irq = pin; | 36 | unsigned char irq = pin; |
37 | 37 | ||
diff --git a/arch/mips/pci/fixup-lantiq.c b/arch/mips/pci/fixup-lantiq.c index 2b5427d3f35c..81530a13b349 100644 --- a/arch/mips/pci/fixup-lantiq.c +++ b/arch/mips/pci/fixup-lantiq.c | |||
@@ -23,7 +23,7 @@ int pcibios_plat_dev_init(struct pci_dev *dev) | |||
23 | return 0; | 23 | return 0; |
24 | } | 24 | } |
25 | 25 | ||
26 | int __init pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) | 26 | int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) |
27 | { | 27 | { |
28 | return of_irq_parse_and_map_pci(dev, slot, pin); | 28 | return of_irq_parse_and_map_pci(dev, slot, pin); |
29 | } | 29 | } |
diff --git a/arch/mips/pci/fixup-lemote2f.c b/arch/mips/pci/fixup-lemote2f.c index 95ab9a1bd010..20cdfdc08938 100644 --- a/arch/mips/pci/fixup-lemote2f.c +++ b/arch/mips/pci/fixup-lemote2f.c | |||
@@ -30,7 +30,7 @@ | |||
30 | #define PCID 7 | 30 | #define PCID 7 |
31 | 31 | ||
32 | /* all the pci device has the PCIA pin, check the datasheet. */ | 32 | /* all the pci device has the PCIA pin, check the datasheet. */ |
33 | static char irq_tab[][5] __initdata = { | 33 | static char irq_tab[][5] = { |
34 | /* INTA INTB INTC INTD */ | 34 | /* INTA INTB INTC INTD */ |
35 | {0, 0, 0, 0, 0}, /* 11: Unused */ | 35 | {0, 0, 0, 0, 0}, /* 11: Unused */ |
36 | {0, 0, 0, 0, 0}, /* 12: Unused */ | 36 | {0, 0, 0, 0, 0}, /* 12: Unused */ |
@@ -51,7 +51,7 @@ static char irq_tab[][5] __initdata = { | |||
51 | {0, 0, 0, 0, 0}, /* 27: Unused */ | 51 | {0, 0, 0, 0, 0}, /* 27: Unused */ |
52 | }; | 52 | }; |
53 | 53 | ||
54 | int __init pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) | 54 | int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) |
55 | { | 55 | { |
56 | int virq; | 56 | int virq; |
57 | 57 | ||
diff --git a/arch/mips/pci/fixup-loongson3.c b/arch/mips/pci/fixup-loongson3.c index 2b6d5e196f99..8a741c2c6685 100644 --- a/arch/mips/pci/fixup-loongson3.c +++ b/arch/mips/pci/fixup-loongson3.c | |||
@@ -32,7 +32,7 @@ static void print_fixup_info(const struct pci_dev *pdev) | |||
32 | pdev->vendor, pdev->device, pdev->irq); | 32 | pdev->vendor, pdev->device, pdev->irq); |
33 | } | 33 | } |
34 | 34 | ||
35 | int __init pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) | 35 | int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) |
36 | { | 36 | { |
37 | print_fixup_info(dev); | 37 | print_fixup_info(dev); |
38 | return dev->irq; | 38 | return dev->irq; |
diff --git a/arch/mips/pci/fixup-malta.c b/arch/mips/pci/fixup-malta.c index 40e920c653cc..3ec85331795e 100644 --- a/arch/mips/pci/fixup-malta.c +++ b/arch/mips/pci/fixup-malta.c | |||
@@ -12,7 +12,7 @@ | |||
12 | static char pci_irq[5] = { | 12 | static char pci_irq[5] = { |
13 | }; | 13 | }; |
14 | 14 | ||
15 | static char irq_tab[][5] __initdata = { | 15 | static char irq_tab[][5] = { |
16 | /* INTA INTB INTC INTD */ | 16 | /* INTA INTB INTC INTD */ |
17 | {0, 0, 0, 0, 0 }, /* 0: GT64120 PCI bridge */ | 17 | {0, 0, 0, 0, 0 }, /* 0: GT64120 PCI bridge */ |
18 | {0, 0, 0, 0, 0 }, /* 1: Unused */ | 18 | {0, 0, 0, 0, 0 }, /* 1: Unused */ |
@@ -38,7 +38,7 @@ static char irq_tab[][5] __initdata = { | |||
38 | {0, PCID, PCIA, PCIB, PCIC } /* 21: PCI Slot 4 */ | 38 | {0, PCID, PCIA, PCIB, PCIC } /* 21: PCI Slot 4 */ |
39 | }; | 39 | }; |
40 | 40 | ||
41 | int __init pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) | 41 | int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) |
42 | { | 42 | { |
43 | int virq; | 43 | int virq; |
44 | virq = irq_tab[slot][pin]; | 44 | virq = irq_tab[slot][pin]; |
diff --git a/arch/mips/pci/fixup-mpc30x.c b/arch/mips/pci/fixup-mpc30x.c index 8e4f8288eca2..66eaf456bc89 100644 --- a/arch/mips/pci/fixup-mpc30x.c +++ b/arch/mips/pci/fixup-mpc30x.c | |||
@@ -22,19 +22,19 @@ | |||
22 | 22 | ||
23 | #include <asm/vr41xx/mpc30x.h> | 23 | #include <asm/vr41xx/mpc30x.h> |
24 | 24 | ||
25 | static const int internal_func_irqs[] __initconst = { | 25 | static const int internal_func_irqs[] = { |
26 | VRC4173_CASCADE_IRQ, | 26 | VRC4173_CASCADE_IRQ, |
27 | VRC4173_AC97_IRQ, | 27 | VRC4173_AC97_IRQ, |
28 | VRC4173_USB_IRQ, | 28 | VRC4173_USB_IRQ, |
29 | }; | 29 | }; |
30 | 30 | ||
31 | static const int irq_tab_mpc30x[] __initconst = { | 31 | static const int irq_tab_mpc30x[] = { |
32 | [12] = VRC4173_PCMCIA1_IRQ, | 32 | [12] = VRC4173_PCMCIA1_IRQ, |
33 | [13] = VRC4173_PCMCIA2_IRQ, | 33 | [13] = VRC4173_PCMCIA2_IRQ, |
34 | [29] = MQ200_IRQ, | 34 | [29] = MQ200_IRQ, |
35 | }; | 35 | }; |
36 | 36 | ||
37 | int __init pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) | 37 | int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) |
38 | { | 38 | { |
39 | if (slot == 30) | 39 | if (slot == 30) |
40 | return internal_func_irqs[PCI_FUNC(dev->devfn)]; | 40 | return internal_func_irqs[PCI_FUNC(dev->devfn)]; |
diff --git a/arch/mips/pci/fixup-pmcmsp.c b/arch/mips/pci/fixup-pmcmsp.c index fab405c21c2f..4ad2ef02087b 100644 --- a/arch/mips/pci/fixup-pmcmsp.c +++ b/arch/mips/pci/fixup-pmcmsp.c | |||
@@ -47,7 +47,7 @@ | |||
47 | 47 | ||
48 | #if defined(CONFIG_PMC_MSP7120_GW) | 48 | #if defined(CONFIG_PMC_MSP7120_GW) |
49 | /* Garibaldi Board IRQ wiring to PCI slots */ | 49 | /* Garibaldi Board IRQ wiring to PCI slots */ |
50 | static char irq_tab[][5] __initdata = { | 50 | static char irq_tab[][5] = { |
51 | /* INTA INTB INTC INTD */ | 51 | /* INTA INTB INTC INTD */ |
52 | {0, 0, 0, 0, 0 }, /* (AD[0]): Unused */ | 52 | {0, 0, 0, 0, 0 }, /* (AD[0]): Unused */ |
53 | {0, 0, 0, 0, 0 }, /* (AD[1]): Unused */ | 53 | {0, 0, 0, 0, 0 }, /* (AD[1]): Unused */ |
@@ -86,7 +86,7 @@ static char irq_tab[][5] __initdata = { | |||
86 | #elif defined(CONFIG_PMC_MSP7120_EVAL) | 86 | #elif defined(CONFIG_PMC_MSP7120_EVAL) |
87 | 87 | ||
88 | /* MSP7120 Eval Board IRQ wiring to PCI slots */ | 88 | /* MSP7120 Eval Board IRQ wiring to PCI slots */ |
89 | static char irq_tab[][5] __initdata = { | 89 | static char irq_tab[][5] = { |
90 | /* INTA INTB INTC INTD */ | 90 | /* INTA INTB INTC INTD */ |
91 | {0, 0, 0, 0, 0 }, /* (AD[0]): Unused */ | 91 | {0, 0, 0, 0, 0 }, /* (AD[0]): Unused */ |
92 | {0, 0, 0, 0, 0 }, /* (AD[1]): Unused */ | 92 | {0, 0, 0, 0, 0 }, /* (AD[1]): Unused */ |
@@ -125,7 +125,7 @@ static char irq_tab[][5] __initdata = { | |||
125 | #else | 125 | #else |
126 | 126 | ||
127 | /* Unknown board -- don't assign any IRQs */ | 127 | /* Unknown board -- don't assign any IRQs */ |
128 | static char irq_tab[][5] __initdata = { | 128 | static char irq_tab[][5] = { |
129 | /* INTA INTB INTC INTD */ | 129 | /* INTA INTB INTC INTD */ |
130 | {0, 0, 0, 0, 0 }, /* (AD[0]): Unused */ | 130 | {0, 0, 0, 0, 0 }, /* (AD[0]): Unused */ |
131 | {0, 0, 0, 0, 0 }, /* (AD[1]): Unused */ | 131 | {0, 0, 0, 0, 0 }, /* (AD[1]): Unused */ |
@@ -202,7 +202,7 @@ int pcibios_plat_dev_init(struct pci_dev *dev) | |||
202 | * RETURNS: IRQ number | 202 | * RETURNS: IRQ number |
203 | * | 203 | * |
204 | ****************************************************************************/ | 204 | ****************************************************************************/ |
205 | int __init pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) | 205 | int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) |
206 | { | 206 | { |
207 | #if !defined(CONFIG_PMC_MSP7120_GW) && !defined(CONFIG_PMC_MSP7120_EVAL) | 207 | #if !defined(CONFIG_PMC_MSP7120_GW) && !defined(CONFIG_PMC_MSP7120_EVAL) |
208 | printk(KERN_WARNING "PCI: unknown board, no PCI IRQs assigned.\n"); | 208 | printk(KERN_WARNING "PCI: unknown board, no PCI IRQs assigned.\n"); |
diff --git a/arch/mips/pci/fixup-rbtx4927.c b/arch/mips/pci/fixup-rbtx4927.c index 321db265829c..d6aaed1d6be9 100644 --- a/arch/mips/pci/fixup-rbtx4927.c +++ b/arch/mips/pci/fixup-rbtx4927.c | |||
@@ -36,7 +36,7 @@ | |||
36 | #include <asm/txx9/pci.h> | 36 | #include <asm/txx9/pci.h> |
37 | #include <asm/txx9/rbtx4927.h> | 37 | #include <asm/txx9/rbtx4927.h> |
38 | 38 | ||
39 | int __init rbtx4927_pci_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) | 39 | int rbtx4927_pci_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) |
40 | { | 40 | { |
41 | unsigned char irq = pin; | 41 | unsigned char irq = pin; |
42 | 42 | ||
diff --git a/arch/mips/pci/fixup-rbtx4938.c b/arch/mips/pci/fixup-rbtx4938.c index a80579af609b..ff22a22db73e 100644 --- a/arch/mips/pci/fixup-rbtx4938.c +++ b/arch/mips/pci/fixup-rbtx4938.c | |||
@@ -13,7 +13,7 @@ | |||
13 | #include <asm/txx9/pci.h> | 13 | #include <asm/txx9/pci.h> |
14 | #include <asm/txx9/rbtx4938.h> | 14 | #include <asm/txx9/rbtx4938.h> |
15 | 15 | ||
16 | int __init rbtx4938_pci_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) | 16 | int rbtx4938_pci_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) |
17 | { | 17 | { |
18 | int irq = tx4938_pcic1_map_irq(dev, slot); | 18 | int irq = tx4938_pcic1_map_irq(dev, slot); |
19 | 19 | ||
diff --git a/arch/mips/pci/fixup-sni.c b/arch/mips/pci/fixup-sni.c index f67ebeeb4200..adb9a58641e8 100644 --- a/arch/mips/pci/fixup-sni.c +++ b/arch/mips/pci/fixup-sni.c | |||
@@ -40,7 +40,7 @@ | |||
40 | * seem to be a documentation error. At least on my RM200C the Cirrus | 40 | * seem to be a documentation error. At least on my RM200C the Cirrus |
41 | * Logic CL-GD5434 VGA is device 3. | 41 | * Logic CL-GD5434 VGA is device 3. |
42 | */ | 42 | */ |
43 | static char irq_tab_rm200[8][5] __initdata = { | 43 | static char irq_tab_rm200[8][5] = { |
44 | /* INTA INTB INTC INTD */ | 44 | /* INTA INTB INTC INTD */ |
45 | { 0, 0, 0, 0, 0 }, /* EISA bridge */ | 45 | { 0, 0, 0, 0, 0 }, /* EISA bridge */ |
46 | { SCSI, SCSI, SCSI, SCSI, SCSI }, /* SCSI */ | 46 | { SCSI, SCSI, SCSI, SCSI, SCSI }, /* SCSI */ |
@@ -57,7 +57,7 @@ static char irq_tab_rm200[8][5] __initdata = { | |||
57 | * | 57 | * |
58 | * The VGA card is optional for RM300 systems. | 58 | * The VGA card is optional for RM300 systems. |
59 | */ | 59 | */ |
60 | static char irq_tab_rm300d[8][5] __initdata = { | 60 | static char irq_tab_rm300d[8][5] = { |
61 | /* INTA INTB INTC INTD */ | 61 | /* INTA INTB INTC INTD */ |
62 | { 0, 0, 0, 0, 0 }, /* EISA bridge */ | 62 | { 0, 0, 0, 0, 0 }, /* EISA bridge */ |
63 | { SCSI, SCSI, SCSI, SCSI, SCSI }, /* SCSI */ | 63 | { SCSI, SCSI, SCSI, SCSI, SCSI }, /* SCSI */ |
@@ -69,7 +69,7 @@ static char irq_tab_rm300d[8][5] __initdata = { | |||
69 | { 0, INTD, INTA, INTB, INTC }, /* Slot 4 */ | 69 | { 0, INTD, INTA, INTB, INTC }, /* Slot 4 */ |
70 | }; | 70 | }; |
71 | 71 | ||
72 | static char irq_tab_rm300e[5][5] __initdata = { | 72 | static char irq_tab_rm300e[5][5] = { |
73 | /* INTA INTB INTC INTD */ | 73 | /* INTA INTB INTC INTD */ |
74 | { 0, 0, 0, 0, 0 }, /* HOST bridge */ | 74 | { 0, 0, 0, 0, 0 }, /* HOST bridge */ |
75 | { SCSI, SCSI, SCSI, SCSI, SCSI }, /* SCSI */ | 75 | { SCSI, SCSI, SCSI, SCSI, SCSI }, /* SCSI */ |
@@ -96,7 +96,7 @@ static char irq_tab_rm300e[5][5] __initdata = { | |||
96 | #define INTC PCIT_IRQ_INTC | 96 | #define INTC PCIT_IRQ_INTC |
97 | #define INTD PCIT_IRQ_INTD | 97 | #define INTD PCIT_IRQ_INTD |
98 | 98 | ||
99 | static char irq_tab_pcit[13][5] __initdata = { | 99 | static char irq_tab_pcit[13][5] = { |
100 | /* INTA INTB INTC INTD */ | 100 | /* INTA INTB INTC INTD */ |
101 | { 0, 0, 0, 0, 0 }, /* HOST bridge */ | 101 | { 0, 0, 0, 0, 0 }, /* HOST bridge */ |
102 | { SCSI0, SCSI0, SCSI0, SCSI0, SCSI0 }, /* SCSI */ | 102 | { SCSI0, SCSI0, SCSI0, SCSI0, SCSI0 }, /* SCSI */ |
@@ -113,7 +113,7 @@ static char irq_tab_pcit[13][5] __initdata = { | |||
113 | { 0, INTA, INTB, INTC, INTD }, /* Slot 5 */ | 113 | { 0, INTA, INTB, INTC, INTD }, /* Slot 5 */ |
114 | }; | 114 | }; |
115 | 115 | ||
116 | static char irq_tab_pcit_cplus[13][5] __initdata = { | 116 | static char irq_tab_pcit_cplus[13][5] = { |
117 | /* INTA INTB INTC INTD */ | 117 | /* INTA INTB INTC INTD */ |
118 | { 0, 0, 0, 0, 0 }, /* HOST bridge */ | 118 | { 0, 0, 0, 0, 0 }, /* HOST bridge */ |
119 | { 0, INTB, INTC, INTD, INTA }, /* PCI Slot 9 */ | 119 | { 0, INTB, INTC, INTD, INTA }, /* PCI Slot 9 */ |
@@ -130,7 +130,7 @@ static inline int is_rm300_revd(void) | |||
130 | return (csmsr & 0xa0) == 0x20; | 130 | return (csmsr & 0xa0) == 0x20; |
131 | } | 131 | } |
132 | 132 | ||
133 | int __init pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) | 133 | int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) |
134 | { | 134 | { |
135 | switch (sni_brd_type) { | 135 | switch (sni_brd_type) { |
136 | case SNI_BRD_PCI_TOWER_CPLUS: | 136 | case SNI_BRD_PCI_TOWER_CPLUS: |
diff --git a/arch/mips/pci/fixup-tb0219.c b/arch/mips/pci/fixup-tb0219.c index d0b0083fbd27..cc581535f257 100644 --- a/arch/mips/pci/fixup-tb0219.c +++ b/arch/mips/pci/fixup-tb0219.c | |||
@@ -23,7 +23,7 @@ | |||
23 | 23 | ||
24 | #include <asm/vr41xx/tb0219.h> | 24 | #include <asm/vr41xx/tb0219.h> |
25 | 25 | ||
26 | int __init pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) | 26 | int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) |
27 | { | 27 | { |
28 | int irq = -1; | 28 | int irq = -1; |
29 | 29 | ||
diff --git a/arch/mips/pci/fixup-tb0226.c b/arch/mips/pci/fixup-tb0226.c index 4196ccf3ea3d..b827b5cad5fd 100644 --- a/arch/mips/pci/fixup-tb0226.c +++ b/arch/mips/pci/fixup-tb0226.c | |||
@@ -23,7 +23,7 @@ | |||
23 | #include <asm/vr41xx/giu.h> | 23 | #include <asm/vr41xx/giu.h> |
24 | #include <asm/vr41xx/tb0226.h> | 24 | #include <asm/vr41xx/tb0226.h> |
25 | 25 | ||
26 | int __init pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) | 26 | int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) |
27 | { | 27 | { |
28 | int irq = -1; | 28 | int irq = -1; |
29 | 29 | ||
diff --git a/arch/mips/pci/fixup-tb0287.c b/arch/mips/pci/fixup-tb0287.c index 8c5039ed75d7..98f26285f2e3 100644 --- a/arch/mips/pci/fixup-tb0287.c +++ b/arch/mips/pci/fixup-tb0287.c | |||
@@ -22,7 +22,7 @@ | |||
22 | 22 | ||
23 | #include <asm/vr41xx/tb0287.h> | 23 | #include <asm/vr41xx/tb0287.h> |
24 | 24 | ||
25 | int __init pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) | 25 | int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) |
26 | { | 26 | { |
27 | unsigned char bus; | 27 | unsigned char bus; |
28 | int irq = -1; | 28 | int irq = -1; |
diff --git a/arch/mips/pci/pci-alchemy.c b/arch/mips/pci/pci-alchemy.c index e99ca7702d8a..f15ec98de2de 100644 --- a/arch/mips/pci/pci-alchemy.c +++ b/arch/mips/pci/pci-alchemy.c | |||
@@ -522,7 +522,7 @@ static int __init alchemy_pci_init(void) | |||
522 | arch_initcall(alchemy_pci_init); | 522 | arch_initcall(alchemy_pci_init); |
523 | 523 | ||
524 | 524 | ||
525 | int __init pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) | 525 | int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) |
526 | { | 526 | { |
527 | struct alchemy_pci_context *ctx = dev->sysdata; | 527 | struct alchemy_pci_context *ctx = dev->sysdata; |
528 | if (ctx && ctx->board_map_irq) | 528 | if (ctx && ctx->board_map_irq) |
diff --git a/arch/mips/pci/pci-bcm47xx.c b/arch/mips/pci/pci-bcm47xx.c index 76f16eaed0ad..230d7dd273e2 100644 --- a/arch/mips/pci/pci-bcm47xx.c +++ b/arch/mips/pci/pci-bcm47xx.c | |||
@@ -28,7 +28,7 @@ | |||
28 | #include <linux/bcma/bcma.h> | 28 | #include <linux/bcma/bcma.h> |
29 | #include <bcm47xx.h> | 29 | #include <bcm47xx.h> |
30 | 30 | ||
31 | int __init pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) | 31 | int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) |
32 | { | 32 | { |
33 | return 0; | 33 | return 0; |
34 | } | 34 | } |
diff --git a/arch/mips/pci/pci-lasat.c b/arch/mips/pci/pci-lasat.c index 40d2797d2bc4..47f4ee6bbb3b 100644 --- a/arch/mips/pci/pci-lasat.c +++ b/arch/mips/pci/pci-lasat.c | |||
@@ -61,7 +61,7 @@ arch_initcall(lasat_pci_setup); | |||
61 | #define LASAT_IRQ_PCIC (LASAT_IRQ_BASE + 7) | 61 | #define LASAT_IRQ_PCIC (LASAT_IRQ_BASE + 7) |
62 | #define LASAT_IRQ_PCID (LASAT_IRQ_BASE + 8) | 62 | #define LASAT_IRQ_PCID (LASAT_IRQ_BASE + 8) |
63 | 63 | ||
64 | int __init pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) | 64 | int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) |
65 | { | 65 | { |
66 | switch (slot) { | 66 | switch (slot) { |
67 | case 1: | 67 | case 1: |
diff --git a/arch/mips/pci/pci-mt7620.c b/arch/mips/pci/pci-mt7620.c index 4e633c1e7ff3..90fba9bf98da 100644 --- a/arch/mips/pci/pci-mt7620.c +++ b/arch/mips/pci/pci-mt7620.c | |||
@@ -361,7 +361,7 @@ static int mt7620_pci_probe(struct platform_device *pdev) | |||
361 | return 0; | 361 | return 0; |
362 | } | 362 | } |
363 | 363 | ||
364 | int __init pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) | 364 | int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) |
365 | { | 365 | { |
366 | u16 cmd; | 366 | u16 cmd; |
367 | u32 val; | 367 | u32 val; |
diff --git a/arch/mips/pci/pci-octeon.c b/arch/mips/pci/pci-octeon.c index 9ee01936862e..3e92a06fa772 100644 --- a/arch/mips/pci/pci-octeon.c +++ b/arch/mips/pci/pci-octeon.c | |||
@@ -59,8 +59,7 @@ union octeon_pci_address { | |||
59 | } s; | 59 | } s; |
60 | }; | 60 | }; |
61 | 61 | ||
62 | int __initconst (*octeon_pcibios_map_irq)(const struct pci_dev *dev, | 62 | int (*octeon_pcibios_map_irq)(const struct pci_dev *dev, u8 slot, u8 pin); |
63 | u8 slot, u8 pin); | ||
64 | enum octeon_dma_bar_type octeon_dma_bar_type = OCTEON_DMA_BAR_TYPE_INVALID; | 63 | enum octeon_dma_bar_type octeon_dma_bar_type = OCTEON_DMA_BAR_TYPE_INVALID; |
65 | 64 | ||
66 | /** | 65 | /** |
@@ -74,7 +73,7 @@ enum octeon_dma_bar_type octeon_dma_bar_type = OCTEON_DMA_BAR_TYPE_INVALID; | |||
74 | * as it goes through each bridge. | 73 | * as it goes through each bridge. |
75 | * Returns Interrupt number for the device | 74 | * Returns Interrupt number for the device |
76 | */ | 75 | */ |
77 | int __init pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) | 76 | int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) |
78 | { | 77 | { |
79 | if (octeon_pcibios_map_irq) | 78 | if (octeon_pcibios_map_irq) |
80 | return octeon_pcibios_map_irq(dev, slot, pin); | 79 | return octeon_pcibios_map_irq(dev, slot, pin); |
diff --git a/arch/mips/pci/pci-rt2880.c b/arch/mips/pci/pci-rt2880.c index d6360fe73d05..711cdccdf65b 100644 --- a/arch/mips/pci/pci-rt2880.c +++ b/arch/mips/pci/pci-rt2880.c | |||
@@ -181,7 +181,7 @@ static inline void rt2880_pci_write_u32(unsigned long reg, u32 val) | |||
181 | spin_unlock_irqrestore(&rt2880_pci_lock, flags); | 181 | spin_unlock_irqrestore(&rt2880_pci_lock, flags); |
182 | } | 182 | } |
183 | 183 | ||
184 | int __init pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) | 184 | int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) |
185 | { | 185 | { |
186 | u16 cmd; | 186 | u16 cmd; |
187 | int irq = -1; | 187 | int irq = -1; |
diff --git a/arch/mips/pci/pci-rt3883.c b/arch/mips/pci/pci-rt3883.c index 04f8ea953297..958899ffe99c 100644 --- a/arch/mips/pci/pci-rt3883.c +++ b/arch/mips/pci/pci-rt3883.c | |||
@@ -564,7 +564,7 @@ err_put_intc_node: | |||
564 | return err; | 564 | return err; |
565 | } | 565 | } |
566 | 566 | ||
567 | int __init pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) | 567 | int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) |
568 | { | 568 | { |
569 | return of_irq_parse_and_map_pci(dev, slot, pin); | 569 | return of_irq_parse_and_map_pci(dev, slot, pin); |
570 | } | 570 | } |
diff --git a/arch/mips/pci/pci-tx4938.c b/arch/mips/pci/pci-tx4938.c index 000c0e1f9ef8..a6418460e3c4 100644 --- a/arch/mips/pci/pci-tx4938.c +++ b/arch/mips/pci/pci-tx4938.c | |||
@@ -112,7 +112,7 @@ int __init tx4938_pciclk66_setup(void) | |||
112 | return pciclk; | 112 | return pciclk; |
113 | } | 113 | } |
114 | 114 | ||
115 | int __init tx4938_pcic1_map_irq(const struct pci_dev *dev, u8 slot) | 115 | int tx4938_pcic1_map_irq(const struct pci_dev *dev, u8 slot) |
116 | { | 116 | { |
117 | if (get_tx4927_pcicptr(dev->bus->sysdata) == tx4938_pcic1ptr) { | 117 | if (get_tx4927_pcicptr(dev->bus->sysdata) == tx4938_pcic1ptr) { |
118 | switch (slot) { | 118 | switch (slot) { |
diff --git a/arch/mips/pci/pci-tx4939.c b/arch/mips/pci/pci-tx4939.c index 9d6acc00f348..09a65f7dbe7c 100644 --- a/arch/mips/pci/pci-tx4939.c +++ b/arch/mips/pci/pci-tx4939.c | |||
@@ -48,7 +48,7 @@ void __init tx4939_report_pci1clk(void) | |||
48 | ((pciclk + 50000) / 100000) % 10); | 48 | ((pciclk + 50000) / 100000) % 10); |
49 | } | 49 | } |
50 | 50 | ||
51 | int __init tx4939_pcic1_map_irq(const struct pci_dev *dev, u8 slot) | 51 | int tx4939_pcic1_map_irq(const struct pci_dev *dev, u8 slot) |
52 | { | 52 | { |
53 | if (get_tx4927_pcicptr(dev->bus->sysdata) == tx4939_pcic1ptr) { | 53 | if (get_tx4927_pcicptr(dev->bus->sysdata) == tx4939_pcic1ptr) { |
54 | switch (slot) { | 54 | switch (slot) { |
@@ -68,7 +68,7 @@ int __init tx4939_pcic1_map_irq(const struct pci_dev *dev, u8 slot) | |||
68 | return -1; | 68 | return -1; |
69 | } | 69 | } |
70 | 70 | ||
71 | int __init tx4939_pci_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) | 71 | int tx4939_pci_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) |
72 | { | 72 | { |
73 | int irq = tx4939_pcic1_map_irq(dev, slot); | 73 | int irq = tx4939_pcic1_map_irq(dev, slot); |
74 | 74 | ||
diff --git a/arch/mips/pci/pci-xlp.c b/arch/mips/pci/pci-xlp.c index 7babf01600cb..9eff9137f78e 100644 --- a/arch/mips/pci/pci-xlp.c +++ b/arch/mips/pci/pci-xlp.c | |||
@@ -205,7 +205,7 @@ int xlp_socdev_to_node(const struct pci_dev *lnkdev) | |||
205 | return PCI_SLOT(lnkdev->devfn) / 8; | 205 | return PCI_SLOT(lnkdev->devfn) / 8; |
206 | } | 206 | } |
207 | 207 | ||
208 | int __init pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) | 208 | int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) |
209 | { | 209 | { |
210 | struct pci_dev *lnkdev; | 210 | struct pci_dev *lnkdev; |
211 | int lnkfunc, node; | 211 | int lnkfunc, node; |
diff --git a/arch/mips/pci/pci-xlr.c b/arch/mips/pci/pci-xlr.c index 26d2dabef281..2a1c81a129ba 100644 --- a/arch/mips/pci/pci-xlr.c +++ b/arch/mips/pci/pci-xlr.c | |||
@@ -315,7 +315,7 @@ static void xls_pcie_ack_b(struct irq_data *d) | |||
315 | } | 315 | } |
316 | } | 316 | } |
317 | 317 | ||
318 | int __init pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) | 318 | int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) |
319 | { | 319 | { |
320 | return get_irq_vector(dev); | 320 | return get_irq_vector(dev); |
321 | } | 321 | } |
diff --git a/arch/mips/pci/pcie-octeon.c b/arch/mips/pci/pcie-octeon.c index ad3584dbc9d7..fd2887415bc8 100644 --- a/arch/mips/pci/pcie-octeon.c +++ b/arch/mips/pci/pcie-octeon.c | |||
@@ -1464,8 +1464,7 @@ static int cvmx_pcie_rc_initialize(int pcie_port) | |||
1464 | * as it goes through each bridge. | 1464 | * as it goes through each bridge. |
1465 | * Returns Interrupt number for the device | 1465 | * Returns Interrupt number for the device |
1466 | */ | 1466 | */ |
1467 | int __init octeon_pcie_pcibios_map_irq(const struct pci_dev *dev, | 1467 | int octeon_pcie_pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) |
1468 | u8 slot, u8 pin) | ||
1469 | { | 1468 | { |
1470 | /* | 1469 | /* |
1471 | * The EBH5600 board with the PCI to PCIe bridge mistakenly | 1470 | * The EBH5600 board with the PCI to PCIe bridge mistakenly |
diff --git a/arch/mips/pmcs-msp71xx/msp_smp.c b/arch/mips/pmcs-msp71xx/msp_smp.c index ffa0f7101a97..2b08242ade62 100644 --- a/arch/mips/pmcs-msp71xx/msp_smp.c +++ b/arch/mips/pmcs-msp71xx/msp_smp.c | |||
@@ -22,6 +22,8 @@ | |||
22 | #include <linux/smp.h> | 22 | #include <linux/smp.h> |
23 | #include <linux/interrupt.h> | 23 | #include <linux/interrupt.h> |
24 | 24 | ||
25 | #include <asm/setup.h> | ||
26 | |||
25 | #ifdef CONFIG_MIPS_MT_SMP | 27 | #ifdef CONFIG_MIPS_MT_SMP |
26 | #define MIPS_CPU_IPI_RESCHED_IRQ 0 /* SW int 0 for resched */ | 28 | #define MIPS_CPU_IPI_RESCHED_IRQ 0 /* SW int 0 for resched */ |
27 | #define MIPS_CPU_IPI_CALL_IRQ 1 /* SW int 1 for call */ | 29 | #define MIPS_CPU_IPI_CALL_IRQ 1 /* SW int 1 for call */ |
diff --git a/arch/mips/txx9/generic/pci.c b/arch/mips/txx9/generic/pci.c index 0bd2a1e1ff9a..fb998726bd5d 100644 --- a/arch/mips/txx9/generic/pci.c +++ b/arch/mips/txx9/generic/pci.c | |||
@@ -386,9 +386,10 @@ int pcibios_plat_dev_init(struct pci_dev *dev) | |||
386 | return 0; | 386 | return 0; |
387 | } | 387 | } |
388 | 388 | ||
389 | int __init pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) | 389 | static int (*txx9_pci_map_irq)(const struct pci_dev *dev, u8 slot, u8 pin); |
390 | int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) | ||
390 | { | 391 | { |
391 | return txx9_board_vec->pci_map_irq(dev, slot, pin); | 392 | return txx9_pci_map_irq(dev, slot, pin); |
392 | } | 393 | } |
393 | 394 | ||
394 | char * (*txx9_board_pcibios_setup)(char *str) __initdata; | 395 | char * (*txx9_board_pcibios_setup)(char *str) __initdata; |
@@ -424,5 +425,8 @@ char *__init txx9_pcibios_setup(char *str) | |||
424 | txx9_pci_err_action = TXX9_PCI_ERR_IGNORE; | 425 | txx9_pci_err_action = TXX9_PCI_ERR_IGNORE; |
425 | return NULL; | 426 | return NULL; |
426 | } | 427 | } |
428 | |||
429 | txx9_pci_map_irq = txx9_board_vec->pci_map_irq; | ||
430 | |||
427 | return str; | 431 | return str; |
428 | } | 432 | } |
diff --git a/arch/mn10300/kernel/process.c b/arch/mn10300/kernel/process.c index 89e8027e07fb..7c475fd99c46 100644 --- a/arch/mn10300/kernel/process.c +++ b/arch/mn10300/kernel/process.c | |||
@@ -59,10 +59,6 @@ void arch_cpu_idle(void) | |||
59 | } | 59 | } |
60 | #endif | 60 | #endif |
61 | 61 | ||
62 | void release_segments(struct mm_struct *mm) | ||
63 | { | ||
64 | } | ||
65 | |||
66 | void machine_restart(char *cmd) | 62 | void machine_restart(char *cmd) |
67 | { | 63 | { |
68 | #ifdef CONFIG_KERNEL_DEBUGGER | 64 | #ifdef CONFIG_KERNEL_DEBUGGER |
@@ -113,14 +109,6 @@ void release_thread(struct task_struct *dead_task) | |||
113 | } | 109 | } |
114 | 110 | ||
115 | /* | 111 | /* |
116 | * we do not have to muck with descriptors here, that is | ||
117 | * done in switch_mm() as needed. | ||
118 | */ | ||
119 | void copy_segments(struct task_struct *p, struct mm_struct *new_mm) | ||
120 | { | ||
121 | } | ||
122 | |||
123 | /* | ||
124 | * this gets called so that we can store lazy state into memory and copy the | 112 | * this gets called so that we can store lazy state into memory and copy the |
125 | * current task into the new thread. | 113 | * current task into the new thread. |
126 | */ | 114 | */ |
diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig index ba7b7ddc3844..a57dedbfc7b7 100644 --- a/arch/parisc/Kconfig +++ b/arch/parisc/Kconfig | |||
@@ -257,6 +257,18 @@ config PARISC_PAGE_SIZE_64KB | |||
257 | 257 | ||
258 | endchoice | 258 | endchoice |
259 | 259 | ||
260 | config PARISC_SELF_EXTRACT | ||
261 | bool "Build kernel as self-extracting executable" | ||
262 | default y | ||
263 | help | ||
264 | Say Y if you want to build the parisc kernel as a kind of | ||
265 | self-extracting executable. | ||
266 | |||
267 | If you say N here, the kernel will be compressed with gzip | ||
268 | which can be loaded by the palo bootloader directly too. | ||
269 | |||
270 | If you don't know what to do here, say Y. | ||
271 | |||
260 | config SMP | 272 | config SMP |
261 | bool "Symmetric multi-processing support" | 273 | bool "Symmetric multi-processing support" |
262 | ---help--- | 274 | ---help--- |
diff --git a/arch/parisc/Makefile b/arch/parisc/Makefile index 58fae5d2449d..01946ebaff72 100644 --- a/arch/parisc/Makefile +++ b/arch/parisc/Makefile | |||
@@ -129,8 +129,13 @@ Image: vmlinux | |||
129 | bzImage: vmlinux | 129 | bzImage: vmlinux |
130 | $(Q)$(MAKE) $(build)=$(boot) $(boot)/$@ | 130 | $(Q)$(MAKE) $(build)=$(boot) $(boot)/$@ |
131 | 131 | ||
132 | ifdef CONFIG_PARISC_SELF_EXTRACT | ||
132 | vmlinuz: bzImage | 133 | vmlinuz: bzImage |
133 | $(OBJCOPY) $(boot)/bzImage $@ | 134 | $(OBJCOPY) $(boot)/bzImage $@ |
135 | else | ||
136 | vmlinuz: vmlinux | ||
137 | @gzip -cf -9 $< > $@ | ||
138 | endif | ||
134 | 139 | ||
135 | install: | 140 | install: |
136 | $(CONFIG_SHELL) $(src)/arch/parisc/install.sh \ | 141 | $(CONFIG_SHELL) $(src)/arch/parisc/install.sh \ |
diff --git a/arch/parisc/boot/compressed/Makefile b/arch/parisc/boot/compressed/Makefile index 5450a11c9d10..7d7e594bda36 100644 --- a/arch/parisc/boot/compressed/Makefile +++ b/arch/parisc/boot/compressed/Makefile | |||
@@ -15,7 +15,7 @@ targets += misc.o piggy.o sizes.h head.o real2.o firmware.o | |||
15 | KBUILD_CFLAGS := -D__KERNEL__ -O2 -DBOOTLOADER | 15 | KBUILD_CFLAGS := -D__KERNEL__ -O2 -DBOOTLOADER |
16 | KBUILD_CFLAGS += -DDISABLE_BRANCH_PROFILING | 16 | KBUILD_CFLAGS += -DDISABLE_BRANCH_PROFILING |
17 | KBUILD_CFLAGS += $(cflags-y) -fno-delete-null-pointer-checks | 17 | KBUILD_CFLAGS += $(cflags-y) -fno-delete-null-pointer-checks |
18 | KBUILD_CFLAGS += -fno-PIE -mno-space-regs -mdisable-fpregs | 18 | KBUILD_CFLAGS += -fno-PIE -mno-space-regs -mdisable-fpregs -Os |
19 | ifndef CONFIG_64BIT | 19 | ifndef CONFIG_64BIT |
20 | KBUILD_CFLAGS += -mfast-indirect-calls | 20 | KBUILD_CFLAGS += -mfast-indirect-calls |
21 | endif | 21 | endif |
diff --git a/arch/parisc/boot/compressed/misc.c b/arch/parisc/boot/compressed/misc.c index 13a4bf9ac4da..9345b44b86f0 100644 --- a/arch/parisc/boot/compressed/misc.c +++ b/arch/parisc/boot/compressed/misc.c | |||
@@ -24,7 +24,8 @@ | |||
24 | /* Symbols defined by linker scripts */ | 24 | /* Symbols defined by linker scripts */ |
25 | extern char input_data[]; | 25 | extern char input_data[]; |
26 | extern int input_len; | 26 | extern int input_len; |
27 | extern __le32 output_len; /* at unaligned address, little-endian */ | 27 | /* output_len is inserted by the linker possibly at an unaligned address */ |
28 | extern __le32 output_len __aligned(1); | ||
28 | extern char _text, _end; | 29 | extern char _text, _end; |
29 | extern char _bss, _ebss; | 30 | extern char _bss, _ebss; |
30 | extern char _startcode_end; | 31 | extern char _startcode_end; |
diff --git a/arch/parisc/include/asm/pdc.h b/arch/parisc/include/asm/pdc.h index 26b4455baa83..510341f62d97 100644 --- a/arch/parisc/include/asm/pdc.h +++ b/arch/parisc/include/asm/pdc.h | |||
@@ -280,6 +280,7 @@ void setup_pdc(void); /* in inventory.c */ | |||
280 | /* wrapper-functions from pdc.c */ | 280 | /* wrapper-functions from pdc.c */ |
281 | 281 | ||
282 | int pdc_add_valid(unsigned long address); | 282 | int pdc_add_valid(unsigned long address); |
283 | int pdc_instr(unsigned int *instr); | ||
283 | int pdc_chassis_info(struct pdc_chassis_info *chassis_info, void *led_info, unsigned long len); | 284 | int pdc_chassis_info(struct pdc_chassis_info *chassis_info, void *led_info, unsigned long len); |
284 | int pdc_chassis_disp(unsigned long disp); | 285 | int pdc_chassis_disp(unsigned long disp); |
285 | int pdc_chassis_warn(unsigned long *warn); | 286 | int pdc_chassis_warn(unsigned long *warn); |
diff --git a/arch/parisc/include/asm/smp.h b/arch/parisc/include/asm/smp.h index a5dc9066c6d8..ad9c9c3b4136 100644 --- a/arch/parisc/include/asm/smp.h +++ b/arch/parisc/include/asm/smp.h | |||
@@ -1,6 +1,7 @@ | |||
1 | #ifndef __ASM_SMP_H | 1 | #ifndef __ASM_SMP_H |
2 | #define __ASM_SMP_H | 2 | #define __ASM_SMP_H |
3 | 3 | ||
4 | extern int init_per_cpu(int cpuid); | ||
4 | 5 | ||
5 | #if defined(CONFIG_SMP) | 6 | #if defined(CONFIG_SMP) |
6 | 7 | ||
diff --git a/arch/parisc/kernel/firmware.c b/arch/parisc/kernel/firmware.c index ab80e5c6f651..6d471c00c71a 100644 --- a/arch/parisc/kernel/firmware.c +++ b/arch/parisc/kernel/firmware.c | |||
@@ -233,6 +233,26 @@ int pdc_add_valid(unsigned long address) | |||
233 | EXPORT_SYMBOL(pdc_add_valid); | 233 | EXPORT_SYMBOL(pdc_add_valid); |
234 | 234 | ||
235 | /** | 235 | /** |
236 | * pdc_instr - Get instruction that invokes PDCE_CHECK in HPMC handler. | ||
237 | * @instr: Pointer to variable which will get instruction opcode. | ||
238 | * | ||
239 | * The return value is PDC_OK (0) in case call succeeded. | ||
240 | */ | ||
241 | int __init pdc_instr(unsigned int *instr) | ||
242 | { | ||
243 | int retval; | ||
244 | unsigned long flags; | ||
245 | |||
246 | spin_lock_irqsave(&pdc_lock, flags); | ||
247 | retval = mem_pdc_call(PDC_INSTR, 0UL, __pa(pdc_result)); | ||
248 | convert_to_wide(pdc_result); | ||
249 | *instr = pdc_result[0]; | ||
250 | spin_unlock_irqrestore(&pdc_lock, flags); | ||
251 | |||
252 | return retval; | ||
253 | } | ||
254 | |||
255 | /** | ||
236 | * pdc_chassis_info - Return chassis information. | 256 | * pdc_chassis_info - Return chassis information. |
237 | * @result: The return buffer. | 257 | * @result: The return buffer. |
238 | * @chassis_info: The memory buffer address. | 258 | * @chassis_info: The memory buffer address. |
diff --git a/arch/parisc/kernel/pdt.c b/arch/parisc/kernel/pdt.c index 05730a83895c..00aed082969b 100644 --- a/arch/parisc/kernel/pdt.c +++ b/arch/parisc/kernel/pdt.c | |||
@@ -15,6 +15,7 @@ | |||
15 | #include <linux/memblock.h> | 15 | #include <linux/memblock.h> |
16 | #include <linux/seq_file.h> | 16 | #include <linux/seq_file.h> |
17 | #include <linux/kthread.h> | 17 | #include <linux/kthread.h> |
18 | #include <linux/initrd.h> | ||
18 | 19 | ||
19 | #include <asm/pdc.h> | 20 | #include <asm/pdc.h> |
20 | #include <asm/pdcpat.h> | 21 | #include <asm/pdcpat.h> |
@@ -216,8 +217,16 @@ void __init pdc_pdt_init(void) | |||
216 | } | 217 | } |
217 | 218 | ||
218 | for (i = 0; i < pdt_status.pdt_entries; i++) { | 219 | for (i = 0; i < pdt_status.pdt_entries; i++) { |
220 | unsigned long addr; | ||
221 | |||
219 | report_mem_err(pdt_entry[i]); | 222 | report_mem_err(pdt_entry[i]); |
220 | 223 | ||
224 | addr = pdt_entry[i] & PDT_ADDR_PHYS_MASK; | ||
225 | if (IS_ENABLED(CONFIG_BLK_DEV_INITRD) && | ||
226 | addr >= initrd_start && addr < initrd_end) | ||
227 | pr_crit("CRITICAL: initrd possibly broken " | ||
228 | "due to bad memory!\n"); | ||
229 | |||
221 | /* mark memory page bad */ | 230 | /* mark memory page bad */ |
222 | memblock_reserve(pdt_entry[i] & PAGE_MASK, PAGE_SIZE); | 231 | memblock_reserve(pdt_entry[i] & PAGE_MASK, PAGE_SIZE); |
223 | } | 232 | } |
diff --git a/arch/parisc/kernel/process.c b/arch/parisc/kernel/process.c index a45a67d526f8..30f92391a93e 100644 --- a/arch/parisc/kernel/process.c +++ b/arch/parisc/kernel/process.c | |||
@@ -146,7 +146,7 @@ void machine_power_off(void) | |||
146 | 146 | ||
147 | /* prevent soft lockup/stalled CPU messages for endless loop. */ | 147 | /* prevent soft lockup/stalled CPU messages for endless loop. */ |
148 | rcu_sysrq_start(); | 148 | rcu_sysrq_start(); |
149 | lockup_detector_suspend(); | 149 | lockup_detector_soft_poweroff(); |
150 | for (;;); | 150 | for (;;); |
151 | } | 151 | } |
152 | 152 | ||
diff --git a/arch/parisc/kernel/processor.c b/arch/parisc/kernel/processor.c index a778bd3c107c..e120d63c1b28 100644 --- a/arch/parisc/kernel/processor.c +++ b/arch/parisc/kernel/processor.c | |||
@@ -317,7 +317,7 @@ void __init collect_boot_cpu_data(void) | |||
317 | * | 317 | * |
318 | * o Enable CPU profiling hooks. | 318 | * o Enable CPU profiling hooks. |
319 | */ | 319 | */ |
320 | int init_per_cpu(int cpunum) | 320 | int __init init_per_cpu(int cpunum) |
321 | { | 321 | { |
322 | int ret; | 322 | int ret; |
323 | struct pdc_coproc_cfg coproc_cfg; | 323 | struct pdc_coproc_cfg coproc_cfg; |
diff --git a/arch/parisc/kernel/setup.c b/arch/parisc/kernel/setup.c index dee6f9d6a153..f7d0c3b33d70 100644 --- a/arch/parisc/kernel/setup.c +++ b/arch/parisc/kernel/setup.c | |||
@@ -38,6 +38,7 @@ | |||
38 | #include <linux/export.h> | 38 | #include <linux/export.h> |
39 | #include <linux/sched.h> | 39 | #include <linux/sched.h> |
40 | #include <linux/sched/clock.h> | 40 | #include <linux/sched/clock.h> |
41 | #include <linux/start_kernel.h> | ||
41 | 42 | ||
42 | #include <asm/processor.h> | 43 | #include <asm/processor.h> |
43 | #include <asm/sections.h> | 44 | #include <asm/sections.h> |
@@ -48,6 +49,7 @@ | |||
48 | #include <asm/io.h> | 49 | #include <asm/io.h> |
49 | #include <asm/setup.h> | 50 | #include <asm/setup.h> |
50 | #include <asm/unwind.h> | 51 | #include <asm/unwind.h> |
52 | #include <asm/smp.h> | ||
51 | 53 | ||
52 | static char __initdata command_line[COMMAND_LINE_SIZE]; | 54 | static char __initdata command_line[COMMAND_LINE_SIZE]; |
53 | 55 | ||
@@ -115,7 +117,6 @@ void __init dma_ops_init(void) | |||
115 | } | 117 | } |
116 | #endif | 118 | #endif |
117 | 119 | ||
118 | extern int init_per_cpu(int cpuid); | ||
119 | extern void collect_boot_cpu_data(void); | 120 | extern void collect_boot_cpu_data(void); |
120 | 121 | ||
121 | void __init setup_arch(char **cmdline_p) | 122 | void __init setup_arch(char **cmdline_p) |
@@ -398,9 +399,8 @@ static int __init parisc_init(void) | |||
398 | } | 399 | } |
399 | arch_initcall(parisc_init); | 400 | arch_initcall(parisc_init); |
400 | 401 | ||
401 | void start_parisc(void) | 402 | void __init start_parisc(void) |
402 | { | 403 | { |
403 | extern void start_kernel(void); | ||
404 | extern void early_trap_init(void); | 404 | extern void early_trap_init(void); |
405 | 405 | ||
406 | int ret, cpunum; | 406 | int ret, cpunum; |
diff --git a/arch/parisc/kernel/smp.c b/arch/parisc/kernel/smp.c index 63365106ea19..30c28ab14540 100644 --- a/arch/parisc/kernel/smp.c +++ b/arch/parisc/kernel/smp.c | |||
@@ -255,12 +255,11 @@ void arch_send_call_function_single_ipi(int cpu) | |||
255 | static void __init | 255 | static void __init |
256 | smp_cpu_init(int cpunum) | 256 | smp_cpu_init(int cpunum) |
257 | { | 257 | { |
258 | extern int init_per_cpu(int); /* arch/parisc/kernel/processor.c */ | ||
259 | extern void init_IRQ(void); /* arch/parisc/kernel/irq.c */ | 258 | extern void init_IRQ(void); /* arch/parisc/kernel/irq.c */ |
260 | extern void start_cpu_itimer(void); /* arch/parisc/kernel/time.c */ | 259 | extern void start_cpu_itimer(void); /* arch/parisc/kernel/time.c */ |
261 | 260 | ||
262 | /* Set modes and Enable floating point coprocessor */ | 261 | /* Set modes and Enable floating point coprocessor */ |
263 | (void) init_per_cpu(cpunum); | 262 | init_per_cpu(cpunum); |
264 | 263 | ||
265 | disable_sr_hashing(); | 264 | disable_sr_hashing(); |
266 | 265 | ||
diff --git a/arch/parisc/kernel/traps.c b/arch/parisc/kernel/traps.c index 991654c88eec..230333157fe3 100644 --- a/arch/parisc/kernel/traps.c +++ b/arch/parisc/kernel/traps.c | |||
@@ -817,7 +817,7 @@ void __init initialize_ivt(const void *iva) | |||
817 | u32 check = 0; | 817 | u32 check = 0; |
818 | u32 *ivap; | 818 | u32 *ivap; |
819 | u32 *hpmcp; | 819 | u32 *hpmcp; |
820 | u32 length; | 820 | u32 length, instr; |
821 | 821 | ||
822 | if (strcmp((const char *)iva, "cows can fly")) | 822 | if (strcmp((const char *)iva, "cows can fly")) |
823 | panic("IVT invalid"); | 823 | panic("IVT invalid"); |
@@ -827,6 +827,14 @@ void __init initialize_ivt(const void *iva) | |||
827 | for (i = 0; i < 8; i++) | 827 | for (i = 0; i < 8; i++) |
828 | *ivap++ = 0; | 828 | *ivap++ = 0; |
829 | 829 | ||
830 | /* | ||
831 | * Use PDC_INSTR firmware function to get instruction that invokes | ||
832 | * PDCE_CHECK in HPMC handler. See programming note at page 1-31 of | ||
833 | * the PA 1.1 Firmware Architecture document. | ||
834 | */ | ||
835 | if (pdc_instr(&instr) == PDC_OK) | ||
836 | ivap[0] = instr; | ||
837 | |||
830 | /* Compute Checksum for HPMC handler */ | 838 | /* Compute Checksum for HPMC handler */ |
831 | length = os_hpmc_size; | 839 | length = os_hpmc_size; |
832 | ivap[7] = length; | 840 | ivap[7] = length; |
diff --git a/arch/parisc/kernel/unwind.c b/arch/parisc/kernel/unwind.c index 48dc7d4d20bb..caab39dfa95d 100644 --- a/arch/parisc/kernel/unwind.c +++ b/arch/parisc/kernel/unwind.c | |||
@@ -14,6 +14,7 @@ | |||
14 | #include <linux/slab.h> | 14 | #include <linux/slab.h> |
15 | #include <linux/kallsyms.h> | 15 | #include <linux/kallsyms.h> |
16 | #include <linux/sort.h> | 16 | #include <linux/sort.h> |
17 | #include <linux/sched.h> | ||
17 | 18 | ||
18 | #include <linux/uaccess.h> | 19 | #include <linux/uaccess.h> |
19 | #include <asm/assembly.h> | 20 | #include <asm/assembly.h> |
@@ -279,6 +280,17 @@ static void unwind_frame_regs(struct unwind_frame_info *info) | |||
279 | 280 | ||
280 | info->prev_sp = sp - 64; | 281 | info->prev_sp = sp - 64; |
281 | info->prev_ip = 0; | 282 | info->prev_ip = 0; |
283 | |||
284 | /* The stack is at the end inside the thread_union | ||
285 | * struct. If we reach data, we have reached the | ||
286 | * beginning of the stack and should stop unwinding. */ | ||
287 | if (info->prev_sp >= (unsigned long) task_thread_info(info->t) && | ||
288 | info->prev_sp < ((unsigned long) task_thread_info(info->t) | ||
289 | + THREAD_SZ_ALGN)) { | ||
290 | info->prev_sp = 0; | ||
291 | break; | ||
292 | } | ||
293 | |||
282 | if (get_user(tmp, (unsigned long *)(info->prev_sp - RP_OFFSET))) | 294 | if (get_user(tmp, (unsigned long *)(info->prev_sp - RP_OFFSET))) |
283 | break; | 295 | break; |
284 | info->prev_ip = tmp; | 296 | info->prev_ip = tmp; |
diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c index 5b101f6a5607..e247edbca68e 100644 --- a/arch/parisc/mm/fault.c +++ b/arch/parisc/mm/fault.c | |||
@@ -17,6 +17,7 @@ | |||
17 | #include <linux/interrupt.h> | 17 | #include <linux/interrupt.h> |
18 | #include <linux/extable.h> | 18 | #include <linux/extable.h> |
19 | #include <linux/uaccess.h> | 19 | #include <linux/uaccess.h> |
20 | #include <linux/hugetlb.h> | ||
20 | 21 | ||
21 | #include <asm/traps.h> | 22 | #include <asm/traps.h> |
22 | 23 | ||
@@ -261,7 +262,7 @@ void do_page_fault(struct pt_regs *regs, unsigned long code, | |||
261 | struct task_struct *tsk; | 262 | struct task_struct *tsk; |
262 | struct mm_struct *mm; | 263 | struct mm_struct *mm; |
263 | unsigned long acc_type; | 264 | unsigned long acc_type; |
264 | int fault; | 265 | int fault = 0; |
265 | unsigned int flags; | 266 | unsigned int flags; |
266 | 267 | ||
267 | if (faulthandler_disabled()) | 268 | if (faulthandler_disabled()) |
@@ -315,7 +316,8 @@ good_area: | |||
315 | goto out_of_memory; | 316 | goto out_of_memory; |
316 | else if (fault & VM_FAULT_SIGSEGV) | 317 | else if (fault & VM_FAULT_SIGSEGV) |
317 | goto bad_area; | 318 | goto bad_area; |
318 | else if (fault & VM_FAULT_SIGBUS) | 319 | else if (fault & (VM_FAULT_SIGBUS|VM_FAULT_HWPOISON| |
320 | VM_FAULT_HWPOISON_LARGE)) | ||
319 | goto bad_area; | 321 | goto bad_area; |
320 | BUG(); | 322 | BUG(); |
321 | } | 323 | } |
@@ -352,8 +354,7 @@ bad_area: | |||
352 | 354 | ||
353 | if (user_mode(regs)) { | 355 | if (user_mode(regs)) { |
354 | struct siginfo si; | 356 | struct siginfo si; |
355 | 357 | unsigned int lsb = 0; | |
356 | show_signal_msg(regs, code, address, tsk, vma); | ||
357 | 358 | ||
358 | switch (code) { | 359 | switch (code) { |
359 | case 15: /* Data TLB miss fault/Data page fault */ | 360 | case 15: /* Data TLB miss fault/Data page fault */ |
@@ -386,6 +387,30 @@ bad_area: | |||
386 | si.si_code = (code == 26) ? SEGV_ACCERR : SEGV_MAPERR; | 387 | si.si_code = (code == 26) ? SEGV_ACCERR : SEGV_MAPERR; |
387 | break; | 388 | break; |
388 | } | 389 | } |
390 | |||
391 | #ifdef CONFIG_MEMORY_FAILURE | ||
392 | if (fault & (VM_FAULT_HWPOISON|VM_FAULT_HWPOISON_LARGE)) { | ||
393 | printk(KERN_ERR | ||
394 | "MCE: Killing %s:%d due to hardware memory corruption fault at %08lx\n", | ||
395 | tsk->comm, tsk->pid, address); | ||
396 | si.si_signo = SIGBUS; | ||
397 | si.si_code = BUS_MCEERR_AR; | ||
398 | } | ||
399 | #endif | ||
400 | |||
401 | /* | ||
402 | * Either small page or large page may be poisoned. | ||
403 | * In other words, VM_FAULT_HWPOISON_LARGE and | ||
404 | * VM_FAULT_HWPOISON are mutually exclusive. | ||
405 | */ | ||
406 | if (fault & VM_FAULT_HWPOISON_LARGE) | ||
407 | lsb = hstate_index_to_shift(VM_FAULT_GET_HINDEX(fault)); | ||
408 | else if (fault & VM_FAULT_HWPOISON) | ||
409 | lsb = PAGE_SHIFT; | ||
410 | else | ||
411 | show_signal_msg(regs, code, address, tsk, vma); | ||
412 | si.si_addr_lsb = lsb; | ||
413 | |||
389 | si.si_errno = 0; | 414 | si.si_errno = 0; |
390 | si.si_addr = (void __user *) address; | 415 | si.si_addr = (void __user *) address; |
391 | force_sig_info(si.si_signo, &si, current); | 416 | force_sig_info(si.si_signo, &si, current); |
diff --git a/arch/powerpc/configs/g5_defconfig b/arch/powerpc/configs/g5_defconfig index e084fa548d73..063817fee61c 100644 --- a/arch/powerpc/configs/g5_defconfig +++ b/arch/powerpc/configs/g5_defconfig | |||
@@ -138,10 +138,11 @@ CONFIG_FRAMEBUFFER_CONSOLE=y | |||
138 | CONFIG_LOGO=y | 138 | CONFIG_LOGO=y |
139 | CONFIG_SOUND=m | 139 | CONFIG_SOUND=m |
140 | CONFIG_SND=m | 140 | CONFIG_SND=m |
141 | CONFIG_SND_SEQUENCER=m | 141 | CONFIG_SND_OSSEMUL=y |
142 | CONFIG_SND_MIXER_OSS=m | 142 | CONFIG_SND_MIXER_OSS=m |
143 | CONFIG_SND_PCM_OSS=m | 143 | CONFIG_SND_PCM_OSS=m |
144 | CONFIG_SND_SEQUENCER_OSS=y | 144 | CONFIG_SND_SEQUENCER=m |
145 | CONFIG_SND_SEQUENCER_OSS=m | ||
145 | CONFIG_SND_POWERMAC=m | 146 | CONFIG_SND_POWERMAC=m |
146 | CONFIG_SND_AOA=m | 147 | CONFIG_SND_AOA=m |
147 | CONFIG_SND_AOA_FABRIC_LAYOUT=m | 148 | CONFIG_SND_AOA_FABRIC_LAYOUT=m |
diff --git a/arch/powerpc/configs/gamecube_defconfig b/arch/powerpc/configs/gamecube_defconfig index 79bbc8238b32..805b0f87653c 100644 --- a/arch/powerpc/configs/gamecube_defconfig +++ b/arch/powerpc/configs/gamecube_defconfig | |||
@@ -64,11 +64,12 @@ CONFIG_LOGO=y | |||
64 | # CONFIG_LOGO_LINUX_CLUT224 is not set | 64 | # CONFIG_LOGO_LINUX_CLUT224 is not set |
65 | CONFIG_SOUND=y | 65 | CONFIG_SOUND=y |
66 | CONFIG_SND=y | 66 | CONFIG_SND=y |
67 | CONFIG_SND_SEQUENCER=y | 67 | CONFIG_SND_OSSEMUL=y |
68 | CONFIG_SND_MIXER_OSS=y | 68 | CONFIG_SND_MIXER_OSS=y |
69 | CONFIG_SND_PCM_OSS=y | 69 | CONFIG_SND_PCM_OSS=y |
70 | CONFIG_SND_SEQUENCER_OSS=y | ||
71 | # CONFIG_SND_VERBOSE_PROCFS is not set | 70 | # CONFIG_SND_VERBOSE_PROCFS is not set |
71 | CONFIG_SND_SEQUENCER=y | ||
72 | CONFIG_SND_SEQUENCER_OSS=y | ||
72 | # CONFIG_USB_SUPPORT is not set | 73 | # CONFIG_USB_SUPPORT is not set |
73 | CONFIG_RTC_CLASS=y | 74 | CONFIG_RTC_CLASS=y |
74 | CONFIG_RTC_DRV_GENERIC=y | 75 | CONFIG_RTC_DRV_GENERIC=y |
diff --git a/arch/powerpc/configs/pasemi_defconfig b/arch/powerpc/configs/pasemi_defconfig index 8cf4a46bef86..6daa56f8895c 100644 --- a/arch/powerpc/configs/pasemi_defconfig +++ b/arch/powerpc/configs/pasemi_defconfig | |||
@@ -115,9 +115,10 @@ CONFIG_VGACON_SOFT_SCROLLBACK=y | |||
115 | CONFIG_LOGO=y | 115 | CONFIG_LOGO=y |
116 | CONFIG_SOUND=y | 116 | CONFIG_SOUND=y |
117 | CONFIG_SND=y | 117 | CONFIG_SND=y |
118 | CONFIG_SND_SEQUENCER=y | 118 | CONFIG_SND_OSSEMUL=y |
119 | CONFIG_SND_MIXER_OSS=y | 119 | CONFIG_SND_MIXER_OSS=y |
120 | CONFIG_SND_PCM_OSS=y | 120 | CONFIG_SND_PCM_OSS=y |
121 | CONFIG_SND_SEQUENCER=y | ||
121 | CONFIG_SND_SEQUENCER_OSS=y | 122 | CONFIG_SND_SEQUENCER_OSS=y |
122 | CONFIG_SND_USB_AUDIO=y | 123 | CONFIG_SND_USB_AUDIO=y |
123 | CONFIG_SND_USB_USX2Y=y | 124 | CONFIG_SND_USB_USX2Y=y |
diff --git a/arch/powerpc/configs/pmac32_defconfig b/arch/powerpc/configs/pmac32_defconfig index 8e798b1fbc99..1aab9a62a681 100644 --- a/arch/powerpc/configs/pmac32_defconfig +++ b/arch/powerpc/configs/pmac32_defconfig | |||
@@ -227,11 +227,12 @@ CONFIG_FRAMEBUFFER_CONSOLE=y | |||
227 | CONFIG_LOGO=y | 227 | CONFIG_LOGO=y |
228 | CONFIG_SOUND=m | 228 | CONFIG_SOUND=m |
229 | CONFIG_SND=m | 229 | CONFIG_SND=m |
230 | CONFIG_SND_SEQUENCER=m | 230 | CONFIG_SND_OSSEMUL=y |
231 | CONFIG_SND_SEQ_DUMMY=m | ||
232 | CONFIG_SND_MIXER_OSS=m | 231 | CONFIG_SND_MIXER_OSS=m |
233 | CONFIG_SND_PCM_OSS=m | 232 | CONFIG_SND_PCM_OSS=m |
234 | CONFIG_SND_SEQUENCER_OSS=y | 233 | CONFIG_SND_SEQUENCER=m |
234 | CONFIG_SND_SEQ_DUMMY=m | ||
235 | CONFIG_SND_SEQUENCER_OSS=m | ||
235 | CONFIG_SND_DUMMY=m | 236 | CONFIG_SND_DUMMY=m |
236 | CONFIG_SND_POWERMAC=m | 237 | CONFIG_SND_POWERMAC=m |
237 | CONFIG_SND_AOA=m | 238 | CONFIG_SND_AOA=m |
diff --git a/arch/powerpc/configs/ppc64_defconfig b/arch/powerpc/configs/ppc64_defconfig index 791db775a09c..6ddca80c52c3 100644 --- a/arch/powerpc/configs/ppc64_defconfig +++ b/arch/powerpc/configs/ppc64_defconfig | |||
@@ -222,11 +222,12 @@ CONFIG_FRAMEBUFFER_CONSOLE=y | |||
222 | CONFIG_LOGO=y | 222 | CONFIG_LOGO=y |
223 | CONFIG_SOUND=m | 223 | CONFIG_SOUND=m |
224 | CONFIG_SND=m | 224 | CONFIG_SND=m |
225 | CONFIG_SND_SEQUENCER=m | 225 | CONFIG_SND_OSSEMUL=y |
226 | CONFIG_SND_SEQ_DUMMY=m | ||
227 | CONFIG_SND_MIXER_OSS=m | 226 | CONFIG_SND_MIXER_OSS=m |
228 | CONFIG_SND_PCM_OSS=m | 227 | CONFIG_SND_PCM_OSS=m |
229 | CONFIG_SND_SEQUENCER_OSS=y | 228 | CONFIG_SND_SEQUENCER=m |
229 | CONFIG_SND_SEQ_DUMMY=m | ||
230 | CONFIG_SND_SEQUENCER_OSS=m | ||
230 | CONFIG_SND_POWERMAC=m | 231 | CONFIG_SND_POWERMAC=m |
231 | CONFIG_SND_AOA=m | 232 | CONFIG_SND_AOA=m |
232 | CONFIG_SND_AOA_FABRIC_LAYOUT=m | 233 | CONFIG_SND_AOA_FABRIC_LAYOUT=m |
diff --git a/arch/powerpc/configs/ppc64e_defconfig b/arch/powerpc/configs/ppc64e_defconfig index d0fe0f8f77c2..41d85cb3c9a2 100644 --- a/arch/powerpc/configs/ppc64e_defconfig +++ b/arch/powerpc/configs/ppc64e_defconfig | |||
@@ -141,11 +141,12 @@ CONFIG_FRAMEBUFFER_CONSOLE=y | |||
141 | CONFIG_LOGO=y | 141 | CONFIG_LOGO=y |
142 | CONFIG_SOUND=m | 142 | CONFIG_SOUND=m |
143 | CONFIG_SND=m | 143 | CONFIG_SND=m |
144 | CONFIG_SND_SEQUENCER=m | 144 | CONFIG_SND_OSSEMUL=y |
145 | CONFIG_SND_SEQ_DUMMY=m | ||
146 | CONFIG_SND_MIXER_OSS=m | 145 | CONFIG_SND_MIXER_OSS=m |
147 | CONFIG_SND_PCM_OSS=m | 146 | CONFIG_SND_PCM_OSS=m |
148 | CONFIG_SND_SEQUENCER_OSS=y | 147 | CONFIG_SND_SEQUENCER=m |
148 | CONFIG_SND_SEQ_DUMMY=m | ||
149 | CONFIG_SND_SEQUENCER_OSS=m | ||
149 | CONFIG_HID_DRAGONRISE=y | 150 | CONFIG_HID_DRAGONRISE=y |
150 | CONFIG_HID_GYRATION=y | 151 | CONFIG_HID_GYRATION=y |
151 | CONFIG_HID_TWINHAN=y | 152 | CONFIG_HID_TWINHAN=y |
diff --git a/arch/powerpc/configs/ppc6xx_defconfig b/arch/powerpc/configs/ppc6xx_defconfig index ae6eba482d75..da0e8d535eb8 100644 --- a/arch/powerpc/configs/ppc6xx_defconfig +++ b/arch/powerpc/configs/ppc6xx_defconfig | |||
@@ -789,17 +789,18 @@ CONFIG_LOGO=y | |||
789 | # CONFIG_LOGO_LINUX_VGA16 is not set | 789 | # CONFIG_LOGO_LINUX_VGA16 is not set |
790 | CONFIG_SOUND=m | 790 | CONFIG_SOUND=m |
791 | CONFIG_SND=m | 791 | CONFIG_SND=m |
792 | CONFIG_SND_SEQUENCER=m | 792 | CONFIG_SND_OSSEMUL=y |
793 | CONFIG_SND_SEQ_DUMMY=m | ||
794 | CONFIG_SND_MIXER_OSS=m | 793 | CONFIG_SND_MIXER_OSS=m |
795 | CONFIG_SND_PCM_OSS=m | 794 | CONFIG_SND_PCM_OSS=m |
796 | CONFIG_SND_SEQUENCER_OSS=y | ||
797 | CONFIG_SND_DYNAMIC_MINORS=y | 795 | CONFIG_SND_DYNAMIC_MINORS=y |
798 | # CONFIG_SND_SUPPORT_OLD_API is not set | 796 | # CONFIG_SND_SUPPORT_OLD_API is not set |
799 | CONFIG_SND_VERBOSE_PRINTK=y | 797 | CONFIG_SND_VERBOSE_PRINTK=y |
800 | CONFIG_SND_DEBUG=y | 798 | CONFIG_SND_DEBUG=y |
801 | CONFIG_SND_DEBUG_VERBOSE=y | 799 | CONFIG_SND_DEBUG_VERBOSE=y |
802 | CONFIG_SND_PCM_XRUN_DEBUG=y | 800 | CONFIG_SND_PCM_XRUN_DEBUG=y |
801 | CONFIG_SND_SEQUENCER=m | ||
802 | CONFIG_SND_SEQ_DUMMY=m | ||
803 | CONFIG_SND_SEQUENCER_OSS=m | ||
803 | CONFIG_SND_DUMMY=m | 804 | CONFIG_SND_DUMMY=m |
804 | CONFIG_SND_VIRMIDI=m | 805 | CONFIG_SND_VIRMIDI=m |
805 | CONFIG_SND_MTPAV=m | 806 | CONFIG_SND_MTPAV=m |
diff --git a/arch/powerpc/configs/wii_defconfig b/arch/powerpc/configs/wii_defconfig index aef41b17a8bc..9c7400a19e9d 100644 --- a/arch/powerpc/configs/wii_defconfig +++ b/arch/powerpc/configs/wii_defconfig | |||
@@ -79,11 +79,12 @@ CONFIG_FB=y | |||
79 | CONFIG_FRAMEBUFFER_CONSOLE=y | 79 | CONFIG_FRAMEBUFFER_CONSOLE=y |
80 | CONFIG_SOUND=y | 80 | CONFIG_SOUND=y |
81 | CONFIG_SND=y | 81 | CONFIG_SND=y |
82 | CONFIG_SND_SEQUENCER=y | 82 | CONFIG_SND_OSSEMUL=y |
83 | CONFIG_SND_MIXER_OSS=y | 83 | CONFIG_SND_MIXER_OSS=y |
84 | CONFIG_SND_PCM_OSS=y | 84 | CONFIG_SND_PCM_OSS=y |
85 | CONFIG_SND_SEQUENCER_OSS=y | ||
86 | # CONFIG_SND_VERBOSE_PROCFS is not set | 85 | # CONFIG_SND_VERBOSE_PROCFS is not set |
86 | CONFIG_SND_SEQUENCER=y | ||
87 | CONFIG_SND_SEQUENCER_OSS=y | ||
87 | CONFIG_HID_APPLE=m | 88 | CONFIG_HID_APPLE=m |
88 | CONFIG_HID_WACOM=m | 89 | CONFIG_HID_WACOM=m |
89 | CONFIG_MMC=y | 90 | CONFIG_MMC=y |
diff --git a/arch/powerpc/kernel/dt_cpu_ftrs.c b/arch/powerpc/kernel/dt_cpu_ftrs.c index 1df770e8cbe0..7275fed271af 100644 --- a/arch/powerpc/kernel/dt_cpu_ftrs.c +++ b/arch/powerpc/kernel/dt_cpu_ftrs.c | |||
@@ -102,10 +102,10 @@ static void cpufeatures_flush_tlb(void) | |||
102 | case PVR_POWER8: | 102 | case PVR_POWER8: |
103 | case PVR_POWER8E: | 103 | case PVR_POWER8E: |
104 | case PVR_POWER8NVL: | 104 | case PVR_POWER8NVL: |
105 | __flush_tlb_power8(POWER8_TLB_SETS); | 105 | __flush_tlb_power8(TLB_INVAL_SCOPE_GLOBAL); |
106 | break; | 106 | break; |
107 | case PVR_POWER9: | 107 | case PVR_POWER9: |
108 | __flush_tlb_power9(POWER9_TLB_SETS_HASH); | 108 | __flush_tlb_power9(TLB_INVAL_SCOPE_GLOBAL); |
109 | break; | 109 | break; |
110 | default: | 110 | default: |
111 | pr_err("unknown CPU version for boot TLB flush\n"); | 111 | pr_err("unknown CPU version for boot TLB flush\n"); |
diff --git a/arch/powerpc/kernel/eeh.c b/arch/powerpc/kernel/eeh.c index 9e816787c0d4..116000b45531 100644 --- a/arch/powerpc/kernel/eeh.c +++ b/arch/powerpc/kernel/eeh.c | |||
@@ -1019,6 +1019,10 @@ int eeh_init(void) | |||
1019 | } else if ((ret = eeh_ops->init())) | 1019 | } else if ((ret = eeh_ops->init())) |
1020 | return ret; | 1020 | return ret; |
1021 | 1021 | ||
1022 | /* Initialize PHB PEs */ | ||
1023 | list_for_each_entry_safe(hose, tmp, &hose_list, list_node) | ||
1024 | eeh_dev_phb_init_dynamic(hose); | ||
1025 | |||
1022 | /* Initialize EEH event */ | 1026 | /* Initialize EEH event */ |
1023 | ret = eeh_event_init(); | 1027 | ret = eeh_event_init(); |
1024 | if (ret) | 1028 | if (ret) |
diff --git a/arch/powerpc/kernel/eeh_dev.c b/arch/powerpc/kernel/eeh_dev.c index ad04ecd63c20..a34e6912c15e 100644 --- a/arch/powerpc/kernel/eeh_dev.c +++ b/arch/powerpc/kernel/eeh_dev.c | |||
@@ -78,21 +78,3 @@ void eeh_dev_phb_init_dynamic(struct pci_controller *phb) | |||
78 | /* EEH PE for PHB */ | 78 | /* EEH PE for PHB */ |
79 | eeh_phb_pe_create(phb); | 79 | eeh_phb_pe_create(phb); |
80 | } | 80 | } |
81 | |||
82 | /** | ||
83 | * eeh_dev_phb_init - Create EEH devices for devices included in existing PHBs | ||
84 | * | ||
85 | * Scan all the existing PHBs and create EEH devices for their OF | ||
86 | * nodes and their children OF nodes | ||
87 | */ | ||
88 | static int __init eeh_dev_phb_init(void) | ||
89 | { | ||
90 | struct pci_controller *phb, *tmp; | ||
91 | |||
92 | list_for_each_entry_safe(phb, tmp, &hose_list, list_node) | ||
93 | eeh_dev_phb_init_dynamic(phb); | ||
94 | |||
95 | return 0; | ||
96 | } | ||
97 | |||
98 | core_initcall(eeh_dev_phb_init); | ||
diff --git a/arch/powerpc/kernel/mce_power.c b/arch/powerpc/kernel/mce_power.c index b76ca198e09c..72f153c6f3fa 100644 --- a/arch/powerpc/kernel/mce_power.c +++ b/arch/powerpc/kernel/mce_power.c | |||
@@ -624,5 +624,18 @@ long __machine_check_early_realmode_p8(struct pt_regs *regs) | |||
624 | 624 | ||
625 | long __machine_check_early_realmode_p9(struct pt_regs *regs) | 625 | long __machine_check_early_realmode_p9(struct pt_regs *regs) |
626 | { | 626 | { |
627 | /* | ||
628 | * On POWER9 DD2.1 and below, it's possible to get a machine check | ||
629 | * caused by a paste instruction where only DSISR bit 25 is set. This | ||
630 | * will result in the MCE handler seeing an unknown event and the kernel | ||
631 | * crashing. An MCE that occurs like this is spurious, so we don't need | ||
632 | * to do anything in terms of servicing it. If there is something that | ||
633 | * needs to be serviced, the CPU will raise the MCE again with the | ||
634 | * correct DSISR so that it can be serviced properly. So detect this | ||
635 | * case and mark it as handled. | ||
636 | */ | ||
637 | if (SRR1_MC_LOADSTORE(regs->msr) && regs->dsisr == 0x02000000) | ||
638 | return 1; | ||
639 | |||
627 | return mce_handle_error(regs, mce_p9_derror_table, mce_p9_ierror_table); | 640 | return mce_handle_error(regs, mce_p9_derror_table, mce_p9_ierror_table); |
628 | } | 641 | } |
diff --git a/arch/powerpc/kernel/optprobes.c b/arch/powerpc/kernel/optprobes.c index 6f8273f5e988..91e037ab20a1 100644 --- a/arch/powerpc/kernel/optprobes.c +++ b/arch/powerpc/kernel/optprobes.c | |||
@@ -104,8 +104,10 @@ static unsigned long can_optimize(struct kprobe *p) | |||
104 | * and that can be emulated. | 104 | * and that can be emulated. |
105 | */ | 105 | */ |
106 | if (!is_conditional_branch(*p->ainsn.insn) && | 106 | if (!is_conditional_branch(*p->ainsn.insn) && |
107 | analyse_instr(&op, ®s, *p->ainsn.insn)) | 107 | analyse_instr(&op, ®s, *p->ainsn.insn) == 1) { |
108 | emulate_update_regs(®s, &op); | ||
108 | nip = regs.nip; | 109 | nip = regs.nip; |
110 | } | ||
109 | 111 | ||
110 | return nip; | 112 | return nip; |
111 | } | 113 | } |
diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c index 07cd22e35405..f52ad5bb7109 100644 --- a/arch/powerpc/kernel/ptrace.c +++ b/arch/powerpc/kernel/ptrace.c | |||
@@ -131,7 +131,7 @@ static void flush_tmregs_to_thread(struct task_struct *tsk) | |||
131 | * in the appropriate thread structures from live. | 131 | * in the appropriate thread structures from live. |
132 | */ | 132 | */ |
133 | 133 | ||
134 | if (tsk != current) | 134 | if ((!cpu_has_feature(CPU_FTR_TM)) || (tsk != current)) |
135 | return; | 135 | return; |
136 | 136 | ||
137 | if (MSR_TM_SUSPENDED(mfmsr())) { | 137 | if (MSR_TM_SUSPENDED(mfmsr())) { |
diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c index 0ac741fae90e..2e3bc16d02b2 100644 --- a/arch/powerpc/kernel/setup-common.c +++ b/arch/powerpc/kernel/setup-common.c | |||
@@ -904,9 +904,6 @@ void __init setup_arch(char **cmdline_p) | |||
904 | #endif | 904 | #endif |
905 | #endif | 905 | #endif |
906 | 906 | ||
907 | #ifdef CONFIG_PPC_64K_PAGES | ||
908 | init_mm.context.pte_frag = NULL; | ||
909 | #endif | ||
910 | #ifdef CONFIG_SPAPR_TCE_IOMMU | 907 | #ifdef CONFIG_SPAPR_TCE_IOMMU |
911 | mm_iommu_init(&init_mm); | 908 | mm_iommu_init(&init_mm); |
912 | #endif | 909 | #endif |
diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c index ec74e203ee04..13c9dcdcba69 100644 --- a/arch/powerpc/kernel/traps.c +++ b/arch/powerpc/kernel/traps.c | |||
@@ -437,6 +437,7 @@ static inline int check_io_access(struct pt_regs *regs) | |||
437 | int machine_check_e500mc(struct pt_regs *regs) | 437 | int machine_check_e500mc(struct pt_regs *regs) |
438 | { | 438 | { |
439 | unsigned long mcsr = mfspr(SPRN_MCSR); | 439 | unsigned long mcsr = mfspr(SPRN_MCSR); |
440 | unsigned long pvr = mfspr(SPRN_PVR); | ||
440 | unsigned long reason = mcsr; | 441 | unsigned long reason = mcsr; |
441 | int recoverable = 1; | 442 | int recoverable = 1; |
442 | 443 | ||
@@ -478,8 +479,15 @@ int machine_check_e500mc(struct pt_regs *regs) | |||
478 | * may still get logged and cause a machine check. We should | 479 | * may still get logged and cause a machine check. We should |
479 | * only treat the non-write shadow case as non-recoverable. | 480 | * only treat the non-write shadow case as non-recoverable. |
480 | */ | 481 | */ |
481 | if (!(mfspr(SPRN_L1CSR2) & L1CSR2_DCWS)) | 482 | /* On e6500 core, L1 DCWS (Data cache write shadow mode) bit |
482 | recoverable = 0; | 483 | * is not implemented but L1 data cache always runs in write |
484 | * shadow mode. Hence on data cache parity errors HW will | ||
485 | * automatically invalidate the L1 Data Cache. | ||
486 | */ | ||
487 | if (PVR_VER(pvr) != PVR_VER_E6500) { | ||
488 | if (!(mfspr(SPRN_L1CSR2) & L1CSR2_DCWS)) | ||
489 | recoverable = 0; | ||
490 | } | ||
483 | } | 491 | } |
484 | 492 | ||
485 | if (reason & MCSR_L2MMU_MHIT) { | 493 | if (reason & MCSR_L2MMU_MHIT) { |
diff --git a/arch/powerpc/kernel/watchdog.c b/arch/powerpc/kernel/watchdog.c index 2f6eadd9408d..c702a8981452 100644 --- a/arch/powerpc/kernel/watchdog.c +++ b/arch/powerpc/kernel/watchdog.c | |||
@@ -310,9 +310,6 @@ static int start_wd_on_cpu(unsigned int cpu) | |||
310 | if (!(watchdog_enabled & NMI_WATCHDOG_ENABLED)) | 310 | if (!(watchdog_enabled & NMI_WATCHDOG_ENABLED)) |
311 | return 0; | 311 | return 0; |
312 | 312 | ||
313 | if (watchdog_suspended) | ||
314 | return 0; | ||
315 | |||
316 | if (!cpumask_test_cpu(cpu, &watchdog_cpumask)) | 313 | if (!cpumask_test_cpu(cpu, &watchdog_cpumask)) |
317 | return 0; | 314 | return 0; |
318 | 315 | ||
@@ -358,36 +355,39 @@ static void watchdog_calc_timeouts(void) | |||
358 | wd_timer_period_ms = watchdog_thresh * 1000 * 2 / 5; | 355 | wd_timer_period_ms = watchdog_thresh * 1000 * 2 / 5; |
359 | } | 356 | } |
360 | 357 | ||
361 | void watchdog_nmi_reconfigure(void) | 358 | void watchdog_nmi_stop(void) |
362 | { | 359 | { |
363 | int cpu; | 360 | int cpu; |
364 | 361 | ||
365 | watchdog_calc_timeouts(); | ||
366 | |||
367 | for_each_cpu(cpu, &wd_cpus_enabled) | 362 | for_each_cpu(cpu, &wd_cpus_enabled) |
368 | stop_wd_on_cpu(cpu); | 363 | stop_wd_on_cpu(cpu); |
364 | } | ||
369 | 365 | ||
366 | void watchdog_nmi_start(void) | ||
367 | { | ||
368 | int cpu; | ||
369 | |||
370 | watchdog_calc_timeouts(); | ||
370 | for_each_cpu_and(cpu, cpu_online_mask, &watchdog_cpumask) | 371 | for_each_cpu_and(cpu, cpu_online_mask, &watchdog_cpumask) |
371 | start_wd_on_cpu(cpu); | 372 | start_wd_on_cpu(cpu); |
372 | } | 373 | } |
373 | 374 | ||
374 | /* | 375 | /* |
375 | * This runs after lockup_detector_init() which sets up watchdog_cpumask. | 376 | * Invoked from core watchdog init. |
376 | */ | 377 | */ |
377 | static int __init powerpc_watchdog_init(void) | 378 | int __init watchdog_nmi_probe(void) |
378 | { | 379 | { |
379 | int err; | 380 | int err; |
380 | 381 | ||
381 | watchdog_calc_timeouts(); | 382 | err = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, |
382 | 383 | "powerpc/watchdog:online", | |
383 | err = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "powerpc/watchdog:online", | 384 | start_wd_on_cpu, stop_wd_on_cpu); |
384 | start_wd_on_cpu, stop_wd_on_cpu); | 385 | if (err < 0) { |
385 | if (err < 0) | ||
386 | pr_warn("Watchdog could not be initialized"); | 386 | pr_warn("Watchdog could not be initialized"); |
387 | 387 | return err; | |
388 | } | ||
388 | return 0; | 389 | return 0; |
389 | } | 390 | } |
390 | arch_initcall(powerpc_watchdog_init); | ||
391 | 391 | ||
392 | static void handle_backtrace_ipi(struct pt_regs *regs) | 392 | static void handle_backtrace_ipi(struct pt_regs *regs) |
393 | { | 393 | { |
diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S index 17936f82d3c7..ec69fa45d5a2 100644 --- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S +++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S | |||
@@ -1121,6 +1121,13 @@ END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR) | |||
1121 | BEGIN_FTR_SECTION | 1121 | BEGIN_FTR_SECTION |
1122 | mtspr SPRN_PPR, r0 | 1122 | mtspr SPRN_PPR, r0 |
1123 | END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR) | 1123 | END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR) |
1124 | |||
1125 | /* Move canary into DSISR to check for later */ | ||
1126 | BEGIN_FTR_SECTION | ||
1127 | li r0, 0x7fff | ||
1128 | mtspr SPRN_HDSISR, r0 | ||
1129 | END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300) | ||
1130 | |||
1124 | ld r0, VCPU_GPR(R0)(r4) | 1131 | ld r0, VCPU_GPR(R0)(r4) |
1125 | ld r4, VCPU_GPR(R4)(r4) | 1132 | ld r4, VCPU_GPR(R4)(r4) |
1126 | 1133 | ||
@@ -1956,9 +1963,14 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_RADIX) | |||
1956 | kvmppc_hdsi: | 1963 | kvmppc_hdsi: |
1957 | ld r3, VCPU_KVM(r9) | 1964 | ld r3, VCPU_KVM(r9) |
1958 | lbz r0, KVM_RADIX(r3) | 1965 | lbz r0, KVM_RADIX(r3) |
1959 | cmpwi r0, 0 | ||
1960 | mfspr r4, SPRN_HDAR | 1966 | mfspr r4, SPRN_HDAR |
1961 | mfspr r6, SPRN_HDSISR | 1967 | mfspr r6, SPRN_HDSISR |
1968 | BEGIN_FTR_SECTION | ||
1969 | /* Look for DSISR canary. If we find it, retry instruction */ | ||
1970 | cmpdi r6, 0x7fff | ||
1971 | beq 6f | ||
1972 | END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300) | ||
1973 | cmpwi r0, 0 | ||
1962 | bne .Lradix_hdsi /* on radix, just save DAR/DSISR/ASDR */ | 1974 | bne .Lradix_hdsi /* on radix, just save DAR/DSISR/ASDR */ |
1963 | /* HPTE not found fault or protection fault? */ | 1975 | /* HPTE not found fault or protection fault? */ |
1964 | andis. r0, r6, (DSISR_NOHPTE | DSISR_PROTFAULT)@h | 1976 | andis. r0, r6, (DSISR_NOHPTE | DSISR_PROTFAULT)@h |
diff --git a/arch/powerpc/kvm/book3s_xive.c b/arch/powerpc/kvm/book3s_xive.c index 13304622ab1c..bf457843e032 100644 --- a/arch/powerpc/kvm/book3s_xive.c +++ b/arch/powerpc/kvm/book3s_xive.c | |||
@@ -622,7 +622,7 @@ int kvmppc_xive_get_xive(struct kvm *kvm, u32 irq, u32 *server, | |||
622 | return -EINVAL; | 622 | return -EINVAL; |
623 | state = &sb->irq_state[idx]; | 623 | state = &sb->irq_state[idx]; |
624 | arch_spin_lock(&sb->lock); | 624 | arch_spin_lock(&sb->lock); |
625 | *server = state->guest_server; | 625 | *server = state->act_server; |
626 | *priority = state->guest_priority; | 626 | *priority = state->guest_priority; |
627 | arch_spin_unlock(&sb->lock); | 627 | arch_spin_unlock(&sb->lock); |
628 | 628 | ||
@@ -1331,7 +1331,7 @@ static int xive_get_source(struct kvmppc_xive *xive, long irq, u64 addr) | |||
1331 | xive->saved_src_count++; | 1331 | xive->saved_src_count++; |
1332 | 1332 | ||
1333 | /* Convert saved state into something compatible with xics */ | 1333 | /* Convert saved state into something compatible with xics */ |
1334 | val = state->guest_server; | 1334 | val = state->act_server; |
1335 | prio = state->saved_scan_prio; | 1335 | prio = state->saved_scan_prio; |
1336 | 1336 | ||
1337 | if (prio == MASKED) { | 1337 | if (prio == MASKED) { |
@@ -1507,7 +1507,6 @@ static int xive_set_source(struct kvmppc_xive *xive, long irq, u64 addr) | |||
1507 | /* First convert prio and mark interrupt as untargetted */ | 1507 | /* First convert prio and mark interrupt as untargetted */ |
1508 | act_prio = xive_prio_from_guest(guest_prio); | 1508 | act_prio = xive_prio_from_guest(guest_prio); |
1509 | state->act_priority = MASKED; | 1509 | state->act_priority = MASKED; |
1510 | state->guest_server = server; | ||
1511 | 1510 | ||
1512 | /* | 1511 | /* |
1513 | * We need to drop the lock due to the mutex below. Hopefully | 1512 | * We need to drop the lock due to the mutex below. Hopefully |
diff --git a/arch/powerpc/kvm/book3s_xive.h b/arch/powerpc/kvm/book3s_xive.h index 5938f7644dc1..6ba63f8e8a61 100644 --- a/arch/powerpc/kvm/book3s_xive.h +++ b/arch/powerpc/kvm/book3s_xive.h | |||
@@ -35,7 +35,6 @@ struct kvmppc_xive_irq_state { | |||
35 | struct xive_irq_data *pt_data; /* XIVE Pass-through associated data */ | 35 | struct xive_irq_data *pt_data; /* XIVE Pass-through associated data */ |
36 | 36 | ||
37 | /* Targetting as set by guest */ | 37 | /* Targetting as set by guest */ |
38 | u32 guest_server; /* Current guest selected target */ | ||
39 | u8 guest_priority; /* Guest set priority */ | 38 | u8 guest_priority; /* Guest set priority */ |
40 | u8 saved_priority; /* Saved priority when masking */ | 39 | u8 saved_priority; /* Saved priority when masking */ |
41 | 40 | ||
diff --git a/arch/powerpc/lib/sstep.c b/arch/powerpc/lib/sstep.c index fb9f58b868e7..5e8418c28bd8 100644 --- a/arch/powerpc/lib/sstep.c +++ b/arch/powerpc/lib/sstep.c | |||
@@ -944,9 +944,9 @@ NOKPROBE_SYMBOL(emulate_dcbz); | |||
944 | : "r" (addr), "i" (-EFAULT), "0" (err)) | 944 | : "r" (addr), "i" (-EFAULT), "0" (err)) |
945 | 945 | ||
946 | static nokprobe_inline void set_cr0(const struct pt_regs *regs, | 946 | static nokprobe_inline void set_cr0(const struct pt_regs *regs, |
947 | struct instruction_op *op, int rd) | 947 | struct instruction_op *op) |
948 | { | 948 | { |
949 | long val = regs->gpr[rd]; | 949 | long val = op->val; |
950 | 950 | ||
951 | op->type |= SETCC; | 951 | op->type |= SETCC; |
952 | op->ccval = (regs->ccr & 0x0fffffff) | ((regs->xer >> 3) & 0x10000000); | 952 | op->ccval = (regs->ccr & 0x0fffffff) | ((regs->xer >> 3) & 0x10000000); |
@@ -1326,7 +1326,7 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs, | |||
1326 | case 13: /* addic. */ | 1326 | case 13: /* addic. */ |
1327 | imm = (short) instr; | 1327 | imm = (short) instr; |
1328 | add_with_carry(regs, op, rd, regs->gpr[ra], imm, 0); | 1328 | add_with_carry(regs, op, rd, regs->gpr[ra], imm, 0); |
1329 | set_cr0(regs, op, rd); | 1329 | set_cr0(regs, op); |
1330 | return 1; | 1330 | return 1; |
1331 | 1331 | ||
1332 | case 14: /* addi */ | 1332 | case 14: /* addi */ |
@@ -1397,13 +1397,13 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs, | |||
1397 | 1397 | ||
1398 | case 28: /* andi. */ | 1398 | case 28: /* andi. */ |
1399 | op->val = regs->gpr[rd] & (unsigned short) instr; | 1399 | op->val = regs->gpr[rd] & (unsigned short) instr; |
1400 | set_cr0(regs, op, ra); | 1400 | set_cr0(regs, op); |
1401 | goto logical_done_nocc; | 1401 | goto logical_done_nocc; |
1402 | 1402 | ||
1403 | case 29: /* andis. */ | 1403 | case 29: /* andis. */ |
1404 | imm = (unsigned short) instr; | 1404 | imm = (unsigned short) instr; |
1405 | op->val = regs->gpr[rd] & (imm << 16); | 1405 | op->val = regs->gpr[rd] & (imm << 16); |
1406 | set_cr0(regs, op, ra); | 1406 | set_cr0(regs, op); |
1407 | goto logical_done_nocc; | 1407 | goto logical_done_nocc; |
1408 | 1408 | ||
1409 | #ifdef __powerpc64__ | 1409 | #ifdef __powerpc64__ |
@@ -1513,10 +1513,10 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs, | |||
1513 | op->type = COMPUTE + SETCC; | 1513 | op->type = COMPUTE + SETCC; |
1514 | imm = 0xf0000000UL; | 1514 | imm = 0xf0000000UL; |
1515 | val = regs->gpr[rd]; | 1515 | val = regs->gpr[rd]; |
1516 | op->val = regs->ccr; | 1516 | op->ccval = regs->ccr; |
1517 | for (sh = 0; sh < 8; ++sh) { | 1517 | for (sh = 0; sh < 8; ++sh) { |
1518 | if (instr & (0x80000 >> sh)) | 1518 | if (instr & (0x80000 >> sh)) |
1519 | op->val = (op->val & ~imm) | | 1519 | op->ccval = (op->ccval & ~imm) | |
1520 | (val & imm); | 1520 | (val & imm); |
1521 | imm >>= 4; | 1521 | imm >>= 4; |
1522 | } | 1522 | } |
@@ -1651,8 +1651,9 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs, | |||
1651 | goto arith_done; | 1651 | goto arith_done; |
1652 | 1652 | ||
1653 | case 235: /* mullw */ | 1653 | case 235: /* mullw */ |
1654 | op->val = (unsigned int) regs->gpr[ra] * | 1654 | op->val = (long)(int) regs->gpr[ra] * |
1655 | (unsigned int) regs->gpr[rb]; | 1655 | (int) regs->gpr[rb]; |
1656 | |||
1656 | goto arith_done; | 1657 | goto arith_done; |
1657 | 1658 | ||
1658 | case 266: /* add */ | 1659 | case 266: /* add */ |
@@ -2526,7 +2527,7 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs, | |||
2526 | 2527 | ||
2527 | logical_done: | 2528 | logical_done: |
2528 | if (instr & 1) | 2529 | if (instr & 1) |
2529 | set_cr0(regs, op, ra); | 2530 | set_cr0(regs, op); |
2530 | logical_done_nocc: | 2531 | logical_done_nocc: |
2531 | op->reg = ra; | 2532 | op->reg = ra; |
2532 | op->type |= SETREG; | 2533 | op->type |= SETREG; |
@@ -2534,7 +2535,7 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs, | |||
2534 | 2535 | ||
2535 | arith_done: | 2536 | arith_done: |
2536 | if (instr & 1) | 2537 | if (instr & 1) |
2537 | set_cr0(regs, op, rd); | 2538 | set_cr0(regs, op); |
2538 | compute_done: | 2539 | compute_done: |
2539 | op->reg = rd; | 2540 | op->reg = rd; |
2540 | op->type |= SETREG; | 2541 | op->type |= SETREG; |
diff --git a/arch/powerpc/mm/pgtable_32.c b/arch/powerpc/mm/pgtable_32.c index 65eda1997c3f..f6c7f54c0515 100644 --- a/arch/powerpc/mm/pgtable_32.c +++ b/arch/powerpc/mm/pgtable_32.c | |||
@@ -361,9 +361,9 @@ static int change_page_attr(struct page *page, int numpages, pgprot_t prot) | |||
361 | break; | 361 | break; |
362 | } | 362 | } |
363 | wmb(); | 363 | wmb(); |
364 | local_irq_restore(flags); | ||
364 | flush_tlb_kernel_range((unsigned long)page_address(start), | 365 | flush_tlb_kernel_range((unsigned long)page_address(start), |
365 | (unsigned long)page_address(page)); | 366 | (unsigned long)page_address(page)); |
366 | local_irq_restore(flags); | ||
367 | return err; | 367 | return err; |
368 | } | 368 | } |
369 | 369 | ||
diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c index 2e3eb7431571..9e3da168d54c 100644 --- a/arch/powerpc/perf/core-book3s.c +++ b/arch/powerpc/perf/core-book3s.c | |||
@@ -793,6 +793,11 @@ void perf_event_print_debug(void) | |||
793 | u32 pmcs[MAX_HWEVENTS]; | 793 | u32 pmcs[MAX_HWEVENTS]; |
794 | int i; | 794 | int i; |
795 | 795 | ||
796 | if (!ppmu) { | ||
797 | pr_info("Performance monitor hardware not registered.\n"); | ||
798 | return; | ||
799 | } | ||
800 | |||
796 | if (!ppmu->n_counter) | 801 | if (!ppmu->n_counter) |
797 | return; | 802 | return; |
798 | 803 | ||
diff --git a/arch/powerpc/platforms/powernv/idle.c b/arch/powerpc/platforms/powernv/idle.c index 9f59041a172b..443d5ca71995 100644 --- a/arch/powerpc/platforms/powernv/idle.c +++ b/arch/powerpc/platforms/powernv/idle.c | |||
@@ -393,7 +393,13 @@ static void pnv_program_cpu_hotplug_lpcr(unsigned int cpu, u64 lpcr_val) | |||
393 | u64 pir = get_hard_smp_processor_id(cpu); | 393 | u64 pir = get_hard_smp_processor_id(cpu); |
394 | 394 | ||
395 | mtspr(SPRN_LPCR, lpcr_val); | 395 | mtspr(SPRN_LPCR, lpcr_val); |
396 | opal_slw_set_reg(pir, SPRN_LPCR, lpcr_val); | 396 | |
397 | /* | ||
398 | * Program the LPCR via stop-api only if the deepest stop state | ||
399 | * can lose hypervisor context. | ||
400 | */ | ||
401 | if (supported_cpuidle_states & OPAL_PM_LOSE_FULL_CONTEXT) | ||
402 | opal_slw_set_reg(pir, SPRN_LPCR, lpcr_val); | ||
397 | } | 403 | } |
398 | 404 | ||
399 | /* | 405 | /* |
diff --git a/arch/powerpc/platforms/powernv/setup.c b/arch/powerpc/platforms/powernv/setup.c index 897aa1400eb8..bbb73aa0eb8f 100644 --- a/arch/powerpc/platforms/powernv/setup.c +++ b/arch/powerpc/platforms/powernv/setup.c | |||
@@ -272,7 +272,15 @@ static void pnv_kexec_cpu_down(int crash_shutdown, int secondary) | |||
272 | #ifdef CONFIG_MEMORY_HOTPLUG_SPARSE | 272 | #ifdef CONFIG_MEMORY_HOTPLUG_SPARSE |
273 | static unsigned long pnv_memory_block_size(void) | 273 | static unsigned long pnv_memory_block_size(void) |
274 | { | 274 | { |
275 | return 256UL * 1024 * 1024; | 275 | /* |
276 | * We map the kernel linear region with 1GB large pages on radix. For | ||
277 | * memory hot unplug to work our memory block size must be at least | ||
278 | * this size. | ||
279 | */ | ||
280 | if (radix_enabled()) | ||
281 | return 1UL * 1024 * 1024 * 1024; | ||
282 | else | ||
283 | return 256UL * 1024 * 1024; | ||
276 | } | 284 | } |
277 | #endif | 285 | #endif |
278 | 286 | ||
diff --git a/arch/powerpc/platforms/pseries/dlpar.c b/arch/powerpc/platforms/pseries/dlpar.c index 783f36364690..e45b5f10645a 100644 --- a/arch/powerpc/platforms/pseries/dlpar.c +++ b/arch/powerpc/platforms/pseries/dlpar.c | |||
@@ -266,7 +266,6 @@ int dlpar_attach_node(struct device_node *dn, struct device_node *parent) | |||
266 | return rc; | 266 | return rc; |
267 | } | 267 | } |
268 | 268 | ||
269 | of_node_put(dn->parent); | ||
270 | return 0; | 269 | return 0; |
271 | } | 270 | } |
272 | 271 | ||
diff --git a/arch/powerpc/platforms/pseries/hotplug-cpu.c b/arch/powerpc/platforms/pseries/hotplug-cpu.c index fc0d8f97c03a..fadb95efbb9e 100644 --- a/arch/powerpc/platforms/pseries/hotplug-cpu.c +++ b/arch/powerpc/platforms/pseries/hotplug-cpu.c | |||
@@ -462,15 +462,19 @@ static ssize_t dlpar_cpu_add(u32 drc_index) | |||
462 | } | 462 | } |
463 | 463 | ||
464 | dn = dlpar_configure_connector(cpu_to_be32(drc_index), parent); | 464 | dn = dlpar_configure_connector(cpu_to_be32(drc_index), parent); |
465 | of_node_put(parent); | ||
466 | if (!dn) { | 465 | if (!dn) { |
467 | pr_warn("Failed call to configure-connector, drc index: %x\n", | 466 | pr_warn("Failed call to configure-connector, drc index: %x\n", |
468 | drc_index); | 467 | drc_index); |
469 | dlpar_release_drc(drc_index); | 468 | dlpar_release_drc(drc_index); |
469 | of_node_put(parent); | ||
470 | return -EINVAL; | 470 | return -EINVAL; |
471 | } | 471 | } |
472 | 472 | ||
473 | rc = dlpar_attach_node(dn, parent); | 473 | rc = dlpar_attach_node(dn, parent); |
474 | |||
475 | /* Regardless we are done with parent now */ | ||
476 | of_node_put(parent); | ||
477 | |||
474 | if (rc) { | 478 | if (rc) { |
475 | saved_rc = rc; | 479 | saved_rc = rc; |
476 | pr_warn("Failed to attach node %s, rc: %d, drc index: %x\n", | 480 | pr_warn("Failed to attach node %s, rc: %d, drc index: %x\n", |
diff --git a/arch/powerpc/platforms/pseries/mobility.c b/arch/powerpc/platforms/pseries/mobility.c index 210ce632d63e..f7042ad492ba 100644 --- a/arch/powerpc/platforms/pseries/mobility.c +++ b/arch/powerpc/platforms/pseries/mobility.c | |||
@@ -226,8 +226,10 @@ static int add_dt_node(__be32 parent_phandle, __be32 drc_index) | |||
226 | return -ENOENT; | 226 | return -ENOENT; |
227 | 227 | ||
228 | dn = dlpar_configure_connector(drc_index, parent_dn); | 228 | dn = dlpar_configure_connector(drc_index, parent_dn); |
229 | if (!dn) | 229 | if (!dn) { |
230 | of_node_put(parent_dn); | ||
230 | return -ENOENT; | 231 | return -ENOENT; |
232 | } | ||
231 | 233 | ||
232 | rc = dlpar_attach_node(dn, parent_dn); | 234 | rc = dlpar_attach_node(dn, parent_dn); |
233 | if (rc) | 235 | if (rc) |
diff --git a/arch/powerpc/sysdev/fsl_rio.c b/arch/powerpc/sysdev/fsl_rio.c index 9234be1e66f5..5011ffea4e4b 100644 --- a/arch/powerpc/sysdev/fsl_rio.c +++ b/arch/powerpc/sysdev/fsl_rio.c | |||
@@ -71,6 +71,8 @@ | |||
71 | #define RIWAR_WRTYP_ALLOC 0x00006000 | 71 | #define RIWAR_WRTYP_ALLOC 0x00006000 |
72 | #define RIWAR_SIZE_MASK 0x0000003F | 72 | #define RIWAR_SIZE_MASK 0x0000003F |
73 | 73 | ||
74 | static DEFINE_SPINLOCK(fsl_rio_config_lock); | ||
75 | |||
74 | #define __fsl_read_rio_config(x, addr, err, op) \ | 76 | #define __fsl_read_rio_config(x, addr, err, op) \ |
75 | __asm__ __volatile__( \ | 77 | __asm__ __volatile__( \ |
76 | "1: "op" %1,0(%2)\n" \ | 78 | "1: "op" %1,0(%2)\n" \ |
@@ -184,6 +186,7 @@ fsl_rio_config_read(struct rio_mport *mport, int index, u16 destid, | |||
184 | u8 hopcount, u32 offset, int len, u32 *val) | 186 | u8 hopcount, u32 offset, int len, u32 *val) |
185 | { | 187 | { |
186 | struct rio_priv *priv = mport->priv; | 188 | struct rio_priv *priv = mport->priv; |
189 | unsigned long flags; | ||
187 | u8 *data; | 190 | u8 *data; |
188 | u32 rval, err = 0; | 191 | u32 rval, err = 0; |
189 | 192 | ||
@@ -197,6 +200,8 @@ fsl_rio_config_read(struct rio_mport *mport, int index, u16 destid, | |||
197 | if (offset > (0x1000000 - len) || !IS_ALIGNED(offset, len)) | 200 | if (offset > (0x1000000 - len) || !IS_ALIGNED(offset, len)) |
198 | return -EINVAL; | 201 | return -EINVAL; |
199 | 202 | ||
203 | spin_lock_irqsave(&fsl_rio_config_lock, flags); | ||
204 | |||
200 | out_be32(&priv->maint_atmu_regs->rowtar, | 205 | out_be32(&priv->maint_atmu_regs->rowtar, |
201 | (destid << 22) | (hopcount << 12) | (offset >> 12)); | 206 | (destid << 22) | (hopcount << 12) | (offset >> 12)); |
202 | out_be32(&priv->maint_atmu_regs->rowtear, (destid >> 10)); | 207 | out_be32(&priv->maint_atmu_regs->rowtear, (destid >> 10)); |
@@ -213,6 +218,7 @@ fsl_rio_config_read(struct rio_mport *mport, int index, u16 destid, | |||
213 | __fsl_read_rio_config(rval, data, err, "lwz"); | 218 | __fsl_read_rio_config(rval, data, err, "lwz"); |
214 | break; | 219 | break; |
215 | default: | 220 | default: |
221 | spin_unlock_irqrestore(&fsl_rio_config_lock, flags); | ||
216 | return -EINVAL; | 222 | return -EINVAL; |
217 | } | 223 | } |
218 | 224 | ||
@@ -221,6 +227,7 @@ fsl_rio_config_read(struct rio_mport *mport, int index, u16 destid, | |||
221 | err, destid, hopcount, offset); | 227 | err, destid, hopcount, offset); |
222 | } | 228 | } |
223 | 229 | ||
230 | spin_unlock_irqrestore(&fsl_rio_config_lock, flags); | ||
224 | *val = rval; | 231 | *val = rval; |
225 | 232 | ||
226 | return err; | 233 | return err; |
@@ -244,7 +251,10 @@ fsl_rio_config_write(struct rio_mport *mport, int index, u16 destid, | |||
244 | u8 hopcount, u32 offset, int len, u32 val) | 251 | u8 hopcount, u32 offset, int len, u32 val) |
245 | { | 252 | { |
246 | struct rio_priv *priv = mport->priv; | 253 | struct rio_priv *priv = mport->priv; |
254 | unsigned long flags; | ||
247 | u8 *data; | 255 | u8 *data; |
256 | int ret = 0; | ||
257 | |||
248 | pr_debug | 258 | pr_debug |
249 | ("fsl_rio_config_write:" | 259 | ("fsl_rio_config_write:" |
250 | " index %d destid %d hopcount %d offset %8.8x len %d val %8.8x\n", | 260 | " index %d destid %d hopcount %d offset %8.8x len %d val %8.8x\n", |
@@ -255,6 +265,8 @@ fsl_rio_config_write(struct rio_mport *mport, int index, u16 destid, | |||
255 | if (offset > (0x1000000 - len) || !IS_ALIGNED(offset, len)) | 265 | if (offset > (0x1000000 - len) || !IS_ALIGNED(offset, len)) |
256 | return -EINVAL; | 266 | return -EINVAL; |
257 | 267 | ||
268 | spin_lock_irqsave(&fsl_rio_config_lock, flags); | ||
269 | |||
258 | out_be32(&priv->maint_atmu_regs->rowtar, | 270 | out_be32(&priv->maint_atmu_regs->rowtar, |
259 | (destid << 22) | (hopcount << 12) | (offset >> 12)); | 271 | (destid << 22) | (hopcount << 12) | (offset >> 12)); |
260 | out_be32(&priv->maint_atmu_regs->rowtear, (destid >> 10)); | 272 | out_be32(&priv->maint_atmu_regs->rowtear, (destid >> 10)); |
@@ -271,10 +283,11 @@ fsl_rio_config_write(struct rio_mport *mport, int index, u16 destid, | |||
271 | out_be32((u32 *) data, val); | 283 | out_be32((u32 *) data, val); |
272 | break; | 284 | break; |
273 | default: | 285 | default: |
274 | return -EINVAL; | 286 | ret = -EINVAL; |
275 | } | 287 | } |
288 | spin_unlock_irqrestore(&fsl_rio_config_lock, flags); | ||
276 | 289 | ||
277 | return 0; | 290 | return ret; |
278 | } | 291 | } |
279 | 292 | ||
280 | static void fsl_rio_inbound_mem_init(struct rio_priv *priv) | 293 | static void fsl_rio_inbound_mem_init(struct rio_priv *priv) |
diff --git a/arch/powerpc/sysdev/fsl_rmu.c b/arch/powerpc/sysdev/fsl_rmu.c index ab7a74c75be8..88b35a3dcdc5 100644 --- a/arch/powerpc/sysdev/fsl_rmu.c +++ b/arch/powerpc/sysdev/fsl_rmu.c | |||
@@ -104,6 +104,8 @@ | |||
104 | 104 | ||
105 | #define DOORBELL_MESSAGE_SIZE 0x08 | 105 | #define DOORBELL_MESSAGE_SIZE 0x08 |
106 | 106 | ||
107 | static DEFINE_SPINLOCK(fsl_rio_doorbell_lock); | ||
108 | |||
107 | struct rio_msg_regs { | 109 | struct rio_msg_regs { |
108 | u32 omr; | 110 | u32 omr; |
109 | u32 osr; | 111 | u32 osr; |
@@ -626,9 +628,13 @@ err_out: | |||
626 | int fsl_rio_doorbell_send(struct rio_mport *mport, | 628 | int fsl_rio_doorbell_send(struct rio_mport *mport, |
627 | int index, u16 destid, u16 data) | 629 | int index, u16 destid, u16 data) |
628 | { | 630 | { |
631 | unsigned long flags; | ||
632 | |||
629 | pr_debug("fsl_doorbell_send: index %d destid %4.4x data %4.4x\n", | 633 | pr_debug("fsl_doorbell_send: index %d destid %4.4x data %4.4x\n", |
630 | index, destid, data); | 634 | index, destid, data); |
631 | 635 | ||
636 | spin_lock_irqsave(&fsl_rio_doorbell_lock, flags); | ||
637 | |||
632 | /* In the serial version silicons, such as MPC8548, MPC8641, | 638 | /* In the serial version silicons, such as MPC8548, MPC8641, |
633 | * below operations is must be. | 639 | * below operations is must be. |
634 | */ | 640 | */ |
@@ -638,6 +644,8 @@ int fsl_rio_doorbell_send(struct rio_mport *mport, | |||
638 | out_be32(&dbell->dbell_regs->oddatr, (index << 20) | data); | 644 | out_be32(&dbell->dbell_regs->oddatr, (index << 20) | data); |
639 | out_be32(&dbell->dbell_regs->odmr, 0x00000001); | 645 | out_be32(&dbell->dbell_regs->odmr, 0x00000001); |
640 | 646 | ||
647 | spin_unlock_irqrestore(&fsl_rio_doorbell_lock, flags); | ||
648 | |||
641 | return 0; | 649 | return 0; |
642 | } | 650 | } |
643 | 651 | ||
diff --git a/arch/powerpc/sysdev/xive/common.c b/arch/powerpc/sysdev/xive/common.c index f387318678b9..a3b8d7d1316e 100644 --- a/arch/powerpc/sysdev/xive/common.c +++ b/arch/powerpc/sysdev/xive/common.c | |||
@@ -1402,6 +1402,14 @@ void xive_teardown_cpu(void) | |||
1402 | 1402 | ||
1403 | if (xive_ops->teardown_cpu) | 1403 | if (xive_ops->teardown_cpu) |
1404 | xive_ops->teardown_cpu(cpu, xc); | 1404 | xive_ops->teardown_cpu(cpu, xc); |
1405 | |||
1406 | #ifdef CONFIG_SMP | ||
1407 | /* Get rid of IPI */ | ||
1408 | xive_cleanup_cpu_ipi(cpu, xc); | ||
1409 | #endif | ||
1410 | |||
1411 | /* Disable and free the queues */ | ||
1412 | xive_cleanup_cpu_queues(cpu, xc); | ||
1405 | } | 1413 | } |
1406 | 1414 | ||
1407 | void xive_kexec_teardown_cpu(int secondary) | 1415 | void xive_kexec_teardown_cpu(int secondary) |
diff --git a/arch/powerpc/sysdev/xive/spapr.c b/arch/powerpc/sysdev/xive/spapr.c index f24a70bc6855..d9c4c9366049 100644 --- a/arch/powerpc/sysdev/xive/spapr.c +++ b/arch/powerpc/sysdev/xive/spapr.c | |||
@@ -431,7 +431,11 @@ static int xive_spapr_get_ipi(unsigned int cpu, struct xive_cpu *xc) | |||
431 | 431 | ||
432 | static void xive_spapr_put_ipi(unsigned int cpu, struct xive_cpu *xc) | 432 | static void xive_spapr_put_ipi(unsigned int cpu, struct xive_cpu *xc) |
433 | { | 433 | { |
434 | if (!xc->hw_ipi) | ||
435 | return; | ||
436 | |||
434 | xive_irq_bitmap_free(xc->hw_ipi); | 437 | xive_irq_bitmap_free(xc->hw_ipi); |
438 | xc->hw_ipi = 0; | ||
435 | } | 439 | } |
436 | #endif /* CONFIG_SMP */ | 440 | #endif /* CONFIG_SMP */ |
437 | 441 | ||
diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h index dce708e061ea..20e75a2ca93a 100644 --- a/arch/s390/include/asm/pgtable.h +++ b/arch/s390/include/asm/pgtable.h | |||
@@ -1507,7 +1507,9 @@ static inline pmd_t pmdp_huge_clear_flush(struct vm_area_struct *vma, | |||
1507 | static inline void pmdp_invalidate(struct vm_area_struct *vma, | 1507 | static inline void pmdp_invalidate(struct vm_area_struct *vma, |
1508 | unsigned long addr, pmd_t *pmdp) | 1508 | unsigned long addr, pmd_t *pmdp) |
1509 | { | 1509 | { |
1510 | pmdp_xchg_direct(vma->vm_mm, addr, pmdp, __pmd(_SEGMENT_ENTRY_EMPTY)); | 1510 | pmd_t pmd = __pmd(pmd_val(*pmdp) | _SEGMENT_ENTRY_INVALID); |
1511 | |||
1512 | pmdp_xchg_direct(vma->vm_mm, addr, pmdp, pmd); | ||
1511 | } | 1513 | } |
1512 | 1514 | ||
1513 | #define __HAVE_ARCH_PMDP_SET_WRPROTECT | 1515 | #define __HAVE_ARCH_PMDP_SET_WRPROTECT |
diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c index ca8cd80e8feb..60181caf8e8a 100644 --- a/arch/s390/kernel/early.c +++ b/arch/s390/kernel/early.c | |||
@@ -404,18 +404,6 @@ static inline void save_vector_registers(void) | |||
404 | #endif | 404 | #endif |
405 | } | 405 | } |
406 | 406 | ||
407 | static int __init topology_setup(char *str) | ||
408 | { | ||
409 | bool enabled; | ||
410 | int rc; | ||
411 | |||
412 | rc = kstrtobool(str, &enabled); | ||
413 | if (!rc && !enabled) | ||
414 | S390_lowcore.machine_flags &= ~MACHINE_FLAG_TOPOLOGY; | ||
415 | return rc; | ||
416 | } | ||
417 | early_param("topology", topology_setup); | ||
418 | |||
419 | static int __init disable_vector_extension(char *str) | 407 | static int __init disable_vector_extension(char *str) |
420 | { | 408 | { |
421 | S390_lowcore.machine_flags &= ~MACHINE_FLAG_VX; | 409 | S390_lowcore.machine_flags &= ~MACHINE_FLAG_VX; |
diff --git a/arch/s390/kernel/perf_cpum_sf.c b/arch/s390/kernel/perf_cpum_sf.c index c1bf75ffb875..7e1e40323b78 100644 --- a/arch/s390/kernel/perf_cpum_sf.c +++ b/arch/s390/kernel/perf_cpum_sf.c | |||
@@ -823,9 +823,12 @@ static int cpumsf_pmu_event_init(struct perf_event *event) | |||
823 | } | 823 | } |
824 | 824 | ||
825 | /* Check online status of the CPU to which the event is pinned */ | 825 | /* Check online status of the CPU to which the event is pinned */ |
826 | if ((unsigned int)event->cpu >= nr_cpumask_bits || | 826 | if (event->cpu >= 0) { |
827 | (event->cpu >= 0 && !cpu_online(event->cpu))) | 827 | if ((unsigned int)event->cpu >= nr_cpumask_bits) |
828 | return -ENODEV; | 828 | return -ENODEV; |
829 | if (!cpu_online(event->cpu)) | ||
830 | return -ENODEV; | ||
831 | } | ||
829 | 832 | ||
830 | /* Force reset of idle/hv excludes regardless of what the | 833 | /* Force reset of idle/hv excludes regardless of what the |
831 | * user requested. | 834 | * user requested. |
diff --git a/arch/s390/kernel/topology.c b/arch/s390/kernel/topology.c index bb47c92476f0..ed0bdd220e1a 100644 --- a/arch/s390/kernel/topology.c +++ b/arch/s390/kernel/topology.c | |||
@@ -8,6 +8,8 @@ | |||
8 | 8 | ||
9 | #include <linux/workqueue.h> | 9 | #include <linux/workqueue.h> |
10 | #include <linux/bootmem.h> | 10 | #include <linux/bootmem.h> |
11 | #include <linux/uaccess.h> | ||
12 | #include <linux/sysctl.h> | ||
11 | #include <linux/cpuset.h> | 13 | #include <linux/cpuset.h> |
12 | #include <linux/device.h> | 14 | #include <linux/device.h> |
13 | #include <linux/export.h> | 15 | #include <linux/export.h> |
@@ -29,12 +31,20 @@ | |||
29 | #define PTF_VERTICAL (1UL) | 31 | #define PTF_VERTICAL (1UL) |
30 | #define PTF_CHECK (2UL) | 32 | #define PTF_CHECK (2UL) |
31 | 33 | ||
34 | enum { | ||
35 | TOPOLOGY_MODE_HW, | ||
36 | TOPOLOGY_MODE_SINGLE, | ||
37 | TOPOLOGY_MODE_PACKAGE, | ||
38 | TOPOLOGY_MODE_UNINITIALIZED | ||
39 | }; | ||
40 | |||
32 | struct mask_info { | 41 | struct mask_info { |
33 | struct mask_info *next; | 42 | struct mask_info *next; |
34 | unsigned char id; | 43 | unsigned char id; |
35 | cpumask_t mask; | 44 | cpumask_t mask; |
36 | }; | 45 | }; |
37 | 46 | ||
47 | static int topology_mode = TOPOLOGY_MODE_UNINITIALIZED; | ||
38 | static void set_topology_timer(void); | 48 | static void set_topology_timer(void); |
39 | static void topology_work_fn(struct work_struct *work); | 49 | static void topology_work_fn(struct work_struct *work); |
40 | static struct sysinfo_15_1_x *tl_info; | 50 | static struct sysinfo_15_1_x *tl_info; |
@@ -59,11 +69,26 @@ static cpumask_t cpu_group_map(struct mask_info *info, unsigned int cpu) | |||
59 | cpumask_t mask; | 69 | cpumask_t mask; |
60 | 70 | ||
61 | cpumask_copy(&mask, cpumask_of(cpu)); | 71 | cpumask_copy(&mask, cpumask_of(cpu)); |
62 | if (!MACHINE_HAS_TOPOLOGY) | 72 | switch (topology_mode) { |
63 | return mask; | 73 | case TOPOLOGY_MODE_HW: |
64 | for (; info; info = info->next) { | 74 | while (info) { |
65 | if (cpumask_test_cpu(cpu, &info->mask)) | 75 | if (cpumask_test_cpu(cpu, &info->mask)) { |
66 | return info->mask; | 76 | mask = info->mask; |
77 | break; | ||
78 | } | ||
79 | info = info->next; | ||
80 | } | ||
81 | if (cpumask_empty(&mask)) | ||
82 | cpumask_copy(&mask, cpumask_of(cpu)); | ||
83 | break; | ||
84 | case TOPOLOGY_MODE_PACKAGE: | ||
85 | cpumask_copy(&mask, cpu_present_mask); | ||
86 | break; | ||
87 | default: | ||
88 | /* fallthrough */ | ||
89 | case TOPOLOGY_MODE_SINGLE: | ||
90 | cpumask_copy(&mask, cpumask_of(cpu)); | ||
91 | break; | ||
67 | } | 92 | } |
68 | return mask; | 93 | return mask; |
69 | } | 94 | } |
@@ -74,7 +99,7 @@ static cpumask_t cpu_thread_map(unsigned int cpu) | |||
74 | int i; | 99 | int i; |
75 | 100 | ||
76 | cpumask_copy(&mask, cpumask_of(cpu)); | 101 | cpumask_copy(&mask, cpumask_of(cpu)); |
77 | if (!MACHINE_HAS_TOPOLOGY) | 102 | if (topology_mode != TOPOLOGY_MODE_HW) |
78 | return mask; | 103 | return mask; |
79 | cpu -= cpu % (smp_cpu_mtid + 1); | 104 | cpu -= cpu % (smp_cpu_mtid + 1); |
80 | for (i = 0; i <= smp_cpu_mtid; i++) | 105 | for (i = 0; i <= smp_cpu_mtid; i++) |
@@ -184,10 +209,8 @@ static void topology_update_polarization_simple(void) | |||
184 | { | 209 | { |
185 | int cpu; | 210 | int cpu; |
186 | 211 | ||
187 | mutex_lock(&smp_cpu_state_mutex); | ||
188 | for_each_possible_cpu(cpu) | 212 | for_each_possible_cpu(cpu) |
189 | smp_cpu_set_polarization(cpu, POLARIZATION_HRZ); | 213 | smp_cpu_set_polarization(cpu, POLARIZATION_HRZ); |
190 | mutex_unlock(&smp_cpu_state_mutex); | ||
191 | } | 214 | } |
192 | 215 | ||
193 | static int ptf(unsigned long fc) | 216 | static int ptf(unsigned long fc) |
@@ -223,7 +246,7 @@ int topology_set_cpu_management(int fc) | |||
223 | static void update_cpu_masks(void) | 246 | static void update_cpu_masks(void) |
224 | { | 247 | { |
225 | struct cpu_topology_s390 *topo; | 248 | struct cpu_topology_s390 *topo; |
226 | int cpu; | 249 | int cpu, id; |
227 | 250 | ||
228 | for_each_possible_cpu(cpu) { | 251 | for_each_possible_cpu(cpu) { |
229 | topo = &cpu_topology[cpu]; | 252 | topo = &cpu_topology[cpu]; |
@@ -231,12 +254,13 @@ static void update_cpu_masks(void) | |||
231 | topo->core_mask = cpu_group_map(&socket_info, cpu); | 254 | topo->core_mask = cpu_group_map(&socket_info, cpu); |
232 | topo->book_mask = cpu_group_map(&book_info, cpu); | 255 | topo->book_mask = cpu_group_map(&book_info, cpu); |
233 | topo->drawer_mask = cpu_group_map(&drawer_info, cpu); | 256 | topo->drawer_mask = cpu_group_map(&drawer_info, cpu); |
234 | if (!MACHINE_HAS_TOPOLOGY) { | 257 | if (topology_mode != TOPOLOGY_MODE_HW) { |
258 | id = topology_mode == TOPOLOGY_MODE_PACKAGE ? 0 : cpu; | ||
235 | topo->thread_id = cpu; | 259 | topo->thread_id = cpu; |
236 | topo->core_id = cpu; | 260 | topo->core_id = cpu; |
237 | topo->socket_id = cpu; | 261 | topo->socket_id = id; |
238 | topo->book_id = cpu; | 262 | topo->book_id = id; |
239 | topo->drawer_id = cpu; | 263 | topo->drawer_id = id; |
240 | if (cpu_present(cpu)) | 264 | if (cpu_present(cpu)) |
241 | cpumask_set_cpu(cpu, &cpus_with_topology); | 265 | cpumask_set_cpu(cpu, &cpus_with_topology); |
242 | } | 266 | } |
@@ -254,6 +278,7 @@ static int __arch_update_cpu_topology(void) | |||
254 | struct sysinfo_15_1_x *info = tl_info; | 278 | struct sysinfo_15_1_x *info = tl_info; |
255 | int rc = 0; | 279 | int rc = 0; |
256 | 280 | ||
281 | mutex_lock(&smp_cpu_state_mutex); | ||
257 | cpumask_clear(&cpus_with_topology); | 282 | cpumask_clear(&cpus_with_topology); |
258 | if (MACHINE_HAS_TOPOLOGY) { | 283 | if (MACHINE_HAS_TOPOLOGY) { |
259 | rc = 1; | 284 | rc = 1; |
@@ -263,6 +288,7 @@ static int __arch_update_cpu_topology(void) | |||
263 | update_cpu_masks(); | 288 | update_cpu_masks(); |
264 | if (!MACHINE_HAS_TOPOLOGY) | 289 | if (!MACHINE_HAS_TOPOLOGY) |
265 | topology_update_polarization_simple(); | 290 | topology_update_polarization_simple(); |
291 | mutex_unlock(&smp_cpu_state_mutex); | ||
266 | return rc; | 292 | return rc; |
267 | } | 293 | } |
268 | 294 | ||
@@ -289,6 +315,11 @@ void topology_schedule_update(void) | |||
289 | schedule_work(&topology_work); | 315 | schedule_work(&topology_work); |
290 | } | 316 | } |
291 | 317 | ||
318 | static void topology_flush_work(void) | ||
319 | { | ||
320 | flush_work(&topology_work); | ||
321 | } | ||
322 | |||
292 | static void topology_timer_fn(unsigned long ignored) | 323 | static void topology_timer_fn(unsigned long ignored) |
293 | { | 324 | { |
294 | if (ptf(PTF_CHECK)) | 325 | if (ptf(PTF_CHECK)) |
@@ -459,6 +490,12 @@ void __init topology_init_early(void) | |||
459 | struct sysinfo_15_1_x *info; | 490 | struct sysinfo_15_1_x *info; |
460 | 491 | ||
461 | set_sched_topology(s390_topology); | 492 | set_sched_topology(s390_topology); |
493 | if (topology_mode == TOPOLOGY_MODE_UNINITIALIZED) { | ||
494 | if (MACHINE_HAS_TOPOLOGY) | ||
495 | topology_mode = TOPOLOGY_MODE_HW; | ||
496 | else | ||
497 | topology_mode = TOPOLOGY_MODE_SINGLE; | ||
498 | } | ||
462 | if (!MACHINE_HAS_TOPOLOGY) | 499 | if (!MACHINE_HAS_TOPOLOGY) |
463 | goto out; | 500 | goto out; |
464 | tl_info = memblock_virt_alloc(PAGE_SIZE, PAGE_SIZE); | 501 | tl_info = memblock_virt_alloc(PAGE_SIZE, PAGE_SIZE); |
@@ -474,12 +511,97 @@ out: | |||
474 | __arch_update_cpu_topology(); | 511 | __arch_update_cpu_topology(); |
475 | } | 512 | } |
476 | 513 | ||
514 | static inline int topology_get_mode(int enabled) | ||
515 | { | ||
516 | if (!enabled) | ||
517 | return TOPOLOGY_MODE_SINGLE; | ||
518 | return MACHINE_HAS_TOPOLOGY ? TOPOLOGY_MODE_HW : TOPOLOGY_MODE_PACKAGE; | ||
519 | } | ||
520 | |||
521 | static inline int topology_is_enabled(void) | ||
522 | { | ||
523 | return topology_mode != TOPOLOGY_MODE_SINGLE; | ||
524 | } | ||
525 | |||
526 | static int __init topology_setup(char *str) | ||
527 | { | ||
528 | bool enabled; | ||
529 | int rc; | ||
530 | |||
531 | rc = kstrtobool(str, &enabled); | ||
532 | if (rc) | ||
533 | return rc; | ||
534 | topology_mode = topology_get_mode(enabled); | ||
535 | return 0; | ||
536 | } | ||
537 | early_param("topology", topology_setup); | ||
538 | |||
539 | static int topology_ctl_handler(struct ctl_table *ctl, int write, | ||
540 | void __user *buffer, size_t *lenp, loff_t *ppos) | ||
541 | { | ||
542 | unsigned int len; | ||
543 | int new_mode; | ||
544 | char buf[2]; | ||
545 | |||
546 | if (!*lenp || *ppos) { | ||
547 | *lenp = 0; | ||
548 | return 0; | ||
549 | } | ||
550 | if (!write) { | ||
551 | strncpy(buf, topology_is_enabled() ? "1\n" : "0\n", | ||
552 | ARRAY_SIZE(buf)); | ||
553 | len = strnlen(buf, ARRAY_SIZE(buf)); | ||
554 | if (len > *lenp) | ||
555 | len = *lenp; | ||
556 | if (copy_to_user(buffer, buf, len)) | ||
557 | return -EFAULT; | ||
558 | goto out; | ||
559 | } | ||
560 | len = *lenp; | ||
561 | if (copy_from_user(buf, buffer, len > sizeof(buf) ? sizeof(buf) : len)) | ||
562 | return -EFAULT; | ||
563 | if (buf[0] != '0' && buf[0] != '1') | ||
564 | return -EINVAL; | ||
565 | mutex_lock(&smp_cpu_state_mutex); | ||
566 | new_mode = topology_get_mode(buf[0] == '1'); | ||
567 | if (topology_mode != new_mode) { | ||
568 | topology_mode = new_mode; | ||
569 | topology_schedule_update(); | ||
570 | } | ||
571 | mutex_unlock(&smp_cpu_state_mutex); | ||
572 | topology_flush_work(); | ||
573 | out: | ||
574 | *lenp = len; | ||
575 | *ppos += len; | ||
576 | return 0; | ||
577 | } | ||
578 | |||
579 | static struct ctl_table topology_ctl_table[] = { | ||
580 | { | ||
581 | .procname = "topology", | ||
582 | .mode = 0644, | ||
583 | .proc_handler = topology_ctl_handler, | ||
584 | }, | ||
585 | { }, | ||
586 | }; | ||
587 | |||
588 | static struct ctl_table topology_dir_table[] = { | ||
589 | { | ||
590 | .procname = "s390", | ||
591 | .maxlen = 0, | ||
592 | .mode = 0555, | ||
593 | .child = topology_ctl_table, | ||
594 | }, | ||
595 | { }, | ||
596 | }; | ||
597 | |||
477 | static int __init topology_init(void) | 598 | static int __init topology_init(void) |
478 | { | 599 | { |
479 | if (MACHINE_HAS_TOPOLOGY) | 600 | if (MACHINE_HAS_TOPOLOGY) |
480 | set_topology_timer(); | 601 | set_topology_timer(); |
481 | else | 602 | else |
482 | topology_update_polarization_simple(); | 603 | topology_update_polarization_simple(); |
604 | register_sysctl_table(topology_dir_table); | ||
483 | return device_create_file(cpu_subsys.dev_root, &dev_attr_dispatching); | 605 | return device_create_file(cpu_subsys.dev_root, &dev_attr_dispatching); |
484 | } | 606 | } |
485 | device_initcall(topology_init); | 607 | device_initcall(topology_init); |
diff --git a/arch/s390/mm/gup.c b/arch/s390/mm/gup.c index 8ecc25e760fa..98ffe3ee9411 100644 --- a/arch/s390/mm/gup.c +++ b/arch/s390/mm/gup.c | |||
@@ -56,13 +56,12 @@ static inline int gup_pte_range(pmd_t *pmdp, pmd_t pmd, unsigned long addr, | |||
56 | static inline int gup_huge_pmd(pmd_t *pmdp, pmd_t pmd, unsigned long addr, | 56 | static inline int gup_huge_pmd(pmd_t *pmdp, pmd_t pmd, unsigned long addr, |
57 | unsigned long end, int write, struct page **pages, int *nr) | 57 | unsigned long end, int write, struct page **pages, int *nr) |
58 | { | 58 | { |
59 | unsigned long mask, result; | ||
60 | struct page *head, *page; | 59 | struct page *head, *page; |
60 | unsigned long mask; | ||
61 | int refs; | 61 | int refs; |
62 | 62 | ||
63 | result = write ? 0 : _SEGMENT_ENTRY_PROTECT; | 63 | mask = (write ? _SEGMENT_ENTRY_PROTECT : 0) | _SEGMENT_ENTRY_INVALID; |
64 | mask = result | _SEGMENT_ENTRY_INVALID; | 64 | if ((pmd_val(pmd) & mask) != 0) |
65 | if ((pmd_val(pmd) & mask) != result) | ||
66 | return 0; | 65 | return 0; |
67 | VM_BUG_ON(!pfn_valid(pmd_val(pmd) >> PAGE_SHIFT)); | 66 | VM_BUG_ON(!pfn_valid(pmd_val(pmd) >> PAGE_SHIFT)); |
68 | 67 | ||
diff --git a/arch/sh/include/asm/processor_32.h b/arch/sh/include/asm/processor_32.h index 18e0377f72bb..88ce1e22237b 100644 --- a/arch/sh/include/asm/processor_32.h +++ b/arch/sh/include/asm/processor_32.h | |||
@@ -136,10 +136,6 @@ extern void start_thread(struct pt_regs *regs, unsigned long new_pc, unsigned lo | |||
136 | /* Free all resources held by a thread. */ | 136 | /* Free all resources held by a thread. */ |
137 | extern void release_thread(struct task_struct *); | 137 | extern void release_thread(struct task_struct *); |
138 | 138 | ||
139 | /* Copy and release all segment info associated with a VM */ | ||
140 | #define copy_segments(p, mm) do { } while(0) | ||
141 | #define release_segments(mm) do { } while(0) | ||
142 | |||
143 | /* | 139 | /* |
144 | * FPU lazy state save handling. | 140 | * FPU lazy state save handling. |
145 | */ | 141 | */ |
diff --git a/arch/sh/include/asm/processor_64.h b/arch/sh/include/asm/processor_64.h index eedd4f625d07..777a16318aff 100644 --- a/arch/sh/include/asm/processor_64.h +++ b/arch/sh/include/asm/processor_64.h | |||
@@ -170,10 +170,6 @@ struct mm_struct; | |||
170 | /* Free all resources held by a thread. */ | 170 | /* Free all resources held by a thread. */ |
171 | extern void release_thread(struct task_struct *); | 171 | extern void release_thread(struct task_struct *); |
172 | 172 | ||
173 | /* Copy and release all segment info associated with a VM */ | ||
174 | #define copy_segments(p, mm) do { } while (0) | ||
175 | #define release_segments(mm) do { } while (0) | ||
176 | #define forget_segments() do { } while (0) | ||
177 | /* | 173 | /* |
178 | * FPU lazy state save handling. | 174 | * FPU lazy state save handling. |
179 | */ | 175 | */ |
diff --git a/arch/sh/include/cpu-sh2a/cpu/sh7264.h b/arch/sh/include/cpu-sh2a/cpu/sh7264.h index 4d1ef6d74bd6..2ae0e938b657 100644 --- a/arch/sh/include/cpu-sh2a/cpu/sh7264.h +++ b/arch/sh/include/cpu-sh2a/cpu/sh7264.h | |||
@@ -43,9 +43,7 @@ enum { | |||
43 | GPIO_PG7, GPIO_PG6, GPIO_PG5, GPIO_PG4, | 43 | GPIO_PG7, GPIO_PG6, GPIO_PG5, GPIO_PG4, |
44 | GPIO_PG3, GPIO_PG2, GPIO_PG1, GPIO_PG0, | 44 | GPIO_PG3, GPIO_PG2, GPIO_PG1, GPIO_PG0, |
45 | 45 | ||
46 | /* Port H */ | 46 | /* Port H - Port H does not have a Data Register */ |
47 | GPIO_PH7, GPIO_PH6, GPIO_PH5, GPIO_PH4, | ||
48 | GPIO_PH3, GPIO_PH2, GPIO_PH1, GPIO_PH0, | ||
49 | 47 | ||
50 | /* Port I - not on device */ | 48 | /* Port I - not on device */ |
51 | 49 | ||
diff --git a/arch/sh/include/cpu-sh2a/cpu/sh7269.h b/arch/sh/include/cpu-sh2a/cpu/sh7269.h index 2a0ca8780f0d..13c495a9fc00 100644 --- a/arch/sh/include/cpu-sh2a/cpu/sh7269.h +++ b/arch/sh/include/cpu-sh2a/cpu/sh7269.h | |||
@@ -45,9 +45,7 @@ enum { | |||
45 | GPIO_PG7, GPIO_PG6, GPIO_PG5, GPIO_PG4, | 45 | GPIO_PG7, GPIO_PG6, GPIO_PG5, GPIO_PG4, |
46 | GPIO_PG3, GPIO_PG2, GPIO_PG1, GPIO_PG0, | 46 | GPIO_PG3, GPIO_PG2, GPIO_PG1, GPIO_PG0, |
47 | 47 | ||
48 | /* Port H */ | 48 | /* Port H - Port H does not have a Data Register */ |
49 | GPIO_PH7, GPIO_PH6, GPIO_PH5, GPIO_PH4, | ||
50 | GPIO_PH3, GPIO_PH2, GPIO_PH1, GPIO_PH0, | ||
51 | 49 | ||
52 | /* Port I - not on device */ | 50 | /* Port I - not on device */ |
53 | 51 | ||
diff --git a/arch/sh/include/cpu-sh4/cpu/sh7722.h b/arch/sh/include/cpu-sh4/cpu/sh7722.h index 3bb74e534d0f..78961ab78a5a 100644 --- a/arch/sh/include/cpu-sh4/cpu/sh7722.h +++ b/arch/sh/include/cpu-sh4/cpu/sh7722.h | |||
@@ -67,7 +67,7 @@ enum { | |||
67 | GPIO_PTN3, GPIO_PTN2, GPIO_PTN1, GPIO_PTN0, | 67 | GPIO_PTN3, GPIO_PTN2, GPIO_PTN1, GPIO_PTN0, |
68 | 68 | ||
69 | /* PTQ */ | 69 | /* PTQ */ |
70 | GPIO_PTQ7, GPIO_PTQ6, GPIO_PTQ5, GPIO_PTQ4, | 70 | GPIO_PTQ6, GPIO_PTQ5, GPIO_PTQ4, |
71 | GPIO_PTQ3, GPIO_PTQ2, GPIO_PTQ1, GPIO_PTQ0, | 71 | GPIO_PTQ3, GPIO_PTQ2, GPIO_PTQ1, GPIO_PTQ0, |
72 | 72 | ||
73 | /* PTR */ | 73 | /* PTR */ |
diff --git a/arch/sh/include/cpu-sh4/cpu/sh7757.h b/arch/sh/include/cpu-sh4/cpu/sh7757.h index 5340f3bc1863..b40fb541e72a 100644 --- a/arch/sh/include/cpu-sh4/cpu/sh7757.h +++ b/arch/sh/include/cpu-sh4/cpu/sh7757.h | |||
@@ -40,7 +40,7 @@ enum { | |||
40 | 40 | ||
41 | /* PTJ */ | 41 | /* PTJ */ |
42 | GPIO_PTJ0, GPIO_PTJ1, GPIO_PTJ2, GPIO_PTJ3, | 42 | GPIO_PTJ0, GPIO_PTJ1, GPIO_PTJ2, GPIO_PTJ3, |
43 | GPIO_PTJ4, GPIO_PTJ5, GPIO_PTJ6, GPIO_PTJ7_RESV, | 43 | GPIO_PTJ4, GPIO_PTJ5, GPIO_PTJ6, |
44 | 44 | ||
45 | /* PTK */ | 45 | /* PTK */ |
46 | GPIO_PTK0, GPIO_PTK1, GPIO_PTK2, GPIO_PTK3, | 46 | GPIO_PTK0, GPIO_PTK1, GPIO_PTK2, GPIO_PTK3, |
@@ -48,7 +48,7 @@ enum { | |||
48 | 48 | ||
49 | /* PTL */ | 49 | /* PTL */ |
50 | GPIO_PTL0, GPIO_PTL1, GPIO_PTL2, GPIO_PTL3, | 50 | GPIO_PTL0, GPIO_PTL1, GPIO_PTL2, GPIO_PTL3, |
51 | GPIO_PTL4, GPIO_PTL5, GPIO_PTL6, GPIO_PTL7_RESV, | 51 | GPIO_PTL4, GPIO_PTL5, GPIO_PTL6, |
52 | 52 | ||
53 | /* PTM */ | 53 | /* PTM */ |
54 | GPIO_PTM0, GPIO_PTM1, GPIO_PTM2, GPIO_PTM3, | 54 | GPIO_PTM0, GPIO_PTM1, GPIO_PTM2, GPIO_PTM3, |
@@ -56,7 +56,7 @@ enum { | |||
56 | 56 | ||
57 | /* PTN */ | 57 | /* PTN */ |
58 | GPIO_PTN0, GPIO_PTN1, GPIO_PTN2, GPIO_PTN3, | 58 | GPIO_PTN0, GPIO_PTN1, GPIO_PTN2, GPIO_PTN3, |
59 | GPIO_PTN4, GPIO_PTN5, GPIO_PTN6, GPIO_PTN7_RESV, | 59 | GPIO_PTN4, GPIO_PTN5, GPIO_PTN6, |
60 | 60 | ||
61 | /* PTO */ | 61 | /* PTO */ |
62 | GPIO_PTO0, GPIO_PTO1, GPIO_PTO2, GPIO_PTO3, | 62 | GPIO_PTO0, GPIO_PTO1, GPIO_PTO2, GPIO_PTO3, |
@@ -68,7 +68,7 @@ enum { | |||
68 | 68 | ||
69 | /* PTQ */ | 69 | /* PTQ */ |
70 | GPIO_PTQ0, GPIO_PTQ1, GPIO_PTQ2, GPIO_PTQ3, | 70 | GPIO_PTQ0, GPIO_PTQ1, GPIO_PTQ2, GPIO_PTQ3, |
71 | GPIO_PTQ4, GPIO_PTQ5, GPIO_PTQ6, GPIO_PTQ7_RESV, | 71 | GPIO_PTQ4, GPIO_PTQ5, GPIO_PTQ6, |
72 | 72 | ||
73 | /* PTR */ | 73 | /* PTR */ |
74 | GPIO_PTR0, GPIO_PTR1, GPIO_PTR2, GPIO_PTR3, | 74 | GPIO_PTR0, GPIO_PTR1, GPIO_PTR2, GPIO_PTR3, |
diff --git a/arch/tile/configs/tilegx_defconfig b/arch/tile/configs/tilegx_defconfig index 0d925fa0f0c1..9f94435cc44f 100644 --- a/arch/tile/configs/tilegx_defconfig +++ b/arch/tile/configs/tilegx_defconfig | |||
@@ -409,5 +409,4 @@ CONFIG_CRYPTO_SEED=m | |||
409 | CONFIG_CRYPTO_SERPENT=m | 409 | CONFIG_CRYPTO_SERPENT=m |
410 | CONFIG_CRYPTO_TEA=m | 410 | CONFIG_CRYPTO_TEA=m |
411 | CONFIG_CRYPTO_TWOFISH=m | 411 | CONFIG_CRYPTO_TWOFISH=m |
412 | CONFIG_CRYPTO_ZLIB=m | ||
413 | CONFIG_CRYPTO_LZO=m | 412 | CONFIG_CRYPTO_LZO=m |
diff --git a/arch/tile/configs/tilepro_defconfig b/arch/tile/configs/tilepro_defconfig index 149d8e8eacb8..1c5bd4f8ffca 100644 --- a/arch/tile/configs/tilepro_defconfig +++ b/arch/tile/configs/tilepro_defconfig | |||
@@ -189,7 +189,6 @@ CONFIG_IP_NF_MATCH_ECN=m | |||
189 | CONFIG_IP_NF_MATCH_TTL=m | 189 | CONFIG_IP_NF_MATCH_TTL=m |
190 | CONFIG_IP_NF_FILTER=y | 190 | CONFIG_IP_NF_FILTER=y |
191 | CONFIG_IP_NF_TARGET_REJECT=y | 191 | CONFIG_IP_NF_TARGET_REJECT=y |
192 | CONFIG_IP_NF_TARGET_ULOG=m | ||
193 | CONFIG_IP_NF_MANGLE=m | 192 | CONFIG_IP_NF_MANGLE=m |
194 | CONFIG_IP_NF_TARGET_ECN=m | 193 | CONFIG_IP_NF_TARGET_ECN=m |
195 | CONFIG_IP_NF_TARGET_TTL=m | 194 | CONFIG_IP_NF_TARGET_TTL=m |
@@ -521,7 +520,6 @@ CONFIG_CRYPTO_SEED=m | |||
521 | CONFIG_CRYPTO_SERPENT=m | 520 | CONFIG_CRYPTO_SERPENT=m |
522 | CONFIG_CRYPTO_TEA=m | 521 | CONFIG_CRYPTO_TEA=m |
523 | CONFIG_CRYPTO_TWOFISH=m | 522 | CONFIG_CRYPTO_TWOFISH=m |
524 | CONFIG_CRYPTO_ZLIB=m | ||
525 | CONFIG_CRYPTO_LZO=m | 523 | CONFIG_CRYPTO_LZO=m |
526 | CONFIG_CRC_CCITT=m | 524 | CONFIG_CRC_CCITT=m |
527 | CONFIG_CRC7=m | 525 | CONFIG_CRC7=m |
diff --git a/arch/tile/kernel/setup.c b/arch/tile/kernel/setup.c index 6becb96c60a0..ad83c1e66dbd 100644 --- a/arch/tile/kernel/setup.c +++ b/arch/tile/kernel/setup.c | |||
@@ -140,7 +140,7 @@ static int __init setup_maxnodemem(char *str) | |||
140 | { | 140 | { |
141 | char *endp; | 141 | char *endp; |
142 | unsigned long long maxnodemem; | 142 | unsigned long long maxnodemem; |
143 | long node; | 143 | unsigned long node; |
144 | 144 | ||
145 | node = str ? simple_strtoul(str, &endp, 0) : INT_MAX; | 145 | node = str ? simple_strtoul(str, &endp, 0) : INT_MAX; |
146 | if (node >= MAX_NUMNODES || *endp != ':') | 146 | if (node >= MAX_NUMNODES || *endp != ':') |
diff --git a/arch/um/include/asm/processor-generic.h b/arch/um/include/asm/processor-generic.h index f6d1a3f747a9..86942a492454 100644 --- a/arch/um/include/asm/processor-generic.h +++ b/arch/um/include/asm/processor-generic.h | |||
@@ -58,11 +58,6 @@ static inline void release_thread(struct task_struct *task) | |||
58 | { | 58 | { |
59 | } | 59 | } |
60 | 60 | ||
61 | static inline void mm_copy_segments(struct mm_struct *from_mm, | ||
62 | struct mm_struct *new_mm) | ||
63 | { | ||
64 | } | ||
65 | |||
66 | #define init_stack (init_thread_union.stack) | 61 | #define init_stack (init_thread_union.stack) |
67 | 62 | ||
68 | /* | 63 | /* |
diff --git a/arch/um/kernel/time.c b/arch/um/kernel/time.c index 0b034ebbda2a..7f69d17de354 100644 --- a/arch/um/kernel/time.c +++ b/arch/um/kernel/time.c | |||
@@ -98,7 +98,7 @@ static struct clocksource timer_clocksource = { | |||
98 | .flags = CLOCK_SOURCE_IS_CONTINUOUS, | 98 | .flags = CLOCK_SOURCE_IS_CONTINUOUS, |
99 | }; | 99 | }; |
100 | 100 | ||
101 | static void __init timer_setup(void) | 101 | static void __init um_timer_setup(void) |
102 | { | 102 | { |
103 | int err; | 103 | int err; |
104 | 104 | ||
@@ -132,5 +132,5 @@ void read_persistent_clock(struct timespec *ts) | |||
132 | void __init time_init(void) | 132 | void __init time_init(void) |
133 | { | 133 | { |
134 | timer_set_signal_handler(); | 134 | timer_set_signal_handler(); |
135 | late_time_init = timer_setup; | 135 | late_time_init = um_timer_setup; |
136 | } | 136 | } |
diff --git a/arch/x86/crypto/blowfish-x86_64-asm_64.S b/arch/x86/crypto/blowfish-x86_64-asm_64.S index 246c67006ed0..8c1fcb6bad21 100644 --- a/arch/x86/crypto/blowfish-x86_64-asm_64.S +++ b/arch/x86/crypto/blowfish-x86_64-asm_64.S | |||
@@ -33,7 +33,7 @@ | |||
33 | #define s3 ((16 + 2 + (3 * 256)) * 4) | 33 | #define s3 ((16 + 2 + (3 * 256)) * 4) |
34 | 34 | ||
35 | /* register macros */ | 35 | /* register macros */ |
36 | #define CTX %rdi | 36 | #define CTX %r12 |
37 | #define RIO %rsi | 37 | #define RIO %rsi |
38 | 38 | ||
39 | #define RX0 %rax | 39 | #define RX0 %rax |
@@ -56,12 +56,12 @@ | |||
56 | #define RX2bh %ch | 56 | #define RX2bh %ch |
57 | #define RX3bh %dh | 57 | #define RX3bh %dh |
58 | 58 | ||
59 | #define RT0 %rbp | 59 | #define RT0 %rdi |
60 | #define RT1 %rsi | 60 | #define RT1 %rsi |
61 | #define RT2 %r8 | 61 | #define RT2 %r8 |
62 | #define RT3 %r9 | 62 | #define RT3 %r9 |
63 | 63 | ||
64 | #define RT0d %ebp | 64 | #define RT0d %edi |
65 | #define RT1d %esi | 65 | #define RT1d %esi |
66 | #define RT2d %r8d | 66 | #define RT2d %r8d |
67 | #define RT3d %r9d | 67 | #define RT3d %r9d |
@@ -120,13 +120,14 @@ | |||
120 | 120 | ||
121 | ENTRY(__blowfish_enc_blk) | 121 | ENTRY(__blowfish_enc_blk) |
122 | /* input: | 122 | /* input: |
123 | * %rdi: ctx, CTX | 123 | * %rdi: ctx |
124 | * %rsi: dst | 124 | * %rsi: dst |
125 | * %rdx: src | 125 | * %rdx: src |
126 | * %rcx: bool, if true: xor output | 126 | * %rcx: bool, if true: xor output |
127 | */ | 127 | */ |
128 | movq %rbp, %r11; | 128 | movq %r12, %r11; |
129 | 129 | ||
130 | movq %rdi, CTX; | ||
130 | movq %rsi, %r10; | 131 | movq %rsi, %r10; |
131 | movq %rdx, RIO; | 132 | movq %rdx, RIO; |
132 | 133 | ||
@@ -142,7 +143,7 @@ ENTRY(__blowfish_enc_blk) | |||
142 | round_enc(14); | 143 | round_enc(14); |
143 | add_roundkey_enc(16); | 144 | add_roundkey_enc(16); |
144 | 145 | ||
145 | movq %r11, %rbp; | 146 | movq %r11, %r12; |
146 | 147 | ||
147 | movq %r10, RIO; | 148 | movq %r10, RIO; |
148 | test %cl, %cl; | 149 | test %cl, %cl; |
@@ -157,12 +158,13 @@ ENDPROC(__blowfish_enc_blk) | |||
157 | 158 | ||
158 | ENTRY(blowfish_dec_blk) | 159 | ENTRY(blowfish_dec_blk) |
159 | /* input: | 160 | /* input: |
160 | * %rdi: ctx, CTX | 161 | * %rdi: ctx |
161 | * %rsi: dst | 162 | * %rsi: dst |
162 | * %rdx: src | 163 | * %rdx: src |
163 | */ | 164 | */ |
164 | movq %rbp, %r11; | 165 | movq %r12, %r11; |
165 | 166 | ||
167 | movq %rdi, CTX; | ||
166 | movq %rsi, %r10; | 168 | movq %rsi, %r10; |
167 | movq %rdx, RIO; | 169 | movq %rdx, RIO; |
168 | 170 | ||
@@ -181,7 +183,7 @@ ENTRY(blowfish_dec_blk) | |||
181 | movq %r10, RIO; | 183 | movq %r10, RIO; |
182 | write_block(); | 184 | write_block(); |
183 | 185 | ||
184 | movq %r11, %rbp; | 186 | movq %r11, %r12; |
185 | 187 | ||
186 | ret; | 188 | ret; |
187 | ENDPROC(blowfish_dec_blk) | 189 | ENDPROC(blowfish_dec_blk) |
@@ -298,20 +300,21 @@ ENDPROC(blowfish_dec_blk) | |||
298 | 300 | ||
299 | ENTRY(__blowfish_enc_blk_4way) | 301 | ENTRY(__blowfish_enc_blk_4way) |
300 | /* input: | 302 | /* input: |
301 | * %rdi: ctx, CTX | 303 | * %rdi: ctx |
302 | * %rsi: dst | 304 | * %rsi: dst |
303 | * %rdx: src | 305 | * %rdx: src |
304 | * %rcx: bool, if true: xor output | 306 | * %rcx: bool, if true: xor output |
305 | */ | 307 | */ |
306 | pushq %rbp; | 308 | pushq %r12; |
307 | pushq %rbx; | 309 | pushq %rbx; |
308 | pushq %rcx; | 310 | pushq %rcx; |
309 | 311 | ||
310 | preload_roundkey_enc(0); | 312 | movq %rdi, CTX |
311 | |||
312 | movq %rsi, %r11; | 313 | movq %rsi, %r11; |
313 | movq %rdx, RIO; | 314 | movq %rdx, RIO; |
314 | 315 | ||
316 | preload_roundkey_enc(0); | ||
317 | |||
315 | read_block4(); | 318 | read_block4(); |
316 | 319 | ||
317 | round_enc4(0); | 320 | round_enc4(0); |
@@ -324,39 +327,40 @@ ENTRY(__blowfish_enc_blk_4way) | |||
324 | round_enc4(14); | 327 | round_enc4(14); |
325 | add_preloaded_roundkey4(); | 328 | add_preloaded_roundkey4(); |
326 | 329 | ||
327 | popq %rbp; | 330 | popq %r12; |
328 | movq %r11, RIO; | 331 | movq %r11, RIO; |
329 | 332 | ||
330 | test %bpl, %bpl; | 333 | test %r12b, %r12b; |
331 | jnz .L__enc_xor4; | 334 | jnz .L__enc_xor4; |
332 | 335 | ||
333 | write_block4(); | 336 | write_block4(); |
334 | 337 | ||
335 | popq %rbx; | 338 | popq %rbx; |
336 | popq %rbp; | 339 | popq %r12; |
337 | ret; | 340 | ret; |
338 | 341 | ||
339 | .L__enc_xor4: | 342 | .L__enc_xor4: |
340 | xor_block4(); | 343 | xor_block4(); |
341 | 344 | ||
342 | popq %rbx; | 345 | popq %rbx; |
343 | popq %rbp; | 346 | popq %r12; |
344 | ret; | 347 | ret; |
345 | ENDPROC(__blowfish_enc_blk_4way) | 348 | ENDPROC(__blowfish_enc_blk_4way) |
346 | 349 | ||
347 | ENTRY(blowfish_dec_blk_4way) | 350 | ENTRY(blowfish_dec_blk_4way) |
348 | /* input: | 351 | /* input: |
349 | * %rdi: ctx, CTX | 352 | * %rdi: ctx |
350 | * %rsi: dst | 353 | * %rsi: dst |
351 | * %rdx: src | 354 | * %rdx: src |
352 | */ | 355 | */ |
353 | pushq %rbp; | 356 | pushq %r12; |
354 | pushq %rbx; | 357 | pushq %rbx; |
355 | preload_roundkey_dec(17); | ||
356 | 358 | ||
357 | movq %rsi, %r11; | 359 | movq %rdi, CTX; |
360 | movq %rsi, %r11 | ||
358 | movq %rdx, RIO; | 361 | movq %rdx, RIO; |
359 | 362 | ||
363 | preload_roundkey_dec(17); | ||
360 | read_block4(); | 364 | read_block4(); |
361 | 365 | ||
362 | round_dec4(17); | 366 | round_dec4(17); |
@@ -373,7 +377,7 @@ ENTRY(blowfish_dec_blk_4way) | |||
373 | write_block4(); | 377 | write_block4(); |
374 | 378 | ||
375 | popq %rbx; | 379 | popq %rbx; |
376 | popq %rbp; | 380 | popq %r12; |
377 | 381 | ||
378 | ret; | 382 | ret; |
379 | ENDPROC(blowfish_dec_blk_4way) | 383 | ENDPROC(blowfish_dec_blk_4way) |
diff --git a/arch/x86/crypto/camellia-x86_64-asm_64.S b/arch/x86/crypto/camellia-x86_64-asm_64.S index 310319c601ed..95ba6956a7f6 100644 --- a/arch/x86/crypto/camellia-x86_64-asm_64.S +++ b/arch/x86/crypto/camellia-x86_64-asm_64.S | |||
@@ -75,17 +75,17 @@ | |||
75 | #define RCD1bh %dh | 75 | #define RCD1bh %dh |
76 | 76 | ||
77 | #define RT0 %rsi | 77 | #define RT0 %rsi |
78 | #define RT1 %rbp | 78 | #define RT1 %r12 |
79 | #define RT2 %r8 | 79 | #define RT2 %r8 |
80 | 80 | ||
81 | #define RT0d %esi | 81 | #define RT0d %esi |
82 | #define RT1d %ebp | 82 | #define RT1d %r12d |
83 | #define RT2d %r8d | 83 | #define RT2d %r8d |
84 | 84 | ||
85 | #define RT2bl %r8b | 85 | #define RT2bl %r8b |
86 | 86 | ||
87 | #define RXOR %r9 | 87 | #define RXOR %r9 |
88 | #define RRBP %r10 | 88 | #define RR12 %r10 |
89 | #define RDST %r11 | 89 | #define RDST %r11 |
90 | 90 | ||
91 | #define RXORd %r9d | 91 | #define RXORd %r9d |
@@ -197,7 +197,7 @@ ENTRY(__camellia_enc_blk) | |||
197 | * %rdx: src | 197 | * %rdx: src |
198 | * %rcx: bool xor | 198 | * %rcx: bool xor |
199 | */ | 199 | */ |
200 | movq %rbp, RRBP; | 200 | movq %r12, RR12; |
201 | 201 | ||
202 | movq %rcx, RXOR; | 202 | movq %rcx, RXOR; |
203 | movq %rsi, RDST; | 203 | movq %rsi, RDST; |
@@ -227,13 +227,13 @@ ENTRY(__camellia_enc_blk) | |||
227 | 227 | ||
228 | enc_outunpack(mov, RT1); | 228 | enc_outunpack(mov, RT1); |
229 | 229 | ||
230 | movq RRBP, %rbp; | 230 | movq RR12, %r12; |
231 | ret; | 231 | ret; |
232 | 232 | ||
233 | .L__enc_xor: | 233 | .L__enc_xor: |
234 | enc_outunpack(xor, RT1); | 234 | enc_outunpack(xor, RT1); |
235 | 235 | ||
236 | movq RRBP, %rbp; | 236 | movq RR12, %r12; |
237 | ret; | 237 | ret; |
238 | ENDPROC(__camellia_enc_blk) | 238 | ENDPROC(__camellia_enc_blk) |
239 | 239 | ||
@@ -248,7 +248,7 @@ ENTRY(camellia_dec_blk) | |||
248 | movl $24, RXORd; | 248 | movl $24, RXORd; |
249 | cmovel RXORd, RT2d; /* max */ | 249 | cmovel RXORd, RT2d; /* max */ |
250 | 250 | ||
251 | movq %rbp, RRBP; | 251 | movq %r12, RR12; |
252 | movq %rsi, RDST; | 252 | movq %rsi, RDST; |
253 | movq %rdx, RIO; | 253 | movq %rdx, RIO; |
254 | 254 | ||
@@ -271,7 +271,7 @@ ENTRY(camellia_dec_blk) | |||
271 | 271 | ||
272 | dec_outunpack(); | 272 | dec_outunpack(); |
273 | 273 | ||
274 | movq RRBP, %rbp; | 274 | movq RR12, %r12; |
275 | ret; | 275 | ret; |
276 | ENDPROC(camellia_dec_blk) | 276 | ENDPROC(camellia_dec_blk) |
277 | 277 | ||
@@ -433,7 +433,7 @@ ENTRY(__camellia_enc_blk_2way) | |||
433 | */ | 433 | */ |
434 | pushq %rbx; | 434 | pushq %rbx; |
435 | 435 | ||
436 | movq %rbp, RRBP; | 436 | movq %r12, RR12; |
437 | movq %rcx, RXOR; | 437 | movq %rcx, RXOR; |
438 | movq %rsi, RDST; | 438 | movq %rsi, RDST; |
439 | movq %rdx, RIO; | 439 | movq %rdx, RIO; |
@@ -461,14 +461,14 @@ ENTRY(__camellia_enc_blk_2way) | |||
461 | 461 | ||
462 | enc_outunpack2(mov, RT2); | 462 | enc_outunpack2(mov, RT2); |
463 | 463 | ||
464 | movq RRBP, %rbp; | 464 | movq RR12, %r12; |
465 | popq %rbx; | 465 | popq %rbx; |
466 | ret; | 466 | ret; |
467 | 467 | ||
468 | .L__enc2_xor: | 468 | .L__enc2_xor: |
469 | enc_outunpack2(xor, RT2); | 469 | enc_outunpack2(xor, RT2); |
470 | 470 | ||
471 | movq RRBP, %rbp; | 471 | movq RR12, %r12; |
472 | popq %rbx; | 472 | popq %rbx; |
473 | ret; | 473 | ret; |
474 | ENDPROC(__camellia_enc_blk_2way) | 474 | ENDPROC(__camellia_enc_blk_2way) |
@@ -485,7 +485,7 @@ ENTRY(camellia_dec_blk_2way) | |||
485 | cmovel RXORd, RT2d; /* max */ | 485 | cmovel RXORd, RT2d; /* max */ |
486 | 486 | ||
487 | movq %rbx, RXOR; | 487 | movq %rbx, RXOR; |
488 | movq %rbp, RRBP; | 488 | movq %r12, RR12; |
489 | movq %rsi, RDST; | 489 | movq %rsi, RDST; |
490 | movq %rdx, RIO; | 490 | movq %rdx, RIO; |
491 | 491 | ||
@@ -508,7 +508,7 @@ ENTRY(camellia_dec_blk_2way) | |||
508 | 508 | ||
509 | dec_outunpack2(); | 509 | dec_outunpack2(); |
510 | 510 | ||
511 | movq RRBP, %rbp; | 511 | movq RR12, %r12; |
512 | movq RXOR, %rbx; | 512 | movq RXOR, %rbx; |
513 | ret; | 513 | ret; |
514 | ENDPROC(camellia_dec_blk_2way) | 514 | ENDPROC(camellia_dec_blk_2way) |
diff --git a/arch/x86/crypto/cast5-avx-x86_64-asm_64.S b/arch/x86/crypto/cast5-avx-x86_64-asm_64.S index b4a8806234ea..86107c961bb4 100644 --- a/arch/x86/crypto/cast5-avx-x86_64-asm_64.S +++ b/arch/x86/crypto/cast5-avx-x86_64-asm_64.S | |||
@@ -47,7 +47,7 @@ | |||
47 | /********************************************************************** | 47 | /********************************************************************** |
48 | 16-way AVX cast5 | 48 | 16-way AVX cast5 |
49 | **********************************************************************/ | 49 | **********************************************************************/ |
50 | #define CTX %rdi | 50 | #define CTX %r15 |
51 | 51 | ||
52 | #define RL1 %xmm0 | 52 | #define RL1 %xmm0 |
53 | #define RR1 %xmm1 | 53 | #define RR1 %xmm1 |
@@ -70,8 +70,8 @@ | |||
70 | 70 | ||
71 | #define RTMP %xmm15 | 71 | #define RTMP %xmm15 |
72 | 72 | ||
73 | #define RID1 %rbp | 73 | #define RID1 %rdi |
74 | #define RID1d %ebp | 74 | #define RID1d %edi |
75 | #define RID2 %rsi | 75 | #define RID2 %rsi |
76 | #define RID2d %esi | 76 | #define RID2d %esi |
77 | 77 | ||
@@ -226,7 +226,7 @@ | |||
226 | .align 16 | 226 | .align 16 |
227 | __cast5_enc_blk16: | 227 | __cast5_enc_blk16: |
228 | /* input: | 228 | /* input: |
229 | * %rdi: ctx, CTX | 229 | * %rdi: ctx |
230 | * RL1: blocks 1 and 2 | 230 | * RL1: blocks 1 and 2 |
231 | * RR1: blocks 3 and 4 | 231 | * RR1: blocks 3 and 4 |
232 | * RL2: blocks 5 and 6 | 232 | * RL2: blocks 5 and 6 |
@@ -246,9 +246,11 @@ __cast5_enc_blk16: | |||
246 | * RR4: encrypted blocks 15 and 16 | 246 | * RR4: encrypted blocks 15 and 16 |
247 | */ | 247 | */ |
248 | 248 | ||
249 | pushq %rbp; | 249 | pushq %r15; |
250 | pushq %rbx; | 250 | pushq %rbx; |
251 | 251 | ||
252 | movq %rdi, CTX; | ||
253 | |||
252 | vmovdqa .Lbswap_mask, RKM; | 254 | vmovdqa .Lbswap_mask, RKM; |
253 | vmovd .Lfirst_mask, R1ST; | 255 | vmovd .Lfirst_mask, R1ST; |
254 | vmovd .L32_mask, R32; | 256 | vmovd .L32_mask, R32; |
@@ -283,7 +285,7 @@ __cast5_enc_blk16: | |||
283 | 285 | ||
284 | .L__skip_enc: | 286 | .L__skip_enc: |
285 | popq %rbx; | 287 | popq %rbx; |
286 | popq %rbp; | 288 | popq %r15; |
287 | 289 | ||
288 | vmovdqa .Lbswap_mask, RKM; | 290 | vmovdqa .Lbswap_mask, RKM; |
289 | 291 | ||
@@ -298,7 +300,7 @@ ENDPROC(__cast5_enc_blk16) | |||
298 | .align 16 | 300 | .align 16 |
299 | __cast5_dec_blk16: | 301 | __cast5_dec_blk16: |
300 | /* input: | 302 | /* input: |
301 | * %rdi: ctx, CTX | 303 | * %rdi: ctx |
302 | * RL1: encrypted blocks 1 and 2 | 304 | * RL1: encrypted blocks 1 and 2 |
303 | * RR1: encrypted blocks 3 and 4 | 305 | * RR1: encrypted blocks 3 and 4 |
304 | * RL2: encrypted blocks 5 and 6 | 306 | * RL2: encrypted blocks 5 and 6 |
@@ -318,9 +320,11 @@ __cast5_dec_blk16: | |||
318 | * RR4: decrypted blocks 15 and 16 | 320 | * RR4: decrypted blocks 15 and 16 |
319 | */ | 321 | */ |
320 | 322 | ||
321 | pushq %rbp; | 323 | pushq %r15; |
322 | pushq %rbx; | 324 | pushq %rbx; |
323 | 325 | ||
326 | movq %rdi, CTX; | ||
327 | |||
324 | vmovdqa .Lbswap_mask, RKM; | 328 | vmovdqa .Lbswap_mask, RKM; |
325 | vmovd .Lfirst_mask, R1ST; | 329 | vmovd .Lfirst_mask, R1ST; |
326 | vmovd .L32_mask, R32; | 330 | vmovd .L32_mask, R32; |
@@ -356,7 +360,7 @@ __cast5_dec_blk16: | |||
356 | 360 | ||
357 | vmovdqa .Lbswap_mask, RKM; | 361 | vmovdqa .Lbswap_mask, RKM; |
358 | popq %rbx; | 362 | popq %rbx; |
359 | popq %rbp; | 363 | popq %r15; |
360 | 364 | ||
361 | outunpack_blocks(RR1, RL1, RTMP, RX, RKM); | 365 | outunpack_blocks(RR1, RL1, RTMP, RX, RKM); |
362 | outunpack_blocks(RR2, RL2, RTMP, RX, RKM); | 366 | outunpack_blocks(RR2, RL2, RTMP, RX, RKM); |
@@ -372,12 +376,14 @@ ENDPROC(__cast5_dec_blk16) | |||
372 | 376 | ||
373 | ENTRY(cast5_ecb_enc_16way) | 377 | ENTRY(cast5_ecb_enc_16way) |
374 | /* input: | 378 | /* input: |
375 | * %rdi: ctx, CTX | 379 | * %rdi: ctx |
376 | * %rsi: dst | 380 | * %rsi: dst |
377 | * %rdx: src | 381 | * %rdx: src |
378 | */ | 382 | */ |
379 | FRAME_BEGIN | 383 | FRAME_BEGIN |
384 | pushq %r15; | ||
380 | 385 | ||
386 | movq %rdi, CTX; | ||
381 | movq %rsi, %r11; | 387 | movq %rsi, %r11; |
382 | 388 | ||
383 | vmovdqu (0*4*4)(%rdx), RL1; | 389 | vmovdqu (0*4*4)(%rdx), RL1; |
@@ -400,18 +406,22 @@ ENTRY(cast5_ecb_enc_16way) | |||
400 | vmovdqu RR4, (6*4*4)(%r11); | 406 | vmovdqu RR4, (6*4*4)(%r11); |
401 | vmovdqu RL4, (7*4*4)(%r11); | 407 | vmovdqu RL4, (7*4*4)(%r11); |
402 | 408 | ||
409 | popq %r15; | ||
403 | FRAME_END | 410 | FRAME_END |
404 | ret; | 411 | ret; |
405 | ENDPROC(cast5_ecb_enc_16way) | 412 | ENDPROC(cast5_ecb_enc_16way) |
406 | 413 | ||
407 | ENTRY(cast5_ecb_dec_16way) | 414 | ENTRY(cast5_ecb_dec_16way) |
408 | /* input: | 415 | /* input: |
409 | * %rdi: ctx, CTX | 416 | * %rdi: ctx |
410 | * %rsi: dst | 417 | * %rsi: dst |
411 | * %rdx: src | 418 | * %rdx: src |
412 | */ | 419 | */ |
413 | 420 | ||
414 | FRAME_BEGIN | 421 | FRAME_BEGIN |
422 | pushq %r15; | ||
423 | |||
424 | movq %rdi, CTX; | ||
415 | movq %rsi, %r11; | 425 | movq %rsi, %r11; |
416 | 426 | ||
417 | vmovdqu (0*4*4)(%rdx), RL1; | 427 | vmovdqu (0*4*4)(%rdx), RL1; |
@@ -434,20 +444,22 @@ ENTRY(cast5_ecb_dec_16way) | |||
434 | vmovdqu RR4, (6*4*4)(%r11); | 444 | vmovdqu RR4, (6*4*4)(%r11); |
435 | vmovdqu RL4, (7*4*4)(%r11); | 445 | vmovdqu RL4, (7*4*4)(%r11); |
436 | 446 | ||
447 | popq %r15; | ||
437 | FRAME_END | 448 | FRAME_END |
438 | ret; | 449 | ret; |
439 | ENDPROC(cast5_ecb_dec_16way) | 450 | ENDPROC(cast5_ecb_dec_16way) |
440 | 451 | ||
441 | ENTRY(cast5_cbc_dec_16way) | 452 | ENTRY(cast5_cbc_dec_16way) |
442 | /* input: | 453 | /* input: |
443 | * %rdi: ctx, CTX | 454 | * %rdi: ctx |
444 | * %rsi: dst | 455 | * %rsi: dst |
445 | * %rdx: src | 456 | * %rdx: src |
446 | */ | 457 | */ |
447 | FRAME_BEGIN | 458 | FRAME_BEGIN |
448 | |||
449 | pushq %r12; | 459 | pushq %r12; |
460 | pushq %r15; | ||
450 | 461 | ||
462 | movq %rdi, CTX; | ||
451 | movq %rsi, %r11; | 463 | movq %rsi, %r11; |
452 | movq %rdx, %r12; | 464 | movq %rdx, %r12; |
453 | 465 | ||
@@ -483,23 +495,24 @@ ENTRY(cast5_cbc_dec_16way) | |||
483 | vmovdqu RR4, (6*16)(%r11); | 495 | vmovdqu RR4, (6*16)(%r11); |
484 | vmovdqu RL4, (7*16)(%r11); | 496 | vmovdqu RL4, (7*16)(%r11); |
485 | 497 | ||
498 | popq %r15; | ||
486 | popq %r12; | 499 | popq %r12; |
487 | |||
488 | FRAME_END | 500 | FRAME_END |
489 | ret; | 501 | ret; |
490 | ENDPROC(cast5_cbc_dec_16way) | 502 | ENDPROC(cast5_cbc_dec_16way) |
491 | 503 | ||
492 | ENTRY(cast5_ctr_16way) | 504 | ENTRY(cast5_ctr_16way) |
493 | /* input: | 505 | /* input: |
494 | * %rdi: ctx, CTX | 506 | * %rdi: ctx |
495 | * %rsi: dst | 507 | * %rsi: dst |
496 | * %rdx: src | 508 | * %rdx: src |
497 | * %rcx: iv (big endian, 64bit) | 509 | * %rcx: iv (big endian, 64bit) |
498 | */ | 510 | */ |
499 | FRAME_BEGIN | 511 | FRAME_BEGIN |
500 | |||
501 | pushq %r12; | 512 | pushq %r12; |
513 | pushq %r15; | ||
502 | 514 | ||
515 | movq %rdi, CTX; | ||
503 | movq %rsi, %r11; | 516 | movq %rsi, %r11; |
504 | movq %rdx, %r12; | 517 | movq %rdx, %r12; |
505 | 518 | ||
@@ -558,8 +571,8 @@ ENTRY(cast5_ctr_16way) | |||
558 | vmovdqu RR4, (6*16)(%r11); | 571 | vmovdqu RR4, (6*16)(%r11); |
559 | vmovdqu RL4, (7*16)(%r11); | 572 | vmovdqu RL4, (7*16)(%r11); |
560 | 573 | ||
574 | popq %r15; | ||
561 | popq %r12; | 575 | popq %r12; |
562 | |||
563 | FRAME_END | 576 | FRAME_END |
564 | ret; | 577 | ret; |
565 | ENDPROC(cast5_ctr_16way) | 578 | ENDPROC(cast5_ctr_16way) |
diff --git a/arch/x86/crypto/cast6-avx-x86_64-asm_64.S b/arch/x86/crypto/cast6-avx-x86_64-asm_64.S index 952d3156a933..7f30b6f0d72c 100644 --- a/arch/x86/crypto/cast6-avx-x86_64-asm_64.S +++ b/arch/x86/crypto/cast6-avx-x86_64-asm_64.S | |||
@@ -47,7 +47,7 @@ | |||
47 | /********************************************************************** | 47 | /********************************************************************** |
48 | 8-way AVX cast6 | 48 | 8-way AVX cast6 |
49 | **********************************************************************/ | 49 | **********************************************************************/ |
50 | #define CTX %rdi | 50 | #define CTX %r15 |
51 | 51 | ||
52 | #define RA1 %xmm0 | 52 | #define RA1 %xmm0 |
53 | #define RB1 %xmm1 | 53 | #define RB1 %xmm1 |
@@ -70,8 +70,8 @@ | |||
70 | 70 | ||
71 | #define RTMP %xmm15 | 71 | #define RTMP %xmm15 |
72 | 72 | ||
73 | #define RID1 %rbp | 73 | #define RID1 %rdi |
74 | #define RID1d %ebp | 74 | #define RID1d %edi |
75 | #define RID2 %rsi | 75 | #define RID2 %rsi |
76 | #define RID2d %esi | 76 | #define RID2d %esi |
77 | 77 | ||
@@ -264,15 +264,17 @@ | |||
264 | .align 8 | 264 | .align 8 |
265 | __cast6_enc_blk8: | 265 | __cast6_enc_blk8: |
266 | /* input: | 266 | /* input: |
267 | * %rdi: ctx, CTX | 267 | * %rdi: ctx |
268 | * RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2: blocks | 268 | * RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2: blocks |
269 | * output: | 269 | * output: |
270 | * RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2: encrypted blocks | 270 | * RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2: encrypted blocks |
271 | */ | 271 | */ |
272 | 272 | ||
273 | pushq %rbp; | 273 | pushq %r15; |
274 | pushq %rbx; | 274 | pushq %rbx; |
275 | 275 | ||
276 | movq %rdi, CTX; | ||
277 | |||
276 | vmovdqa .Lbswap_mask, RKM; | 278 | vmovdqa .Lbswap_mask, RKM; |
277 | vmovd .Lfirst_mask, R1ST; | 279 | vmovd .Lfirst_mask, R1ST; |
278 | vmovd .L32_mask, R32; | 280 | vmovd .L32_mask, R32; |
@@ -297,7 +299,7 @@ __cast6_enc_blk8: | |||
297 | QBAR(11); | 299 | QBAR(11); |
298 | 300 | ||
299 | popq %rbx; | 301 | popq %rbx; |
300 | popq %rbp; | 302 | popq %r15; |
301 | 303 | ||
302 | vmovdqa .Lbswap_mask, RKM; | 304 | vmovdqa .Lbswap_mask, RKM; |
303 | 305 | ||
@@ -310,15 +312,17 @@ ENDPROC(__cast6_enc_blk8) | |||
310 | .align 8 | 312 | .align 8 |
311 | __cast6_dec_blk8: | 313 | __cast6_dec_blk8: |
312 | /* input: | 314 | /* input: |
313 | * %rdi: ctx, CTX | 315 | * %rdi: ctx |
314 | * RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2: encrypted blocks | 316 | * RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2: encrypted blocks |
315 | * output: | 317 | * output: |
316 | * RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2: decrypted blocks | 318 | * RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2: decrypted blocks |
317 | */ | 319 | */ |
318 | 320 | ||
319 | pushq %rbp; | 321 | pushq %r15; |
320 | pushq %rbx; | 322 | pushq %rbx; |
321 | 323 | ||
324 | movq %rdi, CTX; | ||
325 | |||
322 | vmovdqa .Lbswap_mask, RKM; | 326 | vmovdqa .Lbswap_mask, RKM; |
323 | vmovd .Lfirst_mask, R1ST; | 327 | vmovd .Lfirst_mask, R1ST; |
324 | vmovd .L32_mask, R32; | 328 | vmovd .L32_mask, R32; |
@@ -343,7 +347,7 @@ __cast6_dec_blk8: | |||
343 | QBAR(0); | 347 | QBAR(0); |
344 | 348 | ||
345 | popq %rbx; | 349 | popq %rbx; |
346 | popq %rbp; | 350 | popq %r15; |
347 | 351 | ||
348 | vmovdqa .Lbswap_mask, RKM; | 352 | vmovdqa .Lbswap_mask, RKM; |
349 | outunpack_blocks(RA1, RB1, RC1, RD1, RTMP, RX, RKRF, RKM); | 353 | outunpack_blocks(RA1, RB1, RC1, RD1, RTMP, RX, RKRF, RKM); |
@@ -354,12 +358,14 @@ ENDPROC(__cast6_dec_blk8) | |||
354 | 358 | ||
355 | ENTRY(cast6_ecb_enc_8way) | 359 | ENTRY(cast6_ecb_enc_8way) |
356 | /* input: | 360 | /* input: |
357 | * %rdi: ctx, CTX | 361 | * %rdi: ctx |
358 | * %rsi: dst | 362 | * %rsi: dst |
359 | * %rdx: src | 363 | * %rdx: src |
360 | */ | 364 | */ |
361 | FRAME_BEGIN | 365 | FRAME_BEGIN |
366 | pushq %r15; | ||
362 | 367 | ||
368 | movq %rdi, CTX; | ||
363 | movq %rsi, %r11; | 369 | movq %rsi, %r11; |
364 | 370 | ||
365 | load_8way(%rdx, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2); | 371 | load_8way(%rdx, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2); |
@@ -368,18 +374,21 @@ ENTRY(cast6_ecb_enc_8way) | |||
368 | 374 | ||
369 | store_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2); | 375 | store_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2); |
370 | 376 | ||
377 | popq %r15; | ||
371 | FRAME_END | 378 | FRAME_END |
372 | ret; | 379 | ret; |
373 | ENDPROC(cast6_ecb_enc_8way) | 380 | ENDPROC(cast6_ecb_enc_8way) |
374 | 381 | ||
375 | ENTRY(cast6_ecb_dec_8way) | 382 | ENTRY(cast6_ecb_dec_8way) |
376 | /* input: | 383 | /* input: |
377 | * %rdi: ctx, CTX | 384 | * %rdi: ctx |
378 | * %rsi: dst | 385 | * %rsi: dst |
379 | * %rdx: src | 386 | * %rdx: src |
380 | */ | 387 | */ |
381 | FRAME_BEGIN | 388 | FRAME_BEGIN |
389 | pushq %r15; | ||
382 | 390 | ||
391 | movq %rdi, CTX; | ||
383 | movq %rsi, %r11; | 392 | movq %rsi, %r11; |
384 | 393 | ||
385 | load_8way(%rdx, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2); | 394 | load_8way(%rdx, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2); |
@@ -388,20 +397,22 @@ ENTRY(cast6_ecb_dec_8way) | |||
388 | 397 | ||
389 | store_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2); | 398 | store_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2); |
390 | 399 | ||
400 | popq %r15; | ||
391 | FRAME_END | 401 | FRAME_END |
392 | ret; | 402 | ret; |
393 | ENDPROC(cast6_ecb_dec_8way) | 403 | ENDPROC(cast6_ecb_dec_8way) |
394 | 404 | ||
395 | ENTRY(cast6_cbc_dec_8way) | 405 | ENTRY(cast6_cbc_dec_8way) |
396 | /* input: | 406 | /* input: |
397 | * %rdi: ctx, CTX | 407 | * %rdi: ctx |
398 | * %rsi: dst | 408 | * %rsi: dst |
399 | * %rdx: src | 409 | * %rdx: src |
400 | */ | 410 | */ |
401 | FRAME_BEGIN | 411 | FRAME_BEGIN |
402 | |||
403 | pushq %r12; | 412 | pushq %r12; |
413 | pushq %r15; | ||
404 | 414 | ||
415 | movq %rdi, CTX; | ||
405 | movq %rsi, %r11; | 416 | movq %rsi, %r11; |
406 | movq %rdx, %r12; | 417 | movq %rdx, %r12; |
407 | 418 | ||
@@ -411,8 +422,8 @@ ENTRY(cast6_cbc_dec_8way) | |||
411 | 422 | ||
412 | store_cbc_8way(%r12, %r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2); | 423 | store_cbc_8way(%r12, %r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2); |
413 | 424 | ||
425 | popq %r15; | ||
414 | popq %r12; | 426 | popq %r12; |
415 | |||
416 | FRAME_END | 427 | FRAME_END |
417 | ret; | 428 | ret; |
418 | ENDPROC(cast6_cbc_dec_8way) | 429 | ENDPROC(cast6_cbc_dec_8way) |
@@ -425,9 +436,10 @@ ENTRY(cast6_ctr_8way) | |||
425 | * %rcx: iv (little endian, 128bit) | 436 | * %rcx: iv (little endian, 128bit) |
426 | */ | 437 | */ |
427 | FRAME_BEGIN | 438 | FRAME_BEGIN |
428 | |||
429 | pushq %r12; | 439 | pushq %r12; |
440 | pushq %r15 | ||
430 | 441 | ||
442 | movq %rdi, CTX; | ||
431 | movq %rsi, %r11; | 443 | movq %rsi, %r11; |
432 | movq %rdx, %r12; | 444 | movq %rdx, %r12; |
433 | 445 | ||
@@ -438,8 +450,8 @@ ENTRY(cast6_ctr_8way) | |||
438 | 450 | ||
439 | store_ctr_8way(%r12, %r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2); | 451 | store_ctr_8way(%r12, %r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2); |
440 | 452 | ||
453 | popq %r15; | ||
441 | popq %r12; | 454 | popq %r12; |
442 | |||
443 | FRAME_END | 455 | FRAME_END |
444 | ret; | 456 | ret; |
445 | ENDPROC(cast6_ctr_8way) | 457 | ENDPROC(cast6_ctr_8way) |
@@ -452,7 +464,9 @@ ENTRY(cast6_xts_enc_8way) | |||
452 | * %rcx: iv (t ⊕ αⁿ ∈ GF(2¹²⁸)) | 464 | * %rcx: iv (t ⊕ αⁿ ∈ GF(2¹²⁸)) |
453 | */ | 465 | */ |
454 | FRAME_BEGIN | 466 | FRAME_BEGIN |
467 | pushq %r15; | ||
455 | 468 | ||
469 | movq %rdi, CTX | ||
456 | movq %rsi, %r11; | 470 | movq %rsi, %r11; |
457 | 471 | ||
458 | /* regs <= src, dst <= IVs, regs <= regs xor IVs */ | 472 | /* regs <= src, dst <= IVs, regs <= regs xor IVs */ |
@@ -464,6 +478,7 @@ ENTRY(cast6_xts_enc_8way) | |||
464 | /* dst <= regs xor IVs(in dst) */ | 478 | /* dst <= regs xor IVs(in dst) */ |
465 | store_xts_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2); | 479 | store_xts_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2); |
466 | 480 | ||
481 | popq %r15; | ||
467 | FRAME_END | 482 | FRAME_END |
468 | ret; | 483 | ret; |
469 | ENDPROC(cast6_xts_enc_8way) | 484 | ENDPROC(cast6_xts_enc_8way) |
@@ -476,7 +491,9 @@ ENTRY(cast6_xts_dec_8way) | |||
476 | * %rcx: iv (t ⊕ αⁿ ∈ GF(2¹²⁸)) | 491 | * %rcx: iv (t ⊕ αⁿ ∈ GF(2¹²⁸)) |
477 | */ | 492 | */ |
478 | FRAME_BEGIN | 493 | FRAME_BEGIN |
494 | pushq %r15; | ||
479 | 495 | ||
496 | movq %rdi, CTX | ||
480 | movq %rsi, %r11; | 497 | movq %rsi, %r11; |
481 | 498 | ||
482 | /* regs <= src, dst <= IVs, regs <= regs xor IVs */ | 499 | /* regs <= src, dst <= IVs, regs <= regs xor IVs */ |
@@ -488,6 +505,7 @@ ENTRY(cast6_xts_dec_8way) | |||
488 | /* dst <= regs xor IVs(in dst) */ | 505 | /* dst <= regs xor IVs(in dst) */ |
489 | store_xts_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2); | 506 | store_xts_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2); |
490 | 507 | ||
508 | popq %r15; | ||
491 | FRAME_END | 509 | FRAME_END |
492 | ret; | 510 | ret; |
493 | ENDPROC(cast6_xts_dec_8way) | 511 | ENDPROC(cast6_xts_dec_8way) |
diff --git a/arch/x86/crypto/des3_ede-asm_64.S b/arch/x86/crypto/des3_ede-asm_64.S index f3e91647ca27..8e49ce117494 100644 --- a/arch/x86/crypto/des3_ede-asm_64.S +++ b/arch/x86/crypto/des3_ede-asm_64.S | |||
@@ -64,12 +64,12 @@ | |||
64 | #define RW2bh %ch | 64 | #define RW2bh %ch |
65 | 65 | ||
66 | #define RT0 %r15 | 66 | #define RT0 %r15 |
67 | #define RT1 %rbp | 67 | #define RT1 %rsi |
68 | #define RT2 %r14 | 68 | #define RT2 %r14 |
69 | #define RT3 %rdx | 69 | #define RT3 %rdx |
70 | 70 | ||
71 | #define RT0d %r15d | 71 | #define RT0d %r15d |
72 | #define RT1d %ebp | 72 | #define RT1d %esi |
73 | #define RT2d %r14d | 73 | #define RT2d %r14d |
74 | #define RT3d %edx | 74 | #define RT3d %edx |
75 | 75 | ||
@@ -177,13 +177,14 @@ ENTRY(des3_ede_x86_64_crypt_blk) | |||
177 | * %rsi: dst | 177 | * %rsi: dst |
178 | * %rdx: src | 178 | * %rdx: src |
179 | */ | 179 | */ |
180 | pushq %rbp; | ||
181 | pushq %rbx; | 180 | pushq %rbx; |
182 | pushq %r12; | 181 | pushq %r12; |
183 | pushq %r13; | 182 | pushq %r13; |
184 | pushq %r14; | 183 | pushq %r14; |
185 | pushq %r15; | 184 | pushq %r15; |
186 | 185 | ||
186 | pushq %rsi; /* dst */ | ||
187 | |||
187 | read_block(%rdx, RL0, RR0); | 188 | read_block(%rdx, RL0, RR0); |
188 | initial_permutation(RL0, RR0); | 189 | initial_permutation(RL0, RR0); |
189 | 190 | ||
@@ -241,6 +242,8 @@ ENTRY(des3_ede_x86_64_crypt_blk) | |||
241 | round1(32+15, RL0, RR0, dummy2); | 242 | round1(32+15, RL0, RR0, dummy2); |
242 | 243 | ||
243 | final_permutation(RR0, RL0); | 244 | final_permutation(RR0, RL0); |
245 | |||
246 | popq %rsi /* dst */ | ||
244 | write_block(%rsi, RR0, RL0); | 247 | write_block(%rsi, RR0, RL0); |
245 | 248 | ||
246 | popq %r15; | 249 | popq %r15; |
@@ -248,7 +251,6 @@ ENTRY(des3_ede_x86_64_crypt_blk) | |||
248 | popq %r13; | 251 | popq %r13; |
249 | popq %r12; | 252 | popq %r12; |
250 | popq %rbx; | 253 | popq %rbx; |
251 | popq %rbp; | ||
252 | 254 | ||
253 | ret; | 255 | ret; |
254 | ENDPROC(des3_ede_x86_64_crypt_blk) | 256 | ENDPROC(des3_ede_x86_64_crypt_blk) |
@@ -432,13 +434,14 @@ ENTRY(des3_ede_x86_64_crypt_blk_3way) | |||
432 | * %rdx: src (3 blocks) | 434 | * %rdx: src (3 blocks) |
433 | */ | 435 | */ |
434 | 436 | ||
435 | pushq %rbp; | ||
436 | pushq %rbx; | 437 | pushq %rbx; |
437 | pushq %r12; | 438 | pushq %r12; |
438 | pushq %r13; | 439 | pushq %r13; |
439 | pushq %r14; | 440 | pushq %r14; |
440 | pushq %r15; | 441 | pushq %r15; |
441 | 442 | ||
443 | pushq %rsi /* dst */ | ||
444 | |||
442 | /* load input */ | 445 | /* load input */ |
443 | movl 0 * 4(%rdx), RL0d; | 446 | movl 0 * 4(%rdx), RL0d; |
444 | movl 1 * 4(%rdx), RR0d; | 447 | movl 1 * 4(%rdx), RR0d; |
@@ -520,6 +523,7 @@ ENTRY(des3_ede_x86_64_crypt_blk_3way) | |||
520 | bswapl RR2d; | 523 | bswapl RR2d; |
521 | bswapl RL2d; | 524 | bswapl RL2d; |
522 | 525 | ||
526 | popq %rsi /* dst */ | ||
523 | movl RR0d, 0 * 4(%rsi); | 527 | movl RR0d, 0 * 4(%rsi); |
524 | movl RL0d, 1 * 4(%rsi); | 528 | movl RL0d, 1 * 4(%rsi); |
525 | movl RR1d, 2 * 4(%rsi); | 529 | movl RR1d, 2 * 4(%rsi); |
@@ -532,7 +536,6 @@ ENTRY(des3_ede_x86_64_crypt_blk_3way) | |||
532 | popq %r13; | 536 | popq %r13; |
533 | popq %r12; | 537 | popq %r12; |
534 | popq %rbx; | 538 | popq %rbx; |
535 | popq %rbp; | ||
536 | 539 | ||
537 | ret; | 540 | ret; |
538 | ENDPROC(des3_ede_x86_64_crypt_blk_3way) | 541 | ENDPROC(des3_ede_x86_64_crypt_blk_3way) |
diff --git a/arch/x86/crypto/sha1_avx2_x86_64_asm.S b/arch/x86/crypto/sha1_avx2_x86_64_asm.S index 1eab79c9ac48..9f712a7dfd79 100644 --- a/arch/x86/crypto/sha1_avx2_x86_64_asm.S +++ b/arch/x86/crypto/sha1_avx2_x86_64_asm.S | |||
@@ -89,7 +89,7 @@ | |||
89 | #define REG_RE %rdx | 89 | #define REG_RE %rdx |
90 | #define REG_RTA %r12 | 90 | #define REG_RTA %r12 |
91 | #define REG_RTB %rbx | 91 | #define REG_RTB %rbx |
92 | #define REG_T1 %ebp | 92 | #define REG_T1 %r11d |
93 | #define xmm_mov vmovups | 93 | #define xmm_mov vmovups |
94 | #define avx2_zeroupper vzeroupper | 94 | #define avx2_zeroupper vzeroupper |
95 | #define RND_F1 1 | 95 | #define RND_F1 1 |
@@ -637,7 +637,6 @@ _loop3: | |||
637 | ENTRY(\name) | 637 | ENTRY(\name) |
638 | 638 | ||
639 | push %rbx | 639 | push %rbx |
640 | push %rbp | ||
641 | push %r12 | 640 | push %r12 |
642 | push %r13 | 641 | push %r13 |
643 | push %r14 | 642 | push %r14 |
@@ -673,7 +672,6 @@ _loop3: | |||
673 | pop %r14 | 672 | pop %r14 |
674 | pop %r13 | 673 | pop %r13 |
675 | pop %r12 | 674 | pop %r12 |
676 | pop %rbp | ||
677 | pop %rbx | 675 | pop %rbx |
678 | 676 | ||
679 | ret | 677 | ret |
diff --git a/arch/x86/crypto/sha1_ssse3_asm.S b/arch/x86/crypto/sha1_ssse3_asm.S index a4109506a5e8..6204bd53528c 100644 --- a/arch/x86/crypto/sha1_ssse3_asm.S +++ b/arch/x86/crypto/sha1_ssse3_asm.S | |||
@@ -37,7 +37,7 @@ | |||
37 | #define REG_A %ecx | 37 | #define REG_A %ecx |
38 | #define REG_B %esi | 38 | #define REG_B %esi |
39 | #define REG_C %edi | 39 | #define REG_C %edi |
40 | #define REG_D %ebp | 40 | #define REG_D %r12d |
41 | #define REG_E %edx | 41 | #define REG_E %edx |
42 | 42 | ||
43 | #define REG_T1 %eax | 43 | #define REG_T1 %eax |
@@ -74,10 +74,10 @@ | |||
74 | ENTRY(\name) | 74 | ENTRY(\name) |
75 | 75 | ||
76 | push %rbx | 76 | push %rbx |
77 | push %rbp | ||
78 | push %r12 | 77 | push %r12 |
78 | push %rbp | ||
79 | mov %rsp, %rbp | ||
79 | 80 | ||
80 | mov %rsp, %r12 | ||
81 | sub $64, %rsp # allocate workspace | 81 | sub $64, %rsp # allocate workspace |
82 | and $~15, %rsp # align stack | 82 | and $~15, %rsp # align stack |
83 | 83 | ||
@@ -99,10 +99,9 @@ | |||
99 | xor %rax, %rax | 99 | xor %rax, %rax |
100 | rep stosq | 100 | rep stosq |
101 | 101 | ||
102 | mov %r12, %rsp # deallocate workspace | 102 | mov %rbp, %rsp # deallocate workspace |
103 | |||
104 | pop %r12 | ||
105 | pop %rbp | 103 | pop %rbp |
104 | pop %r12 | ||
106 | pop %rbx | 105 | pop %rbx |
107 | ret | 106 | ret |
108 | 107 | ||
diff --git a/arch/x86/crypto/sha256-avx-asm.S b/arch/x86/crypto/sha256-avx-asm.S index e08888a1a5f2..001bbcf93c79 100644 --- a/arch/x86/crypto/sha256-avx-asm.S +++ b/arch/x86/crypto/sha256-avx-asm.S | |||
@@ -103,7 +103,7 @@ SRND = %rsi # clobbers INP | |||
103 | c = %ecx | 103 | c = %ecx |
104 | d = %r8d | 104 | d = %r8d |
105 | e = %edx | 105 | e = %edx |
106 | TBL = %rbp | 106 | TBL = %r12 |
107 | a = %eax | 107 | a = %eax |
108 | b = %ebx | 108 | b = %ebx |
109 | 109 | ||
@@ -350,13 +350,13 @@ a = TMP_ | |||
350 | ENTRY(sha256_transform_avx) | 350 | ENTRY(sha256_transform_avx) |
351 | .align 32 | 351 | .align 32 |
352 | pushq %rbx | 352 | pushq %rbx |
353 | pushq %rbp | 353 | pushq %r12 |
354 | pushq %r13 | 354 | pushq %r13 |
355 | pushq %r14 | 355 | pushq %r14 |
356 | pushq %r15 | 356 | pushq %r15 |
357 | pushq %r12 | 357 | pushq %rbp |
358 | movq %rsp, %rbp | ||
358 | 359 | ||
359 | mov %rsp, %r12 | ||
360 | subq $STACK_SIZE, %rsp # allocate stack space | 360 | subq $STACK_SIZE, %rsp # allocate stack space |
361 | and $~15, %rsp # align stack pointer | 361 | and $~15, %rsp # align stack pointer |
362 | 362 | ||
@@ -452,13 +452,12 @@ loop2: | |||
452 | 452 | ||
453 | done_hash: | 453 | done_hash: |
454 | 454 | ||
455 | mov %r12, %rsp | 455 | mov %rbp, %rsp |
456 | 456 | popq %rbp | |
457 | popq %r12 | ||
458 | popq %r15 | 457 | popq %r15 |
459 | popq %r14 | 458 | popq %r14 |
460 | popq %r13 | 459 | popq %r13 |
461 | popq %rbp | 460 | popq %r12 |
462 | popq %rbx | 461 | popq %rbx |
463 | ret | 462 | ret |
464 | ENDPROC(sha256_transform_avx) | 463 | ENDPROC(sha256_transform_avx) |
diff --git a/arch/x86/crypto/sha256-avx2-asm.S b/arch/x86/crypto/sha256-avx2-asm.S index 89c8f09787d2..1420db15dcdd 100644 --- a/arch/x86/crypto/sha256-avx2-asm.S +++ b/arch/x86/crypto/sha256-avx2-asm.S | |||
@@ -98,8 +98,6 @@ d = %r8d | |||
98 | e = %edx # clobbers NUM_BLKS | 98 | e = %edx # clobbers NUM_BLKS |
99 | y3 = %esi # clobbers INP | 99 | y3 = %esi # clobbers INP |
100 | 100 | ||
101 | |||
102 | TBL = %rbp | ||
103 | SRND = CTX # SRND is same register as CTX | 101 | SRND = CTX # SRND is same register as CTX |
104 | 102 | ||
105 | a = %eax | 103 | a = %eax |
@@ -531,7 +529,6 @@ STACK_SIZE = _RSP + _RSP_SIZE | |||
531 | ENTRY(sha256_transform_rorx) | 529 | ENTRY(sha256_transform_rorx) |
532 | .align 32 | 530 | .align 32 |
533 | pushq %rbx | 531 | pushq %rbx |
534 | pushq %rbp | ||
535 | pushq %r12 | 532 | pushq %r12 |
536 | pushq %r13 | 533 | pushq %r13 |
537 | pushq %r14 | 534 | pushq %r14 |
@@ -568,8 +565,6 @@ ENTRY(sha256_transform_rorx) | |||
568 | mov CTX, _CTX(%rsp) | 565 | mov CTX, _CTX(%rsp) |
569 | 566 | ||
570 | loop0: | 567 | loop0: |
571 | lea K256(%rip), TBL | ||
572 | |||
573 | ## Load first 16 dwords from two blocks | 568 | ## Load first 16 dwords from two blocks |
574 | VMOVDQ 0*32(INP),XTMP0 | 569 | VMOVDQ 0*32(INP),XTMP0 |
575 | VMOVDQ 1*32(INP),XTMP1 | 570 | VMOVDQ 1*32(INP),XTMP1 |
@@ -597,19 +592,19 @@ last_block_enter: | |||
597 | 592 | ||
598 | .align 16 | 593 | .align 16 |
599 | loop1: | 594 | loop1: |
600 | vpaddd 0*32(TBL, SRND), X0, XFER | 595 | vpaddd K256+0*32(SRND), X0, XFER |
601 | vmovdqa XFER, 0*32+_XFER(%rsp, SRND) | 596 | vmovdqa XFER, 0*32+_XFER(%rsp, SRND) |
602 | FOUR_ROUNDS_AND_SCHED _XFER + 0*32 | 597 | FOUR_ROUNDS_AND_SCHED _XFER + 0*32 |
603 | 598 | ||
604 | vpaddd 1*32(TBL, SRND), X0, XFER | 599 | vpaddd K256+1*32(SRND), X0, XFER |
605 | vmovdqa XFER, 1*32+_XFER(%rsp, SRND) | 600 | vmovdqa XFER, 1*32+_XFER(%rsp, SRND) |
606 | FOUR_ROUNDS_AND_SCHED _XFER + 1*32 | 601 | FOUR_ROUNDS_AND_SCHED _XFER + 1*32 |
607 | 602 | ||
608 | vpaddd 2*32(TBL, SRND), X0, XFER | 603 | vpaddd K256+2*32(SRND), X0, XFER |
609 | vmovdqa XFER, 2*32+_XFER(%rsp, SRND) | 604 | vmovdqa XFER, 2*32+_XFER(%rsp, SRND) |
610 | FOUR_ROUNDS_AND_SCHED _XFER + 2*32 | 605 | FOUR_ROUNDS_AND_SCHED _XFER + 2*32 |
611 | 606 | ||
612 | vpaddd 3*32(TBL, SRND), X0, XFER | 607 | vpaddd K256+3*32(SRND), X0, XFER |
613 | vmovdqa XFER, 3*32+_XFER(%rsp, SRND) | 608 | vmovdqa XFER, 3*32+_XFER(%rsp, SRND) |
614 | FOUR_ROUNDS_AND_SCHED _XFER + 3*32 | 609 | FOUR_ROUNDS_AND_SCHED _XFER + 3*32 |
615 | 610 | ||
@@ -619,10 +614,11 @@ loop1: | |||
619 | 614 | ||
620 | loop2: | 615 | loop2: |
621 | ## Do last 16 rounds with no scheduling | 616 | ## Do last 16 rounds with no scheduling |
622 | vpaddd 0*32(TBL, SRND), X0, XFER | 617 | vpaddd K256+0*32(SRND), X0, XFER |
623 | vmovdqa XFER, 0*32+_XFER(%rsp, SRND) | 618 | vmovdqa XFER, 0*32+_XFER(%rsp, SRND) |
624 | DO_4ROUNDS _XFER + 0*32 | 619 | DO_4ROUNDS _XFER + 0*32 |
625 | vpaddd 1*32(TBL, SRND), X1, XFER | 620 | |
621 | vpaddd K256+1*32(SRND), X1, XFER | ||
626 | vmovdqa XFER, 1*32+_XFER(%rsp, SRND) | 622 | vmovdqa XFER, 1*32+_XFER(%rsp, SRND) |
627 | DO_4ROUNDS _XFER + 1*32 | 623 | DO_4ROUNDS _XFER + 1*32 |
628 | add $2*32, SRND | 624 | add $2*32, SRND |
@@ -676,9 +672,6 @@ loop3: | |||
676 | ja done_hash | 672 | ja done_hash |
677 | 673 | ||
678 | do_last_block: | 674 | do_last_block: |
679 | #### do last block | ||
680 | lea K256(%rip), TBL | ||
681 | |||
682 | VMOVDQ 0*16(INP),XWORD0 | 675 | VMOVDQ 0*16(INP),XWORD0 |
683 | VMOVDQ 1*16(INP),XWORD1 | 676 | VMOVDQ 1*16(INP),XWORD1 |
684 | VMOVDQ 2*16(INP),XWORD2 | 677 | VMOVDQ 2*16(INP),XWORD2 |
@@ -718,7 +711,6 @@ done_hash: | |||
718 | popq %r14 | 711 | popq %r14 |
719 | popq %r13 | 712 | popq %r13 |
720 | popq %r12 | 713 | popq %r12 |
721 | popq %rbp | ||
722 | popq %rbx | 714 | popq %rbx |
723 | ret | 715 | ret |
724 | ENDPROC(sha256_transform_rorx) | 716 | ENDPROC(sha256_transform_rorx) |
diff --git a/arch/x86/crypto/sha256-ssse3-asm.S b/arch/x86/crypto/sha256-ssse3-asm.S index 39b83c93e7fd..c6c05ed2c16a 100644 --- a/arch/x86/crypto/sha256-ssse3-asm.S +++ b/arch/x86/crypto/sha256-ssse3-asm.S | |||
@@ -95,7 +95,7 @@ SRND = %rsi # clobbers INP | |||
95 | c = %ecx | 95 | c = %ecx |
96 | d = %r8d | 96 | d = %r8d |
97 | e = %edx | 97 | e = %edx |
98 | TBL = %rbp | 98 | TBL = %r12 |
99 | a = %eax | 99 | a = %eax |
100 | b = %ebx | 100 | b = %ebx |
101 | 101 | ||
@@ -356,13 +356,13 @@ a = TMP_ | |||
356 | ENTRY(sha256_transform_ssse3) | 356 | ENTRY(sha256_transform_ssse3) |
357 | .align 32 | 357 | .align 32 |
358 | pushq %rbx | 358 | pushq %rbx |
359 | pushq %rbp | 359 | pushq %r12 |
360 | pushq %r13 | 360 | pushq %r13 |
361 | pushq %r14 | 361 | pushq %r14 |
362 | pushq %r15 | 362 | pushq %r15 |
363 | pushq %r12 | 363 | pushq %rbp |
364 | mov %rsp, %rbp | ||
364 | 365 | ||
365 | mov %rsp, %r12 | ||
366 | subq $STACK_SIZE, %rsp | 366 | subq $STACK_SIZE, %rsp |
367 | and $~15, %rsp | 367 | and $~15, %rsp |
368 | 368 | ||
@@ -462,13 +462,12 @@ loop2: | |||
462 | 462 | ||
463 | done_hash: | 463 | done_hash: |
464 | 464 | ||
465 | mov %r12, %rsp | 465 | mov %rbp, %rsp |
466 | 466 | popq %rbp | |
467 | popq %r12 | ||
468 | popq %r15 | 467 | popq %r15 |
469 | popq %r14 | 468 | popq %r14 |
470 | popq %r13 | 469 | popq %r13 |
471 | popq %rbp | 470 | popq %r12 |
472 | popq %rbx | 471 | popq %rbx |
473 | 472 | ||
474 | ret | 473 | ret |
diff --git a/arch/x86/crypto/sha512-avx2-asm.S b/arch/x86/crypto/sha512-avx2-asm.S index 7f5f6c6ec72e..b16d56005162 100644 --- a/arch/x86/crypto/sha512-avx2-asm.S +++ b/arch/x86/crypto/sha512-avx2-asm.S | |||
@@ -69,8 +69,9 @@ XFER = YTMP0 | |||
69 | 69 | ||
70 | BYTE_FLIP_MASK = %ymm9 | 70 | BYTE_FLIP_MASK = %ymm9 |
71 | 71 | ||
72 | # 1st arg | 72 | # 1st arg is %rdi, which is saved to the stack and accessed later via %r12 |
73 | CTX = %rdi | 73 | CTX1 = %rdi |
74 | CTX2 = %r12 | ||
74 | # 2nd arg | 75 | # 2nd arg |
75 | INP = %rsi | 76 | INP = %rsi |
76 | # 3rd arg | 77 | # 3rd arg |
@@ -81,7 +82,7 @@ d = %r8 | |||
81 | e = %rdx | 82 | e = %rdx |
82 | y3 = %rsi | 83 | y3 = %rsi |
83 | 84 | ||
84 | TBL = %rbp | 85 | TBL = %rdi # clobbers CTX1 |
85 | 86 | ||
86 | a = %rax | 87 | a = %rax |
87 | b = %rbx | 88 | b = %rbx |
@@ -91,26 +92,26 @@ g = %r10 | |||
91 | h = %r11 | 92 | h = %r11 |
92 | old_h = %r11 | 93 | old_h = %r11 |
93 | 94 | ||
94 | T1 = %r12 | 95 | T1 = %r12 # clobbers CTX2 |
95 | y0 = %r13 | 96 | y0 = %r13 |
96 | y1 = %r14 | 97 | y1 = %r14 |
97 | y2 = %r15 | 98 | y2 = %r15 |
98 | 99 | ||
99 | y4 = %r12 | ||
100 | |||
101 | # Local variables (stack frame) | 100 | # Local variables (stack frame) |
102 | XFER_SIZE = 4*8 | 101 | XFER_SIZE = 4*8 |
103 | SRND_SIZE = 1*8 | 102 | SRND_SIZE = 1*8 |
104 | INP_SIZE = 1*8 | 103 | INP_SIZE = 1*8 |
105 | INPEND_SIZE = 1*8 | 104 | INPEND_SIZE = 1*8 |
105 | CTX_SIZE = 1*8 | ||
106 | RSPSAVE_SIZE = 1*8 | 106 | RSPSAVE_SIZE = 1*8 |
107 | GPRSAVE_SIZE = 6*8 | 107 | GPRSAVE_SIZE = 5*8 |
108 | 108 | ||
109 | frame_XFER = 0 | 109 | frame_XFER = 0 |
110 | frame_SRND = frame_XFER + XFER_SIZE | 110 | frame_SRND = frame_XFER + XFER_SIZE |
111 | frame_INP = frame_SRND + SRND_SIZE | 111 | frame_INP = frame_SRND + SRND_SIZE |
112 | frame_INPEND = frame_INP + INP_SIZE | 112 | frame_INPEND = frame_INP + INP_SIZE |
113 | frame_RSPSAVE = frame_INPEND + INPEND_SIZE | 113 | frame_CTX = frame_INPEND + INPEND_SIZE |
114 | frame_RSPSAVE = frame_CTX + CTX_SIZE | ||
114 | frame_GPRSAVE = frame_RSPSAVE + RSPSAVE_SIZE | 115 | frame_GPRSAVE = frame_RSPSAVE + RSPSAVE_SIZE |
115 | frame_size = frame_GPRSAVE + GPRSAVE_SIZE | 116 | frame_size = frame_GPRSAVE + GPRSAVE_SIZE |
116 | 117 | ||
@@ -576,12 +577,11 @@ ENTRY(sha512_transform_rorx) | |||
576 | mov %rax, frame_RSPSAVE(%rsp) | 577 | mov %rax, frame_RSPSAVE(%rsp) |
577 | 578 | ||
578 | # Save GPRs | 579 | # Save GPRs |
579 | mov %rbp, frame_GPRSAVE(%rsp) | 580 | mov %rbx, 8*0+frame_GPRSAVE(%rsp) |
580 | mov %rbx, 8*1+frame_GPRSAVE(%rsp) | 581 | mov %r12, 8*1+frame_GPRSAVE(%rsp) |
581 | mov %r12, 8*2+frame_GPRSAVE(%rsp) | 582 | mov %r13, 8*2+frame_GPRSAVE(%rsp) |
582 | mov %r13, 8*3+frame_GPRSAVE(%rsp) | 583 | mov %r14, 8*3+frame_GPRSAVE(%rsp) |
583 | mov %r14, 8*4+frame_GPRSAVE(%rsp) | 584 | mov %r15, 8*4+frame_GPRSAVE(%rsp) |
584 | mov %r15, 8*5+frame_GPRSAVE(%rsp) | ||
585 | 585 | ||
586 | shl $7, NUM_BLKS # convert to bytes | 586 | shl $7, NUM_BLKS # convert to bytes |
587 | jz done_hash | 587 | jz done_hash |
@@ -589,14 +589,17 @@ ENTRY(sha512_transform_rorx) | |||
589 | mov NUM_BLKS, frame_INPEND(%rsp) | 589 | mov NUM_BLKS, frame_INPEND(%rsp) |
590 | 590 | ||
591 | ## load initial digest | 591 | ## load initial digest |
592 | mov 8*0(CTX),a | 592 | mov 8*0(CTX1), a |
593 | mov 8*1(CTX),b | 593 | mov 8*1(CTX1), b |
594 | mov 8*2(CTX),c | 594 | mov 8*2(CTX1), c |
595 | mov 8*3(CTX),d | 595 | mov 8*3(CTX1), d |
596 | mov 8*4(CTX),e | 596 | mov 8*4(CTX1), e |
597 | mov 8*5(CTX),f | 597 | mov 8*5(CTX1), f |
598 | mov 8*6(CTX),g | 598 | mov 8*6(CTX1), g |
599 | mov 8*7(CTX),h | 599 | mov 8*7(CTX1), h |
600 | |||
601 | # save %rdi (CTX) before it gets clobbered | ||
602 | mov %rdi, frame_CTX(%rsp) | ||
600 | 603 | ||
601 | vmovdqa PSHUFFLE_BYTE_FLIP_MASK(%rip), BYTE_FLIP_MASK | 604 | vmovdqa PSHUFFLE_BYTE_FLIP_MASK(%rip), BYTE_FLIP_MASK |
602 | 605 | ||
@@ -652,14 +655,15 @@ loop2: | |||
652 | subq $1, frame_SRND(%rsp) | 655 | subq $1, frame_SRND(%rsp) |
653 | jne loop2 | 656 | jne loop2 |
654 | 657 | ||
655 | addm 8*0(CTX),a | 658 | mov frame_CTX(%rsp), CTX2 |
656 | addm 8*1(CTX),b | 659 | addm 8*0(CTX2), a |
657 | addm 8*2(CTX),c | 660 | addm 8*1(CTX2), b |
658 | addm 8*3(CTX),d | 661 | addm 8*2(CTX2), c |
659 | addm 8*4(CTX),e | 662 | addm 8*3(CTX2), d |
660 | addm 8*5(CTX),f | 663 | addm 8*4(CTX2), e |
661 | addm 8*6(CTX),g | 664 | addm 8*5(CTX2), f |
662 | addm 8*7(CTX),h | 665 | addm 8*6(CTX2), g |
666 | addm 8*7(CTX2), h | ||
663 | 667 | ||
664 | mov frame_INP(%rsp), INP | 668 | mov frame_INP(%rsp), INP |
665 | add $128, INP | 669 | add $128, INP |
@@ -669,12 +673,11 @@ loop2: | |||
669 | done_hash: | 673 | done_hash: |
670 | 674 | ||
671 | # Restore GPRs | 675 | # Restore GPRs |
672 | mov frame_GPRSAVE(%rsp) ,%rbp | 676 | mov 8*0+frame_GPRSAVE(%rsp), %rbx |
673 | mov 8*1+frame_GPRSAVE(%rsp) ,%rbx | 677 | mov 8*1+frame_GPRSAVE(%rsp), %r12 |
674 | mov 8*2+frame_GPRSAVE(%rsp) ,%r12 | 678 | mov 8*2+frame_GPRSAVE(%rsp), %r13 |
675 | mov 8*3+frame_GPRSAVE(%rsp) ,%r13 | 679 | mov 8*3+frame_GPRSAVE(%rsp), %r14 |
676 | mov 8*4+frame_GPRSAVE(%rsp) ,%r14 | 680 | mov 8*4+frame_GPRSAVE(%rsp), %r15 |
677 | mov 8*5+frame_GPRSAVE(%rsp) ,%r15 | ||
678 | 681 | ||
679 | # Restore Stack Pointer | 682 | # Restore Stack Pointer |
680 | mov frame_RSPSAVE(%rsp), %rsp | 683 | mov frame_RSPSAVE(%rsp), %rsp |
diff --git a/arch/x86/crypto/twofish-avx-x86_64-asm_64.S b/arch/x86/crypto/twofish-avx-x86_64-asm_64.S index b3f49d286348..73b471da3622 100644 --- a/arch/x86/crypto/twofish-avx-x86_64-asm_64.S +++ b/arch/x86/crypto/twofish-avx-x86_64-asm_64.S | |||
@@ -76,8 +76,8 @@ | |||
76 | #define RT %xmm14 | 76 | #define RT %xmm14 |
77 | #define RR %xmm15 | 77 | #define RR %xmm15 |
78 | 78 | ||
79 | #define RID1 %rbp | 79 | #define RID1 %r13 |
80 | #define RID1d %ebp | 80 | #define RID1d %r13d |
81 | #define RID2 %rsi | 81 | #define RID2 %rsi |
82 | #define RID2d %esi | 82 | #define RID2d %esi |
83 | 83 | ||
@@ -259,7 +259,7 @@ __twofish_enc_blk8: | |||
259 | 259 | ||
260 | vmovdqu w(CTX), RK1; | 260 | vmovdqu w(CTX), RK1; |
261 | 261 | ||
262 | pushq %rbp; | 262 | pushq %r13; |
263 | pushq %rbx; | 263 | pushq %rbx; |
264 | pushq %rcx; | 264 | pushq %rcx; |
265 | 265 | ||
@@ -282,7 +282,7 @@ __twofish_enc_blk8: | |||
282 | 282 | ||
283 | popq %rcx; | 283 | popq %rcx; |
284 | popq %rbx; | 284 | popq %rbx; |
285 | popq %rbp; | 285 | popq %r13; |
286 | 286 | ||
287 | outunpack_blocks(RC1, RD1, RA1, RB1, RK1, RX0, RY0, RK2); | 287 | outunpack_blocks(RC1, RD1, RA1, RB1, RK1, RX0, RY0, RK2); |
288 | outunpack_blocks(RC2, RD2, RA2, RB2, RK1, RX0, RY0, RK2); | 288 | outunpack_blocks(RC2, RD2, RA2, RB2, RK1, RX0, RY0, RK2); |
@@ -301,7 +301,7 @@ __twofish_dec_blk8: | |||
301 | 301 | ||
302 | vmovdqu (w+4*4)(CTX), RK1; | 302 | vmovdqu (w+4*4)(CTX), RK1; |
303 | 303 | ||
304 | pushq %rbp; | 304 | pushq %r13; |
305 | pushq %rbx; | 305 | pushq %rbx; |
306 | 306 | ||
307 | inpack_blocks(RC1, RD1, RA1, RB1, RK1, RX0, RY0, RK2); | 307 | inpack_blocks(RC1, RD1, RA1, RB1, RK1, RX0, RY0, RK2); |
@@ -322,7 +322,7 @@ __twofish_dec_blk8: | |||
322 | vmovdqu (w)(CTX), RK1; | 322 | vmovdqu (w)(CTX), RK1; |
323 | 323 | ||
324 | popq %rbx; | 324 | popq %rbx; |
325 | popq %rbp; | 325 | popq %r13; |
326 | 326 | ||
327 | outunpack_blocks(RA1, RB1, RC1, RD1, RK1, RX0, RY0, RK2); | 327 | outunpack_blocks(RA1, RB1, RC1, RD1, RK1, RX0, RY0, RK2); |
328 | outunpack_blocks(RA2, RB2, RC2, RD2, RK1, RX0, RY0, RK2); | 328 | outunpack_blocks(RA2, RB2, RC2, RD2, RK1, RX0, RY0, RK2); |
diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c index 829e89cfcee2..9fb9a1f1e47b 100644 --- a/arch/x86/events/intel/core.c +++ b/arch/x86/events/intel/core.c | |||
@@ -4409,10 +4409,9 @@ static __init int fixup_ht_bug(void) | |||
4409 | return 0; | 4409 | return 0; |
4410 | } | 4410 | } |
4411 | 4411 | ||
4412 | if (lockup_detector_suspend() != 0) { | 4412 | cpus_read_lock(); |
4413 | pr_debug("failed to disable PMU erratum BJ122, BV98, HSD29 workaround\n"); | 4413 | |
4414 | return 0; | 4414 | hardlockup_detector_perf_stop(); |
4415 | } | ||
4416 | 4415 | ||
4417 | x86_pmu.flags &= ~(PMU_FL_EXCL_CNTRS | PMU_FL_EXCL_ENABLED); | 4416 | x86_pmu.flags &= ~(PMU_FL_EXCL_CNTRS | PMU_FL_EXCL_ENABLED); |
4418 | 4417 | ||
@@ -4420,9 +4419,7 @@ static __init int fixup_ht_bug(void) | |||
4420 | x86_pmu.commit_scheduling = NULL; | 4419 | x86_pmu.commit_scheduling = NULL; |
4421 | x86_pmu.stop_scheduling = NULL; | 4420 | x86_pmu.stop_scheduling = NULL; |
4422 | 4421 | ||
4423 | lockup_detector_resume(); | 4422 | hardlockup_detector_perf_restart(); |
4424 | |||
4425 | cpus_read_lock(); | ||
4426 | 4423 | ||
4427 | for_each_online_cpu(c) | 4424 | for_each_online_cpu(c) |
4428 | free_excl_cntrs(c); | 4425 | free_excl_cntrs(c); |
diff --git a/arch/x86/events/intel/cstate.c b/arch/x86/events/intel/cstate.c index 4cf100ff2a37..72db0664a53d 100644 --- a/arch/x86/events/intel/cstate.c +++ b/arch/x86/events/intel/cstate.c | |||
@@ -552,6 +552,7 @@ static const struct x86_cpu_id intel_cstates_match[] __initconst = { | |||
552 | 552 | ||
553 | X86_CSTATES_MODEL(INTEL_FAM6_SKYLAKE_MOBILE, snb_cstates), | 553 | X86_CSTATES_MODEL(INTEL_FAM6_SKYLAKE_MOBILE, snb_cstates), |
554 | X86_CSTATES_MODEL(INTEL_FAM6_SKYLAKE_DESKTOP, snb_cstates), | 554 | X86_CSTATES_MODEL(INTEL_FAM6_SKYLAKE_DESKTOP, snb_cstates), |
555 | X86_CSTATES_MODEL(INTEL_FAM6_SKYLAKE_X, snb_cstates), | ||
555 | 556 | ||
556 | X86_CSTATES_MODEL(INTEL_FAM6_KABYLAKE_MOBILE, snb_cstates), | 557 | X86_CSTATES_MODEL(INTEL_FAM6_KABYLAKE_MOBILE, snb_cstates), |
557 | X86_CSTATES_MODEL(INTEL_FAM6_KABYLAKE_DESKTOP, snb_cstates), | 558 | X86_CSTATES_MODEL(INTEL_FAM6_KABYLAKE_DESKTOP, snb_cstates), |
@@ -560,6 +561,9 @@ static const struct x86_cpu_id intel_cstates_match[] __initconst = { | |||
560 | X86_CSTATES_MODEL(INTEL_FAM6_XEON_PHI_KNM, knl_cstates), | 561 | X86_CSTATES_MODEL(INTEL_FAM6_XEON_PHI_KNM, knl_cstates), |
561 | 562 | ||
562 | X86_CSTATES_MODEL(INTEL_FAM6_ATOM_GOLDMONT, glm_cstates), | 563 | X86_CSTATES_MODEL(INTEL_FAM6_ATOM_GOLDMONT, glm_cstates), |
564 | X86_CSTATES_MODEL(INTEL_FAM6_ATOM_DENVERTON, glm_cstates), | ||
565 | |||
566 | X86_CSTATES_MODEL(INTEL_FAM6_ATOM_GEMINI_LAKE, glm_cstates), | ||
563 | { }, | 567 | { }, |
564 | }; | 568 | }; |
565 | MODULE_DEVICE_TABLE(x86cpu, intel_cstates_match); | 569 | MODULE_DEVICE_TABLE(x86cpu, intel_cstates_match); |
diff --git a/arch/x86/events/intel/rapl.c b/arch/x86/events/intel/rapl.c index 8e2457cb6b4a..005908ee9333 100644 --- a/arch/x86/events/intel/rapl.c +++ b/arch/x86/events/intel/rapl.c | |||
@@ -775,6 +775,9 @@ static const struct x86_cpu_id rapl_cpu_match[] __initconst = { | |||
775 | X86_RAPL_MODEL_MATCH(INTEL_FAM6_KABYLAKE_DESKTOP, skl_rapl_init), | 775 | X86_RAPL_MODEL_MATCH(INTEL_FAM6_KABYLAKE_DESKTOP, skl_rapl_init), |
776 | 776 | ||
777 | X86_RAPL_MODEL_MATCH(INTEL_FAM6_ATOM_GOLDMONT, hsw_rapl_init), | 777 | X86_RAPL_MODEL_MATCH(INTEL_FAM6_ATOM_GOLDMONT, hsw_rapl_init), |
778 | X86_RAPL_MODEL_MATCH(INTEL_FAM6_ATOM_DENVERTON, hsw_rapl_init), | ||
779 | |||
780 | X86_RAPL_MODEL_MATCH(INTEL_FAM6_ATOM_GEMINI_LAKE, hsw_rapl_init), | ||
778 | {}, | 781 | {}, |
779 | }; | 782 | }; |
780 | 783 | ||
diff --git a/arch/x86/events/intel/uncore_snbep.c b/arch/x86/events/intel/uncore_snbep.c index db1fe377e6dd..a7196818416a 100644 --- a/arch/x86/events/intel/uncore_snbep.c +++ b/arch/x86/events/intel/uncore_snbep.c | |||
@@ -3462,7 +3462,7 @@ static struct intel_uncore_ops skx_uncore_iio_ops = { | |||
3462 | static struct intel_uncore_type skx_uncore_iio = { | 3462 | static struct intel_uncore_type skx_uncore_iio = { |
3463 | .name = "iio", | 3463 | .name = "iio", |
3464 | .num_counters = 4, | 3464 | .num_counters = 4, |
3465 | .num_boxes = 5, | 3465 | .num_boxes = 6, |
3466 | .perf_ctr_bits = 48, | 3466 | .perf_ctr_bits = 48, |
3467 | .event_ctl = SKX_IIO0_MSR_PMON_CTL0, | 3467 | .event_ctl = SKX_IIO0_MSR_PMON_CTL0, |
3468 | .perf_ctr = SKX_IIO0_MSR_PMON_CTR0, | 3468 | .perf_ctr = SKX_IIO0_MSR_PMON_CTR0, |
@@ -3492,7 +3492,7 @@ static const struct attribute_group skx_uncore_format_group = { | |||
3492 | static struct intel_uncore_type skx_uncore_irp = { | 3492 | static struct intel_uncore_type skx_uncore_irp = { |
3493 | .name = "irp", | 3493 | .name = "irp", |
3494 | .num_counters = 2, | 3494 | .num_counters = 2, |
3495 | .num_boxes = 5, | 3495 | .num_boxes = 6, |
3496 | .perf_ctr_bits = 48, | 3496 | .perf_ctr_bits = 48, |
3497 | .event_ctl = SKX_IRP0_MSR_PMON_CTL0, | 3497 | .event_ctl = SKX_IRP0_MSR_PMON_CTL0, |
3498 | .perf_ctr = SKX_IRP0_MSR_PMON_CTR0, | 3498 | .perf_ctr = SKX_IRP0_MSR_PMON_CTR0, |
diff --git a/arch/x86/events/msr.c b/arch/x86/events/msr.c index 4bb3ec69e8ea..06723671ae4e 100644 --- a/arch/x86/events/msr.c +++ b/arch/x86/events/msr.c | |||
@@ -63,6 +63,14 @@ static bool test_intel(int idx) | |||
63 | case INTEL_FAM6_ATOM_SILVERMONT1: | 63 | case INTEL_FAM6_ATOM_SILVERMONT1: |
64 | case INTEL_FAM6_ATOM_SILVERMONT2: | 64 | case INTEL_FAM6_ATOM_SILVERMONT2: |
65 | case INTEL_FAM6_ATOM_AIRMONT: | 65 | case INTEL_FAM6_ATOM_AIRMONT: |
66 | |||
67 | case INTEL_FAM6_ATOM_GOLDMONT: | ||
68 | case INTEL_FAM6_ATOM_DENVERTON: | ||
69 | |||
70 | case INTEL_FAM6_ATOM_GEMINI_LAKE: | ||
71 | |||
72 | case INTEL_FAM6_XEON_PHI_KNL: | ||
73 | case INTEL_FAM6_XEON_PHI_KNM: | ||
66 | if (idx == PERF_MSR_SMI) | 74 | if (idx == PERF_MSR_SMI) |
67 | return true; | 75 | return true; |
68 | break; | 76 | break; |
diff --git a/arch/x86/ia32/ia32_signal.c b/arch/x86/ia32/ia32_signal.c index e0bb46c02857..0e2a5edbce00 100644 --- a/arch/x86/ia32/ia32_signal.c +++ b/arch/x86/ia32/ia32_signal.c | |||
@@ -231,7 +231,7 @@ static void __user *get_sigframe(struct ksignal *ksig, struct pt_regs *regs, | |||
231 | ksig->ka.sa.sa_restorer) | 231 | ksig->ka.sa.sa_restorer) |
232 | sp = (unsigned long) ksig->ka.sa.sa_restorer; | 232 | sp = (unsigned long) ksig->ka.sa.sa_restorer; |
233 | 233 | ||
234 | if (fpu->fpstate_active) { | 234 | if (fpu->initialized) { |
235 | unsigned long fx_aligned, math_size; | 235 | unsigned long fx_aligned, math_size; |
236 | 236 | ||
237 | sp = fpu__alloc_mathframe(sp, 1, &fx_aligned, &math_size); | 237 | sp = fpu__alloc_mathframe(sp, 1, &fx_aligned, &math_size); |
diff --git a/arch/x86/include/asm/alternative.h b/arch/x86/include/asm/alternative.h index 1b020381ab38..c096624137ae 100644 --- a/arch/x86/include/asm/alternative.h +++ b/arch/x86/include/asm/alternative.h | |||
@@ -218,10 +218,9 @@ static inline int alternatives_text_reserved(void *start, void *end) | |||
218 | #define alternative_call_2(oldfunc, newfunc1, feature1, newfunc2, feature2, \ | 218 | #define alternative_call_2(oldfunc, newfunc1, feature1, newfunc2, feature2, \ |
219 | output, input...) \ | 219 | output, input...) \ |
220 | { \ | 220 | { \ |
221 | register void *__sp asm(_ASM_SP); \ | ||
222 | asm volatile (ALTERNATIVE_2("call %P[old]", "call %P[new1]", feature1,\ | 221 | asm volatile (ALTERNATIVE_2("call %P[old]", "call %P[new1]", feature1,\ |
223 | "call %P[new2]", feature2) \ | 222 | "call %P[new2]", feature2) \ |
224 | : output, "+r" (__sp) \ | 223 | : output, ASM_CALL_CONSTRAINT \ |
225 | : [old] "i" (oldfunc), [new1] "i" (newfunc1), \ | 224 | : [old] "i" (oldfunc), [new1] "i" (newfunc1), \ |
226 | [new2] "i" (newfunc2), ## input); \ | 225 | [new2] "i" (newfunc2), ## input); \ |
227 | } | 226 | } |
diff --git a/arch/x86/include/asm/asm.h b/arch/x86/include/asm/asm.h index 676ee5807d86..b0dc91f4bedc 100644 --- a/arch/x86/include/asm/asm.h +++ b/arch/x86/include/asm/asm.h | |||
@@ -11,10 +11,12 @@ | |||
11 | # define __ASM_FORM_COMMA(x) " " #x "," | 11 | # define __ASM_FORM_COMMA(x) " " #x "," |
12 | #endif | 12 | #endif |
13 | 13 | ||
14 | #ifdef CONFIG_X86_32 | 14 | #ifndef __x86_64__ |
15 | /* 32 bit */ | ||
15 | # define __ASM_SEL(a,b) __ASM_FORM(a) | 16 | # define __ASM_SEL(a,b) __ASM_FORM(a) |
16 | # define __ASM_SEL_RAW(a,b) __ASM_FORM_RAW(a) | 17 | # define __ASM_SEL_RAW(a,b) __ASM_FORM_RAW(a) |
17 | #else | 18 | #else |
19 | /* 64 bit */ | ||
18 | # define __ASM_SEL(a,b) __ASM_FORM(b) | 20 | # define __ASM_SEL(a,b) __ASM_FORM(b) |
19 | # define __ASM_SEL_RAW(a,b) __ASM_FORM_RAW(b) | 21 | # define __ASM_SEL_RAW(a,b) __ASM_FORM_RAW(b) |
20 | #endif | 22 | #endif |
@@ -132,4 +134,15 @@ | |||
132 | /* For C file, we already have NOKPROBE_SYMBOL macro */ | 134 | /* For C file, we already have NOKPROBE_SYMBOL macro */ |
133 | #endif | 135 | #endif |
134 | 136 | ||
137 | #ifndef __ASSEMBLY__ | ||
138 | /* | ||
139 | * This output constraint should be used for any inline asm which has a "call" | ||
140 | * instruction. Otherwise the asm may be inserted before the frame pointer | ||
141 | * gets set up by the containing function. If you forget to do this, objtool | ||
142 | * may print a "call without frame pointer save/setup" warning. | ||
143 | */ | ||
144 | register unsigned long current_stack_pointer asm(_ASM_SP); | ||
145 | #define ASM_CALL_CONSTRAINT "+r" (current_stack_pointer) | ||
146 | #endif | ||
147 | |||
135 | #endif /* _ASM_X86_ASM_H */ | 148 | #endif /* _ASM_X86_ASM_H */ |
diff --git a/arch/x86/include/asm/fpu/internal.h b/arch/x86/include/asm/fpu/internal.h index 554cdb205d17..e3221ffa304e 100644 --- a/arch/x86/include/asm/fpu/internal.h +++ b/arch/x86/include/asm/fpu/internal.h | |||
@@ -23,11 +23,9 @@ | |||
23 | /* | 23 | /* |
24 | * High level FPU state handling functions: | 24 | * High level FPU state handling functions: |
25 | */ | 25 | */ |
26 | extern void fpu__activate_curr(struct fpu *fpu); | 26 | extern void fpu__initialize(struct fpu *fpu); |
27 | extern void fpu__activate_fpstate_read(struct fpu *fpu); | 27 | extern void fpu__prepare_read(struct fpu *fpu); |
28 | extern void fpu__activate_fpstate_write(struct fpu *fpu); | 28 | extern void fpu__prepare_write(struct fpu *fpu); |
29 | extern void fpu__current_fpstate_write_begin(void); | ||
30 | extern void fpu__current_fpstate_write_end(void); | ||
31 | extern void fpu__save(struct fpu *fpu); | 29 | extern void fpu__save(struct fpu *fpu); |
32 | extern void fpu__restore(struct fpu *fpu); | 30 | extern void fpu__restore(struct fpu *fpu); |
33 | extern int fpu__restore_sig(void __user *buf, int ia32_frame); | 31 | extern int fpu__restore_sig(void __user *buf, int ia32_frame); |
@@ -120,20 +118,11 @@ extern void fpstate_sanitize_xstate(struct fpu *fpu); | |||
120 | err; \ | 118 | err; \ |
121 | }) | 119 | }) |
122 | 120 | ||
123 | #define check_insn(insn, output, input...) \ | 121 | #define kernel_insn(insn, output, input...) \ |
124 | ({ \ | ||
125 | int err; \ | ||
126 | asm volatile("1:" #insn "\n\t" \ | 122 | asm volatile("1:" #insn "\n\t" \ |
127 | "2:\n" \ | 123 | "2:\n" \ |
128 | ".section .fixup,\"ax\"\n" \ | 124 | _ASM_EXTABLE_HANDLE(1b, 2b, ex_handler_fprestore) \ |
129 | "3: movl $-1,%[err]\n" \ | 125 | : output : input) |
130 | " jmp 2b\n" \ | ||
131 | ".previous\n" \ | ||
132 | _ASM_EXTABLE(1b, 3b) \ | ||
133 | : [err] "=r" (err), output \ | ||
134 | : "0"(0), input); \ | ||
135 | err; \ | ||
136 | }) | ||
137 | 126 | ||
138 | static inline int copy_fregs_to_user(struct fregs_state __user *fx) | 127 | static inline int copy_fregs_to_user(struct fregs_state __user *fx) |
139 | { | 128 | { |
@@ -153,20 +142,16 @@ static inline int copy_fxregs_to_user(struct fxregs_state __user *fx) | |||
153 | 142 | ||
154 | static inline void copy_kernel_to_fxregs(struct fxregs_state *fx) | 143 | static inline void copy_kernel_to_fxregs(struct fxregs_state *fx) |
155 | { | 144 | { |
156 | int err; | ||
157 | |||
158 | if (IS_ENABLED(CONFIG_X86_32)) { | 145 | if (IS_ENABLED(CONFIG_X86_32)) { |
159 | err = check_insn(fxrstor %[fx], "=m" (*fx), [fx] "m" (*fx)); | 146 | kernel_insn(fxrstor %[fx], "=m" (*fx), [fx] "m" (*fx)); |
160 | } else { | 147 | } else { |
161 | if (IS_ENABLED(CONFIG_AS_FXSAVEQ)) { | 148 | if (IS_ENABLED(CONFIG_AS_FXSAVEQ)) { |
162 | err = check_insn(fxrstorq %[fx], "=m" (*fx), [fx] "m" (*fx)); | 149 | kernel_insn(fxrstorq %[fx], "=m" (*fx), [fx] "m" (*fx)); |
163 | } else { | 150 | } else { |
164 | /* See comment in copy_fxregs_to_kernel() below. */ | 151 | /* See comment in copy_fxregs_to_kernel() below. */ |
165 | err = check_insn(rex64/fxrstor (%[fx]), "=m" (*fx), [fx] "R" (fx), "m" (*fx)); | 152 | kernel_insn(rex64/fxrstor (%[fx]), "=m" (*fx), [fx] "R" (fx), "m" (*fx)); |
166 | } | 153 | } |
167 | } | 154 | } |
168 | /* Copying from a kernel buffer to FPU registers should never fail: */ | ||
169 | WARN_ON_FPU(err); | ||
170 | } | 155 | } |
171 | 156 | ||
172 | static inline int copy_user_to_fxregs(struct fxregs_state __user *fx) | 157 | static inline int copy_user_to_fxregs(struct fxregs_state __user *fx) |
@@ -183,9 +168,7 @@ static inline int copy_user_to_fxregs(struct fxregs_state __user *fx) | |||
183 | 168 | ||
184 | static inline void copy_kernel_to_fregs(struct fregs_state *fx) | 169 | static inline void copy_kernel_to_fregs(struct fregs_state *fx) |
185 | { | 170 | { |
186 | int err = check_insn(frstor %[fx], "=m" (*fx), [fx] "m" (*fx)); | 171 | kernel_insn(frstor %[fx], "=m" (*fx), [fx] "m" (*fx)); |
187 | |||
188 | WARN_ON_FPU(err); | ||
189 | } | 172 | } |
190 | 173 | ||
191 | static inline int copy_user_to_fregs(struct fregs_state __user *fx) | 174 | static inline int copy_user_to_fregs(struct fregs_state __user *fx) |
@@ -281,18 +264,13 @@ static inline void copy_fxregs_to_kernel(struct fpu *fpu) | |||
281 | * Use XRSTORS to restore context if it is enabled. XRSTORS supports compact | 264 | * Use XRSTORS to restore context if it is enabled. XRSTORS supports compact |
282 | * XSAVE area format. | 265 | * XSAVE area format. |
283 | */ | 266 | */ |
284 | #define XSTATE_XRESTORE(st, lmask, hmask, err) \ | 267 | #define XSTATE_XRESTORE(st, lmask, hmask) \ |
285 | asm volatile(ALTERNATIVE(XRSTOR, \ | 268 | asm volatile(ALTERNATIVE(XRSTOR, \ |
286 | XRSTORS, X86_FEATURE_XSAVES) \ | 269 | XRSTORS, X86_FEATURE_XSAVES) \ |
287 | "\n" \ | 270 | "\n" \ |
288 | "xor %[err], %[err]\n" \ | ||
289 | "3:\n" \ | 271 | "3:\n" \ |
290 | ".pushsection .fixup,\"ax\"\n" \ | 272 | _ASM_EXTABLE_HANDLE(661b, 3b, ex_handler_fprestore)\ |
291 | "4: movl $-2, %[err]\n" \ | 273 | : \ |
292 | "jmp 3b\n" \ | ||
293 | ".popsection\n" \ | ||
294 | _ASM_EXTABLE(661b, 4b) \ | ||
295 | : [err] "=r" (err) \ | ||
296 | : "D" (st), "m" (*st), "a" (lmask), "d" (hmask) \ | 274 | : "D" (st), "m" (*st), "a" (lmask), "d" (hmask) \ |
297 | : "memory") | 275 | : "memory") |
298 | 276 | ||
@@ -336,7 +314,10 @@ static inline void copy_kernel_to_xregs_booting(struct xregs_state *xstate) | |||
336 | else | 314 | else |
337 | XSTATE_OP(XRSTOR, xstate, lmask, hmask, err); | 315 | XSTATE_OP(XRSTOR, xstate, lmask, hmask, err); |
338 | 316 | ||
339 | /* We should never fault when copying from a kernel buffer: */ | 317 | /* |
318 | * We should never fault when copying from a kernel buffer, and the FPU | ||
319 | * state we set at boot time should be valid. | ||
320 | */ | ||
340 | WARN_ON_FPU(err); | 321 | WARN_ON_FPU(err); |
341 | } | 322 | } |
342 | 323 | ||
@@ -350,7 +331,7 @@ static inline void copy_xregs_to_kernel(struct xregs_state *xstate) | |||
350 | u32 hmask = mask >> 32; | 331 | u32 hmask = mask >> 32; |
351 | int err; | 332 | int err; |
352 | 333 | ||
353 | WARN_ON(!alternatives_patched); | 334 | WARN_ON_FPU(!alternatives_patched); |
354 | 335 | ||
355 | XSTATE_XSAVE(xstate, lmask, hmask, err); | 336 | XSTATE_XSAVE(xstate, lmask, hmask, err); |
356 | 337 | ||
@@ -365,12 +346,8 @@ static inline void copy_kernel_to_xregs(struct xregs_state *xstate, u64 mask) | |||
365 | { | 346 | { |
366 | u32 lmask = mask; | 347 | u32 lmask = mask; |
367 | u32 hmask = mask >> 32; | 348 | u32 hmask = mask >> 32; |
368 | int err; | ||
369 | |||
370 | XSTATE_XRESTORE(xstate, lmask, hmask, err); | ||
371 | 349 | ||
372 | /* We should never fault when copying from a kernel buffer: */ | 350 | XSTATE_XRESTORE(xstate, lmask, hmask); |
373 | WARN_ON_FPU(err); | ||
374 | } | 351 | } |
375 | 352 | ||
376 | /* | 353 | /* |
@@ -526,38 +503,17 @@ static inline int fpregs_state_valid(struct fpu *fpu, unsigned int cpu) | |||
526 | */ | 503 | */ |
527 | static inline void fpregs_deactivate(struct fpu *fpu) | 504 | static inline void fpregs_deactivate(struct fpu *fpu) |
528 | { | 505 | { |
529 | WARN_ON_FPU(!fpu->fpregs_active); | ||
530 | |||
531 | fpu->fpregs_active = 0; | ||
532 | this_cpu_write(fpu_fpregs_owner_ctx, NULL); | 506 | this_cpu_write(fpu_fpregs_owner_ctx, NULL); |
533 | trace_x86_fpu_regs_deactivated(fpu); | 507 | trace_x86_fpu_regs_deactivated(fpu); |
534 | } | 508 | } |
535 | 509 | ||
536 | static inline void fpregs_activate(struct fpu *fpu) | 510 | static inline void fpregs_activate(struct fpu *fpu) |
537 | { | 511 | { |
538 | WARN_ON_FPU(fpu->fpregs_active); | ||
539 | |||
540 | fpu->fpregs_active = 1; | ||
541 | this_cpu_write(fpu_fpregs_owner_ctx, fpu); | 512 | this_cpu_write(fpu_fpregs_owner_ctx, fpu); |
542 | trace_x86_fpu_regs_activated(fpu); | 513 | trace_x86_fpu_regs_activated(fpu); |
543 | } | 514 | } |
544 | 515 | ||
545 | /* | 516 | /* |
546 | * The question "does this thread have fpu access?" | ||
547 | * is slightly racy, since preemption could come in | ||
548 | * and revoke it immediately after the test. | ||
549 | * | ||
550 | * However, even in that very unlikely scenario, | ||
551 | * we can just assume we have FPU access - typically | ||
552 | * to save the FP state - we'll just take a #NM | ||
553 | * fault and get the FPU access back. | ||
554 | */ | ||
555 | static inline int fpregs_active(void) | ||
556 | { | ||
557 | return current->thread.fpu.fpregs_active; | ||
558 | } | ||
559 | |||
560 | /* | ||
561 | * FPU state switching for scheduling. | 517 | * FPU state switching for scheduling. |
562 | * | 518 | * |
563 | * This is a two-stage process: | 519 | * This is a two-stage process: |
@@ -571,14 +527,13 @@ static inline int fpregs_active(void) | |||
571 | static inline void | 527 | static inline void |
572 | switch_fpu_prepare(struct fpu *old_fpu, int cpu) | 528 | switch_fpu_prepare(struct fpu *old_fpu, int cpu) |
573 | { | 529 | { |
574 | if (old_fpu->fpregs_active) { | 530 | if (old_fpu->initialized) { |
575 | if (!copy_fpregs_to_fpstate(old_fpu)) | 531 | if (!copy_fpregs_to_fpstate(old_fpu)) |
576 | old_fpu->last_cpu = -1; | 532 | old_fpu->last_cpu = -1; |
577 | else | 533 | else |
578 | old_fpu->last_cpu = cpu; | 534 | old_fpu->last_cpu = cpu; |
579 | 535 | ||
580 | /* But leave fpu_fpregs_owner_ctx! */ | 536 | /* But leave fpu_fpregs_owner_ctx! */ |
581 | old_fpu->fpregs_active = 0; | ||
582 | trace_x86_fpu_regs_deactivated(old_fpu); | 537 | trace_x86_fpu_regs_deactivated(old_fpu); |
583 | } else | 538 | } else |
584 | old_fpu->last_cpu = -1; | 539 | old_fpu->last_cpu = -1; |
@@ -595,7 +550,7 @@ switch_fpu_prepare(struct fpu *old_fpu, int cpu) | |||
595 | static inline void switch_fpu_finish(struct fpu *new_fpu, int cpu) | 550 | static inline void switch_fpu_finish(struct fpu *new_fpu, int cpu) |
596 | { | 551 | { |
597 | bool preload = static_cpu_has(X86_FEATURE_FPU) && | 552 | bool preload = static_cpu_has(X86_FEATURE_FPU) && |
598 | new_fpu->fpstate_active; | 553 | new_fpu->initialized; |
599 | 554 | ||
600 | if (preload) { | 555 | if (preload) { |
601 | if (!fpregs_state_valid(new_fpu, cpu)) | 556 | if (!fpregs_state_valid(new_fpu, cpu)) |
@@ -617,8 +572,7 @@ static inline void user_fpu_begin(void) | |||
617 | struct fpu *fpu = ¤t->thread.fpu; | 572 | struct fpu *fpu = ¤t->thread.fpu; |
618 | 573 | ||
619 | preempt_disable(); | 574 | preempt_disable(); |
620 | if (!fpregs_active()) | 575 | fpregs_activate(fpu); |
621 | fpregs_activate(fpu); | ||
622 | preempt_enable(); | 576 | preempt_enable(); |
623 | } | 577 | } |
624 | 578 | ||
diff --git a/arch/x86/include/asm/fpu/types.h b/arch/x86/include/asm/fpu/types.h index 3c80f5b9c09d..a1520575d86b 100644 --- a/arch/x86/include/asm/fpu/types.h +++ b/arch/x86/include/asm/fpu/types.h | |||
@@ -68,6 +68,9 @@ struct fxregs_state { | |||
68 | /* Default value for fxregs_state.mxcsr: */ | 68 | /* Default value for fxregs_state.mxcsr: */ |
69 | #define MXCSR_DEFAULT 0x1f80 | 69 | #define MXCSR_DEFAULT 0x1f80 |
70 | 70 | ||
71 | /* Copy both mxcsr & mxcsr_flags with a single u64 memcpy: */ | ||
72 | #define MXCSR_AND_FLAGS_SIZE sizeof(u64) | ||
73 | |||
71 | /* | 74 | /* |
72 | * Software based FPU emulation state. This is arbitrary really, | 75 | * Software based FPU emulation state. This is arbitrary really, |
73 | * it matches the x87 format to make it easier to understand: | 76 | * it matches the x87 format to make it easier to understand: |
@@ -290,36 +293,13 @@ struct fpu { | |||
290 | unsigned int last_cpu; | 293 | unsigned int last_cpu; |
291 | 294 | ||
292 | /* | 295 | /* |
293 | * @fpstate_active: | 296 | * @initialized: |
294 | * | 297 | * |
295 | * This flag indicates whether this context is active: if the task | 298 | * This flag indicates whether this context is initialized: if the task |
296 | * is not running then we can restore from this context, if the task | 299 | * is not running then we can restore from this context, if the task |
297 | * is running then we should save into this context. | 300 | * is running then we should save into this context. |
298 | */ | 301 | */ |
299 | unsigned char fpstate_active; | 302 | unsigned char initialized; |
300 | |||
301 | /* | ||
302 | * @fpregs_active: | ||
303 | * | ||
304 | * This flag determines whether a given context is actively | ||
305 | * loaded into the FPU's registers and that those registers | ||
306 | * represent the task's current FPU state. | ||
307 | * | ||
308 | * Note the interaction with fpstate_active: | ||
309 | * | ||
310 | * # task does not use the FPU: | ||
311 | * fpstate_active == 0 | ||
312 | * | ||
313 | * # task uses the FPU and regs are active: | ||
314 | * fpstate_active == 1 && fpregs_active == 1 | ||
315 | * | ||
316 | * # the regs are inactive but still match fpstate: | ||
317 | * fpstate_active == 1 && fpregs_active == 0 && fpregs_owner == fpu | ||
318 | * | ||
319 | * The third state is what we use for the lazy restore optimization | ||
320 | * on lazy-switching CPUs. | ||
321 | */ | ||
322 | unsigned char fpregs_active; | ||
323 | 303 | ||
324 | /* | 304 | /* |
325 | * @state: | 305 | * @state: |
diff --git a/arch/x86/include/asm/fpu/xstate.h b/arch/x86/include/asm/fpu/xstate.h index 1b2799e0699a..83fee2469eb7 100644 --- a/arch/x86/include/asm/fpu/xstate.h +++ b/arch/x86/include/asm/fpu/xstate.h | |||
@@ -48,8 +48,12 @@ void fpu__xstate_clear_all_cpu_caps(void); | |||
48 | void *get_xsave_addr(struct xregs_state *xsave, int xstate); | 48 | void *get_xsave_addr(struct xregs_state *xsave, int xstate); |
49 | const void *get_xsave_field_ptr(int xstate_field); | 49 | const void *get_xsave_field_ptr(int xstate_field); |
50 | int using_compacted_format(void); | 50 | int using_compacted_format(void); |
51 | int copyout_from_xsaves(unsigned int pos, unsigned int count, void *kbuf, | 51 | int copy_xstate_to_kernel(void *kbuf, struct xregs_state *xsave, unsigned int offset, unsigned int size); |
52 | void __user *ubuf, struct xregs_state *xsave); | 52 | int copy_xstate_to_user(void __user *ubuf, struct xregs_state *xsave, unsigned int offset, unsigned int size); |
53 | int copyin_to_xsaves(const void *kbuf, const void __user *ubuf, | 53 | int copy_kernel_to_xstate(struct xregs_state *xsave, const void *kbuf); |
54 | struct xregs_state *xsave); | 54 | int copy_user_to_xstate(struct xregs_state *xsave, const void __user *ubuf); |
55 | |||
56 | /* Validate an xstate header supplied by userspace (ptrace or sigreturn) */ | ||
57 | extern int validate_xstate_header(const struct xstate_header *hdr); | ||
58 | |||
55 | #endif | 59 | #endif |
diff --git a/arch/x86/include/asm/kvm_para.h b/arch/x86/include/asm/kvm_para.h index bc62e7cbf1b1..59ad3d132353 100644 --- a/arch/x86/include/asm/kvm_para.h +++ b/arch/x86/include/asm/kvm_para.h | |||
@@ -88,7 +88,7 @@ static inline long kvm_hypercall4(unsigned int nr, unsigned long p1, | |||
88 | bool kvm_para_available(void); | 88 | bool kvm_para_available(void); |
89 | unsigned int kvm_arch_para_features(void); | 89 | unsigned int kvm_arch_para_features(void); |
90 | void __init kvm_guest_init(void); | 90 | void __init kvm_guest_init(void); |
91 | void kvm_async_pf_task_wait(u32 token); | 91 | void kvm_async_pf_task_wait(u32 token, int interrupt_kernel); |
92 | void kvm_async_pf_task_wake(u32 token); | 92 | void kvm_async_pf_task_wake(u32 token); |
93 | u32 kvm_read_and_reset_pf_reason(void); | 93 | u32 kvm_read_and_reset_pf_reason(void); |
94 | extern void kvm_disable_steal_time(void); | 94 | extern void kvm_disable_steal_time(void); |
@@ -103,7 +103,7 @@ static inline void kvm_spinlock_init(void) | |||
103 | 103 | ||
104 | #else /* CONFIG_KVM_GUEST */ | 104 | #else /* CONFIG_KVM_GUEST */ |
105 | #define kvm_guest_init() do {} while (0) | 105 | #define kvm_guest_init() do {} while (0) |
106 | #define kvm_async_pf_task_wait(T) do {} while(0) | 106 | #define kvm_async_pf_task_wait(T, I) do {} while(0) |
107 | #define kvm_async_pf_task_wake(T) do {} while(0) | 107 | #define kvm_async_pf_task_wake(T) do {} while(0) |
108 | 108 | ||
109 | static inline bool kvm_para_available(void) | 109 | static inline bool kvm_para_available(void) |
diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h index 7ae318c340d9..c120b5db178a 100644 --- a/arch/x86/include/asm/mmu_context.h +++ b/arch/x86/include/asm/mmu_context.h | |||
@@ -286,6 +286,32 @@ static inline bool arch_vma_access_permitted(struct vm_area_struct *vma, | |||
286 | return __pkru_allows_pkey(vma_pkey(vma), write); | 286 | return __pkru_allows_pkey(vma_pkey(vma), write); |
287 | } | 287 | } |
288 | 288 | ||
289 | /* | ||
290 | * If PCID is on, ASID-aware code paths put the ASID+1 into the PCID | ||
291 | * bits. This serves two purposes. It prevents a nasty situation in | ||
292 | * which PCID-unaware code saves CR3, loads some other value (with PCID | ||
293 | * == 0), and then restores CR3, thus corrupting the TLB for ASID 0 if | ||
294 | * the saved ASID was nonzero. It also means that any bugs involving | ||
295 | * loading a PCID-enabled CR3 with CR4.PCIDE off will trigger | ||
296 | * deterministically. | ||
297 | */ | ||
298 | |||
299 | static inline unsigned long build_cr3(struct mm_struct *mm, u16 asid) | ||
300 | { | ||
301 | if (static_cpu_has(X86_FEATURE_PCID)) { | ||
302 | VM_WARN_ON_ONCE(asid > 4094); | ||
303 | return __sme_pa(mm->pgd) | (asid + 1); | ||
304 | } else { | ||
305 | VM_WARN_ON_ONCE(asid != 0); | ||
306 | return __sme_pa(mm->pgd); | ||
307 | } | ||
308 | } | ||
309 | |||
310 | static inline unsigned long build_cr3_noflush(struct mm_struct *mm, u16 asid) | ||
311 | { | ||
312 | VM_WARN_ON_ONCE(asid > 4094); | ||
313 | return __sme_pa(mm->pgd) | (asid + 1) | CR3_NOFLUSH; | ||
314 | } | ||
289 | 315 | ||
290 | /* | 316 | /* |
291 | * This can be used from process context to figure out what the value of | 317 | * This can be used from process context to figure out what the value of |
@@ -296,10 +322,8 @@ static inline bool arch_vma_access_permitted(struct vm_area_struct *vma, | |||
296 | */ | 322 | */ |
297 | static inline unsigned long __get_current_cr3_fast(void) | 323 | static inline unsigned long __get_current_cr3_fast(void) |
298 | { | 324 | { |
299 | unsigned long cr3 = __pa(this_cpu_read(cpu_tlbstate.loaded_mm)->pgd); | 325 | unsigned long cr3 = build_cr3(this_cpu_read(cpu_tlbstate.loaded_mm), |
300 | 326 | this_cpu_read(cpu_tlbstate.loaded_mm_asid)); | |
301 | if (static_cpu_has(X86_FEATURE_PCID)) | ||
302 | cr3 |= this_cpu_read(cpu_tlbstate.loaded_mm_asid); | ||
303 | 327 | ||
304 | /* For now, be very restrictive about when this can be called. */ | 328 | /* For now, be very restrictive about when this can be called. */ |
305 | VM_WARN_ON(in_nmi() || preemptible()); | 329 | VM_WARN_ON(in_nmi() || preemptible()); |
diff --git a/arch/x86/include/asm/mshyperv.h b/arch/x86/include/asm/mshyperv.h index 63cc96f064dc..738503e1f80c 100644 --- a/arch/x86/include/asm/mshyperv.h +++ b/arch/x86/include/asm/mshyperv.h | |||
@@ -179,7 +179,6 @@ static inline u64 hv_do_hypercall(u64 control, void *input, void *output) | |||
179 | u64 input_address = input ? virt_to_phys(input) : 0; | 179 | u64 input_address = input ? virt_to_phys(input) : 0; |
180 | u64 output_address = output ? virt_to_phys(output) : 0; | 180 | u64 output_address = output ? virt_to_phys(output) : 0; |
181 | u64 hv_status; | 181 | u64 hv_status; |
182 | register void *__sp asm(_ASM_SP); | ||
183 | 182 | ||
184 | #ifdef CONFIG_X86_64 | 183 | #ifdef CONFIG_X86_64 |
185 | if (!hv_hypercall_pg) | 184 | if (!hv_hypercall_pg) |
@@ -187,7 +186,7 @@ static inline u64 hv_do_hypercall(u64 control, void *input, void *output) | |||
187 | 186 | ||
188 | __asm__ __volatile__("mov %4, %%r8\n" | 187 | __asm__ __volatile__("mov %4, %%r8\n" |
189 | "call *%5" | 188 | "call *%5" |
190 | : "=a" (hv_status), "+r" (__sp), | 189 | : "=a" (hv_status), ASM_CALL_CONSTRAINT, |
191 | "+c" (control), "+d" (input_address) | 190 | "+c" (control), "+d" (input_address) |
192 | : "r" (output_address), "m" (hv_hypercall_pg) | 191 | : "r" (output_address), "m" (hv_hypercall_pg) |
193 | : "cc", "memory", "r8", "r9", "r10", "r11"); | 192 | : "cc", "memory", "r8", "r9", "r10", "r11"); |
@@ -202,7 +201,7 @@ static inline u64 hv_do_hypercall(u64 control, void *input, void *output) | |||
202 | 201 | ||
203 | __asm__ __volatile__("call *%7" | 202 | __asm__ __volatile__("call *%7" |
204 | : "=A" (hv_status), | 203 | : "=A" (hv_status), |
205 | "+c" (input_address_lo), "+r" (__sp) | 204 | "+c" (input_address_lo), ASM_CALL_CONSTRAINT |
206 | : "A" (control), | 205 | : "A" (control), |
207 | "b" (input_address_hi), | 206 | "b" (input_address_hi), |
208 | "D"(output_address_hi), "S"(output_address_lo), | 207 | "D"(output_address_hi), "S"(output_address_lo), |
@@ -224,12 +223,11 @@ static inline u64 hv_do_hypercall(u64 control, void *input, void *output) | |||
224 | static inline u64 hv_do_fast_hypercall8(u16 code, u64 input1) | 223 | static inline u64 hv_do_fast_hypercall8(u16 code, u64 input1) |
225 | { | 224 | { |
226 | u64 hv_status, control = (u64)code | HV_HYPERCALL_FAST_BIT; | 225 | u64 hv_status, control = (u64)code | HV_HYPERCALL_FAST_BIT; |
227 | register void *__sp asm(_ASM_SP); | ||
228 | 226 | ||
229 | #ifdef CONFIG_X86_64 | 227 | #ifdef CONFIG_X86_64 |
230 | { | 228 | { |
231 | __asm__ __volatile__("call *%4" | 229 | __asm__ __volatile__("call *%4" |
232 | : "=a" (hv_status), "+r" (__sp), | 230 | : "=a" (hv_status), ASM_CALL_CONSTRAINT, |
233 | "+c" (control), "+d" (input1) | 231 | "+c" (control), "+d" (input1) |
234 | : "m" (hv_hypercall_pg) | 232 | : "m" (hv_hypercall_pg) |
235 | : "cc", "r8", "r9", "r10", "r11"); | 233 | : "cc", "r8", "r9", "r10", "r11"); |
@@ -242,7 +240,7 @@ static inline u64 hv_do_fast_hypercall8(u16 code, u64 input1) | |||
242 | __asm__ __volatile__ ("call *%5" | 240 | __asm__ __volatile__ ("call *%5" |
243 | : "=A"(hv_status), | 241 | : "=A"(hv_status), |
244 | "+c"(input1_lo), | 242 | "+c"(input1_lo), |
245 | "+r"(__sp) | 243 | ASM_CALL_CONSTRAINT |
246 | : "A" (control), | 244 | : "A" (control), |
247 | "b" (input1_hi), | 245 | "b" (input1_hi), |
248 | "m" (hv_hypercall_pg) | 246 | "m" (hv_hypercall_pg) |
diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h index 42873edd9f9d..280d94c36dad 100644 --- a/arch/x86/include/asm/paravirt_types.h +++ b/arch/x86/include/asm/paravirt_types.h | |||
@@ -459,8 +459,8 @@ int paravirt_disable_iospace(void); | |||
459 | */ | 459 | */ |
460 | #ifdef CONFIG_X86_32 | 460 | #ifdef CONFIG_X86_32 |
461 | #define PVOP_VCALL_ARGS \ | 461 | #define PVOP_VCALL_ARGS \ |
462 | unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx; \ | 462 | unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx; |
463 | register void *__sp asm("esp") | 463 | |
464 | #define PVOP_CALL_ARGS PVOP_VCALL_ARGS | 464 | #define PVOP_CALL_ARGS PVOP_VCALL_ARGS |
465 | 465 | ||
466 | #define PVOP_CALL_ARG1(x) "a" ((unsigned long)(x)) | 466 | #define PVOP_CALL_ARG1(x) "a" ((unsigned long)(x)) |
@@ -480,8 +480,8 @@ int paravirt_disable_iospace(void); | |||
480 | /* [re]ax isn't an arg, but the return val */ | 480 | /* [re]ax isn't an arg, but the return val */ |
481 | #define PVOP_VCALL_ARGS \ | 481 | #define PVOP_VCALL_ARGS \ |
482 | unsigned long __edi = __edi, __esi = __esi, \ | 482 | unsigned long __edi = __edi, __esi = __esi, \ |
483 | __edx = __edx, __ecx = __ecx, __eax = __eax; \ | 483 | __edx = __edx, __ecx = __ecx, __eax = __eax; |
484 | register void *__sp asm("rsp") | 484 | |
485 | #define PVOP_CALL_ARGS PVOP_VCALL_ARGS | 485 | #define PVOP_CALL_ARGS PVOP_VCALL_ARGS |
486 | 486 | ||
487 | #define PVOP_CALL_ARG1(x) "D" ((unsigned long)(x)) | 487 | #define PVOP_CALL_ARG1(x) "D" ((unsigned long)(x)) |
@@ -532,7 +532,7 @@ int paravirt_disable_iospace(void); | |||
532 | asm volatile(pre \ | 532 | asm volatile(pre \ |
533 | paravirt_alt(PARAVIRT_CALL) \ | 533 | paravirt_alt(PARAVIRT_CALL) \ |
534 | post \ | 534 | post \ |
535 | : call_clbr, "+r" (__sp) \ | 535 | : call_clbr, ASM_CALL_CONSTRAINT \ |
536 | : paravirt_type(op), \ | 536 | : paravirt_type(op), \ |
537 | paravirt_clobber(clbr), \ | 537 | paravirt_clobber(clbr), \ |
538 | ##__VA_ARGS__ \ | 538 | ##__VA_ARGS__ \ |
@@ -542,7 +542,7 @@ int paravirt_disable_iospace(void); | |||
542 | asm volatile(pre \ | 542 | asm volatile(pre \ |
543 | paravirt_alt(PARAVIRT_CALL) \ | 543 | paravirt_alt(PARAVIRT_CALL) \ |
544 | post \ | 544 | post \ |
545 | : call_clbr, "+r" (__sp) \ | 545 | : call_clbr, ASM_CALL_CONSTRAINT \ |
546 | : paravirt_type(op), \ | 546 | : paravirt_type(op), \ |
547 | paravirt_clobber(clbr), \ | 547 | paravirt_clobber(clbr), \ |
548 | ##__VA_ARGS__ \ | 548 | ##__VA_ARGS__ \ |
@@ -569,7 +569,7 @@ int paravirt_disable_iospace(void); | |||
569 | asm volatile(pre \ | 569 | asm volatile(pre \ |
570 | paravirt_alt(PARAVIRT_CALL) \ | 570 | paravirt_alt(PARAVIRT_CALL) \ |
571 | post \ | 571 | post \ |
572 | : call_clbr, "+r" (__sp) \ | 572 | : call_clbr, ASM_CALL_CONSTRAINT \ |
573 | : paravirt_type(op), \ | 573 | : paravirt_type(op), \ |
574 | paravirt_clobber(clbr), \ | 574 | paravirt_clobber(clbr), \ |
575 | ##__VA_ARGS__ \ | 575 | ##__VA_ARGS__ \ |
diff --git a/arch/x86/include/asm/preempt.h b/arch/x86/include/asm/preempt.h index ec1f3c651150..4f44505dbf87 100644 --- a/arch/x86/include/asm/preempt.h +++ b/arch/x86/include/asm/preempt.h | |||
@@ -100,19 +100,14 @@ static __always_inline bool should_resched(int preempt_offset) | |||
100 | 100 | ||
101 | #ifdef CONFIG_PREEMPT | 101 | #ifdef CONFIG_PREEMPT |
102 | extern asmlinkage void ___preempt_schedule(void); | 102 | extern asmlinkage void ___preempt_schedule(void); |
103 | # define __preempt_schedule() \ | 103 | # define __preempt_schedule() \ |
104 | ({ \ | 104 | asm volatile ("call ___preempt_schedule" : ASM_CALL_CONSTRAINT) |
105 | register void *__sp asm(_ASM_SP); \ | ||
106 | asm volatile ("call ___preempt_schedule" : "+r"(__sp)); \ | ||
107 | }) | ||
108 | 105 | ||
109 | extern asmlinkage void preempt_schedule(void); | 106 | extern asmlinkage void preempt_schedule(void); |
110 | extern asmlinkage void ___preempt_schedule_notrace(void); | 107 | extern asmlinkage void ___preempt_schedule_notrace(void); |
111 | # define __preempt_schedule_notrace() \ | 108 | # define __preempt_schedule_notrace() \ |
112 | ({ \ | 109 | asm volatile ("call ___preempt_schedule_notrace" : ASM_CALL_CONSTRAINT) |
113 | register void *__sp asm(_ASM_SP); \ | 110 | |
114 | asm volatile ("call ___preempt_schedule_notrace" : "+r"(__sp)); \ | ||
115 | }) | ||
116 | extern asmlinkage void preempt_schedule_notrace(void); | 111 | extern asmlinkage void preempt_schedule_notrace(void); |
117 | #endif | 112 | #endif |
118 | 113 | ||
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h index 3fa26a61eabc..b390ff76e58f 100644 --- a/arch/x86/include/asm/processor.h +++ b/arch/x86/include/asm/processor.h | |||
@@ -677,8 +677,6 @@ static inline void sync_core(void) | |||
677 | * Like all of Linux's memory ordering operations, this is a | 677 | * Like all of Linux's memory ordering operations, this is a |
678 | * compiler barrier as well. | 678 | * compiler barrier as well. |
679 | */ | 679 | */ |
680 | register void *__sp asm(_ASM_SP); | ||
681 | |||
682 | #ifdef CONFIG_X86_32 | 680 | #ifdef CONFIG_X86_32 |
683 | asm volatile ( | 681 | asm volatile ( |
684 | "pushfl\n\t" | 682 | "pushfl\n\t" |
@@ -686,7 +684,7 @@ static inline void sync_core(void) | |||
686 | "pushl $1f\n\t" | 684 | "pushl $1f\n\t" |
687 | "iret\n\t" | 685 | "iret\n\t" |
688 | "1:" | 686 | "1:" |
689 | : "+r" (__sp) : : "memory"); | 687 | : ASM_CALL_CONSTRAINT : : "memory"); |
690 | #else | 688 | #else |
691 | unsigned int tmp; | 689 | unsigned int tmp; |
692 | 690 | ||
@@ -703,7 +701,7 @@ static inline void sync_core(void) | |||
703 | "iretq\n\t" | 701 | "iretq\n\t" |
704 | UNWIND_HINT_RESTORE | 702 | UNWIND_HINT_RESTORE |
705 | "1:" | 703 | "1:" |
706 | : "=&r" (tmp), "+r" (__sp) : : "cc", "memory"); | 704 | : "=&r" (tmp), ASM_CALL_CONSTRAINT : : "cc", "memory"); |
707 | #endif | 705 | #endif |
708 | } | 706 | } |
709 | 707 | ||
diff --git a/arch/x86/include/asm/rwsem.h b/arch/x86/include/asm/rwsem.h index a34e0d4b957d..7116b7931c7b 100644 --- a/arch/x86/include/asm/rwsem.h +++ b/arch/x86/include/asm/rwsem.h | |||
@@ -103,7 +103,6 @@ static inline bool __down_read_trylock(struct rw_semaphore *sem) | |||
103 | ({ \ | 103 | ({ \ |
104 | long tmp; \ | 104 | long tmp; \ |
105 | struct rw_semaphore* ret; \ | 105 | struct rw_semaphore* ret; \ |
106 | register void *__sp asm(_ASM_SP); \ | ||
107 | \ | 106 | \ |
108 | asm volatile("# beginning down_write\n\t" \ | 107 | asm volatile("# beginning down_write\n\t" \ |
109 | LOCK_PREFIX " xadd %1,(%4)\n\t" \ | 108 | LOCK_PREFIX " xadd %1,(%4)\n\t" \ |
@@ -114,7 +113,8 @@ static inline bool __down_read_trylock(struct rw_semaphore *sem) | |||
114 | " call " slow_path "\n" \ | 113 | " call " slow_path "\n" \ |
115 | "1:\n" \ | 114 | "1:\n" \ |
116 | "# ending down_write" \ | 115 | "# ending down_write" \ |
117 | : "+m" (sem->count), "=d" (tmp), "=a" (ret), "+r" (__sp) \ | 116 | : "+m" (sem->count), "=d" (tmp), \ |
117 | "=a" (ret), ASM_CALL_CONSTRAINT \ | ||
118 | : "a" (sem), "1" (RWSEM_ACTIVE_WRITE_BIAS) \ | 118 | : "a" (sem), "1" (RWSEM_ACTIVE_WRITE_BIAS) \ |
119 | : "memory", "cc"); \ | 119 | : "memory", "cc"); \ |
120 | ret; \ | 120 | ret; \ |
diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h index 5161da1a0fa0..89e7eeb5cec1 100644 --- a/arch/x86/include/asm/thread_info.h +++ b/arch/x86/include/asm/thread_info.h | |||
@@ -158,17 +158,6 @@ struct thread_info { | |||
158 | */ | 158 | */ |
159 | #ifndef __ASSEMBLY__ | 159 | #ifndef __ASSEMBLY__ |
160 | 160 | ||
161 | static inline unsigned long current_stack_pointer(void) | ||
162 | { | ||
163 | unsigned long sp; | ||
164 | #ifdef CONFIG_X86_64 | ||
165 | asm("mov %%rsp,%0" : "=g" (sp)); | ||
166 | #else | ||
167 | asm("mov %%esp,%0" : "=g" (sp)); | ||
168 | #endif | ||
169 | return sp; | ||
170 | } | ||
171 | |||
172 | /* | 161 | /* |
173 | * Walks up the stack frames to make sure that the specified object is | 162 | * Walks up the stack frames to make sure that the specified object is |
174 | * entirely contained by a single stack frame. | 163 | * entirely contained by a single stack frame. |
diff --git a/arch/x86/include/asm/trace/fpu.h b/arch/x86/include/asm/trace/fpu.h index 342e59789fcd..39f7a27bef13 100644 --- a/arch/x86/include/asm/trace/fpu.h +++ b/arch/x86/include/asm/trace/fpu.h | |||
@@ -12,25 +12,22 @@ DECLARE_EVENT_CLASS(x86_fpu, | |||
12 | 12 | ||
13 | TP_STRUCT__entry( | 13 | TP_STRUCT__entry( |
14 | __field(struct fpu *, fpu) | 14 | __field(struct fpu *, fpu) |
15 | __field(bool, fpregs_active) | 15 | __field(bool, initialized) |
16 | __field(bool, fpstate_active) | ||
17 | __field(u64, xfeatures) | 16 | __field(u64, xfeatures) |
18 | __field(u64, xcomp_bv) | 17 | __field(u64, xcomp_bv) |
19 | ), | 18 | ), |
20 | 19 | ||
21 | TP_fast_assign( | 20 | TP_fast_assign( |
22 | __entry->fpu = fpu; | 21 | __entry->fpu = fpu; |
23 | __entry->fpregs_active = fpu->fpregs_active; | 22 | __entry->initialized = fpu->initialized; |
24 | __entry->fpstate_active = fpu->fpstate_active; | ||
25 | if (boot_cpu_has(X86_FEATURE_OSXSAVE)) { | 23 | if (boot_cpu_has(X86_FEATURE_OSXSAVE)) { |
26 | __entry->xfeatures = fpu->state.xsave.header.xfeatures; | 24 | __entry->xfeatures = fpu->state.xsave.header.xfeatures; |
27 | __entry->xcomp_bv = fpu->state.xsave.header.xcomp_bv; | 25 | __entry->xcomp_bv = fpu->state.xsave.header.xcomp_bv; |
28 | } | 26 | } |
29 | ), | 27 | ), |
30 | TP_printk("x86/fpu: %p fpregs_active: %d fpstate_active: %d xfeatures: %llx xcomp_bv: %llx", | 28 | TP_printk("x86/fpu: %p initialized: %d xfeatures: %llx xcomp_bv: %llx", |
31 | __entry->fpu, | 29 | __entry->fpu, |
32 | __entry->fpregs_active, | 30 | __entry->initialized, |
33 | __entry->fpstate_active, | ||
34 | __entry->xfeatures, | 31 | __entry->xfeatures, |
35 | __entry->xcomp_bv | 32 | __entry->xcomp_bv |
36 | ) | 33 | ) |
diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h index 184eb9894dae..4b892917edeb 100644 --- a/arch/x86/include/asm/uaccess.h +++ b/arch/x86/include/asm/uaccess.h | |||
@@ -166,11 +166,11 @@ __typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL)) | |||
166 | ({ \ | 166 | ({ \ |
167 | int __ret_gu; \ | 167 | int __ret_gu; \ |
168 | register __inttype(*(ptr)) __val_gu asm("%"_ASM_DX); \ | 168 | register __inttype(*(ptr)) __val_gu asm("%"_ASM_DX); \ |
169 | register void *__sp asm(_ASM_SP); \ | ||
170 | __chk_user_ptr(ptr); \ | 169 | __chk_user_ptr(ptr); \ |
171 | might_fault(); \ | 170 | might_fault(); \ |
172 | asm volatile("call __get_user_%P4" \ | 171 | asm volatile("call __get_user_%P4" \ |
173 | : "=a" (__ret_gu), "=r" (__val_gu), "+r" (__sp) \ | 172 | : "=a" (__ret_gu), "=r" (__val_gu), \ |
173 | ASM_CALL_CONSTRAINT \ | ||
174 | : "0" (ptr), "i" (sizeof(*(ptr)))); \ | 174 | : "0" (ptr), "i" (sizeof(*(ptr)))); \ |
175 | (x) = (__force __typeof__(*(ptr))) __val_gu; \ | 175 | (x) = (__force __typeof__(*(ptr))) __val_gu; \ |
176 | __builtin_expect(__ret_gu, 0); \ | 176 | __builtin_expect(__ret_gu, 0); \ |
@@ -337,7 +337,7 @@ do { \ | |||
337 | _ASM_EXTABLE(1b, 4b) \ | 337 | _ASM_EXTABLE(1b, 4b) \ |
338 | _ASM_EXTABLE(2b, 4b) \ | 338 | _ASM_EXTABLE(2b, 4b) \ |
339 | : "=r" (retval), "=&A"(x) \ | 339 | : "=r" (retval), "=&A"(x) \ |
340 | : "m" (__m(__ptr)), "m" __m(((u32 *)(__ptr)) + 1), \ | 340 | : "m" (__m(__ptr)), "m" __m(((u32 __user *)(__ptr)) + 1), \ |
341 | "i" (errret), "0" (retval)); \ | 341 | "i" (errret), "0" (retval)); \ |
342 | }) | 342 | }) |
343 | 343 | ||
diff --git a/arch/x86/include/asm/xen/hypercall.h b/arch/x86/include/asm/xen/hypercall.h index 9606688caa4b..7cb282e9e587 100644 --- a/arch/x86/include/asm/xen/hypercall.h +++ b/arch/x86/include/asm/xen/hypercall.h | |||
@@ -113,10 +113,9 @@ extern struct { char _entry[32]; } hypercall_page[]; | |||
113 | register unsigned long __arg2 asm(__HYPERCALL_ARG2REG) = __arg2; \ | 113 | register unsigned long __arg2 asm(__HYPERCALL_ARG2REG) = __arg2; \ |
114 | register unsigned long __arg3 asm(__HYPERCALL_ARG3REG) = __arg3; \ | 114 | register unsigned long __arg3 asm(__HYPERCALL_ARG3REG) = __arg3; \ |
115 | register unsigned long __arg4 asm(__HYPERCALL_ARG4REG) = __arg4; \ | 115 | register unsigned long __arg4 asm(__HYPERCALL_ARG4REG) = __arg4; \ |
116 | register unsigned long __arg5 asm(__HYPERCALL_ARG5REG) = __arg5; \ | 116 | register unsigned long __arg5 asm(__HYPERCALL_ARG5REG) = __arg5; |
117 | register void *__sp asm(_ASM_SP); | ||
118 | 117 | ||
119 | #define __HYPERCALL_0PARAM "=r" (__res), "+r" (__sp) | 118 | #define __HYPERCALL_0PARAM "=r" (__res), ASM_CALL_CONSTRAINT |
120 | #define __HYPERCALL_1PARAM __HYPERCALL_0PARAM, "+r" (__arg1) | 119 | #define __HYPERCALL_1PARAM __HYPERCALL_0PARAM, "+r" (__arg1) |
121 | #define __HYPERCALL_2PARAM __HYPERCALL_1PARAM, "+r" (__arg2) | 120 | #define __HYPERCALL_2PARAM __HYPERCALL_1PARAM, "+r" (__arg2) |
122 | #define __HYPERCALL_3PARAM __HYPERCALL_2PARAM, "+r" (__arg3) | 121 | #define __HYPERCALL_3PARAM __HYPERCALL_2PARAM, "+r" (__arg3) |
@@ -552,13 +551,13 @@ static inline void | |||
552 | MULTI_update_descriptor(struct multicall_entry *mcl, u64 maddr, | 551 | MULTI_update_descriptor(struct multicall_entry *mcl, u64 maddr, |
553 | struct desc_struct desc) | 552 | struct desc_struct desc) |
554 | { | 553 | { |
555 | u32 *p = (u32 *) &desc; | ||
556 | |||
557 | mcl->op = __HYPERVISOR_update_descriptor; | 554 | mcl->op = __HYPERVISOR_update_descriptor; |
558 | if (sizeof(maddr) == sizeof(long)) { | 555 | if (sizeof(maddr) == sizeof(long)) { |
559 | mcl->args[0] = maddr; | 556 | mcl->args[0] = maddr; |
560 | mcl->args[1] = *(unsigned long *)&desc; | 557 | mcl->args[1] = *(unsigned long *)&desc; |
561 | } else { | 558 | } else { |
559 | u32 *p = (u32 *)&desc; | ||
560 | |||
562 | mcl->args[0] = maddr; | 561 | mcl->args[0] = maddr; |
563 | mcl->args[1] = maddr >> 32; | 562 | mcl->args[1] = maddr >> 32; |
564 | mcl->args[2] = *p++; | 563 | mcl->args[2] = *p++; |
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c index 9862e2cd6d93..d58184b7cd44 100644 --- a/arch/x86/kernel/cpu/amd.c +++ b/arch/x86/kernel/cpu/amd.c | |||
@@ -763,6 +763,16 @@ static void init_amd_bd(struct cpuinfo_x86 *c) | |||
763 | } | 763 | } |
764 | } | 764 | } |
765 | 765 | ||
766 | static void init_amd_zn(struct cpuinfo_x86 *c) | ||
767 | { | ||
768 | /* | ||
769 | * Fix erratum 1076: CPB feature bit not being set in CPUID. It affects | ||
770 | * all up to and including B1. | ||
771 | */ | ||
772 | if (c->x86_model <= 1 && c->x86_mask <= 1) | ||
773 | set_cpu_cap(c, X86_FEATURE_CPB); | ||
774 | } | ||
775 | |||
766 | static void init_amd(struct cpuinfo_x86 *c) | 776 | static void init_amd(struct cpuinfo_x86 *c) |
767 | { | 777 | { |
768 | early_init_amd(c); | 778 | early_init_amd(c); |
@@ -791,6 +801,7 @@ static void init_amd(struct cpuinfo_x86 *c) | |||
791 | case 0x10: init_amd_gh(c); break; | 801 | case 0x10: init_amd_gh(c); break; |
792 | case 0x12: init_amd_ln(c); break; | 802 | case 0x12: init_amd_ln(c); break; |
793 | case 0x15: init_amd_bd(c); break; | 803 | case 0x15: init_amd_bd(c); break; |
804 | case 0x17: init_amd_zn(c); break; | ||
794 | } | 805 | } |
795 | 806 | ||
796 | /* Enable workaround for FXSAVE leak */ | 807 | /* Enable workaround for FXSAVE leak */ |
diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c index db684880d74a..0af86d9242da 100644 --- a/arch/x86/kernel/cpu/bugs.c +++ b/arch/x86/kernel/cpu/bugs.c | |||
@@ -21,14 +21,6 @@ | |||
21 | 21 | ||
22 | void __init check_bugs(void) | 22 | void __init check_bugs(void) |
23 | { | 23 | { |
24 | #ifdef CONFIG_X86_32 | ||
25 | /* | ||
26 | * Regardless of whether PCID is enumerated, the SDM says | ||
27 | * that it can't be enabled in 32-bit mode. | ||
28 | */ | ||
29 | setup_clear_cpu_cap(X86_FEATURE_PCID); | ||
30 | #endif | ||
31 | |||
32 | identify_boot_cpu(); | 24 | identify_boot_cpu(); |
33 | 25 | ||
34 | if (!IS_ENABLED(CONFIG_SMP)) { | 26 | if (!IS_ENABLED(CONFIG_SMP)) { |
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c index 775f10100d7f..c9176bae7fd8 100644 --- a/arch/x86/kernel/cpu/common.c +++ b/arch/x86/kernel/cpu/common.c | |||
@@ -904,6 +904,14 @@ static void __init early_identify_cpu(struct cpuinfo_x86 *c) | |||
904 | 904 | ||
905 | setup_force_cpu_cap(X86_FEATURE_ALWAYS); | 905 | setup_force_cpu_cap(X86_FEATURE_ALWAYS); |
906 | fpu__init_system(c); | 906 | fpu__init_system(c); |
907 | |||
908 | #ifdef CONFIG_X86_32 | ||
909 | /* | ||
910 | * Regardless of whether PCID is enumerated, the SDM says | ||
911 | * that it can't be enabled in 32-bit mode. | ||
912 | */ | ||
913 | setup_clear_cpu_cap(X86_FEATURE_PCID); | ||
914 | #endif | ||
907 | } | 915 | } |
908 | 916 | ||
909 | void __init early_cpu_init(void) | 917 | void __init early_cpu_init(void) |
diff --git a/arch/x86/kernel/fpu/core.c b/arch/x86/kernel/fpu/core.c index e1114f070c2d..f92a6593de1e 100644 --- a/arch/x86/kernel/fpu/core.c +++ b/arch/x86/kernel/fpu/core.c | |||
@@ -100,7 +100,7 @@ void __kernel_fpu_begin(void) | |||
100 | 100 | ||
101 | kernel_fpu_disable(); | 101 | kernel_fpu_disable(); |
102 | 102 | ||
103 | if (fpu->fpregs_active) { | 103 | if (fpu->initialized) { |
104 | /* | 104 | /* |
105 | * Ignore return value -- we don't care if reg state | 105 | * Ignore return value -- we don't care if reg state |
106 | * is clobbered. | 106 | * is clobbered. |
@@ -116,7 +116,7 @@ void __kernel_fpu_end(void) | |||
116 | { | 116 | { |
117 | struct fpu *fpu = ¤t->thread.fpu; | 117 | struct fpu *fpu = ¤t->thread.fpu; |
118 | 118 | ||
119 | if (fpu->fpregs_active) | 119 | if (fpu->initialized) |
120 | copy_kernel_to_fpregs(&fpu->state); | 120 | copy_kernel_to_fpregs(&fpu->state); |
121 | 121 | ||
122 | kernel_fpu_enable(); | 122 | kernel_fpu_enable(); |
@@ -148,7 +148,7 @@ void fpu__save(struct fpu *fpu) | |||
148 | 148 | ||
149 | preempt_disable(); | 149 | preempt_disable(); |
150 | trace_x86_fpu_before_save(fpu); | 150 | trace_x86_fpu_before_save(fpu); |
151 | if (fpu->fpregs_active) { | 151 | if (fpu->initialized) { |
152 | if (!copy_fpregs_to_fpstate(fpu)) { | 152 | if (!copy_fpregs_to_fpstate(fpu)) { |
153 | copy_kernel_to_fpregs(&fpu->state); | 153 | copy_kernel_to_fpregs(&fpu->state); |
154 | } | 154 | } |
@@ -189,10 +189,9 @@ EXPORT_SYMBOL_GPL(fpstate_init); | |||
189 | 189 | ||
190 | int fpu__copy(struct fpu *dst_fpu, struct fpu *src_fpu) | 190 | int fpu__copy(struct fpu *dst_fpu, struct fpu *src_fpu) |
191 | { | 191 | { |
192 | dst_fpu->fpregs_active = 0; | ||
193 | dst_fpu->last_cpu = -1; | 192 | dst_fpu->last_cpu = -1; |
194 | 193 | ||
195 | if (!src_fpu->fpstate_active || !static_cpu_has(X86_FEATURE_FPU)) | 194 | if (!src_fpu->initialized || !static_cpu_has(X86_FEATURE_FPU)) |
196 | return 0; | 195 | return 0; |
197 | 196 | ||
198 | WARN_ON_FPU(src_fpu != ¤t->thread.fpu); | 197 | WARN_ON_FPU(src_fpu != ¤t->thread.fpu); |
@@ -206,26 +205,14 @@ int fpu__copy(struct fpu *dst_fpu, struct fpu *src_fpu) | |||
206 | /* | 205 | /* |
207 | * Save current FPU registers directly into the child | 206 | * Save current FPU registers directly into the child |
208 | * FPU context, without any memory-to-memory copying. | 207 | * FPU context, without any memory-to-memory copying. |
209 | * In lazy mode, if the FPU context isn't loaded into | ||
210 | * fpregs, CR0.TS will be set and do_device_not_available | ||
211 | * will load the FPU context. | ||
212 | * | 208 | * |
213 | * We have to do all this with preemption disabled, | 209 | * ( The function 'fails' in the FNSAVE case, which destroys |
214 | * mostly because of the FNSAVE case, because in that | 210 | * register contents so we have to copy them back. ) |
215 | * case we must not allow preemption in the window | ||
216 | * between the FNSAVE and us marking the context lazy. | ||
217 | * | ||
218 | * It shouldn't be an issue as even FNSAVE is plenty | ||
219 | * fast in terms of critical section length. | ||
220 | */ | 211 | */ |
221 | preempt_disable(); | ||
222 | if (!copy_fpregs_to_fpstate(dst_fpu)) { | 212 | if (!copy_fpregs_to_fpstate(dst_fpu)) { |
223 | memcpy(&src_fpu->state, &dst_fpu->state, | 213 | memcpy(&src_fpu->state, &dst_fpu->state, fpu_kernel_xstate_size); |
224 | fpu_kernel_xstate_size); | ||
225 | |||
226 | copy_kernel_to_fpregs(&src_fpu->state); | 214 | copy_kernel_to_fpregs(&src_fpu->state); |
227 | } | 215 | } |
228 | preempt_enable(); | ||
229 | 216 | ||
230 | trace_x86_fpu_copy_src(src_fpu); | 217 | trace_x86_fpu_copy_src(src_fpu); |
231 | trace_x86_fpu_copy_dst(dst_fpu); | 218 | trace_x86_fpu_copy_dst(dst_fpu); |
@@ -237,45 +224,48 @@ int fpu__copy(struct fpu *dst_fpu, struct fpu *src_fpu) | |||
237 | * Activate the current task's in-memory FPU context, | 224 | * Activate the current task's in-memory FPU context, |
238 | * if it has not been used before: | 225 | * if it has not been used before: |
239 | */ | 226 | */ |
240 | void fpu__activate_curr(struct fpu *fpu) | 227 | void fpu__initialize(struct fpu *fpu) |
241 | { | 228 | { |
242 | WARN_ON_FPU(fpu != ¤t->thread.fpu); | 229 | WARN_ON_FPU(fpu != ¤t->thread.fpu); |
243 | 230 | ||
244 | if (!fpu->fpstate_active) { | 231 | if (!fpu->initialized) { |
245 | fpstate_init(&fpu->state); | 232 | fpstate_init(&fpu->state); |
246 | trace_x86_fpu_init_state(fpu); | 233 | trace_x86_fpu_init_state(fpu); |
247 | 234 | ||
248 | trace_x86_fpu_activate_state(fpu); | 235 | trace_x86_fpu_activate_state(fpu); |
249 | /* Safe to do for the current task: */ | 236 | /* Safe to do for the current task: */ |
250 | fpu->fpstate_active = 1; | 237 | fpu->initialized = 1; |
251 | } | 238 | } |
252 | } | 239 | } |
253 | EXPORT_SYMBOL_GPL(fpu__activate_curr); | 240 | EXPORT_SYMBOL_GPL(fpu__initialize); |
254 | 241 | ||
255 | /* | 242 | /* |
256 | * This function must be called before we read a task's fpstate. | 243 | * This function must be called before we read a task's fpstate. |
257 | * | 244 | * |
258 | * If the task has not used the FPU before then initialize its | 245 | * There's two cases where this gets called: |
259 | * fpstate. | 246 | * |
247 | * - for the current task (when coredumping), in which case we have | ||
248 | * to save the latest FPU registers into the fpstate, | ||
249 | * | ||
250 | * - or it's called for stopped tasks (ptrace), in which case the | ||
251 | * registers were already saved by the context-switch code when | ||
252 | * the task scheduled out - we only have to initialize the registers | ||
253 | * if they've never been initialized. | ||
260 | * | 254 | * |
261 | * If the task has used the FPU before then save it. | 255 | * If the task has used the FPU before then save it. |
262 | */ | 256 | */ |
263 | void fpu__activate_fpstate_read(struct fpu *fpu) | 257 | void fpu__prepare_read(struct fpu *fpu) |
264 | { | 258 | { |
265 | /* | 259 | if (fpu == ¤t->thread.fpu) { |
266 | * If fpregs are active (in the current CPU), then | ||
267 | * copy them to the fpstate: | ||
268 | */ | ||
269 | if (fpu->fpregs_active) { | ||
270 | fpu__save(fpu); | 260 | fpu__save(fpu); |
271 | } else { | 261 | } else { |
272 | if (!fpu->fpstate_active) { | 262 | if (!fpu->initialized) { |
273 | fpstate_init(&fpu->state); | 263 | fpstate_init(&fpu->state); |
274 | trace_x86_fpu_init_state(fpu); | 264 | trace_x86_fpu_init_state(fpu); |
275 | 265 | ||
276 | trace_x86_fpu_activate_state(fpu); | 266 | trace_x86_fpu_activate_state(fpu); |
277 | /* Safe to do for current and for stopped child tasks: */ | 267 | /* Safe to do for current and for stopped child tasks: */ |
278 | fpu->fpstate_active = 1; | 268 | fpu->initialized = 1; |
279 | } | 269 | } |
280 | } | 270 | } |
281 | } | 271 | } |
@@ -283,17 +273,17 @@ void fpu__activate_fpstate_read(struct fpu *fpu) | |||
283 | /* | 273 | /* |
284 | * This function must be called before we write a task's fpstate. | 274 | * This function must be called before we write a task's fpstate. |
285 | * | 275 | * |
286 | * If the task has used the FPU before then unlazy it. | 276 | * If the task has used the FPU before then invalidate any cached FPU registers. |
287 | * If the task has not used the FPU before then initialize its fpstate. | 277 | * If the task has not used the FPU before then initialize its fpstate. |
288 | * | 278 | * |
289 | * After this function call, after registers in the fpstate are | 279 | * After this function call, after registers in the fpstate are |
290 | * modified and the child task has woken up, the child task will | 280 | * modified and the child task has woken up, the child task will |
291 | * restore the modified FPU state from the modified context. If we | 281 | * restore the modified FPU state from the modified context. If we |
292 | * didn't clear its lazy status here then the lazy in-registers | 282 | * didn't clear its cached status here then the cached in-registers |
293 | * state pending on its former CPU could be restored, corrupting | 283 | * state pending on its former CPU could be restored, corrupting |
294 | * the modifications. | 284 | * the modifications. |
295 | */ | 285 | */ |
296 | void fpu__activate_fpstate_write(struct fpu *fpu) | 286 | void fpu__prepare_write(struct fpu *fpu) |
297 | { | 287 | { |
298 | /* | 288 | /* |
299 | * Only stopped child tasks can be used to modify the FPU | 289 | * Only stopped child tasks can be used to modify the FPU |
@@ -301,8 +291,8 @@ void fpu__activate_fpstate_write(struct fpu *fpu) | |||
301 | */ | 291 | */ |
302 | WARN_ON_FPU(fpu == ¤t->thread.fpu); | 292 | WARN_ON_FPU(fpu == ¤t->thread.fpu); |
303 | 293 | ||
304 | if (fpu->fpstate_active) { | 294 | if (fpu->initialized) { |
305 | /* Invalidate any lazy state: */ | 295 | /* Invalidate any cached state: */ |
306 | __fpu_invalidate_fpregs_state(fpu); | 296 | __fpu_invalidate_fpregs_state(fpu); |
307 | } else { | 297 | } else { |
308 | fpstate_init(&fpu->state); | 298 | fpstate_init(&fpu->state); |
@@ -310,74 +300,11 @@ void fpu__activate_fpstate_write(struct fpu *fpu) | |||
310 | 300 | ||
311 | trace_x86_fpu_activate_state(fpu); | 301 | trace_x86_fpu_activate_state(fpu); |
312 | /* Safe to do for stopped child tasks: */ | 302 | /* Safe to do for stopped child tasks: */ |
313 | fpu->fpstate_active = 1; | 303 | fpu->initialized = 1; |
314 | } | 304 | } |
315 | } | 305 | } |
316 | 306 | ||
317 | /* | 307 | /* |
318 | * This function must be called before we write the current | ||
319 | * task's fpstate. | ||
320 | * | ||
321 | * This call gets the current FPU register state and moves | ||
322 | * it in to the 'fpstate'. Preemption is disabled so that | ||
323 | * no writes to the 'fpstate' can occur from context | ||
324 | * swiches. | ||
325 | * | ||
326 | * Must be followed by a fpu__current_fpstate_write_end(). | ||
327 | */ | ||
328 | void fpu__current_fpstate_write_begin(void) | ||
329 | { | ||
330 | struct fpu *fpu = ¤t->thread.fpu; | ||
331 | |||
332 | /* | ||
333 | * Ensure that the context-switching code does not write | ||
334 | * over the fpstate while we are doing our update. | ||
335 | */ | ||
336 | preempt_disable(); | ||
337 | |||
338 | /* | ||
339 | * Move the fpregs in to the fpu's 'fpstate'. | ||
340 | */ | ||
341 | fpu__activate_fpstate_read(fpu); | ||
342 | |||
343 | /* | ||
344 | * The caller is about to write to 'fpu'. Ensure that no | ||
345 | * CPU thinks that its fpregs match the fpstate. This | ||
346 | * ensures we will not be lazy and skip a XRSTOR in the | ||
347 | * future. | ||
348 | */ | ||
349 | __fpu_invalidate_fpregs_state(fpu); | ||
350 | } | ||
351 | |||
352 | /* | ||
353 | * This function must be paired with fpu__current_fpstate_write_begin() | ||
354 | * | ||
355 | * This will ensure that the modified fpstate gets placed back in | ||
356 | * the fpregs if necessary. | ||
357 | * | ||
358 | * Note: This function may be called whether or not an _actual_ | ||
359 | * write to the fpstate occurred. | ||
360 | */ | ||
361 | void fpu__current_fpstate_write_end(void) | ||
362 | { | ||
363 | struct fpu *fpu = ¤t->thread.fpu; | ||
364 | |||
365 | /* | ||
366 | * 'fpu' now has an updated copy of the state, but the | ||
367 | * registers may still be out of date. Update them with | ||
368 | * an XRSTOR if they are active. | ||
369 | */ | ||
370 | if (fpregs_active()) | ||
371 | copy_kernel_to_fpregs(&fpu->state); | ||
372 | |||
373 | /* | ||
374 | * Our update is done and the fpregs/fpstate are in sync | ||
375 | * if necessary. Context switches can happen again. | ||
376 | */ | ||
377 | preempt_enable(); | ||
378 | } | ||
379 | |||
380 | /* | ||
381 | * 'fpu__restore()' is called to copy FPU registers from | 308 | * 'fpu__restore()' is called to copy FPU registers from |
382 | * the FPU fpstate to the live hw registers and to activate | 309 | * the FPU fpstate to the live hw registers and to activate |
383 | * access to the hardware registers, so that FPU instructions | 310 | * access to the hardware registers, so that FPU instructions |
@@ -389,7 +316,7 @@ void fpu__current_fpstate_write_end(void) | |||
389 | */ | 316 | */ |
390 | void fpu__restore(struct fpu *fpu) | 317 | void fpu__restore(struct fpu *fpu) |
391 | { | 318 | { |
392 | fpu__activate_curr(fpu); | 319 | fpu__initialize(fpu); |
393 | 320 | ||
394 | /* Avoid __kernel_fpu_begin() right after fpregs_activate() */ | 321 | /* Avoid __kernel_fpu_begin() right after fpregs_activate() */ |
395 | kernel_fpu_disable(); | 322 | kernel_fpu_disable(); |
@@ -414,15 +341,17 @@ void fpu__drop(struct fpu *fpu) | |||
414 | { | 341 | { |
415 | preempt_disable(); | 342 | preempt_disable(); |
416 | 343 | ||
417 | if (fpu->fpregs_active) { | 344 | if (fpu == ¤t->thread.fpu) { |
418 | /* Ignore delayed exceptions from user space */ | 345 | if (fpu->initialized) { |
419 | asm volatile("1: fwait\n" | 346 | /* Ignore delayed exceptions from user space */ |
420 | "2:\n" | 347 | asm volatile("1: fwait\n" |
421 | _ASM_EXTABLE(1b, 2b)); | 348 | "2:\n" |
422 | fpregs_deactivate(fpu); | 349 | _ASM_EXTABLE(1b, 2b)); |
350 | fpregs_deactivate(fpu); | ||
351 | } | ||
423 | } | 352 | } |
424 | 353 | ||
425 | fpu->fpstate_active = 0; | 354 | fpu->initialized = 0; |
426 | 355 | ||
427 | trace_x86_fpu_dropped(fpu); | 356 | trace_x86_fpu_dropped(fpu); |
428 | 357 | ||
@@ -462,9 +391,11 @@ void fpu__clear(struct fpu *fpu) | |||
462 | * Make sure fpstate is cleared and initialized. | 391 | * Make sure fpstate is cleared and initialized. |
463 | */ | 392 | */ |
464 | if (static_cpu_has(X86_FEATURE_FPU)) { | 393 | if (static_cpu_has(X86_FEATURE_FPU)) { |
465 | fpu__activate_curr(fpu); | 394 | preempt_disable(); |
395 | fpu__initialize(fpu); | ||
466 | user_fpu_begin(); | 396 | user_fpu_begin(); |
467 | copy_init_fpstate_to_fpregs(); | 397 | copy_init_fpstate_to_fpregs(); |
398 | preempt_enable(); | ||
468 | } | 399 | } |
469 | } | 400 | } |
470 | 401 | ||
diff --git a/arch/x86/kernel/fpu/init.c b/arch/x86/kernel/fpu/init.c index d5d44c452624..7affb7e3d9a5 100644 --- a/arch/x86/kernel/fpu/init.c +++ b/arch/x86/kernel/fpu/init.c | |||
@@ -240,7 +240,7 @@ static void __init fpu__init_system_ctx_switch(void) | |||
240 | WARN_ON_FPU(!on_boot_cpu); | 240 | WARN_ON_FPU(!on_boot_cpu); |
241 | on_boot_cpu = 0; | 241 | on_boot_cpu = 0; |
242 | 242 | ||
243 | WARN_ON_FPU(current->thread.fpu.fpstate_active); | 243 | WARN_ON_FPU(current->thread.fpu.initialized); |
244 | } | 244 | } |
245 | 245 | ||
246 | /* | 246 | /* |
diff --git a/arch/x86/kernel/fpu/regset.c b/arch/x86/kernel/fpu/regset.c index b188b16841e3..3ea151372389 100644 --- a/arch/x86/kernel/fpu/regset.c +++ b/arch/x86/kernel/fpu/regset.c | |||
@@ -16,14 +16,14 @@ int regset_fpregs_active(struct task_struct *target, const struct user_regset *r | |||
16 | { | 16 | { |
17 | struct fpu *target_fpu = &target->thread.fpu; | 17 | struct fpu *target_fpu = &target->thread.fpu; |
18 | 18 | ||
19 | return target_fpu->fpstate_active ? regset->n : 0; | 19 | return target_fpu->initialized ? regset->n : 0; |
20 | } | 20 | } |
21 | 21 | ||
22 | int regset_xregset_fpregs_active(struct task_struct *target, const struct user_regset *regset) | 22 | int regset_xregset_fpregs_active(struct task_struct *target, const struct user_regset *regset) |
23 | { | 23 | { |
24 | struct fpu *target_fpu = &target->thread.fpu; | 24 | struct fpu *target_fpu = &target->thread.fpu; |
25 | 25 | ||
26 | if (boot_cpu_has(X86_FEATURE_FXSR) && target_fpu->fpstate_active) | 26 | if (boot_cpu_has(X86_FEATURE_FXSR) && target_fpu->initialized) |
27 | return regset->n; | 27 | return regset->n; |
28 | else | 28 | else |
29 | return 0; | 29 | return 0; |
@@ -38,7 +38,7 @@ int xfpregs_get(struct task_struct *target, const struct user_regset *regset, | |||
38 | if (!boot_cpu_has(X86_FEATURE_FXSR)) | 38 | if (!boot_cpu_has(X86_FEATURE_FXSR)) |
39 | return -ENODEV; | 39 | return -ENODEV; |
40 | 40 | ||
41 | fpu__activate_fpstate_read(fpu); | 41 | fpu__prepare_read(fpu); |
42 | fpstate_sanitize_xstate(fpu); | 42 | fpstate_sanitize_xstate(fpu); |
43 | 43 | ||
44 | return user_regset_copyout(&pos, &count, &kbuf, &ubuf, | 44 | return user_regset_copyout(&pos, &count, &kbuf, &ubuf, |
@@ -55,7 +55,7 @@ int xfpregs_set(struct task_struct *target, const struct user_regset *regset, | |||
55 | if (!boot_cpu_has(X86_FEATURE_FXSR)) | 55 | if (!boot_cpu_has(X86_FEATURE_FXSR)) |
56 | return -ENODEV; | 56 | return -ENODEV; |
57 | 57 | ||
58 | fpu__activate_fpstate_write(fpu); | 58 | fpu__prepare_write(fpu); |
59 | fpstate_sanitize_xstate(fpu); | 59 | fpstate_sanitize_xstate(fpu); |
60 | 60 | ||
61 | ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, | 61 | ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, |
@@ -89,10 +89,13 @@ int xstateregs_get(struct task_struct *target, const struct user_regset *regset, | |||
89 | 89 | ||
90 | xsave = &fpu->state.xsave; | 90 | xsave = &fpu->state.xsave; |
91 | 91 | ||
92 | fpu__activate_fpstate_read(fpu); | 92 | fpu__prepare_read(fpu); |
93 | 93 | ||
94 | if (using_compacted_format()) { | 94 | if (using_compacted_format()) { |
95 | ret = copyout_from_xsaves(pos, count, kbuf, ubuf, xsave); | 95 | if (kbuf) |
96 | ret = copy_xstate_to_kernel(kbuf, xsave, pos, count); | ||
97 | else | ||
98 | ret = copy_xstate_to_user(ubuf, xsave, pos, count); | ||
96 | } else { | 99 | } else { |
97 | fpstate_sanitize_xstate(fpu); | 100 | fpstate_sanitize_xstate(fpu); |
98 | /* | 101 | /* |
@@ -129,28 +132,29 @@ int xstateregs_set(struct task_struct *target, const struct user_regset *regset, | |||
129 | 132 | ||
130 | xsave = &fpu->state.xsave; | 133 | xsave = &fpu->state.xsave; |
131 | 134 | ||
132 | fpu__activate_fpstate_write(fpu); | 135 | fpu__prepare_write(fpu); |
133 | 136 | ||
134 | if (boot_cpu_has(X86_FEATURE_XSAVES)) | 137 | if (using_compacted_format()) { |
135 | ret = copyin_to_xsaves(kbuf, ubuf, xsave); | 138 | if (kbuf) |
136 | else | 139 | ret = copy_kernel_to_xstate(xsave, kbuf); |
140 | else | ||
141 | ret = copy_user_to_xstate(xsave, ubuf); | ||
142 | } else { | ||
137 | ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, xsave, 0, -1); | 143 | ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, xsave, 0, -1); |
138 | 144 | if (!ret) | |
139 | /* | 145 | ret = validate_xstate_header(&xsave->header); |
140 | * In case of failure, mark all states as init: | 146 | } |
141 | */ | ||
142 | if (ret) | ||
143 | fpstate_init(&fpu->state); | ||
144 | 147 | ||
145 | /* | 148 | /* |
146 | * mxcsr reserved bits must be masked to zero for security reasons. | 149 | * mxcsr reserved bits must be masked to zero for security reasons. |
147 | */ | 150 | */ |
148 | xsave->i387.mxcsr &= mxcsr_feature_mask; | 151 | xsave->i387.mxcsr &= mxcsr_feature_mask; |
149 | xsave->header.xfeatures &= xfeatures_mask; | 152 | |
150 | /* | 153 | /* |
151 | * These bits must be zero. | 154 | * In case of failure, mark all states as init: |
152 | */ | 155 | */ |
153 | memset(&xsave->header.reserved, 0, 48); | 156 | if (ret) |
157 | fpstate_init(&fpu->state); | ||
154 | 158 | ||
155 | return ret; | 159 | return ret; |
156 | } | 160 | } |
@@ -299,7 +303,7 @@ int fpregs_get(struct task_struct *target, const struct user_regset *regset, | |||
299 | struct fpu *fpu = &target->thread.fpu; | 303 | struct fpu *fpu = &target->thread.fpu; |
300 | struct user_i387_ia32_struct env; | 304 | struct user_i387_ia32_struct env; |
301 | 305 | ||
302 | fpu__activate_fpstate_read(fpu); | 306 | fpu__prepare_read(fpu); |
303 | 307 | ||
304 | if (!boot_cpu_has(X86_FEATURE_FPU)) | 308 | if (!boot_cpu_has(X86_FEATURE_FPU)) |
305 | return fpregs_soft_get(target, regset, pos, count, kbuf, ubuf); | 309 | return fpregs_soft_get(target, regset, pos, count, kbuf, ubuf); |
@@ -329,7 +333,7 @@ int fpregs_set(struct task_struct *target, const struct user_regset *regset, | |||
329 | struct user_i387_ia32_struct env; | 333 | struct user_i387_ia32_struct env; |
330 | int ret; | 334 | int ret; |
331 | 335 | ||
332 | fpu__activate_fpstate_write(fpu); | 336 | fpu__prepare_write(fpu); |
333 | fpstate_sanitize_xstate(fpu); | 337 | fpstate_sanitize_xstate(fpu); |
334 | 338 | ||
335 | if (!boot_cpu_has(X86_FEATURE_FPU)) | 339 | if (!boot_cpu_has(X86_FEATURE_FPU)) |
@@ -369,7 +373,7 @@ int dump_fpu(struct pt_regs *regs, struct user_i387_struct *ufpu) | |||
369 | struct fpu *fpu = &tsk->thread.fpu; | 373 | struct fpu *fpu = &tsk->thread.fpu; |
370 | int fpvalid; | 374 | int fpvalid; |
371 | 375 | ||
372 | fpvalid = fpu->fpstate_active; | 376 | fpvalid = fpu->initialized; |
373 | if (fpvalid) | 377 | if (fpvalid) |
374 | fpvalid = !fpregs_get(tsk, NULL, | 378 | fpvalid = !fpregs_get(tsk, NULL, |
375 | 0, sizeof(struct user_i387_ia32_struct), | 379 | 0, sizeof(struct user_i387_ia32_struct), |
diff --git a/arch/x86/kernel/fpu/signal.c b/arch/x86/kernel/fpu/signal.c index 83c23c230b4c..fb639e70048f 100644 --- a/arch/x86/kernel/fpu/signal.c +++ b/arch/x86/kernel/fpu/signal.c | |||
@@ -155,7 +155,8 @@ static inline int copy_fpregs_to_sigframe(struct xregs_state __user *buf) | |||
155 | */ | 155 | */ |
156 | int copy_fpstate_to_sigframe(void __user *buf, void __user *buf_fx, int size) | 156 | int copy_fpstate_to_sigframe(void __user *buf, void __user *buf_fx, int size) |
157 | { | 157 | { |
158 | struct xregs_state *xsave = ¤t->thread.fpu.state.xsave; | 158 | struct fpu *fpu = ¤t->thread.fpu; |
159 | struct xregs_state *xsave = &fpu->state.xsave; | ||
159 | struct task_struct *tsk = current; | 160 | struct task_struct *tsk = current; |
160 | int ia32_fxstate = (buf != buf_fx); | 161 | int ia32_fxstate = (buf != buf_fx); |
161 | 162 | ||
@@ -170,13 +171,13 @@ int copy_fpstate_to_sigframe(void __user *buf, void __user *buf_fx, int size) | |||
170 | sizeof(struct user_i387_ia32_struct), NULL, | 171 | sizeof(struct user_i387_ia32_struct), NULL, |
171 | (struct _fpstate_32 __user *) buf) ? -1 : 1; | 172 | (struct _fpstate_32 __user *) buf) ? -1 : 1; |
172 | 173 | ||
173 | if (fpregs_active() || using_compacted_format()) { | 174 | if (fpu->initialized || using_compacted_format()) { |
174 | /* Save the live register state to the user directly. */ | 175 | /* Save the live register state to the user directly. */ |
175 | if (copy_fpregs_to_sigframe(buf_fx)) | 176 | if (copy_fpregs_to_sigframe(buf_fx)) |
176 | return -1; | 177 | return -1; |
177 | /* Update the thread's fxstate to save the fsave header. */ | 178 | /* Update the thread's fxstate to save the fsave header. */ |
178 | if (ia32_fxstate) | 179 | if (ia32_fxstate) |
179 | copy_fxregs_to_kernel(&tsk->thread.fpu); | 180 | copy_fxregs_to_kernel(fpu); |
180 | } else { | 181 | } else { |
181 | /* | 182 | /* |
182 | * It is a *bug* if kernel uses compacted-format for xsave | 183 | * It is a *bug* if kernel uses compacted-format for xsave |
@@ -189,7 +190,7 @@ int copy_fpstate_to_sigframe(void __user *buf, void __user *buf_fx, int size) | |||
189 | return -1; | 190 | return -1; |
190 | } | 191 | } |
191 | 192 | ||
192 | fpstate_sanitize_xstate(&tsk->thread.fpu); | 193 | fpstate_sanitize_xstate(fpu); |
193 | if (__copy_to_user(buf_fx, xsave, fpu_user_xstate_size)) | 194 | if (__copy_to_user(buf_fx, xsave, fpu_user_xstate_size)) |
194 | return -1; | 195 | return -1; |
195 | } | 196 | } |
@@ -213,8 +214,11 @@ sanitize_restored_xstate(struct task_struct *tsk, | |||
213 | struct xstate_header *header = &xsave->header; | 214 | struct xstate_header *header = &xsave->header; |
214 | 215 | ||
215 | if (use_xsave()) { | 216 | if (use_xsave()) { |
216 | /* These bits must be zero. */ | 217 | /* |
217 | memset(header->reserved, 0, 48); | 218 | * Note: we don't need to zero the reserved bits in the |
219 | * xstate_header here because we either didn't copy them at all, | ||
220 | * or we checked earlier that they aren't set. | ||
221 | */ | ||
218 | 222 | ||
219 | /* | 223 | /* |
220 | * Init the state that is not present in the memory | 224 | * Init the state that is not present in the memory |
@@ -223,7 +227,7 @@ sanitize_restored_xstate(struct task_struct *tsk, | |||
223 | if (fx_only) | 227 | if (fx_only) |
224 | header->xfeatures = XFEATURE_MASK_FPSSE; | 228 | header->xfeatures = XFEATURE_MASK_FPSSE; |
225 | else | 229 | else |
226 | header->xfeatures &= (xfeatures_mask & xfeatures); | 230 | header->xfeatures &= xfeatures; |
227 | } | 231 | } |
228 | 232 | ||
229 | if (use_fxsr()) { | 233 | if (use_fxsr()) { |
@@ -279,7 +283,7 @@ static int __fpu__restore_sig(void __user *buf, void __user *buf_fx, int size) | |||
279 | if (!access_ok(VERIFY_READ, buf, size)) | 283 | if (!access_ok(VERIFY_READ, buf, size)) |
280 | return -EACCES; | 284 | return -EACCES; |
281 | 285 | ||
282 | fpu__activate_curr(fpu); | 286 | fpu__initialize(fpu); |
283 | 287 | ||
284 | if (!static_cpu_has(X86_FEATURE_FPU)) | 288 | if (!static_cpu_has(X86_FEATURE_FPU)) |
285 | return fpregs_soft_set(current, NULL, | 289 | return fpregs_soft_set(current, NULL, |
@@ -307,28 +311,29 @@ static int __fpu__restore_sig(void __user *buf, void __user *buf_fx, int size) | |||
307 | /* | 311 | /* |
308 | * For 32-bit frames with fxstate, copy the user state to the | 312 | * For 32-bit frames with fxstate, copy the user state to the |
309 | * thread's fpu state, reconstruct fxstate from the fsave | 313 | * thread's fpu state, reconstruct fxstate from the fsave |
310 | * header. Sanitize the copied state etc. | 314 | * header. Validate and sanitize the copied state. |
311 | */ | 315 | */ |
312 | struct fpu *fpu = &tsk->thread.fpu; | 316 | struct fpu *fpu = &tsk->thread.fpu; |
313 | struct user_i387_ia32_struct env; | 317 | struct user_i387_ia32_struct env; |
314 | int err = 0; | 318 | int err = 0; |
315 | 319 | ||
316 | /* | 320 | /* |
317 | * Drop the current fpu which clears fpu->fpstate_active. This ensures | 321 | * Drop the current fpu which clears fpu->initialized. This ensures |
318 | * that any context-switch during the copy of the new state, | 322 | * that any context-switch during the copy of the new state, |
319 | * avoids the intermediate state from getting restored/saved. | 323 | * avoids the intermediate state from getting restored/saved. |
320 | * Thus avoiding the new restored state from getting corrupted. | 324 | * Thus avoiding the new restored state from getting corrupted. |
321 | * We will be ready to restore/save the state only after | 325 | * We will be ready to restore/save the state only after |
322 | * fpu->fpstate_active is again set. | 326 | * fpu->initialized is again set. |
323 | */ | 327 | */ |
324 | fpu__drop(fpu); | 328 | fpu__drop(fpu); |
325 | 329 | ||
326 | if (using_compacted_format()) { | 330 | if (using_compacted_format()) { |
327 | err = copyin_to_xsaves(NULL, buf_fx, | 331 | err = copy_user_to_xstate(&fpu->state.xsave, buf_fx); |
328 | &fpu->state.xsave); | ||
329 | } else { | 332 | } else { |
330 | err = __copy_from_user(&fpu->state.xsave, | 333 | err = __copy_from_user(&fpu->state.xsave, buf_fx, state_size); |
331 | buf_fx, state_size); | 334 | |
335 | if (!err && state_size > offsetof(struct xregs_state, header)) | ||
336 | err = validate_xstate_header(&fpu->state.xsave.header); | ||
332 | } | 337 | } |
333 | 338 | ||
334 | if (err || __copy_from_user(&env, buf, sizeof(env))) { | 339 | if (err || __copy_from_user(&env, buf, sizeof(env))) { |
@@ -339,7 +344,7 @@ static int __fpu__restore_sig(void __user *buf, void __user *buf_fx, int size) | |||
339 | sanitize_restored_xstate(tsk, &env, xfeatures, fx_only); | 344 | sanitize_restored_xstate(tsk, &env, xfeatures, fx_only); |
340 | } | 345 | } |
341 | 346 | ||
342 | fpu->fpstate_active = 1; | 347 | fpu->initialized = 1; |
343 | preempt_disable(); | 348 | preempt_disable(); |
344 | fpu__restore(fpu); | 349 | fpu__restore(fpu); |
345 | preempt_enable(); | 350 | preempt_enable(); |
diff --git a/arch/x86/kernel/fpu/xstate.c b/arch/x86/kernel/fpu/xstate.c index c24ac1efb12d..f1d5476c9022 100644 --- a/arch/x86/kernel/fpu/xstate.c +++ b/arch/x86/kernel/fpu/xstate.c | |||
@@ -483,6 +483,30 @@ int using_compacted_format(void) | |||
483 | return boot_cpu_has(X86_FEATURE_XSAVES); | 483 | return boot_cpu_has(X86_FEATURE_XSAVES); |
484 | } | 484 | } |
485 | 485 | ||
486 | /* Validate an xstate header supplied by userspace (ptrace or sigreturn) */ | ||
487 | int validate_xstate_header(const struct xstate_header *hdr) | ||
488 | { | ||
489 | /* No unknown or supervisor features may be set */ | ||
490 | if (hdr->xfeatures & (~xfeatures_mask | XFEATURE_MASK_SUPERVISOR)) | ||
491 | return -EINVAL; | ||
492 | |||
493 | /* Userspace must use the uncompacted format */ | ||
494 | if (hdr->xcomp_bv) | ||
495 | return -EINVAL; | ||
496 | |||
497 | /* | ||
498 | * If 'reserved' is shrunken to add a new field, make sure to validate | ||
499 | * that new field here! | ||
500 | */ | ||
501 | BUILD_BUG_ON(sizeof(hdr->reserved) != 48); | ||
502 | |||
503 | /* No reserved bits may be set */ | ||
504 | if (memchr_inv(hdr->reserved, 0, sizeof(hdr->reserved))) | ||
505 | return -EINVAL; | ||
506 | |||
507 | return 0; | ||
508 | } | ||
509 | |||
486 | static void __xstate_dump_leaves(void) | 510 | static void __xstate_dump_leaves(void) |
487 | { | 511 | { |
488 | int i; | 512 | int i; |
@@ -867,7 +891,7 @@ const void *get_xsave_field_ptr(int xsave_state) | |||
867 | { | 891 | { |
868 | struct fpu *fpu = ¤t->thread.fpu; | 892 | struct fpu *fpu = ¤t->thread.fpu; |
869 | 893 | ||
870 | if (!fpu->fpstate_active) | 894 | if (!fpu->initialized) |
871 | return NULL; | 895 | return NULL; |
872 | /* | 896 | /* |
873 | * fpu__save() takes the CPU's xstate registers | 897 | * fpu__save() takes the CPU's xstate registers |
@@ -921,38 +945,129 @@ int arch_set_user_pkey_access(struct task_struct *tsk, int pkey, | |||
921 | #endif /* ! CONFIG_ARCH_HAS_PKEYS */ | 945 | #endif /* ! CONFIG_ARCH_HAS_PKEYS */ |
922 | 946 | ||
923 | /* | 947 | /* |
948 | * Weird legacy quirk: SSE and YMM states store information in the | ||
949 | * MXCSR and MXCSR_FLAGS fields of the FP area. That means if the FP | ||
950 | * area is marked as unused in the xfeatures header, we need to copy | ||
951 | * MXCSR and MXCSR_FLAGS if either SSE or YMM are in use. | ||
952 | */ | ||
953 | static inline bool xfeatures_mxcsr_quirk(u64 xfeatures) | ||
954 | { | ||
955 | if (!(xfeatures & (XFEATURE_MASK_SSE|XFEATURE_MASK_YMM))) | ||
956 | return false; | ||
957 | |||
958 | if (xfeatures & XFEATURE_MASK_FP) | ||
959 | return false; | ||
960 | |||
961 | return true; | ||
962 | } | ||
963 | |||
964 | /* | ||
924 | * This is similar to user_regset_copyout(), but will not add offset to | 965 | * This is similar to user_regset_copyout(), but will not add offset to |
925 | * the source data pointer or increment pos, count, kbuf, and ubuf. | 966 | * the source data pointer or increment pos, count, kbuf, and ubuf. |
926 | */ | 967 | */ |
927 | static inline int xstate_copyout(unsigned int pos, unsigned int count, | 968 | static inline void |
928 | void *kbuf, void __user *ubuf, | 969 | __copy_xstate_to_kernel(void *kbuf, const void *data, |
929 | const void *data, const int start_pos, | 970 | unsigned int offset, unsigned int size, unsigned int size_total) |
930 | const int end_pos) | ||
931 | { | 971 | { |
932 | if ((count == 0) || (pos < start_pos)) | 972 | if (offset < size_total) { |
933 | return 0; | 973 | unsigned int copy = min(size, size_total - offset); |
934 | 974 | ||
935 | if (end_pos < 0 || pos < end_pos) { | 975 | memcpy(kbuf + offset, data, copy); |
936 | unsigned int copy = (end_pos < 0 ? count : min(count, end_pos - pos)); | 976 | } |
977 | } | ||
937 | 978 | ||
938 | if (kbuf) { | 979 | /* |
939 | memcpy(kbuf + pos, data, copy); | 980 | * Convert from kernel XSAVES compacted format to standard format and copy |
940 | } else { | 981 | * to a kernel-space ptrace buffer. |
941 | if (__copy_to_user(ubuf + pos, data, copy)) | 982 | * |
942 | return -EFAULT; | 983 | * It supports partial copy but pos always starts from zero. This is called |
984 | * from xstateregs_get() and there we check the CPU has XSAVES. | ||
985 | */ | ||
986 | int copy_xstate_to_kernel(void *kbuf, struct xregs_state *xsave, unsigned int offset_start, unsigned int size_total) | ||
987 | { | ||
988 | unsigned int offset, size; | ||
989 | struct xstate_header header; | ||
990 | int i; | ||
991 | |||
992 | /* | ||
993 | * Currently copy_regset_to_user() starts from pos 0: | ||
994 | */ | ||
995 | if (unlikely(offset_start != 0)) | ||
996 | return -EFAULT; | ||
997 | |||
998 | /* | ||
999 | * The destination is a ptrace buffer; we put in only user xstates: | ||
1000 | */ | ||
1001 | memset(&header, 0, sizeof(header)); | ||
1002 | header.xfeatures = xsave->header.xfeatures; | ||
1003 | header.xfeatures &= ~XFEATURE_MASK_SUPERVISOR; | ||
1004 | |||
1005 | /* | ||
1006 | * Copy xregs_state->header: | ||
1007 | */ | ||
1008 | offset = offsetof(struct xregs_state, header); | ||
1009 | size = sizeof(header); | ||
1010 | |||
1011 | __copy_xstate_to_kernel(kbuf, &header, offset, size, size_total); | ||
1012 | |||
1013 | for (i = 0; i < XFEATURE_MAX; i++) { | ||
1014 | /* | ||
1015 | * Copy only in-use xstates: | ||
1016 | */ | ||
1017 | if ((header.xfeatures >> i) & 1) { | ||
1018 | void *src = __raw_xsave_addr(xsave, 1 << i); | ||
1019 | |||
1020 | offset = xstate_offsets[i]; | ||
1021 | size = xstate_sizes[i]; | ||
1022 | |||
1023 | /* The next component has to fit fully into the output buffer: */ | ||
1024 | if (offset + size > size_total) | ||
1025 | break; | ||
1026 | |||
1027 | __copy_xstate_to_kernel(kbuf, src, offset, size, size_total); | ||
943 | } | 1028 | } |
1029 | |||
1030 | } | ||
1031 | |||
1032 | if (xfeatures_mxcsr_quirk(header.xfeatures)) { | ||
1033 | offset = offsetof(struct fxregs_state, mxcsr); | ||
1034 | size = MXCSR_AND_FLAGS_SIZE; | ||
1035 | __copy_xstate_to_kernel(kbuf, &xsave->i387.mxcsr, offset, size, size_total); | ||
1036 | } | ||
1037 | |||
1038 | /* | ||
1039 | * Fill xsave->i387.sw_reserved value for ptrace frame: | ||
1040 | */ | ||
1041 | offset = offsetof(struct fxregs_state, sw_reserved); | ||
1042 | size = sizeof(xstate_fx_sw_bytes); | ||
1043 | |||
1044 | __copy_xstate_to_kernel(kbuf, xstate_fx_sw_bytes, offset, size, size_total); | ||
1045 | |||
1046 | return 0; | ||
1047 | } | ||
1048 | |||
1049 | static inline int | ||
1050 | __copy_xstate_to_user(void __user *ubuf, const void *data, unsigned int offset, unsigned int size, unsigned int size_total) | ||
1051 | { | ||
1052 | if (!size) | ||
1053 | return 0; | ||
1054 | |||
1055 | if (offset < size_total) { | ||
1056 | unsigned int copy = min(size, size_total - offset); | ||
1057 | |||
1058 | if (__copy_to_user(ubuf + offset, data, copy)) | ||
1059 | return -EFAULT; | ||
944 | } | 1060 | } |
945 | return 0; | 1061 | return 0; |
946 | } | 1062 | } |
947 | 1063 | ||
948 | /* | 1064 | /* |
949 | * Convert from kernel XSAVES compacted format to standard format and copy | 1065 | * Convert from kernel XSAVES compacted format to standard format and copy |
950 | * to a ptrace buffer. It supports partial copy but pos always starts from | 1066 | * to a user-space buffer. It supports partial copy but pos always starts from |
951 | * zero. This is called from xstateregs_get() and there we check the CPU | 1067 | * zero. This is called from xstateregs_get() and there we check the CPU |
952 | * has XSAVES. | 1068 | * has XSAVES. |
953 | */ | 1069 | */ |
954 | int copyout_from_xsaves(unsigned int pos, unsigned int count, void *kbuf, | 1070 | int copy_xstate_to_user(void __user *ubuf, struct xregs_state *xsave, unsigned int offset_start, unsigned int size_total) |
955 | void __user *ubuf, struct xregs_state *xsave) | ||
956 | { | 1071 | { |
957 | unsigned int offset, size; | 1072 | unsigned int offset, size; |
958 | int ret, i; | 1073 | int ret, i; |
@@ -961,7 +1076,7 @@ int copyout_from_xsaves(unsigned int pos, unsigned int count, void *kbuf, | |||
961 | /* | 1076 | /* |
962 | * Currently copy_regset_to_user() starts from pos 0: | 1077 | * Currently copy_regset_to_user() starts from pos 0: |
963 | */ | 1078 | */ |
964 | if (unlikely(pos != 0)) | 1079 | if (unlikely(offset_start != 0)) |
965 | return -EFAULT; | 1080 | return -EFAULT; |
966 | 1081 | ||
967 | /* | 1082 | /* |
@@ -977,8 +1092,7 @@ int copyout_from_xsaves(unsigned int pos, unsigned int count, void *kbuf, | |||
977 | offset = offsetof(struct xregs_state, header); | 1092 | offset = offsetof(struct xregs_state, header); |
978 | size = sizeof(header); | 1093 | size = sizeof(header); |
979 | 1094 | ||
980 | ret = xstate_copyout(offset, size, kbuf, ubuf, &header, 0, count); | 1095 | ret = __copy_xstate_to_user(ubuf, &header, offset, size, size_total); |
981 | |||
982 | if (ret) | 1096 | if (ret) |
983 | return ret; | 1097 | return ret; |
984 | 1098 | ||
@@ -992,25 +1106,30 @@ int copyout_from_xsaves(unsigned int pos, unsigned int count, void *kbuf, | |||
992 | offset = xstate_offsets[i]; | 1106 | offset = xstate_offsets[i]; |
993 | size = xstate_sizes[i]; | 1107 | size = xstate_sizes[i]; |
994 | 1108 | ||
995 | ret = xstate_copyout(offset, size, kbuf, ubuf, src, 0, count); | 1109 | /* The next component has to fit fully into the output buffer: */ |
1110 | if (offset + size > size_total) | ||
1111 | break; | ||
996 | 1112 | ||
1113 | ret = __copy_xstate_to_user(ubuf, src, offset, size, size_total); | ||
997 | if (ret) | 1114 | if (ret) |
998 | return ret; | 1115 | return ret; |
999 | |||
1000 | if (offset + size >= count) | ||
1001 | break; | ||
1002 | } | 1116 | } |
1003 | 1117 | ||
1004 | } | 1118 | } |
1005 | 1119 | ||
1120 | if (xfeatures_mxcsr_quirk(header.xfeatures)) { | ||
1121 | offset = offsetof(struct fxregs_state, mxcsr); | ||
1122 | size = MXCSR_AND_FLAGS_SIZE; | ||
1123 | __copy_xstate_to_user(ubuf, &xsave->i387.mxcsr, offset, size, size_total); | ||
1124 | } | ||
1125 | |||
1006 | /* | 1126 | /* |
1007 | * Fill xsave->i387.sw_reserved value for ptrace frame: | 1127 | * Fill xsave->i387.sw_reserved value for ptrace frame: |
1008 | */ | 1128 | */ |
1009 | offset = offsetof(struct fxregs_state, sw_reserved); | 1129 | offset = offsetof(struct fxregs_state, sw_reserved); |
1010 | size = sizeof(xstate_fx_sw_bytes); | 1130 | size = sizeof(xstate_fx_sw_bytes); |
1011 | 1131 | ||
1012 | ret = xstate_copyout(offset, size, kbuf, ubuf, xstate_fx_sw_bytes, 0, count); | 1132 | ret = __copy_xstate_to_user(ubuf, xstate_fx_sw_bytes, offset, size, size_total); |
1013 | |||
1014 | if (ret) | 1133 | if (ret) |
1015 | return ret; | 1134 | return ret; |
1016 | 1135 | ||
@@ -1018,55 +1137,98 @@ int copyout_from_xsaves(unsigned int pos, unsigned int count, void *kbuf, | |||
1018 | } | 1137 | } |
1019 | 1138 | ||
1020 | /* | 1139 | /* |
1021 | * Convert from a ptrace standard-format buffer to kernel XSAVES format | 1140 | * Convert from a ptrace standard-format kernel buffer to kernel XSAVES format |
1022 | * and copy to the target thread. This is called from xstateregs_set() and | 1141 | * and copy to the target thread. This is called from xstateregs_set(). |
1023 | * there we check the CPU has XSAVES and a whole standard-sized buffer | ||
1024 | * exists. | ||
1025 | */ | 1142 | */ |
1026 | int copyin_to_xsaves(const void *kbuf, const void __user *ubuf, | 1143 | int copy_kernel_to_xstate(struct xregs_state *xsave, const void *kbuf) |
1027 | struct xregs_state *xsave) | ||
1028 | { | 1144 | { |
1029 | unsigned int offset, size; | 1145 | unsigned int offset, size; |
1030 | int i; | 1146 | int i; |
1031 | u64 xfeatures; | 1147 | struct xstate_header hdr; |
1032 | u64 allowed_features; | ||
1033 | 1148 | ||
1034 | offset = offsetof(struct xregs_state, header); | 1149 | offset = offsetof(struct xregs_state, header); |
1035 | size = sizeof(xfeatures); | 1150 | size = sizeof(hdr); |
1036 | 1151 | ||
1037 | if (kbuf) { | 1152 | memcpy(&hdr, kbuf + offset, size); |
1038 | memcpy(&xfeatures, kbuf + offset, size); | 1153 | |
1039 | } else { | 1154 | if (validate_xstate_header(&hdr)) |
1040 | if (__copy_from_user(&xfeatures, ubuf + offset, size)) | 1155 | return -EINVAL; |
1041 | return -EFAULT; | 1156 | |
1157 | for (i = 0; i < XFEATURE_MAX; i++) { | ||
1158 | u64 mask = ((u64)1 << i); | ||
1159 | |||
1160 | if (hdr.xfeatures & mask) { | ||
1161 | void *dst = __raw_xsave_addr(xsave, 1 << i); | ||
1162 | |||
1163 | offset = xstate_offsets[i]; | ||
1164 | size = xstate_sizes[i]; | ||
1165 | |||
1166 | memcpy(dst, kbuf + offset, size); | ||
1167 | } | ||
1168 | } | ||
1169 | |||
1170 | if (xfeatures_mxcsr_quirk(hdr.xfeatures)) { | ||
1171 | offset = offsetof(struct fxregs_state, mxcsr); | ||
1172 | size = MXCSR_AND_FLAGS_SIZE; | ||
1173 | memcpy(&xsave->i387.mxcsr, kbuf + offset, size); | ||
1042 | } | 1174 | } |
1043 | 1175 | ||
1044 | /* | 1176 | /* |
1045 | * Reject if the user sets any disabled or supervisor features: | 1177 | * The state that came in from userspace was user-state only. |
1178 | * Mask all the user states out of 'xfeatures': | ||
1179 | */ | ||
1180 | xsave->header.xfeatures &= XFEATURE_MASK_SUPERVISOR; | ||
1181 | |||
1182 | /* | ||
1183 | * Add back in the features that came in from userspace: | ||
1046 | */ | 1184 | */ |
1047 | allowed_features = xfeatures_mask & ~XFEATURE_MASK_SUPERVISOR; | 1185 | xsave->header.xfeatures |= hdr.xfeatures; |
1048 | 1186 | ||
1049 | if (xfeatures & ~allowed_features) | 1187 | return 0; |
1188 | } | ||
1189 | |||
1190 | /* | ||
1191 | * Convert from a ptrace or sigreturn standard-format user-space buffer to | ||
1192 | * kernel XSAVES format and copy to the target thread. This is called from | ||
1193 | * xstateregs_set(), as well as potentially from the sigreturn() and | ||
1194 | * rt_sigreturn() system calls. | ||
1195 | */ | ||
1196 | int copy_user_to_xstate(struct xregs_state *xsave, const void __user *ubuf) | ||
1197 | { | ||
1198 | unsigned int offset, size; | ||
1199 | int i; | ||
1200 | struct xstate_header hdr; | ||
1201 | |||
1202 | offset = offsetof(struct xregs_state, header); | ||
1203 | size = sizeof(hdr); | ||
1204 | |||
1205 | if (__copy_from_user(&hdr, ubuf + offset, size)) | ||
1206 | return -EFAULT; | ||
1207 | |||
1208 | if (validate_xstate_header(&hdr)) | ||
1050 | return -EINVAL; | 1209 | return -EINVAL; |
1051 | 1210 | ||
1052 | for (i = 0; i < XFEATURE_MAX; i++) { | 1211 | for (i = 0; i < XFEATURE_MAX; i++) { |
1053 | u64 mask = ((u64)1 << i); | 1212 | u64 mask = ((u64)1 << i); |
1054 | 1213 | ||
1055 | if (xfeatures & mask) { | 1214 | if (hdr.xfeatures & mask) { |
1056 | void *dst = __raw_xsave_addr(xsave, 1 << i); | 1215 | void *dst = __raw_xsave_addr(xsave, 1 << i); |
1057 | 1216 | ||
1058 | offset = xstate_offsets[i]; | 1217 | offset = xstate_offsets[i]; |
1059 | size = xstate_sizes[i]; | 1218 | size = xstate_sizes[i]; |
1060 | 1219 | ||
1061 | if (kbuf) { | 1220 | if (__copy_from_user(dst, ubuf + offset, size)) |
1062 | memcpy(dst, kbuf + offset, size); | 1221 | return -EFAULT; |
1063 | } else { | ||
1064 | if (__copy_from_user(dst, ubuf + offset, size)) | ||
1065 | return -EFAULT; | ||
1066 | } | ||
1067 | } | 1222 | } |
1068 | } | 1223 | } |
1069 | 1224 | ||
1225 | if (xfeatures_mxcsr_quirk(hdr.xfeatures)) { | ||
1226 | offset = offsetof(struct fxregs_state, mxcsr); | ||
1227 | size = MXCSR_AND_FLAGS_SIZE; | ||
1228 | if (__copy_from_user(&xsave->i387.mxcsr, ubuf + offset, size)) | ||
1229 | return -EFAULT; | ||
1230 | } | ||
1231 | |||
1070 | /* | 1232 | /* |
1071 | * The state that came in from userspace was user-state only. | 1233 | * The state that came in from userspace was user-state only. |
1072 | * Mask all the user states out of 'xfeatures': | 1234 | * Mask all the user states out of 'xfeatures': |
@@ -1076,7 +1238,7 @@ int copyin_to_xsaves(const void *kbuf, const void __user *ubuf, | |||
1076 | /* | 1238 | /* |
1077 | * Add back in the features that came in from userspace: | 1239 | * Add back in the features that came in from userspace: |
1078 | */ | 1240 | */ |
1079 | xsave->header.xfeatures |= xfeatures; | 1241 | xsave->header.xfeatures |= hdr.xfeatures; |
1080 | 1242 | ||
1081 | return 0; | 1243 | return 0; |
1082 | } | 1244 | } |
diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c index 1f38d9a4d9de..d4eb450144fd 100644 --- a/arch/x86/kernel/irq_32.c +++ b/arch/x86/kernel/irq_32.c | |||
@@ -64,7 +64,7 @@ static void call_on_stack(void *func, void *stack) | |||
64 | 64 | ||
65 | static inline void *current_stack(void) | 65 | static inline void *current_stack(void) |
66 | { | 66 | { |
67 | return (void *)(current_stack_pointer() & ~(THREAD_SIZE - 1)); | 67 | return (void *)(current_stack_pointer & ~(THREAD_SIZE - 1)); |
68 | } | 68 | } |
69 | 69 | ||
70 | static inline int execute_on_irq_stack(int overflow, struct irq_desc *desc) | 70 | static inline int execute_on_irq_stack(int overflow, struct irq_desc *desc) |
@@ -88,7 +88,7 @@ static inline int execute_on_irq_stack(int overflow, struct irq_desc *desc) | |||
88 | 88 | ||
89 | /* Save the next esp at the bottom of the stack */ | 89 | /* Save the next esp at the bottom of the stack */ |
90 | prev_esp = (u32 *)irqstk; | 90 | prev_esp = (u32 *)irqstk; |
91 | *prev_esp = current_stack_pointer(); | 91 | *prev_esp = current_stack_pointer; |
92 | 92 | ||
93 | if (unlikely(overflow)) | 93 | if (unlikely(overflow)) |
94 | call_on_stack(print_stack_overflow, isp); | 94 | call_on_stack(print_stack_overflow, isp); |
@@ -139,7 +139,7 @@ void do_softirq_own_stack(void) | |||
139 | 139 | ||
140 | /* Push the previous esp onto the stack */ | 140 | /* Push the previous esp onto the stack */ |
141 | prev_esp = (u32 *)irqstk; | 141 | prev_esp = (u32 *)irqstk; |
142 | *prev_esp = current_stack_pointer(); | 142 | *prev_esp = current_stack_pointer; |
143 | 143 | ||
144 | call_on_stack(__do_softirq, isp); | 144 | call_on_stack(__do_softirq, isp); |
145 | } | 145 | } |
diff --git a/arch/x86/kernel/ksysfs.c b/arch/x86/kernel/ksysfs.c index 4b0592ca9e47..8c1cc08f514f 100644 --- a/arch/x86/kernel/ksysfs.c +++ b/arch/x86/kernel/ksysfs.c | |||
@@ -299,7 +299,7 @@ static int __init create_setup_data_nodes(struct kobject *parent) | |||
299 | return 0; | 299 | return 0; |
300 | 300 | ||
301 | out_clean_nodes: | 301 | out_clean_nodes: |
302 | for (j = i - 1; j > 0; j--) | 302 | for (j = i - 1; j >= 0; j--) |
303 | cleanup_setup_data_node(*(kobjp + j)); | 303 | cleanup_setup_data_node(*(kobjp + j)); |
304 | kfree(kobjp); | 304 | kfree(kobjp); |
305 | out_setup_data_kobj: | 305 | out_setup_data_kobj: |
diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c index aa60a08b65b1..8bb9594d0761 100644 --- a/arch/x86/kernel/kvm.c +++ b/arch/x86/kernel/kvm.c | |||
@@ -117,7 +117,11 @@ static struct kvm_task_sleep_node *_find_apf_task(struct kvm_task_sleep_head *b, | |||
117 | return NULL; | 117 | return NULL; |
118 | } | 118 | } |
119 | 119 | ||
120 | void kvm_async_pf_task_wait(u32 token) | 120 | /* |
121 | * @interrupt_kernel: Is this called from a routine which interrupts the kernel | ||
122 | * (other than user space)? | ||
123 | */ | ||
124 | void kvm_async_pf_task_wait(u32 token, int interrupt_kernel) | ||
121 | { | 125 | { |
122 | u32 key = hash_32(token, KVM_TASK_SLEEP_HASHBITS); | 126 | u32 key = hash_32(token, KVM_TASK_SLEEP_HASHBITS); |
123 | struct kvm_task_sleep_head *b = &async_pf_sleepers[key]; | 127 | struct kvm_task_sleep_head *b = &async_pf_sleepers[key]; |
@@ -140,7 +144,10 @@ void kvm_async_pf_task_wait(u32 token) | |||
140 | 144 | ||
141 | n.token = token; | 145 | n.token = token; |
142 | n.cpu = smp_processor_id(); | 146 | n.cpu = smp_processor_id(); |
143 | n.halted = is_idle_task(current) || preempt_count() > 1; | 147 | n.halted = is_idle_task(current) || |
148 | (IS_ENABLED(CONFIG_PREEMPT_COUNT) | ||
149 | ? preempt_count() > 1 || rcu_preempt_depth() | ||
150 | : interrupt_kernel); | ||
144 | init_swait_queue_head(&n.wq); | 151 | init_swait_queue_head(&n.wq); |
145 | hlist_add_head(&n.link, &b->list); | 152 | hlist_add_head(&n.link, &b->list); |
146 | raw_spin_unlock(&b->lock); | 153 | raw_spin_unlock(&b->lock); |
@@ -268,7 +275,7 @@ do_async_page_fault(struct pt_regs *regs, unsigned long error_code) | |||
268 | case KVM_PV_REASON_PAGE_NOT_PRESENT: | 275 | case KVM_PV_REASON_PAGE_NOT_PRESENT: |
269 | /* page is swapped out by the host. */ | 276 | /* page is swapped out by the host. */ |
270 | prev_state = exception_enter(); | 277 | prev_state = exception_enter(); |
271 | kvm_async_pf_task_wait((u32)read_cr2()); | 278 | kvm_async_pf_task_wait((u32)read_cr2(), !user_mode(regs)); |
272 | exception_exit(prev_state); | 279 | exception_exit(prev_state); |
273 | break; | 280 | break; |
274 | case KVM_PV_REASON_PAGE_READY: | 281 | case KVM_PV_REASON_PAGE_READY: |
diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c index e04442345fc0..4e188fda5961 100644 --- a/arch/x86/kernel/signal.c +++ b/arch/x86/kernel/signal.c | |||
@@ -263,7 +263,7 @@ get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size, | |||
263 | sp = (unsigned long) ka->sa.sa_restorer; | 263 | sp = (unsigned long) ka->sa.sa_restorer; |
264 | } | 264 | } |
265 | 265 | ||
266 | if (fpu->fpstate_active) { | 266 | if (fpu->initialized) { |
267 | sp = fpu__alloc_mathframe(sp, IS_ENABLED(CONFIG_X86_32), | 267 | sp = fpu__alloc_mathframe(sp, IS_ENABLED(CONFIG_X86_32), |
268 | &buf_fx, &math_size); | 268 | &buf_fx, &math_size); |
269 | *fpstate = (void __user *)sp; | 269 | *fpstate = (void __user *)sp; |
@@ -279,7 +279,7 @@ get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size, | |||
279 | return (void __user *)-1L; | 279 | return (void __user *)-1L; |
280 | 280 | ||
281 | /* save i387 and extended state */ | 281 | /* save i387 and extended state */ |
282 | if (fpu->fpstate_active && | 282 | if (fpu->initialized && |
283 | copy_fpstate_to_sigframe(*fpstate, (void __user *)buf_fx, math_size) < 0) | 283 | copy_fpstate_to_sigframe(*fpstate, (void __user *)buf_fx, math_size) < 0) |
284 | return (void __user *)-1L; | 284 | return (void __user *)-1L; |
285 | 285 | ||
@@ -755,7 +755,7 @@ handle_signal(struct ksignal *ksig, struct pt_regs *regs) | |||
755 | /* | 755 | /* |
756 | * Ensure the signal handler starts with the new fpu state. | 756 | * Ensure the signal handler starts with the new fpu state. |
757 | */ | 757 | */ |
758 | if (fpu->fpstate_active) | 758 | if (fpu->initialized) |
759 | fpu__clear(fpu); | 759 | fpu__clear(fpu); |
760 | } | 760 | } |
761 | signal_setup_done(failed, ksig, stepping); | 761 | signal_setup_done(failed, ksig, stepping); |
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c index 0854ff169274..ad59edd84de7 100644 --- a/arch/x86/kernel/smpboot.c +++ b/arch/x86/kernel/smpboot.c | |||
@@ -232,12 +232,6 @@ static void notrace start_secondary(void *unused) | |||
232 | */ | 232 | */ |
233 | if (boot_cpu_has(X86_FEATURE_PCID)) | 233 | if (boot_cpu_has(X86_FEATURE_PCID)) |
234 | __write_cr4(__read_cr4() | X86_CR4_PCIDE); | 234 | __write_cr4(__read_cr4() | X86_CR4_PCIDE); |
235 | cpu_init(); | ||
236 | x86_cpuinit.early_percpu_clock_init(); | ||
237 | preempt_disable(); | ||
238 | smp_callin(); | ||
239 | |||
240 | enable_start_cpu0 = 0; | ||
241 | 235 | ||
242 | #ifdef CONFIG_X86_32 | 236 | #ifdef CONFIG_X86_32 |
243 | /* switch away from the initial page table */ | 237 | /* switch away from the initial page table */ |
@@ -245,6 +239,13 @@ static void notrace start_secondary(void *unused) | |||
245 | __flush_tlb_all(); | 239 | __flush_tlb_all(); |
246 | #endif | 240 | #endif |
247 | 241 | ||
242 | cpu_init(); | ||
243 | x86_cpuinit.early_percpu_clock_init(); | ||
244 | preempt_disable(); | ||
245 | smp_callin(); | ||
246 | |||
247 | enable_start_cpu0 = 0; | ||
248 | |||
248 | /* otherwise gcc will move up smp_processor_id before the cpu_init */ | 249 | /* otherwise gcc will move up smp_processor_id before the cpu_init */ |
249 | barrier(); | 250 | barrier(); |
250 | /* | 251 | /* |
diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c index 34ea3651362e..67db4f43309e 100644 --- a/arch/x86/kernel/traps.c +++ b/arch/x86/kernel/traps.c | |||
@@ -142,7 +142,7 @@ void ist_begin_non_atomic(struct pt_regs *regs) | |||
142 | * from double_fault. | 142 | * from double_fault. |
143 | */ | 143 | */ |
144 | BUG_ON((unsigned long)(current_top_of_stack() - | 144 | BUG_ON((unsigned long)(current_top_of_stack() - |
145 | current_stack_pointer()) >= THREAD_SIZE); | 145 | current_stack_pointer) >= THREAD_SIZE); |
146 | 146 | ||
147 | preempt_enable_no_resched(); | 147 | preempt_enable_no_resched(); |
148 | } | 148 | } |
diff --git a/arch/x86/kvm/Kconfig b/arch/x86/kvm/Kconfig index 3ea624452f93..3c48bc8bf08c 100644 --- a/arch/x86/kvm/Kconfig +++ b/arch/x86/kvm/Kconfig | |||
@@ -23,6 +23,7 @@ config KVM | |||
23 | depends on HIGH_RES_TIMERS | 23 | depends on HIGH_RES_TIMERS |
24 | # for TASKSTATS/TASK_DELAY_ACCT: | 24 | # for TASKSTATS/TASK_DELAY_ACCT: |
25 | depends on NET && MULTIUSER | 25 | depends on NET && MULTIUSER |
26 | depends on X86_LOCAL_APIC | ||
26 | select PREEMPT_NOTIFIERS | 27 | select PREEMPT_NOTIFIERS |
27 | select MMU_NOTIFIER | 28 | select MMU_NOTIFIER |
28 | select ANON_INODES | 29 | select ANON_INODES |
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c index 16bf6655aa85..d90cdc77e077 100644 --- a/arch/x86/kvm/emulate.c +++ b/arch/x86/kvm/emulate.c | |||
@@ -425,8 +425,10 @@ static int fastop(struct x86_emulate_ctxt *ctxt, void (*fop)(struct fastop *)); | |||
425 | #op " %al \n\t" \ | 425 | #op " %al \n\t" \ |
426 | FOP_RET | 426 | FOP_RET |
427 | 427 | ||
428 | asm(".global kvm_fastop_exception \n" | 428 | asm(".pushsection .fixup, \"ax\"\n" |
429 | "kvm_fastop_exception: xor %esi, %esi; ret"); | 429 | ".global kvm_fastop_exception \n" |
430 | "kvm_fastop_exception: xor %esi, %esi; ret\n" | ||
431 | ".popsection"); | ||
430 | 432 | ||
431 | FOP_START(setcc) | 433 | FOP_START(setcc) |
432 | FOP_SETCC(seto) | 434 | FOP_SETCC(seto) |
@@ -4102,10 +4104,12 @@ static int check_cr_write(struct x86_emulate_ctxt *ctxt) | |||
4102 | ctxt->ops->get_msr(ctxt, MSR_EFER, &efer); | 4104 | ctxt->ops->get_msr(ctxt, MSR_EFER, &efer); |
4103 | if (efer & EFER_LMA) { | 4105 | if (efer & EFER_LMA) { |
4104 | u64 maxphyaddr; | 4106 | u64 maxphyaddr; |
4105 | u32 eax = 0x80000008; | 4107 | u32 eax, ebx, ecx, edx; |
4106 | 4108 | ||
4107 | if (ctxt->ops->get_cpuid(ctxt, &eax, NULL, NULL, | 4109 | eax = 0x80000008; |
4108 | NULL, false)) | 4110 | ecx = 0; |
4111 | if (ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, | ||
4112 | &edx, false)) | ||
4109 | maxphyaddr = eax & 0xff; | 4113 | maxphyaddr = eax & 0xff; |
4110 | else | 4114 | else |
4111 | maxphyaddr = 36; | 4115 | maxphyaddr = 36; |
@@ -5296,7 +5300,6 @@ static void fetch_possible_mmx_operand(struct x86_emulate_ctxt *ctxt, | |||
5296 | 5300 | ||
5297 | static int fastop(struct x86_emulate_ctxt *ctxt, void (*fop)(struct fastop *)) | 5301 | static int fastop(struct x86_emulate_ctxt *ctxt, void (*fop)(struct fastop *)) |
5298 | { | 5302 | { |
5299 | register void *__sp asm(_ASM_SP); | ||
5300 | ulong flags = (ctxt->eflags & EFLAGS_MASK) | X86_EFLAGS_IF; | 5303 | ulong flags = (ctxt->eflags & EFLAGS_MASK) | X86_EFLAGS_IF; |
5301 | 5304 | ||
5302 | if (!(ctxt->d & ByteOp)) | 5305 | if (!(ctxt->d & ByteOp)) |
@@ -5304,7 +5307,7 @@ static int fastop(struct x86_emulate_ctxt *ctxt, void (*fop)(struct fastop *)) | |||
5304 | 5307 | ||
5305 | asm("push %[flags]; popf; call *%[fastop]; pushf; pop %[flags]\n" | 5308 | asm("push %[flags]; popf; call *%[fastop]; pushf; pop %[flags]\n" |
5306 | : "+a"(ctxt->dst.val), "+d"(ctxt->src.val), [flags]"+D"(flags), | 5309 | : "+a"(ctxt->dst.val), "+d"(ctxt->src.val), [flags]"+D"(flags), |
5307 | [fastop]"+S"(fop), "+r"(__sp) | 5310 | [fastop]"+S"(fop), ASM_CALL_CONSTRAINT |
5308 | : "c"(ctxt->src2.val)); | 5311 | : "c"(ctxt->src2.val)); |
5309 | 5312 | ||
5310 | ctxt->eflags = (ctxt->eflags & ~EFLAGS_MASK) | (flags & EFLAGS_MASK); | 5313 | ctxt->eflags = (ctxt->eflags & ~EFLAGS_MASK) | (flags & EFLAGS_MASK); |
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c index eca30c1eb1d9..106d4a029a8a 100644 --- a/arch/x86/kvm/mmu.c +++ b/arch/x86/kvm/mmu.c | |||
@@ -3837,7 +3837,7 @@ int kvm_handle_page_fault(struct kvm_vcpu *vcpu, u64 error_code, | |||
3837 | case KVM_PV_REASON_PAGE_NOT_PRESENT: | 3837 | case KVM_PV_REASON_PAGE_NOT_PRESENT: |
3838 | vcpu->arch.apf.host_apf_reason = 0; | 3838 | vcpu->arch.apf.host_apf_reason = 0; |
3839 | local_irq_disable(); | 3839 | local_irq_disable(); |
3840 | kvm_async_pf_task_wait(fault_address); | 3840 | kvm_async_pf_task_wait(fault_address, 0); |
3841 | local_irq_enable(); | 3841 | local_irq_enable(); |
3842 | break; | 3842 | break; |
3843 | case KVM_PV_REASON_PAGE_READY: | 3843 | case KVM_PV_REASON_PAGE_READY: |
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index 06c0c6d0541e..a2b804e10c95 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c | |||
@@ -200,6 +200,8 @@ struct loaded_vmcs { | |||
200 | int cpu; | 200 | int cpu; |
201 | bool launched; | 201 | bool launched; |
202 | bool nmi_known_unmasked; | 202 | bool nmi_known_unmasked; |
203 | unsigned long vmcs_host_cr3; /* May not match real cr3 */ | ||
204 | unsigned long vmcs_host_cr4; /* May not match real cr4 */ | ||
203 | struct list_head loaded_vmcss_on_cpu_link; | 205 | struct list_head loaded_vmcss_on_cpu_link; |
204 | }; | 206 | }; |
205 | 207 | ||
@@ -600,8 +602,6 @@ struct vcpu_vmx { | |||
600 | int gs_ldt_reload_needed; | 602 | int gs_ldt_reload_needed; |
601 | int fs_reload_needed; | 603 | int fs_reload_needed; |
602 | u64 msr_host_bndcfgs; | 604 | u64 msr_host_bndcfgs; |
603 | unsigned long vmcs_host_cr3; /* May not match real cr3 */ | ||
604 | unsigned long vmcs_host_cr4; /* May not match real cr4 */ | ||
605 | } host_state; | 605 | } host_state; |
606 | struct { | 606 | struct { |
607 | int vm86_active; | 607 | int vm86_active; |
@@ -2202,46 +2202,44 @@ static void vmx_vcpu_pi_load(struct kvm_vcpu *vcpu, int cpu) | |||
2202 | struct pi_desc old, new; | 2202 | struct pi_desc old, new; |
2203 | unsigned int dest; | 2203 | unsigned int dest; |
2204 | 2204 | ||
2205 | if (!kvm_arch_has_assigned_device(vcpu->kvm) || | 2205 | /* |
2206 | !irq_remapping_cap(IRQ_POSTING_CAP) || | 2206 | * In case of hot-plug or hot-unplug, we may have to undo |
2207 | !kvm_vcpu_apicv_active(vcpu)) | 2207 | * vmx_vcpu_pi_put even if there is no assigned device. And we |
2208 | * always keep PI.NDST up to date for simplicity: it makes the | ||
2209 | * code easier, and CPU migration is not a fast path. | ||
2210 | */ | ||
2211 | if (!pi_test_sn(pi_desc) && vcpu->cpu == cpu) | ||
2212 | return; | ||
2213 | |||
2214 | /* | ||
2215 | * First handle the simple case where no cmpxchg is necessary; just | ||
2216 | * allow posting non-urgent interrupts. | ||
2217 | * | ||
2218 | * If the 'nv' field is POSTED_INTR_WAKEUP_VECTOR, do not change | ||
2219 | * PI.NDST: pi_post_block will do it for us and the wakeup_handler | ||
2220 | * expects the VCPU to be on the blocked_vcpu_list that matches | ||
2221 | * PI.NDST. | ||
2222 | */ | ||
2223 | if (pi_desc->nv == POSTED_INTR_WAKEUP_VECTOR || | ||
2224 | vcpu->cpu == cpu) { | ||
2225 | pi_clear_sn(pi_desc); | ||
2208 | return; | 2226 | return; |
2227 | } | ||
2209 | 2228 | ||
2229 | /* The full case. */ | ||
2210 | do { | 2230 | do { |
2211 | old.control = new.control = pi_desc->control; | 2231 | old.control = new.control = pi_desc->control; |
2212 | 2232 | ||
2213 | /* | 2233 | dest = cpu_physical_id(cpu); |
2214 | * If 'nv' field is POSTED_INTR_WAKEUP_VECTOR, there | ||
2215 | * are two possible cases: | ||
2216 | * 1. After running 'pre_block', context switch | ||
2217 | * happened. For this case, 'sn' was set in | ||
2218 | * vmx_vcpu_put(), so we need to clear it here. | ||
2219 | * 2. After running 'pre_block', we were blocked, | ||
2220 | * and woken up by some other guy. For this case, | ||
2221 | * we don't need to do anything, 'pi_post_block' | ||
2222 | * will do everything for us. However, we cannot | ||
2223 | * check whether it is case #1 or case #2 here | ||
2224 | * (maybe, not needed), so we also clear sn here, | ||
2225 | * I think it is not a big deal. | ||
2226 | */ | ||
2227 | if (pi_desc->nv != POSTED_INTR_WAKEUP_VECTOR) { | ||
2228 | if (vcpu->cpu != cpu) { | ||
2229 | dest = cpu_physical_id(cpu); | ||
2230 | |||
2231 | if (x2apic_enabled()) | ||
2232 | new.ndst = dest; | ||
2233 | else | ||
2234 | new.ndst = (dest << 8) & 0xFF00; | ||
2235 | } | ||
2236 | 2234 | ||
2237 | /* set 'NV' to 'notification vector' */ | 2235 | if (x2apic_enabled()) |
2238 | new.nv = POSTED_INTR_VECTOR; | 2236 | new.ndst = dest; |
2239 | } | 2237 | else |
2238 | new.ndst = (dest << 8) & 0xFF00; | ||
2240 | 2239 | ||
2241 | /* Allow posting non-urgent interrupts */ | ||
2242 | new.sn = 0; | 2240 | new.sn = 0; |
2243 | } while (cmpxchg(&pi_desc->control, old.control, | 2241 | } while (cmpxchg64(&pi_desc->control, old.control, |
2244 | new.control) != old.control); | 2242 | new.control) != old.control); |
2245 | } | 2243 | } |
2246 | 2244 | ||
2247 | static void decache_tsc_multiplier(struct vcpu_vmx *vmx) | 2245 | static void decache_tsc_multiplier(struct vcpu_vmx *vmx) |
@@ -5077,21 +5075,30 @@ static inline bool kvm_vcpu_trigger_posted_interrupt(struct kvm_vcpu *vcpu, | |||
5077 | int pi_vec = nested ? POSTED_INTR_NESTED_VECTOR : POSTED_INTR_VECTOR; | 5075 | int pi_vec = nested ? POSTED_INTR_NESTED_VECTOR : POSTED_INTR_VECTOR; |
5078 | 5076 | ||
5079 | if (vcpu->mode == IN_GUEST_MODE) { | 5077 | if (vcpu->mode == IN_GUEST_MODE) { |
5080 | struct vcpu_vmx *vmx = to_vmx(vcpu); | ||
5081 | |||
5082 | /* | 5078 | /* |
5083 | * Currently, we don't support urgent interrupt, | 5079 | * The vector of interrupt to be delivered to vcpu had |
5084 | * all interrupts are recognized as non-urgent | 5080 | * been set in PIR before this function. |
5085 | * interrupt, so we cannot post interrupts when | 5081 | * |
5086 | * 'SN' is set. | 5082 | * Following cases will be reached in this block, and |
5083 | * we always send a notification event in all cases as | ||
5084 | * explained below. | ||
5085 | * | ||
5086 | * Case 1: vcpu keeps in non-root mode. Sending a | ||
5087 | * notification event posts the interrupt to vcpu. | ||
5087 | * | 5088 | * |
5088 | * If the vcpu is in guest mode, it means it is | 5089 | * Case 2: vcpu exits to root mode and is still |
5089 | * running instead of being scheduled out and | 5090 | * runnable. PIR will be synced to vIRR before the |
5090 | * waiting in the run queue, and that's the only | 5091 | * next vcpu entry. Sending a notification event in |
5091 | * case when 'SN' is set currently, warning if | 5092 | * this case has no effect, as vcpu is not in root |
5092 | * 'SN' is set. | 5093 | * mode. |
5094 | * | ||
5095 | * Case 3: vcpu exits to root mode and is blocked. | ||
5096 | * vcpu_block() has already synced PIR to vIRR and | ||
5097 | * never blocks vcpu if vIRR is not cleared. Therefore, | ||
5098 | * a blocked vcpu here does not wait for any requested | ||
5099 | * interrupts in PIR, and sending a notification event | ||
5100 | * which has no effect is safe here. | ||
5093 | */ | 5101 | */ |
5094 | WARN_ON_ONCE(pi_test_sn(&vmx->pi_desc)); | ||
5095 | 5102 | ||
5096 | apic->send_IPI_mask(get_cpu_mask(vcpu->cpu), pi_vec); | 5103 | apic->send_IPI_mask(get_cpu_mask(vcpu->cpu), pi_vec); |
5097 | return true; | 5104 | return true; |
@@ -5169,12 +5176,12 @@ static void vmx_set_constant_host_state(struct vcpu_vmx *vmx) | |||
5169 | */ | 5176 | */ |
5170 | cr3 = __read_cr3(); | 5177 | cr3 = __read_cr3(); |
5171 | vmcs_writel(HOST_CR3, cr3); /* 22.2.3 FIXME: shadow tables */ | 5178 | vmcs_writel(HOST_CR3, cr3); /* 22.2.3 FIXME: shadow tables */ |
5172 | vmx->host_state.vmcs_host_cr3 = cr3; | 5179 | vmx->loaded_vmcs->vmcs_host_cr3 = cr3; |
5173 | 5180 | ||
5174 | /* Save the most likely value for this task's CR4 in the VMCS. */ | 5181 | /* Save the most likely value for this task's CR4 in the VMCS. */ |
5175 | cr4 = cr4_read_shadow(); | 5182 | cr4 = cr4_read_shadow(); |
5176 | vmcs_writel(HOST_CR4, cr4); /* 22.2.3, 22.2.5 */ | 5183 | vmcs_writel(HOST_CR4, cr4); /* 22.2.3, 22.2.5 */ |
5177 | vmx->host_state.vmcs_host_cr4 = cr4; | 5184 | vmx->loaded_vmcs->vmcs_host_cr4 = cr4; |
5178 | 5185 | ||
5179 | vmcs_write16(HOST_CS_SELECTOR, __KERNEL_CS); /* 22.2.4 */ | 5186 | vmcs_write16(HOST_CS_SELECTOR, __KERNEL_CS); /* 22.2.4 */ |
5180 | #ifdef CONFIG_X86_64 | 5187 | #ifdef CONFIG_X86_64 |
@@ -9036,7 +9043,6 @@ static void vmx_complete_atomic_exit(struct vcpu_vmx *vmx) | |||
9036 | static void vmx_handle_external_intr(struct kvm_vcpu *vcpu) | 9043 | static void vmx_handle_external_intr(struct kvm_vcpu *vcpu) |
9037 | { | 9044 | { |
9038 | u32 exit_intr_info = vmcs_read32(VM_EXIT_INTR_INFO); | 9045 | u32 exit_intr_info = vmcs_read32(VM_EXIT_INTR_INFO); |
9039 | register void *__sp asm(_ASM_SP); | ||
9040 | 9046 | ||
9041 | if ((exit_intr_info & (INTR_INFO_VALID_MASK | INTR_INFO_INTR_TYPE_MASK)) | 9047 | if ((exit_intr_info & (INTR_INFO_VALID_MASK | INTR_INFO_INTR_TYPE_MASK)) |
9042 | == (INTR_INFO_VALID_MASK | INTR_TYPE_EXT_INTR)) { | 9048 | == (INTR_INFO_VALID_MASK | INTR_TYPE_EXT_INTR)) { |
@@ -9065,7 +9071,7 @@ static void vmx_handle_external_intr(struct kvm_vcpu *vcpu) | |||
9065 | #ifdef CONFIG_X86_64 | 9071 | #ifdef CONFIG_X86_64 |
9066 | [sp]"=&r"(tmp), | 9072 | [sp]"=&r"(tmp), |
9067 | #endif | 9073 | #endif |
9068 | "+r"(__sp) | 9074 | ASM_CALL_CONSTRAINT |
9069 | : | 9075 | : |
9070 | [entry]"r"(entry), | 9076 | [entry]"r"(entry), |
9071 | [ss]"i"(__KERNEL_DS), | 9077 | [ss]"i"(__KERNEL_DS), |
@@ -9265,15 +9271,15 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu) | |||
9265 | vmcs_writel(GUEST_RIP, vcpu->arch.regs[VCPU_REGS_RIP]); | 9271 | vmcs_writel(GUEST_RIP, vcpu->arch.regs[VCPU_REGS_RIP]); |
9266 | 9272 | ||
9267 | cr3 = __get_current_cr3_fast(); | 9273 | cr3 = __get_current_cr3_fast(); |
9268 | if (unlikely(cr3 != vmx->host_state.vmcs_host_cr3)) { | 9274 | if (unlikely(cr3 != vmx->loaded_vmcs->vmcs_host_cr3)) { |
9269 | vmcs_writel(HOST_CR3, cr3); | 9275 | vmcs_writel(HOST_CR3, cr3); |
9270 | vmx->host_state.vmcs_host_cr3 = cr3; | 9276 | vmx->loaded_vmcs->vmcs_host_cr3 = cr3; |
9271 | } | 9277 | } |
9272 | 9278 | ||
9273 | cr4 = cr4_read_shadow(); | 9279 | cr4 = cr4_read_shadow(); |
9274 | if (unlikely(cr4 != vmx->host_state.vmcs_host_cr4)) { | 9280 | if (unlikely(cr4 != vmx->loaded_vmcs->vmcs_host_cr4)) { |
9275 | vmcs_writel(HOST_CR4, cr4); | 9281 | vmcs_writel(HOST_CR4, cr4); |
9276 | vmx->host_state.vmcs_host_cr4 = cr4; | 9282 | vmx->loaded_vmcs->vmcs_host_cr4 = cr4; |
9277 | } | 9283 | } |
9278 | 9284 | ||
9279 | /* When single-stepping over STI and MOV SS, we must clear the | 9285 | /* When single-stepping over STI and MOV SS, we must clear the |
@@ -9583,6 +9589,13 @@ static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id) | |||
9583 | 9589 | ||
9584 | vmx->msr_ia32_feature_control_valid_bits = FEATURE_CONTROL_LOCKED; | 9590 | vmx->msr_ia32_feature_control_valid_bits = FEATURE_CONTROL_LOCKED; |
9585 | 9591 | ||
9592 | /* | ||
9593 | * Enforce invariant: pi_desc.nv is always either POSTED_INTR_VECTOR | ||
9594 | * or POSTED_INTR_WAKEUP_VECTOR. | ||
9595 | */ | ||
9596 | vmx->pi_desc.nv = POSTED_INTR_VECTOR; | ||
9597 | vmx->pi_desc.sn = 1; | ||
9598 | |||
9586 | return &vmx->vcpu; | 9599 | return &vmx->vcpu; |
9587 | 9600 | ||
9588 | free_vmcs: | 9601 | free_vmcs: |
@@ -9831,7 +9844,8 @@ static void vmx_inject_page_fault_nested(struct kvm_vcpu *vcpu, | |||
9831 | 9844 | ||
9832 | WARN_ON(!is_guest_mode(vcpu)); | 9845 | WARN_ON(!is_guest_mode(vcpu)); |
9833 | 9846 | ||
9834 | if (nested_vmx_is_page_fault_vmexit(vmcs12, fault->error_code)) { | 9847 | if (nested_vmx_is_page_fault_vmexit(vmcs12, fault->error_code) && |
9848 | !to_vmx(vcpu)->nested.nested_run_pending) { | ||
9835 | vmcs12->vm_exit_intr_error_code = fault->error_code; | 9849 | vmcs12->vm_exit_intr_error_code = fault->error_code; |
9836 | nested_vmx_vmexit(vcpu, EXIT_REASON_EXCEPTION_NMI, | 9850 | nested_vmx_vmexit(vcpu, EXIT_REASON_EXCEPTION_NMI, |
9837 | PF_VECTOR | INTR_TYPE_HARD_EXCEPTION | | 9851 | PF_VECTOR | INTR_TYPE_HARD_EXCEPTION | |
@@ -11696,6 +11710,37 @@ static void vmx_enable_log_dirty_pt_masked(struct kvm *kvm, | |||
11696 | kvm_mmu_clear_dirty_pt_masked(kvm, memslot, offset, mask); | 11710 | kvm_mmu_clear_dirty_pt_masked(kvm, memslot, offset, mask); |
11697 | } | 11711 | } |
11698 | 11712 | ||
11713 | static void __pi_post_block(struct kvm_vcpu *vcpu) | ||
11714 | { | ||
11715 | struct pi_desc *pi_desc = vcpu_to_pi_desc(vcpu); | ||
11716 | struct pi_desc old, new; | ||
11717 | unsigned int dest; | ||
11718 | |||
11719 | do { | ||
11720 | old.control = new.control = pi_desc->control; | ||
11721 | WARN(old.nv != POSTED_INTR_WAKEUP_VECTOR, | ||
11722 | "Wakeup handler not enabled while the VCPU is blocked\n"); | ||
11723 | |||
11724 | dest = cpu_physical_id(vcpu->cpu); | ||
11725 | |||
11726 | if (x2apic_enabled()) | ||
11727 | new.ndst = dest; | ||
11728 | else | ||
11729 | new.ndst = (dest << 8) & 0xFF00; | ||
11730 | |||
11731 | /* set 'NV' to 'notification vector' */ | ||
11732 | new.nv = POSTED_INTR_VECTOR; | ||
11733 | } while (cmpxchg64(&pi_desc->control, old.control, | ||
11734 | new.control) != old.control); | ||
11735 | |||
11736 | if (!WARN_ON_ONCE(vcpu->pre_pcpu == -1)) { | ||
11737 | spin_lock(&per_cpu(blocked_vcpu_on_cpu_lock, vcpu->pre_pcpu)); | ||
11738 | list_del(&vcpu->blocked_vcpu_list); | ||
11739 | spin_unlock(&per_cpu(blocked_vcpu_on_cpu_lock, vcpu->pre_pcpu)); | ||
11740 | vcpu->pre_pcpu = -1; | ||
11741 | } | ||
11742 | } | ||
11743 | |||
11699 | /* | 11744 | /* |
11700 | * This routine does the following things for vCPU which is going | 11745 | * This routine does the following things for vCPU which is going |
11701 | * to be blocked if VT-d PI is enabled. | 11746 | * to be blocked if VT-d PI is enabled. |
@@ -11711,7 +11756,6 @@ static void vmx_enable_log_dirty_pt_masked(struct kvm *kvm, | |||
11711 | */ | 11756 | */ |
11712 | static int pi_pre_block(struct kvm_vcpu *vcpu) | 11757 | static int pi_pre_block(struct kvm_vcpu *vcpu) |
11713 | { | 11758 | { |
11714 | unsigned long flags; | ||
11715 | unsigned int dest; | 11759 | unsigned int dest; |
11716 | struct pi_desc old, new; | 11760 | struct pi_desc old, new; |
11717 | struct pi_desc *pi_desc = vcpu_to_pi_desc(vcpu); | 11761 | struct pi_desc *pi_desc = vcpu_to_pi_desc(vcpu); |
@@ -11721,34 +11765,20 @@ static int pi_pre_block(struct kvm_vcpu *vcpu) | |||
11721 | !kvm_vcpu_apicv_active(vcpu)) | 11765 | !kvm_vcpu_apicv_active(vcpu)) |
11722 | return 0; | 11766 | return 0; |
11723 | 11767 | ||
11724 | vcpu->pre_pcpu = vcpu->cpu; | 11768 | WARN_ON(irqs_disabled()); |
11725 | spin_lock_irqsave(&per_cpu(blocked_vcpu_on_cpu_lock, | 11769 | local_irq_disable(); |
11726 | vcpu->pre_pcpu), flags); | 11770 | if (!WARN_ON_ONCE(vcpu->pre_pcpu != -1)) { |
11727 | list_add_tail(&vcpu->blocked_vcpu_list, | 11771 | vcpu->pre_pcpu = vcpu->cpu; |
11728 | &per_cpu(blocked_vcpu_on_cpu, | 11772 | spin_lock(&per_cpu(blocked_vcpu_on_cpu_lock, vcpu->pre_pcpu)); |
11729 | vcpu->pre_pcpu)); | 11773 | list_add_tail(&vcpu->blocked_vcpu_list, |
11730 | spin_unlock_irqrestore(&per_cpu(blocked_vcpu_on_cpu_lock, | 11774 | &per_cpu(blocked_vcpu_on_cpu, |
11731 | vcpu->pre_pcpu), flags); | 11775 | vcpu->pre_pcpu)); |
11776 | spin_unlock(&per_cpu(blocked_vcpu_on_cpu_lock, vcpu->pre_pcpu)); | ||
11777 | } | ||
11732 | 11778 | ||
11733 | do { | 11779 | do { |
11734 | old.control = new.control = pi_desc->control; | 11780 | old.control = new.control = pi_desc->control; |
11735 | 11781 | ||
11736 | /* | ||
11737 | * We should not block the vCPU if | ||
11738 | * an interrupt is posted for it. | ||
11739 | */ | ||
11740 | if (pi_test_on(pi_desc) == 1) { | ||
11741 | spin_lock_irqsave(&per_cpu(blocked_vcpu_on_cpu_lock, | ||
11742 | vcpu->pre_pcpu), flags); | ||
11743 | list_del(&vcpu->blocked_vcpu_list); | ||
11744 | spin_unlock_irqrestore( | ||
11745 | &per_cpu(blocked_vcpu_on_cpu_lock, | ||
11746 | vcpu->pre_pcpu), flags); | ||
11747 | vcpu->pre_pcpu = -1; | ||
11748 | |||
11749 | return 1; | ||
11750 | } | ||
11751 | |||
11752 | WARN((pi_desc->sn == 1), | 11782 | WARN((pi_desc->sn == 1), |
11753 | "Warning: SN field of posted-interrupts " | 11783 | "Warning: SN field of posted-interrupts " |
11754 | "is set before blocking\n"); | 11784 | "is set before blocking\n"); |
@@ -11770,10 +11800,15 @@ static int pi_pre_block(struct kvm_vcpu *vcpu) | |||
11770 | 11800 | ||
11771 | /* set 'NV' to 'wakeup vector' */ | 11801 | /* set 'NV' to 'wakeup vector' */ |
11772 | new.nv = POSTED_INTR_WAKEUP_VECTOR; | 11802 | new.nv = POSTED_INTR_WAKEUP_VECTOR; |
11773 | } while (cmpxchg(&pi_desc->control, old.control, | 11803 | } while (cmpxchg64(&pi_desc->control, old.control, |
11774 | new.control) != old.control); | 11804 | new.control) != old.control); |
11775 | 11805 | ||
11776 | return 0; | 11806 | /* We should not block the vCPU if an interrupt is posted for it. */ |
11807 | if (pi_test_on(pi_desc) == 1) | ||
11808 | __pi_post_block(vcpu); | ||
11809 | |||
11810 | local_irq_enable(); | ||
11811 | return (vcpu->pre_pcpu == -1); | ||
11777 | } | 11812 | } |
11778 | 11813 | ||
11779 | static int vmx_pre_block(struct kvm_vcpu *vcpu) | 11814 | static int vmx_pre_block(struct kvm_vcpu *vcpu) |
@@ -11789,44 +11824,13 @@ static int vmx_pre_block(struct kvm_vcpu *vcpu) | |||
11789 | 11824 | ||
11790 | static void pi_post_block(struct kvm_vcpu *vcpu) | 11825 | static void pi_post_block(struct kvm_vcpu *vcpu) |
11791 | { | 11826 | { |
11792 | struct pi_desc *pi_desc = vcpu_to_pi_desc(vcpu); | 11827 | if (vcpu->pre_pcpu == -1) |
11793 | struct pi_desc old, new; | ||
11794 | unsigned int dest; | ||
11795 | unsigned long flags; | ||
11796 | |||
11797 | if (!kvm_arch_has_assigned_device(vcpu->kvm) || | ||
11798 | !irq_remapping_cap(IRQ_POSTING_CAP) || | ||
11799 | !kvm_vcpu_apicv_active(vcpu)) | ||
11800 | return; | 11828 | return; |
11801 | 11829 | ||
11802 | do { | 11830 | WARN_ON(irqs_disabled()); |
11803 | old.control = new.control = pi_desc->control; | 11831 | local_irq_disable(); |
11804 | 11832 | __pi_post_block(vcpu); | |
11805 | dest = cpu_physical_id(vcpu->cpu); | 11833 | local_irq_enable(); |
11806 | |||
11807 | if (x2apic_enabled()) | ||
11808 | new.ndst = dest; | ||
11809 | else | ||
11810 | new.ndst = (dest << 8) & 0xFF00; | ||
11811 | |||
11812 | /* Allow posting non-urgent interrupts */ | ||
11813 | new.sn = 0; | ||
11814 | |||
11815 | /* set 'NV' to 'notification vector' */ | ||
11816 | new.nv = POSTED_INTR_VECTOR; | ||
11817 | } while (cmpxchg(&pi_desc->control, old.control, | ||
11818 | new.control) != old.control); | ||
11819 | |||
11820 | if(vcpu->pre_pcpu != -1) { | ||
11821 | spin_lock_irqsave( | ||
11822 | &per_cpu(blocked_vcpu_on_cpu_lock, | ||
11823 | vcpu->pre_pcpu), flags); | ||
11824 | list_del(&vcpu->blocked_vcpu_list); | ||
11825 | spin_unlock_irqrestore( | ||
11826 | &per_cpu(blocked_vcpu_on_cpu_lock, | ||
11827 | vcpu->pre_pcpu), flags); | ||
11828 | vcpu->pre_pcpu = -1; | ||
11829 | } | ||
11830 | } | 11834 | } |
11831 | 11835 | ||
11832 | static void vmx_post_block(struct kvm_vcpu *vcpu) | 11836 | static void vmx_post_block(struct kvm_vcpu *vcpu) |
@@ -11911,12 +11915,8 @@ static int vmx_update_pi_irte(struct kvm *kvm, unsigned int host_irq, | |||
11911 | 11915 | ||
11912 | if (set) | 11916 | if (set) |
11913 | ret = irq_set_vcpu_affinity(host_irq, &vcpu_info); | 11917 | ret = irq_set_vcpu_affinity(host_irq, &vcpu_info); |
11914 | else { | 11918 | else |
11915 | /* suppress notification event before unposting */ | ||
11916 | pi_set_sn(vcpu_to_pi_desc(vcpu)); | ||
11917 | ret = irq_set_vcpu_affinity(host_irq, NULL); | 11919 | ret = irq_set_vcpu_affinity(host_irq, NULL); |
11918 | pi_clear_sn(vcpu_to_pi_desc(vcpu)); | ||
11919 | } | ||
11920 | 11920 | ||
11921 | if (ret < 0) { | 11921 | if (ret < 0) { |
11922 | printk(KERN_INFO "%s: failed to update PI IRTE\n", | 11922 | printk(KERN_INFO "%s: failed to update PI IRTE\n", |
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index cd17b7d9a107..03869eb7fcd6 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c | |||
@@ -7225,7 +7225,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) | |||
7225 | int r; | 7225 | int r; |
7226 | sigset_t sigsaved; | 7226 | sigset_t sigsaved; |
7227 | 7227 | ||
7228 | fpu__activate_curr(fpu); | 7228 | fpu__initialize(fpu); |
7229 | 7229 | ||
7230 | if (vcpu->sigset_active) | 7230 | if (vcpu->sigset_active) |
7231 | sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved); | 7231 | sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved); |
diff --git a/arch/x86/math-emu/fpu_entry.c b/arch/x86/math-emu/fpu_entry.c index d4a7df2205b8..220638a4cb94 100644 --- a/arch/x86/math-emu/fpu_entry.c +++ b/arch/x86/math-emu/fpu_entry.c | |||
@@ -114,7 +114,7 @@ void math_emulate(struct math_emu_info *info) | |||
114 | struct desc_struct code_descriptor; | 114 | struct desc_struct code_descriptor; |
115 | struct fpu *fpu = ¤t->thread.fpu; | 115 | struct fpu *fpu = ¤t->thread.fpu; |
116 | 116 | ||
117 | fpu__activate_curr(fpu); | 117 | fpu__initialize(fpu); |
118 | 118 | ||
119 | #ifdef RE_ENTRANT_CHECKING | 119 | #ifdef RE_ENTRANT_CHECKING |
120 | if (emulating) { | 120 | if (emulating) { |
diff --git a/arch/x86/mm/extable.c b/arch/x86/mm/extable.c index c076f710de4c..c3521e2be396 100644 --- a/arch/x86/mm/extable.c +++ b/arch/x86/mm/extable.c | |||
@@ -2,6 +2,7 @@ | |||
2 | #include <linux/uaccess.h> | 2 | #include <linux/uaccess.h> |
3 | #include <linux/sched/debug.h> | 3 | #include <linux/sched/debug.h> |
4 | 4 | ||
5 | #include <asm/fpu/internal.h> | ||
5 | #include <asm/traps.h> | 6 | #include <asm/traps.h> |
6 | #include <asm/kdebug.h> | 7 | #include <asm/kdebug.h> |
7 | 8 | ||
@@ -78,6 +79,29 @@ bool ex_handler_refcount(const struct exception_table_entry *fixup, | |||
78 | } | 79 | } |
79 | EXPORT_SYMBOL_GPL(ex_handler_refcount); | 80 | EXPORT_SYMBOL_GPL(ex_handler_refcount); |
80 | 81 | ||
82 | /* | ||
83 | * Handler for when we fail to restore a task's FPU state. We should never get | ||
84 | * here because the FPU state of a task using the FPU (task->thread.fpu.state) | ||
85 | * should always be valid. However, past bugs have allowed userspace to set | ||
86 | * reserved bits in the XSAVE area using PTRACE_SETREGSET or sys_rt_sigreturn(). | ||
87 | * These caused XRSTOR to fail when switching to the task, leaking the FPU | ||
88 | * registers of the task previously executing on the CPU. Mitigate this class | ||
89 | * of vulnerability by restoring from the initial state (essentially, zeroing | ||
90 | * out all the FPU registers) if we can't restore from the task's FPU state. | ||
91 | */ | ||
92 | bool ex_handler_fprestore(const struct exception_table_entry *fixup, | ||
93 | struct pt_regs *regs, int trapnr) | ||
94 | { | ||
95 | regs->ip = ex_fixup_addr(fixup); | ||
96 | |||
97 | WARN_ONCE(1, "Bad FPU state detected at %pB, reinitializing FPU registers.", | ||
98 | (void *)instruction_pointer(regs)); | ||
99 | |||
100 | __copy_kernel_to_fpregs(&init_fpstate, -1); | ||
101 | return true; | ||
102 | } | ||
103 | EXPORT_SYMBOL_GPL(ex_handler_fprestore); | ||
104 | |||
81 | bool ex_handler_ext(const struct exception_table_entry *fixup, | 105 | bool ex_handler_ext(const struct exception_table_entry *fixup, |
82 | struct pt_regs *regs, int trapnr) | 106 | struct pt_regs *regs, int trapnr) |
83 | { | 107 | { |
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c index b836a7274e12..e2baeaa053a5 100644 --- a/arch/x86/mm/fault.c +++ b/arch/x86/mm/fault.c | |||
@@ -192,8 +192,7 @@ is_prefetch(struct pt_regs *regs, unsigned long error_code, unsigned long addr) | |||
192 | * 6. T1 : reaches here, sees vma_pkey(vma)=5, when we really | 192 | * 6. T1 : reaches here, sees vma_pkey(vma)=5, when we really |
193 | * faulted on a pte with its pkey=4. | 193 | * faulted on a pte with its pkey=4. |
194 | */ | 194 | */ |
195 | static void fill_sig_info_pkey(int si_code, siginfo_t *info, | 195 | static void fill_sig_info_pkey(int si_code, siginfo_t *info, u32 *pkey) |
196 | struct vm_area_struct *vma) | ||
197 | { | 196 | { |
198 | /* This is effectively an #ifdef */ | 197 | /* This is effectively an #ifdef */ |
199 | if (!boot_cpu_has(X86_FEATURE_OSPKE)) | 198 | if (!boot_cpu_has(X86_FEATURE_OSPKE)) |
@@ -209,7 +208,7 @@ static void fill_sig_info_pkey(int si_code, siginfo_t *info, | |||
209 | * valid VMA, so we should never reach this without a | 208 | * valid VMA, so we should never reach this without a |
210 | * valid VMA. | 209 | * valid VMA. |
211 | */ | 210 | */ |
212 | if (!vma) { | 211 | if (!pkey) { |
213 | WARN_ONCE(1, "PKU fault with no VMA passed in"); | 212 | WARN_ONCE(1, "PKU fault with no VMA passed in"); |
214 | info->si_pkey = 0; | 213 | info->si_pkey = 0; |
215 | return; | 214 | return; |
@@ -219,13 +218,12 @@ static void fill_sig_info_pkey(int si_code, siginfo_t *info, | |||
219 | * absolutely guranteed to be 100% accurate because of | 218 | * absolutely guranteed to be 100% accurate because of |
220 | * the race explained above. | 219 | * the race explained above. |
221 | */ | 220 | */ |
222 | info->si_pkey = vma_pkey(vma); | 221 | info->si_pkey = *pkey; |
223 | } | 222 | } |
224 | 223 | ||
225 | static void | 224 | static void |
226 | force_sig_info_fault(int si_signo, int si_code, unsigned long address, | 225 | force_sig_info_fault(int si_signo, int si_code, unsigned long address, |
227 | struct task_struct *tsk, struct vm_area_struct *vma, | 226 | struct task_struct *tsk, u32 *pkey, int fault) |
228 | int fault) | ||
229 | { | 227 | { |
230 | unsigned lsb = 0; | 228 | unsigned lsb = 0; |
231 | siginfo_t info; | 229 | siginfo_t info; |
@@ -240,7 +238,7 @@ force_sig_info_fault(int si_signo, int si_code, unsigned long address, | |||
240 | lsb = PAGE_SHIFT; | 238 | lsb = PAGE_SHIFT; |
241 | info.si_addr_lsb = lsb; | 239 | info.si_addr_lsb = lsb; |
242 | 240 | ||
243 | fill_sig_info_pkey(si_code, &info, vma); | 241 | fill_sig_info_pkey(si_code, &info, pkey); |
244 | 242 | ||
245 | force_sig_info(si_signo, &info, tsk); | 243 | force_sig_info(si_signo, &info, tsk); |
246 | } | 244 | } |
@@ -762,8 +760,6 @@ no_context(struct pt_regs *regs, unsigned long error_code, | |||
762 | struct task_struct *tsk = current; | 760 | struct task_struct *tsk = current; |
763 | unsigned long flags; | 761 | unsigned long flags; |
764 | int sig; | 762 | int sig; |
765 | /* No context means no VMA to pass down */ | ||
766 | struct vm_area_struct *vma = NULL; | ||
767 | 763 | ||
768 | /* Are we prepared to handle this kernel fault? */ | 764 | /* Are we prepared to handle this kernel fault? */ |
769 | if (fixup_exception(regs, X86_TRAP_PF)) { | 765 | if (fixup_exception(regs, X86_TRAP_PF)) { |
@@ -788,7 +784,7 @@ no_context(struct pt_regs *regs, unsigned long error_code, | |||
788 | 784 | ||
789 | /* XXX: hwpoison faults will set the wrong code. */ | 785 | /* XXX: hwpoison faults will set the wrong code. */ |
790 | force_sig_info_fault(signal, si_code, address, | 786 | force_sig_info_fault(signal, si_code, address, |
791 | tsk, vma, 0); | 787 | tsk, NULL, 0); |
792 | } | 788 | } |
793 | 789 | ||
794 | /* | 790 | /* |
@@ -806,7 +802,6 @@ no_context(struct pt_regs *regs, unsigned long error_code, | |||
806 | if (is_vmalloc_addr((void *)address) && | 802 | if (is_vmalloc_addr((void *)address) && |
807 | (((unsigned long)tsk->stack - 1 - address < PAGE_SIZE) || | 803 | (((unsigned long)tsk->stack - 1 - address < PAGE_SIZE) || |
808 | address - ((unsigned long)tsk->stack + THREAD_SIZE) < PAGE_SIZE)) { | 804 | address - ((unsigned long)tsk->stack + THREAD_SIZE) < PAGE_SIZE)) { |
809 | register void *__sp asm("rsp"); | ||
810 | unsigned long stack = this_cpu_read(orig_ist.ist[DOUBLEFAULT_STACK]) - sizeof(void *); | 805 | unsigned long stack = this_cpu_read(orig_ist.ist[DOUBLEFAULT_STACK]) - sizeof(void *); |
811 | /* | 806 | /* |
812 | * We're likely to be running with very little stack space | 807 | * We're likely to be running with very little stack space |
@@ -821,7 +816,7 @@ no_context(struct pt_regs *regs, unsigned long error_code, | |||
821 | asm volatile ("movq %[stack], %%rsp\n\t" | 816 | asm volatile ("movq %[stack], %%rsp\n\t" |
822 | "call handle_stack_overflow\n\t" | 817 | "call handle_stack_overflow\n\t" |
823 | "1: jmp 1b" | 818 | "1: jmp 1b" |
824 | : "+r" (__sp) | 819 | : ASM_CALL_CONSTRAINT |
825 | : "D" ("kernel stack overflow (page fault)"), | 820 | : "D" ("kernel stack overflow (page fault)"), |
826 | "S" (regs), "d" (address), | 821 | "S" (regs), "d" (address), |
827 | [stack] "rm" (stack)); | 822 | [stack] "rm" (stack)); |
@@ -897,8 +892,7 @@ show_signal_msg(struct pt_regs *regs, unsigned long error_code, | |||
897 | 892 | ||
898 | static void | 893 | static void |
899 | __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code, | 894 | __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code, |
900 | unsigned long address, struct vm_area_struct *vma, | 895 | unsigned long address, u32 *pkey, int si_code) |
901 | int si_code) | ||
902 | { | 896 | { |
903 | struct task_struct *tsk = current; | 897 | struct task_struct *tsk = current; |
904 | 898 | ||
@@ -946,7 +940,7 @@ __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code, | |||
946 | tsk->thread.error_code = error_code; | 940 | tsk->thread.error_code = error_code; |
947 | tsk->thread.trap_nr = X86_TRAP_PF; | 941 | tsk->thread.trap_nr = X86_TRAP_PF; |
948 | 942 | ||
949 | force_sig_info_fault(SIGSEGV, si_code, address, tsk, vma, 0); | 943 | force_sig_info_fault(SIGSEGV, si_code, address, tsk, pkey, 0); |
950 | 944 | ||
951 | return; | 945 | return; |
952 | } | 946 | } |
@@ -959,9 +953,9 @@ __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code, | |||
959 | 953 | ||
960 | static noinline void | 954 | static noinline void |
961 | bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code, | 955 | bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code, |
962 | unsigned long address, struct vm_area_struct *vma) | 956 | unsigned long address, u32 *pkey) |
963 | { | 957 | { |
964 | __bad_area_nosemaphore(regs, error_code, address, vma, SEGV_MAPERR); | 958 | __bad_area_nosemaphore(regs, error_code, address, pkey, SEGV_MAPERR); |
965 | } | 959 | } |
966 | 960 | ||
967 | static void | 961 | static void |
@@ -969,6 +963,10 @@ __bad_area(struct pt_regs *regs, unsigned long error_code, | |||
969 | unsigned long address, struct vm_area_struct *vma, int si_code) | 963 | unsigned long address, struct vm_area_struct *vma, int si_code) |
970 | { | 964 | { |
971 | struct mm_struct *mm = current->mm; | 965 | struct mm_struct *mm = current->mm; |
966 | u32 pkey; | ||
967 | |||
968 | if (vma) | ||
969 | pkey = vma_pkey(vma); | ||
972 | 970 | ||
973 | /* | 971 | /* |
974 | * Something tried to access memory that isn't in our memory map.. | 972 | * Something tried to access memory that isn't in our memory map.. |
@@ -976,7 +974,8 @@ __bad_area(struct pt_regs *regs, unsigned long error_code, | |||
976 | */ | 974 | */ |
977 | up_read(&mm->mmap_sem); | 975 | up_read(&mm->mmap_sem); |
978 | 976 | ||
979 | __bad_area_nosemaphore(regs, error_code, address, vma, si_code); | 977 | __bad_area_nosemaphore(regs, error_code, address, |
978 | (vma) ? &pkey : NULL, si_code); | ||
980 | } | 979 | } |
981 | 980 | ||
982 | static noinline void | 981 | static noinline void |
@@ -1019,7 +1018,7 @@ bad_area_access_error(struct pt_regs *regs, unsigned long error_code, | |||
1019 | 1018 | ||
1020 | static void | 1019 | static void |
1021 | do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address, | 1020 | do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address, |
1022 | struct vm_area_struct *vma, unsigned int fault) | 1021 | u32 *pkey, unsigned int fault) |
1023 | { | 1022 | { |
1024 | struct task_struct *tsk = current; | 1023 | struct task_struct *tsk = current; |
1025 | int code = BUS_ADRERR; | 1024 | int code = BUS_ADRERR; |
@@ -1046,13 +1045,12 @@ do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address, | |||
1046 | code = BUS_MCEERR_AR; | 1045 | code = BUS_MCEERR_AR; |
1047 | } | 1046 | } |
1048 | #endif | 1047 | #endif |
1049 | force_sig_info_fault(SIGBUS, code, address, tsk, vma, fault); | 1048 | force_sig_info_fault(SIGBUS, code, address, tsk, pkey, fault); |
1050 | } | 1049 | } |
1051 | 1050 | ||
1052 | static noinline void | 1051 | static noinline void |
1053 | mm_fault_error(struct pt_regs *regs, unsigned long error_code, | 1052 | mm_fault_error(struct pt_regs *regs, unsigned long error_code, |
1054 | unsigned long address, struct vm_area_struct *vma, | 1053 | unsigned long address, u32 *pkey, unsigned int fault) |
1055 | unsigned int fault) | ||
1056 | { | 1054 | { |
1057 | if (fatal_signal_pending(current) && !(error_code & PF_USER)) { | 1055 | if (fatal_signal_pending(current) && !(error_code & PF_USER)) { |
1058 | no_context(regs, error_code, address, 0, 0); | 1056 | no_context(regs, error_code, address, 0, 0); |
@@ -1076,9 +1074,9 @@ mm_fault_error(struct pt_regs *regs, unsigned long error_code, | |||
1076 | } else { | 1074 | } else { |
1077 | if (fault & (VM_FAULT_SIGBUS|VM_FAULT_HWPOISON| | 1075 | if (fault & (VM_FAULT_SIGBUS|VM_FAULT_HWPOISON| |
1078 | VM_FAULT_HWPOISON_LARGE)) | 1076 | VM_FAULT_HWPOISON_LARGE)) |
1079 | do_sigbus(regs, error_code, address, vma, fault); | 1077 | do_sigbus(regs, error_code, address, pkey, fault); |
1080 | else if (fault & VM_FAULT_SIGSEGV) | 1078 | else if (fault & VM_FAULT_SIGSEGV) |
1081 | bad_area_nosemaphore(regs, error_code, address, vma); | 1079 | bad_area_nosemaphore(regs, error_code, address, pkey); |
1082 | else | 1080 | else |
1083 | BUG(); | 1081 | BUG(); |
1084 | } | 1082 | } |
@@ -1268,6 +1266,7 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code, | |||
1268 | struct mm_struct *mm; | 1266 | struct mm_struct *mm; |
1269 | int fault, major = 0; | 1267 | int fault, major = 0; |
1270 | unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE; | 1268 | unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE; |
1269 | u32 pkey; | ||
1271 | 1270 | ||
1272 | tsk = current; | 1271 | tsk = current; |
1273 | mm = tsk->mm; | 1272 | mm = tsk->mm; |
@@ -1468,9 +1467,10 @@ good_area: | |||
1468 | return; | 1467 | return; |
1469 | } | 1468 | } |
1470 | 1469 | ||
1470 | pkey = vma_pkey(vma); | ||
1471 | up_read(&mm->mmap_sem); | 1471 | up_read(&mm->mmap_sem); |
1472 | if (unlikely(fault & VM_FAULT_ERROR)) { | 1472 | if (unlikely(fault & VM_FAULT_ERROR)) { |
1473 | mm_fault_error(regs, error_code, address, vma, fault); | 1473 | mm_fault_error(regs, error_code, address, &pkey, fault); |
1474 | return; | 1474 | return; |
1475 | } | 1475 | } |
1476 | 1476 | ||
diff --git a/arch/x86/mm/mem_encrypt.c b/arch/x86/mm/mem_encrypt.c index 3fcc8e01683b..16c5f37933a2 100644 --- a/arch/x86/mm/mem_encrypt.c +++ b/arch/x86/mm/mem_encrypt.c | |||
@@ -10,6 +10,8 @@ | |||
10 | * published by the Free Software Foundation. | 10 | * published by the Free Software Foundation. |
11 | */ | 11 | */ |
12 | 12 | ||
13 | #define DISABLE_BRANCH_PROFILING | ||
14 | |||
13 | #include <linux/linkage.h> | 15 | #include <linux/linkage.h> |
14 | #include <linux/init.h> | 16 | #include <linux/init.h> |
15 | #include <linux/mm.h> | 17 | #include <linux/mm.h> |
diff --git a/arch/x86/mm/pkeys.c b/arch/x86/mm/pkeys.c index 2dab69a706ec..d7bc0eea20a5 100644 --- a/arch/x86/mm/pkeys.c +++ b/arch/x86/mm/pkeys.c | |||
@@ -18,7 +18,6 @@ | |||
18 | 18 | ||
19 | #include <asm/cpufeature.h> /* boot_cpu_has, ... */ | 19 | #include <asm/cpufeature.h> /* boot_cpu_has, ... */ |
20 | #include <asm/mmu_context.h> /* vma_pkey() */ | 20 | #include <asm/mmu_context.h> /* vma_pkey() */ |
21 | #include <asm/fpu/internal.h> /* fpregs_active() */ | ||
22 | 21 | ||
23 | int __execute_only_pkey(struct mm_struct *mm) | 22 | int __execute_only_pkey(struct mm_struct *mm) |
24 | { | 23 | { |
@@ -45,7 +44,7 @@ int __execute_only_pkey(struct mm_struct *mm) | |||
45 | */ | 44 | */ |
46 | preempt_disable(); | 45 | preempt_disable(); |
47 | if (!need_to_set_mm_pkey && | 46 | if (!need_to_set_mm_pkey && |
48 | fpregs_active() && | 47 | current->thread.fpu.initialized && |
49 | !__pkru_allows_read(read_pkru(), execute_only_pkey)) { | 48 | !__pkru_allows_read(read_pkru(), execute_only_pkey)) { |
50 | preempt_enable(); | 49 | preempt_enable(); |
51 | return execute_only_pkey; | 50 | return execute_only_pkey; |
diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c index 1ab3821f9e26..49d9778376d7 100644 --- a/arch/x86/mm/tlb.c +++ b/arch/x86/mm/tlb.c | |||
@@ -126,8 +126,7 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next, | |||
126 | * isn't free. | 126 | * isn't free. |
127 | */ | 127 | */ |
128 | #ifdef CONFIG_DEBUG_VM | 128 | #ifdef CONFIG_DEBUG_VM |
129 | if (WARN_ON_ONCE(__read_cr3() != | 129 | if (WARN_ON_ONCE(__read_cr3() != build_cr3(real_prev, prev_asid))) { |
130 | (__sme_pa(real_prev->pgd) | prev_asid))) { | ||
131 | /* | 130 | /* |
132 | * If we were to BUG here, we'd be very likely to kill | 131 | * If we were to BUG here, we'd be very likely to kill |
133 | * the system so hard that we don't see the call trace. | 132 | * the system so hard that we don't see the call trace. |
@@ -172,7 +171,7 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next, | |||
172 | */ | 171 | */ |
173 | this_cpu_write(cpu_tlbstate.ctxs[prev_asid].tlb_gen, | 172 | this_cpu_write(cpu_tlbstate.ctxs[prev_asid].tlb_gen, |
174 | next_tlb_gen); | 173 | next_tlb_gen); |
175 | write_cr3(__sme_pa(next->pgd) | prev_asid); | 174 | write_cr3(build_cr3(next, prev_asid)); |
176 | trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH, | 175 | trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH, |
177 | TLB_FLUSH_ALL); | 176 | TLB_FLUSH_ALL); |
178 | } | 177 | } |
@@ -192,7 +191,7 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next, | |||
192 | * mapped in the new pgd, we'll double-fault. Forcibly | 191 | * mapped in the new pgd, we'll double-fault. Forcibly |
193 | * map it. | 192 | * map it. |
194 | */ | 193 | */ |
195 | unsigned int index = pgd_index(current_stack_pointer()); | 194 | unsigned int index = pgd_index(current_stack_pointer); |
196 | pgd_t *pgd = next->pgd + index; | 195 | pgd_t *pgd = next->pgd + index; |
197 | 196 | ||
198 | if (unlikely(pgd_none(*pgd))) | 197 | if (unlikely(pgd_none(*pgd))) |
@@ -216,12 +215,12 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next, | |||
216 | if (need_flush) { | 215 | if (need_flush) { |
217 | this_cpu_write(cpu_tlbstate.ctxs[new_asid].ctx_id, next->context.ctx_id); | 216 | this_cpu_write(cpu_tlbstate.ctxs[new_asid].ctx_id, next->context.ctx_id); |
218 | this_cpu_write(cpu_tlbstate.ctxs[new_asid].tlb_gen, next_tlb_gen); | 217 | this_cpu_write(cpu_tlbstate.ctxs[new_asid].tlb_gen, next_tlb_gen); |
219 | write_cr3(__sme_pa(next->pgd) | new_asid); | 218 | write_cr3(build_cr3(next, new_asid)); |
220 | trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH, | 219 | trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH, |
221 | TLB_FLUSH_ALL); | 220 | TLB_FLUSH_ALL); |
222 | } else { | 221 | } else { |
223 | /* The new ASID is already up to date. */ | 222 | /* The new ASID is already up to date. */ |
224 | write_cr3(__sme_pa(next->pgd) | new_asid | CR3_NOFLUSH); | 223 | write_cr3(build_cr3_noflush(next, new_asid)); |
225 | trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH, 0); | 224 | trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH, 0); |
226 | } | 225 | } |
227 | 226 | ||
@@ -265,7 +264,7 @@ void initialize_tlbstate_and_flush(void) | |||
265 | !(cr4_read_shadow() & X86_CR4_PCIDE)); | 264 | !(cr4_read_shadow() & X86_CR4_PCIDE)); |
266 | 265 | ||
267 | /* Force ASID 0 and force a TLB flush. */ | 266 | /* Force ASID 0 and force a TLB flush. */ |
268 | write_cr3(cr3 & ~CR3_PCID_MASK); | 267 | write_cr3(build_cr3(mm, 0)); |
269 | 268 | ||
270 | /* Reinitialize tlbstate. */ | 269 | /* Reinitialize tlbstate. */ |
271 | this_cpu_write(cpu_tlbstate.loaded_mm_asid, 0); | 270 | this_cpu_write(cpu_tlbstate.loaded_mm_asid, 0); |
diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c index 8c9573660d51..0554e8aef4d5 100644 --- a/arch/x86/net/bpf_jit_comp.c +++ b/arch/x86/net/bpf_jit_comp.c | |||
@@ -284,9 +284,9 @@ static void emit_bpf_tail_call(u8 **pprog) | |||
284 | /* if (index >= array->map.max_entries) | 284 | /* if (index >= array->map.max_entries) |
285 | * goto out; | 285 | * goto out; |
286 | */ | 286 | */ |
287 | EMIT4(0x48, 0x8B, 0x46, /* mov rax, qword ptr [rsi + 16] */ | 287 | EMIT2(0x89, 0xD2); /* mov edx, edx */ |
288 | EMIT3(0x39, 0x56, /* cmp dword ptr [rsi + 16], edx */ | ||
288 | offsetof(struct bpf_array, map.max_entries)); | 289 | offsetof(struct bpf_array, map.max_entries)); |
289 | EMIT3(0x48, 0x39, 0xD0); /* cmp rax, rdx */ | ||
290 | #define OFFSET1 43 /* number of bytes to jump */ | 290 | #define OFFSET1 43 /* number of bytes to jump */ |
291 | EMIT2(X86_JBE, OFFSET1); /* jbe out */ | 291 | EMIT2(X86_JBE, OFFSET1); /* jbe out */ |
292 | label1 = cnt; | 292 | label1 = cnt; |
diff --git a/arch/x86/xen/mmu_pv.c b/arch/x86/xen/mmu_pv.c index 509f560bd0c6..71495f1a86d7 100644 --- a/arch/x86/xen/mmu_pv.c +++ b/arch/x86/xen/mmu_pv.c | |||
@@ -1238,21 +1238,16 @@ static void __init xen_pagetable_cleanhighmap(void) | |||
1238 | * from _brk_limit way up to the max_pfn_mapped (which is the end of | 1238 | * from _brk_limit way up to the max_pfn_mapped (which is the end of |
1239 | * the ramdisk). We continue on, erasing PMD entries that point to page | 1239 | * the ramdisk). We continue on, erasing PMD entries that point to page |
1240 | * tables - do note that they are accessible at this stage via __va. | 1240 | * tables - do note that they are accessible at this stage via __va. |
1241 | * For good measure we also round up to the PMD - which means that if | 1241 | * As Xen is aligning the memory end to a 4MB boundary, for good |
1242 | * measure we also round up to PMD_SIZE * 2 - which means that if | ||
1242 | * anybody is using __ka address to the initial boot-stack - and try | 1243 | * anybody is using __ka address to the initial boot-stack - and try |
1243 | * to use it - they are going to crash. The xen_start_info has been | 1244 | * to use it - they are going to crash. The xen_start_info has been |
1244 | * taken care of already in xen_setup_kernel_pagetable. */ | 1245 | * taken care of already in xen_setup_kernel_pagetable. */ |
1245 | addr = xen_start_info->pt_base; | 1246 | addr = xen_start_info->pt_base; |
1246 | size = roundup(xen_start_info->nr_pt_frames * PAGE_SIZE, PMD_SIZE); | 1247 | size = xen_start_info->nr_pt_frames * PAGE_SIZE; |
1247 | 1248 | ||
1248 | xen_cleanhighmap(addr, addr + size); | 1249 | xen_cleanhighmap(addr, roundup(addr + size, PMD_SIZE * 2)); |
1249 | xen_start_info->pt_base = (unsigned long)__va(__pa(xen_start_info->pt_base)); | 1250 | xen_start_info->pt_base = (unsigned long)__va(__pa(xen_start_info->pt_base)); |
1250 | #ifdef DEBUG | ||
1251 | /* This is superfluous and is not necessary, but you know what | ||
1252 | * lets do it. The MODULES_VADDR -> MODULES_END should be clear of | ||
1253 | * anything at this stage. */ | ||
1254 | xen_cleanhighmap(MODULES_VADDR, roundup(MODULES_VADDR, PUD_SIZE) - 1); | ||
1255 | #endif | ||
1256 | } | 1251 | } |
1257 | #endif | 1252 | #endif |
1258 | 1253 | ||
@@ -2220,7 +2215,7 @@ static void __init xen_write_cr3_init(unsigned long cr3) | |||
2220 | * not the first page table in the page table pool. | 2215 | * not the first page table in the page table pool. |
2221 | * Iterate through the initial page tables to find the real page table base. | 2216 | * Iterate through the initial page tables to find the real page table base. |
2222 | */ | 2217 | */ |
2223 | static phys_addr_t xen_find_pt_base(pmd_t *pmd) | 2218 | static phys_addr_t __init xen_find_pt_base(pmd_t *pmd) |
2224 | { | 2219 | { |
2225 | phys_addr_t pt_base, paddr; | 2220 | phys_addr_t pt_base, paddr; |
2226 | unsigned pmdidx; | 2221 | unsigned pmdidx; |
diff --git a/arch/xtensa/include/asm/processor.h b/arch/xtensa/include/asm/processor.h index 30ee8c608853..5b0027d4ecc0 100644 --- a/arch/xtensa/include/asm/processor.h +++ b/arch/xtensa/include/asm/processor.h | |||
@@ -208,11 +208,6 @@ struct mm_struct; | |||
208 | /* Free all resources held by a thread. */ | 208 | /* Free all resources held by a thread. */ |
209 | #define release_thread(thread) do { } while(0) | 209 | #define release_thread(thread) do { } while(0) |
210 | 210 | ||
211 | /* Copy and release all segment info associated with a VM */ | ||
212 | #define copy_segments(p, mm) do { } while(0) | ||
213 | #define release_segments(mm) do { } while(0) | ||
214 | #define forget_segments() do { } while (0) | ||
215 | |||
216 | extern unsigned long get_wchan(struct task_struct *p); | 211 | extern unsigned long get_wchan(struct task_struct *p); |
217 | 212 | ||
218 | #define KSTK_EIP(tsk) (task_pt_regs(tsk)->pc) | 213 | #define KSTK_EIP(tsk) (task_pt_regs(tsk)->pc) |
diff --git a/block/blk-core.c b/block/blk-core.c index aebe676225e6..048be4aa6024 100644 --- a/block/blk-core.c +++ b/block/blk-core.c | |||
@@ -854,6 +854,9 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id) | |||
854 | 854 | ||
855 | kobject_init(&q->kobj, &blk_queue_ktype); | 855 | kobject_init(&q->kobj, &blk_queue_ktype); |
856 | 856 | ||
857 | #ifdef CONFIG_BLK_DEV_IO_TRACE | ||
858 | mutex_init(&q->blk_trace_mutex); | ||
859 | #endif | ||
857 | mutex_init(&q->sysfs_lock); | 860 | mutex_init(&q->sysfs_lock); |
858 | spin_lock_init(&q->__queue_lock); | 861 | spin_lock_init(&q->__queue_lock); |
859 | 862 | ||
diff --git a/block/blk-mq-debugfs.c b/block/blk-mq-debugfs.c index 980e73095643..de294d775acf 100644 --- a/block/blk-mq-debugfs.c +++ b/block/blk-mq-debugfs.c | |||
@@ -815,10 +815,14 @@ int blk_mq_debugfs_register(struct request_queue *q) | |||
815 | goto err; | 815 | goto err; |
816 | 816 | ||
817 | /* | 817 | /* |
818 | * blk_mq_init_hctx() attempted to do this already, but q->debugfs_dir | 818 | * blk_mq_init_sched() attempted to do this already, but q->debugfs_dir |
819 | * didn't exist yet (because we don't know what to name the directory | 819 | * didn't exist yet (because we don't know what to name the directory |
820 | * until the queue is registered to a gendisk). | 820 | * until the queue is registered to a gendisk). |
821 | */ | 821 | */ |
822 | if (q->elevator && !q->sched_debugfs_dir) | ||
823 | blk_mq_debugfs_register_sched(q); | ||
824 | |||
825 | /* Similarly, blk_mq_init_hctx() couldn't do this previously. */ | ||
822 | queue_for_each_hw_ctx(q, hctx, i) { | 826 | queue_for_each_hw_ctx(q, hctx, i) { |
823 | if (!hctx->debugfs_dir && blk_mq_debugfs_register_hctx(q, hctx)) | 827 | if (!hctx->debugfs_dir && blk_mq_debugfs_register_hctx(q, hctx)) |
824 | goto err; | 828 | goto err; |
diff --git a/block/blk-throttle.c b/block/blk-throttle.c index 0fea76aa0f3f..17816a028dcb 100644 --- a/block/blk-throttle.c +++ b/block/blk-throttle.c | |||
@@ -1911,11 +1911,11 @@ static void throtl_upgrade_state(struct throtl_data *td) | |||
1911 | 1911 | ||
1912 | tg->disptime = jiffies - 1; | 1912 | tg->disptime = jiffies - 1; |
1913 | throtl_select_dispatch(sq); | 1913 | throtl_select_dispatch(sq); |
1914 | throtl_schedule_next_dispatch(sq, false); | 1914 | throtl_schedule_next_dispatch(sq, true); |
1915 | } | 1915 | } |
1916 | rcu_read_unlock(); | 1916 | rcu_read_unlock(); |
1917 | throtl_select_dispatch(&td->service_queue); | 1917 | throtl_select_dispatch(&td->service_queue); |
1918 | throtl_schedule_next_dispatch(&td->service_queue, false); | 1918 | throtl_schedule_next_dispatch(&td->service_queue, true); |
1919 | queue_work(kthrotld_workqueue, &td->dispatch_work); | 1919 | queue_work(kthrotld_workqueue, &td->dispatch_work); |
1920 | } | 1920 | } |
1921 | 1921 | ||
diff --git a/block/bsg-lib.c b/block/bsg-lib.c index c82408c7cc3c..15d25ccd51a5 100644 --- a/block/bsg-lib.c +++ b/block/bsg-lib.c | |||
@@ -154,7 +154,6 @@ static int bsg_prepare_job(struct device *dev, struct request *req) | |||
154 | failjob_rls_rqst_payload: | 154 | failjob_rls_rqst_payload: |
155 | kfree(job->request_payload.sg_list); | 155 | kfree(job->request_payload.sg_list); |
156 | failjob_rls_job: | 156 | failjob_rls_job: |
157 | kfree(job); | ||
158 | return -ENOMEM; | 157 | return -ENOMEM; |
159 | } | 158 | } |
160 | 159 | ||
@@ -208,20 +207,34 @@ static int bsg_init_rq(struct request_queue *q, struct request *req, gfp_t gfp) | |||
208 | struct bsg_job *job = blk_mq_rq_to_pdu(req); | 207 | struct bsg_job *job = blk_mq_rq_to_pdu(req); |
209 | struct scsi_request *sreq = &job->sreq; | 208 | struct scsi_request *sreq = &job->sreq; |
210 | 209 | ||
210 | /* called right after the request is allocated for the request_queue */ | ||
211 | |||
212 | sreq->sense = kzalloc(SCSI_SENSE_BUFFERSIZE, gfp); | ||
213 | if (!sreq->sense) | ||
214 | return -ENOMEM; | ||
215 | |||
216 | return 0; | ||
217 | } | ||
218 | |||
219 | static void bsg_initialize_rq(struct request *req) | ||
220 | { | ||
221 | struct bsg_job *job = blk_mq_rq_to_pdu(req); | ||
222 | struct scsi_request *sreq = &job->sreq; | ||
223 | void *sense = sreq->sense; | ||
224 | |||
225 | /* called right before the request is given to the request_queue user */ | ||
226 | |||
211 | memset(job, 0, sizeof(*job)); | 227 | memset(job, 0, sizeof(*job)); |
212 | 228 | ||
213 | scsi_req_init(sreq); | 229 | scsi_req_init(sreq); |
230 | |||
231 | sreq->sense = sense; | ||
214 | sreq->sense_len = SCSI_SENSE_BUFFERSIZE; | 232 | sreq->sense_len = SCSI_SENSE_BUFFERSIZE; |
215 | sreq->sense = kzalloc(sreq->sense_len, gfp); | ||
216 | if (!sreq->sense) | ||
217 | return -ENOMEM; | ||
218 | 233 | ||
219 | job->req = req; | 234 | job->req = req; |
220 | job->reply = sreq->sense; | 235 | job->reply = sense; |
221 | job->reply_len = sreq->sense_len; | 236 | job->reply_len = sreq->sense_len; |
222 | job->dd_data = job + 1; | 237 | job->dd_data = job + 1; |
223 | |||
224 | return 0; | ||
225 | } | 238 | } |
226 | 239 | ||
227 | static void bsg_exit_rq(struct request_queue *q, struct request *req) | 240 | static void bsg_exit_rq(struct request_queue *q, struct request *req) |
@@ -252,6 +265,7 @@ struct request_queue *bsg_setup_queue(struct device *dev, const char *name, | |||
252 | q->cmd_size = sizeof(struct bsg_job) + dd_job_size; | 265 | q->cmd_size = sizeof(struct bsg_job) + dd_job_size; |
253 | q->init_rq_fn = bsg_init_rq; | 266 | q->init_rq_fn = bsg_init_rq; |
254 | q->exit_rq_fn = bsg_exit_rq; | 267 | q->exit_rq_fn = bsg_exit_rq; |
268 | q->initialize_rq_fn = bsg_initialize_rq; | ||
255 | q->request_fn = bsg_request_fn; | 269 | q->request_fn = bsg_request_fn; |
256 | 270 | ||
257 | ret = blk_init_allocated_queue(q); | 271 | ret = blk_init_allocated_queue(q); |
diff --git a/block/partition-generic.c b/block/partition-generic.c index 86e8fe1adcdb..88c555db4e5d 100644 --- a/block/partition-generic.c +++ b/block/partition-generic.c | |||
@@ -112,7 +112,7 @@ ssize_t part_stat_show(struct device *dev, | |||
112 | struct device_attribute *attr, char *buf) | 112 | struct device_attribute *attr, char *buf) |
113 | { | 113 | { |
114 | struct hd_struct *p = dev_to_part(dev); | 114 | struct hd_struct *p = dev_to_part(dev); |
115 | struct request_queue *q = dev_to_disk(dev)->queue; | 115 | struct request_queue *q = part_to_disk(p)->queue; |
116 | unsigned int inflight[2]; | 116 | unsigned int inflight[2]; |
117 | int cpu; | 117 | int cpu; |
118 | 118 | ||
diff --git a/crypto/af_alg.c b/crypto/af_alg.c index ffa9f4ccd9b4..337cf382718e 100644 --- a/crypto/af_alg.c +++ b/crypto/af_alg.c | |||
@@ -619,14 +619,14 @@ void af_alg_pull_tsgl(struct sock *sk, size_t used, struct scatterlist *dst, | |||
619 | struct af_alg_ctx *ctx = ask->private; | 619 | struct af_alg_ctx *ctx = ask->private; |
620 | struct af_alg_tsgl *sgl; | 620 | struct af_alg_tsgl *sgl; |
621 | struct scatterlist *sg; | 621 | struct scatterlist *sg; |
622 | unsigned int i, j; | 622 | unsigned int i, j = 0; |
623 | 623 | ||
624 | while (!list_empty(&ctx->tsgl_list)) { | 624 | while (!list_empty(&ctx->tsgl_list)) { |
625 | sgl = list_first_entry(&ctx->tsgl_list, struct af_alg_tsgl, | 625 | sgl = list_first_entry(&ctx->tsgl_list, struct af_alg_tsgl, |
626 | list); | 626 | list); |
627 | sg = sgl->sg; | 627 | sg = sgl->sg; |
628 | 628 | ||
629 | for (i = 0, j = 0; i < sgl->cur; i++) { | 629 | for (i = 0; i < sgl->cur; i++) { |
630 | size_t plen = min_t(size_t, used, sg[i].length); | 630 | size_t plen = min_t(size_t, used, sg[i].length); |
631 | struct page *page = sg_page(sg + i); | 631 | struct page *page = sg_page(sg + i); |
632 | 632 | ||
diff --git a/crypto/drbg.c b/crypto/drbg.c index 633a88e93ab0..70018397e59a 100644 --- a/crypto/drbg.c +++ b/crypto/drbg.c | |||
@@ -1133,10 +1133,10 @@ static inline void drbg_dealloc_state(struct drbg_state *drbg) | |||
1133 | { | 1133 | { |
1134 | if (!drbg) | 1134 | if (!drbg) |
1135 | return; | 1135 | return; |
1136 | kzfree(drbg->V); | 1136 | kzfree(drbg->Vbuf); |
1137 | drbg->Vbuf = NULL; | 1137 | drbg->V = NULL; |
1138 | kzfree(drbg->C); | 1138 | kzfree(drbg->Cbuf); |
1139 | drbg->Cbuf = NULL; | 1139 | drbg->C = NULL; |
1140 | kzfree(drbg->scratchpadbuf); | 1140 | kzfree(drbg->scratchpadbuf); |
1141 | drbg->scratchpadbuf = NULL; | 1141 | drbg->scratchpadbuf = NULL; |
1142 | drbg->reseed_ctr = 0; | 1142 | drbg->reseed_ctr = 0; |
diff --git a/drivers/acpi/acpi_watchdog.c b/drivers/acpi/acpi_watchdog.c index bf22c29d2517..11b113f8e367 100644 --- a/drivers/acpi/acpi_watchdog.c +++ b/drivers/acpi/acpi_watchdog.c | |||
@@ -66,7 +66,7 @@ void __init acpi_watchdog_init(void) | |||
66 | for (i = 0; i < wdat->entries; i++) { | 66 | for (i = 0; i < wdat->entries; i++) { |
67 | const struct acpi_generic_address *gas; | 67 | const struct acpi_generic_address *gas; |
68 | struct resource_entry *rentry; | 68 | struct resource_entry *rentry; |
69 | struct resource res; | 69 | struct resource res = {}; |
70 | bool found; | 70 | bool found; |
71 | 71 | ||
72 | gas = &entries[i].register_region; | 72 | gas = &entries[i].register_region; |
diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c index 077f9bad6f44..3c3a37b8503b 100644 --- a/drivers/acpi/apei/ghes.c +++ b/drivers/acpi/apei/ghes.c | |||
@@ -743,17 +743,19 @@ static int ghes_proc(struct ghes *ghes) | |||
743 | } | 743 | } |
744 | ghes_do_proc(ghes, ghes->estatus); | 744 | ghes_do_proc(ghes, ghes->estatus); |
745 | 745 | ||
746 | out: | ||
747 | ghes_clear_estatus(ghes); | ||
748 | |||
749 | if (rc == -ENOENT) | ||
750 | return rc; | ||
751 | |||
746 | /* | 752 | /* |
747 | * GHESv2 type HEST entries introduce support for error acknowledgment, | 753 | * GHESv2 type HEST entries introduce support for error acknowledgment, |
748 | * so only acknowledge the error if this support is present. | 754 | * so only acknowledge the error if this support is present. |
749 | */ | 755 | */ |
750 | if (is_hest_type_generic_v2(ghes)) { | 756 | if (is_hest_type_generic_v2(ghes)) |
751 | rc = ghes_ack_error(ghes->generic_v2); | 757 | return ghes_ack_error(ghes->generic_v2); |
752 | if (rc) | 758 | |
753 | return rc; | ||
754 | } | ||
755 | out: | ||
756 | ghes_clear_estatus(ghes); | ||
757 | return rc; | 759 | return rc; |
758 | } | 760 | } |
759 | 761 | ||
diff --git a/drivers/acpi/arm64/iort.c b/drivers/acpi/arm64/iort.c index 9565d572f8dd..de56394dd161 100644 --- a/drivers/acpi/arm64/iort.c +++ b/drivers/acpi/arm64/iort.c | |||
@@ -1178,12 +1178,44 @@ dev_put: | |||
1178 | return ret; | 1178 | return ret; |
1179 | } | 1179 | } |
1180 | 1180 | ||
1181 | static bool __init iort_enable_acs(struct acpi_iort_node *iort_node) | ||
1182 | { | ||
1183 | if (iort_node->type == ACPI_IORT_NODE_PCI_ROOT_COMPLEX) { | ||
1184 | struct acpi_iort_node *parent; | ||
1185 | struct acpi_iort_id_mapping *map; | ||
1186 | int i; | ||
1187 | |||
1188 | map = ACPI_ADD_PTR(struct acpi_iort_id_mapping, iort_node, | ||
1189 | iort_node->mapping_offset); | ||
1190 | |||
1191 | for (i = 0; i < iort_node->mapping_count; i++, map++) { | ||
1192 | if (!map->output_reference) | ||
1193 | continue; | ||
1194 | |||
1195 | parent = ACPI_ADD_PTR(struct acpi_iort_node, | ||
1196 | iort_table, map->output_reference); | ||
1197 | /* | ||
1198 | * If we detect a RC->SMMU mapping, make sure | ||
1199 | * we enable ACS on the system. | ||
1200 | */ | ||
1201 | if ((parent->type == ACPI_IORT_NODE_SMMU) || | ||
1202 | (parent->type == ACPI_IORT_NODE_SMMU_V3)) { | ||
1203 | pci_request_acs(); | ||
1204 | return true; | ||
1205 | } | ||
1206 | } | ||
1207 | } | ||
1208 | |||
1209 | return false; | ||
1210 | } | ||
1211 | |||
1181 | static void __init iort_init_platform_devices(void) | 1212 | static void __init iort_init_platform_devices(void) |
1182 | { | 1213 | { |
1183 | struct acpi_iort_node *iort_node, *iort_end; | 1214 | struct acpi_iort_node *iort_node, *iort_end; |
1184 | struct acpi_table_iort *iort; | 1215 | struct acpi_table_iort *iort; |
1185 | struct fwnode_handle *fwnode; | 1216 | struct fwnode_handle *fwnode; |
1186 | int i, ret; | 1217 | int i, ret; |
1218 | bool acs_enabled = false; | ||
1187 | 1219 | ||
1188 | /* | 1220 | /* |
1189 | * iort_table and iort both point to the start of IORT table, but | 1221 | * iort_table and iort both point to the start of IORT table, but |
@@ -1203,6 +1235,9 @@ static void __init iort_init_platform_devices(void) | |||
1203 | return; | 1235 | return; |
1204 | } | 1236 | } |
1205 | 1237 | ||
1238 | if (!acs_enabled) | ||
1239 | acs_enabled = iort_enable_acs(iort_node); | ||
1240 | |||
1206 | if ((iort_node->type == ACPI_IORT_NODE_SMMU) || | 1241 | if ((iort_node->type == ACPI_IORT_NODE_SMMU) || |
1207 | (iort_node->type == ACPI_IORT_NODE_SMMU_V3)) { | 1242 | (iort_node->type == ACPI_IORT_NODE_SMMU_V3)) { |
1208 | 1243 | ||
diff --git a/drivers/acpi/property.c b/drivers/acpi/property.c index c1c216163de3..3fb8ff513461 100644 --- a/drivers/acpi/property.c +++ b/drivers/acpi/property.c | |||
@@ -908,11 +908,12 @@ struct fwnode_handle *acpi_get_next_subnode(const struct fwnode_handle *fwnode, | |||
908 | struct fwnode_handle *child) | 908 | struct fwnode_handle *child) |
909 | { | 909 | { |
910 | const struct acpi_device *adev = to_acpi_device_node(fwnode); | 910 | const struct acpi_device *adev = to_acpi_device_node(fwnode); |
911 | struct acpi_device *child_adev = NULL; | ||
912 | const struct list_head *head; | 911 | const struct list_head *head; |
913 | struct list_head *next; | 912 | struct list_head *next; |
914 | 913 | ||
915 | if (!child || is_acpi_device_node(child)) { | 914 | if (!child || is_acpi_device_node(child)) { |
915 | struct acpi_device *child_adev; | ||
916 | |||
916 | if (adev) | 917 | if (adev) |
917 | head = &adev->children; | 918 | head = &adev->children; |
918 | else | 919 | else |
@@ -922,8 +923,8 @@ struct fwnode_handle *acpi_get_next_subnode(const struct fwnode_handle *fwnode, | |||
922 | goto nondev; | 923 | goto nondev; |
923 | 924 | ||
924 | if (child) { | 925 | if (child) { |
925 | child_adev = to_acpi_device_node(child); | 926 | adev = to_acpi_device_node(child); |
926 | next = child_adev->node.next; | 927 | next = adev->node.next; |
927 | if (next == head) { | 928 | if (next == head) { |
928 | child = NULL; | 929 | child = NULL; |
929 | goto nondev; | 930 | goto nondev; |
@@ -941,8 +942,8 @@ struct fwnode_handle *acpi_get_next_subnode(const struct fwnode_handle *fwnode, | |||
941 | const struct acpi_data_node *data = to_acpi_data_node(fwnode); | 942 | const struct acpi_data_node *data = to_acpi_data_node(fwnode); |
942 | struct acpi_data_node *dn; | 943 | struct acpi_data_node *dn; |
943 | 944 | ||
944 | if (child_adev) | 945 | if (adev) |
945 | head = &child_adev->data.subnodes; | 946 | head = &adev->data.subnodes; |
946 | else if (data) | 947 | else if (data) |
947 | head = &data->data.subnodes; | 948 | head = &data->data.subnodes; |
948 | else | 949 | else |
@@ -1293,3 +1294,16 @@ static int acpi_fwnode_graph_parse_endpoint(const struct fwnode_handle *fwnode, | |||
1293 | DECLARE_ACPI_FWNODE_OPS(acpi_device_fwnode_ops); | 1294 | DECLARE_ACPI_FWNODE_OPS(acpi_device_fwnode_ops); |
1294 | DECLARE_ACPI_FWNODE_OPS(acpi_data_fwnode_ops); | 1295 | DECLARE_ACPI_FWNODE_OPS(acpi_data_fwnode_ops); |
1295 | const struct fwnode_operations acpi_static_fwnode_ops; | 1296 | const struct fwnode_operations acpi_static_fwnode_ops; |
1297 | |||
1298 | bool is_acpi_device_node(const struct fwnode_handle *fwnode) | ||
1299 | { | ||
1300 | return !IS_ERR_OR_NULL(fwnode) && | ||
1301 | fwnode->ops == &acpi_device_fwnode_ops; | ||
1302 | } | ||
1303 | EXPORT_SYMBOL(is_acpi_device_node); | ||
1304 | |||
1305 | bool is_acpi_data_node(const struct fwnode_handle *fwnode) | ||
1306 | { | ||
1307 | return !IS_ERR_OR_NULL(fwnode) && fwnode->ops == &acpi_data_fwnode_ops; | ||
1308 | } | ||
1309 | EXPORT_SYMBOL(is_acpi_data_node); | ||
diff --git a/drivers/android/binder.c b/drivers/android/binder.c index d055b3f2a207..ab34239a76ee 100644 --- a/drivers/android/binder.c +++ b/drivers/android/binder.c | |||
@@ -2217,7 +2217,7 @@ static void binder_transaction_buffer_release(struct binder_proc *proc, | |||
2217 | debug_id, (u64)fda->num_fds); | 2217 | debug_id, (u64)fda->num_fds); |
2218 | continue; | 2218 | continue; |
2219 | } | 2219 | } |
2220 | fd_array = (u32 *)(parent_buffer + fda->parent_offset); | 2220 | fd_array = (u32 *)(parent_buffer + (uintptr_t)fda->parent_offset); |
2221 | for (fd_index = 0; fd_index < fda->num_fds; fd_index++) | 2221 | for (fd_index = 0; fd_index < fda->num_fds; fd_index++) |
2222 | task_close_fd(proc, fd_array[fd_index]); | 2222 | task_close_fd(proc, fd_array[fd_index]); |
2223 | } break; | 2223 | } break; |
@@ -2326,7 +2326,6 @@ static int binder_translate_handle(struct flat_binder_object *fp, | |||
2326 | (u64)node->ptr); | 2326 | (u64)node->ptr); |
2327 | binder_node_unlock(node); | 2327 | binder_node_unlock(node); |
2328 | } else { | 2328 | } else { |
2329 | int ret; | ||
2330 | struct binder_ref_data dest_rdata; | 2329 | struct binder_ref_data dest_rdata; |
2331 | 2330 | ||
2332 | binder_node_unlock(node); | 2331 | binder_node_unlock(node); |
@@ -2442,7 +2441,7 @@ static int binder_translate_fd_array(struct binder_fd_array_object *fda, | |||
2442 | */ | 2441 | */ |
2443 | parent_buffer = parent->buffer - | 2442 | parent_buffer = parent->buffer - |
2444 | binder_alloc_get_user_buffer_offset(&target_proc->alloc); | 2443 | binder_alloc_get_user_buffer_offset(&target_proc->alloc); |
2445 | fd_array = (u32 *)(parent_buffer + fda->parent_offset); | 2444 | fd_array = (u32 *)(parent_buffer + (uintptr_t)fda->parent_offset); |
2446 | if (!IS_ALIGNED((unsigned long)fd_array, sizeof(u32))) { | 2445 | if (!IS_ALIGNED((unsigned long)fd_array, sizeof(u32))) { |
2447 | binder_user_error("%d:%d parent offset not aligned correctly.\n", | 2446 | binder_user_error("%d:%d parent offset not aligned correctly.\n", |
2448 | proc->pid, thread->pid); | 2447 | proc->pid, thread->pid); |
@@ -2508,7 +2507,7 @@ static int binder_fixup_parent(struct binder_transaction *t, | |||
2508 | proc->pid, thread->pid); | 2507 | proc->pid, thread->pid); |
2509 | return -EINVAL; | 2508 | return -EINVAL; |
2510 | } | 2509 | } |
2511 | parent_buffer = (u8 *)(parent->buffer - | 2510 | parent_buffer = (u8 *)((uintptr_t)parent->buffer - |
2512 | binder_alloc_get_user_buffer_offset( | 2511 | binder_alloc_get_user_buffer_offset( |
2513 | &target_proc->alloc)); | 2512 | &target_proc->alloc)); |
2514 | *(binder_uintptr_t *)(parent_buffer + bp->parent_offset) = bp->buffer; | 2513 | *(binder_uintptr_t *)(parent_buffer + bp->parent_offset) = bp->buffer; |
@@ -3083,6 +3082,7 @@ static void binder_transaction(struct binder_proc *proc, | |||
3083 | err_dead_proc_or_thread: | 3082 | err_dead_proc_or_thread: |
3084 | return_error = BR_DEAD_REPLY; | 3083 | return_error = BR_DEAD_REPLY; |
3085 | return_error_line = __LINE__; | 3084 | return_error_line = __LINE__; |
3085 | binder_dequeue_work(proc, tcomplete); | ||
3086 | err_translate_failed: | 3086 | err_translate_failed: |
3087 | err_bad_object_type: | 3087 | err_bad_object_type: |
3088 | err_bad_offset: | 3088 | err_bad_offset: |
diff --git a/drivers/android/binder_alloc.c b/drivers/android/binder_alloc.c index 8fe165844e47..064f5e31ec55 100644 --- a/drivers/android/binder_alloc.c +++ b/drivers/android/binder_alloc.c | |||
@@ -913,6 +913,7 @@ enum lru_status binder_alloc_free_page(struct list_head *item, | |||
913 | struct binder_alloc *alloc; | 913 | struct binder_alloc *alloc; |
914 | uintptr_t page_addr; | 914 | uintptr_t page_addr; |
915 | size_t index; | 915 | size_t index; |
916 | struct vm_area_struct *vma; | ||
916 | 917 | ||
917 | alloc = page->alloc; | 918 | alloc = page->alloc; |
918 | if (!mutex_trylock(&alloc->mutex)) | 919 | if (!mutex_trylock(&alloc->mutex)) |
@@ -923,16 +924,22 @@ enum lru_status binder_alloc_free_page(struct list_head *item, | |||
923 | 924 | ||
924 | index = page - alloc->pages; | 925 | index = page - alloc->pages; |
925 | page_addr = (uintptr_t)alloc->buffer + index * PAGE_SIZE; | 926 | page_addr = (uintptr_t)alloc->buffer + index * PAGE_SIZE; |
926 | if (alloc->vma) { | 927 | vma = alloc->vma; |
928 | if (vma) { | ||
927 | mm = get_task_mm(alloc->tsk); | 929 | mm = get_task_mm(alloc->tsk); |
928 | if (!mm) | 930 | if (!mm) |
929 | goto err_get_task_mm_failed; | 931 | goto err_get_task_mm_failed; |
930 | if (!down_write_trylock(&mm->mmap_sem)) | 932 | if (!down_write_trylock(&mm->mmap_sem)) |
931 | goto err_down_write_mmap_sem_failed; | 933 | goto err_down_write_mmap_sem_failed; |
934 | } | ||
935 | |||
936 | list_lru_isolate(lru, item); | ||
937 | spin_unlock(lock); | ||
932 | 938 | ||
939 | if (vma) { | ||
933 | trace_binder_unmap_user_start(alloc, index); | 940 | trace_binder_unmap_user_start(alloc, index); |
934 | 941 | ||
935 | zap_page_range(alloc->vma, | 942 | zap_page_range(vma, |
936 | page_addr + alloc->user_buffer_offset, | 943 | page_addr + alloc->user_buffer_offset, |
937 | PAGE_SIZE); | 944 | PAGE_SIZE); |
938 | 945 | ||
@@ -950,13 +957,12 @@ enum lru_status binder_alloc_free_page(struct list_head *item, | |||
950 | 957 | ||
951 | trace_binder_unmap_kernel_end(alloc, index); | 958 | trace_binder_unmap_kernel_end(alloc, index); |
952 | 959 | ||
953 | list_lru_isolate(lru, item); | 960 | spin_lock(lock); |
954 | |||
955 | mutex_unlock(&alloc->mutex); | 961 | mutex_unlock(&alloc->mutex); |
956 | return LRU_REMOVED; | 962 | return LRU_REMOVED_RETRY; |
957 | 963 | ||
958 | err_down_write_mmap_sem_failed: | 964 | err_down_write_mmap_sem_failed: |
959 | mmput(mm); | 965 | mmput_async(mm); |
960 | err_get_task_mm_failed: | 966 | err_get_task_mm_failed: |
961 | err_page_already_freed: | 967 | err_page_already_freed: |
962 | mutex_unlock(&alloc->mutex); | 968 | mutex_unlock(&alloc->mutex); |
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c index cb9b0e9090e3..9f78bb03bb76 100644 --- a/drivers/ata/ahci.c +++ b/drivers/ata/ahci.c | |||
@@ -621,8 +621,11 @@ static void ahci_pci_save_initial_config(struct pci_dev *pdev, | |||
621 | static int ahci_pci_reset_controller(struct ata_host *host) | 621 | static int ahci_pci_reset_controller(struct ata_host *host) |
622 | { | 622 | { |
623 | struct pci_dev *pdev = to_pci_dev(host->dev); | 623 | struct pci_dev *pdev = to_pci_dev(host->dev); |
624 | int rc; | ||
624 | 625 | ||
625 | ahci_reset_controller(host); | 626 | rc = ahci_reset_controller(host); |
627 | if (rc) | ||
628 | return rc; | ||
626 | 629 | ||
627 | if (pdev->vendor == PCI_VENDOR_ID_INTEL) { | 630 | if (pdev->vendor == PCI_VENDOR_ID_INTEL) { |
628 | struct ahci_host_priv *hpriv = host->private_data; | 631 | struct ahci_host_priv *hpriv = host->private_data; |
diff --git a/drivers/ata/ata_piix.c b/drivers/ata/ata_piix.c index 8401c3b5be92..b702c20fbc2b 100644 --- a/drivers/ata/ata_piix.c +++ b/drivers/ata/ata_piix.c | |||
@@ -492,6 +492,7 @@ static const struct ich_laptop ich_laptop[] = { | |||
492 | { 0x27DF, 0x152D, 0x0778 }, /* ICH7 on unknown Intel */ | 492 | { 0x27DF, 0x152D, 0x0778 }, /* ICH7 on unknown Intel */ |
493 | { 0x24CA, 0x1025, 0x0061 }, /* ICH4 on ACER Aspire 2023WLMi */ | 493 | { 0x24CA, 0x1025, 0x0061 }, /* ICH4 on ACER Aspire 2023WLMi */ |
494 | { 0x24CA, 0x1025, 0x003d }, /* ICH4 on ACER TM290 */ | 494 | { 0x24CA, 0x1025, 0x003d }, /* ICH4 on ACER TM290 */ |
495 | { 0x24CA, 0x10CF, 0x11AB }, /* ICH4M on Fujitsu-Siemens Lifebook S6120 */ | ||
495 | { 0x266F, 0x1025, 0x0066 }, /* ICH6 on ACER Aspire 1694WLMi */ | 496 | { 0x266F, 0x1025, 0x0066 }, /* ICH6 on ACER Aspire 1694WLMi */ |
496 | { 0x2653, 0x1043, 0x82D8 }, /* ICH6M on Asus Eee 701 */ | 497 | { 0x2653, 0x1043, 0x82D8 }, /* ICH6M on Asus Eee 701 */ |
497 | { 0x27df, 0x104d, 0x900e }, /* ICH7 on Sony TZ-90 */ | 498 | { 0x27df, 0x104d, 0x900e }, /* ICH7 on Sony TZ-90 */ |
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c index 1945a8ea2099..ee4c1ec9dca0 100644 --- a/drivers/ata/libata-core.c +++ b/drivers/ata/libata-core.c | |||
@@ -3234,19 +3234,19 @@ static const struct ata_timing ata_timing[] = { | |||
3234 | }; | 3234 | }; |
3235 | 3235 | ||
3236 | #define ENOUGH(v, unit) (((v)-1)/(unit)+1) | 3236 | #define ENOUGH(v, unit) (((v)-1)/(unit)+1) |
3237 | #define EZ(v, unit) ((v)?ENOUGH(v, unit):0) | 3237 | #define EZ(v, unit) ((v)?ENOUGH(((v) * 1000), unit):0) |
3238 | 3238 | ||
3239 | static void ata_timing_quantize(const struct ata_timing *t, struct ata_timing *q, int T, int UT) | 3239 | static void ata_timing_quantize(const struct ata_timing *t, struct ata_timing *q, int T, int UT) |
3240 | { | 3240 | { |
3241 | q->setup = EZ(t->setup * 1000, T); | 3241 | q->setup = EZ(t->setup, T); |
3242 | q->act8b = EZ(t->act8b * 1000, T); | 3242 | q->act8b = EZ(t->act8b, T); |
3243 | q->rec8b = EZ(t->rec8b * 1000, T); | 3243 | q->rec8b = EZ(t->rec8b, T); |
3244 | q->cyc8b = EZ(t->cyc8b * 1000, T); | 3244 | q->cyc8b = EZ(t->cyc8b, T); |
3245 | q->active = EZ(t->active * 1000, T); | 3245 | q->active = EZ(t->active, T); |
3246 | q->recover = EZ(t->recover * 1000, T); | 3246 | q->recover = EZ(t->recover, T); |
3247 | q->dmack_hold = EZ(t->dmack_hold * 1000, T); | 3247 | q->dmack_hold = EZ(t->dmack_hold, T); |
3248 | q->cycle = EZ(t->cycle * 1000, T); | 3248 | q->cycle = EZ(t->cycle, T); |
3249 | q->udma = EZ(t->udma * 1000, UT); | 3249 | q->udma = EZ(t->udma, UT); |
3250 | } | 3250 | } |
3251 | 3251 | ||
3252 | void ata_timing_merge(const struct ata_timing *a, const struct ata_timing *b, | 3252 | void ata_timing_merge(const struct ata_timing *a, const struct ata_timing *b, |
diff --git a/drivers/auxdisplay/charlcd.c b/drivers/auxdisplay/charlcd.c index cfeb049a01ef..642afd88870b 100644 --- a/drivers/auxdisplay/charlcd.c +++ b/drivers/auxdisplay/charlcd.c | |||
@@ -647,18 +647,25 @@ static ssize_t charlcd_write(struct file *file, const char __user *buf, | |||
647 | static int charlcd_open(struct inode *inode, struct file *file) | 647 | static int charlcd_open(struct inode *inode, struct file *file) |
648 | { | 648 | { |
649 | struct charlcd_priv *priv = to_priv(the_charlcd); | 649 | struct charlcd_priv *priv = to_priv(the_charlcd); |
650 | int ret; | ||
650 | 651 | ||
652 | ret = -EBUSY; | ||
651 | if (!atomic_dec_and_test(&charlcd_available)) | 653 | if (!atomic_dec_and_test(&charlcd_available)) |
652 | return -EBUSY; /* open only once at a time */ | 654 | goto fail; /* open only once at a time */ |
653 | 655 | ||
656 | ret = -EPERM; | ||
654 | if (file->f_mode & FMODE_READ) /* device is write-only */ | 657 | if (file->f_mode & FMODE_READ) /* device is write-only */ |
655 | return -EPERM; | 658 | goto fail; |
656 | 659 | ||
657 | if (priv->must_clear) { | 660 | if (priv->must_clear) { |
658 | charlcd_clear_display(&priv->lcd); | 661 | charlcd_clear_display(&priv->lcd); |
659 | priv->must_clear = false; | 662 | priv->must_clear = false; |
660 | } | 663 | } |
661 | return nonseekable_open(inode, file); | 664 | return nonseekable_open(inode, file); |
665 | |||
666 | fail: | ||
667 | atomic_inc(&charlcd_available); | ||
668 | return ret; | ||
662 | } | 669 | } |
663 | 670 | ||
664 | static int charlcd_release(struct inode *inode, struct file *file) | 671 | static int charlcd_release(struct inode *inode, struct file *file) |
diff --git a/drivers/auxdisplay/panel.c b/drivers/auxdisplay/panel.c index df126dcdaf18..6911acd896d9 100644 --- a/drivers/auxdisplay/panel.c +++ b/drivers/auxdisplay/panel.c | |||
@@ -1105,14 +1105,21 @@ static ssize_t keypad_read(struct file *file, | |||
1105 | 1105 | ||
1106 | static int keypad_open(struct inode *inode, struct file *file) | 1106 | static int keypad_open(struct inode *inode, struct file *file) |
1107 | { | 1107 | { |
1108 | int ret; | ||
1109 | |||
1110 | ret = -EBUSY; | ||
1108 | if (!atomic_dec_and_test(&keypad_available)) | 1111 | if (!atomic_dec_and_test(&keypad_available)) |
1109 | return -EBUSY; /* open only once at a time */ | 1112 | goto fail; /* open only once at a time */ |
1110 | 1113 | ||
1114 | ret = -EPERM; | ||
1111 | if (file->f_mode & FMODE_WRITE) /* device is read-only */ | 1115 | if (file->f_mode & FMODE_WRITE) /* device is read-only */ |
1112 | return -EPERM; | 1116 | goto fail; |
1113 | 1117 | ||
1114 | keypad_buflen = 0; /* flush the buffer on opening */ | 1118 | keypad_buflen = 0; /* flush the buffer on opening */ |
1115 | return 0; | 1119 | return 0; |
1120 | fail: | ||
1121 | atomic_inc(&keypad_available); | ||
1122 | return ret; | ||
1116 | } | 1123 | } |
1117 | 1124 | ||
1118 | static int keypad_release(struct inode *inode, struct file *file) | 1125 | static int keypad_release(struct inode *inode, struct file *file) |
diff --git a/drivers/base/arch_topology.c b/drivers/base/arch_topology.c index 41be9ff7d70a..6df7d6676a48 100644 --- a/drivers/base/arch_topology.c +++ b/drivers/base/arch_topology.c | |||
@@ -166,11 +166,11 @@ bool __init topology_parse_cpu_capacity(struct device_node *cpu_node, int cpu) | |||
166 | } | 166 | } |
167 | 167 | ||
168 | #ifdef CONFIG_CPU_FREQ | 168 | #ifdef CONFIG_CPU_FREQ |
169 | static cpumask_var_t cpus_to_visit; | 169 | static cpumask_var_t cpus_to_visit __initdata; |
170 | static void parsing_done_workfn(struct work_struct *work); | 170 | static void __init parsing_done_workfn(struct work_struct *work); |
171 | static DECLARE_WORK(parsing_done_work, parsing_done_workfn); | 171 | static __initdata DECLARE_WORK(parsing_done_work, parsing_done_workfn); |
172 | 172 | ||
173 | static int | 173 | static int __init |
174 | init_cpu_capacity_callback(struct notifier_block *nb, | 174 | init_cpu_capacity_callback(struct notifier_block *nb, |
175 | unsigned long val, | 175 | unsigned long val, |
176 | void *data) | 176 | void *data) |
@@ -206,7 +206,7 @@ init_cpu_capacity_callback(struct notifier_block *nb, | |||
206 | return 0; | 206 | return 0; |
207 | } | 207 | } |
208 | 208 | ||
209 | static struct notifier_block init_cpu_capacity_notifier = { | 209 | static struct notifier_block init_cpu_capacity_notifier __initdata = { |
210 | .notifier_call = init_cpu_capacity_callback, | 210 | .notifier_call = init_cpu_capacity_callback, |
211 | }; | 211 | }; |
212 | 212 | ||
@@ -232,7 +232,7 @@ static int __init register_cpufreq_notifier(void) | |||
232 | } | 232 | } |
233 | core_initcall(register_cpufreq_notifier); | 233 | core_initcall(register_cpufreq_notifier); |
234 | 234 | ||
235 | static void parsing_done_workfn(struct work_struct *work) | 235 | static void __init parsing_done_workfn(struct work_struct *work) |
236 | { | 236 | { |
237 | cpufreq_unregister_notifier(&init_cpu_capacity_notifier, | 237 | cpufreq_unregister_notifier(&init_cpu_capacity_notifier, |
238 | CPUFREQ_POLICY_NOTIFIER); | 238 | CPUFREQ_POLICY_NOTIFIER); |
diff --git a/drivers/base/dma-coherent.c b/drivers/base/dma-coherent.c index a39b2166b145..744f64f43454 100644 --- a/drivers/base/dma-coherent.c +++ b/drivers/base/dma-coherent.c | |||
@@ -348,16 +348,15 @@ static int rmem_dma_device_init(struct reserved_mem *rmem, struct device *dev) | |||
348 | struct dma_coherent_mem *mem = rmem->priv; | 348 | struct dma_coherent_mem *mem = rmem->priv; |
349 | int ret; | 349 | int ret; |
350 | 350 | ||
351 | if (!mem) | 351 | if (!mem) { |
352 | return -ENODEV; | 352 | ret = dma_init_coherent_memory(rmem->base, rmem->base, |
353 | 353 | rmem->size, | |
354 | ret = dma_init_coherent_memory(rmem->base, rmem->base, rmem->size, | 354 | DMA_MEMORY_EXCLUSIVE, &mem); |
355 | DMA_MEMORY_EXCLUSIVE, &mem); | 355 | if (ret) { |
356 | 356 | pr_err("Reserved memory: failed to init DMA memory pool at %pa, size %ld MiB\n", | |
357 | if (ret) { | 357 | &rmem->base, (unsigned long)rmem->size / SZ_1M); |
358 | pr_err("Reserved memory: failed to init DMA memory pool at %pa, size %ld MiB\n", | 358 | return ret; |
359 | &rmem->base, (unsigned long)rmem->size / SZ_1M); | 359 | } |
360 | return ret; | ||
361 | } | 360 | } |
362 | mem->use_dev_dma_pfn_offset = true; | 361 | mem->use_dev_dma_pfn_offset = true; |
363 | rmem->priv = mem; | 362 | rmem->priv = mem; |
diff --git a/drivers/base/platform.c b/drivers/base/platform.c index d1bd99271066..9045c5f3734e 100644 --- a/drivers/base/platform.c +++ b/drivers/base/platform.c | |||
@@ -868,7 +868,8 @@ static ssize_t driver_override_store(struct device *dev, | |||
868 | struct platform_device *pdev = to_platform_device(dev); | 868 | struct platform_device *pdev = to_platform_device(dev); |
869 | char *driver_override, *old, *cp; | 869 | char *driver_override, *old, *cp; |
870 | 870 | ||
871 | if (count > PATH_MAX) | 871 | /* We need to keep extra room for a newline */ |
872 | if (count >= (PAGE_SIZE - 1)) | ||
872 | return -EINVAL; | 873 | return -EINVAL; |
873 | 874 | ||
874 | driver_override = kstrndup(buf, count, GFP_KERNEL); | 875 | driver_override = kstrndup(buf, count, GFP_KERNEL); |
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c index ea1732ed7a9d..770b1539a083 100644 --- a/drivers/base/power/main.c +++ b/drivers/base/power/main.c | |||
@@ -1860,10 +1860,13 @@ void device_pm_check_callbacks(struct device *dev) | |||
1860 | { | 1860 | { |
1861 | spin_lock_irq(&dev->power.lock); | 1861 | spin_lock_irq(&dev->power.lock); |
1862 | dev->power.no_pm_callbacks = | 1862 | dev->power.no_pm_callbacks = |
1863 | (!dev->bus || pm_ops_is_empty(dev->bus->pm)) && | 1863 | (!dev->bus || (pm_ops_is_empty(dev->bus->pm) && |
1864 | (!dev->class || pm_ops_is_empty(dev->class->pm)) && | 1864 | !dev->bus->suspend && !dev->bus->resume)) && |
1865 | (!dev->class || (pm_ops_is_empty(dev->class->pm) && | ||
1866 | !dev->class->suspend && !dev->class->resume)) && | ||
1865 | (!dev->type || pm_ops_is_empty(dev->type->pm)) && | 1867 | (!dev->type || pm_ops_is_empty(dev->type->pm)) && |
1866 | (!dev->pm_domain || pm_ops_is_empty(&dev->pm_domain->ops)) && | 1868 | (!dev->pm_domain || pm_ops_is_empty(&dev->pm_domain->ops)) && |
1867 | (!dev->driver || pm_ops_is_empty(dev->driver->pm)); | 1869 | (!dev->driver || (pm_ops_is_empty(dev->driver->pm) && |
1870 | !dev->driver->suspend && !dev->driver->resume)); | ||
1868 | spin_unlock_irq(&dev->power.lock); | 1871 | spin_unlock_irq(&dev->power.lock); |
1869 | } | 1872 | } |
diff --git a/drivers/base/power/opp/core.c b/drivers/base/power/opp/core.c index a8cc14fd8ae4..a6de32530693 100644 --- a/drivers/base/power/opp/core.c +++ b/drivers/base/power/opp/core.c | |||
@@ -1581,6 +1581,9 @@ static int _opp_set_availability(struct device *dev, unsigned long freq, | |||
1581 | 1581 | ||
1582 | opp->available = availability_req; | 1582 | opp->available = availability_req; |
1583 | 1583 | ||
1584 | dev_pm_opp_get(opp); | ||
1585 | mutex_unlock(&opp_table->lock); | ||
1586 | |||
1584 | /* Notify the change of the OPP availability */ | 1587 | /* Notify the change of the OPP availability */ |
1585 | if (availability_req) | 1588 | if (availability_req) |
1586 | blocking_notifier_call_chain(&opp_table->head, OPP_EVENT_ENABLE, | 1589 | blocking_notifier_call_chain(&opp_table->head, OPP_EVENT_ENABLE, |
@@ -1589,8 +1592,12 @@ static int _opp_set_availability(struct device *dev, unsigned long freq, | |||
1589 | blocking_notifier_call_chain(&opp_table->head, | 1592 | blocking_notifier_call_chain(&opp_table->head, |
1590 | OPP_EVENT_DISABLE, opp); | 1593 | OPP_EVENT_DISABLE, opp); |
1591 | 1594 | ||
1595 | dev_pm_opp_put(opp); | ||
1596 | goto put_table; | ||
1597 | |||
1592 | unlock: | 1598 | unlock: |
1593 | mutex_unlock(&opp_table->lock); | 1599 | mutex_unlock(&opp_table->lock); |
1600 | put_table: | ||
1594 | dev_pm_opp_put_opp_table(opp_table); | 1601 | dev_pm_opp_put_opp_table(opp_table); |
1595 | return r; | 1602 | return r; |
1596 | } | 1603 | } |
diff --git a/drivers/base/power/qos.c b/drivers/base/power/qos.c index f850daeffba4..277d43a83f53 100644 --- a/drivers/base/power/qos.c +++ b/drivers/base/power/qos.c | |||
@@ -277,11 +277,11 @@ void dev_pm_qos_constraints_destroy(struct device *dev) | |||
277 | mutex_unlock(&dev_pm_qos_sysfs_mtx); | 277 | mutex_unlock(&dev_pm_qos_sysfs_mtx); |
278 | } | 278 | } |
279 | 279 | ||
280 | static bool dev_pm_qos_invalid_request(struct device *dev, | 280 | static bool dev_pm_qos_invalid_req_type(struct device *dev, |
281 | struct dev_pm_qos_request *req) | 281 | enum dev_pm_qos_req_type type) |
282 | { | 282 | { |
283 | return !req || (req->type == DEV_PM_QOS_LATENCY_TOLERANCE | 283 | return type == DEV_PM_QOS_LATENCY_TOLERANCE && |
284 | && !dev->power.set_latency_tolerance); | 284 | !dev->power.set_latency_tolerance; |
285 | } | 285 | } |
286 | 286 | ||
287 | static int __dev_pm_qos_add_request(struct device *dev, | 287 | static int __dev_pm_qos_add_request(struct device *dev, |
@@ -290,7 +290,7 @@ static int __dev_pm_qos_add_request(struct device *dev, | |||
290 | { | 290 | { |
291 | int ret = 0; | 291 | int ret = 0; |
292 | 292 | ||
293 | if (!dev || dev_pm_qos_invalid_request(dev, req)) | 293 | if (!dev || !req || dev_pm_qos_invalid_req_type(dev, type)) |
294 | return -EINVAL; | 294 | return -EINVAL; |
295 | 295 | ||
296 | if (WARN(dev_pm_qos_request_active(req), | 296 | if (WARN(dev_pm_qos_request_active(req), |
diff --git a/drivers/block/Kconfig b/drivers/block/Kconfig index 4a438b8abe27..2dfe99b328f8 100644 --- a/drivers/block/Kconfig +++ b/drivers/block/Kconfig | |||
@@ -17,7 +17,7 @@ if BLK_DEV | |||
17 | 17 | ||
18 | config BLK_DEV_NULL_BLK | 18 | config BLK_DEV_NULL_BLK |
19 | tristate "Null test block driver" | 19 | tristate "Null test block driver" |
20 | depends on CONFIGFS_FS | 20 | select CONFIGFS_FS |
21 | 21 | ||
22 | config BLK_DEV_FD | 22 | config BLK_DEV_FD |
23 | tristate "Normal floppy disk support" | 23 | tristate "Normal floppy disk support" |
diff --git a/drivers/block/brd.c b/drivers/block/brd.c index bbd0d186cfc0..2d7178f7754e 100644 --- a/drivers/block/brd.c +++ b/drivers/block/brd.c | |||
@@ -342,7 +342,7 @@ static long __brd_direct_access(struct brd_device *brd, pgoff_t pgoff, | |||
342 | 342 | ||
343 | if (!brd) | 343 | if (!brd) |
344 | return -ENODEV; | 344 | return -ENODEV; |
345 | page = brd_insert_page(brd, PFN_PHYS(pgoff) / 512); | 345 | page = brd_insert_page(brd, (sector_t)pgoff << PAGE_SECTORS_SHIFT); |
346 | if (!page) | 346 | if (!page) |
347 | return -ENOSPC; | 347 | return -ENOSPC; |
348 | *kaddr = page_address(page); | 348 | *kaddr = page_address(page); |
diff --git a/drivers/block/loop.h b/drivers/block/loop.h index f68c1d50802f..1f3956702993 100644 --- a/drivers/block/loop.h +++ b/drivers/block/loop.h | |||
@@ -67,10 +67,8 @@ struct loop_device { | |||
67 | struct loop_cmd { | 67 | struct loop_cmd { |
68 | struct kthread_work work; | 68 | struct kthread_work work; |
69 | struct request *rq; | 69 | struct request *rq; |
70 | union { | 70 | bool use_aio; /* use AIO interface to handle I/O */ |
71 | bool use_aio; /* use AIO interface to handle I/O */ | 71 | atomic_t ref; /* only for aio */ |
72 | atomic_t ref; /* only for aio */ | ||
73 | }; | ||
74 | long ret; | 72 | long ret; |
75 | struct kiocb iocb; | 73 | struct kiocb iocb; |
76 | struct bio_vec *bvec; | 74 | struct bio_vec *bvec; |
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c index 2aa87cbdede0..883dfebd3014 100644 --- a/drivers/block/nbd.c +++ b/drivers/block/nbd.c | |||
@@ -820,9 +820,13 @@ static blk_status_t nbd_queue_rq(struct blk_mq_hw_ctx *hctx, | |||
820 | * appropriate. | 820 | * appropriate. |
821 | */ | 821 | */ |
822 | ret = nbd_handle_cmd(cmd, hctx->queue_num); | 822 | ret = nbd_handle_cmd(cmd, hctx->queue_num); |
823 | if (ret < 0) | ||
824 | ret = BLK_STS_IOERR; | ||
825 | else if (!ret) | ||
826 | ret = BLK_STS_OK; | ||
823 | complete(&cmd->send_complete); | 827 | complete(&cmd->send_complete); |
824 | 828 | ||
825 | return ret < 0 ? BLK_STS_IOERR : BLK_STS_OK; | 829 | return ret; |
826 | } | 830 | } |
827 | 831 | ||
828 | static int nbd_add_socket(struct nbd_device *nbd, unsigned long arg, | 832 | static int nbd_add_socket(struct nbd_device *nbd, unsigned long arg, |
@@ -1194,6 +1198,12 @@ static int nbd_ioctl(struct block_device *bdev, fmode_t mode, | |||
1194 | if (!capable(CAP_SYS_ADMIN)) | 1198 | if (!capable(CAP_SYS_ADMIN)) |
1195 | return -EPERM; | 1199 | return -EPERM; |
1196 | 1200 | ||
1201 | /* The block layer will pass back some non-nbd ioctls in case we have | ||
1202 | * special handling for them, but we don't so just return an error. | ||
1203 | */ | ||
1204 | if (_IOC_TYPE(cmd) != 0xab) | ||
1205 | return -EINVAL; | ||
1206 | |||
1197 | mutex_lock(&nbd->config_lock); | 1207 | mutex_lock(&nbd->config_lock); |
1198 | 1208 | ||
1199 | /* Don't allow ioctl operations on a nbd device that was created with | 1209 | /* Don't allow ioctl operations on a nbd device that was created with |
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c index 2981c27d3aae..f149d3e61234 100644 --- a/drivers/block/zram/zram_drv.c +++ b/drivers/block/zram/zram_drv.c | |||
@@ -766,27 +766,6 @@ static void zram_slot_unlock(struct zram *zram, u32 index) | |||
766 | bit_spin_unlock(ZRAM_ACCESS, &zram->table[index].value); | 766 | bit_spin_unlock(ZRAM_ACCESS, &zram->table[index].value); |
767 | } | 767 | } |
768 | 768 | ||
769 | static bool zram_same_page_read(struct zram *zram, u32 index, | ||
770 | struct page *page, | ||
771 | unsigned int offset, unsigned int len) | ||
772 | { | ||
773 | zram_slot_lock(zram, index); | ||
774 | if (unlikely(!zram_get_handle(zram, index) || | ||
775 | zram_test_flag(zram, index, ZRAM_SAME))) { | ||
776 | void *mem; | ||
777 | |||
778 | zram_slot_unlock(zram, index); | ||
779 | mem = kmap_atomic(page); | ||
780 | zram_fill_page(mem + offset, len, | ||
781 | zram_get_element(zram, index)); | ||
782 | kunmap_atomic(mem); | ||
783 | return true; | ||
784 | } | ||
785 | zram_slot_unlock(zram, index); | ||
786 | |||
787 | return false; | ||
788 | } | ||
789 | |||
790 | static void zram_meta_free(struct zram *zram, u64 disksize) | 769 | static void zram_meta_free(struct zram *zram, u64 disksize) |
791 | { | 770 | { |
792 | size_t num_pages = disksize >> PAGE_SHIFT; | 771 | size_t num_pages = disksize >> PAGE_SHIFT; |
@@ -884,11 +863,20 @@ static int __zram_bvec_read(struct zram *zram, struct page *page, u32 index, | |||
884 | zram_slot_unlock(zram, index); | 863 | zram_slot_unlock(zram, index); |
885 | } | 864 | } |
886 | 865 | ||
887 | if (zram_same_page_read(zram, index, page, 0, PAGE_SIZE)) | ||
888 | return 0; | ||
889 | |||
890 | zram_slot_lock(zram, index); | 866 | zram_slot_lock(zram, index); |
891 | handle = zram_get_handle(zram, index); | 867 | handle = zram_get_handle(zram, index); |
868 | if (!handle || zram_test_flag(zram, index, ZRAM_SAME)) { | ||
869 | unsigned long value; | ||
870 | void *mem; | ||
871 | |||
872 | value = handle ? zram_get_element(zram, index) : 0; | ||
873 | mem = kmap_atomic(page); | ||
874 | zram_fill_page(mem, PAGE_SIZE, value); | ||
875 | kunmap_atomic(mem); | ||
876 | zram_slot_unlock(zram, index); | ||
877 | return 0; | ||
878 | } | ||
879 | |||
892 | size = zram_get_obj_size(zram, index); | 880 | size = zram_get_obj_size(zram, index); |
893 | 881 | ||
894 | src = zs_map_object(zram->mem_pool, handle, ZS_MM_RO); | 882 | src = zs_map_object(zram->mem_pool, handle, ZS_MM_RO); |
diff --git a/drivers/char/tpm/tpm-interface.c b/drivers/char/tpm/tpm-interface.c index fe597e6c55c4..1d6729be4cd6 100644 --- a/drivers/char/tpm/tpm-interface.c +++ b/drivers/char/tpm/tpm-interface.c | |||
@@ -455,7 +455,7 @@ ssize_t tpm_transmit(struct tpm_chip *chip, struct tpm_space *space, | |||
455 | goto out; | 455 | goto out; |
456 | } | 456 | } |
457 | 457 | ||
458 | msleep(TPM_TIMEOUT); /* CHECK */ | 458 | tpm_msleep(TPM_TIMEOUT); |
459 | rmb(); | 459 | rmb(); |
460 | } while (time_before(jiffies, stop)); | 460 | } while (time_before(jiffies, stop)); |
461 | 461 | ||
@@ -970,7 +970,7 @@ int tpm_do_selftest(struct tpm_chip *chip) | |||
970 | dev_info( | 970 | dev_info( |
971 | &chip->dev, HW_ERR | 971 | &chip->dev, HW_ERR |
972 | "TPM command timed out during continue self test"); | 972 | "TPM command timed out during continue self test"); |
973 | msleep(delay_msec); | 973 | tpm_msleep(delay_msec); |
974 | continue; | 974 | continue; |
975 | } | 975 | } |
976 | 976 | ||
@@ -985,7 +985,7 @@ int tpm_do_selftest(struct tpm_chip *chip) | |||
985 | } | 985 | } |
986 | if (rc != TPM_WARN_DOING_SELFTEST) | 986 | if (rc != TPM_WARN_DOING_SELFTEST) |
987 | return rc; | 987 | return rc; |
988 | msleep(delay_msec); | 988 | tpm_msleep(delay_msec); |
989 | } while (--loops > 0); | 989 | } while (--loops > 0); |
990 | 990 | ||
991 | return rc; | 991 | return rc; |
@@ -1085,7 +1085,7 @@ again: | |||
1085 | } | 1085 | } |
1086 | } else { | 1086 | } else { |
1087 | do { | 1087 | do { |
1088 | msleep(TPM_TIMEOUT); | 1088 | tpm_msleep(TPM_TIMEOUT); |
1089 | status = chip->ops->status(chip); | 1089 | status = chip->ops->status(chip); |
1090 | if ((status & mask) == mask) | 1090 | if ((status & mask) == mask) |
1091 | return 0; | 1091 | return 0; |
@@ -1150,7 +1150,7 @@ int tpm_pm_suspend(struct device *dev) | |||
1150 | */ | 1150 | */ |
1151 | if (rc != TPM_WARN_RETRY) | 1151 | if (rc != TPM_WARN_RETRY) |
1152 | break; | 1152 | break; |
1153 | msleep(TPM_TIMEOUT_RETRY); | 1153 | tpm_msleep(TPM_TIMEOUT_RETRY); |
1154 | } | 1154 | } |
1155 | 1155 | ||
1156 | if (rc) | 1156 | if (rc) |
diff --git a/drivers/char/tpm/tpm.h b/drivers/char/tpm/tpm.h index 04fbff2edbf3..2d5466a72e40 100644 --- a/drivers/char/tpm/tpm.h +++ b/drivers/char/tpm/tpm.h | |||
@@ -50,7 +50,8 @@ enum tpm_const { | |||
50 | 50 | ||
51 | enum tpm_timeout { | 51 | enum tpm_timeout { |
52 | TPM_TIMEOUT = 5, /* msecs */ | 52 | TPM_TIMEOUT = 5, /* msecs */ |
53 | TPM_TIMEOUT_RETRY = 100 /* msecs */ | 53 | TPM_TIMEOUT_RETRY = 100, /* msecs */ |
54 | TPM_TIMEOUT_RANGE_US = 300 /* usecs */ | ||
54 | }; | 55 | }; |
55 | 56 | ||
56 | /* TPM addresses */ | 57 | /* TPM addresses */ |
@@ -527,6 +528,12 @@ int tpm_pm_resume(struct device *dev); | |||
527 | int wait_for_tpm_stat(struct tpm_chip *chip, u8 mask, unsigned long timeout, | 528 | int wait_for_tpm_stat(struct tpm_chip *chip, u8 mask, unsigned long timeout, |
528 | wait_queue_head_t *queue, bool check_cancel); | 529 | wait_queue_head_t *queue, bool check_cancel); |
529 | 530 | ||
531 | static inline void tpm_msleep(unsigned int delay_msec) | ||
532 | { | ||
533 | usleep_range(delay_msec * 1000, | ||
534 | (delay_msec * 1000) + TPM_TIMEOUT_RANGE_US); | ||
535 | }; | ||
536 | |||
530 | struct tpm_chip *tpm_chip_find_get(int chip_num); | 537 | struct tpm_chip *tpm_chip_find_get(int chip_num); |
531 | __must_check int tpm_try_get_ops(struct tpm_chip *chip); | 538 | __must_check int tpm_try_get_ops(struct tpm_chip *chip); |
532 | void tpm_put_ops(struct tpm_chip *chip); | 539 | void tpm_put_ops(struct tpm_chip *chip); |
diff --git a/drivers/char/tpm/tpm2-cmd.c b/drivers/char/tpm/tpm2-cmd.c index f7f34b2aa981..e1a41b788f08 100644 --- a/drivers/char/tpm/tpm2-cmd.c +++ b/drivers/char/tpm/tpm2-cmd.c | |||
@@ -899,7 +899,7 @@ static int tpm2_do_selftest(struct tpm_chip *chip) | |||
899 | if (rc != TPM2_RC_TESTING) | 899 | if (rc != TPM2_RC_TESTING) |
900 | break; | 900 | break; |
901 | 901 | ||
902 | msleep(delay_msec); | 902 | tpm_msleep(delay_msec); |
903 | } | 903 | } |
904 | 904 | ||
905 | return rc; | 905 | return rc; |
diff --git a/drivers/char/tpm/tpm_crb.c b/drivers/char/tpm/tpm_crb.c index a4ac63a21d8a..8f0a98dea327 100644 --- a/drivers/char/tpm/tpm_crb.c +++ b/drivers/char/tpm/tpm_crb.c | |||
@@ -665,7 +665,7 @@ static const struct dev_pm_ops crb_pm = { | |||
665 | SET_RUNTIME_PM_OPS(crb_pm_runtime_suspend, crb_pm_runtime_resume, NULL) | 665 | SET_RUNTIME_PM_OPS(crb_pm_runtime_suspend, crb_pm_runtime_resume, NULL) |
666 | }; | 666 | }; |
667 | 667 | ||
668 | static struct acpi_device_id crb_device_ids[] = { | 668 | static const struct acpi_device_id crb_device_ids[] = { |
669 | {"MSFT0101", 0}, | 669 | {"MSFT0101", 0}, |
670 | {"", 0}, | 670 | {"", 0}, |
671 | }; | 671 | }; |
diff --git a/drivers/char/tpm/tpm_ibmvtpm.c b/drivers/char/tpm/tpm_ibmvtpm.c index f01d083eced2..25f6e2665385 100644 --- a/drivers/char/tpm/tpm_ibmvtpm.c +++ b/drivers/char/tpm/tpm_ibmvtpm.c | |||
@@ -32,26 +32,70 @@ | |||
32 | 32 | ||
33 | static const char tpm_ibmvtpm_driver_name[] = "tpm_ibmvtpm"; | 33 | static const char tpm_ibmvtpm_driver_name[] = "tpm_ibmvtpm"; |
34 | 34 | ||
35 | static struct vio_device_id tpm_ibmvtpm_device_table[] = { | 35 | static const struct vio_device_id tpm_ibmvtpm_device_table[] = { |
36 | { "IBM,vtpm", "IBM,vtpm"}, | 36 | { "IBM,vtpm", "IBM,vtpm"}, |
37 | { "", "" } | 37 | { "", "" } |
38 | }; | 38 | }; |
39 | MODULE_DEVICE_TABLE(vio, tpm_ibmvtpm_device_table); | 39 | MODULE_DEVICE_TABLE(vio, tpm_ibmvtpm_device_table); |
40 | 40 | ||
41 | /** | 41 | /** |
42 | * | ||
43 | * ibmvtpm_send_crq_word - Send a CRQ request | ||
44 | * @vdev: vio device struct | ||
45 | * @w1: pre-constructed first word of tpm crq (second word is reserved) | ||
46 | * | ||
47 | * Return: | ||
48 | * 0 - Success | ||
49 | * Non-zero - Failure | ||
50 | */ | ||
51 | static int ibmvtpm_send_crq_word(struct vio_dev *vdev, u64 w1) | ||
52 | { | ||
53 | return plpar_hcall_norets(H_SEND_CRQ, vdev->unit_address, w1, 0); | ||
54 | } | ||
55 | |||
56 | /** | ||
57 | * | ||
42 | * ibmvtpm_send_crq - Send a CRQ request | 58 | * ibmvtpm_send_crq - Send a CRQ request |
43 | * | 59 | * |
44 | * @vdev: vio device struct | 60 | * @vdev: vio device struct |
45 | * @w1: first word | 61 | * @valid: Valid field |
46 | * @w2: second word | 62 | * @msg: Type field |
63 | * @len: Length field | ||
64 | * @data: Data field | ||
65 | * | ||
66 | * The ibmvtpm crq is defined as follows: | ||
67 | * | ||
68 | * Byte | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | ||
69 | * ----------------------------------------------------------------------- | ||
70 | * Word0 | Valid | Type | Length | Data | ||
71 | * ----------------------------------------------------------------------- | ||
72 | * Word1 | Reserved | ||
73 | * ----------------------------------------------------------------------- | ||
74 | * | ||
75 | * Which matches the following structure (on bigendian host): | ||
76 | * | ||
77 | * struct ibmvtpm_crq { | ||
78 | * u8 valid; | ||
79 | * u8 msg; | ||
80 | * __be16 len; | ||
81 | * __be32 data; | ||
82 | * __be64 reserved; | ||
83 | * } __attribute__((packed, aligned(8))); | ||
84 | * | ||
85 | * However, the value is passed in a register so just compute the numeric value | ||
86 | * to load into the register avoiding byteswap altogether. Endian only affects | ||
87 | * memory loads and stores - registers are internally represented the same. | ||
47 | * | 88 | * |
48 | * Return: | 89 | * Return: |
49 | * 0 -Sucess | 90 | * 0 (H_SUCCESS) - Success |
50 | * Non-zero - Failure | 91 | * Non-zero - Failure |
51 | */ | 92 | */ |
52 | static int ibmvtpm_send_crq(struct vio_dev *vdev, u64 w1, u64 w2) | 93 | static int ibmvtpm_send_crq(struct vio_dev *vdev, |
94 | u8 valid, u8 msg, u16 len, u32 data) | ||
53 | { | 95 | { |
54 | return plpar_hcall_norets(H_SEND_CRQ, vdev->unit_address, w1, w2); | 96 | u64 w1 = ((u64)valid << 56) | ((u64)msg << 48) | ((u64)len << 32) | |
97 | (u64)data; | ||
98 | return ibmvtpm_send_crq_word(vdev, w1); | ||
55 | } | 99 | } |
56 | 100 | ||
57 | /** | 101 | /** |
@@ -109,8 +153,6 @@ static int tpm_ibmvtpm_recv(struct tpm_chip *chip, u8 *buf, size_t count) | |||
109 | static int tpm_ibmvtpm_send(struct tpm_chip *chip, u8 *buf, size_t count) | 153 | static int tpm_ibmvtpm_send(struct tpm_chip *chip, u8 *buf, size_t count) |
110 | { | 154 | { |
111 | struct ibmvtpm_dev *ibmvtpm = dev_get_drvdata(&chip->dev); | 155 | struct ibmvtpm_dev *ibmvtpm = dev_get_drvdata(&chip->dev); |
112 | struct ibmvtpm_crq crq; | ||
113 | __be64 *word = (__be64 *)&crq; | ||
114 | int rc, sig; | 156 | int rc, sig; |
115 | 157 | ||
116 | if (!ibmvtpm->rtce_buf) { | 158 | if (!ibmvtpm->rtce_buf) { |
@@ -137,10 +179,6 @@ static int tpm_ibmvtpm_send(struct tpm_chip *chip, u8 *buf, size_t count) | |||
137 | spin_lock(&ibmvtpm->rtce_lock); | 179 | spin_lock(&ibmvtpm->rtce_lock); |
138 | ibmvtpm->res_len = 0; | 180 | ibmvtpm->res_len = 0; |
139 | memcpy((void *)ibmvtpm->rtce_buf, (void *)buf, count); | 181 | memcpy((void *)ibmvtpm->rtce_buf, (void *)buf, count); |
140 | crq.valid = (u8)IBMVTPM_VALID_CMD; | ||
141 | crq.msg = (u8)VTPM_TPM_COMMAND; | ||
142 | crq.len = cpu_to_be16(count); | ||
143 | crq.data = cpu_to_be32(ibmvtpm->rtce_dma_handle); | ||
144 | 182 | ||
145 | /* | 183 | /* |
146 | * set the processing flag before the Hcall, since we may get the | 184 | * set the processing flag before the Hcall, since we may get the |
@@ -148,8 +186,9 @@ static int tpm_ibmvtpm_send(struct tpm_chip *chip, u8 *buf, size_t count) | |||
148 | */ | 186 | */ |
149 | ibmvtpm->tpm_processing_cmd = true; | 187 | ibmvtpm->tpm_processing_cmd = true; |
150 | 188 | ||
151 | rc = ibmvtpm_send_crq(ibmvtpm->vdev, be64_to_cpu(word[0]), | 189 | rc = ibmvtpm_send_crq(ibmvtpm->vdev, |
152 | be64_to_cpu(word[1])); | 190 | IBMVTPM_VALID_CMD, VTPM_TPM_COMMAND, |
191 | count, ibmvtpm->rtce_dma_handle); | ||
153 | if (rc != H_SUCCESS) { | 192 | if (rc != H_SUCCESS) { |
154 | dev_err(ibmvtpm->dev, "tpm_ibmvtpm_send failed rc=%d\n", rc); | 193 | dev_err(ibmvtpm->dev, "tpm_ibmvtpm_send failed rc=%d\n", rc); |
155 | rc = 0; | 194 | rc = 0; |
@@ -182,15 +221,10 @@ static u8 tpm_ibmvtpm_status(struct tpm_chip *chip) | |||
182 | */ | 221 | */ |
183 | static int ibmvtpm_crq_get_rtce_size(struct ibmvtpm_dev *ibmvtpm) | 222 | static int ibmvtpm_crq_get_rtce_size(struct ibmvtpm_dev *ibmvtpm) |
184 | { | 223 | { |
185 | struct ibmvtpm_crq crq; | ||
186 | u64 *buf = (u64 *) &crq; | ||
187 | int rc; | 224 | int rc; |
188 | 225 | ||
189 | crq.valid = (u8)IBMVTPM_VALID_CMD; | 226 | rc = ibmvtpm_send_crq(ibmvtpm->vdev, |
190 | crq.msg = (u8)VTPM_GET_RTCE_BUFFER_SIZE; | 227 | IBMVTPM_VALID_CMD, VTPM_GET_RTCE_BUFFER_SIZE, 0, 0); |
191 | |||
192 | rc = ibmvtpm_send_crq(ibmvtpm->vdev, cpu_to_be64(buf[0]), | ||
193 | cpu_to_be64(buf[1])); | ||
194 | if (rc != H_SUCCESS) | 228 | if (rc != H_SUCCESS) |
195 | dev_err(ibmvtpm->dev, | 229 | dev_err(ibmvtpm->dev, |
196 | "ibmvtpm_crq_get_rtce_size failed rc=%d\n", rc); | 230 | "ibmvtpm_crq_get_rtce_size failed rc=%d\n", rc); |
@@ -210,15 +244,10 @@ static int ibmvtpm_crq_get_rtce_size(struct ibmvtpm_dev *ibmvtpm) | |||
210 | */ | 244 | */ |
211 | static int ibmvtpm_crq_get_version(struct ibmvtpm_dev *ibmvtpm) | 245 | static int ibmvtpm_crq_get_version(struct ibmvtpm_dev *ibmvtpm) |
212 | { | 246 | { |
213 | struct ibmvtpm_crq crq; | ||
214 | u64 *buf = (u64 *) &crq; | ||
215 | int rc; | 247 | int rc; |
216 | 248 | ||
217 | crq.valid = (u8)IBMVTPM_VALID_CMD; | 249 | rc = ibmvtpm_send_crq(ibmvtpm->vdev, |
218 | crq.msg = (u8)VTPM_GET_VERSION; | 250 | IBMVTPM_VALID_CMD, VTPM_GET_VERSION, 0, 0); |
219 | |||
220 | rc = ibmvtpm_send_crq(ibmvtpm->vdev, cpu_to_be64(buf[0]), | ||
221 | cpu_to_be64(buf[1])); | ||
222 | if (rc != H_SUCCESS) | 251 | if (rc != H_SUCCESS) |
223 | dev_err(ibmvtpm->dev, | 252 | dev_err(ibmvtpm->dev, |
224 | "ibmvtpm_crq_get_version failed rc=%d\n", rc); | 253 | "ibmvtpm_crq_get_version failed rc=%d\n", rc); |
@@ -238,7 +267,7 @@ static int ibmvtpm_crq_send_init_complete(struct ibmvtpm_dev *ibmvtpm) | |||
238 | { | 267 | { |
239 | int rc; | 268 | int rc; |
240 | 269 | ||
241 | rc = ibmvtpm_send_crq(ibmvtpm->vdev, INIT_CRQ_COMP_CMD, 0); | 270 | rc = ibmvtpm_send_crq_word(ibmvtpm->vdev, INIT_CRQ_COMP_CMD); |
242 | if (rc != H_SUCCESS) | 271 | if (rc != H_SUCCESS) |
243 | dev_err(ibmvtpm->dev, | 272 | dev_err(ibmvtpm->dev, |
244 | "ibmvtpm_crq_send_init_complete failed rc=%d\n", rc); | 273 | "ibmvtpm_crq_send_init_complete failed rc=%d\n", rc); |
@@ -258,7 +287,7 @@ static int ibmvtpm_crq_send_init(struct ibmvtpm_dev *ibmvtpm) | |||
258 | { | 287 | { |
259 | int rc; | 288 | int rc; |
260 | 289 | ||
261 | rc = ibmvtpm_send_crq(ibmvtpm->vdev, INIT_CRQ_CMD, 0); | 290 | rc = ibmvtpm_send_crq_word(ibmvtpm->vdev, INIT_CRQ_CMD); |
262 | if (rc != H_SUCCESS) | 291 | if (rc != H_SUCCESS) |
263 | dev_err(ibmvtpm->dev, | 292 | dev_err(ibmvtpm->dev, |
264 | "ibmvtpm_crq_send_init failed rc=%d\n", rc); | 293 | "ibmvtpm_crq_send_init failed rc=%d\n", rc); |
@@ -340,15 +369,10 @@ static int tpm_ibmvtpm_suspend(struct device *dev) | |||
340 | { | 369 | { |
341 | struct tpm_chip *chip = dev_get_drvdata(dev); | 370 | struct tpm_chip *chip = dev_get_drvdata(dev); |
342 | struct ibmvtpm_dev *ibmvtpm = dev_get_drvdata(&chip->dev); | 371 | struct ibmvtpm_dev *ibmvtpm = dev_get_drvdata(&chip->dev); |
343 | struct ibmvtpm_crq crq; | ||
344 | u64 *buf = (u64 *) &crq; | ||
345 | int rc = 0; | 372 | int rc = 0; |
346 | 373 | ||
347 | crq.valid = (u8)IBMVTPM_VALID_CMD; | 374 | rc = ibmvtpm_send_crq(ibmvtpm->vdev, |
348 | crq.msg = (u8)VTPM_PREPARE_TO_SUSPEND; | 375 | IBMVTPM_VALID_CMD, VTPM_PREPARE_TO_SUSPEND, 0, 0); |
349 | |||
350 | rc = ibmvtpm_send_crq(ibmvtpm->vdev, cpu_to_be64(buf[0]), | ||
351 | cpu_to_be64(buf[1])); | ||
352 | if (rc != H_SUCCESS) | 376 | if (rc != H_SUCCESS) |
353 | dev_err(ibmvtpm->dev, | 377 | dev_err(ibmvtpm->dev, |
354 | "tpm_ibmvtpm_suspend failed rc=%d\n", rc); | 378 | "tpm_ibmvtpm_suspend failed rc=%d\n", rc); |
diff --git a/drivers/char/tpm/tpm_infineon.c b/drivers/char/tpm/tpm_infineon.c index 3b1b9f9322d5..d8f10047fbba 100644 --- a/drivers/char/tpm/tpm_infineon.c +++ b/drivers/char/tpm/tpm_infineon.c | |||
@@ -191,7 +191,7 @@ static int wait(struct tpm_chip *chip, int wait_for_bit) | |||
191 | /* check the status-register if wait_for_bit is set */ | 191 | /* check the status-register if wait_for_bit is set */ |
192 | if (status & 1 << wait_for_bit) | 192 | if (status & 1 << wait_for_bit) |
193 | break; | 193 | break; |
194 | msleep(TPM_MSLEEP_TIME); | 194 | tpm_msleep(TPM_MSLEEP_TIME); |
195 | } | 195 | } |
196 | if (i == TPM_MAX_TRIES) { /* timeout occurs */ | 196 | if (i == TPM_MAX_TRIES) { /* timeout occurs */ |
197 | if (wait_for_bit == STAT_XFE) | 197 | if (wait_for_bit == STAT_XFE) |
@@ -226,7 +226,7 @@ static void tpm_wtx(struct tpm_chip *chip) | |||
226 | wait_and_send(chip, TPM_CTRL_WTX); | 226 | wait_and_send(chip, TPM_CTRL_WTX); |
227 | wait_and_send(chip, 0x00); | 227 | wait_and_send(chip, 0x00); |
228 | wait_and_send(chip, 0x00); | 228 | wait_and_send(chip, 0x00); |
229 | msleep(TPM_WTX_MSLEEP_TIME); | 229 | tpm_msleep(TPM_WTX_MSLEEP_TIME); |
230 | } | 230 | } |
231 | 231 | ||
232 | static void tpm_wtx_abort(struct tpm_chip *chip) | 232 | static void tpm_wtx_abort(struct tpm_chip *chip) |
@@ -237,7 +237,7 @@ static void tpm_wtx_abort(struct tpm_chip *chip) | |||
237 | wait_and_send(chip, 0x00); | 237 | wait_and_send(chip, 0x00); |
238 | wait_and_send(chip, 0x00); | 238 | wait_and_send(chip, 0x00); |
239 | number_of_wtx = 0; | 239 | number_of_wtx = 0; |
240 | msleep(TPM_WTX_MSLEEP_TIME); | 240 | tpm_msleep(TPM_WTX_MSLEEP_TIME); |
241 | } | 241 | } |
242 | 242 | ||
243 | static int tpm_inf_recv(struct tpm_chip *chip, u8 * buf, size_t count) | 243 | static int tpm_inf_recv(struct tpm_chip *chip, u8 * buf, size_t count) |
diff --git a/drivers/char/tpm/tpm_tis_core.c b/drivers/char/tpm/tpm_tis_core.c index b617b2eeb080..63bc6c3b949e 100644 --- a/drivers/char/tpm/tpm_tis_core.c +++ b/drivers/char/tpm/tpm_tis_core.c | |||
@@ -51,7 +51,7 @@ static int wait_startup(struct tpm_chip *chip, int l) | |||
51 | 51 | ||
52 | if (access & TPM_ACCESS_VALID) | 52 | if (access & TPM_ACCESS_VALID) |
53 | return 0; | 53 | return 0; |
54 | msleep(TPM_TIMEOUT); | 54 | tpm_msleep(TPM_TIMEOUT); |
55 | } while (time_before(jiffies, stop)); | 55 | } while (time_before(jiffies, stop)); |
56 | return -1; | 56 | return -1; |
57 | } | 57 | } |
@@ -117,7 +117,7 @@ again: | |||
117 | do { | 117 | do { |
118 | if (check_locality(chip, l)) | 118 | if (check_locality(chip, l)) |
119 | return l; | 119 | return l; |
120 | msleep(TPM_TIMEOUT); | 120 | tpm_msleep(TPM_TIMEOUT); |
121 | } while (time_before(jiffies, stop)); | 121 | } while (time_before(jiffies, stop)); |
122 | } | 122 | } |
123 | return -1; | 123 | return -1; |
@@ -164,7 +164,7 @@ static int get_burstcount(struct tpm_chip *chip) | |||
164 | burstcnt = (value >> 8) & 0xFFFF; | 164 | burstcnt = (value >> 8) & 0xFFFF; |
165 | if (burstcnt) | 165 | if (burstcnt) |
166 | return burstcnt; | 166 | return burstcnt; |
167 | msleep(TPM_TIMEOUT); | 167 | tpm_msleep(TPM_TIMEOUT); |
168 | } while (time_before(jiffies, stop)); | 168 | } while (time_before(jiffies, stop)); |
169 | return -EBUSY; | 169 | return -EBUSY; |
170 | } | 170 | } |
@@ -396,7 +396,7 @@ static int tpm_tis_send(struct tpm_chip *chip, u8 *buf, size_t len) | |||
396 | priv->irq = irq; | 396 | priv->irq = irq; |
397 | chip->flags |= TPM_CHIP_FLAG_IRQ; | 397 | chip->flags |= TPM_CHIP_FLAG_IRQ; |
398 | if (!priv->irq_tested) | 398 | if (!priv->irq_tested) |
399 | msleep(1); | 399 | tpm_msleep(1); |
400 | if (!priv->irq_tested) | 400 | if (!priv->irq_tested) |
401 | disable_interrupts(chip); | 401 | disable_interrupts(chip); |
402 | priv->irq_tested = true; | 402 | priv->irq_tested = true; |
diff --git a/drivers/clk/clk-bulk.c b/drivers/clk/clk-bulk.c index c834f5abfc49..4c10456f8a32 100644 --- a/drivers/clk/clk-bulk.c +++ b/drivers/clk/clk-bulk.c | |||
@@ -105,6 +105,7 @@ err: | |||
105 | 105 | ||
106 | return ret; | 106 | return ret; |
107 | } | 107 | } |
108 | EXPORT_SYMBOL_GPL(clk_bulk_prepare); | ||
108 | 109 | ||
109 | #endif /* CONFIG_HAVE_CLK_PREPARE */ | 110 | #endif /* CONFIG_HAVE_CLK_PREPARE */ |
110 | 111 | ||
diff --git a/drivers/clk/rockchip/clk-rk3128.c b/drivers/clk/rockchip/clk-rk3128.c index 62d7854e4b87..5970a50671b9 100644 --- a/drivers/clk/rockchip/clk-rk3128.c +++ b/drivers/clk/rockchip/clk-rk3128.c | |||
@@ -315,13 +315,13 @@ static struct rockchip_clk_branch common_clk_branches[] __initdata = { | |||
315 | RK2928_CLKGATE_CON(10), 8, GFLAGS), | 315 | RK2928_CLKGATE_CON(10), 8, GFLAGS), |
316 | 316 | ||
317 | GATE(SCLK_PVTM_CORE, "clk_pvtm_core", "xin24m", 0, | 317 | GATE(SCLK_PVTM_CORE, "clk_pvtm_core", "xin24m", 0, |
318 | RK2928_CLKGATE_CON(10), 8, GFLAGS), | 318 | RK2928_CLKGATE_CON(10), 0, GFLAGS), |
319 | GATE(SCLK_PVTM_GPU, "clk_pvtm_gpu", "xin24m", 0, | 319 | GATE(SCLK_PVTM_GPU, "clk_pvtm_gpu", "xin24m", 0, |
320 | RK2928_CLKGATE_CON(10), 8, GFLAGS), | 320 | RK2928_CLKGATE_CON(10), 1, GFLAGS), |
321 | GATE(SCLK_PVTM_FUNC, "clk_pvtm_func", "xin24m", 0, | 321 | GATE(SCLK_PVTM_FUNC, "clk_pvtm_func", "xin24m", 0, |
322 | RK2928_CLKGATE_CON(10), 8, GFLAGS), | 322 | RK2928_CLKGATE_CON(10), 2, GFLAGS), |
323 | GATE(SCLK_MIPI_24M, "clk_mipi_24m", "xin24m", CLK_IGNORE_UNUSED, | 323 | GATE(SCLK_MIPI_24M, "clk_mipi_24m", "xin24m", CLK_IGNORE_UNUSED, |
324 | RK2928_CLKGATE_CON(10), 8, GFLAGS), | 324 | RK2928_CLKGATE_CON(2), 15, GFLAGS), |
325 | 325 | ||
326 | COMPOSITE(SCLK_SDMMC, "sclk_sdmmc0", mux_mmc_src_p, 0, | 326 | COMPOSITE(SCLK_SDMMC, "sclk_sdmmc0", mux_mmc_src_p, 0, |
327 | RK2928_CLKSEL_CON(11), 6, 2, MFLAGS, 0, 6, DFLAGS, | 327 | RK2928_CLKSEL_CON(11), 6, 2, MFLAGS, 0, 6, DFLAGS, |
@@ -541,7 +541,7 @@ static struct rockchip_clk_branch common_clk_branches[] __initdata = { | |||
541 | GATE(0, "pclk_grf", "pclk_cpu", CLK_IGNORE_UNUSED, RK2928_CLKGATE_CON(5), 4, GFLAGS), | 541 | GATE(0, "pclk_grf", "pclk_cpu", CLK_IGNORE_UNUSED, RK2928_CLKGATE_CON(5), 4, GFLAGS), |
542 | GATE(0, "pclk_mipiphy", "pclk_cpu", CLK_IGNORE_UNUSED, RK2928_CLKGATE_CON(5), 0, GFLAGS), | 542 | GATE(0, "pclk_mipiphy", "pclk_cpu", CLK_IGNORE_UNUSED, RK2928_CLKGATE_CON(5), 0, GFLAGS), |
543 | 543 | ||
544 | GATE(0, "pclk_pmu", "pclk_pmu_pre", CLK_IGNORE_UNUSED, RK2928_CLKGATE_CON(9), 2, GFLAGS), | 544 | GATE(0, "pclk_pmu", "pclk_pmu_pre", 0, RK2928_CLKGATE_CON(9), 2, GFLAGS), |
545 | GATE(0, "pclk_pmu_niu", "pclk_pmu_pre", CLK_IGNORE_UNUSED, RK2928_CLKGATE_CON(9), 3, GFLAGS), | 545 | GATE(0, "pclk_pmu_niu", "pclk_pmu_pre", CLK_IGNORE_UNUSED, RK2928_CLKGATE_CON(9), 3, GFLAGS), |
546 | 546 | ||
547 | /* PD_MMC */ | 547 | /* PD_MMC */ |
@@ -577,6 +577,8 @@ static const char *const rk3128_critical_clocks[] __initconst = { | |||
577 | "aclk_peri", | 577 | "aclk_peri", |
578 | "hclk_peri", | 578 | "hclk_peri", |
579 | "pclk_peri", | 579 | "pclk_peri", |
580 | "pclk_pmu", | ||
581 | "sclk_timer5", | ||
580 | }; | 582 | }; |
581 | 583 | ||
582 | static struct rockchip_clk_provider *__init rk3128_common_clk_init(struct device_node *np) | 584 | static struct rockchip_clk_provider *__init rk3128_common_clk_init(struct device_node *np) |
diff --git a/drivers/clk/samsung/clk-exynos4.c b/drivers/clk/samsung/clk-exynos4.c index e40b77583c47..d8d3cb67b402 100644 --- a/drivers/clk/samsung/clk-exynos4.c +++ b/drivers/clk/samsung/clk-exynos4.c | |||
@@ -294,6 +294,18 @@ static const struct samsung_clk_reg_dump src_mask_suspend_e4210[] = { | |||
294 | #define PLL_ENABLED (1 << 31) | 294 | #define PLL_ENABLED (1 << 31) |
295 | #define PLL_LOCKED (1 << 29) | 295 | #define PLL_LOCKED (1 << 29) |
296 | 296 | ||
297 | static void exynos4_clk_enable_pll(u32 reg) | ||
298 | { | ||
299 | u32 pll_con = readl(reg_base + reg); | ||
300 | pll_con |= PLL_ENABLED; | ||
301 | writel(pll_con, reg_base + reg); | ||
302 | |||
303 | while (!(pll_con & PLL_LOCKED)) { | ||
304 | cpu_relax(); | ||
305 | pll_con = readl(reg_base + reg); | ||
306 | } | ||
307 | } | ||
308 | |||
297 | static void exynos4_clk_wait_for_pll(u32 reg) | 309 | static void exynos4_clk_wait_for_pll(u32 reg) |
298 | { | 310 | { |
299 | u32 pll_con; | 311 | u32 pll_con; |
@@ -315,6 +327,9 @@ static int exynos4_clk_suspend(void) | |||
315 | samsung_clk_save(reg_base, exynos4_save_pll, | 327 | samsung_clk_save(reg_base, exynos4_save_pll, |
316 | ARRAY_SIZE(exynos4_clk_pll_regs)); | 328 | ARRAY_SIZE(exynos4_clk_pll_regs)); |
317 | 329 | ||
330 | exynos4_clk_enable_pll(EPLL_CON0); | ||
331 | exynos4_clk_enable_pll(VPLL_CON0); | ||
332 | |||
318 | if (exynos4_soc == EXYNOS4210) { | 333 | if (exynos4_soc == EXYNOS4210) { |
319 | samsung_clk_save(reg_base, exynos4_save_soc, | 334 | samsung_clk_save(reg_base, exynos4_save_soc, |
320 | ARRAY_SIZE(exynos4210_clk_save)); | 335 | ARRAY_SIZE(exynos4210_clk_save)); |
diff --git a/drivers/clocksource/numachip.c b/drivers/clocksource/numachip.c index 6a20dc8b253f..9a7d7f0f23fe 100644 --- a/drivers/clocksource/numachip.c +++ b/drivers/clocksource/numachip.c | |||
@@ -43,7 +43,7 @@ static int numachip2_set_next_event(unsigned long delta, struct clock_event_devi | |||
43 | return 0; | 43 | return 0; |
44 | } | 44 | } |
45 | 45 | ||
46 | static struct clock_event_device numachip2_clockevent = { | 46 | static const struct clock_event_device numachip2_clockevent __initconst = { |
47 | .name = "numachip2", | 47 | .name = "numachip2", |
48 | .rating = 400, | 48 | .rating = 400, |
49 | .set_next_event = numachip2_set_next_event, | 49 | .set_next_event = numachip2_set_next_event, |
diff --git a/drivers/clocksource/timer-integrator-ap.c b/drivers/clocksource/timer-integrator-ap.c index 2ff64d9d4fb3..62d24690ba02 100644 --- a/drivers/clocksource/timer-integrator-ap.c +++ b/drivers/clocksource/timer-integrator-ap.c | |||
@@ -36,8 +36,8 @@ static u64 notrace integrator_read_sched_clock(void) | |||
36 | return -readl(sched_clk_base + TIMER_VALUE); | 36 | return -readl(sched_clk_base + TIMER_VALUE); |
37 | } | 37 | } |
38 | 38 | ||
39 | static int integrator_clocksource_init(unsigned long inrate, | 39 | static int __init integrator_clocksource_init(unsigned long inrate, |
40 | void __iomem *base) | 40 | void __iomem *base) |
41 | { | 41 | { |
42 | u32 ctrl = TIMER_CTRL_ENABLE | TIMER_CTRL_PERIODIC; | 42 | u32 ctrl = TIMER_CTRL_ENABLE | TIMER_CTRL_PERIODIC; |
43 | unsigned long rate = inrate; | 43 | unsigned long rate = inrate; |
diff --git a/drivers/cpufreq/cpufreq-dt-platdev.c b/drivers/cpufreq/cpufreq-dt-platdev.c index a020da7940d6..a753c50e9e41 100644 --- a/drivers/cpufreq/cpufreq-dt-platdev.c +++ b/drivers/cpufreq/cpufreq-dt-platdev.c | |||
@@ -106,6 +106,22 @@ static const struct of_device_id whitelist[] __initconst = { | |||
106 | * platforms using "operating-points-v2" property. | 106 | * platforms using "operating-points-v2" property. |
107 | */ | 107 | */ |
108 | static const struct of_device_id blacklist[] __initconst = { | 108 | static const struct of_device_id blacklist[] __initconst = { |
109 | { .compatible = "calxeda,highbank", }, | ||
110 | { .compatible = "calxeda,ecx-2000", }, | ||
111 | |||
112 | { .compatible = "marvell,armadaxp", }, | ||
113 | |||
114 | { .compatible = "nvidia,tegra124", }, | ||
115 | |||
116 | { .compatible = "st,stih407", }, | ||
117 | { .compatible = "st,stih410", }, | ||
118 | |||
119 | { .compatible = "sigma,tango4", }, | ||
120 | |||
121 | { .compatible = "ti,am33xx", }, | ||
122 | { .compatible = "ti,am43", }, | ||
123 | { .compatible = "ti,dra7", }, | ||
124 | |||
109 | { } | 125 | { } |
110 | }; | 126 | }; |
111 | 127 | ||
diff --git a/drivers/cpufreq/ti-cpufreq.c b/drivers/cpufreq/ti-cpufreq.c index b29cd3398463..4bf47de6101f 100644 --- a/drivers/cpufreq/ti-cpufreq.c +++ b/drivers/cpufreq/ti-cpufreq.c | |||
@@ -190,7 +190,7 @@ static int ti_cpufreq_setup_syscon_register(struct ti_cpufreq_data *opp_data) | |||
190 | 190 | ||
191 | static const struct of_device_id ti_cpufreq_of_match[] = { | 191 | static const struct of_device_id ti_cpufreq_of_match[] = { |
192 | { .compatible = "ti,am33xx", .data = &am3x_soc_data, }, | 192 | { .compatible = "ti,am33xx", .data = &am3x_soc_data, }, |
193 | { .compatible = "ti,am4372", .data = &am4x_soc_data, }, | 193 | { .compatible = "ti,am43", .data = &am4x_soc_data, }, |
194 | { .compatible = "ti,dra7", .data = &dra7_soc_data }, | 194 | { .compatible = "ti,dra7", .data = &dra7_soc_data }, |
195 | {}, | 195 | {}, |
196 | }; | 196 | }; |
diff --git a/drivers/cpuidle/cpuidle-arm.c b/drivers/cpuidle/cpuidle-arm.c index 7080c384ad5d..52a75053ee03 100644 --- a/drivers/cpuidle/cpuidle-arm.c +++ b/drivers/cpuidle/cpuidle-arm.c | |||
@@ -104,13 +104,13 @@ static int __init arm_idle_init(void) | |||
104 | ret = dt_init_idle_driver(drv, arm_idle_state_match, 1); | 104 | ret = dt_init_idle_driver(drv, arm_idle_state_match, 1); |
105 | if (ret <= 0) { | 105 | if (ret <= 0) { |
106 | ret = ret ? : -ENODEV; | 106 | ret = ret ? : -ENODEV; |
107 | goto out_fail; | 107 | goto init_fail; |
108 | } | 108 | } |
109 | 109 | ||
110 | ret = cpuidle_register_driver(drv); | 110 | ret = cpuidle_register_driver(drv); |
111 | if (ret) { | 111 | if (ret) { |
112 | pr_err("Failed to register cpuidle driver\n"); | 112 | pr_err("Failed to register cpuidle driver\n"); |
113 | goto out_fail; | 113 | goto init_fail; |
114 | } | 114 | } |
115 | 115 | ||
116 | /* | 116 | /* |
@@ -149,6 +149,8 @@ static int __init arm_idle_init(void) | |||
149 | } | 149 | } |
150 | 150 | ||
151 | return 0; | 151 | return 0; |
152 | init_fail: | ||
153 | kfree(drv); | ||
152 | out_fail: | 154 | out_fail: |
153 | while (--cpu >= 0) { | 155 | while (--cpu >= 0) { |
154 | dev = per_cpu(cpuidle_devices, cpu); | 156 | dev = per_cpu(cpuidle_devices, cpu); |
diff --git a/drivers/crypto/caam/Kconfig b/drivers/crypto/caam/Kconfig index e36aeacd7635..1eb852765469 100644 --- a/drivers/crypto/caam/Kconfig +++ b/drivers/crypto/caam/Kconfig | |||
@@ -1,6 +1,7 @@ | |||
1 | config CRYPTO_DEV_FSL_CAAM | 1 | config CRYPTO_DEV_FSL_CAAM |
2 | tristate "Freescale CAAM-Multicore driver backend" | 2 | tristate "Freescale CAAM-Multicore driver backend" |
3 | depends on FSL_SOC || ARCH_MXC || ARCH_LAYERSCAPE | 3 | depends on FSL_SOC || ARCH_MXC || ARCH_LAYERSCAPE |
4 | select SOC_BUS | ||
4 | help | 5 | help |
5 | Enables the driver module for Freescale's Cryptographic Accelerator | 6 | Enables the driver module for Freescale's Cryptographic Accelerator |
6 | and Assurance Module (CAAM), also known as the SEC version 4 (SEC4). | 7 | and Assurance Module (CAAM), also known as the SEC version 4 (SEC4). |
@@ -141,10 +142,6 @@ config CRYPTO_DEV_FSL_CAAM_RNG_API | |||
141 | To compile this as a module, choose M here: the module | 142 | To compile this as a module, choose M here: the module |
142 | will be called caamrng. | 143 | will be called caamrng. |
143 | 144 | ||
144 | config CRYPTO_DEV_FSL_CAAM_IMX | ||
145 | def_bool SOC_IMX6 || SOC_IMX7D | ||
146 | depends on CRYPTO_DEV_FSL_CAAM | ||
147 | |||
148 | config CRYPTO_DEV_FSL_CAAM_DEBUG | 145 | config CRYPTO_DEV_FSL_CAAM_DEBUG |
149 | bool "Enable debug output in CAAM driver" | 146 | bool "Enable debug output in CAAM driver" |
150 | depends on CRYPTO_DEV_FSL_CAAM | 147 | depends on CRYPTO_DEV_FSL_CAAM |
diff --git a/drivers/crypto/caam/ctrl.c b/drivers/crypto/caam/ctrl.c index dacb53fb690e..027e121c6f70 100644 --- a/drivers/crypto/caam/ctrl.c +++ b/drivers/crypto/caam/ctrl.c | |||
@@ -7,6 +7,7 @@ | |||
7 | #include <linux/device.h> | 7 | #include <linux/device.h> |
8 | #include <linux/of_address.h> | 8 | #include <linux/of_address.h> |
9 | #include <linux/of_irq.h> | 9 | #include <linux/of_irq.h> |
10 | #include <linux/sys_soc.h> | ||
10 | 11 | ||
11 | #include "compat.h" | 12 | #include "compat.h" |
12 | #include "regs.h" | 13 | #include "regs.h" |
@@ -19,6 +20,8 @@ bool caam_little_end; | |||
19 | EXPORT_SYMBOL(caam_little_end); | 20 | EXPORT_SYMBOL(caam_little_end); |
20 | bool caam_dpaa2; | 21 | bool caam_dpaa2; |
21 | EXPORT_SYMBOL(caam_dpaa2); | 22 | EXPORT_SYMBOL(caam_dpaa2); |
23 | bool caam_imx; | ||
24 | EXPORT_SYMBOL(caam_imx); | ||
22 | 25 | ||
23 | #ifdef CONFIG_CAAM_QI | 26 | #ifdef CONFIG_CAAM_QI |
24 | #include "qi.h" | 27 | #include "qi.h" |
@@ -28,19 +31,11 @@ EXPORT_SYMBOL(caam_dpaa2); | |||
28 | * i.MX targets tend to have clock control subsystems that can | 31 | * i.MX targets tend to have clock control subsystems that can |
29 | * enable/disable clocking to our device. | 32 | * enable/disable clocking to our device. |
30 | */ | 33 | */ |
31 | #ifdef CONFIG_CRYPTO_DEV_FSL_CAAM_IMX | ||
32 | static inline struct clk *caam_drv_identify_clk(struct device *dev, | 34 | static inline struct clk *caam_drv_identify_clk(struct device *dev, |
33 | char *clk_name) | 35 | char *clk_name) |
34 | { | 36 | { |
35 | return devm_clk_get(dev, clk_name); | 37 | return caam_imx ? devm_clk_get(dev, clk_name) : NULL; |
36 | } | 38 | } |
37 | #else | ||
38 | static inline struct clk *caam_drv_identify_clk(struct device *dev, | ||
39 | char *clk_name) | ||
40 | { | ||
41 | return NULL; | ||
42 | } | ||
43 | #endif | ||
44 | 39 | ||
45 | /* | 40 | /* |
46 | * Descriptor to instantiate RNG State Handle 0 in normal mode and | 41 | * Descriptor to instantiate RNG State Handle 0 in normal mode and |
@@ -430,6 +425,10 @@ static int caam_probe(struct platform_device *pdev) | |||
430 | { | 425 | { |
431 | int ret, ring, gen_sk, ent_delay = RTSDCTL_ENT_DLY_MIN; | 426 | int ret, ring, gen_sk, ent_delay = RTSDCTL_ENT_DLY_MIN; |
432 | u64 caam_id; | 427 | u64 caam_id; |
428 | static const struct soc_device_attribute imx_soc[] = { | ||
429 | {.family = "Freescale i.MX"}, | ||
430 | {}, | ||
431 | }; | ||
433 | struct device *dev; | 432 | struct device *dev; |
434 | struct device_node *nprop, *np; | 433 | struct device_node *nprop, *np; |
435 | struct caam_ctrl __iomem *ctrl; | 434 | struct caam_ctrl __iomem *ctrl; |
@@ -451,6 +450,8 @@ static int caam_probe(struct platform_device *pdev) | |||
451 | dev_set_drvdata(dev, ctrlpriv); | 450 | dev_set_drvdata(dev, ctrlpriv); |
452 | nprop = pdev->dev.of_node; | 451 | nprop = pdev->dev.of_node; |
453 | 452 | ||
453 | caam_imx = (bool)soc_device_match(imx_soc); | ||
454 | |||
454 | /* Enable clocking */ | 455 | /* Enable clocking */ |
455 | clk = caam_drv_identify_clk(&pdev->dev, "ipg"); | 456 | clk = caam_drv_identify_clk(&pdev->dev, "ipg"); |
456 | if (IS_ERR(clk)) { | 457 | if (IS_ERR(clk)) { |
diff --git a/drivers/crypto/caam/regs.h b/drivers/crypto/caam/regs.h index 2b5efff9ec3c..17cfd23a38fa 100644 --- a/drivers/crypto/caam/regs.h +++ b/drivers/crypto/caam/regs.h | |||
@@ -67,6 +67,7 @@ | |||
67 | */ | 67 | */ |
68 | 68 | ||
69 | extern bool caam_little_end; | 69 | extern bool caam_little_end; |
70 | extern bool caam_imx; | ||
70 | 71 | ||
71 | #define caam_to_cpu(len) \ | 72 | #define caam_to_cpu(len) \ |
72 | static inline u##len caam##len ## _to_cpu(u##len val) \ | 73 | static inline u##len caam##len ## _to_cpu(u##len val) \ |
@@ -154,13 +155,10 @@ static inline u64 rd_reg64(void __iomem *reg) | |||
154 | #else /* CONFIG_64BIT */ | 155 | #else /* CONFIG_64BIT */ |
155 | static inline void wr_reg64(void __iomem *reg, u64 data) | 156 | static inline void wr_reg64(void __iomem *reg, u64 data) |
156 | { | 157 | { |
157 | #ifndef CONFIG_CRYPTO_DEV_FSL_CAAM_IMX | 158 | if (!caam_imx && caam_little_end) { |
158 | if (caam_little_end) { | ||
159 | wr_reg32((u32 __iomem *)(reg) + 1, data >> 32); | 159 | wr_reg32((u32 __iomem *)(reg) + 1, data >> 32); |
160 | wr_reg32((u32 __iomem *)(reg), data); | 160 | wr_reg32((u32 __iomem *)(reg), data); |
161 | } else | 161 | } else { |
162 | #endif | ||
163 | { | ||
164 | wr_reg32((u32 __iomem *)(reg), data >> 32); | 162 | wr_reg32((u32 __iomem *)(reg), data >> 32); |
165 | wr_reg32((u32 __iomem *)(reg) + 1, data); | 163 | wr_reg32((u32 __iomem *)(reg) + 1, data); |
166 | } | 164 | } |
@@ -168,41 +166,40 @@ static inline void wr_reg64(void __iomem *reg, u64 data) | |||
168 | 166 | ||
169 | static inline u64 rd_reg64(void __iomem *reg) | 167 | static inline u64 rd_reg64(void __iomem *reg) |
170 | { | 168 | { |
171 | #ifndef CONFIG_CRYPTO_DEV_FSL_CAAM_IMX | 169 | if (!caam_imx && caam_little_end) |
172 | if (caam_little_end) | ||
173 | return ((u64)rd_reg32((u32 __iomem *)(reg) + 1) << 32 | | 170 | return ((u64)rd_reg32((u32 __iomem *)(reg) + 1) << 32 | |
174 | (u64)rd_reg32((u32 __iomem *)(reg))); | 171 | (u64)rd_reg32((u32 __iomem *)(reg))); |
175 | else | 172 | |
176 | #endif | 173 | return ((u64)rd_reg32((u32 __iomem *)(reg)) << 32 | |
177 | return ((u64)rd_reg32((u32 __iomem *)(reg)) << 32 | | 174 | (u64)rd_reg32((u32 __iomem *)(reg) + 1)); |
178 | (u64)rd_reg32((u32 __iomem *)(reg) + 1)); | ||
179 | } | 175 | } |
180 | #endif /* CONFIG_64BIT */ | 176 | #endif /* CONFIG_64BIT */ |
181 | 177 | ||
178 | static inline u64 cpu_to_caam_dma64(dma_addr_t value) | ||
179 | { | ||
180 | if (caam_imx) | ||
181 | return (((u64)cpu_to_caam32(lower_32_bits(value)) << 32) | | ||
182 | (u64)cpu_to_caam32(upper_32_bits(value))); | ||
183 | |||
184 | return cpu_to_caam64(value); | ||
185 | } | ||
186 | |||
187 | static inline u64 caam_dma64_to_cpu(u64 value) | ||
188 | { | ||
189 | if (caam_imx) | ||
190 | return (((u64)caam32_to_cpu(lower_32_bits(value)) << 32) | | ||
191 | (u64)caam32_to_cpu(upper_32_bits(value))); | ||
192 | |||
193 | return caam64_to_cpu(value); | ||
194 | } | ||
195 | |||
182 | #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT | 196 | #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT |
183 | #ifdef CONFIG_SOC_IMX7D | 197 | #define cpu_to_caam_dma(value) cpu_to_caam_dma64(value) |
184 | #define cpu_to_caam_dma(value) \ | 198 | #define caam_dma_to_cpu(value) caam_dma64_to_cpu(value) |
185 | (((u64)cpu_to_caam32(lower_32_bits(value)) << 32) | \ | ||
186 | (u64)cpu_to_caam32(upper_32_bits(value))) | ||
187 | #define caam_dma_to_cpu(value) \ | ||
188 | (((u64)caam32_to_cpu(lower_32_bits(value)) << 32) | \ | ||
189 | (u64)caam32_to_cpu(upper_32_bits(value))) | ||
190 | #else | ||
191 | #define cpu_to_caam_dma(value) cpu_to_caam64(value) | ||
192 | #define caam_dma_to_cpu(value) caam64_to_cpu(value) | ||
193 | #endif /* CONFIG_SOC_IMX7D */ | ||
194 | #else | 199 | #else |
195 | #define cpu_to_caam_dma(value) cpu_to_caam32(value) | 200 | #define cpu_to_caam_dma(value) cpu_to_caam32(value) |
196 | #define caam_dma_to_cpu(value) caam32_to_cpu(value) | 201 | #define caam_dma_to_cpu(value) caam32_to_cpu(value) |
197 | #endif /* CONFIG_ARCH_DMA_ADDR_T_64BIT */ | 202 | #endif /* CONFIG_ARCH_DMA_ADDR_T_64BIT */ |
198 | |||
199 | #ifdef CONFIG_CRYPTO_DEV_FSL_CAAM_IMX | ||
200 | #define cpu_to_caam_dma64(value) \ | ||
201 | (((u64)cpu_to_caam32(lower_32_bits(value)) << 32) | \ | ||
202 | (u64)cpu_to_caam32(upper_32_bits(value))) | ||
203 | #else | ||
204 | #define cpu_to_caam_dma64(value) cpu_to_caam64(value) | ||
205 | #endif | ||
206 | 203 | ||
207 | /* | 204 | /* |
208 | * jr_outentry | 205 | * jr_outentry |
diff --git a/drivers/crypto/inside-secure/safexcel_cipher.c b/drivers/crypto/inside-secure/safexcel_cipher.c index d2207ac5ba19..5438552bc6d7 100644 --- a/drivers/crypto/inside-secure/safexcel_cipher.c +++ b/drivers/crypto/inside-secure/safexcel_cipher.c | |||
@@ -386,7 +386,7 @@ static int safexcel_cipher_exit_inv(struct crypto_tfm *tfm) | |||
386 | struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm); | 386 | struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm); |
387 | struct safexcel_crypto_priv *priv = ctx->priv; | 387 | struct safexcel_crypto_priv *priv = ctx->priv; |
388 | struct skcipher_request req; | 388 | struct skcipher_request req; |
389 | struct safexcel_inv_result result = { 0 }; | 389 | struct safexcel_inv_result result = {}; |
390 | int ring = ctx->base.ring; | 390 | int ring = ctx->base.ring; |
391 | 391 | ||
392 | memset(&req, 0, sizeof(struct skcipher_request)); | 392 | memset(&req, 0, sizeof(struct skcipher_request)); |
diff --git a/drivers/crypto/inside-secure/safexcel_hash.c b/drivers/crypto/inside-secure/safexcel_hash.c index 3f819399cd95..3980f946874f 100644 --- a/drivers/crypto/inside-secure/safexcel_hash.c +++ b/drivers/crypto/inside-secure/safexcel_hash.c | |||
@@ -419,7 +419,7 @@ static int safexcel_ahash_exit_inv(struct crypto_tfm *tfm) | |||
419 | struct safexcel_ahash_ctx *ctx = crypto_tfm_ctx(tfm); | 419 | struct safexcel_ahash_ctx *ctx = crypto_tfm_ctx(tfm); |
420 | struct safexcel_crypto_priv *priv = ctx->priv; | 420 | struct safexcel_crypto_priv *priv = ctx->priv; |
421 | struct ahash_request req; | 421 | struct ahash_request req; |
422 | struct safexcel_inv_result result = { 0 }; | 422 | struct safexcel_inv_result result = {}; |
423 | int ring = ctx->base.ring; | 423 | int ring = ctx->base.ring; |
424 | 424 | ||
425 | memset(&req, 0, sizeof(struct ahash_request)); | 425 | memset(&req, 0, sizeof(struct ahash_request)); |
diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c index 79791c690858..dff88838dce7 100644 --- a/drivers/crypto/talitos.c +++ b/drivers/crypto/talitos.c | |||
@@ -1756,9 +1756,9 @@ static int common_nonsnoop_hash(struct talitos_edesc *edesc, | |||
1756 | req_ctx->swinit = 0; | 1756 | req_ctx->swinit = 0; |
1757 | } else { | 1757 | } else { |
1758 | desc->ptr[1] = zero_entry; | 1758 | desc->ptr[1] = zero_entry; |
1759 | /* Indicate next op is not the first. */ | ||
1760 | req_ctx->first = 0; | ||
1761 | } | 1759 | } |
1760 | /* Indicate next op is not the first. */ | ||
1761 | req_ctx->first = 0; | ||
1762 | 1762 | ||
1763 | /* HMAC key */ | 1763 | /* HMAC key */ |
1764 | if (ctx->keylen) | 1764 | if (ctx->keylen) |
@@ -1769,7 +1769,7 @@ static int common_nonsnoop_hash(struct talitos_edesc *edesc, | |||
1769 | 1769 | ||
1770 | sg_count = edesc->src_nents ?: 1; | 1770 | sg_count = edesc->src_nents ?: 1; |
1771 | if (is_sec1 && sg_count > 1) | 1771 | if (is_sec1 && sg_count > 1) |
1772 | sg_copy_to_buffer(areq->src, sg_count, edesc->buf, length); | 1772 | sg_copy_to_buffer(req_ctx->psrc, sg_count, edesc->buf, length); |
1773 | else | 1773 | else |
1774 | sg_count = dma_map_sg(dev, req_ctx->psrc, sg_count, | 1774 | sg_count = dma_map_sg(dev, req_ctx->psrc, sg_count, |
1775 | DMA_TO_DEVICE); | 1775 | DMA_TO_DEVICE); |
@@ -3057,7 +3057,8 @@ static struct talitos_crypto_alg *talitos_alg_alloc(struct device *dev, | |||
3057 | t_alg->algt.alg.hash.final = ahash_final; | 3057 | t_alg->algt.alg.hash.final = ahash_final; |
3058 | t_alg->algt.alg.hash.finup = ahash_finup; | 3058 | t_alg->algt.alg.hash.finup = ahash_finup; |
3059 | t_alg->algt.alg.hash.digest = ahash_digest; | 3059 | t_alg->algt.alg.hash.digest = ahash_digest; |
3060 | t_alg->algt.alg.hash.setkey = ahash_setkey; | 3060 | if (!strncmp(alg->cra_name, "hmac", 4)) |
3061 | t_alg->algt.alg.hash.setkey = ahash_setkey; | ||
3061 | t_alg->algt.alg.hash.import = ahash_import; | 3062 | t_alg->algt.alg.hash.import = ahash_import; |
3062 | t_alg->algt.alg.hash.export = ahash_export; | 3063 | t_alg->algt.alg.hash.export = ahash_export; |
3063 | 3064 | ||
diff --git a/drivers/fpga/altera-cvp.c b/drivers/fpga/altera-cvp.c index 08629ee69d11..00e73d28077c 100644 --- a/drivers/fpga/altera-cvp.c +++ b/drivers/fpga/altera-cvp.c | |||
@@ -361,12 +361,12 @@ static const struct fpga_manager_ops altera_cvp_ops = { | |||
361 | .write_complete = altera_cvp_write_complete, | 361 | .write_complete = altera_cvp_write_complete, |
362 | }; | 362 | }; |
363 | 363 | ||
364 | static ssize_t show_chkcfg(struct device_driver *dev, char *buf) | 364 | static ssize_t chkcfg_show(struct device_driver *dev, char *buf) |
365 | { | 365 | { |
366 | return snprintf(buf, 3, "%d\n", altera_cvp_chkcfg); | 366 | return snprintf(buf, 3, "%d\n", altera_cvp_chkcfg); |
367 | } | 367 | } |
368 | 368 | ||
369 | static ssize_t store_chkcfg(struct device_driver *drv, const char *buf, | 369 | static ssize_t chkcfg_store(struct device_driver *drv, const char *buf, |
370 | size_t count) | 370 | size_t count) |
371 | { | 371 | { |
372 | int ret; | 372 | int ret; |
@@ -378,7 +378,7 @@ static ssize_t store_chkcfg(struct device_driver *drv, const char *buf, | |||
378 | return count; | 378 | return count; |
379 | } | 379 | } |
380 | 380 | ||
381 | static DRIVER_ATTR(chkcfg, 0600, show_chkcfg, store_chkcfg); | 381 | static DRIVER_ATTR_RW(chkcfg); |
382 | 382 | ||
383 | static int altera_cvp_probe(struct pci_dev *pdev, | 383 | static int altera_cvp_probe(struct pci_dev *pdev, |
384 | const struct pci_device_id *dev_id); | 384 | const struct pci_device_id *dev_id); |
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c index d228f5a99044..dbbe986f90f2 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c | |||
@@ -636,7 +636,194 @@ static void gfx_v6_0_tiling_mode_table_init(struct amdgpu_device *adev) | |||
636 | NUM_BANKS(ADDR_SURF_2_BANK); | 636 | NUM_BANKS(ADDR_SURF_2_BANK); |
637 | for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) | 637 | for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) |
638 | WREG32(mmGB_TILE_MODE0 + reg_offset, tilemode[reg_offset]); | 638 | WREG32(mmGB_TILE_MODE0 + reg_offset, tilemode[reg_offset]); |
639 | } else if (adev->asic_type == CHIP_OLAND || adev->asic_type == CHIP_HAINAN) { | 639 | } else if (adev->asic_type == CHIP_OLAND) { |
640 | tilemode[0] = MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) | | ||
641 | ARRAY_MODE(ARRAY_2D_TILED_THIN1) | | ||
642 | PIPE_CONFIG(ADDR_SURF_P4_8x16) | | ||
643 | TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) | | ||
644 | NUM_BANKS(ADDR_SURF_16_BANK) | | ||
645 | BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | | ||
646 | BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | | ||
647 | MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4); | ||
648 | tilemode[1] = MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) | | ||
649 | ARRAY_MODE(ARRAY_2D_TILED_THIN1) | | ||
650 | PIPE_CONFIG(ADDR_SURF_P4_8x16) | | ||
651 | TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) | | ||
652 | NUM_BANKS(ADDR_SURF_16_BANK) | | ||
653 | BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | | ||
654 | BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | | ||
655 | MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4); | ||
656 | tilemode[2] = MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) | | ||
657 | ARRAY_MODE(ARRAY_2D_TILED_THIN1) | | ||
658 | PIPE_CONFIG(ADDR_SURF_P4_8x16) | | ||
659 | TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) | | ||
660 | NUM_BANKS(ADDR_SURF_16_BANK) | | ||
661 | BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | | ||
662 | BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | | ||
663 | MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4); | ||
664 | tilemode[3] = MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) | | ||
665 | ARRAY_MODE(ARRAY_2D_TILED_THIN1) | | ||
666 | PIPE_CONFIG(ADDR_SURF_P4_8x16) | | ||
667 | TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) | | ||
668 | NUM_BANKS(ADDR_SURF_16_BANK) | | ||
669 | BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | | ||
670 | BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | | ||
671 | MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4); | ||
672 | tilemode[4] = MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) | | ||
673 | ARRAY_MODE(ARRAY_1D_TILED_THIN1) | | ||
674 | PIPE_CONFIG(ADDR_SURF_P4_8x16) | | ||
675 | TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) | | ||
676 | NUM_BANKS(ADDR_SURF_16_BANK) | | ||
677 | BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | | ||
678 | BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) | | ||
679 | MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2); | ||
680 | tilemode[5] = MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) | | ||
681 | ARRAY_MODE(ARRAY_2D_TILED_THIN1) | | ||
682 | PIPE_CONFIG(ADDR_SURF_P4_8x16) | | ||
683 | TILE_SPLIT(split_equal_to_row_size) | | ||
684 | NUM_BANKS(ADDR_SURF_16_BANK) | | ||
685 | BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | | ||
686 | BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) | | ||
687 | MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2); | ||
688 | tilemode[6] = MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) | | ||
689 | ARRAY_MODE(ARRAY_2D_TILED_THIN1) | | ||
690 | PIPE_CONFIG(ADDR_SURF_P4_8x16) | | ||
691 | TILE_SPLIT(split_equal_to_row_size) | | ||
692 | NUM_BANKS(ADDR_SURF_16_BANK) | | ||
693 | BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | | ||
694 | BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | | ||
695 | MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2); | ||
696 | tilemode[7] = MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) | | ||
697 | ARRAY_MODE(ARRAY_2D_TILED_THIN1) | | ||
698 | PIPE_CONFIG(ADDR_SURF_P4_8x16) | | ||
699 | TILE_SPLIT(split_equal_to_row_size) | | ||
700 | NUM_BANKS(ADDR_SURF_16_BANK) | | ||
701 | BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | | ||
702 | BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | | ||
703 | MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4); | ||
704 | tilemode[8] = MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) | | ||
705 | ARRAY_MODE(ARRAY_LINEAR_ALIGNED) | | ||
706 | PIPE_CONFIG(ADDR_SURF_P4_8x16) | | ||
707 | TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) | | ||
708 | NUM_BANKS(ADDR_SURF_16_BANK) | | ||
709 | BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | | ||
710 | BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) | | ||
711 | MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2); | ||
712 | tilemode[9] = MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) | | ||
713 | ARRAY_MODE(ARRAY_1D_TILED_THIN1) | | ||
714 | PIPE_CONFIG(ADDR_SURF_P4_8x16) | | ||
715 | TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) | | ||
716 | NUM_BANKS(ADDR_SURF_16_BANK) | | ||
717 | BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | | ||
718 | BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) | | ||
719 | MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2); | ||
720 | tilemode[10] = MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) | | ||
721 | ARRAY_MODE(ARRAY_2D_TILED_THIN1) | | ||
722 | PIPE_CONFIG(ADDR_SURF_P4_8x16) | | ||
723 | TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) | | ||
724 | NUM_BANKS(ADDR_SURF_16_BANK) | | ||
725 | BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | | ||
726 | BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | | ||
727 | MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4); | ||
728 | tilemode[11] = MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) | | ||
729 | ARRAY_MODE(ARRAY_2D_TILED_THIN1) | | ||
730 | PIPE_CONFIG(ADDR_SURF_P4_8x16) | | ||
731 | TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) | | ||
732 | NUM_BANKS(ADDR_SURF_16_BANK) | | ||
733 | BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | | ||
734 | BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) | | ||
735 | MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2); | ||
736 | tilemode[12] = MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) | | ||
737 | ARRAY_MODE(ARRAY_2D_TILED_THIN1) | | ||
738 | PIPE_CONFIG(ADDR_SURF_P4_8x16) | | ||
739 | TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) | | ||
740 | NUM_BANKS(ADDR_SURF_16_BANK) | | ||
741 | BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | | ||
742 | BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | | ||
743 | MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2); | ||
744 | tilemode[13] = MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) | | ||
745 | ARRAY_MODE(ARRAY_1D_TILED_THIN1) | | ||
746 | PIPE_CONFIG(ADDR_SURF_P4_8x16) | | ||
747 | TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) | | ||
748 | NUM_BANKS(ADDR_SURF_16_BANK) | | ||
749 | BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | | ||
750 | BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) | | ||
751 | MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2); | ||
752 | tilemode[14] = MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) | | ||
753 | ARRAY_MODE(ARRAY_2D_TILED_THIN1) | | ||
754 | PIPE_CONFIG(ADDR_SURF_P4_8x16) | | ||
755 | TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) | | ||
756 | NUM_BANKS(ADDR_SURF_16_BANK) | | ||
757 | BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | | ||
758 | BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | | ||
759 | MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2); | ||
760 | tilemode[15] = MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) | | ||
761 | ARRAY_MODE(ARRAY_2D_TILED_THIN1) | | ||
762 | PIPE_CONFIG(ADDR_SURF_P4_8x16) | | ||
763 | TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) | | ||
764 | NUM_BANKS(ADDR_SURF_16_BANK) | | ||
765 | BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | | ||
766 | BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) | | ||
767 | MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2); | ||
768 | tilemode[16] = MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) | | ||
769 | ARRAY_MODE(ARRAY_2D_TILED_THIN1) | | ||
770 | PIPE_CONFIG(ADDR_SURF_P4_8x16) | | ||
771 | TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) | | ||
772 | NUM_BANKS(ADDR_SURF_16_BANK) | | ||
773 | BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | | ||
774 | BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | | ||
775 | MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2); | ||
776 | tilemode[17] = MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) | | ||
777 | ARRAY_MODE(ARRAY_2D_TILED_THIN1) | | ||
778 | PIPE_CONFIG(ADDR_SURF_P4_8x16) | | ||
779 | TILE_SPLIT(split_equal_to_row_size) | | ||
780 | NUM_BANKS(ADDR_SURF_16_BANK) | | ||
781 | BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | | ||
782 | BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | | ||
783 | MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2); | ||
784 | tilemode[21] = MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) | | ||
785 | ARRAY_MODE(ARRAY_2D_TILED_THIN1) | | ||
786 | PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) | | ||
787 | TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) | | ||
788 | NUM_BANKS(ADDR_SURF_16_BANK) | | ||
789 | BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) | | ||
790 | BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | | ||
791 | MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2); | ||
792 | tilemode[22] = MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) | | ||
793 | ARRAY_MODE(ARRAY_2D_TILED_THIN1) | | ||
794 | PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) | | ||
795 | TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) | | ||
796 | NUM_BANKS(ADDR_SURF_16_BANK) | | ||
797 | BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | | ||
798 | BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | | ||
799 | MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4); | ||
800 | tilemode[23] = MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) | | ||
801 | ARRAY_MODE(ARRAY_2D_TILED_THIN1) | | ||
802 | PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) | | ||
803 | TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) | | ||
804 | NUM_BANKS(ADDR_SURF_16_BANK) | | ||
805 | BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | | ||
806 | BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) | | ||
807 | MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2); | ||
808 | tilemode[24] = MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) | | ||
809 | ARRAY_MODE(ARRAY_2D_TILED_THIN1) | | ||
810 | PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) | | ||
811 | TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) | | ||
812 | NUM_BANKS(ADDR_SURF_16_BANK) | | ||
813 | BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | | ||
814 | BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | | ||
815 | MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2); | ||
816 | tilemode[25] = MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) | | ||
817 | ARRAY_MODE(ARRAY_2D_TILED_THIN1) | | ||
818 | PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) | | ||
819 | TILE_SPLIT(ADDR_SURF_TILE_SPLIT_1KB) | | ||
820 | NUM_BANKS(ADDR_SURF_8_BANK) | | ||
821 | BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | | ||
822 | BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | | ||
823 | MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1); | ||
824 | for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) | ||
825 | WREG32(mmGB_TILE_MODE0 + reg_offset, tilemode[reg_offset]); | ||
826 | } else if (adev->asic_type == CHIP_HAINAN) { | ||
640 | tilemode[0] = MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) | | 827 | tilemode[0] = MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) | |
641 | ARRAY_MODE(ARRAY_2D_TILED_THIN1) | | 828 | ARRAY_MODE(ARRAY_2D_TILED_THIN1) | |
642 | PIPE_CONFIG(ADDR_SURF_P2) | | 829 | PIPE_CONFIG(ADDR_SURF_P2) | |
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c index e4a8c2e52cb2..660b3fbade41 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c | |||
@@ -892,6 +892,8 @@ static int kfd_ioctl_get_tile_config(struct file *filep, | |||
892 | int err = 0; | 892 | int err = 0; |
893 | 893 | ||
894 | dev = kfd_device_by_id(args->gpu_id); | 894 | dev = kfd_device_by_id(args->gpu_id); |
895 | if (!dev) | ||
896 | return -EINVAL; | ||
895 | 897 | ||
896 | dev->kfd2kgd->get_tile_config(dev->kgd, &config); | 898 | dev->kfd2kgd->get_tile_config(dev->kgd, &config); |
897 | 899 | ||
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_events.c b/drivers/gpu/drm/amd/amdkfd/kfd_events.c index 5979158c3f7b..944abfad39c1 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_events.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_events.c | |||
@@ -292,7 +292,10 @@ static int create_signal_event(struct file *devkfd, | |||
292 | struct kfd_event *ev) | 292 | struct kfd_event *ev) |
293 | { | 293 | { |
294 | if (p->signal_event_count == KFD_SIGNAL_EVENT_LIMIT) { | 294 | if (p->signal_event_count == KFD_SIGNAL_EVENT_LIMIT) { |
295 | pr_warn("Signal event wasn't created because limit was reached\n"); | 295 | if (!p->signal_event_limit_reached) { |
296 | pr_warn("Signal event wasn't created because limit was reached\n"); | ||
297 | p->signal_event_limit_reached = true; | ||
298 | } | ||
296 | return -ENOMEM; | 299 | return -ENOMEM; |
297 | } | 300 | } |
298 | 301 | ||
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c index 681b639f5133..ed71ad40e8f7 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c | |||
@@ -183,8 +183,8 @@ static void uninitialize(struct kernel_queue *kq) | |||
183 | { | 183 | { |
184 | if (kq->queue->properties.type == KFD_QUEUE_TYPE_HIQ) | 184 | if (kq->queue->properties.type == KFD_QUEUE_TYPE_HIQ) |
185 | kq->mqd->destroy_mqd(kq->mqd, | 185 | kq->mqd->destroy_mqd(kq->mqd, |
186 | NULL, | 186 | kq->queue->mqd, |
187 | false, | 187 | KFD_PREEMPT_TYPE_WAVEFRONT_RESET, |
188 | QUEUE_PREEMPT_DEFAULT_TIMEOUT_MS, | 188 | QUEUE_PREEMPT_DEFAULT_TIMEOUT_MS, |
189 | kq->queue->pipe, | 189 | kq->queue->pipe, |
190 | kq->queue->queue); | 190 | kq->queue->queue); |
@@ -210,6 +210,11 @@ static int acquire_packet_buffer(struct kernel_queue *kq, | |||
210 | uint32_t wptr, rptr; | 210 | uint32_t wptr, rptr; |
211 | unsigned int *queue_address; | 211 | unsigned int *queue_address; |
212 | 212 | ||
213 | /* When rptr == wptr, the buffer is empty. | ||
214 | * When rptr == wptr + 1, the buffer is full. | ||
215 | * It is always rptr that advances to the position of wptr, rather than | ||
216 | * the opposite. So we can only use up to queue_size_dwords - 1 dwords. | ||
217 | */ | ||
213 | rptr = *kq->rptr_kernel; | 218 | rptr = *kq->rptr_kernel; |
214 | wptr = *kq->wptr_kernel; | 219 | wptr = *kq->wptr_kernel; |
215 | queue_address = (unsigned int *)kq->pq_kernel_addr; | 220 | queue_address = (unsigned int *)kq->pq_kernel_addr; |
@@ -219,11 +224,10 @@ static int acquire_packet_buffer(struct kernel_queue *kq, | |||
219 | pr_debug("wptr: %d\n", wptr); | 224 | pr_debug("wptr: %d\n", wptr); |
220 | pr_debug("queue_address 0x%p\n", queue_address); | 225 | pr_debug("queue_address 0x%p\n", queue_address); |
221 | 226 | ||
222 | available_size = (rptr - 1 - wptr + queue_size_dwords) % | 227 | available_size = (rptr + queue_size_dwords - 1 - wptr) % |
223 | queue_size_dwords; | 228 | queue_size_dwords; |
224 | 229 | ||
225 | if (packet_size_in_dwords >= queue_size_dwords || | 230 | if (packet_size_in_dwords > available_size) { |
226 | packet_size_in_dwords >= available_size) { | ||
227 | /* | 231 | /* |
228 | * make sure calling functions know | 232 | * make sure calling functions know |
229 | * acquire_packet_buffer() failed | 233 | * acquire_packet_buffer() failed |
@@ -233,6 +237,14 @@ static int acquire_packet_buffer(struct kernel_queue *kq, | |||
233 | } | 237 | } |
234 | 238 | ||
235 | if (wptr + packet_size_in_dwords >= queue_size_dwords) { | 239 | if (wptr + packet_size_in_dwords >= queue_size_dwords) { |
240 | /* make sure after rolling back to position 0, there is | ||
241 | * still enough space. | ||
242 | */ | ||
243 | if (packet_size_in_dwords >= rptr) { | ||
244 | *buffer_ptr = NULL; | ||
245 | return -ENOMEM; | ||
246 | } | ||
247 | /* fill nops, roll back and start at position 0 */ | ||
236 | while (wptr > 0) { | 248 | while (wptr > 0) { |
237 | queue_address[wptr] = kq->nop_packet; | 249 | queue_address[wptr] = kq->nop_packet; |
238 | wptr = (wptr + 1) % queue_size_dwords; | 250 | wptr = (wptr + 1) % queue_size_dwords; |
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h index b397ec726400..b87e96cee5fa 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h | |||
@@ -521,6 +521,7 @@ struct kfd_process { | |||
521 | struct list_head signal_event_pages; | 521 | struct list_head signal_event_pages; |
522 | u32 next_nonsignal_event_id; | 522 | u32 next_nonsignal_event_id; |
523 | size_t signal_event_count; | 523 | size_t signal_event_count; |
524 | bool signal_event_limit_reached; | ||
524 | }; | 525 | }; |
525 | 526 | ||
526 | /** | 527 | /** |
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c index 1cae95e2b13a..03bec765b03d 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c | |||
@@ -143,7 +143,6 @@ int pqm_create_queue(struct process_queue_manager *pqm, | |||
143 | int num_queues = 0; | 143 | int num_queues = 0; |
144 | struct queue *cur; | 144 | struct queue *cur; |
145 | 145 | ||
146 | memset(&q_properties, 0, sizeof(struct queue_properties)); | ||
147 | memcpy(&q_properties, properties, sizeof(struct queue_properties)); | 146 | memcpy(&q_properties, properties, sizeof(struct queue_properties)); |
148 | q = NULL; | 147 | q = NULL; |
149 | kq = NULL; | 148 | kq = NULL; |
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.c b/drivers/gpu/drm/etnaviv/etnaviv_gem.c index 5a634594a6ce..57881167ccd2 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_gem.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.c | |||
@@ -551,12 +551,15 @@ static const struct etnaviv_gem_ops etnaviv_gem_shmem_ops = { | |||
551 | void etnaviv_gem_free_object(struct drm_gem_object *obj) | 551 | void etnaviv_gem_free_object(struct drm_gem_object *obj) |
552 | { | 552 | { |
553 | struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj); | 553 | struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj); |
554 | struct etnaviv_drm_private *priv = obj->dev->dev_private; | ||
554 | struct etnaviv_vram_mapping *mapping, *tmp; | 555 | struct etnaviv_vram_mapping *mapping, *tmp; |
555 | 556 | ||
556 | /* object should not be active */ | 557 | /* object should not be active */ |
557 | WARN_ON(is_active(etnaviv_obj)); | 558 | WARN_ON(is_active(etnaviv_obj)); |
558 | 559 | ||
560 | mutex_lock(&priv->gem_lock); | ||
559 | list_del(&etnaviv_obj->gem_node); | 561 | list_del(&etnaviv_obj->gem_node); |
562 | mutex_unlock(&priv->gem_lock); | ||
560 | 563 | ||
561 | list_for_each_entry_safe(mapping, tmp, &etnaviv_obj->vram_list, | 564 | list_for_each_entry_safe(mapping, tmp, &etnaviv_obj->vram_list, |
562 | obj_node) { | 565 | obj_node) { |
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c index 026ef4e02f85..46dfe0737f43 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c | |||
@@ -445,8 +445,10 @@ int etnaviv_ioctl_gem_submit(struct drm_device *dev, void *data, | |||
445 | cmdbuf->user_size = ALIGN(args->stream_size, 8); | 445 | cmdbuf->user_size = ALIGN(args->stream_size, 8); |
446 | 446 | ||
447 | ret = etnaviv_gpu_submit(gpu, submit, cmdbuf); | 447 | ret = etnaviv_gpu_submit(gpu, submit, cmdbuf); |
448 | if (ret == 0) | 448 | if (ret) |
449 | cmdbuf = NULL; | 449 | goto out; |
450 | |||
451 | cmdbuf = NULL; | ||
450 | 452 | ||
451 | if (args->flags & ETNA_SUBMIT_FENCE_FD_OUT) { | 453 | if (args->flags & ETNA_SUBMIT_FENCE_FD_OUT) { |
452 | /* | 454 | /* |
diff --git a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c index 730b8d9db187..6be5b53c3b27 100644 --- a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c +++ b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c | |||
@@ -14,6 +14,7 @@ | |||
14 | #include <linux/clk.h> | 14 | #include <linux/clk.h> |
15 | #include <linux/component.h> | 15 | #include <linux/component.h> |
16 | #include <linux/iopoll.h> | 16 | #include <linux/iopoll.h> |
17 | #include <linux/irq.h> | ||
17 | #include <linux/mfd/syscon.h> | 18 | #include <linux/mfd/syscon.h> |
18 | #include <linux/of_device.h> | 19 | #include <linux/of_device.h> |
19 | #include <linux/of_gpio.h> | 20 | #include <linux/of_gpio.h> |
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c index b1f7299600f0..e651a58c18cf 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_drv.c +++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c | |||
@@ -168,23 +168,19 @@ static struct drm_driver exynos_drm_driver = { | |||
168 | static int exynos_drm_suspend(struct device *dev) | 168 | static int exynos_drm_suspend(struct device *dev) |
169 | { | 169 | { |
170 | struct drm_device *drm_dev = dev_get_drvdata(dev); | 170 | struct drm_device *drm_dev = dev_get_drvdata(dev); |
171 | struct drm_connector *connector; | 171 | struct exynos_drm_private *private = drm_dev->dev_private; |
172 | struct drm_connector_list_iter conn_iter; | ||
173 | 172 | ||
174 | if (pm_runtime_suspended(dev) || !drm_dev) | 173 | if (pm_runtime_suspended(dev) || !drm_dev) |
175 | return 0; | 174 | return 0; |
176 | 175 | ||
177 | drm_connector_list_iter_begin(drm_dev, &conn_iter); | 176 | drm_kms_helper_poll_disable(drm_dev); |
178 | drm_for_each_connector_iter(connector, &conn_iter) { | 177 | exynos_drm_fbdev_suspend(drm_dev); |
179 | int old_dpms = connector->dpms; | 178 | private->suspend_state = drm_atomic_helper_suspend(drm_dev); |
180 | 179 | if (IS_ERR(private->suspend_state)) { | |
181 | if (connector->funcs->dpms) | 180 | exynos_drm_fbdev_resume(drm_dev); |
182 | connector->funcs->dpms(connector, DRM_MODE_DPMS_OFF); | 181 | drm_kms_helper_poll_enable(drm_dev); |
183 | 182 | return PTR_ERR(private->suspend_state); | |
184 | /* Set the old mode back to the connector for resume */ | ||
185 | connector->dpms = old_dpms; | ||
186 | } | 183 | } |
187 | drm_connector_list_iter_end(&conn_iter); | ||
188 | 184 | ||
189 | return 0; | 185 | return 0; |
190 | } | 186 | } |
@@ -192,22 +188,14 @@ static int exynos_drm_suspend(struct device *dev) | |||
192 | static int exynos_drm_resume(struct device *dev) | 188 | static int exynos_drm_resume(struct device *dev) |
193 | { | 189 | { |
194 | struct drm_device *drm_dev = dev_get_drvdata(dev); | 190 | struct drm_device *drm_dev = dev_get_drvdata(dev); |
195 | struct drm_connector *connector; | 191 | struct exynos_drm_private *private = drm_dev->dev_private; |
196 | struct drm_connector_list_iter conn_iter; | ||
197 | 192 | ||
198 | if (pm_runtime_suspended(dev) || !drm_dev) | 193 | if (pm_runtime_suspended(dev) || !drm_dev) |
199 | return 0; | 194 | return 0; |
200 | 195 | ||
201 | drm_connector_list_iter_begin(drm_dev, &conn_iter); | 196 | drm_atomic_helper_resume(drm_dev, private->suspend_state); |
202 | drm_for_each_connector_iter(connector, &conn_iter) { | 197 | exynos_drm_fbdev_resume(drm_dev); |
203 | if (connector->funcs->dpms) { | 198 | drm_kms_helper_poll_enable(drm_dev); |
204 | int dpms = connector->dpms; | ||
205 | |||
206 | connector->dpms = DRM_MODE_DPMS_OFF; | ||
207 | connector->funcs->dpms(connector, dpms); | ||
208 | } | ||
209 | } | ||
210 | drm_connector_list_iter_end(&conn_iter); | ||
211 | 199 | ||
212 | return 0; | 200 | return 0; |
213 | } | 201 | } |
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.h b/drivers/gpu/drm/exynos/exynos_drm_drv.h index cf131c2aa23e..f8bae4cb4823 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_drv.h +++ b/drivers/gpu/drm/exynos/exynos_drm_drv.h | |||
@@ -202,6 +202,7 @@ struct drm_exynos_file_private { | |||
202 | */ | 202 | */ |
203 | struct exynos_drm_private { | 203 | struct exynos_drm_private { |
204 | struct drm_fb_helper *fb_helper; | 204 | struct drm_fb_helper *fb_helper; |
205 | struct drm_atomic_state *suspend_state; | ||
205 | 206 | ||
206 | struct device *dma_dev; | 207 | struct device *dma_dev; |
207 | void *mapping; | 208 | void *mapping; |
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c index c3a068409b48..dfb66ecf417b 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c +++ b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c | |||
@@ -18,6 +18,8 @@ | |||
18 | #include <drm/drm_crtc_helper.h> | 18 | #include <drm/drm_crtc_helper.h> |
19 | #include <drm/exynos_drm.h> | 19 | #include <drm/exynos_drm.h> |
20 | 20 | ||
21 | #include <linux/console.h> | ||
22 | |||
21 | #include "exynos_drm_drv.h" | 23 | #include "exynos_drm_drv.h" |
22 | #include "exynos_drm_fb.h" | 24 | #include "exynos_drm_fb.h" |
23 | #include "exynos_drm_fbdev.h" | 25 | #include "exynos_drm_fbdev.h" |
@@ -285,3 +287,21 @@ void exynos_drm_output_poll_changed(struct drm_device *dev) | |||
285 | 287 | ||
286 | drm_fb_helper_hotplug_event(fb_helper); | 288 | drm_fb_helper_hotplug_event(fb_helper); |
287 | } | 289 | } |
290 | |||
291 | void exynos_drm_fbdev_suspend(struct drm_device *dev) | ||
292 | { | ||
293 | struct exynos_drm_private *private = dev->dev_private; | ||
294 | |||
295 | console_lock(); | ||
296 | drm_fb_helper_set_suspend(private->fb_helper, 1); | ||
297 | console_unlock(); | ||
298 | } | ||
299 | |||
300 | void exynos_drm_fbdev_resume(struct drm_device *dev) | ||
301 | { | ||
302 | struct exynos_drm_private *private = dev->dev_private; | ||
303 | |||
304 | console_lock(); | ||
305 | drm_fb_helper_set_suspend(private->fb_helper, 0); | ||
306 | console_unlock(); | ||
307 | } | ||
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fbdev.h b/drivers/gpu/drm/exynos/exynos_drm_fbdev.h index 330eef87f718..645d1bb7f665 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_fbdev.h +++ b/drivers/gpu/drm/exynos/exynos_drm_fbdev.h | |||
@@ -21,6 +21,8 @@ int exynos_drm_fbdev_init(struct drm_device *dev); | |||
21 | void exynos_drm_fbdev_fini(struct drm_device *dev); | 21 | void exynos_drm_fbdev_fini(struct drm_device *dev); |
22 | void exynos_drm_fbdev_restore_mode(struct drm_device *dev); | 22 | void exynos_drm_fbdev_restore_mode(struct drm_device *dev); |
23 | void exynos_drm_output_poll_changed(struct drm_device *dev); | 23 | void exynos_drm_output_poll_changed(struct drm_device *dev); |
24 | void exynos_drm_fbdev_suspend(struct drm_device *drm); | ||
25 | void exynos_drm_fbdev_resume(struct drm_device *drm); | ||
24 | 26 | ||
25 | #else | 27 | #else |
26 | 28 | ||
@@ -39,6 +41,14 @@ static inline void exynos_drm_fbdev_restore_mode(struct drm_device *dev) | |||
39 | 41 | ||
40 | #define exynos_drm_output_poll_changed (NULL) | 42 | #define exynos_drm_output_poll_changed (NULL) |
41 | 43 | ||
44 | static inline void exynos_drm_fbdev_suspend(struct drm_device *drm) | ||
45 | { | ||
46 | } | ||
47 | |||
48 | static inline void exynos_drm_fbdev_resume(struct drm_device *drm) | ||
49 | { | ||
50 | } | ||
51 | |||
42 | #endif | 52 | #endif |
43 | 53 | ||
44 | #endif | 54 | #endif |
diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.c b/drivers/gpu/drm/exynos/exynos_hdmi.c index 214fa5e51963..0109ff40b1db 100644 --- a/drivers/gpu/drm/exynos/exynos_hdmi.c +++ b/drivers/gpu/drm/exynos/exynos_hdmi.c | |||
@@ -944,22 +944,27 @@ static bool hdmi_mode_fixup(struct drm_encoder *encoder, | |||
944 | struct drm_device *dev = encoder->dev; | 944 | struct drm_device *dev = encoder->dev; |
945 | struct drm_connector *connector; | 945 | struct drm_connector *connector; |
946 | struct drm_display_mode *m; | 946 | struct drm_display_mode *m; |
947 | struct drm_connector_list_iter conn_iter; | ||
947 | int mode_ok; | 948 | int mode_ok; |
948 | 949 | ||
949 | drm_mode_set_crtcinfo(adjusted_mode, 0); | 950 | drm_mode_set_crtcinfo(adjusted_mode, 0); |
950 | 951 | ||
951 | list_for_each_entry(connector, &dev->mode_config.connector_list, head) { | 952 | drm_connector_list_iter_begin(dev, &conn_iter); |
953 | drm_for_each_connector_iter(connector, &conn_iter) { | ||
952 | if (connector->encoder == encoder) | 954 | if (connector->encoder == encoder) |
953 | break; | 955 | break; |
954 | } | 956 | } |
957 | if (connector) | ||
958 | drm_connector_get(connector); | ||
959 | drm_connector_list_iter_end(&conn_iter); | ||
955 | 960 | ||
956 | if (connector->encoder != encoder) | 961 | if (!connector) |
957 | return true; | 962 | return true; |
958 | 963 | ||
959 | mode_ok = hdmi_mode_valid(connector, adjusted_mode); | 964 | mode_ok = hdmi_mode_valid(connector, adjusted_mode); |
960 | 965 | ||
961 | if (mode_ok == MODE_OK) | 966 | if (mode_ok == MODE_OK) |
962 | return true; | 967 | goto cleanup; |
963 | 968 | ||
964 | /* | 969 | /* |
965 | * Find the most suitable mode and copy it to adjusted_mode. | 970 | * Find the most suitable mode and copy it to adjusted_mode. |
@@ -979,6 +984,9 @@ static bool hdmi_mode_fixup(struct drm_encoder *encoder, | |||
979 | } | 984 | } |
980 | } | 985 | } |
981 | 986 | ||
987 | cleanup: | ||
988 | drm_connector_put(connector); | ||
989 | |||
982 | return true; | 990 | return true; |
983 | } | 991 | } |
984 | 992 | ||
diff --git a/drivers/gpu/drm/i915/gvt/cfg_space.c b/drivers/gpu/drm/i915/gvt/cfg_space.c index 40af17ec6312..ff3154fe6588 100644 --- a/drivers/gpu/drm/i915/gvt/cfg_space.c +++ b/drivers/gpu/drm/i915/gvt/cfg_space.c | |||
@@ -197,78 +197,65 @@ static int emulate_pci_command_write(struct intel_vgpu *vgpu, | |||
197 | static int emulate_pci_bar_write(struct intel_vgpu *vgpu, unsigned int offset, | 197 | static int emulate_pci_bar_write(struct intel_vgpu *vgpu, unsigned int offset, |
198 | void *p_data, unsigned int bytes) | 198 | void *p_data, unsigned int bytes) |
199 | { | 199 | { |
200 | unsigned int bar_index = | ||
201 | (rounddown(offset, 8) % PCI_BASE_ADDRESS_0) / 8; | ||
202 | u32 new = *(u32 *)(p_data); | 200 | u32 new = *(u32 *)(p_data); |
203 | bool lo = IS_ALIGNED(offset, 8); | 201 | bool lo = IS_ALIGNED(offset, 8); |
204 | u64 size; | 202 | u64 size; |
205 | int ret = 0; | 203 | int ret = 0; |
206 | bool mmio_enabled = | 204 | bool mmio_enabled = |
207 | vgpu_cfg_space(vgpu)[PCI_COMMAND] & PCI_COMMAND_MEMORY; | 205 | vgpu_cfg_space(vgpu)[PCI_COMMAND] & PCI_COMMAND_MEMORY; |
206 | struct intel_vgpu_pci_bar *bars = vgpu->cfg_space.bar; | ||
208 | 207 | ||
209 | if (WARN_ON(bar_index >= INTEL_GVT_PCI_BAR_MAX)) | 208 | /* |
210 | return -EINVAL; | 209 | * Power-up software can determine how much address |
211 | 210 | * space the device requires by writing a value of | |
211 | * all 1's to the register and then reading the value | ||
212 | * back. The device will return 0's in all don't-care | ||
213 | * address bits. | ||
214 | */ | ||
212 | if (new == 0xffffffff) { | 215 | if (new == 0xffffffff) { |
213 | /* | 216 | switch (offset) { |
214 | * Power-up software can determine how much address | 217 | case PCI_BASE_ADDRESS_0: |
215 | * space the device requires by writing a value of | 218 | case PCI_BASE_ADDRESS_1: |
216 | * all 1's to the register and then reading the value | 219 | size = ~(bars[INTEL_GVT_PCI_BAR_GTTMMIO].size -1); |
217 | * back. The device will return 0's in all don't-care | 220 | intel_vgpu_write_pci_bar(vgpu, offset, |
218 | * address bits. | 221 | size >> (lo ? 0 : 32), lo); |
219 | */ | 222 | /* |
220 | size = vgpu->cfg_space.bar[bar_index].size; | 223 | * Untrap the BAR, since guest hasn't configured a |
221 | if (lo) { | 224 | * valid GPA |
222 | new = rounddown(new, size); | ||
223 | } else { | ||
224 | u32 val = vgpu_cfg_space(vgpu)[rounddown(offset, 8)]; | ||
225 | /* for 32bit mode bar it returns all-0 in upper 32 | ||
226 | * bit, for 64bit mode bar it will calculate the | ||
227 | * size with lower 32bit and return the corresponding | ||
228 | * value | ||
229 | */ | 225 | */ |
230 | if (val & PCI_BASE_ADDRESS_MEM_TYPE_64) | ||
231 | new &= (~(size-1)) >> 32; | ||
232 | else | ||
233 | new = 0; | ||
234 | } | ||
235 | /* | ||
236 | * Unmapp & untrap the BAR, since guest hasn't configured a | ||
237 | * valid GPA | ||
238 | */ | ||
239 | switch (bar_index) { | ||
240 | case INTEL_GVT_PCI_BAR_GTTMMIO: | ||
241 | ret = trap_gttmmio(vgpu, false); | 226 | ret = trap_gttmmio(vgpu, false); |
242 | break; | 227 | break; |
243 | case INTEL_GVT_PCI_BAR_APERTURE: | 228 | case PCI_BASE_ADDRESS_2: |
229 | case PCI_BASE_ADDRESS_3: | ||
230 | size = ~(bars[INTEL_GVT_PCI_BAR_APERTURE].size -1); | ||
231 | intel_vgpu_write_pci_bar(vgpu, offset, | ||
232 | size >> (lo ? 0 : 32), lo); | ||
244 | ret = map_aperture(vgpu, false); | 233 | ret = map_aperture(vgpu, false); |
245 | break; | 234 | break; |
235 | default: | ||
236 | /* Unimplemented BARs */ | ||
237 | intel_vgpu_write_pci_bar(vgpu, offset, 0x0, false); | ||
246 | } | 238 | } |
247 | intel_vgpu_write_pci_bar(vgpu, offset, new, lo); | ||
248 | } else { | 239 | } else { |
249 | /* | 240 | switch (offset) { |
250 | * Unmapp & untrap the old BAR first, since guest has | 241 | case PCI_BASE_ADDRESS_0: |
251 | * re-configured the BAR | 242 | case PCI_BASE_ADDRESS_1: |
252 | */ | 243 | /* |
253 | switch (bar_index) { | 244 | * Untrap the old BAR first, since guest has |
254 | case INTEL_GVT_PCI_BAR_GTTMMIO: | 245 | * re-configured the BAR |
255 | ret = trap_gttmmio(vgpu, false); | 246 | */ |
247 | trap_gttmmio(vgpu, false); | ||
248 | intel_vgpu_write_pci_bar(vgpu, offset, new, lo); | ||
249 | ret = trap_gttmmio(vgpu, mmio_enabled); | ||
256 | break; | 250 | break; |
257 | case INTEL_GVT_PCI_BAR_APERTURE: | 251 | case PCI_BASE_ADDRESS_2: |
258 | ret = map_aperture(vgpu, false); | 252 | case PCI_BASE_ADDRESS_3: |
253 | map_aperture(vgpu, false); | ||
254 | intel_vgpu_write_pci_bar(vgpu, offset, new, lo); | ||
255 | ret = map_aperture(vgpu, mmio_enabled); | ||
259 | break; | 256 | break; |
260 | } | 257 | default: |
261 | intel_vgpu_write_pci_bar(vgpu, offset, new, lo); | 258 | intel_vgpu_write_pci_bar(vgpu, offset, new, lo); |
262 | /* Track the new BAR */ | ||
263 | if (mmio_enabled) { | ||
264 | switch (bar_index) { | ||
265 | case INTEL_GVT_PCI_BAR_GTTMMIO: | ||
266 | ret = trap_gttmmio(vgpu, true); | ||
267 | break; | ||
268 | case INTEL_GVT_PCI_BAR_APERTURE: | ||
269 | ret = map_aperture(vgpu, true); | ||
270 | break; | ||
271 | } | ||
272 | } | 259 | } |
273 | } | 260 | } |
274 | return ret; | 261 | return ret; |
@@ -299,10 +286,7 @@ int intel_vgpu_emulate_cfg_write(struct intel_vgpu *vgpu, unsigned int offset, | |||
299 | } | 286 | } |
300 | 287 | ||
301 | switch (rounddown(offset, 4)) { | 288 | switch (rounddown(offset, 4)) { |
302 | case PCI_BASE_ADDRESS_0: | 289 | case PCI_BASE_ADDRESS_0 ... PCI_BASE_ADDRESS_5: |
303 | case PCI_BASE_ADDRESS_1: | ||
304 | case PCI_BASE_ADDRESS_2: | ||
305 | case PCI_BASE_ADDRESS_3: | ||
306 | if (WARN_ON(!IS_ALIGNED(offset, 4))) | 290 | if (WARN_ON(!IS_ALIGNED(offset, 4))) |
307 | return -EINVAL; | 291 | return -EINVAL; |
308 | return emulate_pci_bar_write(vgpu, offset, p_data, bytes); | 292 | return emulate_pci_bar_write(vgpu, offset, p_data, bytes); |
@@ -344,7 +328,6 @@ void intel_vgpu_init_cfg_space(struct intel_vgpu *vgpu, | |||
344 | struct intel_gvt *gvt = vgpu->gvt; | 328 | struct intel_gvt *gvt = vgpu->gvt; |
345 | const struct intel_gvt_device_info *info = &gvt->device_info; | 329 | const struct intel_gvt_device_info *info = &gvt->device_info; |
346 | u16 *gmch_ctl; | 330 | u16 *gmch_ctl; |
347 | int i; | ||
348 | 331 | ||
349 | memcpy(vgpu_cfg_space(vgpu), gvt->firmware.cfg_space, | 332 | memcpy(vgpu_cfg_space(vgpu), gvt->firmware.cfg_space, |
350 | info->cfg_space_size); | 333 | info->cfg_space_size); |
@@ -371,13 +354,13 @@ void intel_vgpu_init_cfg_space(struct intel_vgpu *vgpu, | |||
371 | */ | 354 | */ |
372 | memset(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_1, 0, 4); | 355 | memset(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_1, 0, 4); |
373 | memset(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_3, 0, 4); | 356 | memset(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_3, 0, 4); |
357 | memset(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_4, 0, 8); | ||
374 | memset(vgpu_cfg_space(vgpu) + INTEL_GVT_PCI_OPREGION, 0, 4); | 358 | memset(vgpu_cfg_space(vgpu) + INTEL_GVT_PCI_OPREGION, 0, 4); |
375 | 359 | ||
376 | for (i = 0; i < INTEL_GVT_MAX_BAR_NUM; i++) { | 360 | vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_GTTMMIO].size = |
377 | vgpu->cfg_space.bar[i].size = pci_resource_len( | 361 | pci_resource_len(gvt->dev_priv->drm.pdev, 0); |
378 | gvt->dev_priv->drm.pdev, i * 2); | 362 | vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_APERTURE].size = |
379 | vgpu->cfg_space.bar[i].tracked = false; | 363 | pci_resource_len(gvt->dev_priv->drm.pdev, 2); |
380 | } | ||
381 | } | 364 | } |
382 | 365 | ||
383 | /** | 366 | /** |
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index e21ce9c18b6e..b63893eeca73 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c | |||
@@ -839,7 +839,6 @@ static bool i915_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe, | |||
839 | pipe); | 839 | pipe); |
840 | int position; | 840 | int position; |
841 | int vbl_start, vbl_end, hsync_start, htotal, vtotal; | 841 | int vbl_start, vbl_end, hsync_start, htotal, vtotal; |
842 | bool in_vbl = true; | ||
843 | unsigned long irqflags; | 842 | unsigned long irqflags; |
844 | 843 | ||
845 | if (WARN_ON(!mode->crtc_clock)) { | 844 | if (WARN_ON(!mode->crtc_clock)) { |
@@ -922,8 +921,6 @@ static bool i915_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe, | |||
922 | 921 | ||
923 | spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); | 922 | spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); |
924 | 923 | ||
925 | in_vbl = position >= vbl_start && position < vbl_end; | ||
926 | |||
927 | /* | 924 | /* |
928 | * While in vblank, position will be negative | 925 | * While in vblank, position will be negative |
929 | * counting up towards 0 at vbl_end. And outside | 926 | * counting up towards 0 at vbl_end. And outside |
diff --git a/drivers/gpu/drm/i915/intel_audio.c b/drivers/gpu/drm/i915/intel_audio.c index d805b6e6fe71..27743be5b768 100644 --- a/drivers/gpu/drm/i915/intel_audio.c +++ b/drivers/gpu/drm/i915/intel_audio.c | |||
@@ -606,11 +606,6 @@ void intel_audio_codec_enable(struct intel_encoder *intel_encoder, | |||
606 | connector->encoder->base.id, | 606 | connector->encoder->base.id, |
607 | connector->encoder->name); | 607 | connector->encoder->name); |
608 | 608 | ||
609 | /* ELD Conn_Type */ | ||
610 | connector->eld[5] &= ~(3 << 2); | ||
611 | if (intel_crtc_has_dp_encoder(crtc_state)) | ||
612 | connector->eld[5] |= (1 << 2); | ||
613 | |||
614 | connector->eld[6] = drm_av_sync_delay(connector, adjusted_mode) / 2; | 609 | connector->eld[6] = drm_av_sync_delay(connector, adjusted_mode) / 2; |
615 | 610 | ||
616 | if (dev_priv->display.audio_codec_enable) | 611 | if (dev_priv->display.audio_codec_enable) |
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c index 183e87e8ea31..00c6aee0a9a1 100644 --- a/drivers/gpu/drm/i915/intel_bios.c +++ b/drivers/gpu/drm/i915/intel_bios.c | |||
@@ -1163,6 +1163,13 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port, | |||
1163 | is_hdmi = is_dvi && (child->common.device_type & DEVICE_TYPE_NOT_HDMI_OUTPUT) == 0; | 1163 | is_hdmi = is_dvi && (child->common.device_type & DEVICE_TYPE_NOT_HDMI_OUTPUT) == 0; |
1164 | is_edp = is_dp && (child->common.device_type & DEVICE_TYPE_INTERNAL_CONNECTOR); | 1164 | is_edp = is_dp && (child->common.device_type & DEVICE_TYPE_INTERNAL_CONNECTOR); |
1165 | 1165 | ||
1166 | if (port == PORT_A && is_dvi) { | ||
1167 | DRM_DEBUG_KMS("VBT claims port A supports DVI%s, ignoring\n", | ||
1168 | is_hdmi ? "/HDMI" : ""); | ||
1169 | is_dvi = false; | ||
1170 | is_hdmi = false; | ||
1171 | } | ||
1172 | |||
1166 | info->supports_dvi = is_dvi; | 1173 | info->supports_dvi = is_dvi; |
1167 | info->supports_hdmi = is_hdmi; | 1174 | info->supports_hdmi = is_hdmi; |
1168 | info->supports_dp = is_dp; | 1175 | info->supports_dp = is_dp; |
diff --git a/drivers/gpu/drm/i915/intel_csr.c b/drivers/gpu/drm/i915/intel_csr.c index 965988f79a55..92c1f8e166dc 100644 --- a/drivers/gpu/drm/i915/intel_csr.c +++ b/drivers/gpu/drm/i915/intel_csr.c | |||
@@ -216,7 +216,7 @@ static void gen9_set_dc_state_debugmask(struct drm_i915_private *dev_priv) | |||
216 | 216 | ||
217 | mask = DC_STATE_DEBUG_MASK_MEMORY_UP; | 217 | mask = DC_STATE_DEBUG_MASK_MEMORY_UP; |
218 | 218 | ||
219 | if (IS_BROXTON(dev_priv)) | 219 | if (IS_GEN9_LP(dev_priv)) |
220 | mask |= DC_STATE_DEBUG_MASK_CORES; | 220 | mask |= DC_STATE_DEBUG_MASK_CORES; |
221 | 221 | ||
222 | /* The below bit doesn't need to be cleared ever afterwards */ | 222 | /* The below bit doesn't need to be cleared ever afterwards */ |
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c index 4b4fd1f8110b..476681d5940c 100644 --- a/drivers/gpu/drm/i915/intel_ddi.c +++ b/drivers/gpu/drm/i915/intel_ddi.c | |||
@@ -1655,7 +1655,8 @@ bool intel_ddi_get_hw_state(struct intel_encoder *encoder, | |||
1655 | out: | 1655 | out: |
1656 | if (ret && IS_GEN9_LP(dev_priv)) { | 1656 | if (ret && IS_GEN9_LP(dev_priv)) { |
1657 | tmp = I915_READ(BXT_PHY_CTL(port)); | 1657 | tmp = I915_READ(BXT_PHY_CTL(port)); |
1658 | if ((tmp & (BXT_PHY_LANE_POWERDOWN_ACK | | 1658 | if ((tmp & (BXT_PHY_CMNLANE_POWERDOWN_ACK | |
1659 | BXT_PHY_LANE_POWERDOWN_ACK | | ||
1659 | BXT_PHY_LANE_ENABLED)) != BXT_PHY_LANE_ENABLED) | 1660 | BXT_PHY_LANE_ENABLED)) != BXT_PHY_LANE_ENABLED) |
1660 | DRM_ERROR("Port %c enabled but PHY powered down? " | 1661 | DRM_ERROR("Port %c enabled but PHY powered down? " |
1661 | "(PHY_CTL %08x)\n", port_name(port), tmp); | 1662 | "(PHY_CTL %08x)\n", port_name(port), tmp); |
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index f17275519484..64f7b51ed97c 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c | |||
@@ -12359,7 +12359,6 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state) | |||
12359 | struct drm_crtc_state *old_crtc_state, *new_crtc_state; | 12359 | struct drm_crtc_state *old_crtc_state, *new_crtc_state; |
12360 | struct drm_crtc *crtc; | 12360 | struct drm_crtc *crtc; |
12361 | struct intel_crtc_state *intel_cstate; | 12361 | struct intel_crtc_state *intel_cstate; |
12362 | bool hw_check = intel_state->modeset; | ||
12363 | u64 put_domains[I915_MAX_PIPES] = {}; | 12362 | u64 put_domains[I915_MAX_PIPES] = {}; |
12364 | unsigned crtc_vblank_mask = 0; | 12363 | unsigned crtc_vblank_mask = 0; |
12365 | int i; | 12364 | int i; |
@@ -12376,7 +12375,6 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state) | |||
12376 | 12375 | ||
12377 | if (needs_modeset(new_crtc_state) || | 12376 | if (needs_modeset(new_crtc_state) || |
12378 | to_intel_crtc_state(new_crtc_state)->update_pipe) { | 12377 | to_intel_crtc_state(new_crtc_state)->update_pipe) { |
12379 | hw_check = true; | ||
12380 | 12378 | ||
12381 | put_domains[to_intel_crtc(crtc)->pipe] = | 12379 | put_domains[to_intel_crtc(crtc)->pipe] = |
12382 | modeset_get_crtc_power_domains(crtc, | 12380 | modeset_get_crtc_power_domains(crtc, |
@@ -14030,7 +14028,7 @@ static int intel_framebuffer_init(struct intel_framebuffer *intel_fb, | |||
14030 | 14028 | ||
14031 | if (mode_cmd->handles[i] != mode_cmd->handles[0]) { | 14029 | if (mode_cmd->handles[i] != mode_cmd->handles[0]) { |
14032 | DRM_DEBUG_KMS("bad plane %d handle\n", i); | 14030 | DRM_DEBUG_KMS("bad plane %d handle\n", i); |
14033 | return -EINVAL; | 14031 | goto err; |
14034 | } | 14032 | } |
14035 | 14033 | ||
14036 | stride_alignment = intel_fb_stride_alignment(fb, i); | 14034 | stride_alignment = intel_fb_stride_alignment(fb, i); |
diff --git a/drivers/gpu/drm/i915/intel_dpio_phy.c b/drivers/gpu/drm/i915/intel_dpio_phy.c index 09b670929786..de38d014ed39 100644 --- a/drivers/gpu/drm/i915/intel_dpio_phy.c +++ b/drivers/gpu/drm/i915/intel_dpio_phy.c | |||
@@ -208,12 +208,6 @@ static const struct bxt_ddi_phy_info glk_ddi_phy_info[] = { | |||
208 | }, | 208 | }, |
209 | }; | 209 | }; |
210 | 210 | ||
211 | static u32 bxt_phy_port_mask(const struct bxt_ddi_phy_info *phy_info) | ||
212 | { | ||
213 | return (phy_info->dual_channel * BIT(phy_info->channel[DPIO_CH1].port)) | | ||
214 | BIT(phy_info->channel[DPIO_CH0].port); | ||
215 | } | ||
216 | |||
217 | static const struct bxt_ddi_phy_info * | 211 | static const struct bxt_ddi_phy_info * |
218 | bxt_get_phy_list(struct drm_i915_private *dev_priv, int *count) | 212 | bxt_get_phy_list(struct drm_i915_private *dev_priv, int *count) |
219 | { | 213 | { |
@@ -313,7 +307,6 @@ bool bxt_ddi_phy_is_enabled(struct drm_i915_private *dev_priv, | |||
313 | enum dpio_phy phy) | 307 | enum dpio_phy phy) |
314 | { | 308 | { |
315 | const struct bxt_ddi_phy_info *phy_info; | 309 | const struct bxt_ddi_phy_info *phy_info; |
316 | enum port port; | ||
317 | 310 | ||
318 | phy_info = bxt_get_phy_info(dev_priv, phy); | 311 | phy_info = bxt_get_phy_info(dev_priv, phy); |
319 | 312 | ||
@@ -335,19 +328,6 @@ bool bxt_ddi_phy_is_enabled(struct drm_i915_private *dev_priv, | |||
335 | return false; | 328 | return false; |
336 | } | 329 | } |
337 | 330 | ||
338 | for_each_port_masked(port, bxt_phy_port_mask(phy_info)) { | ||
339 | u32 tmp = I915_READ(BXT_PHY_CTL(port)); | ||
340 | |||
341 | if (tmp & BXT_PHY_CMNLANE_POWERDOWN_ACK) { | ||
342 | DRM_DEBUG_DRIVER("DDI PHY %d powered, but common lane " | ||
343 | "for port %c powered down " | ||
344 | "(PHY_CTL %08x)\n", | ||
345 | phy, port_name(port), tmp); | ||
346 | |||
347 | return false; | ||
348 | } | ||
349 | } | ||
350 | |||
351 | return true; | 331 | return true; |
352 | } | 332 | } |
353 | 333 | ||
diff --git a/drivers/gpu/drm/i915/intel_dsi.c b/drivers/gpu/drm/i915/intel_dsi.c index f0c11aec5ea5..7442891762be 100644 --- a/drivers/gpu/drm/i915/intel_dsi.c +++ b/drivers/gpu/drm/i915/intel_dsi.c | |||
@@ -892,8 +892,6 @@ static void intel_dsi_disable(struct intel_encoder *encoder, | |||
892 | struct intel_crtc_state *old_crtc_state, | 892 | struct intel_crtc_state *old_crtc_state, |
893 | struct drm_connector_state *old_conn_state) | 893 | struct drm_connector_state *old_conn_state) |
894 | { | 894 | { |
895 | struct drm_device *dev = encoder->base.dev; | ||
896 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
897 | struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); | 895 | struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); |
898 | enum port port; | 896 | enum port port; |
899 | 897 | ||
@@ -903,15 +901,6 @@ static void intel_dsi_disable(struct intel_encoder *encoder, | |||
903 | intel_panel_disable_backlight(old_conn_state); | 901 | intel_panel_disable_backlight(old_conn_state); |
904 | 902 | ||
905 | /* | 903 | /* |
906 | * Disable Device ready before the port shutdown in order | ||
907 | * to avoid split screen | ||
908 | */ | ||
909 | if (IS_BROXTON(dev_priv)) { | ||
910 | for_each_dsi_port(port, intel_dsi->ports) | ||
911 | I915_WRITE(MIPI_DEVICE_READY(port), 0); | ||
912 | } | ||
913 | |||
914 | /* | ||
915 | * According to the spec we should send SHUTDOWN before | 904 | * According to the spec we should send SHUTDOWN before |
916 | * MIPI_SEQ_DISPLAY_OFF only for v3+ VBTs, but field testing | 905 | * MIPI_SEQ_DISPLAY_OFF only for v3+ VBTs, but field testing |
917 | * has shown that the v3 sequence works for v2 VBTs too | 906 | * has shown that the v3 sequence works for v2 VBTs too |
diff --git a/drivers/gpu/drm/i915/intel_modes.c b/drivers/gpu/drm/i915/intel_modes.c index 951e834dd274..28a778b785ac 100644 --- a/drivers/gpu/drm/i915/intel_modes.c +++ b/drivers/gpu/drm/i915/intel_modes.c | |||
@@ -30,6 +30,21 @@ | |||
30 | #include "intel_drv.h" | 30 | #include "intel_drv.h" |
31 | #include "i915_drv.h" | 31 | #include "i915_drv.h" |
32 | 32 | ||
33 | static void intel_connector_update_eld_conn_type(struct drm_connector *connector) | ||
34 | { | ||
35 | u8 conn_type; | ||
36 | |||
37 | if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort || | ||
38 | connector->connector_type == DRM_MODE_CONNECTOR_eDP) { | ||
39 | conn_type = DRM_ELD_CONN_TYPE_DP; | ||
40 | } else { | ||
41 | conn_type = DRM_ELD_CONN_TYPE_HDMI; | ||
42 | } | ||
43 | |||
44 | connector->eld[DRM_ELD_SAD_COUNT_CONN_TYPE] &= ~DRM_ELD_CONN_TYPE_MASK; | ||
45 | connector->eld[DRM_ELD_SAD_COUNT_CONN_TYPE] |= conn_type; | ||
46 | } | ||
47 | |||
33 | /** | 48 | /** |
34 | * intel_connector_update_modes - update connector from edid | 49 | * intel_connector_update_modes - update connector from edid |
35 | * @connector: DRM connector device to use | 50 | * @connector: DRM connector device to use |
@@ -44,6 +59,8 @@ int intel_connector_update_modes(struct drm_connector *connector, | |||
44 | ret = drm_add_edid_modes(connector, edid); | 59 | ret = drm_add_edid_modes(connector, edid); |
45 | drm_edid_to_eld(connector, edid); | 60 | drm_edid_to_eld(connector, edid); |
46 | 61 | ||
62 | intel_connector_update_eld_conn_type(connector); | ||
63 | |||
47 | return ret; | 64 | return ret; |
48 | } | 65 | } |
49 | 66 | ||
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c index a17b1de7d7e0..3b1c5d783ee7 100644 --- a/drivers/gpu/drm/i915/intel_panel.c +++ b/drivers/gpu/drm/i915/intel_panel.c | |||
@@ -1699,6 +1699,8 @@ bxt_setup_backlight(struct intel_connector *connector, enum pipe unused) | |||
1699 | if (!panel->backlight.max) | 1699 | if (!panel->backlight.max) |
1700 | return -ENODEV; | 1700 | return -ENODEV; |
1701 | 1701 | ||
1702 | panel->backlight.min = get_backlight_min_vbt(connector); | ||
1703 | |||
1702 | val = bxt_get_backlight(connector); | 1704 | val = bxt_get_backlight(connector); |
1703 | val = intel_panel_compute_brightness(connector, val); | 1705 | val = intel_panel_compute_brightness(connector, val); |
1704 | panel->backlight.level = clamp(val, panel->backlight.min, | 1706 | panel->backlight.level = clamp(val, panel->backlight.min, |
@@ -1735,6 +1737,8 @@ cnp_setup_backlight(struct intel_connector *connector, enum pipe unused) | |||
1735 | if (!panel->backlight.max) | 1737 | if (!panel->backlight.max) |
1736 | return -ENODEV; | 1738 | return -ENODEV; |
1737 | 1739 | ||
1740 | panel->backlight.min = get_backlight_min_vbt(connector); | ||
1741 | |||
1738 | val = bxt_get_backlight(connector); | 1742 | val = bxt_get_backlight(connector); |
1739 | val = intel_panel_compute_brightness(connector, val); | 1743 | val = intel_panel_compute_brightness(connector, val); |
1740 | panel->backlight.level = clamp(val, panel->backlight.min, | 1744 | panel->backlight.level = clamp(val, panel->backlight.min, |
diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c index b66d8e136aa3..b3a087cb0860 100644 --- a/drivers/gpu/drm/i915/intel_runtime_pm.c +++ b/drivers/gpu/drm/i915/intel_runtime_pm.c | |||
@@ -2782,6 +2782,9 @@ static void cnl_display_core_init(struct drm_i915_private *dev_priv, bool resume | |||
2782 | 2782 | ||
2783 | /* 6. Enable DBUF */ | 2783 | /* 6. Enable DBUF */ |
2784 | gen9_dbuf_enable(dev_priv); | 2784 | gen9_dbuf_enable(dev_priv); |
2785 | |||
2786 | if (resume && dev_priv->csr.dmc_payload) | ||
2787 | intel_csr_load_program(dev_priv); | ||
2785 | } | 2788 | } |
2786 | 2789 | ||
2787 | #undef CNL_PROCMON_IDX | 2790 | #undef CNL_PROCMON_IDX |
diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c index 14c5613b4388..afbf50d0c08f 100644 --- a/drivers/gpu/drm/qxl/qxl_display.c +++ b/drivers/gpu/drm/qxl/qxl_display.c | |||
@@ -509,23 +509,25 @@ static void qxl_primary_atomic_update(struct drm_plane *plane, | |||
509 | .y2 = qfb->base.height | 509 | .y2 = qfb->base.height |
510 | }; | 510 | }; |
511 | 511 | ||
512 | if (!old_state->fb) { | 512 | if (old_state->fb) { |
513 | qxl_io_log(qdev, | 513 | qfb_old = to_qxl_framebuffer(old_state->fb); |
514 | "create primary fb: %dx%d,%d,%d\n", | 514 | bo_old = gem_to_qxl_bo(qfb_old->obj); |
515 | bo->surf.width, bo->surf.height, | 515 | } else { |
516 | bo->surf.stride, bo->surf.format); | 516 | bo_old = NULL; |
517 | } | ||
517 | 518 | ||
518 | qxl_io_create_primary(qdev, 0, bo); | 519 | if (bo == bo_old) |
519 | bo->is_primary = true; | ||
520 | return; | 520 | return; |
521 | 521 | ||
522 | } else { | 522 | if (bo_old && bo_old->is_primary) { |
523 | qfb_old = to_qxl_framebuffer(old_state->fb); | 523 | qxl_io_destroy_primary(qdev); |
524 | bo_old = gem_to_qxl_bo(qfb_old->obj); | ||
525 | bo_old->is_primary = false; | 524 | bo_old->is_primary = false; |
526 | } | 525 | } |
527 | 526 | ||
528 | bo->is_primary = true; | 527 | if (!bo->is_primary) { |
528 | qxl_io_create_primary(qdev, 0, bo); | ||
529 | bo->is_primary = true; | ||
530 | } | ||
529 | qxl_draw_dirty_fb(qdev, qfb, bo, 0, 0, &norect, 1, 1); | 531 | qxl_draw_dirty_fb(qdev, qfb, bo, 0, 0, &norect, 1, 1); |
530 | } | 532 | } |
531 | 533 | ||
@@ -534,13 +536,15 @@ static void qxl_primary_atomic_disable(struct drm_plane *plane, | |||
534 | { | 536 | { |
535 | struct qxl_device *qdev = plane->dev->dev_private; | 537 | struct qxl_device *qdev = plane->dev->dev_private; |
536 | 538 | ||
537 | if (old_state->fb) | 539 | if (old_state->fb) { |
538 | { struct qxl_framebuffer *qfb = | 540 | struct qxl_framebuffer *qfb = |
539 | to_qxl_framebuffer(old_state->fb); | 541 | to_qxl_framebuffer(old_state->fb); |
540 | struct qxl_bo *bo = gem_to_qxl_bo(qfb->obj); | 542 | struct qxl_bo *bo = gem_to_qxl_bo(qfb->obj); |
541 | 543 | ||
542 | qxl_io_destroy_primary(qdev); | 544 | if (bo->is_primary) { |
543 | bo->is_primary = false; | 545 | qxl_io_destroy_primary(qdev); |
546 | bo->is_primary = false; | ||
547 | } | ||
544 | } | 548 | } |
545 | } | 549 | } |
546 | 550 | ||
@@ -698,14 +702,15 @@ static void qxl_plane_cleanup_fb(struct drm_plane *plane, | |||
698 | struct drm_gem_object *obj; | 702 | struct drm_gem_object *obj; |
699 | struct qxl_bo *user_bo; | 703 | struct qxl_bo *user_bo; |
700 | 704 | ||
701 | if (!plane->state->fb) { | 705 | if (!old_state->fb) { |
702 | /* we never executed prepare_fb, so there's nothing to | 706 | /* |
707 | * we never executed prepare_fb, so there's nothing to | ||
703 | * unpin. | 708 | * unpin. |
704 | */ | 709 | */ |
705 | return; | 710 | return; |
706 | } | 711 | } |
707 | 712 | ||
708 | obj = to_qxl_framebuffer(plane->state->fb)->obj; | 713 | obj = to_qxl_framebuffer(old_state->fb)->obj; |
709 | user_bo = gem_to_qxl_bo(obj); | 714 | user_bo = gem_to_qxl_bo(obj); |
710 | qxl_bo_unpin(user_bo); | 715 | qxl_bo_unpin(user_bo); |
711 | } | 716 | } |
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c index 997131d58c7f..ffc10cadcf34 100644 --- a/drivers/gpu/drm/radeon/radeon_device.c +++ b/drivers/gpu/drm/radeon/radeon_device.c | |||
@@ -1663,7 +1663,7 @@ int radeon_suspend_kms(struct drm_device *dev, bool suspend, | |||
1663 | radeon_agp_suspend(rdev); | 1663 | radeon_agp_suspend(rdev); |
1664 | 1664 | ||
1665 | pci_save_state(dev->pdev); | 1665 | pci_save_state(dev->pdev); |
1666 | if (freeze && rdev->family >= CHIP_CEDAR) { | 1666 | if (freeze && rdev->family >= CHIP_CEDAR && !(rdev->flags & RADEON_IS_IGP)) { |
1667 | rdev->asic->asic_reset(rdev, true); | 1667 | rdev->asic->asic_reset(rdev, true); |
1668 | pci_restore_state(dev->pdev); | 1668 | pci_restore_state(dev->pdev); |
1669 | } else if (suspend) { | 1669 | } else if (suspend) { |
diff --git a/drivers/gpu/drm/sun4i/Kconfig b/drivers/gpu/drm/sun4i/Kconfig index 06f05302ee75..882d85db9053 100644 --- a/drivers/gpu/drm/sun4i/Kconfig +++ b/drivers/gpu/drm/sun4i/Kconfig | |||
@@ -26,7 +26,7 @@ config DRM_SUN4I_HDMI_CEC | |||
26 | bool "Allwinner A10 HDMI CEC Support" | 26 | bool "Allwinner A10 HDMI CEC Support" |
27 | depends on DRM_SUN4I_HDMI | 27 | depends on DRM_SUN4I_HDMI |
28 | select CEC_CORE | 28 | select CEC_CORE |
29 | depends on CEC_PIN | 29 | select CEC_PIN |
30 | help | 30 | help |
31 | Choose this option if you have an Allwinner SoC with an HDMI | 31 | Choose this option if you have an Allwinner SoC with an HDMI |
32 | controller and want to use CEC. | 32 | controller and want to use CEC. |
diff --git a/drivers/gpu/drm/sun4i/sun4i_hdmi.h b/drivers/gpu/drm/sun4i/sun4i_hdmi.h index 1457750988da..a1f8cba251a2 100644 --- a/drivers/gpu/drm/sun4i/sun4i_hdmi.h +++ b/drivers/gpu/drm/sun4i/sun4i_hdmi.h | |||
@@ -15,7 +15,7 @@ | |||
15 | #include <drm/drm_connector.h> | 15 | #include <drm/drm_connector.h> |
16 | #include <drm/drm_encoder.h> | 16 | #include <drm/drm_encoder.h> |
17 | 17 | ||
18 | #include <media/cec.h> | 18 | #include <media/cec-pin.h> |
19 | 19 | ||
20 | #define SUN4I_HDMI_CTRL_REG 0x004 | 20 | #define SUN4I_HDMI_CTRL_REG 0x004 |
21 | #define SUN4I_HDMI_CTRL_ENABLE BIT(31) | 21 | #define SUN4I_HDMI_CTRL_ENABLE BIT(31) |
diff --git a/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c b/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c index 9ea6cd5a1370..3cf1a6932fac 100644 --- a/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c +++ b/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c | |||
@@ -302,26 +302,29 @@ static int sun4i_hdmi_bind(struct device *dev, struct device *master, | |||
302 | hdmi->mod_clk = devm_clk_get(dev, "mod"); | 302 | hdmi->mod_clk = devm_clk_get(dev, "mod"); |
303 | if (IS_ERR(hdmi->mod_clk)) { | 303 | if (IS_ERR(hdmi->mod_clk)) { |
304 | dev_err(dev, "Couldn't get the HDMI mod clock\n"); | 304 | dev_err(dev, "Couldn't get the HDMI mod clock\n"); |
305 | return PTR_ERR(hdmi->mod_clk); | 305 | ret = PTR_ERR(hdmi->mod_clk); |
306 | goto err_disable_bus_clk; | ||
306 | } | 307 | } |
307 | clk_prepare_enable(hdmi->mod_clk); | 308 | clk_prepare_enable(hdmi->mod_clk); |
308 | 309 | ||
309 | hdmi->pll0_clk = devm_clk_get(dev, "pll-0"); | 310 | hdmi->pll0_clk = devm_clk_get(dev, "pll-0"); |
310 | if (IS_ERR(hdmi->pll0_clk)) { | 311 | if (IS_ERR(hdmi->pll0_clk)) { |
311 | dev_err(dev, "Couldn't get the HDMI PLL 0 clock\n"); | 312 | dev_err(dev, "Couldn't get the HDMI PLL 0 clock\n"); |
312 | return PTR_ERR(hdmi->pll0_clk); | 313 | ret = PTR_ERR(hdmi->pll0_clk); |
314 | goto err_disable_mod_clk; | ||
313 | } | 315 | } |
314 | 316 | ||
315 | hdmi->pll1_clk = devm_clk_get(dev, "pll-1"); | 317 | hdmi->pll1_clk = devm_clk_get(dev, "pll-1"); |
316 | if (IS_ERR(hdmi->pll1_clk)) { | 318 | if (IS_ERR(hdmi->pll1_clk)) { |
317 | dev_err(dev, "Couldn't get the HDMI PLL 1 clock\n"); | 319 | dev_err(dev, "Couldn't get the HDMI PLL 1 clock\n"); |
318 | return PTR_ERR(hdmi->pll1_clk); | 320 | ret = PTR_ERR(hdmi->pll1_clk); |
321 | goto err_disable_mod_clk; | ||
319 | } | 322 | } |
320 | 323 | ||
321 | ret = sun4i_tmds_create(hdmi); | 324 | ret = sun4i_tmds_create(hdmi); |
322 | if (ret) { | 325 | if (ret) { |
323 | dev_err(dev, "Couldn't create the TMDS clock\n"); | 326 | dev_err(dev, "Couldn't create the TMDS clock\n"); |
324 | return ret; | 327 | goto err_disable_mod_clk; |
325 | } | 328 | } |
326 | 329 | ||
327 | writel(SUN4I_HDMI_CTRL_ENABLE, hdmi->base + SUN4I_HDMI_CTRL_REG); | 330 | writel(SUN4I_HDMI_CTRL_ENABLE, hdmi->base + SUN4I_HDMI_CTRL_REG); |
@@ -362,7 +365,7 @@ static int sun4i_hdmi_bind(struct device *dev, struct device *master, | |||
362 | ret = sun4i_hdmi_i2c_create(dev, hdmi); | 365 | ret = sun4i_hdmi_i2c_create(dev, hdmi); |
363 | if (ret) { | 366 | if (ret) { |
364 | dev_err(dev, "Couldn't create the HDMI I2C adapter\n"); | 367 | dev_err(dev, "Couldn't create the HDMI I2C adapter\n"); |
365 | return ret; | 368 | goto err_disable_mod_clk; |
366 | } | 369 | } |
367 | 370 | ||
368 | drm_encoder_helper_add(&hdmi->encoder, | 371 | drm_encoder_helper_add(&hdmi->encoder, |
@@ -422,6 +425,10 @@ err_cleanup_connector: | |||
422 | drm_encoder_cleanup(&hdmi->encoder); | 425 | drm_encoder_cleanup(&hdmi->encoder); |
423 | err_del_i2c_adapter: | 426 | err_del_i2c_adapter: |
424 | i2c_del_adapter(hdmi->i2c); | 427 | i2c_del_adapter(hdmi->i2c); |
428 | err_disable_mod_clk: | ||
429 | clk_disable_unprepare(hdmi->mod_clk); | ||
430 | err_disable_bus_clk: | ||
431 | clk_disable_unprepare(hdmi->bus_clk); | ||
425 | return ret; | 432 | return ret; |
426 | } | 433 | } |
427 | 434 | ||
@@ -434,6 +441,8 @@ static void sun4i_hdmi_unbind(struct device *dev, struct device *master, | |||
434 | drm_connector_cleanup(&hdmi->connector); | 441 | drm_connector_cleanup(&hdmi->connector); |
435 | drm_encoder_cleanup(&hdmi->encoder); | 442 | drm_encoder_cleanup(&hdmi->encoder); |
436 | i2c_del_adapter(hdmi->i2c); | 443 | i2c_del_adapter(hdmi->i2c); |
444 | clk_disable_unprepare(hdmi->mod_clk); | ||
445 | clk_disable_unprepare(hdmi->bus_clk); | ||
437 | } | 446 | } |
438 | 447 | ||
439 | static const struct component_ops sun4i_hdmi_ops = { | 448 | static const struct component_ops sun4i_hdmi_ops = { |
diff --git a/drivers/gpu/drm/tegra/trace.h b/drivers/gpu/drm/tegra/trace.h index e9b7cdad5c4c..5a1ab4046e92 100644 --- a/drivers/gpu/drm/tegra/trace.h +++ b/drivers/gpu/drm/tegra/trace.h | |||
@@ -63,6 +63,6 @@ DEFINE_EVENT(register_access, sor_readl, | |||
63 | 63 | ||
64 | /* This part must be outside protection */ | 64 | /* This part must be outside protection */ |
65 | #undef TRACE_INCLUDE_PATH | 65 | #undef TRACE_INCLUDE_PATH |
66 | #define TRACE_INCLUDE_PATH . | 66 | #define TRACE_INCLUDE_PATH ../../drivers/gpu/drm/tegra |
67 | #define TRACE_INCLUDE_FILE trace | 67 | #define TRACE_INCLUDE_FILE trace |
68 | #include <trace/define_trace.h> | 68 | #include <trace/define_trace.h> |
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h index b397a14ab970..a98919199858 100644 --- a/drivers/hid/hid-ids.h +++ b/drivers/hid/hid-ids.h | |||
@@ -533,6 +533,7 @@ | |||
533 | #define USB_VENDOR_ID_IDEACOM 0x1cb6 | 533 | #define USB_VENDOR_ID_IDEACOM 0x1cb6 |
534 | #define USB_DEVICE_ID_IDEACOM_IDC6650 0x6650 | 534 | #define USB_DEVICE_ID_IDEACOM_IDC6650 0x6650 |
535 | #define USB_DEVICE_ID_IDEACOM_IDC6651 0x6651 | 535 | #define USB_DEVICE_ID_IDEACOM_IDC6651 0x6651 |
536 | #define USB_DEVICE_ID_IDEACOM_IDC6680 0x6680 | ||
536 | 537 | ||
537 | #define USB_VENDOR_ID_ILITEK 0x222a | 538 | #define USB_VENDOR_ID_ILITEK 0x222a |
538 | #define USB_DEVICE_ID_ILITEK_MULTITOUCH 0x0001 | 539 | #define USB_DEVICE_ID_ILITEK_MULTITOUCH 0x0001 |
@@ -660,6 +661,7 @@ | |||
660 | #define USB_DEVICE_ID_LENOVO_CBTKBD 0x6048 | 661 | #define USB_DEVICE_ID_LENOVO_CBTKBD 0x6048 |
661 | #define USB_DEVICE_ID_LENOVO_TPPRODOCK 0x6067 | 662 | #define USB_DEVICE_ID_LENOVO_TPPRODOCK 0x6067 |
662 | #define USB_DEVICE_ID_LENOVO_X1_COVER 0x6085 | 663 | #define USB_DEVICE_ID_LENOVO_X1_COVER 0x6085 |
664 | #define USB_DEVICE_ID_LENOVO_X1_TAB 0x60a3 | ||
663 | 665 | ||
664 | #define USB_VENDOR_ID_LG 0x1fd2 | 666 | #define USB_VENDOR_ID_LG 0x1fd2 |
665 | #define USB_DEVICE_ID_LG_MULTITOUCH 0x0064 | 667 | #define USB_DEVICE_ID_LG_MULTITOUCH 0x0064 |
diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c index 440b999304a5..9e8c4d2ba11d 100644 --- a/drivers/hid/hid-multitouch.c +++ b/drivers/hid/hid-multitouch.c | |||
@@ -930,6 +930,7 @@ static int mt_input_mapping(struct hid_device *hdev, struct hid_input *hi, | |||
930 | field->application != HID_DG_PEN && | 930 | field->application != HID_DG_PEN && |
931 | field->application != HID_DG_TOUCHPAD && | 931 | field->application != HID_DG_TOUCHPAD && |
932 | field->application != HID_GD_KEYBOARD && | 932 | field->application != HID_GD_KEYBOARD && |
933 | field->application != HID_GD_SYSTEM_CONTROL && | ||
933 | field->application != HID_CP_CONSUMER_CONTROL && | 934 | field->application != HID_CP_CONSUMER_CONTROL && |
934 | field->application != HID_GD_WIRELESS_RADIO_CTLS && | 935 | field->application != HID_GD_WIRELESS_RADIO_CTLS && |
935 | !(field->application == HID_VD_ASUS_CUSTOM_MEDIA_KEYS && | 936 | !(field->application == HID_VD_ASUS_CUSTOM_MEDIA_KEYS && |
@@ -1419,6 +1420,12 @@ static const struct hid_device_id mt_devices[] = { | |||
1419 | USB_VENDOR_ID_ALPS_JP, | 1420 | USB_VENDOR_ID_ALPS_JP, |
1420 | HID_DEVICE_ID_ALPS_U1_DUAL_3BTN_PTP) }, | 1421 | HID_DEVICE_ID_ALPS_U1_DUAL_3BTN_PTP) }, |
1421 | 1422 | ||
1423 | /* Lenovo X1 TAB Gen 2 */ | ||
1424 | { .driver_data = MT_CLS_WIN_8_DUAL, | ||
1425 | HID_DEVICE(BUS_USB, HID_GROUP_MULTITOUCH_WIN_8, | ||
1426 | USB_VENDOR_ID_LENOVO, | ||
1427 | USB_DEVICE_ID_LENOVO_X1_TAB) }, | ||
1428 | |||
1422 | /* Anton devices */ | 1429 | /* Anton devices */ |
1423 | { .driver_data = MT_CLS_EXPORT_ALL_INPUTS, | 1430 | { .driver_data = MT_CLS_EXPORT_ALL_INPUTS, |
1424 | MT_USB_DEVICE(USB_VENDOR_ID_ANTON, | 1431 | MT_USB_DEVICE(USB_VENDOR_ID_ANTON, |
diff --git a/drivers/hid/hid-rmi.c b/drivers/hid/hid-rmi.c index 5b40c2614599..ef241d66562e 100644 --- a/drivers/hid/hid-rmi.c +++ b/drivers/hid/hid-rmi.c | |||
@@ -436,17 +436,24 @@ static int rmi_post_resume(struct hid_device *hdev) | |||
436 | if (!(data->device_flags & RMI_DEVICE)) | 436 | if (!(data->device_flags & RMI_DEVICE)) |
437 | return 0; | 437 | return 0; |
438 | 438 | ||
439 | ret = rmi_reset_attn_mode(hdev); | 439 | /* Make sure the HID device is ready to receive events */ |
440 | ret = hid_hw_open(hdev); | ||
440 | if (ret) | 441 | if (ret) |
441 | return ret; | 442 | return ret; |
442 | 443 | ||
444 | ret = rmi_reset_attn_mode(hdev); | ||
445 | if (ret) | ||
446 | goto out; | ||
447 | |||
443 | ret = rmi_driver_resume(rmi_dev, false); | 448 | ret = rmi_driver_resume(rmi_dev, false); |
444 | if (ret) { | 449 | if (ret) { |
445 | hid_warn(hdev, "Failed to resume device: %d\n", ret); | 450 | hid_warn(hdev, "Failed to resume device: %d\n", ret); |
446 | return ret; | 451 | goto out; |
447 | } | 452 | } |
448 | 453 | ||
449 | return 0; | 454 | out: |
455 | hid_hw_close(hdev); | ||
456 | return ret; | ||
450 | } | 457 | } |
451 | #endif /* CONFIG_PM */ | 458 | #endif /* CONFIG_PM */ |
452 | 459 | ||
diff --git a/drivers/hid/hidraw.c b/drivers/hid/hidraw.c index ec530454e6f6..5fbe0f81ab2e 100644 --- a/drivers/hid/hidraw.c +++ b/drivers/hid/hidraw.c | |||
@@ -337,8 +337,8 @@ static void drop_ref(struct hidraw *hidraw, int exists_bit) | |||
337 | kfree(hidraw); | 337 | kfree(hidraw); |
338 | } else { | 338 | } else { |
339 | /* close device for last reader */ | 339 | /* close device for last reader */ |
340 | hid_hw_power(hidraw->hid, PM_HINT_NORMAL); | ||
341 | hid_hw_close(hidraw->hid); | 340 | hid_hw_close(hidraw->hid); |
341 | hid_hw_power(hidraw->hid, PM_HINT_NORMAL); | ||
342 | } | 342 | } |
343 | } | 343 | } |
344 | } | 344 | } |
diff --git a/drivers/hid/i2c-hid/i2c-hid.c b/drivers/hid/i2c-hid/i2c-hid.c index 77396145d2d0..9145c2129a96 100644 --- a/drivers/hid/i2c-hid/i2c-hid.c +++ b/drivers/hid/i2c-hid/i2c-hid.c | |||
@@ -543,7 +543,8 @@ static int i2c_hid_alloc_buffers(struct i2c_hid *ihid, size_t report_size) | |||
543 | { | 543 | { |
544 | /* the worst case is computed from the set_report command with a | 544 | /* the worst case is computed from the set_report command with a |
545 | * reportID > 15 and the maximum report length */ | 545 | * reportID > 15 and the maximum report length */ |
546 | int args_len = sizeof(__u8) + /* optional ReportID byte */ | 546 | int args_len = sizeof(__u8) + /* ReportID */ |
547 | sizeof(__u8) + /* optional ReportID byte */ | ||
547 | sizeof(__u16) + /* data register */ | 548 | sizeof(__u16) + /* data register */ |
548 | sizeof(__u16) + /* size of the report */ | 549 | sizeof(__u16) + /* size of the report */ |
549 | report_size; /* report */ | 550 | report_size; /* report */ |
diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c index a83fa76655b9..f489a5cfcb48 100644 --- a/drivers/hid/usbhid/hid-quirks.c +++ b/drivers/hid/usbhid/hid-quirks.c | |||
@@ -99,6 +99,7 @@ static const struct hid_blacklist { | |||
99 | { USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_LOGITECH_OEM_USB_OPTICAL_MOUSE_0A4A, HID_QUIRK_ALWAYS_POLL }, | 99 | { USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_LOGITECH_OEM_USB_OPTICAL_MOUSE_0A4A, HID_QUIRK_ALWAYS_POLL }, |
100 | { USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_LOGITECH_OEM_USB_OPTICAL_MOUSE_0B4A, HID_QUIRK_ALWAYS_POLL }, | 100 | { USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_LOGITECH_OEM_USB_OPTICAL_MOUSE_0B4A, HID_QUIRK_ALWAYS_POLL }, |
101 | { USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_PIXART_OEM_USB_OPTICAL_MOUSE, HID_QUIRK_ALWAYS_POLL }, | 101 | { USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_PIXART_OEM_USB_OPTICAL_MOUSE, HID_QUIRK_ALWAYS_POLL }, |
102 | { USB_VENDOR_ID_IDEACOM, USB_DEVICE_ID_IDEACOM_IDC6680, HID_QUIRK_MULTI_INPUT }, | ||
102 | { USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_C007, HID_QUIRK_ALWAYS_POLL }, | 103 | { USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_C007, HID_QUIRK_ALWAYS_POLL }, |
103 | { USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_C077, HID_QUIRK_ALWAYS_POLL }, | 104 | { USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_C077, HID_QUIRK_ALWAYS_POLL }, |
104 | { USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_KEYBOARD_G710_PLUS, HID_QUIRK_NOGET }, | 105 | { USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_KEYBOARD_G710_PLUS, HID_QUIRK_NOGET }, |
diff --git a/drivers/hid/wacom_sys.c b/drivers/hid/wacom_sys.c index e82a696a1d07..906e654fb0ba 100644 --- a/drivers/hid/wacom_sys.c +++ b/drivers/hid/wacom_sys.c | |||
@@ -668,8 +668,10 @@ static struct wacom_hdev_data *wacom_get_hdev_data(struct hid_device *hdev) | |||
668 | 668 | ||
669 | /* Try to find an already-probed interface from the same device */ | 669 | /* Try to find an already-probed interface from the same device */ |
670 | list_for_each_entry(data, &wacom_udev_list, list) { | 670 | list_for_each_entry(data, &wacom_udev_list, list) { |
671 | if (compare_device_paths(hdev, data->dev, '/')) | 671 | if (compare_device_paths(hdev, data->dev, '/')) { |
672 | kref_get(&data->kref); | ||
672 | return data; | 673 | return data; |
674 | } | ||
673 | } | 675 | } |
674 | 676 | ||
675 | /* Fallback to finding devices that appear to be "siblings" */ | 677 | /* Fallback to finding devices that appear to be "siblings" */ |
@@ -766,6 +768,9 @@ static int wacom_led_control(struct wacom *wacom) | |||
766 | if (!wacom->led.groups) | 768 | if (!wacom->led.groups) |
767 | return -ENOTSUPP; | 769 | return -ENOTSUPP; |
768 | 770 | ||
771 | if (wacom->wacom_wac.features.type == REMOTE) | ||
772 | return -ENOTSUPP; | ||
773 | |||
769 | if (wacom->wacom_wac.pid) { /* wireless connected */ | 774 | if (wacom->wacom_wac.pid) { /* wireless connected */ |
770 | report_id = WAC_CMD_WL_LED_CONTROL; | 775 | report_id = WAC_CMD_WL_LED_CONTROL; |
771 | buf_size = 13; | 776 | buf_size = 13; |
diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c index bb17d7bbefd3..aa692e28b2cd 100644 --- a/drivers/hid/wacom_wac.c +++ b/drivers/hid/wacom_wac.c | |||
@@ -567,8 +567,8 @@ static int wacom_intuos_pad(struct wacom_wac *wacom) | |||
567 | keys = data[9] & 0x07; | 567 | keys = data[9] & 0x07; |
568 | } | 568 | } |
569 | } else { | 569 | } else { |
570 | buttons = ((data[6] & 0x10) << 10) | | 570 | buttons = ((data[6] & 0x10) << 5) | |
571 | ((data[5] & 0x10) << 9) | | 571 | ((data[5] & 0x10) << 4) | |
572 | ((data[6] & 0x0F) << 4) | | 572 | ((data[6] & 0x0F) << 4) | |
573 | (data[5] & 0x0F); | 573 | (data[5] & 0x0F); |
574 | } | 574 | } |
@@ -1227,11 +1227,17 @@ static void wacom_intuos_pro2_bt_pen(struct wacom_wac *wacom) | |||
1227 | continue; | 1227 | continue; |
1228 | 1228 | ||
1229 | if (range) { | 1229 | if (range) { |
1230 | /* Fix rotation alignment: userspace expects zero at left */ | ||
1231 | int16_t rotation = (int16_t)get_unaligned_le16(&frame[9]); | ||
1232 | rotation += 1800/4; | ||
1233 | if (rotation > 899) | ||
1234 | rotation -= 1800; | ||
1235 | |||
1230 | input_report_abs(pen_input, ABS_X, get_unaligned_le16(&frame[1])); | 1236 | input_report_abs(pen_input, ABS_X, get_unaligned_le16(&frame[1])); |
1231 | input_report_abs(pen_input, ABS_Y, get_unaligned_le16(&frame[3])); | 1237 | input_report_abs(pen_input, ABS_Y, get_unaligned_le16(&frame[3])); |
1232 | input_report_abs(pen_input, ABS_TILT_X, frame[7]); | 1238 | input_report_abs(pen_input, ABS_TILT_X, (char)frame[7]); |
1233 | input_report_abs(pen_input, ABS_TILT_Y, frame[8]); | 1239 | input_report_abs(pen_input, ABS_TILT_Y, (char)frame[8]); |
1234 | input_report_abs(pen_input, ABS_Z, get_unaligned_le16(&frame[9])); | 1240 | input_report_abs(pen_input, ABS_Z, rotation); |
1235 | input_report_abs(pen_input, ABS_WHEEL, get_unaligned_le16(&frame[11])); | 1241 | input_report_abs(pen_input, ABS_WHEEL, get_unaligned_le16(&frame[11])); |
1236 | } | 1242 | } |
1237 | input_report_abs(pen_input, ABS_PRESSURE, get_unaligned_le16(&frame[5])); | 1243 | input_report_abs(pen_input, ABS_PRESSURE, get_unaligned_le16(&frame[5])); |
@@ -1319,12 +1325,19 @@ static void wacom_intuos_pro2_bt_pad(struct wacom_wac *wacom) | |||
1319 | unsigned char *data = wacom->data; | 1325 | unsigned char *data = wacom->data; |
1320 | 1326 | ||
1321 | int buttons = (data[282] << 1) | ((data[281] >> 6) & 0x01); | 1327 | int buttons = (data[282] << 1) | ((data[281] >> 6) & 0x01); |
1322 | int ring = data[285]; | 1328 | int ring = data[285] & 0x7F; |
1323 | int prox = buttons | (ring & 0x80); | 1329 | bool ringstatus = data[285] & 0x80; |
1330 | bool prox = buttons || ringstatus; | ||
1331 | |||
1332 | /* Fix touchring data: userspace expects 0 at left and increasing clockwise */ | ||
1333 | ring = 71 - ring; | ||
1334 | ring += 3*72/16; | ||
1335 | if (ring > 71) | ||
1336 | ring -= 72; | ||
1324 | 1337 | ||
1325 | wacom_report_numbered_buttons(pad_input, 9, buttons); | 1338 | wacom_report_numbered_buttons(pad_input, 9, buttons); |
1326 | 1339 | ||
1327 | input_report_abs(pad_input, ABS_WHEEL, (ring & 0x80) ? (ring & 0x7f) : 0); | 1340 | input_report_abs(pad_input, ABS_WHEEL, ringstatus ? ring : 0); |
1328 | 1341 | ||
1329 | input_report_key(pad_input, wacom->tool[1], prox ? 1 : 0); | 1342 | input_report_key(pad_input, wacom->tool[1], prox ? 1 : 0); |
1330 | input_report_abs(pad_input, ABS_MISC, prox ? PAD_DEVICE_ID : 0); | 1343 | input_report_abs(pad_input, ABS_MISC, prox ? PAD_DEVICE_ID : 0); |
@@ -1616,6 +1629,20 @@ static int wacom_tpc_irq(struct wacom_wac *wacom, size_t len) | |||
1616 | return 0; | 1629 | return 0; |
1617 | } | 1630 | } |
1618 | 1631 | ||
1632 | static int wacom_offset_rotation(struct input_dev *input, struct hid_usage *usage, | ||
1633 | int value, int num, int denom) | ||
1634 | { | ||
1635 | struct input_absinfo *abs = &input->absinfo[usage->code]; | ||
1636 | int range = (abs->maximum - abs->minimum + 1); | ||
1637 | |||
1638 | value += num*range/denom; | ||
1639 | if (value > abs->maximum) | ||
1640 | value -= range; | ||
1641 | else if (value < abs->minimum) | ||
1642 | value += range; | ||
1643 | return value; | ||
1644 | } | ||
1645 | |||
1619 | int wacom_equivalent_usage(int usage) | 1646 | int wacom_equivalent_usage(int usage) |
1620 | { | 1647 | { |
1621 | if ((usage & HID_USAGE_PAGE) == WACOM_HID_UP_WACOMDIGITIZER) { | 1648 | if ((usage & HID_USAGE_PAGE) == WACOM_HID_UP_WACOMDIGITIZER) { |
@@ -1898,6 +1925,7 @@ static void wacom_wac_pad_event(struct hid_device *hdev, struct hid_field *field | |||
1898 | unsigned equivalent_usage = wacom_equivalent_usage(usage->hid); | 1925 | unsigned equivalent_usage = wacom_equivalent_usage(usage->hid); |
1899 | int i; | 1926 | int i; |
1900 | bool is_touch_on = value; | 1927 | bool is_touch_on = value; |
1928 | bool do_report = false; | ||
1901 | 1929 | ||
1902 | /* | 1930 | /* |
1903 | * Avoid reporting this event and setting inrange_state if this usage | 1931 | * Avoid reporting this event and setting inrange_state if this usage |
@@ -1912,6 +1940,29 @@ static void wacom_wac_pad_event(struct hid_device *hdev, struct hid_field *field | |||
1912 | } | 1940 | } |
1913 | 1941 | ||
1914 | switch (equivalent_usage) { | 1942 | switch (equivalent_usage) { |
1943 | case WACOM_HID_WD_TOUCHRING: | ||
1944 | /* | ||
1945 | * Userspace expects touchrings to increase in value with | ||
1946 | * clockwise gestures and have their zero point at the | ||
1947 | * tablet's left. HID events "should" be clockwise- | ||
1948 | * increasing and zero at top, though the MobileStudio | ||
1949 | * Pro and 2nd-gen Intuos Pro don't do this... | ||
1950 | */ | ||
1951 | if (hdev->vendor == 0x56a && | ||
1952 | (hdev->product == 0x34d || hdev->product == 0x34e || /* MobileStudio Pro */ | ||
1953 | hdev->product == 0x357 || hdev->product == 0x358)) { /* Intuos Pro 2 */ | ||
1954 | value = (field->logical_maximum - value); | ||
1955 | |||
1956 | if (hdev->product == 0x357 || hdev->product == 0x358) | ||
1957 | value = wacom_offset_rotation(input, usage, value, 3, 16); | ||
1958 | else if (hdev->product == 0x34d || hdev->product == 0x34e) | ||
1959 | value = wacom_offset_rotation(input, usage, value, 1, 2); | ||
1960 | } | ||
1961 | else { | ||
1962 | value = wacom_offset_rotation(input, usage, value, 1, 4); | ||
1963 | } | ||
1964 | do_report = true; | ||
1965 | break; | ||
1915 | case WACOM_HID_WD_TOUCHRINGSTATUS: | 1966 | case WACOM_HID_WD_TOUCHRINGSTATUS: |
1916 | if (!value) | 1967 | if (!value) |
1917 | input_event(input, usage->type, usage->code, 0); | 1968 | input_event(input, usage->type, usage->code, 0); |
@@ -1945,10 +1996,14 @@ static void wacom_wac_pad_event(struct hid_device *hdev, struct hid_field *field | |||
1945 | value, i); | 1996 | value, i); |
1946 | /* fall through*/ | 1997 | /* fall through*/ |
1947 | default: | 1998 | default: |
1999 | do_report = true; | ||
2000 | break; | ||
2001 | } | ||
2002 | |||
2003 | if (do_report) { | ||
1948 | input_event(input, usage->type, usage->code, value); | 2004 | input_event(input, usage->type, usage->code, value); |
1949 | if (value) | 2005 | if (value) |
1950 | wacom_wac->hid_data.pad_input_event_flag = true; | 2006 | wacom_wac->hid_data.pad_input_event_flag = true; |
1951 | break; | ||
1952 | } | 2007 | } |
1953 | } | 2008 | } |
1954 | 2009 | ||
@@ -2086,22 +2141,34 @@ static void wacom_wac_pen_event(struct hid_device *hdev, struct hid_field *field | |||
2086 | wacom_wac->hid_data.tipswitch |= value; | 2141 | wacom_wac->hid_data.tipswitch |= value; |
2087 | return; | 2142 | return; |
2088 | case HID_DG_TOOLSERIALNUMBER: | 2143 | case HID_DG_TOOLSERIALNUMBER: |
2089 | wacom_wac->serial[0] = (wacom_wac->serial[0] & ~0xFFFFFFFFULL); | 2144 | if (value) { |
2090 | wacom_wac->serial[0] |= (__u32)value; | 2145 | wacom_wac->serial[0] = (wacom_wac->serial[0] & ~0xFFFFFFFFULL); |
2146 | wacom_wac->serial[0] |= (__u32)value; | ||
2147 | } | ||
2091 | return; | 2148 | return; |
2149 | case HID_DG_TWIST: | ||
2150 | /* | ||
2151 | * Userspace expects pen twist to have its zero point when | ||
2152 | * the buttons/finger is on the tablet's left. HID values | ||
2153 | * are zero when buttons are toward the top. | ||
2154 | */ | ||
2155 | value = wacom_offset_rotation(input, usage, value, 1, 4); | ||
2156 | break; | ||
2092 | case WACOM_HID_WD_SENSE: | 2157 | case WACOM_HID_WD_SENSE: |
2093 | wacom_wac->hid_data.sense_state = value; | 2158 | wacom_wac->hid_data.sense_state = value; |
2094 | return; | 2159 | return; |
2095 | case WACOM_HID_WD_SERIALHI: | 2160 | case WACOM_HID_WD_SERIALHI: |
2096 | wacom_wac->serial[0] = (wacom_wac->serial[0] & 0xFFFFFFFF); | 2161 | if (value) { |
2097 | wacom_wac->serial[0] |= ((__u64)value) << 32; | 2162 | wacom_wac->serial[0] = (wacom_wac->serial[0] & 0xFFFFFFFF); |
2098 | /* | 2163 | wacom_wac->serial[0] |= ((__u64)value) << 32; |
2099 | * Non-USI EMR devices may contain additional tool type | 2164 | /* |
2100 | * information here. See WACOM_HID_WD_TOOLTYPE case for | 2165 | * Non-USI EMR devices may contain additional tool type |
2101 | * more details. | 2166 | * information here. See WACOM_HID_WD_TOOLTYPE case for |
2102 | */ | 2167 | * more details. |
2103 | if (value >> 20 == 1) { | 2168 | */ |
2104 | wacom_wac->id[0] |= value & 0xFFFFF; | 2169 | if (value >> 20 == 1) { |
2170 | wacom_wac->id[0] |= value & 0xFFFFF; | ||
2171 | } | ||
2105 | } | 2172 | } |
2106 | return; | 2173 | return; |
2107 | case WACOM_HID_WD_TOOLTYPE: | 2174 | case WACOM_HID_WD_TOOLTYPE: |
@@ -2205,7 +2272,7 @@ static void wacom_wac_pen_report(struct hid_device *hdev, | |||
2205 | input_report_key(input, wacom_wac->tool[0], prox); | 2272 | input_report_key(input, wacom_wac->tool[0], prox); |
2206 | if (wacom_wac->serial[0]) { | 2273 | if (wacom_wac->serial[0]) { |
2207 | input_event(input, EV_MSC, MSC_SERIAL, wacom_wac->serial[0]); | 2274 | input_event(input, EV_MSC, MSC_SERIAL, wacom_wac->serial[0]); |
2208 | input_report_abs(input, ABS_MISC, id); | 2275 | input_report_abs(input, ABS_MISC, prox ? id : 0); |
2209 | } | 2276 | } |
2210 | 2277 | ||
2211 | wacom_wac->hid_data.tipswitch = false; | 2278 | wacom_wac->hid_data.tipswitch = false; |
@@ -2216,6 +2283,7 @@ static void wacom_wac_pen_report(struct hid_device *hdev, | |||
2216 | if (!prox) { | 2283 | if (!prox) { |
2217 | wacom_wac->tool[0] = 0; | 2284 | wacom_wac->tool[0] = 0; |
2218 | wacom_wac->id[0] = 0; | 2285 | wacom_wac->id[0] = 0; |
2286 | wacom_wac->serial[0] = 0; | ||
2219 | } | 2287 | } |
2220 | } | 2288 | } |
2221 | 2289 | ||
diff --git a/drivers/hv/channel_mgmt.c b/drivers/hv/channel_mgmt.c index 060df71c2e8b..bcbb031f7263 100644 --- a/drivers/hv/channel_mgmt.c +++ b/drivers/hv/channel_mgmt.c | |||
@@ -936,14 +936,10 @@ static void vmbus_onoffer_rescind(struct vmbus_channel_message_header *hdr) | |||
936 | 936 | ||
937 | void vmbus_hvsock_device_unregister(struct vmbus_channel *channel) | 937 | void vmbus_hvsock_device_unregister(struct vmbus_channel *channel) |
938 | { | 938 | { |
939 | mutex_lock(&vmbus_connection.channel_mutex); | ||
940 | |||
941 | BUG_ON(!is_hvsock_channel(channel)); | 939 | BUG_ON(!is_hvsock_channel(channel)); |
942 | 940 | ||
943 | channel->rescind = true; | 941 | channel->rescind = true; |
944 | vmbus_device_unregister(channel->device_obj); | 942 | vmbus_device_unregister(channel->device_obj); |
945 | |||
946 | mutex_unlock(&vmbus_connection.channel_mutex); | ||
947 | } | 943 | } |
948 | EXPORT_SYMBOL_GPL(vmbus_hvsock_device_unregister); | 944 | EXPORT_SYMBOL_GPL(vmbus_hvsock_device_unregister); |
949 | 945 | ||
diff --git a/drivers/hv/hv_fcopy.c b/drivers/hv/hv_fcopy.c index daa75bd41f86..2364281d8593 100644 --- a/drivers/hv/hv_fcopy.c +++ b/drivers/hv/hv_fcopy.c | |||
@@ -170,6 +170,10 @@ static void fcopy_send_data(struct work_struct *dummy) | |||
170 | out_src = smsg_out; | 170 | out_src = smsg_out; |
171 | break; | 171 | break; |
172 | 172 | ||
173 | case WRITE_TO_FILE: | ||
174 | out_src = fcopy_transaction.fcopy_msg; | ||
175 | out_len = sizeof(struct hv_do_fcopy); | ||
176 | break; | ||
173 | default: | 177 | default: |
174 | out_src = fcopy_transaction.fcopy_msg; | 178 | out_src = fcopy_transaction.fcopy_msg; |
175 | out_len = fcopy_transaction.recv_len; | 179 | out_len = fcopy_transaction.recv_len; |
diff --git a/drivers/hwmon/xgene-hwmon.c b/drivers/hwmon/xgene-hwmon.c index 9c0dbb8191ad..e1be61095532 100644 --- a/drivers/hwmon/xgene-hwmon.c +++ b/drivers/hwmon/xgene-hwmon.c | |||
@@ -630,7 +630,7 @@ static int xgene_hwmon_probe(struct platform_device *pdev) | |||
630 | sizeof(struct slimpro_resp_msg) * ASYNC_MSG_FIFO_SIZE, | 630 | sizeof(struct slimpro_resp_msg) * ASYNC_MSG_FIFO_SIZE, |
631 | GFP_KERNEL); | 631 | GFP_KERNEL); |
632 | if (rc) | 632 | if (rc) |
633 | goto out_mbox_free; | 633 | return -ENOMEM; |
634 | 634 | ||
635 | INIT_WORK(&ctx->workq, xgene_hwmon_evt_work); | 635 | INIT_WORK(&ctx->workq, xgene_hwmon_evt_work); |
636 | 636 | ||
@@ -646,7 +646,8 @@ static int xgene_hwmon_probe(struct platform_device *pdev) | |||
646 | if (IS_ERR(ctx->mbox_chan)) { | 646 | if (IS_ERR(ctx->mbox_chan)) { |
647 | dev_err(&pdev->dev, | 647 | dev_err(&pdev->dev, |
648 | "SLIMpro mailbox channel request failed\n"); | 648 | "SLIMpro mailbox channel request failed\n"); |
649 | return -ENODEV; | 649 | rc = -ENODEV; |
650 | goto out_mbox_free; | ||
650 | } | 651 | } |
651 | } else { | 652 | } else { |
652 | struct acpi_pcct_hw_reduced *cppc_ss; | 653 | struct acpi_pcct_hw_reduced *cppc_ss; |
@@ -654,7 +655,8 @@ static int xgene_hwmon_probe(struct platform_device *pdev) | |||
654 | if (device_property_read_u32(&pdev->dev, "pcc-channel", | 655 | if (device_property_read_u32(&pdev->dev, "pcc-channel", |
655 | &ctx->mbox_idx)) { | 656 | &ctx->mbox_idx)) { |
656 | dev_err(&pdev->dev, "no pcc-channel property\n"); | 657 | dev_err(&pdev->dev, "no pcc-channel property\n"); |
657 | return -ENODEV; | 658 | rc = -ENODEV; |
659 | goto out_mbox_free; | ||
658 | } | 660 | } |
659 | 661 | ||
660 | cl->rx_callback = xgene_hwmon_pcc_rx_cb; | 662 | cl->rx_callback = xgene_hwmon_pcc_rx_cb; |
@@ -662,7 +664,8 @@ static int xgene_hwmon_probe(struct platform_device *pdev) | |||
662 | if (IS_ERR(ctx->mbox_chan)) { | 664 | if (IS_ERR(ctx->mbox_chan)) { |
663 | dev_err(&pdev->dev, | 665 | dev_err(&pdev->dev, |
664 | "PPC channel request failed\n"); | 666 | "PPC channel request failed\n"); |
665 | return -ENODEV; | 667 | rc = -ENODEV; |
668 | goto out_mbox_free; | ||
666 | } | 669 | } |
667 | 670 | ||
668 | /* | 671 | /* |
@@ -675,13 +678,13 @@ static int xgene_hwmon_probe(struct platform_device *pdev) | |||
675 | if (!cppc_ss) { | 678 | if (!cppc_ss) { |
676 | dev_err(&pdev->dev, "PPC subspace not found\n"); | 679 | dev_err(&pdev->dev, "PPC subspace not found\n"); |
677 | rc = -ENODEV; | 680 | rc = -ENODEV; |
678 | goto out_mbox_free; | 681 | goto out; |
679 | } | 682 | } |
680 | 683 | ||
681 | if (!ctx->mbox_chan->mbox->txdone_irq) { | 684 | if (!ctx->mbox_chan->mbox->txdone_irq) { |
682 | dev_err(&pdev->dev, "PCC IRQ not supported\n"); | 685 | dev_err(&pdev->dev, "PCC IRQ not supported\n"); |
683 | rc = -ENODEV; | 686 | rc = -ENODEV; |
684 | goto out_mbox_free; | 687 | goto out; |
685 | } | 688 | } |
686 | 689 | ||
687 | /* | 690 | /* |
@@ -696,14 +699,14 @@ static int xgene_hwmon_probe(struct platform_device *pdev) | |||
696 | } else { | 699 | } else { |
697 | dev_err(&pdev->dev, "Failed to get PCC comm region\n"); | 700 | dev_err(&pdev->dev, "Failed to get PCC comm region\n"); |
698 | rc = -ENODEV; | 701 | rc = -ENODEV; |
699 | goto out_mbox_free; | 702 | goto out; |
700 | } | 703 | } |
701 | 704 | ||
702 | if (!ctx->pcc_comm_addr) { | 705 | if (!ctx->pcc_comm_addr) { |
703 | dev_err(&pdev->dev, | 706 | dev_err(&pdev->dev, |
704 | "Failed to ioremap PCC comm region\n"); | 707 | "Failed to ioremap PCC comm region\n"); |
705 | rc = -ENOMEM; | 708 | rc = -ENOMEM; |
706 | goto out_mbox_free; | 709 | goto out; |
707 | } | 710 | } |
708 | 711 | ||
709 | /* | 712 | /* |
diff --git a/drivers/hwtracing/intel_th/pci.c b/drivers/hwtracing/intel_th/pci.c index bc9cebc30526..c2a2ce8ee541 100644 --- a/drivers/hwtracing/intel_th/pci.c +++ b/drivers/hwtracing/intel_th/pci.c | |||
@@ -144,6 +144,11 @@ static const struct pci_device_id intel_th_pci_id_table[] = { | |||
144 | .driver_data = (kernel_ulong_t)0, | 144 | .driver_data = (kernel_ulong_t)0, |
145 | }, | 145 | }, |
146 | { | 146 | { |
147 | /* Lewisburg PCH */ | ||
148 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xa1a6), | ||
149 | .driver_data = (kernel_ulong_t)0, | ||
150 | }, | ||
151 | { | ||
147 | /* Gemini Lake */ | 152 | /* Gemini Lake */ |
148 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x318e), | 153 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x318e), |
149 | .driver_data = (kernel_ulong_t)&intel_th_2x, | 154 | .driver_data = (kernel_ulong_t)&intel_th_2x, |
@@ -158,6 +163,11 @@ static const struct pci_device_id intel_th_pci_id_table[] = { | |||
158 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x9da6), | 163 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x9da6), |
159 | .driver_data = (kernel_ulong_t)&intel_th_2x, | 164 | .driver_data = (kernel_ulong_t)&intel_th_2x, |
160 | }, | 165 | }, |
166 | { | ||
167 | /* Cedar Fork PCH */ | ||
168 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x18e1), | ||
169 | .driver_data = (kernel_ulong_t)&intel_th_2x, | ||
170 | }, | ||
161 | { 0 }, | 171 | { 0 }, |
162 | }; | 172 | }; |
163 | 173 | ||
diff --git a/drivers/hwtracing/stm/core.c b/drivers/hwtracing/stm/core.c index 9414900575d8..f129869e05a9 100644 --- a/drivers/hwtracing/stm/core.c +++ b/drivers/hwtracing/stm/core.c | |||
@@ -1119,7 +1119,7 @@ void stm_source_unregister_device(struct stm_source_data *data) | |||
1119 | 1119 | ||
1120 | stm_source_link_drop(src); | 1120 | stm_source_link_drop(src); |
1121 | 1121 | ||
1122 | device_destroy(&stm_source_class, src->dev.devt); | 1122 | device_unregister(&src->dev); |
1123 | } | 1123 | } |
1124 | EXPORT_SYMBOL_GPL(stm_source_unregister_device); | 1124 | EXPORT_SYMBOL_GPL(stm_source_unregister_device); |
1125 | 1125 | ||
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig index c06dce2c1da7..45a3f3ca29b3 100644 --- a/drivers/i2c/busses/Kconfig +++ b/drivers/i2c/busses/Kconfig | |||
@@ -131,6 +131,7 @@ config I2C_I801 | |||
131 | Gemini Lake (SOC) | 131 | Gemini Lake (SOC) |
132 | Cannon Lake-H (PCH) | 132 | Cannon Lake-H (PCH) |
133 | Cannon Lake-LP (PCH) | 133 | Cannon Lake-LP (PCH) |
134 | Cedar Fork (PCH) | ||
134 | 135 | ||
135 | This driver can also be built as a module. If so, the module | 136 | This driver can also be built as a module. If so, the module |
136 | will be called i2c-i801. | 137 | will be called i2c-i801. |
diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c index e114e4e00d29..9e12a53ef7b8 100644 --- a/drivers/i2c/busses/i2c-i801.c +++ b/drivers/i2c/busses/i2c-i801.c | |||
@@ -68,6 +68,7 @@ | |||
68 | * Gemini Lake (SOC) 0x31d4 32 hard yes yes yes | 68 | * Gemini Lake (SOC) 0x31d4 32 hard yes yes yes |
69 | * Cannon Lake-H (PCH) 0xa323 32 hard yes yes yes | 69 | * Cannon Lake-H (PCH) 0xa323 32 hard yes yes yes |
70 | * Cannon Lake-LP (PCH) 0x9da3 32 hard yes yes yes | 70 | * Cannon Lake-LP (PCH) 0x9da3 32 hard yes yes yes |
71 | * Cedar Fork (PCH) 0x18df 32 hard yes yes yes | ||
71 | * | 72 | * |
72 | * Features supported by this driver: | 73 | * Features supported by this driver: |
73 | * Software PEC no | 74 | * Software PEC no |
@@ -204,6 +205,7 @@ | |||
204 | 205 | ||
205 | /* Older devices have their ID defined in <linux/pci_ids.h> */ | 206 | /* Older devices have their ID defined in <linux/pci_ids.h> */ |
206 | #define PCI_DEVICE_ID_INTEL_BAYTRAIL_SMBUS 0x0f12 | 207 | #define PCI_DEVICE_ID_INTEL_BAYTRAIL_SMBUS 0x0f12 |
208 | #define PCI_DEVICE_ID_INTEL_CDF_SMBUS 0x18df | ||
207 | #define PCI_DEVICE_ID_INTEL_DNV_SMBUS 0x19df | 209 | #define PCI_DEVICE_ID_INTEL_DNV_SMBUS 0x19df |
208 | #define PCI_DEVICE_ID_INTEL_COUGARPOINT_SMBUS 0x1c22 | 210 | #define PCI_DEVICE_ID_INTEL_COUGARPOINT_SMBUS 0x1c22 |
209 | #define PCI_DEVICE_ID_INTEL_PATSBURG_SMBUS 0x1d22 | 211 | #define PCI_DEVICE_ID_INTEL_PATSBURG_SMBUS 0x1d22 |
@@ -1025,6 +1027,7 @@ static const struct pci_device_id i801_ids[] = { | |||
1025 | { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_BRASWELL_SMBUS) }, | 1027 | { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_BRASWELL_SMBUS) }, |
1026 | { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SUNRISEPOINT_H_SMBUS) }, | 1028 | { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SUNRISEPOINT_H_SMBUS) }, |
1027 | { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SUNRISEPOINT_LP_SMBUS) }, | 1029 | { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SUNRISEPOINT_LP_SMBUS) }, |
1030 | { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CDF_SMBUS) }, | ||
1028 | { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_DNV_SMBUS) }, | 1031 | { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_DNV_SMBUS) }, |
1029 | { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_BROXTON_SMBUS) }, | 1032 | { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_BROXTON_SMBUS) }, |
1030 | { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_LEWISBURG_SMBUS) }, | 1033 | { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_LEWISBURG_SMBUS) }, |
@@ -1513,6 +1516,7 @@ static int i801_probe(struct pci_dev *dev, const struct pci_device_id *id) | |||
1513 | case PCI_DEVICE_ID_INTEL_CANNONLAKE_LP_SMBUS: | 1516 | case PCI_DEVICE_ID_INTEL_CANNONLAKE_LP_SMBUS: |
1514 | case PCI_DEVICE_ID_INTEL_LEWISBURG_SMBUS: | 1517 | case PCI_DEVICE_ID_INTEL_LEWISBURG_SMBUS: |
1515 | case PCI_DEVICE_ID_INTEL_LEWISBURG_SSKU_SMBUS: | 1518 | case PCI_DEVICE_ID_INTEL_LEWISBURG_SSKU_SMBUS: |
1519 | case PCI_DEVICE_ID_INTEL_CDF_SMBUS: | ||
1516 | case PCI_DEVICE_ID_INTEL_DNV_SMBUS: | 1520 | case PCI_DEVICE_ID_INTEL_DNV_SMBUS: |
1517 | case PCI_DEVICE_ID_INTEL_KABYLAKE_PCH_H_SMBUS: | 1521 | case PCI_DEVICE_ID_INTEL_KABYLAKE_PCH_H_SMBUS: |
1518 | priv->features |= FEATURE_I2C_BLOCK_READ; | 1522 | priv->features |= FEATURE_I2C_BLOCK_READ; |
diff --git a/drivers/i2c/busses/i2c-img-scb.c b/drivers/i2c/busses/i2c-img-scb.c index 84fb35f6837f..eb1d91b986fd 100644 --- a/drivers/i2c/busses/i2c-img-scb.c +++ b/drivers/i2c/busses/i2c-img-scb.c | |||
@@ -1459,6 +1459,6 @@ static struct platform_driver img_scb_i2c_driver = { | |||
1459 | }; | 1459 | }; |
1460 | module_platform_driver(img_scb_i2c_driver); | 1460 | module_platform_driver(img_scb_i2c_driver); |
1461 | 1461 | ||
1462 | MODULE_AUTHOR("James Hogan <james.hogan@imgtec.com>"); | 1462 | MODULE_AUTHOR("James Hogan <jhogan@kernel.org>"); |
1463 | MODULE_DESCRIPTION("IMG host I2C driver"); | 1463 | MODULE_DESCRIPTION("IMG host I2C driver"); |
1464 | MODULE_LICENSE("GPL v2"); | 1464 | MODULE_LICENSE("GPL v2"); |
diff --git a/drivers/i2c/busses/i2c-sprd.c b/drivers/i2c/busses/i2c-sprd.c index 22e08ae1704f..25fcc3c1e32b 100644 --- a/drivers/i2c/busses/i2c-sprd.c +++ b/drivers/i2c/busses/i2c-sprd.c | |||
@@ -627,6 +627,7 @@ static const struct dev_pm_ops sprd_i2c_pm_ops = { | |||
627 | 627 | ||
628 | static const struct of_device_id sprd_i2c_of_match[] = { | 628 | static const struct of_device_id sprd_i2c_of_match[] = { |
629 | { .compatible = "sprd,sc9860-i2c", }, | 629 | { .compatible = "sprd,sc9860-i2c", }, |
630 | {}, | ||
630 | }; | 631 | }; |
631 | 632 | ||
632 | static struct platform_driver sprd_i2c_driver = { | 633 | static struct platform_driver sprd_i2c_driver = { |
diff --git a/drivers/i2c/busses/i2c-stm32f7.c b/drivers/i2c/busses/i2c-stm32f7.c index 47c67b0ca896..d4a6e9c2e9aa 100644 --- a/drivers/i2c/busses/i2c-stm32f7.c +++ b/drivers/i2c/busses/i2c-stm32f7.c | |||
@@ -215,7 +215,7 @@ struct stm32f7_i2c_dev { | |||
215 | unsigned int msg_num; | 215 | unsigned int msg_num; |
216 | unsigned int msg_id; | 216 | unsigned int msg_id; |
217 | struct stm32f7_i2c_msg f7_msg; | 217 | struct stm32f7_i2c_msg f7_msg; |
218 | struct stm32f7_i2c_setup *setup; | 218 | struct stm32f7_i2c_setup setup; |
219 | struct stm32f7_i2c_timings timing; | 219 | struct stm32f7_i2c_timings timing; |
220 | }; | 220 | }; |
221 | 221 | ||
@@ -265,7 +265,7 @@ static struct stm32f7_i2c_spec i2c_specs[] = { | |||
265 | }, | 265 | }, |
266 | }; | 266 | }; |
267 | 267 | ||
268 | struct stm32f7_i2c_setup stm32f7_setup = { | 268 | static const struct stm32f7_i2c_setup stm32f7_setup = { |
269 | .rise_time = STM32F7_I2C_RISE_TIME_DEFAULT, | 269 | .rise_time = STM32F7_I2C_RISE_TIME_DEFAULT, |
270 | .fall_time = STM32F7_I2C_FALL_TIME_DEFAULT, | 270 | .fall_time = STM32F7_I2C_FALL_TIME_DEFAULT, |
271 | .dnf = STM32F7_I2C_DNF_DEFAULT, | 271 | .dnf = STM32F7_I2C_DNF_DEFAULT, |
@@ -537,7 +537,7 @@ static void stm32f7_i2c_hw_config(struct stm32f7_i2c_dev *i2c_dev) | |||
537 | writel_relaxed(timing, i2c_dev->base + STM32F7_I2C_TIMINGR); | 537 | writel_relaxed(timing, i2c_dev->base + STM32F7_I2C_TIMINGR); |
538 | 538 | ||
539 | /* Enable I2C */ | 539 | /* Enable I2C */ |
540 | if (i2c_dev->setup->analog_filter) | 540 | if (i2c_dev->setup.analog_filter) |
541 | stm32f7_i2c_clr_bits(i2c_dev->base + STM32F7_I2C_CR1, | 541 | stm32f7_i2c_clr_bits(i2c_dev->base + STM32F7_I2C_CR1, |
542 | STM32F7_I2C_CR1_ANFOFF); | 542 | STM32F7_I2C_CR1_ANFOFF); |
543 | else | 543 | else |
@@ -887,22 +887,19 @@ static int stm32f7_i2c_probe(struct platform_device *pdev) | |||
887 | } | 887 | } |
888 | 888 | ||
889 | setup = of_device_get_match_data(&pdev->dev); | 889 | setup = of_device_get_match_data(&pdev->dev); |
890 | i2c_dev->setup->rise_time = setup->rise_time; | 890 | i2c_dev->setup = *setup; |
891 | i2c_dev->setup->fall_time = setup->fall_time; | ||
892 | i2c_dev->setup->dnf = setup->dnf; | ||
893 | i2c_dev->setup->analog_filter = setup->analog_filter; | ||
894 | 891 | ||
895 | ret = device_property_read_u32(i2c_dev->dev, "i2c-scl-rising-time-ns", | 892 | ret = device_property_read_u32(i2c_dev->dev, "i2c-scl-rising-time-ns", |
896 | &rise_time); | 893 | &rise_time); |
897 | if (!ret) | 894 | if (!ret) |
898 | i2c_dev->setup->rise_time = rise_time; | 895 | i2c_dev->setup.rise_time = rise_time; |
899 | 896 | ||
900 | ret = device_property_read_u32(i2c_dev->dev, "i2c-scl-falling-time-ns", | 897 | ret = device_property_read_u32(i2c_dev->dev, "i2c-scl-falling-time-ns", |
901 | &fall_time); | 898 | &fall_time); |
902 | if (!ret) | 899 | if (!ret) |
903 | i2c_dev->setup->fall_time = fall_time; | 900 | i2c_dev->setup.fall_time = fall_time; |
904 | 901 | ||
905 | ret = stm32f7_i2c_setup_timing(i2c_dev, i2c_dev->setup); | 902 | ret = stm32f7_i2c_setup_timing(i2c_dev, &i2c_dev->setup); |
906 | if (ret) | 903 | if (ret) |
907 | goto clk_free; | 904 | goto clk_free; |
908 | 905 | ||
diff --git a/drivers/ide/ide-probe.c b/drivers/ide/ide-probe.c index 01b2adfd8226..eaf39e5db08b 100644 --- a/drivers/ide/ide-probe.c +++ b/drivers/ide/ide-probe.c | |||
@@ -1451,6 +1451,7 @@ int ide_host_register(struct ide_host *host, const struct ide_port_info *d, | |||
1451 | if (hwif_init(hwif) == 0) { | 1451 | if (hwif_init(hwif) == 0) { |
1452 | printk(KERN_INFO "%s: failed to initialize IDE " | 1452 | printk(KERN_INFO "%s: failed to initialize IDE " |
1453 | "interface\n", hwif->name); | 1453 | "interface\n", hwif->name); |
1454 | device_unregister(hwif->portdev); | ||
1454 | device_unregister(&hwif->gendev); | 1455 | device_unregister(&hwif->gendev); |
1455 | ide_disable_port(hwif); | 1456 | ide_disable_port(hwif); |
1456 | continue; | 1457 | continue; |
diff --git a/drivers/ide/ide-scan-pci.c b/drivers/ide/ide-scan-pci.c index 86aa88aeb3a6..acf874800ca4 100644 --- a/drivers/ide/ide-scan-pci.c +++ b/drivers/ide/ide-scan-pci.c | |||
@@ -56,6 +56,7 @@ static int __init ide_scan_pcidev(struct pci_dev *dev) | |||
56 | { | 56 | { |
57 | struct list_head *l; | 57 | struct list_head *l; |
58 | struct pci_driver *d; | 58 | struct pci_driver *d; |
59 | int ret; | ||
59 | 60 | ||
60 | list_for_each(l, &ide_pci_drivers) { | 61 | list_for_each(l, &ide_pci_drivers) { |
61 | d = list_entry(l, struct pci_driver, node); | 62 | d = list_entry(l, struct pci_driver, node); |
@@ -63,10 +64,14 @@ static int __init ide_scan_pcidev(struct pci_dev *dev) | |||
63 | const struct pci_device_id *id = | 64 | const struct pci_device_id *id = |
64 | pci_match_id(d->id_table, dev); | 65 | pci_match_id(d->id_table, dev); |
65 | 66 | ||
66 | if (id != NULL && d->probe(dev, id) >= 0) { | 67 | if (id != NULL) { |
67 | dev->driver = d; | 68 | pci_assign_irq(dev); |
68 | pci_dev_get(dev); | 69 | ret = d->probe(dev, id); |
69 | return 1; | 70 | if (ret >= 0) { |
71 | dev->driver = d; | ||
72 | pci_dev_get(dev); | ||
73 | return 1; | ||
74 | } | ||
70 | } | 75 | } |
71 | } | 76 | } |
72 | } | 77 | } |
diff --git a/drivers/ide/setup-pci.c b/drivers/ide/setup-pci.c index 112d2fe1bcdb..fdc8e813170c 100644 --- a/drivers/ide/setup-pci.c +++ b/drivers/ide/setup-pci.c | |||
@@ -179,6 +179,7 @@ EXPORT_SYMBOL_GPL(ide_setup_pci_noise); | |||
179 | /** | 179 | /** |
180 | * ide_pci_enable - do PCI enables | 180 | * ide_pci_enable - do PCI enables |
181 | * @dev: PCI device | 181 | * @dev: PCI device |
182 | * @bars: PCI BARs mask | ||
182 | * @d: IDE port info | 183 | * @d: IDE port info |
183 | * | 184 | * |
184 | * Enable the IDE PCI device. We attempt to enable the device in full | 185 | * Enable the IDE PCI device. We attempt to enable the device in full |
@@ -189,9 +190,10 @@ EXPORT_SYMBOL_GPL(ide_setup_pci_noise); | |||
189 | * Returns zero on success or an error code | 190 | * Returns zero on success or an error code |
190 | */ | 191 | */ |
191 | 192 | ||
192 | static int ide_pci_enable(struct pci_dev *dev, const struct ide_port_info *d) | 193 | static int ide_pci_enable(struct pci_dev *dev, int bars, |
194 | const struct ide_port_info *d) | ||
193 | { | 195 | { |
194 | int ret, bars; | 196 | int ret; |
195 | 197 | ||
196 | if (pci_enable_device(dev)) { | 198 | if (pci_enable_device(dev)) { |
197 | ret = pci_enable_device_io(dev); | 199 | ret = pci_enable_device_io(dev); |
@@ -216,18 +218,6 @@ static int ide_pci_enable(struct pci_dev *dev, const struct ide_port_info *d) | |||
216 | goto out; | 218 | goto out; |
217 | } | 219 | } |
218 | 220 | ||
219 | if (d->host_flags & IDE_HFLAG_SINGLE) | ||
220 | bars = (1 << 2) - 1; | ||
221 | else | ||
222 | bars = (1 << 4) - 1; | ||
223 | |||
224 | if ((d->host_flags & IDE_HFLAG_NO_DMA) == 0) { | ||
225 | if (d->host_flags & IDE_HFLAG_CS5520) | ||
226 | bars |= (1 << 2); | ||
227 | else | ||
228 | bars |= (1 << 4); | ||
229 | } | ||
230 | |||
231 | ret = pci_request_selected_regions(dev, bars, d->name); | 221 | ret = pci_request_selected_regions(dev, bars, d->name); |
232 | if (ret < 0) | 222 | if (ret < 0) |
233 | printk(KERN_ERR "%s %s: can't reserve resources\n", | 223 | printk(KERN_ERR "%s %s: can't reserve resources\n", |
@@ -403,6 +393,7 @@ int ide_hwif_setup_dma(ide_hwif_t *hwif, const struct ide_port_info *d) | |||
403 | /** | 393 | /** |
404 | * ide_setup_pci_controller - set up IDE PCI | 394 | * ide_setup_pci_controller - set up IDE PCI |
405 | * @dev: PCI device | 395 | * @dev: PCI device |
396 | * @bars: PCI BARs mask | ||
406 | * @d: IDE port info | 397 | * @d: IDE port info |
407 | * @noisy: verbose flag | 398 | * @noisy: verbose flag |
408 | * | 399 | * |
@@ -411,7 +402,7 @@ int ide_hwif_setup_dma(ide_hwif_t *hwif, const struct ide_port_info *d) | |||
411 | * and enables it if need be | 402 | * and enables it if need be |
412 | */ | 403 | */ |
413 | 404 | ||
414 | static int ide_setup_pci_controller(struct pci_dev *dev, | 405 | static int ide_setup_pci_controller(struct pci_dev *dev, int bars, |
415 | const struct ide_port_info *d, int noisy) | 406 | const struct ide_port_info *d, int noisy) |
416 | { | 407 | { |
417 | int ret; | 408 | int ret; |
@@ -420,7 +411,7 @@ static int ide_setup_pci_controller(struct pci_dev *dev, | |||
420 | if (noisy) | 411 | if (noisy) |
421 | ide_setup_pci_noise(dev, d); | 412 | ide_setup_pci_noise(dev, d); |
422 | 413 | ||
423 | ret = ide_pci_enable(dev, d); | 414 | ret = ide_pci_enable(dev, bars, d); |
424 | if (ret < 0) | 415 | if (ret < 0) |
425 | goto out; | 416 | goto out; |
426 | 417 | ||
@@ -428,16 +419,20 @@ static int ide_setup_pci_controller(struct pci_dev *dev, | |||
428 | if (ret < 0) { | 419 | if (ret < 0) { |
429 | printk(KERN_ERR "%s %s: error accessing PCI regs\n", | 420 | printk(KERN_ERR "%s %s: error accessing PCI regs\n", |
430 | d->name, pci_name(dev)); | 421 | d->name, pci_name(dev)); |
431 | goto out; | 422 | goto out_free_bars; |
432 | } | 423 | } |
433 | if (!(pcicmd & PCI_COMMAND_IO)) { /* is device disabled? */ | 424 | if (!(pcicmd & PCI_COMMAND_IO)) { /* is device disabled? */ |
434 | ret = ide_pci_configure(dev, d); | 425 | ret = ide_pci_configure(dev, d); |
435 | if (ret < 0) | 426 | if (ret < 0) |
436 | goto out; | 427 | goto out_free_bars; |
437 | printk(KERN_INFO "%s %s: device enabled (Linux)\n", | 428 | printk(KERN_INFO "%s %s: device enabled (Linux)\n", |
438 | d->name, pci_name(dev)); | 429 | d->name, pci_name(dev)); |
439 | } | 430 | } |
440 | 431 | ||
432 | goto out; | ||
433 | |||
434 | out_free_bars: | ||
435 | pci_release_selected_regions(dev, bars); | ||
441 | out: | 436 | out: |
442 | return ret; | 437 | return ret; |
443 | } | 438 | } |
@@ -540,13 +535,28 @@ int ide_pci_init_two(struct pci_dev *dev1, struct pci_dev *dev2, | |||
540 | { | 535 | { |
541 | struct pci_dev *pdev[] = { dev1, dev2 }; | 536 | struct pci_dev *pdev[] = { dev1, dev2 }; |
542 | struct ide_host *host; | 537 | struct ide_host *host; |
543 | int ret, i, n_ports = dev2 ? 4 : 2; | 538 | int ret, i, n_ports = dev2 ? 4 : 2, bars; |
544 | struct ide_hw hw[4], *hws[] = { NULL, NULL, NULL, NULL }; | 539 | struct ide_hw hw[4], *hws[] = { NULL, NULL, NULL, NULL }; |
545 | 540 | ||
541 | if (d->host_flags & IDE_HFLAG_SINGLE) | ||
542 | bars = (1 << 2) - 1; | ||
543 | else | ||
544 | bars = (1 << 4) - 1; | ||
545 | |||
546 | if ((d->host_flags & IDE_HFLAG_NO_DMA) == 0) { | ||
547 | if (d->host_flags & IDE_HFLAG_CS5520) | ||
548 | bars |= (1 << 2); | ||
549 | else | ||
550 | bars |= (1 << 4); | ||
551 | } | ||
552 | |||
546 | for (i = 0; i < n_ports / 2; i++) { | 553 | for (i = 0; i < n_ports / 2; i++) { |
547 | ret = ide_setup_pci_controller(pdev[i], d, !i); | 554 | ret = ide_setup_pci_controller(pdev[i], bars, d, !i); |
548 | if (ret < 0) | 555 | if (ret < 0) { |
556 | if (i == 1) | ||
557 | pci_release_selected_regions(pdev[0], bars); | ||
549 | goto out; | 558 | goto out; |
559 | } | ||
550 | 560 | ||
551 | ide_pci_setup_ports(pdev[i], d, &hw[i*2], &hws[i*2]); | 561 | ide_pci_setup_ports(pdev[i], d, &hw[i*2], &hws[i*2]); |
552 | } | 562 | } |
@@ -554,7 +564,7 @@ int ide_pci_init_two(struct pci_dev *dev1, struct pci_dev *dev2, | |||
554 | host = ide_host_alloc(d, hws, n_ports); | 564 | host = ide_host_alloc(d, hws, n_ports); |
555 | if (host == NULL) { | 565 | if (host == NULL) { |
556 | ret = -ENOMEM; | 566 | ret = -ENOMEM; |
557 | goto out; | 567 | goto out_free_bars; |
558 | } | 568 | } |
559 | 569 | ||
560 | host->dev[0] = &dev1->dev; | 570 | host->dev[0] = &dev1->dev; |
@@ -576,7 +586,7 @@ int ide_pci_init_two(struct pci_dev *dev1, struct pci_dev *dev2, | |||
576 | * do_ide_setup_pci_device() on the first device! | 586 | * do_ide_setup_pci_device() on the first device! |
577 | */ | 587 | */ |
578 | if (ret < 0) | 588 | if (ret < 0) |
579 | goto out; | 589 | goto out_free_bars; |
580 | 590 | ||
581 | /* fixup IRQ */ | 591 | /* fixup IRQ */ |
582 | if (ide_pci_is_in_compatibility_mode(pdev[i])) { | 592 | if (ide_pci_is_in_compatibility_mode(pdev[i])) { |
@@ -589,6 +599,13 @@ int ide_pci_init_two(struct pci_dev *dev1, struct pci_dev *dev2, | |||
589 | ret = ide_host_register(host, d, hws); | 599 | ret = ide_host_register(host, d, hws); |
590 | if (ret) | 600 | if (ret) |
591 | ide_host_free(host); | 601 | ide_host_free(host); |
602 | else | ||
603 | goto out; | ||
604 | |||
605 | out_free_bars: | ||
606 | i = n_ports / 2; | ||
607 | while (i--) | ||
608 | pci_release_selected_regions(pdev[i], bars); | ||
592 | out: | 609 | out: |
593 | return ret; | 610 | return ret; |
594 | } | 611 | } |
diff --git a/drivers/iio/adc/ad7793.c b/drivers/iio/adc/ad7793.c index c40263ad881f..801afb61310b 100644 --- a/drivers/iio/adc/ad7793.c +++ b/drivers/iio/adc/ad7793.c | |||
@@ -257,7 +257,7 @@ static int ad7793_setup(struct iio_dev *indio_dev, | |||
257 | unsigned int vref_mv) | 257 | unsigned int vref_mv) |
258 | { | 258 | { |
259 | struct ad7793_state *st = iio_priv(indio_dev); | 259 | struct ad7793_state *st = iio_priv(indio_dev); |
260 | int i, ret = -1; | 260 | int i, ret; |
261 | unsigned long long scale_uv; | 261 | unsigned long long scale_uv; |
262 | u32 id; | 262 | u32 id; |
263 | 263 | ||
@@ -266,7 +266,7 @@ static int ad7793_setup(struct iio_dev *indio_dev, | |||
266 | return ret; | 266 | return ret; |
267 | 267 | ||
268 | /* reset the serial interface */ | 268 | /* reset the serial interface */ |
269 | ret = spi_write(st->sd.spi, (u8 *)&ret, sizeof(ret)); | 269 | ret = ad_sd_reset(&st->sd, 32); |
270 | if (ret < 0) | 270 | if (ret < 0) |
271 | goto out; | 271 | goto out; |
272 | usleep_range(500, 2000); /* Wait for at least 500us */ | 272 | usleep_range(500, 2000); /* Wait for at least 500us */ |
diff --git a/drivers/iio/adc/ad_sigma_delta.c b/drivers/iio/adc/ad_sigma_delta.c index e3ed74ee41d1..cf1b048b0665 100644 --- a/drivers/iio/adc/ad_sigma_delta.c +++ b/drivers/iio/adc/ad_sigma_delta.c | |||
@@ -177,6 +177,34 @@ out: | |||
177 | } | 177 | } |
178 | EXPORT_SYMBOL_GPL(ad_sd_read_reg); | 178 | EXPORT_SYMBOL_GPL(ad_sd_read_reg); |
179 | 179 | ||
180 | /** | ||
181 | * ad_sd_reset() - Reset the serial interface | ||
182 | * | ||
183 | * @sigma_delta: The sigma delta device | ||
184 | * @reset_length: Number of SCLKs with DIN = 1 | ||
185 | * | ||
186 | * Returns 0 on success, an error code otherwise. | ||
187 | **/ | ||
188 | int ad_sd_reset(struct ad_sigma_delta *sigma_delta, | ||
189 | unsigned int reset_length) | ||
190 | { | ||
191 | uint8_t *buf; | ||
192 | unsigned int size; | ||
193 | int ret; | ||
194 | |||
195 | size = DIV_ROUND_UP(reset_length, 8); | ||
196 | buf = kcalloc(size, sizeof(*buf), GFP_KERNEL); | ||
197 | if (!buf) | ||
198 | return -ENOMEM; | ||
199 | |||
200 | memset(buf, 0xff, size); | ||
201 | ret = spi_write(sigma_delta->spi, buf, size); | ||
202 | kfree(buf); | ||
203 | |||
204 | return ret; | ||
205 | } | ||
206 | EXPORT_SYMBOL_GPL(ad_sd_reset); | ||
207 | |||
180 | static int ad_sd_calibrate(struct ad_sigma_delta *sigma_delta, | 208 | static int ad_sd_calibrate(struct ad_sigma_delta *sigma_delta, |
181 | unsigned int mode, unsigned int channel) | 209 | unsigned int mode, unsigned int channel) |
182 | { | 210 | { |
diff --git a/drivers/iio/adc/mcp320x.c b/drivers/iio/adc/mcp320x.c index a41956eb3379..32859188d653 100644 --- a/drivers/iio/adc/mcp320x.c +++ b/drivers/iio/adc/mcp320x.c | |||
@@ -17,6 +17,8 @@ | |||
17 | * MCP3204 | 17 | * MCP3204 |
18 | * MCP3208 | 18 | * MCP3208 |
19 | * ------------ | 19 | * ------------ |
20 | * 13 bit converter | ||
21 | * MCP3301 | ||
20 | * | 22 | * |
21 | * Datasheet can be found here: | 23 | * Datasheet can be found here: |
22 | * http://ww1.microchip.com/downloads/en/DeviceDoc/21293C.pdf mcp3001 | 24 | * http://ww1.microchip.com/downloads/en/DeviceDoc/21293C.pdf mcp3001 |
@@ -103,7 +105,7 @@ static int mcp320x_channel_to_tx_data(int device_index, | |||
103 | } | 105 | } |
104 | 106 | ||
105 | static int mcp320x_adc_conversion(struct mcp320x *adc, u8 channel, | 107 | static int mcp320x_adc_conversion(struct mcp320x *adc, u8 channel, |
106 | bool differential, int device_index) | 108 | bool differential, int device_index, int *val) |
107 | { | 109 | { |
108 | int ret; | 110 | int ret; |
109 | 111 | ||
@@ -118,19 +120,25 @@ static int mcp320x_adc_conversion(struct mcp320x *adc, u8 channel, | |||
118 | 120 | ||
119 | switch (device_index) { | 121 | switch (device_index) { |
120 | case mcp3001: | 122 | case mcp3001: |
121 | return (adc->rx_buf[0] << 5 | adc->rx_buf[1] >> 3); | 123 | *val = (adc->rx_buf[0] << 5 | adc->rx_buf[1] >> 3); |
124 | return 0; | ||
122 | case mcp3002: | 125 | case mcp3002: |
123 | case mcp3004: | 126 | case mcp3004: |
124 | case mcp3008: | 127 | case mcp3008: |
125 | return (adc->rx_buf[0] << 2 | adc->rx_buf[1] >> 6); | 128 | *val = (adc->rx_buf[0] << 2 | adc->rx_buf[1] >> 6); |
129 | return 0; | ||
126 | case mcp3201: | 130 | case mcp3201: |
127 | return (adc->rx_buf[0] << 7 | adc->rx_buf[1] >> 1); | 131 | *val = (adc->rx_buf[0] << 7 | adc->rx_buf[1] >> 1); |
132 | return 0; | ||
128 | case mcp3202: | 133 | case mcp3202: |
129 | case mcp3204: | 134 | case mcp3204: |
130 | case mcp3208: | 135 | case mcp3208: |
131 | return (adc->rx_buf[0] << 4 | adc->rx_buf[1] >> 4); | 136 | *val = (adc->rx_buf[0] << 4 | adc->rx_buf[1] >> 4); |
137 | return 0; | ||
132 | case mcp3301: | 138 | case mcp3301: |
133 | return sign_extend32((adc->rx_buf[0] & 0x1f) << 8 | adc->rx_buf[1], 12); | 139 | *val = sign_extend32((adc->rx_buf[0] & 0x1f) << 8 |
140 | | adc->rx_buf[1], 12); | ||
141 | return 0; | ||
134 | default: | 142 | default: |
135 | return -EINVAL; | 143 | return -EINVAL; |
136 | } | 144 | } |
@@ -151,12 +159,10 @@ static int mcp320x_read_raw(struct iio_dev *indio_dev, | |||
151 | switch (mask) { | 159 | switch (mask) { |
152 | case IIO_CHAN_INFO_RAW: | 160 | case IIO_CHAN_INFO_RAW: |
153 | ret = mcp320x_adc_conversion(adc, channel->address, | 161 | ret = mcp320x_adc_conversion(adc, channel->address, |
154 | channel->differential, device_index); | 162 | channel->differential, device_index, val); |
155 | |||
156 | if (ret < 0) | 163 | if (ret < 0) |
157 | goto out; | 164 | goto out; |
158 | 165 | ||
159 | *val = ret; | ||
160 | ret = IIO_VAL_INT; | 166 | ret = IIO_VAL_INT; |
161 | break; | 167 | break; |
162 | 168 | ||
@@ -312,6 +318,7 @@ static int mcp320x_probe(struct spi_device *spi) | |||
312 | indio_dev->name = spi_get_device_id(spi)->name; | 318 | indio_dev->name = spi_get_device_id(spi)->name; |
313 | indio_dev->modes = INDIO_DIRECT_MODE; | 319 | indio_dev->modes = INDIO_DIRECT_MODE; |
314 | indio_dev->info = &mcp320x_info; | 320 | indio_dev->info = &mcp320x_info; |
321 | spi_set_drvdata(spi, indio_dev); | ||
315 | 322 | ||
316 | chip_info = &mcp320x_chip_infos[spi_get_device_id(spi)->driver_data]; | 323 | chip_info = &mcp320x_chip_infos[spi_get_device_id(spi)->driver_data]; |
317 | indio_dev->channels = chip_info->channels; | 324 | indio_dev->channels = chip_info->channels; |
diff --git a/drivers/iio/adc/stm32-adc.c b/drivers/iio/adc/stm32-adc.c index d965b6121e5d..8b7c24780a8a 100644 --- a/drivers/iio/adc/stm32-adc.c +++ b/drivers/iio/adc/stm32-adc.c | |||
@@ -1665,7 +1665,7 @@ static int stm32_adc_chan_of_init(struct iio_dev *indio_dev) | |||
1665 | 1665 | ||
1666 | num_channels = of_property_count_u32_elems(node, "st,adc-channels"); | 1666 | num_channels = of_property_count_u32_elems(node, "st,adc-channels"); |
1667 | if (num_channels < 0 || | 1667 | if (num_channels < 0 || |
1668 | num_channels >= adc_info->max_channels) { | 1668 | num_channels > adc_info->max_channels) { |
1669 | dev_err(&indio_dev->dev, "Bad st,adc-channels?\n"); | 1669 | dev_err(&indio_dev->dev, "Bad st,adc-channels?\n"); |
1670 | return num_channels < 0 ? num_channels : -EINVAL; | 1670 | return num_channels < 0 ? num_channels : -EINVAL; |
1671 | } | 1671 | } |
diff --git a/drivers/iio/adc/ti-ads1015.c b/drivers/iio/adc/ti-ads1015.c index 3e9719e8cf3b..6a114dcb4a3a 100644 --- a/drivers/iio/adc/ti-ads1015.c +++ b/drivers/iio/adc/ti-ads1015.c | |||
@@ -52,7 +52,7 @@ | |||
52 | 52 | ||
53 | #define ADS1015_CFG_COMP_QUE_MASK GENMASK(1, 0) | 53 | #define ADS1015_CFG_COMP_QUE_MASK GENMASK(1, 0) |
54 | #define ADS1015_CFG_COMP_LAT_MASK BIT(2) | 54 | #define ADS1015_CFG_COMP_LAT_MASK BIT(2) |
55 | #define ADS1015_CFG_COMP_POL_MASK BIT(2) | 55 | #define ADS1015_CFG_COMP_POL_MASK BIT(3) |
56 | #define ADS1015_CFG_COMP_MODE_MASK BIT(4) | 56 | #define ADS1015_CFG_COMP_MODE_MASK BIT(4) |
57 | #define ADS1015_CFG_DR_MASK GENMASK(7, 5) | 57 | #define ADS1015_CFG_DR_MASK GENMASK(7, 5) |
58 | #define ADS1015_CFG_MOD_MASK BIT(8) | 58 | #define ADS1015_CFG_MOD_MASK BIT(8) |
@@ -1015,10 +1015,12 @@ static int ads1015_probe(struct i2c_client *client, | |||
1015 | 1015 | ||
1016 | switch (irq_trig) { | 1016 | switch (irq_trig) { |
1017 | case IRQF_TRIGGER_LOW: | 1017 | case IRQF_TRIGGER_LOW: |
1018 | cfg_comp |= ADS1015_CFG_COMP_POL_LOW; | 1018 | cfg_comp |= ADS1015_CFG_COMP_POL_LOW << |
1019 | ADS1015_CFG_COMP_POL_SHIFT; | ||
1019 | break; | 1020 | break; |
1020 | case IRQF_TRIGGER_HIGH: | 1021 | case IRQF_TRIGGER_HIGH: |
1021 | cfg_comp |= ADS1015_CFG_COMP_POL_HIGH; | 1022 | cfg_comp |= ADS1015_CFG_COMP_POL_HIGH << |
1023 | ADS1015_CFG_COMP_POL_SHIFT; | ||
1022 | break; | 1024 | break; |
1023 | default: | 1025 | default: |
1024 | return -EINVAL; | 1026 | return -EINVAL; |
diff --git a/drivers/iio/adc/twl4030-madc.c b/drivers/iio/adc/twl4030-madc.c index 438aa25f548d..8c019bb6625f 100644 --- a/drivers/iio/adc/twl4030-madc.c +++ b/drivers/iio/adc/twl4030-madc.c | |||
@@ -886,21 +886,27 @@ static int twl4030_madc_probe(struct platform_device *pdev) | |||
886 | 886 | ||
887 | /* Enable 3v1 bias regulator for MADC[3:6] */ | 887 | /* Enable 3v1 bias regulator for MADC[3:6] */ |
888 | madc->usb3v1 = devm_regulator_get(madc->dev, "vusb3v1"); | 888 | madc->usb3v1 = devm_regulator_get(madc->dev, "vusb3v1"); |
889 | if (IS_ERR(madc->usb3v1)) | 889 | if (IS_ERR(madc->usb3v1)) { |
890 | return -ENODEV; | 890 | ret = -ENODEV; |
891 | goto err_i2c; | ||
892 | } | ||
891 | 893 | ||
892 | ret = regulator_enable(madc->usb3v1); | 894 | ret = regulator_enable(madc->usb3v1); |
893 | if (ret) | 895 | if (ret) { |
894 | dev_err(madc->dev, "could not enable 3v1 bias regulator\n"); | 896 | dev_err(madc->dev, "could not enable 3v1 bias regulator\n"); |
897 | goto err_i2c; | ||
898 | } | ||
895 | 899 | ||
896 | ret = iio_device_register(iio_dev); | 900 | ret = iio_device_register(iio_dev); |
897 | if (ret) { | 901 | if (ret) { |
898 | dev_err(&pdev->dev, "could not register iio device\n"); | 902 | dev_err(&pdev->dev, "could not register iio device\n"); |
899 | goto err_i2c; | 903 | goto err_usb3v1; |
900 | } | 904 | } |
901 | 905 | ||
902 | return 0; | 906 | return 0; |
903 | 907 | ||
908 | err_usb3v1: | ||
909 | regulator_disable(madc->usb3v1); | ||
904 | err_i2c: | 910 | err_i2c: |
905 | twl4030_madc_set_current_generator(madc, 0, 0); | 911 | twl4030_madc_set_current_generator(madc, 0, 0); |
906 | err_current_generator: | 912 | err_current_generator: |
diff --git a/drivers/iio/common/st_sensors/st_sensors_core.c b/drivers/iio/common/st_sensors/st_sensors_core.c index d99bb1460fe2..02e833b14db0 100644 --- a/drivers/iio/common/st_sensors/st_sensors_core.c +++ b/drivers/iio/common/st_sensors/st_sensors_core.c | |||
@@ -463,8 +463,17 @@ int st_sensors_set_dataready_irq(struct iio_dev *indio_dev, bool enable) | |||
463 | u8 drdy_mask; | 463 | u8 drdy_mask; |
464 | struct st_sensor_data *sdata = iio_priv(indio_dev); | 464 | struct st_sensor_data *sdata = iio_priv(indio_dev); |
465 | 465 | ||
466 | if (!sdata->sensor_settings->drdy_irq.addr) | 466 | if (!sdata->sensor_settings->drdy_irq.addr) { |
467 | /* | ||
468 | * there are some devices (e.g. LIS3MDL) where drdy line is | ||
469 | * routed to a given pin and it is not possible to select a | ||
470 | * different one. Take into account irq status register | ||
471 | * to understand if irq trigger can be properly supported | ||
472 | */ | ||
473 | if (sdata->sensor_settings->drdy_irq.addr_stat_drdy) | ||
474 | sdata->hw_irq_trigger = enable; | ||
467 | return 0; | 475 | return 0; |
476 | } | ||
468 | 477 | ||
469 | /* Enable/Disable the interrupt generator 1. */ | 478 | /* Enable/Disable the interrupt generator 1. */ |
470 | if (sdata->sensor_settings->drdy_irq.ig1.en_addr > 0) { | 479 | if (sdata->sensor_settings->drdy_irq.ig1.en_addr > 0) { |
diff --git a/drivers/iio/industrialio-core.c b/drivers/iio/industrialio-core.c index 7a5aa127c52e..9c4cfd19b739 100644 --- a/drivers/iio/industrialio-core.c +++ b/drivers/iio/industrialio-core.c | |||
@@ -310,8 +310,10 @@ static ssize_t iio_debugfs_read_reg(struct file *file, char __user *userbuf, | |||
310 | ret = indio_dev->info->debugfs_reg_access(indio_dev, | 310 | ret = indio_dev->info->debugfs_reg_access(indio_dev, |
311 | indio_dev->cached_reg_addr, | 311 | indio_dev->cached_reg_addr, |
312 | 0, &val); | 312 | 0, &val); |
313 | if (ret) | 313 | if (ret) { |
314 | dev_err(indio_dev->dev.parent, "%s: read failed\n", __func__); | 314 | dev_err(indio_dev->dev.parent, "%s: read failed\n", __func__); |
315 | return ret; | ||
316 | } | ||
315 | 317 | ||
316 | len = snprintf(buf, sizeof(buf), "0x%X\n", val); | 318 | len = snprintf(buf, sizeof(buf), "0x%X\n", val); |
317 | 319 | ||
diff --git a/drivers/iio/magnetometer/st_magn_core.c b/drivers/iio/magnetometer/st_magn_core.c index 926031d64978..703de313c418 100644 --- a/drivers/iio/magnetometer/st_magn_core.c +++ b/drivers/iio/magnetometer/st_magn_core.c | |||
@@ -315,6 +315,10 @@ static const struct st_sensor_settings st_magn_sensors_settings[] = { | |||
315 | }, | 315 | }, |
316 | }, | 316 | }, |
317 | }, | 317 | }, |
318 | .drdy_irq = { | ||
319 | /* drdy line is routed drdy pin */ | ||
320 | .addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR, | ||
321 | }, | ||
318 | .multi_read_bit = true, | 322 | .multi_read_bit = true, |
319 | .bootime = 2, | 323 | .bootime = 2, |
320 | }, | 324 | }, |
diff --git a/drivers/iio/pressure/bmp280-core.c b/drivers/iio/pressure/bmp280-core.c index 8d5c7b250b11..fd1da26a62e4 100644 --- a/drivers/iio/pressure/bmp280-core.c +++ b/drivers/iio/pressure/bmp280-core.c | |||
@@ -572,7 +572,7 @@ static int bmp280_chip_config(struct bmp280_data *data) | |||
572 | u8 osrs = BMP280_OSRS_TEMP_X(data->oversampling_temp + 1) | | 572 | u8 osrs = BMP280_OSRS_TEMP_X(data->oversampling_temp + 1) | |
573 | BMP280_OSRS_PRESS_X(data->oversampling_press + 1); | 573 | BMP280_OSRS_PRESS_X(data->oversampling_press + 1); |
574 | 574 | ||
575 | ret = regmap_update_bits(data->regmap, BMP280_REG_CTRL_MEAS, | 575 | ret = regmap_write_bits(data->regmap, BMP280_REG_CTRL_MEAS, |
576 | BMP280_OSRS_TEMP_MASK | | 576 | BMP280_OSRS_TEMP_MASK | |
577 | BMP280_OSRS_PRESS_MASK | | 577 | BMP280_OSRS_PRESS_MASK | |
578 | BMP280_MODE_MASK, | 578 | BMP280_MODE_MASK, |
diff --git a/drivers/iio/trigger/stm32-timer-trigger.c b/drivers/iio/trigger/stm32-timer-trigger.c index d523b056f468..b542dc484969 100644 --- a/drivers/iio/trigger/stm32-timer-trigger.c +++ b/drivers/iio/trigger/stm32-timer-trigger.c | |||
@@ -174,6 +174,7 @@ static void stm32_timer_stop(struct stm32_timer_trigger *priv) | |||
174 | clk_disable(priv->clk); | 174 | clk_disable(priv->clk); |
175 | 175 | ||
176 | /* Stop timer */ | 176 | /* Stop timer */ |
177 | regmap_update_bits(priv->regmap, TIM_CR1, TIM_CR1_ARPE, 0); | ||
177 | regmap_update_bits(priv->regmap, TIM_CR1, TIM_CR1_CEN, 0); | 178 | regmap_update_bits(priv->regmap, TIM_CR1, TIM_CR1_CEN, 0); |
178 | regmap_write(priv->regmap, TIM_PSC, 0); | 179 | regmap_write(priv->regmap, TIM_PSC, 0); |
179 | regmap_write(priv->regmap, TIM_ARR, 0); | 180 | regmap_write(priv->regmap, TIM_ARR, 0); |
@@ -713,8 +714,9 @@ static ssize_t stm32_count_set_preset(struct iio_dev *indio_dev, | |||
713 | if (ret) | 714 | if (ret) |
714 | return ret; | 715 | return ret; |
715 | 716 | ||
717 | /* TIMx_ARR register shouldn't be buffered (ARPE=0) */ | ||
718 | regmap_update_bits(priv->regmap, TIM_CR1, TIM_CR1_ARPE, 0); | ||
716 | regmap_write(priv->regmap, TIM_ARR, preset); | 719 | regmap_write(priv->regmap, TIM_ARR, preset); |
717 | regmap_update_bits(priv->regmap, TIM_CR1, TIM_CR1_ARPE, TIM_CR1_ARPE); | ||
718 | 720 | ||
719 | return len; | 721 | return len; |
720 | } | 722 | } |
diff --git a/drivers/infiniband/core/iwpm_msg.c b/drivers/infiniband/core/iwpm_msg.c index 30825bb9b8e9..8861c052155a 100644 --- a/drivers/infiniband/core/iwpm_msg.c +++ b/drivers/infiniband/core/iwpm_msg.c | |||
@@ -100,6 +100,8 @@ int iwpm_register_pid(struct iwpm_dev_data *pm_msg, u8 nl_client) | |||
100 | if (ret) | 100 | if (ret) |
101 | goto pid_query_error; | 101 | goto pid_query_error; |
102 | 102 | ||
103 | nlmsg_end(skb, nlh); | ||
104 | |||
103 | pr_debug("%s: Multicasting a nlmsg (dev = %s ifname = %s iwpm = %s)\n", | 105 | pr_debug("%s: Multicasting a nlmsg (dev = %s ifname = %s iwpm = %s)\n", |
104 | __func__, pm_msg->dev_name, pm_msg->if_name, iwpm_ulib_name); | 106 | __func__, pm_msg->dev_name, pm_msg->if_name, iwpm_ulib_name); |
105 | 107 | ||
@@ -170,6 +172,8 @@ int iwpm_add_mapping(struct iwpm_sa_data *pm_msg, u8 nl_client) | |||
170 | &pm_msg->loc_addr, IWPM_NLA_MANAGE_ADDR); | 172 | &pm_msg->loc_addr, IWPM_NLA_MANAGE_ADDR); |
171 | if (ret) | 173 | if (ret) |
172 | goto add_mapping_error; | 174 | goto add_mapping_error; |
175 | |||
176 | nlmsg_end(skb, nlh); | ||
173 | nlmsg_request->req_buffer = pm_msg; | 177 | nlmsg_request->req_buffer = pm_msg; |
174 | 178 | ||
175 | ret = rdma_nl_unicast_wait(skb, iwpm_user_pid); | 179 | ret = rdma_nl_unicast_wait(skb, iwpm_user_pid); |
@@ -246,6 +250,8 @@ int iwpm_add_and_query_mapping(struct iwpm_sa_data *pm_msg, u8 nl_client) | |||
246 | &pm_msg->rem_addr, IWPM_NLA_QUERY_REMOTE_ADDR); | 250 | &pm_msg->rem_addr, IWPM_NLA_QUERY_REMOTE_ADDR); |
247 | if (ret) | 251 | if (ret) |
248 | goto query_mapping_error; | 252 | goto query_mapping_error; |
253 | |||
254 | nlmsg_end(skb, nlh); | ||
249 | nlmsg_request->req_buffer = pm_msg; | 255 | nlmsg_request->req_buffer = pm_msg; |
250 | 256 | ||
251 | ret = rdma_nl_unicast_wait(skb, iwpm_user_pid); | 257 | ret = rdma_nl_unicast_wait(skb, iwpm_user_pid); |
@@ -308,6 +314,8 @@ int iwpm_remove_mapping(struct sockaddr_storage *local_addr, u8 nl_client) | |||
308 | if (ret) | 314 | if (ret) |
309 | goto remove_mapping_error; | 315 | goto remove_mapping_error; |
310 | 316 | ||
317 | nlmsg_end(skb, nlh); | ||
318 | |||
311 | ret = rdma_nl_unicast_wait(skb, iwpm_user_pid); | 319 | ret = rdma_nl_unicast_wait(skb, iwpm_user_pid); |
312 | if (ret) { | 320 | if (ret) { |
313 | skb = NULL; /* skb is freed in the netlink send-op handling */ | 321 | skb = NULL; /* skb is freed in the netlink send-op handling */ |
diff --git a/drivers/infiniband/core/iwpm_util.c b/drivers/infiniband/core/iwpm_util.c index c81c55942626..3c4faadb8cdd 100644 --- a/drivers/infiniband/core/iwpm_util.c +++ b/drivers/infiniband/core/iwpm_util.c | |||
@@ -597,6 +597,9 @@ static int send_mapinfo_num(u32 mapping_num, u8 nl_client, int iwpm_pid) | |||
597 | &mapping_num, IWPM_NLA_MAPINFO_SEND_NUM); | 597 | &mapping_num, IWPM_NLA_MAPINFO_SEND_NUM); |
598 | if (ret) | 598 | if (ret) |
599 | goto mapinfo_num_error; | 599 | goto mapinfo_num_error; |
600 | |||
601 | nlmsg_end(skb, nlh); | ||
602 | |||
600 | ret = rdma_nl_unicast(skb, iwpm_pid); | 603 | ret = rdma_nl_unicast(skb, iwpm_pid); |
601 | if (ret) { | 604 | if (ret) { |
602 | skb = NULL; | 605 | skb = NULL; |
@@ -678,6 +681,8 @@ int iwpm_send_mapinfo(u8 nl_client, int iwpm_pid) | |||
678 | if (ret) | 681 | if (ret) |
679 | goto send_mapping_info_unlock; | 682 | goto send_mapping_info_unlock; |
680 | 683 | ||
684 | nlmsg_end(skb, nlh); | ||
685 | |||
681 | iwpm_print_sockaddr(&map_info->local_sockaddr, | 686 | iwpm_print_sockaddr(&map_info->local_sockaddr, |
682 | "send_mapping_info: Local sockaddr:"); | 687 | "send_mapping_info: Local sockaddr:"); |
683 | iwpm_print_sockaddr(&map_info->mapped_sockaddr, | 688 | iwpm_print_sockaddr(&map_info->mapped_sockaddr, |
diff --git a/drivers/infiniband/core/security.c b/drivers/infiniband/core/security.c index 70ad19c4c73e..88bdafb297f5 100644 --- a/drivers/infiniband/core/security.c +++ b/drivers/infiniband/core/security.c | |||
@@ -432,8 +432,10 @@ int ib_create_qp_security(struct ib_qp *qp, struct ib_device *dev) | |||
432 | atomic_set(&qp->qp_sec->error_list_count, 0); | 432 | atomic_set(&qp->qp_sec->error_list_count, 0); |
433 | init_completion(&qp->qp_sec->error_complete); | 433 | init_completion(&qp->qp_sec->error_complete); |
434 | ret = security_ib_alloc_security(&qp->qp_sec->security); | 434 | ret = security_ib_alloc_security(&qp->qp_sec->security); |
435 | if (ret) | 435 | if (ret) { |
436 | kfree(qp->qp_sec); | 436 | kfree(qp->qp_sec); |
437 | qp->qp_sec = NULL; | ||
438 | } | ||
437 | 439 | ||
438 | return ret; | 440 | return ret; |
439 | } | 441 | } |
diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c index 4ab30d832ac5..52a2cf2d83aa 100644 --- a/drivers/infiniband/core/uverbs_cmd.c +++ b/drivers/infiniband/core/uverbs_cmd.c | |||
@@ -3869,15 +3869,15 @@ int ib_uverbs_ex_query_device(struct ib_uverbs_file *file, | |||
3869 | resp.raw_packet_caps = attr.raw_packet_caps; | 3869 | resp.raw_packet_caps = attr.raw_packet_caps; |
3870 | resp.response_length += sizeof(resp.raw_packet_caps); | 3870 | resp.response_length += sizeof(resp.raw_packet_caps); |
3871 | 3871 | ||
3872 | if (ucore->outlen < resp.response_length + sizeof(resp.xrq_caps)) | 3872 | if (ucore->outlen < resp.response_length + sizeof(resp.tm_caps)) |
3873 | goto end; | 3873 | goto end; |
3874 | 3874 | ||
3875 | resp.xrq_caps.max_rndv_hdr_size = attr.xrq_caps.max_rndv_hdr_size; | 3875 | resp.tm_caps.max_rndv_hdr_size = attr.tm_caps.max_rndv_hdr_size; |
3876 | resp.xrq_caps.max_num_tags = attr.xrq_caps.max_num_tags; | 3876 | resp.tm_caps.max_num_tags = attr.tm_caps.max_num_tags; |
3877 | resp.xrq_caps.max_ops = attr.xrq_caps.max_ops; | 3877 | resp.tm_caps.max_ops = attr.tm_caps.max_ops; |
3878 | resp.xrq_caps.max_sge = attr.xrq_caps.max_sge; | 3878 | resp.tm_caps.max_sge = attr.tm_caps.max_sge; |
3879 | resp.xrq_caps.flags = attr.xrq_caps.flags; | 3879 | resp.tm_caps.flags = attr.tm_caps.flags; |
3880 | resp.response_length += sizeof(resp.xrq_caps); | 3880 | resp.response_length += sizeof(resp.tm_caps); |
3881 | end: | 3881 | end: |
3882 | err = ib_copy_to_udata(ucore, &resp, resp.response_length); | 3882 | err = ib_copy_to_udata(ucore, &resp, resp.response_length); |
3883 | return err; | 3883 | return err; |
diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c index ee9e27dc799b..de57d6c11a25 100644 --- a/drivers/infiniband/core/verbs.c +++ b/drivers/infiniband/core/verbs.c | |||
@@ -1646,7 +1646,7 @@ static bool is_valid_mcast_lid(struct ib_qp *qp, u16 lid) | |||
1646 | */ | 1646 | */ |
1647 | if (!ib_query_qp(qp, &attr, IB_QP_STATE | IB_QP_PORT, &init_attr)) { | 1647 | if (!ib_query_qp(qp, &attr, IB_QP_STATE | IB_QP_PORT, &init_attr)) { |
1648 | if (attr.qp_state >= IB_QPS_INIT) { | 1648 | if (attr.qp_state >= IB_QPS_INIT) { |
1649 | if (qp->device->get_link_layer(qp->device, attr.port_num) != | 1649 | if (rdma_port_get_link_layer(qp->device, attr.port_num) != |
1650 | IB_LINK_LAYER_INFINIBAND) | 1650 | IB_LINK_LAYER_INFINIBAND) |
1651 | return true; | 1651 | return true; |
1652 | goto lid_check; | 1652 | goto lid_check; |
@@ -1655,7 +1655,7 @@ static bool is_valid_mcast_lid(struct ib_qp *qp, u16 lid) | |||
1655 | 1655 | ||
1656 | /* Can't get a quick answer, iterate over all ports */ | 1656 | /* Can't get a quick answer, iterate over all ports */ |
1657 | for (port = 0; port < qp->device->phys_port_cnt; port++) | 1657 | for (port = 0; port < qp->device->phys_port_cnt; port++) |
1658 | if (qp->device->get_link_layer(qp->device, port) != | 1658 | if (rdma_port_get_link_layer(qp->device, port) != |
1659 | IB_LINK_LAYER_INFINIBAND) | 1659 | IB_LINK_LAYER_INFINIBAND) |
1660 | num_eth_ports++; | 1660 | num_eth_ports++; |
1661 | 1661 | ||
diff --git a/drivers/infiniband/hw/bnxt_re/bnxt_re.h b/drivers/infiniband/hw/bnxt_re/bnxt_re.h index b3ad37fec578..ecbac91b2e14 100644 --- a/drivers/infiniband/hw/bnxt_re/bnxt_re.h +++ b/drivers/infiniband/hw/bnxt_re/bnxt_re.h | |||
@@ -93,11 +93,13 @@ struct bnxt_re_dev { | |||
93 | struct ib_device ibdev; | 93 | struct ib_device ibdev; |
94 | struct list_head list; | 94 | struct list_head list; |
95 | unsigned long flags; | 95 | unsigned long flags; |
96 | #define BNXT_RE_FLAG_NETDEV_REGISTERED 0 | 96 | #define BNXT_RE_FLAG_NETDEV_REGISTERED 0 |
97 | #define BNXT_RE_FLAG_IBDEV_REGISTERED 1 | 97 | #define BNXT_RE_FLAG_IBDEV_REGISTERED 1 |
98 | #define BNXT_RE_FLAG_GOT_MSIX 2 | 98 | #define BNXT_RE_FLAG_GOT_MSIX 2 |
99 | #define BNXT_RE_FLAG_RCFW_CHANNEL_EN 8 | 99 | #define BNXT_RE_FLAG_HAVE_L2_REF 3 |
100 | #define BNXT_RE_FLAG_QOS_WORK_REG 16 | 100 | #define BNXT_RE_FLAG_RCFW_CHANNEL_EN 4 |
101 | #define BNXT_RE_FLAG_QOS_WORK_REG 5 | ||
102 | #define BNXT_RE_FLAG_TASK_IN_PROG 6 | ||
101 | struct net_device *netdev; | 103 | struct net_device *netdev; |
102 | unsigned int version, major, minor; | 104 | unsigned int version, major, minor; |
103 | struct bnxt_en_dev *en_dev; | 105 | struct bnxt_en_dev *en_dev; |
@@ -108,6 +110,8 @@ struct bnxt_re_dev { | |||
108 | 110 | ||
109 | struct delayed_work worker; | 111 | struct delayed_work worker; |
110 | u8 cur_prio_map; | 112 | u8 cur_prio_map; |
113 | u8 active_speed; | ||
114 | u8 active_width; | ||
111 | 115 | ||
112 | /* FP Notification Queue (CQ & SRQ) */ | 116 | /* FP Notification Queue (CQ & SRQ) */ |
113 | struct tasklet_struct nq_task; | 117 | struct tasklet_struct nq_task; |
diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.c b/drivers/infiniband/hw/bnxt_re/ib_verbs.c index 01eee15bbd65..0d89621d9fe8 100644 --- a/drivers/infiniband/hw/bnxt_re/ib_verbs.c +++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.c | |||
@@ -259,14 +259,9 @@ int bnxt_re_query_port(struct ib_device *ibdev, u8 port_num, | |||
259 | port_attr->sm_sl = 0; | 259 | port_attr->sm_sl = 0; |
260 | port_attr->subnet_timeout = 0; | 260 | port_attr->subnet_timeout = 0; |
261 | port_attr->init_type_reply = 0; | 261 | port_attr->init_type_reply = 0; |
262 | /* call the underlying netdev's ethtool hooks to query speed settings | 262 | port_attr->active_speed = rdev->active_speed; |
263 | * for which we acquire rtnl_lock _only_ if it's registered with | 263 | port_attr->active_width = rdev->active_width; |
264 | * IB stack to avoid race in the NETDEV_UNREG path | 264 | |
265 | */ | ||
266 | if (test_bit(BNXT_RE_FLAG_IBDEV_REGISTERED, &rdev->flags)) | ||
267 | if (ib_get_eth_speed(ibdev, port_num, &port_attr->active_speed, | ||
268 | &port_attr->active_width)) | ||
269 | return -EINVAL; | ||
270 | return 0; | 265 | return 0; |
271 | } | 266 | } |
272 | 267 | ||
@@ -319,6 +314,7 @@ int bnxt_re_del_gid(struct ib_device *ibdev, u8 port_num, | |||
319 | struct bnxt_re_gid_ctx *ctx, **ctx_tbl; | 314 | struct bnxt_re_gid_ctx *ctx, **ctx_tbl; |
320 | struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev); | 315 | struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev); |
321 | struct bnxt_qplib_sgid_tbl *sgid_tbl = &rdev->qplib_res.sgid_tbl; | 316 | struct bnxt_qplib_sgid_tbl *sgid_tbl = &rdev->qplib_res.sgid_tbl; |
317 | struct bnxt_qplib_gid *gid_to_del; | ||
322 | 318 | ||
323 | /* Delete the entry from the hardware */ | 319 | /* Delete the entry from the hardware */ |
324 | ctx = *context; | 320 | ctx = *context; |
@@ -328,11 +324,25 @@ int bnxt_re_del_gid(struct ib_device *ibdev, u8 port_num, | |||
328 | if (sgid_tbl && sgid_tbl->active) { | 324 | if (sgid_tbl && sgid_tbl->active) { |
329 | if (ctx->idx >= sgid_tbl->max) | 325 | if (ctx->idx >= sgid_tbl->max) |
330 | return -EINVAL; | 326 | return -EINVAL; |
327 | gid_to_del = &sgid_tbl->tbl[ctx->idx]; | ||
328 | /* DEL_GID is called in WQ context(netdevice_event_work_handler) | ||
329 | * or via the ib_unregister_device path. In the former case QP1 | ||
330 | * may not be destroyed yet, in which case just return as FW | ||
331 | * needs that entry to be present and will fail it's deletion. | ||
332 | * We could get invoked again after QP1 is destroyed OR get an | ||
333 | * ADD_GID call with a different GID value for the same index | ||
334 | * where we issue MODIFY_GID cmd to update the GID entry -- TBD | ||
335 | */ | ||
336 | if (ctx->idx == 0 && | ||
337 | rdma_link_local_addr((struct in6_addr *)gid_to_del) && | ||
338 | ctx->refcnt == 1 && rdev->qp1_sqp) { | ||
339 | dev_dbg(rdev_to_dev(rdev), | ||
340 | "Trying to delete GID0 while QP1 is alive\n"); | ||
341 | return -EFAULT; | ||
342 | } | ||
331 | ctx->refcnt--; | 343 | ctx->refcnt--; |
332 | if (!ctx->refcnt) { | 344 | if (!ctx->refcnt) { |
333 | rc = bnxt_qplib_del_sgid(sgid_tbl, | 345 | rc = bnxt_qplib_del_sgid(sgid_tbl, gid_to_del, true); |
334 | &sgid_tbl->tbl[ctx->idx], | ||
335 | true); | ||
336 | if (rc) { | 346 | if (rc) { |
337 | dev_err(rdev_to_dev(rdev), | 347 | dev_err(rdev_to_dev(rdev), |
338 | "Failed to remove GID: %#x", rc); | 348 | "Failed to remove GID: %#x", rc); |
@@ -816,6 +826,8 @@ int bnxt_re_destroy_qp(struct ib_qp *ib_qp) | |||
816 | 826 | ||
817 | kfree(rdev->sqp_ah); | 827 | kfree(rdev->sqp_ah); |
818 | kfree(rdev->qp1_sqp); | 828 | kfree(rdev->qp1_sqp); |
829 | rdev->qp1_sqp = NULL; | ||
830 | rdev->sqp_ah = NULL; | ||
819 | } | 831 | } |
820 | 832 | ||
821 | if (!IS_ERR_OR_NULL(qp->rumem)) | 833 | if (!IS_ERR_OR_NULL(qp->rumem)) |
@@ -1436,11 +1448,14 @@ int bnxt_re_modify_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr, | |||
1436 | qp->qplib_qp.modify_flags |= | 1448 | qp->qplib_qp.modify_flags |= |
1437 | CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU; | 1449 | CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU; |
1438 | qp->qplib_qp.path_mtu = __from_ib_mtu(qp_attr->path_mtu); | 1450 | qp->qplib_qp.path_mtu = __from_ib_mtu(qp_attr->path_mtu); |
1451 | qp->qplib_qp.mtu = ib_mtu_enum_to_int(qp_attr->path_mtu); | ||
1439 | } else if (qp_attr->qp_state == IB_QPS_RTR) { | 1452 | } else if (qp_attr->qp_state == IB_QPS_RTR) { |
1440 | qp->qplib_qp.modify_flags |= | 1453 | qp->qplib_qp.modify_flags |= |
1441 | CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU; | 1454 | CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU; |
1442 | qp->qplib_qp.path_mtu = | 1455 | qp->qplib_qp.path_mtu = |
1443 | __from_ib_mtu(iboe_get_mtu(rdev->netdev->mtu)); | 1456 | __from_ib_mtu(iboe_get_mtu(rdev->netdev->mtu)); |
1457 | qp->qplib_qp.mtu = | ||
1458 | ib_mtu_enum_to_int(iboe_get_mtu(rdev->netdev->mtu)); | ||
1444 | } | 1459 | } |
1445 | 1460 | ||
1446 | if (qp_attr_mask & IB_QP_TIMEOUT) { | 1461 | if (qp_attr_mask & IB_QP_TIMEOUT) { |
@@ -1551,43 +1566,46 @@ int bnxt_re_query_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr, | |||
1551 | { | 1566 | { |
1552 | struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp); | 1567 | struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp); |
1553 | struct bnxt_re_dev *rdev = qp->rdev; | 1568 | struct bnxt_re_dev *rdev = qp->rdev; |
1554 | struct bnxt_qplib_qp qplib_qp; | 1569 | struct bnxt_qplib_qp *qplib_qp; |
1555 | int rc; | 1570 | int rc; |
1556 | 1571 | ||
1557 | memset(&qplib_qp, 0, sizeof(struct bnxt_qplib_qp)); | 1572 | qplib_qp = kzalloc(sizeof(*qplib_qp), GFP_KERNEL); |
1558 | qplib_qp.id = qp->qplib_qp.id; | 1573 | if (!qplib_qp) |
1559 | qplib_qp.ah.host_sgid_index = qp->qplib_qp.ah.host_sgid_index; | 1574 | return -ENOMEM; |
1575 | |||
1576 | qplib_qp->id = qp->qplib_qp.id; | ||
1577 | qplib_qp->ah.host_sgid_index = qp->qplib_qp.ah.host_sgid_index; | ||
1560 | 1578 | ||
1561 | rc = bnxt_qplib_query_qp(&rdev->qplib_res, &qplib_qp); | 1579 | rc = bnxt_qplib_query_qp(&rdev->qplib_res, qplib_qp); |
1562 | if (rc) { | 1580 | if (rc) { |
1563 | dev_err(rdev_to_dev(rdev), "Failed to query HW QP"); | 1581 | dev_err(rdev_to_dev(rdev), "Failed to query HW QP"); |
1564 | return rc; | 1582 | goto out; |
1565 | } | 1583 | } |
1566 | qp_attr->qp_state = __to_ib_qp_state(qplib_qp.state); | 1584 | qp_attr->qp_state = __to_ib_qp_state(qplib_qp->state); |
1567 | qp_attr->en_sqd_async_notify = qplib_qp.en_sqd_async_notify ? 1 : 0; | 1585 | qp_attr->en_sqd_async_notify = qplib_qp->en_sqd_async_notify ? 1 : 0; |
1568 | qp_attr->qp_access_flags = __to_ib_access_flags(qplib_qp.access); | 1586 | qp_attr->qp_access_flags = __to_ib_access_flags(qplib_qp->access); |
1569 | qp_attr->pkey_index = qplib_qp.pkey_index; | 1587 | qp_attr->pkey_index = qplib_qp->pkey_index; |
1570 | qp_attr->qkey = qplib_qp.qkey; | 1588 | qp_attr->qkey = qplib_qp->qkey; |
1571 | qp_attr->ah_attr.type = RDMA_AH_ATTR_TYPE_ROCE; | 1589 | qp_attr->ah_attr.type = RDMA_AH_ATTR_TYPE_ROCE; |
1572 | rdma_ah_set_grh(&qp_attr->ah_attr, NULL, qplib_qp.ah.flow_label, | 1590 | rdma_ah_set_grh(&qp_attr->ah_attr, NULL, qplib_qp->ah.flow_label, |
1573 | qplib_qp.ah.host_sgid_index, | 1591 | qplib_qp->ah.host_sgid_index, |
1574 | qplib_qp.ah.hop_limit, | 1592 | qplib_qp->ah.hop_limit, |
1575 | qplib_qp.ah.traffic_class); | 1593 | qplib_qp->ah.traffic_class); |
1576 | rdma_ah_set_dgid_raw(&qp_attr->ah_attr, qplib_qp.ah.dgid.data); | 1594 | rdma_ah_set_dgid_raw(&qp_attr->ah_attr, qplib_qp->ah.dgid.data); |
1577 | rdma_ah_set_sl(&qp_attr->ah_attr, qplib_qp.ah.sl); | 1595 | rdma_ah_set_sl(&qp_attr->ah_attr, qplib_qp->ah.sl); |
1578 | ether_addr_copy(qp_attr->ah_attr.roce.dmac, qplib_qp.ah.dmac); | 1596 | ether_addr_copy(qp_attr->ah_attr.roce.dmac, qplib_qp->ah.dmac); |
1579 | qp_attr->path_mtu = __to_ib_mtu(qplib_qp.path_mtu); | 1597 | qp_attr->path_mtu = __to_ib_mtu(qplib_qp->path_mtu); |
1580 | qp_attr->timeout = qplib_qp.timeout; | 1598 | qp_attr->timeout = qplib_qp->timeout; |
1581 | qp_attr->retry_cnt = qplib_qp.retry_cnt; | 1599 | qp_attr->retry_cnt = qplib_qp->retry_cnt; |
1582 | qp_attr->rnr_retry = qplib_qp.rnr_retry; | 1600 | qp_attr->rnr_retry = qplib_qp->rnr_retry; |
1583 | qp_attr->min_rnr_timer = qplib_qp.min_rnr_timer; | 1601 | qp_attr->min_rnr_timer = qplib_qp->min_rnr_timer; |
1584 | qp_attr->rq_psn = qplib_qp.rq.psn; | 1602 | qp_attr->rq_psn = qplib_qp->rq.psn; |
1585 | qp_attr->max_rd_atomic = qplib_qp.max_rd_atomic; | 1603 | qp_attr->max_rd_atomic = qplib_qp->max_rd_atomic; |
1586 | qp_attr->sq_psn = qplib_qp.sq.psn; | 1604 | qp_attr->sq_psn = qplib_qp->sq.psn; |
1587 | qp_attr->max_dest_rd_atomic = qplib_qp.max_dest_rd_atomic; | 1605 | qp_attr->max_dest_rd_atomic = qplib_qp->max_dest_rd_atomic; |
1588 | qp_init_attr->sq_sig_type = qplib_qp.sig_type ? IB_SIGNAL_ALL_WR : | 1606 | qp_init_attr->sq_sig_type = qplib_qp->sig_type ? IB_SIGNAL_ALL_WR : |
1589 | IB_SIGNAL_REQ_WR; | 1607 | IB_SIGNAL_REQ_WR; |
1590 | qp_attr->dest_qp_num = qplib_qp.dest_qpn; | 1608 | qp_attr->dest_qp_num = qplib_qp->dest_qpn; |
1591 | 1609 | ||
1592 | qp_attr->cap.max_send_wr = qp->qplib_qp.sq.max_wqe; | 1610 | qp_attr->cap.max_send_wr = qp->qplib_qp.sq.max_wqe; |
1593 | qp_attr->cap.max_send_sge = qp->qplib_qp.sq.max_sge; | 1611 | qp_attr->cap.max_send_sge = qp->qplib_qp.sq.max_sge; |
@@ -1596,7 +1614,9 @@ int bnxt_re_query_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr, | |||
1596 | qp_attr->cap.max_inline_data = qp->qplib_qp.max_inline_data; | 1614 | qp_attr->cap.max_inline_data = qp->qplib_qp.max_inline_data; |
1597 | qp_init_attr->cap = qp_attr->cap; | 1615 | qp_init_attr->cap = qp_attr->cap; |
1598 | 1616 | ||
1599 | return 0; | 1617 | out: |
1618 | kfree(qplib_qp); | ||
1619 | return rc; | ||
1600 | } | 1620 | } |
1601 | 1621 | ||
1602 | /* Routine for sending QP1 packets for RoCE V1 an V2 | 1622 | /* Routine for sending QP1 packets for RoCE V1 an V2 |
@@ -1908,6 +1928,7 @@ static int bnxt_re_build_atomic_wqe(struct ib_send_wr *wr, | |||
1908 | switch (wr->opcode) { | 1928 | switch (wr->opcode) { |
1909 | case IB_WR_ATOMIC_CMP_AND_SWP: | 1929 | case IB_WR_ATOMIC_CMP_AND_SWP: |
1910 | wqe->type = BNXT_QPLIB_SWQE_TYPE_ATOMIC_CMP_AND_SWP; | 1930 | wqe->type = BNXT_QPLIB_SWQE_TYPE_ATOMIC_CMP_AND_SWP; |
1931 | wqe->atomic.cmp_data = atomic_wr(wr)->compare_add; | ||
1911 | wqe->atomic.swap_data = atomic_wr(wr)->swap; | 1932 | wqe->atomic.swap_data = atomic_wr(wr)->swap; |
1912 | break; | 1933 | break; |
1913 | case IB_WR_ATOMIC_FETCH_AND_ADD: | 1934 | case IB_WR_ATOMIC_FETCH_AND_ADD: |
@@ -3062,7 +3083,7 @@ int bnxt_re_dereg_mr(struct ib_mr *ib_mr) | |||
3062 | return rc; | 3083 | return rc; |
3063 | } | 3084 | } |
3064 | 3085 | ||
3065 | if (mr->npages && mr->pages) { | 3086 | if (mr->pages) { |
3066 | rc = bnxt_qplib_free_fast_reg_page_list(&rdev->qplib_res, | 3087 | rc = bnxt_qplib_free_fast_reg_page_list(&rdev->qplib_res, |
3067 | &mr->qplib_frpl); | 3088 | &mr->qplib_frpl); |
3068 | kfree(mr->pages); | 3089 | kfree(mr->pages); |
diff --git a/drivers/infiniband/hw/bnxt_re/main.c b/drivers/infiniband/hw/bnxt_re/main.c index 82d1cbc27aee..e7450ea92aa9 100644 --- a/drivers/infiniband/hw/bnxt_re/main.c +++ b/drivers/infiniband/hw/bnxt_re/main.c | |||
@@ -1161,6 +1161,8 @@ static int bnxt_re_ib_reg(struct bnxt_re_dev *rdev) | |||
1161 | } | 1161 | } |
1162 | } | 1162 | } |
1163 | set_bit(BNXT_RE_FLAG_IBDEV_REGISTERED, &rdev->flags); | 1163 | set_bit(BNXT_RE_FLAG_IBDEV_REGISTERED, &rdev->flags); |
1164 | ib_get_eth_speed(&rdev->ibdev, 1, &rdev->active_speed, | ||
1165 | &rdev->active_width); | ||
1164 | bnxt_re_dispatch_event(&rdev->ibdev, NULL, 1, IB_EVENT_PORT_ACTIVE); | 1166 | bnxt_re_dispatch_event(&rdev->ibdev, NULL, 1, IB_EVENT_PORT_ACTIVE); |
1165 | bnxt_re_dispatch_event(&rdev->ibdev, NULL, 1, IB_EVENT_GID_CHANGE); | 1167 | bnxt_re_dispatch_event(&rdev->ibdev, NULL, 1, IB_EVENT_GID_CHANGE); |
1166 | 1168 | ||
@@ -1255,10 +1257,14 @@ static void bnxt_re_task(struct work_struct *work) | |||
1255 | else if (netif_carrier_ok(rdev->netdev)) | 1257 | else if (netif_carrier_ok(rdev->netdev)) |
1256 | bnxt_re_dispatch_event(&rdev->ibdev, NULL, 1, | 1258 | bnxt_re_dispatch_event(&rdev->ibdev, NULL, 1, |
1257 | IB_EVENT_PORT_ACTIVE); | 1259 | IB_EVENT_PORT_ACTIVE); |
1260 | ib_get_eth_speed(&rdev->ibdev, 1, &rdev->active_speed, | ||
1261 | &rdev->active_width); | ||
1258 | break; | 1262 | break; |
1259 | default: | 1263 | default: |
1260 | break; | 1264 | break; |
1261 | } | 1265 | } |
1266 | smp_mb__before_atomic(); | ||
1267 | clear_bit(BNXT_RE_FLAG_TASK_IN_PROG, &rdev->flags); | ||
1262 | kfree(re_work); | 1268 | kfree(re_work); |
1263 | } | 1269 | } |
1264 | 1270 | ||
@@ -1317,6 +1323,11 @@ static int bnxt_re_netdev_event(struct notifier_block *notifier, | |||
1317 | break; | 1323 | break; |
1318 | 1324 | ||
1319 | case NETDEV_UNREGISTER: | 1325 | case NETDEV_UNREGISTER: |
1326 | /* netdev notifier will call NETDEV_UNREGISTER again later since | ||
1327 | * we are still holding the reference to the netdev | ||
1328 | */ | ||
1329 | if (test_bit(BNXT_RE_FLAG_TASK_IN_PROG, &rdev->flags)) | ||
1330 | goto exit; | ||
1320 | bnxt_re_ib_unreg(rdev, false); | 1331 | bnxt_re_ib_unreg(rdev, false); |
1321 | bnxt_re_remove_one(rdev); | 1332 | bnxt_re_remove_one(rdev); |
1322 | bnxt_re_dev_unreg(rdev); | 1333 | bnxt_re_dev_unreg(rdev); |
@@ -1335,6 +1346,7 @@ static int bnxt_re_netdev_event(struct notifier_block *notifier, | |||
1335 | re_work->vlan_dev = (real_dev == netdev ? | 1346 | re_work->vlan_dev = (real_dev == netdev ? |
1336 | NULL : netdev); | 1347 | NULL : netdev); |
1337 | INIT_WORK(&re_work->work, bnxt_re_task); | 1348 | INIT_WORK(&re_work->work, bnxt_re_task); |
1349 | set_bit(BNXT_RE_FLAG_TASK_IN_PROG, &rdev->flags); | ||
1338 | queue_work(bnxt_re_wq, &re_work->work); | 1350 | queue_work(bnxt_re_wq, &re_work->work); |
1339 | } | 1351 | } |
1340 | } | 1352 | } |
@@ -1375,6 +1387,22 @@ err_netdev: | |||
1375 | 1387 | ||
1376 | static void __exit bnxt_re_mod_exit(void) | 1388 | static void __exit bnxt_re_mod_exit(void) |
1377 | { | 1389 | { |
1390 | struct bnxt_re_dev *rdev; | ||
1391 | LIST_HEAD(to_be_deleted); | ||
1392 | |||
1393 | mutex_lock(&bnxt_re_dev_lock); | ||
1394 | /* Free all adapter allocated resources */ | ||
1395 | if (!list_empty(&bnxt_re_dev_list)) | ||
1396 | list_splice_init(&bnxt_re_dev_list, &to_be_deleted); | ||
1397 | mutex_unlock(&bnxt_re_dev_lock); | ||
1398 | |||
1399 | list_for_each_entry(rdev, &to_be_deleted, list) { | ||
1400 | dev_info(rdev_to_dev(rdev), "Unregistering Device"); | ||
1401 | bnxt_re_dev_stop(rdev); | ||
1402 | bnxt_re_ib_unreg(rdev, true); | ||
1403 | bnxt_re_remove_one(rdev); | ||
1404 | bnxt_re_dev_unreg(rdev); | ||
1405 | } | ||
1378 | unregister_netdevice_notifier(&bnxt_re_netdev_notifier); | 1406 | unregister_netdevice_notifier(&bnxt_re_netdev_notifier); |
1379 | if (bnxt_re_wq) | 1407 | if (bnxt_re_wq) |
1380 | destroy_workqueue(bnxt_re_wq); | 1408 | destroy_workqueue(bnxt_re_wq); |
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c index 391bb7006e8f..2bdb1562bd21 100644 --- a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c +++ b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c | |||
@@ -107,6 +107,9 @@ static int __send_message(struct bnxt_qplib_rcfw *rcfw, struct cmdq_base *req, | |||
107 | return -EINVAL; | 107 | return -EINVAL; |
108 | } | 108 | } |
109 | 109 | ||
110 | if (test_bit(FIRMWARE_TIMED_OUT, &rcfw->flags)) | ||
111 | return -ETIMEDOUT; | ||
112 | |||
110 | /* Cmdq are in 16-byte units, each request can consume 1 or more | 113 | /* Cmdq are in 16-byte units, each request can consume 1 or more |
111 | * cmdqe | 114 | * cmdqe |
112 | */ | 115 | */ |
@@ -226,6 +229,7 @@ int bnxt_qplib_rcfw_send_message(struct bnxt_qplib_rcfw *rcfw, | |||
226 | /* timed out */ | 229 | /* timed out */ |
227 | dev_err(&rcfw->pdev->dev, "QPLIB: cmdq[%#x]=%#x timedout (%d)msec", | 230 | dev_err(&rcfw->pdev->dev, "QPLIB: cmdq[%#x]=%#x timedout (%d)msec", |
228 | cookie, opcode, RCFW_CMD_WAIT_TIME_MS); | 231 | cookie, opcode, RCFW_CMD_WAIT_TIME_MS); |
232 | set_bit(FIRMWARE_TIMED_OUT, &rcfw->flags); | ||
229 | return rc; | 233 | return rc; |
230 | } | 234 | } |
231 | 235 | ||
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h index 0ed312f17c8d..85b16da287f9 100644 --- a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h +++ b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h | |||
@@ -162,8 +162,9 @@ struct bnxt_qplib_rcfw { | |||
162 | unsigned long *cmdq_bitmap; | 162 | unsigned long *cmdq_bitmap; |
163 | u32 bmap_size; | 163 | u32 bmap_size; |
164 | unsigned long flags; | 164 | unsigned long flags; |
165 | #define FIRMWARE_INITIALIZED_FLAG 1 | 165 | #define FIRMWARE_INITIALIZED_FLAG BIT(0) |
166 | #define FIRMWARE_FIRST_FLAG BIT(31) | 166 | #define FIRMWARE_FIRST_FLAG BIT(31) |
167 | #define FIRMWARE_TIMED_OUT BIT(3) | ||
167 | wait_queue_head_t waitq; | 168 | wait_queue_head_t waitq; |
168 | int (*aeq_handler)(struct bnxt_qplib_rcfw *, | 169 | int (*aeq_handler)(struct bnxt_qplib_rcfw *, |
169 | struct creq_func_event *); | 170 | struct creq_func_event *); |
diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c index ceaa2fa54d32..daf7a56e5d7e 100644 --- a/drivers/infiniband/hw/cxgb4/cm.c +++ b/drivers/infiniband/hw/cxgb4/cm.c | |||
@@ -2333,9 +2333,14 @@ static int close_listsrv_rpl(struct c4iw_dev *dev, struct sk_buff *skb) | |||
2333 | unsigned int stid = GET_TID(rpl); | 2333 | unsigned int stid = GET_TID(rpl); |
2334 | struct c4iw_listen_ep *ep = get_ep_from_stid(dev, stid); | 2334 | struct c4iw_listen_ep *ep = get_ep_from_stid(dev, stid); |
2335 | 2335 | ||
2336 | if (!ep) { | ||
2337 | pr_debug("%s stid %d lookup failure!\n", __func__, stid); | ||
2338 | goto out; | ||
2339 | } | ||
2336 | pr_debug("%s ep %p\n", __func__, ep); | 2340 | pr_debug("%s ep %p\n", __func__, ep); |
2337 | c4iw_wake_up(&ep->com.wr_wait, status2errno(rpl->status)); | 2341 | c4iw_wake_up(&ep->com.wr_wait, status2errno(rpl->status)); |
2338 | c4iw_put_ep(&ep->com); | 2342 | c4iw_put_ep(&ep->com); |
2343 | out: | ||
2339 | return 0; | 2344 | return 0; |
2340 | } | 2345 | } |
2341 | 2346 | ||
@@ -2594,9 +2599,9 @@ fail: | |||
2594 | c4iw_put_ep(&child_ep->com); | 2599 | c4iw_put_ep(&child_ep->com); |
2595 | reject: | 2600 | reject: |
2596 | reject_cr(dev, hwtid, skb); | 2601 | reject_cr(dev, hwtid, skb); |
2602 | out: | ||
2597 | if (parent_ep) | 2603 | if (parent_ep) |
2598 | c4iw_put_ep(&parent_ep->com); | 2604 | c4iw_put_ep(&parent_ep->com); |
2599 | out: | ||
2600 | return 0; | 2605 | return 0; |
2601 | } | 2606 | } |
2602 | 2607 | ||
@@ -3457,7 +3462,7 @@ int c4iw_create_listen(struct iw_cm_id *cm_id, int backlog) | |||
3457 | cm_id->provider_data = ep; | 3462 | cm_id->provider_data = ep; |
3458 | goto out; | 3463 | goto out; |
3459 | } | 3464 | } |
3460 | 3465 | remove_handle(ep->com.dev, &ep->com.dev->stid_idr, ep->stid); | |
3461 | cxgb4_free_stid(ep->com.dev->rdev.lldi.tids, ep->stid, | 3466 | cxgb4_free_stid(ep->com.dev->rdev.lldi.tids, ep->stid, |
3462 | ep->com.local_addr.ss_family); | 3467 | ep->com.local_addr.ss_family); |
3463 | fail2: | 3468 | fail2: |
diff --git a/drivers/infiniband/hw/hfi1/chip.c b/drivers/infiniband/hw/hfi1/chip.c index b2ed4b9cda6e..0be42787759f 100644 --- a/drivers/infiniband/hw/hfi1/chip.c +++ b/drivers/infiniband/hw/hfi1/chip.c | |||
@@ -1066,6 +1066,8 @@ static int read_idle_sma(struct hfi1_devdata *dd, u64 *data); | |||
1066 | static int thermal_init(struct hfi1_devdata *dd); | 1066 | static int thermal_init(struct hfi1_devdata *dd); |
1067 | 1067 | ||
1068 | static void update_statusp(struct hfi1_pportdata *ppd, u32 state); | 1068 | static void update_statusp(struct hfi1_pportdata *ppd, u32 state); |
1069 | static int wait_phys_link_offline_substates(struct hfi1_pportdata *ppd, | ||
1070 | int msecs); | ||
1069 | static int wait_logical_linkstate(struct hfi1_pportdata *ppd, u32 state, | 1071 | static int wait_logical_linkstate(struct hfi1_pportdata *ppd, u32 state, |
1070 | int msecs); | 1072 | int msecs); |
1071 | static void log_state_transition(struct hfi1_pportdata *ppd, u32 state); | 1073 | static void log_state_transition(struct hfi1_pportdata *ppd, u32 state); |
@@ -8238,6 +8240,7 @@ static irqreturn_t general_interrupt(int irq, void *data) | |||
8238 | u64 regs[CCE_NUM_INT_CSRS]; | 8240 | u64 regs[CCE_NUM_INT_CSRS]; |
8239 | u32 bit; | 8241 | u32 bit; |
8240 | int i; | 8242 | int i; |
8243 | irqreturn_t handled = IRQ_NONE; | ||
8241 | 8244 | ||
8242 | this_cpu_inc(*dd->int_counter); | 8245 | this_cpu_inc(*dd->int_counter); |
8243 | 8246 | ||
@@ -8258,9 +8261,10 @@ static irqreturn_t general_interrupt(int irq, void *data) | |||
8258 | for_each_set_bit(bit, (unsigned long *)®s[0], | 8261 | for_each_set_bit(bit, (unsigned long *)®s[0], |
8259 | CCE_NUM_INT_CSRS * 64) { | 8262 | CCE_NUM_INT_CSRS * 64) { |
8260 | is_interrupt(dd, bit); | 8263 | is_interrupt(dd, bit); |
8264 | handled = IRQ_HANDLED; | ||
8261 | } | 8265 | } |
8262 | 8266 | ||
8263 | return IRQ_HANDLED; | 8267 | return handled; |
8264 | } | 8268 | } |
8265 | 8269 | ||
8266 | static irqreturn_t sdma_interrupt(int irq, void *data) | 8270 | static irqreturn_t sdma_interrupt(int irq, void *data) |
@@ -9413,7 +9417,7 @@ static void set_qsfp_int_n(struct hfi1_pportdata *ppd, u8 enable) | |||
9413 | write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_MASK : ASIC_QSFP1_MASK, mask); | 9417 | write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_MASK : ASIC_QSFP1_MASK, mask); |
9414 | } | 9418 | } |
9415 | 9419 | ||
9416 | void reset_qsfp(struct hfi1_pportdata *ppd) | 9420 | int reset_qsfp(struct hfi1_pportdata *ppd) |
9417 | { | 9421 | { |
9418 | struct hfi1_devdata *dd = ppd->dd; | 9422 | struct hfi1_devdata *dd = ppd->dd; |
9419 | u64 mask, qsfp_mask; | 9423 | u64 mask, qsfp_mask; |
@@ -9443,6 +9447,13 @@ void reset_qsfp(struct hfi1_pportdata *ppd) | |||
9443 | * for alarms and warnings | 9447 | * for alarms and warnings |
9444 | */ | 9448 | */ |
9445 | set_qsfp_int_n(ppd, 1); | 9449 | set_qsfp_int_n(ppd, 1); |
9450 | |||
9451 | /* | ||
9452 | * After the reset, AOC transmitters are enabled by default. They need | ||
9453 | * to be turned off to complete the QSFP setup before they can be | ||
9454 | * enabled again. | ||
9455 | */ | ||
9456 | return set_qsfp_tx(ppd, 0); | ||
9446 | } | 9457 | } |
9447 | 9458 | ||
9448 | static int handle_qsfp_error_conditions(struct hfi1_pportdata *ppd, | 9459 | static int handle_qsfp_error_conditions(struct hfi1_pportdata *ppd, |
@@ -10305,6 +10316,7 @@ static int goto_offline(struct hfi1_pportdata *ppd, u8 rem_reason) | |||
10305 | { | 10316 | { |
10306 | struct hfi1_devdata *dd = ppd->dd; | 10317 | struct hfi1_devdata *dd = ppd->dd; |
10307 | u32 previous_state; | 10318 | u32 previous_state; |
10319 | int offline_state_ret; | ||
10308 | int ret; | 10320 | int ret; |
10309 | 10321 | ||
10310 | update_lcb_cache(dd); | 10322 | update_lcb_cache(dd); |
@@ -10326,28 +10338,11 @@ static int goto_offline(struct hfi1_pportdata *ppd, u8 rem_reason) | |||
10326 | ppd->offline_disabled_reason = | 10338 | ppd->offline_disabled_reason = |
10327 | HFI1_ODR_MASK(OPA_LINKDOWN_REASON_TRANSIENT); | 10339 | HFI1_ODR_MASK(OPA_LINKDOWN_REASON_TRANSIENT); |
10328 | 10340 | ||
10329 | /* | 10341 | offline_state_ret = wait_phys_link_offline_substates(ppd, 10000); |
10330 | * Wait for offline transition. It can take a while for | 10342 | if (offline_state_ret < 0) |
10331 | * the link to go down. | 10343 | return offline_state_ret; |
10332 | */ | ||
10333 | ret = wait_physical_linkstate(ppd, PLS_OFFLINE, 10000); | ||
10334 | if (ret < 0) | ||
10335 | return ret; | ||
10336 | |||
10337 | /* | ||
10338 | * Now in charge of LCB - must be after the physical state is | ||
10339 | * offline.quiet and before host_link_state is changed. | ||
10340 | */ | ||
10341 | set_host_lcb_access(dd); | ||
10342 | write_csr(dd, DC_LCB_ERR_EN, ~0ull); /* watch LCB errors */ | ||
10343 | |||
10344 | /* make sure the logical state is also down */ | ||
10345 | ret = wait_logical_linkstate(ppd, IB_PORT_DOWN, 1000); | ||
10346 | if (ret) | ||
10347 | force_logical_link_state_down(ppd); | ||
10348 | |||
10349 | ppd->host_link_state = HLS_LINK_COOLDOWN; /* LCB access allowed */ | ||
10350 | 10344 | ||
10345 | /* Disabling AOC transmitters */ | ||
10351 | if (ppd->port_type == PORT_TYPE_QSFP && | 10346 | if (ppd->port_type == PORT_TYPE_QSFP && |
10352 | ppd->qsfp_info.limiting_active && | 10347 | ppd->qsfp_info.limiting_active && |
10353 | qsfp_mod_present(ppd)) { | 10348 | qsfp_mod_present(ppd)) { |
@@ -10365,6 +10360,30 @@ static int goto_offline(struct hfi1_pportdata *ppd, u8 rem_reason) | |||
10365 | } | 10360 | } |
10366 | 10361 | ||
10367 | /* | 10362 | /* |
10363 | * Wait for the offline.Quiet transition if it hasn't happened yet. It | ||
10364 | * can take a while for the link to go down. | ||
10365 | */ | ||
10366 | if (offline_state_ret != PLS_OFFLINE_QUIET) { | ||
10367 | ret = wait_physical_linkstate(ppd, PLS_OFFLINE, 30000); | ||
10368 | if (ret < 0) | ||
10369 | return ret; | ||
10370 | } | ||
10371 | |||
10372 | /* | ||
10373 | * Now in charge of LCB - must be after the physical state is | ||
10374 | * offline.quiet and before host_link_state is changed. | ||
10375 | */ | ||
10376 | set_host_lcb_access(dd); | ||
10377 | write_csr(dd, DC_LCB_ERR_EN, ~0ull); /* watch LCB errors */ | ||
10378 | |||
10379 | /* make sure the logical state is also down */ | ||
10380 | ret = wait_logical_linkstate(ppd, IB_PORT_DOWN, 1000); | ||
10381 | if (ret) | ||
10382 | force_logical_link_state_down(ppd); | ||
10383 | |||
10384 | ppd->host_link_state = HLS_LINK_COOLDOWN; /* LCB access allowed */ | ||
10385 | |||
10386 | /* | ||
10368 | * The LNI has a mandatory wait time after the physical state | 10387 | * The LNI has a mandatory wait time after the physical state |
10369 | * moves to Offline.Quiet. The wait time may be different | 10388 | * moves to Offline.Quiet. The wait time may be different |
10370 | * depending on how the link went down. The 8051 firmware | 10389 | * depending on how the link went down. The 8051 firmware |
@@ -10396,6 +10415,9 @@ static int goto_offline(struct hfi1_pportdata *ppd, u8 rem_reason) | |||
10396 | & (HLS_DN_POLL | HLS_VERIFY_CAP | HLS_GOING_UP)) { | 10415 | & (HLS_DN_POLL | HLS_VERIFY_CAP | HLS_GOING_UP)) { |
10397 | /* went down while attempting link up */ | 10416 | /* went down while attempting link up */ |
10398 | check_lni_states(ppd); | 10417 | check_lni_states(ppd); |
10418 | |||
10419 | /* The QSFP doesn't need to be reset on LNI failure */ | ||
10420 | ppd->qsfp_info.reset_needed = 0; | ||
10399 | } | 10421 | } |
10400 | 10422 | ||
10401 | /* the active link width (downgrade) is 0 on link down */ | 10423 | /* the active link width (downgrade) is 0 on link down */ |
@@ -12804,6 +12826,39 @@ static int wait_physical_linkstate(struct hfi1_pportdata *ppd, u32 state, | |||
12804 | return 0; | 12826 | return 0; |
12805 | } | 12827 | } |
12806 | 12828 | ||
12829 | /* | ||
12830 | * wait_phys_link_offline_quiet_substates - wait for any offline substate | ||
12831 | * @ppd: port device | ||
12832 | * @msecs: the number of milliseconds to wait | ||
12833 | * | ||
12834 | * Wait up to msecs milliseconds for any offline physical link | ||
12835 | * state change to occur. | ||
12836 | * Returns 0 if at least one state is reached, otherwise -ETIMEDOUT. | ||
12837 | */ | ||
12838 | static int wait_phys_link_offline_substates(struct hfi1_pportdata *ppd, | ||
12839 | int msecs) | ||
12840 | { | ||
12841 | u32 read_state; | ||
12842 | unsigned long timeout; | ||
12843 | |||
12844 | timeout = jiffies + msecs_to_jiffies(msecs); | ||
12845 | while (1) { | ||
12846 | read_state = read_physical_state(ppd->dd); | ||
12847 | if ((read_state & 0xF0) == PLS_OFFLINE) | ||
12848 | break; | ||
12849 | if (time_after(jiffies, timeout)) { | ||
12850 | dd_dev_err(ppd->dd, | ||
12851 | "timeout waiting for phy link offline.quiet substates. Read state 0x%x, %dms\n", | ||
12852 | read_state, msecs); | ||
12853 | return -ETIMEDOUT; | ||
12854 | } | ||
12855 | usleep_range(1950, 2050); /* sleep 2ms-ish */ | ||
12856 | } | ||
12857 | |||
12858 | log_state_transition(ppd, read_state); | ||
12859 | return read_state; | ||
12860 | } | ||
12861 | |||
12807 | #define CLEAR_STATIC_RATE_CONTROL_SMASK(r) \ | 12862 | #define CLEAR_STATIC_RATE_CONTROL_SMASK(r) \ |
12808 | (r &= ~SEND_CTXT_CHECK_ENABLE_DISALLOW_PBC_STATIC_RATE_CONTROL_SMASK) | 12863 | (r &= ~SEND_CTXT_CHECK_ENABLE_DISALLOW_PBC_STATIC_RATE_CONTROL_SMASK) |
12809 | 12864 | ||
diff --git a/drivers/infiniband/hw/hfi1/chip.h b/drivers/infiniband/hw/hfi1/chip.h index b8345a60a0fb..50b8645d0b87 100644 --- a/drivers/infiniband/hw/hfi1/chip.h +++ b/drivers/infiniband/hw/hfi1/chip.h | |||
@@ -204,6 +204,7 @@ | |||
204 | #define PLS_OFFLINE_READY_TO_QUIET_LT 0x92 | 204 | #define PLS_OFFLINE_READY_TO_QUIET_LT 0x92 |
205 | #define PLS_OFFLINE_REPORT_FAILURE 0x93 | 205 | #define PLS_OFFLINE_REPORT_FAILURE 0x93 |
206 | #define PLS_OFFLINE_READY_TO_QUIET_BCC 0x94 | 206 | #define PLS_OFFLINE_READY_TO_QUIET_BCC 0x94 |
207 | #define PLS_OFFLINE_QUIET_DURATION 0x95 | ||
207 | #define PLS_POLLING 0x20 | 208 | #define PLS_POLLING 0x20 |
208 | #define PLS_POLLING_QUIET 0x20 | 209 | #define PLS_POLLING_QUIET 0x20 |
209 | #define PLS_POLLING_ACTIVE 0x21 | 210 | #define PLS_POLLING_ACTIVE 0x21 |
@@ -722,7 +723,7 @@ void handle_link_downgrade(struct work_struct *work); | |||
722 | void handle_link_bounce(struct work_struct *work); | 723 | void handle_link_bounce(struct work_struct *work); |
723 | void handle_start_link(struct work_struct *work); | 724 | void handle_start_link(struct work_struct *work); |
724 | void handle_sma_message(struct work_struct *work); | 725 | void handle_sma_message(struct work_struct *work); |
725 | void reset_qsfp(struct hfi1_pportdata *ppd); | 726 | int reset_qsfp(struct hfi1_pportdata *ppd); |
726 | void qsfp_event(struct work_struct *work); | 727 | void qsfp_event(struct work_struct *work); |
727 | void start_freeze_handling(struct hfi1_pportdata *ppd, int flags); | 728 | void start_freeze_handling(struct hfi1_pportdata *ppd, int flags); |
728 | int send_idle_sma(struct hfi1_devdata *dd, u64 message); | 729 | int send_idle_sma(struct hfi1_devdata *dd, u64 message); |
diff --git a/drivers/infiniband/hw/hfi1/eprom.c b/drivers/infiniband/hw/hfi1/eprom.c index d46b17107901..1613af1c58d9 100644 --- a/drivers/infiniband/hw/hfi1/eprom.c +++ b/drivers/infiniband/hw/hfi1/eprom.c | |||
@@ -204,7 +204,10 @@ done_asic: | |||
204 | return ret; | 204 | return ret; |
205 | } | 205 | } |
206 | 206 | ||
207 | /* magic character sequence that trails an image */ | 207 | /* magic character sequence that begins an image */ |
208 | #define IMAGE_START_MAGIC "APO=" | ||
209 | |||
210 | /* magic character sequence that might trail an image */ | ||
208 | #define IMAGE_TRAIL_MAGIC "egamiAPO" | 211 | #define IMAGE_TRAIL_MAGIC "egamiAPO" |
209 | 212 | ||
210 | /* EPROM file types */ | 213 | /* EPROM file types */ |
@@ -250,6 +253,7 @@ static int read_partition_platform_config(struct hfi1_devdata *dd, void **data, | |||
250 | { | 253 | { |
251 | void *buffer; | 254 | void *buffer; |
252 | void *p; | 255 | void *p; |
256 | u32 length; | ||
253 | int ret; | 257 | int ret; |
254 | 258 | ||
255 | buffer = kmalloc(P1_SIZE, GFP_KERNEL); | 259 | buffer = kmalloc(P1_SIZE, GFP_KERNEL); |
@@ -262,15 +266,21 @@ static int read_partition_platform_config(struct hfi1_devdata *dd, void **data, | |||
262 | return ret; | 266 | return ret; |
263 | } | 267 | } |
264 | 268 | ||
265 | /* scan for image magic that may trail the actual data */ | 269 | /* config partition is valid only if it starts with IMAGE_START_MAGIC */ |
266 | p = strnstr(buffer, IMAGE_TRAIL_MAGIC, P1_SIZE); | 270 | if (memcmp(buffer, IMAGE_START_MAGIC, strlen(IMAGE_START_MAGIC))) { |
267 | if (!p) { | ||
268 | kfree(buffer); | 271 | kfree(buffer); |
269 | return -ENOENT; | 272 | return -ENOENT; |
270 | } | 273 | } |
271 | 274 | ||
275 | /* scan for image magic that may trail the actual data */ | ||
276 | p = strnstr(buffer, IMAGE_TRAIL_MAGIC, P1_SIZE); | ||
277 | if (p) | ||
278 | length = p - buffer; | ||
279 | else | ||
280 | length = P1_SIZE; | ||
281 | |||
272 | *data = buffer; | 282 | *data = buffer; |
273 | *size = p - buffer; | 283 | *size = length; |
274 | return 0; | 284 | return 0; |
275 | } | 285 | } |
276 | 286 | ||
diff --git a/drivers/infiniband/hw/hfi1/file_ops.c b/drivers/infiniband/hw/hfi1/file_ops.c index 2bc89260235a..d9a1e9893136 100644 --- a/drivers/infiniband/hw/hfi1/file_ops.c +++ b/drivers/infiniband/hw/hfi1/file_ops.c | |||
@@ -930,15 +930,8 @@ static int assign_ctxt(struct hfi1_filedata *fd, struct hfi1_user_info *uinfo) | |||
930 | switch (ret) { | 930 | switch (ret) { |
931 | case 0: | 931 | case 0: |
932 | ret = setup_base_ctxt(fd, uctxt); | 932 | ret = setup_base_ctxt(fd, uctxt); |
933 | if (uctxt->subctxt_cnt) { | 933 | if (ret) |
934 | /* | 934 | deallocate_ctxt(uctxt); |
935 | * Base context is done (successfully or not), notify | ||
936 | * anybody using a sub-context that is waiting for | ||
937 | * this completion. | ||
938 | */ | ||
939 | clear_bit(HFI1_CTXT_BASE_UNINIT, &uctxt->event_flags); | ||
940 | wake_up(&uctxt->wait); | ||
941 | } | ||
942 | break; | 935 | break; |
943 | case 1: | 936 | case 1: |
944 | ret = complete_subctxt(fd); | 937 | ret = complete_subctxt(fd); |
@@ -1305,25 +1298,25 @@ static int setup_base_ctxt(struct hfi1_filedata *fd, | |||
1305 | /* Now allocate the RcvHdr queue and eager buffers. */ | 1298 | /* Now allocate the RcvHdr queue and eager buffers. */ |
1306 | ret = hfi1_create_rcvhdrq(dd, uctxt); | 1299 | ret = hfi1_create_rcvhdrq(dd, uctxt); |
1307 | if (ret) | 1300 | if (ret) |
1308 | return ret; | 1301 | goto done; |
1309 | 1302 | ||
1310 | ret = hfi1_setup_eagerbufs(uctxt); | 1303 | ret = hfi1_setup_eagerbufs(uctxt); |
1311 | if (ret) | 1304 | if (ret) |
1312 | goto setup_failed; | 1305 | goto done; |
1313 | 1306 | ||
1314 | /* If sub-contexts are enabled, do the appropriate setup */ | 1307 | /* If sub-contexts are enabled, do the appropriate setup */ |
1315 | if (uctxt->subctxt_cnt) | 1308 | if (uctxt->subctxt_cnt) |
1316 | ret = setup_subctxt(uctxt); | 1309 | ret = setup_subctxt(uctxt); |
1317 | if (ret) | 1310 | if (ret) |
1318 | goto setup_failed; | 1311 | goto done; |
1319 | 1312 | ||
1320 | ret = hfi1_alloc_ctxt_rcv_groups(uctxt); | 1313 | ret = hfi1_alloc_ctxt_rcv_groups(uctxt); |
1321 | if (ret) | 1314 | if (ret) |
1322 | goto setup_failed; | 1315 | goto done; |
1323 | 1316 | ||
1324 | ret = init_user_ctxt(fd, uctxt); | 1317 | ret = init_user_ctxt(fd, uctxt); |
1325 | if (ret) | 1318 | if (ret) |
1326 | goto setup_failed; | 1319 | goto done; |
1327 | 1320 | ||
1328 | user_init(uctxt); | 1321 | user_init(uctxt); |
1329 | 1322 | ||
@@ -1331,12 +1324,22 @@ static int setup_base_ctxt(struct hfi1_filedata *fd, | |||
1331 | fd->uctxt = uctxt; | 1324 | fd->uctxt = uctxt; |
1332 | hfi1_rcd_get(uctxt); | 1325 | hfi1_rcd_get(uctxt); |
1333 | 1326 | ||
1334 | return 0; | 1327 | done: |
1328 | if (uctxt->subctxt_cnt) { | ||
1329 | /* | ||
1330 | * On error, set the failed bit so sub-contexts will clean up | ||
1331 | * correctly. | ||
1332 | */ | ||
1333 | if (ret) | ||
1334 | set_bit(HFI1_CTXT_BASE_FAILED, &uctxt->event_flags); | ||
1335 | 1335 | ||
1336 | setup_failed: | 1336 | /* |
1337 | /* Set the failed bit so sub-context init can do the right thing */ | 1337 | * Base context is done (successfully or not), notify anybody |
1338 | set_bit(HFI1_CTXT_BASE_FAILED, &uctxt->event_flags); | 1338 | * using a sub-context that is waiting for this completion. |
1339 | deallocate_ctxt(uctxt); | 1339 | */ |
1340 | clear_bit(HFI1_CTXT_BASE_UNINIT, &uctxt->event_flags); | ||
1341 | wake_up(&uctxt->wait); | ||
1342 | } | ||
1340 | 1343 | ||
1341 | return ret; | 1344 | return ret; |
1342 | } | 1345 | } |
diff --git a/drivers/infiniband/hw/hfi1/pcie.c b/drivers/infiniband/hw/hfi1/pcie.c index 82447b7cdda1..09e50fd2a08f 100644 --- a/drivers/infiniband/hw/hfi1/pcie.c +++ b/drivers/infiniband/hw/hfi1/pcie.c | |||
@@ -68,7 +68,7 @@ | |||
68 | /* | 68 | /* |
69 | * Code to adjust PCIe capabilities. | 69 | * Code to adjust PCIe capabilities. |
70 | */ | 70 | */ |
71 | static int tune_pcie_caps(struct hfi1_devdata *); | 71 | static void tune_pcie_caps(struct hfi1_devdata *); |
72 | 72 | ||
73 | /* | 73 | /* |
74 | * Do all the common PCIe setup and initialization. | 74 | * Do all the common PCIe setup and initialization. |
@@ -351,7 +351,7 @@ int pcie_speeds(struct hfi1_devdata *dd) | |||
351 | */ | 351 | */ |
352 | int request_msix(struct hfi1_devdata *dd, u32 msireq) | 352 | int request_msix(struct hfi1_devdata *dd, u32 msireq) |
353 | { | 353 | { |
354 | int nvec, ret; | 354 | int nvec; |
355 | 355 | ||
356 | nvec = pci_alloc_irq_vectors(dd->pcidev, 1, msireq, | 356 | nvec = pci_alloc_irq_vectors(dd->pcidev, 1, msireq, |
357 | PCI_IRQ_MSIX | PCI_IRQ_LEGACY); | 357 | PCI_IRQ_MSIX | PCI_IRQ_LEGACY); |
@@ -360,12 +360,7 @@ int request_msix(struct hfi1_devdata *dd, u32 msireq) | |||
360 | return nvec; | 360 | return nvec; |
361 | } | 361 | } |
362 | 362 | ||
363 | ret = tune_pcie_caps(dd); | 363 | tune_pcie_caps(dd); |
364 | if (ret) { | ||
365 | dd_dev_err(dd, "tune_pcie_caps() failed: %d\n", ret); | ||
366 | pci_free_irq_vectors(dd->pcidev); | ||
367 | return ret; | ||
368 | } | ||
369 | 364 | ||
370 | /* check for legacy IRQ */ | 365 | /* check for legacy IRQ */ |
371 | if (nvec == 1 && !dd->pcidev->msix_enabled) | 366 | if (nvec == 1 && !dd->pcidev->msix_enabled) |
@@ -502,7 +497,7 @@ uint aspm_mode = ASPM_MODE_DISABLED; | |||
502 | module_param_named(aspm, aspm_mode, uint, S_IRUGO); | 497 | module_param_named(aspm, aspm_mode, uint, S_IRUGO); |
503 | MODULE_PARM_DESC(aspm, "PCIe ASPM: 0: disable, 1: enable, 2: dynamic"); | 498 | MODULE_PARM_DESC(aspm, "PCIe ASPM: 0: disable, 1: enable, 2: dynamic"); |
504 | 499 | ||
505 | static int tune_pcie_caps(struct hfi1_devdata *dd) | 500 | static void tune_pcie_caps(struct hfi1_devdata *dd) |
506 | { | 501 | { |
507 | struct pci_dev *parent; | 502 | struct pci_dev *parent; |
508 | u16 rc_mpss, rc_mps, ep_mpss, ep_mps; | 503 | u16 rc_mpss, rc_mps, ep_mpss, ep_mps; |
@@ -513,22 +508,14 @@ static int tune_pcie_caps(struct hfi1_devdata *dd) | |||
513 | * Turn on extended tags in DevCtl in case the BIOS has turned it off | 508 | * Turn on extended tags in DevCtl in case the BIOS has turned it off |
514 | * to improve WFR SDMA bandwidth | 509 | * to improve WFR SDMA bandwidth |
515 | */ | 510 | */ |
516 | ret = pcie_capability_read_word(dd->pcidev, | 511 | ret = pcie_capability_read_word(dd->pcidev, PCI_EXP_DEVCTL, &ectl); |
517 | PCI_EXP_DEVCTL, &ectl); | 512 | if ((!ret) && !(ectl & PCI_EXP_DEVCTL_EXT_TAG)) { |
518 | if (ret) { | ||
519 | dd_dev_err(dd, "Unable to read from PCI config\n"); | ||
520 | return ret; | ||
521 | } | ||
522 | |||
523 | if (!(ectl & PCI_EXP_DEVCTL_EXT_TAG)) { | ||
524 | dd_dev_info(dd, "Enabling PCIe extended tags\n"); | 513 | dd_dev_info(dd, "Enabling PCIe extended tags\n"); |
525 | ectl |= PCI_EXP_DEVCTL_EXT_TAG; | 514 | ectl |= PCI_EXP_DEVCTL_EXT_TAG; |
526 | ret = pcie_capability_write_word(dd->pcidev, | 515 | ret = pcie_capability_write_word(dd->pcidev, |
527 | PCI_EXP_DEVCTL, ectl); | 516 | PCI_EXP_DEVCTL, ectl); |
528 | if (ret) { | 517 | if (ret) |
529 | dd_dev_err(dd, "Unable to write to PCI config\n"); | 518 | dd_dev_info(dd, "Unable to write to PCI config\n"); |
530 | return ret; | ||
531 | } | ||
532 | } | 519 | } |
533 | /* Find out supported and configured values for parent (root) */ | 520 | /* Find out supported and configured values for parent (root) */ |
534 | parent = dd->pcidev->bus->self; | 521 | parent = dd->pcidev->bus->self; |
@@ -536,15 +523,22 @@ static int tune_pcie_caps(struct hfi1_devdata *dd) | |||
536 | * The driver cannot perform the tuning if it does not have | 523 | * The driver cannot perform the tuning if it does not have |
537 | * access to the upstream component. | 524 | * access to the upstream component. |
538 | */ | 525 | */ |
539 | if (!parent) | 526 | if (!parent) { |
540 | return -EINVAL; | 527 | dd_dev_info(dd, "Parent not found\n"); |
528 | return; | ||
529 | } | ||
541 | if (!pci_is_root_bus(parent->bus)) { | 530 | if (!pci_is_root_bus(parent->bus)) { |
542 | dd_dev_info(dd, "Parent not root\n"); | 531 | dd_dev_info(dd, "Parent not root\n"); |
543 | return -EINVAL; | 532 | return; |
533 | } | ||
534 | if (!pci_is_pcie(parent)) { | ||
535 | dd_dev_info(dd, "Parent is not PCI Express capable\n"); | ||
536 | return; | ||
537 | } | ||
538 | if (!pci_is_pcie(dd->pcidev)) { | ||
539 | dd_dev_info(dd, "PCI device is not PCI Express capable\n"); | ||
540 | return; | ||
544 | } | 541 | } |
545 | |||
546 | if (!pci_is_pcie(parent) || !pci_is_pcie(dd->pcidev)) | ||
547 | return -EINVAL; | ||
548 | rc_mpss = parent->pcie_mpss; | 542 | rc_mpss = parent->pcie_mpss; |
549 | rc_mps = ffs(pcie_get_mps(parent)) - 8; | 543 | rc_mps = ffs(pcie_get_mps(parent)) - 8; |
550 | /* Find out supported and configured values for endpoint (us) */ | 544 | /* Find out supported and configured values for endpoint (us) */ |
@@ -590,8 +584,6 @@ static int tune_pcie_caps(struct hfi1_devdata *dd) | |||
590 | ep_mrrs = max_mrrs; | 584 | ep_mrrs = max_mrrs; |
591 | pcie_set_readrq(dd->pcidev, ep_mrrs); | 585 | pcie_set_readrq(dd->pcidev, ep_mrrs); |
592 | } | 586 | } |
593 | |||
594 | return 0; | ||
595 | } | 587 | } |
596 | 588 | ||
597 | /* End of PCIe capability tuning */ | 589 | /* End of PCIe capability tuning */ |
diff --git a/drivers/infiniband/hw/hfi1/platform.c b/drivers/infiniband/hw/hfi1/platform.c index a8af96d2b1b0..d486355880cb 100644 --- a/drivers/infiniband/hw/hfi1/platform.c +++ b/drivers/infiniband/hw/hfi1/platform.c | |||
@@ -790,7 +790,9 @@ static int tune_active_qsfp(struct hfi1_pportdata *ppd, u32 *ptr_tx_preset, | |||
790 | * reuse of stale settings established in our previous pass through. | 790 | * reuse of stale settings established in our previous pass through. |
791 | */ | 791 | */ |
792 | if (ppd->qsfp_info.reset_needed) { | 792 | if (ppd->qsfp_info.reset_needed) { |
793 | reset_qsfp(ppd); | 793 | ret = reset_qsfp(ppd); |
794 | if (ret) | ||
795 | return ret; | ||
794 | refresh_qsfp_cache(ppd, &ppd->qsfp_info); | 796 | refresh_qsfp_cache(ppd, &ppd->qsfp_info); |
795 | } else { | 797 | } else { |
796 | ppd->qsfp_info.reset_needed = 1; | 798 | ppd->qsfp_info.reset_needed = 1; |
diff --git a/drivers/infiniband/hw/i40iw/i40iw.h b/drivers/infiniband/hw/i40iw/i40iw.h index 9b1566468744..a65e4cbdce2f 100644 --- a/drivers/infiniband/hw/i40iw/i40iw.h +++ b/drivers/infiniband/hw/i40iw/i40iw.h | |||
@@ -201,7 +201,6 @@ enum init_completion_state { | |||
201 | CEQ_CREATED, | 201 | CEQ_CREATED, |
202 | ILQ_CREATED, | 202 | ILQ_CREATED, |
203 | IEQ_CREATED, | 203 | IEQ_CREATED, |
204 | INET_NOTIFIER, | ||
205 | IP_ADDR_REGISTERED, | 204 | IP_ADDR_REGISTERED, |
206 | RDMA_DEV_REGISTERED | 205 | RDMA_DEV_REGISTERED |
207 | }; | 206 | }; |
diff --git a/drivers/infiniband/hw/i40iw/i40iw_cm.c b/drivers/infiniband/hw/i40iw/i40iw_cm.c index 14f36ba4e5be..5230dd3c938c 100644 --- a/drivers/infiniband/hw/i40iw/i40iw_cm.c +++ b/drivers/infiniband/hw/i40iw/i40iw_cm.c | |||
@@ -1504,23 +1504,40 @@ static void i40iw_add_hte_node(struct i40iw_cm_core *cm_core, | |||
1504 | } | 1504 | } |
1505 | 1505 | ||
1506 | /** | 1506 | /** |
1507 | * listen_port_in_use - determine if port is in use | 1507 | * i40iw_port_in_use - determine if port is in use |
1508 | * @port: Listen port number | 1508 | * @port: port number |
1509 | * @active_side: flag for listener side vs active side | ||
1509 | */ | 1510 | */ |
1510 | static bool i40iw_listen_port_in_use(struct i40iw_cm_core *cm_core, u16 port) | 1511 | static bool i40iw_port_in_use(struct i40iw_cm_core *cm_core, u16 port, bool active_side) |
1511 | { | 1512 | { |
1512 | struct i40iw_cm_listener *listen_node; | 1513 | struct i40iw_cm_listener *listen_node; |
1514 | struct i40iw_cm_node *cm_node; | ||
1513 | unsigned long flags; | 1515 | unsigned long flags; |
1514 | bool ret = false; | 1516 | bool ret = false; |
1515 | 1517 | ||
1516 | spin_lock_irqsave(&cm_core->listen_list_lock, flags); | 1518 | if (active_side) { |
1517 | list_for_each_entry(listen_node, &cm_core->listen_nodes, list) { | 1519 | /* search connected node list */ |
1518 | if (listen_node->loc_port == port) { | 1520 | spin_lock_irqsave(&cm_core->ht_lock, flags); |
1519 | ret = true; | 1521 | list_for_each_entry(cm_node, &cm_core->connected_nodes, list) { |
1520 | break; | 1522 | if (cm_node->loc_port == port) { |
1523 | ret = true; | ||
1524 | break; | ||
1525 | } | ||
1526 | } | ||
1527 | if (!ret) | ||
1528 | clear_bit(port, cm_core->active_side_ports); | ||
1529 | spin_unlock_irqrestore(&cm_core->ht_lock, flags); | ||
1530 | } else { | ||
1531 | spin_lock_irqsave(&cm_core->listen_list_lock, flags); | ||
1532 | list_for_each_entry(listen_node, &cm_core->listen_nodes, list) { | ||
1533 | if (listen_node->loc_port == port) { | ||
1534 | ret = true; | ||
1535 | break; | ||
1536 | } | ||
1521 | } | 1537 | } |
1538 | spin_unlock_irqrestore(&cm_core->listen_list_lock, flags); | ||
1522 | } | 1539 | } |
1523 | spin_unlock_irqrestore(&cm_core->listen_list_lock, flags); | 1540 | |
1524 | return ret; | 1541 | return ret; |
1525 | } | 1542 | } |
1526 | 1543 | ||
@@ -1868,7 +1885,7 @@ static int i40iw_dec_refcnt_listen(struct i40iw_cm_core *cm_core, | |||
1868 | spin_unlock_irqrestore(&cm_core->listen_list_lock, flags); | 1885 | spin_unlock_irqrestore(&cm_core->listen_list_lock, flags); |
1869 | 1886 | ||
1870 | if (listener->iwdev) { | 1887 | if (listener->iwdev) { |
1871 | if (apbvt_del && !i40iw_listen_port_in_use(cm_core, listener->loc_port)) | 1888 | if (apbvt_del && !i40iw_port_in_use(cm_core, listener->loc_port, false)) |
1872 | i40iw_manage_apbvt(listener->iwdev, | 1889 | i40iw_manage_apbvt(listener->iwdev, |
1873 | listener->loc_port, | 1890 | listener->loc_port, |
1874 | I40IW_MANAGE_APBVT_DEL); | 1891 | I40IW_MANAGE_APBVT_DEL); |
@@ -2247,21 +2264,21 @@ static void i40iw_rem_ref_cm_node(struct i40iw_cm_node *cm_node) | |||
2247 | if (cm_node->listener) { | 2264 | if (cm_node->listener) { |
2248 | i40iw_dec_refcnt_listen(cm_core, cm_node->listener, 0, true); | 2265 | i40iw_dec_refcnt_listen(cm_core, cm_node->listener, 0, true); |
2249 | } else { | 2266 | } else { |
2250 | if (!i40iw_listen_port_in_use(cm_core, cm_node->loc_port) && | 2267 | if (!i40iw_port_in_use(cm_core, cm_node->loc_port, true) && cm_node->apbvt_set) { |
2251 | cm_node->apbvt_set) { | ||
2252 | i40iw_manage_apbvt(cm_node->iwdev, | 2268 | i40iw_manage_apbvt(cm_node->iwdev, |
2253 | cm_node->loc_port, | 2269 | cm_node->loc_port, |
2254 | I40IW_MANAGE_APBVT_DEL); | 2270 | I40IW_MANAGE_APBVT_DEL); |
2255 | i40iw_get_addr_info(cm_node, &nfo); | 2271 | cm_node->apbvt_set = 0; |
2256 | if (cm_node->qhash_set) { | 2272 | } |
2257 | i40iw_manage_qhash(cm_node->iwdev, | 2273 | i40iw_get_addr_info(cm_node, &nfo); |
2258 | &nfo, | 2274 | if (cm_node->qhash_set) { |
2259 | I40IW_QHASH_TYPE_TCP_ESTABLISHED, | 2275 | i40iw_manage_qhash(cm_node->iwdev, |
2260 | I40IW_QHASH_MANAGE_TYPE_DELETE, | 2276 | &nfo, |
2261 | NULL, | 2277 | I40IW_QHASH_TYPE_TCP_ESTABLISHED, |
2262 | false); | 2278 | I40IW_QHASH_MANAGE_TYPE_DELETE, |
2263 | cm_node->qhash_set = 0; | 2279 | NULL, |
2264 | } | 2280 | false); |
2281 | cm_node->qhash_set = 0; | ||
2265 | } | 2282 | } |
2266 | } | 2283 | } |
2267 | 2284 | ||
@@ -3255,7 +3272,8 @@ static void i40iw_init_tcp_ctx(struct i40iw_cm_node *cm_node, | |||
3255 | tcp_info->snd_mss = cpu_to_le32(((u32)cm_node->tcp_cntxt.mss)); | 3272 | tcp_info->snd_mss = cpu_to_le32(((u32)cm_node->tcp_cntxt.mss)); |
3256 | if (cm_node->vlan_id < VLAN_TAG_PRESENT) { | 3273 | if (cm_node->vlan_id < VLAN_TAG_PRESENT) { |
3257 | tcp_info->insert_vlan_tag = true; | 3274 | tcp_info->insert_vlan_tag = true; |
3258 | tcp_info->vlan_tag = cpu_to_le16(cm_node->vlan_id); | 3275 | tcp_info->vlan_tag = cpu_to_le16(((u16)cm_node->user_pri << I40IW_VLAN_PRIO_SHIFT) | |
3276 | cm_node->vlan_id); | ||
3259 | } | 3277 | } |
3260 | if (cm_node->ipv4) { | 3278 | if (cm_node->ipv4) { |
3261 | tcp_info->src_port = cpu_to_le16(cm_node->loc_port); | 3279 | tcp_info->src_port = cpu_to_le16(cm_node->loc_port); |
@@ -3737,10 +3755,8 @@ int i40iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) | |||
3737 | struct sockaddr_in *raddr; | 3755 | struct sockaddr_in *raddr; |
3738 | struct sockaddr_in6 *laddr6; | 3756 | struct sockaddr_in6 *laddr6; |
3739 | struct sockaddr_in6 *raddr6; | 3757 | struct sockaddr_in6 *raddr6; |
3740 | bool qhash_set = false; | 3758 | int ret = 0; |
3741 | int apbvt_set = 0; | 3759 | unsigned long flags; |
3742 | int err = 0; | ||
3743 | enum i40iw_status_code status; | ||
3744 | 3760 | ||
3745 | ibqp = i40iw_get_qp(cm_id->device, conn_param->qpn); | 3761 | ibqp = i40iw_get_qp(cm_id->device, conn_param->qpn); |
3746 | if (!ibqp) | 3762 | if (!ibqp) |
@@ -3789,32 +3805,6 @@ int i40iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) | |||
3789 | cm_info.user_pri = rt_tos2priority(cm_id->tos); | 3805 | cm_info.user_pri = rt_tos2priority(cm_id->tos); |
3790 | i40iw_debug(&iwdev->sc_dev, I40IW_DEBUG_DCB, "%s TOS:[%d] UP:[%d]\n", | 3806 | i40iw_debug(&iwdev->sc_dev, I40IW_DEBUG_DCB, "%s TOS:[%d] UP:[%d]\n", |
3791 | __func__, cm_id->tos, cm_info.user_pri); | 3807 | __func__, cm_id->tos, cm_info.user_pri); |
3792 | if ((cm_info.ipv4 && (laddr->sin_addr.s_addr != raddr->sin_addr.s_addr)) || | ||
3793 | (!cm_info.ipv4 && memcmp(laddr6->sin6_addr.in6_u.u6_addr32, | ||
3794 | raddr6->sin6_addr.in6_u.u6_addr32, | ||
3795 | sizeof(laddr6->sin6_addr.in6_u.u6_addr32)))) { | ||
3796 | status = i40iw_manage_qhash(iwdev, | ||
3797 | &cm_info, | ||
3798 | I40IW_QHASH_TYPE_TCP_ESTABLISHED, | ||
3799 | I40IW_QHASH_MANAGE_TYPE_ADD, | ||
3800 | NULL, | ||
3801 | true); | ||
3802 | if (status) | ||
3803 | return -EINVAL; | ||
3804 | qhash_set = true; | ||
3805 | } | ||
3806 | status = i40iw_manage_apbvt(iwdev, cm_info.loc_port, I40IW_MANAGE_APBVT_ADD); | ||
3807 | if (status) { | ||
3808 | i40iw_manage_qhash(iwdev, | ||
3809 | &cm_info, | ||
3810 | I40IW_QHASH_TYPE_TCP_ESTABLISHED, | ||
3811 | I40IW_QHASH_MANAGE_TYPE_DELETE, | ||
3812 | NULL, | ||
3813 | false); | ||
3814 | return -EINVAL; | ||
3815 | } | ||
3816 | |||
3817 | apbvt_set = 1; | ||
3818 | cm_id->add_ref(cm_id); | 3808 | cm_id->add_ref(cm_id); |
3819 | cm_node = i40iw_create_cm_node(&iwdev->cm_core, iwdev, | 3809 | cm_node = i40iw_create_cm_node(&iwdev->cm_core, iwdev, |
3820 | conn_param->private_data_len, | 3810 | conn_param->private_data_len, |
@@ -3822,17 +3812,40 @@ int i40iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) | |||
3822 | &cm_info); | 3812 | &cm_info); |
3823 | 3813 | ||
3824 | if (IS_ERR(cm_node)) { | 3814 | if (IS_ERR(cm_node)) { |
3825 | err = PTR_ERR(cm_node); | 3815 | ret = PTR_ERR(cm_node); |
3826 | goto err_out; | 3816 | cm_id->rem_ref(cm_id); |
3817 | return ret; | ||
3818 | } | ||
3819 | |||
3820 | if ((cm_info.ipv4 && (laddr->sin_addr.s_addr != raddr->sin_addr.s_addr)) || | ||
3821 | (!cm_info.ipv4 && memcmp(laddr6->sin6_addr.in6_u.u6_addr32, | ||
3822 | raddr6->sin6_addr.in6_u.u6_addr32, | ||
3823 | sizeof(laddr6->sin6_addr.in6_u.u6_addr32)))) { | ||
3824 | if (i40iw_manage_qhash(iwdev, &cm_info, I40IW_QHASH_TYPE_TCP_ESTABLISHED, | ||
3825 | I40IW_QHASH_MANAGE_TYPE_ADD, NULL, true)) { | ||
3826 | ret = -EINVAL; | ||
3827 | goto err; | ||
3828 | } | ||
3829 | cm_node->qhash_set = true; | ||
3827 | } | 3830 | } |
3828 | 3831 | ||
3832 | spin_lock_irqsave(&iwdev->cm_core.ht_lock, flags); | ||
3833 | if (!test_and_set_bit(cm_info.loc_port, iwdev->cm_core.active_side_ports)) { | ||
3834 | spin_unlock_irqrestore(&iwdev->cm_core.ht_lock, flags); | ||
3835 | if (i40iw_manage_apbvt(iwdev, cm_info.loc_port, I40IW_MANAGE_APBVT_ADD)) { | ||
3836 | ret = -EINVAL; | ||
3837 | goto err; | ||
3838 | } | ||
3839 | } else { | ||
3840 | spin_unlock_irqrestore(&iwdev->cm_core.ht_lock, flags); | ||
3841 | } | ||
3842 | |||
3843 | cm_node->apbvt_set = true; | ||
3829 | i40iw_record_ird_ord(cm_node, (u16)conn_param->ird, (u16)conn_param->ord); | 3844 | i40iw_record_ird_ord(cm_node, (u16)conn_param->ird, (u16)conn_param->ord); |
3830 | if (cm_node->send_rdma0_op == SEND_RDMA_READ_ZERO && | 3845 | if (cm_node->send_rdma0_op == SEND_RDMA_READ_ZERO && |
3831 | !cm_node->ord_size) | 3846 | !cm_node->ord_size) |
3832 | cm_node->ord_size = 1; | 3847 | cm_node->ord_size = 1; |
3833 | 3848 | ||
3834 | cm_node->apbvt_set = apbvt_set; | ||
3835 | cm_node->qhash_set = qhash_set; | ||
3836 | iwqp->cm_node = cm_node; | 3849 | iwqp->cm_node = cm_node; |
3837 | cm_node->iwqp = iwqp; | 3850 | cm_node->iwqp = iwqp; |
3838 | iwqp->cm_id = cm_id; | 3851 | iwqp->cm_id = cm_id; |
@@ -3840,11 +3853,9 @@ int i40iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) | |||
3840 | 3853 | ||
3841 | if (cm_node->state != I40IW_CM_STATE_OFFLOADED) { | 3854 | if (cm_node->state != I40IW_CM_STATE_OFFLOADED) { |
3842 | cm_node->state = I40IW_CM_STATE_SYN_SENT; | 3855 | cm_node->state = I40IW_CM_STATE_SYN_SENT; |
3843 | err = i40iw_send_syn(cm_node, 0); | 3856 | ret = i40iw_send_syn(cm_node, 0); |
3844 | if (err) { | 3857 | if (ret) |
3845 | i40iw_rem_ref_cm_node(cm_node); | 3858 | goto err; |
3846 | goto err_out; | ||
3847 | } | ||
3848 | } | 3859 | } |
3849 | 3860 | ||
3850 | i40iw_debug(cm_node->dev, | 3861 | i40iw_debug(cm_node->dev, |
@@ -3853,9 +3864,10 @@ int i40iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) | |||
3853 | cm_node->rem_port, | 3864 | cm_node->rem_port, |
3854 | cm_node, | 3865 | cm_node, |
3855 | cm_node->cm_id); | 3866 | cm_node->cm_id); |
3867 | |||
3856 | return 0; | 3868 | return 0; |
3857 | 3869 | ||
3858 | err_out: | 3870 | err: |
3859 | if (cm_info.ipv4) | 3871 | if (cm_info.ipv4) |
3860 | i40iw_debug(&iwdev->sc_dev, | 3872 | i40iw_debug(&iwdev->sc_dev, |
3861 | I40IW_DEBUG_CM, | 3873 | I40IW_DEBUG_CM, |
@@ -3867,22 +3879,10 @@ err_out: | |||
3867 | "Api - connect() FAILED: dest addr=%pI6", | 3879 | "Api - connect() FAILED: dest addr=%pI6", |
3868 | cm_info.rem_addr); | 3880 | cm_info.rem_addr); |
3869 | 3881 | ||
3870 | if (qhash_set) | 3882 | i40iw_rem_ref_cm_node(cm_node); |
3871 | i40iw_manage_qhash(iwdev, | ||
3872 | &cm_info, | ||
3873 | I40IW_QHASH_TYPE_TCP_ESTABLISHED, | ||
3874 | I40IW_QHASH_MANAGE_TYPE_DELETE, | ||
3875 | NULL, | ||
3876 | false); | ||
3877 | |||
3878 | if (apbvt_set && !i40iw_listen_port_in_use(&iwdev->cm_core, | ||
3879 | cm_info.loc_port)) | ||
3880 | i40iw_manage_apbvt(iwdev, | ||
3881 | cm_info.loc_port, | ||
3882 | I40IW_MANAGE_APBVT_DEL); | ||
3883 | cm_id->rem_ref(cm_id); | 3883 | cm_id->rem_ref(cm_id); |
3884 | iwdev->cm_core.stats_connect_errs++; | 3884 | iwdev->cm_core.stats_connect_errs++; |
3885 | return err; | 3885 | return ret; |
3886 | } | 3886 | } |
3887 | 3887 | ||
3888 | /** | 3888 | /** |
diff --git a/drivers/infiniband/hw/i40iw/i40iw_cm.h b/drivers/infiniband/hw/i40iw/i40iw_cm.h index 2e52e38ffcf3..45abef76295b 100644 --- a/drivers/infiniband/hw/i40iw/i40iw_cm.h +++ b/drivers/infiniband/hw/i40iw/i40iw_cm.h | |||
@@ -71,6 +71,9 @@ | |||
71 | #define I40IW_HW_IRD_SETTING_32 32 | 71 | #define I40IW_HW_IRD_SETTING_32 32 |
72 | #define I40IW_HW_IRD_SETTING_64 64 | 72 | #define I40IW_HW_IRD_SETTING_64 64 |
73 | 73 | ||
74 | #define MAX_PORTS 65536 | ||
75 | #define I40IW_VLAN_PRIO_SHIFT 13 | ||
76 | |||
74 | enum ietf_mpa_flags { | 77 | enum ietf_mpa_flags { |
75 | IETF_MPA_FLAGS_MARKERS = 0x80, /* receive Markers */ | 78 | IETF_MPA_FLAGS_MARKERS = 0x80, /* receive Markers */ |
76 | IETF_MPA_FLAGS_CRC = 0x40, /* receive Markers */ | 79 | IETF_MPA_FLAGS_CRC = 0x40, /* receive Markers */ |
@@ -411,6 +414,8 @@ struct i40iw_cm_core { | |||
411 | spinlock_t ht_lock; /* manage hash table */ | 414 | spinlock_t ht_lock; /* manage hash table */ |
412 | spinlock_t listen_list_lock; /* listen list */ | 415 | spinlock_t listen_list_lock; /* listen list */ |
413 | 416 | ||
417 | unsigned long active_side_ports[BITS_TO_LONGS(MAX_PORTS)]; | ||
418 | |||
414 | u64 stats_nodes_created; | 419 | u64 stats_nodes_created; |
415 | u64 stats_nodes_destroyed; | 420 | u64 stats_nodes_destroyed; |
416 | u64 stats_listen_created; | 421 | u64 stats_listen_created; |
diff --git a/drivers/infiniband/hw/i40iw/i40iw_ctrl.c b/drivers/infiniband/hw/i40iw/i40iw_ctrl.c index d1f5345f04f0..42ca5346777d 100644 --- a/drivers/infiniband/hw/i40iw/i40iw_ctrl.c +++ b/drivers/infiniband/hw/i40iw/i40iw_ctrl.c | |||
@@ -48,7 +48,7 @@ | |||
48 | * @wqe: cqp wqe for header | 48 | * @wqe: cqp wqe for header |
49 | * @header: header for the cqp wqe | 49 | * @header: header for the cqp wqe |
50 | */ | 50 | */ |
51 | static inline void i40iw_insert_wqe_hdr(u64 *wqe, u64 header) | 51 | void i40iw_insert_wqe_hdr(u64 *wqe, u64 header) |
52 | { | 52 | { |
53 | wmb(); /* make sure WQE is populated before polarity is set */ | 53 | wmb(); /* make sure WQE is populated before polarity is set */ |
54 | set_64bit_val(wqe, 24, header); | 54 | set_64bit_val(wqe, 24, header); |
diff --git a/drivers/infiniband/hw/i40iw/i40iw_main.c b/drivers/infiniband/hw/i40iw/i40iw_main.c index cc742c3132c6..27590ae21881 100644 --- a/drivers/infiniband/hw/i40iw/i40iw_main.c +++ b/drivers/infiniband/hw/i40iw/i40iw_main.c | |||
@@ -99,8 +99,6 @@ static struct notifier_block i40iw_net_notifier = { | |||
99 | .notifier_call = i40iw_net_event | 99 | .notifier_call = i40iw_net_event |
100 | }; | 100 | }; |
101 | 101 | ||
102 | static atomic_t i40iw_notifiers_registered; | ||
103 | |||
104 | /** | 102 | /** |
105 | * i40iw_find_i40e_handler - find a handler given a client info | 103 | * i40iw_find_i40e_handler - find a handler given a client info |
106 | * @ldev: pointer to a client info | 104 | * @ldev: pointer to a client info |
@@ -1376,11 +1374,20 @@ error: | |||
1376 | */ | 1374 | */ |
1377 | static void i40iw_register_notifiers(void) | 1375 | static void i40iw_register_notifiers(void) |
1378 | { | 1376 | { |
1379 | if (atomic_inc_return(&i40iw_notifiers_registered) == 1) { | 1377 | register_inetaddr_notifier(&i40iw_inetaddr_notifier); |
1380 | register_inetaddr_notifier(&i40iw_inetaddr_notifier); | 1378 | register_inet6addr_notifier(&i40iw_inetaddr6_notifier); |
1381 | register_inet6addr_notifier(&i40iw_inetaddr6_notifier); | 1379 | register_netevent_notifier(&i40iw_net_notifier); |
1382 | register_netevent_notifier(&i40iw_net_notifier); | 1380 | } |
1383 | } | 1381 | |
1382 | /** | ||
1383 | * i40iw_unregister_notifiers - unregister tcp ip notifiers | ||
1384 | */ | ||
1385 | |||
1386 | static void i40iw_unregister_notifiers(void) | ||
1387 | { | ||
1388 | unregister_netevent_notifier(&i40iw_net_notifier); | ||
1389 | unregister_inetaddr_notifier(&i40iw_inetaddr_notifier); | ||
1390 | unregister_inet6addr_notifier(&i40iw_inetaddr6_notifier); | ||
1384 | } | 1391 | } |
1385 | 1392 | ||
1386 | /** | 1393 | /** |
@@ -1400,6 +1407,11 @@ static enum i40iw_status_code i40iw_save_msix_info(struct i40iw_device *iwdev, | |||
1400 | u32 i; | 1407 | u32 i; |
1401 | u32 size; | 1408 | u32 size; |
1402 | 1409 | ||
1410 | if (!ldev->msix_count) { | ||
1411 | i40iw_pr_err("No MSI-X vectors\n"); | ||
1412 | return I40IW_ERR_CONFIG; | ||
1413 | } | ||
1414 | |||
1403 | iwdev->msix_count = ldev->msix_count; | 1415 | iwdev->msix_count = ldev->msix_count; |
1404 | 1416 | ||
1405 | size = sizeof(struct i40iw_msix_vector) * iwdev->msix_count; | 1417 | size = sizeof(struct i40iw_msix_vector) * iwdev->msix_count; |
@@ -1462,12 +1474,6 @@ static void i40iw_deinit_device(struct i40iw_device *iwdev) | |||
1462 | if (!iwdev->reset) | 1474 | if (!iwdev->reset) |
1463 | i40iw_del_macip_entry(iwdev, (u8)iwdev->mac_ip_table_idx); | 1475 | i40iw_del_macip_entry(iwdev, (u8)iwdev->mac_ip_table_idx); |
1464 | /* fallthrough */ | 1476 | /* fallthrough */ |
1465 | case INET_NOTIFIER: | ||
1466 | if (!atomic_dec_return(&i40iw_notifiers_registered)) { | ||
1467 | unregister_netevent_notifier(&i40iw_net_notifier); | ||
1468 | unregister_inetaddr_notifier(&i40iw_inetaddr_notifier); | ||
1469 | unregister_inet6addr_notifier(&i40iw_inetaddr6_notifier); | ||
1470 | } | ||
1471 | /* fallthrough */ | 1477 | /* fallthrough */ |
1472 | case PBLE_CHUNK_MEM: | 1478 | case PBLE_CHUNK_MEM: |
1473 | i40iw_destroy_pble_pool(dev, iwdev->pble_rsrc); | 1479 | i40iw_destroy_pble_pool(dev, iwdev->pble_rsrc); |
@@ -1550,7 +1556,7 @@ static enum i40iw_status_code i40iw_setup_init_state(struct i40iw_handler *hdl, | |||
1550 | 1556 | ||
1551 | status = i40iw_save_msix_info(iwdev, ldev); | 1557 | status = i40iw_save_msix_info(iwdev, ldev); |
1552 | if (status) | 1558 | if (status) |
1553 | goto exit; | 1559 | return status; |
1554 | iwdev->hw.dev_context = (void *)ldev->pcidev; | 1560 | iwdev->hw.dev_context = (void *)ldev->pcidev; |
1555 | iwdev->hw.hw_addr = ldev->hw_addr; | 1561 | iwdev->hw.hw_addr = ldev->hw_addr; |
1556 | status = i40iw_allocate_dma_mem(&iwdev->hw, | 1562 | status = i40iw_allocate_dma_mem(&iwdev->hw, |
@@ -1667,8 +1673,6 @@ static int i40iw_open(struct i40e_info *ldev, struct i40e_client *client) | |||
1667 | break; | 1673 | break; |
1668 | iwdev->init_state = PBLE_CHUNK_MEM; | 1674 | iwdev->init_state = PBLE_CHUNK_MEM; |
1669 | iwdev->virtchnl_wq = alloc_ordered_workqueue("iwvch", WQ_MEM_RECLAIM); | 1675 | iwdev->virtchnl_wq = alloc_ordered_workqueue("iwvch", WQ_MEM_RECLAIM); |
1670 | i40iw_register_notifiers(); | ||
1671 | iwdev->init_state = INET_NOTIFIER; | ||
1672 | status = i40iw_add_mac_ip(iwdev); | 1676 | status = i40iw_add_mac_ip(iwdev); |
1673 | if (status) | 1677 | if (status) |
1674 | break; | 1678 | break; |
@@ -2018,6 +2022,8 @@ static int __init i40iw_init_module(void) | |||
2018 | i40iw_client.type = I40E_CLIENT_IWARP; | 2022 | i40iw_client.type = I40E_CLIENT_IWARP; |
2019 | spin_lock_init(&i40iw_handler_lock); | 2023 | spin_lock_init(&i40iw_handler_lock); |
2020 | ret = i40e_register_client(&i40iw_client); | 2024 | ret = i40e_register_client(&i40iw_client); |
2025 | i40iw_register_notifiers(); | ||
2026 | |||
2021 | return ret; | 2027 | return ret; |
2022 | } | 2028 | } |
2023 | 2029 | ||
@@ -2029,6 +2035,7 @@ static int __init i40iw_init_module(void) | |||
2029 | */ | 2035 | */ |
2030 | static void __exit i40iw_exit_module(void) | 2036 | static void __exit i40iw_exit_module(void) |
2031 | { | 2037 | { |
2038 | i40iw_unregister_notifiers(); | ||
2032 | i40e_unregister_client(&i40iw_client); | 2039 | i40e_unregister_client(&i40iw_client); |
2033 | } | 2040 | } |
2034 | 2041 | ||
diff --git a/drivers/infiniband/hw/i40iw/i40iw_p.h b/drivers/infiniband/hw/i40iw/i40iw_p.h index e217a1259f57..5498ad01c280 100644 --- a/drivers/infiniband/hw/i40iw/i40iw_p.h +++ b/drivers/infiniband/hw/i40iw/i40iw_p.h | |||
@@ -59,6 +59,8 @@ enum i40iw_status_code i40iw_sc_mr_fast_register(struct i40iw_sc_qp *qp, | |||
59 | struct i40iw_fast_reg_stag_info *info, | 59 | struct i40iw_fast_reg_stag_info *info, |
60 | bool post_sq); | 60 | bool post_sq); |
61 | 61 | ||
62 | void i40iw_insert_wqe_hdr(u64 *wqe, u64 header); | ||
63 | |||
62 | /* HMC/FPM functions */ | 64 | /* HMC/FPM functions */ |
63 | enum i40iw_status_code i40iw_sc_init_iw_hmc(struct i40iw_sc_dev *dev, | 65 | enum i40iw_status_code i40iw_sc_init_iw_hmc(struct i40iw_sc_dev *dev, |
64 | u8 hmc_fn_id); | 66 | u8 hmc_fn_id); |
diff --git a/drivers/infiniband/hw/i40iw/i40iw_puda.c b/drivers/infiniband/hw/i40iw/i40iw_puda.c index c2cab20c4bc5..59f70676f0e0 100644 --- a/drivers/infiniband/hw/i40iw/i40iw_puda.c +++ b/drivers/infiniband/hw/i40iw/i40iw_puda.c | |||
@@ -123,12 +123,11 @@ static void i40iw_puda_post_recvbuf(struct i40iw_puda_rsrc *rsrc, u32 wqe_idx, | |||
123 | get_64bit_val(wqe, 24, &offset24); | 123 | get_64bit_val(wqe, 24, &offset24); |
124 | 124 | ||
125 | offset24 = (offset24) ? 0 : LS_64(1, I40IWQPSQ_VALID); | 125 | offset24 = (offset24) ? 0 : LS_64(1, I40IWQPSQ_VALID); |
126 | set_64bit_val(wqe, 24, offset24); | ||
127 | 126 | ||
128 | set_64bit_val(wqe, 0, buf->mem.pa); | 127 | set_64bit_val(wqe, 0, buf->mem.pa); |
129 | set_64bit_val(wqe, 8, | 128 | set_64bit_val(wqe, 8, |
130 | LS_64(buf->mem.size, I40IWQPSQ_FRAG_LEN)); | 129 | LS_64(buf->mem.size, I40IWQPSQ_FRAG_LEN)); |
131 | set_64bit_val(wqe, 24, offset24); | 130 | i40iw_insert_wqe_hdr(wqe, offset24); |
132 | } | 131 | } |
133 | 132 | ||
134 | /** | 133 | /** |
@@ -409,9 +408,7 @@ enum i40iw_status_code i40iw_puda_send(struct i40iw_sc_qp *qp, | |||
409 | set_64bit_val(wqe, 8, LS_64(info->len, I40IWQPSQ_FRAG_LEN)); | 408 | set_64bit_val(wqe, 8, LS_64(info->len, I40IWQPSQ_FRAG_LEN)); |
410 | set_64bit_val(wqe, 16, header[0]); | 409 | set_64bit_val(wqe, 16, header[0]); |
411 | 410 | ||
412 | /* Ensure all data is written before writing valid bit */ | 411 | i40iw_insert_wqe_hdr(wqe, header[1]); |
413 | wmb(); | ||
414 | set_64bit_val(wqe, 24, header[1]); | ||
415 | 412 | ||
416 | i40iw_debug_buf(qp->dev, I40IW_DEBUG_PUDA, "PUDA SEND WQE", wqe, 32); | 413 | i40iw_debug_buf(qp->dev, I40IW_DEBUG_PUDA, "PUDA SEND WQE", wqe, 32); |
417 | i40iw_qp_post_wr(&qp->qp_uk); | 414 | i40iw_qp_post_wr(&qp->qp_uk); |
@@ -539,7 +536,7 @@ static enum i40iw_status_code i40iw_puda_qp_wqe(struct i40iw_sc_dev *dev, struct | |||
539 | LS_64(2, I40IW_CQPSQ_QP_NEXTIWSTATE) | | 536 | LS_64(2, I40IW_CQPSQ_QP_NEXTIWSTATE) | |
540 | LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID); | 537 | LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID); |
541 | 538 | ||
542 | set_64bit_val(wqe, 24, header); | 539 | i40iw_insert_wqe_hdr(wqe, header); |
543 | 540 | ||
544 | i40iw_debug_buf(cqp->dev, I40IW_DEBUG_PUDA, "PUDA CQE", wqe, 32); | 541 | i40iw_debug_buf(cqp->dev, I40IW_DEBUG_PUDA, "PUDA CQE", wqe, 32); |
545 | i40iw_sc_cqp_post_sq(cqp); | 542 | i40iw_sc_cqp_post_sq(cqp); |
@@ -655,7 +652,7 @@ static enum i40iw_status_code i40iw_puda_cq_wqe(struct i40iw_sc_dev *dev, struct | |||
655 | LS_64(1, I40IW_CQPSQ_CQ_ENCEQEMASK) | | 652 | LS_64(1, I40IW_CQPSQ_CQ_ENCEQEMASK) | |
656 | LS_64(1, I40IW_CQPSQ_CQ_CEQIDVALID) | | 653 | LS_64(1, I40IW_CQPSQ_CQ_CEQIDVALID) | |
657 | LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID); | 654 | LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID); |
658 | set_64bit_val(wqe, 24, header); | 655 | i40iw_insert_wqe_hdr(wqe, header); |
659 | 656 | ||
660 | i40iw_debug_buf(dev, I40IW_DEBUG_PUDA, "PUDA CQE", | 657 | i40iw_debug_buf(dev, I40IW_DEBUG_PUDA, "PUDA CQE", |
661 | wqe, I40IW_CQP_WQE_SIZE * 8); | 658 | wqe, I40IW_CQP_WQE_SIZE * 8); |
diff --git a/drivers/infiniband/hw/i40iw/i40iw_utils.c b/drivers/infiniband/hw/i40iw/i40iw_utils.c index 62f1f45b8737..e52dbbb4165e 100644 --- a/drivers/infiniband/hw/i40iw/i40iw_utils.c +++ b/drivers/infiniband/hw/i40iw/i40iw_utils.c | |||
@@ -160,7 +160,7 @@ int i40iw_inetaddr_event(struct notifier_block *notifier, | |||
160 | return NOTIFY_DONE; | 160 | return NOTIFY_DONE; |
161 | 161 | ||
162 | iwdev = &hdl->device; | 162 | iwdev = &hdl->device; |
163 | if (iwdev->init_state < INET_NOTIFIER) | 163 | if (iwdev->init_state < IP_ADDR_REGISTERED || iwdev->closing) |
164 | return NOTIFY_DONE; | 164 | return NOTIFY_DONE; |
165 | 165 | ||
166 | netdev = iwdev->ldev->netdev; | 166 | netdev = iwdev->ldev->netdev; |
@@ -217,7 +217,7 @@ int i40iw_inet6addr_event(struct notifier_block *notifier, | |||
217 | return NOTIFY_DONE; | 217 | return NOTIFY_DONE; |
218 | 218 | ||
219 | iwdev = &hdl->device; | 219 | iwdev = &hdl->device; |
220 | if (iwdev->init_state < INET_NOTIFIER) | 220 | if (iwdev->init_state < IP_ADDR_REGISTERED || iwdev->closing) |
221 | return NOTIFY_DONE; | 221 | return NOTIFY_DONE; |
222 | 222 | ||
223 | netdev = iwdev->ldev->netdev; | 223 | netdev = iwdev->ldev->netdev; |
@@ -266,7 +266,7 @@ int i40iw_net_event(struct notifier_block *notifier, unsigned long event, void * | |||
266 | if (!iwhdl) | 266 | if (!iwhdl) |
267 | return NOTIFY_DONE; | 267 | return NOTIFY_DONE; |
268 | iwdev = &iwhdl->device; | 268 | iwdev = &iwhdl->device; |
269 | if (iwdev->init_state < INET_NOTIFIER) | 269 | if (iwdev->init_state < IP_ADDR_REGISTERED || iwdev->closing) |
270 | return NOTIFY_DONE; | 270 | return NOTIFY_DONE; |
271 | p = (__be32 *)neigh->primary_key; | 271 | p = (__be32 *)neigh->primary_key; |
272 | i40iw_copy_ip_ntohl(local_ipaddr, p); | 272 | i40iw_copy_ip_ntohl(local_ipaddr, p); |
diff --git a/drivers/infiniband/hw/i40iw/i40iw_verbs.c b/drivers/infiniband/hw/i40iw/i40iw_verbs.c index 1aa411034a27..62be0a41ad0b 100644 --- a/drivers/infiniband/hw/i40iw/i40iw_verbs.c +++ b/drivers/infiniband/hw/i40iw/i40iw_verbs.c | |||
@@ -826,12 +826,14 @@ static int i40iw_query_qp(struct ib_qp *ibqp, | |||
826 | attr->cap.max_inline_data = I40IW_MAX_INLINE_DATA_SIZE; | 826 | attr->cap.max_inline_data = I40IW_MAX_INLINE_DATA_SIZE; |
827 | attr->cap.max_send_sge = I40IW_MAX_WQ_FRAGMENT_COUNT; | 827 | attr->cap.max_send_sge = I40IW_MAX_WQ_FRAGMENT_COUNT; |
828 | attr->cap.max_recv_sge = I40IW_MAX_WQ_FRAGMENT_COUNT; | 828 | attr->cap.max_recv_sge = I40IW_MAX_WQ_FRAGMENT_COUNT; |
829 | attr->port_num = 1; | ||
829 | init_attr->event_handler = iwqp->ibqp.event_handler; | 830 | init_attr->event_handler = iwqp->ibqp.event_handler; |
830 | init_attr->qp_context = iwqp->ibqp.qp_context; | 831 | init_attr->qp_context = iwqp->ibqp.qp_context; |
831 | init_attr->send_cq = iwqp->ibqp.send_cq; | 832 | init_attr->send_cq = iwqp->ibqp.send_cq; |
832 | init_attr->recv_cq = iwqp->ibqp.recv_cq; | 833 | init_attr->recv_cq = iwqp->ibqp.recv_cq; |
833 | init_attr->srq = iwqp->ibqp.srq; | 834 | init_attr->srq = iwqp->ibqp.srq; |
834 | init_attr->cap = attr->cap; | 835 | init_attr->cap = attr->cap; |
836 | init_attr->port_num = 1; | ||
835 | return 0; | 837 | return 0; |
836 | } | 838 | } |
837 | 839 | ||
@@ -1027,7 +1029,19 @@ int i40iw_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, | |||
1027 | iwqp->hw_tcp_state = I40IW_TCP_STATE_CLOSED; | 1029 | iwqp->hw_tcp_state = I40IW_TCP_STATE_CLOSED; |
1028 | iwqp->last_aeq = I40IW_AE_RESET_SENT; | 1030 | iwqp->last_aeq = I40IW_AE_RESET_SENT; |
1029 | spin_unlock_irqrestore(&iwqp->lock, flags); | 1031 | spin_unlock_irqrestore(&iwqp->lock, flags); |
1032 | i40iw_cm_disconn(iwqp); | ||
1030 | } | 1033 | } |
1034 | } else { | ||
1035 | spin_lock_irqsave(&iwqp->lock, flags); | ||
1036 | if (iwqp->cm_id) { | ||
1037 | if (atomic_inc_return(&iwqp->close_timer_started) == 1) { | ||
1038 | iwqp->cm_id->add_ref(iwqp->cm_id); | ||
1039 | i40iw_schedule_cm_timer(iwqp->cm_node, | ||
1040 | (struct i40iw_puda_buf *)iwqp, | ||
1041 | I40IW_TIMER_TYPE_CLOSE, 1, 0); | ||
1042 | } | ||
1043 | } | ||
1044 | spin_unlock_irqrestore(&iwqp->lock, flags); | ||
1031 | } | 1045 | } |
1032 | } | 1046 | } |
1033 | return 0; | 1047 | return 0; |
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c index ab3c562d5ba7..552f7bd4ecc3 100644 --- a/drivers/infiniband/hw/mlx5/main.c +++ b/drivers/infiniband/hw/mlx5/main.c | |||
@@ -778,13 +778,13 @@ static int mlx5_ib_query_device(struct ib_device *ibdev, | |||
778 | } | 778 | } |
779 | 779 | ||
780 | if (MLX5_CAP_GEN(mdev, tag_matching)) { | 780 | if (MLX5_CAP_GEN(mdev, tag_matching)) { |
781 | props->xrq_caps.max_rndv_hdr_size = MLX5_TM_MAX_RNDV_MSG_SIZE; | 781 | props->tm_caps.max_rndv_hdr_size = MLX5_TM_MAX_RNDV_MSG_SIZE; |
782 | props->xrq_caps.max_num_tags = | 782 | props->tm_caps.max_num_tags = |
783 | (1 << MLX5_CAP_GEN(mdev, log_tag_matching_list_sz)) - 1; | 783 | (1 << MLX5_CAP_GEN(mdev, log_tag_matching_list_sz)) - 1; |
784 | props->xrq_caps.flags = IB_TM_CAP_RC; | 784 | props->tm_caps.flags = IB_TM_CAP_RC; |
785 | props->xrq_caps.max_ops = | 785 | props->tm_caps.max_ops = |
786 | 1 << MLX5_CAP_GEN(mdev, log_max_qp_sz); | 786 | 1 << MLX5_CAP_GEN(mdev, log_max_qp_sz); |
787 | props->xrq_caps.max_sge = MLX5_TM_MAX_SGE; | 787 | props->tm_caps.max_sge = MLX5_TM_MAX_SGE; |
788 | } | 788 | } |
789 | 789 | ||
790 | if (field_avail(typeof(resp), cqe_comp_caps, uhw->outlen)) { | 790 | if (field_avail(typeof(resp), cqe_comp_caps, uhw->outlen)) { |
@@ -3837,11 +3837,13 @@ static int delay_drop_debugfs_init(struct mlx5_ib_dev *dev) | |||
3837 | if (!dbg) | 3837 | if (!dbg) |
3838 | return -ENOMEM; | 3838 | return -ENOMEM; |
3839 | 3839 | ||
3840 | dev->delay_drop.dbg = dbg; | ||
3841 | |||
3840 | dbg->dir_debugfs = | 3842 | dbg->dir_debugfs = |
3841 | debugfs_create_dir("delay_drop", | 3843 | debugfs_create_dir("delay_drop", |
3842 | dev->mdev->priv.dbg_root); | 3844 | dev->mdev->priv.dbg_root); |
3843 | if (!dbg->dir_debugfs) | 3845 | if (!dbg->dir_debugfs) |
3844 | return -ENOMEM; | 3846 | goto out_debugfs; |
3845 | 3847 | ||
3846 | dbg->events_cnt_debugfs = | 3848 | dbg->events_cnt_debugfs = |
3847 | debugfs_create_atomic_t("num_timeout_events", 0400, | 3849 | debugfs_create_atomic_t("num_timeout_events", 0400, |
@@ -3865,8 +3867,6 @@ static int delay_drop_debugfs_init(struct mlx5_ib_dev *dev) | |||
3865 | if (!dbg->timeout_debugfs) | 3867 | if (!dbg->timeout_debugfs) |
3866 | goto out_debugfs; | 3868 | goto out_debugfs; |
3867 | 3869 | ||
3868 | dev->delay_drop.dbg = dbg; | ||
3869 | |||
3870 | return 0; | 3870 | return 0; |
3871 | 3871 | ||
3872 | out_debugfs: | 3872 | out_debugfs: |
@@ -4174,9 +4174,9 @@ err_bfreg: | |||
4174 | err_uar_page: | 4174 | err_uar_page: |
4175 | mlx5_put_uars_page(dev->mdev, dev->mdev->priv.uar); | 4175 | mlx5_put_uars_page(dev->mdev, dev->mdev->priv.uar); |
4176 | 4176 | ||
4177 | err_cnt: | ||
4178 | mlx5_ib_cleanup_cong_debugfs(dev); | ||
4179 | err_cong: | 4177 | err_cong: |
4178 | mlx5_ib_cleanup_cong_debugfs(dev); | ||
4179 | err_cnt: | ||
4180 | if (MLX5_CAP_GEN(dev->mdev, max_qp_cnt)) | 4180 | if (MLX5_CAP_GEN(dev->mdev, max_qp_cnt)) |
4181 | mlx5_ib_dealloc_counters(dev); | 4181 | mlx5_ib_dealloc_counters(dev); |
4182 | 4182 | ||
diff --git a/drivers/infiniband/hw/mlx5/mem.c b/drivers/infiniband/hw/mlx5/mem.c index 914f212e7ef6..f3dbd75a0a96 100644 --- a/drivers/infiniband/hw/mlx5/mem.c +++ b/drivers/infiniband/hw/mlx5/mem.c | |||
@@ -50,13 +50,9 @@ void mlx5_ib_cont_pages(struct ib_umem *umem, u64 addr, | |||
50 | { | 50 | { |
51 | unsigned long tmp; | 51 | unsigned long tmp; |
52 | unsigned long m; | 52 | unsigned long m; |
53 | int i, k; | 53 | u64 base = ~0, p = 0; |
54 | u64 base = 0; | 54 | u64 len, pfn; |
55 | int p = 0; | 55 | int i = 0; |
56 | int skip; | ||
57 | int mask; | ||
58 | u64 len; | ||
59 | u64 pfn; | ||
60 | struct scatterlist *sg; | 56 | struct scatterlist *sg; |
61 | int entry; | 57 | int entry; |
62 | unsigned long page_shift = umem->page_shift; | 58 | unsigned long page_shift = umem->page_shift; |
@@ -76,33 +72,24 @@ void mlx5_ib_cont_pages(struct ib_umem *umem, u64 addr, | |||
76 | m = find_first_bit(&tmp, BITS_PER_LONG); | 72 | m = find_first_bit(&tmp, BITS_PER_LONG); |
77 | if (max_page_shift) | 73 | if (max_page_shift) |
78 | m = min_t(unsigned long, max_page_shift - page_shift, m); | 74 | m = min_t(unsigned long, max_page_shift - page_shift, m); |
79 | skip = 1 << m; | 75 | |
80 | mask = skip - 1; | ||
81 | i = 0; | ||
82 | for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) { | 76 | for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) { |
83 | len = sg_dma_len(sg) >> page_shift; | 77 | len = sg_dma_len(sg) >> page_shift; |
84 | pfn = sg_dma_address(sg) >> page_shift; | 78 | pfn = sg_dma_address(sg) >> page_shift; |
85 | for (k = 0; k < len; k++) { | 79 | if (base + p != pfn) { |
86 | if (!(i & mask)) { | 80 | /* If either the offset or the new |
87 | tmp = (unsigned long)pfn; | 81 | * base are unaligned update m |
88 | m = min_t(unsigned long, m, find_first_bit(&tmp, BITS_PER_LONG)); | 82 | */ |
89 | skip = 1 << m; | 83 | tmp = (unsigned long)(pfn | p); |
90 | mask = skip - 1; | 84 | if (!IS_ALIGNED(tmp, 1 << m)) |
91 | base = pfn; | 85 | m = find_first_bit(&tmp, BITS_PER_LONG); |
92 | p = 0; | 86 | |
93 | } else { | 87 | base = pfn; |
94 | if (base + p != pfn) { | 88 | p = 0; |
95 | tmp = (unsigned long)p; | ||
96 | m = find_first_bit(&tmp, BITS_PER_LONG); | ||
97 | skip = 1 << m; | ||
98 | mask = skip - 1; | ||
99 | base = pfn; | ||
100 | p = 0; | ||
101 | } | ||
102 | } | ||
103 | p++; | ||
104 | i++; | ||
105 | } | 89 | } |
90 | |||
91 | p += len; | ||
92 | i += len; | ||
106 | } | 93 | } |
107 | 94 | ||
108 | if (i) { | 95 | if (i) { |
diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c index 0e2789d9bb4d..37bbc543847a 100644 --- a/drivers/infiniband/hw/mlx5/mr.c +++ b/drivers/infiniband/hw/mlx5/mr.c | |||
@@ -47,7 +47,8 @@ enum { | |||
47 | 47 | ||
48 | #define MLX5_UMR_ALIGN 2048 | 48 | #define MLX5_UMR_ALIGN 2048 |
49 | 49 | ||
50 | static int clean_mr(struct mlx5_ib_mr *mr); | 50 | static int clean_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr); |
51 | static int dereg_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr); | ||
51 | static int mr_cache_max_order(struct mlx5_ib_dev *dev); | 52 | static int mr_cache_max_order(struct mlx5_ib_dev *dev); |
52 | static int unreg_umr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr); | 53 | static int unreg_umr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr); |
53 | 54 | ||
@@ -1270,8 +1271,9 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, | |||
1270 | 1271 | ||
1271 | err = mlx5_ib_update_xlt(mr, 0, ncont, page_shift, | 1272 | err = mlx5_ib_update_xlt(mr, 0, ncont, page_shift, |
1272 | update_xlt_flags); | 1273 | update_xlt_flags); |
1274 | |||
1273 | if (err) { | 1275 | if (err) { |
1274 | mlx5_ib_dereg_mr(&mr->ibmr); | 1276 | dereg_mr(dev, mr); |
1275 | return ERR_PTR(err); | 1277 | return ERR_PTR(err); |
1276 | } | 1278 | } |
1277 | } | 1279 | } |
@@ -1356,7 +1358,7 @@ int mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start, | |||
1356 | err = mr_umem_get(pd, addr, len, access_flags, &mr->umem, | 1358 | err = mr_umem_get(pd, addr, len, access_flags, &mr->umem, |
1357 | &npages, &page_shift, &ncont, &order); | 1359 | &npages, &page_shift, &ncont, &order); |
1358 | if (err < 0) { | 1360 | if (err < 0) { |
1359 | clean_mr(mr); | 1361 | clean_mr(dev, mr); |
1360 | return err; | 1362 | return err; |
1361 | } | 1363 | } |
1362 | } | 1364 | } |
@@ -1410,7 +1412,7 @@ int mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start, | |||
1410 | if (err) { | 1412 | if (err) { |
1411 | mlx5_ib_warn(dev, "Failed to rereg UMR\n"); | 1413 | mlx5_ib_warn(dev, "Failed to rereg UMR\n"); |
1412 | ib_umem_release(mr->umem); | 1414 | ib_umem_release(mr->umem); |
1413 | clean_mr(mr); | 1415 | clean_mr(dev, mr); |
1414 | return err; | 1416 | return err; |
1415 | } | 1417 | } |
1416 | } | 1418 | } |
@@ -1469,9 +1471,8 @@ mlx5_free_priv_descs(struct mlx5_ib_mr *mr) | |||
1469 | } | 1471 | } |
1470 | } | 1472 | } |
1471 | 1473 | ||
1472 | static int clean_mr(struct mlx5_ib_mr *mr) | 1474 | static int clean_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr) |
1473 | { | 1475 | { |
1474 | struct mlx5_ib_dev *dev = to_mdev(mr->ibmr.device); | ||
1475 | int allocated_from_cache = mr->allocated_from_cache; | 1476 | int allocated_from_cache = mr->allocated_from_cache; |
1476 | int err; | 1477 | int err; |
1477 | 1478 | ||
@@ -1507,10 +1508,8 @@ static int clean_mr(struct mlx5_ib_mr *mr) | |||
1507 | return 0; | 1508 | return 0; |
1508 | } | 1509 | } |
1509 | 1510 | ||
1510 | int mlx5_ib_dereg_mr(struct ib_mr *ibmr) | 1511 | static int dereg_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr) |
1511 | { | 1512 | { |
1512 | struct mlx5_ib_dev *dev = to_mdev(ibmr->device); | ||
1513 | struct mlx5_ib_mr *mr = to_mmr(ibmr); | ||
1514 | int npages = mr->npages; | 1513 | int npages = mr->npages; |
1515 | struct ib_umem *umem = mr->umem; | 1514 | struct ib_umem *umem = mr->umem; |
1516 | 1515 | ||
@@ -1539,7 +1538,7 @@ int mlx5_ib_dereg_mr(struct ib_mr *ibmr) | |||
1539 | } | 1538 | } |
1540 | #endif | 1539 | #endif |
1541 | 1540 | ||
1542 | clean_mr(mr); | 1541 | clean_mr(dev, mr); |
1543 | 1542 | ||
1544 | if (umem) { | 1543 | if (umem) { |
1545 | ib_umem_release(umem); | 1544 | ib_umem_release(umem); |
@@ -1549,6 +1548,14 @@ int mlx5_ib_dereg_mr(struct ib_mr *ibmr) | |||
1549 | return 0; | 1548 | return 0; |
1550 | } | 1549 | } |
1551 | 1550 | ||
1551 | int mlx5_ib_dereg_mr(struct ib_mr *ibmr) | ||
1552 | { | ||
1553 | struct mlx5_ib_dev *dev = to_mdev(ibmr->device); | ||
1554 | struct mlx5_ib_mr *mr = to_mmr(ibmr); | ||
1555 | |||
1556 | return dereg_mr(dev, mr); | ||
1557 | } | ||
1558 | |||
1552 | struct ib_mr *mlx5_ib_alloc_mr(struct ib_pd *pd, | 1559 | struct ib_mr *mlx5_ib_alloc_mr(struct ib_pd *pd, |
1553 | enum ib_mr_type mr_type, | 1560 | enum ib_mr_type mr_type, |
1554 | u32 max_num_sg) | 1561 | u32 max_num_sg) |
diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c index f0dc5f4aa177..442b9bdc0f03 100644 --- a/drivers/infiniband/hw/nes/nes_verbs.c +++ b/drivers/infiniband/hw/nes/nes_verbs.c | |||
@@ -3232,7 +3232,7 @@ static int nes_post_send(struct ib_qp *ibqp, struct ib_send_wr *ib_wr, | |||
3232 | mr->ibmr.iova); | 3232 | mr->ibmr.iova); |
3233 | set_wqe_32bit_value(wqe->wqe_words, | 3233 | set_wqe_32bit_value(wqe->wqe_words, |
3234 | NES_IWARP_SQ_FMR_WQE_LENGTH_LOW_IDX, | 3234 | NES_IWARP_SQ_FMR_WQE_LENGTH_LOW_IDX, |
3235 | mr->ibmr.length); | 3235 | lower_32_bits(mr->ibmr.length)); |
3236 | set_wqe_32bit_value(wqe->wqe_words, | 3236 | set_wqe_32bit_value(wqe->wqe_words, |
3237 | NES_IWARP_SQ_FMR_WQE_LENGTH_HIGH_IDX, 0); | 3237 | NES_IWARP_SQ_FMR_WQE_LENGTH_HIGH_IDX, 0); |
3238 | set_wqe_32bit_value(wqe->wqe_words, | 3238 | set_wqe_32bit_value(wqe->wqe_words, |
@@ -3274,7 +3274,7 @@ static int nes_post_send(struct ib_qp *ibqp, struct ib_send_wr *ib_wr, | |||
3274 | mr->npages * 8); | 3274 | mr->npages * 8); |
3275 | 3275 | ||
3276 | nes_debug(NES_DBG_IW_TX, "SQ_REG_MR: iova_start: %llx, " | 3276 | nes_debug(NES_DBG_IW_TX, "SQ_REG_MR: iova_start: %llx, " |
3277 | "length: %d, rkey: %0x, pgl_paddr: %llx, " | 3277 | "length: %lld, rkey: %0x, pgl_paddr: %llx, " |
3278 | "page_list_len: %u, wqe_misc: %x\n", | 3278 | "page_list_len: %u, wqe_misc: %x\n", |
3279 | (unsigned long long) mr->ibmr.iova, | 3279 | (unsigned long long) mr->ibmr.iova, |
3280 | mr->ibmr.length, | 3280 | mr->ibmr.length, |
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_hw.c b/drivers/infiniband/hw/ocrdma/ocrdma_hw.c index dcb5942f9fb5..65b166cc7437 100644 --- a/drivers/infiniband/hw/ocrdma/ocrdma_hw.c +++ b/drivers/infiniband/hw/ocrdma/ocrdma_hw.c | |||
@@ -252,7 +252,10 @@ static int ocrdma_get_mbx_errno(u32 status) | |||
252 | case OCRDMA_MBX_ADDI_STATUS_INSUFFICIENT_RESOURCES: | 252 | case OCRDMA_MBX_ADDI_STATUS_INSUFFICIENT_RESOURCES: |
253 | err_num = -EAGAIN; | 253 | err_num = -EAGAIN; |
254 | break; | 254 | break; |
255 | default: | ||
256 | err_num = -EFAULT; | ||
255 | } | 257 | } |
258 | break; | ||
256 | default: | 259 | default: |
257 | err_num = -EFAULT; | 260 | err_num = -EFAULT; |
258 | } | 261 | } |
diff --git a/drivers/infiniband/hw/qedr/qedr.h b/drivers/infiniband/hw/qedr/qedr.h index b2bb42e2805d..254083b524bd 100644 --- a/drivers/infiniband/hw/qedr/qedr.h +++ b/drivers/infiniband/hw/qedr/qedr.h | |||
@@ -387,7 +387,7 @@ struct qedr_qp { | |||
387 | u8 wqe_size; | 387 | u8 wqe_size; |
388 | 388 | ||
389 | u8 smac[ETH_ALEN]; | 389 | u8 smac[ETH_ALEN]; |
390 | u16 vlan_id; | 390 | u16 vlan; |
391 | int rc; | 391 | int rc; |
392 | } *rqe_wr_id; | 392 | } *rqe_wr_id; |
393 | 393 | ||
diff --git a/drivers/infiniband/hw/qedr/qedr_cm.c b/drivers/infiniband/hw/qedr/qedr_cm.c index 4689e802b332..ad8965397cf7 100644 --- a/drivers/infiniband/hw/qedr/qedr_cm.c +++ b/drivers/infiniband/hw/qedr/qedr_cm.c | |||
@@ -105,7 +105,7 @@ void qedr_ll2_complete_rx_packet(void *cxt, | |||
105 | 105 | ||
106 | qp->rqe_wr_id[qp->rq.gsi_cons].rc = data->u.data_length_error ? | 106 | qp->rqe_wr_id[qp->rq.gsi_cons].rc = data->u.data_length_error ? |
107 | -EINVAL : 0; | 107 | -EINVAL : 0; |
108 | qp->rqe_wr_id[qp->rq.gsi_cons].vlan_id = data->vlan; | 108 | qp->rqe_wr_id[qp->rq.gsi_cons].vlan = data->vlan; |
109 | /* note: length stands for data length i.e. GRH is excluded */ | 109 | /* note: length stands for data length i.e. GRH is excluded */ |
110 | qp->rqe_wr_id[qp->rq.gsi_cons].sg_list[0].length = | 110 | qp->rqe_wr_id[qp->rq.gsi_cons].sg_list[0].length = |
111 | data->length.data_length; | 111 | data->length.data_length; |
@@ -694,6 +694,7 @@ int qedr_gsi_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc) | |||
694 | struct qedr_cq *cq = get_qedr_cq(ibcq); | 694 | struct qedr_cq *cq = get_qedr_cq(ibcq); |
695 | struct qedr_qp *qp = dev->gsi_qp; | 695 | struct qedr_qp *qp = dev->gsi_qp; |
696 | unsigned long flags; | 696 | unsigned long flags; |
697 | u16 vlan_id; | ||
697 | int i = 0; | 698 | int i = 0; |
698 | 699 | ||
699 | spin_lock_irqsave(&cq->cq_lock, flags); | 700 | spin_lock_irqsave(&cq->cq_lock, flags); |
@@ -712,9 +713,14 @@ int qedr_gsi_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc) | |||
712 | wc[i].wc_flags |= IB_WC_GRH | IB_WC_IP_CSUM_OK; | 713 | wc[i].wc_flags |= IB_WC_GRH | IB_WC_IP_CSUM_OK; |
713 | ether_addr_copy(wc[i].smac, qp->rqe_wr_id[qp->rq.cons].smac); | 714 | ether_addr_copy(wc[i].smac, qp->rqe_wr_id[qp->rq.cons].smac); |
714 | wc[i].wc_flags |= IB_WC_WITH_SMAC; | 715 | wc[i].wc_flags |= IB_WC_WITH_SMAC; |
715 | if (qp->rqe_wr_id[qp->rq.cons].vlan_id) { | 716 | |
717 | vlan_id = qp->rqe_wr_id[qp->rq.cons].vlan & | ||
718 | VLAN_VID_MASK; | ||
719 | if (vlan_id) { | ||
716 | wc[i].wc_flags |= IB_WC_WITH_VLAN; | 720 | wc[i].wc_flags |= IB_WC_WITH_VLAN; |
717 | wc[i].vlan_id = qp->rqe_wr_id[qp->rq.cons].vlan_id; | 721 | wc[i].vlan_id = vlan_id; |
722 | wc[i].sl = (qp->rqe_wr_id[qp->rq.cons].vlan & | ||
723 | VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT; | ||
718 | } | 724 | } |
719 | 725 | ||
720 | qedr_inc_sw_cons(&qp->rq); | 726 | qedr_inc_sw_cons(&qp->rq); |
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma.h b/drivers/infiniband/hw/vmw_pvrdma/pvrdma.h index 663a0c301c43..984aa3484928 100644 --- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma.h +++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma.h | |||
@@ -416,9 +416,34 @@ static inline enum ib_wc_status pvrdma_wc_status_to_ib( | |||
416 | return (enum ib_wc_status)status; | 416 | return (enum ib_wc_status)status; |
417 | } | 417 | } |
418 | 418 | ||
419 | static inline int pvrdma_wc_opcode_to_ib(int opcode) | 419 | static inline int pvrdma_wc_opcode_to_ib(unsigned int opcode) |
420 | { | 420 | { |
421 | return opcode; | 421 | switch (opcode) { |
422 | case PVRDMA_WC_SEND: | ||
423 | return IB_WC_SEND; | ||
424 | case PVRDMA_WC_RDMA_WRITE: | ||
425 | return IB_WC_RDMA_WRITE; | ||
426 | case PVRDMA_WC_RDMA_READ: | ||
427 | return IB_WC_RDMA_READ; | ||
428 | case PVRDMA_WC_COMP_SWAP: | ||
429 | return IB_WC_COMP_SWAP; | ||
430 | case PVRDMA_WC_FETCH_ADD: | ||
431 | return IB_WC_FETCH_ADD; | ||
432 | case PVRDMA_WC_LOCAL_INV: | ||
433 | return IB_WC_LOCAL_INV; | ||
434 | case PVRDMA_WC_FAST_REG_MR: | ||
435 | return IB_WC_REG_MR; | ||
436 | case PVRDMA_WC_MASKED_COMP_SWAP: | ||
437 | return IB_WC_MASKED_COMP_SWAP; | ||
438 | case PVRDMA_WC_MASKED_FETCH_ADD: | ||
439 | return IB_WC_MASKED_FETCH_ADD; | ||
440 | case PVRDMA_WC_RECV: | ||
441 | return IB_WC_RECV; | ||
442 | case PVRDMA_WC_RECV_RDMA_WITH_IMM: | ||
443 | return IB_WC_RECV_RDMA_WITH_IMM; | ||
444 | default: | ||
445 | return IB_WC_SEND; | ||
446 | } | ||
422 | } | 447 | } |
423 | 448 | ||
424 | static inline int pvrdma_wc_flags_to_ib(int flags) | 449 | static inline int pvrdma_wc_flags_to_ib(int flags) |
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c index 14b62f7472b4..7774654c2ccb 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c | |||
@@ -823,12 +823,18 @@ void ipoib_cm_handle_tx_wc(struct net_device *dev, struct ib_wc *wc) | |||
823 | wc->status != IB_WC_WR_FLUSH_ERR) { | 823 | wc->status != IB_WC_WR_FLUSH_ERR) { |
824 | struct ipoib_neigh *neigh; | 824 | struct ipoib_neigh *neigh; |
825 | 825 | ||
826 | if (wc->status != IB_WC_RNR_RETRY_EXC_ERR) | 826 | /* IB_WC[_RNR]_RETRY_EXC_ERR error is part of the life cycle, |
827 | ipoib_warn(priv, "failed cm send event (status=%d, wrid=%d vend_err %x)\n", | 827 | * so don't make waves. |
828 | wc->status, wr_id, wc->vendor_err); | 828 | */ |
829 | if (wc->status == IB_WC_RNR_RETRY_EXC_ERR || | ||
830 | wc->status == IB_WC_RETRY_EXC_ERR) | ||
831 | ipoib_dbg(priv, | ||
832 | "%s: failed cm send event (status=%d, wrid=%d vend_err 0x%x)\n", | ||
833 | __func__, wc->status, wr_id, wc->vendor_err); | ||
829 | else | 834 | else |
830 | ipoib_dbg(priv, "failed cm send event (status=%d, wrid=%d vend_err %x)\n", | 835 | ipoib_warn(priv, |
831 | wc->status, wr_id, wc->vendor_err); | 836 | "%s: failed cm send event (status=%d, wrid=%d vend_err 0x%x)\n", |
837 | __func__, wc->status, wr_id, wc->vendor_err); | ||
832 | 838 | ||
833 | spin_lock_irqsave(&priv->lock, flags); | 839 | spin_lock_irqsave(&priv->lock, flags); |
834 | neigh = tx->neigh; | 840 | neigh = tx->neigh; |
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c index 2e075377242e..6cd61638b441 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c | |||
@@ -1000,19 +1000,6 @@ static inline int update_parent_pkey(struct ipoib_dev_priv *priv) | |||
1000 | */ | 1000 | */ |
1001 | priv->dev->broadcast[8] = priv->pkey >> 8; | 1001 | priv->dev->broadcast[8] = priv->pkey >> 8; |
1002 | priv->dev->broadcast[9] = priv->pkey & 0xff; | 1002 | priv->dev->broadcast[9] = priv->pkey & 0xff; |
1003 | |||
1004 | /* | ||
1005 | * Update the broadcast address in the priv->broadcast object, | ||
1006 | * in case it already exists, otherwise no one will do that. | ||
1007 | */ | ||
1008 | if (priv->broadcast) { | ||
1009 | spin_lock_irq(&priv->lock); | ||
1010 | memcpy(priv->broadcast->mcmember.mgid.raw, | ||
1011 | priv->dev->broadcast + 4, | ||
1012 | sizeof(union ib_gid)); | ||
1013 | spin_unlock_irq(&priv->lock); | ||
1014 | } | ||
1015 | |||
1016 | return 0; | 1003 | return 0; |
1017 | } | 1004 | } |
1018 | 1005 | ||
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c index bac95b509a9b..dcc77014018d 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_main.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c | |||
@@ -2180,6 +2180,7 @@ static struct net_device *ipoib_add_port(const char *format, | |||
2180 | { | 2180 | { |
2181 | struct ipoib_dev_priv *priv; | 2181 | struct ipoib_dev_priv *priv; |
2182 | struct ib_port_attr attr; | 2182 | struct ib_port_attr attr; |
2183 | struct rdma_netdev *rn; | ||
2183 | int result = -ENOMEM; | 2184 | int result = -ENOMEM; |
2184 | 2185 | ||
2185 | priv = ipoib_intf_alloc(hca, port, format); | 2186 | priv = ipoib_intf_alloc(hca, port, format); |
@@ -2279,7 +2280,8 @@ register_failed: | |||
2279 | ipoib_dev_cleanup(priv->dev); | 2280 | ipoib_dev_cleanup(priv->dev); |
2280 | 2281 | ||
2281 | device_init_failed: | 2282 | device_init_failed: |
2282 | free_netdev(priv->dev); | 2283 | rn = netdev_priv(priv->dev); |
2284 | rn->free_rdma_netdev(priv->dev); | ||
2283 | kfree(priv); | 2285 | kfree(priv); |
2284 | 2286 | ||
2285 | alloc_mem_failed: | 2287 | alloc_mem_failed: |
@@ -2328,7 +2330,7 @@ static void ipoib_remove_one(struct ib_device *device, void *client_data) | |||
2328 | return; | 2330 | return; |
2329 | 2331 | ||
2330 | list_for_each_entry_safe(priv, tmp, dev_list, list) { | 2332 | list_for_each_entry_safe(priv, tmp, dev_list, list) { |
2331 | struct rdma_netdev *rn = netdev_priv(priv->dev); | 2333 | struct rdma_netdev *parent_rn = netdev_priv(priv->dev); |
2332 | 2334 | ||
2333 | ib_unregister_event_handler(&priv->event_handler); | 2335 | ib_unregister_event_handler(&priv->event_handler); |
2334 | flush_workqueue(ipoib_workqueue); | 2336 | flush_workqueue(ipoib_workqueue); |
@@ -2350,10 +2352,15 @@ static void ipoib_remove_one(struct ib_device *device, void *client_data) | |||
2350 | unregister_netdev(priv->dev); | 2352 | unregister_netdev(priv->dev); |
2351 | mutex_unlock(&priv->sysfs_mutex); | 2353 | mutex_unlock(&priv->sysfs_mutex); |
2352 | 2354 | ||
2353 | rn->free_rdma_netdev(priv->dev); | 2355 | parent_rn->free_rdma_netdev(priv->dev); |
2356 | |||
2357 | list_for_each_entry_safe(cpriv, tcpriv, &priv->child_intfs, list) { | ||
2358 | struct rdma_netdev *child_rn; | ||
2354 | 2359 | ||
2355 | list_for_each_entry_safe(cpriv, tcpriv, &priv->child_intfs, list) | 2360 | child_rn = netdev_priv(cpriv->dev); |
2361 | child_rn->free_rdma_netdev(cpriv->dev); | ||
2356 | kfree(cpriv); | 2362 | kfree(cpriv); |
2363 | } | ||
2357 | 2364 | ||
2358 | kfree(priv); | 2365 | kfree(priv); |
2359 | } | 2366 | } |
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_vlan.c b/drivers/infiniband/ulp/ipoib/ipoib_vlan.c index 9927cd6b7082..55a9b71ed05a 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_vlan.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_vlan.c | |||
@@ -141,14 +141,17 @@ int ipoib_vlan_add(struct net_device *pdev, unsigned short pkey) | |||
141 | return restart_syscall(); | 141 | return restart_syscall(); |
142 | } | 142 | } |
143 | 143 | ||
144 | priv = ipoib_intf_alloc(ppriv->ca, ppriv->port, intf_name); | 144 | if (!down_write_trylock(&ppriv->vlan_rwsem)) { |
145 | if (!priv) { | ||
146 | rtnl_unlock(); | 145 | rtnl_unlock(); |
147 | mutex_unlock(&ppriv->sysfs_mutex); | 146 | mutex_unlock(&ppriv->sysfs_mutex); |
148 | return -ENOMEM; | 147 | return restart_syscall(); |
149 | } | 148 | } |
150 | 149 | ||
151 | down_write(&ppriv->vlan_rwsem); | 150 | priv = ipoib_intf_alloc(ppriv->ca, ppriv->port, intf_name); |
151 | if (!priv) { | ||
152 | result = -ENOMEM; | ||
153 | goto out; | ||
154 | } | ||
152 | 155 | ||
153 | /* | 156 | /* |
154 | * First ensure this isn't a duplicate. We check the parent device and | 157 | * First ensure this isn't a duplicate. We check the parent device and |
@@ -175,8 +178,11 @@ out: | |||
175 | rtnl_unlock(); | 178 | rtnl_unlock(); |
176 | mutex_unlock(&ppriv->sysfs_mutex); | 179 | mutex_unlock(&ppriv->sysfs_mutex); |
177 | 180 | ||
178 | if (result) { | 181 | if (result && priv) { |
179 | free_netdev(priv->dev); | 182 | struct rdma_netdev *rn; |
183 | |||
184 | rn = netdev_priv(priv->dev); | ||
185 | rn->free_rdma_netdev(priv->dev); | ||
180 | kfree(priv); | 186 | kfree(priv); |
181 | } | 187 | } |
182 | 188 | ||
@@ -204,7 +210,12 @@ int ipoib_vlan_delete(struct net_device *pdev, unsigned short pkey) | |||
204 | return restart_syscall(); | 210 | return restart_syscall(); |
205 | } | 211 | } |
206 | 212 | ||
207 | down_write(&ppriv->vlan_rwsem); | 213 | if (!down_write_trylock(&ppriv->vlan_rwsem)) { |
214 | rtnl_unlock(); | ||
215 | mutex_unlock(&ppriv->sysfs_mutex); | ||
216 | return restart_syscall(); | ||
217 | } | ||
218 | |||
208 | list_for_each_entry_safe(priv, tpriv, &ppriv->child_intfs, list) { | 219 | list_for_each_entry_safe(priv, tpriv, &ppriv->child_intfs, list) { |
209 | if (priv->pkey == pkey && | 220 | if (priv->pkey == pkey && |
210 | priv->child_type == IPOIB_LEGACY_CHILD) { | 221 | priv->child_type == IPOIB_LEGACY_CHILD) { |
@@ -224,7 +235,10 @@ int ipoib_vlan_delete(struct net_device *pdev, unsigned short pkey) | |||
224 | mutex_unlock(&ppriv->sysfs_mutex); | 235 | mutex_unlock(&ppriv->sysfs_mutex); |
225 | 236 | ||
226 | if (dev) { | 237 | if (dev) { |
227 | free_netdev(dev); | 238 | struct rdma_netdev *rn; |
239 | |||
240 | rn = netdev_priv(dev); | ||
241 | rn->free_rdma_netdev(priv->dev); | ||
228 | kfree(priv); | 242 | kfree(priv); |
229 | return 0; | 243 | return 0; |
230 | } | 244 | } |
diff --git a/drivers/infiniband/ulp/iser/iser_memory.c b/drivers/infiniband/ulp/iser/iser_memory.c index 9c3e9ab53a41..322209d5ff58 100644 --- a/drivers/infiniband/ulp/iser/iser_memory.c +++ b/drivers/infiniband/ulp/iser/iser_memory.c | |||
@@ -154,7 +154,7 @@ static void iser_dump_page_vec(struct iser_page_vec *page_vec) | |||
154 | { | 154 | { |
155 | int i; | 155 | int i; |
156 | 156 | ||
157 | iser_err("page vec npages %d data length %d\n", | 157 | iser_err("page vec npages %d data length %lld\n", |
158 | page_vec->npages, page_vec->fake_mr.length); | 158 | page_vec->npages, page_vec->fake_mr.length); |
159 | for (i = 0; i < page_vec->npages; i++) | 159 | for (i = 0; i < page_vec->npages; i++) |
160 | iser_err("vec[%d]: %llx\n", i, page_vec->pages[i]); | 160 | iser_err("vec[%d]: %llx\n", i, page_vec->pages[i]); |
diff --git a/drivers/input/ff-core.c b/drivers/input/ff-core.c index 8f2042432c85..66a46c84e28f 100644 --- a/drivers/input/ff-core.c +++ b/drivers/input/ff-core.c | |||
@@ -237,9 +237,15 @@ int input_ff_erase(struct input_dev *dev, int effect_id, struct file *file) | |||
237 | EXPORT_SYMBOL_GPL(input_ff_erase); | 237 | EXPORT_SYMBOL_GPL(input_ff_erase); |
238 | 238 | ||
239 | /* | 239 | /* |
240 | * flush_effects - erase all effects owned by a file handle | 240 | * input_ff_flush - erase all effects owned by a file handle |
241 | * @dev: input device to erase effect from | ||
242 | * @file: purported owner of the effects | ||
243 | * | ||
244 | * This function erases all force-feedback effects associated with | ||
245 | * the given owner from specified device. Note that @file may be %NULL, | ||
246 | * in which case all effects will be erased. | ||
241 | */ | 247 | */ |
242 | static int flush_effects(struct input_dev *dev, struct file *file) | 248 | int input_ff_flush(struct input_dev *dev, struct file *file) |
243 | { | 249 | { |
244 | struct ff_device *ff = dev->ff; | 250 | struct ff_device *ff = dev->ff; |
245 | int i; | 251 | int i; |
@@ -255,6 +261,7 @@ static int flush_effects(struct input_dev *dev, struct file *file) | |||
255 | 261 | ||
256 | return 0; | 262 | return 0; |
257 | } | 263 | } |
264 | EXPORT_SYMBOL_GPL(input_ff_flush); | ||
258 | 265 | ||
259 | /** | 266 | /** |
260 | * input_ff_event() - generic handler for force-feedback events | 267 | * input_ff_event() - generic handler for force-feedback events |
@@ -343,7 +350,7 @@ int input_ff_create(struct input_dev *dev, unsigned int max_effects) | |||
343 | mutex_init(&ff->mutex); | 350 | mutex_init(&ff->mutex); |
344 | 351 | ||
345 | dev->ff = ff; | 352 | dev->ff = ff; |
346 | dev->flush = flush_effects; | 353 | dev->flush = input_ff_flush; |
347 | dev->event = input_ff_event; | 354 | dev->event = input_ff_event; |
348 | __set_bit(EV_FF, dev->evbit); | 355 | __set_bit(EV_FF, dev->evbit); |
349 | 356 | ||
diff --git a/drivers/input/misc/uinput.c b/drivers/input/misc/uinput.c index 022be0e22eba..443151de90c6 100644 --- a/drivers/input/misc/uinput.c +++ b/drivers/input/misc/uinput.c | |||
@@ -98,14 +98,15 @@ static int uinput_request_reserve_slot(struct uinput_device *udev, | |||
98 | uinput_request_alloc_id(udev, request)); | 98 | uinput_request_alloc_id(udev, request)); |
99 | } | 99 | } |
100 | 100 | ||
101 | static void uinput_request_done(struct uinput_device *udev, | 101 | static void uinput_request_release_slot(struct uinput_device *udev, |
102 | struct uinput_request *request) | 102 | unsigned int id) |
103 | { | 103 | { |
104 | /* Mark slot as available */ | 104 | /* Mark slot as available */ |
105 | udev->requests[request->id] = NULL; | 105 | spin_lock(&udev->requests_lock); |
106 | wake_up(&udev->requests_waitq); | 106 | udev->requests[id] = NULL; |
107 | spin_unlock(&udev->requests_lock); | ||
107 | 108 | ||
108 | complete(&request->done); | 109 | wake_up(&udev->requests_waitq); |
109 | } | 110 | } |
110 | 111 | ||
111 | static int uinput_request_send(struct uinput_device *udev, | 112 | static int uinput_request_send(struct uinput_device *udev, |
@@ -138,20 +139,22 @@ static int uinput_request_send(struct uinput_device *udev, | |||
138 | static int uinput_request_submit(struct uinput_device *udev, | 139 | static int uinput_request_submit(struct uinput_device *udev, |
139 | struct uinput_request *request) | 140 | struct uinput_request *request) |
140 | { | 141 | { |
141 | int error; | 142 | int retval; |
142 | 143 | ||
143 | error = uinput_request_reserve_slot(udev, request); | 144 | retval = uinput_request_reserve_slot(udev, request); |
144 | if (error) | 145 | if (retval) |
145 | return error; | 146 | return retval; |
146 | 147 | ||
147 | error = uinput_request_send(udev, request); | 148 | retval = uinput_request_send(udev, request); |
148 | if (error) { | 149 | if (retval) |
149 | uinput_request_done(udev, request); | 150 | goto out; |
150 | return error; | ||
151 | } | ||
152 | 151 | ||
153 | wait_for_completion(&request->done); | 152 | wait_for_completion(&request->done); |
154 | return request->retval; | 153 | retval = request->retval; |
154 | |||
155 | out: | ||
156 | uinput_request_release_slot(udev, request->id); | ||
157 | return retval; | ||
155 | } | 158 | } |
156 | 159 | ||
157 | /* | 160 | /* |
@@ -169,7 +172,7 @@ static void uinput_flush_requests(struct uinput_device *udev) | |||
169 | request = udev->requests[i]; | 172 | request = udev->requests[i]; |
170 | if (request) { | 173 | if (request) { |
171 | request->retval = -ENODEV; | 174 | request->retval = -ENODEV; |
172 | uinput_request_done(udev, request); | 175 | complete(&request->done); |
173 | } | 176 | } |
174 | } | 177 | } |
175 | 178 | ||
@@ -230,6 +233,18 @@ static int uinput_dev_erase_effect(struct input_dev *dev, int effect_id) | |||
230 | return uinput_request_submit(udev, &request); | 233 | return uinput_request_submit(udev, &request); |
231 | } | 234 | } |
232 | 235 | ||
236 | static int uinput_dev_flush(struct input_dev *dev, struct file *file) | ||
237 | { | ||
238 | /* | ||
239 | * If we are called with file == NULL that means we are tearing | ||
240 | * down the device, and therefore we can not handle FF erase | ||
241 | * requests: either we are handling UI_DEV_DESTROY (and holding | ||
242 | * the udev->mutex), or the file descriptor is closed and there is | ||
243 | * nobody on the other side anymore. | ||
244 | */ | ||
245 | return file ? input_ff_flush(dev, file) : 0; | ||
246 | } | ||
247 | |||
233 | static void uinput_destroy_device(struct uinput_device *udev) | 248 | static void uinput_destroy_device(struct uinput_device *udev) |
234 | { | 249 | { |
235 | const char *name, *phys; | 250 | const char *name, *phys; |
@@ -297,6 +312,12 @@ static int uinput_create_device(struct uinput_device *udev) | |||
297 | dev->ff->playback = uinput_dev_playback; | 312 | dev->ff->playback = uinput_dev_playback; |
298 | dev->ff->set_gain = uinput_dev_set_gain; | 313 | dev->ff->set_gain = uinput_dev_set_gain; |
299 | dev->ff->set_autocenter = uinput_dev_set_autocenter; | 314 | dev->ff->set_autocenter = uinput_dev_set_autocenter; |
315 | /* | ||
316 | * The standard input_ff_flush() implementation does | ||
317 | * not quite work for uinput as we can't reasonably | ||
318 | * handle FF requests during device teardown. | ||
319 | */ | ||
320 | dev->flush = uinput_dev_flush; | ||
300 | } | 321 | } |
301 | 322 | ||
302 | error = input_register_device(udev->dev); | 323 | error = input_register_device(udev->dev); |
@@ -939,7 +960,7 @@ static long uinput_ioctl_handler(struct file *file, unsigned int cmd, | |||
939 | } | 960 | } |
940 | 961 | ||
941 | req->retval = ff_up.retval; | 962 | req->retval = ff_up.retval; |
942 | uinput_request_done(udev, req); | 963 | complete(&req->done); |
943 | goto out; | 964 | goto out; |
944 | 965 | ||
945 | case UI_END_FF_ERASE: | 966 | case UI_END_FF_ERASE: |
@@ -955,7 +976,7 @@ static long uinput_ioctl_handler(struct file *file, unsigned int cmd, | |||
955 | } | 976 | } |
956 | 977 | ||
957 | req->retval = ff_erase.retval; | 978 | req->retval = ff_erase.retval; |
958 | uinput_request_done(udev, req); | 979 | complete(&req->done); |
959 | goto out; | 980 | goto out; |
960 | } | 981 | } |
961 | 982 | ||
diff --git a/drivers/input/mouse/elan_i2c_i2c.c b/drivers/input/mouse/elan_i2c_i2c.c index 15b1330606c1..e19eb60b3d2f 100644 --- a/drivers/input/mouse/elan_i2c_i2c.c +++ b/drivers/input/mouse/elan_i2c_i2c.c | |||
@@ -598,7 +598,7 @@ static int elan_i2c_write_fw_block(struct i2c_client *client, | |||
598 | } | 598 | } |
599 | 599 | ||
600 | /* Wait for F/W to update one page ROM data. */ | 600 | /* Wait for F/W to update one page ROM data. */ |
601 | msleep(20); | 601 | msleep(35); |
602 | 602 | ||
603 | error = elan_i2c_read_cmd(client, ETP_I2C_IAP_CTRL_CMD, val); | 603 | error = elan_i2c_read_cmd(client, ETP_I2C_IAP_CTRL_CMD, val); |
604 | if (error) { | 604 | if (error) { |
diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig index 49bd2ab8c507..f3a21343e636 100644 --- a/drivers/iommu/Kconfig +++ b/drivers/iommu/Kconfig | |||
@@ -278,7 +278,7 @@ config EXYNOS_IOMMU_DEBUG | |||
278 | config IPMMU_VMSA | 278 | config IPMMU_VMSA |
279 | bool "Renesas VMSA-compatible IPMMU" | 279 | bool "Renesas VMSA-compatible IPMMU" |
280 | depends on ARM || IOMMU_DMA | 280 | depends on ARM || IOMMU_DMA |
281 | depends on ARCH_RENESAS || COMPILE_TEST | 281 | depends on ARCH_RENESAS || (COMPILE_TEST && !GENERIC_ATOMIC64) |
282 | select IOMMU_API | 282 | select IOMMU_API |
283 | select IOMMU_IO_PGTABLE_LPAE | 283 | select IOMMU_IO_PGTABLE_LPAE |
284 | select ARM_DMA_USE_IOMMU | 284 | select ARM_DMA_USE_IOMMU |
@@ -373,7 +373,8 @@ config MTK_IOMMU_V1 | |||
373 | config QCOM_IOMMU | 373 | config QCOM_IOMMU |
374 | # Note: iommu drivers cannot (yet?) be built as modules | 374 | # Note: iommu drivers cannot (yet?) be built as modules |
375 | bool "Qualcomm IOMMU Support" | 375 | bool "Qualcomm IOMMU Support" |
376 | depends on ARCH_QCOM || COMPILE_TEST | 376 | depends on ARCH_QCOM || (COMPILE_TEST && !GENERIC_ATOMIC64) |
377 | depends on HAS_DMA | ||
377 | select IOMMU_API | 378 | select IOMMU_API |
378 | select IOMMU_IO_PGTABLE_LPAE | 379 | select IOMMU_IO_PGTABLE_LPAE |
379 | select ARM_DMA_USE_IOMMU | 380 | select ARM_DMA_USE_IOMMU |
diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c index 382de42b8359..6fe2d0346073 100644 --- a/drivers/iommu/amd_iommu_init.c +++ b/drivers/iommu/amd_iommu_init.c | |||
@@ -874,7 +874,7 @@ static bool copy_device_table(void) | |||
874 | hi = readl(iommu->mmio_base + MMIO_DEV_TABLE_OFFSET + 4); | 874 | hi = readl(iommu->mmio_base + MMIO_DEV_TABLE_OFFSET + 4); |
875 | entry = (((u64) hi) << 32) + lo; | 875 | entry = (((u64) hi) << 32) + lo; |
876 | if (last_entry && last_entry != entry) { | 876 | if (last_entry && last_entry != entry) { |
877 | pr_err("IOMMU:%d should use the same dev table as others!/n", | 877 | pr_err("IOMMU:%d should use the same dev table as others!\n", |
878 | iommu->index); | 878 | iommu->index); |
879 | return false; | 879 | return false; |
880 | } | 880 | } |
@@ -882,7 +882,7 @@ static bool copy_device_table(void) | |||
882 | 882 | ||
883 | old_devtb_size = ((entry & ~PAGE_MASK) + 1) << 12; | 883 | old_devtb_size = ((entry & ~PAGE_MASK) + 1) << 12; |
884 | if (old_devtb_size != dev_table_size) { | 884 | if (old_devtb_size != dev_table_size) { |
885 | pr_err("The device table size of IOMMU:%d is not expected!/n", | 885 | pr_err("The device table size of IOMMU:%d is not expected!\n", |
886 | iommu->index); | 886 | iommu->index); |
887 | return false; | 887 | return false; |
888 | } | 888 | } |
@@ -890,7 +890,7 @@ static bool copy_device_table(void) | |||
890 | 890 | ||
891 | old_devtb_phys = entry & PAGE_MASK; | 891 | old_devtb_phys = entry & PAGE_MASK; |
892 | if (old_devtb_phys >= 0x100000000ULL) { | 892 | if (old_devtb_phys >= 0x100000000ULL) { |
893 | pr_err("The address of old device table is above 4G, not trustworthy!/n"); | 893 | pr_err("The address of old device table is above 4G, not trustworthy!\n"); |
894 | return false; | 894 | return false; |
895 | } | 895 | } |
896 | old_devtb = memremap(old_devtb_phys, dev_table_size, MEMREMAP_WB); | 896 | old_devtb = memremap(old_devtb_phys, dev_table_size, MEMREMAP_WB); |
@@ -901,7 +901,7 @@ static bool copy_device_table(void) | |||
901 | old_dev_tbl_cpy = (void *)__get_free_pages(gfp_flag, | 901 | old_dev_tbl_cpy = (void *)__get_free_pages(gfp_flag, |
902 | get_order(dev_table_size)); | 902 | get_order(dev_table_size)); |
903 | if (old_dev_tbl_cpy == NULL) { | 903 | if (old_dev_tbl_cpy == NULL) { |
904 | pr_err("Failed to allocate memory for copying old device table!/n"); | 904 | pr_err("Failed to allocate memory for copying old device table!\n"); |
905 | return false; | 905 | return false; |
906 | } | 906 | } |
907 | 907 | ||
diff --git a/drivers/iommu/dmar.c b/drivers/iommu/dmar.c index ca5ebaeafd6a..57c920c1372d 100644 --- a/drivers/iommu/dmar.c +++ b/drivers/iommu/dmar.c | |||
@@ -497,7 +497,7 @@ static int dmar_parse_one_rhsa(struct acpi_dmar_header *header, void *arg) | |||
497 | #define dmar_parse_one_rhsa dmar_res_noop | 497 | #define dmar_parse_one_rhsa dmar_res_noop |
498 | #endif | 498 | #endif |
499 | 499 | ||
500 | static void __init | 500 | static void |
501 | dmar_table_print_dmar_entry(struct acpi_dmar_header *header) | 501 | dmar_table_print_dmar_entry(struct acpi_dmar_header *header) |
502 | { | 502 | { |
503 | struct acpi_dmar_hardware_unit *drhd; | 503 | struct acpi_dmar_hardware_unit *drhd; |
diff --git a/drivers/iommu/io-pgtable-arm-v7s.c b/drivers/iommu/io-pgtable-arm-v7s.c index d665d0dc16e8..6961fc393f0b 100644 --- a/drivers/iommu/io-pgtable-arm-v7s.c +++ b/drivers/iommu/io-pgtable-arm-v7s.c | |||
@@ -245,7 +245,7 @@ static void __arm_v7s_free_table(void *table, int lvl, | |||
245 | static void __arm_v7s_pte_sync(arm_v7s_iopte *ptep, int num_entries, | 245 | static void __arm_v7s_pte_sync(arm_v7s_iopte *ptep, int num_entries, |
246 | struct io_pgtable_cfg *cfg) | 246 | struct io_pgtable_cfg *cfg) |
247 | { | 247 | { |
248 | if (!(cfg->quirks & IO_PGTABLE_QUIRK_NO_DMA)) | 248 | if (cfg->quirks & IO_PGTABLE_QUIRK_NO_DMA) |
249 | return; | 249 | return; |
250 | 250 | ||
251 | dma_sync_single_for_device(cfg->iommu_dev, __arm_v7s_dma_addr(ptep), | 251 | dma_sync_single_for_device(cfg->iommu_dev, __arm_v7s_dma_addr(ptep), |
diff --git a/drivers/iommu/mtk_iommu.c b/drivers/iommu/mtk_iommu.c index bd515be5b380..16d33ac19db0 100644 --- a/drivers/iommu/mtk_iommu.c +++ b/drivers/iommu/mtk_iommu.c | |||
@@ -371,7 +371,8 @@ static int mtk_iommu_map(struct iommu_domain *domain, unsigned long iova, | |||
371 | int ret; | 371 | int ret; |
372 | 372 | ||
373 | spin_lock_irqsave(&dom->pgtlock, flags); | 373 | spin_lock_irqsave(&dom->pgtlock, flags); |
374 | ret = dom->iop->map(dom->iop, iova, paddr, size, prot); | 374 | ret = dom->iop->map(dom->iop, iova, paddr & DMA_BIT_MASK(32), |
375 | size, prot); | ||
375 | spin_unlock_irqrestore(&dom->pgtlock, flags); | 376 | spin_unlock_irqrestore(&dom->pgtlock, flags); |
376 | 377 | ||
377 | return ret; | 378 | return ret; |
diff --git a/drivers/iommu/of_iommu.c b/drivers/iommu/of_iommu.c index e60e3dba85a0..50947ebb6d17 100644 --- a/drivers/iommu/of_iommu.c +++ b/drivers/iommu/of_iommu.c | |||
@@ -157,10 +157,7 @@ static int of_pci_iommu_init(struct pci_dev *pdev, u16 alias, void *data) | |||
157 | 157 | ||
158 | err = of_iommu_xlate(info->dev, &iommu_spec); | 158 | err = of_iommu_xlate(info->dev, &iommu_spec); |
159 | of_node_put(iommu_spec.np); | 159 | of_node_put(iommu_spec.np); |
160 | if (err) | 160 | return err; |
161 | return err; | ||
162 | |||
163 | return info->np == pdev->bus->dev.of_node; | ||
164 | } | 161 | } |
165 | 162 | ||
166 | const struct iommu_ops *of_iommu_configure(struct device *dev, | 163 | const struct iommu_ops *of_iommu_configure(struct device *dev, |
diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c index 519149ec9053..b5df99c6f680 100644 --- a/drivers/irqchip/irq-gic-v3.c +++ b/drivers/irqchip/irq-gic-v3.c | |||
@@ -1042,7 +1042,7 @@ static int get_cpu_number(struct device_node *dn) | |||
1042 | { | 1042 | { |
1043 | const __be32 *cell; | 1043 | const __be32 *cell; |
1044 | u64 hwid; | 1044 | u64 hwid; |
1045 | int i; | 1045 | int cpu; |
1046 | 1046 | ||
1047 | cell = of_get_property(dn, "reg", NULL); | 1047 | cell = of_get_property(dn, "reg", NULL); |
1048 | if (!cell) | 1048 | if (!cell) |
@@ -1056,9 +1056,9 @@ static int get_cpu_number(struct device_node *dn) | |||
1056 | if (hwid & ~MPIDR_HWID_BITMASK) | 1056 | if (hwid & ~MPIDR_HWID_BITMASK) |
1057 | return -1; | 1057 | return -1; |
1058 | 1058 | ||
1059 | for (i = 0; i < num_possible_cpus(); i++) | 1059 | for_each_possible_cpu(cpu) |
1060 | if (cpu_logical_map(i) == hwid) | 1060 | if (cpu_logical_map(cpu) == hwid) |
1061 | return i; | 1061 | return cpu; |
1062 | 1062 | ||
1063 | return -1; | 1063 | return -1; |
1064 | } | 1064 | } |
diff --git a/drivers/irqchip/irq-gic-v4.c b/drivers/irqchip/irq-gic-v4.c index 2370e6d9e603..cd0bcc3b7e33 100644 --- a/drivers/irqchip/irq-gic-v4.c +++ b/drivers/irqchip/irq-gic-v4.c | |||
@@ -173,7 +173,9 @@ int its_map_vlpi(int irq, struct its_vlpi_map *map) | |||
173 | { | 173 | { |
174 | struct its_cmd_info info = { | 174 | struct its_cmd_info info = { |
175 | .cmd_type = MAP_VLPI, | 175 | .cmd_type = MAP_VLPI, |
176 | .map = map, | 176 | { |
177 | .map = map, | ||
178 | }, | ||
177 | }; | 179 | }; |
178 | 180 | ||
179 | /* | 181 | /* |
@@ -189,7 +191,9 @@ int its_get_vlpi(int irq, struct its_vlpi_map *map) | |||
189 | { | 191 | { |
190 | struct its_cmd_info info = { | 192 | struct its_cmd_info info = { |
191 | .cmd_type = GET_VLPI, | 193 | .cmd_type = GET_VLPI, |
192 | .map = map, | 194 | { |
195 | .map = map, | ||
196 | }, | ||
193 | }; | 197 | }; |
194 | 198 | ||
195 | return irq_set_vcpu_affinity(irq, &info); | 199 | return irq_set_vcpu_affinity(irq, &info); |
@@ -205,7 +209,9 @@ int its_prop_update_vlpi(int irq, u8 config, bool inv) | |||
205 | { | 209 | { |
206 | struct its_cmd_info info = { | 210 | struct its_cmd_info info = { |
207 | .cmd_type = inv ? PROP_UPDATE_AND_INV_VLPI : PROP_UPDATE_VLPI, | 211 | .cmd_type = inv ? PROP_UPDATE_AND_INV_VLPI : PROP_UPDATE_VLPI, |
208 | .config = config, | 212 | { |
213 | .config = config, | ||
214 | }, | ||
209 | }; | 215 | }; |
210 | 216 | ||
211 | return irq_set_vcpu_affinity(irq, &info); | 217 | return irq_set_vcpu_affinity(irq, &info); |
diff --git a/drivers/irqchip/irq-mips-gic.c b/drivers/irqchip/irq-mips-gic.c index 6e52a88bbd9e..c90976d7e53c 100644 --- a/drivers/irqchip/irq-mips-gic.c +++ b/drivers/irqchip/irq-mips-gic.c | |||
@@ -169,20 +169,19 @@ static void gic_mask_irq(struct irq_data *d) | |||
169 | { | 169 | { |
170 | unsigned int intr = GIC_HWIRQ_TO_SHARED(d->hwirq); | 170 | unsigned int intr = GIC_HWIRQ_TO_SHARED(d->hwirq); |
171 | 171 | ||
172 | write_gic_rmask(BIT(intr)); | 172 | write_gic_rmask(intr); |
173 | gic_clear_pcpu_masks(intr); | 173 | gic_clear_pcpu_masks(intr); |
174 | } | 174 | } |
175 | 175 | ||
176 | static void gic_unmask_irq(struct irq_data *d) | 176 | static void gic_unmask_irq(struct irq_data *d) |
177 | { | 177 | { |
178 | struct cpumask *affinity = irq_data_get_affinity_mask(d); | ||
179 | unsigned int intr = GIC_HWIRQ_TO_SHARED(d->hwirq); | 178 | unsigned int intr = GIC_HWIRQ_TO_SHARED(d->hwirq); |
180 | unsigned int cpu; | 179 | unsigned int cpu; |
181 | 180 | ||
182 | write_gic_smask(BIT(intr)); | 181 | write_gic_smask(intr); |
183 | 182 | ||
184 | gic_clear_pcpu_masks(intr); | 183 | gic_clear_pcpu_masks(intr); |
185 | cpu = cpumask_first_and(affinity, cpu_online_mask); | 184 | cpu = cpumask_first(irq_data_get_effective_affinity_mask(d)); |
186 | set_bit(intr, per_cpu_ptr(pcpu_masks, cpu)); | 185 | set_bit(intr, per_cpu_ptr(pcpu_masks, cpu)); |
187 | } | 186 | } |
188 | 187 | ||
@@ -420,13 +419,17 @@ static int gic_shared_irq_domain_map(struct irq_domain *d, unsigned int virq, | |||
420 | irq_hw_number_t hw, unsigned int cpu) | 419 | irq_hw_number_t hw, unsigned int cpu) |
421 | { | 420 | { |
422 | int intr = GIC_HWIRQ_TO_SHARED(hw); | 421 | int intr = GIC_HWIRQ_TO_SHARED(hw); |
422 | struct irq_data *data; | ||
423 | unsigned long flags; | 423 | unsigned long flags; |
424 | 424 | ||
425 | data = irq_get_irq_data(virq); | ||
426 | |||
425 | spin_lock_irqsave(&gic_lock, flags); | 427 | spin_lock_irqsave(&gic_lock, flags); |
426 | write_gic_map_pin(intr, GIC_MAP_PIN_MAP_TO_PIN | gic_cpu_pin); | 428 | write_gic_map_pin(intr, GIC_MAP_PIN_MAP_TO_PIN | gic_cpu_pin); |
427 | write_gic_map_vp(intr, BIT(mips_cm_vp_id(cpu))); | 429 | write_gic_map_vp(intr, BIT(mips_cm_vp_id(cpu))); |
428 | gic_clear_pcpu_masks(intr); | 430 | gic_clear_pcpu_masks(intr); |
429 | set_bit(intr, per_cpu_ptr(pcpu_masks, cpu)); | 431 | set_bit(intr, per_cpu_ptr(pcpu_masks, cpu)); |
432 | irq_data_update_effective_affinity(data, cpumask_of(cpu)); | ||
430 | spin_unlock_irqrestore(&gic_lock, flags); | 433 | spin_unlock_irqrestore(&gic_lock, flags); |
431 | 434 | ||
432 | return 0; | 435 | return 0; |
@@ -645,7 +648,7 @@ static int __init gic_of_init(struct device_node *node, | |||
645 | 648 | ||
646 | /* Find the first available CPU vector. */ | 649 | /* Find the first available CPU vector. */ |
647 | i = 0; | 650 | i = 0; |
648 | reserved = (C_SW0 | C_SW1) >> __fls(C_SW0); | 651 | reserved = (C_SW0 | C_SW1) >> __ffs(C_SW0); |
649 | while (!of_property_read_u32_index(node, "mti,reserved-cpu-vectors", | 652 | while (!of_property_read_u32_index(node, "mti,reserved-cpu-vectors", |
650 | i++, &cpu_vec)) | 653 | i++, &cpu_vec)) |
651 | reserved |= BIT(cpu_vec); | 654 | reserved |= BIT(cpu_vec); |
@@ -684,11 +687,11 @@ static int __init gic_of_init(struct device_node *node, | |||
684 | 687 | ||
685 | gicconfig = read_gic_config(); | 688 | gicconfig = read_gic_config(); |
686 | gic_shared_intrs = gicconfig & GIC_CONFIG_NUMINTERRUPTS; | 689 | gic_shared_intrs = gicconfig & GIC_CONFIG_NUMINTERRUPTS; |
687 | gic_shared_intrs >>= __fls(GIC_CONFIG_NUMINTERRUPTS); | 690 | gic_shared_intrs >>= __ffs(GIC_CONFIG_NUMINTERRUPTS); |
688 | gic_shared_intrs = (gic_shared_intrs + 1) * 8; | 691 | gic_shared_intrs = (gic_shared_intrs + 1) * 8; |
689 | 692 | ||
690 | gic_vpes = gicconfig & GIC_CONFIG_PVPS; | 693 | gic_vpes = gicconfig & GIC_CONFIG_PVPS; |
691 | gic_vpes >>= __fls(GIC_CONFIG_PVPS); | 694 | gic_vpes >>= __ffs(GIC_CONFIG_PVPS); |
692 | gic_vpes = gic_vpes + 1; | 695 | gic_vpes = gic_vpes + 1; |
693 | 696 | ||
694 | if (cpu_has_veic) { | 697 | if (cpu_has_veic) { |
@@ -767,7 +770,7 @@ static int __init gic_of_init(struct device_node *node, | |||
767 | for (i = 0; i < gic_shared_intrs; i++) { | 770 | for (i = 0; i < gic_shared_intrs; i++) { |
768 | change_gic_pol(i, GIC_POL_ACTIVE_HIGH); | 771 | change_gic_pol(i, GIC_POL_ACTIVE_HIGH); |
769 | change_gic_trig(i, GIC_TRIG_LEVEL); | 772 | change_gic_trig(i, GIC_TRIG_LEVEL); |
770 | write_gic_rmask(BIT(i)); | 773 | write_gic_rmask(i); |
771 | } | 774 | } |
772 | 775 | ||
773 | for (i = 0; i < gic_vpes; i++) { | 776 | for (i = 0; i < gic_vpes; i++) { |
diff --git a/drivers/isdn/i4l/isdn_ppp.c b/drivers/isdn/i4l/isdn_ppp.c index 6c44609fd83a..cd2b3c69771a 100644 --- a/drivers/isdn/i4l/isdn_ppp.c +++ b/drivers/isdn/i4l/isdn_ppp.c | |||
@@ -825,7 +825,6 @@ isdn_ppp_write(int min, struct file *file, const char __user *buf, int count) | |||
825 | isdn_net_local *lp; | 825 | isdn_net_local *lp; |
826 | struct ippp_struct *is; | 826 | struct ippp_struct *is; |
827 | int proto; | 827 | int proto; |
828 | unsigned char protobuf[4]; | ||
829 | 828 | ||
830 | is = file->private_data; | 829 | is = file->private_data; |
831 | 830 | ||
@@ -839,24 +838,28 @@ isdn_ppp_write(int min, struct file *file, const char __user *buf, int count) | |||
839 | if (!lp) | 838 | if (!lp) |
840 | printk(KERN_DEBUG "isdn_ppp_write: lp == NULL\n"); | 839 | printk(KERN_DEBUG "isdn_ppp_write: lp == NULL\n"); |
841 | else { | 840 | else { |
842 | /* | 841 | if (lp->isdn_device < 0 || lp->isdn_channel < 0) { |
843 | * Don't reset huptimer for | 842 | unsigned char protobuf[4]; |
844 | * LCP packets. (Echo requests). | 843 | /* |
845 | */ | 844 | * Don't reset huptimer for |
846 | if (copy_from_user(protobuf, buf, 4)) | 845 | * LCP packets. (Echo requests). |
847 | return -EFAULT; | 846 | */ |
848 | proto = PPP_PROTOCOL(protobuf); | 847 | if (copy_from_user(protobuf, buf, 4)) |
849 | if (proto != PPP_LCP) | 848 | return -EFAULT; |
850 | lp->huptimer = 0; | 849 | |
850 | proto = PPP_PROTOCOL(protobuf); | ||
851 | if (proto != PPP_LCP) | ||
852 | lp->huptimer = 0; | ||
851 | 853 | ||
852 | if (lp->isdn_device < 0 || lp->isdn_channel < 0) | ||
853 | return 0; | 854 | return 0; |
855 | } | ||
854 | 856 | ||
855 | if ((dev->drv[lp->isdn_device]->flags & DRV_FLAG_RUNNING) && | 857 | if ((dev->drv[lp->isdn_device]->flags & DRV_FLAG_RUNNING) && |
856 | lp->dialstate == 0 && | 858 | lp->dialstate == 0 && |
857 | (lp->flags & ISDN_NET_CONNECTED)) { | 859 | (lp->flags & ISDN_NET_CONNECTED)) { |
858 | unsigned short hl; | 860 | unsigned short hl; |
859 | struct sk_buff *skb; | 861 | struct sk_buff *skb; |
862 | unsigned char *cpy_buf; | ||
860 | /* | 863 | /* |
861 | * we need to reserve enough space in front of | 864 | * we need to reserve enough space in front of |
862 | * sk_buff. old call to dev_alloc_skb only reserved | 865 | * sk_buff. old call to dev_alloc_skb only reserved |
@@ -869,11 +872,21 @@ isdn_ppp_write(int min, struct file *file, const char __user *buf, int count) | |||
869 | return count; | 872 | return count; |
870 | } | 873 | } |
871 | skb_reserve(skb, hl); | 874 | skb_reserve(skb, hl); |
872 | if (copy_from_user(skb_put(skb, count), buf, count)) | 875 | cpy_buf = skb_put(skb, count); |
876 | if (copy_from_user(cpy_buf, buf, count)) | ||
873 | { | 877 | { |
874 | kfree_skb(skb); | 878 | kfree_skb(skb); |
875 | return -EFAULT; | 879 | return -EFAULT; |
876 | } | 880 | } |
881 | |||
882 | /* | ||
883 | * Don't reset huptimer for | ||
884 | * LCP packets. (Echo requests). | ||
885 | */ | ||
886 | proto = PPP_PROTOCOL(cpy_buf); | ||
887 | if (proto != PPP_LCP) | ||
888 | lp->huptimer = 0; | ||
889 | |||
877 | if (is->debug & 0x40) { | 890 | if (is->debug & 0x40) { |
878 | printk(KERN_DEBUG "ppp xmit: len %d\n", (int) skb->len); | 891 | printk(KERN_DEBUG "ppp xmit: len %d\n", (int) skb->len); |
879 | isdn_ppp_frame_log("xmit", skb->data, skb->len, 32, is->unit, lp->ppp_slot); | 892 | isdn_ppp_frame_log("xmit", skb->data, skb->len, 32, is->unit, lp->ppp_slot); |
diff --git a/drivers/leds/leds-as3645a.c b/drivers/leds/leds-as3645a.c index bbbbe0898233..9a257f969300 100644 --- a/drivers/leds/leds-as3645a.c +++ b/drivers/leds/leds-as3645a.c | |||
@@ -112,6 +112,10 @@ | |||
112 | #define AS_PEAK_mA_TO_REG(a) \ | 112 | #define AS_PEAK_mA_TO_REG(a) \ |
113 | ((min_t(u32, AS_PEAK_mA_MAX, a) - 1250) / 250) | 113 | ((min_t(u32, AS_PEAK_mA_MAX, a) - 1250) / 250) |
114 | 114 | ||
115 | /* LED numbers for Devicetree */ | ||
116 | #define AS_LED_FLASH 0 | ||
117 | #define AS_LED_INDICATOR 1 | ||
118 | |||
115 | enum as_mode { | 119 | enum as_mode { |
116 | AS_MODE_EXT_TORCH = 0 << AS_CONTROL_MODE_SETTING_SHIFT, | 120 | AS_MODE_EXT_TORCH = 0 << AS_CONTROL_MODE_SETTING_SHIFT, |
117 | AS_MODE_INDICATOR = 1 << AS_CONTROL_MODE_SETTING_SHIFT, | 121 | AS_MODE_INDICATOR = 1 << AS_CONTROL_MODE_SETTING_SHIFT, |
@@ -491,10 +495,29 @@ static int as3645a_parse_node(struct as3645a *flash, | |||
491 | struct device_node *node) | 495 | struct device_node *node) |
492 | { | 496 | { |
493 | struct as3645a_config *cfg = &flash->cfg; | 497 | struct as3645a_config *cfg = &flash->cfg; |
498 | struct device_node *child; | ||
494 | const char *name; | 499 | const char *name; |
495 | int rval; | 500 | int rval; |
496 | 501 | ||
497 | flash->flash_node = of_get_child_by_name(node, "flash"); | 502 | for_each_child_of_node(node, child) { |
503 | u32 id = 0; | ||
504 | |||
505 | of_property_read_u32(child, "reg", &id); | ||
506 | |||
507 | switch (id) { | ||
508 | case AS_LED_FLASH: | ||
509 | flash->flash_node = of_node_get(child); | ||
510 | break; | ||
511 | case AS_LED_INDICATOR: | ||
512 | flash->indicator_node = of_node_get(child); | ||
513 | break; | ||
514 | default: | ||
515 | dev_warn(&flash->client->dev, | ||
516 | "unknown LED %u encountered, ignoring\n", id); | ||
517 | break; | ||
518 | } | ||
519 | } | ||
520 | |||
498 | if (!flash->flash_node) { | 521 | if (!flash->flash_node) { |
499 | dev_err(&flash->client->dev, "can't find flash node\n"); | 522 | dev_err(&flash->client->dev, "can't find flash node\n"); |
500 | return -ENODEV; | 523 | return -ENODEV; |
@@ -534,11 +557,10 @@ static int as3645a_parse_node(struct as3645a *flash, | |||
534 | of_property_read_u32(flash->flash_node, "voltage-reference", | 557 | of_property_read_u32(flash->flash_node, "voltage-reference", |
535 | &cfg->voltage_reference); | 558 | &cfg->voltage_reference); |
536 | 559 | ||
537 | of_property_read_u32(flash->flash_node, "peak-current-limit", | 560 | of_property_read_u32(flash->flash_node, "ams,input-max-microamp", |
538 | &cfg->peak); | 561 | &cfg->peak); |
539 | cfg->peak = AS_PEAK_mA_TO_REG(cfg->peak); | 562 | cfg->peak = AS_PEAK_mA_TO_REG(cfg->peak); |
540 | 563 | ||
541 | flash->indicator_node = of_get_child_by_name(node, "indicator"); | ||
542 | if (!flash->indicator_node) { | 564 | if (!flash->indicator_node) { |
543 | dev_warn(&flash->client->dev, | 565 | dev_warn(&flash->client->dev, |
544 | "can't find indicator node\n"); | 566 | "can't find indicator node\n"); |
@@ -721,6 +743,7 @@ static int as3645a_remove(struct i2c_client *client) | |||
721 | as3645a_set_control(flash, AS_MODE_EXT_TORCH, false); | 743 | as3645a_set_control(flash, AS_MODE_EXT_TORCH, false); |
722 | 744 | ||
723 | v4l2_flash_release(flash->vf); | 745 | v4l2_flash_release(flash->vf); |
746 | v4l2_flash_release(flash->vfind); | ||
724 | 747 | ||
725 | led_classdev_flash_unregister(&flash->fled); | 748 | led_classdev_flash_unregister(&flash->fled); |
726 | led_classdev_unregister(&flash->iled_cdev); | 749 | led_classdev_unregister(&flash->iled_cdev); |
diff --git a/drivers/md/bcache/closure.c b/drivers/md/bcache/closure.c index 7d5286b05036..1841d0359bac 100644 --- a/drivers/md/bcache/closure.c +++ b/drivers/md/bcache/closure.c | |||
@@ -64,7 +64,7 @@ EXPORT_SYMBOL(closure_put); | |||
64 | void __closure_wake_up(struct closure_waitlist *wait_list) | 64 | void __closure_wake_up(struct closure_waitlist *wait_list) |
65 | { | 65 | { |
66 | struct llist_node *list; | 66 | struct llist_node *list; |
67 | struct closure *cl; | 67 | struct closure *cl, *t; |
68 | struct llist_node *reverse = NULL; | 68 | struct llist_node *reverse = NULL; |
69 | 69 | ||
70 | list = llist_del_all(&wait_list->list); | 70 | list = llist_del_all(&wait_list->list); |
@@ -73,7 +73,7 @@ void __closure_wake_up(struct closure_waitlist *wait_list) | |||
73 | reverse = llist_reverse_order(list); | 73 | reverse = llist_reverse_order(list); |
74 | 74 | ||
75 | /* Then do the wakeups */ | 75 | /* Then do the wakeups */ |
76 | llist_for_each_entry(cl, reverse, list) { | 76 | llist_for_each_entry_safe(cl, t, reverse, list) { |
77 | closure_set_waiting(cl, 0); | 77 | closure_set_waiting(cl, 0); |
78 | closure_sub(cl, CLOSURE_WAITING + 1); | 78 | closure_sub(cl, CLOSURE_WAITING + 1); |
79 | } | 79 | } |
diff --git a/drivers/md/dm-core.h b/drivers/md/dm-core.h index 24eddbdf2ab4..203144762f36 100644 --- a/drivers/md/dm-core.h +++ b/drivers/md/dm-core.h | |||
@@ -149,5 +149,6 @@ static inline bool dm_message_test_buffer_overflow(char *result, unsigned maxlen | |||
149 | 149 | ||
150 | extern atomic_t dm_global_event_nr; | 150 | extern atomic_t dm_global_event_nr; |
151 | extern wait_queue_head_t dm_global_eventq; | 151 | extern wait_queue_head_t dm_global_eventq; |
152 | void dm_issue_global_event(void); | ||
152 | 153 | ||
153 | #endif | 154 | #endif |
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c index a55ffd4f5933..96ab46512e1f 100644 --- a/drivers/md/dm-crypt.c +++ b/drivers/md/dm-crypt.c | |||
@@ -2466,6 +2466,7 @@ static int crypt_ctr_cipher_old(struct dm_target *ti, char *cipher_in, char *key | |||
2466 | kfree(cipher_api); | 2466 | kfree(cipher_api); |
2467 | return ret; | 2467 | return ret; |
2468 | } | 2468 | } |
2469 | kfree(cipher_api); | ||
2469 | 2470 | ||
2470 | return 0; | 2471 | return 0; |
2471 | bad_mem: | 2472 | bad_mem: |
@@ -2584,6 +2585,10 @@ static int crypt_ctr_optional(struct dm_target *ti, unsigned int argc, char **ar | |||
2584 | ti->error = "Invalid feature value for sector_size"; | 2585 | ti->error = "Invalid feature value for sector_size"; |
2585 | return -EINVAL; | 2586 | return -EINVAL; |
2586 | } | 2587 | } |
2588 | if (ti->len & ((cc->sector_size >> SECTOR_SHIFT) - 1)) { | ||
2589 | ti->error = "Device size is not multiple of sector_size feature"; | ||
2590 | return -EINVAL; | ||
2591 | } | ||
2587 | cc->sector_shift = __ffs(cc->sector_size) - SECTOR_SHIFT; | 2592 | cc->sector_shift = __ffs(cc->sector_size) - SECTOR_SHIFT; |
2588 | } else if (!strcasecmp(opt_string, "iv_large_sectors")) | 2593 | } else if (!strcasecmp(opt_string, "iv_large_sectors")) |
2589 | set_bit(CRYPT_IV_LARGE_SECTORS, &cc->cipher_flags); | 2594 | set_bit(CRYPT_IV_LARGE_SECTORS, &cc->cipher_flags); |
diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c index 8756a6850431..e52676fa9832 100644 --- a/drivers/md/dm-ioctl.c +++ b/drivers/md/dm-ioctl.c | |||
@@ -477,9 +477,13 @@ static int remove_all(struct file *filp, struct dm_ioctl *param, size_t param_si | |||
477 | * Round up the ptr to an 8-byte boundary. | 477 | * Round up the ptr to an 8-byte boundary. |
478 | */ | 478 | */ |
479 | #define ALIGN_MASK 7 | 479 | #define ALIGN_MASK 7 |
480 | static inline size_t align_val(size_t val) | ||
481 | { | ||
482 | return (val + ALIGN_MASK) & ~ALIGN_MASK; | ||
483 | } | ||
480 | static inline void *align_ptr(void *ptr) | 484 | static inline void *align_ptr(void *ptr) |
481 | { | 485 | { |
482 | return (void *) (((size_t) (ptr + ALIGN_MASK)) & ~ALIGN_MASK); | 486 | return (void *)align_val((size_t)ptr); |
483 | } | 487 | } |
484 | 488 | ||
485 | /* | 489 | /* |
@@ -505,7 +509,7 @@ static int list_devices(struct file *filp, struct dm_ioctl *param, size_t param_ | |||
505 | struct hash_cell *hc; | 509 | struct hash_cell *hc; |
506 | size_t len, needed = 0; | 510 | size_t len, needed = 0; |
507 | struct gendisk *disk; | 511 | struct gendisk *disk; |
508 | struct dm_name_list *nl, *old_nl = NULL; | 512 | struct dm_name_list *orig_nl, *nl, *old_nl = NULL; |
509 | uint32_t *event_nr; | 513 | uint32_t *event_nr; |
510 | 514 | ||
511 | down_write(&_hash_lock); | 515 | down_write(&_hash_lock); |
@@ -516,17 +520,15 @@ static int list_devices(struct file *filp, struct dm_ioctl *param, size_t param_ | |||
516 | */ | 520 | */ |
517 | for (i = 0; i < NUM_BUCKETS; i++) { | 521 | for (i = 0; i < NUM_BUCKETS; i++) { |
518 | list_for_each_entry (hc, _name_buckets + i, name_list) { | 522 | list_for_each_entry (hc, _name_buckets + i, name_list) { |
519 | needed += sizeof(struct dm_name_list); | 523 | needed += align_val(offsetof(struct dm_name_list, name) + strlen(hc->name) + 1); |
520 | needed += strlen(hc->name) + 1; | 524 | needed += align_val(sizeof(uint32_t)); |
521 | needed += ALIGN_MASK; | ||
522 | needed += (sizeof(uint32_t) + ALIGN_MASK) & ~ALIGN_MASK; | ||
523 | } | 525 | } |
524 | } | 526 | } |
525 | 527 | ||
526 | /* | 528 | /* |
527 | * Grab our output buffer. | 529 | * Grab our output buffer. |
528 | */ | 530 | */ |
529 | nl = get_result_buffer(param, param_size, &len); | 531 | nl = orig_nl = get_result_buffer(param, param_size, &len); |
530 | if (len < needed) { | 532 | if (len < needed) { |
531 | param->flags |= DM_BUFFER_FULL_FLAG; | 533 | param->flags |= DM_BUFFER_FULL_FLAG; |
532 | goto out; | 534 | goto out; |
@@ -549,11 +551,16 @@ static int list_devices(struct file *filp, struct dm_ioctl *param, size_t param_ | |||
549 | strcpy(nl->name, hc->name); | 551 | strcpy(nl->name, hc->name); |
550 | 552 | ||
551 | old_nl = nl; | 553 | old_nl = nl; |
552 | event_nr = align_ptr(((void *) (nl + 1)) + strlen(hc->name) + 1); | 554 | event_nr = align_ptr(nl->name + strlen(hc->name) + 1); |
553 | *event_nr = dm_get_event_nr(hc->md); | 555 | *event_nr = dm_get_event_nr(hc->md); |
554 | nl = align_ptr(event_nr + 1); | 556 | nl = align_ptr(event_nr + 1); |
555 | } | 557 | } |
556 | } | 558 | } |
559 | /* | ||
560 | * If mismatch happens, security may be compromised due to buffer | ||
561 | * overflow, so it's better to crash. | ||
562 | */ | ||
563 | BUG_ON((char *)nl - (char *)orig_nl != needed); | ||
557 | 564 | ||
558 | out: | 565 | out: |
559 | up_write(&_hash_lock); | 566 | up_write(&_hash_lock); |
@@ -1621,7 +1628,8 @@ static int target_message(struct file *filp, struct dm_ioctl *param, size_t para | |||
1621 | * which has a variable size, is not used by the function processing | 1628 | * which has a variable size, is not used by the function processing |
1622 | * the ioctl. | 1629 | * the ioctl. |
1623 | */ | 1630 | */ |
1624 | #define IOCTL_FLAGS_NO_PARAMS 1 | 1631 | #define IOCTL_FLAGS_NO_PARAMS 1 |
1632 | #define IOCTL_FLAGS_ISSUE_GLOBAL_EVENT 2 | ||
1625 | 1633 | ||
1626 | /*----------------------------------------------------------------- | 1634 | /*----------------------------------------------------------------- |
1627 | * Implementation of open/close/ioctl on the special char | 1635 | * Implementation of open/close/ioctl on the special char |
@@ -1635,12 +1643,12 @@ static ioctl_fn lookup_ioctl(unsigned int cmd, int *ioctl_flags) | |||
1635 | ioctl_fn fn; | 1643 | ioctl_fn fn; |
1636 | } _ioctls[] = { | 1644 | } _ioctls[] = { |
1637 | {DM_VERSION_CMD, 0, NULL}, /* version is dealt with elsewhere */ | 1645 | {DM_VERSION_CMD, 0, NULL}, /* version is dealt with elsewhere */ |
1638 | {DM_REMOVE_ALL_CMD, IOCTL_FLAGS_NO_PARAMS, remove_all}, | 1646 | {DM_REMOVE_ALL_CMD, IOCTL_FLAGS_NO_PARAMS | IOCTL_FLAGS_ISSUE_GLOBAL_EVENT, remove_all}, |
1639 | {DM_LIST_DEVICES_CMD, 0, list_devices}, | 1647 | {DM_LIST_DEVICES_CMD, 0, list_devices}, |
1640 | 1648 | ||
1641 | {DM_DEV_CREATE_CMD, IOCTL_FLAGS_NO_PARAMS, dev_create}, | 1649 | {DM_DEV_CREATE_CMD, IOCTL_FLAGS_NO_PARAMS | IOCTL_FLAGS_ISSUE_GLOBAL_EVENT, dev_create}, |
1642 | {DM_DEV_REMOVE_CMD, IOCTL_FLAGS_NO_PARAMS, dev_remove}, | 1650 | {DM_DEV_REMOVE_CMD, IOCTL_FLAGS_NO_PARAMS | IOCTL_FLAGS_ISSUE_GLOBAL_EVENT, dev_remove}, |
1643 | {DM_DEV_RENAME_CMD, 0, dev_rename}, | 1651 | {DM_DEV_RENAME_CMD, IOCTL_FLAGS_ISSUE_GLOBAL_EVENT, dev_rename}, |
1644 | {DM_DEV_SUSPEND_CMD, IOCTL_FLAGS_NO_PARAMS, dev_suspend}, | 1652 | {DM_DEV_SUSPEND_CMD, IOCTL_FLAGS_NO_PARAMS, dev_suspend}, |
1645 | {DM_DEV_STATUS_CMD, IOCTL_FLAGS_NO_PARAMS, dev_status}, | 1653 | {DM_DEV_STATUS_CMD, IOCTL_FLAGS_NO_PARAMS, dev_status}, |
1646 | {DM_DEV_WAIT_CMD, 0, dev_wait}, | 1654 | {DM_DEV_WAIT_CMD, 0, dev_wait}, |
@@ -1869,6 +1877,9 @@ static int ctl_ioctl(struct file *file, uint command, struct dm_ioctl __user *us | |||
1869 | unlikely(ioctl_flags & IOCTL_FLAGS_NO_PARAMS)) | 1877 | unlikely(ioctl_flags & IOCTL_FLAGS_NO_PARAMS)) |
1870 | DMERR("ioctl %d tried to output some data but has IOCTL_FLAGS_NO_PARAMS set", cmd); | 1878 | DMERR("ioctl %d tried to output some data but has IOCTL_FLAGS_NO_PARAMS set", cmd); |
1871 | 1879 | ||
1880 | if (!r && ioctl_flags & IOCTL_FLAGS_ISSUE_GLOBAL_EVENT) | ||
1881 | dm_issue_global_event(); | ||
1882 | |||
1872 | /* | 1883 | /* |
1873 | * Copy the results back to userland. | 1884 | * Copy the results back to userland. |
1874 | */ | 1885 | */ |
diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c index 5bfe285ea9d1..2245d06d2045 100644 --- a/drivers/md/dm-raid.c +++ b/drivers/md/dm-raid.c | |||
@@ -3238,7 +3238,7 @@ static int raid_map(struct dm_target *ti, struct bio *bio) | |||
3238 | if (unlikely(bio_end_sector(bio) > mddev->array_sectors)) | 3238 | if (unlikely(bio_end_sector(bio) > mddev->array_sectors)) |
3239 | return DM_MAPIO_REQUEUE; | 3239 | return DM_MAPIO_REQUEUE; |
3240 | 3240 | ||
3241 | mddev->pers->make_request(mddev, bio); | 3241 | md_handle_request(mddev, bio); |
3242 | 3242 | ||
3243 | return DM_MAPIO_SUBMITTED; | 3243 | return DM_MAPIO_SUBMITTED; |
3244 | } | 3244 | } |
@@ -3297,11 +3297,10 @@ static const char *__raid_dev_status(struct raid_set *rs, struct md_rdev *rdev, | |||
3297 | static sector_t rs_get_progress(struct raid_set *rs, | 3297 | static sector_t rs_get_progress(struct raid_set *rs, |
3298 | sector_t resync_max_sectors, bool *array_in_sync) | 3298 | sector_t resync_max_sectors, bool *array_in_sync) |
3299 | { | 3299 | { |
3300 | sector_t r, recovery_cp, curr_resync_completed; | 3300 | sector_t r, curr_resync_completed; |
3301 | struct mddev *mddev = &rs->md; | 3301 | struct mddev *mddev = &rs->md; |
3302 | 3302 | ||
3303 | curr_resync_completed = mddev->curr_resync_completed ?: mddev->recovery_cp; | 3303 | curr_resync_completed = mddev->curr_resync_completed ?: mddev->recovery_cp; |
3304 | recovery_cp = mddev->recovery_cp; | ||
3305 | *array_in_sync = false; | 3304 | *array_in_sync = false; |
3306 | 3305 | ||
3307 | if (rs_is_raid0(rs)) { | 3306 | if (rs_is_raid0(rs)) { |
@@ -3330,9 +3329,11 @@ static sector_t rs_get_progress(struct raid_set *rs, | |||
3330 | } else if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) | 3329 | } else if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) |
3331 | r = curr_resync_completed; | 3330 | r = curr_resync_completed; |
3332 | else | 3331 | else |
3333 | r = recovery_cp; | 3332 | r = mddev->recovery_cp; |
3334 | 3333 | ||
3335 | if (r == MaxSector) { | 3334 | if ((r == MaxSector) || |
3335 | (test_bit(MD_RECOVERY_DONE, &mddev->recovery) && | ||
3336 | (mddev->curr_resync_completed == resync_max_sectors))) { | ||
3336 | /* | 3337 | /* |
3337 | * Sync complete. | 3338 | * Sync complete. |
3338 | */ | 3339 | */ |
@@ -3892,7 +3893,7 @@ static void raid_resume(struct dm_target *ti) | |||
3892 | 3893 | ||
3893 | static struct target_type raid_target = { | 3894 | static struct target_type raid_target = { |
3894 | .name = "raid", | 3895 | .name = "raid", |
3895 | .version = {1, 12, 1}, | 3896 | .version = {1, 13, 0}, |
3896 | .module = THIS_MODULE, | 3897 | .module = THIS_MODULE, |
3897 | .ctr = raid_ctr, | 3898 | .ctr = raid_ctr, |
3898 | .dtr = raid_dtr, | 3899 | .dtr = raid_dtr, |
diff --git a/drivers/md/dm.c b/drivers/md/dm.c index 6e54145969c5..4be85324f44d 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c | |||
@@ -52,6 +52,12 @@ static struct workqueue_struct *deferred_remove_workqueue; | |||
52 | atomic_t dm_global_event_nr = ATOMIC_INIT(0); | 52 | atomic_t dm_global_event_nr = ATOMIC_INIT(0); |
53 | DECLARE_WAIT_QUEUE_HEAD(dm_global_eventq); | 53 | DECLARE_WAIT_QUEUE_HEAD(dm_global_eventq); |
54 | 54 | ||
55 | void dm_issue_global_event(void) | ||
56 | { | ||
57 | atomic_inc(&dm_global_event_nr); | ||
58 | wake_up(&dm_global_eventq); | ||
59 | } | ||
60 | |||
55 | /* | 61 | /* |
56 | * One of these is allocated per bio. | 62 | * One of these is allocated per bio. |
57 | */ | 63 | */ |
@@ -1865,9 +1871,8 @@ static void event_callback(void *context) | |||
1865 | dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj); | 1871 | dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj); |
1866 | 1872 | ||
1867 | atomic_inc(&md->event_nr); | 1873 | atomic_inc(&md->event_nr); |
1868 | atomic_inc(&dm_global_event_nr); | ||
1869 | wake_up(&md->eventq); | 1874 | wake_up(&md->eventq); |
1870 | wake_up(&dm_global_eventq); | 1875 | dm_issue_global_event(); |
1871 | } | 1876 | } |
1872 | 1877 | ||
1873 | /* | 1878 | /* |
@@ -2283,6 +2288,7 @@ struct dm_table *dm_swap_table(struct mapped_device *md, struct dm_table *table) | |||
2283 | } | 2288 | } |
2284 | 2289 | ||
2285 | map = __bind(md, table, &limits); | 2290 | map = __bind(md, table, &limits); |
2291 | dm_issue_global_event(); | ||
2286 | 2292 | ||
2287 | out: | 2293 | out: |
2288 | mutex_unlock(&md->suspend_lock); | 2294 | mutex_unlock(&md->suspend_lock); |
diff --git a/drivers/md/md.c b/drivers/md/md.c index 08fcaebc61bd..0ff1bbf6c90e 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c | |||
@@ -266,6 +266,37 @@ static DEFINE_SPINLOCK(all_mddevs_lock); | |||
266 | * call has finished, the bio has been linked into some internal structure | 266 | * call has finished, the bio has been linked into some internal structure |
267 | * and so is visible to ->quiesce(), so we don't need the refcount any more. | 267 | * and so is visible to ->quiesce(), so we don't need the refcount any more. |
268 | */ | 268 | */ |
269 | void md_handle_request(struct mddev *mddev, struct bio *bio) | ||
270 | { | ||
271 | check_suspended: | ||
272 | rcu_read_lock(); | ||
273 | if (mddev->suspended) { | ||
274 | DEFINE_WAIT(__wait); | ||
275 | for (;;) { | ||
276 | prepare_to_wait(&mddev->sb_wait, &__wait, | ||
277 | TASK_UNINTERRUPTIBLE); | ||
278 | if (!mddev->suspended) | ||
279 | break; | ||
280 | rcu_read_unlock(); | ||
281 | schedule(); | ||
282 | rcu_read_lock(); | ||
283 | } | ||
284 | finish_wait(&mddev->sb_wait, &__wait); | ||
285 | } | ||
286 | atomic_inc(&mddev->active_io); | ||
287 | rcu_read_unlock(); | ||
288 | |||
289 | if (!mddev->pers->make_request(mddev, bio)) { | ||
290 | atomic_dec(&mddev->active_io); | ||
291 | wake_up(&mddev->sb_wait); | ||
292 | goto check_suspended; | ||
293 | } | ||
294 | |||
295 | if (atomic_dec_and_test(&mddev->active_io) && mddev->suspended) | ||
296 | wake_up(&mddev->sb_wait); | ||
297 | } | ||
298 | EXPORT_SYMBOL(md_handle_request); | ||
299 | |||
269 | static blk_qc_t md_make_request(struct request_queue *q, struct bio *bio) | 300 | static blk_qc_t md_make_request(struct request_queue *q, struct bio *bio) |
270 | { | 301 | { |
271 | const int rw = bio_data_dir(bio); | 302 | const int rw = bio_data_dir(bio); |
@@ -285,23 +316,6 @@ static blk_qc_t md_make_request(struct request_queue *q, struct bio *bio) | |||
285 | bio_endio(bio); | 316 | bio_endio(bio); |
286 | return BLK_QC_T_NONE; | 317 | return BLK_QC_T_NONE; |
287 | } | 318 | } |
288 | check_suspended: | ||
289 | rcu_read_lock(); | ||
290 | if (mddev->suspended) { | ||
291 | DEFINE_WAIT(__wait); | ||
292 | for (;;) { | ||
293 | prepare_to_wait(&mddev->sb_wait, &__wait, | ||
294 | TASK_UNINTERRUPTIBLE); | ||
295 | if (!mddev->suspended) | ||
296 | break; | ||
297 | rcu_read_unlock(); | ||
298 | schedule(); | ||
299 | rcu_read_lock(); | ||
300 | } | ||
301 | finish_wait(&mddev->sb_wait, &__wait); | ||
302 | } | ||
303 | atomic_inc(&mddev->active_io); | ||
304 | rcu_read_unlock(); | ||
305 | 319 | ||
306 | /* | 320 | /* |
307 | * save the sectors now since our bio can | 321 | * save the sectors now since our bio can |
@@ -310,20 +324,14 @@ check_suspended: | |||
310 | sectors = bio_sectors(bio); | 324 | sectors = bio_sectors(bio); |
311 | /* bio could be mergeable after passing to underlayer */ | 325 | /* bio could be mergeable after passing to underlayer */ |
312 | bio->bi_opf &= ~REQ_NOMERGE; | 326 | bio->bi_opf &= ~REQ_NOMERGE; |
313 | if (!mddev->pers->make_request(mddev, bio)) { | 327 | |
314 | atomic_dec(&mddev->active_io); | 328 | md_handle_request(mddev, bio); |
315 | wake_up(&mddev->sb_wait); | ||
316 | goto check_suspended; | ||
317 | } | ||
318 | 329 | ||
319 | cpu = part_stat_lock(); | 330 | cpu = part_stat_lock(); |
320 | part_stat_inc(cpu, &mddev->gendisk->part0, ios[rw]); | 331 | part_stat_inc(cpu, &mddev->gendisk->part0, ios[rw]); |
321 | part_stat_add(cpu, &mddev->gendisk->part0, sectors[rw], sectors); | 332 | part_stat_add(cpu, &mddev->gendisk->part0, sectors[rw], sectors); |
322 | part_stat_unlock(); | 333 | part_stat_unlock(); |
323 | 334 | ||
324 | if (atomic_dec_and_test(&mddev->active_io) && mddev->suspended) | ||
325 | wake_up(&mddev->sb_wait); | ||
326 | |||
327 | return BLK_QC_T_NONE; | 335 | return BLK_QC_T_NONE; |
328 | } | 336 | } |
329 | 337 | ||
@@ -439,16 +447,22 @@ static void md_submit_flush_data(struct work_struct *ws) | |||
439 | struct mddev *mddev = container_of(ws, struct mddev, flush_work); | 447 | struct mddev *mddev = container_of(ws, struct mddev, flush_work); |
440 | struct bio *bio = mddev->flush_bio; | 448 | struct bio *bio = mddev->flush_bio; |
441 | 449 | ||
450 | /* | ||
451 | * must reset flush_bio before calling into md_handle_request to avoid a | ||
452 | * deadlock, because other bios passed md_handle_request suspend check | ||
453 | * could wait for this and below md_handle_request could wait for those | ||
454 | * bios because of suspend check | ||
455 | */ | ||
456 | mddev->flush_bio = NULL; | ||
457 | wake_up(&mddev->sb_wait); | ||
458 | |||
442 | if (bio->bi_iter.bi_size == 0) | 459 | if (bio->bi_iter.bi_size == 0) |
443 | /* an empty barrier - all done */ | 460 | /* an empty barrier - all done */ |
444 | bio_endio(bio); | 461 | bio_endio(bio); |
445 | else { | 462 | else { |
446 | bio->bi_opf &= ~REQ_PREFLUSH; | 463 | bio->bi_opf &= ~REQ_PREFLUSH; |
447 | mddev->pers->make_request(mddev, bio); | 464 | md_handle_request(mddev, bio); |
448 | } | 465 | } |
449 | |||
450 | mddev->flush_bio = NULL; | ||
451 | wake_up(&mddev->sb_wait); | ||
452 | } | 466 | } |
453 | 467 | ||
454 | void md_flush_request(struct mddev *mddev, struct bio *bio) | 468 | void md_flush_request(struct mddev *mddev, struct bio *bio) |
diff --git a/drivers/md/md.h b/drivers/md/md.h index 561d22b9a9a8..d8287d3cd1bf 100644 --- a/drivers/md/md.h +++ b/drivers/md/md.h | |||
@@ -692,6 +692,7 @@ extern void md_stop_writes(struct mddev *mddev); | |||
692 | extern int md_rdev_init(struct md_rdev *rdev); | 692 | extern int md_rdev_init(struct md_rdev *rdev); |
693 | extern void md_rdev_clear(struct md_rdev *rdev); | 693 | extern void md_rdev_clear(struct md_rdev *rdev); |
694 | 694 | ||
695 | extern void md_handle_request(struct mddev *mddev, struct bio *bio); | ||
695 | extern void mddev_suspend(struct mddev *mddev); | 696 | extern void mddev_suspend(struct mddev *mddev); |
696 | extern void mddev_resume(struct mddev *mddev); | 697 | extern void mddev_resume(struct mddev *mddev); |
697 | extern struct bio *bio_alloc_mddev(gfp_t gfp_mask, int nr_iovecs, | 698 | extern struct bio *bio_alloc_mddev(gfp_t gfp_mask, int nr_iovecs, |
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index 4188a4881148..928e24a07133 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c | |||
@@ -811,6 +811,14 @@ static void stripe_add_to_batch_list(struct r5conf *conf, struct stripe_head *sh | |||
811 | spin_unlock(&head->batch_head->batch_lock); | 811 | spin_unlock(&head->batch_head->batch_lock); |
812 | goto unlock_out; | 812 | goto unlock_out; |
813 | } | 813 | } |
814 | /* | ||
815 | * We must assign batch_head of this stripe within the | ||
816 | * batch_lock, otherwise clear_batch_ready of batch head | ||
817 | * stripe could clear BATCH_READY bit of this stripe and | ||
818 | * this stripe->batch_head doesn't get assigned, which | ||
819 | * could confuse clear_batch_ready for this stripe | ||
820 | */ | ||
821 | sh->batch_head = head->batch_head; | ||
814 | 822 | ||
815 | /* | 823 | /* |
816 | * at this point, head's BATCH_READY could be cleared, but we | 824 | * at this point, head's BATCH_READY could be cleared, but we |
@@ -818,8 +826,6 @@ static void stripe_add_to_batch_list(struct r5conf *conf, struct stripe_head *sh | |||
818 | */ | 826 | */ |
819 | list_add(&sh->batch_list, &head->batch_list); | 827 | list_add(&sh->batch_list, &head->batch_list); |
820 | spin_unlock(&head->batch_head->batch_lock); | 828 | spin_unlock(&head->batch_head->batch_lock); |
821 | |||
822 | sh->batch_head = head->batch_head; | ||
823 | } else { | 829 | } else { |
824 | head->batch_head = head; | 830 | head->batch_head = head; |
825 | sh->batch_head = head->batch_head; | 831 | sh->batch_head = head->batch_head; |
@@ -4599,7 +4605,8 @@ static void break_stripe_batch_list(struct stripe_head *head_sh, | |||
4599 | 4605 | ||
4600 | set_mask_bits(&sh->state, ~(STRIPE_EXPAND_SYNC_FLAGS | | 4606 | set_mask_bits(&sh->state, ~(STRIPE_EXPAND_SYNC_FLAGS | |
4601 | (1 << STRIPE_PREREAD_ACTIVE) | | 4607 | (1 << STRIPE_PREREAD_ACTIVE) | |
4602 | (1 << STRIPE_DEGRADED)), | 4608 | (1 << STRIPE_DEGRADED) | |
4609 | (1 << STRIPE_ON_UNPLUG_LIST)), | ||
4603 | head_sh->state & (1 << STRIPE_INSYNC)); | 4610 | head_sh->state & (1 << STRIPE_INSYNC)); |
4604 | 4611 | ||
4605 | sh->check_state = head_sh->check_state; | 4612 | sh->check_state = head_sh->check_state; |
@@ -6568,14 +6575,17 @@ static ssize_t | |||
6568 | raid5_store_group_thread_cnt(struct mddev *mddev, const char *page, size_t len) | 6575 | raid5_store_group_thread_cnt(struct mddev *mddev, const char *page, size_t len) |
6569 | { | 6576 | { |
6570 | struct r5conf *conf; | 6577 | struct r5conf *conf; |
6571 | unsigned long new; | 6578 | unsigned int new; |
6572 | int err; | 6579 | int err; |
6573 | struct r5worker_group *new_groups, *old_groups; | 6580 | struct r5worker_group *new_groups, *old_groups; |
6574 | int group_cnt, worker_cnt_per_group; | 6581 | int group_cnt, worker_cnt_per_group; |
6575 | 6582 | ||
6576 | if (len >= PAGE_SIZE) | 6583 | if (len >= PAGE_SIZE) |
6577 | return -EINVAL; | 6584 | return -EINVAL; |
6578 | if (kstrtoul(page, 10, &new)) | 6585 | if (kstrtouint(page, 10, &new)) |
6586 | return -EINVAL; | ||
6587 | /* 8192 should be big enough */ | ||
6588 | if (new > 8192) | ||
6579 | return -EINVAL; | 6589 | return -EINVAL; |
6580 | 6590 | ||
6581 | err = mddev_lock(mddev); | 6591 | err = mddev_lock(mddev); |
diff --git a/drivers/media/rc/ir-sharp-decoder.c b/drivers/media/rc/ir-sharp-decoder.c index ed43a4212479..129b558acc92 100644 --- a/drivers/media/rc/ir-sharp-decoder.c +++ b/drivers/media/rc/ir-sharp-decoder.c | |||
@@ -245,5 +245,5 @@ module_init(ir_sharp_decode_init); | |||
245 | module_exit(ir_sharp_decode_exit); | 245 | module_exit(ir_sharp_decode_exit); |
246 | 246 | ||
247 | MODULE_LICENSE("GPL"); | 247 | MODULE_LICENSE("GPL"); |
248 | MODULE_AUTHOR("James Hogan <james.hogan@imgtec.com>"); | 248 | MODULE_AUTHOR("James Hogan <jhogan@kernel.org>"); |
249 | MODULE_DESCRIPTION("Sharp IR protocol decoder"); | 249 | MODULE_DESCRIPTION("Sharp IR protocol decoder"); |
diff --git a/drivers/misc/cxl/cxllib.c b/drivers/misc/cxl/cxllib.c index 5dba23ca2e5f..dc9bc1807fdf 100644 --- a/drivers/misc/cxl/cxllib.c +++ b/drivers/misc/cxl/cxllib.c | |||
@@ -219,8 +219,17 @@ int cxllib_handle_fault(struct mm_struct *mm, u64 addr, u64 size, u64 flags) | |||
219 | 219 | ||
220 | down_read(&mm->mmap_sem); | 220 | down_read(&mm->mmap_sem); |
221 | 221 | ||
222 | for (dar = addr; dar < addr + size; dar += page_size) { | 222 | vma = find_vma(mm, addr); |
223 | if (!vma || dar < vma->vm_start || dar > vma->vm_end) { | 223 | if (!vma) { |
224 | pr_err("Can't find vma for addr %016llx\n", addr); | ||
225 | rc = -EFAULT; | ||
226 | goto out; | ||
227 | } | ||
228 | /* get the size of the pages allocated */ | ||
229 | page_size = vma_kernel_pagesize(vma); | ||
230 | |||
231 | for (dar = (addr & ~(page_size - 1)); dar < (addr + size); dar += page_size) { | ||
232 | if (dar < vma->vm_start || dar >= vma->vm_end) { | ||
224 | vma = find_vma(mm, addr); | 233 | vma = find_vma(mm, addr); |
225 | if (!vma) { | 234 | if (!vma) { |
226 | pr_err("Can't find vma for addr %016llx\n", addr); | 235 | pr_err("Can't find vma for addr %016llx\n", addr); |
diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c index 29fc1e662891..2ad7b5c69156 100644 --- a/drivers/mmc/core/block.c +++ b/drivers/mmc/core/block.c | |||
@@ -1634,8 +1634,6 @@ static void mmc_blk_data_prep(struct mmc_queue *mq, struct mmc_queue_req *mqrq, | |||
1634 | } | 1634 | } |
1635 | 1635 | ||
1636 | mqrq->areq.mrq = &brq->mrq; | 1636 | mqrq->areq.mrq = &brq->mrq; |
1637 | |||
1638 | mmc_queue_bounce_pre(mqrq); | ||
1639 | } | 1637 | } |
1640 | 1638 | ||
1641 | static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq, | 1639 | static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq, |
@@ -1829,7 +1827,6 @@ static void mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *new_req) | |||
1829 | brq = &mq_rq->brq; | 1827 | brq = &mq_rq->brq; |
1830 | old_req = mmc_queue_req_to_req(mq_rq); | 1828 | old_req = mmc_queue_req_to_req(mq_rq); |
1831 | type = rq_data_dir(old_req) == READ ? MMC_BLK_READ : MMC_BLK_WRITE; | 1829 | type = rq_data_dir(old_req) == READ ? MMC_BLK_READ : MMC_BLK_WRITE; |
1832 | mmc_queue_bounce_post(mq_rq); | ||
1833 | 1830 | ||
1834 | switch (status) { | 1831 | switch (status) { |
1835 | case MMC_BLK_SUCCESS: | 1832 | case MMC_BLK_SUCCESS: |
diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c index a7eb623f8daa..36217ad5e9b1 100644 --- a/drivers/mmc/core/mmc.c +++ b/drivers/mmc/core/mmc.c | |||
@@ -1286,6 +1286,23 @@ out_err: | |||
1286 | return err; | 1286 | return err; |
1287 | } | 1287 | } |
1288 | 1288 | ||
1289 | static void mmc_select_driver_type(struct mmc_card *card) | ||
1290 | { | ||
1291 | int card_drv_type, drive_strength, drv_type; | ||
1292 | |||
1293 | card_drv_type = card->ext_csd.raw_driver_strength | | ||
1294 | mmc_driver_type_mask(0); | ||
1295 | |||
1296 | drive_strength = mmc_select_drive_strength(card, | ||
1297 | card->ext_csd.hs200_max_dtr, | ||
1298 | card_drv_type, &drv_type); | ||
1299 | |||
1300 | card->drive_strength = drive_strength; | ||
1301 | |||
1302 | if (drv_type) | ||
1303 | mmc_set_driver_type(card->host, drv_type); | ||
1304 | } | ||
1305 | |||
1289 | static int mmc_select_hs400es(struct mmc_card *card) | 1306 | static int mmc_select_hs400es(struct mmc_card *card) |
1290 | { | 1307 | { |
1291 | struct mmc_host *host = card->host; | 1308 | struct mmc_host *host = card->host; |
@@ -1341,6 +1358,8 @@ static int mmc_select_hs400es(struct mmc_card *card) | |||
1341 | goto out_err; | 1358 | goto out_err; |
1342 | } | 1359 | } |
1343 | 1360 | ||
1361 | mmc_select_driver_type(card); | ||
1362 | |||
1344 | /* Switch card to HS400 */ | 1363 | /* Switch card to HS400 */ |
1345 | val = EXT_CSD_TIMING_HS400 | | 1364 | val = EXT_CSD_TIMING_HS400 | |
1346 | card->drive_strength << EXT_CSD_DRV_STR_SHIFT; | 1365 | card->drive_strength << EXT_CSD_DRV_STR_SHIFT; |
@@ -1374,23 +1393,6 @@ out_err: | |||
1374 | return err; | 1393 | return err; |
1375 | } | 1394 | } |
1376 | 1395 | ||
1377 | static void mmc_select_driver_type(struct mmc_card *card) | ||
1378 | { | ||
1379 | int card_drv_type, drive_strength, drv_type; | ||
1380 | |||
1381 | card_drv_type = card->ext_csd.raw_driver_strength | | ||
1382 | mmc_driver_type_mask(0); | ||
1383 | |||
1384 | drive_strength = mmc_select_drive_strength(card, | ||
1385 | card->ext_csd.hs200_max_dtr, | ||
1386 | card_drv_type, &drv_type); | ||
1387 | |||
1388 | card->drive_strength = drive_strength; | ||
1389 | |||
1390 | if (drv_type) | ||
1391 | mmc_set_driver_type(card->host, drv_type); | ||
1392 | } | ||
1393 | |||
1394 | /* | 1396 | /* |
1395 | * For device supporting HS200 mode, the following sequence | 1397 | * For device supporting HS200 mode, the following sequence |
1396 | * should be done before executing the tuning process. | 1398 | * should be done before executing the tuning process. |
diff --git a/drivers/mmc/core/queue.c b/drivers/mmc/core/queue.c index affa7370ba82..0a4e77a5ba33 100644 --- a/drivers/mmc/core/queue.c +++ b/drivers/mmc/core/queue.c | |||
@@ -23,8 +23,6 @@ | |||
23 | #include "core.h" | 23 | #include "core.h" |
24 | #include "card.h" | 24 | #include "card.h" |
25 | 25 | ||
26 | #define MMC_QUEUE_BOUNCESZ 65536 | ||
27 | |||
28 | /* | 26 | /* |
29 | * Prepare a MMC request. This just filters out odd stuff. | 27 | * Prepare a MMC request. This just filters out odd stuff. |
30 | */ | 28 | */ |
@@ -150,26 +148,6 @@ static void mmc_queue_setup_discard(struct request_queue *q, | |||
150 | queue_flag_set_unlocked(QUEUE_FLAG_SECERASE, q); | 148 | queue_flag_set_unlocked(QUEUE_FLAG_SECERASE, q); |
151 | } | 149 | } |
152 | 150 | ||
153 | static unsigned int mmc_queue_calc_bouncesz(struct mmc_host *host) | ||
154 | { | ||
155 | unsigned int bouncesz = MMC_QUEUE_BOUNCESZ; | ||
156 | |||
157 | if (host->max_segs != 1 || (host->caps & MMC_CAP_NO_BOUNCE_BUFF)) | ||
158 | return 0; | ||
159 | |||
160 | if (bouncesz > host->max_req_size) | ||
161 | bouncesz = host->max_req_size; | ||
162 | if (bouncesz > host->max_seg_size) | ||
163 | bouncesz = host->max_seg_size; | ||
164 | if (bouncesz > host->max_blk_count * 512) | ||
165 | bouncesz = host->max_blk_count * 512; | ||
166 | |||
167 | if (bouncesz <= 512) | ||
168 | return 0; | ||
169 | |||
170 | return bouncesz; | ||
171 | } | ||
172 | |||
173 | /** | 151 | /** |
174 | * mmc_init_request() - initialize the MMC-specific per-request data | 152 | * mmc_init_request() - initialize the MMC-specific per-request data |
175 | * @q: the request queue | 153 | * @q: the request queue |
@@ -184,26 +162,9 @@ static int mmc_init_request(struct request_queue *q, struct request *req, | |||
184 | struct mmc_card *card = mq->card; | 162 | struct mmc_card *card = mq->card; |
185 | struct mmc_host *host = card->host; | 163 | struct mmc_host *host = card->host; |
186 | 164 | ||
187 | if (card->bouncesz) { | 165 | mq_rq->sg = mmc_alloc_sg(host->max_segs, gfp); |
188 | mq_rq->bounce_buf = kmalloc(card->bouncesz, gfp); | 166 | if (!mq_rq->sg) |
189 | if (!mq_rq->bounce_buf) | 167 | return -ENOMEM; |
190 | return -ENOMEM; | ||
191 | if (card->bouncesz > 512) { | ||
192 | mq_rq->sg = mmc_alloc_sg(1, gfp); | ||
193 | if (!mq_rq->sg) | ||
194 | return -ENOMEM; | ||
195 | mq_rq->bounce_sg = mmc_alloc_sg(card->bouncesz / 512, | ||
196 | gfp); | ||
197 | if (!mq_rq->bounce_sg) | ||
198 | return -ENOMEM; | ||
199 | } | ||
200 | } else { | ||
201 | mq_rq->bounce_buf = NULL; | ||
202 | mq_rq->bounce_sg = NULL; | ||
203 | mq_rq->sg = mmc_alloc_sg(host->max_segs, gfp); | ||
204 | if (!mq_rq->sg) | ||
205 | return -ENOMEM; | ||
206 | } | ||
207 | 168 | ||
208 | return 0; | 169 | return 0; |
209 | } | 170 | } |
@@ -212,13 +173,6 @@ static void mmc_exit_request(struct request_queue *q, struct request *req) | |||
212 | { | 173 | { |
213 | struct mmc_queue_req *mq_rq = req_to_mmc_queue_req(req); | 174 | struct mmc_queue_req *mq_rq = req_to_mmc_queue_req(req); |
214 | 175 | ||
215 | /* It is OK to kfree(NULL) so this will be smooth */ | ||
216 | kfree(mq_rq->bounce_sg); | ||
217 | mq_rq->bounce_sg = NULL; | ||
218 | |||
219 | kfree(mq_rq->bounce_buf); | ||
220 | mq_rq->bounce_buf = NULL; | ||
221 | |||
222 | kfree(mq_rq->sg); | 176 | kfree(mq_rq->sg); |
223 | mq_rq->sg = NULL; | 177 | mq_rq->sg = NULL; |
224 | } | 178 | } |
@@ -265,18 +219,11 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card, | |||
265 | if (mmc_can_erase(card)) | 219 | if (mmc_can_erase(card)) |
266 | mmc_queue_setup_discard(mq->queue, card); | 220 | mmc_queue_setup_discard(mq->queue, card); |
267 | 221 | ||
268 | card->bouncesz = mmc_queue_calc_bouncesz(host); | 222 | blk_queue_bounce_limit(mq->queue, limit); |
269 | if (card->bouncesz) { | 223 | blk_queue_max_hw_sectors(mq->queue, |
270 | blk_queue_max_hw_sectors(mq->queue, card->bouncesz / 512); | 224 | min(host->max_blk_count, host->max_req_size / 512)); |
271 | blk_queue_max_segments(mq->queue, card->bouncesz / 512); | 225 | blk_queue_max_segments(mq->queue, host->max_segs); |
272 | blk_queue_max_segment_size(mq->queue, card->bouncesz); | 226 | blk_queue_max_segment_size(mq->queue, host->max_seg_size); |
273 | } else { | ||
274 | blk_queue_bounce_limit(mq->queue, limit); | ||
275 | blk_queue_max_hw_sectors(mq->queue, | ||
276 | min(host->max_blk_count, host->max_req_size / 512)); | ||
277 | blk_queue_max_segments(mq->queue, host->max_segs); | ||
278 | blk_queue_max_segment_size(mq->queue, host->max_seg_size); | ||
279 | } | ||
280 | 227 | ||
281 | sema_init(&mq->thread_sem, 1); | 228 | sema_init(&mq->thread_sem, 1); |
282 | 229 | ||
@@ -365,56 +312,7 @@ void mmc_queue_resume(struct mmc_queue *mq) | |||
365 | */ | 312 | */ |
366 | unsigned int mmc_queue_map_sg(struct mmc_queue *mq, struct mmc_queue_req *mqrq) | 313 | unsigned int mmc_queue_map_sg(struct mmc_queue *mq, struct mmc_queue_req *mqrq) |
367 | { | 314 | { |
368 | unsigned int sg_len; | ||
369 | size_t buflen; | ||
370 | struct scatterlist *sg; | ||
371 | struct request *req = mmc_queue_req_to_req(mqrq); | 315 | struct request *req = mmc_queue_req_to_req(mqrq); |
372 | int i; | ||
373 | |||
374 | if (!mqrq->bounce_buf) | ||
375 | return blk_rq_map_sg(mq->queue, req, mqrq->sg); | ||
376 | |||
377 | sg_len = blk_rq_map_sg(mq->queue, req, mqrq->bounce_sg); | ||
378 | |||
379 | mqrq->bounce_sg_len = sg_len; | ||
380 | |||
381 | buflen = 0; | ||
382 | for_each_sg(mqrq->bounce_sg, sg, sg_len, i) | ||
383 | buflen += sg->length; | ||
384 | |||
385 | sg_init_one(mqrq->sg, mqrq->bounce_buf, buflen); | ||
386 | |||
387 | return 1; | ||
388 | } | ||
389 | |||
390 | /* | ||
391 | * If writing, bounce the data to the buffer before the request | ||
392 | * is sent to the host driver | ||
393 | */ | ||
394 | void mmc_queue_bounce_pre(struct mmc_queue_req *mqrq) | ||
395 | { | ||
396 | if (!mqrq->bounce_buf) | ||
397 | return; | ||
398 | |||
399 | if (rq_data_dir(mmc_queue_req_to_req(mqrq)) != WRITE) | ||
400 | return; | ||
401 | |||
402 | sg_copy_to_buffer(mqrq->bounce_sg, mqrq->bounce_sg_len, | ||
403 | mqrq->bounce_buf, mqrq->sg[0].length); | ||
404 | } | ||
405 | |||
406 | /* | ||
407 | * If reading, bounce the data from the buffer after the request | ||
408 | * has been handled by the host driver | ||
409 | */ | ||
410 | void mmc_queue_bounce_post(struct mmc_queue_req *mqrq) | ||
411 | { | ||
412 | if (!mqrq->bounce_buf) | ||
413 | return; | ||
414 | |||
415 | if (rq_data_dir(mmc_queue_req_to_req(mqrq)) != READ) | ||
416 | return; | ||
417 | 316 | ||
418 | sg_copy_from_buffer(mqrq->bounce_sg, mqrq->bounce_sg_len, | 317 | return blk_rq_map_sg(mq->queue, req, mqrq->sg); |
419 | mqrq->bounce_buf, mqrq->sg[0].length); | ||
420 | } | 318 | } |
diff --git a/drivers/mmc/core/queue.h b/drivers/mmc/core/queue.h index 04fc89360a7a..f18d3f656baa 100644 --- a/drivers/mmc/core/queue.h +++ b/drivers/mmc/core/queue.h | |||
@@ -49,9 +49,6 @@ enum mmc_drv_op { | |||
49 | struct mmc_queue_req { | 49 | struct mmc_queue_req { |
50 | struct mmc_blk_request brq; | 50 | struct mmc_blk_request brq; |
51 | struct scatterlist *sg; | 51 | struct scatterlist *sg; |
52 | char *bounce_buf; | ||
53 | struct scatterlist *bounce_sg; | ||
54 | unsigned int bounce_sg_len; | ||
55 | struct mmc_async_req areq; | 52 | struct mmc_async_req areq; |
56 | enum mmc_drv_op drv_op; | 53 | enum mmc_drv_op drv_op; |
57 | int drv_op_result; | 54 | int drv_op_result; |
@@ -81,11 +78,8 @@ extern int mmc_init_queue(struct mmc_queue *, struct mmc_card *, spinlock_t *, | |||
81 | extern void mmc_cleanup_queue(struct mmc_queue *); | 78 | extern void mmc_cleanup_queue(struct mmc_queue *); |
82 | extern void mmc_queue_suspend(struct mmc_queue *); | 79 | extern void mmc_queue_suspend(struct mmc_queue *); |
83 | extern void mmc_queue_resume(struct mmc_queue *); | 80 | extern void mmc_queue_resume(struct mmc_queue *); |
84 | |||
85 | extern unsigned int mmc_queue_map_sg(struct mmc_queue *, | 81 | extern unsigned int mmc_queue_map_sg(struct mmc_queue *, |
86 | struct mmc_queue_req *); | 82 | struct mmc_queue_req *); |
87 | extern void mmc_queue_bounce_pre(struct mmc_queue_req *); | ||
88 | extern void mmc_queue_bounce_post(struct mmc_queue_req *); | ||
89 | 83 | ||
90 | extern int mmc_access_rpmb(struct mmc_queue *); | 84 | extern int mmc_access_rpmb(struct mmc_queue *); |
91 | 85 | ||
diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig index 02179ed2a40d..8c15637178ff 100644 --- a/drivers/mmc/host/Kconfig +++ b/drivers/mmc/host/Kconfig | |||
@@ -5,7 +5,7 @@ | |||
5 | comment "MMC/SD/SDIO Host Controller Drivers" | 5 | comment "MMC/SD/SDIO Host Controller Drivers" |
6 | 6 | ||
7 | config MMC_DEBUG | 7 | config MMC_DEBUG |
8 | bool "MMC host drivers debugginG" | 8 | bool "MMC host drivers debugging" |
9 | depends on MMC != n | 9 | depends on MMC != n |
10 | help | 10 | help |
11 | This is an option for use by developers; most people should | 11 | This is an option for use by developers; most people should |
diff --git a/drivers/mmc/host/cavium-thunderx.c b/drivers/mmc/host/cavium-thunderx.c index b9cc95998799..eee08d81b242 100644 --- a/drivers/mmc/host/cavium-thunderx.c +++ b/drivers/mmc/host/cavium-thunderx.c | |||
@@ -7,6 +7,7 @@ | |||
7 | * | 7 | * |
8 | * Copyright (C) 2016 Cavium Inc. | 8 | * Copyright (C) 2016 Cavium Inc. |
9 | */ | 9 | */ |
10 | #include <linux/device.h> | ||
10 | #include <linux/dma-mapping.h> | 11 | #include <linux/dma-mapping.h> |
11 | #include <linux/interrupt.h> | 12 | #include <linux/interrupt.h> |
12 | #include <linux/mmc/mmc.h> | 13 | #include <linux/mmc/mmc.h> |
@@ -149,8 +150,11 @@ error: | |||
149 | for (i = 0; i < CAVIUM_MAX_MMC; i++) { | 150 | for (i = 0; i < CAVIUM_MAX_MMC; i++) { |
150 | if (host->slot[i]) | 151 | if (host->slot[i]) |
151 | cvm_mmc_of_slot_remove(host->slot[i]); | 152 | cvm_mmc_of_slot_remove(host->slot[i]); |
152 | if (host->slot_pdev[i]) | 153 | if (host->slot_pdev[i]) { |
154 | get_device(&host->slot_pdev[i]->dev); | ||
153 | of_platform_device_destroy(&host->slot_pdev[i]->dev, NULL); | 155 | of_platform_device_destroy(&host->slot_pdev[i]->dev, NULL); |
156 | put_device(&host->slot_pdev[i]->dev); | ||
157 | } | ||
154 | } | 158 | } |
155 | clk_disable_unprepare(host->clk); | 159 | clk_disable_unprepare(host->clk); |
156 | return ret; | 160 | return ret; |
diff --git a/drivers/mmc/host/cavium.c b/drivers/mmc/host/cavium.c index 27fb625cbcf3..fbd29f00fca0 100644 --- a/drivers/mmc/host/cavium.c +++ b/drivers/mmc/host/cavium.c | |||
@@ -1038,7 +1038,7 @@ int cvm_mmc_of_slot_probe(struct device *dev, struct cvm_mmc_host *host) | |||
1038 | */ | 1038 | */ |
1039 | mmc->caps |= MMC_CAP_MMC_HIGHSPEED | MMC_CAP_SD_HIGHSPEED | | 1039 | mmc->caps |= MMC_CAP_MMC_HIGHSPEED | MMC_CAP_SD_HIGHSPEED | |
1040 | MMC_CAP_ERASE | MMC_CAP_CMD23 | MMC_CAP_POWER_OFF_CARD | | 1040 | MMC_CAP_ERASE | MMC_CAP_CMD23 | MMC_CAP_POWER_OFF_CARD | |
1041 | MMC_CAP_3_3V_DDR | MMC_CAP_NO_BOUNCE_BUFF; | 1041 | MMC_CAP_3_3V_DDR; |
1042 | 1042 | ||
1043 | if (host->use_sg) | 1043 | if (host->use_sg) |
1044 | mmc->max_segs = 16; | 1044 | mmc->max_segs = 16; |
diff --git a/drivers/mmc/host/meson-gx-mmc.c b/drivers/mmc/host/meson-gx-mmc.c index c885c2d4b904..85745ef179e2 100644 --- a/drivers/mmc/host/meson-gx-mmc.c +++ b/drivers/mmc/host/meson-gx-mmc.c | |||
@@ -531,8 +531,7 @@ static int meson_mmc_clk_init(struct meson_host *host) | |||
531 | div->shift = __ffs(CLK_DIV_MASK); | 531 | div->shift = __ffs(CLK_DIV_MASK); |
532 | div->width = __builtin_popcountl(CLK_DIV_MASK); | 532 | div->width = __builtin_popcountl(CLK_DIV_MASK); |
533 | div->hw.init = &init; | 533 | div->hw.init = &init; |
534 | div->flags = (CLK_DIVIDER_ONE_BASED | | 534 | div->flags = CLK_DIVIDER_ONE_BASED; |
535 | CLK_DIVIDER_ROUND_CLOSEST); | ||
536 | 535 | ||
537 | clk = devm_clk_register(host->dev, &div->hw); | 536 | clk = devm_clk_register(host->dev, &div->hw); |
538 | if (WARN_ON(IS_ERR(clk))) | 537 | if (WARN_ON(IS_ERR(clk))) |
@@ -717,6 +716,22 @@ static int meson_mmc_clk_phase_tuning(struct mmc_host *mmc, u32 opcode, | |||
717 | static int meson_mmc_execute_tuning(struct mmc_host *mmc, u32 opcode) | 716 | static int meson_mmc_execute_tuning(struct mmc_host *mmc, u32 opcode) |
718 | { | 717 | { |
719 | struct meson_host *host = mmc_priv(mmc); | 718 | struct meson_host *host = mmc_priv(mmc); |
719 | int ret; | ||
720 | |||
721 | /* | ||
722 | * If this is the initial tuning, try to get a sane Rx starting | ||
723 | * phase before doing the actual tuning. | ||
724 | */ | ||
725 | if (!mmc->doing_retune) { | ||
726 | ret = meson_mmc_clk_phase_tuning(mmc, opcode, host->rx_clk); | ||
727 | |||
728 | if (ret) | ||
729 | return ret; | ||
730 | } | ||
731 | |||
732 | ret = meson_mmc_clk_phase_tuning(mmc, opcode, host->tx_clk); | ||
733 | if (ret) | ||
734 | return ret; | ||
720 | 735 | ||
721 | return meson_mmc_clk_phase_tuning(mmc, opcode, host->rx_clk); | 736 | return meson_mmc_clk_phase_tuning(mmc, opcode, host->rx_clk); |
722 | } | 737 | } |
@@ -746,6 +761,11 @@ static void meson_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) | |||
746 | case MMC_POWER_UP: | 761 | case MMC_POWER_UP: |
747 | if (!IS_ERR(mmc->supply.vmmc)) | 762 | if (!IS_ERR(mmc->supply.vmmc)) |
748 | mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, ios->vdd); | 763 | mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, ios->vdd); |
764 | |||
765 | /* Reset phases */ | ||
766 | clk_set_phase(host->rx_clk, 0); | ||
767 | clk_set_phase(host->tx_clk, 270); | ||
768 | |||
749 | break; | 769 | break; |
750 | 770 | ||
751 | case MMC_POWER_ON: | 771 | case MMC_POWER_ON: |
@@ -759,8 +779,6 @@ static void meson_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) | |||
759 | host->vqmmc_enabled = true; | 779 | host->vqmmc_enabled = true; |
760 | } | 780 | } |
761 | 781 | ||
762 | /* Reset rx phase */ | ||
763 | clk_set_phase(host->rx_clk, 0); | ||
764 | break; | 782 | break; |
765 | } | 783 | } |
766 | 784 | ||
diff --git a/drivers/mmc/host/pxamci.c b/drivers/mmc/host/pxamci.c index 59ab194cb009..c763b404510f 100644 --- a/drivers/mmc/host/pxamci.c +++ b/drivers/mmc/host/pxamci.c | |||
@@ -702,11 +702,7 @@ static int pxamci_probe(struct platform_device *pdev) | |||
702 | 702 | ||
703 | pxamci_init_ocr(host); | 703 | pxamci_init_ocr(host); |
704 | 704 | ||
705 | /* | 705 | mmc->caps = 0; |
706 | * This architecture used to disable bounce buffers through its | ||
707 | * defconfig, now it is done at runtime as a host property. | ||
708 | */ | ||
709 | mmc->caps = MMC_CAP_NO_BOUNCE_BUFF; | ||
710 | host->cmdat = 0; | 706 | host->cmdat = 0; |
711 | if (!cpu_is_pxa25x()) { | 707 | if (!cpu_is_pxa25x()) { |
712 | mmc->caps |= MMC_CAP_4_BIT_DATA | MMC_CAP_SDIO_IRQ; | 708 | mmc->caps |= MMC_CAP_4_BIT_DATA | MMC_CAP_SDIO_IRQ; |
diff --git a/drivers/mmc/host/sdhci-pci-core.c b/drivers/mmc/host/sdhci-pci-core.c index bbaddf18a1b3..d0ccc6729fd2 100644 --- a/drivers/mmc/host/sdhci-pci-core.c +++ b/drivers/mmc/host/sdhci-pci-core.c | |||
@@ -392,6 +392,7 @@ static const struct sdhci_pci_fixes sdhci_intel_pch_sdio = { | |||
392 | 392 | ||
393 | enum { | 393 | enum { |
394 | INTEL_DSM_FNS = 0, | 394 | INTEL_DSM_FNS = 0, |
395 | INTEL_DSM_V18_SWITCH = 3, | ||
395 | INTEL_DSM_DRV_STRENGTH = 9, | 396 | INTEL_DSM_DRV_STRENGTH = 9, |
396 | INTEL_DSM_D3_RETUNE = 10, | 397 | INTEL_DSM_D3_RETUNE = 10, |
397 | }; | 398 | }; |
@@ -557,6 +558,19 @@ static void intel_hs400_enhanced_strobe(struct mmc_host *mmc, | |||
557 | sdhci_writel(host, val, INTEL_HS400_ES_REG); | 558 | sdhci_writel(host, val, INTEL_HS400_ES_REG); |
558 | } | 559 | } |
559 | 560 | ||
561 | static void sdhci_intel_voltage_switch(struct sdhci_host *host) | ||
562 | { | ||
563 | struct sdhci_pci_slot *slot = sdhci_priv(host); | ||
564 | struct intel_host *intel_host = sdhci_pci_priv(slot); | ||
565 | struct device *dev = &slot->chip->pdev->dev; | ||
566 | u32 result = 0; | ||
567 | int err; | ||
568 | |||
569 | err = intel_dsm(intel_host, dev, INTEL_DSM_V18_SWITCH, &result); | ||
570 | pr_debug("%s: %s DSM error %d result %u\n", | ||
571 | mmc_hostname(host->mmc), __func__, err, result); | ||
572 | } | ||
573 | |||
560 | static const struct sdhci_ops sdhci_intel_byt_ops = { | 574 | static const struct sdhci_ops sdhci_intel_byt_ops = { |
561 | .set_clock = sdhci_set_clock, | 575 | .set_clock = sdhci_set_clock, |
562 | .set_power = sdhci_intel_set_power, | 576 | .set_power = sdhci_intel_set_power, |
@@ -565,6 +579,7 @@ static const struct sdhci_ops sdhci_intel_byt_ops = { | |||
565 | .reset = sdhci_reset, | 579 | .reset = sdhci_reset, |
566 | .set_uhs_signaling = sdhci_set_uhs_signaling, | 580 | .set_uhs_signaling = sdhci_set_uhs_signaling, |
567 | .hw_reset = sdhci_pci_hw_reset, | 581 | .hw_reset = sdhci_pci_hw_reset, |
582 | .voltage_switch = sdhci_intel_voltage_switch, | ||
568 | }; | 583 | }; |
569 | 584 | ||
570 | static void byt_read_dsm(struct sdhci_pci_slot *slot) | 585 | static void byt_read_dsm(struct sdhci_pci_slot *slot) |
diff --git a/drivers/mmc/host/sdhci-xenon.c b/drivers/mmc/host/sdhci-xenon.c index 2eec2e652c53..0842bbc2d7ad 100644 --- a/drivers/mmc/host/sdhci-xenon.c +++ b/drivers/mmc/host/sdhci-xenon.c | |||
@@ -466,6 +466,7 @@ static int xenon_probe(struct platform_device *pdev) | |||
466 | { | 466 | { |
467 | struct sdhci_pltfm_host *pltfm_host; | 467 | struct sdhci_pltfm_host *pltfm_host; |
468 | struct sdhci_host *host; | 468 | struct sdhci_host *host; |
469 | struct xenon_priv *priv; | ||
469 | int err; | 470 | int err; |
470 | 471 | ||
471 | host = sdhci_pltfm_init(pdev, &sdhci_xenon_pdata, | 472 | host = sdhci_pltfm_init(pdev, &sdhci_xenon_pdata, |
@@ -474,6 +475,7 @@ static int xenon_probe(struct platform_device *pdev) | |||
474 | return PTR_ERR(host); | 475 | return PTR_ERR(host); |
475 | 476 | ||
476 | pltfm_host = sdhci_priv(host); | 477 | pltfm_host = sdhci_priv(host); |
478 | priv = sdhci_pltfm_priv(pltfm_host); | ||
477 | 479 | ||
478 | /* | 480 | /* |
479 | * Link Xenon specific mmc_host_ops function, | 481 | * Link Xenon specific mmc_host_ops function, |
@@ -491,9 +493,20 @@ static int xenon_probe(struct platform_device *pdev) | |||
491 | if (err) | 493 | if (err) |
492 | goto free_pltfm; | 494 | goto free_pltfm; |
493 | 495 | ||
496 | priv->axi_clk = devm_clk_get(&pdev->dev, "axi"); | ||
497 | if (IS_ERR(priv->axi_clk)) { | ||
498 | err = PTR_ERR(priv->axi_clk); | ||
499 | if (err == -EPROBE_DEFER) | ||
500 | goto err_clk; | ||
501 | } else { | ||
502 | err = clk_prepare_enable(priv->axi_clk); | ||
503 | if (err) | ||
504 | goto err_clk; | ||
505 | } | ||
506 | |||
494 | err = mmc_of_parse(host->mmc); | 507 | err = mmc_of_parse(host->mmc); |
495 | if (err) | 508 | if (err) |
496 | goto err_clk; | 509 | goto err_clk_axi; |
497 | 510 | ||
498 | sdhci_get_of_property(pdev); | 511 | sdhci_get_of_property(pdev); |
499 | 512 | ||
@@ -502,11 +515,11 @@ static int xenon_probe(struct platform_device *pdev) | |||
502 | /* Xenon specific dt parse */ | 515 | /* Xenon specific dt parse */ |
503 | err = xenon_probe_dt(pdev); | 516 | err = xenon_probe_dt(pdev); |
504 | if (err) | 517 | if (err) |
505 | goto err_clk; | 518 | goto err_clk_axi; |
506 | 519 | ||
507 | err = xenon_sdhc_prepare(host); | 520 | err = xenon_sdhc_prepare(host); |
508 | if (err) | 521 | if (err) |
509 | goto err_clk; | 522 | goto err_clk_axi; |
510 | 523 | ||
511 | pm_runtime_get_noresume(&pdev->dev); | 524 | pm_runtime_get_noresume(&pdev->dev); |
512 | pm_runtime_set_active(&pdev->dev); | 525 | pm_runtime_set_active(&pdev->dev); |
@@ -527,6 +540,8 @@ remove_sdhc: | |||
527 | pm_runtime_disable(&pdev->dev); | 540 | pm_runtime_disable(&pdev->dev); |
528 | pm_runtime_put_noidle(&pdev->dev); | 541 | pm_runtime_put_noidle(&pdev->dev); |
529 | xenon_sdhc_unprepare(host); | 542 | xenon_sdhc_unprepare(host); |
543 | err_clk_axi: | ||
544 | clk_disable_unprepare(priv->axi_clk); | ||
530 | err_clk: | 545 | err_clk: |
531 | clk_disable_unprepare(pltfm_host->clk); | 546 | clk_disable_unprepare(pltfm_host->clk); |
532 | free_pltfm: | 547 | free_pltfm: |
@@ -538,6 +553,7 @@ static int xenon_remove(struct platform_device *pdev) | |||
538 | { | 553 | { |
539 | struct sdhci_host *host = platform_get_drvdata(pdev); | 554 | struct sdhci_host *host = platform_get_drvdata(pdev); |
540 | struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); | 555 | struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); |
556 | struct xenon_priv *priv = sdhci_pltfm_priv(pltfm_host); | ||
541 | 557 | ||
542 | pm_runtime_get_sync(&pdev->dev); | 558 | pm_runtime_get_sync(&pdev->dev); |
543 | pm_runtime_disable(&pdev->dev); | 559 | pm_runtime_disable(&pdev->dev); |
@@ -546,7 +562,7 @@ static int xenon_remove(struct platform_device *pdev) | |||
546 | sdhci_remove_host(host, 0); | 562 | sdhci_remove_host(host, 0); |
547 | 563 | ||
548 | xenon_sdhc_unprepare(host); | 564 | xenon_sdhc_unprepare(host); |
549 | 565 | clk_disable_unprepare(priv->axi_clk); | |
550 | clk_disable_unprepare(pltfm_host->clk); | 566 | clk_disable_unprepare(pltfm_host->clk); |
551 | 567 | ||
552 | sdhci_pltfm_free(pdev); | 568 | sdhci_pltfm_free(pdev); |
diff --git a/drivers/mmc/host/sdhci-xenon.h b/drivers/mmc/host/sdhci-xenon.h index 2bc0510c0769..9994995c7c56 100644 --- a/drivers/mmc/host/sdhci-xenon.h +++ b/drivers/mmc/host/sdhci-xenon.h | |||
@@ -83,6 +83,7 @@ struct xenon_priv { | |||
83 | unsigned char bus_width; | 83 | unsigned char bus_width; |
84 | unsigned char timing; | 84 | unsigned char timing; |
85 | unsigned int clock; | 85 | unsigned int clock; |
86 | struct clk *axi_clk; | ||
86 | 87 | ||
87 | int phy_type; | 88 | int phy_type; |
88 | /* | 89 | /* |
diff --git a/drivers/mmc/host/tmio_mmc_core.c b/drivers/mmc/host/tmio_mmc_core.c index 12cf8288d663..a7293e186e03 100644 --- a/drivers/mmc/host/tmio_mmc_core.c +++ b/drivers/mmc/host/tmio_mmc_core.c | |||
@@ -129,50 +129,6 @@ static int tmio_mmc_next_sg(struct tmio_mmc_host *host) | |||
129 | 129 | ||
130 | #define CMDREQ_TIMEOUT 5000 | 130 | #define CMDREQ_TIMEOUT 5000 |
131 | 131 | ||
132 | #ifdef CONFIG_MMC_DEBUG | ||
133 | |||
134 | #define STATUS_TO_TEXT(a, status, i) \ | ||
135 | do { \ | ||
136 | if ((status) & TMIO_STAT_##a) { \ | ||
137 | if ((i)++) \ | ||
138 | printk(KERN_DEBUG " | "); \ | ||
139 | printk(KERN_DEBUG #a); \ | ||
140 | } \ | ||
141 | } while (0) | ||
142 | |||
143 | static void pr_debug_status(u32 status) | ||
144 | { | ||
145 | int i = 0; | ||
146 | |||
147 | pr_debug("status: %08x = ", status); | ||
148 | STATUS_TO_TEXT(CARD_REMOVE, status, i); | ||
149 | STATUS_TO_TEXT(CARD_INSERT, status, i); | ||
150 | STATUS_TO_TEXT(SIGSTATE, status, i); | ||
151 | STATUS_TO_TEXT(WRPROTECT, status, i); | ||
152 | STATUS_TO_TEXT(CARD_REMOVE_A, status, i); | ||
153 | STATUS_TO_TEXT(CARD_INSERT_A, status, i); | ||
154 | STATUS_TO_TEXT(SIGSTATE_A, status, i); | ||
155 | STATUS_TO_TEXT(CMD_IDX_ERR, status, i); | ||
156 | STATUS_TO_TEXT(STOPBIT_ERR, status, i); | ||
157 | STATUS_TO_TEXT(ILL_FUNC, status, i); | ||
158 | STATUS_TO_TEXT(CMD_BUSY, status, i); | ||
159 | STATUS_TO_TEXT(CMDRESPEND, status, i); | ||
160 | STATUS_TO_TEXT(DATAEND, status, i); | ||
161 | STATUS_TO_TEXT(CRCFAIL, status, i); | ||
162 | STATUS_TO_TEXT(DATATIMEOUT, status, i); | ||
163 | STATUS_TO_TEXT(CMDTIMEOUT, status, i); | ||
164 | STATUS_TO_TEXT(RXOVERFLOW, status, i); | ||
165 | STATUS_TO_TEXT(TXUNDERRUN, status, i); | ||
166 | STATUS_TO_TEXT(RXRDY, status, i); | ||
167 | STATUS_TO_TEXT(TXRQ, status, i); | ||
168 | STATUS_TO_TEXT(ILL_ACCESS, status, i); | ||
169 | printk("\n"); | ||
170 | } | ||
171 | |||
172 | #else | ||
173 | #define pr_debug_status(s) do { } while (0) | ||
174 | #endif | ||
175 | |||
176 | static void tmio_mmc_enable_sdio_irq(struct mmc_host *mmc, int enable) | 132 | static void tmio_mmc_enable_sdio_irq(struct mmc_host *mmc, int enable) |
177 | { | 133 | { |
178 | struct tmio_mmc_host *host = mmc_priv(mmc); | 134 | struct tmio_mmc_host *host = mmc_priv(mmc); |
@@ -762,9 +718,6 @@ irqreturn_t tmio_mmc_irq(int irq, void *devid) | |||
762 | status = sd_ctrl_read16_and_16_as_32(host, CTL_STATUS); | 718 | status = sd_ctrl_read16_and_16_as_32(host, CTL_STATUS); |
763 | ireg = status & TMIO_MASK_IRQ & ~host->sdcard_irq_mask; | 719 | ireg = status & TMIO_MASK_IRQ & ~host->sdcard_irq_mask; |
764 | 720 | ||
765 | pr_debug_status(status); | ||
766 | pr_debug_status(ireg); | ||
767 | |||
768 | /* Clear the status except the interrupt status */ | 721 | /* Clear the status except the interrupt status */ |
769 | sd_ctrl_write32_as_16_and_16(host, CTL_STATUS, TMIO_MASK_IRQ); | 722 | sd_ctrl_write32_as_16_and_16(host, CTL_STATUS, TMIO_MASK_IRQ); |
770 | 723 | ||
diff --git a/drivers/mtd/mtdpart.c b/drivers/mtd/mtdpart.c index 5736b0c90b33..a308e707392d 100644 --- a/drivers/mtd/mtdpart.c +++ b/drivers/mtd/mtdpart.c | |||
@@ -581,6 +581,14 @@ static struct mtd_part *allocate_partition(struct mtd_info *parent, | |||
581 | slave->mtd.erasesize = parent->erasesize; | 581 | slave->mtd.erasesize = parent->erasesize; |
582 | } | 582 | } |
583 | 583 | ||
584 | /* | ||
585 | * Slave erasesize might differ from the master one if the master | ||
586 | * exposes several regions with different erasesize. Adjust | ||
587 | * wr_alignment accordingly. | ||
588 | */ | ||
589 | if (!(slave->mtd.flags & MTD_NO_ERASE)) | ||
590 | wr_alignment = slave->mtd.erasesize; | ||
591 | |||
584 | tmp = slave->offset; | 592 | tmp = slave->offset; |
585 | remainder = do_div(tmp, wr_alignment); | 593 | remainder = do_div(tmp, wr_alignment); |
586 | if ((slave->mtd.flags & MTD_WRITEABLE) && remainder) { | 594 | if ((slave->mtd.flags & MTD_WRITEABLE) && remainder) { |
diff --git a/drivers/mtd/nand/atmel/pmecc.c b/drivers/mtd/nand/atmel/pmecc.c index 146af8218314..8268636675ef 100644 --- a/drivers/mtd/nand/atmel/pmecc.c +++ b/drivers/mtd/nand/atmel/pmecc.c | |||
@@ -363,7 +363,7 @@ atmel_pmecc_create_user(struct atmel_pmecc *pmecc, | |||
363 | size += (req->ecc.strength + 1) * sizeof(u16); | 363 | size += (req->ecc.strength + 1) * sizeof(u16); |
364 | /* Reserve space for mu, dmu and delta. */ | 364 | /* Reserve space for mu, dmu and delta. */ |
365 | size = ALIGN(size, sizeof(s32)); | 365 | size = ALIGN(size, sizeof(s32)); |
366 | size += (req->ecc.strength + 1) * sizeof(s32); | 366 | size += (req->ecc.strength + 1) * sizeof(s32) * 3; |
367 | 367 | ||
368 | user = kzalloc(size, GFP_KERNEL); | 368 | user = kzalloc(size, GFP_KERNEL); |
369 | if (!user) | 369 | if (!user) |
diff --git a/drivers/mtd/nand/lpc32xx_mlc.c b/drivers/mtd/nand/lpc32xx_mlc.c index c3bb358ef01e..5796468db653 100644 --- a/drivers/mtd/nand/lpc32xx_mlc.c +++ b/drivers/mtd/nand/lpc32xx_mlc.c | |||
@@ -707,7 +707,7 @@ static int lpc32xx_nand_probe(struct platform_device *pdev) | |||
707 | } | 707 | } |
708 | res = clk_prepare_enable(host->clk); | 708 | res = clk_prepare_enable(host->clk); |
709 | if (res) | 709 | if (res) |
710 | goto err_exit1; | 710 | goto err_put_clk; |
711 | 711 | ||
712 | nand_chip->cmd_ctrl = lpc32xx_nand_cmd_ctrl; | 712 | nand_chip->cmd_ctrl = lpc32xx_nand_cmd_ctrl; |
713 | nand_chip->dev_ready = lpc32xx_nand_device_ready; | 713 | nand_chip->dev_ready = lpc32xx_nand_device_ready; |
@@ -814,6 +814,7 @@ err_exit3: | |||
814 | dma_release_channel(host->dma_chan); | 814 | dma_release_channel(host->dma_chan); |
815 | err_exit2: | 815 | err_exit2: |
816 | clk_disable_unprepare(host->clk); | 816 | clk_disable_unprepare(host->clk); |
817 | err_put_clk: | ||
817 | clk_put(host->clk); | 818 | clk_put(host->clk); |
818 | err_exit1: | 819 | err_exit1: |
819 | lpc32xx_wp_enable(host); | 820 | lpc32xx_wp_enable(host); |
diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c index bcc8cef1c615..12edaae17d81 100644 --- a/drivers/mtd/nand/nand_base.c +++ b/drivers/mtd/nand/nand_base.c | |||
@@ -2668,7 +2668,7 @@ static uint8_t *nand_fill_oob(struct mtd_info *mtd, uint8_t *oob, size_t len, | |||
2668 | static int nand_do_write_ops(struct mtd_info *mtd, loff_t to, | 2668 | static int nand_do_write_ops(struct mtd_info *mtd, loff_t to, |
2669 | struct mtd_oob_ops *ops) | 2669 | struct mtd_oob_ops *ops) |
2670 | { | 2670 | { |
2671 | int chipnr, realpage, page, blockmask, column; | 2671 | int chipnr, realpage, page, column; |
2672 | struct nand_chip *chip = mtd_to_nand(mtd); | 2672 | struct nand_chip *chip = mtd_to_nand(mtd); |
2673 | uint32_t writelen = ops->len; | 2673 | uint32_t writelen = ops->len; |
2674 | 2674 | ||
@@ -2704,7 +2704,6 @@ static int nand_do_write_ops(struct mtd_info *mtd, loff_t to, | |||
2704 | 2704 | ||
2705 | realpage = (int)(to >> chip->page_shift); | 2705 | realpage = (int)(to >> chip->page_shift); |
2706 | page = realpage & chip->pagemask; | 2706 | page = realpage & chip->pagemask; |
2707 | blockmask = (1 << (chip->phys_erase_shift - chip->page_shift)) - 1; | ||
2708 | 2707 | ||
2709 | /* Invalidate the page cache, when we write to the cached page */ | 2708 | /* Invalidate the page cache, when we write to the cached page */ |
2710 | if (to <= ((loff_t)chip->pagebuf << chip->page_shift) && | 2709 | if (to <= ((loff_t)chip->pagebuf << chip->page_shift) && |
diff --git a/drivers/mtd/spi-nor/spi-nor.c b/drivers/mtd/spi-nor/spi-nor.c index cf1d4a15e10a..19c000722cbc 100644 --- a/drivers/mtd/spi-nor/spi-nor.c +++ b/drivers/mtd/spi-nor/spi-nor.c | |||
@@ -1784,7 +1784,7 @@ spi_nor_set_pp_settings(struct spi_nor_pp_command *pp, | |||
1784 | * @nor: pointer to a 'struct spi_nor' | 1784 | * @nor: pointer to a 'struct spi_nor' |
1785 | * @addr: offset in the SFDP area to start reading data from | 1785 | * @addr: offset in the SFDP area to start reading data from |
1786 | * @len: number of bytes to read | 1786 | * @len: number of bytes to read |
1787 | * @buf: buffer where the SFDP data are copied into | 1787 | * @buf: buffer where the SFDP data are copied into (dma-safe memory) |
1788 | * | 1788 | * |
1789 | * Whatever the actual numbers of bytes for address and dummy cycles are | 1789 | * Whatever the actual numbers of bytes for address and dummy cycles are |
1790 | * for (Fast) Read commands, the Read SFDP (5Ah) instruction is always | 1790 | * for (Fast) Read commands, the Read SFDP (5Ah) instruction is always |
@@ -1829,6 +1829,36 @@ read_err: | |||
1829 | return ret; | 1829 | return ret; |
1830 | } | 1830 | } |
1831 | 1831 | ||
1832 | /** | ||
1833 | * spi_nor_read_sfdp_dma_unsafe() - read Serial Flash Discoverable Parameters. | ||
1834 | * @nor: pointer to a 'struct spi_nor' | ||
1835 | * @addr: offset in the SFDP area to start reading data from | ||
1836 | * @len: number of bytes to read | ||
1837 | * @buf: buffer where the SFDP data are copied into | ||
1838 | * | ||
1839 | * Wrap spi_nor_read_sfdp() using a kmalloc'ed bounce buffer as @buf is now not | ||
1840 | * guaranteed to be dma-safe. | ||
1841 | * | ||
1842 | * Return: -ENOMEM if kmalloc() fails, the return code of spi_nor_read_sfdp() | ||
1843 | * otherwise. | ||
1844 | */ | ||
1845 | static int spi_nor_read_sfdp_dma_unsafe(struct spi_nor *nor, u32 addr, | ||
1846 | size_t len, void *buf) | ||
1847 | { | ||
1848 | void *dma_safe_buf; | ||
1849 | int ret; | ||
1850 | |||
1851 | dma_safe_buf = kmalloc(len, GFP_KERNEL); | ||
1852 | if (!dma_safe_buf) | ||
1853 | return -ENOMEM; | ||
1854 | |||
1855 | ret = spi_nor_read_sfdp(nor, addr, len, dma_safe_buf); | ||
1856 | memcpy(buf, dma_safe_buf, len); | ||
1857 | kfree(dma_safe_buf); | ||
1858 | |||
1859 | return ret; | ||
1860 | } | ||
1861 | |||
1832 | struct sfdp_parameter_header { | 1862 | struct sfdp_parameter_header { |
1833 | u8 id_lsb; | 1863 | u8 id_lsb; |
1834 | u8 minor; | 1864 | u8 minor; |
@@ -2101,7 +2131,7 @@ static int spi_nor_parse_bfpt(struct spi_nor *nor, | |||
2101 | bfpt_header->length * sizeof(u32)); | 2131 | bfpt_header->length * sizeof(u32)); |
2102 | addr = SFDP_PARAM_HEADER_PTP(bfpt_header); | 2132 | addr = SFDP_PARAM_HEADER_PTP(bfpt_header); |
2103 | memset(&bfpt, 0, sizeof(bfpt)); | 2133 | memset(&bfpt, 0, sizeof(bfpt)); |
2104 | err = spi_nor_read_sfdp(nor, addr, len, &bfpt); | 2134 | err = spi_nor_read_sfdp_dma_unsafe(nor, addr, len, &bfpt); |
2105 | if (err < 0) | 2135 | if (err < 0) |
2106 | return err; | 2136 | return err; |
2107 | 2137 | ||
@@ -2127,6 +2157,15 @@ static int spi_nor_parse_bfpt(struct spi_nor *nor, | |||
2127 | params->size = bfpt.dwords[BFPT_DWORD(2)]; | 2157 | params->size = bfpt.dwords[BFPT_DWORD(2)]; |
2128 | if (params->size & BIT(31)) { | 2158 | if (params->size & BIT(31)) { |
2129 | params->size &= ~BIT(31); | 2159 | params->size &= ~BIT(31); |
2160 | |||
2161 | /* | ||
2162 | * Prevent overflows on params->size. Anyway, a NOR of 2^64 | ||
2163 | * bits is unlikely to exist so this error probably means | ||
2164 | * the BFPT we are reading is corrupted/wrong. | ||
2165 | */ | ||
2166 | if (params->size > 63) | ||
2167 | return -EINVAL; | ||
2168 | |||
2130 | params->size = 1ULL << params->size; | 2169 | params->size = 1ULL << params->size; |
2131 | } else { | 2170 | } else { |
2132 | params->size++; | 2171 | params->size++; |
@@ -2243,7 +2282,7 @@ static int spi_nor_parse_sfdp(struct spi_nor *nor, | |||
2243 | int i, err; | 2282 | int i, err; |
2244 | 2283 | ||
2245 | /* Get the SFDP header. */ | 2284 | /* Get the SFDP header. */ |
2246 | err = spi_nor_read_sfdp(nor, 0, sizeof(header), &header); | 2285 | err = spi_nor_read_sfdp_dma_unsafe(nor, 0, sizeof(header), &header); |
2247 | if (err < 0) | 2286 | if (err < 0) |
2248 | return err; | 2287 | return err; |
2249 | 2288 | ||
diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c index c6678aa9b4ef..d74c7335c512 100644 --- a/drivers/net/dsa/mv88e6xxx/chip.c +++ b/drivers/net/dsa/mv88e6xxx/chip.c | |||
@@ -1100,6 +1100,10 @@ static int mv88e6xxx_port_check_hw_vlan(struct dsa_switch *ds, int port, | |||
1100 | }; | 1100 | }; |
1101 | int i, err; | 1101 | int i, err; |
1102 | 1102 | ||
1103 | /* DSA and CPU ports have to be members of multiple vlans */ | ||
1104 | if (dsa_is_dsa_port(ds, port) || dsa_is_cpu_port(ds, port)) | ||
1105 | return 0; | ||
1106 | |||
1103 | if (!vid_begin) | 1107 | if (!vid_begin) |
1104 | return -EOPNOTSUPP; | 1108 | return -EOPNOTSUPP; |
1105 | 1109 | ||
@@ -3947,7 +3951,9 @@ static void mv88e6xxx_remove(struct mdio_device *mdiodev) | |||
3947 | if (chip->irq > 0) { | 3951 | if (chip->irq > 0) { |
3948 | if (chip->info->g2_irqs > 0) | 3952 | if (chip->info->g2_irqs > 0) |
3949 | mv88e6xxx_g2_irq_free(chip); | 3953 | mv88e6xxx_g2_irq_free(chip); |
3954 | mutex_lock(&chip->reg_lock); | ||
3950 | mv88e6xxx_g1_irq_free(chip); | 3955 | mv88e6xxx_g1_irq_free(chip); |
3956 | mutex_unlock(&chip->reg_lock); | ||
3951 | } | 3957 | } |
3952 | } | 3958 | } |
3953 | 3959 | ||
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h b/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h index 214986436ece..0fdaaa643073 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h +++ b/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h | |||
@@ -51,6 +51,10 @@ | |||
51 | 51 | ||
52 | #define AQ_CFG_SKB_FRAGS_MAX 32U | 52 | #define AQ_CFG_SKB_FRAGS_MAX 32U |
53 | 53 | ||
54 | /* Number of descriptors available in one ring to resume this ring queue | ||
55 | */ | ||
56 | #define AQ_CFG_RESTART_DESC_THRES (AQ_CFG_SKB_FRAGS_MAX * 2) | ||
57 | |||
54 | #define AQ_CFG_NAPI_WEIGHT 64U | 58 | #define AQ_CFG_NAPI_WEIGHT 64U |
55 | 59 | ||
56 | #define AQ_CFG_MULTICAST_ADDRESS_MAX 32U | 60 | #define AQ_CFG_MULTICAST_ADDRESS_MAX 32U |
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c index 6ac9e2602d6d..0a5bb4114eb4 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c | |||
@@ -119,6 +119,35 @@ int aq_nic_cfg_start(struct aq_nic_s *self) | |||
119 | return 0; | 119 | return 0; |
120 | } | 120 | } |
121 | 121 | ||
122 | static int aq_nic_update_link_status(struct aq_nic_s *self) | ||
123 | { | ||
124 | int err = self->aq_hw_ops.hw_get_link_status(self->aq_hw); | ||
125 | |||
126 | if (err) | ||
127 | return err; | ||
128 | |||
129 | if (self->link_status.mbps != self->aq_hw->aq_link_status.mbps) | ||
130 | pr_info("%s: link change old %d new %d\n", | ||
131 | AQ_CFG_DRV_NAME, self->link_status.mbps, | ||
132 | self->aq_hw->aq_link_status.mbps); | ||
133 | |||
134 | self->link_status = self->aq_hw->aq_link_status; | ||
135 | if (!netif_carrier_ok(self->ndev) && self->link_status.mbps) { | ||
136 | aq_utils_obj_set(&self->header.flags, | ||
137 | AQ_NIC_FLAG_STARTED); | ||
138 | aq_utils_obj_clear(&self->header.flags, | ||
139 | AQ_NIC_LINK_DOWN); | ||
140 | netif_carrier_on(self->ndev); | ||
141 | netif_tx_wake_all_queues(self->ndev); | ||
142 | } | ||
143 | if (netif_carrier_ok(self->ndev) && !self->link_status.mbps) { | ||
144 | netif_carrier_off(self->ndev); | ||
145 | netif_tx_disable(self->ndev); | ||
146 | aq_utils_obj_set(&self->header.flags, AQ_NIC_LINK_DOWN); | ||
147 | } | ||
148 | return 0; | ||
149 | } | ||
150 | |||
122 | static void aq_nic_service_timer_cb(unsigned long param) | 151 | static void aq_nic_service_timer_cb(unsigned long param) |
123 | { | 152 | { |
124 | struct aq_nic_s *self = (struct aq_nic_s *)param; | 153 | struct aq_nic_s *self = (struct aq_nic_s *)param; |
@@ -131,26 +160,13 @@ static void aq_nic_service_timer_cb(unsigned long param) | |||
131 | if (aq_utils_obj_test(&self->header.flags, AQ_NIC_FLAGS_IS_NOT_READY)) | 160 | if (aq_utils_obj_test(&self->header.flags, AQ_NIC_FLAGS_IS_NOT_READY)) |
132 | goto err_exit; | 161 | goto err_exit; |
133 | 162 | ||
134 | err = self->aq_hw_ops.hw_get_link_status(self->aq_hw); | 163 | err = aq_nic_update_link_status(self); |
135 | if (err < 0) | 164 | if (err) |
136 | goto err_exit; | 165 | goto err_exit; |
137 | 166 | ||
138 | self->link_status = self->aq_hw->aq_link_status; | ||
139 | |||
140 | self->aq_hw_ops.hw_interrupt_moderation_set(self->aq_hw, | 167 | self->aq_hw_ops.hw_interrupt_moderation_set(self->aq_hw, |
141 | self->aq_nic_cfg.is_interrupt_moderation); | 168 | self->aq_nic_cfg.is_interrupt_moderation); |
142 | 169 | ||
143 | if (self->link_status.mbps) { | ||
144 | aq_utils_obj_set(&self->header.flags, | ||
145 | AQ_NIC_FLAG_STARTED); | ||
146 | aq_utils_obj_clear(&self->header.flags, | ||
147 | AQ_NIC_LINK_DOWN); | ||
148 | netif_carrier_on(self->ndev); | ||
149 | } else { | ||
150 | netif_carrier_off(self->ndev); | ||
151 | aq_utils_obj_set(&self->header.flags, AQ_NIC_LINK_DOWN); | ||
152 | } | ||
153 | |||
154 | memset(&stats_rx, 0U, sizeof(struct aq_ring_stats_rx_s)); | 170 | memset(&stats_rx, 0U, sizeof(struct aq_ring_stats_rx_s)); |
155 | memset(&stats_tx, 0U, sizeof(struct aq_ring_stats_tx_s)); | 171 | memset(&stats_tx, 0U, sizeof(struct aq_ring_stats_tx_s)); |
156 | for (i = AQ_DIMOF(self->aq_vec); i--;) { | 172 | for (i = AQ_DIMOF(self->aq_vec); i--;) { |
@@ -214,7 +230,6 @@ struct aq_nic_s *aq_nic_alloc_cold(const struct net_device_ops *ndev_ops, | |||
214 | SET_NETDEV_DEV(ndev, dev); | 230 | SET_NETDEV_DEV(ndev, dev); |
215 | 231 | ||
216 | ndev->if_port = port; | 232 | ndev->if_port = port; |
217 | ndev->min_mtu = ETH_MIN_MTU; | ||
218 | self->ndev = ndev; | 233 | self->ndev = ndev; |
219 | 234 | ||
220 | self->aq_pci_func = aq_pci_func; | 235 | self->aq_pci_func = aq_pci_func; |
@@ -241,7 +256,6 @@ err_exit: | |||
241 | int aq_nic_ndev_register(struct aq_nic_s *self) | 256 | int aq_nic_ndev_register(struct aq_nic_s *self) |
242 | { | 257 | { |
243 | int err = 0; | 258 | int err = 0; |
244 | unsigned int i = 0U; | ||
245 | 259 | ||
246 | if (!self->ndev) { | 260 | if (!self->ndev) { |
247 | err = -EINVAL; | 261 | err = -EINVAL; |
@@ -263,8 +277,7 @@ int aq_nic_ndev_register(struct aq_nic_s *self) | |||
263 | 277 | ||
264 | netif_carrier_off(self->ndev); | 278 | netif_carrier_off(self->ndev); |
265 | 279 | ||
266 | for (i = AQ_CFG_VECS_MAX; i--;) | 280 | netif_tx_disable(self->ndev); |
267 | aq_nic_ndev_queue_stop(self, i); | ||
268 | 281 | ||
269 | err = register_netdev(self->ndev); | 282 | err = register_netdev(self->ndev); |
270 | if (err < 0) | 283 | if (err < 0) |
@@ -283,6 +296,7 @@ int aq_nic_ndev_init(struct aq_nic_s *self) | |||
283 | self->ndev->features = aq_hw_caps->hw_features; | 296 | self->ndev->features = aq_hw_caps->hw_features; |
284 | self->ndev->priv_flags = aq_hw_caps->hw_priv_flags; | 297 | self->ndev->priv_flags = aq_hw_caps->hw_priv_flags; |
285 | self->ndev->mtu = aq_nic_cfg->mtu - ETH_HLEN; | 298 | self->ndev->mtu = aq_nic_cfg->mtu - ETH_HLEN; |
299 | self->ndev->max_mtu = self->aq_hw_caps.mtu - ETH_FCS_LEN - ETH_HLEN; | ||
286 | 300 | ||
287 | return 0; | 301 | return 0; |
288 | } | 302 | } |
@@ -318,12 +332,8 @@ struct aq_nic_s *aq_nic_alloc_hot(struct net_device *ndev) | |||
318 | err = -EINVAL; | 332 | err = -EINVAL; |
319 | goto err_exit; | 333 | goto err_exit; |
320 | } | 334 | } |
321 | if (netif_running(ndev)) { | 335 | if (netif_running(ndev)) |
322 | unsigned int i; | 336 | netif_tx_disable(ndev); |
323 | |||
324 | for (i = AQ_CFG_VECS_MAX; i--;) | ||
325 | netif_stop_subqueue(ndev, i); | ||
326 | } | ||
327 | 337 | ||
328 | for (self->aq_vecs = 0; self->aq_vecs < self->aq_nic_cfg.vecs; | 338 | for (self->aq_vecs = 0; self->aq_vecs < self->aq_nic_cfg.vecs; |
329 | self->aq_vecs++) { | 339 | self->aq_vecs++) { |
@@ -383,16 +393,6 @@ err_exit: | |||
383 | return err; | 393 | return err; |
384 | } | 394 | } |
385 | 395 | ||
386 | void aq_nic_ndev_queue_start(struct aq_nic_s *self, unsigned int idx) | ||
387 | { | ||
388 | netif_start_subqueue(self->ndev, idx); | ||
389 | } | ||
390 | |||
391 | void aq_nic_ndev_queue_stop(struct aq_nic_s *self, unsigned int idx) | ||
392 | { | ||
393 | netif_stop_subqueue(self->ndev, idx); | ||
394 | } | ||
395 | |||
396 | int aq_nic_start(struct aq_nic_s *self) | 396 | int aq_nic_start(struct aq_nic_s *self) |
397 | { | 397 | { |
398 | struct aq_vec_s *aq_vec = NULL; | 398 | struct aq_vec_s *aq_vec = NULL; |
@@ -451,10 +451,6 @@ int aq_nic_start(struct aq_nic_s *self) | |||
451 | goto err_exit; | 451 | goto err_exit; |
452 | } | 452 | } |
453 | 453 | ||
454 | for (i = 0U, aq_vec = self->aq_vec[0]; | ||
455 | self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i]) | ||
456 | aq_nic_ndev_queue_start(self, i); | ||
457 | |||
458 | err = netif_set_real_num_tx_queues(self->ndev, self->aq_vecs); | 454 | err = netif_set_real_num_tx_queues(self->ndev, self->aq_vecs); |
459 | if (err < 0) | 455 | if (err < 0) |
460 | goto err_exit; | 456 | goto err_exit; |
@@ -463,6 +459,8 @@ int aq_nic_start(struct aq_nic_s *self) | |||
463 | if (err < 0) | 459 | if (err < 0) |
464 | goto err_exit; | 460 | goto err_exit; |
465 | 461 | ||
462 | netif_tx_start_all_queues(self->ndev); | ||
463 | |||
466 | err_exit: | 464 | err_exit: |
467 | return err; | 465 | return err; |
468 | } | 466 | } |
@@ -475,6 +473,7 @@ static unsigned int aq_nic_map_skb(struct aq_nic_s *self, | |||
475 | unsigned int nr_frags = skb_shinfo(skb)->nr_frags; | 473 | unsigned int nr_frags = skb_shinfo(skb)->nr_frags; |
476 | unsigned int frag_count = 0U; | 474 | unsigned int frag_count = 0U; |
477 | unsigned int dx = ring->sw_tail; | 475 | unsigned int dx = ring->sw_tail; |
476 | struct aq_ring_buff_s *first = NULL; | ||
478 | struct aq_ring_buff_s *dx_buff = &ring->buff_ring[dx]; | 477 | struct aq_ring_buff_s *dx_buff = &ring->buff_ring[dx]; |
479 | 478 | ||
480 | if (unlikely(skb_is_gso(skb))) { | 479 | if (unlikely(skb_is_gso(skb))) { |
@@ -485,6 +484,7 @@ static unsigned int aq_nic_map_skb(struct aq_nic_s *self, | |||
485 | dx_buff->len_l4 = tcp_hdrlen(skb); | 484 | dx_buff->len_l4 = tcp_hdrlen(skb); |
486 | dx_buff->mss = skb_shinfo(skb)->gso_size; | 485 | dx_buff->mss = skb_shinfo(skb)->gso_size; |
487 | dx_buff->is_txc = 1U; | 486 | dx_buff->is_txc = 1U; |
487 | dx_buff->eop_index = 0xffffU; | ||
488 | 488 | ||
489 | dx_buff->is_ipv6 = | 489 | dx_buff->is_ipv6 = |
490 | (ip_hdr(skb)->version == 6) ? 1U : 0U; | 490 | (ip_hdr(skb)->version == 6) ? 1U : 0U; |
@@ -504,6 +504,7 @@ static unsigned int aq_nic_map_skb(struct aq_nic_s *self, | |||
504 | if (unlikely(dma_mapping_error(aq_nic_get_dev(self), dx_buff->pa))) | 504 | if (unlikely(dma_mapping_error(aq_nic_get_dev(self), dx_buff->pa))) |
505 | goto exit; | 505 | goto exit; |
506 | 506 | ||
507 | first = dx_buff; | ||
507 | dx_buff->len_pkt = skb->len; | 508 | dx_buff->len_pkt = skb->len; |
508 | dx_buff->is_sop = 1U; | 509 | dx_buff->is_sop = 1U; |
509 | dx_buff->is_mapped = 1U; | 510 | dx_buff->is_mapped = 1U; |
@@ -532,40 +533,46 @@ static unsigned int aq_nic_map_skb(struct aq_nic_s *self, | |||
532 | 533 | ||
533 | for (; nr_frags--; ++frag_count) { | 534 | for (; nr_frags--; ++frag_count) { |
534 | unsigned int frag_len = 0U; | 535 | unsigned int frag_len = 0U; |
536 | unsigned int buff_offset = 0U; | ||
537 | unsigned int buff_size = 0U; | ||
535 | dma_addr_t frag_pa; | 538 | dma_addr_t frag_pa; |
536 | skb_frag_t *frag = &skb_shinfo(skb)->frags[frag_count]; | 539 | skb_frag_t *frag = &skb_shinfo(skb)->frags[frag_count]; |
537 | 540 | ||
538 | frag_len = skb_frag_size(frag); | 541 | frag_len = skb_frag_size(frag); |
539 | frag_pa = skb_frag_dma_map(aq_nic_get_dev(self), frag, 0, | ||
540 | frag_len, DMA_TO_DEVICE); | ||
541 | 542 | ||
542 | if (unlikely(dma_mapping_error(aq_nic_get_dev(self), frag_pa))) | 543 | while (frag_len) { |
543 | goto mapping_error; | 544 | if (frag_len > AQ_CFG_TX_FRAME_MAX) |
545 | buff_size = AQ_CFG_TX_FRAME_MAX; | ||
546 | else | ||
547 | buff_size = frag_len; | ||
548 | |||
549 | frag_pa = skb_frag_dma_map(aq_nic_get_dev(self), | ||
550 | frag, | ||
551 | buff_offset, | ||
552 | buff_size, | ||
553 | DMA_TO_DEVICE); | ||
554 | |||
555 | if (unlikely(dma_mapping_error(aq_nic_get_dev(self), | ||
556 | frag_pa))) | ||
557 | goto mapping_error; | ||
544 | 558 | ||
545 | while (frag_len > AQ_CFG_TX_FRAME_MAX) { | ||
546 | dx = aq_ring_next_dx(ring, dx); | 559 | dx = aq_ring_next_dx(ring, dx); |
547 | dx_buff = &ring->buff_ring[dx]; | 560 | dx_buff = &ring->buff_ring[dx]; |
548 | 561 | ||
549 | dx_buff->flags = 0U; | 562 | dx_buff->flags = 0U; |
550 | dx_buff->len = AQ_CFG_TX_FRAME_MAX; | 563 | dx_buff->len = buff_size; |
551 | dx_buff->pa = frag_pa; | 564 | dx_buff->pa = frag_pa; |
552 | dx_buff->is_mapped = 1U; | 565 | dx_buff->is_mapped = 1U; |
566 | dx_buff->eop_index = 0xffffU; | ||
567 | |||
568 | frag_len -= buff_size; | ||
569 | buff_offset += buff_size; | ||
553 | 570 | ||
554 | frag_len -= AQ_CFG_TX_FRAME_MAX; | ||
555 | frag_pa += AQ_CFG_TX_FRAME_MAX; | ||
556 | ++ret; | 571 | ++ret; |
557 | } | 572 | } |
558 | |||
559 | dx = aq_ring_next_dx(ring, dx); | ||
560 | dx_buff = &ring->buff_ring[dx]; | ||
561 | |||
562 | dx_buff->flags = 0U; | ||
563 | dx_buff->len = frag_len; | ||
564 | dx_buff->pa = frag_pa; | ||
565 | dx_buff->is_mapped = 1U; | ||
566 | ++ret; | ||
567 | } | 573 | } |
568 | 574 | ||
575 | first->eop_index = dx; | ||
569 | dx_buff->is_eop = 1U; | 576 | dx_buff->is_eop = 1U; |
570 | dx_buff->skb = skb; | 577 | dx_buff->skb = skb; |
571 | goto exit; | 578 | goto exit; |
@@ -602,7 +609,6 @@ int aq_nic_xmit(struct aq_nic_s *self, struct sk_buff *skb) | |||
602 | unsigned int vec = skb->queue_mapping % self->aq_nic_cfg.vecs; | 609 | unsigned int vec = skb->queue_mapping % self->aq_nic_cfg.vecs; |
603 | unsigned int tc = 0U; | 610 | unsigned int tc = 0U; |
604 | int err = NETDEV_TX_OK; | 611 | int err = NETDEV_TX_OK; |
605 | bool is_nic_in_bad_state; | ||
606 | 612 | ||
607 | frags = skb_shinfo(skb)->nr_frags + 1; | 613 | frags = skb_shinfo(skb)->nr_frags + 1; |
608 | 614 | ||
@@ -613,13 +619,10 @@ int aq_nic_xmit(struct aq_nic_s *self, struct sk_buff *skb) | |||
613 | goto err_exit; | 619 | goto err_exit; |
614 | } | 620 | } |
615 | 621 | ||
616 | is_nic_in_bad_state = aq_utils_obj_test(&self->header.flags, | 622 | aq_ring_update_queue_state(ring); |
617 | AQ_NIC_FLAGS_IS_NOT_TX_READY) || | ||
618 | (aq_ring_avail_dx(ring) < | ||
619 | AQ_CFG_SKB_FRAGS_MAX); | ||
620 | 623 | ||
621 | if (is_nic_in_bad_state) { | 624 | /* Above status update may stop the queue. Check this. */ |
622 | aq_nic_ndev_queue_stop(self, ring->idx); | 625 | if (__netif_subqueue_stopped(self->ndev, ring->idx)) { |
623 | err = NETDEV_TX_BUSY; | 626 | err = NETDEV_TX_BUSY; |
624 | goto err_exit; | 627 | goto err_exit; |
625 | } | 628 | } |
@@ -631,9 +634,6 @@ int aq_nic_xmit(struct aq_nic_s *self, struct sk_buff *skb) | |||
631 | ring, | 634 | ring, |
632 | frags); | 635 | frags); |
633 | if (err >= 0) { | 636 | if (err >= 0) { |
634 | if (aq_ring_avail_dx(ring) < AQ_CFG_SKB_FRAGS_MAX + 1) | ||
635 | aq_nic_ndev_queue_stop(self, ring->idx); | ||
636 | |||
637 | ++ring->stats.tx.packets; | 637 | ++ring->stats.tx.packets; |
638 | ring->stats.tx.bytes += skb->len; | 638 | ring->stats.tx.bytes += skb->len; |
639 | } | 639 | } |
@@ -693,16 +693,9 @@ int aq_nic_set_multicast_list(struct aq_nic_s *self, struct net_device *ndev) | |||
693 | 693 | ||
694 | int aq_nic_set_mtu(struct aq_nic_s *self, int new_mtu) | 694 | int aq_nic_set_mtu(struct aq_nic_s *self, int new_mtu) |
695 | { | 695 | { |
696 | int err = 0; | ||
697 | |||
698 | if (new_mtu > self->aq_hw_caps.mtu) { | ||
699 | err = -EINVAL; | ||
700 | goto err_exit; | ||
701 | } | ||
702 | self->aq_nic_cfg.mtu = new_mtu; | 696 | self->aq_nic_cfg.mtu = new_mtu; |
703 | 697 | ||
704 | err_exit: | 698 | return 0; |
705 | return err; | ||
706 | } | 699 | } |
707 | 700 | ||
708 | int aq_nic_set_mac(struct aq_nic_s *self, struct net_device *ndev) | 701 | int aq_nic_set_mac(struct aq_nic_s *self, struct net_device *ndev) |
@@ -905,9 +898,7 @@ int aq_nic_stop(struct aq_nic_s *self) | |||
905 | struct aq_vec_s *aq_vec = NULL; | 898 | struct aq_vec_s *aq_vec = NULL; |
906 | unsigned int i = 0U; | 899 | unsigned int i = 0U; |
907 | 900 | ||
908 | for (i = 0U, aq_vec = self->aq_vec[0]; | 901 | netif_tx_disable(self->ndev); |
909 | self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i]) | ||
910 | aq_nic_ndev_queue_stop(self, i); | ||
911 | 902 | ||
912 | del_timer_sync(&self->service_timer); | 903 | del_timer_sync(&self->service_timer); |
913 | 904 | ||
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.h b/drivers/net/ethernet/aquantia/atlantic/aq_nic.h index 7fc2a5ecb2b7..0ddd556ff901 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.h +++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.h | |||
@@ -83,8 +83,6 @@ struct net_device *aq_nic_get_ndev(struct aq_nic_s *self); | |||
83 | int aq_nic_init(struct aq_nic_s *self); | 83 | int aq_nic_init(struct aq_nic_s *self); |
84 | int aq_nic_cfg_start(struct aq_nic_s *self); | 84 | int aq_nic_cfg_start(struct aq_nic_s *self); |
85 | int aq_nic_ndev_register(struct aq_nic_s *self); | 85 | int aq_nic_ndev_register(struct aq_nic_s *self); |
86 | void aq_nic_ndev_queue_start(struct aq_nic_s *self, unsigned int idx); | ||
87 | void aq_nic_ndev_queue_stop(struct aq_nic_s *self, unsigned int idx); | ||
88 | void aq_nic_ndev_free(struct aq_nic_s *self); | 86 | void aq_nic_ndev_free(struct aq_nic_s *self); |
89 | int aq_nic_start(struct aq_nic_s *self); | 87 | int aq_nic_start(struct aq_nic_s *self); |
90 | int aq_nic_xmit(struct aq_nic_s *self, struct sk_buff *skb); | 88 | int aq_nic_xmit(struct aq_nic_s *self, struct sk_buff *skb); |
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c index 4eee1996a825..0654e0c76bc2 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c | |||
@@ -104,6 +104,38 @@ int aq_ring_init(struct aq_ring_s *self) | |||
104 | return 0; | 104 | return 0; |
105 | } | 105 | } |
106 | 106 | ||
107 | static inline bool aq_ring_dx_in_range(unsigned int h, unsigned int i, | ||
108 | unsigned int t) | ||
109 | { | ||
110 | return (h < t) ? ((h < i) && (i < t)) : ((h < i) || (i < t)); | ||
111 | } | ||
112 | |||
113 | void aq_ring_update_queue_state(struct aq_ring_s *ring) | ||
114 | { | ||
115 | if (aq_ring_avail_dx(ring) <= AQ_CFG_SKB_FRAGS_MAX) | ||
116 | aq_ring_queue_stop(ring); | ||
117 | else if (aq_ring_avail_dx(ring) > AQ_CFG_RESTART_DESC_THRES) | ||
118 | aq_ring_queue_wake(ring); | ||
119 | } | ||
120 | |||
121 | void aq_ring_queue_wake(struct aq_ring_s *ring) | ||
122 | { | ||
123 | struct net_device *ndev = aq_nic_get_ndev(ring->aq_nic); | ||
124 | |||
125 | if (__netif_subqueue_stopped(ndev, ring->idx)) { | ||
126 | netif_wake_subqueue(ndev, ring->idx); | ||
127 | ring->stats.tx.queue_restarts++; | ||
128 | } | ||
129 | } | ||
130 | |||
131 | void aq_ring_queue_stop(struct aq_ring_s *ring) | ||
132 | { | ||
133 | struct net_device *ndev = aq_nic_get_ndev(ring->aq_nic); | ||
134 | |||
135 | if (!__netif_subqueue_stopped(ndev, ring->idx)) | ||
136 | netif_stop_subqueue(ndev, ring->idx); | ||
137 | } | ||
138 | |||
107 | void aq_ring_tx_clean(struct aq_ring_s *self) | 139 | void aq_ring_tx_clean(struct aq_ring_s *self) |
108 | { | 140 | { |
109 | struct device *dev = aq_nic_get_dev(self->aq_nic); | 141 | struct device *dev = aq_nic_get_dev(self->aq_nic); |
@@ -113,23 +145,28 @@ void aq_ring_tx_clean(struct aq_ring_s *self) | |||
113 | struct aq_ring_buff_s *buff = &self->buff_ring[self->sw_head]; | 145 | struct aq_ring_buff_s *buff = &self->buff_ring[self->sw_head]; |
114 | 146 | ||
115 | if (likely(buff->is_mapped)) { | 147 | if (likely(buff->is_mapped)) { |
116 | if (unlikely(buff->is_sop)) | 148 | if (unlikely(buff->is_sop)) { |
149 | if (!buff->is_eop && | ||
150 | buff->eop_index != 0xffffU && | ||
151 | (!aq_ring_dx_in_range(self->sw_head, | ||
152 | buff->eop_index, | ||
153 | self->hw_head))) | ||
154 | break; | ||
155 | |||
117 | dma_unmap_single(dev, buff->pa, buff->len, | 156 | dma_unmap_single(dev, buff->pa, buff->len, |
118 | DMA_TO_DEVICE); | 157 | DMA_TO_DEVICE); |
119 | else | 158 | } else { |
120 | dma_unmap_page(dev, buff->pa, buff->len, | 159 | dma_unmap_page(dev, buff->pa, buff->len, |
121 | DMA_TO_DEVICE); | 160 | DMA_TO_DEVICE); |
161 | } | ||
122 | } | 162 | } |
123 | 163 | ||
124 | if (unlikely(buff->is_eop)) | 164 | if (unlikely(buff->is_eop)) |
125 | dev_kfree_skb_any(buff->skb); | 165 | dev_kfree_skb_any(buff->skb); |
126 | } | ||
127 | } | ||
128 | 166 | ||
129 | static inline unsigned int aq_ring_dx_in_range(unsigned int h, unsigned int i, | 167 | buff->pa = 0U; |
130 | unsigned int t) | 168 | buff->eop_index = 0xffffU; |
131 | { | 169 | } |
132 | return (h < t) ? ((h < i) && (i < t)) : ((h < i) || (i < t)); | ||
133 | } | 170 | } |
134 | 171 | ||
135 | #define AQ_SKB_ALIGN SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) | 172 | #define AQ_SKB_ALIGN SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) |
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ring.h b/drivers/net/ethernet/aquantia/atlantic/aq_ring.h index 782176c5f4f8..5844078764bd 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_ring.h +++ b/drivers/net/ethernet/aquantia/atlantic/aq_ring.h | |||
@@ -65,7 +65,7 @@ struct __packed aq_ring_buff_s { | |||
65 | }; | 65 | }; |
66 | union { | 66 | union { |
67 | struct { | 67 | struct { |
68 | u32 len:16; | 68 | u16 len; |
69 | u32 is_ip_cso:1; | 69 | u32 is_ip_cso:1; |
70 | u32 is_udp_cso:1; | 70 | u32 is_udp_cso:1; |
71 | u32 is_tcp_cso:1; | 71 | u32 is_tcp_cso:1; |
@@ -77,8 +77,10 @@ struct __packed aq_ring_buff_s { | |||
77 | u32 is_cleaned:1; | 77 | u32 is_cleaned:1; |
78 | u32 is_error:1; | 78 | u32 is_error:1; |
79 | u32 rsvd3:6; | 79 | u32 rsvd3:6; |
80 | u16 eop_index; | ||
81 | u16 rsvd4; | ||
80 | }; | 82 | }; |
81 | u32 flags; | 83 | u64 flags; |
82 | }; | 84 | }; |
83 | }; | 85 | }; |
84 | 86 | ||
@@ -94,6 +96,7 @@ struct aq_ring_stats_tx_s { | |||
94 | u64 errors; | 96 | u64 errors; |
95 | u64 packets; | 97 | u64 packets; |
96 | u64 bytes; | 98 | u64 bytes; |
99 | u64 queue_restarts; | ||
97 | }; | 100 | }; |
98 | 101 | ||
99 | union aq_ring_stats_s { | 102 | union aq_ring_stats_s { |
@@ -147,6 +150,9 @@ struct aq_ring_s *aq_ring_rx_alloc(struct aq_ring_s *self, | |||
147 | int aq_ring_init(struct aq_ring_s *self); | 150 | int aq_ring_init(struct aq_ring_s *self); |
148 | void aq_ring_rx_deinit(struct aq_ring_s *self); | 151 | void aq_ring_rx_deinit(struct aq_ring_s *self); |
149 | void aq_ring_free(struct aq_ring_s *self); | 152 | void aq_ring_free(struct aq_ring_s *self); |
153 | void aq_ring_update_queue_state(struct aq_ring_s *ring); | ||
154 | void aq_ring_queue_wake(struct aq_ring_s *ring); | ||
155 | void aq_ring_queue_stop(struct aq_ring_s *ring); | ||
150 | void aq_ring_tx_clean(struct aq_ring_s *self); | 156 | void aq_ring_tx_clean(struct aq_ring_s *self); |
151 | int aq_ring_rx_clean(struct aq_ring_s *self, | 157 | int aq_ring_rx_clean(struct aq_ring_s *self, |
152 | struct napi_struct *napi, | 158 | struct napi_struct *napi, |
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_vec.c b/drivers/net/ethernet/aquantia/atlantic/aq_vec.c index ebf588004c46..305ff8ffac2c 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_vec.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_vec.c | |||
@@ -59,12 +59,7 @@ static int aq_vec_poll(struct napi_struct *napi, int budget) | |||
59 | if (ring[AQ_VEC_TX_ID].sw_head != | 59 | if (ring[AQ_VEC_TX_ID].sw_head != |
60 | ring[AQ_VEC_TX_ID].hw_head) { | 60 | ring[AQ_VEC_TX_ID].hw_head) { |
61 | aq_ring_tx_clean(&ring[AQ_VEC_TX_ID]); | 61 | aq_ring_tx_clean(&ring[AQ_VEC_TX_ID]); |
62 | 62 | aq_ring_update_queue_state(&ring[AQ_VEC_TX_ID]); | |
63 | if (aq_ring_avail_dx(&ring[AQ_VEC_TX_ID]) > | ||
64 | AQ_CFG_SKB_FRAGS_MAX) { | ||
65 | aq_nic_ndev_queue_start(self->aq_nic, | ||
66 | ring[AQ_VEC_TX_ID].idx); | ||
67 | } | ||
68 | was_tx_cleaned = true; | 63 | was_tx_cleaned = true; |
69 | } | 64 | } |
70 | 65 | ||
@@ -364,6 +359,7 @@ void aq_vec_add_stats(struct aq_vec_s *self, | |||
364 | stats_tx->packets += tx->packets; | 359 | stats_tx->packets += tx->packets; |
365 | stats_tx->bytes += tx->bytes; | 360 | stats_tx->bytes += tx->bytes; |
366 | stats_tx->errors += tx->errors; | 361 | stats_tx->errors += tx->errors; |
362 | stats_tx->queue_restarts += tx->queue_restarts; | ||
367 | } | 363 | } |
368 | } | 364 | } |
369 | 365 | ||
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0_internal.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0_internal.h index f3957e930340..fcf89e25a773 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0_internal.h +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0_internal.h | |||
@@ -16,7 +16,7 @@ | |||
16 | 16 | ||
17 | #include "../aq_common.h" | 17 | #include "../aq_common.h" |
18 | 18 | ||
19 | #define HW_ATL_B0_MTU_JUMBO (16000U) | 19 | #define HW_ATL_B0_MTU_JUMBO 16352U |
20 | #define HW_ATL_B0_MTU 1514U | 20 | #define HW_ATL_B0_MTU 1514U |
21 | 21 | ||
22 | #define HW_ATL_B0_TX_RINGS 4U | 22 | #define HW_ATL_B0_TX_RINGS 4U |
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c index 4f5ec9a0fbfb..bf734b32e44b 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c | |||
@@ -351,8 +351,7 @@ int hw_atl_utils_mpi_get_link_status(struct aq_hw_s *self) | |||
351 | break; | 351 | break; |
352 | 352 | ||
353 | default: | 353 | default: |
354 | link_status->mbps = 0U; | 354 | return -EBUSY; |
355 | break; | ||
356 | } | 355 | } |
357 | } | 356 | } |
358 | 357 | ||
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c index c3c53f6cd9e6..83eec9a8c275 100644 --- a/drivers/net/ethernet/broadcom/bcmsysport.c +++ b/drivers/net/ethernet/broadcom/bcmsysport.c | |||
@@ -432,6 +432,27 @@ static void bcm_sysport_update_mib_counters(struct bcm_sysport_priv *priv) | |||
432 | netif_dbg(priv, hw, priv->netdev, "updated MIB counters\n"); | 432 | netif_dbg(priv, hw, priv->netdev, "updated MIB counters\n"); |
433 | } | 433 | } |
434 | 434 | ||
435 | static void bcm_sysport_update_tx_stats(struct bcm_sysport_priv *priv, | ||
436 | u64 *tx_bytes, u64 *tx_packets) | ||
437 | { | ||
438 | struct bcm_sysport_tx_ring *ring; | ||
439 | u64 bytes = 0, packets = 0; | ||
440 | unsigned int start; | ||
441 | unsigned int q; | ||
442 | |||
443 | for (q = 0; q < priv->netdev->num_tx_queues; q++) { | ||
444 | ring = &priv->tx_rings[q]; | ||
445 | do { | ||
446 | start = u64_stats_fetch_begin_irq(&priv->syncp); | ||
447 | bytes = ring->bytes; | ||
448 | packets = ring->packets; | ||
449 | } while (u64_stats_fetch_retry_irq(&priv->syncp, start)); | ||
450 | |||
451 | *tx_bytes += bytes; | ||
452 | *tx_packets += packets; | ||
453 | } | ||
454 | } | ||
455 | |||
435 | static void bcm_sysport_get_stats(struct net_device *dev, | 456 | static void bcm_sysport_get_stats(struct net_device *dev, |
436 | struct ethtool_stats *stats, u64 *data) | 457 | struct ethtool_stats *stats, u64 *data) |
437 | { | 458 | { |
@@ -439,11 +460,16 @@ static void bcm_sysport_get_stats(struct net_device *dev, | |||
439 | struct bcm_sysport_stats64 *stats64 = &priv->stats64; | 460 | struct bcm_sysport_stats64 *stats64 = &priv->stats64; |
440 | struct u64_stats_sync *syncp = &priv->syncp; | 461 | struct u64_stats_sync *syncp = &priv->syncp; |
441 | struct bcm_sysport_tx_ring *ring; | 462 | struct bcm_sysport_tx_ring *ring; |
463 | u64 tx_bytes = 0, tx_packets = 0; | ||
442 | unsigned int start; | 464 | unsigned int start; |
443 | int i, j; | 465 | int i, j; |
444 | 466 | ||
445 | if (netif_running(dev)) | 467 | if (netif_running(dev)) { |
446 | bcm_sysport_update_mib_counters(priv); | 468 | bcm_sysport_update_mib_counters(priv); |
469 | bcm_sysport_update_tx_stats(priv, &tx_bytes, &tx_packets); | ||
470 | stats64->tx_bytes = tx_bytes; | ||
471 | stats64->tx_packets = tx_packets; | ||
472 | } | ||
447 | 473 | ||
448 | for (i = 0, j = 0; i < BCM_SYSPORT_STATS_LEN; i++) { | 474 | for (i = 0, j = 0; i < BCM_SYSPORT_STATS_LEN; i++) { |
449 | const struct bcm_sysport_stats *s; | 475 | const struct bcm_sysport_stats *s; |
@@ -461,12 +487,13 @@ static void bcm_sysport_get_stats(struct net_device *dev, | |||
461 | continue; | 487 | continue; |
462 | p += s->stat_offset; | 488 | p += s->stat_offset; |
463 | 489 | ||
464 | if (s->stat_sizeof == sizeof(u64)) | 490 | if (s->stat_sizeof == sizeof(u64) && |
491 | s->type == BCM_SYSPORT_STAT_NETDEV64) { | ||
465 | do { | 492 | do { |
466 | start = u64_stats_fetch_begin_irq(syncp); | 493 | start = u64_stats_fetch_begin_irq(syncp); |
467 | data[i] = *(u64 *)p; | 494 | data[i] = *(u64 *)p; |
468 | } while (u64_stats_fetch_retry_irq(syncp, start)); | 495 | } while (u64_stats_fetch_retry_irq(syncp, start)); |
469 | else | 496 | } else |
470 | data[i] = *(u32 *)p; | 497 | data[i] = *(u32 *)p; |
471 | j++; | 498 | j++; |
472 | } | 499 | } |
@@ -1716,27 +1743,12 @@ static void bcm_sysport_get_stats64(struct net_device *dev, | |||
1716 | { | 1743 | { |
1717 | struct bcm_sysport_priv *priv = netdev_priv(dev); | 1744 | struct bcm_sysport_priv *priv = netdev_priv(dev); |
1718 | struct bcm_sysport_stats64 *stats64 = &priv->stats64; | 1745 | struct bcm_sysport_stats64 *stats64 = &priv->stats64; |
1719 | struct bcm_sysport_tx_ring *ring; | ||
1720 | u64 tx_packets = 0, tx_bytes = 0; | ||
1721 | unsigned int start; | 1746 | unsigned int start; |
1722 | unsigned int q; | ||
1723 | 1747 | ||
1724 | netdev_stats_to_stats64(stats, &dev->stats); | 1748 | netdev_stats_to_stats64(stats, &dev->stats); |
1725 | 1749 | ||
1726 | for (q = 0; q < dev->num_tx_queues; q++) { | 1750 | bcm_sysport_update_tx_stats(priv, &stats->tx_bytes, |
1727 | ring = &priv->tx_rings[q]; | 1751 | &stats->tx_packets); |
1728 | do { | ||
1729 | start = u64_stats_fetch_begin_irq(&priv->syncp); | ||
1730 | tx_bytes = ring->bytes; | ||
1731 | tx_packets = ring->packets; | ||
1732 | } while (u64_stats_fetch_retry_irq(&priv->syncp, start)); | ||
1733 | |||
1734 | stats->tx_bytes += tx_bytes; | ||
1735 | stats->tx_packets += tx_packets; | ||
1736 | } | ||
1737 | |||
1738 | stats64->tx_bytes = stats->tx_bytes; | ||
1739 | stats64->tx_packets = stats->tx_packets; | ||
1740 | 1752 | ||
1741 | do { | 1753 | do { |
1742 | start = u64_stats_fetch_begin_irq(&priv->syncp); | 1754 | start = u64_stats_fetch_begin_irq(&priv->syncp); |
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c index ccd699fb2d70..7dd3d131043a 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c | |||
@@ -750,6 +750,10 @@ int bnxt_tc_setup_flower(struct bnxt *bp, u16 src_fid, | |||
750 | { | 750 | { |
751 | int rc = 0; | 751 | int rc = 0; |
752 | 752 | ||
753 | if (!is_classid_clsact_ingress(cls_flower->common.classid) || | ||
754 | cls_flower->common.chain_index) | ||
755 | return -EOPNOTSUPP; | ||
756 | |||
753 | switch (cls_flower->command) { | 757 | switch (cls_flower->command) { |
754 | case TC_CLSFLOWER_REPLACE: | 758 | case TC_CLSFLOWER_REPLACE: |
755 | rc = bnxt_tc_add_flow(bp, src_fid, cls_flower); | 759 | rc = bnxt_tc_add_flow(bp, src_fid, cls_flower); |
diff --git a/drivers/net/ethernet/broadcom/cnic.c b/drivers/net/ethernet/broadcom/cnic.c index cec94bbb2ea5..8bc126a156e8 100644 --- a/drivers/net/ethernet/broadcom/cnic.c +++ b/drivers/net/ethernet/broadcom/cnic.c | |||
@@ -1278,7 +1278,7 @@ static int cnic_alloc_bnx2x_resc(struct cnic_dev *dev) | |||
1278 | 1278 | ||
1279 | ret = cnic_alloc_dma(dev, kwq_16_dma, pages, 0); | 1279 | ret = cnic_alloc_dma(dev, kwq_16_dma, pages, 0); |
1280 | if (ret) | 1280 | if (ret) |
1281 | return -ENOMEM; | 1281 | goto error; |
1282 | 1282 | ||
1283 | n = CNIC_PAGE_SIZE / CNIC_KWQ16_DATA_SIZE; | 1283 | n = CNIC_PAGE_SIZE / CNIC_KWQ16_DATA_SIZE; |
1284 | for (i = 0, j = 0; i < cp->max_cid_space; i++) { | 1284 | for (i = 0, j = 0; i < cp->max_cid_space; i++) { |
diff --git a/drivers/net/ethernet/freescale/fec.h b/drivers/net/ethernet/freescale/fec.h index 38c7b21e5d63..ede1876a9a19 100644 --- a/drivers/net/ethernet/freescale/fec.h +++ b/drivers/net/ethernet/freescale/fec.h | |||
@@ -374,8 +374,8 @@ struct bufdesc_ex { | |||
374 | #define FEC_ENET_TS_AVAIL ((uint)0x00010000) | 374 | #define FEC_ENET_TS_AVAIL ((uint)0x00010000) |
375 | #define FEC_ENET_TS_TIMER ((uint)0x00008000) | 375 | #define FEC_ENET_TS_TIMER ((uint)0x00008000) |
376 | 376 | ||
377 | #define FEC_DEFAULT_IMASK (FEC_ENET_TXF | FEC_ENET_RXF | FEC_ENET_MII | FEC_ENET_TS_TIMER) | 377 | #define FEC_DEFAULT_IMASK (FEC_ENET_TXF | FEC_ENET_RXF | FEC_ENET_MII) |
378 | #define FEC_NAPI_IMASK (FEC_ENET_MII | FEC_ENET_TS_TIMER) | 378 | #define FEC_NAPI_IMASK FEC_ENET_MII |
379 | #define FEC_RX_DISABLED_IMASK (FEC_DEFAULT_IMASK & (~FEC_ENET_RXF)) | 379 | #define FEC_RX_DISABLED_IMASK (FEC_DEFAULT_IMASK & (~FEC_ENET_RXF)) |
380 | 380 | ||
381 | /* ENET interrupt coalescing macro define */ | 381 | /* ENET interrupt coalescing macro define */ |
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index 56f56d6ada9c..3dc2d771a222 100644 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c | |||
@@ -1559,14 +1559,14 @@ fec_enet_collect_events(struct fec_enet_private *fep, uint int_events) | |||
1559 | if (int_events == 0) | 1559 | if (int_events == 0) |
1560 | return false; | 1560 | return false; |
1561 | 1561 | ||
1562 | if (int_events & FEC_ENET_RXF) | 1562 | if (int_events & FEC_ENET_RXF_0) |
1563 | fep->work_rx |= (1 << 2); | 1563 | fep->work_rx |= (1 << 2); |
1564 | if (int_events & FEC_ENET_RXF_1) | 1564 | if (int_events & FEC_ENET_RXF_1) |
1565 | fep->work_rx |= (1 << 0); | 1565 | fep->work_rx |= (1 << 0); |
1566 | if (int_events & FEC_ENET_RXF_2) | 1566 | if (int_events & FEC_ENET_RXF_2) |
1567 | fep->work_rx |= (1 << 1); | 1567 | fep->work_rx |= (1 << 1); |
1568 | 1568 | ||
1569 | if (int_events & FEC_ENET_TXF) | 1569 | if (int_events & FEC_ENET_TXF_0) |
1570 | fep->work_tx |= (1 << 2); | 1570 | fep->work_tx |= (1 << 2); |
1571 | if (int_events & FEC_ENET_TXF_1) | 1571 | if (int_events & FEC_ENET_TXF_1) |
1572 | fep->work_tx |= (1 << 0); | 1572 | fep->work_tx |= (1 << 0); |
@@ -1604,8 +1604,8 @@ fec_enet_interrupt(int irq, void *dev_id) | |||
1604 | } | 1604 | } |
1605 | 1605 | ||
1606 | if (fep->ptp_clock) | 1606 | if (fep->ptp_clock) |
1607 | fec_ptp_check_pps_event(fep); | 1607 | if (fec_ptp_check_pps_event(fep)) |
1608 | 1608 | ret = IRQ_HANDLED; | |
1609 | return ret; | 1609 | return ret; |
1610 | } | 1610 | } |
1611 | 1611 | ||
diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.c b/drivers/net/ethernet/hisilicon/hns3/hnae3.c index 59efbd605416..5bcb2238acb2 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hnae3.c +++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.c | |||
@@ -37,20 +37,15 @@ static bool hnae3_client_match(enum hnae3_client_type client_type, | |||
37 | } | 37 | } |
38 | 38 | ||
39 | static int hnae3_match_n_instantiate(struct hnae3_client *client, | 39 | static int hnae3_match_n_instantiate(struct hnae3_client *client, |
40 | struct hnae3_ae_dev *ae_dev, | 40 | struct hnae3_ae_dev *ae_dev, bool is_reg) |
41 | bool is_reg, bool *matched) | ||
42 | { | 41 | { |
43 | int ret; | 42 | int ret; |
44 | 43 | ||
45 | *matched = false; | ||
46 | |||
47 | /* check if this client matches the type of ae_dev */ | 44 | /* check if this client matches the type of ae_dev */ |
48 | if (!(hnae3_client_match(client->type, ae_dev->dev_type) && | 45 | if (!(hnae3_client_match(client->type, ae_dev->dev_type) && |
49 | hnae_get_bit(ae_dev->flag, HNAE3_DEV_INITED_B))) { | 46 | hnae_get_bit(ae_dev->flag, HNAE3_DEV_INITED_B))) { |
50 | return 0; | 47 | return 0; |
51 | } | 48 | } |
52 | /* there is a match of client and dev */ | ||
53 | *matched = true; | ||
54 | 49 | ||
55 | /* now, (un-)instantiate client by calling lower layer */ | 50 | /* now, (un-)instantiate client by calling lower layer */ |
56 | if (is_reg) { | 51 | if (is_reg) { |
@@ -69,7 +64,6 @@ int hnae3_register_client(struct hnae3_client *client) | |||
69 | { | 64 | { |
70 | struct hnae3_client *client_tmp; | 65 | struct hnae3_client *client_tmp; |
71 | struct hnae3_ae_dev *ae_dev; | 66 | struct hnae3_ae_dev *ae_dev; |
72 | bool matched; | ||
73 | int ret = 0; | 67 | int ret = 0; |
74 | 68 | ||
75 | mutex_lock(&hnae3_common_lock); | 69 | mutex_lock(&hnae3_common_lock); |
@@ -86,7 +80,7 @@ int hnae3_register_client(struct hnae3_client *client) | |||
86 | /* if the client could not be initialized on current port, for | 80 | /* if the client could not be initialized on current port, for |
87 | * any error reasons, move on to next available port | 81 | * any error reasons, move on to next available port |
88 | */ | 82 | */ |
89 | ret = hnae3_match_n_instantiate(client, ae_dev, true, &matched); | 83 | ret = hnae3_match_n_instantiate(client, ae_dev, true); |
90 | if (ret) | 84 | if (ret) |
91 | dev_err(&ae_dev->pdev->dev, | 85 | dev_err(&ae_dev->pdev->dev, |
92 | "match and instantiation failed for port\n"); | 86 | "match and instantiation failed for port\n"); |
@@ -102,12 +96,11 @@ EXPORT_SYMBOL(hnae3_register_client); | |||
102 | void hnae3_unregister_client(struct hnae3_client *client) | 96 | void hnae3_unregister_client(struct hnae3_client *client) |
103 | { | 97 | { |
104 | struct hnae3_ae_dev *ae_dev; | 98 | struct hnae3_ae_dev *ae_dev; |
105 | bool matched; | ||
106 | 99 | ||
107 | mutex_lock(&hnae3_common_lock); | 100 | mutex_lock(&hnae3_common_lock); |
108 | /* un-initialize the client on every matched port */ | 101 | /* un-initialize the client on every matched port */ |
109 | list_for_each_entry(ae_dev, &hnae3_ae_dev_list, node) { | 102 | list_for_each_entry(ae_dev, &hnae3_ae_dev_list, node) { |
110 | hnae3_match_n_instantiate(client, ae_dev, false, &matched); | 103 | hnae3_match_n_instantiate(client, ae_dev, false); |
111 | } | 104 | } |
112 | 105 | ||
113 | list_del(&client->node); | 106 | list_del(&client->node); |
@@ -124,7 +117,6 @@ int hnae3_register_ae_algo(struct hnae3_ae_algo *ae_algo) | |||
124 | const struct pci_device_id *id; | 117 | const struct pci_device_id *id; |
125 | struct hnae3_ae_dev *ae_dev; | 118 | struct hnae3_ae_dev *ae_dev; |
126 | struct hnae3_client *client; | 119 | struct hnae3_client *client; |
127 | bool matched; | ||
128 | int ret = 0; | 120 | int ret = 0; |
129 | 121 | ||
130 | mutex_lock(&hnae3_common_lock); | 122 | mutex_lock(&hnae3_common_lock); |
@@ -151,13 +143,10 @@ int hnae3_register_ae_algo(struct hnae3_ae_algo *ae_algo) | |||
151 | * initialize the figure out client instance | 143 | * initialize the figure out client instance |
152 | */ | 144 | */ |
153 | list_for_each_entry(client, &hnae3_client_list, node) { | 145 | list_for_each_entry(client, &hnae3_client_list, node) { |
154 | ret = hnae3_match_n_instantiate(client, ae_dev, true, | 146 | ret = hnae3_match_n_instantiate(client, ae_dev, true); |
155 | &matched); | ||
156 | if (ret) | 147 | if (ret) |
157 | dev_err(&ae_dev->pdev->dev, | 148 | dev_err(&ae_dev->pdev->dev, |
158 | "match and instantiation failed\n"); | 149 | "match and instantiation failed\n"); |
159 | if (matched) | ||
160 | break; | ||
161 | } | 150 | } |
162 | } | 151 | } |
163 | 152 | ||
@@ -175,7 +164,6 @@ void hnae3_unregister_ae_algo(struct hnae3_ae_algo *ae_algo) | |||
175 | const struct pci_device_id *id; | 164 | const struct pci_device_id *id; |
176 | struct hnae3_ae_dev *ae_dev; | 165 | struct hnae3_ae_dev *ae_dev; |
177 | struct hnae3_client *client; | 166 | struct hnae3_client *client; |
178 | bool matched; | ||
179 | 167 | ||
180 | mutex_lock(&hnae3_common_lock); | 168 | mutex_lock(&hnae3_common_lock); |
181 | /* Check if there are matched ae_dev */ | 169 | /* Check if there are matched ae_dev */ |
@@ -187,12 +175,8 @@ void hnae3_unregister_ae_algo(struct hnae3_ae_algo *ae_algo) | |||
187 | /* check the client list for the match with this ae_dev type and | 175 | /* check the client list for the match with this ae_dev type and |
188 | * un-initialize the figure out client instance | 176 | * un-initialize the figure out client instance |
189 | */ | 177 | */ |
190 | list_for_each_entry(client, &hnae3_client_list, node) { | 178 | list_for_each_entry(client, &hnae3_client_list, node) |
191 | hnae3_match_n_instantiate(client, ae_dev, false, | 179 | hnae3_match_n_instantiate(client, ae_dev, false); |
192 | &matched); | ||
193 | if (matched) | ||
194 | break; | ||
195 | } | ||
196 | 180 | ||
197 | ae_algo->ops->uninit_ae_dev(ae_dev); | 181 | ae_algo->ops->uninit_ae_dev(ae_dev); |
198 | hnae_set_bit(ae_dev->flag, HNAE3_DEV_INITED_B, 0); | 182 | hnae_set_bit(ae_dev->flag, HNAE3_DEV_INITED_B, 0); |
@@ -212,7 +196,6 @@ int hnae3_register_ae_dev(struct hnae3_ae_dev *ae_dev) | |||
212 | const struct pci_device_id *id; | 196 | const struct pci_device_id *id; |
213 | struct hnae3_ae_algo *ae_algo; | 197 | struct hnae3_ae_algo *ae_algo; |
214 | struct hnae3_client *client; | 198 | struct hnae3_client *client; |
215 | bool matched; | ||
216 | int ret = 0; | 199 | int ret = 0; |
217 | 200 | ||
218 | mutex_lock(&hnae3_common_lock); | 201 | mutex_lock(&hnae3_common_lock); |
@@ -246,13 +229,10 @@ int hnae3_register_ae_dev(struct hnae3_ae_dev *ae_dev) | |||
246 | * initialize the figure out client instance | 229 | * initialize the figure out client instance |
247 | */ | 230 | */ |
248 | list_for_each_entry(client, &hnae3_client_list, node) { | 231 | list_for_each_entry(client, &hnae3_client_list, node) { |
249 | ret = hnae3_match_n_instantiate(client, ae_dev, true, | 232 | ret = hnae3_match_n_instantiate(client, ae_dev, true); |
250 | &matched); | ||
251 | if (ret) | 233 | if (ret) |
252 | dev_err(&ae_dev->pdev->dev, | 234 | dev_err(&ae_dev->pdev->dev, |
253 | "match and instantiation failed\n"); | 235 | "match and instantiation failed\n"); |
254 | if (matched) | ||
255 | break; | ||
256 | } | 236 | } |
257 | 237 | ||
258 | out_err: | 238 | out_err: |
@@ -270,7 +250,6 @@ void hnae3_unregister_ae_dev(struct hnae3_ae_dev *ae_dev) | |||
270 | const struct pci_device_id *id; | 250 | const struct pci_device_id *id; |
271 | struct hnae3_ae_algo *ae_algo; | 251 | struct hnae3_ae_algo *ae_algo; |
272 | struct hnae3_client *client; | 252 | struct hnae3_client *client; |
273 | bool matched; | ||
274 | 253 | ||
275 | mutex_lock(&hnae3_common_lock); | 254 | mutex_lock(&hnae3_common_lock); |
276 | /* Check if there are matched ae_algo */ | 255 | /* Check if there are matched ae_algo */ |
@@ -279,12 +258,8 @@ void hnae3_unregister_ae_dev(struct hnae3_ae_dev *ae_dev) | |||
279 | if (!id) | 258 | if (!id) |
280 | continue; | 259 | continue; |
281 | 260 | ||
282 | list_for_each_entry(client, &hnae3_client_list, node) { | 261 | list_for_each_entry(client, &hnae3_client_list, node) |
283 | hnae3_match_n_instantiate(client, ae_dev, false, | 262 | hnae3_match_n_instantiate(client, ae_dev, false); |
284 | &matched); | ||
285 | if (matched) | ||
286 | break; | ||
287 | } | ||
288 | 263 | ||
289 | ae_algo->ops->uninit_ae_dev(ae_dev); | 264 | ae_algo->ops->uninit_ae_dev(ae_dev); |
290 | hnae_set_bit(ae_dev->flag, HNAE3_DEV_INITED_B, 0); | 265 | hnae_set_bit(ae_dev->flag, HNAE3_DEV_INITED_B, 0); |
diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.h b/drivers/net/ethernet/hisilicon/hns3/hnae3.h index b2f28ae81273..1a01cadfe5f3 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hnae3.h +++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.h | |||
@@ -49,7 +49,17 @@ | |||
49 | #define HNAE3_CLASS_NAME_SIZE 16 | 49 | #define HNAE3_CLASS_NAME_SIZE 16 |
50 | 50 | ||
51 | #define HNAE3_DEV_INITED_B 0x0 | 51 | #define HNAE3_DEV_INITED_B 0x0 |
52 | #define HNAE_DEV_SUPPORT_ROCE_B 0x1 | 52 | #define HNAE3_DEV_SUPPORT_ROCE_B 0x1 |
53 | #define HNAE3_DEV_SUPPORT_DCB_B 0x2 | ||
54 | |||
55 | #define HNAE3_DEV_SUPPORT_ROCE_DCB_BITS (BIT(HNAE3_DEV_SUPPORT_DCB_B) |\ | ||
56 | BIT(HNAE3_DEV_SUPPORT_ROCE_B)) | ||
57 | |||
58 | #define hnae3_dev_roce_supported(hdev) \ | ||
59 | hnae_get_bit(hdev->ae_dev->flag, HNAE3_DEV_SUPPORT_ROCE_B) | ||
60 | |||
61 | #define hnae3_dev_dcb_supported(hdev) \ | ||
62 | hnae_get_bit(hdev->ae_dev->flag, HNAE3_DEV_SUPPORT_DCB_B) | ||
53 | 63 | ||
54 | #define ring_ptr_move_fw(ring, p) \ | 64 | #define ring_ptr_move_fw(ring, p) \ |
55 | ((ring)->p = ((ring)->p + 1) % (ring)->desc_num) | 65 | ((ring)->p = ((ring)->p + 1) % (ring)->desc_num) |
@@ -366,12 +376,12 @@ struct hnae3_ae_algo { | |||
366 | struct hnae3_tc_info { | 376 | struct hnae3_tc_info { |
367 | u16 tqp_offset; /* TQP offset from base TQP */ | 377 | u16 tqp_offset; /* TQP offset from base TQP */ |
368 | u16 tqp_count; /* Total TQPs */ | 378 | u16 tqp_count; /* Total TQPs */ |
369 | u8 up; /* user priority */ | ||
370 | u8 tc; /* TC index */ | 379 | u8 tc; /* TC index */ |
371 | bool enable; /* If this TC is enable or not */ | 380 | bool enable; /* If this TC is enable or not */ |
372 | }; | 381 | }; |
373 | 382 | ||
374 | #define HNAE3_MAX_TC 8 | 383 | #define HNAE3_MAX_TC 8 |
384 | #define HNAE3_MAX_USER_PRIO 8 | ||
375 | struct hnae3_knic_private_info { | 385 | struct hnae3_knic_private_info { |
376 | struct net_device *netdev; /* Set by KNIC client when init instance */ | 386 | struct net_device *netdev; /* Set by KNIC client when init instance */ |
377 | u16 rss_size; /* Allocated RSS queues */ | 387 | u16 rss_size; /* Allocated RSS queues */ |
@@ -379,6 +389,7 @@ struct hnae3_knic_private_info { | |||
379 | u16 num_desc; | 389 | u16 num_desc; |
380 | 390 | ||
381 | u8 num_tc; /* Total number of enabled TCs */ | 391 | u8 num_tc; /* Total number of enabled TCs */ |
392 | u8 prio_tc[HNAE3_MAX_USER_PRIO]; /* TC indexed by prio */ | ||
382 | struct hnae3_tc_info tc_info[HNAE3_MAX_TC]; /* Idx of array is HW TC */ | 393 | struct hnae3_tc_info tc_info[HNAE3_MAX_TC]; /* Idx of array is HW TC */ |
383 | 394 | ||
384 | u16 num_tqps; /* total number of TQPs in this handle */ | 395 | u16 num_tqps; /* total number of TQPs in this handle */ |
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h index 91ae0135ee50..758cf3948131 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h | |||
@@ -238,7 +238,7 @@ struct hclge_tqp_map { | |||
238 | u8 rsv[18]; | 238 | u8 rsv[18]; |
239 | }; | 239 | }; |
240 | 240 | ||
241 | #define HCLGE_VECTOR_ELEMENTS_PER_CMD 11 | 241 | #define HCLGE_VECTOR_ELEMENTS_PER_CMD 10 |
242 | 242 | ||
243 | enum hclge_int_type { | 243 | enum hclge_int_type { |
244 | HCLGE_INT_TX, | 244 | HCLGE_INT_TX, |
@@ -252,8 +252,12 @@ struct hclge_ctrl_vector_chain { | |||
252 | #define HCLGE_INT_TYPE_S 0 | 252 | #define HCLGE_INT_TYPE_S 0 |
253 | #define HCLGE_INT_TYPE_M 0x3 | 253 | #define HCLGE_INT_TYPE_M 0x3 |
254 | #define HCLGE_TQP_ID_S 2 | 254 | #define HCLGE_TQP_ID_S 2 |
255 | #define HCLGE_TQP_ID_M (0x3fff << HCLGE_TQP_ID_S) | 255 | #define HCLGE_TQP_ID_M (0x7ff << HCLGE_TQP_ID_S) |
256 | #define HCLGE_INT_GL_IDX_S 13 | ||
257 | #define HCLGE_INT_GL_IDX_M (0x3 << HCLGE_INT_GL_IDX_S) | ||
256 | __le16 tqp_type_and_id[HCLGE_VECTOR_ELEMENTS_PER_CMD]; | 258 | __le16 tqp_type_and_id[HCLGE_VECTOR_ELEMENTS_PER_CMD]; |
259 | u8 vfid; | ||
260 | u8 rsv; | ||
257 | }; | 261 | }; |
258 | 262 | ||
259 | #define HCLGE_TC_NUM 8 | 263 | #define HCLGE_TC_NUM 8 |
@@ -266,7 +270,8 @@ struct hclge_tx_buff_alloc { | |||
266 | 270 | ||
267 | struct hclge_rx_priv_buff { | 271 | struct hclge_rx_priv_buff { |
268 | __le16 buf_num[HCLGE_TC_NUM]; | 272 | __le16 buf_num[HCLGE_TC_NUM]; |
269 | u8 rsv[8]; | 273 | __le16 shared_buf; |
274 | u8 rsv[6]; | ||
270 | }; | 275 | }; |
271 | 276 | ||
272 | struct hclge_query_version { | 277 | struct hclge_query_version { |
@@ -684,6 +689,7 @@ struct hclge_reset_tqp_queue { | |||
684 | #define HCLGE_DEFAULT_TX_BUF 0x4000 /* 16k bytes */ | 689 | #define HCLGE_DEFAULT_TX_BUF 0x4000 /* 16k bytes */ |
685 | #define HCLGE_TOTAL_PKT_BUF 0x108000 /* 1.03125M bytes */ | 690 | #define HCLGE_TOTAL_PKT_BUF 0x108000 /* 1.03125M bytes */ |
686 | #define HCLGE_DEFAULT_DV 0xA000 /* 40k byte */ | 691 | #define HCLGE_DEFAULT_DV 0xA000 /* 40k byte */ |
692 | #define HCLGE_DEFAULT_NON_DCB_DV 0x7800 /* 30K byte */ | ||
687 | 693 | ||
688 | #define HCLGE_TYPE_CRQ 0 | 694 | #define HCLGE_TYPE_CRQ 0 |
689 | #define HCLGE_TYPE_CSQ 1 | 695 | #define HCLGE_TYPE_CSQ 1 |
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c index bb45365fb817..c1cdbfd83bdb 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c | |||
@@ -46,17 +46,7 @@ static const struct pci_device_id ae_algo_pci_tbl[] = { | |||
46 | {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 0}, | 46 | {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 0}, |
47 | {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0}, | 47 | {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0}, |
48 | {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0}, | 48 | {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0}, |
49 | /* Required last entry */ | 49 | /* required last entry */ |
50 | {0, } | ||
51 | }; | ||
52 | |||
53 | static const struct pci_device_id roce_pci_tbl[] = { | ||
54 | {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), 0}, | ||
55 | {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), 0}, | ||
56 | {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 0}, | ||
57 | {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0}, | ||
58 | {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0}, | ||
59 | /* Required last entry */ | ||
60 | {0, } | 50 | {0, } |
61 | }; | 51 | }; |
62 | 52 | ||
@@ -894,7 +884,7 @@ static int hclge_query_pf_resource(struct hclge_dev *hdev) | |||
894 | hdev->num_tqps = __le16_to_cpu(req->tqp_num); | 884 | hdev->num_tqps = __le16_to_cpu(req->tqp_num); |
895 | hdev->pkt_buf_size = __le16_to_cpu(req->buf_size) << HCLGE_BUF_UNIT_S; | 885 | hdev->pkt_buf_size = __le16_to_cpu(req->buf_size) << HCLGE_BUF_UNIT_S; |
896 | 886 | ||
897 | if (hnae_get_bit(hdev->ae_dev->flag, HNAE_DEV_SUPPORT_ROCE_B)) { | 887 | if (hnae3_dev_roce_supported(hdev)) { |
898 | hdev->num_roce_msix = | 888 | hdev->num_roce_msix = |
899 | hnae_get_field(__le16_to_cpu(req->pf_intr_vector_number), | 889 | hnae_get_field(__le16_to_cpu(req->pf_intr_vector_number), |
900 | HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S); | 890 | HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S); |
@@ -1063,9 +1053,9 @@ static int hclge_configure(struct hclge_dev *hdev) | |||
1063 | hdev->base_tqp_pid = 0; | 1053 | hdev->base_tqp_pid = 0; |
1064 | hdev->rss_size_max = 1; | 1054 | hdev->rss_size_max = 1; |
1065 | hdev->rx_buf_len = cfg.rx_buf_len; | 1055 | hdev->rx_buf_len = cfg.rx_buf_len; |
1066 | for (i = 0; i < ETH_ALEN; i++) | 1056 | ether_addr_copy(hdev->hw.mac.mac_addr, cfg.mac_addr); |
1067 | hdev->hw.mac.mac_addr[i] = cfg.mac_addr[i]; | ||
1068 | hdev->hw.mac.media_type = cfg.media_type; | 1057 | hdev->hw.mac.media_type = cfg.media_type; |
1058 | hdev->hw.mac.phy_addr = cfg.phy_addr; | ||
1069 | hdev->num_desc = cfg.tqp_desc_num; | 1059 | hdev->num_desc = cfg.tqp_desc_num; |
1070 | hdev->tm_info.num_pg = 1; | 1060 | hdev->tm_info.num_pg = 1; |
1071 | hdev->tm_info.num_tc = cfg.tc_num; | 1061 | hdev->tm_info.num_tc = cfg.tc_num; |
@@ -1454,7 +1444,11 @@ static bool hclge_is_rx_buf_ok(struct hclge_dev *hdev, u32 rx_all) | |||
1454 | tc_num = hclge_get_tc_num(hdev); | 1444 | tc_num = hclge_get_tc_num(hdev); |
1455 | pfc_enable_num = hclge_get_pfc_enalbe_num(hdev); | 1445 | pfc_enable_num = hclge_get_pfc_enalbe_num(hdev); |
1456 | 1446 | ||
1457 | shared_buf_min = 2 * hdev->mps + HCLGE_DEFAULT_DV; | 1447 | if (hnae3_dev_dcb_supported(hdev)) |
1448 | shared_buf_min = 2 * hdev->mps + HCLGE_DEFAULT_DV; | ||
1449 | else | ||
1450 | shared_buf_min = 2 * hdev->mps + HCLGE_DEFAULT_NON_DCB_DV; | ||
1451 | |||
1458 | shared_buf_tc = pfc_enable_num * hdev->mps + | 1452 | shared_buf_tc = pfc_enable_num * hdev->mps + |
1459 | (tc_num - pfc_enable_num) * hdev->mps / 2 + | 1453 | (tc_num - pfc_enable_num) * hdev->mps / 2 + |
1460 | hdev->mps; | 1454 | hdev->mps; |
@@ -1495,6 +1489,16 @@ int hclge_rx_buffer_calc(struct hclge_dev *hdev, u32 tx_size) | |||
1495 | struct hclge_priv_buf *priv; | 1489 | struct hclge_priv_buf *priv; |
1496 | int i; | 1490 | int i; |
1497 | 1491 | ||
1492 | /* When DCB is not supported, rx private | ||
1493 | * buffer is not allocated. | ||
1494 | */ | ||
1495 | if (!hnae3_dev_dcb_supported(hdev)) { | ||
1496 | if (!hclge_is_rx_buf_ok(hdev, rx_all)) | ||
1497 | return -ENOMEM; | ||
1498 | |||
1499 | return 0; | ||
1500 | } | ||
1501 | |||
1498 | /* step 1, try to alloc private buffer for all enabled tc */ | 1502 | /* step 1, try to alloc private buffer for all enabled tc */ |
1499 | for (i = 0; i < HCLGE_MAX_TC_NUM; i++) { | 1503 | for (i = 0; i < HCLGE_MAX_TC_NUM; i++) { |
1500 | priv = &hdev->priv_buf[i]; | 1504 | priv = &hdev->priv_buf[i]; |
@@ -1510,6 +1514,11 @@ int hclge_rx_buffer_calc(struct hclge_dev *hdev, u32 tx_size) | |||
1510 | priv->wl.high = 2 * hdev->mps; | 1514 | priv->wl.high = 2 * hdev->mps; |
1511 | priv->buf_size = priv->wl.high; | 1515 | priv->buf_size = priv->wl.high; |
1512 | } | 1516 | } |
1517 | } else { | ||
1518 | priv->enable = 0; | ||
1519 | priv->wl.low = 0; | ||
1520 | priv->wl.high = 0; | ||
1521 | priv->buf_size = 0; | ||
1513 | } | 1522 | } |
1514 | } | 1523 | } |
1515 | 1524 | ||
@@ -1522,8 +1531,15 @@ int hclge_rx_buffer_calc(struct hclge_dev *hdev, u32 tx_size) | |||
1522 | for (i = 0; i < HCLGE_MAX_TC_NUM; i++) { | 1531 | for (i = 0; i < HCLGE_MAX_TC_NUM; i++) { |
1523 | priv = &hdev->priv_buf[i]; | 1532 | priv = &hdev->priv_buf[i]; |
1524 | 1533 | ||
1525 | if (hdev->hw_tc_map & BIT(i)) | 1534 | priv->enable = 0; |
1526 | priv->enable = 1; | 1535 | priv->wl.low = 0; |
1536 | priv->wl.high = 0; | ||
1537 | priv->buf_size = 0; | ||
1538 | |||
1539 | if (!(hdev->hw_tc_map & BIT(i))) | ||
1540 | continue; | ||
1541 | |||
1542 | priv->enable = 1; | ||
1527 | 1543 | ||
1528 | if (hdev->tm_info.hw_pfc_map & BIT(i)) { | 1544 | if (hdev->tm_info.hw_pfc_map & BIT(i)) { |
1529 | priv->wl.low = 128; | 1545 | priv->wl.low = 128; |
@@ -1616,6 +1632,10 @@ static int hclge_rx_priv_buf_alloc(struct hclge_dev *hdev) | |||
1616 | cpu_to_le16(true << HCLGE_TC0_PRI_BUF_EN_B); | 1632 | cpu_to_le16(true << HCLGE_TC0_PRI_BUF_EN_B); |
1617 | } | 1633 | } |
1618 | 1634 | ||
1635 | req->shared_buf = | ||
1636 | cpu_to_le16((hdev->s_buf.buf_size >> HCLGE_BUF_UNIT_S) | | ||
1637 | (1 << HCLGE_TC0_PRI_BUF_EN_B)); | ||
1638 | |||
1619 | ret = hclge_cmd_send(&hdev->hw, &desc, 1); | 1639 | ret = hclge_cmd_send(&hdev->hw, &desc, 1); |
1620 | if (ret) { | 1640 | if (ret) { |
1621 | dev_err(&hdev->pdev->dev, | 1641 | dev_err(&hdev->pdev->dev, |
@@ -1782,18 +1802,22 @@ int hclge_buffer_alloc(struct hclge_dev *hdev) | |||
1782 | return ret; | 1802 | return ret; |
1783 | } | 1803 | } |
1784 | 1804 | ||
1785 | ret = hclge_rx_priv_wl_config(hdev); | 1805 | if (hnae3_dev_dcb_supported(hdev)) { |
1786 | if (ret) { | 1806 | ret = hclge_rx_priv_wl_config(hdev); |
1787 | dev_err(&hdev->pdev->dev, | 1807 | if (ret) { |
1788 | "could not configure rx private waterline %d\n", ret); | 1808 | dev_err(&hdev->pdev->dev, |
1789 | return ret; | 1809 | "could not configure rx private waterline %d\n", |
1790 | } | 1810 | ret); |
1811 | return ret; | ||
1812 | } | ||
1791 | 1813 | ||
1792 | ret = hclge_common_thrd_config(hdev); | 1814 | ret = hclge_common_thrd_config(hdev); |
1793 | if (ret) { | 1815 | if (ret) { |
1794 | dev_err(&hdev->pdev->dev, | 1816 | dev_err(&hdev->pdev->dev, |
1795 | "could not configure common threshold %d\n", ret); | 1817 | "could not configure common threshold %d\n", |
1796 | return ret; | 1818 | ret); |
1819 | return ret; | ||
1820 | } | ||
1797 | } | 1821 | } |
1798 | 1822 | ||
1799 | ret = hclge_common_wl_config(hdev); | 1823 | ret = hclge_common_wl_config(hdev); |
@@ -2582,6 +2606,7 @@ static int hclge_rss_init_hw(struct hclge_dev *hdev) | |||
2582 | u16 tc_valid[HCLGE_MAX_TC_NUM]; | 2606 | u16 tc_valid[HCLGE_MAX_TC_NUM]; |
2583 | u16 tc_size[HCLGE_MAX_TC_NUM]; | 2607 | u16 tc_size[HCLGE_MAX_TC_NUM]; |
2584 | u32 *rss_indir = NULL; | 2608 | u32 *rss_indir = NULL; |
2609 | u16 rss_size = 0, roundup_size; | ||
2585 | const u8 *key; | 2610 | const u8 *key; |
2586 | int i, ret, j; | 2611 | int i, ret, j; |
2587 | 2612 | ||
@@ -2596,7 +2621,13 @@ static int hclge_rss_init_hw(struct hclge_dev *hdev) | |||
2596 | for (j = 0; j < hdev->num_vmdq_vport + 1; j++) { | 2621 | for (j = 0; j < hdev->num_vmdq_vport + 1; j++) { |
2597 | for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++) { | 2622 | for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++) { |
2598 | vport[j].rss_indirection_tbl[i] = | 2623 | vport[j].rss_indirection_tbl[i] = |
2599 | i % hdev->rss_size_max; | 2624 | i % vport[j].alloc_rss_size; |
2625 | |||
2626 | /* vport 0 is for PF */ | ||
2627 | if (j != 0) | ||
2628 | continue; | ||
2629 | |||
2630 | rss_size = vport[j].alloc_rss_size; | ||
2600 | rss_indir[i] = vport[j].rss_indirection_tbl[i]; | 2631 | rss_indir[i] = vport[j].rss_indirection_tbl[i]; |
2601 | } | 2632 | } |
2602 | } | 2633 | } |
@@ -2613,42 +2644,32 @@ static int hclge_rss_init_hw(struct hclge_dev *hdev) | |||
2613 | if (ret) | 2644 | if (ret) |
2614 | goto err; | 2645 | goto err; |
2615 | 2646 | ||
2647 | /* Each TC have the same queue size, and tc_size set to hardware is | ||
2648 | * the log2 of roundup power of two of rss_size, the acutal queue | ||
2649 | * size is limited by indirection table. | ||
2650 | */ | ||
2651 | if (rss_size > HCLGE_RSS_TC_SIZE_7 || rss_size == 0) { | ||
2652 | dev_err(&hdev->pdev->dev, | ||
2653 | "Configure rss tc size failed, invalid TC_SIZE = %d\n", | ||
2654 | rss_size); | ||
2655 | ret = -EINVAL; | ||
2656 | goto err; | ||
2657 | } | ||
2658 | |||
2659 | roundup_size = roundup_pow_of_two(rss_size); | ||
2660 | roundup_size = ilog2(roundup_size); | ||
2661 | |||
2616 | for (i = 0; i < HCLGE_MAX_TC_NUM; i++) { | 2662 | for (i = 0; i < HCLGE_MAX_TC_NUM; i++) { |
2617 | if (hdev->hw_tc_map & BIT(i)) | 2663 | tc_valid[i] = 0; |
2618 | tc_valid[i] = 1; | ||
2619 | else | ||
2620 | tc_valid[i] = 0; | ||
2621 | 2664 | ||
2622 | switch (hdev->rss_size_max) { | 2665 | if (!(hdev->hw_tc_map & BIT(i))) |
2623 | case HCLGE_RSS_TC_SIZE_0: | 2666 | continue; |
2624 | tc_size[i] = 0; | 2667 | |
2625 | break; | 2668 | tc_valid[i] = 1; |
2626 | case HCLGE_RSS_TC_SIZE_1: | 2669 | tc_size[i] = roundup_size; |
2627 | tc_size[i] = 1; | 2670 | tc_offset[i] = rss_size * i; |
2628 | break; | ||
2629 | case HCLGE_RSS_TC_SIZE_2: | ||
2630 | tc_size[i] = 2; | ||
2631 | break; | ||
2632 | case HCLGE_RSS_TC_SIZE_3: | ||
2633 | tc_size[i] = 3; | ||
2634 | break; | ||
2635 | case HCLGE_RSS_TC_SIZE_4: | ||
2636 | tc_size[i] = 4; | ||
2637 | break; | ||
2638 | case HCLGE_RSS_TC_SIZE_5: | ||
2639 | tc_size[i] = 5; | ||
2640 | break; | ||
2641 | case HCLGE_RSS_TC_SIZE_6: | ||
2642 | tc_size[i] = 6; | ||
2643 | break; | ||
2644 | case HCLGE_RSS_TC_SIZE_7: | ||
2645 | tc_size[i] = 7; | ||
2646 | break; | ||
2647 | default: | ||
2648 | break; | ||
2649 | } | ||
2650 | tc_offset[i] = hdev->rss_size_max * i; | ||
2651 | } | 2671 | } |
2672 | |||
2652 | ret = hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset); | 2673 | ret = hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset); |
2653 | 2674 | ||
2654 | err: | 2675 | err: |
@@ -2679,7 +2700,11 @@ int hclge_map_vport_ring_to_vector(struct hclge_vport *vport, int vector_id, | |||
2679 | hnae_get_bit(node->flag, HNAE3_RING_TYPE_B)); | 2700 | hnae_get_bit(node->flag, HNAE3_RING_TYPE_B)); |
2680 | hnae_set_field(req->tqp_type_and_id[i], HCLGE_TQP_ID_M, | 2701 | hnae_set_field(req->tqp_type_and_id[i], HCLGE_TQP_ID_M, |
2681 | HCLGE_TQP_ID_S, node->tqp_index); | 2702 | HCLGE_TQP_ID_S, node->tqp_index); |
2703 | hnae_set_field(req->tqp_type_and_id[i], HCLGE_INT_GL_IDX_M, | ||
2704 | HCLGE_INT_GL_IDX_S, | ||
2705 | hnae_get_bit(node->flag, HNAE3_RING_TYPE_B)); | ||
2682 | req->tqp_type_and_id[i] = cpu_to_le16(req->tqp_type_and_id[i]); | 2706 | req->tqp_type_and_id[i] = cpu_to_le16(req->tqp_type_and_id[i]); |
2707 | req->vfid = vport->vport_id; | ||
2683 | 2708 | ||
2684 | if (++i >= HCLGE_VECTOR_ELEMENTS_PER_CMD) { | 2709 | if (++i >= HCLGE_VECTOR_ELEMENTS_PER_CMD) { |
2685 | req->int_cause_num = HCLGE_VECTOR_ELEMENTS_PER_CMD; | 2710 | req->int_cause_num = HCLGE_VECTOR_ELEMENTS_PER_CMD; |
@@ -2763,8 +2788,12 @@ static int hclge_unmap_ring_from_vector( | |||
2763 | hnae_get_bit(node->flag, HNAE3_RING_TYPE_B)); | 2788 | hnae_get_bit(node->flag, HNAE3_RING_TYPE_B)); |
2764 | hnae_set_field(req->tqp_type_and_id[i], HCLGE_TQP_ID_M, | 2789 | hnae_set_field(req->tqp_type_and_id[i], HCLGE_TQP_ID_M, |
2765 | HCLGE_TQP_ID_S, node->tqp_index); | 2790 | HCLGE_TQP_ID_S, node->tqp_index); |
2791 | hnae_set_field(req->tqp_type_and_id[i], HCLGE_INT_GL_IDX_M, | ||
2792 | HCLGE_INT_GL_IDX_S, | ||
2793 | hnae_get_bit(node->flag, HNAE3_RING_TYPE_B)); | ||
2766 | 2794 | ||
2767 | req->tqp_type_and_id[i] = cpu_to_le16(req->tqp_type_and_id[i]); | 2795 | req->tqp_type_and_id[i] = cpu_to_le16(req->tqp_type_and_id[i]); |
2796 | req->vfid = vport->vport_id; | ||
2768 | 2797 | ||
2769 | if (++i >= HCLGE_VECTOR_ELEMENTS_PER_CMD) { | 2798 | if (++i >= HCLGE_VECTOR_ELEMENTS_PER_CMD) { |
2770 | req->int_cause_num = HCLGE_VECTOR_ELEMENTS_PER_CMD; | 2799 | req->int_cause_num = HCLGE_VECTOR_ELEMENTS_PER_CMD; |
@@ -2778,7 +2807,7 @@ static int hclge_unmap_ring_from_vector( | |||
2778 | } | 2807 | } |
2779 | i = 0; | 2808 | i = 0; |
2780 | hclge_cmd_setup_basic_desc(&desc, | 2809 | hclge_cmd_setup_basic_desc(&desc, |
2781 | HCLGE_OPC_ADD_RING_TO_VECTOR, | 2810 | HCLGE_OPC_DEL_RING_TO_VECTOR, |
2782 | false); | 2811 | false); |
2783 | req->int_vector_id = vector_id; | 2812 | req->int_vector_id = vector_id; |
2784 | } | 2813 | } |
@@ -3665,6 +3694,7 @@ static int hclge_init_vlan_config(struct hclge_dev *hdev) | |||
3665 | { | 3694 | { |
3666 | #define HCLGE_VLAN_TYPE_VF_TABLE 0 | 3695 | #define HCLGE_VLAN_TYPE_VF_TABLE 0 |
3667 | #define HCLGE_VLAN_TYPE_PORT_TABLE 1 | 3696 | #define HCLGE_VLAN_TYPE_PORT_TABLE 1 |
3697 | struct hnae3_handle *handle; | ||
3668 | int ret; | 3698 | int ret; |
3669 | 3699 | ||
3670 | ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_VLAN_TYPE_VF_TABLE, | 3700 | ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_VLAN_TYPE_VF_TABLE, |
@@ -3674,8 +3704,11 @@ static int hclge_init_vlan_config(struct hclge_dev *hdev) | |||
3674 | 3704 | ||
3675 | ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_VLAN_TYPE_PORT_TABLE, | 3705 | ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_VLAN_TYPE_PORT_TABLE, |
3676 | true); | 3706 | true); |
3707 | if (ret) | ||
3708 | return ret; | ||
3677 | 3709 | ||
3678 | return ret; | 3710 | handle = &hdev->vport[0].nic; |
3711 | return hclge_set_port_vlan_filter(handle, htons(ETH_P_8021Q), 0, false); | ||
3679 | } | 3712 | } |
3680 | 3713 | ||
3681 | static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu) | 3714 | static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu) |
@@ -3920,8 +3953,7 @@ static int hclge_init_client_instance(struct hnae3_client *client, | |||
3920 | goto err; | 3953 | goto err; |
3921 | 3954 | ||
3922 | if (hdev->roce_client && | 3955 | if (hdev->roce_client && |
3923 | hnae_get_bit(hdev->ae_dev->flag, | 3956 | hnae3_dev_roce_supported(hdev)) { |
3924 | HNAE_DEV_SUPPORT_ROCE_B)) { | ||
3925 | struct hnae3_client *rc = hdev->roce_client; | 3957 | struct hnae3_client *rc = hdev->roce_client; |
3926 | 3958 | ||
3927 | ret = hclge_init_roce_base_info(vport); | 3959 | ret = hclge_init_roce_base_info(vport); |
@@ -3944,8 +3976,7 @@ static int hclge_init_client_instance(struct hnae3_client *client, | |||
3944 | 3976 | ||
3945 | break; | 3977 | break; |
3946 | case HNAE3_CLIENT_ROCE: | 3978 | case HNAE3_CLIENT_ROCE: |
3947 | if (hnae_get_bit(hdev->ae_dev->flag, | 3979 | if (hnae3_dev_roce_supported(hdev)) { |
3948 | HNAE_DEV_SUPPORT_ROCE_B)) { | ||
3949 | hdev->roce_client = client; | 3980 | hdev->roce_client = client; |
3950 | vport->roce.client = client; | 3981 | vport->roce.client = client; |
3951 | } | 3982 | } |
@@ -4057,7 +4088,6 @@ static void hclge_pci_uninit(struct hclge_dev *hdev) | |||
4057 | static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev) | 4088 | static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev) |
4058 | { | 4089 | { |
4059 | struct pci_dev *pdev = ae_dev->pdev; | 4090 | struct pci_dev *pdev = ae_dev->pdev; |
4060 | const struct pci_device_id *id; | ||
4061 | struct hclge_dev *hdev; | 4091 | struct hclge_dev *hdev; |
4062 | int ret; | 4092 | int ret; |
4063 | 4093 | ||
@@ -4072,10 +4102,6 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev) | |||
4072 | hdev->ae_dev = ae_dev; | 4102 | hdev->ae_dev = ae_dev; |
4073 | ae_dev->priv = hdev; | 4103 | ae_dev->priv = hdev; |
4074 | 4104 | ||
4075 | id = pci_match_id(roce_pci_tbl, ae_dev->pdev); | ||
4076 | if (id) | ||
4077 | hnae_set_bit(ae_dev->flag, HNAE_DEV_SUPPORT_ROCE_B, 1); | ||
4078 | |||
4079 | ret = hclge_pci_init(hdev); | 4105 | ret = hclge_pci_init(hdev); |
4080 | if (ret) { | 4106 | if (ret) { |
4081 | dev_err(&pdev->dev, "PCI init failed\n"); | 4107 | dev_err(&pdev->dev, "PCI init failed\n"); |
@@ -4138,12 +4164,6 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev) | |||
4138 | return ret; | 4164 | return ret; |
4139 | } | 4165 | } |
4140 | 4166 | ||
4141 | ret = hclge_rss_init_hw(hdev); | ||
4142 | if (ret) { | ||
4143 | dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret); | ||
4144 | return ret; | ||
4145 | } | ||
4146 | |||
4147 | ret = hclge_init_vlan_config(hdev); | 4167 | ret = hclge_init_vlan_config(hdev); |
4148 | if (ret) { | 4168 | if (ret) { |
4149 | dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret); | 4169 | dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret); |
@@ -4156,6 +4176,12 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev) | |||
4156 | return ret; | 4176 | return ret; |
4157 | } | 4177 | } |
4158 | 4178 | ||
4179 | ret = hclge_rss_init_hw(hdev); | ||
4180 | if (ret) { | ||
4181 | dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret); | ||
4182 | return ret; | ||
4183 | } | ||
4184 | |||
4159 | setup_timer(&hdev->service_timer, hclge_service_timer, | 4185 | setup_timer(&hdev->service_timer, hclge_service_timer, |
4160 | (unsigned long)hdev); | 4186 | (unsigned long)hdev); |
4161 | INIT_WORK(&hdev->service_task, hclge_service_task); | 4187 | INIT_WORK(&hdev->service_task, hclge_service_task); |
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h index edb10ad075eb..9fcfd9395424 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h | |||
@@ -176,7 +176,6 @@ struct hclge_pg_info { | |||
176 | struct hclge_tc_info { | 176 | struct hclge_tc_info { |
177 | u8 tc_id; | 177 | u8 tc_id; |
178 | u8 tc_sch_mode; /* 0: sp; 1: dwrr */ | 178 | u8 tc_sch_mode; /* 0: sp; 1: dwrr */ |
179 | u8 up; | ||
180 | u8 pgid; | 179 | u8 pgid; |
181 | u32 bw_limit; | 180 | u32 bw_limit; |
182 | }; | 181 | }; |
@@ -197,6 +196,7 @@ struct hclge_tm_info { | |||
197 | u8 num_tc; | 196 | u8 num_tc; |
198 | u8 num_pg; /* It must be 1 if vNET-Base schd */ | 197 | u8 num_pg; /* It must be 1 if vNET-Base schd */ |
199 | u8 pg_dwrr[HCLGE_PG_NUM]; | 198 | u8 pg_dwrr[HCLGE_PG_NUM]; |
199 | u8 prio_tc[HNAE3_MAX_USER_PRIO]; | ||
200 | struct hclge_pg_info pg_info[HCLGE_PG_NUM]; | 200 | struct hclge_pg_info pg_info[HCLGE_PG_NUM]; |
201 | struct hclge_tc_info tc_info[HNAE3_MAX_TC]; | 201 | struct hclge_tc_info tc_info[HNAE3_MAX_TC]; |
202 | enum hclge_fc_mode fc_mode; | 202 | enum hclge_fc_mode fc_mode; |
@@ -477,6 +477,7 @@ struct hclge_vport { | |||
477 | u8 rss_hash_key[HCLGE_RSS_KEY_SIZE]; /* User configured hash keys */ | 477 | u8 rss_hash_key[HCLGE_RSS_KEY_SIZE]; /* User configured hash keys */ |
478 | /* User configured lookup table entries */ | 478 | /* User configured lookup table entries */ |
479 | u8 rss_indirection_tbl[HCLGE_RSS_IND_TBL_SIZE]; | 479 | u8 rss_indirection_tbl[HCLGE_RSS_IND_TBL_SIZE]; |
480 | u16 alloc_rss_size; | ||
480 | 481 | ||
481 | u16 qs_offset; | 482 | u16 qs_offset; |
482 | u16 bw_limit; /* VSI BW Limit (0 = disabled) */ | 483 | u16 bw_limit; /* VSI BW Limit (0 = disabled) */ |
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c index 1c577d268f00..73a75d7cc551 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c | |||
@@ -128,9 +128,7 @@ static int hclge_fill_pri_array(struct hclge_dev *hdev, u8 *pri, u8 pri_id) | |||
128 | { | 128 | { |
129 | u8 tc; | 129 | u8 tc; |
130 | 130 | ||
131 | for (tc = 0; tc < hdev->tm_info.num_tc; tc++) | 131 | tc = hdev->tm_info.prio_tc[pri_id]; |
132 | if (hdev->tm_info.tc_info[tc].up == pri_id) | ||
133 | break; | ||
134 | 132 | ||
135 | if (tc >= hdev->tm_info.num_tc) | 133 | if (tc >= hdev->tm_info.num_tc) |
136 | return -EINVAL; | 134 | return -EINVAL; |
@@ -158,7 +156,7 @@ static int hclge_up_to_tc_map(struct hclge_dev *hdev) | |||
158 | 156 | ||
159 | hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PRI_TO_TC_MAPPING, false); | 157 | hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PRI_TO_TC_MAPPING, false); |
160 | 158 | ||
161 | for (pri_id = 0; pri_id < hdev->tm_info.num_tc; pri_id++) { | 159 | for (pri_id = 0; pri_id < HNAE3_MAX_USER_PRIO; pri_id++) { |
162 | ret = hclge_fill_pri_array(hdev, pri, pri_id); | 160 | ret = hclge_fill_pri_array(hdev, pri, pri_id); |
163 | if (ret) | 161 | if (ret) |
164 | return ret; | 162 | return ret; |
@@ -280,11 +278,11 @@ static int hclge_tm_pg_shapping_cfg(struct hclge_dev *hdev, | |||
280 | 278 | ||
281 | shap_cfg_cmd->pg_id = pg_id; | 279 | shap_cfg_cmd->pg_id = pg_id; |
282 | 280 | ||
283 | hclge_tm_set_feild(shap_cfg_cmd->pg_shapping_para, IR_B, ir_b); | 281 | hclge_tm_set_field(shap_cfg_cmd->pg_shapping_para, IR_B, ir_b); |
284 | hclge_tm_set_feild(shap_cfg_cmd->pg_shapping_para, IR_U, ir_u); | 282 | hclge_tm_set_field(shap_cfg_cmd->pg_shapping_para, IR_U, ir_u); |
285 | hclge_tm_set_feild(shap_cfg_cmd->pg_shapping_para, IR_S, ir_s); | 283 | hclge_tm_set_field(shap_cfg_cmd->pg_shapping_para, IR_S, ir_s); |
286 | hclge_tm_set_feild(shap_cfg_cmd->pg_shapping_para, BS_B, bs_b); | 284 | hclge_tm_set_field(shap_cfg_cmd->pg_shapping_para, BS_B, bs_b); |
287 | hclge_tm_set_feild(shap_cfg_cmd->pg_shapping_para, BS_S, bs_s); | 285 | hclge_tm_set_field(shap_cfg_cmd->pg_shapping_para, BS_S, bs_s); |
288 | 286 | ||
289 | return hclge_cmd_send(&hdev->hw, &desc, 1); | 287 | return hclge_cmd_send(&hdev->hw, &desc, 1); |
290 | } | 288 | } |
@@ -307,11 +305,11 @@ static int hclge_tm_pri_shapping_cfg(struct hclge_dev *hdev, | |||
307 | 305 | ||
308 | shap_cfg_cmd->pri_id = pri_id; | 306 | shap_cfg_cmd->pri_id = pri_id; |
309 | 307 | ||
310 | hclge_tm_set_feild(shap_cfg_cmd->pri_shapping_para, IR_B, ir_b); | 308 | hclge_tm_set_field(shap_cfg_cmd->pri_shapping_para, IR_B, ir_b); |
311 | hclge_tm_set_feild(shap_cfg_cmd->pri_shapping_para, IR_U, ir_u); | 309 | hclge_tm_set_field(shap_cfg_cmd->pri_shapping_para, IR_U, ir_u); |
312 | hclge_tm_set_feild(shap_cfg_cmd->pri_shapping_para, IR_S, ir_s); | 310 | hclge_tm_set_field(shap_cfg_cmd->pri_shapping_para, IR_S, ir_s); |
313 | hclge_tm_set_feild(shap_cfg_cmd->pri_shapping_para, BS_B, bs_b); | 311 | hclge_tm_set_field(shap_cfg_cmd->pri_shapping_para, BS_B, bs_b); |
314 | hclge_tm_set_feild(shap_cfg_cmd->pri_shapping_para, BS_S, bs_s); | 312 | hclge_tm_set_field(shap_cfg_cmd->pri_shapping_para, BS_S, bs_s); |
315 | 313 | ||
316 | return hclge_cmd_send(&hdev->hw, &desc, 1); | 314 | return hclge_cmd_send(&hdev->hw, &desc, 1); |
317 | } | 315 | } |
@@ -397,6 +395,7 @@ static void hclge_tm_vport_tc_info_update(struct hclge_vport *vport) | |||
397 | kinfo->num_tqps / kinfo->num_tc); | 395 | kinfo->num_tqps / kinfo->num_tc); |
398 | vport->qs_offset = hdev->tm_info.num_tc * vport->vport_id; | 396 | vport->qs_offset = hdev->tm_info.num_tc * vport->vport_id; |
399 | vport->dwrr = 100; /* 100 percent as init */ | 397 | vport->dwrr = 100; /* 100 percent as init */ |
398 | vport->alloc_rss_size = kinfo->rss_size; | ||
400 | 399 | ||
401 | for (i = 0; i < kinfo->num_tc; i++) { | 400 | for (i = 0; i < kinfo->num_tc; i++) { |
402 | if (hdev->hw_tc_map & BIT(i)) { | 401 | if (hdev->hw_tc_map & BIT(i)) { |
@@ -404,16 +403,17 @@ static void hclge_tm_vport_tc_info_update(struct hclge_vport *vport) | |||
404 | kinfo->tc_info[i].tqp_offset = i * kinfo->rss_size; | 403 | kinfo->tc_info[i].tqp_offset = i * kinfo->rss_size; |
405 | kinfo->tc_info[i].tqp_count = kinfo->rss_size; | 404 | kinfo->tc_info[i].tqp_count = kinfo->rss_size; |
406 | kinfo->tc_info[i].tc = i; | 405 | kinfo->tc_info[i].tc = i; |
407 | kinfo->tc_info[i].up = hdev->tm_info.tc_info[i].up; | ||
408 | } else { | 406 | } else { |
409 | /* Set to default queue if TC is disable */ | 407 | /* Set to default queue if TC is disable */ |
410 | kinfo->tc_info[i].enable = false; | 408 | kinfo->tc_info[i].enable = false; |
411 | kinfo->tc_info[i].tqp_offset = 0; | 409 | kinfo->tc_info[i].tqp_offset = 0; |
412 | kinfo->tc_info[i].tqp_count = 1; | 410 | kinfo->tc_info[i].tqp_count = 1; |
413 | kinfo->tc_info[i].tc = 0; | 411 | kinfo->tc_info[i].tc = 0; |
414 | kinfo->tc_info[i].up = 0; | ||
415 | } | 412 | } |
416 | } | 413 | } |
414 | |||
415 | memcpy(kinfo->prio_tc, hdev->tm_info.prio_tc, | ||
416 | FIELD_SIZEOF(struct hnae3_knic_private_info, prio_tc)); | ||
417 | } | 417 | } |
418 | 418 | ||
419 | static void hclge_tm_vport_info_update(struct hclge_dev *hdev) | 419 | static void hclge_tm_vport_info_update(struct hclge_dev *hdev) |
@@ -435,12 +435,15 @@ static void hclge_tm_tc_info_init(struct hclge_dev *hdev) | |||
435 | for (i = 0; i < hdev->tm_info.num_tc; i++) { | 435 | for (i = 0; i < hdev->tm_info.num_tc; i++) { |
436 | hdev->tm_info.tc_info[i].tc_id = i; | 436 | hdev->tm_info.tc_info[i].tc_id = i; |
437 | hdev->tm_info.tc_info[i].tc_sch_mode = HCLGE_SCH_MODE_DWRR; | 437 | hdev->tm_info.tc_info[i].tc_sch_mode = HCLGE_SCH_MODE_DWRR; |
438 | hdev->tm_info.tc_info[i].up = i; | ||
439 | hdev->tm_info.tc_info[i].pgid = 0; | 438 | hdev->tm_info.tc_info[i].pgid = 0; |
440 | hdev->tm_info.tc_info[i].bw_limit = | 439 | hdev->tm_info.tc_info[i].bw_limit = |
441 | hdev->tm_info.pg_info[0].bw_limit; | 440 | hdev->tm_info.pg_info[0].bw_limit; |
442 | } | 441 | } |
443 | 442 | ||
443 | for (i = 0; i < HNAE3_MAX_USER_PRIO; i++) | ||
444 | hdev->tm_info.prio_tc[i] = | ||
445 | (i >= hdev->tm_info.num_tc) ? 0 : i; | ||
446 | |||
444 | hdev->flag &= ~HCLGE_FLAG_DCB_ENABLE; | 447 | hdev->flag &= ~HCLGE_FLAG_DCB_ENABLE; |
445 | } | 448 | } |
446 | 449 | ||
@@ -976,6 +979,10 @@ int hclge_pause_setup_hw(struct hclge_dev *hdev) | |||
976 | if (ret) | 979 | if (ret) |
977 | return ret; | 980 | return ret; |
978 | 981 | ||
982 | /* Only DCB-supported dev supports qset back pressure setting */ | ||
983 | if (!hnae3_dev_dcb_supported(hdev)) | ||
984 | return 0; | ||
985 | |||
979 | for (i = 0; i < hdev->tm_info.num_tc; i++) { | 986 | for (i = 0; i < hdev->tm_info.num_tc; i++) { |
980 | ret = hclge_tm_qs_bp_cfg(hdev, i); | 987 | ret = hclge_tm_qs_bp_cfg(hdev, i); |
981 | if (ret) | 988 | if (ret) |
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h index 7e67337dfaf2..85158b0d73fe 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h | |||
@@ -94,10 +94,10 @@ struct hclge_bp_to_qs_map_cmd { | |||
94 | u32 rsvd1; | 94 | u32 rsvd1; |
95 | }; | 95 | }; |
96 | 96 | ||
97 | #define hclge_tm_set_feild(dest, string, val) \ | 97 | #define hclge_tm_set_field(dest, string, val) \ |
98 | hnae_set_field((dest), (HCLGE_TM_SHAP_##string##_MSK), \ | 98 | hnae_set_field((dest), (HCLGE_TM_SHAP_##string##_MSK), \ |
99 | (HCLGE_TM_SHAP_##string##_LSH), val) | 99 | (HCLGE_TM_SHAP_##string##_LSH), val) |
100 | #define hclge_tm_get_feild(src, string) \ | 100 | #define hclge_tm_get_field(src, string) \ |
101 | hnae_get_field((src), (HCLGE_TM_SHAP_##string##_MSK), \ | 101 | hnae_get_field((src), (HCLGE_TM_SHAP_##string##_MSK), \ |
102 | (HCLGE_TM_SHAP_##string##_LSH)) | 102 | (HCLGE_TM_SHAP_##string##_LSH)) |
103 | 103 | ||
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.c index 1c3e29447891..35369e1c8036 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.c | |||
@@ -41,11 +41,16 @@ static struct hnae3_client client; | |||
41 | static const struct pci_device_id hns3_pci_tbl[] = { | 41 | static const struct pci_device_id hns3_pci_tbl[] = { |
42 | {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0}, | 42 | {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0}, |
43 | {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0}, | 43 | {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0}, |
44 | {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), 0}, | 44 | {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), |
45 | {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), 0}, | 45 | HNAE3_DEV_SUPPORT_ROCE_DCB_BITS}, |
46 | {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 0}, | 46 | {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), |
47 | {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0}, | 47 | HNAE3_DEV_SUPPORT_ROCE_DCB_BITS}, |
48 | {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0}, | 48 | {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), |
49 | HNAE3_DEV_SUPPORT_ROCE_DCB_BITS}, | ||
50 | {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), | ||
51 | HNAE3_DEV_SUPPORT_ROCE_DCB_BITS}, | ||
52 | {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), | ||
53 | HNAE3_DEV_SUPPORT_ROCE_DCB_BITS}, | ||
49 | /* required last entry */ | 54 | /* required last entry */ |
50 | {0, } | 55 | {0, } |
51 | }; | 56 | }; |
@@ -1348,6 +1353,7 @@ static int hns3_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
1348 | } | 1353 | } |
1349 | 1354 | ||
1350 | ae_dev->pdev = pdev; | 1355 | ae_dev->pdev = pdev; |
1356 | ae_dev->flag = ent->driver_data; | ||
1351 | ae_dev->dev_type = HNAE3_DEV_KNIC; | 1357 | ae_dev->dev_type = HNAE3_DEV_KNIC; |
1352 | pci_set_drvdata(pdev, ae_dev); | 1358 | pci_set_drvdata(pdev, ae_dev); |
1353 | 1359 | ||
@@ -2705,10 +2711,11 @@ static void hns3_init_mac_addr(struct net_device *netdev) | |||
2705 | eth_hw_addr_random(netdev); | 2711 | eth_hw_addr_random(netdev); |
2706 | dev_warn(priv->dev, "using random MAC address %pM\n", | 2712 | dev_warn(priv->dev, "using random MAC address %pM\n", |
2707 | netdev->dev_addr); | 2713 | netdev->dev_addr); |
2708 | /* Also copy this new MAC address into hdev */ | ||
2709 | if (h->ae_algo->ops->set_mac_addr) | ||
2710 | h->ae_algo->ops->set_mac_addr(h, netdev->dev_addr); | ||
2711 | } | 2714 | } |
2715 | |||
2716 | if (h->ae_algo->ops->set_mac_addr) | ||
2717 | h->ae_algo->ops->set_mac_addr(h, netdev->dev_addr); | ||
2718 | |||
2712 | } | 2719 | } |
2713 | 2720 | ||
2714 | static void hns3_nic_set_priv_ops(struct net_device *netdev) | 2721 | static void hns3_nic_set_priv_ops(struct net_device *netdev) |
diff --git a/drivers/net/ethernet/ibm/emac/mal.c b/drivers/net/ethernet/ibm/emac/mal.c index 2c74baa2398a..fff09dcf9e34 100644 --- a/drivers/net/ethernet/ibm/emac/mal.c +++ b/drivers/net/ethernet/ibm/emac/mal.c | |||
@@ -402,7 +402,7 @@ static int mal_poll(struct napi_struct *napi, int budget) | |||
402 | unsigned long flags; | 402 | unsigned long flags; |
403 | 403 | ||
404 | MAL_DBG2(mal, "poll(%d)" NL, budget); | 404 | MAL_DBG2(mal, "poll(%d)" NL, budget); |
405 | again: | 405 | |
406 | /* Process TX skbs */ | 406 | /* Process TX skbs */ |
407 | list_for_each(l, &mal->poll_list) { | 407 | list_for_each(l, &mal->poll_list) { |
408 | struct mal_commac *mc = | 408 | struct mal_commac *mc = |
@@ -451,7 +451,6 @@ static int mal_poll(struct napi_struct *napi, int budget) | |||
451 | spin_lock_irqsave(&mal->lock, flags); | 451 | spin_lock_irqsave(&mal->lock, flags); |
452 | mal_disable_eob_irq(mal); | 452 | mal_disable_eob_irq(mal); |
453 | spin_unlock_irqrestore(&mal->lock, flags); | 453 | spin_unlock_irqrestore(&mal->lock, flags); |
454 | goto again; | ||
455 | } | 454 | } |
456 | mc->ops->poll_tx(mc->dev); | 455 | mc->ops->poll_tx(mc->dev); |
457 | } | 456 | } |
diff --git a/drivers/net/ethernet/marvell/mvpp2.c b/drivers/net/ethernet/marvell/mvpp2.c index dd0ee2691c86..9c86cb7cb988 100644 --- a/drivers/net/ethernet/marvell/mvpp2.c +++ b/drivers/net/ethernet/marvell/mvpp2.c | |||
@@ -333,7 +333,7 @@ | |||
333 | #define MVPP2_GMAC_INBAND_AN_MASK BIT(0) | 333 | #define MVPP2_GMAC_INBAND_AN_MASK BIT(0) |
334 | #define MVPP2_GMAC_FLOW_CTRL_MASK GENMASK(2, 1) | 334 | #define MVPP2_GMAC_FLOW_CTRL_MASK GENMASK(2, 1) |
335 | #define MVPP2_GMAC_PCS_ENABLE_MASK BIT(3) | 335 | #define MVPP2_GMAC_PCS_ENABLE_MASK BIT(3) |
336 | #define MVPP2_GMAC_PORT_RGMII_MASK BIT(4) | 336 | #define MVPP2_GMAC_INTERNAL_CLK_MASK BIT(4) |
337 | #define MVPP2_GMAC_DISABLE_PADDING BIT(5) | 337 | #define MVPP2_GMAC_DISABLE_PADDING BIT(5) |
338 | #define MVPP2_GMAC_PORT_RESET_MASK BIT(6) | 338 | #define MVPP2_GMAC_PORT_RESET_MASK BIT(6) |
339 | #define MVPP2_GMAC_AUTONEG_CONFIG 0xc | 339 | #define MVPP2_GMAC_AUTONEG_CONFIG 0xc |
@@ -676,6 +676,7 @@ enum mvpp2_tag_type { | |||
676 | #define MVPP2_PRS_RI_L3_MCAST BIT(15) | 676 | #define MVPP2_PRS_RI_L3_MCAST BIT(15) |
677 | #define MVPP2_PRS_RI_L3_BCAST (BIT(15) | BIT(16)) | 677 | #define MVPP2_PRS_RI_L3_BCAST (BIT(15) | BIT(16)) |
678 | #define MVPP2_PRS_RI_IP_FRAG_MASK 0x20000 | 678 | #define MVPP2_PRS_RI_IP_FRAG_MASK 0x20000 |
679 | #define MVPP2_PRS_RI_IP_FRAG_TRUE BIT(17) | ||
679 | #define MVPP2_PRS_RI_UDF3_MASK 0x300000 | 680 | #define MVPP2_PRS_RI_UDF3_MASK 0x300000 |
680 | #define MVPP2_PRS_RI_UDF3_RX_SPECIAL BIT(21) | 681 | #define MVPP2_PRS_RI_UDF3_RX_SPECIAL BIT(21) |
681 | #define MVPP2_PRS_RI_L4_PROTO_MASK 0x1c00000 | 682 | #define MVPP2_PRS_RI_L4_PROTO_MASK 0x1c00000 |
@@ -792,6 +793,7 @@ struct mvpp2 { | |||
792 | struct clk *pp_clk; | 793 | struct clk *pp_clk; |
793 | struct clk *gop_clk; | 794 | struct clk *gop_clk; |
794 | struct clk *mg_clk; | 795 | struct clk *mg_clk; |
796 | struct clk *axi_clk; | ||
795 | 797 | ||
796 | /* List of pointers to port structures */ | 798 | /* List of pointers to port structures */ |
797 | struct mvpp2_port **port_list; | 799 | struct mvpp2_port **port_list; |
@@ -2315,7 +2317,7 @@ static int mvpp2_prs_ip4_proto(struct mvpp2 *priv, unsigned short proto, | |||
2315 | (proto != IPPROTO_IGMP)) | 2317 | (proto != IPPROTO_IGMP)) |
2316 | return -EINVAL; | 2318 | return -EINVAL; |
2317 | 2319 | ||
2318 | /* Fragmented packet */ | 2320 | /* Not fragmented packet */ |
2319 | tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID, | 2321 | tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID, |
2320 | MVPP2_PE_LAST_FREE_TID); | 2322 | MVPP2_PE_LAST_FREE_TID); |
2321 | if (tid < 0) | 2323 | if (tid < 0) |
@@ -2334,8 +2336,12 @@ static int mvpp2_prs_ip4_proto(struct mvpp2 *priv, unsigned short proto, | |||
2334 | MVPP2_PRS_SRAM_OP_SEL_UDF_ADD); | 2336 | MVPP2_PRS_SRAM_OP_SEL_UDF_ADD); |
2335 | mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT, | 2337 | mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT, |
2336 | MVPP2_PRS_IPV4_DIP_AI_BIT); | 2338 | MVPP2_PRS_IPV4_DIP_AI_BIT); |
2337 | mvpp2_prs_sram_ri_update(&pe, ri | MVPP2_PRS_RI_IP_FRAG_MASK, | 2339 | mvpp2_prs_sram_ri_update(&pe, ri, ri_mask | MVPP2_PRS_RI_IP_FRAG_MASK); |
2338 | ri_mask | MVPP2_PRS_RI_IP_FRAG_MASK); | 2340 | |
2341 | mvpp2_prs_tcam_data_byte_set(&pe, 2, 0x00, | ||
2342 | MVPP2_PRS_TCAM_PROTO_MASK_L); | ||
2343 | mvpp2_prs_tcam_data_byte_set(&pe, 3, 0x00, | ||
2344 | MVPP2_PRS_TCAM_PROTO_MASK); | ||
2339 | 2345 | ||
2340 | mvpp2_prs_tcam_data_byte_set(&pe, 5, proto, MVPP2_PRS_TCAM_PROTO_MASK); | 2346 | mvpp2_prs_tcam_data_byte_set(&pe, 5, proto, MVPP2_PRS_TCAM_PROTO_MASK); |
2341 | mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV4_DIP_AI_BIT); | 2347 | mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV4_DIP_AI_BIT); |
@@ -2346,7 +2352,7 @@ static int mvpp2_prs_ip4_proto(struct mvpp2 *priv, unsigned short proto, | |||
2346 | mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4); | 2352 | mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4); |
2347 | mvpp2_prs_hw_write(priv, &pe); | 2353 | mvpp2_prs_hw_write(priv, &pe); |
2348 | 2354 | ||
2349 | /* Not fragmented packet */ | 2355 | /* Fragmented packet */ |
2350 | tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID, | 2356 | tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID, |
2351 | MVPP2_PE_LAST_FREE_TID); | 2357 | MVPP2_PE_LAST_FREE_TID); |
2352 | if (tid < 0) | 2358 | if (tid < 0) |
@@ -2358,8 +2364,11 @@ static int mvpp2_prs_ip4_proto(struct mvpp2 *priv, unsigned short proto, | |||
2358 | pe.sram.word[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0; | 2364 | pe.sram.word[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0; |
2359 | mvpp2_prs_sram_ri_update(&pe, ri, ri_mask); | 2365 | mvpp2_prs_sram_ri_update(&pe, ri, ri_mask); |
2360 | 2366 | ||
2361 | mvpp2_prs_tcam_data_byte_set(&pe, 2, 0x00, MVPP2_PRS_TCAM_PROTO_MASK_L); | 2367 | mvpp2_prs_sram_ri_update(&pe, ri | MVPP2_PRS_RI_IP_FRAG_TRUE, |
2362 | mvpp2_prs_tcam_data_byte_set(&pe, 3, 0x00, MVPP2_PRS_TCAM_PROTO_MASK); | 2368 | ri_mask | MVPP2_PRS_RI_IP_FRAG_MASK); |
2369 | |||
2370 | mvpp2_prs_tcam_data_byte_set(&pe, 2, 0x00, 0x0); | ||
2371 | mvpp2_prs_tcam_data_byte_set(&pe, 3, 0x00, 0x0); | ||
2363 | 2372 | ||
2364 | /* Update shadow table and hw entry */ | 2373 | /* Update shadow table and hw entry */ |
2365 | mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4); | 2374 | mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4); |
@@ -4591,7 +4600,6 @@ static void mvpp2_port_mii_gmac_configure(struct mvpp2_port *port) | |||
4591 | val |= MVPP2_GMAC_INBAND_AN_MASK | MVPP2_GMAC_PCS_ENABLE_MASK; | 4600 | val |= MVPP2_GMAC_INBAND_AN_MASK | MVPP2_GMAC_PCS_ENABLE_MASK; |
4592 | } else if (phy_interface_mode_is_rgmii(port->phy_interface)) { | 4601 | } else if (phy_interface_mode_is_rgmii(port->phy_interface)) { |
4593 | val &= ~MVPP2_GMAC_PCS_ENABLE_MASK; | 4602 | val &= ~MVPP2_GMAC_PCS_ENABLE_MASK; |
4594 | val |= MVPP2_GMAC_PORT_RGMII_MASK; | ||
4595 | } | 4603 | } |
4596 | writel(val, port->base + MVPP2_GMAC_CTRL_2_REG); | 4604 | writel(val, port->base + MVPP2_GMAC_CTRL_2_REG); |
4597 | 4605 | ||
@@ -7496,7 +7504,7 @@ static void mvpp2_port_copy_mac_addr(struct net_device *dev, struct mvpp2 *priv, | |||
7496 | /* Ports initialization */ | 7504 | /* Ports initialization */ |
7497 | static int mvpp2_port_probe(struct platform_device *pdev, | 7505 | static int mvpp2_port_probe(struct platform_device *pdev, |
7498 | struct device_node *port_node, | 7506 | struct device_node *port_node, |
7499 | struct mvpp2 *priv) | 7507 | struct mvpp2 *priv, int index) |
7500 | { | 7508 | { |
7501 | struct device_node *phy_node; | 7509 | struct device_node *phy_node; |
7502 | struct phy *comphy; | 7510 | struct phy *comphy; |
@@ -7670,7 +7678,7 @@ static int mvpp2_port_probe(struct platform_device *pdev, | |||
7670 | } | 7678 | } |
7671 | netdev_info(dev, "Using %s mac address %pM\n", mac_from, dev->dev_addr); | 7679 | netdev_info(dev, "Using %s mac address %pM\n", mac_from, dev->dev_addr); |
7672 | 7680 | ||
7673 | priv->port_list[id] = port; | 7681 | priv->port_list[index] = port; |
7674 | return 0; | 7682 | return 0; |
7675 | 7683 | ||
7676 | err_free_port_pcpu: | 7684 | err_free_port_pcpu: |
@@ -7963,6 +7971,18 @@ static int mvpp2_probe(struct platform_device *pdev) | |||
7963 | err = clk_prepare_enable(priv->mg_clk); | 7971 | err = clk_prepare_enable(priv->mg_clk); |
7964 | if (err < 0) | 7972 | if (err < 0) |
7965 | goto err_gop_clk; | 7973 | goto err_gop_clk; |
7974 | |||
7975 | priv->axi_clk = devm_clk_get(&pdev->dev, "axi_clk"); | ||
7976 | if (IS_ERR(priv->axi_clk)) { | ||
7977 | err = PTR_ERR(priv->axi_clk); | ||
7978 | if (err == -EPROBE_DEFER) | ||
7979 | goto err_gop_clk; | ||
7980 | priv->axi_clk = NULL; | ||
7981 | } else { | ||
7982 | err = clk_prepare_enable(priv->axi_clk); | ||
7983 | if (err < 0) | ||
7984 | goto err_gop_clk; | ||
7985 | } | ||
7966 | } | 7986 | } |
7967 | 7987 | ||
7968 | /* Get system's tclk rate */ | 7988 | /* Get system's tclk rate */ |
@@ -8005,16 +8025,19 @@ static int mvpp2_probe(struct platform_device *pdev) | |||
8005 | } | 8025 | } |
8006 | 8026 | ||
8007 | /* Initialize ports */ | 8027 | /* Initialize ports */ |
8028 | i = 0; | ||
8008 | for_each_available_child_of_node(dn, port_node) { | 8029 | for_each_available_child_of_node(dn, port_node) { |
8009 | err = mvpp2_port_probe(pdev, port_node, priv); | 8030 | err = mvpp2_port_probe(pdev, port_node, priv, i); |
8010 | if (err < 0) | 8031 | if (err < 0) |
8011 | goto err_mg_clk; | 8032 | goto err_mg_clk; |
8033 | i++; | ||
8012 | } | 8034 | } |
8013 | 8035 | ||
8014 | platform_set_drvdata(pdev, priv); | 8036 | platform_set_drvdata(pdev, priv); |
8015 | return 0; | 8037 | return 0; |
8016 | 8038 | ||
8017 | err_mg_clk: | 8039 | err_mg_clk: |
8040 | clk_disable_unprepare(priv->axi_clk); | ||
8018 | if (priv->hw_version == MVPP22) | 8041 | if (priv->hw_version == MVPP22) |
8019 | clk_disable_unprepare(priv->mg_clk); | 8042 | clk_disable_unprepare(priv->mg_clk); |
8020 | err_gop_clk: | 8043 | err_gop_clk: |
@@ -8052,6 +8075,7 @@ static int mvpp2_remove(struct platform_device *pdev) | |||
8052 | aggr_txq->descs_dma); | 8075 | aggr_txq->descs_dma); |
8053 | } | 8076 | } |
8054 | 8077 | ||
8078 | clk_disable_unprepare(priv->axi_clk); | ||
8055 | clk_disable_unprepare(priv->mg_clk); | 8079 | clk_disable_unprepare(priv->mg_clk); |
8056 | clk_disable_unprepare(priv->pp_clk); | 8080 | clk_disable_unprepare(priv->pp_clk); |
8057 | clk_disable_unprepare(priv->gop_clk); | 8081 | clk_disable_unprepare(priv->gop_clk); |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.h b/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.h index 1e3a6c3e4132..80eef4163f52 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.h | |||
@@ -139,7 +139,7 @@ TRACE_EVENT(mlx5_fs_del_fg, | |||
139 | {MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO, "NEXT_PRIO"} | 139 | {MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO, "NEXT_PRIO"} |
140 | 140 | ||
141 | TRACE_EVENT(mlx5_fs_set_fte, | 141 | TRACE_EVENT(mlx5_fs_set_fte, |
142 | TP_PROTO(const struct fs_fte *fte, bool new_fte), | 142 | TP_PROTO(const struct fs_fte *fte, int new_fte), |
143 | TP_ARGS(fte, new_fte), | 143 | TP_ARGS(fte, new_fte), |
144 | TP_STRUCT__entry( | 144 | TP_STRUCT__entry( |
145 | __field(const struct fs_fte *, fte) | 145 | __field(const struct fs_fte *, fte) |
@@ -149,7 +149,7 @@ TRACE_EVENT(mlx5_fs_set_fte, | |||
149 | __field(u32, action) | 149 | __field(u32, action) |
150 | __field(u32, flow_tag) | 150 | __field(u32, flow_tag) |
151 | __field(u8, mask_enable) | 151 | __field(u8, mask_enable) |
152 | __field(bool, new_fte) | 152 | __field(int, new_fte) |
153 | __array(u32, mask_outer, MLX5_ST_SZ_DW(fte_match_set_lyr_2_4)) | 153 | __array(u32, mask_outer, MLX5_ST_SZ_DW(fte_match_set_lyr_2_4)) |
154 | __array(u32, mask_inner, MLX5_ST_SZ_DW(fte_match_set_lyr_2_4)) | 154 | __array(u32, mask_inner, MLX5_ST_SZ_DW(fte_match_set_lyr_2_4)) |
155 | __array(u32, mask_misc, MLX5_ST_SZ_DW(fte_match_set_misc)) | 155 | __array(u32, mask_misc, MLX5_ST_SZ_DW(fte_match_set_misc)) |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c index f11fd07ac4dd..850cdc980ab5 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c | |||
@@ -291,7 +291,7 @@ void mlx5e_enable_vlan_filter(struct mlx5e_priv *priv) | |||
291 | priv->fs.vlan.filter_disabled = false; | 291 | priv->fs.vlan.filter_disabled = false; |
292 | if (priv->netdev->flags & IFF_PROMISC) | 292 | if (priv->netdev->flags & IFF_PROMISC) |
293 | return; | 293 | return; |
294 | mlx5e_del_any_vid_rules(priv); | 294 | mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID, 0); |
295 | } | 295 | } |
296 | 296 | ||
297 | void mlx5e_disable_vlan_filter(struct mlx5e_priv *priv) | 297 | void mlx5e_disable_vlan_filter(struct mlx5e_priv *priv) |
@@ -302,7 +302,7 @@ void mlx5e_disable_vlan_filter(struct mlx5e_priv *priv) | |||
302 | priv->fs.vlan.filter_disabled = true; | 302 | priv->fs.vlan.filter_disabled = true; |
303 | if (priv->netdev->flags & IFF_PROMISC) | 303 | if (priv->netdev->flags & IFF_PROMISC) |
304 | return; | 304 | return; |
305 | mlx5e_add_any_vid_rules(priv); | 305 | mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID, 0); |
306 | } | 306 | } |
307 | 307 | ||
308 | int mlx5e_vlan_rx_add_vid(struct net_device *dev, __always_unused __be16 proto, | 308 | int mlx5e_vlan_rx_add_vid(struct net_device *dev, __always_unused __be16 proto, |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index dfc29720ab77..cc11bbbd0309 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c | |||
@@ -184,7 +184,6 @@ static void mlx5e_update_sw_counters(struct mlx5e_priv *priv) | |||
184 | struct mlx5e_sw_stats temp, *s = &temp; | 184 | struct mlx5e_sw_stats temp, *s = &temp; |
185 | struct mlx5e_rq_stats *rq_stats; | 185 | struct mlx5e_rq_stats *rq_stats; |
186 | struct mlx5e_sq_stats *sq_stats; | 186 | struct mlx5e_sq_stats *sq_stats; |
187 | u64 tx_offload_none = 0; | ||
188 | int i, j; | 187 | int i, j; |
189 | 188 | ||
190 | memset(s, 0, sizeof(*s)); | 189 | memset(s, 0, sizeof(*s)); |
@@ -199,6 +198,7 @@ static void mlx5e_update_sw_counters(struct mlx5e_priv *priv) | |||
199 | s->rx_lro_bytes += rq_stats->lro_bytes; | 198 | s->rx_lro_bytes += rq_stats->lro_bytes; |
200 | s->rx_csum_none += rq_stats->csum_none; | 199 | s->rx_csum_none += rq_stats->csum_none; |
201 | s->rx_csum_complete += rq_stats->csum_complete; | 200 | s->rx_csum_complete += rq_stats->csum_complete; |
201 | s->rx_csum_unnecessary += rq_stats->csum_unnecessary; | ||
202 | s->rx_csum_unnecessary_inner += rq_stats->csum_unnecessary_inner; | 202 | s->rx_csum_unnecessary_inner += rq_stats->csum_unnecessary_inner; |
203 | s->rx_xdp_drop += rq_stats->xdp_drop; | 203 | s->rx_xdp_drop += rq_stats->xdp_drop; |
204 | s->rx_xdp_tx += rq_stats->xdp_tx; | 204 | s->rx_xdp_tx += rq_stats->xdp_tx; |
@@ -229,14 +229,11 @@ static void mlx5e_update_sw_counters(struct mlx5e_priv *priv) | |||
229 | s->tx_queue_dropped += sq_stats->dropped; | 229 | s->tx_queue_dropped += sq_stats->dropped; |
230 | s->tx_xmit_more += sq_stats->xmit_more; | 230 | s->tx_xmit_more += sq_stats->xmit_more; |
231 | s->tx_csum_partial_inner += sq_stats->csum_partial_inner; | 231 | s->tx_csum_partial_inner += sq_stats->csum_partial_inner; |
232 | tx_offload_none += sq_stats->csum_none; | 232 | s->tx_csum_none += sq_stats->csum_none; |
233 | s->tx_csum_partial += sq_stats->csum_partial; | ||
233 | } | 234 | } |
234 | } | 235 | } |
235 | 236 | ||
236 | /* Update calculated offload counters */ | ||
237 | s->tx_csum_partial = s->tx_packets - tx_offload_none - s->tx_csum_partial_inner; | ||
238 | s->rx_csum_unnecessary = s->rx_packets - s->rx_csum_none - s->rx_csum_complete; | ||
239 | |||
240 | s->link_down_events_phy = MLX5_GET(ppcnt_reg, | 237 | s->link_down_events_phy = MLX5_GET(ppcnt_reg, |
241 | priv->stats.pport.phy_counters, | 238 | priv->stats.pport.phy_counters, |
242 | counter_set.phys_layer_cntrs.link_down_events); | 239 | counter_set.phys_layer_cntrs.link_down_events); |
@@ -3333,8 +3330,8 @@ static int mlx5e_handle_feature(struct net_device *netdev, | |||
3333 | 3330 | ||
3334 | err = feature_handler(netdev, enable); | 3331 | err = feature_handler(netdev, enable); |
3335 | if (err) { | 3332 | if (err) { |
3336 | netdev_err(netdev, "%s feature 0x%llx failed err %d\n", | 3333 | netdev_err(netdev, "%s feature %pNF failed, err %d\n", |
3337 | enable ? "Enable" : "Disable", feature, err); | 3334 | enable ? "Enable" : "Disable", &feature, err); |
3338 | return err; | 3335 | return err; |
3339 | } | 3336 | } |
3340 | 3337 | ||
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c index f1dd638384d3..15a1687483cc 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c | |||
@@ -627,6 +627,7 @@ static inline void mlx5e_handle_csum(struct net_device *netdev, | |||
627 | 627 | ||
628 | if (lro) { | 628 | if (lro) { |
629 | skb->ip_summed = CHECKSUM_UNNECESSARY; | 629 | skb->ip_summed = CHECKSUM_UNNECESSARY; |
630 | rq->stats.csum_unnecessary++; | ||
630 | return; | 631 | return; |
631 | } | 632 | } |
632 | 633 | ||
@@ -644,7 +645,9 @@ static inline void mlx5e_handle_csum(struct net_device *netdev, | |||
644 | skb->csum_level = 1; | 645 | skb->csum_level = 1; |
645 | skb->encapsulation = 1; | 646 | skb->encapsulation = 1; |
646 | rq->stats.csum_unnecessary_inner++; | 647 | rq->stats.csum_unnecessary_inner++; |
648 | return; | ||
647 | } | 649 | } |
650 | rq->stats.csum_unnecessary++; | ||
648 | return; | 651 | return; |
649 | } | 652 | } |
650 | csum_none: | 653 | csum_none: |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h index 6d199ffb1c0b..f8637213afc0 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h | |||
@@ -68,6 +68,7 @@ struct mlx5e_sw_stats { | |||
68 | u64 rx_xdp_drop; | 68 | u64 rx_xdp_drop; |
69 | u64 rx_xdp_tx; | 69 | u64 rx_xdp_tx; |
70 | u64 rx_xdp_tx_full; | 70 | u64 rx_xdp_tx_full; |
71 | u64 tx_csum_none; | ||
71 | u64 tx_csum_partial; | 72 | u64 tx_csum_partial; |
72 | u64 tx_csum_partial_inner; | 73 | u64 tx_csum_partial_inner; |
73 | u64 tx_queue_stopped; | 74 | u64 tx_queue_stopped; |
@@ -108,6 +109,7 @@ static const struct counter_desc sw_stats_desc[] = { | |||
108 | { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_drop) }, | 109 | { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_drop) }, |
109 | { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx) }, | 110 | { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx) }, |
110 | { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_full) }, | 111 | { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_full) }, |
112 | { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_csum_none) }, | ||
111 | { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_csum_partial) }, | 113 | { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_csum_partial) }, |
112 | { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_csum_partial_inner) }, | 114 | { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_csum_partial_inner) }, |
113 | { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_stopped) }, | 115 | { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_stopped) }, |
@@ -339,6 +341,7 @@ struct mlx5e_rq_stats { | |||
339 | u64 packets; | 341 | u64 packets; |
340 | u64 bytes; | 342 | u64 bytes; |
341 | u64 csum_complete; | 343 | u64 csum_complete; |
344 | u64 csum_unnecessary; | ||
342 | u64 csum_unnecessary_inner; | 345 | u64 csum_unnecessary_inner; |
343 | u64 csum_none; | 346 | u64 csum_none; |
344 | u64 lro_packets; | 347 | u64 lro_packets; |
@@ -363,6 +366,7 @@ static const struct counter_desc rq_stats_desc[] = { | |||
363 | { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, packets) }, | 366 | { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, packets) }, |
364 | { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, bytes) }, | 367 | { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, bytes) }, |
365 | { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_complete) }, | 368 | { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_complete) }, |
369 | { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_unnecessary) }, | ||
366 | { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_unnecessary_inner) }, | 370 | { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_unnecessary_inner) }, |
367 | { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_none) }, | 371 | { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_none) }, |
368 | { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, xdp_drop) }, | 372 | { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, xdp_drop) }, |
@@ -392,6 +396,7 @@ struct mlx5e_sq_stats { | |||
392 | u64 tso_bytes; | 396 | u64 tso_bytes; |
393 | u64 tso_inner_packets; | 397 | u64 tso_inner_packets; |
394 | u64 tso_inner_bytes; | 398 | u64 tso_inner_bytes; |
399 | u64 csum_partial; | ||
395 | u64 csum_partial_inner; | 400 | u64 csum_partial_inner; |
396 | u64 nop; | 401 | u64 nop; |
397 | /* less likely accessed in data path */ | 402 | /* less likely accessed in data path */ |
@@ -408,6 +413,7 @@ static const struct counter_desc sq_stats_desc[] = { | |||
408 | { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tso_bytes) }, | 413 | { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tso_bytes) }, |
409 | { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tso_inner_packets) }, | 414 | { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tso_inner_packets) }, |
410 | { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tso_inner_bytes) }, | 415 | { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tso_inner_bytes) }, |
416 | { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, csum_partial) }, | ||
411 | { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, csum_partial_inner) }, | 417 | { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, csum_partial_inner) }, |
412 | { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, nop) }, | 418 | { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, nop) }, |
413 | { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, csum_none) }, | 419 | { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, csum_none) }, |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c index da503e6411da..1aa2028ed995 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c | |||
@@ -1317,6 +1317,69 @@ static bool csum_offload_supported(struct mlx5e_priv *priv, u32 action, u32 upda | |||
1317 | return true; | 1317 | return true; |
1318 | } | 1318 | } |
1319 | 1319 | ||
1320 | static bool modify_header_match_supported(struct mlx5_flow_spec *spec, | ||
1321 | struct tcf_exts *exts) | ||
1322 | { | ||
1323 | const struct tc_action *a; | ||
1324 | bool modify_ip_header; | ||
1325 | LIST_HEAD(actions); | ||
1326 | u8 htype, ip_proto; | ||
1327 | void *headers_v; | ||
1328 | u16 ethertype; | ||
1329 | int nkeys, i; | ||
1330 | |||
1331 | headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, outer_headers); | ||
1332 | ethertype = MLX5_GET(fte_match_set_lyr_2_4, headers_v, ethertype); | ||
1333 | |||
1334 | /* for non-IP we only re-write MACs, so we're okay */ | ||
1335 | if (ethertype != ETH_P_IP && ethertype != ETH_P_IPV6) | ||
1336 | goto out_ok; | ||
1337 | |||
1338 | modify_ip_header = false; | ||
1339 | tcf_exts_to_list(exts, &actions); | ||
1340 | list_for_each_entry(a, &actions, list) { | ||
1341 | if (!is_tcf_pedit(a)) | ||
1342 | continue; | ||
1343 | |||
1344 | nkeys = tcf_pedit_nkeys(a); | ||
1345 | for (i = 0; i < nkeys; i++) { | ||
1346 | htype = tcf_pedit_htype(a, i); | ||
1347 | if (htype == TCA_PEDIT_KEY_EX_HDR_TYPE_IP4 || | ||
1348 | htype == TCA_PEDIT_KEY_EX_HDR_TYPE_IP6) { | ||
1349 | modify_ip_header = true; | ||
1350 | break; | ||
1351 | } | ||
1352 | } | ||
1353 | } | ||
1354 | |||
1355 | ip_proto = MLX5_GET(fte_match_set_lyr_2_4, headers_v, ip_protocol); | ||
1356 | if (modify_ip_header && ip_proto != IPPROTO_TCP && ip_proto != IPPROTO_UDP) { | ||
1357 | pr_info("can't offload re-write of ip proto %d\n", ip_proto); | ||
1358 | return false; | ||
1359 | } | ||
1360 | |||
1361 | out_ok: | ||
1362 | return true; | ||
1363 | } | ||
1364 | |||
1365 | static bool actions_match_supported(struct mlx5e_priv *priv, | ||
1366 | struct tcf_exts *exts, | ||
1367 | struct mlx5e_tc_flow_parse_attr *parse_attr, | ||
1368 | struct mlx5e_tc_flow *flow) | ||
1369 | { | ||
1370 | u32 actions; | ||
1371 | |||
1372 | if (flow->flags & MLX5E_TC_FLOW_ESWITCH) | ||
1373 | actions = flow->esw_attr->action; | ||
1374 | else | ||
1375 | actions = flow->nic_attr->action; | ||
1376 | |||
1377 | if (actions & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) | ||
1378 | return modify_header_match_supported(&parse_attr->spec, exts); | ||
1379 | |||
1380 | return true; | ||
1381 | } | ||
1382 | |||
1320 | static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts, | 1383 | static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts, |
1321 | struct mlx5e_tc_flow_parse_attr *parse_attr, | 1384 | struct mlx5e_tc_flow_parse_attr *parse_attr, |
1322 | struct mlx5e_tc_flow *flow) | 1385 | struct mlx5e_tc_flow *flow) |
@@ -1378,6 +1441,9 @@ static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts, | |||
1378 | return -EINVAL; | 1441 | return -EINVAL; |
1379 | } | 1442 | } |
1380 | 1443 | ||
1444 | if (!actions_match_supported(priv, exts, parse_attr, flow)) | ||
1445 | return -EOPNOTSUPP; | ||
1446 | |||
1381 | return 0; | 1447 | return 0; |
1382 | } | 1448 | } |
1383 | 1449 | ||
@@ -1564,7 +1630,7 @@ static int mlx5e_create_encap_header_ipv4(struct mlx5e_priv *priv, | |||
1564 | break; | 1630 | break; |
1565 | default: | 1631 | default: |
1566 | err = -EOPNOTSUPP; | 1632 | err = -EOPNOTSUPP; |
1567 | goto out; | 1633 | goto free_encap; |
1568 | } | 1634 | } |
1569 | fl4.flowi4_tos = tun_key->tos; | 1635 | fl4.flowi4_tos = tun_key->tos; |
1570 | fl4.daddr = tun_key->u.ipv4.dst; | 1636 | fl4.daddr = tun_key->u.ipv4.dst; |
@@ -1573,7 +1639,7 @@ static int mlx5e_create_encap_header_ipv4(struct mlx5e_priv *priv, | |||
1573 | err = mlx5e_route_lookup_ipv4(priv, mirred_dev, &out_dev, | 1639 | err = mlx5e_route_lookup_ipv4(priv, mirred_dev, &out_dev, |
1574 | &fl4, &n, &ttl); | 1640 | &fl4, &n, &ttl); |
1575 | if (err) | 1641 | if (err) |
1576 | goto out; | 1642 | goto free_encap; |
1577 | 1643 | ||
1578 | /* used by mlx5e_detach_encap to lookup a neigh hash table | 1644 | /* used by mlx5e_detach_encap to lookup a neigh hash table |
1579 | * entry in the neigh hash table when a user deletes a rule | 1645 | * entry in the neigh hash table when a user deletes a rule |
@@ -1590,7 +1656,7 @@ static int mlx5e_create_encap_header_ipv4(struct mlx5e_priv *priv, | |||
1590 | */ | 1656 | */ |
1591 | err = mlx5e_rep_encap_entry_attach(netdev_priv(out_dev), e); | 1657 | err = mlx5e_rep_encap_entry_attach(netdev_priv(out_dev), e); |
1592 | if (err) | 1658 | if (err) |
1593 | goto out; | 1659 | goto free_encap; |
1594 | 1660 | ||
1595 | read_lock_bh(&n->lock); | 1661 | read_lock_bh(&n->lock); |
1596 | nud_state = n->nud_state; | 1662 | nud_state = n->nud_state; |
@@ -1630,8 +1696,9 @@ static int mlx5e_create_encap_header_ipv4(struct mlx5e_priv *priv, | |||
1630 | 1696 | ||
1631 | destroy_neigh_entry: | 1697 | destroy_neigh_entry: |
1632 | mlx5e_rep_encap_entry_detach(netdev_priv(e->out_dev), e); | 1698 | mlx5e_rep_encap_entry_detach(netdev_priv(e->out_dev), e); |
1633 | out: | 1699 | free_encap: |
1634 | kfree(encap_header); | 1700 | kfree(encap_header); |
1701 | out: | ||
1635 | if (n) | 1702 | if (n) |
1636 | neigh_release(n); | 1703 | neigh_release(n); |
1637 | return err; | 1704 | return err; |
@@ -1668,7 +1735,7 @@ static int mlx5e_create_encap_header_ipv6(struct mlx5e_priv *priv, | |||
1668 | break; | 1735 | break; |
1669 | default: | 1736 | default: |
1670 | err = -EOPNOTSUPP; | 1737 | err = -EOPNOTSUPP; |
1671 | goto out; | 1738 | goto free_encap; |
1672 | } | 1739 | } |
1673 | 1740 | ||
1674 | fl6.flowlabel = ip6_make_flowinfo(RT_TOS(tun_key->tos), tun_key->label); | 1741 | fl6.flowlabel = ip6_make_flowinfo(RT_TOS(tun_key->tos), tun_key->label); |
@@ -1678,7 +1745,7 @@ static int mlx5e_create_encap_header_ipv6(struct mlx5e_priv *priv, | |||
1678 | err = mlx5e_route_lookup_ipv6(priv, mirred_dev, &out_dev, | 1745 | err = mlx5e_route_lookup_ipv6(priv, mirred_dev, &out_dev, |
1679 | &fl6, &n, &ttl); | 1746 | &fl6, &n, &ttl); |
1680 | if (err) | 1747 | if (err) |
1681 | goto out; | 1748 | goto free_encap; |
1682 | 1749 | ||
1683 | /* used by mlx5e_detach_encap to lookup a neigh hash table | 1750 | /* used by mlx5e_detach_encap to lookup a neigh hash table |
1684 | * entry in the neigh hash table when a user deletes a rule | 1751 | * entry in the neigh hash table when a user deletes a rule |
@@ -1695,7 +1762,7 @@ static int mlx5e_create_encap_header_ipv6(struct mlx5e_priv *priv, | |||
1695 | */ | 1762 | */ |
1696 | err = mlx5e_rep_encap_entry_attach(netdev_priv(out_dev), e); | 1763 | err = mlx5e_rep_encap_entry_attach(netdev_priv(out_dev), e); |
1697 | if (err) | 1764 | if (err) |
1698 | goto out; | 1765 | goto free_encap; |
1699 | 1766 | ||
1700 | read_lock_bh(&n->lock); | 1767 | read_lock_bh(&n->lock); |
1701 | nud_state = n->nud_state; | 1768 | nud_state = n->nud_state; |
@@ -1736,8 +1803,9 @@ static int mlx5e_create_encap_header_ipv6(struct mlx5e_priv *priv, | |||
1736 | 1803 | ||
1737 | destroy_neigh_entry: | 1804 | destroy_neigh_entry: |
1738 | mlx5e_rep_encap_entry_detach(netdev_priv(e->out_dev), e); | 1805 | mlx5e_rep_encap_entry_detach(netdev_priv(e->out_dev), e); |
1739 | out: | 1806 | free_encap: |
1740 | kfree(encap_header); | 1807 | kfree(encap_header); |
1808 | out: | ||
1741 | if (n) | 1809 | if (n) |
1742 | neigh_release(n); | 1810 | neigh_release(n); |
1743 | return err; | 1811 | return err; |
@@ -1791,6 +1859,7 @@ vxlan_encap_offload_err: | |||
1791 | } | 1859 | } |
1792 | } | 1860 | } |
1793 | 1861 | ||
1862 | /* must verify if encap is valid or not */ | ||
1794 | if (found) | 1863 | if (found) |
1795 | goto attach_flow; | 1864 | goto attach_flow; |
1796 | 1865 | ||
@@ -1817,6 +1886,8 @@ attach_flow: | |||
1817 | *encap_dev = e->out_dev; | 1886 | *encap_dev = e->out_dev; |
1818 | if (e->flags & MLX5_ENCAP_ENTRY_VALID) | 1887 | if (e->flags & MLX5_ENCAP_ENTRY_VALID) |
1819 | attr->encap_id = e->encap_id; | 1888 | attr->encap_id = e->encap_id; |
1889 | else | ||
1890 | err = -EAGAIN; | ||
1820 | 1891 | ||
1821 | return err; | 1892 | return err; |
1822 | 1893 | ||
@@ -1934,6 +2005,10 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts, | |||
1934 | 2005 | ||
1935 | return -EINVAL; | 2006 | return -EINVAL; |
1936 | } | 2007 | } |
2008 | |||
2009 | if (!actions_match_supported(priv, exts, parse_attr, flow)) | ||
2010 | return -EOPNOTSUPP; | ||
2011 | |||
1937 | return err; | 2012 | return err; |
1938 | } | 2013 | } |
1939 | 2014 | ||
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c index fee43e40fa16..1d6925d4369a 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c | |||
@@ -193,6 +193,7 @@ mlx5e_txwqe_build_eseg_csum(struct mlx5e_txqsq *sq, struct sk_buff *skb, struct | |||
193 | sq->stats.csum_partial_inner++; | 193 | sq->stats.csum_partial_inner++; |
194 | } else { | 194 | } else { |
195 | eseg->cs_flags |= MLX5_ETH_WQE_L4_CSUM; | 195 | eseg->cs_flags |= MLX5_ETH_WQE_L4_CSUM; |
196 | sq->stats.csum_partial++; | ||
196 | } | 197 | } |
197 | } else | 198 | } else |
198 | sq->stats.csum_none++; | 199 | sq->stats.csum_none++; |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/fpga/cmd.c index e37453d838db..c0fd2212e890 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/cmd.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/cmd.c | |||
@@ -71,11 +71,11 @@ int mlx5_fpga_access_reg(struct mlx5_core_dev *dev, u8 size, u64 addr, | |||
71 | return 0; | 71 | return 0; |
72 | } | 72 | } |
73 | 73 | ||
74 | int mlx5_fpga_caps(struct mlx5_core_dev *dev, u32 *caps) | 74 | int mlx5_fpga_caps(struct mlx5_core_dev *dev) |
75 | { | 75 | { |
76 | u32 in[MLX5_ST_SZ_DW(fpga_cap)] = {0}; | 76 | u32 in[MLX5_ST_SZ_DW(fpga_cap)] = {0}; |
77 | 77 | ||
78 | return mlx5_core_access_reg(dev, in, sizeof(in), caps, | 78 | return mlx5_core_access_reg(dev, in, sizeof(in), dev->caps.fpga, |
79 | MLX5_ST_SZ_BYTES(fpga_cap), | 79 | MLX5_ST_SZ_BYTES(fpga_cap), |
80 | MLX5_REG_FPGA_CAP, 0, 0); | 80 | MLX5_REG_FPGA_CAP, 0, 0); |
81 | } | 81 | } |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/cmd.h b/drivers/net/ethernet/mellanox/mlx5/core/fpga/cmd.h index 94bdfd47c3f0..d05233c9b4f6 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/cmd.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/cmd.h | |||
@@ -65,7 +65,7 @@ struct mlx5_fpga_qp_counters { | |||
65 | u64 rx_total_drop; | 65 | u64 rx_total_drop; |
66 | }; | 66 | }; |
67 | 67 | ||
68 | int mlx5_fpga_caps(struct mlx5_core_dev *dev, u32 *caps); | 68 | int mlx5_fpga_caps(struct mlx5_core_dev *dev); |
69 | int mlx5_fpga_query(struct mlx5_core_dev *dev, struct mlx5_fpga_query *query); | 69 | int mlx5_fpga_query(struct mlx5_core_dev *dev, struct mlx5_fpga_query *query); |
70 | int mlx5_fpga_ctrl_op(struct mlx5_core_dev *dev, u8 op); | 70 | int mlx5_fpga_ctrl_op(struct mlx5_core_dev *dev, u8 op); |
71 | int mlx5_fpga_access_reg(struct mlx5_core_dev *dev, u8 size, u64 addr, | 71 | int mlx5_fpga_access_reg(struct mlx5_core_dev *dev, u8 size, u64 addr, |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/core.c b/drivers/net/ethernet/mellanox/mlx5/core/fpga/core.c index 9034e9960a76..dc8970346521 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/core.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/core.c | |||
@@ -139,8 +139,7 @@ int mlx5_fpga_device_start(struct mlx5_core_dev *mdev) | |||
139 | if (err) | 139 | if (err) |
140 | goto out; | 140 | goto out; |
141 | 141 | ||
142 | err = mlx5_fpga_caps(fdev->mdev, | 142 | err = mlx5_fpga_caps(fdev->mdev); |
143 | fdev->mdev->caps.hca_cur[MLX5_CAP_FPGA]); | ||
144 | if (err) | 143 | if (err) |
145 | goto out; | 144 | goto out; |
146 | 145 | ||
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c index e0d0efd903bc..36ecc2b2e187 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c | |||
@@ -293,6 +293,9 @@ static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev, | |||
293 | } | 293 | } |
294 | 294 | ||
295 | if (fte->action & MLX5_FLOW_CONTEXT_ACTION_COUNT) { | 295 | if (fte->action & MLX5_FLOW_CONTEXT_ACTION_COUNT) { |
296 | int max_list_size = BIT(MLX5_CAP_FLOWTABLE_TYPE(dev, | ||
297 | log_max_flow_counter, | ||
298 | ft->type)); | ||
296 | int list_size = 0; | 299 | int list_size = 0; |
297 | 300 | ||
298 | list_for_each_entry(dst, &fte->node.children, node.list) { | 301 | list_for_each_entry(dst, &fte->node.children, node.list) { |
@@ -305,12 +308,17 @@ static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev, | |||
305 | in_dests += MLX5_ST_SZ_BYTES(dest_format_struct); | 308 | in_dests += MLX5_ST_SZ_BYTES(dest_format_struct); |
306 | list_size++; | 309 | list_size++; |
307 | } | 310 | } |
311 | if (list_size > max_list_size) { | ||
312 | err = -EINVAL; | ||
313 | goto err_out; | ||
314 | } | ||
308 | 315 | ||
309 | MLX5_SET(flow_context, in_flow_context, flow_counter_list_size, | 316 | MLX5_SET(flow_context, in_flow_context, flow_counter_list_size, |
310 | list_size); | 317 | list_size); |
311 | } | 318 | } |
312 | 319 | ||
313 | err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out)); | 320 | err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out)); |
321 | err_out: | ||
314 | kvfree(in); | 322 | kvfree(in); |
315 | return err; | 323 | return err; |
316 | } | 324 | } |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h index 5509a752f98e..48dd78975062 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h | |||
@@ -52,6 +52,7 @@ enum fs_flow_table_type { | |||
52 | FS_FT_FDB = 0X4, | 52 | FS_FT_FDB = 0X4, |
53 | FS_FT_SNIFFER_RX = 0X5, | 53 | FS_FT_SNIFFER_RX = 0X5, |
54 | FS_FT_SNIFFER_TX = 0X6, | 54 | FS_FT_SNIFFER_TX = 0X6, |
55 | FS_FT_MAX_TYPE = FS_FT_SNIFFER_TX, | ||
55 | }; | 56 | }; |
56 | 57 | ||
57 | enum fs_flow_table_op_mod { | 58 | enum fs_flow_table_op_mod { |
@@ -260,4 +261,14 @@ void mlx5_cleanup_fs(struct mlx5_core_dev *dev); | |||
260 | #define fs_for_each_dst(pos, fte) \ | 261 | #define fs_for_each_dst(pos, fte) \ |
261 | fs_list_for_each_entry(pos, &(fte)->node.children) | 262 | fs_list_for_each_entry(pos, &(fte)->node.children) |
262 | 263 | ||
264 | #define MLX5_CAP_FLOWTABLE_TYPE(mdev, cap, type) ( \ | ||
265 | (type == FS_FT_NIC_RX) ? MLX5_CAP_FLOWTABLE_NIC_RX(mdev, cap) : \ | ||
266 | (type == FS_FT_ESW_EGRESS_ACL) ? MLX5_CAP_ESW_EGRESS_ACL(mdev, cap) : \ | ||
267 | (type == FS_FT_ESW_INGRESS_ACL) ? MLX5_CAP_ESW_INGRESS_ACL(mdev, cap) : \ | ||
268 | (type == FS_FT_FDB) ? MLX5_CAP_ESW_FLOWTABLE_FDB(mdev, cap) : \ | ||
269 | (type == FS_FT_SNIFFER_RX) ? MLX5_CAP_FLOWTABLE_SNIFFER_RX(mdev, cap) : \ | ||
270 | (type == FS_FT_SNIFFER_TX) ? MLX5_CAP_FLOWTABLE_SNIFFER_TX(mdev, cap) : \ | ||
271 | (BUILD_BUG_ON_ZERO(FS_FT_SNIFFER_TX != FS_FT_MAX_TYPE))\ | ||
272 | ) | ||
273 | |||
263 | #endif | 274 | #endif |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c index 85298051a3e4..145e392ab849 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c | |||
@@ -572,12 +572,13 @@ void mlx5_rdma_netdev_free(struct net_device *netdev) | |||
572 | { | 572 | { |
573 | struct mlx5e_priv *priv = mlx5i_epriv(netdev); | 573 | struct mlx5e_priv *priv = mlx5i_epriv(netdev); |
574 | const struct mlx5e_profile *profile = priv->profile; | 574 | const struct mlx5e_profile *profile = priv->profile; |
575 | struct mlx5_core_dev *mdev = priv->mdev; | ||
575 | 576 | ||
576 | mlx5e_detach_netdev(priv); | 577 | mlx5e_detach_netdev(priv); |
577 | profile->cleanup(priv); | 578 | profile->cleanup(priv); |
578 | destroy_workqueue(priv->wq); | 579 | destroy_workqueue(priv->wq); |
579 | free_netdev(netdev); | 580 | free_netdev(netdev); |
580 | 581 | ||
581 | mlx5e_destroy_mdev_resources(priv->mdev); | 582 | mlx5e_destroy_mdev_resources(mdev); |
582 | } | 583 | } |
583 | EXPORT_SYMBOL(mlx5_rdma_netdev_free); | 584 | EXPORT_SYMBOL(mlx5_rdma_netdev_free); |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sriov.c b/drivers/net/ethernet/mellanox/mlx5/core/sriov.c index 6c48e9959b65..2a8b529ce6dd 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/sriov.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/sriov.c | |||
@@ -109,7 +109,7 @@ static int mlx5_device_enable_sriov(struct mlx5_core_dev *dev, int num_vfs) | |||
109 | mlx5_core_warn(dev, | 109 | mlx5_core_warn(dev, |
110 | "failed to restore VF %d settings, err %d\n", | 110 | "failed to restore VF %d settings, err %d\n", |
111 | vf, err); | 111 | vf, err); |
112 | continue; | 112 | continue; |
113 | } | 113 | } |
114 | } | 114 | } |
115 | mlx5_core_dbg(dev, "successfully enabled VF* %d\n", vf); | 115 | mlx5_core_dbg(dev, "successfully enabled VF* %d\n", vf); |
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c index 2cfb3f5d092d..032089efc1a0 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c | |||
@@ -2723,6 +2723,7 @@ static void mlxsw_sp_nexthop_type_fini(struct mlxsw_sp *mlxsw_sp, | |||
2723 | mlxsw_sp_nexthop_rif_fini(nh); | 2723 | mlxsw_sp_nexthop_rif_fini(nh); |
2724 | break; | 2724 | break; |
2725 | case MLXSW_SP_NEXTHOP_TYPE_IPIP: | 2725 | case MLXSW_SP_NEXTHOP_TYPE_IPIP: |
2726 | mlxsw_sp_nexthop_rif_fini(nh); | ||
2726 | mlxsw_sp_nexthop_ipip_fini(mlxsw_sp, nh); | 2727 | mlxsw_sp_nexthop_ipip_fini(mlxsw_sp, nh); |
2727 | break; | 2728 | break; |
2728 | } | 2729 | } |
@@ -2742,7 +2743,11 @@ static int mlxsw_sp_nexthop4_type_init(struct mlxsw_sp *mlxsw_sp, | |||
2742 | router->ipip_ops_arr[ipipt]->can_offload(mlxsw_sp, dev, | 2743 | router->ipip_ops_arr[ipipt]->can_offload(mlxsw_sp, dev, |
2743 | MLXSW_SP_L3_PROTO_IPV4)) { | 2744 | MLXSW_SP_L3_PROTO_IPV4)) { |
2744 | nh->type = MLXSW_SP_NEXTHOP_TYPE_IPIP; | 2745 | nh->type = MLXSW_SP_NEXTHOP_TYPE_IPIP; |
2745 | return mlxsw_sp_nexthop_ipip_init(mlxsw_sp, ipipt, nh, dev); | 2746 | err = mlxsw_sp_nexthop_ipip_init(mlxsw_sp, ipipt, nh, dev); |
2747 | if (err) | ||
2748 | return err; | ||
2749 | mlxsw_sp_nexthop_rif_init(nh, &nh->ipip_entry->ol_lb->common); | ||
2750 | return 0; | ||
2746 | } | 2751 | } |
2747 | 2752 | ||
2748 | nh->type = MLXSW_SP_NEXTHOP_TYPE_ETH; | 2753 | nh->type = MLXSW_SP_NEXTHOP_TYPE_ETH; |
@@ -4009,7 +4014,11 @@ static int mlxsw_sp_nexthop6_type_init(struct mlxsw_sp *mlxsw_sp, | |||
4009 | router->ipip_ops_arr[ipipt]->can_offload(mlxsw_sp, dev, | 4014 | router->ipip_ops_arr[ipipt]->can_offload(mlxsw_sp, dev, |
4010 | MLXSW_SP_L3_PROTO_IPV6)) { | 4015 | MLXSW_SP_L3_PROTO_IPV6)) { |
4011 | nh->type = MLXSW_SP_NEXTHOP_TYPE_IPIP; | 4016 | nh->type = MLXSW_SP_NEXTHOP_TYPE_IPIP; |
4012 | return mlxsw_sp_nexthop_ipip_init(mlxsw_sp, ipipt, nh, dev); | 4017 | err = mlxsw_sp_nexthop_ipip_init(mlxsw_sp, ipipt, nh, dev); |
4018 | if (err) | ||
4019 | return err; | ||
4020 | mlxsw_sp_nexthop_rif_init(nh, &nh->ipip_entry->ol_lb->common); | ||
4021 | return 0; | ||
4013 | } | 4022 | } |
4014 | 4023 | ||
4015 | nh->type = MLXSW_SP_NEXTHOP_TYPE_ETH; | 4024 | nh->type = MLXSW_SP_NEXTHOP_TYPE_ETH; |
@@ -5068,6 +5077,7 @@ mlxsw_sp_rif_create(struct mlxsw_sp *mlxsw_sp, | |||
5068 | vr = mlxsw_sp_vr_get(mlxsw_sp, tb_id ? : RT_TABLE_MAIN); | 5077 | vr = mlxsw_sp_vr_get(mlxsw_sp, tb_id ? : RT_TABLE_MAIN); |
5069 | if (IS_ERR(vr)) | 5078 | if (IS_ERR(vr)) |
5070 | return ERR_CAST(vr); | 5079 | return ERR_CAST(vr); |
5080 | vr->rif_count++; | ||
5071 | 5081 | ||
5072 | err = mlxsw_sp_rif_index_alloc(mlxsw_sp, &rif_index); | 5082 | err = mlxsw_sp_rif_index_alloc(mlxsw_sp, &rif_index); |
5073 | if (err) | 5083 | if (err) |
@@ -5099,7 +5109,6 @@ mlxsw_sp_rif_create(struct mlxsw_sp *mlxsw_sp, | |||
5099 | 5109 | ||
5100 | mlxsw_sp_rif_counters_alloc(rif); | 5110 | mlxsw_sp_rif_counters_alloc(rif); |
5101 | mlxsw_sp->router->rifs[rif_index] = rif; | 5111 | mlxsw_sp->router->rifs[rif_index] = rif; |
5102 | vr->rif_count++; | ||
5103 | 5112 | ||
5104 | return rif; | 5113 | return rif; |
5105 | 5114 | ||
@@ -5110,6 +5119,7 @@ err_fid_get: | |||
5110 | kfree(rif); | 5119 | kfree(rif); |
5111 | err_rif_alloc: | 5120 | err_rif_alloc: |
5112 | err_rif_index_alloc: | 5121 | err_rif_index_alloc: |
5122 | vr->rif_count--; | ||
5113 | mlxsw_sp_vr_put(vr); | 5123 | mlxsw_sp_vr_put(vr); |
5114 | return ERR_PTR(err); | 5124 | return ERR_PTR(err); |
5115 | } | 5125 | } |
@@ -5124,7 +5134,6 @@ void mlxsw_sp_rif_destroy(struct mlxsw_sp_rif *rif) | |||
5124 | mlxsw_sp_router_rif_gone_sync(mlxsw_sp, rif); | 5134 | mlxsw_sp_router_rif_gone_sync(mlxsw_sp, rif); |
5125 | vr = &mlxsw_sp->router->vrs[rif->vr_id]; | 5135 | vr = &mlxsw_sp->router->vrs[rif->vr_id]; |
5126 | 5136 | ||
5127 | vr->rif_count--; | ||
5128 | mlxsw_sp->router->rifs[rif->rif_index] = NULL; | 5137 | mlxsw_sp->router->rifs[rif->rif_index] = NULL; |
5129 | mlxsw_sp_rif_counters_free(rif); | 5138 | mlxsw_sp_rif_counters_free(rif); |
5130 | ops->deconfigure(rif); | 5139 | ops->deconfigure(rif); |
@@ -5132,6 +5141,7 @@ void mlxsw_sp_rif_destroy(struct mlxsw_sp_rif *rif) | |||
5132 | /* Loopback RIFs are not associated with a FID. */ | 5141 | /* Loopback RIFs are not associated with a FID. */ |
5133 | mlxsw_sp_fid_put(fid); | 5142 | mlxsw_sp_fid_put(fid); |
5134 | kfree(rif); | 5143 | kfree(rif); |
5144 | vr->rif_count--; | ||
5135 | mlxsw_sp_vr_put(vr); | 5145 | mlxsw_sp_vr_put(vr); |
5136 | } | 5146 | } |
5137 | 5147 | ||
diff --git a/drivers/net/ethernet/qualcomm/emac/emac-ethtool.c b/drivers/net/ethernet/qualcomm/emac/emac-ethtool.c index bbe24639aa5a..c8c6231b87f3 100644 --- a/drivers/net/ethernet/qualcomm/emac/emac-ethtool.c +++ b/drivers/net/ethernet/qualcomm/emac/emac-ethtool.c | |||
@@ -88,6 +88,8 @@ static void emac_set_msglevel(struct net_device *netdev, u32 data) | |||
88 | static int emac_get_sset_count(struct net_device *netdev, int sset) | 88 | static int emac_get_sset_count(struct net_device *netdev, int sset) |
89 | { | 89 | { |
90 | switch (sset) { | 90 | switch (sset) { |
91 | case ETH_SS_PRIV_FLAGS: | ||
92 | return 1; | ||
91 | case ETH_SS_STATS: | 93 | case ETH_SS_STATS: |
92 | return EMAC_STATS_LEN; | 94 | return EMAC_STATS_LEN; |
93 | default: | 95 | default: |
@@ -100,6 +102,10 @@ static void emac_get_strings(struct net_device *netdev, u32 stringset, u8 *data) | |||
100 | unsigned int i; | 102 | unsigned int i; |
101 | 103 | ||
102 | switch (stringset) { | 104 | switch (stringset) { |
105 | case ETH_SS_PRIV_FLAGS: | ||
106 | strcpy(data, "single-pause-mode"); | ||
107 | break; | ||
108 | |||
103 | case ETH_SS_STATS: | 109 | case ETH_SS_STATS: |
104 | for (i = 0; i < EMAC_STATS_LEN; i++) { | 110 | for (i = 0; i < EMAC_STATS_LEN; i++) { |
105 | strlcpy(data, emac_ethtool_stat_strings[i], | 111 | strlcpy(data, emac_ethtool_stat_strings[i], |
@@ -230,6 +236,27 @@ static int emac_get_regs_len(struct net_device *netdev) | |||
230 | return EMAC_MAX_REG_SIZE * sizeof(u32); | 236 | return EMAC_MAX_REG_SIZE * sizeof(u32); |
231 | } | 237 | } |
232 | 238 | ||
239 | #define EMAC_PRIV_ENABLE_SINGLE_PAUSE BIT(0) | ||
240 | |||
241 | static int emac_set_priv_flags(struct net_device *netdev, u32 flags) | ||
242 | { | ||
243 | struct emac_adapter *adpt = netdev_priv(netdev); | ||
244 | |||
245 | adpt->single_pause_mode = !!(flags & EMAC_PRIV_ENABLE_SINGLE_PAUSE); | ||
246 | |||
247 | if (netif_running(netdev)) | ||
248 | return emac_reinit_locked(adpt); | ||
249 | |||
250 | return 0; | ||
251 | } | ||
252 | |||
253 | static u32 emac_get_priv_flags(struct net_device *netdev) | ||
254 | { | ||
255 | struct emac_adapter *adpt = netdev_priv(netdev); | ||
256 | |||
257 | return adpt->single_pause_mode ? EMAC_PRIV_ENABLE_SINGLE_PAUSE : 0; | ||
258 | } | ||
259 | |||
233 | static const struct ethtool_ops emac_ethtool_ops = { | 260 | static const struct ethtool_ops emac_ethtool_ops = { |
234 | .get_link_ksettings = phy_ethtool_get_link_ksettings, | 261 | .get_link_ksettings = phy_ethtool_get_link_ksettings, |
235 | .set_link_ksettings = phy_ethtool_set_link_ksettings, | 262 | .set_link_ksettings = phy_ethtool_set_link_ksettings, |
@@ -253,6 +280,9 @@ static const struct ethtool_ops emac_ethtool_ops = { | |||
253 | 280 | ||
254 | .get_regs_len = emac_get_regs_len, | 281 | .get_regs_len = emac_get_regs_len, |
255 | .get_regs = emac_get_regs, | 282 | .get_regs = emac_get_regs, |
283 | |||
284 | .set_priv_flags = emac_set_priv_flags, | ||
285 | .get_priv_flags = emac_get_priv_flags, | ||
256 | }; | 286 | }; |
257 | 287 | ||
258 | void emac_set_ethtool_ops(struct net_device *netdev) | 288 | void emac_set_ethtool_ops(struct net_device *netdev) |
diff --git a/drivers/net/ethernet/qualcomm/emac/emac-mac.c b/drivers/net/ethernet/qualcomm/emac/emac-mac.c index bcd4708b3745..3ed9033e56db 100644 --- a/drivers/net/ethernet/qualcomm/emac/emac-mac.c +++ b/drivers/net/ethernet/qualcomm/emac/emac-mac.c | |||
@@ -551,6 +551,28 @@ static void emac_mac_start(struct emac_adapter *adpt) | |||
551 | mac &= ~(HUGEN | VLAN_STRIP | TPAUSE | SIMR | HUGE | MULTI_ALL | | 551 | mac &= ~(HUGEN | VLAN_STRIP | TPAUSE | SIMR | HUGE | MULTI_ALL | |
552 | DEBUG_MODE | SINGLE_PAUSE_MODE); | 552 | DEBUG_MODE | SINGLE_PAUSE_MODE); |
553 | 553 | ||
554 | /* Enable single-pause-frame mode if requested. | ||
555 | * | ||
556 | * If enabled, the EMAC will send a single pause frame when the RX | ||
557 | * queue is full. This normally leads to packet loss because | ||
558 | * the pause frame disables the remote MAC only for 33ms (the quanta), | ||
559 | * and then the remote MAC continues sending packets even though | ||
560 | * the RX queue is still full. | ||
561 | * | ||
562 | * If disabled, the EMAC sends a pause frame every 31ms until the RX | ||
563 | * queue is no longer full. Normally, this is the preferred | ||
564 | * method of operation. However, when the system is hung (e.g. | ||
565 | * cores are halted), the EMAC interrupt handler is never called | ||
566 | * and so the RX queue fills up quickly and stays full. The resuling | ||
567 | * non-stop "flood" of pause frames sometimes has the effect of | ||
568 | * disabling nearby switches. In some cases, other nearby switches | ||
569 | * are also affected, shutting down the entire network. | ||
570 | * | ||
571 | * The user can enable or disable single-pause-frame mode | ||
572 | * via ethtool. | ||
573 | */ | ||
574 | mac |= adpt->single_pause_mode ? SINGLE_PAUSE_MODE : 0; | ||
575 | |||
554 | writel_relaxed(csr1, adpt->csr + EMAC_EMAC_WRAPPER_CSR1); | 576 | writel_relaxed(csr1, adpt->csr + EMAC_EMAC_WRAPPER_CSR1); |
555 | 577 | ||
556 | writel_relaxed(mac, adpt->base + EMAC_MAC_CTRL); | 578 | writel_relaxed(mac, adpt->base + EMAC_MAC_CTRL); |
@@ -876,7 +898,8 @@ static void emac_mac_rx_descs_refill(struct emac_adapter *adpt, | |||
876 | 898 | ||
877 | curr_rxbuf->dma_addr = | 899 | curr_rxbuf->dma_addr = |
878 | dma_map_single(adpt->netdev->dev.parent, skb->data, | 900 | dma_map_single(adpt->netdev->dev.parent, skb->data, |
879 | curr_rxbuf->length, DMA_FROM_DEVICE); | 901 | adpt->rxbuf_size, DMA_FROM_DEVICE); |
902 | |||
880 | ret = dma_mapping_error(adpt->netdev->dev.parent, | 903 | ret = dma_mapping_error(adpt->netdev->dev.parent, |
881 | curr_rxbuf->dma_addr); | 904 | curr_rxbuf->dma_addr); |
882 | if (ret) { | 905 | if (ret) { |
diff --git a/drivers/net/ethernet/qualcomm/emac/emac.c b/drivers/net/ethernet/qualcomm/emac/emac.c index 60850bfa3d32..759543512117 100644 --- a/drivers/net/ethernet/qualcomm/emac/emac.c +++ b/drivers/net/ethernet/qualcomm/emac/emac.c | |||
@@ -443,6 +443,9 @@ static void emac_init_adapter(struct emac_adapter *adpt) | |||
443 | 443 | ||
444 | /* default to automatic flow control */ | 444 | /* default to automatic flow control */ |
445 | adpt->automatic = true; | 445 | adpt->automatic = true; |
446 | |||
447 | /* Disable single-pause-frame mode by default */ | ||
448 | adpt->single_pause_mode = false; | ||
446 | } | 449 | } |
447 | 450 | ||
448 | /* Get the clock */ | 451 | /* Get the clock */ |
diff --git a/drivers/net/ethernet/qualcomm/emac/emac.h b/drivers/net/ethernet/qualcomm/emac/emac.h index 8ee4ec6aef2e..d7c9f44209d4 100644 --- a/drivers/net/ethernet/qualcomm/emac/emac.h +++ b/drivers/net/ethernet/qualcomm/emac/emac.h | |||
@@ -363,6 +363,9 @@ struct emac_adapter { | |||
363 | bool tx_flow_control; | 363 | bool tx_flow_control; |
364 | bool rx_flow_control; | 364 | bool rx_flow_control; |
365 | 365 | ||
366 | /* True == use single-pause-frame mode. */ | ||
367 | bool single_pause_mode; | ||
368 | |||
366 | /* Ring parameter */ | 369 | /* Ring parameter */ |
367 | u8 tpd_burst; | 370 | u8 tpd_burst; |
368 | u8 rfd_burst; | 371 | u8 rfd_burst; |
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c index 98f22551eb45..1e33aea59f50 100644 --- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c +++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c | |||
@@ -51,10 +51,7 @@ struct rmnet_walk_data { | |||
51 | 51 | ||
52 | static int rmnet_is_real_dev_registered(const struct net_device *real_dev) | 52 | static int rmnet_is_real_dev_registered(const struct net_device *real_dev) |
53 | { | 53 | { |
54 | rx_handler_func_t *rx_handler; | 54 | return rcu_access_pointer(real_dev->rx_handler) == rmnet_rx_handler; |
55 | |||
56 | rx_handler = rcu_dereference(real_dev->rx_handler); | ||
57 | return (rx_handler == rmnet_rx_handler); | ||
58 | } | 55 | } |
59 | 56 | ||
60 | /* Needs rtnl lock */ | 57 | /* Needs rtnl lock */ |
diff --git a/drivers/net/ethernet/realtek/8139too.c b/drivers/net/ethernet/realtek/8139too.c index ca22f2898664..d24b47b8e0b2 100644 --- a/drivers/net/ethernet/realtek/8139too.c +++ b/drivers/net/ethernet/realtek/8139too.c | |||
@@ -2135,11 +2135,12 @@ static int rtl8139_poll(struct napi_struct *napi, int budget) | |||
2135 | if (likely(RTL_R16(IntrStatus) & RxAckBits)) | 2135 | if (likely(RTL_R16(IntrStatus) & RxAckBits)) |
2136 | work_done += rtl8139_rx(dev, tp, budget); | 2136 | work_done += rtl8139_rx(dev, tp, budget); |
2137 | 2137 | ||
2138 | if (work_done < budget && napi_complete_done(napi, work_done)) { | 2138 | if (work_done < budget) { |
2139 | unsigned long flags; | 2139 | unsigned long flags; |
2140 | 2140 | ||
2141 | spin_lock_irqsave(&tp->lock, flags); | 2141 | spin_lock_irqsave(&tp->lock, flags); |
2142 | RTL_W16_F(IntrMask, rtl8139_intr_mask); | 2142 | if (napi_complete_done(napi, work_done)) |
2143 | RTL_W16_F(IntrMask, rtl8139_intr_mask); | ||
2143 | spin_unlock_irqrestore(&tp->lock, flags); | 2144 | spin_unlock_irqrestore(&tp->lock, flags); |
2144 | } | 2145 | } |
2145 | spin_unlock(&tp->rx_lock); | 2146 | spin_unlock(&tp->rx_lock); |
diff --git a/drivers/net/ethernet/rocker/rocker_tlv.h b/drivers/net/ethernet/rocker/rocker_tlv.h index a63ef82e7c72..dfae3c9d57c6 100644 --- a/drivers/net/ethernet/rocker/rocker_tlv.h +++ b/drivers/net/ethernet/rocker/rocker_tlv.h | |||
@@ -139,40 +139,52 @@ rocker_tlv_start(struct rocker_desc_info *desc_info) | |||
139 | int rocker_tlv_put(struct rocker_desc_info *desc_info, | 139 | int rocker_tlv_put(struct rocker_desc_info *desc_info, |
140 | int attrtype, int attrlen, const void *data); | 140 | int attrtype, int attrlen, const void *data); |
141 | 141 | ||
142 | static inline int rocker_tlv_put_u8(struct rocker_desc_info *desc_info, | 142 | static inline int |
143 | int attrtype, u8 value) | 143 | rocker_tlv_put_u8(struct rocker_desc_info *desc_info, int attrtype, u8 value) |
144 | { | 144 | { |
145 | return rocker_tlv_put(desc_info, attrtype, sizeof(u8), &value); | 145 | u8 tmp = value; /* work around GCC PR81715 */ |
146 | |||
147 | return rocker_tlv_put(desc_info, attrtype, sizeof(u8), &tmp); | ||
146 | } | 148 | } |
147 | 149 | ||
148 | static inline int rocker_tlv_put_u16(struct rocker_desc_info *desc_info, | 150 | static inline int |
149 | int attrtype, u16 value) | 151 | rocker_tlv_put_u16(struct rocker_desc_info *desc_info, int attrtype, u16 value) |
150 | { | 152 | { |
151 | return rocker_tlv_put(desc_info, attrtype, sizeof(u16), &value); | 153 | u16 tmp = value; |
154 | |||
155 | return rocker_tlv_put(desc_info, attrtype, sizeof(u16), &tmp); | ||
152 | } | 156 | } |
153 | 157 | ||
154 | static inline int rocker_tlv_put_be16(struct rocker_desc_info *desc_info, | 158 | static inline int |
155 | int attrtype, __be16 value) | 159 | rocker_tlv_put_be16(struct rocker_desc_info *desc_info, int attrtype, __be16 value) |
156 | { | 160 | { |
157 | return rocker_tlv_put(desc_info, attrtype, sizeof(__be16), &value); | 161 | __be16 tmp = value; |
162 | |||
163 | return rocker_tlv_put(desc_info, attrtype, sizeof(__be16), &tmp); | ||
158 | } | 164 | } |
159 | 165 | ||
160 | static inline int rocker_tlv_put_u32(struct rocker_desc_info *desc_info, | 166 | static inline int |
161 | int attrtype, u32 value) | 167 | rocker_tlv_put_u32(struct rocker_desc_info *desc_info, int attrtype, u32 value) |
162 | { | 168 | { |
163 | return rocker_tlv_put(desc_info, attrtype, sizeof(u32), &value); | 169 | u32 tmp = value; |
170 | |||
171 | return rocker_tlv_put(desc_info, attrtype, sizeof(u32), &tmp); | ||
164 | } | 172 | } |
165 | 173 | ||
166 | static inline int rocker_tlv_put_be32(struct rocker_desc_info *desc_info, | 174 | static inline int |
167 | int attrtype, __be32 value) | 175 | rocker_tlv_put_be32(struct rocker_desc_info *desc_info, int attrtype, __be32 value) |
168 | { | 176 | { |
169 | return rocker_tlv_put(desc_info, attrtype, sizeof(__be32), &value); | 177 | __be32 tmp = value; |
178 | |||
179 | return rocker_tlv_put(desc_info, attrtype, sizeof(__be32), &tmp); | ||
170 | } | 180 | } |
171 | 181 | ||
172 | static inline int rocker_tlv_put_u64(struct rocker_desc_info *desc_info, | 182 | static inline int |
173 | int attrtype, u64 value) | 183 | rocker_tlv_put_u64(struct rocker_desc_info *desc_info, int attrtype, u64 value) |
174 | { | 184 | { |
175 | return rocker_tlv_put(desc_info, attrtype, sizeof(u64), &value); | 185 | u64 tmp = value; |
186 | |||
187 | return rocker_tlv_put(desc_info, attrtype, sizeof(u64), &tmp); | ||
176 | } | 188 | } |
177 | 189 | ||
178 | static inline struct rocker_tlv * | 190 | static inline struct rocker_tlv * |
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c index dd6a2f9791cc..5efef8001edf 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c | |||
@@ -511,6 +511,7 @@ static struct platform_driver dwc_eth_dwmac_driver = { | |||
511 | .remove = dwc_eth_dwmac_remove, | 511 | .remove = dwc_eth_dwmac_remove, |
512 | .driver = { | 512 | .driver = { |
513 | .name = "dwc-eth-dwmac", | 513 | .name = "dwc-eth-dwmac", |
514 | .pm = &stmmac_pltfr_pm_ops, | ||
514 | .of_match_table = dwc_eth_dwmac_match, | 515 | .of_match_table = dwc_eth_dwmac_match, |
515 | }, | 516 | }, |
516 | }; | 517 | }; |
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c index 99823f54696a..13133b30b575 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c | |||
@@ -83,6 +83,117 @@ struct rk_priv_data { | |||
83 | (((tx) ? soc##_GMAC_TXCLK_DLY_ENABLE : soc##_GMAC_TXCLK_DLY_DISABLE) | \ | 83 | (((tx) ? soc##_GMAC_TXCLK_DLY_ENABLE : soc##_GMAC_TXCLK_DLY_DISABLE) | \ |
84 | ((rx) ? soc##_GMAC_RXCLK_DLY_ENABLE : soc##_GMAC_RXCLK_DLY_DISABLE)) | 84 | ((rx) ? soc##_GMAC_RXCLK_DLY_ENABLE : soc##_GMAC_RXCLK_DLY_DISABLE)) |
85 | 85 | ||
86 | #define RK3128_GRF_MAC_CON0 0x0168 | ||
87 | #define RK3128_GRF_MAC_CON1 0x016c | ||
88 | |||
89 | /* RK3128_GRF_MAC_CON0 */ | ||
90 | #define RK3128_GMAC_TXCLK_DLY_ENABLE GRF_BIT(14) | ||
91 | #define RK3128_GMAC_TXCLK_DLY_DISABLE GRF_CLR_BIT(14) | ||
92 | #define RK3128_GMAC_RXCLK_DLY_ENABLE GRF_BIT(15) | ||
93 | #define RK3128_GMAC_RXCLK_DLY_DISABLE GRF_CLR_BIT(15) | ||
94 | #define RK3128_GMAC_CLK_RX_DL_CFG(val) HIWORD_UPDATE(val, 0x7F, 7) | ||
95 | #define RK3128_GMAC_CLK_TX_DL_CFG(val) HIWORD_UPDATE(val, 0x7F, 0) | ||
96 | |||
97 | /* RK3128_GRF_MAC_CON1 */ | ||
98 | #define RK3128_GMAC_PHY_INTF_SEL_RGMII \ | ||
99 | (GRF_BIT(6) | GRF_CLR_BIT(7) | GRF_CLR_BIT(8)) | ||
100 | #define RK3128_GMAC_PHY_INTF_SEL_RMII \ | ||
101 | (GRF_CLR_BIT(6) | GRF_CLR_BIT(7) | GRF_BIT(8)) | ||
102 | #define RK3128_GMAC_FLOW_CTRL GRF_BIT(9) | ||
103 | #define RK3128_GMAC_FLOW_CTRL_CLR GRF_CLR_BIT(9) | ||
104 | #define RK3128_GMAC_SPEED_10M GRF_CLR_BIT(10) | ||
105 | #define RK3128_GMAC_SPEED_100M GRF_BIT(10) | ||
106 | #define RK3128_GMAC_RMII_CLK_25M GRF_BIT(11) | ||
107 | #define RK3128_GMAC_RMII_CLK_2_5M GRF_CLR_BIT(11) | ||
108 | #define RK3128_GMAC_CLK_125M (GRF_CLR_BIT(12) | GRF_CLR_BIT(13)) | ||
109 | #define RK3128_GMAC_CLK_25M (GRF_BIT(12) | GRF_BIT(13)) | ||
110 | #define RK3128_GMAC_CLK_2_5M (GRF_CLR_BIT(12) | GRF_BIT(13)) | ||
111 | #define RK3128_GMAC_RMII_MODE GRF_BIT(14) | ||
112 | #define RK3128_GMAC_RMII_MODE_CLR GRF_CLR_BIT(14) | ||
113 | |||
114 | static void rk3128_set_to_rgmii(struct rk_priv_data *bsp_priv, | ||
115 | int tx_delay, int rx_delay) | ||
116 | { | ||
117 | struct device *dev = &bsp_priv->pdev->dev; | ||
118 | |||
119 | if (IS_ERR(bsp_priv->grf)) { | ||
120 | dev_err(dev, "Missing rockchip,grf property\n"); | ||
121 | return; | ||
122 | } | ||
123 | |||
124 | regmap_write(bsp_priv->grf, RK3128_GRF_MAC_CON1, | ||
125 | RK3128_GMAC_PHY_INTF_SEL_RGMII | | ||
126 | RK3128_GMAC_RMII_MODE_CLR); | ||
127 | regmap_write(bsp_priv->grf, RK3128_GRF_MAC_CON0, | ||
128 | DELAY_ENABLE(RK3128, tx_delay, rx_delay) | | ||
129 | RK3128_GMAC_CLK_RX_DL_CFG(rx_delay) | | ||
130 | RK3128_GMAC_CLK_TX_DL_CFG(tx_delay)); | ||
131 | } | ||
132 | |||
133 | static void rk3128_set_to_rmii(struct rk_priv_data *bsp_priv) | ||
134 | { | ||
135 | struct device *dev = &bsp_priv->pdev->dev; | ||
136 | |||
137 | if (IS_ERR(bsp_priv->grf)) { | ||
138 | dev_err(dev, "Missing rockchip,grf property\n"); | ||
139 | return; | ||
140 | } | ||
141 | |||
142 | regmap_write(bsp_priv->grf, RK3128_GRF_MAC_CON1, | ||
143 | RK3128_GMAC_PHY_INTF_SEL_RMII | RK3128_GMAC_RMII_MODE); | ||
144 | } | ||
145 | |||
146 | static void rk3128_set_rgmii_speed(struct rk_priv_data *bsp_priv, int speed) | ||
147 | { | ||
148 | struct device *dev = &bsp_priv->pdev->dev; | ||
149 | |||
150 | if (IS_ERR(bsp_priv->grf)) { | ||
151 | dev_err(dev, "Missing rockchip,grf property\n"); | ||
152 | return; | ||
153 | } | ||
154 | |||
155 | if (speed == 10) | ||
156 | regmap_write(bsp_priv->grf, RK3128_GRF_MAC_CON1, | ||
157 | RK3128_GMAC_CLK_2_5M); | ||
158 | else if (speed == 100) | ||
159 | regmap_write(bsp_priv->grf, RK3128_GRF_MAC_CON1, | ||
160 | RK3128_GMAC_CLK_25M); | ||
161 | else if (speed == 1000) | ||
162 | regmap_write(bsp_priv->grf, RK3128_GRF_MAC_CON1, | ||
163 | RK3128_GMAC_CLK_125M); | ||
164 | else | ||
165 | dev_err(dev, "unknown speed value for RGMII! speed=%d", speed); | ||
166 | } | ||
167 | |||
168 | static void rk3128_set_rmii_speed(struct rk_priv_data *bsp_priv, int speed) | ||
169 | { | ||
170 | struct device *dev = &bsp_priv->pdev->dev; | ||
171 | |||
172 | if (IS_ERR(bsp_priv->grf)) { | ||
173 | dev_err(dev, "Missing rockchip,grf property\n"); | ||
174 | return; | ||
175 | } | ||
176 | |||
177 | if (speed == 10) { | ||
178 | regmap_write(bsp_priv->grf, RK3128_GRF_MAC_CON1, | ||
179 | RK3128_GMAC_RMII_CLK_2_5M | | ||
180 | RK3128_GMAC_SPEED_10M); | ||
181 | } else if (speed == 100) { | ||
182 | regmap_write(bsp_priv->grf, RK3128_GRF_MAC_CON1, | ||
183 | RK3128_GMAC_RMII_CLK_25M | | ||
184 | RK3128_GMAC_SPEED_100M); | ||
185 | } else { | ||
186 | dev_err(dev, "unknown speed value for RMII! speed=%d", speed); | ||
187 | } | ||
188 | } | ||
189 | |||
190 | static const struct rk_gmac_ops rk3128_ops = { | ||
191 | .set_to_rgmii = rk3128_set_to_rgmii, | ||
192 | .set_to_rmii = rk3128_set_to_rmii, | ||
193 | .set_rgmii_speed = rk3128_set_rgmii_speed, | ||
194 | .set_rmii_speed = rk3128_set_rmii_speed, | ||
195 | }; | ||
196 | |||
86 | #define RK3228_GRF_MAC_CON0 0x0900 | 197 | #define RK3228_GRF_MAC_CON0 0x0900 |
87 | #define RK3228_GRF_MAC_CON1 0x0904 | 198 | #define RK3228_GRF_MAC_CON1 0x0904 |
88 | 199 | ||
@@ -1313,6 +1424,7 @@ static int rk_gmac_resume(struct device *dev) | |||
1313 | static SIMPLE_DEV_PM_OPS(rk_gmac_pm_ops, rk_gmac_suspend, rk_gmac_resume); | 1424 | static SIMPLE_DEV_PM_OPS(rk_gmac_pm_ops, rk_gmac_suspend, rk_gmac_resume); |
1314 | 1425 | ||
1315 | static const struct of_device_id rk_gmac_dwmac_match[] = { | 1426 | static const struct of_device_id rk_gmac_dwmac_match[] = { |
1427 | { .compatible = "rockchip,rk3128-gmac", .data = &rk3128_ops }, | ||
1316 | { .compatible = "rockchip,rk3228-gmac", .data = &rk3228_ops }, | 1428 | { .compatible = "rockchip,rk3228-gmac", .data = &rk3228_ops }, |
1317 | { .compatible = "rockchip,rk3288-gmac", .data = &rk3288_ops }, | 1429 | { .compatible = "rockchip,rk3288-gmac", .data = &rk3288_ops }, |
1318 | { .compatible = "rockchip,rk3328-gmac", .data = &rk3328_ops }, | 1430 | { .compatible = "rockchip,rk3328-gmac", .data = &rk3328_ops }, |
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c index c4407e8e39a3..2f7d7ec59962 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c | |||
@@ -296,6 +296,7 @@ static void dwmac4_pmt(struct mac_device_info *hw, unsigned long mode) | |||
296 | { | 296 | { |
297 | void __iomem *ioaddr = hw->pcsr; | 297 | void __iomem *ioaddr = hw->pcsr; |
298 | unsigned int pmt = 0; | 298 | unsigned int pmt = 0; |
299 | u32 config; | ||
299 | 300 | ||
300 | if (mode & WAKE_MAGIC) { | 301 | if (mode & WAKE_MAGIC) { |
301 | pr_debug("GMAC: WOL Magic frame\n"); | 302 | pr_debug("GMAC: WOL Magic frame\n"); |
@@ -306,6 +307,12 @@ static void dwmac4_pmt(struct mac_device_info *hw, unsigned long mode) | |||
306 | pmt |= power_down | global_unicast | wake_up_frame_en; | 307 | pmt |= power_down | global_unicast | wake_up_frame_en; |
307 | } | 308 | } |
308 | 309 | ||
310 | if (pmt) { | ||
311 | /* The receiver must be enabled for WOL before powering down */ | ||
312 | config = readl(ioaddr + GMAC_CONFIG); | ||
313 | config |= GMAC_CONFIG_RE; | ||
314 | writel(config, ioaddr + GMAC_CONFIG); | ||
315 | } | ||
309 | writel(pmt, ioaddr + GMAC_PMT); | 316 | writel(pmt, ioaddr + GMAC_PMT); |
310 | } | 317 | } |
311 | 318 | ||
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c index a366b3747eeb..8a280b48e3a9 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c | |||
@@ -315,6 +315,7 @@ static int stmmac_dt_phy(struct plat_stmmacenet_data *plat, | |||
315 | { .compatible = "allwinner,sun8i-h3-emac" }, | 315 | { .compatible = "allwinner,sun8i-h3-emac" }, |
316 | { .compatible = "allwinner,sun8i-v3s-emac" }, | 316 | { .compatible = "allwinner,sun8i-v3s-emac" }, |
317 | { .compatible = "allwinner,sun50i-a64-emac" }, | 317 | { .compatible = "allwinner,sun50i-a64-emac" }, |
318 | {}, | ||
318 | }; | 319 | }; |
319 | 320 | ||
320 | /* If phy-handle property is passed from DT, use it as the PHY */ | 321 | /* If phy-handle property is passed from DT, use it as the PHY */ |
diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h index d98cdfb1536b..5176be76ca7d 100644 --- a/drivers/net/hyperv/hyperv_net.h +++ b/drivers/net/hyperv/hyperv_net.h | |||
@@ -150,6 +150,8 @@ struct netvsc_device_info { | |||
150 | u32 num_chn; | 150 | u32 num_chn; |
151 | u32 send_sections; | 151 | u32 send_sections; |
152 | u32 recv_sections; | 152 | u32 recv_sections; |
153 | u32 send_section_size; | ||
154 | u32 recv_section_size; | ||
153 | }; | 155 | }; |
154 | 156 | ||
155 | enum rndis_device_state { | 157 | enum rndis_device_state { |
diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c index a5511b7326af..8d5077fb0492 100644 --- a/drivers/net/hyperv/netvsc.c +++ b/drivers/net/hyperv/netvsc.c | |||
@@ -76,9 +76,6 @@ static struct netvsc_device *alloc_net_device(void) | |||
76 | net_device->max_pkt = RNDIS_MAX_PKT_DEFAULT; | 76 | net_device->max_pkt = RNDIS_MAX_PKT_DEFAULT; |
77 | net_device->pkt_align = RNDIS_PKT_ALIGN_DEFAULT; | 77 | net_device->pkt_align = RNDIS_PKT_ALIGN_DEFAULT; |
78 | 78 | ||
79 | net_device->recv_section_size = NETVSC_RECV_SECTION_SIZE; | ||
80 | net_device->send_section_size = NETVSC_SEND_SECTION_SIZE; | ||
81 | |||
82 | init_completion(&net_device->channel_init_wait); | 79 | init_completion(&net_device->channel_init_wait); |
83 | init_waitqueue_head(&net_device->subchan_open); | 80 | init_waitqueue_head(&net_device->subchan_open); |
84 | INIT_WORK(&net_device->subchan_work, rndis_set_subchannel); | 81 | INIT_WORK(&net_device->subchan_work, rndis_set_subchannel); |
@@ -262,7 +259,7 @@ static int netvsc_init_buf(struct hv_device *device, | |||
262 | int ret = 0; | 259 | int ret = 0; |
263 | 260 | ||
264 | /* Get receive buffer area. */ | 261 | /* Get receive buffer area. */ |
265 | buf_size = device_info->recv_sections * net_device->recv_section_size; | 262 | buf_size = device_info->recv_sections * device_info->recv_section_size; |
266 | buf_size = roundup(buf_size, PAGE_SIZE); | 263 | buf_size = roundup(buf_size, PAGE_SIZE); |
267 | 264 | ||
268 | net_device->recv_buf = vzalloc(buf_size); | 265 | net_device->recv_buf = vzalloc(buf_size); |
@@ -344,7 +341,7 @@ static int netvsc_init_buf(struct hv_device *device, | |||
344 | goto cleanup; | 341 | goto cleanup; |
345 | 342 | ||
346 | /* Now setup the send buffer. */ | 343 | /* Now setup the send buffer. */ |
347 | buf_size = device_info->send_sections * net_device->send_section_size; | 344 | buf_size = device_info->send_sections * device_info->send_section_size; |
348 | buf_size = round_up(buf_size, PAGE_SIZE); | 345 | buf_size = round_up(buf_size, PAGE_SIZE); |
349 | 346 | ||
350 | net_device->send_buf = vzalloc(buf_size); | 347 | net_device->send_buf = vzalloc(buf_size); |
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c index d4902ee5f260..a32ae02e1b6c 100644 --- a/drivers/net/hyperv/netvsc_drv.c +++ b/drivers/net/hyperv/netvsc_drv.c | |||
@@ -848,7 +848,9 @@ static int netvsc_set_channels(struct net_device *net, | |||
848 | device_info.num_chn = count; | 848 | device_info.num_chn = count; |
849 | device_info.ring_size = ring_size; | 849 | device_info.ring_size = ring_size; |
850 | device_info.send_sections = nvdev->send_section_cnt; | 850 | device_info.send_sections = nvdev->send_section_cnt; |
851 | device_info.send_section_size = nvdev->send_section_size; | ||
851 | device_info.recv_sections = nvdev->recv_section_cnt; | 852 | device_info.recv_sections = nvdev->recv_section_cnt; |
853 | device_info.recv_section_size = nvdev->recv_section_size; | ||
852 | 854 | ||
853 | rndis_filter_device_remove(dev, nvdev); | 855 | rndis_filter_device_remove(dev, nvdev); |
854 | 856 | ||
@@ -963,7 +965,9 @@ static int netvsc_change_mtu(struct net_device *ndev, int mtu) | |||
963 | device_info.ring_size = ring_size; | 965 | device_info.ring_size = ring_size; |
964 | device_info.num_chn = nvdev->num_chn; | 966 | device_info.num_chn = nvdev->num_chn; |
965 | device_info.send_sections = nvdev->send_section_cnt; | 967 | device_info.send_sections = nvdev->send_section_cnt; |
968 | device_info.send_section_size = nvdev->send_section_size; | ||
966 | device_info.recv_sections = nvdev->recv_section_cnt; | 969 | device_info.recv_sections = nvdev->recv_section_cnt; |
970 | device_info.recv_section_size = nvdev->recv_section_size; | ||
967 | 971 | ||
968 | rndis_filter_device_remove(hdev, nvdev); | 972 | rndis_filter_device_remove(hdev, nvdev); |
969 | 973 | ||
@@ -1485,7 +1489,9 @@ static int netvsc_set_ringparam(struct net_device *ndev, | |||
1485 | device_info.num_chn = nvdev->num_chn; | 1489 | device_info.num_chn = nvdev->num_chn; |
1486 | device_info.ring_size = ring_size; | 1490 | device_info.ring_size = ring_size; |
1487 | device_info.send_sections = new_tx; | 1491 | device_info.send_sections = new_tx; |
1492 | device_info.send_section_size = nvdev->send_section_size; | ||
1488 | device_info.recv_sections = new_rx; | 1493 | device_info.recv_sections = new_rx; |
1494 | device_info.recv_section_size = nvdev->recv_section_size; | ||
1489 | 1495 | ||
1490 | netif_device_detach(ndev); | 1496 | netif_device_detach(ndev); |
1491 | was_opened = rndis_filter_opened(nvdev); | 1497 | was_opened = rndis_filter_opened(nvdev); |
@@ -1934,7 +1940,9 @@ static int netvsc_probe(struct hv_device *dev, | |||
1934 | device_info.ring_size = ring_size; | 1940 | device_info.ring_size = ring_size; |
1935 | device_info.num_chn = VRSS_CHANNEL_DEFAULT; | 1941 | device_info.num_chn = VRSS_CHANNEL_DEFAULT; |
1936 | device_info.send_sections = NETVSC_DEFAULT_TX; | 1942 | device_info.send_sections = NETVSC_DEFAULT_TX; |
1943 | device_info.send_section_size = NETVSC_SEND_SECTION_SIZE; | ||
1937 | device_info.recv_sections = NETVSC_DEFAULT_RX; | 1944 | device_info.recv_sections = NETVSC_DEFAULT_RX; |
1945 | device_info.recv_section_size = NETVSC_RECV_SECTION_SIZE; | ||
1938 | 1946 | ||
1939 | nvdev = rndis_filter_device_add(dev, &device_info); | 1947 | nvdev = rndis_filter_device_add(dev, &device_info); |
1940 | if (IS_ERR(nvdev)) { | 1948 | if (IS_ERR(nvdev)) { |
diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig index a9d16a3af514..cd931cf9dcc2 100644 --- a/drivers/net/phy/Kconfig +++ b/drivers/net/phy/Kconfig | |||
@@ -160,15 +160,6 @@ config MDIO_XGENE | |||
160 | 160 | ||
161 | endif | 161 | endif |
162 | 162 | ||
163 | menuconfig PHYLIB | ||
164 | tristate "PHY Device support and infrastructure" | ||
165 | depends on NETDEVICES | ||
166 | select MDIO_DEVICE | ||
167 | help | ||
168 | Ethernet controllers are usually attached to PHY | ||
169 | devices. This option provides infrastructure for | ||
170 | managing PHY devices. | ||
171 | |||
172 | config PHYLINK | 163 | config PHYLINK |
173 | tristate | 164 | tristate |
174 | depends on NETDEVICES | 165 | depends on NETDEVICES |
@@ -179,6 +170,15 @@ config PHYLINK | |||
179 | configuration links, PHYs, and Serdes links with MAC level | 170 | configuration links, PHYs, and Serdes links with MAC level |
180 | autonegotiation modes. | 171 | autonegotiation modes. |
181 | 172 | ||
173 | menuconfig PHYLIB | ||
174 | tristate "PHY Device support and infrastructure" | ||
175 | depends on NETDEVICES | ||
176 | select MDIO_DEVICE | ||
177 | help | ||
178 | Ethernet controllers are usually attached to PHY | ||
179 | devices. This option provides infrastructure for | ||
180 | managing PHY devices. | ||
181 | |||
182 | if PHYLIB | 182 | if PHYLIB |
183 | 183 | ||
184 | config SWPHY | 184 | config SWPHY |
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c index e842d2cd1ee7..2b1e67bc1e73 100644 --- a/drivers/net/phy/phy.c +++ b/drivers/net/phy/phy.c | |||
@@ -373,7 +373,8 @@ void phy_ethtool_ksettings_get(struct phy_device *phydev, | |||
373 | cmd->base.port = PORT_BNC; | 373 | cmd->base.port = PORT_BNC; |
374 | else | 374 | else |
375 | cmd->base.port = PORT_MII; | 375 | cmd->base.port = PORT_MII; |
376 | 376 | cmd->base.transceiver = phy_is_internal(phydev) ? | |
377 | XCVR_INTERNAL : XCVR_EXTERNAL; | ||
377 | cmd->base.phy_address = phydev->mdio.addr; | 378 | cmd->base.phy_address = phydev->mdio.addr; |
378 | cmd->base.autoneg = phydev->autoneg; | 379 | cmd->base.autoneg = phydev->autoneg; |
379 | cmd->base.eth_tp_mdix_ctrl = phydev->mdix_ctrl; | 380 | cmd->base.eth_tp_mdix_ctrl = phydev->mdix_ctrl; |
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c index 8cf0c5901f95..67f25ac29025 100644 --- a/drivers/net/phy/phy_device.c +++ b/drivers/net/phy/phy_device.c | |||
@@ -879,7 +879,7 @@ void phy_attached_print(struct phy_device *phydev, const char *fmt, ...) | |||
879 | { | 879 | { |
880 | const char *drv_name = phydev->drv ? phydev->drv->name : "unbound"; | 880 | const char *drv_name = phydev->drv ? phydev->drv->name : "unbound"; |
881 | char *irq_str; | 881 | char *irq_str; |
882 | char irq_num[4]; | 882 | char irq_num[8]; |
883 | 883 | ||
884 | switch(phydev->irq) { | 884 | switch(phydev->irq) { |
885 | case PHY_POLL: | 885 | case PHY_POLL: |
diff --git a/drivers/net/phy/xilinx_gmii2rgmii.c b/drivers/net/phy/xilinx_gmii2rgmii.c index d15dd3938ba8..2e5150b0b8d5 100644 --- a/drivers/net/phy/xilinx_gmii2rgmii.c +++ b/drivers/net/phy/xilinx_gmii2rgmii.c | |||
@@ -44,7 +44,7 @@ static int xgmiitorgmii_read_status(struct phy_device *phydev) | |||
44 | priv->phy_drv->read_status(phydev); | 44 | priv->phy_drv->read_status(phydev); |
45 | 45 | ||
46 | val = mdiobus_read(phydev->mdio.bus, priv->addr, XILINX_GMII2RGMII_REG); | 46 | val = mdiobus_read(phydev->mdio.bus, priv->addr, XILINX_GMII2RGMII_REG); |
47 | val &= XILINX_GMII2RGMII_SPEED_MASK; | 47 | val &= ~XILINX_GMII2RGMII_SPEED_MASK; |
48 | 48 | ||
49 | if (phydev->speed == SPEED_1000) | 49 | if (phydev->speed == SPEED_1000) |
50 | val |= BMCR_SPEED1000; | 50 | val |= BMCR_SPEED1000; |
diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c index a404552555d4..c3f77e3b7819 100644 --- a/drivers/net/ppp/ppp_generic.c +++ b/drivers/net/ppp/ppp_generic.c | |||
@@ -120,7 +120,7 @@ struct ppp { | |||
120 | int n_channels; /* how many channels are attached 54 */ | 120 | int n_channels; /* how many channels are attached 54 */ |
121 | spinlock_t rlock; /* lock for receive side 58 */ | 121 | spinlock_t rlock; /* lock for receive side 58 */ |
122 | spinlock_t wlock; /* lock for transmit side 5c */ | 122 | spinlock_t wlock; /* lock for transmit side 5c */ |
123 | int *xmit_recursion __percpu; /* xmit recursion detect */ | 123 | int __percpu *xmit_recursion; /* xmit recursion detect */ |
124 | int mru; /* max receive unit 60 */ | 124 | int mru; /* max receive unit 60 */ |
125 | unsigned int flags; /* control bits 64 */ | 125 | unsigned int flags; /* control bits 64 */ |
126 | unsigned int xstate; /* transmit state bits 68 */ | 126 | unsigned int xstate; /* transmit state bits 68 */ |
diff --git a/drivers/net/tun.c b/drivers/net/tun.c index 3c9985f29950..5ce580f413b9 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c | |||
@@ -1496,11 +1496,13 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile, | |||
1496 | switch (tun->flags & TUN_TYPE_MASK) { | 1496 | switch (tun->flags & TUN_TYPE_MASK) { |
1497 | case IFF_TUN: | 1497 | case IFF_TUN: |
1498 | if (tun->flags & IFF_NO_PI) { | 1498 | if (tun->flags & IFF_NO_PI) { |
1499 | switch (skb->data[0] & 0xf0) { | 1499 | u8 ip_version = skb->len ? (skb->data[0] >> 4) : 0; |
1500 | case 0x40: | 1500 | |
1501 | switch (ip_version) { | ||
1502 | case 4: | ||
1501 | pi.proto = htons(ETH_P_IP); | 1503 | pi.proto = htons(ETH_P_IP); |
1502 | break; | 1504 | break; |
1503 | case 0x60: | 1505 | case 6: |
1504 | pi.proto = htons(ETH_P_IPV6); | 1506 | pi.proto = htons(ETH_P_IPV6); |
1505 | break; | 1507 | break; |
1506 | default: | 1508 | default: |
diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c index 8ab281b478f2..29c7e2ec0dcb 100644 --- a/drivers/net/usb/cdc_ether.c +++ b/drivers/net/usb/cdc_ether.c | |||
@@ -54,11 +54,19 @@ static int is_wireless_rndis(struct usb_interface_descriptor *desc) | |||
54 | desc->bInterfaceProtocol == 3); | 54 | desc->bInterfaceProtocol == 3); |
55 | } | 55 | } |
56 | 56 | ||
57 | static int is_novatel_rndis(struct usb_interface_descriptor *desc) | ||
58 | { | ||
59 | return (desc->bInterfaceClass == USB_CLASS_MISC && | ||
60 | desc->bInterfaceSubClass == 4 && | ||
61 | desc->bInterfaceProtocol == 1); | ||
62 | } | ||
63 | |||
57 | #else | 64 | #else |
58 | 65 | ||
59 | #define is_rndis(desc) 0 | 66 | #define is_rndis(desc) 0 |
60 | #define is_activesync(desc) 0 | 67 | #define is_activesync(desc) 0 |
61 | #define is_wireless_rndis(desc) 0 | 68 | #define is_wireless_rndis(desc) 0 |
69 | #define is_novatel_rndis(desc) 0 | ||
62 | 70 | ||
63 | #endif | 71 | #endif |
64 | 72 | ||
@@ -150,7 +158,8 @@ int usbnet_generic_cdc_bind(struct usbnet *dev, struct usb_interface *intf) | |||
150 | */ | 158 | */ |
151 | rndis = (is_rndis(&intf->cur_altsetting->desc) || | 159 | rndis = (is_rndis(&intf->cur_altsetting->desc) || |
152 | is_activesync(&intf->cur_altsetting->desc) || | 160 | is_activesync(&intf->cur_altsetting->desc) || |
153 | is_wireless_rndis(&intf->cur_altsetting->desc)); | 161 | is_wireless_rndis(&intf->cur_altsetting->desc) || |
162 | is_novatel_rndis(&intf->cur_altsetting->desc)); | ||
154 | 163 | ||
155 | memset(info, 0, sizeof(*info)); | 164 | memset(info, 0, sizeof(*info)); |
156 | info->control = intf; | 165 | info->control = intf; |
@@ -547,6 +556,7 @@ static const struct driver_info wwan_info = { | |||
547 | #define REALTEK_VENDOR_ID 0x0bda | 556 | #define REALTEK_VENDOR_ID 0x0bda |
548 | #define SAMSUNG_VENDOR_ID 0x04e8 | 557 | #define SAMSUNG_VENDOR_ID 0x04e8 |
549 | #define LENOVO_VENDOR_ID 0x17ef | 558 | #define LENOVO_VENDOR_ID 0x17ef |
559 | #define LINKSYS_VENDOR_ID 0x13b1 | ||
550 | #define NVIDIA_VENDOR_ID 0x0955 | 560 | #define NVIDIA_VENDOR_ID 0x0955 |
551 | #define HP_VENDOR_ID 0x03f0 | 561 | #define HP_VENDOR_ID 0x03f0 |
552 | #define MICROSOFT_VENDOR_ID 0x045e | 562 | #define MICROSOFT_VENDOR_ID 0x045e |
@@ -737,6 +747,15 @@ static const struct usb_device_id products[] = { | |||
737 | .driver_info = 0, | 747 | .driver_info = 0, |
738 | }, | 748 | }, |
739 | 749 | ||
750 | #if IS_ENABLED(CONFIG_USB_RTL8152) | ||
751 | /* Linksys USB3GIGV1 Ethernet Adapter */ | ||
752 | { | ||
753 | USB_DEVICE_AND_INTERFACE_INFO(LINKSYS_VENDOR_ID, 0x0041, USB_CLASS_COMM, | ||
754 | USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE), | ||
755 | .driver_info = 0, | ||
756 | }, | ||
757 | #endif | ||
758 | |||
740 | /* ThinkPad USB-C Dock (based on Realtek RTL8153) */ | 759 | /* ThinkPad USB-C Dock (based on Realtek RTL8153) */ |
741 | { | 760 | { |
742 | USB_DEVICE_AND_INTERFACE_INFO(LENOVO_VENDOR_ID, 0x3062, USB_CLASS_COMM, | 761 | USB_DEVICE_AND_INTERFACE_INFO(LENOVO_VENDOR_ID, 0x3062, USB_CLASS_COMM, |
diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c index b99a7fb09f8e..0161f77641fa 100644 --- a/drivers/net/usb/lan78xx.c +++ b/drivers/net/usb/lan78xx.c | |||
@@ -1265,30 +1265,45 @@ static int lan78xx_ethtool_get_eeprom(struct net_device *netdev, | |||
1265 | struct ethtool_eeprom *ee, u8 *data) | 1265 | struct ethtool_eeprom *ee, u8 *data) |
1266 | { | 1266 | { |
1267 | struct lan78xx_net *dev = netdev_priv(netdev); | 1267 | struct lan78xx_net *dev = netdev_priv(netdev); |
1268 | int ret; | ||
1269 | |||
1270 | ret = usb_autopm_get_interface(dev->intf); | ||
1271 | if (ret) | ||
1272 | return ret; | ||
1268 | 1273 | ||
1269 | ee->magic = LAN78XX_EEPROM_MAGIC; | 1274 | ee->magic = LAN78XX_EEPROM_MAGIC; |
1270 | 1275 | ||
1271 | return lan78xx_read_raw_eeprom(dev, ee->offset, ee->len, data); | 1276 | ret = lan78xx_read_raw_eeprom(dev, ee->offset, ee->len, data); |
1277 | |||
1278 | usb_autopm_put_interface(dev->intf); | ||
1279 | |||
1280 | return ret; | ||
1272 | } | 1281 | } |
1273 | 1282 | ||
1274 | static int lan78xx_ethtool_set_eeprom(struct net_device *netdev, | 1283 | static int lan78xx_ethtool_set_eeprom(struct net_device *netdev, |
1275 | struct ethtool_eeprom *ee, u8 *data) | 1284 | struct ethtool_eeprom *ee, u8 *data) |
1276 | { | 1285 | { |
1277 | struct lan78xx_net *dev = netdev_priv(netdev); | 1286 | struct lan78xx_net *dev = netdev_priv(netdev); |
1287 | int ret; | ||
1288 | |||
1289 | ret = usb_autopm_get_interface(dev->intf); | ||
1290 | if (ret) | ||
1291 | return ret; | ||
1278 | 1292 | ||
1279 | /* Allow entire eeprom update only */ | 1293 | /* Invalid EEPROM_INDICATOR at offset zero will result in a failure |
1280 | if ((ee->magic == LAN78XX_EEPROM_MAGIC) && | 1294 | * to load data from EEPROM |
1281 | (ee->offset == 0) && | 1295 | */ |
1282 | (ee->len == 512) && | 1296 | if (ee->magic == LAN78XX_EEPROM_MAGIC) |
1283 | (data[0] == EEPROM_INDICATOR)) | 1297 | ret = lan78xx_write_raw_eeprom(dev, ee->offset, ee->len, data); |
1284 | return lan78xx_write_raw_eeprom(dev, ee->offset, ee->len, data); | ||
1285 | else if ((ee->magic == LAN78XX_OTP_MAGIC) && | 1298 | else if ((ee->magic == LAN78XX_OTP_MAGIC) && |
1286 | (ee->offset == 0) && | 1299 | (ee->offset == 0) && |
1287 | (ee->len == 512) && | 1300 | (ee->len == 512) && |
1288 | (data[0] == OTP_INDICATOR_1)) | 1301 | (data[0] == OTP_INDICATOR_1)) |
1289 | return lan78xx_write_raw_otp(dev, ee->offset, ee->len, data); | 1302 | ret = lan78xx_write_raw_otp(dev, ee->offset, ee->len, data); |
1303 | |||
1304 | usb_autopm_put_interface(dev->intf); | ||
1290 | 1305 | ||
1291 | return -EINVAL; | 1306 | return ret; |
1292 | } | 1307 | } |
1293 | 1308 | ||
1294 | static void lan78xx_get_strings(struct net_device *netdev, u32 stringset, | 1309 | static void lan78xx_get_strings(struct net_device *netdev, u32 stringset, |
@@ -2434,7 +2449,6 @@ static int lan78xx_reset(struct lan78xx_net *dev) | |||
2434 | /* LAN7801 only has RGMII mode */ | 2449 | /* LAN7801 only has RGMII mode */ |
2435 | if (dev->chipid == ID_REV_CHIP_ID_7801_) | 2450 | if (dev->chipid == ID_REV_CHIP_ID_7801_) |
2436 | buf &= ~MAC_CR_GMII_EN_; | 2451 | buf &= ~MAC_CR_GMII_EN_; |
2437 | buf |= MAC_CR_AUTO_DUPLEX_ | MAC_CR_AUTO_SPEED_; | ||
2438 | ret = lan78xx_write_reg(dev, MAC_CR, buf); | 2452 | ret = lan78xx_write_reg(dev, MAC_CR, buf); |
2439 | 2453 | ||
2440 | ret = lan78xx_read_reg(dev, MAC_TX, &buf); | 2454 | ret = lan78xx_read_reg(dev, MAC_TX, &buf); |
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c index ceb78e2ea4f0..941ece08ba78 100644 --- a/drivers/net/usb/r8152.c +++ b/drivers/net/usb/r8152.c | |||
@@ -613,6 +613,7 @@ enum rtl8152_flags { | |||
613 | #define VENDOR_ID_MICROSOFT 0x045e | 613 | #define VENDOR_ID_MICROSOFT 0x045e |
614 | #define VENDOR_ID_SAMSUNG 0x04e8 | 614 | #define VENDOR_ID_SAMSUNG 0x04e8 |
615 | #define VENDOR_ID_LENOVO 0x17ef | 615 | #define VENDOR_ID_LENOVO 0x17ef |
616 | #define VENDOR_ID_LINKSYS 0x13b1 | ||
616 | #define VENDOR_ID_NVIDIA 0x0955 | 617 | #define VENDOR_ID_NVIDIA 0x0955 |
617 | 618 | ||
618 | #define MCU_TYPE_PLA 0x0100 | 619 | #define MCU_TYPE_PLA 0x0100 |
@@ -5316,6 +5317,7 @@ static const struct usb_device_id rtl8152_table[] = { | |||
5316 | {REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x7205)}, | 5317 | {REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x7205)}, |
5317 | {REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x720c)}, | 5318 | {REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x720c)}, |
5318 | {REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x7214)}, | 5319 | {REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x7214)}, |
5320 | {REALTEK_USB_DEVICE(VENDOR_ID_LINKSYS, 0x0041)}, | ||
5319 | {REALTEK_USB_DEVICE(VENDOR_ID_NVIDIA, 0x09ff)}, | 5321 | {REALTEK_USB_DEVICE(VENDOR_ID_NVIDIA, 0x09ff)}, |
5320 | {} | 5322 | {} |
5321 | }; | 5323 | }; |
diff --git a/drivers/net/usb/rndis_host.c b/drivers/net/usb/rndis_host.c index a151f267aebb..b807c91abe1d 100644 --- a/drivers/net/usb/rndis_host.c +++ b/drivers/net/usb/rndis_host.c | |||
@@ -632,6 +632,10 @@ static const struct usb_device_id products [] = { | |||
632 | /* RNDIS for tethering */ | 632 | /* RNDIS for tethering */ |
633 | USB_INTERFACE_INFO(USB_CLASS_WIRELESS_CONTROLLER, 1, 3), | 633 | USB_INTERFACE_INFO(USB_CLASS_WIRELESS_CONTROLLER, 1, 3), |
634 | .driver_info = (unsigned long) &rndis_info, | 634 | .driver_info = (unsigned long) &rndis_info, |
635 | }, { | ||
636 | /* Novatel Verizon USB730L */ | ||
637 | USB_INTERFACE_INFO(USB_CLASS_MISC, 4, 1), | ||
638 | .driver_info = (unsigned long) &rndis_info, | ||
635 | }, | 639 | }, |
636 | { }, // END | 640 | { }, // END |
637 | }; | 641 | }; |
diff --git a/drivers/net/wireless/ath/ath10k/pci.c b/drivers/net/wireless/ath/ath10k/pci.c index bc1633945a56..195dafb98131 100644 --- a/drivers/net/wireless/ath/ath10k/pci.c +++ b/drivers/net/wireless/ath/ath10k/pci.c | |||
@@ -3396,9 +3396,7 @@ static void ath10k_pci_remove(struct pci_dev *pdev) | |||
3396 | 3396 | ||
3397 | MODULE_DEVICE_TABLE(pci, ath10k_pci_id_table); | 3397 | MODULE_DEVICE_TABLE(pci, ath10k_pci_id_table); |
3398 | 3398 | ||
3399 | #ifdef CONFIG_PM | 3399 | static __maybe_unused int ath10k_pci_pm_suspend(struct device *dev) |
3400 | |||
3401 | static int ath10k_pci_pm_suspend(struct device *dev) | ||
3402 | { | 3400 | { |
3403 | struct ath10k *ar = dev_get_drvdata(dev); | 3401 | struct ath10k *ar = dev_get_drvdata(dev); |
3404 | int ret; | 3402 | int ret; |
@@ -3414,7 +3412,7 @@ static int ath10k_pci_pm_suspend(struct device *dev) | |||
3414 | return ret; | 3412 | return ret; |
3415 | } | 3413 | } |
3416 | 3414 | ||
3417 | static int ath10k_pci_pm_resume(struct device *dev) | 3415 | static __maybe_unused int ath10k_pci_pm_resume(struct device *dev) |
3418 | { | 3416 | { |
3419 | struct ath10k *ar = dev_get_drvdata(dev); | 3417 | struct ath10k *ar = dev_get_drvdata(dev); |
3420 | int ret; | 3418 | int ret; |
@@ -3433,7 +3431,6 @@ static int ath10k_pci_pm_resume(struct device *dev) | |||
3433 | static SIMPLE_DEV_PM_OPS(ath10k_pci_pm_ops, | 3431 | static SIMPLE_DEV_PM_OPS(ath10k_pci_pm_ops, |
3434 | ath10k_pci_pm_suspend, | 3432 | ath10k_pci_pm_suspend, |
3435 | ath10k_pci_pm_resume); | 3433 | ath10k_pci_pm_resume); |
3436 | #endif | ||
3437 | 3434 | ||
3438 | static struct pci_driver ath10k_pci_driver = { | 3435 | static struct pci_driver ath10k_pci_driver = { |
3439 | .name = "ath10k_pci", | 3436 | .name = "ath10k_pci", |
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c index aaed4ab503ad..4157c90ad973 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c | |||
@@ -980,7 +980,7 @@ static void brcmf_escan_prep(struct brcmf_cfg80211_info *cfg, | |||
980 | 980 | ||
981 | eth_broadcast_addr(params_le->bssid); | 981 | eth_broadcast_addr(params_le->bssid); |
982 | params_le->bss_type = DOT11_BSSTYPE_ANY; | 982 | params_le->bss_type = DOT11_BSSTYPE_ANY; |
983 | params_le->scan_type = 0; | 983 | params_le->scan_type = BRCMF_SCANTYPE_ACTIVE; |
984 | params_le->channel_num = 0; | 984 | params_le->channel_num = 0; |
985 | params_le->nprobes = cpu_to_le32(-1); | 985 | params_le->nprobes = cpu_to_le32(-1); |
986 | params_le->active_time = cpu_to_le32(-1); | 986 | params_le->active_time = cpu_to_le32(-1); |
@@ -988,12 +988,9 @@ static void brcmf_escan_prep(struct brcmf_cfg80211_info *cfg, | |||
988 | params_le->home_time = cpu_to_le32(-1); | 988 | params_le->home_time = cpu_to_le32(-1); |
989 | memset(¶ms_le->ssid_le, 0, sizeof(params_le->ssid_le)); | 989 | memset(¶ms_le->ssid_le, 0, sizeof(params_le->ssid_le)); |
990 | 990 | ||
991 | /* if request is null exit so it will be all channel broadcast scan */ | ||
992 | if (!request) | ||
993 | return; | ||
994 | |||
995 | n_ssids = request->n_ssids; | 991 | n_ssids = request->n_ssids; |
996 | n_channels = request->n_channels; | 992 | n_channels = request->n_channels; |
993 | |||
997 | /* Copy channel array if applicable */ | 994 | /* Copy channel array if applicable */ |
998 | brcmf_dbg(SCAN, "### List of channelspecs to scan ### %d\n", | 995 | brcmf_dbg(SCAN, "### List of channelspecs to scan ### %d\n", |
999 | n_channels); | 996 | n_channels); |
@@ -1030,16 +1027,8 @@ static void brcmf_escan_prep(struct brcmf_cfg80211_info *cfg, | |||
1030 | ptr += sizeof(ssid_le); | 1027 | ptr += sizeof(ssid_le); |
1031 | } | 1028 | } |
1032 | } else { | 1029 | } else { |
1033 | brcmf_dbg(SCAN, "Broadcast scan %p\n", request->ssids); | 1030 | brcmf_dbg(SCAN, "Performing passive scan\n"); |
1034 | if ((request->ssids) && request->ssids->ssid_len) { | 1031 | params_le->scan_type = BRCMF_SCANTYPE_PASSIVE; |
1035 | brcmf_dbg(SCAN, "SSID %s len=%d\n", | ||
1036 | params_le->ssid_le.SSID, | ||
1037 | request->ssids->ssid_len); | ||
1038 | params_le->ssid_le.SSID_len = | ||
1039 | cpu_to_le32(request->ssids->ssid_len); | ||
1040 | memcpy(¶ms_le->ssid_le.SSID, request->ssids->ssid, | ||
1041 | request->ssids->ssid_len); | ||
1042 | } | ||
1043 | } | 1032 | } |
1044 | /* Adding mask to channel numbers */ | 1033 | /* Adding mask to channel numbers */ |
1045 | params_le->channel_num = | 1034 | params_le->channel_num = |
@@ -3162,6 +3151,7 @@ brcmf_cfg80211_escan_handler(struct brcmf_if *ifp, | |||
3162 | struct brcmf_cfg80211_info *cfg = ifp->drvr->config; | 3151 | struct brcmf_cfg80211_info *cfg = ifp->drvr->config; |
3163 | s32 status; | 3152 | s32 status; |
3164 | struct brcmf_escan_result_le *escan_result_le; | 3153 | struct brcmf_escan_result_le *escan_result_le; |
3154 | u32 escan_buflen; | ||
3165 | struct brcmf_bss_info_le *bss_info_le; | 3155 | struct brcmf_bss_info_le *bss_info_le; |
3166 | struct brcmf_bss_info_le *bss = NULL; | 3156 | struct brcmf_bss_info_le *bss = NULL; |
3167 | u32 bi_length; | 3157 | u32 bi_length; |
@@ -3181,11 +3171,23 @@ brcmf_cfg80211_escan_handler(struct brcmf_if *ifp, | |||
3181 | 3171 | ||
3182 | if (status == BRCMF_E_STATUS_PARTIAL) { | 3172 | if (status == BRCMF_E_STATUS_PARTIAL) { |
3183 | brcmf_dbg(SCAN, "ESCAN Partial result\n"); | 3173 | brcmf_dbg(SCAN, "ESCAN Partial result\n"); |
3174 | if (e->datalen < sizeof(*escan_result_le)) { | ||
3175 | brcmf_err("invalid event data length\n"); | ||
3176 | goto exit; | ||
3177 | } | ||
3184 | escan_result_le = (struct brcmf_escan_result_le *) data; | 3178 | escan_result_le = (struct brcmf_escan_result_le *) data; |
3185 | if (!escan_result_le) { | 3179 | if (!escan_result_le) { |
3186 | brcmf_err("Invalid escan result (NULL pointer)\n"); | 3180 | brcmf_err("Invalid escan result (NULL pointer)\n"); |
3187 | goto exit; | 3181 | goto exit; |
3188 | } | 3182 | } |
3183 | escan_buflen = le32_to_cpu(escan_result_le->buflen); | ||
3184 | if (escan_buflen > BRCMF_ESCAN_BUF_SIZE || | ||
3185 | escan_buflen > e->datalen || | ||
3186 | escan_buflen < sizeof(*escan_result_le)) { | ||
3187 | brcmf_err("Invalid escan buffer length: %d\n", | ||
3188 | escan_buflen); | ||
3189 | goto exit; | ||
3190 | } | ||
3189 | if (le16_to_cpu(escan_result_le->bss_count) != 1) { | 3191 | if (le16_to_cpu(escan_result_le->bss_count) != 1) { |
3190 | brcmf_err("Invalid bss_count %d: ignoring\n", | 3192 | brcmf_err("Invalid bss_count %d: ignoring\n", |
3191 | escan_result_le->bss_count); | 3193 | escan_result_le->bss_count); |
@@ -3202,9 +3204,8 @@ brcmf_cfg80211_escan_handler(struct brcmf_if *ifp, | |||
3202 | } | 3204 | } |
3203 | 3205 | ||
3204 | bi_length = le32_to_cpu(bss_info_le->length); | 3206 | bi_length = le32_to_cpu(bss_info_le->length); |
3205 | if (bi_length != (le32_to_cpu(escan_result_le->buflen) - | 3207 | if (bi_length != escan_buflen - WL_ESCAN_RESULTS_FIXED_SIZE) { |
3206 | WL_ESCAN_RESULTS_FIXED_SIZE)) { | 3208 | brcmf_err("Ignoring invalid bss_info length: %d\n", |
3207 | brcmf_err("Invalid bss_info length %d: ignoring\n", | ||
3208 | bi_length); | 3209 | bi_length); |
3209 | goto exit; | 3210 | goto exit; |
3210 | } | 3211 | } |
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h index 8391989b1882..e0d22fedb2b4 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h | |||
@@ -45,6 +45,11 @@ | |||
45 | #define BRCMF_SCAN_PARAMS_COUNT_MASK 0x0000ffff | 45 | #define BRCMF_SCAN_PARAMS_COUNT_MASK 0x0000ffff |
46 | #define BRCMF_SCAN_PARAMS_NSSID_SHIFT 16 | 46 | #define BRCMF_SCAN_PARAMS_NSSID_SHIFT 16 |
47 | 47 | ||
48 | /* scan type definitions */ | ||
49 | #define BRCMF_SCANTYPE_DEFAULT 0xFF | ||
50 | #define BRCMF_SCANTYPE_ACTIVE 0 | ||
51 | #define BRCMF_SCANTYPE_PASSIVE 1 | ||
52 | |||
48 | #define BRCMF_WSEC_MAX_PSK_LEN 32 | 53 | #define BRCMF_WSEC_MAX_PSK_LEN 32 |
49 | #define BRCMF_WSEC_PASSPHRASE BIT(0) | 54 | #define BRCMF_WSEC_PASSPHRASE BIT(0) |
50 | 55 | ||
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c index 5de19ea10575..b205a7bfb828 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c | |||
@@ -2167,7 +2167,7 @@ out: | |||
2167 | * 1. We are not using a unified image | 2167 | * 1. We are not using a unified image |
2168 | * 2. We are using a unified image but had an error while exiting D3 | 2168 | * 2. We are using a unified image but had an error while exiting D3 |
2169 | */ | 2169 | */ |
2170 | set_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status); | 2170 | set_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED, &mvm->status); |
2171 | set_bit(IWL_MVM_STATUS_D3_RECONFIG, &mvm->status); | 2171 | set_bit(IWL_MVM_STATUS_D3_RECONFIG, &mvm->status); |
2172 | /* | 2172 | /* |
2173 | * When switching images we return 1, which causes mac80211 | 2173 | * When switching images we return 1, which causes mac80211 |
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c index 15f2d826bb4b..3bcaa82f59b2 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c | |||
@@ -1546,6 +1546,11 @@ static void iwl_mvm_mc_iface_iterator(void *_data, u8 *mac, | |||
1546 | struct iwl_mvm_mc_iter_data *data = _data; | 1546 | struct iwl_mvm_mc_iter_data *data = _data; |
1547 | struct iwl_mvm *mvm = data->mvm; | 1547 | struct iwl_mvm *mvm = data->mvm; |
1548 | struct iwl_mcast_filter_cmd *cmd = mvm->mcast_filter_cmd; | 1548 | struct iwl_mcast_filter_cmd *cmd = mvm->mcast_filter_cmd; |
1549 | struct iwl_host_cmd hcmd = { | ||
1550 | .id = MCAST_FILTER_CMD, | ||
1551 | .flags = CMD_ASYNC, | ||
1552 | .dataflags[0] = IWL_HCMD_DFL_NOCOPY, | ||
1553 | }; | ||
1549 | int ret, len; | 1554 | int ret, len; |
1550 | 1555 | ||
1551 | /* if we don't have free ports, mcast frames will be dropped */ | 1556 | /* if we don't have free ports, mcast frames will be dropped */ |
@@ -1560,7 +1565,10 @@ static void iwl_mvm_mc_iface_iterator(void *_data, u8 *mac, | |||
1560 | memcpy(cmd->bssid, vif->bss_conf.bssid, ETH_ALEN); | 1565 | memcpy(cmd->bssid, vif->bss_conf.bssid, ETH_ALEN); |
1561 | len = roundup(sizeof(*cmd) + cmd->count * ETH_ALEN, 4); | 1566 | len = roundup(sizeof(*cmd) + cmd->count * ETH_ALEN, 4); |
1562 | 1567 | ||
1563 | ret = iwl_mvm_send_cmd_pdu(mvm, MCAST_FILTER_CMD, CMD_ASYNC, len, cmd); | 1568 | hcmd.len[0] = len; |
1569 | hcmd.data[0] = cmd; | ||
1570 | |||
1571 | ret = iwl_mvm_send_cmd(mvm, &hcmd); | ||
1564 | if (ret) | 1572 | if (ret) |
1565 | IWL_ERR(mvm, "mcast filter cmd error. ret=%d\n", ret); | 1573 | IWL_ERR(mvm, "mcast filter cmd error. ret=%d\n", ret); |
1566 | } | 1574 | } |
@@ -1635,6 +1643,12 @@ static void iwl_mvm_configure_filter(struct ieee80211_hw *hw, | |||
1635 | if (!cmd) | 1643 | if (!cmd) |
1636 | goto out; | 1644 | goto out; |
1637 | 1645 | ||
1646 | if (changed_flags & FIF_ALLMULTI) | ||
1647 | cmd->pass_all = !!(*total_flags & FIF_ALLMULTI); | ||
1648 | |||
1649 | if (cmd->pass_all) | ||
1650 | cmd->count = 0; | ||
1651 | |||
1638 | iwl_mvm_recalc_multicast(mvm); | 1652 | iwl_mvm_recalc_multicast(mvm); |
1639 | out: | 1653 | out: |
1640 | mutex_unlock(&mvm->mutex); | 1654 | mutex_unlock(&mvm->mutex); |
@@ -2563,7 +2577,7 @@ static void iwl_mvm_purge_deferred_tx_frames(struct iwl_mvm *mvm, | |||
2563 | * queues, so we should never get a second deferred | 2577 | * queues, so we should never get a second deferred |
2564 | * frame for the RA/TID. | 2578 | * frame for the RA/TID. |
2565 | */ | 2579 | */ |
2566 | iwl_mvm_start_mac_queues(mvm, info->hw_queue); | 2580 | iwl_mvm_start_mac_queues(mvm, BIT(info->hw_queue)); |
2567 | ieee80211_free_txskb(mvm->hw, skb); | 2581 | ieee80211_free_txskb(mvm->hw, skb); |
2568 | } | 2582 | } |
2569 | } | 2583 | } |
@@ -3975,6 +3989,43 @@ out_unlock: | |||
3975 | return ret; | 3989 | return ret; |
3976 | } | 3990 | } |
3977 | 3991 | ||
3992 | static void iwl_mvm_flush_no_vif(struct iwl_mvm *mvm, u32 queues, bool drop) | ||
3993 | { | ||
3994 | if (drop) { | ||
3995 | if (iwl_mvm_has_new_tx_api(mvm)) | ||
3996 | /* TODO new tx api */ | ||
3997 | WARN_ONCE(1, | ||
3998 | "Need to implement flush TX queue\n"); | ||
3999 | else | ||
4000 | iwl_mvm_flush_tx_path(mvm, | ||
4001 | iwl_mvm_flushable_queues(mvm) & queues, | ||
4002 | 0); | ||
4003 | } else { | ||
4004 | if (iwl_mvm_has_new_tx_api(mvm)) { | ||
4005 | struct ieee80211_sta *sta; | ||
4006 | int i; | ||
4007 | |||
4008 | mutex_lock(&mvm->mutex); | ||
4009 | |||
4010 | for (i = 0; i < ARRAY_SIZE(mvm->fw_id_to_mac_id); i++) { | ||
4011 | sta = rcu_dereference_protected( | ||
4012 | mvm->fw_id_to_mac_id[i], | ||
4013 | lockdep_is_held(&mvm->mutex)); | ||
4014 | if (IS_ERR_OR_NULL(sta)) | ||
4015 | continue; | ||
4016 | |||
4017 | iwl_mvm_wait_sta_queues_empty(mvm, | ||
4018 | iwl_mvm_sta_from_mac80211(sta)); | ||
4019 | } | ||
4020 | |||
4021 | mutex_unlock(&mvm->mutex); | ||
4022 | } else { | ||
4023 | iwl_trans_wait_tx_queues_empty(mvm->trans, | ||
4024 | queues); | ||
4025 | } | ||
4026 | } | ||
4027 | } | ||
4028 | |||
3978 | static void iwl_mvm_mac_flush(struct ieee80211_hw *hw, | 4029 | static void iwl_mvm_mac_flush(struct ieee80211_hw *hw, |
3979 | struct ieee80211_vif *vif, u32 queues, bool drop) | 4030 | struct ieee80211_vif *vif, u32 queues, bool drop) |
3980 | { | 4031 | { |
@@ -3985,7 +4036,12 @@ static void iwl_mvm_mac_flush(struct ieee80211_hw *hw, | |||
3985 | int i; | 4036 | int i; |
3986 | u32 msk = 0; | 4037 | u32 msk = 0; |
3987 | 4038 | ||
3988 | if (!vif || vif->type != NL80211_IFTYPE_STATION) | 4039 | if (!vif) { |
4040 | iwl_mvm_flush_no_vif(mvm, queues, drop); | ||
4041 | return; | ||
4042 | } | ||
4043 | |||
4044 | if (vif->type != NL80211_IFTYPE_STATION) | ||
3989 | return; | 4045 | return; |
3990 | 4046 | ||
3991 | /* Make sure we're done with the deferred traffic before flushing */ | 4047 | /* Make sure we're done with the deferred traffic before flushing */ |
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c index ba7bd049d3d4..0fe723ca844e 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c | |||
@@ -661,7 +661,8 @@ static void rs_tl_turn_on_agg(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta, | |||
661 | (lq_sta->tx_agg_tid_en & BIT(tid)) && | 661 | (lq_sta->tx_agg_tid_en & BIT(tid)) && |
662 | (tid_data->tx_count_last >= IWL_MVM_RS_AGG_START_THRESHOLD)) { | 662 | (tid_data->tx_count_last >= IWL_MVM_RS_AGG_START_THRESHOLD)) { |
663 | IWL_DEBUG_RATE(mvm, "try to aggregate tid %d\n", tid); | 663 | IWL_DEBUG_RATE(mvm, "try to aggregate tid %d\n", tid); |
664 | rs_tl_turn_on_agg_for_tid(mvm, lq_sta, tid, sta); | 664 | if (rs_tl_turn_on_agg_for_tid(mvm, lq_sta, tid, sta) == 0) |
665 | tid_data->state = IWL_AGG_QUEUED; | ||
665 | } | 666 | } |
666 | } | 667 | } |
667 | 668 | ||
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c index 67ffd9774712..77f77bc5d083 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c | |||
@@ -672,11 +672,12 @@ static bool iwl_mvm_reorder(struct iwl_mvm *mvm, | |||
672 | * If there was a significant jump in the nssn - adjust. | 672 | * If there was a significant jump in the nssn - adjust. |
673 | * If the SN is smaller than the NSSN it might need to first go into | 673 | * If the SN is smaller than the NSSN it might need to first go into |
674 | * the reorder buffer, in which case we just release up to it and the | 674 | * the reorder buffer, in which case we just release up to it and the |
675 | * rest of the function will take of storing it and releasing up to the | 675 | * rest of the function will take care of storing it and releasing up to |
676 | * nssn | 676 | * the nssn |
677 | */ | 677 | */ |
678 | if (!iwl_mvm_is_sn_less(nssn, buffer->head_sn + buffer->buf_size, | 678 | if (!iwl_mvm_is_sn_less(nssn, buffer->head_sn + buffer->buf_size, |
679 | buffer->buf_size)) { | 679 | buffer->buf_size) || |
680 | !ieee80211_sn_less(sn, buffer->head_sn + buffer->buf_size)) { | ||
680 | u16 min_sn = ieee80211_sn_less(sn, nssn) ? sn : nssn; | 681 | u16 min_sn = ieee80211_sn_less(sn, nssn) ? sn : nssn; |
681 | 682 | ||
682 | iwl_mvm_release_frames(mvm, sta, napi, buffer, min_sn); | 683 | iwl_mvm_release_frames(mvm, sta, napi, buffer, min_sn); |
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c index 50983615dce6..774122fed454 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c | |||
@@ -555,7 +555,7 @@ static int iwl_mvm_lmac_scan_abort(struct iwl_mvm *mvm) | |||
555 | struct iwl_host_cmd cmd = { | 555 | struct iwl_host_cmd cmd = { |
556 | .id = SCAN_OFFLOAD_ABORT_CMD, | 556 | .id = SCAN_OFFLOAD_ABORT_CMD, |
557 | }; | 557 | }; |
558 | u32 status; | 558 | u32 status = CAN_ABORT_STATUS; |
559 | 559 | ||
560 | ret = iwl_mvm_send_cmd_status(mvm, &cmd, &status); | 560 | ret = iwl_mvm_send_cmd_status(mvm, &cmd, &status); |
561 | if (ret) | 561 | if (ret) |
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c index 411a2055dc45..c4a343534c5e 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c | |||
@@ -1285,7 +1285,7 @@ static int iwl_mvm_add_int_sta_common(struct iwl_mvm *mvm, | |||
1285 | { | 1285 | { |
1286 | struct iwl_mvm_add_sta_cmd cmd; | 1286 | struct iwl_mvm_add_sta_cmd cmd; |
1287 | int ret; | 1287 | int ret; |
1288 | u32 status; | 1288 | u32 status = ADD_STA_SUCCESS; |
1289 | 1289 | ||
1290 | lockdep_assert_held(&mvm->mutex); | 1290 | lockdep_assert_held(&mvm->mutex); |
1291 | 1291 | ||
@@ -2385,8 +2385,10 @@ int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif, | |||
2385 | if (WARN_ON_ONCE(tid >= IWL_MAX_TID_COUNT)) | 2385 | if (WARN_ON_ONCE(tid >= IWL_MAX_TID_COUNT)) |
2386 | return -EINVAL; | 2386 | return -EINVAL; |
2387 | 2387 | ||
2388 | if (mvmsta->tid_data[tid].state != IWL_AGG_OFF) { | 2388 | if (mvmsta->tid_data[tid].state != IWL_AGG_QUEUED && |
2389 | IWL_ERR(mvm, "Start AGG when state is not IWL_AGG_OFF %d!\n", | 2389 | mvmsta->tid_data[tid].state != IWL_AGG_OFF) { |
2390 | IWL_ERR(mvm, | ||
2391 | "Start AGG when state is not IWL_AGG_QUEUED or IWL_AGG_OFF %d!\n", | ||
2390 | mvmsta->tid_data[tid].state); | 2392 | mvmsta->tid_data[tid].state); |
2391 | return -ENXIO; | 2393 | return -ENXIO; |
2392 | } | 2394 | } |
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h index d13893806513..aedabe101cf0 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h | |||
@@ -281,6 +281,7 @@ struct iwl_mvm_vif; | |||
281 | * These states relate to a specific RA / TID. | 281 | * These states relate to a specific RA / TID. |
282 | * | 282 | * |
283 | * @IWL_AGG_OFF: aggregation is not used | 283 | * @IWL_AGG_OFF: aggregation is not used |
284 | * @IWL_AGG_QUEUED: aggregation start work has been queued | ||
284 | * @IWL_AGG_STARTING: aggregation are starting (between start and oper) | 285 | * @IWL_AGG_STARTING: aggregation are starting (between start and oper) |
285 | * @IWL_AGG_ON: aggregation session is up | 286 | * @IWL_AGG_ON: aggregation session is up |
286 | * @IWL_EMPTYING_HW_QUEUE_ADDBA: establishing a BA session - waiting for the | 287 | * @IWL_EMPTYING_HW_QUEUE_ADDBA: establishing a BA session - waiting for the |
@@ -290,6 +291,7 @@ struct iwl_mvm_vif; | |||
290 | */ | 291 | */ |
291 | enum iwl_mvm_agg_state { | 292 | enum iwl_mvm_agg_state { |
292 | IWL_AGG_OFF = 0, | 293 | IWL_AGG_OFF = 0, |
294 | IWL_AGG_QUEUED, | ||
293 | IWL_AGG_STARTING, | 295 | IWL_AGG_STARTING, |
294 | IWL_AGG_ON, | 296 | IWL_AGG_ON, |
295 | IWL_EMPTYING_HW_QUEUE_ADDBA, | 297 | IWL_EMPTYING_HW_QUEUE_ADDBA, |
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tt.c b/drivers/net/wireless/intel/iwlwifi/mvm/tt.c index 8876c2abc440..4d907f60bce9 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/tt.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/tt.c | |||
@@ -529,6 +529,7 @@ int iwl_mvm_ctdp_command(struct iwl_mvm *mvm, u32 op, u32 state) | |||
529 | 529 | ||
530 | lockdep_assert_held(&mvm->mutex); | 530 | lockdep_assert_held(&mvm->mutex); |
531 | 531 | ||
532 | status = 0; | ||
532 | ret = iwl_mvm_send_cmd_pdu_status(mvm, WIDE_ID(PHY_OPS_GROUP, | 533 | ret = iwl_mvm_send_cmd_pdu_status(mvm, WIDE_ID(PHY_OPS_GROUP, |
533 | CTDP_CONFIG_CMD), | 534 | CTDP_CONFIG_CMD), |
534 | sizeof(cmd), &cmd, &status); | 535 | sizeof(cmd), &cmd, &status); |
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c index 172b5e63d3fb..6f2e2af23219 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c | |||
@@ -564,8 +564,8 @@ static int iwl_mvm_get_ctrl_vif_queue(struct iwl_mvm *mvm, | |||
564 | case NL80211_IFTYPE_AP: | 564 | case NL80211_IFTYPE_AP: |
565 | case NL80211_IFTYPE_ADHOC: | 565 | case NL80211_IFTYPE_ADHOC: |
566 | /* | 566 | /* |
567 | * Handle legacy hostapd as well, where station will be added | 567 | * Non-bufferable frames use the broadcast station, thus they |
568 | * only just before sending the association response. | 568 | * use the probe queue. |
569 | * Also take care of the case where we send a deauth to a | 569 | * Also take care of the case where we send a deauth to a |
570 | * station that we don't have, or similarly an association | 570 | * station that we don't have, or similarly an association |
571 | * response (with non-success status) for a station we can't | 571 | * response (with non-success status) for a station we can't |
@@ -573,9 +573,9 @@ static int iwl_mvm_get_ctrl_vif_queue(struct iwl_mvm *mvm, | |||
573 | * Also, disassociate frames might happen, particular with | 573 | * Also, disassociate frames might happen, particular with |
574 | * reason 7 ("Class 3 frame received from nonassociated STA"). | 574 | * reason 7 ("Class 3 frame received from nonassociated STA"). |
575 | */ | 575 | */ |
576 | if (ieee80211_is_probe_resp(fc) || ieee80211_is_auth(fc) || | 576 | if (ieee80211_is_mgmt(fc) && |
577 | ieee80211_is_deauth(fc) || ieee80211_is_assoc_resp(fc) || | 577 | (!ieee80211_is_bufferable_mmpdu(fc) || |
578 | ieee80211_is_disassoc(fc)) | 578 | ieee80211_is_deauth(fc) || ieee80211_is_disassoc(fc))) |
579 | return mvm->probe_queue; | 579 | return mvm->probe_queue; |
580 | if (info->hw_queue == info->control.vif->cab_queue) | 580 | if (info->hw_queue == info->control.vif->cab_queue) |
581 | return mvmvif->cab_queue; | 581 | return mvmvif->cab_queue; |
diff --git a/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c b/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c index 856fa6e8327e..a450bc6bc774 100644 --- a/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c +++ b/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c | |||
@@ -115,6 +115,8 @@ int qtnf_del_virtual_intf(struct wiphy *wiphy, struct wireless_dev *wdev) | |||
115 | 115 | ||
116 | vif = qtnf_netdev_get_priv(wdev->netdev); | 116 | vif = qtnf_netdev_get_priv(wdev->netdev); |
117 | 117 | ||
118 | qtnf_scan_done(vif->mac, true); | ||
119 | |||
118 | if (qtnf_cmd_send_del_intf(vif)) | 120 | if (qtnf_cmd_send_del_intf(vif)) |
119 | pr_err("VIF%u.%u: failed to delete VIF\n", vif->mac->macid, | 121 | pr_err("VIF%u.%u: failed to delete VIF\n", vif->mac->macid, |
120 | vif->vifid); | 122 | vif->vifid); |
@@ -335,6 +337,8 @@ static int qtnf_stop_ap(struct wiphy *wiphy, struct net_device *dev) | |||
335 | struct qtnf_vif *vif = qtnf_netdev_get_priv(dev); | 337 | struct qtnf_vif *vif = qtnf_netdev_get_priv(dev); |
336 | int ret; | 338 | int ret; |
337 | 339 | ||
340 | qtnf_scan_done(vif->mac, true); | ||
341 | |||
338 | ret = qtnf_cmd_send_stop_ap(vif); | 342 | ret = qtnf_cmd_send_stop_ap(vif); |
339 | if (ret) { | 343 | if (ret) { |
340 | pr_err("VIF%u.%u: failed to stop AP operation in FW\n", | 344 | pr_err("VIF%u.%u: failed to stop AP operation in FW\n", |
@@ -570,8 +574,6 @@ qtnf_del_station(struct wiphy *wiphy, struct net_device *dev, | |||
570 | !qtnf_sta_list_lookup(&vif->sta_list, params->mac)) | 574 | !qtnf_sta_list_lookup(&vif->sta_list, params->mac)) |
571 | return 0; | 575 | return 0; |
572 | 576 | ||
573 | qtnf_scan_done(vif->mac, true); | ||
574 | |||
575 | ret = qtnf_cmd_send_del_sta(vif, params); | 577 | ret = qtnf_cmd_send_del_sta(vif, params); |
576 | if (ret) | 578 | if (ret) |
577 | pr_err("VIF%u.%u: failed to delete STA %pM\n", | 579 | pr_err("VIF%u.%u: failed to delete STA %pM\n", |
@@ -1134,8 +1136,9 @@ void qtnf_virtual_intf_cleanup(struct net_device *ndev) | |||
1134 | } | 1136 | } |
1135 | 1137 | ||
1136 | vif->sta_state = QTNF_STA_DISCONNECTED; | 1138 | vif->sta_state = QTNF_STA_DISCONNECTED; |
1137 | qtnf_scan_done(mac, true); | ||
1138 | } | 1139 | } |
1140 | |||
1141 | qtnf_scan_done(mac, true); | ||
1139 | } | 1142 | } |
1140 | 1143 | ||
1141 | void qtnf_cfg80211_vif_reset(struct qtnf_vif *vif) | 1144 | void qtnf_cfg80211_vif_reset(struct qtnf_vif *vif) |
diff --git a/drivers/net/wireless/quantenna/qtnfmac/cfg80211.h b/drivers/net/wireless/quantenna/qtnfmac/cfg80211.h index 6a4af52522b8..66db26613b1f 100644 --- a/drivers/net/wireless/quantenna/qtnfmac/cfg80211.h +++ b/drivers/net/wireless/quantenna/qtnfmac/cfg80211.h | |||
@@ -34,6 +34,9 @@ static inline void qtnf_scan_done(struct qtnf_wmac *mac, bool aborted) | |||
34 | .aborted = aborted, | 34 | .aborted = aborted, |
35 | }; | 35 | }; |
36 | 36 | ||
37 | if (timer_pending(&mac->scan_timeout)) | ||
38 | del_timer_sync(&mac->scan_timeout); | ||
39 | |||
37 | mutex_lock(&mac->mac_lock); | 40 | mutex_lock(&mac->mac_lock); |
38 | 41 | ||
39 | if (mac->scan_req) { | 42 | if (mac->scan_req) { |
diff --git a/drivers/net/wireless/quantenna/qtnfmac/event.c b/drivers/net/wireless/quantenna/qtnfmac/event.c index 0fc2814eafad..43d2e7fd6e02 100644 --- a/drivers/net/wireless/quantenna/qtnfmac/event.c +++ b/drivers/net/wireless/quantenna/qtnfmac/event.c | |||
@@ -345,8 +345,6 @@ qtnf_event_handle_scan_complete(struct qtnf_wmac *mac, | |||
345 | return -EINVAL; | 345 | return -EINVAL; |
346 | } | 346 | } |
347 | 347 | ||
348 | if (timer_pending(&mac->scan_timeout)) | ||
349 | del_timer_sync(&mac->scan_timeout); | ||
350 | qtnf_scan_done(mac, le32_to_cpu(status->flags) & QLINK_SCAN_ABORTED); | 348 | qtnf_scan_done(mac, le32_to_cpu(status->flags) & QLINK_SCAN_ABORTED); |
351 | 349 | ||
352 | return 0; | 350 | return 0; |
diff --git a/drivers/net/wireless/quantenna/qtnfmac/pearl/pcie.c b/drivers/net/wireless/quantenna/qtnfmac/pearl/pcie.c index 502e72b7cdcc..69131965a298 100644 --- a/drivers/net/wireless/quantenna/qtnfmac/pearl/pcie.c +++ b/drivers/net/wireless/quantenna/qtnfmac/pearl/pcie.c | |||
@@ -661,14 +661,18 @@ static int qtnf_pcie_data_tx(struct qtnf_bus *bus, struct sk_buff *skb) | |||
661 | struct qtnf_pcie_bus_priv *priv = (void *)get_bus_priv(bus); | 661 | struct qtnf_pcie_bus_priv *priv = (void *)get_bus_priv(bus); |
662 | dma_addr_t txbd_paddr, skb_paddr; | 662 | dma_addr_t txbd_paddr, skb_paddr; |
663 | struct qtnf_tx_bd *txbd; | 663 | struct qtnf_tx_bd *txbd; |
664 | unsigned long flags; | ||
664 | int len, i; | 665 | int len, i; |
665 | u32 info; | 666 | u32 info; |
666 | int ret = 0; | 667 | int ret = 0; |
667 | 668 | ||
669 | spin_lock_irqsave(&priv->tx0_lock, flags); | ||
670 | |||
668 | if (!qtnf_tx_queue_ready(priv)) { | 671 | if (!qtnf_tx_queue_ready(priv)) { |
669 | if (skb->dev) | 672 | if (skb->dev) |
670 | netif_stop_queue(skb->dev); | 673 | netif_stop_queue(skb->dev); |
671 | 674 | ||
675 | spin_unlock_irqrestore(&priv->tx0_lock, flags); | ||
672 | return NETDEV_TX_BUSY; | 676 | return NETDEV_TX_BUSY; |
673 | } | 677 | } |
674 | 678 | ||
@@ -717,8 +721,10 @@ tx_done: | |||
717 | dev_kfree_skb_any(skb); | 721 | dev_kfree_skb_any(skb); |
718 | } | 722 | } |
719 | 723 | ||
720 | qtnf_pcie_data_tx_reclaim(priv); | ||
721 | priv->tx_done_count++; | 724 | priv->tx_done_count++; |
725 | spin_unlock_irqrestore(&priv->tx0_lock, flags); | ||
726 | |||
727 | qtnf_pcie_data_tx_reclaim(priv); | ||
722 | 728 | ||
723 | return NETDEV_TX_OK; | 729 | return NETDEV_TX_OK; |
724 | } | 730 | } |
@@ -1247,6 +1253,7 @@ static int qtnf_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id) | |||
1247 | strcpy(bus->fwname, QTN_PCI_PEARL_FW_NAME); | 1253 | strcpy(bus->fwname, QTN_PCI_PEARL_FW_NAME); |
1248 | init_completion(&bus->request_firmware_complete); | 1254 | init_completion(&bus->request_firmware_complete); |
1249 | mutex_init(&bus->bus_lock); | 1255 | mutex_init(&bus->bus_lock); |
1256 | spin_lock_init(&pcie_priv->tx0_lock); | ||
1250 | spin_lock_init(&pcie_priv->irq_lock); | 1257 | spin_lock_init(&pcie_priv->irq_lock); |
1251 | spin_lock_init(&pcie_priv->tx_reclaim_lock); | 1258 | spin_lock_init(&pcie_priv->tx_reclaim_lock); |
1252 | 1259 | ||
diff --git a/drivers/net/wireless/quantenna/qtnfmac/pearl/pcie_bus_priv.h b/drivers/net/wireless/quantenna/qtnfmac/pearl/pcie_bus_priv.h index e76a23716ee0..86ac1ccedb52 100644 --- a/drivers/net/wireless/quantenna/qtnfmac/pearl/pcie_bus_priv.h +++ b/drivers/net/wireless/quantenna/qtnfmac/pearl/pcie_bus_priv.h | |||
@@ -34,6 +34,8 @@ struct qtnf_pcie_bus_priv { | |||
34 | 34 | ||
35 | /* lock for tx reclaim operations */ | 35 | /* lock for tx reclaim operations */ |
36 | spinlock_t tx_reclaim_lock; | 36 | spinlock_t tx_reclaim_lock; |
37 | /* lock for tx0 operations */ | ||
38 | spinlock_t tx0_lock; | ||
37 | u8 msi_enabled; | 39 | u8 msi_enabled; |
38 | int mps; | 40 | int mps; |
39 | 41 | ||
diff --git a/drivers/nvdimm/namespace_devs.c b/drivers/nvdimm/namespace_devs.c index 1427a386a033..3e4d1e7998da 100644 --- a/drivers/nvdimm/namespace_devs.c +++ b/drivers/nvdimm/namespace_devs.c | |||
@@ -1417,6 +1417,15 @@ static int btt_claim_class(struct device *dev) | |||
1417 | struct nvdimm_drvdata *ndd = to_ndd(nd_mapping); | 1417 | struct nvdimm_drvdata *ndd = to_ndd(nd_mapping); |
1418 | struct nd_namespace_index *nsindex; | 1418 | struct nd_namespace_index *nsindex; |
1419 | 1419 | ||
1420 | /* | ||
1421 | * If any of the DIMMs do not support labels the only | ||
1422 | * possible BTT format is v1. | ||
1423 | */ | ||
1424 | if (!ndd) { | ||
1425 | loop_bitmask = 0; | ||
1426 | break; | ||
1427 | } | ||
1428 | |||
1420 | nsindex = to_namespace_index(ndd, ndd->ns_current); | 1429 | nsindex = to_namespace_index(ndd, ndd->ns_current); |
1421 | if (nsindex == NULL) | 1430 | if (nsindex == NULL) |
1422 | loop_bitmask |= 1; | 1431 | loop_bitmask |= 1; |
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c index acc816b67582..5a14cc7f28ee 100644 --- a/drivers/nvme/host/core.c +++ b/drivers/nvme/host/core.c | |||
@@ -134,8 +134,6 @@ static inline bool nvme_req_needs_retry(struct request *req) | |||
134 | return false; | 134 | return false; |
135 | if (nvme_req(req)->status & NVME_SC_DNR) | 135 | if (nvme_req(req)->status & NVME_SC_DNR) |
136 | return false; | 136 | return false; |
137 | if (jiffies - req->start_time >= req->timeout) | ||
138 | return false; | ||
139 | if (nvme_req(req)->retries >= nvme_max_retries) | 137 | if (nvme_req(req)->retries >= nvme_max_retries) |
140 | return false; | 138 | return false; |
141 | return true; | 139 | return true; |
@@ -2138,7 +2136,7 @@ static umode_t nvme_ns_attrs_are_visible(struct kobject *kobj, | |||
2138 | struct nvme_ns *ns = nvme_get_ns_from_dev(dev); | 2136 | struct nvme_ns *ns = nvme_get_ns_from_dev(dev); |
2139 | 2137 | ||
2140 | if (a == &dev_attr_uuid.attr) { | 2138 | if (a == &dev_attr_uuid.attr) { |
2141 | if (uuid_is_null(&ns->uuid) || | 2139 | if (uuid_is_null(&ns->uuid) && |
2142 | !memchr_inv(ns->nguid, 0, sizeof(ns->nguid))) | 2140 | !memchr_inv(ns->nguid, 0, sizeof(ns->nguid))) |
2143 | return 0; | 2141 | return 0; |
2144 | } | 2142 | } |
@@ -2590,7 +2588,7 @@ static void nvme_async_event_work(struct work_struct *work) | |||
2590 | container_of(work, struct nvme_ctrl, async_event_work); | 2588 | container_of(work, struct nvme_ctrl, async_event_work); |
2591 | 2589 | ||
2592 | spin_lock_irq(&ctrl->lock); | 2590 | spin_lock_irq(&ctrl->lock); |
2593 | while (ctrl->event_limit > 0) { | 2591 | while (ctrl->state == NVME_CTRL_LIVE && ctrl->event_limit > 0) { |
2594 | int aer_idx = --ctrl->event_limit; | 2592 | int aer_idx = --ctrl->event_limit; |
2595 | 2593 | ||
2596 | spin_unlock_irq(&ctrl->lock); | 2594 | spin_unlock_irq(&ctrl->lock); |
@@ -2677,7 +2675,8 @@ void nvme_complete_async_event(struct nvme_ctrl *ctrl, __le16 status, | |||
2677 | /*FALLTHRU*/ | 2675 | /*FALLTHRU*/ |
2678 | case NVME_SC_ABORT_REQ: | 2676 | case NVME_SC_ABORT_REQ: |
2679 | ++ctrl->event_limit; | 2677 | ++ctrl->event_limit; |
2680 | queue_work(nvme_wq, &ctrl->async_event_work); | 2678 | if (ctrl->state == NVME_CTRL_LIVE) |
2679 | queue_work(nvme_wq, &ctrl->async_event_work); | ||
2681 | break; | 2680 | break; |
2682 | default: | 2681 | default: |
2683 | break; | 2682 | break; |
@@ -2692,7 +2691,7 @@ void nvme_complete_async_event(struct nvme_ctrl *ctrl, __le16 status, | |||
2692 | nvme_queue_scan(ctrl); | 2691 | nvme_queue_scan(ctrl); |
2693 | break; | 2692 | break; |
2694 | case NVME_AER_NOTICE_FW_ACT_STARTING: | 2693 | case NVME_AER_NOTICE_FW_ACT_STARTING: |
2695 | schedule_work(&ctrl->fw_act_work); | 2694 | queue_work(nvme_wq, &ctrl->fw_act_work); |
2696 | break; | 2695 | break; |
2697 | default: | 2696 | default: |
2698 | dev_warn(ctrl->device, "async event result %08x\n", result); | 2697 | dev_warn(ctrl->device, "async event result %08x\n", result); |
diff --git a/drivers/nvme/host/fabrics.c b/drivers/nvme/host/fabrics.c index 47307752dc65..555c976cc2ee 100644 --- a/drivers/nvme/host/fabrics.c +++ b/drivers/nvme/host/fabrics.c | |||
@@ -565,6 +565,7 @@ static int nvmf_parse_options(struct nvmf_ctrl_options *opts, | |||
565 | opts->queue_size = NVMF_DEF_QUEUE_SIZE; | 565 | opts->queue_size = NVMF_DEF_QUEUE_SIZE; |
566 | opts->nr_io_queues = num_online_cpus(); | 566 | opts->nr_io_queues = num_online_cpus(); |
567 | opts->reconnect_delay = NVMF_DEF_RECONNECT_DELAY; | 567 | opts->reconnect_delay = NVMF_DEF_RECONNECT_DELAY; |
568 | opts->kato = NVME_DEFAULT_KATO; | ||
568 | 569 | ||
569 | options = o = kstrdup(buf, GFP_KERNEL); | 570 | options = o = kstrdup(buf, GFP_KERNEL); |
570 | if (!options) | 571 | if (!options) |
@@ -655,21 +656,22 @@ static int nvmf_parse_options(struct nvmf_ctrl_options *opts, | |||
655 | goto out; | 656 | goto out; |
656 | } | 657 | } |
657 | 658 | ||
658 | if (opts->discovery_nqn) { | ||
659 | pr_err("Discovery controllers cannot accept keep_alive_tmo != 0\n"); | ||
660 | ret = -EINVAL; | ||
661 | goto out; | ||
662 | } | ||
663 | |||
664 | if (token < 0) { | 659 | if (token < 0) { |
665 | pr_err("Invalid keep_alive_tmo %d\n", token); | 660 | pr_err("Invalid keep_alive_tmo %d\n", token); |
666 | ret = -EINVAL; | 661 | ret = -EINVAL; |
667 | goto out; | 662 | goto out; |
668 | } else if (token == 0) { | 663 | } else if (token == 0 && !opts->discovery_nqn) { |
669 | /* Allowed for debug */ | 664 | /* Allowed for debug */ |
670 | pr_warn("keep_alive_tmo 0 won't execute keep alives!!!\n"); | 665 | pr_warn("keep_alive_tmo 0 won't execute keep alives!!!\n"); |
671 | } | 666 | } |
672 | opts->kato = token; | 667 | opts->kato = token; |
668 | |||
669 | if (opts->discovery_nqn && opts->kato) { | ||
670 | pr_err("Discovery controllers cannot accept KATO != 0\n"); | ||
671 | ret = -EINVAL; | ||
672 | goto out; | ||
673 | } | ||
674 | |||
673 | break; | 675 | break; |
674 | case NVMF_OPT_CTRL_LOSS_TMO: | 676 | case NVMF_OPT_CTRL_LOSS_TMO: |
675 | if (match_int(args, &token)) { | 677 | if (match_int(args, &token)) { |
@@ -762,8 +764,6 @@ static int nvmf_parse_options(struct nvmf_ctrl_options *opts, | |||
762 | uuid_copy(&opts->host->id, &hostid); | 764 | uuid_copy(&opts->host->id, &hostid); |
763 | 765 | ||
764 | out: | 766 | out: |
765 | if (!opts->discovery_nqn && !opts->kato) | ||
766 | opts->kato = NVME_DEFAULT_KATO; | ||
767 | kfree(options); | 767 | kfree(options); |
768 | return ret; | 768 | return ret; |
769 | } | 769 | } |
diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c index d2e882c0f496..af075e998944 100644 --- a/drivers/nvme/host/fc.c +++ b/drivers/nvme/host/fc.c | |||
@@ -1376,7 +1376,7 @@ nvme_fc_fcpio_done(struct nvmefc_fcp_req *req) | |||
1376 | if (atomic_read(&op->state) == FCPOP_STATE_ABORTED) | 1376 | if (atomic_read(&op->state) == FCPOP_STATE_ABORTED) |
1377 | status = cpu_to_le16((NVME_SC_ABORT_REQ | NVME_SC_DNR) << 1); | 1377 | status = cpu_to_le16((NVME_SC_ABORT_REQ | NVME_SC_DNR) << 1); |
1378 | else if (freq->status) | 1378 | else if (freq->status) |
1379 | status = cpu_to_le16(NVME_SC_FC_TRANSPORT_ERROR << 1); | 1379 | status = cpu_to_le16(NVME_SC_INTERNAL << 1); |
1380 | 1380 | ||
1381 | /* | 1381 | /* |
1382 | * For the linux implementation, if we have an unsuccesful | 1382 | * For the linux implementation, if we have an unsuccesful |
@@ -1404,7 +1404,7 @@ nvme_fc_fcpio_done(struct nvmefc_fcp_req *req) | |||
1404 | */ | 1404 | */ |
1405 | if (freq->transferred_length != | 1405 | if (freq->transferred_length != |
1406 | be32_to_cpu(op->cmd_iu.data_len)) { | 1406 | be32_to_cpu(op->cmd_iu.data_len)) { |
1407 | status = cpu_to_le16(NVME_SC_FC_TRANSPORT_ERROR << 1); | 1407 | status = cpu_to_le16(NVME_SC_INTERNAL << 1); |
1408 | goto done; | 1408 | goto done; |
1409 | } | 1409 | } |
1410 | result.u64 = 0; | 1410 | result.u64 = 0; |
@@ -1421,7 +1421,7 @@ nvme_fc_fcpio_done(struct nvmefc_fcp_req *req) | |||
1421 | freq->transferred_length || | 1421 | freq->transferred_length || |
1422 | op->rsp_iu.status_code || | 1422 | op->rsp_iu.status_code || |
1423 | sqe->common.command_id != cqe->command_id)) { | 1423 | sqe->common.command_id != cqe->command_id)) { |
1424 | status = cpu_to_le16(NVME_SC_FC_TRANSPORT_ERROR << 1); | 1424 | status = cpu_to_le16(NVME_SC_INTERNAL << 1); |
1425 | goto done; | 1425 | goto done; |
1426 | } | 1426 | } |
1427 | result = cqe->result; | 1427 | result = cqe->result; |
@@ -1429,7 +1429,7 @@ nvme_fc_fcpio_done(struct nvmefc_fcp_req *req) | |||
1429 | break; | 1429 | break; |
1430 | 1430 | ||
1431 | default: | 1431 | default: |
1432 | status = cpu_to_le16(NVME_SC_FC_TRANSPORT_ERROR << 1); | 1432 | status = cpu_to_le16(NVME_SC_INTERNAL << 1); |
1433 | goto done; | 1433 | goto done; |
1434 | } | 1434 | } |
1435 | 1435 | ||
@@ -1989,16 +1989,17 @@ nvme_fc_start_fcp_op(struct nvme_fc_ctrl *ctrl, struct nvme_fc_queue *queue, | |||
1989 | * as well as those by FC-NVME spec. | 1989 | * as well as those by FC-NVME spec. |
1990 | */ | 1990 | */ |
1991 | WARN_ON_ONCE(sqe->common.metadata); | 1991 | WARN_ON_ONCE(sqe->common.metadata); |
1992 | WARN_ON_ONCE(sqe->common.dptr.prp1); | ||
1993 | WARN_ON_ONCE(sqe->common.dptr.prp2); | ||
1994 | sqe->common.flags |= NVME_CMD_SGL_METABUF; | 1992 | sqe->common.flags |= NVME_CMD_SGL_METABUF; |
1995 | 1993 | ||
1996 | /* | 1994 | /* |
1997 | * format SQE DPTR field per FC-NVME rules | 1995 | * format SQE DPTR field per FC-NVME rules: |
1998 | * type=data block descr; subtype=offset; | 1996 | * type=0x5 Transport SGL Data Block Descriptor |
1999 | * offset is currently 0. | 1997 | * subtype=0xA Transport-specific value |
1998 | * address=0 | ||
1999 | * length=length of the data series | ||
2000 | */ | 2000 | */ |
2001 | sqe->rw.dptr.sgl.type = NVME_SGL_FMT_OFFSET; | 2001 | sqe->rw.dptr.sgl.type = (NVME_TRANSPORT_SGL_DATA_DESC << 4) | |
2002 | NVME_SGL_FMT_TRANSPORT_A; | ||
2002 | sqe->rw.dptr.sgl.length = cpu_to_le32(data_len); | 2003 | sqe->rw.dptr.sgl.length = cpu_to_le32(data_len); |
2003 | sqe->rw.dptr.sgl.addr = 0; | 2004 | sqe->rw.dptr.sgl.addr = 0; |
2004 | 2005 | ||
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c index 4a2121335f48..3f5a04c586ce 100644 --- a/drivers/nvme/host/pci.c +++ b/drivers/nvme/host/pci.c | |||
@@ -24,6 +24,7 @@ | |||
24 | #include <linux/mm.h> | 24 | #include <linux/mm.h> |
25 | #include <linux/module.h> | 25 | #include <linux/module.h> |
26 | #include <linux/mutex.h> | 26 | #include <linux/mutex.h> |
27 | #include <linux/once.h> | ||
27 | #include <linux/pci.h> | 28 | #include <linux/pci.h> |
28 | #include <linux/poison.h> | 29 | #include <linux/poison.h> |
29 | #include <linux/t10-pi.h> | 30 | #include <linux/t10-pi.h> |
@@ -93,7 +94,7 @@ struct nvme_dev { | |||
93 | struct mutex shutdown_lock; | 94 | struct mutex shutdown_lock; |
94 | bool subsystem; | 95 | bool subsystem; |
95 | void __iomem *cmb; | 96 | void __iomem *cmb; |
96 | dma_addr_t cmb_dma_addr; | 97 | pci_bus_addr_t cmb_bus_addr; |
97 | u64 cmb_size; | 98 | u64 cmb_size; |
98 | u32 cmbsz; | 99 | u32 cmbsz; |
99 | u32 cmbloc; | 100 | u32 cmbloc; |
@@ -540,6 +541,20 @@ static void nvme_dif_complete(u32 p, u32 v, struct t10_pi_tuple *pi) | |||
540 | } | 541 | } |
541 | #endif | 542 | #endif |
542 | 543 | ||
544 | static void nvme_print_sgl(struct scatterlist *sgl, int nents) | ||
545 | { | ||
546 | int i; | ||
547 | struct scatterlist *sg; | ||
548 | |||
549 | for_each_sg(sgl, sg, nents, i) { | ||
550 | dma_addr_t phys = sg_phys(sg); | ||
551 | pr_warn("sg[%d] phys_addr:%pad offset:%d length:%d " | ||
552 | "dma_address:%pad dma_length:%d\n", | ||
553 | i, &phys, sg->offset, sg->length, &sg_dma_address(sg), | ||
554 | sg_dma_len(sg)); | ||
555 | } | ||
556 | } | ||
557 | |||
543 | static blk_status_t nvme_setup_prps(struct nvme_dev *dev, struct request *req) | 558 | static blk_status_t nvme_setup_prps(struct nvme_dev *dev, struct request *req) |
544 | { | 559 | { |
545 | struct nvme_iod *iod = blk_mq_rq_to_pdu(req); | 560 | struct nvme_iod *iod = blk_mq_rq_to_pdu(req); |
@@ -622,19 +637,10 @@ static blk_status_t nvme_setup_prps(struct nvme_dev *dev, struct request *req) | |||
622 | return BLK_STS_OK; | 637 | return BLK_STS_OK; |
623 | 638 | ||
624 | bad_sgl: | 639 | bad_sgl: |
625 | if (WARN_ONCE(1, "Invalid SGL for payload:%d nents:%d\n", | 640 | WARN(DO_ONCE(nvme_print_sgl, iod->sg, iod->nents), |
626 | blk_rq_payload_bytes(req), iod->nents)) { | 641 | "Invalid SGL for payload:%d nents:%d\n", |
627 | for_each_sg(iod->sg, sg, iod->nents, i) { | 642 | blk_rq_payload_bytes(req), iod->nents); |
628 | dma_addr_t phys = sg_phys(sg); | ||
629 | pr_warn("sg[%d] phys_addr:%pad offset:%d length:%d " | ||
630 | "dma_address:%pad dma_length:%d\n", i, &phys, | ||
631 | sg->offset, sg->length, | ||
632 | &sg_dma_address(sg), | ||
633 | sg_dma_len(sg)); | ||
634 | } | ||
635 | } | ||
636 | return BLK_STS_IOERR; | 643 | return BLK_STS_IOERR; |
637 | |||
638 | } | 644 | } |
639 | 645 | ||
640 | static blk_status_t nvme_map_data(struct nvme_dev *dev, struct request *req, | 646 | static blk_status_t nvme_map_data(struct nvme_dev *dev, struct request *req, |
@@ -1220,7 +1226,7 @@ static int nvme_alloc_sq_cmds(struct nvme_dev *dev, struct nvme_queue *nvmeq, | |||
1220 | if (qid && dev->cmb && use_cmb_sqes && NVME_CMB_SQS(dev->cmbsz)) { | 1226 | if (qid && dev->cmb && use_cmb_sqes && NVME_CMB_SQS(dev->cmbsz)) { |
1221 | unsigned offset = (qid - 1) * roundup(SQ_SIZE(depth), | 1227 | unsigned offset = (qid - 1) * roundup(SQ_SIZE(depth), |
1222 | dev->ctrl.page_size); | 1228 | dev->ctrl.page_size); |
1223 | nvmeq->sq_dma_addr = dev->cmb_dma_addr + offset; | 1229 | nvmeq->sq_dma_addr = dev->cmb_bus_addr + offset; |
1224 | nvmeq->sq_cmds_io = dev->cmb + offset; | 1230 | nvmeq->sq_cmds_io = dev->cmb + offset; |
1225 | } else { | 1231 | } else { |
1226 | nvmeq->sq_cmds = dma_alloc_coherent(dev->dev, SQ_SIZE(depth), | 1232 | nvmeq->sq_cmds = dma_alloc_coherent(dev->dev, SQ_SIZE(depth), |
@@ -1313,11 +1319,11 @@ static int nvme_create_queue(struct nvme_queue *nvmeq, int qid) | |||
1313 | if (result < 0) | 1319 | if (result < 0) |
1314 | goto release_cq; | 1320 | goto release_cq; |
1315 | 1321 | ||
1322 | nvme_init_queue(nvmeq, qid); | ||
1316 | result = queue_request_irq(nvmeq); | 1323 | result = queue_request_irq(nvmeq); |
1317 | if (result < 0) | 1324 | if (result < 0) |
1318 | goto release_sq; | 1325 | goto release_sq; |
1319 | 1326 | ||
1320 | nvme_init_queue(nvmeq, qid); | ||
1321 | return result; | 1327 | return result; |
1322 | 1328 | ||
1323 | release_sq: | 1329 | release_sq: |
@@ -1464,6 +1470,7 @@ static int nvme_pci_configure_admin_queue(struct nvme_dev *dev) | |||
1464 | return result; | 1470 | return result; |
1465 | 1471 | ||
1466 | nvmeq->cq_vector = 0; | 1472 | nvmeq->cq_vector = 0; |
1473 | nvme_init_queue(nvmeq, 0); | ||
1467 | result = queue_request_irq(nvmeq); | 1474 | result = queue_request_irq(nvmeq); |
1468 | if (result) { | 1475 | if (result) { |
1469 | nvmeq->cq_vector = -1; | 1476 | nvmeq->cq_vector = -1; |
@@ -1520,7 +1527,7 @@ static void __iomem *nvme_map_cmb(struct nvme_dev *dev) | |||
1520 | resource_size_t bar_size; | 1527 | resource_size_t bar_size; |
1521 | struct pci_dev *pdev = to_pci_dev(dev->dev); | 1528 | struct pci_dev *pdev = to_pci_dev(dev->dev); |
1522 | void __iomem *cmb; | 1529 | void __iomem *cmb; |
1523 | dma_addr_t dma_addr; | 1530 | int bar; |
1524 | 1531 | ||
1525 | dev->cmbsz = readl(dev->bar + NVME_REG_CMBSZ); | 1532 | dev->cmbsz = readl(dev->bar + NVME_REG_CMBSZ); |
1526 | if (!(NVME_CMB_SZ(dev->cmbsz))) | 1533 | if (!(NVME_CMB_SZ(dev->cmbsz))) |
@@ -1533,7 +1540,8 @@ static void __iomem *nvme_map_cmb(struct nvme_dev *dev) | |||
1533 | szu = (u64)1 << (12 + 4 * NVME_CMB_SZU(dev->cmbsz)); | 1540 | szu = (u64)1 << (12 + 4 * NVME_CMB_SZU(dev->cmbsz)); |
1534 | size = szu * NVME_CMB_SZ(dev->cmbsz); | 1541 | size = szu * NVME_CMB_SZ(dev->cmbsz); |
1535 | offset = szu * NVME_CMB_OFST(dev->cmbloc); | 1542 | offset = szu * NVME_CMB_OFST(dev->cmbloc); |
1536 | bar_size = pci_resource_len(pdev, NVME_CMB_BIR(dev->cmbloc)); | 1543 | bar = NVME_CMB_BIR(dev->cmbloc); |
1544 | bar_size = pci_resource_len(pdev, bar); | ||
1537 | 1545 | ||
1538 | if (offset > bar_size) | 1546 | if (offset > bar_size) |
1539 | return NULL; | 1547 | return NULL; |
@@ -1546,12 +1554,11 @@ static void __iomem *nvme_map_cmb(struct nvme_dev *dev) | |||
1546 | if (size > bar_size - offset) | 1554 | if (size > bar_size - offset) |
1547 | size = bar_size - offset; | 1555 | size = bar_size - offset; |
1548 | 1556 | ||
1549 | dma_addr = pci_resource_start(pdev, NVME_CMB_BIR(dev->cmbloc)) + offset; | 1557 | cmb = ioremap_wc(pci_resource_start(pdev, bar) + offset, size); |
1550 | cmb = ioremap_wc(dma_addr, size); | ||
1551 | if (!cmb) | 1558 | if (!cmb) |
1552 | return NULL; | 1559 | return NULL; |
1553 | 1560 | ||
1554 | dev->cmb_dma_addr = dma_addr; | 1561 | dev->cmb_bus_addr = pci_bus_address(pdev, bar) + offset; |
1555 | dev->cmb_size = size; | 1562 | dev->cmb_size = size; |
1556 | return cmb; | 1563 | return cmb; |
1557 | } | 1564 | } |
@@ -2156,7 +2163,6 @@ static void nvme_reset_work(struct work_struct *work) | |||
2156 | if (result) | 2163 | if (result) |
2157 | goto out; | 2164 | goto out; |
2158 | 2165 | ||
2159 | nvme_init_queue(dev->queues[0], 0); | ||
2160 | result = nvme_alloc_admin_tags(dev); | 2166 | result = nvme_alloc_admin_tags(dev); |
2161 | if (result) | 2167 | if (result) |
2162 | goto out; | 2168 | goto out; |
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c index 58983000964b..92a03ff5fb4d 100644 --- a/drivers/nvme/host/rdma.c +++ b/drivers/nvme/host/rdma.c | |||
@@ -942,7 +942,12 @@ static void nvme_rdma_reconnect_ctrl_work(struct work_struct *work) | |||
942 | } | 942 | } |
943 | 943 | ||
944 | changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE); | 944 | changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE); |
945 | WARN_ON_ONCE(!changed); | 945 | if (!changed) { |
946 | /* state change failure is ok if we're in DELETING state */ | ||
947 | WARN_ON_ONCE(ctrl->ctrl.state != NVME_CTRL_DELETING); | ||
948 | return; | ||
949 | } | ||
950 | |||
946 | ctrl->ctrl.nr_reconnects = 0; | 951 | ctrl->ctrl.nr_reconnects = 0; |
947 | 952 | ||
948 | nvme_start_ctrl(&ctrl->ctrl); | 953 | nvme_start_ctrl(&ctrl->ctrl); |
@@ -962,7 +967,7 @@ static void nvme_rdma_error_recovery_work(struct work_struct *work) | |||
962 | struct nvme_rdma_ctrl *ctrl = container_of(work, | 967 | struct nvme_rdma_ctrl *ctrl = container_of(work, |
963 | struct nvme_rdma_ctrl, err_work); | 968 | struct nvme_rdma_ctrl, err_work); |
964 | 969 | ||
965 | nvme_stop_ctrl(&ctrl->ctrl); | 970 | nvme_stop_keep_alive(&ctrl->ctrl); |
966 | 971 | ||
967 | if (ctrl->ctrl.queue_count > 1) { | 972 | if (ctrl->ctrl.queue_count > 1) { |
968 | nvme_stop_queues(&ctrl->ctrl); | 973 | nvme_stop_queues(&ctrl->ctrl); |
diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c index 7c23eaf8e563..1b208beeef50 100644 --- a/drivers/nvme/target/core.c +++ b/drivers/nvme/target/core.c | |||
@@ -390,10 +390,10 @@ static void __nvmet_req_complete(struct nvmet_req *req, u16 status) | |||
390 | if (status) | 390 | if (status) |
391 | nvmet_set_status(req, status); | 391 | nvmet_set_status(req, status); |
392 | 392 | ||
393 | /* XXX: need to fill in something useful for sq_head */ | 393 | if (req->sq->size) |
394 | req->rsp->sq_head = 0; | 394 | req->sq->sqhd = (req->sq->sqhd + 1) % req->sq->size; |
395 | if (likely(req->sq)) /* may happen during early failure */ | 395 | req->rsp->sq_head = cpu_to_le16(req->sq->sqhd); |
396 | req->rsp->sq_id = cpu_to_le16(req->sq->qid); | 396 | req->rsp->sq_id = cpu_to_le16(req->sq->qid); |
397 | req->rsp->command_id = req->cmd->common.command_id; | 397 | req->rsp->command_id = req->cmd->common.command_id; |
398 | 398 | ||
399 | if (req->ns) | 399 | if (req->ns) |
@@ -420,6 +420,7 @@ void nvmet_cq_setup(struct nvmet_ctrl *ctrl, struct nvmet_cq *cq, | |||
420 | void nvmet_sq_setup(struct nvmet_ctrl *ctrl, struct nvmet_sq *sq, | 420 | void nvmet_sq_setup(struct nvmet_ctrl *ctrl, struct nvmet_sq *sq, |
421 | u16 qid, u16 size) | 421 | u16 qid, u16 size) |
422 | { | 422 | { |
423 | sq->sqhd = 0; | ||
423 | sq->qid = qid; | 424 | sq->qid = qid; |
424 | sq->size = size; | 425 | sq->size = size; |
425 | 426 | ||
diff --git a/drivers/nvme/target/fabrics-cmd.c b/drivers/nvme/target/fabrics-cmd.c index 859a66725291..db3bf6b8bf9e 100644 --- a/drivers/nvme/target/fabrics-cmd.c +++ b/drivers/nvme/target/fabrics-cmd.c | |||
@@ -109,9 +109,14 @@ static u16 nvmet_install_queue(struct nvmet_ctrl *ctrl, struct nvmet_req *req) | |||
109 | pr_warn("queue already connected!\n"); | 109 | pr_warn("queue already connected!\n"); |
110 | return NVME_SC_CONNECT_CTRL_BUSY | NVME_SC_DNR; | 110 | return NVME_SC_CONNECT_CTRL_BUSY | NVME_SC_DNR; |
111 | } | 111 | } |
112 | if (!sqsize) { | ||
113 | pr_warn("queue size zero!\n"); | ||
114 | return NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR; | ||
115 | } | ||
112 | 116 | ||
113 | nvmet_cq_setup(ctrl, req->cq, qid, sqsize); | 117 | /* note: convert queue size from 0's-based value to 1's-based value */ |
114 | nvmet_sq_setup(ctrl, req->sq, qid, sqsize); | 118 | nvmet_cq_setup(ctrl, req->cq, qid, sqsize + 1); |
119 | nvmet_sq_setup(ctrl, req->sq, qid, sqsize + 1); | ||
115 | return 0; | 120 | return 0; |
116 | } | 121 | } |
117 | 122 | ||
diff --git a/drivers/nvme/target/fc.c b/drivers/nvme/target/fc.c index 421e43bf1dd7..58e010bdda3e 100644 --- a/drivers/nvme/target/fc.c +++ b/drivers/nvme/target/fc.c | |||
@@ -148,7 +148,7 @@ struct nvmet_fc_tgt_assoc { | |||
148 | u32 a_id; | 148 | u32 a_id; |
149 | struct nvmet_fc_tgtport *tgtport; | 149 | struct nvmet_fc_tgtport *tgtport; |
150 | struct list_head a_list; | 150 | struct list_head a_list; |
151 | struct nvmet_fc_tgt_queue *queues[NVMET_NR_QUEUES]; | 151 | struct nvmet_fc_tgt_queue *queues[NVMET_NR_QUEUES + 1]; |
152 | struct kref ref; | 152 | struct kref ref; |
153 | }; | 153 | }; |
154 | 154 | ||
@@ -608,7 +608,7 @@ nvmet_fc_alloc_target_queue(struct nvmet_fc_tgt_assoc *assoc, | |||
608 | unsigned long flags; | 608 | unsigned long flags; |
609 | int ret; | 609 | int ret; |
610 | 610 | ||
611 | if (qid >= NVMET_NR_QUEUES) | 611 | if (qid > NVMET_NR_QUEUES) |
612 | return NULL; | 612 | return NULL; |
613 | 613 | ||
614 | queue = kzalloc((sizeof(*queue) + | 614 | queue = kzalloc((sizeof(*queue) + |
@@ -783,6 +783,9 @@ nvmet_fc_find_target_queue(struct nvmet_fc_tgtport *tgtport, | |||
783 | u16 qid = nvmet_fc_getqueueid(connection_id); | 783 | u16 qid = nvmet_fc_getqueueid(connection_id); |
784 | unsigned long flags; | 784 | unsigned long flags; |
785 | 785 | ||
786 | if (qid > NVMET_NR_QUEUES) | ||
787 | return NULL; | ||
788 | |||
786 | spin_lock_irqsave(&tgtport->lock, flags); | 789 | spin_lock_irqsave(&tgtport->lock, flags); |
787 | list_for_each_entry(assoc, &tgtport->assoc_list, a_list) { | 790 | list_for_each_entry(assoc, &tgtport->assoc_list, a_list) { |
788 | if (association_id == assoc->association_id) { | 791 | if (association_id == assoc->association_id) { |
@@ -888,7 +891,7 @@ nvmet_fc_delete_target_assoc(struct nvmet_fc_tgt_assoc *assoc) | |||
888 | int i; | 891 | int i; |
889 | 892 | ||
890 | spin_lock_irqsave(&tgtport->lock, flags); | 893 | spin_lock_irqsave(&tgtport->lock, flags); |
891 | for (i = NVMET_NR_QUEUES - 1; i >= 0; i--) { | 894 | for (i = NVMET_NR_QUEUES; i >= 0; i--) { |
892 | queue = assoc->queues[i]; | 895 | queue = assoc->queues[i]; |
893 | if (queue) { | 896 | if (queue) { |
894 | if (!nvmet_fc_tgt_q_get(queue)) | 897 | if (!nvmet_fc_tgt_q_get(queue)) |
@@ -1910,8 +1913,7 @@ nvmet_fc_transfer_fcp_data(struct nvmet_fc_tgtport *tgtport, | |||
1910 | spin_lock_irqsave(&fod->flock, flags); | 1913 | spin_lock_irqsave(&fod->flock, flags); |
1911 | fod->writedataactive = false; | 1914 | fod->writedataactive = false; |
1912 | spin_unlock_irqrestore(&fod->flock, flags); | 1915 | spin_unlock_irqrestore(&fod->flock, flags); |
1913 | nvmet_req_complete(&fod->req, | 1916 | nvmet_req_complete(&fod->req, NVME_SC_INTERNAL); |
1914 | NVME_SC_FC_TRANSPORT_ERROR); | ||
1915 | } else /* NVMET_FCOP_READDATA or NVMET_FCOP_READDATA_RSP */ { | 1917 | } else /* NVMET_FCOP_READDATA or NVMET_FCOP_READDATA_RSP */ { |
1916 | fcpreq->fcp_error = ret; | 1918 | fcpreq->fcp_error = ret; |
1917 | fcpreq->transferred_length = 0; | 1919 | fcpreq->transferred_length = 0; |
@@ -1929,8 +1931,7 @@ __nvmet_fc_fod_op_abort(struct nvmet_fc_fcp_iod *fod, bool abort) | |||
1929 | /* if in the middle of an io and we need to tear down */ | 1931 | /* if in the middle of an io and we need to tear down */ |
1930 | if (abort) { | 1932 | if (abort) { |
1931 | if (fcpreq->op == NVMET_FCOP_WRITEDATA) { | 1933 | if (fcpreq->op == NVMET_FCOP_WRITEDATA) { |
1932 | nvmet_req_complete(&fod->req, | 1934 | nvmet_req_complete(&fod->req, NVME_SC_INTERNAL); |
1933 | NVME_SC_FC_TRANSPORT_ERROR); | ||
1934 | return true; | 1935 | return true; |
1935 | } | 1936 | } |
1936 | 1937 | ||
@@ -1968,8 +1969,7 @@ nvmet_fc_fod_op_done(struct nvmet_fc_fcp_iod *fod) | |||
1968 | fod->abort = true; | 1969 | fod->abort = true; |
1969 | spin_unlock(&fod->flock); | 1970 | spin_unlock(&fod->flock); |
1970 | 1971 | ||
1971 | nvmet_req_complete(&fod->req, | 1972 | nvmet_req_complete(&fod->req, NVME_SC_INTERNAL); |
1972 | NVME_SC_FC_TRANSPORT_ERROR); | ||
1973 | return; | 1973 | return; |
1974 | } | 1974 | } |
1975 | 1975 | ||
@@ -2533,13 +2533,17 @@ nvmet_fc_remove_port(struct nvmet_port *port) | |||
2533 | { | 2533 | { |
2534 | struct nvmet_fc_tgtport *tgtport = port->priv; | 2534 | struct nvmet_fc_tgtport *tgtport = port->priv; |
2535 | unsigned long flags; | 2535 | unsigned long flags; |
2536 | bool matched = false; | ||
2536 | 2537 | ||
2537 | spin_lock_irqsave(&nvmet_fc_tgtlock, flags); | 2538 | spin_lock_irqsave(&nvmet_fc_tgtlock, flags); |
2538 | if (tgtport->port == port) { | 2539 | if (tgtport->port == port) { |
2539 | nvmet_fc_tgtport_put(tgtport); | 2540 | matched = true; |
2540 | tgtport->port = NULL; | 2541 | tgtport->port = NULL; |
2541 | } | 2542 | } |
2542 | spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags); | 2543 | spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags); |
2544 | |||
2545 | if (matched) | ||
2546 | nvmet_fc_tgtport_put(tgtport); | ||
2543 | } | 2547 | } |
2544 | 2548 | ||
2545 | static struct nvmet_fabrics_ops nvmet_fc_tgt_fcp_ops = { | 2549 | static struct nvmet_fabrics_ops nvmet_fc_tgt_fcp_ops = { |
diff --git a/drivers/nvme/target/fcloop.c b/drivers/nvme/target/fcloop.c index 1cb9847ec261..7b75d9de55ab 100644 --- a/drivers/nvme/target/fcloop.c +++ b/drivers/nvme/target/fcloop.c | |||
@@ -224,8 +224,6 @@ struct fcloop_nport { | |||
224 | struct fcloop_lport *lport; | 224 | struct fcloop_lport *lport; |
225 | struct list_head nport_list; | 225 | struct list_head nport_list; |
226 | struct kref ref; | 226 | struct kref ref; |
227 | struct completion rport_unreg_done; | ||
228 | struct completion tport_unreg_done; | ||
229 | u64 node_name; | 227 | u64 node_name; |
230 | u64 port_name; | 228 | u64 port_name; |
231 | u32 port_role; | 229 | u32 port_role; |
@@ -576,7 +574,7 @@ fcloop_tgt_fcp_abort(struct nvmet_fc_target_port *tgtport, | |||
576 | tfcp_req->aborted = true; | 574 | tfcp_req->aborted = true; |
577 | spin_unlock(&tfcp_req->reqlock); | 575 | spin_unlock(&tfcp_req->reqlock); |
578 | 576 | ||
579 | tfcp_req->status = NVME_SC_FC_TRANSPORT_ABORTED; | 577 | tfcp_req->status = NVME_SC_INTERNAL; |
580 | 578 | ||
581 | /* | 579 | /* |
582 | * nothing more to do. If io wasn't active, the transport should | 580 | * nothing more to do. If io wasn't active, the transport should |
@@ -631,6 +629,32 @@ fcloop_fcp_abort(struct nvme_fc_local_port *localport, | |||
631 | } | 629 | } |
632 | 630 | ||
633 | static void | 631 | static void |
632 | fcloop_nport_free(struct kref *ref) | ||
633 | { | ||
634 | struct fcloop_nport *nport = | ||
635 | container_of(ref, struct fcloop_nport, ref); | ||
636 | unsigned long flags; | ||
637 | |||
638 | spin_lock_irqsave(&fcloop_lock, flags); | ||
639 | list_del(&nport->nport_list); | ||
640 | spin_unlock_irqrestore(&fcloop_lock, flags); | ||
641 | |||
642 | kfree(nport); | ||
643 | } | ||
644 | |||
645 | static void | ||
646 | fcloop_nport_put(struct fcloop_nport *nport) | ||
647 | { | ||
648 | kref_put(&nport->ref, fcloop_nport_free); | ||
649 | } | ||
650 | |||
651 | static int | ||
652 | fcloop_nport_get(struct fcloop_nport *nport) | ||
653 | { | ||
654 | return kref_get_unless_zero(&nport->ref); | ||
655 | } | ||
656 | |||
657 | static void | ||
634 | fcloop_localport_delete(struct nvme_fc_local_port *localport) | 658 | fcloop_localport_delete(struct nvme_fc_local_port *localport) |
635 | { | 659 | { |
636 | struct fcloop_lport *lport = localport->private; | 660 | struct fcloop_lport *lport = localport->private; |
@@ -644,8 +668,7 @@ fcloop_remoteport_delete(struct nvme_fc_remote_port *remoteport) | |||
644 | { | 668 | { |
645 | struct fcloop_rport *rport = remoteport->private; | 669 | struct fcloop_rport *rport = remoteport->private; |
646 | 670 | ||
647 | /* release any threads waiting for the unreg to complete */ | 671 | fcloop_nport_put(rport->nport); |
648 | complete(&rport->nport->rport_unreg_done); | ||
649 | } | 672 | } |
650 | 673 | ||
651 | static void | 674 | static void |
@@ -653,8 +676,7 @@ fcloop_targetport_delete(struct nvmet_fc_target_port *targetport) | |||
653 | { | 676 | { |
654 | struct fcloop_tport *tport = targetport->private; | 677 | struct fcloop_tport *tport = targetport->private; |
655 | 678 | ||
656 | /* release any threads waiting for the unreg to complete */ | 679 | fcloop_nport_put(tport->nport); |
657 | complete(&tport->nport->tport_unreg_done); | ||
658 | } | 680 | } |
659 | 681 | ||
660 | #define FCLOOP_HW_QUEUES 4 | 682 | #define FCLOOP_HW_QUEUES 4 |
@@ -722,6 +744,7 @@ fcloop_create_local_port(struct device *dev, struct device_attribute *attr, | |||
722 | goto out_free_opts; | 744 | goto out_free_opts; |
723 | } | 745 | } |
724 | 746 | ||
747 | memset(&pinfo, 0, sizeof(pinfo)); | ||
725 | pinfo.node_name = opts->wwnn; | 748 | pinfo.node_name = opts->wwnn; |
726 | pinfo.port_name = opts->wwpn; | 749 | pinfo.port_name = opts->wwpn; |
727 | pinfo.port_role = opts->roles; | 750 | pinfo.port_role = opts->roles; |
@@ -804,32 +827,6 @@ fcloop_delete_local_port(struct device *dev, struct device_attribute *attr, | |||
804 | return ret ? ret : count; | 827 | return ret ? ret : count; |
805 | } | 828 | } |
806 | 829 | ||
807 | static void | ||
808 | fcloop_nport_free(struct kref *ref) | ||
809 | { | ||
810 | struct fcloop_nport *nport = | ||
811 | container_of(ref, struct fcloop_nport, ref); | ||
812 | unsigned long flags; | ||
813 | |||
814 | spin_lock_irqsave(&fcloop_lock, flags); | ||
815 | list_del(&nport->nport_list); | ||
816 | spin_unlock_irqrestore(&fcloop_lock, flags); | ||
817 | |||
818 | kfree(nport); | ||
819 | } | ||
820 | |||
821 | static void | ||
822 | fcloop_nport_put(struct fcloop_nport *nport) | ||
823 | { | ||
824 | kref_put(&nport->ref, fcloop_nport_free); | ||
825 | } | ||
826 | |||
827 | static int | ||
828 | fcloop_nport_get(struct fcloop_nport *nport) | ||
829 | { | ||
830 | return kref_get_unless_zero(&nport->ref); | ||
831 | } | ||
832 | |||
833 | static struct fcloop_nport * | 830 | static struct fcloop_nport * |
834 | fcloop_alloc_nport(const char *buf, size_t count, bool remoteport) | 831 | fcloop_alloc_nport(const char *buf, size_t count, bool remoteport) |
835 | { | 832 | { |
@@ -938,6 +935,7 @@ fcloop_create_remote_port(struct device *dev, struct device_attribute *attr, | |||
938 | if (!nport) | 935 | if (!nport) |
939 | return -EIO; | 936 | return -EIO; |
940 | 937 | ||
938 | memset(&pinfo, 0, sizeof(pinfo)); | ||
941 | pinfo.node_name = nport->node_name; | 939 | pinfo.node_name = nport->node_name; |
942 | pinfo.port_name = nport->port_name; | 940 | pinfo.port_name = nport->port_name; |
943 | pinfo.port_role = nport->port_role; | 941 | pinfo.port_role = nport->port_role; |
@@ -979,24 +977,12 @@ __unlink_remote_port(struct fcloop_nport *nport) | |||
979 | } | 977 | } |
980 | 978 | ||
981 | static int | 979 | static int |
982 | __wait_remoteport_unreg(struct fcloop_nport *nport, struct fcloop_rport *rport) | 980 | __remoteport_unreg(struct fcloop_nport *nport, struct fcloop_rport *rport) |
983 | { | 981 | { |
984 | int ret; | ||
985 | |||
986 | if (!rport) | 982 | if (!rport) |
987 | return -EALREADY; | 983 | return -EALREADY; |
988 | 984 | ||
989 | init_completion(&nport->rport_unreg_done); | 985 | return nvme_fc_unregister_remoteport(rport->remoteport); |
990 | |||
991 | ret = nvme_fc_unregister_remoteport(rport->remoteport); | ||
992 | if (ret) | ||
993 | return ret; | ||
994 | |||
995 | wait_for_completion(&nport->rport_unreg_done); | ||
996 | |||
997 | fcloop_nport_put(nport); | ||
998 | |||
999 | return ret; | ||
1000 | } | 986 | } |
1001 | 987 | ||
1002 | static ssize_t | 988 | static ssize_t |
@@ -1029,7 +1015,7 @@ fcloop_delete_remote_port(struct device *dev, struct device_attribute *attr, | |||
1029 | if (!nport) | 1015 | if (!nport) |
1030 | return -ENOENT; | 1016 | return -ENOENT; |
1031 | 1017 | ||
1032 | ret = __wait_remoteport_unreg(nport, rport); | 1018 | ret = __remoteport_unreg(nport, rport); |
1033 | 1019 | ||
1034 | return ret ? ret : count; | 1020 | return ret ? ret : count; |
1035 | } | 1021 | } |
@@ -1086,24 +1072,12 @@ __unlink_target_port(struct fcloop_nport *nport) | |||
1086 | } | 1072 | } |
1087 | 1073 | ||
1088 | static int | 1074 | static int |
1089 | __wait_targetport_unreg(struct fcloop_nport *nport, struct fcloop_tport *tport) | 1075 | __targetport_unreg(struct fcloop_nport *nport, struct fcloop_tport *tport) |
1090 | { | 1076 | { |
1091 | int ret; | ||
1092 | |||
1093 | if (!tport) | 1077 | if (!tport) |
1094 | return -EALREADY; | 1078 | return -EALREADY; |
1095 | 1079 | ||
1096 | init_completion(&nport->tport_unreg_done); | 1080 | return nvmet_fc_unregister_targetport(tport->targetport); |
1097 | |||
1098 | ret = nvmet_fc_unregister_targetport(tport->targetport); | ||
1099 | if (ret) | ||
1100 | return ret; | ||
1101 | |||
1102 | wait_for_completion(&nport->tport_unreg_done); | ||
1103 | |||
1104 | fcloop_nport_put(nport); | ||
1105 | |||
1106 | return ret; | ||
1107 | } | 1081 | } |
1108 | 1082 | ||
1109 | static ssize_t | 1083 | static ssize_t |
@@ -1136,7 +1110,7 @@ fcloop_delete_target_port(struct device *dev, struct device_attribute *attr, | |||
1136 | if (!nport) | 1110 | if (!nport) |
1137 | return -ENOENT; | 1111 | return -ENOENT; |
1138 | 1112 | ||
1139 | ret = __wait_targetport_unreg(nport, tport); | 1113 | ret = __targetport_unreg(nport, tport); |
1140 | 1114 | ||
1141 | return ret ? ret : count; | 1115 | return ret ? ret : count; |
1142 | } | 1116 | } |
@@ -1223,11 +1197,11 @@ static void __exit fcloop_exit(void) | |||
1223 | 1197 | ||
1224 | spin_unlock_irqrestore(&fcloop_lock, flags); | 1198 | spin_unlock_irqrestore(&fcloop_lock, flags); |
1225 | 1199 | ||
1226 | ret = __wait_targetport_unreg(nport, tport); | 1200 | ret = __targetport_unreg(nport, tport); |
1227 | if (ret) | 1201 | if (ret) |
1228 | pr_warn("%s: Failed deleting target port\n", __func__); | 1202 | pr_warn("%s: Failed deleting target port\n", __func__); |
1229 | 1203 | ||
1230 | ret = __wait_remoteport_unreg(nport, rport); | 1204 | ret = __remoteport_unreg(nport, rport); |
1231 | if (ret) | 1205 | if (ret) |
1232 | pr_warn("%s: Failed deleting remote port\n", __func__); | 1206 | pr_warn("%s: Failed deleting remote port\n", __func__); |
1233 | 1207 | ||
diff --git a/drivers/nvme/target/nvmet.h b/drivers/nvme/target/nvmet.h index 7d261ab894f4..7b8e20adf760 100644 --- a/drivers/nvme/target/nvmet.h +++ b/drivers/nvme/target/nvmet.h | |||
@@ -74,6 +74,7 @@ struct nvmet_sq { | |||
74 | struct percpu_ref ref; | 74 | struct percpu_ref ref; |
75 | u16 qid; | 75 | u16 qid; |
76 | u16 size; | 76 | u16 size; |
77 | u16 sqhd; | ||
77 | struct completion free_done; | 78 | struct completion free_done; |
78 | struct completion confirm_done; | 79 | struct completion confirm_done; |
79 | }; | 80 | }; |
diff --git a/drivers/nvmem/core.c b/drivers/nvmem/core.c index de54c7f5048a..d12e5de78e70 100644 --- a/drivers/nvmem/core.c +++ b/drivers/nvmem/core.c | |||
@@ -135,7 +135,7 @@ static ssize_t bin_attr_nvmem_write(struct file *filp, struct kobject *kobj, | |||
135 | 135 | ||
136 | /* Stop the user from writing */ | 136 | /* Stop the user from writing */ |
137 | if (pos >= nvmem->size) | 137 | if (pos >= nvmem->size) |
138 | return 0; | 138 | return -EFBIG; |
139 | 139 | ||
140 | if (count < nvmem->word_size) | 140 | if (count < nvmem->word_size) |
141 | return -EINVAL; | 141 | return -EINVAL; |
@@ -789,6 +789,7 @@ struct nvmem_cell *of_nvmem_cell_get(struct device_node *np, | |||
789 | return ERR_PTR(-EINVAL); | 789 | return ERR_PTR(-EINVAL); |
790 | 790 | ||
791 | nvmem = __nvmem_device_get(nvmem_np, NULL, NULL); | 791 | nvmem = __nvmem_device_get(nvmem_np, NULL, NULL); |
792 | of_node_put(nvmem_np); | ||
792 | if (IS_ERR(nvmem)) | 793 | if (IS_ERR(nvmem)) |
793 | return ERR_CAST(nvmem); | 794 | return ERR_CAST(nvmem); |
794 | 795 | ||
diff --git a/drivers/pci/endpoint/functions/pci-epf-test.c b/drivers/pci/endpoint/functions/pci-epf-test.c index 4ddc6e8f9fe7..f9308c2f22e6 100644 --- a/drivers/pci/endpoint/functions/pci-epf-test.c +++ b/drivers/pci/endpoint/functions/pci-epf-test.c | |||
@@ -251,9 +251,8 @@ err: | |||
251 | return ret; | 251 | return ret; |
252 | } | 252 | } |
253 | 253 | ||
254 | static void pci_epf_test_raise_irq(struct pci_epf_test *epf_test) | 254 | static void pci_epf_test_raise_irq(struct pci_epf_test *epf_test, u8 irq) |
255 | { | 255 | { |
256 | u8 irq; | ||
257 | u8 msi_count; | 256 | u8 msi_count; |
258 | struct pci_epf *epf = epf_test->epf; | 257 | struct pci_epf *epf = epf_test->epf; |
259 | struct pci_epc *epc = epf->epc; | 258 | struct pci_epc *epc = epf->epc; |
@@ -262,7 +261,6 @@ static void pci_epf_test_raise_irq(struct pci_epf_test *epf_test) | |||
262 | 261 | ||
263 | reg->status |= STATUS_IRQ_RAISED; | 262 | reg->status |= STATUS_IRQ_RAISED; |
264 | msi_count = pci_epc_get_msi(epc); | 263 | msi_count = pci_epc_get_msi(epc); |
265 | irq = (reg->command & MSI_NUMBER_MASK) >> MSI_NUMBER_SHIFT; | ||
266 | if (irq > msi_count || msi_count <= 0) | 264 | if (irq > msi_count || msi_count <= 0) |
267 | pci_epc_raise_irq(epc, PCI_EPC_IRQ_LEGACY, 0); | 265 | pci_epc_raise_irq(epc, PCI_EPC_IRQ_LEGACY, 0); |
268 | else | 266 | else |
@@ -289,6 +287,8 @@ static void pci_epf_test_cmd_handler(struct work_struct *work) | |||
289 | reg->command = 0; | 287 | reg->command = 0; |
290 | reg->status = 0; | 288 | reg->status = 0; |
291 | 289 | ||
290 | irq = (command & MSI_NUMBER_MASK) >> MSI_NUMBER_SHIFT; | ||
291 | |||
292 | if (command & COMMAND_RAISE_LEGACY_IRQ) { | 292 | if (command & COMMAND_RAISE_LEGACY_IRQ) { |
293 | reg->status = STATUS_IRQ_RAISED; | 293 | reg->status = STATUS_IRQ_RAISED; |
294 | pci_epc_raise_irq(epc, PCI_EPC_IRQ_LEGACY, 0); | 294 | pci_epc_raise_irq(epc, PCI_EPC_IRQ_LEGACY, 0); |
@@ -301,7 +301,7 @@ static void pci_epf_test_cmd_handler(struct work_struct *work) | |||
301 | reg->status |= STATUS_WRITE_FAIL; | 301 | reg->status |= STATUS_WRITE_FAIL; |
302 | else | 302 | else |
303 | reg->status |= STATUS_WRITE_SUCCESS; | 303 | reg->status |= STATUS_WRITE_SUCCESS; |
304 | pci_epf_test_raise_irq(epf_test); | 304 | pci_epf_test_raise_irq(epf_test, irq); |
305 | goto reset_handler; | 305 | goto reset_handler; |
306 | } | 306 | } |
307 | 307 | ||
@@ -311,7 +311,7 @@ static void pci_epf_test_cmd_handler(struct work_struct *work) | |||
311 | reg->status |= STATUS_READ_SUCCESS; | 311 | reg->status |= STATUS_READ_SUCCESS; |
312 | else | 312 | else |
313 | reg->status |= STATUS_READ_FAIL; | 313 | reg->status |= STATUS_READ_FAIL; |
314 | pci_epf_test_raise_irq(epf_test); | 314 | pci_epf_test_raise_irq(epf_test, irq); |
315 | goto reset_handler; | 315 | goto reset_handler; |
316 | } | 316 | } |
317 | 317 | ||
@@ -321,13 +321,12 @@ static void pci_epf_test_cmd_handler(struct work_struct *work) | |||
321 | reg->status |= STATUS_COPY_SUCCESS; | 321 | reg->status |= STATUS_COPY_SUCCESS; |
322 | else | 322 | else |
323 | reg->status |= STATUS_COPY_FAIL; | 323 | reg->status |= STATUS_COPY_FAIL; |
324 | pci_epf_test_raise_irq(epf_test); | 324 | pci_epf_test_raise_irq(epf_test, irq); |
325 | goto reset_handler; | 325 | goto reset_handler; |
326 | } | 326 | } |
327 | 327 | ||
328 | if (command & COMMAND_RAISE_MSI_IRQ) { | 328 | if (command & COMMAND_RAISE_MSI_IRQ) { |
329 | msi_count = pci_epc_get_msi(epc); | 329 | msi_count = pci_epc_get_msi(epc); |
330 | irq = (command & MSI_NUMBER_MASK) >> MSI_NUMBER_SHIFT; | ||
331 | if (irq > msi_count || msi_count <= 0) | 330 | if (irq > msi_count || msi_count <= 0) |
332 | goto reset_handler; | 331 | goto reset_handler; |
333 | reg->status = STATUS_IRQ_RAISED; | 332 | reg->status = STATUS_IRQ_RAISED; |
diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c index 1eecfa301f7f..8e075ea2743e 100644 --- a/drivers/pci/pci-sysfs.c +++ b/drivers/pci/pci-sysfs.c | |||
@@ -686,7 +686,7 @@ static ssize_t driver_override_store(struct device *dev, | |||
686 | const char *buf, size_t count) | 686 | const char *buf, size_t count) |
687 | { | 687 | { |
688 | struct pci_dev *pdev = to_pci_dev(dev); | 688 | struct pci_dev *pdev = to_pci_dev(dev); |
689 | char *driver_override, *old = pdev->driver_override, *cp; | 689 | char *driver_override, *old, *cp; |
690 | 690 | ||
691 | /* We need to keep extra room for a newline */ | 691 | /* We need to keep extra room for a newline */ |
692 | if (count >= (PAGE_SIZE - 1)) | 692 | if (count >= (PAGE_SIZE - 1)) |
@@ -700,12 +700,15 @@ static ssize_t driver_override_store(struct device *dev, | |||
700 | if (cp) | 700 | if (cp) |
701 | *cp = '\0'; | 701 | *cp = '\0'; |
702 | 702 | ||
703 | device_lock(dev); | ||
704 | old = pdev->driver_override; | ||
703 | if (strlen(driver_override)) { | 705 | if (strlen(driver_override)) { |
704 | pdev->driver_override = driver_override; | 706 | pdev->driver_override = driver_override; |
705 | } else { | 707 | } else { |
706 | kfree(driver_override); | 708 | kfree(driver_override); |
707 | pdev->driver_override = NULL; | 709 | pdev->driver_override = NULL; |
708 | } | 710 | } |
711 | device_unlock(dev); | ||
709 | 712 | ||
710 | kfree(old); | 713 | kfree(old); |
711 | 714 | ||
@@ -716,8 +719,12 @@ static ssize_t driver_override_show(struct device *dev, | |||
716 | struct device_attribute *attr, char *buf) | 719 | struct device_attribute *attr, char *buf) |
717 | { | 720 | { |
718 | struct pci_dev *pdev = to_pci_dev(dev); | 721 | struct pci_dev *pdev = to_pci_dev(dev); |
722 | ssize_t len; | ||
719 | 723 | ||
720 | return snprintf(buf, PAGE_SIZE, "%s\n", pdev->driver_override); | 724 | device_lock(dev); |
725 | len = snprintf(buf, PAGE_SIZE, "%s\n", pdev->driver_override); | ||
726 | device_unlock(dev); | ||
727 | return len; | ||
721 | } | 728 | } |
722 | static DEVICE_ATTR_RW(driver_override); | 729 | static DEVICE_ATTR_RW(driver_override); |
723 | 730 | ||
diff --git a/drivers/perf/arm_pmu_acpi.c b/drivers/perf/arm_pmu_acpi.c index 0a9b78705ee8..3303dd8d8eb5 100644 --- a/drivers/perf/arm_pmu_acpi.c +++ b/drivers/perf/arm_pmu_acpi.c | |||
@@ -235,6 +235,7 @@ int arm_pmu_acpi_probe(armpmu_init_fn init_fn) | |||
235 | ret = armpmu_register(pmu); | 235 | ret = armpmu_register(pmu); |
236 | if (ret) { | 236 | if (ret) { |
237 | pr_warn("Failed to register PMU for CPU%d\n", cpu); | 237 | pr_warn("Failed to register PMU for CPU%d\n", cpu); |
238 | kfree(pmu->name); | ||
238 | return ret; | 239 | return ret; |
239 | } | 240 | } |
240 | } | 241 | } |
diff --git a/drivers/platform/x86/fujitsu-laptop.c b/drivers/platform/x86/fujitsu-laptop.c index 85de30f93a9c..56a8195096a2 100644 --- a/drivers/platform/x86/fujitsu-laptop.c +++ b/drivers/platform/x86/fujitsu-laptop.c | |||
@@ -254,10 +254,12 @@ static int bl_update_status(struct backlight_device *b) | |||
254 | { | 254 | { |
255 | struct acpi_device *device = bl_get_data(b); | 255 | struct acpi_device *device = bl_get_data(b); |
256 | 256 | ||
257 | if (b->props.power == FB_BLANK_POWERDOWN) | 257 | if (fext) { |
258 | call_fext_func(fext, FUNC_BACKLIGHT, 0x1, 0x4, 0x3); | 258 | if (b->props.power == FB_BLANK_POWERDOWN) |
259 | else | 259 | call_fext_func(fext, FUNC_BACKLIGHT, 0x1, 0x4, 0x3); |
260 | call_fext_func(fext, FUNC_BACKLIGHT, 0x1, 0x4, 0x0); | 260 | else |
261 | call_fext_func(fext, FUNC_BACKLIGHT, 0x1, 0x4, 0x0); | ||
262 | } | ||
261 | 263 | ||
262 | return set_lcd_level(device, b->props.brightness); | 264 | return set_lcd_level(device, b->props.brightness); |
263 | } | 265 | } |
diff --git a/drivers/rapidio/devices/tsi721.c b/drivers/rapidio/devices/tsi721.c index 315a4be8dc1e..9a68914100ad 100644 --- a/drivers/rapidio/devices/tsi721.c +++ b/drivers/rapidio/devices/tsi721.c | |||
@@ -51,6 +51,8 @@ module_param(mbox_sel, byte, S_IRUGO); | |||
51 | MODULE_PARM_DESC(mbox_sel, | 51 | MODULE_PARM_DESC(mbox_sel, |
52 | "RIO Messaging MBOX Selection Mask (default: 0x0f = all)"); | 52 | "RIO Messaging MBOX Selection Mask (default: 0x0f = all)"); |
53 | 53 | ||
54 | static DEFINE_SPINLOCK(tsi721_maint_lock); | ||
55 | |||
54 | static void tsi721_omsg_handler(struct tsi721_device *priv, int ch); | 56 | static void tsi721_omsg_handler(struct tsi721_device *priv, int ch); |
55 | static void tsi721_imsg_handler(struct tsi721_device *priv, int ch); | 57 | static void tsi721_imsg_handler(struct tsi721_device *priv, int ch); |
56 | 58 | ||
@@ -124,12 +126,15 @@ static int tsi721_maint_dma(struct tsi721_device *priv, u32 sys_size, | |||
124 | void __iomem *regs = priv->regs + TSI721_DMAC_BASE(priv->mdma.ch_id); | 126 | void __iomem *regs = priv->regs + TSI721_DMAC_BASE(priv->mdma.ch_id); |
125 | struct tsi721_dma_desc *bd_ptr; | 127 | struct tsi721_dma_desc *bd_ptr; |
126 | u32 rd_count, swr_ptr, ch_stat; | 128 | u32 rd_count, swr_ptr, ch_stat; |
129 | unsigned long flags; | ||
127 | int i, err = 0; | 130 | int i, err = 0; |
128 | u32 op = do_wr ? MAINT_WR : MAINT_RD; | 131 | u32 op = do_wr ? MAINT_WR : MAINT_RD; |
129 | 132 | ||
130 | if (offset > (RIO_MAINT_SPACE_SZ - len) || (len != sizeof(u32))) | 133 | if (offset > (RIO_MAINT_SPACE_SZ - len) || (len != sizeof(u32))) |
131 | return -EINVAL; | 134 | return -EINVAL; |
132 | 135 | ||
136 | spin_lock_irqsave(&tsi721_maint_lock, flags); | ||
137 | |||
133 | bd_ptr = priv->mdma.bd_base; | 138 | bd_ptr = priv->mdma.bd_base; |
134 | 139 | ||
135 | rd_count = ioread32(regs + TSI721_DMAC_DRDCNT); | 140 | rd_count = ioread32(regs + TSI721_DMAC_DRDCNT); |
@@ -197,7 +202,9 @@ static int tsi721_maint_dma(struct tsi721_device *priv, u32 sys_size, | |||
197 | */ | 202 | */ |
198 | swr_ptr = ioread32(regs + TSI721_DMAC_DSWP); | 203 | swr_ptr = ioread32(regs + TSI721_DMAC_DSWP); |
199 | iowrite32(swr_ptr, regs + TSI721_DMAC_DSRP); | 204 | iowrite32(swr_ptr, regs + TSI721_DMAC_DSRP); |
205 | |||
200 | err_out: | 206 | err_out: |
207 | spin_unlock_irqrestore(&tsi721_maint_lock, flags); | ||
201 | 208 | ||
202 | return err; | 209 | return err; |
203 | } | 210 | } |
diff --git a/drivers/rapidio/rio-access.c b/drivers/rapidio/rio-access.c index a3824baca2e5..3ee9af83b638 100644 --- a/drivers/rapidio/rio-access.c +++ b/drivers/rapidio/rio-access.c | |||
@@ -14,16 +14,8 @@ | |||
14 | #include <linux/module.h> | 14 | #include <linux/module.h> |
15 | 15 | ||
16 | /* | 16 | /* |
17 | * These interrupt-safe spinlocks protect all accesses to RIO | ||
18 | * configuration space and doorbell access. | ||
19 | */ | ||
20 | static DEFINE_SPINLOCK(rio_config_lock); | ||
21 | static DEFINE_SPINLOCK(rio_doorbell_lock); | ||
22 | |||
23 | /* | ||
24 | * Wrappers for all RIO configuration access functions. They just check | 17 | * Wrappers for all RIO configuration access functions. They just check |
25 | * alignment, do locking and call the low-level functions pointed to | 18 | * alignment and call the low-level functions pointed to by rio_mport->ops. |
26 | * by rio_mport->ops. | ||
27 | */ | 19 | */ |
28 | 20 | ||
29 | #define RIO_8_BAD 0 | 21 | #define RIO_8_BAD 0 |
@@ -44,13 +36,10 @@ int __rio_local_read_config_##size \ | |||
44 | (struct rio_mport *mport, u32 offset, type *value) \ | 36 | (struct rio_mport *mport, u32 offset, type *value) \ |
45 | { \ | 37 | { \ |
46 | int res; \ | 38 | int res; \ |
47 | unsigned long flags; \ | ||
48 | u32 data = 0; \ | 39 | u32 data = 0; \ |
49 | if (RIO_##size##_BAD) return RIO_BAD_SIZE; \ | 40 | if (RIO_##size##_BAD) return RIO_BAD_SIZE; \ |
50 | spin_lock_irqsave(&rio_config_lock, flags); \ | ||
51 | res = mport->ops->lcread(mport, mport->id, offset, len, &data); \ | 41 | res = mport->ops->lcread(mport, mport->id, offset, len, &data); \ |
52 | *value = (type)data; \ | 42 | *value = (type)data; \ |
53 | spin_unlock_irqrestore(&rio_config_lock, flags); \ | ||
54 | return res; \ | 43 | return res; \ |
55 | } | 44 | } |
56 | 45 | ||
@@ -67,13 +56,8 @@ int __rio_local_read_config_##size \ | |||
67 | int __rio_local_write_config_##size \ | 56 | int __rio_local_write_config_##size \ |
68 | (struct rio_mport *mport, u32 offset, type value) \ | 57 | (struct rio_mport *mport, u32 offset, type value) \ |
69 | { \ | 58 | { \ |
70 | int res; \ | ||
71 | unsigned long flags; \ | ||
72 | if (RIO_##size##_BAD) return RIO_BAD_SIZE; \ | 59 | if (RIO_##size##_BAD) return RIO_BAD_SIZE; \ |
73 | spin_lock_irqsave(&rio_config_lock, flags); \ | 60 | return mport->ops->lcwrite(mport, mport->id, offset, len, value);\ |
74 | res = mport->ops->lcwrite(mport, mport->id, offset, len, value);\ | ||
75 | spin_unlock_irqrestore(&rio_config_lock, flags); \ | ||
76 | return res; \ | ||
77 | } | 61 | } |
78 | 62 | ||
79 | RIO_LOP_READ(8, u8, 1) | 63 | RIO_LOP_READ(8, u8, 1) |
@@ -104,13 +88,10 @@ int rio_mport_read_config_##size \ | |||
104 | (struct rio_mport *mport, u16 destid, u8 hopcount, u32 offset, type *value) \ | 88 | (struct rio_mport *mport, u16 destid, u8 hopcount, u32 offset, type *value) \ |
105 | { \ | 89 | { \ |
106 | int res; \ | 90 | int res; \ |
107 | unsigned long flags; \ | ||
108 | u32 data = 0; \ | 91 | u32 data = 0; \ |
109 | if (RIO_##size##_BAD) return RIO_BAD_SIZE; \ | 92 | if (RIO_##size##_BAD) return RIO_BAD_SIZE; \ |
110 | spin_lock_irqsave(&rio_config_lock, flags); \ | ||
111 | res = mport->ops->cread(mport, mport->id, destid, hopcount, offset, len, &data); \ | 93 | res = mport->ops->cread(mport, mport->id, destid, hopcount, offset, len, &data); \ |
112 | *value = (type)data; \ | 94 | *value = (type)data; \ |
113 | spin_unlock_irqrestore(&rio_config_lock, flags); \ | ||
114 | return res; \ | 95 | return res; \ |
115 | } | 96 | } |
116 | 97 | ||
@@ -127,13 +108,9 @@ int rio_mport_read_config_##size \ | |||
127 | int rio_mport_write_config_##size \ | 108 | int rio_mport_write_config_##size \ |
128 | (struct rio_mport *mport, u16 destid, u8 hopcount, u32 offset, type value) \ | 109 | (struct rio_mport *mport, u16 destid, u8 hopcount, u32 offset, type value) \ |
129 | { \ | 110 | { \ |
130 | int res; \ | ||
131 | unsigned long flags; \ | ||
132 | if (RIO_##size##_BAD) return RIO_BAD_SIZE; \ | 111 | if (RIO_##size##_BAD) return RIO_BAD_SIZE; \ |
133 | spin_lock_irqsave(&rio_config_lock, flags); \ | 112 | return mport->ops->cwrite(mport, mport->id, destid, hopcount, \ |
134 | res = mport->ops->cwrite(mport, mport->id, destid, hopcount, offset, len, value); \ | 113 | offset, len, value); \ |
135 | spin_unlock_irqrestore(&rio_config_lock, flags); \ | ||
136 | return res; \ | ||
137 | } | 114 | } |
138 | 115 | ||
139 | RIO_OP_READ(8, u8, 1) | 116 | RIO_OP_READ(8, u8, 1) |
@@ -162,14 +139,7 @@ EXPORT_SYMBOL_GPL(rio_mport_write_config_32); | |||
162 | */ | 139 | */ |
163 | int rio_mport_send_doorbell(struct rio_mport *mport, u16 destid, u16 data) | 140 | int rio_mport_send_doorbell(struct rio_mport *mport, u16 destid, u16 data) |
164 | { | 141 | { |
165 | int res; | 142 | return mport->ops->dsend(mport, mport->id, destid, data); |
166 | unsigned long flags; | ||
167 | |||
168 | spin_lock_irqsave(&rio_doorbell_lock, flags); | ||
169 | res = mport->ops->dsend(mport, mport->id, destid, data); | ||
170 | spin_unlock_irqrestore(&rio_doorbell_lock, flags); | ||
171 | |||
172 | return res; | ||
173 | } | 143 | } |
174 | 144 | ||
175 | EXPORT_SYMBOL_GPL(rio_mport_send_doorbell); | 145 | EXPORT_SYMBOL_GPL(rio_mport_send_doorbell); |
diff --git a/drivers/reset/Kconfig b/drivers/reset/Kconfig index e0c393214264..e2baecbb9dd3 100644 --- a/drivers/reset/Kconfig +++ b/drivers/reset/Kconfig | |||
@@ -34,11 +34,12 @@ config RESET_BERLIN | |||
34 | help | 34 | help |
35 | This enables the reset controller driver for Marvell Berlin SoCs. | 35 | This enables the reset controller driver for Marvell Berlin SoCs. |
36 | 36 | ||
37 | config RESET_HSDK_V1 | 37 | config RESET_HSDK |
38 | bool "HSDK v1 Reset Driver" | 38 | bool "Synopsys HSDK Reset Driver" |
39 | default n | 39 | depends on HAS_IOMEM |
40 | depends on ARC_SOC_HSDK || COMPILE_TEST | ||
40 | help | 41 | help |
41 | This enables the reset controller driver for HSDK v1. | 42 | This enables the reset controller driver for HSDK board. |
42 | 43 | ||
43 | config RESET_IMX7 | 44 | config RESET_IMX7 |
44 | bool "i.MX7 Reset Driver" if COMPILE_TEST | 45 | bool "i.MX7 Reset Driver" if COMPILE_TEST |
diff --git a/drivers/reset/Makefile b/drivers/reset/Makefile index d368367110e5..af1c15c330b3 100644 --- a/drivers/reset/Makefile +++ b/drivers/reset/Makefile | |||
@@ -5,7 +5,7 @@ obj-$(CONFIG_ARCH_TEGRA) += tegra/ | |||
5 | obj-$(CONFIG_RESET_A10SR) += reset-a10sr.o | 5 | obj-$(CONFIG_RESET_A10SR) += reset-a10sr.o |
6 | obj-$(CONFIG_RESET_ATH79) += reset-ath79.o | 6 | obj-$(CONFIG_RESET_ATH79) += reset-ath79.o |
7 | obj-$(CONFIG_RESET_BERLIN) += reset-berlin.o | 7 | obj-$(CONFIG_RESET_BERLIN) += reset-berlin.o |
8 | obj-$(CONFIG_RESET_HSDK_V1) += reset-hsdk-v1.o | 8 | obj-$(CONFIG_RESET_HSDK) += reset-hsdk.o |
9 | obj-$(CONFIG_RESET_IMX7) += reset-imx7.o | 9 | obj-$(CONFIG_RESET_IMX7) += reset-imx7.o |
10 | obj-$(CONFIG_RESET_LANTIQ) += reset-lantiq.o | 10 | obj-$(CONFIG_RESET_LANTIQ) += reset-lantiq.o |
11 | obj-$(CONFIG_RESET_LPC18XX) += reset-lpc18xx.o | 11 | obj-$(CONFIG_RESET_LPC18XX) += reset-lpc18xx.o |
diff --git a/drivers/reset/reset-hsdk-v1.c b/drivers/reset/reset-hsdk.c index bca13e4bf622..8bce391c6943 100644 --- a/drivers/reset/reset-hsdk-v1.c +++ b/drivers/reset/reset-hsdk.c | |||
@@ -1,7 +1,7 @@ | |||
1 | /* | 1 | /* |
2 | * Copyright (C) 2017 Synopsys. | 2 | * Copyright (C) 2017 Synopsys. |
3 | * | 3 | * |
4 | * Synopsys HSDKv1 SDP reset driver. | 4 | * Synopsys HSDK Development platform reset driver. |
5 | * | 5 | * |
6 | * This file is licensed under the terms of the GNU General Public | 6 | * This file is licensed under the terms of the GNU General Public |
7 | * License version 2. This program is licensed "as is" without any | 7 | * License version 2. This program is licensed "as is" without any |
@@ -18,9 +18,9 @@ | |||
18 | #include <linux/slab.h> | 18 | #include <linux/slab.h> |
19 | #include <linux/types.h> | 19 | #include <linux/types.h> |
20 | 20 | ||
21 | #define to_hsdkv1_rst(p) container_of((p), struct hsdkv1_rst, rcdev) | 21 | #define to_hsdk_rst(p) container_of((p), struct hsdk_rst, rcdev) |
22 | 22 | ||
23 | struct hsdkv1_rst { | 23 | struct hsdk_rst { |
24 | void __iomem *regs_ctl; | 24 | void __iomem *regs_ctl; |
25 | void __iomem *regs_rst; | 25 | void __iomem *regs_rst; |
26 | spinlock_t lock; | 26 | spinlock_t lock; |
@@ -49,12 +49,12 @@ static const u32 rst_map[] = { | |||
49 | #define CGU_IP_SW_RESET_RESET BIT(0) | 49 | #define CGU_IP_SW_RESET_RESET BIT(0) |
50 | #define SW_RESET_TIMEOUT 10000 | 50 | #define SW_RESET_TIMEOUT 10000 |
51 | 51 | ||
52 | static void hsdkv1_reset_config(struct hsdkv1_rst *rst, unsigned long id) | 52 | static void hsdk_reset_config(struct hsdk_rst *rst, unsigned long id) |
53 | { | 53 | { |
54 | writel(rst_map[id], rst->regs_ctl + CGU_SYS_RST_CTRL); | 54 | writel(rst_map[id], rst->regs_ctl + CGU_SYS_RST_CTRL); |
55 | } | 55 | } |
56 | 56 | ||
57 | static int hsdkv1_reset_do(struct hsdkv1_rst *rst) | 57 | static int hsdk_reset_do(struct hsdk_rst *rst) |
58 | { | 58 | { |
59 | u32 reg; | 59 | u32 reg; |
60 | 60 | ||
@@ -69,28 +69,28 @@ static int hsdkv1_reset_do(struct hsdkv1_rst *rst) | |||
69 | !(reg & CGU_IP_SW_RESET_RESET), 5, SW_RESET_TIMEOUT); | 69 | !(reg & CGU_IP_SW_RESET_RESET), 5, SW_RESET_TIMEOUT); |
70 | } | 70 | } |
71 | 71 | ||
72 | static int hsdkv1_reset_reset(struct reset_controller_dev *rcdev, | 72 | static int hsdk_reset_reset(struct reset_controller_dev *rcdev, |
73 | unsigned long id) | 73 | unsigned long id) |
74 | { | 74 | { |
75 | struct hsdkv1_rst *rst = to_hsdkv1_rst(rcdev); | 75 | struct hsdk_rst *rst = to_hsdk_rst(rcdev); |
76 | unsigned long flags; | 76 | unsigned long flags; |
77 | int ret; | 77 | int ret; |
78 | 78 | ||
79 | spin_lock_irqsave(&rst->lock, flags); | 79 | spin_lock_irqsave(&rst->lock, flags); |
80 | hsdkv1_reset_config(rst, id); | 80 | hsdk_reset_config(rst, id); |
81 | ret = hsdkv1_reset_do(rst); | 81 | ret = hsdk_reset_do(rst); |
82 | spin_unlock_irqrestore(&rst->lock, flags); | 82 | spin_unlock_irqrestore(&rst->lock, flags); |
83 | 83 | ||
84 | return ret; | 84 | return ret; |
85 | } | 85 | } |
86 | 86 | ||
87 | static const struct reset_control_ops hsdkv1_reset_ops = { | 87 | static const struct reset_control_ops hsdk_reset_ops = { |
88 | .reset = hsdkv1_reset_reset, | 88 | .reset = hsdk_reset_reset, |
89 | }; | 89 | }; |
90 | 90 | ||
91 | static int hsdkv1_reset_probe(struct platform_device *pdev) | 91 | static int hsdk_reset_probe(struct platform_device *pdev) |
92 | { | 92 | { |
93 | struct hsdkv1_rst *rst; | 93 | struct hsdk_rst *rst; |
94 | struct resource *mem; | 94 | struct resource *mem; |
95 | 95 | ||
96 | rst = devm_kzalloc(&pdev->dev, sizeof(*rst), GFP_KERNEL); | 96 | rst = devm_kzalloc(&pdev->dev, sizeof(*rst), GFP_KERNEL); |
@@ -110,7 +110,7 @@ static int hsdkv1_reset_probe(struct platform_device *pdev) | |||
110 | spin_lock_init(&rst->lock); | 110 | spin_lock_init(&rst->lock); |
111 | 111 | ||
112 | rst->rcdev.owner = THIS_MODULE; | 112 | rst->rcdev.owner = THIS_MODULE; |
113 | rst->rcdev.ops = &hsdkv1_reset_ops; | 113 | rst->rcdev.ops = &hsdk_reset_ops; |
114 | rst->rcdev.of_node = pdev->dev.of_node; | 114 | rst->rcdev.of_node = pdev->dev.of_node; |
115 | rst->rcdev.nr_resets = HSDK_MAX_RESETS; | 115 | rst->rcdev.nr_resets = HSDK_MAX_RESETS; |
116 | rst->rcdev.of_reset_n_cells = 1; | 116 | rst->rcdev.of_reset_n_cells = 1; |
@@ -118,20 +118,20 @@ static int hsdkv1_reset_probe(struct platform_device *pdev) | |||
118 | return reset_controller_register(&rst->rcdev); | 118 | return reset_controller_register(&rst->rcdev); |
119 | } | 119 | } |
120 | 120 | ||
121 | static const struct of_device_id hsdkv1_reset_dt_match[] = { | 121 | static const struct of_device_id hsdk_reset_dt_match[] = { |
122 | { .compatible = "snps,hsdk-v1.0-reset" }, | 122 | { .compatible = "snps,hsdk-reset" }, |
123 | { }, | 123 | { }, |
124 | }; | 124 | }; |
125 | 125 | ||
126 | static struct platform_driver hsdkv1_reset_driver = { | 126 | static struct platform_driver hsdk_reset_driver = { |
127 | .probe = hsdkv1_reset_probe, | 127 | .probe = hsdk_reset_probe, |
128 | .driver = { | 128 | .driver = { |
129 | .name = "hsdk-v1.0-reset", | 129 | .name = "hsdk-reset", |
130 | .of_match_table = hsdkv1_reset_dt_match, | 130 | .of_match_table = hsdk_reset_dt_match, |
131 | }, | 131 | }, |
132 | }; | 132 | }; |
133 | builtin_platform_driver(hsdkv1_reset_driver); | 133 | builtin_platform_driver(hsdk_reset_driver); |
134 | 134 | ||
135 | MODULE_AUTHOR("Eugeniy Paltsev <Eugeniy.Paltsev@synopsys.com>"); | 135 | MODULE_AUTHOR("Eugeniy Paltsev <Eugeniy.Paltsev@synopsys.com>"); |
136 | MODULE_DESCRIPTION("Synopsys HSDKv1 SDP reset driver"); | 136 | MODULE_DESCRIPTION("Synopsys HSDK SDP reset driver"); |
137 | MODULE_LICENSE("GPL v2"); | 137 | MODULE_LICENSE("GPL v2"); |
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c index ea19b4ff87a2..29f35e29d480 100644 --- a/drivers/s390/block/dasd.c +++ b/drivers/s390/block/dasd.c | |||
@@ -1644,7 +1644,9 @@ void dasd_generic_handle_state_change(struct dasd_device *device) | |||
1644 | dasd_schedule_device_bh(device); | 1644 | dasd_schedule_device_bh(device); |
1645 | if (device->block) { | 1645 | if (device->block) { |
1646 | dasd_schedule_block_bh(device->block); | 1646 | dasd_schedule_block_bh(device->block); |
1647 | blk_mq_run_hw_queues(device->block->request_queue, true); | 1647 | if (device->block->request_queue) |
1648 | blk_mq_run_hw_queues(device->block->request_queue, | ||
1649 | true); | ||
1648 | } | 1650 | } |
1649 | } | 1651 | } |
1650 | EXPORT_SYMBOL_GPL(dasd_generic_handle_state_change); | 1652 | EXPORT_SYMBOL_GPL(dasd_generic_handle_state_change); |
@@ -3759,7 +3761,9 @@ int dasd_generic_path_operational(struct dasd_device *device) | |||
3759 | dasd_schedule_device_bh(device); | 3761 | dasd_schedule_device_bh(device); |
3760 | if (device->block) { | 3762 | if (device->block) { |
3761 | dasd_schedule_block_bh(device->block); | 3763 | dasd_schedule_block_bh(device->block); |
3762 | blk_mq_run_hw_queues(device->block->request_queue, true); | 3764 | if (device->block->request_queue) |
3765 | blk_mq_run_hw_queues(device->block->request_queue, | ||
3766 | true); | ||
3763 | } | 3767 | } |
3764 | 3768 | ||
3765 | if (!device->stopped) | 3769 | if (!device->stopped) |
@@ -4025,7 +4029,9 @@ int dasd_generic_restore_device(struct ccw_device *cdev) | |||
4025 | 4029 | ||
4026 | if (device->block) { | 4030 | if (device->block) { |
4027 | dasd_schedule_block_bh(device->block); | 4031 | dasd_schedule_block_bh(device->block); |
4028 | blk_mq_run_hw_queues(device->block->request_queue, true); | 4032 | if (device->block->request_queue) |
4033 | blk_mq_run_hw_queues(device->block->request_queue, | ||
4034 | true); | ||
4029 | } | 4035 | } |
4030 | 4036 | ||
4031 | clear_bit(DASD_FLAG_SUSPENDED, &device->flags); | 4037 | clear_bit(DASD_FLAG_SUSPENDED, &device->flags); |
diff --git a/drivers/s390/block/scm_blk.c b/drivers/s390/block/scm_blk.c index 2e7fd966c515..eb51893c74a4 100644 --- a/drivers/s390/block/scm_blk.c +++ b/drivers/s390/block/scm_blk.c | |||
@@ -249,7 +249,7 @@ static void scm_request_requeue(struct scm_request *scmrq) | |||
249 | static void scm_request_finish(struct scm_request *scmrq) | 249 | static void scm_request_finish(struct scm_request *scmrq) |
250 | { | 250 | { |
251 | struct scm_blk_dev *bdev = scmrq->bdev; | 251 | struct scm_blk_dev *bdev = scmrq->bdev; |
252 | int *error; | 252 | blk_status_t *error; |
253 | int i; | 253 | int i; |
254 | 254 | ||
255 | for (i = 0; i < nr_requests_per_io && scmrq->request[i]; i++) { | 255 | for (i = 0; i < nr_requests_per_io && scmrq->request[i]; i++) { |
@@ -415,7 +415,7 @@ void scm_blk_irq(struct scm_device *scmdev, void *data, blk_status_t error) | |||
415 | 415 | ||
416 | static void scm_blk_request_done(struct request *req) | 416 | static void scm_blk_request_done(struct request *req) |
417 | { | 417 | { |
418 | int *error = blk_mq_rq_to_pdu(req); | 418 | blk_status_t *error = blk_mq_rq_to_pdu(req); |
419 | 419 | ||
420 | blk_mq_end_request(req, *error); | 420 | blk_mq_end_request(req, *error); |
421 | } | 421 | } |
@@ -450,7 +450,7 @@ int scm_blk_dev_setup(struct scm_blk_dev *bdev, struct scm_device *scmdev) | |||
450 | atomic_set(&bdev->queued_reqs, 0); | 450 | atomic_set(&bdev->queued_reqs, 0); |
451 | 451 | ||
452 | bdev->tag_set.ops = &scm_mq_ops; | 452 | bdev->tag_set.ops = &scm_mq_ops; |
453 | bdev->tag_set.cmd_size = sizeof(int); | 453 | bdev->tag_set.cmd_size = sizeof(blk_status_t); |
454 | bdev->tag_set.nr_hw_queues = nr_requests; | 454 | bdev->tag_set.nr_hw_queues = nr_requests; |
455 | bdev->tag_set.queue_depth = nr_requests_per_io * nr_requests; | 455 | bdev->tag_set.queue_depth = nr_requests_per_io * nr_requests; |
456 | bdev->tag_set.flags = BLK_MQ_F_SHOULD_MERGE; | 456 | bdev->tag_set.flags = BLK_MQ_F_SHOULD_MERGE; |
diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c index 489b583f263d..e5c32f4b5287 100644 --- a/drivers/s390/cio/device.c +++ b/drivers/s390/cio/device.c | |||
@@ -1225,10 +1225,16 @@ static int device_is_disconnected(struct ccw_device *cdev) | |||
1225 | static int recovery_check(struct device *dev, void *data) | 1225 | static int recovery_check(struct device *dev, void *data) |
1226 | { | 1226 | { |
1227 | struct ccw_device *cdev = to_ccwdev(dev); | 1227 | struct ccw_device *cdev = to_ccwdev(dev); |
1228 | struct subchannel *sch; | ||
1228 | int *redo = data; | 1229 | int *redo = data; |
1229 | 1230 | ||
1230 | spin_lock_irq(cdev->ccwlock); | 1231 | spin_lock_irq(cdev->ccwlock); |
1231 | switch (cdev->private->state) { | 1232 | switch (cdev->private->state) { |
1233 | case DEV_STATE_ONLINE: | ||
1234 | sch = to_subchannel(cdev->dev.parent); | ||
1235 | if ((sch->schib.pmcw.pam & sch->opm) == sch->vpm) | ||
1236 | break; | ||
1237 | /* fall through */ | ||
1232 | case DEV_STATE_DISCONNECTED: | 1238 | case DEV_STATE_DISCONNECTED: |
1233 | CIO_MSG_EVENT(3, "recovery: trigger 0.%x.%04x\n", | 1239 | CIO_MSG_EVENT(3, "recovery: trigger 0.%x.%04x\n", |
1234 | cdev->private->dev_id.ssid, | 1240 | cdev->private->dev_id.ssid, |
@@ -1260,7 +1266,7 @@ static void recovery_work_func(struct work_struct *unused) | |||
1260 | } | 1266 | } |
1261 | spin_unlock_irq(&recovery_lock); | 1267 | spin_unlock_irq(&recovery_lock); |
1262 | } else | 1268 | } else |
1263 | CIO_MSG_EVENT(4, "recovery: end\n"); | 1269 | CIO_MSG_EVENT(3, "recovery: end\n"); |
1264 | } | 1270 | } |
1265 | 1271 | ||
1266 | static DECLARE_WORK(recovery_work, recovery_work_func); | 1272 | static DECLARE_WORK(recovery_work, recovery_work_func); |
@@ -1274,11 +1280,11 @@ static void recovery_func(unsigned long data) | |||
1274 | schedule_work(&recovery_work); | 1280 | schedule_work(&recovery_work); |
1275 | } | 1281 | } |
1276 | 1282 | ||
1277 | static void ccw_device_schedule_recovery(void) | 1283 | void ccw_device_schedule_recovery(void) |
1278 | { | 1284 | { |
1279 | unsigned long flags; | 1285 | unsigned long flags; |
1280 | 1286 | ||
1281 | CIO_MSG_EVENT(4, "recovery: schedule\n"); | 1287 | CIO_MSG_EVENT(3, "recovery: schedule\n"); |
1282 | spin_lock_irqsave(&recovery_lock, flags); | 1288 | spin_lock_irqsave(&recovery_lock, flags); |
1283 | if (!timer_pending(&recovery_timer) || (recovery_phase != 0)) { | 1289 | if (!timer_pending(&recovery_timer) || (recovery_phase != 0)) { |
1284 | recovery_phase = 0; | 1290 | recovery_phase = 0; |
diff --git a/drivers/s390/cio/device.h b/drivers/s390/cio/device.h index ec497af99dd8..69cb70f080a5 100644 --- a/drivers/s390/cio/device.h +++ b/drivers/s390/cio/device.h | |||
@@ -134,6 +134,7 @@ void ccw_device_set_disconnected(struct ccw_device *cdev); | |||
134 | void ccw_device_set_notoper(struct ccw_device *cdev); | 134 | void ccw_device_set_notoper(struct ccw_device *cdev); |
135 | 135 | ||
136 | void ccw_device_set_timeout(struct ccw_device *, int); | 136 | void ccw_device_set_timeout(struct ccw_device *, int); |
137 | void ccw_device_schedule_recovery(void); | ||
137 | 138 | ||
138 | /* Channel measurement facility related */ | 139 | /* Channel measurement facility related */ |
139 | void retry_set_schib(struct ccw_device *cdev); | 140 | void retry_set_schib(struct ccw_device *cdev); |
diff --git a/drivers/s390/cio/device_fsm.c b/drivers/s390/cio/device_fsm.c index 12016e32e519..f98ea674c3d8 100644 --- a/drivers/s390/cio/device_fsm.c +++ b/drivers/s390/cio/device_fsm.c | |||
@@ -476,6 +476,17 @@ static void create_fake_irb(struct irb *irb, int type) | |||
476 | } | 476 | } |
477 | } | 477 | } |
478 | 478 | ||
479 | static void ccw_device_handle_broken_paths(struct ccw_device *cdev) | ||
480 | { | ||
481 | struct subchannel *sch = to_subchannel(cdev->dev.parent); | ||
482 | u8 broken_paths = (sch->schib.pmcw.pam & sch->opm) ^ sch->vpm; | ||
483 | |||
484 | if (broken_paths && (cdev->private->path_broken_mask != broken_paths)) | ||
485 | ccw_device_schedule_recovery(); | ||
486 | |||
487 | cdev->private->path_broken_mask = broken_paths; | ||
488 | } | ||
489 | |||
479 | void ccw_device_verify_done(struct ccw_device *cdev, int err) | 490 | void ccw_device_verify_done(struct ccw_device *cdev, int err) |
480 | { | 491 | { |
481 | struct subchannel *sch; | 492 | struct subchannel *sch; |
@@ -508,6 +519,7 @@ callback: | |||
508 | memset(&cdev->private->irb, 0, sizeof(struct irb)); | 519 | memset(&cdev->private->irb, 0, sizeof(struct irb)); |
509 | } | 520 | } |
510 | ccw_device_report_path_events(cdev); | 521 | ccw_device_report_path_events(cdev); |
522 | ccw_device_handle_broken_paths(cdev); | ||
511 | break; | 523 | break; |
512 | case -ETIME: | 524 | case -ETIME: |
513 | case -EUSERS: | 525 | case -EUSERS: |
diff --git a/drivers/s390/cio/io_sch.h b/drivers/s390/cio/io_sch.h index 220f49145b2f..9a1b56b2df3e 100644 --- a/drivers/s390/cio/io_sch.h +++ b/drivers/s390/cio/io_sch.h | |||
@@ -131,6 +131,8 @@ struct ccw_device_private { | |||
131 | not operable */ | 131 | not operable */ |
132 | u8 path_gone_mask; /* mask of paths, that became unavailable */ | 132 | u8 path_gone_mask; /* mask of paths, that became unavailable */ |
133 | u8 path_new_mask; /* mask of paths, that became available */ | 133 | u8 path_new_mask; /* mask of paths, that became available */ |
134 | u8 path_broken_mask; /* mask of paths, which were found to be | ||
135 | unusable */ | ||
134 | struct { | 136 | struct { |
135 | unsigned int fast:1; /* post with "channel end" */ | 137 | unsigned int fast:1; /* post with "channel end" */ |
136 | unsigned int repall:1; /* report every interrupt status */ | 138 | unsigned int repall:1; /* report every interrupt status */ |
diff --git a/drivers/scsi/aacraid/aachba.c b/drivers/scsi/aacraid/aachba.c index a64285ab0728..af3e4d3f9735 100644 --- a/drivers/scsi/aacraid/aachba.c +++ b/drivers/scsi/aacraid/aachba.c | |||
@@ -699,13 +699,13 @@ static void _aac_probe_container1(void * context, struct fib * fibptr) | |||
699 | int status; | 699 | int status; |
700 | 700 | ||
701 | dresp = (struct aac_mount *) fib_data(fibptr); | 701 | dresp = (struct aac_mount *) fib_data(fibptr); |
702 | if (!(fibptr->dev->supplement_adapter_info.supported_options2 & | 702 | if (!aac_supports_2T(fibptr->dev)) { |
703 | AAC_OPTION_VARIABLE_BLOCK_SIZE)) | ||
704 | dresp->mnt[0].capacityhigh = 0; | 703 | dresp->mnt[0].capacityhigh = 0; |
705 | if ((le32_to_cpu(dresp->status) != ST_OK) || | 704 | if ((le32_to_cpu(dresp->status) == ST_OK) && |
706 | (le32_to_cpu(dresp->mnt[0].vol) != CT_NONE)) { | 705 | (le32_to_cpu(dresp->mnt[0].vol) != CT_NONE)) { |
707 | _aac_probe_container2(context, fibptr); | 706 | _aac_probe_container2(context, fibptr); |
708 | return; | 707 | return; |
708 | } | ||
709 | } | 709 | } |
710 | scsicmd = (struct scsi_cmnd *) context; | 710 | scsicmd = (struct scsi_cmnd *) context; |
711 | 711 | ||
diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h index 92fabf2b0c24..403a639574e5 100644 --- a/drivers/scsi/aacraid/aacraid.h +++ b/drivers/scsi/aacraid/aacraid.h | |||
@@ -2701,6 +2701,11 @@ static inline int aac_is_src(struct aac_dev *dev) | |||
2701 | return 0; | 2701 | return 0; |
2702 | } | 2702 | } |
2703 | 2703 | ||
2704 | static inline int aac_supports_2T(struct aac_dev *dev) | ||
2705 | { | ||
2706 | return (dev->adapter_info.options & AAC_OPT_NEW_COMM_64); | ||
2707 | } | ||
2708 | |||
2704 | char * get_container_type(unsigned type); | 2709 | char * get_container_type(unsigned type); |
2705 | extern int numacb; | 2710 | extern int numacb; |
2706 | extern char aac_driver_version[]; | 2711 | extern char aac_driver_version[]; |
diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c index 87cc4a93e637..62beb2596466 100644 --- a/drivers/scsi/aacraid/linit.c +++ b/drivers/scsi/aacraid/linit.c | |||
@@ -906,12 +906,14 @@ static int aac_eh_dev_reset(struct scsi_cmnd *cmd) | |||
906 | 906 | ||
907 | bus = aac_logical_to_phys(scmd_channel(cmd)); | 907 | bus = aac_logical_to_phys(scmd_channel(cmd)); |
908 | cid = scmd_id(cmd); | 908 | cid = scmd_id(cmd); |
909 | info = &aac->hba_map[bus][cid]; | 909 | |
910 | if (bus >= AAC_MAX_BUSES || cid >= AAC_MAX_TARGETS || | 910 | if (bus >= AAC_MAX_BUSES || cid >= AAC_MAX_TARGETS) |
911 | info->devtype != AAC_DEVTYPE_NATIVE_RAW) | ||
912 | return FAILED; | 911 | return FAILED; |
913 | 912 | ||
914 | if (info->reset_state > 0) | 913 | info = &aac->hba_map[bus][cid]; |
914 | |||
915 | if (info->devtype != AAC_DEVTYPE_NATIVE_RAW && | ||
916 | info->reset_state > 0) | ||
915 | return FAILED; | 917 | return FAILED; |
916 | 918 | ||
917 | pr_err("%s: Host adapter reset request. SCSI hang ?\n", | 919 | pr_err("%s: Host adapter reset request. SCSI hang ?\n", |
@@ -962,12 +964,14 @@ static int aac_eh_target_reset(struct scsi_cmnd *cmd) | |||
962 | 964 | ||
963 | bus = aac_logical_to_phys(scmd_channel(cmd)); | 965 | bus = aac_logical_to_phys(scmd_channel(cmd)); |
964 | cid = scmd_id(cmd); | 966 | cid = scmd_id(cmd); |
965 | info = &aac->hba_map[bus][cid]; | 967 | |
966 | if (bus >= AAC_MAX_BUSES || cid >= AAC_MAX_TARGETS || | 968 | if (bus >= AAC_MAX_BUSES || cid >= AAC_MAX_TARGETS) |
967 | info->devtype != AAC_DEVTYPE_NATIVE_RAW) | ||
968 | return FAILED; | 969 | return FAILED; |
969 | 970 | ||
970 | if (info->reset_state > 0) | 971 | info = &aac->hba_map[bus][cid]; |
972 | |||
973 | if (info->devtype != AAC_DEVTYPE_NATIVE_RAW && | ||
974 | info->reset_state > 0) | ||
971 | return FAILED; | 975 | return FAILED; |
972 | 976 | ||
973 | pr_err("%s: Host adapter reset request. SCSI hang ?\n", | 977 | pr_err("%s: Host adapter reset request. SCSI hang ?\n", |
diff --git a/drivers/scsi/aacraid/src.c b/drivers/scsi/aacraid/src.c index 48c2b2b34b72..0c9361c87ec8 100644 --- a/drivers/scsi/aacraid/src.c +++ b/drivers/scsi/aacraid/src.c | |||
@@ -740,6 +740,8 @@ static void aac_send_iop_reset(struct aac_dev *dev) | |||
740 | aac_set_intx_mode(dev); | 740 | aac_set_intx_mode(dev); |
741 | 741 | ||
742 | src_writel(dev, MUnit.IDR, IOP_SRC_RESET_MASK); | 742 | src_writel(dev, MUnit.IDR, IOP_SRC_RESET_MASK); |
743 | |||
744 | msleep(5000); | ||
743 | } | 745 | } |
744 | 746 | ||
745 | static void aac_send_hardware_soft_reset(struct aac_dev *dev) | 747 | static void aac_send_hardware_soft_reset(struct aac_dev *dev) |
diff --git a/drivers/scsi/arm/acornscsi.c b/drivers/scsi/arm/acornscsi.c index 690816f3c6af..421fe869a11e 100644 --- a/drivers/scsi/arm/acornscsi.c +++ b/drivers/scsi/arm/acornscsi.c | |||
@@ -2725,9 +2725,9 @@ int acornscsi_abort(struct scsi_cmnd *SCpnt) | |||
2725 | * Params : SCpnt - command causing reset | 2725 | * Params : SCpnt - command causing reset |
2726 | * Returns : one of SCSI_RESET_ macros | 2726 | * Returns : one of SCSI_RESET_ macros |
2727 | */ | 2727 | */ |
2728 | int acornscsi_host_reset(struct Scsi_Host *shpnt) | 2728 | int acornscsi_host_reset(struct scsi_cmnd *SCpnt) |
2729 | { | 2729 | { |
2730 | AS_Host *host = (AS_Host *)shpnt->hostdata; | 2730 | AS_Host *host = (AS_Host *)SCpnt->device->host->hostdata; |
2731 | struct scsi_cmnd *SCptr; | 2731 | struct scsi_cmnd *SCptr; |
2732 | 2732 | ||
2733 | host->stats.resets += 1; | 2733 | host->stats.resets += 1; |
@@ -2741,7 +2741,7 @@ int acornscsi_host_reset(struct Scsi_Host *shpnt) | |||
2741 | 2741 | ||
2742 | printk(KERN_WARNING "acornscsi_reset: "); | 2742 | printk(KERN_WARNING "acornscsi_reset: "); |
2743 | print_sbic_status(asr, ssr, host->scsi.phase); | 2743 | print_sbic_status(asr, ssr, host->scsi.phase); |
2744 | for (devidx = 0; devidx < 9; devidx ++) { | 2744 | for (devidx = 0; devidx < 9; devidx++) |
2745 | acornscsi_dumplog(host, devidx); | 2745 | acornscsi_dumplog(host, devidx); |
2746 | } | 2746 | } |
2747 | #endif | 2747 | #endif |
diff --git a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c index 785fb42f6650..2799a6b08f73 100644 --- a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c +++ b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c | |||
@@ -3767,7 +3767,7 @@ static int ibmvscsis_write_pending(struct se_cmd *se_cmd) | |||
3767 | */ | 3767 | */ |
3768 | if ((vscsi->flags & (CLIENT_FAILED | RESPONSE_Q_DOWN))) { | 3768 | if ((vscsi->flags & (CLIENT_FAILED | RESPONSE_Q_DOWN))) { |
3769 | pr_err("write_pending failed since: %d\n", vscsi->flags); | 3769 | pr_err("write_pending failed since: %d\n", vscsi->flags); |
3770 | return 0; | 3770 | return -EIO; |
3771 | } | 3771 | } |
3772 | 3772 | ||
3773 | rc = srp_transfer_data(cmd, &vio_iu(iue)->srp.cmd, ibmvscsis_rdma, | 3773 | rc = srp_transfer_data(cmd, &vio_iu(iue)->srp.cmd, ibmvscsis_rdma, |
diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c index bd4605a34f54..c62e8d111fd9 100644 --- a/drivers/scsi/libiscsi.c +++ b/drivers/scsi/libiscsi.c | |||
@@ -2851,9 +2851,6 @@ EXPORT_SYMBOL_GPL(iscsi_session_setup); | |||
2851 | /** | 2851 | /** |
2852 | * iscsi_session_teardown - destroy session, host, and cls_session | 2852 | * iscsi_session_teardown - destroy session, host, and cls_session |
2853 | * @cls_session: iscsi session | 2853 | * @cls_session: iscsi session |
2854 | * | ||
2855 | * The driver must have called iscsi_remove_session before | ||
2856 | * calling this. | ||
2857 | */ | 2854 | */ |
2858 | void iscsi_session_teardown(struct iscsi_cls_session *cls_session) | 2855 | void iscsi_session_teardown(struct iscsi_cls_session *cls_session) |
2859 | { | 2856 | { |
@@ -2863,6 +2860,8 @@ void iscsi_session_teardown(struct iscsi_cls_session *cls_session) | |||
2863 | 2860 | ||
2864 | iscsi_pool_free(&session->cmdpool); | 2861 | iscsi_pool_free(&session->cmdpool); |
2865 | 2862 | ||
2863 | iscsi_remove_session(cls_session); | ||
2864 | |||
2866 | kfree(session->password); | 2865 | kfree(session->password); |
2867 | kfree(session->password_in); | 2866 | kfree(session->password_in); |
2868 | kfree(session->username); | 2867 | kfree(session->username); |
@@ -2877,7 +2876,8 @@ void iscsi_session_teardown(struct iscsi_cls_session *cls_session) | |||
2877 | kfree(session->portal_type); | 2876 | kfree(session->portal_type); |
2878 | kfree(session->discovery_parent_type); | 2877 | kfree(session->discovery_parent_type); |
2879 | 2878 | ||
2880 | iscsi_destroy_session(cls_session); | 2879 | iscsi_free_session(cls_session); |
2880 | |||
2881 | iscsi_host_dec_session_cnt(shost); | 2881 | iscsi_host_dec_session_cnt(shost); |
2882 | module_put(owner); | 2882 | module_put(owner); |
2883 | } | 2883 | } |
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c index 7e7ae786121b..100bc4c8798d 100644 --- a/drivers/scsi/lpfc/lpfc_init.c +++ b/drivers/scsi/lpfc/lpfc_init.c | |||
@@ -6131,6 +6131,7 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba) | |||
6131 | "Extents and RPI headers enabled.\n"); | 6131 | "Extents and RPI headers enabled.\n"); |
6132 | } | 6132 | } |
6133 | mempool_free(mboxq, phba->mbox_mem_pool); | 6133 | mempool_free(mboxq, phba->mbox_mem_pool); |
6134 | rc = -EIO; | ||
6134 | goto out_free_bsmbx; | 6135 | goto out_free_bsmbx; |
6135 | } | 6136 | } |
6136 | 6137 | ||
diff --git a/drivers/scsi/lpfc/lpfc_nvme.c b/drivers/scsi/lpfc/lpfc_nvme.c index 79ba3ce063a4..23bdb1ca106e 100644 --- a/drivers/scsi/lpfc/lpfc_nvme.c +++ b/drivers/scsi/lpfc/lpfc_nvme.c | |||
@@ -884,7 +884,7 @@ out_err: | |||
884 | wcqe->total_data_placed); | 884 | wcqe->total_data_placed); |
885 | nCmd->transferred_length = 0; | 885 | nCmd->transferred_length = 0; |
886 | nCmd->rcv_rsplen = 0; | 886 | nCmd->rcv_rsplen = 0; |
887 | nCmd->status = NVME_SC_FC_TRANSPORT_ERROR; | 887 | nCmd->status = NVME_SC_INTERNAL; |
888 | } | 888 | } |
889 | } | 889 | } |
890 | 890 | ||
diff --git a/drivers/scsi/qla2xxx/qla_nvme.c b/drivers/scsi/qla2xxx/qla_nvme.c index 1f59e7a74c7b..6b33a1f24f56 100644 --- a/drivers/scsi/qla2xxx/qla_nvme.c +++ b/drivers/scsi/qla2xxx/qla_nvme.c | |||
@@ -180,7 +180,7 @@ static void qla_nvme_sp_done(void *ptr, int res) | |||
180 | goto rel; | 180 | goto rel; |
181 | 181 | ||
182 | if (unlikely(res == QLA_FUNCTION_FAILED)) | 182 | if (unlikely(res == QLA_FUNCTION_FAILED)) |
183 | fd->status = NVME_SC_FC_TRANSPORT_ERROR; | 183 | fd->status = NVME_SC_INTERNAL; |
184 | else | 184 | else |
185 | fd->status = 0; | 185 | fd->status = 0; |
186 | 186 | ||
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c index 38942050b265..dab876c65473 100644 --- a/drivers/scsi/scsi_error.c +++ b/drivers/scsi/scsi_error.c | |||
@@ -580,7 +580,8 @@ int scsi_check_sense(struct scsi_cmnd *scmd) | |||
580 | if (sshdr.asc == 0x20 || /* Invalid command operation code */ | 580 | if (sshdr.asc == 0x20 || /* Invalid command operation code */ |
581 | sshdr.asc == 0x21 || /* Logical block address out of range */ | 581 | sshdr.asc == 0x21 || /* Logical block address out of range */ |
582 | sshdr.asc == 0x24 || /* Invalid field in cdb */ | 582 | sshdr.asc == 0x24 || /* Invalid field in cdb */ |
583 | sshdr.asc == 0x26) { /* Parameter value invalid */ | 583 | sshdr.asc == 0x26 || /* Parameter value invalid */ |
584 | sshdr.asc == 0x27) { /* Write protected */ | ||
584 | set_host_byte(scmd, DID_TARGET_FAILURE); | 585 | set_host_byte(scmd, DID_TARGET_FAILURE); |
585 | } | 586 | } |
586 | return SUCCESS; | 587 | return SUCCESS; |
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c index e7818afeda2b..15590a063ad9 100644 --- a/drivers/scsi/scsi_scan.c +++ b/drivers/scsi/scsi_scan.c | |||
@@ -956,6 +956,9 @@ static int scsi_add_lun(struct scsi_device *sdev, unsigned char *inq_result, | |||
956 | if (*bflags & BLIST_NO_DIF) | 956 | if (*bflags & BLIST_NO_DIF) |
957 | sdev->no_dif = 1; | 957 | sdev->no_dif = 1; |
958 | 958 | ||
959 | if (*bflags & BLIST_UNMAP_LIMIT_WS) | ||
960 | sdev->unmap_limit_for_ws = 1; | ||
961 | |||
959 | sdev->eh_timeout = SCSI_DEFAULT_EH_TIMEOUT; | 962 | sdev->eh_timeout = SCSI_DEFAULT_EH_TIMEOUT; |
960 | 963 | ||
961 | if (*bflags & BLIST_TRY_VPD_PAGES) | 964 | if (*bflags & BLIST_TRY_VPD_PAGES) |
diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c index 3c6bc0081fcb..cbd4495d0ff9 100644 --- a/drivers/scsi/scsi_transport_fc.c +++ b/drivers/scsi/scsi_transport_fc.c | |||
@@ -2739,7 +2739,8 @@ fc_remote_port_add(struct Scsi_Host *shost, int channel, | |||
2739 | 2739 | ||
2740 | list_for_each_entry(rport, &fc_host->rports, peers) { | 2740 | list_for_each_entry(rport, &fc_host->rports, peers) { |
2741 | 2741 | ||
2742 | if ((rport->port_state == FC_PORTSTATE_BLOCKED) && | 2742 | if ((rport->port_state == FC_PORTSTATE_BLOCKED || |
2743 | rport->port_state == FC_PORTSTATE_NOTPRESENT) && | ||
2743 | (rport->channel == channel)) { | 2744 | (rport->channel == channel)) { |
2744 | 2745 | ||
2745 | switch (fc_host->tgtid_bind_type) { | 2746 | switch (fc_host->tgtid_bind_type) { |
@@ -2876,7 +2877,6 @@ fc_remote_port_add(struct Scsi_Host *shost, int channel, | |||
2876 | memcpy(&rport->port_name, &ids->port_name, | 2877 | memcpy(&rport->port_name, &ids->port_name, |
2877 | sizeof(rport->port_name)); | 2878 | sizeof(rport->port_name)); |
2878 | rport->port_id = ids->port_id; | 2879 | rport->port_id = ids->port_id; |
2879 | rport->roles = ids->roles; | ||
2880 | rport->port_state = FC_PORTSTATE_ONLINE; | 2880 | rport->port_state = FC_PORTSTATE_ONLINE; |
2881 | rport->flags &= ~FC_RPORT_FAST_FAIL_TIMEDOUT; | 2881 | rport->flags &= ~FC_RPORT_FAST_FAIL_TIMEDOUT; |
2882 | 2882 | ||
@@ -2885,15 +2885,7 @@ fc_remote_port_add(struct Scsi_Host *shost, int channel, | |||
2885 | fci->f->dd_fcrport_size); | 2885 | fci->f->dd_fcrport_size); |
2886 | spin_unlock_irqrestore(shost->host_lock, flags); | 2886 | spin_unlock_irqrestore(shost->host_lock, flags); |
2887 | 2887 | ||
2888 | if (ids->roles & FC_PORT_ROLE_FCP_TARGET) { | 2888 | fc_remote_port_rolechg(rport, ids->roles); |
2889 | scsi_target_unblock(&rport->dev, SDEV_RUNNING); | ||
2890 | |||
2891 | /* initiate a scan of the target */ | ||
2892 | spin_lock_irqsave(shost->host_lock, flags); | ||
2893 | rport->flags |= FC_RPORT_SCAN_PENDING; | ||
2894 | scsi_queue_work(shost, &rport->scan_work); | ||
2895 | spin_unlock_irqrestore(shost->host_lock, flags); | ||
2896 | } | ||
2897 | return rport; | 2889 | return rport; |
2898 | } | 2890 | } |
2899 | } | 2891 | } |
@@ -3571,7 +3563,7 @@ fc_vport_sched_delete(struct work_struct *work) | |||
3571 | static enum blk_eh_timer_return | 3563 | static enum blk_eh_timer_return |
3572 | fc_bsg_job_timeout(struct request *req) | 3564 | fc_bsg_job_timeout(struct request *req) |
3573 | { | 3565 | { |
3574 | struct bsg_job *job = (void *) req->special; | 3566 | struct bsg_job *job = blk_mq_rq_to_pdu(req); |
3575 | struct Scsi_Host *shost = fc_bsg_to_shost(job); | 3567 | struct Scsi_Host *shost = fc_bsg_to_shost(job); |
3576 | struct fc_rport *rport = fc_bsg_to_rport(job); | 3568 | struct fc_rport *rport = fc_bsg_to_rport(job); |
3577 | struct fc_internal *i = to_fc_internal(shost->transportt); | 3569 | struct fc_internal *i = to_fc_internal(shost->transportt); |
diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c index 8934f19bce8e..7404d26895f5 100644 --- a/drivers/scsi/scsi_transport_iscsi.c +++ b/drivers/scsi/scsi_transport_iscsi.c | |||
@@ -2211,22 +2211,6 @@ void iscsi_free_session(struct iscsi_cls_session *session) | |||
2211 | EXPORT_SYMBOL_GPL(iscsi_free_session); | 2211 | EXPORT_SYMBOL_GPL(iscsi_free_session); |
2212 | 2212 | ||
2213 | /** | 2213 | /** |
2214 | * iscsi_destroy_session - destroy iscsi session | ||
2215 | * @session: iscsi_session | ||
2216 | * | ||
2217 | * Can be called by a LLD or iscsi_transport. There must not be | ||
2218 | * any running connections. | ||
2219 | */ | ||
2220 | int iscsi_destroy_session(struct iscsi_cls_session *session) | ||
2221 | { | ||
2222 | iscsi_remove_session(session); | ||
2223 | ISCSI_DBG_TRANS_SESSION(session, "Completing session destruction\n"); | ||
2224 | iscsi_free_session(session); | ||
2225 | return 0; | ||
2226 | } | ||
2227 | EXPORT_SYMBOL_GPL(iscsi_destroy_session); | ||
2228 | |||
2229 | /** | ||
2230 | * iscsi_create_conn - create iscsi class connection | 2214 | * iscsi_create_conn - create iscsi class connection |
2231 | * @session: iscsi cls session | 2215 | * @session: iscsi cls session |
2232 | * @dd_size: private driver data size | 2216 | * @dd_size: private driver data size |
@@ -3689,7 +3673,7 @@ iscsi_if_rx(struct sk_buff *skb) | |||
3689 | uint32_t group; | 3673 | uint32_t group; |
3690 | 3674 | ||
3691 | nlh = nlmsg_hdr(skb); | 3675 | nlh = nlmsg_hdr(skb); |
3692 | if (nlh->nlmsg_len < sizeof(*nlh) || | 3676 | if (nlh->nlmsg_len < sizeof(*nlh) + sizeof(*ev) || |
3693 | skb->len < nlh->nlmsg_len) { | 3677 | skb->len < nlh->nlmsg_len) { |
3694 | break; | 3678 | break; |
3695 | } | 3679 | } |
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c index 11c1738c2100..d175c5c5ccf8 100644 --- a/drivers/scsi/sd.c +++ b/drivers/scsi/sd.c | |||
@@ -715,13 +715,21 @@ static void sd_config_discard(struct scsi_disk *sdkp, unsigned int mode) | |||
715 | break; | 715 | break; |
716 | 716 | ||
717 | case SD_LBP_WS16: | 717 | case SD_LBP_WS16: |
718 | max_blocks = min_not_zero(sdkp->max_ws_blocks, | 718 | if (sdkp->device->unmap_limit_for_ws) |
719 | (u32)SD_MAX_WS16_BLOCKS); | 719 | max_blocks = sdkp->max_unmap_blocks; |
720 | else | ||
721 | max_blocks = sdkp->max_ws_blocks; | ||
722 | |||
723 | max_blocks = min_not_zero(max_blocks, (u32)SD_MAX_WS16_BLOCKS); | ||
720 | break; | 724 | break; |
721 | 725 | ||
722 | case SD_LBP_WS10: | 726 | case SD_LBP_WS10: |
723 | max_blocks = min_not_zero(sdkp->max_ws_blocks, | 727 | if (sdkp->device->unmap_limit_for_ws) |
724 | (u32)SD_MAX_WS10_BLOCKS); | 728 | max_blocks = sdkp->max_unmap_blocks; |
729 | else | ||
730 | max_blocks = sdkp->max_ws_blocks; | ||
731 | |||
732 | max_blocks = min_not_zero(max_blocks, (u32)SD_MAX_WS10_BLOCKS); | ||
725 | break; | 733 | break; |
726 | 734 | ||
727 | case SD_LBP_ZERO: | 735 | case SD_LBP_ZERO: |
@@ -2915,8 +2923,6 @@ static void sd_read_block_limits(struct scsi_disk *sdkp) | |||
2915 | sd_config_discard(sdkp, SD_LBP_WS16); | 2923 | sd_config_discard(sdkp, SD_LBP_WS16); |
2916 | else if (sdkp->lbpws10) | 2924 | else if (sdkp->lbpws10) |
2917 | sd_config_discard(sdkp, SD_LBP_WS10); | 2925 | sd_config_discard(sdkp, SD_LBP_WS10); |
2918 | else if (sdkp->lbpu && sdkp->max_unmap_blocks) | ||
2919 | sd_config_discard(sdkp, SD_LBP_UNMAP); | ||
2920 | else | 2926 | else |
2921 | sd_config_discard(sdkp, SD_LBP_DISABLE); | 2927 | sd_config_discard(sdkp, SD_LBP_DISABLE); |
2922 | } | 2928 | } |
@@ -3101,8 +3107,6 @@ static int sd_revalidate_disk(struct gendisk *disk) | |||
3101 | sd_read_security(sdkp, buffer); | 3107 | sd_read_security(sdkp, buffer); |
3102 | } | 3108 | } |
3103 | 3109 | ||
3104 | sdkp->first_scan = 0; | ||
3105 | |||
3106 | /* | 3110 | /* |
3107 | * We now have all cache related info, determine how we deal | 3111 | * We now have all cache related info, determine how we deal |
3108 | * with flush requests. | 3112 | * with flush requests. |
@@ -3117,7 +3121,7 @@ static int sd_revalidate_disk(struct gendisk *disk) | |||
3117 | q->limits.max_dev_sectors = logical_to_sectors(sdp, dev_max); | 3121 | q->limits.max_dev_sectors = logical_to_sectors(sdp, dev_max); |
3118 | 3122 | ||
3119 | /* | 3123 | /* |
3120 | * Use the device's preferred I/O size for reads and writes | 3124 | * Determine the device's preferred I/O size for reads and writes |
3121 | * unless the reported value is unreasonably small, large, or | 3125 | * unless the reported value is unreasonably small, large, or |
3122 | * garbage. | 3126 | * garbage. |
3123 | */ | 3127 | */ |
@@ -3131,8 +3135,19 @@ static int sd_revalidate_disk(struct gendisk *disk) | |||
3131 | rw_max = min_not_zero(logical_to_sectors(sdp, dev_max), | 3135 | rw_max = min_not_zero(logical_to_sectors(sdp, dev_max), |
3132 | (sector_t)BLK_DEF_MAX_SECTORS); | 3136 | (sector_t)BLK_DEF_MAX_SECTORS); |
3133 | 3137 | ||
3134 | /* Combine with controller limits */ | 3138 | /* Do not exceed controller limit */ |
3135 | q->limits.max_sectors = min(rw_max, queue_max_hw_sectors(q)); | 3139 | rw_max = min(rw_max, queue_max_hw_sectors(q)); |
3140 | |||
3141 | /* | ||
3142 | * Only update max_sectors if previously unset or if the current value | ||
3143 | * exceeds the capabilities of the hardware. | ||
3144 | */ | ||
3145 | if (sdkp->first_scan || | ||
3146 | q->limits.max_sectors > q->limits.max_dev_sectors || | ||
3147 | q->limits.max_sectors > q->limits.max_hw_sectors) | ||
3148 | q->limits.max_sectors = rw_max; | ||
3149 | |||
3150 | sdkp->first_scan = 0; | ||
3136 | 3151 | ||
3137 | set_capacity(disk, logical_to_sectors(sdp, sdkp->capacity)); | 3152 | set_capacity(disk, logical_to_sectors(sdp, sdkp->capacity)); |
3138 | sd_config_write_same(sdkp); | 3153 | sd_config_write_same(sdkp); |
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c index cf0e71db9e51..0419c2298eab 100644 --- a/drivers/scsi/sg.c +++ b/drivers/scsi/sg.c | |||
@@ -828,6 +828,39 @@ static int max_sectors_bytes(struct request_queue *q) | |||
828 | return max_sectors << 9; | 828 | return max_sectors << 9; |
829 | } | 829 | } |
830 | 830 | ||
831 | static void | ||
832 | sg_fill_request_table(Sg_fd *sfp, sg_req_info_t *rinfo) | ||
833 | { | ||
834 | Sg_request *srp; | ||
835 | int val; | ||
836 | unsigned int ms; | ||
837 | |||
838 | val = 0; | ||
839 | list_for_each_entry(srp, &sfp->rq_list, entry) { | ||
840 | if (val > SG_MAX_QUEUE) | ||
841 | break; | ||
842 | rinfo[val].req_state = srp->done + 1; | ||
843 | rinfo[val].problem = | ||
844 | srp->header.masked_status & | ||
845 | srp->header.host_status & | ||
846 | srp->header.driver_status; | ||
847 | if (srp->done) | ||
848 | rinfo[val].duration = | ||
849 | srp->header.duration; | ||
850 | else { | ||
851 | ms = jiffies_to_msecs(jiffies); | ||
852 | rinfo[val].duration = | ||
853 | (ms > srp->header.duration) ? | ||
854 | (ms - srp->header.duration) : 0; | ||
855 | } | ||
856 | rinfo[val].orphan = srp->orphan; | ||
857 | rinfo[val].sg_io_owned = srp->sg_io_owned; | ||
858 | rinfo[val].pack_id = srp->header.pack_id; | ||
859 | rinfo[val].usr_ptr = srp->header.usr_ptr; | ||
860 | val++; | ||
861 | } | ||
862 | } | ||
863 | |||
831 | static long | 864 | static long |
832 | sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg) | 865 | sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg) |
833 | { | 866 | { |
@@ -1012,38 +1045,13 @@ sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg) | |||
1012 | return -EFAULT; | 1045 | return -EFAULT; |
1013 | else { | 1046 | else { |
1014 | sg_req_info_t *rinfo; | 1047 | sg_req_info_t *rinfo; |
1015 | unsigned int ms; | ||
1016 | 1048 | ||
1017 | rinfo = kmalloc(SZ_SG_REQ_INFO * SG_MAX_QUEUE, | 1049 | rinfo = kzalloc(SZ_SG_REQ_INFO * SG_MAX_QUEUE, |
1018 | GFP_KERNEL); | 1050 | GFP_KERNEL); |
1019 | if (!rinfo) | 1051 | if (!rinfo) |
1020 | return -ENOMEM; | 1052 | return -ENOMEM; |
1021 | read_lock_irqsave(&sfp->rq_list_lock, iflags); | 1053 | read_lock_irqsave(&sfp->rq_list_lock, iflags); |
1022 | val = 0; | 1054 | sg_fill_request_table(sfp, rinfo); |
1023 | list_for_each_entry(srp, &sfp->rq_list, entry) { | ||
1024 | if (val >= SG_MAX_QUEUE) | ||
1025 | break; | ||
1026 | memset(&rinfo[val], 0, SZ_SG_REQ_INFO); | ||
1027 | rinfo[val].req_state = srp->done + 1; | ||
1028 | rinfo[val].problem = | ||
1029 | srp->header.masked_status & | ||
1030 | srp->header.host_status & | ||
1031 | srp->header.driver_status; | ||
1032 | if (srp->done) | ||
1033 | rinfo[val].duration = | ||
1034 | srp->header.duration; | ||
1035 | else { | ||
1036 | ms = jiffies_to_msecs(jiffies); | ||
1037 | rinfo[val].duration = | ||
1038 | (ms > srp->header.duration) ? | ||
1039 | (ms - srp->header.duration) : 0; | ||
1040 | } | ||
1041 | rinfo[val].orphan = srp->orphan; | ||
1042 | rinfo[val].sg_io_owned = srp->sg_io_owned; | ||
1043 | rinfo[val].pack_id = srp->header.pack_id; | ||
1044 | rinfo[val].usr_ptr = srp->header.usr_ptr; | ||
1045 | val++; | ||
1046 | } | ||
1047 | read_unlock_irqrestore(&sfp->rq_list_lock, iflags); | 1055 | read_unlock_irqrestore(&sfp->rq_list_lock, iflags); |
1048 | result = __copy_to_user(p, rinfo, | 1056 | result = __copy_to_user(p, rinfo, |
1049 | SZ_SG_REQ_INFO * SG_MAX_QUEUE); | 1057 | SZ_SG_REQ_INFO * SG_MAX_QUEUE); |
diff --git a/drivers/staging/iio/adc/ad7192.c b/drivers/staging/iio/adc/ad7192.c index d577d7b32c71..1617628c34f9 100644 --- a/drivers/staging/iio/adc/ad7192.c +++ b/drivers/staging/iio/adc/ad7192.c | |||
@@ -223,11 +223,9 @@ static int ad7192_setup(struct ad7192_state *st, | |||
223 | struct iio_dev *indio_dev = spi_get_drvdata(st->sd.spi); | 223 | struct iio_dev *indio_dev = spi_get_drvdata(st->sd.spi); |
224 | unsigned long long scale_uv; | 224 | unsigned long long scale_uv; |
225 | int i, ret, id; | 225 | int i, ret, id; |
226 | u8 ones[6]; | ||
227 | 226 | ||
228 | /* reset the serial interface */ | 227 | /* reset the serial interface */ |
229 | memset(&ones, 0xFF, 6); | 228 | ret = ad_sd_reset(&st->sd, 48); |
230 | ret = spi_write(st->sd.spi, &ones, 6); | ||
231 | if (ret < 0) | 229 | if (ret < 0) |
232 | goto out; | 230 | goto out; |
233 | usleep_range(500, 1000); /* Wait for at least 500us */ | 231 | usleep_range(500, 1000); /* Wait for at least 500us */ |
diff --git a/drivers/staging/mt29f_spinand/mt29f_spinand.c b/drivers/staging/mt29f_spinand/mt29f_spinand.c index 13eaf16ecd16..87595c594b12 100644 --- a/drivers/staging/mt29f_spinand/mt29f_spinand.c +++ b/drivers/staging/mt29f_spinand/mt29f_spinand.c | |||
@@ -496,8 +496,12 @@ static int spinand_program_page(struct spi_device *spi_nand, | |||
496 | if (!wbuf) | 496 | if (!wbuf) |
497 | return -ENOMEM; | 497 | return -ENOMEM; |
498 | 498 | ||
499 | enable_read_hw_ecc = 0; | 499 | enable_read_hw_ecc = 1; |
500 | spinand_read_page(spi_nand, page_id, 0, CACHE_BUF, wbuf); | 500 | retval = spinand_read_page(spi_nand, page_id, 0, CACHE_BUF, wbuf); |
501 | if (retval < 0) { | ||
502 | dev_err(&spi_nand->dev, "ecc error on read page!!!\n"); | ||
503 | return retval; | ||
504 | } | ||
501 | 505 | ||
502 | for (i = offset, j = 0; i < len; i++, j++) | 506 | for (i = offset, j = 0; i < len; i++, j++) |
503 | wbuf[i] &= buf[j]; | 507 | wbuf[i] &= buf[j]; |
diff --git a/drivers/staging/pi433/rf69.c b/drivers/staging/pi433/rf69.c index f5b90aa759ea..0305edc16861 100644 --- a/drivers/staging/pi433/rf69.c +++ b/drivers/staging/pi433/rf69.c | |||
@@ -570,12 +570,6 @@ int rf69_set_dio_mapping(struct spi_device *spi, u8 DIONumber, u8 value) | |||
570 | dev_dbg(&spi->dev, "set: DIO mapping"); | 570 | dev_dbg(&spi->dev, "set: DIO mapping"); |
571 | #endif | 571 | #endif |
572 | 572 | ||
573 | // check DIO number | ||
574 | if (DIONumber > 5) { | ||
575 | dev_dbg(&spi->dev, "set: illegal input param"); | ||
576 | return -EINVAL; | ||
577 | } | ||
578 | |||
579 | switch (DIONumber) { | 573 | switch (DIONumber) { |
580 | case 0: mask=MASK_DIO0; shift=SHIFT_DIO0; regaddr=REG_DIOMAPPING1; break; | 574 | case 0: mask=MASK_DIO0; shift=SHIFT_DIO0; regaddr=REG_DIOMAPPING1; break; |
581 | case 1: mask=MASK_DIO1; shift=SHIFT_DIO1; regaddr=REG_DIOMAPPING1; break; | 575 | case 1: mask=MASK_DIO1; shift=SHIFT_DIO1; regaddr=REG_DIOMAPPING1; break; |
@@ -583,6 +577,9 @@ int rf69_set_dio_mapping(struct spi_device *spi, u8 DIONumber, u8 value) | |||
583 | case 3: mask=MASK_DIO3; shift=SHIFT_DIO3; regaddr=REG_DIOMAPPING1; break; | 577 | case 3: mask=MASK_DIO3; shift=SHIFT_DIO3; regaddr=REG_DIOMAPPING1; break; |
584 | case 4: mask=MASK_DIO4; shift=SHIFT_DIO4; regaddr=REG_DIOMAPPING2; break; | 578 | case 4: mask=MASK_DIO4; shift=SHIFT_DIO4; regaddr=REG_DIOMAPPING2; break; |
585 | case 5: mask=MASK_DIO5; shift=SHIFT_DIO5; regaddr=REG_DIOMAPPING2; break; | 579 | case 5: mask=MASK_DIO5; shift=SHIFT_DIO5; regaddr=REG_DIOMAPPING2; break; |
580 | default: | ||
581 | dev_dbg(&spi->dev, "set: illegal input param"); | ||
582 | return -EINVAL; | ||
586 | } | 583 | } |
587 | 584 | ||
588 | // read reg | 585 | // read reg |
diff --git a/drivers/staging/rtl8723bs/core/rtw_mlme.c b/drivers/staging/rtl8723bs/core/rtw_mlme.c index 008063a181cb..f9247a0a1539 100644 --- a/drivers/staging/rtl8723bs/core/rtw_mlme.c +++ b/drivers/staging/rtl8723bs/core/rtw_mlme.c | |||
@@ -116,9 +116,8 @@ void rtw_free_mlme_priv_ie_data(struct mlme_priv *pmlmepriv) | |||
116 | 116 | ||
117 | void _rtw_free_mlme_priv(struct mlme_priv *pmlmepriv) | 117 | void _rtw_free_mlme_priv(struct mlme_priv *pmlmepriv) |
118 | { | 118 | { |
119 | rtw_free_mlme_priv_ie_data(pmlmepriv); | ||
120 | |||
121 | if (pmlmepriv) { | 119 | if (pmlmepriv) { |
120 | rtw_free_mlme_priv_ie_data(pmlmepriv); | ||
122 | if (pmlmepriv->free_bss_buf) { | 121 | if (pmlmepriv->free_bss_buf) { |
123 | vfree(pmlmepriv->free_bss_buf); | 122 | vfree(pmlmepriv->free_bss_buf); |
124 | } | 123 | } |
diff --git a/drivers/staging/rtl8723bs/os_dep/rtw_proc.c b/drivers/staging/rtl8723bs/os_dep/rtw_proc.c index 92277457aba4..ce1dd6f9036f 100644 --- a/drivers/staging/rtl8723bs/os_dep/rtw_proc.c +++ b/drivers/staging/rtl8723bs/os_dep/rtw_proc.c | |||
@@ -311,6 +311,8 @@ static ssize_t proc_set_cam(struct file *file, const char __user *buffer, size_t | |||
311 | 311 | ||
312 | if (num < 2) | 312 | if (num < 2) |
313 | return count; | 313 | return count; |
314 | if (id >= TOTAL_CAM_ENTRY) | ||
315 | return -EINVAL; | ||
314 | 316 | ||
315 | if (strcmp("c", cmd) == 0) { | 317 | if (strcmp("c", cmd) == 0) { |
316 | _clear_cam_entry(adapter, id); | 318 | _clear_cam_entry(adapter, id); |
diff --git a/drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_api_88xx.c b/drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_api_88xx.c index 5f84526cb5b5..edbf6af1c8b7 100644 --- a/drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_api_88xx.c +++ b/drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_api_88xx.c | |||
@@ -2901,11 +2901,11 @@ halmac_update_datapack_88xx(struct halmac_adapter *halmac_adapter, | |||
2901 | if (halmac_adapter->fw_version.h2c_version < 4) | 2901 | if (halmac_adapter->fw_version.h2c_version < 4) |
2902 | return HALMAC_RET_FW_NO_SUPPORT; | 2902 | return HALMAC_RET_FW_NO_SUPPORT; |
2903 | 2903 | ||
2904 | driver_adapter = halmac_adapter->driver_adapter; | ||
2905 | |||
2904 | HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_H2C, DBG_DMESG, | 2906 | HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_H2C, DBG_DMESG, |
2905 | "[TRACE]%s ==========>\n", __func__); | 2907 | "[TRACE]%s ==========>\n", __func__); |
2906 | 2908 | ||
2907 | driver_adapter = halmac_adapter->driver_adapter; | ||
2908 | |||
2909 | HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_H2C, DBG_DMESG, | 2909 | HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_H2C, DBG_DMESG, |
2910 | "[TRACE]%s <==========\n", __func__); | 2910 | "[TRACE]%s <==========\n", __func__); |
2911 | 2911 | ||
diff --git a/drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_func_88xx.c b/drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_func_88xx.c index c4a8eb4c82bb..c4cb217d3d1f 100644 --- a/drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_func_88xx.c +++ b/drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_func_88xx.c | |||
@@ -1603,10 +1603,11 @@ halmac_send_h2c_set_pwr_mode_88xx(struct halmac_adapter *halmac_adapter, | |||
1603 | void *driver_adapter = NULL; | 1603 | void *driver_adapter = NULL; |
1604 | enum halmac_ret_status status = HALMAC_RET_SUCCESS; | 1604 | enum halmac_ret_status status = HALMAC_RET_SUCCESS; |
1605 | 1605 | ||
1606 | driver_adapter = halmac_adapter->driver_adapter; | ||
1607 | |||
1606 | HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_H2C, DBG_DMESG, | 1608 | HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_H2C, DBG_DMESG, |
1607 | "%s!!\n", __func__); | 1609 | "%s!!\n", __func__); |
1608 | 1610 | ||
1609 | driver_adapter = halmac_adapter->driver_adapter; | ||
1610 | h2c_header = h2c_buff; | 1611 | h2c_header = h2c_buff; |
1611 | h2c_cmd = h2c_header + HALMAC_H2C_CMD_HDR_SIZE_88XX; | 1612 | h2c_cmd = h2c_header + HALMAC_H2C_CMD_HDR_SIZE_88XX; |
1612 | 1613 | ||
@@ -1698,10 +1699,11 @@ halmac_media_status_rpt_88xx(struct halmac_adapter *halmac_adapter, u8 op_mode, | |||
1698 | void *driver_adapter = NULL; | 1699 | void *driver_adapter = NULL; |
1699 | enum halmac_ret_status status = HALMAC_RET_SUCCESS; | 1700 | enum halmac_ret_status status = HALMAC_RET_SUCCESS; |
1700 | 1701 | ||
1702 | driver_adapter = halmac_adapter->driver_adapter; | ||
1703 | |||
1701 | HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_H2C, DBG_DMESG, | 1704 | HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_H2C, DBG_DMESG, |
1702 | "halmac_send_h2c_set_pwr_mode_88xx!!\n"); | 1705 | "halmac_send_h2c_set_pwr_mode_88xx!!\n"); |
1703 | 1706 | ||
1704 | driver_adapter = halmac_adapter->driver_adapter; | ||
1705 | h2c_header = H2c_buff; | 1707 | h2c_header = H2c_buff; |
1706 | h2c_cmd = h2c_header + HALMAC_H2C_CMD_HDR_SIZE_88XX; | 1708 | h2c_cmd = h2c_header + HALMAC_H2C_CMD_HDR_SIZE_88XX; |
1707 | 1709 | ||
@@ -2128,10 +2130,11 @@ halmac_func_ctrl_ch_switch_88xx(struct halmac_adapter *halmac_adapter, | |||
2128 | enum halmac_cmd_process_status *process_status = | 2130 | enum halmac_cmd_process_status *process_status = |
2129 | &halmac_adapter->halmac_state.scan_state_set.process_status; | 2131 | &halmac_adapter->halmac_state.scan_state_set.process_status; |
2130 | 2132 | ||
2133 | driver_adapter = halmac_adapter->driver_adapter; | ||
2134 | |||
2131 | HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_H2C, DBG_DMESG, | 2135 | HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_H2C, DBG_DMESG, |
2132 | "halmac_ctrl_ch_switch!!\n"); | 2136 | "halmac_ctrl_ch_switch!!\n"); |
2133 | 2137 | ||
2134 | driver_adapter = halmac_adapter->driver_adapter; | ||
2135 | halmac_api = (struct halmac_api *)halmac_adapter->halmac_api; | 2138 | halmac_api = (struct halmac_api *)halmac_adapter->halmac_api; |
2136 | 2139 | ||
2137 | if (halmac_transition_scan_state_88xx( | 2140 | if (halmac_transition_scan_state_88xx( |
@@ -2261,15 +2264,13 @@ enum halmac_ret_status halmac_send_h2c_update_bcn_parse_info_88xx( | |||
2261 | { | 2264 | { |
2262 | u8 h2c_buff[HALMAC_H2C_CMD_SIZE_88XX] = {0}; | 2265 | u8 h2c_buff[HALMAC_H2C_CMD_SIZE_88XX] = {0}; |
2263 | u16 h2c_seq_mum = 0; | 2266 | u16 h2c_seq_mum = 0; |
2264 | void *driver_adapter = NULL; | 2267 | void *driver_adapter = halmac_adapter->driver_adapter; |
2265 | struct halmac_h2c_header_info h2c_header_info; | 2268 | struct halmac_h2c_header_info h2c_header_info; |
2266 | enum halmac_ret_status status = HALMAC_RET_SUCCESS; | 2269 | enum halmac_ret_status status = HALMAC_RET_SUCCESS; |
2267 | 2270 | ||
2268 | HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_H2C, DBG_DMESG, | 2271 | HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_H2C, DBG_DMESG, |
2269 | "%s!!\n", __func__); | 2272 | "%s!!\n", __func__); |
2270 | 2273 | ||
2271 | driver_adapter = halmac_adapter->driver_adapter; | ||
2272 | |||
2273 | UPDATE_BEACON_PARSING_INFO_SET_FUNC_EN(h2c_buff, bcn_ie_info->func_en); | 2274 | UPDATE_BEACON_PARSING_INFO_SET_FUNC_EN(h2c_buff, bcn_ie_info->func_en); |
2274 | UPDATE_BEACON_PARSING_INFO_SET_SIZE_TH(h2c_buff, bcn_ie_info->size_th); | 2275 | UPDATE_BEACON_PARSING_INFO_SET_SIZE_TH(h2c_buff, bcn_ie_info->size_th); |
2275 | UPDATE_BEACON_PARSING_INFO_SET_TIMEOUT(h2c_buff, bcn_ie_info->timeout); | 2276 | UPDATE_BEACON_PARSING_INFO_SET_TIMEOUT(h2c_buff, bcn_ie_info->timeout); |
diff --git a/drivers/staging/speakup/main.c b/drivers/staging/speakup/main.c index e215d05fcffe..3809cd2ad838 100644 --- a/drivers/staging/speakup/main.c +++ b/drivers/staging/speakup/main.c | |||
@@ -1376,6 +1376,8 @@ static void reset_highlight_buffers(struct vc_data *); | |||
1376 | 1376 | ||
1377 | static int read_all_key; | 1377 | static int read_all_key; |
1378 | 1378 | ||
1379 | static int in_keyboard_notifier; | ||
1380 | |||
1379 | static void start_read_all_timer(struct vc_data *vc, int command); | 1381 | static void start_read_all_timer(struct vc_data *vc, int command); |
1380 | 1382 | ||
1381 | enum { | 1383 | enum { |
@@ -1408,7 +1410,10 @@ static void read_all_doc(struct vc_data *vc) | |||
1408 | cursor_track = read_all_mode; | 1410 | cursor_track = read_all_mode; |
1409 | spk_reset_index_count(0); | 1411 | spk_reset_index_count(0); |
1410 | if (get_sentence_buf(vc, 0) == -1) { | 1412 | if (get_sentence_buf(vc, 0) == -1) { |
1411 | kbd_fakekey2(vc, RA_DOWN_ARROW); | 1413 | del_timer(&cursor_timer); |
1414 | if (!in_keyboard_notifier) | ||
1415 | speakup_fake_down_arrow(); | ||
1416 | start_read_all_timer(vc, RA_DOWN_ARROW); | ||
1412 | } else { | 1417 | } else { |
1413 | say_sentence_num(0, 0); | 1418 | say_sentence_num(0, 0); |
1414 | synth_insert_next_index(0); | 1419 | synth_insert_next_index(0); |
@@ -2212,8 +2217,10 @@ static int keyboard_notifier_call(struct notifier_block *nb, | |||
2212 | int ret = NOTIFY_OK; | 2217 | int ret = NOTIFY_OK; |
2213 | static int keycode; /* to hold the current keycode */ | 2218 | static int keycode; /* to hold the current keycode */ |
2214 | 2219 | ||
2220 | in_keyboard_notifier = 1; | ||
2221 | |||
2215 | if (vc->vc_mode == KD_GRAPHICS) | 2222 | if (vc->vc_mode == KD_GRAPHICS) |
2216 | return ret; | 2223 | goto out; |
2217 | 2224 | ||
2218 | /* | 2225 | /* |
2219 | * First, determine whether we are handling a fake keypress on | 2226 | * First, determine whether we are handling a fake keypress on |
@@ -2225,7 +2232,7 @@ static int keyboard_notifier_call(struct notifier_block *nb, | |||
2225 | */ | 2232 | */ |
2226 | 2233 | ||
2227 | if (speakup_fake_key_pressed()) | 2234 | if (speakup_fake_key_pressed()) |
2228 | return ret; | 2235 | goto out; |
2229 | 2236 | ||
2230 | switch (code) { | 2237 | switch (code) { |
2231 | case KBD_KEYCODE: | 2238 | case KBD_KEYCODE: |
@@ -2266,6 +2273,8 @@ static int keyboard_notifier_call(struct notifier_block *nb, | |||
2266 | break; | 2273 | break; |
2267 | } | 2274 | } |
2268 | } | 2275 | } |
2276 | out: | ||
2277 | in_keyboard_notifier = 0; | ||
2269 | return ret; | 2278 | return ret; |
2270 | } | 2279 | } |
2271 | 2280 | ||
diff --git a/drivers/staging/unisys/visorbus/visorchipset.c b/drivers/staging/unisys/visorbus/visorchipset.c index d9045a4b7620..fed554a43151 100644 --- a/drivers/staging/unisys/visorbus/visorchipset.c +++ b/drivers/staging/unisys/visorbus/visorchipset.c | |||
@@ -1668,7 +1668,7 @@ static __init int visorutil_spar_detect(void) | |||
1668 | return 0; | 1668 | return 0; |
1669 | } | 1669 | } |
1670 | 1670 | ||
1671 | static int init_unisys(void) | 1671 | static int __init init_unisys(void) |
1672 | { | 1672 | { |
1673 | int result; | 1673 | int result; |
1674 | 1674 | ||
@@ -1681,7 +1681,7 @@ static int init_unisys(void) | |||
1681 | return 0; | 1681 | return 0; |
1682 | }; | 1682 | }; |
1683 | 1683 | ||
1684 | static void exit_unisys(void) | 1684 | static void __exit exit_unisys(void) |
1685 | { | 1685 | { |
1686 | acpi_bus_unregister_driver(&unisys_acpi_driver); | 1686 | acpi_bus_unregister_driver(&unisys_acpi_driver); |
1687 | } | 1687 | } |
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c index a96bae0dd6a6..81827b8a7601 100644 --- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c +++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c | |||
@@ -610,18 +610,20 @@ free_pagelist(struct vchiq_pagelist_info *pagelistinfo, | |||
610 | if (head_bytes > actual) | 610 | if (head_bytes > actual) |
611 | head_bytes = actual; | 611 | head_bytes = actual; |
612 | 612 | ||
613 | memcpy((char *)page_address(pages[0]) + | 613 | memcpy((char *)kmap(pages[0]) + |
614 | pagelist->offset, | 614 | pagelist->offset, |
615 | fragments, | 615 | fragments, |
616 | head_bytes); | 616 | head_bytes); |
617 | kunmap(pages[0]); | ||
617 | } | 618 | } |
618 | if ((actual >= 0) && (head_bytes < actual) && | 619 | if ((actual >= 0) && (head_bytes < actual) && |
619 | (tail_bytes != 0)) { | 620 | (tail_bytes != 0)) { |
620 | memcpy((char *)page_address(pages[num_pages - 1]) + | 621 | memcpy((char *)kmap(pages[num_pages - 1]) + |
621 | ((pagelist->offset + actual) & | 622 | ((pagelist->offset + actual) & |
622 | (PAGE_SIZE - 1) & ~(g_cache_line_size - 1)), | 623 | (PAGE_SIZE - 1) & ~(g_cache_line_size - 1)), |
623 | fragments + g_cache_line_size, | 624 | fragments + g_cache_line_size, |
624 | tail_bytes); | 625 | tail_bytes); |
626 | kunmap(pages[num_pages - 1]); | ||
625 | } | 627 | } |
626 | 628 | ||
627 | down(&g_free_fragments_mutex); | 629 | down(&g_free_fragments_mutex); |
diff --git a/drivers/tty/mxser.c b/drivers/tty/mxser.c index 1c0c9553bc05..7dd38047ba23 100644 --- a/drivers/tty/mxser.c +++ b/drivers/tty/mxser.c | |||
@@ -246,11 +246,11 @@ struct mxser_port { | |||
246 | unsigned char err_shadow; | 246 | unsigned char err_shadow; |
247 | 247 | ||
248 | struct async_icount icount; /* kernel counters for 4 input interrupts */ | 248 | struct async_icount icount; /* kernel counters for 4 input interrupts */ |
249 | int timeout; | 249 | unsigned int timeout; |
250 | 250 | ||
251 | int read_status_mask; | 251 | int read_status_mask; |
252 | int ignore_status_mask; | 252 | int ignore_status_mask; |
253 | int xmit_fifo_size; | 253 | unsigned int xmit_fifo_size; |
254 | int xmit_head; | 254 | int xmit_head; |
255 | int xmit_tail; | 255 | int xmit_tail; |
256 | int xmit_cnt; | 256 | int xmit_cnt; |
@@ -572,8 +572,9 @@ static void mxser_dtr_rts(struct tty_port *port, int on) | |||
572 | static int mxser_set_baud(struct tty_struct *tty, long newspd) | 572 | static int mxser_set_baud(struct tty_struct *tty, long newspd) |
573 | { | 573 | { |
574 | struct mxser_port *info = tty->driver_data; | 574 | struct mxser_port *info = tty->driver_data; |
575 | int quot = 0, baud; | 575 | unsigned int quot = 0, baud; |
576 | unsigned char cval; | 576 | unsigned char cval; |
577 | u64 timeout; | ||
577 | 578 | ||
578 | if (!info->ioaddr) | 579 | if (!info->ioaddr) |
579 | return -1; | 580 | return -1; |
@@ -594,8 +595,13 @@ static int mxser_set_baud(struct tty_struct *tty, long newspd) | |||
594 | quot = 0; | 595 | quot = 0; |
595 | } | 596 | } |
596 | 597 | ||
597 | info->timeout = ((info->xmit_fifo_size * HZ * 10 * quot) / info->baud_base); | 598 | /* |
598 | info->timeout += HZ / 50; /* Add .02 seconds of slop */ | 599 | * worst case (128 * 1000 * 10 * 18432) needs 35 bits, so divide in the |
600 | * u64 domain | ||
601 | */ | ||
602 | timeout = (u64)info->xmit_fifo_size * HZ * 10 * quot; | ||
603 | do_div(timeout, info->baud_base); | ||
604 | info->timeout = timeout + HZ / 50; /* Add .02 seconds of slop */ | ||
599 | 605 | ||
600 | if (quot) { | 606 | if (quot) { |
601 | info->MCR |= UART_MCR_DTR; | 607 | info->MCR |= UART_MCR_DTR; |
diff --git a/drivers/tty/serial/bcm63xx_uart.c b/drivers/tty/serial/bcm63xx_uart.c index 583c9a0c7ecc..8c48c3784831 100644 --- a/drivers/tty/serial/bcm63xx_uart.c +++ b/drivers/tty/serial/bcm63xx_uart.c | |||
@@ -507,9 +507,14 @@ static void bcm_uart_set_termios(struct uart_port *port, | |||
507 | { | 507 | { |
508 | unsigned int ctl, baud, quot, ier; | 508 | unsigned int ctl, baud, quot, ier; |
509 | unsigned long flags; | 509 | unsigned long flags; |
510 | int tries; | ||
510 | 511 | ||
511 | spin_lock_irqsave(&port->lock, flags); | 512 | spin_lock_irqsave(&port->lock, flags); |
512 | 513 | ||
514 | /* Drain the hot tub fully before we power it off for the winter. */ | ||
515 | for (tries = 3; !bcm_uart_tx_empty(port) && tries; tries--) | ||
516 | mdelay(10); | ||
517 | |||
513 | /* disable uart while changing speed */ | 518 | /* disable uart while changing speed */ |
514 | bcm_uart_disable(port); | 519 | bcm_uart_disable(port); |
515 | bcm_uart_flush(port); | 520 | bcm_uart_flush(port); |
diff --git a/drivers/tty/serial/fsl_lpuart.c b/drivers/tty/serial/fsl_lpuart.c index 849c1f9991ce..f0252184291e 100644 --- a/drivers/tty/serial/fsl_lpuart.c +++ b/drivers/tty/serial/fsl_lpuart.c | |||
@@ -1276,7 +1276,6 @@ static void rx_dma_timer_init(struct lpuart_port *sport) | |||
1276 | static int lpuart_startup(struct uart_port *port) | 1276 | static int lpuart_startup(struct uart_port *port) |
1277 | { | 1277 | { |
1278 | struct lpuart_port *sport = container_of(port, struct lpuart_port, port); | 1278 | struct lpuart_port *sport = container_of(port, struct lpuart_port, port); |
1279 | int ret; | ||
1280 | unsigned long flags; | 1279 | unsigned long flags; |
1281 | unsigned char temp; | 1280 | unsigned char temp; |
1282 | 1281 | ||
@@ -1291,11 +1290,6 @@ static int lpuart_startup(struct uart_port *port) | |||
1291 | sport->rxfifo_size = 0x1 << (((temp >> UARTPFIFO_RXSIZE_OFF) & | 1290 | sport->rxfifo_size = 0x1 << (((temp >> UARTPFIFO_RXSIZE_OFF) & |
1292 | UARTPFIFO_FIFOSIZE_MASK) + 1); | 1291 | UARTPFIFO_FIFOSIZE_MASK) + 1); |
1293 | 1292 | ||
1294 | ret = devm_request_irq(port->dev, port->irq, lpuart_int, 0, | ||
1295 | DRIVER_NAME, sport); | ||
1296 | if (ret) | ||
1297 | return ret; | ||
1298 | |||
1299 | spin_lock_irqsave(&sport->port.lock, flags); | 1293 | spin_lock_irqsave(&sport->port.lock, flags); |
1300 | 1294 | ||
1301 | lpuart_setup_watermark(sport); | 1295 | lpuart_setup_watermark(sport); |
@@ -1333,7 +1327,6 @@ static int lpuart_startup(struct uart_port *port) | |||
1333 | static int lpuart32_startup(struct uart_port *port) | 1327 | static int lpuart32_startup(struct uart_port *port) |
1334 | { | 1328 | { |
1335 | struct lpuart_port *sport = container_of(port, struct lpuart_port, port); | 1329 | struct lpuart_port *sport = container_of(port, struct lpuart_port, port); |
1336 | int ret; | ||
1337 | unsigned long flags; | 1330 | unsigned long flags; |
1338 | unsigned long temp; | 1331 | unsigned long temp; |
1339 | 1332 | ||
@@ -1346,11 +1339,6 @@ static int lpuart32_startup(struct uart_port *port) | |||
1346 | sport->rxfifo_size = 0x1 << (((temp >> UARTFIFO_RXSIZE_OFF) & | 1339 | sport->rxfifo_size = 0x1 << (((temp >> UARTFIFO_RXSIZE_OFF) & |
1347 | UARTFIFO_FIFOSIZE_MASK) - 1); | 1340 | UARTFIFO_FIFOSIZE_MASK) - 1); |
1348 | 1341 | ||
1349 | ret = devm_request_irq(port->dev, port->irq, lpuart32_int, 0, | ||
1350 | DRIVER_NAME, sport); | ||
1351 | if (ret) | ||
1352 | return ret; | ||
1353 | |||
1354 | spin_lock_irqsave(&sport->port.lock, flags); | 1342 | spin_lock_irqsave(&sport->port.lock, flags); |
1355 | 1343 | ||
1356 | lpuart32_setup_watermark(sport); | 1344 | lpuart32_setup_watermark(sport); |
@@ -1380,8 +1368,6 @@ static void lpuart_shutdown(struct uart_port *port) | |||
1380 | 1368 | ||
1381 | spin_unlock_irqrestore(&port->lock, flags); | 1369 | spin_unlock_irqrestore(&port->lock, flags); |
1382 | 1370 | ||
1383 | devm_free_irq(port->dev, port->irq, sport); | ||
1384 | |||
1385 | if (sport->lpuart_dma_rx_use) { | 1371 | if (sport->lpuart_dma_rx_use) { |
1386 | del_timer_sync(&sport->lpuart_timer); | 1372 | del_timer_sync(&sport->lpuart_timer); |
1387 | lpuart_dma_rx_free(&sport->port); | 1373 | lpuart_dma_rx_free(&sport->port); |
@@ -1400,7 +1386,6 @@ static void lpuart_shutdown(struct uart_port *port) | |||
1400 | 1386 | ||
1401 | static void lpuart32_shutdown(struct uart_port *port) | 1387 | static void lpuart32_shutdown(struct uart_port *port) |
1402 | { | 1388 | { |
1403 | struct lpuart_port *sport = container_of(port, struct lpuart_port, port); | ||
1404 | unsigned long temp; | 1389 | unsigned long temp; |
1405 | unsigned long flags; | 1390 | unsigned long flags; |
1406 | 1391 | ||
@@ -1413,8 +1398,6 @@ static void lpuart32_shutdown(struct uart_port *port) | |||
1413 | lpuart32_write(port, temp, UARTCTRL); | 1398 | lpuart32_write(port, temp, UARTCTRL); |
1414 | 1399 | ||
1415 | spin_unlock_irqrestore(&port->lock, flags); | 1400 | spin_unlock_irqrestore(&port->lock, flags); |
1416 | |||
1417 | devm_free_irq(port->dev, port->irq, sport); | ||
1418 | } | 1401 | } |
1419 | 1402 | ||
1420 | static void | 1403 | static void |
@@ -2212,16 +2195,22 @@ static int lpuart_probe(struct platform_device *pdev) | |||
2212 | 2195 | ||
2213 | platform_set_drvdata(pdev, &sport->port); | 2196 | platform_set_drvdata(pdev, &sport->port); |
2214 | 2197 | ||
2215 | if (lpuart_is_32(sport)) | 2198 | if (lpuart_is_32(sport)) { |
2216 | lpuart_reg.cons = LPUART32_CONSOLE; | 2199 | lpuart_reg.cons = LPUART32_CONSOLE; |
2217 | else | 2200 | ret = devm_request_irq(&pdev->dev, sport->port.irq, lpuart32_int, 0, |
2201 | DRIVER_NAME, sport); | ||
2202 | } else { | ||
2218 | lpuart_reg.cons = LPUART_CONSOLE; | 2203 | lpuart_reg.cons = LPUART_CONSOLE; |
2204 | ret = devm_request_irq(&pdev->dev, sport->port.irq, lpuart_int, 0, | ||
2205 | DRIVER_NAME, sport); | ||
2206 | } | ||
2207 | |||
2208 | if (ret) | ||
2209 | goto failed_irq_request; | ||
2219 | 2210 | ||
2220 | ret = uart_add_one_port(&lpuart_reg, &sport->port); | 2211 | ret = uart_add_one_port(&lpuart_reg, &sport->port); |
2221 | if (ret) { | 2212 | if (ret) |
2222 | clk_disable_unprepare(sport->clk); | 2213 | goto failed_attach_port; |
2223 | return ret; | ||
2224 | } | ||
2225 | 2214 | ||
2226 | sport->dma_tx_chan = dma_request_slave_channel(sport->port.dev, "tx"); | 2215 | sport->dma_tx_chan = dma_request_slave_channel(sport->port.dev, "tx"); |
2227 | if (!sport->dma_tx_chan) | 2216 | if (!sport->dma_tx_chan) |
@@ -2240,6 +2229,11 @@ static int lpuart_probe(struct platform_device *pdev) | |||
2240 | } | 2229 | } |
2241 | 2230 | ||
2242 | return 0; | 2231 | return 0; |
2232 | |||
2233 | failed_attach_port: | ||
2234 | failed_irq_request: | ||
2235 | clk_disable_unprepare(sport->clk); | ||
2236 | return ret; | ||
2243 | } | 2237 | } |
2244 | 2238 | ||
2245 | static int lpuart_remove(struct platform_device *pdev) | 2239 | static int lpuart_remove(struct platform_device *pdev) |
diff --git a/drivers/tty/serial/sccnxp.c b/drivers/tty/serial/sccnxp.c index cdd2f942317c..b9c7a904c1ea 100644 --- a/drivers/tty/serial/sccnxp.c +++ b/drivers/tty/serial/sccnxp.c | |||
@@ -889,7 +889,16 @@ static int sccnxp_probe(struct platform_device *pdev) | |||
889 | goto err_out; | 889 | goto err_out; |
890 | uartclk = 0; | 890 | uartclk = 0; |
891 | } else { | 891 | } else { |
892 | clk_prepare_enable(clk); | 892 | ret = clk_prepare_enable(clk); |
893 | if (ret) | ||
894 | goto err_out; | ||
895 | |||
896 | ret = devm_add_action_or_reset(&pdev->dev, | ||
897 | (void(*)(void *))clk_disable_unprepare, | ||
898 | clk); | ||
899 | if (ret) | ||
900 | goto err_out; | ||
901 | |||
893 | uartclk = clk_get_rate(clk); | 902 | uartclk = clk_get_rate(clk); |
894 | } | 903 | } |
895 | 904 | ||
@@ -988,7 +997,7 @@ static int sccnxp_probe(struct platform_device *pdev) | |||
988 | uart_unregister_driver(&s->uart); | 997 | uart_unregister_driver(&s->uart); |
989 | err_out: | 998 | err_out: |
990 | if (!IS_ERR(s->regulator)) | 999 | if (!IS_ERR(s->regulator)) |
991 | return regulator_disable(s->regulator); | 1000 | regulator_disable(s->regulator); |
992 | 1001 | ||
993 | return ret; | 1002 | return ret; |
994 | } | 1003 | } |
diff --git a/drivers/usb/class/cdc-wdm.c b/drivers/usb/class/cdc-wdm.c index 5aacea1978a5..3e865dbf878c 100644 --- a/drivers/usb/class/cdc-wdm.c +++ b/drivers/usb/class/cdc-wdm.c | |||
@@ -190,8 +190,10 @@ static void wdm_in_callback(struct urb *urb) | |||
190 | /* | 190 | /* |
191 | * only set a new error if there is no previous error. | 191 | * only set a new error if there is no previous error. |
192 | * Errors are only cleared during read/open | 192 | * Errors are only cleared during read/open |
193 | * Avoid propagating -EPIPE (stall) to userspace since it is | ||
194 | * better handled as an empty read | ||
193 | */ | 195 | */ |
194 | if (desc->rerr == 0) | 196 | if (desc->rerr == 0 && status != -EPIPE) |
195 | desc->rerr = status; | 197 | desc->rerr = status; |
196 | 198 | ||
197 | if (length + desc->length > desc->wMaxCommand) { | 199 | if (length + desc->length > desc->wMaxCommand) { |
diff --git a/drivers/usb/core/config.c b/drivers/usb/core/config.c index 4be52c602e9b..68b54bd88d1e 100644 --- a/drivers/usb/core/config.c +++ b/drivers/usb/core/config.c | |||
@@ -643,15 +643,23 @@ static int usb_parse_configuration(struct usb_device *dev, int cfgidx, | |||
643 | 643 | ||
644 | } else if (header->bDescriptorType == | 644 | } else if (header->bDescriptorType == |
645 | USB_DT_INTERFACE_ASSOCIATION) { | 645 | USB_DT_INTERFACE_ASSOCIATION) { |
646 | struct usb_interface_assoc_descriptor *d; | ||
647 | |||
648 | d = (struct usb_interface_assoc_descriptor *)header; | ||
649 | if (d->bLength < USB_DT_INTERFACE_ASSOCIATION_SIZE) { | ||
650 | dev_warn(ddev, | ||
651 | "config %d has an invalid interface association descriptor of length %d, skipping\n", | ||
652 | cfgno, d->bLength); | ||
653 | continue; | ||
654 | } | ||
655 | |||
646 | if (iad_num == USB_MAXIADS) { | 656 | if (iad_num == USB_MAXIADS) { |
647 | dev_warn(ddev, "found more Interface " | 657 | dev_warn(ddev, "found more Interface " |
648 | "Association Descriptors " | 658 | "Association Descriptors " |
649 | "than allocated for in " | 659 | "than allocated for in " |
650 | "configuration %d\n", cfgno); | 660 | "configuration %d\n", cfgno); |
651 | } else { | 661 | } else { |
652 | config->intf_assoc[iad_num] = | 662 | config->intf_assoc[iad_num] = d; |
653 | (struct usb_interface_assoc_descriptor | ||
654 | *)header; | ||
655 | iad_num++; | 663 | iad_num++; |
656 | } | 664 | } |
657 | 665 | ||
@@ -852,7 +860,7 @@ int usb_get_configuration(struct usb_device *dev) | |||
852 | } | 860 | } |
853 | 861 | ||
854 | if (dev->quirks & USB_QUIRK_DELAY_INIT) | 862 | if (dev->quirks & USB_QUIRK_DELAY_INIT) |
855 | msleep(100); | 863 | msleep(200); |
856 | 864 | ||
857 | result = usb_get_descriptor(dev, USB_DT_CONFIG, cfgno, | 865 | result = usb_get_descriptor(dev, USB_DT_CONFIG, cfgno, |
858 | bigbuffer, length); | 866 | bigbuffer, length); |
diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c index 318bb3b96687..4664e543cf2f 100644 --- a/drivers/usb/core/devio.c +++ b/drivers/usb/core/devio.c | |||
@@ -140,6 +140,9 @@ module_param(usbfs_memory_mb, uint, 0644); | |||
140 | MODULE_PARM_DESC(usbfs_memory_mb, | 140 | MODULE_PARM_DESC(usbfs_memory_mb, |
141 | "maximum MB allowed for usbfs buffers (0 = no limit)"); | 141 | "maximum MB allowed for usbfs buffers (0 = no limit)"); |
142 | 142 | ||
143 | /* Hard limit, necessary to avoid arithmetic overflow */ | ||
144 | #define USBFS_XFER_MAX (UINT_MAX / 2 - 1000000) | ||
145 | |||
143 | static atomic64_t usbfs_memory_usage; /* Total memory currently allocated */ | 146 | static atomic64_t usbfs_memory_usage; /* Total memory currently allocated */ |
144 | 147 | ||
145 | /* Check whether it's okay to allocate more memory for a transfer */ | 148 | /* Check whether it's okay to allocate more memory for a transfer */ |
@@ -1460,6 +1463,8 @@ static int proc_do_submiturb(struct usb_dev_state *ps, struct usbdevfs_urb *uurb | |||
1460 | USBDEVFS_URB_ZERO_PACKET | | 1463 | USBDEVFS_URB_ZERO_PACKET | |
1461 | USBDEVFS_URB_NO_INTERRUPT)) | 1464 | USBDEVFS_URB_NO_INTERRUPT)) |
1462 | return -EINVAL; | 1465 | return -EINVAL; |
1466 | if ((unsigned int)uurb->buffer_length >= USBFS_XFER_MAX) | ||
1467 | return -EINVAL; | ||
1463 | if (uurb->buffer_length > 0 && !uurb->buffer) | 1468 | if (uurb->buffer_length > 0 && !uurb->buffer) |
1464 | return -EINVAL; | 1469 | return -EINVAL; |
1465 | if (!(uurb->type == USBDEVFS_URB_TYPE_CONTROL && | 1470 | if (!(uurb->type == USBDEVFS_URB_TYPE_CONTROL && |
@@ -1571,7 +1576,11 @@ static int proc_do_submiturb(struct usb_dev_state *ps, struct usbdevfs_urb *uurb | |||
1571 | totlen += isopkt[u].length; | 1576 | totlen += isopkt[u].length; |
1572 | } | 1577 | } |
1573 | u *= sizeof(struct usb_iso_packet_descriptor); | 1578 | u *= sizeof(struct usb_iso_packet_descriptor); |
1574 | uurb->buffer_length = totlen; | 1579 | if (totlen <= uurb->buffer_length) |
1580 | uurb->buffer_length = totlen; | ||
1581 | else | ||
1582 | WARN_ONCE(1, "uurb->buffer_length is too short %d vs %d", | ||
1583 | totlen, uurb->buffer_length); | ||
1575 | break; | 1584 | break; |
1576 | 1585 | ||
1577 | default: | 1586 | default: |
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c index 41eaf0b52518..b5c733613823 100644 --- a/drivers/usb/core/hub.c +++ b/drivers/usb/core/hub.c | |||
@@ -4838,7 +4838,7 @@ static void hub_port_connect(struct usb_hub *hub, int port1, u16 portstatus, | |||
4838 | goto loop; | 4838 | goto loop; |
4839 | 4839 | ||
4840 | if (udev->quirks & USB_QUIRK_DELAY_INIT) | 4840 | if (udev->quirks & USB_QUIRK_DELAY_INIT) |
4841 | msleep(1000); | 4841 | msleep(2000); |
4842 | 4842 | ||
4843 | /* consecutive bus-powered hubs aren't reliable; they can | 4843 | /* consecutive bus-powered hubs aren't reliable; they can |
4844 | * violate the voltage drop budget. if the new child has | 4844 | * violate the voltage drop budget. if the new child has |
diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c index 4c38ea41ae96..371a07d874a3 100644 --- a/drivers/usb/core/message.c +++ b/drivers/usb/core/message.c | |||
@@ -2069,6 +2069,10 @@ int cdc_parse_cdc_header(struct usb_cdc_parsed_header *hdr, | |||
2069 | elength = 1; | 2069 | elength = 1; |
2070 | goto next_desc; | 2070 | goto next_desc; |
2071 | } | 2071 | } |
2072 | if ((buflen < elength) || (elength < 3)) { | ||
2073 | dev_err(&intf->dev, "invalid descriptor buffer length\n"); | ||
2074 | break; | ||
2075 | } | ||
2072 | if (buffer[1] != USB_DT_CS_INTERFACE) { | 2076 | if (buffer[1] != USB_DT_CS_INTERFACE) { |
2073 | dev_err(&intf->dev, "skipping garbage\n"); | 2077 | dev_err(&intf->dev, "skipping garbage\n"); |
2074 | goto next_desc; | 2078 | goto next_desc; |
diff --git a/drivers/usb/dwc3/dwc3-of-simple.c b/drivers/usb/dwc3/dwc3-of-simple.c index 4cef7d4f9cd0..a26d1fde0f5e 100644 --- a/drivers/usb/dwc3/dwc3-of-simple.c +++ b/drivers/usb/dwc3/dwc3-of-simple.c | |||
@@ -177,6 +177,7 @@ static const struct of_device_id of_dwc3_simple_match[] = { | |||
177 | { .compatible = "rockchip,rk3399-dwc3" }, | 177 | { .compatible = "rockchip,rk3399-dwc3" }, |
178 | { .compatible = "xlnx,zynqmp-dwc3" }, | 178 | { .compatible = "xlnx,zynqmp-dwc3" }, |
179 | { .compatible = "cavium,octeon-7130-usb-uctl" }, | 179 | { .compatible = "cavium,octeon-7130-usb-uctl" }, |
180 | { .compatible = "sprd,sc9860-dwc3" }, | ||
180 | { /* Sentinel */ } | 181 | { /* Sentinel */ } |
181 | }; | 182 | }; |
182 | MODULE_DEVICE_TABLE(of, of_dwc3_simple_match); | 183 | MODULE_DEVICE_TABLE(of, of_dwc3_simple_match); |
diff --git a/drivers/usb/dwc3/ep0.c b/drivers/usb/dwc3/ep0.c index 827e376bfa97..75e6cb044eb2 100644 --- a/drivers/usb/dwc3/ep0.c +++ b/drivers/usb/dwc3/ep0.c | |||
@@ -990,6 +990,8 @@ static void __dwc3_ep0_do_control_data(struct dwc3 *dwc, | |||
990 | DWC3_TRBCTL_CONTROL_DATA, | 990 | DWC3_TRBCTL_CONTROL_DATA, |
991 | true); | 991 | true); |
992 | 992 | ||
993 | req->trb = &dwc->ep0_trb[dep->trb_enqueue - 1]; | ||
994 | |||
993 | /* Now prepare one extra TRB to align transfer size */ | 995 | /* Now prepare one extra TRB to align transfer size */ |
994 | dwc3_ep0_prepare_one_trb(dep, dwc->bounce_addr, | 996 | dwc3_ep0_prepare_one_trb(dep, dwc->bounce_addr, |
995 | maxpacket - rem, | 997 | maxpacket - rem, |
@@ -1015,6 +1017,8 @@ static void __dwc3_ep0_do_control_data(struct dwc3 *dwc, | |||
1015 | DWC3_TRBCTL_CONTROL_DATA, | 1017 | DWC3_TRBCTL_CONTROL_DATA, |
1016 | true); | 1018 | true); |
1017 | 1019 | ||
1020 | req->trb = &dwc->ep0_trb[dep->trb_enqueue - 1]; | ||
1021 | |||
1018 | /* Now prepare one extra TRB to align transfer size */ | 1022 | /* Now prepare one extra TRB to align transfer size */ |
1019 | dwc3_ep0_prepare_one_trb(dep, dwc->bounce_addr, | 1023 | dwc3_ep0_prepare_one_trb(dep, dwc->bounce_addr, |
1020 | 0, DWC3_TRBCTL_CONTROL_DATA, | 1024 | 0, DWC3_TRBCTL_CONTROL_DATA, |
@@ -1029,6 +1033,9 @@ static void __dwc3_ep0_do_control_data(struct dwc3 *dwc, | |||
1029 | dwc3_ep0_prepare_one_trb(dep, req->request.dma, | 1033 | dwc3_ep0_prepare_one_trb(dep, req->request.dma, |
1030 | req->request.length, DWC3_TRBCTL_CONTROL_DATA, | 1034 | req->request.length, DWC3_TRBCTL_CONTROL_DATA, |
1031 | false); | 1035 | false); |
1036 | |||
1037 | req->trb = &dwc->ep0_trb[dep->trb_enqueue]; | ||
1038 | |||
1032 | ret = dwc3_ep0_start_trans(dep); | 1039 | ret = dwc3_ep0_start_trans(dep); |
1033 | } | 1040 | } |
1034 | 1041 | ||
diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c index 9990944a7245..8b342587f8ad 100644 --- a/drivers/usb/gadget/function/f_fs.c +++ b/drivers/usb/gadget/function/f_fs.c | |||
@@ -46,7 +46,8 @@ | |||
46 | static void ffs_data_get(struct ffs_data *ffs); | 46 | static void ffs_data_get(struct ffs_data *ffs); |
47 | static void ffs_data_put(struct ffs_data *ffs); | 47 | static void ffs_data_put(struct ffs_data *ffs); |
48 | /* Creates new ffs_data object. */ | 48 | /* Creates new ffs_data object. */ |
49 | static struct ffs_data *__must_check ffs_data_new(void) __attribute__((malloc)); | 49 | static struct ffs_data *__must_check ffs_data_new(const char *dev_name) |
50 | __attribute__((malloc)); | ||
50 | 51 | ||
51 | /* Opened counter handling. */ | 52 | /* Opened counter handling. */ |
52 | static void ffs_data_opened(struct ffs_data *ffs); | 53 | static void ffs_data_opened(struct ffs_data *ffs); |
@@ -780,11 +781,12 @@ static void ffs_epfile_async_io_complete(struct usb_ep *_ep, | |||
780 | struct usb_request *req) | 781 | struct usb_request *req) |
781 | { | 782 | { |
782 | struct ffs_io_data *io_data = req->context; | 783 | struct ffs_io_data *io_data = req->context; |
784 | struct ffs_data *ffs = io_data->ffs; | ||
783 | 785 | ||
784 | ENTER(); | 786 | ENTER(); |
785 | 787 | ||
786 | INIT_WORK(&io_data->work, ffs_user_copy_worker); | 788 | INIT_WORK(&io_data->work, ffs_user_copy_worker); |
787 | schedule_work(&io_data->work); | 789 | queue_work(ffs->io_completion_wq, &io_data->work); |
788 | } | 790 | } |
789 | 791 | ||
790 | static void __ffs_epfile_read_buffer_free(struct ffs_epfile *epfile) | 792 | static void __ffs_epfile_read_buffer_free(struct ffs_epfile *epfile) |
@@ -1500,7 +1502,7 @@ ffs_fs_mount(struct file_system_type *t, int flags, | |||
1500 | if (unlikely(ret < 0)) | 1502 | if (unlikely(ret < 0)) |
1501 | return ERR_PTR(ret); | 1503 | return ERR_PTR(ret); |
1502 | 1504 | ||
1503 | ffs = ffs_data_new(); | 1505 | ffs = ffs_data_new(dev_name); |
1504 | if (unlikely(!ffs)) | 1506 | if (unlikely(!ffs)) |
1505 | return ERR_PTR(-ENOMEM); | 1507 | return ERR_PTR(-ENOMEM); |
1506 | ffs->file_perms = data.perms; | 1508 | ffs->file_perms = data.perms; |
@@ -1610,6 +1612,7 @@ static void ffs_data_put(struct ffs_data *ffs) | |||
1610 | BUG_ON(waitqueue_active(&ffs->ev.waitq) || | 1612 | BUG_ON(waitqueue_active(&ffs->ev.waitq) || |
1611 | waitqueue_active(&ffs->ep0req_completion.wait) || | 1613 | waitqueue_active(&ffs->ep0req_completion.wait) || |
1612 | waitqueue_active(&ffs->wait)); | 1614 | waitqueue_active(&ffs->wait)); |
1615 | destroy_workqueue(ffs->io_completion_wq); | ||
1613 | kfree(ffs->dev_name); | 1616 | kfree(ffs->dev_name); |
1614 | kfree(ffs); | 1617 | kfree(ffs); |
1615 | } | 1618 | } |
@@ -1642,7 +1645,7 @@ static void ffs_data_closed(struct ffs_data *ffs) | |||
1642 | ffs_data_put(ffs); | 1645 | ffs_data_put(ffs); |
1643 | } | 1646 | } |
1644 | 1647 | ||
1645 | static struct ffs_data *ffs_data_new(void) | 1648 | static struct ffs_data *ffs_data_new(const char *dev_name) |
1646 | { | 1649 | { |
1647 | struct ffs_data *ffs = kzalloc(sizeof *ffs, GFP_KERNEL); | 1650 | struct ffs_data *ffs = kzalloc(sizeof *ffs, GFP_KERNEL); |
1648 | if (unlikely(!ffs)) | 1651 | if (unlikely(!ffs)) |
@@ -1650,6 +1653,12 @@ static struct ffs_data *ffs_data_new(void) | |||
1650 | 1653 | ||
1651 | ENTER(); | 1654 | ENTER(); |
1652 | 1655 | ||
1656 | ffs->io_completion_wq = alloc_ordered_workqueue("%s", 0, dev_name); | ||
1657 | if (!ffs->io_completion_wq) { | ||
1658 | kfree(ffs); | ||
1659 | return NULL; | ||
1660 | } | ||
1661 | |||
1653 | refcount_set(&ffs->ref, 1); | 1662 | refcount_set(&ffs->ref, 1); |
1654 | atomic_set(&ffs->opened, 0); | 1663 | atomic_set(&ffs->opened, 0); |
1655 | ffs->state = FFS_READ_DESCRIPTORS; | 1664 | ffs->state = FFS_READ_DESCRIPTORS; |
diff --git a/drivers/usb/gadget/function/f_mass_storage.c b/drivers/usb/gadget/function/f_mass_storage.c index d6bd0244b008..5153e29870c3 100644 --- a/drivers/usb/gadget/function/f_mass_storage.c +++ b/drivers/usb/gadget/function/f_mass_storage.c | |||
@@ -307,8 +307,6 @@ struct fsg_common { | |||
307 | struct completion thread_notifier; | 307 | struct completion thread_notifier; |
308 | struct task_struct *thread_task; | 308 | struct task_struct *thread_task; |
309 | 309 | ||
310 | /* Callback functions. */ | ||
311 | const struct fsg_operations *ops; | ||
312 | /* Gadget's private data. */ | 310 | /* Gadget's private data. */ |
313 | void *private_data; | 311 | void *private_data; |
314 | 312 | ||
@@ -2438,6 +2436,7 @@ static void handle_exception(struct fsg_common *common) | |||
2438 | static int fsg_main_thread(void *common_) | 2436 | static int fsg_main_thread(void *common_) |
2439 | { | 2437 | { |
2440 | struct fsg_common *common = common_; | 2438 | struct fsg_common *common = common_; |
2439 | int i; | ||
2441 | 2440 | ||
2442 | /* | 2441 | /* |
2443 | * Allow the thread to be killed by a signal, but set the signal mask | 2442 | * Allow the thread to be killed by a signal, but set the signal mask |
@@ -2476,21 +2475,16 @@ static int fsg_main_thread(void *common_) | |||
2476 | common->thread_task = NULL; | 2475 | common->thread_task = NULL; |
2477 | spin_unlock_irq(&common->lock); | 2476 | spin_unlock_irq(&common->lock); |
2478 | 2477 | ||
2479 | if (!common->ops || !common->ops->thread_exits | 2478 | /* Eject media from all LUNs */ |
2480 | || common->ops->thread_exits(common) < 0) { | ||
2481 | int i; | ||
2482 | 2479 | ||
2483 | down_write(&common->filesem); | 2480 | down_write(&common->filesem); |
2484 | for (i = 0; i < ARRAY_SIZE(common->luns); i++) { | 2481 | for (i = 0; i < ARRAY_SIZE(common->luns); i++) { |
2485 | struct fsg_lun *curlun = common->luns[i]; | 2482 | struct fsg_lun *curlun = common->luns[i]; |
2486 | if (!curlun || !fsg_lun_is_open(curlun)) | ||
2487 | continue; | ||
2488 | 2483 | ||
2484 | if (curlun && fsg_lun_is_open(curlun)) | ||
2489 | fsg_lun_close(curlun); | 2485 | fsg_lun_close(curlun); |
2490 | curlun->unit_attention_data = SS_MEDIUM_NOT_PRESENT; | ||
2491 | } | ||
2492 | up_write(&common->filesem); | ||
2493 | } | 2486 | } |
2487 | up_write(&common->filesem); | ||
2494 | 2488 | ||
2495 | /* Let fsg_unbind() know the thread has exited */ | 2489 | /* Let fsg_unbind() know the thread has exited */ |
2496 | complete_and_exit(&common->thread_notifier, 0); | 2490 | complete_and_exit(&common->thread_notifier, 0); |
@@ -2681,13 +2675,6 @@ void fsg_common_remove_luns(struct fsg_common *common) | |||
2681 | } | 2675 | } |
2682 | EXPORT_SYMBOL_GPL(fsg_common_remove_luns); | 2676 | EXPORT_SYMBOL_GPL(fsg_common_remove_luns); |
2683 | 2677 | ||
2684 | void fsg_common_set_ops(struct fsg_common *common, | ||
2685 | const struct fsg_operations *ops) | ||
2686 | { | ||
2687 | common->ops = ops; | ||
2688 | } | ||
2689 | EXPORT_SYMBOL_GPL(fsg_common_set_ops); | ||
2690 | |||
2691 | void fsg_common_free_buffers(struct fsg_common *common) | 2678 | void fsg_common_free_buffers(struct fsg_common *common) |
2692 | { | 2679 | { |
2693 | _fsg_common_free_buffers(common->buffhds, common->fsg_num_buffers); | 2680 | _fsg_common_free_buffers(common->buffhds, common->fsg_num_buffers); |
diff --git a/drivers/usb/gadget/function/f_mass_storage.h b/drivers/usb/gadget/function/f_mass_storage.h index d3902313b8ac..dc05ca0c4359 100644 --- a/drivers/usb/gadget/function/f_mass_storage.h +++ b/drivers/usb/gadget/function/f_mass_storage.h | |||
@@ -60,17 +60,6 @@ struct fsg_module_parameters { | |||
60 | struct fsg_common; | 60 | struct fsg_common; |
61 | 61 | ||
62 | /* FSF callback functions */ | 62 | /* FSF callback functions */ |
63 | struct fsg_operations { | ||
64 | /* | ||
65 | * Callback function to call when thread exits. If no | ||
66 | * callback is set or it returns value lower then zero MSF | ||
67 | * will force eject all LUNs it operates on (including those | ||
68 | * marked as non-removable or with prevent_medium_removal flag | ||
69 | * set). | ||
70 | */ | ||
71 | int (*thread_exits)(struct fsg_common *common); | ||
72 | }; | ||
73 | |||
74 | struct fsg_lun_opts { | 63 | struct fsg_lun_opts { |
75 | struct config_group group; | 64 | struct config_group group; |
76 | struct fsg_lun *lun; | 65 | struct fsg_lun *lun; |
@@ -142,9 +131,6 @@ void fsg_common_remove_lun(struct fsg_lun *lun); | |||
142 | 131 | ||
143 | void fsg_common_remove_luns(struct fsg_common *common); | 132 | void fsg_common_remove_luns(struct fsg_common *common); |
144 | 133 | ||
145 | void fsg_common_set_ops(struct fsg_common *common, | ||
146 | const struct fsg_operations *ops); | ||
147 | |||
148 | int fsg_common_create_lun(struct fsg_common *common, struct fsg_lun_config *cfg, | 134 | int fsg_common_create_lun(struct fsg_common *common, struct fsg_lun_config *cfg, |
149 | unsigned int id, const char *name, | 135 | unsigned int id, const char *name, |
150 | const char **name_pfx); | 136 | const char **name_pfx); |
diff --git a/drivers/usb/gadget/function/f_printer.c b/drivers/usb/gadget/function/f_printer.c index 8df244fc9d80..ea0da35a44e2 100644 --- a/drivers/usb/gadget/function/f_printer.c +++ b/drivers/usb/gadget/function/f_printer.c | |||
@@ -555,6 +555,7 @@ printer_write(struct file *fd, const char __user *buf, size_t len, loff_t *ptr) | |||
555 | size_t size; /* Amount of data in a TX request. */ | 555 | size_t size; /* Amount of data in a TX request. */ |
556 | size_t bytes_copied = 0; | 556 | size_t bytes_copied = 0; |
557 | struct usb_request *req; | 557 | struct usb_request *req; |
558 | int value; | ||
558 | 559 | ||
559 | DBG(dev, "printer_write trying to send %d bytes\n", (int)len); | 560 | DBG(dev, "printer_write trying to send %d bytes\n", (int)len); |
560 | 561 | ||
@@ -634,7 +635,11 @@ printer_write(struct file *fd, const char __user *buf, size_t len, loff_t *ptr) | |||
634 | return -EAGAIN; | 635 | return -EAGAIN; |
635 | } | 636 | } |
636 | 637 | ||
637 | if (usb_ep_queue(dev->in_ep, req, GFP_ATOMIC)) { | 638 | /* here, we unlock, and only unlock, to avoid deadlock. */ |
639 | spin_unlock(&dev->lock); | ||
640 | value = usb_ep_queue(dev->in_ep, req, GFP_ATOMIC); | ||
641 | spin_lock(&dev->lock); | ||
642 | if (value) { | ||
638 | list_add(&req->list, &dev->tx_reqs); | 643 | list_add(&req->list, &dev->tx_reqs); |
639 | spin_unlock_irqrestore(&dev->lock, flags); | 644 | spin_unlock_irqrestore(&dev->lock, flags); |
640 | mutex_unlock(&dev->lock_printer_io); | 645 | mutex_unlock(&dev->lock_printer_io); |
diff --git a/drivers/usb/gadget/function/u_fs.h b/drivers/usb/gadget/function/u_fs.h index 540f1c48c1a8..79f70ebf85dc 100644 --- a/drivers/usb/gadget/function/u_fs.h +++ b/drivers/usb/gadget/function/u_fs.h | |||
@@ -279,6 +279,7 @@ struct ffs_data { | |||
279 | } file_perms; | 279 | } file_perms; |
280 | 280 | ||
281 | struct eventfd_ctx *ffs_eventfd; | 281 | struct eventfd_ctx *ffs_eventfd; |
282 | struct workqueue_struct *io_completion_wq; | ||
282 | bool no_disconnect; | 283 | bool no_disconnect; |
283 | struct work_struct reset_work; | 284 | struct work_struct reset_work; |
284 | 285 | ||
diff --git a/drivers/usb/gadget/legacy/inode.c b/drivers/usb/gadget/legacy/inode.c index 684900fcfe24..5c28bee327e1 100644 --- a/drivers/usb/gadget/legacy/inode.c +++ b/drivers/usb/gadget/legacy/inode.c | |||
@@ -28,7 +28,7 @@ | |||
28 | #include <linux/aio.h> | 28 | #include <linux/aio.h> |
29 | #include <linux/uio.h> | 29 | #include <linux/uio.h> |
30 | #include <linux/refcount.h> | 30 | #include <linux/refcount.h> |
31 | 31 | #include <linux/delay.h> | |
32 | #include <linux/device.h> | 32 | #include <linux/device.h> |
33 | #include <linux/moduleparam.h> | 33 | #include <linux/moduleparam.h> |
34 | 34 | ||
@@ -116,6 +116,7 @@ enum ep0_state { | |||
116 | struct dev_data { | 116 | struct dev_data { |
117 | spinlock_t lock; | 117 | spinlock_t lock; |
118 | refcount_t count; | 118 | refcount_t count; |
119 | int udc_usage; | ||
119 | enum ep0_state state; /* P: lock */ | 120 | enum ep0_state state; /* P: lock */ |
120 | struct usb_gadgetfs_event event [N_EVENT]; | 121 | struct usb_gadgetfs_event event [N_EVENT]; |
121 | unsigned ev_next; | 122 | unsigned ev_next; |
@@ -513,9 +514,9 @@ static void ep_aio_complete(struct usb_ep *ep, struct usb_request *req) | |||
513 | INIT_WORK(&priv->work, ep_user_copy_worker); | 514 | INIT_WORK(&priv->work, ep_user_copy_worker); |
514 | schedule_work(&priv->work); | 515 | schedule_work(&priv->work); |
515 | } | 516 | } |
516 | spin_unlock(&epdata->dev->lock); | ||
517 | 517 | ||
518 | usb_ep_free_request(ep, req); | 518 | usb_ep_free_request(ep, req); |
519 | spin_unlock(&epdata->dev->lock); | ||
519 | put_ep(epdata); | 520 | put_ep(epdata); |
520 | } | 521 | } |
521 | 522 | ||
@@ -939,9 +940,11 @@ ep0_read (struct file *fd, char __user *buf, size_t len, loff_t *ptr) | |||
939 | struct usb_request *req = dev->req; | 940 | struct usb_request *req = dev->req; |
940 | 941 | ||
941 | if ((retval = setup_req (ep, req, 0)) == 0) { | 942 | if ((retval = setup_req (ep, req, 0)) == 0) { |
943 | ++dev->udc_usage; | ||
942 | spin_unlock_irq (&dev->lock); | 944 | spin_unlock_irq (&dev->lock); |
943 | retval = usb_ep_queue (ep, req, GFP_KERNEL); | 945 | retval = usb_ep_queue (ep, req, GFP_KERNEL); |
944 | spin_lock_irq (&dev->lock); | 946 | spin_lock_irq (&dev->lock); |
947 | --dev->udc_usage; | ||
945 | } | 948 | } |
946 | dev->state = STATE_DEV_CONNECTED; | 949 | dev->state = STATE_DEV_CONNECTED; |
947 | 950 | ||
@@ -983,11 +986,14 @@ ep0_read (struct file *fd, char __user *buf, size_t len, loff_t *ptr) | |||
983 | retval = -EIO; | 986 | retval = -EIO; |
984 | else { | 987 | else { |
985 | len = min (len, (size_t)dev->req->actual); | 988 | len = min (len, (size_t)dev->req->actual); |
986 | // FIXME don't call this with the spinlock held ... | 989 | ++dev->udc_usage; |
990 | spin_unlock_irq(&dev->lock); | ||
987 | if (copy_to_user (buf, dev->req->buf, len)) | 991 | if (copy_to_user (buf, dev->req->buf, len)) |
988 | retval = -EFAULT; | 992 | retval = -EFAULT; |
989 | else | 993 | else |
990 | retval = len; | 994 | retval = len; |
995 | spin_lock_irq(&dev->lock); | ||
996 | --dev->udc_usage; | ||
991 | clean_req (dev->gadget->ep0, dev->req); | 997 | clean_req (dev->gadget->ep0, dev->req); |
992 | /* NOTE userspace can't yet choose to stall */ | 998 | /* NOTE userspace can't yet choose to stall */ |
993 | } | 999 | } |
@@ -1131,6 +1137,7 @@ ep0_write (struct file *fd, const char __user *buf, size_t len, loff_t *ptr) | |||
1131 | retval = setup_req (dev->gadget->ep0, dev->req, len); | 1137 | retval = setup_req (dev->gadget->ep0, dev->req, len); |
1132 | if (retval == 0) { | 1138 | if (retval == 0) { |
1133 | dev->state = STATE_DEV_CONNECTED; | 1139 | dev->state = STATE_DEV_CONNECTED; |
1140 | ++dev->udc_usage; | ||
1134 | spin_unlock_irq (&dev->lock); | 1141 | spin_unlock_irq (&dev->lock); |
1135 | if (copy_from_user (dev->req->buf, buf, len)) | 1142 | if (copy_from_user (dev->req->buf, buf, len)) |
1136 | retval = -EFAULT; | 1143 | retval = -EFAULT; |
@@ -1142,6 +1149,7 @@ ep0_write (struct file *fd, const char __user *buf, size_t len, loff_t *ptr) | |||
1142 | GFP_KERNEL); | 1149 | GFP_KERNEL); |
1143 | } | 1150 | } |
1144 | spin_lock_irq(&dev->lock); | 1151 | spin_lock_irq(&dev->lock); |
1152 | --dev->udc_usage; | ||
1145 | if (retval < 0) { | 1153 | if (retval < 0) { |
1146 | clean_req (dev->gadget->ep0, dev->req); | 1154 | clean_req (dev->gadget->ep0, dev->req); |
1147 | } else | 1155 | } else |
@@ -1243,9 +1251,21 @@ static long dev_ioctl (struct file *fd, unsigned code, unsigned long value) | |||
1243 | struct usb_gadget *gadget = dev->gadget; | 1251 | struct usb_gadget *gadget = dev->gadget; |
1244 | long ret = -ENOTTY; | 1252 | long ret = -ENOTTY; |
1245 | 1253 | ||
1246 | if (gadget->ops->ioctl) | 1254 | spin_lock_irq(&dev->lock); |
1255 | if (dev->state == STATE_DEV_OPENED || | ||
1256 | dev->state == STATE_DEV_UNBOUND) { | ||
1257 | /* Not bound to a UDC */ | ||
1258 | } else if (gadget->ops->ioctl) { | ||
1259 | ++dev->udc_usage; | ||
1260 | spin_unlock_irq(&dev->lock); | ||
1261 | |||
1247 | ret = gadget->ops->ioctl (gadget, code, value); | 1262 | ret = gadget->ops->ioctl (gadget, code, value); |
1248 | 1263 | ||
1264 | spin_lock_irq(&dev->lock); | ||
1265 | --dev->udc_usage; | ||
1266 | } | ||
1267 | spin_unlock_irq(&dev->lock); | ||
1268 | |||
1249 | return ret; | 1269 | return ret; |
1250 | } | 1270 | } |
1251 | 1271 | ||
@@ -1463,10 +1483,12 @@ delegate: | |||
1463 | if (value < 0) | 1483 | if (value < 0) |
1464 | break; | 1484 | break; |
1465 | 1485 | ||
1486 | ++dev->udc_usage; | ||
1466 | spin_unlock (&dev->lock); | 1487 | spin_unlock (&dev->lock); |
1467 | value = usb_ep_queue (gadget->ep0, dev->req, | 1488 | value = usb_ep_queue (gadget->ep0, dev->req, |
1468 | GFP_KERNEL); | 1489 | GFP_KERNEL); |
1469 | spin_lock (&dev->lock); | 1490 | spin_lock (&dev->lock); |
1491 | --dev->udc_usage; | ||
1470 | if (value < 0) { | 1492 | if (value < 0) { |
1471 | clean_req (gadget->ep0, dev->req); | 1493 | clean_req (gadget->ep0, dev->req); |
1472 | break; | 1494 | break; |
@@ -1490,8 +1512,12 @@ delegate: | |||
1490 | req->length = value; | 1512 | req->length = value; |
1491 | req->zero = value < w_length; | 1513 | req->zero = value < w_length; |
1492 | 1514 | ||
1515 | ++dev->udc_usage; | ||
1493 | spin_unlock (&dev->lock); | 1516 | spin_unlock (&dev->lock); |
1494 | value = usb_ep_queue (gadget->ep0, req, GFP_KERNEL); | 1517 | value = usb_ep_queue (gadget->ep0, req, GFP_KERNEL); |
1518 | spin_lock(&dev->lock); | ||
1519 | --dev->udc_usage; | ||
1520 | spin_unlock(&dev->lock); | ||
1495 | if (value < 0) { | 1521 | if (value < 0) { |
1496 | DBG (dev, "ep_queue --> %d\n", value); | 1522 | DBG (dev, "ep_queue --> %d\n", value); |
1497 | req->status = 0; | 1523 | req->status = 0; |
@@ -1518,21 +1544,24 @@ static void destroy_ep_files (struct dev_data *dev) | |||
1518 | /* break link to FS */ | 1544 | /* break link to FS */ |
1519 | ep = list_first_entry (&dev->epfiles, struct ep_data, epfiles); | 1545 | ep = list_first_entry (&dev->epfiles, struct ep_data, epfiles); |
1520 | list_del_init (&ep->epfiles); | 1546 | list_del_init (&ep->epfiles); |
1547 | spin_unlock_irq (&dev->lock); | ||
1548 | |||
1521 | dentry = ep->dentry; | 1549 | dentry = ep->dentry; |
1522 | ep->dentry = NULL; | 1550 | ep->dentry = NULL; |
1523 | parent = d_inode(dentry->d_parent); | 1551 | parent = d_inode(dentry->d_parent); |
1524 | 1552 | ||
1525 | /* break link to controller */ | 1553 | /* break link to controller */ |
1554 | mutex_lock(&ep->lock); | ||
1526 | if (ep->state == STATE_EP_ENABLED) | 1555 | if (ep->state == STATE_EP_ENABLED) |
1527 | (void) usb_ep_disable (ep->ep); | 1556 | (void) usb_ep_disable (ep->ep); |
1528 | ep->state = STATE_EP_UNBOUND; | 1557 | ep->state = STATE_EP_UNBOUND; |
1529 | usb_ep_free_request (ep->ep, ep->req); | 1558 | usb_ep_free_request (ep->ep, ep->req); |
1530 | ep->ep = NULL; | 1559 | ep->ep = NULL; |
1560 | mutex_unlock(&ep->lock); | ||
1561 | |||
1531 | wake_up (&ep->wait); | 1562 | wake_up (&ep->wait); |
1532 | put_ep (ep); | 1563 | put_ep (ep); |
1533 | 1564 | ||
1534 | spin_unlock_irq (&dev->lock); | ||
1535 | |||
1536 | /* break link to dcache */ | 1565 | /* break link to dcache */ |
1537 | inode_lock(parent); | 1566 | inode_lock(parent); |
1538 | d_delete (dentry); | 1567 | d_delete (dentry); |
@@ -1603,6 +1632,11 @@ gadgetfs_unbind (struct usb_gadget *gadget) | |||
1603 | 1632 | ||
1604 | spin_lock_irq (&dev->lock); | 1633 | spin_lock_irq (&dev->lock); |
1605 | dev->state = STATE_DEV_UNBOUND; | 1634 | dev->state = STATE_DEV_UNBOUND; |
1635 | while (dev->udc_usage > 0) { | ||
1636 | spin_unlock_irq(&dev->lock); | ||
1637 | usleep_range(1000, 2000); | ||
1638 | spin_lock_irq(&dev->lock); | ||
1639 | } | ||
1606 | spin_unlock_irq (&dev->lock); | 1640 | spin_unlock_irq (&dev->lock); |
1607 | 1641 | ||
1608 | destroy_ep_files (dev); | 1642 | destroy_ep_files (dev); |
diff --git a/drivers/usb/gadget/legacy/mass_storage.c b/drivers/usb/gadget/legacy/mass_storage.c index e99ab57ee3e5..fcba59782f26 100644 --- a/drivers/usb/gadget/legacy/mass_storage.c +++ b/drivers/usb/gadget/legacy/mass_storage.c | |||
@@ -107,15 +107,6 @@ static unsigned int fsg_num_buffers = CONFIG_USB_GADGET_STORAGE_NUM_BUFFERS; | |||
107 | 107 | ||
108 | FSG_MODULE_PARAMETERS(/* no prefix */, mod_data); | 108 | FSG_MODULE_PARAMETERS(/* no prefix */, mod_data); |
109 | 109 | ||
110 | static unsigned long msg_registered; | ||
111 | static void msg_cleanup(void); | ||
112 | |||
113 | static int msg_thread_exits(struct fsg_common *common) | ||
114 | { | ||
115 | msg_cleanup(); | ||
116 | return 0; | ||
117 | } | ||
118 | |||
119 | static int msg_do_config(struct usb_configuration *c) | 110 | static int msg_do_config(struct usb_configuration *c) |
120 | { | 111 | { |
121 | struct fsg_opts *opts; | 112 | struct fsg_opts *opts; |
@@ -154,9 +145,6 @@ static struct usb_configuration msg_config_driver = { | |||
154 | 145 | ||
155 | static int msg_bind(struct usb_composite_dev *cdev) | 146 | static int msg_bind(struct usb_composite_dev *cdev) |
156 | { | 147 | { |
157 | static const struct fsg_operations ops = { | ||
158 | .thread_exits = msg_thread_exits, | ||
159 | }; | ||
160 | struct fsg_opts *opts; | 148 | struct fsg_opts *opts; |
161 | struct fsg_config config; | 149 | struct fsg_config config; |
162 | int status; | 150 | int status; |
@@ -173,8 +161,6 @@ static int msg_bind(struct usb_composite_dev *cdev) | |||
173 | if (status) | 161 | if (status) |
174 | goto fail; | 162 | goto fail; |
175 | 163 | ||
176 | fsg_common_set_ops(opts->common, &ops); | ||
177 | |||
178 | status = fsg_common_set_cdev(opts->common, cdev, config.can_stall); | 164 | status = fsg_common_set_cdev(opts->common, cdev, config.can_stall); |
179 | if (status) | 165 | if (status) |
180 | goto fail_set_cdev; | 166 | goto fail_set_cdev; |
@@ -256,18 +242,12 @@ MODULE_LICENSE("GPL"); | |||
256 | 242 | ||
257 | static int __init msg_init(void) | 243 | static int __init msg_init(void) |
258 | { | 244 | { |
259 | int ret; | 245 | return usb_composite_probe(&msg_driver); |
260 | |||
261 | ret = usb_composite_probe(&msg_driver); | ||
262 | set_bit(0, &msg_registered); | ||
263 | |||
264 | return ret; | ||
265 | } | 246 | } |
266 | module_init(msg_init); | 247 | module_init(msg_init); |
267 | 248 | ||
268 | static void msg_cleanup(void) | 249 | static void __exit msg_cleanup(void) |
269 | { | 250 | { |
270 | if (test_and_clear_bit(0, &msg_registered)) | 251 | usb_composite_unregister(&msg_driver); |
271 | usb_composite_unregister(&msg_driver); | ||
272 | } | 252 | } |
273 | module_exit(msg_cleanup); | 253 | module_exit(msg_cleanup); |
diff --git a/drivers/usb/gadget/udc/Kconfig b/drivers/usb/gadget/udc/Kconfig index 7cd5c969fcbe..1e9567091d86 100644 --- a/drivers/usb/gadget/udc/Kconfig +++ b/drivers/usb/gadget/udc/Kconfig | |||
@@ -273,6 +273,7 @@ config USB_SNP_CORE | |||
273 | config USB_SNP_UDC_PLAT | 273 | config USB_SNP_UDC_PLAT |
274 | tristate "Synopsys USB 2.0 Device controller" | 274 | tristate "Synopsys USB 2.0 Device controller" |
275 | depends on USB_GADGET && OF && HAS_DMA | 275 | depends on USB_GADGET && OF && HAS_DMA |
276 | depends on EXTCON || EXTCON=n | ||
276 | select USB_GADGET_DUALSPEED | 277 | select USB_GADGET_DUALSPEED |
277 | select USB_SNP_CORE | 278 | select USB_SNP_CORE |
278 | default ARCH_BCM_IPROC | 279 | default ARCH_BCM_IPROC |
diff --git a/drivers/usb/gadget/udc/atmel_usba_udc.c b/drivers/usb/gadget/udc/atmel_usba_udc.c index 98d71400f8a1..a884c022df7a 100644 --- a/drivers/usb/gadget/udc/atmel_usba_udc.c +++ b/drivers/usb/gadget/udc/atmel_usba_udc.c | |||
@@ -29,6 +29,8 @@ | |||
29 | #include <linux/of_gpio.h> | 29 | #include <linux/of_gpio.h> |
30 | 30 | ||
31 | #include "atmel_usba_udc.h" | 31 | #include "atmel_usba_udc.h" |
32 | #define USBA_VBUS_IRQFLAGS (IRQF_ONESHOT \ | ||
33 | | IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING) | ||
32 | 34 | ||
33 | #ifdef CONFIG_USB_GADGET_DEBUG_FS | 35 | #ifdef CONFIG_USB_GADGET_DEBUG_FS |
34 | #include <linux/debugfs.h> | 36 | #include <linux/debugfs.h> |
@@ -2361,7 +2363,7 @@ static int usba_udc_probe(struct platform_device *pdev) | |||
2361 | IRQ_NOAUTOEN); | 2363 | IRQ_NOAUTOEN); |
2362 | ret = devm_request_threaded_irq(&pdev->dev, | 2364 | ret = devm_request_threaded_irq(&pdev->dev, |
2363 | gpio_to_irq(udc->vbus_pin), NULL, | 2365 | gpio_to_irq(udc->vbus_pin), NULL, |
2364 | usba_vbus_irq_thread, IRQF_ONESHOT, | 2366 | usba_vbus_irq_thread, USBA_VBUS_IRQFLAGS, |
2365 | "atmel_usba_udc", udc); | 2367 | "atmel_usba_udc", udc); |
2366 | if (ret) { | 2368 | if (ret) { |
2367 | udc->vbus_pin = -ENODEV; | 2369 | udc->vbus_pin = -ENODEV; |
diff --git a/drivers/usb/gadget/udc/core.c b/drivers/usb/gadget/udc/core.c index 75c51ca4ee0f..d41d07aae0ce 100644 --- a/drivers/usb/gadget/udc/core.c +++ b/drivers/usb/gadget/udc/core.c | |||
@@ -1320,8 +1320,7 @@ static int udc_bind_to_driver(struct usb_udc *udc, struct usb_gadget_driver *dri | |||
1320 | udc->dev.driver = &driver->driver; | 1320 | udc->dev.driver = &driver->driver; |
1321 | udc->gadget->dev.driver = &driver->driver; | 1321 | udc->gadget->dev.driver = &driver->driver; |
1322 | 1322 | ||
1323 | if (driver->max_speed < udc->gadget->max_speed) | 1323 | usb_gadget_udc_set_speed(udc, driver->max_speed); |
1324 | usb_gadget_udc_set_speed(udc, driver->max_speed); | ||
1325 | 1324 | ||
1326 | ret = driver->bind(udc->gadget, driver); | 1325 | ret = driver->bind(udc->gadget, driver); |
1327 | if (ret) | 1326 | if (ret) |
diff --git a/drivers/usb/gadget/udc/dummy_hcd.c b/drivers/usb/gadget/udc/dummy_hcd.c index a030d7923d7d..b17618a55f1b 100644 --- a/drivers/usb/gadget/udc/dummy_hcd.c +++ b/drivers/usb/gadget/udc/dummy_hcd.c | |||
@@ -237,6 +237,8 @@ struct dummy_hcd { | |||
237 | 237 | ||
238 | struct usb_device *udev; | 238 | struct usb_device *udev; |
239 | struct list_head urbp_list; | 239 | struct list_head urbp_list; |
240 | struct urbp *next_frame_urbp; | ||
241 | |||
240 | u32 stream_en_ep; | 242 | u32 stream_en_ep; |
241 | u8 num_stream[30 / 2]; | 243 | u8 num_stream[30 / 2]; |
242 | 244 | ||
@@ -253,11 +255,13 @@ struct dummy { | |||
253 | */ | 255 | */ |
254 | struct dummy_ep ep[DUMMY_ENDPOINTS]; | 256 | struct dummy_ep ep[DUMMY_ENDPOINTS]; |
255 | int address; | 257 | int address; |
258 | int callback_usage; | ||
256 | struct usb_gadget gadget; | 259 | struct usb_gadget gadget; |
257 | struct usb_gadget_driver *driver; | 260 | struct usb_gadget_driver *driver; |
258 | struct dummy_request fifo_req; | 261 | struct dummy_request fifo_req; |
259 | u8 fifo_buf[FIFO_SIZE]; | 262 | u8 fifo_buf[FIFO_SIZE]; |
260 | u16 devstatus; | 263 | u16 devstatus; |
264 | unsigned ints_enabled:1; | ||
261 | unsigned udc_suspended:1; | 265 | unsigned udc_suspended:1; |
262 | unsigned pullup:1; | 266 | unsigned pullup:1; |
263 | 267 | ||
@@ -375,11 +379,10 @@ static void set_link_state_by_speed(struct dummy_hcd *dum_hcd) | |||
375 | USB_PORT_STAT_CONNECTION) == 0) | 379 | USB_PORT_STAT_CONNECTION) == 0) |
376 | dum_hcd->port_status |= | 380 | dum_hcd->port_status |= |
377 | (USB_PORT_STAT_C_CONNECTION << 16); | 381 | (USB_PORT_STAT_C_CONNECTION << 16); |
378 | if ((dum_hcd->port_status & | 382 | if ((dum_hcd->port_status & USB_PORT_STAT_ENABLE) && |
379 | USB_PORT_STAT_ENABLE) == 1 && | 383 | (dum_hcd->port_status & |
380 | (dum_hcd->port_status & | 384 | USB_PORT_STAT_LINK_STATE) == USB_SS_PORT_LS_U0 && |
381 | USB_SS_PORT_LS_U0) == 1 && | 385 | dum_hcd->rh_state != DUMMY_RH_SUSPENDED) |
382 | dum_hcd->rh_state != DUMMY_RH_SUSPENDED) | ||
383 | dum_hcd->active = 1; | 386 | dum_hcd->active = 1; |
384 | } | 387 | } |
385 | } else { | 388 | } else { |
@@ -440,18 +443,27 @@ static void set_link_state(struct dummy_hcd *dum_hcd) | |||
440 | (~dum_hcd->old_status) & dum_hcd->port_status; | 443 | (~dum_hcd->old_status) & dum_hcd->port_status; |
441 | 444 | ||
442 | /* Report reset and disconnect events to the driver */ | 445 | /* Report reset and disconnect events to the driver */ |
443 | if (dum->driver && (disconnect || reset)) { | 446 | if (dum->ints_enabled && (disconnect || reset)) { |
444 | stop_activity(dum); | 447 | stop_activity(dum); |
448 | ++dum->callback_usage; | ||
449 | spin_unlock(&dum->lock); | ||
445 | if (reset) | 450 | if (reset) |
446 | usb_gadget_udc_reset(&dum->gadget, dum->driver); | 451 | usb_gadget_udc_reset(&dum->gadget, dum->driver); |
447 | else | 452 | else |
448 | dum->driver->disconnect(&dum->gadget); | 453 | dum->driver->disconnect(&dum->gadget); |
454 | spin_lock(&dum->lock); | ||
455 | --dum->callback_usage; | ||
449 | } | 456 | } |
450 | } else if (dum_hcd->active != dum_hcd->old_active) { | 457 | } else if (dum_hcd->active != dum_hcd->old_active && |
458 | dum->ints_enabled) { | ||
459 | ++dum->callback_usage; | ||
460 | spin_unlock(&dum->lock); | ||
451 | if (dum_hcd->old_active && dum->driver->suspend) | 461 | if (dum_hcd->old_active && dum->driver->suspend) |
452 | dum->driver->suspend(&dum->gadget); | 462 | dum->driver->suspend(&dum->gadget); |
453 | else if (!dum_hcd->old_active && dum->driver->resume) | 463 | else if (!dum_hcd->old_active && dum->driver->resume) |
454 | dum->driver->resume(&dum->gadget); | 464 | dum->driver->resume(&dum->gadget); |
465 | spin_lock(&dum->lock); | ||
466 | --dum->callback_usage; | ||
455 | } | 467 | } |
456 | 468 | ||
457 | dum_hcd->old_status = dum_hcd->port_status; | 469 | dum_hcd->old_status = dum_hcd->port_status; |
@@ -972,8 +984,11 @@ static int dummy_udc_start(struct usb_gadget *g, | |||
972 | * can't enumerate without help from the driver we're binding. | 984 | * can't enumerate without help from the driver we're binding. |
973 | */ | 985 | */ |
974 | 986 | ||
987 | spin_lock_irq(&dum->lock); | ||
975 | dum->devstatus = 0; | 988 | dum->devstatus = 0; |
976 | dum->driver = driver; | 989 | dum->driver = driver; |
990 | dum->ints_enabled = 1; | ||
991 | spin_unlock_irq(&dum->lock); | ||
977 | 992 | ||
978 | return 0; | 993 | return 0; |
979 | } | 994 | } |
@@ -984,6 +999,16 @@ static int dummy_udc_stop(struct usb_gadget *g) | |||
984 | struct dummy *dum = dum_hcd->dum; | 999 | struct dummy *dum = dum_hcd->dum; |
985 | 1000 | ||
986 | spin_lock_irq(&dum->lock); | 1001 | spin_lock_irq(&dum->lock); |
1002 | dum->ints_enabled = 0; | ||
1003 | stop_activity(dum); | ||
1004 | |||
1005 | /* emulate synchronize_irq(): wait for callbacks to finish */ | ||
1006 | while (dum->callback_usage > 0) { | ||
1007 | spin_unlock_irq(&dum->lock); | ||
1008 | usleep_range(1000, 2000); | ||
1009 | spin_lock_irq(&dum->lock); | ||
1010 | } | ||
1011 | |||
987 | dum->driver = NULL; | 1012 | dum->driver = NULL; |
988 | spin_unlock_irq(&dum->lock); | 1013 | spin_unlock_irq(&dum->lock); |
989 | 1014 | ||
@@ -1037,7 +1062,12 @@ static int dummy_udc_probe(struct platform_device *pdev) | |||
1037 | memzero_explicit(&dum->gadget, sizeof(struct usb_gadget)); | 1062 | memzero_explicit(&dum->gadget, sizeof(struct usb_gadget)); |
1038 | dum->gadget.name = gadget_name; | 1063 | dum->gadget.name = gadget_name; |
1039 | dum->gadget.ops = &dummy_ops; | 1064 | dum->gadget.ops = &dummy_ops; |
1040 | dum->gadget.max_speed = USB_SPEED_SUPER; | 1065 | if (mod_data.is_super_speed) |
1066 | dum->gadget.max_speed = USB_SPEED_SUPER; | ||
1067 | else if (mod_data.is_high_speed) | ||
1068 | dum->gadget.max_speed = USB_SPEED_HIGH; | ||
1069 | else | ||
1070 | dum->gadget.max_speed = USB_SPEED_FULL; | ||
1041 | 1071 | ||
1042 | dum->gadget.dev.parent = &pdev->dev; | 1072 | dum->gadget.dev.parent = &pdev->dev; |
1043 | init_dummy_udc_hw(dum); | 1073 | init_dummy_udc_hw(dum); |
@@ -1246,6 +1276,8 @@ static int dummy_urb_enqueue( | |||
1246 | 1276 | ||
1247 | list_add_tail(&urbp->urbp_list, &dum_hcd->urbp_list); | 1277 | list_add_tail(&urbp->urbp_list, &dum_hcd->urbp_list); |
1248 | urb->hcpriv = urbp; | 1278 | urb->hcpriv = urbp; |
1279 | if (!dum_hcd->next_frame_urbp) | ||
1280 | dum_hcd->next_frame_urbp = urbp; | ||
1249 | if (usb_pipetype(urb->pipe) == PIPE_CONTROL) | 1281 | if (usb_pipetype(urb->pipe) == PIPE_CONTROL) |
1250 | urb->error_count = 1; /* mark as a new urb */ | 1282 | urb->error_count = 1; /* mark as a new urb */ |
1251 | 1283 | ||
@@ -1521,6 +1553,8 @@ static struct dummy_ep *find_endpoint(struct dummy *dum, u8 address) | |||
1521 | if (!is_active((dum->gadget.speed == USB_SPEED_SUPER ? | 1553 | if (!is_active((dum->gadget.speed == USB_SPEED_SUPER ? |
1522 | dum->ss_hcd : dum->hs_hcd))) | 1554 | dum->ss_hcd : dum->hs_hcd))) |
1523 | return NULL; | 1555 | return NULL; |
1556 | if (!dum->ints_enabled) | ||
1557 | return NULL; | ||
1524 | if ((address & ~USB_DIR_IN) == 0) | 1558 | if ((address & ~USB_DIR_IN) == 0) |
1525 | return &dum->ep[0]; | 1559 | return &dum->ep[0]; |
1526 | for (i = 1; i < DUMMY_ENDPOINTS; i++) { | 1560 | for (i = 1; i < DUMMY_ENDPOINTS; i++) { |
@@ -1762,6 +1796,7 @@ static void dummy_timer(unsigned long _dum_hcd) | |||
1762 | spin_unlock_irqrestore(&dum->lock, flags); | 1796 | spin_unlock_irqrestore(&dum->lock, flags); |
1763 | return; | 1797 | return; |
1764 | } | 1798 | } |
1799 | dum_hcd->next_frame_urbp = NULL; | ||
1765 | 1800 | ||
1766 | for (i = 0; i < DUMMY_ENDPOINTS; i++) { | 1801 | for (i = 0; i < DUMMY_ENDPOINTS; i++) { |
1767 | if (!ep_info[i].name) | 1802 | if (!ep_info[i].name) |
@@ -1778,6 +1813,10 @@ restart: | |||
1778 | int type; | 1813 | int type; |
1779 | int status = -EINPROGRESS; | 1814 | int status = -EINPROGRESS; |
1780 | 1815 | ||
1816 | /* stop when we reach URBs queued after the timer interrupt */ | ||
1817 | if (urbp == dum_hcd->next_frame_urbp) | ||
1818 | break; | ||
1819 | |||
1781 | urb = urbp->urb; | 1820 | urb = urbp->urb; |
1782 | if (urb->unlinked) | 1821 | if (urb->unlinked) |
1783 | goto return_urb; | 1822 | goto return_urb; |
@@ -1857,10 +1896,12 @@ restart: | |||
1857 | * until setup() returns; no reentrancy issues etc. | 1896 | * until setup() returns; no reentrancy issues etc. |
1858 | */ | 1897 | */ |
1859 | if (value > 0) { | 1898 | if (value > 0) { |
1899 | ++dum->callback_usage; | ||
1860 | spin_unlock(&dum->lock); | 1900 | spin_unlock(&dum->lock); |
1861 | value = dum->driver->setup(&dum->gadget, | 1901 | value = dum->driver->setup(&dum->gadget, |
1862 | &setup); | 1902 | &setup); |
1863 | spin_lock(&dum->lock); | 1903 | spin_lock(&dum->lock); |
1904 | --dum->callback_usage; | ||
1864 | 1905 | ||
1865 | if (value >= 0) { | 1906 | if (value >= 0) { |
1866 | /* no delays (max 64KB data stage) */ | 1907 | /* no delays (max 64KB data stage) */ |
@@ -2561,8 +2602,6 @@ static struct hc_driver dummy_hcd = { | |||
2561 | .product_desc = "Dummy host controller", | 2602 | .product_desc = "Dummy host controller", |
2562 | .hcd_priv_size = sizeof(struct dummy_hcd), | 2603 | .hcd_priv_size = sizeof(struct dummy_hcd), |
2563 | 2604 | ||
2564 | .flags = HCD_USB3 | HCD_SHARED, | ||
2565 | |||
2566 | .reset = dummy_setup, | 2605 | .reset = dummy_setup, |
2567 | .start = dummy_start, | 2606 | .start = dummy_start, |
2568 | .stop = dummy_stop, | 2607 | .stop = dummy_stop, |
@@ -2591,8 +2630,12 @@ static int dummy_hcd_probe(struct platform_device *pdev) | |||
2591 | dev_info(&pdev->dev, "%s, driver " DRIVER_VERSION "\n", driver_desc); | 2630 | dev_info(&pdev->dev, "%s, driver " DRIVER_VERSION "\n", driver_desc); |
2592 | dum = *((void **)dev_get_platdata(&pdev->dev)); | 2631 | dum = *((void **)dev_get_platdata(&pdev->dev)); |
2593 | 2632 | ||
2594 | if (!mod_data.is_super_speed) | 2633 | if (mod_data.is_super_speed) |
2634 | dummy_hcd.flags = HCD_USB3 | HCD_SHARED; | ||
2635 | else if (mod_data.is_high_speed) | ||
2595 | dummy_hcd.flags = HCD_USB2; | 2636 | dummy_hcd.flags = HCD_USB2; |
2637 | else | ||
2638 | dummy_hcd.flags = HCD_USB11; | ||
2596 | hs_hcd = usb_create_hcd(&dummy_hcd, &pdev->dev, dev_name(&pdev->dev)); | 2639 | hs_hcd = usb_create_hcd(&dummy_hcd, &pdev->dev, dev_name(&pdev->dev)); |
2597 | if (!hs_hcd) | 2640 | if (!hs_hcd) |
2598 | return -ENOMEM; | 2641 | return -ENOMEM; |
diff --git a/drivers/usb/gadget/udc/renesas_usb3.c b/drivers/usb/gadget/udc/renesas_usb3.c index df37c1e6e9d5..63a206122058 100644 --- a/drivers/usb/gadget/udc/renesas_usb3.c +++ b/drivers/usb/gadget/udc/renesas_usb3.c | |||
@@ -1038,7 +1038,7 @@ static int usb3_write_pipe(struct renesas_usb3_ep *usb3_ep, | |||
1038 | usb3_ep->ep.maxpacket); | 1038 | usb3_ep->ep.maxpacket); |
1039 | u8 *buf = usb3_req->req.buf + usb3_req->req.actual; | 1039 | u8 *buf = usb3_req->req.buf + usb3_req->req.actual; |
1040 | u32 tmp = 0; | 1040 | u32 tmp = 0; |
1041 | bool is_last; | 1041 | bool is_last = !len ? true : false; |
1042 | 1042 | ||
1043 | if (usb3_wait_pipe_status(usb3_ep, PX_STA_BUFSTS) < 0) | 1043 | if (usb3_wait_pipe_status(usb3_ep, PX_STA_BUFSTS) < 0) |
1044 | return -EBUSY; | 1044 | return -EBUSY; |
@@ -1059,7 +1059,8 @@ static int usb3_write_pipe(struct renesas_usb3_ep *usb3_ep, | |||
1059 | usb3_write(usb3, tmp, fifo_reg); | 1059 | usb3_write(usb3, tmp, fifo_reg); |
1060 | } | 1060 | } |
1061 | 1061 | ||
1062 | is_last = usb3_is_transfer_complete(usb3_ep, usb3_req); | 1062 | if (!is_last) |
1063 | is_last = usb3_is_transfer_complete(usb3_ep, usb3_req); | ||
1063 | /* Send the data */ | 1064 | /* Send the data */ |
1064 | usb3_set_px_con_send(usb3_ep, len, is_last); | 1065 | usb3_set_px_con_send(usb3_ep, len, is_last); |
1065 | 1066 | ||
@@ -1150,7 +1151,8 @@ static void usb3_start_pipe0(struct renesas_usb3_ep *usb3_ep, | |||
1150 | usb3_set_p0_con_for_ctrl_read_data(usb3); | 1151 | usb3_set_p0_con_for_ctrl_read_data(usb3); |
1151 | } else { | 1152 | } else { |
1152 | usb3_clear_bit(usb3, P0_MOD_DIR, USB3_P0_MOD); | 1153 | usb3_clear_bit(usb3, P0_MOD_DIR, USB3_P0_MOD); |
1153 | usb3_set_p0_con_for_ctrl_write_data(usb3); | 1154 | if (usb3_req->req.length) |
1155 | usb3_set_p0_con_for_ctrl_write_data(usb3); | ||
1154 | } | 1156 | } |
1155 | 1157 | ||
1156 | usb3_p0_xfer(usb3_ep, usb3_req); | 1158 | usb3_p0_xfer(usb3_ep, usb3_req); |
@@ -2053,7 +2055,16 @@ static u32 usb3_calc_ramarea(int ram_size) | |||
2053 | static u32 usb3_calc_rammap_val(struct renesas_usb3_ep *usb3_ep, | 2055 | static u32 usb3_calc_rammap_val(struct renesas_usb3_ep *usb3_ep, |
2054 | const struct usb_endpoint_descriptor *desc) | 2056 | const struct usb_endpoint_descriptor *desc) |
2055 | { | 2057 | { |
2056 | return usb3_ep->rammap_val | PN_RAMMAP_MPKT(usb_endpoint_maxp(desc)); | 2058 | int i; |
2059 | const u32 max_packet_array[] = {8, 16, 32, 64, 512}; | ||
2060 | u32 mpkt = PN_RAMMAP_MPKT(1024); | ||
2061 | |||
2062 | for (i = 0; i < ARRAY_SIZE(max_packet_array); i++) { | ||
2063 | if (usb_endpoint_maxp(desc) <= max_packet_array[i]) | ||
2064 | mpkt = PN_RAMMAP_MPKT(max_packet_array[i]); | ||
2065 | } | ||
2066 | |||
2067 | return usb3_ep->rammap_val | mpkt; | ||
2057 | } | 2068 | } |
2058 | 2069 | ||
2059 | static int usb3_enable_pipe_n(struct renesas_usb3_ep *usb3_ep, | 2070 | static int usb3_enable_pipe_n(struct renesas_usb3_ep *usb3_ep, |
diff --git a/drivers/usb/host/pci-quirks.c b/drivers/usb/host/pci-quirks.c index 658d9d1f9ea3..6dda3623a276 100644 --- a/drivers/usb/host/pci-quirks.c +++ b/drivers/usb/host/pci-quirks.c | |||
@@ -447,7 +447,7 @@ static int usb_asmedia_wait_write(struct pci_dev *pdev) | |||
447 | if ((value & ASMT_CONTROL_WRITE_BIT) == 0) | 447 | if ((value & ASMT_CONTROL_WRITE_BIT) == 0) |
448 | return 0; | 448 | return 0; |
449 | 449 | ||
450 | usleep_range(40, 60); | 450 | udelay(50); |
451 | } | 451 | } |
452 | 452 | ||
453 | dev_warn(&pdev->dev, "%s: check_write_ready timeout", __func__); | 453 | dev_warn(&pdev->dev, "%s: check_write_ready timeout", __func__); |
@@ -1022,7 +1022,7 @@ EXPORT_SYMBOL_GPL(usb_disable_xhci_ports); | |||
1022 | * | 1022 | * |
1023 | * Takes care of the handoff between the Pre-OS (i.e. BIOS) and the OS. | 1023 | * Takes care of the handoff between the Pre-OS (i.e. BIOS) and the OS. |
1024 | * It signals to the BIOS that the OS wants control of the host controller, | 1024 | * It signals to the BIOS that the OS wants control of the host controller, |
1025 | * and then waits 5 seconds for the BIOS to hand over control. | 1025 | * and then waits 1 second for the BIOS to hand over control. |
1026 | * If we timeout, assume the BIOS is broken and take control anyway. | 1026 | * If we timeout, assume the BIOS is broken and take control anyway. |
1027 | */ | 1027 | */ |
1028 | static void quirk_usb_handoff_xhci(struct pci_dev *pdev) | 1028 | static void quirk_usb_handoff_xhci(struct pci_dev *pdev) |
@@ -1069,9 +1069,9 @@ static void quirk_usb_handoff_xhci(struct pci_dev *pdev) | |||
1069 | if (val & XHCI_HC_BIOS_OWNED) { | 1069 | if (val & XHCI_HC_BIOS_OWNED) { |
1070 | writel(val | XHCI_HC_OS_OWNED, base + ext_cap_offset); | 1070 | writel(val | XHCI_HC_OS_OWNED, base + ext_cap_offset); |
1071 | 1071 | ||
1072 | /* Wait for 5 seconds with 10 microsecond polling interval */ | 1072 | /* Wait for 1 second with 10 microsecond polling interval */ |
1073 | timeout = handshake(base + ext_cap_offset, XHCI_HC_BIOS_OWNED, | 1073 | timeout = handshake(base + ext_cap_offset, XHCI_HC_BIOS_OWNED, |
1074 | 0, 5000, 10); | 1074 | 0, 1000000, 10); |
1075 | 1075 | ||
1076 | /* Assume a buggy BIOS and take HC ownership anyway */ | 1076 | /* Assume a buggy BIOS and take HC ownership anyway */ |
1077 | if (timeout) { | 1077 | if (timeout) { |
@@ -1100,7 +1100,7 @@ hc_init: | |||
1100 | * operational or runtime registers. Wait 5 seconds and no more. | 1100 | * operational or runtime registers. Wait 5 seconds and no more. |
1101 | */ | 1101 | */ |
1102 | timeout = handshake(op_reg_base + XHCI_STS_OFFSET, XHCI_STS_CNR, 0, | 1102 | timeout = handshake(op_reg_base + XHCI_STS_OFFSET, XHCI_STS_CNR, 0, |
1103 | 5000, 10); | 1103 | 5000000, 10); |
1104 | /* Assume a buggy HC and start HC initialization anyway */ | 1104 | /* Assume a buggy HC and start HC initialization anyway */ |
1105 | if (timeout) { | 1105 | if (timeout) { |
1106 | val = readl(op_reg_base + XHCI_STS_OFFSET); | 1106 | val = readl(op_reg_base + XHCI_STS_OFFSET); |
diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c index ad89a6d4111b..da9158f171cb 100644 --- a/drivers/usb/host/xhci-hub.c +++ b/drivers/usb/host/xhci-hub.c | |||
@@ -112,7 +112,7 @@ static int xhci_create_usb3_bos_desc(struct xhci_hcd *xhci, char *buf, | |||
112 | 112 | ||
113 | /* If PSI table exists, add the custom speed attributes from it */ | 113 | /* If PSI table exists, add the custom speed attributes from it */ |
114 | if (usb3_1 && xhci->usb3_rhub.psi_count) { | 114 | if (usb3_1 && xhci->usb3_rhub.psi_count) { |
115 | u32 ssp_cap_base, bm_attrib, psi; | 115 | u32 ssp_cap_base, bm_attrib, psi, psi_mant, psi_exp; |
116 | int offset; | 116 | int offset; |
117 | 117 | ||
118 | ssp_cap_base = USB_DT_BOS_SIZE + USB_DT_USB_SS_CAP_SIZE; | 118 | ssp_cap_base = USB_DT_BOS_SIZE + USB_DT_USB_SS_CAP_SIZE; |
@@ -139,6 +139,15 @@ static int xhci_create_usb3_bos_desc(struct xhci_hcd *xhci, char *buf, | |||
139 | for (i = 0; i < xhci->usb3_rhub.psi_count; i++) { | 139 | for (i = 0; i < xhci->usb3_rhub.psi_count; i++) { |
140 | psi = xhci->usb3_rhub.psi[i]; | 140 | psi = xhci->usb3_rhub.psi[i]; |
141 | psi &= ~USB_SSP_SUBLINK_SPEED_RSVD; | 141 | psi &= ~USB_SSP_SUBLINK_SPEED_RSVD; |
142 | psi_exp = XHCI_EXT_PORT_PSIE(psi); | ||
143 | psi_mant = XHCI_EXT_PORT_PSIM(psi); | ||
144 | |||
145 | /* Shift to Gbps and set SSP Link BIT(14) if 10Gpbs */ | ||
146 | for (; psi_exp < 3; psi_exp++) | ||
147 | psi_mant /= 1000; | ||
148 | if (psi_mant >= 10) | ||
149 | psi |= BIT(14); | ||
150 | |||
142 | if ((psi & PLT_MASK) == PLT_SYM) { | 151 | if ((psi & PLT_MASK) == PLT_SYM) { |
143 | /* Symmetric, create SSA RX and TX from one PSI entry */ | 152 | /* Symmetric, create SSA RX and TX from one PSI entry */ |
144 | put_unaligned_le32(psi, &buf[offset]); | 153 | put_unaligned_le32(psi, &buf[offset]); |
@@ -1506,9 +1515,6 @@ int xhci_bus_suspend(struct usb_hcd *hcd) | |||
1506 | t2 |= PORT_WKOC_E | PORT_WKCONN_E; | 1515 | t2 |= PORT_WKOC_E | PORT_WKCONN_E; |
1507 | t2 &= ~PORT_WKDISC_E; | 1516 | t2 &= ~PORT_WKDISC_E; |
1508 | } | 1517 | } |
1509 | if ((xhci->quirks & XHCI_U2_DISABLE_WAKE) && | ||
1510 | (hcd->speed < HCD_USB3)) | ||
1511 | t2 &= ~PORT_WAKE_BITS; | ||
1512 | } else | 1518 | } else |
1513 | t2 &= ~PORT_WAKE_BITS; | 1519 | t2 &= ~PORT_WAKE_BITS; |
1514 | 1520 | ||
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c index 8071c8fdd15e..76f392954733 100644 --- a/drivers/usb/host/xhci-pci.c +++ b/drivers/usb/host/xhci-pci.c | |||
@@ -54,11 +54,6 @@ | |||
54 | #define PCI_DEVICE_ID_INTEL_APL_XHCI 0x5aa8 | 54 | #define PCI_DEVICE_ID_INTEL_APL_XHCI 0x5aa8 |
55 | #define PCI_DEVICE_ID_INTEL_DNV_XHCI 0x19d0 | 55 | #define PCI_DEVICE_ID_INTEL_DNV_XHCI 0x19d0 |
56 | 56 | ||
57 | #define PCI_DEVICE_ID_AMD_PROMONTORYA_4 0x43b9 | ||
58 | #define PCI_DEVICE_ID_AMD_PROMONTORYA_3 0x43ba | ||
59 | #define PCI_DEVICE_ID_AMD_PROMONTORYA_2 0x43bb | ||
60 | #define PCI_DEVICE_ID_AMD_PROMONTORYA_1 0x43bc | ||
61 | |||
62 | #define PCI_DEVICE_ID_ASMEDIA_1042A_XHCI 0x1142 | 57 | #define PCI_DEVICE_ID_ASMEDIA_1042A_XHCI 0x1142 |
63 | 58 | ||
64 | static const char hcd_name[] = "xhci_hcd"; | 59 | static const char hcd_name[] = "xhci_hcd"; |
@@ -142,13 +137,6 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci) | |||
142 | if (pdev->vendor == PCI_VENDOR_ID_AMD) | 137 | if (pdev->vendor == PCI_VENDOR_ID_AMD) |
143 | xhci->quirks |= XHCI_TRUST_TX_LENGTH; | 138 | xhci->quirks |= XHCI_TRUST_TX_LENGTH; |
144 | 139 | ||
145 | if ((pdev->vendor == PCI_VENDOR_ID_AMD) && | ||
146 | ((pdev->device == PCI_DEVICE_ID_AMD_PROMONTORYA_4) || | ||
147 | (pdev->device == PCI_DEVICE_ID_AMD_PROMONTORYA_3) || | ||
148 | (pdev->device == PCI_DEVICE_ID_AMD_PROMONTORYA_2) || | ||
149 | (pdev->device == PCI_DEVICE_ID_AMD_PROMONTORYA_1))) | ||
150 | xhci->quirks |= XHCI_U2_DISABLE_WAKE; | ||
151 | |||
152 | if (pdev->vendor == PCI_VENDOR_ID_INTEL) { | 140 | if (pdev->vendor == PCI_VENDOR_ID_INTEL) { |
153 | xhci->quirks |= XHCI_LPM_SUPPORT; | 141 | xhci->quirks |= XHCI_LPM_SUPPORT; |
154 | xhci->quirks |= XHCI_INTEL_HOST; | 142 | xhci->quirks |= XHCI_INTEL_HOST; |
diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c index 163bafde709f..1cb6eaef4ae1 100644 --- a/drivers/usb/host/xhci-plat.c +++ b/drivers/usb/host/xhci-plat.c | |||
@@ -178,14 +178,18 @@ static int xhci_plat_probe(struct platform_device *pdev) | |||
178 | * 2. xhci_plat is child of a device from firmware (dwc3-plat) | 178 | * 2. xhci_plat is child of a device from firmware (dwc3-plat) |
179 | * 3. xhci_plat is grandchild of a pci device (dwc3-pci) | 179 | * 3. xhci_plat is grandchild of a pci device (dwc3-pci) |
180 | */ | 180 | */ |
181 | sysdev = &pdev->dev; | 181 | for (sysdev = &pdev->dev; sysdev; sysdev = sysdev->parent) { |
182 | if (sysdev->parent && !sysdev->of_node && sysdev->parent->of_node) | 182 | if (is_of_node(sysdev->fwnode) || |
183 | sysdev = sysdev->parent; | 183 | is_acpi_device_node(sysdev->fwnode)) |
184 | break; | ||
184 | #ifdef CONFIG_PCI | 185 | #ifdef CONFIG_PCI |
185 | else if (sysdev->parent && sysdev->parent->parent && | 186 | else if (sysdev->bus == &pci_bus_type) |
186 | sysdev->parent->parent->bus == &pci_bus_type) | 187 | break; |
187 | sysdev = sysdev->parent->parent; | ||
188 | #endif | 188 | #endif |
189 | } | ||
190 | |||
191 | if (!sysdev) | ||
192 | sysdev = &pdev->dev; | ||
189 | 193 | ||
190 | /* Try to set 64-bit DMA first */ | 194 | /* Try to set 64-bit DMA first */ |
191 | if (WARN_ON(!sysdev->dma_mask)) | 195 | if (WARN_ON(!sysdev->dma_mask)) |
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c index b2ff1ff1a02f..ee198ea47f49 100644 --- a/drivers/usb/host/xhci.c +++ b/drivers/usb/host/xhci.c | |||
@@ -1703,7 +1703,8 @@ static int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev, | |||
1703 | if (xhci->quirks & XHCI_MTK_HOST) { | 1703 | if (xhci->quirks & XHCI_MTK_HOST) { |
1704 | ret = xhci_mtk_add_ep_quirk(hcd, udev, ep); | 1704 | ret = xhci_mtk_add_ep_quirk(hcd, udev, ep); |
1705 | if (ret < 0) { | 1705 | if (ret < 0) { |
1706 | xhci_free_endpoint_ring(xhci, virt_dev, ep_index); | 1706 | xhci_ring_free(xhci, virt_dev->eps[ep_index].new_ring); |
1707 | virt_dev->eps[ep_index].new_ring = NULL; | ||
1707 | return ret; | 1708 | return ret; |
1708 | } | 1709 | } |
1709 | } | 1710 | } |
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h index 2abaa4d6d39d..2b48aa4f6b76 100644 --- a/drivers/usb/host/xhci.h +++ b/drivers/usb/host/xhci.h | |||
@@ -735,6 +735,8 @@ struct xhci_ep_ctx { | |||
735 | #define EP_MAXPSTREAMS(p) (((p) << 10) & EP_MAXPSTREAMS_MASK) | 735 | #define EP_MAXPSTREAMS(p) (((p) << 10) & EP_MAXPSTREAMS_MASK) |
736 | /* Endpoint is set up with a Linear Stream Array (vs. Secondary Stream Array) */ | 736 | /* Endpoint is set up with a Linear Stream Array (vs. Secondary Stream Array) */ |
737 | #define EP_HAS_LSA (1 << 15) | 737 | #define EP_HAS_LSA (1 << 15) |
738 | /* hosts with LEC=1 use bits 31:24 as ESIT high bits. */ | ||
739 | #define CTX_TO_MAX_ESIT_PAYLOAD_HI(p) (((p) >> 24) & 0xff) | ||
738 | 740 | ||
739 | /* ep_info2 bitmasks */ | 741 | /* ep_info2 bitmasks */ |
740 | /* | 742 | /* |
@@ -1681,7 +1683,7 @@ struct xhci_bus_state { | |||
1681 | 1683 | ||
1682 | static inline unsigned int hcd_index(struct usb_hcd *hcd) | 1684 | static inline unsigned int hcd_index(struct usb_hcd *hcd) |
1683 | { | 1685 | { |
1684 | if (hcd->speed == HCD_USB3) | 1686 | if (hcd->speed >= HCD_USB3) |
1685 | return 0; | 1687 | return 0; |
1686 | else | 1688 | else |
1687 | return 1; | 1689 | return 1; |
@@ -1826,7 +1828,7 @@ struct xhci_hcd { | |||
1826 | /* For controller with a broken Port Disable implementation */ | 1828 | /* For controller with a broken Port Disable implementation */ |
1827 | #define XHCI_BROKEN_PORT_PED (1 << 25) | 1829 | #define XHCI_BROKEN_PORT_PED (1 << 25) |
1828 | #define XHCI_LIMIT_ENDPOINT_INTERVAL_7 (1 << 26) | 1830 | #define XHCI_LIMIT_ENDPOINT_INTERVAL_7 (1 << 26) |
1829 | #define XHCI_U2_DISABLE_WAKE (1 << 27) | 1831 | /* Reserved. It was XHCI_U2_DISABLE_WAKE */ |
1830 | #define XHCI_ASMEDIA_MODIFY_FLOWCONTROL (1 << 28) | 1832 | #define XHCI_ASMEDIA_MODIFY_FLOWCONTROL (1 << 28) |
1831 | 1833 | ||
1832 | unsigned int num_active_eps; | 1834 | unsigned int num_active_eps; |
@@ -2540,8 +2542,8 @@ static inline const char *xhci_decode_ep_context(u32 info, u32 info2, u64 deq, | |||
2540 | u8 lsa; | 2542 | u8 lsa; |
2541 | u8 hid; | 2543 | u8 hid; |
2542 | 2544 | ||
2543 | esit = EP_MAX_ESIT_PAYLOAD_HI(info) << 16 | | 2545 | esit = CTX_TO_MAX_ESIT_PAYLOAD_HI(info) << 16 | |
2544 | EP_MAX_ESIT_PAYLOAD_LO(tx_info); | 2546 | CTX_TO_MAX_ESIT_PAYLOAD(tx_info); |
2545 | 2547 | ||
2546 | ep_state = info & EP_STATE_MASK; | 2548 | ep_state = info & EP_STATE_MASK; |
2547 | max_pstr = info & EP_MAXPSTREAMS_MASK; | 2549 | max_pstr = info & EP_MAXPSTREAMS_MASK; |
diff --git a/drivers/usb/renesas_usbhs/fifo.c b/drivers/usb/renesas_usbhs/fifo.c index d1af831f43eb..68f26904c316 100644 --- a/drivers/usb/renesas_usbhs/fifo.c +++ b/drivers/usb/renesas_usbhs/fifo.c | |||
@@ -282,11 +282,26 @@ static void usbhsf_fifo_clear(struct usbhs_pipe *pipe, | |||
282 | struct usbhs_fifo *fifo) | 282 | struct usbhs_fifo *fifo) |
283 | { | 283 | { |
284 | struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe); | 284 | struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe); |
285 | int ret = 0; | ||
285 | 286 | ||
286 | if (!usbhs_pipe_is_dcp(pipe)) | 287 | if (!usbhs_pipe_is_dcp(pipe)) { |
287 | usbhsf_fifo_barrier(priv, fifo); | 288 | /* |
289 | * This driver checks the pipe condition first to avoid -EBUSY | ||
290 | * from usbhsf_fifo_barrier() with about 10 msec delay in | ||
291 | * the interrupt handler if the pipe is RX direction and empty. | ||
292 | */ | ||
293 | if (usbhs_pipe_is_dir_in(pipe)) | ||
294 | ret = usbhs_pipe_is_accessible(pipe); | ||
295 | if (!ret) | ||
296 | ret = usbhsf_fifo_barrier(priv, fifo); | ||
297 | } | ||
288 | 298 | ||
289 | usbhs_write(priv, fifo->ctr, BCLR); | 299 | /* |
300 | * if non-DCP pipe, this driver should set BCLR when | ||
301 | * usbhsf_fifo_barrier() returns 0. | ||
302 | */ | ||
303 | if (!ret) | ||
304 | usbhs_write(priv, fifo->ctr, BCLR); | ||
290 | } | 305 | } |
291 | 306 | ||
292 | static int usbhsf_fifo_rcv_len(struct usbhs_priv *priv, | 307 | static int usbhsf_fifo_rcv_len(struct usbhs_priv *priv, |
diff --git a/drivers/usb/storage/transport.c b/drivers/usb/storage/transport.c index 1a59f335b063..a3ccb899df60 100644 --- a/drivers/usb/storage/transport.c +++ b/drivers/usb/storage/transport.c | |||
@@ -834,13 +834,25 @@ Retry_Sense: | |||
834 | if (result == USB_STOR_TRANSPORT_GOOD) { | 834 | if (result == USB_STOR_TRANSPORT_GOOD) { |
835 | srb->result = SAM_STAT_GOOD; | 835 | srb->result = SAM_STAT_GOOD; |
836 | srb->sense_buffer[0] = 0x0; | 836 | srb->sense_buffer[0] = 0x0; |
837 | } | ||
838 | |||
839 | /* | ||
840 | * ATA-passthru commands use sense data to report | ||
841 | * the command completion status, and often devices | ||
842 | * return Check Condition status when nothing is | ||
843 | * wrong. | ||
844 | */ | ||
845 | else if (srb->cmnd[0] == ATA_16 || | ||
846 | srb->cmnd[0] == ATA_12) { | ||
847 | /* leave the data alone */ | ||
848 | } | ||
837 | 849 | ||
838 | /* | 850 | /* |
839 | * If there was a problem, report an unspecified | 851 | * If there was a problem, report an unspecified |
840 | * hardware error to prevent the higher layers from | 852 | * hardware error to prevent the higher layers from |
841 | * entering an infinite retry loop. | 853 | * entering an infinite retry loop. |
842 | */ | 854 | */ |
843 | } else { | 855 | else { |
844 | srb->result = DID_ERROR << 16; | 856 | srb->result = DID_ERROR << 16; |
845 | if ((sshdr.response_code & 0x72) == 0x72) | 857 | if ((sshdr.response_code & 0x72) == 0x72) |
846 | srb->sense_buffer[1] = HARDWARE_ERROR; | 858 | srb->sense_buffer[1] = HARDWARE_ERROR; |
diff --git a/drivers/usb/storage/uas-detect.h b/drivers/usb/storage/uas-detect.h index f58caa9e6a27..a155cd02bce2 100644 --- a/drivers/usb/storage/uas-detect.h +++ b/drivers/usb/storage/uas-detect.h | |||
@@ -9,7 +9,8 @@ static int uas_is_interface(struct usb_host_interface *intf) | |||
9 | intf->desc.bInterfaceProtocol == USB_PR_UAS); | 9 | intf->desc.bInterfaceProtocol == USB_PR_UAS); |
10 | } | 10 | } |
11 | 11 | ||
12 | static int uas_find_uas_alt_setting(struct usb_interface *intf) | 12 | static struct usb_host_interface *uas_find_uas_alt_setting( |
13 | struct usb_interface *intf) | ||
13 | { | 14 | { |
14 | int i; | 15 | int i; |
15 | 16 | ||
@@ -17,10 +18,10 @@ static int uas_find_uas_alt_setting(struct usb_interface *intf) | |||
17 | struct usb_host_interface *alt = &intf->altsetting[i]; | 18 | struct usb_host_interface *alt = &intf->altsetting[i]; |
18 | 19 | ||
19 | if (uas_is_interface(alt)) | 20 | if (uas_is_interface(alt)) |
20 | return alt->desc.bAlternateSetting; | 21 | return alt; |
21 | } | 22 | } |
22 | 23 | ||
23 | return -ENODEV; | 24 | return NULL; |
24 | } | 25 | } |
25 | 26 | ||
26 | static int uas_find_endpoints(struct usb_host_interface *alt, | 27 | static int uas_find_endpoints(struct usb_host_interface *alt, |
@@ -58,14 +59,14 @@ static int uas_use_uas_driver(struct usb_interface *intf, | |||
58 | struct usb_device *udev = interface_to_usbdev(intf); | 59 | struct usb_device *udev = interface_to_usbdev(intf); |
59 | struct usb_hcd *hcd = bus_to_hcd(udev->bus); | 60 | struct usb_hcd *hcd = bus_to_hcd(udev->bus); |
60 | unsigned long flags = id->driver_info; | 61 | unsigned long flags = id->driver_info; |
61 | int r, alt; | 62 | struct usb_host_interface *alt; |
62 | 63 | int r; | |
63 | 64 | ||
64 | alt = uas_find_uas_alt_setting(intf); | 65 | alt = uas_find_uas_alt_setting(intf); |
65 | if (alt < 0) | 66 | if (!alt) |
66 | return 0; | 67 | return 0; |
67 | 68 | ||
68 | r = uas_find_endpoints(&intf->altsetting[alt], eps); | 69 | r = uas_find_endpoints(alt, eps); |
69 | if (r < 0) | 70 | if (r < 0) |
70 | return 0; | 71 | return 0; |
71 | 72 | ||
diff --git a/drivers/usb/storage/uas.c b/drivers/usb/storage/uas.c index cfb1e3bbd434..63cf981ed81c 100644 --- a/drivers/usb/storage/uas.c +++ b/drivers/usb/storage/uas.c | |||
@@ -873,14 +873,14 @@ MODULE_DEVICE_TABLE(usb, uas_usb_ids); | |||
873 | static int uas_switch_interface(struct usb_device *udev, | 873 | static int uas_switch_interface(struct usb_device *udev, |
874 | struct usb_interface *intf) | 874 | struct usb_interface *intf) |
875 | { | 875 | { |
876 | int alt; | 876 | struct usb_host_interface *alt; |
877 | 877 | ||
878 | alt = uas_find_uas_alt_setting(intf); | 878 | alt = uas_find_uas_alt_setting(intf); |
879 | if (alt < 0) | 879 | if (!alt) |
880 | return alt; | 880 | return -ENODEV; |
881 | 881 | ||
882 | return usb_set_interface(udev, | 882 | return usb_set_interface(udev, alt->desc.bInterfaceNumber, |
883 | intf->altsetting[0].desc.bInterfaceNumber, alt); | 883 | alt->desc.bAlternateSetting); |
884 | } | 884 | } |
885 | 885 | ||
886 | static int uas_configure_endpoints(struct uas_dev_info *devinfo) | 886 | static int uas_configure_endpoints(struct uas_dev_info *devinfo) |
diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h index 5a70c33ef0e0..eb06d88b41d6 100644 --- a/drivers/usb/storage/unusual_devs.h +++ b/drivers/usb/storage/unusual_devs.h | |||
@@ -1459,6 +1459,13 @@ UNUSUAL_DEV( 0x0bc2, 0x3010, 0x0000, 0x0000, | |||
1459 | USB_SC_DEVICE, USB_PR_DEVICE, NULL, | 1459 | USB_SC_DEVICE, USB_PR_DEVICE, NULL, |
1460 | US_FL_SANE_SENSE ), | 1460 | US_FL_SANE_SENSE ), |
1461 | 1461 | ||
1462 | /* Reported by Kris Lindgren <kris.lindgren@gmail.com> */ | ||
1463 | UNUSUAL_DEV( 0x0bc2, 0x3332, 0x0000, 0x9999, | ||
1464 | "Seagate", | ||
1465 | "External", | ||
1466 | USB_SC_DEVICE, USB_PR_DEVICE, NULL, | ||
1467 | US_FL_NO_WP_DETECT ), | ||
1468 | |||
1462 | UNUSUAL_DEV( 0x0d49, 0x7310, 0x0000, 0x9999, | 1469 | UNUSUAL_DEV( 0x0d49, 0x7310, 0x0000, 0x9999, |
1463 | "Maxtor", | 1470 | "Maxtor", |
1464 | "USB to SATA", | 1471 | "USB to SATA", |
diff --git a/drivers/uwb/hwa-rc.c b/drivers/uwb/hwa-rc.c index 35a1e777b449..9a53912bdfe9 100644 --- a/drivers/uwb/hwa-rc.c +++ b/drivers/uwb/hwa-rc.c | |||
@@ -825,6 +825,8 @@ static int hwarc_probe(struct usb_interface *iface, | |||
825 | 825 | ||
826 | if (iface->cur_altsetting->desc.bNumEndpoints < 1) | 826 | if (iface->cur_altsetting->desc.bNumEndpoints < 1) |
827 | return -ENODEV; | 827 | return -ENODEV; |
828 | if (!usb_endpoint_xfer_int(&iface->cur_altsetting->endpoint[0].desc)) | ||
829 | return -ENODEV; | ||
828 | 830 | ||
829 | result = -ENOMEM; | 831 | result = -ENOMEM; |
830 | uwb_rc = uwb_rc_alloc(); | 832 | uwb_rc = uwb_rc_alloc(); |
diff --git a/drivers/uwb/uwbd.c b/drivers/uwb/uwbd.c index 01c20a260a8b..39dd4ef53c77 100644 --- a/drivers/uwb/uwbd.c +++ b/drivers/uwb/uwbd.c | |||
@@ -302,18 +302,22 @@ static int uwbd(void *param) | |||
302 | /** Start the UWB daemon */ | 302 | /** Start the UWB daemon */ |
303 | void uwbd_start(struct uwb_rc *rc) | 303 | void uwbd_start(struct uwb_rc *rc) |
304 | { | 304 | { |
305 | rc->uwbd.task = kthread_run(uwbd, rc, "uwbd"); | 305 | struct task_struct *task = kthread_run(uwbd, rc, "uwbd"); |
306 | if (rc->uwbd.task == NULL) | 306 | if (IS_ERR(task)) { |
307 | rc->uwbd.task = NULL; | ||
307 | printk(KERN_ERR "UWB: Cannot start management daemon; " | 308 | printk(KERN_ERR "UWB: Cannot start management daemon; " |
308 | "UWB won't work\n"); | 309 | "UWB won't work\n"); |
309 | else | 310 | } else { |
311 | rc->uwbd.task = task; | ||
310 | rc->uwbd.pid = rc->uwbd.task->pid; | 312 | rc->uwbd.pid = rc->uwbd.task->pid; |
313 | } | ||
311 | } | 314 | } |
312 | 315 | ||
313 | /* Stop the UWB daemon and free any unprocessed events */ | 316 | /* Stop the UWB daemon and free any unprocessed events */ |
314 | void uwbd_stop(struct uwb_rc *rc) | 317 | void uwbd_stop(struct uwb_rc *rc) |
315 | { | 318 | { |
316 | kthread_stop(rc->uwbd.task); | 319 | if (rc->uwbd.task) |
320 | kthread_stop(rc->uwbd.task); | ||
317 | uwbd_flush(rc); | 321 | uwbd_flush(rc); |
318 | } | 322 | } |
319 | 323 | ||
diff --git a/drivers/xen/xen-pciback/conf_space_header.c b/drivers/xen/xen-pciback/conf_space_header.c index 5fbfd9cfb6d6..5b3d57fc82d3 100644 --- a/drivers/xen/xen-pciback/conf_space_header.c +++ b/drivers/xen/xen-pciback/conf_space_header.c | |||
@@ -169,6 +169,9 @@ static int rom_write(struct pci_dev *dev, int offset, u32 value, void *data) | |||
169 | static int bar_write(struct pci_dev *dev, int offset, u32 value, void *data) | 169 | static int bar_write(struct pci_dev *dev, int offset, u32 value, void *data) |
170 | { | 170 | { |
171 | struct pci_bar_info *bar = data; | 171 | struct pci_bar_info *bar = data; |
172 | unsigned int pos = (offset - PCI_BASE_ADDRESS_0) / 4; | ||
173 | const struct resource *res = dev->resource; | ||
174 | u32 mask; | ||
172 | 175 | ||
173 | if (unlikely(!bar)) { | 176 | if (unlikely(!bar)) { |
174 | pr_warn(DRV_NAME ": driver data not found for %s\n", | 177 | pr_warn(DRV_NAME ": driver data not found for %s\n", |
@@ -179,7 +182,13 @@ static int bar_write(struct pci_dev *dev, int offset, u32 value, void *data) | |||
179 | /* A write to obtain the length must happen as a 32-bit write. | 182 | /* A write to obtain the length must happen as a 32-bit write. |
180 | * This does not (yet) support writing individual bytes | 183 | * This does not (yet) support writing individual bytes |
181 | */ | 184 | */ |
182 | if (value == ~0) | 185 | if (res[pos].flags & IORESOURCE_IO) |
186 | mask = ~PCI_BASE_ADDRESS_IO_MASK; | ||
187 | else if (pos && (res[pos - 1].flags & IORESOURCE_MEM_64)) | ||
188 | mask = 0; | ||
189 | else | ||
190 | mask = ~PCI_BASE_ADDRESS_MEM_MASK; | ||
191 | if ((value | mask) == ~0U) | ||
183 | bar->which = 1; | 192 | bar->which = 1; |
184 | else { | 193 | else { |
185 | u32 tmpval; | 194 | u32 tmpval; |
diff --git a/drivers/xen/xenbus/xenbus_client.c b/drivers/xen/xenbus/xenbus_client.c index 82a8866758ee..a1c17000129b 100644 --- a/drivers/xen/xenbus/xenbus_client.c +++ b/drivers/xen/xenbus/xenbus_client.c | |||
@@ -519,64 +519,6 @@ static int __xenbus_map_ring(struct xenbus_device *dev, | |||
519 | return err; | 519 | return err; |
520 | } | 520 | } |
521 | 521 | ||
522 | static int xenbus_map_ring_valloc_pv(struct xenbus_device *dev, | ||
523 | grant_ref_t *gnt_refs, | ||
524 | unsigned int nr_grefs, | ||
525 | void **vaddr) | ||
526 | { | ||
527 | struct xenbus_map_node *node; | ||
528 | struct vm_struct *area; | ||
529 | pte_t *ptes[XENBUS_MAX_RING_GRANTS]; | ||
530 | phys_addr_t phys_addrs[XENBUS_MAX_RING_GRANTS]; | ||
531 | int err = GNTST_okay; | ||
532 | int i; | ||
533 | bool leaked; | ||
534 | |||
535 | *vaddr = NULL; | ||
536 | |||
537 | if (nr_grefs > XENBUS_MAX_RING_GRANTS) | ||
538 | return -EINVAL; | ||
539 | |||
540 | node = kzalloc(sizeof(*node), GFP_KERNEL); | ||
541 | if (!node) | ||
542 | return -ENOMEM; | ||
543 | |||
544 | area = alloc_vm_area(XEN_PAGE_SIZE * nr_grefs, ptes); | ||
545 | if (!area) { | ||
546 | kfree(node); | ||
547 | return -ENOMEM; | ||
548 | } | ||
549 | |||
550 | for (i = 0; i < nr_grefs; i++) | ||
551 | phys_addrs[i] = arbitrary_virt_to_machine(ptes[i]).maddr; | ||
552 | |||
553 | err = __xenbus_map_ring(dev, gnt_refs, nr_grefs, node->handles, | ||
554 | phys_addrs, | ||
555 | GNTMAP_host_map | GNTMAP_contains_pte, | ||
556 | &leaked); | ||
557 | if (err) | ||
558 | goto failed; | ||
559 | |||
560 | node->nr_handles = nr_grefs; | ||
561 | node->pv.area = area; | ||
562 | |||
563 | spin_lock(&xenbus_valloc_lock); | ||
564 | list_add(&node->next, &xenbus_valloc_pages); | ||
565 | spin_unlock(&xenbus_valloc_lock); | ||
566 | |||
567 | *vaddr = area->addr; | ||
568 | return 0; | ||
569 | |||
570 | failed: | ||
571 | if (!leaked) | ||
572 | free_vm_area(area); | ||
573 | else | ||
574 | pr_alert("leaking VM area %p size %u page(s)", area, nr_grefs); | ||
575 | |||
576 | kfree(node); | ||
577 | return err; | ||
578 | } | ||
579 | |||
580 | struct map_ring_valloc_hvm | 522 | struct map_ring_valloc_hvm |
581 | { | 523 | { |
582 | unsigned int idx; | 524 | unsigned int idx; |
@@ -725,6 +667,65 @@ int xenbus_unmap_ring_vfree(struct xenbus_device *dev, void *vaddr) | |||
725 | } | 667 | } |
726 | EXPORT_SYMBOL_GPL(xenbus_unmap_ring_vfree); | 668 | EXPORT_SYMBOL_GPL(xenbus_unmap_ring_vfree); |
727 | 669 | ||
670 | #ifdef CONFIG_XEN_PV | ||
671 | static int xenbus_map_ring_valloc_pv(struct xenbus_device *dev, | ||
672 | grant_ref_t *gnt_refs, | ||
673 | unsigned int nr_grefs, | ||
674 | void **vaddr) | ||
675 | { | ||
676 | struct xenbus_map_node *node; | ||
677 | struct vm_struct *area; | ||
678 | pte_t *ptes[XENBUS_MAX_RING_GRANTS]; | ||
679 | phys_addr_t phys_addrs[XENBUS_MAX_RING_GRANTS]; | ||
680 | int err = GNTST_okay; | ||
681 | int i; | ||
682 | bool leaked; | ||
683 | |||
684 | *vaddr = NULL; | ||
685 | |||
686 | if (nr_grefs > XENBUS_MAX_RING_GRANTS) | ||
687 | return -EINVAL; | ||
688 | |||
689 | node = kzalloc(sizeof(*node), GFP_KERNEL); | ||
690 | if (!node) | ||
691 | return -ENOMEM; | ||
692 | |||
693 | area = alloc_vm_area(XEN_PAGE_SIZE * nr_grefs, ptes); | ||
694 | if (!area) { | ||
695 | kfree(node); | ||
696 | return -ENOMEM; | ||
697 | } | ||
698 | |||
699 | for (i = 0; i < nr_grefs; i++) | ||
700 | phys_addrs[i] = arbitrary_virt_to_machine(ptes[i]).maddr; | ||
701 | |||
702 | err = __xenbus_map_ring(dev, gnt_refs, nr_grefs, node->handles, | ||
703 | phys_addrs, | ||
704 | GNTMAP_host_map | GNTMAP_contains_pte, | ||
705 | &leaked); | ||
706 | if (err) | ||
707 | goto failed; | ||
708 | |||
709 | node->nr_handles = nr_grefs; | ||
710 | node->pv.area = area; | ||
711 | |||
712 | spin_lock(&xenbus_valloc_lock); | ||
713 | list_add(&node->next, &xenbus_valloc_pages); | ||
714 | spin_unlock(&xenbus_valloc_lock); | ||
715 | |||
716 | *vaddr = area->addr; | ||
717 | return 0; | ||
718 | |||
719 | failed: | ||
720 | if (!leaked) | ||
721 | free_vm_area(area); | ||
722 | else | ||
723 | pr_alert("leaking VM area %p size %u page(s)", area, nr_grefs); | ||
724 | |||
725 | kfree(node); | ||
726 | return err; | ||
727 | } | ||
728 | |||
728 | static int xenbus_unmap_ring_vfree_pv(struct xenbus_device *dev, void *vaddr) | 729 | static int xenbus_unmap_ring_vfree_pv(struct xenbus_device *dev, void *vaddr) |
729 | { | 730 | { |
730 | struct xenbus_map_node *node; | 731 | struct xenbus_map_node *node; |
@@ -788,6 +789,12 @@ static int xenbus_unmap_ring_vfree_pv(struct xenbus_device *dev, void *vaddr) | |||
788 | return err; | 789 | return err; |
789 | } | 790 | } |
790 | 791 | ||
792 | static const struct xenbus_ring_ops ring_ops_pv = { | ||
793 | .map = xenbus_map_ring_valloc_pv, | ||
794 | .unmap = xenbus_unmap_ring_vfree_pv, | ||
795 | }; | ||
796 | #endif | ||
797 | |||
791 | struct unmap_ring_vfree_hvm | 798 | struct unmap_ring_vfree_hvm |
792 | { | 799 | { |
793 | unsigned int idx; | 800 | unsigned int idx; |
@@ -916,11 +923,6 @@ enum xenbus_state xenbus_read_driver_state(const char *path) | |||
916 | } | 923 | } |
917 | EXPORT_SYMBOL_GPL(xenbus_read_driver_state); | 924 | EXPORT_SYMBOL_GPL(xenbus_read_driver_state); |
918 | 925 | ||
919 | static const struct xenbus_ring_ops ring_ops_pv = { | ||
920 | .map = xenbus_map_ring_valloc_pv, | ||
921 | .unmap = xenbus_unmap_ring_vfree_pv, | ||
922 | }; | ||
923 | |||
924 | static const struct xenbus_ring_ops ring_ops_hvm = { | 926 | static const struct xenbus_ring_ops ring_ops_hvm = { |
925 | .map = xenbus_map_ring_valloc_hvm, | 927 | .map = xenbus_map_ring_valloc_hvm, |
926 | .unmap = xenbus_unmap_ring_vfree_hvm, | 928 | .unmap = xenbus_unmap_ring_vfree_hvm, |
@@ -928,8 +930,10 @@ static const struct xenbus_ring_ops ring_ops_hvm = { | |||
928 | 930 | ||
929 | void __init xenbus_ring_ops_init(void) | 931 | void __init xenbus_ring_ops_init(void) |
930 | { | 932 | { |
933 | #ifdef CONFIG_XEN_PV | ||
931 | if (!xen_feature(XENFEAT_auto_translated_physmap)) | 934 | if (!xen_feature(XENFEAT_auto_translated_physmap)) |
932 | ring_ops = &ring_ops_pv; | 935 | ring_ops = &ring_ops_pv; |
933 | else | 936 | else |
937 | #endif | ||
934 | ring_ops = &ring_ops_hvm; | 938 | ring_ops = &ring_ops_hvm; |
935 | } | 939 | } |
diff --git a/fs/binfmt_misc.c b/fs/binfmt_misc.c index ce7181ea60fa..2a46762def31 100644 --- a/fs/binfmt_misc.c +++ b/fs/binfmt_misc.c | |||
@@ -54,7 +54,7 @@ typedef struct { | |||
54 | int size; /* size of magic/mask */ | 54 | int size; /* size of magic/mask */ |
55 | char *magic; /* magic or filename extension */ | 55 | char *magic; /* magic or filename extension */ |
56 | char *mask; /* mask, NULL for exact match */ | 56 | char *mask; /* mask, NULL for exact match */ |
57 | char *interpreter; /* filename of interpreter */ | 57 | const char *interpreter; /* filename of interpreter */ |
58 | char *name; | 58 | char *name; |
59 | struct dentry *dentry; | 59 | struct dentry *dentry; |
60 | struct file *interp_file; | 60 | struct file *interp_file; |
@@ -131,27 +131,26 @@ static int load_misc_binary(struct linux_binprm *bprm) | |||
131 | { | 131 | { |
132 | Node *fmt; | 132 | Node *fmt; |
133 | struct file *interp_file = NULL; | 133 | struct file *interp_file = NULL; |
134 | char iname[BINPRM_BUF_SIZE]; | ||
135 | const char *iname_addr = iname; | ||
136 | int retval; | 134 | int retval; |
137 | int fd_binary = -1; | 135 | int fd_binary = -1; |
138 | 136 | ||
139 | retval = -ENOEXEC; | 137 | retval = -ENOEXEC; |
140 | if (!enabled) | 138 | if (!enabled) |
141 | goto ret; | 139 | return retval; |
142 | 140 | ||
143 | /* to keep locking time low, we copy the interpreter string */ | 141 | /* to keep locking time low, we copy the interpreter string */ |
144 | read_lock(&entries_lock); | 142 | read_lock(&entries_lock); |
145 | fmt = check_file(bprm); | 143 | fmt = check_file(bprm); |
146 | if (fmt) | 144 | if (fmt) |
147 | strlcpy(iname, fmt->interpreter, BINPRM_BUF_SIZE); | 145 | dget(fmt->dentry); |
148 | read_unlock(&entries_lock); | 146 | read_unlock(&entries_lock); |
149 | if (!fmt) | 147 | if (!fmt) |
150 | goto ret; | 148 | return retval; |
151 | 149 | ||
152 | /* Need to be able to load the file after exec */ | 150 | /* Need to be able to load the file after exec */ |
151 | retval = -ENOENT; | ||
153 | if (bprm->interp_flags & BINPRM_FLAGS_PATH_INACCESSIBLE) | 152 | if (bprm->interp_flags & BINPRM_FLAGS_PATH_INACCESSIBLE) |
154 | return -ENOENT; | 153 | goto ret; |
155 | 154 | ||
156 | if (!(fmt->flags & MISC_FMT_PRESERVE_ARGV0)) { | 155 | if (!(fmt->flags & MISC_FMT_PRESERVE_ARGV0)) { |
157 | retval = remove_arg_zero(bprm); | 156 | retval = remove_arg_zero(bprm); |
@@ -195,22 +194,22 @@ static int load_misc_binary(struct linux_binprm *bprm) | |||
195 | bprm->argc++; | 194 | bprm->argc++; |
196 | 195 | ||
197 | /* add the interp as argv[0] */ | 196 | /* add the interp as argv[0] */ |
198 | retval = copy_strings_kernel(1, &iname_addr, bprm); | 197 | retval = copy_strings_kernel(1, &fmt->interpreter, bprm); |
199 | if (retval < 0) | 198 | if (retval < 0) |
200 | goto error; | 199 | goto error; |
201 | bprm->argc++; | 200 | bprm->argc++; |
202 | 201 | ||
203 | /* Update interp in case binfmt_script needs it. */ | 202 | /* Update interp in case binfmt_script needs it. */ |
204 | retval = bprm_change_interp(iname, bprm); | 203 | retval = bprm_change_interp(fmt->interpreter, bprm); |
205 | if (retval < 0) | 204 | if (retval < 0) |
206 | goto error; | 205 | goto error; |
207 | 206 | ||
208 | if (fmt->flags & MISC_FMT_OPEN_FILE && fmt->interp_file) { | 207 | if (fmt->flags & MISC_FMT_OPEN_FILE) { |
209 | interp_file = filp_clone_open(fmt->interp_file); | 208 | interp_file = filp_clone_open(fmt->interp_file); |
210 | if (!IS_ERR(interp_file)) | 209 | if (!IS_ERR(interp_file)) |
211 | deny_write_access(interp_file); | 210 | deny_write_access(interp_file); |
212 | } else { | 211 | } else { |
213 | interp_file = open_exec(iname); | 212 | interp_file = open_exec(fmt->interpreter); |
214 | } | 213 | } |
215 | retval = PTR_ERR(interp_file); | 214 | retval = PTR_ERR(interp_file); |
216 | if (IS_ERR(interp_file)) | 215 | if (IS_ERR(interp_file)) |
@@ -238,6 +237,7 @@ static int load_misc_binary(struct linux_binprm *bprm) | |||
238 | goto error; | 237 | goto error; |
239 | 238 | ||
240 | ret: | 239 | ret: |
240 | dput(fmt->dentry); | ||
241 | return retval; | 241 | return retval; |
242 | error: | 242 | error: |
243 | if (fd_binary > 0) | 243 | if (fd_binary > 0) |
@@ -594,8 +594,13 @@ static struct inode *bm_get_inode(struct super_block *sb, int mode) | |||
594 | 594 | ||
595 | static void bm_evict_inode(struct inode *inode) | 595 | static void bm_evict_inode(struct inode *inode) |
596 | { | 596 | { |
597 | Node *e = inode->i_private; | ||
598 | |||
599 | if (e->flags & MISC_FMT_OPEN_FILE) | ||
600 | filp_close(e->interp_file, NULL); | ||
601 | |||
597 | clear_inode(inode); | 602 | clear_inode(inode); |
598 | kfree(inode->i_private); | 603 | kfree(e); |
599 | } | 604 | } |
600 | 605 | ||
601 | static void kill_node(Node *e) | 606 | static void kill_node(Node *e) |
@@ -603,24 +608,14 @@ static void kill_node(Node *e) | |||
603 | struct dentry *dentry; | 608 | struct dentry *dentry; |
604 | 609 | ||
605 | write_lock(&entries_lock); | 610 | write_lock(&entries_lock); |
606 | dentry = e->dentry; | 611 | list_del_init(&e->list); |
607 | if (dentry) { | ||
608 | list_del_init(&e->list); | ||
609 | e->dentry = NULL; | ||
610 | } | ||
611 | write_unlock(&entries_lock); | 612 | write_unlock(&entries_lock); |
612 | 613 | ||
613 | if ((e->flags & MISC_FMT_OPEN_FILE) && e->interp_file) { | 614 | dentry = e->dentry; |
614 | filp_close(e->interp_file, NULL); | 615 | drop_nlink(d_inode(dentry)); |
615 | e->interp_file = NULL; | 616 | d_drop(dentry); |
616 | } | 617 | dput(dentry); |
617 | 618 | simple_release_fs(&bm_mnt, &entry_count); | |
618 | if (dentry) { | ||
619 | drop_nlink(d_inode(dentry)); | ||
620 | d_drop(dentry); | ||
621 | dput(dentry); | ||
622 | simple_release_fs(&bm_mnt, &entry_count); | ||
623 | } | ||
624 | } | 619 | } |
625 | 620 | ||
626 | /* /<entry> */ | 621 | /* /<entry> */ |
@@ -665,7 +660,8 @@ static ssize_t bm_entry_write(struct file *file, const char __user *buffer, | |||
665 | root = file_inode(file)->i_sb->s_root; | 660 | root = file_inode(file)->i_sb->s_root; |
666 | inode_lock(d_inode(root)); | 661 | inode_lock(d_inode(root)); |
667 | 662 | ||
668 | kill_node(e); | 663 | if (!list_empty(&e->list)) |
664 | kill_node(e); | ||
669 | 665 | ||
670 | inode_unlock(d_inode(root)); | 666 | inode_unlock(d_inode(root)); |
671 | break; | 667 | break; |
@@ -794,7 +790,7 @@ static ssize_t bm_status_write(struct file *file, const char __user *buffer, | |||
794 | inode_lock(d_inode(root)); | 790 | inode_lock(d_inode(root)); |
795 | 791 | ||
796 | while (!list_empty(&entries)) | 792 | while (!list_empty(&entries)) |
797 | kill_node(list_entry(entries.next, Node, list)); | 793 | kill_node(list_first_entry(&entries, Node, list)); |
798 | 794 | ||
799 | inode_unlock(d_inode(root)); | 795 | inode_unlock(d_inode(root)); |
800 | break; | 796 | break; |
diff --git a/fs/binfmt_script.c b/fs/binfmt_script.c index afdf4e3cafc2..7cde3f46ad26 100644 --- a/fs/binfmt_script.c +++ b/fs/binfmt_script.c | |||
@@ -19,7 +19,6 @@ static int load_script(struct linux_binprm *bprm) | |||
19 | const char *i_arg, *i_name; | 19 | const char *i_arg, *i_name; |
20 | char *cp; | 20 | char *cp; |
21 | struct file *file; | 21 | struct file *file; |
22 | char interp[BINPRM_BUF_SIZE]; | ||
23 | int retval; | 22 | int retval; |
24 | 23 | ||
25 | if ((bprm->buf[0] != '#') || (bprm->buf[1] != '!')) | 24 | if ((bprm->buf[0] != '#') || (bprm->buf[1] != '!')) |
@@ -55,7 +54,7 @@ static int load_script(struct linux_binprm *bprm) | |||
55 | break; | 54 | break; |
56 | } | 55 | } |
57 | for (cp = bprm->buf+2; (*cp == ' ') || (*cp == '\t'); cp++); | 56 | for (cp = bprm->buf+2; (*cp == ' ') || (*cp == '\t'); cp++); |
58 | if (*cp == '\0') | 57 | if (*cp == '\0') |
59 | return -ENOEXEC; /* No interpreter name found */ | 58 | return -ENOEXEC; /* No interpreter name found */ |
60 | i_name = cp; | 59 | i_name = cp; |
61 | i_arg = NULL; | 60 | i_arg = NULL; |
@@ -65,7 +64,6 @@ static int load_script(struct linux_binprm *bprm) | |||
65 | *cp++ = '\0'; | 64 | *cp++ = '\0'; |
66 | if (*cp) | 65 | if (*cp) |
67 | i_arg = cp; | 66 | i_arg = cp; |
68 | strcpy (interp, i_name); | ||
69 | /* | 67 | /* |
70 | * OK, we've parsed out the interpreter name and | 68 | * OK, we've parsed out the interpreter name and |
71 | * (optional) argument. | 69 | * (optional) argument. |
@@ -80,24 +78,27 @@ static int load_script(struct linux_binprm *bprm) | |||
80 | if (retval) | 78 | if (retval) |
81 | return retval; | 79 | return retval; |
82 | retval = copy_strings_kernel(1, &bprm->interp, bprm); | 80 | retval = copy_strings_kernel(1, &bprm->interp, bprm); |
83 | if (retval < 0) return retval; | 81 | if (retval < 0) |
82 | return retval; | ||
84 | bprm->argc++; | 83 | bprm->argc++; |
85 | if (i_arg) { | 84 | if (i_arg) { |
86 | retval = copy_strings_kernel(1, &i_arg, bprm); | 85 | retval = copy_strings_kernel(1, &i_arg, bprm); |
87 | if (retval < 0) return retval; | 86 | if (retval < 0) |
87 | return retval; | ||
88 | bprm->argc++; | 88 | bprm->argc++; |
89 | } | 89 | } |
90 | retval = copy_strings_kernel(1, &i_name, bprm); | 90 | retval = copy_strings_kernel(1, &i_name, bprm); |
91 | if (retval) return retval; | 91 | if (retval) |
92 | return retval; | ||
92 | bprm->argc++; | 93 | bprm->argc++; |
93 | retval = bprm_change_interp(interp, bprm); | 94 | retval = bprm_change_interp(i_name, bprm); |
94 | if (retval < 0) | 95 | if (retval < 0) |
95 | return retval; | 96 | return retval; |
96 | 97 | ||
97 | /* | 98 | /* |
98 | * OK, now restart the process with the interpreter's dentry. | 99 | * OK, now restart the process with the interpreter's dentry. |
99 | */ | 100 | */ |
100 | file = open_exec(interp); | 101 | file = open_exec(i_name); |
101 | if (IS_ERR(file)) | 102 | if (IS_ERR(file)) |
102 | return PTR_ERR(file); | 103 | return PTR_ERR(file); |
103 | 104 | ||
diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c index b51d23f5cafa..280384bf34f1 100644 --- a/fs/btrfs/compression.c +++ b/fs/btrfs/compression.c | |||
@@ -107,7 +107,8 @@ static void end_compressed_bio_read(struct bio *bio) | |||
107 | struct inode *inode; | 107 | struct inode *inode; |
108 | struct page *page; | 108 | struct page *page; |
109 | unsigned long index; | 109 | unsigned long index; |
110 | int ret; | 110 | unsigned int mirror = btrfs_io_bio(bio)->mirror_num; |
111 | int ret = 0; | ||
111 | 112 | ||
112 | if (bio->bi_status) | 113 | if (bio->bi_status) |
113 | cb->errors = 1; | 114 | cb->errors = 1; |
@@ -118,6 +119,21 @@ static void end_compressed_bio_read(struct bio *bio) | |||
118 | if (!refcount_dec_and_test(&cb->pending_bios)) | 119 | if (!refcount_dec_and_test(&cb->pending_bios)) |
119 | goto out; | 120 | goto out; |
120 | 121 | ||
122 | /* | ||
123 | * Record the correct mirror_num in cb->orig_bio so that | ||
124 | * read-repair can work properly. | ||
125 | */ | ||
126 | ASSERT(btrfs_io_bio(cb->orig_bio)); | ||
127 | btrfs_io_bio(cb->orig_bio)->mirror_num = mirror; | ||
128 | cb->mirror_num = mirror; | ||
129 | |||
130 | /* | ||
131 | * Some IO in this cb have failed, just skip checksum as there | ||
132 | * is no way it could be correct. | ||
133 | */ | ||
134 | if (cb->errors == 1) | ||
135 | goto csum_failed; | ||
136 | |||
121 | inode = cb->inode; | 137 | inode = cb->inode; |
122 | ret = check_compressed_csum(BTRFS_I(inode), cb, | 138 | ret = check_compressed_csum(BTRFS_I(inode), cb, |
123 | (u64)bio->bi_iter.bi_sector << 9); | 139 | (u64)bio->bi_iter.bi_sector << 9); |
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index 5a8933da39a7..8fc690384c58 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h | |||
@@ -709,7 +709,6 @@ struct btrfs_delayed_root; | |||
709 | #define BTRFS_FS_OPEN 5 | 709 | #define BTRFS_FS_OPEN 5 |
710 | #define BTRFS_FS_QUOTA_ENABLED 6 | 710 | #define BTRFS_FS_QUOTA_ENABLED 6 |
711 | #define BTRFS_FS_QUOTA_ENABLING 7 | 711 | #define BTRFS_FS_QUOTA_ENABLING 7 |
712 | #define BTRFS_FS_QUOTA_DISABLING 8 | ||
713 | #define BTRFS_FS_UPDATE_UUID_TREE_GEN 9 | 712 | #define BTRFS_FS_UPDATE_UUID_TREE_GEN 9 |
714 | #define BTRFS_FS_CREATING_FREE_SPACE_TREE 10 | 713 | #define BTRFS_FS_CREATING_FREE_SPACE_TREE 10 |
715 | #define BTRFS_FS_BTREE_ERR 11 | 714 | #define BTRFS_FS_BTREE_ERR 11 |
@@ -723,7 +722,7 @@ struct btrfs_delayed_root; | |||
723 | * Indicate that a whole-filesystem exclusive operation is running | 722 | * Indicate that a whole-filesystem exclusive operation is running |
724 | * (device replace, resize, device add/delete, balance) | 723 | * (device replace, resize, device add/delete, balance) |
725 | */ | 724 | */ |
726 | #define BTRFS_FS_EXCL_OP 14 | 725 | #define BTRFS_FS_EXCL_OP 16 |
727 | 726 | ||
728 | struct btrfs_fs_info { | 727 | struct btrfs_fs_info { |
729 | u8 fsid[BTRFS_FSID_SIZE]; | 728 | u8 fsid[BTRFS_FSID_SIZE]; |
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index 487bbe4fb3c6..dfdab849037b 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c | |||
@@ -3643,7 +3643,14 @@ int write_all_supers(struct btrfs_fs_info *fs_info, int max_mirrors) | |||
3643 | u64 flags; | 3643 | u64 flags; |
3644 | 3644 | ||
3645 | do_barriers = !btrfs_test_opt(fs_info, NOBARRIER); | 3645 | do_barriers = !btrfs_test_opt(fs_info, NOBARRIER); |
3646 | backup_super_roots(fs_info); | 3646 | |
3647 | /* | ||
3648 | * max_mirrors == 0 indicates we're from commit_transaction, | ||
3649 | * not from fsync where the tree roots in fs_info have not | ||
3650 | * been consistent on disk. | ||
3651 | */ | ||
3652 | if (max_mirrors == 0) | ||
3653 | backup_super_roots(fs_info); | ||
3647 | 3654 | ||
3648 | sb = fs_info->super_for_commit; | 3655 | sb = fs_info->super_for_commit; |
3649 | dev_item = &sb->dev_item; | 3656 | dev_item = &sb->dev_item; |
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index 3e5bb0cdd3cd..970190cd347e 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c | |||
@@ -2801,7 +2801,7 @@ static int submit_extent_page(unsigned int opf, struct extent_io_tree *tree, | |||
2801 | } | 2801 | } |
2802 | } | 2802 | } |
2803 | 2803 | ||
2804 | bio = btrfs_bio_alloc(bdev, sector << 9); | 2804 | bio = btrfs_bio_alloc(bdev, (u64)sector << 9); |
2805 | bio_add_page(bio, page, page_size, offset); | 2805 | bio_add_page(bio, page, page_size, offset); |
2806 | bio->bi_end_io = end_io_func; | 2806 | bio->bi_end_io = end_io_func; |
2807 | bio->bi_private = tree; | 2807 | bio->bi_private = tree; |
@@ -3471,8 +3471,7 @@ static int __extent_writepage(struct page *page, struct writeback_control *wbc, | |||
3471 | unsigned int write_flags = 0; | 3471 | unsigned int write_flags = 0; |
3472 | unsigned long nr_written = 0; | 3472 | unsigned long nr_written = 0; |
3473 | 3473 | ||
3474 | if (wbc->sync_mode == WB_SYNC_ALL) | 3474 | write_flags = wbc_to_write_flags(wbc); |
3475 | write_flags = REQ_SYNC; | ||
3476 | 3475 | ||
3477 | trace___extent_writepage(page, inode, wbc); | 3476 | trace___extent_writepage(page, inode, wbc); |
3478 | 3477 | ||
@@ -3718,7 +3717,7 @@ static noinline_for_stack int write_one_eb(struct extent_buffer *eb, | |||
3718 | unsigned long i, num_pages; | 3717 | unsigned long i, num_pages; |
3719 | unsigned long bio_flags = 0; | 3718 | unsigned long bio_flags = 0; |
3720 | unsigned long start, end; | 3719 | unsigned long start, end; |
3721 | unsigned int write_flags = (epd->sync_io ? REQ_SYNC : 0) | REQ_META; | 3720 | unsigned int write_flags = wbc_to_write_flags(wbc) | REQ_META; |
3722 | int ret = 0; | 3721 | int ret = 0; |
3723 | 3722 | ||
3724 | clear_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags); | 3723 | clear_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags); |
@@ -4063,9 +4062,6 @@ static void flush_epd_write_bio(struct extent_page_data *epd) | |||
4063 | if (epd->bio) { | 4062 | if (epd->bio) { |
4064 | int ret; | 4063 | int ret; |
4065 | 4064 | ||
4066 | bio_set_op_attrs(epd->bio, REQ_OP_WRITE, | ||
4067 | epd->sync_io ? REQ_SYNC : 0); | ||
4068 | |||
4069 | ret = submit_one_bio(epd->bio, 0, epd->bio_flags); | 4065 | ret = submit_one_bio(epd->bio, 0, epd->bio_flags); |
4070 | BUG_ON(ret < 0); /* -ENOMEM */ | 4066 | BUG_ON(ret < 0); /* -ENOMEM */ |
4071 | epd->bio = NULL; | 4067 | epd->bio = NULL; |
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 128f3e58634f..d94e3f68b9b1 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c | |||
@@ -135,6 +135,18 @@ static inline void btrfs_cleanup_ordered_extents(struct inode *inode, | |||
135 | const u64 offset, | 135 | const u64 offset, |
136 | const u64 bytes) | 136 | const u64 bytes) |
137 | { | 137 | { |
138 | unsigned long index = offset >> PAGE_SHIFT; | ||
139 | unsigned long end_index = (offset + bytes - 1) >> PAGE_SHIFT; | ||
140 | struct page *page; | ||
141 | |||
142 | while (index <= end_index) { | ||
143 | page = find_get_page(inode->i_mapping, index); | ||
144 | index++; | ||
145 | if (!page) | ||
146 | continue; | ||
147 | ClearPagePrivate2(page); | ||
148 | put_page(page); | ||
149 | } | ||
138 | return __endio_write_update_ordered(inode, offset + PAGE_SIZE, | 150 | return __endio_write_update_ordered(inode, offset + PAGE_SIZE, |
139 | bytes - PAGE_SIZE, false); | 151 | bytes - PAGE_SIZE, false); |
140 | } | 152 | } |
@@ -8357,11 +8369,8 @@ static void btrfs_endio_direct_read(struct bio *bio) | |||
8357 | struct btrfs_io_bio *io_bio = btrfs_io_bio(bio); | 8369 | struct btrfs_io_bio *io_bio = btrfs_io_bio(bio); |
8358 | blk_status_t err = bio->bi_status; | 8370 | blk_status_t err = bio->bi_status; |
8359 | 8371 | ||
8360 | if (dip->flags & BTRFS_DIO_ORIG_BIO_SUBMITTED) { | 8372 | if (dip->flags & BTRFS_DIO_ORIG_BIO_SUBMITTED) |
8361 | err = btrfs_subio_endio_read(inode, io_bio, err); | 8373 | err = btrfs_subio_endio_read(inode, io_bio, err); |
8362 | if (!err) | ||
8363 | bio->bi_status = 0; | ||
8364 | } | ||
8365 | 8374 | ||
8366 | unlock_extent(&BTRFS_I(inode)->io_tree, dip->logical_offset, | 8375 | unlock_extent(&BTRFS_I(inode)->io_tree, dip->logical_offset, |
8367 | dip->logical_offset + dip->bytes - 1); | 8376 | dip->logical_offset + dip->bytes - 1); |
@@ -8369,7 +8378,7 @@ static void btrfs_endio_direct_read(struct bio *bio) | |||
8369 | 8378 | ||
8370 | kfree(dip); | 8379 | kfree(dip); |
8371 | 8380 | ||
8372 | dio_bio->bi_status = bio->bi_status; | 8381 | dio_bio->bi_status = err; |
8373 | dio_end_io(dio_bio); | 8382 | dio_end_io(dio_bio); |
8374 | 8383 | ||
8375 | if (io_bio->end_io) | 8384 | if (io_bio->end_io) |
@@ -8387,6 +8396,7 @@ static void __endio_write_update_ordered(struct inode *inode, | |||
8387 | btrfs_work_func_t func; | 8396 | btrfs_work_func_t func; |
8388 | u64 ordered_offset = offset; | 8397 | u64 ordered_offset = offset; |
8389 | u64 ordered_bytes = bytes; | 8398 | u64 ordered_bytes = bytes; |
8399 | u64 last_offset; | ||
8390 | int ret; | 8400 | int ret; |
8391 | 8401 | ||
8392 | if (btrfs_is_free_space_inode(BTRFS_I(inode))) { | 8402 | if (btrfs_is_free_space_inode(BTRFS_I(inode))) { |
@@ -8398,6 +8408,7 @@ static void __endio_write_update_ordered(struct inode *inode, | |||
8398 | } | 8408 | } |
8399 | 8409 | ||
8400 | again: | 8410 | again: |
8411 | last_offset = ordered_offset; | ||
8401 | ret = btrfs_dec_test_first_ordered_pending(inode, &ordered, | 8412 | ret = btrfs_dec_test_first_ordered_pending(inode, &ordered, |
8402 | &ordered_offset, | 8413 | &ordered_offset, |
8403 | ordered_bytes, | 8414 | ordered_bytes, |
@@ -8409,6 +8420,12 @@ again: | |||
8409 | btrfs_queue_work(wq, &ordered->work); | 8420 | btrfs_queue_work(wq, &ordered->work); |
8410 | out_test: | 8421 | out_test: |
8411 | /* | 8422 | /* |
8423 | * If btrfs_dec_test_ordered_pending does not find any ordered extent | ||
8424 | * in the range, we can exit. | ||
8425 | */ | ||
8426 | if (ordered_offset == last_offset) | ||
8427 | return; | ||
8428 | /* | ||
8412 | * our bio might span multiple ordered extents. If we haven't | 8429 | * our bio might span multiple ordered extents. If we haven't |
8413 | * completed the accounting for the whole dio, go back and try again | 8430 | * completed the accounting for the whole dio, go back and try again |
8414 | */ | 8431 | */ |
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c index d6715c2bcdc4..6c7a49faf4e0 100644 --- a/fs/btrfs/ioctl.c +++ b/fs/btrfs/ioctl.c | |||
@@ -2773,9 +2773,9 @@ static long btrfs_ioctl_fs_info(struct btrfs_fs_info *fs_info, | |||
2773 | } | 2773 | } |
2774 | mutex_unlock(&fs_devices->device_list_mutex); | 2774 | mutex_unlock(&fs_devices->device_list_mutex); |
2775 | 2775 | ||
2776 | fi_args->nodesize = fs_info->super_copy->nodesize; | 2776 | fi_args->nodesize = fs_info->nodesize; |
2777 | fi_args->sectorsize = fs_info->super_copy->sectorsize; | 2777 | fi_args->sectorsize = fs_info->sectorsize; |
2778 | fi_args->clone_alignment = fs_info->super_copy->sectorsize; | 2778 | fi_args->clone_alignment = fs_info->sectorsize; |
2779 | 2779 | ||
2780 | if (copy_to_user(arg, fi_args, sizeof(*fi_args))) | 2780 | if (copy_to_user(arg, fi_args, sizeof(*fi_args))) |
2781 | ret = -EFAULT; | 2781 | ret = -EFAULT; |
@@ -3032,7 +3032,7 @@ static int btrfs_cmp_data_prepare(struct inode *src, u64 loff, | |||
3032 | out: | 3032 | out: |
3033 | if (ret) | 3033 | if (ret) |
3034 | btrfs_cmp_data_free(cmp); | 3034 | btrfs_cmp_data_free(cmp); |
3035 | return 0; | 3035 | return ret; |
3036 | } | 3036 | } |
3037 | 3037 | ||
3038 | static int btrfs_cmp_data(u64 len, struct cmp_pages *cmp) | 3038 | static int btrfs_cmp_data(u64 len, struct cmp_pages *cmp) |
@@ -4061,6 +4061,10 @@ static long btrfs_ioctl_default_subvol(struct file *file, void __user *argp) | |||
4061 | ret = PTR_ERR(new_root); | 4061 | ret = PTR_ERR(new_root); |
4062 | goto out; | 4062 | goto out; |
4063 | } | 4063 | } |
4064 | if (!is_fstree(new_root->objectid)) { | ||
4065 | ret = -ENOENT; | ||
4066 | goto out; | ||
4067 | } | ||
4064 | 4068 | ||
4065 | path = btrfs_alloc_path(); | 4069 | path = btrfs_alloc_path(); |
4066 | if (!path) { | 4070 | if (!path) { |
diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c index 5c8b61c86e61..e172d4843eae 100644 --- a/fs/btrfs/qgroup.c +++ b/fs/btrfs/qgroup.c | |||
@@ -807,7 +807,6 @@ static int btrfs_clean_quota_tree(struct btrfs_trans_handle *trans, | |||
807 | } | 807 | } |
808 | ret = 0; | 808 | ret = 0; |
809 | out: | 809 | out: |
810 | set_bit(BTRFS_FS_QUOTA_DISABLING, &root->fs_info->flags); | ||
811 | btrfs_free_path(path); | 810 | btrfs_free_path(path); |
812 | return ret; | 811 | return ret; |
813 | } | 812 | } |
@@ -953,7 +952,6 @@ int btrfs_quota_disable(struct btrfs_trans_handle *trans, | |||
953 | if (!fs_info->quota_root) | 952 | if (!fs_info->quota_root) |
954 | goto out; | 953 | goto out; |
955 | clear_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags); | 954 | clear_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags); |
956 | set_bit(BTRFS_FS_QUOTA_DISABLING, &fs_info->flags); | ||
957 | btrfs_qgroup_wait_for_completion(fs_info, false); | 955 | btrfs_qgroup_wait_for_completion(fs_info, false); |
958 | spin_lock(&fs_info->qgroup_lock); | 956 | spin_lock(&fs_info->qgroup_lock); |
959 | quota_root = fs_info->quota_root; | 957 | quota_root = fs_info->quota_root; |
@@ -1307,6 +1305,8 @@ int btrfs_remove_qgroup(struct btrfs_trans_handle *trans, | |||
1307 | } | 1305 | } |
1308 | } | 1306 | } |
1309 | ret = del_qgroup_item(trans, quota_root, qgroupid); | 1307 | ret = del_qgroup_item(trans, quota_root, qgroupid); |
1308 | if (ret && ret != -ENOENT) | ||
1309 | goto out; | ||
1310 | 1310 | ||
1311 | while (!list_empty(&qgroup->groups)) { | 1311 | while (!list_empty(&qgroup->groups)) { |
1312 | list = list_first_entry(&qgroup->groups, | 1312 | list = list_first_entry(&qgroup->groups, |
@@ -2086,8 +2086,6 @@ int btrfs_run_qgroups(struct btrfs_trans_handle *trans, | |||
2086 | 2086 | ||
2087 | if (test_and_clear_bit(BTRFS_FS_QUOTA_ENABLING, &fs_info->flags)) | 2087 | if (test_and_clear_bit(BTRFS_FS_QUOTA_ENABLING, &fs_info->flags)) |
2088 | set_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags); | 2088 | set_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags); |
2089 | if (test_and_clear_bit(BTRFS_FS_QUOTA_DISABLING, &fs_info->flags)) | ||
2090 | clear_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags); | ||
2091 | 2089 | ||
2092 | spin_lock(&fs_info->qgroup_lock); | 2090 | spin_lock(&fs_info->qgroup_lock); |
2093 | while (!list_empty(&fs_info->dirty_qgroups)) { | 2091 | while (!list_empty(&fs_info->dirty_qgroups)) { |
diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c index 3a49a3c2fca4..9841faef08ea 100644 --- a/fs/btrfs/relocation.c +++ b/fs/btrfs/relocation.c | |||
@@ -2400,11 +2400,11 @@ void free_reloc_roots(struct list_head *list) | |||
2400 | while (!list_empty(list)) { | 2400 | while (!list_empty(list)) { |
2401 | reloc_root = list_entry(list->next, struct btrfs_root, | 2401 | reloc_root = list_entry(list->next, struct btrfs_root, |
2402 | root_list); | 2402 | root_list); |
2403 | __del_reloc_root(reloc_root); | ||
2403 | free_extent_buffer(reloc_root->node); | 2404 | free_extent_buffer(reloc_root->node); |
2404 | free_extent_buffer(reloc_root->commit_root); | 2405 | free_extent_buffer(reloc_root->commit_root); |
2405 | reloc_root->node = NULL; | 2406 | reloc_root->node = NULL; |
2406 | reloc_root->commit_root = NULL; | 2407 | reloc_root->commit_root = NULL; |
2407 | __del_reloc_root(reloc_root); | ||
2408 | } | 2408 | } |
2409 | } | 2409 | } |
2410 | 2410 | ||
diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c index 32b043ef8ac9..8fd195cfe81b 100644 --- a/fs/btrfs/send.c +++ b/fs/btrfs/send.c | |||
@@ -2630,7 +2630,7 @@ static int send_create_inode(struct send_ctx *sctx, u64 ino) | |||
2630 | } else { | 2630 | } else { |
2631 | btrfs_warn(sctx->send_root->fs_info, "unexpected inode type %o", | 2631 | btrfs_warn(sctx->send_root->fs_info, "unexpected inode type %o", |
2632 | (int)(mode & S_IFMT)); | 2632 | (int)(mode & S_IFMT)); |
2633 | ret = -ENOTSUPP; | 2633 | ret = -EOPNOTSUPP; |
2634 | goto out; | 2634 | goto out; |
2635 | } | 2635 | } |
2636 | 2636 | ||
diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c index ad7f4bab640b..c800d067fcbf 100644 --- a/fs/btrfs/tree-log.c +++ b/fs/btrfs/tree-log.c | |||
@@ -4181,6 +4181,7 @@ static int btrfs_log_changed_extents(struct btrfs_trans_handle *trans, | |||
4181 | struct extent_map *em, *n; | 4181 | struct extent_map *em, *n; |
4182 | struct list_head extents; | 4182 | struct list_head extents; |
4183 | struct extent_map_tree *tree = &inode->extent_tree; | 4183 | struct extent_map_tree *tree = &inode->extent_tree; |
4184 | u64 logged_start, logged_end; | ||
4184 | u64 test_gen; | 4185 | u64 test_gen; |
4185 | int ret = 0; | 4186 | int ret = 0; |
4186 | int num = 0; | 4187 | int num = 0; |
@@ -4190,10 +4191,11 @@ static int btrfs_log_changed_extents(struct btrfs_trans_handle *trans, | |||
4190 | down_write(&inode->dio_sem); | 4191 | down_write(&inode->dio_sem); |
4191 | write_lock(&tree->lock); | 4192 | write_lock(&tree->lock); |
4192 | test_gen = root->fs_info->last_trans_committed; | 4193 | test_gen = root->fs_info->last_trans_committed; |
4194 | logged_start = start; | ||
4195 | logged_end = end; | ||
4193 | 4196 | ||
4194 | list_for_each_entry_safe(em, n, &tree->modified_extents, list) { | 4197 | list_for_each_entry_safe(em, n, &tree->modified_extents, list) { |
4195 | list_del_init(&em->list); | 4198 | list_del_init(&em->list); |
4196 | |||
4197 | /* | 4199 | /* |
4198 | * Just an arbitrary number, this can be really CPU intensive | 4200 | * Just an arbitrary number, this can be really CPU intensive |
4199 | * once we start getting a lot of extents, and really once we | 4201 | * once we start getting a lot of extents, and really once we |
@@ -4208,6 +4210,12 @@ static int btrfs_log_changed_extents(struct btrfs_trans_handle *trans, | |||
4208 | 4210 | ||
4209 | if (em->generation <= test_gen) | 4211 | if (em->generation <= test_gen) |
4210 | continue; | 4212 | continue; |
4213 | |||
4214 | if (em->start < logged_start) | ||
4215 | logged_start = em->start; | ||
4216 | if ((em->start + em->len - 1) > logged_end) | ||
4217 | logged_end = em->start + em->len - 1; | ||
4218 | |||
4211 | /* Need a ref to keep it from getting evicted from cache */ | 4219 | /* Need a ref to keep it from getting evicted from cache */ |
4212 | refcount_inc(&em->refs); | 4220 | refcount_inc(&em->refs); |
4213 | set_bit(EXTENT_FLAG_LOGGING, &em->flags); | 4221 | set_bit(EXTENT_FLAG_LOGGING, &em->flags); |
@@ -4216,7 +4224,7 @@ static int btrfs_log_changed_extents(struct btrfs_trans_handle *trans, | |||
4216 | } | 4224 | } |
4217 | 4225 | ||
4218 | list_sort(NULL, &extents, extent_cmp); | 4226 | list_sort(NULL, &extents, extent_cmp); |
4219 | btrfs_get_logged_extents(inode, logged_list, start, end); | 4227 | btrfs_get_logged_extents(inode, logged_list, logged_start, logged_end); |
4220 | /* | 4228 | /* |
4221 | * Some ordered extents started by fsync might have completed | 4229 | * Some ordered extents started by fsync might have completed |
4222 | * before we could collect them into the list logged_list, which | 4230 | * before we could collect them into the list logged_list, which |
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index 0e8f16c305df..b39737568c22 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c | |||
@@ -6166,7 +6166,7 @@ blk_status_t btrfs_map_bio(struct btrfs_fs_info *fs_info, struct bio *bio, | |||
6166 | map_length = length; | 6166 | map_length = length; |
6167 | 6167 | ||
6168 | btrfs_bio_counter_inc_blocked(fs_info); | 6168 | btrfs_bio_counter_inc_blocked(fs_info); |
6169 | ret = __btrfs_map_block(fs_info, bio_op(bio), logical, | 6169 | ret = __btrfs_map_block(fs_info, btrfs_op(bio), logical, |
6170 | &map_length, &bbio, mirror_num, 1); | 6170 | &map_length, &bbio, mirror_num, 1); |
6171 | if (ret) { | 6171 | if (ret) { |
6172 | btrfs_bio_counter_dec(fs_info); | 6172 | btrfs_bio_counter_dec(fs_info); |
diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c index 9dd6b836ac9e..f23c820daaed 100644 --- a/fs/ceph/mds_client.c +++ b/fs/ceph/mds_client.c | |||
@@ -7,7 +7,6 @@ | |||
7 | #include <linux/sched.h> | 7 | #include <linux/sched.h> |
8 | #include <linux/debugfs.h> | 8 | #include <linux/debugfs.h> |
9 | #include <linux/seq_file.h> | 9 | #include <linux/seq_file.h> |
10 | #include <linux/utsname.h> | ||
11 | #include <linux/ratelimit.h> | 10 | #include <linux/ratelimit.h> |
12 | 11 | ||
13 | #include "super.h" | 12 | #include "super.h" |
@@ -735,12 +734,13 @@ static int __choose_mds(struct ceph_mds_client *mdsc, | |||
735 | inode = req->r_inode; | 734 | inode = req->r_inode; |
736 | ihold(inode); | 735 | ihold(inode); |
737 | } else { | 736 | } else { |
738 | /* req->r_dentry is non-null for LSSNAP request. | 737 | /* req->r_dentry is non-null for LSSNAP request */ |
739 | * fall-thru */ | 738 | rcu_read_lock(); |
740 | WARN_ON_ONCE(!req->r_dentry); | 739 | inode = get_nonsnap_parent(req->r_dentry); |
740 | rcu_read_unlock(); | ||
741 | dout("__choose_mds using snapdir's parent %p\n", inode); | ||
741 | } | 742 | } |
742 | } | 743 | } else if (req->r_dentry) { |
743 | if (!inode && req->r_dentry) { | ||
744 | /* ignore race with rename; old or new d_parent is okay */ | 744 | /* ignore race with rename; old or new d_parent is okay */ |
745 | struct dentry *parent; | 745 | struct dentry *parent; |
746 | struct inode *dir; | 746 | struct inode *dir; |
@@ -884,8 +884,8 @@ static struct ceph_msg *create_session_open_msg(struct ceph_mds_client *mdsc, u6 | |||
884 | void *p; | 884 | void *p; |
885 | 885 | ||
886 | const char* metadata[][2] = { | 886 | const char* metadata[][2] = { |
887 | {"hostname", utsname()->nodename}, | 887 | {"hostname", mdsc->nodename}, |
888 | {"kernel_version", utsname()->release}, | 888 | {"kernel_version", init_utsname()->release}, |
889 | {"entity_id", opt->name ? : ""}, | 889 | {"entity_id", opt->name ? : ""}, |
890 | {"root", fsopt->server_path ? : "/"}, | 890 | {"root", fsopt->server_path ? : "/"}, |
891 | {NULL, NULL} | 891 | {NULL, NULL} |
@@ -3539,6 +3539,8 @@ int ceph_mdsc_init(struct ceph_fs_client *fsc) | |||
3539 | init_rwsem(&mdsc->pool_perm_rwsem); | 3539 | init_rwsem(&mdsc->pool_perm_rwsem); |
3540 | mdsc->pool_perm_tree = RB_ROOT; | 3540 | mdsc->pool_perm_tree = RB_ROOT; |
3541 | 3541 | ||
3542 | strncpy(mdsc->nodename, utsname()->nodename, | ||
3543 | sizeof(mdsc->nodename) - 1); | ||
3542 | return 0; | 3544 | return 0; |
3543 | } | 3545 | } |
3544 | 3546 | ||
diff --git a/fs/ceph/mds_client.h b/fs/ceph/mds_client.h index db57ae98ed34..636d6b2ec49c 100644 --- a/fs/ceph/mds_client.h +++ b/fs/ceph/mds_client.h | |||
@@ -8,6 +8,7 @@ | |||
8 | #include <linux/rbtree.h> | 8 | #include <linux/rbtree.h> |
9 | #include <linux/spinlock.h> | 9 | #include <linux/spinlock.h> |
10 | #include <linux/refcount.h> | 10 | #include <linux/refcount.h> |
11 | #include <linux/utsname.h> | ||
11 | 12 | ||
12 | #include <linux/ceph/types.h> | 13 | #include <linux/ceph/types.h> |
13 | #include <linux/ceph/messenger.h> | 14 | #include <linux/ceph/messenger.h> |
@@ -368,6 +369,8 @@ struct ceph_mds_client { | |||
368 | 369 | ||
369 | struct rw_semaphore pool_perm_rwsem; | 370 | struct rw_semaphore pool_perm_rwsem; |
370 | struct rb_root pool_perm_tree; | 371 | struct rb_root pool_perm_tree; |
372 | |||
373 | char nodename[__NEW_UTS_LEN + 1]; | ||
371 | }; | 374 | }; |
372 | 375 | ||
373 | extern const char *ceph_mds_op_name(int op); | 376 | extern const char *ceph_mds_op_name(int op); |
diff --git a/fs/ceph/snap.c b/fs/ceph/snap.c index 1ffc8b426c1c..7fc0b850c352 100644 --- a/fs/ceph/snap.c +++ b/fs/ceph/snap.c | |||
@@ -374,12 +374,10 @@ static int build_snap_context(struct ceph_snap_realm *realm, | |||
374 | realm->ino, realm, snapc, snapc->seq, | 374 | realm->ino, realm, snapc, snapc->seq, |
375 | (unsigned int) snapc->num_snaps); | 375 | (unsigned int) snapc->num_snaps); |
376 | 376 | ||
377 | if (realm->cached_context) { | 377 | ceph_put_snap_context(realm->cached_context); |
378 | ceph_put_snap_context(realm->cached_context); | ||
379 | /* queue realm for cap_snap creation */ | ||
380 | list_add_tail(&realm->dirty_item, dirty_realms); | ||
381 | } | ||
382 | realm->cached_context = snapc; | 378 | realm->cached_context = snapc; |
379 | /* queue realm for cap_snap creation */ | ||
380 | list_add_tail(&realm->dirty_item, dirty_realms); | ||
383 | return 0; | 381 | return 0; |
384 | 382 | ||
385 | fail: | 383 | fail: |
diff --git a/fs/cifs/cifs_debug.c b/fs/cifs/cifs_debug.c index 9727e1dcacd5..cbb9534b89b4 100644 --- a/fs/cifs/cifs_debug.c +++ b/fs/cifs/cifs_debug.c | |||
@@ -160,8 +160,13 @@ static int cifs_debug_data_proc_show(struct seq_file *m, void *v) | |||
160 | if ((ses->serverDomain == NULL) || | 160 | if ((ses->serverDomain == NULL) || |
161 | (ses->serverOS == NULL) || | 161 | (ses->serverOS == NULL) || |
162 | (ses->serverNOS == NULL)) { | 162 | (ses->serverNOS == NULL)) { |
163 | seq_printf(m, "\n%d) entry for %s not fully " | 163 | seq_printf(m, "\n%d) Name: %s Uses: %d Capability: 0x%x\tSession Status: %d\t", |
164 | "displayed\n\t", i, ses->serverName); | 164 | i, ses->serverName, ses->ses_count, |
165 | ses->capabilities, ses->status); | ||
166 | if (ses->session_flags & SMB2_SESSION_FLAG_IS_GUEST) | ||
167 | seq_printf(m, "Guest\t"); | ||
168 | else if (ses->session_flags & SMB2_SESSION_FLAG_IS_NULL) | ||
169 | seq_printf(m, "Anonymous\t"); | ||
165 | } else { | 170 | } else { |
166 | seq_printf(m, | 171 | seq_printf(m, |
167 | "\n%d) Name: %s Domain: %s Uses: %d OS:" | 172 | "\n%d) Name: %s Domain: %s Uses: %d OS:" |
diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c index 180b3356ff86..8c8b75d33f31 100644 --- a/fs/cifs/cifsfs.c +++ b/fs/cifs/cifsfs.c | |||
@@ -461,6 +461,8 @@ cifs_show_options(struct seq_file *s, struct dentry *root) | |||
461 | seq_puts(s, ",nocase"); | 461 | seq_puts(s, ",nocase"); |
462 | if (tcon->retry) | 462 | if (tcon->retry) |
463 | seq_puts(s, ",hard"); | 463 | seq_puts(s, ",hard"); |
464 | else | ||
465 | seq_puts(s, ",soft"); | ||
464 | if (tcon->use_persistent) | 466 | if (tcon->use_persistent) |
465 | seq_puts(s, ",persistenthandles"); | 467 | seq_puts(s, ",persistenthandles"); |
466 | else if (tcon->use_resilient) | 468 | else if (tcon->use_resilient) |
@@ -1447,7 +1449,7 @@ exit_cifs(void) | |||
1447 | exit_cifs_idmap(); | 1449 | exit_cifs_idmap(); |
1448 | #endif | 1450 | #endif |
1449 | #ifdef CONFIG_CIFS_UPCALL | 1451 | #ifdef CONFIG_CIFS_UPCALL |
1450 | unregister_key_type(&cifs_spnego_key_type); | 1452 | exit_cifs_spnego(); |
1451 | #endif | 1453 | #endif |
1452 | cifs_destroy_request_bufs(); | 1454 | cifs_destroy_request_bufs(); |
1453 | cifs_destroy_mids(); | 1455 | cifs_destroy_mids(); |
diff --git a/fs/cifs/cifsfs.h b/fs/cifs/cifsfs.h index 30bf89b1fd9a..5a10e566f0e6 100644 --- a/fs/cifs/cifsfs.h +++ b/fs/cifs/cifsfs.h | |||
@@ -149,5 +149,5 @@ extern long cifs_ioctl(struct file *filep, unsigned int cmd, unsigned long arg); | |||
149 | extern const struct export_operations cifs_export_ops; | 149 | extern const struct export_operations cifs_export_ops; |
150 | #endif /* CONFIG_CIFS_NFSD_EXPORT */ | 150 | #endif /* CONFIG_CIFS_NFSD_EXPORT */ |
151 | 151 | ||
152 | #define CIFS_VERSION "2.09" | 152 | #define CIFS_VERSION "2.10" |
153 | #endif /* _CIFSFS_H */ | 153 | #endif /* _CIFSFS_H */ |
diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h index 808486c29f0d..de5b2e1fcce5 100644 --- a/fs/cifs/cifsglob.h +++ b/fs/cifs/cifsglob.h | |||
@@ -188,6 +188,8 @@ enum smb_version { | |||
188 | #ifdef CONFIG_CIFS_SMB311 | 188 | #ifdef CONFIG_CIFS_SMB311 |
189 | Smb_311, | 189 | Smb_311, |
190 | #endif /* SMB311 */ | 190 | #endif /* SMB311 */ |
191 | Smb_3any, | ||
192 | Smb_default, | ||
191 | Smb_version_err | 193 | Smb_version_err |
192 | }; | 194 | }; |
193 | 195 | ||
@@ -1701,6 +1703,10 @@ extern struct smb_version_values smb20_values; | |||
1701 | #define SMB21_VERSION_STRING "2.1" | 1703 | #define SMB21_VERSION_STRING "2.1" |
1702 | extern struct smb_version_operations smb21_operations; | 1704 | extern struct smb_version_operations smb21_operations; |
1703 | extern struct smb_version_values smb21_values; | 1705 | extern struct smb_version_values smb21_values; |
1706 | #define SMBDEFAULT_VERSION_STRING "default" | ||
1707 | extern struct smb_version_values smbdefault_values; | ||
1708 | #define SMB3ANY_VERSION_STRING "3" | ||
1709 | extern struct smb_version_values smb3any_values; | ||
1704 | #define SMB30_VERSION_STRING "3.0" | 1710 | #define SMB30_VERSION_STRING "3.0" |
1705 | extern struct smb_version_operations smb30_operations; | 1711 | extern struct smb_version_operations smb30_operations; |
1706 | extern struct smb_version_values smb30_values; | 1712 | extern struct smb_version_values smb30_values; |
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c index 5aa2d278ca84..0bfc2280436d 100644 --- a/fs/cifs/connect.c +++ b/fs/cifs/connect.c | |||
@@ -301,6 +301,8 @@ static const match_table_t cifs_smb_version_tokens = { | |||
301 | { Smb_311, SMB311_VERSION_STRING }, | 301 | { Smb_311, SMB311_VERSION_STRING }, |
302 | { Smb_311, ALT_SMB311_VERSION_STRING }, | 302 | { Smb_311, ALT_SMB311_VERSION_STRING }, |
303 | #endif /* SMB311 */ | 303 | #endif /* SMB311 */ |
304 | { Smb_3any, SMB3ANY_VERSION_STRING }, | ||
305 | { Smb_default, SMBDEFAULT_VERSION_STRING }, | ||
304 | { Smb_version_err, NULL } | 306 | { Smb_version_err, NULL } |
305 | }; | 307 | }; |
306 | 308 | ||
@@ -1148,6 +1150,14 @@ cifs_parse_smb_version(char *value, struct smb_vol *vol) | |||
1148 | vol->vals = &smb311_values; | 1150 | vol->vals = &smb311_values; |
1149 | break; | 1151 | break; |
1150 | #endif /* SMB311 */ | 1152 | #endif /* SMB311 */ |
1153 | case Smb_3any: | ||
1154 | vol->ops = &smb30_operations; /* currently identical with 3.0 */ | ||
1155 | vol->vals = &smb3any_values; | ||
1156 | break; | ||
1157 | case Smb_default: | ||
1158 | vol->ops = &smb30_operations; /* currently identical with 3.0 */ | ||
1159 | vol->vals = &smbdefault_values; | ||
1160 | break; | ||
1151 | default: | 1161 | default: |
1152 | cifs_dbg(VFS, "Unknown vers= option specified: %s\n", value); | 1162 | cifs_dbg(VFS, "Unknown vers= option specified: %s\n", value); |
1153 | return 1; | 1163 | return 1; |
@@ -1274,9 +1284,9 @@ cifs_parse_mount_options(const char *mountdata, const char *devname, | |||
1274 | 1284 | ||
1275 | vol->actimeo = CIFS_DEF_ACTIMEO; | 1285 | vol->actimeo = CIFS_DEF_ACTIMEO; |
1276 | 1286 | ||
1277 | /* FIXME: add autonegotiation for SMB3 or later rather than just SMB3 */ | 1287 | /* offer SMB2.1 and later (SMB3 etc). Secure and widely accepted */ |
1278 | vol->ops = &smb30_operations; /* both secure and accepted widely */ | 1288 | vol->ops = &smb30_operations; |
1279 | vol->vals = &smb30_values; | 1289 | vol->vals = &smbdefault_values; |
1280 | 1290 | ||
1281 | vol->echo_interval = SMB_ECHO_INTERVAL_DEFAULT; | 1291 | vol->echo_interval = SMB_ECHO_INTERVAL_DEFAULT; |
1282 | 1292 | ||
@@ -1988,11 +1998,10 @@ cifs_parse_mount_options(const char *mountdata, const char *devname, | |||
1988 | 1998 | ||
1989 | if (got_version == false) | 1999 | if (got_version == false) |
1990 | pr_warn("No dialect specified on mount. Default has changed to " | 2000 | pr_warn("No dialect specified on mount. Default has changed to " |
1991 | "a more secure dialect, SMB3 (vers=3.0), from CIFS " | 2001 | "a more secure dialect, SMB2.1 or later (e.g. SMB3), from CIFS " |
1992 | "(SMB1). To use the less secure SMB1 dialect to access " | 2002 | "(SMB1). To use the less secure SMB1 dialect to access " |
1993 | "old servers which do not support SMB3 specify vers=1.0" | 2003 | "old servers which do not support SMB3 (or SMB2.1) specify vers=1.0" |
1994 | " on mount. For somewhat newer servers such as Windows " | 2004 | " on mount.\n"); |
1995 | "7 try vers=2.1.\n"); | ||
1996 | 2005 | ||
1997 | kfree(mountdata_copy); | 2006 | kfree(mountdata_copy); |
1998 | return 0; | 2007 | return 0; |
@@ -2133,6 +2142,7 @@ static int match_server(struct TCP_Server_Info *server, struct smb_vol *vol) | |||
2133 | if (vol->nosharesock) | 2142 | if (vol->nosharesock) |
2134 | return 0; | 2143 | return 0; |
2135 | 2144 | ||
2145 | /* BB update this for smb3any and default case */ | ||
2136 | if ((server->vals != vol->vals) || (server->ops != vol->ops)) | 2146 | if ((server->vals != vol->vals) || (server->ops != vol->ops)) |
2137 | return 0; | 2147 | return 0; |
2138 | 2148 | ||
@@ -4144,6 +4154,14 @@ cifs_setup_session(const unsigned int xid, struct cifs_ses *ses, | |||
4144 | cifs_dbg(FYI, "Security Mode: 0x%x Capabilities: 0x%x TimeAdjust: %d\n", | 4154 | cifs_dbg(FYI, "Security Mode: 0x%x Capabilities: 0x%x TimeAdjust: %d\n", |
4145 | server->sec_mode, server->capabilities, server->timeAdj); | 4155 | server->sec_mode, server->capabilities, server->timeAdj); |
4146 | 4156 | ||
4157 | if (ses->auth_key.response) { | ||
4158 | cifs_dbg(VFS, "Free previous auth_key.response = %p\n", | ||
4159 | ses->auth_key.response); | ||
4160 | kfree(ses->auth_key.response); | ||
4161 | ses->auth_key.response = NULL; | ||
4162 | ses->auth_key.len = 0; | ||
4163 | } | ||
4164 | |||
4147 | if (server->ops->sess_setup) | 4165 | if (server->ops->sess_setup) |
4148 | rc = server->ops->sess_setup(xid, ses, nls_info); | 4166 | rc = server->ops->sess_setup(xid, ses, nls_info); |
4149 | 4167 | ||
diff --git a/fs/cifs/file.c b/fs/cifs/file.c index 0786f19d288f..92fdf9c35de2 100644 --- a/fs/cifs/file.c +++ b/fs/cifs/file.c | |||
@@ -224,6 +224,13 @@ cifs_nt_open(char *full_path, struct inode *inode, struct cifs_sb_info *cifs_sb, | |||
224 | if (backup_cred(cifs_sb)) | 224 | if (backup_cred(cifs_sb)) |
225 | create_options |= CREATE_OPEN_BACKUP_INTENT; | 225 | create_options |= CREATE_OPEN_BACKUP_INTENT; |
226 | 226 | ||
227 | /* O_SYNC also has bit for O_DSYNC so following check picks up either */ | ||
228 | if (f_flags & O_SYNC) | ||
229 | create_options |= CREATE_WRITE_THROUGH; | ||
230 | |||
231 | if (f_flags & O_DIRECT) | ||
232 | create_options |= CREATE_NO_BUFFER; | ||
233 | |||
227 | oparms.tcon = tcon; | 234 | oparms.tcon = tcon; |
228 | oparms.cifs_sb = cifs_sb; | 235 | oparms.cifs_sb = cifs_sb; |
229 | oparms.desired_access = desired_access; | 236 | oparms.desired_access = desired_access; |
@@ -1102,8 +1109,10 @@ cifs_push_mandatory_locks(struct cifsFileInfo *cfile) | |||
1102 | struct cifs_tcon *tcon; | 1109 | struct cifs_tcon *tcon; |
1103 | unsigned int num, max_num, max_buf; | 1110 | unsigned int num, max_num, max_buf; |
1104 | LOCKING_ANDX_RANGE *buf, *cur; | 1111 | LOCKING_ANDX_RANGE *buf, *cur; |
1105 | int types[] = {LOCKING_ANDX_LARGE_FILES, | 1112 | static const int types[] = { |
1106 | LOCKING_ANDX_SHARED_LOCK | LOCKING_ANDX_LARGE_FILES}; | 1113 | LOCKING_ANDX_LARGE_FILES, |
1114 | LOCKING_ANDX_SHARED_LOCK | LOCKING_ANDX_LARGE_FILES | ||
1115 | }; | ||
1107 | int i; | 1116 | int i; |
1108 | 1117 | ||
1109 | xid = get_xid(); | 1118 | xid = get_xid(); |
@@ -1434,8 +1443,10 @@ cifs_unlock_range(struct cifsFileInfo *cfile, struct file_lock *flock, | |||
1434 | unsigned int xid) | 1443 | unsigned int xid) |
1435 | { | 1444 | { |
1436 | int rc = 0, stored_rc; | 1445 | int rc = 0, stored_rc; |
1437 | int types[] = {LOCKING_ANDX_LARGE_FILES, | 1446 | static const int types[] = { |
1438 | LOCKING_ANDX_SHARED_LOCK | LOCKING_ANDX_LARGE_FILES}; | 1447 | LOCKING_ANDX_LARGE_FILES, |
1448 | LOCKING_ANDX_SHARED_LOCK | LOCKING_ANDX_LARGE_FILES | ||
1449 | }; | ||
1439 | unsigned int i; | 1450 | unsigned int i; |
1440 | unsigned int max_num, num, max_buf; | 1451 | unsigned int max_num, num, max_buf; |
1441 | LOCKING_ANDX_RANGE *buf, *cur; | 1452 | LOCKING_ANDX_RANGE *buf, *cur; |
diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c index a8693632235f..7c732cb44164 100644 --- a/fs/cifs/inode.c +++ b/fs/cifs/inode.c | |||
@@ -234,6 +234,8 @@ cifs_unix_basic_to_fattr(struct cifs_fattr *fattr, FILE_UNIX_BASIC_INFO *info, | |||
234 | fattr->cf_atime = cifs_NTtimeToUnix(info->LastAccessTime); | 234 | fattr->cf_atime = cifs_NTtimeToUnix(info->LastAccessTime); |
235 | fattr->cf_mtime = cifs_NTtimeToUnix(info->LastModificationTime); | 235 | fattr->cf_mtime = cifs_NTtimeToUnix(info->LastModificationTime); |
236 | fattr->cf_ctime = cifs_NTtimeToUnix(info->LastStatusChange); | 236 | fattr->cf_ctime = cifs_NTtimeToUnix(info->LastStatusChange); |
237 | /* old POSIX extensions don't get create time */ | ||
238 | |||
237 | fattr->cf_mode = le64_to_cpu(info->Permissions); | 239 | fattr->cf_mode = le64_to_cpu(info->Permissions); |
238 | 240 | ||
239 | /* | 241 | /* |
@@ -2024,6 +2026,19 @@ int cifs_getattr(const struct path *path, struct kstat *stat, | |||
2024 | stat->blksize = CIFS_MAX_MSGSIZE; | 2026 | stat->blksize = CIFS_MAX_MSGSIZE; |
2025 | stat->ino = CIFS_I(inode)->uniqueid; | 2027 | stat->ino = CIFS_I(inode)->uniqueid; |
2026 | 2028 | ||
2029 | /* old CIFS Unix Extensions doesn't return create time */ | ||
2030 | if (CIFS_I(inode)->createtime) { | ||
2031 | stat->result_mask |= STATX_BTIME; | ||
2032 | stat->btime = | ||
2033 | cifs_NTtimeToUnix(cpu_to_le64(CIFS_I(inode)->createtime)); | ||
2034 | } | ||
2035 | |||
2036 | stat->attributes_mask |= (STATX_ATTR_COMPRESSED | STATX_ATTR_ENCRYPTED); | ||
2037 | if (CIFS_I(inode)->cifsAttrs & FILE_ATTRIBUTE_COMPRESSED) | ||
2038 | stat->attributes |= STATX_ATTR_COMPRESSED; | ||
2039 | if (CIFS_I(inode)->cifsAttrs & FILE_ATTRIBUTE_ENCRYPTED) | ||
2040 | stat->attributes |= STATX_ATTR_ENCRYPTED; | ||
2041 | |||
2027 | /* | 2042 | /* |
2028 | * If on a multiuser mount without unix extensions or cifsacl being | 2043 | * If on a multiuser mount without unix extensions or cifsacl being |
2029 | * enabled, and the admin hasn't overridden them, set the ownership | 2044 | * enabled, and the admin hasn't overridden them, set the ownership |
diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c index fb2934b9b97c..0dafdbae1f8c 100644 --- a/fs/cifs/smb2ops.c +++ b/fs/cifs/smb2ops.c | |||
@@ -426,6 +426,7 @@ smb2_query_file_info(const unsigned int xid, struct cifs_tcon *tcon, | |||
426 | return rc; | 426 | return rc; |
427 | } | 427 | } |
428 | 428 | ||
429 | #ifdef CONFIG_CIFS_XATTR | ||
429 | static ssize_t | 430 | static ssize_t |
430 | move_smb2_ea_to_cifs(char *dst, size_t dst_size, | 431 | move_smb2_ea_to_cifs(char *dst, size_t dst_size, |
431 | struct smb2_file_full_ea_info *src, size_t src_size, | 432 | struct smb2_file_full_ea_info *src, size_t src_size, |
@@ -613,6 +614,7 @@ smb2_set_ea(const unsigned int xid, struct cifs_tcon *tcon, | |||
613 | 614 | ||
614 | return rc; | 615 | return rc; |
615 | } | 616 | } |
617 | #endif | ||
616 | 618 | ||
617 | static bool | 619 | static bool |
618 | smb2_can_echo(struct TCP_Server_Info *server) | 620 | smb2_can_echo(struct TCP_Server_Info *server) |
@@ -3110,6 +3112,46 @@ struct smb_version_values smb21_values = { | |||
3110 | .create_lease_size = sizeof(struct create_lease), | 3112 | .create_lease_size = sizeof(struct create_lease), |
3111 | }; | 3113 | }; |
3112 | 3114 | ||
3115 | struct smb_version_values smb3any_values = { | ||
3116 | .version_string = SMB3ANY_VERSION_STRING, | ||
3117 | .protocol_id = SMB302_PROT_ID, /* doesn't matter, send protocol array */ | ||
3118 | .req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION, | ||
3119 | .large_lock_type = 0, | ||
3120 | .exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE_LOCK, | ||
3121 | .shared_lock_type = SMB2_LOCKFLAG_SHARED_LOCK, | ||
3122 | .unlock_lock_type = SMB2_LOCKFLAG_UNLOCK, | ||
3123 | .header_size = sizeof(struct smb2_hdr), | ||
3124 | .max_header_size = MAX_SMB2_HDR_SIZE, | ||
3125 | .read_rsp_size = sizeof(struct smb2_read_rsp) - 1, | ||
3126 | .lock_cmd = SMB2_LOCK, | ||
3127 | .cap_unix = 0, | ||
3128 | .cap_nt_find = SMB2_NT_FIND, | ||
3129 | .cap_large_files = SMB2_LARGE_FILES, | ||
3130 | .signing_enabled = SMB2_NEGOTIATE_SIGNING_ENABLED | SMB2_NEGOTIATE_SIGNING_REQUIRED, | ||
3131 | .signing_required = SMB2_NEGOTIATE_SIGNING_REQUIRED, | ||
3132 | .create_lease_size = sizeof(struct create_lease_v2), | ||
3133 | }; | ||
3134 | |||
3135 | struct smb_version_values smbdefault_values = { | ||
3136 | .version_string = SMBDEFAULT_VERSION_STRING, | ||
3137 | .protocol_id = SMB302_PROT_ID, /* doesn't matter, send protocol array */ | ||
3138 | .req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION, | ||
3139 | .large_lock_type = 0, | ||
3140 | .exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE_LOCK, | ||
3141 | .shared_lock_type = SMB2_LOCKFLAG_SHARED_LOCK, | ||
3142 | .unlock_lock_type = SMB2_LOCKFLAG_UNLOCK, | ||
3143 | .header_size = sizeof(struct smb2_hdr), | ||
3144 | .max_header_size = MAX_SMB2_HDR_SIZE, | ||
3145 | .read_rsp_size = sizeof(struct smb2_read_rsp) - 1, | ||
3146 | .lock_cmd = SMB2_LOCK, | ||
3147 | .cap_unix = 0, | ||
3148 | .cap_nt_find = SMB2_NT_FIND, | ||
3149 | .cap_large_files = SMB2_LARGE_FILES, | ||
3150 | .signing_enabled = SMB2_NEGOTIATE_SIGNING_ENABLED | SMB2_NEGOTIATE_SIGNING_REQUIRED, | ||
3151 | .signing_required = SMB2_NEGOTIATE_SIGNING_REQUIRED, | ||
3152 | .create_lease_size = sizeof(struct create_lease_v2), | ||
3153 | }; | ||
3154 | |||
3113 | struct smb_version_values smb30_values = { | 3155 | struct smb_version_values smb30_values = { |
3114 | .version_string = SMB30_VERSION_STRING, | 3156 | .version_string = SMB30_VERSION_STRING, |
3115 | .protocol_id = SMB30_PROT_ID, | 3157 | .protocol_id = SMB30_PROT_ID, |
diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c index 5531e7ee1210..6f0e6343c15e 100644 --- a/fs/cifs/smb2pdu.c +++ b/fs/cifs/smb2pdu.c | |||
@@ -439,7 +439,7 @@ assemble_neg_contexts(struct smb2_negotiate_req *req) | |||
439 | build_encrypt_ctxt((struct smb2_encryption_neg_context *)pneg_ctxt); | 439 | build_encrypt_ctxt((struct smb2_encryption_neg_context *)pneg_ctxt); |
440 | req->NegotiateContextOffset = cpu_to_le32(OFFSET_OF_NEG_CONTEXT); | 440 | req->NegotiateContextOffset = cpu_to_le32(OFFSET_OF_NEG_CONTEXT); |
441 | req->NegotiateContextCount = cpu_to_le16(2); | 441 | req->NegotiateContextCount = cpu_to_le16(2); |
442 | inc_rfc1001_len(req, 4 + sizeof(struct smb2_preauth_neg_context) + 2 | 442 | inc_rfc1001_len(req, 4 + sizeof(struct smb2_preauth_neg_context) |
443 | + sizeof(struct smb2_encryption_neg_context)); /* calculate hash */ | 443 | + sizeof(struct smb2_encryption_neg_context)); /* calculate hash */ |
444 | } | 444 | } |
445 | #else | 445 | #else |
@@ -491,10 +491,25 @@ SMB2_negotiate(const unsigned int xid, struct cifs_ses *ses) | |||
491 | 491 | ||
492 | req->hdr.sync_hdr.SessionId = 0; | 492 | req->hdr.sync_hdr.SessionId = 0; |
493 | 493 | ||
494 | req->Dialects[0] = cpu_to_le16(ses->server->vals->protocol_id); | 494 | if (strcmp(ses->server->vals->version_string, |
495 | 495 | SMB3ANY_VERSION_STRING) == 0) { | |
496 | req->DialectCount = cpu_to_le16(1); /* One vers= at a time for now */ | 496 | req->Dialects[0] = cpu_to_le16(SMB30_PROT_ID); |
497 | inc_rfc1001_len(req, 2); | 497 | req->Dialects[1] = cpu_to_le16(SMB302_PROT_ID); |
498 | req->DialectCount = cpu_to_le16(2); | ||
499 | inc_rfc1001_len(req, 4); | ||
500 | } else if (strcmp(ses->server->vals->version_string, | ||
501 | SMBDEFAULT_VERSION_STRING) == 0) { | ||
502 | req->Dialects[0] = cpu_to_le16(SMB21_PROT_ID); | ||
503 | req->Dialects[1] = cpu_to_le16(SMB30_PROT_ID); | ||
504 | req->Dialects[2] = cpu_to_le16(SMB302_PROT_ID); | ||
505 | req->DialectCount = cpu_to_le16(3); | ||
506 | inc_rfc1001_len(req, 6); | ||
507 | } else { | ||
508 | /* otherwise send specific dialect */ | ||
509 | req->Dialects[0] = cpu_to_le16(ses->server->vals->protocol_id); | ||
510 | req->DialectCount = cpu_to_le16(1); | ||
511 | inc_rfc1001_len(req, 2); | ||
512 | } | ||
498 | 513 | ||
499 | /* only one of SMB2 signing flags may be set in SMB2 request */ | 514 | /* only one of SMB2 signing flags may be set in SMB2 request */ |
500 | if (ses->sign) | 515 | if (ses->sign) |
@@ -528,16 +543,43 @@ SMB2_negotiate(const unsigned int xid, struct cifs_ses *ses) | |||
528 | */ | 543 | */ |
529 | if (rc == -EOPNOTSUPP) { | 544 | if (rc == -EOPNOTSUPP) { |
530 | cifs_dbg(VFS, "Dialect not supported by server. Consider " | 545 | cifs_dbg(VFS, "Dialect not supported by server. Consider " |
531 | "specifying vers=1.0 or vers=2.1 on mount for accessing" | 546 | "specifying vers=1.0 or vers=2.0 on mount for accessing" |
532 | " older servers\n"); | 547 | " older servers\n"); |
533 | goto neg_exit; | 548 | goto neg_exit; |
534 | } else if (rc != 0) | 549 | } else if (rc != 0) |
535 | goto neg_exit; | 550 | goto neg_exit; |
536 | 551 | ||
552 | if (strcmp(ses->server->vals->version_string, | ||
553 | SMB3ANY_VERSION_STRING) == 0) { | ||
554 | if (rsp->DialectRevision == cpu_to_le16(SMB20_PROT_ID)) { | ||
555 | cifs_dbg(VFS, | ||
556 | "SMB2 dialect returned but not requested\n"); | ||
557 | return -EIO; | ||
558 | } else if (rsp->DialectRevision == cpu_to_le16(SMB21_PROT_ID)) { | ||
559 | cifs_dbg(VFS, | ||
560 | "SMB2.1 dialect returned but not requested\n"); | ||
561 | return -EIO; | ||
562 | } | ||
563 | } else if (strcmp(ses->server->vals->version_string, | ||
564 | SMBDEFAULT_VERSION_STRING) == 0) { | ||
565 | if (rsp->DialectRevision == cpu_to_le16(SMB20_PROT_ID)) { | ||
566 | cifs_dbg(VFS, | ||
567 | "SMB2 dialect returned but not requested\n"); | ||
568 | return -EIO; | ||
569 | } else if (rsp->DialectRevision == cpu_to_le16(SMB21_PROT_ID)) { | ||
570 | /* ops set to 3.0 by default for default so update */ | ||
571 | ses->server->ops = &smb21_operations; | ||
572 | } | ||
573 | } else if (le16_to_cpu(rsp->DialectRevision) != | ||
574 | ses->server->vals->protocol_id) { | ||
575 | /* if requested single dialect ensure returned dialect matched */ | ||
576 | cifs_dbg(VFS, "Illegal 0x%x dialect returned: not requested\n", | ||
577 | le16_to_cpu(rsp->DialectRevision)); | ||
578 | return -EIO; | ||
579 | } | ||
580 | |||
537 | cifs_dbg(FYI, "mode 0x%x\n", rsp->SecurityMode); | 581 | cifs_dbg(FYI, "mode 0x%x\n", rsp->SecurityMode); |
538 | 582 | ||
539 | /* BB we may eventually want to match the negotiated vs. requested | ||
540 | dialect, even though we are only requesting one at a time */ | ||
541 | if (rsp->DialectRevision == cpu_to_le16(SMB20_PROT_ID)) | 583 | if (rsp->DialectRevision == cpu_to_le16(SMB20_PROT_ID)) |
542 | cifs_dbg(FYI, "negotiated smb2.0 dialect\n"); | 584 | cifs_dbg(FYI, "negotiated smb2.0 dialect\n"); |
543 | else if (rsp->DialectRevision == cpu_to_le16(SMB21_PROT_ID)) | 585 | else if (rsp->DialectRevision == cpu_to_le16(SMB21_PROT_ID)) |
@@ -558,6 +600,8 @@ SMB2_negotiate(const unsigned int xid, struct cifs_ses *ses) | |||
558 | } | 600 | } |
559 | server->dialect = le16_to_cpu(rsp->DialectRevision); | 601 | server->dialect = le16_to_cpu(rsp->DialectRevision); |
560 | 602 | ||
603 | /* BB: add check that dialect was valid given dialect(s) we asked for */ | ||
604 | |||
561 | /* SMB2 only has an extended negflavor */ | 605 | /* SMB2 only has an extended negflavor */ |
562 | server->negflavor = CIFS_NEGFLAVOR_EXTENDED; | 606 | server->negflavor = CIFS_NEGFLAVOR_EXTENDED; |
563 | /* set it to the maximum buffer size value we can send with 1 credit */ | 607 | /* set it to the maximum buffer size value we can send with 1 credit */ |
@@ -606,20 +650,28 @@ int smb3_validate_negotiate(const unsigned int xid, struct cifs_tcon *tcon) | |||
606 | struct validate_negotiate_info_req vneg_inbuf; | 650 | struct validate_negotiate_info_req vneg_inbuf; |
607 | struct validate_negotiate_info_rsp *pneg_rsp; | 651 | struct validate_negotiate_info_rsp *pneg_rsp; |
608 | u32 rsplen; | 652 | u32 rsplen; |
653 | u32 inbuflen; /* max of 4 dialects */ | ||
609 | 654 | ||
610 | cifs_dbg(FYI, "validate negotiate\n"); | 655 | cifs_dbg(FYI, "validate negotiate\n"); |
611 | 656 | ||
612 | /* | 657 | /* |
613 | * validation ioctl must be signed, so no point sending this if we | 658 | * validation ioctl must be signed, so no point sending this if we |
614 | * can not sign it. We could eventually change this to selectively | 659 | * can not sign it (ie are not known user). Even if signing is not |
660 | * required (enabled but not negotiated), in those cases we selectively | ||
615 | * sign just this, the first and only signed request on a connection. | 661 | * sign just this, the first and only signed request on a connection. |
616 | * This is good enough for now since a user who wants better security | 662 | * Having validation of negotiate info helps reduce attack vectors. |
617 | * would also enable signing on the mount. Having validation of | ||
618 | * negotiate info for signed connections helps reduce attack vectors | ||
619 | */ | 663 | */ |
620 | if (tcon->ses->server->sign == false) | 664 | if (tcon->ses->session_flags & SMB2_SESSION_FLAG_IS_GUEST) |
621 | return 0; /* validation requires signing */ | 665 | return 0; /* validation requires signing */ |
622 | 666 | ||
667 | if (tcon->ses->user_name == NULL) { | ||
668 | cifs_dbg(FYI, "Can't validate negotiate: null user mount\n"); | ||
669 | return 0; /* validation requires signing */ | ||
670 | } | ||
671 | |||
672 | if (tcon->ses->session_flags & SMB2_SESSION_FLAG_IS_NULL) | ||
673 | cifs_dbg(VFS, "Unexpected null user (anonymous) auth flag sent by server\n"); | ||
674 | |||
623 | vneg_inbuf.Capabilities = | 675 | vneg_inbuf.Capabilities = |
624 | cpu_to_le32(tcon->ses->server->vals->req_capabilities); | 676 | cpu_to_le32(tcon->ses->server->vals->req_capabilities); |
625 | memcpy(vneg_inbuf.Guid, tcon->ses->server->client_guid, | 677 | memcpy(vneg_inbuf.Guid, tcon->ses->server->client_guid, |
@@ -634,9 +686,30 @@ int smb3_validate_negotiate(const unsigned int xid, struct cifs_tcon *tcon) | |||
634 | else | 686 | else |
635 | vneg_inbuf.SecurityMode = 0; | 687 | vneg_inbuf.SecurityMode = 0; |
636 | 688 | ||
637 | vneg_inbuf.DialectCount = cpu_to_le16(1); | 689 | |
638 | vneg_inbuf.Dialects[0] = | 690 | if (strcmp(tcon->ses->server->vals->version_string, |
639 | cpu_to_le16(tcon->ses->server->vals->protocol_id); | 691 | SMB3ANY_VERSION_STRING) == 0) { |
692 | vneg_inbuf.Dialects[0] = cpu_to_le16(SMB30_PROT_ID); | ||
693 | vneg_inbuf.Dialects[1] = cpu_to_le16(SMB302_PROT_ID); | ||
694 | vneg_inbuf.DialectCount = cpu_to_le16(2); | ||
695 | /* structure is big enough for 3 dialects, sending only 2 */ | ||
696 | inbuflen = sizeof(struct validate_negotiate_info_req) - 2; | ||
697 | } else if (strcmp(tcon->ses->server->vals->version_string, | ||
698 | SMBDEFAULT_VERSION_STRING) == 0) { | ||
699 | vneg_inbuf.Dialects[0] = cpu_to_le16(SMB21_PROT_ID); | ||
700 | vneg_inbuf.Dialects[1] = cpu_to_le16(SMB30_PROT_ID); | ||
701 | vneg_inbuf.Dialects[2] = cpu_to_le16(SMB302_PROT_ID); | ||
702 | vneg_inbuf.DialectCount = cpu_to_le16(3); | ||
703 | /* structure is big enough for 3 dialects */ | ||
704 | inbuflen = sizeof(struct validate_negotiate_info_req); | ||
705 | } else { | ||
706 | /* otherwise specific dialect was requested */ | ||
707 | vneg_inbuf.Dialects[0] = | ||
708 | cpu_to_le16(tcon->ses->server->vals->protocol_id); | ||
709 | vneg_inbuf.DialectCount = cpu_to_le16(1); | ||
710 | /* structure is big enough for 3 dialects, sending only 1 */ | ||
711 | inbuflen = sizeof(struct validate_negotiate_info_req) - 4; | ||
712 | } | ||
640 | 713 | ||
641 | rc = SMB2_ioctl(xid, tcon, NO_FILE_ID, NO_FILE_ID, | 714 | rc = SMB2_ioctl(xid, tcon, NO_FILE_ID, NO_FILE_ID, |
642 | FSCTL_VALIDATE_NEGOTIATE_INFO, true /* is_fsctl */, | 715 | FSCTL_VALIDATE_NEGOTIATE_INFO, true /* is_fsctl */, |
@@ -1110,6 +1183,8 @@ SMB2_sess_setup(const unsigned int xid, struct cifs_ses *ses, | |||
1110 | while (sess_data->func) | 1183 | while (sess_data->func) |
1111 | sess_data->func(sess_data); | 1184 | sess_data->func(sess_data); |
1112 | 1185 | ||
1186 | if ((ses->session_flags & SMB2_SESSION_FLAG_IS_GUEST) && (ses->sign)) | ||
1187 | cifs_dbg(VFS, "signing requested but authenticated as guest\n"); | ||
1113 | rc = sess_data->result; | 1188 | rc = sess_data->result; |
1114 | out: | 1189 | out: |
1115 | kfree(sess_data); | 1190 | kfree(sess_data); |
@@ -1634,7 +1709,7 @@ SMB2_open(const unsigned int xid, struct cifs_open_parms *oparms, __le16 *path, | |||
1634 | struct cifs_tcon *tcon = oparms->tcon; | 1709 | struct cifs_tcon *tcon = oparms->tcon; |
1635 | struct cifs_ses *ses = tcon->ses; | 1710 | struct cifs_ses *ses = tcon->ses; |
1636 | struct kvec iov[4]; | 1711 | struct kvec iov[4]; |
1637 | struct kvec rsp_iov; | 1712 | struct kvec rsp_iov = {NULL, 0}; |
1638 | int resp_buftype; | 1713 | int resp_buftype; |
1639 | int uni_path_len; | 1714 | int uni_path_len; |
1640 | __le16 *copy_path = NULL; | 1715 | __le16 *copy_path = NULL; |
@@ -1763,7 +1838,7 @@ SMB2_open(const unsigned int xid, struct cifs_open_parms *oparms, __le16 *path, | |||
1763 | 1838 | ||
1764 | if (rc != 0) { | 1839 | if (rc != 0) { |
1765 | cifs_stats_fail_inc(tcon, SMB2_CREATE_HE); | 1840 | cifs_stats_fail_inc(tcon, SMB2_CREATE_HE); |
1766 | if (err_buf) | 1841 | if (err_buf && rsp) |
1767 | *err_buf = kmemdup(rsp, get_rfc1002_length(rsp) + 4, | 1842 | *err_buf = kmemdup(rsp, get_rfc1002_length(rsp) + 4, |
1768 | GFP_KERNEL); | 1843 | GFP_KERNEL); |
1769 | goto creat_exit; | 1844 | goto creat_exit; |
diff --git a/fs/cifs/smb2pdu.h b/fs/cifs/smb2pdu.h index 393ed5f4e1b6..6c9653a130c8 100644 --- a/fs/cifs/smb2pdu.h +++ b/fs/cifs/smb2pdu.h | |||
@@ -716,7 +716,7 @@ struct validate_negotiate_info_req { | |||
716 | __u8 Guid[SMB2_CLIENT_GUID_SIZE]; | 716 | __u8 Guid[SMB2_CLIENT_GUID_SIZE]; |
717 | __le16 SecurityMode; | 717 | __le16 SecurityMode; |
718 | __le16 DialectCount; | 718 | __le16 DialectCount; |
719 | __le16 Dialects[1]; /* dialect (someday maybe list) client asked for */ | 719 | __le16 Dialects[3]; /* BB expand this if autonegotiate > 3 dialects */ |
720 | } __packed; | 720 | } __packed; |
721 | 721 | ||
722 | struct validate_negotiate_info_rsp { | 722 | struct validate_negotiate_info_rsp { |
diff --git a/fs/direct-io.c b/fs/direct-io.c index 5fa2211e49ae..62cf812ed0e5 100644 --- a/fs/direct-io.c +++ b/fs/direct-io.c | |||
@@ -229,6 +229,7 @@ static ssize_t dio_complete(struct dio *dio, ssize_t ret, bool is_async) | |||
229 | { | 229 | { |
230 | loff_t offset = dio->iocb->ki_pos; | 230 | loff_t offset = dio->iocb->ki_pos; |
231 | ssize_t transferred = 0; | 231 | ssize_t transferred = 0; |
232 | int err; | ||
232 | 233 | ||
233 | /* | 234 | /* |
234 | * AIO submission can race with bio completion to get here while | 235 | * AIO submission can race with bio completion to get here while |
@@ -258,8 +259,22 @@ static ssize_t dio_complete(struct dio *dio, ssize_t ret, bool is_async) | |||
258 | if (ret == 0) | 259 | if (ret == 0) |
259 | ret = transferred; | 260 | ret = transferred; |
260 | 261 | ||
262 | /* | ||
263 | * Try again to invalidate clean pages which might have been cached by | ||
264 | * non-direct readahead, or faulted in by get_user_pages() if the source | ||
265 | * of the write was an mmap'ed region of the file we're writing. Either | ||
266 | * one is a pretty crazy thing to do, so we don't support it 100%. If | ||
267 | * this invalidation fails, tough, the write still worked... | ||
268 | */ | ||
269 | if (ret > 0 && dio->op == REQ_OP_WRITE && | ||
270 | dio->inode->i_mapping->nrpages) { | ||
271 | err = invalidate_inode_pages2_range(dio->inode->i_mapping, | ||
272 | offset >> PAGE_SHIFT, | ||
273 | (offset + ret - 1) >> PAGE_SHIFT); | ||
274 | WARN_ON_ONCE(err); | ||
275 | } | ||
276 | |||
261 | if (dio->end_io) { | 277 | if (dio->end_io) { |
262 | int err; | ||
263 | 278 | ||
264 | // XXX: ki_pos?? | 279 | // XXX: ki_pos?? |
265 | err = dio->end_io(dio->iocb, offset, ret, dio->private); | 280 | err = dio->end_io(dio->iocb, offset, ret, dio->private); |
@@ -304,6 +319,7 @@ static void dio_bio_end_aio(struct bio *bio) | |||
304 | struct dio *dio = bio->bi_private; | 319 | struct dio *dio = bio->bi_private; |
305 | unsigned long remaining; | 320 | unsigned long remaining; |
306 | unsigned long flags; | 321 | unsigned long flags; |
322 | bool defer_completion = false; | ||
307 | 323 | ||
308 | /* cleanup the bio */ | 324 | /* cleanup the bio */ |
309 | dio_bio_complete(dio, bio); | 325 | dio_bio_complete(dio, bio); |
@@ -315,7 +331,19 @@ static void dio_bio_end_aio(struct bio *bio) | |||
315 | spin_unlock_irqrestore(&dio->bio_lock, flags); | 331 | spin_unlock_irqrestore(&dio->bio_lock, flags); |
316 | 332 | ||
317 | if (remaining == 0) { | 333 | if (remaining == 0) { |
318 | if (dio->result && dio->defer_completion) { | 334 | /* |
335 | * Defer completion when defer_completion is set or | ||
336 | * when the inode has pages mapped and this is AIO write. | ||
337 | * We need to invalidate those pages because there is a | ||
338 | * chance they contain stale data in the case buffered IO | ||
339 | * went in between AIO submission and completion into the | ||
340 | * same region. | ||
341 | */ | ||
342 | if (dio->result) | ||
343 | defer_completion = dio->defer_completion || | ||
344 | (dio->op == REQ_OP_WRITE && | ||
345 | dio->inode->i_mapping->nrpages); | ||
346 | if (defer_completion) { | ||
319 | INIT_WORK(&dio->complete_work, dio_aio_complete_work); | 347 | INIT_WORK(&dio->complete_work, dio_aio_complete_work); |
320 | queue_work(dio->inode->i_sb->s_dio_done_wq, | 348 | queue_work(dio->inode->i_sb->s_dio_done_wq, |
321 | &dio->complete_work); | 349 | &dio->complete_work); |
@@ -1210,10 +1238,19 @@ do_blockdev_direct_IO(struct kiocb *iocb, struct inode *inode, | |||
1210 | * For AIO O_(D)SYNC writes we need to defer completions to a workqueue | 1238 | * For AIO O_(D)SYNC writes we need to defer completions to a workqueue |
1211 | * so that we can call ->fsync. | 1239 | * so that we can call ->fsync. |
1212 | */ | 1240 | */ |
1213 | if (dio->is_async && iov_iter_rw(iter) == WRITE && | 1241 | if (dio->is_async && iov_iter_rw(iter) == WRITE) { |
1214 | ((iocb->ki_filp->f_flags & O_DSYNC) || | 1242 | retval = 0; |
1215 | IS_SYNC(iocb->ki_filp->f_mapping->host))) { | 1243 | if ((iocb->ki_filp->f_flags & O_DSYNC) || |
1216 | retval = dio_set_defer_completion(dio); | 1244 | IS_SYNC(iocb->ki_filp->f_mapping->host)) |
1245 | retval = dio_set_defer_completion(dio); | ||
1246 | else if (!dio->inode->i_sb->s_dio_done_wq) { | ||
1247 | /* | ||
1248 | * In case of AIO write racing with buffered read we | ||
1249 | * need to defer completion. We can't decide this now, | ||
1250 | * however the workqueue needs to be initialized here. | ||
1251 | */ | ||
1252 | retval = sb_init_dio_done_wq(dio->inode->i_sb); | ||
1253 | } | ||
1217 | if (retval) { | 1254 | if (retval) { |
1218 | /* | 1255 | /* |
1219 | * We grab i_mutex only for reads so we don't have | 1256 | * We grab i_mutex only for reads so we don't have |
@@ -1410,7 +1410,7 @@ static void free_bprm(struct linux_binprm *bprm) | |||
1410 | kfree(bprm); | 1410 | kfree(bprm); |
1411 | } | 1411 | } |
1412 | 1412 | ||
1413 | int bprm_change_interp(char *interp, struct linux_binprm *bprm) | 1413 | int bprm_change_interp(const char *interp, struct linux_binprm *bprm) |
1414 | { | 1414 | { |
1415 | /* If a binfmt changed the interp, free it first. */ | 1415 | /* If a binfmt changed the interp, free it first. */ |
1416 | if (bprm->interp != bprm->filename) | 1416 | if (bprm->interp != bprm->filename) |
diff --git a/fs/fcntl.c b/fs/fcntl.c index 0491da3b28c3..448a1119f0be 100644 --- a/fs/fcntl.c +++ b/fs/fcntl.c | |||
@@ -749,7 +749,7 @@ static void send_sigio_to_task(struct task_struct *p, | |||
749 | * specific si_codes. In that case use SI_SIGIO instead | 749 | * specific si_codes. In that case use SI_SIGIO instead |
750 | * to remove the ambiguity. | 750 | * to remove the ambiguity. |
751 | */ | 751 | */ |
752 | if (sig_specific_sicodes(signum)) | 752 | if ((signum != SIGPOLL) && sig_specific_sicodes(signum)) |
753 | si.si_code = SI_SIGIO; | 753 | si.si_code = SI_SIGIO; |
754 | 754 | ||
755 | /* Make sure we are called with one of the POLL_* | 755 | /* Make sure we are called with one of the POLL_* |
diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c index 98e845b7841b..11066d8647d2 100644 --- a/fs/gfs2/glock.c +++ b/fs/gfs2/glock.c | |||
@@ -1945,13 +1945,9 @@ static void *gfs2_glock_seq_start(struct seq_file *seq, loff_t *pos) | |||
1945 | { | 1945 | { |
1946 | struct gfs2_glock_iter *gi = seq->private; | 1946 | struct gfs2_glock_iter *gi = seq->private; |
1947 | loff_t n = *pos; | 1947 | loff_t n = *pos; |
1948 | int ret; | ||
1949 | |||
1950 | if (gi->last_pos <= *pos) | ||
1951 | n = (*pos - gi->last_pos); | ||
1952 | 1948 | ||
1953 | ret = rhashtable_walk_start(&gi->hti); | 1949 | rhashtable_walk_enter(&gl_hash_table, &gi->hti); |
1954 | if (ret) | 1950 | if (rhashtable_walk_start(&gi->hti) != 0) |
1955 | return NULL; | 1951 | return NULL; |
1956 | 1952 | ||
1957 | do { | 1953 | do { |
@@ -1959,6 +1955,7 @@ static void *gfs2_glock_seq_start(struct seq_file *seq, loff_t *pos) | |||
1959 | } while (gi->gl && n--); | 1955 | } while (gi->gl && n--); |
1960 | 1956 | ||
1961 | gi->last_pos = *pos; | 1957 | gi->last_pos = *pos; |
1958 | |||
1962 | return gi->gl; | 1959 | return gi->gl; |
1963 | } | 1960 | } |
1964 | 1961 | ||
@@ -1970,6 +1967,7 @@ static void *gfs2_glock_seq_next(struct seq_file *seq, void *iter_ptr, | |||
1970 | (*pos)++; | 1967 | (*pos)++; |
1971 | gi->last_pos = *pos; | 1968 | gi->last_pos = *pos; |
1972 | gfs2_glock_iter_next(gi); | 1969 | gfs2_glock_iter_next(gi); |
1970 | |||
1973 | return gi->gl; | 1971 | return gi->gl; |
1974 | } | 1972 | } |
1975 | 1973 | ||
@@ -1980,6 +1978,7 @@ static void gfs2_glock_seq_stop(struct seq_file *seq, void *iter_ptr) | |||
1980 | 1978 | ||
1981 | gi->gl = NULL; | 1979 | gi->gl = NULL; |
1982 | rhashtable_walk_stop(&gi->hti); | 1980 | rhashtable_walk_stop(&gi->hti); |
1981 | rhashtable_walk_exit(&gi->hti); | ||
1983 | } | 1982 | } |
1984 | 1983 | ||
1985 | static int gfs2_glock_seq_show(struct seq_file *seq, void *iter_ptr) | 1984 | static int gfs2_glock_seq_show(struct seq_file *seq, void *iter_ptr) |
@@ -2042,12 +2041,10 @@ static int __gfs2_glocks_open(struct inode *inode, struct file *file, | |||
2042 | struct gfs2_glock_iter *gi = seq->private; | 2041 | struct gfs2_glock_iter *gi = seq->private; |
2043 | 2042 | ||
2044 | gi->sdp = inode->i_private; | 2043 | gi->sdp = inode->i_private; |
2045 | gi->last_pos = 0; | ||
2046 | seq->buf = kmalloc(GFS2_SEQ_GOODSIZE, GFP_KERNEL | __GFP_NOWARN); | 2044 | seq->buf = kmalloc(GFS2_SEQ_GOODSIZE, GFP_KERNEL | __GFP_NOWARN); |
2047 | if (seq->buf) | 2045 | if (seq->buf) |
2048 | seq->size = GFS2_SEQ_GOODSIZE; | 2046 | seq->size = GFS2_SEQ_GOODSIZE; |
2049 | gi->gl = NULL; | 2047 | gi->gl = NULL; |
2050 | rhashtable_walk_enter(&gl_hash_table, &gi->hti); | ||
2051 | } | 2048 | } |
2052 | return ret; | 2049 | return ret; |
2053 | } | 2050 | } |
@@ -2063,7 +2060,6 @@ static int gfs2_glocks_release(struct inode *inode, struct file *file) | |||
2063 | struct gfs2_glock_iter *gi = seq->private; | 2060 | struct gfs2_glock_iter *gi = seq->private; |
2064 | 2061 | ||
2065 | gi->gl = NULL; | 2062 | gi->gl = NULL; |
2066 | rhashtable_walk_exit(&gi->hti); | ||
2067 | return seq_release_private(inode, file); | 2063 | return seq_release_private(inode, file); |
2068 | } | 2064 | } |
2069 | 2065 | ||
diff --git a/fs/iomap.c b/fs/iomap.c index 269b24a01f32..be61cf742b5e 100644 --- a/fs/iomap.c +++ b/fs/iomap.c | |||
@@ -713,8 +713,24 @@ struct iomap_dio { | |||
713 | static ssize_t iomap_dio_complete(struct iomap_dio *dio) | 713 | static ssize_t iomap_dio_complete(struct iomap_dio *dio) |
714 | { | 714 | { |
715 | struct kiocb *iocb = dio->iocb; | 715 | struct kiocb *iocb = dio->iocb; |
716 | struct inode *inode = file_inode(iocb->ki_filp); | ||
716 | ssize_t ret; | 717 | ssize_t ret; |
717 | 718 | ||
719 | /* | ||
720 | * Try again to invalidate clean pages which might have been cached by | ||
721 | * non-direct readahead, or faulted in by get_user_pages() if the source | ||
722 | * of the write was an mmap'ed region of the file we're writing. Either | ||
723 | * one is a pretty crazy thing to do, so we don't support it 100%. If | ||
724 | * this invalidation fails, tough, the write still worked... | ||
725 | */ | ||
726 | if (!dio->error && | ||
727 | (dio->flags & IOMAP_DIO_WRITE) && inode->i_mapping->nrpages) { | ||
728 | ret = invalidate_inode_pages2_range(inode->i_mapping, | ||
729 | iocb->ki_pos >> PAGE_SHIFT, | ||
730 | (iocb->ki_pos + dio->size - 1) >> PAGE_SHIFT); | ||
731 | WARN_ON_ONCE(ret); | ||
732 | } | ||
733 | |||
718 | if (dio->end_io) { | 734 | if (dio->end_io) { |
719 | ret = dio->end_io(iocb, | 735 | ret = dio->end_io(iocb, |
720 | dio->error ? dio->error : dio->size, | 736 | dio->error ? dio->error : dio->size, |
@@ -993,6 +1009,13 @@ iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter, | |||
993 | WARN_ON_ONCE(ret); | 1009 | WARN_ON_ONCE(ret); |
994 | ret = 0; | 1010 | ret = 0; |
995 | 1011 | ||
1012 | if (iov_iter_rw(iter) == WRITE && !is_sync_kiocb(iocb) && | ||
1013 | !inode->i_sb->s_dio_done_wq) { | ||
1014 | ret = sb_init_dio_done_wq(inode->i_sb); | ||
1015 | if (ret < 0) | ||
1016 | goto out_free_dio; | ||
1017 | } | ||
1018 | |||
996 | inode_dio_begin(inode); | 1019 | inode_dio_begin(inode); |
997 | 1020 | ||
998 | blk_start_plug(&plug); | 1021 | blk_start_plug(&plug); |
@@ -1015,13 +1038,6 @@ iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter, | |||
1015 | if (ret < 0) | 1038 | if (ret < 0) |
1016 | iomap_dio_set_error(dio, ret); | 1039 | iomap_dio_set_error(dio, ret); |
1017 | 1040 | ||
1018 | if (ret >= 0 && iov_iter_rw(iter) == WRITE && !is_sync_kiocb(iocb) && | ||
1019 | !inode->i_sb->s_dio_done_wq) { | ||
1020 | ret = sb_init_dio_done_wq(inode->i_sb); | ||
1021 | if (ret < 0) | ||
1022 | iomap_dio_set_error(dio, ret); | ||
1023 | } | ||
1024 | |||
1025 | if (!atomic_dec_and_test(&dio->ref)) { | 1041 | if (!atomic_dec_and_test(&dio->ref)) { |
1026 | if (!is_sync_kiocb(iocb)) | 1042 | if (!is_sync_kiocb(iocb)) |
1027 | return -EIOCBQUEUED; | 1043 | return -EIOCBQUEUED; |
@@ -1042,19 +1058,6 @@ iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter, | |||
1042 | 1058 | ||
1043 | ret = iomap_dio_complete(dio); | 1059 | ret = iomap_dio_complete(dio); |
1044 | 1060 | ||
1045 | /* | ||
1046 | * Try again to invalidate clean pages which might have been cached by | ||
1047 | * non-direct readahead, or faulted in by get_user_pages() if the source | ||
1048 | * of the write was an mmap'ed region of the file we're writing. Either | ||
1049 | * one is a pretty crazy thing to do, so we don't support it 100%. If | ||
1050 | * this invalidation fails, tough, the write still worked... | ||
1051 | */ | ||
1052 | if (iov_iter_rw(iter) == WRITE) { | ||
1053 | int err = invalidate_inode_pages2_range(mapping, | ||
1054 | start >> PAGE_SHIFT, end >> PAGE_SHIFT); | ||
1055 | WARN_ON_ONCE(err); | ||
1056 | } | ||
1057 | |||
1058 | return ret; | 1061 | return ret; |
1059 | 1062 | ||
1060 | out_free_dio: | 1063 | out_free_dio: |
diff --git a/fs/isofs/inode.c b/fs/isofs/inode.c index db692f554158..447a24d77b89 100644 --- a/fs/isofs/inode.c +++ b/fs/isofs/inode.c | |||
@@ -514,9 +514,11 @@ static int isofs_show_options(struct seq_file *m, struct dentry *root) | |||
514 | if (sbi->s_fmode != ISOFS_INVALID_MODE) | 514 | if (sbi->s_fmode != ISOFS_INVALID_MODE) |
515 | seq_printf(m, ",fmode=%o", sbi->s_fmode); | 515 | seq_printf(m, ",fmode=%o", sbi->s_fmode); |
516 | 516 | ||
517 | #ifdef CONFIG_JOLIET | ||
517 | if (sbi->s_nls_iocharset && | 518 | if (sbi->s_nls_iocharset && |
518 | strcmp(sbi->s_nls_iocharset->charset, CONFIG_NLS_DEFAULT) != 0) | 519 | strcmp(sbi->s_nls_iocharset->charset, CONFIG_NLS_DEFAULT) != 0) |
519 | seq_printf(m, ",iocharset=%s", sbi->s_nls_iocharset->charset); | 520 | seq_printf(m, ",iocharset=%s", sbi->s_nls_iocharset->charset); |
521 | #endif | ||
520 | return 0; | 522 | return 0; |
521 | } | 523 | } |
522 | 524 | ||
diff --git a/fs/namespace.c b/fs/namespace.c index 54059b142d6b..3b601f115b6c 100644 --- a/fs/namespace.c +++ b/fs/namespace.c | |||
@@ -468,7 +468,9 @@ static inline int may_write_real(struct file *file) | |||
468 | 468 | ||
469 | /* File refers to upper, writable layer? */ | 469 | /* File refers to upper, writable layer? */ |
470 | upperdentry = d_real(dentry, NULL, 0, D_REAL_UPPER); | 470 | upperdentry = d_real(dentry, NULL, 0, D_REAL_UPPER); |
471 | if (upperdentry && file_inode(file) == d_inode(upperdentry)) | 471 | if (upperdentry && |
472 | (file_inode(file) == d_inode(upperdentry) || | ||
473 | file_inode(file) == d_inode(dentry))) | ||
472 | return 0; | 474 | return 0; |
473 | 475 | ||
474 | /* Lower layer: can't write to real file, sorry... */ | 476 | /* Lower layer: can't write to real file, sorry... */ |
diff --git a/fs/overlayfs/copy_up.c b/fs/overlayfs/copy_up.c index aad97b30d5e6..c441f9387a1b 100644 --- a/fs/overlayfs/copy_up.c +++ b/fs/overlayfs/copy_up.c | |||
@@ -561,10 +561,8 @@ static int ovl_do_copy_up(struct ovl_copy_up_ctx *c) | |||
561 | c->tmpfile = true; | 561 | c->tmpfile = true; |
562 | err = ovl_copy_up_locked(c); | 562 | err = ovl_copy_up_locked(c); |
563 | } else { | 563 | } else { |
564 | err = -EIO; | 564 | err = ovl_lock_rename_workdir(c->workdir, c->destdir); |
565 | if (lock_rename(c->workdir, c->destdir) != NULL) { | 565 | if (!err) { |
566 | pr_err("overlayfs: failed to lock workdir+upperdir\n"); | ||
567 | } else { | ||
568 | err = ovl_copy_up_locked(c); | 566 | err = ovl_copy_up_locked(c); |
569 | unlock_rename(c->workdir, c->destdir); | 567 | unlock_rename(c->workdir, c->destdir); |
570 | } | 568 | } |
diff --git a/fs/overlayfs/dir.c b/fs/overlayfs/dir.c index 3309b1912241..cc961a3bd3bd 100644 --- a/fs/overlayfs/dir.c +++ b/fs/overlayfs/dir.c | |||
@@ -216,26 +216,6 @@ out_unlock: | |||
216 | return err; | 216 | return err; |
217 | } | 217 | } |
218 | 218 | ||
219 | static int ovl_lock_rename_workdir(struct dentry *workdir, | ||
220 | struct dentry *upperdir) | ||
221 | { | ||
222 | /* Workdir should not be the same as upperdir */ | ||
223 | if (workdir == upperdir) | ||
224 | goto err; | ||
225 | |||
226 | /* Workdir should not be subdir of upperdir and vice versa */ | ||
227 | if (lock_rename(workdir, upperdir) != NULL) | ||
228 | goto err_unlock; | ||
229 | |||
230 | return 0; | ||
231 | |||
232 | err_unlock: | ||
233 | unlock_rename(workdir, upperdir); | ||
234 | err: | ||
235 | pr_err("overlayfs: failed to lock workdir+upperdir\n"); | ||
236 | return -EIO; | ||
237 | } | ||
238 | |||
239 | static struct dentry *ovl_clear_empty(struct dentry *dentry, | 219 | static struct dentry *ovl_clear_empty(struct dentry *dentry, |
240 | struct list_head *list) | 220 | struct list_head *list) |
241 | { | 221 | { |
diff --git a/fs/overlayfs/namei.c b/fs/overlayfs/namei.c index c3addd1114f1..654bea1a5ac9 100644 --- a/fs/overlayfs/namei.c +++ b/fs/overlayfs/namei.c | |||
@@ -506,6 +506,7 @@ static struct dentry *ovl_lookup_index(struct dentry *dentry, | |||
506 | 506 | ||
507 | index = lookup_one_len_unlocked(name.name, ofs->indexdir, name.len); | 507 | index = lookup_one_len_unlocked(name.name, ofs->indexdir, name.len); |
508 | if (IS_ERR(index)) { | 508 | if (IS_ERR(index)) { |
509 | err = PTR_ERR(index); | ||
509 | pr_warn_ratelimited("overlayfs: failed inode index lookup (ino=%lu, key=%*s, err=%i);\n" | 510 | pr_warn_ratelimited("overlayfs: failed inode index lookup (ino=%lu, key=%*s, err=%i);\n" |
510 | "overlayfs: mount with '-o index=off' to disable inodes index.\n", | 511 | "overlayfs: mount with '-o index=off' to disable inodes index.\n", |
511 | d_inode(origin)->i_ino, name.len, name.name, | 512 | d_inode(origin)->i_ino, name.len, name.name, |
diff --git a/fs/overlayfs/overlayfs.h b/fs/overlayfs/overlayfs.h index d4e8c1a08fb0..c706a6f99928 100644 --- a/fs/overlayfs/overlayfs.h +++ b/fs/overlayfs/overlayfs.h | |||
@@ -235,6 +235,7 @@ bool ovl_inuse_trylock(struct dentry *dentry); | |||
235 | void ovl_inuse_unlock(struct dentry *dentry); | 235 | void ovl_inuse_unlock(struct dentry *dentry); |
236 | int ovl_nlink_start(struct dentry *dentry, bool *locked); | 236 | int ovl_nlink_start(struct dentry *dentry, bool *locked); |
237 | void ovl_nlink_end(struct dentry *dentry, bool locked); | 237 | void ovl_nlink_end(struct dentry *dentry, bool locked); |
238 | int ovl_lock_rename_workdir(struct dentry *workdir, struct dentry *upperdir); | ||
238 | 239 | ||
239 | static inline bool ovl_is_impuredir(struct dentry *dentry) | 240 | static inline bool ovl_is_impuredir(struct dentry *dentry) |
240 | { | 241 | { |
diff --git a/fs/overlayfs/ovl_entry.h b/fs/overlayfs/ovl_entry.h index 878a750986dd..25d9b5adcd42 100644 --- a/fs/overlayfs/ovl_entry.h +++ b/fs/overlayfs/ovl_entry.h | |||
@@ -37,6 +37,9 @@ struct ovl_fs { | |||
37 | bool noxattr; | 37 | bool noxattr; |
38 | /* sb common to all layers */ | 38 | /* sb common to all layers */ |
39 | struct super_block *same_sb; | 39 | struct super_block *same_sb; |
40 | /* Did we take the inuse lock? */ | ||
41 | bool upperdir_locked; | ||
42 | bool workdir_locked; | ||
40 | }; | 43 | }; |
41 | 44 | ||
42 | /* private information held for every overlayfs dentry */ | 45 | /* private information held for every overlayfs dentry */ |
diff --git a/fs/overlayfs/readdir.c b/fs/overlayfs/readdir.c index 62e9b22a2077..0f85ee9c3268 100644 --- a/fs/overlayfs/readdir.c +++ b/fs/overlayfs/readdir.c | |||
@@ -988,6 +988,7 @@ int ovl_indexdir_cleanup(struct dentry *dentry, struct vfsmount *mnt, | |||
988 | struct path *lowerstack, unsigned int numlower) | 988 | struct path *lowerstack, unsigned int numlower) |
989 | { | 989 | { |
990 | int err; | 990 | int err; |
991 | struct dentry *index = NULL; | ||
991 | struct inode *dir = dentry->d_inode; | 992 | struct inode *dir = dentry->d_inode; |
992 | struct path path = { .mnt = mnt, .dentry = dentry }; | 993 | struct path path = { .mnt = mnt, .dentry = dentry }; |
993 | LIST_HEAD(list); | 994 | LIST_HEAD(list); |
@@ -1007,8 +1008,6 @@ int ovl_indexdir_cleanup(struct dentry *dentry, struct vfsmount *mnt, | |||
1007 | 1008 | ||
1008 | inode_lock_nested(dir, I_MUTEX_PARENT); | 1009 | inode_lock_nested(dir, I_MUTEX_PARENT); |
1009 | list_for_each_entry(p, &list, l_node) { | 1010 | list_for_each_entry(p, &list, l_node) { |
1010 | struct dentry *index; | ||
1011 | |||
1012 | if (p->name[0] == '.') { | 1011 | if (p->name[0] == '.') { |
1013 | if (p->len == 1) | 1012 | if (p->len == 1) |
1014 | continue; | 1013 | continue; |
@@ -1018,6 +1017,7 @@ int ovl_indexdir_cleanup(struct dentry *dentry, struct vfsmount *mnt, | |||
1018 | index = lookup_one_len(p->name, dentry, p->len); | 1017 | index = lookup_one_len(p->name, dentry, p->len); |
1019 | if (IS_ERR(index)) { | 1018 | if (IS_ERR(index)) { |
1020 | err = PTR_ERR(index); | 1019 | err = PTR_ERR(index); |
1020 | index = NULL; | ||
1021 | break; | 1021 | break; |
1022 | } | 1022 | } |
1023 | err = ovl_verify_index(index, lowerstack, numlower); | 1023 | err = ovl_verify_index(index, lowerstack, numlower); |
@@ -1029,7 +1029,9 @@ int ovl_indexdir_cleanup(struct dentry *dentry, struct vfsmount *mnt, | |||
1029 | break; | 1029 | break; |
1030 | } | 1030 | } |
1031 | dput(index); | 1031 | dput(index); |
1032 | index = NULL; | ||
1032 | } | 1033 | } |
1034 | dput(index); | ||
1033 | inode_unlock(dir); | 1035 | inode_unlock(dir); |
1034 | out: | 1036 | out: |
1035 | ovl_cache_free(&list); | 1037 | ovl_cache_free(&list); |
diff --git a/fs/overlayfs/super.c b/fs/overlayfs/super.c index fd5ea4facc62..092d150643c1 100644 --- a/fs/overlayfs/super.c +++ b/fs/overlayfs/super.c | |||
@@ -211,9 +211,10 @@ static void ovl_put_super(struct super_block *sb) | |||
211 | 211 | ||
212 | dput(ufs->indexdir); | 212 | dput(ufs->indexdir); |
213 | dput(ufs->workdir); | 213 | dput(ufs->workdir); |
214 | ovl_inuse_unlock(ufs->workbasedir); | 214 | if (ufs->workdir_locked) |
215 | ovl_inuse_unlock(ufs->workbasedir); | ||
215 | dput(ufs->workbasedir); | 216 | dput(ufs->workbasedir); |
216 | if (ufs->upper_mnt) | 217 | if (ufs->upper_mnt && ufs->upperdir_locked) |
217 | ovl_inuse_unlock(ufs->upper_mnt->mnt_root); | 218 | ovl_inuse_unlock(ufs->upper_mnt->mnt_root); |
218 | mntput(ufs->upper_mnt); | 219 | mntput(ufs->upper_mnt); |
219 | for (i = 0; i < ufs->numlower; i++) | 220 | for (i = 0; i < ufs->numlower; i++) |
@@ -881,9 +882,13 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent) | |||
881 | goto out_put_upperpath; | 882 | goto out_put_upperpath; |
882 | 883 | ||
883 | err = -EBUSY; | 884 | err = -EBUSY; |
884 | if (!ovl_inuse_trylock(upperpath.dentry)) { | 885 | if (ovl_inuse_trylock(upperpath.dentry)) { |
885 | pr_err("overlayfs: upperdir is in-use by another mount\n"); | 886 | ufs->upperdir_locked = true; |
887 | } else if (ufs->config.index) { | ||
888 | pr_err("overlayfs: upperdir is in-use by another mount, mount with '-o index=off' to override exclusive upperdir protection.\n"); | ||
886 | goto out_put_upperpath; | 889 | goto out_put_upperpath; |
890 | } else { | ||
891 | pr_warn("overlayfs: upperdir is in-use by another mount, accessing files from both mounts will result in undefined behavior.\n"); | ||
887 | } | 892 | } |
888 | 893 | ||
889 | err = ovl_mount_dir(ufs->config.workdir, &workpath); | 894 | err = ovl_mount_dir(ufs->config.workdir, &workpath); |
@@ -901,9 +906,13 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent) | |||
901 | } | 906 | } |
902 | 907 | ||
903 | err = -EBUSY; | 908 | err = -EBUSY; |
904 | if (!ovl_inuse_trylock(workpath.dentry)) { | 909 | if (ovl_inuse_trylock(workpath.dentry)) { |
905 | pr_err("overlayfs: workdir is in-use by another mount\n"); | 910 | ufs->workdir_locked = true; |
911 | } else if (ufs->config.index) { | ||
912 | pr_err("overlayfs: workdir is in-use by another mount, mount with '-o index=off' to override exclusive workdir protection.\n"); | ||
906 | goto out_put_workpath; | 913 | goto out_put_workpath; |
914 | } else { | ||
915 | pr_warn("overlayfs: workdir is in-use by another mount, accessing files from both mounts will result in undefined behavior.\n"); | ||
907 | } | 916 | } |
908 | 917 | ||
909 | ufs->workbasedir = workpath.dentry; | 918 | ufs->workbasedir = workpath.dentry; |
@@ -1156,11 +1165,13 @@ out_put_lowerpath: | |||
1156 | out_free_lowertmp: | 1165 | out_free_lowertmp: |
1157 | kfree(lowertmp); | 1166 | kfree(lowertmp); |
1158 | out_unlock_workdentry: | 1167 | out_unlock_workdentry: |
1159 | ovl_inuse_unlock(workpath.dentry); | 1168 | if (ufs->workdir_locked) |
1169 | ovl_inuse_unlock(workpath.dentry); | ||
1160 | out_put_workpath: | 1170 | out_put_workpath: |
1161 | path_put(&workpath); | 1171 | path_put(&workpath); |
1162 | out_unlock_upperdentry: | 1172 | out_unlock_upperdentry: |
1163 | ovl_inuse_unlock(upperpath.dentry); | 1173 | if (ufs->upperdir_locked) |
1174 | ovl_inuse_unlock(upperpath.dentry); | ||
1164 | out_put_upperpath: | 1175 | out_put_upperpath: |
1165 | path_put(&upperpath); | 1176 | path_put(&upperpath); |
1166 | out_free_config: | 1177 | out_free_config: |
diff --git a/fs/overlayfs/util.c b/fs/overlayfs/util.c index 117794582f9f..b9b239fa5cfd 100644 --- a/fs/overlayfs/util.c +++ b/fs/overlayfs/util.c | |||
@@ -430,7 +430,7 @@ void ovl_inuse_unlock(struct dentry *dentry) | |||
430 | } | 430 | } |
431 | } | 431 | } |
432 | 432 | ||
433 | /* Called must hold OVL_I(inode)->oi_lock */ | 433 | /* Caller must hold OVL_I(inode)->lock */ |
434 | static void ovl_cleanup_index(struct dentry *dentry) | 434 | static void ovl_cleanup_index(struct dentry *dentry) |
435 | { | 435 | { |
436 | struct inode *dir = ovl_indexdir(dentry->d_sb)->d_inode; | 436 | struct inode *dir = ovl_indexdir(dentry->d_sb)->d_inode; |
@@ -469,6 +469,9 @@ static void ovl_cleanup_index(struct dentry *dentry) | |||
469 | err = PTR_ERR(index); | 469 | err = PTR_ERR(index); |
470 | if (!IS_ERR(index)) | 470 | if (!IS_ERR(index)) |
471 | err = ovl_cleanup(dir, index); | 471 | err = ovl_cleanup(dir, index); |
472 | else | ||
473 | index = NULL; | ||
474 | |||
472 | inode_unlock(dir); | 475 | inode_unlock(dir); |
473 | if (err) | 476 | if (err) |
474 | goto fail; | 477 | goto fail; |
@@ -557,3 +560,22 @@ void ovl_nlink_end(struct dentry *dentry, bool locked) | |||
557 | mutex_unlock(&OVL_I(d_inode(dentry))->lock); | 560 | mutex_unlock(&OVL_I(d_inode(dentry))->lock); |
558 | } | 561 | } |
559 | } | 562 | } |
563 | |||
564 | int ovl_lock_rename_workdir(struct dentry *workdir, struct dentry *upperdir) | ||
565 | { | ||
566 | /* Workdir should not be the same as upperdir */ | ||
567 | if (workdir == upperdir) | ||
568 | goto err; | ||
569 | |||
570 | /* Workdir should not be subdir of upperdir and vice versa */ | ||
571 | if (lock_rename(workdir, upperdir) != NULL) | ||
572 | goto err_unlock; | ||
573 | |||
574 | return 0; | ||
575 | |||
576 | err_unlock: | ||
577 | unlock_rename(workdir, upperdir); | ||
578 | err: | ||
579 | pr_err("overlayfs: failed to lock workdir+upperdir\n"); | ||
580 | return -EIO; | ||
581 | } | ||
diff --git a/fs/proc/array.c b/fs/proc/array.c index 88c355574aa0..77a8eacbe032 100644 --- a/fs/proc/array.c +++ b/fs/proc/array.c | |||
@@ -62,6 +62,7 @@ | |||
62 | #include <linux/mman.h> | 62 | #include <linux/mman.h> |
63 | #include <linux/sched/mm.h> | 63 | #include <linux/sched/mm.h> |
64 | #include <linux/sched/numa_balancing.h> | 64 | #include <linux/sched/numa_balancing.h> |
65 | #include <linux/sched/task_stack.h> | ||
65 | #include <linux/sched/task.h> | 66 | #include <linux/sched/task.h> |
66 | #include <linux/sched/cputime.h> | 67 | #include <linux/sched/cputime.h> |
67 | #include <linux/proc_fs.h> | 68 | #include <linux/proc_fs.h> |
@@ -118,30 +119,25 @@ static inline void task_name(struct seq_file *m, struct task_struct *p) | |||
118 | * simple bit tests. | 119 | * simple bit tests. |
119 | */ | 120 | */ |
120 | static const char * const task_state_array[] = { | 121 | static const char * const task_state_array[] = { |
121 | "R (running)", /* 0 */ | 122 | |
122 | "S (sleeping)", /* 1 */ | 123 | /* states in TASK_REPORT: */ |
123 | "D (disk sleep)", /* 2 */ | 124 | "R (running)", /* 0x00 */ |
124 | "T (stopped)", /* 4 */ | 125 | "S (sleeping)", /* 0x01 */ |
125 | "t (tracing stop)", /* 8 */ | 126 | "D (disk sleep)", /* 0x02 */ |
126 | "X (dead)", /* 16 */ | 127 | "T (stopped)", /* 0x04 */ |
127 | "Z (zombie)", /* 32 */ | 128 | "t (tracing stop)", /* 0x08 */ |
129 | "X (dead)", /* 0x10 */ | ||
130 | "Z (zombie)", /* 0x20 */ | ||
131 | "P (parked)", /* 0x40 */ | ||
132 | |||
133 | /* states beyond TASK_REPORT: */ | ||
134 | "I (idle)", /* 0x80 */ | ||
128 | }; | 135 | }; |
129 | 136 | ||
130 | static inline const char *get_task_state(struct task_struct *tsk) | 137 | static inline const char *get_task_state(struct task_struct *tsk) |
131 | { | 138 | { |
132 | unsigned int state = (tsk->state | tsk->exit_state) & TASK_REPORT; | 139 | BUILD_BUG_ON(1 + ilog2(TASK_REPORT_MAX) != ARRAY_SIZE(task_state_array)); |
133 | 140 | return task_state_array[__get_task_state(tsk)]; | |
134 | /* | ||
135 | * Parked tasks do not run; they sit in __kthread_parkme(). | ||
136 | * Without this check, we would report them as running, which is | ||
137 | * clearly wrong, so we report them as sleeping instead. | ||
138 | */ | ||
139 | if (tsk->state == TASK_PARKED) | ||
140 | state = TASK_INTERRUPTIBLE; | ||
141 | |||
142 | BUILD_BUG_ON(1 + ilog2(TASK_REPORT) != ARRAY_SIZE(task_state_array)-1); | ||
143 | |||
144 | return task_state_array[fls(state)]; | ||
145 | } | 141 | } |
146 | 142 | ||
147 | static inline int get_task_umask(struct task_struct *tsk) | 143 | static inline int get_task_umask(struct task_struct *tsk) |
@@ -421,7 +417,15 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns, | |||
421 | * esp and eip are intentionally zeroed out. There is no | 417 | * esp and eip are intentionally zeroed out. There is no |
422 | * non-racy way to read them without freezing the task. | 418 | * non-racy way to read them without freezing the task. |
423 | * Programs that need reliable values can use ptrace(2). | 419 | * Programs that need reliable values can use ptrace(2). |
420 | * | ||
421 | * The only exception is if the task is core dumping because | ||
422 | * a program is not able to use ptrace(2) in that case. It is | ||
423 | * safe because the task has stopped executing permanently. | ||
424 | */ | 424 | */ |
425 | if (permitted && (task->flags & PF_DUMPCORE)) { | ||
426 | eip = KSTK_EIP(task); | ||
427 | esp = KSTK_ESP(task); | ||
428 | } | ||
425 | } | 429 | } |
426 | 430 | ||
427 | get_task_comm(tcomm, task); | 431 | get_task_comm(tcomm, task); |
diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c index 8381db9db6d9..50b0556a124f 100644 --- a/fs/quota/dquot.c +++ b/fs/quota/dquot.c | |||
@@ -1980,7 +1980,9 @@ int __dquot_transfer(struct inode *inode, struct dquot **transfer_to) | |||
1980 | ret = dquot_add_space(transfer_to[cnt], cur_space, rsv_space, 0, | 1980 | ret = dquot_add_space(transfer_to[cnt], cur_space, rsv_space, 0, |
1981 | &warn_to[cnt]); | 1981 | &warn_to[cnt]); |
1982 | if (ret) { | 1982 | if (ret) { |
1983 | spin_lock(&transfer_to[cnt]->dq_dqb_lock); | ||
1983 | dquot_decr_inodes(transfer_to[cnt], inode_usage); | 1984 | dquot_decr_inodes(transfer_to[cnt], inode_usage); |
1985 | spin_unlock(&transfer_to[cnt]->dq_dqb_lock); | ||
1984 | goto over_quota; | 1986 | goto over_quota; |
1985 | } | 1987 | } |
1986 | } | 1988 | } |
diff --git a/fs/quota/quota_v2.c b/fs/quota/quota_v2.c index c0187cda2c1e..a73e5b34db41 100644 --- a/fs/quota/quota_v2.c +++ b/fs/quota/quota_v2.c | |||
@@ -328,12 +328,16 @@ static int v2_write_dquot(struct dquot *dquot) | |||
328 | if (!dquot->dq_off) { | 328 | if (!dquot->dq_off) { |
329 | alloc = true; | 329 | alloc = true; |
330 | down_write(&dqopt->dqio_sem); | 330 | down_write(&dqopt->dqio_sem); |
331 | } else { | ||
332 | down_read(&dqopt->dqio_sem); | ||
331 | } | 333 | } |
332 | ret = qtree_write_dquot( | 334 | ret = qtree_write_dquot( |
333 | sb_dqinfo(dquot->dq_sb, dquot->dq_id.type)->dqi_priv, | 335 | sb_dqinfo(dquot->dq_sb, dquot->dq_id.type)->dqi_priv, |
334 | dquot); | 336 | dquot); |
335 | if (alloc) | 337 | if (alloc) |
336 | up_write(&dqopt->dqio_sem); | 338 | up_write(&dqopt->dqio_sem); |
339 | else | ||
340 | up_read(&dqopt->dqio_sem); | ||
337 | return ret; | 341 | return ret; |
338 | } | 342 | } |
339 | 343 | ||
diff --git a/fs/read_write.c b/fs/read_write.c index a2b9a47235c5..f0d4b16873e8 100644 --- a/fs/read_write.c +++ b/fs/read_write.c | |||
@@ -112,7 +112,7 @@ generic_file_llseek_size(struct file *file, loff_t offset, int whence, | |||
112 | * In the generic case the entire file is data, so as long as | 112 | * In the generic case the entire file is data, so as long as |
113 | * offset isn't at the end of the file then the offset is data. | 113 | * offset isn't at the end of the file then the offset is data. |
114 | */ | 114 | */ |
115 | if (offset >= eof) | 115 | if ((unsigned long long)offset >= eof) |
116 | return -ENXIO; | 116 | return -ENXIO; |
117 | break; | 117 | break; |
118 | case SEEK_HOLE: | 118 | case SEEK_HOLE: |
@@ -120,7 +120,7 @@ generic_file_llseek_size(struct file *file, loff_t offset, int whence, | |||
120 | * There is a virtual hole at the end of the file, so as long as | 120 | * There is a virtual hole at the end of the file, so as long as |
121 | * offset isn't i_size or larger, return i_size. | 121 | * offset isn't i_size or larger, return i_size. |
122 | */ | 122 | */ |
123 | if (offset >= eof) | 123 | if ((unsigned long long)offset >= eof) |
124 | return -ENXIO; | 124 | return -ENXIO; |
125 | offset = eof; | 125 | offset = eof; |
126 | break; | 126 | break; |
diff --git a/fs/userfaultfd.c b/fs/userfaultfd.c index ef4b48d1ea42..1c713fd5b3e6 100644 --- a/fs/userfaultfd.c +++ b/fs/userfaultfd.c | |||
@@ -588,6 +588,12 @@ static void userfaultfd_event_wait_completion(struct userfaultfd_ctx *ctx, | |||
588 | break; | 588 | break; |
589 | if (ACCESS_ONCE(ctx->released) || | 589 | if (ACCESS_ONCE(ctx->released) || |
590 | fatal_signal_pending(current)) { | 590 | fatal_signal_pending(current)) { |
591 | /* | ||
592 | * &ewq->wq may be queued in fork_event, but | ||
593 | * __remove_wait_queue ignores the head | ||
594 | * parameter. It would be a problem if it | ||
595 | * didn't. | ||
596 | */ | ||
591 | __remove_wait_queue(&ctx->event_wqh, &ewq->wq); | 597 | __remove_wait_queue(&ctx->event_wqh, &ewq->wq); |
592 | if (ewq->msg.event == UFFD_EVENT_FORK) { | 598 | if (ewq->msg.event == UFFD_EVENT_FORK) { |
593 | struct userfaultfd_ctx *new; | 599 | struct userfaultfd_ctx *new; |
@@ -1061,6 +1067,12 @@ static ssize_t userfaultfd_ctx_read(struct userfaultfd_ctx *ctx, int no_wait, | |||
1061 | (unsigned long) | 1067 | (unsigned long) |
1062 | uwq->msg.arg.reserved.reserved1; | 1068 | uwq->msg.arg.reserved.reserved1; |
1063 | list_move(&uwq->wq.entry, &fork_event); | 1069 | list_move(&uwq->wq.entry, &fork_event); |
1070 | /* | ||
1071 | * fork_nctx can be freed as soon as | ||
1072 | * we drop the lock, unless we take a | ||
1073 | * reference on it. | ||
1074 | */ | ||
1075 | userfaultfd_ctx_get(fork_nctx); | ||
1064 | spin_unlock(&ctx->event_wqh.lock); | 1076 | spin_unlock(&ctx->event_wqh.lock); |
1065 | ret = 0; | 1077 | ret = 0; |
1066 | break; | 1078 | break; |
@@ -1091,19 +1103,53 @@ static ssize_t userfaultfd_ctx_read(struct userfaultfd_ctx *ctx, int no_wait, | |||
1091 | 1103 | ||
1092 | if (!ret && msg->event == UFFD_EVENT_FORK) { | 1104 | if (!ret && msg->event == UFFD_EVENT_FORK) { |
1093 | ret = resolve_userfault_fork(ctx, fork_nctx, msg); | 1105 | ret = resolve_userfault_fork(ctx, fork_nctx, msg); |
1106 | spin_lock(&ctx->event_wqh.lock); | ||
1107 | if (!list_empty(&fork_event)) { | ||
1108 | /* | ||
1109 | * The fork thread didn't abort, so we can | ||
1110 | * drop the temporary refcount. | ||
1111 | */ | ||
1112 | userfaultfd_ctx_put(fork_nctx); | ||
1113 | |||
1114 | uwq = list_first_entry(&fork_event, | ||
1115 | typeof(*uwq), | ||
1116 | wq.entry); | ||
1117 | /* | ||
1118 | * If fork_event list wasn't empty and in turn | ||
1119 | * the event wasn't already released by fork | ||
1120 | * (the event is allocated on fork kernel | ||
1121 | * stack), put the event back to its place in | ||
1122 | * the event_wq. fork_event head will be freed | ||
1123 | * as soon as we return so the event cannot | ||
1124 | * stay queued there no matter the current | ||
1125 | * "ret" value. | ||
1126 | */ | ||
1127 | list_del(&uwq->wq.entry); | ||
1128 | __add_wait_queue(&ctx->event_wqh, &uwq->wq); | ||
1094 | 1129 | ||
1095 | if (!ret) { | 1130 | /* |
1096 | spin_lock(&ctx->event_wqh.lock); | 1131 | * Leave the event in the waitqueue and report |
1097 | if (!list_empty(&fork_event)) { | 1132 | * error to userland if we failed to resolve |
1098 | uwq = list_first_entry(&fork_event, | 1133 | * the userfault fork. |
1099 | typeof(*uwq), | 1134 | */ |
1100 | wq.entry); | 1135 | if (likely(!ret)) |
1101 | list_del(&uwq->wq.entry); | ||
1102 | __add_wait_queue(&ctx->event_wqh, &uwq->wq); | ||
1103 | userfaultfd_event_complete(ctx, uwq); | 1136 | userfaultfd_event_complete(ctx, uwq); |
1104 | } | 1137 | } else { |
1105 | spin_unlock(&ctx->event_wqh.lock); | 1138 | /* |
1139 | * Here the fork thread aborted and the | ||
1140 | * refcount from the fork thread on fork_nctx | ||
1141 | * has already been released. We still hold | ||
1142 | * the reference we took before releasing the | ||
1143 | * lock above. If resolve_userfault_fork | ||
1144 | * failed we've to drop it because the | ||
1145 | * fork_nctx has to be freed in such case. If | ||
1146 | * it succeeded we'll hold it because the new | ||
1147 | * uffd references it. | ||
1148 | */ | ||
1149 | if (ret) | ||
1150 | userfaultfd_ctx_put(fork_nctx); | ||
1106 | } | 1151 | } |
1152 | spin_unlock(&ctx->event_wqh.lock); | ||
1107 | } | 1153 | } |
1108 | 1154 | ||
1109 | return ret; | 1155 | return ret; |
diff --git a/fs/xattr.c b/fs/xattr.c index 4424f7fecf14..61cd28ba25f3 100644 --- a/fs/xattr.c +++ b/fs/xattr.c | |||
@@ -250,7 +250,7 @@ xattr_getsecurity(struct inode *inode, const char *name, void *value, | |||
250 | } | 250 | } |
251 | memcpy(value, buffer, len); | 251 | memcpy(value, buffer, len); |
252 | out: | 252 | out: |
253 | security_release_secctx(buffer, len); | 253 | kfree(buffer); |
254 | out_noalloc: | 254 | out_noalloc: |
255 | return len; | 255 | return len; |
256 | } | 256 | } |
diff --git a/fs/xfs/libxfs/xfs_ag_resv.c b/fs/xfs/libxfs/xfs_ag_resv.c index b008ff3250eb..df3e600835e8 100644 --- a/fs/xfs/libxfs/xfs_ag_resv.c +++ b/fs/xfs/libxfs/xfs_ag_resv.c | |||
@@ -156,7 +156,8 @@ __xfs_ag_resv_free( | |||
156 | trace_xfs_ag_resv_free(pag, type, 0); | 156 | trace_xfs_ag_resv_free(pag, type, 0); |
157 | 157 | ||
158 | resv = xfs_perag_resv(pag, type); | 158 | resv = xfs_perag_resv(pag, type); |
159 | pag->pag_mount->m_ag_max_usable += resv->ar_asked; | 159 | if (pag->pag_agno == 0) |
160 | pag->pag_mount->m_ag_max_usable += resv->ar_asked; | ||
160 | /* | 161 | /* |
161 | * AGFL blocks are always considered "free", so whatever | 162 | * AGFL blocks are always considered "free", so whatever |
162 | * was reserved at mount time must be given back at umount. | 163 | * was reserved at mount time must be given back at umount. |
@@ -216,7 +217,14 @@ __xfs_ag_resv_init( | |||
216 | return error; | 217 | return error; |
217 | } | 218 | } |
218 | 219 | ||
219 | mp->m_ag_max_usable -= ask; | 220 | /* |
221 | * Reduce the maximum per-AG allocation length by however much we're | ||
222 | * trying to reserve for an AG. Since this is a filesystem-wide | ||
223 | * counter, we only make the adjustment for AG 0. This assumes that | ||
224 | * there aren't any AGs hungrier for per-AG reservation than AG 0. | ||
225 | */ | ||
226 | if (pag->pag_agno == 0) | ||
227 | mp->m_ag_max_usable -= ask; | ||
220 | 228 | ||
221 | resv = xfs_perag_resv(pag, type); | 229 | resv = xfs_perag_resv(pag, type); |
222 | resv->ar_asked = ask; | 230 | resv->ar_asked = ask; |
diff --git a/fs/xfs/libxfs/xfs_bmap.c b/fs/xfs/libxfs/xfs_bmap.c index 459f4b4f08fe..044a363119be 100644 --- a/fs/xfs/libxfs/xfs_bmap.c +++ b/fs/xfs/libxfs/xfs_bmap.c | |||
@@ -49,7 +49,6 @@ | |||
49 | #include "xfs_rmap.h" | 49 | #include "xfs_rmap.h" |
50 | #include "xfs_ag_resv.h" | 50 | #include "xfs_ag_resv.h" |
51 | #include "xfs_refcount.h" | 51 | #include "xfs_refcount.h" |
52 | #include "xfs_rmap_btree.h" | ||
53 | #include "xfs_icache.h" | 52 | #include "xfs_icache.h" |
54 | 53 | ||
55 | 54 | ||
@@ -192,12 +191,8 @@ xfs_bmap_worst_indlen( | |||
192 | int maxrecs; /* maximum record count at this level */ | 191 | int maxrecs; /* maximum record count at this level */ |
193 | xfs_mount_t *mp; /* mount structure */ | 192 | xfs_mount_t *mp; /* mount structure */ |
194 | xfs_filblks_t rval; /* return value */ | 193 | xfs_filblks_t rval; /* return value */ |
195 | xfs_filblks_t orig_len; | ||
196 | 194 | ||
197 | mp = ip->i_mount; | 195 | mp = ip->i_mount; |
198 | |||
199 | /* Calculate the worst-case size of the bmbt. */ | ||
200 | orig_len = len; | ||
201 | maxrecs = mp->m_bmap_dmxr[0]; | 196 | maxrecs = mp->m_bmap_dmxr[0]; |
202 | for (level = 0, rval = 0; | 197 | for (level = 0, rval = 0; |
203 | level < XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK); | 198 | level < XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK); |
@@ -205,20 +200,12 @@ xfs_bmap_worst_indlen( | |||
205 | len += maxrecs - 1; | 200 | len += maxrecs - 1; |
206 | do_div(len, maxrecs); | 201 | do_div(len, maxrecs); |
207 | rval += len; | 202 | rval += len; |
208 | if (len == 1) { | 203 | if (len == 1) |
209 | rval += XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK) - | 204 | return rval + XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK) - |
210 | level - 1; | 205 | level - 1; |
211 | break; | ||
212 | } | ||
213 | if (level == 0) | 206 | if (level == 0) |
214 | maxrecs = mp->m_bmap_dmxr[1]; | 207 | maxrecs = mp->m_bmap_dmxr[1]; |
215 | } | 208 | } |
216 | |||
217 | /* Calculate the worst-case size of the rmapbt. */ | ||
218 | if (xfs_sb_version_hasrmapbt(&mp->m_sb)) | ||
219 | rval += 1 + xfs_rmapbt_calc_size(mp, orig_len) + | ||
220 | mp->m_rmap_maxlevels; | ||
221 | |||
222 | return rval; | 209 | return rval; |
223 | } | 210 | } |
224 | 211 | ||
diff --git a/fs/xfs/xfs_aops.c b/fs/xfs/xfs_aops.c index 29172609f2a3..f18e5932aec4 100644 --- a/fs/xfs/xfs_aops.c +++ b/fs/xfs/xfs_aops.c | |||
@@ -343,7 +343,8 @@ xfs_end_io( | |||
343 | error = xfs_reflink_end_cow(ip, offset, size); | 343 | error = xfs_reflink_end_cow(ip, offset, size); |
344 | break; | 344 | break; |
345 | case XFS_IO_UNWRITTEN: | 345 | case XFS_IO_UNWRITTEN: |
346 | error = xfs_iomap_write_unwritten(ip, offset, size); | 346 | /* writeback should never update isize */ |
347 | error = xfs_iomap_write_unwritten(ip, offset, size, false); | ||
347 | break; | 348 | break; |
348 | default: | 349 | default: |
349 | ASSERT(!xfs_ioend_is_append(ioend) || ioend->io_append_trans); | 350 | ASSERT(!xfs_ioend_is_append(ioend) || ioend->io_append_trans); |
diff --git a/fs/xfs/xfs_bmap_util.c b/fs/xfs/xfs_bmap_util.c index cd9a5400ba4f..e9db7fc95b70 100644 --- a/fs/xfs/xfs_bmap_util.c +++ b/fs/xfs/xfs_bmap_util.c | |||
@@ -1459,7 +1459,19 @@ xfs_shift_file_space( | |||
1459 | return error; | 1459 | return error; |
1460 | 1460 | ||
1461 | /* | 1461 | /* |
1462 | * The extent shiting code works on extent granularity. So, if | 1462 | * Clean out anything hanging around in the cow fork now that |
1463 | * we've flushed all the dirty data out to disk to avoid having | ||
1464 | * CoW extents at the wrong offsets. | ||
1465 | */ | ||
1466 | if (xfs_is_reflink_inode(ip)) { | ||
1467 | error = xfs_reflink_cancel_cow_range(ip, offset, NULLFILEOFF, | ||
1468 | true); | ||
1469 | if (error) | ||
1470 | return error; | ||
1471 | } | ||
1472 | |||
1473 | /* | ||
1474 | * The extent shifting code works on extent granularity. So, if | ||
1463 | * stop_fsb is not the starting block of extent, we need to split | 1475 | * stop_fsb is not the starting block of extent, we need to split |
1464 | * the extent at stop_fsb. | 1476 | * the extent at stop_fsb. |
1465 | */ | 1477 | */ |
@@ -2110,11 +2122,31 @@ xfs_swap_extents( | |||
2110 | ip->i_d.di_flags2 |= tip->i_d.di_flags2 & XFS_DIFLAG2_REFLINK; | 2122 | ip->i_d.di_flags2 |= tip->i_d.di_flags2 & XFS_DIFLAG2_REFLINK; |
2111 | tip->i_d.di_flags2 &= ~XFS_DIFLAG2_REFLINK; | 2123 | tip->i_d.di_flags2 &= ~XFS_DIFLAG2_REFLINK; |
2112 | tip->i_d.di_flags2 |= f & XFS_DIFLAG2_REFLINK; | 2124 | tip->i_d.di_flags2 |= f & XFS_DIFLAG2_REFLINK; |
2125 | } | ||
2126 | |||
2127 | /* Swap the cow forks. */ | ||
2128 | if (xfs_sb_version_hasreflink(&mp->m_sb)) { | ||
2129 | xfs_extnum_t extnum; | ||
2130 | |||
2131 | ASSERT(ip->i_cformat == XFS_DINODE_FMT_EXTENTS); | ||
2132 | ASSERT(tip->i_cformat == XFS_DINODE_FMT_EXTENTS); | ||
2133 | |||
2134 | extnum = ip->i_cnextents; | ||
2135 | ip->i_cnextents = tip->i_cnextents; | ||
2136 | tip->i_cnextents = extnum; | ||
2137 | |||
2113 | cowfp = ip->i_cowfp; | 2138 | cowfp = ip->i_cowfp; |
2114 | ip->i_cowfp = tip->i_cowfp; | 2139 | ip->i_cowfp = tip->i_cowfp; |
2115 | tip->i_cowfp = cowfp; | 2140 | tip->i_cowfp = cowfp; |
2116 | xfs_inode_set_cowblocks_tag(ip); | 2141 | |
2117 | xfs_inode_set_cowblocks_tag(tip); | 2142 | if (ip->i_cowfp && ip->i_cnextents) |
2143 | xfs_inode_set_cowblocks_tag(ip); | ||
2144 | else | ||
2145 | xfs_inode_clear_cowblocks_tag(ip); | ||
2146 | if (tip->i_cowfp && tip->i_cnextents) | ||
2147 | xfs_inode_set_cowblocks_tag(tip); | ||
2148 | else | ||
2149 | xfs_inode_clear_cowblocks_tag(tip); | ||
2118 | } | 2150 | } |
2119 | 2151 | ||
2120 | xfs_trans_log_inode(tp, ip, src_log_flags); | 2152 | xfs_trans_log_inode(tp, ip, src_log_flags); |
diff --git a/fs/xfs/xfs_buf.c b/fs/xfs/xfs_buf.c index da14658da310..2f97c12ca75e 100644 --- a/fs/xfs/xfs_buf.c +++ b/fs/xfs/xfs_buf.c | |||
@@ -1258,8 +1258,6 @@ xfs_buf_ioapply_map( | |||
1258 | int size; | 1258 | int size; |
1259 | int offset; | 1259 | int offset; |
1260 | 1260 | ||
1261 | total_nr_pages = bp->b_page_count; | ||
1262 | |||
1263 | /* skip the pages in the buffer before the start offset */ | 1261 | /* skip the pages in the buffer before the start offset */ |
1264 | page_index = 0; | 1262 | page_index = 0; |
1265 | offset = *buf_offset; | 1263 | offset = *buf_offset; |
diff --git a/fs/xfs/xfs_error.c b/fs/xfs/xfs_error.c index bd786a9ac2c3..eaf86f55b7f2 100644 --- a/fs/xfs/xfs_error.c +++ b/fs/xfs/xfs_error.c | |||
@@ -347,7 +347,7 @@ xfs_verifier_error( | |||
347 | { | 347 | { |
348 | struct xfs_mount *mp = bp->b_target->bt_mount; | 348 | struct xfs_mount *mp = bp->b_target->bt_mount; |
349 | 349 | ||
350 | xfs_alert(mp, "Metadata %s detected at %pF, %s block 0x%llx", | 350 | xfs_alert(mp, "Metadata %s detected at %pS, %s block 0x%llx", |
351 | bp->b_error == -EFSBADCRC ? "CRC error" : "corruption", | 351 | bp->b_error == -EFSBADCRC ? "CRC error" : "corruption", |
352 | __return_address, bp->b_ops->name, bp->b_bn); | 352 | __return_address, bp->b_ops->name, bp->b_bn); |
353 | 353 | ||
diff --git a/fs/xfs/xfs_file.c b/fs/xfs/xfs_file.c index ebdd0bd2b261..309e26c9dddb 100644 --- a/fs/xfs/xfs_file.c +++ b/fs/xfs/xfs_file.c | |||
@@ -58,7 +58,7 @@ xfs_zero_range( | |||
58 | xfs_off_t count, | 58 | xfs_off_t count, |
59 | bool *did_zero) | 59 | bool *did_zero) |
60 | { | 60 | { |
61 | return iomap_zero_range(VFS_I(ip), pos, count, NULL, &xfs_iomap_ops); | 61 | return iomap_zero_range(VFS_I(ip), pos, count, did_zero, &xfs_iomap_ops); |
62 | } | 62 | } |
63 | 63 | ||
64 | int | 64 | int |
@@ -377,8 +377,6 @@ restart: | |||
377 | */ | 377 | */ |
378 | spin_lock(&ip->i_flags_lock); | 378 | spin_lock(&ip->i_flags_lock); |
379 | if (iocb->ki_pos > i_size_read(inode)) { | 379 | if (iocb->ki_pos > i_size_read(inode)) { |
380 | bool zero = false; | ||
381 | |||
382 | spin_unlock(&ip->i_flags_lock); | 380 | spin_unlock(&ip->i_flags_lock); |
383 | if (!drained_dio) { | 381 | if (!drained_dio) { |
384 | if (*iolock == XFS_IOLOCK_SHARED) { | 382 | if (*iolock == XFS_IOLOCK_SHARED) { |
@@ -399,7 +397,7 @@ restart: | |||
399 | drained_dio = true; | 397 | drained_dio = true; |
400 | goto restart; | 398 | goto restart; |
401 | } | 399 | } |
402 | error = xfs_zero_eof(ip, iocb->ki_pos, i_size_read(inode), &zero); | 400 | error = xfs_zero_eof(ip, iocb->ki_pos, i_size_read(inode), NULL); |
403 | if (error) | 401 | if (error) |
404 | return error; | 402 | return error; |
405 | } else | 403 | } else |
@@ -436,7 +434,6 @@ xfs_dio_write_end_io( | |||
436 | struct inode *inode = file_inode(iocb->ki_filp); | 434 | struct inode *inode = file_inode(iocb->ki_filp); |
437 | struct xfs_inode *ip = XFS_I(inode); | 435 | struct xfs_inode *ip = XFS_I(inode); |
438 | loff_t offset = iocb->ki_pos; | 436 | loff_t offset = iocb->ki_pos; |
439 | bool update_size = false; | ||
440 | int error = 0; | 437 | int error = 0; |
441 | 438 | ||
442 | trace_xfs_end_io_direct_write(ip, offset, size); | 439 | trace_xfs_end_io_direct_write(ip, offset, size); |
@@ -447,6 +444,21 @@ xfs_dio_write_end_io( | |||
447 | if (size <= 0) | 444 | if (size <= 0) |
448 | return size; | 445 | return size; |
449 | 446 | ||
447 | if (flags & IOMAP_DIO_COW) { | ||
448 | error = xfs_reflink_end_cow(ip, offset, size); | ||
449 | if (error) | ||
450 | return error; | ||
451 | } | ||
452 | |||
453 | /* | ||
454 | * Unwritten conversion updates the in-core isize after extent | ||
455 | * conversion but before updating the on-disk size. Updating isize any | ||
456 | * earlier allows a racing dio read to find unwritten extents before | ||
457 | * they are converted. | ||
458 | */ | ||
459 | if (flags & IOMAP_DIO_UNWRITTEN) | ||
460 | return xfs_iomap_write_unwritten(ip, offset, size, true); | ||
461 | |||
450 | /* | 462 | /* |
451 | * We need to update the in-core inode size here so that we don't end up | 463 | * We need to update the in-core inode size here so that we don't end up |
452 | * with the on-disk inode size being outside the in-core inode size. We | 464 | * with the on-disk inode size being outside the in-core inode size. We |
@@ -461,20 +473,11 @@ xfs_dio_write_end_io( | |||
461 | spin_lock(&ip->i_flags_lock); | 473 | spin_lock(&ip->i_flags_lock); |
462 | if (offset + size > i_size_read(inode)) { | 474 | if (offset + size > i_size_read(inode)) { |
463 | i_size_write(inode, offset + size); | 475 | i_size_write(inode, offset + size); |
464 | update_size = true; | 476 | spin_unlock(&ip->i_flags_lock); |
465 | } | ||
466 | spin_unlock(&ip->i_flags_lock); | ||
467 | |||
468 | if (flags & IOMAP_DIO_COW) { | ||
469 | error = xfs_reflink_end_cow(ip, offset, size); | ||
470 | if (error) | ||
471 | return error; | ||
472 | } | ||
473 | |||
474 | if (flags & IOMAP_DIO_UNWRITTEN) | ||
475 | error = xfs_iomap_write_unwritten(ip, offset, size); | ||
476 | else if (update_size) | ||
477 | error = xfs_setfilesize(ip, offset, size); | 477 | error = xfs_setfilesize(ip, offset, size); |
478 | } else { | ||
479 | spin_unlock(&ip->i_flags_lock); | ||
480 | } | ||
478 | 481 | ||
479 | return error; | 482 | return error; |
480 | } | 483 | } |
diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c index 5599dda4727a..4ec5b7f45401 100644 --- a/fs/xfs/xfs_inode.c +++ b/fs/xfs/xfs_inode.c | |||
@@ -1624,10 +1624,12 @@ xfs_itruncate_extents( | |||
1624 | goto out; | 1624 | goto out; |
1625 | 1625 | ||
1626 | /* | 1626 | /* |
1627 | * Clear the reflink flag if we truncated everything. | 1627 | * Clear the reflink flag if there are no data fork blocks and |
1628 | * there are no extents staged in the cow fork. | ||
1628 | */ | 1629 | */ |
1629 | if (ip->i_d.di_nblocks == 0 && xfs_is_reflink_inode(ip)) { | 1630 | if (xfs_is_reflink_inode(ip) && ip->i_cnextents == 0) { |
1630 | ip->i_d.di_flags2 &= ~XFS_DIFLAG2_REFLINK; | 1631 | if (ip->i_d.di_nblocks == 0) |
1632 | ip->i_d.di_flags2 &= ~XFS_DIFLAG2_REFLINK; | ||
1631 | xfs_inode_clear_cowblocks_tag(ip); | 1633 | xfs_inode_clear_cowblocks_tag(ip); |
1632 | } | 1634 | } |
1633 | 1635 | ||
diff --git a/fs/xfs/xfs_inode_item.c b/fs/xfs/xfs_inode_item.c index 6d0f74ec31e8..a705f34b58fa 100644 --- a/fs/xfs/xfs_inode_item.c +++ b/fs/xfs/xfs_inode_item.c | |||
@@ -745,7 +745,7 @@ xfs_iflush_done( | |||
745 | */ | 745 | */ |
746 | iip = INODE_ITEM(blip); | 746 | iip = INODE_ITEM(blip); |
747 | if ((iip->ili_logged && blip->li_lsn == iip->ili_flush_lsn) || | 747 | if ((iip->ili_logged && blip->li_lsn == iip->ili_flush_lsn) || |
748 | lip->li_flags & XFS_LI_FAILED) | 748 | (blip->li_flags & XFS_LI_FAILED)) |
749 | need_ail++; | 749 | need_ail++; |
750 | 750 | ||
751 | blip = next; | 751 | blip = next; |
diff --git a/fs/xfs/xfs_ioctl.c b/fs/xfs/xfs_ioctl.c index 5049e8ab6e30..aa75389be8cf 100644 --- a/fs/xfs/xfs_ioctl.c +++ b/fs/xfs/xfs_ioctl.c | |||
@@ -1088,6 +1088,7 @@ xfs_ioctl_setattr_dax_invalidate( | |||
1088 | int *join_flags) | 1088 | int *join_flags) |
1089 | { | 1089 | { |
1090 | struct inode *inode = VFS_I(ip); | 1090 | struct inode *inode = VFS_I(ip); |
1091 | struct super_block *sb = inode->i_sb; | ||
1091 | int error; | 1092 | int error; |
1092 | 1093 | ||
1093 | *join_flags = 0; | 1094 | *join_flags = 0; |
@@ -1100,7 +1101,7 @@ xfs_ioctl_setattr_dax_invalidate( | |||
1100 | if (fa->fsx_xflags & FS_XFLAG_DAX) { | 1101 | if (fa->fsx_xflags & FS_XFLAG_DAX) { |
1101 | if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode))) | 1102 | if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode))) |
1102 | return -EINVAL; | 1103 | return -EINVAL; |
1103 | if (ip->i_mount->m_sb.sb_blocksize != PAGE_SIZE) | 1104 | if (bdev_dax_supported(sb, sb->s_blocksize) < 0) |
1104 | return -EINVAL; | 1105 | return -EINVAL; |
1105 | } | 1106 | } |
1106 | 1107 | ||
diff --git a/fs/xfs/xfs_iomap.c b/fs/xfs/xfs_iomap.c index a1909bc064e9..f179bdf1644d 100644 --- a/fs/xfs/xfs_iomap.c +++ b/fs/xfs/xfs_iomap.c | |||
@@ -829,7 +829,8 @@ int | |||
829 | xfs_iomap_write_unwritten( | 829 | xfs_iomap_write_unwritten( |
830 | xfs_inode_t *ip, | 830 | xfs_inode_t *ip, |
831 | xfs_off_t offset, | 831 | xfs_off_t offset, |
832 | xfs_off_t count) | 832 | xfs_off_t count, |
833 | bool update_isize) | ||
833 | { | 834 | { |
834 | xfs_mount_t *mp = ip->i_mount; | 835 | xfs_mount_t *mp = ip->i_mount; |
835 | xfs_fileoff_t offset_fsb; | 836 | xfs_fileoff_t offset_fsb; |
@@ -840,6 +841,7 @@ xfs_iomap_write_unwritten( | |||
840 | xfs_trans_t *tp; | 841 | xfs_trans_t *tp; |
841 | xfs_bmbt_irec_t imap; | 842 | xfs_bmbt_irec_t imap; |
842 | struct xfs_defer_ops dfops; | 843 | struct xfs_defer_ops dfops; |
844 | struct inode *inode = VFS_I(ip); | ||
843 | xfs_fsize_t i_size; | 845 | xfs_fsize_t i_size; |
844 | uint resblks; | 846 | uint resblks; |
845 | int error; | 847 | int error; |
@@ -899,7 +901,8 @@ xfs_iomap_write_unwritten( | |||
899 | i_size = XFS_FSB_TO_B(mp, offset_fsb + count_fsb); | 901 | i_size = XFS_FSB_TO_B(mp, offset_fsb + count_fsb); |
900 | if (i_size > offset + count) | 902 | if (i_size > offset + count) |
901 | i_size = offset + count; | 903 | i_size = offset + count; |
902 | 904 | if (update_isize && i_size > i_size_read(inode)) | |
905 | i_size_write(inode, i_size); | ||
903 | i_size = xfs_new_eof(ip, i_size); | 906 | i_size = xfs_new_eof(ip, i_size); |
904 | if (i_size) { | 907 | if (i_size) { |
905 | ip->i_d.di_size = i_size; | 908 | ip->i_d.di_size = i_size; |
diff --git a/fs/xfs/xfs_iomap.h b/fs/xfs/xfs_iomap.h index 00db3ecea084..ee535065c5d0 100644 --- a/fs/xfs/xfs_iomap.h +++ b/fs/xfs/xfs_iomap.h | |||
@@ -27,7 +27,7 @@ int xfs_iomap_write_direct(struct xfs_inode *, xfs_off_t, size_t, | |||
27 | struct xfs_bmbt_irec *, int); | 27 | struct xfs_bmbt_irec *, int); |
28 | int xfs_iomap_write_allocate(struct xfs_inode *, int, xfs_off_t, | 28 | int xfs_iomap_write_allocate(struct xfs_inode *, int, xfs_off_t, |
29 | struct xfs_bmbt_irec *); | 29 | struct xfs_bmbt_irec *); |
30 | int xfs_iomap_write_unwritten(struct xfs_inode *, xfs_off_t, xfs_off_t); | 30 | int xfs_iomap_write_unwritten(struct xfs_inode *, xfs_off_t, xfs_off_t, bool); |
31 | 31 | ||
32 | void xfs_bmbt_to_iomap(struct xfs_inode *, struct iomap *, | 32 | void xfs_bmbt_to_iomap(struct xfs_inode *, struct iomap *, |
33 | struct xfs_bmbt_irec *); | 33 | struct xfs_bmbt_irec *); |
diff --git a/fs/xfs/xfs_pnfs.c b/fs/xfs/xfs_pnfs.c index 2f2dc3c09ad0..4246876df7b7 100644 --- a/fs/xfs/xfs_pnfs.c +++ b/fs/xfs/xfs_pnfs.c | |||
@@ -274,7 +274,7 @@ xfs_fs_commit_blocks( | |||
274 | (end - 1) >> PAGE_SHIFT); | 274 | (end - 1) >> PAGE_SHIFT); |
275 | WARN_ON_ONCE(error); | 275 | WARN_ON_ONCE(error); |
276 | 276 | ||
277 | error = xfs_iomap_write_unwritten(ip, start, length); | 277 | error = xfs_iomap_write_unwritten(ip, start, length, false); |
278 | if (error) | 278 | if (error) |
279 | goto out_drop_iolock; | 279 | goto out_drop_iolock; |
280 | } | 280 | } |
diff --git a/fs/xfs/xfs_reflink.c b/fs/xfs/xfs_reflink.c index 3246815c24d6..37e603bf1591 100644 --- a/fs/xfs/xfs_reflink.c +++ b/fs/xfs/xfs_reflink.c | |||
@@ -736,7 +736,13 @@ xfs_reflink_end_cow( | |||
736 | /* If there is a hole at end_fsb - 1 go to the previous extent */ | 736 | /* If there is a hole at end_fsb - 1 go to the previous extent */ |
737 | if (!xfs_iext_lookup_extent(ip, ifp, end_fsb - 1, &idx, &got) || | 737 | if (!xfs_iext_lookup_extent(ip, ifp, end_fsb - 1, &idx, &got) || |
738 | got.br_startoff > end_fsb) { | 738 | got.br_startoff > end_fsb) { |
739 | ASSERT(idx > 0); | 739 | /* |
740 | * In case of racing, overlapping AIO writes no COW extents | ||
741 | * might be left by the time I/O completes for the loser of | ||
742 | * the race. In that case we are done. | ||
743 | */ | ||
744 | if (idx <= 0) | ||
745 | goto out_cancel; | ||
740 | xfs_iext_get_extent(ifp, --idx, &got); | 746 | xfs_iext_get_extent(ifp, --idx, &got); |
741 | } | 747 | } |
742 | 748 | ||
@@ -809,6 +815,7 @@ next_extent: | |||
809 | 815 | ||
810 | out_defer: | 816 | out_defer: |
811 | xfs_defer_cancel(&dfops); | 817 | xfs_defer_cancel(&dfops); |
818 | out_cancel: | ||
812 | xfs_trans_cancel(tp); | 819 | xfs_trans_cancel(tp); |
813 | xfs_iunlock(ip, XFS_ILOCK_EXCL); | 820 | xfs_iunlock(ip, XFS_ILOCK_EXCL); |
814 | out: | 821 | out: |
diff --git a/fs/xfs/xfs_super.c b/fs/xfs/xfs_super.c index c996f4ae4a5f..584cf2d573ba 100644 --- a/fs/xfs/xfs_super.c +++ b/fs/xfs/xfs_super.c | |||
@@ -1654,6 +1654,16 @@ xfs_fs_fill_super( | |||
1654 | "DAX and reflink have not been tested together!"); | 1654 | "DAX and reflink have not been tested together!"); |
1655 | } | 1655 | } |
1656 | 1656 | ||
1657 | if (mp->m_flags & XFS_MOUNT_DISCARD) { | ||
1658 | struct request_queue *q = bdev_get_queue(sb->s_bdev); | ||
1659 | |||
1660 | if (!blk_queue_discard(q)) { | ||
1661 | xfs_warn(mp, "mounting with \"discard\" option, but " | ||
1662 | "the device does not support discard"); | ||
1663 | mp->m_flags &= ~XFS_MOUNT_DISCARD; | ||
1664 | } | ||
1665 | } | ||
1666 | |||
1657 | if (xfs_sb_version_hasrmapbt(&mp->m_sb)) { | 1667 | if (xfs_sb_version_hasrmapbt(&mp->m_sb)) { |
1658 | if (mp->m_sb.sb_rblocks) { | 1668 | if (mp->m_sb.sb_rblocks) { |
1659 | xfs_alert(mp, | 1669 | xfs_alert(mp, |
diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h index dedf9d789166..fa1505292f6c 100644 --- a/include/acpi/acpi_bus.h +++ b/include/acpi/acpi_bus.h | |||
@@ -399,17 +399,12 @@ extern const struct fwnode_operations acpi_device_fwnode_ops; | |||
399 | extern const struct fwnode_operations acpi_data_fwnode_ops; | 399 | extern const struct fwnode_operations acpi_data_fwnode_ops; |
400 | extern const struct fwnode_operations acpi_static_fwnode_ops; | 400 | extern const struct fwnode_operations acpi_static_fwnode_ops; |
401 | 401 | ||
402 | bool is_acpi_device_node(const struct fwnode_handle *fwnode); | ||
403 | bool is_acpi_data_node(const struct fwnode_handle *fwnode); | ||
404 | |||
402 | static inline bool is_acpi_node(const struct fwnode_handle *fwnode) | 405 | static inline bool is_acpi_node(const struct fwnode_handle *fwnode) |
403 | { | 406 | { |
404 | return !IS_ERR_OR_NULL(fwnode) && | 407 | return (is_acpi_device_node(fwnode) || is_acpi_data_node(fwnode)); |
405 | (fwnode->ops == &acpi_device_fwnode_ops | ||
406 | || fwnode->ops == &acpi_data_fwnode_ops); | ||
407 | } | ||
408 | |||
409 | static inline bool is_acpi_device_node(const struct fwnode_handle *fwnode) | ||
410 | { | ||
411 | return !IS_ERR_OR_NULL(fwnode) && | ||
412 | fwnode->ops == &acpi_device_fwnode_ops; | ||
413 | } | 408 | } |
414 | 409 | ||
415 | #define to_acpi_device_node(__fwnode) \ | 410 | #define to_acpi_device_node(__fwnode) \ |
@@ -422,11 +417,6 @@ static inline bool is_acpi_device_node(const struct fwnode_handle *fwnode) | |||
422 | NULL; \ | 417 | NULL; \ |
423 | }) | 418 | }) |
424 | 419 | ||
425 | static inline bool is_acpi_data_node(const struct fwnode_handle *fwnode) | ||
426 | { | ||
427 | return !IS_ERR_OR_NULL(fwnode) && fwnode->ops == &acpi_data_fwnode_ops; | ||
428 | } | ||
429 | |||
430 | #define to_acpi_data_node(__fwnode) \ | 420 | #define to_acpi_data_node(__fwnode) \ |
431 | ({ \ | 421 | ({ \ |
432 | typeof(__fwnode) __to_acpi_data_node_fwnode = __fwnode; \ | 422 | typeof(__fwnode) __to_acpi_data_node_fwnode = __fwnode; \ |
diff --git a/include/asm-generic/percpu.h b/include/asm-generic/percpu.h index 0504ef8f3aa3..976f8ac26665 100644 --- a/include/asm-generic/percpu.h +++ b/include/asm-generic/percpu.h | |||
@@ -115,15 +115,35 @@ do { \ | |||
115 | (__ret); \ | 115 | (__ret); \ |
116 | }) | 116 | }) |
117 | 117 | ||
118 | #define this_cpu_generic_read(pcp) \ | 118 | #define __this_cpu_generic_read_nopreempt(pcp) \ |
119 | ({ \ | 119 | ({ \ |
120 | typeof(pcp) __ret; \ | 120 | typeof(pcp) __ret; \ |
121 | preempt_disable_notrace(); \ | 121 | preempt_disable_notrace(); \ |
122 | __ret = raw_cpu_generic_read(pcp); \ | 122 | __ret = READ_ONCE(*raw_cpu_ptr(&(pcp))); \ |
123 | preempt_enable_notrace(); \ | 123 | preempt_enable_notrace(); \ |
124 | __ret; \ | 124 | __ret; \ |
125 | }) | 125 | }) |
126 | 126 | ||
127 | #define __this_cpu_generic_read_noirq(pcp) \ | ||
128 | ({ \ | ||
129 | typeof(pcp) __ret; \ | ||
130 | unsigned long __flags; \ | ||
131 | raw_local_irq_save(__flags); \ | ||
132 | __ret = raw_cpu_generic_read(pcp); \ | ||
133 | raw_local_irq_restore(__flags); \ | ||
134 | __ret; \ | ||
135 | }) | ||
136 | |||
137 | #define this_cpu_generic_read(pcp) \ | ||
138 | ({ \ | ||
139 | typeof(pcp) __ret; \ | ||
140 | if (__native_word(pcp)) \ | ||
141 | __ret = __this_cpu_generic_read_nopreempt(pcp); \ | ||
142 | else \ | ||
143 | __ret = __this_cpu_generic_read_noirq(pcp); \ | ||
144 | __ret; \ | ||
145 | }) | ||
146 | |||
127 | #define this_cpu_generic_to_op(pcp, val, op) \ | 147 | #define this_cpu_generic_to_op(pcp, val, op) \ |
128 | do { \ | 148 | do { \ |
129 | unsigned long __flags; \ | 149 | unsigned long __flags; \ |
diff --git a/include/dt-bindings/reset/snps,hsdk-reset.h b/include/dt-bindings/reset/snps,hsdk-reset.h new file mode 100644 index 000000000000..e1a643e4bc91 --- /dev/null +++ b/include/dt-bindings/reset/snps,hsdk-reset.h | |||
@@ -0,0 +1,17 @@ | |||
1 | /** | ||
2 | * This header provides index for the HSDK reset controller. | ||
3 | */ | ||
4 | #ifndef _DT_BINDINGS_RESET_CONTROLLER_SNPS_HSDK | ||
5 | #define _DT_BINDINGS_RESET_CONTROLLER_SNPS_HSDK | ||
6 | |||
7 | #define HSDK_APB_RESET 0 | ||
8 | #define HSDK_AXI_RESET 1 | ||
9 | #define HSDK_ETH_RESET 2 | ||
10 | #define HSDK_USB_RESET 3 | ||
11 | #define HSDK_SDIO_RESET 4 | ||
12 | #define HSDK_HDMI_RESET 5 | ||
13 | #define HSDK_GFX_RESET 6 | ||
14 | #define HSDK_DMAC_RESET 7 | ||
15 | #define HSDK_EBI_RESET 8 | ||
16 | |||
17 | #endif /*_DT_BINDINGS_RESET_CONTROLLER_SNPS_HSDK*/ | ||
diff --git a/include/dt-bindings/reset/snps,hsdk-v1-reset.h b/include/dt-bindings/reset/snps,hsdk-v1-reset.h deleted file mode 100644 index d898c89b7123..000000000000 --- a/include/dt-bindings/reset/snps,hsdk-v1-reset.h +++ /dev/null | |||
@@ -1,17 +0,0 @@ | |||
1 | /** | ||
2 | * This header provides index for the HSDK v1 reset controller. | ||
3 | */ | ||
4 | #ifndef _DT_BINDINGS_RESET_CONTROLLER_HSDK_V1 | ||
5 | #define _DT_BINDINGS_RESET_CONTROLLER_HSDK_V1 | ||
6 | |||
7 | #define HSDK_V1_APB_RESET 0 | ||
8 | #define HSDK_V1_AXI_RESET 1 | ||
9 | #define HSDK_V1_ETH_RESET 2 | ||
10 | #define HSDK_V1_USB_RESET 3 | ||
11 | #define HSDK_V1_SDIO_RESET 4 | ||
12 | #define HSDK_V1_HDMI_RESET 5 | ||
13 | #define HSDK_V1_GFX_RESET 6 | ||
14 | #define HSDK_V1_DMAC_RESET 7 | ||
15 | #define HSDK_V1_EBI_RESET 8 | ||
16 | |||
17 | #endif /*_DT_BINDINGS_RESET_CONTROLLER_HSDK_V1*/ | ||
diff --git a/include/linux/audit.h b/include/linux/audit.h index 74d4d4e8e3db..cb708eb8accc 100644 --- a/include/linux/audit.h +++ b/include/linux/audit.h | |||
@@ -314,11 +314,7 @@ void audit_core_dumps(long signr); | |||
314 | 314 | ||
315 | static inline void audit_seccomp(unsigned long syscall, long signr, int code) | 315 | static inline void audit_seccomp(unsigned long syscall, long signr, int code) |
316 | { | 316 | { |
317 | if (!audit_enabled) | 317 | if (audit_enabled && unlikely(!audit_dummy_context())) |
318 | return; | ||
319 | |||
320 | /* Force a record to be reported if a signal was delivered. */ | ||
321 | if (signr || unlikely(!audit_dummy_context())) | ||
322 | __audit_seccomp(syscall, signr, code); | 318 | __audit_seccomp(syscall, signr, code); |
323 | } | 319 | } |
324 | 320 | ||
diff --git a/include/linux/binfmts.h b/include/linux/binfmts.h index fb44d6180ca0..18d05b5491f3 100644 --- a/include/linux/binfmts.h +++ b/include/linux/binfmts.h | |||
@@ -131,7 +131,7 @@ extern int setup_arg_pages(struct linux_binprm * bprm, | |||
131 | int executable_stack); | 131 | int executable_stack); |
132 | extern int transfer_args_to_stack(struct linux_binprm *bprm, | 132 | extern int transfer_args_to_stack(struct linux_binprm *bprm, |
133 | unsigned long *sp_location); | 133 | unsigned long *sp_location); |
134 | extern int bprm_change_interp(char *interp, struct linux_binprm *bprm); | 134 | extern int bprm_change_interp(const char *interp, struct linux_binprm *bprm); |
135 | extern int copy_strings_kernel(int argc, const char *const *argv, | 135 | extern int copy_strings_kernel(int argc, const char *const *argv, |
136 | struct linux_binprm *bprm); | 136 | struct linux_binprm *bprm); |
137 | extern int prepare_bprm_creds(struct linux_binprm *bprm); | 137 | extern int prepare_bprm_creds(struct linux_binprm *bprm); |
diff --git a/include/linux/bitfield.h b/include/linux/bitfield.h index 8b9d6fff002d..f2deb71958b2 100644 --- a/include/linux/bitfield.h +++ b/include/linux/bitfield.h | |||
@@ -92,7 +92,7 @@ | |||
92 | /** | 92 | /** |
93 | * FIELD_GET() - extract a bitfield element | 93 | * FIELD_GET() - extract a bitfield element |
94 | * @_mask: shifted mask defining the field's length and position | 94 | * @_mask: shifted mask defining the field's length and position |
95 | * @_reg: 32bit value of entire bitfield | 95 | * @_reg: value of entire bitfield |
96 | * | 96 | * |
97 | * FIELD_GET() extracts the field specified by @_mask from the | 97 | * FIELD_GET() extracts the field specified by @_mask from the |
98 | * bitfield passed in as @_reg by masking and shifting it down. | 98 | * bitfield passed in as @_reg by masking and shifting it down. |
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 460294bb0fa5..02fa42d24b52 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h | |||
@@ -551,6 +551,7 @@ struct request_queue { | |||
551 | int node; | 551 | int node; |
552 | #ifdef CONFIG_BLK_DEV_IO_TRACE | 552 | #ifdef CONFIG_BLK_DEV_IO_TRACE |
553 | struct blk_trace *blk_trace; | 553 | struct blk_trace *blk_trace; |
554 | struct mutex blk_trace_mutex; | ||
554 | #endif | 555 | #endif |
555 | /* | 556 | /* |
556 | * for flush operations | 557 | * for flush operations |
diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h index f24bfb2b9a2d..6d508767e144 100644 --- a/include/linux/cpuhotplug.h +++ b/include/linux/cpuhotplug.h | |||
@@ -3,8 +3,27 @@ | |||
3 | 3 | ||
4 | #include <linux/types.h> | 4 | #include <linux/types.h> |
5 | 5 | ||
6 | /* | ||
7 | * CPU-up CPU-down | ||
8 | * | ||
9 | * BP AP BP AP | ||
10 | * | ||
11 | * OFFLINE OFFLINE | ||
12 | * | ^ | ||
13 | * v | | ||
14 | * BRINGUP_CPU->AP_OFFLINE BRINGUP_CPU <- AP_IDLE_DEAD (idle thread/play_dead) | ||
15 | * | AP_OFFLINE | ||
16 | * v (IRQ-off) ,---------------^ | ||
17 | * AP_ONLNE | (stop_machine) | ||
18 | * | TEARDOWN_CPU <- AP_ONLINE_IDLE | ||
19 | * | ^ | ||
20 | * v | | ||
21 | * AP_ACTIVE AP_ACTIVE | ||
22 | */ | ||
23 | |||
6 | enum cpuhp_state { | 24 | enum cpuhp_state { |
7 | CPUHP_OFFLINE, | 25 | CPUHP_INVALID = -1, |
26 | CPUHP_OFFLINE = 0, | ||
8 | CPUHP_CREATE_THREADS, | 27 | CPUHP_CREATE_THREADS, |
9 | CPUHP_PERF_PREPARE, | 28 | CPUHP_PERF_PREPARE, |
10 | CPUHP_PERF_X86_PREPARE, | 29 | CPUHP_PERF_X86_PREPARE, |
diff --git a/include/linux/device.h b/include/linux/device.h index c6f27207dbe8..66fe271c2544 100644 --- a/include/linux/device.h +++ b/include/linux/device.h | |||
@@ -307,8 +307,6 @@ struct driver_attribute { | |||
307 | size_t count); | 307 | size_t count); |
308 | }; | 308 | }; |
309 | 309 | ||
310 | #define DRIVER_ATTR(_name, _mode, _show, _store) \ | ||
311 | struct driver_attribute driver_attr_##_name = __ATTR(_name, _mode, _show, _store) | ||
312 | #define DRIVER_ATTR_RW(_name) \ | 310 | #define DRIVER_ATTR_RW(_name) \ |
313 | struct driver_attribute driver_attr_##_name = __ATTR_RW(_name) | 311 | struct driver_attribute driver_attr_##_name = __ATTR_RW(_name) |
314 | #define DRIVER_ATTR_RO(_name) \ | 312 | #define DRIVER_ATTR_RO(_name) \ |
@@ -838,7 +836,7 @@ struct dev_links_info { | |||
838 | * @driver_data: Private pointer for driver specific info. | 836 | * @driver_data: Private pointer for driver specific info. |
839 | * @links: Links to suppliers and consumers of this device. | 837 | * @links: Links to suppliers and consumers of this device. |
840 | * @power: For device power management. | 838 | * @power: For device power management. |
841 | * See Documentation/power/admin-guide/devices.rst for details. | 839 | * See Documentation/driver-api/pm/devices.rst for details. |
842 | * @pm_domain: Provide callbacks that are executed during system suspend, | 840 | * @pm_domain: Provide callbacks that are executed during system suspend, |
843 | * hibernation, system resume and during runtime PM transitions | 841 | * hibernation, system resume and during runtime PM transitions |
844 | * along with subsystem-level and driver-level callbacks. | 842 | * along with subsystem-level and driver-level callbacks. |
diff --git a/include/linux/fs.h b/include/linux/fs.h index 339e73742e73..13dab191a23e 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h | |||
@@ -403,7 +403,7 @@ struct address_space { | |||
403 | unsigned long flags; /* error bits */ | 403 | unsigned long flags; /* error bits */ |
404 | spinlock_t private_lock; /* for use by the address_space */ | 404 | spinlock_t private_lock; /* for use by the address_space */ |
405 | gfp_t gfp_mask; /* implicit gfp mask for allocations */ | 405 | gfp_t gfp_mask; /* implicit gfp mask for allocations */ |
406 | struct list_head private_list; /* ditto */ | 406 | struct list_head private_list; /* for use by the address_space */ |
407 | void *private_data; /* ditto */ | 407 | void *private_data; /* ditto */ |
408 | errseq_t wb_err; | 408 | errseq_t wb_err; |
409 | } __attribute__((aligned(sizeof(long)))) __randomize_layout; | 409 | } __attribute__((aligned(sizeof(long)))) __randomize_layout; |
diff --git a/include/linux/iio/adc/ad_sigma_delta.h b/include/linux/iio/adc/ad_sigma_delta.h index 5ba430cc9a87..1fc7abd28b0b 100644 --- a/include/linux/iio/adc/ad_sigma_delta.h +++ b/include/linux/iio/adc/ad_sigma_delta.h | |||
@@ -111,6 +111,9 @@ int ad_sd_write_reg(struct ad_sigma_delta *sigma_delta, unsigned int reg, | |||
111 | int ad_sd_read_reg(struct ad_sigma_delta *sigma_delta, unsigned int reg, | 111 | int ad_sd_read_reg(struct ad_sigma_delta *sigma_delta, unsigned int reg, |
112 | unsigned int size, unsigned int *val); | 112 | unsigned int size, unsigned int *val); |
113 | 113 | ||
114 | int ad_sd_reset(struct ad_sigma_delta *sigma_delta, | ||
115 | unsigned int reset_length); | ||
116 | |||
114 | int ad_sigma_delta_single_conversion(struct iio_dev *indio_dev, | 117 | int ad_sigma_delta_single_conversion(struct iio_dev *indio_dev, |
115 | const struct iio_chan_spec *chan, int *val); | 118 | const struct iio_chan_spec *chan, int *val); |
116 | int ad_sd_calibrate_all(struct ad_sigma_delta *sigma_delta, | 119 | int ad_sd_calibrate_all(struct ad_sigma_delta *sigma_delta, |
diff --git a/include/linux/input.h b/include/linux/input.h index a65e3b24fb18..fb5e23c7ed98 100644 --- a/include/linux/input.h +++ b/include/linux/input.h | |||
@@ -529,6 +529,7 @@ int input_ff_event(struct input_dev *dev, unsigned int type, unsigned int code, | |||
529 | 529 | ||
530 | int input_ff_upload(struct input_dev *dev, struct ff_effect *effect, struct file *file); | 530 | int input_ff_upload(struct input_dev *dev, struct ff_effect *effect, struct file *file); |
531 | int input_ff_erase(struct input_dev *dev, int effect_id, struct file *file); | 531 | int input_ff_erase(struct input_dev *dev, int effect_id, struct file *file); |
532 | int input_ff_flush(struct input_dev *dev, struct file *file); | ||
532 | 533 | ||
533 | int input_ff_create_memless(struct input_dev *dev, void *data, | 534 | int input_ff_create_memless(struct input_dev *dev, void *data, |
534 | int (*play_effect)(struct input_dev *, void *, struct ff_effect *)); | 535 | int (*play_effect)(struct input_dev *, void *, struct ff_effect *)); |
diff --git a/include/linux/iommu.h b/include/linux/iommu.h index a7f2ac689d29..41b8c5757859 100644 --- a/include/linux/iommu.h +++ b/include/linux/iommu.h | |||
@@ -167,11 +167,11 @@ struct iommu_resv_region { | |||
167 | * @map: map a physically contiguous memory region to an iommu domain | 167 | * @map: map a physically contiguous memory region to an iommu domain |
168 | * @unmap: unmap a physically contiguous memory region from an iommu domain | 168 | * @unmap: unmap a physically contiguous memory region from an iommu domain |
169 | * @map_sg: map a scatter-gather list of physically contiguous memory chunks | 169 | * @map_sg: map a scatter-gather list of physically contiguous memory chunks |
170 | * to an iommu domain | ||
170 | * @flush_tlb_all: Synchronously flush all hardware TLBs for this domain | 171 | * @flush_tlb_all: Synchronously flush all hardware TLBs for this domain |
171 | * @tlb_range_add: Add a given iova range to the flush queue for this domain | 172 | * @tlb_range_add: Add a given iova range to the flush queue for this domain |
172 | * @tlb_sync: Flush all queued ranges from the hardware TLBs and empty flush | 173 | * @tlb_sync: Flush all queued ranges from the hardware TLBs and empty flush |
173 | * queue | 174 | * queue |
174 | * to an iommu domain | ||
175 | * @iova_to_phys: translate iova to physical address | 175 | * @iova_to_phys: translate iova to physical address |
176 | * @add_device: add device to iommu grouping | 176 | * @add_device: add device to iommu grouping |
177 | * @remove_device: remove device from iommu grouping | 177 | * @remove_device: remove device from iommu grouping |
diff --git a/include/linux/irq.h b/include/linux/irq.h index b99a784635ff..d4728bf6a537 100644 --- a/include/linux/irq.h +++ b/include/linux/irq.h | |||
@@ -783,10 +783,7 @@ static inline struct cpumask *irq_data_get_affinity_mask(struct irq_data *d) | |||
783 | static inline | 783 | static inline |
784 | struct cpumask *irq_data_get_effective_affinity_mask(struct irq_data *d) | 784 | struct cpumask *irq_data_get_effective_affinity_mask(struct irq_data *d) |
785 | { | 785 | { |
786 | if (!cpumask_empty(d->common->effective_affinity)) | 786 | return d->common->effective_affinity; |
787 | return d->common->effective_affinity; | ||
788 | |||
789 | return d->common->affinity; | ||
790 | } | 787 | } |
791 | static inline void irq_data_update_effective_affinity(struct irq_data *d, | 788 | static inline void irq_data_update_effective_affinity(struct irq_data *d, |
792 | const struct cpumask *m) | 789 | const struct cpumask *m) |
diff --git a/include/linux/key.h b/include/linux/key.h index 044114185120..e315e16b6ff8 100644 --- a/include/linux/key.h +++ b/include/linux/key.h | |||
@@ -187,6 +187,7 @@ struct key { | |||
187 | #define KEY_FLAG_BUILTIN 8 /* set if key is built in to the kernel */ | 187 | #define KEY_FLAG_BUILTIN 8 /* set if key is built in to the kernel */ |
188 | #define KEY_FLAG_ROOT_CAN_INVAL 9 /* set if key can be invalidated by root without permission */ | 188 | #define KEY_FLAG_ROOT_CAN_INVAL 9 /* set if key can be invalidated by root without permission */ |
189 | #define KEY_FLAG_KEEP 10 /* set if key should not be removed */ | 189 | #define KEY_FLAG_KEEP 10 /* set if key should not be removed */ |
190 | #define KEY_FLAG_UID_KEYRING 11 /* set if key is a user or user session keyring */ | ||
190 | 191 | ||
191 | /* the key type and key description string | 192 | /* the key type and key description string |
192 | * - the desc is used to match a key against search criteria | 193 | * - the desc is used to match a key against search criteria |
@@ -243,6 +244,7 @@ extern struct key *key_alloc(struct key_type *type, | |||
243 | #define KEY_ALLOC_NOT_IN_QUOTA 0x0002 /* not in quota */ | 244 | #define KEY_ALLOC_NOT_IN_QUOTA 0x0002 /* not in quota */ |
244 | #define KEY_ALLOC_BUILT_IN 0x0004 /* Key is built into kernel */ | 245 | #define KEY_ALLOC_BUILT_IN 0x0004 /* Key is built into kernel */ |
245 | #define KEY_ALLOC_BYPASS_RESTRICTION 0x0008 /* Override the check on restricted keyrings */ | 246 | #define KEY_ALLOC_BYPASS_RESTRICTION 0x0008 /* Override the check on restricted keyrings */ |
247 | #define KEY_ALLOC_UID_KEYRING 0x0010 /* allocating a user or user session keyring */ | ||
246 | 248 | ||
247 | extern void key_revoke(struct key *key); | 249 | extern void key_revoke(struct key *key); |
248 | extern void key_invalidate(struct key *key); | 250 | extern void key_invalidate(struct key *key); |
diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h index eaf4ad209c8f..e32dbc4934db 100644 --- a/include/linux/mlx5/device.h +++ b/include/linux/mlx5/device.h | |||
@@ -980,7 +980,6 @@ enum mlx5_cap_type { | |||
980 | MLX5_CAP_RESERVED, | 980 | MLX5_CAP_RESERVED, |
981 | MLX5_CAP_VECTOR_CALC, | 981 | MLX5_CAP_VECTOR_CALC, |
982 | MLX5_CAP_QOS, | 982 | MLX5_CAP_QOS, |
983 | MLX5_CAP_FPGA, | ||
984 | /* NUM OF CAP Types */ | 983 | /* NUM OF CAP Types */ |
985 | MLX5_CAP_NUM | 984 | MLX5_CAP_NUM |
986 | }; | 985 | }; |
@@ -1110,10 +1109,10 @@ enum mlx5_mcam_feature_groups { | |||
1110 | MLX5_GET(mcam_reg, (mdev)->caps.mcam, mng_feature_cap_mask.enhanced_features.fld) | 1109 | MLX5_GET(mcam_reg, (mdev)->caps.mcam, mng_feature_cap_mask.enhanced_features.fld) |
1111 | 1110 | ||
1112 | #define MLX5_CAP_FPGA(mdev, cap) \ | 1111 | #define MLX5_CAP_FPGA(mdev, cap) \ |
1113 | MLX5_GET(fpga_cap, (mdev)->caps.hca_cur[MLX5_CAP_FPGA], cap) | 1112 | MLX5_GET(fpga_cap, (mdev)->caps.fpga, cap) |
1114 | 1113 | ||
1115 | #define MLX5_CAP64_FPGA(mdev, cap) \ | 1114 | #define MLX5_CAP64_FPGA(mdev, cap) \ |
1116 | MLX5_GET64(fpga_cap, (mdev)->caps.hca_cur[MLX5_CAP_FPGA], cap) | 1115 | MLX5_GET64(fpga_cap, (mdev)->caps.fpga, cap) |
1117 | 1116 | ||
1118 | enum { | 1117 | enum { |
1119 | MLX5_CMD_STAT_OK = 0x0, | 1118 | MLX5_CMD_STAT_OK = 0x0, |
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h index 02ff700e4f30..401c8972cc3a 100644 --- a/include/linux/mlx5/driver.h +++ b/include/linux/mlx5/driver.h | |||
@@ -774,6 +774,7 @@ struct mlx5_core_dev { | |||
774 | u32 hca_max[MLX5_CAP_NUM][MLX5_UN_SZ_DW(hca_cap_union)]; | 774 | u32 hca_max[MLX5_CAP_NUM][MLX5_UN_SZ_DW(hca_cap_union)]; |
775 | u32 pcam[MLX5_ST_SZ_DW(pcam_reg)]; | 775 | u32 pcam[MLX5_ST_SZ_DW(pcam_reg)]; |
776 | u32 mcam[MLX5_ST_SZ_DW(mcam_reg)]; | 776 | u32 mcam[MLX5_ST_SZ_DW(mcam_reg)]; |
777 | u32 fpga[MLX5_ST_SZ_DW(fpga_cap)]; | ||
777 | } caps; | 778 | } caps; |
778 | phys_addr_t iseg_base; | 779 | phys_addr_t iseg_base; |
779 | struct mlx5_init_seg __iomem *iseg; | 780 | struct mlx5_init_seg __iomem *iseg; |
diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h index a528b35a022e..69772347f866 100644 --- a/include/linux/mlx5/mlx5_ifc.h +++ b/include/linux/mlx5/mlx5_ifc.h | |||
@@ -327,7 +327,8 @@ struct mlx5_ifc_flow_table_prop_layout_bits { | |||
327 | u8 reserved_at_80[0x18]; | 327 | u8 reserved_at_80[0x18]; |
328 | u8 log_max_destination[0x8]; | 328 | u8 log_max_destination[0x8]; |
329 | 329 | ||
330 | u8 reserved_at_a0[0x18]; | 330 | u8 log_max_flow_counter[0x8]; |
331 | u8 reserved_at_a8[0x10]; | ||
331 | u8 log_max_flow[0x8]; | 332 | u8 log_max_flow[0x8]; |
332 | 333 | ||
333 | u8 reserved_at_c0[0x40]; | 334 | u8 reserved_at_c0[0x40]; |
diff --git a/include/linux/mm.h b/include/linux/mm.h index f8c10d336e42..065d99deb847 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h | |||
@@ -240,7 +240,7 @@ extern unsigned int kobjsize(const void *objp); | |||
240 | 240 | ||
241 | #if defined(CONFIG_X86_INTEL_MPX) | 241 | #if defined(CONFIG_X86_INTEL_MPX) |
242 | /* MPX specific bounds table or bounds directory */ | 242 | /* MPX specific bounds table or bounds directory */ |
243 | # define VM_MPX VM_HIGH_ARCH_BIT_4 | 243 | # define VM_MPX VM_HIGH_ARCH_4 |
244 | #else | 244 | #else |
245 | # define VM_MPX VM_NONE | 245 | # define VM_MPX VM_NONE |
246 | #endif | 246 | #endif |
diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h index f3f2d07feb2a..9a43763a68ad 100644 --- a/include/linux/mmc/host.h +++ b/include/linux/mmc/host.h | |||
@@ -316,7 +316,7 @@ struct mmc_host { | |||
316 | #define MMC_CAP_UHS_SDR50 (1 << 18) /* Host supports UHS SDR50 mode */ | 316 | #define MMC_CAP_UHS_SDR50 (1 << 18) /* Host supports UHS SDR50 mode */ |
317 | #define MMC_CAP_UHS_SDR104 (1 << 19) /* Host supports UHS SDR104 mode */ | 317 | #define MMC_CAP_UHS_SDR104 (1 << 19) /* Host supports UHS SDR104 mode */ |
318 | #define MMC_CAP_UHS_DDR50 (1 << 20) /* Host supports UHS DDR50 mode */ | 318 | #define MMC_CAP_UHS_DDR50 (1 << 20) /* Host supports UHS DDR50 mode */ |
319 | #define MMC_CAP_NO_BOUNCE_BUFF (1 << 21) /* Disable bounce buffers on host */ | 319 | /* (1 << 21) is free for reuse */ |
320 | #define MMC_CAP_DRIVER_TYPE_A (1 << 23) /* Host supports Driver Type A */ | 320 | #define MMC_CAP_DRIVER_TYPE_A (1 << 23) /* Host supports Driver Type A */ |
321 | #define MMC_CAP_DRIVER_TYPE_C (1 << 24) /* Host supports Driver Type C */ | 321 | #define MMC_CAP_DRIVER_TYPE_C (1 << 24) /* Host supports Driver Type C */ |
322 | #define MMC_CAP_DRIVER_TYPE_D (1 << 25) /* Host supports Driver Type D */ | 322 | #define MMC_CAP_DRIVER_TYPE_D (1 << 25) /* Host supports Driver Type D */ |
diff --git a/include/linux/mmu_notifier.h b/include/linux/mmu_notifier.h index 7b2e31b1745a..6866e8126982 100644 --- a/include/linux/mmu_notifier.h +++ b/include/linux/mmu_notifier.h | |||
@@ -400,6 +400,11 @@ extern void mmu_notifier_synchronize(void); | |||
400 | 400 | ||
401 | #else /* CONFIG_MMU_NOTIFIER */ | 401 | #else /* CONFIG_MMU_NOTIFIER */ |
402 | 402 | ||
403 | static inline int mm_has_notifiers(struct mm_struct *mm) | ||
404 | { | ||
405 | return 0; | ||
406 | } | ||
407 | |||
403 | static inline void mmu_notifier_release(struct mm_struct *mm) | 408 | static inline void mmu_notifier_release(struct mm_struct *mm) |
404 | { | 409 | { |
405 | } | 410 | } |
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h index 356a814e7c8e..c8f89417740b 100644 --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h | |||
@@ -1094,8 +1094,14 @@ static inline unsigned long early_pfn_to_nid(unsigned long pfn) | |||
1094 | #error Allocator MAX_ORDER exceeds SECTION_SIZE | 1094 | #error Allocator MAX_ORDER exceeds SECTION_SIZE |
1095 | #endif | 1095 | #endif |
1096 | 1096 | ||
1097 | #define pfn_to_section_nr(pfn) ((pfn) >> PFN_SECTION_SHIFT) | 1097 | static inline unsigned long pfn_to_section_nr(unsigned long pfn) |
1098 | #define section_nr_to_pfn(sec) ((sec) << PFN_SECTION_SHIFT) | 1098 | { |
1099 | return pfn >> PFN_SECTION_SHIFT; | ||
1100 | } | ||
1101 | static inline unsigned long section_nr_to_pfn(unsigned long sec) | ||
1102 | { | ||
1103 | return sec << PFN_SECTION_SHIFT; | ||
1104 | } | ||
1099 | 1105 | ||
1100 | #define SECTION_ALIGN_UP(pfn) (((pfn) + PAGES_PER_SECTION - 1) & PAGE_SECTION_MASK) | 1106 | #define SECTION_ALIGN_UP(pfn) (((pfn) + PAGES_PER_SECTION - 1) & PAGE_SECTION_MASK) |
1101 | #define SECTION_ALIGN_DOWN(pfn) ((pfn) & PAGE_SECTION_MASK) | 1107 | #define SECTION_ALIGN_DOWN(pfn) ((pfn) & PAGE_SECTION_MASK) |
diff --git a/include/linux/nmi.h b/include/linux/nmi.h index a36abe2da13e..27e249ed7c5c 100644 --- a/include/linux/nmi.h +++ b/include/linux/nmi.h | |||
@@ -12,11 +12,31 @@ | |||
12 | 12 | ||
13 | #ifdef CONFIG_LOCKUP_DETECTOR | 13 | #ifdef CONFIG_LOCKUP_DETECTOR |
14 | void lockup_detector_init(void); | 14 | void lockup_detector_init(void); |
15 | void lockup_detector_soft_poweroff(void); | ||
16 | void lockup_detector_cleanup(void); | ||
17 | bool is_hardlockup(void); | ||
18 | |||
19 | extern int watchdog_user_enabled; | ||
20 | extern int nmi_watchdog_user_enabled; | ||
21 | extern int soft_watchdog_user_enabled; | ||
22 | extern int watchdog_thresh; | ||
23 | extern unsigned long watchdog_enabled; | ||
24 | |||
25 | extern struct cpumask watchdog_cpumask; | ||
26 | extern unsigned long *watchdog_cpumask_bits; | ||
27 | #ifdef CONFIG_SMP | ||
28 | extern int sysctl_softlockup_all_cpu_backtrace; | ||
29 | extern int sysctl_hardlockup_all_cpu_backtrace; | ||
15 | #else | 30 | #else |
16 | static inline void lockup_detector_init(void) | 31 | #define sysctl_softlockup_all_cpu_backtrace 0 |
17 | { | 32 | #define sysctl_hardlockup_all_cpu_backtrace 0 |
18 | } | 33 | #endif /* !CONFIG_SMP */ |
19 | #endif | 34 | |
35 | #else /* CONFIG_LOCKUP_DETECTOR */ | ||
36 | static inline void lockup_detector_init(void) { } | ||
37 | static inline void lockup_detector_soft_poweroff(void) { } | ||
38 | static inline void lockup_detector_cleanup(void) { } | ||
39 | #endif /* !CONFIG_LOCKUP_DETECTOR */ | ||
20 | 40 | ||
21 | #ifdef CONFIG_SOFTLOCKUP_DETECTOR | 41 | #ifdef CONFIG_SOFTLOCKUP_DETECTOR |
22 | extern void touch_softlockup_watchdog_sched(void); | 42 | extern void touch_softlockup_watchdog_sched(void); |
@@ -24,29 +44,17 @@ extern void touch_softlockup_watchdog(void); | |||
24 | extern void touch_softlockup_watchdog_sync(void); | 44 | extern void touch_softlockup_watchdog_sync(void); |
25 | extern void touch_all_softlockup_watchdogs(void); | 45 | extern void touch_all_softlockup_watchdogs(void); |
26 | extern unsigned int softlockup_panic; | 46 | extern unsigned int softlockup_panic; |
27 | extern int soft_watchdog_enabled; | ||
28 | extern atomic_t watchdog_park_in_progress; | ||
29 | #else | 47 | #else |
30 | static inline void touch_softlockup_watchdog_sched(void) | 48 | static inline void touch_softlockup_watchdog_sched(void) { } |
31 | { | 49 | static inline void touch_softlockup_watchdog(void) { } |
32 | } | 50 | static inline void touch_softlockup_watchdog_sync(void) { } |
33 | static inline void touch_softlockup_watchdog(void) | 51 | static inline void touch_all_softlockup_watchdogs(void) { } |
34 | { | ||
35 | } | ||
36 | static inline void touch_softlockup_watchdog_sync(void) | ||
37 | { | ||
38 | } | ||
39 | static inline void touch_all_softlockup_watchdogs(void) | ||
40 | { | ||
41 | } | ||
42 | #endif | 52 | #endif |
43 | 53 | ||
44 | #ifdef CONFIG_DETECT_HUNG_TASK | 54 | #ifdef CONFIG_DETECT_HUNG_TASK |
45 | void reset_hung_task_detector(void); | 55 | void reset_hung_task_detector(void); |
46 | #else | 56 | #else |
47 | static inline void reset_hung_task_detector(void) | 57 | static inline void reset_hung_task_detector(void) { } |
48 | { | ||
49 | } | ||
50 | #endif | 58 | #endif |
51 | 59 | ||
52 | /* | 60 | /* |
@@ -54,12 +62,12 @@ static inline void reset_hung_task_detector(void) | |||
54 | * 'watchdog_enabled' variable. Each lockup detector has its dedicated bit - | 62 | * 'watchdog_enabled' variable. Each lockup detector has its dedicated bit - |
55 | * bit 0 for the hard lockup detector and bit 1 for the soft lockup detector. | 63 | * bit 0 for the hard lockup detector and bit 1 for the soft lockup detector. |
56 | * | 64 | * |
57 | * 'watchdog_user_enabled', 'nmi_watchdog_enabled' and 'soft_watchdog_enabled' | 65 | * 'watchdog_user_enabled', 'nmi_watchdog_user_enabled' and |
58 | * are variables that are only used as an 'interface' between the parameters | 66 | * 'soft_watchdog_user_enabled' are variables that are only used as an |
59 | * in /proc/sys/kernel and the internal state bits in 'watchdog_enabled'. The | 67 | * 'interface' between the parameters in /proc/sys/kernel and the internal |
60 | * 'watchdog_thresh' variable is handled differently because its value is not | 68 | * state bits in 'watchdog_enabled'. The 'watchdog_thresh' variable is |
61 | * boolean, and the lockup detectors are 'suspended' while 'watchdog_thresh' | 69 | * handled differently because its value is not boolean, and the lockup |
62 | * is equal zero. | 70 | * detectors are 'suspended' while 'watchdog_thresh' is equal zero. |
63 | */ | 71 | */ |
64 | #define NMI_WATCHDOG_ENABLED_BIT 0 | 72 | #define NMI_WATCHDOG_ENABLED_BIT 0 |
65 | #define SOFT_WATCHDOG_ENABLED_BIT 1 | 73 | #define SOFT_WATCHDOG_ENABLED_BIT 1 |
@@ -73,17 +81,41 @@ extern unsigned int hardlockup_panic; | |||
73 | static inline void hardlockup_detector_disable(void) {} | 81 | static inline void hardlockup_detector_disable(void) {} |
74 | #endif | 82 | #endif |
75 | 83 | ||
84 | #if defined(CONFIG_HAVE_NMI_WATCHDOG) || defined(CONFIG_HARDLOCKUP_DETECTOR) | ||
85 | # define NMI_WATCHDOG_SYSCTL_PERM 0644 | ||
86 | #else | ||
87 | # define NMI_WATCHDOG_SYSCTL_PERM 0444 | ||
88 | #endif | ||
89 | |||
76 | #if defined(CONFIG_HARDLOCKUP_DETECTOR_PERF) | 90 | #if defined(CONFIG_HARDLOCKUP_DETECTOR_PERF) |
77 | extern void arch_touch_nmi_watchdog(void); | 91 | extern void arch_touch_nmi_watchdog(void); |
92 | extern void hardlockup_detector_perf_stop(void); | ||
93 | extern void hardlockup_detector_perf_restart(void); | ||
94 | extern void hardlockup_detector_perf_disable(void); | ||
95 | extern void hardlockup_detector_perf_enable(void); | ||
96 | extern void hardlockup_detector_perf_cleanup(void); | ||
97 | extern int hardlockup_detector_perf_init(void); | ||
78 | #else | 98 | #else |
79 | #if !defined(CONFIG_HAVE_NMI_WATCHDOG) | 99 | static inline void hardlockup_detector_perf_stop(void) { } |
100 | static inline void hardlockup_detector_perf_restart(void) { } | ||
101 | static inline void hardlockup_detector_perf_disable(void) { } | ||
102 | static inline void hardlockup_detector_perf_enable(void) { } | ||
103 | static inline void hardlockup_detector_perf_cleanup(void) { } | ||
104 | # if !defined(CONFIG_HAVE_NMI_WATCHDOG) | ||
105 | static inline int hardlockup_detector_perf_init(void) { return -ENODEV; } | ||
80 | static inline void arch_touch_nmi_watchdog(void) {} | 106 | static inline void arch_touch_nmi_watchdog(void) {} |
107 | # else | ||
108 | static inline int hardlockup_detector_perf_init(void) { return 0; } | ||
109 | # endif | ||
81 | #endif | 110 | #endif |
82 | #endif | 111 | |
112 | void watchdog_nmi_stop(void); | ||
113 | void watchdog_nmi_start(void); | ||
114 | int watchdog_nmi_probe(void); | ||
83 | 115 | ||
84 | /** | 116 | /** |
85 | * touch_nmi_watchdog - restart NMI watchdog timeout. | 117 | * touch_nmi_watchdog - restart NMI watchdog timeout. |
86 | * | 118 | * |
87 | * If the architecture supports the NMI watchdog, touch_nmi_watchdog() | 119 | * If the architecture supports the NMI watchdog, touch_nmi_watchdog() |
88 | * may be used to reset the timeout - for code which intentionally | 120 | * may be used to reset the timeout - for code which intentionally |
89 | * disables interrupts for a long time. This call is stateless. | 121 | * disables interrupts for a long time. This call is stateless. |
@@ -153,22 +185,6 @@ static inline bool trigger_single_cpu_backtrace(int cpu) | |||
153 | u64 hw_nmi_get_sample_period(int watchdog_thresh); | 185 | u64 hw_nmi_get_sample_period(int watchdog_thresh); |
154 | #endif | 186 | #endif |
155 | 187 | ||
156 | #ifdef CONFIG_LOCKUP_DETECTOR | ||
157 | extern int nmi_watchdog_enabled; | ||
158 | extern int watchdog_user_enabled; | ||
159 | extern int watchdog_thresh; | ||
160 | extern unsigned long watchdog_enabled; | ||
161 | extern struct cpumask watchdog_cpumask; | ||
162 | extern unsigned long *watchdog_cpumask_bits; | ||
163 | extern int __read_mostly watchdog_suspended; | ||
164 | #ifdef CONFIG_SMP | ||
165 | extern int sysctl_softlockup_all_cpu_backtrace; | ||
166 | extern int sysctl_hardlockup_all_cpu_backtrace; | ||
167 | #else | ||
168 | #define sysctl_softlockup_all_cpu_backtrace 0 | ||
169 | #define sysctl_hardlockup_all_cpu_backtrace 0 | ||
170 | #endif | ||
171 | |||
172 | #if defined(CONFIG_HARDLOCKUP_CHECK_TIMESTAMP) && \ | 188 | #if defined(CONFIG_HARDLOCKUP_CHECK_TIMESTAMP) && \ |
173 | defined(CONFIG_HARDLOCKUP_DETECTOR) | 189 | defined(CONFIG_HARDLOCKUP_DETECTOR) |
174 | void watchdog_update_hrtimer_threshold(u64 period); | 190 | void watchdog_update_hrtimer_threshold(u64 period); |
@@ -176,7 +192,6 @@ void watchdog_update_hrtimer_threshold(u64 period); | |||
176 | static inline void watchdog_update_hrtimer_threshold(u64 period) { } | 192 | static inline void watchdog_update_hrtimer_threshold(u64 period) { } |
177 | #endif | 193 | #endif |
178 | 194 | ||
179 | extern bool is_hardlockup(void); | ||
180 | struct ctl_table; | 195 | struct ctl_table; |
181 | extern int proc_watchdog(struct ctl_table *, int , | 196 | extern int proc_watchdog(struct ctl_table *, int , |
182 | void __user *, size_t *, loff_t *); | 197 | void __user *, size_t *, loff_t *); |
@@ -188,18 +203,6 @@ extern int proc_watchdog_thresh(struct ctl_table *, int , | |||
188 | void __user *, size_t *, loff_t *); | 203 | void __user *, size_t *, loff_t *); |
189 | extern int proc_watchdog_cpumask(struct ctl_table *, int, | 204 | extern int proc_watchdog_cpumask(struct ctl_table *, int, |
190 | void __user *, size_t *, loff_t *); | 205 | void __user *, size_t *, loff_t *); |
191 | extern int lockup_detector_suspend(void); | ||
192 | extern void lockup_detector_resume(void); | ||
193 | #else | ||
194 | static inline int lockup_detector_suspend(void) | ||
195 | { | ||
196 | return 0; | ||
197 | } | ||
198 | |||
199 | static inline void lockup_detector_resume(void) | ||
200 | { | ||
201 | } | ||
202 | #endif | ||
203 | 206 | ||
204 | #ifdef CONFIG_HAVE_ACPI_APEI_NMI | 207 | #ifdef CONFIG_HAVE_ACPI_APEI_NMI |
205 | #include <asm/nmi.h> | 208 | #include <asm/nmi.h> |
diff --git a/include/linux/nvme-fc-driver.h b/include/linux/nvme-fc-driver.h index 9c5cb4480806..a726f96010d5 100644 --- a/include/linux/nvme-fc-driver.h +++ b/include/linux/nvme-fc-driver.h | |||
@@ -346,11 +346,6 @@ struct nvme_fc_remote_port { | |||
346 | * indicating an FC transport Aborted status. | 346 | * indicating an FC transport Aborted status. |
347 | * Entrypoint is Mandatory. | 347 | * Entrypoint is Mandatory. |
348 | * | 348 | * |
349 | * @defer_rcv: Called by the transport to signal the LLLD that it has | ||
350 | * begun processing of a previously received NVME CMD IU. The LLDD | ||
351 | * is now free to re-use the rcv buffer associated with the | ||
352 | * nvmefc_tgt_fcp_req. | ||
353 | * | ||
354 | * @max_hw_queues: indicates the maximum number of hw queues the LLDD | 349 | * @max_hw_queues: indicates the maximum number of hw queues the LLDD |
355 | * supports for cpu affinitization. | 350 | * supports for cpu affinitization. |
356 | * Value is Mandatory. Must be at least 1. | 351 | * Value is Mandatory. Must be at least 1. |
@@ -806,11 +801,19 @@ struct nvmet_fc_target_port { | |||
806 | * outstanding operation (if there was one) to complete, then will | 801 | * outstanding operation (if there was one) to complete, then will |
807 | * call the fcp_req_release() callback to return the command's | 802 | * call the fcp_req_release() callback to return the command's |
808 | * exchange context back to the LLDD. | 803 | * exchange context back to the LLDD. |
804 | * Entrypoint is Mandatory. | ||
809 | * | 805 | * |
810 | * @fcp_req_release: Called by the transport to return a nvmefc_tgt_fcp_req | 806 | * @fcp_req_release: Called by the transport to return a nvmefc_tgt_fcp_req |
811 | * to the LLDD after all operations on the fcp operation are complete. | 807 | * to the LLDD after all operations on the fcp operation are complete. |
812 | * This may be due to the command completing or upon completion of | 808 | * This may be due to the command completing or upon completion of |
813 | * abort cleanup. | 809 | * abort cleanup. |
810 | * Entrypoint is Mandatory. | ||
811 | * | ||
812 | * @defer_rcv: Called by the transport to signal the LLLD that it has | ||
813 | * begun processing of a previously received NVME CMD IU. The LLDD | ||
814 | * is now free to re-use the rcv buffer associated with the | ||
815 | * nvmefc_tgt_fcp_req. | ||
816 | * Entrypoint is Optional. | ||
814 | * | 817 | * |
815 | * @max_hw_queues: indicates the maximum number of hw queues the LLDD | 818 | * @max_hw_queues: indicates the maximum number of hw queues the LLDD |
816 | * supports for cpu affinitization. | 819 | * supports for cpu affinitization. |
diff --git a/include/linux/nvme.h b/include/linux/nvme.h index 87723c86f136..9310ce77d8e1 100644 --- a/include/linux/nvme.h +++ b/include/linux/nvme.h | |||
@@ -471,12 +471,14 @@ enum nvme_opcode { | |||
471 | * | 471 | * |
472 | * @NVME_SGL_FMT_ADDRESS: absolute address of the data block | 472 | * @NVME_SGL_FMT_ADDRESS: absolute address of the data block |
473 | * @NVME_SGL_FMT_OFFSET: relative offset of the in-capsule data block | 473 | * @NVME_SGL_FMT_OFFSET: relative offset of the in-capsule data block |
474 | * @NVME_SGL_FMT_TRANSPORT_A: transport defined format, value 0xA | ||
474 | * @NVME_SGL_FMT_INVALIDATE: RDMA transport specific remote invalidation | 475 | * @NVME_SGL_FMT_INVALIDATE: RDMA transport specific remote invalidation |
475 | * request subtype | 476 | * request subtype |
476 | */ | 477 | */ |
477 | enum { | 478 | enum { |
478 | NVME_SGL_FMT_ADDRESS = 0x00, | 479 | NVME_SGL_FMT_ADDRESS = 0x00, |
479 | NVME_SGL_FMT_OFFSET = 0x01, | 480 | NVME_SGL_FMT_OFFSET = 0x01, |
481 | NVME_SGL_FMT_TRANSPORT_A = 0x0A, | ||
480 | NVME_SGL_FMT_INVALIDATE = 0x0f, | 482 | NVME_SGL_FMT_INVALIDATE = 0x0f, |
481 | }; | 483 | }; |
482 | 484 | ||
@@ -490,12 +492,16 @@ enum { | |||
490 | * | 492 | * |
491 | * For struct nvme_keyed_sgl_desc: | 493 | * For struct nvme_keyed_sgl_desc: |
492 | * @NVME_KEY_SGL_FMT_DATA_DESC: keyed data block descriptor | 494 | * @NVME_KEY_SGL_FMT_DATA_DESC: keyed data block descriptor |
495 | * | ||
496 | * Transport-specific SGL types: | ||
497 | * @NVME_TRANSPORT_SGL_DATA_DESC: Transport SGL data dlock descriptor | ||
493 | */ | 498 | */ |
494 | enum { | 499 | enum { |
495 | NVME_SGL_FMT_DATA_DESC = 0x00, | 500 | NVME_SGL_FMT_DATA_DESC = 0x00, |
496 | NVME_SGL_FMT_SEG_DESC = 0x02, | 501 | NVME_SGL_FMT_SEG_DESC = 0x02, |
497 | NVME_SGL_FMT_LAST_SEG_DESC = 0x03, | 502 | NVME_SGL_FMT_LAST_SEG_DESC = 0x03, |
498 | NVME_KEY_SGL_FMT_DATA_DESC = 0x04, | 503 | NVME_KEY_SGL_FMT_DATA_DESC = 0x04, |
504 | NVME_TRANSPORT_SGL_DATA_DESC = 0x05, | ||
499 | }; | 505 | }; |
500 | 506 | ||
501 | struct nvme_sgl_desc { | 507 | struct nvme_sgl_desc { |
@@ -1127,19 +1133,6 @@ enum { | |||
1127 | NVME_SC_UNWRITTEN_BLOCK = 0x287, | 1133 | NVME_SC_UNWRITTEN_BLOCK = 0x287, |
1128 | 1134 | ||
1129 | NVME_SC_DNR = 0x4000, | 1135 | NVME_SC_DNR = 0x4000, |
1130 | |||
1131 | |||
1132 | /* | ||
1133 | * FC Transport-specific error status values for NVME commands | ||
1134 | * | ||
1135 | * Transport-specific status code values must be in the range 0xB0..0xBF | ||
1136 | */ | ||
1137 | |||
1138 | /* Generic FC failure - catchall */ | ||
1139 | NVME_SC_FC_TRANSPORT_ERROR = 0x00B0, | ||
1140 | |||
1141 | /* I/O failure due to FC ABTS'd */ | ||
1142 | NVME_SC_FC_TRANSPORT_ABORTED = 0x00B1, | ||
1143 | }; | 1136 | }; |
1144 | 1137 | ||
1145 | struct nvme_completion { | 1138 | struct nvme_completion { |
diff --git a/include/linux/of_platform.h b/include/linux/of_platform.h index e0d1946270f3..fb908e598348 100644 --- a/include/linux/of_platform.h +++ b/include/linux/of_platform.h | |||
@@ -57,7 +57,14 @@ extern const struct of_device_id of_default_bus_match_table[]; | |||
57 | extern struct platform_device *of_device_alloc(struct device_node *np, | 57 | extern struct platform_device *of_device_alloc(struct device_node *np, |
58 | const char *bus_id, | 58 | const char *bus_id, |
59 | struct device *parent); | 59 | struct device *parent); |
60 | #ifdef CONFIG_OF | ||
60 | extern struct platform_device *of_find_device_by_node(struct device_node *np); | 61 | extern struct platform_device *of_find_device_by_node(struct device_node *np); |
62 | #else | ||
63 | static inline struct platform_device *of_find_device_by_node(struct device_node *np) | ||
64 | { | ||
65 | return NULL; | ||
66 | } | ||
67 | #endif | ||
61 | 68 | ||
62 | /* Platform devices and busses creation */ | 69 | /* Platform devices and busses creation */ |
63 | extern struct platform_device *of_platform_device_create(struct device_node *np, | 70 | extern struct platform_device *of_platform_device_create(struct device_node *np, |
diff --git a/include/linux/pci.h b/include/linux/pci.h index f68c58a93dd0..f4f8ee5a7362 100644 --- a/include/linux/pci.h +++ b/include/linux/pci.h | |||
@@ -1685,6 +1685,8 @@ static inline int pci_get_new_domain_nr(void) { return -ENOSYS; } | |||
1685 | 1685 | ||
1686 | #define dev_is_pci(d) (false) | 1686 | #define dev_is_pci(d) (false) |
1687 | #define dev_is_pf(d) (false) | 1687 | #define dev_is_pf(d) (false) |
1688 | static inline bool pci_acs_enabled(struct pci_dev *pdev, u16 acs_flags) | ||
1689 | { return false; } | ||
1688 | #endif /* CONFIG_PCI */ | 1690 | #endif /* CONFIG_PCI */ |
1689 | 1691 | ||
1690 | /* Include architecture-dependent settings and functions */ | 1692 | /* Include architecture-dependent settings and functions */ |
diff --git a/include/linux/sched.h b/include/linux/sched.h index 92fb8dd5a9e4..26a7df4e558c 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h | |||
@@ -65,25 +65,23 @@ struct task_group; | |||
65 | */ | 65 | */ |
66 | 66 | ||
67 | /* Used in tsk->state: */ | 67 | /* Used in tsk->state: */ |
68 | #define TASK_RUNNING 0 | 68 | #define TASK_RUNNING 0x0000 |
69 | #define TASK_INTERRUPTIBLE 1 | 69 | #define TASK_INTERRUPTIBLE 0x0001 |
70 | #define TASK_UNINTERRUPTIBLE 2 | 70 | #define TASK_UNINTERRUPTIBLE 0x0002 |
71 | #define __TASK_STOPPED 4 | 71 | #define __TASK_STOPPED 0x0004 |
72 | #define __TASK_TRACED 8 | 72 | #define __TASK_TRACED 0x0008 |
73 | /* Used in tsk->exit_state: */ | 73 | /* Used in tsk->exit_state: */ |
74 | #define EXIT_DEAD 16 | 74 | #define EXIT_DEAD 0x0010 |
75 | #define EXIT_ZOMBIE 32 | 75 | #define EXIT_ZOMBIE 0x0020 |
76 | #define EXIT_TRACE (EXIT_ZOMBIE | EXIT_DEAD) | 76 | #define EXIT_TRACE (EXIT_ZOMBIE | EXIT_DEAD) |
77 | /* Used in tsk->state again: */ | 77 | /* Used in tsk->state again: */ |
78 | #define TASK_DEAD 64 | 78 | #define TASK_PARKED 0x0040 |
79 | #define TASK_WAKEKILL 128 | 79 | #define TASK_DEAD 0x0080 |
80 | #define TASK_WAKING 256 | 80 | #define TASK_WAKEKILL 0x0100 |
81 | #define TASK_PARKED 512 | 81 | #define TASK_WAKING 0x0200 |
82 | #define TASK_NOLOAD 1024 | 82 | #define TASK_NOLOAD 0x0400 |
83 | #define TASK_NEW 2048 | 83 | #define TASK_NEW 0x0800 |
84 | #define TASK_STATE_MAX 4096 | 84 | #define TASK_STATE_MAX 0x1000 |
85 | |||
86 | #define TASK_STATE_TO_CHAR_STR "RSDTtXZxKWPNn" | ||
87 | 85 | ||
88 | /* Convenience macros for the sake of set_current_state: */ | 86 | /* Convenience macros for the sake of set_current_state: */ |
89 | #define TASK_KILLABLE (TASK_WAKEKILL | TASK_UNINTERRUPTIBLE) | 87 | #define TASK_KILLABLE (TASK_WAKEKILL | TASK_UNINTERRUPTIBLE) |
@@ -99,7 +97,8 @@ struct task_group; | |||
99 | /* get_task_state(): */ | 97 | /* get_task_state(): */ |
100 | #define TASK_REPORT (TASK_RUNNING | TASK_INTERRUPTIBLE | \ | 98 | #define TASK_REPORT (TASK_RUNNING | TASK_INTERRUPTIBLE | \ |
101 | TASK_UNINTERRUPTIBLE | __TASK_STOPPED | \ | 99 | TASK_UNINTERRUPTIBLE | __TASK_STOPPED | \ |
102 | __TASK_TRACED | EXIT_ZOMBIE | EXIT_DEAD) | 100 | __TASK_TRACED | EXIT_DEAD | EXIT_ZOMBIE | \ |
101 | TASK_PARKED) | ||
103 | 102 | ||
104 | #define task_is_traced(task) ((task->state & __TASK_TRACED) != 0) | 103 | #define task_is_traced(task) ((task->state & __TASK_TRACED) != 0) |
105 | 104 | ||
@@ -1243,17 +1242,34 @@ static inline pid_t task_pgrp_nr(struct task_struct *tsk) | |||
1243 | return task_pgrp_nr_ns(tsk, &init_pid_ns); | 1242 | return task_pgrp_nr_ns(tsk, &init_pid_ns); |
1244 | } | 1243 | } |
1245 | 1244 | ||
1246 | static inline char task_state_to_char(struct task_struct *task) | 1245 | #define TASK_REPORT_IDLE (TASK_REPORT + 1) |
1246 | #define TASK_REPORT_MAX (TASK_REPORT_IDLE << 1) | ||
1247 | |||
1248 | static inline unsigned int __get_task_state(struct task_struct *tsk) | ||
1249 | { | ||
1250 | unsigned int tsk_state = READ_ONCE(tsk->state); | ||
1251 | unsigned int state = (tsk_state | tsk->exit_state) & TASK_REPORT; | ||
1252 | |||
1253 | BUILD_BUG_ON_NOT_POWER_OF_2(TASK_REPORT_MAX); | ||
1254 | |||
1255 | if (tsk_state == TASK_IDLE) | ||
1256 | state = TASK_REPORT_IDLE; | ||
1257 | |||
1258 | return fls(state); | ||
1259 | } | ||
1260 | |||
1261 | static inline char __task_state_to_char(unsigned int state) | ||
1247 | { | 1262 | { |
1248 | const char stat_nam[] = TASK_STATE_TO_CHAR_STR; | 1263 | static const char state_char[] = "RSDTtXZPI"; |
1249 | unsigned long state = task->state; | ||
1250 | 1264 | ||
1251 | state = state ? __ffs(state) + 1 : 0; | 1265 | BUILD_BUG_ON(1 + ilog2(TASK_REPORT_MAX) != sizeof(state_char) - 1); |
1252 | 1266 | ||
1253 | /* Make sure the string lines up properly with the number of task states: */ | 1267 | return state_char[state]; |
1254 | BUILD_BUG_ON(sizeof(TASK_STATE_TO_CHAR_STR)-1 != ilog2(TASK_STATE_MAX)+1); | 1268 | } |
1255 | 1269 | ||
1256 | return state < sizeof(stat_nam) - 1 ? stat_nam[state] : '?'; | 1270 | static inline char task_state_to_char(struct task_struct *tsk) |
1271 | { | ||
1272 | return __task_state_to_char(__get_task_state(tsk)); | ||
1257 | } | 1273 | } |
1258 | 1274 | ||
1259 | /** | 1275 | /** |
diff --git a/include/linux/sched/mm.h b/include/linux/sched/mm.h index 3a19c253bdb1..ae53e413fb13 100644 --- a/include/linux/sched/mm.h +++ b/include/linux/sched/mm.h | |||
@@ -84,6 +84,12 @@ static inline bool mmget_not_zero(struct mm_struct *mm) | |||
84 | 84 | ||
85 | /* mmput gets rid of the mappings and all user-space */ | 85 | /* mmput gets rid of the mappings and all user-space */ |
86 | extern void mmput(struct mm_struct *); | 86 | extern void mmput(struct mm_struct *); |
87 | #ifdef CONFIG_MMU | ||
88 | /* same as above but performs the slow path from the async context. Can | ||
89 | * be called from the atomic context as well | ||
90 | */ | ||
91 | void mmput_async(struct mm_struct *); | ||
92 | #endif | ||
87 | 93 | ||
88 | /* Grab a reference to a task's mm, if it is not already going away */ | 94 | /* Grab a reference to a task's mm, if it is not already going away */ |
89 | extern struct mm_struct *get_task_mm(struct task_struct *task); | 95 | extern struct mm_struct *get_task_mm(struct task_struct *task); |
diff --git a/include/linux/seccomp.h b/include/linux/seccomp.h index ecc296c137cd..c8bef436b61d 100644 --- a/include/linux/seccomp.h +++ b/include/linux/seccomp.h | |||
@@ -3,7 +3,8 @@ | |||
3 | 3 | ||
4 | #include <uapi/linux/seccomp.h> | 4 | #include <uapi/linux/seccomp.h> |
5 | 5 | ||
6 | #define SECCOMP_FILTER_FLAG_MASK (SECCOMP_FILTER_FLAG_TSYNC) | 6 | #define SECCOMP_FILTER_FLAG_MASK (SECCOMP_FILTER_FLAG_TSYNC | \ |
7 | SECCOMP_FILTER_FLAG_LOG) | ||
7 | 8 | ||
8 | #ifdef CONFIG_SECCOMP | 9 | #ifdef CONFIG_SECCOMP |
9 | 10 | ||
diff --git a/include/linux/smpboot.h b/include/linux/smpboot.h index 12910cf19869..c149aa7bedf3 100644 --- a/include/linux/smpboot.h +++ b/include/linux/smpboot.h | |||
@@ -55,7 +55,7 @@ smpboot_register_percpu_thread(struct smp_hotplug_thread *plug_thread) | |||
55 | } | 55 | } |
56 | 56 | ||
57 | void smpboot_unregister_percpu_thread(struct smp_hotplug_thread *plug_thread); | 57 | void smpboot_unregister_percpu_thread(struct smp_hotplug_thread *plug_thread); |
58 | int smpboot_update_cpumask_percpu_thread(struct smp_hotplug_thread *plug_thread, | 58 | void smpboot_update_cpumask_percpu_thread(struct smp_hotplug_thread *plug_thread, |
59 | const struct cpumask *); | 59 | const struct cpumask *); |
60 | 60 | ||
61 | #endif | 61 | #endif |
diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h index 95606a2d556f..a78186d826d7 100644 --- a/include/linux/syscalls.h +++ b/include/linux/syscalls.h | |||
@@ -221,21 +221,25 @@ static inline int is_syscall_trace_event(struct trace_event_call *tp_event) | |||
221 | } \ | 221 | } \ |
222 | static inline long SYSC##name(__MAP(x,__SC_DECL,__VA_ARGS__)) | 222 | static inline long SYSC##name(__MAP(x,__SC_DECL,__VA_ARGS__)) |
223 | 223 | ||
224 | #ifdef TIF_FSCHECK | ||
225 | /* | 224 | /* |
226 | * Called before coming back to user-mode. Returning to user-mode with an | 225 | * Called before coming back to user-mode. Returning to user-mode with an |
227 | * address limit different than USER_DS can allow to overwrite kernel memory. | 226 | * address limit different than USER_DS can allow to overwrite kernel memory. |
228 | */ | 227 | */ |
229 | static inline void addr_limit_user_check(void) | 228 | static inline void addr_limit_user_check(void) |
230 | { | 229 | { |
231 | 230 | #ifdef TIF_FSCHECK | |
232 | if (!test_thread_flag(TIF_FSCHECK)) | 231 | if (!test_thread_flag(TIF_FSCHECK)) |
233 | return; | 232 | return; |
233 | #endif | ||
234 | 234 | ||
235 | BUG_ON(!segment_eq(get_fs(), USER_DS)); | 235 | if (CHECK_DATA_CORRUPTION(!segment_eq(get_fs(), USER_DS), |
236 | "Invalid address limit on user-mode return")) | ||
237 | force_sig(SIGKILL, current); | ||
238 | |||
239 | #ifdef TIF_FSCHECK | ||
236 | clear_thread_flag(TIF_FSCHECK); | 240 | clear_thread_flag(TIF_FSCHECK); |
237 | } | ||
238 | #endif | 241 | #endif |
242 | } | ||
239 | 243 | ||
240 | asmlinkage long sys32_quotactl(unsigned int cmd, const char __user *special, | 244 | asmlinkage long sys32_quotactl(unsigned int cmd, const char __user *special, |
241 | qid_t id, void __user *addr); | 245 | qid_t id, void __user *addr); |
diff --git a/include/linux/timer.h b/include/linux/timer.h index e6789b8757d5..6383c528b148 100644 --- a/include/linux/timer.h +++ b/include/linux/timer.h | |||
@@ -168,6 +168,20 @@ static inline void init_timer_on_stack_key(struct timer_list *timer, | |||
168 | #define setup_pinned_deferrable_timer_on_stack(timer, fn, data) \ | 168 | #define setup_pinned_deferrable_timer_on_stack(timer, fn, data) \ |
169 | __setup_timer_on_stack((timer), (fn), (data), TIMER_DEFERRABLE | TIMER_PINNED) | 169 | __setup_timer_on_stack((timer), (fn), (data), TIMER_DEFERRABLE | TIMER_PINNED) |
170 | 170 | ||
171 | #define TIMER_DATA_TYPE unsigned long | ||
172 | #define TIMER_FUNC_TYPE void (*)(TIMER_DATA_TYPE) | ||
173 | |||
174 | static inline void timer_setup(struct timer_list *timer, | ||
175 | void (*callback)(struct timer_list *), | ||
176 | unsigned int flags) | ||
177 | { | ||
178 | __setup_timer(timer, (TIMER_FUNC_TYPE)callback, | ||
179 | (TIMER_DATA_TYPE)timer, flags); | ||
180 | } | ||
181 | |||
182 | #define from_timer(var, callback_timer, timer_fieldname) \ | ||
183 | container_of(callback_timer, typeof(*var), timer_fieldname) | ||
184 | |||
171 | /** | 185 | /** |
172 | * timer_pending - is a timer pending? | 186 | * timer_pending - is a timer pending? |
173 | * @timer: the timer in question | 187 | * @timer: the timer in question |
diff --git a/include/linux/trace_events.h b/include/linux/trace_events.h index 7f11050746ae..2e0f22298fe9 100644 --- a/include/linux/trace_events.h +++ b/include/linux/trace_events.h | |||
@@ -272,6 +272,7 @@ struct trace_event_call { | |||
272 | int perf_refcount; | 272 | int perf_refcount; |
273 | struct hlist_head __percpu *perf_events; | 273 | struct hlist_head __percpu *perf_events; |
274 | struct bpf_prog *prog; | 274 | struct bpf_prog *prog; |
275 | struct perf_event *bpf_prog_owner; | ||
275 | 276 | ||
276 | int (*perf_perm)(struct trace_event_call *, | 277 | int (*perf_perm)(struct trace_event_call *, |
277 | struct perf_event *); | 278 | struct perf_event *); |
diff --git a/include/net/dst.h b/include/net/dst.h index 93568bd0a352..06a6765da074 100644 --- a/include/net/dst.h +++ b/include/net/dst.h | |||
@@ -271,7 +271,7 @@ static inline void dst_use_noref(struct dst_entry *dst, unsigned long time) | |||
271 | static inline struct dst_entry *dst_clone(struct dst_entry *dst) | 271 | static inline struct dst_entry *dst_clone(struct dst_entry *dst) |
272 | { | 272 | { |
273 | if (dst) | 273 | if (dst) |
274 | atomic_inc(&dst->__refcnt); | 274 | dst_hold(dst); |
275 | return dst; | 275 | return dst; |
276 | } | 276 | } |
277 | 277 | ||
@@ -312,21 +312,6 @@ static inline void skb_dst_copy(struct sk_buff *nskb, const struct sk_buff *oskb | |||
312 | } | 312 | } |
313 | 313 | ||
314 | /** | 314 | /** |
315 | * skb_dst_force - makes sure skb dst is refcounted | ||
316 | * @skb: buffer | ||
317 | * | ||
318 | * If dst is not yet refcounted, let's do it | ||
319 | */ | ||
320 | static inline void skb_dst_force(struct sk_buff *skb) | ||
321 | { | ||
322 | if (skb_dst_is_noref(skb)) { | ||
323 | WARN_ON(!rcu_read_lock_held()); | ||
324 | skb->_skb_refdst &= ~SKB_DST_NOREF; | ||
325 | dst_clone(skb_dst(skb)); | ||
326 | } | ||
327 | } | ||
328 | |||
329 | /** | ||
330 | * dst_hold_safe - Take a reference on a dst if possible | 315 | * dst_hold_safe - Take a reference on a dst if possible |
331 | * @dst: pointer to dst entry | 316 | * @dst: pointer to dst entry |
332 | * | 317 | * |
@@ -339,16 +324,17 @@ static inline bool dst_hold_safe(struct dst_entry *dst) | |||
339 | } | 324 | } |
340 | 325 | ||
341 | /** | 326 | /** |
342 | * skb_dst_force_safe - makes sure skb dst is refcounted | 327 | * skb_dst_force - makes sure skb dst is refcounted |
343 | * @skb: buffer | 328 | * @skb: buffer |
344 | * | 329 | * |
345 | * If dst is not yet refcounted and not destroyed, grab a ref on it. | 330 | * If dst is not yet refcounted and not destroyed, grab a ref on it. |
346 | */ | 331 | */ |
347 | static inline void skb_dst_force_safe(struct sk_buff *skb) | 332 | static inline void skb_dst_force(struct sk_buff *skb) |
348 | { | 333 | { |
349 | if (skb_dst_is_noref(skb)) { | 334 | if (skb_dst_is_noref(skb)) { |
350 | struct dst_entry *dst = skb_dst(skb); | 335 | struct dst_entry *dst = skb_dst(skb); |
351 | 336 | ||
337 | WARN_ON(!rcu_read_lock_held()); | ||
352 | if (!dst_hold_safe(dst)) | 338 | if (!dst_hold_safe(dst)) |
353 | dst = NULL; | 339 | dst = NULL; |
354 | 340 | ||
diff --git a/include/net/netlink.h b/include/net/netlink.h index e51cf5f81597..14c289393071 100644 --- a/include/net/netlink.h +++ b/include/net/netlink.h | |||
@@ -773,7 +773,10 @@ static inline int nla_parse_nested(struct nlattr *tb[], int maxtype, | |||
773 | */ | 773 | */ |
774 | static inline int nla_put_u8(struct sk_buff *skb, int attrtype, u8 value) | 774 | static inline int nla_put_u8(struct sk_buff *skb, int attrtype, u8 value) |
775 | { | 775 | { |
776 | return nla_put(skb, attrtype, sizeof(u8), &value); | 776 | /* temporary variables to work around GCC PR81715 with asan-stack=1 */ |
777 | u8 tmp = value; | ||
778 | |||
779 | return nla_put(skb, attrtype, sizeof(u8), &tmp); | ||
777 | } | 780 | } |
778 | 781 | ||
779 | /** | 782 | /** |
@@ -784,7 +787,9 @@ static inline int nla_put_u8(struct sk_buff *skb, int attrtype, u8 value) | |||
784 | */ | 787 | */ |
785 | static inline int nla_put_u16(struct sk_buff *skb, int attrtype, u16 value) | 788 | static inline int nla_put_u16(struct sk_buff *skb, int attrtype, u16 value) |
786 | { | 789 | { |
787 | return nla_put(skb, attrtype, sizeof(u16), &value); | 790 | u16 tmp = value; |
791 | |||
792 | return nla_put(skb, attrtype, sizeof(u16), &tmp); | ||
788 | } | 793 | } |
789 | 794 | ||
790 | /** | 795 | /** |
@@ -795,7 +800,9 @@ static inline int nla_put_u16(struct sk_buff *skb, int attrtype, u16 value) | |||
795 | */ | 800 | */ |
796 | static inline int nla_put_be16(struct sk_buff *skb, int attrtype, __be16 value) | 801 | static inline int nla_put_be16(struct sk_buff *skb, int attrtype, __be16 value) |
797 | { | 802 | { |
798 | return nla_put(skb, attrtype, sizeof(__be16), &value); | 803 | __be16 tmp = value; |
804 | |||
805 | return nla_put(skb, attrtype, sizeof(__be16), &tmp); | ||
799 | } | 806 | } |
800 | 807 | ||
801 | /** | 808 | /** |
@@ -806,7 +813,9 @@ static inline int nla_put_be16(struct sk_buff *skb, int attrtype, __be16 value) | |||
806 | */ | 813 | */ |
807 | static inline int nla_put_net16(struct sk_buff *skb, int attrtype, __be16 value) | 814 | static inline int nla_put_net16(struct sk_buff *skb, int attrtype, __be16 value) |
808 | { | 815 | { |
809 | return nla_put_be16(skb, attrtype | NLA_F_NET_BYTEORDER, value); | 816 | __be16 tmp = value; |
817 | |||
818 | return nla_put_be16(skb, attrtype | NLA_F_NET_BYTEORDER, tmp); | ||
810 | } | 819 | } |
811 | 820 | ||
812 | /** | 821 | /** |
@@ -817,7 +826,9 @@ static inline int nla_put_net16(struct sk_buff *skb, int attrtype, __be16 value) | |||
817 | */ | 826 | */ |
818 | static inline int nla_put_le16(struct sk_buff *skb, int attrtype, __le16 value) | 827 | static inline int nla_put_le16(struct sk_buff *skb, int attrtype, __le16 value) |
819 | { | 828 | { |
820 | return nla_put(skb, attrtype, sizeof(__le16), &value); | 829 | __le16 tmp = value; |
830 | |||
831 | return nla_put(skb, attrtype, sizeof(__le16), &tmp); | ||
821 | } | 832 | } |
822 | 833 | ||
823 | /** | 834 | /** |
@@ -828,7 +839,9 @@ static inline int nla_put_le16(struct sk_buff *skb, int attrtype, __le16 value) | |||
828 | */ | 839 | */ |
829 | static inline int nla_put_u32(struct sk_buff *skb, int attrtype, u32 value) | 840 | static inline int nla_put_u32(struct sk_buff *skb, int attrtype, u32 value) |
830 | { | 841 | { |
831 | return nla_put(skb, attrtype, sizeof(u32), &value); | 842 | u32 tmp = value; |
843 | |||
844 | return nla_put(skb, attrtype, sizeof(u32), &tmp); | ||
832 | } | 845 | } |
833 | 846 | ||
834 | /** | 847 | /** |
@@ -839,7 +852,9 @@ static inline int nla_put_u32(struct sk_buff *skb, int attrtype, u32 value) | |||
839 | */ | 852 | */ |
840 | static inline int nla_put_be32(struct sk_buff *skb, int attrtype, __be32 value) | 853 | static inline int nla_put_be32(struct sk_buff *skb, int attrtype, __be32 value) |
841 | { | 854 | { |
842 | return nla_put(skb, attrtype, sizeof(__be32), &value); | 855 | __be32 tmp = value; |
856 | |||
857 | return nla_put(skb, attrtype, sizeof(__be32), &tmp); | ||
843 | } | 858 | } |
844 | 859 | ||
845 | /** | 860 | /** |
@@ -850,7 +865,9 @@ static inline int nla_put_be32(struct sk_buff *skb, int attrtype, __be32 value) | |||
850 | */ | 865 | */ |
851 | static inline int nla_put_net32(struct sk_buff *skb, int attrtype, __be32 value) | 866 | static inline int nla_put_net32(struct sk_buff *skb, int attrtype, __be32 value) |
852 | { | 867 | { |
853 | return nla_put_be32(skb, attrtype | NLA_F_NET_BYTEORDER, value); | 868 | __be32 tmp = value; |
869 | |||
870 | return nla_put_be32(skb, attrtype | NLA_F_NET_BYTEORDER, tmp); | ||
854 | } | 871 | } |
855 | 872 | ||
856 | /** | 873 | /** |
@@ -861,7 +878,9 @@ static inline int nla_put_net32(struct sk_buff *skb, int attrtype, __be32 value) | |||
861 | */ | 878 | */ |
862 | static inline int nla_put_le32(struct sk_buff *skb, int attrtype, __le32 value) | 879 | static inline int nla_put_le32(struct sk_buff *skb, int attrtype, __le32 value) |
863 | { | 880 | { |
864 | return nla_put(skb, attrtype, sizeof(__le32), &value); | 881 | __le32 tmp = value; |
882 | |||
883 | return nla_put(skb, attrtype, sizeof(__le32), &tmp); | ||
865 | } | 884 | } |
866 | 885 | ||
867 | /** | 886 | /** |
@@ -874,7 +893,9 @@ static inline int nla_put_le32(struct sk_buff *skb, int attrtype, __le32 value) | |||
874 | static inline int nla_put_u64_64bit(struct sk_buff *skb, int attrtype, | 893 | static inline int nla_put_u64_64bit(struct sk_buff *skb, int attrtype, |
875 | u64 value, int padattr) | 894 | u64 value, int padattr) |
876 | { | 895 | { |
877 | return nla_put_64bit(skb, attrtype, sizeof(u64), &value, padattr); | 896 | u64 tmp = value; |
897 | |||
898 | return nla_put_64bit(skb, attrtype, sizeof(u64), &tmp, padattr); | ||
878 | } | 899 | } |
879 | 900 | ||
880 | /** | 901 | /** |
@@ -887,7 +908,9 @@ static inline int nla_put_u64_64bit(struct sk_buff *skb, int attrtype, | |||
887 | static inline int nla_put_be64(struct sk_buff *skb, int attrtype, __be64 value, | 908 | static inline int nla_put_be64(struct sk_buff *skb, int attrtype, __be64 value, |
888 | int padattr) | 909 | int padattr) |
889 | { | 910 | { |
890 | return nla_put_64bit(skb, attrtype, sizeof(__be64), &value, padattr); | 911 | __be64 tmp = value; |
912 | |||
913 | return nla_put_64bit(skb, attrtype, sizeof(__be64), &tmp, padattr); | ||
891 | } | 914 | } |
892 | 915 | ||
893 | /** | 916 | /** |
@@ -900,7 +923,9 @@ static inline int nla_put_be64(struct sk_buff *skb, int attrtype, __be64 value, | |||
900 | static inline int nla_put_net64(struct sk_buff *skb, int attrtype, __be64 value, | 923 | static inline int nla_put_net64(struct sk_buff *skb, int attrtype, __be64 value, |
901 | int padattr) | 924 | int padattr) |
902 | { | 925 | { |
903 | return nla_put_be64(skb, attrtype | NLA_F_NET_BYTEORDER, value, | 926 | __be64 tmp = value; |
927 | |||
928 | return nla_put_be64(skb, attrtype | NLA_F_NET_BYTEORDER, tmp, | ||
904 | padattr); | 929 | padattr); |
905 | } | 930 | } |
906 | 931 | ||
@@ -914,7 +939,9 @@ static inline int nla_put_net64(struct sk_buff *skb, int attrtype, __be64 value, | |||
914 | static inline int nla_put_le64(struct sk_buff *skb, int attrtype, __le64 value, | 939 | static inline int nla_put_le64(struct sk_buff *skb, int attrtype, __le64 value, |
915 | int padattr) | 940 | int padattr) |
916 | { | 941 | { |
917 | return nla_put_64bit(skb, attrtype, sizeof(__le64), &value, padattr); | 942 | __le64 tmp = value; |
943 | |||
944 | return nla_put_64bit(skb, attrtype, sizeof(__le64), &tmp, padattr); | ||
918 | } | 945 | } |
919 | 946 | ||
920 | /** | 947 | /** |
@@ -925,7 +952,9 @@ static inline int nla_put_le64(struct sk_buff *skb, int attrtype, __le64 value, | |||
925 | */ | 952 | */ |
926 | static inline int nla_put_s8(struct sk_buff *skb, int attrtype, s8 value) | 953 | static inline int nla_put_s8(struct sk_buff *skb, int attrtype, s8 value) |
927 | { | 954 | { |
928 | return nla_put(skb, attrtype, sizeof(s8), &value); | 955 | s8 tmp = value; |
956 | |||
957 | return nla_put(skb, attrtype, sizeof(s8), &tmp); | ||
929 | } | 958 | } |
930 | 959 | ||
931 | /** | 960 | /** |
@@ -936,7 +965,9 @@ static inline int nla_put_s8(struct sk_buff *skb, int attrtype, s8 value) | |||
936 | */ | 965 | */ |
937 | static inline int nla_put_s16(struct sk_buff *skb, int attrtype, s16 value) | 966 | static inline int nla_put_s16(struct sk_buff *skb, int attrtype, s16 value) |
938 | { | 967 | { |
939 | return nla_put(skb, attrtype, sizeof(s16), &value); | 968 | s16 tmp = value; |
969 | |||
970 | return nla_put(skb, attrtype, sizeof(s16), &tmp); | ||
940 | } | 971 | } |
941 | 972 | ||
942 | /** | 973 | /** |
@@ -947,7 +978,9 @@ static inline int nla_put_s16(struct sk_buff *skb, int attrtype, s16 value) | |||
947 | */ | 978 | */ |
948 | static inline int nla_put_s32(struct sk_buff *skb, int attrtype, s32 value) | 979 | static inline int nla_put_s32(struct sk_buff *skb, int attrtype, s32 value) |
949 | { | 980 | { |
950 | return nla_put(skb, attrtype, sizeof(s32), &value); | 981 | s32 tmp = value; |
982 | |||
983 | return nla_put(skb, attrtype, sizeof(s32), &tmp); | ||
951 | } | 984 | } |
952 | 985 | ||
953 | /** | 986 | /** |
@@ -960,7 +993,9 @@ static inline int nla_put_s32(struct sk_buff *skb, int attrtype, s32 value) | |||
960 | static inline int nla_put_s64(struct sk_buff *skb, int attrtype, s64 value, | 993 | static inline int nla_put_s64(struct sk_buff *skb, int attrtype, s64 value, |
961 | int padattr) | 994 | int padattr) |
962 | { | 995 | { |
963 | return nla_put_64bit(skb, attrtype, sizeof(s64), &value, padattr); | 996 | s64 tmp = value; |
997 | |||
998 | return nla_put_64bit(skb, attrtype, sizeof(s64), &tmp, padattr); | ||
964 | } | 999 | } |
965 | 1000 | ||
966 | /** | 1001 | /** |
@@ -1010,7 +1045,9 @@ static inline int nla_put_msecs(struct sk_buff *skb, int attrtype, | |||
1010 | static inline int nla_put_in_addr(struct sk_buff *skb, int attrtype, | 1045 | static inline int nla_put_in_addr(struct sk_buff *skb, int attrtype, |
1011 | __be32 addr) | 1046 | __be32 addr) |
1012 | { | 1047 | { |
1013 | return nla_put_be32(skb, attrtype, addr); | 1048 | __be32 tmp = addr; |
1049 | |||
1050 | return nla_put_be32(skb, attrtype, tmp); | ||
1014 | } | 1051 | } |
1015 | 1052 | ||
1016 | /** | 1053 | /** |
diff --git a/include/net/protocol.h b/include/net/protocol.h index 65ba335b0e7e..4fc75f7ae23b 100644 --- a/include/net/protocol.h +++ b/include/net/protocol.h | |||
@@ -39,8 +39,8 @@ | |||
39 | 39 | ||
40 | /* This is used to register protocols. */ | 40 | /* This is used to register protocols. */ |
41 | struct net_protocol { | 41 | struct net_protocol { |
42 | void (*early_demux)(struct sk_buff *skb); | 42 | int (*early_demux)(struct sk_buff *skb); |
43 | void (*early_demux_handler)(struct sk_buff *skb); | 43 | int (*early_demux_handler)(struct sk_buff *skb); |
44 | int (*handler)(struct sk_buff *skb); | 44 | int (*handler)(struct sk_buff *skb); |
45 | void (*err_handler)(struct sk_buff *skb, u32 info); | 45 | void (*err_handler)(struct sk_buff *skb, u32 info); |
46 | unsigned int no_policy:1, | 46 | unsigned int no_policy:1, |
diff --git a/include/net/route.h b/include/net/route.h index 1b09a9368c68..d538e6db1afe 100644 --- a/include/net/route.h +++ b/include/net/route.h | |||
@@ -175,7 +175,9 @@ static inline struct rtable *ip_route_output_gre(struct net *net, struct flowi4 | |||
175 | fl4->fl4_gre_key = gre_key; | 175 | fl4->fl4_gre_key = gre_key; |
176 | return ip_route_output_key(net, fl4); | 176 | return ip_route_output_key(net, fl4); |
177 | } | 177 | } |
178 | 178 | int ip_mc_validate_source(struct sk_buff *skb, __be32 daddr, __be32 saddr, | |
179 | u8 tos, struct net_device *dev, | ||
180 | struct in_device *in_dev, u32 *itag); | ||
179 | int ip_route_input_noref(struct sk_buff *skb, __be32 dst, __be32 src, | 181 | int ip_route_input_noref(struct sk_buff *skb, __be32 dst, __be32 src, |
180 | u8 tos, struct net_device *devin); | 182 | u8 tos, struct net_device *devin); |
181 | int ip_route_input_rcu(struct sk_buff *skb, __be32 dst, __be32 src, | 183 | int ip_route_input_rcu(struct sk_buff *skb, __be32 dst, __be32 src, |
@@ -190,7 +192,7 @@ static inline int ip_route_input(struct sk_buff *skb, __be32 dst, __be32 src, | |||
190 | rcu_read_lock(); | 192 | rcu_read_lock(); |
191 | err = ip_route_input_noref(skb, dst, src, tos, devin); | 193 | err = ip_route_input_noref(skb, dst, src, tos, devin); |
192 | if (!err) { | 194 | if (!err) { |
193 | skb_dst_force_safe(skb); | 195 | skb_dst_force(skb); |
194 | if (!skb_dst(skb)) | 196 | if (!skb_dst(skb)) |
195 | err = -EINVAL; | 197 | err = -EINVAL; |
196 | } | 198 | } |
diff --git a/include/net/sock.h b/include/net/sock.h index 03a362568357..a6b9a8d1a6df 100644 --- a/include/net/sock.h +++ b/include/net/sock.h | |||
@@ -856,7 +856,7 @@ void sk_stream_write_space(struct sock *sk); | |||
856 | static inline void __sk_add_backlog(struct sock *sk, struct sk_buff *skb) | 856 | static inline void __sk_add_backlog(struct sock *sk, struct sk_buff *skb) |
857 | { | 857 | { |
858 | /* dont let skb dst not refcounted, we are going to leave rcu lock */ | 858 | /* dont let skb dst not refcounted, we are going to leave rcu lock */ |
859 | skb_dst_force_safe(skb); | 859 | skb_dst_force(skb); |
860 | 860 | ||
861 | if (!sk->sk_backlog.tail) | 861 | if (!sk->sk_backlog.tail) |
862 | sk->sk_backlog.head = skb; | 862 | sk->sk_backlog.head = skb; |
diff --git a/include/net/tcp.h b/include/net/tcp.h index b510f284427a..89974c5286d8 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h | |||
@@ -345,7 +345,7 @@ void tcp_v4_err(struct sk_buff *skb, u32); | |||
345 | 345 | ||
346 | void tcp_shutdown(struct sock *sk, int how); | 346 | void tcp_shutdown(struct sock *sk, int how); |
347 | 347 | ||
348 | void tcp_v4_early_demux(struct sk_buff *skb); | 348 | int tcp_v4_early_demux(struct sk_buff *skb); |
349 | int tcp_v4_rcv(struct sk_buff *skb); | 349 | int tcp_v4_rcv(struct sk_buff *skb); |
350 | 350 | ||
351 | int tcp_v4_tw_remember_stamp(struct inet_timewait_sock *tw); | 351 | int tcp_v4_tw_remember_stamp(struct inet_timewait_sock *tw); |
@@ -544,7 +544,6 @@ u32 tcp_tso_autosize(const struct sock *sk, unsigned int mss_now, | |||
544 | int min_tso_segs); | 544 | int min_tso_segs); |
545 | void __tcp_push_pending_frames(struct sock *sk, unsigned int cur_mss, | 545 | void __tcp_push_pending_frames(struct sock *sk, unsigned int cur_mss, |
546 | int nonagle); | 546 | int nonagle); |
547 | bool tcp_may_send_now(struct sock *sk); | ||
548 | int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs); | 547 | int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs); |
549 | int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs); | 548 | int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs); |
550 | void tcp_retransmit_timer(struct sock *sk); | 549 | void tcp_retransmit_timer(struct sock *sk); |
diff --git a/include/net/udp.h b/include/net/udp.h index 12dfbfe2e2d7..6c759c8594e2 100644 --- a/include/net/udp.h +++ b/include/net/udp.h | |||
@@ -259,7 +259,7 @@ static inline struct sk_buff *skb_recv_udp(struct sock *sk, unsigned int flags, | |||
259 | return __skb_recv_udp(sk, flags, noblock, &peeked, &off, err); | 259 | return __skb_recv_udp(sk, flags, noblock, &peeked, &off, err); |
260 | } | 260 | } |
261 | 261 | ||
262 | void udp_v4_early_demux(struct sk_buff *skb); | 262 | int udp_v4_early_demux(struct sk_buff *skb); |
263 | bool udp_sk_rx_dst_set(struct sock *sk, struct dst_entry *dst); | 263 | bool udp_sk_rx_dst_set(struct sock *sk, struct dst_entry *dst); |
264 | int udp_get_port(struct sock *sk, unsigned short snum, | 264 | int udp_get_port(struct sock *sk, unsigned short snum, |
265 | int (*saddr_cmp)(const struct sock *, | 265 | int (*saddr_cmp)(const struct sock *, |
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h index bdb1279a415b..e8608b2dc844 100644 --- a/include/rdma/ib_verbs.h +++ b/include/rdma/ib_verbs.h | |||
@@ -285,7 +285,7 @@ enum ib_tm_cap_flags { | |||
285 | IB_TM_CAP_RC = 1 << 0, | 285 | IB_TM_CAP_RC = 1 << 0, |
286 | }; | 286 | }; |
287 | 287 | ||
288 | struct ib_xrq_caps { | 288 | struct ib_tm_caps { |
289 | /* Max size of RNDV header */ | 289 | /* Max size of RNDV header */ |
290 | u32 max_rndv_hdr_size; | 290 | u32 max_rndv_hdr_size; |
291 | /* Max number of entries in tag matching list */ | 291 | /* Max number of entries in tag matching list */ |
@@ -358,7 +358,7 @@ struct ib_device_attr { | |||
358 | struct ib_rss_caps rss_caps; | 358 | struct ib_rss_caps rss_caps; |
359 | u32 max_wq_type_rq; | 359 | u32 max_wq_type_rq; |
360 | u32 raw_packet_caps; /* Use ib_raw_packet_caps enum */ | 360 | u32 raw_packet_caps; /* Use ib_raw_packet_caps enum */ |
361 | struct ib_xrq_caps xrq_caps; | 361 | struct ib_tm_caps tm_caps; |
362 | }; | 362 | }; |
363 | 363 | ||
364 | enum ib_mtu { | 364 | enum ib_mtu { |
@@ -1739,7 +1739,7 @@ struct ib_mr { | |||
1739 | u32 lkey; | 1739 | u32 lkey; |
1740 | u32 rkey; | 1740 | u32 rkey; |
1741 | u64 iova; | 1741 | u64 iova; |
1742 | u32 length; | 1742 | u64 length; |
1743 | unsigned int page_size; | 1743 | unsigned int page_size; |
1744 | bool need_inval; | 1744 | bool need_inval; |
1745 | union { | 1745 | union { |
diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h index 82e93ee94708..67c5a9f223f7 100644 --- a/include/scsi/scsi_device.h +++ b/include/scsi/scsi_device.h | |||
@@ -192,6 +192,7 @@ struct scsi_device { | |||
192 | unsigned no_dif:1; /* T10 PI (DIF) should be disabled */ | 192 | unsigned no_dif:1; /* T10 PI (DIF) should be disabled */ |
193 | unsigned broken_fua:1; /* Don't set FUA bit */ | 193 | unsigned broken_fua:1; /* Don't set FUA bit */ |
194 | unsigned lun_in_cdb:1; /* Store LUN bits in CDB[1] */ | 194 | unsigned lun_in_cdb:1; /* Store LUN bits in CDB[1] */ |
195 | unsigned unmap_limit_for_ws:1; /* Use the UNMAP limit for WRITE SAME */ | ||
195 | 196 | ||
196 | atomic_t disk_events_disable_depth; /* disable depth for disk events */ | 197 | atomic_t disk_events_disable_depth; /* disable depth for disk events */ |
197 | 198 | ||
diff --git a/include/scsi/scsi_devinfo.h b/include/scsi/scsi_devinfo.h index 9592570e092a..36b03013d629 100644 --- a/include/scsi/scsi_devinfo.h +++ b/include/scsi/scsi_devinfo.h | |||
@@ -29,5 +29,6 @@ | |||
29 | #define BLIST_TRY_VPD_PAGES 0x10000000 /* Attempt to read VPD pages */ | 29 | #define BLIST_TRY_VPD_PAGES 0x10000000 /* Attempt to read VPD pages */ |
30 | #define BLIST_NO_RSOC 0x20000000 /* don't try to issue RSOC */ | 30 | #define BLIST_NO_RSOC 0x20000000 /* don't try to issue RSOC */ |
31 | #define BLIST_MAX_1024 0x40000000 /* maximum 1024 sector cdb length */ | 31 | #define BLIST_MAX_1024 0x40000000 /* maximum 1024 sector cdb length */ |
32 | #define BLIST_UNMAP_LIMIT_WS 0x80000000 /* Use UNMAP limit for WRITE SAME */ | ||
32 | 33 | ||
33 | #endif | 34 | #endif |
diff --git a/include/scsi/scsi_transport_iscsi.h b/include/scsi/scsi_transport_iscsi.h index 6183d20a01fb..b266d2a3bcb1 100644 --- a/include/scsi/scsi_transport_iscsi.h +++ b/include/scsi/scsi_transport_iscsi.h | |||
@@ -434,7 +434,6 @@ extern struct iscsi_cls_session *iscsi_create_session(struct Scsi_Host *shost, | |||
434 | unsigned int target_id); | 434 | unsigned int target_id); |
435 | extern void iscsi_remove_session(struct iscsi_cls_session *session); | 435 | extern void iscsi_remove_session(struct iscsi_cls_session *session); |
436 | extern void iscsi_free_session(struct iscsi_cls_session *session); | 436 | extern void iscsi_free_session(struct iscsi_cls_session *session); |
437 | extern int iscsi_destroy_session(struct iscsi_cls_session *session); | ||
438 | extern struct iscsi_cls_conn *iscsi_create_conn(struct iscsi_cls_session *sess, | 437 | extern struct iscsi_cls_conn *iscsi_create_conn(struct iscsi_cls_session *sess, |
439 | int dd_size, uint32_t cid); | 438 | int dd_size, uint32_t cid); |
440 | extern int iscsi_destroy_conn(struct iscsi_cls_conn *conn); | 439 | extern int iscsi_destroy_conn(struct iscsi_cls_conn *conn); |
diff --git a/include/sound/hda_verbs.h b/include/sound/hda_verbs.h index d0509db6d0ec..f89cd5ee1c7a 100644 --- a/include/sound/hda_verbs.h +++ b/include/sound/hda_verbs.h | |||
@@ -95,6 +95,7 @@ enum { | |||
95 | #define AC_VERB_SET_EAPD_BTLENABLE 0x70c | 95 | #define AC_VERB_SET_EAPD_BTLENABLE 0x70c |
96 | #define AC_VERB_SET_DIGI_CONVERT_1 0x70d | 96 | #define AC_VERB_SET_DIGI_CONVERT_1 0x70d |
97 | #define AC_VERB_SET_DIGI_CONVERT_2 0x70e | 97 | #define AC_VERB_SET_DIGI_CONVERT_2 0x70e |
98 | #define AC_VERB_SET_DIGI_CONVERT_3 0x73e | ||
98 | #define AC_VERB_SET_VOLUME_KNOB_CONTROL 0x70f | 99 | #define AC_VERB_SET_VOLUME_KNOB_CONTROL 0x70f |
99 | #define AC_VERB_SET_GPIO_DATA 0x715 | 100 | #define AC_VERB_SET_GPIO_DATA 0x715 |
100 | #define AC_VERB_SET_GPIO_MASK 0x716 | 101 | #define AC_VERB_SET_GPIO_MASK 0x716 |
diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h index ae1409ffe99a..3c8b7f625670 100644 --- a/include/trace/events/sched.h +++ b/include/trace/events/sched.h | |||
@@ -114,7 +114,10 @@ static inline long __trace_sched_switch_state(bool preempt, struct task_struct * | |||
114 | * Preemption ignores task state, therefore preempted tasks are always | 114 | * Preemption ignores task state, therefore preempted tasks are always |
115 | * RUNNING (we will not have dequeued if state != RUNNING). | 115 | * RUNNING (we will not have dequeued if state != RUNNING). |
116 | */ | 116 | */ |
117 | return preempt ? TASK_RUNNING | TASK_STATE_MAX : p->state; | 117 | if (preempt) |
118 | return TASK_STATE_MAX; | ||
119 | |||
120 | return __get_task_state(p); | ||
118 | } | 121 | } |
119 | #endif /* CREATE_TRACE_POINTS */ | 122 | #endif /* CREATE_TRACE_POINTS */ |
120 | 123 | ||
@@ -152,12 +155,14 @@ TRACE_EVENT(sched_switch, | |||
152 | 155 | ||
153 | TP_printk("prev_comm=%s prev_pid=%d prev_prio=%d prev_state=%s%s ==> next_comm=%s next_pid=%d next_prio=%d", | 156 | TP_printk("prev_comm=%s prev_pid=%d prev_prio=%d prev_state=%s%s ==> next_comm=%s next_pid=%d next_prio=%d", |
154 | __entry->prev_comm, __entry->prev_pid, __entry->prev_prio, | 157 | __entry->prev_comm, __entry->prev_pid, __entry->prev_prio, |
155 | __entry->prev_state & (TASK_STATE_MAX-1) ? | 158 | |
156 | __print_flags(__entry->prev_state & (TASK_STATE_MAX-1), "|", | 159 | (__entry->prev_state & (TASK_REPORT_MAX - 1)) ? |
157 | { 1, "S"} , { 2, "D" }, { 4, "T" }, { 8, "t" }, | 160 | __print_flags(__entry->prev_state & (TASK_REPORT_MAX - 1), "|", |
158 | { 16, "Z" }, { 32, "X" }, { 64, "x" }, | 161 | { 0x01, "S" }, { 0x02, "D" }, { 0x04, "T" }, |
159 | { 128, "K" }, { 256, "W" }, { 512, "P" }, | 162 | { 0x08, "t" }, { 0x10, "X" }, { 0x20, "Z" }, |
160 | { 1024, "N" }) : "R", | 163 | { 0x40, "P" }, { 0x80, "I" }) : |
164 | "R", | ||
165 | |||
161 | __entry->prev_state & TASK_STATE_MAX ? "+" : "", | 166 | __entry->prev_state & TASK_STATE_MAX ? "+" : "", |
162 | __entry->next_comm, __entry->next_pid, __entry->next_prio) | 167 | __entry->next_comm, __entry->next_pid, __entry->next_prio) |
163 | ); | 168 | ); |
diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h index 43ab5c402f98..f90860d1f897 100644 --- a/include/uapi/linux/bpf.h +++ b/include/uapi/linux/bpf.h | |||
@@ -312,7 +312,7 @@ union bpf_attr { | |||
312 | * jump into another BPF program | 312 | * jump into another BPF program |
313 | * @ctx: context pointer passed to next program | 313 | * @ctx: context pointer passed to next program |
314 | * @prog_array_map: pointer to map which type is BPF_MAP_TYPE_PROG_ARRAY | 314 | * @prog_array_map: pointer to map which type is BPF_MAP_TYPE_PROG_ARRAY |
315 | * @index: index inside array that selects specific program to run | 315 | * @index: 32-bit index inside array that selects specific program to run |
316 | * Return: 0 on success or negative error | 316 | * Return: 0 on success or negative error |
317 | * | 317 | * |
318 | * int bpf_clone_redirect(skb, ifindex, flags) | 318 | * int bpf_clone_redirect(skb, ifindex, flags) |
diff --git a/include/uapi/linux/dm-ioctl.h b/include/uapi/linux/dm-ioctl.h index 412c06a624c8..ccaea525340b 100644 --- a/include/uapi/linux/dm-ioctl.h +++ b/include/uapi/linux/dm-ioctl.h | |||
@@ -269,9 +269,9 @@ enum { | |||
269 | #define DM_DEV_SET_GEOMETRY _IOWR(DM_IOCTL, DM_DEV_SET_GEOMETRY_CMD, struct dm_ioctl) | 269 | #define DM_DEV_SET_GEOMETRY _IOWR(DM_IOCTL, DM_DEV_SET_GEOMETRY_CMD, struct dm_ioctl) |
270 | 270 | ||
271 | #define DM_VERSION_MAJOR 4 | 271 | #define DM_VERSION_MAJOR 4 |
272 | #define DM_VERSION_MINOR 36 | 272 | #define DM_VERSION_MINOR 37 |
273 | #define DM_VERSION_PATCHLEVEL 0 | 273 | #define DM_VERSION_PATCHLEVEL 0 |
274 | #define DM_VERSION_EXTRA "-ioctl (2017-06-09)" | 274 | #define DM_VERSION_EXTRA "-ioctl (2017-09-20)" |
275 | 275 | ||
276 | /* Status bits */ | 276 | /* Status bits */ |
277 | #define DM_READONLY_FLAG (1 << 0) /* In/Out */ | 277 | #define DM_READONLY_FLAG (1 << 0) /* In/Out */ |
diff --git a/include/uapi/linux/ethtool.h b/include/uapi/linux/ethtool.h index 9c041dae8e2c..5bd1b1de4ea0 100644 --- a/include/uapi/linux/ethtool.h +++ b/include/uapi/linux/ethtool.h | |||
@@ -1753,6 +1753,8 @@ enum ethtool_reset_flags { | |||
1753 | * %ethtool_link_mode_bit_indices for the link modes, and other | 1753 | * %ethtool_link_mode_bit_indices for the link modes, and other |
1754 | * link features that the link partner advertised through | 1754 | * link features that the link partner advertised through |
1755 | * autonegotiation; 0 if unknown or not applicable. Read-only. | 1755 | * autonegotiation; 0 if unknown or not applicable. Read-only. |
1756 | * @transceiver: Used to distinguish different possible PHY types, | ||
1757 | * reported consistently by PHYLIB. Read-only. | ||
1756 | * | 1758 | * |
1757 | * If autonegotiation is disabled, the speed and @duplex represent the | 1759 | * If autonegotiation is disabled, the speed and @duplex represent the |
1758 | * fixed link mode and are writable if the driver supports multiple | 1760 | * fixed link mode and are writable if the driver supports multiple |
@@ -1804,7 +1806,9 @@ struct ethtool_link_settings { | |||
1804 | __u8 eth_tp_mdix; | 1806 | __u8 eth_tp_mdix; |
1805 | __u8 eth_tp_mdix_ctrl; | 1807 | __u8 eth_tp_mdix_ctrl; |
1806 | __s8 link_mode_masks_nwords; | 1808 | __s8 link_mode_masks_nwords; |
1807 | __u32 reserved[8]; | 1809 | __u8 transceiver; |
1810 | __u8 reserved1[3]; | ||
1811 | __u32 reserved[7]; | ||
1808 | __u32 link_mode_masks[0]; | 1812 | __u32 link_mode_masks[0]; |
1809 | /* layout of link_mode_masks fields: | 1813 | /* layout of link_mode_masks fields: |
1810 | * __u32 map_supported[link_mode_masks_nwords]; | 1814 | * __u32 map_supported[link_mode_masks_nwords]; |
diff --git a/include/uapi/linux/kfd_ioctl.h b/include/uapi/linux/kfd_ioctl.h index 7b4567bacfc2..26283fefdf5f 100644 --- a/include/uapi/linux/kfd_ioctl.h +++ b/include/uapi/linux/kfd_ioctl.h | |||
@@ -23,15 +23,15 @@ | |||
23 | #ifndef KFD_IOCTL_H_INCLUDED | 23 | #ifndef KFD_IOCTL_H_INCLUDED |
24 | #define KFD_IOCTL_H_INCLUDED | 24 | #define KFD_IOCTL_H_INCLUDED |
25 | 25 | ||
26 | #include <linux/types.h> | 26 | #include <drm/drm.h> |
27 | #include <linux/ioctl.h> | 27 | #include <linux/ioctl.h> |
28 | 28 | ||
29 | #define KFD_IOCTL_MAJOR_VERSION 1 | 29 | #define KFD_IOCTL_MAJOR_VERSION 1 |
30 | #define KFD_IOCTL_MINOR_VERSION 1 | 30 | #define KFD_IOCTL_MINOR_VERSION 1 |
31 | 31 | ||
32 | struct kfd_ioctl_get_version_args { | 32 | struct kfd_ioctl_get_version_args { |
33 | uint32_t major_version; /* from KFD */ | 33 | __u32 major_version; /* from KFD */ |
34 | uint32_t minor_version; /* from KFD */ | 34 | __u32 minor_version; /* from KFD */ |
35 | }; | 35 | }; |
36 | 36 | ||
37 | /* For kfd_ioctl_create_queue_args.queue_type. */ | 37 | /* For kfd_ioctl_create_queue_args.queue_type. */ |
@@ -43,36 +43,36 @@ struct kfd_ioctl_get_version_args { | |||
43 | #define KFD_MAX_QUEUE_PRIORITY 15 | 43 | #define KFD_MAX_QUEUE_PRIORITY 15 |
44 | 44 | ||
45 | struct kfd_ioctl_create_queue_args { | 45 | struct kfd_ioctl_create_queue_args { |
46 | uint64_t ring_base_address; /* to KFD */ | 46 | __u64 ring_base_address; /* to KFD */ |
47 | uint64_t write_pointer_address; /* from KFD */ | 47 | __u64 write_pointer_address; /* from KFD */ |
48 | uint64_t read_pointer_address; /* from KFD */ | 48 | __u64 read_pointer_address; /* from KFD */ |
49 | uint64_t doorbell_offset; /* from KFD */ | 49 | __u64 doorbell_offset; /* from KFD */ |
50 | 50 | ||
51 | uint32_t ring_size; /* to KFD */ | 51 | __u32 ring_size; /* to KFD */ |
52 | uint32_t gpu_id; /* to KFD */ | 52 | __u32 gpu_id; /* to KFD */ |
53 | uint32_t queue_type; /* to KFD */ | 53 | __u32 queue_type; /* to KFD */ |
54 | uint32_t queue_percentage; /* to KFD */ | 54 | __u32 queue_percentage; /* to KFD */ |
55 | uint32_t queue_priority; /* to KFD */ | 55 | __u32 queue_priority; /* to KFD */ |
56 | uint32_t queue_id; /* from KFD */ | 56 | __u32 queue_id; /* from KFD */ |
57 | 57 | ||
58 | uint64_t eop_buffer_address; /* to KFD */ | 58 | __u64 eop_buffer_address; /* to KFD */ |
59 | uint64_t eop_buffer_size; /* to KFD */ | 59 | __u64 eop_buffer_size; /* to KFD */ |
60 | uint64_t ctx_save_restore_address; /* to KFD */ | 60 | __u64 ctx_save_restore_address; /* to KFD */ |
61 | uint64_t ctx_save_restore_size; /* to KFD */ | 61 | __u64 ctx_save_restore_size; /* to KFD */ |
62 | }; | 62 | }; |
63 | 63 | ||
64 | struct kfd_ioctl_destroy_queue_args { | 64 | struct kfd_ioctl_destroy_queue_args { |
65 | uint32_t queue_id; /* to KFD */ | 65 | __u32 queue_id; /* to KFD */ |
66 | uint32_t pad; | 66 | __u32 pad; |
67 | }; | 67 | }; |
68 | 68 | ||
69 | struct kfd_ioctl_update_queue_args { | 69 | struct kfd_ioctl_update_queue_args { |
70 | uint64_t ring_base_address; /* to KFD */ | 70 | __u64 ring_base_address; /* to KFD */ |
71 | 71 | ||
72 | uint32_t queue_id; /* to KFD */ | 72 | __u32 queue_id; /* to KFD */ |
73 | uint32_t ring_size; /* to KFD */ | 73 | __u32 ring_size; /* to KFD */ |
74 | uint32_t queue_percentage; /* to KFD */ | 74 | __u32 queue_percentage; /* to KFD */ |
75 | uint32_t queue_priority; /* to KFD */ | 75 | __u32 queue_priority; /* to KFD */ |
76 | }; | 76 | }; |
77 | 77 | ||
78 | /* For kfd_ioctl_set_memory_policy_args.default_policy and alternate_policy */ | 78 | /* For kfd_ioctl_set_memory_policy_args.default_policy and alternate_policy */ |
@@ -80,13 +80,13 @@ struct kfd_ioctl_update_queue_args { | |||
80 | #define KFD_IOC_CACHE_POLICY_NONCOHERENT 1 | 80 | #define KFD_IOC_CACHE_POLICY_NONCOHERENT 1 |
81 | 81 | ||
82 | struct kfd_ioctl_set_memory_policy_args { | 82 | struct kfd_ioctl_set_memory_policy_args { |
83 | uint64_t alternate_aperture_base; /* to KFD */ | 83 | __u64 alternate_aperture_base; /* to KFD */ |
84 | uint64_t alternate_aperture_size; /* to KFD */ | 84 | __u64 alternate_aperture_size; /* to KFD */ |
85 | 85 | ||
86 | uint32_t gpu_id; /* to KFD */ | 86 | __u32 gpu_id; /* to KFD */ |
87 | uint32_t default_policy; /* to KFD */ | 87 | __u32 default_policy; /* to KFD */ |
88 | uint32_t alternate_policy; /* to KFD */ | 88 | __u32 alternate_policy; /* to KFD */ |
89 | uint32_t pad; | 89 | __u32 pad; |
90 | }; | 90 | }; |
91 | 91 | ||
92 | /* | 92 | /* |
@@ -97,26 +97,26 @@ struct kfd_ioctl_set_memory_policy_args { | |||
97 | */ | 97 | */ |
98 | 98 | ||
99 | struct kfd_ioctl_get_clock_counters_args { | 99 | struct kfd_ioctl_get_clock_counters_args { |
100 | uint64_t gpu_clock_counter; /* from KFD */ | 100 | __u64 gpu_clock_counter; /* from KFD */ |
101 | uint64_t cpu_clock_counter; /* from KFD */ | 101 | __u64 cpu_clock_counter; /* from KFD */ |
102 | uint64_t system_clock_counter; /* from KFD */ | 102 | __u64 system_clock_counter; /* from KFD */ |
103 | uint64_t system_clock_freq; /* from KFD */ | 103 | __u64 system_clock_freq; /* from KFD */ |
104 | 104 | ||
105 | uint32_t gpu_id; /* to KFD */ | 105 | __u32 gpu_id; /* to KFD */ |
106 | uint32_t pad; | 106 | __u32 pad; |
107 | }; | 107 | }; |
108 | 108 | ||
109 | #define NUM_OF_SUPPORTED_GPUS 7 | 109 | #define NUM_OF_SUPPORTED_GPUS 7 |
110 | 110 | ||
111 | struct kfd_process_device_apertures { | 111 | struct kfd_process_device_apertures { |
112 | uint64_t lds_base; /* from KFD */ | 112 | __u64 lds_base; /* from KFD */ |
113 | uint64_t lds_limit; /* from KFD */ | 113 | __u64 lds_limit; /* from KFD */ |
114 | uint64_t scratch_base; /* from KFD */ | 114 | __u64 scratch_base; /* from KFD */ |
115 | uint64_t scratch_limit; /* from KFD */ | 115 | __u64 scratch_limit; /* from KFD */ |
116 | uint64_t gpuvm_base; /* from KFD */ | 116 | __u64 gpuvm_base; /* from KFD */ |
117 | uint64_t gpuvm_limit; /* from KFD */ | 117 | __u64 gpuvm_limit; /* from KFD */ |
118 | uint32_t gpu_id; /* from KFD */ | 118 | __u32 gpu_id; /* from KFD */ |
119 | uint32_t pad; | 119 | __u32 pad; |
120 | }; | 120 | }; |
121 | 121 | ||
122 | struct kfd_ioctl_get_process_apertures_args { | 122 | struct kfd_ioctl_get_process_apertures_args { |
@@ -124,8 +124,8 @@ struct kfd_ioctl_get_process_apertures_args { | |||
124 | process_apertures[NUM_OF_SUPPORTED_GPUS];/* from KFD */ | 124 | process_apertures[NUM_OF_SUPPORTED_GPUS];/* from KFD */ |
125 | 125 | ||
126 | /* from KFD, should be in the range [1 - NUM_OF_SUPPORTED_GPUS] */ | 126 | /* from KFD, should be in the range [1 - NUM_OF_SUPPORTED_GPUS] */ |
127 | uint32_t num_of_nodes; | 127 | __u32 num_of_nodes; |
128 | uint32_t pad; | 128 | __u32 pad; |
129 | }; | 129 | }; |
130 | 130 | ||
131 | #define MAX_ALLOWED_NUM_POINTS 100 | 131 | #define MAX_ALLOWED_NUM_POINTS 100 |
@@ -133,25 +133,25 @@ struct kfd_ioctl_get_process_apertures_args { | |||
133 | #define MAX_ALLOWED_WAC_BUFF_SIZE 128 | 133 | #define MAX_ALLOWED_WAC_BUFF_SIZE 128 |
134 | 134 | ||
135 | struct kfd_ioctl_dbg_register_args { | 135 | struct kfd_ioctl_dbg_register_args { |
136 | uint32_t gpu_id; /* to KFD */ | 136 | __u32 gpu_id; /* to KFD */ |
137 | uint32_t pad; | 137 | __u32 pad; |
138 | }; | 138 | }; |
139 | 139 | ||
140 | struct kfd_ioctl_dbg_unregister_args { | 140 | struct kfd_ioctl_dbg_unregister_args { |
141 | uint32_t gpu_id; /* to KFD */ | 141 | __u32 gpu_id; /* to KFD */ |
142 | uint32_t pad; | 142 | __u32 pad; |
143 | }; | 143 | }; |
144 | 144 | ||
145 | struct kfd_ioctl_dbg_address_watch_args { | 145 | struct kfd_ioctl_dbg_address_watch_args { |
146 | uint64_t content_ptr; /* a pointer to the actual content */ | 146 | __u64 content_ptr; /* a pointer to the actual content */ |
147 | uint32_t gpu_id; /* to KFD */ | 147 | __u32 gpu_id; /* to KFD */ |
148 | uint32_t buf_size_in_bytes; /*including gpu_id and buf_size */ | 148 | __u32 buf_size_in_bytes; /*including gpu_id and buf_size */ |
149 | }; | 149 | }; |
150 | 150 | ||
151 | struct kfd_ioctl_dbg_wave_control_args { | 151 | struct kfd_ioctl_dbg_wave_control_args { |
152 | uint64_t content_ptr; /* a pointer to the actual content */ | 152 | __u64 content_ptr; /* a pointer to the actual content */ |
153 | uint32_t gpu_id; /* to KFD */ | 153 | __u32 gpu_id; /* to KFD */ |
154 | uint32_t buf_size_in_bytes; /*including gpu_id and buf_size */ | 154 | __u32 buf_size_in_bytes; /*including gpu_id and buf_size */ |
155 | }; | 155 | }; |
156 | 156 | ||
157 | /* Matching HSA_EVENTTYPE */ | 157 | /* Matching HSA_EVENTTYPE */ |
@@ -172,44 +172,44 @@ struct kfd_ioctl_dbg_wave_control_args { | |||
172 | #define KFD_SIGNAL_EVENT_LIMIT 256 | 172 | #define KFD_SIGNAL_EVENT_LIMIT 256 |
173 | 173 | ||
174 | struct kfd_ioctl_create_event_args { | 174 | struct kfd_ioctl_create_event_args { |
175 | uint64_t event_page_offset; /* from KFD */ | 175 | __u64 event_page_offset; /* from KFD */ |
176 | uint32_t event_trigger_data; /* from KFD - signal events only */ | 176 | __u32 event_trigger_data; /* from KFD - signal events only */ |
177 | uint32_t event_type; /* to KFD */ | 177 | __u32 event_type; /* to KFD */ |
178 | uint32_t auto_reset; /* to KFD */ | 178 | __u32 auto_reset; /* to KFD */ |
179 | uint32_t node_id; /* to KFD - only valid for certain | 179 | __u32 node_id; /* to KFD - only valid for certain |
180 | event types */ | 180 | event types */ |
181 | uint32_t event_id; /* from KFD */ | 181 | __u32 event_id; /* from KFD */ |
182 | uint32_t event_slot_index; /* from KFD */ | 182 | __u32 event_slot_index; /* from KFD */ |
183 | }; | 183 | }; |
184 | 184 | ||
185 | struct kfd_ioctl_destroy_event_args { | 185 | struct kfd_ioctl_destroy_event_args { |
186 | uint32_t event_id; /* to KFD */ | 186 | __u32 event_id; /* to KFD */ |
187 | uint32_t pad; | 187 | __u32 pad; |
188 | }; | 188 | }; |
189 | 189 | ||
190 | struct kfd_ioctl_set_event_args { | 190 | struct kfd_ioctl_set_event_args { |
191 | uint32_t event_id; /* to KFD */ | 191 | __u32 event_id; /* to KFD */ |
192 | uint32_t pad; | 192 | __u32 pad; |
193 | }; | 193 | }; |
194 | 194 | ||
195 | struct kfd_ioctl_reset_event_args { | 195 | struct kfd_ioctl_reset_event_args { |
196 | uint32_t event_id; /* to KFD */ | 196 | __u32 event_id; /* to KFD */ |
197 | uint32_t pad; | 197 | __u32 pad; |
198 | }; | 198 | }; |
199 | 199 | ||
200 | struct kfd_memory_exception_failure { | 200 | struct kfd_memory_exception_failure { |
201 | uint32_t NotPresent; /* Page not present or supervisor privilege */ | 201 | __u32 NotPresent; /* Page not present or supervisor privilege */ |
202 | uint32_t ReadOnly; /* Write access to a read-only page */ | 202 | __u32 ReadOnly; /* Write access to a read-only page */ |
203 | uint32_t NoExecute; /* Execute access to a page marked NX */ | 203 | __u32 NoExecute; /* Execute access to a page marked NX */ |
204 | uint32_t pad; | 204 | __u32 pad; |
205 | }; | 205 | }; |
206 | 206 | ||
207 | /* memory exception data*/ | 207 | /* memory exception data*/ |
208 | struct kfd_hsa_memory_exception_data { | 208 | struct kfd_hsa_memory_exception_data { |
209 | struct kfd_memory_exception_failure failure; | 209 | struct kfd_memory_exception_failure failure; |
210 | uint64_t va; | 210 | __u64 va; |
211 | uint32_t gpu_id; | 211 | __u32 gpu_id; |
212 | uint32_t pad; | 212 | __u32 pad; |
213 | }; | 213 | }; |
214 | 214 | ||
215 | /* Event data*/ | 215 | /* Event data*/ |
@@ -217,19 +217,19 @@ struct kfd_event_data { | |||
217 | union { | 217 | union { |
218 | struct kfd_hsa_memory_exception_data memory_exception_data; | 218 | struct kfd_hsa_memory_exception_data memory_exception_data; |
219 | }; /* From KFD */ | 219 | }; /* From KFD */ |
220 | uint64_t kfd_event_data_ext; /* pointer to an extension structure | 220 | __u64 kfd_event_data_ext; /* pointer to an extension structure |
221 | for future exception types */ | 221 | for future exception types */ |
222 | uint32_t event_id; /* to KFD */ | 222 | __u32 event_id; /* to KFD */ |
223 | uint32_t pad; | 223 | __u32 pad; |
224 | }; | 224 | }; |
225 | 225 | ||
226 | struct kfd_ioctl_wait_events_args { | 226 | struct kfd_ioctl_wait_events_args { |
227 | uint64_t events_ptr; /* pointed to struct | 227 | __u64 events_ptr; /* pointed to struct |
228 | kfd_event_data array, to KFD */ | 228 | kfd_event_data array, to KFD */ |
229 | uint32_t num_events; /* to KFD */ | 229 | __u32 num_events; /* to KFD */ |
230 | uint32_t wait_for_all; /* to KFD */ | 230 | __u32 wait_for_all; /* to KFD */ |
231 | uint32_t timeout; /* to KFD */ | 231 | __u32 timeout; /* to KFD */ |
232 | uint32_t wait_result; /* from KFD */ | 232 | __u32 wait_result; /* from KFD */ |
233 | }; | 233 | }; |
234 | 234 | ||
235 | struct kfd_ioctl_set_scratch_backing_va_args { | 235 | struct kfd_ioctl_set_scratch_backing_va_args { |
diff --git a/include/uapi/linux/seccomp.h b/include/uapi/linux/seccomp.h index 0f238a43ff1e..f6bc1dea3247 100644 --- a/include/uapi/linux/seccomp.h +++ b/include/uapi/linux/seccomp.h | |||
@@ -11,27 +11,34 @@ | |||
11 | #define SECCOMP_MODE_FILTER 2 /* uses user-supplied filter. */ | 11 | #define SECCOMP_MODE_FILTER 2 /* uses user-supplied filter. */ |
12 | 12 | ||
13 | /* Valid operations for seccomp syscall. */ | 13 | /* Valid operations for seccomp syscall. */ |
14 | #define SECCOMP_SET_MODE_STRICT 0 | 14 | #define SECCOMP_SET_MODE_STRICT 0 |
15 | #define SECCOMP_SET_MODE_FILTER 1 | 15 | #define SECCOMP_SET_MODE_FILTER 1 |
16 | #define SECCOMP_GET_ACTION_AVAIL 2 | ||
16 | 17 | ||
17 | /* Valid flags for SECCOMP_SET_MODE_FILTER */ | 18 | /* Valid flags for SECCOMP_SET_MODE_FILTER */ |
18 | #define SECCOMP_FILTER_FLAG_TSYNC 1 | 19 | #define SECCOMP_FILTER_FLAG_TSYNC 1 |
20 | #define SECCOMP_FILTER_FLAG_LOG 2 | ||
19 | 21 | ||
20 | /* | 22 | /* |
21 | * All BPF programs must return a 32-bit value. | 23 | * All BPF programs must return a 32-bit value. |
22 | * The bottom 16-bits are for optional return data. | 24 | * The bottom 16-bits are for optional return data. |
23 | * The upper 16-bits are ordered from least permissive values to most. | 25 | * The upper 16-bits are ordered from least permissive values to most, |
26 | * as a signed value (so 0x8000000 is negative). | ||
24 | * | 27 | * |
25 | * The ordering ensures that a min_t() over composed return values always | 28 | * The ordering ensures that a min_t() over composed return values always |
26 | * selects the least permissive choice. | 29 | * selects the least permissive choice. |
27 | */ | 30 | */ |
28 | #define SECCOMP_RET_KILL 0x00000000U /* kill the task immediately */ | 31 | #define SECCOMP_RET_KILL_PROCESS 0x80000000U /* kill the process */ |
29 | #define SECCOMP_RET_TRAP 0x00030000U /* disallow and force a SIGSYS */ | 32 | #define SECCOMP_RET_KILL_THREAD 0x00000000U /* kill the thread */ |
30 | #define SECCOMP_RET_ERRNO 0x00050000U /* returns an errno */ | 33 | #define SECCOMP_RET_KILL SECCOMP_RET_KILL_THREAD |
31 | #define SECCOMP_RET_TRACE 0x7ff00000U /* pass to a tracer or disallow */ | 34 | #define SECCOMP_RET_TRAP 0x00030000U /* disallow and force a SIGSYS */ |
32 | #define SECCOMP_RET_ALLOW 0x7fff0000U /* allow */ | 35 | #define SECCOMP_RET_ERRNO 0x00050000U /* returns an errno */ |
36 | #define SECCOMP_RET_TRACE 0x7ff00000U /* pass to a tracer or disallow */ | ||
37 | #define SECCOMP_RET_LOG 0x7ffc0000U /* allow after logging */ | ||
38 | #define SECCOMP_RET_ALLOW 0x7fff0000U /* allow */ | ||
33 | 39 | ||
34 | /* Masks for the return value sections. */ | 40 | /* Masks for the return value sections. */ |
41 | #define SECCOMP_RET_ACTION_FULL 0xffff0000U | ||
35 | #define SECCOMP_RET_ACTION 0x7fff0000U | 42 | #define SECCOMP_RET_ACTION 0x7fff0000U |
36 | #define SECCOMP_RET_DATA 0x0000ffffU | 43 | #define SECCOMP_RET_DATA 0x0000ffffU |
37 | 44 | ||
diff --git a/include/uapi/linux/usb/ch9.h b/include/uapi/linux/usb/ch9.h index ce1169af39d7..2a5d63040a0b 100644 --- a/include/uapi/linux/usb/ch9.h +++ b/include/uapi/linux/usb/ch9.h | |||
@@ -780,6 +780,7 @@ struct usb_interface_assoc_descriptor { | |||
780 | __u8 iFunction; | 780 | __u8 iFunction; |
781 | } __attribute__ ((packed)); | 781 | } __attribute__ ((packed)); |
782 | 782 | ||
783 | #define USB_DT_INTERFACE_ASSOCIATION_SIZE 8 | ||
783 | 784 | ||
784 | /*-------------------------------------------------------------------------*/ | 785 | /*-------------------------------------------------------------------------*/ |
785 | 786 | ||
diff --git a/include/uapi/rdma/ib_user_verbs.h b/include/uapi/rdma/ib_user_verbs.h index 9a0b6479fe0c..d4e0b53bfc75 100644 --- a/include/uapi/rdma/ib_user_verbs.h +++ b/include/uapi/rdma/ib_user_verbs.h | |||
@@ -261,7 +261,7 @@ struct ib_uverbs_ex_query_device_resp { | |||
261 | struct ib_uverbs_rss_caps rss_caps; | 261 | struct ib_uverbs_rss_caps rss_caps; |
262 | __u32 max_wq_type_rq; | 262 | __u32 max_wq_type_rq; |
263 | __u32 raw_packet_caps; | 263 | __u32 raw_packet_caps; |
264 | struct ib_uverbs_tm_caps xrq_caps; | 264 | struct ib_uverbs_tm_caps tm_caps; |
265 | }; | 265 | }; |
266 | 266 | ||
267 | struct ib_uverbs_query_port { | 267 | struct ib_uverbs_query_port { |
diff --git a/include/xen/arm/page.h b/include/xen/arm/page.h index 415dbc6e43fd..6adc2a955340 100644 --- a/include/xen/arm/page.h +++ b/include/xen/arm/page.h | |||
@@ -84,16 +84,6 @@ static inline xmaddr_t arbitrary_virt_to_machine(void *vaddr) | |||
84 | BUG(); | 84 | BUG(); |
85 | } | 85 | } |
86 | 86 | ||
87 | /* TODO: this shouldn't be here but it is because the frontend drivers | ||
88 | * are using it (its rolled in headers) even though we won't hit the code path. | ||
89 | * So for right now just punt with this. | ||
90 | */ | ||
91 | static inline pte_t *lookup_address(unsigned long address, unsigned int *level) | ||
92 | { | ||
93 | BUG(); | ||
94 | return NULL; | ||
95 | } | ||
96 | |||
97 | extern int set_foreign_p2m_mapping(struct gnttab_map_grant_ref *map_ops, | 87 | extern int set_foreign_p2m_mapping(struct gnttab_map_grant_ref *map_ops, |
98 | struct gnttab_map_grant_ref *kmap_ops, | 88 | struct gnttab_map_grant_ref *kmap_ops, |
99 | struct page **pages, unsigned int count); | 89 | struct page **pages, unsigned int count); |
@@ -1154,7 +1154,7 @@ static int put_compat_shm_info(struct shm_info *ip, | |||
1154 | info.shm_swp = ip->shm_swp; | 1154 | info.shm_swp = ip->shm_swp; |
1155 | info.swap_attempts = ip->swap_attempts; | 1155 | info.swap_attempts = ip->swap_attempts; |
1156 | info.swap_successes = ip->swap_successes; | 1156 | info.swap_successes = ip->swap_successes; |
1157 | return copy_to_user(up, &info, sizeof(info)); | 1157 | return copy_to_user(uip, &info, sizeof(info)); |
1158 | } | 1158 | } |
1159 | 1159 | ||
1160 | static int copy_compat_shmid_to_user(void __user *buf, struct shmid64_ds *in, | 1160 | static int copy_compat_shmid_to_user(void __user *buf, struct shmid64_ds *in, |
@@ -1237,7 +1237,7 @@ COMPAT_SYSCALL_DEFINE3(shmctl, int, shmid, int, cmd, void __user *, uptr) | |||
1237 | err = shmctl_stat(ns, shmid, cmd, &sem64); | 1237 | err = shmctl_stat(ns, shmid, cmd, &sem64); |
1238 | if (err < 0) | 1238 | if (err < 0) |
1239 | return err; | 1239 | return err; |
1240 | if (copy_compat_shmid_to_user(&sem64, uptr, version)) | 1240 | if (copy_compat_shmid_to_user(uptr, &sem64, version)) |
1241 | err = -EFAULT; | 1241 | err = -EFAULT; |
1242 | return err; | 1242 | return err; |
1243 | 1243 | ||
diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c index 917cc04a0a94..7b62df86be1d 100644 --- a/kernel/bpf/core.c +++ b/kernel/bpf/core.c | |||
@@ -1022,7 +1022,7 @@ select_insn: | |||
1022 | struct bpf_map *map = (struct bpf_map *) (unsigned long) BPF_R2; | 1022 | struct bpf_map *map = (struct bpf_map *) (unsigned long) BPF_R2; |
1023 | struct bpf_array *array = container_of(map, struct bpf_array, map); | 1023 | struct bpf_array *array = container_of(map, struct bpf_array, map); |
1024 | struct bpf_prog *prog; | 1024 | struct bpf_prog *prog; |
1025 | u64 index = BPF_R3; | 1025 | u32 index = BPF_R3; |
1026 | 1026 | ||
1027 | if (unlikely(index >= array->map.max_entries)) | 1027 | if (unlikely(index >= array->map.max_entries)) |
1028 | goto out; | 1028 | goto out; |
diff --git a/kernel/bpf/devmap.c b/kernel/bpf/devmap.c index 959c9a07f318..e093d9a2c4dd 100644 --- a/kernel/bpf/devmap.c +++ b/kernel/bpf/devmap.c | |||
@@ -75,8 +75,8 @@ static u64 dev_map_bitmap_size(const union bpf_attr *attr) | |||
75 | static struct bpf_map *dev_map_alloc(union bpf_attr *attr) | 75 | static struct bpf_map *dev_map_alloc(union bpf_attr *attr) |
76 | { | 76 | { |
77 | struct bpf_dtab *dtab; | 77 | struct bpf_dtab *dtab; |
78 | int err = -EINVAL; | ||
78 | u64 cost; | 79 | u64 cost; |
79 | int err; | ||
80 | 80 | ||
81 | /* check sanity of attributes */ | 81 | /* check sanity of attributes */ |
82 | if (attr->max_entries == 0 || attr->key_size != 4 || | 82 | if (attr->max_entries == 0 || attr->key_size != 4 || |
@@ -108,6 +108,8 @@ static struct bpf_map *dev_map_alloc(union bpf_attr *attr) | |||
108 | if (err) | 108 | if (err) |
109 | goto free_dtab; | 109 | goto free_dtab; |
110 | 110 | ||
111 | err = -ENOMEM; | ||
112 | |||
111 | /* A per cpu bitfield with a bit per possible net device */ | 113 | /* A per cpu bitfield with a bit per possible net device */ |
112 | dtab->flush_needed = __alloc_percpu(dev_map_bitmap_size(attr), | 114 | dtab->flush_needed = __alloc_percpu(dev_map_bitmap_size(attr), |
113 | __alignof__(unsigned long)); | 115 | __alignof__(unsigned long)); |
@@ -128,7 +130,7 @@ static struct bpf_map *dev_map_alloc(union bpf_attr *attr) | |||
128 | free_dtab: | 130 | free_dtab: |
129 | free_percpu(dtab->flush_needed); | 131 | free_percpu(dtab->flush_needed); |
130 | kfree(dtab); | 132 | kfree(dtab); |
131 | return ERR_PTR(-ENOMEM); | 133 | return ERR_PTR(err); |
132 | } | 134 | } |
133 | 135 | ||
134 | static void dev_map_free(struct bpf_map *map) | 136 | static void dev_map_free(struct bpf_map *map) |
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c index cb17e1cd1d43..25d074920a00 100644 --- a/kernel/bpf/syscall.c +++ b/kernel/bpf/syscall.c | |||
@@ -186,15 +186,17 @@ static int bpf_map_alloc_id(struct bpf_map *map) | |||
186 | 186 | ||
187 | static void bpf_map_free_id(struct bpf_map *map, bool do_idr_lock) | 187 | static void bpf_map_free_id(struct bpf_map *map, bool do_idr_lock) |
188 | { | 188 | { |
189 | unsigned long flags; | ||
190 | |||
189 | if (do_idr_lock) | 191 | if (do_idr_lock) |
190 | spin_lock_bh(&map_idr_lock); | 192 | spin_lock_irqsave(&map_idr_lock, flags); |
191 | else | 193 | else |
192 | __acquire(&map_idr_lock); | 194 | __acquire(&map_idr_lock); |
193 | 195 | ||
194 | idr_remove(&map_idr, map->id); | 196 | idr_remove(&map_idr, map->id); |
195 | 197 | ||
196 | if (do_idr_lock) | 198 | if (do_idr_lock) |
197 | spin_unlock_bh(&map_idr_lock); | 199 | spin_unlock_irqrestore(&map_idr_lock, flags); |
198 | else | 200 | else |
199 | __release(&map_idr_lock); | 201 | __release(&map_idr_lock); |
200 | } | 202 | } |
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index 799b2451ef2d..b914fbe1383e 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c | |||
@@ -4205,7 +4205,12 @@ static int fixup_bpf_calls(struct bpf_verifier_env *env) | |||
4205 | } | 4205 | } |
4206 | 4206 | ||
4207 | if (insn->imm == BPF_FUNC_redirect_map) { | 4207 | if (insn->imm == BPF_FUNC_redirect_map) { |
4208 | u64 addr = (unsigned long)prog; | 4208 | /* Note, we cannot use prog directly as imm as subsequent |
4209 | * rewrites would still change the prog pointer. The only | ||
4210 | * stable address we can use is aux, which also works with | ||
4211 | * prog clones during blinding. | ||
4212 | */ | ||
4213 | u64 addr = (unsigned long)prog->aux; | ||
4209 | struct bpf_insn r4_ld[] = { | 4214 | struct bpf_insn r4_ld[] = { |
4210 | BPF_LD_IMM64(BPF_REG_4, addr), | 4215 | BPF_LD_IMM64(BPF_REG_4, addr), |
4211 | *insn, | 4216 | *insn, |
diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c index d6551cd45238..44857278eb8a 100644 --- a/kernel/cgroup/cgroup.c +++ b/kernel/cgroup/cgroup.c | |||
@@ -2311,6 +2311,14 @@ out_release_tset: | |||
2311 | list_del_init(&cset->mg_node); | 2311 | list_del_init(&cset->mg_node); |
2312 | } | 2312 | } |
2313 | spin_unlock_irq(&css_set_lock); | 2313 | spin_unlock_irq(&css_set_lock); |
2314 | |||
2315 | /* | ||
2316 | * Re-initialize the cgroup_taskset structure in case it is reused | ||
2317 | * again in another cgroup_migrate_add_task()/cgroup_migrate_execute() | ||
2318 | * iteration. | ||
2319 | */ | ||
2320 | tset->nr_tasks = 0; | ||
2321 | tset->csets = &tset->src_csets; | ||
2314 | return ret; | 2322 | return ret; |
2315 | } | 2323 | } |
2316 | 2324 | ||
diff --git a/kernel/cpu.c b/kernel/cpu.c index acf5308fad51..d851df22f5c5 100644 --- a/kernel/cpu.c +++ b/kernel/cpu.c | |||
@@ -24,6 +24,7 @@ | |||
24 | #include <linux/lockdep.h> | 24 | #include <linux/lockdep.h> |
25 | #include <linux/tick.h> | 25 | #include <linux/tick.h> |
26 | #include <linux/irq.h> | 26 | #include <linux/irq.h> |
27 | #include <linux/nmi.h> | ||
27 | #include <linux/smpboot.h> | 28 | #include <linux/smpboot.h> |
28 | #include <linux/relay.h> | 29 | #include <linux/relay.h> |
29 | #include <linux/slab.h> | 30 | #include <linux/slab.h> |
@@ -46,11 +47,13 @@ | |||
46 | * @bringup: Single callback bringup or teardown selector | 47 | * @bringup: Single callback bringup or teardown selector |
47 | * @cb_state: The state for a single callback (install/uninstall) | 48 | * @cb_state: The state for a single callback (install/uninstall) |
48 | * @result: Result of the operation | 49 | * @result: Result of the operation |
49 | * @done: Signal completion to the issuer of the task | 50 | * @done_up: Signal completion to the issuer of the task for cpu-up |
51 | * @done_down: Signal completion to the issuer of the task for cpu-down | ||
50 | */ | 52 | */ |
51 | struct cpuhp_cpu_state { | 53 | struct cpuhp_cpu_state { |
52 | enum cpuhp_state state; | 54 | enum cpuhp_state state; |
53 | enum cpuhp_state target; | 55 | enum cpuhp_state target; |
56 | enum cpuhp_state fail; | ||
54 | #ifdef CONFIG_SMP | 57 | #ifdef CONFIG_SMP |
55 | struct task_struct *thread; | 58 | struct task_struct *thread; |
56 | bool should_run; | 59 | bool should_run; |
@@ -58,18 +61,39 @@ struct cpuhp_cpu_state { | |||
58 | bool single; | 61 | bool single; |
59 | bool bringup; | 62 | bool bringup; |
60 | struct hlist_node *node; | 63 | struct hlist_node *node; |
64 | struct hlist_node *last; | ||
61 | enum cpuhp_state cb_state; | 65 | enum cpuhp_state cb_state; |
62 | int result; | 66 | int result; |
63 | struct completion done; | 67 | struct completion done_up; |
68 | struct completion done_down; | ||
64 | #endif | 69 | #endif |
65 | }; | 70 | }; |
66 | 71 | ||
67 | static DEFINE_PER_CPU(struct cpuhp_cpu_state, cpuhp_state); | 72 | static DEFINE_PER_CPU(struct cpuhp_cpu_state, cpuhp_state) = { |
73 | .fail = CPUHP_INVALID, | ||
74 | }; | ||
68 | 75 | ||
69 | #if defined(CONFIG_LOCKDEP) && defined(CONFIG_SMP) | 76 | #if defined(CONFIG_LOCKDEP) && defined(CONFIG_SMP) |
70 | static struct lock_class_key cpuhp_state_key; | 77 | static struct lockdep_map cpuhp_state_up_map = |
71 | static struct lockdep_map cpuhp_state_lock_map = | 78 | STATIC_LOCKDEP_MAP_INIT("cpuhp_state-up", &cpuhp_state_up_map); |
72 | STATIC_LOCKDEP_MAP_INIT("cpuhp_state", &cpuhp_state_key); | 79 | static struct lockdep_map cpuhp_state_down_map = |
80 | STATIC_LOCKDEP_MAP_INIT("cpuhp_state-down", &cpuhp_state_down_map); | ||
81 | |||
82 | |||
83 | static void inline cpuhp_lock_acquire(bool bringup) | ||
84 | { | ||
85 | lock_map_acquire(bringup ? &cpuhp_state_up_map : &cpuhp_state_down_map); | ||
86 | } | ||
87 | |||
88 | static void inline cpuhp_lock_release(bool bringup) | ||
89 | { | ||
90 | lock_map_release(bringup ? &cpuhp_state_up_map : &cpuhp_state_down_map); | ||
91 | } | ||
92 | #else | ||
93 | |||
94 | static void inline cpuhp_lock_acquire(bool bringup) { } | ||
95 | static void inline cpuhp_lock_release(bool bringup) { } | ||
96 | |||
73 | #endif | 97 | #endif |
74 | 98 | ||
75 | /** | 99 | /** |
@@ -123,13 +147,16 @@ static struct cpuhp_step *cpuhp_get_step(enum cpuhp_state state) | |||
123 | /** | 147 | /** |
124 | * cpuhp_invoke_callback _ Invoke the callbacks for a given state | 148 | * cpuhp_invoke_callback _ Invoke the callbacks for a given state |
125 | * @cpu: The cpu for which the callback should be invoked | 149 | * @cpu: The cpu for which the callback should be invoked |
126 | * @step: The step in the state machine | 150 | * @state: The state to do callbacks for |
127 | * @bringup: True if the bringup callback should be invoked | 151 | * @bringup: True if the bringup callback should be invoked |
152 | * @node: For multi-instance, do a single entry callback for install/remove | ||
153 | * @lastp: For multi-instance rollback, remember how far we got | ||
128 | * | 154 | * |
129 | * Called from cpu hotplug and from the state register machinery. | 155 | * Called from cpu hotplug and from the state register machinery. |
130 | */ | 156 | */ |
131 | static int cpuhp_invoke_callback(unsigned int cpu, enum cpuhp_state state, | 157 | static int cpuhp_invoke_callback(unsigned int cpu, enum cpuhp_state state, |
132 | bool bringup, struct hlist_node *node) | 158 | bool bringup, struct hlist_node *node, |
159 | struct hlist_node **lastp) | ||
133 | { | 160 | { |
134 | struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu); | 161 | struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu); |
135 | struct cpuhp_step *step = cpuhp_get_step(state); | 162 | struct cpuhp_step *step = cpuhp_get_step(state); |
@@ -137,7 +164,17 @@ static int cpuhp_invoke_callback(unsigned int cpu, enum cpuhp_state state, | |||
137 | int (*cb)(unsigned int cpu); | 164 | int (*cb)(unsigned int cpu); |
138 | int ret, cnt; | 165 | int ret, cnt; |
139 | 166 | ||
167 | if (st->fail == state) { | ||
168 | st->fail = CPUHP_INVALID; | ||
169 | |||
170 | if (!(bringup ? step->startup.single : step->teardown.single)) | ||
171 | return 0; | ||
172 | |||
173 | return -EAGAIN; | ||
174 | } | ||
175 | |||
140 | if (!step->multi_instance) { | 176 | if (!step->multi_instance) { |
177 | WARN_ON_ONCE(lastp && *lastp); | ||
141 | cb = bringup ? step->startup.single : step->teardown.single; | 178 | cb = bringup ? step->startup.single : step->teardown.single; |
142 | if (!cb) | 179 | if (!cb) |
143 | return 0; | 180 | return 0; |
@@ -152,6 +189,7 @@ static int cpuhp_invoke_callback(unsigned int cpu, enum cpuhp_state state, | |||
152 | 189 | ||
153 | /* Single invocation for instance add/remove */ | 190 | /* Single invocation for instance add/remove */ |
154 | if (node) { | 191 | if (node) { |
192 | WARN_ON_ONCE(lastp && *lastp); | ||
155 | trace_cpuhp_multi_enter(cpu, st->target, state, cbm, node); | 193 | trace_cpuhp_multi_enter(cpu, st->target, state, cbm, node); |
156 | ret = cbm(cpu, node); | 194 | ret = cbm(cpu, node); |
157 | trace_cpuhp_exit(cpu, st->state, state, ret); | 195 | trace_cpuhp_exit(cpu, st->state, state, ret); |
@@ -161,13 +199,23 @@ static int cpuhp_invoke_callback(unsigned int cpu, enum cpuhp_state state, | |||
161 | /* State transition. Invoke on all instances */ | 199 | /* State transition. Invoke on all instances */ |
162 | cnt = 0; | 200 | cnt = 0; |
163 | hlist_for_each(node, &step->list) { | 201 | hlist_for_each(node, &step->list) { |
202 | if (lastp && node == *lastp) | ||
203 | break; | ||
204 | |||
164 | trace_cpuhp_multi_enter(cpu, st->target, state, cbm, node); | 205 | trace_cpuhp_multi_enter(cpu, st->target, state, cbm, node); |
165 | ret = cbm(cpu, node); | 206 | ret = cbm(cpu, node); |
166 | trace_cpuhp_exit(cpu, st->state, state, ret); | 207 | trace_cpuhp_exit(cpu, st->state, state, ret); |
167 | if (ret) | 208 | if (ret) { |
168 | goto err; | 209 | if (!lastp) |
210 | goto err; | ||
211 | |||
212 | *lastp = node; | ||
213 | return ret; | ||
214 | } | ||
169 | cnt++; | 215 | cnt++; |
170 | } | 216 | } |
217 | if (lastp) | ||
218 | *lastp = NULL; | ||
171 | return 0; | 219 | return 0; |
172 | err: | 220 | err: |
173 | /* Rollback the instances if one failed */ | 221 | /* Rollback the instances if one failed */ |
@@ -178,12 +226,39 @@ err: | |||
178 | hlist_for_each(node, &step->list) { | 226 | hlist_for_each(node, &step->list) { |
179 | if (!cnt--) | 227 | if (!cnt--) |
180 | break; | 228 | break; |
181 | cbm(cpu, node); | 229 | |
230 | trace_cpuhp_multi_enter(cpu, st->target, state, cbm, node); | ||
231 | ret = cbm(cpu, node); | ||
232 | trace_cpuhp_exit(cpu, st->state, state, ret); | ||
233 | /* | ||
234 | * Rollback must not fail, | ||
235 | */ | ||
236 | WARN_ON_ONCE(ret); | ||
182 | } | 237 | } |
183 | return ret; | 238 | return ret; |
184 | } | 239 | } |
185 | 240 | ||
186 | #ifdef CONFIG_SMP | 241 | #ifdef CONFIG_SMP |
242 | static inline void wait_for_ap_thread(struct cpuhp_cpu_state *st, bool bringup) | ||
243 | { | ||
244 | struct completion *done = bringup ? &st->done_up : &st->done_down; | ||
245 | wait_for_completion(done); | ||
246 | } | ||
247 | |||
248 | static inline void complete_ap_thread(struct cpuhp_cpu_state *st, bool bringup) | ||
249 | { | ||
250 | struct completion *done = bringup ? &st->done_up : &st->done_down; | ||
251 | complete(done); | ||
252 | } | ||
253 | |||
254 | /* | ||
255 | * The former STARTING/DYING states, ran with IRQs disabled and must not fail. | ||
256 | */ | ||
257 | static bool cpuhp_is_atomic_state(enum cpuhp_state state) | ||
258 | { | ||
259 | return CPUHP_AP_IDLE_DEAD <= state && state < CPUHP_AP_ONLINE; | ||
260 | } | ||
261 | |||
187 | /* Serializes the updates to cpu_online_mask, cpu_present_mask */ | 262 | /* Serializes the updates to cpu_online_mask, cpu_present_mask */ |
188 | static DEFINE_MUTEX(cpu_add_remove_lock); | 263 | static DEFINE_MUTEX(cpu_add_remove_lock); |
189 | bool cpuhp_tasks_frozen; | 264 | bool cpuhp_tasks_frozen; |
@@ -271,14 +346,79 @@ void cpu_hotplug_enable(void) | |||
271 | EXPORT_SYMBOL_GPL(cpu_hotplug_enable); | 346 | EXPORT_SYMBOL_GPL(cpu_hotplug_enable); |
272 | #endif /* CONFIG_HOTPLUG_CPU */ | 347 | #endif /* CONFIG_HOTPLUG_CPU */ |
273 | 348 | ||
274 | static void __cpuhp_kick_ap_work(struct cpuhp_cpu_state *st); | 349 | static inline enum cpuhp_state |
350 | cpuhp_set_state(struct cpuhp_cpu_state *st, enum cpuhp_state target) | ||
351 | { | ||
352 | enum cpuhp_state prev_state = st->state; | ||
353 | |||
354 | st->rollback = false; | ||
355 | st->last = NULL; | ||
356 | |||
357 | st->target = target; | ||
358 | st->single = false; | ||
359 | st->bringup = st->state < target; | ||
360 | |||
361 | return prev_state; | ||
362 | } | ||
363 | |||
364 | static inline void | ||
365 | cpuhp_reset_state(struct cpuhp_cpu_state *st, enum cpuhp_state prev_state) | ||
366 | { | ||
367 | st->rollback = true; | ||
368 | |||
369 | /* | ||
370 | * If we have st->last we need to undo partial multi_instance of this | ||
371 | * state first. Otherwise start undo at the previous state. | ||
372 | */ | ||
373 | if (!st->last) { | ||
374 | if (st->bringup) | ||
375 | st->state--; | ||
376 | else | ||
377 | st->state++; | ||
378 | } | ||
379 | |||
380 | st->target = prev_state; | ||
381 | st->bringup = !st->bringup; | ||
382 | } | ||
383 | |||
384 | /* Regular hotplug invocation of the AP hotplug thread */ | ||
385 | static void __cpuhp_kick_ap(struct cpuhp_cpu_state *st) | ||
386 | { | ||
387 | if (!st->single && st->state == st->target) | ||
388 | return; | ||
389 | |||
390 | st->result = 0; | ||
391 | /* | ||
392 | * Make sure the above stores are visible before should_run becomes | ||
393 | * true. Paired with the mb() above in cpuhp_thread_fun() | ||
394 | */ | ||
395 | smp_mb(); | ||
396 | st->should_run = true; | ||
397 | wake_up_process(st->thread); | ||
398 | wait_for_ap_thread(st, st->bringup); | ||
399 | } | ||
400 | |||
401 | static int cpuhp_kick_ap(struct cpuhp_cpu_state *st, enum cpuhp_state target) | ||
402 | { | ||
403 | enum cpuhp_state prev_state; | ||
404 | int ret; | ||
405 | |||
406 | prev_state = cpuhp_set_state(st, target); | ||
407 | __cpuhp_kick_ap(st); | ||
408 | if ((ret = st->result)) { | ||
409 | cpuhp_reset_state(st, prev_state); | ||
410 | __cpuhp_kick_ap(st); | ||
411 | } | ||
412 | |||
413 | return ret; | ||
414 | } | ||
275 | 415 | ||
276 | static int bringup_wait_for_ap(unsigned int cpu) | 416 | static int bringup_wait_for_ap(unsigned int cpu) |
277 | { | 417 | { |
278 | struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu); | 418 | struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu); |
279 | 419 | ||
280 | /* Wait for the CPU to reach CPUHP_AP_ONLINE_IDLE */ | 420 | /* Wait for the CPU to reach CPUHP_AP_ONLINE_IDLE */ |
281 | wait_for_completion(&st->done); | 421 | wait_for_ap_thread(st, true); |
282 | if (WARN_ON_ONCE((!cpu_online(cpu)))) | 422 | if (WARN_ON_ONCE((!cpu_online(cpu)))) |
283 | return -ECANCELED; | 423 | return -ECANCELED; |
284 | 424 | ||
@@ -286,12 +426,10 @@ static int bringup_wait_for_ap(unsigned int cpu) | |||
286 | stop_machine_unpark(cpu); | 426 | stop_machine_unpark(cpu); |
287 | kthread_unpark(st->thread); | 427 | kthread_unpark(st->thread); |
288 | 428 | ||
289 | /* Should we go further up ? */ | 429 | if (st->target <= CPUHP_AP_ONLINE_IDLE) |
290 | if (st->target > CPUHP_AP_ONLINE_IDLE) { | 430 | return 0; |
291 | __cpuhp_kick_ap_work(st); | 431 | |
292 | wait_for_completion(&st->done); | 432 | return cpuhp_kick_ap(st, st->target); |
293 | } | ||
294 | return st->result; | ||
295 | } | 433 | } |
296 | 434 | ||
297 | static int bringup_cpu(unsigned int cpu) | 435 | static int bringup_cpu(unsigned int cpu) |
@@ -317,32 +455,6 @@ static int bringup_cpu(unsigned int cpu) | |||
317 | /* | 455 | /* |
318 | * Hotplug state machine related functions | 456 | * Hotplug state machine related functions |
319 | */ | 457 | */ |
320 | static void undo_cpu_down(unsigned int cpu, struct cpuhp_cpu_state *st) | ||
321 | { | ||
322 | for (st->state++; st->state < st->target; st->state++) { | ||
323 | struct cpuhp_step *step = cpuhp_get_step(st->state); | ||
324 | |||
325 | if (!step->skip_onerr) | ||
326 | cpuhp_invoke_callback(cpu, st->state, true, NULL); | ||
327 | } | ||
328 | } | ||
329 | |||
330 | static int cpuhp_down_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st, | ||
331 | enum cpuhp_state target) | ||
332 | { | ||
333 | enum cpuhp_state prev_state = st->state; | ||
334 | int ret = 0; | ||
335 | |||
336 | for (; st->state > target; st->state--) { | ||
337 | ret = cpuhp_invoke_callback(cpu, st->state, false, NULL); | ||
338 | if (ret) { | ||
339 | st->target = prev_state; | ||
340 | undo_cpu_down(cpu, st); | ||
341 | break; | ||
342 | } | ||
343 | } | ||
344 | return ret; | ||
345 | } | ||
346 | 458 | ||
347 | static void undo_cpu_up(unsigned int cpu, struct cpuhp_cpu_state *st) | 459 | static void undo_cpu_up(unsigned int cpu, struct cpuhp_cpu_state *st) |
348 | { | 460 | { |
@@ -350,7 +462,7 @@ static void undo_cpu_up(unsigned int cpu, struct cpuhp_cpu_state *st) | |||
350 | struct cpuhp_step *step = cpuhp_get_step(st->state); | 462 | struct cpuhp_step *step = cpuhp_get_step(st->state); |
351 | 463 | ||
352 | if (!step->skip_onerr) | 464 | if (!step->skip_onerr) |
353 | cpuhp_invoke_callback(cpu, st->state, false, NULL); | 465 | cpuhp_invoke_callback(cpu, st->state, false, NULL, NULL); |
354 | } | 466 | } |
355 | } | 467 | } |
356 | 468 | ||
@@ -362,7 +474,7 @@ static int cpuhp_up_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st, | |||
362 | 474 | ||
363 | while (st->state < target) { | 475 | while (st->state < target) { |
364 | st->state++; | 476 | st->state++; |
365 | ret = cpuhp_invoke_callback(cpu, st->state, true, NULL); | 477 | ret = cpuhp_invoke_callback(cpu, st->state, true, NULL, NULL); |
366 | if (ret) { | 478 | if (ret) { |
367 | st->target = prev_state; | 479 | st->target = prev_state; |
368 | undo_cpu_up(cpu, st); | 480 | undo_cpu_up(cpu, st); |
@@ -379,7 +491,8 @@ static void cpuhp_create(unsigned int cpu) | |||
379 | { | 491 | { |
380 | struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu); | 492 | struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu); |
381 | 493 | ||
382 | init_completion(&st->done); | 494 | init_completion(&st->done_up); |
495 | init_completion(&st->done_down); | ||
383 | } | 496 | } |
384 | 497 | ||
385 | static int cpuhp_should_run(unsigned int cpu) | 498 | static int cpuhp_should_run(unsigned int cpu) |
@@ -389,69 +502,90 @@ static int cpuhp_should_run(unsigned int cpu) | |||
389 | return st->should_run; | 502 | return st->should_run; |
390 | } | 503 | } |
391 | 504 | ||
392 | /* Execute the teardown callbacks. Used to be CPU_DOWN_PREPARE */ | ||
393 | static int cpuhp_ap_offline(unsigned int cpu, struct cpuhp_cpu_state *st) | ||
394 | { | ||
395 | enum cpuhp_state target = max((int)st->target, CPUHP_TEARDOWN_CPU); | ||
396 | |||
397 | return cpuhp_down_callbacks(cpu, st, target); | ||
398 | } | ||
399 | |||
400 | /* Execute the online startup callbacks. Used to be CPU_ONLINE */ | ||
401 | static int cpuhp_ap_online(unsigned int cpu, struct cpuhp_cpu_state *st) | ||
402 | { | ||
403 | return cpuhp_up_callbacks(cpu, st, st->target); | ||
404 | } | ||
405 | |||
406 | /* | 505 | /* |
407 | * Execute teardown/startup callbacks on the plugged cpu. Also used to invoke | 506 | * Execute teardown/startup callbacks on the plugged cpu. Also used to invoke |
408 | * callbacks when a state gets [un]installed at runtime. | 507 | * callbacks when a state gets [un]installed at runtime. |
508 | * | ||
509 | * Each invocation of this function by the smpboot thread does a single AP | ||
510 | * state callback. | ||
511 | * | ||
512 | * It has 3 modes of operation: | ||
513 | * - single: runs st->cb_state | ||
514 | * - up: runs ++st->state, while st->state < st->target | ||
515 | * - down: runs st->state--, while st->state > st->target | ||
516 | * | ||
517 | * When complete or on error, should_run is cleared and the completion is fired. | ||
409 | */ | 518 | */ |
410 | static void cpuhp_thread_fun(unsigned int cpu) | 519 | static void cpuhp_thread_fun(unsigned int cpu) |
411 | { | 520 | { |
412 | struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state); | 521 | struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state); |
413 | int ret = 0; | 522 | bool bringup = st->bringup; |
523 | enum cpuhp_state state; | ||
414 | 524 | ||
415 | /* | 525 | /* |
416 | * Paired with the mb() in cpuhp_kick_ap_work and | 526 | * ACQUIRE for the cpuhp_should_run() load of ->should_run. Ensures |
417 | * cpuhp_invoke_ap_callback, so the work set is consistent visible. | 527 | * that if we see ->should_run we also see the rest of the state. |
418 | */ | 528 | */ |
419 | smp_mb(); | 529 | smp_mb(); |
420 | if (!st->should_run) | 530 | |
531 | if (WARN_ON_ONCE(!st->should_run)) | ||
421 | return; | 532 | return; |
422 | 533 | ||
423 | st->should_run = false; | 534 | cpuhp_lock_acquire(bringup); |
424 | 535 | ||
425 | lock_map_acquire(&cpuhp_state_lock_map); | ||
426 | /* Single callback invocation for [un]install ? */ | ||
427 | if (st->single) { | 536 | if (st->single) { |
428 | if (st->cb_state < CPUHP_AP_ONLINE) { | 537 | state = st->cb_state; |
429 | local_irq_disable(); | 538 | st->should_run = false; |
430 | ret = cpuhp_invoke_callback(cpu, st->cb_state, | 539 | } else { |
431 | st->bringup, st->node); | 540 | if (bringup) { |
432 | local_irq_enable(); | 541 | st->state++; |
542 | state = st->state; | ||
543 | st->should_run = (st->state < st->target); | ||
544 | WARN_ON_ONCE(st->state > st->target); | ||
433 | } else { | 545 | } else { |
434 | ret = cpuhp_invoke_callback(cpu, st->cb_state, | 546 | state = st->state; |
435 | st->bringup, st->node); | 547 | st->state--; |
548 | st->should_run = (st->state > st->target); | ||
549 | WARN_ON_ONCE(st->state < st->target); | ||
436 | } | 550 | } |
437 | } else if (st->rollback) { | 551 | } |
438 | BUG_ON(st->state < CPUHP_AP_ONLINE_IDLE); | 552 | |
553 | WARN_ON_ONCE(!cpuhp_is_ap_state(state)); | ||
554 | |||
555 | if (st->rollback) { | ||
556 | struct cpuhp_step *step = cpuhp_get_step(state); | ||
557 | if (step->skip_onerr) | ||
558 | goto next; | ||
559 | } | ||
560 | |||
561 | if (cpuhp_is_atomic_state(state)) { | ||
562 | local_irq_disable(); | ||
563 | st->result = cpuhp_invoke_callback(cpu, state, bringup, st->node, &st->last); | ||
564 | local_irq_enable(); | ||
439 | 565 | ||
440 | undo_cpu_down(cpu, st); | 566 | /* |
441 | st->rollback = false; | 567 | * STARTING/DYING must not fail! |
568 | */ | ||
569 | WARN_ON_ONCE(st->result); | ||
442 | } else { | 570 | } else { |
443 | /* Cannot happen .... */ | 571 | st->result = cpuhp_invoke_callback(cpu, state, bringup, st->node, &st->last); |
444 | BUG_ON(st->state < CPUHP_AP_ONLINE_IDLE); | 572 | } |
445 | 573 | ||
446 | /* Regular hotplug work */ | 574 | if (st->result) { |
447 | if (st->state < st->target) | 575 | /* |
448 | ret = cpuhp_ap_online(cpu, st); | 576 | * If we fail on a rollback, we're up a creek without no |
449 | else if (st->state > st->target) | 577 | * paddle, no way forward, no way back. We loose, thanks for |
450 | ret = cpuhp_ap_offline(cpu, st); | 578 | * playing. |
579 | */ | ||
580 | WARN_ON_ONCE(st->rollback); | ||
581 | st->should_run = false; | ||
451 | } | 582 | } |
452 | lock_map_release(&cpuhp_state_lock_map); | 583 | |
453 | st->result = ret; | 584 | next: |
454 | complete(&st->done); | 585 | cpuhp_lock_release(bringup); |
586 | |||
587 | if (!st->should_run) | ||
588 | complete_ap_thread(st, bringup); | ||
455 | } | 589 | } |
456 | 590 | ||
457 | /* Invoke a single callback on a remote cpu */ | 591 | /* Invoke a single callback on a remote cpu */ |
@@ -460,62 +594,64 @@ cpuhp_invoke_ap_callback(int cpu, enum cpuhp_state state, bool bringup, | |||
460 | struct hlist_node *node) | 594 | struct hlist_node *node) |
461 | { | 595 | { |
462 | struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu); | 596 | struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu); |
597 | int ret; | ||
463 | 598 | ||
464 | if (!cpu_online(cpu)) | 599 | if (!cpu_online(cpu)) |
465 | return 0; | 600 | return 0; |
466 | 601 | ||
467 | lock_map_acquire(&cpuhp_state_lock_map); | 602 | cpuhp_lock_acquire(false); |
468 | lock_map_release(&cpuhp_state_lock_map); | 603 | cpuhp_lock_release(false); |
604 | |||
605 | cpuhp_lock_acquire(true); | ||
606 | cpuhp_lock_release(true); | ||
469 | 607 | ||
470 | /* | 608 | /* |
471 | * If we are up and running, use the hotplug thread. For early calls | 609 | * If we are up and running, use the hotplug thread. For early calls |
472 | * we invoke the thread function directly. | 610 | * we invoke the thread function directly. |
473 | */ | 611 | */ |
474 | if (!st->thread) | 612 | if (!st->thread) |
475 | return cpuhp_invoke_callback(cpu, state, bringup, node); | 613 | return cpuhp_invoke_callback(cpu, state, bringup, node, NULL); |
614 | |||
615 | st->rollback = false; | ||
616 | st->last = NULL; | ||
476 | 617 | ||
618 | st->node = node; | ||
619 | st->bringup = bringup; | ||
477 | st->cb_state = state; | 620 | st->cb_state = state; |
478 | st->single = true; | 621 | st->single = true; |
479 | st->bringup = bringup; | ||
480 | st->node = node; | ||
481 | 622 | ||
482 | /* | 623 | __cpuhp_kick_ap(st); |
483 | * Make sure the above stores are visible before should_run becomes | ||
484 | * true. Paired with the mb() above in cpuhp_thread_fun() | ||
485 | */ | ||
486 | smp_mb(); | ||
487 | st->should_run = true; | ||
488 | wake_up_process(st->thread); | ||
489 | wait_for_completion(&st->done); | ||
490 | return st->result; | ||
491 | } | ||
492 | 624 | ||
493 | /* Regular hotplug invocation of the AP hotplug thread */ | ||
494 | static void __cpuhp_kick_ap_work(struct cpuhp_cpu_state *st) | ||
495 | { | ||
496 | st->result = 0; | ||
497 | st->single = false; | ||
498 | /* | 625 | /* |
499 | * Make sure the above stores are visible before should_run becomes | 626 | * If we failed and did a partial, do a rollback. |
500 | * true. Paired with the mb() above in cpuhp_thread_fun() | ||
501 | */ | 627 | */ |
502 | smp_mb(); | 628 | if ((ret = st->result) && st->last) { |
503 | st->should_run = true; | 629 | st->rollback = true; |
504 | wake_up_process(st->thread); | 630 | st->bringup = !bringup; |
631 | |||
632 | __cpuhp_kick_ap(st); | ||
633 | } | ||
634 | |||
635 | return ret; | ||
505 | } | 636 | } |
506 | 637 | ||
507 | static int cpuhp_kick_ap_work(unsigned int cpu) | 638 | static int cpuhp_kick_ap_work(unsigned int cpu) |
508 | { | 639 | { |
509 | struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu); | 640 | struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu); |
510 | enum cpuhp_state state = st->state; | 641 | enum cpuhp_state prev_state = st->state; |
642 | int ret; | ||
643 | |||
644 | cpuhp_lock_acquire(false); | ||
645 | cpuhp_lock_release(false); | ||
511 | 646 | ||
512 | trace_cpuhp_enter(cpu, st->target, state, cpuhp_kick_ap_work); | 647 | cpuhp_lock_acquire(true); |
513 | lock_map_acquire(&cpuhp_state_lock_map); | 648 | cpuhp_lock_release(true); |
514 | lock_map_release(&cpuhp_state_lock_map); | 649 | |
515 | __cpuhp_kick_ap_work(st); | 650 | trace_cpuhp_enter(cpu, st->target, prev_state, cpuhp_kick_ap_work); |
516 | wait_for_completion(&st->done); | 651 | ret = cpuhp_kick_ap(st, st->target); |
517 | trace_cpuhp_exit(cpu, st->state, state, st->result); | 652 | trace_cpuhp_exit(cpu, st->state, prev_state, ret); |
518 | return st->result; | 653 | |
654 | return ret; | ||
519 | } | 655 | } |
520 | 656 | ||
521 | static struct smp_hotplug_thread cpuhp_threads = { | 657 | static struct smp_hotplug_thread cpuhp_threads = { |
@@ -581,6 +717,7 @@ static int take_cpu_down(void *_param) | |||
581 | struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state); | 717 | struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state); |
582 | enum cpuhp_state target = max((int)st->target, CPUHP_AP_OFFLINE); | 718 | enum cpuhp_state target = max((int)st->target, CPUHP_AP_OFFLINE); |
583 | int err, cpu = smp_processor_id(); | 719 | int err, cpu = smp_processor_id(); |
720 | int ret; | ||
584 | 721 | ||
585 | /* Ensure this CPU doesn't handle any more interrupts. */ | 722 | /* Ensure this CPU doesn't handle any more interrupts. */ |
586 | err = __cpu_disable(); | 723 | err = __cpu_disable(); |
@@ -594,8 +731,13 @@ static int take_cpu_down(void *_param) | |||
594 | WARN_ON(st->state != CPUHP_TEARDOWN_CPU); | 731 | WARN_ON(st->state != CPUHP_TEARDOWN_CPU); |
595 | st->state--; | 732 | st->state--; |
596 | /* Invoke the former CPU_DYING callbacks */ | 733 | /* Invoke the former CPU_DYING callbacks */ |
597 | for (; st->state > target; st->state--) | 734 | for (; st->state > target; st->state--) { |
598 | cpuhp_invoke_callback(cpu, st->state, false, NULL); | 735 | ret = cpuhp_invoke_callback(cpu, st->state, false, NULL, NULL); |
736 | /* | ||
737 | * DYING must not fail! | ||
738 | */ | ||
739 | WARN_ON_ONCE(ret); | ||
740 | } | ||
599 | 741 | ||
600 | /* Give up timekeeping duties */ | 742 | /* Give up timekeeping duties */ |
601 | tick_handover_do_timer(); | 743 | tick_handover_do_timer(); |
@@ -639,7 +781,7 @@ static int takedown_cpu(unsigned int cpu) | |||
639 | * | 781 | * |
640 | * Wait for the stop thread to go away. | 782 | * Wait for the stop thread to go away. |
641 | */ | 783 | */ |
642 | wait_for_completion(&st->done); | 784 | wait_for_ap_thread(st, false); |
643 | BUG_ON(st->state != CPUHP_AP_IDLE_DEAD); | 785 | BUG_ON(st->state != CPUHP_AP_IDLE_DEAD); |
644 | 786 | ||
645 | /* Interrupts are moved away from the dying cpu, reenable alloc/free */ | 787 | /* Interrupts are moved away from the dying cpu, reenable alloc/free */ |
@@ -658,7 +800,7 @@ static void cpuhp_complete_idle_dead(void *arg) | |||
658 | { | 800 | { |
659 | struct cpuhp_cpu_state *st = arg; | 801 | struct cpuhp_cpu_state *st = arg; |
660 | 802 | ||
661 | complete(&st->done); | 803 | complete_ap_thread(st, false); |
662 | } | 804 | } |
663 | 805 | ||
664 | void cpuhp_report_idle_dead(void) | 806 | void cpuhp_report_idle_dead(void) |
@@ -676,11 +818,32 @@ void cpuhp_report_idle_dead(void) | |||
676 | cpuhp_complete_idle_dead, st, 0); | 818 | cpuhp_complete_idle_dead, st, 0); |
677 | } | 819 | } |
678 | 820 | ||
679 | #else | 821 | static void undo_cpu_down(unsigned int cpu, struct cpuhp_cpu_state *st) |
680 | #define takedown_cpu NULL | 822 | { |
681 | #endif | 823 | for (st->state++; st->state < st->target; st->state++) { |
824 | struct cpuhp_step *step = cpuhp_get_step(st->state); | ||
682 | 825 | ||
683 | #ifdef CONFIG_HOTPLUG_CPU | 826 | if (!step->skip_onerr) |
827 | cpuhp_invoke_callback(cpu, st->state, true, NULL, NULL); | ||
828 | } | ||
829 | } | ||
830 | |||
831 | static int cpuhp_down_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st, | ||
832 | enum cpuhp_state target) | ||
833 | { | ||
834 | enum cpuhp_state prev_state = st->state; | ||
835 | int ret = 0; | ||
836 | |||
837 | for (; st->state > target; st->state--) { | ||
838 | ret = cpuhp_invoke_callback(cpu, st->state, false, NULL, NULL); | ||
839 | if (ret) { | ||
840 | st->target = prev_state; | ||
841 | undo_cpu_down(cpu, st); | ||
842 | break; | ||
843 | } | ||
844 | } | ||
845 | return ret; | ||
846 | } | ||
684 | 847 | ||
685 | /* Requires cpu_add_remove_lock to be held */ | 848 | /* Requires cpu_add_remove_lock to be held */ |
686 | static int __ref _cpu_down(unsigned int cpu, int tasks_frozen, | 849 | static int __ref _cpu_down(unsigned int cpu, int tasks_frozen, |
@@ -699,13 +862,13 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen, | |||
699 | 862 | ||
700 | cpuhp_tasks_frozen = tasks_frozen; | 863 | cpuhp_tasks_frozen = tasks_frozen; |
701 | 864 | ||
702 | prev_state = st->state; | 865 | prev_state = cpuhp_set_state(st, target); |
703 | st->target = target; | ||
704 | /* | 866 | /* |
705 | * If the current CPU state is in the range of the AP hotplug thread, | 867 | * If the current CPU state is in the range of the AP hotplug thread, |
706 | * then we need to kick the thread. | 868 | * then we need to kick the thread. |
707 | */ | 869 | */ |
708 | if (st->state > CPUHP_TEARDOWN_CPU) { | 870 | if (st->state > CPUHP_TEARDOWN_CPU) { |
871 | st->target = max((int)target, CPUHP_TEARDOWN_CPU); | ||
709 | ret = cpuhp_kick_ap_work(cpu); | 872 | ret = cpuhp_kick_ap_work(cpu); |
710 | /* | 873 | /* |
711 | * The AP side has done the error rollback already. Just | 874 | * The AP side has done the error rollback already. Just |
@@ -720,6 +883,8 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen, | |||
720 | */ | 883 | */ |
721 | if (st->state > CPUHP_TEARDOWN_CPU) | 884 | if (st->state > CPUHP_TEARDOWN_CPU) |
722 | goto out; | 885 | goto out; |
886 | |||
887 | st->target = target; | ||
723 | } | 888 | } |
724 | /* | 889 | /* |
725 | * The AP brought itself down to CPUHP_TEARDOWN_CPU. So we need | 890 | * The AP brought itself down to CPUHP_TEARDOWN_CPU. So we need |
@@ -727,13 +892,17 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen, | |||
727 | */ | 892 | */ |
728 | ret = cpuhp_down_callbacks(cpu, st, target); | 893 | ret = cpuhp_down_callbacks(cpu, st, target); |
729 | if (ret && st->state > CPUHP_TEARDOWN_CPU && st->state < prev_state) { | 894 | if (ret && st->state > CPUHP_TEARDOWN_CPU && st->state < prev_state) { |
730 | st->target = prev_state; | 895 | cpuhp_reset_state(st, prev_state); |
731 | st->rollback = true; | 896 | __cpuhp_kick_ap(st); |
732 | cpuhp_kick_ap_work(cpu); | ||
733 | } | 897 | } |
734 | 898 | ||
735 | out: | 899 | out: |
736 | cpus_write_unlock(); | 900 | cpus_write_unlock(); |
901 | /* | ||
902 | * Do post unplug cleanup. This is still protected against | ||
903 | * concurrent CPU hotplug via cpu_add_remove_lock. | ||
904 | */ | ||
905 | lockup_detector_cleanup(); | ||
737 | return ret; | 906 | return ret; |
738 | } | 907 | } |
739 | 908 | ||
@@ -754,11 +923,15 @@ out: | |||
754 | cpu_maps_update_done(); | 923 | cpu_maps_update_done(); |
755 | return err; | 924 | return err; |
756 | } | 925 | } |
926 | |||
757 | int cpu_down(unsigned int cpu) | 927 | int cpu_down(unsigned int cpu) |
758 | { | 928 | { |
759 | return do_cpu_down(cpu, CPUHP_OFFLINE); | 929 | return do_cpu_down(cpu, CPUHP_OFFLINE); |
760 | } | 930 | } |
761 | EXPORT_SYMBOL(cpu_down); | 931 | EXPORT_SYMBOL(cpu_down); |
932 | |||
933 | #else | ||
934 | #define takedown_cpu NULL | ||
762 | #endif /*CONFIG_HOTPLUG_CPU*/ | 935 | #endif /*CONFIG_HOTPLUG_CPU*/ |
763 | 936 | ||
764 | /** | 937 | /** |
@@ -772,11 +945,16 @@ void notify_cpu_starting(unsigned int cpu) | |||
772 | { | 945 | { |
773 | struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu); | 946 | struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu); |
774 | enum cpuhp_state target = min((int)st->target, CPUHP_AP_ONLINE); | 947 | enum cpuhp_state target = min((int)st->target, CPUHP_AP_ONLINE); |
948 | int ret; | ||
775 | 949 | ||
776 | rcu_cpu_starting(cpu); /* Enables RCU usage on this CPU. */ | 950 | rcu_cpu_starting(cpu); /* Enables RCU usage on this CPU. */ |
777 | while (st->state < target) { | 951 | while (st->state < target) { |
778 | st->state++; | 952 | st->state++; |
779 | cpuhp_invoke_callback(cpu, st->state, true, NULL); | 953 | ret = cpuhp_invoke_callback(cpu, st->state, true, NULL, NULL); |
954 | /* | ||
955 | * STARTING must not fail! | ||
956 | */ | ||
957 | WARN_ON_ONCE(ret); | ||
780 | } | 958 | } |
781 | } | 959 | } |
782 | 960 | ||
@@ -794,7 +972,7 @@ void cpuhp_online_idle(enum cpuhp_state state) | |||
794 | return; | 972 | return; |
795 | 973 | ||
796 | st->state = CPUHP_AP_ONLINE_IDLE; | 974 | st->state = CPUHP_AP_ONLINE_IDLE; |
797 | complete(&st->done); | 975 | complete_ap_thread(st, true); |
798 | } | 976 | } |
799 | 977 | ||
800 | /* Requires cpu_add_remove_lock to be held */ | 978 | /* Requires cpu_add_remove_lock to be held */ |
@@ -829,7 +1007,7 @@ static int _cpu_up(unsigned int cpu, int tasks_frozen, enum cpuhp_state target) | |||
829 | 1007 | ||
830 | cpuhp_tasks_frozen = tasks_frozen; | 1008 | cpuhp_tasks_frozen = tasks_frozen; |
831 | 1009 | ||
832 | st->target = target; | 1010 | cpuhp_set_state(st, target); |
833 | /* | 1011 | /* |
834 | * If the current CPU state is in the range of the AP hotplug thread, | 1012 | * If the current CPU state is in the range of the AP hotplug thread, |
835 | * then we need to kick the thread once more. | 1013 | * then we need to kick the thread once more. |
@@ -1296,6 +1474,10 @@ static int cpuhp_issue_call(int cpu, enum cpuhp_state state, bool bringup, | |||
1296 | struct cpuhp_step *sp = cpuhp_get_step(state); | 1474 | struct cpuhp_step *sp = cpuhp_get_step(state); |
1297 | int ret; | 1475 | int ret; |
1298 | 1476 | ||
1477 | /* | ||
1478 | * If there's nothing to do, we done. | ||
1479 | * Relies on the union for multi_instance. | ||
1480 | */ | ||
1299 | if ((bringup && !sp->startup.single) || | 1481 | if ((bringup && !sp->startup.single) || |
1300 | (!bringup && !sp->teardown.single)) | 1482 | (!bringup && !sp->teardown.single)) |
1301 | return 0; | 1483 | return 0; |
@@ -1307,9 +1489,9 @@ static int cpuhp_issue_call(int cpu, enum cpuhp_state state, bool bringup, | |||
1307 | if (cpuhp_is_ap_state(state)) | 1489 | if (cpuhp_is_ap_state(state)) |
1308 | ret = cpuhp_invoke_ap_callback(cpu, state, bringup, node); | 1490 | ret = cpuhp_invoke_ap_callback(cpu, state, bringup, node); |
1309 | else | 1491 | else |
1310 | ret = cpuhp_invoke_callback(cpu, state, bringup, node); | 1492 | ret = cpuhp_invoke_callback(cpu, state, bringup, node, NULL); |
1311 | #else | 1493 | #else |
1312 | ret = cpuhp_invoke_callback(cpu, state, bringup, node); | 1494 | ret = cpuhp_invoke_callback(cpu, state, bringup, node, NULL); |
1313 | #endif | 1495 | #endif |
1314 | BUG_ON(ret && !bringup); | 1496 | BUG_ON(ret && !bringup); |
1315 | return ret; | 1497 | return ret; |
@@ -1641,9 +1823,55 @@ static ssize_t show_cpuhp_target(struct device *dev, | |||
1641 | } | 1823 | } |
1642 | static DEVICE_ATTR(target, 0644, show_cpuhp_target, write_cpuhp_target); | 1824 | static DEVICE_ATTR(target, 0644, show_cpuhp_target, write_cpuhp_target); |
1643 | 1825 | ||
1826 | |||
1827 | static ssize_t write_cpuhp_fail(struct device *dev, | ||
1828 | struct device_attribute *attr, | ||
1829 | const char *buf, size_t count) | ||
1830 | { | ||
1831 | struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id); | ||
1832 | struct cpuhp_step *sp; | ||
1833 | int fail, ret; | ||
1834 | |||
1835 | ret = kstrtoint(buf, 10, &fail); | ||
1836 | if (ret) | ||
1837 | return ret; | ||
1838 | |||
1839 | /* | ||
1840 | * Cannot fail STARTING/DYING callbacks. | ||
1841 | */ | ||
1842 | if (cpuhp_is_atomic_state(fail)) | ||
1843 | return -EINVAL; | ||
1844 | |||
1845 | /* | ||
1846 | * Cannot fail anything that doesn't have callbacks. | ||
1847 | */ | ||
1848 | mutex_lock(&cpuhp_state_mutex); | ||
1849 | sp = cpuhp_get_step(fail); | ||
1850 | if (!sp->startup.single && !sp->teardown.single) | ||
1851 | ret = -EINVAL; | ||
1852 | mutex_unlock(&cpuhp_state_mutex); | ||
1853 | if (ret) | ||
1854 | return ret; | ||
1855 | |||
1856 | st->fail = fail; | ||
1857 | |||
1858 | return count; | ||
1859 | } | ||
1860 | |||
1861 | static ssize_t show_cpuhp_fail(struct device *dev, | ||
1862 | struct device_attribute *attr, char *buf) | ||
1863 | { | ||
1864 | struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id); | ||
1865 | |||
1866 | return sprintf(buf, "%d\n", st->fail); | ||
1867 | } | ||
1868 | |||
1869 | static DEVICE_ATTR(fail, 0644, show_cpuhp_fail, write_cpuhp_fail); | ||
1870 | |||
1644 | static struct attribute *cpuhp_cpu_attrs[] = { | 1871 | static struct attribute *cpuhp_cpu_attrs[] = { |
1645 | &dev_attr_state.attr, | 1872 | &dev_attr_state.attr, |
1646 | &dev_attr_target.attr, | 1873 | &dev_attr_target.attr, |
1874 | &dev_attr_fail.attr, | ||
1647 | NULL | 1875 | NULL |
1648 | }; | 1876 | }; |
1649 | 1877 | ||
diff --git a/kernel/events/core.c b/kernel/events/core.c index 3e691b75b2db..6bc21e202ae4 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c | |||
@@ -8171,6 +8171,7 @@ static int perf_event_set_bpf_prog(struct perf_event *event, u32 prog_fd) | |||
8171 | } | 8171 | } |
8172 | } | 8172 | } |
8173 | event->tp_event->prog = prog; | 8173 | event->tp_event->prog = prog; |
8174 | event->tp_event->bpf_prog_owner = event; | ||
8174 | 8175 | ||
8175 | return 0; | 8176 | return 0; |
8176 | } | 8177 | } |
@@ -8185,7 +8186,7 @@ static void perf_event_free_bpf_prog(struct perf_event *event) | |||
8185 | return; | 8186 | return; |
8186 | 8187 | ||
8187 | prog = event->tp_event->prog; | 8188 | prog = event->tp_event->prog; |
8188 | if (prog) { | 8189 | if (prog && event->tp_event->bpf_prog_owner == event) { |
8189 | event->tp_event->prog = NULL; | 8190 | event->tp_event->prog = NULL; |
8190 | bpf_prog_put(prog); | 8191 | bpf_prog_put(prog); |
8191 | } | 8192 | } |
diff --git a/kernel/events/ring_buffer.c b/kernel/events/ring_buffer.c index af71a84e12ee..f684d8e5fa2b 100644 --- a/kernel/events/ring_buffer.c +++ b/kernel/events/ring_buffer.c | |||
@@ -412,6 +412,19 @@ err: | |||
412 | return NULL; | 412 | return NULL; |
413 | } | 413 | } |
414 | 414 | ||
415 | static bool __always_inline rb_need_aux_wakeup(struct ring_buffer *rb) | ||
416 | { | ||
417 | if (rb->aux_overwrite) | ||
418 | return false; | ||
419 | |||
420 | if (rb->aux_head - rb->aux_wakeup >= rb->aux_watermark) { | ||
421 | rb->aux_wakeup = rounddown(rb->aux_head, rb->aux_watermark); | ||
422 | return true; | ||
423 | } | ||
424 | |||
425 | return false; | ||
426 | } | ||
427 | |||
415 | /* | 428 | /* |
416 | * Commit the data written by hardware into the ring buffer by adjusting | 429 | * Commit the data written by hardware into the ring buffer by adjusting |
417 | * aux_head and posting a PERF_RECORD_AUX into the perf buffer. It is the | 430 | * aux_head and posting a PERF_RECORD_AUX into the perf buffer. It is the |
@@ -451,10 +464,8 @@ void perf_aux_output_end(struct perf_output_handle *handle, unsigned long size) | |||
451 | } | 464 | } |
452 | 465 | ||
453 | rb->user_page->aux_head = rb->aux_head; | 466 | rb->user_page->aux_head = rb->aux_head; |
454 | if (rb->aux_head - rb->aux_wakeup >= rb->aux_watermark) { | 467 | if (rb_need_aux_wakeup(rb)) |
455 | wakeup = true; | 468 | wakeup = true; |
456 | rb->aux_wakeup = rounddown(rb->aux_head, rb->aux_watermark); | ||
457 | } | ||
458 | 469 | ||
459 | if (wakeup) { | 470 | if (wakeup) { |
460 | if (handle->aux_flags & PERF_AUX_FLAG_TRUNCATED) | 471 | if (handle->aux_flags & PERF_AUX_FLAG_TRUNCATED) |
@@ -484,9 +495,8 @@ int perf_aux_output_skip(struct perf_output_handle *handle, unsigned long size) | |||
484 | rb->aux_head += size; | 495 | rb->aux_head += size; |
485 | 496 | ||
486 | rb->user_page->aux_head = rb->aux_head; | 497 | rb->user_page->aux_head = rb->aux_head; |
487 | if (rb->aux_head - rb->aux_wakeup >= rb->aux_watermark) { | 498 | if (rb_need_aux_wakeup(rb)) { |
488 | perf_output_wakeup(handle); | 499 | perf_output_wakeup(handle); |
489 | rb->aux_wakeup = rounddown(rb->aux_head, rb->aux_watermark); | ||
490 | handle->wakeup = rb->aux_wakeup + rb->aux_watermark; | 500 | handle->wakeup = rb->aux_wakeup + rb->aux_watermark; |
491 | } | 501 | } |
492 | 502 | ||
diff --git a/kernel/exit.c b/kernel/exit.c index 3481ababd06a..f2cd53e92147 100644 --- a/kernel/exit.c +++ b/kernel/exit.c | |||
@@ -1600,12 +1600,10 @@ SYSCALL_DEFINE5(waitid, int, which, pid_t, upid, struct siginfo __user *, | |||
1600 | struct waitid_info info = {.status = 0}; | 1600 | struct waitid_info info = {.status = 0}; |
1601 | long err = kernel_waitid(which, upid, &info, options, ru ? &r : NULL); | 1601 | long err = kernel_waitid(which, upid, &info, options, ru ? &r : NULL); |
1602 | int signo = 0; | 1602 | int signo = 0; |
1603 | |||
1603 | if (err > 0) { | 1604 | if (err > 0) { |
1604 | signo = SIGCHLD; | 1605 | signo = SIGCHLD; |
1605 | err = 0; | 1606 | err = 0; |
1606 | } | ||
1607 | |||
1608 | if (!err) { | ||
1609 | if (ru && copy_to_user(ru, &r, sizeof(struct rusage))) | 1607 | if (ru && copy_to_user(ru, &r, sizeof(struct rusage))) |
1610 | return -EFAULT; | 1608 | return -EFAULT; |
1611 | } | 1609 | } |
@@ -1723,16 +1721,15 @@ COMPAT_SYSCALL_DEFINE5(waitid, | |||
1723 | if (err > 0) { | 1721 | if (err > 0) { |
1724 | signo = SIGCHLD; | 1722 | signo = SIGCHLD; |
1725 | err = 0; | 1723 | err = 0; |
1726 | } | 1724 | if (uru) { |
1727 | 1725 | /* kernel_waitid() overwrites everything in ru */ | |
1728 | if (!err && uru) { | 1726 | if (COMPAT_USE_64BIT_TIME) |
1729 | /* kernel_waitid() overwrites everything in ru */ | 1727 | err = copy_to_user(uru, &ru, sizeof(ru)); |
1730 | if (COMPAT_USE_64BIT_TIME) | 1728 | else |
1731 | err = copy_to_user(uru, &ru, sizeof(ru)); | 1729 | err = put_compat_rusage(&ru, uru); |
1732 | else | 1730 | if (err) |
1733 | err = put_compat_rusage(&ru, uru); | 1731 | return -EFAULT; |
1734 | if (err) | 1732 | } |
1735 | return -EFAULT; | ||
1736 | } | 1733 | } |
1737 | 1734 | ||
1738 | if (!infop) | 1735 | if (!infop) |
diff --git a/kernel/extable.c b/kernel/extable.c index 38c2412401a1..9aa1cc41ecf7 100644 --- a/kernel/extable.c +++ b/kernel/extable.c | |||
@@ -102,15 +102,7 @@ int core_kernel_data(unsigned long addr) | |||
102 | 102 | ||
103 | int __kernel_text_address(unsigned long addr) | 103 | int __kernel_text_address(unsigned long addr) |
104 | { | 104 | { |
105 | if (core_kernel_text(addr)) | 105 | if (kernel_text_address(addr)) |
106 | return 1; | ||
107 | if (is_module_text_address(addr)) | ||
108 | return 1; | ||
109 | if (is_ftrace_trampoline(addr)) | ||
110 | return 1; | ||
111 | if (is_kprobe_optinsn_slot(addr) || is_kprobe_insn_slot(addr)) | ||
112 | return 1; | ||
113 | if (is_bpf_text_address(addr)) | ||
114 | return 1; | 106 | return 1; |
115 | /* | 107 | /* |
116 | * There might be init symbols in saved stacktraces. | 108 | * There might be init symbols in saved stacktraces. |
@@ -127,17 +119,42 @@ int __kernel_text_address(unsigned long addr) | |||
127 | 119 | ||
128 | int kernel_text_address(unsigned long addr) | 120 | int kernel_text_address(unsigned long addr) |
129 | { | 121 | { |
122 | bool no_rcu; | ||
123 | int ret = 1; | ||
124 | |||
130 | if (core_kernel_text(addr)) | 125 | if (core_kernel_text(addr)) |
131 | return 1; | 126 | return 1; |
127 | |||
128 | /* | ||
129 | * If a stack dump happens while RCU is not watching, then | ||
130 | * RCU needs to be notified that it requires to start | ||
131 | * watching again. This can happen either by tracing that | ||
132 | * triggers a stack trace, or a WARN() that happens during | ||
133 | * coming back from idle, or cpu on or offlining. | ||
134 | * | ||
135 | * is_module_text_address() as well as the kprobe slots | ||
136 | * and is_bpf_text_address() require RCU to be watching. | ||
137 | */ | ||
138 | no_rcu = !rcu_is_watching(); | ||
139 | |||
140 | /* Treat this like an NMI as it can happen anywhere */ | ||
141 | if (no_rcu) | ||
142 | rcu_nmi_enter(); | ||
143 | |||
132 | if (is_module_text_address(addr)) | 144 | if (is_module_text_address(addr)) |
133 | return 1; | 145 | goto out; |
134 | if (is_ftrace_trampoline(addr)) | 146 | if (is_ftrace_trampoline(addr)) |
135 | return 1; | 147 | goto out; |
136 | if (is_kprobe_optinsn_slot(addr) || is_kprobe_insn_slot(addr)) | 148 | if (is_kprobe_optinsn_slot(addr) || is_kprobe_insn_slot(addr)) |
137 | return 1; | 149 | goto out; |
138 | if (is_bpf_text_address(addr)) | 150 | if (is_bpf_text_address(addr)) |
139 | return 1; | 151 | goto out; |
140 | return 0; | 152 | ret = 0; |
153 | out: | ||
154 | if (no_rcu) | ||
155 | rcu_nmi_exit(); | ||
156 | |||
157 | return ret; | ||
141 | } | 158 | } |
142 | 159 | ||
143 | /* | 160 | /* |
diff --git a/kernel/fork.c b/kernel/fork.c index 10646182440f..e702cb9ffbd8 100644 --- a/kernel/fork.c +++ b/kernel/fork.c | |||
@@ -946,6 +946,24 @@ void mmput(struct mm_struct *mm) | |||
946 | } | 946 | } |
947 | EXPORT_SYMBOL_GPL(mmput); | 947 | EXPORT_SYMBOL_GPL(mmput); |
948 | 948 | ||
949 | #ifdef CONFIG_MMU | ||
950 | static void mmput_async_fn(struct work_struct *work) | ||
951 | { | ||
952 | struct mm_struct *mm = container_of(work, struct mm_struct, | ||
953 | async_put_work); | ||
954 | |||
955 | __mmput(mm); | ||
956 | } | ||
957 | |||
958 | void mmput_async(struct mm_struct *mm) | ||
959 | { | ||
960 | if (atomic_dec_and_test(&mm->mm_users)) { | ||
961 | INIT_WORK(&mm->async_put_work, mmput_async_fn); | ||
962 | schedule_work(&mm->async_put_work); | ||
963 | } | ||
964 | } | ||
965 | #endif | ||
966 | |||
949 | /** | 967 | /** |
950 | * set_mm_exe_file - change a reference to the mm's executable file | 968 | * set_mm_exe_file - change a reference to the mm's executable file |
951 | * | 969 | * |
diff --git a/kernel/futex.c b/kernel/futex.c index 3d38eaf05492..0518a0bfc746 100644 --- a/kernel/futex.c +++ b/kernel/futex.c | |||
@@ -821,8 +821,6 @@ static void get_pi_state(struct futex_pi_state *pi_state) | |||
821 | /* | 821 | /* |
822 | * Drops a reference to the pi_state object and frees or caches it | 822 | * Drops a reference to the pi_state object and frees or caches it |
823 | * when the last reference is gone. | 823 | * when the last reference is gone. |
824 | * | ||
825 | * Must be called with the hb lock held. | ||
826 | */ | 824 | */ |
827 | static void put_pi_state(struct futex_pi_state *pi_state) | 825 | static void put_pi_state(struct futex_pi_state *pi_state) |
828 | { | 826 | { |
@@ -837,16 +835,22 @@ static void put_pi_state(struct futex_pi_state *pi_state) | |||
837 | * and has cleaned up the pi_state already | 835 | * and has cleaned up the pi_state already |
838 | */ | 836 | */ |
839 | if (pi_state->owner) { | 837 | if (pi_state->owner) { |
840 | raw_spin_lock_irq(&pi_state->owner->pi_lock); | 838 | struct task_struct *owner; |
841 | list_del_init(&pi_state->list); | ||
842 | raw_spin_unlock_irq(&pi_state->owner->pi_lock); | ||
843 | 839 | ||
844 | rt_mutex_proxy_unlock(&pi_state->pi_mutex, pi_state->owner); | 840 | raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock); |
841 | owner = pi_state->owner; | ||
842 | if (owner) { | ||
843 | raw_spin_lock(&owner->pi_lock); | ||
844 | list_del_init(&pi_state->list); | ||
845 | raw_spin_unlock(&owner->pi_lock); | ||
846 | } | ||
847 | rt_mutex_proxy_unlock(&pi_state->pi_mutex, owner); | ||
848 | raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock); | ||
845 | } | 849 | } |
846 | 850 | ||
847 | if (current->pi_state_cache) | 851 | if (current->pi_state_cache) { |
848 | kfree(pi_state); | 852 | kfree(pi_state); |
849 | else { | 853 | } else { |
850 | /* | 854 | /* |
851 | * pi_state->list is already empty. | 855 | * pi_state->list is already empty. |
852 | * clear pi_state->owner. | 856 | * clear pi_state->owner. |
@@ -907,13 +911,14 @@ void exit_pi_state_list(struct task_struct *curr) | |||
907 | raw_spin_unlock_irq(&curr->pi_lock); | 911 | raw_spin_unlock_irq(&curr->pi_lock); |
908 | 912 | ||
909 | spin_lock(&hb->lock); | 913 | spin_lock(&hb->lock); |
910 | 914 | raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock); | |
911 | raw_spin_lock_irq(&curr->pi_lock); | 915 | raw_spin_lock(&curr->pi_lock); |
912 | /* | 916 | /* |
913 | * We dropped the pi-lock, so re-check whether this | 917 | * We dropped the pi-lock, so re-check whether this |
914 | * task still owns the PI-state: | 918 | * task still owns the PI-state: |
915 | */ | 919 | */ |
916 | if (head->next != next) { | 920 | if (head->next != next) { |
921 | raw_spin_unlock(&pi_state->pi_mutex.wait_lock); | ||
917 | spin_unlock(&hb->lock); | 922 | spin_unlock(&hb->lock); |
918 | continue; | 923 | continue; |
919 | } | 924 | } |
@@ -922,9 +927,10 @@ void exit_pi_state_list(struct task_struct *curr) | |||
922 | WARN_ON(list_empty(&pi_state->list)); | 927 | WARN_ON(list_empty(&pi_state->list)); |
923 | list_del_init(&pi_state->list); | 928 | list_del_init(&pi_state->list); |
924 | pi_state->owner = NULL; | 929 | pi_state->owner = NULL; |
925 | raw_spin_unlock_irq(&curr->pi_lock); | 930 | raw_spin_unlock(&curr->pi_lock); |
926 | 931 | ||
927 | get_pi_state(pi_state); | 932 | get_pi_state(pi_state); |
933 | raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock); | ||
928 | spin_unlock(&hb->lock); | 934 | spin_unlock(&hb->lock); |
929 | 935 | ||
930 | rt_mutex_futex_unlock(&pi_state->pi_mutex); | 936 | rt_mutex_futex_unlock(&pi_state->pi_mutex); |
@@ -1208,6 +1214,10 @@ static int attach_to_pi_owner(u32 uval, union futex_key *key, | |||
1208 | 1214 | ||
1209 | WARN_ON(!list_empty(&pi_state->list)); | 1215 | WARN_ON(!list_empty(&pi_state->list)); |
1210 | list_add(&pi_state->list, &p->pi_state_list); | 1216 | list_add(&pi_state->list, &p->pi_state_list); |
1217 | /* | ||
1218 | * Assignment without holding pi_state->pi_mutex.wait_lock is safe | ||
1219 | * because there is no concurrency as the object is not published yet. | ||
1220 | */ | ||
1211 | pi_state->owner = p; | 1221 | pi_state->owner = p; |
1212 | raw_spin_unlock_irq(&p->pi_lock); | 1222 | raw_spin_unlock_irq(&p->pi_lock); |
1213 | 1223 | ||
@@ -2878,6 +2888,7 @@ retry: | |||
2878 | raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock); | 2888 | raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock); |
2879 | spin_unlock(&hb->lock); | 2889 | spin_unlock(&hb->lock); |
2880 | 2890 | ||
2891 | /* drops pi_state->pi_mutex.wait_lock */ | ||
2881 | ret = wake_futex_pi(uaddr, uval, pi_state); | 2892 | ret = wake_futex_pi(uaddr, uval, pi_state); |
2882 | 2893 | ||
2883 | put_pi_state(pi_state); | 2894 | put_pi_state(pi_state); |
diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c index f51b7b6d2451..6fc89fd93824 100644 --- a/kernel/irq/chip.c +++ b/kernel/irq/chip.c | |||
@@ -202,7 +202,7 @@ __irq_startup_managed(struct irq_desc *desc, struct cpumask *aff, bool force) | |||
202 | 202 | ||
203 | irqd_clr_managed_shutdown(d); | 203 | irqd_clr_managed_shutdown(d); |
204 | 204 | ||
205 | if (cpumask_any_and(aff, cpu_online_mask) > nr_cpu_ids) { | 205 | if (cpumask_any_and(aff, cpu_online_mask) >= nr_cpu_ids) { |
206 | /* | 206 | /* |
207 | * Catch code which fiddles with enable_irq() on a managed | 207 | * Catch code which fiddles with enable_irq() on a managed |
208 | * and potentially shutdown IRQ. Chained interrupt | 208 | * and potentially shutdown IRQ. Chained interrupt |
diff --git a/kernel/irq/generic-chip.c b/kernel/irq/generic-chip.c index f7086b78ad6e..5270a54b9fa4 100644 --- a/kernel/irq/generic-chip.c +++ b/kernel/irq/generic-chip.c | |||
@@ -322,7 +322,6 @@ int __irq_alloc_domain_generic_chips(struct irq_domain *d, int irqs_per_chip, | |||
322 | /* Calc pointer to the next generic chip */ | 322 | /* Calc pointer to the next generic chip */ |
323 | tmp += sizeof(*gc) + num_ct * sizeof(struct irq_chip_type); | 323 | tmp += sizeof(*gc) + num_ct * sizeof(struct irq_chip_type); |
324 | } | 324 | } |
325 | d->name = name; | ||
326 | return 0; | 325 | return 0; |
327 | } | 326 | } |
328 | EXPORT_SYMBOL_GPL(__irq_alloc_domain_generic_chips); | 327 | EXPORT_SYMBOL_GPL(__irq_alloc_domain_generic_chips); |
diff --git a/kernel/irq/irqdomain.c b/kernel/irq/irqdomain.c index e84b7056bb08..ac4644e92b49 100644 --- a/kernel/irq/irqdomain.c +++ b/kernel/irq/irqdomain.c | |||
@@ -945,7 +945,7 @@ static int virq_debug_show(struct seq_file *m, void *private) | |||
945 | struct irq_desc *desc; | 945 | struct irq_desc *desc; |
946 | struct irq_domain *domain; | 946 | struct irq_domain *domain; |
947 | struct radix_tree_iter iter; | 947 | struct radix_tree_iter iter; |
948 | void **slot; | 948 | void __rcu **slot; |
949 | int i; | 949 | int i; |
950 | 950 | ||
951 | seq_printf(m, " %-16s %-6s %-10s %-10s %s\n", | 951 | seq_printf(m, " %-16s %-6s %-10s %-10s %s\n", |
@@ -1453,7 +1453,7 @@ out_free_desc: | |||
1453 | /* The irq_data was moved, fix the revmap to refer to the new location */ | 1453 | /* The irq_data was moved, fix the revmap to refer to the new location */ |
1454 | static void irq_domain_fix_revmap(struct irq_data *d) | 1454 | static void irq_domain_fix_revmap(struct irq_data *d) |
1455 | { | 1455 | { |
1456 | void **slot; | 1456 | void __rcu **slot; |
1457 | 1457 | ||
1458 | if (d->hwirq < d->domain->revmap_size) | 1458 | if (d->hwirq < d->domain->revmap_size) |
1459 | return; /* Not using radix tree. */ | 1459 | return; /* Not using radix tree. */ |
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c index 573dc52b0806..d00132b5c325 100644 --- a/kernel/irq/manage.c +++ b/kernel/irq/manage.c | |||
@@ -1643,6 +1643,10 @@ const void *free_irq(unsigned int irq, void *dev_id) | |||
1643 | #endif | 1643 | #endif |
1644 | 1644 | ||
1645 | action = __free_irq(irq, dev_id); | 1645 | action = __free_irq(irq, dev_id); |
1646 | |||
1647 | if (!action) | ||
1648 | return NULL; | ||
1649 | |||
1646 | devname = action->name; | 1650 | devname = action->name; |
1647 | kfree(action); | 1651 | kfree(action); |
1648 | return devname; | 1652 | return devname; |
diff --git a/kernel/kcmp.c b/kernel/kcmp.c index ea34ed8bb952..055bb2962a0b 100644 --- a/kernel/kcmp.c +++ b/kernel/kcmp.c | |||
@@ -131,7 +131,7 @@ static int kcmp_epoll_target(struct task_struct *task1, | |||
131 | if (filp_epoll) { | 131 | if (filp_epoll) { |
132 | filp_tgt = get_epoll_tfile_raw_ptr(filp_epoll, slot.tfd, slot.toff); | 132 | filp_tgt = get_epoll_tfile_raw_ptr(filp_epoll, slot.tfd, slot.toff); |
133 | fput(filp_epoll); | 133 | fput(filp_epoll); |
134 | } else | 134 | } |
135 | 135 | ||
136 | if (IS_ERR(filp_tgt)) | 136 | if (IS_ERR(filp_tgt)) |
137 | return PTR_ERR(filp_tgt); | 137 | return PTR_ERR(filp_tgt); |
diff --git a/kernel/locking/rwsem-xadd.c b/kernel/locking/rwsem-xadd.c index 02f660666ab8..1fefe6dcafd7 100644 --- a/kernel/locking/rwsem-xadd.c +++ b/kernel/locking/rwsem-xadd.c | |||
@@ -613,6 +613,33 @@ struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem) | |||
613 | DEFINE_WAKE_Q(wake_q); | 613 | DEFINE_WAKE_Q(wake_q); |
614 | 614 | ||
615 | /* | 615 | /* |
616 | * __rwsem_down_write_failed_common(sem) | ||
617 | * rwsem_optimistic_spin(sem) | ||
618 | * osq_unlock(sem->osq) | ||
619 | * ... | ||
620 | * atomic_long_add_return(&sem->count) | ||
621 | * | ||
622 | * - VS - | ||
623 | * | ||
624 | * __up_write() | ||
625 | * if (atomic_long_sub_return_release(&sem->count) < 0) | ||
626 | * rwsem_wake(sem) | ||
627 | * osq_is_locked(&sem->osq) | ||
628 | * | ||
629 | * And __up_write() must observe !osq_is_locked() when it observes the | ||
630 | * atomic_long_add_return() in order to not miss a wakeup. | ||
631 | * | ||
632 | * This boils down to: | ||
633 | * | ||
634 | * [S.rel] X = 1 [RmW] r0 = (Y += 0) | ||
635 | * MB RMB | ||
636 | * [RmW] Y += 1 [L] r1 = X | ||
637 | * | ||
638 | * exists (r0=1 /\ r1=0) | ||
639 | */ | ||
640 | smp_rmb(); | ||
641 | |||
642 | /* | ||
616 | * If a spinner is present, it is not necessary to do the wakeup. | 643 | * If a spinner is present, it is not necessary to do the wakeup. |
617 | * Try to do wakeup only if the trylock succeeds to minimize | 644 | * Try to do wakeup only if the trylock succeeds to minimize |
618 | * spinlock contention which may introduce too much delay in the | 645 | * spinlock contention which may introduce too much delay in the |
diff --git a/kernel/memremap.c b/kernel/memremap.c index 6bcbfbf1a8fd..403ab9cdb949 100644 --- a/kernel/memremap.c +++ b/kernel/memremap.c | |||
@@ -350,7 +350,7 @@ void *devm_memremap_pages(struct device *dev, struct resource *res, | |||
350 | pgprot_t pgprot = PAGE_KERNEL; | 350 | pgprot_t pgprot = PAGE_KERNEL; |
351 | struct dev_pagemap *pgmap; | 351 | struct dev_pagemap *pgmap; |
352 | struct page_map *page_map; | 352 | struct page_map *page_map; |
353 | int error, nid, is_ram; | 353 | int error, nid, is_ram, i = 0; |
354 | 354 | ||
355 | align_start = res->start & ~(SECTION_SIZE - 1); | 355 | align_start = res->start & ~(SECTION_SIZE - 1); |
356 | align_size = ALIGN(res->start + resource_size(res), SECTION_SIZE) | 356 | align_size = ALIGN(res->start + resource_size(res), SECTION_SIZE) |
@@ -448,6 +448,8 @@ void *devm_memremap_pages(struct device *dev, struct resource *res, | |||
448 | list_del(&page->lru); | 448 | list_del(&page->lru); |
449 | page->pgmap = pgmap; | 449 | page->pgmap = pgmap; |
450 | percpu_ref_get(ref); | 450 | percpu_ref_get(ref); |
451 | if (!(++i % 1024)) | ||
452 | cond_resched(); | ||
451 | } | 453 | } |
452 | devres_add(dev, page_map); | 454 | devres_add(dev, page_map); |
453 | return __va(res->start); | 455 | return __va(res->start); |
diff --git a/kernel/params.c b/kernel/params.c index 60b2d8101355..cc9108c2a1fd 100644 --- a/kernel/params.c +++ b/kernel/params.c | |||
@@ -224,7 +224,7 @@ char *parse_args(const char *doing, | |||
224 | } \ | 224 | } \ |
225 | int param_get_##name(char *buffer, const struct kernel_param *kp) \ | 225 | int param_get_##name(char *buffer, const struct kernel_param *kp) \ |
226 | { \ | 226 | { \ |
227 | return scnprintf(buffer, PAGE_SIZE, format, \ | 227 | return scnprintf(buffer, PAGE_SIZE, format "\n", \ |
228 | *((type *)kp->arg)); \ | 228 | *((type *)kp->arg)); \ |
229 | } \ | 229 | } \ |
230 | const struct kernel_param_ops param_ops_##name = { \ | 230 | const struct kernel_param_ops param_ops_##name = { \ |
@@ -236,14 +236,14 @@ char *parse_args(const char *doing, | |||
236 | EXPORT_SYMBOL(param_ops_##name) | 236 | EXPORT_SYMBOL(param_ops_##name) |
237 | 237 | ||
238 | 238 | ||
239 | STANDARD_PARAM_DEF(byte, unsigned char, "%hhu", kstrtou8); | 239 | STANDARD_PARAM_DEF(byte, unsigned char, "%hhu", kstrtou8); |
240 | STANDARD_PARAM_DEF(short, short, "%hi", kstrtos16); | 240 | STANDARD_PARAM_DEF(short, short, "%hi", kstrtos16); |
241 | STANDARD_PARAM_DEF(ushort, unsigned short, "%hu", kstrtou16); | 241 | STANDARD_PARAM_DEF(ushort, unsigned short, "%hu", kstrtou16); |
242 | STANDARD_PARAM_DEF(int, int, "%i", kstrtoint); | 242 | STANDARD_PARAM_DEF(int, int, "%i", kstrtoint); |
243 | STANDARD_PARAM_DEF(uint, unsigned int, "%u", kstrtouint); | 243 | STANDARD_PARAM_DEF(uint, unsigned int, "%u", kstrtouint); |
244 | STANDARD_PARAM_DEF(long, long, "%li", kstrtol); | 244 | STANDARD_PARAM_DEF(long, long, "%li", kstrtol); |
245 | STANDARD_PARAM_DEF(ulong, unsigned long, "%lu", kstrtoul); | 245 | STANDARD_PARAM_DEF(ulong, unsigned long, "%lu", kstrtoul); |
246 | STANDARD_PARAM_DEF(ullong, unsigned long long, "%llu", kstrtoull); | 246 | STANDARD_PARAM_DEF(ullong, unsigned long long, "%llu", kstrtoull); |
247 | 247 | ||
248 | int param_set_charp(const char *val, const struct kernel_param *kp) | 248 | int param_set_charp(const char *val, const struct kernel_param *kp) |
249 | { | 249 | { |
@@ -270,7 +270,7 @@ EXPORT_SYMBOL(param_set_charp); | |||
270 | 270 | ||
271 | int param_get_charp(char *buffer, const struct kernel_param *kp) | 271 | int param_get_charp(char *buffer, const struct kernel_param *kp) |
272 | { | 272 | { |
273 | return scnprintf(buffer, PAGE_SIZE, "%s", *((char **)kp->arg)); | 273 | return scnprintf(buffer, PAGE_SIZE, "%s\n", *((char **)kp->arg)); |
274 | } | 274 | } |
275 | EXPORT_SYMBOL(param_get_charp); | 275 | EXPORT_SYMBOL(param_get_charp); |
276 | 276 | ||
@@ -301,7 +301,7 @@ EXPORT_SYMBOL(param_set_bool); | |||
301 | int param_get_bool(char *buffer, const struct kernel_param *kp) | 301 | int param_get_bool(char *buffer, const struct kernel_param *kp) |
302 | { | 302 | { |
303 | /* Y and N chosen as being relatively non-coder friendly */ | 303 | /* Y and N chosen as being relatively non-coder friendly */ |
304 | return sprintf(buffer, "%c", *(bool *)kp->arg ? 'Y' : 'N'); | 304 | return sprintf(buffer, "%c\n", *(bool *)kp->arg ? 'Y' : 'N'); |
305 | } | 305 | } |
306 | EXPORT_SYMBOL(param_get_bool); | 306 | EXPORT_SYMBOL(param_get_bool); |
307 | 307 | ||
@@ -360,7 +360,7 @@ EXPORT_SYMBOL(param_set_invbool); | |||
360 | 360 | ||
361 | int param_get_invbool(char *buffer, const struct kernel_param *kp) | 361 | int param_get_invbool(char *buffer, const struct kernel_param *kp) |
362 | { | 362 | { |
363 | return sprintf(buffer, "%c", (*(bool *)kp->arg) ? 'N' : 'Y'); | 363 | return sprintf(buffer, "%c\n", (*(bool *)kp->arg) ? 'N' : 'Y'); |
364 | } | 364 | } |
365 | EXPORT_SYMBOL(param_get_invbool); | 365 | EXPORT_SYMBOL(param_get_invbool); |
366 | 366 | ||
@@ -460,8 +460,9 @@ static int param_array_get(char *buffer, const struct kernel_param *kp) | |||
460 | struct kernel_param p = *kp; | 460 | struct kernel_param p = *kp; |
461 | 461 | ||
462 | for (i = off = 0; i < (arr->num ? *arr->num : arr->max); i++) { | 462 | for (i = off = 0; i < (arr->num ? *arr->num : arr->max); i++) { |
463 | /* Replace \n with comma */ | ||
463 | if (i) | 464 | if (i) |
464 | buffer[off++] = ','; | 465 | buffer[off - 1] = ','; |
465 | p.arg = arr->elem + arr->elemsize * i; | 466 | p.arg = arr->elem + arr->elemsize * i; |
466 | check_kparam_locked(p.mod); | 467 | check_kparam_locked(p.mod); |
467 | ret = arr->ops->get(buffer + off, &p); | 468 | ret = arr->ops->get(buffer + off, &p); |
@@ -507,7 +508,7 @@ EXPORT_SYMBOL(param_set_copystring); | |||
507 | int param_get_string(char *buffer, const struct kernel_param *kp) | 508 | int param_get_string(char *buffer, const struct kernel_param *kp) |
508 | { | 509 | { |
509 | const struct kparam_string *kps = kp->str; | 510 | const struct kparam_string *kps = kp->str; |
510 | return strlcpy(buffer, kps->string, kps->maxlen); | 511 | return scnprintf(buffer, PAGE_SIZE, "%s\n", kps->string); |
511 | } | 512 | } |
512 | EXPORT_SYMBOL(param_get_string); | 513 | EXPORT_SYMBOL(param_get_string); |
513 | 514 | ||
@@ -549,10 +550,6 @@ static ssize_t param_attr_show(struct module_attribute *mattr, | |||
549 | kernel_param_lock(mk->mod); | 550 | kernel_param_lock(mk->mod); |
550 | count = attribute->param->ops->get(buf, attribute->param); | 551 | count = attribute->param->ops->get(buf, attribute->param); |
551 | kernel_param_unlock(mk->mod); | 552 | kernel_param_unlock(mk->mod); |
552 | if (count > 0) { | ||
553 | strcat(buf, "\n"); | ||
554 | ++count; | ||
555 | } | ||
556 | return count; | 553 | return count; |
557 | } | 554 | } |
558 | 555 | ||
@@ -600,7 +597,7 @@ EXPORT_SYMBOL(kernel_param_unlock); | |||
600 | /* | 597 | /* |
601 | * add_sysfs_param - add a parameter to sysfs | 598 | * add_sysfs_param - add a parameter to sysfs |
602 | * @mk: struct module_kobject | 599 | * @mk: struct module_kobject |
603 | * @kparam: the actual parameter definition to add to sysfs | 600 | * @kp: the actual parameter definition to add to sysfs |
604 | * @name: name of parameter | 601 | * @name: name of parameter |
605 | * | 602 | * |
606 | * Create a kobject if for a (per-module) parameter if mp NULL, and | 603 | * Create a kobject if for a (per-module) parameter if mp NULL, and |
diff --git a/kernel/power/suspend.c b/kernel/power/suspend.c index 3e2b4f519009..ccd2d20e6b06 100644 --- a/kernel/power/suspend.c +++ b/kernel/power/suspend.c | |||
@@ -120,22 +120,26 @@ static void s2idle_loop(void) | |||
120 | * frozen processes + suspended devices + idle processors. | 120 | * frozen processes + suspended devices + idle processors. |
121 | * Thus s2idle_enter() should be called right after | 121 | * Thus s2idle_enter() should be called right after |
122 | * all devices have been suspended. | 122 | * all devices have been suspended. |
123 | * | ||
124 | * Wakeups during the noirq suspend of devices may be spurious, | ||
125 | * so prevent them from terminating the loop right away. | ||
123 | */ | 126 | */ |
124 | error = dpm_noirq_suspend_devices(PMSG_SUSPEND); | 127 | error = dpm_noirq_suspend_devices(PMSG_SUSPEND); |
125 | if (!error) | 128 | if (!error) |
126 | s2idle_enter(); | 129 | s2idle_enter(); |
130 | else if (error == -EBUSY && pm_wakeup_pending()) | ||
131 | error = 0; | ||
127 | 132 | ||
128 | dpm_noirq_resume_devices(PMSG_RESUME); | 133 | if (!error && s2idle_ops && s2idle_ops->wake) |
129 | if (error && (error != -EBUSY || !pm_wakeup_pending())) { | ||
130 | dpm_noirq_end(); | ||
131 | break; | ||
132 | } | ||
133 | |||
134 | if (s2idle_ops && s2idle_ops->wake) | ||
135 | s2idle_ops->wake(); | 134 | s2idle_ops->wake(); |
136 | 135 | ||
136 | dpm_noirq_resume_devices(PMSG_RESUME); | ||
137 | |||
137 | dpm_noirq_end(); | 138 | dpm_noirq_end(); |
138 | 139 | ||
140 | if (error) | ||
141 | break; | ||
142 | |||
139 | if (s2idle_ops && s2idle_ops->sync) | 143 | if (s2idle_ops && s2idle_ops->sync) |
140 | s2idle_ops->sync(); | 144 | s2idle_ops->sync(); |
141 | 145 | ||
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c index 1250e4bd4b85..b0ad62b0e7b8 100644 --- a/kernel/rcu/tree.c +++ b/kernel/rcu/tree.c | |||
@@ -882,6 +882,11 @@ void rcu_irq_exit(void) | |||
882 | 882 | ||
883 | RCU_LOCKDEP_WARN(!irqs_disabled(), "rcu_irq_exit() invoked with irqs enabled!!!"); | 883 | RCU_LOCKDEP_WARN(!irqs_disabled(), "rcu_irq_exit() invoked with irqs enabled!!!"); |
884 | rdtp = this_cpu_ptr(&rcu_dynticks); | 884 | rdtp = this_cpu_ptr(&rcu_dynticks); |
885 | |||
886 | /* Page faults can happen in NMI handlers, so check... */ | ||
887 | if (rdtp->dynticks_nmi_nesting) | ||
888 | return; | ||
889 | |||
885 | WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && | 890 | WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && |
886 | rdtp->dynticks_nesting < 1); | 891 | rdtp->dynticks_nesting < 1); |
887 | if (rdtp->dynticks_nesting <= 1) { | 892 | if (rdtp->dynticks_nesting <= 1) { |
@@ -1015,6 +1020,11 @@ void rcu_irq_enter(void) | |||
1015 | 1020 | ||
1016 | RCU_LOCKDEP_WARN(!irqs_disabled(), "rcu_irq_enter() invoked with irqs enabled!!!"); | 1021 | RCU_LOCKDEP_WARN(!irqs_disabled(), "rcu_irq_enter() invoked with irqs enabled!!!"); |
1017 | rdtp = this_cpu_ptr(&rcu_dynticks); | 1022 | rdtp = this_cpu_ptr(&rcu_dynticks); |
1023 | |||
1024 | /* Page faults can happen in NMI handlers, so check... */ | ||
1025 | if (rdtp->dynticks_nmi_nesting) | ||
1026 | return; | ||
1027 | |||
1018 | oldval = rdtp->dynticks_nesting; | 1028 | oldval = rdtp->dynticks_nesting; |
1019 | rdtp->dynticks_nesting++; | 1029 | rdtp->dynticks_nesting++; |
1020 | WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && | 1030 | WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && |
diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 18a6966567da..d17c5da523a0 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c | |||
@@ -5166,6 +5166,28 @@ void sched_show_task(struct task_struct *p) | |||
5166 | put_task_stack(p); | 5166 | put_task_stack(p); |
5167 | } | 5167 | } |
5168 | 5168 | ||
5169 | static inline bool | ||
5170 | state_filter_match(unsigned long state_filter, struct task_struct *p) | ||
5171 | { | ||
5172 | /* no filter, everything matches */ | ||
5173 | if (!state_filter) | ||
5174 | return true; | ||
5175 | |||
5176 | /* filter, but doesn't match */ | ||
5177 | if (!(p->state & state_filter)) | ||
5178 | return false; | ||
5179 | |||
5180 | /* | ||
5181 | * When looking for TASK_UNINTERRUPTIBLE skip TASK_IDLE (allows | ||
5182 | * TASK_KILLABLE). | ||
5183 | */ | ||
5184 | if (state_filter == TASK_UNINTERRUPTIBLE && p->state == TASK_IDLE) | ||
5185 | return false; | ||
5186 | |||
5187 | return true; | ||
5188 | } | ||
5189 | |||
5190 | |||
5169 | void show_state_filter(unsigned long state_filter) | 5191 | void show_state_filter(unsigned long state_filter) |
5170 | { | 5192 | { |
5171 | struct task_struct *g, *p; | 5193 | struct task_struct *g, *p; |
@@ -5188,7 +5210,7 @@ void show_state_filter(unsigned long state_filter) | |||
5188 | */ | 5210 | */ |
5189 | touch_nmi_watchdog(); | 5211 | touch_nmi_watchdog(); |
5190 | touch_all_softlockup_watchdogs(); | 5212 | touch_all_softlockup_watchdogs(); |
5191 | if (!state_filter || (p->state & state_filter)) | 5213 | if (state_filter_match(state_filter, p)) |
5192 | sched_show_task(p); | 5214 | sched_show_task(p); |
5193 | } | 5215 | } |
5194 | 5216 | ||
diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c index 01217fb5a5de..2f93e4a2d9f6 100644 --- a/kernel/sched/debug.c +++ b/kernel/sched/debug.c | |||
@@ -466,8 +466,6 @@ static char *task_group_path(struct task_group *tg) | |||
466 | } | 466 | } |
467 | #endif | 467 | #endif |
468 | 468 | ||
469 | static const char stat_nam[] = TASK_STATE_TO_CHAR_STR; | ||
470 | |||
471 | static void | 469 | static void |
472 | print_task(struct seq_file *m, struct rq *rq, struct task_struct *p) | 470 | print_task(struct seq_file *m, struct rq *rq, struct task_struct *p) |
473 | { | 471 | { |
diff --git a/kernel/seccomp.c b/kernel/seccomp.c index 98b59b5db90b..bb3a38005b9c 100644 --- a/kernel/seccomp.c +++ b/kernel/seccomp.c | |||
@@ -17,11 +17,13 @@ | |||
17 | #include <linux/audit.h> | 17 | #include <linux/audit.h> |
18 | #include <linux/compat.h> | 18 | #include <linux/compat.h> |
19 | #include <linux/coredump.h> | 19 | #include <linux/coredump.h> |
20 | #include <linux/kmemleak.h> | ||
20 | #include <linux/sched.h> | 21 | #include <linux/sched.h> |
21 | #include <linux/sched/task_stack.h> | 22 | #include <linux/sched/task_stack.h> |
22 | #include <linux/seccomp.h> | 23 | #include <linux/seccomp.h> |
23 | #include <linux/slab.h> | 24 | #include <linux/slab.h> |
24 | #include <linux/syscalls.h> | 25 | #include <linux/syscalls.h> |
26 | #include <linux/sysctl.h> | ||
25 | 27 | ||
26 | #ifdef CONFIG_HAVE_ARCH_SECCOMP_FILTER | 28 | #ifdef CONFIG_HAVE_ARCH_SECCOMP_FILTER |
27 | #include <asm/syscall.h> | 29 | #include <asm/syscall.h> |
@@ -42,6 +44,7 @@ | |||
42 | * get/put helpers should be used when accessing an instance | 44 | * get/put helpers should be used when accessing an instance |
43 | * outside of a lifetime-guarded section. In general, this | 45 | * outside of a lifetime-guarded section. In general, this |
44 | * is only needed for handling filters shared across tasks. | 46 | * is only needed for handling filters shared across tasks. |
47 | * @log: true if all actions except for SECCOMP_RET_ALLOW should be logged | ||
45 | * @prev: points to a previously installed, or inherited, filter | 48 | * @prev: points to a previously installed, or inherited, filter |
46 | * @prog: the BPF program to evaluate | 49 | * @prog: the BPF program to evaluate |
47 | * | 50 | * |
@@ -57,6 +60,7 @@ | |||
57 | */ | 60 | */ |
58 | struct seccomp_filter { | 61 | struct seccomp_filter { |
59 | refcount_t usage; | 62 | refcount_t usage; |
63 | bool log; | ||
60 | struct seccomp_filter *prev; | 64 | struct seccomp_filter *prev; |
61 | struct bpf_prog *prog; | 65 | struct bpf_prog *prog; |
62 | }; | 66 | }; |
@@ -171,10 +175,15 @@ static int seccomp_check_filter(struct sock_filter *filter, unsigned int flen) | |||
171 | /** | 175 | /** |
172 | * seccomp_run_filters - evaluates all seccomp filters against @sd | 176 | * seccomp_run_filters - evaluates all seccomp filters against @sd |
173 | * @sd: optional seccomp data to be passed to filters | 177 | * @sd: optional seccomp data to be passed to filters |
178 | * @match: stores struct seccomp_filter that resulted in the return value, | ||
179 | * unless filter returned SECCOMP_RET_ALLOW, in which case it will | ||
180 | * be unchanged. | ||
174 | * | 181 | * |
175 | * Returns valid seccomp BPF response codes. | 182 | * Returns valid seccomp BPF response codes. |
176 | */ | 183 | */ |
177 | static u32 seccomp_run_filters(const struct seccomp_data *sd) | 184 | #define ACTION_ONLY(ret) ((s32)((ret) & (SECCOMP_RET_ACTION_FULL))) |
185 | static u32 seccomp_run_filters(const struct seccomp_data *sd, | ||
186 | struct seccomp_filter **match) | ||
178 | { | 187 | { |
179 | struct seccomp_data sd_local; | 188 | struct seccomp_data sd_local; |
180 | u32 ret = SECCOMP_RET_ALLOW; | 189 | u32 ret = SECCOMP_RET_ALLOW; |
@@ -184,7 +193,7 @@ static u32 seccomp_run_filters(const struct seccomp_data *sd) | |||
184 | 193 | ||
185 | /* Ensure unexpected behavior doesn't result in failing open. */ | 194 | /* Ensure unexpected behavior doesn't result in failing open. */ |
186 | if (unlikely(WARN_ON(f == NULL))) | 195 | if (unlikely(WARN_ON(f == NULL))) |
187 | return SECCOMP_RET_KILL; | 196 | return SECCOMP_RET_KILL_PROCESS; |
188 | 197 | ||
189 | if (!sd) { | 198 | if (!sd) { |
190 | populate_seccomp_data(&sd_local); | 199 | populate_seccomp_data(&sd_local); |
@@ -198,8 +207,10 @@ static u32 seccomp_run_filters(const struct seccomp_data *sd) | |||
198 | for (; f; f = f->prev) { | 207 | for (; f; f = f->prev) { |
199 | u32 cur_ret = BPF_PROG_RUN(f->prog, sd); | 208 | u32 cur_ret = BPF_PROG_RUN(f->prog, sd); |
200 | 209 | ||
201 | if ((cur_ret & SECCOMP_RET_ACTION) < (ret & SECCOMP_RET_ACTION)) | 210 | if (ACTION_ONLY(cur_ret) < ACTION_ONLY(ret)) { |
202 | ret = cur_ret; | 211 | ret = cur_ret; |
212 | *match = f; | ||
213 | } | ||
203 | } | 214 | } |
204 | return ret; | 215 | return ret; |
205 | } | 216 | } |
@@ -444,6 +455,10 @@ static long seccomp_attach_filter(unsigned int flags, | |||
444 | return ret; | 455 | return ret; |
445 | } | 456 | } |
446 | 457 | ||
458 | /* Set log flag, if present. */ | ||
459 | if (flags & SECCOMP_FILTER_FLAG_LOG) | ||
460 | filter->log = true; | ||
461 | |||
447 | /* | 462 | /* |
448 | * If there is an existing filter, make it the prev and don't drop its | 463 | * If there is an existing filter, make it the prev and don't drop its |
449 | * task reference. | 464 | * task reference. |
@@ -458,14 +473,19 @@ static long seccomp_attach_filter(unsigned int flags, | |||
458 | return 0; | 473 | return 0; |
459 | } | 474 | } |
460 | 475 | ||
476 | void __get_seccomp_filter(struct seccomp_filter *filter) | ||
477 | { | ||
478 | /* Reference count is bounded by the number of total processes. */ | ||
479 | refcount_inc(&filter->usage); | ||
480 | } | ||
481 | |||
461 | /* get_seccomp_filter - increments the reference count of the filter on @tsk */ | 482 | /* get_seccomp_filter - increments the reference count of the filter on @tsk */ |
462 | void get_seccomp_filter(struct task_struct *tsk) | 483 | void get_seccomp_filter(struct task_struct *tsk) |
463 | { | 484 | { |
464 | struct seccomp_filter *orig = tsk->seccomp.filter; | 485 | struct seccomp_filter *orig = tsk->seccomp.filter; |
465 | if (!orig) | 486 | if (!orig) |
466 | return; | 487 | return; |
467 | /* Reference count is bounded by the number of total processes. */ | 488 | __get_seccomp_filter(orig); |
468 | refcount_inc(&orig->usage); | ||
469 | } | 489 | } |
470 | 490 | ||
471 | static inline void seccomp_filter_free(struct seccomp_filter *filter) | 491 | static inline void seccomp_filter_free(struct seccomp_filter *filter) |
@@ -476,10 +496,8 @@ static inline void seccomp_filter_free(struct seccomp_filter *filter) | |||
476 | } | 496 | } |
477 | } | 497 | } |
478 | 498 | ||
479 | /* put_seccomp_filter - decrements the ref count of tsk->seccomp.filter */ | 499 | static void __put_seccomp_filter(struct seccomp_filter *orig) |
480 | void put_seccomp_filter(struct task_struct *tsk) | ||
481 | { | 500 | { |
482 | struct seccomp_filter *orig = tsk->seccomp.filter; | ||
483 | /* Clean up single-reference branches iteratively. */ | 501 | /* Clean up single-reference branches iteratively. */ |
484 | while (orig && refcount_dec_and_test(&orig->usage)) { | 502 | while (orig && refcount_dec_and_test(&orig->usage)) { |
485 | struct seccomp_filter *freeme = orig; | 503 | struct seccomp_filter *freeme = orig; |
@@ -488,6 +506,12 @@ void put_seccomp_filter(struct task_struct *tsk) | |||
488 | } | 506 | } |
489 | } | 507 | } |
490 | 508 | ||
509 | /* put_seccomp_filter - decrements the ref count of tsk->seccomp.filter */ | ||
510 | void put_seccomp_filter(struct task_struct *tsk) | ||
511 | { | ||
512 | __put_seccomp_filter(tsk->seccomp.filter); | ||
513 | } | ||
514 | |||
491 | static void seccomp_init_siginfo(siginfo_t *info, int syscall, int reason) | 515 | static void seccomp_init_siginfo(siginfo_t *info, int syscall, int reason) |
492 | { | 516 | { |
493 | memset(info, 0, sizeof(*info)); | 517 | memset(info, 0, sizeof(*info)); |
@@ -514,6 +538,65 @@ static void seccomp_send_sigsys(int syscall, int reason) | |||
514 | } | 538 | } |
515 | #endif /* CONFIG_SECCOMP_FILTER */ | 539 | #endif /* CONFIG_SECCOMP_FILTER */ |
516 | 540 | ||
541 | /* For use with seccomp_actions_logged */ | ||
542 | #define SECCOMP_LOG_KILL_PROCESS (1 << 0) | ||
543 | #define SECCOMP_LOG_KILL_THREAD (1 << 1) | ||
544 | #define SECCOMP_LOG_TRAP (1 << 2) | ||
545 | #define SECCOMP_LOG_ERRNO (1 << 3) | ||
546 | #define SECCOMP_LOG_TRACE (1 << 4) | ||
547 | #define SECCOMP_LOG_LOG (1 << 5) | ||
548 | #define SECCOMP_LOG_ALLOW (1 << 6) | ||
549 | |||
550 | static u32 seccomp_actions_logged = SECCOMP_LOG_KILL_PROCESS | | ||
551 | SECCOMP_LOG_KILL_THREAD | | ||
552 | SECCOMP_LOG_TRAP | | ||
553 | SECCOMP_LOG_ERRNO | | ||
554 | SECCOMP_LOG_TRACE | | ||
555 | SECCOMP_LOG_LOG; | ||
556 | |||
557 | static inline void seccomp_log(unsigned long syscall, long signr, u32 action, | ||
558 | bool requested) | ||
559 | { | ||
560 | bool log = false; | ||
561 | |||
562 | switch (action) { | ||
563 | case SECCOMP_RET_ALLOW: | ||
564 | break; | ||
565 | case SECCOMP_RET_TRAP: | ||
566 | log = requested && seccomp_actions_logged & SECCOMP_LOG_TRAP; | ||
567 | break; | ||
568 | case SECCOMP_RET_ERRNO: | ||
569 | log = requested && seccomp_actions_logged & SECCOMP_LOG_ERRNO; | ||
570 | break; | ||
571 | case SECCOMP_RET_TRACE: | ||
572 | log = requested && seccomp_actions_logged & SECCOMP_LOG_TRACE; | ||
573 | break; | ||
574 | case SECCOMP_RET_LOG: | ||
575 | log = seccomp_actions_logged & SECCOMP_LOG_LOG; | ||
576 | break; | ||
577 | case SECCOMP_RET_KILL_THREAD: | ||
578 | log = seccomp_actions_logged & SECCOMP_LOG_KILL_THREAD; | ||
579 | break; | ||
580 | case SECCOMP_RET_KILL_PROCESS: | ||
581 | default: | ||
582 | log = seccomp_actions_logged & SECCOMP_LOG_KILL_PROCESS; | ||
583 | } | ||
584 | |||
585 | /* | ||
586 | * Force an audit message to be emitted when the action is RET_KILL_*, | ||
587 | * RET_LOG, or the FILTER_FLAG_LOG bit was set and the action is | ||
588 | * allowed to be logged by the admin. | ||
589 | */ | ||
590 | if (log) | ||
591 | return __audit_seccomp(syscall, signr, action); | ||
592 | |||
593 | /* | ||
594 | * Let the audit subsystem decide if the action should be audited based | ||
595 | * on whether the current task itself is being audited. | ||
596 | */ | ||
597 | return audit_seccomp(syscall, signr, action); | ||
598 | } | ||
599 | |||
517 | /* | 600 | /* |
518 | * Secure computing mode 1 allows only read/write/exit/sigreturn. | 601 | * Secure computing mode 1 allows only read/write/exit/sigreturn. |
519 | * To be fully secure this must be combined with rlimit | 602 | * To be fully secure this must be combined with rlimit |
@@ -539,7 +622,7 @@ static void __secure_computing_strict(int this_syscall) | |||
539 | #ifdef SECCOMP_DEBUG | 622 | #ifdef SECCOMP_DEBUG |
540 | dump_stack(); | 623 | dump_stack(); |
541 | #endif | 624 | #endif |
542 | audit_seccomp(this_syscall, SIGKILL, SECCOMP_RET_KILL); | 625 | seccomp_log(this_syscall, SIGKILL, SECCOMP_RET_KILL_THREAD, true); |
543 | do_exit(SIGKILL); | 626 | do_exit(SIGKILL); |
544 | } | 627 | } |
545 | 628 | ||
@@ -566,6 +649,7 @@ static int __seccomp_filter(int this_syscall, const struct seccomp_data *sd, | |||
566 | const bool recheck_after_trace) | 649 | const bool recheck_after_trace) |
567 | { | 650 | { |
568 | u32 filter_ret, action; | 651 | u32 filter_ret, action; |
652 | struct seccomp_filter *match = NULL; | ||
569 | int data; | 653 | int data; |
570 | 654 | ||
571 | /* | 655 | /* |
@@ -574,9 +658,9 @@ static int __seccomp_filter(int this_syscall, const struct seccomp_data *sd, | |||
574 | */ | 658 | */ |
575 | rmb(); | 659 | rmb(); |
576 | 660 | ||
577 | filter_ret = seccomp_run_filters(sd); | 661 | filter_ret = seccomp_run_filters(sd, &match); |
578 | data = filter_ret & SECCOMP_RET_DATA; | 662 | data = filter_ret & SECCOMP_RET_DATA; |
579 | action = filter_ret & SECCOMP_RET_ACTION; | 663 | action = filter_ret & SECCOMP_RET_ACTION_FULL; |
580 | 664 | ||
581 | switch (action) { | 665 | switch (action) { |
582 | case SECCOMP_RET_ERRNO: | 666 | case SECCOMP_RET_ERRNO: |
@@ -637,14 +721,25 @@ static int __seccomp_filter(int this_syscall, const struct seccomp_data *sd, | |||
637 | 721 | ||
638 | return 0; | 722 | return 0; |
639 | 723 | ||
724 | case SECCOMP_RET_LOG: | ||
725 | seccomp_log(this_syscall, 0, action, true); | ||
726 | return 0; | ||
727 | |||
640 | case SECCOMP_RET_ALLOW: | 728 | case SECCOMP_RET_ALLOW: |
729 | /* | ||
730 | * Note that the "match" filter will always be NULL for | ||
731 | * this action since SECCOMP_RET_ALLOW is the starting | ||
732 | * state in seccomp_run_filters(). | ||
733 | */ | ||
641 | return 0; | 734 | return 0; |
642 | 735 | ||
643 | case SECCOMP_RET_KILL: | 736 | case SECCOMP_RET_KILL_THREAD: |
737 | case SECCOMP_RET_KILL_PROCESS: | ||
644 | default: | 738 | default: |
645 | audit_seccomp(this_syscall, SIGSYS, action); | 739 | seccomp_log(this_syscall, SIGSYS, action, true); |
646 | /* Dump core only if this is the last remaining thread. */ | 740 | /* Dump core only if this is the last remaining thread. */ |
647 | if (get_nr_threads(current) == 1) { | 741 | if (action == SECCOMP_RET_KILL_PROCESS || |
742 | get_nr_threads(current) == 1) { | ||
648 | siginfo_t info; | 743 | siginfo_t info; |
649 | 744 | ||
650 | /* Show the original registers in the dump. */ | 745 | /* Show the original registers in the dump. */ |
@@ -653,13 +748,16 @@ static int __seccomp_filter(int this_syscall, const struct seccomp_data *sd, | |||
653 | seccomp_init_siginfo(&info, this_syscall, data); | 748 | seccomp_init_siginfo(&info, this_syscall, data); |
654 | do_coredump(&info); | 749 | do_coredump(&info); |
655 | } | 750 | } |
656 | do_exit(SIGSYS); | 751 | if (action == SECCOMP_RET_KILL_PROCESS) |
752 | do_group_exit(SIGSYS); | ||
753 | else | ||
754 | do_exit(SIGSYS); | ||
657 | } | 755 | } |
658 | 756 | ||
659 | unreachable(); | 757 | unreachable(); |
660 | 758 | ||
661 | skip: | 759 | skip: |
662 | audit_seccomp(this_syscall, 0, action); | 760 | seccomp_log(this_syscall, 0, action, match ? match->log : false); |
663 | return -1; | 761 | return -1; |
664 | } | 762 | } |
665 | #else | 763 | #else |
@@ -794,6 +892,29 @@ static inline long seccomp_set_mode_filter(unsigned int flags, | |||
794 | } | 892 | } |
795 | #endif | 893 | #endif |
796 | 894 | ||
895 | static long seccomp_get_action_avail(const char __user *uaction) | ||
896 | { | ||
897 | u32 action; | ||
898 | |||
899 | if (copy_from_user(&action, uaction, sizeof(action))) | ||
900 | return -EFAULT; | ||
901 | |||
902 | switch (action) { | ||
903 | case SECCOMP_RET_KILL_PROCESS: | ||
904 | case SECCOMP_RET_KILL_THREAD: | ||
905 | case SECCOMP_RET_TRAP: | ||
906 | case SECCOMP_RET_ERRNO: | ||
907 | case SECCOMP_RET_TRACE: | ||
908 | case SECCOMP_RET_LOG: | ||
909 | case SECCOMP_RET_ALLOW: | ||
910 | break; | ||
911 | default: | ||
912 | return -EOPNOTSUPP; | ||
913 | } | ||
914 | |||
915 | return 0; | ||
916 | } | ||
917 | |||
797 | /* Common entry point for both prctl and syscall. */ | 918 | /* Common entry point for both prctl and syscall. */ |
798 | static long do_seccomp(unsigned int op, unsigned int flags, | 919 | static long do_seccomp(unsigned int op, unsigned int flags, |
799 | const char __user *uargs) | 920 | const char __user *uargs) |
@@ -805,6 +926,11 @@ static long do_seccomp(unsigned int op, unsigned int flags, | |||
805 | return seccomp_set_mode_strict(); | 926 | return seccomp_set_mode_strict(); |
806 | case SECCOMP_SET_MODE_FILTER: | 927 | case SECCOMP_SET_MODE_FILTER: |
807 | return seccomp_set_mode_filter(flags, uargs); | 928 | return seccomp_set_mode_filter(flags, uargs); |
929 | case SECCOMP_GET_ACTION_AVAIL: | ||
930 | if (flags != 0) | ||
931 | return -EINVAL; | ||
932 | |||
933 | return seccomp_get_action_avail(uargs); | ||
808 | default: | 934 | default: |
809 | return -EINVAL; | 935 | return -EINVAL; |
810 | } | 936 | } |
@@ -908,13 +1034,13 @@ long seccomp_get_filter(struct task_struct *task, unsigned long filter_off, | |||
908 | if (!data) | 1034 | if (!data) |
909 | goto out; | 1035 | goto out; |
910 | 1036 | ||
911 | get_seccomp_filter(task); | 1037 | __get_seccomp_filter(filter); |
912 | spin_unlock_irq(&task->sighand->siglock); | 1038 | spin_unlock_irq(&task->sighand->siglock); |
913 | 1039 | ||
914 | if (copy_to_user(data, fprog->filter, bpf_classic_proglen(fprog))) | 1040 | if (copy_to_user(data, fprog->filter, bpf_classic_proglen(fprog))) |
915 | ret = -EFAULT; | 1041 | ret = -EFAULT; |
916 | 1042 | ||
917 | put_seccomp_filter(task); | 1043 | __put_seccomp_filter(filter); |
918 | return ret; | 1044 | return ret; |
919 | 1045 | ||
920 | out: | 1046 | out: |
@@ -922,3 +1048,185 @@ out: | |||
922 | return ret; | 1048 | return ret; |
923 | } | 1049 | } |
924 | #endif | 1050 | #endif |
1051 | |||
1052 | #ifdef CONFIG_SYSCTL | ||
1053 | |||
1054 | /* Human readable action names for friendly sysctl interaction */ | ||
1055 | #define SECCOMP_RET_KILL_PROCESS_NAME "kill_process" | ||
1056 | #define SECCOMP_RET_KILL_THREAD_NAME "kill_thread" | ||
1057 | #define SECCOMP_RET_TRAP_NAME "trap" | ||
1058 | #define SECCOMP_RET_ERRNO_NAME "errno" | ||
1059 | #define SECCOMP_RET_TRACE_NAME "trace" | ||
1060 | #define SECCOMP_RET_LOG_NAME "log" | ||
1061 | #define SECCOMP_RET_ALLOW_NAME "allow" | ||
1062 | |||
1063 | static const char seccomp_actions_avail[] = | ||
1064 | SECCOMP_RET_KILL_PROCESS_NAME " " | ||
1065 | SECCOMP_RET_KILL_THREAD_NAME " " | ||
1066 | SECCOMP_RET_TRAP_NAME " " | ||
1067 | SECCOMP_RET_ERRNO_NAME " " | ||
1068 | SECCOMP_RET_TRACE_NAME " " | ||
1069 | SECCOMP_RET_LOG_NAME " " | ||
1070 | SECCOMP_RET_ALLOW_NAME; | ||
1071 | |||
1072 | struct seccomp_log_name { | ||
1073 | u32 log; | ||
1074 | const char *name; | ||
1075 | }; | ||
1076 | |||
1077 | static const struct seccomp_log_name seccomp_log_names[] = { | ||
1078 | { SECCOMP_LOG_KILL_PROCESS, SECCOMP_RET_KILL_PROCESS_NAME }, | ||
1079 | { SECCOMP_LOG_KILL_THREAD, SECCOMP_RET_KILL_THREAD_NAME }, | ||
1080 | { SECCOMP_LOG_TRAP, SECCOMP_RET_TRAP_NAME }, | ||
1081 | { SECCOMP_LOG_ERRNO, SECCOMP_RET_ERRNO_NAME }, | ||
1082 | { SECCOMP_LOG_TRACE, SECCOMP_RET_TRACE_NAME }, | ||
1083 | { SECCOMP_LOG_LOG, SECCOMP_RET_LOG_NAME }, | ||
1084 | { SECCOMP_LOG_ALLOW, SECCOMP_RET_ALLOW_NAME }, | ||
1085 | { } | ||
1086 | }; | ||
1087 | |||
1088 | static bool seccomp_names_from_actions_logged(char *names, size_t size, | ||
1089 | u32 actions_logged) | ||
1090 | { | ||
1091 | const struct seccomp_log_name *cur; | ||
1092 | bool append_space = false; | ||
1093 | |||
1094 | for (cur = seccomp_log_names; cur->name && size; cur++) { | ||
1095 | ssize_t ret; | ||
1096 | |||
1097 | if (!(actions_logged & cur->log)) | ||
1098 | continue; | ||
1099 | |||
1100 | if (append_space) { | ||
1101 | ret = strscpy(names, " ", size); | ||
1102 | if (ret < 0) | ||
1103 | return false; | ||
1104 | |||
1105 | names += ret; | ||
1106 | size -= ret; | ||
1107 | } else | ||
1108 | append_space = true; | ||
1109 | |||
1110 | ret = strscpy(names, cur->name, size); | ||
1111 | if (ret < 0) | ||
1112 | return false; | ||
1113 | |||
1114 | names += ret; | ||
1115 | size -= ret; | ||
1116 | } | ||
1117 | |||
1118 | return true; | ||
1119 | } | ||
1120 | |||
1121 | static bool seccomp_action_logged_from_name(u32 *action_logged, | ||
1122 | const char *name) | ||
1123 | { | ||
1124 | const struct seccomp_log_name *cur; | ||
1125 | |||
1126 | for (cur = seccomp_log_names; cur->name; cur++) { | ||
1127 | if (!strcmp(cur->name, name)) { | ||
1128 | *action_logged = cur->log; | ||
1129 | return true; | ||
1130 | } | ||
1131 | } | ||
1132 | |||
1133 | return false; | ||
1134 | } | ||
1135 | |||
1136 | static bool seccomp_actions_logged_from_names(u32 *actions_logged, char *names) | ||
1137 | { | ||
1138 | char *name; | ||
1139 | |||
1140 | *actions_logged = 0; | ||
1141 | while ((name = strsep(&names, " ")) && *name) { | ||
1142 | u32 action_logged = 0; | ||
1143 | |||
1144 | if (!seccomp_action_logged_from_name(&action_logged, name)) | ||
1145 | return false; | ||
1146 | |||
1147 | *actions_logged |= action_logged; | ||
1148 | } | ||
1149 | |||
1150 | return true; | ||
1151 | } | ||
1152 | |||
1153 | static int seccomp_actions_logged_handler(struct ctl_table *ro_table, int write, | ||
1154 | void __user *buffer, size_t *lenp, | ||
1155 | loff_t *ppos) | ||
1156 | { | ||
1157 | char names[sizeof(seccomp_actions_avail)]; | ||
1158 | struct ctl_table table; | ||
1159 | int ret; | ||
1160 | |||
1161 | if (write && !capable(CAP_SYS_ADMIN)) | ||
1162 | return -EPERM; | ||
1163 | |||
1164 | memset(names, 0, sizeof(names)); | ||
1165 | |||
1166 | if (!write) { | ||
1167 | if (!seccomp_names_from_actions_logged(names, sizeof(names), | ||
1168 | seccomp_actions_logged)) | ||
1169 | return -EINVAL; | ||
1170 | } | ||
1171 | |||
1172 | table = *ro_table; | ||
1173 | table.data = names; | ||
1174 | table.maxlen = sizeof(names); | ||
1175 | ret = proc_dostring(&table, write, buffer, lenp, ppos); | ||
1176 | if (ret) | ||
1177 | return ret; | ||
1178 | |||
1179 | if (write) { | ||
1180 | u32 actions_logged; | ||
1181 | |||
1182 | if (!seccomp_actions_logged_from_names(&actions_logged, | ||
1183 | table.data)) | ||
1184 | return -EINVAL; | ||
1185 | |||
1186 | if (actions_logged & SECCOMP_LOG_ALLOW) | ||
1187 | return -EINVAL; | ||
1188 | |||
1189 | seccomp_actions_logged = actions_logged; | ||
1190 | } | ||
1191 | |||
1192 | return 0; | ||
1193 | } | ||
1194 | |||
1195 | static struct ctl_path seccomp_sysctl_path[] = { | ||
1196 | { .procname = "kernel", }, | ||
1197 | { .procname = "seccomp", }, | ||
1198 | { } | ||
1199 | }; | ||
1200 | |||
1201 | static struct ctl_table seccomp_sysctl_table[] = { | ||
1202 | { | ||
1203 | .procname = "actions_avail", | ||
1204 | .data = (void *) &seccomp_actions_avail, | ||
1205 | .maxlen = sizeof(seccomp_actions_avail), | ||
1206 | .mode = 0444, | ||
1207 | .proc_handler = proc_dostring, | ||
1208 | }, | ||
1209 | { | ||
1210 | .procname = "actions_logged", | ||
1211 | .mode = 0644, | ||
1212 | .proc_handler = seccomp_actions_logged_handler, | ||
1213 | }, | ||
1214 | { } | ||
1215 | }; | ||
1216 | |||
1217 | static int __init seccomp_sysctl_init(void) | ||
1218 | { | ||
1219 | struct ctl_table_header *hdr; | ||
1220 | |||
1221 | hdr = register_sysctl_paths(seccomp_sysctl_path, seccomp_sysctl_table); | ||
1222 | if (!hdr) | ||
1223 | pr_warn("seccomp: sysctl registration failed\n"); | ||
1224 | else | ||
1225 | kmemleak_not_leak(hdr); | ||
1226 | |||
1227 | return 0; | ||
1228 | } | ||
1229 | |||
1230 | device_initcall(seccomp_sysctl_init) | ||
1231 | |||
1232 | #endif /* CONFIG_SYSCTL */ | ||
diff --git a/kernel/smpboot.c b/kernel/smpboot.c index 1d71c051a951..5043e7433f4b 100644 --- a/kernel/smpboot.c +++ b/kernel/smpboot.c | |||
@@ -344,39 +344,30 @@ EXPORT_SYMBOL_GPL(smpboot_unregister_percpu_thread); | |||
344 | * by the client, but only by calling this function. | 344 | * by the client, but only by calling this function. |
345 | * This function can only be called on a registered smp_hotplug_thread. | 345 | * This function can only be called on a registered smp_hotplug_thread. |
346 | */ | 346 | */ |
347 | int smpboot_update_cpumask_percpu_thread(struct smp_hotplug_thread *plug_thread, | 347 | void smpboot_update_cpumask_percpu_thread(struct smp_hotplug_thread *plug_thread, |
348 | const struct cpumask *new) | 348 | const struct cpumask *new) |
349 | { | 349 | { |
350 | struct cpumask *old = plug_thread->cpumask; | 350 | struct cpumask *old = plug_thread->cpumask; |
351 | cpumask_var_t tmp; | 351 | static struct cpumask tmp; |
352 | unsigned int cpu; | 352 | unsigned int cpu; |
353 | 353 | ||
354 | if (!alloc_cpumask_var(&tmp, GFP_KERNEL)) | 354 | lockdep_assert_cpus_held(); |
355 | return -ENOMEM; | ||
356 | |||
357 | get_online_cpus(); | ||
358 | mutex_lock(&smpboot_threads_lock); | 355 | mutex_lock(&smpboot_threads_lock); |
359 | 356 | ||
360 | /* Park threads that were exclusively enabled on the old mask. */ | 357 | /* Park threads that were exclusively enabled on the old mask. */ |
361 | cpumask_andnot(tmp, old, new); | 358 | cpumask_andnot(&tmp, old, new); |
362 | for_each_cpu_and(cpu, tmp, cpu_online_mask) | 359 | for_each_cpu_and(cpu, &tmp, cpu_online_mask) |
363 | smpboot_park_thread(plug_thread, cpu); | 360 | smpboot_park_thread(plug_thread, cpu); |
364 | 361 | ||
365 | /* Unpark threads that are exclusively enabled on the new mask. */ | 362 | /* Unpark threads that are exclusively enabled on the new mask. */ |
366 | cpumask_andnot(tmp, new, old); | 363 | cpumask_andnot(&tmp, new, old); |
367 | for_each_cpu_and(cpu, tmp, cpu_online_mask) | 364 | for_each_cpu_and(cpu, &tmp, cpu_online_mask) |
368 | smpboot_unpark_thread(plug_thread, cpu); | 365 | smpboot_unpark_thread(plug_thread, cpu); |
369 | 366 | ||
370 | cpumask_copy(old, new); | 367 | cpumask_copy(old, new); |
371 | 368 | ||
372 | mutex_unlock(&smpboot_threads_lock); | 369 | mutex_unlock(&smpboot_threads_lock); |
373 | put_online_cpus(); | ||
374 | |||
375 | free_cpumask_var(tmp); | ||
376 | |||
377 | return 0; | ||
378 | } | 370 | } |
379 | EXPORT_SYMBOL_GPL(smpboot_update_cpumask_percpu_thread); | ||
380 | 371 | ||
381 | static DEFINE_PER_CPU(atomic_t, cpu_hotplug_state) = ATOMIC_INIT(CPU_POST_DEAD); | 372 | static DEFINE_PER_CPU(atomic_t, cpu_hotplug_state) = ATOMIC_INIT(CPU_POST_DEAD); |
382 | 373 | ||
diff --git a/kernel/sysctl.c b/kernel/sysctl.c index 6648fbbb8157..d9c31bc2eaea 100644 --- a/kernel/sysctl.c +++ b/kernel/sysctl.c | |||
@@ -367,7 +367,8 @@ static struct ctl_table kern_table[] = { | |||
367 | .data = &sysctl_sched_time_avg, | 367 | .data = &sysctl_sched_time_avg, |
368 | .maxlen = sizeof(unsigned int), | 368 | .maxlen = sizeof(unsigned int), |
369 | .mode = 0644, | 369 | .mode = 0644, |
370 | .proc_handler = proc_dointvec, | 370 | .proc_handler = proc_dointvec_minmax, |
371 | .extra1 = &one, | ||
371 | }, | 372 | }, |
372 | #ifdef CONFIG_SCHEDSTATS | 373 | #ifdef CONFIG_SCHEDSTATS |
373 | { | 374 | { |
@@ -871,9 +872,9 @@ static struct ctl_table kern_table[] = { | |||
871 | #if defined(CONFIG_LOCKUP_DETECTOR) | 872 | #if defined(CONFIG_LOCKUP_DETECTOR) |
872 | { | 873 | { |
873 | .procname = "watchdog", | 874 | .procname = "watchdog", |
874 | .data = &watchdog_user_enabled, | 875 | .data = &watchdog_user_enabled, |
875 | .maxlen = sizeof (int), | 876 | .maxlen = sizeof(int), |
876 | .mode = 0644, | 877 | .mode = 0644, |
877 | .proc_handler = proc_watchdog, | 878 | .proc_handler = proc_watchdog, |
878 | .extra1 = &zero, | 879 | .extra1 = &zero, |
879 | .extra2 = &one, | 880 | .extra2 = &one, |
@@ -889,16 +890,12 @@ static struct ctl_table kern_table[] = { | |||
889 | }, | 890 | }, |
890 | { | 891 | { |
891 | .procname = "nmi_watchdog", | 892 | .procname = "nmi_watchdog", |
892 | .data = &nmi_watchdog_enabled, | 893 | .data = &nmi_watchdog_user_enabled, |
893 | .maxlen = sizeof (int), | 894 | .maxlen = sizeof(int), |
894 | .mode = 0644, | 895 | .mode = NMI_WATCHDOG_SYSCTL_PERM, |
895 | .proc_handler = proc_nmi_watchdog, | 896 | .proc_handler = proc_nmi_watchdog, |
896 | .extra1 = &zero, | 897 | .extra1 = &zero, |
897 | #if defined(CONFIG_HAVE_NMI_WATCHDOG) || defined(CONFIG_HARDLOCKUP_DETECTOR) | ||
898 | .extra2 = &one, | 898 | .extra2 = &one, |
899 | #else | ||
900 | .extra2 = &zero, | ||
901 | #endif | ||
902 | }, | 899 | }, |
903 | { | 900 | { |
904 | .procname = "watchdog_cpumask", | 901 | .procname = "watchdog_cpumask", |
@@ -910,9 +907,9 @@ static struct ctl_table kern_table[] = { | |||
910 | #ifdef CONFIG_SOFTLOCKUP_DETECTOR | 907 | #ifdef CONFIG_SOFTLOCKUP_DETECTOR |
911 | { | 908 | { |
912 | .procname = "soft_watchdog", | 909 | .procname = "soft_watchdog", |
913 | .data = &soft_watchdog_enabled, | 910 | .data = &soft_watchdog_user_enabled, |
914 | .maxlen = sizeof (int), | 911 | .maxlen = sizeof(int), |
915 | .mode = 0644, | 912 | .mode = 0644, |
916 | .proc_handler = proc_soft_watchdog, | 913 | .proc_handler = proc_soft_watchdog, |
917 | .extra1 = &zero, | 914 | .extra1 = &zero, |
918 | .extra2 = &one, | 915 | .extra2 = &one, |
@@ -2187,8 +2184,6 @@ static int do_proc_douintvec_conv(unsigned long *lvalp, | |||
2187 | if (write) { | 2184 | if (write) { |
2188 | if (*lvalp > UINT_MAX) | 2185 | if (*lvalp > UINT_MAX) |
2189 | return -EINVAL; | 2186 | return -EINVAL; |
2190 | if (*lvalp > UINT_MAX) | ||
2191 | return -EINVAL; | ||
2192 | *valp = *lvalp; | 2187 | *valp = *lvalp; |
2193 | } else { | 2188 | } else { |
2194 | unsigned int val = *valp; | 2189 | unsigned int val = *valp; |
diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c index 2a685b45b73b..45a3928544ce 100644 --- a/kernel/trace/blktrace.c +++ b/kernel/trace/blktrace.c | |||
@@ -648,6 +648,12 @@ int blk_trace_startstop(struct request_queue *q, int start) | |||
648 | } | 648 | } |
649 | EXPORT_SYMBOL_GPL(blk_trace_startstop); | 649 | EXPORT_SYMBOL_GPL(blk_trace_startstop); |
650 | 650 | ||
651 | /* | ||
652 | * When reading or writing the blktrace sysfs files, the references to the | ||
653 | * opened sysfs or device files should prevent the underlying block device | ||
654 | * from being removed. So no further delete protection is really needed. | ||
655 | */ | ||
656 | |||
651 | /** | 657 | /** |
652 | * blk_trace_ioctl: - handle the ioctls associated with tracing | 658 | * blk_trace_ioctl: - handle the ioctls associated with tracing |
653 | * @bdev: the block device | 659 | * @bdev: the block device |
@@ -665,7 +671,7 @@ int blk_trace_ioctl(struct block_device *bdev, unsigned cmd, char __user *arg) | |||
665 | if (!q) | 671 | if (!q) |
666 | return -ENXIO; | 672 | return -ENXIO; |
667 | 673 | ||
668 | mutex_lock(&bdev->bd_mutex); | 674 | mutex_lock(&q->blk_trace_mutex); |
669 | 675 | ||
670 | switch (cmd) { | 676 | switch (cmd) { |
671 | case BLKTRACESETUP: | 677 | case BLKTRACESETUP: |
@@ -691,7 +697,7 @@ int blk_trace_ioctl(struct block_device *bdev, unsigned cmd, char __user *arg) | |||
691 | break; | 697 | break; |
692 | } | 698 | } |
693 | 699 | ||
694 | mutex_unlock(&bdev->bd_mutex); | 700 | mutex_unlock(&q->blk_trace_mutex); |
695 | return ret; | 701 | return ret; |
696 | } | 702 | } |
697 | 703 | ||
@@ -1727,7 +1733,7 @@ static ssize_t sysfs_blk_trace_attr_show(struct device *dev, | |||
1727 | if (q == NULL) | 1733 | if (q == NULL) |
1728 | goto out_bdput; | 1734 | goto out_bdput; |
1729 | 1735 | ||
1730 | mutex_lock(&bdev->bd_mutex); | 1736 | mutex_lock(&q->blk_trace_mutex); |
1731 | 1737 | ||
1732 | if (attr == &dev_attr_enable) { | 1738 | if (attr == &dev_attr_enable) { |
1733 | ret = sprintf(buf, "%u\n", !!q->blk_trace); | 1739 | ret = sprintf(buf, "%u\n", !!q->blk_trace); |
@@ -1746,7 +1752,7 @@ static ssize_t sysfs_blk_trace_attr_show(struct device *dev, | |||
1746 | ret = sprintf(buf, "%llu\n", q->blk_trace->end_lba); | 1752 | ret = sprintf(buf, "%llu\n", q->blk_trace->end_lba); |
1747 | 1753 | ||
1748 | out_unlock_bdev: | 1754 | out_unlock_bdev: |
1749 | mutex_unlock(&bdev->bd_mutex); | 1755 | mutex_unlock(&q->blk_trace_mutex); |
1750 | out_bdput: | 1756 | out_bdput: |
1751 | bdput(bdev); | 1757 | bdput(bdev); |
1752 | out: | 1758 | out: |
@@ -1788,7 +1794,7 @@ static ssize_t sysfs_blk_trace_attr_store(struct device *dev, | |||
1788 | if (q == NULL) | 1794 | if (q == NULL) |
1789 | goto out_bdput; | 1795 | goto out_bdput; |
1790 | 1796 | ||
1791 | mutex_lock(&bdev->bd_mutex); | 1797 | mutex_lock(&q->blk_trace_mutex); |
1792 | 1798 | ||
1793 | if (attr == &dev_attr_enable) { | 1799 | if (attr == &dev_attr_enable) { |
1794 | if (value) | 1800 | if (value) |
@@ -1814,7 +1820,7 @@ static ssize_t sysfs_blk_trace_attr_store(struct device *dev, | |||
1814 | } | 1820 | } |
1815 | 1821 | ||
1816 | out_unlock_bdev: | 1822 | out_unlock_bdev: |
1817 | mutex_unlock(&bdev->bd_mutex); | 1823 | mutex_unlock(&q->blk_trace_mutex); |
1818 | out_bdput: | 1824 | out_bdput: |
1819 | bdput(bdev); | 1825 | bdput(bdev); |
1820 | out: | 1826 | out: |
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c index 6abfafd7f173..8319e09e15b9 100644 --- a/kernel/trace/ftrace.c +++ b/kernel/trace/ftrace.c | |||
@@ -4954,9 +4954,6 @@ static char ftrace_graph_buf[FTRACE_FILTER_SIZE] __initdata; | |||
4954 | static char ftrace_graph_notrace_buf[FTRACE_FILTER_SIZE] __initdata; | 4954 | static char ftrace_graph_notrace_buf[FTRACE_FILTER_SIZE] __initdata; |
4955 | static int ftrace_graph_set_hash(struct ftrace_hash *hash, char *buffer); | 4955 | static int ftrace_graph_set_hash(struct ftrace_hash *hash, char *buffer); |
4956 | 4956 | ||
4957 | static unsigned long save_global_trampoline; | ||
4958 | static unsigned long save_global_flags; | ||
4959 | |||
4960 | static int __init set_graph_function(char *str) | 4957 | static int __init set_graph_function(char *str) |
4961 | { | 4958 | { |
4962 | strlcpy(ftrace_graph_buf, str, FTRACE_FILTER_SIZE); | 4959 | strlcpy(ftrace_graph_buf, str, FTRACE_FILTER_SIZE); |
@@ -6808,17 +6805,6 @@ void unregister_ftrace_graph(void) | |||
6808 | unregister_pm_notifier(&ftrace_suspend_notifier); | 6805 | unregister_pm_notifier(&ftrace_suspend_notifier); |
6809 | unregister_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL); | 6806 | unregister_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL); |
6810 | 6807 | ||
6811 | #ifdef CONFIG_DYNAMIC_FTRACE | ||
6812 | /* | ||
6813 | * Function graph does not allocate the trampoline, but | ||
6814 | * other global_ops do. We need to reset the ALLOC_TRAMP flag | ||
6815 | * if one was used. | ||
6816 | */ | ||
6817 | global_ops.trampoline = save_global_trampoline; | ||
6818 | if (save_global_flags & FTRACE_OPS_FL_ALLOC_TRAMP) | ||
6819 | global_ops.flags |= FTRACE_OPS_FL_ALLOC_TRAMP; | ||
6820 | #endif | ||
6821 | |||
6822 | out: | 6808 | out: |
6823 | mutex_unlock(&ftrace_lock); | 6809 | mutex_unlock(&ftrace_lock); |
6824 | } | 6810 | } |
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index 5360b7aec57a..752e5daf0896 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c | |||
@@ -4020,11 +4020,17 @@ static int tracing_open(struct inode *inode, struct file *file) | |||
4020 | /* If this file was open for write, then erase contents */ | 4020 | /* If this file was open for write, then erase contents */ |
4021 | if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) { | 4021 | if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) { |
4022 | int cpu = tracing_get_cpu(inode); | 4022 | int cpu = tracing_get_cpu(inode); |
4023 | struct trace_buffer *trace_buf = &tr->trace_buffer; | ||
4024 | |||
4025 | #ifdef CONFIG_TRACER_MAX_TRACE | ||
4026 | if (tr->current_trace->print_max) | ||
4027 | trace_buf = &tr->max_buffer; | ||
4028 | #endif | ||
4023 | 4029 | ||
4024 | if (cpu == RING_BUFFER_ALL_CPUS) | 4030 | if (cpu == RING_BUFFER_ALL_CPUS) |
4025 | tracing_reset_online_cpus(&tr->trace_buffer); | 4031 | tracing_reset_online_cpus(trace_buf); |
4026 | else | 4032 | else |
4027 | tracing_reset(&tr->trace_buffer, cpu); | 4033 | tracing_reset(trace_buf, cpu); |
4028 | } | 4034 | } |
4029 | 4035 | ||
4030 | if (file->f_mode & FMODE_READ) { | 4036 | if (file->f_mode & FMODE_READ) { |
@@ -5358,6 +5364,13 @@ static int tracing_set_tracer(struct trace_array *tr, const char *buf) | |||
5358 | if (t == tr->current_trace) | 5364 | if (t == tr->current_trace) |
5359 | goto out; | 5365 | goto out; |
5360 | 5366 | ||
5367 | /* Some tracers won't work on kernel command line */ | ||
5368 | if (system_state < SYSTEM_RUNNING && t->noboot) { | ||
5369 | pr_warn("Tracer '%s' is not allowed on command line, ignored\n", | ||
5370 | t->name); | ||
5371 | goto out; | ||
5372 | } | ||
5373 | |||
5361 | /* Some tracers are only allowed for the top level buffer */ | 5374 | /* Some tracers are only allowed for the top level buffer */ |
5362 | if (!trace_ok_for_array(t, tr)) { | 5375 | if (!trace_ok_for_array(t, tr)) { |
5363 | ret = -EINVAL; | 5376 | ret = -EINVAL; |
@@ -5667,7 +5680,7 @@ static int tracing_wait_pipe(struct file *filp) | |||
5667 | * | 5680 | * |
5668 | * iter->pos will be 0 if we haven't read anything. | 5681 | * iter->pos will be 0 if we haven't read anything. |
5669 | */ | 5682 | */ |
5670 | if (!tracing_is_on() && iter->pos) | 5683 | if (!tracer_tracing_is_on(iter->tr) && iter->pos) |
5671 | break; | 5684 | break; |
5672 | 5685 | ||
5673 | mutex_unlock(&iter->mutex); | 5686 | mutex_unlock(&iter->mutex); |
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h index fb5d54d0d1b3..652c682707cd 100644 --- a/kernel/trace/trace.h +++ b/kernel/trace/trace.h | |||
@@ -444,6 +444,8 @@ struct tracer { | |||
444 | #ifdef CONFIG_TRACER_MAX_TRACE | 444 | #ifdef CONFIG_TRACER_MAX_TRACE |
445 | bool use_max_tr; | 445 | bool use_max_tr; |
446 | #endif | 446 | #endif |
447 | /* True if tracer cannot be enabled in kernel param */ | ||
448 | bool noboot; | ||
447 | }; | 449 | }; |
448 | 450 | ||
449 | 451 | ||
diff --git a/kernel/trace/trace_mmiotrace.c b/kernel/trace/trace_mmiotrace.c index cd7480d0a201..dca78fc48439 100644 --- a/kernel/trace/trace_mmiotrace.c +++ b/kernel/trace/trace_mmiotrace.c | |||
@@ -282,6 +282,7 @@ static struct tracer mmio_tracer __read_mostly = | |||
282 | .close = mmio_close, | 282 | .close = mmio_close, |
283 | .read = mmio_read, | 283 | .read = mmio_read, |
284 | .print_line = mmio_print_line, | 284 | .print_line = mmio_print_line, |
285 | .noboot = true, | ||
285 | }; | 286 | }; |
286 | 287 | ||
287 | __init static int init_mmio_trace(void) | 288 | __init static int init_mmio_trace(void) |
diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c index bac629af2285..c738e764e2a5 100644 --- a/kernel/trace/trace_output.c +++ b/kernel/trace/trace_output.c | |||
@@ -656,15 +656,6 @@ int trace_print_lat_context(struct trace_iterator *iter) | |||
656 | return !trace_seq_has_overflowed(s); | 656 | return !trace_seq_has_overflowed(s); |
657 | } | 657 | } |
658 | 658 | ||
659 | static const char state_to_char[] = TASK_STATE_TO_CHAR_STR; | ||
660 | |||
661 | static int task_state_char(unsigned long state) | ||
662 | { | ||
663 | int bit = state ? __ffs(state) + 1 : 0; | ||
664 | |||
665 | return bit < sizeof(state_to_char) - 1 ? state_to_char[bit] : '?'; | ||
666 | } | ||
667 | |||
668 | /** | 659 | /** |
669 | * ftrace_find_event - find a registered event | 660 | * ftrace_find_event - find a registered event |
670 | * @type: the type of event to look for | 661 | * @type: the type of event to look for |
@@ -930,8 +921,8 @@ static enum print_line_t trace_ctxwake_print(struct trace_iterator *iter, | |||
930 | 921 | ||
931 | trace_assign_type(field, iter->ent); | 922 | trace_assign_type(field, iter->ent); |
932 | 923 | ||
933 | T = task_state_char(field->next_state); | 924 | T = __task_state_to_char(field->next_state); |
934 | S = task_state_char(field->prev_state); | 925 | S = __task_state_to_char(field->prev_state); |
935 | trace_find_cmdline(field->next_pid, comm); | 926 | trace_find_cmdline(field->next_pid, comm); |
936 | trace_seq_printf(&iter->seq, | 927 | trace_seq_printf(&iter->seq, |
937 | " %5d:%3d:%c %s [%03d] %5d:%3d:%c %s\n", | 928 | " %5d:%3d:%c %s [%03d] %5d:%3d:%c %s\n", |
@@ -966,8 +957,8 @@ static int trace_ctxwake_raw(struct trace_iterator *iter, char S) | |||
966 | trace_assign_type(field, iter->ent); | 957 | trace_assign_type(field, iter->ent); |
967 | 958 | ||
968 | if (!S) | 959 | if (!S) |
969 | S = task_state_char(field->prev_state); | 960 | S = __task_state_to_char(field->prev_state); |
970 | T = task_state_char(field->next_state); | 961 | T = __task_state_to_char(field->next_state); |
971 | trace_seq_printf(&iter->seq, "%d %d %c %d %d %d %c\n", | 962 | trace_seq_printf(&iter->seq, "%d %d %c %d %d %d %c\n", |
972 | field->prev_pid, | 963 | field->prev_pid, |
973 | field->prev_prio, | 964 | field->prev_prio, |
@@ -1002,8 +993,8 @@ static int trace_ctxwake_hex(struct trace_iterator *iter, char S) | |||
1002 | trace_assign_type(field, iter->ent); | 993 | trace_assign_type(field, iter->ent); |
1003 | 994 | ||
1004 | if (!S) | 995 | if (!S) |
1005 | S = task_state_char(field->prev_state); | 996 | S = __task_state_to_char(field->prev_state); |
1006 | T = task_state_char(field->next_state); | 997 | T = __task_state_to_char(field->next_state); |
1007 | 998 | ||
1008 | SEQ_PUT_HEX_FIELD(s, field->prev_pid); | 999 | SEQ_PUT_HEX_FIELD(s, field->prev_pid); |
1009 | SEQ_PUT_HEX_FIELD(s, field->prev_prio); | 1000 | SEQ_PUT_HEX_FIELD(s, field->prev_prio); |
diff --git a/kernel/trace/trace_sched_wakeup.c b/kernel/trace/trace_sched_wakeup.c index ddec53b67646..0c331978b1a6 100644 --- a/kernel/trace/trace_sched_wakeup.c +++ b/kernel/trace/trace_sched_wakeup.c | |||
@@ -397,10 +397,10 @@ tracing_sched_switch_trace(struct trace_array *tr, | |||
397 | entry = ring_buffer_event_data(event); | 397 | entry = ring_buffer_event_data(event); |
398 | entry->prev_pid = prev->pid; | 398 | entry->prev_pid = prev->pid; |
399 | entry->prev_prio = prev->prio; | 399 | entry->prev_prio = prev->prio; |
400 | entry->prev_state = prev->state; | 400 | entry->prev_state = __get_task_state(prev); |
401 | entry->next_pid = next->pid; | 401 | entry->next_pid = next->pid; |
402 | entry->next_prio = next->prio; | 402 | entry->next_prio = next->prio; |
403 | entry->next_state = next->state; | 403 | entry->next_state = __get_task_state(next); |
404 | entry->next_cpu = task_cpu(next); | 404 | entry->next_cpu = task_cpu(next); |
405 | 405 | ||
406 | if (!call_filter_check_discard(call, entry, buffer, event)) | 406 | if (!call_filter_check_discard(call, entry, buffer, event)) |
@@ -425,10 +425,10 @@ tracing_sched_wakeup_trace(struct trace_array *tr, | |||
425 | entry = ring_buffer_event_data(event); | 425 | entry = ring_buffer_event_data(event); |
426 | entry->prev_pid = curr->pid; | 426 | entry->prev_pid = curr->pid; |
427 | entry->prev_prio = curr->prio; | 427 | entry->prev_prio = curr->prio; |
428 | entry->prev_state = curr->state; | 428 | entry->prev_state = __get_task_state(curr); |
429 | entry->next_pid = wakee->pid; | 429 | entry->next_pid = wakee->pid; |
430 | entry->next_prio = wakee->prio; | 430 | entry->next_prio = wakee->prio; |
431 | entry->next_state = wakee->state; | 431 | entry->next_state = __get_task_state(wakee); |
432 | entry->next_cpu = task_cpu(wakee); | 432 | entry->next_cpu = task_cpu(wakee); |
433 | 433 | ||
434 | if (!call_filter_check_discard(call, entry, buffer, event)) | 434 | if (!call_filter_check_discard(call, entry, buffer, event)) |
diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c index a4df67cbc711..49cb41412eec 100644 --- a/kernel/trace/trace_stack.c +++ b/kernel/trace/trace_stack.c | |||
@@ -96,23 +96,9 @@ check_stack(unsigned long ip, unsigned long *stack) | |||
96 | if (in_nmi()) | 96 | if (in_nmi()) |
97 | return; | 97 | return; |
98 | 98 | ||
99 | /* | ||
100 | * There's a slight chance that we are tracing inside the | ||
101 | * RCU infrastructure, and rcu_irq_enter() will not work | ||
102 | * as expected. | ||
103 | */ | ||
104 | if (unlikely(rcu_irq_enter_disabled())) | ||
105 | return; | ||
106 | |||
107 | local_irq_save(flags); | 99 | local_irq_save(flags); |
108 | arch_spin_lock(&stack_trace_max_lock); | 100 | arch_spin_lock(&stack_trace_max_lock); |
109 | 101 | ||
110 | /* | ||
111 | * RCU may not be watching, make it see us. | ||
112 | * The stack trace code uses rcu_sched. | ||
113 | */ | ||
114 | rcu_irq_enter(); | ||
115 | |||
116 | /* In case another CPU set the tracer_frame on us */ | 102 | /* In case another CPU set the tracer_frame on us */ |
117 | if (unlikely(!frame_size)) | 103 | if (unlikely(!frame_size)) |
118 | this_size -= tracer_frame; | 104 | this_size -= tracer_frame; |
@@ -205,7 +191,6 @@ check_stack(unsigned long ip, unsigned long *stack) | |||
205 | } | 191 | } |
206 | 192 | ||
207 | out: | 193 | out: |
208 | rcu_irq_exit(); | ||
209 | arch_spin_unlock(&stack_trace_max_lock); | 194 | arch_spin_unlock(&stack_trace_max_lock); |
210 | local_irq_restore(flags); | 195 | local_irq_restore(flags); |
211 | } | 196 | } |
diff --git a/kernel/watchdog.c b/kernel/watchdog.c index f5d52024f6b7..6bcb854909c0 100644 --- a/kernel/watchdog.c +++ b/kernel/watchdog.c | |||
@@ -29,20 +29,29 @@ | |||
29 | #include <linux/kvm_para.h> | 29 | #include <linux/kvm_para.h> |
30 | #include <linux/kthread.h> | 30 | #include <linux/kthread.h> |
31 | 31 | ||
32 | /* Watchdog configuration */ | 32 | static DEFINE_MUTEX(watchdog_mutex); |
33 | static DEFINE_MUTEX(watchdog_proc_mutex); | ||
34 | |||
35 | int __read_mostly nmi_watchdog_enabled; | ||
36 | 33 | ||
37 | #if defined(CONFIG_HARDLOCKUP_DETECTOR) || defined(CONFIG_HAVE_NMI_WATCHDOG) | 34 | #if defined(CONFIG_HARDLOCKUP_DETECTOR) || defined(CONFIG_HAVE_NMI_WATCHDOG) |
38 | unsigned long __read_mostly watchdog_enabled = SOFT_WATCHDOG_ENABLED | | 35 | # define WATCHDOG_DEFAULT (SOFT_WATCHDOG_ENABLED | NMI_WATCHDOG_ENABLED) |
39 | NMI_WATCHDOG_ENABLED; | 36 | # define NMI_WATCHDOG_DEFAULT 1 |
40 | #else | 37 | #else |
41 | unsigned long __read_mostly watchdog_enabled = SOFT_WATCHDOG_ENABLED; | 38 | # define WATCHDOG_DEFAULT (SOFT_WATCHDOG_ENABLED) |
39 | # define NMI_WATCHDOG_DEFAULT 0 | ||
42 | #endif | 40 | #endif |
43 | 41 | ||
42 | unsigned long __read_mostly watchdog_enabled; | ||
43 | int __read_mostly watchdog_user_enabled = 1; | ||
44 | int __read_mostly nmi_watchdog_user_enabled = NMI_WATCHDOG_DEFAULT; | ||
45 | int __read_mostly soft_watchdog_user_enabled = 1; | ||
46 | int __read_mostly watchdog_thresh = 10; | ||
47 | int __read_mostly nmi_watchdog_available; | ||
48 | |||
49 | struct cpumask watchdog_allowed_mask __read_mostly; | ||
50 | |||
51 | struct cpumask watchdog_cpumask __read_mostly; | ||
52 | unsigned long *watchdog_cpumask_bits = cpumask_bits(&watchdog_cpumask); | ||
53 | |||
44 | #ifdef CONFIG_HARDLOCKUP_DETECTOR | 54 | #ifdef CONFIG_HARDLOCKUP_DETECTOR |
45 | /* boot commands */ | ||
46 | /* | 55 | /* |
47 | * Should we panic when a soft-lockup or hard-lockup occurs: | 56 | * Should we panic when a soft-lockup or hard-lockup occurs: |
48 | */ | 57 | */ |
@@ -56,9 +65,9 @@ unsigned int __read_mostly hardlockup_panic = | |||
56 | * kernel command line parameters are parsed, because otherwise it is not | 65 | * kernel command line parameters are parsed, because otherwise it is not |
57 | * possible to override this in hardlockup_panic_setup(). | 66 | * possible to override this in hardlockup_panic_setup(). |
58 | */ | 67 | */ |
59 | void hardlockup_detector_disable(void) | 68 | void __init hardlockup_detector_disable(void) |
60 | { | 69 | { |
61 | watchdog_enabled &= ~NMI_WATCHDOG_ENABLED; | 70 | nmi_watchdog_user_enabled = 0; |
62 | } | 71 | } |
63 | 72 | ||
64 | static int __init hardlockup_panic_setup(char *str) | 73 | static int __init hardlockup_panic_setup(char *str) |
@@ -68,48 +77,24 @@ static int __init hardlockup_panic_setup(char *str) | |||
68 | else if (!strncmp(str, "nopanic", 7)) | 77 | else if (!strncmp(str, "nopanic", 7)) |
69 | hardlockup_panic = 0; | 78 | hardlockup_panic = 0; |
70 | else if (!strncmp(str, "0", 1)) | 79 | else if (!strncmp(str, "0", 1)) |
71 | watchdog_enabled &= ~NMI_WATCHDOG_ENABLED; | 80 | nmi_watchdog_user_enabled = 0; |
72 | else if (!strncmp(str, "1", 1)) | 81 | else if (!strncmp(str, "1", 1)) |
73 | watchdog_enabled |= NMI_WATCHDOG_ENABLED; | 82 | nmi_watchdog_user_enabled = 1; |
74 | return 1; | 83 | return 1; |
75 | } | 84 | } |
76 | __setup("nmi_watchdog=", hardlockup_panic_setup); | 85 | __setup("nmi_watchdog=", hardlockup_panic_setup); |
77 | 86 | ||
78 | #endif | 87 | # ifdef CONFIG_SMP |
79 | |||
80 | #ifdef CONFIG_SOFTLOCKUP_DETECTOR | ||
81 | int __read_mostly soft_watchdog_enabled; | ||
82 | #endif | ||
83 | |||
84 | int __read_mostly watchdog_user_enabled; | ||
85 | int __read_mostly watchdog_thresh = 10; | ||
86 | |||
87 | #ifdef CONFIG_SMP | ||
88 | int __read_mostly sysctl_softlockup_all_cpu_backtrace; | ||
89 | int __read_mostly sysctl_hardlockup_all_cpu_backtrace; | 88 | int __read_mostly sysctl_hardlockup_all_cpu_backtrace; |
90 | #endif | ||
91 | struct cpumask watchdog_cpumask __read_mostly; | ||
92 | unsigned long *watchdog_cpumask_bits = cpumask_bits(&watchdog_cpumask); | ||
93 | 89 | ||
94 | /* | 90 | static int __init hardlockup_all_cpu_backtrace_setup(char *str) |
95 | * The 'watchdog_running' variable is set to 1 when the watchdog threads | 91 | { |
96 | * are registered/started and is set to 0 when the watchdog threads are | 92 | sysctl_hardlockup_all_cpu_backtrace = !!simple_strtol(str, NULL, 0); |
97 | * unregistered/stopped, so it is an indicator whether the threads exist. | 93 | return 1; |
98 | */ | 94 | } |
99 | static int __read_mostly watchdog_running; | 95 | __setup("hardlockup_all_cpu_backtrace=", hardlockup_all_cpu_backtrace_setup); |
100 | /* | 96 | # endif /* CONFIG_SMP */ |
101 | * If a subsystem has a need to deactivate the watchdog temporarily, it | 97 | #endif /* CONFIG_HARDLOCKUP_DETECTOR */ |
102 | * can use the suspend/resume interface to achieve this. The content of | ||
103 | * the 'watchdog_suspended' variable reflects this state. Existing threads | ||
104 | * are parked/unparked by the lockup_detector_{suspend|resume} functions | ||
105 | * (see comment blocks pertaining to those functions for further details). | ||
106 | * | ||
107 | * 'watchdog_suspended' also prevents threads from being registered/started | ||
108 | * or unregistered/stopped via parameters in /proc/sys/kernel, so the state | ||
109 | * of 'watchdog_running' cannot change while the watchdog is deactivated | ||
110 | * temporarily (see related code in 'proc' handlers). | ||
111 | */ | ||
112 | int __read_mostly watchdog_suspended; | ||
113 | 98 | ||
114 | /* | 99 | /* |
115 | * These functions can be overridden if an architecture implements its | 100 | * These functions can be overridden if an architecture implements its |
@@ -121,36 +106,68 @@ int __read_mostly watchdog_suspended; | |||
121 | */ | 106 | */ |
122 | int __weak watchdog_nmi_enable(unsigned int cpu) | 107 | int __weak watchdog_nmi_enable(unsigned int cpu) |
123 | { | 108 | { |
109 | hardlockup_detector_perf_enable(); | ||
124 | return 0; | 110 | return 0; |
125 | } | 111 | } |
112 | |||
126 | void __weak watchdog_nmi_disable(unsigned int cpu) | 113 | void __weak watchdog_nmi_disable(unsigned int cpu) |
127 | { | 114 | { |
115 | hardlockup_detector_perf_disable(); | ||
128 | } | 116 | } |
129 | 117 | ||
130 | /* | 118 | /* Return 0, if a NMI watchdog is available. Error code otherwise */ |
131 | * watchdog_nmi_reconfigure can be implemented to be notified after any | 119 | int __weak __init watchdog_nmi_probe(void) |
132 | * watchdog configuration change. The arch hardlockup watchdog should | 120 | { |
133 | * respond to the following variables: | 121 | return hardlockup_detector_perf_init(); |
134 | * - nmi_watchdog_enabled | 122 | } |
123 | |||
124 | /** | ||
125 | * watchdog_nmi_stop - Stop the watchdog for reconfiguration | ||
126 | * | ||
127 | * The reconfiguration steps are: | ||
128 | * watchdog_nmi_stop(); | ||
129 | * update_variables(); | ||
130 | * watchdog_nmi_start(); | ||
131 | */ | ||
132 | void __weak watchdog_nmi_stop(void) { } | ||
133 | |||
134 | /** | ||
135 | * watchdog_nmi_start - Start the watchdog after reconfiguration | ||
136 | * | ||
137 | * Counterpart to watchdog_nmi_stop(). | ||
138 | * | ||
139 | * The following variables have been updated in update_variables() and | ||
140 | * contain the currently valid configuration: | ||
141 | * - watchdog_enabled | ||
135 | * - watchdog_thresh | 142 | * - watchdog_thresh |
136 | * - watchdog_cpumask | 143 | * - watchdog_cpumask |
137 | * - sysctl_hardlockup_all_cpu_backtrace | ||
138 | * - hardlockup_panic | ||
139 | * - watchdog_suspended | ||
140 | */ | 144 | */ |
141 | void __weak watchdog_nmi_reconfigure(void) | 145 | void __weak watchdog_nmi_start(void) { } |
146 | |||
147 | /** | ||
148 | * lockup_detector_update_enable - Update the sysctl enable bit | ||
149 | * | ||
150 | * Caller needs to make sure that the NMI/perf watchdogs are off, so this | ||
151 | * can't race with watchdog_nmi_disable(). | ||
152 | */ | ||
153 | static void lockup_detector_update_enable(void) | ||
142 | { | 154 | { |
155 | watchdog_enabled = 0; | ||
156 | if (!watchdog_user_enabled) | ||
157 | return; | ||
158 | if (nmi_watchdog_available && nmi_watchdog_user_enabled) | ||
159 | watchdog_enabled |= NMI_WATCHDOG_ENABLED; | ||
160 | if (soft_watchdog_user_enabled) | ||
161 | watchdog_enabled |= SOFT_WATCHDOG_ENABLED; | ||
143 | } | 162 | } |
144 | 163 | ||
145 | |||
146 | #ifdef CONFIG_SOFTLOCKUP_DETECTOR | 164 | #ifdef CONFIG_SOFTLOCKUP_DETECTOR |
147 | 165 | ||
148 | /* Helper for online, unparked cpus. */ | 166 | /* Global variables, exported for sysctl */ |
149 | #define for_each_watchdog_cpu(cpu) \ | 167 | unsigned int __read_mostly softlockup_panic = |
150 | for_each_cpu_and((cpu), cpu_online_mask, &watchdog_cpumask) | 168 | CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE; |
151 | |||
152 | atomic_t watchdog_park_in_progress = ATOMIC_INIT(0); | ||
153 | 169 | ||
170 | static bool softlockup_threads_initialized __read_mostly; | ||
154 | static u64 __read_mostly sample_period; | 171 | static u64 __read_mostly sample_period; |
155 | 172 | ||
156 | static DEFINE_PER_CPU(unsigned long, watchdog_touch_ts); | 173 | static DEFINE_PER_CPU(unsigned long, watchdog_touch_ts); |
@@ -164,50 +181,40 @@ static DEFINE_PER_CPU(struct task_struct *, softlockup_task_ptr_saved); | |||
164 | static DEFINE_PER_CPU(unsigned long, hrtimer_interrupts_saved); | 181 | static DEFINE_PER_CPU(unsigned long, hrtimer_interrupts_saved); |
165 | static unsigned long soft_lockup_nmi_warn; | 182 | static unsigned long soft_lockup_nmi_warn; |
166 | 183 | ||
167 | unsigned int __read_mostly softlockup_panic = | ||
168 | CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE; | ||
169 | |||
170 | static int __init softlockup_panic_setup(char *str) | 184 | static int __init softlockup_panic_setup(char *str) |
171 | { | 185 | { |
172 | softlockup_panic = simple_strtoul(str, NULL, 0); | 186 | softlockup_panic = simple_strtoul(str, NULL, 0); |
173 | |||
174 | return 1; | 187 | return 1; |
175 | } | 188 | } |
176 | __setup("softlockup_panic=", softlockup_panic_setup); | 189 | __setup("softlockup_panic=", softlockup_panic_setup); |
177 | 190 | ||
178 | static int __init nowatchdog_setup(char *str) | 191 | static int __init nowatchdog_setup(char *str) |
179 | { | 192 | { |
180 | watchdog_enabled = 0; | 193 | watchdog_user_enabled = 0; |
181 | return 1; | 194 | return 1; |
182 | } | 195 | } |
183 | __setup("nowatchdog", nowatchdog_setup); | 196 | __setup("nowatchdog", nowatchdog_setup); |
184 | 197 | ||
185 | static int __init nosoftlockup_setup(char *str) | 198 | static int __init nosoftlockup_setup(char *str) |
186 | { | 199 | { |
187 | watchdog_enabled &= ~SOFT_WATCHDOG_ENABLED; | 200 | soft_watchdog_user_enabled = 0; |
188 | return 1; | 201 | return 1; |
189 | } | 202 | } |
190 | __setup("nosoftlockup", nosoftlockup_setup); | 203 | __setup("nosoftlockup", nosoftlockup_setup); |
191 | 204 | ||
192 | #ifdef CONFIG_SMP | 205 | #ifdef CONFIG_SMP |
206 | int __read_mostly sysctl_softlockup_all_cpu_backtrace; | ||
207 | |||
193 | static int __init softlockup_all_cpu_backtrace_setup(char *str) | 208 | static int __init softlockup_all_cpu_backtrace_setup(char *str) |
194 | { | 209 | { |
195 | sysctl_softlockup_all_cpu_backtrace = | 210 | sysctl_softlockup_all_cpu_backtrace = !!simple_strtol(str, NULL, 0); |
196 | !!simple_strtol(str, NULL, 0); | ||
197 | return 1; | 211 | return 1; |
198 | } | 212 | } |
199 | __setup("softlockup_all_cpu_backtrace=", softlockup_all_cpu_backtrace_setup); | 213 | __setup("softlockup_all_cpu_backtrace=", softlockup_all_cpu_backtrace_setup); |
200 | #ifdef CONFIG_HARDLOCKUP_DETECTOR | ||
201 | static int __init hardlockup_all_cpu_backtrace_setup(char *str) | ||
202 | { | ||
203 | sysctl_hardlockup_all_cpu_backtrace = | ||
204 | !!simple_strtol(str, NULL, 0); | ||
205 | return 1; | ||
206 | } | ||
207 | __setup("hardlockup_all_cpu_backtrace=", hardlockup_all_cpu_backtrace_setup); | ||
208 | #endif | ||
209 | #endif | 214 | #endif |
210 | 215 | ||
216 | static void __lockup_detector_cleanup(void); | ||
217 | |||
211 | /* | 218 | /* |
212 | * Hard-lockup warnings should be triggered after just a few seconds. Soft- | 219 | * Hard-lockup warnings should be triggered after just a few seconds. Soft- |
213 | * lockups can have false positives under extreme conditions. So we generally | 220 | * lockups can have false positives under extreme conditions. So we generally |
@@ -278,11 +285,15 @@ void touch_all_softlockup_watchdogs(void) | |||
278 | int cpu; | 285 | int cpu; |
279 | 286 | ||
280 | /* | 287 | /* |
281 | * this is done lockless | 288 | * watchdog_mutex cannpt be taken here, as this might be called |
282 | * do we care if a 0 races with a timestamp? | 289 | * from (soft)interrupt context, so the access to |
283 | * all it means is the softlock check starts one cycle later | 290 | * watchdog_allowed_cpumask might race with a concurrent update. |
291 | * | ||
292 | * The watchdog time stamp can race against a concurrent real | ||
293 | * update as well, the only side effect might be a cycle delay for | ||
294 | * the softlockup check. | ||
284 | */ | 295 | */ |
285 | for_each_watchdog_cpu(cpu) | 296 | for_each_cpu(cpu, &watchdog_allowed_mask) |
286 | per_cpu(watchdog_touch_ts, cpu) = 0; | 297 | per_cpu(watchdog_touch_ts, cpu) = 0; |
287 | wq_watchdog_touch(-1); | 298 | wq_watchdog_touch(-1); |
288 | } | 299 | } |
@@ -322,9 +333,6 @@ static void watchdog_interrupt_count(void) | |||
322 | __this_cpu_inc(hrtimer_interrupts); | 333 | __this_cpu_inc(hrtimer_interrupts); |
323 | } | 334 | } |
324 | 335 | ||
325 | static int watchdog_enable_all_cpus(void); | ||
326 | static void watchdog_disable_all_cpus(void); | ||
327 | |||
328 | /* watchdog kicker functions */ | 336 | /* watchdog kicker functions */ |
329 | static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer) | 337 | static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer) |
330 | { | 338 | { |
@@ -333,7 +341,7 @@ static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer) | |||
333 | int duration; | 341 | int duration; |
334 | int softlockup_all_cpu_backtrace = sysctl_softlockup_all_cpu_backtrace; | 342 | int softlockup_all_cpu_backtrace = sysctl_softlockup_all_cpu_backtrace; |
335 | 343 | ||
336 | if (atomic_read(&watchdog_park_in_progress) != 0) | 344 | if (!watchdog_enabled) |
337 | return HRTIMER_NORESTART; | 345 | return HRTIMER_NORESTART; |
338 | 346 | ||
339 | /* kick the hardlockup detector */ | 347 | /* kick the hardlockup detector */ |
@@ -447,32 +455,38 @@ static void watchdog_set_prio(unsigned int policy, unsigned int prio) | |||
447 | 455 | ||
448 | static void watchdog_enable(unsigned int cpu) | 456 | static void watchdog_enable(unsigned int cpu) |
449 | { | 457 | { |
450 | struct hrtimer *hrtimer = raw_cpu_ptr(&watchdog_hrtimer); | 458 | struct hrtimer *hrtimer = this_cpu_ptr(&watchdog_hrtimer); |
451 | 459 | ||
452 | /* kick off the timer for the hardlockup detector */ | 460 | /* |
461 | * Start the timer first to prevent the NMI watchdog triggering | ||
462 | * before the timer has a chance to fire. | ||
463 | */ | ||
453 | hrtimer_init(hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); | 464 | hrtimer_init(hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); |
454 | hrtimer->function = watchdog_timer_fn; | 465 | hrtimer->function = watchdog_timer_fn; |
455 | |||
456 | /* Enable the perf event */ | ||
457 | watchdog_nmi_enable(cpu); | ||
458 | |||
459 | /* done here because hrtimer_start can only pin to smp_processor_id() */ | ||
460 | hrtimer_start(hrtimer, ns_to_ktime(sample_period), | 466 | hrtimer_start(hrtimer, ns_to_ktime(sample_period), |
461 | HRTIMER_MODE_REL_PINNED); | 467 | HRTIMER_MODE_REL_PINNED); |
462 | 468 | ||
463 | /* initialize timestamp */ | 469 | /* Initialize timestamp */ |
464 | watchdog_set_prio(SCHED_FIFO, MAX_RT_PRIO - 1); | ||
465 | __touch_watchdog(); | 470 | __touch_watchdog(); |
471 | /* Enable the perf event */ | ||
472 | if (watchdog_enabled & NMI_WATCHDOG_ENABLED) | ||
473 | watchdog_nmi_enable(cpu); | ||
474 | |||
475 | watchdog_set_prio(SCHED_FIFO, MAX_RT_PRIO - 1); | ||
466 | } | 476 | } |
467 | 477 | ||
468 | static void watchdog_disable(unsigned int cpu) | 478 | static void watchdog_disable(unsigned int cpu) |
469 | { | 479 | { |
470 | struct hrtimer *hrtimer = raw_cpu_ptr(&watchdog_hrtimer); | 480 | struct hrtimer *hrtimer = this_cpu_ptr(&watchdog_hrtimer); |
471 | 481 | ||
472 | watchdog_set_prio(SCHED_NORMAL, 0); | 482 | watchdog_set_prio(SCHED_NORMAL, 0); |
473 | hrtimer_cancel(hrtimer); | 483 | /* |
474 | /* disable the perf event */ | 484 | * Disable the perf event first. That prevents that a large delay |
485 | * between disabling the timer and disabling the perf event causes | ||
486 | * the perf NMI to detect a false positive. | ||
487 | */ | ||
475 | watchdog_nmi_disable(cpu); | 488 | watchdog_nmi_disable(cpu); |
489 | hrtimer_cancel(hrtimer); | ||
476 | } | 490 | } |
477 | 491 | ||
478 | static void watchdog_cleanup(unsigned int cpu, bool online) | 492 | static void watchdog_cleanup(unsigned int cpu, bool online) |
@@ -499,21 +513,6 @@ static void watchdog(unsigned int cpu) | |||
499 | __this_cpu_write(soft_lockup_hrtimer_cnt, | 513 | __this_cpu_write(soft_lockup_hrtimer_cnt, |
500 | __this_cpu_read(hrtimer_interrupts)); | 514 | __this_cpu_read(hrtimer_interrupts)); |
501 | __touch_watchdog(); | 515 | __touch_watchdog(); |
502 | |||
503 | /* | ||
504 | * watchdog_nmi_enable() clears the NMI_WATCHDOG_ENABLED bit in the | ||
505 | * failure path. Check for failures that can occur asynchronously - | ||
506 | * for example, when CPUs are on-lined - and shut down the hardware | ||
507 | * perf event on each CPU accordingly. | ||
508 | * | ||
509 | * The only non-obvious place this bit can be cleared is through | ||
510 | * watchdog_nmi_enable(), so a pr_info() is placed there. Placing a | ||
511 | * pr_info here would be too noisy as it would result in a message | ||
512 | * every few seconds if the hardlockup was disabled but the softlockup | ||
513 | * enabled. | ||
514 | */ | ||
515 | if (!(watchdog_enabled & NMI_WATCHDOG_ENABLED)) | ||
516 | watchdog_nmi_disable(cpu); | ||
517 | } | 516 | } |
518 | 517 | ||
519 | static struct smp_hotplug_thread watchdog_threads = { | 518 | static struct smp_hotplug_thread watchdog_threads = { |
@@ -527,295 +526,174 @@ static struct smp_hotplug_thread watchdog_threads = { | |||
527 | .unpark = watchdog_enable, | 526 | .unpark = watchdog_enable, |
528 | }; | 527 | }; |
529 | 528 | ||
530 | /* | 529 | static void softlockup_update_smpboot_threads(void) |
531 | * park all watchdog threads that are specified in 'watchdog_cpumask' | ||
532 | * | ||
533 | * This function returns an error if kthread_park() of a watchdog thread | ||
534 | * fails. In this situation, the watchdog threads of some CPUs can already | ||
535 | * be parked and the watchdog threads of other CPUs can still be runnable. | ||
536 | * Callers are expected to handle this special condition as appropriate in | ||
537 | * their context. | ||
538 | * | ||
539 | * This function may only be called in a context that is protected against | ||
540 | * races with CPU hotplug - for example, via get_online_cpus(). | ||
541 | */ | ||
542 | static int watchdog_park_threads(void) | ||
543 | { | 530 | { |
544 | int cpu, ret = 0; | 531 | lockdep_assert_held(&watchdog_mutex); |
545 | 532 | ||
546 | atomic_set(&watchdog_park_in_progress, 1); | 533 | if (!softlockup_threads_initialized) |
534 | return; | ||
547 | 535 | ||
548 | for_each_watchdog_cpu(cpu) { | 536 | smpboot_update_cpumask_percpu_thread(&watchdog_threads, |
549 | ret = kthread_park(per_cpu(softlockup_watchdog, cpu)); | 537 | &watchdog_allowed_mask); |
550 | if (ret) | ||
551 | break; | ||
552 | } | ||
553 | |||
554 | atomic_set(&watchdog_park_in_progress, 0); | ||
555 | |||
556 | return ret; | ||
557 | } | 538 | } |
558 | 539 | ||
559 | /* | 540 | /* Temporarily park all watchdog threads */ |
560 | * unpark all watchdog threads that are specified in 'watchdog_cpumask' | 541 | static void softlockup_park_all_threads(void) |
561 | * | ||
562 | * This function may only be called in a context that is protected against | ||
563 | * races with CPU hotplug - for example, via get_online_cpus(). | ||
564 | */ | ||
565 | static void watchdog_unpark_threads(void) | ||
566 | { | 542 | { |
567 | int cpu; | 543 | cpumask_clear(&watchdog_allowed_mask); |
568 | 544 | softlockup_update_smpboot_threads(); | |
569 | for_each_watchdog_cpu(cpu) | ||
570 | kthread_unpark(per_cpu(softlockup_watchdog, cpu)); | ||
571 | } | 545 | } |
572 | 546 | ||
573 | static int update_watchdog_all_cpus(void) | 547 | /* Unpark enabled threads */ |
548 | static void softlockup_unpark_threads(void) | ||
574 | { | 549 | { |
575 | int ret; | 550 | cpumask_copy(&watchdog_allowed_mask, &watchdog_cpumask); |
576 | 551 | softlockup_update_smpboot_threads(); | |
577 | ret = watchdog_park_threads(); | ||
578 | if (ret) | ||
579 | return ret; | ||
580 | |||
581 | watchdog_unpark_threads(); | ||
582 | |||
583 | return 0; | ||
584 | } | 552 | } |
585 | 553 | ||
586 | static int watchdog_enable_all_cpus(void) | 554 | static void lockup_detector_reconfigure(void) |
587 | { | 555 | { |
588 | int err = 0; | 556 | cpus_read_lock(); |
589 | 557 | watchdog_nmi_stop(); | |
590 | if (!watchdog_running) { | 558 | softlockup_park_all_threads(); |
591 | err = smpboot_register_percpu_thread_cpumask(&watchdog_threads, | 559 | set_sample_period(); |
592 | &watchdog_cpumask); | 560 | lockup_detector_update_enable(); |
593 | if (err) | 561 | if (watchdog_enabled && watchdog_thresh) |
594 | pr_err("Failed to create watchdog threads, disabled\n"); | 562 | softlockup_unpark_threads(); |
595 | else | 563 | watchdog_nmi_start(); |
596 | watchdog_running = 1; | 564 | cpus_read_unlock(); |
597 | } else { | 565 | /* |
598 | /* | 566 | * Must be called outside the cpus locked section to prevent |
599 | * Enable/disable the lockup detectors or | 567 | * recursive locking in the perf code. |
600 | * change the sample period 'on the fly'. | 568 | */ |
601 | */ | 569 | __lockup_detector_cleanup(); |
602 | err = update_watchdog_all_cpus(); | ||
603 | |||
604 | if (err) { | ||
605 | watchdog_disable_all_cpus(); | ||
606 | pr_err("Failed to update lockup detectors, disabled\n"); | ||
607 | } | ||
608 | } | ||
609 | |||
610 | if (err) | ||
611 | watchdog_enabled = 0; | ||
612 | |||
613 | return err; | ||
614 | } | 570 | } |
615 | 571 | ||
616 | static void watchdog_disable_all_cpus(void) | 572 | /* |
573 | * Create the watchdog thread infrastructure and configure the detector(s). | ||
574 | * | ||
575 | * The threads are not unparked as watchdog_allowed_mask is empty. When | ||
576 | * the threads are sucessfully initialized, take the proper locks and | ||
577 | * unpark the threads in the watchdog_cpumask if the watchdog is enabled. | ||
578 | */ | ||
579 | static __init void lockup_detector_setup(void) | ||
617 | { | 580 | { |
618 | if (watchdog_running) { | 581 | int ret; |
619 | watchdog_running = 0; | ||
620 | smpboot_unregister_percpu_thread(&watchdog_threads); | ||
621 | } | ||
622 | } | ||
623 | 582 | ||
624 | #ifdef CONFIG_SYSCTL | 583 | /* |
625 | static int watchdog_update_cpus(void) | 584 | * If sysctl is off and watchdog got disabled on the command line, |
626 | { | 585 | * nothing to do here. |
627 | return smpboot_update_cpumask_percpu_thread( | 586 | */ |
628 | &watchdog_threads, &watchdog_cpumask); | 587 | lockup_detector_update_enable(); |
629 | } | ||
630 | #endif | ||
631 | 588 | ||
632 | #else /* SOFTLOCKUP */ | 589 | if (!IS_ENABLED(CONFIG_SYSCTL) && |
633 | static int watchdog_park_threads(void) | 590 | !(watchdog_enabled && watchdog_thresh)) |
634 | { | 591 | return; |
635 | return 0; | ||
636 | } | ||
637 | 592 | ||
638 | static void watchdog_unpark_threads(void) | 593 | ret = smpboot_register_percpu_thread_cpumask(&watchdog_threads, |
639 | { | 594 | &watchdog_allowed_mask); |
640 | } | 595 | if (ret) { |
596 | pr_err("Failed to initialize soft lockup detector threads\n"); | ||
597 | return; | ||
598 | } | ||
641 | 599 | ||
642 | static int watchdog_enable_all_cpus(void) | 600 | mutex_lock(&watchdog_mutex); |
643 | { | 601 | softlockup_threads_initialized = true; |
644 | return 0; | 602 | lockup_detector_reconfigure(); |
603 | mutex_unlock(&watchdog_mutex); | ||
645 | } | 604 | } |
646 | 605 | ||
647 | static void watchdog_disable_all_cpus(void) | 606 | #else /* CONFIG_SOFTLOCKUP_DETECTOR */ |
607 | static inline int watchdog_park_threads(void) { return 0; } | ||
608 | static inline void watchdog_unpark_threads(void) { } | ||
609 | static inline int watchdog_enable_all_cpus(void) { return 0; } | ||
610 | static inline void watchdog_disable_all_cpus(void) { } | ||
611 | static void lockup_detector_reconfigure(void) | ||
648 | { | 612 | { |
613 | cpus_read_lock(); | ||
614 | watchdog_nmi_stop(); | ||
615 | lockup_detector_update_enable(); | ||
616 | watchdog_nmi_start(); | ||
617 | cpus_read_unlock(); | ||
649 | } | 618 | } |
650 | 619 | static inline void lockup_detector_setup(void) | |
651 | #ifdef CONFIG_SYSCTL | ||
652 | static int watchdog_update_cpus(void) | ||
653 | { | 620 | { |
654 | return 0; | 621 | lockup_detector_reconfigure(); |
655 | } | 622 | } |
656 | #endif | 623 | #endif /* !CONFIG_SOFTLOCKUP_DETECTOR */ |
657 | 624 | ||
658 | static void set_sample_period(void) | 625 | static void __lockup_detector_cleanup(void) |
659 | { | 626 | { |
627 | lockdep_assert_held(&watchdog_mutex); | ||
628 | hardlockup_detector_perf_cleanup(); | ||
660 | } | 629 | } |
661 | #endif /* SOFTLOCKUP */ | ||
662 | 630 | ||
663 | /* | 631 | /** |
664 | * Suspend the hard and soft lockup detector by parking the watchdog threads. | 632 | * lockup_detector_cleanup - Cleanup after cpu hotplug or sysctl changes |
633 | * | ||
634 | * Caller must not hold the cpu hotplug rwsem. | ||
665 | */ | 635 | */ |
666 | int lockup_detector_suspend(void) | 636 | void lockup_detector_cleanup(void) |
667 | { | 637 | { |
668 | int ret = 0; | 638 | mutex_lock(&watchdog_mutex); |
669 | 639 | __lockup_detector_cleanup(); | |
670 | get_online_cpus(); | 640 | mutex_unlock(&watchdog_mutex); |
671 | mutex_lock(&watchdog_proc_mutex); | ||
672 | /* | ||
673 | * Multiple suspend requests can be active in parallel (counted by | ||
674 | * the 'watchdog_suspended' variable). If the watchdog threads are | ||
675 | * running, the first caller takes care that they will be parked. | ||
676 | * The state of 'watchdog_running' cannot change while a suspend | ||
677 | * request is active (see related code in 'proc' handlers). | ||
678 | */ | ||
679 | if (watchdog_running && !watchdog_suspended) | ||
680 | ret = watchdog_park_threads(); | ||
681 | |||
682 | if (ret == 0) | ||
683 | watchdog_suspended++; | ||
684 | else { | ||
685 | watchdog_disable_all_cpus(); | ||
686 | pr_err("Failed to suspend lockup detectors, disabled\n"); | ||
687 | watchdog_enabled = 0; | ||
688 | } | ||
689 | |||
690 | watchdog_nmi_reconfigure(); | ||
691 | |||
692 | mutex_unlock(&watchdog_proc_mutex); | ||
693 | |||
694 | return ret; | ||
695 | } | 641 | } |
696 | 642 | ||
697 | /* | 643 | /** |
698 | * Resume the hard and soft lockup detector by unparking the watchdog threads. | 644 | * lockup_detector_soft_poweroff - Interface to stop lockup detector(s) |
645 | * | ||
646 | * Special interface for parisc. It prevents lockup detector warnings from | ||
647 | * the default pm_poweroff() function which busy loops forever. | ||
699 | */ | 648 | */ |
700 | void lockup_detector_resume(void) | 649 | void lockup_detector_soft_poweroff(void) |
701 | { | 650 | { |
702 | mutex_lock(&watchdog_proc_mutex); | 651 | watchdog_enabled = 0; |
703 | |||
704 | watchdog_suspended--; | ||
705 | /* | ||
706 | * The watchdog threads are unparked if they were previously running | ||
707 | * and if there is no more active suspend request. | ||
708 | */ | ||
709 | if (watchdog_running && !watchdog_suspended) | ||
710 | watchdog_unpark_threads(); | ||
711 | |||
712 | watchdog_nmi_reconfigure(); | ||
713 | |||
714 | mutex_unlock(&watchdog_proc_mutex); | ||
715 | put_online_cpus(); | ||
716 | } | 652 | } |
717 | 653 | ||
718 | #ifdef CONFIG_SYSCTL | 654 | #ifdef CONFIG_SYSCTL |
719 | 655 | ||
720 | /* | 656 | /* Propagate any changes to the watchdog threads */ |
721 | * Update the run state of the lockup detectors. | 657 | static void proc_watchdog_update(void) |
722 | */ | ||
723 | static int proc_watchdog_update(void) | ||
724 | { | 658 | { |
725 | int err = 0; | 659 | /* Remove impossible cpus to keep sysctl output clean. */ |
726 | 660 | cpumask_and(&watchdog_cpumask, &watchdog_cpumask, cpu_possible_mask); | |
727 | /* | 661 | lockup_detector_reconfigure(); |
728 | * Watchdog threads won't be started if they are already active. | ||
729 | * The 'watchdog_running' variable in watchdog_*_all_cpus() takes | ||
730 | * care of this. If those threads are already active, the sample | ||
731 | * period will be updated and the lockup detectors will be enabled | ||
732 | * or disabled 'on the fly'. | ||
733 | */ | ||
734 | if (watchdog_enabled && watchdog_thresh) | ||
735 | err = watchdog_enable_all_cpus(); | ||
736 | else | ||
737 | watchdog_disable_all_cpus(); | ||
738 | |||
739 | watchdog_nmi_reconfigure(); | ||
740 | |||
741 | return err; | ||
742 | |||
743 | } | 662 | } |
744 | 663 | ||
745 | /* | 664 | /* |
746 | * common function for watchdog, nmi_watchdog and soft_watchdog parameter | 665 | * common function for watchdog, nmi_watchdog and soft_watchdog parameter |
747 | * | 666 | * |
748 | * caller | table->data points to | 'which' contains the flag(s) | 667 | * caller | table->data points to | 'which' |
749 | * -------------------|-----------------------|----------------------------- | 668 | * -------------------|----------------------------|-------------------------- |
750 | * proc_watchdog | watchdog_user_enabled | NMI_WATCHDOG_ENABLED or'ed | 669 | * proc_watchdog | watchdog_user_enabled | NMI_WATCHDOG_ENABLED | |
751 | * | | with SOFT_WATCHDOG_ENABLED | 670 | * | | SOFT_WATCHDOG_ENABLED |
752 | * -------------------|-----------------------|----------------------------- | 671 | * -------------------|----------------------------|-------------------------- |
753 | * proc_nmi_watchdog | nmi_watchdog_enabled | NMI_WATCHDOG_ENABLED | 672 | * proc_nmi_watchdog | nmi_watchdog_user_enabled | NMI_WATCHDOG_ENABLED |
754 | * -------------------|-----------------------|----------------------------- | 673 | * -------------------|----------------------------|-------------------------- |
755 | * proc_soft_watchdog | soft_watchdog_enabled | SOFT_WATCHDOG_ENABLED | 674 | * proc_soft_watchdog | soft_watchdog_user_enabled | SOFT_WATCHDOG_ENABLED |
756 | */ | 675 | */ |
757 | static int proc_watchdog_common(int which, struct ctl_table *table, int write, | 676 | static int proc_watchdog_common(int which, struct ctl_table *table, int write, |
758 | void __user *buffer, size_t *lenp, loff_t *ppos) | 677 | void __user *buffer, size_t *lenp, loff_t *ppos) |
759 | { | 678 | { |
760 | int err, old, new; | 679 | int err, old, *param = table->data; |
761 | int *watchdog_param = (int *)table->data; | ||
762 | 680 | ||
763 | get_online_cpus(); | 681 | mutex_lock(&watchdog_mutex); |
764 | mutex_lock(&watchdog_proc_mutex); | ||
765 | 682 | ||
766 | if (watchdog_suspended) { | ||
767 | /* no parameter changes allowed while watchdog is suspended */ | ||
768 | err = -EAGAIN; | ||
769 | goto out; | ||
770 | } | ||
771 | |||
772 | /* | ||
773 | * If the parameter is being read return the state of the corresponding | ||
774 | * bit(s) in 'watchdog_enabled', else update 'watchdog_enabled' and the | ||
775 | * run state of the lockup detectors. | ||
776 | */ | ||
777 | if (!write) { | 683 | if (!write) { |
778 | *watchdog_param = (watchdog_enabled & which) != 0; | 684 | /* |
685 | * On read synchronize the userspace interface. This is a | ||
686 | * racy snapshot. | ||
687 | */ | ||
688 | *param = (watchdog_enabled & which) != 0; | ||
779 | err = proc_dointvec_minmax(table, write, buffer, lenp, ppos); | 689 | err = proc_dointvec_minmax(table, write, buffer, lenp, ppos); |
780 | } else { | 690 | } else { |
691 | old = READ_ONCE(*param); | ||
781 | err = proc_dointvec_minmax(table, write, buffer, lenp, ppos); | 692 | err = proc_dointvec_minmax(table, write, buffer, lenp, ppos); |
782 | if (err) | 693 | if (!err && old != READ_ONCE(*param)) |
783 | goto out; | 694 | proc_watchdog_update(); |
784 | |||
785 | /* | ||
786 | * There is a race window between fetching the current value | ||
787 | * from 'watchdog_enabled' and storing the new value. During | ||
788 | * this race window, watchdog_nmi_enable() can sneak in and | ||
789 | * clear the NMI_WATCHDOG_ENABLED bit in 'watchdog_enabled'. | ||
790 | * The 'cmpxchg' detects this race and the loop retries. | ||
791 | */ | ||
792 | do { | ||
793 | old = watchdog_enabled; | ||
794 | /* | ||
795 | * If the parameter value is not zero set the | ||
796 | * corresponding bit(s), else clear it(them). | ||
797 | */ | ||
798 | if (*watchdog_param) | ||
799 | new = old | which; | ||
800 | else | ||
801 | new = old & ~which; | ||
802 | } while (cmpxchg(&watchdog_enabled, old, new) != old); | ||
803 | |||
804 | /* | ||
805 | * Update the run state of the lockup detectors. There is _no_ | ||
806 | * need to check the value returned by proc_watchdog_update() | ||
807 | * and to restore the previous value of 'watchdog_enabled' as | ||
808 | * both lockup detectors are disabled if proc_watchdog_update() | ||
809 | * returns an error. | ||
810 | */ | ||
811 | if (old == new) | ||
812 | goto out; | ||
813 | |||
814 | err = proc_watchdog_update(); | ||
815 | } | 695 | } |
816 | out: | 696 | mutex_unlock(&watchdog_mutex); |
817 | mutex_unlock(&watchdog_proc_mutex); | ||
818 | put_online_cpus(); | ||
819 | return err; | 697 | return err; |
820 | } | 698 | } |
821 | 699 | ||
@@ -835,6 +713,8 @@ int proc_watchdog(struct ctl_table *table, int write, | |||
835 | int proc_nmi_watchdog(struct ctl_table *table, int write, | 713 | int proc_nmi_watchdog(struct ctl_table *table, int write, |
836 | void __user *buffer, size_t *lenp, loff_t *ppos) | 714 | void __user *buffer, size_t *lenp, loff_t *ppos) |
837 | { | 715 | { |
716 | if (!nmi_watchdog_available && write) | ||
717 | return -ENOTSUPP; | ||
838 | return proc_watchdog_common(NMI_WATCHDOG_ENABLED, | 718 | return proc_watchdog_common(NMI_WATCHDOG_ENABLED, |
839 | table, write, buffer, lenp, ppos); | 719 | table, write, buffer, lenp, ppos); |
840 | } | 720 | } |
@@ -855,39 +735,17 @@ int proc_soft_watchdog(struct ctl_table *table, int write, | |||
855 | int proc_watchdog_thresh(struct ctl_table *table, int write, | 735 | int proc_watchdog_thresh(struct ctl_table *table, int write, |
856 | void __user *buffer, size_t *lenp, loff_t *ppos) | 736 | void __user *buffer, size_t *lenp, loff_t *ppos) |
857 | { | 737 | { |
858 | int err, old, new; | 738 | int err, old; |
859 | |||
860 | get_online_cpus(); | ||
861 | mutex_lock(&watchdog_proc_mutex); | ||
862 | 739 | ||
863 | if (watchdog_suspended) { | 740 | mutex_lock(&watchdog_mutex); |
864 | /* no parameter changes allowed while watchdog is suspended */ | ||
865 | err = -EAGAIN; | ||
866 | goto out; | ||
867 | } | ||
868 | 741 | ||
869 | old = ACCESS_ONCE(watchdog_thresh); | 742 | old = READ_ONCE(watchdog_thresh); |
870 | err = proc_dointvec_minmax(table, write, buffer, lenp, ppos); | 743 | err = proc_dointvec_minmax(table, write, buffer, lenp, ppos); |
871 | 744 | ||
872 | if (err || !write) | 745 | if (!err && write && old != READ_ONCE(watchdog_thresh)) |
873 | goto out; | 746 | proc_watchdog_update(); |
874 | |||
875 | /* | ||
876 | * Update the sample period. Restore on failure. | ||
877 | */ | ||
878 | new = ACCESS_ONCE(watchdog_thresh); | ||
879 | if (old == new) | ||
880 | goto out; | ||
881 | 747 | ||
882 | set_sample_period(); | 748 | mutex_unlock(&watchdog_mutex); |
883 | err = proc_watchdog_update(); | ||
884 | if (err) { | ||
885 | watchdog_thresh = old; | ||
886 | set_sample_period(); | ||
887 | } | ||
888 | out: | ||
889 | mutex_unlock(&watchdog_proc_mutex); | ||
890 | put_online_cpus(); | ||
891 | return err; | 749 | return err; |
892 | } | 750 | } |
893 | 751 | ||
@@ -902,45 +760,19 @@ int proc_watchdog_cpumask(struct ctl_table *table, int write, | |||
902 | { | 760 | { |
903 | int err; | 761 | int err; |
904 | 762 | ||
905 | get_online_cpus(); | 763 | mutex_lock(&watchdog_mutex); |
906 | mutex_lock(&watchdog_proc_mutex); | ||
907 | |||
908 | if (watchdog_suspended) { | ||
909 | /* no parameter changes allowed while watchdog is suspended */ | ||
910 | err = -EAGAIN; | ||
911 | goto out; | ||
912 | } | ||
913 | 764 | ||
914 | err = proc_do_large_bitmap(table, write, buffer, lenp, ppos); | 765 | err = proc_do_large_bitmap(table, write, buffer, lenp, ppos); |
915 | if (!err && write) { | 766 | if (!err && write) |
916 | /* Remove impossible cpus to keep sysctl output cleaner. */ | 767 | proc_watchdog_update(); |
917 | cpumask_and(&watchdog_cpumask, &watchdog_cpumask, | ||
918 | cpu_possible_mask); | ||
919 | |||
920 | if (watchdog_running) { | ||
921 | /* | ||
922 | * Failure would be due to being unable to allocate | ||
923 | * a temporary cpumask, so we are likely not in a | ||
924 | * position to do much else to make things better. | ||
925 | */ | ||
926 | if (watchdog_update_cpus() != 0) | ||
927 | pr_err("cpumask update failed\n"); | ||
928 | } | ||
929 | 768 | ||
930 | watchdog_nmi_reconfigure(); | 769 | mutex_unlock(&watchdog_mutex); |
931 | } | ||
932 | out: | ||
933 | mutex_unlock(&watchdog_proc_mutex); | ||
934 | put_online_cpus(); | ||
935 | return err; | 770 | return err; |
936 | } | 771 | } |
937 | |||
938 | #endif /* CONFIG_SYSCTL */ | 772 | #endif /* CONFIG_SYSCTL */ |
939 | 773 | ||
940 | void __init lockup_detector_init(void) | 774 | void __init lockup_detector_init(void) |
941 | { | 775 | { |
942 | set_sample_period(); | ||
943 | |||
944 | #ifdef CONFIG_NO_HZ_FULL | 776 | #ifdef CONFIG_NO_HZ_FULL |
945 | if (tick_nohz_full_enabled()) { | 777 | if (tick_nohz_full_enabled()) { |
946 | pr_info("Disabling watchdog on nohz_full cores by default\n"); | 778 | pr_info("Disabling watchdog on nohz_full cores by default\n"); |
@@ -951,6 +783,7 @@ void __init lockup_detector_init(void) | |||
951 | cpumask_copy(&watchdog_cpumask, cpu_possible_mask); | 783 | cpumask_copy(&watchdog_cpumask, cpu_possible_mask); |
952 | #endif | 784 | #endif |
953 | 785 | ||
954 | if (watchdog_enabled) | 786 | if (!watchdog_nmi_probe()) |
955 | watchdog_enable_all_cpus(); | 787 | nmi_watchdog_available = true; |
788 | lockup_detector_setup(); | ||
956 | } | 789 | } |
diff --git a/kernel/watchdog_hld.c b/kernel/watchdog_hld.c index 3a09ea1b1d3d..71a62ceacdc8 100644 --- a/kernel/watchdog_hld.c +++ b/kernel/watchdog_hld.c | |||
@@ -21,8 +21,10 @@ | |||
21 | static DEFINE_PER_CPU(bool, hard_watchdog_warn); | 21 | static DEFINE_PER_CPU(bool, hard_watchdog_warn); |
22 | static DEFINE_PER_CPU(bool, watchdog_nmi_touch); | 22 | static DEFINE_PER_CPU(bool, watchdog_nmi_touch); |
23 | static DEFINE_PER_CPU(struct perf_event *, watchdog_ev); | 23 | static DEFINE_PER_CPU(struct perf_event *, watchdog_ev); |
24 | static struct cpumask dead_events_mask; | ||
24 | 25 | ||
25 | static unsigned long hardlockup_allcpu_dumped; | 26 | static unsigned long hardlockup_allcpu_dumped; |
27 | static unsigned int watchdog_cpus; | ||
26 | 28 | ||
27 | void arch_touch_nmi_watchdog(void) | 29 | void arch_touch_nmi_watchdog(void) |
28 | { | 30 | { |
@@ -103,15 +105,12 @@ static struct perf_event_attr wd_hw_attr = { | |||
103 | 105 | ||
104 | /* Callback function for perf event subsystem */ | 106 | /* Callback function for perf event subsystem */ |
105 | static void watchdog_overflow_callback(struct perf_event *event, | 107 | static void watchdog_overflow_callback(struct perf_event *event, |
106 | struct perf_sample_data *data, | 108 | struct perf_sample_data *data, |
107 | struct pt_regs *regs) | 109 | struct pt_regs *regs) |
108 | { | 110 | { |
109 | /* Ensure the watchdog never gets throttled */ | 111 | /* Ensure the watchdog never gets throttled */ |
110 | event->hw.interrupts = 0; | 112 | event->hw.interrupts = 0; |
111 | 113 | ||
112 | if (atomic_read(&watchdog_park_in_progress) != 0) | ||
113 | return; | ||
114 | |||
115 | if (__this_cpu_read(watchdog_nmi_touch) == true) { | 114 | if (__this_cpu_read(watchdog_nmi_touch) == true) { |
116 | __this_cpu_write(watchdog_nmi_touch, false); | 115 | __this_cpu_write(watchdog_nmi_touch, false); |
117 | return; | 116 | return; |
@@ -160,104 +159,131 @@ static void watchdog_overflow_callback(struct perf_event *event, | |||
160 | return; | 159 | return; |
161 | } | 160 | } |
162 | 161 | ||
163 | /* | 162 | static int hardlockup_detector_event_create(void) |
164 | * People like the simple clean cpu node info on boot. | ||
165 | * Reduce the watchdog noise by only printing messages | ||
166 | * that are different from what cpu0 displayed. | ||
167 | */ | ||
168 | static unsigned long firstcpu_err; | ||
169 | static atomic_t watchdog_cpus; | ||
170 | |||
171 | int watchdog_nmi_enable(unsigned int cpu) | ||
172 | { | 163 | { |
164 | unsigned int cpu = smp_processor_id(); | ||
173 | struct perf_event_attr *wd_attr; | 165 | struct perf_event_attr *wd_attr; |
174 | struct perf_event *event = per_cpu(watchdog_ev, cpu); | 166 | struct perf_event *evt; |
175 | int firstcpu = 0; | ||
176 | |||
177 | /* nothing to do if the hard lockup detector is disabled */ | ||
178 | if (!(watchdog_enabled & NMI_WATCHDOG_ENABLED)) | ||
179 | goto out; | ||
180 | |||
181 | /* is it already setup and enabled? */ | ||
182 | if (event && event->state > PERF_EVENT_STATE_OFF) | ||
183 | goto out; | ||
184 | |||
185 | /* it is setup but not enabled */ | ||
186 | if (event != NULL) | ||
187 | goto out_enable; | ||
188 | |||
189 | if (atomic_inc_return(&watchdog_cpus) == 1) | ||
190 | firstcpu = 1; | ||
191 | 167 | ||
192 | wd_attr = &wd_hw_attr; | 168 | wd_attr = &wd_hw_attr; |
193 | wd_attr->sample_period = hw_nmi_get_sample_period(watchdog_thresh); | 169 | wd_attr->sample_period = hw_nmi_get_sample_period(watchdog_thresh); |
194 | 170 | ||
195 | /* Try to register using hardware perf events */ | 171 | /* Try to register using hardware perf events */ |
196 | event = perf_event_create_kernel_counter(wd_attr, cpu, NULL, watchdog_overflow_callback, NULL); | 172 | evt = perf_event_create_kernel_counter(wd_attr, cpu, NULL, |
173 | watchdog_overflow_callback, NULL); | ||
174 | if (IS_ERR(evt)) { | ||
175 | pr_info("Perf event create on CPU %d failed with %ld\n", cpu, | ||
176 | PTR_ERR(evt)); | ||
177 | return PTR_ERR(evt); | ||
178 | } | ||
179 | this_cpu_write(watchdog_ev, evt); | ||
180 | return 0; | ||
181 | } | ||
197 | 182 | ||
198 | /* save the first cpu's error for future comparision */ | 183 | /** |
199 | if (firstcpu && IS_ERR(event)) | 184 | * hardlockup_detector_perf_enable - Enable the local event |
200 | firstcpu_err = PTR_ERR(event); | 185 | */ |
186 | void hardlockup_detector_perf_enable(void) | ||
187 | { | ||
188 | if (hardlockup_detector_event_create()) | ||
189 | return; | ||
201 | 190 | ||
202 | if (!IS_ERR(event)) { | 191 | if (!watchdog_cpus++) |
203 | /* only print for the first cpu initialized */ | 192 | pr_info("Enabled. Permanently consumes one hw-PMU counter.\n"); |
204 | if (firstcpu || firstcpu_err) | ||
205 | pr_info("enabled on all CPUs, permanently consumes one hw-PMU counter.\n"); | ||
206 | goto out_save; | ||
207 | } | ||
208 | 193 | ||
209 | /* | 194 | perf_event_enable(this_cpu_read(watchdog_ev)); |
210 | * Disable the hard lockup detector if _any_ CPU fails to set up | ||
211 | * set up the hardware perf event. The watchdog() function checks | ||
212 | * the NMI_WATCHDOG_ENABLED bit periodically. | ||
213 | * | ||
214 | * The barriers are for syncing up watchdog_enabled across all the | ||
215 | * cpus, as clear_bit() does not use barriers. | ||
216 | */ | ||
217 | smp_mb__before_atomic(); | ||
218 | clear_bit(NMI_WATCHDOG_ENABLED_BIT, &watchdog_enabled); | ||
219 | smp_mb__after_atomic(); | ||
220 | |||
221 | /* skip displaying the same error again */ | ||
222 | if (!firstcpu && (PTR_ERR(event) == firstcpu_err)) | ||
223 | return PTR_ERR(event); | ||
224 | |||
225 | /* vary the KERN level based on the returned errno */ | ||
226 | if (PTR_ERR(event) == -EOPNOTSUPP) | ||
227 | pr_info("disabled (cpu%i): not supported (no LAPIC?)\n", cpu); | ||
228 | else if (PTR_ERR(event) == -ENOENT) | ||
229 | pr_warn("disabled (cpu%i): hardware events not enabled\n", | ||
230 | cpu); | ||
231 | else | ||
232 | pr_err("disabled (cpu%i): unable to create perf event: %ld\n", | ||
233 | cpu, PTR_ERR(event)); | ||
234 | |||
235 | pr_info("Shutting down hard lockup detector on all cpus\n"); | ||
236 | |||
237 | return PTR_ERR(event); | ||
238 | |||
239 | /* success path */ | ||
240 | out_save: | ||
241 | per_cpu(watchdog_ev, cpu) = event; | ||
242 | out_enable: | ||
243 | perf_event_enable(per_cpu(watchdog_ev, cpu)); | ||
244 | out: | ||
245 | return 0; | ||
246 | } | 195 | } |
247 | 196 | ||
248 | void watchdog_nmi_disable(unsigned int cpu) | 197 | /** |
198 | * hardlockup_detector_perf_disable - Disable the local event | ||
199 | */ | ||
200 | void hardlockup_detector_perf_disable(void) | ||
249 | { | 201 | { |
250 | struct perf_event *event = per_cpu(watchdog_ev, cpu); | 202 | struct perf_event *event = this_cpu_read(watchdog_ev); |
251 | 203 | ||
252 | if (event) { | 204 | if (event) { |
253 | perf_event_disable(event); | 205 | perf_event_disable(event); |
206 | cpumask_set_cpu(smp_processor_id(), &dead_events_mask); | ||
207 | watchdog_cpus--; | ||
208 | } | ||
209 | } | ||
210 | |||
211 | /** | ||
212 | * hardlockup_detector_perf_cleanup - Cleanup disabled events and destroy them | ||
213 | * | ||
214 | * Called from lockup_detector_cleanup(). Serialized by the caller. | ||
215 | */ | ||
216 | void hardlockup_detector_perf_cleanup(void) | ||
217 | { | ||
218 | int cpu; | ||
219 | |||
220 | for_each_cpu(cpu, &dead_events_mask) { | ||
221 | struct perf_event *event = per_cpu(watchdog_ev, cpu); | ||
222 | |||
223 | /* | ||
224 | * Required because for_each_cpu() reports unconditionally | ||
225 | * CPU0 as set on UP kernels. Sigh. | ||
226 | */ | ||
227 | if (event) | ||
228 | perf_event_release_kernel(event); | ||
254 | per_cpu(watchdog_ev, cpu) = NULL; | 229 | per_cpu(watchdog_ev, cpu) = NULL; |
230 | } | ||
231 | cpumask_clear(&dead_events_mask); | ||
232 | } | ||
233 | |||
234 | /** | ||
235 | * hardlockup_detector_perf_stop - Globally stop watchdog events | ||
236 | * | ||
237 | * Special interface for x86 to handle the perf HT bug. | ||
238 | */ | ||
239 | void __init hardlockup_detector_perf_stop(void) | ||
240 | { | ||
241 | int cpu; | ||
242 | |||
243 | lockdep_assert_cpus_held(); | ||
244 | |||
245 | for_each_online_cpu(cpu) { | ||
246 | struct perf_event *event = per_cpu(watchdog_ev, cpu); | ||
247 | |||
248 | if (event) | ||
249 | perf_event_disable(event); | ||
250 | } | ||
251 | } | ||
255 | 252 | ||
256 | /* should be in cleanup, but blocks oprofile */ | 253 | /** |
257 | perf_event_release_kernel(event); | 254 | * hardlockup_detector_perf_restart - Globally restart watchdog events |
255 | * | ||
256 | * Special interface for x86 to handle the perf HT bug. | ||
257 | */ | ||
258 | void __init hardlockup_detector_perf_restart(void) | ||
259 | { | ||
260 | int cpu; | ||
261 | |||
262 | lockdep_assert_cpus_held(); | ||
263 | |||
264 | if (!(watchdog_enabled & NMI_WATCHDOG_ENABLED)) | ||
265 | return; | ||
266 | |||
267 | for_each_online_cpu(cpu) { | ||
268 | struct perf_event *event = per_cpu(watchdog_ev, cpu); | ||
269 | |||
270 | if (event) | ||
271 | perf_event_enable(event); | ||
272 | } | ||
273 | } | ||
274 | |||
275 | /** | ||
276 | * hardlockup_detector_perf_init - Probe whether NMI event is available at all | ||
277 | */ | ||
278 | int __init hardlockup_detector_perf_init(void) | ||
279 | { | ||
280 | int ret = hardlockup_detector_event_create(); | ||
258 | 281 | ||
259 | /* watchdog_nmi_enable() expects this to be zero initially. */ | 282 | if (ret) { |
260 | if (atomic_dec_and_test(&watchdog_cpus)) | 283 | pr_info("Perf NMI watchdog permanently disabled\n"); |
261 | firstcpu_err = 0; | 284 | } else { |
285 | perf_event_release_kernel(this_cpu_read(watchdog_ev)); | ||
286 | this_cpu_write(watchdog_ev, NULL); | ||
262 | } | 287 | } |
288 | return ret; | ||
263 | } | 289 | } |
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index b19c491cbc4e..2689b7c50c52 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug | |||
@@ -219,7 +219,8 @@ config FRAME_WARN | |||
219 | range 0 8192 | 219 | range 0 8192 |
220 | default 0 if KASAN | 220 | default 0 if KASAN |
221 | default 2048 if GCC_PLUGIN_LATENT_ENTROPY | 221 | default 2048 if GCC_PLUGIN_LATENT_ENTROPY |
222 | default 1024 if !64BIT | 222 | default 1280 if (!64BIT && PARISC) |
223 | default 1024 if (!64BIT && !PARISC) | ||
223 | default 2048 if 64BIT | 224 | default 2048 if 64BIT |
224 | help | 225 | help |
225 | Tell gcc to warn at build time for stack frames larger than this. | 226 | Tell gcc to warn at build time for stack frames larger than this. |
@@ -146,8 +146,8 @@ EXPORT_SYMBOL(idr_get_next_ext); | |||
146 | * idr_alloc() and idr_remove() (as long as the ID being removed is not | 146 | * idr_alloc() and idr_remove() (as long as the ID being removed is not |
147 | * the one being replaced!). | 147 | * the one being replaced!). |
148 | * | 148 | * |
149 | * Returns: 0 on success. %-ENOENT indicates that @id was not found. | 149 | * Returns: the old value on success. %-ENOENT indicates that @id was not |
150 | * %-EINVAL indicates that @id or @ptr were not valid. | 150 | * found. %-EINVAL indicates that @id or @ptr were not valid. |
151 | */ | 151 | */ |
152 | void *idr_replace(struct idr *idr, void *ptr, int id) | 152 | void *idr_replace(struct idr *idr, void *ptr, int id) |
153 | { | 153 | { |
diff --git a/lib/iov_iter.c b/lib/iov_iter.c index 52c8dd6d8e82..1c1c06ddc20a 100644 --- a/lib/iov_iter.c +++ b/lib/iov_iter.c | |||
@@ -687,8 +687,10 @@ EXPORT_SYMBOL(_copy_from_iter_full_nocache); | |||
687 | 687 | ||
688 | static inline bool page_copy_sane(struct page *page, size_t offset, size_t n) | 688 | static inline bool page_copy_sane(struct page *page, size_t offset, size_t n) |
689 | { | 689 | { |
690 | size_t v = n + offset; | 690 | struct page *head = compound_head(page); |
691 | if (likely(n <= v && v <= (PAGE_SIZE << compound_order(page)))) | 691 | size_t v = n + offset + page_address(page) - page_address(head); |
692 | |||
693 | if (likely(n <= v && v <= (PAGE_SIZE << compound_order(head)))) | ||
692 | return true; | 694 | return true; |
693 | WARN_ON(1); | 695 | WARN_ON(1); |
694 | return false; | 696 | return false; |
diff --git a/lib/kobject_uevent.c b/lib/kobject_uevent.c index e590523ea476..f237a09a5862 100644 --- a/lib/kobject_uevent.c +++ b/lib/kobject_uevent.c | |||
@@ -294,6 +294,26 @@ static void cleanup_uevent_env(struct subprocess_info *info) | |||
294 | } | 294 | } |
295 | #endif | 295 | #endif |
296 | 296 | ||
297 | static void zap_modalias_env(struct kobj_uevent_env *env) | ||
298 | { | ||
299 | static const char modalias_prefix[] = "MODALIAS="; | ||
300 | int i; | ||
301 | |||
302 | for (i = 0; i < env->envp_idx;) { | ||
303 | if (strncmp(env->envp[i], modalias_prefix, | ||
304 | sizeof(modalias_prefix) - 1)) { | ||
305 | i++; | ||
306 | continue; | ||
307 | } | ||
308 | |||
309 | if (i != env->envp_idx - 1) | ||
310 | memmove(&env->envp[i], &env->envp[i + 1], | ||
311 | sizeof(env->envp[i]) * env->envp_idx - 1); | ||
312 | |||
313 | env->envp_idx--; | ||
314 | } | ||
315 | } | ||
316 | |||
297 | /** | 317 | /** |
298 | * kobject_uevent_env - send an uevent with environmental data | 318 | * kobject_uevent_env - send an uevent with environmental data |
299 | * | 319 | * |
@@ -409,16 +429,29 @@ int kobject_uevent_env(struct kobject *kobj, enum kobject_action action, | |||
409 | } | 429 | } |
410 | } | 430 | } |
411 | 431 | ||
412 | /* | 432 | switch (action) { |
413 | * Mark "add" and "remove" events in the object to ensure proper | 433 | case KOBJ_ADD: |
414 | * events to userspace during automatic cleanup. If the object did | 434 | /* |
415 | * send an "add" event, "remove" will automatically generated by | 435 | * Mark "add" event so we can make sure we deliver "remove" |
416 | * the core, if not already done by the caller. | 436 | * event to userspace during automatic cleanup. If |
417 | */ | 437 | * the object did send an "add" event, "remove" will |
418 | if (action == KOBJ_ADD) | 438 | * automatically generated by the core, if not already done |
439 | * by the caller. | ||
440 | */ | ||
419 | kobj->state_add_uevent_sent = 1; | 441 | kobj->state_add_uevent_sent = 1; |
420 | else if (action == KOBJ_REMOVE) | 442 | break; |
443 | |||
444 | case KOBJ_REMOVE: | ||
421 | kobj->state_remove_uevent_sent = 1; | 445 | kobj->state_remove_uevent_sent = 1; |
446 | break; | ||
447 | |||
448 | case KOBJ_UNBIND: | ||
449 | zap_modalias_env(env); | ||
450 | break; | ||
451 | |||
452 | default: | ||
453 | break; | ||
454 | } | ||
422 | 455 | ||
423 | mutex_lock(&uevent_sock_mutex); | 456 | mutex_lock(&uevent_sock_mutex); |
424 | /* we will send an event, so request a new sequence number */ | 457 | /* we will send an event, so request a new sequence number */ |
diff --git a/lib/lz4/lz4_decompress.c b/lib/lz4/lz4_decompress.c index bd3574312b82..141734d255e4 100644 --- a/lib/lz4/lz4_decompress.c +++ b/lib/lz4/lz4_decompress.c | |||
@@ -85,8 +85,8 @@ static FORCE_INLINE int LZ4_decompress_generic( | |||
85 | const BYTE * const lowLimit = lowPrefix - dictSize; | 85 | const BYTE * const lowLimit = lowPrefix - dictSize; |
86 | 86 | ||
87 | const BYTE * const dictEnd = (const BYTE *)dictStart + dictSize; | 87 | const BYTE * const dictEnd = (const BYTE *)dictStart + dictSize; |
88 | const unsigned int dec32table[] = { 0, 1, 2, 1, 4, 4, 4, 4 }; | 88 | static const unsigned int dec32table[] = { 0, 1, 2, 1, 4, 4, 4, 4 }; |
89 | const int dec64table[] = { 0, 0, 0, -1, 0, 1, 2, 3 }; | 89 | static const int dec64table[] = { 0, 0, 0, -1, 0, 1, 2, 3 }; |
90 | 90 | ||
91 | const int safeDecode = (endOnInput == endOnInputSize); | 91 | const int safeDecode = (endOnInput == endOnInputSize); |
92 | const int checkOffset = ((safeDecode) && (dictSize < (int)(64 * KB))); | 92 | const int checkOffset = ((safeDecode) && (dictSize < (int)(64 * KB))); |
diff --git a/lib/ratelimit.c b/lib/ratelimit.c index 08f8043cac61..d01f47135239 100644 --- a/lib/ratelimit.c +++ b/lib/ratelimit.c | |||
@@ -48,7 +48,9 @@ int ___ratelimit(struct ratelimit_state *rs, const char *func) | |||
48 | if (time_is_before_jiffies(rs->begin + rs->interval)) { | 48 | if (time_is_before_jiffies(rs->begin + rs->interval)) { |
49 | if (rs->missed) { | 49 | if (rs->missed) { |
50 | if (!(rs->flags & RATELIMIT_MSG_ON_RELEASE)) { | 50 | if (!(rs->flags & RATELIMIT_MSG_ON_RELEASE)) { |
51 | pr_warn("%s: %d callbacks suppressed\n", func, rs->missed); | 51 | printk_deferred(KERN_WARNING |
52 | "%s: %d callbacks suppressed\n", | ||
53 | func, rs->missed); | ||
52 | rs->missed = 0; | 54 | rs->missed = 0; |
53 | } | 55 | } |
54 | } | 56 | } |
diff --git a/lib/rhashtable.c b/lib/rhashtable.c index 707ca5d677c6..ddd7dde87c3c 100644 --- a/lib/rhashtable.c +++ b/lib/rhashtable.c | |||
@@ -735,9 +735,9 @@ EXPORT_SYMBOL_GPL(rhashtable_walk_exit); | |||
735 | * rhashtable_walk_start - Start a hash table walk | 735 | * rhashtable_walk_start - Start a hash table walk |
736 | * @iter: Hash table iterator | 736 | * @iter: Hash table iterator |
737 | * | 737 | * |
738 | * Start a hash table walk. Note that we take the RCU lock in all | 738 | * Start a hash table walk at the current iterator position. Note that we take |
739 | * cases including when we return an error. So you must always call | 739 | * the RCU lock in all cases including when we return an error. So you must |
740 | * rhashtable_walk_stop to clean up. | 740 | * always call rhashtable_walk_stop to clean up. |
741 | * | 741 | * |
742 | * Returns zero if successful. | 742 | * Returns zero if successful. |
743 | * | 743 | * |
@@ -846,7 +846,8 @@ EXPORT_SYMBOL_GPL(rhashtable_walk_next); | |||
846 | * rhashtable_walk_stop - Finish a hash table walk | 846 | * rhashtable_walk_stop - Finish a hash table walk |
847 | * @iter: Hash table iterator | 847 | * @iter: Hash table iterator |
848 | * | 848 | * |
849 | * Finish a hash table walk. | 849 | * Finish a hash table walk. Does not reset the iterator to the start of the |
850 | * hash table. | ||
850 | */ | 851 | */ |
851 | void rhashtable_walk_stop(struct rhashtable_iter *iter) | 852 | void rhashtable_walk_stop(struct rhashtable_iter *iter) |
852 | __releases(RCU) | 853 | __releases(RCU) |
diff --git a/mm/compaction.c b/mm/compaction.c index fb548e4c7bd4..03d31a875341 100644 --- a/mm/compaction.c +++ b/mm/compaction.c | |||
@@ -1999,17 +1999,14 @@ void wakeup_kcompactd(pg_data_t *pgdat, int order, int classzone_idx) | |||
1999 | if (pgdat->kcompactd_max_order < order) | 1999 | if (pgdat->kcompactd_max_order < order) |
2000 | pgdat->kcompactd_max_order = order; | 2000 | pgdat->kcompactd_max_order = order; |
2001 | 2001 | ||
2002 | /* | ||
2003 | * Pairs with implicit barrier in wait_event_freezable() | ||
2004 | * such that wakeups are not missed in the lockless | ||
2005 | * waitqueue_active() call. | ||
2006 | */ | ||
2007 | smp_acquire__after_ctrl_dep(); | ||
2008 | |||
2009 | if (pgdat->kcompactd_classzone_idx > classzone_idx) | 2002 | if (pgdat->kcompactd_classzone_idx > classzone_idx) |
2010 | pgdat->kcompactd_classzone_idx = classzone_idx; | 2003 | pgdat->kcompactd_classzone_idx = classzone_idx; |
2011 | 2004 | ||
2012 | if (!waitqueue_active(&pgdat->kcompactd_wait)) | 2005 | /* |
2006 | * Pairs with implicit barrier in wait_event_freezable() | ||
2007 | * such that wakeups are not missed. | ||
2008 | */ | ||
2009 | if (!wq_has_sleeper(&pgdat->kcompactd_wait)) | ||
2013 | return; | 2010 | return; |
2014 | 2011 | ||
2015 | if (!kcompactd_node_suitable(pgdat)) | 2012 | if (!kcompactd_node_suitable(pgdat)) |
diff --git a/mm/filemap.c b/mm/filemap.c index 870971e20967..594d73fef8b4 100644 --- a/mm/filemap.c +++ b/mm/filemap.c | |||
@@ -620,6 +620,14 @@ int file_check_and_advance_wb_err(struct file *file) | |||
620 | trace_file_check_and_advance_wb_err(file, old); | 620 | trace_file_check_and_advance_wb_err(file, old); |
621 | spin_unlock(&file->f_lock); | 621 | spin_unlock(&file->f_lock); |
622 | } | 622 | } |
623 | |||
624 | /* | ||
625 | * We're mostly using this function as a drop in replacement for | ||
626 | * filemap_check_errors. Clear AS_EIO/AS_ENOSPC to emulate the effect | ||
627 | * that the legacy code would have had on these flags. | ||
628 | */ | ||
629 | clear_bit(AS_EIO, &mapping->flags); | ||
630 | clear_bit(AS_ENOSPC, &mapping->flags); | ||
623 | return err; | 631 | return err; |
624 | } | 632 | } |
625 | EXPORT_SYMBOL(file_check_and_advance_wb_err); | 633 | EXPORT_SYMBOL(file_check_and_advance_wb_err); |
@@ -2926,9 +2934,15 @@ generic_file_direct_write(struct kiocb *iocb, struct iov_iter *from) | |||
2926 | * we're writing. Either one is a pretty crazy thing to do, | 2934 | * we're writing. Either one is a pretty crazy thing to do, |
2927 | * so we don't support it 100%. If this invalidation | 2935 | * so we don't support it 100%. If this invalidation |
2928 | * fails, tough, the write still worked... | 2936 | * fails, tough, the write still worked... |
2937 | * | ||
2938 | * Most of the time we do not need this since dio_complete() will do | ||
2939 | * the invalidation for us. However there are some file systems that | ||
2940 | * do not end up with dio_complete() being called, so let's not break | ||
2941 | * them by removing it completely | ||
2929 | */ | 2942 | */ |
2930 | invalidate_inode_pages2_range(mapping, | 2943 | if (mapping->nrpages) |
2931 | pos >> PAGE_SHIFT, end); | 2944 | invalidate_inode_pages2_range(mapping, |
2945 | pos >> PAGE_SHIFT, end); | ||
2932 | 2946 | ||
2933 | if (written > 0) { | 2947 | if (written > 0) { |
2934 | pos += written; | 2948 | pos += written; |
@@ -1990,6 +1990,7 @@ static void stable_tree_append(struct rmap_item *rmap_item, | |||
1990 | */ | 1990 | */ |
1991 | static void cmp_and_merge_page(struct page *page, struct rmap_item *rmap_item) | 1991 | static void cmp_and_merge_page(struct page *page, struct rmap_item *rmap_item) |
1992 | { | 1992 | { |
1993 | struct mm_struct *mm = rmap_item->mm; | ||
1993 | struct rmap_item *tree_rmap_item; | 1994 | struct rmap_item *tree_rmap_item; |
1994 | struct page *tree_page = NULL; | 1995 | struct page *tree_page = NULL; |
1995 | struct stable_node *stable_node; | 1996 | struct stable_node *stable_node; |
@@ -2062,9 +2063,11 @@ static void cmp_and_merge_page(struct page *page, struct rmap_item *rmap_item) | |||
2062 | if (ksm_use_zero_pages && (checksum == zero_checksum)) { | 2063 | if (ksm_use_zero_pages && (checksum == zero_checksum)) { |
2063 | struct vm_area_struct *vma; | 2064 | struct vm_area_struct *vma; |
2064 | 2065 | ||
2065 | vma = find_mergeable_vma(rmap_item->mm, rmap_item->address); | 2066 | down_read(&mm->mmap_sem); |
2067 | vma = find_mergeable_vma(mm, rmap_item->address); | ||
2066 | err = try_to_merge_one_page(vma, page, | 2068 | err = try_to_merge_one_page(vma, page, |
2067 | ZERO_PAGE(rmap_item->address)); | 2069 | ZERO_PAGE(rmap_item->address)); |
2070 | up_read(&mm->mmap_sem); | ||
2068 | /* | 2071 | /* |
2069 | * In case of failure, the page was not really empty, so we | 2072 | * In case of failure, the page was not really empty, so we |
2070 | * need to continue. Otherwise we're done. | 2073 | * need to continue. Otherwise we're done. |
diff --git a/mm/list_lru.c b/mm/list_lru.c index 7a40fa2be858..f141f0c80ff3 100644 --- a/mm/list_lru.c +++ b/mm/list_lru.c | |||
@@ -325,12 +325,12 @@ static int memcg_init_list_lru_node(struct list_lru_node *nlru) | |||
325 | { | 325 | { |
326 | int size = memcg_nr_cache_ids; | 326 | int size = memcg_nr_cache_ids; |
327 | 327 | ||
328 | nlru->memcg_lrus = kmalloc(size * sizeof(void *), GFP_KERNEL); | 328 | nlru->memcg_lrus = kvmalloc(size * sizeof(void *), GFP_KERNEL); |
329 | if (!nlru->memcg_lrus) | 329 | if (!nlru->memcg_lrus) |
330 | return -ENOMEM; | 330 | return -ENOMEM; |
331 | 331 | ||
332 | if (__memcg_init_list_lru_node(nlru->memcg_lrus, 0, size)) { | 332 | if (__memcg_init_list_lru_node(nlru->memcg_lrus, 0, size)) { |
333 | kfree(nlru->memcg_lrus); | 333 | kvfree(nlru->memcg_lrus); |
334 | return -ENOMEM; | 334 | return -ENOMEM; |
335 | } | 335 | } |
336 | 336 | ||
@@ -340,7 +340,7 @@ static int memcg_init_list_lru_node(struct list_lru_node *nlru) | |||
340 | static void memcg_destroy_list_lru_node(struct list_lru_node *nlru) | 340 | static void memcg_destroy_list_lru_node(struct list_lru_node *nlru) |
341 | { | 341 | { |
342 | __memcg_destroy_list_lru_node(nlru->memcg_lrus, 0, memcg_nr_cache_ids); | 342 | __memcg_destroy_list_lru_node(nlru->memcg_lrus, 0, memcg_nr_cache_ids); |
343 | kfree(nlru->memcg_lrus); | 343 | kvfree(nlru->memcg_lrus); |
344 | } | 344 | } |
345 | 345 | ||
346 | static int memcg_update_list_lru_node(struct list_lru_node *nlru, | 346 | static int memcg_update_list_lru_node(struct list_lru_node *nlru, |
@@ -351,12 +351,12 @@ static int memcg_update_list_lru_node(struct list_lru_node *nlru, | |||
351 | BUG_ON(old_size > new_size); | 351 | BUG_ON(old_size > new_size); |
352 | 352 | ||
353 | old = nlru->memcg_lrus; | 353 | old = nlru->memcg_lrus; |
354 | new = kmalloc(new_size * sizeof(void *), GFP_KERNEL); | 354 | new = kvmalloc(new_size * sizeof(void *), GFP_KERNEL); |
355 | if (!new) | 355 | if (!new) |
356 | return -ENOMEM; | 356 | return -ENOMEM; |
357 | 357 | ||
358 | if (__memcg_init_list_lru_node(new, old_size, new_size)) { | 358 | if (__memcg_init_list_lru_node(new, old_size, new_size)) { |
359 | kfree(new); | 359 | kvfree(new); |
360 | return -ENOMEM; | 360 | return -ENOMEM; |
361 | } | 361 | } |
362 | 362 | ||
@@ -373,7 +373,7 @@ static int memcg_update_list_lru_node(struct list_lru_node *nlru, | |||
373 | nlru->memcg_lrus = new; | 373 | nlru->memcg_lrus = new; |
374 | spin_unlock_irq(&nlru->lock); | 374 | spin_unlock_irq(&nlru->lock); |
375 | 375 | ||
376 | kfree(old); | 376 | kvfree(old); |
377 | return 0; | 377 | return 0; |
378 | } | 378 | } |
379 | 379 | ||
diff --git a/mm/madvise.c b/mm/madvise.c index 21261ff0466f..25bade36e9ca 100644 --- a/mm/madvise.c +++ b/mm/madvise.c | |||
@@ -625,18 +625,26 @@ static int madvise_inject_error(int behavior, | |||
625 | { | 625 | { |
626 | struct page *page; | 626 | struct page *page; |
627 | struct zone *zone; | 627 | struct zone *zone; |
628 | unsigned int order; | ||
628 | 629 | ||
629 | if (!capable(CAP_SYS_ADMIN)) | 630 | if (!capable(CAP_SYS_ADMIN)) |
630 | return -EPERM; | 631 | return -EPERM; |
631 | 632 | ||
632 | for (; start < end; start += PAGE_SIZE << | 633 | |
633 | compound_order(compound_head(page))) { | 634 | for (; start < end; start += PAGE_SIZE << order) { |
634 | int ret; | 635 | int ret; |
635 | 636 | ||
636 | ret = get_user_pages_fast(start, 1, 0, &page); | 637 | ret = get_user_pages_fast(start, 1, 0, &page); |
637 | if (ret != 1) | 638 | if (ret != 1) |
638 | return ret; | 639 | return ret; |
639 | 640 | ||
641 | /* | ||
642 | * When soft offlining hugepages, after migrating the page | ||
643 | * we dissolve it, therefore in the second loop "page" will | ||
644 | * no longer be a compound page, and order will be 0. | ||
645 | */ | ||
646 | order = compound_order(compound_head(page)); | ||
647 | |||
640 | if (PageHWPoison(page)) { | 648 | if (PageHWPoison(page)) { |
641 | put_page(page); | 649 | put_page(page); |
642 | continue; | 650 | continue; |
diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 15af3da5af02..d5f3a62887cf 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c | |||
@@ -1777,6 +1777,10 @@ static void drain_local_stock(struct work_struct *dummy) | |||
1777 | struct memcg_stock_pcp *stock; | 1777 | struct memcg_stock_pcp *stock; |
1778 | unsigned long flags; | 1778 | unsigned long flags; |
1779 | 1779 | ||
1780 | /* | ||
1781 | * The only protection from memory hotplug vs. drain_stock races is | ||
1782 | * that we always operate on local CPU stock here with IRQ disabled | ||
1783 | */ | ||
1780 | local_irq_save(flags); | 1784 | local_irq_save(flags); |
1781 | 1785 | ||
1782 | stock = this_cpu_ptr(&memcg_stock); | 1786 | stock = this_cpu_ptr(&memcg_stock); |
@@ -1821,27 +1825,33 @@ static void drain_all_stock(struct mem_cgroup *root_memcg) | |||
1821 | /* If someone's already draining, avoid adding running more workers. */ | 1825 | /* If someone's already draining, avoid adding running more workers. */ |
1822 | if (!mutex_trylock(&percpu_charge_mutex)) | 1826 | if (!mutex_trylock(&percpu_charge_mutex)) |
1823 | return; | 1827 | return; |
1824 | /* Notify other cpus that system-wide "drain" is running */ | 1828 | /* |
1825 | get_online_cpus(); | 1829 | * Notify other cpus that system-wide "drain" is running |
1830 | * We do not care about races with the cpu hotplug because cpu down | ||
1831 | * as well as workers from this path always operate on the local | ||
1832 | * per-cpu data. CPU up doesn't touch memcg_stock at all. | ||
1833 | */ | ||
1826 | curcpu = get_cpu(); | 1834 | curcpu = get_cpu(); |
1827 | for_each_online_cpu(cpu) { | 1835 | for_each_online_cpu(cpu) { |
1828 | struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu); | 1836 | struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu); |
1829 | struct mem_cgroup *memcg; | 1837 | struct mem_cgroup *memcg; |
1830 | 1838 | ||
1831 | memcg = stock->cached; | 1839 | memcg = stock->cached; |
1832 | if (!memcg || !stock->nr_pages) | 1840 | if (!memcg || !stock->nr_pages || !css_tryget(&memcg->css)) |
1833 | continue; | 1841 | continue; |
1834 | if (!mem_cgroup_is_descendant(memcg, root_memcg)) | 1842 | if (!mem_cgroup_is_descendant(memcg, root_memcg)) { |
1843 | css_put(&memcg->css); | ||
1835 | continue; | 1844 | continue; |
1845 | } | ||
1836 | if (!test_and_set_bit(FLUSHING_CACHED_CHARGE, &stock->flags)) { | 1846 | if (!test_and_set_bit(FLUSHING_CACHED_CHARGE, &stock->flags)) { |
1837 | if (cpu == curcpu) | 1847 | if (cpu == curcpu) |
1838 | drain_local_stock(&stock->work); | 1848 | drain_local_stock(&stock->work); |
1839 | else | 1849 | else |
1840 | schedule_work_on(cpu, &stock->work); | 1850 | schedule_work_on(cpu, &stock->work); |
1841 | } | 1851 | } |
1852 | css_put(&memcg->css); | ||
1842 | } | 1853 | } |
1843 | put_cpu(); | 1854 | put_cpu(); |
1844 | put_online_cpus(); | ||
1845 | mutex_unlock(&percpu_charge_mutex); | 1855 | mutex_unlock(&percpu_charge_mutex); |
1846 | } | 1856 | } |
1847 | 1857 | ||
@@ -5648,7 +5658,8 @@ static void uncharge_batch(const struct uncharge_gather *ug) | |||
5648 | static void uncharge_page(struct page *page, struct uncharge_gather *ug) | 5658 | static void uncharge_page(struct page *page, struct uncharge_gather *ug) |
5649 | { | 5659 | { |
5650 | VM_BUG_ON_PAGE(PageLRU(page), page); | 5660 | VM_BUG_ON_PAGE(PageLRU(page), page); |
5651 | VM_BUG_ON_PAGE(!PageHWPoison(page) && page_count(page), page); | 5661 | VM_BUG_ON_PAGE(page_count(page) && !is_zone_device_page(page) && |
5662 | !PageHWPoison(page) , page); | ||
5652 | 5663 | ||
5653 | if (!page->mem_cgroup) | 5664 | if (!page->mem_cgroup) |
5654 | return; | 5665 | return; |
diff --git a/mm/memory.c b/mm/memory.c index ec4e15494901..a728bed16c20 100644 --- a/mm/memory.c +++ b/mm/memory.c | |||
@@ -845,7 +845,7 @@ struct page *_vm_normal_page(struct vm_area_struct *vma, unsigned long addr, | |||
845 | * vm_normal_page() so that we do not have to special case all | 845 | * vm_normal_page() so that we do not have to special case all |
846 | * call site of vm_normal_page(). | 846 | * call site of vm_normal_page(). |
847 | */ | 847 | */ |
848 | if (likely(pfn < highest_memmap_pfn)) { | 848 | if (likely(pfn <= highest_memmap_pfn)) { |
849 | struct page *page = pfn_to_page(pfn); | 849 | struct page *page = pfn_to_page(pfn); |
850 | 850 | ||
851 | if (is_device_public_page(page)) { | 851 | if (is_device_public_page(page)) { |
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c index e882cb6da994..d4b5f29906b9 100644 --- a/mm/memory_hotplug.c +++ b/mm/memory_hotplug.c | |||
@@ -328,6 +328,7 @@ int __ref __add_pages(int nid, unsigned long phys_start_pfn, | |||
328 | if (err && (err != -EEXIST)) | 328 | if (err && (err != -EEXIST)) |
329 | break; | 329 | break; |
330 | err = 0; | 330 | err = 0; |
331 | cond_resched(); | ||
331 | } | 332 | } |
332 | vmemmap_populate_print_last(); | 333 | vmemmap_populate_print_last(); |
333 | out: | 334 | out: |
@@ -337,7 +338,7 @@ EXPORT_SYMBOL_GPL(__add_pages); | |||
337 | 338 | ||
338 | #ifdef CONFIG_MEMORY_HOTREMOVE | 339 | #ifdef CONFIG_MEMORY_HOTREMOVE |
339 | /* find the smallest valid pfn in the range [start_pfn, end_pfn) */ | 340 | /* find the smallest valid pfn in the range [start_pfn, end_pfn) */ |
340 | static int find_smallest_section_pfn(int nid, struct zone *zone, | 341 | static unsigned long find_smallest_section_pfn(int nid, struct zone *zone, |
341 | unsigned long start_pfn, | 342 | unsigned long start_pfn, |
342 | unsigned long end_pfn) | 343 | unsigned long end_pfn) |
343 | { | 344 | { |
@@ -362,7 +363,7 @@ static int find_smallest_section_pfn(int nid, struct zone *zone, | |||
362 | } | 363 | } |
363 | 364 | ||
364 | /* find the biggest valid pfn in the range [start_pfn, end_pfn). */ | 365 | /* find the biggest valid pfn in the range [start_pfn, end_pfn). */ |
365 | static int find_biggest_section_pfn(int nid, struct zone *zone, | 366 | static unsigned long find_biggest_section_pfn(int nid, struct zone *zone, |
366 | unsigned long start_pfn, | 367 | unsigned long start_pfn, |
367 | unsigned long end_pfn) | 368 | unsigned long end_pfn) |
368 | { | 369 | { |
@@ -550,7 +551,7 @@ static int __remove_section(struct zone *zone, struct mem_section *ms, | |||
550 | return ret; | 551 | return ret; |
551 | 552 | ||
552 | scn_nr = __section_nr(ms); | 553 | scn_nr = __section_nr(ms); |
553 | start_pfn = section_nr_to_pfn(scn_nr); | 554 | start_pfn = section_nr_to_pfn((unsigned long)scn_nr); |
554 | __remove_zone(zone, start_pfn); | 555 | __remove_zone(zone, start_pfn); |
555 | 556 | ||
556 | sparse_remove_one_section(zone, ms, map_offset); | 557 | sparse_remove_one_section(zone, ms, map_offset); |
diff --git a/mm/oom_kill.c b/mm/oom_kill.c index 99736e026712..dee0f75c3013 100644 --- a/mm/oom_kill.c +++ b/mm/oom_kill.c | |||
@@ -40,6 +40,7 @@ | |||
40 | #include <linux/ratelimit.h> | 40 | #include <linux/ratelimit.h> |
41 | #include <linux/kthread.h> | 41 | #include <linux/kthread.h> |
42 | #include <linux/init.h> | 42 | #include <linux/init.h> |
43 | #include <linux/mmu_notifier.h> | ||
43 | 44 | ||
44 | #include <asm/tlb.h> | 45 | #include <asm/tlb.h> |
45 | #include "internal.h" | 46 | #include "internal.h" |
@@ -495,6 +496,21 @@ static bool __oom_reap_task_mm(struct task_struct *tsk, struct mm_struct *mm) | |||
495 | } | 496 | } |
496 | 497 | ||
497 | /* | 498 | /* |
499 | * If the mm has notifiers then we would need to invalidate them around | ||
500 | * unmap_page_range and that is risky because notifiers can sleep and | ||
501 | * what they do is basically undeterministic. So let's have a short | ||
502 | * sleep to give the oom victim some more time. | ||
503 | * TODO: we really want to get rid of this ugly hack and make sure that | ||
504 | * notifiers cannot block for unbounded amount of time and add | ||
505 | * mmu_notifier_invalidate_range_{start,end} around unmap_page_range | ||
506 | */ | ||
507 | if (mm_has_notifiers(mm)) { | ||
508 | up_read(&mm->mmap_sem); | ||
509 | schedule_timeout_idle(HZ); | ||
510 | goto unlock_oom; | ||
511 | } | ||
512 | |||
513 | /* | ||
498 | * MMF_OOM_SKIP is set by exit_mmap when the OOM reaper can't | 514 | * MMF_OOM_SKIP is set by exit_mmap when the OOM reaper can't |
499 | * work on the mm anymore. The check for MMF_OOM_SKIP must run | 515 | * work on the mm anymore. The check for MMF_OOM_SKIP must run |
500 | * under mmap_sem for reading because it serializes against the | 516 | * under mmap_sem for reading because it serializes against the |
diff --git a/mm/page_alloc.c b/mm/page_alloc.c index c841af88836a..77e4d3c5c57b 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c | |||
@@ -1190,7 +1190,7 @@ static void __meminit __init_single_pfn(unsigned long pfn, unsigned long zone, | |||
1190 | } | 1190 | } |
1191 | 1191 | ||
1192 | #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT | 1192 | #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT |
1193 | static void init_reserved_page(unsigned long pfn) | 1193 | static void __meminit init_reserved_page(unsigned long pfn) |
1194 | { | 1194 | { |
1195 | pg_data_t *pgdat; | 1195 | pg_data_t *pgdat; |
1196 | int nid, zid; | 1196 | int nid, zid; |
@@ -5367,6 +5367,7 @@ not_early: | |||
5367 | 5367 | ||
5368 | __init_single_page(page, pfn, zone, nid); | 5368 | __init_single_page(page, pfn, zone, nid); |
5369 | set_pageblock_migratetype(page, MIGRATE_MOVABLE); | 5369 | set_pageblock_migratetype(page, MIGRATE_MOVABLE); |
5370 | cond_resched(); | ||
5370 | } else { | 5371 | } else { |
5371 | __init_single_pfn(pfn, zone, nid); | 5372 | __init_single_pfn(pfn, zone, nid); |
5372 | } | 5373 | } |
diff --git a/mm/percpu-stats.c b/mm/percpu-stats.c index 6142484e88f7..7a58460bfd27 100644 --- a/mm/percpu-stats.c +++ b/mm/percpu-stats.c | |||
@@ -73,7 +73,7 @@ static void chunk_map_stats(struct seq_file *m, struct pcpu_chunk *chunk, | |||
73 | last_alloc + 1 : 0; | 73 | last_alloc + 1 : 0; |
74 | 74 | ||
75 | as_len = 0; | 75 | as_len = 0; |
76 | start = chunk->start_offset; | 76 | start = chunk->start_offset / PCPU_MIN_ALLOC_SIZE; |
77 | 77 | ||
78 | /* | 78 | /* |
79 | * If a bit is set in the allocation map, the bound_map identifies | 79 | * If a bit is set in the allocation map, the bound_map identifies |
diff --git a/mm/percpu.c b/mm/percpu.c index 59d44d61f5f1..aa121cef76de 100644 --- a/mm/percpu.c +++ b/mm/percpu.c | |||
@@ -353,6 +353,8 @@ static void pcpu_next_md_free_region(struct pcpu_chunk *chunk, int *bit_off, | |||
353 | block->contig_hint_start); | 353 | block->contig_hint_start); |
354 | return; | 354 | return; |
355 | } | 355 | } |
356 | /* reset to satisfy the second predicate above */ | ||
357 | block_off = 0; | ||
356 | 358 | ||
357 | *bits = block->right_free; | 359 | *bits = block->right_free; |
358 | *bit_off = (i + 1) * PCPU_BITMAP_BLOCK_BITS - block->right_free; | 360 | *bit_off = (i + 1) * PCPU_BITMAP_BLOCK_BITS - block->right_free; |
@@ -407,6 +409,8 @@ static void pcpu_next_fit_region(struct pcpu_chunk *chunk, int alloc_bits, | |||
407 | *bit_off = pcpu_block_off_to_off(i, block->first_free); | 409 | *bit_off = pcpu_block_off_to_off(i, block->first_free); |
408 | return; | 410 | return; |
409 | } | 411 | } |
412 | /* reset to satisfy the second predicate above */ | ||
413 | block_off = 0; | ||
410 | 414 | ||
411 | *bit_off = ALIGN(PCPU_BITMAP_BLOCK_BITS - block->right_free, | 415 | *bit_off = ALIGN(PCPU_BITMAP_BLOCK_BITS - block->right_free, |
412 | align); | 416 | align); |
diff --git a/mm/rodata_test.c b/mm/rodata_test.c index 6bb4deb12e78..d908c8769b48 100644 --- a/mm/rodata_test.c +++ b/mm/rodata_test.c | |||
@@ -14,7 +14,7 @@ | |||
14 | #include <linux/uaccess.h> | 14 | #include <linux/uaccess.h> |
15 | #include <asm/sections.h> | 15 | #include <asm/sections.h> |
16 | 16 | ||
17 | const int rodata_test_data = 0xC3; | 17 | static const int rodata_test_data = 0xC3; |
18 | 18 | ||
19 | void rodata_test(void) | 19 | void rodata_test(void) |
20 | { | 20 | { |
diff --git a/mm/slab_common.c b/mm/slab_common.c index 904a83be82de..80164599ca5d 100644 --- a/mm/slab_common.c +++ b/mm/slab_common.c | |||
@@ -165,9 +165,9 @@ static int init_memcg_params(struct kmem_cache *s, | |||
165 | if (!memcg_nr_cache_ids) | 165 | if (!memcg_nr_cache_ids) |
166 | return 0; | 166 | return 0; |
167 | 167 | ||
168 | arr = kzalloc(sizeof(struct memcg_cache_array) + | 168 | arr = kvzalloc(sizeof(struct memcg_cache_array) + |
169 | memcg_nr_cache_ids * sizeof(void *), | 169 | memcg_nr_cache_ids * sizeof(void *), |
170 | GFP_KERNEL); | 170 | GFP_KERNEL); |
171 | if (!arr) | 171 | if (!arr) |
172 | return -ENOMEM; | 172 | return -ENOMEM; |
173 | 173 | ||
@@ -178,15 +178,23 @@ static int init_memcg_params(struct kmem_cache *s, | |||
178 | static void destroy_memcg_params(struct kmem_cache *s) | 178 | static void destroy_memcg_params(struct kmem_cache *s) |
179 | { | 179 | { |
180 | if (is_root_cache(s)) | 180 | if (is_root_cache(s)) |
181 | kfree(rcu_access_pointer(s->memcg_params.memcg_caches)); | 181 | kvfree(rcu_access_pointer(s->memcg_params.memcg_caches)); |
182 | } | ||
183 | |||
184 | static void free_memcg_params(struct rcu_head *rcu) | ||
185 | { | ||
186 | struct memcg_cache_array *old; | ||
187 | |||
188 | old = container_of(rcu, struct memcg_cache_array, rcu); | ||
189 | kvfree(old); | ||
182 | } | 190 | } |
183 | 191 | ||
184 | static int update_memcg_params(struct kmem_cache *s, int new_array_size) | 192 | static int update_memcg_params(struct kmem_cache *s, int new_array_size) |
185 | { | 193 | { |
186 | struct memcg_cache_array *old, *new; | 194 | struct memcg_cache_array *old, *new; |
187 | 195 | ||
188 | new = kzalloc(sizeof(struct memcg_cache_array) + | 196 | new = kvzalloc(sizeof(struct memcg_cache_array) + |
189 | new_array_size * sizeof(void *), GFP_KERNEL); | 197 | new_array_size * sizeof(void *), GFP_KERNEL); |
190 | if (!new) | 198 | if (!new) |
191 | return -ENOMEM; | 199 | return -ENOMEM; |
192 | 200 | ||
@@ -198,7 +206,7 @@ static int update_memcg_params(struct kmem_cache *s, int new_array_size) | |||
198 | 206 | ||
199 | rcu_assign_pointer(s->memcg_params.memcg_caches, new); | 207 | rcu_assign_pointer(s->memcg_params.memcg_caches, new); |
200 | if (old) | 208 | if (old) |
201 | kfree_rcu(old, rcu); | 209 | call_rcu(&old->rcu, free_memcg_params); |
202 | return 0; | 210 | return 0; |
203 | } | 211 | } |
204 | 212 | ||
@@ -575,7 +575,7 @@ static void lru_lazyfree_fn(struct page *page, struct lruvec *lruvec, | |||
575 | void *arg) | 575 | void *arg) |
576 | { | 576 | { |
577 | if (PageLRU(page) && PageAnon(page) && PageSwapBacked(page) && | 577 | if (PageLRU(page) && PageAnon(page) && PageSwapBacked(page) && |
578 | !PageUnevictable(page)) { | 578 | !PageSwapCache(page) && !PageUnevictable(page)) { |
579 | bool active = PageActive(page); | 579 | bool active = PageActive(page); |
580 | 580 | ||
581 | del_page_from_lru_list(page, lruvec, | 581 | del_page_from_lru_list(page, lruvec, |
@@ -665,7 +665,7 @@ void deactivate_file_page(struct page *page) | |||
665 | void mark_page_lazyfree(struct page *page) | 665 | void mark_page_lazyfree(struct page *page) |
666 | { | 666 | { |
667 | if (PageLRU(page) && PageAnon(page) && PageSwapBacked(page) && | 667 | if (PageLRU(page) && PageAnon(page) && PageSwapBacked(page) && |
668 | !PageUnevictable(page)) { | 668 | !PageSwapCache(page) && !PageUnevictable(page)) { |
669 | struct pagevec *pvec = &get_cpu_var(lru_lazyfree_pvecs); | 669 | struct pagevec *pvec = &get_cpu_var(lru_lazyfree_pvecs); |
670 | 670 | ||
671 | get_page(page); | 671 | get_page(page); |
diff --git a/mm/swap_state.c b/mm/swap_state.c index 71ce2d1ccbf7..ed91091d1e68 100644 --- a/mm/swap_state.c +++ b/mm/swap_state.c | |||
@@ -242,6 +242,17 @@ int add_to_swap(struct page *page) | |||
242 | * clear SWAP_HAS_CACHE flag. | 242 | * clear SWAP_HAS_CACHE flag. |
243 | */ | 243 | */ |
244 | goto fail; | 244 | goto fail; |
245 | /* | ||
246 | * Normally the page will be dirtied in unmap because its pte should be | ||
247 | * dirty. A special case is MADV_FREE page. The page'e pte could have | ||
248 | * dirty bit cleared but the page's SwapBacked bit is still set because | ||
249 | * clearing the dirty bit and SwapBacked bit has no lock protected. For | ||
250 | * such page, unmap will not set dirty bit for it, so page reclaim will | ||
251 | * not write the page out. This can cause data corruption when the page | ||
252 | * is swap in later. Always setting the dirty bit for the page solves | ||
253 | * the problem. | ||
254 | */ | ||
255 | set_page_dirty(page); | ||
245 | 256 | ||
246 | return 1; | 257 | return 1; |
247 | 258 | ||
diff --git a/mm/z3fold.c b/mm/z3fold.c index 486550df32be..b2ba2ba585f3 100644 --- a/mm/z3fold.c +++ b/mm/z3fold.c | |||
@@ -250,6 +250,7 @@ static void __release_z3fold_page(struct z3fold_header *zhdr, bool locked) | |||
250 | 250 | ||
251 | WARN_ON(!list_empty(&zhdr->buddy)); | 251 | WARN_ON(!list_empty(&zhdr->buddy)); |
252 | set_bit(PAGE_STALE, &page->private); | 252 | set_bit(PAGE_STALE, &page->private); |
253 | clear_bit(NEEDS_COMPACTING, &page->private); | ||
253 | spin_lock(&pool->lock); | 254 | spin_lock(&pool->lock); |
254 | if (!list_empty(&page->lru)) | 255 | if (!list_empty(&page->lru)) |
255 | list_del(&page->lru); | 256 | list_del(&page->lru); |
@@ -303,7 +304,6 @@ static void free_pages_work(struct work_struct *w) | |||
303 | list_del(&zhdr->buddy); | 304 | list_del(&zhdr->buddy); |
304 | if (WARN_ON(!test_bit(PAGE_STALE, &page->private))) | 305 | if (WARN_ON(!test_bit(PAGE_STALE, &page->private))) |
305 | continue; | 306 | continue; |
306 | clear_bit(NEEDS_COMPACTING, &page->private); | ||
307 | spin_unlock(&pool->stale_lock); | 307 | spin_unlock(&pool->stale_lock); |
308 | cancel_work_sync(&zhdr->work); | 308 | cancel_work_sync(&zhdr->work); |
309 | free_z3fold_page(page); | 309 | free_z3fold_page(page); |
@@ -624,10 +624,8 @@ lookup: | |||
624 | * stale pages list. cancel_work_sync() can sleep so we must make | 624 | * stale pages list. cancel_work_sync() can sleep so we must make |
625 | * sure it won't be called in case we're in atomic context. | 625 | * sure it won't be called in case we're in atomic context. |
626 | */ | 626 | */ |
627 | if (zhdr && (can_sleep || !work_pending(&zhdr->work) || | 627 | if (zhdr && (can_sleep || !work_pending(&zhdr->work))) { |
628 | !unlikely(work_busy(&zhdr->work)))) { | ||
629 | list_del(&zhdr->buddy); | 628 | list_del(&zhdr->buddy); |
630 | clear_bit(NEEDS_COMPACTING, &page->private); | ||
631 | spin_unlock(&pool->stale_lock); | 629 | spin_unlock(&pool->stale_lock); |
632 | if (can_sleep) | 630 | if (can_sleep) |
633 | cancel_work_sync(&zhdr->work); | 631 | cancel_work_sync(&zhdr->work); |
@@ -875,16 +873,18 @@ static int z3fold_reclaim_page(struct z3fold_pool *pool, unsigned int retries) | |||
875 | goto next; | 873 | goto next; |
876 | } | 874 | } |
877 | next: | 875 | next: |
876 | spin_lock(&pool->lock); | ||
878 | if (test_bit(PAGE_HEADLESS, &page->private)) { | 877 | if (test_bit(PAGE_HEADLESS, &page->private)) { |
879 | if (ret == 0) { | 878 | if (ret == 0) { |
879 | spin_unlock(&pool->lock); | ||
880 | free_z3fold_page(page); | 880 | free_z3fold_page(page); |
881 | return 0; | 881 | return 0; |
882 | } | 882 | } |
883 | } else if (kref_put(&zhdr->refcount, release_z3fold_page)) { | 883 | } else if (kref_put(&zhdr->refcount, release_z3fold_page)) { |
884 | atomic64_dec(&pool->pages_nr); | 884 | atomic64_dec(&pool->pages_nr); |
885 | spin_unlock(&pool->lock); | ||
885 | return 0; | 886 | return 0; |
886 | } | 887 | } |
887 | spin_lock(&pool->lock); | ||
888 | 888 | ||
889 | /* | 889 | /* |
890 | * Add to the beginning of LRU. | 890 | * Add to the beginning of LRU. |
diff --git a/net/8021q/vlan_core.c b/net/8021q/vlan_core.c index e2ed69850489..0bc31de9071a 100644 --- a/net/8021q/vlan_core.c +++ b/net/8021q/vlan_core.c | |||
@@ -21,6 +21,12 @@ bool vlan_do_receive(struct sk_buff **skbp) | |||
21 | if (unlikely(!skb)) | 21 | if (unlikely(!skb)) |
22 | return false; | 22 | return false; |
23 | 23 | ||
24 | if (unlikely(!(vlan_dev->flags & IFF_UP))) { | ||
25 | kfree_skb(skb); | ||
26 | *skbp = NULL; | ||
27 | return false; | ||
28 | } | ||
29 | |||
24 | skb->dev = vlan_dev; | 30 | skb->dev = vlan_dev; |
25 | if (unlikely(skb->pkt_type == PACKET_OTHERHOST)) { | 31 | if (unlikely(skb->pkt_type == PACKET_OTHERHOST)) { |
26 | /* Our lower layer thinks this is not local, let's make sure. | 32 | /* Our lower layer thinks this is not local, let's make sure. |
diff --git a/net/bluetooth/Kconfig b/net/bluetooth/Kconfig index c18115d22f00..db82a40875e8 100644 --- a/net/bluetooth/Kconfig +++ b/net/bluetooth/Kconfig | |||
@@ -126,14 +126,4 @@ config BT_DEBUGFS | |||
126 | Provide extensive information about internal Bluetooth states | 126 | Provide extensive information about internal Bluetooth states |
127 | in debugfs. | 127 | in debugfs. |
128 | 128 | ||
129 | config BT_LEGACY_IOCTL | ||
130 | bool "Enable legacy ioctl interfaces" | ||
131 | depends on BT && BT_BREDR | ||
132 | default y | ||
133 | help | ||
134 | Enable support for legacy ioctl interfaces. This is only needed | ||
135 | for old and deprecated applications using direct ioctl calls for | ||
136 | controller management. Since Linux 3.4 all configuration and | ||
137 | setup is done via mgmt interface and this is no longer needed. | ||
138 | |||
139 | source "drivers/bluetooth/Kconfig" | 129 | source "drivers/bluetooth/Kconfig" |
diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c index 0bad296fe0af..65d734c165bd 100644 --- a/net/bluetooth/hci_sock.c +++ b/net/bluetooth/hci_sock.c | |||
@@ -878,7 +878,6 @@ static int hci_sock_release(struct socket *sock) | |||
878 | return 0; | 878 | return 0; |
879 | } | 879 | } |
880 | 880 | ||
881 | #ifdef CONFIG_BT_LEGACY_IOCTL | ||
882 | static int hci_sock_blacklist_add(struct hci_dev *hdev, void __user *arg) | 881 | static int hci_sock_blacklist_add(struct hci_dev *hdev, void __user *arg) |
883 | { | 882 | { |
884 | bdaddr_t bdaddr; | 883 | bdaddr_t bdaddr; |
@@ -1050,7 +1049,6 @@ done: | |||
1050 | release_sock(sk); | 1049 | release_sock(sk); |
1051 | return err; | 1050 | return err; |
1052 | } | 1051 | } |
1053 | #endif | ||
1054 | 1052 | ||
1055 | static int hci_sock_bind(struct socket *sock, struct sockaddr *addr, | 1053 | static int hci_sock_bind(struct socket *sock, struct sockaddr *addr, |
1056 | int addr_len) | 1054 | int addr_len) |
@@ -1971,11 +1969,7 @@ static const struct proto_ops hci_sock_ops = { | |||
1971 | .getname = hci_sock_getname, | 1969 | .getname = hci_sock_getname, |
1972 | .sendmsg = hci_sock_sendmsg, | 1970 | .sendmsg = hci_sock_sendmsg, |
1973 | .recvmsg = hci_sock_recvmsg, | 1971 | .recvmsg = hci_sock_recvmsg, |
1974 | #ifdef CONFIG_BT_LEGACY_IOCTL | ||
1975 | .ioctl = hci_sock_ioctl, | 1972 | .ioctl = hci_sock_ioctl, |
1976 | #else | ||
1977 | .ioctl = sock_no_ioctl, | ||
1978 | #endif | ||
1979 | .poll = datagram_poll, | 1973 | .poll = datagram_poll, |
1980 | .listen = sock_no_listen, | 1974 | .listen = sock_no_listen, |
1981 | .shutdown = sock_no_shutdown, | 1975 | .shutdown = sock_no_shutdown, |
diff --git a/net/ceph/osdmap.c b/net/ceph/osdmap.c index f358d0bfa76b..79d14d70b7ea 100644 --- a/net/ceph/osdmap.c +++ b/net/ceph/osdmap.c | |||
@@ -2445,19 +2445,34 @@ static void apply_upmap(struct ceph_osdmap *osdmap, | |||
2445 | 2445 | ||
2446 | pg = lookup_pg_mapping(&osdmap->pg_upmap_items, pgid); | 2446 | pg = lookup_pg_mapping(&osdmap->pg_upmap_items, pgid); |
2447 | if (pg) { | 2447 | if (pg) { |
2448 | for (i = 0; i < raw->size; i++) { | 2448 | /* |
2449 | for (j = 0; j < pg->pg_upmap_items.len; j++) { | 2449 | * Note: this approach does not allow a bidirectional swap, |
2450 | int from = pg->pg_upmap_items.from_to[j][0]; | 2450 | * e.g., [[1,2],[2,1]] applied to [0,1,2] -> [0,2,1]. |
2451 | int to = pg->pg_upmap_items.from_to[j][1]; | 2451 | */ |
2452 | 2452 | for (i = 0; i < pg->pg_upmap_items.len; i++) { | |
2453 | if (from == raw->osds[i]) { | 2453 | int from = pg->pg_upmap_items.from_to[i][0]; |
2454 | if (!(to != CRUSH_ITEM_NONE && | 2454 | int to = pg->pg_upmap_items.from_to[i][1]; |
2455 | to < osdmap->max_osd && | 2455 | int pos = -1; |
2456 | osdmap->osd_weight[to] == 0)) | 2456 | bool exists = false; |
2457 | raw->osds[i] = to; | 2457 | |
2458 | /* make sure replacement doesn't already appear */ | ||
2459 | for (j = 0; j < raw->size; j++) { | ||
2460 | int osd = raw->osds[j]; | ||
2461 | |||
2462 | if (osd == to) { | ||
2463 | exists = true; | ||
2458 | break; | 2464 | break; |
2459 | } | 2465 | } |
2466 | /* ignore mapping if target is marked out */ | ||
2467 | if (osd == from && pos < 0 && | ||
2468 | !(to != CRUSH_ITEM_NONE && | ||
2469 | to < osdmap->max_osd && | ||
2470 | osdmap->osd_weight[to] == 0)) { | ||
2471 | pos = j; | ||
2472 | } | ||
2460 | } | 2473 | } |
2474 | if (!exists && pos >= 0) | ||
2475 | raw->osds[pos] = to; | ||
2461 | } | 2476 | } |
2462 | } | 2477 | } |
2463 | } | 2478 | } |
diff --git a/net/compat.c b/net/compat.c index 6ded6c821d7a..22381719718c 100644 --- a/net/compat.c +++ b/net/compat.c | |||
@@ -185,6 +185,13 @@ int cmsghdr_from_user_compat_to_kern(struct msghdr *kmsg, struct sock *sk, | |||
185 | ucmsg = cmsg_compat_nxthdr(kmsg, ucmsg, ucmlen); | 185 | ucmsg = cmsg_compat_nxthdr(kmsg, ucmsg, ucmlen); |
186 | } | 186 | } |
187 | 187 | ||
188 | /* | ||
189 | * check the length of messages copied in is the same as the | ||
190 | * what we get from the first loop | ||
191 | */ | ||
192 | if ((char *)kcmsg - (char *)kcmsg_base != kcmlen) | ||
193 | goto Einval; | ||
194 | |||
188 | /* Ok, looks like we made it. Hook it up and return success. */ | 195 | /* Ok, looks like we made it. Hook it up and return success. */ |
189 | kmsg->msg_control = kcmsg_base; | 196 | kmsg->msg_control = kcmsg_base; |
190 | kmsg->msg_controllen = kcmlen; | 197 | kmsg->msg_controllen = kcmlen; |
diff --git a/net/core/dev.c b/net/core/dev.c index fb766d906148..588b473194a8 100644 --- a/net/core/dev.c +++ b/net/core/dev.c | |||
@@ -1948,8 +1948,12 @@ again: | |||
1948 | goto again; | 1948 | goto again; |
1949 | } | 1949 | } |
1950 | out_unlock: | 1950 | out_unlock: |
1951 | if (pt_prev) | 1951 | if (pt_prev) { |
1952 | pt_prev->func(skb2, skb->dev, pt_prev, skb->dev); | 1952 | if (!skb_orphan_frags_rx(skb2, GFP_ATOMIC)) |
1953 | pt_prev->func(skb2, skb->dev, pt_prev, skb->dev); | ||
1954 | else | ||
1955 | kfree_skb(skb2); | ||
1956 | } | ||
1953 | rcu_read_unlock(); | 1957 | rcu_read_unlock(); |
1954 | } | 1958 | } |
1955 | EXPORT_SYMBOL_GPL(dev_queue_xmit_nit); | 1959 | EXPORT_SYMBOL_GPL(dev_queue_xmit_nit); |
@@ -3892,6 +3896,7 @@ static u32 netif_receive_generic_xdp(struct sk_buff *skb, | |||
3892 | __skb_pull(skb, off); | 3896 | __skb_pull(skb, off); |
3893 | else if (off < 0) | 3897 | else if (off < 0) |
3894 | __skb_push(skb, -off); | 3898 | __skb_push(skb, -off); |
3899 | skb->mac_header += off; | ||
3895 | 3900 | ||
3896 | switch (act) { | 3901 | switch (act) { |
3897 | case XDP_REDIRECT: | 3902 | case XDP_REDIRECT: |
diff --git a/net/core/ethtool.c b/net/core/ethtool.c index 6a582ae4c5d9..3228411ada0f 100644 --- a/net/core/ethtool.c +++ b/net/core/ethtool.c | |||
@@ -525,6 +525,8 @@ convert_link_ksettings_to_legacy_settings( | |||
525 | = link_ksettings->base.eth_tp_mdix; | 525 | = link_ksettings->base.eth_tp_mdix; |
526 | legacy_settings->eth_tp_mdix_ctrl | 526 | legacy_settings->eth_tp_mdix_ctrl |
527 | = link_ksettings->base.eth_tp_mdix_ctrl; | 527 | = link_ksettings->base.eth_tp_mdix_ctrl; |
528 | legacy_settings->transceiver | ||
529 | = link_ksettings->base.transceiver; | ||
528 | return retval; | 530 | return retval; |
529 | } | 531 | } |
530 | 532 | ||
diff --git a/net/core/filter.c b/net/core/filter.c index 24dd33dd9f04..74b8c91fb5f4 100644 --- a/net/core/filter.c +++ b/net/core/filter.c | |||
@@ -989,10 +989,14 @@ static bool __sk_filter_charge(struct sock *sk, struct sk_filter *fp) | |||
989 | 989 | ||
990 | bool sk_filter_charge(struct sock *sk, struct sk_filter *fp) | 990 | bool sk_filter_charge(struct sock *sk, struct sk_filter *fp) |
991 | { | 991 | { |
992 | bool ret = __sk_filter_charge(sk, fp); | 992 | if (!refcount_inc_not_zero(&fp->refcnt)) |
993 | if (ret) | 993 | return false; |
994 | refcount_inc(&fp->refcnt); | 994 | |
995 | return ret; | 995 | if (!__sk_filter_charge(sk, fp)) { |
996 | sk_filter_release(fp); | ||
997 | return false; | ||
998 | } | ||
999 | return true; | ||
996 | } | 1000 | } |
997 | 1001 | ||
998 | static struct bpf_prog *bpf_migrate_filter(struct bpf_prog *fp) | 1002 | static struct bpf_prog *bpf_migrate_filter(struct bpf_prog *fp) |
@@ -1794,7 +1798,7 @@ struct redirect_info { | |||
1794 | u32 flags; | 1798 | u32 flags; |
1795 | struct bpf_map *map; | 1799 | struct bpf_map *map; |
1796 | struct bpf_map *map_to_flush; | 1800 | struct bpf_map *map_to_flush; |
1797 | const struct bpf_prog *map_owner; | 1801 | unsigned long map_owner; |
1798 | }; | 1802 | }; |
1799 | 1803 | ||
1800 | static DEFINE_PER_CPU(struct redirect_info, redirect_info); | 1804 | static DEFINE_PER_CPU(struct redirect_info, redirect_info); |
@@ -2500,11 +2504,17 @@ void xdp_do_flush_map(void) | |||
2500 | } | 2504 | } |
2501 | EXPORT_SYMBOL_GPL(xdp_do_flush_map); | 2505 | EXPORT_SYMBOL_GPL(xdp_do_flush_map); |
2502 | 2506 | ||
2507 | static inline bool xdp_map_invalid(const struct bpf_prog *xdp_prog, | ||
2508 | unsigned long aux) | ||
2509 | { | ||
2510 | return (unsigned long)xdp_prog->aux != aux; | ||
2511 | } | ||
2512 | |||
2503 | static int xdp_do_redirect_map(struct net_device *dev, struct xdp_buff *xdp, | 2513 | static int xdp_do_redirect_map(struct net_device *dev, struct xdp_buff *xdp, |
2504 | struct bpf_prog *xdp_prog) | 2514 | struct bpf_prog *xdp_prog) |
2505 | { | 2515 | { |
2506 | struct redirect_info *ri = this_cpu_ptr(&redirect_info); | 2516 | struct redirect_info *ri = this_cpu_ptr(&redirect_info); |
2507 | const struct bpf_prog *map_owner = ri->map_owner; | 2517 | unsigned long map_owner = ri->map_owner; |
2508 | struct bpf_map *map = ri->map; | 2518 | struct bpf_map *map = ri->map; |
2509 | struct net_device *fwd = NULL; | 2519 | struct net_device *fwd = NULL; |
2510 | u32 index = ri->ifindex; | 2520 | u32 index = ri->ifindex; |
@@ -2512,9 +2522,9 @@ static int xdp_do_redirect_map(struct net_device *dev, struct xdp_buff *xdp, | |||
2512 | 2522 | ||
2513 | ri->ifindex = 0; | 2523 | ri->ifindex = 0; |
2514 | ri->map = NULL; | 2524 | ri->map = NULL; |
2515 | ri->map_owner = NULL; | 2525 | ri->map_owner = 0; |
2516 | 2526 | ||
2517 | if (unlikely(map_owner != xdp_prog)) { | 2527 | if (unlikely(xdp_map_invalid(xdp_prog, map_owner))) { |
2518 | err = -EFAULT; | 2528 | err = -EFAULT; |
2519 | map = NULL; | 2529 | map = NULL; |
2520 | goto err; | 2530 | goto err; |
@@ -2574,7 +2584,7 @@ int xdp_do_generic_redirect(struct net_device *dev, struct sk_buff *skb, | |||
2574 | struct bpf_prog *xdp_prog) | 2584 | struct bpf_prog *xdp_prog) |
2575 | { | 2585 | { |
2576 | struct redirect_info *ri = this_cpu_ptr(&redirect_info); | 2586 | struct redirect_info *ri = this_cpu_ptr(&redirect_info); |
2577 | const struct bpf_prog *map_owner = ri->map_owner; | 2587 | unsigned long map_owner = ri->map_owner; |
2578 | struct bpf_map *map = ri->map; | 2588 | struct bpf_map *map = ri->map; |
2579 | struct net_device *fwd = NULL; | 2589 | struct net_device *fwd = NULL; |
2580 | u32 index = ri->ifindex; | 2590 | u32 index = ri->ifindex; |
@@ -2583,10 +2593,10 @@ int xdp_do_generic_redirect(struct net_device *dev, struct sk_buff *skb, | |||
2583 | 2593 | ||
2584 | ri->ifindex = 0; | 2594 | ri->ifindex = 0; |
2585 | ri->map = NULL; | 2595 | ri->map = NULL; |
2586 | ri->map_owner = NULL; | 2596 | ri->map_owner = 0; |
2587 | 2597 | ||
2588 | if (map) { | 2598 | if (map) { |
2589 | if (unlikely(map_owner != xdp_prog)) { | 2599 | if (unlikely(xdp_map_invalid(xdp_prog, map_owner))) { |
2590 | err = -EFAULT; | 2600 | err = -EFAULT; |
2591 | map = NULL; | 2601 | map = NULL; |
2592 | goto err; | 2602 | goto err; |
@@ -2632,7 +2642,7 @@ BPF_CALL_2(bpf_xdp_redirect, u32, ifindex, u64, flags) | |||
2632 | ri->ifindex = ifindex; | 2642 | ri->ifindex = ifindex; |
2633 | ri->flags = flags; | 2643 | ri->flags = flags; |
2634 | ri->map = NULL; | 2644 | ri->map = NULL; |
2635 | ri->map_owner = NULL; | 2645 | ri->map_owner = 0; |
2636 | 2646 | ||
2637 | return XDP_REDIRECT; | 2647 | return XDP_REDIRECT; |
2638 | } | 2648 | } |
@@ -2646,7 +2656,7 @@ static const struct bpf_func_proto bpf_xdp_redirect_proto = { | |||
2646 | }; | 2656 | }; |
2647 | 2657 | ||
2648 | BPF_CALL_4(bpf_xdp_redirect_map, struct bpf_map *, map, u32, ifindex, u64, flags, | 2658 | BPF_CALL_4(bpf_xdp_redirect_map, struct bpf_map *, map, u32, ifindex, u64, flags, |
2649 | const struct bpf_prog *, map_owner) | 2659 | unsigned long, map_owner) |
2650 | { | 2660 | { |
2651 | struct redirect_info *ri = this_cpu_ptr(&redirect_info); | 2661 | struct redirect_info *ri = this_cpu_ptr(&redirect_info); |
2652 | 2662 | ||
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c index a78fd61da0ec..d4bcdcc68e92 100644 --- a/net/core/rtnetlink.c +++ b/net/core/rtnetlink.c | |||
@@ -3854,6 +3854,9 @@ static int rtnl_fill_statsinfo(struct sk_buff *skb, struct net_device *dev, | |||
3854 | return -EMSGSIZE; | 3854 | return -EMSGSIZE; |
3855 | 3855 | ||
3856 | ifsm = nlmsg_data(nlh); | 3856 | ifsm = nlmsg_data(nlh); |
3857 | ifsm->family = PF_UNSPEC; | ||
3858 | ifsm->pad1 = 0; | ||
3859 | ifsm->pad2 = 0; | ||
3857 | ifsm->ifindex = dev->ifindex; | 3860 | ifsm->ifindex = dev->ifindex; |
3858 | ifsm->filter_mask = filter_mask; | 3861 | ifsm->filter_mask = filter_mask; |
3859 | 3862 | ||
diff --git a/net/core/sock.c b/net/core/sock.c index 9b7b6bbb2a23..23953b741a41 100644 --- a/net/core/sock.c +++ b/net/core/sock.c | |||
@@ -1654,6 +1654,8 @@ struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority) | |||
1654 | 1654 | ||
1655 | sock_copy(newsk, sk); | 1655 | sock_copy(newsk, sk); |
1656 | 1656 | ||
1657 | newsk->sk_prot_creator = sk->sk_prot; | ||
1658 | |||
1657 | /* SANITY */ | 1659 | /* SANITY */ |
1658 | if (likely(newsk->sk_net_refcnt)) | 1660 | if (likely(newsk->sk_net_refcnt)) |
1659 | get_net(sock_net(newsk)); | 1661 | get_net(sock_net(newsk)); |
@@ -1682,13 +1684,16 @@ struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority) | |||
1682 | 1684 | ||
1683 | sock_reset_flag(newsk, SOCK_DONE); | 1685 | sock_reset_flag(newsk, SOCK_DONE); |
1684 | 1686 | ||
1685 | filter = rcu_dereference_protected(newsk->sk_filter, 1); | 1687 | rcu_read_lock(); |
1688 | filter = rcu_dereference(sk->sk_filter); | ||
1686 | if (filter != NULL) | 1689 | if (filter != NULL) |
1687 | /* though it's an empty new sock, the charging may fail | 1690 | /* though it's an empty new sock, the charging may fail |
1688 | * if sysctl_optmem_max was changed between creation of | 1691 | * if sysctl_optmem_max was changed between creation of |
1689 | * original socket and cloning | 1692 | * original socket and cloning |
1690 | */ | 1693 | */ |
1691 | is_charged = sk_filter_charge(newsk, filter); | 1694 | is_charged = sk_filter_charge(newsk, filter); |
1695 | RCU_INIT_POINTER(newsk->sk_filter, filter); | ||
1696 | rcu_read_unlock(); | ||
1692 | 1697 | ||
1693 | if (unlikely(!is_charged || xfrm_sk_clone_policy(newsk, sk))) { | 1698 | if (unlikely(!is_charged || xfrm_sk_clone_policy(newsk, sk))) { |
1694 | /* We need to make sure that we don't uncharge the new | 1699 | /* We need to make sure that we don't uncharge the new |
diff --git a/net/dsa/slave.c b/net/dsa/slave.c index 2afa99506f8b..865e29e62bad 100644 --- a/net/dsa/slave.c +++ b/net/dsa/slave.c | |||
@@ -1301,28 +1301,33 @@ int dsa_slave_create(struct dsa_port *port, const char *name) | |||
1301 | p->old_duplex = -1; | 1301 | p->old_duplex = -1; |
1302 | 1302 | ||
1303 | port->netdev = slave_dev; | 1303 | port->netdev = slave_dev; |
1304 | ret = register_netdev(slave_dev); | ||
1305 | if (ret) { | ||
1306 | netdev_err(master, "error %d registering interface %s\n", | ||
1307 | ret, slave_dev->name); | ||
1308 | port->netdev = NULL; | ||
1309 | free_percpu(p->stats64); | ||
1310 | free_netdev(slave_dev); | ||
1311 | return ret; | ||
1312 | } | ||
1313 | 1304 | ||
1314 | netif_carrier_off(slave_dev); | 1305 | netif_carrier_off(slave_dev); |
1315 | 1306 | ||
1316 | ret = dsa_slave_phy_setup(p, slave_dev); | 1307 | ret = dsa_slave_phy_setup(p, slave_dev); |
1317 | if (ret) { | 1308 | if (ret) { |
1318 | netdev_err(master, "error %d setting up slave phy\n", ret); | 1309 | netdev_err(master, "error %d setting up slave phy\n", ret); |
1319 | unregister_netdev(slave_dev); | 1310 | goto out_free; |
1320 | free_percpu(p->stats64); | 1311 | } |
1321 | free_netdev(slave_dev); | 1312 | |
1322 | return ret; | 1313 | ret = register_netdev(slave_dev); |
1314 | if (ret) { | ||
1315 | netdev_err(master, "error %d registering interface %s\n", | ||
1316 | ret, slave_dev->name); | ||
1317 | goto out_phy; | ||
1323 | } | 1318 | } |
1324 | 1319 | ||
1325 | return 0; | 1320 | return 0; |
1321 | |||
1322 | out_phy: | ||
1323 | phy_disconnect(p->phy); | ||
1324 | if (of_phy_is_fixed_link(p->dp->dn)) | ||
1325 | of_phy_deregister_fixed_link(p->dp->dn); | ||
1326 | out_free: | ||
1327 | free_percpu(p->stats64); | ||
1328 | free_netdev(slave_dev); | ||
1329 | port->netdev = NULL; | ||
1330 | return ret; | ||
1326 | } | 1331 | } |
1327 | 1332 | ||
1328 | void dsa_slave_destroy(struct net_device *slave_dev) | 1333 | void dsa_slave_destroy(struct net_device *slave_dev) |
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c index b9c64b40a83a..c039c937ba90 100644 --- a/net/ipv4/inet_connection_sock.c +++ b/net/ipv4/inet_connection_sock.c | |||
@@ -266,7 +266,7 @@ static inline int sk_reuseport_match(struct inet_bind_bucket *tb, | |||
266 | #if IS_ENABLED(CONFIG_IPV6) | 266 | #if IS_ENABLED(CONFIG_IPV6) |
267 | if (tb->fast_sk_family == AF_INET6) | 267 | if (tb->fast_sk_family == AF_INET6) |
268 | return ipv6_rcv_saddr_equal(&tb->fast_v6_rcv_saddr, | 268 | return ipv6_rcv_saddr_equal(&tb->fast_v6_rcv_saddr, |
269 | &sk->sk_v6_rcv_saddr, | 269 | inet6_rcv_saddr(sk), |
270 | tb->fast_rcv_saddr, | 270 | tb->fast_rcv_saddr, |
271 | sk->sk_rcv_saddr, | 271 | sk->sk_rcv_saddr, |
272 | tb->fast_ipv6_only, | 272 | tb->fast_ipv6_only, |
@@ -321,13 +321,14 @@ tb_found: | |||
321 | goto fail_unlock; | 321 | goto fail_unlock; |
322 | } | 322 | } |
323 | success: | 323 | success: |
324 | if (!hlist_empty(&tb->owners)) { | 324 | if (hlist_empty(&tb->owners)) { |
325 | tb->fastreuse = reuse; | 325 | tb->fastreuse = reuse; |
326 | if (sk->sk_reuseport) { | 326 | if (sk->sk_reuseport) { |
327 | tb->fastreuseport = FASTREUSEPORT_ANY; | 327 | tb->fastreuseport = FASTREUSEPORT_ANY; |
328 | tb->fastuid = uid; | 328 | tb->fastuid = uid; |
329 | tb->fast_rcv_saddr = sk->sk_rcv_saddr; | 329 | tb->fast_rcv_saddr = sk->sk_rcv_saddr; |
330 | tb->fast_ipv6_only = ipv6_only_sock(sk); | 330 | tb->fast_ipv6_only = ipv6_only_sock(sk); |
331 | tb->fast_sk_family = sk->sk_family; | ||
331 | #if IS_ENABLED(CONFIG_IPV6) | 332 | #if IS_ENABLED(CONFIG_IPV6) |
332 | tb->fast_v6_rcv_saddr = sk->sk_v6_rcv_saddr; | 333 | tb->fast_v6_rcv_saddr = sk->sk_v6_rcv_saddr; |
333 | #endif | 334 | #endif |
@@ -354,6 +355,7 @@ success: | |||
354 | tb->fastuid = uid; | 355 | tb->fastuid = uid; |
355 | tb->fast_rcv_saddr = sk->sk_rcv_saddr; | 356 | tb->fast_rcv_saddr = sk->sk_rcv_saddr; |
356 | tb->fast_ipv6_only = ipv6_only_sock(sk); | 357 | tb->fast_ipv6_only = ipv6_only_sock(sk); |
358 | tb->fast_sk_family = sk->sk_family; | ||
357 | #if IS_ENABLED(CONFIG_IPV6) | 359 | #if IS_ENABLED(CONFIG_IPV6) |
358 | tb->fast_v6_rcv_saddr = sk->sk_v6_rcv_saddr; | 360 | tb->fast_v6_rcv_saddr = sk->sk_v6_rcv_saddr; |
359 | #endif | 361 | #endif |
diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c index e7eb590c86ce..b20c8ac64081 100644 --- a/net/ipv4/inetpeer.c +++ b/net/ipv4/inetpeer.c | |||
@@ -128,9 +128,9 @@ static struct inet_peer *lookup(const struct inetpeer_addr *daddr, | |||
128 | break; | 128 | break; |
129 | } | 129 | } |
130 | if (cmp == -1) | 130 | if (cmp == -1) |
131 | pp = &(*pp)->rb_left; | 131 | pp = &next->rb_left; |
132 | else | 132 | else |
133 | pp = &(*pp)->rb_right; | 133 | pp = &next->rb_right; |
134 | } | 134 | } |
135 | *parent_p = parent; | 135 | *parent_p = parent; |
136 | *pp_p = pp; | 136 | *pp_p = pp; |
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c index 0162fb955b33..467e44d7587d 100644 --- a/net/ipv4/ip_gre.c +++ b/net/ipv4/ip_gre.c | |||
@@ -259,7 +259,6 @@ static int erspan_rcv(struct sk_buff *skb, struct tnl_ptk_info *tpi, | |||
259 | struct ip_tunnel *tunnel; | 259 | struct ip_tunnel *tunnel; |
260 | struct erspanhdr *ershdr; | 260 | struct erspanhdr *ershdr; |
261 | const struct iphdr *iph; | 261 | const struct iphdr *iph; |
262 | __be32 session_id; | ||
263 | __be32 index; | 262 | __be32 index; |
264 | int len; | 263 | int len; |
265 | 264 | ||
@@ -275,8 +274,7 @@ static int erspan_rcv(struct sk_buff *skb, struct tnl_ptk_info *tpi, | |||
275 | /* The original GRE header does not have key field, | 274 | /* The original GRE header does not have key field, |
276 | * Use ERSPAN 10-bit session ID as key. | 275 | * Use ERSPAN 10-bit session ID as key. |
277 | */ | 276 | */ |
278 | session_id = cpu_to_be32(ntohs(ershdr->session_id)); | 277 | tpi->key = cpu_to_be32(ntohs(ershdr->session_id) & ID_MASK); |
279 | tpi->key = session_id; | ||
280 | index = ershdr->md.index; | 278 | index = ershdr->md.index; |
281 | tunnel = ip_tunnel_lookup(itn, skb->dev->ifindex, | 279 | tunnel = ip_tunnel_lookup(itn, skb->dev->ifindex, |
282 | tpi->flags | TUNNEL_KEY, | 280 | tpi->flags | TUNNEL_KEY, |
@@ -733,7 +731,7 @@ static netdev_tx_t erspan_xmit(struct sk_buff *skb, | |||
733 | if (skb_cow_head(skb, dev->needed_headroom)) | 731 | if (skb_cow_head(skb, dev->needed_headroom)) |
734 | goto free_skb; | 732 | goto free_skb; |
735 | 733 | ||
736 | if (skb->len > dev->mtu) { | 734 | if (skb->len - dev->hard_header_len > dev->mtu) { |
737 | pskb_trim(skb, dev->mtu); | 735 | pskb_trim(skb, dev->mtu); |
738 | truncate = true; | 736 | truncate = true; |
739 | } | 737 | } |
@@ -1223,6 +1221,7 @@ static int gre_tap_init(struct net_device *dev) | |||
1223 | { | 1221 | { |
1224 | __gre_tunnel_init(dev); | 1222 | __gre_tunnel_init(dev); |
1225 | dev->priv_flags |= IFF_LIVE_ADDR_CHANGE; | 1223 | dev->priv_flags |= IFF_LIVE_ADDR_CHANGE; |
1224 | netif_keep_dst(dev); | ||
1226 | 1225 | ||
1227 | return ip_tunnel_init(dev); | 1226 | return ip_tunnel_init(dev); |
1228 | } | 1227 | } |
@@ -1246,13 +1245,16 @@ static int erspan_tunnel_init(struct net_device *dev) | |||
1246 | 1245 | ||
1247 | tunnel->tun_hlen = 8; | 1246 | tunnel->tun_hlen = 8; |
1248 | tunnel->parms.iph.protocol = IPPROTO_GRE; | 1247 | tunnel->parms.iph.protocol = IPPROTO_GRE; |
1249 | t_hlen = tunnel->hlen + sizeof(struct iphdr) + sizeof(struct erspanhdr); | 1248 | tunnel->hlen = tunnel->tun_hlen + tunnel->encap_hlen + |
1249 | sizeof(struct erspanhdr); | ||
1250 | t_hlen = tunnel->hlen + sizeof(struct iphdr); | ||
1250 | 1251 | ||
1251 | dev->needed_headroom = LL_MAX_HEADER + t_hlen + 4; | 1252 | dev->needed_headroom = LL_MAX_HEADER + t_hlen + 4; |
1252 | dev->mtu = ETH_DATA_LEN - t_hlen - 4; | 1253 | dev->mtu = ETH_DATA_LEN - t_hlen - 4; |
1253 | dev->features |= GRE_FEATURES; | 1254 | dev->features |= GRE_FEATURES; |
1254 | dev->hw_features |= GRE_FEATURES; | 1255 | dev->hw_features |= GRE_FEATURES; |
1255 | dev->priv_flags |= IFF_LIVE_ADDR_CHANGE; | 1256 | dev->priv_flags |= IFF_LIVE_ADDR_CHANGE; |
1257 | netif_keep_dst(dev); | ||
1256 | 1258 | ||
1257 | return ip_tunnel_init(dev); | 1259 | return ip_tunnel_init(dev); |
1258 | } | 1260 | } |
diff --git a/net/ipv4/ip_input.c b/net/ipv4/ip_input.c index fa2dc8f692c6..57fc13c6ab2b 100644 --- a/net/ipv4/ip_input.c +++ b/net/ipv4/ip_input.c | |||
@@ -311,9 +311,10 @@ drop: | |||
311 | static int ip_rcv_finish(struct net *net, struct sock *sk, struct sk_buff *skb) | 311 | static int ip_rcv_finish(struct net *net, struct sock *sk, struct sk_buff *skb) |
312 | { | 312 | { |
313 | const struct iphdr *iph = ip_hdr(skb); | 313 | const struct iphdr *iph = ip_hdr(skb); |
314 | struct rtable *rt; | 314 | int (*edemux)(struct sk_buff *skb); |
315 | struct net_device *dev = skb->dev; | 315 | struct net_device *dev = skb->dev; |
316 | void (*edemux)(struct sk_buff *skb); | 316 | struct rtable *rt; |
317 | int err; | ||
317 | 318 | ||
318 | /* if ingress device is enslaved to an L3 master device pass the | 319 | /* if ingress device is enslaved to an L3 master device pass the |
319 | * skb to its handler for processing | 320 | * skb to its handler for processing |
@@ -331,7 +332,9 @@ static int ip_rcv_finish(struct net *net, struct sock *sk, struct sk_buff *skb) | |||
331 | 332 | ||
332 | ipprot = rcu_dereference(inet_protos[protocol]); | 333 | ipprot = rcu_dereference(inet_protos[protocol]); |
333 | if (ipprot && (edemux = READ_ONCE(ipprot->early_demux))) { | 334 | if (ipprot && (edemux = READ_ONCE(ipprot->early_demux))) { |
334 | edemux(skb); | 335 | err = edemux(skb); |
336 | if (unlikely(err)) | ||
337 | goto drop_error; | ||
335 | /* must reload iph, skb->head might have changed */ | 338 | /* must reload iph, skb->head might have changed */ |
336 | iph = ip_hdr(skb); | 339 | iph = ip_hdr(skb); |
337 | } | 340 | } |
@@ -342,13 +345,10 @@ static int ip_rcv_finish(struct net *net, struct sock *sk, struct sk_buff *skb) | |||
342 | * how the packet travels inside Linux networking. | 345 | * how the packet travels inside Linux networking. |
343 | */ | 346 | */ |
344 | if (!skb_valid_dst(skb)) { | 347 | if (!skb_valid_dst(skb)) { |
345 | int err = ip_route_input_noref(skb, iph->daddr, iph->saddr, | 348 | err = ip_route_input_noref(skb, iph->daddr, iph->saddr, |
346 | iph->tos, dev); | 349 | iph->tos, dev); |
347 | if (unlikely(err)) { | 350 | if (unlikely(err)) |
348 | if (err == -EXDEV) | 351 | goto drop_error; |
349 | __NET_INC_STATS(net, LINUX_MIB_IPRPFILTER); | ||
350 | goto drop; | ||
351 | } | ||
352 | } | 352 | } |
353 | 353 | ||
354 | #ifdef CONFIG_IP_ROUTE_CLASSID | 354 | #ifdef CONFIG_IP_ROUTE_CLASSID |
@@ -399,6 +399,11 @@ static int ip_rcv_finish(struct net *net, struct sock *sk, struct sk_buff *skb) | |||
399 | drop: | 399 | drop: |
400 | kfree_skb(skb); | 400 | kfree_skb(skb); |
401 | return NET_RX_DROP; | 401 | return NET_RX_DROP; |
402 | |||
403 | drop_error: | ||
404 | if (err == -EXDEV) | ||
405 | __NET_INC_STATS(net, LINUX_MIB_IPRPFILTER); | ||
406 | goto drop; | ||
402 | } | 407 | } |
403 | 408 | ||
404 | /* | 409 | /* |
diff --git a/net/ipv4/ip_vti.c b/net/ipv4/ip_vti.c index 5ed63d250950..89453cf62158 100644 --- a/net/ipv4/ip_vti.c +++ b/net/ipv4/ip_vti.c | |||
@@ -168,6 +168,7 @@ static netdev_tx_t vti_xmit(struct sk_buff *skb, struct net_device *dev, | |||
168 | struct ip_tunnel_parm *parms = &tunnel->parms; | 168 | struct ip_tunnel_parm *parms = &tunnel->parms; |
169 | struct dst_entry *dst = skb_dst(skb); | 169 | struct dst_entry *dst = skb_dst(skb); |
170 | struct net_device *tdev; /* Device to other host */ | 170 | struct net_device *tdev; /* Device to other host */ |
171 | int pkt_len = skb->len; | ||
171 | int err; | 172 | int err; |
172 | int mtu; | 173 | int mtu; |
173 | 174 | ||
@@ -229,7 +230,7 @@ static netdev_tx_t vti_xmit(struct sk_buff *skb, struct net_device *dev, | |||
229 | 230 | ||
230 | err = dst_output(tunnel->net, skb->sk, skb); | 231 | err = dst_output(tunnel->net, skb->sk, skb); |
231 | if (net_xmit_eval(err) == 0) | 232 | if (net_xmit_eval(err) == 0) |
232 | err = skb->len; | 233 | err = pkt_len; |
233 | iptunnel_xmit_stats(dev, err); | 234 | iptunnel_xmit_stats(dev, err); |
234 | return NETDEV_TX_OK; | 235 | return NETDEV_TX_OK; |
235 | 236 | ||
diff --git a/net/ipv4/route.c b/net/ipv4/route.c index 94d4cd2d5ea4..ac6fde5d45f1 100644 --- a/net/ipv4/route.c +++ b/net/ipv4/route.c | |||
@@ -1520,43 +1520,56 @@ struct rtable *rt_dst_alloc(struct net_device *dev, | |||
1520 | EXPORT_SYMBOL(rt_dst_alloc); | 1520 | EXPORT_SYMBOL(rt_dst_alloc); |
1521 | 1521 | ||
1522 | /* called in rcu_read_lock() section */ | 1522 | /* called in rcu_read_lock() section */ |
1523 | static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr, | 1523 | int ip_mc_validate_source(struct sk_buff *skb, __be32 daddr, __be32 saddr, |
1524 | u8 tos, struct net_device *dev, int our) | 1524 | u8 tos, struct net_device *dev, |
1525 | struct in_device *in_dev, u32 *itag) | ||
1525 | { | 1526 | { |
1526 | struct rtable *rth; | ||
1527 | struct in_device *in_dev = __in_dev_get_rcu(dev); | ||
1528 | unsigned int flags = RTCF_MULTICAST; | ||
1529 | u32 itag = 0; | ||
1530 | int err; | 1527 | int err; |
1531 | 1528 | ||
1532 | /* Primary sanity checks. */ | 1529 | /* Primary sanity checks. */ |
1533 | |||
1534 | if (!in_dev) | 1530 | if (!in_dev) |
1535 | return -EINVAL; | 1531 | return -EINVAL; |
1536 | 1532 | ||
1537 | if (ipv4_is_multicast(saddr) || ipv4_is_lbcast(saddr) || | 1533 | if (ipv4_is_multicast(saddr) || ipv4_is_lbcast(saddr) || |
1538 | skb->protocol != htons(ETH_P_IP)) | 1534 | skb->protocol != htons(ETH_P_IP)) |
1539 | goto e_inval; | 1535 | return -EINVAL; |
1540 | 1536 | ||
1541 | if (ipv4_is_loopback(saddr) && !IN_DEV_ROUTE_LOCALNET(in_dev)) | 1537 | if (ipv4_is_loopback(saddr) && !IN_DEV_ROUTE_LOCALNET(in_dev)) |
1542 | goto e_inval; | 1538 | return -EINVAL; |
1543 | 1539 | ||
1544 | if (ipv4_is_zeronet(saddr)) { | 1540 | if (ipv4_is_zeronet(saddr)) { |
1545 | if (!ipv4_is_local_multicast(daddr)) | 1541 | if (!ipv4_is_local_multicast(daddr)) |
1546 | goto e_inval; | 1542 | return -EINVAL; |
1547 | } else { | 1543 | } else { |
1548 | err = fib_validate_source(skb, saddr, 0, tos, 0, dev, | 1544 | err = fib_validate_source(skb, saddr, 0, tos, 0, dev, |
1549 | in_dev, &itag); | 1545 | in_dev, itag); |
1550 | if (err < 0) | 1546 | if (err < 0) |
1551 | goto e_err; | 1547 | return err; |
1552 | } | 1548 | } |
1549 | return 0; | ||
1550 | } | ||
1551 | |||
1552 | /* called in rcu_read_lock() section */ | ||
1553 | static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr, | ||
1554 | u8 tos, struct net_device *dev, int our) | ||
1555 | { | ||
1556 | struct in_device *in_dev = __in_dev_get_rcu(dev); | ||
1557 | unsigned int flags = RTCF_MULTICAST; | ||
1558 | struct rtable *rth; | ||
1559 | u32 itag = 0; | ||
1560 | int err; | ||
1561 | |||
1562 | err = ip_mc_validate_source(skb, daddr, saddr, tos, dev, in_dev, &itag); | ||
1563 | if (err) | ||
1564 | return err; | ||
1565 | |||
1553 | if (our) | 1566 | if (our) |
1554 | flags |= RTCF_LOCAL; | 1567 | flags |= RTCF_LOCAL; |
1555 | 1568 | ||
1556 | rth = rt_dst_alloc(dev_net(dev)->loopback_dev, flags, RTN_MULTICAST, | 1569 | rth = rt_dst_alloc(dev_net(dev)->loopback_dev, flags, RTN_MULTICAST, |
1557 | IN_DEV_CONF_GET(in_dev, NOPOLICY), false, false); | 1570 | IN_DEV_CONF_GET(in_dev, NOPOLICY), false, false); |
1558 | if (!rth) | 1571 | if (!rth) |
1559 | goto e_nobufs; | 1572 | return -ENOBUFS; |
1560 | 1573 | ||
1561 | #ifdef CONFIG_IP_ROUTE_CLASSID | 1574 | #ifdef CONFIG_IP_ROUTE_CLASSID |
1562 | rth->dst.tclassid = itag; | 1575 | rth->dst.tclassid = itag; |
@@ -1572,13 +1585,6 @@ static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr, | |||
1572 | 1585 | ||
1573 | skb_dst_set(skb, &rth->dst); | 1586 | skb_dst_set(skb, &rth->dst); |
1574 | return 0; | 1587 | return 0; |
1575 | |||
1576 | e_nobufs: | ||
1577 | return -ENOBUFS; | ||
1578 | e_inval: | ||
1579 | return -EINVAL; | ||
1580 | e_err: | ||
1581 | return err; | ||
1582 | } | 1588 | } |
1583 | 1589 | ||
1584 | 1590 | ||
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index d9416b5162bc..85164d4d3e53 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c | |||
@@ -1503,23 +1503,23 @@ csum_err: | |||
1503 | } | 1503 | } |
1504 | EXPORT_SYMBOL(tcp_v4_do_rcv); | 1504 | EXPORT_SYMBOL(tcp_v4_do_rcv); |
1505 | 1505 | ||
1506 | void tcp_v4_early_demux(struct sk_buff *skb) | 1506 | int tcp_v4_early_demux(struct sk_buff *skb) |
1507 | { | 1507 | { |
1508 | const struct iphdr *iph; | 1508 | const struct iphdr *iph; |
1509 | const struct tcphdr *th; | 1509 | const struct tcphdr *th; |
1510 | struct sock *sk; | 1510 | struct sock *sk; |
1511 | 1511 | ||
1512 | if (skb->pkt_type != PACKET_HOST) | 1512 | if (skb->pkt_type != PACKET_HOST) |
1513 | return; | 1513 | return 0; |
1514 | 1514 | ||
1515 | if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct tcphdr))) | 1515 | if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct tcphdr))) |
1516 | return; | 1516 | return 0; |
1517 | 1517 | ||
1518 | iph = ip_hdr(skb); | 1518 | iph = ip_hdr(skb); |
1519 | th = tcp_hdr(skb); | 1519 | th = tcp_hdr(skb); |
1520 | 1520 | ||
1521 | if (th->doff < sizeof(struct tcphdr) / 4) | 1521 | if (th->doff < sizeof(struct tcphdr) / 4) |
1522 | return; | 1522 | return 0; |
1523 | 1523 | ||
1524 | sk = __inet_lookup_established(dev_net(skb->dev), &tcp_hashinfo, | 1524 | sk = __inet_lookup_established(dev_net(skb->dev), &tcp_hashinfo, |
1525 | iph->saddr, th->source, | 1525 | iph->saddr, th->source, |
@@ -1538,6 +1538,7 @@ void tcp_v4_early_demux(struct sk_buff *skb) | |||
1538 | skb_dst_set_noref(skb, dst); | 1538 | skb_dst_set_noref(skb, dst); |
1539 | } | 1539 | } |
1540 | } | 1540 | } |
1541 | return 0; | ||
1541 | } | 1542 | } |
1542 | 1543 | ||
1543 | bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb) | 1544 | bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb) |
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index 1c839c99114c..0bc9e46a5369 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c | |||
@@ -1806,40 +1806,6 @@ static bool tcp_snd_wnd_test(const struct tcp_sock *tp, | |||
1806 | return !after(end_seq, tcp_wnd_end(tp)); | 1806 | return !after(end_seq, tcp_wnd_end(tp)); |
1807 | } | 1807 | } |
1808 | 1808 | ||
1809 | /* This checks if the data bearing packet SKB (usually tcp_send_head(sk)) | ||
1810 | * should be put on the wire right now. If so, it returns the number of | ||
1811 | * packets allowed by the congestion window. | ||
1812 | */ | ||
1813 | static unsigned int tcp_snd_test(const struct sock *sk, struct sk_buff *skb, | ||
1814 | unsigned int cur_mss, int nonagle) | ||
1815 | { | ||
1816 | const struct tcp_sock *tp = tcp_sk(sk); | ||
1817 | unsigned int cwnd_quota; | ||
1818 | |||
1819 | tcp_init_tso_segs(skb, cur_mss); | ||
1820 | |||
1821 | if (!tcp_nagle_test(tp, skb, cur_mss, nonagle)) | ||
1822 | return 0; | ||
1823 | |||
1824 | cwnd_quota = tcp_cwnd_test(tp, skb); | ||
1825 | if (cwnd_quota && !tcp_snd_wnd_test(tp, skb, cur_mss)) | ||
1826 | cwnd_quota = 0; | ||
1827 | |||
1828 | return cwnd_quota; | ||
1829 | } | ||
1830 | |||
1831 | /* Test if sending is allowed right now. */ | ||
1832 | bool tcp_may_send_now(struct sock *sk) | ||
1833 | { | ||
1834 | const struct tcp_sock *tp = tcp_sk(sk); | ||
1835 | struct sk_buff *skb = tcp_send_head(sk); | ||
1836 | |||
1837 | return skb && | ||
1838 | tcp_snd_test(sk, skb, tcp_current_mss(sk), | ||
1839 | (tcp_skb_is_last(sk, skb) ? | ||
1840 | tp->nonagle : TCP_NAGLE_PUSH)); | ||
1841 | } | ||
1842 | |||
1843 | /* Trim TSO SKB to LEN bytes, put the remaining data into a new packet | 1809 | /* Trim TSO SKB to LEN bytes, put the remaining data into a new packet |
1844 | * which is put after SKB on the list. It is very much like | 1810 | * which is put after SKB on the list. It is very much like |
1845 | * tcp_fragment() except that it may make several kinds of assumptions | 1811 | * tcp_fragment() except that it may make several kinds of assumptions |
@@ -3423,6 +3389,10 @@ static int tcp_send_syn_data(struct sock *sk, struct sk_buff *syn) | |||
3423 | goto done; | 3389 | goto done; |
3424 | } | 3390 | } |
3425 | 3391 | ||
3392 | /* data was not sent, this is our new send_head */ | ||
3393 | sk->sk_send_head = syn_data; | ||
3394 | tp->packets_out -= tcp_skb_pcount(syn_data); | ||
3395 | |||
3426 | fallback: | 3396 | fallback: |
3427 | /* Send a regular SYN with Fast Open cookie request option */ | 3397 | /* Send a regular SYN with Fast Open cookie request option */ |
3428 | if (fo->cookie.len > 0) | 3398 | if (fo->cookie.len > 0) |
@@ -3475,6 +3445,11 @@ int tcp_connect(struct sock *sk) | |||
3475 | */ | 3445 | */ |
3476 | tp->snd_nxt = tp->write_seq; | 3446 | tp->snd_nxt = tp->write_seq; |
3477 | tp->pushed_seq = tp->write_seq; | 3447 | tp->pushed_seq = tp->write_seq; |
3448 | buff = tcp_send_head(sk); | ||
3449 | if (unlikely(buff)) { | ||
3450 | tp->snd_nxt = TCP_SKB_CB(buff)->seq; | ||
3451 | tp->pushed_seq = TCP_SKB_CB(buff)->seq; | ||
3452 | } | ||
3478 | TCP_INC_STATS(sock_net(sk), TCP_MIB_ACTIVEOPENS); | 3453 | TCP_INC_STATS(sock_net(sk), TCP_MIB_ACTIVEOPENS); |
3479 | 3454 | ||
3480 | /* Timer for repeating the SYN until an answer. */ | 3455 | /* Timer for repeating the SYN until an answer. */ |
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c index ef29df8648e4..5676237d2b0f 100644 --- a/net/ipv4/udp.c +++ b/net/ipv4/udp.c | |||
@@ -2221,9 +2221,10 @@ static struct sock *__udp4_lib_demux_lookup(struct net *net, | |||
2221 | return NULL; | 2221 | return NULL; |
2222 | } | 2222 | } |
2223 | 2223 | ||
2224 | void udp_v4_early_demux(struct sk_buff *skb) | 2224 | int udp_v4_early_demux(struct sk_buff *skb) |
2225 | { | 2225 | { |
2226 | struct net *net = dev_net(skb->dev); | 2226 | struct net *net = dev_net(skb->dev); |
2227 | struct in_device *in_dev = NULL; | ||
2227 | const struct iphdr *iph; | 2228 | const struct iphdr *iph; |
2228 | const struct udphdr *uh; | 2229 | const struct udphdr *uh; |
2229 | struct sock *sk = NULL; | 2230 | struct sock *sk = NULL; |
@@ -2234,24 +2235,24 @@ void udp_v4_early_demux(struct sk_buff *skb) | |||
2234 | 2235 | ||
2235 | /* validate the packet */ | 2236 | /* validate the packet */ |
2236 | if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct udphdr))) | 2237 | if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct udphdr))) |
2237 | return; | 2238 | return 0; |
2238 | 2239 | ||
2239 | iph = ip_hdr(skb); | 2240 | iph = ip_hdr(skb); |
2240 | uh = udp_hdr(skb); | 2241 | uh = udp_hdr(skb); |
2241 | 2242 | ||
2242 | if (skb->pkt_type == PACKET_BROADCAST || | 2243 | if (skb->pkt_type == PACKET_BROADCAST || |
2243 | skb->pkt_type == PACKET_MULTICAST) { | 2244 | skb->pkt_type == PACKET_MULTICAST) { |
2244 | struct in_device *in_dev = __in_dev_get_rcu(skb->dev); | 2245 | in_dev = __in_dev_get_rcu(skb->dev); |
2245 | 2246 | ||
2246 | if (!in_dev) | 2247 | if (!in_dev) |
2247 | return; | 2248 | return 0; |
2248 | 2249 | ||
2249 | /* we are supposed to accept bcast packets */ | 2250 | /* we are supposed to accept bcast packets */ |
2250 | if (skb->pkt_type == PACKET_MULTICAST) { | 2251 | if (skb->pkt_type == PACKET_MULTICAST) { |
2251 | ours = ip_check_mc_rcu(in_dev, iph->daddr, iph->saddr, | 2252 | ours = ip_check_mc_rcu(in_dev, iph->daddr, iph->saddr, |
2252 | iph->protocol); | 2253 | iph->protocol); |
2253 | if (!ours) | 2254 | if (!ours) |
2254 | return; | 2255 | return 0; |
2255 | } | 2256 | } |
2256 | 2257 | ||
2257 | sk = __udp4_lib_mcast_demux_lookup(net, uh->dest, iph->daddr, | 2258 | sk = __udp4_lib_mcast_demux_lookup(net, uh->dest, iph->daddr, |
@@ -2263,7 +2264,7 @@ void udp_v4_early_demux(struct sk_buff *skb) | |||
2263 | } | 2264 | } |
2264 | 2265 | ||
2265 | if (!sk || !refcount_inc_not_zero(&sk->sk_refcnt)) | 2266 | if (!sk || !refcount_inc_not_zero(&sk->sk_refcnt)) |
2266 | return; | 2267 | return 0; |
2267 | 2268 | ||
2268 | skb->sk = sk; | 2269 | skb->sk = sk; |
2269 | skb->destructor = sock_efree; | 2270 | skb->destructor = sock_efree; |
@@ -2272,12 +2273,23 @@ void udp_v4_early_demux(struct sk_buff *skb) | |||
2272 | if (dst) | 2273 | if (dst) |
2273 | dst = dst_check(dst, 0); | 2274 | dst = dst_check(dst, 0); |
2274 | if (dst) { | 2275 | if (dst) { |
2276 | u32 itag = 0; | ||
2277 | |||
2275 | /* set noref for now. | 2278 | /* set noref for now. |
2276 | * any place which wants to hold dst has to call | 2279 | * any place which wants to hold dst has to call |
2277 | * dst_hold_safe() | 2280 | * dst_hold_safe() |
2278 | */ | 2281 | */ |
2279 | skb_dst_set_noref(skb, dst); | 2282 | skb_dst_set_noref(skb, dst); |
2283 | |||
2284 | /* for unconnected multicast sockets we need to validate | ||
2285 | * the source on each packet | ||
2286 | */ | ||
2287 | if (!inet_sk(sk)->inet_daddr && in_dev) | ||
2288 | return ip_mc_validate_source(skb, iph->daddr, | ||
2289 | iph->saddr, iph->tos, | ||
2290 | skb->dev, in_dev, &itag); | ||
2280 | } | 2291 | } |
2292 | return 0; | ||
2281 | } | 2293 | } |
2282 | 2294 | ||
2283 | int udp_rcv(struct sk_buff *skb) | 2295 | int udp_rcv(struct sk_buff *skb) |
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index c2e2a78787ec..96861c702c06 100644 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c | |||
@@ -1399,10 +1399,18 @@ static inline int ipv6_saddr_preferred(int type) | |||
1399 | return 0; | 1399 | return 0; |
1400 | } | 1400 | } |
1401 | 1401 | ||
1402 | static inline bool ipv6_use_optimistic_addr(struct inet6_dev *idev) | 1402 | static bool ipv6_use_optimistic_addr(struct net *net, |
1403 | struct inet6_dev *idev) | ||
1403 | { | 1404 | { |
1404 | #ifdef CONFIG_IPV6_OPTIMISTIC_DAD | 1405 | #ifdef CONFIG_IPV6_OPTIMISTIC_DAD |
1405 | return idev && idev->cnf.optimistic_dad && idev->cnf.use_optimistic; | 1406 | if (!idev) |
1407 | return false; | ||
1408 | if (!net->ipv6.devconf_all->optimistic_dad && !idev->cnf.optimistic_dad) | ||
1409 | return false; | ||
1410 | if (!net->ipv6.devconf_all->use_optimistic && !idev->cnf.use_optimistic) | ||
1411 | return false; | ||
1412 | |||
1413 | return true; | ||
1406 | #else | 1414 | #else |
1407 | return false; | 1415 | return false; |
1408 | #endif | 1416 | #endif |
@@ -1472,7 +1480,7 @@ static int ipv6_get_saddr_eval(struct net *net, | |||
1472 | /* Rule 3: Avoid deprecated and optimistic addresses */ | 1480 | /* Rule 3: Avoid deprecated and optimistic addresses */ |
1473 | u8 avoid = IFA_F_DEPRECATED; | 1481 | u8 avoid = IFA_F_DEPRECATED; |
1474 | 1482 | ||
1475 | if (!ipv6_use_optimistic_addr(score->ifa->idev)) | 1483 | if (!ipv6_use_optimistic_addr(net, score->ifa->idev)) |
1476 | avoid |= IFA_F_OPTIMISTIC; | 1484 | avoid |= IFA_F_OPTIMISTIC; |
1477 | ret = ipv6_saddr_preferred(score->addr_type) || | 1485 | ret = ipv6_saddr_preferred(score->addr_type) || |
1478 | !(score->ifa->flags & avoid); | 1486 | !(score->ifa->flags & avoid); |
@@ -2460,7 +2468,8 @@ int addrconf_prefix_rcv_add_addr(struct net *net, struct net_device *dev, | |||
2460 | int max_addresses = in6_dev->cnf.max_addresses; | 2468 | int max_addresses = in6_dev->cnf.max_addresses; |
2461 | 2469 | ||
2462 | #ifdef CONFIG_IPV6_OPTIMISTIC_DAD | 2470 | #ifdef CONFIG_IPV6_OPTIMISTIC_DAD |
2463 | if (in6_dev->cnf.optimistic_dad && | 2471 | if ((net->ipv6.devconf_all->optimistic_dad || |
2472 | in6_dev->cnf.optimistic_dad) && | ||
2464 | !net->ipv6.devconf_all->forwarding && sllao) | 2473 | !net->ipv6.devconf_all->forwarding && sllao) |
2465 | addr_flags |= IFA_F_OPTIMISTIC; | 2474 | addr_flags |= IFA_F_OPTIMISTIC; |
2466 | #endif | 2475 | #endif |
@@ -3051,7 +3060,8 @@ void addrconf_add_linklocal(struct inet6_dev *idev, | |||
3051 | u32 addr_flags = flags | IFA_F_PERMANENT; | 3060 | u32 addr_flags = flags | IFA_F_PERMANENT; |
3052 | 3061 | ||
3053 | #ifdef CONFIG_IPV6_OPTIMISTIC_DAD | 3062 | #ifdef CONFIG_IPV6_OPTIMISTIC_DAD |
3054 | if (idev->cnf.optimistic_dad && | 3063 | if ((dev_net(idev->dev)->ipv6.devconf_all->optimistic_dad || |
3064 | idev->cnf.optimistic_dad) && | ||
3055 | !dev_net(idev->dev)->ipv6.devconf_all->forwarding) | 3065 | !dev_net(idev->dev)->ipv6.devconf_all->forwarding) |
3056 | addr_flags |= IFA_F_OPTIMISTIC; | 3066 | addr_flags |= IFA_F_OPTIMISTIC; |
3057 | #endif | 3067 | #endif |
@@ -3810,6 +3820,7 @@ static void addrconf_dad_begin(struct inet6_ifaddr *ifp) | |||
3810 | goto out; | 3820 | goto out; |
3811 | 3821 | ||
3812 | if (dev->flags&(IFF_NOARP|IFF_LOOPBACK) || | 3822 | if (dev->flags&(IFF_NOARP|IFF_LOOPBACK) || |
3823 | dev_net(dev)->ipv6.devconf_all->accept_dad < 1 || | ||
3813 | idev->cnf.accept_dad < 1 || | 3824 | idev->cnf.accept_dad < 1 || |
3814 | !(ifp->flags&IFA_F_TENTATIVE) || | 3825 | !(ifp->flags&IFA_F_TENTATIVE) || |
3815 | ifp->flags & IFA_F_NODAD) { | 3826 | ifp->flags & IFA_F_NODAD) { |
@@ -3841,7 +3852,7 @@ static void addrconf_dad_begin(struct inet6_ifaddr *ifp) | |||
3841 | */ | 3852 | */ |
3842 | if (ifp->flags & IFA_F_OPTIMISTIC) { | 3853 | if (ifp->flags & IFA_F_OPTIMISTIC) { |
3843 | ip6_ins_rt(ifp->rt); | 3854 | ip6_ins_rt(ifp->rt); |
3844 | if (ipv6_use_optimistic_addr(idev)) { | 3855 | if (ipv6_use_optimistic_addr(dev_net(dev), idev)) { |
3845 | /* Because optimistic nodes can use this address, | 3856 | /* Because optimistic nodes can use this address, |
3846 | * notify listeners. If DAD fails, RTM_DELADDR is sent. | 3857 | * notify listeners. If DAD fails, RTM_DELADDR is sent. |
3847 | */ | 3858 | */ |
@@ -3897,7 +3908,9 @@ static void addrconf_dad_work(struct work_struct *w) | |||
3897 | action = DAD_ABORT; | 3908 | action = DAD_ABORT; |
3898 | ifp->state = INET6_IFADDR_STATE_POSTDAD; | 3909 | ifp->state = INET6_IFADDR_STATE_POSTDAD; |
3899 | 3910 | ||
3900 | if (idev->cnf.accept_dad > 1 && !idev->cnf.disable_ipv6 && | 3911 | if ((dev_net(idev->dev)->ipv6.devconf_all->accept_dad > 1 || |
3912 | idev->cnf.accept_dad > 1) && | ||
3913 | !idev->cnf.disable_ipv6 && | ||
3901 | !(ifp->flags & IFA_F_STABLE_PRIVACY)) { | 3914 | !(ifp->flags & IFA_F_STABLE_PRIVACY)) { |
3902 | struct in6_addr addr; | 3915 | struct in6_addr addr; |
3903 | 3916 | ||
@@ -4940,9 +4953,10 @@ static void inet6_ifa_notify(int event, struct inet6_ifaddr *ifa) | |||
4940 | 4953 | ||
4941 | /* Don't send DELADDR notification for TENTATIVE address, | 4954 | /* Don't send DELADDR notification for TENTATIVE address, |
4942 | * since NEWADDR notification is sent only after removing | 4955 | * since NEWADDR notification is sent only after removing |
4943 | * TENTATIVE flag. | 4956 | * TENTATIVE flag, if DAD has not failed. |
4944 | */ | 4957 | */ |
4945 | if (ifa->flags & IFA_F_TENTATIVE && event == RTM_DELADDR) | 4958 | if (ifa->flags & IFA_F_TENTATIVE && !(ifa->flags & IFA_F_DADFAILED) && |
4959 | event == RTM_DELADDR) | ||
4946 | return; | 4960 | return; |
4947 | 4961 | ||
4948 | skb = nlmsg_new(inet6_ifaddr_msgsize(), GFP_ATOMIC); | 4962 | skb = nlmsg_new(inet6_ifaddr_msgsize(), GFP_ATOMIC); |
diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c index b7a72d409334..1602b491b281 100644 --- a/net/ipv6/ip6_gre.c +++ b/net/ipv6/ip6_gre.c | |||
@@ -940,24 +940,25 @@ done: | |||
940 | } | 940 | } |
941 | 941 | ||
942 | static int ip6gre_header(struct sk_buff *skb, struct net_device *dev, | 942 | static int ip6gre_header(struct sk_buff *skb, struct net_device *dev, |
943 | unsigned short type, | 943 | unsigned short type, const void *daddr, |
944 | const void *daddr, const void *saddr, unsigned int len) | 944 | const void *saddr, unsigned int len) |
945 | { | 945 | { |
946 | struct ip6_tnl *t = netdev_priv(dev); | 946 | struct ip6_tnl *t = netdev_priv(dev); |
947 | struct ipv6hdr *ipv6h = skb_push(skb, t->hlen); | 947 | struct ipv6hdr *ipv6h; |
948 | __be16 *p = (__be16 *)(ipv6h+1); | 948 | __be16 *p; |
949 | 949 | ||
950 | ip6_flow_hdr(ipv6h, 0, | 950 | ipv6h = skb_push(skb, t->hlen + sizeof(*ipv6h)); |
951 | ip6_make_flowlabel(dev_net(dev), skb, | 951 | ip6_flow_hdr(ipv6h, 0, ip6_make_flowlabel(dev_net(dev), skb, |
952 | t->fl.u.ip6.flowlabel, true, | 952 | t->fl.u.ip6.flowlabel, |
953 | &t->fl.u.ip6)); | 953 | true, &t->fl.u.ip6)); |
954 | ipv6h->hop_limit = t->parms.hop_limit; | 954 | ipv6h->hop_limit = t->parms.hop_limit; |
955 | ipv6h->nexthdr = NEXTHDR_GRE; | 955 | ipv6h->nexthdr = NEXTHDR_GRE; |
956 | ipv6h->saddr = t->parms.laddr; | 956 | ipv6h->saddr = t->parms.laddr; |
957 | ipv6h->daddr = t->parms.raddr; | 957 | ipv6h->daddr = t->parms.raddr; |
958 | 958 | ||
959 | p[0] = t->parms.o_flags; | 959 | p = (__be16 *)(ipv6h + 1); |
960 | p[1] = htons(type); | 960 | p[0] = t->parms.o_flags; |
961 | p[1] = htons(type); | ||
961 | 962 | ||
962 | /* | 963 | /* |
963 | * Set the source hardware address. | 964 | * Set the source hardware address. |
@@ -1310,6 +1311,7 @@ static void ip6gre_tap_setup(struct net_device *dev) | |||
1310 | dev->features |= NETIF_F_NETNS_LOCAL; | 1311 | dev->features |= NETIF_F_NETNS_LOCAL; |
1311 | dev->priv_flags &= ~IFF_TX_SKB_SHARING; | 1312 | dev->priv_flags &= ~IFF_TX_SKB_SHARING; |
1312 | dev->priv_flags |= IFF_LIVE_ADDR_CHANGE; | 1313 | dev->priv_flags |= IFF_LIVE_ADDR_CHANGE; |
1314 | netif_keep_dst(dev); | ||
1313 | } | 1315 | } |
1314 | 1316 | ||
1315 | static bool ip6gre_netlink_encap_parms(struct nlattr *data[], | 1317 | static bool ip6gre_netlink_encap_parms(struct nlattr *data[], |
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c index ae73164559d5..a1c24443cd9e 100644 --- a/net/ipv6/ip6_tunnel.c +++ b/net/ipv6/ip6_tunnel.c | |||
@@ -1043,6 +1043,7 @@ int ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev, __u8 dsfield, | |||
1043 | struct dst_entry *dst = NULL, *ndst = NULL; | 1043 | struct dst_entry *dst = NULL, *ndst = NULL; |
1044 | struct net_device *tdev; | 1044 | struct net_device *tdev; |
1045 | int mtu; | 1045 | int mtu; |
1046 | unsigned int eth_hlen = t->dev->type == ARPHRD_ETHER ? ETH_HLEN : 0; | ||
1046 | unsigned int psh_hlen = sizeof(struct ipv6hdr) + t->encap_hlen; | 1047 | unsigned int psh_hlen = sizeof(struct ipv6hdr) + t->encap_hlen; |
1047 | unsigned int max_headroom = psh_hlen; | 1048 | unsigned int max_headroom = psh_hlen; |
1048 | bool use_cache = false; | 1049 | bool use_cache = false; |
@@ -1124,7 +1125,7 @@ route_lookup: | |||
1124 | t->parms.name); | 1125 | t->parms.name); |
1125 | goto tx_err_dst_release; | 1126 | goto tx_err_dst_release; |
1126 | } | 1127 | } |
1127 | mtu = dst_mtu(dst) - psh_hlen - t->tun_hlen; | 1128 | mtu = dst_mtu(dst) - eth_hlen - psh_hlen - t->tun_hlen; |
1128 | if (encap_limit >= 0) { | 1129 | if (encap_limit >= 0) { |
1129 | max_headroom += 8; | 1130 | max_headroom += 8; |
1130 | mtu -= 8; | 1131 | mtu -= 8; |
@@ -1133,7 +1134,7 @@ route_lookup: | |||
1133 | mtu = IPV6_MIN_MTU; | 1134 | mtu = IPV6_MIN_MTU; |
1134 | if (skb_dst(skb) && !t->parms.collect_md) | 1135 | if (skb_dst(skb) && !t->parms.collect_md) |
1135 | skb_dst(skb)->ops->update_pmtu(skb_dst(skb), NULL, skb, mtu); | 1136 | skb_dst(skb)->ops->update_pmtu(skb_dst(skb), NULL, skb, mtu); |
1136 | if (skb->len - t->tun_hlen > mtu && !skb_is_gso(skb)) { | 1137 | if (skb->len - t->tun_hlen - eth_hlen > mtu && !skb_is_gso(skb)) { |
1137 | *pmtu = mtu; | 1138 | *pmtu = mtu; |
1138 | err = -EMSGSIZE; | 1139 | err = -EMSGSIZE; |
1139 | goto tx_err_dst_release; | 1140 | goto tx_err_dst_release; |
@@ -2259,6 +2260,9 @@ static int __init ip6_tunnel_init(void) | |||
2259 | { | 2260 | { |
2260 | int err; | 2261 | int err; |
2261 | 2262 | ||
2263 | if (!ipv6_mod_enabled()) | ||
2264 | return -EOPNOTSUPP; | ||
2265 | |||
2262 | err = register_pernet_device(&ip6_tnl_net_ops); | 2266 | err = register_pernet_device(&ip6_tnl_net_ops); |
2263 | if (err < 0) | 2267 | if (err < 0) |
2264 | goto out_pernet; | 2268 | goto out_pernet; |
diff --git a/net/ipv6/ip6_vti.c b/net/ipv6/ip6_vti.c index 79444a4bfd6d..bcdc2d557de1 100644 --- a/net/ipv6/ip6_vti.c +++ b/net/ipv6/ip6_vti.c | |||
@@ -445,6 +445,7 @@ vti6_xmit(struct sk_buff *skb, struct net_device *dev, struct flowi *fl) | |||
445 | struct dst_entry *dst = skb_dst(skb); | 445 | struct dst_entry *dst = skb_dst(skb); |
446 | struct net_device *tdev; | 446 | struct net_device *tdev; |
447 | struct xfrm_state *x; | 447 | struct xfrm_state *x; |
448 | int pkt_len = skb->len; | ||
448 | int err = -1; | 449 | int err = -1; |
449 | int mtu; | 450 | int mtu; |
450 | 451 | ||
@@ -502,7 +503,7 @@ vti6_xmit(struct sk_buff *skb, struct net_device *dev, struct flowi *fl) | |||
502 | struct pcpu_sw_netstats *tstats = this_cpu_ptr(dev->tstats); | 503 | struct pcpu_sw_netstats *tstats = this_cpu_ptr(dev->tstats); |
503 | 504 | ||
504 | u64_stats_update_begin(&tstats->syncp); | 505 | u64_stats_update_begin(&tstats->syncp); |
505 | tstats->tx_bytes += skb->len; | 506 | tstats->tx_bytes += pkt_len; |
506 | tstats->tx_packets++; | 507 | tstats->tx_packets++; |
507 | u64_stats_update_end(&tstats->syncp); | 508 | u64_stats_update_end(&tstats->syncp); |
508 | } else { | 509 | } else { |
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c index e2ecfb137297..40d7234c27b9 100644 --- a/net/ipv6/udp.c +++ b/net/ipv6/udp.c | |||
@@ -1015,6 +1015,7 @@ static void udp6_hwcsum_outgoing(struct sock *sk, struct sk_buff *skb, | |||
1015 | */ | 1015 | */ |
1016 | offset = skb_transport_offset(skb); | 1016 | offset = skb_transport_offset(skb); |
1017 | skb->csum = skb_checksum(skb, offset, skb->len - offset, 0); | 1017 | skb->csum = skb_checksum(skb, offset, skb->len - offset, 0); |
1018 | csum = skb->csum; | ||
1018 | 1019 | ||
1019 | skb->ip_summed = CHECKSUM_NONE; | 1020 | skb->ip_summed = CHECKSUM_NONE; |
1020 | 1021 | ||
diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c index ee485df73ccd..02d61101b108 100644 --- a/net/l2tp/l2tp_core.c +++ b/net/l2tp/l2tp_core.c | |||
@@ -1314,6 +1314,9 @@ again: | |||
1314 | 1314 | ||
1315 | hlist_del_init(&session->hlist); | 1315 | hlist_del_init(&session->hlist); |
1316 | 1316 | ||
1317 | if (test_and_set_bit(0, &session->dead)) | ||
1318 | goto again; | ||
1319 | |||
1317 | if (session->ref != NULL) | 1320 | if (session->ref != NULL) |
1318 | (*session->ref)(session); | 1321 | (*session->ref)(session); |
1319 | 1322 | ||
@@ -1685,14 +1688,12 @@ EXPORT_SYMBOL_GPL(l2tp_tunnel_create); | |||
1685 | 1688 | ||
1686 | /* This function is used by the netlink TUNNEL_DELETE command. | 1689 | /* This function is used by the netlink TUNNEL_DELETE command. |
1687 | */ | 1690 | */ |
1688 | int l2tp_tunnel_delete(struct l2tp_tunnel *tunnel) | 1691 | void l2tp_tunnel_delete(struct l2tp_tunnel *tunnel) |
1689 | { | 1692 | { |
1690 | l2tp_tunnel_inc_refcount(tunnel); | 1693 | if (!test_and_set_bit(0, &tunnel->dead)) { |
1691 | if (false == queue_work(l2tp_wq, &tunnel->del_work)) { | 1694 | l2tp_tunnel_inc_refcount(tunnel); |
1692 | l2tp_tunnel_dec_refcount(tunnel); | 1695 | queue_work(l2tp_wq, &tunnel->del_work); |
1693 | return 1; | ||
1694 | } | 1696 | } |
1695 | return 0; | ||
1696 | } | 1697 | } |
1697 | EXPORT_SYMBOL_GPL(l2tp_tunnel_delete); | 1698 | EXPORT_SYMBOL_GPL(l2tp_tunnel_delete); |
1698 | 1699 | ||
@@ -1750,6 +1751,9 @@ EXPORT_SYMBOL_GPL(__l2tp_session_unhash); | |||
1750 | */ | 1751 | */ |
1751 | int l2tp_session_delete(struct l2tp_session *session) | 1752 | int l2tp_session_delete(struct l2tp_session *session) |
1752 | { | 1753 | { |
1754 | if (test_and_set_bit(0, &session->dead)) | ||
1755 | return 0; | ||
1756 | |||
1753 | if (session->ref) | 1757 | if (session->ref) |
1754 | (*session->ref)(session); | 1758 | (*session->ref)(session); |
1755 | __l2tp_session_unhash(session); | 1759 | __l2tp_session_unhash(session); |
diff --git a/net/l2tp/l2tp_core.h b/net/l2tp/l2tp_core.h index a305e0c5925a..67c79d9b5c6c 100644 --- a/net/l2tp/l2tp_core.h +++ b/net/l2tp/l2tp_core.h | |||
@@ -76,6 +76,7 @@ struct l2tp_session_cfg { | |||
76 | struct l2tp_session { | 76 | struct l2tp_session { |
77 | int magic; /* should be | 77 | int magic; /* should be |
78 | * L2TP_SESSION_MAGIC */ | 78 | * L2TP_SESSION_MAGIC */ |
79 | long dead; | ||
79 | 80 | ||
80 | struct l2tp_tunnel *tunnel; /* back pointer to tunnel | 81 | struct l2tp_tunnel *tunnel; /* back pointer to tunnel |
81 | * context */ | 82 | * context */ |
@@ -160,6 +161,9 @@ struct l2tp_tunnel_cfg { | |||
160 | 161 | ||
161 | struct l2tp_tunnel { | 162 | struct l2tp_tunnel { |
162 | int magic; /* Should be L2TP_TUNNEL_MAGIC */ | 163 | int magic; /* Should be L2TP_TUNNEL_MAGIC */ |
164 | |||
165 | unsigned long dead; | ||
166 | |||
163 | struct rcu_head rcu; | 167 | struct rcu_head rcu; |
164 | rwlock_t hlist_lock; /* protect session_hlist */ | 168 | rwlock_t hlist_lock; /* protect session_hlist */ |
165 | bool acpt_newsess; /* Indicates whether this | 169 | bool acpt_newsess; /* Indicates whether this |
@@ -254,7 +258,7 @@ int l2tp_tunnel_create(struct net *net, int fd, int version, u32 tunnel_id, | |||
254 | u32 peer_tunnel_id, struct l2tp_tunnel_cfg *cfg, | 258 | u32 peer_tunnel_id, struct l2tp_tunnel_cfg *cfg, |
255 | struct l2tp_tunnel **tunnelp); | 259 | struct l2tp_tunnel **tunnelp); |
256 | void l2tp_tunnel_closeall(struct l2tp_tunnel *tunnel); | 260 | void l2tp_tunnel_closeall(struct l2tp_tunnel *tunnel); |
257 | int l2tp_tunnel_delete(struct l2tp_tunnel *tunnel); | 261 | void l2tp_tunnel_delete(struct l2tp_tunnel *tunnel); |
258 | struct l2tp_session *l2tp_session_create(int priv_size, | 262 | struct l2tp_session *l2tp_session_create(int priv_size, |
259 | struct l2tp_tunnel *tunnel, | 263 | struct l2tp_tunnel *tunnel, |
260 | u32 session_id, u32 peer_session_id, | 264 | u32 session_id, u32 peer_session_id, |
diff --git a/net/l2tp/l2tp_eth.c b/net/l2tp/l2tp_eth.c index 87da9ef61860..014a7bc2a872 100644 --- a/net/l2tp/l2tp_eth.c +++ b/net/l2tp/l2tp_eth.c | |||
@@ -44,7 +44,6 @@ struct l2tp_eth { | |||
44 | struct net_device *dev; | 44 | struct net_device *dev; |
45 | struct sock *tunnel_sock; | 45 | struct sock *tunnel_sock; |
46 | struct l2tp_session *session; | 46 | struct l2tp_session *session; |
47 | struct list_head list; | ||
48 | atomic_long_t tx_bytes; | 47 | atomic_long_t tx_bytes; |
49 | atomic_long_t tx_packets; | 48 | atomic_long_t tx_packets; |
50 | atomic_long_t tx_dropped; | 49 | atomic_long_t tx_dropped; |
@@ -58,17 +57,6 @@ struct l2tp_eth_sess { | |||
58 | struct net_device *dev; | 57 | struct net_device *dev; |
59 | }; | 58 | }; |
60 | 59 | ||
61 | /* per-net private data for this module */ | ||
62 | static unsigned int l2tp_eth_net_id; | ||
63 | struct l2tp_eth_net { | ||
64 | struct list_head l2tp_eth_dev_list; | ||
65 | spinlock_t l2tp_eth_lock; | ||
66 | }; | ||
67 | |||
68 | static inline struct l2tp_eth_net *l2tp_eth_pernet(struct net *net) | ||
69 | { | ||
70 | return net_generic(net, l2tp_eth_net_id); | ||
71 | } | ||
72 | 60 | ||
73 | static int l2tp_eth_dev_init(struct net_device *dev) | 61 | static int l2tp_eth_dev_init(struct net_device *dev) |
74 | { | 62 | { |
@@ -84,12 +72,6 @@ static int l2tp_eth_dev_init(struct net_device *dev) | |||
84 | 72 | ||
85 | static void l2tp_eth_dev_uninit(struct net_device *dev) | 73 | static void l2tp_eth_dev_uninit(struct net_device *dev) |
86 | { | 74 | { |
87 | struct l2tp_eth *priv = netdev_priv(dev); | ||
88 | struct l2tp_eth_net *pn = l2tp_eth_pernet(dev_net(dev)); | ||
89 | |||
90 | spin_lock(&pn->l2tp_eth_lock); | ||
91 | list_del_init(&priv->list); | ||
92 | spin_unlock(&pn->l2tp_eth_lock); | ||
93 | dev_put(dev); | 75 | dev_put(dev); |
94 | } | 76 | } |
95 | 77 | ||
@@ -273,7 +255,6 @@ static int l2tp_eth_create(struct net *net, struct l2tp_tunnel *tunnel, | |||
273 | struct l2tp_eth *priv; | 255 | struct l2tp_eth *priv; |
274 | struct l2tp_eth_sess *spriv; | 256 | struct l2tp_eth_sess *spriv; |
275 | int rc; | 257 | int rc; |
276 | struct l2tp_eth_net *pn; | ||
277 | 258 | ||
278 | if (cfg->ifname) { | 259 | if (cfg->ifname) { |
279 | strlcpy(name, cfg->ifname, IFNAMSIZ); | 260 | strlcpy(name, cfg->ifname, IFNAMSIZ); |
@@ -305,7 +286,6 @@ static int l2tp_eth_create(struct net *net, struct l2tp_tunnel *tunnel, | |||
305 | priv = netdev_priv(dev); | 286 | priv = netdev_priv(dev); |
306 | priv->dev = dev; | 287 | priv->dev = dev; |
307 | priv->session = session; | 288 | priv->session = session; |
308 | INIT_LIST_HEAD(&priv->list); | ||
309 | 289 | ||
310 | priv->tunnel_sock = tunnel->sock; | 290 | priv->tunnel_sock = tunnel->sock; |
311 | session->recv_skb = l2tp_eth_dev_recv; | 291 | session->recv_skb = l2tp_eth_dev_recv; |
@@ -326,10 +306,6 @@ static int l2tp_eth_create(struct net *net, struct l2tp_tunnel *tunnel, | |||
326 | strlcpy(session->ifname, dev->name, IFNAMSIZ); | 306 | strlcpy(session->ifname, dev->name, IFNAMSIZ); |
327 | 307 | ||
328 | dev_hold(dev); | 308 | dev_hold(dev); |
329 | pn = l2tp_eth_pernet(dev_net(dev)); | ||
330 | spin_lock(&pn->l2tp_eth_lock); | ||
331 | list_add(&priv->list, &pn->l2tp_eth_dev_list); | ||
332 | spin_unlock(&pn->l2tp_eth_lock); | ||
333 | 309 | ||
334 | return 0; | 310 | return 0; |
335 | 311 | ||
@@ -342,22 +318,6 @@ out: | |||
342 | return rc; | 318 | return rc; |
343 | } | 319 | } |
344 | 320 | ||
345 | static __net_init int l2tp_eth_init_net(struct net *net) | ||
346 | { | ||
347 | struct l2tp_eth_net *pn = net_generic(net, l2tp_eth_net_id); | ||
348 | |||
349 | INIT_LIST_HEAD(&pn->l2tp_eth_dev_list); | ||
350 | spin_lock_init(&pn->l2tp_eth_lock); | ||
351 | |||
352 | return 0; | ||
353 | } | ||
354 | |||
355 | static struct pernet_operations l2tp_eth_net_ops = { | ||
356 | .init = l2tp_eth_init_net, | ||
357 | .id = &l2tp_eth_net_id, | ||
358 | .size = sizeof(struct l2tp_eth_net), | ||
359 | }; | ||
360 | |||
361 | 321 | ||
362 | static const struct l2tp_nl_cmd_ops l2tp_eth_nl_cmd_ops = { | 322 | static const struct l2tp_nl_cmd_ops l2tp_eth_nl_cmd_ops = { |
363 | .session_create = l2tp_eth_create, | 323 | .session_create = l2tp_eth_create, |
@@ -371,25 +331,18 @@ static int __init l2tp_eth_init(void) | |||
371 | 331 | ||
372 | err = l2tp_nl_register_ops(L2TP_PWTYPE_ETH, &l2tp_eth_nl_cmd_ops); | 332 | err = l2tp_nl_register_ops(L2TP_PWTYPE_ETH, &l2tp_eth_nl_cmd_ops); |
373 | if (err) | 333 | if (err) |
374 | goto out; | 334 | goto err; |
375 | |||
376 | err = register_pernet_device(&l2tp_eth_net_ops); | ||
377 | if (err) | ||
378 | goto out_unreg; | ||
379 | 335 | ||
380 | pr_info("L2TP ethernet pseudowire support (L2TPv3)\n"); | 336 | pr_info("L2TP ethernet pseudowire support (L2TPv3)\n"); |
381 | 337 | ||
382 | return 0; | 338 | return 0; |
383 | 339 | ||
384 | out_unreg: | 340 | err: |
385 | l2tp_nl_unregister_ops(L2TP_PWTYPE_ETH); | ||
386 | out: | ||
387 | return err; | 341 | return err; |
388 | } | 342 | } |
389 | 343 | ||
390 | static void __exit l2tp_eth_exit(void) | 344 | static void __exit l2tp_eth_exit(void) |
391 | { | 345 | { |
392 | unregister_pernet_device(&l2tp_eth_net_ops); | ||
393 | l2tp_nl_unregister_ops(L2TP_PWTYPE_ETH); | 346 | l2tp_nl_unregister_ops(L2TP_PWTYPE_ETH); |
394 | } | 347 | } |
395 | 348 | ||
diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c index 50e3ee9a9d61..bc6e8bfc5be4 100644 --- a/net/l2tp/l2tp_ppp.c +++ b/net/l2tp/l2tp_ppp.c | |||
@@ -437,11 +437,11 @@ static void pppol2tp_session_close(struct l2tp_session *session) | |||
437 | 437 | ||
438 | BUG_ON(session->magic != L2TP_SESSION_MAGIC); | 438 | BUG_ON(session->magic != L2TP_SESSION_MAGIC); |
439 | 439 | ||
440 | if (sock) { | 440 | if (sock) |
441 | inet_shutdown(sock, SEND_SHUTDOWN); | 441 | inet_shutdown(sock, SEND_SHUTDOWN); |
442 | /* Don't let the session go away before our socket does */ | 442 | |
443 | l2tp_session_inc_refcount(session); | 443 | /* Don't let the session go away before our socket does */ |
444 | } | 444 | l2tp_session_inc_refcount(session); |
445 | } | 445 | } |
446 | 446 | ||
447 | /* Really kill the session socket. (Called from sock_put() if | 447 | /* Really kill the session socket. (Called from sock_put() if |
diff --git a/net/netfilter/ipset/ip_set_hash_gen.h b/net/netfilter/ipset/ip_set_hash_gen.h index f236c0bc7b3f..51063d9ed0f7 100644 --- a/net/netfilter/ipset/ip_set_hash_gen.h +++ b/net/netfilter/ipset/ip_set_hash_gen.h | |||
@@ -1041,12 +1041,24 @@ out: | |||
1041 | static int | 1041 | static int |
1042 | mtype_head(struct ip_set *set, struct sk_buff *skb) | 1042 | mtype_head(struct ip_set *set, struct sk_buff *skb) |
1043 | { | 1043 | { |
1044 | const struct htype *h = set->data; | 1044 | struct htype *h = set->data; |
1045 | const struct htable *t; | 1045 | const struct htable *t; |
1046 | struct nlattr *nested; | 1046 | struct nlattr *nested; |
1047 | size_t memsize; | 1047 | size_t memsize; |
1048 | u8 htable_bits; | 1048 | u8 htable_bits; |
1049 | 1049 | ||
1050 | /* If any members have expired, set->elements will be wrong | ||
1051 | * mytype_expire function will update it with the right count. | ||
1052 | * we do not hold set->lock here, so grab it first. | ||
1053 | * set->elements can still be incorrect in the case of a huge set, | ||
1054 | * because elements might time out during the listing. | ||
1055 | */ | ||
1056 | if (SET_WITH_TIMEOUT(set)) { | ||
1057 | spin_lock_bh(&set->lock); | ||
1058 | mtype_expire(set, h); | ||
1059 | spin_unlock_bh(&set->lock); | ||
1060 | } | ||
1061 | |||
1050 | rcu_read_lock_bh(); | 1062 | rcu_read_lock_bh(); |
1051 | t = rcu_dereference_bh_nfnl(h->table); | 1063 | t = rcu_dereference_bh_nfnl(h->table); |
1052 | memsize = mtype_ahash_memsize(h, t) + set->ext_size; | 1064 | memsize = mtype_ahash_memsize(h, t) + set->ext_size; |
diff --git a/net/netfilter/nf_nat_core.c b/net/netfilter/nf_nat_core.c index f393a7086025..af8345fc4fbd 100644 --- a/net/netfilter/nf_nat_core.c +++ b/net/netfilter/nf_nat_core.c | |||
@@ -429,7 +429,7 @@ nf_nat_setup_info(struct nf_conn *ct, | |||
429 | 429 | ||
430 | srchash = hash_by_src(net, | 430 | srchash = hash_by_src(net, |
431 | &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple); | 431 | &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple); |
432 | lock = &nf_nat_locks[srchash % ARRAY_SIZE(nf_nat_locks)]; | 432 | lock = &nf_nat_locks[srchash % CONNTRACK_LOCKS]; |
433 | spin_lock_bh(lock); | 433 | spin_lock_bh(lock); |
434 | hlist_add_head_rcu(&ct->nat_bysource, | 434 | hlist_add_head_rcu(&ct->nat_bysource, |
435 | &nf_nat_bysource[srchash]); | 435 | &nf_nat_bysource[srchash]); |
@@ -532,9 +532,9 @@ static void __nf_nat_cleanup_conntrack(struct nf_conn *ct) | |||
532 | unsigned int h; | 532 | unsigned int h; |
533 | 533 | ||
534 | h = hash_by_src(nf_ct_net(ct), &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple); | 534 | h = hash_by_src(nf_ct_net(ct), &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple); |
535 | spin_lock_bh(&nf_nat_locks[h % ARRAY_SIZE(nf_nat_locks)]); | 535 | spin_lock_bh(&nf_nat_locks[h % CONNTRACK_LOCKS]); |
536 | hlist_del_rcu(&ct->nat_bysource); | 536 | hlist_del_rcu(&ct->nat_bysource); |
537 | spin_unlock_bh(&nf_nat_locks[h % ARRAY_SIZE(nf_nat_locks)]); | 537 | spin_unlock_bh(&nf_nat_locks[h % CONNTRACK_LOCKS]); |
538 | } | 538 | } |
539 | 539 | ||
540 | static int nf_nat_proto_clean(struct nf_conn *ct, void *data) | 540 | static int nf_nat_proto_clean(struct nf_conn *ct, void *data) |
@@ -807,8 +807,8 @@ static int __init nf_nat_init(void) | |||
807 | 807 | ||
808 | /* Leave them the same for the moment. */ | 808 | /* Leave them the same for the moment. */ |
809 | nf_nat_htable_size = nf_conntrack_htable_size; | 809 | nf_nat_htable_size = nf_conntrack_htable_size; |
810 | if (nf_nat_htable_size < ARRAY_SIZE(nf_nat_locks)) | 810 | if (nf_nat_htable_size < CONNTRACK_LOCKS) |
811 | nf_nat_htable_size = ARRAY_SIZE(nf_nat_locks); | 811 | nf_nat_htable_size = CONNTRACK_LOCKS; |
812 | 812 | ||
813 | nf_nat_bysource = nf_ct_alloc_hashtable(&nf_nat_htable_size, 0); | 813 | nf_nat_bysource = nf_ct_alloc_hashtable(&nf_nat_htable_size, 0); |
814 | if (!nf_nat_bysource) | 814 | if (!nf_nat_bysource) |
@@ -821,7 +821,7 @@ static int __init nf_nat_init(void) | |||
821 | return ret; | 821 | return ret; |
822 | } | 822 | } |
823 | 823 | ||
824 | for (i = 0; i < ARRAY_SIZE(nf_nat_locks); i++) | 824 | for (i = 0; i < CONNTRACK_LOCKS; i++) |
825 | spin_lock_init(&nf_nat_locks[i]); | 825 | spin_lock_init(&nf_nat_locks[i]); |
826 | 826 | ||
827 | nf_ct_helper_expectfn_register(&follow_master_nat); | 827 | nf_ct_helper_expectfn_register(&follow_master_nat); |
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c index 327807731b44..94c11cf0459d 100644 --- a/net/netlink/af_netlink.c +++ b/net/netlink/af_netlink.c | |||
@@ -2270,10 +2270,13 @@ int __netlink_dump_start(struct sock *ssk, struct sk_buff *skb, | |||
2270 | 2270 | ||
2271 | mutex_unlock(nlk->cb_mutex); | 2271 | mutex_unlock(nlk->cb_mutex); |
2272 | 2272 | ||
2273 | ret = 0; | ||
2273 | if (cb->start) | 2274 | if (cb->start) |
2274 | cb->start(cb); | 2275 | ret = cb->start(cb); |
2276 | |||
2277 | if (!ret) | ||
2278 | ret = netlink_dump(sk); | ||
2275 | 2279 | ||
2276 | ret = netlink_dump(sk); | ||
2277 | sock_put(sk); | 2280 | sock_put(sk); |
2278 | 2281 | ||
2279 | if (ret) | 2282 | if (ret) |
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c index c26172995511..bec01a3daf5b 100644 --- a/net/packet/af_packet.c +++ b/net/packet/af_packet.c | |||
@@ -1684,10 +1684,6 @@ static int fanout_add(struct sock *sk, u16 id, u16 type_flags) | |||
1684 | 1684 | ||
1685 | mutex_lock(&fanout_mutex); | 1685 | mutex_lock(&fanout_mutex); |
1686 | 1686 | ||
1687 | err = -EINVAL; | ||
1688 | if (!po->running) | ||
1689 | goto out; | ||
1690 | |||
1691 | err = -EALREADY; | 1687 | err = -EALREADY; |
1692 | if (po->fanout) | 1688 | if (po->fanout) |
1693 | goto out; | 1689 | goto out; |
@@ -1749,7 +1745,10 @@ static int fanout_add(struct sock *sk, u16 id, u16 type_flags) | |||
1749 | list_add(&match->list, &fanout_list); | 1745 | list_add(&match->list, &fanout_list); |
1750 | } | 1746 | } |
1751 | err = -EINVAL; | 1747 | err = -EINVAL; |
1752 | if (match->type == type && | 1748 | |
1749 | spin_lock(&po->bind_lock); | ||
1750 | if (po->running && | ||
1751 | match->type == type && | ||
1753 | match->prot_hook.type == po->prot_hook.type && | 1752 | match->prot_hook.type == po->prot_hook.type && |
1754 | match->prot_hook.dev == po->prot_hook.dev) { | 1753 | match->prot_hook.dev == po->prot_hook.dev) { |
1755 | err = -ENOSPC; | 1754 | err = -ENOSPC; |
@@ -1761,6 +1760,13 @@ static int fanout_add(struct sock *sk, u16 id, u16 type_flags) | |||
1761 | err = 0; | 1760 | err = 0; |
1762 | } | 1761 | } |
1763 | } | 1762 | } |
1763 | spin_unlock(&po->bind_lock); | ||
1764 | |||
1765 | if (err && !refcount_read(&match->sk_ref)) { | ||
1766 | list_del(&match->list); | ||
1767 | kfree(match); | ||
1768 | } | ||
1769 | |||
1764 | out: | 1770 | out: |
1765 | if (err && rollover) { | 1771 | if (err && rollover) { |
1766 | kfree(rollover); | 1772 | kfree(rollover); |
@@ -2834,6 +2840,7 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len) | |||
2834 | struct virtio_net_hdr vnet_hdr = { 0 }; | 2840 | struct virtio_net_hdr vnet_hdr = { 0 }; |
2835 | int offset = 0; | 2841 | int offset = 0; |
2836 | struct packet_sock *po = pkt_sk(sk); | 2842 | struct packet_sock *po = pkt_sk(sk); |
2843 | bool has_vnet_hdr = false; | ||
2837 | int hlen, tlen, linear; | 2844 | int hlen, tlen, linear; |
2838 | int extra_len = 0; | 2845 | int extra_len = 0; |
2839 | 2846 | ||
@@ -2877,6 +2884,7 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len) | |||
2877 | err = packet_snd_vnet_parse(msg, &len, &vnet_hdr); | 2884 | err = packet_snd_vnet_parse(msg, &len, &vnet_hdr); |
2878 | if (err) | 2885 | if (err) |
2879 | goto out_unlock; | 2886 | goto out_unlock; |
2887 | has_vnet_hdr = true; | ||
2880 | } | 2888 | } |
2881 | 2889 | ||
2882 | if (unlikely(sock_flag(sk, SOCK_NOFCS))) { | 2890 | if (unlikely(sock_flag(sk, SOCK_NOFCS))) { |
@@ -2935,7 +2943,7 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len) | |||
2935 | skb->priority = sk->sk_priority; | 2943 | skb->priority = sk->sk_priority; |
2936 | skb->mark = sockc.mark; | 2944 | skb->mark = sockc.mark; |
2937 | 2945 | ||
2938 | if (po->has_vnet_hdr) { | 2946 | if (has_vnet_hdr) { |
2939 | err = virtio_net_hdr_to_skb(skb, &vnet_hdr, vio_le()); | 2947 | err = virtio_net_hdr_to_skb(skb, &vnet_hdr, vio_le()); |
2940 | if (err) | 2948 | if (err) |
2941 | goto out_free; | 2949 | goto out_free; |
@@ -3063,13 +3071,15 @@ static int packet_do_bind(struct sock *sk, const char *name, int ifindex, | |||
3063 | int ret = 0; | 3071 | int ret = 0; |
3064 | bool unlisted = false; | 3072 | bool unlisted = false; |
3065 | 3073 | ||
3066 | if (po->fanout) | ||
3067 | return -EINVAL; | ||
3068 | |||
3069 | lock_sock(sk); | 3074 | lock_sock(sk); |
3070 | spin_lock(&po->bind_lock); | 3075 | spin_lock(&po->bind_lock); |
3071 | rcu_read_lock(); | 3076 | rcu_read_lock(); |
3072 | 3077 | ||
3078 | if (po->fanout) { | ||
3079 | ret = -EINVAL; | ||
3080 | goto out_unlock; | ||
3081 | } | ||
3082 | |||
3073 | if (name) { | 3083 | if (name) { |
3074 | dev = dev_get_by_name_rcu(sock_net(sk), name); | 3084 | dev = dev_get_by_name_rcu(sock_net(sk), name); |
3075 | if (!dev) { | 3085 | if (!dev) { |
diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c index 1a267e77c6de..d230cb4c8094 100644 --- a/net/sched/cls_flower.c +++ b/net/sched/cls_flower.c | |||
@@ -922,28 +922,28 @@ static int fl_change(struct net *net, struct sk_buff *in_skb, | |||
922 | 922 | ||
923 | if (!tc_flags_valid(fnew->flags)) { | 923 | if (!tc_flags_valid(fnew->flags)) { |
924 | err = -EINVAL; | 924 | err = -EINVAL; |
925 | goto errout; | 925 | goto errout_idr; |
926 | } | 926 | } |
927 | } | 927 | } |
928 | 928 | ||
929 | err = fl_set_parms(net, tp, fnew, &mask, base, tb, tca[TCA_RATE], ovr); | 929 | err = fl_set_parms(net, tp, fnew, &mask, base, tb, tca[TCA_RATE], ovr); |
930 | if (err) | 930 | if (err) |
931 | goto errout; | 931 | goto errout_idr; |
932 | 932 | ||
933 | err = fl_check_assign_mask(head, &mask); | 933 | err = fl_check_assign_mask(head, &mask); |
934 | if (err) | 934 | if (err) |
935 | goto errout; | 935 | goto errout_idr; |
936 | 936 | ||
937 | if (!tc_skip_sw(fnew->flags)) { | 937 | if (!tc_skip_sw(fnew->flags)) { |
938 | if (!fold && fl_lookup(head, &fnew->mkey)) { | 938 | if (!fold && fl_lookup(head, &fnew->mkey)) { |
939 | err = -EEXIST; | 939 | err = -EEXIST; |
940 | goto errout; | 940 | goto errout_idr; |
941 | } | 941 | } |
942 | 942 | ||
943 | err = rhashtable_insert_fast(&head->ht, &fnew->ht_node, | 943 | err = rhashtable_insert_fast(&head->ht, &fnew->ht_node, |
944 | head->ht_params); | 944 | head->ht_params); |
945 | if (err) | 945 | if (err) |
946 | goto errout; | 946 | goto errout_idr; |
947 | } | 947 | } |
948 | 948 | ||
949 | if (!tc_skip_hw(fnew->flags)) { | 949 | if (!tc_skip_hw(fnew->flags)) { |
@@ -952,7 +952,7 @@ static int fl_change(struct net *net, struct sk_buff *in_skb, | |||
952 | &mask.key, | 952 | &mask.key, |
953 | fnew); | 953 | fnew); |
954 | if (err) | 954 | if (err) |
955 | goto errout; | 955 | goto errout_idr; |
956 | } | 956 | } |
957 | 957 | ||
958 | if (!tc_in_hw(fnew->flags)) | 958 | if (!tc_in_hw(fnew->flags)) |
@@ -981,6 +981,9 @@ static int fl_change(struct net *net, struct sk_buff *in_skb, | |||
981 | kfree(tb); | 981 | kfree(tb); |
982 | return 0; | 982 | return 0; |
983 | 983 | ||
984 | errout_idr: | ||
985 | if (fnew->handle) | ||
986 | idr_remove_ext(&head->handle_idr, fnew->handle); | ||
984 | errout: | 987 | errout: |
985 | tcf_exts_destroy(&fnew->exts); | 988 | tcf_exts_destroy(&fnew->exts); |
986 | kfree(fnew); | 989 | kfree(fnew); |
diff --git a/net/sched/cls_matchall.c b/net/sched/cls_matchall.c index 21cc45caf842..eeac606c95ab 100644 --- a/net/sched/cls_matchall.c +++ b/net/sched/cls_matchall.c | |||
@@ -32,6 +32,7 @@ static int mall_classify(struct sk_buff *skb, const struct tcf_proto *tp, | |||
32 | if (tc_skip_sw(head->flags)) | 32 | if (tc_skip_sw(head->flags)) |
33 | return -1; | 33 | return -1; |
34 | 34 | ||
35 | *res = head->res; | ||
35 | return tcf_exts_exec(skb, &head->exts, res); | 36 | return tcf_exts_exec(skb, &head->exts, res); |
36 | } | 37 | } |
37 | 38 | ||
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c index 92237e75dbbc..bf8c81e07c70 100644 --- a/net/sched/sch_generic.c +++ b/net/sched/sch_generic.c | |||
@@ -685,6 +685,7 @@ void qdisc_reset(struct Qdisc *qdisc) | |||
685 | qdisc->gso_skb = NULL; | 685 | qdisc->gso_skb = NULL; |
686 | } | 686 | } |
687 | qdisc->q.qlen = 0; | 687 | qdisc->q.qlen = 0; |
688 | qdisc->qstats.backlog = 0; | ||
688 | } | 689 | } |
689 | EXPORT_SYMBOL(qdisc_reset); | 690 | EXPORT_SYMBOL(qdisc_reset); |
690 | 691 | ||
diff --git a/net/sched/sch_hfsc.c b/net/sched/sch_hfsc.c index daaf214e5201..3f88b75488b0 100644 --- a/net/sched/sch_hfsc.c +++ b/net/sched/sch_hfsc.c | |||
@@ -958,6 +958,8 @@ hfsc_change_class(struct Qdisc *sch, u32 classid, u32 parentid, | |||
958 | } | 958 | } |
959 | 959 | ||
960 | if (cl != NULL) { | 960 | if (cl != NULL) { |
961 | int old_flags; | ||
962 | |||
961 | if (parentid) { | 963 | if (parentid) { |
962 | if (cl->cl_parent && | 964 | if (cl->cl_parent && |
963 | cl->cl_parent->cl_common.classid != parentid) | 965 | cl->cl_parent->cl_common.classid != parentid) |
@@ -978,6 +980,8 @@ hfsc_change_class(struct Qdisc *sch, u32 classid, u32 parentid, | |||
978 | } | 980 | } |
979 | 981 | ||
980 | sch_tree_lock(sch); | 982 | sch_tree_lock(sch); |
983 | old_flags = cl->cl_flags; | ||
984 | |||
981 | if (rsc != NULL) | 985 | if (rsc != NULL) |
982 | hfsc_change_rsc(cl, rsc, cur_time); | 986 | hfsc_change_rsc(cl, rsc, cur_time); |
983 | if (fsc != NULL) | 987 | if (fsc != NULL) |
@@ -986,10 +990,21 @@ hfsc_change_class(struct Qdisc *sch, u32 classid, u32 parentid, | |||
986 | hfsc_change_usc(cl, usc, cur_time); | 990 | hfsc_change_usc(cl, usc, cur_time); |
987 | 991 | ||
988 | if (cl->qdisc->q.qlen != 0) { | 992 | if (cl->qdisc->q.qlen != 0) { |
989 | if (cl->cl_flags & HFSC_RSC) | 993 | int len = qdisc_peek_len(cl->qdisc); |
990 | update_ed(cl, qdisc_peek_len(cl->qdisc)); | 994 | |
991 | if (cl->cl_flags & HFSC_FSC) | 995 | if (cl->cl_flags & HFSC_RSC) { |
992 | update_vf(cl, 0, cur_time); | 996 | if (old_flags & HFSC_RSC) |
997 | update_ed(cl, len); | ||
998 | else | ||
999 | init_ed(cl, len); | ||
1000 | } | ||
1001 | |||
1002 | if (cl->cl_flags & HFSC_FSC) { | ||
1003 | if (old_flags & HFSC_FSC) | ||
1004 | update_vf(cl, 0, cur_time); | ||
1005 | else | ||
1006 | init_vf(cl, len); | ||
1007 | } | ||
993 | } | 1008 | } |
994 | sch_tree_unlock(sch); | 1009 | sch_tree_unlock(sch); |
995 | 1010 | ||
diff --git a/net/sctp/sctp_diag.c b/net/sctp/sctp_diag.c index 22ed01a76b19..a72a7d925d46 100644 --- a/net/sctp/sctp_diag.c +++ b/net/sctp/sctp_diag.c | |||
@@ -463,6 +463,7 @@ static void sctp_diag_dump(struct sk_buff *skb, struct netlink_callback *cb, | |||
463 | .r = r, | 463 | .r = r, |
464 | .net_admin = netlink_net_capable(cb->skb, CAP_NET_ADMIN), | 464 | .net_admin = netlink_net_capable(cb->skb, CAP_NET_ADMIN), |
465 | }; | 465 | }; |
466 | int pos = cb->args[2]; | ||
466 | 467 | ||
467 | /* eps hashtable dumps | 468 | /* eps hashtable dumps |
468 | * args: | 469 | * args: |
@@ -493,7 +494,8 @@ skip: | |||
493 | goto done; | 494 | goto done; |
494 | 495 | ||
495 | sctp_for_each_transport(sctp_sock_filter, sctp_sock_dump, | 496 | sctp_for_each_transport(sctp_sock_filter, sctp_sock_dump, |
496 | net, (int *)&cb->args[2], &commp); | 497 | net, &pos, &commp); |
498 | cb->args[2] = pos; | ||
497 | 499 | ||
498 | done: | 500 | done: |
499 | cb->args[1] = cb->args[4]; | 501 | cb->args[1] = cb->args[4]; |
diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c index 8c6d24b2995d..745f145d4c4d 100644 --- a/net/smc/af_smc.c +++ b/net/smc/af_smc.c | |||
@@ -282,6 +282,7 @@ int smc_netinfo_by_tcpsk(struct socket *clcsock, | |||
282 | __be32 *subnet, u8 *prefix_len) | 282 | __be32 *subnet, u8 *prefix_len) |
283 | { | 283 | { |
284 | struct dst_entry *dst = sk_dst_get(clcsock->sk); | 284 | struct dst_entry *dst = sk_dst_get(clcsock->sk); |
285 | struct in_device *in_dev; | ||
285 | struct sockaddr_in addr; | 286 | struct sockaddr_in addr; |
286 | int rc = -ENOENT; | 287 | int rc = -ENOENT; |
287 | int len; | 288 | int len; |
@@ -298,14 +299,17 @@ int smc_netinfo_by_tcpsk(struct socket *clcsock, | |||
298 | /* get address to which the internal TCP socket is bound */ | 299 | /* get address to which the internal TCP socket is bound */ |
299 | kernel_getsockname(clcsock, (struct sockaddr *)&addr, &len); | 300 | kernel_getsockname(clcsock, (struct sockaddr *)&addr, &len); |
300 | /* analyze IPv4 specific data of net_device belonging to TCP socket */ | 301 | /* analyze IPv4 specific data of net_device belonging to TCP socket */ |
301 | for_ifa(dst->dev->ip_ptr) { | 302 | rcu_read_lock(); |
302 | if (ifa->ifa_address != addr.sin_addr.s_addr) | 303 | in_dev = __in_dev_get_rcu(dst->dev); |
304 | for_ifa(in_dev) { | ||
305 | if (!inet_ifa_match(addr.sin_addr.s_addr, ifa)) | ||
303 | continue; | 306 | continue; |
304 | *prefix_len = inet_mask_len(ifa->ifa_mask); | 307 | *prefix_len = inet_mask_len(ifa->ifa_mask); |
305 | *subnet = ifa->ifa_address & ifa->ifa_mask; | 308 | *subnet = ifa->ifa_address & ifa->ifa_mask; |
306 | rc = 0; | 309 | rc = 0; |
307 | break; | 310 | break; |
308 | } endfor_ifa(dst->dev->ip_ptr); | 311 | } endfor_ifa(in_dev); |
312 | rcu_read_unlock(); | ||
309 | 313 | ||
310 | out_rel: | 314 | out_rel: |
311 | dst_release(dst); | 315 | dst_release(dst); |
@@ -509,7 +513,7 @@ decline_rdma: | |||
509 | /* RDMA setup failed, switch back to TCP */ | 513 | /* RDMA setup failed, switch back to TCP */ |
510 | smc->use_fallback = true; | 514 | smc->use_fallback = true; |
511 | if (reason_code && (reason_code != SMC_CLC_DECL_REPLY)) { | 515 | if (reason_code && (reason_code != SMC_CLC_DECL_REPLY)) { |
512 | rc = smc_clc_send_decline(smc, reason_code, 0); | 516 | rc = smc_clc_send_decline(smc, reason_code); |
513 | if (rc < sizeof(struct smc_clc_msg_decline)) | 517 | if (rc < sizeof(struct smc_clc_msg_decline)) |
514 | goto out_err; | 518 | goto out_err; |
515 | } | 519 | } |
@@ -804,8 +808,6 @@ static void smc_listen_work(struct work_struct *work) | |||
804 | rc = local_contact; | 808 | rc = local_contact; |
805 | if (rc == -ENOMEM) | 809 | if (rc == -ENOMEM) |
806 | reason_code = SMC_CLC_DECL_MEM;/* insufficient memory*/ | 810 | reason_code = SMC_CLC_DECL_MEM;/* insufficient memory*/ |
807 | else if (rc == -ENOLINK) | ||
808 | reason_code = SMC_CLC_DECL_SYNCERR; /* synchr. error */ | ||
809 | goto decline_rdma; | 811 | goto decline_rdma; |
810 | } | 812 | } |
811 | link = &new_smc->conn.lgr->lnk[SMC_SINGLE_LINK]; | 813 | link = &new_smc->conn.lgr->lnk[SMC_SINGLE_LINK]; |
@@ -899,7 +901,7 @@ decline_rdma: | |||
899 | smc_conn_free(&new_smc->conn); | 901 | smc_conn_free(&new_smc->conn); |
900 | new_smc->use_fallback = true; | 902 | new_smc->use_fallback = true; |
901 | if (reason_code && (reason_code != SMC_CLC_DECL_REPLY)) { | 903 | if (reason_code && (reason_code != SMC_CLC_DECL_REPLY)) { |
902 | rc = smc_clc_send_decline(new_smc, reason_code, 0); | 904 | rc = smc_clc_send_decline(new_smc, reason_code); |
903 | if (rc < sizeof(struct smc_clc_msg_decline)) | 905 | if (rc < sizeof(struct smc_clc_msg_decline)) |
904 | goto out_err; | 906 | goto out_err; |
905 | } | 907 | } |
diff --git a/net/smc/smc.h b/net/smc/smc.h index 6e44313e4467..0ccd6fa387ad 100644 --- a/net/smc/smc.h +++ b/net/smc/smc.h | |||
@@ -149,7 +149,7 @@ struct smc_connection { | |||
149 | atomic_t sndbuf_space; /* remaining space in sndbuf */ | 149 | atomic_t sndbuf_space; /* remaining space in sndbuf */ |
150 | u16 tx_cdc_seq; /* sequence # for CDC send */ | 150 | u16 tx_cdc_seq; /* sequence # for CDC send */ |
151 | spinlock_t send_lock; /* protect wr_sends */ | 151 | spinlock_t send_lock; /* protect wr_sends */ |
152 | struct work_struct tx_work; /* retry of smc_cdc_msg_send */ | 152 | struct delayed_work tx_work; /* retry of smc_cdc_msg_send */ |
153 | 153 | ||
154 | struct smc_host_cdc_msg local_rx_ctrl; /* filled during event_handl. | 154 | struct smc_host_cdc_msg local_rx_ctrl; /* filled during event_handl. |
155 | * .prod cf. TCP rcv_nxt | 155 | * .prod cf. TCP rcv_nxt |
diff --git a/net/smc/smc_clc.c b/net/smc/smc_clc.c index 3934913ab835..b7dd2743fb5c 100644 --- a/net/smc/smc_clc.c +++ b/net/smc/smc_clc.c | |||
@@ -95,9 +95,10 @@ int smc_clc_wait_msg(struct smc_sock *smc, void *buf, int buflen, | |||
95 | } | 95 | } |
96 | if (clcm->type == SMC_CLC_DECLINE) { | 96 | if (clcm->type == SMC_CLC_DECLINE) { |
97 | reason_code = SMC_CLC_DECL_REPLY; | 97 | reason_code = SMC_CLC_DECL_REPLY; |
98 | if (ntohl(((struct smc_clc_msg_decline *)buf)->peer_diagnosis) | 98 | if (((struct smc_clc_msg_decline *)buf)->hdr.flag) { |
99 | == SMC_CLC_DECL_SYNCERR) | ||
100 | smc->conn.lgr->sync_err = true; | 99 | smc->conn.lgr->sync_err = true; |
100 | smc_lgr_terminate(smc->conn.lgr); | ||
101 | } | ||
101 | } | 102 | } |
102 | 103 | ||
103 | out: | 104 | out: |
@@ -105,8 +106,7 @@ out: | |||
105 | } | 106 | } |
106 | 107 | ||
107 | /* send CLC DECLINE message across internal TCP socket */ | 108 | /* send CLC DECLINE message across internal TCP socket */ |
108 | int smc_clc_send_decline(struct smc_sock *smc, u32 peer_diag_info, | 109 | int smc_clc_send_decline(struct smc_sock *smc, u32 peer_diag_info) |
109 | u8 out_of_sync) | ||
110 | { | 110 | { |
111 | struct smc_clc_msg_decline dclc; | 111 | struct smc_clc_msg_decline dclc; |
112 | struct msghdr msg; | 112 | struct msghdr msg; |
@@ -118,7 +118,7 @@ int smc_clc_send_decline(struct smc_sock *smc, u32 peer_diag_info, | |||
118 | dclc.hdr.type = SMC_CLC_DECLINE; | 118 | dclc.hdr.type = SMC_CLC_DECLINE; |
119 | dclc.hdr.length = htons(sizeof(struct smc_clc_msg_decline)); | 119 | dclc.hdr.length = htons(sizeof(struct smc_clc_msg_decline)); |
120 | dclc.hdr.version = SMC_CLC_V1; | 120 | dclc.hdr.version = SMC_CLC_V1; |
121 | dclc.hdr.flag = out_of_sync ? 1 : 0; | 121 | dclc.hdr.flag = (peer_diag_info == SMC_CLC_DECL_SYNCERR) ? 1 : 0; |
122 | memcpy(dclc.id_for_peer, local_systemid, sizeof(local_systemid)); | 122 | memcpy(dclc.id_for_peer, local_systemid, sizeof(local_systemid)); |
123 | dclc.peer_diagnosis = htonl(peer_diag_info); | 123 | dclc.peer_diagnosis = htonl(peer_diag_info); |
124 | memcpy(dclc.trl.eyecatcher, SMC_EYECATCHER, sizeof(SMC_EYECATCHER)); | 124 | memcpy(dclc.trl.eyecatcher, SMC_EYECATCHER, sizeof(SMC_EYECATCHER)); |
diff --git a/net/smc/smc_clc.h b/net/smc/smc_clc.h index 13db8ce177c9..1c55414041d4 100644 --- a/net/smc/smc_clc.h +++ b/net/smc/smc_clc.h | |||
@@ -106,8 +106,7 @@ struct smc_ib_device; | |||
106 | 106 | ||
107 | int smc_clc_wait_msg(struct smc_sock *smc, void *buf, int buflen, | 107 | int smc_clc_wait_msg(struct smc_sock *smc, void *buf, int buflen, |
108 | u8 expected_type); | 108 | u8 expected_type); |
109 | int smc_clc_send_decline(struct smc_sock *smc, u32 peer_diag_info, | 109 | int smc_clc_send_decline(struct smc_sock *smc, u32 peer_diag_info); |
110 | u8 out_of_sync); | ||
111 | int smc_clc_send_proposal(struct smc_sock *smc, struct smc_ib_device *smcibdev, | 110 | int smc_clc_send_proposal(struct smc_sock *smc, struct smc_ib_device *smcibdev, |
112 | u8 ibport); | 111 | u8 ibport); |
113 | int smc_clc_send_confirm(struct smc_sock *smc); | 112 | int smc_clc_send_confirm(struct smc_sock *smc); |
diff --git a/net/smc/smc_close.c b/net/smc/smc_close.c index 3c2e166b5d22..f0d16fb825f7 100644 --- a/net/smc/smc_close.c +++ b/net/smc/smc_close.c | |||
@@ -174,15 +174,15 @@ int smc_close_active(struct smc_sock *smc) | |||
174 | { | 174 | { |
175 | struct smc_cdc_conn_state_flags *txflags = | 175 | struct smc_cdc_conn_state_flags *txflags = |
176 | &smc->conn.local_tx_ctrl.conn_state_flags; | 176 | &smc->conn.local_tx_ctrl.conn_state_flags; |
177 | long timeout = SMC_MAX_STREAM_WAIT_TIMEOUT; | ||
178 | struct smc_connection *conn = &smc->conn; | 177 | struct smc_connection *conn = &smc->conn; |
179 | struct sock *sk = &smc->sk; | 178 | struct sock *sk = &smc->sk; |
180 | int old_state; | 179 | int old_state; |
180 | long timeout; | ||
181 | int rc = 0; | 181 | int rc = 0; |
182 | 182 | ||
183 | if (sock_flag(sk, SOCK_LINGER) && | 183 | timeout = current->flags & PF_EXITING ? |
184 | !(current->flags & PF_EXITING)) | 184 | 0 : sock_flag(sk, SOCK_LINGER) ? |
185 | timeout = sk->sk_lingertime; | 185 | sk->sk_lingertime : SMC_MAX_STREAM_WAIT_TIMEOUT; |
186 | 186 | ||
187 | again: | 187 | again: |
188 | old_state = sk->sk_state; | 188 | old_state = sk->sk_state; |
@@ -208,7 +208,7 @@ again: | |||
208 | case SMC_ACTIVE: | 208 | case SMC_ACTIVE: |
209 | smc_close_stream_wait(smc, timeout); | 209 | smc_close_stream_wait(smc, timeout); |
210 | release_sock(sk); | 210 | release_sock(sk); |
211 | cancel_work_sync(&conn->tx_work); | 211 | cancel_delayed_work_sync(&conn->tx_work); |
212 | lock_sock(sk); | 212 | lock_sock(sk); |
213 | if (sk->sk_state == SMC_ACTIVE) { | 213 | if (sk->sk_state == SMC_ACTIVE) { |
214 | /* send close request */ | 214 | /* send close request */ |
@@ -234,7 +234,7 @@ again: | |||
234 | if (!smc_cdc_rxed_any_close(conn)) | 234 | if (!smc_cdc_rxed_any_close(conn)) |
235 | smc_close_stream_wait(smc, timeout); | 235 | smc_close_stream_wait(smc, timeout); |
236 | release_sock(sk); | 236 | release_sock(sk); |
237 | cancel_work_sync(&conn->tx_work); | 237 | cancel_delayed_work_sync(&conn->tx_work); |
238 | lock_sock(sk); | 238 | lock_sock(sk); |
239 | if (sk->sk_err != ECONNABORTED) { | 239 | if (sk->sk_err != ECONNABORTED) { |
240 | /* confirm close from peer */ | 240 | /* confirm close from peer */ |
@@ -263,7 +263,9 @@ again: | |||
263 | /* peer sending PeerConnectionClosed will cause transition */ | 263 | /* peer sending PeerConnectionClosed will cause transition */ |
264 | break; | 264 | break; |
265 | case SMC_PROCESSABORT: | 265 | case SMC_PROCESSABORT: |
266 | cancel_work_sync(&conn->tx_work); | 266 | release_sock(sk); |
267 | cancel_delayed_work_sync(&conn->tx_work); | ||
268 | lock_sock(sk); | ||
267 | smc_close_abort(conn); | 269 | smc_close_abort(conn); |
268 | sk->sk_state = SMC_CLOSED; | 270 | sk->sk_state = SMC_CLOSED; |
269 | smc_close_wait_tx_pends(smc); | 271 | smc_close_wait_tx_pends(smc); |
@@ -411,13 +413,14 @@ void smc_close_sock_put_work(struct work_struct *work) | |||
411 | int smc_close_shutdown_write(struct smc_sock *smc) | 413 | int smc_close_shutdown_write(struct smc_sock *smc) |
412 | { | 414 | { |
413 | struct smc_connection *conn = &smc->conn; | 415 | struct smc_connection *conn = &smc->conn; |
414 | long timeout = SMC_MAX_STREAM_WAIT_TIMEOUT; | ||
415 | struct sock *sk = &smc->sk; | 416 | struct sock *sk = &smc->sk; |
416 | int old_state; | 417 | int old_state; |
418 | long timeout; | ||
417 | int rc = 0; | 419 | int rc = 0; |
418 | 420 | ||
419 | if (sock_flag(sk, SOCK_LINGER)) | 421 | timeout = current->flags & PF_EXITING ? |
420 | timeout = sk->sk_lingertime; | 422 | 0 : sock_flag(sk, SOCK_LINGER) ? |
423 | sk->sk_lingertime : SMC_MAX_STREAM_WAIT_TIMEOUT; | ||
421 | 424 | ||
422 | again: | 425 | again: |
423 | old_state = sk->sk_state; | 426 | old_state = sk->sk_state; |
@@ -425,7 +428,7 @@ again: | |||
425 | case SMC_ACTIVE: | 428 | case SMC_ACTIVE: |
426 | smc_close_stream_wait(smc, timeout); | 429 | smc_close_stream_wait(smc, timeout); |
427 | release_sock(sk); | 430 | release_sock(sk); |
428 | cancel_work_sync(&conn->tx_work); | 431 | cancel_delayed_work_sync(&conn->tx_work); |
429 | lock_sock(sk); | 432 | lock_sock(sk); |
430 | /* send close wr request */ | 433 | /* send close wr request */ |
431 | rc = smc_close_wr(conn); | 434 | rc = smc_close_wr(conn); |
@@ -439,7 +442,7 @@ again: | |||
439 | if (!smc_cdc_rxed_any_close(conn)) | 442 | if (!smc_cdc_rxed_any_close(conn)) |
440 | smc_close_stream_wait(smc, timeout); | 443 | smc_close_stream_wait(smc, timeout); |
441 | release_sock(sk); | 444 | release_sock(sk); |
442 | cancel_work_sync(&conn->tx_work); | 445 | cancel_delayed_work_sync(&conn->tx_work); |
443 | lock_sock(sk); | 446 | lock_sock(sk); |
444 | /* confirm close from peer */ | 447 | /* confirm close from peer */ |
445 | rc = smc_close_wr(conn); | 448 | rc = smc_close_wr(conn); |
diff --git a/net/smc/smc_core.c b/net/smc/smc_core.c index 1a16d51e2330..20b66e79c5d6 100644 --- a/net/smc/smc_core.c +++ b/net/smc/smc_core.c | |||
@@ -25,8 +25,9 @@ | |||
25 | #include "smc_cdc.h" | 25 | #include "smc_cdc.h" |
26 | #include "smc_close.h" | 26 | #include "smc_close.h" |
27 | 27 | ||
28 | #define SMC_LGR_NUM_INCR 256 | 28 | #define SMC_LGR_NUM_INCR 256 |
29 | #define SMC_LGR_FREE_DELAY (600 * HZ) | 29 | #define SMC_LGR_FREE_DELAY_SERV (600 * HZ) |
30 | #define SMC_LGR_FREE_DELAY_CLNT (SMC_LGR_FREE_DELAY_SERV + 10) | ||
30 | 31 | ||
31 | static u32 smc_lgr_num; /* unique link group number */ | 32 | static u32 smc_lgr_num; /* unique link group number */ |
32 | 33 | ||
@@ -107,8 +108,15 @@ static void smc_lgr_unregister_conn(struct smc_connection *conn) | |||
107 | __smc_lgr_unregister_conn(conn); | 108 | __smc_lgr_unregister_conn(conn); |
108 | } | 109 | } |
109 | write_unlock_bh(&lgr->conns_lock); | 110 | write_unlock_bh(&lgr->conns_lock); |
110 | if (reduced && !lgr->conns_num) | 111 | if (!reduced || lgr->conns_num) |
111 | schedule_delayed_work(&lgr->free_work, SMC_LGR_FREE_DELAY); | 112 | return; |
113 | /* client link group creation always follows the server link group | ||
114 | * creation. For client use a somewhat higher removal delay time, | ||
115 | * otherwise there is a risk of out-of-sync link groups. | ||
116 | */ | ||
117 | mod_delayed_work(system_wq, &lgr->free_work, | ||
118 | lgr->role == SMC_CLNT ? SMC_LGR_FREE_DELAY_CLNT : | ||
119 | SMC_LGR_FREE_DELAY_SERV); | ||
112 | } | 120 | } |
113 | 121 | ||
114 | static void smc_lgr_free_work(struct work_struct *work) | 122 | static void smc_lgr_free_work(struct work_struct *work) |
diff --git a/net/smc/smc_ib.c b/net/smc/smc_ib.c index 547e0e113b17..0b5852299158 100644 --- a/net/smc/smc_ib.c +++ b/net/smc/smc_ib.c | |||
@@ -380,6 +380,7 @@ static int smc_ib_fill_gid_and_mac(struct smc_ib_device *smcibdev, u8 ibport) | |||
380 | ndev = smcibdev->ibdev->get_netdev(smcibdev->ibdev, ibport); | 380 | ndev = smcibdev->ibdev->get_netdev(smcibdev->ibdev, ibport); |
381 | if (ndev) { | 381 | if (ndev) { |
382 | memcpy(&smcibdev->mac, ndev->dev_addr, ETH_ALEN); | 382 | memcpy(&smcibdev->mac, ndev->dev_addr, ETH_ALEN); |
383 | dev_put(ndev); | ||
383 | } else if (!rc) { | 384 | } else if (!rc) { |
384 | memcpy(&smcibdev->mac[ibport - 1][0], | 385 | memcpy(&smcibdev->mac[ibport - 1][0], |
385 | &smcibdev->gid[ibport - 1].raw[8], 3); | 386 | &smcibdev->gid[ibport - 1].raw[8], 3); |
diff --git a/net/smc/smc_pnet.c b/net/smc/smc_pnet.c index 78f7af28ae4f..31f8453c25c5 100644 --- a/net/smc/smc_pnet.c +++ b/net/smc/smc_pnet.c | |||
@@ -181,8 +181,10 @@ static int smc_pnet_enter(struct smc_pnetentry *new_pnetelem) | |||
181 | sizeof(new_pnetelem->ndev->name)) || | 181 | sizeof(new_pnetelem->ndev->name)) || |
182 | smc_pnet_same_ibname(pnetelem, | 182 | smc_pnet_same_ibname(pnetelem, |
183 | new_pnetelem->smcibdev->ibdev->name, | 183 | new_pnetelem->smcibdev->ibdev->name, |
184 | new_pnetelem->ib_port)) | 184 | new_pnetelem->ib_port)) { |
185 | dev_put(pnetelem->ndev); | ||
185 | goto found; | 186 | goto found; |
187 | } | ||
186 | } | 188 | } |
187 | list_add_tail(&new_pnetelem->list, &smc_pnettable.pnetlist); | 189 | list_add_tail(&new_pnetelem->list, &smc_pnettable.pnetlist); |
188 | rc = 0; | 190 | rc = 0; |
diff --git a/net/smc/smc_rx.c b/net/smc/smc_rx.c index b17a333e9bb0..3e631ae4b6b6 100644 --- a/net/smc/smc_rx.c +++ b/net/smc/smc_rx.c | |||
@@ -148,6 +148,8 @@ int smc_rx_recvmsg(struct smc_sock *smc, struct msghdr *msg, size_t len, | |||
148 | read_done = sock_intr_errno(timeo); | 148 | read_done = sock_intr_errno(timeo); |
149 | break; | 149 | break; |
150 | } | 150 | } |
151 | if (!timeo) | ||
152 | return -EAGAIN; | ||
151 | } | 153 | } |
152 | 154 | ||
153 | if (!atomic_read(&conn->bytes_to_rcv)) { | 155 | if (!atomic_read(&conn->bytes_to_rcv)) { |
diff --git a/net/smc/smc_tx.c b/net/smc/smc_tx.c index 3c656beb8820..3866573288dd 100644 --- a/net/smc/smc_tx.c +++ b/net/smc/smc_tx.c | |||
@@ -24,6 +24,8 @@ | |||
24 | #include "smc_cdc.h" | 24 | #include "smc_cdc.h" |
25 | #include "smc_tx.h" | 25 | #include "smc_tx.h" |
26 | 26 | ||
27 | #define SMC_TX_WORK_DELAY HZ | ||
28 | |||
27 | /***************************** sndbuf producer *******************************/ | 29 | /***************************** sndbuf producer *******************************/ |
28 | 30 | ||
29 | /* callback implementation for sk.sk_write_space() | 31 | /* callback implementation for sk.sk_write_space() |
@@ -406,7 +408,8 @@ int smc_tx_sndbuf_nonempty(struct smc_connection *conn) | |||
406 | goto out_unlock; | 408 | goto out_unlock; |
407 | } | 409 | } |
408 | rc = 0; | 410 | rc = 0; |
409 | schedule_work(&conn->tx_work); | 411 | schedule_delayed_work(&conn->tx_work, |
412 | SMC_TX_WORK_DELAY); | ||
410 | } | 413 | } |
411 | goto out_unlock; | 414 | goto out_unlock; |
412 | } | 415 | } |
@@ -430,7 +433,7 @@ out_unlock: | |||
430 | */ | 433 | */ |
431 | static void smc_tx_work(struct work_struct *work) | 434 | static void smc_tx_work(struct work_struct *work) |
432 | { | 435 | { |
433 | struct smc_connection *conn = container_of(work, | 436 | struct smc_connection *conn = container_of(to_delayed_work(work), |
434 | struct smc_connection, | 437 | struct smc_connection, |
435 | tx_work); | 438 | tx_work); |
436 | struct smc_sock *smc = container_of(conn, struct smc_sock, conn); | 439 | struct smc_sock *smc = container_of(conn, struct smc_sock, conn); |
@@ -468,7 +471,8 @@ void smc_tx_consumer_update(struct smc_connection *conn) | |||
468 | if (!rc) | 471 | if (!rc) |
469 | rc = smc_cdc_msg_send(conn, wr_buf, pend); | 472 | rc = smc_cdc_msg_send(conn, wr_buf, pend); |
470 | if (rc < 0) { | 473 | if (rc < 0) { |
471 | schedule_work(&conn->tx_work); | 474 | schedule_delayed_work(&conn->tx_work, |
475 | SMC_TX_WORK_DELAY); | ||
472 | return; | 476 | return; |
473 | } | 477 | } |
474 | smc_curs_write(&conn->rx_curs_confirmed, | 478 | smc_curs_write(&conn->rx_curs_confirmed, |
@@ -487,6 +491,6 @@ void smc_tx_consumer_update(struct smc_connection *conn) | |||
487 | void smc_tx_init(struct smc_sock *smc) | 491 | void smc_tx_init(struct smc_sock *smc) |
488 | { | 492 | { |
489 | smc->sk.sk_write_space = smc_tx_write_space; | 493 | smc->sk.sk_write_space = smc_tx_write_space; |
490 | INIT_WORK(&smc->conn.tx_work, smc_tx_work); | 494 | INIT_DELAYED_WORK(&smc->conn.tx_work, smc_tx_work); |
491 | spin_lock_init(&smc->conn.send_lock); | 495 | spin_lock_init(&smc->conn.send_lock); |
492 | } | 496 | } |
diff --git a/net/smc/smc_wr.c b/net/smc/smc_wr.c index ab56bda66783..525d91e0d57e 100644 --- a/net/smc/smc_wr.c +++ b/net/smc/smc_wr.c | |||
@@ -244,7 +244,7 @@ int smc_wr_tx_send(struct smc_link *link, struct smc_wr_tx_pend_priv *priv) | |||
244 | int rc; | 244 | int rc; |
245 | 245 | ||
246 | ib_req_notify_cq(link->smcibdev->roce_cq_send, | 246 | ib_req_notify_cq(link->smcibdev->roce_cq_send, |
247 | IB_CQ_SOLICITED_MASK | IB_CQ_REPORT_MISSED_EVENTS); | 247 | IB_CQ_NEXT_COMP | IB_CQ_REPORT_MISSED_EVENTS); |
248 | pend = container_of(priv, struct smc_wr_tx_pend, priv); | 248 | pend = container_of(priv, struct smc_wr_tx_pend, priv); |
249 | rc = ib_post_send(link->roce_qp, &link->wr_tx_ibs[pend->idx], | 249 | rc = ib_post_send(link->roce_qp, &link->wr_tx_ibs[pend->idx], |
250 | &failed_wr); | 250 | &failed_wr); |
diff --git a/net/sunrpc/xprtrdma/frwr_ops.c b/net/sunrpc/xprtrdma/frwr_ops.c index 5a936a6a31a3..df062e086bdb 100644 --- a/net/sunrpc/xprtrdma/frwr_ops.c +++ b/net/sunrpc/xprtrdma/frwr_ops.c | |||
@@ -401,7 +401,7 @@ frwr_op_map(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg, | |||
401 | if (unlikely(n != mw->mw_nents)) | 401 | if (unlikely(n != mw->mw_nents)) |
402 | goto out_mapmr_err; | 402 | goto out_mapmr_err; |
403 | 403 | ||
404 | dprintk("RPC: %s: Using frmr %p to map %u segments (%u bytes)\n", | 404 | dprintk("RPC: %s: Using frmr %p to map %u segments (%llu bytes)\n", |
405 | __func__, frmr, mw->mw_nents, mr->length); | 405 | __func__, frmr, mw->mw_nents, mr->length); |
406 | 406 | ||
407 | key = (u8)(mr->rkey & 0x000000FF); | 407 | key = (u8)(mr->rkey & 0x000000FF); |
diff --git a/net/tipc/msg.c b/net/tipc/msg.c index 6ef379f004ac..121e59a1d0e7 100644 --- a/net/tipc/msg.c +++ b/net/tipc/msg.c | |||
@@ -551,7 +551,7 @@ bool tipc_msg_lookup_dest(struct net *net, struct sk_buff *skb, int *err) | |||
551 | return false; | 551 | return false; |
552 | if (msg_errcode(msg)) | 552 | if (msg_errcode(msg)) |
553 | return false; | 553 | return false; |
554 | *err = -TIPC_ERR_NO_NAME; | 554 | *err = TIPC_ERR_NO_NAME; |
555 | if (skb_linearize(skb)) | 555 | if (skb_linearize(skb)) |
556 | return false; | 556 | return false; |
557 | msg = buf_msg(skb); | 557 | msg = buf_msg(skb); |
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index 0df8023f480b..690874293cfc 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c | |||
@@ -9987,6 +9987,9 @@ static int nl80211_join_mesh(struct sk_buff *skb, struct genl_info *info) | |||
9987 | if (err) | 9987 | if (err) |
9988 | return err; | 9988 | return err; |
9989 | 9989 | ||
9990 | if (!setup.chandef.chan) | ||
9991 | return -EINVAL; | ||
9992 | |||
9990 | err = validate_beacon_tx_rate(rdev, setup.chandef.chan->band, | 9993 | err = validate_beacon_tx_rate(rdev, setup.chandef.chan->band, |
9991 | &setup.beacon_rate); | 9994 | &setup.beacon_rate); |
9992 | if (err) | 9995 | if (err) |
@@ -10903,6 +10906,9 @@ static int nl80211_set_rekey_data(struct sk_buff *skb, struct genl_info *info) | |||
10903 | if (err) | 10906 | if (err) |
10904 | return err; | 10907 | return err; |
10905 | 10908 | ||
10909 | if (!tb[NL80211_REKEY_DATA_REPLAY_CTR] || !tb[NL80211_REKEY_DATA_KEK] || | ||
10910 | !tb[NL80211_REKEY_DATA_KCK]) | ||
10911 | return -EINVAL; | ||
10906 | if (nla_len(tb[NL80211_REKEY_DATA_REPLAY_CTR]) != NL80211_REPLAY_CTR_LEN) | 10912 | if (nla_len(tb[NL80211_REKEY_DATA_REPLAY_CTR]) != NL80211_REPLAY_CTR_LEN) |
10907 | return -ERANGE; | 10913 | return -ERANGE; |
10908 | if (nla_len(tb[NL80211_REKEY_DATA_KEK]) != NL80211_KEK_LEN) | 10914 | if (nla_len(tb[NL80211_REKEY_DATA_KEK]) != NL80211_KEK_LEN) |
diff --git a/scripts/Makefile.build b/scripts/Makefile.build index 2e3a10e79ca9..061d0c3a420a 100644 --- a/scripts/Makefile.build +++ b/scripts/Makefile.build | |||
@@ -265,6 +265,8 @@ objtool_args += --no-fp | |||
265 | endif | 265 | endif |
266 | ifdef CONFIG_GCOV_KERNEL | 266 | ifdef CONFIG_GCOV_KERNEL |
267 | objtool_args += --no-unreachable | 267 | objtool_args += --no-unreachable |
268 | else | ||
269 | objtool_args += $(call cc-ifversion, -lt, 0405, --no-unreachable) | ||
268 | endif | 270 | endif |
269 | 271 | ||
270 | # 'OBJECT_FILES_NON_STANDARD := y': skip objtool checking for a directory | 272 | # 'OBJECT_FILES_NON_STANDARD := y': skip objtool checking for a directory |
diff --git a/scripts/checkpatch.pl b/scripts/checkpatch.pl index dd2c262aebbf..8b80bac055e4 100755 --- a/scripts/checkpatch.pl +++ b/scripts/checkpatch.pl | |||
@@ -6390,7 +6390,7 @@ sub process { | |||
6390 | exit(0); | 6390 | exit(0); |
6391 | } | 6391 | } |
6392 | 6392 | ||
6393 | if (!$is_patch && $file !~ /cover-letter\.patch$/) { | 6393 | if (!$is_patch && $filename !~ /cover-letter\.patch$/) { |
6394 | ERROR("NOT_UNIFIED_DIFF", | 6394 | ERROR("NOT_UNIFIED_DIFF", |
6395 | "Does not appear to be a unified-diff format patch\n"); | 6395 | "Does not appear to be a unified-diff format patch\n"); |
6396 | } | 6396 | } |
diff --git a/scripts/dtc/dtx_diff b/scripts/dtc/dtx_diff index f9a3d8d23c64..8c4fbad2055e 100755 --- a/scripts/dtc/dtx_diff +++ b/scripts/dtc/dtx_diff | |||
@@ -86,6 +86,7 @@ eod | |||
86 | compile_to_dts() { | 86 | compile_to_dts() { |
87 | 87 | ||
88 | dtx="$1" | 88 | dtx="$1" |
89 | dtc_include="$2" | ||
89 | 90 | ||
90 | if [ -d "${dtx}" ] ; then | 91 | if [ -d "${dtx}" ] ; then |
91 | 92 | ||
@@ -113,7 +114,7 @@ compile_to_dts() { | |||
113 | # ----- input is DTS (source) | 114 | # ----- input is DTS (source) |
114 | 115 | ||
115 | if ( cpp ${cpp_flags} -x assembler-with-cpp ${dtx} \ | 116 | if ( cpp ${cpp_flags} -x assembler-with-cpp ${dtx} \ |
116 | | ${DTC} -I dts ) ; then | 117 | | ${DTC} ${dtc_include} -I dts ) ; then |
117 | return | 118 | return |
118 | fi | 119 | fi |
119 | 120 | ||
@@ -320,18 +321,13 @@ fi | |||
320 | 321 | ||
321 | cpp_flags="\ | 322 | cpp_flags="\ |
322 | -nostdinc \ | 323 | -nostdinc \ |
323 | -I${srctree}/arch/${ARCH}/boot/dts \ | ||
324 | -I${srctree}/scripts/dtc/include-prefixes \ | 324 | -I${srctree}/scripts/dtc/include-prefixes \ |
325 | -I${srctree}/drivers/of/testcase-data \ | ||
326 | -undef -D__DTS__" | 325 | -undef -D__DTS__" |
327 | 326 | ||
328 | dtc_flags="\ | 327 | DTC="\ |
329 | -i ${srctree}/arch/${ARCH}/boot/dts/ \ | 328 | ${DTC} \ |
330 | -i ${srctree}/kernel/dts \ | 329 | -i ${srctree}/scripts/dtc/include-prefixes \ |
331 | ${dtx_path_1_dtc_include} \ | 330 | -O dts -qq -f ${dtc_sort} -o -" |
332 | ${dtx_path_2_dtc_include}" | ||
333 | |||
334 | DTC="${DTC} ${dtc_flags} -O dts -qq -f ${dtc_sort} -o -" | ||
335 | 331 | ||
336 | 332 | ||
337 | # ----- do the diff or decompile | 333 | # ----- do the diff or decompile |
@@ -339,11 +335,11 @@ DTC="${DTC} ${dtc_flags} -O dts -qq -f ${dtc_sort} -o -" | |||
339 | if (( ${cmd_diff} )) ; then | 335 | if (( ${cmd_diff} )) ; then |
340 | 336 | ||
341 | diff ${diff_flags} --label "${dtx_file_1}" --label "${dtx_file_2}" \ | 337 | diff ${diff_flags} --label "${dtx_file_1}" --label "${dtx_file_2}" \ |
342 | <(compile_to_dts "${dtx_file_1}") \ | 338 | <(compile_to_dts "${dtx_file_1}" "${dtx_path_1_dtc_include}") \ |
343 | <(compile_to_dts "${dtx_file_2}") | 339 | <(compile_to_dts "${dtx_file_2}" "${dtx_path_2_dtc_include}") |
344 | 340 | ||
345 | else | 341 | else |
346 | 342 | ||
347 | compile_to_dts "${dtx_file_1}" | 343 | compile_to_dts "${dtx_file_1}" "${dtx_path_1_dtc_include}" |
348 | 344 | ||
349 | fi | 345 | fi |
diff --git a/scripts/mkversion b/scripts/mkversion deleted file mode 100644 index c12addc9c7ef..000000000000 --- a/scripts/mkversion +++ /dev/null | |||
@@ -1,6 +0,0 @@ | |||
1 | if [ ! -f .version ] | ||
2 | then | ||
3 | echo 1 | ||
4 | else | ||
5 | expr 0`cat .version` + 1 | ||
6 | fi | ||
diff --git a/scripts/package/Makefile b/scripts/package/Makefile index 71b4a8af9d4d..73f9f3192b9f 100644 --- a/scripts/package/Makefile +++ b/scripts/package/Makefile | |||
@@ -50,8 +50,6 @@ rpm-pkg rpm: FORCE | |||
50 | $(MAKE) clean | 50 | $(MAKE) clean |
51 | $(CONFIG_SHELL) $(MKSPEC) >$(objtree)/kernel.spec | 51 | $(CONFIG_SHELL) $(MKSPEC) >$(objtree)/kernel.spec |
52 | $(call cmd,src_tar,$(KERNELPATH),kernel.spec) | 52 | $(call cmd,src_tar,$(KERNELPATH),kernel.spec) |
53 | $(CONFIG_SHELL) $(srctree)/scripts/mkversion > $(objtree)/.tmp_version | ||
54 | mv -f $(objtree)/.tmp_version $(objtree)/.version | ||
55 | rpmbuild $(RPMOPTS) --target $(UTS_MACHINE) -ta $(KERNELPATH).tar.gz | 53 | rpmbuild $(RPMOPTS) --target $(UTS_MACHINE) -ta $(KERNELPATH).tar.gz |
56 | rm $(KERNELPATH).tar.gz kernel.spec | 54 | rm $(KERNELPATH).tar.gz kernel.spec |
57 | 55 | ||
@@ -60,9 +58,6 @@ rpm-pkg rpm: FORCE | |||
60 | binrpm-pkg: FORCE | 58 | binrpm-pkg: FORCE |
61 | $(MAKE) KBUILD_SRC= | 59 | $(MAKE) KBUILD_SRC= |
62 | $(CONFIG_SHELL) $(MKSPEC) prebuilt > $(objtree)/binkernel.spec | 60 | $(CONFIG_SHELL) $(MKSPEC) prebuilt > $(objtree)/binkernel.spec |
63 | $(CONFIG_SHELL) $(srctree)/scripts/mkversion > $(objtree)/.tmp_version | ||
64 | mv -f $(objtree)/.tmp_version $(objtree)/.version | ||
65 | |||
66 | rpmbuild $(RPMOPTS) --define "_builddir $(objtree)" --target \ | 61 | rpmbuild $(RPMOPTS) --define "_builddir $(objtree)" --target \ |
67 | $(UTS_MACHINE) -bb $(objtree)/binkernel.spec | 62 | $(UTS_MACHINE) -bb $(objtree)/binkernel.spec |
68 | rm binkernel.spec | 63 | rm binkernel.spec |
diff --git a/scripts/package/builddeb b/scripts/package/builddeb index aad67000e4dd..0bc87473f68f 100755 --- a/scripts/package/builddeb +++ b/scripts/package/builddeb | |||
@@ -92,12 +92,10 @@ else | |||
92 | fi | 92 | fi |
93 | sourcename=$KDEB_SOURCENAME | 93 | sourcename=$KDEB_SOURCENAME |
94 | tmpdir="$objtree/debian/tmp" | 94 | tmpdir="$objtree/debian/tmp" |
95 | fwdir="$objtree/debian/fwtmp" | ||
96 | kernel_headers_dir="$objtree/debian/hdrtmp" | 95 | kernel_headers_dir="$objtree/debian/hdrtmp" |
97 | libc_headers_dir="$objtree/debian/headertmp" | 96 | libc_headers_dir="$objtree/debian/headertmp" |
98 | dbg_dir="$objtree/debian/dbgtmp" | 97 | dbg_dir="$objtree/debian/dbgtmp" |
99 | packagename=linux-image-$version | 98 | packagename=linux-image-$version |
100 | fwpackagename=linux-firmware-image-$version | ||
101 | kernel_headers_packagename=linux-headers-$version | 99 | kernel_headers_packagename=linux-headers-$version |
102 | libc_headers_packagename=linux-libc-dev | 100 | libc_headers_packagename=linux-libc-dev |
103 | dbg_packagename=$packagename-dbg | 101 | dbg_packagename=$packagename-dbg |
@@ -126,10 +124,9 @@ esac | |||
126 | BUILD_DEBUG="$(grep -s '^CONFIG_DEBUG_INFO=y' $KCONFIG_CONFIG || true)" | 124 | BUILD_DEBUG="$(grep -s '^CONFIG_DEBUG_INFO=y' $KCONFIG_CONFIG || true)" |
127 | 125 | ||
128 | # Setup the directory structure | 126 | # Setup the directory structure |
129 | rm -rf "$tmpdir" "$fwdir" "$kernel_headers_dir" "$libc_headers_dir" "$dbg_dir" $objtree/debian/files | 127 | rm -rf "$tmpdir" "$kernel_headers_dir" "$libc_headers_dir" "$dbg_dir" $objtree/debian/files |
130 | mkdir -m 755 -p "$tmpdir/DEBIAN" | 128 | mkdir -m 755 -p "$tmpdir/DEBIAN" |
131 | mkdir -p "$tmpdir/lib" "$tmpdir/boot" | 129 | mkdir -p "$tmpdir/lib" "$tmpdir/boot" |
132 | mkdir -p "$fwdir/lib/firmware/$version/" | ||
133 | mkdir -p "$kernel_headers_dir/lib/modules/$version/" | 130 | mkdir -p "$kernel_headers_dir/lib/modules/$version/" |
134 | 131 | ||
135 | # Build and install the kernel | 132 | # Build and install the kernel |
@@ -306,7 +303,6 @@ else | |||
306 | cat <<EOF >> debian/control | 303 | cat <<EOF >> debian/control |
307 | 304 | ||
308 | Package: $packagename | 305 | Package: $packagename |
309 | Suggests: $fwpackagename | ||
310 | Architecture: any | 306 | Architecture: any |
311 | Description: Linux kernel, version $version | 307 | Description: Linux kernel, version $version |
312 | This package contains the Linux kernel, modules and corresponding other | 308 | This package contains the Linux kernel, modules and corresponding other |
@@ -345,22 +341,6 @@ Description: Linux kernel headers for $KERNELRELEASE on \${kernel:debarch} | |||
345 | This is useful for people who need to build external modules | 341 | This is useful for people who need to build external modules |
346 | EOF | 342 | EOF |
347 | 343 | ||
348 | # Do we have firmware? Move it out of the way and build it into a package. | ||
349 | if [ -e "$tmpdir/lib/firmware" ]; then | ||
350 | mv "$tmpdir/lib/firmware"/* "$fwdir/lib/firmware/$version/" | ||
351 | rmdir "$tmpdir/lib/firmware" | ||
352 | |||
353 | cat <<EOF >> debian/control | ||
354 | |||
355 | Package: $fwpackagename | ||
356 | Architecture: all | ||
357 | Description: Linux kernel firmware, version $version | ||
358 | This package contains firmware from the Linux kernel, version $version. | ||
359 | EOF | ||
360 | |||
361 | create_package "$fwpackagename" "$fwdir" | ||
362 | fi | ||
363 | |||
364 | cat <<EOF >> debian/control | 344 | cat <<EOF >> debian/control |
365 | 345 | ||
366 | Package: $libc_headers_packagename | 346 | Package: $libc_headers_packagename |
diff --git a/scripts/package/mkspec b/scripts/package/mkspec index bb43f153fd8e..f47f17aae135 100755 --- a/scripts/package/mkspec +++ b/scripts/package/mkspec | |||
@@ -27,9 +27,7 @@ __KERNELRELEASE=`echo $KERNELRELEASE | sed -e "s/-/_/g"` | |||
27 | echo "Name: kernel" | 27 | echo "Name: kernel" |
28 | echo "Summary: The Linux Kernel" | 28 | echo "Summary: The Linux Kernel" |
29 | echo "Version: $__KERNELRELEASE" | 29 | echo "Version: $__KERNELRELEASE" |
30 | # we need to determine the NEXT version number so that uname and | 30 | echo "Release: $(cat .version 2>/dev/null || echo 1)" |
31 | # rpm -q will agree | ||
32 | echo "Release: `. $srctree/scripts/mkversion`" | ||
33 | echo "License: GPL" | 31 | echo "License: GPL" |
34 | echo "Group: System Environment/Kernel" | 32 | echo "Group: System Environment/Kernel" |
35 | echo "Vendor: The Linux Community" | 33 | echo "Vendor: The Linux Community" |
@@ -77,7 +75,7 @@ fi | |||
77 | echo "%build" | 75 | echo "%build" |
78 | 76 | ||
79 | if ! $PREBUILT; then | 77 | if ! $PREBUILT; then |
80 | echo "make clean && make %{?_smp_mflags}" | 78 | echo "make clean && make %{?_smp_mflags} KBUILD_BUILD_VERSION=%{release}" |
81 | echo "" | 79 | echo "" |
82 | fi | 80 | fi |
83 | 81 | ||
@@ -88,11 +86,8 @@ echo 'mkdir -p $RPM_BUILD_ROOT/boot/efi $RPM_BUILD_ROOT/lib/modules' | |||
88 | echo "%else" | 86 | echo "%else" |
89 | echo 'mkdir -p $RPM_BUILD_ROOT/boot $RPM_BUILD_ROOT/lib/modules' | 87 | echo 'mkdir -p $RPM_BUILD_ROOT/boot $RPM_BUILD_ROOT/lib/modules' |
90 | echo "%endif" | 88 | echo "%endif" |
91 | echo 'mkdir -p $RPM_BUILD_ROOT'"/lib/firmware/$KERNELRELEASE" | ||
92 | 89 | ||
93 | echo 'INSTALL_MOD_PATH=$RPM_BUILD_ROOT make %{?_smp_mflags} KBUILD_SRC= mod-fw= modules_install' | 90 | echo 'INSTALL_MOD_PATH=$RPM_BUILD_ROOT make %{?_smp_mflags} KBUILD_SRC= modules_install' |
94 | echo 'INSTALL_FW_PATH=$RPM_BUILD_ROOT'"/lib/firmware/$KERNELRELEASE" | ||
95 | echo 'make INSTALL_FW_PATH=$INSTALL_FW_PATH' firmware_install | ||
96 | echo "%ifarch ia64" | 91 | echo "%ifarch ia64" |
97 | echo 'cp $KBUILD_IMAGE $RPM_BUILD_ROOT'"/boot/efi/vmlinuz-$KERNELRELEASE" | 92 | echo 'cp $KBUILD_IMAGE $RPM_BUILD_ROOT'"/boot/efi/vmlinuz-$KERNELRELEASE" |
98 | echo 'ln -s '"efi/vmlinuz-$KERNELRELEASE" '$RPM_BUILD_ROOT'"/boot/" | 93 | echo 'ln -s '"efi/vmlinuz-$KERNELRELEASE" '$RPM_BUILD_ROOT'"/boot/" |
@@ -119,7 +114,7 @@ if ! $PREBUILT; then | |||
119 | echo 'rm -f $RPM_BUILD_ROOT'"/lib/modules/$KERNELRELEASE/build" | 114 | echo 'rm -f $RPM_BUILD_ROOT'"/lib/modules/$KERNELRELEASE/build" |
120 | echo 'rm -f $RPM_BUILD_ROOT'"/lib/modules/$KERNELRELEASE/source" | 115 | echo 'rm -f $RPM_BUILD_ROOT'"/lib/modules/$KERNELRELEASE/source" |
121 | echo "mkdir -p "'$RPM_BUILD_ROOT'"/usr/src/kernels/$KERNELRELEASE" | 116 | echo "mkdir -p "'$RPM_BUILD_ROOT'"/usr/src/kernels/$KERNELRELEASE" |
122 | echo "EXCLUDES=\"$RCS_TAR_IGNORE --exclude .tmp_versions --exclude=*vmlinux* --exclude=*.o --exclude=*.ko --exclude=*.cmd --exclude=Documentation --exclude=firmware --exclude .config.old --exclude .missing-syscalls.d\"" | 117 | echo "EXCLUDES=\"$RCS_TAR_IGNORE --exclude .tmp_versions --exclude=*vmlinux* --exclude=*.o --exclude=*.ko --exclude=*.cmd --exclude=Documentation --exclude .config.old --exclude .missing-syscalls.d\"" |
123 | echo "tar "'$EXCLUDES'" -cf- . | (cd "'$RPM_BUILD_ROOT'"/usr/src/kernels/$KERNELRELEASE;tar xvf -)" | 118 | echo "tar "'$EXCLUDES'" -cf- . | (cd "'$RPM_BUILD_ROOT'"/usr/src/kernels/$KERNELRELEASE;tar xvf -)" |
124 | echo 'cd $RPM_BUILD_ROOT'"/lib/modules/$KERNELRELEASE" | 119 | echo 'cd $RPM_BUILD_ROOT'"/lib/modules/$KERNELRELEASE" |
125 | echo "ln -sf /usr/src/kernels/$KERNELRELEASE build" | 120 | echo "ln -sf /usr/src/kernels/$KERNELRELEASE build" |
@@ -154,7 +149,6 @@ echo '%defattr (-, root, root)' | |||
154 | echo "/lib/modules/$KERNELRELEASE" | 149 | echo "/lib/modules/$KERNELRELEASE" |
155 | echo "%exclude /lib/modules/$KERNELRELEASE/build" | 150 | echo "%exclude /lib/modules/$KERNELRELEASE/build" |
156 | echo "%exclude /lib/modules/$KERNELRELEASE/source" | 151 | echo "%exclude /lib/modules/$KERNELRELEASE/source" |
157 | echo "/lib/firmware/$KERNELRELEASE" | ||
158 | echo "/boot/*" | 152 | echo "/boot/*" |
159 | echo "" | 153 | echo "" |
160 | echo "%files headers" | 154 | echo "%files headers" |
diff --git a/scripts/spelling.txt b/scripts/spelling.txt index 400ef35169c5..aa0cc49ad1ad 100644 --- a/scripts/spelling.txt +++ b/scripts/spelling.txt | |||
@@ -53,6 +53,7 @@ acumulator||accumulator | |||
53 | adapater||adapter | 53 | adapater||adapter |
54 | addional||additional | 54 | addional||additional |
55 | additionaly||additionally | 55 | additionaly||additionally |
56 | additonal||additional | ||
56 | addres||address | 57 | addres||address |
57 | adddress||address | 58 | adddress||address |
58 | addreses||addresses | 59 | addreses||addresses |
@@ -67,6 +68,8 @@ adviced||advised | |||
67 | afecting||affecting | 68 | afecting||affecting |
68 | againt||against | 69 | againt||against |
69 | agaist||against | 70 | agaist||against |
71 | aggreataon||aggregation | ||
72 | aggreation||aggregation | ||
70 | albumns||albums | 73 | albumns||albums |
71 | alegorical||allegorical | 74 | alegorical||allegorical |
72 | algined||aligned | 75 | algined||aligned |
@@ -80,6 +83,8 @@ aligment||alignment | |||
80 | alignement||alignment | 83 | alignement||alignment |
81 | allign||align | 84 | allign||align |
82 | alligned||aligned | 85 | alligned||aligned |
86 | alllocate||allocate | ||
87 | alloated||allocated | ||
83 | allocatote||allocate | 88 | allocatote||allocate |
84 | allocatrd||allocated | 89 | allocatrd||allocated |
85 | allocte||allocate | 90 | allocte||allocate |
@@ -171,6 +176,7 @@ availale||available | |||
171 | availavility||availability | 176 | availavility||availability |
172 | availble||available | 177 | availble||available |
173 | availiable||available | 178 | availiable||available |
179 | availible||available | ||
174 | avalable||available | 180 | avalable||available |
175 | avaliable||available | 181 | avaliable||available |
176 | aysnc||async | 182 | aysnc||async |
@@ -203,6 +209,7 @@ broadcat||broadcast | |||
203 | cacluated||calculated | 209 | cacluated||calculated |
204 | caculation||calculation | 210 | caculation||calculation |
205 | calender||calendar | 211 | calender||calendar |
212 | calescing||coalescing | ||
206 | calle||called | 213 | calle||called |
207 | callibration||calibration | 214 | callibration||calibration |
208 | calucate||calculate | 215 | calucate||calculate |
@@ -210,6 +217,7 @@ calulate||calculate | |||
210 | cancelation||cancellation | 217 | cancelation||cancellation |
211 | cancle||cancel | 218 | cancle||cancel |
212 | capabilites||capabilities | 219 | capabilites||capabilities |
220 | capabilty||capability | ||
213 | capabitilies||capabilities | 221 | capabitilies||capabilities |
214 | capatibilities||capabilities | 222 | capatibilities||capabilities |
215 | capapbilities||capabilities | 223 | capapbilities||capabilities |
@@ -302,6 +310,7 @@ containts||contains | |||
302 | contaisn||contains | 310 | contaisn||contains |
303 | contant||contact | 311 | contant||contact |
304 | contence||contents | 312 | contence||contents |
313 | continious||continuous | ||
305 | continous||continuous | 314 | continous||continuous |
306 | continously||continuously | 315 | continously||continuously |
307 | continueing||continuing | 316 | continueing||continuing |
@@ -393,6 +402,7 @@ differrence||difference | |||
393 | diffrent||different | 402 | diffrent||different |
394 | diffrentiate||differentiate | 403 | diffrentiate||differentiate |
395 | difinition||definition | 404 | difinition||definition |
405 | dimesions||dimensions | ||
396 | diplay||display | 406 | diplay||display |
397 | direectly||directly | 407 | direectly||directly |
398 | disassocation||disassociation | 408 | disassocation||disassociation |
@@ -449,6 +459,7 @@ equiped||equipped | |||
449 | equivelant||equivalent | 459 | equivelant||equivalent |
450 | equivilant||equivalent | 460 | equivilant||equivalent |
451 | eror||error | 461 | eror||error |
462 | errorr||error | ||
452 | estbalishment||establishment | 463 | estbalishment||establishment |
453 | etsablishment||establishment | 464 | etsablishment||establishment |
454 | etsbalishment||establishment | 465 | etsbalishment||establishment |
@@ -481,6 +492,7 @@ failied||failed | |||
481 | faillure||failure | 492 | faillure||failure |
482 | failue||failure | 493 | failue||failure |
483 | failuer||failure | 494 | failuer||failure |
495 | failng||failing | ||
484 | faireness||fairness | 496 | faireness||fairness |
485 | falied||failed | 497 | falied||failed |
486 | faliure||failure | 498 | faliure||failure |
@@ -493,6 +505,7 @@ fetaure||feature | |||
493 | fetaures||features | 505 | fetaures||features |
494 | fileystem||filesystem | 506 | fileystem||filesystem |
495 | fimware||firmware | 507 | fimware||firmware |
508 | firware||firmware | ||
496 | finanize||finalize | 509 | finanize||finalize |
497 | findn||find | 510 | findn||find |
498 | finilizes||finalizes | 511 | finilizes||finalizes |
@@ -502,6 +515,7 @@ folloing||following | |||
502 | followign||following | 515 | followign||following |
503 | followings||following | 516 | followings||following |
504 | follwing||following | 517 | follwing||following |
518 | fonud||found | ||
505 | forseeable||foreseeable | 519 | forseeable||foreseeable |
506 | forse||force | 520 | forse||force |
507 | fortan||fortran | 521 | fortan||fortran |
@@ -532,6 +546,7 @@ grabing||grabbing | |||
532 | grahical||graphical | 546 | grahical||graphical |
533 | grahpical||graphical | 547 | grahpical||graphical |
534 | grapic||graphic | 548 | grapic||graphic |
549 | grranted||granted | ||
535 | guage||gauge | 550 | guage||gauge |
536 | guarenteed||guaranteed | 551 | guarenteed||guaranteed |
537 | guarentee||guarantee | 552 | guarentee||guarantee |
@@ -543,6 +558,7 @@ happend||happened | |||
543 | harware||hardware | 558 | harware||hardware |
544 | heirarchically||hierarchically | 559 | heirarchically||hierarchically |
545 | helpfull||helpful | 560 | helpfull||helpful |
561 | hybernate||hibernate | ||
546 | hierachy||hierarchy | 562 | hierachy||hierarchy |
547 | hierarchie||hierarchy | 563 | hierarchie||hierarchy |
548 | howver||however | 564 | howver||however |
@@ -565,16 +581,19 @@ implemenation||implementation | |||
565 | implementaiton||implementation | 581 | implementaiton||implementation |
566 | implementated||implemented | 582 | implementated||implemented |
567 | implemention||implementation | 583 | implemention||implementation |
584 | implementd||implemented | ||
568 | implemetation||implementation | 585 | implemetation||implementation |
569 | implemntation||implementation | 586 | implemntation||implementation |
570 | implentation||implementation | 587 | implentation||implementation |
571 | implmentation||implementation | 588 | implmentation||implementation |
572 | implmenting||implementing | 589 | implmenting||implementing |
590 | incative||inactive | ||
573 | incomming||incoming | 591 | incomming||incoming |
574 | incompatabilities||incompatibilities | 592 | incompatabilities||incompatibilities |
575 | incompatable||incompatible | 593 | incompatable||incompatible |
576 | inconsistant||inconsistent | 594 | inconsistant||inconsistent |
577 | increas||increase | 595 | increas||increase |
596 | incremeted||incremented | ||
578 | incrment||increment | 597 | incrment||increment |
579 | indendation||indentation | 598 | indendation||indentation |
580 | indended||intended | 599 | indended||intended |
@@ -619,6 +638,7 @@ interger||integer | |||
619 | intermittant||intermittent | 638 | intermittant||intermittent |
620 | internel||internal | 639 | internel||internal |
621 | interoprability||interoperability | 640 | interoprability||interoperability |
641 | interuupt||interrupt | ||
622 | interrface||interface | 642 | interrface||interface |
623 | interrrupt||interrupt | 643 | interrrupt||interrupt |
624 | interrup||interrupt | 644 | interrup||interrupt |
@@ -638,8 +658,10 @@ intrrupt||interrupt | |||
638 | intterrupt||interrupt | 658 | intterrupt||interrupt |
639 | intuative||intuitive | 659 | intuative||intuitive |
640 | invaid||invalid | 660 | invaid||invalid |
661 | invald||invalid | ||
641 | invalde||invalid | 662 | invalde||invalid |
642 | invalide||invalid | 663 | invalide||invalid |
664 | invalidiate||invalidate | ||
643 | invalud||invalid | 665 | invalud||invalid |
644 | invididual||individual | 666 | invididual||individual |
645 | invokation||invocation | 667 | invokation||invocation |
@@ -713,6 +735,7 @@ misformed||malformed | |||
713 | mispelled||misspelled | 735 | mispelled||misspelled |
714 | mispelt||misspelt | 736 | mispelt||misspelt |
715 | mising||missing | 737 | mising||missing |
738 | mismactch||mismatch | ||
716 | missmanaged||mismanaged | 739 | missmanaged||mismanaged |
717 | missmatch||mismatch | 740 | missmatch||mismatch |
718 | miximum||maximum | 741 | miximum||maximum |
@@ -731,6 +754,7 @@ multidimensionnal||multidimensional | |||
731 | multple||multiple | 754 | multple||multiple |
732 | mumber||number | 755 | mumber||number |
733 | muticast||multicast | 756 | muticast||multicast |
757 | mutilcast||multicast | ||
734 | mutiple||multiple | 758 | mutiple||multiple |
735 | mutli||multi | 759 | mutli||multi |
736 | nams||names | 760 | nams||names |
@@ -834,6 +858,7 @@ posible||possible | |||
834 | positon||position | 858 | positon||position |
835 | possibilites||possibilities | 859 | possibilites||possibilities |
836 | powerfull||powerful | 860 | powerfull||powerful |
861 | preample||preamble | ||
837 | preapre||prepare | 862 | preapre||prepare |
838 | preceeded||preceded | 863 | preceeded||preceded |
839 | preceeding||preceding | 864 | preceeding||preceding |
@@ -1059,6 +1084,7 @@ sturcture||structure | |||
1059 | subdirectoires||subdirectories | 1084 | subdirectoires||subdirectories |
1060 | suble||subtle | 1085 | suble||subtle |
1061 | substract||subtract | 1086 | substract||subtract |
1087 | submition||submission | ||
1062 | succesfully||successfully | 1088 | succesfully||successfully |
1063 | succesful||successful | 1089 | succesful||successful |
1064 | successed||succeeded | 1090 | successed||succeeded |
@@ -1078,6 +1104,7 @@ suppoted||supported | |||
1078 | suppported||supported | 1104 | suppported||supported |
1079 | suppport||support | 1105 | suppport||support |
1080 | supress||suppress | 1106 | supress||suppress |
1107 | surpressed||suppressed | ||
1081 | surpresses||suppresses | 1108 | surpresses||suppresses |
1082 | susbsystem||subsystem | 1109 | susbsystem||subsystem |
1083 | suspeneded||suspended | 1110 | suspeneded||suspended |
@@ -1091,6 +1118,7 @@ swithced||switched | |||
1091 | swithcing||switching | 1118 | swithcing||switching |
1092 | swithed||switched | 1119 | swithed||switched |
1093 | swithing||switching | 1120 | swithing||switching |
1121 | swtich||switch | ||
1094 | symetric||symmetric | 1122 | symetric||symmetric |
1095 | synax||syntax | 1123 | synax||syntax |
1096 | synchonized||synchronized | 1124 | synchonized||synchronized |
@@ -1111,7 +1139,9 @@ therfore||therefore | |||
1111 | thier||their | 1139 | thier||their |
1112 | threds||threads | 1140 | threds||threads |
1113 | threshhold||threshold | 1141 | threshhold||threshold |
1142 | thresold||threshold | ||
1114 | throught||through | 1143 | throught||through |
1144 | troughput||throughput | ||
1115 | thses||these | 1145 | thses||these |
1116 | tiggered||triggered | 1146 | tiggered||triggered |
1117 | tipically||typically | 1147 | tipically||typically |
@@ -1120,6 +1150,7 @@ tmis||this | |||
1120 | torerable||tolerable | 1150 | torerable||tolerable |
1121 | tramsmitted||transmitted | 1151 | tramsmitted||transmitted |
1122 | tramsmit||transmit | 1152 | tramsmit||transmit |
1153 | tranasction||transaction | ||
1123 | tranfer||transfer | 1154 | tranfer||transfer |
1124 | transciever||transceiver | 1155 | transciever||transceiver |
1125 | transferd||transferred | 1156 | transferd||transferred |
@@ -1133,6 +1164,7 @@ trasmission||transmission | |||
1133 | treshold||threshold | 1164 | treshold||threshold |
1134 | trigerring||triggering | 1165 | trigerring||triggering |
1135 | trun||turn | 1166 | trun||turn |
1167 | tunning||tuning | ||
1136 | ture||true | 1168 | ture||true |
1137 | tyep||type | 1169 | tyep||type |
1138 | udpate||update | 1170 | udpate||update |
@@ -1199,6 +1231,7 @@ visiters||visitors | |||
1199 | vitual||virtual | 1231 | vitual||virtual |
1200 | wakeus||wakeups | 1232 | wakeus||wakeups |
1201 | wating||waiting | 1233 | wating||waiting |
1234 | wiat||wait | ||
1202 | wether||whether | 1235 | wether||whether |
1203 | whataver||whatever | 1236 | whataver||whatever |
1204 | whcih||which | 1237 | whcih||which |
diff --git a/security/apparmor/.gitignore b/security/apparmor/.gitignore index 9cdec70d72b8..d5b291e94264 100644 --- a/security/apparmor/.gitignore +++ b/security/apparmor/.gitignore | |||
@@ -1,5 +1,6 @@ | |||
1 | # | 1 | # |
2 | # Generated include files | 2 | # Generated include files |
3 | # | 3 | # |
4 | net_names.h | ||
4 | capability_names.h | 5 | capability_names.h |
5 | rlim_names.h | 6 | rlim_names.h |
diff --git a/security/apparmor/Makefile b/security/apparmor/Makefile index a16b195274de..dafdd387d42b 100644 --- a/security/apparmor/Makefile +++ b/security/apparmor/Makefile | |||
@@ -4,11 +4,44 @@ obj-$(CONFIG_SECURITY_APPARMOR) += apparmor.o | |||
4 | 4 | ||
5 | apparmor-y := apparmorfs.o audit.o capability.o context.o ipc.o lib.o match.o \ | 5 | apparmor-y := apparmorfs.o audit.o capability.o context.o ipc.o lib.o match.o \ |
6 | path.o domain.o policy.o policy_unpack.o procattr.o lsm.o \ | 6 | path.o domain.o policy.o policy_unpack.o procattr.o lsm.o \ |
7 | resource.o secid.o file.o policy_ns.o label.o | 7 | resource.o secid.o file.o policy_ns.o label.o mount.o net.o |
8 | apparmor-$(CONFIG_SECURITY_APPARMOR_HASH) += crypto.o | 8 | apparmor-$(CONFIG_SECURITY_APPARMOR_HASH) += crypto.o |
9 | 9 | ||
10 | clean-files := capability_names.h rlim_names.h | 10 | clean-files := capability_names.h rlim_names.h net_names.h |
11 | 11 | ||
12 | # Build a lower case string table of address family names | ||
13 | # Transform lines from | ||
14 | # #define AF_LOCAL 1 /* POSIX name for AF_UNIX */ | ||
15 | # #define AF_INET 2 /* Internet IP Protocol */ | ||
16 | # to | ||
17 | # [1] = "local", | ||
18 | # [2] = "inet", | ||
19 | # | ||
20 | # and build the securityfs entries for the mapping. | ||
21 | # Transforms lines from | ||
22 | # #define AF_INET 2 /* Internet IP Protocol */ | ||
23 | # to | ||
24 | # #define AA_SFS_AF_MASK "local inet" | ||
25 | quiet_cmd_make-af = GEN $@ | ||
26 | cmd_make-af = echo "static const char *address_family_names[] = {" > $@ ;\ | ||
27 | sed $< >>$@ -r -n -e "/AF_MAX/d" -e "/AF_LOCAL/d" -e "/AF_ROUTE/d" -e \ | ||
28 | 's/^\#define[ \t]+AF_([A-Z0-9_]+)[ \t]+([0-9]+)(.*)/[\2] = "\L\1",/p';\ | ||
29 | echo "};" >> $@ ;\ | ||
30 | printf '%s' '\#define AA_SFS_AF_MASK "' >> $@ ;\ | ||
31 | sed -r -n -e "/AF_MAX/d" -e "/AF_LOCAL/d" -e "/AF_ROUTE/d" -e \ | ||
32 | 's/^\#define[ \t]+AF_([A-Z0-9_]+)[ \t]+([0-9]+)(.*)/\L\1/p'\ | ||
33 | $< | tr '\n' ' ' | sed -e 's/ $$/"\n/' >> $@ | ||
34 | |||
35 | # Build a lower case string table of sock type names | ||
36 | # Transform lines from | ||
37 | # SOCK_STREAM = 1, | ||
38 | # to | ||
39 | # [1] = "stream", | ||
40 | quiet_cmd_make-sock = GEN $@ | ||
41 | cmd_make-sock = echo "static const char *sock_type_names[] = {" >> $@ ;\ | ||
42 | sed $^ >>$@ -r -n \ | ||
43 | -e 's/^\tSOCK_([A-Z0-9_]+)[\t]+=[ \t]+([0-9]+)(.*)/[\2] = "\L\1",/p';\ | ||
44 | echo "};" >> $@ | ||
12 | 45 | ||
13 | # Build a lower case string table of capability names | 46 | # Build a lower case string table of capability names |
14 | # Transforms lines from | 47 | # Transforms lines from |
@@ -61,6 +94,7 @@ cmd_make-rlim = echo "static const char *const rlim_names[RLIM_NLIMITS] = {" \ | |||
61 | tr '\n' ' ' | sed -e 's/ $$/"\n/' >> $@ | 94 | tr '\n' ' ' | sed -e 's/ $$/"\n/' >> $@ |
62 | 95 | ||
63 | $(obj)/capability.o : $(obj)/capability_names.h | 96 | $(obj)/capability.o : $(obj)/capability_names.h |
97 | $(obj)/net.o : $(obj)/net_names.h | ||
64 | $(obj)/resource.o : $(obj)/rlim_names.h | 98 | $(obj)/resource.o : $(obj)/rlim_names.h |
65 | $(obj)/capability_names.h : $(srctree)/include/uapi/linux/capability.h \ | 99 | $(obj)/capability_names.h : $(srctree)/include/uapi/linux/capability.h \ |
66 | $(src)/Makefile | 100 | $(src)/Makefile |
@@ -68,3 +102,8 @@ $(obj)/capability_names.h : $(srctree)/include/uapi/linux/capability.h \ | |||
68 | $(obj)/rlim_names.h : $(srctree)/include/uapi/asm-generic/resource.h \ | 102 | $(obj)/rlim_names.h : $(srctree)/include/uapi/asm-generic/resource.h \ |
69 | $(src)/Makefile | 103 | $(src)/Makefile |
70 | $(call cmd,make-rlim) | 104 | $(call cmd,make-rlim) |
105 | $(obj)/net_names.h : $(srctree)/include/linux/socket.h \ | ||
106 | $(srctree)/include/linux/net.h \ | ||
107 | $(src)/Makefile | ||
108 | $(call cmd,make-af) | ||
109 | $(call cmd,make-sock) | ||
diff --git a/security/apparmor/apparmorfs.c b/security/apparmor/apparmorfs.c index 853c2ec8e0c9..518d5928661b 100644 --- a/security/apparmor/apparmorfs.c +++ b/security/apparmor/apparmorfs.c | |||
@@ -32,6 +32,7 @@ | |||
32 | #include "include/audit.h" | 32 | #include "include/audit.h" |
33 | #include "include/context.h" | 33 | #include "include/context.h" |
34 | #include "include/crypto.h" | 34 | #include "include/crypto.h" |
35 | #include "include/ipc.h" | ||
35 | #include "include/policy_ns.h" | 36 | #include "include/policy_ns.h" |
36 | #include "include/label.h" | 37 | #include "include/label.h" |
37 | #include "include/policy.h" | 38 | #include "include/policy.h" |
@@ -248,8 +249,10 @@ static struct dentry *aafs_create(const char *name, umode_t mode, | |||
248 | 249 | ||
249 | inode_lock(dir); | 250 | inode_lock(dir); |
250 | dentry = lookup_one_len(name, parent, strlen(name)); | 251 | dentry = lookup_one_len(name, parent, strlen(name)); |
251 | if (IS_ERR(dentry)) | 252 | if (IS_ERR(dentry)) { |
253 | error = PTR_ERR(dentry); | ||
252 | goto fail_lock; | 254 | goto fail_lock; |
255 | } | ||
253 | 256 | ||
254 | if (d_really_is_positive(dentry)) { | 257 | if (d_really_is_positive(dentry)) { |
255 | error = -EEXIST; | 258 | error = -EEXIST; |
@@ -1443,6 +1446,10 @@ void __aafs_profile_migrate_dents(struct aa_profile *old, | |||
1443 | { | 1446 | { |
1444 | int i; | 1447 | int i; |
1445 | 1448 | ||
1449 | AA_BUG(!old); | ||
1450 | AA_BUG(!new); | ||
1451 | AA_BUG(!mutex_is_locked(&profiles_ns(old)->lock)); | ||
1452 | |||
1446 | for (i = 0; i < AAFS_PROF_SIZEOF; i++) { | 1453 | for (i = 0; i < AAFS_PROF_SIZEOF; i++) { |
1447 | new->dents[i] = old->dents[i]; | 1454 | new->dents[i] = old->dents[i]; |
1448 | if (new->dents[i]) | 1455 | if (new->dents[i]) |
@@ -1506,6 +1513,9 @@ int __aafs_profile_mkdir(struct aa_profile *profile, struct dentry *parent) | |||
1506 | struct dentry *dent = NULL, *dir; | 1513 | struct dentry *dent = NULL, *dir; |
1507 | int error; | 1514 | int error; |
1508 | 1515 | ||
1516 | AA_BUG(!profile); | ||
1517 | AA_BUG(!mutex_is_locked(&profiles_ns(profile)->lock)); | ||
1518 | |||
1509 | if (!parent) { | 1519 | if (!parent) { |
1510 | struct aa_profile *p; | 1520 | struct aa_profile *p; |
1511 | p = aa_deref_parent(profile); | 1521 | p = aa_deref_parent(profile); |
@@ -1731,6 +1741,7 @@ void __aafs_ns_rmdir(struct aa_ns *ns) | |||
1731 | 1741 | ||
1732 | if (!ns) | 1742 | if (!ns) |
1733 | return; | 1743 | return; |
1744 | AA_BUG(!mutex_is_locked(&ns->lock)); | ||
1734 | 1745 | ||
1735 | list_for_each_entry(child, &ns->base.profiles, base.list) | 1746 | list_for_each_entry(child, &ns->base.profiles, base.list) |
1736 | __aafs_profile_rmdir(child); | 1747 | __aafs_profile_rmdir(child); |
@@ -1903,6 +1914,10 @@ static struct aa_ns *__next_ns(struct aa_ns *root, struct aa_ns *ns) | |||
1903 | { | 1914 | { |
1904 | struct aa_ns *parent, *next; | 1915 | struct aa_ns *parent, *next; |
1905 | 1916 | ||
1917 | AA_BUG(!root); | ||
1918 | AA_BUG(!ns); | ||
1919 | AA_BUG(ns != root && !mutex_is_locked(&ns->parent->lock)); | ||
1920 | |||
1906 | /* is next namespace a child */ | 1921 | /* is next namespace a child */ |
1907 | if (!list_empty(&ns->sub_ns)) { | 1922 | if (!list_empty(&ns->sub_ns)) { |
1908 | next = list_first_entry(&ns->sub_ns, typeof(*ns), base.list); | 1923 | next = list_first_entry(&ns->sub_ns, typeof(*ns), base.list); |
@@ -1937,6 +1952,9 @@ static struct aa_ns *__next_ns(struct aa_ns *root, struct aa_ns *ns) | |||
1937 | static struct aa_profile *__first_profile(struct aa_ns *root, | 1952 | static struct aa_profile *__first_profile(struct aa_ns *root, |
1938 | struct aa_ns *ns) | 1953 | struct aa_ns *ns) |
1939 | { | 1954 | { |
1955 | AA_BUG(!root); | ||
1956 | AA_BUG(ns && !mutex_is_locked(&ns->lock)); | ||
1957 | |||
1940 | for (; ns; ns = __next_ns(root, ns)) { | 1958 | for (; ns; ns = __next_ns(root, ns)) { |
1941 | if (!list_empty(&ns->base.profiles)) | 1959 | if (!list_empty(&ns->base.profiles)) |
1942 | return list_first_entry(&ns->base.profiles, | 1960 | return list_first_entry(&ns->base.profiles, |
@@ -1959,6 +1977,8 @@ static struct aa_profile *__next_profile(struct aa_profile *p) | |||
1959 | struct aa_profile *parent; | 1977 | struct aa_profile *parent; |
1960 | struct aa_ns *ns = p->ns; | 1978 | struct aa_ns *ns = p->ns; |
1961 | 1979 | ||
1980 | AA_BUG(!mutex_is_locked(&profiles_ns(p)->lock)); | ||
1981 | |||
1962 | /* is next profile a child */ | 1982 | /* is next profile a child */ |
1963 | if (!list_empty(&p->base.profiles)) | 1983 | if (!list_empty(&p->base.profiles)) |
1964 | return list_first_entry(&p->base.profiles, typeof(*p), | 1984 | return list_first_entry(&p->base.profiles, typeof(*p), |
@@ -2127,6 +2147,11 @@ static struct aa_sfs_entry aa_sfs_entry_ptrace[] = { | |||
2127 | { } | 2147 | { } |
2128 | }; | 2148 | }; |
2129 | 2149 | ||
2150 | static struct aa_sfs_entry aa_sfs_entry_signal[] = { | ||
2151 | AA_SFS_FILE_STRING("mask", AA_SFS_SIG_MASK), | ||
2152 | { } | ||
2153 | }; | ||
2154 | |||
2130 | static struct aa_sfs_entry aa_sfs_entry_domain[] = { | 2155 | static struct aa_sfs_entry aa_sfs_entry_domain[] = { |
2131 | AA_SFS_FILE_BOOLEAN("change_hat", 1), | 2156 | AA_SFS_FILE_BOOLEAN("change_hat", 1), |
2132 | AA_SFS_FILE_BOOLEAN("change_hatv", 1), | 2157 | AA_SFS_FILE_BOOLEAN("change_hatv", 1), |
@@ -2151,9 +2176,14 @@ static struct aa_sfs_entry aa_sfs_entry_policy[] = { | |||
2151 | { } | 2176 | { } |
2152 | }; | 2177 | }; |
2153 | 2178 | ||
2179 | static struct aa_sfs_entry aa_sfs_entry_mount[] = { | ||
2180 | AA_SFS_FILE_STRING("mask", "mount umount pivot_root"), | ||
2181 | { } | ||
2182 | }; | ||
2183 | |||
2154 | static struct aa_sfs_entry aa_sfs_entry_ns[] = { | 2184 | static struct aa_sfs_entry aa_sfs_entry_ns[] = { |
2155 | AA_SFS_FILE_BOOLEAN("profile", 1), | 2185 | AA_SFS_FILE_BOOLEAN("profile", 1), |
2156 | AA_SFS_FILE_BOOLEAN("pivot_root", 1), | 2186 | AA_SFS_FILE_BOOLEAN("pivot_root", 0), |
2157 | { } | 2187 | { } |
2158 | }; | 2188 | }; |
2159 | 2189 | ||
@@ -2172,22 +2202,25 @@ static struct aa_sfs_entry aa_sfs_entry_features[] = { | |||
2172 | AA_SFS_DIR("policy", aa_sfs_entry_policy), | 2202 | AA_SFS_DIR("policy", aa_sfs_entry_policy), |
2173 | AA_SFS_DIR("domain", aa_sfs_entry_domain), | 2203 | AA_SFS_DIR("domain", aa_sfs_entry_domain), |
2174 | AA_SFS_DIR("file", aa_sfs_entry_file), | 2204 | AA_SFS_DIR("file", aa_sfs_entry_file), |
2205 | AA_SFS_DIR("network", aa_sfs_entry_network), | ||
2206 | AA_SFS_DIR("mount", aa_sfs_entry_mount), | ||
2175 | AA_SFS_DIR("namespaces", aa_sfs_entry_ns), | 2207 | AA_SFS_DIR("namespaces", aa_sfs_entry_ns), |
2176 | AA_SFS_FILE_U64("capability", VFS_CAP_FLAGS_MASK), | 2208 | AA_SFS_FILE_U64("capability", VFS_CAP_FLAGS_MASK), |
2177 | AA_SFS_DIR("rlimit", aa_sfs_entry_rlimit), | 2209 | AA_SFS_DIR("rlimit", aa_sfs_entry_rlimit), |
2178 | AA_SFS_DIR("caps", aa_sfs_entry_caps), | 2210 | AA_SFS_DIR("caps", aa_sfs_entry_caps), |
2179 | AA_SFS_DIR("ptrace", aa_sfs_entry_ptrace), | 2211 | AA_SFS_DIR("ptrace", aa_sfs_entry_ptrace), |
2212 | AA_SFS_DIR("signal", aa_sfs_entry_signal), | ||
2180 | AA_SFS_DIR("query", aa_sfs_entry_query), | 2213 | AA_SFS_DIR("query", aa_sfs_entry_query), |
2181 | { } | 2214 | { } |
2182 | }; | 2215 | }; |
2183 | 2216 | ||
2184 | static struct aa_sfs_entry aa_sfs_entry_apparmor[] = { | 2217 | static struct aa_sfs_entry aa_sfs_entry_apparmor[] = { |
2185 | AA_SFS_FILE_FOPS(".access", 0640, &aa_sfs_access), | 2218 | AA_SFS_FILE_FOPS(".access", 0666, &aa_sfs_access), |
2186 | AA_SFS_FILE_FOPS(".stacked", 0444, &seq_ns_stacked_fops), | 2219 | AA_SFS_FILE_FOPS(".stacked", 0444, &seq_ns_stacked_fops), |
2187 | AA_SFS_FILE_FOPS(".ns_stacked", 0444, &seq_ns_nsstacked_fops), | 2220 | AA_SFS_FILE_FOPS(".ns_stacked", 0444, &seq_ns_nsstacked_fops), |
2188 | AA_SFS_FILE_FOPS(".ns_level", 0666, &seq_ns_level_fops), | 2221 | AA_SFS_FILE_FOPS(".ns_level", 0444, &seq_ns_level_fops), |
2189 | AA_SFS_FILE_FOPS(".ns_name", 0640, &seq_ns_name_fops), | 2222 | AA_SFS_FILE_FOPS(".ns_name", 0444, &seq_ns_name_fops), |
2190 | AA_SFS_FILE_FOPS("profiles", 0440, &aa_sfs_profiles_fops), | 2223 | AA_SFS_FILE_FOPS("profiles", 0444, &aa_sfs_profiles_fops), |
2191 | AA_SFS_DIR("features", aa_sfs_entry_features), | 2224 | AA_SFS_DIR("features", aa_sfs_entry_features), |
2192 | { } | 2225 | { } |
2193 | }; | 2226 | }; |
diff --git a/security/apparmor/domain.c b/security/apparmor/domain.c index 17a601c67b62..dd754b7850a8 100644 --- a/security/apparmor/domain.c +++ b/security/apparmor/domain.c | |||
@@ -374,8 +374,8 @@ static const char *next_name(int xtype, const char *name) | |||
374 | * | 374 | * |
375 | * Returns: refcounted label, or NULL on failure (MAYBE NULL) | 375 | * Returns: refcounted label, or NULL on failure (MAYBE NULL) |
376 | */ | 376 | */ |
377 | static struct aa_label *x_table_lookup(struct aa_profile *profile, u32 xindex, | 377 | struct aa_label *x_table_lookup(struct aa_profile *profile, u32 xindex, |
378 | const char **name) | 378 | const char **name) |
379 | { | 379 | { |
380 | struct aa_label *label = NULL; | 380 | struct aa_label *label = NULL; |
381 | u32 xtype = xindex & AA_X_TYPE_MASK; | 381 | u32 xtype = xindex & AA_X_TYPE_MASK; |
diff --git a/security/apparmor/file.c b/security/apparmor/file.c index 3382518b87fa..db80221891c6 100644 --- a/security/apparmor/file.c +++ b/security/apparmor/file.c | |||
@@ -21,6 +21,7 @@ | |||
21 | #include "include/context.h" | 21 | #include "include/context.h" |
22 | #include "include/file.h" | 22 | #include "include/file.h" |
23 | #include "include/match.h" | 23 | #include "include/match.h" |
24 | #include "include/net.h" | ||
24 | #include "include/path.h" | 25 | #include "include/path.h" |
25 | #include "include/policy.h" | 26 | #include "include/policy.h" |
26 | #include "include/label.h" | 27 | #include "include/label.h" |
@@ -566,6 +567,32 @@ static int __file_path_perm(const char *op, struct aa_label *label, | |||
566 | return error; | 567 | return error; |
567 | } | 568 | } |
568 | 569 | ||
570 | static int __file_sock_perm(const char *op, struct aa_label *label, | ||
571 | struct aa_label *flabel, struct file *file, | ||
572 | u32 request, u32 denied) | ||
573 | { | ||
574 | struct socket *sock = (struct socket *) file->private_data; | ||
575 | int error; | ||
576 | |||
577 | AA_BUG(!sock); | ||
578 | |||
579 | /* revalidation due to label out of date. No revocation at this time */ | ||
580 | if (!denied && aa_label_is_subset(flabel, label)) | ||
581 | return 0; | ||
582 | |||
583 | /* TODO: improve to skip profiles cached in flabel */ | ||
584 | error = aa_sock_file_perm(label, op, request, sock); | ||
585 | if (denied) { | ||
586 | /* TODO: improve to skip profiles checked above */ | ||
587 | /* check every profile in file label to is cached */ | ||
588 | last_error(error, aa_sock_file_perm(flabel, op, request, sock)); | ||
589 | } | ||
590 | if (!error) | ||
591 | update_file_ctx(file_ctx(file), label, request); | ||
592 | |||
593 | return error; | ||
594 | } | ||
595 | |||
569 | /** | 596 | /** |
570 | * aa_file_perm - do permission revalidation check & audit for @file | 597 | * aa_file_perm - do permission revalidation check & audit for @file |
571 | * @op: operation being checked | 598 | * @op: operation being checked |
@@ -610,6 +637,9 @@ int aa_file_perm(const char *op, struct aa_label *label, struct file *file, | |||
610 | error = __file_path_perm(op, label, flabel, file, request, | 637 | error = __file_path_perm(op, label, flabel, file, request, |
611 | denied); | 638 | denied); |
612 | 639 | ||
640 | else if (S_ISSOCK(file_inode(file)->i_mode)) | ||
641 | error = __file_sock_perm(op, label, flabel, file, request, | ||
642 | denied); | ||
613 | done: | 643 | done: |
614 | rcu_read_unlock(); | 644 | rcu_read_unlock(); |
615 | 645 | ||
diff --git a/security/apparmor/include/apparmor.h b/security/apparmor/include/apparmor.h index aaf893f4e4f5..829082c35faa 100644 --- a/security/apparmor/include/apparmor.h +++ b/security/apparmor/include/apparmor.h | |||
@@ -27,7 +27,9 @@ | |||
27 | #define AA_CLASS_NET 4 | 27 | #define AA_CLASS_NET 4 |
28 | #define AA_CLASS_RLIMITS 5 | 28 | #define AA_CLASS_RLIMITS 5 |
29 | #define AA_CLASS_DOMAIN 6 | 29 | #define AA_CLASS_DOMAIN 6 |
30 | #define AA_CLASS_MOUNT 7 | ||
30 | #define AA_CLASS_PTRACE 9 | 31 | #define AA_CLASS_PTRACE 9 |
32 | #define AA_CLASS_SIGNAL 10 | ||
31 | #define AA_CLASS_LABEL 16 | 33 | #define AA_CLASS_LABEL 16 |
32 | 34 | ||
33 | #define AA_CLASS_LAST AA_CLASS_LABEL | 35 | #define AA_CLASS_LAST AA_CLASS_LABEL |
diff --git a/security/apparmor/include/audit.h b/security/apparmor/include/audit.h index c68839a44351..ff4316e1068d 100644 --- a/security/apparmor/include/audit.h +++ b/security/apparmor/include/audit.h | |||
@@ -71,6 +71,10 @@ enum audit_type { | |||
71 | #define OP_FMPROT "file_mprotect" | 71 | #define OP_FMPROT "file_mprotect" |
72 | #define OP_INHERIT "file_inherit" | 72 | #define OP_INHERIT "file_inherit" |
73 | 73 | ||
74 | #define OP_PIVOTROOT "pivotroot" | ||
75 | #define OP_MOUNT "mount" | ||
76 | #define OP_UMOUNT "umount" | ||
77 | |||
74 | #define OP_CREATE "create" | 78 | #define OP_CREATE "create" |
75 | #define OP_POST_CREATE "post_create" | 79 | #define OP_POST_CREATE "post_create" |
76 | #define OP_BIND "bind" | 80 | #define OP_BIND "bind" |
@@ -86,6 +90,7 @@ enum audit_type { | |||
86 | #define OP_SHUTDOWN "socket_shutdown" | 90 | #define OP_SHUTDOWN "socket_shutdown" |
87 | 91 | ||
88 | #define OP_PTRACE "ptrace" | 92 | #define OP_PTRACE "ptrace" |
93 | #define OP_SIGNAL "signal" | ||
89 | 94 | ||
90 | #define OP_EXEC "exec" | 95 | #define OP_EXEC "exec" |
91 | 96 | ||
@@ -116,20 +121,36 @@ struct apparmor_audit_data { | |||
116 | /* these entries require a custom callback fn */ | 121 | /* these entries require a custom callback fn */ |
117 | struct { | 122 | struct { |
118 | struct aa_label *peer; | 123 | struct aa_label *peer; |
119 | struct { | 124 | union { |
120 | const char *target; | 125 | struct { |
121 | kuid_t ouid; | 126 | kuid_t ouid; |
122 | } fs; | 127 | const char *target; |
128 | } fs; | ||
129 | struct { | ||
130 | int type, protocol; | ||
131 | struct sock *peer_sk; | ||
132 | void *addr; | ||
133 | int addrlen; | ||
134 | } net; | ||
135 | int signal; | ||
136 | struct { | ||
137 | int rlim; | ||
138 | unsigned long max; | ||
139 | } rlim; | ||
140 | }; | ||
123 | }; | 141 | }; |
124 | struct { | 142 | struct { |
125 | const char *name; | 143 | struct aa_profile *profile; |
126 | long pos; | ||
127 | const char *ns; | 144 | const char *ns; |
145 | long pos; | ||
128 | } iface; | 146 | } iface; |
129 | struct { | 147 | struct { |
130 | int rlim; | 148 | const char *src_name; |
131 | unsigned long max; | 149 | const char *type; |
132 | } rlim; | 150 | const char *trans; |
151 | const char *data; | ||
152 | unsigned long flags; | ||
153 | } mnt; | ||
133 | }; | 154 | }; |
134 | }; | 155 | }; |
135 | 156 | ||
diff --git a/security/apparmor/include/domain.h b/security/apparmor/include/domain.h index 24c5976d6143..ac9862ff7cdf 100644 --- a/security/apparmor/include/domain.h +++ b/security/apparmor/include/domain.h | |||
@@ -15,6 +15,8 @@ | |||
15 | #include <linux/binfmts.h> | 15 | #include <linux/binfmts.h> |
16 | #include <linux/types.h> | 16 | #include <linux/types.h> |
17 | 17 | ||
18 | #include "label.h" | ||
19 | |||
18 | #ifndef __AA_DOMAIN_H | 20 | #ifndef __AA_DOMAIN_H |
19 | #define __AA_DOMAIN_H | 21 | #define __AA_DOMAIN_H |
20 | 22 | ||
@@ -29,6 +31,9 @@ struct aa_domain { | |||
29 | #define AA_CHANGE_ONEXEC 4 | 31 | #define AA_CHANGE_ONEXEC 4 |
30 | #define AA_CHANGE_STACK 8 | 32 | #define AA_CHANGE_STACK 8 |
31 | 33 | ||
34 | struct aa_label *x_table_lookup(struct aa_profile *profile, u32 xindex, | ||
35 | const char **name); | ||
36 | |||
32 | int apparmor_bprm_set_creds(struct linux_binprm *bprm); | 37 | int apparmor_bprm_set_creds(struct linux_binprm *bprm); |
33 | 38 | ||
34 | void aa_free_domain_entries(struct aa_domain *domain); | 39 | void aa_free_domain_entries(struct aa_domain *domain); |
diff --git a/security/apparmor/include/ipc.h b/security/apparmor/include/ipc.h index 656fdb81c8a0..5ffc218d1e74 100644 --- a/security/apparmor/include/ipc.h +++ b/security/apparmor/include/ipc.h | |||
@@ -27,8 +27,14 @@ struct aa_profile; | |||
27 | 27 | ||
28 | #define AA_PTRACE_PERM_MASK (AA_PTRACE_READ | AA_PTRACE_TRACE | \ | 28 | #define AA_PTRACE_PERM_MASK (AA_PTRACE_READ | AA_PTRACE_TRACE | \ |
29 | AA_MAY_BE_READ | AA_MAY_BE_TRACED) | 29 | AA_MAY_BE_READ | AA_MAY_BE_TRACED) |
30 | #define AA_SIGNAL_PERM_MASK (MAY_READ | MAY_WRITE) | ||
31 | |||
32 | #define AA_SFS_SIG_MASK "hup int quit ill trap abrt bus fpe kill usr1 " \ | ||
33 | "segv usr2 pipe alrm term stkflt chld cont stop stp ttin ttou urg " \ | ||
34 | "xcpu xfsz vtalrm prof winch io pwr sys emt lost" | ||
30 | 35 | ||
31 | int aa_may_ptrace(struct aa_label *tracer, struct aa_label *tracee, | 36 | int aa_may_ptrace(struct aa_label *tracer, struct aa_label *tracee, |
32 | u32 request); | 37 | u32 request); |
38 | int aa_may_signal(struct aa_label *sender, struct aa_label *target, int sig); | ||
33 | 39 | ||
34 | #endif /* __AA_IPC_H */ | 40 | #endif /* __AA_IPC_H */ |
diff --git a/security/apparmor/include/label.h b/security/apparmor/include/label.h index 9a283b722755..af22dcbbcb8a 100644 --- a/security/apparmor/include/label.h +++ b/security/apparmor/include/label.h | |||
@@ -310,6 +310,7 @@ bool aa_update_label_name(struct aa_ns *ns, struct aa_label *label, gfp_t gfp); | |||
310 | #define FLAG_SHOW_MODE 1 | 310 | #define FLAG_SHOW_MODE 1 |
311 | #define FLAG_VIEW_SUBNS 2 | 311 | #define FLAG_VIEW_SUBNS 2 |
312 | #define FLAG_HIDDEN_UNCONFINED 4 | 312 | #define FLAG_HIDDEN_UNCONFINED 4 |
313 | #define FLAG_ABS_ROOT 8 | ||
313 | int aa_label_snxprint(char *str, size_t size, struct aa_ns *view, | 314 | int aa_label_snxprint(char *str, size_t size, struct aa_ns *view, |
314 | struct aa_label *label, int flags); | 315 | struct aa_label *label, int flags); |
315 | int aa_label_asxprint(char **strp, struct aa_ns *ns, struct aa_label *label, | 316 | int aa_label_asxprint(char **strp, struct aa_ns *ns, struct aa_label *label, |
diff --git a/security/apparmor/include/mount.h b/security/apparmor/include/mount.h new file mode 100644 index 000000000000..25d6067fa6ef --- /dev/null +++ b/security/apparmor/include/mount.h | |||
@@ -0,0 +1,54 @@ | |||
1 | /* | ||
2 | * AppArmor security module | ||
3 | * | ||
4 | * This file contains AppArmor file mediation function definitions. | ||
5 | * | ||
6 | * Copyright 2017 Canonical Ltd. | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or | ||
9 | * modify it under the terms of the GNU General Public License as | ||
10 | * published by the Free Software Foundation, version 2 of the | ||
11 | * License. | ||
12 | */ | ||
13 | |||
14 | #ifndef __AA_MOUNT_H | ||
15 | #define __AA_MOUNT_H | ||
16 | |||
17 | #include <linux/fs.h> | ||
18 | #include <linux/path.h> | ||
19 | |||
20 | #include "domain.h" | ||
21 | #include "policy.h" | ||
22 | |||
23 | /* mount perms */ | ||
24 | #define AA_MAY_PIVOTROOT 0x01 | ||
25 | #define AA_MAY_MOUNT 0x02 | ||
26 | #define AA_MAY_UMOUNT 0x04 | ||
27 | #define AA_AUDIT_DATA 0x40 | ||
28 | #define AA_MNT_CONT_MATCH 0x40 | ||
29 | |||
30 | #define AA_MS_IGNORE_MASK (MS_KERNMOUNT | MS_NOSEC | MS_ACTIVE | MS_BORN) | ||
31 | |||
32 | int aa_remount(struct aa_label *label, const struct path *path, | ||
33 | unsigned long flags, void *data); | ||
34 | |||
35 | int aa_bind_mount(struct aa_label *label, const struct path *path, | ||
36 | const char *old_name, unsigned long flags); | ||
37 | |||
38 | |||
39 | int aa_mount_change_type(struct aa_label *label, const struct path *path, | ||
40 | unsigned long flags); | ||
41 | |||
42 | int aa_move_mount(struct aa_label *label, const struct path *path, | ||
43 | const char *old_name); | ||
44 | |||
45 | int aa_new_mount(struct aa_label *label, const char *dev_name, | ||
46 | const struct path *path, const char *type, unsigned long flags, | ||
47 | void *data); | ||
48 | |||
49 | int aa_umount(struct aa_label *label, struct vfsmount *mnt, int flags); | ||
50 | |||
51 | int aa_pivotroot(struct aa_label *label, const struct path *old_path, | ||
52 | const struct path *new_path); | ||
53 | |||
54 | #endif /* __AA_MOUNT_H */ | ||
diff --git a/security/apparmor/include/net.h b/security/apparmor/include/net.h new file mode 100644 index 000000000000..140c8efcf364 --- /dev/null +++ b/security/apparmor/include/net.h | |||
@@ -0,0 +1,114 @@ | |||
1 | /* | ||
2 | * AppArmor security module | ||
3 | * | ||
4 | * This file contains AppArmor network mediation definitions. | ||
5 | * | ||
6 | * Copyright (C) 1998-2008 Novell/SUSE | ||
7 | * Copyright 2009-2017 Canonical Ltd. | ||
8 | * | ||
9 | * This program is free software; you can redistribute it and/or | ||
10 | * modify it under the terms of the GNU General Public License as | ||
11 | * published by the Free Software Foundation, version 2 of the | ||
12 | * License. | ||
13 | */ | ||
14 | |||
15 | #ifndef __AA_NET_H | ||
16 | #define __AA_NET_H | ||
17 | |||
18 | #include <net/sock.h> | ||
19 | #include <linux/path.h> | ||
20 | |||
21 | #include "apparmorfs.h" | ||
22 | #include "label.h" | ||
23 | #include "perms.h" | ||
24 | #include "policy.h" | ||
25 | |||
26 | #define AA_MAY_SEND AA_MAY_WRITE | ||
27 | #define AA_MAY_RECEIVE AA_MAY_READ | ||
28 | |||
29 | #define AA_MAY_SHUTDOWN AA_MAY_DELETE | ||
30 | |||
31 | #define AA_MAY_CONNECT AA_MAY_OPEN | ||
32 | #define AA_MAY_ACCEPT 0x00100000 | ||
33 | |||
34 | #define AA_MAY_BIND 0x00200000 | ||
35 | #define AA_MAY_LISTEN 0x00400000 | ||
36 | |||
37 | #define AA_MAY_SETOPT 0x01000000 | ||
38 | #define AA_MAY_GETOPT 0x02000000 | ||
39 | |||
40 | #define NET_PERMS_MASK (AA_MAY_SEND | AA_MAY_RECEIVE | AA_MAY_CREATE | \ | ||
41 | AA_MAY_SHUTDOWN | AA_MAY_BIND | AA_MAY_LISTEN | \ | ||
42 | AA_MAY_CONNECT | AA_MAY_ACCEPT | AA_MAY_SETATTR | \ | ||
43 | AA_MAY_GETATTR | AA_MAY_SETOPT | AA_MAY_GETOPT) | ||
44 | |||
45 | #define NET_FS_PERMS (AA_MAY_SEND | AA_MAY_RECEIVE | AA_MAY_CREATE | \ | ||
46 | AA_MAY_SHUTDOWN | AA_MAY_CONNECT | AA_MAY_RENAME |\ | ||
47 | AA_MAY_SETATTR | AA_MAY_GETATTR | AA_MAY_CHMOD | \ | ||
48 | AA_MAY_CHOWN | AA_MAY_CHGRP | AA_MAY_LOCK | \ | ||
49 | AA_MAY_MPROT) | ||
50 | |||
51 | #define NET_PEER_MASK (AA_MAY_SEND | AA_MAY_RECEIVE | AA_MAY_CONNECT | \ | ||
52 | AA_MAY_ACCEPT) | ||
53 | struct aa_sk_ctx { | ||
54 | struct aa_label *label; | ||
55 | struct aa_label *peer; | ||
56 | struct path path; | ||
57 | }; | ||
58 | |||
59 | #define SK_CTX(X) ((X)->sk_security) | ||
60 | #define SOCK_ctx(X) SOCK_INODE(X)->i_security | ||
61 | #define DEFINE_AUDIT_NET(NAME, OP, SK, F, T, P) \ | ||
62 | struct lsm_network_audit NAME ## _net = { .sk = (SK), \ | ||
63 | .family = (F)}; \ | ||
64 | DEFINE_AUDIT_DATA(NAME, \ | ||
65 | ((SK) && (F) != AF_UNIX) ? LSM_AUDIT_DATA_NET : \ | ||
66 | LSM_AUDIT_DATA_NONE, \ | ||
67 | OP); \ | ||
68 | NAME.u.net = &(NAME ## _net); \ | ||
69 | aad(&NAME)->net.type = (T); \ | ||
70 | aad(&NAME)->net.protocol = (P) | ||
71 | |||
72 | #define DEFINE_AUDIT_SK(NAME, OP, SK) \ | ||
73 | DEFINE_AUDIT_NET(NAME, OP, SK, (SK)->sk_family, (SK)->sk_type, \ | ||
74 | (SK)->sk_protocol) | ||
75 | |||
76 | /* struct aa_net - network confinement data | ||
77 | * @allow: basic network families permissions | ||
78 | * @audit: which network permissions to force audit | ||
79 | * @quiet: which network permissions to quiet rejects | ||
80 | */ | ||
81 | struct aa_net { | ||
82 | u16 allow[AF_MAX]; | ||
83 | u16 audit[AF_MAX]; | ||
84 | u16 quiet[AF_MAX]; | ||
85 | }; | ||
86 | |||
87 | |||
88 | extern struct aa_sfs_entry aa_sfs_entry_network[]; | ||
89 | |||
90 | void audit_net_cb(struct audit_buffer *ab, void *va); | ||
91 | int aa_profile_af_perm(struct aa_profile *profile, struct common_audit_data *sa, | ||
92 | u32 request, u16 family, int type); | ||
93 | int aa_af_perm(struct aa_label *label, const char *op, u32 request, u16 family, | ||
94 | int type, int protocol); | ||
95 | static inline int aa_profile_af_sk_perm(struct aa_profile *profile, | ||
96 | struct common_audit_data *sa, | ||
97 | u32 request, | ||
98 | struct sock *sk) | ||
99 | { | ||
100 | return aa_profile_af_perm(profile, sa, request, sk->sk_family, | ||
101 | sk->sk_type); | ||
102 | } | ||
103 | int aa_sk_perm(const char *op, u32 request, struct sock *sk); | ||
104 | |||
105 | int aa_sock_file_perm(struct aa_label *label, const char *op, u32 request, | ||
106 | struct socket *sock); | ||
107 | |||
108 | |||
109 | static inline void aa_free_net_rules(struct aa_net *new) | ||
110 | { | ||
111 | /* NOP */ | ||
112 | } | ||
113 | |||
114 | #endif /* __AA_NET_H */ | ||
diff --git a/security/apparmor/include/perms.h b/security/apparmor/include/perms.h index 2b27bb79aec4..af04d5a7d73d 100644 --- a/security/apparmor/include/perms.h +++ b/security/apparmor/include/perms.h | |||
@@ -135,9 +135,10 @@ extern struct aa_perms allperms; | |||
135 | 135 | ||
136 | 136 | ||
137 | void aa_perm_mask_to_str(char *str, const char *chrs, u32 mask); | 137 | void aa_perm_mask_to_str(char *str, const char *chrs, u32 mask); |
138 | void aa_audit_perm_names(struct audit_buffer *ab, const char **names, u32 mask); | 138 | void aa_audit_perm_names(struct audit_buffer *ab, const char * const *names, |
139 | u32 mask); | ||
139 | void aa_audit_perm_mask(struct audit_buffer *ab, u32 mask, const char *chrs, | 140 | void aa_audit_perm_mask(struct audit_buffer *ab, u32 mask, const char *chrs, |
140 | u32 chrsmask, const char **names, u32 namesmask); | 141 | u32 chrsmask, const char * const *names, u32 namesmask); |
141 | void aa_apply_modes_to_perms(struct aa_profile *profile, | 142 | void aa_apply_modes_to_perms(struct aa_profile *profile, |
142 | struct aa_perms *perms); | 143 | struct aa_perms *perms); |
143 | void aa_compute_perms(struct aa_dfa *dfa, unsigned int state, | 144 | void aa_compute_perms(struct aa_dfa *dfa, unsigned int state, |
diff --git a/security/apparmor/include/policy.h b/security/apparmor/include/policy.h index 17fe41a9cac3..4364088a0b9e 100644 --- a/security/apparmor/include/policy.h +++ b/security/apparmor/include/policy.h | |||
@@ -30,6 +30,7 @@ | |||
30 | #include "file.h" | 30 | #include "file.h" |
31 | #include "lib.h" | 31 | #include "lib.h" |
32 | #include "label.h" | 32 | #include "label.h" |
33 | #include "net.h" | ||
33 | #include "perms.h" | 34 | #include "perms.h" |
34 | #include "resource.h" | 35 | #include "resource.h" |
35 | 36 | ||
@@ -111,6 +112,7 @@ struct aa_data { | |||
111 | * @policy: general match rules governing policy | 112 | * @policy: general match rules governing policy |
112 | * @file: The set of rules governing basic file access and domain transitions | 113 | * @file: The set of rules governing basic file access and domain transitions |
113 | * @caps: capabilities for the profile | 114 | * @caps: capabilities for the profile |
115 | * @net: network controls for the profile | ||
114 | * @rlimits: rlimits for the profile | 116 | * @rlimits: rlimits for the profile |
115 | * | 117 | * |
116 | * @dents: dentries for the profiles file entries in apparmorfs | 118 | * @dents: dentries for the profiles file entries in apparmorfs |
@@ -148,6 +150,7 @@ struct aa_profile { | |||
148 | struct aa_policydb policy; | 150 | struct aa_policydb policy; |
149 | struct aa_file_rules file; | 151 | struct aa_file_rules file; |
150 | struct aa_caps caps; | 152 | struct aa_caps caps; |
153 | struct aa_net net; | ||
151 | struct aa_rlimit rlimits; | 154 | struct aa_rlimit rlimits; |
152 | 155 | ||
153 | struct aa_loaddata *rawdata; | 156 | struct aa_loaddata *rawdata; |
@@ -220,6 +223,16 @@ static inline unsigned int PROFILE_MEDIATES_SAFE(struct aa_profile *profile, | |||
220 | return 0; | 223 | return 0; |
221 | } | 224 | } |
222 | 225 | ||
226 | static inline unsigned int PROFILE_MEDIATES_AF(struct aa_profile *profile, | ||
227 | u16 AF) { | ||
228 | unsigned int state = PROFILE_MEDIATES(profile, AA_CLASS_NET); | ||
229 | u16 be_af = cpu_to_be16(AF); | ||
230 | |||
231 | if (!state) | ||
232 | return 0; | ||
233 | return aa_dfa_match_len(profile->policy.dfa, state, (char *) &be_af, 2); | ||
234 | } | ||
235 | |||
223 | /** | 236 | /** |
224 | * aa_get_profile - increment refcount on profile @p | 237 | * aa_get_profile - increment refcount on profile @p |
225 | * @p: profile (MAYBE NULL) | 238 | * @p: profile (MAYBE NULL) |
diff --git a/security/apparmor/include/sig_names.h b/security/apparmor/include/sig_names.h new file mode 100644 index 000000000000..92e62fe95292 --- /dev/null +++ b/security/apparmor/include/sig_names.h | |||
@@ -0,0 +1,98 @@ | |||
1 | #include <linux/signal.h> | ||
2 | |||
3 | #define SIGUNKNOWN 0 | ||
4 | #define MAXMAPPED_SIG 35 | ||
5 | /* provide a mapping of arch signal to internal signal # for mediation | ||
6 | * those that are always an alias SIGCLD for SIGCLHD and SIGPOLL for SIGIO | ||
7 | * map to the same entry those that may/or may not get a separate entry | ||
8 | */ | ||
9 | static const int sig_map[MAXMAPPED_SIG] = { | ||
10 | [0] = MAXMAPPED_SIG, /* existence test */ | ||
11 | [SIGHUP] = 1, | ||
12 | [SIGINT] = 2, | ||
13 | [SIGQUIT] = 3, | ||
14 | [SIGILL] = 4, | ||
15 | [SIGTRAP] = 5, /* -, 5, - */ | ||
16 | [SIGABRT] = 6, /* SIGIOT: -, 6, - */ | ||
17 | [SIGBUS] = 7, /* 10, 7, 10 */ | ||
18 | [SIGFPE] = 8, | ||
19 | [SIGKILL] = 9, | ||
20 | [SIGUSR1] = 10, /* 30, 10, 16 */ | ||
21 | [SIGSEGV] = 11, | ||
22 | [SIGUSR2] = 12, /* 31, 12, 17 */ | ||
23 | [SIGPIPE] = 13, | ||
24 | [SIGALRM] = 14, | ||
25 | [SIGTERM] = 15, | ||
26 | #ifdef SIGSTKFLT | ||
27 | [SIGSTKFLT] = 16, /* -, 16, - */ | ||
28 | #endif | ||
29 | [SIGCHLD] = 17, /* 20, 17, 18. SIGCHLD -, -, 18 */ | ||
30 | [SIGCONT] = 18, /* 19, 18, 25 */ | ||
31 | [SIGSTOP] = 19, /* 17, 19, 23 */ | ||
32 | [SIGTSTP] = 20, /* 18, 20, 24 */ | ||
33 | [SIGTTIN] = 21, /* 21, 21, 26 */ | ||
34 | [SIGTTOU] = 22, /* 22, 22, 27 */ | ||
35 | [SIGURG] = 23, /* 16, 23, 21 */ | ||
36 | [SIGXCPU] = 24, /* 24, 24, 30 */ | ||
37 | [SIGXFSZ] = 25, /* 25, 25, 31 */ | ||
38 | [SIGVTALRM] = 26, /* 26, 26, 28 */ | ||
39 | [SIGPROF] = 27, /* 27, 27, 29 */ | ||
40 | [SIGWINCH] = 28, /* 28, 28, 20 */ | ||
41 | [SIGIO] = 29, /* SIGPOLL: 23, 29, 22 */ | ||
42 | [SIGPWR] = 30, /* 29, 30, 19. SIGINFO 29, -, - */ | ||
43 | #ifdef SIGSYS | ||
44 | [SIGSYS] = 31, /* 12, 31, 12. often SIG LOST/UNUSED */ | ||
45 | #endif | ||
46 | #ifdef SIGEMT | ||
47 | [SIGEMT] = 32, /* 7, - , 7 */ | ||
48 | #endif | ||
49 | #if defined(SIGLOST) && SIGPWR != SIGLOST /* sparc */ | ||
50 | [SIGLOST] = 33, /* unused on Linux */ | ||
51 | #endif | ||
52 | #if defined(SIGUNUSED) && \ | ||
53 | defined(SIGLOST) && defined(SIGSYS) && SIGLOST != SIGSYS | ||
54 | [SIGUNUSED] = 34, /* -, 31, - */ | ||
55 | #endif | ||
56 | }; | ||
57 | |||
58 | /* this table is ordered post sig_map[sig] mapping */ | ||
59 | static const char *const sig_names[MAXMAPPED_SIG + 1] = { | ||
60 | "unknown", | ||
61 | "hup", | ||
62 | "int", | ||
63 | "quit", | ||
64 | "ill", | ||
65 | "trap", | ||
66 | "abrt", | ||
67 | "bus", | ||
68 | "fpe", | ||
69 | "kill", | ||
70 | "usr1", | ||
71 | "segv", | ||
72 | "usr2", | ||
73 | "pipe", | ||
74 | "alrm", | ||
75 | "term", | ||
76 | "stkflt", | ||
77 | "chld", | ||
78 | "cont", | ||
79 | "stop", | ||
80 | "stp", | ||
81 | "ttin", | ||
82 | "ttou", | ||
83 | "urg", | ||
84 | "xcpu", | ||
85 | "xfsz", | ||
86 | "vtalrm", | ||
87 | "prof", | ||
88 | "winch", | ||
89 | "io", | ||
90 | "pwr", | ||
91 | "sys", | ||
92 | "emt", | ||
93 | "lost", | ||
94 | "unused", | ||
95 | |||
96 | "exists", /* always last existence test mapped to MAXMAPPED_SIG */ | ||
97 | }; | ||
98 | |||
diff --git a/security/apparmor/ipc.c b/security/apparmor/ipc.c index 11e66b5bbc42..66fb9ede9447 100644 --- a/security/apparmor/ipc.c +++ b/security/apparmor/ipc.c | |||
@@ -20,6 +20,7 @@ | |||
20 | #include "include/context.h" | 20 | #include "include/context.h" |
21 | #include "include/policy.h" | 21 | #include "include/policy.h" |
22 | #include "include/ipc.h" | 22 | #include "include/ipc.h" |
23 | #include "include/sig_names.h" | ||
23 | 24 | ||
24 | /** | 25 | /** |
25 | * audit_ptrace_mask - convert mask to permission string | 26 | * audit_ptrace_mask - convert mask to permission string |
@@ -121,3 +122,101 @@ int aa_may_ptrace(struct aa_label *tracer, struct aa_label *tracee, | |||
121 | } | 122 | } |
122 | 123 | ||
123 | 124 | ||
125 | static inline int map_signal_num(int sig) | ||
126 | { | ||
127 | if (sig > SIGRTMAX) | ||
128 | return SIGUNKNOWN; | ||
129 | else if (sig >= SIGRTMIN) | ||
130 | return sig - SIGRTMIN + 128; /* rt sigs mapped to 128 */ | ||
131 | else if (sig <= MAXMAPPED_SIG) | ||
132 | return sig_map[sig]; | ||
133 | return SIGUNKNOWN; | ||
134 | } | ||
135 | |||
136 | /** | ||
137 | * audit_file_mask - convert mask to permission string | ||
138 | * @buffer: buffer to write string to (NOT NULL) | ||
139 | * @mask: permission mask to convert | ||
140 | */ | ||
141 | static void audit_signal_mask(struct audit_buffer *ab, u32 mask) | ||
142 | { | ||
143 | if (mask & MAY_READ) | ||
144 | audit_log_string(ab, "receive"); | ||
145 | if (mask & MAY_WRITE) | ||
146 | audit_log_string(ab, "send"); | ||
147 | } | ||
148 | |||
149 | /** | ||
150 | * audit_cb - call back for signal specific audit fields | ||
151 | * @ab: audit_buffer (NOT NULL) | ||
152 | * @va: audit struct to audit values of (NOT NULL) | ||
153 | */ | ||
154 | static void audit_signal_cb(struct audit_buffer *ab, void *va) | ||
155 | { | ||
156 | struct common_audit_data *sa = va; | ||
157 | |||
158 | if (aad(sa)->request & AA_SIGNAL_PERM_MASK) { | ||
159 | audit_log_format(ab, " requested_mask="); | ||
160 | audit_signal_mask(ab, aad(sa)->request); | ||
161 | if (aad(sa)->denied & AA_SIGNAL_PERM_MASK) { | ||
162 | audit_log_format(ab, " denied_mask="); | ||
163 | audit_signal_mask(ab, aad(sa)->denied); | ||
164 | } | ||
165 | } | ||
166 | if (aad(sa)->signal <= MAXMAPPED_SIG) | ||
167 | audit_log_format(ab, " signal=%s", sig_names[aad(sa)->signal]); | ||
168 | else | ||
169 | audit_log_format(ab, " signal=rtmin+%d", | ||
170 | aad(sa)->signal - 128); | ||
171 | audit_log_format(ab, " peer="); | ||
172 | aa_label_xaudit(ab, labels_ns(aad(sa)->label), aad(sa)->peer, | ||
173 | FLAGS_NONE, GFP_ATOMIC); | ||
174 | } | ||
175 | |||
176 | /* TODO: update to handle compound name&name2, conditionals */ | ||
177 | static void profile_match_signal(struct aa_profile *profile, const char *label, | ||
178 | int signal, struct aa_perms *perms) | ||
179 | { | ||
180 | unsigned int state; | ||
181 | |||
182 | /* TODO: secondary cache check <profile, profile, perm> */ | ||
183 | state = aa_dfa_next(profile->policy.dfa, | ||
184 | profile->policy.start[AA_CLASS_SIGNAL], | ||
185 | signal); | ||
186 | state = aa_dfa_match(profile->policy.dfa, state, label); | ||
187 | aa_compute_perms(profile->policy.dfa, state, perms); | ||
188 | } | ||
189 | |||
190 | static int profile_signal_perm(struct aa_profile *profile, | ||
191 | struct aa_profile *peer, u32 request, | ||
192 | struct common_audit_data *sa) | ||
193 | { | ||
194 | struct aa_perms perms; | ||
195 | |||
196 | if (profile_unconfined(profile) || | ||
197 | !PROFILE_MEDIATES(profile, AA_CLASS_SIGNAL)) | ||
198 | return 0; | ||
199 | |||
200 | aad(sa)->peer = &peer->label; | ||
201 | profile_match_signal(profile, peer->base.hname, aad(sa)->signal, | ||
202 | &perms); | ||
203 | aa_apply_modes_to_perms(profile, &perms); | ||
204 | return aa_check_perms(profile, &perms, request, sa, audit_signal_cb); | ||
205 | } | ||
206 | |||
207 | static int aa_signal_cross_perm(struct aa_profile *sender, | ||
208 | struct aa_profile *target, | ||
209 | struct common_audit_data *sa) | ||
210 | { | ||
211 | return xcheck(profile_signal_perm(sender, target, MAY_WRITE, sa), | ||
212 | profile_signal_perm(target, sender, MAY_READ, sa)); | ||
213 | } | ||
214 | |||
215 | int aa_may_signal(struct aa_label *sender, struct aa_label *target, int sig) | ||
216 | { | ||
217 | DEFINE_AUDIT_DATA(sa, LSM_AUDIT_DATA_NONE, OP_SIGNAL); | ||
218 | |||
219 | aad(&sa)->signal = map_signal_num(sig); | ||
220 | return xcheck_labels_profiles(sender, target, aa_signal_cross_perm, | ||
221 | &sa); | ||
222 | } | ||
diff --git a/security/apparmor/label.c b/security/apparmor/label.c index e052eaba1cf6..c5b99b954580 100644 --- a/security/apparmor/label.c +++ b/security/apparmor/label.c | |||
@@ -49,7 +49,7 @@ static void free_proxy(struct aa_proxy *proxy) | |||
49 | /* p->label will not updated any more as p is dead */ | 49 | /* p->label will not updated any more as p is dead */ |
50 | aa_put_label(rcu_dereference_protected(proxy->label, true)); | 50 | aa_put_label(rcu_dereference_protected(proxy->label, true)); |
51 | memset(proxy, 0, sizeof(*proxy)); | 51 | memset(proxy, 0, sizeof(*proxy)); |
52 | proxy->label = (struct aa_label *) PROXY_POISON; | 52 | RCU_INIT_POINTER(proxy->label, (struct aa_label *)PROXY_POISON); |
53 | kfree(proxy); | 53 | kfree(proxy); |
54 | } | 54 | } |
55 | } | 55 | } |
@@ -1450,9 +1450,11 @@ bool aa_update_label_name(struct aa_ns *ns, struct aa_label *label, gfp_t gfp) | |||
1450 | * cached label name is present and visible | 1450 | * cached label name is present and visible |
1451 | * @label->hname only exists if label is namespace hierachical | 1451 | * @label->hname only exists if label is namespace hierachical |
1452 | */ | 1452 | */ |
1453 | static inline bool use_label_hname(struct aa_ns *ns, struct aa_label *label) | 1453 | static inline bool use_label_hname(struct aa_ns *ns, struct aa_label *label, |
1454 | int flags) | ||
1454 | { | 1455 | { |
1455 | if (label->hname && labels_ns(label) == ns) | 1456 | if (label->hname && (!ns || labels_ns(label) == ns) && |
1457 | !(flags & ~FLAG_SHOW_MODE)) | ||
1456 | return true; | 1458 | return true; |
1457 | 1459 | ||
1458 | return false; | 1460 | return false; |
@@ -1495,7 +1497,7 @@ static int aa_profile_snxprint(char *str, size_t size, struct aa_ns *view, | |||
1495 | view = profiles_ns(profile); | 1497 | view = profiles_ns(profile); |
1496 | 1498 | ||
1497 | if (view != profile->ns && | 1499 | if (view != profile->ns && |
1498 | (!prev_ns || (prev_ns && *prev_ns != profile->ns))) { | 1500 | (!prev_ns || (*prev_ns != profile->ns))) { |
1499 | if (prev_ns) | 1501 | if (prev_ns) |
1500 | *prev_ns = profile->ns; | 1502 | *prev_ns = profile->ns; |
1501 | ns_name = aa_ns_name(view, profile->ns, | 1503 | ns_name = aa_ns_name(view, profile->ns, |
@@ -1605,8 +1607,13 @@ int aa_label_snxprint(char *str, size_t size, struct aa_ns *ns, | |||
1605 | AA_BUG(!str && size != 0); | 1607 | AA_BUG(!str && size != 0); |
1606 | AA_BUG(!label); | 1608 | AA_BUG(!label); |
1607 | 1609 | ||
1608 | if (!ns) | 1610 | if (flags & FLAG_ABS_ROOT) { |
1611 | ns = root_ns; | ||
1612 | len = snprintf(str, size, "="); | ||
1613 | update_for_len(total, len, size, str); | ||
1614 | } else if (!ns) { | ||
1609 | ns = labels_ns(label); | 1615 | ns = labels_ns(label); |
1616 | } | ||
1610 | 1617 | ||
1611 | label_for_each(i, label, profile) { | 1618 | label_for_each(i, label, profile) { |
1612 | if (aa_ns_visible(ns, profile->ns, flags & FLAG_VIEW_SUBNS)) { | 1619 | if (aa_ns_visible(ns, profile->ns, flags & FLAG_VIEW_SUBNS)) { |
@@ -1710,10 +1717,8 @@ void aa_label_xaudit(struct audit_buffer *ab, struct aa_ns *ns, | |||
1710 | AA_BUG(!ab); | 1717 | AA_BUG(!ab); |
1711 | AA_BUG(!label); | 1718 | AA_BUG(!label); |
1712 | 1719 | ||
1713 | if (!ns) | 1720 | if (!use_label_hname(ns, label, flags) || |
1714 | ns = labels_ns(label); | 1721 | display_mode(ns, label, flags)) { |
1715 | |||
1716 | if (!use_label_hname(ns, label) || display_mode(ns, label, flags)) { | ||
1717 | len = aa_label_asxprint(&name, ns, label, flags, gfp); | 1722 | len = aa_label_asxprint(&name, ns, label, flags, gfp); |
1718 | if (len == -1) { | 1723 | if (len == -1) { |
1719 | AA_DEBUG("label print error"); | 1724 | AA_DEBUG("label print error"); |
@@ -1738,10 +1743,7 @@ void aa_label_seq_xprint(struct seq_file *f, struct aa_ns *ns, | |||
1738 | AA_BUG(!f); | 1743 | AA_BUG(!f); |
1739 | AA_BUG(!label); | 1744 | AA_BUG(!label); |
1740 | 1745 | ||
1741 | if (!ns) | 1746 | if (!use_label_hname(ns, label, flags)) { |
1742 | ns = labels_ns(label); | ||
1743 | |||
1744 | if (!use_label_hname(ns, label)) { | ||
1745 | char *str; | 1747 | char *str; |
1746 | int len; | 1748 | int len; |
1747 | 1749 | ||
@@ -1764,10 +1766,7 @@ void aa_label_xprintk(struct aa_ns *ns, struct aa_label *label, int flags, | |||
1764 | { | 1766 | { |
1765 | AA_BUG(!label); | 1767 | AA_BUG(!label); |
1766 | 1768 | ||
1767 | if (!ns) | 1769 | if (!use_label_hname(ns, label, flags)) { |
1768 | ns = labels_ns(label); | ||
1769 | |||
1770 | if (!use_label_hname(ns, label)) { | ||
1771 | char *str; | 1770 | char *str; |
1772 | int len; | 1771 | int len; |
1773 | 1772 | ||
@@ -1874,6 +1873,9 @@ struct aa_label *aa_label_parse(struct aa_label *base, const char *str, | |||
1874 | if (*str == '&') | 1873 | if (*str == '&') |
1875 | str++; | 1874 | str++; |
1876 | } | 1875 | } |
1876 | if (*str == '=') | ||
1877 | base = &root_ns->unconfined->label; | ||
1878 | |||
1877 | error = vec_setup(profile, vec, len, gfp); | 1879 | error = vec_setup(profile, vec, len, gfp); |
1878 | if (error) | 1880 | if (error) |
1879 | return ERR_PTR(error); | 1881 | return ERR_PTR(error); |
diff --git a/security/apparmor/lib.c b/security/apparmor/lib.c index 08ca26bcca77..8818621b5d95 100644 --- a/security/apparmor/lib.c +++ b/security/apparmor/lib.c | |||
@@ -211,7 +211,8 @@ void aa_perm_mask_to_str(char *str, const char *chrs, u32 mask) | |||
211 | *str = '\0'; | 211 | *str = '\0'; |
212 | } | 212 | } |
213 | 213 | ||
214 | void aa_audit_perm_names(struct audit_buffer *ab, const char **names, u32 mask) | 214 | void aa_audit_perm_names(struct audit_buffer *ab, const char * const *names, |
215 | u32 mask) | ||
215 | { | 216 | { |
216 | const char *fmt = "%s"; | 217 | const char *fmt = "%s"; |
217 | unsigned int i, perm = 1; | 218 | unsigned int i, perm = 1; |
@@ -229,7 +230,7 @@ void aa_audit_perm_names(struct audit_buffer *ab, const char **names, u32 mask) | |||
229 | } | 230 | } |
230 | 231 | ||
231 | void aa_audit_perm_mask(struct audit_buffer *ab, u32 mask, const char *chrs, | 232 | void aa_audit_perm_mask(struct audit_buffer *ab, u32 mask, const char *chrs, |
232 | u32 chrsmask, const char **names, u32 namesmask) | 233 | u32 chrsmask, const char * const *names, u32 namesmask) |
233 | { | 234 | { |
234 | char str[33]; | 235 | char str[33]; |
235 | 236 | ||
diff --git a/security/apparmor/lsm.c b/security/apparmor/lsm.c index 7a82c0f61452..72b915dfcaf7 100644 --- a/security/apparmor/lsm.c +++ b/security/apparmor/lsm.c | |||
@@ -33,11 +33,13 @@ | |||
33 | #include "include/context.h" | 33 | #include "include/context.h" |
34 | #include "include/file.h" | 34 | #include "include/file.h" |
35 | #include "include/ipc.h" | 35 | #include "include/ipc.h" |
36 | #include "include/net.h" | ||
36 | #include "include/path.h" | 37 | #include "include/path.h" |
37 | #include "include/label.h" | 38 | #include "include/label.h" |
38 | #include "include/policy.h" | 39 | #include "include/policy.h" |
39 | #include "include/policy_ns.h" | 40 | #include "include/policy_ns.h" |
40 | #include "include/procattr.h" | 41 | #include "include/procattr.h" |
42 | #include "include/mount.h" | ||
41 | 43 | ||
42 | /* Flag indicating whether initialization completed */ | 44 | /* Flag indicating whether initialization completed */ |
43 | int apparmor_initialized; | 45 | int apparmor_initialized; |
@@ -511,6 +513,65 @@ static int apparmor_file_mprotect(struct vm_area_struct *vma, | |||
511 | !(vma->vm_flags & VM_SHARED) ? MAP_PRIVATE : 0); | 513 | !(vma->vm_flags & VM_SHARED) ? MAP_PRIVATE : 0); |
512 | } | 514 | } |
513 | 515 | ||
516 | static int apparmor_sb_mount(const char *dev_name, const struct path *path, | ||
517 | const char *type, unsigned long flags, void *data) | ||
518 | { | ||
519 | struct aa_label *label; | ||
520 | int error = 0; | ||
521 | |||
522 | /* Discard magic */ | ||
523 | if ((flags & MS_MGC_MSK) == MS_MGC_VAL) | ||
524 | flags &= ~MS_MGC_MSK; | ||
525 | |||
526 | flags &= ~AA_MS_IGNORE_MASK; | ||
527 | |||
528 | label = __begin_current_label_crit_section(); | ||
529 | if (!unconfined(label)) { | ||
530 | if (flags & MS_REMOUNT) | ||
531 | error = aa_remount(label, path, flags, data); | ||
532 | else if (flags & MS_BIND) | ||
533 | error = aa_bind_mount(label, path, dev_name, flags); | ||
534 | else if (flags & (MS_SHARED | MS_PRIVATE | MS_SLAVE | | ||
535 | MS_UNBINDABLE)) | ||
536 | error = aa_mount_change_type(label, path, flags); | ||
537 | else if (flags & MS_MOVE) | ||
538 | error = aa_move_mount(label, path, dev_name); | ||
539 | else | ||
540 | error = aa_new_mount(label, dev_name, path, type, | ||
541 | flags, data); | ||
542 | } | ||
543 | __end_current_label_crit_section(label); | ||
544 | |||
545 | return error; | ||
546 | } | ||
547 | |||
548 | static int apparmor_sb_umount(struct vfsmount *mnt, int flags) | ||
549 | { | ||
550 | struct aa_label *label; | ||
551 | int error = 0; | ||
552 | |||
553 | label = __begin_current_label_crit_section(); | ||
554 | if (!unconfined(label)) | ||
555 | error = aa_umount(label, mnt, flags); | ||
556 | __end_current_label_crit_section(label); | ||
557 | |||
558 | return error; | ||
559 | } | ||
560 | |||
561 | static int apparmor_sb_pivotroot(const struct path *old_path, | ||
562 | const struct path *new_path) | ||
563 | { | ||
564 | struct aa_label *label; | ||
565 | int error = 0; | ||
566 | |||
567 | label = aa_get_current_label(); | ||
568 | if (!unconfined(label)) | ||
569 | error = aa_pivotroot(label, old_path, new_path); | ||
570 | aa_put_label(label); | ||
571 | |||
572 | return error; | ||
573 | } | ||
574 | |||
514 | static int apparmor_getprocattr(struct task_struct *task, char *name, | 575 | static int apparmor_getprocattr(struct task_struct *task, char *name, |
515 | char **value) | 576 | char **value) |
516 | { | 577 | { |
@@ -656,12 +717,398 @@ static int apparmor_task_setrlimit(struct task_struct *task, | |||
656 | return error; | 717 | return error; |
657 | } | 718 | } |
658 | 719 | ||
720 | static int apparmor_task_kill(struct task_struct *target, struct siginfo *info, | ||
721 | int sig, u32 secid) | ||
722 | { | ||
723 | struct aa_label *cl, *tl; | ||
724 | int error; | ||
725 | |||
726 | if (secid) | ||
727 | /* TODO: after secid to label mapping is done. | ||
728 | * Dealing with USB IO specific behavior | ||
729 | */ | ||
730 | return 0; | ||
731 | cl = __begin_current_label_crit_section(); | ||
732 | tl = aa_get_task_label(target); | ||
733 | error = aa_may_signal(cl, tl, sig); | ||
734 | aa_put_label(tl); | ||
735 | __end_current_label_crit_section(cl); | ||
736 | |||
737 | return error; | ||
738 | } | ||
739 | |||
740 | /** | ||
741 | * apparmor_sk_alloc_security - allocate and attach the sk_security field | ||
742 | */ | ||
743 | static int apparmor_sk_alloc_security(struct sock *sk, int family, gfp_t flags) | ||
744 | { | ||
745 | struct aa_sk_ctx *ctx; | ||
746 | |||
747 | ctx = kzalloc(sizeof(*ctx), flags); | ||
748 | if (!ctx) | ||
749 | return -ENOMEM; | ||
750 | |||
751 | SK_CTX(sk) = ctx; | ||
752 | |||
753 | return 0; | ||
754 | } | ||
755 | |||
756 | /** | ||
757 | * apparmor_sk_free_security - free the sk_security field | ||
758 | */ | ||
759 | static void apparmor_sk_free_security(struct sock *sk) | ||
760 | { | ||
761 | struct aa_sk_ctx *ctx = SK_CTX(sk); | ||
762 | |||
763 | SK_CTX(sk) = NULL; | ||
764 | aa_put_label(ctx->label); | ||
765 | aa_put_label(ctx->peer); | ||
766 | path_put(&ctx->path); | ||
767 | kfree(ctx); | ||
768 | } | ||
769 | |||
770 | /** | ||
771 | * apparmor_clone_security - clone the sk_security field | ||
772 | */ | ||
773 | static void apparmor_sk_clone_security(const struct sock *sk, | ||
774 | struct sock *newsk) | ||
775 | { | ||
776 | struct aa_sk_ctx *ctx = SK_CTX(sk); | ||
777 | struct aa_sk_ctx *new = SK_CTX(newsk); | ||
778 | |||
779 | new->label = aa_get_label(ctx->label); | ||
780 | new->peer = aa_get_label(ctx->peer); | ||
781 | new->path = ctx->path; | ||
782 | path_get(&new->path); | ||
783 | } | ||
784 | |||
785 | static int aa_sock_create_perm(struct aa_label *label, int family, int type, | ||
786 | int protocol) | ||
787 | { | ||
788 | AA_BUG(!label); | ||
789 | AA_BUG(in_interrupt()); | ||
790 | |||
791 | return aa_af_perm(label, OP_CREATE, AA_MAY_CREATE, family, type, | ||
792 | protocol); | ||
793 | } | ||
794 | |||
795 | |||
796 | /** | ||
797 | * apparmor_socket_create - check perms before creating a new socket | ||
798 | */ | ||
799 | static int apparmor_socket_create(int family, int type, int protocol, int kern) | ||
800 | { | ||
801 | struct aa_label *label; | ||
802 | int error = 0; | ||
803 | |||
804 | label = begin_current_label_crit_section(); | ||
805 | if (!(kern || unconfined(label))) | ||
806 | error = aa_sock_create_perm(label, family, type, protocol); | ||
807 | end_current_label_crit_section(label); | ||
808 | |||
809 | return error; | ||
810 | } | ||
811 | |||
812 | /** | ||
813 | * apparmor_socket_post_create - setup the per-socket security struct | ||
814 | * | ||
815 | * Note: | ||
816 | * - kernel sockets currently labeled unconfined but we may want to | ||
817 | * move to a special kernel label | ||
818 | * - socket may not have sk here if created with sock_create_lite or | ||
819 | * sock_alloc. These should be accept cases which will be handled in | ||
820 | * sock_graft. | ||
821 | */ | ||
822 | static int apparmor_socket_post_create(struct socket *sock, int family, | ||
823 | int type, int protocol, int kern) | ||
824 | { | ||
825 | struct aa_label *label; | ||
826 | |||
827 | if (kern) { | ||
828 | struct aa_ns *ns = aa_get_current_ns(); | ||
829 | |||
830 | label = aa_get_label(ns_unconfined(ns)); | ||
831 | aa_put_ns(ns); | ||
832 | } else | ||
833 | label = aa_get_current_label(); | ||
834 | |||
835 | if (sock->sk) { | ||
836 | struct aa_sk_ctx *ctx = SK_CTX(sock->sk); | ||
837 | |||
838 | aa_put_label(ctx->label); | ||
839 | ctx->label = aa_get_label(label); | ||
840 | } | ||
841 | aa_put_label(label); | ||
842 | |||
843 | return 0; | ||
844 | } | ||
845 | |||
846 | /** | ||
847 | * apparmor_socket_bind - check perms before bind addr to socket | ||
848 | */ | ||
849 | static int apparmor_socket_bind(struct socket *sock, | ||
850 | struct sockaddr *address, int addrlen) | ||
851 | { | ||
852 | AA_BUG(!sock); | ||
853 | AA_BUG(!sock->sk); | ||
854 | AA_BUG(!address); | ||
855 | AA_BUG(in_interrupt()); | ||
856 | |||
857 | return aa_sk_perm(OP_BIND, AA_MAY_BIND, sock->sk); | ||
858 | } | ||
859 | |||
860 | /** | ||
861 | * apparmor_socket_connect - check perms before connecting @sock to @address | ||
862 | */ | ||
863 | static int apparmor_socket_connect(struct socket *sock, | ||
864 | struct sockaddr *address, int addrlen) | ||
865 | { | ||
866 | AA_BUG(!sock); | ||
867 | AA_BUG(!sock->sk); | ||
868 | AA_BUG(!address); | ||
869 | AA_BUG(in_interrupt()); | ||
870 | |||
871 | return aa_sk_perm(OP_CONNECT, AA_MAY_CONNECT, sock->sk); | ||
872 | } | ||
873 | |||
874 | /** | ||
875 | * apparmor_socket_list - check perms before allowing listen | ||
876 | */ | ||
877 | static int apparmor_socket_listen(struct socket *sock, int backlog) | ||
878 | { | ||
879 | AA_BUG(!sock); | ||
880 | AA_BUG(!sock->sk); | ||
881 | AA_BUG(in_interrupt()); | ||
882 | |||
883 | return aa_sk_perm(OP_LISTEN, AA_MAY_LISTEN, sock->sk); | ||
884 | } | ||
885 | |||
886 | /** | ||
887 | * apparmor_socket_accept - check perms before accepting a new connection. | ||
888 | * | ||
889 | * Note: while @newsock is created and has some information, the accept | ||
890 | * has not been done. | ||
891 | */ | ||
892 | static int apparmor_socket_accept(struct socket *sock, struct socket *newsock) | ||
893 | { | ||
894 | AA_BUG(!sock); | ||
895 | AA_BUG(!sock->sk); | ||
896 | AA_BUG(!newsock); | ||
897 | AA_BUG(in_interrupt()); | ||
898 | |||
899 | return aa_sk_perm(OP_ACCEPT, AA_MAY_ACCEPT, sock->sk); | ||
900 | } | ||
901 | |||
902 | static int aa_sock_msg_perm(const char *op, u32 request, struct socket *sock, | ||
903 | struct msghdr *msg, int size) | ||
904 | { | ||
905 | AA_BUG(!sock); | ||
906 | AA_BUG(!sock->sk); | ||
907 | AA_BUG(!msg); | ||
908 | AA_BUG(in_interrupt()); | ||
909 | |||
910 | return aa_sk_perm(op, request, sock->sk); | ||
911 | } | ||
912 | |||
913 | /** | ||
914 | * apparmor_socket_sendmsg - check perms before sending msg to another socket | ||
915 | */ | ||
916 | static int apparmor_socket_sendmsg(struct socket *sock, | ||
917 | struct msghdr *msg, int size) | ||
918 | { | ||
919 | return aa_sock_msg_perm(OP_SENDMSG, AA_MAY_SEND, sock, msg, size); | ||
920 | } | ||
921 | |||
922 | /** | ||
923 | * apparmor_socket_recvmsg - check perms before receiving a message | ||
924 | */ | ||
925 | static int apparmor_socket_recvmsg(struct socket *sock, | ||
926 | struct msghdr *msg, int size, int flags) | ||
927 | { | ||
928 | return aa_sock_msg_perm(OP_RECVMSG, AA_MAY_RECEIVE, sock, msg, size); | ||
929 | } | ||
930 | |||
931 | /* revaliation, get/set attr, shutdown */ | ||
932 | static int aa_sock_perm(const char *op, u32 request, struct socket *sock) | ||
933 | { | ||
934 | AA_BUG(!sock); | ||
935 | AA_BUG(!sock->sk); | ||
936 | AA_BUG(in_interrupt()); | ||
937 | |||
938 | return aa_sk_perm(op, request, sock->sk); | ||
939 | } | ||
940 | |||
941 | /** | ||
942 | * apparmor_socket_getsockname - check perms before getting the local address | ||
943 | */ | ||
944 | static int apparmor_socket_getsockname(struct socket *sock) | ||
945 | { | ||
946 | return aa_sock_perm(OP_GETSOCKNAME, AA_MAY_GETATTR, sock); | ||
947 | } | ||
948 | |||
949 | /** | ||
950 | * apparmor_socket_getpeername - check perms before getting remote address | ||
951 | */ | ||
952 | static int apparmor_socket_getpeername(struct socket *sock) | ||
953 | { | ||
954 | return aa_sock_perm(OP_GETPEERNAME, AA_MAY_GETATTR, sock); | ||
955 | } | ||
956 | |||
957 | /* revaliation, get/set attr, opt */ | ||
958 | static int aa_sock_opt_perm(const char *op, u32 request, struct socket *sock, | ||
959 | int level, int optname) | ||
960 | { | ||
961 | AA_BUG(!sock); | ||
962 | AA_BUG(!sock->sk); | ||
963 | AA_BUG(in_interrupt()); | ||
964 | |||
965 | return aa_sk_perm(op, request, sock->sk); | ||
966 | } | ||
967 | |||
968 | /** | ||
969 | * apparmor_getsockopt - check perms before getting socket options | ||
970 | */ | ||
971 | static int apparmor_socket_getsockopt(struct socket *sock, int level, | ||
972 | int optname) | ||
973 | { | ||
974 | return aa_sock_opt_perm(OP_GETSOCKOPT, AA_MAY_GETOPT, sock, | ||
975 | level, optname); | ||
976 | } | ||
977 | |||
978 | /** | ||
979 | * apparmor_setsockopt - check perms before setting socket options | ||
980 | */ | ||
981 | static int apparmor_socket_setsockopt(struct socket *sock, int level, | ||
982 | int optname) | ||
983 | { | ||
984 | return aa_sock_opt_perm(OP_SETSOCKOPT, AA_MAY_SETOPT, sock, | ||
985 | level, optname); | ||
986 | } | ||
987 | |||
988 | /** | ||
989 | * apparmor_socket_shutdown - check perms before shutting down @sock conn | ||
990 | */ | ||
991 | static int apparmor_socket_shutdown(struct socket *sock, int how) | ||
992 | { | ||
993 | return aa_sock_perm(OP_SHUTDOWN, AA_MAY_SHUTDOWN, sock); | ||
994 | } | ||
995 | |||
996 | /** | ||
997 | * apparmor_socket_sock_recv_skb - check perms before associating skb to sk | ||
998 | * | ||
999 | * Note: can not sleep may be called with locks held | ||
1000 | * | ||
1001 | * dont want protocol specific in __skb_recv_datagram() | ||
1002 | * to deny an incoming connection socket_sock_rcv_skb() | ||
1003 | */ | ||
1004 | static int apparmor_socket_sock_rcv_skb(struct sock *sk, struct sk_buff *skb) | ||
1005 | { | ||
1006 | return 0; | ||
1007 | } | ||
1008 | |||
1009 | |||
1010 | static struct aa_label *sk_peer_label(struct sock *sk) | ||
1011 | { | ||
1012 | struct aa_sk_ctx *ctx = SK_CTX(sk); | ||
1013 | |||
1014 | if (ctx->peer) | ||
1015 | return ctx->peer; | ||
1016 | |||
1017 | return ERR_PTR(-ENOPROTOOPT); | ||
1018 | } | ||
1019 | |||
1020 | /** | ||
1021 | * apparmor_socket_getpeersec_stream - get security context of peer | ||
1022 | * | ||
1023 | * Note: for tcp only valid if using ipsec or cipso on lan | ||
1024 | */ | ||
1025 | static int apparmor_socket_getpeersec_stream(struct socket *sock, | ||
1026 | char __user *optval, | ||
1027 | int __user *optlen, | ||
1028 | unsigned int len) | ||
1029 | { | ||
1030 | char *name; | ||
1031 | int slen, error = 0; | ||
1032 | struct aa_label *label; | ||
1033 | struct aa_label *peer; | ||
1034 | |||
1035 | label = begin_current_label_crit_section(); | ||
1036 | peer = sk_peer_label(sock->sk); | ||
1037 | if (IS_ERR(peer)) { | ||
1038 | error = PTR_ERR(peer); | ||
1039 | goto done; | ||
1040 | } | ||
1041 | slen = aa_label_asxprint(&name, labels_ns(label), peer, | ||
1042 | FLAG_SHOW_MODE | FLAG_VIEW_SUBNS | | ||
1043 | FLAG_HIDDEN_UNCONFINED, GFP_KERNEL); | ||
1044 | /* don't include terminating \0 in slen, it breaks some apps */ | ||
1045 | if (slen < 0) { | ||
1046 | error = -ENOMEM; | ||
1047 | } else { | ||
1048 | if (slen > len) { | ||
1049 | error = -ERANGE; | ||
1050 | } else if (copy_to_user(optval, name, slen)) { | ||
1051 | error = -EFAULT; | ||
1052 | goto out; | ||
1053 | } | ||
1054 | if (put_user(slen, optlen)) | ||
1055 | error = -EFAULT; | ||
1056 | out: | ||
1057 | kfree(name); | ||
1058 | |||
1059 | } | ||
1060 | |||
1061 | done: | ||
1062 | end_current_label_crit_section(label); | ||
1063 | |||
1064 | return error; | ||
1065 | } | ||
1066 | |||
1067 | /** | ||
1068 | * apparmor_socket_getpeersec_dgram - get security label of packet | ||
1069 | * @sock: the peer socket | ||
1070 | * @skb: packet data | ||
1071 | * @secid: pointer to where to put the secid of the packet | ||
1072 | * | ||
1073 | * Sets the netlabel socket state on sk from parent | ||
1074 | */ | ||
1075 | static int apparmor_socket_getpeersec_dgram(struct socket *sock, | ||
1076 | struct sk_buff *skb, u32 *secid) | ||
1077 | |||
1078 | { | ||
1079 | /* TODO: requires secid support */ | ||
1080 | return -ENOPROTOOPT; | ||
1081 | } | ||
1082 | |||
1083 | /** | ||
1084 | * apparmor_sock_graft - Initialize newly created socket | ||
1085 | * @sk: child sock | ||
1086 | * @parent: parent socket | ||
1087 | * | ||
1088 | * Note: could set off of SOCK_CTX(parent) but need to track inode and we can | ||
1089 | * just set sk security information off of current creating process label | ||
1090 | * Labeling of sk for accept case - probably should be sock based | ||
1091 | * instead of task, because of the case where an implicitly labeled | ||
1092 | * socket is shared by different tasks. | ||
1093 | */ | ||
1094 | static void apparmor_sock_graft(struct sock *sk, struct socket *parent) | ||
1095 | { | ||
1096 | struct aa_sk_ctx *ctx = SK_CTX(sk); | ||
1097 | |||
1098 | if (!ctx->label) | ||
1099 | ctx->label = aa_get_current_label(); | ||
1100 | } | ||
1101 | |||
659 | static struct security_hook_list apparmor_hooks[] __lsm_ro_after_init = { | 1102 | static struct security_hook_list apparmor_hooks[] __lsm_ro_after_init = { |
660 | LSM_HOOK_INIT(ptrace_access_check, apparmor_ptrace_access_check), | 1103 | LSM_HOOK_INIT(ptrace_access_check, apparmor_ptrace_access_check), |
661 | LSM_HOOK_INIT(ptrace_traceme, apparmor_ptrace_traceme), | 1104 | LSM_HOOK_INIT(ptrace_traceme, apparmor_ptrace_traceme), |
662 | LSM_HOOK_INIT(capget, apparmor_capget), | 1105 | LSM_HOOK_INIT(capget, apparmor_capget), |
663 | LSM_HOOK_INIT(capable, apparmor_capable), | 1106 | LSM_HOOK_INIT(capable, apparmor_capable), |
664 | 1107 | ||
1108 | LSM_HOOK_INIT(sb_mount, apparmor_sb_mount), | ||
1109 | LSM_HOOK_INIT(sb_umount, apparmor_sb_umount), | ||
1110 | LSM_HOOK_INIT(sb_pivotroot, apparmor_sb_pivotroot), | ||
1111 | |||
665 | LSM_HOOK_INIT(path_link, apparmor_path_link), | 1112 | LSM_HOOK_INIT(path_link, apparmor_path_link), |
666 | LSM_HOOK_INIT(path_unlink, apparmor_path_unlink), | 1113 | LSM_HOOK_INIT(path_unlink, apparmor_path_unlink), |
667 | LSM_HOOK_INIT(path_symlink, apparmor_path_symlink), | 1114 | LSM_HOOK_INIT(path_symlink, apparmor_path_symlink), |
@@ -686,6 +1133,30 @@ static struct security_hook_list apparmor_hooks[] __lsm_ro_after_init = { | |||
686 | LSM_HOOK_INIT(getprocattr, apparmor_getprocattr), | 1133 | LSM_HOOK_INIT(getprocattr, apparmor_getprocattr), |
687 | LSM_HOOK_INIT(setprocattr, apparmor_setprocattr), | 1134 | LSM_HOOK_INIT(setprocattr, apparmor_setprocattr), |
688 | 1135 | ||
1136 | LSM_HOOK_INIT(sk_alloc_security, apparmor_sk_alloc_security), | ||
1137 | LSM_HOOK_INIT(sk_free_security, apparmor_sk_free_security), | ||
1138 | LSM_HOOK_INIT(sk_clone_security, apparmor_sk_clone_security), | ||
1139 | |||
1140 | LSM_HOOK_INIT(socket_create, apparmor_socket_create), | ||
1141 | LSM_HOOK_INIT(socket_post_create, apparmor_socket_post_create), | ||
1142 | LSM_HOOK_INIT(socket_bind, apparmor_socket_bind), | ||
1143 | LSM_HOOK_INIT(socket_connect, apparmor_socket_connect), | ||
1144 | LSM_HOOK_INIT(socket_listen, apparmor_socket_listen), | ||
1145 | LSM_HOOK_INIT(socket_accept, apparmor_socket_accept), | ||
1146 | LSM_HOOK_INIT(socket_sendmsg, apparmor_socket_sendmsg), | ||
1147 | LSM_HOOK_INIT(socket_recvmsg, apparmor_socket_recvmsg), | ||
1148 | LSM_HOOK_INIT(socket_getsockname, apparmor_socket_getsockname), | ||
1149 | LSM_HOOK_INIT(socket_getpeername, apparmor_socket_getpeername), | ||
1150 | LSM_HOOK_INIT(socket_getsockopt, apparmor_socket_getsockopt), | ||
1151 | LSM_HOOK_INIT(socket_setsockopt, apparmor_socket_setsockopt), | ||
1152 | LSM_HOOK_INIT(socket_shutdown, apparmor_socket_shutdown), | ||
1153 | LSM_HOOK_INIT(socket_sock_rcv_skb, apparmor_socket_sock_rcv_skb), | ||
1154 | LSM_HOOK_INIT(socket_getpeersec_stream, | ||
1155 | apparmor_socket_getpeersec_stream), | ||
1156 | LSM_HOOK_INIT(socket_getpeersec_dgram, | ||
1157 | apparmor_socket_getpeersec_dgram), | ||
1158 | LSM_HOOK_INIT(sock_graft, apparmor_sock_graft), | ||
1159 | |||
689 | LSM_HOOK_INIT(cred_alloc_blank, apparmor_cred_alloc_blank), | 1160 | LSM_HOOK_INIT(cred_alloc_blank, apparmor_cred_alloc_blank), |
690 | LSM_HOOK_INIT(cred_free, apparmor_cred_free), | 1161 | LSM_HOOK_INIT(cred_free, apparmor_cred_free), |
691 | LSM_HOOK_INIT(cred_prepare, apparmor_cred_prepare), | 1162 | LSM_HOOK_INIT(cred_prepare, apparmor_cred_prepare), |
@@ -696,6 +1167,7 @@ static struct security_hook_list apparmor_hooks[] __lsm_ro_after_init = { | |||
696 | LSM_HOOK_INIT(bprm_committed_creds, apparmor_bprm_committed_creds), | 1167 | LSM_HOOK_INIT(bprm_committed_creds, apparmor_bprm_committed_creds), |
697 | 1168 | ||
698 | LSM_HOOK_INIT(task_setrlimit, apparmor_task_setrlimit), | 1169 | LSM_HOOK_INIT(task_setrlimit, apparmor_task_setrlimit), |
1170 | LSM_HOOK_INIT(task_kill, apparmor_task_kill), | ||
699 | }; | 1171 | }; |
700 | 1172 | ||
701 | /* | 1173 | /* |
diff --git a/security/apparmor/mount.c b/security/apparmor/mount.c new file mode 100644 index 000000000000..82a64b58041d --- /dev/null +++ b/security/apparmor/mount.c | |||
@@ -0,0 +1,696 @@ | |||
1 | /* | ||
2 | * AppArmor security module | ||
3 | * | ||
4 | * This file contains AppArmor mediation of files | ||
5 | * | ||
6 | * Copyright (C) 1998-2008 Novell/SUSE | ||
7 | * Copyright 2009-2017 Canonical Ltd. | ||
8 | * | ||
9 | * This program is free software; you can redistribute it and/or | ||
10 | * modify it under the terms of the GNU General Public License as | ||
11 | * published by the Free Software Foundation, version 2 of the | ||
12 | * License. | ||
13 | */ | ||
14 | |||
15 | #include <linux/fs.h> | ||
16 | #include <linux/mount.h> | ||
17 | #include <linux/namei.h> | ||
18 | |||
19 | #include "include/apparmor.h" | ||
20 | #include "include/audit.h" | ||
21 | #include "include/context.h" | ||
22 | #include "include/domain.h" | ||
23 | #include "include/file.h" | ||
24 | #include "include/match.h" | ||
25 | #include "include/mount.h" | ||
26 | #include "include/path.h" | ||
27 | #include "include/policy.h" | ||
28 | |||
29 | |||
30 | static void audit_mnt_flags(struct audit_buffer *ab, unsigned long flags) | ||
31 | { | ||
32 | if (flags & MS_RDONLY) | ||
33 | audit_log_format(ab, "ro"); | ||
34 | else | ||
35 | audit_log_format(ab, "rw"); | ||
36 | if (flags & MS_NOSUID) | ||
37 | audit_log_format(ab, ", nosuid"); | ||
38 | if (flags & MS_NODEV) | ||
39 | audit_log_format(ab, ", nodev"); | ||
40 | if (flags & MS_NOEXEC) | ||
41 | audit_log_format(ab, ", noexec"); | ||
42 | if (flags & MS_SYNCHRONOUS) | ||
43 | audit_log_format(ab, ", sync"); | ||
44 | if (flags & MS_REMOUNT) | ||
45 | audit_log_format(ab, ", remount"); | ||
46 | if (flags & MS_MANDLOCK) | ||
47 | audit_log_format(ab, ", mand"); | ||
48 | if (flags & MS_DIRSYNC) | ||
49 | audit_log_format(ab, ", dirsync"); | ||
50 | if (flags & MS_NOATIME) | ||
51 | audit_log_format(ab, ", noatime"); | ||
52 | if (flags & MS_NODIRATIME) | ||
53 | audit_log_format(ab, ", nodiratime"); | ||
54 | if (flags & MS_BIND) | ||
55 | audit_log_format(ab, flags & MS_REC ? ", rbind" : ", bind"); | ||
56 | if (flags & MS_MOVE) | ||
57 | audit_log_format(ab, ", move"); | ||
58 | if (flags & MS_SILENT) | ||
59 | audit_log_format(ab, ", silent"); | ||
60 | if (flags & MS_POSIXACL) | ||
61 | audit_log_format(ab, ", acl"); | ||
62 | if (flags & MS_UNBINDABLE) | ||
63 | audit_log_format(ab, flags & MS_REC ? ", runbindable" : | ||
64 | ", unbindable"); | ||
65 | if (flags & MS_PRIVATE) | ||
66 | audit_log_format(ab, flags & MS_REC ? ", rprivate" : | ||
67 | ", private"); | ||
68 | if (flags & MS_SLAVE) | ||
69 | audit_log_format(ab, flags & MS_REC ? ", rslave" : | ||
70 | ", slave"); | ||
71 | if (flags & MS_SHARED) | ||
72 | audit_log_format(ab, flags & MS_REC ? ", rshared" : | ||
73 | ", shared"); | ||
74 | if (flags & MS_RELATIME) | ||
75 | audit_log_format(ab, ", relatime"); | ||
76 | if (flags & MS_I_VERSION) | ||
77 | audit_log_format(ab, ", iversion"); | ||
78 | if (flags & MS_STRICTATIME) | ||
79 | audit_log_format(ab, ", strictatime"); | ||
80 | if (flags & MS_NOUSER) | ||
81 | audit_log_format(ab, ", nouser"); | ||
82 | } | ||
83 | |||
84 | /** | ||
85 | * audit_cb - call back for mount specific audit fields | ||
86 | * @ab: audit_buffer (NOT NULL) | ||
87 | * @va: audit struct to audit values of (NOT NULL) | ||
88 | */ | ||
89 | static void audit_cb(struct audit_buffer *ab, void *va) | ||
90 | { | ||
91 | struct common_audit_data *sa = va; | ||
92 | |||
93 | if (aad(sa)->mnt.type) { | ||
94 | audit_log_format(ab, " fstype="); | ||
95 | audit_log_untrustedstring(ab, aad(sa)->mnt.type); | ||
96 | } | ||
97 | if (aad(sa)->mnt.src_name) { | ||
98 | audit_log_format(ab, " srcname="); | ||
99 | audit_log_untrustedstring(ab, aad(sa)->mnt.src_name); | ||
100 | } | ||
101 | if (aad(sa)->mnt.trans) { | ||
102 | audit_log_format(ab, " trans="); | ||
103 | audit_log_untrustedstring(ab, aad(sa)->mnt.trans); | ||
104 | } | ||
105 | if (aad(sa)->mnt.flags) { | ||
106 | audit_log_format(ab, " flags=\""); | ||
107 | audit_mnt_flags(ab, aad(sa)->mnt.flags); | ||
108 | audit_log_format(ab, "\""); | ||
109 | } | ||
110 | if (aad(sa)->mnt.data) { | ||
111 | audit_log_format(ab, " options="); | ||
112 | audit_log_untrustedstring(ab, aad(sa)->mnt.data); | ||
113 | } | ||
114 | } | ||
115 | |||
116 | /** | ||
117 | * audit_mount - handle the auditing of mount operations | ||
118 | * @profile: the profile being enforced (NOT NULL) | ||
119 | * @op: operation being mediated (NOT NULL) | ||
120 | * @name: name of object being mediated (MAYBE NULL) | ||
121 | * @src_name: src_name of object being mediated (MAYBE_NULL) | ||
122 | * @type: type of filesystem (MAYBE_NULL) | ||
123 | * @trans: name of trans (MAYBE NULL) | ||
124 | * @flags: filesystem idependent mount flags | ||
125 | * @data: filesystem mount flags | ||
126 | * @request: permissions requested | ||
127 | * @perms: the permissions computed for the request (NOT NULL) | ||
128 | * @info: extra information message (MAYBE NULL) | ||
129 | * @error: 0 if operation allowed else failure error code | ||
130 | * | ||
131 | * Returns: %0 or error on failure | ||
132 | */ | ||
133 | static int audit_mount(struct aa_profile *profile, const char *op, | ||
134 | const char *name, const char *src_name, | ||
135 | const char *type, const char *trans, | ||
136 | unsigned long flags, const void *data, u32 request, | ||
137 | struct aa_perms *perms, const char *info, int error) | ||
138 | { | ||
139 | int audit_type = AUDIT_APPARMOR_AUTO; | ||
140 | DEFINE_AUDIT_DATA(sa, LSM_AUDIT_DATA_NONE, op); | ||
141 | |||
142 | if (likely(!error)) { | ||
143 | u32 mask = perms->audit; | ||
144 | |||
145 | if (unlikely(AUDIT_MODE(profile) == AUDIT_ALL)) | ||
146 | mask = 0xffff; | ||
147 | |||
148 | /* mask off perms that are not being force audited */ | ||
149 | request &= mask; | ||
150 | |||
151 | if (likely(!request)) | ||
152 | return 0; | ||
153 | audit_type = AUDIT_APPARMOR_AUDIT; | ||
154 | } else { | ||
155 | /* only report permissions that were denied */ | ||
156 | request = request & ~perms->allow; | ||
157 | |||
158 | if (request & perms->kill) | ||
159 | audit_type = AUDIT_APPARMOR_KILL; | ||
160 | |||
161 | /* quiet known rejects, assumes quiet and kill do not overlap */ | ||
162 | if ((request & perms->quiet) && | ||
163 | AUDIT_MODE(profile) != AUDIT_NOQUIET && | ||
164 | AUDIT_MODE(profile) != AUDIT_ALL) | ||
165 | request &= ~perms->quiet; | ||
166 | |||
167 | if (!request) | ||
168 | return error; | ||
169 | } | ||
170 | |||
171 | aad(&sa)->name = name; | ||
172 | aad(&sa)->mnt.src_name = src_name; | ||
173 | aad(&sa)->mnt.type = type; | ||
174 | aad(&sa)->mnt.trans = trans; | ||
175 | aad(&sa)->mnt.flags = flags; | ||
176 | if (data && (perms->audit & AA_AUDIT_DATA)) | ||
177 | aad(&sa)->mnt.data = data; | ||
178 | aad(&sa)->info = info; | ||
179 | aad(&sa)->error = error; | ||
180 | |||
181 | return aa_audit(audit_type, profile, &sa, audit_cb); | ||
182 | } | ||
183 | |||
184 | /** | ||
185 | * match_mnt_flags - Do an ordered match on mount flags | ||
186 | * @dfa: dfa to match against | ||
187 | * @state: state to start in | ||
188 | * @flags: mount flags to match against | ||
189 | * | ||
190 | * Mount flags are encoded as an ordered match. This is done instead of | ||
191 | * checking against a simple bitmask, to allow for logical operations | ||
192 | * on the flags. | ||
193 | * | ||
194 | * Returns: next state after flags match | ||
195 | */ | ||
196 | static unsigned int match_mnt_flags(struct aa_dfa *dfa, unsigned int state, | ||
197 | unsigned long flags) | ||
198 | { | ||
199 | unsigned int i; | ||
200 | |||
201 | for (i = 0; i <= 31 ; ++i) { | ||
202 | if ((1 << i) & flags) | ||
203 | state = aa_dfa_next(dfa, state, i + 1); | ||
204 | } | ||
205 | |||
206 | return state; | ||
207 | } | ||
208 | |||
209 | /** | ||
210 | * compute_mnt_perms - compute mount permission associated with @state | ||
211 | * @dfa: dfa to match against (NOT NULL) | ||
212 | * @state: state match finished in | ||
213 | * | ||
214 | * Returns: mount permissions | ||
215 | */ | ||
216 | static struct aa_perms compute_mnt_perms(struct aa_dfa *dfa, | ||
217 | unsigned int state) | ||
218 | { | ||
219 | struct aa_perms perms; | ||
220 | |||
221 | perms.kill = 0; | ||
222 | perms.allow = dfa_user_allow(dfa, state); | ||
223 | perms.audit = dfa_user_audit(dfa, state); | ||
224 | perms.quiet = dfa_user_quiet(dfa, state); | ||
225 | perms.xindex = dfa_user_xindex(dfa, state); | ||
226 | |||
227 | return perms; | ||
228 | } | ||
229 | |||
230 | static const char * const mnt_info_table[] = { | ||
231 | "match succeeded", | ||
232 | "failed mntpnt match", | ||
233 | "failed srcname match", | ||
234 | "failed type match", | ||
235 | "failed flags match", | ||
236 | "failed data match" | ||
237 | }; | ||
238 | |||
239 | /* | ||
240 | * Returns 0 on success else element that match failed in, this is the | ||
241 | * index into the mnt_info_table above | ||
242 | */ | ||
243 | static int do_match_mnt(struct aa_dfa *dfa, unsigned int start, | ||
244 | const char *mntpnt, const char *devname, | ||
245 | const char *type, unsigned long flags, | ||
246 | void *data, bool binary, struct aa_perms *perms) | ||
247 | { | ||
248 | unsigned int state; | ||
249 | |||
250 | AA_BUG(!dfa); | ||
251 | AA_BUG(!perms); | ||
252 | |||
253 | state = aa_dfa_match(dfa, start, mntpnt); | ||
254 | state = aa_dfa_null_transition(dfa, state); | ||
255 | if (!state) | ||
256 | return 1; | ||
257 | |||
258 | if (devname) | ||
259 | state = aa_dfa_match(dfa, state, devname); | ||
260 | state = aa_dfa_null_transition(dfa, state); | ||
261 | if (!state) | ||
262 | return 2; | ||
263 | |||
264 | if (type) | ||
265 | state = aa_dfa_match(dfa, state, type); | ||
266 | state = aa_dfa_null_transition(dfa, state); | ||
267 | if (!state) | ||
268 | return 3; | ||
269 | |||
270 | state = match_mnt_flags(dfa, state, flags); | ||
271 | if (!state) | ||
272 | return 4; | ||
273 | *perms = compute_mnt_perms(dfa, state); | ||
274 | if (perms->allow & AA_MAY_MOUNT) | ||
275 | return 0; | ||
276 | |||
277 | /* only match data if not binary and the DFA flags data is expected */ | ||
278 | if (data && !binary && (perms->allow & AA_MNT_CONT_MATCH)) { | ||
279 | state = aa_dfa_null_transition(dfa, state); | ||
280 | if (!state) | ||
281 | return 4; | ||
282 | |||
283 | state = aa_dfa_match(dfa, state, data); | ||
284 | if (!state) | ||
285 | return 5; | ||
286 | *perms = compute_mnt_perms(dfa, state); | ||
287 | if (perms->allow & AA_MAY_MOUNT) | ||
288 | return 0; | ||
289 | } | ||
290 | |||
291 | /* failed at end of flags match */ | ||
292 | return 4; | ||
293 | } | ||
294 | |||
295 | |||
296 | static int path_flags(struct aa_profile *profile, const struct path *path) | ||
297 | { | ||
298 | AA_BUG(!profile); | ||
299 | AA_BUG(!path); | ||
300 | |||
301 | return profile->path_flags | | ||
302 | (S_ISDIR(path->dentry->d_inode->i_mode) ? PATH_IS_DIR : 0); | ||
303 | } | ||
304 | |||
305 | /** | ||
306 | * match_mnt_path_str - handle path matching for mount | ||
307 | * @profile: the confining profile | ||
308 | * @mntpath: for the mntpnt (NOT NULL) | ||
309 | * @buffer: buffer to be used to lookup mntpath | ||
310 | * @devname: string for the devname/src_name (MAY BE NULL OR ERRPTR) | ||
311 | * @type: string for the dev type (MAYBE NULL) | ||
312 | * @flags: mount flags to match | ||
313 | * @data: fs mount data (MAYBE NULL) | ||
314 | * @binary: whether @data is binary | ||
315 | * @devinfo: error str if (IS_ERR(@devname)) | ||
316 | * | ||
317 | * Returns: 0 on success else error | ||
318 | */ | ||
319 | static int match_mnt_path_str(struct aa_profile *profile, | ||
320 | const struct path *mntpath, char *buffer, | ||
321 | const char *devname, const char *type, | ||
322 | unsigned long flags, void *data, bool binary, | ||
323 | const char *devinfo) | ||
324 | { | ||
325 | struct aa_perms perms = { }; | ||
326 | const char *mntpnt = NULL, *info = NULL; | ||
327 | int pos, error; | ||
328 | |||
329 | AA_BUG(!profile); | ||
330 | AA_BUG(!mntpath); | ||
331 | AA_BUG(!buffer); | ||
332 | |||
333 | error = aa_path_name(mntpath, path_flags(profile, mntpath), buffer, | ||
334 | &mntpnt, &info, profile->disconnected); | ||
335 | if (error) | ||
336 | goto audit; | ||
337 | if (IS_ERR(devname)) { | ||
338 | error = PTR_ERR(devname); | ||
339 | devname = NULL; | ||
340 | info = devinfo; | ||
341 | goto audit; | ||
342 | } | ||
343 | |||
344 | error = -EACCES; | ||
345 | pos = do_match_mnt(profile->policy.dfa, | ||
346 | profile->policy.start[AA_CLASS_MOUNT], | ||
347 | mntpnt, devname, type, flags, data, binary, &perms); | ||
348 | if (pos) { | ||
349 | info = mnt_info_table[pos]; | ||
350 | goto audit; | ||
351 | } | ||
352 | error = 0; | ||
353 | |||
354 | audit: | ||
355 | return audit_mount(profile, OP_MOUNT, mntpnt, devname, type, NULL, | ||
356 | flags, data, AA_MAY_MOUNT, &perms, info, error); | ||
357 | } | ||
358 | |||
359 | /** | ||
360 | * match_mnt - handle path matching for mount | ||
361 | * @profile: the confining profile | ||
362 | * @path: path for the mntpnt (NOT NULL) | ||
363 | * @buffer: buffer to be used to lookup mntpath | ||
364 | * @devpath: path devname/src_name (MAYBE NULL) | ||
365 | * @devbuffer: buffer to be used to lookup devname/src_name | ||
366 | * @type: string for the dev type (MAYBE NULL) | ||
367 | * @flags: mount flags to match | ||
368 | * @data: fs mount data (MAYBE NULL) | ||
369 | * @binary: whether @data is binary | ||
370 | * | ||
371 | * Returns: 0 on success else error | ||
372 | */ | ||
373 | static int match_mnt(struct aa_profile *profile, const struct path *path, | ||
374 | char *buffer, struct path *devpath, char *devbuffer, | ||
375 | const char *type, unsigned long flags, void *data, | ||
376 | bool binary) | ||
377 | { | ||
378 | const char *devname = NULL, *info = NULL; | ||
379 | int error = -EACCES; | ||
380 | |||
381 | AA_BUG(!profile); | ||
382 | AA_BUG(devpath && !devbuffer); | ||
383 | |||
384 | if (devpath) { | ||
385 | error = aa_path_name(devpath, path_flags(profile, devpath), | ||
386 | devbuffer, &devname, &info, | ||
387 | profile->disconnected); | ||
388 | if (error) | ||
389 | devname = ERR_PTR(error); | ||
390 | } | ||
391 | |||
392 | return match_mnt_path_str(profile, path, buffer, devname, type, flags, | ||
393 | data, binary, info); | ||
394 | } | ||
395 | |||
396 | int aa_remount(struct aa_label *label, const struct path *path, | ||
397 | unsigned long flags, void *data) | ||
398 | { | ||
399 | struct aa_profile *profile; | ||
400 | char *buffer = NULL; | ||
401 | bool binary; | ||
402 | int error; | ||
403 | |||
404 | AA_BUG(!label); | ||
405 | AA_BUG(!path); | ||
406 | |||
407 | binary = path->dentry->d_sb->s_type->fs_flags & FS_BINARY_MOUNTDATA; | ||
408 | |||
409 | get_buffers(buffer); | ||
410 | error = fn_for_each_confined(label, profile, | ||
411 | match_mnt(profile, path, buffer, NULL, NULL, NULL, | ||
412 | flags, data, binary)); | ||
413 | put_buffers(buffer); | ||
414 | |||
415 | return error; | ||
416 | } | ||
417 | |||
418 | int aa_bind_mount(struct aa_label *label, const struct path *path, | ||
419 | const char *dev_name, unsigned long flags) | ||
420 | { | ||
421 | struct aa_profile *profile; | ||
422 | char *buffer = NULL, *old_buffer = NULL; | ||
423 | struct path old_path; | ||
424 | int error; | ||
425 | |||
426 | AA_BUG(!label); | ||
427 | AA_BUG(!path); | ||
428 | |||
429 | if (!dev_name || !*dev_name) | ||
430 | return -EINVAL; | ||
431 | |||
432 | flags &= MS_REC | MS_BIND; | ||
433 | |||
434 | error = kern_path(dev_name, LOOKUP_FOLLOW|LOOKUP_AUTOMOUNT, &old_path); | ||
435 | if (error) | ||
436 | return error; | ||
437 | |||
438 | get_buffers(buffer, old_buffer); | ||
439 | error = fn_for_each_confined(label, profile, | ||
440 | match_mnt(profile, path, buffer, &old_path, old_buffer, | ||
441 | NULL, flags, NULL, false)); | ||
442 | put_buffers(buffer, old_buffer); | ||
443 | path_put(&old_path); | ||
444 | |||
445 | return error; | ||
446 | } | ||
447 | |||
448 | int aa_mount_change_type(struct aa_label *label, const struct path *path, | ||
449 | unsigned long flags) | ||
450 | { | ||
451 | struct aa_profile *profile; | ||
452 | char *buffer = NULL; | ||
453 | int error; | ||
454 | |||
455 | AA_BUG(!label); | ||
456 | AA_BUG(!path); | ||
457 | |||
458 | /* These are the flags allowed by do_change_type() */ | ||
459 | flags &= (MS_REC | MS_SILENT | MS_SHARED | MS_PRIVATE | MS_SLAVE | | ||
460 | MS_UNBINDABLE); | ||
461 | |||
462 | get_buffers(buffer); | ||
463 | error = fn_for_each_confined(label, profile, | ||
464 | match_mnt(profile, path, buffer, NULL, NULL, NULL, | ||
465 | flags, NULL, false)); | ||
466 | put_buffers(buffer); | ||
467 | |||
468 | return error; | ||
469 | } | ||
470 | |||
471 | int aa_move_mount(struct aa_label *label, const struct path *path, | ||
472 | const char *orig_name) | ||
473 | { | ||
474 | struct aa_profile *profile; | ||
475 | char *buffer = NULL, *old_buffer = NULL; | ||
476 | struct path old_path; | ||
477 | int error; | ||
478 | |||
479 | AA_BUG(!label); | ||
480 | AA_BUG(!path); | ||
481 | |||
482 | if (!orig_name || !*orig_name) | ||
483 | return -EINVAL; | ||
484 | |||
485 | error = kern_path(orig_name, LOOKUP_FOLLOW, &old_path); | ||
486 | if (error) | ||
487 | return error; | ||
488 | |||
489 | get_buffers(buffer, old_buffer); | ||
490 | error = fn_for_each_confined(label, profile, | ||
491 | match_mnt(profile, path, buffer, &old_path, old_buffer, | ||
492 | NULL, MS_MOVE, NULL, false)); | ||
493 | put_buffers(buffer, old_buffer); | ||
494 | path_put(&old_path); | ||
495 | |||
496 | return error; | ||
497 | } | ||
498 | |||
499 | int aa_new_mount(struct aa_label *label, const char *dev_name, | ||
500 | const struct path *path, const char *type, unsigned long flags, | ||
501 | void *data) | ||
502 | { | ||
503 | struct aa_profile *profile; | ||
504 | char *buffer = NULL, *dev_buffer = NULL; | ||
505 | bool binary = true; | ||
506 | int error; | ||
507 | int requires_dev = 0; | ||
508 | struct path tmp_path, *dev_path = NULL; | ||
509 | |||
510 | AA_BUG(!label); | ||
511 | AA_BUG(!path); | ||
512 | |||
513 | if (type) { | ||
514 | struct file_system_type *fstype; | ||
515 | |||
516 | fstype = get_fs_type(type); | ||
517 | if (!fstype) | ||
518 | return -ENODEV; | ||
519 | binary = fstype->fs_flags & FS_BINARY_MOUNTDATA; | ||
520 | requires_dev = fstype->fs_flags & FS_REQUIRES_DEV; | ||
521 | put_filesystem(fstype); | ||
522 | |||
523 | if (requires_dev) { | ||
524 | if (!dev_name || !*dev_name) | ||
525 | return -ENOENT; | ||
526 | |||
527 | error = kern_path(dev_name, LOOKUP_FOLLOW, &tmp_path); | ||
528 | if (error) | ||
529 | return error; | ||
530 | dev_path = &tmp_path; | ||
531 | } | ||
532 | } | ||
533 | |||
534 | get_buffers(buffer, dev_buffer); | ||
535 | if (dev_path) { | ||
536 | error = fn_for_each_confined(label, profile, | ||
537 | match_mnt(profile, path, buffer, dev_path, dev_buffer, | ||
538 | type, flags, data, binary)); | ||
539 | } else { | ||
540 | error = fn_for_each_confined(label, profile, | ||
541 | match_mnt_path_str(profile, path, buffer, dev_name, | ||
542 | type, flags, data, binary, NULL)); | ||
543 | } | ||
544 | put_buffers(buffer, dev_buffer); | ||
545 | if (dev_path) | ||
546 | path_put(dev_path); | ||
547 | |||
548 | return error; | ||
549 | } | ||
550 | |||
551 | static int profile_umount(struct aa_profile *profile, struct path *path, | ||
552 | char *buffer) | ||
553 | { | ||
554 | struct aa_perms perms = { }; | ||
555 | const char *name = NULL, *info = NULL; | ||
556 | unsigned int state; | ||
557 | int error; | ||
558 | |||
559 | AA_BUG(!profile); | ||
560 | AA_BUG(!path); | ||
561 | |||
562 | error = aa_path_name(path, path_flags(profile, path), buffer, &name, | ||
563 | &info, profile->disconnected); | ||
564 | if (error) | ||
565 | goto audit; | ||
566 | |||
567 | state = aa_dfa_match(profile->policy.dfa, | ||
568 | profile->policy.start[AA_CLASS_MOUNT], | ||
569 | name); | ||
570 | perms = compute_mnt_perms(profile->policy.dfa, state); | ||
571 | if (AA_MAY_UMOUNT & ~perms.allow) | ||
572 | error = -EACCES; | ||
573 | |||
574 | audit: | ||
575 | return audit_mount(profile, OP_UMOUNT, name, NULL, NULL, NULL, 0, NULL, | ||
576 | AA_MAY_UMOUNT, &perms, info, error); | ||
577 | } | ||
578 | |||
579 | int aa_umount(struct aa_label *label, struct vfsmount *mnt, int flags) | ||
580 | { | ||
581 | struct aa_profile *profile; | ||
582 | char *buffer = NULL; | ||
583 | int error; | ||
584 | struct path path = { .mnt = mnt, .dentry = mnt->mnt_root }; | ||
585 | |||
586 | AA_BUG(!label); | ||
587 | AA_BUG(!mnt); | ||
588 | |||
589 | get_buffers(buffer); | ||
590 | error = fn_for_each_confined(label, profile, | ||
591 | profile_umount(profile, &path, buffer)); | ||
592 | put_buffers(buffer); | ||
593 | |||
594 | return error; | ||
595 | } | ||
596 | |||
597 | /* helper fn for transition on pivotroot | ||
598 | * | ||
599 | * Returns: label for transition or ERR_PTR. Does not return NULL | ||
600 | */ | ||
601 | static struct aa_label *build_pivotroot(struct aa_profile *profile, | ||
602 | const struct path *new_path, | ||
603 | char *new_buffer, | ||
604 | const struct path *old_path, | ||
605 | char *old_buffer) | ||
606 | { | ||
607 | const char *old_name, *new_name = NULL, *info = NULL; | ||
608 | const char *trans_name = NULL; | ||
609 | struct aa_perms perms = { }; | ||
610 | unsigned int state; | ||
611 | int error; | ||
612 | |||
613 | AA_BUG(!profile); | ||
614 | AA_BUG(!new_path); | ||
615 | AA_BUG(!old_path); | ||
616 | |||
617 | if (profile_unconfined(profile)) | ||
618 | return aa_get_newest_label(&profile->label); | ||
619 | |||
620 | error = aa_path_name(old_path, path_flags(profile, old_path), | ||
621 | old_buffer, &old_name, &info, | ||
622 | profile->disconnected); | ||
623 | if (error) | ||
624 | goto audit; | ||
625 | error = aa_path_name(new_path, path_flags(profile, new_path), | ||
626 | new_buffer, &new_name, &info, | ||
627 | profile->disconnected); | ||
628 | if (error) | ||
629 | goto audit; | ||
630 | |||
631 | error = -EACCES; | ||
632 | state = aa_dfa_match(profile->policy.dfa, | ||
633 | profile->policy.start[AA_CLASS_MOUNT], | ||
634 | new_name); | ||
635 | state = aa_dfa_null_transition(profile->policy.dfa, state); | ||
636 | state = aa_dfa_match(profile->policy.dfa, state, old_name); | ||
637 | perms = compute_mnt_perms(profile->policy.dfa, state); | ||
638 | |||
639 | if (AA_MAY_PIVOTROOT & perms.allow) | ||
640 | error = 0; | ||
641 | |||
642 | audit: | ||
643 | error = audit_mount(profile, OP_PIVOTROOT, new_name, old_name, | ||
644 | NULL, trans_name, 0, NULL, AA_MAY_PIVOTROOT, | ||
645 | &perms, info, error); | ||
646 | if (error) | ||
647 | return ERR_PTR(error); | ||
648 | |||
649 | return aa_get_newest_label(&profile->label); | ||
650 | } | ||
651 | |||
652 | int aa_pivotroot(struct aa_label *label, const struct path *old_path, | ||
653 | const struct path *new_path) | ||
654 | { | ||
655 | struct aa_profile *profile; | ||
656 | struct aa_label *target = NULL; | ||
657 | char *old_buffer = NULL, *new_buffer = NULL, *info = NULL; | ||
658 | int error; | ||
659 | |||
660 | AA_BUG(!label); | ||
661 | AA_BUG(!old_path); | ||
662 | AA_BUG(!new_path); | ||
663 | |||
664 | get_buffers(old_buffer, new_buffer); | ||
665 | target = fn_label_build(label, profile, GFP_ATOMIC, | ||
666 | build_pivotroot(profile, new_path, new_buffer, | ||
667 | old_path, old_buffer)); | ||
668 | if (!target) { | ||
669 | info = "label build failed"; | ||
670 | error = -ENOMEM; | ||
671 | goto fail; | ||
672 | } else if (!IS_ERR(target)) { | ||
673 | error = aa_replace_current_label(target); | ||
674 | if (error) { | ||
675 | /* TODO: audit target */ | ||
676 | aa_put_label(target); | ||
677 | goto out; | ||
678 | } | ||
679 | } else | ||
680 | /* already audited error */ | ||
681 | error = PTR_ERR(target); | ||
682 | out: | ||
683 | put_buffers(old_buffer, new_buffer); | ||
684 | |||
685 | return error; | ||
686 | |||
687 | fail: | ||
688 | /* TODO: add back in auditing of new_name and old_name */ | ||
689 | error = fn_for_each(label, profile, | ||
690 | audit_mount(profile, OP_PIVOTROOT, NULL /*new_name */, | ||
691 | NULL /* old_name */, | ||
692 | NULL, NULL, | ||
693 | 0, NULL, AA_MAY_PIVOTROOT, &nullperms, info, | ||
694 | error)); | ||
695 | goto out; | ||
696 | } | ||
diff --git a/security/apparmor/net.c b/security/apparmor/net.c new file mode 100644 index 000000000000..33d54435f8d6 --- /dev/null +++ b/security/apparmor/net.c | |||
@@ -0,0 +1,184 @@ | |||
1 | /* | ||
2 | * AppArmor security module | ||
3 | * | ||
4 | * This file contains AppArmor network mediation | ||
5 | * | ||
6 | * Copyright (C) 1998-2008 Novell/SUSE | ||
7 | * Copyright 2009-2017 Canonical Ltd. | ||
8 | * | ||
9 | * This program is free software; you can redistribute it and/or | ||
10 | * modify it under the terms of the GNU General Public License as | ||
11 | * published by the Free Software Foundation, version 2 of the | ||
12 | * License. | ||
13 | */ | ||
14 | |||
15 | #include "include/apparmor.h" | ||
16 | #include "include/audit.h" | ||
17 | #include "include/context.h" | ||
18 | #include "include/label.h" | ||
19 | #include "include/net.h" | ||
20 | #include "include/policy.h" | ||
21 | |||
22 | #include "net_names.h" | ||
23 | |||
24 | |||
25 | struct aa_sfs_entry aa_sfs_entry_network[] = { | ||
26 | AA_SFS_FILE_STRING("af_mask", AA_SFS_AF_MASK), | ||
27 | { } | ||
28 | }; | ||
29 | |||
30 | static const char * const net_mask_names[] = { | ||
31 | "unknown", | ||
32 | "send", | ||
33 | "receive", | ||
34 | "unknown", | ||
35 | |||
36 | "create", | ||
37 | "shutdown", | ||
38 | "connect", | ||
39 | "unknown", | ||
40 | |||
41 | "setattr", | ||
42 | "getattr", | ||
43 | "setcred", | ||
44 | "getcred", | ||
45 | |||
46 | "chmod", | ||
47 | "chown", | ||
48 | "chgrp", | ||
49 | "lock", | ||
50 | |||
51 | "mmap", | ||
52 | "mprot", | ||
53 | "unknown", | ||
54 | "unknown", | ||
55 | |||
56 | "accept", | ||
57 | "bind", | ||
58 | "listen", | ||
59 | "unknown", | ||
60 | |||
61 | "setopt", | ||
62 | "getopt", | ||
63 | "unknown", | ||
64 | "unknown", | ||
65 | |||
66 | "unknown", | ||
67 | "unknown", | ||
68 | "unknown", | ||
69 | "unknown", | ||
70 | }; | ||
71 | |||
72 | |||
73 | /* audit callback for net specific fields */ | ||
74 | void audit_net_cb(struct audit_buffer *ab, void *va) | ||
75 | { | ||
76 | struct common_audit_data *sa = va; | ||
77 | |||
78 | audit_log_format(ab, " family="); | ||
79 | if (address_family_names[sa->u.net->family]) | ||
80 | audit_log_string(ab, address_family_names[sa->u.net->family]); | ||
81 | else | ||
82 | audit_log_format(ab, "\"unknown(%d)\"", sa->u.net->family); | ||
83 | audit_log_format(ab, " sock_type="); | ||
84 | if (sock_type_names[aad(sa)->net.type]) | ||
85 | audit_log_string(ab, sock_type_names[aad(sa)->net.type]); | ||
86 | else | ||
87 | audit_log_format(ab, "\"unknown(%d)\"", aad(sa)->net.type); | ||
88 | audit_log_format(ab, " protocol=%d", aad(sa)->net.protocol); | ||
89 | |||
90 | if (aad(sa)->request & NET_PERMS_MASK) { | ||
91 | audit_log_format(ab, " requested_mask="); | ||
92 | aa_audit_perm_mask(ab, aad(sa)->request, NULL, 0, | ||
93 | net_mask_names, NET_PERMS_MASK); | ||
94 | |||
95 | if (aad(sa)->denied & NET_PERMS_MASK) { | ||
96 | audit_log_format(ab, " denied_mask="); | ||
97 | aa_audit_perm_mask(ab, aad(sa)->denied, NULL, 0, | ||
98 | net_mask_names, NET_PERMS_MASK); | ||
99 | } | ||
100 | } | ||
101 | if (aad(sa)->peer) { | ||
102 | audit_log_format(ab, " peer="); | ||
103 | aa_label_xaudit(ab, labels_ns(aad(sa)->label), aad(sa)->peer, | ||
104 | FLAGS_NONE, GFP_ATOMIC); | ||
105 | } | ||
106 | } | ||
107 | |||
108 | |||
109 | /* Generic af perm */ | ||
110 | int aa_profile_af_perm(struct aa_profile *profile, struct common_audit_data *sa, | ||
111 | u32 request, u16 family, int type) | ||
112 | { | ||
113 | struct aa_perms perms = { }; | ||
114 | |||
115 | AA_BUG(family >= AF_MAX); | ||
116 | AA_BUG(type < 0 || type >= SOCK_MAX); | ||
117 | |||
118 | if (profile_unconfined(profile)) | ||
119 | return 0; | ||
120 | |||
121 | perms.allow = (profile->net.allow[family] & (1 << type)) ? | ||
122 | ALL_PERMS_MASK : 0; | ||
123 | perms.audit = (profile->net.audit[family] & (1 << type)) ? | ||
124 | ALL_PERMS_MASK : 0; | ||
125 | perms.quiet = (profile->net.quiet[family] & (1 << type)) ? | ||
126 | ALL_PERMS_MASK : 0; | ||
127 | aa_apply_modes_to_perms(profile, &perms); | ||
128 | |||
129 | return aa_check_perms(profile, &perms, request, sa, audit_net_cb); | ||
130 | } | ||
131 | |||
132 | int aa_af_perm(struct aa_label *label, const char *op, u32 request, u16 family, | ||
133 | int type, int protocol) | ||
134 | { | ||
135 | struct aa_profile *profile; | ||
136 | DEFINE_AUDIT_NET(sa, op, NULL, family, type, protocol); | ||
137 | |||
138 | return fn_for_each_confined(label, profile, | ||
139 | aa_profile_af_perm(profile, &sa, request, family, | ||
140 | type)); | ||
141 | } | ||
142 | |||
143 | static int aa_label_sk_perm(struct aa_label *label, const char *op, u32 request, | ||
144 | struct sock *sk) | ||
145 | { | ||
146 | struct aa_profile *profile; | ||
147 | DEFINE_AUDIT_SK(sa, op, sk); | ||
148 | |||
149 | AA_BUG(!label); | ||
150 | AA_BUG(!sk); | ||
151 | |||
152 | if (unconfined(label)) | ||
153 | return 0; | ||
154 | |||
155 | return fn_for_each_confined(label, profile, | ||
156 | aa_profile_af_sk_perm(profile, &sa, request, sk)); | ||
157 | } | ||
158 | |||
159 | int aa_sk_perm(const char *op, u32 request, struct sock *sk) | ||
160 | { | ||
161 | struct aa_label *label; | ||
162 | int error; | ||
163 | |||
164 | AA_BUG(!sk); | ||
165 | AA_BUG(in_interrupt()); | ||
166 | |||
167 | /* TODO: switch to begin_current_label ???? */ | ||
168 | label = begin_current_label_crit_section(); | ||
169 | error = aa_label_sk_perm(label, op, request, sk); | ||
170 | end_current_label_crit_section(label); | ||
171 | |||
172 | return error; | ||
173 | } | ||
174 | |||
175 | |||
176 | int aa_sock_file_perm(struct aa_label *label, const char *op, u32 request, | ||
177 | struct socket *sock) | ||
178 | { | ||
179 | AA_BUG(!label); | ||
180 | AA_BUG(!sock); | ||
181 | AA_BUG(!sock->sk); | ||
182 | |||
183 | return aa_label_sk_perm(label, op, request, sock->sk); | ||
184 | } | ||
diff --git a/security/apparmor/policy.c b/security/apparmor/policy.c index 244ea4a4a8f0..4243b0c3f0e4 100644 --- a/security/apparmor/policy.c +++ b/security/apparmor/policy.c | |||
@@ -289,85 +289,6 @@ fail: | |||
289 | return NULL; | 289 | return NULL; |
290 | } | 290 | } |
291 | 291 | ||
292 | /** | ||
293 | * aa_new_null_profile - create or find a null-X learning profile | ||
294 | * @parent: profile that caused this profile to be created (NOT NULL) | ||
295 | * @hat: true if the null- learning profile is a hat | ||
296 | * @base: name to base the null profile off of | ||
297 | * @gfp: type of allocation | ||
298 | * | ||
299 | * Find/Create a null- complain mode profile used in learning mode. The | ||
300 | * name of the profile is unique and follows the format of parent//null-XXX. | ||
301 | * where XXX is based on the @name or if that fails or is not supplied | ||
302 | * a unique number | ||
303 | * | ||
304 | * null profiles are added to the profile list but the list does not | ||
305 | * hold a count on them so that they are automatically released when | ||
306 | * not in use. | ||
307 | * | ||
308 | * Returns: new refcounted profile else NULL on failure | ||
309 | */ | ||
310 | struct aa_profile *aa_new_null_profile(struct aa_profile *parent, bool hat, | ||
311 | const char *base, gfp_t gfp) | ||
312 | { | ||
313 | struct aa_profile *profile; | ||
314 | char *name; | ||
315 | |||
316 | AA_BUG(!parent); | ||
317 | |||
318 | if (base) { | ||
319 | name = kmalloc(strlen(parent->base.hname) + 8 + strlen(base), | ||
320 | gfp); | ||
321 | if (name) { | ||
322 | sprintf(name, "%s//null-%s", parent->base.hname, base); | ||
323 | goto name; | ||
324 | } | ||
325 | /* fall through to try shorter uniq */ | ||
326 | } | ||
327 | |||
328 | name = kmalloc(strlen(parent->base.hname) + 2 + 7 + 8, gfp); | ||
329 | if (!name) | ||
330 | return NULL; | ||
331 | sprintf(name, "%s//null-%x", parent->base.hname, | ||
332 | atomic_inc_return(&parent->ns->uniq_null)); | ||
333 | |||
334 | name: | ||
335 | /* lookup to see if this is a dup creation */ | ||
336 | profile = aa_find_child(parent, basename(name)); | ||
337 | if (profile) | ||
338 | goto out; | ||
339 | |||
340 | profile = aa_alloc_profile(name, NULL, gfp); | ||
341 | if (!profile) | ||
342 | goto fail; | ||
343 | |||
344 | profile->mode = APPARMOR_COMPLAIN; | ||
345 | profile->label.flags |= FLAG_NULL; | ||
346 | if (hat) | ||
347 | profile->label.flags |= FLAG_HAT; | ||
348 | profile->path_flags = parent->path_flags; | ||
349 | |||
350 | /* released on free_profile */ | ||
351 | rcu_assign_pointer(profile->parent, aa_get_profile(parent)); | ||
352 | profile->ns = aa_get_ns(parent->ns); | ||
353 | profile->file.dfa = aa_get_dfa(nulldfa); | ||
354 | profile->policy.dfa = aa_get_dfa(nulldfa); | ||
355 | |||
356 | mutex_lock(&profile->ns->lock); | ||
357 | __add_profile(&parent->base.profiles, profile); | ||
358 | mutex_unlock(&profile->ns->lock); | ||
359 | |||
360 | /* refcount released by caller */ | ||
361 | out: | ||
362 | kfree(name); | ||
363 | |||
364 | return profile; | ||
365 | |||
366 | fail: | ||
367 | aa_free_profile(profile); | ||
368 | return NULL; | ||
369 | } | ||
370 | |||
371 | /* TODO: profile accounting - setup in remove */ | 292 | /* TODO: profile accounting - setup in remove */ |
372 | 293 | ||
373 | /** | 294 | /** |
@@ -559,6 +480,93 @@ struct aa_profile *aa_fqlookupn_profile(struct aa_label *base, | |||
559 | } | 480 | } |
560 | 481 | ||
561 | /** | 482 | /** |
483 | * aa_new_null_profile - create or find a null-X learning profile | ||
484 | * @parent: profile that caused this profile to be created (NOT NULL) | ||
485 | * @hat: true if the null- learning profile is a hat | ||
486 | * @base: name to base the null profile off of | ||
487 | * @gfp: type of allocation | ||
488 | * | ||
489 | * Find/Create a null- complain mode profile used in learning mode. The | ||
490 | * name of the profile is unique and follows the format of parent//null-XXX. | ||
491 | * where XXX is based on the @name or if that fails or is not supplied | ||
492 | * a unique number | ||
493 | * | ||
494 | * null profiles are added to the profile list but the list does not | ||
495 | * hold a count on them so that they are automatically released when | ||
496 | * not in use. | ||
497 | * | ||
498 | * Returns: new refcounted profile else NULL on failure | ||
499 | */ | ||
500 | struct aa_profile *aa_new_null_profile(struct aa_profile *parent, bool hat, | ||
501 | const char *base, gfp_t gfp) | ||
502 | { | ||
503 | struct aa_profile *p, *profile; | ||
504 | const char *bname; | ||
505 | char *name; | ||
506 | |||
507 | AA_BUG(!parent); | ||
508 | |||
509 | if (base) { | ||
510 | name = kmalloc(strlen(parent->base.hname) + 8 + strlen(base), | ||
511 | gfp); | ||
512 | if (name) { | ||
513 | sprintf(name, "%s//null-%s", parent->base.hname, base); | ||
514 | goto name; | ||
515 | } | ||
516 | /* fall through to try shorter uniq */ | ||
517 | } | ||
518 | |||
519 | name = kmalloc(strlen(parent->base.hname) + 2 + 7 + 8, gfp); | ||
520 | if (!name) | ||
521 | return NULL; | ||
522 | sprintf(name, "%s//null-%x", parent->base.hname, | ||
523 | atomic_inc_return(&parent->ns->uniq_null)); | ||
524 | |||
525 | name: | ||
526 | /* lookup to see if this is a dup creation */ | ||
527 | bname = basename(name); | ||
528 | profile = aa_find_child(parent, bname); | ||
529 | if (profile) | ||
530 | goto out; | ||
531 | |||
532 | profile = aa_alloc_profile(name, NULL, gfp); | ||
533 | if (!profile) | ||
534 | goto fail; | ||
535 | |||
536 | profile->mode = APPARMOR_COMPLAIN; | ||
537 | profile->label.flags |= FLAG_NULL; | ||
538 | if (hat) | ||
539 | profile->label.flags |= FLAG_HAT; | ||
540 | profile->path_flags = parent->path_flags; | ||
541 | |||
542 | /* released on free_profile */ | ||
543 | rcu_assign_pointer(profile->parent, aa_get_profile(parent)); | ||
544 | profile->ns = aa_get_ns(parent->ns); | ||
545 | profile->file.dfa = aa_get_dfa(nulldfa); | ||
546 | profile->policy.dfa = aa_get_dfa(nulldfa); | ||
547 | |||
548 | mutex_lock(&profile->ns->lock); | ||
549 | p = __find_child(&parent->base.profiles, bname); | ||
550 | if (p) { | ||
551 | aa_free_profile(profile); | ||
552 | profile = aa_get_profile(p); | ||
553 | } else { | ||
554 | __add_profile(&parent->base.profiles, profile); | ||
555 | } | ||
556 | mutex_unlock(&profile->ns->lock); | ||
557 | |||
558 | /* refcount released by caller */ | ||
559 | out: | ||
560 | kfree(name); | ||
561 | |||
562 | return profile; | ||
563 | |||
564 | fail: | ||
565 | aa_free_profile(profile); | ||
566 | return NULL; | ||
567 | } | ||
568 | |||
569 | /** | ||
562 | * replacement_allowed - test to see if replacement is allowed | 570 | * replacement_allowed - test to see if replacement is allowed |
563 | * @profile: profile to test if it can be replaced (MAYBE NULL) | 571 | * @profile: profile to test if it can be replaced (MAYBE NULL) |
564 | * @noreplace: true if replacement shouldn't be allowed but addition is okay | 572 | * @noreplace: true if replacement shouldn't be allowed but addition is okay |
diff --git a/security/apparmor/policy_ns.c b/security/apparmor/policy_ns.c index 351d3bab3a3d..62a3589c62ab 100644 --- a/security/apparmor/policy_ns.c +++ b/security/apparmor/policy_ns.c | |||
@@ -112,6 +112,8 @@ static struct aa_ns *alloc_ns(const char *prefix, const char *name) | |||
112 | ns->unconfined->label.flags |= FLAG_IX_ON_NAME_ERROR | | 112 | ns->unconfined->label.flags |= FLAG_IX_ON_NAME_ERROR | |
113 | FLAG_IMMUTIBLE | FLAG_NS_COUNT | FLAG_UNCONFINED; | 113 | FLAG_IMMUTIBLE | FLAG_NS_COUNT | FLAG_UNCONFINED; |
114 | ns->unconfined->mode = APPARMOR_UNCONFINED; | 114 | ns->unconfined->mode = APPARMOR_UNCONFINED; |
115 | ns->unconfined->file.dfa = aa_get_dfa(nulldfa); | ||
116 | ns->unconfined->policy.dfa = aa_get_dfa(nulldfa); | ||
115 | 117 | ||
116 | /* ns and ns->unconfined share ns->unconfined refcount */ | 118 | /* ns and ns->unconfined share ns->unconfined refcount */ |
117 | ns->unconfined->ns = ns; | 119 | ns->unconfined->ns = ns; |
diff --git a/security/apparmor/policy_unpack.c b/security/apparmor/policy_unpack.c index c600f4dd1783..5a2aec358322 100644 --- a/security/apparmor/policy_unpack.c +++ b/security/apparmor/policy_unpack.c | |||
@@ -85,9 +85,9 @@ static void audit_cb(struct audit_buffer *ab, void *va) | |||
85 | audit_log_format(ab, " ns="); | 85 | audit_log_format(ab, " ns="); |
86 | audit_log_untrustedstring(ab, aad(sa)->iface.ns); | 86 | audit_log_untrustedstring(ab, aad(sa)->iface.ns); |
87 | } | 87 | } |
88 | if (aad(sa)->iface.name) { | 88 | if (aad(sa)->name) { |
89 | audit_log_format(ab, " name="); | 89 | audit_log_format(ab, " name="); |
90 | audit_log_untrustedstring(ab, aad(sa)->iface.name); | 90 | audit_log_untrustedstring(ab, aad(sa)->name); |
91 | } | 91 | } |
92 | if (aad(sa)->iface.pos) | 92 | if (aad(sa)->iface.pos) |
93 | audit_log_format(ab, " offset=%ld", aad(sa)->iface.pos); | 93 | audit_log_format(ab, " offset=%ld", aad(sa)->iface.pos); |
@@ -114,9 +114,9 @@ static int audit_iface(struct aa_profile *new, const char *ns_name, | |||
114 | aad(&sa)->iface.pos = e->pos - e->start; | 114 | aad(&sa)->iface.pos = e->pos - e->start; |
115 | aad(&sa)->iface.ns = ns_name; | 115 | aad(&sa)->iface.ns = ns_name; |
116 | if (new) | 116 | if (new) |
117 | aad(&sa)->iface.name = new->base.hname; | 117 | aad(&sa)->name = new->base.hname; |
118 | else | 118 | else |
119 | aad(&sa)->iface.name = name; | 119 | aad(&sa)->name = name; |
120 | aad(&sa)->info = info; | 120 | aad(&sa)->info = info; |
121 | aad(&sa)->error = error; | 121 | aad(&sa)->error = error; |
122 | 122 | ||
@@ -275,6 +275,19 @@ fail: | |||
275 | return 0; | 275 | return 0; |
276 | } | 276 | } |
277 | 277 | ||
278 | static bool unpack_u16(struct aa_ext *e, u16 *data, const char *name) | ||
279 | { | ||
280 | if (unpack_nameX(e, AA_U16, name)) { | ||
281 | if (!inbounds(e, sizeof(u16))) | ||
282 | return 0; | ||
283 | if (data) | ||
284 | *data = le16_to_cpu(get_unaligned((__le16 *) e->pos)); | ||
285 | e->pos += sizeof(u16); | ||
286 | return 1; | ||
287 | } | ||
288 | return 0; | ||
289 | } | ||
290 | |||
278 | static bool unpack_u32(struct aa_ext *e, u32 *data, const char *name) | 291 | static bool unpack_u32(struct aa_ext *e, u32 *data, const char *name) |
279 | { | 292 | { |
280 | if (unpack_nameX(e, AA_U32, name)) { | 293 | if (unpack_nameX(e, AA_U32, name)) { |
@@ -448,7 +461,7 @@ fail: | |||
448 | */ | 461 | */ |
449 | static bool unpack_trans_table(struct aa_ext *e, struct aa_profile *profile) | 462 | static bool unpack_trans_table(struct aa_ext *e, struct aa_profile *profile) |
450 | { | 463 | { |
451 | void *pos = e->pos; | 464 | void *saved_pos = e->pos; |
452 | 465 | ||
453 | /* exec table is optional */ | 466 | /* exec table is optional */ |
454 | if (unpack_nameX(e, AA_STRUCT, "xtable")) { | 467 | if (unpack_nameX(e, AA_STRUCT, "xtable")) { |
@@ -511,7 +524,7 @@ static bool unpack_trans_table(struct aa_ext *e, struct aa_profile *profile) | |||
511 | 524 | ||
512 | fail: | 525 | fail: |
513 | aa_free_domain_entries(&profile->file.trans); | 526 | aa_free_domain_entries(&profile->file.trans); |
514 | e->pos = pos; | 527 | e->pos = saved_pos; |
515 | return 0; | 528 | return 0; |
516 | } | 529 | } |
517 | 530 | ||
@@ -583,7 +596,8 @@ static struct aa_profile *unpack_profile(struct aa_ext *e, char **ns_name) | |||
583 | { | 596 | { |
584 | struct aa_profile *profile = NULL; | 597 | struct aa_profile *profile = NULL; |
585 | const char *tmpname, *tmpns = NULL, *name = NULL; | 598 | const char *tmpname, *tmpns = NULL, *name = NULL; |
586 | size_t ns_len; | 599 | const char *info = "failed to unpack profile"; |
600 | size_t size = 0, ns_len; | ||
587 | struct rhashtable_params params = { 0 }; | 601 | struct rhashtable_params params = { 0 }; |
588 | char *key = NULL; | 602 | char *key = NULL; |
589 | struct aa_data *data; | 603 | struct aa_data *data; |
@@ -604,8 +618,10 @@ static struct aa_profile *unpack_profile(struct aa_ext *e, char **ns_name) | |||
604 | tmpname = aa_splitn_fqname(name, strlen(name), &tmpns, &ns_len); | 618 | tmpname = aa_splitn_fqname(name, strlen(name), &tmpns, &ns_len); |
605 | if (tmpns) { | 619 | if (tmpns) { |
606 | *ns_name = kstrndup(tmpns, ns_len, GFP_KERNEL); | 620 | *ns_name = kstrndup(tmpns, ns_len, GFP_KERNEL); |
607 | if (!*ns_name) | 621 | if (!*ns_name) { |
622 | info = "out of memory"; | ||
608 | goto fail; | 623 | goto fail; |
624 | } | ||
609 | name = tmpname; | 625 | name = tmpname; |
610 | } | 626 | } |
611 | 627 | ||
@@ -624,12 +640,15 @@ static struct aa_profile *unpack_profile(struct aa_ext *e, char **ns_name) | |||
624 | if (IS_ERR(profile->xmatch)) { | 640 | if (IS_ERR(profile->xmatch)) { |
625 | error = PTR_ERR(profile->xmatch); | 641 | error = PTR_ERR(profile->xmatch); |
626 | profile->xmatch = NULL; | 642 | profile->xmatch = NULL; |
643 | info = "bad xmatch"; | ||
627 | goto fail; | 644 | goto fail; |
628 | } | 645 | } |
629 | /* xmatch_len is not optional if xmatch is set */ | 646 | /* xmatch_len is not optional if xmatch is set */ |
630 | if (profile->xmatch) { | 647 | if (profile->xmatch) { |
631 | if (!unpack_u32(e, &tmp, NULL)) | 648 | if (!unpack_u32(e, &tmp, NULL)) { |
649 | info = "missing xmatch len"; | ||
632 | goto fail; | 650 | goto fail; |
651 | } | ||
633 | profile->xmatch_len = tmp; | 652 | profile->xmatch_len = tmp; |
634 | } | 653 | } |
635 | 654 | ||
@@ -637,8 +656,11 @@ static struct aa_profile *unpack_profile(struct aa_ext *e, char **ns_name) | |||
637 | (void) unpack_str(e, &profile->disconnected, "disconnected"); | 656 | (void) unpack_str(e, &profile->disconnected, "disconnected"); |
638 | 657 | ||
639 | /* per profile debug flags (complain, audit) */ | 658 | /* per profile debug flags (complain, audit) */ |
640 | if (!unpack_nameX(e, AA_STRUCT, "flags")) | 659 | if (!unpack_nameX(e, AA_STRUCT, "flags")) { |
660 | info = "profile missing flags"; | ||
641 | goto fail; | 661 | goto fail; |
662 | } | ||
663 | info = "failed to unpack profile flags"; | ||
642 | if (!unpack_u32(e, &tmp, NULL)) | 664 | if (!unpack_u32(e, &tmp, NULL)) |
643 | goto fail; | 665 | goto fail; |
644 | if (tmp & PACKED_FLAG_HAT) | 666 | if (tmp & PACKED_FLAG_HAT) |
@@ -667,6 +689,7 @@ static struct aa_profile *unpack_profile(struct aa_ext *e, char **ns_name) | |||
667 | /* set a default value if path_flags field is not present */ | 689 | /* set a default value if path_flags field is not present */ |
668 | profile->path_flags = PATH_MEDIATE_DELETED; | 690 | profile->path_flags = PATH_MEDIATE_DELETED; |
669 | 691 | ||
692 | info = "failed to unpack profile capabilities"; | ||
670 | if (!unpack_u32(e, &(profile->caps.allow.cap[0]), NULL)) | 693 | if (!unpack_u32(e, &(profile->caps.allow.cap[0]), NULL)) |
671 | goto fail; | 694 | goto fail; |
672 | if (!unpack_u32(e, &(profile->caps.audit.cap[0]), NULL)) | 695 | if (!unpack_u32(e, &(profile->caps.audit.cap[0]), NULL)) |
@@ -676,6 +699,7 @@ static struct aa_profile *unpack_profile(struct aa_ext *e, char **ns_name) | |||
676 | if (!unpack_u32(e, &tmpcap.cap[0], NULL)) | 699 | if (!unpack_u32(e, &tmpcap.cap[0], NULL)) |
677 | goto fail; | 700 | goto fail; |
678 | 701 | ||
702 | info = "failed to unpack upper profile capabilities"; | ||
679 | if (unpack_nameX(e, AA_STRUCT, "caps64")) { | 703 | if (unpack_nameX(e, AA_STRUCT, "caps64")) { |
680 | /* optional upper half of 64 bit caps */ | 704 | /* optional upper half of 64 bit caps */ |
681 | if (!unpack_u32(e, &(profile->caps.allow.cap[1]), NULL)) | 705 | if (!unpack_u32(e, &(profile->caps.allow.cap[1]), NULL)) |
@@ -690,6 +714,7 @@ static struct aa_profile *unpack_profile(struct aa_ext *e, char **ns_name) | |||
690 | goto fail; | 714 | goto fail; |
691 | } | 715 | } |
692 | 716 | ||
717 | info = "failed to unpack extended profile capabilities"; | ||
693 | if (unpack_nameX(e, AA_STRUCT, "capsx")) { | 718 | if (unpack_nameX(e, AA_STRUCT, "capsx")) { |
694 | /* optional extended caps mediation mask */ | 719 | /* optional extended caps mediation mask */ |
695 | if (!unpack_u32(e, &(profile->caps.extended.cap[0]), NULL)) | 720 | if (!unpack_u32(e, &(profile->caps.extended.cap[0]), NULL)) |
@@ -700,11 +725,46 @@ static struct aa_profile *unpack_profile(struct aa_ext *e, char **ns_name) | |||
700 | goto fail; | 725 | goto fail; |
701 | } | 726 | } |
702 | 727 | ||
703 | if (!unpack_rlimits(e, profile)) | 728 | if (!unpack_rlimits(e, profile)) { |
729 | info = "failed to unpack profile rlimits"; | ||
704 | goto fail; | 730 | goto fail; |
731 | } | ||
732 | |||
733 | size = unpack_array(e, "net_allowed_af"); | ||
734 | if (size) { | ||
735 | |||
736 | for (i = 0; i < size; i++) { | ||
737 | /* discard extraneous rules that this kernel will | ||
738 | * never request | ||
739 | */ | ||
740 | if (i >= AF_MAX) { | ||
741 | u16 tmp; | ||
742 | |||
743 | if (!unpack_u16(e, &tmp, NULL) || | ||
744 | !unpack_u16(e, &tmp, NULL) || | ||
745 | !unpack_u16(e, &tmp, NULL)) | ||
746 | goto fail; | ||
747 | continue; | ||
748 | } | ||
749 | if (!unpack_u16(e, &profile->net.allow[i], NULL)) | ||
750 | goto fail; | ||
751 | if (!unpack_u16(e, &profile->net.audit[i], NULL)) | ||
752 | goto fail; | ||
753 | if (!unpack_u16(e, &profile->net.quiet[i], NULL)) | ||
754 | goto fail; | ||
755 | } | ||
756 | if (!unpack_nameX(e, AA_ARRAYEND, NULL)) | ||
757 | goto fail; | ||
758 | } | ||
759 | if (VERSION_LT(e->version, v7)) { | ||
760 | /* pre v7 policy always allowed these */ | ||
761 | profile->net.allow[AF_UNIX] = 0xffff; | ||
762 | profile->net.allow[AF_NETLINK] = 0xffff; | ||
763 | } | ||
705 | 764 | ||
706 | if (unpack_nameX(e, AA_STRUCT, "policydb")) { | 765 | if (unpack_nameX(e, AA_STRUCT, "policydb")) { |
707 | /* generic policy dfa - optional and may be NULL */ | 766 | /* generic policy dfa - optional and may be NULL */ |
767 | info = "failed to unpack policydb"; | ||
708 | profile->policy.dfa = unpack_dfa(e); | 768 | profile->policy.dfa = unpack_dfa(e); |
709 | if (IS_ERR(profile->policy.dfa)) { | 769 | if (IS_ERR(profile->policy.dfa)) { |
710 | error = PTR_ERR(profile->policy.dfa); | 770 | error = PTR_ERR(profile->policy.dfa); |
@@ -734,6 +794,7 @@ static struct aa_profile *unpack_profile(struct aa_ext *e, char **ns_name) | |||
734 | if (IS_ERR(profile->file.dfa)) { | 794 | if (IS_ERR(profile->file.dfa)) { |
735 | error = PTR_ERR(profile->file.dfa); | 795 | error = PTR_ERR(profile->file.dfa); |
736 | profile->file.dfa = NULL; | 796 | profile->file.dfa = NULL; |
797 | info = "failed to unpack profile file rules"; | ||
737 | goto fail; | 798 | goto fail; |
738 | } else if (profile->file.dfa) { | 799 | } else if (profile->file.dfa) { |
739 | if (!unpack_u32(e, &profile->file.start, "dfa_start")) | 800 | if (!unpack_u32(e, &profile->file.start, "dfa_start")) |
@@ -746,10 +807,13 @@ static struct aa_profile *unpack_profile(struct aa_ext *e, char **ns_name) | |||
746 | } else | 807 | } else |
747 | profile->file.dfa = aa_get_dfa(nulldfa); | 808 | profile->file.dfa = aa_get_dfa(nulldfa); |
748 | 809 | ||
749 | if (!unpack_trans_table(e, profile)) | 810 | if (!unpack_trans_table(e, profile)) { |
811 | info = "failed to unpack profile transition table"; | ||
750 | goto fail; | 812 | goto fail; |
813 | } | ||
751 | 814 | ||
752 | if (unpack_nameX(e, AA_STRUCT, "data")) { | 815 | if (unpack_nameX(e, AA_STRUCT, "data")) { |
816 | info = "out of memory"; | ||
753 | profile->data = kzalloc(sizeof(*profile->data), GFP_KERNEL); | 817 | profile->data = kzalloc(sizeof(*profile->data), GFP_KERNEL); |
754 | if (!profile->data) | 818 | if (!profile->data) |
755 | goto fail; | 819 | goto fail; |
@@ -761,8 +825,10 @@ static struct aa_profile *unpack_profile(struct aa_ext *e, char **ns_name) | |||
761 | params.hashfn = strhash; | 825 | params.hashfn = strhash; |
762 | params.obj_cmpfn = datacmp; | 826 | params.obj_cmpfn = datacmp; |
763 | 827 | ||
764 | if (rhashtable_init(profile->data, ¶ms)) | 828 | if (rhashtable_init(profile->data, ¶ms)) { |
829 | info = "failed to init key, value hash table"; | ||
765 | goto fail; | 830 | goto fail; |
831 | } | ||
766 | 832 | ||
767 | while (unpack_strdup(e, &key, NULL)) { | 833 | while (unpack_strdup(e, &key, NULL)) { |
768 | data = kzalloc(sizeof(*data), GFP_KERNEL); | 834 | data = kzalloc(sizeof(*data), GFP_KERNEL); |
@@ -784,12 +850,16 @@ static struct aa_profile *unpack_profile(struct aa_ext *e, char **ns_name) | |||
784 | profile->data->p); | 850 | profile->data->p); |
785 | } | 851 | } |
786 | 852 | ||
787 | if (!unpack_nameX(e, AA_STRUCTEND, NULL)) | 853 | if (!unpack_nameX(e, AA_STRUCTEND, NULL)) { |
854 | info = "failed to unpack end of key, value data table"; | ||
788 | goto fail; | 855 | goto fail; |
856 | } | ||
789 | } | 857 | } |
790 | 858 | ||
791 | if (!unpack_nameX(e, AA_STRUCTEND, NULL)) | 859 | if (!unpack_nameX(e, AA_STRUCTEND, NULL)) { |
860 | info = "failed to unpack end of profile"; | ||
792 | goto fail; | 861 | goto fail; |
862 | } | ||
793 | 863 | ||
794 | return profile; | 864 | return profile; |
795 | 865 | ||
@@ -798,8 +868,7 @@ fail: | |||
798 | name = NULL; | 868 | name = NULL; |
799 | else if (!name) | 869 | else if (!name) |
800 | name = "unknown"; | 870 | name = "unknown"; |
801 | audit_iface(profile, NULL, name, "failed to unpack profile", e, | 871 | audit_iface(profile, NULL, name, info, e, error); |
802 | error); | ||
803 | aa_free_profile(profile); | 872 | aa_free_profile(profile); |
804 | 873 | ||
805 | return ERR_PTR(error); | 874 | return ERR_PTR(error); |
@@ -832,7 +901,7 @@ static int verify_header(struct aa_ext *e, int required, const char **ns) | |||
832 | * if not specified use previous version | 901 | * if not specified use previous version |
833 | * Mask off everything that is not kernel abi version | 902 | * Mask off everything that is not kernel abi version |
834 | */ | 903 | */ |
835 | if (VERSION_LT(e->version, v5) && VERSION_GT(e->version, v7)) { | 904 | if (VERSION_LT(e->version, v5) || VERSION_GT(e->version, v7)) { |
836 | audit_iface(NULL, NULL, NULL, "unsupported interface version", | 905 | audit_iface(NULL, NULL, NULL, "unsupported interface version", |
837 | e, error); | 906 | e, error); |
838 | return error; | 907 | return error; |
diff --git a/security/commoncap.c b/security/commoncap.c index 6bf72b175b49..c25e0d27537f 100644 --- a/security/commoncap.c +++ b/security/commoncap.c | |||
@@ -294,10 +294,10 @@ int cap_capset(struct cred *new, | |||
294 | * | 294 | * |
295 | * Determine if an inode having a change applied that's marked ATTR_KILL_PRIV | 295 | * Determine if an inode having a change applied that's marked ATTR_KILL_PRIV |
296 | * affects the security markings on that inode, and if it is, should | 296 | * affects the security markings on that inode, and if it is, should |
297 | * inode_killpriv() be invoked or the change rejected? | 297 | * inode_killpriv() be invoked or the change rejected. |
298 | * | 298 | * |
299 | * Returns 0 if granted; +ve if granted, but inode_killpriv() is required; and | 299 | * Returns 1 if security.capability has a value, meaning inode_killpriv() |
300 | * -ve to deny the change. | 300 | * is required, 0 otherwise, meaning inode_killpriv() is not required. |
301 | */ | 301 | */ |
302 | int cap_inode_need_killpriv(struct dentry *dentry) | 302 | int cap_inode_need_killpriv(struct dentry *dentry) |
303 | { | 303 | { |
diff --git a/security/keys/Kconfig b/security/keys/Kconfig index a7a23b5541f8..91eafada3164 100644 --- a/security/keys/Kconfig +++ b/security/keys/Kconfig | |||
@@ -45,10 +45,8 @@ config BIG_KEYS | |||
45 | bool "Large payload keys" | 45 | bool "Large payload keys" |
46 | depends on KEYS | 46 | depends on KEYS |
47 | depends on TMPFS | 47 | depends on TMPFS |
48 | depends on (CRYPTO_ANSI_CPRNG = y || CRYPTO_DRBG = y) | ||
49 | select CRYPTO_AES | 48 | select CRYPTO_AES |
50 | select CRYPTO_ECB | 49 | select CRYPTO_GCM |
51 | select CRYPTO_RNG | ||
52 | help | 50 | help |
53 | This option provides support for holding large keys within the kernel | 51 | This option provides support for holding large keys within the kernel |
54 | (for example Kerberos ticket caches). The data may be stored out to | 52 | (for example Kerberos ticket caches). The data may be stored out to |
diff --git a/security/keys/big_key.c b/security/keys/big_key.c index 6acb00f6f22c..e607830b6154 100644 --- a/security/keys/big_key.c +++ b/security/keys/big_key.c | |||
@@ -1,5 +1,6 @@ | |||
1 | /* Large capacity key type | 1 | /* Large capacity key type |
2 | * | 2 | * |
3 | * Copyright (C) 2017 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved. | ||
3 | * Copyright (C) 2013 Red Hat, Inc. All Rights Reserved. | 4 | * Copyright (C) 2013 Red Hat, Inc. All Rights Reserved. |
4 | * Written by David Howells (dhowells@redhat.com) | 5 | * Written by David Howells (dhowells@redhat.com) |
5 | * | 6 | * |
@@ -16,10 +17,10 @@ | |||
16 | #include <linux/shmem_fs.h> | 17 | #include <linux/shmem_fs.h> |
17 | #include <linux/err.h> | 18 | #include <linux/err.h> |
18 | #include <linux/scatterlist.h> | 19 | #include <linux/scatterlist.h> |
20 | #include <linux/random.h> | ||
19 | #include <keys/user-type.h> | 21 | #include <keys/user-type.h> |
20 | #include <keys/big_key-type.h> | 22 | #include <keys/big_key-type.h> |
21 | #include <crypto/rng.h> | 23 | #include <crypto/aead.h> |
22 | #include <crypto/skcipher.h> | ||
23 | 24 | ||
24 | /* | 25 | /* |
25 | * Layout of key payload words. | 26 | * Layout of key payload words. |
@@ -49,7 +50,12 @@ enum big_key_op { | |||
49 | /* | 50 | /* |
50 | * Key size for big_key data encryption | 51 | * Key size for big_key data encryption |
51 | */ | 52 | */ |
52 | #define ENC_KEY_SIZE 16 | 53 | #define ENC_KEY_SIZE 32 |
54 | |||
55 | /* | ||
56 | * Authentication tag length | ||
57 | */ | ||
58 | #define ENC_AUTHTAG_SIZE 16 | ||
53 | 59 | ||
54 | /* | 60 | /* |
55 | * big_key defined keys take an arbitrary string as the description and an | 61 | * big_key defined keys take an arbitrary string as the description and an |
@@ -64,57 +70,62 @@ struct key_type key_type_big_key = { | |||
64 | .destroy = big_key_destroy, | 70 | .destroy = big_key_destroy, |
65 | .describe = big_key_describe, | 71 | .describe = big_key_describe, |
66 | .read = big_key_read, | 72 | .read = big_key_read, |
73 | /* no ->update(); don't add it without changing big_key_crypt() nonce */ | ||
67 | }; | 74 | }; |
68 | 75 | ||
69 | /* | 76 | /* |
70 | * Crypto names for big_key data encryption | 77 | * Crypto names for big_key data authenticated encryption |
71 | */ | 78 | */ |
72 | static const char big_key_rng_name[] = "stdrng"; | 79 | static const char big_key_alg_name[] = "gcm(aes)"; |
73 | static const char big_key_alg_name[] = "ecb(aes)"; | ||
74 | 80 | ||
75 | /* | 81 | /* |
76 | * Crypto algorithms for big_key data encryption | 82 | * Crypto algorithms for big_key data authenticated encryption |
77 | */ | 83 | */ |
78 | static struct crypto_rng *big_key_rng; | 84 | static struct crypto_aead *big_key_aead; |
79 | static struct crypto_skcipher *big_key_skcipher; | ||
80 | 85 | ||
81 | /* | 86 | /* |
82 | * Generate random key to encrypt big_key data | 87 | * Since changing the key affects the entire object, we need a mutex. |
83 | */ | 88 | */ |
84 | static inline int big_key_gen_enckey(u8 *key) | 89 | static DEFINE_MUTEX(big_key_aead_lock); |
85 | { | ||
86 | return crypto_rng_get_bytes(big_key_rng, key, ENC_KEY_SIZE); | ||
87 | } | ||
88 | 90 | ||
89 | /* | 91 | /* |
90 | * Encrypt/decrypt big_key data | 92 | * Encrypt/decrypt big_key data |
91 | */ | 93 | */ |
92 | static int big_key_crypt(enum big_key_op op, u8 *data, size_t datalen, u8 *key) | 94 | static int big_key_crypt(enum big_key_op op, u8 *data, size_t datalen, u8 *key) |
93 | { | 95 | { |
94 | int ret = -EINVAL; | 96 | int ret; |
95 | struct scatterlist sgio; | 97 | struct scatterlist sgio; |
96 | SKCIPHER_REQUEST_ON_STACK(req, big_key_skcipher); | 98 | struct aead_request *aead_req; |
97 | 99 | /* We always use a zero nonce. The reason we can get away with this is | |
98 | if (crypto_skcipher_setkey(big_key_skcipher, key, ENC_KEY_SIZE)) { | 100 | * because we're using a different randomly generated key for every |
101 | * different encryption. Notably, too, key_type_big_key doesn't define | ||
102 | * an .update function, so there's no chance we'll wind up reusing the | ||
103 | * key to encrypt updated data. Simply put: one key, one encryption. | ||
104 | */ | ||
105 | u8 zero_nonce[crypto_aead_ivsize(big_key_aead)]; | ||
106 | |||
107 | aead_req = aead_request_alloc(big_key_aead, GFP_KERNEL); | ||
108 | if (!aead_req) | ||
109 | return -ENOMEM; | ||
110 | |||
111 | memset(zero_nonce, 0, sizeof(zero_nonce)); | ||
112 | sg_init_one(&sgio, data, datalen + (op == BIG_KEY_ENC ? ENC_AUTHTAG_SIZE : 0)); | ||
113 | aead_request_set_crypt(aead_req, &sgio, &sgio, datalen, zero_nonce); | ||
114 | aead_request_set_callback(aead_req, CRYPTO_TFM_REQ_MAY_SLEEP, NULL, NULL); | ||
115 | aead_request_set_ad(aead_req, 0); | ||
116 | |||
117 | mutex_lock(&big_key_aead_lock); | ||
118 | if (crypto_aead_setkey(big_key_aead, key, ENC_KEY_SIZE)) { | ||
99 | ret = -EAGAIN; | 119 | ret = -EAGAIN; |
100 | goto error; | 120 | goto error; |
101 | } | 121 | } |
102 | |||
103 | skcipher_request_set_tfm(req, big_key_skcipher); | ||
104 | skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP, | ||
105 | NULL, NULL); | ||
106 | |||
107 | sg_init_one(&sgio, data, datalen); | ||
108 | skcipher_request_set_crypt(req, &sgio, &sgio, datalen, NULL); | ||
109 | |||
110 | if (op == BIG_KEY_ENC) | 122 | if (op == BIG_KEY_ENC) |
111 | ret = crypto_skcipher_encrypt(req); | 123 | ret = crypto_aead_encrypt(aead_req); |
112 | else | 124 | else |
113 | ret = crypto_skcipher_decrypt(req); | 125 | ret = crypto_aead_decrypt(aead_req); |
114 | |||
115 | skcipher_request_zero(req); | ||
116 | |||
117 | error: | 126 | error: |
127 | mutex_unlock(&big_key_aead_lock); | ||
128 | aead_request_free(aead_req); | ||
118 | return ret; | 129 | return ret; |
119 | } | 130 | } |
120 | 131 | ||
@@ -146,16 +157,13 @@ int big_key_preparse(struct key_preparsed_payload *prep) | |||
146 | * | 157 | * |
147 | * File content is stored encrypted with randomly generated key. | 158 | * File content is stored encrypted with randomly generated key. |
148 | */ | 159 | */ |
149 | size_t enclen = ALIGN(datalen, crypto_skcipher_blocksize(big_key_skcipher)); | 160 | size_t enclen = datalen + ENC_AUTHTAG_SIZE; |
150 | loff_t pos = 0; | 161 | loff_t pos = 0; |
151 | 162 | ||
152 | /* prepare aligned data to encrypt */ | ||
153 | data = kmalloc(enclen, GFP_KERNEL); | 163 | data = kmalloc(enclen, GFP_KERNEL); |
154 | if (!data) | 164 | if (!data) |
155 | return -ENOMEM; | 165 | return -ENOMEM; |
156 | |||
157 | memcpy(data, prep->data, datalen); | 166 | memcpy(data, prep->data, datalen); |
158 | memset(data + datalen, 0x00, enclen - datalen); | ||
159 | 167 | ||
160 | /* generate random key */ | 168 | /* generate random key */ |
161 | enckey = kmalloc(ENC_KEY_SIZE, GFP_KERNEL); | 169 | enckey = kmalloc(ENC_KEY_SIZE, GFP_KERNEL); |
@@ -163,13 +171,12 @@ int big_key_preparse(struct key_preparsed_payload *prep) | |||
163 | ret = -ENOMEM; | 171 | ret = -ENOMEM; |
164 | goto error; | 172 | goto error; |
165 | } | 173 | } |
166 | 174 | ret = get_random_bytes_wait(enckey, ENC_KEY_SIZE); | |
167 | ret = big_key_gen_enckey(enckey); | 175 | if (unlikely(ret)) |
168 | if (ret) | ||
169 | goto err_enckey; | 176 | goto err_enckey; |
170 | 177 | ||
171 | /* encrypt aligned data */ | 178 | /* encrypt aligned data */ |
172 | ret = big_key_crypt(BIG_KEY_ENC, data, enclen, enckey); | 179 | ret = big_key_crypt(BIG_KEY_ENC, data, datalen, enckey); |
173 | if (ret) | 180 | if (ret) |
174 | goto err_enckey; | 181 | goto err_enckey; |
175 | 182 | ||
@@ -195,7 +202,7 @@ int big_key_preparse(struct key_preparsed_payload *prep) | |||
195 | *path = file->f_path; | 202 | *path = file->f_path; |
196 | path_get(path); | 203 | path_get(path); |
197 | fput(file); | 204 | fput(file); |
198 | kfree(data); | 205 | kzfree(data); |
199 | } else { | 206 | } else { |
200 | /* Just store the data in a buffer */ | 207 | /* Just store the data in a buffer */ |
201 | void *data = kmalloc(datalen, GFP_KERNEL); | 208 | void *data = kmalloc(datalen, GFP_KERNEL); |
@@ -211,9 +218,9 @@ int big_key_preparse(struct key_preparsed_payload *prep) | |||
211 | err_fput: | 218 | err_fput: |
212 | fput(file); | 219 | fput(file); |
213 | err_enckey: | 220 | err_enckey: |
214 | kfree(enckey); | 221 | kzfree(enckey); |
215 | error: | 222 | error: |
216 | kfree(data); | 223 | kzfree(data); |
217 | return ret; | 224 | return ret; |
218 | } | 225 | } |
219 | 226 | ||
@@ -227,7 +234,7 @@ void big_key_free_preparse(struct key_preparsed_payload *prep) | |||
227 | 234 | ||
228 | path_put(path); | 235 | path_put(path); |
229 | } | 236 | } |
230 | kfree(prep->payload.data[big_key_data]); | 237 | kzfree(prep->payload.data[big_key_data]); |
231 | } | 238 | } |
232 | 239 | ||
233 | /* | 240 | /* |
@@ -259,7 +266,7 @@ void big_key_destroy(struct key *key) | |||
259 | path->mnt = NULL; | 266 | path->mnt = NULL; |
260 | path->dentry = NULL; | 267 | path->dentry = NULL; |
261 | } | 268 | } |
262 | kfree(key->payload.data[big_key_data]); | 269 | kzfree(key->payload.data[big_key_data]); |
263 | key->payload.data[big_key_data] = NULL; | 270 | key->payload.data[big_key_data] = NULL; |
264 | } | 271 | } |
265 | 272 | ||
@@ -295,7 +302,7 @@ long big_key_read(const struct key *key, char __user *buffer, size_t buflen) | |||
295 | struct file *file; | 302 | struct file *file; |
296 | u8 *data; | 303 | u8 *data; |
297 | u8 *enckey = (u8 *)key->payload.data[big_key_data]; | 304 | u8 *enckey = (u8 *)key->payload.data[big_key_data]; |
298 | size_t enclen = ALIGN(datalen, crypto_skcipher_blocksize(big_key_skcipher)); | 305 | size_t enclen = datalen + ENC_AUTHTAG_SIZE; |
299 | loff_t pos = 0; | 306 | loff_t pos = 0; |
300 | 307 | ||
301 | data = kmalloc(enclen, GFP_KERNEL); | 308 | data = kmalloc(enclen, GFP_KERNEL); |
@@ -328,7 +335,7 @@ long big_key_read(const struct key *key, char __user *buffer, size_t buflen) | |||
328 | err_fput: | 335 | err_fput: |
329 | fput(file); | 336 | fput(file); |
330 | error: | 337 | error: |
331 | kfree(data); | 338 | kzfree(data); |
332 | } else { | 339 | } else { |
333 | ret = datalen; | 340 | ret = datalen; |
334 | if (copy_to_user(buffer, key->payload.data[big_key_data], | 341 | if (copy_to_user(buffer, key->payload.data[big_key_data], |
@@ -344,47 +351,31 @@ error: | |||
344 | */ | 351 | */ |
345 | static int __init big_key_init(void) | 352 | static int __init big_key_init(void) |
346 | { | 353 | { |
347 | struct crypto_skcipher *cipher; | ||
348 | struct crypto_rng *rng; | ||
349 | int ret; | 354 | int ret; |
350 | 355 | ||
351 | rng = crypto_alloc_rng(big_key_rng_name, 0, 0); | ||
352 | if (IS_ERR(rng)) { | ||
353 | pr_err("Can't alloc rng: %ld\n", PTR_ERR(rng)); | ||
354 | return PTR_ERR(rng); | ||
355 | } | ||
356 | |||
357 | big_key_rng = rng; | ||
358 | |||
359 | /* seed RNG */ | ||
360 | ret = crypto_rng_reset(rng, NULL, crypto_rng_seedsize(rng)); | ||
361 | if (ret) { | ||
362 | pr_err("Can't reset rng: %d\n", ret); | ||
363 | goto error_rng; | ||
364 | } | ||
365 | |||
366 | /* init block cipher */ | 356 | /* init block cipher */ |
367 | cipher = crypto_alloc_skcipher(big_key_alg_name, 0, CRYPTO_ALG_ASYNC); | 357 | big_key_aead = crypto_alloc_aead(big_key_alg_name, 0, CRYPTO_ALG_ASYNC); |
368 | if (IS_ERR(cipher)) { | 358 | if (IS_ERR(big_key_aead)) { |
369 | ret = PTR_ERR(cipher); | 359 | ret = PTR_ERR(big_key_aead); |
370 | pr_err("Can't alloc crypto: %d\n", ret); | 360 | pr_err("Can't alloc crypto: %d\n", ret); |
371 | goto error_rng; | 361 | return ret; |
362 | } | ||
363 | ret = crypto_aead_setauthsize(big_key_aead, ENC_AUTHTAG_SIZE); | ||
364 | if (ret < 0) { | ||
365 | pr_err("Can't set crypto auth tag len: %d\n", ret); | ||
366 | goto free_aead; | ||
372 | } | 367 | } |
373 | |||
374 | big_key_skcipher = cipher; | ||
375 | 368 | ||
376 | ret = register_key_type(&key_type_big_key); | 369 | ret = register_key_type(&key_type_big_key); |
377 | if (ret < 0) { | 370 | if (ret < 0) { |
378 | pr_err("Can't register type: %d\n", ret); | 371 | pr_err("Can't register type: %d\n", ret); |
379 | goto error_cipher; | 372 | goto free_aead; |
380 | } | 373 | } |
381 | 374 | ||
382 | return 0; | 375 | return 0; |
383 | 376 | ||
384 | error_cipher: | 377 | free_aead: |
385 | crypto_free_skcipher(big_key_skcipher); | 378 | crypto_free_aead(big_key_aead); |
386 | error_rng: | ||
387 | crypto_free_rng(big_key_rng); | ||
388 | return ret; | 379 | return ret; |
389 | } | 380 | } |
390 | 381 | ||
diff --git a/security/keys/internal.h b/security/keys/internal.h index 1c02c6547038..503adbae7b0d 100644 --- a/security/keys/internal.h +++ b/security/keys/internal.h | |||
@@ -141,7 +141,7 @@ extern key_ref_t keyring_search_aux(key_ref_t keyring_ref, | |||
141 | extern key_ref_t search_my_process_keyrings(struct keyring_search_context *ctx); | 141 | extern key_ref_t search_my_process_keyrings(struct keyring_search_context *ctx); |
142 | extern key_ref_t search_process_keyrings(struct keyring_search_context *ctx); | 142 | extern key_ref_t search_process_keyrings(struct keyring_search_context *ctx); |
143 | 143 | ||
144 | extern struct key *find_keyring_by_name(const char *name, bool skip_perm_check); | 144 | extern struct key *find_keyring_by_name(const char *name, bool uid_keyring); |
145 | 145 | ||
146 | extern int install_user_keyrings(void); | 146 | extern int install_user_keyrings(void); |
147 | extern int install_thread_keyring_to_cred(struct cred *); | 147 | extern int install_thread_keyring_to_cred(struct cred *); |
diff --git a/security/keys/key.c b/security/keys/key.c index 83da68d98b40..eb914a838840 100644 --- a/security/keys/key.c +++ b/security/keys/key.c | |||
@@ -54,10 +54,10 @@ void __key_check(const struct key *key) | |||
54 | struct key_user *key_user_lookup(kuid_t uid) | 54 | struct key_user *key_user_lookup(kuid_t uid) |
55 | { | 55 | { |
56 | struct key_user *candidate = NULL, *user; | 56 | struct key_user *candidate = NULL, *user; |
57 | struct rb_node *parent = NULL; | 57 | struct rb_node *parent, **p; |
58 | struct rb_node **p; | ||
59 | 58 | ||
60 | try_again: | 59 | try_again: |
60 | parent = NULL; | ||
61 | p = &key_user_tree.rb_node; | 61 | p = &key_user_tree.rb_node; |
62 | spin_lock(&key_user_lock); | 62 | spin_lock(&key_user_lock); |
63 | 63 | ||
@@ -302,6 +302,8 @@ struct key *key_alloc(struct key_type *type, const char *desc, | |||
302 | key->flags |= 1 << KEY_FLAG_IN_QUOTA; | 302 | key->flags |= 1 << KEY_FLAG_IN_QUOTA; |
303 | if (flags & KEY_ALLOC_BUILT_IN) | 303 | if (flags & KEY_ALLOC_BUILT_IN) |
304 | key->flags |= 1 << KEY_FLAG_BUILTIN; | 304 | key->flags |= 1 << KEY_FLAG_BUILTIN; |
305 | if (flags & KEY_ALLOC_UID_KEYRING) | ||
306 | key->flags |= 1 << KEY_FLAG_UID_KEYRING; | ||
305 | 307 | ||
306 | #ifdef KEY_DEBUGGING | 308 | #ifdef KEY_DEBUGGING |
307 | key->magic = KEY_DEBUG_MAGIC; | 309 | key->magic = KEY_DEBUG_MAGIC; |
diff --git a/security/keys/keyctl.c b/security/keys/keyctl.c index ab0b337c84b4..365ff85d7e27 100644 --- a/security/keys/keyctl.c +++ b/security/keys/keyctl.c | |||
@@ -766,12 +766,17 @@ long keyctl_read_key(key_serial_t keyid, char __user *buffer, size_t buflen) | |||
766 | 766 | ||
767 | key = key_ref_to_ptr(key_ref); | 767 | key = key_ref_to_ptr(key_ref); |
768 | 768 | ||
769 | if (test_bit(KEY_FLAG_NEGATIVE, &key->flags)) { | ||
770 | ret = -ENOKEY; | ||
771 | goto error2; | ||
772 | } | ||
773 | |||
769 | /* see if we can read it directly */ | 774 | /* see if we can read it directly */ |
770 | ret = key_permission(key_ref, KEY_NEED_READ); | 775 | ret = key_permission(key_ref, KEY_NEED_READ); |
771 | if (ret == 0) | 776 | if (ret == 0) |
772 | goto can_read_key; | 777 | goto can_read_key; |
773 | if (ret != -EACCES) | 778 | if (ret != -EACCES) |
774 | goto error; | 779 | goto error2; |
775 | 780 | ||
776 | /* we can't; see if it's searchable from this process's keyrings | 781 | /* we can't; see if it's searchable from this process's keyrings |
777 | * - we automatically take account of the fact that it may be | 782 | * - we automatically take account of the fact that it may be |
@@ -1406,11 +1411,9 @@ long keyctl_assume_authority(key_serial_t id) | |||
1406 | } | 1411 | } |
1407 | 1412 | ||
1408 | ret = keyctl_change_reqkey_auth(authkey); | 1413 | ret = keyctl_change_reqkey_auth(authkey); |
1409 | if (ret < 0) | 1414 | if (ret == 0) |
1410 | goto error; | 1415 | ret = authkey->serial; |
1411 | key_put(authkey); | 1416 | key_put(authkey); |
1412 | |||
1413 | ret = authkey->serial; | ||
1414 | error: | 1417 | error: |
1415 | return ret; | 1418 | return ret; |
1416 | } | 1419 | } |
diff --git a/security/keys/keyring.c b/security/keys/keyring.c index de81793f9920..4fa82a8a9c0e 100644 --- a/security/keys/keyring.c +++ b/security/keys/keyring.c | |||
@@ -423,7 +423,7 @@ static void keyring_describe(const struct key *keyring, struct seq_file *m) | |||
423 | } | 423 | } |
424 | 424 | ||
425 | struct keyring_read_iterator_context { | 425 | struct keyring_read_iterator_context { |
426 | size_t qty; | 426 | size_t buflen; |
427 | size_t count; | 427 | size_t count; |
428 | key_serial_t __user *buffer; | 428 | key_serial_t __user *buffer; |
429 | }; | 429 | }; |
@@ -435,9 +435,9 @@ static int keyring_read_iterator(const void *object, void *data) | |||
435 | int ret; | 435 | int ret; |
436 | 436 | ||
437 | kenter("{%s,%d},,{%zu/%zu}", | 437 | kenter("{%s,%d},,{%zu/%zu}", |
438 | key->type->name, key->serial, ctx->count, ctx->qty); | 438 | key->type->name, key->serial, ctx->count, ctx->buflen); |
439 | 439 | ||
440 | if (ctx->count >= ctx->qty) | 440 | if (ctx->count >= ctx->buflen) |
441 | return 1; | 441 | return 1; |
442 | 442 | ||
443 | ret = put_user(key->serial, ctx->buffer); | 443 | ret = put_user(key->serial, ctx->buffer); |
@@ -472,16 +472,12 @@ static long keyring_read(const struct key *keyring, | |||
472 | return 0; | 472 | return 0; |
473 | 473 | ||
474 | /* Calculate how much data we could return */ | 474 | /* Calculate how much data we could return */ |
475 | ctx.qty = nr_keys * sizeof(key_serial_t); | ||
476 | |||
477 | if (!buffer || !buflen) | 475 | if (!buffer || !buflen) |
478 | return ctx.qty; | 476 | return nr_keys * sizeof(key_serial_t); |
479 | |||
480 | if (buflen > ctx.qty) | ||
481 | ctx.qty = buflen; | ||
482 | 477 | ||
483 | /* Copy the IDs of the subscribed keys into the buffer */ | 478 | /* Copy the IDs of the subscribed keys into the buffer */ |
484 | ctx.buffer = (key_serial_t __user *)buffer; | 479 | ctx.buffer = (key_serial_t __user *)buffer; |
480 | ctx.buflen = buflen; | ||
485 | ctx.count = 0; | 481 | ctx.count = 0; |
486 | ret = assoc_array_iterate(&keyring->keys, keyring_read_iterator, &ctx); | 482 | ret = assoc_array_iterate(&keyring->keys, keyring_read_iterator, &ctx); |
487 | if (ret < 0) { | 483 | if (ret < 0) { |
@@ -1101,15 +1097,15 @@ found: | |||
1101 | /* | 1097 | /* |
1102 | * Find a keyring with the specified name. | 1098 | * Find a keyring with the specified name. |
1103 | * | 1099 | * |
1104 | * All named keyrings in the current user namespace are searched, provided they | 1100 | * Only keyrings that have nonzero refcount, are not revoked, and are owned by a |
1105 | * grant Search permission directly to the caller (unless this check is | 1101 | * user in the current user namespace are considered. If @uid_keyring is %true, |
1106 | * skipped). Keyrings whose usage points have reached zero or who have been | 1102 | * the keyring additionally must have been allocated as a user or user session |
1107 | * revoked are skipped. | 1103 | * keyring; otherwise, it must grant Search permission directly to the caller. |
1108 | * | 1104 | * |
1109 | * Returns a pointer to the keyring with the keyring's refcount having being | 1105 | * Returns a pointer to the keyring with the keyring's refcount having being |
1110 | * incremented on success. -ENOKEY is returned if a key could not be found. | 1106 | * incremented on success. -ENOKEY is returned if a key could not be found. |
1111 | */ | 1107 | */ |
1112 | struct key *find_keyring_by_name(const char *name, bool skip_perm_check) | 1108 | struct key *find_keyring_by_name(const char *name, bool uid_keyring) |
1113 | { | 1109 | { |
1114 | struct key *keyring; | 1110 | struct key *keyring; |
1115 | int bucket; | 1111 | int bucket; |
@@ -1137,10 +1133,15 @@ struct key *find_keyring_by_name(const char *name, bool skip_perm_check) | |||
1137 | if (strcmp(keyring->description, name) != 0) | 1133 | if (strcmp(keyring->description, name) != 0) |
1138 | continue; | 1134 | continue; |
1139 | 1135 | ||
1140 | if (!skip_perm_check && | 1136 | if (uid_keyring) { |
1141 | key_permission(make_key_ref(keyring, 0), | 1137 | if (!test_bit(KEY_FLAG_UID_KEYRING, |
1142 | KEY_NEED_SEARCH) < 0) | 1138 | &keyring->flags)) |
1143 | continue; | 1139 | continue; |
1140 | } else { | ||
1141 | if (key_permission(make_key_ref(keyring, 0), | ||
1142 | KEY_NEED_SEARCH) < 0) | ||
1143 | continue; | ||
1144 | } | ||
1144 | 1145 | ||
1145 | /* we've got a match but we might end up racing with | 1146 | /* we've got a match but we might end up racing with |
1146 | * key_cleanup() if the keyring is currently 'dead' | 1147 | * key_cleanup() if the keyring is currently 'dead' |
diff --git a/security/keys/proc.c b/security/keys/proc.c index bf08d02b6646..de834309d100 100644 --- a/security/keys/proc.c +++ b/security/keys/proc.c | |||
@@ -187,7 +187,7 @@ static int proc_keys_show(struct seq_file *m, void *v) | |||
187 | struct keyring_search_context ctx = { | 187 | struct keyring_search_context ctx = { |
188 | .index_key.type = key->type, | 188 | .index_key.type = key->type, |
189 | .index_key.description = key->description, | 189 | .index_key.description = key->description, |
190 | .cred = current_cred(), | 190 | .cred = m->file->f_cred, |
191 | .match_data.cmp = lookup_user_key_possessed, | 191 | .match_data.cmp = lookup_user_key_possessed, |
192 | .match_data.raw_data = key, | 192 | .match_data.raw_data = key, |
193 | .match_data.lookup_type = KEYRING_SEARCH_LOOKUP_DIRECT, | 193 | .match_data.lookup_type = KEYRING_SEARCH_LOOKUP_DIRECT, |
@@ -207,11 +207,7 @@ static int proc_keys_show(struct seq_file *m, void *v) | |||
207 | } | 207 | } |
208 | } | 208 | } |
209 | 209 | ||
210 | /* check whether the current task is allowed to view the key (assuming | 210 | /* check whether the current task is allowed to view the key */ |
211 | * non-possession) | ||
212 | * - the caller holds a spinlock, and thus the RCU read lock, making our | ||
213 | * access to __current_cred() safe | ||
214 | */ | ||
215 | rc = key_task_permission(key_ref, ctx.cred, KEY_NEED_VIEW); | 211 | rc = key_task_permission(key_ref, ctx.cred, KEY_NEED_VIEW); |
216 | if (rc < 0) | 212 | if (rc < 0) |
217 | return 0; | 213 | return 0; |
diff --git a/security/keys/process_keys.c b/security/keys/process_keys.c index 86bced9fdbdf..293d3598153b 100644 --- a/security/keys/process_keys.c +++ b/security/keys/process_keys.c | |||
@@ -77,7 +77,8 @@ int install_user_keyrings(void) | |||
77 | if (IS_ERR(uid_keyring)) { | 77 | if (IS_ERR(uid_keyring)) { |
78 | uid_keyring = keyring_alloc(buf, user->uid, INVALID_GID, | 78 | uid_keyring = keyring_alloc(buf, user->uid, INVALID_GID, |
79 | cred, user_keyring_perm, | 79 | cred, user_keyring_perm, |
80 | KEY_ALLOC_IN_QUOTA, | 80 | KEY_ALLOC_UID_KEYRING | |
81 | KEY_ALLOC_IN_QUOTA, | ||
81 | NULL, NULL); | 82 | NULL, NULL); |
82 | if (IS_ERR(uid_keyring)) { | 83 | if (IS_ERR(uid_keyring)) { |
83 | ret = PTR_ERR(uid_keyring); | 84 | ret = PTR_ERR(uid_keyring); |
@@ -94,7 +95,8 @@ int install_user_keyrings(void) | |||
94 | session_keyring = | 95 | session_keyring = |
95 | keyring_alloc(buf, user->uid, INVALID_GID, | 96 | keyring_alloc(buf, user->uid, INVALID_GID, |
96 | cred, user_keyring_perm, | 97 | cred, user_keyring_perm, |
97 | KEY_ALLOC_IN_QUOTA, | 98 | KEY_ALLOC_UID_KEYRING | |
99 | KEY_ALLOC_IN_QUOTA, | ||
98 | NULL, NULL); | 100 | NULL, NULL); |
99 | if (IS_ERR(session_keyring)) { | 101 | if (IS_ERR(session_keyring)) { |
100 | ret = PTR_ERR(session_keyring); | 102 | ret = PTR_ERR(session_keyring); |
diff --git a/security/keys/request_key_auth.c b/security/keys/request_key_auth.c index afe9d22ab361..6ebf1af8fce9 100644 --- a/security/keys/request_key_auth.c +++ b/security/keys/request_key_auth.c | |||
@@ -120,6 +120,18 @@ static void request_key_auth_revoke(struct key *key) | |||
120 | } | 120 | } |
121 | } | 121 | } |
122 | 122 | ||
123 | static void free_request_key_auth(struct request_key_auth *rka) | ||
124 | { | ||
125 | if (!rka) | ||
126 | return; | ||
127 | key_put(rka->target_key); | ||
128 | key_put(rka->dest_keyring); | ||
129 | if (rka->cred) | ||
130 | put_cred(rka->cred); | ||
131 | kfree(rka->callout_info); | ||
132 | kfree(rka); | ||
133 | } | ||
134 | |||
123 | /* | 135 | /* |
124 | * Destroy an instantiation authorisation token key. | 136 | * Destroy an instantiation authorisation token key. |
125 | */ | 137 | */ |
@@ -129,15 +141,7 @@ static void request_key_auth_destroy(struct key *key) | |||
129 | 141 | ||
130 | kenter("{%d}", key->serial); | 142 | kenter("{%d}", key->serial); |
131 | 143 | ||
132 | if (rka->cred) { | 144 | free_request_key_auth(rka); |
133 | put_cred(rka->cred); | ||
134 | rka->cred = NULL; | ||
135 | } | ||
136 | |||
137 | key_put(rka->target_key); | ||
138 | key_put(rka->dest_keyring); | ||
139 | kfree(rka->callout_info); | ||
140 | kfree(rka); | ||
141 | } | 145 | } |
142 | 146 | ||
143 | /* | 147 | /* |
@@ -151,22 +155,18 @@ struct key *request_key_auth_new(struct key *target, const void *callout_info, | |||
151 | const struct cred *cred = current->cred; | 155 | const struct cred *cred = current->cred; |
152 | struct key *authkey = NULL; | 156 | struct key *authkey = NULL; |
153 | char desc[20]; | 157 | char desc[20]; |
154 | int ret; | 158 | int ret = -ENOMEM; |
155 | 159 | ||
156 | kenter("%d,", target->serial); | 160 | kenter("%d,", target->serial); |
157 | 161 | ||
158 | /* allocate a auth record */ | 162 | /* allocate a auth record */ |
159 | rka = kmalloc(sizeof(*rka), GFP_KERNEL); | 163 | rka = kzalloc(sizeof(*rka), GFP_KERNEL); |
160 | if (!rka) { | 164 | if (!rka) |
161 | kleave(" = -ENOMEM"); | 165 | goto error; |
162 | return ERR_PTR(-ENOMEM); | 166 | rka->callout_info = kmemdup(callout_info, callout_len, GFP_KERNEL); |
163 | } | 167 | if (!rka->callout_info) |
164 | rka->callout_info = kmalloc(callout_len, GFP_KERNEL); | 168 | goto error_free_rka; |
165 | if (!rka->callout_info) { | 169 | rka->callout_len = callout_len; |
166 | kleave(" = -ENOMEM"); | ||
167 | kfree(rka); | ||
168 | return ERR_PTR(-ENOMEM); | ||
169 | } | ||
170 | 170 | ||
171 | /* see if the calling process is already servicing the key request of | 171 | /* see if the calling process is already servicing the key request of |
172 | * another process */ | 172 | * another process */ |
@@ -176,8 +176,12 @@ struct key *request_key_auth_new(struct key *target, const void *callout_info, | |||
176 | 176 | ||
177 | /* if the auth key has been revoked, then the key we're | 177 | /* if the auth key has been revoked, then the key we're |
178 | * servicing is already instantiated */ | 178 | * servicing is already instantiated */ |
179 | if (test_bit(KEY_FLAG_REVOKED, &cred->request_key_auth->flags)) | 179 | if (test_bit(KEY_FLAG_REVOKED, |
180 | goto auth_key_revoked; | 180 | &cred->request_key_auth->flags)) { |
181 | up_read(&cred->request_key_auth->sem); | ||
182 | ret = -EKEYREVOKED; | ||
183 | goto error_free_rka; | ||
184 | } | ||
181 | 185 | ||
182 | irka = cred->request_key_auth->payload.data[0]; | 186 | irka = cred->request_key_auth->payload.data[0]; |
183 | rka->cred = get_cred(irka->cred); | 187 | rka->cred = get_cred(irka->cred); |
@@ -193,8 +197,6 @@ struct key *request_key_auth_new(struct key *target, const void *callout_info, | |||
193 | 197 | ||
194 | rka->target_key = key_get(target); | 198 | rka->target_key = key_get(target); |
195 | rka->dest_keyring = key_get(dest_keyring); | 199 | rka->dest_keyring = key_get(dest_keyring); |
196 | memcpy(rka->callout_info, callout_info, callout_len); | ||
197 | rka->callout_len = callout_len; | ||
198 | 200 | ||
199 | /* allocate the auth key */ | 201 | /* allocate the auth key */ |
200 | sprintf(desc, "%x", target->serial); | 202 | sprintf(desc, "%x", target->serial); |
@@ -205,32 +207,22 @@ struct key *request_key_auth_new(struct key *target, const void *callout_info, | |||
205 | KEY_USR_VIEW, KEY_ALLOC_NOT_IN_QUOTA, NULL); | 207 | KEY_USR_VIEW, KEY_ALLOC_NOT_IN_QUOTA, NULL); |
206 | if (IS_ERR(authkey)) { | 208 | if (IS_ERR(authkey)) { |
207 | ret = PTR_ERR(authkey); | 209 | ret = PTR_ERR(authkey); |
208 | goto error_alloc; | 210 | goto error_free_rka; |
209 | } | 211 | } |
210 | 212 | ||
211 | /* construct the auth key */ | 213 | /* construct the auth key */ |
212 | ret = key_instantiate_and_link(authkey, rka, 0, NULL, NULL); | 214 | ret = key_instantiate_and_link(authkey, rka, 0, NULL, NULL); |
213 | if (ret < 0) | 215 | if (ret < 0) |
214 | goto error_inst; | 216 | goto error_put_authkey; |
215 | 217 | ||
216 | kleave(" = {%d,%d}", authkey->serial, refcount_read(&authkey->usage)); | 218 | kleave(" = {%d,%d}", authkey->serial, refcount_read(&authkey->usage)); |
217 | return authkey; | 219 | return authkey; |
218 | 220 | ||
219 | auth_key_revoked: | 221 | error_put_authkey: |
220 | up_read(&cred->request_key_auth->sem); | ||
221 | kfree(rka->callout_info); | ||
222 | kfree(rka); | ||
223 | kleave("= -EKEYREVOKED"); | ||
224 | return ERR_PTR(-EKEYREVOKED); | ||
225 | |||
226 | error_inst: | ||
227 | key_revoke(authkey); | ||
228 | key_put(authkey); | 222 | key_put(authkey); |
229 | error_alloc: | 223 | error_free_rka: |
230 | key_put(rka->target_key); | 224 | free_request_key_auth(rka); |
231 | key_put(rka->dest_keyring); | 225 | error: |
232 | kfree(rka->callout_info); | ||
233 | kfree(rka); | ||
234 | kleave("= %d", ret); | 226 | kleave("= %d", ret); |
235 | return ERR_PTR(ret); | 227 | return ERR_PTR(ret); |
236 | } | 228 | } |
diff --git a/security/smack/smack_lsm.c b/security/smack/smack_lsm.c index 319add31b4a4..286171a16ed2 100644 --- a/security/smack/smack_lsm.c +++ b/security/smack/smack_lsm.c | |||
@@ -1473,7 +1473,7 @@ static int smack_inode_removexattr(struct dentry *dentry, const char *name) | |||
1473 | * @inode: the object | 1473 | * @inode: the object |
1474 | * @name: attribute name | 1474 | * @name: attribute name |
1475 | * @buffer: where to put the result | 1475 | * @buffer: where to put the result |
1476 | * @alloc: unused | 1476 | * @alloc: duplicate memory |
1477 | * | 1477 | * |
1478 | * Returns the size of the attribute or an error code | 1478 | * Returns the size of the attribute or an error code |
1479 | */ | 1479 | */ |
@@ -1486,43 +1486,38 @@ static int smack_inode_getsecurity(struct inode *inode, | |||
1486 | struct super_block *sbp; | 1486 | struct super_block *sbp; |
1487 | struct inode *ip = (struct inode *)inode; | 1487 | struct inode *ip = (struct inode *)inode; |
1488 | struct smack_known *isp; | 1488 | struct smack_known *isp; |
1489 | int ilen; | ||
1490 | int rc = 0; | ||
1491 | 1489 | ||
1492 | if (strcmp(name, XATTR_SMACK_SUFFIX) == 0) { | 1490 | if (strcmp(name, XATTR_SMACK_SUFFIX) == 0) |
1493 | isp = smk_of_inode(inode); | 1491 | isp = smk_of_inode(inode); |
1494 | ilen = strlen(isp->smk_known); | 1492 | else { |
1495 | *buffer = isp->smk_known; | 1493 | /* |
1496 | return ilen; | 1494 | * The rest of the Smack xattrs are only on sockets. |
1497 | } | 1495 | */ |
1496 | sbp = ip->i_sb; | ||
1497 | if (sbp->s_magic != SOCKFS_MAGIC) | ||
1498 | return -EOPNOTSUPP; | ||
1498 | 1499 | ||
1499 | /* | 1500 | sock = SOCKET_I(ip); |
1500 | * The rest of the Smack xattrs are only on sockets. | 1501 | if (sock == NULL || sock->sk == NULL) |
1501 | */ | 1502 | return -EOPNOTSUPP; |
1502 | sbp = ip->i_sb; | ||
1503 | if (sbp->s_magic != SOCKFS_MAGIC) | ||
1504 | return -EOPNOTSUPP; | ||
1505 | 1503 | ||
1506 | sock = SOCKET_I(ip); | 1504 | ssp = sock->sk->sk_security; |
1507 | if (sock == NULL || sock->sk == NULL) | ||
1508 | return -EOPNOTSUPP; | ||
1509 | |||
1510 | ssp = sock->sk->sk_security; | ||
1511 | 1505 | ||
1512 | if (strcmp(name, XATTR_SMACK_IPIN) == 0) | 1506 | if (strcmp(name, XATTR_SMACK_IPIN) == 0) |
1513 | isp = ssp->smk_in; | 1507 | isp = ssp->smk_in; |
1514 | else if (strcmp(name, XATTR_SMACK_IPOUT) == 0) | 1508 | else if (strcmp(name, XATTR_SMACK_IPOUT) == 0) |
1515 | isp = ssp->smk_out; | 1509 | isp = ssp->smk_out; |
1516 | else | 1510 | else |
1517 | return -EOPNOTSUPP; | 1511 | return -EOPNOTSUPP; |
1512 | } | ||
1518 | 1513 | ||
1519 | ilen = strlen(isp->smk_known); | 1514 | if (alloc) { |
1520 | if (rc == 0) { | 1515 | *buffer = kstrdup(isp->smk_known, GFP_KERNEL); |
1521 | *buffer = isp->smk_known; | 1516 | if (*buffer == NULL) |
1522 | rc = ilen; | 1517 | return -ENOMEM; |
1523 | } | 1518 | } |
1524 | 1519 | ||
1525 | return rc; | 1520 | return strlen(isp->smk_known); |
1526 | } | 1521 | } |
1527 | 1522 | ||
1528 | 1523 | ||
diff --git a/sound/core/compress_offload.c b/sound/core/compress_offload.c index fec1dfdb14ad..4490a699030b 100644 --- a/sound/core/compress_offload.c +++ b/sound/core/compress_offload.c | |||
@@ -948,14 +948,13 @@ static const struct file_operations snd_compr_file_ops = { | |||
948 | static int snd_compress_dev_register(struct snd_device *device) | 948 | static int snd_compress_dev_register(struct snd_device *device) |
949 | { | 949 | { |
950 | int ret = -EINVAL; | 950 | int ret = -EINVAL; |
951 | char str[16]; | ||
952 | struct snd_compr *compr; | 951 | struct snd_compr *compr; |
953 | 952 | ||
954 | if (snd_BUG_ON(!device || !device->device_data)) | 953 | if (snd_BUG_ON(!device || !device->device_data)) |
955 | return -EBADFD; | 954 | return -EBADFD; |
956 | compr = device->device_data; | 955 | compr = device->device_data; |
957 | 956 | ||
958 | pr_debug("reg %s for device %s, direction %d\n", str, compr->name, | 957 | pr_debug("reg device %s, direction %d\n", compr->name, |
959 | compr->direction); | 958 | compr->direction); |
960 | /* register compressed device */ | 959 | /* register compressed device */ |
961 | ret = snd_register_device(SNDRV_DEVICE_TYPE_COMPRESS, | 960 | ret = snd_register_device(SNDRV_DEVICE_TYPE_COMPRESS, |
diff --git a/sound/core/pcm_compat.c b/sound/core/pcm_compat.c index 3a1cc7b97e46..b719d0bd833e 100644 --- a/sound/core/pcm_compat.c +++ b/sound/core/pcm_compat.c | |||
@@ -547,6 +547,7 @@ struct snd_pcm_mmap_status_x32 { | |||
547 | u32 pad2; /* alignment */ | 547 | u32 pad2; /* alignment */ |
548 | struct timespec tstamp; | 548 | struct timespec tstamp; |
549 | s32 suspended_state; | 549 | s32 suspended_state; |
550 | s32 pad3; | ||
550 | struct timespec audio_tstamp; | 551 | struct timespec audio_tstamp; |
551 | } __packed; | 552 | } __packed; |
552 | 553 | ||
diff --git a/sound/pci/asihpi/hpioctl.c b/sound/pci/asihpi/hpioctl.c index 7e3aa50b21f9..5badd08e1d69 100644 --- a/sound/pci/asihpi/hpioctl.c +++ b/sound/pci/asihpi/hpioctl.c | |||
@@ -103,6 +103,7 @@ long asihpi_hpi_ioctl(struct file *file, unsigned int cmd, unsigned long arg) | |||
103 | void __user *puhr; | 103 | void __user *puhr; |
104 | union hpi_message_buffer_v1 *hm; | 104 | union hpi_message_buffer_v1 *hm; |
105 | union hpi_response_buffer_v1 *hr; | 105 | union hpi_response_buffer_v1 *hr; |
106 | u16 msg_size; | ||
106 | u16 res_max_size; | 107 | u16 res_max_size; |
107 | u32 uncopied_bytes; | 108 | u32 uncopied_bytes; |
108 | int err = 0; | 109 | int err = 0; |
@@ -127,22 +128,25 @@ long asihpi_hpi_ioctl(struct file *file, unsigned int cmd, unsigned long arg) | |||
127 | } | 128 | } |
128 | 129 | ||
129 | /* Now read the message size and data from user space. */ | 130 | /* Now read the message size and data from user space. */ |
130 | if (get_user(hm->h.size, (u16 __user *)puhm)) { | 131 | if (get_user(msg_size, (u16 __user *)puhm)) { |
131 | err = -EFAULT; | 132 | err = -EFAULT; |
132 | goto out; | 133 | goto out; |
133 | } | 134 | } |
134 | if (hm->h.size > sizeof(*hm)) | 135 | if (msg_size > sizeof(*hm)) |
135 | hm->h.size = sizeof(*hm); | 136 | msg_size = sizeof(*hm); |
136 | 137 | ||
137 | /* printk(KERN_INFO "message size %d\n", hm->h.wSize); */ | 138 | /* printk(KERN_INFO "message size %d\n", hm->h.wSize); */ |
138 | 139 | ||
139 | uncopied_bytes = copy_from_user(hm, puhm, hm->h.size); | 140 | uncopied_bytes = copy_from_user(hm, puhm, msg_size); |
140 | if (uncopied_bytes) { | 141 | if (uncopied_bytes) { |
141 | HPI_DEBUG_LOG(ERROR, "uncopied bytes %d\n", uncopied_bytes); | 142 | HPI_DEBUG_LOG(ERROR, "uncopied bytes %d\n", uncopied_bytes); |
142 | err = -EFAULT; | 143 | err = -EFAULT; |
143 | goto out; | 144 | goto out; |
144 | } | 145 | } |
145 | 146 | ||
147 | /* Override h.size in case it is changed between two userspace fetches */ | ||
148 | hm->h.size = msg_size; | ||
149 | |||
146 | if (get_user(res_max_size, (u16 __user *)puhr)) { | 150 | if (get_user(res_max_size, (u16 __user *)puhr)) { |
147 | err = -EFAULT; | 151 | err = -EFAULT; |
148 | goto out; | 152 | goto out; |
diff --git a/sound/pci/echoaudio/echoaudio.c b/sound/pci/echoaudio/echoaudio.c index 7326695bca33..d68f99e076a8 100644 --- a/sound/pci/echoaudio/echoaudio.c +++ b/sound/pci/echoaudio/echoaudio.c | |||
@@ -1272,11 +1272,11 @@ static int snd_echo_mixer_info(struct snd_kcontrol *kcontrol, | |||
1272 | 1272 | ||
1273 | chip = snd_kcontrol_chip(kcontrol); | 1273 | chip = snd_kcontrol_chip(kcontrol); |
1274 | uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER; | 1274 | uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER; |
1275 | uinfo->count = 1; | ||
1275 | uinfo->value.integer.min = ECHOGAIN_MINOUT; | 1276 | uinfo->value.integer.min = ECHOGAIN_MINOUT; |
1276 | uinfo->value.integer.max = ECHOGAIN_MAXOUT; | 1277 | uinfo->value.integer.max = ECHOGAIN_MAXOUT; |
1277 | uinfo->dimen.d[0] = num_busses_out(chip); | 1278 | uinfo->dimen.d[0] = num_busses_out(chip); |
1278 | uinfo->dimen.d[1] = num_busses_in(chip); | 1279 | uinfo->dimen.d[1] = num_busses_in(chip); |
1279 | uinfo->count = uinfo->dimen.d[0] * uinfo->dimen.d[1]; | ||
1280 | return 0; | 1280 | return 0; |
1281 | } | 1281 | } |
1282 | 1282 | ||
@@ -1344,11 +1344,11 @@ static int snd_echo_vmixer_info(struct snd_kcontrol *kcontrol, | |||
1344 | 1344 | ||
1345 | chip = snd_kcontrol_chip(kcontrol); | 1345 | chip = snd_kcontrol_chip(kcontrol); |
1346 | uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER; | 1346 | uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER; |
1347 | uinfo->count = 1; | ||
1347 | uinfo->value.integer.min = ECHOGAIN_MINOUT; | 1348 | uinfo->value.integer.min = ECHOGAIN_MINOUT; |
1348 | uinfo->value.integer.max = ECHOGAIN_MAXOUT; | 1349 | uinfo->value.integer.max = ECHOGAIN_MAXOUT; |
1349 | uinfo->dimen.d[0] = num_busses_out(chip); | 1350 | uinfo->dimen.d[0] = num_busses_out(chip); |
1350 | uinfo->dimen.d[1] = num_pipes_out(chip); | 1351 | uinfo->dimen.d[1] = num_pipes_out(chip); |
1351 | uinfo->count = uinfo->dimen.d[0] * uinfo->dimen.d[1]; | ||
1352 | return 0; | 1352 | return 0; |
1353 | } | 1353 | } |
1354 | 1354 | ||
@@ -1728,6 +1728,7 @@ static int snd_echo_vumeters_info(struct snd_kcontrol *kcontrol, | |||
1728 | struct snd_ctl_elem_info *uinfo) | 1728 | struct snd_ctl_elem_info *uinfo) |
1729 | { | 1729 | { |
1730 | uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER; | 1730 | uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER; |
1731 | uinfo->count = 96; | ||
1731 | uinfo->value.integer.min = ECHOGAIN_MINOUT; | 1732 | uinfo->value.integer.min = ECHOGAIN_MINOUT; |
1732 | uinfo->value.integer.max = 0; | 1733 | uinfo->value.integer.max = 0; |
1733 | #ifdef ECHOCARD_HAS_VMIXER | 1734 | #ifdef ECHOCARD_HAS_VMIXER |
@@ -1737,7 +1738,6 @@ static int snd_echo_vumeters_info(struct snd_kcontrol *kcontrol, | |||
1737 | #endif | 1738 | #endif |
1738 | uinfo->dimen.d[1] = 16; /* 16 channels */ | 1739 | uinfo->dimen.d[1] = 16; /* 16 channels */ |
1739 | uinfo->dimen.d[2] = 2; /* 0=level, 1=peak */ | 1740 | uinfo->dimen.d[2] = 2; /* 0=level, 1=peak */ |
1740 | uinfo->count = uinfo->dimen.d[0] * uinfo->dimen.d[1] * uinfo->dimen.d[2]; | ||
1741 | return 0; | 1741 | return 0; |
1742 | } | 1742 | } |
1743 | 1743 | ||
diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c index 2b64fabd5faa..c19c81d230bd 100644 --- a/sound/pci/hda/patch_hdmi.c +++ b/sound/pci/hda/patch_hdmi.c | |||
@@ -906,6 +906,7 @@ static int hdmi_setup_stream(struct hda_codec *codec, hda_nid_t cvt_nid, | |||
906 | hda_nid_t pin_nid, u32 stream_tag, int format) | 906 | hda_nid_t pin_nid, u32 stream_tag, int format) |
907 | { | 907 | { |
908 | struct hdmi_spec *spec = codec->spec; | 908 | struct hdmi_spec *spec = codec->spec; |
909 | unsigned int param; | ||
909 | int err; | 910 | int err; |
910 | 911 | ||
911 | err = spec->ops.pin_hbr_setup(codec, pin_nid, is_hbr_format(format)); | 912 | err = spec->ops.pin_hbr_setup(codec, pin_nid, is_hbr_format(format)); |
@@ -915,6 +916,26 @@ static int hdmi_setup_stream(struct hda_codec *codec, hda_nid_t cvt_nid, | |||
915 | return err; | 916 | return err; |
916 | } | 917 | } |
917 | 918 | ||
919 | if (is_haswell_plus(codec)) { | ||
920 | |||
921 | /* | ||
922 | * on recent platforms IEC Coding Type is required for HBR | ||
923 | * support, read current Digital Converter settings and set | ||
924 | * ICT bitfield if needed. | ||
925 | */ | ||
926 | param = snd_hda_codec_read(codec, cvt_nid, 0, | ||
927 | AC_VERB_GET_DIGI_CONVERT_1, 0); | ||
928 | |||
929 | param = (param >> 16) & ~(AC_DIG3_ICT); | ||
930 | |||
931 | /* on recent platforms ICT mode is required for HBR support */ | ||
932 | if (is_hbr_format(format)) | ||
933 | param |= 0x1; | ||
934 | |||
935 | snd_hda_codec_write(codec, cvt_nid, 0, | ||
936 | AC_VERB_SET_DIGI_CONVERT_3, param); | ||
937 | } | ||
938 | |||
918 | snd_hda_codec_setup_stream(codec, cvt_nid, stream_tag, 0, format); | 939 | snd_hda_codec_setup_stream(codec, cvt_nid, stream_tag, 0, format); |
919 | return 0; | 940 | return 0; |
920 | } | 941 | } |
diff --git a/sound/usb/card.c b/sound/usb/card.c index 3dc36d913550..23d1d23aefec 100644 --- a/sound/usb/card.c +++ b/sound/usb/card.c | |||
@@ -221,6 +221,7 @@ static int snd_usb_create_streams(struct snd_usb_audio *chip, int ctrlif) | |||
221 | struct usb_interface_descriptor *altsd; | 221 | struct usb_interface_descriptor *altsd; |
222 | void *control_header; | 222 | void *control_header; |
223 | int i, protocol; | 223 | int i, protocol; |
224 | int rest_bytes; | ||
224 | 225 | ||
225 | /* find audiocontrol interface */ | 226 | /* find audiocontrol interface */ |
226 | host_iface = &usb_ifnum_to_if(dev, ctrlif)->altsetting[0]; | 227 | host_iface = &usb_ifnum_to_if(dev, ctrlif)->altsetting[0]; |
@@ -235,6 +236,15 @@ static int snd_usb_create_streams(struct snd_usb_audio *chip, int ctrlif) | |||
235 | return -EINVAL; | 236 | return -EINVAL; |
236 | } | 237 | } |
237 | 238 | ||
239 | rest_bytes = (void *)(host_iface->extra + host_iface->extralen) - | ||
240 | control_header; | ||
241 | |||
242 | /* just to be sure -- this shouldn't hit at all */ | ||
243 | if (rest_bytes <= 0) { | ||
244 | dev_err(&dev->dev, "invalid control header\n"); | ||
245 | return -EINVAL; | ||
246 | } | ||
247 | |||
238 | switch (protocol) { | 248 | switch (protocol) { |
239 | default: | 249 | default: |
240 | dev_warn(&dev->dev, | 250 | dev_warn(&dev->dev, |
@@ -245,11 +255,21 @@ static int snd_usb_create_streams(struct snd_usb_audio *chip, int ctrlif) | |||
245 | case UAC_VERSION_1: { | 255 | case UAC_VERSION_1: { |
246 | struct uac1_ac_header_descriptor *h1 = control_header; | 256 | struct uac1_ac_header_descriptor *h1 = control_header; |
247 | 257 | ||
258 | if (rest_bytes < sizeof(*h1)) { | ||
259 | dev_err(&dev->dev, "too short v1 buffer descriptor\n"); | ||
260 | return -EINVAL; | ||
261 | } | ||
262 | |||
248 | if (!h1->bInCollection) { | 263 | if (!h1->bInCollection) { |
249 | dev_info(&dev->dev, "skipping empty audio interface (v1)\n"); | 264 | dev_info(&dev->dev, "skipping empty audio interface (v1)\n"); |
250 | return -EINVAL; | 265 | return -EINVAL; |
251 | } | 266 | } |
252 | 267 | ||
268 | if (rest_bytes < h1->bLength) { | ||
269 | dev_err(&dev->dev, "invalid buffer length (v1)\n"); | ||
270 | return -EINVAL; | ||
271 | } | ||
272 | |||
253 | if (h1->bLength < sizeof(*h1) + h1->bInCollection) { | 273 | if (h1->bLength < sizeof(*h1) + h1->bInCollection) { |
254 | dev_err(&dev->dev, "invalid UAC_HEADER (v1)\n"); | 274 | dev_err(&dev->dev, "invalid UAC_HEADER (v1)\n"); |
255 | return -EINVAL; | 275 | return -EINVAL; |
diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c index 913552078285..b8cb57aeec77 100644 --- a/sound/usb/quirks.c +++ b/sound/usb/quirks.c | |||
@@ -1137,6 +1137,8 @@ bool snd_usb_get_sample_rate_quirk(struct snd_usb_audio *chip) | |||
1137 | case USB_ID(0x047F, 0x02F7): /* Plantronics BT-600 */ | 1137 | case USB_ID(0x047F, 0x02F7): /* Plantronics BT-600 */ |
1138 | case USB_ID(0x047F, 0x0415): /* Plantronics BT-300 */ | 1138 | case USB_ID(0x047F, 0x0415): /* Plantronics BT-300 */ |
1139 | case USB_ID(0x047F, 0xAA05): /* Plantronics DA45 */ | 1139 | case USB_ID(0x047F, 0xAA05): /* Plantronics DA45 */ |
1140 | case USB_ID(0x047F, 0xC022): /* Plantronics C310 */ | ||
1141 | case USB_ID(0x047F, 0xC036): /* Plantronics C520-M */ | ||
1140 | case USB_ID(0x04D8, 0xFEEA): /* Benchmark DAC1 Pre */ | 1142 | case USB_ID(0x04D8, 0xFEEA): /* Benchmark DAC1 Pre */ |
1141 | case USB_ID(0x0556, 0x0014): /* Phoenix Audio TMX320VC */ | 1143 | case USB_ID(0x0556, 0x0014): /* Phoenix Audio TMX320VC */ |
1142 | case USB_ID(0x05A3, 0x9420): /* ELP HD USB Camera */ | 1144 | case USB_ID(0x05A3, 0x9420): /* ELP HD USB Camera */ |
diff --git a/sound/usb/usx2y/usb_stream.c b/sound/usb/usx2y/usb_stream.c index 4dab49080700..e229abd21652 100644 --- a/sound/usb/usx2y/usb_stream.c +++ b/sound/usb/usx2y/usb_stream.c | |||
@@ -191,7 +191,8 @@ struct usb_stream *usb_stream_new(struct usb_stream_kernel *sk, | |||
191 | } | 191 | } |
192 | 192 | ||
193 | pg = get_order(read_size); | 193 | pg = get_order(read_size); |
194 | sk->s = (void *) __get_free_pages(GFP_KERNEL|__GFP_COMP|__GFP_ZERO, pg); | 194 | sk->s = (void *) __get_free_pages(GFP_KERNEL|__GFP_COMP|__GFP_ZERO| |
195 | __GFP_NOWARN, pg); | ||
195 | if (!sk->s) { | 196 | if (!sk->s) { |
196 | snd_printk(KERN_WARNING "couldn't __get_free_pages()\n"); | 197 | snd_printk(KERN_WARNING "couldn't __get_free_pages()\n"); |
197 | goto out; | 198 | goto out; |
@@ -211,7 +212,8 @@ struct usb_stream *usb_stream_new(struct usb_stream_kernel *sk, | |||
211 | pg = get_order(write_size); | 212 | pg = get_order(write_size); |
212 | 213 | ||
213 | sk->write_page = | 214 | sk->write_page = |
214 | (void *)__get_free_pages(GFP_KERNEL|__GFP_COMP|__GFP_ZERO, pg); | 215 | (void *)__get_free_pages(GFP_KERNEL|__GFP_COMP|__GFP_ZERO| |
216 | __GFP_NOWARN, pg); | ||
215 | if (!sk->write_page) { | 217 | if (!sk->write_page) { |
216 | snd_printk(KERN_WARNING "couldn't __get_free_pages()\n"); | 218 | snd_printk(KERN_WARNING "couldn't __get_free_pages()\n"); |
217 | usb_stream_free(sk); | 219 | usb_stream_free(sk); |
diff --git a/tools/arch/s390/include/uapi/asm/kvm.h b/tools/arch/s390/include/uapi/asm/kvm.h index 69d09c39bbcd..cd7359e23d86 100644 --- a/tools/arch/s390/include/uapi/asm/kvm.h +++ b/tools/arch/s390/include/uapi/asm/kvm.h | |||
@@ -88,6 +88,12 @@ struct kvm_s390_io_adapter_req { | |||
88 | /* kvm attributes for KVM_S390_VM_TOD */ | 88 | /* kvm attributes for KVM_S390_VM_TOD */ |
89 | #define KVM_S390_VM_TOD_LOW 0 | 89 | #define KVM_S390_VM_TOD_LOW 0 |
90 | #define KVM_S390_VM_TOD_HIGH 1 | 90 | #define KVM_S390_VM_TOD_HIGH 1 |
91 | #define KVM_S390_VM_TOD_EXT 2 | ||
92 | |||
93 | struct kvm_s390_vm_tod_clock { | ||
94 | __u8 epoch_idx; | ||
95 | __u64 tod; | ||
96 | }; | ||
91 | 97 | ||
92 | /* kvm attributes for KVM_S390_VM_CPU_MODEL */ | 98 | /* kvm attributes for KVM_S390_VM_CPU_MODEL */ |
93 | /* processor related attributes are r/w */ | 99 | /* processor related attributes are r/w */ |
diff --git a/tools/arch/x86/include/asm/cpufeatures.h b/tools/arch/x86/include/asm/cpufeatures.h index 8ea315a11fe0..2519c6c801c9 100644 --- a/tools/arch/x86/include/asm/cpufeatures.h +++ b/tools/arch/x86/include/asm/cpufeatures.h | |||
@@ -196,6 +196,7 @@ | |||
196 | 196 | ||
197 | #define X86_FEATURE_HW_PSTATE ( 7*32+ 8) /* AMD HW-PState */ | 197 | #define X86_FEATURE_HW_PSTATE ( 7*32+ 8) /* AMD HW-PState */ |
198 | #define X86_FEATURE_PROC_FEEDBACK ( 7*32+ 9) /* AMD ProcFeedbackInterface */ | 198 | #define X86_FEATURE_PROC_FEEDBACK ( 7*32+ 9) /* AMD ProcFeedbackInterface */ |
199 | #define X86_FEATURE_SME ( 7*32+10) /* AMD Secure Memory Encryption */ | ||
199 | 200 | ||
200 | #define X86_FEATURE_INTEL_PPIN ( 7*32+14) /* Intel Processor Inventory Number */ | 201 | #define X86_FEATURE_INTEL_PPIN ( 7*32+14) /* Intel Processor Inventory Number */ |
201 | #define X86_FEATURE_INTEL_PT ( 7*32+15) /* Intel Processor Trace */ | 202 | #define X86_FEATURE_INTEL_PT ( 7*32+15) /* Intel Processor Trace */ |
@@ -287,6 +288,7 @@ | |||
287 | #define X86_FEATURE_PFTHRESHOLD (15*32+12) /* pause filter threshold */ | 288 | #define X86_FEATURE_PFTHRESHOLD (15*32+12) /* pause filter threshold */ |
288 | #define X86_FEATURE_AVIC (15*32+13) /* Virtual Interrupt Controller */ | 289 | #define X86_FEATURE_AVIC (15*32+13) /* Virtual Interrupt Controller */ |
289 | #define X86_FEATURE_V_VMSAVE_VMLOAD (15*32+15) /* Virtual VMSAVE VMLOAD */ | 290 | #define X86_FEATURE_V_VMSAVE_VMLOAD (15*32+15) /* Virtual VMSAVE VMLOAD */ |
291 | #define X86_FEATURE_VGIF (15*32+16) /* Virtual GIF */ | ||
290 | 292 | ||
291 | /* Intel-defined CPU features, CPUID level 0x00000007:0 (ecx), word 16 */ | 293 | /* Intel-defined CPU features, CPUID level 0x00000007:0 (ecx), word 16 */ |
292 | #define X86_FEATURE_AVX512VBMI (16*32+ 1) /* AVX512 Vector Bit Manipulation instructions*/ | 294 | #define X86_FEATURE_AVX512VBMI (16*32+ 1) /* AVX512 Vector Bit Manipulation instructions*/ |
diff --git a/tools/arch/x86/include/asm/disabled-features.h b/tools/arch/x86/include/asm/disabled-features.h index 5dff775af7cd..c10c9128f54e 100644 --- a/tools/arch/x86/include/asm/disabled-features.h +++ b/tools/arch/x86/include/asm/disabled-features.h | |||
@@ -21,11 +21,13 @@ | |||
21 | # define DISABLE_K6_MTRR (1<<(X86_FEATURE_K6_MTRR & 31)) | 21 | # define DISABLE_K6_MTRR (1<<(X86_FEATURE_K6_MTRR & 31)) |
22 | # define DISABLE_CYRIX_ARR (1<<(X86_FEATURE_CYRIX_ARR & 31)) | 22 | # define DISABLE_CYRIX_ARR (1<<(X86_FEATURE_CYRIX_ARR & 31)) |
23 | # define DISABLE_CENTAUR_MCR (1<<(X86_FEATURE_CENTAUR_MCR & 31)) | 23 | # define DISABLE_CENTAUR_MCR (1<<(X86_FEATURE_CENTAUR_MCR & 31)) |
24 | # define DISABLE_PCID 0 | ||
24 | #else | 25 | #else |
25 | # define DISABLE_VME 0 | 26 | # define DISABLE_VME 0 |
26 | # define DISABLE_K6_MTRR 0 | 27 | # define DISABLE_K6_MTRR 0 |
27 | # define DISABLE_CYRIX_ARR 0 | 28 | # define DISABLE_CYRIX_ARR 0 |
28 | # define DISABLE_CENTAUR_MCR 0 | 29 | # define DISABLE_CENTAUR_MCR 0 |
30 | # define DISABLE_PCID (1<<(X86_FEATURE_PCID & 31)) | ||
29 | #endif /* CONFIG_X86_64 */ | 31 | #endif /* CONFIG_X86_64 */ |
30 | 32 | ||
31 | #ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS | 33 | #ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS |
@@ -49,7 +51,7 @@ | |||
49 | #define DISABLED_MASK1 0 | 51 | #define DISABLED_MASK1 0 |
50 | #define DISABLED_MASK2 0 | 52 | #define DISABLED_MASK2 0 |
51 | #define DISABLED_MASK3 (DISABLE_CYRIX_ARR|DISABLE_CENTAUR_MCR|DISABLE_K6_MTRR) | 53 | #define DISABLED_MASK3 (DISABLE_CYRIX_ARR|DISABLE_CENTAUR_MCR|DISABLE_K6_MTRR) |
52 | #define DISABLED_MASK4 0 | 54 | #define DISABLED_MASK4 (DISABLE_PCID) |
53 | #define DISABLED_MASK5 0 | 55 | #define DISABLED_MASK5 0 |
54 | #define DISABLED_MASK6 0 | 56 | #define DISABLED_MASK6 0 |
55 | #define DISABLED_MASK7 0 | 57 | #define DISABLED_MASK7 0 |
diff --git a/tools/include/asm-generic/hugetlb_encode.h b/tools/include/asm-generic/hugetlb_encode.h new file mode 100644 index 000000000000..e4732d3c2998 --- /dev/null +++ b/tools/include/asm-generic/hugetlb_encode.h | |||
@@ -0,0 +1,34 @@ | |||
1 | #ifndef _ASM_GENERIC_HUGETLB_ENCODE_H_ | ||
2 | #define _ASM_GENERIC_HUGETLB_ENCODE_H_ | ||
3 | |||
4 | /* | ||
5 | * Several system calls take a flag to request "hugetlb" huge pages. | ||
6 | * Without further specification, these system calls will use the | ||
7 | * system's default huge page size. If a system supports multiple | ||
8 | * huge page sizes, the desired huge page size can be specified in | ||
9 | * bits [26:31] of the flag arguments. The value in these 6 bits | ||
10 | * will encode the log2 of the huge page size. | ||
11 | * | ||
12 | * The following definitions are associated with this huge page size | ||
13 | * encoding in flag arguments. System call specific header files | ||
14 | * that use this encoding should include this file. They can then | ||
15 | * provide definitions based on these with their own specific prefix. | ||
16 | * for example: | ||
17 | * #define MAP_HUGE_SHIFT HUGETLB_FLAG_ENCODE_SHIFT | ||
18 | */ | ||
19 | |||
20 | #define HUGETLB_FLAG_ENCODE_SHIFT 26 | ||
21 | #define HUGETLB_FLAG_ENCODE_MASK 0x3f | ||
22 | |||
23 | #define HUGETLB_FLAG_ENCODE_64KB (16 << HUGETLB_FLAG_ENCODE_SHIFT) | ||
24 | #define HUGETLB_FLAG_ENCODE_512KB (19 << HUGETLB_FLAG_ENCODE_SHIFT) | ||
25 | #define HUGETLB_FLAG_ENCODE_1MB (20 << HUGETLB_FLAG_ENCODE_SHIFT) | ||
26 | #define HUGETLB_FLAG_ENCODE_2MB (21 << HUGETLB_FLAG_ENCODE_SHIFT) | ||
27 | #define HUGETLB_FLAG_ENCODE_8MB (23 << HUGETLB_FLAG_ENCODE_SHIFT) | ||
28 | #define HUGETLB_FLAG_ENCODE_16MB (24 << HUGETLB_FLAG_ENCODE_SHIFT) | ||
29 | #define HUGETLB_FLAG_ENCODE_256MB (28 << HUGETLB_FLAG_ENCODE_SHIFT) | ||
30 | #define HUGETLB_FLAG_ENCODE_1GB (30 << HUGETLB_FLAG_ENCODE_SHIFT) | ||
31 | #define HUGETLB_FLAG_ENCODE_2GB (31 << HUGETLB_FLAG_ENCODE_SHIFT) | ||
32 | #define HUGETLB_FLAG_ENCODE_16GB (34 << HUGETLB_FLAG_ENCODE_SHIFT) | ||
33 | |||
34 | #endif /* _ASM_GENERIC_HUGETLB_ENCODE_H_ */ | ||
diff --git a/tools/include/uapi/asm-generic/mman-common.h b/tools/include/uapi/asm-generic/mman-common.h index 8c27db0c5c08..203268f9231e 100644 --- a/tools/include/uapi/asm-generic/mman-common.h +++ b/tools/include/uapi/asm-generic/mman-common.h | |||
@@ -58,20 +58,12 @@ | |||
58 | overrides the coredump filter bits */ | 58 | overrides the coredump filter bits */ |
59 | #define MADV_DODUMP 17 /* Clear the MADV_DONTDUMP flag */ | 59 | #define MADV_DODUMP 17 /* Clear the MADV_DONTDUMP flag */ |
60 | 60 | ||
61 | #define MADV_WIPEONFORK 18 /* Zero memory on fork, child only */ | ||
62 | #define MADV_KEEPONFORK 19 /* Undo MADV_WIPEONFORK */ | ||
63 | |||
61 | /* compatibility flags */ | 64 | /* compatibility flags */ |
62 | #define MAP_FILE 0 | 65 | #define MAP_FILE 0 |
63 | 66 | ||
64 | /* | ||
65 | * When MAP_HUGETLB is set bits [26:31] encode the log2 of the huge page size. | ||
66 | * This gives us 6 bits, which is enough until someone invents 128 bit address | ||
67 | * spaces. | ||
68 | * | ||
69 | * Assume these are all power of twos. | ||
70 | * When 0 use the default page size. | ||
71 | */ | ||
72 | #define MAP_HUGE_SHIFT 26 | ||
73 | #define MAP_HUGE_MASK 0x3f | ||
74 | |||
75 | #define PKEY_DISABLE_ACCESS 0x1 | 67 | #define PKEY_DISABLE_ACCESS 0x1 |
76 | #define PKEY_DISABLE_WRITE 0x2 | 68 | #define PKEY_DISABLE_WRITE 0x2 |
77 | #define PKEY_ACCESS_MASK (PKEY_DISABLE_ACCESS |\ | 69 | #define PKEY_ACCESS_MASK (PKEY_DISABLE_ACCESS |\ |
diff --git a/tools/include/uapi/drm/drm.h b/tools/include/uapi/drm/drm.h index 101593ab10ac..97677cd6964d 100644 --- a/tools/include/uapi/drm/drm.h +++ b/tools/include/uapi/drm/drm.h | |||
@@ -700,6 +700,7 @@ struct drm_prime_handle { | |||
700 | 700 | ||
701 | struct drm_syncobj_create { | 701 | struct drm_syncobj_create { |
702 | __u32 handle; | 702 | __u32 handle; |
703 | #define DRM_SYNCOBJ_CREATE_SIGNALED (1 << 0) | ||
703 | __u32 flags; | 704 | __u32 flags; |
704 | }; | 705 | }; |
705 | 706 | ||
@@ -718,6 +719,24 @@ struct drm_syncobj_handle { | |||
718 | __u32 pad; | 719 | __u32 pad; |
719 | }; | 720 | }; |
720 | 721 | ||
722 | #define DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL (1 << 0) | ||
723 | #define DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT (1 << 1) | ||
724 | struct drm_syncobj_wait { | ||
725 | __u64 handles; | ||
726 | /* absolute timeout */ | ||
727 | __s64 timeout_nsec; | ||
728 | __u32 count_handles; | ||
729 | __u32 flags; | ||
730 | __u32 first_signaled; /* only valid when not waiting all */ | ||
731 | __u32 pad; | ||
732 | }; | ||
733 | |||
734 | struct drm_syncobj_array { | ||
735 | __u64 handles; | ||
736 | __u32 count_handles; | ||
737 | __u32 pad; | ||
738 | }; | ||
739 | |||
721 | #if defined(__cplusplus) | 740 | #if defined(__cplusplus) |
722 | } | 741 | } |
723 | #endif | 742 | #endif |
@@ -840,6 +859,9 @@ extern "C" { | |||
840 | #define DRM_IOCTL_SYNCOBJ_DESTROY DRM_IOWR(0xC0, struct drm_syncobj_destroy) | 859 | #define DRM_IOCTL_SYNCOBJ_DESTROY DRM_IOWR(0xC0, struct drm_syncobj_destroy) |
841 | #define DRM_IOCTL_SYNCOBJ_HANDLE_TO_FD DRM_IOWR(0xC1, struct drm_syncobj_handle) | 860 | #define DRM_IOCTL_SYNCOBJ_HANDLE_TO_FD DRM_IOWR(0xC1, struct drm_syncobj_handle) |
842 | #define DRM_IOCTL_SYNCOBJ_FD_TO_HANDLE DRM_IOWR(0xC2, struct drm_syncobj_handle) | 861 | #define DRM_IOCTL_SYNCOBJ_FD_TO_HANDLE DRM_IOWR(0xC2, struct drm_syncobj_handle) |
862 | #define DRM_IOCTL_SYNCOBJ_WAIT DRM_IOWR(0xC3, struct drm_syncobj_wait) | ||
863 | #define DRM_IOCTL_SYNCOBJ_RESET DRM_IOWR(0xC4, struct drm_syncobj_array) | ||
864 | #define DRM_IOCTL_SYNCOBJ_SIGNAL DRM_IOWR(0xC5, struct drm_syncobj_array) | ||
843 | 865 | ||
844 | /** | 866 | /** |
845 | * Device specific ioctls should only be in their respective headers | 867 | * Device specific ioctls should only be in their respective headers |
diff --git a/tools/include/uapi/drm/i915_drm.h b/tools/include/uapi/drm/i915_drm.h index 7ccbd6a2bbe0..6598fb76d2c2 100644 --- a/tools/include/uapi/drm/i915_drm.h +++ b/tools/include/uapi/drm/i915_drm.h | |||
@@ -260,6 +260,8 @@ typedef struct _drm_i915_sarea { | |||
260 | #define DRM_I915_GEM_CONTEXT_GETPARAM 0x34 | 260 | #define DRM_I915_GEM_CONTEXT_GETPARAM 0x34 |
261 | #define DRM_I915_GEM_CONTEXT_SETPARAM 0x35 | 261 | #define DRM_I915_GEM_CONTEXT_SETPARAM 0x35 |
262 | #define DRM_I915_PERF_OPEN 0x36 | 262 | #define DRM_I915_PERF_OPEN 0x36 |
263 | #define DRM_I915_PERF_ADD_CONFIG 0x37 | ||
264 | #define DRM_I915_PERF_REMOVE_CONFIG 0x38 | ||
263 | 265 | ||
264 | #define DRM_IOCTL_I915_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT, drm_i915_init_t) | 266 | #define DRM_IOCTL_I915_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT, drm_i915_init_t) |
265 | #define DRM_IOCTL_I915_FLUSH DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLUSH) | 267 | #define DRM_IOCTL_I915_FLUSH DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLUSH) |
@@ -315,6 +317,8 @@ typedef struct _drm_i915_sarea { | |||
315 | #define DRM_IOCTL_I915_GEM_CONTEXT_GETPARAM DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_GETPARAM, struct drm_i915_gem_context_param) | 317 | #define DRM_IOCTL_I915_GEM_CONTEXT_GETPARAM DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_GETPARAM, struct drm_i915_gem_context_param) |
316 | #define DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_SETPARAM, struct drm_i915_gem_context_param) | 318 | #define DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_SETPARAM, struct drm_i915_gem_context_param) |
317 | #define DRM_IOCTL_I915_PERF_OPEN DRM_IOW(DRM_COMMAND_BASE + DRM_I915_PERF_OPEN, struct drm_i915_perf_open_param) | 319 | #define DRM_IOCTL_I915_PERF_OPEN DRM_IOW(DRM_COMMAND_BASE + DRM_I915_PERF_OPEN, struct drm_i915_perf_open_param) |
320 | #define DRM_IOCTL_I915_PERF_ADD_CONFIG DRM_IOW(DRM_COMMAND_BASE + DRM_I915_PERF_ADD_CONFIG, struct drm_i915_perf_oa_config) | ||
321 | #define DRM_IOCTL_I915_PERF_REMOVE_CONFIG DRM_IOW(DRM_COMMAND_BASE + DRM_I915_PERF_REMOVE_CONFIG, __u64) | ||
318 | 322 | ||
319 | /* Allow drivers to submit batchbuffers directly to hardware, relying | 323 | /* Allow drivers to submit batchbuffers directly to hardware, relying |
320 | * on the security mechanisms provided by hardware. | 324 | * on the security mechanisms provided by hardware. |
@@ -431,6 +435,11 @@ typedef struct drm_i915_irq_wait { | |||
431 | */ | 435 | */ |
432 | #define I915_PARAM_HAS_EXEC_BATCH_FIRST 48 | 436 | #define I915_PARAM_HAS_EXEC_BATCH_FIRST 48 |
433 | 437 | ||
438 | /* Query whether DRM_I915_GEM_EXECBUFFER2 supports supplying an array of | ||
439 | * drm_i915_gem_exec_fence structures. See I915_EXEC_FENCE_ARRAY. | ||
440 | */ | ||
441 | #define I915_PARAM_HAS_EXEC_FENCE_ARRAY 49 | ||
442 | |||
434 | typedef struct drm_i915_getparam { | 443 | typedef struct drm_i915_getparam { |
435 | __s32 param; | 444 | __s32 param; |
436 | /* | 445 | /* |
@@ -812,6 +821,17 @@ struct drm_i915_gem_exec_object2 { | |||
812 | __u64 rsvd2; | 821 | __u64 rsvd2; |
813 | }; | 822 | }; |
814 | 823 | ||
824 | struct drm_i915_gem_exec_fence { | ||
825 | /** | ||
826 | * User's handle for a drm_syncobj to wait on or signal. | ||
827 | */ | ||
828 | __u32 handle; | ||
829 | |||
830 | #define I915_EXEC_FENCE_WAIT (1<<0) | ||
831 | #define I915_EXEC_FENCE_SIGNAL (1<<1) | ||
832 | __u32 flags; | ||
833 | }; | ||
834 | |||
815 | struct drm_i915_gem_execbuffer2 { | 835 | struct drm_i915_gem_execbuffer2 { |
816 | /** | 836 | /** |
817 | * List of gem_exec_object2 structs | 837 | * List of gem_exec_object2 structs |
@@ -826,7 +846,11 @@ struct drm_i915_gem_execbuffer2 { | |||
826 | __u32 DR1; | 846 | __u32 DR1; |
827 | __u32 DR4; | 847 | __u32 DR4; |
828 | __u32 num_cliprects; | 848 | __u32 num_cliprects; |
829 | /** This is a struct drm_clip_rect *cliprects */ | 849 | /** |
850 | * This is a struct drm_clip_rect *cliprects if I915_EXEC_FENCE_ARRAY | ||
851 | * is not set. If I915_EXEC_FENCE_ARRAY is set, then this is a | ||
852 | * struct drm_i915_gem_exec_fence *fences. | ||
853 | */ | ||
830 | __u64 cliprects_ptr; | 854 | __u64 cliprects_ptr; |
831 | #define I915_EXEC_RING_MASK (7<<0) | 855 | #define I915_EXEC_RING_MASK (7<<0) |
832 | #define I915_EXEC_DEFAULT (0<<0) | 856 | #define I915_EXEC_DEFAULT (0<<0) |
@@ -927,7 +951,14 @@ struct drm_i915_gem_execbuffer2 { | |||
927 | * element). | 951 | * element). |
928 | */ | 952 | */ |
929 | #define I915_EXEC_BATCH_FIRST (1<<18) | 953 | #define I915_EXEC_BATCH_FIRST (1<<18) |
930 | #define __I915_EXEC_UNKNOWN_FLAGS (-(I915_EXEC_BATCH_FIRST<<1)) | 954 | |
955 | /* Setting I915_FENCE_ARRAY implies that num_cliprects and cliprects_ptr | ||
956 | * define an array of i915_gem_exec_fence structures which specify a set of | ||
957 | * dma fences to wait upon or signal. | ||
958 | */ | ||
959 | #define I915_EXEC_FENCE_ARRAY (1<<19) | ||
960 | |||
961 | #define __I915_EXEC_UNKNOWN_FLAGS (-(I915_EXEC_FENCE_ARRAY<<1)) | ||
931 | 962 | ||
932 | #define I915_EXEC_CONTEXT_ID_MASK (0xffffffff) | 963 | #define I915_EXEC_CONTEXT_ID_MASK (0xffffffff) |
933 | #define i915_execbuffer2_set_context_id(eb2, context) \ | 964 | #define i915_execbuffer2_set_context_id(eb2, context) \ |
@@ -1467,6 +1498,22 @@ enum drm_i915_perf_record_type { | |||
1467 | DRM_I915_PERF_RECORD_MAX /* non-ABI */ | 1498 | DRM_I915_PERF_RECORD_MAX /* non-ABI */ |
1468 | }; | 1499 | }; |
1469 | 1500 | ||
1501 | /** | ||
1502 | * Structure to upload perf dynamic configuration into the kernel. | ||
1503 | */ | ||
1504 | struct drm_i915_perf_oa_config { | ||
1505 | /** String formatted like "%08x-%04x-%04x-%04x-%012x" */ | ||
1506 | char uuid[36]; | ||
1507 | |||
1508 | __u32 n_mux_regs; | ||
1509 | __u32 n_boolean_regs; | ||
1510 | __u32 n_flex_regs; | ||
1511 | |||
1512 | __u64 __user mux_regs_ptr; | ||
1513 | __u64 __user boolean_regs_ptr; | ||
1514 | __u64 __user flex_regs_ptr; | ||
1515 | }; | ||
1516 | |||
1470 | #if defined(__cplusplus) | 1517 | #if defined(__cplusplus) |
1471 | } | 1518 | } |
1472 | #endif | 1519 | #endif |
diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h index 461811e57140..43ab5c402f98 100644 --- a/tools/include/uapi/linux/bpf.h +++ b/tools/include/uapi/linux/bpf.h | |||
@@ -143,12 +143,6 @@ enum bpf_attach_type { | |||
143 | 143 | ||
144 | #define MAX_BPF_ATTACH_TYPE __MAX_BPF_ATTACH_TYPE | 144 | #define MAX_BPF_ATTACH_TYPE __MAX_BPF_ATTACH_TYPE |
145 | 145 | ||
146 | enum bpf_sockmap_flags { | ||
147 | BPF_SOCKMAP_UNSPEC, | ||
148 | BPF_SOCKMAP_STRPARSER, | ||
149 | __MAX_BPF_SOCKMAP_FLAG | ||
150 | }; | ||
151 | |||
152 | /* If BPF_F_ALLOW_OVERRIDE flag is used in BPF_PROG_ATTACH command | 146 | /* If BPF_F_ALLOW_OVERRIDE flag is used in BPF_PROG_ATTACH command |
153 | * to the given target_fd cgroup the descendent cgroup will be able to | 147 | * to the given target_fd cgroup the descendent cgroup will be able to |
154 | * override effective bpf program that was inherited from this cgroup | 148 | * override effective bpf program that was inherited from this cgroup |
@@ -368,9 +362,20 @@ union bpf_attr { | |||
368 | * int bpf_redirect(ifindex, flags) | 362 | * int bpf_redirect(ifindex, flags) |
369 | * redirect to another netdev | 363 | * redirect to another netdev |
370 | * @ifindex: ifindex of the net device | 364 | * @ifindex: ifindex of the net device |
371 | * @flags: bit 0 - if set, redirect to ingress instead of egress | 365 | * @flags: |
372 | * other bits - reserved | 366 | * cls_bpf: |
373 | * Return: TC_ACT_REDIRECT | 367 | * bit 0 - if set, redirect to ingress instead of egress |
368 | * other bits - reserved | ||
369 | * xdp_bpf: | ||
370 | * all bits - reserved | ||
371 | * Return: cls_bpf: TC_ACT_REDIRECT on success or TC_ACT_SHOT on error | ||
372 | * xdp_bfp: XDP_REDIRECT on success or XDP_ABORT on error | ||
373 | * int bpf_redirect_map(map, key, flags) | ||
374 | * redirect to endpoint in map | ||
375 | * @map: pointer to dev map | ||
376 | * @key: index in map to lookup | ||
377 | * @flags: -- | ||
378 | * Return: XDP_REDIRECT on success or XDP_ABORT on error | ||
374 | * | 379 | * |
375 | * u32 bpf_get_route_realm(skb) | 380 | * u32 bpf_get_route_realm(skb) |
376 | * retrieve a dst's tclassid | 381 | * retrieve a dst's tclassid |
@@ -632,7 +637,7 @@ union bpf_attr { | |||
632 | FN(skb_adjust_room), \ | 637 | FN(skb_adjust_room), \ |
633 | FN(redirect_map), \ | 638 | FN(redirect_map), \ |
634 | FN(sk_redirect_map), \ | 639 | FN(sk_redirect_map), \ |
635 | FN(sock_map_update), | 640 | FN(sock_map_update), \ |
636 | 641 | ||
637 | /* integer value in 'imm' field of BPF_CALL instruction selects which helper | 642 | /* integer value in 'imm' field of BPF_CALL instruction selects which helper |
638 | * function eBPF program intends to call | 643 | * function eBPF program intends to call |
@@ -753,20 +758,23 @@ struct bpf_sock { | |||
753 | __u32 family; | 758 | __u32 family; |
754 | __u32 type; | 759 | __u32 type; |
755 | __u32 protocol; | 760 | __u32 protocol; |
761 | __u32 mark; | ||
762 | __u32 priority; | ||
756 | }; | 763 | }; |
757 | 764 | ||
758 | #define XDP_PACKET_HEADROOM 256 | 765 | #define XDP_PACKET_HEADROOM 256 |
759 | 766 | ||
760 | /* User return codes for XDP prog type. | 767 | /* User return codes for XDP prog type. |
761 | * A valid XDP program must return one of these defined values. All other | 768 | * A valid XDP program must return one of these defined values. All other |
762 | * return codes are reserved for future use. Unknown return codes will result | 769 | * return codes are reserved for future use. Unknown return codes will |
763 | * in packet drop. | 770 | * result in packet drops and a warning via bpf_warn_invalid_xdp_action(). |
764 | */ | 771 | */ |
765 | enum xdp_action { | 772 | enum xdp_action { |
766 | XDP_ABORTED = 0, | 773 | XDP_ABORTED = 0, |
767 | XDP_DROP, | 774 | XDP_DROP, |
768 | XDP_PASS, | 775 | XDP_PASS, |
769 | XDP_TX, | 776 | XDP_TX, |
777 | XDP_REDIRECT, | ||
770 | }; | 778 | }; |
771 | 779 | ||
772 | /* user accessible metadata for XDP packet hook | 780 | /* user accessible metadata for XDP packet hook |
diff --git a/tools/include/uapi/linux/kvm.h b/tools/include/uapi/linux/kvm.h index 6cd63c18708a..838887587411 100644 --- a/tools/include/uapi/linux/kvm.h +++ b/tools/include/uapi/linux/kvm.h | |||
@@ -711,7 +711,8 @@ struct kvm_ppc_one_seg_page_size { | |||
711 | struct kvm_ppc_smmu_info { | 711 | struct kvm_ppc_smmu_info { |
712 | __u64 flags; | 712 | __u64 flags; |
713 | __u32 slb_size; | 713 | __u32 slb_size; |
714 | __u32 pad; | 714 | __u16 data_keys; /* # storage keys supported for data */ |
715 | __u16 instr_keys; /* # storage keys supported for instructions */ | ||
715 | struct kvm_ppc_one_seg_page_size sps[KVM_PPC_PAGE_SIZES_MAX_SZ]; | 716 | struct kvm_ppc_one_seg_page_size sps[KVM_PPC_PAGE_SIZES_MAX_SZ]; |
716 | }; | 717 | }; |
717 | 718 | ||
diff --git a/tools/include/uapi/linux/mman.h b/tools/include/uapi/linux/mman.h index 81d8edf11789..a937480d7cd3 100644 --- a/tools/include/uapi/linux/mman.h +++ b/tools/include/uapi/linux/mman.h | |||
@@ -1,7 +1,8 @@ | |||
1 | #ifndef _UAPI_LINUX_MMAN_H | 1 | #ifndef _UAPI_LINUX_MMAN_H |
2 | #define _UAPI_LINUX_MMAN_H | 2 | #define _UAPI_LINUX_MMAN_H |
3 | 3 | ||
4 | #include <uapi/asm/mman.h> | 4 | #include <asm/mman.h> |
5 | #include <asm-generic/hugetlb_encode.h> | ||
5 | 6 | ||
6 | #define MREMAP_MAYMOVE 1 | 7 | #define MREMAP_MAYMOVE 1 |
7 | #define MREMAP_FIXED 2 | 8 | #define MREMAP_FIXED 2 |
@@ -10,4 +11,25 @@ | |||
10 | #define OVERCOMMIT_ALWAYS 1 | 11 | #define OVERCOMMIT_ALWAYS 1 |
11 | #define OVERCOMMIT_NEVER 2 | 12 | #define OVERCOMMIT_NEVER 2 |
12 | 13 | ||
14 | /* | ||
15 | * Huge page size encoding when MAP_HUGETLB is specified, and a huge page | ||
16 | * size other than the default is desired. See hugetlb_encode.h. | ||
17 | * All known huge page size encodings are provided here. It is the | ||
18 | * responsibility of the application to know which sizes are supported on | ||
19 | * the running system. See mmap(2) man page for details. | ||
20 | */ | ||
21 | #define MAP_HUGE_SHIFT HUGETLB_FLAG_ENCODE_SHIFT | ||
22 | #define MAP_HUGE_MASK HUGETLB_FLAG_ENCODE_MASK | ||
23 | |||
24 | #define MAP_HUGE_64KB HUGETLB_FLAG_ENCODE_64KB | ||
25 | #define MAP_HUGE_512KB HUGETLB_FLAG_ENCODE_512KB | ||
26 | #define MAP_HUGE_1MB HUGETLB_FLAG_ENCODE_1MB | ||
27 | #define MAP_HUGE_2MB HUGETLB_FLAG_ENCODE_2MB | ||
28 | #define MAP_HUGE_8MB HUGETLB_FLAG_ENCODE_8MB | ||
29 | #define MAP_HUGE_16MB HUGETLB_FLAG_ENCODE_16MB | ||
30 | #define MAP_HUGE_256MB HUGETLB_FLAG_ENCODE_256MB | ||
31 | #define MAP_HUGE_1GB HUGETLB_FLAG_ENCODE_1GB | ||
32 | #define MAP_HUGE_2GB HUGETLB_FLAG_ENCODE_2GB | ||
33 | #define MAP_HUGE_16GB HUGETLB_FLAG_ENCODE_16GB | ||
34 | |||
13 | #endif /* _UAPI_LINUX_MMAN_H */ | 35 | #endif /* _UAPI_LINUX_MMAN_H */ |
diff --git a/tools/objtool/Documentation/stack-validation.txt b/tools/objtool/Documentation/stack-validation.txt index 6a1af43862df..3995735a878f 100644 --- a/tools/objtool/Documentation/stack-validation.txt +++ b/tools/objtool/Documentation/stack-validation.txt | |||
@@ -194,10 +194,10 @@ they mean, and suggestions for how to fix them. | |||
194 | If it's a GCC-compiled .c file, the error may be because the function | 194 | If it's a GCC-compiled .c file, the error may be because the function |
195 | uses an inline asm() statement which has a "call" instruction. An | 195 | uses an inline asm() statement which has a "call" instruction. An |
196 | asm() statement with a call instruction must declare the use of the | 196 | asm() statement with a call instruction must declare the use of the |
197 | stack pointer in its output operand. For example, on x86_64: | 197 | stack pointer in its output operand. On x86_64, this means adding |
198 | the ASM_CALL_CONSTRAINT as an output constraint: | ||
198 | 199 | ||
199 | register void *__sp asm("rsp"); | 200 | asm volatile("call func" : ASM_CALL_CONSTRAINT); |
200 | asm volatile("call func" : "+r" (__sp)); | ||
201 | 201 | ||
202 | Otherwise the stack frame may not get created before the call. | 202 | Otherwise the stack frame may not get created before the call. |
203 | 203 | ||
diff --git a/tools/objtool/arch/x86/decode.c b/tools/objtool/arch/x86/decode.c index 0e8c8ec4fd4e..34a579f806e3 100644 --- a/tools/objtool/arch/x86/decode.c +++ b/tools/objtool/arch/x86/decode.c | |||
@@ -208,14 +208,14 @@ int arch_decode_instruction(struct elf *elf, struct section *sec, | |||
208 | break; | 208 | break; |
209 | 209 | ||
210 | case 0x89: | 210 | case 0x89: |
211 | if (rex == 0x48 && modrm == 0xe5) { | 211 | if (rex_w && !rex_r && modrm_mod == 3 && modrm_reg == 4) { |
212 | 212 | ||
213 | /* mov %rsp, %rbp */ | 213 | /* mov %rsp, reg */ |
214 | *type = INSN_STACK; | 214 | *type = INSN_STACK; |
215 | op->src.type = OP_SRC_REG; | 215 | op->src.type = OP_SRC_REG; |
216 | op->src.reg = CFI_SP; | 216 | op->src.reg = CFI_SP; |
217 | op->dest.type = OP_DEST_REG; | 217 | op->dest.type = OP_DEST_REG; |
218 | op->dest.reg = CFI_BP; | 218 | op->dest.reg = op_to_cfi_reg[modrm_rm][rex_b]; |
219 | break; | 219 | break; |
220 | } | 220 | } |
221 | 221 | ||
@@ -284,11 +284,16 @@ int arch_decode_instruction(struct elf *elf, struct section *sec, | |||
284 | case 0x8d: | 284 | case 0x8d: |
285 | if (sib == 0x24 && rex_w && !rex_b && !rex_x) { | 285 | if (sib == 0x24 && rex_w && !rex_b && !rex_x) { |
286 | 286 | ||
287 | /* lea disp(%rsp), reg */ | ||
288 | *type = INSN_STACK; | 287 | *type = INSN_STACK; |
289 | op->src.type = OP_SRC_ADD; | 288 | if (!insn.displacement.value) { |
289 | /* lea (%rsp), reg */ | ||
290 | op->src.type = OP_SRC_REG; | ||
291 | } else { | ||
292 | /* lea disp(%rsp), reg */ | ||
293 | op->src.type = OP_SRC_ADD; | ||
294 | op->src.offset = insn.displacement.value; | ||
295 | } | ||
290 | op->src.reg = CFI_SP; | 296 | op->src.reg = CFI_SP; |
291 | op->src.offset = insn.displacement.value; | ||
292 | op->dest.type = OP_DEST_REG; | 297 | op->dest.type = OP_DEST_REG; |
293 | op->dest.reg = op_to_cfi_reg[modrm_reg][rex_r]; | 298 | op->dest.reg = op_to_cfi_reg[modrm_reg][rex_r]; |
294 | 299 | ||
diff --git a/tools/objtool/check.c b/tools/objtool/check.c index f744617c9946..a0c518ecf085 100644 --- a/tools/objtool/check.c +++ b/tools/objtool/check.c | |||
@@ -1203,24 +1203,39 @@ static int update_insn_state(struct instruction *insn, struct insn_state *state) | |||
1203 | switch (op->src.type) { | 1203 | switch (op->src.type) { |
1204 | 1204 | ||
1205 | case OP_SRC_REG: | 1205 | case OP_SRC_REG: |
1206 | if (op->src.reg == CFI_SP && op->dest.reg == CFI_BP) { | 1206 | if (op->src.reg == CFI_SP && op->dest.reg == CFI_BP && |
1207 | cfa->base == CFI_SP && | ||
1208 | regs[CFI_BP].base == CFI_CFA && | ||
1209 | regs[CFI_BP].offset == -cfa->offset) { | ||
1210 | |||
1211 | /* mov %rsp, %rbp */ | ||
1212 | cfa->base = op->dest.reg; | ||
1213 | state->bp_scratch = false; | ||
1214 | } | ||
1207 | 1215 | ||
1208 | if (cfa->base == CFI_SP && | 1216 | else if (op->src.reg == CFI_SP && |
1209 | regs[CFI_BP].base == CFI_CFA && | 1217 | op->dest.reg == CFI_BP && state->drap) { |
1210 | regs[CFI_BP].offset == -cfa->offset) { | ||
1211 | 1218 | ||
1212 | /* mov %rsp, %rbp */ | 1219 | /* drap: mov %rsp, %rbp */ |
1213 | cfa->base = op->dest.reg; | 1220 | regs[CFI_BP].base = CFI_BP; |
1214 | state->bp_scratch = false; | 1221 | regs[CFI_BP].offset = -state->stack_size; |
1215 | } | 1222 | state->bp_scratch = false; |
1223 | } | ||
1216 | 1224 | ||
1217 | else if (state->drap) { | 1225 | else if (op->src.reg == CFI_SP && cfa->base == CFI_SP) { |
1218 | 1226 | ||
1219 | /* drap: mov %rsp, %rbp */ | 1227 | /* |
1220 | regs[CFI_BP].base = CFI_BP; | 1228 | * mov %rsp, %reg |
1221 | regs[CFI_BP].offset = -state->stack_size; | 1229 | * |
1222 | state->bp_scratch = false; | 1230 | * This is needed for the rare case where GCC |
1223 | } | 1231 | * does: |
1232 | * | ||
1233 | * mov %rsp, %rax | ||
1234 | * ... | ||
1235 | * mov %rax, %rsp | ||
1236 | */ | ||
1237 | state->vals[op->dest.reg].base = CFI_CFA; | ||
1238 | state->vals[op->dest.reg].offset = -state->stack_size; | ||
1224 | } | 1239 | } |
1225 | 1240 | ||
1226 | else if (op->dest.reg == cfa->base) { | 1241 | else if (op->dest.reg == cfa->base) { |
diff --git a/tools/objtool/elf.c b/tools/objtool/elf.c index 6e9f980a7d26..24460155c82c 100644 --- a/tools/objtool/elf.c +++ b/tools/objtool/elf.c | |||
@@ -175,19 +175,20 @@ static int read_sections(struct elf *elf) | |||
175 | return -1; | 175 | return -1; |
176 | } | 176 | } |
177 | 177 | ||
178 | sec->data = elf_getdata(s, NULL); | 178 | if (sec->sh.sh_size != 0) { |
179 | if (!sec->data) { | 179 | sec->data = elf_getdata(s, NULL); |
180 | WARN_ELF("elf_getdata"); | 180 | if (!sec->data) { |
181 | return -1; | 181 | WARN_ELF("elf_getdata"); |
182 | } | 182 | return -1; |
183 | 183 | } | |
184 | if (sec->data->d_off != 0 || | 184 | if (sec->data->d_off != 0 || |
185 | sec->data->d_size != sec->sh.sh_size) { | 185 | sec->data->d_size != sec->sh.sh_size) { |
186 | WARN("unexpected data attributes for %s", sec->name); | 186 | WARN("unexpected data attributes for %s", |
187 | return -1; | 187 | sec->name); |
188 | return -1; | ||
189 | } | ||
188 | } | 190 | } |
189 | 191 | sec->len = sec->sh.sh_size; | |
190 | sec->len = sec->data->d_size; | ||
191 | } | 192 | } |
192 | 193 | ||
193 | /* sanity check, one more call to elf_nextscn() should return NULL */ | 194 | /* sanity check, one more call to elf_nextscn() should return NULL */ |
@@ -508,6 +509,7 @@ struct section *elf_create_rela_section(struct elf *elf, struct section *base) | |||
508 | strcat(relaname, base->name); | 509 | strcat(relaname, base->name); |
509 | 510 | ||
510 | sec = elf_create_section(elf, relaname, sizeof(GElf_Rela), 0); | 511 | sec = elf_create_section(elf, relaname, sizeof(GElf_Rela), 0); |
512 | free(relaname); | ||
511 | if (!sec) | 513 | if (!sec) |
512 | return NULL; | 514 | return NULL; |
513 | 515 | ||
@@ -561,6 +563,7 @@ int elf_write(struct elf *elf) | |||
561 | struct section *sec; | 563 | struct section *sec; |
562 | Elf_Scn *s; | 564 | Elf_Scn *s; |
563 | 565 | ||
566 | /* Update section headers for changed sections: */ | ||
564 | list_for_each_entry(sec, &elf->sections, list) { | 567 | list_for_each_entry(sec, &elf->sections, list) { |
565 | if (sec->changed) { | 568 | if (sec->changed) { |
566 | s = elf_getscn(elf->elf, sec->idx); | 569 | s = elf_getscn(elf->elf, sec->idx); |
@@ -568,13 +571,17 @@ int elf_write(struct elf *elf) | |||
568 | WARN_ELF("elf_getscn"); | 571 | WARN_ELF("elf_getscn"); |
569 | return -1; | 572 | return -1; |
570 | } | 573 | } |
571 | if (!gelf_update_shdr (s, &sec->sh)) { | 574 | if (!gelf_update_shdr(s, &sec->sh)) { |
572 | WARN_ELF("gelf_update_shdr"); | 575 | WARN_ELF("gelf_update_shdr"); |
573 | return -1; | 576 | return -1; |
574 | } | 577 | } |
575 | } | 578 | } |
576 | } | 579 | } |
577 | 580 | ||
581 | /* Make sure the new section header entries get updated properly. */ | ||
582 | elf_flagelf(elf->elf, ELF_C_SET, ELF_F_DIRTY); | ||
583 | |||
584 | /* Write all changes to the file. */ | ||
578 | if (elf_update(elf->elf, ELF_C_WRITE) < 0) { | 585 | if (elf_update(elf->elf, ELF_C_WRITE) < 0) { |
579 | WARN_ELF("elf_update"); | 586 | WARN_ELF("elf_update"); |
580 | return -1; | 587 | return -1; |
diff --git a/tools/perf/MANIFEST b/tools/perf/MANIFEST index 62072822dc85..627b7cada144 100644 --- a/tools/perf/MANIFEST +++ b/tools/perf/MANIFEST | |||
@@ -1,34 +1,8 @@ | |||
1 | tools/perf | 1 | tools/perf |
2 | tools/arch/alpha/include/asm/barrier.h | 2 | tools/arch |
3 | tools/arch/arm/include/asm/barrier.h | ||
4 | tools/arch/arm64/include/asm/barrier.h | ||
5 | tools/arch/ia64/include/asm/barrier.h | ||
6 | tools/arch/mips/include/asm/barrier.h | ||
7 | tools/arch/powerpc/include/asm/barrier.h | ||
8 | tools/arch/s390/include/asm/barrier.h | ||
9 | tools/arch/sh/include/asm/barrier.h | ||
10 | tools/arch/sparc/include/asm/barrier.h | ||
11 | tools/arch/sparc/include/asm/barrier_32.h | ||
12 | tools/arch/sparc/include/asm/barrier_64.h | ||
13 | tools/arch/tile/include/asm/barrier.h | ||
14 | tools/arch/x86/include/asm/barrier.h | ||
15 | tools/arch/x86/include/asm/cmpxchg.h | ||
16 | tools/arch/x86/include/asm/cpufeatures.h | ||
17 | tools/arch/x86/include/asm/disabled-features.h | ||
18 | tools/arch/x86/include/asm/required-features.h | ||
19 | tools/arch/x86/include/uapi/asm/svm.h | ||
20 | tools/arch/x86/include/uapi/asm/vmx.h | ||
21 | tools/arch/x86/include/uapi/asm/kvm.h | ||
22 | tools/arch/x86/include/uapi/asm/kvm_perf.h | ||
23 | tools/arch/x86/lib/memcpy_64.S | ||
24 | tools/arch/x86/lib/memset_64.S | ||
25 | tools/arch/s390/include/uapi/asm/kvm_perf.h | ||
26 | tools/arch/s390/include/uapi/asm/sie.h | ||
27 | tools/arch/xtensa/include/asm/barrier.h | ||
28 | tools/scripts | 3 | tools/scripts |
29 | tools/build | 4 | tools/build |
30 | tools/arch/x86/include/asm/atomic.h | 5 | tools/include |
31 | tools/arch/x86/include/asm/rmwcc.h | ||
32 | tools/lib/traceevent | 6 | tools/lib/traceevent |
33 | tools/lib/api | 7 | tools/lib/api |
34 | tools/lib/bpf | 8 | tools/lib/bpf |
@@ -42,60 +16,3 @@ tools/lib/find_bit.c | |||
42 | tools/lib/bitmap.c | 16 | tools/lib/bitmap.c |
43 | tools/lib/str_error_r.c | 17 | tools/lib/str_error_r.c |
44 | tools/lib/vsprintf.c | 18 | tools/lib/vsprintf.c |
45 | tools/include/asm/alternative-asm.h | ||
46 | tools/include/asm/atomic.h | ||
47 | tools/include/asm/barrier.h | ||
48 | tools/include/asm/bug.h | ||
49 | tools/include/asm-generic/atomic-gcc.h | ||
50 | tools/include/asm-generic/barrier.h | ||
51 | tools/include/asm-generic/bitops/arch_hweight.h | ||
52 | tools/include/asm-generic/bitops/atomic.h | ||
53 | tools/include/asm-generic/bitops/const_hweight.h | ||
54 | tools/include/asm-generic/bitops/__ffs.h | ||
55 | tools/include/asm-generic/bitops/__ffz.h | ||
56 | tools/include/asm-generic/bitops/__fls.h | ||
57 | tools/include/asm-generic/bitops/find.h | ||
58 | tools/include/asm-generic/bitops/fls64.h | ||
59 | tools/include/asm-generic/bitops/fls.h | ||
60 | tools/include/asm-generic/bitops/hweight.h | ||
61 | tools/include/asm-generic/bitops.h | ||
62 | tools/include/linux/atomic.h | ||
63 | tools/include/linux/bitops.h | ||
64 | tools/include/linux/compiler.h | ||
65 | tools/include/linux/compiler-gcc.h | ||
66 | tools/include/linux/coresight-pmu.h | ||
67 | tools/include/linux/bug.h | ||
68 | tools/include/linux/filter.h | ||
69 | tools/include/linux/hash.h | ||
70 | tools/include/linux/kernel.h | ||
71 | tools/include/linux/list.h | ||
72 | tools/include/linux/log2.h | ||
73 | tools/include/uapi/asm-generic/fcntl.h | ||
74 | tools/include/uapi/asm-generic/ioctls.h | ||
75 | tools/include/uapi/asm-generic/mman-common.h | ||
76 | tools/include/uapi/asm-generic/mman.h | ||
77 | tools/include/uapi/drm/drm.h | ||
78 | tools/include/uapi/drm/i915_drm.h | ||
79 | tools/include/uapi/linux/bpf.h | ||
80 | tools/include/uapi/linux/bpf_common.h | ||
81 | tools/include/uapi/linux/fcntl.h | ||
82 | tools/include/uapi/linux/hw_breakpoint.h | ||
83 | tools/include/uapi/linux/kvm.h | ||
84 | tools/include/uapi/linux/mman.h | ||
85 | tools/include/uapi/linux/perf_event.h | ||
86 | tools/include/uapi/linux/sched.h | ||
87 | tools/include/uapi/linux/stat.h | ||
88 | tools/include/uapi/linux/vhost.h | ||
89 | tools/include/uapi/sound/asound.h | ||
90 | tools/include/linux/poison.h | ||
91 | tools/include/linux/rbtree.h | ||
92 | tools/include/linux/rbtree_augmented.h | ||
93 | tools/include/linux/refcount.h | ||
94 | tools/include/linux/string.h | ||
95 | tools/include/linux/stringify.h | ||
96 | tools/include/linux/types.h | ||
97 | tools/include/linux/err.h | ||
98 | tools/include/linux/bitmap.h | ||
99 | tools/include/linux/time64.h | ||
100 | tools/arch/*/include/uapi/asm/mman.h | ||
101 | tools/arch/*/include/uapi/asm/perf_regs.h | ||
diff --git a/tools/perf/arch/s390/util/Build b/tools/perf/arch/s390/util/Build index bd518b623d7a..5bd7b9260cc0 100644 --- a/tools/perf/arch/s390/util/Build +++ b/tools/perf/arch/s390/util/Build | |||
@@ -1,5 +1,4 @@ | |||
1 | libperf-y += header.o | 1 | libperf-y += header.o |
2 | libperf-y += sym-handling.o | ||
3 | libperf-y += kvm-stat.o | 2 | libperf-y += kvm-stat.o |
4 | 3 | ||
5 | libperf-$(CONFIG_DWARF) += dwarf-regs.o | 4 | libperf-$(CONFIG_DWARF) += dwarf-regs.o |
diff --git a/tools/perf/arch/s390/util/sym-handling.c b/tools/perf/arch/s390/util/sym-handling.c deleted file mode 100644 index e103f6e46afe..000000000000 --- a/tools/perf/arch/s390/util/sym-handling.c +++ /dev/null | |||
@@ -1,29 +0,0 @@ | |||
1 | /* | ||
2 | * Architecture specific ELF symbol handling and relocation mapping. | ||
3 | * | ||
4 | * Copyright 2017 IBM Corp. | ||
5 | * Author(s): Thomas Richter <tmricht@linux.vnet.ibm.com> | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or modify | ||
8 | * it under the terms of the GNU General Public License (version 2 only) | ||
9 | * as published by the Free Software Foundation. | ||
10 | */ | ||
11 | |||
12 | #include "symbol.h" | ||
13 | |||
14 | #ifdef HAVE_LIBELF_SUPPORT | ||
15 | bool elf__needs_adjust_symbols(GElf_Ehdr ehdr) | ||
16 | { | ||
17 | if (ehdr.e_type == ET_EXEC) | ||
18 | return false; | ||
19 | return ehdr.e_type == ET_REL || ehdr.e_type == ET_DYN; | ||
20 | } | ||
21 | |||
22 | void arch__adjust_sym_map_offset(GElf_Sym *sym, | ||
23 | GElf_Shdr *shdr __maybe_unused, | ||
24 | struct map *map) | ||
25 | { | ||
26 | if (map->type == MAP__FUNCTION) | ||
27 | sym->st_value += map->start; | ||
28 | } | ||
29 | #endif | ||
diff --git a/tools/perf/util/callchain.c b/tools/perf/util/callchain.c index 510b513e0f01..be09d77cade0 100644 --- a/tools/perf/util/callchain.c +++ b/tools/perf/util/callchain.c | |||
@@ -65,8 +65,6 @@ static int parse_callchain_mode(const char *value) | |||
65 | callchain_param.mode = CHAIN_FOLDED; | 65 | callchain_param.mode = CHAIN_FOLDED; |
66 | return 0; | 66 | return 0; |
67 | } | 67 | } |
68 | |||
69 | pr_err("Invalid callchain mode: %s\n", value); | ||
70 | return -1; | 68 | return -1; |
71 | } | 69 | } |
72 | 70 | ||
@@ -82,8 +80,6 @@ static int parse_callchain_order(const char *value) | |||
82 | callchain_param.order_set = true; | 80 | callchain_param.order_set = true; |
83 | return 0; | 81 | return 0; |
84 | } | 82 | } |
85 | |||
86 | pr_err("Invalid callchain order: %s\n", value); | ||
87 | return -1; | 83 | return -1; |
88 | } | 84 | } |
89 | 85 | ||
@@ -105,8 +101,6 @@ static int parse_callchain_sort_key(const char *value) | |||
105 | callchain_param.branch_callstack = 1; | 101 | callchain_param.branch_callstack = 1; |
106 | return 0; | 102 | return 0; |
107 | } | 103 | } |
108 | |||
109 | pr_err("Invalid callchain sort key: %s\n", value); | ||
110 | return -1; | 104 | return -1; |
111 | } | 105 | } |
112 | 106 | ||
@@ -124,8 +118,6 @@ static int parse_callchain_value(const char *value) | |||
124 | callchain_param.value = CCVAL_COUNT; | 118 | callchain_param.value = CCVAL_COUNT; |
125 | return 0; | 119 | return 0; |
126 | } | 120 | } |
127 | |||
128 | pr_err("Invalid callchain config key: %s\n", value); | ||
129 | return -1; | 121 | return -1; |
130 | } | 122 | } |
131 | 123 | ||
@@ -319,12 +311,27 @@ int perf_callchain_config(const char *var, const char *value) | |||
319 | 311 | ||
320 | return ret; | 312 | return ret; |
321 | } | 313 | } |
322 | if (!strcmp(var, "print-type")) | 314 | if (!strcmp(var, "print-type")){ |
323 | return parse_callchain_mode(value); | 315 | int ret; |
324 | if (!strcmp(var, "order")) | 316 | ret = parse_callchain_mode(value); |
325 | return parse_callchain_order(value); | 317 | if (ret == -1) |
326 | if (!strcmp(var, "sort-key")) | 318 | pr_err("Invalid callchain mode: %s\n", value); |
327 | return parse_callchain_sort_key(value); | 319 | return ret; |
320 | } | ||
321 | if (!strcmp(var, "order")){ | ||
322 | int ret; | ||
323 | ret = parse_callchain_order(value); | ||
324 | if (ret == -1) | ||
325 | pr_err("Invalid callchain order: %s\n", value); | ||
326 | return ret; | ||
327 | } | ||
328 | if (!strcmp(var, "sort-key")){ | ||
329 | int ret; | ||
330 | ret = parse_callchain_sort_key(value); | ||
331 | if (ret == -1) | ||
332 | pr_err("Invalid callchain sort key: %s\n", value); | ||
333 | return ret; | ||
334 | } | ||
328 | if (!strcmp(var, "threshold")) { | 335 | if (!strcmp(var, "threshold")) { |
329 | callchain_param.min_percent = strtod(value, &endptr); | 336 | callchain_param.min_percent = strtod(value, &endptr); |
330 | if (value == endptr) { | 337 | if (value == endptr) { |
diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c index 4bb89373eb52..0dccdb89572c 100644 --- a/tools/perf/util/evsel.c +++ b/tools/perf/util/evsel.c | |||
@@ -271,12 +271,17 @@ struct perf_evsel *perf_evsel__new_idx(struct perf_event_attr *attr, int idx) | |||
271 | return evsel; | 271 | return evsel; |
272 | } | 272 | } |
273 | 273 | ||
274 | static bool perf_event_can_profile_kernel(void) | ||
275 | { | ||
276 | return geteuid() == 0 || perf_event_paranoid() == -1; | ||
277 | } | ||
278 | |||
274 | struct perf_evsel *perf_evsel__new_cycles(bool precise) | 279 | struct perf_evsel *perf_evsel__new_cycles(bool precise) |
275 | { | 280 | { |
276 | struct perf_event_attr attr = { | 281 | struct perf_event_attr attr = { |
277 | .type = PERF_TYPE_HARDWARE, | 282 | .type = PERF_TYPE_HARDWARE, |
278 | .config = PERF_COUNT_HW_CPU_CYCLES, | 283 | .config = PERF_COUNT_HW_CPU_CYCLES, |
279 | .exclude_kernel = geteuid() != 0, | 284 | .exclude_kernel = !perf_event_can_profile_kernel(), |
280 | }; | 285 | }; |
281 | struct perf_evsel *evsel; | 286 | struct perf_evsel *evsel; |
282 | 287 | ||
diff --git a/tools/perf/util/symbol-elf.c b/tools/perf/util/symbol-elf.c index 5c39f420111e..9cf781f0d8a2 100644 --- a/tools/perf/util/symbol-elf.c +++ b/tools/perf/util/symbol-elf.c | |||
@@ -810,12 +810,6 @@ static u64 ref_reloc(struct kmap *kmap) | |||
810 | void __weak arch__sym_update(struct symbol *s __maybe_unused, | 810 | void __weak arch__sym_update(struct symbol *s __maybe_unused, |
811 | GElf_Sym *sym __maybe_unused) { } | 811 | GElf_Sym *sym __maybe_unused) { } |
812 | 812 | ||
813 | void __weak arch__adjust_sym_map_offset(GElf_Sym *sym, GElf_Shdr *shdr, | ||
814 | struct map *map __maybe_unused) | ||
815 | { | ||
816 | sym->st_value -= shdr->sh_addr - shdr->sh_offset; | ||
817 | } | ||
818 | |||
819 | int dso__load_sym(struct dso *dso, struct map *map, struct symsrc *syms_ss, | 813 | int dso__load_sym(struct dso *dso, struct map *map, struct symsrc *syms_ss, |
820 | struct symsrc *runtime_ss, int kmodule) | 814 | struct symsrc *runtime_ss, int kmodule) |
821 | { | 815 | { |
@@ -996,7 +990,7 @@ int dso__load_sym(struct dso *dso, struct map *map, struct symsrc *syms_ss, | |||
996 | 990 | ||
997 | /* Adjust symbol to map to file offset */ | 991 | /* Adjust symbol to map to file offset */ |
998 | if (adjust_kernel_syms) | 992 | if (adjust_kernel_syms) |
999 | arch__adjust_sym_map_offset(&sym, &shdr, map); | 993 | sym.st_value -= shdr.sh_addr - shdr.sh_offset; |
1000 | 994 | ||
1001 | if (strcmp(section_name, | 995 | if (strcmp(section_name, |
1002 | (curr_dso->short_name + | 996 | (curr_dso->short_name + |
diff --git a/tools/perf/util/symbol.h b/tools/perf/util/symbol.h index 2bd6a1f01a1c..aad99e7e179b 100644 --- a/tools/perf/util/symbol.h +++ b/tools/perf/util/symbol.h | |||
@@ -344,9 +344,6 @@ int setup_intlist(struct intlist **list, const char *list_str, | |||
344 | #ifdef HAVE_LIBELF_SUPPORT | 344 | #ifdef HAVE_LIBELF_SUPPORT |
345 | bool elf__needs_adjust_symbols(GElf_Ehdr ehdr); | 345 | bool elf__needs_adjust_symbols(GElf_Ehdr ehdr); |
346 | void arch__sym_update(struct symbol *s, GElf_Sym *sym); | 346 | void arch__sym_update(struct symbol *s, GElf_Sym *sym); |
347 | void arch__adjust_sym_map_offset(GElf_Sym *sym, | ||
348 | GElf_Shdr *shdr __maybe_unused, | ||
349 | struct map *map __maybe_unused); | ||
350 | #endif | 347 | #endif |
351 | 348 | ||
352 | #define SYMBOL_A 0 | 349 | #define SYMBOL_A 0 |
diff --git a/tools/perf/util/syscalltbl.c b/tools/perf/util/syscalltbl.c index 19e5db90394c..6eea7cff3d4e 100644 --- a/tools/perf/util/syscalltbl.c +++ b/tools/perf/util/syscalltbl.c | |||
@@ -15,9 +15,9 @@ | |||
15 | 15 | ||
16 | #include "syscalltbl.h" | 16 | #include "syscalltbl.h" |
17 | #include <stdlib.h> | 17 | #include <stdlib.h> |
18 | #include <linux/compiler.h> | ||
18 | 19 | ||
19 | #ifdef HAVE_SYSCALL_TABLE | 20 | #ifdef HAVE_SYSCALL_TABLE |
20 | #include <linux/compiler.h> | ||
21 | #include <string.h> | 21 | #include <string.h> |
22 | #include "string2.h" | 22 | #include "string2.h" |
23 | #include "util.h" | 23 | #include "util.h" |
diff --git a/tools/testing/nvdimm/test/nfit.c b/tools/testing/nvdimm/test/nfit.c index d20791c3f499..bef419d4266d 100644 --- a/tools/testing/nvdimm/test/nfit.c +++ b/tools/testing/nvdimm/test/nfit.c | |||
@@ -1527,9 +1527,6 @@ static void nfit_test1_setup(struct nfit_test *t) | |||
1527 | set_bit(ND_CMD_ARS_START, &acpi_desc->bus_cmd_force_en); | 1527 | set_bit(ND_CMD_ARS_START, &acpi_desc->bus_cmd_force_en); |
1528 | set_bit(ND_CMD_ARS_STATUS, &acpi_desc->bus_cmd_force_en); | 1528 | set_bit(ND_CMD_ARS_STATUS, &acpi_desc->bus_cmd_force_en); |
1529 | set_bit(ND_CMD_CLEAR_ERROR, &acpi_desc->bus_cmd_force_en); | 1529 | set_bit(ND_CMD_CLEAR_ERROR, &acpi_desc->bus_cmd_force_en); |
1530 | set_bit(ND_CMD_GET_CONFIG_SIZE, &acpi_desc->dimm_cmd_force_en); | ||
1531 | set_bit(ND_CMD_GET_CONFIG_DATA, &acpi_desc->dimm_cmd_force_en); | ||
1532 | set_bit(ND_CMD_SET_CONFIG_DATA, &acpi_desc->dimm_cmd_force_en); | ||
1533 | } | 1530 | } |
1534 | 1531 | ||
1535 | static int nfit_test_blk_do_io(struct nd_blk_region *ndbr, resource_size_t dpa, | 1532 | static int nfit_test_blk_do_io(struct nd_blk_region *ndbr, resource_size_t dpa, |
diff --git a/tools/testing/selftests/Makefile b/tools/testing/selftests/Makefile index 26ce4f7168be..ff805643b5f7 100644 --- a/tools/testing/selftests/Makefile +++ b/tools/testing/selftests/Makefile | |||
@@ -52,6 +52,10 @@ override LDFLAGS = | |||
52 | override MAKEFLAGS = | 52 | override MAKEFLAGS = |
53 | endif | 53 | endif |
54 | 54 | ||
55 | ifneq ($(KBUILD_SRC),) | ||
56 | override LDFLAGS = | ||
57 | endif | ||
58 | |||
55 | BUILD := $(O) | 59 | BUILD := $(O) |
56 | ifndef BUILD | 60 | ifndef BUILD |
57 | BUILD := $(KBUILD_OUTPUT) | 61 | BUILD := $(KBUILD_OUTPUT) |
@@ -62,32 +66,32 @@ endif | |||
62 | 66 | ||
63 | export BUILD | 67 | export BUILD |
64 | all: | 68 | all: |
65 | for TARGET in $(TARGETS); do \ | 69 | @for TARGET in $(TARGETS); do \ |
66 | BUILD_TARGET=$$BUILD/$$TARGET; \ | 70 | BUILD_TARGET=$$BUILD/$$TARGET; \ |
67 | mkdir $$BUILD_TARGET -p; \ | 71 | mkdir $$BUILD_TARGET -p; \ |
68 | make OUTPUT=$$BUILD_TARGET -C $$TARGET;\ | 72 | make OUTPUT=$$BUILD_TARGET -C $$TARGET;\ |
69 | done; | 73 | done; |
70 | 74 | ||
71 | run_tests: all | 75 | run_tests: all |
72 | for TARGET in $(TARGETS); do \ | 76 | @for TARGET in $(TARGETS); do \ |
73 | BUILD_TARGET=$$BUILD/$$TARGET; \ | 77 | BUILD_TARGET=$$BUILD/$$TARGET; \ |
74 | make OUTPUT=$$BUILD_TARGET -C $$TARGET run_tests;\ | 78 | make OUTPUT=$$BUILD_TARGET -C $$TARGET run_tests;\ |
75 | done; | 79 | done; |
76 | 80 | ||
77 | hotplug: | 81 | hotplug: |
78 | for TARGET in $(TARGETS_HOTPLUG); do \ | 82 | @for TARGET in $(TARGETS_HOTPLUG); do \ |
79 | BUILD_TARGET=$$BUILD/$$TARGET; \ | 83 | BUILD_TARGET=$$BUILD/$$TARGET; \ |
80 | make OUTPUT=$$BUILD_TARGET -C $$TARGET;\ | 84 | make OUTPUT=$$BUILD_TARGET -C $$TARGET;\ |
81 | done; | 85 | done; |
82 | 86 | ||
83 | run_hotplug: hotplug | 87 | run_hotplug: hotplug |
84 | for TARGET in $(TARGETS_HOTPLUG); do \ | 88 | @for TARGET in $(TARGETS_HOTPLUG); do \ |
85 | BUILD_TARGET=$$BUILD/$$TARGET; \ | 89 | BUILD_TARGET=$$BUILD/$$TARGET; \ |
86 | make OUTPUT=$$BUILD_TARGET -C $$TARGET run_full_test;\ | 90 | make OUTPUT=$$BUILD_TARGET -C $$TARGET run_full_test;\ |
87 | done; | 91 | done; |
88 | 92 | ||
89 | clean_hotplug: | 93 | clean_hotplug: |
90 | for TARGET in $(TARGETS_HOTPLUG); do \ | 94 | @for TARGET in $(TARGETS_HOTPLUG); do \ |
91 | BUILD_TARGET=$$BUILD/$$TARGET; \ | 95 | BUILD_TARGET=$$BUILD/$$TARGET; \ |
92 | make OUTPUT=$$BUILD_TARGET -C $$TARGET clean;\ | 96 | make OUTPUT=$$BUILD_TARGET -C $$TARGET clean;\ |
93 | done; | 97 | done; |
@@ -103,7 +107,7 @@ install: | |||
103 | ifdef INSTALL_PATH | 107 | ifdef INSTALL_PATH |
104 | @# Ask all targets to install their files | 108 | @# Ask all targets to install their files |
105 | mkdir -p $(INSTALL_PATH) | 109 | mkdir -p $(INSTALL_PATH) |
106 | for TARGET in $(TARGETS); do \ | 110 | @for TARGET in $(TARGETS); do \ |
107 | BUILD_TARGET=$$BUILD/$$TARGET; \ | 111 | BUILD_TARGET=$$BUILD/$$TARGET; \ |
108 | make OUTPUT=$$BUILD_TARGET -C $$TARGET INSTALL_PATH=$(INSTALL_PATH)/$$TARGET install; \ | 112 | make OUTPUT=$$BUILD_TARGET -C $$TARGET INSTALL_PATH=$(INSTALL_PATH)/$$TARGET install; \ |
109 | done; | 113 | done; |
@@ -128,7 +132,7 @@ else | |||
128 | endif | 132 | endif |
129 | 133 | ||
130 | clean: | 134 | clean: |
131 | for TARGET in $(TARGETS); do \ | 135 | @for TARGET in $(TARGETS); do \ |
132 | BUILD_TARGET=$$BUILD/$$TARGET; \ | 136 | BUILD_TARGET=$$BUILD/$$TARGET; \ |
133 | make OUTPUT=$$BUILD_TARGET -C $$TARGET clean;\ | 137 | make OUTPUT=$$BUILD_TARGET -C $$TARGET clean;\ |
134 | done; | 138 | done; |
diff --git a/tools/testing/selftests/bpf/bpf_util.h b/tools/testing/selftests/bpf/bpf_util.h index 20ecbaa0d85d..6c53a8906eff 100644 --- a/tools/testing/selftests/bpf/bpf_util.h +++ b/tools/testing/selftests/bpf/bpf_util.h | |||
@@ -12,6 +12,7 @@ static inline unsigned int bpf_num_possible_cpus(void) | |||
12 | unsigned int start, end, possible_cpus = 0; | 12 | unsigned int start, end, possible_cpus = 0; |
13 | char buff[128]; | 13 | char buff[128]; |
14 | FILE *fp; | 14 | FILE *fp; |
15 | int n; | ||
15 | 16 | ||
16 | fp = fopen(fcpu, "r"); | 17 | fp = fopen(fcpu, "r"); |
17 | if (!fp) { | 18 | if (!fp) { |
@@ -20,17 +21,17 @@ static inline unsigned int bpf_num_possible_cpus(void) | |||
20 | } | 21 | } |
21 | 22 | ||
22 | while (fgets(buff, sizeof(buff), fp)) { | 23 | while (fgets(buff, sizeof(buff), fp)) { |
23 | if (sscanf(buff, "%u-%u", &start, &end) == 2) { | 24 | n = sscanf(buff, "%u-%u", &start, &end); |
24 | possible_cpus = start == 0 ? end + 1 : 0; | 25 | if (n == 0) { |
25 | break; | 26 | printf("Failed to retrieve # possible CPUs!\n"); |
27 | exit(1); | ||
28 | } else if (n == 1) { | ||
29 | end = start; | ||
26 | } | 30 | } |
31 | possible_cpus = start == 0 ? end + 1 : 0; | ||
32 | break; | ||
27 | } | 33 | } |
28 | |||
29 | fclose(fp); | 34 | fclose(fp); |
30 | if (!possible_cpus) { | ||
31 | printf("Failed to retrieve # possible CPUs!\n"); | ||
32 | exit(1); | ||
33 | } | ||
34 | 35 | ||
35 | return possible_cpus; | 36 | return possible_cpus; |
36 | } | 37 | } |
diff --git a/tools/testing/selftests/breakpoints/Makefile b/tools/testing/selftests/breakpoints/Makefile index 6b214b7b10fb..247b0a1899d7 100644 --- a/tools/testing/selftests/breakpoints/Makefile +++ b/tools/testing/selftests/breakpoints/Makefile | |||
@@ -2,14 +2,14 @@ | |||
2 | uname_M := $(shell uname -m 2>/dev/null || echo not) | 2 | uname_M := $(shell uname -m 2>/dev/null || echo not) |
3 | ARCH ?= $(shell echo $(uname_M) | sed -e s/i.86/x86/ -e s/x86_64/x86/) | 3 | ARCH ?= $(shell echo $(uname_M) | sed -e s/i.86/x86/ -e s/x86_64/x86/) |
4 | 4 | ||
5 | TEST_GEN_PROGS := step_after_suspend_test | ||
6 | |||
5 | ifeq ($(ARCH),x86) | 7 | ifeq ($(ARCH),x86) |
6 | TEST_GEN_PROGS := breakpoint_test | 8 | TEST_GEN_PROGS += breakpoint_test |
7 | endif | 9 | endif |
8 | ifneq (,$(filter $(ARCH),aarch64 arm64)) | 10 | ifneq (,$(filter $(ARCH),aarch64 arm64)) |
9 | TEST_GEN_PROGS := breakpoint_test_arm64 | 11 | TEST_GEN_PROGS += breakpoint_test_arm64 |
10 | endif | 12 | endif |
11 | 13 | ||
12 | TEST_GEN_PROGS += step_after_suspend_test | ||
13 | |||
14 | include ../lib.mk | 14 | include ../lib.mk |
15 | 15 | ||
diff --git a/tools/testing/selftests/ftrace/test.d/kprobe/multiple_kprobes.tc b/tools/testing/selftests/ftrace/test.d/kprobe/multiple_kprobes.tc index 2a1cb9908746..a4fd4c851a5b 100644 --- a/tools/testing/selftests/ftrace/test.d/kprobe/multiple_kprobes.tc +++ b/tools/testing/selftests/ftrace/test.d/kprobe/multiple_kprobes.tc | |||
@@ -1,6 +1,8 @@ | |||
1 | #!/bin/sh | 1 | #!/bin/sh |
2 | # description: Register/unregister many kprobe events | 2 | # description: Register/unregister many kprobe events |
3 | 3 | ||
4 | [ -f kprobe_events ] || exit_unsupported # this is configurable | ||
5 | |||
4 | # ftrace fentry skip size depends on the machine architecture. | 6 | # ftrace fentry skip size depends on the machine architecture. |
5 | # Currently HAVE_KPROBES_ON_FTRACE defined on x86 and powerpc64le | 7 | # Currently HAVE_KPROBES_ON_FTRACE defined on x86 and powerpc64le |
6 | case `uname -m` in | 8 | case `uname -m` in |
diff --git a/tools/testing/selftests/futex/Makefile b/tools/testing/selftests/futex/Makefile index 7c647f619d63..f0c0369ccb79 100644 --- a/tools/testing/selftests/futex/Makefile +++ b/tools/testing/selftests/futex/Makefile | |||
@@ -7,14 +7,17 @@ TEST_PROGS := run.sh | |||
7 | include ../lib.mk | 7 | include ../lib.mk |
8 | 8 | ||
9 | all: | 9 | all: |
10 | for DIR in $(SUBDIRS); do \ | 10 | @for DIR in $(SUBDIRS); do \ |
11 | BUILD_TARGET=$(OUTPUT)/$$DIR; \ | 11 | BUILD_TARGET=$(OUTPUT)/$$DIR; \ |
12 | mkdir $$BUILD_TARGET -p; \ | 12 | mkdir $$BUILD_TARGET -p; \ |
13 | make OUTPUT=$$BUILD_TARGET -C $$DIR $@;\ | 13 | make OUTPUT=$$BUILD_TARGET -C $$DIR $@;\ |
14 | if [ -e $$DIR/$(TEST_PROGS) ]; then | ||
15 | rsync -a $$DIR/$(TEST_PROGS) $$BUILD_TARGET/; | ||
16 | fi | ||
14 | done | 17 | done |
15 | 18 | ||
16 | override define RUN_TESTS | 19 | override define RUN_TESTS |
17 | $(OUTPUT)/run.sh | 20 | @cd $(OUTPUT); ./run.sh |
18 | endef | 21 | endef |
19 | 22 | ||
20 | override define INSTALL_RULE | 23 | override define INSTALL_RULE |
@@ -33,7 +36,7 @@ override define EMIT_TESTS | |||
33 | endef | 36 | endef |
34 | 37 | ||
35 | override define CLEAN | 38 | override define CLEAN |
36 | for DIR in $(SUBDIRS); do \ | 39 | @for DIR in $(SUBDIRS); do \ |
37 | BUILD_TARGET=$(OUTPUT)/$$DIR; \ | 40 | BUILD_TARGET=$(OUTPUT)/$$DIR; \ |
38 | mkdir $$BUILD_TARGET -p; \ | 41 | mkdir $$BUILD_TARGET -p; \ |
39 | make OUTPUT=$$BUILD_TARGET -C $$DIR $@;\ | 42 | make OUTPUT=$$BUILD_TARGET -C $$DIR $@;\ |
diff --git a/tools/testing/selftests/intel_pstate/Makefile b/tools/testing/selftests/intel_pstate/Makefile index 849a90ffe8dd..a97e24edde39 100644 --- a/tools/testing/selftests/intel_pstate/Makefile +++ b/tools/testing/selftests/intel_pstate/Makefile | |||
@@ -1,7 +1,9 @@ | |||
1 | CFLAGS := $(CFLAGS) -Wall -D_GNU_SOURCE | 1 | CFLAGS := $(CFLAGS) -Wall -D_GNU_SOURCE |
2 | LDLIBS := $(LDLIBS) -lm | 2 | LDLIBS := $(LDLIBS) -lm |
3 | 3 | ||
4 | ifeq (,$(filter $(ARCH),x86)) | ||
4 | TEST_GEN_FILES := msr aperf | 5 | TEST_GEN_FILES := msr aperf |
6 | endif | ||
5 | 7 | ||
6 | TEST_PROGS := run.sh | 8 | TEST_PROGS := run.sh |
7 | 9 | ||
diff --git a/tools/testing/selftests/intel_pstate/run.sh b/tools/testing/selftests/intel_pstate/run.sh index 7868c106b8b1..d3ab48f91cd6 100755 --- a/tools/testing/selftests/intel_pstate/run.sh +++ b/tools/testing/selftests/intel_pstate/run.sh | |||
@@ -29,13 +29,12 @@ | |||
29 | 29 | ||
30 | EVALUATE_ONLY=0 | 30 | EVALUATE_ONLY=0 |
31 | 31 | ||
32 | max_cpus=$(($(nproc)-1)) | 32 | if ! uname -m | sed -e s/i.86/x86/ -e s/x86_64/x86/ | grep -q x86; then |
33 | echo "$0 # Skipped: Test can only run on x86 architectures." | ||
34 | exit 0 | ||
35 | fi | ||
33 | 36 | ||
34 | # compile programs | 37 | max_cpus=$(($(nproc)-1)) |
35 | gcc aperf.c -Wall -D_GNU_SOURCE -o aperf -lm | ||
36 | [ $? -ne 0 ] && echo "Problem compiling aperf.c." && exit 1 | ||
37 | gcc -o msr msr.c -lm | ||
38 | [ $? -ne 0 ] && echo "Problem compiling msr.c." && exit 1 | ||
39 | 38 | ||
40 | function run_test () { | 39 | function run_test () { |
41 | 40 | ||
diff --git a/tools/testing/selftests/lib.mk b/tools/testing/selftests/lib.mk index 693616651da5..f65886af7c0c 100644 --- a/tools/testing/selftests/lib.mk +++ b/tools/testing/selftests/lib.mk | |||
@@ -6,7 +6,14 @@ ifeq (0,$(MAKELEVEL)) | |||
6 | OUTPUT := $(shell pwd) | 6 | OUTPUT := $(shell pwd) |
7 | endif | 7 | endif |
8 | 8 | ||
9 | # The following are built by lib.mk common compile rules. | ||
10 | # TEST_CUSTOM_PROGS should be used by tests that require | ||
11 | # custom build rule and prevent common build rule use. | ||
12 | # TEST_PROGS are for test shell scripts. | ||
13 | # TEST_CUSTOM_PROGS and TEST_PROGS will be run by common run_tests | ||
14 | # and install targets. Common clean doesn't touch them. | ||
9 | TEST_GEN_PROGS := $(patsubst %,$(OUTPUT)/%,$(TEST_GEN_PROGS)) | 15 | TEST_GEN_PROGS := $(patsubst %,$(OUTPUT)/%,$(TEST_GEN_PROGS)) |
16 | TEST_GEN_PROGS_EXTENDED := $(patsubst %,$(OUTPUT)/%,$(TEST_GEN_PROGS_EXTENDED)) | ||
10 | TEST_GEN_FILES := $(patsubst %,$(OUTPUT)/%,$(TEST_GEN_FILES)) | 17 | TEST_GEN_FILES := $(patsubst %,$(OUTPUT)/%,$(TEST_GEN_FILES)) |
11 | 18 | ||
12 | all: $(TEST_GEN_PROGS) $(TEST_GEN_PROGS_EXTENDED) $(TEST_GEN_FILES) | 19 | all: $(TEST_GEN_PROGS) $(TEST_GEN_PROGS_EXTENDED) $(TEST_GEN_FILES) |
@@ -20,17 +27,28 @@ define RUN_TESTS | |||
20 | test_num=`echo $$test_num+1 | bc`; \ | 27 | test_num=`echo $$test_num+1 | bc`; \ |
21 | echo "selftests: $$BASENAME_TEST"; \ | 28 | echo "selftests: $$BASENAME_TEST"; \ |
22 | echo "========================================"; \ | 29 | echo "========================================"; \ |
23 | if [ ! -x $$BASENAME_TEST ]; then \ | 30 | if [ ! -x $$TEST ]; then \ |
24 | echo "selftests: Warning: file $$BASENAME_TEST is not executable, correct this.";\ | 31 | echo "selftests: Warning: file $$BASENAME_TEST is not executable, correct this.";\ |
25 | echo "not ok 1..$$test_num selftests: $$BASENAME_TEST [FAIL]"; \ | 32 | echo "not ok 1..$$test_num selftests: $$BASENAME_TEST [FAIL]"; \ |
26 | else \ | 33 | else \ |
27 | cd `dirname $$TEST` > /dev/null; (./$$BASENAME_TEST && echo "ok 1..$$test_num selftests: $$BASENAME_TEST [PASS]") || echo "not ok 1..$$test_num selftests: $$BASENAME_TEST [FAIL]"; cd - > /dev/null;\ | 34 | cd `dirname $$TEST` > /dev/null; (./$$BASENAME_TEST > /tmp/$$BASENAME_TEST 2>&1 && echo "ok 1..$$test_num selftests: $$BASENAME_TEST [PASS]") || echo "not ok 1..$$test_num selftests: $$BASENAME_TEST [FAIL]"; cd - > /dev/null;\ |
28 | fi; \ | 35 | fi; \ |
29 | done; | 36 | done; |
30 | endef | 37 | endef |
31 | 38 | ||
32 | run_tests: all | 39 | run_tests: all |
33 | $(call RUN_TESTS, $(TEST_GEN_PROGS) $(TEST_PROGS)) | 40 | ifneq ($(KBUILD_SRC),) |
41 | @if [ "X$(TEST_PROGS) $(TEST_PROGS_EXTENDED) $(TEST_FILES)" != "X" ]; then | ||
42 | @rsync -aq $(TEST_PROGS) $(TEST_PROGS_EXTENDED) $(TEST_FILES) $(OUTPUT) | ||
43 | fi | ||
44 | @if [ "X$(TEST_PROGS)" != "X" ]; then | ||
45 | $(call RUN_TESTS, $(TEST_GEN_PROGS) $(TEST_CUSTOM_PROGS) $(OUTPUT)/$(TEST_PROGS)) | ||
46 | else | ||
47 | $(call RUN_TESTS, $(TEST_GEN_PROGS) $(TEST_CUSTOM_PROGS)) | ||
48 | fi | ||
49 | else | ||
50 | $(call RUN_TESTS, $(TEST_GEN_PROGS) $(TEST_CUSTOM_PROGS) $(TEST_PROGS)) | ||
51 | endif | ||
34 | 52 | ||
35 | define INSTALL_RULE | 53 | define INSTALL_RULE |
36 | @if [ "X$(TEST_PROGS)$(TEST_PROGS_EXTENDED)$(TEST_FILES)" != "X" ]; then \ | 54 | @if [ "X$(TEST_PROGS)$(TEST_PROGS_EXTENDED)$(TEST_FILES)" != "X" ]; then \ |
@@ -38,10 +56,10 @@ define INSTALL_RULE | |||
38 | echo "rsync -a $(TEST_PROGS) $(TEST_PROGS_EXTENDED) $(TEST_FILES) $(INSTALL_PATH)/"; \ | 56 | echo "rsync -a $(TEST_PROGS) $(TEST_PROGS_EXTENDED) $(TEST_FILES) $(INSTALL_PATH)/"; \ |
39 | rsync -a $(TEST_PROGS) $(TEST_PROGS_EXTENDED) $(TEST_FILES) $(INSTALL_PATH)/; \ | 57 | rsync -a $(TEST_PROGS) $(TEST_PROGS_EXTENDED) $(TEST_FILES) $(INSTALL_PATH)/; \ |
40 | fi | 58 | fi |
41 | @if [ "X$(TEST_GEN_PROGS)$(TEST_GEN_PROGS_EXTENDED)$(TEST_GEN_FILES)" != "X" ]; then \ | 59 | @if [ "X$(TEST_GEN_PROGS)$(TEST_CUSTOM_PROGS)$(TEST_GEN_PROGS_EXTENDED)$(TEST_GEN_FILES)" != "X" ]; then \ |
42 | mkdir -p ${INSTALL_PATH}; \ | 60 | mkdir -p ${INSTALL_PATH}; \ |
43 | echo "rsync -a $(TEST_GEN_PROGS) $(TEST_GEN_PROGS_EXTENDED) $(TEST_GEN_FILES) $(INSTALL_PATH)/"; \ | 61 | echo "rsync -a $(TEST_GEN_PROGS) $(TEST_CUSTOM_PROGS) $(TEST_GEN_PROGS_EXTENDED) $(TEST_GEN_FILES) $(INSTALL_PATH)/"; \ |
44 | rsync -a $(TEST_GEN_PROGS) $(TEST_GEN_PROGS_EXTENDED) $(TEST_GEN_FILES) $(INSTALL_PATH)/; \ | 62 | rsync -a $(TEST_GEN_PROGS) $(TEST_CUSTOM_PROGS) $(TEST_GEN_PROGS_EXTENDED) $(TEST_GEN_FILES) $(INSTALL_PATH)/; \ |
45 | fi | 63 | fi |
46 | endef | 64 | endef |
47 | 65 | ||
@@ -53,15 +71,20 @@ else | |||
53 | endif | 71 | endif |
54 | 72 | ||
55 | define EMIT_TESTS | 73 | define EMIT_TESTS |
56 | @for TEST in $(TEST_GEN_PROGS) $(TEST_PROGS); do \ | 74 | @for TEST in $(TEST_GEN_PROGS) $(TEST_CUSTOM_PROGS) $(TEST_PROGS); do \ |
57 | BASENAME_TEST=`basename $$TEST`; \ | 75 | BASENAME_TEST=`basename $$TEST`; \ |
58 | echo "(./$$BASENAME_TEST && echo \"selftests: $$BASENAME_TEST [PASS]\") || echo \"selftests: $$BASENAME_TEST [FAIL]\""; \ | 76 | echo "(./$$BASENAME_TEST > /tmp/$$BASENAME_TEST 2>&1 && echo \"selftests: $$BASENAME_TEST [PASS]\") || echo \"selftests: $$BASENAME_TEST [FAIL]\""; \ |
59 | done; | 77 | done; |
60 | endef | 78 | endef |
61 | 79 | ||
62 | emit_tests: | 80 | emit_tests: |
63 | $(EMIT_TESTS) | 81 | $(EMIT_TESTS) |
64 | 82 | ||
83 | # define if isn't already. It is undefined in make O= case. | ||
84 | ifeq ($(RM),) | ||
85 | RM := rm -f | ||
86 | endif | ||
87 | |||
65 | define CLEAN | 88 | define CLEAN |
66 | $(RM) -r $(TEST_GEN_PROGS) $(TEST_GEN_PROGS_EXTENDED) $(TEST_GEN_FILES) $(EXTRA_CLEAN) | 89 | $(RM) -r $(TEST_GEN_PROGS) $(TEST_GEN_PROGS_EXTENDED) $(TEST_GEN_FILES) $(EXTRA_CLEAN) |
67 | endef | 90 | endef |
@@ -69,6 +92,15 @@ endef | |||
69 | clean: | 92 | clean: |
70 | $(CLEAN) | 93 | $(CLEAN) |
71 | 94 | ||
95 | # When make O= with kselftest target from main level | ||
96 | # the following aren't defined. | ||
97 | # | ||
98 | ifneq ($(KBUILD_SRC),) | ||
99 | LINK.c = $(CC) $(CFLAGS) $(CPPFLAGS) $(LDFLAGS) $(TARGET_ARCH) | ||
100 | COMPILE.S = $(CC) $(ASFLAGS) $(CPPFLAGS) $(TARGET_ARCH) -c | ||
101 | LINK.S = $(CC) $(ASFLAGS) $(CPPFLAGS) $(LDFLAGS) $(TARGET_ARCH) | ||
102 | endif | ||
103 | |||
72 | $(OUTPUT)/%:%.c | 104 | $(OUTPUT)/%:%.c |
73 | $(LINK.c) $^ $(LDLIBS) -o $@ | 105 | $(LINK.c) $^ $(LDLIBS) -o $@ |
74 | 106 | ||
diff --git a/tools/testing/selftests/memfd/run_tests.sh b/tools/testing/selftests/memfd/run_tests.sh index daabb350697c..daabb350697c 100644..100755 --- a/tools/testing/selftests/memfd/run_tests.sh +++ b/tools/testing/selftests/memfd/run_tests.sh | |||
diff --git a/tools/testing/selftests/mqueue/Makefile b/tools/testing/selftests/mqueue/Makefile index 79a664aeb8d7..0f5e347b068d 100644 --- a/tools/testing/selftests/mqueue/Makefile +++ b/tools/testing/selftests/mqueue/Makefile | |||
@@ -5,8 +5,8 @@ TEST_GEN_PROGS := mq_open_tests mq_perf_tests | |||
5 | include ../lib.mk | 5 | include ../lib.mk |
6 | 6 | ||
7 | override define RUN_TESTS | 7 | override define RUN_TESTS |
8 | @./mq_open_tests /test1 || echo "selftests: mq_open_tests [FAIL]" | 8 | $(OUTPUT)/mq_open_tests /test1 || echo "selftests: mq_open_tests [FAIL]" |
9 | @./mq_perf_tests || echo "selftests: mq_perf_tests [FAIL]" | 9 | $(OUTPUT)//mq_perf_tests || echo "selftests: mq_perf_tests [FAIL]" |
10 | endef | 10 | endef |
11 | 11 | ||
12 | override define EMIT_TESTS | 12 | override define EMIT_TESTS |
diff --git a/tools/testing/selftests/net/.gitignore b/tools/testing/selftests/net/.gitignore index 9801253e4802..c612d6e38c62 100644 --- a/tools/testing/selftests/net/.gitignore +++ b/tools/testing/selftests/net/.gitignore | |||
@@ -6,3 +6,4 @@ reuseport_bpf | |||
6 | reuseport_bpf_cpu | 6 | reuseport_bpf_cpu |
7 | reuseport_bpf_numa | 7 | reuseport_bpf_numa |
8 | reuseport_dualstack | 8 | reuseport_dualstack |
9 | reuseaddr_conflict | ||
diff --git a/tools/testing/selftests/net/Makefile b/tools/testing/selftests/net/Makefile index de1f5772b878..d86bca991f45 100644 --- a/tools/testing/selftests/net/Makefile +++ b/tools/testing/selftests/net/Makefile | |||
@@ -5,9 +5,9 @@ CFLAGS += -I../../../../usr/include/ | |||
5 | 5 | ||
6 | TEST_PROGS := run_netsocktests run_afpackettests test_bpf.sh netdevice.sh rtnetlink.sh | 6 | TEST_PROGS := run_netsocktests run_afpackettests test_bpf.sh netdevice.sh rtnetlink.sh |
7 | TEST_GEN_FILES = socket | 7 | TEST_GEN_FILES = socket |
8 | TEST_GEN_FILES += psock_fanout psock_tpacket | 8 | TEST_GEN_FILES += psock_fanout psock_tpacket msg_zerocopy |
9 | TEST_GEN_FILES += reuseport_bpf reuseport_bpf_cpu reuseport_bpf_numa | 9 | TEST_GEN_PROGS = reuseport_bpf reuseport_bpf_cpu reuseport_bpf_numa |
10 | TEST_GEN_FILES += reuseport_dualstack msg_zerocopy | 10 | TEST_GEN_PROGS += reuseport_dualstack reuseaddr_conflict |
11 | 11 | ||
12 | include ../lib.mk | 12 | include ../lib.mk |
13 | 13 | ||
diff --git a/tools/testing/selftests/net/msg_zerocopy.c b/tools/testing/selftests/net/msg_zerocopy.c index 40232af5b023..3ab6ec403905 100644 --- a/tools/testing/selftests/net/msg_zerocopy.c +++ b/tools/testing/selftests/net/msg_zerocopy.c | |||
@@ -55,7 +55,7 @@ | |||
55 | #include <unistd.h> | 55 | #include <unistd.h> |
56 | 56 | ||
57 | #ifndef SO_EE_ORIGIN_ZEROCOPY | 57 | #ifndef SO_EE_ORIGIN_ZEROCOPY |
58 | #define SO_EE_ORIGIN_ZEROCOPY SO_EE_ORIGIN_UPAGE | 58 | #define SO_EE_ORIGIN_ZEROCOPY 5 |
59 | #endif | 59 | #endif |
60 | 60 | ||
61 | #ifndef SO_ZEROCOPY | 61 | #ifndef SO_ZEROCOPY |
diff --git a/tools/testing/selftests/net/netdevice.sh b/tools/testing/selftests/net/netdevice.sh index 4e00568d70c2..90cb903c3381 100755 --- a/tools/testing/selftests/net/netdevice.sh +++ b/tools/testing/selftests/net/netdevice.sh | |||
@@ -178,7 +178,7 @@ if [ "$(id -u)" -ne 0 ];then | |||
178 | exit 0 | 178 | exit 0 |
179 | fi | 179 | fi |
180 | 180 | ||
181 | ip -Version 2>/dev/null >/dev/null | 181 | ip link show 2>/dev/null >/dev/null |
182 | if [ $? -ne 0 ];then | 182 | if [ $? -ne 0 ];then |
183 | echo "SKIP: Could not run test without the ip tool" | 183 | echo "SKIP: Could not run test without the ip tool" |
184 | exit 0 | 184 | exit 0 |
diff --git a/tools/testing/selftests/net/reuseaddr_conflict.c b/tools/testing/selftests/net/reuseaddr_conflict.c new file mode 100644 index 000000000000..7c5b12664b03 --- /dev/null +++ b/tools/testing/selftests/net/reuseaddr_conflict.c | |||
@@ -0,0 +1,114 @@ | |||
1 | /* | ||
2 | * Test for the regression introduced by | ||
3 | * | ||
4 | * b9470c27607b ("inet: kill smallest_size and smallest_port") | ||
5 | * | ||
6 | * If we open an ipv4 socket on a port with reuseaddr we shouldn't reset the tb | ||
7 | * when we open the ipv6 conterpart, which is what was happening previously. | ||
8 | */ | ||
9 | #include <errno.h> | ||
10 | #include <error.h> | ||
11 | #include <arpa/inet.h> | ||
12 | #include <netinet/in.h> | ||
13 | #include <stdbool.h> | ||
14 | #include <stdio.h> | ||
15 | #include <sys/socket.h> | ||
16 | #include <sys/types.h> | ||
17 | #include <unistd.h> | ||
18 | |||
19 | #define PORT 9999 | ||
20 | |||
21 | int open_port(int ipv6, int any) | ||
22 | { | ||
23 | int fd = -1; | ||
24 | int reuseaddr = 1; | ||
25 | int v6only = 1; | ||
26 | int addrlen; | ||
27 | int ret = -1; | ||
28 | struct sockaddr *addr; | ||
29 | int family = ipv6 ? AF_INET6 : AF_INET; | ||
30 | |||
31 | struct sockaddr_in6 addr6 = { | ||
32 | .sin6_family = AF_INET6, | ||
33 | .sin6_port = htons(PORT), | ||
34 | .sin6_addr = in6addr_any | ||
35 | }; | ||
36 | struct sockaddr_in addr4 = { | ||
37 | .sin_family = AF_INET, | ||
38 | .sin_port = htons(PORT), | ||
39 | .sin_addr.s_addr = any ? htonl(INADDR_ANY) : inet_addr("127.0.0.1"), | ||
40 | }; | ||
41 | |||
42 | |||
43 | if (ipv6) { | ||
44 | addr = (struct sockaddr*)&addr6; | ||
45 | addrlen = sizeof(addr6); | ||
46 | } else { | ||
47 | addr = (struct sockaddr*)&addr4; | ||
48 | addrlen = sizeof(addr4); | ||
49 | } | ||
50 | |||
51 | if ((fd = socket(family, SOCK_STREAM, IPPROTO_TCP)) < 0) { | ||
52 | perror("socket"); | ||
53 | goto out; | ||
54 | } | ||
55 | |||
56 | if (ipv6 && setsockopt(fd, IPPROTO_IPV6, IPV6_V6ONLY, (void*)&v6only, | ||
57 | sizeof(v6only)) < 0) { | ||
58 | perror("setsockopt IPV6_V6ONLY"); | ||
59 | goto out; | ||
60 | } | ||
61 | |||
62 | if (setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, &reuseaddr, | ||
63 | sizeof(reuseaddr)) < 0) { | ||
64 | perror("setsockopt SO_REUSEADDR"); | ||
65 | goto out; | ||
66 | } | ||
67 | |||
68 | if (bind(fd, addr, addrlen) < 0) { | ||
69 | perror("bind"); | ||
70 | goto out; | ||
71 | } | ||
72 | |||
73 | if (any) | ||
74 | return fd; | ||
75 | |||
76 | if (listen(fd, 1) < 0) { | ||
77 | perror("listen"); | ||
78 | goto out; | ||
79 | } | ||
80 | return fd; | ||
81 | out: | ||
82 | close(fd); | ||
83 | return ret; | ||
84 | } | ||
85 | |||
86 | int main(void) | ||
87 | { | ||
88 | int listenfd; | ||
89 | int fd1, fd2; | ||
90 | |||
91 | fprintf(stderr, "Opening 127.0.0.1:%d\n", PORT); | ||
92 | listenfd = open_port(0, 0); | ||
93 | if (listenfd < 0) | ||
94 | error(1, errno, "Couldn't open listen socket"); | ||
95 | fprintf(stderr, "Opening INADDR_ANY:%d\n", PORT); | ||
96 | fd1 = open_port(0, 1); | ||
97 | if (fd1 >= 0) | ||
98 | error(1, 0, "Was allowed to create an ipv4 reuseport on a already bound non-reuseport socket"); | ||
99 | fprintf(stderr, "Opening in6addr_any:%d\n", PORT); | ||
100 | fd1 = open_port(1, 1); | ||
101 | if (fd1 < 0) | ||
102 | error(1, errno, "Couldn't open ipv6 reuseport"); | ||
103 | fprintf(stderr, "Opening INADDR_ANY:%d\n", PORT); | ||
104 | fd2 = open_port(0, 1); | ||
105 | if (fd2 >= 0) | ||
106 | error(1, 0, "Was allowed to create an ipv4 reuseport on a already bound non-reuseport socket"); | ||
107 | close(fd1); | ||
108 | fprintf(stderr, "Opening INADDR_ANY:%d after closing ipv6 socket\n", PORT); | ||
109 | fd1 = open_port(0, 1); | ||
110 | if (fd1 >= 0) | ||
111 | error(1, 0, "Was allowed to create an ipv4 reuseport on an already bound non-reuseport socket with no ipv6"); | ||
112 | fprintf(stderr, "Success"); | ||
113 | return 0; | ||
114 | } | ||
diff --git a/tools/testing/selftests/seccomp/Makefile b/tools/testing/selftests/seccomp/Makefile index aeb0c805f3ca..553d870b4ca9 100644 --- a/tools/testing/selftests/seccomp/Makefile +++ b/tools/testing/selftests/seccomp/Makefile | |||
@@ -1,8 +1,16 @@ | |||
1 | TEST_GEN_PROGS := seccomp_bpf | 1 | all: |
2 | CFLAGS += -Wl,-no-as-needed -Wall | ||
3 | LDFLAGS += -lpthread | ||
4 | 2 | ||
5 | include ../lib.mk | 3 | include ../lib.mk |
6 | 4 | ||
7 | $(TEST_GEN_PROGS): seccomp_bpf.c ../kselftest_harness.h | 5 | .PHONY: all clean |
8 | $(CC) $(CFLAGS) $(LDFLAGS) $< -o $@ | 6 | |
7 | BINARIES := seccomp_bpf seccomp_benchmark | ||
8 | CFLAGS += -Wl,-no-as-needed -Wall | ||
9 | |||
10 | seccomp_bpf: seccomp_bpf.c ../kselftest_harness.h | ||
11 | $(CC) $(CFLAGS) $(LDFLAGS) -lpthread $< -o $@ | ||
12 | |||
13 | TEST_PROGS += $(BINARIES) | ||
14 | EXTRA_CLEAN := $(BINARIES) | ||
15 | |||
16 | all: $(BINARIES) | ||
diff --git a/tools/testing/selftests/seccomp/seccomp_benchmark.c b/tools/testing/selftests/seccomp/seccomp_benchmark.c new file mode 100644 index 000000000000..5838c8697ec3 --- /dev/null +++ b/tools/testing/selftests/seccomp/seccomp_benchmark.c | |||
@@ -0,0 +1,99 @@ | |||
1 | /* | ||
2 | * Strictly speaking, this is not a test. But it can report during test | ||
3 | * runs so relative performace can be measured. | ||
4 | */ | ||
5 | #define _GNU_SOURCE | ||
6 | #include <assert.h> | ||
7 | #include <stdio.h> | ||
8 | #include <stdlib.h> | ||
9 | #include <time.h> | ||
10 | #include <unistd.h> | ||
11 | #include <linux/filter.h> | ||
12 | #include <linux/seccomp.h> | ||
13 | #include <sys/prctl.h> | ||
14 | #include <sys/syscall.h> | ||
15 | #include <sys/types.h> | ||
16 | |||
17 | #define ARRAY_SIZE(a) (sizeof(a) / sizeof(a[0])) | ||
18 | |||
19 | unsigned long long timing(clockid_t clk_id, unsigned long long samples) | ||
20 | { | ||
21 | pid_t pid, ret; | ||
22 | unsigned long long i; | ||
23 | struct timespec start, finish; | ||
24 | |||
25 | pid = getpid(); | ||
26 | assert(clock_gettime(clk_id, &start) == 0); | ||
27 | for (i = 0; i < samples; i++) { | ||
28 | ret = syscall(__NR_getpid); | ||
29 | assert(pid == ret); | ||
30 | } | ||
31 | assert(clock_gettime(clk_id, &finish) == 0); | ||
32 | |||
33 | i = finish.tv_sec - start.tv_sec; | ||
34 | i *= 1000000000; | ||
35 | i += finish.tv_nsec - start.tv_nsec; | ||
36 | |||
37 | printf("%lu.%09lu - %lu.%09lu = %llu\n", | ||
38 | finish.tv_sec, finish.tv_nsec, | ||
39 | start.tv_sec, start.tv_nsec, | ||
40 | i); | ||
41 | |||
42 | return i; | ||
43 | } | ||
44 | |||
45 | unsigned long long calibrate(void) | ||
46 | { | ||
47 | unsigned long long i; | ||
48 | |||
49 | printf("Calibrating reasonable sample size...\n"); | ||
50 | |||
51 | for (i = 5; ; i++) { | ||
52 | unsigned long long samples = 1 << i; | ||
53 | |||
54 | /* Find something that takes more than 5 seconds to run. */ | ||
55 | if (timing(CLOCK_REALTIME, samples) / 1000000000ULL > 5) | ||
56 | return samples; | ||
57 | } | ||
58 | } | ||
59 | |||
60 | int main(int argc, char *argv[]) | ||
61 | { | ||
62 | struct sock_filter filter[] = { | ||
63 | BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW), | ||
64 | }; | ||
65 | struct sock_fprog prog = { | ||
66 | .len = (unsigned short)ARRAY_SIZE(filter), | ||
67 | .filter = filter, | ||
68 | }; | ||
69 | long ret; | ||
70 | unsigned long long samples; | ||
71 | unsigned long long native, filtered; | ||
72 | |||
73 | if (argc > 1) | ||
74 | samples = strtoull(argv[1], NULL, 0); | ||
75 | else | ||
76 | samples = calibrate(); | ||
77 | |||
78 | printf("Benchmarking %llu samples...\n", samples); | ||
79 | |||
80 | native = timing(CLOCK_PROCESS_CPUTIME_ID, samples) / samples; | ||
81 | printf("getpid native: %llu ns\n", native); | ||
82 | |||
83 | ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0); | ||
84 | assert(ret == 0); | ||
85 | |||
86 | ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog); | ||
87 | assert(ret == 0); | ||
88 | |||
89 | filtered = timing(CLOCK_PROCESS_CPUTIME_ID, samples) / samples; | ||
90 | printf("getpid RET_ALLOW: %llu ns\n", filtered); | ||
91 | |||
92 | printf("Estimated seccomp overhead per syscall: %llu ns\n", | ||
93 | filtered - native); | ||
94 | |||
95 | if (filtered == native) | ||
96 | printf("Trying running again with more samples.\n"); | ||
97 | |||
98 | return 0; | ||
99 | } | ||
diff --git a/tools/testing/selftests/seccomp/seccomp_bpf.c b/tools/testing/selftests/seccomp/seccomp_bpf.c index 4d6f92a9df6b..24dbf634e2dd 100644 --- a/tools/testing/selftests/seccomp/seccomp_bpf.c +++ b/tools/testing/selftests/seccomp/seccomp_bpf.c | |||
@@ -6,10 +6,18 @@ | |||
6 | */ | 6 | */ |
7 | 7 | ||
8 | #include <sys/types.h> | 8 | #include <sys/types.h> |
9 | #include <asm/siginfo.h> | 9 | |
10 | #define __have_siginfo_t 1 | 10 | /* |
11 | #define __have_sigval_t 1 | 11 | * glibc 2.26 and later have SIGSYS in siginfo_t. Before that, |
12 | #define __have_sigevent_t 1 | 12 | * we need to use the kernel's siginfo.h file and trick glibc |
13 | * into accepting it. | ||
14 | */ | ||
15 | #if !__GLIBC_PREREQ(2, 26) | ||
16 | # include <asm/siginfo.h> | ||
17 | # define __have_siginfo_t 1 | ||
18 | # define __have_sigval_t 1 | ||
19 | # define __have_sigevent_t 1 | ||
20 | #endif | ||
13 | 21 | ||
14 | #include <errno.h> | 22 | #include <errno.h> |
15 | #include <linux/filter.h> | 23 | #include <linux/filter.h> |
@@ -68,17 +76,7 @@ | |||
68 | #define SECCOMP_MODE_FILTER 2 | 76 | #define SECCOMP_MODE_FILTER 2 |
69 | #endif | 77 | #endif |
70 | 78 | ||
71 | #ifndef SECCOMP_RET_KILL | 79 | #ifndef SECCOMP_RET_ALLOW |
72 | #define SECCOMP_RET_KILL 0x00000000U /* kill the task immediately */ | ||
73 | #define SECCOMP_RET_TRAP 0x00030000U /* disallow and force a SIGSYS */ | ||
74 | #define SECCOMP_RET_ERRNO 0x00050000U /* returns an errno */ | ||
75 | #define SECCOMP_RET_TRACE 0x7ff00000U /* pass to a tracer or disallow */ | ||
76 | #define SECCOMP_RET_ALLOW 0x7fff0000U /* allow */ | ||
77 | |||
78 | /* Masks for the return value sections. */ | ||
79 | #define SECCOMP_RET_ACTION 0x7fff0000U | ||
80 | #define SECCOMP_RET_DATA 0x0000ffffU | ||
81 | |||
82 | struct seccomp_data { | 80 | struct seccomp_data { |
83 | int nr; | 81 | int nr; |
84 | __u32 arch; | 82 | __u32 arch; |
@@ -87,6 +85,70 @@ struct seccomp_data { | |||
87 | }; | 85 | }; |
88 | #endif | 86 | #endif |
89 | 87 | ||
88 | #ifndef SECCOMP_RET_KILL_PROCESS | ||
89 | #define SECCOMP_RET_KILL_PROCESS 0x80000000U /* kill the process */ | ||
90 | #define SECCOMP_RET_KILL_THREAD 0x00000000U /* kill the thread */ | ||
91 | #endif | ||
92 | #ifndef SECCOMP_RET_KILL | ||
93 | #define SECCOMP_RET_KILL SECCOMP_RET_KILL_THREAD | ||
94 | #define SECCOMP_RET_TRAP 0x00030000U /* disallow and force a SIGSYS */ | ||
95 | #define SECCOMP_RET_ERRNO 0x00050000U /* returns an errno */ | ||
96 | #define SECCOMP_RET_TRACE 0x7ff00000U /* pass to a tracer or disallow */ | ||
97 | #define SECCOMP_RET_ALLOW 0x7fff0000U /* allow */ | ||
98 | #endif | ||
99 | #ifndef SECCOMP_RET_LOG | ||
100 | #define SECCOMP_RET_LOG 0x7ffc0000U /* allow after logging */ | ||
101 | #endif | ||
102 | |||
103 | #ifndef __NR_seccomp | ||
104 | # if defined(__i386__) | ||
105 | # define __NR_seccomp 354 | ||
106 | # elif defined(__x86_64__) | ||
107 | # define __NR_seccomp 317 | ||
108 | # elif defined(__arm__) | ||
109 | # define __NR_seccomp 383 | ||
110 | # elif defined(__aarch64__) | ||
111 | # define __NR_seccomp 277 | ||
112 | # elif defined(__hppa__) | ||
113 | # define __NR_seccomp 338 | ||
114 | # elif defined(__powerpc__) | ||
115 | # define __NR_seccomp 358 | ||
116 | # elif defined(__s390__) | ||
117 | # define __NR_seccomp 348 | ||
118 | # else | ||
119 | # warning "seccomp syscall number unknown for this architecture" | ||
120 | # define __NR_seccomp 0xffff | ||
121 | # endif | ||
122 | #endif | ||
123 | |||
124 | #ifndef SECCOMP_SET_MODE_STRICT | ||
125 | #define SECCOMP_SET_MODE_STRICT 0 | ||
126 | #endif | ||
127 | |||
128 | #ifndef SECCOMP_SET_MODE_FILTER | ||
129 | #define SECCOMP_SET_MODE_FILTER 1 | ||
130 | #endif | ||
131 | |||
132 | #ifndef SECCOMP_GET_ACTION_AVAIL | ||
133 | #define SECCOMP_GET_ACTION_AVAIL 2 | ||
134 | #endif | ||
135 | |||
136 | #ifndef SECCOMP_FILTER_FLAG_TSYNC | ||
137 | #define SECCOMP_FILTER_FLAG_TSYNC 1 | ||
138 | #endif | ||
139 | |||
140 | #ifndef SECCOMP_FILTER_FLAG_LOG | ||
141 | #define SECCOMP_FILTER_FLAG_LOG 2 | ||
142 | #endif | ||
143 | |||
144 | #ifndef seccomp | ||
145 | int seccomp(unsigned int op, unsigned int flags, void *args) | ||
146 | { | ||
147 | errno = 0; | ||
148 | return syscall(__NR_seccomp, op, flags, args); | ||
149 | } | ||
150 | #endif | ||
151 | |||
90 | #if __BYTE_ORDER == __LITTLE_ENDIAN | 152 | #if __BYTE_ORDER == __LITTLE_ENDIAN |
91 | #define syscall_arg(_n) (offsetof(struct seccomp_data, args[_n])) | 153 | #define syscall_arg(_n) (offsetof(struct seccomp_data, args[_n])) |
92 | #elif __BYTE_ORDER == __BIG_ENDIAN | 154 | #elif __BYTE_ORDER == __BIG_ENDIAN |
@@ -136,7 +198,7 @@ TEST(no_new_privs_support) | |||
136 | } | 198 | } |
137 | } | 199 | } |
138 | 200 | ||
139 | /* Tests kernel support by checking for a copy_from_user() fault on * NULL. */ | 201 | /* Tests kernel support by checking for a copy_from_user() fault on NULL. */ |
140 | TEST(mode_filter_support) | 202 | TEST(mode_filter_support) |
141 | { | 203 | { |
142 | long ret; | 204 | long ret; |
@@ -342,6 +404,28 @@ TEST(empty_prog) | |||
342 | EXPECT_EQ(EINVAL, errno); | 404 | EXPECT_EQ(EINVAL, errno); |
343 | } | 405 | } |
344 | 406 | ||
407 | TEST(log_all) | ||
408 | { | ||
409 | struct sock_filter filter[] = { | ||
410 | BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_LOG), | ||
411 | }; | ||
412 | struct sock_fprog prog = { | ||
413 | .len = (unsigned short)ARRAY_SIZE(filter), | ||
414 | .filter = filter, | ||
415 | }; | ||
416 | long ret; | ||
417 | pid_t parent = getppid(); | ||
418 | |||
419 | ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0); | ||
420 | ASSERT_EQ(0, ret); | ||
421 | |||
422 | ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog); | ||
423 | ASSERT_EQ(0, ret); | ||
424 | |||
425 | /* getppid() should succeed and be logged (no check for logging) */ | ||
426 | EXPECT_EQ(parent, syscall(__NR_getppid)); | ||
427 | } | ||
428 | |||
345 | TEST_SIGNAL(unknown_ret_is_kill_inside, SIGSYS) | 429 | TEST_SIGNAL(unknown_ret_is_kill_inside, SIGSYS) |
346 | { | 430 | { |
347 | struct sock_filter filter[] = { | 431 | struct sock_filter filter[] = { |
@@ -520,6 +604,117 @@ TEST_SIGNAL(KILL_one_arg_six, SIGSYS) | |||
520 | close(fd); | 604 | close(fd); |
521 | } | 605 | } |
522 | 606 | ||
607 | /* This is a thread task to die via seccomp filter violation. */ | ||
608 | void *kill_thread(void *data) | ||
609 | { | ||
610 | bool die = (bool)data; | ||
611 | |||
612 | if (die) { | ||
613 | prctl(PR_GET_SECCOMP, 0, 0, 0, 0); | ||
614 | return (void *)SIBLING_EXIT_FAILURE; | ||
615 | } | ||
616 | |||
617 | return (void *)SIBLING_EXIT_UNKILLED; | ||
618 | } | ||
619 | |||
620 | /* Prepare a thread that will kill itself or both of us. */ | ||
621 | void kill_thread_or_group(struct __test_metadata *_metadata, bool kill_process) | ||
622 | { | ||
623 | pthread_t thread; | ||
624 | void *status; | ||
625 | /* Kill only when calling __NR_prctl. */ | ||
626 | struct sock_filter filter_thread[] = { | ||
627 | BPF_STMT(BPF_LD|BPF_W|BPF_ABS, | ||
628 | offsetof(struct seccomp_data, nr)), | ||
629 | BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_prctl, 0, 1), | ||
630 | BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_KILL_THREAD), | ||
631 | BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW), | ||
632 | }; | ||
633 | struct sock_fprog prog_thread = { | ||
634 | .len = (unsigned short)ARRAY_SIZE(filter_thread), | ||
635 | .filter = filter_thread, | ||
636 | }; | ||
637 | struct sock_filter filter_process[] = { | ||
638 | BPF_STMT(BPF_LD|BPF_W|BPF_ABS, | ||
639 | offsetof(struct seccomp_data, nr)), | ||
640 | BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_prctl, 0, 1), | ||
641 | BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_KILL_PROCESS), | ||
642 | BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW), | ||
643 | }; | ||
644 | struct sock_fprog prog_process = { | ||
645 | .len = (unsigned short)ARRAY_SIZE(filter_process), | ||
646 | .filter = filter_process, | ||
647 | }; | ||
648 | |||
649 | ASSERT_EQ(0, prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0)) { | ||
650 | TH_LOG("Kernel does not support PR_SET_NO_NEW_PRIVS!"); | ||
651 | } | ||
652 | |||
653 | ASSERT_EQ(0, seccomp(SECCOMP_SET_MODE_FILTER, 0, | ||
654 | kill_process ? &prog_process : &prog_thread)); | ||
655 | |||
656 | /* | ||
657 | * Add the KILL_THREAD rule again to make sure that the KILL_PROCESS | ||
658 | * flag cannot be downgraded by a new filter. | ||
659 | */ | ||
660 | ASSERT_EQ(0, seccomp(SECCOMP_SET_MODE_FILTER, 0, &prog_thread)); | ||
661 | |||
662 | /* Start a thread that will exit immediately. */ | ||
663 | ASSERT_EQ(0, pthread_create(&thread, NULL, kill_thread, (void *)false)); | ||
664 | ASSERT_EQ(0, pthread_join(thread, &status)); | ||
665 | ASSERT_EQ(SIBLING_EXIT_UNKILLED, (unsigned long)status); | ||
666 | |||
667 | /* Start a thread that will die immediately. */ | ||
668 | ASSERT_EQ(0, pthread_create(&thread, NULL, kill_thread, (void *)true)); | ||
669 | ASSERT_EQ(0, pthread_join(thread, &status)); | ||
670 | ASSERT_NE(SIBLING_EXIT_FAILURE, (unsigned long)status); | ||
671 | |||
672 | /* | ||
673 | * If we get here, only the spawned thread died. Let the parent know | ||
674 | * the whole process didn't die (i.e. this thread, the spawner, | ||
675 | * stayed running). | ||
676 | */ | ||
677 | exit(42); | ||
678 | } | ||
679 | |||
680 | TEST(KILL_thread) | ||
681 | { | ||
682 | int status; | ||
683 | pid_t child_pid; | ||
684 | |||
685 | child_pid = fork(); | ||
686 | ASSERT_LE(0, child_pid); | ||
687 | if (child_pid == 0) { | ||
688 | kill_thread_or_group(_metadata, false); | ||
689 | _exit(38); | ||
690 | } | ||
691 | |||
692 | ASSERT_EQ(child_pid, waitpid(child_pid, &status, 0)); | ||
693 | |||
694 | /* If only the thread was killed, we'll see exit 42. */ | ||
695 | ASSERT_TRUE(WIFEXITED(status)); | ||
696 | ASSERT_EQ(42, WEXITSTATUS(status)); | ||
697 | } | ||
698 | |||
699 | TEST(KILL_process) | ||
700 | { | ||
701 | int status; | ||
702 | pid_t child_pid; | ||
703 | |||
704 | child_pid = fork(); | ||
705 | ASSERT_LE(0, child_pid); | ||
706 | if (child_pid == 0) { | ||
707 | kill_thread_or_group(_metadata, true); | ||
708 | _exit(38); | ||
709 | } | ||
710 | |||
711 | ASSERT_EQ(child_pid, waitpid(child_pid, &status, 0)); | ||
712 | |||
713 | /* If the entire process was killed, we'll see SIGSYS. */ | ||
714 | ASSERT_TRUE(WIFSIGNALED(status)); | ||
715 | ASSERT_EQ(SIGSYS, WTERMSIG(status)); | ||
716 | } | ||
717 | |||
523 | /* TODO(wad) add 64-bit versus 32-bit arg tests. */ | 718 | /* TODO(wad) add 64-bit versus 32-bit arg tests. */ |
524 | TEST(arg_out_of_range) | 719 | TEST(arg_out_of_range) |
525 | { | 720 | { |
@@ -541,26 +736,30 @@ TEST(arg_out_of_range) | |||
541 | EXPECT_EQ(EINVAL, errno); | 736 | EXPECT_EQ(EINVAL, errno); |
542 | } | 737 | } |
543 | 738 | ||
739 | #define ERRNO_FILTER(name, errno) \ | ||
740 | struct sock_filter _read_filter_##name[] = { \ | ||
741 | BPF_STMT(BPF_LD|BPF_W|BPF_ABS, \ | ||
742 | offsetof(struct seccomp_data, nr)), \ | ||
743 | BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_read, 0, 1), \ | ||
744 | BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ERRNO | errno), \ | ||
745 | BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW), \ | ||
746 | }; \ | ||
747 | struct sock_fprog prog_##name = { \ | ||
748 | .len = (unsigned short)ARRAY_SIZE(_read_filter_##name), \ | ||
749 | .filter = _read_filter_##name, \ | ||
750 | } | ||
751 | |||
752 | /* Make sure basic errno values are correctly passed through a filter. */ | ||
544 | TEST(ERRNO_valid) | 753 | TEST(ERRNO_valid) |
545 | { | 754 | { |
546 | struct sock_filter filter[] = { | 755 | ERRNO_FILTER(valid, E2BIG); |
547 | BPF_STMT(BPF_LD|BPF_W|BPF_ABS, | ||
548 | offsetof(struct seccomp_data, nr)), | ||
549 | BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_read, 0, 1), | ||
550 | BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ERRNO | E2BIG), | ||
551 | BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW), | ||
552 | }; | ||
553 | struct sock_fprog prog = { | ||
554 | .len = (unsigned short)ARRAY_SIZE(filter), | ||
555 | .filter = filter, | ||
556 | }; | ||
557 | long ret; | 756 | long ret; |
558 | pid_t parent = getppid(); | 757 | pid_t parent = getppid(); |
559 | 758 | ||
560 | ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0); | 759 | ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0); |
561 | ASSERT_EQ(0, ret); | 760 | ASSERT_EQ(0, ret); |
562 | 761 | ||
563 | ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog); | 762 | ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog_valid); |
564 | ASSERT_EQ(0, ret); | 763 | ASSERT_EQ(0, ret); |
565 | 764 | ||
566 | EXPECT_EQ(parent, syscall(__NR_getppid)); | 765 | EXPECT_EQ(parent, syscall(__NR_getppid)); |
@@ -568,26 +767,17 @@ TEST(ERRNO_valid) | |||
568 | EXPECT_EQ(E2BIG, errno); | 767 | EXPECT_EQ(E2BIG, errno); |
569 | } | 768 | } |
570 | 769 | ||
770 | /* Make sure an errno of zero is correctly handled by the arch code. */ | ||
571 | TEST(ERRNO_zero) | 771 | TEST(ERRNO_zero) |
572 | { | 772 | { |
573 | struct sock_filter filter[] = { | 773 | ERRNO_FILTER(zero, 0); |
574 | BPF_STMT(BPF_LD|BPF_W|BPF_ABS, | ||
575 | offsetof(struct seccomp_data, nr)), | ||
576 | BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_read, 0, 1), | ||
577 | BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ERRNO | 0), | ||
578 | BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW), | ||
579 | }; | ||
580 | struct sock_fprog prog = { | ||
581 | .len = (unsigned short)ARRAY_SIZE(filter), | ||
582 | .filter = filter, | ||
583 | }; | ||
584 | long ret; | 774 | long ret; |
585 | pid_t parent = getppid(); | 775 | pid_t parent = getppid(); |
586 | 776 | ||
587 | ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0); | 777 | ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0); |
588 | ASSERT_EQ(0, ret); | 778 | ASSERT_EQ(0, ret); |
589 | 779 | ||
590 | ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog); | 780 | ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog_zero); |
591 | ASSERT_EQ(0, ret); | 781 | ASSERT_EQ(0, ret); |
592 | 782 | ||
593 | EXPECT_EQ(parent, syscall(__NR_getppid)); | 783 | EXPECT_EQ(parent, syscall(__NR_getppid)); |
@@ -595,26 +785,21 @@ TEST(ERRNO_zero) | |||
595 | EXPECT_EQ(0, read(0, NULL, 0)); | 785 | EXPECT_EQ(0, read(0, NULL, 0)); |
596 | } | 786 | } |
597 | 787 | ||
788 | /* | ||
789 | * The SECCOMP_RET_DATA mask is 16 bits wide, but errno is smaller. | ||
790 | * This tests that the errno value gets capped correctly, fixed by | ||
791 | * 580c57f10768 ("seccomp: cap SECCOMP_RET_ERRNO data to MAX_ERRNO"). | ||
792 | */ | ||
598 | TEST(ERRNO_capped) | 793 | TEST(ERRNO_capped) |
599 | { | 794 | { |
600 | struct sock_filter filter[] = { | 795 | ERRNO_FILTER(capped, 4096); |
601 | BPF_STMT(BPF_LD|BPF_W|BPF_ABS, | ||
602 | offsetof(struct seccomp_data, nr)), | ||
603 | BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_read, 0, 1), | ||
604 | BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ERRNO | 4096), | ||
605 | BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW), | ||
606 | }; | ||
607 | struct sock_fprog prog = { | ||
608 | .len = (unsigned short)ARRAY_SIZE(filter), | ||
609 | .filter = filter, | ||
610 | }; | ||
611 | long ret; | 796 | long ret; |
612 | pid_t parent = getppid(); | 797 | pid_t parent = getppid(); |
613 | 798 | ||
614 | ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0); | 799 | ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0); |
615 | ASSERT_EQ(0, ret); | 800 | ASSERT_EQ(0, ret); |
616 | 801 | ||
617 | ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog); | 802 | ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog_capped); |
618 | ASSERT_EQ(0, ret); | 803 | ASSERT_EQ(0, ret); |
619 | 804 | ||
620 | EXPECT_EQ(parent, syscall(__NR_getppid)); | 805 | EXPECT_EQ(parent, syscall(__NR_getppid)); |
@@ -622,6 +807,37 @@ TEST(ERRNO_capped) | |||
622 | EXPECT_EQ(4095, errno); | 807 | EXPECT_EQ(4095, errno); |
623 | } | 808 | } |
624 | 809 | ||
810 | /* | ||
811 | * Filters are processed in reverse order: last applied is executed first. | ||
812 | * Since only the SECCOMP_RET_ACTION mask is tested for return values, the | ||
813 | * SECCOMP_RET_DATA mask results will follow the most recently applied | ||
814 | * matching filter return (and not the lowest or highest value). | ||
815 | */ | ||
816 | TEST(ERRNO_order) | ||
817 | { | ||
818 | ERRNO_FILTER(first, 11); | ||
819 | ERRNO_FILTER(second, 13); | ||
820 | ERRNO_FILTER(third, 12); | ||
821 | long ret; | ||
822 | pid_t parent = getppid(); | ||
823 | |||
824 | ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0); | ||
825 | ASSERT_EQ(0, ret); | ||
826 | |||
827 | ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog_first); | ||
828 | ASSERT_EQ(0, ret); | ||
829 | |||
830 | ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog_second); | ||
831 | ASSERT_EQ(0, ret); | ||
832 | |||
833 | ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog_third); | ||
834 | ASSERT_EQ(0, ret); | ||
835 | |||
836 | EXPECT_EQ(parent, syscall(__NR_getppid)); | ||
837 | EXPECT_EQ(-1, read(0, NULL, 0)); | ||
838 | EXPECT_EQ(12, errno); | ||
839 | } | ||
840 | |||
625 | FIXTURE_DATA(TRAP) { | 841 | FIXTURE_DATA(TRAP) { |
626 | struct sock_fprog prog; | 842 | struct sock_fprog prog; |
627 | }; | 843 | }; |
@@ -676,7 +892,7 @@ TEST_F_SIGNAL(TRAP, ign, SIGSYS) | |||
676 | syscall(__NR_getpid); | 892 | syscall(__NR_getpid); |
677 | } | 893 | } |
678 | 894 | ||
679 | static struct siginfo TRAP_info; | 895 | static siginfo_t TRAP_info; |
680 | static volatile int TRAP_nr; | 896 | static volatile int TRAP_nr; |
681 | static void TRAP_action(int nr, siginfo_t *info, void *void_context) | 897 | static void TRAP_action(int nr, siginfo_t *info, void *void_context) |
682 | { | 898 | { |
@@ -735,6 +951,7 @@ TEST_F(TRAP, handler) | |||
735 | 951 | ||
736 | FIXTURE_DATA(precedence) { | 952 | FIXTURE_DATA(precedence) { |
737 | struct sock_fprog allow; | 953 | struct sock_fprog allow; |
954 | struct sock_fprog log; | ||
738 | struct sock_fprog trace; | 955 | struct sock_fprog trace; |
739 | struct sock_fprog error; | 956 | struct sock_fprog error; |
740 | struct sock_fprog trap; | 957 | struct sock_fprog trap; |
@@ -746,6 +963,13 @@ FIXTURE_SETUP(precedence) | |||
746 | struct sock_filter allow_insns[] = { | 963 | struct sock_filter allow_insns[] = { |
747 | BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW), | 964 | BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW), |
748 | }; | 965 | }; |
966 | struct sock_filter log_insns[] = { | ||
967 | BPF_STMT(BPF_LD|BPF_W|BPF_ABS, | ||
968 | offsetof(struct seccomp_data, nr)), | ||
969 | BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_getpid, 1, 0), | ||
970 | BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW), | ||
971 | BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_LOG), | ||
972 | }; | ||
749 | struct sock_filter trace_insns[] = { | 973 | struct sock_filter trace_insns[] = { |
750 | BPF_STMT(BPF_LD|BPF_W|BPF_ABS, | 974 | BPF_STMT(BPF_LD|BPF_W|BPF_ABS, |
751 | offsetof(struct seccomp_data, nr)), | 975 | offsetof(struct seccomp_data, nr)), |
@@ -782,6 +1006,7 @@ FIXTURE_SETUP(precedence) | |||
782 | memcpy(self->_x.filter, &_x##_insns, sizeof(_x##_insns)); \ | 1006 | memcpy(self->_x.filter, &_x##_insns, sizeof(_x##_insns)); \ |
783 | self->_x.len = (unsigned short)ARRAY_SIZE(_x##_insns) | 1007 | self->_x.len = (unsigned short)ARRAY_SIZE(_x##_insns) |
784 | FILTER_ALLOC(allow); | 1008 | FILTER_ALLOC(allow); |
1009 | FILTER_ALLOC(log); | ||
785 | FILTER_ALLOC(trace); | 1010 | FILTER_ALLOC(trace); |
786 | FILTER_ALLOC(error); | 1011 | FILTER_ALLOC(error); |
787 | FILTER_ALLOC(trap); | 1012 | FILTER_ALLOC(trap); |
@@ -792,6 +1017,7 @@ FIXTURE_TEARDOWN(precedence) | |||
792 | { | 1017 | { |
793 | #define FILTER_FREE(_x) if (self->_x.filter) free(self->_x.filter) | 1018 | #define FILTER_FREE(_x) if (self->_x.filter) free(self->_x.filter) |
794 | FILTER_FREE(allow); | 1019 | FILTER_FREE(allow); |
1020 | FILTER_FREE(log); | ||
795 | FILTER_FREE(trace); | 1021 | FILTER_FREE(trace); |
796 | FILTER_FREE(error); | 1022 | FILTER_FREE(error); |
797 | FILTER_FREE(trap); | 1023 | FILTER_FREE(trap); |
@@ -809,6 +1035,8 @@ TEST_F(precedence, allow_ok) | |||
809 | 1035 | ||
810 | ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->allow); | 1036 | ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->allow); |
811 | ASSERT_EQ(0, ret); | 1037 | ASSERT_EQ(0, ret); |
1038 | ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->log); | ||
1039 | ASSERT_EQ(0, ret); | ||
812 | ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->trace); | 1040 | ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->trace); |
813 | ASSERT_EQ(0, ret); | 1041 | ASSERT_EQ(0, ret); |
814 | ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->error); | 1042 | ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->error); |
@@ -833,6 +1061,8 @@ TEST_F_SIGNAL(precedence, kill_is_highest, SIGSYS) | |||
833 | 1061 | ||
834 | ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->allow); | 1062 | ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->allow); |
835 | ASSERT_EQ(0, ret); | 1063 | ASSERT_EQ(0, ret); |
1064 | ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->log); | ||
1065 | ASSERT_EQ(0, ret); | ||
836 | ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->trace); | 1066 | ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->trace); |
837 | ASSERT_EQ(0, ret); | 1067 | ASSERT_EQ(0, ret); |
838 | ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->error); | 1068 | ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->error); |
@@ -864,6 +1094,8 @@ TEST_F_SIGNAL(precedence, kill_is_highest_in_any_order, SIGSYS) | |||
864 | ASSERT_EQ(0, ret); | 1094 | ASSERT_EQ(0, ret); |
865 | ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->error); | 1095 | ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->error); |
866 | ASSERT_EQ(0, ret); | 1096 | ASSERT_EQ(0, ret); |
1097 | ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->log); | ||
1098 | ASSERT_EQ(0, ret); | ||
867 | ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->trace); | 1099 | ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->trace); |
868 | ASSERT_EQ(0, ret); | 1100 | ASSERT_EQ(0, ret); |
869 | ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->trap); | 1101 | ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->trap); |
@@ -885,6 +1117,8 @@ TEST_F_SIGNAL(precedence, trap_is_second, SIGSYS) | |||
885 | 1117 | ||
886 | ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->allow); | 1118 | ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->allow); |
887 | ASSERT_EQ(0, ret); | 1119 | ASSERT_EQ(0, ret); |
1120 | ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->log); | ||
1121 | ASSERT_EQ(0, ret); | ||
888 | ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->trace); | 1122 | ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->trace); |
889 | ASSERT_EQ(0, ret); | 1123 | ASSERT_EQ(0, ret); |
890 | ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->error); | 1124 | ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->error); |
@@ -910,6 +1144,8 @@ TEST_F_SIGNAL(precedence, trap_is_second_in_any_order, SIGSYS) | |||
910 | ASSERT_EQ(0, ret); | 1144 | ASSERT_EQ(0, ret); |
911 | ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->trap); | 1145 | ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->trap); |
912 | ASSERT_EQ(0, ret); | 1146 | ASSERT_EQ(0, ret); |
1147 | ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->log); | ||
1148 | ASSERT_EQ(0, ret); | ||
913 | ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->trace); | 1149 | ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->trace); |
914 | ASSERT_EQ(0, ret); | 1150 | ASSERT_EQ(0, ret); |
915 | ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->error); | 1151 | ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->error); |
@@ -931,6 +1167,8 @@ TEST_F(precedence, errno_is_third) | |||
931 | 1167 | ||
932 | ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->allow); | 1168 | ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->allow); |
933 | ASSERT_EQ(0, ret); | 1169 | ASSERT_EQ(0, ret); |
1170 | ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->log); | ||
1171 | ASSERT_EQ(0, ret); | ||
934 | ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->trace); | 1172 | ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->trace); |
935 | ASSERT_EQ(0, ret); | 1173 | ASSERT_EQ(0, ret); |
936 | ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->error); | 1174 | ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->error); |
@@ -949,6 +1187,8 @@ TEST_F(precedence, errno_is_third_in_any_order) | |||
949 | ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0); | 1187 | ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0); |
950 | ASSERT_EQ(0, ret); | 1188 | ASSERT_EQ(0, ret); |
951 | 1189 | ||
1190 | ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->log); | ||
1191 | ASSERT_EQ(0, ret); | ||
952 | ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->error); | 1192 | ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->error); |
953 | ASSERT_EQ(0, ret); | 1193 | ASSERT_EQ(0, ret); |
954 | ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->trace); | 1194 | ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->trace); |
@@ -971,6 +1211,8 @@ TEST_F(precedence, trace_is_fourth) | |||
971 | 1211 | ||
972 | ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->allow); | 1212 | ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->allow); |
973 | ASSERT_EQ(0, ret); | 1213 | ASSERT_EQ(0, ret); |
1214 | ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->log); | ||
1215 | ASSERT_EQ(0, ret); | ||
974 | ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->trace); | 1216 | ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->trace); |
975 | ASSERT_EQ(0, ret); | 1217 | ASSERT_EQ(0, ret); |
976 | /* Should work just fine. */ | 1218 | /* Should work just fine. */ |
@@ -992,12 +1234,54 @@ TEST_F(precedence, trace_is_fourth_in_any_order) | |||
992 | ASSERT_EQ(0, ret); | 1234 | ASSERT_EQ(0, ret); |
993 | ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->allow); | 1235 | ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->allow); |
994 | ASSERT_EQ(0, ret); | 1236 | ASSERT_EQ(0, ret); |
1237 | ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->log); | ||
1238 | ASSERT_EQ(0, ret); | ||
995 | /* Should work just fine. */ | 1239 | /* Should work just fine. */ |
996 | EXPECT_EQ(parent, syscall(__NR_getppid)); | 1240 | EXPECT_EQ(parent, syscall(__NR_getppid)); |
997 | /* No ptracer */ | 1241 | /* No ptracer */ |
998 | EXPECT_EQ(-1, syscall(__NR_getpid)); | 1242 | EXPECT_EQ(-1, syscall(__NR_getpid)); |
999 | } | 1243 | } |
1000 | 1244 | ||
1245 | TEST_F(precedence, log_is_fifth) | ||
1246 | { | ||
1247 | pid_t mypid, parent; | ||
1248 | long ret; | ||
1249 | |||
1250 | mypid = getpid(); | ||
1251 | parent = getppid(); | ||
1252 | ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0); | ||
1253 | ASSERT_EQ(0, ret); | ||
1254 | |||
1255 | ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->allow); | ||
1256 | ASSERT_EQ(0, ret); | ||
1257 | ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->log); | ||
1258 | ASSERT_EQ(0, ret); | ||
1259 | /* Should work just fine. */ | ||
1260 | EXPECT_EQ(parent, syscall(__NR_getppid)); | ||
1261 | /* Should also work just fine */ | ||
1262 | EXPECT_EQ(mypid, syscall(__NR_getpid)); | ||
1263 | } | ||
1264 | |||
1265 | TEST_F(precedence, log_is_fifth_in_any_order) | ||
1266 | { | ||
1267 | pid_t mypid, parent; | ||
1268 | long ret; | ||
1269 | |||
1270 | mypid = getpid(); | ||
1271 | parent = getppid(); | ||
1272 | ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0); | ||
1273 | ASSERT_EQ(0, ret); | ||
1274 | |||
1275 | ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->log); | ||
1276 | ASSERT_EQ(0, ret); | ||
1277 | ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->allow); | ||
1278 | ASSERT_EQ(0, ret); | ||
1279 | /* Should work just fine. */ | ||
1280 | EXPECT_EQ(parent, syscall(__NR_getppid)); | ||
1281 | /* Should also work just fine */ | ||
1282 | EXPECT_EQ(mypid, syscall(__NR_getpid)); | ||
1283 | } | ||
1284 | |||
1001 | #ifndef PTRACE_O_TRACESECCOMP | 1285 | #ifndef PTRACE_O_TRACESECCOMP |
1002 | #define PTRACE_O_TRACESECCOMP 0x00000080 | 1286 | #define PTRACE_O_TRACESECCOMP 0x00000080 |
1003 | #endif | 1287 | #endif |
@@ -1262,6 +1546,13 @@ TEST_F(TRACE_poke, getpid_runs_normally) | |||
1262 | # error "Do not know how to find your architecture's registers and syscalls" | 1546 | # error "Do not know how to find your architecture's registers and syscalls" |
1263 | #endif | 1547 | #endif |
1264 | 1548 | ||
1549 | /* When the syscall return can't be changed, stub out the tests for it. */ | ||
1550 | #ifdef SYSCALL_NUM_RET_SHARE_REG | ||
1551 | # define EXPECT_SYSCALL_RETURN(val, action) EXPECT_EQ(-1, action) | ||
1552 | #else | ||
1553 | # define EXPECT_SYSCALL_RETURN(val, action) EXPECT_EQ(val, action) | ||
1554 | #endif | ||
1555 | |||
1265 | /* Use PTRACE_GETREGS and PTRACE_SETREGS when available. This is useful for | 1556 | /* Use PTRACE_GETREGS and PTRACE_SETREGS when available. This is useful for |
1266 | * architectures without HAVE_ARCH_TRACEHOOK (e.g. User-mode Linux). | 1557 | * architectures without HAVE_ARCH_TRACEHOOK (e.g. User-mode Linux). |
1267 | */ | 1558 | */ |
@@ -1357,7 +1648,7 @@ void change_syscall(struct __test_metadata *_metadata, | |||
1357 | #ifdef SYSCALL_NUM_RET_SHARE_REG | 1648 | #ifdef SYSCALL_NUM_RET_SHARE_REG |
1358 | TH_LOG("Can't modify syscall return on this architecture"); | 1649 | TH_LOG("Can't modify syscall return on this architecture"); |
1359 | #else | 1650 | #else |
1360 | regs.SYSCALL_RET = 1; | 1651 | regs.SYSCALL_RET = EPERM; |
1361 | #endif | 1652 | #endif |
1362 | 1653 | ||
1363 | #ifdef HAVE_GETREGS | 1654 | #ifdef HAVE_GETREGS |
@@ -1426,6 +1717,8 @@ void tracer_ptrace(struct __test_metadata *_metadata, pid_t tracee, | |||
1426 | 1717 | ||
1427 | if (nr == __NR_getpid) | 1718 | if (nr == __NR_getpid) |
1428 | change_syscall(_metadata, tracee, __NR_getppid); | 1719 | change_syscall(_metadata, tracee, __NR_getppid); |
1720 | if (nr == __NR_open) | ||
1721 | change_syscall(_metadata, tracee, -1); | ||
1429 | } | 1722 | } |
1430 | 1723 | ||
1431 | FIXTURE_DATA(TRACE_syscall) { | 1724 | FIXTURE_DATA(TRACE_syscall) { |
@@ -1480,6 +1773,28 @@ FIXTURE_TEARDOWN(TRACE_syscall) | |||
1480 | free(self->prog.filter); | 1773 | free(self->prog.filter); |
1481 | } | 1774 | } |
1482 | 1775 | ||
1776 | TEST_F(TRACE_syscall, ptrace_syscall_redirected) | ||
1777 | { | ||
1778 | /* Swap SECCOMP_RET_TRACE tracer for PTRACE_SYSCALL tracer. */ | ||
1779 | teardown_trace_fixture(_metadata, self->tracer); | ||
1780 | self->tracer = setup_trace_fixture(_metadata, tracer_ptrace, NULL, | ||
1781 | true); | ||
1782 | |||
1783 | /* Tracer will redirect getpid to getppid. */ | ||
1784 | EXPECT_NE(self->mypid, syscall(__NR_getpid)); | ||
1785 | } | ||
1786 | |||
1787 | TEST_F(TRACE_syscall, ptrace_syscall_dropped) | ||
1788 | { | ||
1789 | /* Swap SECCOMP_RET_TRACE tracer for PTRACE_SYSCALL tracer. */ | ||
1790 | teardown_trace_fixture(_metadata, self->tracer); | ||
1791 | self->tracer = setup_trace_fixture(_metadata, tracer_ptrace, NULL, | ||
1792 | true); | ||
1793 | |||
1794 | /* Tracer should skip the open syscall, resulting in EPERM. */ | ||
1795 | EXPECT_SYSCALL_RETURN(EPERM, syscall(__NR_open)); | ||
1796 | } | ||
1797 | |||
1483 | TEST_F(TRACE_syscall, syscall_allowed) | 1798 | TEST_F(TRACE_syscall, syscall_allowed) |
1484 | { | 1799 | { |
1485 | long ret; | 1800 | long ret; |
@@ -1520,13 +1835,8 @@ TEST_F(TRACE_syscall, syscall_dropped) | |||
1520 | ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->prog, 0, 0); | 1835 | ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->prog, 0, 0); |
1521 | ASSERT_EQ(0, ret); | 1836 | ASSERT_EQ(0, ret); |
1522 | 1837 | ||
1523 | #ifdef SYSCALL_NUM_RET_SHARE_REG | ||
1524 | /* gettid has been skipped */ | ||
1525 | EXPECT_EQ(-1, syscall(__NR_gettid)); | ||
1526 | #else | ||
1527 | /* gettid has been skipped and an altered return value stored. */ | 1838 | /* gettid has been skipped and an altered return value stored. */ |
1528 | EXPECT_EQ(1, syscall(__NR_gettid)); | 1839 | EXPECT_SYSCALL_RETURN(EPERM, syscall(__NR_gettid)); |
1529 | #endif | ||
1530 | EXPECT_NE(self->mytid, syscall(__NR_gettid)); | 1840 | EXPECT_NE(self->mytid, syscall(__NR_gettid)); |
1531 | } | 1841 | } |
1532 | 1842 | ||
@@ -1557,6 +1867,7 @@ TEST_F(TRACE_syscall, skip_after_RET_TRACE) | |||
1557 | ASSERT_EQ(0, ret); | 1867 | ASSERT_EQ(0, ret); |
1558 | 1868 | ||
1559 | /* Tracer will redirect getpid to getppid, and we should see EPERM. */ | 1869 | /* Tracer will redirect getpid to getppid, and we should see EPERM. */ |
1870 | errno = 0; | ||
1560 | EXPECT_EQ(-1, syscall(__NR_getpid)); | 1871 | EXPECT_EQ(-1, syscall(__NR_getpid)); |
1561 | EXPECT_EQ(EPERM, errno); | 1872 | EXPECT_EQ(EPERM, errno); |
1562 | } | 1873 | } |
@@ -1654,47 +1965,6 @@ TEST_F_SIGNAL(TRACE_syscall, kill_after_ptrace, SIGSYS) | |||
1654 | EXPECT_NE(self->mypid, syscall(__NR_getpid)); | 1965 | EXPECT_NE(self->mypid, syscall(__NR_getpid)); |
1655 | } | 1966 | } |
1656 | 1967 | ||
1657 | #ifndef __NR_seccomp | ||
1658 | # if defined(__i386__) | ||
1659 | # define __NR_seccomp 354 | ||
1660 | # elif defined(__x86_64__) | ||
1661 | # define __NR_seccomp 317 | ||
1662 | # elif defined(__arm__) | ||
1663 | # define __NR_seccomp 383 | ||
1664 | # elif defined(__aarch64__) | ||
1665 | # define __NR_seccomp 277 | ||
1666 | # elif defined(__hppa__) | ||
1667 | # define __NR_seccomp 338 | ||
1668 | # elif defined(__powerpc__) | ||
1669 | # define __NR_seccomp 358 | ||
1670 | # elif defined(__s390__) | ||
1671 | # define __NR_seccomp 348 | ||
1672 | # else | ||
1673 | # warning "seccomp syscall number unknown for this architecture" | ||
1674 | # define __NR_seccomp 0xffff | ||
1675 | # endif | ||
1676 | #endif | ||
1677 | |||
1678 | #ifndef SECCOMP_SET_MODE_STRICT | ||
1679 | #define SECCOMP_SET_MODE_STRICT 0 | ||
1680 | #endif | ||
1681 | |||
1682 | #ifndef SECCOMP_SET_MODE_FILTER | ||
1683 | #define SECCOMP_SET_MODE_FILTER 1 | ||
1684 | #endif | ||
1685 | |||
1686 | #ifndef SECCOMP_FILTER_FLAG_TSYNC | ||
1687 | #define SECCOMP_FILTER_FLAG_TSYNC 1 | ||
1688 | #endif | ||
1689 | |||
1690 | #ifndef seccomp | ||
1691 | int seccomp(unsigned int op, unsigned int flags, void *args) | ||
1692 | { | ||
1693 | errno = 0; | ||
1694 | return syscall(__NR_seccomp, op, flags, args); | ||
1695 | } | ||
1696 | #endif | ||
1697 | |||
1698 | TEST(seccomp_syscall) | 1968 | TEST(seccomp_syscall) |
1699 | { | 1969 | { |
1700 | struct sock_filter filter[] = { | 1970 | struct sock_filter filter[] = { |
@@ -1783,6 +2053,67 @@ TEST(seccomp_syscall_mode_lock) | |||
1783 | } | 2053 | } |
1784 | } | 2054 | } |
1785 | 2055 | ||
2056 | /* | ||
2057 | * Test detection of known and unknown filter flags. Userspace needs to be able | ||
2058 | * to check if a filter flag is supported by the current kernel and a good way | ||
2059 | * of doing that is by attempting to enter filter mode, with the flag bit in | ||
2060 | * question set, and a NULL pointer for the _args_ parameter. EFAULT indicates | ||
2061 | * that the flag is valid and EINVAL indicates that the flag is invalid. | ||
2062 | */ | ||
2063 | TEST(detect_seccomp_filter_flags) | ||
2064 | { | ||
2065 | unsigned int flags[] = { SECCOMP_FILTER_FLAG_TSYNC, | ||
2066 | SECCOMP_FILTER_FLAG_LOG }; | ||
2067 | unsigned int flag, all_flags; | ||
2068 | int i; | ||
2069 | long ret; | ||
2070 | |||
2071 | /* Test detection of known-good filter flags */ | ||
2072 | for (i = 0, all_flags = 0; i < ARRAY_SIZE(flags); i++) { | ||
2073 | flag = flags[i]; | ||
2074 | ret = seccomp(SECCOMP_SET_MODE_FILTER, flag, NULL); | ||
2075 | ASSERT_NE(ENOSYS, errno) { | ||
2076 | TH_LOG("Kernel does not support seccomp syscall!"); | ||
2077 | } | ||
2078 | EXPECT_EQ(-1, ret); | ||
2079 | EXPECT_EQ(EFAULT, errno) { | ||
2080 | TH_LOG("Failed to detect that a known-good filter flag (0x%X) is supported!", | ||
2081 | flag); | ||
2082 | } | ||
2083 | |||
2084 | all_flags |= flag; | ||
2085 | } | ||
2086 | |||
2087 | /* Test detection of all known-good filter flags */ | ||
2088 | ret = seccomp(SECCOMP_SET_MODE_FILTER, all_flags, NULL); | ||
2089 | EXPECT_EQ(-1, ret); | ||
2090 | EXPECT_EQ(EFAULT, errno) { | ||
2091 | TH_LOG("Failed to detect that all known-good filter flags (0x%X) are supported!", | ||
2092 | all_flags); | ||
2093 | } | ||
2094 | |||
2095 | /* Test detection of an unknown filter flag */ | ||
2096 | flag = -1; | ||
2097 | ret = seccomp(SECCOMP_SET_MODE_FILTER, flag, NULL); | ||
2098 | EXPECT_EQ(-1, ret); | ||
2099 | EXPECT_EQ(EINVAL, errno) { | ||
2100 | TH_LOG("Failed to detect that an unknown filter flag (0x%X) is unsupported!", | ||
2101 | flag); | ||
2102 | } | ||
2103 | |||
2104 | /* | ||
2105 | * Test detection of an unknown filter flag that may simply need to be | ||
2106 | * added to this test | ||
2107 | */ | ||
2108 | flag = flags[ARRAY_SIZE(flags) - 1] << 1; | ||
2109 | ret = seccomp(SECCOMP_SET_MODE_FILTER, flag, NULL); | ||
2110 | EXPECT_EQ(-1, ret); | ||
2111 | EXPECT_EQ(EINVAL, errno) { | ||
2112 | TH_LOG("Failed to detect that an unknown filter flag (0x%X) is unsupported! Does a new flag need to be added to this test?", | ||
2113 | flag); | ||
2114 | } | ||
2115 | } | ||
2116 | |||
1786 | TEST(TSYNC_first) | 2117 | TEST(TSYNC_first) |
1787 | { | 2118 | { |
1788 | struct sock_filter filter[] = { | 2119 | struct sock_filter filter[] = { |
@@ -2421,6 +2752,99 @@ TEST(syscall_restart) | |||
2421 | _metadata->passed = 0; | 2752 | _metadata->passed = 0; |
2422 | } | 2753 | } |
2423 | 2754 | ||
2755 | TEST_SIGNAL(filter_flag_log, SIGSYS) | ||
2756 | { | ||
2757 | struct sock_filter allow_filter[] = { | ||
2758 | BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW), | ||
2759 | }; | ||
2760 | struct sock_filter kill_filter[] = { | ||
2761 | BPF_STMT(BPF_LD|BPF_W|BPF_ABS, | ||
2762 | offsetof(struct seccomp_data, nr)), | ||
2763 | BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_getpid, 0, 1), | ||
2764 | BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_KILL), | ||
2765 | BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW), | ||
2766 | }; | ||
2767 | struct sock_fprog allow_prog = { | ||
2768 | .len = (unsigned short)ARRAY_SIZE(allow_filter), | ||
2769 | .filter = allow_filter, | ||
2770 | }; | ||
2771 | struct sock_fprog kill_prog = { | ||
2772 | .len = (unsigned short)ARRAY_SIZE(kill_filter), | ||
2773 | .filter = kill_filter, | ||
2774 | }; | ||
2775 | long ret; | ||
2776 | pid_t parent = getppid(); | ||
2777 | |||
2778 | ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0); | ||
2779 | ASSERT_EQ(0, ret); | ||
2780 | |||
2781 | /* Verify that the FILTER_FLAG_LOG flag isn't accepted in strict mode */ | ||
2782 | ret = seccomp(SECCOMP_SET_MODE_STRICT, SECCOMP_FILTER_FLAG_LOG, | ||
2783 | &allow_prog); | ||
2784 | ASSERT_NE(ENOSYS, errno) { | ||
2785 | TH_LOG("Kernel does not support seccomp syscall!"); | ||
2786 | } | ||
2787 | EXPECT_NE(0, ret) { | ||
2788 | TH_LOG("Kernel accepted FILTER_FLAG_LOG flag in strict mode!"); | ||
2789 | } | ||
2790 | EXPECT_EQ(EINVAL, errno) { | ||
2791 | TH_LOG("Kernel returned unexpected errno for FILTER_FLAG_LOG flag in strict mode!"); | ||
2792 | } | ||
2793 | |||
2794 | /* Verify that a simple, permissive filter can be added with no flags */ | ||
2795 | ret = seccomp(SECCOMP_SET_MODE_FILTER, 0, &allow_prog); | ||
2796 | EXPECT_EQ(0, ret); | ||
2797 | |||
2798 | /* See if the same filter can be added with the FILTER_FLAG_LOG flag */ | ||
2799 | ret = seccomp(SECCOMP_SET_MODE_FILTER, SECCOMP_FILTER_FLAG_LOG, | ||
2800 | &allow_prog); | ||
2801 | ASSERT_NE(EINVAL, errno) { | ||
2802 | TH_LOG("Kernel does not support the FILTER_FLAG_LOG flag!"); | ||
2803 | } | ||
2804 | EXPECT_EQ(0, ret); | ||
2805 | |||
2806 | /* Ensure that the kill filter works with the FILTER_FLAG_LOG flag */ | ||
2807 | ret = seccomp(SECCOMP_SET_MODE_FILTER, SECCOMP_FILTER_FLAG_LOG, | ||
2808 | &kill_prog); | ||
2809 | EXPECT_EQ(0, ret); | ||
2810 | |||
2811 | EXPECT_EQ(parent, syscall(__NR_getppid)); | ||
2812 | /* getpid() should never return. */ | ||
2813 | EXPECT_EQ(0, syscall(__NR_getpid)); | ||
2814 | } | ||
2815 | |||
2816 | TEST(get_action_avail) | ||
2817 | { | ||
2818 | __u32 actions[] = { SECCOMP_RET_KILL_THREAD, SECCOMP_RET_TRAP, | ||
2819 | SECCOMP_RET_ERRNO, SECCOMP_RET_TRACE, | ||
2820 | SECCOMP_RET_LOG, SECCOMP_RET_ALLOW }; | ||
2821 | __u32 unknown_action = 0x10000000U; | ||
2822 | int i; | ||
2823 | long ret; | ||
2824 | |||
2825 | ret = seccomp(SECCOMP_GET_ACTION_AVAIL, 0, &actions[0]); | ||
2826 | ASSERT_NE(ENOSYS, errno) { | ||
2827 | TH_LOG("Kernel does not support seccomp syscall!"); | ||
2828 | } | ||
2829 | ASSERT_NE(EINVAL, errno) { | ||
2830 | TH_LOG("Kernel does not support SECCOMP_GET_ACTION_AVAIL operation!"); | ||
2831 | } | ||
2832 | EXPECT_EQ(ret, 0); | ||
2833 | |||
2834 | for (i = 0; i < ARRAY_SIZE(actions); i++) { | ||
2835 | ret = seccomp(SECCOMP_GET_ACTION_AVAIL, 0, &actions[i]); | ||
2836 | EXPECT_EQ(ret, 0) { | ||
2837 | TH_LOG("Expected action (0x%X) not available!", | ||
2838 | actions[i]); | ||
2839 | } | ||
2840 | } | ||
2841 | |||
2842 | /* Check that an unknown action is handled properly (EOPNOTSUPP) */ | ||
2843 | ret = seccomp(SECCOMP_GET_ACTION_AVAIL, 0, &unknown_action); | ||
2844 | EXPECT_EQ(ret, -1); | ||
2845 | EXPECT_EQ(errno, EOPNOTSUPP); | ||
2846 | } | ||
2847 | |||
2424 | /* | 2848 | /* |
2425 | * TODO: | 2849 | * TODO: |
2426 | * - add microbenchmarks | 2850 | * - add microbenchmarks |
@@ -2429,6 +2853,8 @@ TEST(syscall_restart) | |||
2429 | * - endianness checking when appropriate | 2853 | * - endianness checking when appropriate |
2430 | * - 64-bit arg prodding | 2854 | * - 64-bit arg prodding |
2431 | * - arch value testing (x86 modes especially) | 2855 | * - arch value testing (x86 modes especially) |
2856 | * - verify that FILTER_FLAG_LOG filters generate log messages | ||
2857 | * - verify that RET_LOG generates log messages | ||
2432 | * - ... | 2858 | * - ... |
2433 | */ | 2859 | */ |
2434 | 2860 | ||
diff --git a/tools/testing/selftests/sigaltstack/sas.c b/tools/testing/selftests/sigaltstack/sas.c index 7d406c3973ba..97bb150837df 100644 --- a/tools/testing/selftests/sigaltstack/sas.c +++ b/tools/testing/selftests/sigaltstack/sas.c | |||
@@ -39,7 +39,11 @@ void my_usr1(int sig, siginfo_t *si, void *u) | |||
39 | stack_t stk; | 39 | stack_t stk; |
40 | struct stk_data *p; | 40 | struct stk_data *p; |
41 | 41 | ||
42 | #if __s390x__ | ||
43 | register unsigned long sp asm("%15"); | ||
44 | #else | ||
42 | register unsigned long sp asm("sp"); | 45 | register unsigned long sp asm("sp"); |
46 | #endif | ||
43 | 47 | ||
44 | if (sp < (unsigned long)sstack || | 48 | if (sp < (unsigned long)sstack || |
45 | sp >= (unsigned long)sstack + SIGSTKSZ) { | 49 | sp >= (unsigned long)sstack + SIGSTKSZ) { |
diff --git a/tools/testing/selftests/sync/Makefile b/tools/testing/selftests/sync/Makefile index 4981c6b6d050..8e04d0afcbd7 100644 --- a/tools/testing/selftests/sync/Makefile +++ b/tools/testing/selftests/sync/Makefile | |||
@@ -2,12 +2,16 @@ CFLAGS += -O2 -g -std=gnu89 -pthread -Wall -Wextra | |||
2 | CFLAGS += -I../../../../usr/include/ | 2 | CFLAGS += -I../../../../usr/include/ |
3 | LDFLAGS += -pthread | 3 | LDFLAGS += -pthread |
4 | 4 | ||
5 | TEST_PROGS = sync_test | 5 | .PHONY: all clean |
6 | |||
7 | all: $(TEST_PROGS) | ||
8 | 6 | ||
9 | include ../lib.mk | 7 | include ../lib.mk |
10 | 8 | ||
9 | # lib.mk TEST_CUSTOM_PROGS var is for custom tests that need special | ||
10 | # build rules. lib.mk will run and install them. | ||
11 | |||
12 | TEST_CUSTOM_PROGS := $(OUTPUT)/sync_test | ||
13 | all: $(TEST_CUSTOM_PROGS) | ||
14 | |||
11 | OBJS = sync_test.o sync.o | 15 | OBJS = sync_test.o sync.o |
12 | 16 | ||
13 | TESTS += sync_alloc.o | 17 | TESTS += sync_alloc.o |
@@ -18,6 +22,16 @@ TESTS += sync_stress_parallelism.o | |||
18 | TESTS += sync_stress_consumer.o | 22 | TESTS += sync_stress_consumer.o |
19 | TESTS += sync_stress_merge.o | 23 | TESTS += sync_stress_merge.o |
20 | 24 | ||
21 | sync_test: $(OBJS) $(TESTS) | 25 | OBJS := $(patsubst %,$(OUTPUT)/%,$(OBJS)) |
26 | TESTS := $(patsubst %,$(OUTPUT)/%,$(TESTS)) | ||
27 | |||
28 | $(TEST_CUSTOM_PROGS): $(TESTS) $(OBJS) | ||
29 | $(CC) -o $(TEST_CUSTOM_PROGS) $(OBJS) $(TESTS) $(CFLAGS) $(LDFLAGS) | ||
30 | |||
31 | $(OBJS): $(OUTPUT)/%.o: %.c | ||
32 | $(CC) -c $^ -o $@ | ||
33 | |||
34 | $(TESTS): $(OUTPUT)/%.o: %.c | ||
35 | $(CC) -c $^ -o $@ | ||
22 | 36 | ||
23 | EXTRA_CLEAN := sync_test $(OBJS) $(TESTS) | 37 | EXTRA_CLEAN := $(TEST_CUSTOM_PROGS) $(OBJS) $(TESTS) |
diff --git a/tools/testing/selftests/timers/set-timer-lat.c b/tools/testing/selftests/timers/set-timer-lat.c index 9c92b7bd5641..50da45437daa 100644 --- a/tools/testing/selftests/timers/set-timer-lat.c +++ b/tools/testing/selftests/timers/set-timer-lat.c | |||
@@ -143,7 +143,8 @@ int setup_timer(int clock_id, int flags, int interval, timer_t *tm1) | |||
143 | printf("%-22s %s missing CAP_WAKE_ALARM? : [UNSUPPORTED]\n", | 143 | printf("%-22s %s missing CAP_WAKE_ALARM? : [UNSUPPORTED]\n", |
144 | clockstring(clock_id), | 144 | clockstring(clock_id), |
145 | flags ? "ABSTIME":"RELTIME"); | 145 | flags ? "ABSTIME":"RELTIME"); |
146 | return 0; | 146 | /* Indicate timer isn't set, so caller doesn't wait */ |
147 | return 1; | ||
147 | } | 148 | } |
148 | printf("%s - timer_create() failed\n", clockstring(clock_id)); | 149 | printf("%s - timer_create() failed\n", clockstring(clock_id)); |
149 | return -1; | 150 | return -1; |
@@ -213,8 +214,9 @@ int do_timer(int clock_id, int flags) | |||
213 | int err; | 214 | int err; |
214 | 215 | ||
215 | err = setup_timer(clock_id, flags, interval, &tm1); | 216 | err = setup_timer(clock_id, flags, interval, &tm1); |
217 | /* Unsupported case - return 0 to not fail the test */ | ||
216 | if (err) | 218 | if (err) |
217 | return err; | 219 | return err == 1 ? 0 : err; |
218 | 220 | ||
219 | while (alarmcount < 5) | 221 | while (alarmcount < 5) |
220 | sleep(1); | 222 | sleep(1); |
@@ -228,18 +230,17 @@ int do_timer_oneshot(int clock_id, int flags) | |||
228 | timer_t tm1; | 230 | timer_t tm1; |
229 | const int interval = 0; | 231 | const int interval = 0; |
230 | struct timeval timeout; | 232 | struct timeval timeout; |
231 | fd_set fds; | ||
232 | int err; | 233 | int err; |
233 | 234 | ||
234 | err = setup_timer(clock_id, flags, interval, &tm1); | 235 | err = setup_timer(clock_id, flags, interval, &tm1); |
236 | /* Unsupported case - return 0 to not fail the test */ | ||
235 | if (err) | 237 | if (err) |
236 | return err; | 238 | return err == 1 ? 0 : err; |
237 | 239 | ||
238 | memset(&timeout, 0, sizeof(timeout)); | 240 | memset(&timeout, 0, sizeof(timeout)); |
239 | timeout.tv_sec = 5; | 241 | timeout.tv_sec = 5; |
240 | FD_ZERO(&fds); | ||
241 | do { | 242 | do { |
242 | err = select(FD_SETSIZE, &fds, NULL, NULL, &timeout); | 243 | err = select(0, NULL, NULL, NULL, &timeout); |
243 | } while (err == -1 && errno == EINTR); | 244 | } while (err == -1 && errno == EINTR); |
244 | 245 | ||
245 | timer_delete(tm1); | 246 | timer_delete(tm1); |
diff --git a/tools/testing/selftests/watchdog/Makefile b/tools/testing/selftests/watchdog/Makefile index f863c664e3d1..ee068511fd0b 100644 --- a/tools/testing/selftests/watchdog/Makefile +++ b/tools/testing/selftests/watchdog/Makefile | |||
@@ -1,8 +1,3 @@ | |||
1 | TEST_PROGS := watchdog-test | 1 | TEST_GEN_PROGS := watchdog-test |
2 | |||
3 | all: $(TEST_PROGS) | ||
4 | 2 | ||
5 | include ../lib.mk | 3 | include ../lib.mk |
6 | |||
7 | clean: | ||
8 | rm -fr $(TEST_PROGS) | ||
diff --git a/virt/kvm/eventfd.c b/virt/kvm/eventfd.c index c608ab495282..f2ac53ab8243 100644 --- a/virt/kvm/eventfd.c +++ b/virt/kvm/eventfd.c | |||
@@ -565,8 +565,6 @@ kvm_irqfd(struct kvm *kvm, struct kvm_irqfd *args) | |||
565 | { | 565 | { |
566 | if (args->flags & ~(KVM_IRQFD_FLAG_DEASSIGN | KVM_IRQFD_FLAG_RESAMPLE)) | 566 | if (args->flags & ~(KVM_IRQFD_FLAG_DEASSIGN | KVM_IRQFD_FLAG_RESAMPLE)) |
567 | return -EINVAL; | 567 | return -EINVAL; |
568 | if (args->gsi >= KVM_MAX_IRQ_ROUTES) | ||
569 | return -EINVAL; | ||
570 | 568 | ||
571 | if (args->flags & KVM_IRQFD_FLAG_DEASSIGN) | 569 | if (args->flags & KVM_IRQFD_FLAG_DEASSIGN) |
572 | return kvm_irqfd_deassign(kvm, args); | 570 | return kvm_irqfd_deassign(kvm, args); |